-
Notifications
You must be signed in to change notification settings - Fork 0
/
wumpus_planners.py
299 lines (268 loc) · 12.4 KB
/
wumpus_planners.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
# wumpus_planners.py
# ------------------
# Licensing Information:
# Please DO NOT DISTRIBUTE OR PUBLISH solutions to this project.
# You are free to use and extend these projects for EDUCATIONAL PURPOSES ONLY.
# The Hunt The Wumpus AI project was developed at University of Arizona
# by Clay Morrison (clayton@sista.arizona.edu), spring 2013.
# This project extends the python code provided by Peter Norvig as part of
# the Artificial Intelligence: A Modern Approach (AIMA) book example code;
# see http://aima.cs.berkeley.edu/code.html
# In particular, the following files come directly from the AIMA python
# code: ['agents.py', 'logic.py', 'search.py', 'utils.py']
# ('logic.py' has been modified by Clay Morrison in locations with the
# comment 'CTM')
# The file ['minisat.py'] implements a slim system call wrapper to the minisat
# (see http://minisat.se) SAT solver, and is directly based on the satispy
# python project, see https://github.com/netom/satispy .
from wumpus_environment import *
from wumpus_kb import *
import search
#-------------------------------------------------------------------------------
# Distance fn
#-------------------------------------------------------------------------------
def manhattan_distance_with_heading(current, target):
    """
    Return the Manhattan distance from current to target, plus the
    number of turn moves needed before the target lies ahead of the
    current heading.
    current: (x,y,h) tuple, so: [0]=x, [1]=y, [2]=h=heading
    heading: 0:^:north 1:<:west 2:v:south 3:>:east
    """
    cx, cy, h = current[0], current[1], current[2]
    tx, ty = target[0], target[1]
    dist = abs(cx - tx) + abs(cy - ty)
    # 'behind': target is on the opposite side of the facing direction,
    # so two turns are required to face it.
    # 'off_axis': target is ahead (or level) but not straight ahead along
    # the heading axis, so a single turn is required.
    # An unrecognized heading adds no turn cost (both flags stay False).
    behind = off_axis = False
    if h == 0:    # facing north: behind iff target row is south of agent
        behind, off_axis = ty < cy, tx != cx
    elif h == 1:  # facing west: behind iff target column is east of agent
        behind, off_axis = tx > cx, ty != cy
    elif h == 2:  # facing south: behind iff target row is north of agent
        behind, off_axis = ty > cy, tx != cx
    elif h == 3:  # facing east: behind iff target column is west of agent
        behind, off_axis = tx < cx, ty != cy
    if behind:
        dist += 2
    elif off_axis:
        dist += 1
    # target straight ahead needs no turning at all
    return dist
#-------------------------------------------------------------------------------
# Plan Route
#-------------------------------------------------------------------------------
def plan_route(current, heading, goals, allowed):
    """
    Plan a path from the current location to the closest goal.
    Given:
       current location: tuple (x,y)
       heading: direction as integer code or name string
       goals: list of one or more tuple goal locations
       allowed: list of locations that can be moved to
    ... return a list of actions (no time stamps!) that when executed
    will take the agent from the current location to one of (the closest)
    goal locations, or the empty list if no route can be found.
    A state is represented as a triple (x, y, heading), where heading
    is an integer: 0='north', 1='west', 2='south', 3='east'.
    """
    # Normalize a string heading (e.g. 'north') to its integer code.
    if isinstance(heading, str):
        heading = Explorer.heading_str_to_num[heading]
    if not goals or not allowed:
        # nothing to plan toward / nowhere to move: no route
        return []
    # PlanRouteProblem defines its own h(), so astar_search needs no
    # explicit heuristic argument here.
    problem = PlanRouteProblem((current[0], current[1], heading), goals, allowed)
    node = search.astar_search(problem)
    # astar_search returns a node on success; node.solution() extracts
    # the list of actions.
    return node.solution() if node else []
#-------------------------------------------------------------------------------
class PlanRouteProblem(search.Problem):
    def __init__(self, initial, goals, allowed):
        """ Problem defining planning of route to closest goal
        Goal is generally a location (x,y) tuple, but state will be (x,y,heading) tuple
        initial = initial state, (x,y,heading) tuple
        goals = list of goal (x,y) tuples
        allowed = list of (x,y) tuples that agent could move to """
        self.initial = initial  # initial state
        self.goals = goals      # list of goals that can be achieved
        self.allowed = allowed  # the locations we can move into

    # (dx, dy) movement deltas indexed by heading:
    # 0=north, 1=west, 2=south, 3=east
    heading_deltas = ((0, 1), (-1, 0), (0, -1), (1, 0))

    def h(self, node):
        """
        Heuristic used by search.astar_search(): distance (Manhattan
        moves plus required turns) from the node's state to the nearest
        goal.  Admissible, since it ignores obstacles and every Forward
        or turn costs one action.
        (Fix: the original overwrote its minimum on every loop pass and
        passed the search Node itself where a state tuple was expected.)
        """
        return min(manhattan_distance_with_heading(node.state, goal)
                   for goal in self.goals)

    def actions(self, state):
        """
        Return list of allowed actions that can be made in state.
        Turning is always possible; 'Forward' only when the square ahead
        is an allowed location.
        """
        x, y, heading = state
        dx, dy = self.heading_deltas[heading]
        acts = ['TurnLeft', 'TurnRight']
        if (x + dx, y + dy) in self.allowed:
            acts.append('Forward')
        return acts

    def result(self, state, action):
        """
        Return the new (x,y,heading) state after applying action to state.
        Headings are numbered counterclockwise (0=N, 1=W, 2=S, 3=E), so
        TurnLeft increments the heading and TurnRight decrements it,
        both modulo 4.
        """
        x, y, heading = state
        if action == 'TurnLeft':
            return (x, y, (heading + 1) % 4)
        if action == 'TurnRight':
            return (x, y, (heading - 1) % 4)
        if action == 'Forward':
            dx, dy = self.heading_deltas[heading]
            return (x + dx, y + dy, heading)
        return state  # unknown action: treat as a no-op

    def goal_test(self, state):
        """
        Return True if state is a goal state, i.e. the (x,y) part of the
        (x,y,heading) state is one of the goal locations.
        (Fix: the original compared the full 3-tuple state to a 2-tuple
        goal, which could never be equal.)
        """
        return (state[0], state[1]) in self.goals
#-------------------------------------------------------------------------------
def test_PRP(initial):
    """
    Sanity check for PlanRouteProblem: the 'expected initial states and
    solution pairs' below show what the solution is expected to produce.
    Pass the 'initial state' triple as the argument to test_PRP and the
    associated list of actions is the expected result.
    The test assumes the goals are [(2,3),(3,2)], that the heuristic fn
    defined in PlanRouteProblem uses the manhattan_distance_with_heading()
    fn above, and the allowed locations are:
        [(0,0),(0,1),(0,2),(0,3),
         (1,0),(1,1),(1,2),(1,3),
         (2,0),             (2,3),
         (3,0),(3,1),(3,2),(3,3)]
    Expected initial state and solution pairs:
    (0,0,0) : ['Forward', 'Forward', 'Forward', 'TurnRight', 'Forward', 'Forward']
    (0,0,1) : ['TurnRight', 'Forward', 'Forward', 'Forward', 'TurnRight', 'Forward', 'Forward']
    (0,0,2) : ['TurnLeft', 'Forward', 'Forward', 'Forward', 'TurnLeft', 'Forward', 'Forward']
    (0,0,3) : ['Forward', 'Forward', 'Forward', 'TurnLeft', 'Forward', 'Forward']
    """
    goals = [(2,3),(3,2)]
    allowed = [(0,0),(0,1),(0,2),(0,3),
               (1,0),(1,1),(1,2),(1,3),
               (2,0),             (2,3),
               (3,0),(3,1),(3,2),(3,3)]
    return plan_route((initial[0], initial[1]), initial[2], goals, allowed)
#-------------------------------------------------------------------------------
# Plan Shot
#-------------------------------------------------------------------------------
def plan_shot(current, heading, goals, allowed):
    """ Plan route to nearest location with heading directed toward one of the
    possible wumpus locations (in goals), then append shoot action.
    NOTE: This assumes you can shoot through walls!!  That's ok for now. """
    if not (goals and allowed):
        # no route can be found, return empty list
        return []
    problem = PlanShotProblem((current[0], current[1], heading), goals, allowed)
    node = search.astar_search(problem)
    if not node:
        return []
    plan = node.solution()
    plan.append(action_shoot_str(None))
    # HACK:
    # since the wumpus_alive axiom asserts that a wumpus is no longer alive
    # when on the previous round we perceived a scream, we
    # need to enforce waiting so that time elapses and knowledge of
    # "dead wumpus" can then be inferred...
    plan.append(action_wait_str(None))
    return plan
#-------------------------------------------------------------------------------
class PlanShotProblem(search.Problem):
    def __init__(self, initial, goals, allowed):
        """ Problem defining planning to move to location to be ready to
        shoot at nearest wumpus location
        NOTE: Just like PlanRouteProblem, except goal is to plan path to
              nearest location with heading in direction of a possible
              wumpus location;
              Shoot and Wait actions are appended to this search solution
        Goal is generally a location (x,y) tuple, but state will be (x,y,heading) tuple
        initial = initial state, (x,y,heading) tuple
        goals = list of possible wumpus (x,y) locations
        allowed = list of (x,y) tuples that agent could move to """
        self.initial = initial  # initial state
        self.goals = goals      # possible wumpus locations
        self.allowed = allowed  # the locations we can move into

    # (dx, dy) movement deltas indexed by heading:
    # 0=north, 1=west, 2=south, 3=east
    heading_deltas = ((0, 1), (-1, 0), (0, -1), (1, 0))

    def h(self, node):
        """
        Heuristic used by search.astar_search(): distance (Manhattan
        moves plus required turns) from the node's state to the nearest
        possible wumpus location, per test_PSP's stated assumption that
        this class uses manhattan_distance_with_heading().
        NOTE(review): this can overestimate, since a valid shooting
        position stops short of the wumpus square itself, so A* may
        return a slightly suboptimal plan (consistent with the sample
        plans in test_PSP).
        """
        return min(manhattan_distance_with_heading(node.state, goal)
                   for goal in self.goals)

    def actions(self, state):
        """
        Return list of allowed actions that can be made in state.
        Turning is always possible; 'Forward' only when the square ahead
        is an allowed location.
        """
        x, y, heading = state
        dx, dy = self.heading_deltas[heading]
        acts = ['TurnLeft', 'TurnRight']
        if (x + dx, y + dy) in self.allowed:
            acts.append('Forward')
        return acts

    def result(self, state, action):
        """
        Return the new (x,y,heading) state after applying action to state.
        Headings are numbered counterclockwise (0=N, 1=W, 2=S, 3=E), so
        TurnLeft increments the heading and TurnRight decrements it,
        both modulo 4.
        (Fix: the original class had no result() at all, so the base
        search.Problem.result would raise NotImplementedError.)
        """
        x, y, heading = state
        if action == 'TurnLeft':
            return (x, y, (heading + 1) % 4)
        if action == 'TurnRight':
            return (x, y, (heading - 1) % 4)
        if action == 'Forward':
            dx, dy = self.heading_deltas[heading]
            return (x + dx, y + dy, heading)
        return state  # unknown action: treat as a no-op

    def goal_test(self, state):
        """
        Return True if state is a goal state: some possible wumpus
        location lies straight ahead of state's location along its
        heading.  The arrow is assumed to fly through walls (see
        plan_shot), so any aligned goal in front counts.
        (Fix: the original compared the 3-tuple state to a 2-tuple
        wumpus location, which could never be equal.)
        """
        x, y, heading = state
        for gx, gy in self.goals:
            if heading == 0 and gx == x and gy > y:    # facing north
                return True
            if heading == 1 and gy == y and gx < x:    # facing west
                return True
            if heading == 2 and gx == x and gy < y:    # facing south
                return True
            if heading == 3 and gy == y and gx > x:    # facing east
                return True
        return False
#-------------------------------------------------------------------------------
def test_PSP(initial = (0,0,3)):
    """
    Sanity check for PlanShotProblem: the 'expected initial states and
    solution pairs' below show what the solution is expected to produce.
    Pass the 'initial state' triple as the argument to test_PSP and the
    associated list of actions is the expected result.
    The test assumes the goals are [(2,3),(3,2)], that the heuristic fn
    defined in PlanShotProblem uses the manhattan_distance_with_heading()
    fn above, and the allowed locations are:
        [(0,0),(0,1),(0,2),(0,3),
         (1,0),(1,1),(1,2),(1,3),
         (2,0),             (2,3),
         (3,0),(3,1),(3,2),(3,3)]
    Expected initial state and solution pairs:
    (0,0,0) : ['Forward', 'Forward', 'TurnRight', 'Shoot', 'Wait']
    (0,0,1) : ['TurnRight', 'Forward', 'Forward', 'TurnRight', 'Shoot', 'Wait']
    (0,0,2) : ['TurnLeft', 'Forward', 'Forward', 'Forward', 'TurnLeft', 'Shoot', 'Wait']
    (0,0,3) : ['Forward', 'Forward', 'Forward', 'TurnLeft', 'Shoot', 'Wait']
    """
    goals = [(2,3),(3,2)]
    allowed = [(0,0),(0,1),(0,2),(0,3),
               (1,0),(1,1),(1,2),(1,3),
               (2,0),             (2,3),
               (3,0),(3,1),(3,2),(3,3)]
    return plan_shot((initial[0], initial[1]), initial[2], goals, allowed)
#-------------------------------------------------------------------------------