@@ -131,7 +131,16 @@ def program(percept):
131
131
132
132
133
133
def RandomAgentProgram(actions):
    """Return an agent program that chooses an action at random,
    ignoring all percepts.

    Args:
        actions: a non-empty sequence of actions to pick from uniformly.

    Returns:
        A program: a callable taking a percept and returning one of *actions*.

    >>> actions = ['Right', 'Left', 'Suck', 'NoOp']
    >>> program = RandomAgentProgram(actions)
    >>> agent = Agent(program)
    >>> environment = TrivialVacuumEnvironment()
    >>> environment.add_thing(agent)
    >>> environment.run()
    >>> environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
    True
    """
    # The closure captures `actions`; the percept argument is deliberately unused.
    return lambda percept: random.choice(actions)
136
145
137
146
# ______________________________________________________________________________
@@ -171,7 +180,14 @@ def rule_match(state, rules):
171
180
172
181
173
182
def RandomVacuumAgent():
    """Return an agent that randomly chooses one of the actions from
    the vacuum environment, ignoring its percepts.

    >>> agent = RandomVacuumAgent()
    >>> environment = TrivialVacuumEnvironment()
    >>> environment.add_thing(agent)
    >>> environment.run()
    >>> environment.status == {(1, 0): 'Clean', (0, 0): 'Clean'}
    True
    """
    # The four actions of the two-location vacuum world; NoOp is a legal no-move.
    return Agent(RandomAgentProgram(['Right', 'Left', 'Suck', 'NoOp']))
176
192
177
193
@@ -192,7 +208,14 @@ def TableDrivenVacuumAgent():
192
208
193
209
194
210
def ReflexVacuumAgent ():
195
- """A reflex agent for the two-state vacuum environment. [Figure 2.8]"""
211
+ """A reflex agent for the two-state vacuum environment. [Figure 2.8]
212
+ >>> agent = ReflexVacuumAgent()
213
+ >>> environment = TrivialVacuumEnvironment()
214
+ >>> environment.add_thing(agent)
215
+ >>> environment.run()
216
+ >>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'}
217
+ True
218
+ """
196
219
def program (percept ):
197
220
location , status = percept
198
221
if status == 'Dirty' :
@@ -205,7 +228,14 @@ def program(percept):
205
228
206
229
207
230
def ModelBasedVacuumAgent ():
208
- """An agent that keeps track of what locations are clean or dirty."""
231
+ """An agent that keeps track of what locations are clean or dirty.
232
+ >>> agent = ModelBasedVacuumAgent()
233
+ >>> environment = TrivialVacuumEnvironment()
234
+ >>> environment.add_thing(agent)
235
+ >>> environment.run()
236
+ >>> environment.status == {(1,0):'Clean' , (0,0) : 'Clean'}
237
+ True
238
+ """
209
239
model = {loc_A : None , loc_B : None }
210
240
211
241
def program (percept ):
@@ -342,6 +372,22 @@ def __init__(self, direction):
342
372
self .direction = direction
343
373
344
374
def __add__ (self , heading ):
375
+ """
376
+ >>> d = Direction('right')
377
+ >>> l1 = d.__add__(Direction.L)
378
+ >>> l2 = d.__add__(Direction.R)
379
+ >>> l1.direction
380
+ 'up'
381
+ >>> l2.direction
382
+ 'down'
383
+ >>> d = Direction('down')
384
+ >>> l1 = d.__add__('right')
385
+ >>> l2 = d.__add__('left')
386
+ >>> l1.direction == Direction.L
387
+ True
388
+ >>> l2.direction == Direction.R
389
+ True
390
+ """
345
391
if self .direction == self .R :
346
392
return {
347
393
self .R : Direction (self .D ),
@@ -364,6 +410,16 @@ def __add__(self, heading):
364
410
}.get (heading , None )
365
411
366
412
def move_forward (self , from_location ):
413
+ """
414
+ >>> d = Direction('up')
415
+ >>> l1 = d.move_forward((0, 0))
416
+ >>> l1
417
+ (0, -1)
418
+ >>> d = Direction(Direction.R)
419
+ >>> l1 = d.move_forward((0, 0))
420
+ >>> l1
421
+ (1, 0)
422
+ """
367
423
x , y = from_location
368
424
if self .direction == self .R :
369
425
return (x + 1 , y )
def compare_agents(EnvFactory, AgentFactories, n=10, steps=1000):
    """See how well each of several agents do in n instances of an environment.

    Pass in a factory (constructor) for environments, and several for agents.
    Create n instances of the environment, and run each agent in copies of
    each one for steps. Return a list of (agent, average-score) tuples.

    NOTE(review): the doctest below depends on randomized environments, so the
    comparison is expected-typical rather than guaranteed on every run.

    >>> environment = TrivialVacuumEnvironment
    >>> agents = [ModelBasedVacuumAgent, ReflexVacuumAgent]
    >>> result = compare_agents(environment, agents)
    >>> performance_ModelBasedVacuumAgent = result[0][1]
    >>> performance_ReflexVacuumAgent = result[1][1]
    >>> performance_ReflexVacuumAgent <= performance_ModelBasedVacuumAgent
    True
    """
    # Build the n environments once; each agent is scored on deep copies so
    # one agent's run never mutates the environments seen by the others.
    envs = [EnvFactory() for _ in range(n)]
    return [(A, test_agent(A, steps, copy.deepcopy(envs)))
            for A in AgentFactories]
947
1011
948
1012
949
1013
def test_agent (AgentFactory , steps , envs ):
950
- """Return the mean score of running an agent in each of the envs, for steps"""
1014
+ """Return the mean score of running an agent in each of the envs, for steps
1015
+ >>> def constant_prog(percept):
1016
+ ... return percept
1017
+ ...
1018
+ >>> agent = Agent(constant_prog)
1019
+ >>> result = agent.program(5)
1020
+ >>> result == 5
1021
+ True
1022
+ """
951
1023
def score (env ):
952
1024
agent = AgentFactory ()
953
1025
env .add_thing (agent )
0 commit comments