2 changes: 1 addition & 1 deletion axelrod/strategies/_strategies.py
@@ -51,7 +51,7 @@
from .memoryone import (
    MemoryOnePlayer, ALLCorALLD, FirmButFair, GTFT, SoftJoss,
    StochasticCooperator, StochasticWSLS, ZDExtort2, ZDExtort2v2, ZDExtort4,
    ZDGen2, ZDGTFT2, ZDSet2, WinStayLoseShift, WinShiftLoseStay)
    ZDGen2, ZDGTFT2, ZDSet2, WinStayLoseShift, WinShiftLoseStay, ReactivePlayer)
from .memorytwo import MEM2
from .mindcontrol import MindController, MindWarper, MindBender
from .mindreader import MindReader, ProtectedMindReader, MirrorMindReader
16 changes: 16 additions & 0 deletions axelrod/strategies/memoryone.py
@@ -543,3 +543,19 @@ def strategy(self, opponent: Player) -> Action:
        if len(self.history) == 0:
            return random_choice(0.6)
        return self.history[-1]


class ReactivePlayer(MemoryOnePlayer):
    """
    A generic reactive player. Defined by 2 probabilities conditional on the
    opponent's last move: P(C|C), P(C|D).

    Names:

    - Reactive: [Nowak1989]_
    """
    name = "Reactive Player"
    def __init__(self, probabilities: Tuple[float, float]) -> None:
        four_vector = (*probabilities, *probabilities)
        super().__init__(four_vector)
        self.name = "%s: %s" % (self.name, probabilities)
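The constructor above expands the two reactive probabilities into a full memory-one four-vector by repeating them, once for each of the player's own possible previous moves. A minimal usage sketch, assuming a version of the axelrod library that includes this ReactivePlayer; the opponent and match length below are illustrative only:

import axelrod as axl

# A generous reactive player: cooperate for sure after an opponent C,
# cooperate with probability 0.25 after an opponent D.
player = axl.ReactivePlayer(probabilities=(1, 0.25))

# The memory-one four-vector ignores the player's own last move, so the
# two probabilities repeat: P(C|CC) = P(C|DC) = 1, P(C|CD) = P(C|DD) = 0.25.

# Illustrative match against Tit For Tat over 10 turns.
match = axl.Match((player, axl.TitForTat()), turns=10)
print(match.play())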
38 changes: 38 additions & 0 deletions axelrod/tests/strategies/test_memoryone.py
@@ -490,3 +490,41 @@ def test_strategy(self):
        actions = [(C, C)] * 10
        self.versus_test(opponent=axelrod.Cooperator(),
                         expected_actions=actions, seed=1)


class TestGenericReactiveStrategy(unittest.TestCase):
    """
    Tests for the generic Reactive strategy.
    """
    p1 = axelrod.ReactivePlayer(probabilities=(0, 0))
    p2 = axelrod.ReactivePlayer(probabilities=(1, 0))
    p3 = axelrod.ReactivePlayer(probabilities=(1, 0.5))

    def test_name(self):
        self.assertEqual(self.p1.name,
                         "Reactive Player: (0, 0)")
        self.assertEqual(self.p2.name,
                         "Reactive Player: (1, 0)")
        self.assertEqual(self.p3.name,
                         "Reactive Player: (1, 0.5)")

    def test_four_vector(self):
        self.assertEqual(self.p1._four_vector,
                         {(C, D): 0.0, (D, C): 0.0,
                          (C, C): 0.0, (D, D): 0.0})
        self.assertEqual(self.p2._four_vector,
                         {(C, D): 0.0, (D, C): 1.0,
                          (C, C): 1.0, (D, D): 0.0})
        self.assertEqual(self.p3._four_vector,
                         {(C, D): 0.5, (D, C): 1.0,
                          (C, C): 1.0, (D, D): 0.5})

    def test_stochastic_classification(self):
        self.assertFalse(self.p1.classifier['stochastic'])
        self.assertFalse(self.p2.classifier['stochastic'])
        self.assertTrue(self.p3.classifier['stochastic'])

    def test_subclass(self):
        self.assertIsInstance(self.p1, MemoryOnePlayer)
        self.assertIsInstance(self.p2, MemoryOnePlayer)
        self.assertIsInstance(self.p3, MemoryOnePlayer)
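The stochastic classification checked above follows from the expanded four-vector: the player is stochastic exactly when some conditional cooperation probability lies strictly between 0 and 1. A standalone sketch of that rule (not the library's internal code), matching the three test fixtures:

def is_stochastic(probabilities):
    # Deterministic only if every conditional probability is exactly 0 or 1.
    return any(0 < p < 1 for p in probabilities)

assert not is_stochastic((0, 0))    # p1 always defects
assert not is_stochastic((1, 0))    # p2 reacts deterministically
assert is_stochastic((1, 0.5))      # p3 mixes after an opponent defection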
1 change: 1 addition & 0 deletions docs/reference/bibliography.rst
@@ -40,6 +40,7 @@ documentation.
International Conference on Autonomous Agents and Multiagent Systems.
.. [Mittal2009] Mittal, S., & Deb, K. (2009). Optimal strategies of the iterated prisoner’s dilemma problem for multiple conflicting objectives. IEEE Transactions on Evolutionary Computation, 13(3), 554–565. https://doi.org/10.1109/TEVC.2008.2009459
.. [Nachbar1992] Nachbar J., Evolution in the finitely repeated prisoner’s dilemma, Journal of Economic Behavior & Organization, 19(3): 307-326, 1992.
.. [Nowak1989] Nowak, Martin, and Karl Sigmund. "Game-dynamical aspects of the prisoner's dilemma." Applied Mathematics and Computation 30.3 (1989): 191-213.
.. [Nowak1990] Nowak, M., & Sigmund, K. (1990). The evolution of stochastic strategies in the Prisoner's Dilemma. Acta Applicandae Mathematica. https://link.springer.com/article/10.1007/BF00049570
.. [Nowak1992] Nowak, M.., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0
.. [Nowak1993] Nowak, M., & Sigmund, K. (1993). A strategy of win-stay, lose-shift that outperforms tit-for-tat in the Prisoner’s Dilemma game. Nature, 364(6432), 56–58. http://doi.org/10.1038/364056a0