Add Player.delete_action, NormalFormGame.delete_action #444

Merged: 2 commits, Nov 11, 2018
97 changes: 97 additions & 0 deletions quantecon/game_theory/normal_form_game.py
@@ -194,6 +194,45 @@ def __str__(self):
s += np.array2string(self.payoff_array, separator=', ')
return s

def delete_action(self, action, player_idx=0):
"""
Return a new `Player` instance with the action(s) specified by
`action` deleted from the action set of the player specified by
`player_idx`. Deletion is not performed in place.

Parameters
----------
action : scalar(int) or array_like(int)
Integer or array_like of integers representing the action(s)
to be deleted.

player_idx : scalar(int), optional(default=0)
Index of the player to delete action(s) for.

Returns
-------
Player
Copy of `self` with the action(s) deleted as specified.

Examples
--------
>>> player = Player([[3, 0], [0, 3], [1, 1]])
>>> player
Player([[3, 0],
[0, 3],
[1, 1]])
>>> player.delete_action(2)
Player([[3, 0],
[0, 3]])
>>> player.delete_action(0, player_idx=1)
Player([[0],
[3],
[1]])

"""
payoff_array_new = np.delete(self.payoff_array, action, player_idx)
return Player(payoff_array_new)
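
A quick sanity check of the delegation to `np.delete` (a minimal sketch, not part of the diff, assuming the usual `quantecon.game_theory` imports):

    import numpy as np
    from quantecon.game_theory import Player

    player = Player([[3, 0], [0, 3], [1, 1]])
    player_new = player.delete_action(2)
    # delete_action(2) is np.delete along axis 0 of the payoff array
    assert np.array_equal(player_new.payoff_array,
                          np.delete(player.payoff_array, 2, 0))
    # The original player is left unchanged
    assert player.payoff_array.shape == (3, 2)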

def payoff_vector(self, opponents_actions):
"""
Return an array of payoff values, one for each own action, given
@@ -688,6 +727,64 @@ def __setitem__(self, action_profile, payoff_profile):
tuple(action_profile[i:]) + tuple(action_profile[:i])
] = payoff_profile[i]

def delete_action(self, player_idx, action):
"""
Return a new `NormalFormGame` instance with the action(s)
specified by `action` deleted from the action set of the player
specified by `player_idx`. Deletion is not performed in place.

Parameters
----------
player_idx : scalar(int)
Index of the player to delete action(s) for.

action : scalar(int) or array_like(int)
Integer or array_like of integers representing the action(s)
to be deleted.

Returns
-------
NormalFormGame
Copy of `self` with the action(s) deleted as specified.

Examples
--------
>>> g = NormalFormGame(
... [[(3, 0), (0, 1)], [(0, 0), (3, 1)], [(1, 1), (1, 0)]]
... )
>>> print(g)
2-player NormalFormGame with payoff profile array:
[[[3, 0], [0, 1]],
[[0, 0], [3, 1]],
[[1, 1], [1, 0]]]

Delete player `0`'s action `2` from `g`:

>>> g1 = g.delete_action(0, 2)
>>> print(g1)
2-player NormalFormGame with payoff profile array:
[[[3, 0], [0, 1]],
[[0, 0], [3, 1]]]

Then delete player `1`'s action `0` from `g1`:

>>> g2 = g1.delete_action(1, 0)
>>> print(g2)
2-player NormalFormGame with payoff profile array:
[[[0, 1]],
[[3, 1]]]

"""
# Allow negative indexing
if -self.N <= player_idx < 0:
player_idx = player_idx + self.N

players_new = tuple(
player.delete_action(action, player_idx-i)
for i, player in enumerate(self.players)
)
return NormalFormGame(players_new)
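
Note on the axis arithmetic: in player `i`'s payoff array, the axis that corresponds to player `player_idx` is `player_idx - i` modulo `N`, and since `np.delete` also accepts negative axis indices the unnormalized difference can be passed through directly. A natural application of this method is iterated elimination of strictly dominated actions; the helper below is a hypothetical sketch (not part of this PR), assuming `Player.is_dominated` as provided elsewhere in `quantecon`:

    from quantecon.game_theory import NormalFormGame

    def eliminate_strictly_dominated(g):
        # Hypothetical helper: repeatedly delete strictly dominated
        # actions, one player at a time, until none remain.
        found = True
        while found:
            found = False
            for i, player in enumerate(g.players):
                dominated = [a for a in range(player.num_actions)
                             if player.is_dominated(a)]
                if dominated:
                    g = g.delete_action(i, dominated)
                    found = True
                    break
        return g

    # With the docstring's example game this should reduce, step by
    # step, to the single profile with payoffs (3, 1).
    g = NormalFormGame([[(3, 0), (0, 1)], [(0, 0), (3, 1)], [(1, 1), (1, 0)]])
    print(eliminate_strictly_dominated(g))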

def is_nash(self, action_profile, tol=None):
"""
Return True if `action_profile` is a Nash equilibrium.
69 changes: 69 additions & 0 deletions quantecon/game_theory/tests/test_normal_form_game.py
@@ -24,6 +24,18 @@ def setUp(self):
coordination_game_matrix = [[4, 0], [3, 2]]
self.player = Player(coordination_game_matrix)

def test_delete_action(self):
N = self.player.num_opponents + 1
action_to_delete = 0
actions_to_remain = \
np.setdiff1d(np.arange(self.player.num_actions), action_to_delete)
for i in range(N):
player_new = self.player.delete_action(action_to_delete, i)
assert_array_equal(
player_new.payoff_array,
self.player.payoff_array.take(actions_to_remain, axis=i)
)
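
This pattern relies on a general NumPy identity: `np.delete` along an axis keeps exactly the complement of the deleted indices, which `take` plus `np.setdiff1d` reconstructs. A minimal standalone sketch of the identity, independent of the test fixtures:

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    deleted = [0]
    kept = np.setdiff1d(np.arange(a.shape[1]), deleted)
    # Deleting indices along axis 1 == taking the complementary indices
    assert np.array_equal(np.delete(a, deleted, axis=1),
                          a.take(kept, axis=1))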

def test_best_response_against_pure(self):
eq_(self.player.best_response(1), 1)

@@ -89,6 +101,18 @@ def setUp(self):
[5, 7]]]
self.player = Player(payoffs_2opponents)

def test_delete_action(self):
N = self.player.num_opponents + 1
action_to_delete = 0
actions_to_remain = \
np.setdiff1d(np.arange(self.player.num_actions), action_to_delete)
for i in range(N):
player_new = self.player.delete_action(action_to_delete, i)
assert_array_equal(
player_new.payoff_array,
self.player.payoff_array.take(actions_to_remain, axis=i)
)

def test_payoff_vector_against_pure(self):
assert_array_equal(self.player.payoff_vector((0, 1)), [6, 0])

@@ -185,6 +209,17 @@ def test_getitem(self):
assert_array_equal(self.g[action_profile],
self.BoS_bimatrix[action_profile])

def test_delete_action(self):
action_to_delete = 0
for i, player in enumerate(self.g.players):
g_new = self.g.delete_action(i, action_to_delete)
actions_to_remain = \
np.setdiff1d(np.arange(player.num_actions), action_to_delete)
assert_array_equal(
g_new.payoff_profile_array,
self.g.payoff_profile_array.take(actions_to_remain, axis=i)
)

def test_is_nash_pure(self):
ok_(not self.g.is_nash((1, 0)))

@@ -215,6 +250,17 @@ def setUp(self):
def test_getitem(self):
assert_array_equal(self.g[0, 0, 1], [6, 4, 1])

def test_delete_action(self):
action_to_delete = 0
for i, player in enumerate(self.g.players):
g_new = self.g.delete_action(i, action_to_delete)
actions_to_remain = \
np.setdiff1d(np.arange(player.num_actions), action_to_delete)
assert_array_equal(
g_new.payoff_profile_array,
self.g.payoff_profile_array.take(actions_to_remain, axis=i)
)

def test_is_nash_pure(self):
ok_(self.g.is_nash((0, 0, 0)))
ok_(not self.g.is_nash((0, 0, 1)))
@@ -306,6 +352,18 @@ def setUp(self):
self.best_response_action = 1
self.dominated_actions = [0, 2]

def test_delete_action(self):
N = self.player.num_opponents + 1
actions_to_delete = [0, 2]
actions_to_remain = \
np.setdiff1d(np.arange(self.player.num_actions), actions_to_delete)
for i in range(N):
player_new = self.player.delete_action(actions_to_delete, i)
assert_array_equal(
player_new.payoff_array,
self.player.payoff_array.take(actions_to_remain, axis=i)
)

def test_payoff_vector(self):
"""Trivial player: payoff_vector"""
assert_array_equal(self.player.payoff_vector(None), self.payoffs)
@@ -346,6 +404,17 @@ def test_getitem(self):
"""Trivial game: __getitem__"""
eq_(self.g[0], 0)

def test_delete_action(self):
actions_to_delete = [1, 2]
for i, player in enumerate(self.g.players):
g_new = self.g.delete_action(i, actions_to_delete)
actions_to_remain = \
np.setdiff1d(np.arange(player.num_actions), actions_to_delete)
assert_array_equal(
g_new.payoff_profile_array,
self.g.payoff_profile_array.take(actions_to_remain, axis=i)
)

def test_is_nash_pure(self):
"""Trivial game: is_nash with pure action"""
ok_(self.g.is_nash((1,)))