
Commit 5f27eb9 (parent: f03f83a)

Move NashResult to utilities.py

4 files changed: +59 -94 lines


quantecon/game_theory/lemke_howson.py

Lines changed: 3 additions & 47 deletions
@@ -7,6 +7,7 @@
 """
 import numpy as np
 from numba import jit
+from .utilities import NashResult


 TOL_PIV = 1e-10
@@ -119,7 +120,7 @@ def lemke_howson(g, init_pivot=0, max_iter=10**6, capping=None,
     try:
         N = g.N
     except:
-        raise TypeError('input must be a 2-player NormalFormGame')
+        raise TypeError('g must be a 2-player NormalFormGame')
     if N != 2:
         raise NotImplementedError('Implemented only for 2-player games')


@@ -153,7 +154,7 @@ def lemke_howson(g, init_pivot=0, max_iter=10**6, capping=None,
                          converged=converged,
                          num_iter=num_iter,
                          max_iter=max_iter,
-                         init_pivot=init_pivot_used)
+                         init=init_pivot_used)

     return NE, res


@@ -598,48 +599,3 @@ def get_mixed_actions(tableaux, bases):
         out[start:stop] /= sum_

     return out[:nums_actions[0]], out[nums_actions[0]:]
-
-
-class NashResult(dict):
-    """
-    Contain the information about the result of Nash equilibrium
-    computation.
-
-    Attributes
-    ----------
-    NE : tuple(ndarray(float, ndim=1))
-        Computed Nash equilibrium.
-
-    converged : bool
-        Whether the routine has converged.
-
-    num_iter : int
-        Total number of iterations.
-
-    max_iter : int
-        Maximum number of iterations.
-
-    init_pivot : int
-        Initial pivot used.
-
-    """
-    # This is sourced from sicpy.optimize.OptimizeResult.
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
-
-    __setattr__ = dict.__setitem__
-    __delattr__ = dict.__delitem__
-
-    def __repr__(self):
-        if self.keys():
-            m = max(map(len, list(self.keys()))) + 1
-            return '\n'.join([k.rjust(m) + ': ' + repr(v)
-                              for k, v in sorted(self.items())])
-        else:
-            return self.__class__.__name__ + "()"
-
-    def __dir__(self):
-        return self.keys()
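In user code, the visible effect of the second and third hunks is that the TypeError message now names the argument g, and the result object returned with full_output=True exposes the initial pivot as init rather than init_pivot. A minimal sketch of the new attribute (the bimatrix game below is illustrative; it assumes NormalFormGame and lemke_howson are importable from quantecon.game_theory):

import numpy as np
from quantecon.game_theory import NormalFormGame, lemke_howson

# A 3x2 game; the last axis of the array holds the payoff profile
# (payoff to player 0, payoff to player 1).
bimatrix = np.array([[(3, 3), (3, 2)],
                     [(2, 2), (5, 6)],
                     [(0, 3), (6, 1)]])
g = NormalFormGame(bimatrix)

NE, res = lemke_howson(g, init_pivot=1, full_output=True)
res.init       # initial pivot actually used (previously res.init_pivot)
res.converged  # whether the routine converged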

quantecon/game_theory/mclennan_tourky.py

Lines changed: 1 addition & 45 deletions
@@ -10,6 +10,7 @@
 import numpy as np
 from ..compute_fp import _compute_fixed_point_ig
 from .normal_form_game import pure2mixed
+from .utilities import NashResult


 def mclennan_tourky(g, init=None, epsilon=1e-3, max_iter=200,
@@ -297,48 +298,3 @@ def _flatten_action_profile(action_profile, indptr):
         out[indptr[i]:indptr[i+1]] = mixed_action

     return out
-
-
-class NashResult(dict):
-    """
-    Contain the information about the result of Nash equilibrium
-    computation.
-
-    Attributes
-    ----------
-    NE : tuple(ndarray(float, ndim=1))
-        Computed Nash equilibrium.
-
-    converged : bool
-        Whether the routine has converged.
-
-    num_iter : int
-        Number of iterations.
-
-    max_iter : int
-        Maximum number of iterations.
-
-    init_action_profile : array_like
-        Initial action profile used.
-
-    """
-    # This is sourced from sicpy.optimize.OptimizeResult.
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name)
-
-    __setattr__ = dict.__setitem__
-    __delattr__ = dict.__delitem__
-
-    def __repr__(self):
-        if self.keys():
-            m = max(map(len, list(self.keys()))) + 1
-            return '\n'.join([k.rjust(m) + ': ' + repr(v)
-                              for k, v in sorted(self.items())])
-        else:
-            return self.__class__.__name__ + "()"
-
-    def __dir__(self):
-        return self.keys()
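For mclennan_tourky the change is purely an import move; the routine's interface is unchanged by this commit. A short sketch of a call that yields a NashResult (the game is illustrative, and the full_output flag is assumed to exist by analogy with lemke_howson):

import numpy as np
from quantecon.game_theory import NormalFormGame, mclennan_tourky

# A 2x2 coordination game given as a payoff-profile array (illustrative).
payoffs = np.array([[(4, 4), (0, 3)],
                    [(3, 0), (2, 2)]])
g = NormalFormGame(payoffs)

NE, res = mclennan_tourky(g, epsilon=1e-4, full_output=True)
res.converged  # True if an epsilon-Nash equilibrium was found within max_iter
res.num_iter   # number of fixed-point iterations performed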

quantecon/game_theory/tests/test_lemke_howson.py

Lines changed: 2 additions & 2 deletions
@@ -100,14 +100,14 @@ def test_lemke_howson_capping():
                                  capping=max_iter, full_output=True)
         for action0, action1 in zip(NE0, NE1):
             assert_allclose(action0, action1)
-        eq_(res0.init_pivot, res1.init_pivot)
+        eq_(res0.init, res1.init)

     init_pivot = 1
     max_iter = m+n
     NE, res = lemke_howson(g, init_pivot=init_pivot, max_iter=max_iter,
                            capping=1, full_output=True)
     eq_(res.num_iter, max_iter)
-    eq_(res.init_pivot, init_pivot-1)
+    eq_(res.init, init_pivot-1)


 if __name__ == '__main__':

quantecon/game_theory/utilities.py

Lines changed: 53 additions & 0 deletions
@@ -0,0 +1,53 @@
+"""
+Utility routines for the game_theory submodule
+
+"""
+class NashResult(dict):
+    """
+    Contain the information about the result of Nash equilibrium
+    computation.
+
+    Attributes
+    ----------
+    NE : tuple(ndarray(float, ndim=1))
+        Computed Nash equilibrium.
+
+    converged : bool
+        Whether the routine has converged.
+
+    num_iter : int
+        Number of iterations.
+
+    max_iter : int
+        Maximum number of iterations.
+
+    init : scalar or array_like
+        Initial condition used.
+
+    Notes
+    -----
+    This is sourced from `scipy.optimize.OptimizeResult`.
+
+    There may be additional attributes not listed above depending on the
+    routine.
+
+    """
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name)
+
+    __setattr__ = dict.__setitem__
+    __delattr__ = dict.__delitem__
+
+    def __repr__(self):
+        if self.keys():
+            m = max(map(len, list(self.keys()))) + 1
+            return '\n'.join([k.rjust(m) + ': ' + repr(v)
+                              for k, v in sorted(self.items())])
+        else:
+            return self.__class__.__name__ + "()"
+
+    def __dir__(self):
+        return self.keys()
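For reference, a minimal usage sketch of the relocated class; the values are made up for illustration, and the import path is the new module introduced by this commit:

import numpy as np
from quantecon.game_theory.utilities import NashResult

# Keys passed at construction become dict entries and, via __getattr__,
# attributes as well.
res = NashResult(NE=(np.array([1., 0.]), np.array([0., 1.])),
                 converged=True, num_iter=3, max_iter=10**6, init=0)

res.num_iter       # -> 3, attribute access falls back to dict lookup
res['converged']   # -> True, ordinary dict access still works
print(res)         # keys printed one per line, right-justified and sorted
# res.unknown      # would raise AttributeError (not KeyError)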
