Skip to content

Commit

Permalink
Simplify the board state to a string
Browse files Browse the repository at this point in the history
  • Loading branch information
shayakbanerjee committed Dec 15, 2017
1 parent 18dcc4d commit efed098
Show file tree
Hide file tree
Showing 8 changed files with 31 additions and 12 deletions.
5 changes: 2 additions & 3 deletions board.py
Original file line number Diff line number Diff line change
Expand Up @@ -80,15 +80,14 @@ def getGrid(self, i, j):
return self.board[i][j]

def getEmptyBoardPlaces(self):
    """Return a list of (row, col) tuples for every empty cell on the 3x3 board.

    Reads the grid directly instead of going through the serialized board
    state, so it is independent of how getBoardState() encodes the board.
    """
    emptyPlaces = []
    for (i, j) in itertools.product(range(3), range(3)):
        # NOTE(review): assumes GridStates.EMPTY is the sentinel stored in
        # unoccupied cells of self.board -- confirm against GridStates.
        if self.board[i][j] == GridStates.EMPTY:
            emptyPlaces.append((i, j))
    return emptyPlaces

def getBoardState(self):
    """Serialize the 3x3 board to a 9-character string in row-major order.

    The string form (rather than a nested tuple) is hashable, compact, and
    JSON-friendly, which suits its use as a key in the learned value table.
    """
    return ''.join([''.join(row) for row in self.board])

def getDoesBoardHaveEmptyCell(self):
for (i,j) in itertools.product(range(3), range(3)):
Expand Down
11 changes: 10 additions & 1 deletion learning.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
from board import GridStates, TTTBoardDecision
import json

class TableLearning(object):
def __init__(self, DecisionClass=TTTBoardDecision):
Expand All @@ -24,5 +25,13 @@ def learnFromMove(self, player, board, prevBoardState):

def printValues(self):
    """Pretty-print the learned state-value table and summary statistics.

    Prints every known state's value, the total number of states seen, and
    the number of "knowledgeable" states (value != 0.5, i.e. states whose
    estimate has moved away from the uninformed prior).
    """
    from pprint import pprint
    pprint(self.values)
    # List comprehension instead of len(filter(...)) so this works on both
    # Python 2 and Python 3 (filter() returns an iterator on Python 3).
    knowledgeable = [v for v in self.values.values() if v != 0.5]
    print('Total number of states: %s' % len(self.values))
    print('Total number of knowledgeable states: %s' % len(knowledgeable))

def saveLearning(self, filename):
    """Persist the learned state-value table to `filename` as JSON.

    Uses a context manager so the file handle is closed (and the write
    flushed) even if json.dump raises -- the original leaked the handle.
    """
    with open(filename, 'w') as f:
        json.dump(self.values, f)

def loadLearning(self, filename):
    """Replace the state-value table with one loaded from a JSON file.

    Uses a context manager so the file handle is closed promptly -- the
    original left it to the garbage collector.
    """
    with open(filename, 'r') as f:
        self.values = json.load(f)
6 changes: 3 additions & 3 deletions player.py
Original file line number Diff line number Diff line change
Expand Up @@ -43,9 +43,9 @@ def printValues(self):
self.learningAlgo.printValues()

def testNextMove(self, state, i, j):
boardCopy = Util.nestedTupleToList(deepcopy(state))
boardCopy[i][j] = self.player
return Util.nestedListToTuple(boardCopy)
boardCopy = list(state)
boardCopy[3*i+j] = self.player
return ''.join(boardCopy)

def makeNextMove(self):
previousState = self.board.getBoardState()
Expand Down
1 change: 1 addition & 0 deletions ultimate_learning.json
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
{
6 changes: 5 additions & 1 deletion ultimateboard.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,11 @@ def getBoardRowString(self, boardRow, row):
return rowString

def getBoardState(self):
    """Serialize all nine mini-boards into a single 81-character string.

    Mini-boards are visited in row-major order; each contributes its three
    rows concatenated. A plain inner loop replaces the original
    side-effecting list comprehension, which also shadowed (and on Python 2
    leaked into) the outer loop variable `i`.

    NOTE(review): assumes each self.board[i][j] exposes a 3x3 `board` list
    of single-character cells -- confirm against the mini-board class.
    """
    boardStrings = []
    for (i, j) in itertools.product(range(3), range(3)):
        miniBoard = self.board[i][j]
        for row in miniBoard.board:
            boardStrings.append(''.join(row))
    return ''.join(boardStrings)

def getBoardDecision(self):
return self.decision
Expand Down
Binary file added ultimateboard.pyc
Binary file not shown.
14 changes: 10 additions & 4 deletions ultimateplayer.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
from util import Util
from learning import TableLearning
import random
from copy import deepcopy

class UTTTPlayer(object):
def __init__(self):
Expand Down Expand Up @@ -47,9 +46,10 @@ def printValues(self):
self.learningAlgo.printValues()

def testNextMove(self, state, boardLocation, placeOnBoard):
boardCopy = Util.nestedTupleToList(deepcopy(state))
boardCopy[boardLocation[0]][boardLocation[1]][placeOnBoard[0]][placeOnBoard[1]] = self.player
return Util.nestedListToTuple(boardCopy)
loc = 27*boardLocation[0] + 9*boardLocation[1] + 3*placeOnBoard[0] + placeOnBoard[1]
boardCopy = list(state)
boardCopy[loc] = self.player
return ''.join(boardCopy)

def makeNextMove(self):
previousState = self.board.getBoardState()
Expand All @@ -76,6 +76,12 @@ def makeNextMove(self):
def learnFromMove(self, prevBoardState):
self.learningAlgo.learnFromMove(self.player, self.board, prevBoardState)

def saveLearning(self, filename):
    # Thin delegation: the learning algorithm owns the value table, so it
    # also owns persistence. Presumably writes JSON -- see TableLearning.
    self.learningAlgo.saveLearning(filename)

def loadLearning(self, filename):
    # Thin delegation: restores the learning algorithm's value table from
    # `filename`. Presumably reads JSON -- see TableLearning.
    self.learningAlgo.loadLearning(filename)

if __name__ == '__main__':
board = UTTTBoard()
player1 = RandomUTTTPlayer()
Expand Down
Binary file added ultimateplayer.pyc
Binary file not shown.

0 comments on commit efed098

Please sign in to comment.