-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathCheckerAI.py
More file actions
268 lines (230 loc) · 11 KB
/
CheckerAI.py
File metadata and controls
268 lines (230 loc) · 11 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
from Transition import BoardTransition
from CheckerGame import CheckerBoard, Piece
import numpy as np
import math
import random
from copy import deepcopy
from constants import Q_TABLE_FILE, _P1PIECE, _P2PIECE, _P1KING, _P2KING, _ROWS, _COLS
#import tensorflow as tf
class CheckerAI:
    """Checkers-playing agent combining alpha-beta minimax search with a
    Q-table (persisted via numpy) that caches and learns board evaluations."""

    # Weights of Heuristic Factors used by evaluateBoard()
    _KING_VALUE = 2             # bonus added for each king
    _PIECE_VALUE = 2            # base value of any piece on the board
    _ADJ_VALUE = 0.2            # bonus per friendly piece on an adjacent diagonal
    _EDGE_VALUE = 0.4           # bonus for sitting on a board edge column
    _PROMO_VALUE = 0.3          # bonus for guarding the own promotion row
    _OPPONENT_MULT = -1.2       # player-2 pieces count against the evaluation
    _DISTANCE_MULT = 2          # weight of average advance toward promotion
    _LEARNING_RATE = 1          # Q-reward step size (see applyQReward)
    _GAMMA = 0.95               # Q-reward discount factor per remaining turn
    _EPSILON = 0.9              # exploration threshold: explore when random() > epsilon
    _TERMINAL_NODE_EVAL = 1000  # score magnitude assigned to a won/lost position
def __init__(self, qTableName) -> None:
    """Create an agent and load its persisted Q-table.

    qTableName: path of the ``.npy`` file holding the pickled Q-table dict.
        An empty string selects the default ``Q_TABLE_FILE`` and disables
        writing the table back to disk (evaluation-only mode).
    """
    self.boardTransition = BoardTransition()
    self.saveToDisk = True
    self.qTableName = qTableName
    if qTableName == "":
        self.qTableName = Q_TABLE_FILE
        self.saveToDisk = False
    # Boards visited during the current game, consumed by applyQReward().
    # Fix: this was initialised as [CheckerBoard], seeding the history with
    # the class object itself; it must start empty.
    self.visited = []
    try:
        print("Loading Trainning Data...")
        # allow_pickle must be a real bool; the old "TRUE" string only worked
        # by truthiness.  NOTE: unpickling trusts the file's contents.
        self.qTable = np.load(self.qTableName, allow_pickle=True).item()
        print("Successfully Loaded Trainning Data")
    except (OSError, ValueError, EOFError):
        # Missing or unreadable table: start fresh and create the file.
        print("No Q Table exists")
        self.qTable = dict()
        np.save(self.qTableName, self.qTable)
# Utility (static evaluation) function: scores a board, favouring kings,
# defended/edge pieces, promotion-row guards, and advancement.
def evaluateBoard(self, board: CheckerBoard) -> float:
    """Heuristically score *board*; positive values favour player 1.

    Cached scores are served from (and new scores stored into) the Q-table.
    A side with no material scores +/-_TERMINAL_NODE_EVAL immediately.
    """
    # Single Q-table lookup (the original called .get() twice).
    cached = self.qTable.get(board)
    if cached is not None:
        return cached
    # Terminal positions: one side has no pieces or kings left.
    if (board.player1NumPieces + board.player1NumKings) == 0:
        self.qTable[board] = -1 * self._TERMINAL_NODE_EVAL
        return -1 * self._TERMINAL_NODE_EVAL
    elif (board.player2NumPieces + board.player2NumKings) == 0:
        self.qTable[board] = self._TERMINAL_NODE_EVAL
        return self._TERMINAL_NODE_EVAL
    boardValue = 0.0
    P1DistanceFromPromo = 0
    P2DistanceFromPromo = 0
    for boardRow in board.board:
        for piece in boardRow:
            if type(piece) is Piece:
                pieceValue = self._PIECE_VALUE
                adjSpaces = [(piece.row + 1, piece.col + 1), (piece.row + 1, piece.col - 1), (piece.row - 1, piece.col + 1), (piece.row - 1, piece.col - 1)]
                # Is the piece defending or being defended by another piece?
                # (adjRow/adjCol: the original reused the outer loop name `row`,
                # shadowing it inside the nested loop.)
                for adjRow, adjCol in adjSpaces:
                    if 0 <= adjRow < _ROWS and 0 <= adjCol < _COLS:
                        if (type(board.board[adjRow][adjCol]) is Piece and (board.board[adjRow][adjCol].player == piece.player)):
                            pieceValue += self._ADJ_VALUE
                # Is the piece on the edge (cannot be captured sideways)?
                if piece.col == 0 or piece.col == (_COLS - 1):
                    pieceValue += self._EDGE_VALUE
                # Is the piece defending its own promotion line?
                if (piece.row == 0 and piece.pieceNum == _P1PIECE) or (piece.row == (_ROWS - 1) and piece.pieceNum == _P2PIECE):
                    pieceValue += self._PROMO_VALUE
                # Is the piece a King?
                if piece.king:
                    pieceValue += self._KING_VALUE
                # Opponent (player 2) material counts negatively.
                if piece.player == 2:
                    pieceValue *= self._OPPONENT_MULT
                    if not piece.king:
                        # Normalised progress toward player 2's promotion row.
                        P2DistanceFromPromo += (float(_ROWS - (piece.row + 1)) / _ROWS)
                else:
                    if not piece.king:
                        # Normalised progress toward player 1's promotion row.
                        P1DistanceFromPromo += (float(piece.row + 1) / _ROWS)
                boardValue += pieceValue
    # Add the difference of average piece advancement toward promotion
    # (guarded against division by zero when a side has only kings left).
    if board.player1NumPieces > 0 and board.player2NumPieces > 0:
        boardValue += self._DISTANCE_MULT * ((P1DistanceFromPromo / board.player1NumPieces) - (P2DistanceFromPromo / board.player2NumPieces))
    self.qTable[board] = boardValue
    return boardValue
# current_state (board) - state of the game board to search from
# depth (int)           - remaining plies of lookahead
# is_max (boolean)      - True on a maximizing level, False on a minimizing one
# alpha/beta (float)    - current alpha-beta window
def minimax(self, current_state, depth, is_max, alpha: float, beta: float):
    """Alpha-beta minimax: return the evaluation of *current_state*."""
    # Leaf of the search tree: fall back to the static evaluation.
    if depth == 0:
        return self.evaluateBoard(current_state)
    children = self.boardTransition.getAllBoards(current_state)
    if not children:
        # No legal moves: terminal node, scored for the side to move.
        return self._TERMINAL_NODE_EVAL * current_state.turn
    if is_max:
        best = float('-inf')
        for child in children:
            score = self.minimax(child, depth - 1, False, alpha, beta)
            alpha = max(alpha, score)
            best = max(best, score)
            if score >= beta:
                # Beta cutoff: the minimizer above will never allow this line.
                break
        return best
    best = float('inf')
    for child in children:
        score = self.minimax(child, depth - 1, True, alpha, beta)
        beta = min(beta, score)
        best = min(best, score)
        if score <= alpha:
            # Alpha cutoff: the maximizer above already has a better option.
            break
    return best
# Decides if it should take the best move or explore
def exploration(self, bestMove:CheckerBoard, secondBestMove:CheckerBoard) -> CheckerBoard:
    """Epsilon-greedy pick: usually *bestMove*, occasionally the runner-up.

    Exploration only happens in training mode (saveToDisk) and when a
    second-best move actually exists.
    """
    # Draw unconditionally so the global RNG sequence matches the original.
    chance = random.random()
    canExplore = self.saveToDisk and secondBestMove is not None
    if canExplore and chance > self._EPSILON:
        print("Exploring a new move")
        return secondBestMove
    return bestMove
# gets the next best move from current board configuration
# the higher the accuracy level the better the move but it costs more performance
def nextBestMove(self, currentBoard:CheckerBoard, accuracyLevel:int = 4) -> CheckerBoard:
    """Pick the next board state via minimax, or None when no move exists.

    accuracyLevel is the minimax search depth.  May return the second-best
    move instead when exploration() decides to explore.
    """
    self.evaluated_boards = [(currentBoard, self.evaluateBoard(currentBoard))]
    nextMoves = self.boardTransition.getAllBoards(currentBoard)
    if (len(nextMoves) == 0):
        print("No move exists")
        return None
    bestNextMove = None
    secondBestMove = None
    # NOTE(review): the `turn`-signed comparisons below assume a specific
    # sign convention for CheckerBoard.turn (which player is maximizing);
    # with turn == +1 this initial value is +inf and the `<` test keeps the
    # *smallest* evaluation — confirm against CheckerGame's constants.
    bestMoveVal = currentBoard.turn * math.inf
    alpha = float('-inf')
    beta = float('inf')
    for move in nextMoves:
        moveEvaluation = self.minimax(move, accuracyLevel, currentBoard.turn == _P2PIECE, alpha, beta)
        # NOTE(review): alpha is max-updated at the root regardless of which
        # side is to move — verify this is intended when turn == _P2PIECE.
        alpha = max(alpha, moveEvaluation)
        if (moveEvaluation * currentBoard.turn < currentBoard.turn * bestMoveVal):
            # Keep the previous best as the exploration fallback.
            secondBestMove = bestNextMove
            bestNextMove = move
            bestMoveVal = moveEvaluation
    if bestMoveVal <= (currentBoard.turn * self._TERMINAL_NODE_EVAL):
        # If all deep moves lead to a terminal loss, redo the search at a
        # shallow depth (2) and play the best *relative* move instead.
        bestMoveVal = currentBoard.turn * math.inf
        for move in nextMoves:
            moveEvaluation = self.minimax(move, 2, currentBoard.turn == _P2PIECE, alpha, beta)
            alpha = max(alpha, moveEvaluation)
            if (moveEvaluation * currentBoard.turn < currentBoard.turn * bestMoveVal):
                secondBestMove = bestNextMove
                bestNextMove = move
                bestMoveVal = moveEvaluation
    # Epsilon-greedy choice between the best move and the runner-up.
    return self.exploration(bestNextMove, secondBestMove)
# def deepEval(self, currentState:CheckerBoard, depth:int) -> int:
# possibleNextStates = self.boardTransition.getAllBoards(currentState)
# if (depth == 0 or len(possibleNextStates) == 0):
# return self.evaluateBoard(currentState)
# maxVal = -math.inf
# for state in possibleNextStates:
# val = -self.deepEval(state, depth - 1)
# if (val > maxVal):
# maxVal = val
# return maxVal
# def get_path(self) -> list["CheckerBoard"]:
# path = []
# current_node = self
# while current_node:
# path.append(current_node)
# current_node = current_node.parent
# return path[::-1]
def linkVisitedBoard(self, board:CheckerBoard):
    """Append a deep copy of *board* to the visited-state history used
    later by applyQReward()."""
    self.visited.append(deepcopy(board))
def playerNumber(self, player) ->str:
    """Map a player constant to its display digit; "0" means neither."""
    if player == _P1PIECE:
        return "1"
    if player == _P2PIECE:
        return "2"
    return "0"
# Applies Reward to the model and returns the amount of reward applied
def applyQReward(self, wonPlayer:int) -> int:
    """Back-propagate a game result into the Q-table over all visited boards.

    wonPlayer: winner indicator; 0 means a draw.  Rewards decay by
    _GAMMA per turn remaining, so later boards receive larger updates.
    Returns the accumulated reward magnitude (negative for draws) and
    clears the visited-board history.
    """
    totalReward = 0
    maxTurns = len(self.visited)
    # Our model should try to avoid draws and repeating moves
    if (wonPlayer == 0):
        # So if the game results in a draw, penalise the boards that led
        # to it.  NOTE(review): the update sign is board.turn-dependent —
        # confirm the sign convention of CheckerBoard.turn.  Also, this
        # branch prints "Player 0 won" for a draw.
        for i, board in enumerate(self.visited):
            if (self.qTable.get(board) is not None):
                print("Player ", self.playerNumber(wonPlayer), " won | Previous board val: ", self.qTable[board])
                appliedReward = board.turn * self._LEARNING_RATE * pow(self._GAMMA, maxTurns - i)
                self.qTable[board] += appliedReward
                # Draws count negatively toward the reported total.
                totalReward -= abs(appliedReward)
                print("On turn ", i, " | New board val: ", self.qTable[board], "For Player: ", self.playerNumber(board.turn), "\n")
    else:
        # Otherwise apply q reward normally.
        # NOTE(review): reward is -wonPlayer * ... — presumably wonPlayer
        # carries a sign (e.g. +/-1 per player); verify against callers.
        for i, board in enumerate(self.visited):
            if (self.qTable.get(board) is not None):
                print("Player ", self.playerNumber(wonPlayer), " won | Previous board val: ", self.qTable[board])
                appliedReward = -wonPlayer * self._LEARNING_RATE * pow(self._GAMMA, maxTurns - i)
                self.qTable[board] += appliedReward
                totalReward += abs(appliedReward)
                print("On turn ", i, " | New board val: ", self.qTable[board], "For Player: ", self.playerNumber(board.turn), "\n")
    # Start a fresh history for the next game.
    self.visited.clear()
    return totalReward
# Saves q table inside binary file
def __del__(self):
    """Persist the Q-table on destruction when saving is enabled.

    __del__ can run during interpreter shutdown, when module globals such
    as ``np`` may already have been cleared, and on instances whose
    __init__ failed before setting attributes — guard both cases so
    finalization never raises.
    """
    if getattr(self, "saveToDisk", False) and np is not None:
        np.save(self.qTableName, self.qTable)