/* ***METHODS*** */

private Tuple<long, long> simulateBranch(MCTSNode_QAI currNode)
{
    // Checks whether the current node is in a simulation end state.
    if (currNode.flag[qflags.ENDBOARD] || currNode.flag[qflags.ALLCHILDRENSIMULATED])
    {
        return new Tuple<long, long>(0, 0);
    }

    if (currNode.possibleChildren.Count != 0)
    {
        // Expansion: pick an unexplored child at random and add it to the tree.
        int newChildDataIndex = random.Next(0, currNode.possibleChildren.Count);
        MCTSNode_QAI newChild = currNode.addChild(currNode.possibleChildren[newChildDataIndex]); //possibly simplified
        currNode.possibleChildren.RemoveAt(newChildDataIndex);

        if (newChild.isNodeScored())
        {
            // The new child is already a terminal board; record win/loss flags on the parent.
            if (newChild.isLosingNode())
            {
                currNode.flag[qflags.CHILDHASLOSINGBOARD] = true;
                currNode.flag[qflags.ALLCHILDRENSIMULATED] = true;
            }
            else if (newChild.isWinState())
            {
                currNode.flag[qflags.CHILDHASWINNINGBOARD] = true;
                currNode.flag[qflags.ALLCHILDRENSIMULATED] = true;
            }
            return currNode.addResultToScore(newChild.score);
        }
        else
        {
            // Simulate from the new child and back-propagate its result into this node's score.
            Tuple<long, long> tempScore = simulateBranch(newChild);
            return currNode.addResultToScore(tempScore);
        }
    }
    else
    {
        // Selection: every child already exists, so descend into the most promising one.
        MCTSNode_QAI nextChild = selectMostPromisingNodeForSimulation(currNode);
        if (nextChild == null)
        {
            //pos A.0
            return new Tuple<long, long>(0, 0);
        }
        else
        {
            Tuple<long, long> tempScore = simulateBranch(nextChild);
            return currNode.addResultToScore(tempScore);
        }
    }
}
// Returns true if the AI will win from this node, assuming both sides play perfectly.
private bool simulatePerfectGame(MCTSNode_QAI currNode)
{
    MCTSNode_QAI bestMove = null;

    if (currNode.flag[qflags.CHILDHASLOSINGBOARD])
    {
        return false;
    }
    else if (currNode.flag[qflags.CHILDHASWINNINGBOARD])
    {
        return true;
    }
    else if (currNode.flag[qflags.ENDBOARD])
    {
        // Terminal board: the result depends on who owns the position.
        if (currNode.flag[qflags.OWNER])
        {
            return currNode.isWinState();
        }
        else
        {
            return currNode.isTie();
        }
    }

    // Minimax-style choice over the children: maximise nodePickScore() when the
    // child's OWNER flag is set, minimise it otherwise.
    for (int index = 0; index < currNode.children.Count; index++)
    {
        if (currNode.children[index].flag[qflags.OWNER] &&
            (bestMove == null || bestMove.nodePickScore() < currNode.children[index].nodePickScore()))
        {
            bestMove = currNode.children[index];
        }
        else if (!currNode.children[index].flag[qflags.OWNER] &&
                 (bestMove == null || bestMove.nodePickScore() > currNode.children[index].nodePickScore()))
        {
            bestMove = currNode.children[index];
        }
    }

    return simulatePerfectGame(bestMove);
}
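/*
 * Hedged sketch (not part of the original source): simulateBranch() above relies on
 * selectMostPromisingNodeForSimulation() and nodePickScore(), which are not shown in
 * this section. One common way to implement such a selector in MCTS is the UCB1 (UCT)
 * formula. The standalone SketchNode/SketchSelector types and their Wins/Visits fields
 * below are hypothetical stand-ins for illustration only; they are not members of
 * MCTSNode_QAI and are not meant to be pasted into this class.
 */
using System;
using System.Collections.Generic;

class SketchNode
{
    public long Wins;                                   // accumulated reward for this node
    public long Visits;                                 // number of simulations run through this node
    public List<SketchNode> Children = new List<SketchNode>();

    // UCB1: average reward plus an exploration bonus that shrinks as this node is
    // visited more often relative to its parent.
    public double Ucb1(long parentVisits, double explorationConstant = 1.41421356)
    {
        if (Visits == 0)
        {
            return double.PositiveInfinity;             // always try unvisited children first
        }
        return (double)Wins / Visits
             + explorationConstant * Math.Sqrt(Math.Log(parentVisits) / Visits);
    }
}

static class SketchSelector
{
    // Returns the child with the highest UCB1 value, or null when there are no
    // children -- mirroring the null check at "pos A.0" in simulateBranch().
    public static SketchNode SelectMostPromising(SketchNode parent)
    {
        SketchNode best = null;
        double bestValue = double.NegativeInfinity;
        foreach (SketchNode child in parent.Children)
        {
            double value = child.Ucb1(parent.Visits);
            if (value > bestValue)
            {
                bestValue = value;
                best = child;
            }
        }
        return best;
    }
}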