// Example #1
// 0
        static void Main(string[] args)
        {
            // Two players on a shared game instance.
            Player p1 = new Player("Red", SpaceType.Player1, PlayerColor.Red);
            Player p2 = new Player("Blue", SpaceType.Player2, PlayerColor.Blue);
            Game plop = new Game(p1, p2);

            // Randomized board setup (ported from the Eval class), then begin play.
            setUpBoard(p1, p2, plop);//calls Quinn's majestic randomized setup method that I copied over from his Eval class
            plop.start();

            int depth = 3;//change as needed

            // Alternate turns until one side wins; each side picks its move
            // via alpha-beta search to the configured depth.
            for (;;)
            {
                Move p1Move = alphaBetaSearch(plop, p1, p2, depth);
                plop.movePiece(p1Move.start, p1Move.end);
                //update the machine learning record data on this line
                if (plop.checkWin(p1))
                {
                    //update and complete this instance of the record data with the fact that p1 won this game on this line
                    break;
                }

                Move p2Move = alphaBetaSearch(plop, p2, p1, depth);
                plop.movePiece(p2Move.start, p2Move.end);
                //update the machine learning record data on this line
                if (plop.checkWin(p2))
                {
                    //update and complete this instance of the record data with the fact that p2 won this game on this line
                    break;
                }
            }
        }
// Example #2
// 0
        /// <summary>
        /// MAX node of alpha-beta search: returns the best (highest) value
        /// achievable by <paramref name="max"/> from <paramref name="state"/>.
        /// </summary>
        /// <param name="state">Current game state being evaluated.</param>
        /// <param name="max">The maximizing player.</param>
        /// <param name="min">The minimizing player.</param>
        /// <param name="alpha">Best value guaranteed to MAX so far.</param>
        /// <param name="beta">Best value guaranteed to MIN so far.</param>
        /// <param name="depthFinal">Depth at which search stops.</param>
        /// <param name="depthCurrent">Depth of this node.</param>
        /// <returns>The backed-up minimax value for this MAX node.</returns>
        private static double maxValue(Game state, Player max, Player min, double alpha, double beta, int depthFinal, int depthCurrent)
        {
            // BUG FIX: the original constructed a fresh Game(max, min) and ran the
            // win checks against that empty game, ignoring the `state` parameter.
            // Terminal tests must be evaluated on the state actually being searched.
            if (state.checkWin(max))
            {
                return(double.MaxValue);
            }
            if (state.checkWin(min))
            {
                return(double.MinValue);
            }
            //saving this section for the heuristic value to be calculated and returned once we hit max search depth
            if (depthFinal == depthCurrent)
            {
                return(1); //heuristicValue(state,max);
            }
            double      v       = double.MinValue;
            // `alpha` is already a double; the original's (double) cast was redundant.
            List <Move> actions = actionsForMax(state, max, min, alpha, beta, depthFinal, depthCurrent);

            for (int i = 0; i < actions.Count; i++)
            {
                if (v < actions[i].value)
                {
                    v = actions[i].value;
                }
                // Beta cutoff: MIN will never allow a line worth >= beta.
                if (v >= beta)
                {
                    return(v);
                }
                if (v > alpha)
                {
                    alpha = v;
                }
            }
            return(v);
        }
// Example #3
// 0
        /// <summary>
        /// MIN node of alpha-beta search: returns the best (lowest) value
        /// achievable by <paramref name="min"/> from <paramref name="state"/>.
        /// </summary>
        /// <param name="state">Current game state being evaluated.</param>
        /// <param name="max">The maximizing player.</param>
        /// <param name="min">The minimizing player.</param>
        /// <param name="alpha">Best value guaranteed to MAX so far.</param>
        /// <param name="beta">Best value guaranteed to MIN so far.</param>
        /// <param name="depthFinal">Depth at which search stops.</param>
        /// <param name="depthCurrent">Depth of this node.</param>
        /// <returns>The backed-up minimax value for this MIN node.</returns>
        private static double minValue(Game state, Player max, Player min, double alpha, double beta, int depthFinal, int depthCurrent)
        {
            // BUG FIX: the original constructed a fresh Game(min, max) and ran the
            // win checks against that empty game, ignoring the `state` parameter.
            // Terminal tests must be evaluated on the state actually being searched.
            if (state.checkWin(max))
            {
                return(double.MaxValue);
            }
            if (state.checkWin(min))
            {
                return(double.MinValue);
            }
            if (depthFinal == depthCurrent)
            {
                return(1); //evaluation function goes here
            }
            double      v       = double.MaxValue;
            List <Move> actions = actionsForMin(state, max, min, alpha, beta, depthFinal, depthCurrent);

            for (int i = 0; i < actions.Count; i++)
            {
                if (actions[i].value < v)
                {
                    v = actions[i].value;
                }
                // Alpha cutoff: MAX will never allow a line worth <= alpha.
                if (v <= alpha)
                {
                    return(v);
                }
                if (v < beta)
                {
                    beta = v;
                }
            }
            return(v);
        }