Example #1
        public Minimax(GameEngine gameEngine, int depth)
        {
            this.gameEngine = gameEngine;
            this.scoreController = gameEngine.scoreController;
            available = new List<Cell>();
            this.chosenDepth = depth;

            // setup log file if debug is on
            if (debug)
            {
                logger = new Logger(LOGFILE, depth);
            }
        }
Example #2
 public NaiveAI(GameEngine gameEngine)
 {
     this.gameEngine = gameEngine;
 }
Example #3
        public void TestMCTStree()
        {
            WeightVectorAll weights = new WeightVectorAll
            {
                Corner = 0, Empty_cells = 0, Highest_tile = 0, Monotonicity = 0,
                Points = 0, Smoothness = 0, Snake = 1, Trapped_penalty = 0
            };
            int timeLimit = 100;

            int[][] state1 = new int[][] {
                new int[]{1024,16,0,0},
                new int[]{4,32,2,0},
                new int[]{64,16,0,0},
                new int[]{16,16,2,2}
            };
            int[][] state2 = new int[][] {
                new int[]{2,0,2,0},
                new int[]{8,2,0,0},
                new int[]{16,8,4,0},
                new int[]{64,4,4,0}
            };
            int[][] state3 = new int[][] {
                new int[]{16,16,16,4},
                new int[]{64,4,0,0},
                new int[]{8,0,2,0},
                new int[]{16,0,0,0}
            };
            int[][] state4 = new int[][] {
                new int[]{0,0,0,8},
                new int[]{0,0,16,16},
                new int[]{2,0,32,32},
                new int[]{2,4,16,8}
            };
            Console.WriteLine("Testing state1:");
            GameEngine gameEngine = new GameEngine();
            Minimax minimax = new Minimax(gameEngine, 0);
            Expectimax expectimax = new Expectimax(gameEngine, 0);
            MonteCarlo mcts = new MonteCarlo(gameEngine);

            Move minimaxMove = minimax.IterativeDeepening(new State(state1, CalculateScore(state1), GameEngine.PLAYER), timeLimit);
            Move expectimaxMove = expectimax.IterativeDeepening(new State(state1, CalculateScore(state1), GameEngine.PLAYER), timeLimit, weights);
            Move mctsMove = (mcts.TimeLimitedMCTS(new State(state1, CalculateScore(state1), GameEngine.PLAYER), timeLimit)).GeneratingMove;

            Console.WriteLine("Minimax move chosen: " + ((PlayerMove)minimaxMove).Direction);
            Console.WriteLine("Expectimax move chosen: " + ((PlayerMove)expectimaxMove).Direction);
            Console.WriteLine("MCTS move chosen: " + ((PlayerMove)mctsMove).Direction);

            Console.WriteLine("Testing state2:");
            minimaxMove = minimax.IterativeDeepening(new State(state2, CalculateScore(state2), GameEngine.PLAYER), timeLimit);
            expectimaxMove = expectimax.IterativeDeepening(new State(state2, CalculateScore(state2), GameEngine.PLAYER), timeLimit, weights);
            mctsMove = (mcts.TimeLimitedMCTS(new State(state2, CalculateScore(state2), GameEngine.PLAYER), timeLimit)).GeneratingMove;

            Console.WriteLine("Minimax move chosen: " + ((PlayerMove)minimaxMove).Direction);
            Console.WriteLine("Expectimax move chosen: " + ((PlayerMove)expectimaxMove).Direction);
            Console.WriteLine("MCTS move chosen: " + ((PlayerMove)mctsMove).Direction);

            Console.WriteLine("Testing state3:");
            minimaxMove = minimax.IterativeDeepening(new State(state3, CalculateScore(state3), GameEngine.PLAYER), timeLimit);
            expectimaxMove = expectimax.IterativeDeepening(new State(state3, CalculateScore(state3), GameEngine.PLAYER), timeLimit, weights);
            mctsMove = (mcts.TimeLimitedMCTS(new State(state3, CalculateScore(state3), GameEngine.PLAYER), timeLimit)).GeneratingMove;

            Console.WriteLine("Minimax move chosen: " + ((PlayerMove)minimaxMove).Direction);
            Console.WriteLine("Expectimax move chosen: " + ((PlayerMove)expectimaxMove).Direction);
            Console.WriteLine("MCTS move chosen: " + ((PlayerMove)mctsMove).Direction);

            Console.WriteLine("Testing state4:");
            minimaxMove = minimax.IterativeDeepening(new State(state4, CalculateScore(state4), GameEngine.PLAYER), timeLimit);
            expectimaxMove = expectimax.IterativeDeepening(new State(state4, CalculateScore(state4), GameEngine.PLAYER), timeLimit, weights);
            mctsMove = (mcts.TimeLimitedMCTS(new State(state4, CalculateScore(state4), GameEngine.PLAYER), timeLimit)).GeneratingMove;

            Console.WriteLine("Minimax move chosen: " + ((PlayerMove)minimaxMove).Direction);
            Console.WriteLine("Expectimax move chosen: " + ((PlayerMove)expectimaxMove).Direction);
            Console.WriteLine("MCTS move chosen: " + ((PlayerMove)mctsMove).Direction);
        }
Example #4
 public MonteCarlo(GameEngine gameEngine)
 {
     this.gameEngine = gameEngine;
     this.random = new Random();
 }
Example #5
        // Runs a game with the user playing
        private static void StartGame()
        {
            GameEngine game = new GameEngine();
            bool gameOver = false;
            CleanConsole();

            // main game loop
            while (!gameOver)
            {
                Console.SetCursorPosition(0, 0);
                Console.WriteLine("Score: " + game.scoreController.getScore() + "              ");
                Console.WriteLine(BoardHelper.ToString(game.board));
                DIRECTION direction = GetUserInput();
                PlayerMove move = new PlayerMove(direction);
                game.SendUserAction(move);
                if (new State(game.board, game.scoreController.getScore(), GameEngine.PLAYER).IsGameOver())
                {
                    gameOver = true;
                }
            }
            Console.WriteLine("Game over! Final score: " + game.scoreController.getScore());
            Thread.Sleep(500);
        }
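The GetUserInput helper referenced in StartGame is not included above. A minimal sketch of what it might look like, assuming the DIRECTION enum exposes UP, DOWN, LEFT and RIGHT members, is:

        // Hypothetical sketch (not from the original source): block until an arrow
        // key is pressed and map it to the assumed DIRECTION members.
        private static DIRECTION GetUserInput()
        {
            while (true)
            {
                ConsoleKey key = Console.ReadKey(true).Key;
                switch (key)
                {
                    case ConsoleKey.UpArrow: return DIRECTION.UP;
                    case ConsoleKey.DownArrow: return DIRECTION.DOWN;
                    case ConsoleKey.LeftArrow: return DIRECTION.LEFT;
                    case ConsoleKey.RightArrow: return DIRECTION.RIGHT;
                }
            }
        }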
Example #6
        // Runs random games
        private static void RunRandomGame()
        {
            int choice = GetChoice("1: Graphic run\n2: Test runs");

            if (choice == 1) // graphic game
            {
                GameEngine game = new GameEngine();
                Naive naive = new Naive(game);
                naive.RunRandomPlay(true);
            }
            else // test runs
            {
                int runs = GetChoice("Choose number of runs: ");
                StreamWriter writer = new StreamWriter(RANDOM_LOG_FILE, true);
                Dictionary<int, int> highTileCount = new Dictionary<int, int>() { { 64, 0 }, { 128, 0 }, { 256, 0 }, { 512, 0 }, { 1024, 0 }, { 2048, 0 }, { 4096, 0 }, { 8192, 0 }, { 16384, 0 }, { 32768, 0 } };
                int totalScore = 0;

                for (int i = 0; i < runs; i++)
                {
                    GameEngine game = new GameEngine();
                    Naive naive = new Naive(game);
                    State endState = naive.RunRandomPlay(false);

                    // note highest tile and points
                    int highestTile = BoardHelper.HighestTile(endState.Board);
                    int points = endState.Points;
                    totalScore += points;

                    // write stats
                    String stats = i + ":\t" + highestTile + "\t" + points + "\t";
                    Console.WriteLine(stats);
                    writer.WriteLine(stats);

                    // count the runs that reached at least each tile threshold
                    List<int> keys = new List<int>(highTileCount.Keys);
                    for (int j = 0; j < keys.Count; j++)
                    {
                        if (highestTile >= keys[j]) highTileCount[keys[j]]++;
                    }
                    Thread.Sleep(1000);
                }
                writer.Close();
                Console.WriteLine(GetStatistics(highTileCount, runs, totalScore));
            }
            Console.ReadLine();
        }
Example #7
 // Runs an entire game using the given AI type to decide on moves
 private static State RunAIGame(AI_TYPE AItype, bool print, int depth = 0, int timeLimit = 0, int iterationLimit = 0)
 {
     GameEngine game = new GameEngine();
     State end = null;
     if (AItype == AI_TYPE.CLASSIC_MINIMAX)
     {
         Minimax minimax = new Minimax(game, depth);
         end = minimax.RunClassicMinimax(print);
      }
     else if (AItype == AI_TYPE.ALPHA_BETA)
     {
         Minimax minimax = new Minimax(game, depth);
         end = minimax.RunAlphaBeta(print);
     }
     else if (AItype == AI_TYPE.ITERATIVE_DEEPENING_ALPHA_BETA)
     {
         Minimax minimax = new Minimax(game, depth);
         end = minimax.RunIterativeDeepeningAlphaBeta(print, timeLimit);
     }
     else if (AItype == AI_TYPE.PARALLEL_ALPHA_BETA)
     {
         Minimax minimax = new Minimax(game, depth);
         end = minimax.RunParallelAlphaBeta(print);
     }
     else if (AItype == AI_TYPE.PARALLEL_ITERATIVE_DEEPENING_ALPHA_BETA)
     {
         Minimax minimax = new Minimax(game, depth);
         end = minimax.RunParallelIterativeDeepeningAlphaBeta(print, timeLimit);
     }
     else if (AItype == AI_TYPE.CLASSIC_EXPECTIMAX)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunClassicExpectimax(print, weights);
     }
     else if (AItype == AI_TYPE.EXPECTIMAX_STAR1)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunStar1Expectimax(print, weights);
     }
     else if (AItype == AI_TYPE.EXPECTIMAX_STAR1_FW_PRUNING)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunStar1WithUnlikelyPruning(print, weights);
     }
     else if (AItype == AI_TYPE.ITERATIVE_DEEPENING_EXPECTIMAX)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunIterativeDeepeningExpectimax(print, timeLimit, weights);
     }
     else if (AItype == AI_TYPE.PARALLEL_EXPECTIMAX)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunParallelClassicExpectimax(print, weights);
     }
     else if (AItype == AI_TYPE.PARALLEL_ITERATIVE_DEEPENING_EXPECTIMAX)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunParallelIterativeDeepeningExpectimax(print, timeLimit, weights);
     }
     else if (AItype == AI_TYPE.TT_ITERATIVE_DEEPENING_EXPECTIMAX)
     {
          Expectimax expectimax = new Expectimax(game, depth);
          end = expectimax.RunTTExpectimax(print, timeLimit, weights);
     }
     else if (AItype == AI_TYPE.TT_ITERATIVE_DEEPENING_STAR1)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunTTStar1(print, timeLimit, weights);
     }
     else if (AItype == AI_TYPE.EXPECTIMAX_WITH_ALL_IMPROVEMENTS)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunTTIterativeDeepeningExpectimaxWithStar1andForwardPruning(print, timeLimit, weights);
     }
     else if (AItype == AI_TYPE.EXPECTIMAX_WITH_ALL_IMPROVEMENTS_NO_FORWARDPRUNING)
     {
         Expectimax expectimax = new Expectimax(game, depth);
         end = expectimax.RunTTIterativeDeepeningExpectimaxWithStar1(print, timeLimit, weights);
     }
     else if (AItype == AI_TYPE.ITERATION_LIMITED_MCTS)
     {
         MonteCarlo MCTS = new MonteCarlo(game);
         end = MCTS.RunIterationLimitedMCTS(print, iterationLimit);
     }
     else if (AItype == AI_TYPE.TIME_LIMITED_MCTS)
     {
         MonteCarlo MCTS = new MonteCarlo(game);
         end = MCTS.RunTimeLimitedMCTS(print, timeLimit);
     }
     else if (AItype == AI_TYPE.ROOT_PARALLEL_ITERATION_LIMITED_MCTS)
     {
         MonteCarlo MCTS = new MonteCarlo(game);
         end = MCTS.RunRootParallelizationIterationLimitedMCTS(print, iterationLimit, NUM_THREADS);
     }
     else if (AItype == AI_TYPE.ROOT_PARALLEL_TIME_LIMITED_MCTS)
     {
         MonteCarlo MCTS = new MonteCarlo(game);
         end = MCTS.RunRootParallelizationTimeLimitedMCTS(print, timeLimit, NUM_THREADS);
     }
     else if (AItype == AI_TYPE.EXPECTIMAX_MCTS_TIME_LIMITED)
     {
         HeuristicLearning HL = new HeuristicLearning(game);
         end = HL.RunExpectimaxMCTStimeLimited(print, depth, timeLimit);
     }
     else if (AItype == AI_TYPE.EXPECTIMAX_MCTS_WITH_SIMULATIONS_TIME_LIMITED)
     {
         HeuristicLearning HL = new HeuristicLearning(game);
         end = HL.RunExpectimaxMCTSwithSimulations(print, depth, timeLimit);
     }
     else if (AItype == AI_TYPE.FINAL_COMBI)
     {
         HeuristicLearning HL = new HeuristicLearning(game);
         end = HL.RunParallelizationMCTSExpectimaxCombi(print, depth, timeLimit);
     }
     else
     {
          throw new Exception("Unknown AI type: " + AItype);
     }
     if (print)
     {
         Console.WriteLine("GAME OVER!\nFinal score: " + game.scoreController.getScore());
     }
     return end;
 }
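As a usage illustration (not part of the original source), RunAIGame can be called with any of the AI_TYPE values dispatched above; arguments a given variant ignores can be left at their defaults. The parameter values below are placeholders, not recommended settings:

     // Hypothetical calls; depth/time limits are illustrative only.
     State alphaBetaEnd = RunAIGame(AI_TYPE.ITERATIVE_DEEPENING_ALPHA_BETA, print: true, timeLimit: 100);
     State mctsEnd = RunAIGame(AI_TYPE.ROOT_PARALLEL_TIME_LIMITED_MCTS, print: false, timeLimit: 100);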
Example #8
 public Expectimax(GameEngine game, int depth)
 {
     this.gameEngine = game;
     this.scoreController = gameEngine.scoreController;
     this.chosenDepth = depth;
 }