コード例 #1
0
ファイル: Program.cs プロジェクト: kstrandby/2048-AI
 // Runs an entire game using the given AI type to decide on moves.
 //
 // AItype:         which search/AI algorithm plays the game.
 // print:          when true, the final score is written to the console
 //                 (the individual Run* methods also receive this flag).
 // depth:          search depth for the depth-limited searchers; unused by the
 //                 pure MCTS variants.
 // timeLimit:      time budget for the time-limited variants. NOTE(review):
 //                 units (ms vs s) are not visible here -- confirm with callers.
 // iterationLimit: iteration budget for the iteration-limited MCTS variants.
 //
 // Returns the terminal State of the finished game.
 // Throws ArgumentException for an unrecognized AI_TYPE value.
 private static State RunAIGame(AI_TYPE AItype, bool print, int depth = 0, int timeLimit = 0, int iterationLimit = 0)
 {
     GameEngine game = new GameEngine();
     State end;
     switch (AItype)
     {
         case AI_TYPE.CLASSIC_MINIMAX:
             end = new Minimax(game, depth).RunClassicMinimax(print);
             break;
         case AI_TYPE.ALPHA_BETA:
             end = new Minimax(game, depth).RunAlphaBeta(print);
             break;
         case AI_TYPE.ITERATIVE_DEEPENING_ALPHA_BETA:
             end = new Minimax(game, depth).RunIterativeDeepeningAlphaBeta(print, timeLimit);
             break;
         case AI_TYPE.PARALLEL_ALPHA_BETA:
             end = new Minimax(game, depth).RunParallelAlphaBeta(print);
             break;
         case AI_TYPE.PARALLEL_ITERATIVE_DEEPENING_ALPHA_BETA:
             end = new Minimax(game, depth).RunParallelIterativeDeepeningAlphaBeta(print, timeLimit);
             break;
         case AI_TYPE.CLASSIC_EXPECTIMAX:
             end = new Expectimax(game, depth).RunClassicExpectimax(print, weights);
             break;
         case AI_TYPE.EXPECTIMAX_STAR1:
             end = new Expectimax(game, depth).RunStar1Expectimax(print, weights);
             break;
         case AI_TYPE.EXPECTIMAX_STAR1_FW_PRUNING:
             end = new Expectimax(game, depth).RunStar1WithUnlikelyPruning(print, weights);
             break;
         case AI_TYPE.ITERATIVE_DEEPENING_EXPECTIMAX:
             end = new Expectimax(game, depth).RunIterativeDeepeningExpectimax(print, timeLimit, weights);
             break;
         case AI_TYPE.PARALLEL_EXPECTIMAX:
             end = new Expectimax(game, depth).RunParallelClassicExpectimax(print, weights);
             break;
         case AI_TYPE.PARALLEL_ITERATIVE_DEEPENING_EXPECTIMAX:
             end = new Expectimax(game, depth).RunParallelIterativeDeepeningExpectimax(print, timeLimit, weights);
             break;
         case AI_TYPE.TT_ITERATIVE_DEEPENING_EXPECTIMAX:
             end = new Expectimax(game, depth).RunTTExpectimax(print, timeLimit, weights);
             break;
         case AI_TYPE.TT_ITERATIVE_DEEPENING_STAR1:
             end = new Expectimax(game, depth).RunTTStar1(print, timeLimit, weights);
             break;
         case AI_TYPE.EXPECTIMAX_WITH_ALL_IMPROVEMENTS:
             end = new Expectimax(game, depth).RunTTIterativeDeepeningExpectimaxWithStar1andForwardPruning(print, timeLimit, weights);
             break;
         case AI_TYPE.EXPECTIMAX_WITH_ALL_IMPROVEMENTS_NO_FORWARDPRUNING:
             end = new Expectimax(game, depth).RunTTIterativeDeepeningExpectimaxWithStar1(print, timeLimit, weights);
             break;
         case AI_TYPE.ITERATION_LIMITED_MCTS:
             end = new MonteCarlo(game).RunIterationLimitedMCTS(print, iterationLimit);
             break;
         case AI_TYPE.TIME_LIMITED_MCTS:
             end = new MonteCarlo(game).RunTimeLimitedMCTS(print, timeLimit);
             break;
         case AI_TYPE.ROOT_PARALLEL_ITERATION_LIMITED_MCTS:
             end = new MonteCarlo(game).RunRootParallelizationIterationLimitedMCTS(print, iterationLimit, NUM_THREADS);
             break;
         case AI_TYPE.ROOT_PARALLEL_TIME_LIMITED_MCTS:
             end = new MonteCarlo(game).RunRootParallelizationTimeLimitedMCTS(print, timeLimit, NUM_THREADS);
             break;
         case AI_TYPE.EXPECTIMAX_MCTS_TIME_LIMITED:
             end = new HeuristicLearning(game).RunExpectimaxMCTStimeLimited(print, depth, timeLimit);
             break;
         case AI_TYPE.EXPECTIMAX_MCTS_WITH_SIMULATIONS_TIME_LIMITED:
             end = new HeuristicLearning(game).RunExpectimaxMCTSwithSimulations(print, depth, timeLimit);
             break;
         case AI_TYPE.FINAL_COMBI:
             end = new HeuristicLearning(game).RunParallelizationMCTSExpectimaxCombi(print, depth, timeLimit);
             break;
         default:
             // The original threw a bare `new Exception()` with no message (CA2201).
             // ArgumentException pinpoints the bad value and is still caught by
             // any existing `catch (Exception)` handler in callers.
             throw new ArgumentException("Unknown AI type: " + AItype, "AItype");
     }
     if (print)
     {
         Console.WriteLine("GAME OVER!\nFinal score: " + game.scoreController.getScore());
     }
     return end;
 }