Example #1
        public static List <DataPoint> TopK(WeightVector w, List <DataPoint> points, int rank)
        {
            List <DataPoint> buffer = new List <DataPoint>(points); // copy so the caller's list is not reordered by the sort

            buffer.Sort((x, y) => f(x, w).CompareTo(f(y, w)));
            return(buffer.GetRange(0, rank));
        }
Example #2
        public decimal CalculateNormalizedScore(SawNormalizationMethod method, WeightVector weightVector, decimal[] rowValues, decimal[] minVector, decimal[] maxVector)
        {
            var score = 0.0m;

            for (int i = 0; i < NumberOfColumns; i++)
            {
                if (method == SawNormalizationMethod.LinearFirstType)
                {
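                    // SAW linear (first-type) normalization:
                    //   benefit criteria: (x - min) / (max - min)
                    //   cost criteria:    1 - (x - min) / (max - min)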
                    var isProfit = DecisionCriterias.First(x => x.Position == i).IsProfit;
                    var weight   = weightVector.GetValue(i);
                    if (isProfit)
                    {
                        var denom = (maxVector[i] - minVector[i]);
                        score += denom != 0 ? (((rowValues[i] - minVector[i]) / denom) * weight) : 0;
                    }
                    else
                    {
                        var denom = (maxVector[i] - minVector[i]);
                        score += denom != 0 ? ((1 - ((rowValues[i] - minVector[i])) / denom) * weight) : 0;
                    }
                }
                else
                {
                    return(0.0m);
                }
            }
            return(score);
        }
Example #3
        // Classic Expectimax search
        public Move ExpectimaxAlgorithm(State state, int depth, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {
                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return bestMove;
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return bestMove;
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER) // AI's turn
            {
                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);
                    currentScore = ExpectimaxAlgorithm(resultingState, depth - 1, weights).Score;
                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = move;
                    }
                }
                bestMove.Score = highestScore;
                return bestMove;
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();

                // return the weighted average of all the child nodes' scores
                double average = 0;
                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);

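                    // weight each child by its spawn probability (0.9 for a 2-tile, 0.1 for a 4-tile in 2048)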
                    average += StateProbability(((ComputerMove)move).Tile) * ExpectimaxAlgorithm(resultingState, depth - 1, weights).Score;
                }
                bestMove.Score = average / moves.Count;
                return bestMove;
            }
            else throw new Exception();
        }
Example #4
        public MBRModel <WeightVector> getRoot()
        {
            //getTree();
            //get root after add to tree
            RTree.Rectangle bounds     = tree.getBounds();
            WeightVector    upperRight = new WeightVector(bounds.get(0).GetValueOrDefault().max, bounds.get(1).GetValueOrDefault().max);
            WeightVector    lowerLeft  = new WeightVector(bounds.get(0).GetValueOrDefault().min, bounds.get(1).GetValueOrDefault().min);

            //return root
            return(new MBRModel <WeightVector>(lowerLeft, upperRight));
        }
Example #5
        /// <summary>
        /// Maps this datum's categories to weights.
        /// </summary>
        /// <returns>IWeights.</returns>
        public IWeights MapToWeights()
        {
            var weights = new[]
            {
                _color.R / 255.0,
                _color.G / 255.0,
                _color.B / 255.0
            };
            var vector = new WeightVector(weights);

            return(vector);
        }
Example #6
        public double max(List <DataPoint> list, WeightVector w)
        {
            double Max = Double.MinValue;

            if (w == null)
            {
                return(Max);
            }
            foreach (var item in list)
            {
                double temp = f(item, w);
                if (temp > Max)
                {
                    Max = temp;
                }
            }
            return(Max);
        }
Example #7
        // Recursive part of iterative deepening Expectimax
        private Tuple<Move, Boolean> IterativeDeepeningExpectimax(State state, int depth, double timeLimit, Stopwatch timer, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {
                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER) // AI's turn
            {
                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);
                    currentScore = IterativeDeepeningExpectimax(resultingState, depth - 1, timeLimit, timer, weights).Item1.Score;

                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = move;
                    }
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = highestScore;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                }
                bestMove.Score = highestScore;
                return new Tuple<Move, Boolean>(bestMove, true);
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();

                // return the weighted average of all the child nodes' scores
                double average = 0;
                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);
                int moveCheckedSoFar = 0;
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);

                    average += StateProbability(((ComputerMove)move).Tile) * IterativeDeepeningExpectimax(resultingState, depth - 1, timeLimit, timer, weights).Item1.Score;
                    moveCheckedSoFar++;
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = average / moveCheckedSoFar;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                }
                bestMove.Score = average / moves.Count;
                return new Tuple<Move, Boolean>(bestMove, true);
            }
            else throw new Exception();
        }
Example #8
        // Runs a parallel expectimax search to speed up search
        // A search is started in a separate thread for each child of the given root node
        // This method should only be called for the root, where depth will always
        // be > 0 and player will always be GameEngine.PLAYER - the recursion is started
        // for the children of the root using standard Expectimax algorithm
        private Move ParallelExpectimax(State state, int depth, WeightVector weights)
        {
            Move bestMove = new PlayerMove();

            List<Move> moves = state.GetMoves();
            ConcurrentBag<Tuple<double, Move>> scores = new ConcurrentBag<Tuple<double, Move>>();

            if (moves.Count == 0)
            {
                // game over
                return bestMove;
            }

            // create the resulting states before starting the threads
            List<State> resultingStates = new List<State>();
            foreach (Move move in moves)
            {
                State resultingState = state.ApplyMove(move);
                resultingStates.Add(resultingState);
            }

            // start a thread for each child
            Parallel.ForEach(resultingStates, resultingState =>
            {
                double score = ExpectimaxAlgorithm(resultingState, depth - 1, weights).Score;
                scores.Add(new Tuple<double, Move>(score, resultingState.GeneratingMove));
            });
            // find the best score
            double highestScore = Double.MinValue;
            foreach (Tuple<double, Move> score in scores)
            {
                PlayerMove move = (PlayerMove)score.Item2;
                if (score.Item1 > highestScore)
                {
                    highestScore = score.Item1;
                    bestMove = score.Item2;
                }
            }
            return bestMove;
        }
Example #9
        // ----------------------------------------------------------------------------------
        // Iterative Deepening Expectimax-Star1 with Transposition Table and Move Ordering
        // ----------------------------------------------------------------------------------
        // Runs an entire game using iterative deepening expectimax with star1 pruning,
        // transposition table and move ordering to decide on moves
        public State RunTTStar1(bool print, int timeLimit, WeightVector weights)
        {
            transposition_table = new ConcurrentDictionary<long, TableRow>();
            InitializeZobristTable();

            while (true)
            {
                // update state
                currentState = new State(BoardHelper.CloneBoard(gameEngine.board), scoreController.getScore(), GameEngine.PLAYER);

                if (print)
                {
                    Program.PrintState(currentState);
                }

                // run algorithm and send action choice to game engine
                Move move = TTStar1(currentState, timeLimit, weights);
                if (((PlayerMove)move).Direction == (DIRECTION)(-1))
                {
                    // game over
                    return currentState;
                }
                gameEngine.SendUserAction((PlayerMove)move);
            }
        }
Example #10
        // Runs the iterative deepening part of RunTTStar1 above
        public Move TTStar1(State state, double timeLimit, WeightVector weights)
        {
            long zob_hash = GetHash(state);

            int depth = 1;
            Stopwatch timer = new Stopwatch();
            Move bestMove = null;
            // start the search
            timer.Start();
            while (true)
            {
                if (timer.ElapsedMilliseconds > timeLimit)
                {
                    if (bestMove == null) // workaround to overcome problem with timer running out too fast with low limits
                    {
                        timeLimit += 10;
                        timer.Restart();
                    }
                    else break;
                }
                Tuple<Move, Boolean> result = RecursiveTTStar1(state, Double.MinValue, Double.MaxValue, depth, timeLimit, timer, weights);
                if (result.Item2)
                {
                    bestMove = result.Item1; // only update bestMove if full recursion
                }
                depth++;
            }

            timer.Stop();

            TableRow row = new TableRow((short)depth, ((PlayerMove)bestMove).Direction, bestMove.Score);
            transposition_table.AddOrUpdate(zob_hash, row, (key, oldValue) => row);

            return bestMove;
        }
Example #11
        // Recursive part of iterative deepening Expectimax with transposition table
        private Tuple<Move, Boolean> RecursiveTTExpectimax(State state, int depth, double timeLimit, Stopwatch timer, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {

                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER) // AI's turn
            {
                // transposition table look-up
                long zob_hash = GetHash(state);
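                // only reuse a stored result that was searched to a greater depth than currently required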
                if (transposition_table.ContainsKey(zob_hash) && transposition_table[zob_hash].depth > depth)
                {
                    Move move = new PlayerMove(transposition_table[zob_hash].direction);
                    move.Score = transposition_table[zob_hash].value;
                    return new Tuple<Move, Boolean>(move, true);
                }

                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);
                    currentScore = RecursiveTTExpectimax(resultingState, depth - 1, timeLimit, timer, weights).Item1.Score;

                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = move;
                    }
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = highestScore;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                }
                bestMove.Score = highestScore;

                // add result to transposition table
                TableRow row = new TableRow((short)depth, ((PlayerMove)bestMove).Direction, bestMove.Score);
                transposition_table.AddOrUpdate(zob_hash, row, (key, oldValue) => row);
                return new Tuple<Move, Boolean>(bestMove, true);
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();

                // return the weighted average of all the child nodes' scores
                double average = 0;
                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);
                int moveCheckedSoFar = 0;
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);

                    average += StateProbability(((ComputerMove)move).Tile) * RecursiveTTExpectimax(resultingState, depth - 1, timeLimit, timer, weights).Item1.Score;
                    moveCheckedSoFar++;
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = average / moveCheckedSoFar;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                }
                bestMove.Score = average / moves.Count;
                return new Tuple<Move, Boolean>(bestMove, true);
            }
            else throw new Exception();
        }
Example #12
 public static double f(DataPoint q, WeightVector w)
 {
     // weighted score: each attribute is multiplied by its corresponding weight
     return(q.star.Value * w.star.Value + q.rating.Value * w.rating.Value);
 }
Example #13
File: AI.cs Project: kstrandby/2048-AI
        // Evaluation function
        public static double EvaluateWithWeights(State state, WeightVector weights)
        {
            if (state.IsGameOver()) return GetLowerBound(weights) - 10;
            else
            {
                double corner = 0, emptycells = 0, highestvalue = 0, monotonicity = 0, points = 0, smoothness = 0, snake = 0, trappedpenalty = 0;
                // Only do the heuristic calculation if the weight is not 0 (avoid unnecessary work)
                if(((WeightVectorAll)weights).Corner != 0) corner = Corner(state);
                if(((WeightVectorAll)weights).Empty_cells != 0) emptycells = EmptyCells(state);
                if(((WeightVectorAll)weights).Highest_tile != 0) highestvalue = HighestValue(state);
                if(((WeightVectorAll)weights).Monotonicity != 0) monotonicity = Monotonicity(state);
                if(((WeightVectorAll)weights).Points != 0) points = Points(state);
                if(((WeightVectorAll)weights).Smoothness != 0) smoothness = Smoothness(state);
                if(((WeightVectorAll)weights).Snake != 0) snake = WeightSnake(state);
                if(((WeightVectorAll)weights).Trapped_penalty != 0) trappedpenalty = TrappedPenalty(state);

                // evaluation function is a linear combination of heuristic values and their weights
                double eval = ((WeightVectorAll)weights).Corner * corner + ((WeightVectorAll)weights).Empty_cells * emptycells + ((WeightVectorAll)weights).Highest_tile * highestvalue
                    + ((WeightVectorAll)weights).Monotonicity * monotonicity + ((WeightVectorAll)weights).Points * points + ((WeightVectorAll)weights).Smoothness * smoothness
                    + ((WeightVectorAll)weights).Snake * snake - ((WeightVectorAll)weights).Trapped_penalty * trappedpenalty;

                if (state.IsWin())
                {
                    return eval + 10;
                }
                else return eval;
            }
        }
Example #14
        // Recursive part of iterative deepening Expectimax with star 1 pruning
        private Tuple<Move, Boolean> RecursiveIterativeDeepeningExpectimaxWithStar1(State state, double alpha, double beta, int depth,
            int timeLimit, Stopwatch timer, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {
                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER)
            {
                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);
                    currentScore = RecursiveIterativeDeepeningExpectimaxWithStar1(resultingState, alpha, beta, depth - 1, timeLimit, timer, weights).Item1.Score;
                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = move;
                    }
                    alpha = Math.Max(alpha, highestScore);
                    if (beta <= alpha)
                    { // beta cut-off
                        break;
                    }
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = highestScore;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                }

                bestMove.Score = highestScore;
                return new Tuple<Move, Boolean>(bestMove, true);
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();

                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);

                int numSuccessors = moves.Count;
                double upperBound = AI.GetUpperBound(weights);
                double lowerBound = AI.GetLowerBound(weights);
                double curAlpha = numSuccessors * (alpha - upperBound) + upperBound;
                double curBeta = numSuccessors * (beta - lowerBound) + lowerBound;
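                // Star1 cut bounds: a child scoring <= curAlpha means the node's average cannot
                // exceed alpha even if all remaining children reach upperBound; a child scoring
                // >= curBeta means the average cannot drop below beta even if the rest hit lowerBound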

                double scoreSum = 0;
                int i = 1;
                foreach (Move move in moves)
                {

                    double sucAlpha = Math.Max(curAlpha, lowerBound);
                    double sucBeta = Math.Min(curBeta, upperBound);

                    State resultingState = state.ApplyMove(move);

                    double score = StateProbability(((ComputerMove)move).Tile) *
                        RecursiveIterativeDeepeningExpectimaxWithStar1(resultingState, sucAlpha, sucBeta, depth - 1, timeLimit, timer, weights).Item1.Score;
                    scoreSum += score;
                    if (score <= curAlpha)
                    {
                        scoreSum += upperBound * (numSuccessors - i);
                        bestMove.Score = scoreSum / numSuccessors;
                        return new Tuple<Move, Boolean>(bestMove, true); // pruning
                    }
                    if (score >= curBeta)
                    {
                        scoreSum += lowerBound * (numSuccessors - i);
                        bestMove.Score = scoreSum / numSuccessors;
                        return new Tuple<Move, Boolean>(bestMove, true); // pruning
                    }
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = scoreSum / i;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                    curAlpha += upperBound - score;
                    curBeta += lowerBound - score;

                    i++;

                }
                bestMove.Score = scoreSum / numSuccessors;
                return new Tuple<Move, Boolean>(bestMove, true);
            }

            else throw new Exception();
        }
Example #15
        // Expectimax with Star2 pruning
        // NB: DO NOT USE - way too slow
        private Move Star2Expectimax(State state, double alpha, double beta, int depth, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {
                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.Evaluate(state);
                    return bestMove;
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.Evaluate(state);
                    return bestMove;
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER)
            {
                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);
                    currentScore = Star2Expectimax(resultingState, alpha, beta, depth - 1, weights).Score;
                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = move;
                    }
                    alpha = Math.Max(alpha, highestScore);
                    if (beta <= alpha)
                    { // beta cut-off
                        break;
                    }
                }

                bestMove.Score = highestScore;
                return bestMove;
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();
                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);

                int numSuccessors = moves.Count;
                double upperBound = AI.GetUpperBound(weights);
                double lowerBound = AI.GetLowerBound(weights);

                double curAlpha = numSuccessors * (alpha - upperBound);
                double curBeta = numSuccessors * (beta - lowerBound);

                double sucAlpha = Math.Max(curAlpha, lowerBound);

                double[] probeValues = new double[numSuccessors];

                // probing phase
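                // each probe is a cheap single-successor lookahead whose value is used to tighten
                // the pruning window before the full search phase below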
                double vsum = 0;
                int i = 1;
                foreach (Move move in moves)
                {
                    curBeta += lowerBound;
                    double sucBeta = Math.Min(curBeta, upperBound);

                    State resultingState = state.ApplyMove(move);
                    probeValues[i - 1] = Probe(resultingState, sucAlpha, sucBeta, depth - 1, weights);
                    vsum += probeValues[i - 1];
                    if (probeValues[i - 1] >= curBeta)
                    {
                        vsum += lowerBound * (numSuccessors - i);
                        bestMove.Score = vsum / numSuccessors;
                        return bestMove; // pruning
                    }
                    curBeta -= probeValues[i - 1];
                    i++;
                }

                // search phase
                vsum = 0;
                i = 1;
                foreach (Move move in moves)
                {
                    curAlpha += upperBound;
                    curBeta += probeValues[i - 1];
                    sucAlpha = Math.Max(curAlpha, lowerBound);
                    double sucBeta = Math.Min(curBeta, upperBound);

                    State resultingState = state.ApplyMove(move);
                    double score = StateProbability(((ComputerMove)move).Tile) * Star2Expectimax(resultingState, sucAlpha, sucBeta, depth - 1, weights).Score;
                    vsum += score;

                    if (score <= curAlpha)
                    {
                        vsum += upperBound * (numSuccessors - i);
                        bestMove.Score = vsum / numSuccessors;
                        return bestMove; // pruning
                    }
                    if (score >= curBeta)
                    {
                        vsum += lowerBound * (numSuccessors - i);
                        bestMove.Score = vsum / numSuccessors;
                        return bestMove; // pruning
                    }

                    curAlpha -= score;
                    curBeta -= score;

                    i++;
                }
                bestMove.Score = vsum / numSuccessors;
                return bestMove;
            }
            else
            {
                throw new Exception();
            }
        }
Example #16
        public async Task <IList <PlanElementCandidate> > GetCandidates(Plan plan, WeightVector weightVector)
        {
            var test       = true;
            var candidates = new List <PlanElementCandidate>();

            PlanElementType[] planElementTypes = new PlanElementType[]
            {
                PlanElementType.Entertainment,
                PlanElementType.Relax,
                PlanElementType.Activity,
                PlanElementType.Culture,
                PlanElementType.Sightseeing,
                PlanElementType.Partying,
                PlanElementType.Shopping,
            };


            foreach (var planElementType in planElementTypes)
            {
                var googlePlaceTypes = GooglePlaceTypes.Table.Where(x => x.PlanElementType == planElementType).ToList();

                foreach (var type in googlePlaceTypes)
                {
                    var nearbySearchInput = _googlePlaceNearbySearchInputFactory.Create(plan.StartLocation, (int)MaximumDistanceToAccomodation, type);
                    var nearbyResults     = await _googlePlaceNearbySearchApiClient.GetAsync(nearbySearchInput);

                    int counter = 1;
                    foreach (var nr in nearbyResults.results)
                    {
                        var details = await _googlePlaceDetailsApiClient.GetAsync(_googlePlaceDetailsInputFactory.CreateAllUseful(nr.place_id));

                        if (details.IsOk)
                        {
                            var candidate = new PlanElementCandidate(details.Result.name, details.Result.place_id, details.Result.formatted_address, details.Result.geometry.location, details.Result.opening_hours, details.Result.types, details.Result.rating, details.Result.price_level, details.Result.user_ratings_total);
                            if (!candidates.Any(x => x.PlaceId == candidate.PlaceId))
                            {
                                candidates.Add(candidate);
                            }
                        }
                        ++counter;
                    }

                    if (test && candidates.Count > 15)
                    {
                        return(candidates);
                    }

                    WeightVectorLabel weightVectorLabel = WeightVector.TranslateLabel(planElementType);

                    if ((plan.PlanForm.IsOverOneWeek || weightVector.GetLabelValue(weightVectorLabel) >= 0.1m) && nearbyResults.IsMoreResults)
                    {
                        var nearbyResults2 = await _googlePlaceNearbySearchApiClient.GetNextPageTokenAsync(nearbyResults.next_page_token);

                        foreach (var nr in nearbyResults2.results)
                        {
                            var details = await _googlePlaceDetailsApiClient.GetAsync(_googlePlaceDetailsInputFactory.CreateAllUseful(nr.place_id));

                            if (details.IsOk)
                            {
                                var candidate = new PlanElementCandidate(details.Result.name, details.Result.place_id, details.Result.formatted_address, details.Result.geometry.location, details.Result.opening_hours, details.Result.types, details.Result.rating, details.Result.price_level, details.Result.user_ratings_total);

                                if (!candidates.Any(x => x.PlaceId == candidate.PlaceId))
                                {
                                    candidates.Add(candidate);
                                }
                            }
                            ++counter;
                        }
                    }

                    if (test && candidates.Count > 15)
                    {
                        return(candidates);
                    }
                }

                if (test && candidates.Count > 15)
                {
                    break;
                }
            }

            if (candidates.Count == 0)
            {
                throw new UserFriendlyException("No potential plan element candidates were found");
            }

            return(candidates);
        }
Example #17
        // Expectimax search with Star1 pruning and forward pruning
        private Move Star1WithUnlikelyPruning(State state, double alpha, double beta, int depth, int lastSpawn, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {
                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return bestMove;
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return bestMove;
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER)
            {
                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    State resultingState = state.ApplyMove(move);
                    currentScore = Star1WithUnlikelyPruning(resultingState, alpha, beta, depth - 1, lastSpawn, weights).Score;
                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = move;
                    }
                    alpha = Math.Max(alpha, highestScore);
                    if (beta <= alpha)
                    { // beta cut-off
                        break;
                    }
                }

                bestMove.Score = highestScore;
                return bestMove;
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();

                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);

                int numSuccessors = moves.Count;
                double upperBound = AI.GetUpperBound(weights);
                double lowerBound = AI.GetLowerBound(weights);
                double curAlpha = numSuccessors * (alpha - upperBound) + upperBound;
                double curBeta = numSuccessors * (beta - lowerBound) + lowerBound;

                double scoreSum = 0;
                int i = 1;
                foreach (Move move in moves)
                {
                    int value = ((ComputerMove)move).Tile;
                    if (value == 4 && lastSpawn == 4) continue; // unlikely event pruning (2 4-spawns in sequence only has 1% chance)

                    double sucAlpha = Math.Max(curAlpha, lowerBound);
                    double sucBeta = Math.Min(curBeta, upperBound);

                    State resultingState = state.ApplyMove(move);

                    double score = StateProbability(((ComputerMove)move).Tile) * Star1WithUnlikelyPruning(resultingState, sucAlpha, sucBeta, depth - 1, value, weights).Score;
                    scoreSum += score;
                    if (score <= curAlpha)
                    {
                        scoreSum += upperBound * (numSuccessors - i);
                        bestMove.Score = scoreSum / numSuccessors;
                        return bestMove; // pruning
                    }
                    if (score >= curBeta)
                    {
                        scoreSum += lowerBound * (numSuccessors - i);
                        bestMove.Score = scoreSum / numSuccessors;
                        return bestMove; // pruning
                    }
                    curAlpha += upperBound - score;
                    curBeta += lowerBound - score;

                    i++;

                }
                bestMove.Score = scoreSum / numSuccessors;
                return bestMove;
            }

            else throw new Exception();
        }
Example #18
        // Recursive part of iterative deepening Expectimax with Star1, move ordering and transposition table (TTStar1)
        private Tuple<Move, Boolean> RecursiveTTStar1(State state, double alpha, double beta, int depth, double timeLimit, Stopwatch timer, WeightVector weights)
        {
            Move bestMove;

            if (depth == 0 || state.IsGameOver())
            {
                if (state.Player == GameEngine.PLAYER)
                {
                    bestMove = new PlayerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else if (state.Player == GameEngine.COMPUTER)
                {
                    bestMove = new ComputerMove(); // dummy action, as there will be no valid move
                    bestMove.Score = AI.EvaluateWithWeights(state, weights);
                    return new Tuple<Move, Boolean>(bestMove, true);
                }
                else throw new Exception();
            }
            if (state.Player == GameEngine.PLAYER) // AI's turn
            {
                DIRECTION bestDirection = (DIRECTION)(-1);
                bestMove = new PlayerMove();
                double highestScore = Double.MinValue, currentScore = Double.MinValue;

                // transposition table look-up
                long zob_hash = GetHash(state);
                if (transposition_table.ContainsKey(zob_hash) && transposition_table[zob_hash].depth > depth)
                {
                    Move move = new PlayerMove(transposition_table[zob_hash].direction);
                    move.Score = transposition_table[zob_hash].value;
                    return new Tuple<Move, Boolean>(move, true);
                }
                // move ordering - make sure we first check the move we believe to be best based on earlier searches
                else if (transposition_table.ContainsKey(zob_hash))
                {
                    bestDirection = transposition_table[zob_hash].direction;
                    State resultingState = state.ApplyMove(new PlayerMove(bestDirection));
                    currentScore = RecursiveTTStar1(resultingState, alpha, beta, depth - 1, timeLimit, timer, weights).Item1.Score;

                    if (currentScore > highestScore)
                    {
                        highestScore = currentScore;
                        bestMove = new PlayerMove(bestDirection);
                    }
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = highestScore;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                }

                // now check the rest of moves
                List<Move> moves = state.GetMoves();
                foreach (Move move in moves)
                {
                    if (((PlayerMove)move).Direction != bestDirection)
                    {
                        State resultingState = state.ApplyMove(move);
                        currentScore = RecursiveTTStar1(resultingState, alpha, beta, depth - 1, timeLimit, timer, weights).Item1.Score;

                        if (currentScore > highestScore)
                        {
                            highestScore = currentScore;
                            bestMove = move;
                        }
                        alpha = Math.Max(alpha, highestScore);
                        if (beta <= alpha)
                        { // beta cut-off
                            break;
                        }
                        if (timer.ElapsedMilliseconds > timeLimit)
                        {
                            bestMove.Score = highestScore;
                            return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                        }
                    }

                }
                bestMove.Score = highestScore;

                // add result to transposition table
                TableRow row = new TableRow((short)depth, ((PlayerMove)bestMove).Direction, bestMove.Score);
                transposition_table.AddOrUpdate(zob_hash, row, (key, oldValue) => row);
                return new Tuple<Move, Boolean>(bestMove, true);
            }
            else if (state.Player == GameEngine.COMPUTER) // computer's turn  (the random event node)
            {
                bestMove = new ComputerMove();
                int moveCheckedSoFar = 0;

                List<Cell> availableCells = state.GetAvailableCells();
                List<Move> moves = state.GetAllComputerMoves(availableCells);

                int numSuccessors = moves.Count;
                double upperBound = AI.GetUpperBound(weights);
                double lowerBound = AI.GetLowerBound(weights);
                double curAlpha = numSuccessors * (alpha - upperBound) + upperBound;
                double curBeta = numSuccessors * (beta - lowerBound) + lowerBound;

                double scoreSum = 0;
                int i = 1;
                foreach (Move move in moves)
                {
                    double sucAlpha = Math.Max(curAlpha, lowerBound);
                    double sucBeta = Math.Min(curBeta, upperBound);

                    State resultingState = state.ApplyMove(move);

                    double score = StateProbability(((ComputerMove)move).Tile) * RecursiveTTStar1(resultingState, sucAlpha, sucBeta, depth - 1, timeLimit, timer, weights).Item1.Score;
                    scoreSum += score;
                    moveCheckedSoFar++;
                    if (score <= curAlpha)
                    {
                        scoreSum += upperBound * (numSuccessors - i);
                        bestMove.Score = scoreSum / numSuccessors;
                        return new Tuple<Move,bool>(bestMove, true); // pruning
                    }
                    if (score >= curBeta)
                    {
                        scoreSum += lowerBound * (numSuccessors - i);
                        bestMove.Score = scoreSum / numSuccessors;
                        return new Tuple<Move, bool>(bestMove, true); // pruning
                    }
                    if (timer.ElapsedMilliseconds > timeLimit)
                    {
                        bestMove.Score = scoreSum / moveCheckedSoFar;
                        return new Tuple<Move, Boolean>(bestMove, false); // recursion not completed, return false
                    }
                    curAlpha += upperBound - score;
                    curBeta += lowerBound - score;

                    i++;

                }
                bestMove.Score = scoreSum / numSuccessors;
                return new Tuple<Move, bool>(bestMove, true);
            }
            else throw new Exception();
        }
Example #19
 // Iterative Deepening Expectimax search with star1 and forward pruning
 public Move IterativeDeepeningExpectimaxWithStar1andForwardPruning(State state, double alpha, double beta, int lastSpawn, int timeLimit, WeightVector weights)
 {
     int depth = 1;
     Stopwatch timer = new Stopwatch();
     Move bestMove = null;
     // start the search
     timer.Start();
     while (timer.ElapsedMilliseconds < timeLimit)
     {
         Tuple<Move, Boolean> result = RecursiveIterativeDeepeningExpectimaxWithStar1andForwardPruning(state,alpha, beta, depth, lastSpawn, timeLimit, timer, weights);
         if (result.Item2) bestMove = result.Item1; // only update bestMove if full recursion
         depth++;
     }
     Console.WriteLine(depth);
     return bestMove;
 }
Example #20
        // Runs a parallel version of iterative deepening
        // A search is started in a separate thread for each child of root node
        private Move ParallelIterativeDeepeningExpectimaxWithStar1andForwardPruning(State state, double alpha, double beta, int lastSpawn, int timeLimit, WeightVector weights)
        {
            Move bestMove = new PlayerMove();

            List<Move> moves = state.GetMoves();
            ConcurrentBag<Tuple<double, Move>> scores = new ConcurrentBag<Tuple<double, Move>>();

            if (moves.Count == 0)
            {
                // game over
                return bestMove;
            }

            // create the resulting states before starting the threads
            List<State> resultingStates = new List<State>();
            foreach (Move move in moves)
            {
                State resultingState = state.ApplyMove(move);
                resultingStates.Add(resultingState);
            }

            Parallel.ForEach(resultingStates, resultingState =>
            {
                double score = IterativeDeepeningExpectimaxWithStar1andForwardPruning(resultingState, alpha, beta, lastSpawn, timeLimit, weights).Score;
                scores.Add(new Tuple<double, Move>(score, resultingState.GeneratingMove));
            });
            // find the best score
            double highestScore = Double.MinValue;
            foreach (Tuple<double, Move> score in scores)
            {
                PlayerMove move = (PlayerMove)score.Item2;
                if (score.Item1 > highestScore)
                {
                    highestScore = score.Item1;
                    bestMove = score.Item2;
                }
            }
            return bestMove;
        }
Example #21
 public bool contains(WeightVector e, MBRModel <WeightVector> MBR)
 {
     return(e.star >= MBR.lowerLeft.star && e.star <= MBR.upperRight.star && e.rating >= MBR.lowerLeft.rating && e.rating <= MBR.upperRight.rating);
 }
Example #22
 // Star2 probing
 private double Probe(State state, double alpha, double beta, int depth, WeightVector weights)
 {
     if (depth == 0 || state.IsGameOver())
     {
         return AI.Evaluate(state);
     }
     else
     {
         State choice = PickSuccessor(state);
         return Star2Expectimax(choice, alpha, beta, depth - 1, weights).Score;
     }
 }
Example #23
        //---------------------------------------------------------------------------------------//
        // FINAL COMBINATION OF ALL IMPROVEMENTS WITH TRANSPOSITION TABLE AND NO PARALLELIZATION
        //---------------------------------------------------------------------------------------//
        public State RunTTIterativeDeepeningExpectimaxWithStar1andForwardPruning(bool print, int timeLimit, WeightVector weights)
        {
            int lastSpawn = 0; // don't consider the last spawn value at the first move
            transposition_table = new ConcurrentDictionary<long, TableRow>();
            InitializeZobristTable();

            while (true)
            {
                // update state
                currentState = new State(BoardHelper.CloneBoard(gameEngine.board), scoreController.getScore(), GameEngine.PLAYER);

                if (print)
                {
                    Program.PrintState(currentState);
                }

                // run algorithm and send action choice to game engine
                Move move = TTIterativeDeepeningExpectimaxWithStar1andForwardPruning(currentState, Double.MinValue, Double.MaxValue, lastSpawn, timeLimit, weights);
                if (((PlayerMove)move).Direction == (DIRECTION)(-1))
                {
                    // game over
                    return currentState;
                }
                lastSpawn = gameEngine.SendUserAction((PlayerMove)move);
            }
        }
Example #24
        // ----------------------------------------------------------------------------------
        // Parallel Iterative Deepening Expectimax Search
        // ----------------------------------------------------------------------------------
        // Runs an entire game using parallelized iterative deepening expectimax search
        // to decide on moves
        public State RunParallelIterativeDeepeningExpectimax(bool print, int timeLimit, WeightVector weights)
        {
            while (true)
            {
                // update state
                currentState = new State(BoardHelper.CloneBoard(gameEngine.board), scoreController.getScore(), GameEngine.PLAYER);

                if (print)
                {
                    Program.PrintState(currentState);
                }

                // run algorithm and send action choice to game engine
                Move move = ParallelIterativeDeepeningExpectimax(currentState, timeLimit, weights);
                if (((PlayerMove)move).Direction == (DIRECTION)(-1))
                {
                    // game over
                    return currentState;
                }
                gameEngine.SendUserAction((PlayerMove)move);
            }
        }
Example #25
 // Iterative Deepening Expectimax search
 public Move IterativeDeepening(State state, double timeLimit, WeightVector weights)
 {
     int depth = 0;
     Stopwatch timer = new Stopwatch();
     Move bestMove = null;
     // start the search
     timer.Start();
     while (timer.ElapsedMilliseconds < timeLimit)
     {
         Tuple<Move, Boolean> result = IterativeDeepeningExpectimax(state, depth, timeLimit, timer, weights);
         if (result.Item2) bestMove = result.Item1; // only update bestMove if full recursion
         depth++;
     }
     return bestMove;
 }
Example #26
        //0. Price //1. Rating //2. Distance //3. Popularity
        //4.Entertainment //5. Relax //6. Activity //7. Culture //8. Sightseeing //9. Partying //10. Shopping

        public WeightVector Generate(PlanForm planForm)
        {
            var weightVector = new WeightVector();

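            // The total weight of 1.0m is split into two budgets of 0.5m each:
            // one for the plan element types (Entertainment..Shopping) and one for Price, Rating, Distance and Popularity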
            //----------- Allocate 0.5m to the plan element types ---------------
            var     allPreferedStep     = Math.Round(0.3m / (planForm.PreferedPlanElements.Count()), 2);//max 0.3/14
            decimal totalSecondCategory = 0.5m;

            //PreferedPlanElements may contain at most 2 elements belonging to types 4-10 -> element count * allPreferedStep
            weightVector.AddValue(WeightVectorLabel.Entertainment, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Entertainment));
            weightVector.AddValue(WeightVectorLabel.Sightseeing, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Sightseeing));
            weightVector.AddValue(WeightVectorLabel.Activity, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Activity));
            weightVector.AddValue(WeightVectorLabel.Culture, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Culture));
            weightVector.AddValue(WeightVectorLabel.Relax, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Relax));
            weightVector.AddValue(WeightVectorLabel.Partying, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Partying));
            weightVector.AddValue(WeightVectorLabel.Shopping, allPreferedStep * planForm.PreferedPlanElements.Count(x => x == PlanElementType.Shopping));

            var allSortedStep = Math.Round((totalSecondCategory - weightVector.GetTotalSum()) / 9, 2);
            var remainingRest = (totalSecondCategory - weightVector.GetTotalSum()) - 9 * allSortedStep;

            //SortedPlanElements - points for positions: 3,2,2,1,1,0,0
            for (int i = 0; i < planForm.SortedPlanElements.Count; i++)
            {
                decimal bonus = 0;
                switch (i)
                {
                case 0:
                    bonus = allSortedStep * 3 + remainingRest;
                    break;

                case 1:
                case 2:
                    bonus = allSortedStep * 2;
                    break;

                case 3:
                case 4:
                    bonus = allSortedStep * 1;
                    break;

                default:
                    break;
                }

                if (planForm.SortedPlanElements[i] == PlanElementType.Entertainment)
                {
                    weightVector.AddValue(WeightVectorLabel.Entertainment, bonus);
                }
                else if (planForm.SortedPlanElements[i] == PlanElementType.Sightseeing)
                {
                    weightVector.AddValue(WeightVectorLabel.Sightseeing, bonus);
                }
                else if (planForm.SortedPlanElements[i] == PlanElementType.Activity)
                {
                    weightVector.AddValue(WeightVectorLabel.Activity, bonus);
                }
                else if (planForm.SortedPlanElements[i] == PlanElementType.Culture)
                {
                    weightVector.AddValue(WeightVectorLabel.Culture, bonus);
                }
                else if (planForm.SortedPlanElements[i] == PlanElementType.Relax)
                {
                    weightVector.AddValue(WeightVectorLabel.Relax, bonus);
                }
                else if (planForm.SortedPlanElements[i] == PlanElementType.Partying)
                {
                    weightVector.AddValue(WeightVectorLabel.Partying, bonus);
                }
                else if (planForm.SortedPlanElements[i] == PlanElementType.Shopping)
                {
                    weightVector.AddValue(WeightVectorLabel.Shopping, bonus);
                }
            }



            //----------- Allocate the remaining 0.5m ---------------
            decimal totalFirstCategory = 0.5m;

            weightVector.SetPriority(WeightVectorLabel.Rating, 0.2m);
            totalFirstCategory -= 0.2m;
            var partsToDivide = 1;

            //Price
            if (planForm.PricePreference == PricePreference.Cheapest)
            {
                weightVector.AddValue(WeightVectorLabel.Price, 0.1m);
                totalFirstCategory -= 0.1m;
                partsToDivide      += 2;
            }
            else if (planForm.PricePreference == PricePreference.MediumPrices)
            {
                weightVector.AddValue(WeightVectorLabel.Price, 0.05m);
                totalFirstCategory -= 0.05m;
                partsToDivide      += 1;
            }

            //Popularity
            if (planForm.AtractionPopularityPreference == AtractionPopularityPreference.MostPopular)
            {
                weightVector.AddValue(WeightVectorLabel.Popularity, 0.1m);
                totalFirstCategory -= 0.1m;
                partsToDivide      += 2;
            }
            else if (planForm.AtractionPopularityPreference == AtractionPopularityPreference.MixedPopular)
            {
                weightVector.AddValue(WeightVectorLabel.Popularity, 0.05m);
                totalFirstCategory -= 0.05m;
                partsToDivide      += 1;
            }
            else if (planForm.AtractionPopularityPreference == AtractionPopularityPreference.NotWellKnown)
            {
                weightVector.AddValue(WeightVectorLabel.Popularity, 0m);
                totalFirstCategory -= 0.00m;
            }

            //Distance - the closer, the more important
            if (planForm.DistanceTypePreference == DistanceTypePreference.OnlyClosest)
            {
                weightVector.AddValue(WeightVectorLabel.Distance, 0.1m);
                totalFirstCategory -= 0.1m;
                partsToDivide      += 2;
            }
            else if (planForm.DistanceTypePreference == DistanceTypePreference.MediumDistances)
            {
                weightVector.AddValue(WeightVectorLabel.Distance, 0.05m);
                totalFirstCategory -= 0.05m;
                partsToDivide      += 1;
            }
            else if (planForm.DistanceTypePreference == DistanceTypePreference.LongDistances)
            {
                weightVector.AddValue(WeightVectorLabel.Distance, 0m);
                totalFirstCategory -= 0.00m;
            }

            if (totalFirstCategory > 0)
            {
                var totalFirstCategoryStep = Math.Round(totalFirstCategory / partsToDivide, 2);

                if (planForm.PricePreference == PricePreference.Cheapest)
                {
                    weightVector.AddValue(WeightVectorLabel.Price, 2 * totalFirstCategoryStep);
                }
                else if (planForm.PricePreference == PricePreference.MediumPrices)
                {
                    weightVector.AddValue(WeightVectorLabel.Price, 1 * totalFirstCategoryStep);
                }

                if (planForm.AtractionPopularityPreference == AtractionPopularityPreference.MostPopular)
                {
                    weightVector.AddValue(WeightVectorLabel.Popularity, 2 * totalFirstCategoryStep);
                }
                else if (planForm.AtractionPopularityPreference == AtractionPopularityPreference.MixedPopular)
                {
                    weightVector.AddValue(WeightVectorLabel.Popularity, 1 * totalFirstCategoryStep);
                }

                if (planForm.DistanceTypePreference == DistanceTypePreference.OnlyClosest)
                {
                    weightVector.AddValue(WeightVectorLabel.Distance, 2 * totalFirstCategoryStep);
                }
                else if (planForm.DistanceTypePreference == DistanceTypePreference.MediumDistances)
                {
                    weightVector.AddValue(WeightVectorLabel.Distance, 1 * totalFirstCategoryStep);
                }

                weightVector.AddValue(WeightVectorLabel.Rating, 1.0m - weightVector.GetTotalSum());
            }
            var test = weightVector.Total;

            return(weightVector);
        }
Example #27
        // ----------------------------------------------------------------------------------
        // Expectimax with Star1 pruning
        // ----------------------------------------------------------------------------------
        // Runs an entire game using expectimax with star1 pruning to decide on moves
        public State RunStar1Expectimax(bool print, WeightVector weights)
        {
            while (true)
            {
                // update state
                currentState = new State(BoardHelper.CloneBoard(gameEngine.board), scoreController.getScore(), GameEngine.PLAYER);

                if (print)
                {
                    Program.PrintState(currentState);
                }

                // run algorithm and send action choice to game engine
                Move move = Star1Expectimax(currentState, Double.MinValue, Double.MaxValue, chosenDepth, weights);
                if (((PlayerMove)move).Direction == (DIRECTION)(-1))
                {
                    // game over
                    return currentState;
                }
                gameEngine.SendUserAction((PlayerMove)move);
            }
        }
Example #28
File: AI.cs Project: kstrandby/2048-AI
 // for Expectimax Star1 pruning
 internal static double GetUpperBound(WeightVector weights)
 {
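     // weighted sum of each heuristic's maximum value plus the +10 win bonus used in
     // EvaluateWithWeights, giving an upper bound on any evaluation score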
     double bound = ((WeightVectorAll)weights).Corner * upper_corner + ((WeightVectorAll)weights).Empty_cells * upper_emptycells
         + ((WeightVectorAll)weights).Highest_tile * upper_highestvalue + ((WeightVectorAll)weights).Monotonicity * upper_monotonicity
         + ((WeightVectorAll)weights).Points * upper_points + ((WeightVectorAll)weights).Smoothness * upper_smoothness
         + ((WeightVectorAll)weights).Snake * upper_snake + ((WeightVectorAll)weights).Trapped_penalty * upper_trappedpenalty;
     return bound + 10;
 }
Example #29
        // ----------------------------------------------------------------------------------
        // Expectimax with Star1 and forward pruning
        // ----------------------------------------------------------------------------------
        // Runs an entire game using expectimax search with star1 and forward pruning
        // to decide on moves
        public State RunStar1WithUnlikelyPruning(bool print, WeightVector weights)
        {
            int lastSpawn = 0; // don't consider the last spawn value at the first move
            while (true)
            {
                // update state
                currentState = new State(BoardHelper.CloneBoard(gameEngine.board), scoreController.getScore(), GameEngine.PLAYER);

                if (print)
                {
                    Program.PrintState(currentState);
                }

                // run algorithm and send action choice to game engine
                Move move = Star1WithUnlikelyPruning(currentState, Double.MinValue, Double.MaxValue, chosenDepth, lastSpawn, weights);
                if (((PlayerMove)move).Direction == (DIRECTION)(-1))
                {
                    // game over
                    return currentState;
                }
                lastSpawn = gameEngine.SendUserAction((PlayerMove)move);
            }
        }