예제 #1
0
        /// <summary>
        /// Expands the search tree and queues the next action for the AI to perform.
        /// Does nothing while a previously chosen action is still pending.
        /// </summary>
        public void ThinkAboutNextAction()
        {
            if (actionToDo != NONE)
            {
                return;
            }

            player = AIplayer.DummyPlayer;

            // Mark the current tile as visited before expanding the tree.
            visited[player.Location.X, player.Location.Y] = true;

            BuildTree(BUILDTREEPERACTION);

            // Pick the first child whose score beats the current node's score.
            MeesGame.PlayerAction action = NONE;
            foreach (NodeActionPair NAP in entryPoint.next)
            {
                // BUG FIX: the original tested Contains(action), but 'action' is still
                // NONE at this point, so the candidate NAP.Key was never validated
                // against the player's currently possible actions.
                if (NAP.Value.maxScore > entryPoint.maxScore && AIplayer.DummyPlayer.PossibleActions.Contains(NAP.Key))
                {
                    action = NAP.Key;
                    break;
                }
            }
            actionToDo = action;
        }
예제 #2
0
        /// <summary>
        /// Selects the node the player is on after performing the action that brings him to the given location.
        /// </summary>
        /// <param name="curNode">The current node.</param>
        /// <param name="usedAction">The action performed.</param>
        /// <param name="nextLocation">The location after performing the action.</param>
        /// <returns></returns>
        /// <summary>
        /// Selects the node the player is on after performing the action that brings him to the given location.
        /// Reuses an already-recorded edge when one exists; otherwise links (and, on first
        /// visit, creates) the node for the target location.
        /// </summary>
        /// <param name="curNode">The current node.</param>
        /// <param name="usedAction">The action performed.</param>
        /// <param name="nextLocation">The location after performing the action.</param>
        /// <returns>The node reached by performing the action.</returns>
        private Node goToNextNode(Node curNode, MeesGame.PlayerAction usedAction, Point nextLocation)
        {
            // Check if the node already has the next node in the list.
            foreach (NodeActionPair NAP in curNode.next)
            {
                if (NAP.Key == usedAction)
                {
                    return(NAP.Value);
                }
            }

            // Look the node up in the grid; create and register it on first visit.
            // (The original duplicated the Add/return in both branches.)
            Node node = nodeGrid[nextLocation.X, nextLocation.Y];
            if (node == null)
            {
                node = new Node(nextLocation);
                nodeGrid[nextLocation.X, nextLocation.Y] = node;
            }

            // Record the edge from the current node so future calls take the fast path above.
            curNode.next.Add(usedAction, node);
            return(node);
        }
예제 #3
0
        /// <summary>
        /// Helper method to get the reverse direction.
        /// </summary>
        /// <param name="action"></param>
        /// <returns></returns>
        /// <summary>
        /// Maps a movement action to the action pointing the opposite way.
        /// Non-movement actions map to NONE.
        /// </summary>
        /// <param name="action">The action to reverse.</param>
        /// <returns>The opposite movement action, or NONE.</returns>
        private MeesGame.PlayerAction GetReverseAction(MeesGame.PlayerAction action)
        {
            if (action == NORTH)
            {
                return(SOUTH);
            }
            if (action == SOUTH)
            {
                return(NORTH);
            }
            if (action == EAST)
            {
                return(WEST);
            }
            if (action == WEST)
            {
                return(EAST);
            }
            return(NONE);
        }
예제 #4
0
        /// <summary>
        /// This function should be called at least once before checking the best possible move.
        /// </summary>
        /// <summary>
        /// This function should be called at least once before checking the best possible move.
        /// Performs one random walk from the entry point: a cloned player takes random
        /// possible actions, materializing tree nodes along the way, until either an
        /// unvisited tile is reached (which is scored) or MAXITERATIONS is hit.
        /// Afterwards the scores are propagated back via updateScore().
        /// </summary>
        public void BuildTree()
        {
            // Walk on a clone so the real player's state is untouched.
            MeesGame.IPlayer dummy  = player.Clone();
            Point            curLoc = dummy.Location;
            int  iterations         = 0;
            Node curNode            = entryPoint;

            // Fresh stack of nodes along the current walk; consumed by updateScore().
            // NOTE(review): the entry point is pushed here AND curNode is pushed again
            // at the top of each step below, so the root appears twice on the stack —
            // presumably intentional for updateScore's traversal; verify against it.
            nodeStack = new Stack <Node>();
            nodeStack.Push(curNode);
            MeesGame.PlayerAction lastAction = NONE;

            while (iterations < MAXITERATIONS)
            {
                iterations += 1;
                // pick a random action to perform
                MeesGame.PlayerAction chosenAction = dummy.PossibleActions[GameEnvironment.Random.Next(dummy.PossibleActions.Count)];
                // NONE is skipped but still consumes an iteration, so the loop always
                // terminates. (The commented-out clause would also forbid immediate backtracking.)
                if (chosenAction == NONE /* || (GetReverseAction(chosenAction) == lastAction)*/)
                {
                    continue;
                }

                //perform action
                dummy.PerformAction(chosenAction);
                // Remember where we came from, then move to (or create) the node for
                // the clone's new location.
                nodeStack.Push(curNode);
                curNode = goToNextNode(curNode, chosenAction, dummy.Location);
                curLoc  = dummy.Location;
                // Stop the walk as soon as we step onto a tile we have never visited:
                // that frontier tile gets scored and the walk ends.
                if (!visited[curLoc.X, curLoc.Y])
                {
                    curNode.maxScore = getScore(curLoc);
                    break;
                }
                lastAction = chosenAction;
            }

            //max number of iterations reached, or broken out of the loop
            updateScore();
        }
예제 #5
0
 /// <summary>
 /// Make the AI pick the next action.
 /// </summary>
 /// <summary>
 /// Hands the queued action over to the AI player and clears the queue
 /// so a new action can be chosen next think cycle.
 /// </summary>
 public void UpdateNextAction()
 {
     MeesGame.PlayerAction handedOver = actionToDo;
     AIplayer.NextAIAction = handedOver;
     actionToDo            = NONE;
 }
예제 #6
0
        /// <summary>
        /// The Q function receives a ILevelState s and a to be performed PlayerAction a.
        /// It determines what the result is of this action and then updates the entry for (s, a) in the qValues library.
        /// </summary>
        /// <param name="s"></param>
        /// <param name="a"></param>
        /// <summary>
        /// The Q function receives a ILevelState s and a to be performed PlayerAction a.
        /// It determines what the result is of this action and then updates the entry
        /// for (s, a) in the qValues library using the standard Q-learning update:
        /// Q(s,a) += learningRate * (reward + discountFactor * futureEstimate - Q(s,a)).
        /// </summary>
        /// <param name="s">The current level state.</param>
        /// <param name="a">The action to be performed in state s.</param>
        private void Q(ILevelState s, MeesGame.PlayerAction a)
        {
            Tuple <ILevelState, MeesGame.PlayerAction> stateAction = new Tuple <ILevelState, MeesGame.PlayerAction>(s, a);

            double oldValue = qValues[stateAction];
            double target   = (double)GetResultOfAction(s, a) + discountFactor * EstimatedOptimalFutureValue(s, a);

            qValues[stateAction] = oldValue + learningRate * (target - oldValue);
        }
예제 #7
0
 /// <summary>
 /// This function estimates the best qValue that can be obtained by performing an action in the new ILevelState s' that is reached by performing PlayerAction a in ILevelState s.
 /// </summary>
 /// <param name="s"></param>
 /// <param name="a"></param>
 /// <returns></returns>
 /// <summary>
 /// This function estimates the best qValue that can be obtained by performing an action
 /// in the new ILevelState s' that is reached by performing PlayerAction a in ILevelState s.
 /// Placeholder: the estimate is not implemented yet and is always 0.
 /// </summary>
 /// <param name="s">The current level state.</param>
 /// <param name="a">The action performed in state s.</param>
 /// <returns>The estimated optimal future value (currently always 0).</returns>
 private double EstimatedOptimalFutureValue(ILevelState s, MeesGame.PlayerAction a) => 0;
예제 #8
0
 /// <summary>
 /// Returns the score of the performed action a on the ILevelSTate s.
 /// </summary>
 /// <param name="s"></param>
 /// <param name="a"></param>
 /// <summary>
 /// Returns the score of the performed action a on the ILevelState s.
 /// Placeholder: the reward function is not implemented yet and always returns 0.
 /// </summary>
 /// <param name="s">The current level state.</param>
 /// <param name="a">The action performed in state s.</param>
 /// <returns>The immediate reward (currently always 0).</returns>
 private int GetResultOfAction(ILevelState s, MeesGame.PlayerAction a) => 0;