Code Example #1
        protected virtual Reward Playout(WorldModel initialPlayoutState)
        {
            GOB.Action   action;
            GOB.Action[] actions;
            Reward       reward  = new Reward();
            WorldModel   current = initialPlayoutState;
            int          random;

            actions = current.GetExecutableActions();
            if (actions.Length == 0)
            {
                // Return immediately: falling through to the loop below would index
                // into an empty action array.
                reward.PlayerID = current.GetNextPlayer();
                reward.Value    = 0;
                return reward;
            }

            while (!current.IsTerminal())
            {
                current = current.GenerateChildWorldModel();

                // Refresh the executable actions; the set computed before the loop
                // becomes stale once effects are applied.
                actions = current.GetExecutableActions();
                if (actions.Length == 0)
                {
                    break;
                }

                random  = RandomGenerator.Next(0, actions.Length);
                action  = actions[random];
                action.ApplyActionEffects(current);
                current.CalculateNextPlayer();
            }

            reward.PlayerID = current.GetNextPlayer();
            reward.Value    = current.GetScore();
            return(reward);
        }
Code Example #2
        protected virtual Reward Playout(WorldModel initialPlayoutState)
        {
            WorldModel childWorldModel = initialPlayoutState.GenerateChildWorldModel();

            int DepthReached = 0;

            while (!childWorldModel.IsTerminal())
            {
                // Re-query the executable actions every step; the set computed before the
                // loop becomes stale as soon as effects are applied.
                GOB.Action[] actions = childWorldModel.GetExecutableActions();
                if (actions.Length == 0)
                {
                    break;   // non-terminal state with nothing to execute: stop the playout
                }

                int        index = this.RandomGenerator.Next(actions.Length);
                GOB.Action a     = actions[index];
                a.ApplyActionEffects(childWorldModel);
                childWorldModel.CalculateNextPlayer();
                DepthReached++;
            }

            if (DepthReached > this.MaxPlayoutDepthReached)
            {
                this.MaxPlayoutDepthReached = DepthReached;
            }

            Reward reward = new Reward
            {
                PlayerID = childWorldModel.GetNextPlayer(),
                Value    = childWorldModel.GetScore()
            };

            return(reward);
        }
Code Example #3
        protected override Reward Playout(WorldModel initialPlayoutState)
        {
            WorldModel currentState = initialPlayoutState;

            while (!currentState.IsTerminal())
            {
                // Greedy playout: re-evaluate the executable actions of the current state
                // and pick the one with the lowest heuristic value.
                GOB.Action[] actions = currentState.GetExecutableActions();
                if (actions.Length == 0)
                {
                    break;
                }

                int bestHvalue      = int.MaxValue;
                int bestActionIndex = 0;
                for (int i = 0; i < actions.Length; i++)
                {
                    int h = actions[i].getHvalue();
                    if (h < bestHvalue)
                    {
                        bestActionIndex = i;
                        bestHvalue      = h;
                    }
                }

                // Advance from currentState, not from initialPlayoutState, so the playout
                // actually makes progress towards a terminal state.
                WorldModel childState = currentState.GenerateChildWorldModel();
                actions[bestActionIndex].ApplyActionEffects(childState);
                childState.CalculateNextPlayer();
                currentState = childState;
                base.CurrentDepth++;
            }
            Reward r = new Reward();

            // PlayerID is set for consistency with the other playout variants.
            r.PlayerID = currentState.GetNextPlayer();
            r.Value    = currentState.GetScore();
            return(r);
        }
Code Example #4
File: MCTS.cs Project: RicardoPereiraIST/IAJ-Project
        protected virtual Reward Playout(WorldModel initialPlayoutState)
        {
            GOB.Action action;
            WorldModel model = initialPlayoutState.GenerateChildWorldModel();

            GOB.Action[] actions;
            Reward       reward = new Reward();

            while (!model.IsTerminal())
            {
                actions = model.GetExecutableActions();

                if (actions.Length == 0)
                {
                    break;
                }

                action = actions[RandomGenerator.Next(0, actions.Length)];
                action.ApplyActionEffects(model);
                model.CalculateNextPlayer();
            }

            reward.PlayerID = model.GetNextPlayer();
            reward.Value    = model.GetScore();
            return(reward);
        }
Code Example #5
File: MCTS.cs Project: leiapollos/IAJLab
        protected virtual Reward Playout(WorldModel initialPlayoutState)
        {
            WorldModel currState = initialPlayoutState;

            while (!currState.IsTerminal())
            {
                var actions = currState.GetExecutableActions();
                if (actions.Length > 0)
                {
                    //currState = currState.GenerateChildWorldModel();
                    int next = this.RandomGenerator.Next(0, actions.Length);
                    currState = StochasticPlayout(actions[next], currState);
                    currState.CalculateNextPlayer();
                    //actions[next].ApplyActionEffects(currState);
                    //currState.CalculateNextPlayer();
                }
                else
                {
                    break;
                }
            }
            return(new Reward
            {
                PlayerID = currState.GetNextPlayer(),
                Value = currState.GetScore()
            });
        }
Code Example #6
        //Rave + Biased
        protected override Reward Playout(WorldModel initialPlayoutState)
        {
            ActionHistory = new List <Pair <int, GOB.Action> >();
            WorldModel childWorldModel = initialPlayoutState.GenerateChildWorldModel();

            GOB.Action action;

            int playoutReach = 0;

            while (!childWorldModel.IsTerminal())
            {
                //Select a random Action

                GOB.Action[] actions = childWorldModel.GetExecutableActions();
                if (actions.Length == 0)
                {
                    break;    // guard: indexing an empty action array below would throw
                }

                double[] actionIndexes             = new double[actions.Length];
                double   heuristicValue            = 0.0;
                double   accumulatedHeuristicValue = 0.0;
                double   randomIndex;
                int      chosenActionIndex = 0;
                for (int i = 0; i < actions.Length; i++)
                {
                    heuristicValue             = actions[i].H(childWorldModel);
                    accumulatedHeuristicValue += Math.Pow(Math.E, -heuristicValue);
                    actionIndexes[i]           = accumulatedHeuristicValue;
                }

                randomIndex = this.RandomGenerator.NextDouble() * accumulatedHeuristicValue;
                //Debug.Log("Acumulated: " + accumulatedHeuristicValue);
                for (int i = 0; i < actions.Length; i++)
                {
                    if (randomIndex <= actionIndexes[i])
                    {
                        chosenActionIndex = i;
                        break;
                    }
                }
                ActionHistory.Add(new Pair <int, GOB.Action>(childWorldModel.GetNextPlayer(), actions[chosenActionIndex]));
                actions[chosenActionIndex].ApplyActionEffects(childWorldModel);
                childWorldModel.CalculateNextPlayer();
                playoutReach += 1;
            }

            if (playoutReach > MaxPlayoutDepthReached)
            {
                MaxPlayoutDepthReached = playoutReach;
            }

            Reward reward = new Reward
            {
                PlayerID = childWorldModel.GetNextPlayer(),
                Value    = childWorldModel.GetScore()
            };

            return(reward);
        }
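The weighted choice in examples #6 and #8 is a roulette wheel over e^(-H(a)): each action's heuristic is mapped to the weight e^(-H), the weights are accumulated into a cumulative array, and a uniform draw scaled by the running total selects the slot it falls into, so lower-cost actions get proportionally larger slices. The helper below is only a minimal sketch of that selection step in isolation; the method name and the double[] costs parameter are illustrative and not part of the projects above.

        // Sketch: sample an index with probability proportional to e^(-costs[i]).
        // Assumes a non-empty costs array holding the heuristic values H(a).
        private static int SampleByHeuristicWeight(double[] costs, System.Random rng)
        {
            double[] cumulative = new double[costs.Length];
            double   total      = 0.0;

            for (int i = 0; i < costs.Length; i++)
            {
                total        += Math.Exp(-costs[i]);   // lower cost => larger weight
                cumulative[i] = total;
            }

            double draw = rng.NextDouble() * total;
            for (int i = 0; i < costs.Length; i++)
            {
                if (draw <= cumulative[i])
                {
                    return i;
                }
            }
            return costs.Length - 1;   // fallback for floating-point rounding at the upper edge
        }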
Code Example #7
        private MCTSNode Expand(MCTSNode parent, GOB.Action action)
        {
            WorldModel state = parent.State.GenerateChildWorldModel();
            MCTSNode   child = new MCTSNode(state);

            child.Parent = parent;

            action.ApplyActionEffects(state);
            state.CalculateNextPlayer();

            child.Action = action;
            parent.ChildNodes.Add(child);

            return(child);
        }
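For orientation, an Expand like the one above is normally called from the tree policy: the search walks down through fully expanded nodes and calls Expand as soon as it reaches a node that still has an untried executable action. The loop below is a hedged sketch of such a caller, assumed to live in the same class as the Expand shown above; TreePolicySketch and BestUCTChild are illustrative names, not methods from these projects, and the sketch also assumes the N and Q counters that appear on MCTSNode in examples #10-#12.

        // Sketch of the tree-policy phase: expand the first untried action found,
        // otherwise descend to the child with the best UCT value.
        private MCTSNode TreePolicySketch(MCTSNode root)
        {
            MCTSNode node = root;

            while (!node.State.IsTerminal())
            {
                GOB.Action[] actions = node.State.GetExecutableActions();

                // An action counts as untried if no child of this node was created from it yet.
                foreach (GOB.Action a in actions)
                {
                    if (!node.ChildNodes.Exists(c => c.Action == a))
                    {
                        return Expand(node, a);   // grow the tree by exactly one node
                    }
                }

                node = BestUCTChild(node);        // illustrative selection step, defined below
            }
            return node;
        }

        // Illustrative UCT selection: exploitation term Q/N plus an exploration bonus.
        private MCTSNode BestUCTChild(MCTSNode parent)
        {
            MCTSNode best      = null;
            double   bestValue = double.NegativeInfinity;

            foreach (MCTSNode child in parent.ChildNodes)
            {
                double uct = child.Q / (child.N + 1e-6)
                             + 1.41 * Math.Sqrt(Math.Log(parent.N + 1) / (child.N + 1e-6));
                if (uct > bestValue)
                {
                    bestValue = uct;
                    best      = child;
                }
            }
            return best;
        }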
Code Example #8
        protected override Reward Playout(WorldModel initialPlayoutState)
        {
            WorldModel childWorldModel = initialPlayoutState.GenerateChildWorldModel();
            int        DepthReached    = 0;

            while (!childWorldModel.IsTerminal())
            {
                GOB.Action[] actions = childWorldModel.GetExecutableActions();
                if (actions.Length == 0)
                {
                    break;    // guard: indexing an empty action array below would throw
                }

                double[] actionIndexes             = new double[actions.Length];
                double   heuristicValue            = 0.0;
                double   accumulatedHeuristicValue = 0.0;
                double   randomIndex;
                int      chosenActionIndex = 0;
                for (int i = 0; i < actions.Length; i++)
                {
                    heuristicValue             = actions[i].H(childWorldModel);
                    accumulatedHeuristicValue += Math.Pow(Math.E, -heuristicValue);
                    actionIndexes[i]           = accumulatedHeuristicValue;
                }

                randomIndex = this.RandomGenerator.NextDouble() * accumulatedHeuristicValue;
                for (int i = 0; i < actions.Length; i++)
                {
                    if (randomIndex <= actionIndexes[i])
                    {
                        chosenActionIndex = i;
                        break;
                    }
                }
                actions[chosenActionIndex].ApplyActionEffects(childWorldModel);
                childWorldModel.CalculateNextPlayer();
                DepthReached++;
            }

            if (DepthReached > this.MaxPlayoutDepthReached)
            {
                this.MaxPlayoutDepthReached = DepthReached;
            }

            Reward reward = new Reward
            {
                PlayerID = this.InitialNode.PlayerID,
                Value    = childWorldModel.GetScore()
            };

            return(reward);
        }
Code Example #9
        protected override MCTSNode Expand(MCTSNode parent, GOB.Action action)
        {
            WorldModel currentState = parent.State.GenerateChildWorldModel();

            action.ApplyActionEffects(currentState);
            currentState.CalculateNextPlayer();

            MCTSNode newChild = new MCTSNode(currentState)
            {
                Parent = parent,
                Action = action
            };

            parent.ChildNodes.Add(newChild);

            return(newChild);
        }
Code Example #10
File: MCTS.cs Project: DCalhas/DecisionMakingIAJ
        private MCTSNode Expand(MCTSNode parent, GOB.Action action)
        {
            WorldModel state  = parent.State.GenerateChildWorldModel();
            MCTSNode   expand = new MCTSNode(state);

            expand.Parent = parent;
            action.ApplyActionEffects(state);
            expand.Action = action;
            state.CalculateNextPlayer();
            expand.PlayerID = state.GetNextPlayer();
            parent.ChildNodes.Add(expand);

            expand.N = 0;
            expand.Q = 0;

            return(expand);
        }
Code Example #11
File: MCTS.cs Project: leiapollos/IAJLab
        protected MCTSNode Expand(MCTSNode parent, Action action)
        {
            WorldModel newModel = parent.State.GenerateChildWorldModel();

            action.ApplyActionEffects(newModel);
            newModel.CalculateNextPlayer();
            MCTSNode node = new MCTSNode(newModel)
            {
                Action   = action,
                Parent   = parent,
                PlayerID = newModel.GetNextPlayer(),
                Q        = 0,
                N        = 0
            };

            parent.ChildNodes.Add(node);
            return(node);
        }
Code Example #12
        private MCTSNode Expand(MCTSNode parent, GOB.Action action)
        {
            WorldModel worldmodel = parent.State.GenerateChildWorldModel();

            action.ApplyActionEffects(worldmodel);
            worldmodel.CalculateNextPlayer();
            MCTSNode n = new MCTSNode(worldmodel)
            {
                Action = action,
                Parent = parent,
                N      = 0,
                Q      = 0
            };

            parent.ChildNodes.Add(n);
            return(n);
        }
Code Example #13
        protected override Reward Playout(WorldModel initialPlayoutState)
        {
            ActionHistory = new List <Pair <int, GOB.Action> >();
            WorldModel state = initialPlayoutState.GenerateChildWorldModel();
            Action     nextAction;

            while (!state.IsTerminal())
            {
                Action[] actions = state.GetExecutableActions();
                if (actions.Length > 0)
                {
                    nextAction = actions[RandomGenerator.Next() % actions.Length];
                    ActionHistory.Add(new Pair <int, GOB.Action>(state.GetNextPlayer(), nextAction));
                    nextAction.ApplyActionEffects(state);
                    state.CalculateNextPlayer();
                }
                else
                {
                    // Without this branch a non-terminal state with no executable action
                    // would make the loop spin forever.
                    break;
                }
            }
            Reward r = new Reward();

            // PlayerID is set for consistency with the other playout variants.
            r.PlayerID = state.GetNextPlayer();
            r.Value    = state.GetScore();
            return(r);
        }
Code Example #14
        protected override Reward Playout(WorldModel initialPlayoutState)
        {
            GOB.Action   action;
            GOB.Action[] actions;
            Reward       reward  = new Reward();
            WorldModel   current = initialPlayoutState;

            double        random;
            float         h          = 0;
            double        accumulate = 0;
            float         euclidean  = 0;
            double        softmax    = 0;
            List <double> interval   = new List <double>();
            WalkToTargetAndExecuteAction wa;

            actions = current.GetExecutableActions();
            if (actions.Length == 0)
            {
                // Return immediately: with no executable action the loop below never
                // builds an interval and would spin forever.
                reward.PlayerID = current.GetNextPlayer();
                reward.Value    = 0;
                return reward;
            }

            while (!current.IsTerminal())
            {
                accumulate = 0;
                softmax    = 0;     // reset the cumulative weight; carrying it over skews the roulette wheel
                interval.Clear();

                // Re-query the executable actions for the current state.
                actions = current.GetExecutableActions();
                if (actions.Length == 0)
                {
                    break;
                }

                foreach (var a in actions)
                {
                    h         = 0;
                    euclidean = 1;  // default distance so non-walk actions do not reuse a stale (or zero) value
                    var gameMan   = this.CurrentStateWorldModel.GetGameManager();
                    var character = gameMan.characterData;
                    wa = a as WalkToTargetAndExecuteAction;
                    if (wa != null)
                    {
                        euclidean = (wa.Target.transform.position - wa.Character.transform.position).magnitude;
                        if (euclidean <= 0)
                        {
                            euclidean = 1;
                        }
                    }

                    if (a.Name.Contains("LevelUp"))                                                      //1000
                    {
                        h = 1000;
                    }
                    if (a.Name.Contains("GetHealthPotion"))                                              //0-25
                    {
                        h = (character.MaxHP - character.HP) * 1.5f;
                    }
                    else if (a.Name.Contains("PickUpChest"))                                                                 //5-25
                    {
                        h = (character.Money + 5) * 3.5f;
                    }
                    else if (a.Name.Contains("FireballSkeleton") || a.Name.Contains("FireballOrc"))                          //0-25
                    {
                        h = character.Mana * 30;
                    }
                    else if (a.Name.Contains("SwordAttackSkeleton"))
                    {
                        h = (character.HP - 5) * 2;
                    }
                    else if (a.Name.Contains("SwordAttackOrc"))
                    {
                        h = (character.HP - 10) * 2;
                    }
                    else if (a.Name.Contains("SwordAttackDragon"))
                    {
                        h = character.HP - 20;
                    }

                    if (h < 0)
                    {
                        h = 0;
                    }

                    h = h * 1000 / euclidean;

                    accumulate += h;
                    if (h > 0)
                    {
                        softmax += Math.Pow(Math.E, -h / accumulate);
                        interval.Add(softmax);
                        Debug.Log(softmax);
                    }
                    else
                    {
                        interval.Add(0);
                    }
                }

                random = RandomGenerator.NextDouble() * softmax;
                for (int j = 0; j < interval.Count; j++)
                {
                    if (random <= interval[j])
                    {
                        action  = actions[j];
                        current = current.GenerateChildWorldModel();
                        action.ApplyActionEffects(current);
                        current.CalculateNextPlayer();
                        break;
                    }

                    if (j == interval.Count - 1)
                    {
                        current         = current.GenerateChildWorldModel();
                        reward.Value    = 0;
                        reward.PlayerID = current.GetNextPlayer();
                        return(reward);
                    }
                }
            }

            reward.PlayerID = current.GetNextPlayer();
            reward.Value    = current.GetScore();
            return(reward);
        }
Code Example #15
        protected override Reward Playout(WorldModel initialPlayoutState)
        {
            WorldModel        model = initialPlayoutState.GenerateChildWorldModel();
            List <GOB.Action> actions;
            List <GOB.Action> executableActions = new List <GOB.Action>();

            GOB.Action nextAction = null;
            Reward     reward     = new Reward();
            double     heuristicValue;
            double     accumulatedHeuristicValue;
            double     bestValue, minValue;
            SortedDictionary <double, GOB.Action> heuristicList = new SortedDictionary <double, GOB.Action>();

            actions = model.GetActions();

            while (!model.IsTerminal())
            {
                heuristicList.Clear();
                executableActions.Clear();
                nextAction                = null;   // reset so the null check below also works after the first iteration
                heuristicValue            = 0;
                accumulatedHeuristicValue = 0;

                bestValue = -1;
                minValue  = float.MaxValue;

                if (actions.Count == 0)
                {
                    break;
                }

                foreach (GOB.Action action in actions)
                {
                    if (action.CanExecute(model))
                    {
                        accumulatedHeuristicValue += Math.Pow(Math.E, action.H(model));
                        executableActions.Add(action);
                    }
                }

                foreach (GOB.Action action in executableActions)
                {
                    heuristicValue = Math.Pow(Math.E, action.H(model)) / accumulatedHeuristicValue;

                    if (!heuristicList.ContainsKey(heuristicValue))
                    {
                        heuristicList.Add(heuristicValue, action);
                    }

                    if (heuristicValue > bestValue)
                    {
                        bestValue = heuristicValue;
                    }
                    if (heuristicValue < minValue)
                    {
                        minValue = heuristicValue;
                    }
                }

                double randomNumber = GetRandomNumber(minValue, bestValue);

                foreach (KeyValuePair <double, GOB.Action> actionHeuristic in heuristicList)
                {
                    if (actionHeuristic.Key >= randomNumber)
                    {
                        nextAction = actionHeuristic.Value;
                        break;
                    }
                }

                if (nextAction == null)
                {
                    break;
                }

                nextAction.ApplyActionEffects(model);
                model.CalculateNextPlayer();
            }

            reward.PlayerID = model.GetNextPlayer();
            reward.Value    = model.GetScore();
            return(reward);
        }
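Every Playout variant above returns a Reward carrying a Value and the PlayerID it was scored for; that pair is what the backpropagation phase pushes back up the tree. The sketch below shows one common two-player convention for that credit assignment and is not code from the projects above; it assumes MCTSNode.Q and Reward.Value share a numeric type and uses the PlayerID field seen on MCTSNode in examples #10 and #11.

        // Illustrative backpropagation: credit every node on the path to the root,
        // flipping the sign when the node belongs to the other player.
        private void BackpropagateSketch(MCTSNode node, Reward reward)
        {
            while (node != null)
            {
                node.N += 1;
                node.Q += (node.PlayerID == reward.PlayerID) ? reward.Value : -reward.Value;
                node    = node.Parent;
            }
        }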