Example #1
        private void Init(IGoapPlanner<T, W> planner, ReGoapState<T, W> newGoal, ReGoapNode<T, W> parent, IReGoapAction<T, W> action)
        {
            expandList.Clear();

            this.planner = planner;
            this.parent  = parent;
            this.action  = action;
            if (action != null)
            {
                actionSettings = action.GetSettings(planner.GetCurrentAgent(), newGoal);
            }

            if (parent != null)
            {
                state = parent.GetState().Clone();
                // g(node)
                g = parent.GetPathCost();
            }
            else
            {
                state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
            }

            var nextAction = parent == null ? null : parent.action;

            if (action != null)
            {
                // since backward search relaxes the problem, all preconditions are treated as valid but are added to the current goal
                var preconditions = action.GetPreconditions(newGoal, nextAction);
                goal = newGoal + preconditions;

                var effects = action.GetEffects(newGoal, nextAction);
                state.AddFromState(effects);
                g += action.GetCost(newGoal, nextAction);

                // remove the current action's effects from the goal; there is no need to do this against the whole state,
                //  since the state is the sum of all the previous actions' effects.
                goal.ReplaceWithMissingDifference(effects);

                // this is needed at every step to make sure that no precondition already satisfied
                //  by the world state is left in the goal
                goal.ReplaceWithMissingDifference(planner.GetCurrentAgent().GetMemory().GetWorldState());
            }
            else
            {
                var diff = ReGoapState<T, W>.Instantiate();

                newGoal.MissingDifference(state, ref diff);
                goal = diff;
            }
            h = goal.Count;
            // f(node) = g(node) + h(node)
            cost = g + h * heuristicMultiplier;
        }
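
A minimal sketch of the goal bookkeeping above, assuming ReGoapState behaves like a plain key/value map; the dictionary type, the helper, and the action names are illustrative and not part of the ReGoap API. The goal is extended with the action's preconditions and then stripped of every condition already satisfied by the action's effects or by the agent's world state:

using System;
using System.Collections.Generic;

// Illustrative only: a plain Dictionary<string, bool> stands in for ReGoapState<T, W>,
// and none of these names belong to the ReGoap API.
static class BackwardGoalSketch
{
    // Roughly what ReplaceWithMissingDifference does to the goal:
    // drop every goal pair that 'state' already satisfies.
    static void RemoveSatisfied(Dictionary<string, bool> goal, Dictionary<string, bool> state)
    {
        var satisfied = new List<string>();
        foreach (var pair in goal)
        {
            if (state.TryGetValue(pair.Key, out var value) && value == pair.Value)
            {
                satisfied.Add(pair.Key);
            }
        }
        foreach (var key in satisfied)
        {
            goal.Remove(key);
        }
    }

    static void Main()
    {
        // goal handed down from the parent node: the agent wants wood
        var goal = new Dictionary<string, bool> { ["hasWood"] = true };

        // a candidate action "ChopTree": needs an axe, produces wood
        var preconditions = new Dictionary<string, bool> { ["hasAxe"] = true };
        var effects       = new Dictionary<string, bool> { ["hasWood"] = true };

        // the world state held in the agent's memory
        var worldState = new Dictionary<string, bool> { ["hasAxe"] = true };

        // goal = newGoal + preconditions (the relaxed backward step)
        foreach (var pair in preconditions)
        {
            goal[pair.Key] = pair.Value;
        }

        // drop whatever the action's effects already satisfy ...
        RemoveSatisfied(goal, effects);
        // ... and whatever the world state already satisfies
        RemoveSatisfied(goal, worldState);

        // h = goal.Count; here it is 0, so this node closes the goal
        Console.WriteLine($"open goal conditions (h): {goal.Count}");
    }
}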
Example #2
        private void Init(IGoapPlanner<T, W> planner, ReGoapState<T, W> newGoal, ReGoapNode<T, W> parent, IReGoapAction<T, W> action)
        {
            expandList.Clear();

            this.planner = planner;
            this.parent  = parent;
            this.action  = action;
            if (action != null)
            {
                actionSettings = action.GetSettings(planner.GetCurrentAgent(), newGoal);
            }

            if (parent != null)
            {
                state = parent.GetState().Clone();
                // g(node)
                g = parent.GetPathCost();
            }
            else
            {
                state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
            }

            var nextAction = parent == null ? null : parent.action;

            if (action != null)
            {
                // create a new instance of the goal based on the parent's goal
                goal = ReGoapState<T, W>.Instantiate(newGoal);

                var preconditions = action.GetPreconditions(goal, nextAction);
                var effects       = action.GetEffects(goal, nextAction);
                // adding the action's effects to the current node's state
                state.AddFromState(effects);
                // adding the action's cost to the node's total cost
                g += action.GetCost(goal, nextAction);

                // add all preconditions of the current action to the goal
                goal.AddFromState(preconditions);
                // removes from the goal all the conditions that are now fulfilled in the node's state
                goal.ReplaceWithMissingDifference(state);
            }
            else
            {
                var diff = ReGoapState<T, W>.Instantiate();

                newGoal.MissingDifference(state, ref diff);
                goal = diff;
            }
            h = goal.Count;
            // f(node) = g(node) + h(node)
            cost = g + h * heuristicMultiplier;
        }
Example #3
        private void Init(ReGoapAgent agent, ReGoapState newGoalState, ReGoapNode parent, ReGoapAction action)
        {
            expandList.Clear();

            ReGoapState goal = null;

            this.reGoapAgent = agent;
            this.parentNode  = parent;
            this.action      = action;
            if (action != null)
            {
                actionSettings = action.GetSettings(newGoalState);
            }

            if (parentNode != null)
            {
                agentReGoapState = parentNode.GetState().Clone();
                g = parentNode.GetPathCost();
            }
            else
            {
                ReGoapState reGoapState = agent.GetWorldState();
                agentReGoapState = reGoapState.Clone();
            }

            if (action != null)
            {
                // create a new instance of the goal based on the parent's goal
                goal = ReGoapState.Instantiate(newGoalState);

                var preconditions = action.GetPreconditions(goal);
                var effects       = action.GetEffects(goal);
                // adding the action's effects to the current node's state
                agentReGoapState.AddFromState(effects);
                // adding the action's cost to the node's total cost
                g += action.GetCost();
                // add all preconditions of the current action to the goal
                goal.AddFromState(preconditions);
                // removes from the goal all the conditions that are now fulfilled in the node's state
                goal.ReplaceWithMissingDifference(agentReGoapState);
            }
            else
            {
                goal = newGoalState.MissingDifference(agentReGoapState);
            }
            h    = goal.Count;
            cost = g + h;

            //Expand(goal);

            expandList.Clear();

            List<ReGoapAction> actionsList = reGoapAgent.GetActionsSet();

            for (var index = actionsList.Count - 1; index >= 0; index--)
            {
                ReGoapAction possibleAction = actionsList[index];

                if (!possibleAction.CheckProceduralCondition())  // skip actions whose procedural condition is not met
                {
                    continue;
                }

                ReGoapState precond = possibleAction.GetPreconditions(goal);
                ReGoapState effects = possibleAction.GetEffects(goal);

                if (!ReGoapState.HasAny(effects, goal)) // skip unless at least one effect matches a condition in the current goal
                {
                    continue;
                }

                if (!ReGoapState.HasAnyConflict(precond, goal))
                {
                    ReGoapNode reGoapNode = new ReGoapNode(reGoapAgent, goal, this, possibleAction);
                    expandList.Add(reGoapNode);
                }
            }
        }
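
The expansion loop above only builds the successor nodes; a surrounding search loop still has to consume them. Below is a minimal best-first sketch of that loop under an assumed, simplified node type ('PlanNode' and its members are stand-ins, not ReGoap's API); a real planner would typically keep its frontier in a priority queue keyed on node cost, while this sketch just scans a list:

using System;
using System.Collections.Generic;
using System.Linq;

// 'PlanNode' is a hypothetical stand-in for ReGoapNode: only the fields the loop needs.
class PlanNode
{
    public float Cost;                 // f(node) = g + h, as computed at the end of Init
    public bool IsGoal;                // true when no goal conditions are left open
    public PlanNode Parent;            // the node this one was expanded from
    public string ActionName;          // the action attached to this node, null for the root
    public List<PlanNode> Children = new List<PlanNode>(); // what the expansion loop would produce
}

static class BestFirstSketch
{
    static List<string> Plan(PlanNode root)
    {
        var frontier = new List<PlanNode> { root };
        while (frontier.Count > 0)
        {
            // pop the node with the lowest cost (a priority queue in a real planner)
            var current = frontier.OrderBy(n => n.Cost).First();
            frontier.Remove(current);

            if (current.IsGoal)
            {
                // collect the actions along the parent chain; the search direction
                // (forward or backward) decides in which order they are executed
                var actions = new List<string>();
                for (var node = current; node != null; node = node.Parent)
                {
                    if (node.ActionName != null)
                    {
                        actions.Add(node.ActionName);
                    }
                }
                return actions;
            }

            // the equivalent of the expansion loop above: push every viable successor
            frontier.AddRange(current.Children);
        }
        return null; // the goal is unreachable with the available actions
    }

    static void Main()
    {
        var root = new PlanNode { Cost = 1, IsGoal = false };
        root.Children.Add(new PlanNode { Cost = 2, IsGoal = true, Parent = root, ActionName = "ChopTree" });
        Console.WriteLine(string.Join(" -> ", Plan(root)));
    }
}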
Example #4
        private void Init(IGoapPlanner<T, W> planner, ReGoapState<T, W> newGoal, ReGoapNode<T, W> parent, IReGoapAction<T, W> action, ReGoapState<T, W> settings)
        {
            expandList.Clear();

            this.planner = planner;
            this.parent  = parent;
            this.action  = action;
            if (settings != null)
            {
                this.actionSettings = settings.Clone();
            }

            if (parent != null)
            {
                state = parent.GetState().Clone();
                // g(node)
                g = parent.GetPathCost();
            }
            else
            {
                state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
            }

            var nextAction = parent == null ? null : parent.action;

            if (action != null)
            {
                // create a new instance of the goal based on the parent's goal
                goal = ReGoapState<T, W>.Instantiate(newGoal);

                GoapActionStackData <T, W> stackData;
                stackData.currentState = state;
                stackData.goalState    = goal;
                stackData.next         = action;
                stackData.agent        = planner.GetCurrentAgent();
                stackData.settings     = actionSettings;

                var preconditions = action.GetPreconditions(stackData);
                var effects       = action.GetEffects(stackData);
                // adding the action's cost to the node's total cost
                g += action.GetCost(stackData);

                // adding the action's effects to the current node's state
                state.AddFromState(effects);

                // removes from the goal all the conditions that are now fulfilled by the action's effects
                goal.ReplaceWithMissingDifference(effects);
                // add all preconditions of the current action to the goal
                goal.AddFromState(preconditions);
            }
            else
            {
                goal = newGoal;
            }
            h = goal.Count;
            // f(node) = g(node) + h(node)
            cost = g + h * heuristicMultiplier;

            // additionally compute the goal with everything the world state satisfies removed, to know whether we are already done
            var diff = ReGoapState<T, W>.Instantiate();

            goal.MissingDifference(planner.GetCurrentAgent().GetMemory().GetWorldState(), ref diff);
            goalMergedWithWorld = diff;
        }
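
This last variant additionally stores goalMergedWithWorld, the goal with everything the current world state already satisfies removed. A hedged sketch of how such a value could be used to detect a finished plan, with illustrative names that are not necessarily ReGoap's API:

using System.Collections.Generic;

// Illustrative only: a trimmed-down node carrying just the field needed for the check.
class NodeSketch<T, W>
{
    // goal conditions still unmet once the agent's current world state is subtracted,
    // mirroring the goalMergedWithWorld value computed at the end of Init above
    public Dictionary<T, W> GoalMergedWithWorld = new Dictionary<T, W>();

    // if nothing is left unmet, this node already represents a finished plan
    public bool IsDone() => GoalMergedWithWorld.Count == 0;
}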