// Builds a backward-search plan node: the node's goal becomes the parent's goal
// plus this action's preconditions, minus whatever the action's effects and the
// current world state already satisfy. cost = g + h * heuristicMultiplier.
public ReGoapNode(IGoapPlanner planner, ReGoapState newGoal, ReGoapNode parent, IReGoapAction action)
{
    this.planner = planner;
    this.parent = parent;
    this.action = action;
    if (action != null)
    {
        // BUGFIX: previously passed 'goal', which is still unassigned at this
        // point in the constructor (it is only computed further below), so
        // GetSettings always received a default goal. Pass the incoming goal,
        // matching the other node-init variants in this file.
        actionSettings = action.GetSettings(planner.GetCurrentAgent(), newGoal);
    }
    if (this.parent != null)
    {
        state = this.parent.GetState();
        // g(node)
        g = parent.GetPathCost();
    }
    else
    {
        state = planner.GetCurrentAgent().GetMemory().GetWorldState();
    }
    var nextAction = parent == null ? null : parent.action;
    if (action != null)
    {
        // backward search does NOT support negative preconditions
        // since in backward search we relax the problem all preconditions are valid but are added to the current goal
        var preconditions = action.GetPreconditions(newGoal, nextAction);
        goal = newGoal + preconditions;
        var effects = action.GetEffects(newGoal, nextAction);
        state += effects;
        g += action.GetCost(newGoal, nextAction);
        // removing current action effects from goal, no need to do it to the whole state
        // since the state is the sum of all the previous actions' effects.
        var missingState = new ReGoapState();
        goal.MissingDifference(effects, ref missingState);
        goal = missingState;
        // this is needed every step to make sure that any precondition is not already satisfied
        // by the world state
        var worldMissingState = new ReGoapState();
        goal.MissingDifference(planner.GetCurrentAgent().GetMemory().GetWorldState(), ref worldMissingState);
        goal = worldMissingState;
    }
    else
    {
        // Root node: the goal is whatever the current state does not yet satisfy.
        var diff = new ReGoapState();
        newGoal.MissingDifference(state, ref diff);
        goal = diff;
    }
    h = goal.Count;
    // f(node) = g(node) + h(node)
    cost = g + h * heuristicMultiplier;
}
// (Re)initialises this pooled node: inherits accumulated state and path cost
// from the parent (or the agent's world state at the root), then derives the
// node's goal from the action's preconditions and effects.
private void Init(IGoapPlanner <T, W> planner, ReGoapState <T, W> newGoal, ReGoapNode <T, W> parent, IReGoapAction <T, W> action)
{
    expandList.Clear();

    this.planner = planner;
    this.parent = parent;
    this.action = action;

    if (action != null)
    {
        actionSettings = action.GetSettings(planner.GetCurrentAgent(), newGoal);
    }

    // Continue from the parent's accumulated state/cost, or start fresh from
    // the agent's memory at the root of the search.
    if (parent == null)
    {
        state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
    }
    else
    {
        state = parent.GetState().Clone();
        // g(node)
        g = parent.GetPathCost();
    }

    var previousAction = parent != null ? parent.action : null;
    if (action == null)
    {
        // Root node: the goal is whatever the current state does not satisfy.
        var unsatisfied = ReGoapState <T, W> .Instantiate();
        newGoal.MissingDifference(state, ref unsatisfied);
        goal = unsatisfied;
    }
    else
    {
        // Backward search relaxes the problem: every precondition is assumed
        // reachable and is merged into the goal that remains to be solved.
        var requiredConditions = action.GetPreconditions(newGoal, previousAction);
        goal = newGoal + requiredConditions;
        var producedEffects = action.GetEffects(newGoal, previousAction);
        state.AddFromState(producedEffects);
        g += action.GetCost(newGoal, previousAction);
        // Drop from the goal whatever this action's effects already provide;
        // the state is the sum of every previous action's effects.
        goal.ReplaceWithMissingDifference(producedEffects);
        // Every step, also drop anything the world state already satisfies.
        goal.ReplaceWithMissingDifference(planner.GetCurrentAgent().GetMemory().GetWorldState());
    }

    h = goal.Count;
    // f(node) = g(node) + h(node)
    cost = g + h * heuristicMultiplier;
}
// (Re)initialises this pooled node for backward search: clone the parent's
// state (or the world state at the root), apply the action's effects/cost,
// then rebuild the goal from the action's preconditions.
private void Init(IGoapPlanner <T, W> planner, ReGoapState <T, W> newGoal, ReGoapNode <T, W> parent, IReGoapAction <T, W> action)
{
    expandList.Clear();

    this.planner = planner;
    this.parent = parent;
    this.action = action;

    if (action != null)
    {
        actionSettings = action.GetSettings(planner.GetCurrentAgent(), newGoal);
    }

    // Inherit accumulated state and path cost, or start from the agent's memory.
    if (parent == null)
    {
        state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
    }
    else
    {
        state = parent.GetState().Clone();
        // g(node)
        g = parent.GetPathCost();
    }

    var parentAction = parent == null ? null : parent.action;
    if (action == null)
    {
        // Root node: goal = conditions the current state does not yet satisfy.
        var unsatisfied = ReGoapState <T, W> .Instantiate();
        newGoal.MissingDifference(state, ref unsatisfied);
        goal = unsatisfied;
    }
    else
    {
        // Work on a private copy of the parent's goal.
        goal = ReGoapState <T, W> .Instantiate(newGoal);
        var requires = action.GetPreconditions(goal, parentAction);
        var provides = action.GetEffects(goal, parentAction);
        // Apply the action's effects to this node's accumulated state and
        // charge its cost to the running path cost g.
        state.AddFromState(provides);
        g += action.GetCost(goal, parentAction);
        // The action's preconditions become part of the goal to satisfy...
        goal.AddFromState(requires);
        // ...minus everything the accumulated state already fulfils.
        goal.ReplaceWithMissingDifference(state);
    }

    h = goal.Count;
    // f(node) = g(node) + h(node)
    cost = g + h * heuristicMultiplier;
}
// Expands this node: returns a child node for every action in the agent's set
// whose effects contribute to the current goal without conflicting with it.
public List <INode <ReGoapState <T, W> > > Expand()
{
    expandList.Clear();
    var agent = planner.GetCurrentAgent();
    var actions = agent.GetActionsSet();
    for (var index = actions.Count - 1; index >= 0; index--)
    {
        var possibleAction = actions[index];
        // FIX: skip the node's own action up-front. Previously this check ran
        // only after Precalculations/GetPreconditions/GetEffects, doing that
        // work for an action that is always discarded.
        if (possibleAction == action)
        {
            continue;
        }
        possibleAction.Precalculations(agent, goal);
        var precond = possibleAction.GetPreconditions(goal, action);
        var effects = possibleAction.GetEffects(goal, action);
        if (effects.HasAny(goal) && // any effect is the current goal
            !goal.HasAnyConflict(effects) && // no effect is conflicting with the goal
            !goal.HasAnyConflict(precond) && // no precondition is conflicting with the goal
            possibleAction.CheckProceduralCondition(agent, goal, parent != null ? parent.action : null))
        {
            var newGoal = goal;
            expandList.Add(Instantiate(planner, newGoal, this, possibleAction));
        }
    }
    return (expandList);
}
// Expands this node, logging every candidate action: kept candidates produce a
// child node ("oooo" log entries), rejected ones are logged with "xxxx".
public List <INode <ReGoapState <T, W> > > Expand()
{
    expandList.Clear();
    var agent = planner.GetCurrentAgent();
    var actions = agent.GetActionsSet();
    // Walk the action set back-to-front and keep every action whose effects
    // help the goal without conflicting with it, symbolically or arithmetically.
    for (var index = actions.Count - 1; index >= 0; index--)
    {
        var candidate = actions[index];
        candidate.Precalculations(agent, goal);
        var precond = candidate.GetPreconditions(goal, action);
        var effects = candidate.GetEffects(goal, action);
        var previousAction = parent != null ? parent.action : null;
        var isUseful =
            effects.HasAnyGoodForGoal(state, goal) && // any effect is the current goal
            !goal.HasAnyConflictPrecond(effects, precond) && // no precondition is conflicting with the goal (non-arithmetic)
            !goal.HasAnyConflictEffect(effects) && // no effect is conflicting with goal (non-arithmetic)
            !goal.IsNotHelpfulAtAll(effects, precond, state) && // (arithmetic)
            candidate.CheckProceduralCondition(agent, goal, previousAction);
        if (isUseful)
        {
            var newNode = Instantiate(planner, goal, this, candidate);
            expandList.Add(newNode);
            Utilities.ReGoapLogger.Log(string.Format(" oooo Expanded node: action: {0}\n\t effect {1}\n\t precond {2}\n\t goal {3}", candidate.GetName(), effects, precond, newNode.GoalString));
        }
        else
        {
            Utilities.ReGoapLogger.Log(string.Format(" xxxx Expanded node: action: {0}\n\t effect {1}\n\t precond {2}", candidate.GetName(), effects, precond));
        }
    }
    return (expandList);
}
// Lazily enumerates every (action, settings) pair that could contribute to the
// current goal. In DEBUG builds the yielded ReGoapActionState also carries the
// computed preconditions/effects, and (when includeInvalidAction is set)
// rejected candidates are yielded too, tagged with the reason they failed.
// NOTE(review): the '#endif' just below closes a '#if' opened before this
// method's signature (outside this view — presumably an alternate DEBUG
// signature); likewise, one closing brace near the end sits inside the trailing
// '#if DEBUG' region. Confirm both build configurations against the full file.
public IEnumerator<ReGoapActionState> GetPossibleActionsEnumerator(){
#endif
    var agent = planner.GetCurrentAgent();
    var actions = agent.GetActionsSet();
    foreach (var possibleAction in actions)
    {
        // An action may expose several alternative settings; each pairing is an
        // independent candidate.
        foreach (var settings in possibleAction.MultiPrecalculations(agent, goal))
        {
            var precond = possibleAction.GetPreconditions(goal, settings, action);
            var effects = possibleAction.GetEffects(goal, settings, action);
            // A candidate is valid when its effects help the goal, nothing
            // conflicts, and the procedural condition accepts the context.
            if (effects.DoesFullfillGoal(goal) && // any effect is the current goal
                !goal.HasConflict(precond, effects) &&
                possibleAction.CheckProceduralCondition(agent, settings, goal, parent != null ? parent.action : null))
            {
#if DEBUG
                yield return new ReGoapActionState(possibleAction, settings) { preconditions = precond, effects = effects };
#else
                yield return new ReGoapActionState( possibleAction, settings );
#endif
            }
#if DEBUG
            // Debug-only: optionally surface rejected candidates, tagged with
            // the first check that failed, for plan-debugging tools.
            else if (includeInvalidAction)
            {
                if (!effects.DoesFullfillGoal(goal))
                    yield return new ReGoapActionState(possibleAction, settings) { isValid = false, reason = ReGoapActionState.InvalidReason.EFFECTS_DONT_HELP, preconditions = precond, effects = effects };
                else if (goal.HasConflict(precond, effects))
                    yield return new ReGoapActionState(possibleAction, settings) { isValid = false, reason = ReGoapActionState.InvalidReason.CONFLICT, preconditions = precond, effects = effects };
                else if (!possibleAction.CheckProceduralCondition(agent, settings, goal, parent != null ? parent.action : null))
                    yield return new ReGoapActionState(possibleAction, settings) { isValid = false, reason = ReGoapActionState.InvalidReason.PROCEDURAL_CONDITION, preconditions = precond, effects = effects };
            }
        }
#endif
    }
}
// Builds a backward-search node for the BGoap variant: subtracts the action's
// effects from the parent goal, adds its preconditions, and scores the node as
// f = g + h * heuristicMultiplier (h = distance from the current world state).
public BGoapNode(IGoapPlanner planner, BGoapState parentGoal, BGoapNode parent, ReGoapActionState actionState)
{
    this.planner = planner;
    this.parent = parent;
    if (actionState != null)
    {
        this.action = actionState.Action;
        this.actionSettings = actionState.Settings;
    }
    if (this.parent != null)
    {
        g = parent.GetPathCost();
    }
    var nextAction = parent == null ? null : parent.action;
    if (action != null)
    {
        // first step - subtract effects of action
        var effects = action.GetEffects( parentGoal, actionSettings, nextAction );
        try
        {
            goal = parentGoal.Difference( effects, false ); // dont use defaults here, only subtract what really is in the effect
        }
        catch (ArgumentException e)
        {
            Debug.Log( e );
            // BUGFIX: previously 'goal' stayed null after a logged failure, so the
            // Union call below crashed with a NullReferenceException. Fall back to
            // the unmodified parent goal to keep the logged best-effort behavior.
            goal = parentGoal;
        }
        // then add preconditions to the current goal state
        var preconditions = action.GetPreconditions( parentGoal, actionSettings, nextAction );
        goal = goal.Union( preconditions );
        g += action.GetCost( parentGoal, actionSettings, nextAction );
    }
    else
        goal = parentGoal;
    h = goal.Distance( planner.GetCurrentAgent().GetMemory().GetWorldState() );
    // f(node) = g(node) + h(node)
    cost = g + h * heuristicMultiplier;
}
// Periodically (throttled by WarnDelay) warns the agent whenever this goal's
// feasibility disagrees with its activation: it became possible while inactive,
// or impossible while being the planner's current goal.
protected virtual void FixedUpdate()
{
    // Skip while there is no planner, a plan is being computed, or we are
    // still inside the warn cooldown window.
    if (planner == null || planner.IsPlanning() || Time.time <= warnCooldown)
        return;

    warnCooldown = Time.time + WarnDelay;
    var isCurrentGoal = (ReGoapGoal <T, W>)planner.GetCurrentGoal() == this;
    var isPossible = IsGoalPossible();
    // Exactly the two original cases: (inactive && possible) or (active && !possible).
    if (isCurrentGoal != isPossible)
    {
        planner.GetCurrentAgent().WarnPossibleGoal(this);
    }
}
// Expands this node: for every action in the agent's set, and for every
// settings variant that action offers, create a child node when the action's
// effects help the goal without conflicts.
public List <INode <ReGoapState <T, W> > > Expand()
{
    expandList.Clear();
    var agent = planner.GetCurrentAgent();
    var actions = agent.GetActionsSet();

    // Shared call context handed to each candidate action.
    GoapActionStackData <T, W> stackData;
    stackData.currentState = state;
    stackData.goalState = goal;
    stackData.next = action;
    stackData.agent = agent;
    stackData.settings = null;

    // Iterate the action set back-to-front (preserves original child ordering).
    for (var i = actions.Count - 1; i >= 0; i--)
    {
        var candidate = actions[i];
        candidate.Precalculations(stackData);
        foreach (var candidateSettings in candidate.GetSettings(stackData))
        {
            stackData.settings = candidateSettings;
            var requires = candidate.GetPreconditions(stackData);
            var provides = candidate.GetEffects(stackData);
            var contributes =
                provides.HasAny(goal) && // any effect is the current goal
                !goal.HasAnyConflict(provides, requires) && // no precondition is conflicting with the goal or has conflict but the effects fulfils the goal
                !goal.HasAnyConflict(provides) && // no effect is conflicting with the goal
                candidate.CheckProceduralCondition(stackData);
            if (contributes)
            {
                expandList.Add(Instantiate(planner, goal, this, candidate, candidateSettings));
            }
        }
    }
    return (expandList);
}
// Lazily yields every action in the agent's set (except this node's own action)
// whose effects contribute to the current goal without conflicting with it.
public IEnumerator <IReGoapAction> GetPossibleActionsEnumerator()
{
    var agent = planner.GetCurrentAgent();
    var actions = agent.GetActionsSet();
    for (var index = 0; index < actions.Count; index++)
    {
        var possibleAction = actions[index];
        // FIX: skip the node's own action before doing any work on it.
        // Previously Precalculations/GetPreconditions/GetEffects ran first and
        // the result was then unconditionally discarded.
        if (possibleAction == action)
        {
            continue;
        }
        possibleAction.Precalculations(agent, goal);
        var precond = possibleAction.GetPreconditions(goal, action);
        var effects = possibleAction.GetEffects(goal, action);
        if (effects.HasAny(goal) && // any effect is the current goal
            !goal.HasAnyConflict(effects) && // no effect is conflicting with the goal
            !goal.HasAnyConflict(precond) && // no precondition is conflicting with the goal
            possibleAction.CheckProceduralCondition(agent, goal, parent != null ? parent.action : null))
        {
            yield return (possibleAction);
        }
    }
}
// Backward-planner node init (StructValue variant): inherits state/cost from
// the parent, applies the action's effects, then rebuilds the goal key-by-key,
// merging Arithmetic (numeric) conditions and Other (symbolic) conditions with
// different rules. Statement order here is load-bearing — do not reorder.
private void Init(IGoapPlanner <T, W> planner, ReGoapState <T, W> newGoal, ReGoapNode <T, W> parent, IReGoapAction <T, W> action)
{
    expandList.Clear();
    tmpKeys.Clear();
    this.planner = planner;
    this.parent = parent;
    this.action = action;
    if (action != null)
    {
        actionSettings = action.GetSettings(planner.GetCurrentAgent(), newGoal);
    }
    if (parent != null)
    {
        state = parent.GetState().Clone();
        // g(node)
        g = parent.GetPathCost();
    }
    else
    {
        // Root node: start from a copy of the agent's current world state.
        state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
    }
    var nextAction = parent == null ? null : parent.action;
    if (action != null)
    {
        // create a new instance of the goal based on the parent's goal
        goal = ReGoapState <T, W> .Instantiate();
        var tmpGoal = ReGoapState <T, W> .Instantiate(newGoal);
        var preconditions = action.GetPreconditions(tmpGoal, nextAction);
        var effects = action.GetEffects(tmpGoal, nextAction);
        // adding the action's effects to the current node's state
        state.AddFromState(effects);
        // adding the action's cost to the node's total cost
        g += action.GetCost(tmpGoal, nextAction);
        // Earlier whole-state implementation, kept for reference:
        //// add all preconditions of the current action to the goal
        //tmpGoal.AddFromState(preconditions);
        //// removes from goal all the conditions that are now fulfilled in the node's state
        //tmpGoal.ReplaceWithMissingDifference(state);
        ////goal.ReplaceWithMissingDifference(effects);

        // collect all keys from goal & precondition, unique-ed
        foreach (var pr in tmpGoal.GetValues())
        {
            var k = pr.Key;
            if (!tmpKeys.Contains(k))
            {
                tmpKeys.Add(k);
            }
        }
        foreach (var pr in preconditions.GetValues())
        {
            var k = pr.Key;
            if (!tmpKeys.Contains(k))
            {
                tmpKeys.Add(k);
            }
        }
        // process each key: look up its value in goal/effects/preconditions/state,
        // then merge according to the value's detected type.
        foreach (var k in tmpKeys)
        {
            StructValue goalValue, effectValue, precondValue, stateValue, protoValue;
            tmpGoal.GetValues().TryGetValue(k, out goalValue);
            effects.GetValues().TryGetValue(k, out effectValue);
            preconditions.GetValues().TryGetValue(k, out precondValue);
            state.GetValues().TryGetValue(k, out stateValue);
            StructValue.EValueType valueType;
            _GetValueType(ref goalValue, ref effectValue, ref precondValue, ref stateValue, out valueType, out protoValue);
            if (valueType == StructValue.EValueType.Arithmetic)
            {
                // Numeric condition: ensure every operand is initialised, then
                // compute the amount still required after this action runs.
                //_EnsureArithStructValueInited(ref goalValue, ref protoValue);
                _EnsureArithStructValueInited(ref effectValue, ref protoValue);
                _EnsureArithStructValueInited(ref precondValue, ref protoValue);
                _EnsureArithStructValueInited(ref stateValue, ref protoValue);
                if (!goalValue.Inited)
                {
                    // No explicit goal for this key: derive one so the action's
                    // effect contribution is still accounted for.
                    goalValue = StructValue.CopyCreate(ref stateValue, -(Convert.ToSingle(stateValue.v) - Convert.ToSingle(effectValue.v)));
                }
                float fGoal = Convert.ToSingle(goalValue.v);
                float fEffect = Convert.ToSingle(effectValue.v);
                float fPrecond = Convert.ToSingle(precondValue.v);
                float fState = Convert.ToSingle(stateValue.v);
                // NOTE(review): remaining = max(goal - effect, min(precond, precond - state));
                // this assumes "larger means more still required" semantics for
                // arithmetic StructValues — confirm against StructValue's contract.
                float finalV = Math.Max( fGoal - fEffect, Math.Min(fPrecond, fPrecond - fState) );
                var sv = StructValue.CopyCreate(ref protoValue, finalV);
                goal.SetStructValue(k, sv);
            }
            else if (valueType == StructValue.EValueType.Other)
            {
                // Symbolic condition.
                //ReplaceWithMissingDifference: drop the goal entry when the
                //accumulated state already fulfils it.
                if (stateValue.Inited && goalValue.Inited && goalValue.IsFulfilledBy(stateValue))
                {
                    goalValue.Invalidate();
                }
                // AddFromPrecond
                // 1. if the precond is satisfied by the memory start state, then discard
                // 2. else this newly added goal from precond, should not be removed due to fulfilled by curStateValue
                if (precondValue.Inited)
                {
                    bool preCondfulfilledByMem = false;
                    var startMemoryState = planner.GetCurrentAgent().GetMemory().GetWorldState();
                    StructValue startMemoryValue;
                    if (startMemoryState.GetValues().TryGetValue(k, out startMemoryValue))
                    {
                        if (startMemoryValue.Inited && precondValue.IsFulfilledBy(startMemoryValue))
                        {
                            preCondfulfilledByMem = true;
                        }
                    }
                    if (!preCondfulfilledByMem)
                    {
                        // Merge the precondition into any surviving goal value,
                        // or adopt it outright when no goal value remains.
                        if (goalValue.Inited)
                        {
                            goalValue = goalValue.MergeWith(precondValue);
                        }
                        else
                        {
                            goalValue = precondValue;
                        }
                    }
                }
                if (goalValue.Inited)
                {
                    goal.SetStructValue(k, goalValue);
                }
            }
            else
            {
                UnityEngine.Debug.LogError("Unexpected StructValue type: " + valueType);
            }
        }// foreach (var k in tmpKeys)
        tmpGoal.Recycle();
    }
    else
    {
        // Root node: goal = conditions the current state does not yet satisfy.
        var diff = ReGoapState <T, W> .Instantiate();
        newGoal.MissingDifference(state, ref diff);
        goal = diff;
    }
    h = _CalculateH();
    // f(node) = g(node) + h(node)
    cost = g + h * planner.GetSettings().HeuristicMultiplier;
}
// (Re)initialises this pooled node (settings-aware variant): inherits state and
// cost from the parent, applies the action's effects via the stack-data call
// context, and also precomputes the goal minus the world state so the planner
// can detect a completed plan (goalMergedWithWorld).
private void Init(IGoapPlanner <T, W> planner, ReGoapState <T, W> newGoal, ReGoapNode <T, W> parent, IReGoapAction <T, W> action, ReGoapState <T, W> settings)
{
    expandList.Clear();
    this.planner = planner;
    this.parent = parent;
    this.action = action;
    if (settings != null)
    {
        this.actionSettings = settings.Clone();
    }
    if (parent != null)
    {
        state = parent.GetState().Clone();
        // g(node)
        g = parent.GetPathCost();
    }
    else
    {
        state = planner.GetCurrentAgent().GetMemory().GetWorldState().Clone();
    }
    // FIX: removed the dead local 'nextAction' (parent's action) — it was
    // computed but never used; this variant passes 'action' as stackData.next.
    if (action != null)
    {
        // create a new instance of the goal based on the parent's goal
        goal = ReGoapState <T, W> .Instantiate(newGoal);
        // Assemble the call context handed to the action's callbacks.
        GoapActionStackData <T, W> stackData;
        stackData.currentState = state;
        stackData.goalState = goal;
        stackData.next = action;
        stackData.agent = planner.GetCurrentAgent();
        stackData.settings = actionSettings;
        var preconditions = action.GetPreconditions(stackData);
        var effects = action.GetEffects(stackData);
        // adding the action's cost to the node's total cost
        g += action.GetCost(stackData);
        // adding the action's effects to the current node's state
        state.AddFromState(effects);
        // removes from goal all the conditions that are now fulfilled in the action's effects
        goal.ReplaceWithMissingDifference(effects);
        // add all preconditions of the current action to the goal
        goal.AddFromState(preconditions);
    }
    else
    {
        goal = newGoal;
    }
    h = goal.Count;
    // f(node) = g(node) + h(node)
    cost = g + h * heuristicMultiplier;
    // additionally calculate the goal without any world effect to understand if we are done
    var diff = ReGoapState <T, W> .Instantiate();
    goal.MissingDifference(planner.GetCurrentAgent().GetMemory().GetWorldState(), ref diff);
    goalMergedWithWorld = diff;
}