/// <summary>
/// Computes the forward cost heuristic for the given state by expanding the
/// relaxed planning graph until either the goal conditions hold or a fixpoint
/// is reached (no new propositions appear between two consecutive state layers).
/// </summary>
/// <param name="state">Starting state.</param>
/// <param name="goalConditions">Goal conditions.</param>
/// <param name="evaluationStrategy">Evaluation strategy.</param>
/// <returns>Forward cost heuristic value from the specified state; int.MaxValue when the goal is unreachable.</returns>
private double ComputeForwardCost(IState state, IConditions goalConditions, ForwardCostEvaluationStrategy evaluationStrategy)
{
    IStateLayer prevLayer = null;
    IStateLayer currentLayer = CreateLabeledStateLayer(state.GetRelaxedState());
    var currentActions = new ActionLayer();

    // A fixpoint (currentLayer == prevLayer) means no further propositions can
    // ever be added, so the goal can no longer become satisfied.
    while (!currentLayer.Equals(prevLayer))
    {
        // Goal reached: evaluate the goal label against the current state labels.
        if (goalConditions.Evaluate(currentLayer.GetState()))
        {
            return goalConditions.EvaluateOperatorPlanningGraphLabel(currentLayer.GetStateLabels(), evaluationStrategy);
        }

        // Rebuild the action layer from all applicable relaxed operators;
        // each node is labeled with its precondition label plus its own cost.
        currentActions.Clear();
        foreach (var successor in RelaxedProblem.GetSuccessors(currentLayer.GetState()))
        {
            IOperator op = successor.GetAppliedOperator();
            currentActions.Add(new ActionNode(op, op.ComputePlanningGraphLabel(currentLayer.GetStateLabels(), evaluationStrategy) + op.GetCost()));
        }

        // Advance to the next state layer.
        prevLayer = currentLayer;
        currentLayer = CreateLabeledStateLayer(currentLayer, currentActions);
    }

    // Fixpoint without satisfying the goal: no solution from this state.
    return int.MaxValue;
}
/// <summary>
/// Looks up a state layer by its registered name.
/// </summary>
/// <param name="layerName">Name of the requested layer.</param>
/// <returns>The layer registered under the given name, or null (with an error logged) when no such layer exists.</returns>
public IStateLayer GetLayer(string layerName)
{
    // Use TryGetValue instead of the previous bare try/catch: a dictionary miss
    // is an expected case, not an exception, and the old catch-all also hid
    // unrelated failures. Guard the null key explicitly (TryGetValue would throw).
    // NOTE(review): assumes nameLayerDict has been initialized by BuildLayerDict.
    IStateLayer layer;
    if (layerName == null || !nameLayerDict.TryGetValue(layerName, out layer))
    {
        Logger.LogError(string.Format("Layer named {0} was not found.", layerName));
        return null;
    }
    return layer;
}
/// <summary>
/// Looks up a state layer by its index in the layer collection.
/// </summary>
/// <param name="layerIndex">Index of the requested layer.</param>
/// <returns>The layer at the given index, or null (with an error logged) when the index is out of range.</returns>
public IStateLayer GetLayer(int layerIndex)
{
    IStateLayer layer = null;
    try
    {
        layer = layers[layerIndex];
    }
    // Catch only the out-of-range failures (array vs. list indexers throw
    // different types); the previous bare catch also swallowed unrelated
    // errors such as a NullReferenceException from an uninitialized 'layers'.
    catch (Exception ex) when (ex is IndexOutOfRangeException || ex is ArgumentOutOfRangeException)
    {
        Logger.LogError(string.Format("Layer at index {0} was not found.", layerIndex));
    }
    return layer;
}
/// <summary>
/// Rebuilds the layer lookup structures: collects the StateLayer components,
/// records which state references are active layers, and registers every layer
/// under both its type name and its formatted name.
/// </summary>
void BuildLayerDict()
{
    nameStateDict = new Dictionary<string, IState>();
    nameLayerDict = new Dictionary<string, IStateLayer>();
    layers = GetComponents<StateLayer>();

    // Mirror the state references into the active-layer slots (null entries stay null).
    activeLayers = new IStateLayer[stateReferences.Length];
    for (int i = 0; i < stateReferences.Length; i++)
    {
        IStateLayer layer = (IStateLayer)stateReferences[i];
        if (layer != null)
        {
            activeLayers[i] = layer;
        }
    }

    // Register each layer under its plain type name and its formatted name.
    for (int i = 0; i < layers.Length; i++)
    {
        IStateLayer layer = layers[i];
        string typeName = layer.GetType().Name;
        string formattedName = StateMachineUtility.FormatLayer(layer.GetType());
        nameStateDict[typeName] = layer;
        nameStateDict[formattedName] = layer;
        // FIX: nameLayerDict was allocated but never populated, so
        // GetLayer(string) could never find any layer. Register here as well.
        nameLayerDict[typeName] = layer;
        nameLayerDict[formattedName] = layer;
    }
}
/// <summary>
/// Creates the labeled state layer for the forward cost evaluation,
/// from the previous state layer and the action layer.
/// </summary>
/// <param name="sLayer">Previous state layer.</param>
/// <param name="aLayer">Action layer.</param>
/// <returns>New state layer.</returns>
protected override IStateLayer CreateLabeledStateLayer(IStateLayer sLayer, ActionLayer aLayer)
    => new StateLayer((StateLayer)sLayer, aLayer);
/// <summary>
/// Creates the labeled state layer for the forward cost evaluation,
/// from the previous state layer and the action layer.
/// Concrete subclasses decide how labels are propagated into the new layer.
/// </summary>
/// <param name="sLayer">Previous state layer.</param>
/// <param name="aLayer">Action layer.</param>
/// <returns>New state layer.</returns>
protected abstract IStateLayer CreateLabeledStateLayer(IStateLayer sLayer, ActionLayer aLayer);
/// <summary>
/// Builds the relaxed planning graph and computes the FF heuristic value:
/// the summed cost of the action nodes marked while backchaining a relaxed
/// plan from the goal node to the initial state layer.
/// </summary>
/// <param name="state">Starting state.</param>
/// <param name="goalConditions">Goal conditions.</param>
/// <returns>FF cost heuristic value from the specified state; int.MaxValue when the goal is unreachable.</returns>
private double ComputeFFCost(IState state, IConditions goalConditions)
{
    // Phase 1: build an explicit relaxed planning graph, layer by layer.
    StateLayers.Clear();
    ActionLayers.Clear();
    StateLayers.Add(CreateFFStateLayer(state.GetRelaxedState()));

    while (true)
    {
        // Check goal conditions; on success, cap the graph with a synthetic goal action node.
        IStateLayer stateLayer = StateLayers[StateLayers.Count - 1];
        if (goalConditions.Evaluate(stateLayer.GetState()))
        {
            ActionLayers.Add(new ActionLayer { CreateFFGoalActionNode(goalConditions, stateLayer.GetState()) });
            break;
        }

        // Build the new action layer and accumulate the next (relaxed) state.
        ActionLayer actionLayer = new ActionLayer();
        IState newState = stateLayer.GetState().Clone();
        foreach (var successor in RelaxedProblem.GetSuccessors(stateLayer.GetState()))
        {
            IOperator appliedOperator = successor.GetAppliedOperator();
            actionLayer.Add(CreateFFActionNode(appliedOperator, stateLayer.GetState()));
            newState = appliedOperator.Apply(newState, true);
        }

        // FIX: fixpoint guard. Relaxed states only grow, so if applying every
        // applicable operator adds nothing new, the goal can never be satisfied
        // and the original while(true) loop would spin forever. Return the same
        // failure value as ComputeForwardCost.
        if (newState.Equals(stateLayer.GetState()))
        {
            return int.MaxValue;
        }

        ActionLayers.Add(actionLayer);
        StateLayers.Add(CreateFFStateLayer(newState));
    }

    // Phase 2: compute the FF value by backchaining from the goal node,
    // marking a support action for every proposition that is not already
    // available in the earlier state layer.
    UnsatisfiedOnCurrentLayer.Clear();
    UnsatisfiedOnNextLayer.Clear();
    MarkedActionNodes.Clear();

    var goalNode = ActionLayers[ActionLayers.Count - 1][0];
    foreach (var proposition in goalNode.Predecessors)
    {
        UnsatisfiedOnCurrentLayer.Push(proposition);
    }

    for (int i = StateLayers.Count - 1; i > 0; --i)
    {
        IStateLayer nextStateLayer = StateLayers[i - 1];
        ActionLayer currentActionLayer = ActionLayers[i - 1];

        while (UnsatisfiedOnCurrentLayer.Count != 0)
        {
            IProposition proposition = UnsatisfiedOnCurrentLayer.Pop();

            // 1.) try to satisfy the proposition by an idle arc to the next state layer
            if (nextStateLayer.HasProposition(proposition))
            {
                UnsatisfiedOnNextLayer.Push(proposition);
                continue;
            }

            // 2.) try to satisfy the proposition by a support action node;
            // its preconditions become unsatisfied propositions one layer earlier.
            // NOTE(review): a proposition with no relevant action node is silently
            // dropped here — presumably unreachable by construction of the graph; verify.
            ActionNode relevantActionNode = GetBestRelevantActionNodeFF(proposition, currentActionLayer);
            if (relevantActionNode != null)
            {
                MarkedActionNodes.Add(relevantActionNode.Operator);
                foreach (var prevProposition in relevantActionNode.Predecessors)
                {
                    UnsatisfiedOnNextLayer.Push(prevProposition);
                }
            }
        }

        // Swap the stacks: "next" becomes "current" for the earlier layer.
        UnsatisfiedSwapper = UnsatisfiedOnNextLayer;
        UnsatisfiedOnNextLayer = UnsatisfiedOnCurrentLayer;
        UnsatisfiedOnCurrentLayer = UnsatisfiedSwapper;
        UnsatisfiedOnNextLayer.Clear();
    }

    // The result value is a sum of costs of marked action nodes.
    double result = 0;
    foreach (var markedActionNode in MarkedActionNodes)
    {
        result += markedActionNode.GetCost();
    }
    return result;
}