// Expands the given node backwards: asks the domain for every predecessor action,
// records each predecessor on currentNode.predecessors, and maintains the edge list
// (adding edges that have not been seen, or flagging and updating a changed cost).
void generateNodePredecessors(ref PlanningDomainBase domain, ref ADAstarNode currentNode)
{
    List<DefaultAction> possibleTransitions = new List<DefaultAction>();
    domain.generatePredecesors(ref currentNode.action.state, ref currentNode.previousState,
                               ref goalNode.action.state, ref possibleTransitions);

    foreach (DefaultAction predecessorAction in possibleTransitions)
    {
        // Local copy: the foreach iteration variable cannot be passed by ref.
        DefaultAction previousAction = predecessorAction;

        float newg = domain.ComputeEstimate(ref previousAction.state, ref goalNode.action.state, "g");
        float newh = domain.ComputeEstimate(ref startNode.action.state, ref previousAction.state, "h");

        ADAstarNode previousNode = new ADAstarNode(newg, newh, ref predecessorAction.state, ref previousAction);
        currentNode.predecessors.Add(previousNode);

        // Build the (from, to) key once instead of allocating a fresh array for every
        // lookup, and use TryGetValue to avoid the ContainsKey + indexer double lookup.
        // NOTE(review): DefaultState[] keys compare by reference under the default
        // comparer, so a previously added edge can never be matched unless edgeList
        // was constructed with a custom IEqualityComparer — confirm which comparer
        // edgeList uses.
        DefaultState[] edgeKey = new DefaultState[] { previousNode.action.state, currentNode.action.state };

        Edge existingEdge;
        if (!edgeList.TryGetValue(edgeKey, out existingEdge))
        {
            // Edge not seen before: record it with the predecessor's action cost.
            edgeList.Add(edgeKey, new Edge(previousNode.action.state, currentNode.action.state, previousNode.action.cost));
        }
        else if (existingEdge.cost != currentNode.action.cost)
        {
            // The edge exists but its cost changed: flag the change and store the new cost.
            changeInEdgeCost = true;
            existingEdge.cost = currentNode.action.cost;
        }
    }
}
// Resets the planner and seeds the ADA* search from the goal toward the start:
// selects the best-scoring planning domain for the start state, rebuilds the
// Open/Closed/Incons sets, creates the start and goal nodes, expands the goal's
// predecessors, and runs the first ComputeorImprovePath pass.
public void InitializeValues(ref DefaultState startState, ref DefaultState goalState, float _inflationFactor,
                             ref Stack<DefaultAction> plan, PlannerMode.PLANNING_MODE mode, float threshold, float maxTime)
{
    // NOTE(review): 'plan' and 'mode' are not used in this method — confirm intent.
    edgeChangeThreshold = threshold;

    // Pick the domain that scores highest for the start state.
    // Evaluate each candidate once (the original evaluated twice per iteration).
    // NOTE(review): if every domain scores <= 0, 'domain' stays null and the
    // ComputeEstimate calls below will throw — confirm upstream guarantees.
    PlanningDomainBase domain = default(PlanningDomainBase);
    float bestScore = 0;
    foreach (PlanningDomainBase d in _planningDomain)
    {
        float s = d.evaluateDomain(ref startState);
        if (s > bestScore)
        {
            bestScore = s;
            domain = d;
        }
    }

    // Freshly constructed containers are already empty; no Clear() needed.
    Open = new List<KeyValuePair<DefaultState, ADAstarNode>>();
    Closed = new Dictionary<DefaultState, ADAstarNode>();
    Incons = new Dictionary<DefaultState, ADAstarNode>();

    // h estimate from start to start is 0.
    float starth = domain.ComputeEstimate(ref startState, ref startState, "h");
    startNode = new ADAstarNode(Mathf.Infinity, starth, Mathf.Infinity, ref startState, ref startState);
    startNode.key1 = Mathf.Infinity;
    startNode.key2 = Mathf.Infinity;

    float goalh = domain.ComputeEstimate(ref startState, ref goalState, "h");
    goalNode = new ADAstarNode(Mathf.Infinity, goalh, 0.0f, ref goalState, ref goalState);

    // Compute the priority keys once instead of calling GetKey twice.
    float[] goalKeys = GetKey(goalNode);
    goalNode.key1 = goalKeys[0];
    goalNode.key2 = goalKeys[1];

    inflationFactor = _inflationFactor;

    // Backward search: expand the goal node first and push it onto Open.
    generateNodePredecessors(ref domain, ref goalNode);
    Open.Add(new KeyValuePair<DefaultState, ADAstarNode>(goalNode.action.state, goalNode));

    ComputeorImprovePath(ref domain, maxTime);
}
// AD* UpdateState: recomputes a node's rhs value from its successors, refreshes its
// priority keys, removes it from Open if present, and re-queues it (into Open, or
// into Incons when it is already Closed) whenever it is locally inconsistent
// (g != rhs).
void UpdateState(ref ADAstarNode currentNode)
{
    // Pick the domain that scores highest for the start state, evaluating each
    // candidate once (the original evaluated twice per iteration).
    // NOTE(review): if every domain scores <= 0, 'domain' stays null and the calls
    // below will throw — confirm upstream guarantees.
    PlanningDomainBase domain = default(PlanningDomainBase);
    float bestScore = 0;
    foreach (PlanningDomainBase d in _planningDomain)
    {
        float s = d.evaluateDomain(ref startNode.action.state);
        if (s > bestScore)
        {
            bestScore = s;
            domain = d;
        }
    }

    // A node that was never expanded starts with an infinite g value.
    if (!currentNode.alreadyExpanded)
    {
        currentNode.g = Mathf.Infinity;
    }

    if (!domain.isAGoalState(ref currentNode.action.state, ref goalNode.action.state))
    {
        List<DefaultAction> possibleTransitions = new List<DefaultAction>();
        domain.generateTransitions(ref currentNode.action.state, ref currentNode.previousState,
                                   ref goalNode.action.state, ref possibleTransitions);

        // rhs(s) = min over successors s' of (c(s, s') + g(s')).
        float min_rhs = Mathf.Infinity;
        foreach (DefaultAction action in possibleTransitions)
        {
            // Local copy: the foreach iteration variable cannot be passed by ref.
            DefaultAction nextAction = action;
            float newh = domain.ComputeEstimate(ref startNode.action.state, ref nextAction.state, "h");
            float newg = domain.ComputeEstimate(ref nextAction.state, ref goalNode.action.state, "g");
            ADAstarNode nextNode = new ADAstarNode(newg, newh, ref currentNode.action.state, ref nextAction);
            if ((nextAction.cost + nextNode.g) < min_rhs)
            {
                min_rhs = nextAction.cost + nextNode.g;
            }
        }
        currentNode.rhs = min_rhs;

        float[] keys = GetKey(currentNode);
        currentNode.key1 = keys[0];
        currentNode.key2 = keys[1];
    }

    // Remove the node from Open if present. Iterate backwards so RemoveAt(i) cannot
    // skip the element that shifts into the removed slot (the original forward loop
    // with ++i skipped an entry after every removal).
    for (int i = Open.Count - 1; i >= 0; --i)
    {
        if (Open[i].Key != null && domain.equals(Open[i].Key, currentNode.action.state, false))
        {
            Open.RemoveAt(i);
            currentNode.alreadyExpanded = true;
        }
    }

    // Locally inconsistent node: re-queue it. Nodes already in Closed go to Incons
    // so they can be reconsidered when the inflation factor decreases.
    if (currentNode.g != currentNode.rhs)
    {
        if (!Closed.ContainsKey(currentNode.action.state))
        {
            // Generate all predecessors to keep expanding the search frontier.
            generateNodePredecessors(ref domain, ref currentNode);
            Open.Add(new KeyValuePair<DefaultState, ADAstarNode>(currentNode.action.state, currentNode));
            // Keep Open ordered by priority keys.
            Open.Sort(ADAstartCopareCost.CompareCost);
        }
        else
        {
            // Indexer instead of Add: re-flagging an already-recorded inconsistent
            // state must update it, not throw on a duplicate key.
            Incons[currentNode.action.state] = currentNode;
        }
    }
}