/// <summary>
/// Builds the pattern database, storing the heuristic table in memory.
/// </summary>
public override void build()
{
    /**
     * Create the subproblem pertaining to this additive pattern database.
     * We do this by taking the problem instance and swapping the initial
     * state with the goal. We also save a copy of our m_vAgents data
     * structure, because during the building process our state already is
     * a projection.
     */
    WorldState goal = new WorldState(m_Problem.m_vAgents, m_vAgents);
    foreach (AgentState ags in goal.allAgentsState)
    {
        ags.SwapCurrentWithGoal();
    }

    List<uint> vBackup = m_vAgents;
    m_vAgents = new List<uint>(goal.allAgentsState.Length);
    for (uint i = 0; i < goal.allAgentsState.Length; ++i)
    {
        m_vAgents.Add(i);
    }

    /**
     * Initialize variables and insert the root node into our queue.
     * Byte.MaxValue marks an uninitialized entry in the heuristic table.
     * Entries may later be overwritten with smaller values: in this domain
     * the first generation of a state does not necessarily happen along a
     * shortest path to it (see the note inside the loop below).
     */
    m_vTable = new Byte[m_vPermutations[0] * (m_Problem.m_nLocations + 1)];
    for (int i = 0; i < m_vTable.Length; ++i)
    {
        m_vTable[i] = Byte.MaxValue;
    }

    Context c = new Context();
    c.initialize("q1.tmp", "q2.tmp");
    m_vTable[hash(goal)] = 0;
    c.write(goal);
    c.nextLevel();

    while (c.m_nNodes > 0)
    {
        for (ulong n = 0; n < c.m_nNodes; ++n)
        {
            /**
             * Get the next node, generate its children and write the
             * children to the next queue file. One might expect that,
             * since this is an uninformed breadth-first search, the first
             * time we generate a node we would also have the shortest
             * path to it. Unfortunately, for this particular domain, that
             * is not true, so we keep the minimum value seen so far.
             */
            List<WorldState> vChildren = new List<WorldState>();
            WorldState tws = (WorldState)c.m_bf.Deserialize(c.m_fsQueue);
            UInt32 nHashParent = hash(tws);
            this.Expand(tws, vChildren);

            foreach (WorldState i in vChildren)
            {
                UInt32 nHash = hash(i);

                /**
                 * We store only the difference in heuristic value between
                 * the single-agent shortest-path heuristic and our pattern
                 * database heuristic. The hope is that the resulting value
                 * always fits within the range of a single byte.
                 */
                Byte nCandidateValue;
                if (m_bOffsetFromSingleShortestPath)
                {
                    int nSingleAgentShortestPath = 0;
                    foreach (var a in i.allAgentsState)
                    {
                        nSingleAgentShortestPath += this.m_Problem.GetSingleAgentOptimalCost(a);
                    }
                    int nDifference = i.g - nSingleAgentShortestPath;
                    Debug.Assert(nDifference >= 0);
                    Debug.Assert(nDifference < Byte.MaxValue);
                    nCandidateValue = (Byte)nDifference;
                }
                else
                {
                    Debug.Assert(i.g < Byte.MaxValue);
                    nCandidateValue = (Byte)i.g;
                }

                if (nCandidateValue < m_vTable[nHash])
                {
                    c.write(i);
                    m_vTable[nHash] = nCandidateValue;
                }
            }
        }
        c.nextLevel();
    }

    c.clear();
    m_vAgents = vBackup;
}
// Base implementation returns a trivial estimate of 0; subclasses are expected to override
// this with an informed heuristic.
public virtual uint h(WorldState s)
{
    return 0;
}
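// The virtual h above returns a trivial estimate of 0; a pattern-database subclass would
// presumably override it to consult the table filled by build(). That override is not part
// of this snippet, but the storage trick build() relies on is easy to show in isolation.
// The following is a self-contained toy (none of these names come from the project) that
// stores only the gap between the true cost and the sum-of-individual-costs (SIC) baseline
// in a byte table, and reconstructs the full heuristic at query time.
using System;

static class OffsetTableSketch
{
    static void Main()
    {
        const byte Uninitialized = byte.MaxValue; // same sentinel convention as build()
        byte[] table = new byte[16];
        for (int i = 0; i < table.Length; ++i)
            table[i] = Uninitialized;

        // Suppose the state hashed to slot 5 has a true (coupled) cost of 12
        // while the SIC estimate for it is 9.
        int trueCost = 12, sicEstimate = 9;
        int difference = trueCost - sicEstimate; // never negative when SIC is admissible
        table[5] = (byte)difference;             // small enough to fit in one byte

        // Query time: fall back to plain SIC when the entry was never initialized.
        int h = table[5] == Uninitialized ? sicEstimate : sicEstimate + table[5];
        Console.WriteLine(h); // prints 12
    }
}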
/// <summary>
/// Used when WorldState objects are put in the open list priority queue
/// </summary>
/// <param name="other"></param>
/// <returns></returns>
public virtual int CompareTo(IBinaryHeapItem other)
{
    WorldState that = (WorldState)other;
    int thisF = this.h + this.g;
    int thatF = that.h + that.g;
    if (thisF < thatF)
        return -1;
    if (thisF > thatF)
        return 1;

    // Tie breaking:
    bool thisIsGoal = this.GoalTest();
    bool thatIsGoal = that.GoalTest();
    // The elaborate form is necessary to keep the comparison consistent.
    // Otherwise goalA < goalB and goalB < goalA would both hold.
    if (thisIsGoal == true && thatIsGoal == false)
        return -1;
    if (thatIsGoal == true && thisIsGoal == false)
        return 1;

    // Independence Detection framework conflicts:
    if (this.potentialConflictsCount < that.potentialConflictsCount)
        return -1;
    if (this.potentialConflictsCount > that.potentialConflictsCount)
        return 1;

    // CBS framework conflicts:
    // It makes sense to prefer nodes that conflict less, and not just nodes that don't conflict at all,
    // because a 3-way conflict takes more work to resolve than a 2-way conflict.
    if (this.cbsInternalConflictsCount < that.cbsInternalConflictsCount)
        return -1;
    if (this.cbsInternalConflictsCount > that.cbsInternalConflictsCount)
        return 1;

    //// M-Star: prefer nodes with smaller collision sets:
    //if (this.collisionSets != null) // then M-Star is running
    //{
    //    // The collision sets change during collision set backpropagation and closed list hits.
    //    // Backpropagation goes from a node's child to the node, so it's tempting to think
    //    // it only happens when the node is already expanded and out of the open list,
    //    // but partial expansion makes that false.
    //    // Closed list hits can also happen while the node is waiting to be expanded.
    //    // So the max rank can change while the node is in the open list -
    //    // it can't be used for tie breaking :(.
    //    if (this.collisionSets.maxRank < that.collisionSets.maxRank)
    //        return -1;
    //    if (this.collisionSets.maxRank > that.collisionSets.maxRank)
    //        return 1;
    //}

    // f, collision sets, conflicts and internal conflicts being equal, prefer nodes with a larger g
    // - they're closer to the goal, so fewer nodes would probably be generated from them on the way to it.
    if (this.g < that.g)
        return 1;
    if (this.g > that.g)
        return -1;
    return 0;
}
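// The "elaborate form" comment above deserves a concrete illustration. The following is a
// self-contained toy (not the project's classes): if goal status is used naively as a
// tie-breaker, two goal nodes with equal f each compare as smaller than the other, which
// violates the requirement that Compare(a, b) == -Compare(b, a) and can corrupt a binary
// heap or any sorted collection built on top of the comparison.
using System;

static class TieBreakSketch
{
    // Naive version: "goals first", without checking the other node's goal flag.
    static int NaiveCompare(bool aIsGoal, bool bIsGoal)
    {
        if (aIsGoal) return -1;
        if (bIsGoal) return 1;
        return 0;
    }

    // Consistent version, mirroring the elaborate form used in CompareTo above.
    static int ConsistentCompare(bool aIsGoal, bool bIsGoal)
    {
        if (aIsGoal && !bIsGoal) return -1;
        if (bIsGoal && !aIsGoal) return 1;
        return 0;
    }

    static void Main()
    {
        // Two goal nodes with identical f: a consistent comparator must return 0.
        Console.WriteLine(NaiveCompare(true, true));      // -1 (and -1 again with swapped arguments: inconsistent)
        Console.WriteLine(ConsistentCompare(true, true)); // 0
    }
}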
public void write(WorldState tws)
{
    // Serialize the node into the next-level queue file and count it.
    m_bf.Serialize(m_fsNext, tws);
    ++m_nNextNodes;
}
/// <summary>
/// Used when WorldState objects are put in the open list priority queue
/// </summary>
/// <param name="other"></param>
/// <returns></returns>
public virtual int CompareTo(IBinaryHeapItem other)
{
    WorldState that = (WorldState)other;
    int thisF = this.h + this.g;
    int thatF = that.h + that.g;
    if (thisF < thatF)
        return -1;
    if (thisF > thatF)
        return 1;

    // Tie breaking:
    bool thisIsGoal = this.GoalTest();
    bool thatIsGoal = that.GoalTest();
    // The elaborate form is necessary to keep the comparison consistent.
    // Otherwise goalA < goalB and goalB < goalA would both hold.
    if (thisIsGoal == true && thatIsGoal == false)
        return -1;
    if (thatIsGoal == true && thisIsGoal == false)
        return 1;

    // Independence Detection framework conflicts:
    if (this.potentialConflictsCount < that.potentialConflictsCount)
        return -1;
    if (this.potentialConflictsCount > that.potentialConflictsCount)
        return 1;

    // CBS framework conflicts:
    // It makes sense to prefer nodes that conflict less, and not just nodes that don't conflict at all,
    // because a 3-way conflict takes more work to resolve than a 2-way conflict.
    if (this.cbsInternalConflictsCount < that.cbsInternalConflictsCount)
        return -1;
    if (this.cbsInternalConflictsCount > that.cbsInternalConflictsCount)
        return 1;

    // f, conflicts and internal conflicts being equal, prefer nodes with a larger g
    // - they're closer to the goal, so fewer nodes would probably be generated from them on the way to it.
    if (this.g < that.g)
        return 1;
    if (this.g > that.g)
        return -1;
    return 0;
}
protected override WorldState CreateSearchNode(WorldState from)
{
    return new WorldStateWithOD((WorldStateWithOD)from);
}
/// <summary>
/// Expands a node. This is done recursively - generating agent possibilities one at a time.
/// This includes:
/// - Generating the children
/// - Inserting them into OPEN
/// - Inserting the node into CLOSED
/// Why does a PDB need to know how to expand nodes? Seems like someone else's job.
/// </summary>
/// <param name="currentNode">The node to expand</param>
/// <param name="children">The generated nodes will be filled into this collection</param>
public void Expand(WorldState currentNode, ICollection<WorldState> children)
{
    this.Expand(currentNode, 0, children, new HashSet<Move>()); // TODO: Need to think whether HashSet is the correct option here.
}
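// The recursive overload that the wrapper above calls is not shown in this snippet. The
// following self-contained toy (a 1-D corridor with made-up rules, not the project's code)
// illustrates the agent-by-agent branching the summary describes: each agent in turn picks
// a move, moves that collide with the choices of earlier agents are skipped, and a child is
// emitted once every agent has moved. The real code also handles edge (swap) conflicts via
// the HashSet<Move>, which this toy ignores.
using System;
using System.Collections.Generic;

static class ExpansionSketch
{
    // Generates every combination of per-agent moves (-1, 0, +1 on a line)
    // in which no two agents end up on the same cell.
    static void Expand(int[] positions, int agentIndex,
                       HashSet<int> occupied, List<int[]> children)
    {
        if (agentIndex == positions.Length)
        {
            children.Add((int[])positions.Clone()); // every agent has moved: a complete child
            return;
        }
        int original = positions[agentIndex];
        foreach (int delta in new[] { -1, 0, +1 })
        {
            int target = original + delta;
            if (occupied.Contains(target)) // an earlier agent already chose this cell
                continue;
            positions[agentIndex] = target;
            occupied.Add(target);
            Expand(positions, agentIndex + 1, occupied, children);
            occupied.Remove(target);
            positions[agentIndex] = original;
        }
    }

    static void Main()
    {
        var children = new List<int[]>();
        Expand(new[] { 0, 2 }, 0, new HashSet<int>(), children);
        Console.WriteLine(children.Count + " children generated"); // 8 for this instance
    }
}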
public override void Expand(WorldState nodeP)
{
    var node = (WorldStateForPartialExpansion)nodeP;

    if (node.isAlreadyExpanded() == false)
    {
        node.calcSingleAgentDeltaFs(instance, this.IsValid);
        expandedFullStates++;
        node.alreadyExpanded = true;
        node.targetDeltaF = 0; // Assuming a consistent heuristic (as done in the paper), the min delta F is zero.
        node.remainingDeltaF = node.targetDeltaF; // Just for the hasChildrenForCurrentDeltaF call.
        // DeltaF=0 may not be possible if all agents have obstacles between their location and the goal.
        while (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() == false)
        {
            node.targetDeltaF++;
            node.remainingDeltaF = node.targetDeltaF;
        }
        if (node.hasMoreChildren() == false) // Node has no possible children at all
        {
            node.Clear();
            return;
        }
    }
    //Debug.Print("Expanding node " + node);

    // If this node was already expanded, note that its h was already increased,
    // so the deltaF still refers to its original h.
    base.Expand(node);

    node.targetDeltaF++; // This delta F was exhausted
    node.remainingDeltaF = node.targetDeltaF;
    while (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() == false)
    {
        node.targetDeltaF++;
        node.remainingDeltaF = node.targetDeltaF;
    }

    if (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() &&
        node.h + node.g + node.targetDeltaF <= this.maxCost)
    {
        // Increment h before re-insertion into the open list.
        // Re-compute even if the heuristic used is SIC, since this may be a second expansion.
        int sicEstimate = (int)SumIndividualCosts.h(node, this.instance);
        if (node.h < sicEstimate + node.targetDeltaF)
        {
            // Assuming the heuristic used doesn't give a lower estimate than SIC for each and every one
            // of the node's children (an OK assumption, since SIC is quite basic and no heuristic we use
            // is ever worse than it), the current target deltaF is really exhausted: the deltaG is always
            // correct, and the deltaH predicted by SIC is less than or equal to the final deltaH.
            // So if the heuristic gives the same estimate as SIC for this node
            // (which mainly happens when SIC happens to give a perfect estimate),
            // we can increase the node's h to SIC + targetDeltaF.
            node.h = sicEstimate + node.targetDeltaF;
        }

        // Re-insert the node into the open list.
        openList.Add(node);
    }
    else
    {
        node.Clear();
    }
}
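// A self-contained toy (made-up numbers, not the project's classes) of the layering idea
// behind the partial expansion above: each agent has a small table of per-move delta F
// values (what calcSingleAgentDeltaFs computes in the real code), and children are
// generated one target total delta F at a time, so the cheapest children are emitted first
// and the parent can be re-inserted into OPEN to produce the next layer later.
using System;
using System.Collections.Generic;
using System.Linq;

static class PartialExpansionSketch
{
    // Per-agent options: each value is the delta F a particular move of that agent would add.
    static readonly int[][] agentDeltaFs =
    {
        new[] { 0, 1, 2 }, // agent 0
        new[] { 0, 2 },    // agent 1
    };

    // Enumerate only the move combinations whose delta F values sum exactly to targetDeltaF.
    static IEnumerable<int[]> ChildrenFor(int targetDeltaF, int agent = 0, int spent = 0)
    {
        if (agent == agentDeltaFs.Length)
        {
            if (spent == targetDeltaF)
                yield return new int[0];
            yield break;
        }
        foreach (int d in agentDeltaFs[agent])
        {
            if (spent + d > targetDeltaF)
                continue; // prune: deltas are non-negative, so the sum can only grow
            foreach (int[] rest in ChildrenFor(targetDeltaF, agent + 1, spent + d))
                yield return new[] { d }.Concat(rest).ToArray();
        }
    }

    static void Main()
    {
        for (int targetDeltaF = 0; targetDeltaF <= 4; ++targetDeltaF)
            Console.WriteLine("deltaF=" + targetDeltaF + ": " + ChildrenFor(targetDeltaF).Count() + " children");
        // deltaF=0: 1, deltaF=1: 1, deltaF=2: 2, deltaF=3: 1, deltaF=4: 1
    }
}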
protected override WorldState CreateSearchNode(WorldState from)
{
    return new WorldStateForPartialExpansion((WorldStateForPartialExpansion)from);
}