/// <summary>
/// Copy-constructs a partial-expansion search node from an existing node.
/// The input is expected to already be a WorldStateForPartialExpansion.
/// </summary>
protected override WorldState CreateSearchNode(WorldState from)
{
    var source = (WorldStateForPartialExpansion)from;
    return new WorldStateForPartialExpansion(source);
}
// Intentionally a no-op — presumably solution printing is handled elsewhere
// (TODO confirm; if it is never meant to print, consider removing the method).
private void PrintSolution(WorldState end) { }
/// <summary>
/// Tie-breaking comparison between this node and another node with equal f.
/// Returns -1 if this node should be preferred, 1 if the other should, 0 if equal.
/// Order of criteria: goal nodes first, then fewer conflicting agents,
/// then fewer total conflicts, then larger g (deeper nodes).
/// </summary>
public int TieBreak(WorldState that)
{
    // Goal nodes win outright. Both directions are checked explicitly to keep
    // the comparison antisymmetric — otherwise goalA<goalB and goalB<goalA.
    bool thisIsGoal = this.GoalTest();
    bool thatIsGoal = that.GoalTest();
    if (thisIsGoal && !thatIsGoal)
        return -1;
    if (!thisIsGoal && thatIsGoal)
        return 1;

    // TODO: Ideally, prefer nodes where the minimum vertex cover of the conflict
    // graph is smaller. Compute an MVC of the conflict graph without the node's
    // agents in the CBS node before running the low level, and only compare the
    // number of agents the node conflicts with that aren't in the MVC. Maybe even
    // compute the MVC of the cardinal conflict graph and of the all-conflict graph
    // separately and tie-break first according to the number of agents we conflict
    // with that aren't in the MVC of the conflict graph and then the number of
    // agents we conflict with that aren't in the cardinal conflict graph.
    if (this.conflictCounts != null && that.conflictCounts != null) // Currently even when not under ID or CBS, the root node has this.conflictCounts != null
    {
        // Prefer nodes that conflict with fewer agents — resolving one conflict
        // often resolves others automatically thanks to conflict avoidance,
        // especially if the cost increases.
        int myConflictingAgents = this.conflictCounts.Count;
        int otherConflictingAgents = that.conflictCounts.Count;
        if (myConflictingAgents != otherConflictingAgents)
            return myConflictingAgents < otherConflictingAgents ? -1 : 1;
    }

    // Prefer nodes with fewer conflicts overall — the probability that some of
    // them are cardinal is lower.
    if (this.sumConflictCounts != that.sumConflictCounts)
        return this.sumConflictCounts < that.sumConflictCounts ? -1 : 1;

    // NOTE: an M-Star tie-break on collisionSets.maxRank used to live here, but
    // was disabled: the collision sets change during collision-set backpropagation
    // and closed-list hits while the node is still in the open list (partial
    // expansion makes that possible even before expansion), so maxRank isn't
    // stable enough to break ties with.

    // Everything else being equal, prefer nodes with a larger g — they're closer
    // to the goal, so fewer nodes will probably be generated under them.
    if (this.g != that.g)
        return this.g > that.g ? -1 : 1;

    return 0;
}
/// <summary>
/// Partially expands a node (EPEA*-style): only children whose delta-F equals the
/// node's current targetDeltaF are generated. If the node may still have children
/// at higher delta-F values, it is re-inserted into the open list with an
/// incremented targetDeltaF instead of being fully expanded at once.
/// </summary>
public override void Expand(WorldState nodeP)
{
    var node = (WorldStateForPartialExpansion)nodeP;
    bool wasAlreadyExpanded = true;
    if (node.IsAlreadyExpanded() == false)
    {
        // First time this node is expanded: compute per-agent delta-F tables.
        node.calcSingleAgentDeltaFs(instance, this.IsValid);
        expandedFullStates++;
        node.alreadyExpanded = true;
        wasAlreadyExpanded = false;
        //node.hBonus = 0; // Locking any hbonus that doesn't come from partial expansion
        node.targetDeltaF = 0; // Assuming a consistent heuristic (as done in the paper), the min delta F is zero.
        node.remainingDeltaF = node.targetDeltaF; // Just for the following hasChildrenForCurrentDeltaF call.
        // DeltaF=0 may not be possible if all agents have obstacles between their
        // location and the goal — advance to the first delta-F that has children.
        while (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() == false)
        {
            node.targetDeltaF++;
            node.remainingDeltaF = node.targetDeltaF;
        }
        if (node.hasMoreChildren() == false) // Node has no possible children at all
        {
            node.Clear();
            return;
        }
    }
    // If this node was already expanded, notice its h was updated, so the deltaF
    // refers to its original H.
    do
    {
        if (debug)
        {
            Debug.WriteLine($"Generating children with target deltaF={node.targetDeltaF}. Actual delta F may be lower because the parent node may have various H boosts.");
        }
        // base.Expand computes every child's h value from scratch, even though we
        // could compute it incrementally from intermediate nodes.
        base.Expand(node);
        if (node.IsAlreadyExpanded() == false)
        {
            // Node was cleared during expansion.
            // It's unnecessary and unsafe to continue to prepare it for the next
            // partial expansion.
            return; // TODO: Is there a prettier way to do this?
        }
        //if (wasAlreadyExpanded)
        //{
        //    // Only doing it after expansion so that the children get the higher h
        //    node.h -= node.targetDeltaF; // This way we retain any BPMX or other h boosts, allowing the new targetDeltaF to fully add to the base h
        //    node.hBonus -= node.targetDeltaF;
        //}
        // FIXME: Why is the block above commented out? It was the only use of
        // wasAlreadyExpanded, so if removing it is correct, also remove
        // wasAlreadyExpanded.

        // This node's target delta F was exhausted - increment it until a target
        // delta F with actual children is found.
        do
        {
            node.targetDeltaF++;
            node.remainingDeltaF = node.targetDeltaF; // Just for the following hasChildrenForCurrentDeltaF call.
        } while (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() == false);
    } while (node.hasMoreChildren() && node.g + node.sic + node.targetDeltaF <= node.minGoalCost); // Generate more children immediately if we have a lower bound on the solution depth
    if (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() && node.g + node.sic + node.targetDeltaF <= this.maxSolutionCost)
    {
        // Assuming the heuristic used doesn't give a lower estimate than SIC for
        // each and every one of the node's children (an ok assumption since SIC is
        // quite basic, no heuristic we use is ever worse than it), then the current
        // target deltaF is really exhausted, since the deltaG is always correct,
        // and the deltaH predicted by SIC is less than or equal to the finalDeltaH.
        // So if the heuristic gives the same estimate as SIC for this node (and
        // that mainly happens when SIC happens to give a perfect estimate), we can
        // increment the node's f to g+SIC+targetDeltaH.
        // Re-insert node into open list.
        openList.Add(node);
        if (this.debug)
        {
            Debug.WriteLine($"Re-inserting node {node.generated} into the open list (with targetDeltaF: {node.targetDeltaF})");
            Debug.WriteLine("");
        }
    }
    else
    {
        node.Clear();
        //TODO: Think about this.surplusNodesAvoided. Can it be correctly incremented?
    }
}
/// <summary>
/// Builds the pattern database, storing the heuristic table in memory.
/// Runs an uninformed breadth-first search backwards from the goal (each agent's
/// current location is swapped with its goal), recording for every reachable
/// abstract state the cheapest cost found so far.
/// </summary>
public override void build()
{
    // Create the subproblem pertaining to this additive pattern database.
    // We do this by taking the problem instance and swapping the initial state
    // with the goal. We also save a copy of our agents data structure, because
    // during the building process our state already is a projection.
    WorldState goal = new WorldState(problem.agents, agentsToConsider);
    foreach (AgentState ags in goal.allAgentsState)
    {
        ags.SwapCurrentWithGoal();
    }
    List<uint> vBackup = agentsToConsider;
    agentsToConsider = new List<uint>(goal.allAgentsState.Length);
    for (uint i = 0; i < goal.allAgentsState.Length; ++i)
    {
        agentsToConsider.Add(i);
    }

    // Byte.MaxValue marks an uninitialized entry of the heuristic table.
    // Note: for this particular domain the first generation of a state during BFS
    // is NOT guaranteed to be via a shortest path, so entries may be improved when
    // the same state is encountered again (see the update condition below).
    table = new Byte[permutations[0] * (problem.numLocations + 1)];
    for (int i = 0; i < table.Length; ++i)
    {
        table[i] = Byte.MaxValue;
    }

    Context c = new Context();
    c.Initialize("q1.tmp", "q2.tmp");
    table[hash(goal)] = 0;
    c.Write(goal);
    c.NextLevel();
    while (c.nodes > 0)
    {
        for (ulong n = 0; n < c.nodes; ++n)
        {
            // Pull the next frontier node from the queue file, generate its
            // children and write them to the next-level queue file.
            // SECURITY NOTE(review): BinaryFormatter is obsolete and insecure
            // (removed in .NET 9). These queue files are local temporaries, but
            // migrating Context to a safer serializer is advisable.
            List<WorldState> vChildren = new List<WorldState>();
            WorldState tws = (WorldState)c.binaryFormatter.Deserialize(c.fsQueue);
            // (Removed an unused `UInt32 nHashParent = hash(tws);` local — it
            // computed a hash per dequeued node and never used it.)
            this.Expand(tws, vChildren);
            foreach (WorldState i in vChildren)
            {
                UInt32 nHash = hash(i);
                Byte nCandidateValue;
                if (offsetFromSingleShortestPath)
                {
                    // We store only the difference in heuristic value between the
                    // single-agent shortest path heuristic and our pattern database
                    // heuristic. The hope is that the resulting value will always
                    // fit within the range of a single byte.
                    int nSingleAgentShortestPath = 0;
                    foreach (var a in i.allAgentsState)
                    {
                        nSingleAgentShortestPath += this.problem.GetSingleAgentOptimalCost(a);
                    }
                    int nDifference = i.g - nSingleAgentShortestPath;
                    Trace.Assert(nDifference >= 0);
                    Trace.Assert(nDifference < Byte.MaxValue);
                    nCandidateValue = (Byte)nDifference;
                }
                else
                {
                    Trace.Assert(i.g < Byte.MaxValue);
                    nCandidateValue = (Byte)i.g;
                }
                // Keep the child only if it improves the stored estimate. This is
                // also true on first encounter, since Byte.MaxValue marks
                // uninitialized entries.
                if (nCandidateValue < table[nHash])
                {
                    c.Write(i);
                    table[nHash] = nCandidateValue;
                }
            }
        }
        c.NextLevel();
    }
    c.Clear();
    agentsToConsider = vBackup;
}
/// <summary>
/// Appends a node to the next-level queue file and counts it.
/// NOTE(review): relies on BinaryFormatter, which is obsolete/insecure
/// (removed in .NET 9) — consider migrating to a safer serializer.
/// </summary>
public void Write(WorldState tws)
{
    binaryFormatter.Serialize(fsNext, tws);
    nextNodesCount += 1;
}