        /// <summary>
        /// Computes a heuristic estimate for the given node by running a bounded CBS search.
        /// Assumes g of node was already calculated!
        /// </summary>
        /// <param name="s">The node to estimate</param>
        /// <returns>The heuristic estimate</returns>
        public uint h(WorldState s)
        {
            int sicEstimate = (int)SumIndividualCosts.h(s, this.instance);

            if (sicEstimate == 0)
            {
                return(0);
            }
            int targetCost = s.g + sicEstimate + this.minAboveSic; // Ariel's idea - using SIC directly here to calc the target
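            // For illustration (hypothetical numbers): if s.g == 4, sicEstimate == 10 and
            // this.minAboveSic == 1, then targetCost == 15, and the bounded CBS run below is
            // allowed to stop as soon as it reaches a solution of cost 15.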

            // CBS gets an explicitly partially solved state - the agents' g may be greater than zero.
            // So the cost CBS is going to calc is not of this node but of the initial problem instance,
            // this is accounted for later too.
            // (Notice node usually has a (possibly very wrong) h set already - copied from the parent)
            return(this.h(s, targetCost, sicEstimate));
        }
        /// <summary>
        /// Computes a heuristic by running a bounded CBS search from the given node.
        /// Assumes g of node was already calculated and h isn't zero.
        /// </summary>
        /// <param name="s"></param>
        /// <param name="targetCost">Stop when the target cost is reached</param>
        /// <param name="sicEstimate">For a debug assertion.</param>
        /// <param name="lowLevelGeneratedCap">The number of low level nodes to generated</param>
        /// <param name="milliCap">The process total millisecond count to stop at</param>
        /// <param name="resume">Whether to resume the last search instead of solving the given node. Assumes the last search was from the same node as the given node.</param>
        /// <returns></returns>
        protected uint h(WorldState s, int targetCost, int sicEstimate = -1, int lowLevelGeneratedCap = -1, int milliCap = int.MaxValue, bool resume = false)
        {
            double start = this.runner.ElapsedMilliseconds();

            ProblemInstance sAsProblemInstance;

            if (resume == false)
            {
                this.cbs.Clear();
                sAsProblemInstance = s.ToProblemInstance(this.instance);
                this.cbs.Setup(sAsProblemInstance,
                               Math.Max(s.makespan,  // This forces must constraints to be upheld when dealing with A*+OD nodes,
                                                     // at the cost of forcing every agent to move when a goal could be found earlier with all must constraints upheld.
                                        s.minDepth), // No point in finding shallower goal nodes
                               this.runner);

                if (this.cbs.openList.Count > 0 && this.cbs.topMost)
                {
                    if (sicEstimate == -1)
                    {
                        sicEstimate = (int)SumIndividualCosts.h(s, this.instance);
                    }
                    Debug.Assert(((CbsNode)this.cbs.openList.Peek()).totalCost - s.g == (int)sicEstimate,
                                 "Total cost of CBS root not same as SIC + g");
                    // Notice we're subtracting s.g, not sAsProblemInstance.g.
                    // Must constraints we put may have forced some moves,
                    // and we shouldn't count them as part of the estimate.
                }
            }
            else
            {
                sAsProblemInstance = this.cbs.GetProblemInstance();
            }

            if (lowLevelGeneratedCap == -1)
            {
                // Rough estimate of the branching factor:
                lowLevelGeneratedCap = (int)Math.Pow(Constants.NUM_ALLOWED_DIRECTIONS, this.instance.m_vAgents.Length);
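                // For illustration (hypothetical numbers): with NUM_ALLOWED_DIRECTIONS == 5
                // (the four grid moves plus waiting in place) and 3 agents, the cap is
                // 5^3 == 125 generated low-level nodes, roughly the size of one full joint expansion.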
            }

            // Calc the h:
            this.cbs.targetCost           = targetCost;
            this.cbs.milliCap             = milliCap;
            this.cbs.lowLevelGeneratedCap = lowLevelGeneratedCap;
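            // With these caps set, Solve() below stops early once targetCost is reached, or once
            // the low-level node budget or the millisecond cap is exhausted.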

            bool solved = this.cbs.Solve();

            if (solved && this.reportSolution)
            {
                // We're always going to find a proper goal since we respected the node's minDepth
                s.SetSolution(this.cbs.GetSinglePlans());
                s.SetGoalCost(this.cbs.totalCost); // We have to do it explicitly.
                // We can't just change the node's g to g + cbs.totalCost and its h to zero
                // because approaches like BPMX or maximizing PDBs might "fix" the h back.
                // So instead h is bumped to its maximum value when this method returns.
                s.SetSingleCosts(this.cbs.GetSingleCosts());
                this.nodesSolved++;
            }

            double end = this.runner.ElapsedMilliseconds();

            this.totalRuntime += end - start;
            this.nCalls++;

            this.cbs.AccumulateStatistics();
            this.cbs.ClearStatistics();

            if (this.cbs.totalCost < 0) // A timeout is legitimately possible if very little time was left to begin with,
                                        // and a no solution failure may theoretically be possible too.
            {
                return(this.cbs.GetHeuristic().h(s));
            }

            Debug.Assert(this.cbs.totalCost >= s.g, "CBS total cost " + this.cbs.totalCost + " is smaller than starting problem's initial cost " + s.g + "."); // Equality is allowed: even though this isn't a goal node (otherwise this method wouldn't have been called),
                                                                                                                                                               // a non-goal node can have h==0 if a minimum depth is specified and all agents have already reached
                                                                                                                                                               // their goals in this node, but the depth isn't large enough yet.

            uint cbsEstimate = (uint)(this.cbs.totalCost - s.g);

            // Discounting the moves the agents did before we started solving
            // (This is easier than making a copy of each AgentState just to zero its lastMove.time)

            this.totalImprovement += (int)(cbsEstimate - s.h); // Not computing the difference from SIC, to avoid over-counting, since a node can be improved twice.
                                                               // Can be negative if the base heuristic was improved by:
                                                               // - Partial expansion
                                                               // - BPMX

            if (validate)
            {
                // Brute-force validation of admissibility of the estimate:
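                // (Runs an optimal EPEA*+SIC search on the same sub-problem and asserts that the
                //  bounded CBS cost never exceeds the true optimal cost.)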
                var sic = new SumIndividualCosts();
                sic.init(this.instance, this.vAgents);
                var epeastarsic = new AStarWithPartialExpansion(sic);
                epeastarsic.Setup(sAsProblemInstance, s.makespan, runner);
                bool epeastarsicSolved = epeastarsic.Solve();
                if (epeastarsicSolved)
                {
                    Debug.Assert(epeastarsic.totalCost - s.g >= this.cbs.totalCost - s.g, "Inadmissible!!");
                }
            }

            return(cbsEstimate);
        }
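
        // A minimal usage sketch (hypothetical caller, not part of this class): assuming
        // `heuristic` is an instance of this class and `node` is a WorldState whose g was
        // already set by the high-level search, the caller simply asks for
        //
        //     uint estimate = heuristic.h(node);
        //
        // One plausible way to use the result is to take the maximum of this estimate and the
        // (possibly very wrong) h the node copied from its parent, since either may be tighter.
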
        public override void Expand(WorldState nodeP)
        {
            var node = (WorldStateForPartialExpansion)nodeP;

            if (node.isAlreadyExpanded() == false)
            {
                node.calcSingleAgentDeltaFs(instance, this.IsValid);
                expandedFullStates++;
                node.alreadyExpanded = true;
                node.targetDeltaF    = 0;                                                     // Assuming a consistent heuristic (as done in the paper), the min delta F is zero.
                node.remainingDeltaF = node.targetDeltaF;                                     // Just for the hasChildrenForCurrentDeltaF call.
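                // The loop below skips over deltaF values that no child can realize, so the first
                // batch of children generated by Expand really is the cheapest one available.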
                while (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() == false) // DeltaF=0 may not be possible if all agents have obstacles between their location and the goal
                {
                    node.targetDeltaF++;
                    node.remainingDeltaF = node.targetDeltaF;
                }
                if (node.hasMoreChildren() == false) // Node has no possible children at all
                {
                    node.Clear();
                    return;
                }
            }
            //Debug.Print("Expanding node " + node);

            // If this node was already expanded, note that its h was updated, so the deltaF values refer to its original h

            base.Expand(node);

            node.targetDeltaF++; // This delta F was exhausted
            node.remainingDeltaF = node.targetDeltaF;

            while (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() == false)
            {
                node.targetDeltaF++;
                node.remainingDeltaF = node.targetDeltaF;
            }

            if (node.hasMoreChildren() && node.hasChildrenForCurrentDeltaF() && node.h + node.g + node.targetDeltaF <= this.maxCost)
            {
                // Increment H before re-insertion into open list
                int sicEstimate = (int)SumIndividualCosts.h(node, this.instance); // Re-compute even if the heuristic used is SIC since this may be a second expansion
                if (node.h < sicEstimate + node.targetDeltaF)
                {
                    // Assuming the heuristic used doesn't give a lower estimate than SIC for each and every one of the node's children,
                    // (an ok assumption since SIC is quite basic, no heuristic we use is ever worse than it)
                    // then the current target deltaF is really exhausted, since the deltaG is always correct,
                    // and the deltaH predicted by SIC is less than or equal to the finalDeltaH.
                    // So if the heuristic gives the same estimate as SIC for this node
                    // (and that mainly happens when SIC happens to give a perfect estimate),
                    // we can increment the node's h to SIC + targetDeltaF (matching the assignment below).
                    node.h = sicEstimate + node.targetDeltaF;
                }
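                // With h raised this way, the re-inserted node's f reflects the cost of its next,
                // not-yet-generated batch of children, so it isn't popped again prematurely.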

                // Re-insert node into open list
                openList.Add(node);
            }
            else
            {
                node.Clear();
            }
        }