/// <summary>
/// Exercises WeightedHeuristic wrapping a StripsHeuristic on both SAS and PDDL
/// encodings of the Gripper problem: checks weighted values for states, goal
/// conditions and relative states, the reported name, and the call counter.
/// </summary>
public void TC_WeightedHeuristic()
{
    var sasProblem = new Planner.SAS.Problem(new SASInputData(GetFilePath("TC_Gripper.sas")));
    var pddlProblem = new Planner.PDDL.Problem(new PDDLInputData(GetFilePath("TC_Gripper_D.pddl"), GetFilePath("TC_Gripper_P.pddl")));

    // SAS variant: inner STRIPS estimate scaled by weight 3.
    var sasHeuristic = new WeightedHeuristic(new StripsHeuristic(sasProblem), 3);
    Assert.AreEqual(12, sasHeuristic.GetValue(sasProblem.GetInitialState()));
    Assert.AreEqual(12, sasHeuristic.GetValue(new Planner.SAS.State(0, 0, 0, 0, 0, 0, 0)));
    Assert.AreEqual(6, sasHeuristic.GetValue(new Planner.SAS.State(0, 1, 1, 0, 0, 0, 0)));
    Assert.AreEqual(0, sasHeuristic.GetValue(new Planner.SAS.State(1, 1, 1, 1, 0, 0, 0)));
    Assert.AreEqual(12, sasHeuristic.GetValue(sasProblem.GetGoalConditions()));
    Assert.AreEqual(12, sasHeuristic.GetValue(sasProblem.GetGoalConditions().GetCorrespondingRelativeStates(sasProblem).First()));
    Assert.AreEqual("Weighted STRIPS Heuristic (weight = 3)", sasHeuristic.GetName());
    // Six GetValue calls were issued above.
    Assert.AreEqual(6, sasHeuristic.GetCallsCount());

    // PDDL variant: inner STRIPS estimate scaled by weight 9.
    var pddlHeuristic = new WeightedHeuristic(new StripsHeuristic(pddlProblem), 9);
    Assert.AreEqual(18, pddlHeuristic.GetValue(pddlProblem.GetInitialState()));
    Assert.AreEqual(18, pddlHeuristic.GetValue(pddlProblem.GetGoalConditions()));
    Assert.AreEqual(18, pddlHeuristic.GetValue(pddlProblem.GetGoalConditions().GetCorrespondingRelativeStates(pddlProblem).First()));
    Assert.AreEqual("Weighted STRIPS Heuristic (weight = 9)", pddlHeuristic.GetName());
    // Three GetValue calls were issued above.
    Assert.AreEqual(3, pddlHeuristic.GetCallsCount());
}
/// <summary>
/// Creates an MCTS solver for the given planning domain.
/// Builds the search-tree root, wires the static TreeNode context, and sets up
/// a composite rollout policy plus a heuristic plan evaluator.
/// </summary>
/// <param name="dom">Planning domain to be solved.</param>
/// <param name="h">Heuristic passed to the plan evaluator.</param>
public MCTSSolver(Domain dom, Heuristic h)
{
    this.dom = dom;
    this.h = h;
    this.root = TreeNode.createRoot(dom.initialState);

    // Static context shared by all tree nodes.
    // TODO only 1 MCTS solver may run at the same time!!
    TreeNode.dom = dom;
    TreeNode.solver = this;

    // No incumbent plan yet.
    bestPlan = null;
    bestValue = int.MaxValue;

    //this.perfoHeuristic = new BestPerformancePolicy(dom);
    this.maxSimulationLength = 50;

    // FF-based heuristic (weight 10) driving the informed rollout policies.
    //Heuristic simulationHeuristic = new WeightedHeuristic(new NotAccomplishedGoalCount(dom), 10);
    Heuristic simulationHeuristic = new WeightedHeuristic(new FFHeuristic(dom), 10);

    // Other single-policy setups tried previously:
    //this.simulationPolicy = new RandomSimulationPolicy(dom, maxSimulationLength);
    //this.simulationPolicy = new HeuristicGreedySearch(new NotAccomplishedGoalCount(dom), dom, maxSimulationLength);
    //this.simulationPolicy = new HeuristicGreedySearch(new FFHeuristic(dom), dom, maxSimulationLength);
    //this.simulationPolicy = new AStarSimulationPolicy(simulationHeuristic, dom, maxSimulationLength);
    //this.simulationPolicy = new F_LimitedAStarSimulationPolicy(simulationHeuristic, dom, maxSimulationLength, 2 * maxSimulationLength);
    //this.simulationPolicy = new BeamSearchPolicy(simulationHeuristic, dom, 2);

    // Rollouts combine random, greedy, A*-limited and beam-search policies.
    CompositeSimulationPolicy rolloutPolicy = new CompositeSimulationPolicy();
    rolloutPolicy.addPolicy(new RandomSimulationPolicy(dom, maxSimulationLength));
    rolloutPolicy.addPolicy(new HeuristicGreedySearch(new FFHeuristic(dom), dom, maxSimulationLength));
    rolloutPolicy.addPolicy(new AStarSimulationPolicy(simulationHeuristic, dom, maxSimulationLength));
    rolloutPolicy.addPolicy(new BeamSearchPolicy(simulationHeuristic, dom, 2));
    this.simulationPolicy = rolloutPolicy;

    this.ev = new HeuristicPlanEvaluator(dom, h, maxSimulationLength);
}