/// <summary>
/// Creates an MCTS solver for the given planning domain.
/// Builds the search tree root from the domain's initial state, configures a
/// composite rollout policy (random walk plus several heuristic-guided
/// searches), and sets up the plan evaluator.
/// </summary>
/// <param name="dom">planning domain to solve</param>
/// <param name="h">heuristic used by the plan evaluator</param>
public MCTSSolver(Domain dom, Heuristic h) {
    this.dom = dom;
    this.h = h;
    this.root = TreeNode.createRoot(dom.initialState);

    // NOTE: TreeNode keeps the domain and solver in static fields, so only
    // one MCTS solver may run at a time (known limitation — original TODO).
    TreeNode.dom = dom;
    TreeNode.solver = this;

    // No incumbent solution yet; any evaluated plan will beat int.MaxValue.
    bestPlan = null;
    bestValue = int.MaxValue;

    this.maxSimulationLength = 50;

    // Weighted FF heuristic driving the informed rollout policies below.
    // (Earlier experiments also tried NotAccomplishedGoalCount, plain A*,
    // f-limited A*, and standalone greedy/beam policies.)
    Heuristic simHeuristic = new WeightedHeuristic(new FFHeuristic(dom), 10);

    // Rollouts rotate through several policies, from cheap random walks to
    // heuristic-guided searches; registration order is preserved.
    CompositeSimulationPolicy rollouts = new CompositeSimulationPolicy();
    rollouts.addPolicy(new RandomSimulationPolicy(dom, maxSimulationLength));
    rollouts.addPolicy(new HeuristicGreedySearch(new FFHeuristic(dom), dom, maxSimulationLength));
    rollouts.addPolicy(new AStarSimulationPolicy(simHeuristic, dom, maxSimulationLength));
    rollouts.addPolicy(new BeamSearchPolicy(simHeuristic, dom, 2));
    this.simulationPolicy = rollouts;

    this.ev = new HeuristicPlanEvaluator(dom, h, maxSimulationLength);
}
/// <summary>
/// Creates an empty plan for the given domain.
/// The action sequence starts out empty; actions are appended as the plan
/// is built, and the supplied evaluator scores the plan.
/// </summary>
/// <param name="d">planning domain this plan belongs to</param>
/// <param name="ev">evaluator used to score this plan</param>
public Plan(Domain d, PlanEvaluator ev) {
    this.dom = d;
    this.ev = ev;
    this.actions = new List<int>();
}