// Generates the PlanStateRepresentation.cs source file for the given problem definition.
void GeneratePlanStateRepresentation(string outputPath, ProblemDefinition problemDefinition)
        {
            // Collect every trait used by the problem, plus the built-in PlanningAgent trait.
            // Materialize with ToList(): the query was previously enumerated twice (Count()
            // below and the template render), re-running GetRuntimePropertyType each pass.
            var traits = problemDefinition.GetTraitsUsed()
                         .Append(PlannerAssetDatabase.TraitDefinitions.FirstOrDefault(t => t.name == nameof(PlanningAgent)))
                         .Distinct()
                         .Where(t => t != null)
                         .Select(t => new
            {
                name = t.name,
                // Object-reference properties become relations in the generated state representation
                relations = t.Properties.Where(p =>
                                               p.Type == typeof(GameObject) || p.Type == typeof(Entity))
                            .Select(p => new { name = p.Name }),
                // Remaining properties with a resolvable runtime type become plain attributes
                attributes = t.Properties.Where(p =>
                                                p.Type != typeof(GameObject) && p.Type != typeof(Entity) && GetRuntimePropertyType(p) != null)
                             .Select(p => new
                {
                    field_type = GetRuntimePropertyType(p),
                    field_name = p.Name
                })
            })
                         .ToList();

            int numTraits = traits.Count;
            // Round the trait count up to the next multiple of 4 (alignment for the generated layout)
            int traitsAlignmentSize = ((numTraits + 3) / 4) * 4;

            var result = m_CodeRenderer.RenderTemplate(PlannerResources.instance.TemplateStateRepresentation, new
            {
                @namespace     = $"{TypeHelper.StateRepresentationQualifier}.{problemDefinition.Name}",
                trait_list     = traits,
                num_traits     = numTraits,
                alignment_size = traitsAlignmentSize
            });

            SaveToFile(Path.Combine(outputPath, TypeHelper.StateRepresentationQualifier, problemDefinition.Name, "PlanStateRepresentation.cs"), result);
        }
        // Renders the plan-executor source file for the given problem definition.
        // NOTE(review): the output file name below uses `definition.name` (lowercase — the
        // Unity asset name) while `plan_name` uses `definition.Name`; sibling generators use
        // `Name` (or a fixed file name) for paths — confirm whether this is intentional.
        void GeneratePlanner(ProblemDefinition definition, string planName, string outputPath, bool includeEnums = false)
        {
            // Use the configured custom estimator type if present, else the built-in default.
            var customCumulativeRewardEstimator = definition.CustomCumulativeRewardEstimator;
            var rewardEstimatorTypeName         = string.IsNullOrEmpty(customCumulativeRewardEstimator) ? "DefaultCumulativeRewardEstimator" : $"global::{customCumulativeRewardEstimator}";

            // Fallback reward-estimate bounds taken from the definition's defaults.
            var defaultCumulativeRewardEstimator = new
            {
                lower = definition.DefaultEstimateLower,
                avg   = definition.DefaultEstimateAverage,
                upper = definition.DefaultEstimateUpper,
            };

            var result = m_CodeRenderer.RenderTemplate(PlannerResources.instance.TemplatePlanExecutor, new
            {
                @namespace                     = $"{TypeHelper.PlansQualifier}.{planName}",
                plan_name                      = definition.Name,
                actions                        = definition.ActionDefinitions,
                traits                         = definition.GetTraitsUsed(),
                reward_estimator               = rewardEstimatorTypeName,
                default_reward_estimate        = defaultCumulativeRewardEstimator,
                terminations                   = definition.StateTerminationDefinitions.Where(t => t != null).Select(t => t.Name),
                include_enums                  = includeEnums,
                state_representation_qualifier = TypeHelper.StateRepresentationQualifier
            });

            SaveToFile(Path.Combine(outputPath, TypeHelper.PlansQualifier, planName, $"{definition.name}Executor.cs"), result);
        }
        // Validates a problem definition: no duplicate action names, and any custom
        // cumulative reward estimator must resolve to a known type. Reports problems
        // through the errorLogged event and returns false if any check fails.
        bool IsPlanAssetValid(ProblemDefinition problemDefinition, Type[] customTypes = null)
        {
            bool planValid = true;

            // Check for duplicate actions. HashSet.Add is O(1) and reports whether the
            // name was already present, replacing the previous O(n^2) List.Contains scan.
            var declaredActions = new HashSet<string>();

            foreach (var action in problemDefinition.ActionDefinitions)
            {
                if (!declaredActions.Add(action.Name))
                {
                    errorLogged?.Invoke($"{action.Name} is a duplicated action.", problemDefinition);
                    planValid = false;
                }
            }

            // When a custom estimator is named, it must be found among the supplied types.
            if (!string.IsNullOrEmpty(problemDefinition.CustomCumulativeRewardEstimator))
            {
                var rewardEstimatorType = customTypes?.FirstOrDefault(t => t.FullName == problemDefinition.CustomCumulativeRewardEstimator);
                if (rewardEstimatorType == null)
                {
                    errorLogged?.Invoke($"Couldn't resolve custom cumulative reward estimator type {problemDefinition.CustomCumulativeRewardEstimator}.", problemDefinition);
                    planValid = false;
                }
            }

            return planValid;
        }
// ===== Example #4 =====
    // Rebuilds the question-button list for the currently selected topic.
    private void InstantiateProblemButtons()
    {
        // Remove buttons left over from a previously shown topic.
        foreach (var oldButton in _questionButtons)
        {
            Destroy(oldButton);
        }
        _questionButtons.Clear();

        for (var index = 0; index < _currentTopic.problems.Length; index++)
        {
            var problem = _currentTopic.problems[index];
            if (problem.isActive != true)
            {
                continue;
            }

            var buttonObject = Instantiate(questionButtonPrefab, questionContentArea.transform);
            _questionButtons.Add(buttonObject);

            var questionButton = buttonObject.GetComponent <QuestionButton>();

            // Copy the loop variable so each click listener captures its own index.
            var capturedIndex = index;
            questionButton.questionButton.onClick.AddListener(() =>
            {
                _currentProblem = _currentTopic.problems[capturedIndex];
                SetupScreen(Screen.Explanation);
                var text = _currentProblem.longDescription;
                contentExplanation.text = text;
                _isPlacementPositioned  = false;
                OnProblemSelected?.Invoke(_currentProblem);
            });

            questionButton.questionImage.sprite = problem.sprite;
            questionButton.titleText.text       = problem.title;
        }
    }
// ===== Example #5 =====
 /// <summary>
 /// SWIG-generated wrapper constructor: creates the native InformedStateSampler
 /// (SWIG_0 overload) and takes ownership of the returned pointer.
 /// </summary>
 /// <exception cref="global::System.Exception">Rethrows any exception pending from the native call.</exception>
 public InformedStateSampler(ProblemDefinition probDefn, uint maxNumberCalls, SWIGTYPE_p_std__functionT_ompl__base__Cost_fF_t costFunc) : this(ompl_basePINVOKE.new_InformedStateSampler__SWIG_0(ProblemDefinition.getCPtr(probDefn), maxNumberCalls, SWIGTYPE_p_std__functionT_ompl__base__Cost_fF_t.getCPtr(costFunc)), true)
 {
     if (ompl_basePINVOKE.SWIGPendingException.Pending)
     {
         throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
     }
 }
// ===== Example #6 =====
 /// <summary>
 /// SWIG-generated wrapper constructor: creates the native InformedStateSampler
 /// (SWIG_1 overload, with an existing informed sampler) and takes ownership of the pointer.
 /// </summary>
 /// <exception cref="global::System.Exception">Rethrows any exception pending from the native call.</exception>
 public InformedStateSampler(ProblemDefinition probDefn, SWIGTYPE_p_std__functionT_ompl__base__Cost_fF_t costFunc, SWIGTYPE_p_std__shared_ptrT_ompl__base__InformedSampler_t infSampler) : this(ompl_basePINVOKE.new_InformedStateSampler__SWIG_1(ProblemDefinition.getCPtr(probDefn), SWIGTYPE_p_std__functionT_ompl__base__Cost_fF_t.getCPtr(costFunc), SWIGTYPE_p_std__shared_ptrT_ompl__base__InformedSampler_t.getCPtr(infSampler)), true)
 {
     if (ompl_basePINVOKE.SWIGPendingException.Pending)
     {
         throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
     }
 }
// ===== Example #7 =====
        // Builds a minimal planning domain on disk for the tests: one trait, one enum,
        // one termination, one action, and a problem definition tying them together.
        public virtual void Setup()
        {
            m_TraitDefinition = DynamicStruct.Create <TraitDefinition>();
            m_TraitDefinition.CreateProperty <int>("FieldA");
            SaveAsset(m_TraitDefinition, Path.Combine(k_TraitAssetsPath, "TraitA.asset"));

            m_EnumDefinition = ScriptableObject.CreateInstance <EnumDefinition>();
            foreach (var enumValue in new[] { "ValueA", "ValueB", "ValueC" })
            {
                m_EnumDefinition.CreateProperty <string>(enumValue);
            }
            SaveAsset(m_EnumDefinition, Path.Combine(k_EnumAssetsPath, "EnumA.asset"));

            SetupTerminationDefinition("TerminationA.asset");
            SetupActionDefinition("ActionA.asset");

            m_ProblemDefinition = ScriptableObject.CreateInstance <ProblemDefinition>();
            m_ProblemDefinition.ActionDefinitions           = new[] { m_ActionDefinition };
            m_ProblemDefinition.StateTerminationDefinitions = new[] { m_StateTerminationDefinition };
            SaveAsset(m_ProblemDefinition, Path.Combine(k_AssetsPath, "PlanA.asset"));

            // Rescan the temp asset folder so the database picks up everything just saved.
            PlannerAssetDatabase.Refresh(new [] { Path.Combine("Assets", "Temp") });
        }
// ===== Example #8 =====
    // Handles selection of a problem: runs its computation, notifies listeners,
    // and instantiates the associated environment and model prefabs.
    private void OnProblemSelected(ProblemDefinition currentQuestion)
    {
        _problemCoroutine = currentQuestion;
        _currentProblem   = currentQuestion.problem;
        _currentProblem.Process();

        OnAnswerValueChange?.Invoke(_currentProblem.Answer);

        // One slot per source model; slots for null models stay empty.
        _instantiatedModels = new GameObject[currentQuestion.models.Length];

        if (currentQuestion.environment != null)
        {
            _instantiatedEnvironment = Instantiate(currentQuestion.environment, transform);
        }

        OnUnitChange?.Invoke(currentQuestion.unit);

        for (var modelIndex = 0; modelIndex < currentQuestion.models.Length; modelIndex++)
        {
            var model = currentQuestion.models[modelIndex];
            if (model != null)
            {
                _instantiatedModels[modelIndex] = Instantiate(model, transform);
            }
        }

        UpdatePosition(0);

        StartCoroutine(nameof(OnUpdateModelsDetails));
    }
// ===== Example #9 =====
 /// <summary>
 /// SWIG-generated wrapper: forwards the problem definition to the native planner.
 /// </summary>
 /// <exception cref="global::System.Exception">Rethrows any exception pending from the native call.</exception>
 public virtual void setProblemDefinition(ProblemDefinition pdef)
 {
     ompl_basePINVOKE.Planner_setProblemDefinition(swigCPtr, ProblemDefinition.getCPtr(pdef));
     if (ompl_basePINVOKE.SWIGPendingException.Pending)
     {
         throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
     }
 }
        /// <summary>
        /// SWIG-generated wrapper: passes the problem definition to the native
        /// PlannerInputStates and returns the native call's result.
        /// </summary>
        public bool use(ProblemDefinition pdef)
        {
            var result = ompl_basePINVOKE.PlannerInputStates_use__SWIG_0(swigCPtr, ProblemDefinition.getCPtr(pdef));

            if (ompl_basePINVOKE.SWIGPendingException.Pending)
            {
                throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
            }
            return result;
        }
// ===== Example #11 =====
        /// <summary>
        /// SWIG-generated wrapper: clones the native problem definition.
        /// A null native pointer maps to a null managed wrapper.
        /// </summary>
        public ProblemDefinition c_clone()
        {
            var cPtr = ompl_basePINVOKE.ProblemDefinition_c_clone(swigCPtr);
            // Wrap before the pending-exception check, matching the generated
            // pattern (the wrapper takes ownership of the pointer either way).
            var clone = (cPtr == global::System.IntPtr.Zero) ? null : new ProblemDefinition(cPtr, true);

            if (ompl_basePINVOKE.SWIGPendingException.Pending)
            {
                throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
            }
            return clone;
        }
// ===== Example #12 =====
        /// <summary>
        /// SWIG-generated wrapper: fetches the problem definition from the native
        /// SimpleSetup. A null native pointer maps to a null managed wrapper.
        /// </summary>
        public ProblemDefinition getProblemDefinition()
        {
            var cPtr = ompl_geometricPINVOKE.SimpleSetup_getProblemDefinition__SWIG_0(swigCPtr);
            // Wrap before the pending-exception check, matching the generated
            // pattern (the wrapper takes ownership of the pointer either way).
            var definition = (cPtr == global::System.IntPtr.Zero) ? null : new ProblemDefinition(cPtr, true);

            if (ompl_geometricPINVOKE.SWIGPendingException.Pending)
            {
                throw ompl_geometricPINVOKE.SWIGPendingException.Retrieve();
            }
            return definition;
        }
// ===== Example #13 =====
        // Builds the demo optimization problem (a parabola centered at {1, 5} with one
        // inequality constraint) and serializes it to the XML file named by `filename`.
        private static void makeAndSaveProblemDefinition()
        {
            var pd = new ProblemDefinition();

            /* Add a design space descriptor so that optimization
             * methods for discrete variables can be used. Here we
             * make a very generous discretization, which amounts
             * to 2 million steps in each of the 2 design variables. */
            var dsd = new DesignSpaceDescription(2);

            for (var i = 0; i < 2; i++)
            {
                dsd[i] = new VariableDescriptor(-5000, 5000, 100.0);
            }
            pd.Add(dsd);

            /* Add three convergence criteria */
            pd.Add(new DeltaXConvergence(0.0001));
            pd.Add(new MaxAgeConvergence(100, 0.000000001));
            pd.Add(new MaxFnEvalsConvergence(50000));
            pd.Add(new MaxSpanInPopulationConvergence(1));

            /* setting the number of convergence criteria needed is not necessary
             * since we will be using the default value of 1. Interesting to un-
             * comment the next line and see how it affects the process. */
            //pd.NumConvergeCriteriaNeeded = 2;

            /* Add the objective function. */
            var objfn = new polynomialObjFn();

            objfn.Add("x1^2");
            objfn.Add("x2^2");
            objfn.Add("-2*x1");
            objfn.Add("-10*x2");
            objfn.Add("26");
            /* this is a simple parabola center at {1, 5} */
            pd.Add(objfn);

            var g1 = new polynomialInequality();

            g1.Add("-x1");
            g1.Add("x2"); /* this inequality translates to x2 - x1 < 0
                           * of simply x1 > x2. */
            pd.Add(g1);

            pd.Add(new double[] { 1500.0, 700.0 });

            // Dispose the stream deterministically so the XML is flushed and the file
            // handle released even if serialization throws (it was previously leaked).
            using (var stream = new FileStream(filename, FileMode.Create))
            {
                pd.SaveProbToXml(stream);
            }
        }
        // Renders the PlannerSystemsProvider.cs source file for the given problem definition.
        void GenerateSystemsProvider(ProblemDefinition definition, string planName, string outputPath)
        {
            // Fall back to the built-in estimator unless the definition names a custom one.
            var customEstimator   = definition.CustomCumulativeRewardEstimator;
            var estimatorTypeName = string.IsNullOrEmpty(customEstimator)
                ? "DefaultCumulativeRewardEstimator"
                : $"global::{customEstimator}";

            var result = m_CodeRenderer.RenderTemplate(PlannerResources.instance.TemplateSystemsProvider, new
            {
                @namespace = $"{TypeHelper.PlansQualifier}.{planName}",
                plan_name  = definition.Name,
                heuristic  = estimatorTypeName,
                state_representation_qualifier = TypeHelper.StateRepresentationQualifier
            });

            SaveToFile(Path.Combine(outputPath, TypeHelper.PlansQualifier, planName, "PlannerSystemsProvider.cs"), result);
        }
        // Creates a dedicated ECS world for planning simulation and wires up the
        // state converter, scheduler, and plan executor. Statement order matters:
        // the StateManager must exist and be registered with the player loop before
        // the converter/scheduler are constructed around it.
        public void Initialize(ProblemDefinition problemDefinition, string planningSimulationWorldName)
        {
            var world        = new World(planningSimulationWorldName);
            var stateManager = world.GetOrCreateSystem <StateManager>();

            world.GetOrCreateSystem <SimulationSystemGroup>().AddSystemToUpdateList(stateManager);
            var playerLoop = UnityEngine.LowLevel.PlayerLoop.GetCurrentPlayerLoop();

            ScriptBehaviourUpdateOrder.AddWorldToPlayerLoop(world, ref playerLoop);

            m_StateConverter = new PlannerStateConverter <TraitBasedObject, StateEntityKey, StateData, StateDataContext, StateManager>(problemDefinition, stateManager);

            m_Scheduler = new PlannerScheduler <StateEntityKey, ActionKey, StateManager, StateData, StateDataContext, ActionScheduler, DefaultCumulativeRewardEstimator, TerminationEvaluator, DestroyStatesJobScheduler>();
            m_Scheduler.Initialize(stateManager, new DefaultCumulativeRewardEstimator(), new TerminationEvaluator(), problemDefinition.DiscountFactor);

            m_Executor = new Match3PlanExecutor(stateManager, m_StateConverter);

            // Ensure planning jobs are not running when destroying the state manager
            stateManager.Destroying += () => m_Scheduler.CurrentJobHandle.Complete();
        }
        /// <summary>
        /// Solves the posted problem definition, persists the solution for the current
        /// user when it is optimal, and returns the mapped solution.
        /// </summary>
        /// <param name="definition">Problem definition posted in the request body.</param>
        /// <returns>The solver result mapped to the API <c>Solution</c> model.</returns>
        public async Task <ActionResult <Solution> > Post([FromBody] ProblemDefinition definition)
        {
            var problemConfiguration = _mapper.Map <ProblemConfiguration>(definition);

            var username = Request.HttpContext.User.Identity.Name;

            // Let failures propagate unchanged: the original caught Exception and
            // rethrew `new Exception(e.Message)`, which discarded the stack trace
            // and the concrete exception type without adding any information.
            var problemSolution = _solverService.Solve(problemConfiguration);

            if (problemSolution.HasOptimalSolution)
            {
                await _problemSolutionService.Save(problemSolution, username);
            }

            return _mapper.Map <Solution>(problemSolution);
        }
        // Renders the ActionScheduler.cs source file for the given problem definition.
        void GenerateActionScheduler(ProblemDefinition definition, string planName, string outputPath)
        {
            // Widest parameter list across all non-null actions; 0 when there are none.
            var maxArgs = definition.ActionDefinitions
                .Where(action => action != null)
                .Select(action => action.Parameters.Count())
                .DefaultIfEmpty(0)
                .Max();

            var result = m_CodeRenderer.RenderTemplate(PlannerResources.instance.TemplateActionScheduler, new
            {
                @namespace  = $"{TypeHelper.PlansQualifier}.{planName}",
                plan_name   = definition.Name,
                actions     = definition.ActionDefinitions,
                num_actions = definition.ActionDefinitions.Count(),
                num_args    = maxArgs,
                state_representation_qualifier = TypeHelper.StateRepresentationQualifier
            });

            SaveToFile(Path.Combine(outputPath, TypeHelper.PlansQualifier, planName, "ActionScheduler.cs"), result);
        }
 /// <summary>Delegates evaluation of the individual to the wrapped problem definition.</summary>
 public override double[] Evaluate(Individual individual, IRandom random)
     => ProblemDefinition.Evaluate(individual, random);
    // Populates the input list shown to the user with the parameters of the selected
    // problem, dispatching on the concrete problem type (uniform motion, uniformly
    // accelerated motion, oblique throw). The Portuguese titles are user-facing text.
    private void FillList(ProblemDefinition currentProblem)
    {
        _inputList.Clear();
        switch (currentProblem.problem)
        {
        // Two bodies in uniform motion (encounter problem).
        case DoubleMU problem:
            _inputList.Add(new InputData()
            {
                title = "Posição Inicial de A:", value = problem.initialPosition, unit = currentProblem.unit
            });
            _inputList.Add(new InputData()
            {
                title = "Velocidade de A:", value = problem.velocity, unit = currentProblem.velocityUnit
            });
            _inputList.Add(new InputData()
            {
                title = "Posição Inicial de B:", value = problem.initialPositionB, unit = currentProblem.unit
            });
            _inputList.Add(new InputData()
            {
                // NOTE(review): title lacks the trailing colon the other entries have — confirm intended.
                title = "Velocidade de B", value = problem.velocityB, unit = currentProblem.velocityUnit
            });
            break;

        // Single body in uniform motion.
        case SimpleMU problem:
            _inputList.Add(new InputData()
            {
                title = "Posição inicial:", value = problem.initialPosition, unit = currentProblem.unit
            });
            _inputList.Add(new InputData()
            {
                title = "Velocidade:", value = problem.velocity, unit = currentProblem.velocityUnit
            });
            break;

        // Uniformly accelerated motion.
        case SimpleMUV problem:
            _inputList.Add(new InputData()
            {
                title = "Posição inicial:", value = problem.initialPosition, unit = currentProblem.unit
            });
            _inputList.Add(new InputData()
            {
                title = "Velocidade inicial:", value = problem.initialVelocity, unit = currentProblem.velocityUnit
            });
            _inputList.Add(new InputData()
            {
                // NOTE(review): acceleration unit is built as velocityUnit + "²" — confirm this renders as intended.
                title = "Aceleração:", value = problem.acceleration, unit = currentProblem.velocityUnit + "²"
            });
            break;

        // Oblique throw (projectile motion).
        case SimpleOT problem:
            _inputList.Add(new InputData()
            {
                title = "Posição inicial X:", value = problem.initialPosition.x, unit = currentProblem.unit
            });
            _inputList.Add(new InputData()
            {
                title = "Posição inicial Y:", value = problem.initialPosition.y, unit = currentProblem.unit
            });
            _inputList.Add(new InputData()
            {
                title = "Velocidade Modular inicial:", value = problem.initialVelocity, unit = currentProblem.velocityUnit
            });
            _inputList.Add(new InputData()
            {
                title = "Ângulo de Lançamento:", value = problem.throwAngle, unit = "°"
            });
            _inputList.Add(new InputData()
            {
                title = "Aceleração da gravidade:", value = problem.gravity, unit = currentProblem.velocityUnit + "²"
            });
            break;
        }

        PrintList(_inputList);
    }
        /// <summary>
        /// SWIG-generated wrapper: allocates an informed state sampler from the native
        /// optimization objective; the managed wrapper takes ownership (true).
        /// </summary>
        public virtual SWIGTYPE_p_std__shared_ptrT_ompl__base__InformedSampler_t allocInformedStateSampler(ProblemDefinition probDefn, uint maxNumberCalls)
        {
            var sampler = new SWIGTYPE_p_std__shared_ptrT_ompl__base__InformedSampler_t(
                ompl_basePINVOKE.OptimizationObjective_allocInformedStateSampler(swigCPtr, ProblemDefinition.getCPtr(probDefn), maxNumberCalls),
                true);

            if (ompl_basePINVOKE.SWIGPendingException.Pending)
            {
                throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
            }
            return sampler;
        }
// ===== Example #21 =====
        /// <summary>
        /// SWIG-generated wrapper: builds a termination condition that stops planning
        /// once an exact solution for the given problem definition is found.
        /// </summary>
        public static PlannerTerminationCondition exactSolnPlannerTerminationCondition(ProblemDefinition pdef)
        {
            var condition = new PlannerTerminationCondition(
                ompl_basePINVOKE.exactSolnPlannerTerminationCondition(ProblemDefinition.getCPtr(pdef)),
                true);

            if (ompl_basePINVOKE.SWIGPendingException.Pending)
            {
                throw ompl_basePINVOKE.SWIGPendingException.Retrieve();
            }
            return condition;
        }
// ===== Example #22 =====
 /// <summary>
 /// SWIG-generated helper: extracts the native handle from a wrapper, mapping a
 /// null managed reference to a HandleRef around IntPtr.Zero.
 /// </summary>
 internal static global::System.Runtime.InteropServices.HandleRef getCPtr(ProblemDefinition obj)
 {
     if (obj == null)
     {
         return new global::System.Runtime.InteropServices.HandleRef(null, global::System.IntPtr.Zero);
     }
     return obj.swigCPtr;
 }
 /// <summary>Delegates neighbor generation to the wrapped problem definition.</summary>
 public override IEnumerable <Individual> GetNeighbors(Individual individual, IRandom random)
     => ProblemDefinition.GetNeighbors(individual, random);
 /// <summary>Delegates analysis of the evaluated individuals to the wrapped problem definition.</summary>
 public override void Analyze(Individual[] individuals, double[][] qualities, ResultCollection results, IRandom random)
     => ProblemDefinition.Analyze(individuals, qualities, results, random);
// ===== Example #25 =====
        // Demo driver: saves the example problem to XML, reloads it, and runs a series
        // of optimization methods against it (Random Hill Climbing, Exhaustive Hill
        // Climbing, Simulated Annealing). Several other methods are kept as commented-out
        // reference code, with notes on their expected behavior. Console.ReadKey pauses
        // are deliberate so each method's output can be inspected before the next run.
        private static void Main()
        {
            Parameters.Verbosity = VerbosityLevels.Normal;
            // this next line is to set the Debug statements from OOOT to the Console.
            Trace.Listeners.Add(new TextWriterTraceListener(Console.Out));

            /* In this example, we first present how the details of an optimzation
             * problem can be saved to an XML-file so that it can be read in
             * and solved as opposed to defining all the details in an imperative
             * (code line by code line) way. In the first function, the xml file
             * name "test1.xml" is created. */
            makeAndSaveProblemDefinition();

            /* now we create a series of different optimization methods and test
             * them on the problem. The problem is now opened from the file and
             * the details are stored in an object of class "Problem Definition".*/
            var stream = new FileStream(filename, FileMode.Open);

            double[] xStar;

            ProblemDefinition probTest1 = ProblemDefinition.OpenprobFromXml(stream);
            abstractOptMethod opty;

            /******************Exhaustive Search ***********************/
            //SearchIO.output("******************Exhaustive Search ***********************");
            //Console.ReadKey();
            //opty = new ExhaustiveSearch(probTest1.SpaceDescriptor, optimize.minimize);
            //opty.Add(probTest1);
            ///* No convergence criteria is needed as the process concludes when all
            // * states have been visited but for this problem that is 4 trillion states.*/
            //opty.ConvergenceMethods.Clear();
            ///* if you DID KNOW the best, you can include a criteria like...*/
            //opty.ConvergenceMethods.Add(new ToKnownBestXConvergence(new[] { 3.0, 3.0 }, 0.0000001));
            //var timer = Stopwatch.StartNew();
            //var fStar = opty.Run(out xStar);

            ///* you probably will never see this process complete. Even with the added
            // * convergence criteria (which is not factored into the estimated time of
            // * completion), you are probably looking at 1 to 2 years. */
            //printResults(opty, xStar, fStar, timer);

            /***********Gradient Based Optimization with Steepest Descent****************/
            //SearchIO.output("***********Gradient Based Optimization with Steepest Descent****************");
            //Console.ReadKey();
            //opty = new GradientBasedOptimization();
            //opty.Add(probTest1);
            //abstractSearchDirection searchDirMethod = new SteepestDescent();
            //opty.Add(searchDirMethod);
            ////abstractLineSearch lineSearchMethod = new ArithmeticMean(0.0001, 1, 100);
            ////abstractLineSearch lineSearchMethod = new DSCPowell(0.0001, 1, 100);
            //abstractLineSearch lineSearchMethod = new GoldenSection(0.0001, 1);
            //opty.Add(lineSearchMethod);
            //opty.Add(new squaredExteriorPenalty(opty, 10));
            ///* since this is not a population-based optimization method, we need to remove the MaxSpan criteria. */
            //opty.ConvergenceMethods.RemoveAll(a => a is MaxSpanInPopulationConvergence);

            //timer = Stopwatch.StartNew();
            //fStar = opty.Run(out xStar);
            //printResults(opty, xStar, fStar, timer);

            ///***********Gradient Based Optimization with Fletcher-Reeves****************/
            //SearchIO.output("***********Gradient Based Optimization with Fletcher-Reeves****************");
            //Console.ReadKey();
            ///* we don't need to reset (invoke the constructor) for GradientBasedOptimization since we are only
            // * change the seaach direction method. */
            //searchDirMethod = new FletcherReevesDirection();
            ///* you could also try the remaining 3 search direction methods. */
            ////searchDirMethod = new CyclicCoordinates();
            ////searchDirMethod = new BFGSDirection();
            ////searchDirMethod = new PowellMethod(0.001, 6);
            //opty.Add(searchDirMethod);

            //timer = Stopwatch.StartNew();
            //opty.ResetFunctionEvaluationDatabase();
            //fStar = opty.Run(out xStar);
            //printResults(opty, xStar, fStar, timer);
            ///******************Generalized Reduced Gradient***********************/
            //SearchIO.output("******************Generalized Reduced Gradient***********************");
            //Console.ReadKey();
            //opty = new GeneralizedReducedGradientActiveSet();
            //opty.Add(probTest1);
            //opty.Add(new squaredExteriorPenalty(opty, 10));
            //opty.ConvergenceMethods.RemoveAll(a => a is MaxSpanInPopulationConvergence);

            //timer = Stopwatch.StartNew();
            //fStar = opty.Run(out xStar);
            //printResults(opty, xStar, fStar, timer);

            /* GRG is the ONLY one here that handles constraints explicity. It find the
             * optimal very quickly and accurately. However, many of the other show a
             * better value of f*, this is because they are using an imperfect penalty
             * function (new squaredExteriorPenalty(opty, 10)). While it seems that GRG
             * includes it as well, it is only used in the the line search method. */


            /******************Random Hill Climbing ***********************/
            // Refine the discretization (0.1 steps) before running the stochastic methods.
            probTest1.SpaceDescriptor = new DesignSpaceDescription(new[] { new VariableDescriptor(-5000, 5000, 0.1),
                                                                           new VariableDescriptor(-5000, 5000, 0.1) });
            SearchIO.output("******************Random Hill Climbing ***********************");
            Console.ReadKey();
            opty = new HillClimbing();
            opty.Add(probTest1);
            opty.Add(new squaredExteriorPenalty(opty, 8));
            opty.Add(new RandomNeighborGenerator(probTest1.SpaceDescriptor));
            opty.Add(new KeepSingleBest(optimize.minimize));
            opty.ConvergenceMethods.RemoveAll(a => a is MaxSpanInPopulationConvergence);

            /* the deltaX convergence needs to be removed as well, since RHC will end many iterations
             * at the same point it started. */
            opty.ConvergenceMethods.RemoveAll(a => a is DeltaXConvergence);

            var timer = Stopwatch.StartNew();
            var fStar = opty.Run(out xStar);

            printResults(opty, xStar, fStar, timer);



            /******************Exhaustive Hill Climbing ***********************/
            SearchIO.output("******************Exhaustive Hill Climbing ***********************");
            Console.ReadKey();
            /* Everything else about the Random Hill Climbing stays the same. */
            opty.Add(new ExhaustiveNeighborGenerator(probTest1.SpaceDescriptor));

            timer = Stopwatch.StartNew();
            fStar = opty.Run(out xStar);
            printResults(opty, xStar, fStar, timer);



            /******************Simulated Annealing***********************/
            SearchIO.output("******************Simulated Annealing***********************");
            Console.ReadKey();
            opty = new SimulatedAnnealing(optimize.minimize);
            opty.Add(probTest1);
            opty.Add(new squaredExteriorPenalty(opty, 10));
            opty.Add(new RandomNeighborGenerator(probTest1.SpaceDescriptor, 100));
            opty.Add(new SACoolingSangiovanniVincentelli(100));
            opty.ConvergenceMethods.RemoveAll(a => a is MaxSpanInPopulationConvergence);

            /* the deltaX convergence needs to be removed as well, since RHC will end many iterations
             * at the same point it started. */
            opty.ConvergenceMethods.RemoveAll(a => a is DeltaXConvergence);


            timer = Stopwatch.StartNew();
            fStar = opty.Run(out xStar);
            printResults(opty, xStar, fStar, timer);
        }