Example #1
        public IActionResult Hookes()
        {
            SolvedModel model    = new SolvedModel();
            int         nNumVars = 2;

            double[] fX = new double[] { -12, 30 };
            model.Start = new List <double>(fX);
            double[]     fParam       = new double[] { 0, 0 };
            double[]     fStepSize    = new double[] { 0.1, 0.1 };
            double[]     fMinStepSize = new double[] { 0.0000001, 0.0000001 };
            int          nIter        = 0;
            double       fEpsFx       = 0.001;
            int          i;
            double       fBestF;
            Hookes       oOpt;
            MyFxDelegate MyFx = new MyFxDelegate(Fx3);

            oOpt           = new Hookes();
            model.Tolerace = fEpsFx;

            Console.WriteLine("******** FINAL RESULTS *************");
            fBestF = oOpt.CalcOptim(nNumVars, ref fX, ref fParam, ref fStepSize, ref fMinStepSize, fEpsFx, ref nIter, MyFx);
            Console.WriteLine("Optimum at");
            for (i = 0; i < nNumVars; i++)
            {
                model.X.Add(fX[i]);
            }
            model.Function   = fBestF;
            model.Iterations = nIter;

            return(View(model));
        }
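Note: the objective Fx3 passed through MyFxDelegate above is not part of this listing. The sketch below only illustrates a function that matches the delegate signature inferred from how MyFx is invoked inside CalcOptim (Example #4); the actual formula of Fx3 is unknown, and the sphere function used here is a placeholder.

        // Delegate signature inferred from the calls MyFx(nNumVars, ref fXnew, ref fParam) below.
        public delegate double MyFxDelegate(int nNumVars, ref double[] fX, ref double[] fParam);

        // Hypothetical objective: f(x1, x2) = x1^2 + x2^2, minimum at the origin.
        // The real Fx3 is not shown in the listing.
        public static double Fx3(int nNumVars, ref double[] fX, ref double[] fParam)
        {
            double fSum = 0;
            for (int i = 0; i < nNumVars; i++)
            {
                fSum += fX[i] * fX[i];
            }
            return fSum;
        }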
Example #2
        public IActionResult Fletcher()
        {
            SolvedModel model    = new SolvedModel();
            int         nNumVars = 2;

            double[] fX = new double[] { 0, 0 };
            model.Start = new List <double>(fX);
            double[]     fParam   = new double[] { 0, 0 };
            int          nIter    = 0;     // iteration counter
            int          nMaxIter = 100;   // iteration limit
            double       fEpsFx   = 0.001; // tolerance
            int          i;
            double       fBestF;           // optimum function value
            string       sErrorMsg = "";
            Fletcher     oOpt;
            MyFxDelegate MyFx = new MyFxDelegate(Fx3);

            oOpt            = new Fletcher();
            model.Tolerace  = fEpsFx;
            model.MaxCycles = nMaxIter;
            fBestF          = oOpt.CalcOptim(nNumVars, ref fX, ref fParam, fEpsFx, nMaxIter, ref nIter, ref sErrorMsg, MyFx);
            if (sErrorMsg.Length > 0)
            {
                model.ErrorMsg = sErrorMsg;
            }
            for (i = 0; i < nNumVars; i++)
            {
                model.X.Add(fX[i]);
            }
            model.Function   = fBestF;
            model.Iterations = nIter;
            return(View(model));
        }
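The SolvedModel view model returned to the view is not shown either. A minimal sketch reconstructed from the properties that Examples #1 and #2 actually touch (the property names, including the Tolerace spelling, are copied verbatim from the calls above; everything else is an assumption):

        // Hypothetical view model containing only the members referenced in the examples.
        public class SolvedModel
        {
            public List<double> Start      { get; set; } = new List<double>();
            public List<double> X          { get; set; } = new List<double>();
            public double       Function   { get; set; }
            public int          Iterations { get; set; }
            public double       Tolerace   { get; set; } // spelling as used by the callers
            public int          MaxCycles  { get; set; }
            public string       ErrorMsg   { get; set; }
        }

Example #3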
        public IActionResult Hookes()
        {
            int nNumVars = 2;

            double[]      fX           = new double[] { 0, 0 };
            double[]      fParam       = new double[] { 0, 0 };
            double[]      fStepSize    = new double[] { 0.1, 0.1 };
            double[]      fMinStepSize = new double[] { 0.0000001, 0.0000001 };
            int           nIter        = 0;
            double        fEpsFx       = 0.0000001;
            int           i;
            double        fBestF;
            string        sAnswer;
            Hookes        oOpt;
            MyFxDelegate  MyFx  = new MyFxDelegate(Fx3);
            SayFxDelegate SayFx = new SayFxDelegate(SayFx3);

            oOpt = new Hookes();

            Console.WriteLine("Hooke-Jeeves Search Optimization");
            Console.WriteLine("Finding the minimum of function:");
            Console.WriteLine(SayFx());
            Console.Write("Use default input values? (Y/N) ");
            sAnswer = Console.ReadLine();
            if (sAnswer.ToUpper() == "Y")
            {
                for (i = 0; i < nNumVars; i++)
                {
                    Console.WriteLine("X({0}) = {1}", i + 1, fX[i]);
                    Console.WriteLine("Step size({0}) = {1}", i + 1, fStepSize[i]);
                    Console.WriteLine("Min step Size ({0}) = {1}", i + 1, fMinStepSize[i]);
                }
                Console.WriteLine("Function tolerance = {0}", fEpsFx);
            }
            else
            {
                for (i = 0; i < nNumVars; i++)
                {
                    fX[i]           = GetIndexedDblInput("X", i + 1, fX[i]);
                    fStepSize[i]    = GetIndexedDblInput("Step size", i + 1, fStepSize[i]);
                    fMinStepSize[i] = GetIndexedDblInput("Min step size", i + 1, fMinStepSize[i]);
                }
                fEpsFx = GetDblInput("Function tolerance", fEpsFx);
            }

            Console.WriteLine("******** FINAL RESULTS *************");
            fBestF = oOpt.CalcOptim(nNumVars, ref fX, ref fParam, ref fStepSize, ref fMinStepSize, fEpsFx, ref nIter, MyFx);
            Console.WriteLine("Optimum at");
            for (i = 0; i < nNumVars; i++)
            {
                Console.WriteLine("X({0}) = {1}", i + 1, fX[i]);
            }
            Console.WriteLine("Function value = {0}", fBestF);
            Console.WriteLine("Number of iterations = {0}", nIter);
            Console.WriteLine();
            Console.Write("Press Enter to end the program ...");
            Console.ReadLine();

            return(View());
        }
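This console-driven variant relies on the input helpers GetDblInput and GetIndexedDblInput and on the prompt function SayFx3, none of which appear in the listing. A sketch under the assumption that the helpers simply prompt for a value and keep the supplied default when the input is empty or does not parse; the SayFx3 formula is a placeholder, not the real Fx3.

        // Delegate signature inferred from the call Console.WriteLine(SayFx()) above.
        public delegate string SayFxDelegate();

        // Hypothetical input helpers: prompt for a double and fall back to the default.
        static double GetDblInput(string sPrompt, double fDefault)
        {
            Console.Write("{0} ({1})? ", sPrompt, fDefault);
            string sInput = Console.ReadLine();
            double fValue;
            return double.TryParse(sInput, out fValue) ? fValue : fDefault;
        }

        static double GetIndexedDblInput(string sPrompt, int nIndex, double fDefault)
        {
            return GetDblInput(string.Format("{0}({1})", sPrompt, nIndex), fDefault);
        }

        // Hypothetical description function matching SayFxDelegate.
        static string SayFx3()
        {
            return "F(X1,X2) = X1^2 + X2^2"; // placeholder formula
        }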
Example #4
        public double CalcOptim(int nNumVars, ref double[] fX, ref double[] fParam, ref double[] fStepSize, ref double[] fMinStepSize, double fEpsFx, ref int nIter, MyFxDelegate MyFx)
        {
            int i;

            double[] fXnew = new double[nNumVars];
            double[] fDeltaX = new double[nNumVars];
            double   F, fXX, fLambda, fBestF, fLastBestF;
            bool     bStop, bMadeAnyMove;

            bool[] bMoved = new bool[nNumVars];

            m_MyFx = MyFx;

            for (i = 0; i < nNumVars; i++)
            {
                fXnew[i] = fX[i];
            }
            // calculate function value at initial point
            fBestF     = MyFx(nNumVars, ref fXnew, ref fParam);
            fLastBestF = 100 * fBestF + 100; // seed the previous-best value far from fBestF so the first convergence test cannot pass

            nIter = 1;
            do
            {
                nIter++;

                for (i = 0; i < nNumVars; i++)
                {
                    fX[i] = fXnew[i];
                }

                // exploratory search: probe each variable with +/- its step size
                // and keep stepping while the objective improves
                for (i = 0; i < nNumVars; i++)
                {
                    bMoved[i] = false;
                    do
                    {
                        fXX      = fXnew[i];
                        fXnew[i] = fXX + fStepSize[i];
                        F        = MyFx(nNumVars, ref fXnew, ref fParam);
                        if (F < fBestF)
                        {
                            fBestF    = F;
                            bMoved[i] = true;
                        }
                        else
                        {
                            fXnew[i] = fXX - fStepSize[i];
                            F        = MyFx(nNumVars, ref fXnew, ref fParam);
                            if (F < fBestF)
                            {
                                fBestF    = F;
                                bMoved[i] = true;
                            }
                            else
                            {
                                fXnew[i] = fXX;
                                break;
                            }
                        }
                    } while (true);
                }

                // did the exploratory search move along every dimension?
                bMadeAnyMove = true;
                for (i = 0; i < nNumVars; i++)
                {
                    if (!bMoved[i])
                    {
                        bMadeAnyMove = false;
                        break;
                    }
                }

                // pattern move: line-search along the combined direction of the exploratory moves
                if (bMadeAnyMove)
                {
                    for (i = 0; i < nNumVars; i++)
                    {
                        fDeltaX[i] = fXnew[i] - fX[i];
                    }

                    fLambda = 0;
                    if (LinSearch_DirectSearch(nNumVars, ref fX, ref fParam, ref fLambda, ref fDeltaX, 0.1, 0.0001))
                    {
                        for (i = 0; i < nNumVars; i++)
                        {
                            fXnew[i] = fX[i] + fLambda * fDeltaX[i];
                        }
                    }
                }

                fBestF = MyFx(nNumVars, ref fXnew, ref fParam);

                // reduce the step size for the dimensions that had no moves
                for (i = 0; i < nNumVars; i++)
                {
                    if (!bMoved[i])
                    {
                        fStepSize[i] /= 2;
                    }
                }

                // test function value convergence
                if (Math.Abs(fBestF - fLastBestF) < fEpsFx)
                {
                    break;
                }

                fLastBestF = fBestF;

                bStop = true;
                for (i = 0; i < nNumVars; i++)
                {
                    if (fStepSize[i] >= fMinStepSize[i])
                    {
                        bStop = false;
                        break;
                    }
                }
            } while (!bStop);

            for (i = 0; i < nNumVars; i++)
            {
                fX[i] = fXnew[i];
            }

            return(fBestF);
        }
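CalcOptim hands the pattern move to LinSearch_DirectSearch, which is not included in the listing. The sketch below assumes the last two arguments are an initial and a minimum trial step, and that the routine looks for a step length fLambda that lowers the objective along fDeltaX, returning false when no improvement was found; the real implementation may differ.

        // Hypothetical direct line search: grow the trial step while the objective
        // improves along fDeltaX, halve it otherwise, and stop once it drops below fMinStep.
        private bool LinSearch_DirectSearch(int nNumVars, ref double[] fX, ref double[] fParam,
                                            ref double fLambda, ref double[] fDeltaX,
                                            double fInitStep, double fMinStep)
        {
            double[] fXtrial   = new double[nNumVars];
            double   fStep     = fInitStep;
            double   fBestF    = m_MyFx(nNumVars, ref fX, ref fParam);
            bool     bImproved = false;

            while (fStep >= fMinStep)
            {
                double fTrialLambda = fLambda + fStep;
                for (int i = 0; i < nNumVars; i++)
                {
                    fXtrial[i] = fX[i] + fTrialLambda * fDeltaX[i];
                }
                double F = m_MyFx(nNumVars, ref fXtrial, ref fParam);
                if (F < fBestF)
                {
                    fBestF    = F;
                    fLambda   = fTrialLambda;
                    bImproved = true;
                    fStep    *= 2; // accelerate while improving
                }
                else
                {
                    fStep /= 2;    // back off and retry with a smaller step
                }
            }
            return bImproved;
        }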
Example #5
        public double CalcOptim(int nNumVars, ref double[] fX, ref double[] fParam, double fEpsFx, int nMaxIter, ref int nIter, ref string sErrorMsg, MyFxDelegate MyFx)
        {
            int i;

            double[] fDeriv = new double[nNumVars];
            double[] fDerivOld = new double[nNumVars];
            double   F, fDFNormOld, fLambda, fLastF, fDFNorm = 0;

            m_MyFx = MyFx;

            // calculate function value at the initial point
            fLastF = MyFx(nNumVars, ref fX, ref fParam);

            GetGradients(nNumVars, ref fX, ref fParam, ref fDeriv, ref fDFNorm);

            // first move: direct line search along the initial gradient direction
            fLambda = 0.1;
            if (LinSearch_DirectSearch(nNumVars, ref fX, ref fParam, ref fLambda, ref fDeriv, 0.1, 0.000001))
            {
                for (i = 0; i < nNumVars; i++)
                {
                    fX[i] += fLambda * fDeriv[i];
                }
            }
            else
            {
                sErrorMsg = "Failed linear search";
                return(fLastF);
            }

            nIter = 1;
            do
            {
                nIter++;
                if (nIter > nMaxIter)
                {
                    sErrorMsg = "Reached maximum iterations limit";
                    break;
                }
                fDFNormOld = fDFNorm;
                for (i = 0; i < nNumVars; i++)
                {
                    fDerivOld[i] = fDeriv[i]; // save old gradient
                }
                GetGradients(nNumVars, ref fX, ref fParam, ref fDeriv, ref fDFNorm);
                // Fletcher-Reeves update: new direction = (fDFNorm / fDFNormOld)^2 * previous direction - new gradient
                for (i = 0; i < nNumVars; i++)
                {
                    fDeriv[i] = Math.Pow((fDFNorm / fDFNormOld), 2) * fDerivOld[i] - fDeriv[i];
                }
                if (fDFNorm <= fEpsFx)
                {
                    sErrorMsg = "Gradient norm meets convergence criteria";
                    break;
                }
                //    For i = 0 To nNumVars - 1
                //      fDeriv(i) = -fDeriv(i) / fDFNorm
                //    Next i
                fLambda = 0;
                //    If LinSearch_Newton(fX, nNumVars, fLambda, fDeriv, 0.0001, 100) Then
                if (LinSearch_DirectSearch(nNumVars, ref fX, ref fParam, ref fLambda, ref fDeriv, 0.1, 0.000001))
                {
                    for (i = 0; i < nNumVars; i++)
                    {
                        fX[i] += fLambda * fDeriv[i];
                    }
                    F = MyFx(nNumVars, ref fX, ref fParam);
                    if (Math.Abs(F - fLastF) < fEpsFx)
                    {
                        sErrorMsg = "Successive function values meet convergence criteria";
                        break;
                    }
                    else
                    {
                        fLastF = F;
                    }
                }
                else
                {
                    sErrorMsg = "Failed linear search";
                    break;
                }
            } while (true);

            return(fLastF);
        }
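GetGradients is likewise not part of the listing. A sketch that fills fDeriv with a central-difference approximation of the gradient of m_MyFx at fX and returns its Euclidean norm in fDFNorm; the perturbation size and the sign convention the original code expects (raw gradient versus descent direction) are assumptions here.

        // Hypothetical gradient routine: central differences plus the gradient norm.
        private void GetGradients(int nNumVars, ref double[] fX, ref double[] fParam,
                                  ref double[] fDeriv, ref double fDFNorm)
        {
            double fSum = 0;

            for (int i = 0; i < nNumVars; i++)
            {
                double fXX = fX[i];
                double fH  = 0.01 * (1 + Math.Abs(fXX)); // assumed step size

                fX[i] = fXX + fH;
                double fFplus = m_MyFx(nNumVars, ref fX, ref fParam);
                fX[i] = fXX - fH;
                double fFminus = m_MyFx(nNumVars, ref fX, ref fParam);
                fX[i] = fXX; // restore the original coordinate

                fDeriv[i] = (fFplus - fFminus) / (2 * fH);
                fSum     += fDeriv[i] * fDeriv[i];
            }
            fDFNorm = Math.Sqrt(fSum);
        }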