// Example No. 1
        /// <summary>
        /// First-order (steepest-descent) minimization. Each iteration steps along the
        /// negative gradient, with the step length chosen by the one-dimensional
        /// division search (<c>UnimodalMinimization.DivisionSearch</c>).
        /// </summary>
        /// <param name="f">Objective function to minimize.</param>
        /// <param name="startPoint">Initial guess for the minimizer.</param>
        /// <param name="accuracy">Stop once the gradient magnitude falls below this value.</param>
        /// <param name="calcsF">Out: number of objective-function evaluations performed.</param>
        /// <param name="calcsGradient">Out: number of gradient evaluations performed.</param>
        /// <param name="x">Out: sequence of iterates visited, including the start point.</param>
        /// <param name="fx">Out: objective value at each entry of <paramref name="x"/>.</param>
        /// <returns>The final iterate (the approximate minimizer).</returns>
        public static DV FirstOrder_DivisionMethod(Func <DV, D> f, DV startPoint, double accuracy, out int calcsF, out int calcsGradient, out DV[] x, out double[] fx)
        {
            //Counters
            calcsF        = 0; //Count how many times the objective function was used.
            calcsGradient = 0; //Count how many times the gradient was calculated.

            //History buffers; trimmed to the iterations actually used before returning.
            int maxIterations = 10000;

            x  = new DV[maxIterations];
            fx = new double[maxIterations];

            //Seed the history with the initial guess.
            int i = 0;

            x[i]  = startPoint;
            fx[i] = f(x[i]); calcsF++;

            //Loop through gradient steps until the accuracy target is met.
            //The iteration cap prevents overrunning the history buffers: the previous
            //while(true) threw IndexOutOfRangeException after maxIterations steps on a
            //non-converging problem instead of terminating gracefully.
            double alpha = 1;

            while (i + 1 < maxIterations)
            {
                //Compute next step, using previous step
                i++;

                //Step 1 - Determine the gradient
                DV gradient = AD.Grad(f, x[i - 1]); calcsGradient++;

                //Step 2 - Division method, to compute the new x[i] and fx[i].
                //The 1-D objective is f evaluated along the negative-gradient ray.
                DV          xPrev     = x[i - 1];
                Func <D, D> objFAlpha = delegate(D a)
                {
                    DV xNext = xPrev - (a * gradient);
                    return(f(xNext));
                };
                //Shrink the initial line-search bracket geometrically each outer iteration.
                alpha = alpha * 0.8;
                double beta = UnimodalMinimization.DivisionSearch(objFAlpha, fx[i - 1], alpha, out fx[i], ref calcsF);
                x[i] = x[i - 1] - (beta * gradient);

                //Step 3 - Check if accuracy has been met. If so, then end.
                //NOTE(review): this assumes a 2-D problem - only gradient components 0 and 1
                //are used. Generalize if DV exposes its dimension.
                double magGradient = Math.Sqrt(AD.Pow(gradient[0], 2) + AD.Pow(gradient[1], 2));
                if (magGradient < accuracy)
                {
                    break;
                }
            }

            //Trim the histories to the entries actually produced and return the final iterate.
            x  = x.Take(i + 1).ToArray();
            fx = fx.Take(i + 1).ToArray();
            return(x[i]);
        }
// Example No. 2
        /// <summary>
        /// Second-order minimization. Each iteration builds a Newton-like step from the
        /// gradient and the diagonal of the Hessian (step_c = -g[c] / H[c,c]), then scales
        /// it with the one-dimensional division search
        /// (<c>UnimodalMinimization.DivisionSearch</c>).
        /// </summary>
        /// <param name="f">Objective function to minimize.</param>
        /// <param name="startPoint">Initial guess for the minimizer.</param>
        /// <param name="accuracy">Stop once the gradient magnitude falls below this value.</param>
        /// <param name="calcsF">Out: number of objective-function evaluations performed.</param>
        /// <param name="calcsGradient">Out: number of gradient evaluations performed.</param>
        /// <param name="calcsHessian">Out: number of Hessian evaluations performed.</param>
        /// <param name="x">Out: sequence of iterates visited, including the start point.</param>
        /// <param name="fx">Out: objective value at each entry of <paramref name="x"/>.</param>
        /// <returns>The final iterate (the approximate minimizer).</returns>
        public static DV SecondOrder_DivisionMethod(Func <DV, D> f, DV startPoint, double accuracy, out int calcsF, out int calcsGradient, out int calcsHessian, out DV[] x, out double[] fx)
        {
            //Counters
            calcsF        = 0; //Count how many times the objective function was used.
            calcsGradient = 0; //Count how many times the gradient was calculated.
            calcsHessian  = 0; //Count how many times the second gradient was calculated.

            //History buffers; trimmed to the iterations actually used before returning.
            int maxIterations = 10000;

            x  = new DV[maxIterations];
            fx = new double[maxIterations];

            //Seed the history with the initial guess.
            int i = 0;

            x[i]  = startPoint;
            fx[i] = f(x[i]); calcsF++;

            //Loop through Newton-like steps until a convergence test is met.
            //The iteration cap prevents overrunning the history buffers: the previous
            //while(true) threw IndexOutOfRangeException after maxIterations steps on a
            //non-converging problem instead of terminating gracefully.
            double alpha = 1;

            while (i + 1 < maxIterations)
            {
                //Compute next step, using previous step
                i++;

                //Step 1 - Determine the gradients
                DV  gradient = AD.Grad(f, x[i - 1]); calcsGradient++;
                var hess     = AD.Hessian(f, x[i - 1]); calcsHessian++;

                //Step 2 - Compute the full (alpha = 1) step per component using only the
                //Hessian diagonal: step_c = -g[c] / H[c,c].
                //NOTE(review): the vector length is discovered by probing indices until the
                //indexer throws - exception-driven control flow. Replace with an explicit
                //dimension query if DV/Hessian expose one.
                List <D> listSteps = new List <D>();
                while (true)
                {
                    try
                    {
                        int c = listSteps.Count;
                        listSteps.Add(-gradient[c] / hess[c, c]); // first-gradient divided by second-gradient
                    }
                    catch
                    { break; } //End of vector reached (intentional best-effort probe).
                }
                DV fullStep = new DV(listSteps.ToArray());

                //Step 3 - Division method, to compute the new x[i] and fx[i].
                //The 1-D objective is f evaluated along the full-step direction.
                DV          xPrev     = x[i - 1];
                Func <D, D> objFAlpha = delegate(D a)
                {
                    DV xNext = xPrev + (a * fullStep);
                    return(f(xNext));
                };
                //Shrink the initial line-search bracket geometrically each outer iteration.
                alpha = alpha * 0.8;
                double beta = UnimodalMinimization.DivisionSearch(objFAlpha, fx[i - 1], alpha, out fx[i], ref calcsF);
                x[i] = x[i - 1] + (beta * fullStep);

                //Check if accuracy has been met.
                //NOTE(review): both tests below assume a 2-D problem - only components 0 and 1
                //are used. Generalize if DV exposes its dimension.
                double magGradient = Math.Sqrt(AD.Pow(gradient[0], 2) + AD.Pow(gradient[1], 2));
                if (magGradient < accuracy)
                {
                    break;
                }
                //Secondary stop: the iterates have effectively stopped moving.
                DV dx = AD.Abs(x[i] - x[i - 1]);
                if ((dx[0] < accuracy * 0.1) && (dx[1] < accuracy * 0.1))
                {
                    break;
                }
            }

            //Trim the histories to the entries actually produced and return the final iterate.
            x  = x.Take(i + 1).ToArray();
            fx = fx.Take(i + 1).ToArray();
            return(x[i]);
        }