/// <summary>
/// First-order (steepest-descent) minimization using the division line-search method
/// to pick the step length along the negative gradient at each iteration.
/// </summary>
/// <param name="f">Objective function to minimize.</param>
/// <param name="startPoint">Initial guess for the minimizer.</param>
/// <param name="accuracy">Stop when the gradient magnitude falls below this value.</param>
/// <param name="calcsF">Out: number of objective-function evaluations performed.</param>
/// <param name="calcsGradient">Out: number of gradient evaluations performed.</param>
/// <param name="x">Out: the sequence of iterates visited.</param>
/// <param name="fx">Out: f evaluated at each iterate.</param>
/// <returns>The final iterate, or null if the search diverged or hit the iteration cap.</returns>
public static DV FirstOrder_DivisionMethod(Func <DV, D> f, DV startPoint, double accuracy, out int calcsF, out int calcsGradient, out DV[] x, out double[] fx)
{
    //Counters
    calcsF = 0;        //Count how many times the objective function was used.
    calcsGradient = 0; //Count how many times the gradient was calculated.

    //Define our X vector (preallocated iteration history).
    int maxIterations = 10000;
    x = new DV[maxIterations];
    fx = new double[maxIterations];

    //Pick an initial guess for x
    int i = 0;
    x[0] = startPoint;
    fx[0] = f(x[0]); calcsF++;

    //Loop through gradient steps until min points are found, recompute gradient and repeat.
    double alpha = 1;
    while (true)
    {
        //Compute next step, using previous step
        i++;

        //Return failed results if the iterate diverged (NaN) or the history is full.
        //BUGFIX: without this guard x[i] overflows the preallocated arrays once
        //i reaches maxIterations. Same guard as FirstOrder_OneDimensionalMethod.
        if (double.IsNaN(x[i - 1][0]) || double.IsNaN(x[i - 1][1]) || (i == maxIterations))
        {
            x = x.Take(i).ToArray();
            fx = fx.Take(i).ToArray();
            return null;
        }

        //Step 1 - Determine the gradient
        DV gradient = AD.Grad(f, x[i - 1]); calcsGradient++;

        //Step 2 - Division method, to compute the new x[i] and fx[i]
        DV xPrev = x[i - 1];
        Func <D, D> objFAlpha = delegate(D a)
        {
            DV xNext = xPrev - (a * gradient);
            return(f(xNext));
        };
        //Shrink the initial trial step each outer iteration so the line search
        //starts closer to the previously successful step size.
        alpha = alpha * 0.8;
        double beta = UnimodalMinimization.DivisionSearch(objFAlpha, fx[i - 1], alpha, out fx[i], ref calcsF);
        x[i] = x[i - 1] - (beta * gradient);

        //Step 3 - Check if accuracy has been met. If so, then end.
        //NOTE(review): magnitude assumes a 2-component DV — confirm if higher dimensions are needed.
        double magGradient = Math.Sqrt(AD.Pow(gradient[0], 2) + AD.Pow(gradient[1], 2));
        if (magGradient < accuracy)
        {
            break;
        }
    }

    //Return the minimization point (trim unused preallocated slots).
    x = x.Take(i + 1).ToArray();
    fx = fx.Take(i + 1).ToArray();
    return(x[i]);
}
/// <summary>
/// First-order minimization: steps along the normalized negative gradient, choosing
/// the step length in [0, 1] by golden-ratio line search at every iteration.
/// </summary>
/// <param name="f">Objective function to minimize.</param>
/// <param name="startPoint">Initial guess for the minimizer.</param>
/// <param name="accuracy">Tolerance for both the gradient magnitude and the per-step movement.</param>
/// <param name="calcsF">Out: number of objective-function evaluations performed.</param>
/// <param name="calcsGradient">Out: number of gradient evaluations performed.</param>
/// <param name="x">Out: the sequence of iterates visited.</param>
/// <param name="fx">Out: f evaluated at each iterate.</param>
/// <returns>The final iterate, or null if the search diverged or hit the iteration cap.</returns>
public static DV FirstOrder_OneDimensionalMethod(Func <DV, D> f, DV startPoint, double accuracy, out int calcsF, out int calcsGradient, out DV[] x, out double[] fx)
{
    //Work counters reported back to the caller.
    calcsF = 0;
    calcsGradient = 0;

    //Preallocated iteration history, trimmed before returning.
    int maxIterations = 1000;
    x = new DV[maxIterations];
    fx = new double[maxIterations];

    //Seed the search with the caller-supplied starting point.
    int k = 0;
    x[k] = startPoint;
    fx[k] = f(x[k]);
    calcsF++;

    while (true)
    {
        k++;

        //Abort when the previous iterate diverged (NaN) or the history is full.
        bool diverged = double.IsNaN(x[k - 1][0]) || double.IsNaN(x[k - 1][1]);
        if (diverged || (k == maxIterations))
        {
            x = x.Take(k).ToArray();
            fx = fx.Take(k).ToArray();
            return null;
        }

        //Steepest-descent direction: negated gradient, scaled to unit length.
        DV negGrad = 0 - AD.Grad(f, x[k - 1]);
        calcsGradient++;
        double gradNorm = Math.Sqrt(AD.Pow(negGrad[0], 2) + AD.Pow(negGrad[1], 2));
        DV unitDir = negGrad / gradNorm;

        //One-dimensional objective along the descent ray; the step length is
        //found by golden-ratio minimization restricted to [0, 1].
        DV basePoint = x[k - 1];
        Func <D, D> lineObjective = a => f(basePoint + (a * unitDir));
        var search = UnimodalMinimization.goldenRatioSearch(lineObjective, 0, 1, accuracy);
        double stepLength = (search.a + search.b) / 2;   //midpoint of the final bracket
        calcsF += search.CalculationsUntilAnswer;        //f-evaluations spent inside the line search

        //Advance to the line-search minimum.
        x[k] = x[k - 1] + (stepLength * unitDir);
        fx[k] = f(x[k]);
        calcsF++;

        //Converged if the gradient magnitude meets the tolerance...
        if (gradNorm < accuracy)
        {
            break;
        }
        //...or if the iterate barely moved this step.
        DV move = AD.Abs(x[k] - x[k - 1]);
        if ((move[0] < accuracy) && (move[1] < accuracy))
        {
            break;
        }
    }

    //Trim the unused preallocated slots and report the minimizer.
    x = x.Take(k + 1).ToArray();
    fx = fx.Take(k + 1).ToArray();
    return x[k];
}
/// <summary>
/// Second-order (Newton-type) minimization: each full step divides every gradient
/// component by the corresponding Hessian diagonal entry, then the actual step
/// length is refined with the division line-search method.
/// </summary>
/// <param name="f">Objective function to minimize.</param>
/// <param name="startPoint">Initial guess for the minimizer.</param>
/// <param name="accuracy">Tolerance for the gradient magnitude (movement check uses accuracy * 0.1).</param>
/// <param name="calcsF">Out: number of objective-function evaluations performed.</param>
/// <param name="calcsGradient">Out: number of gradient evaluations performed.</param>
/// <param name="calcsHessian">Out: number of Hessian evaluations performed.</param>
/// <param name="x">Out: the sequence of iterates visited.</param>
/// <param name="fx">Out: f evaluated at each iterate.</param>
/// <returns>The final iterate.</returns>
public static DV SecondOrder_DivisionMethod(Func <DV, D> f, DV startPoint, double accuracy, out int calcsF, out int calcsGradient, out int calcsHessian, out DV[] x, out double[] fx)
{
    //Counters
    calcsF = 0;        //Count how many times the objective function was used.
    calcsGradient = 0; //Count how many times the gradient was calculated.
    calcsHessian = 0;  //Count how many times the second gradient was calculated.

    //Define our X vector (preallocated iteration history).
    //NOTE(review): unlike FirstOrder_OneDimensionalMethod there is no NaN/maxIterations
    //guard here — a non-converging run would overrun the arrays. Confirm and align.
    int maxIterations = 10000;
    x = new DV[maxIterations];
    fx = new double[maxIterations];

    //Pick an initial guess for x
    int i = 0;
    x[i] = startPoint;
    fx[i] = f(x[i]); calcsF++;

    //Loop through gradient steps until zeros are found
    double alpha = 1;
    while (true)
    {
        //Compute next step, using previous step
        i++;

        //Step 1 - Determine the gradients
        DV gradient = AD.Grad(f, x[i - 1]); calcsGradient++;
        var hess = AD.Hessian(f, x[i - 1]); calcsHessian++;

        //Step 2 - Compute full step (alpha = 1). Loop through every entry in the DV and compute the step for each one.
        //NOTE(review): the DV dimension is discovered by indexing until an exception is
        //thrown — exceptions as control flow. If DV exposes a length/count property,
        //prefer iterating over it directly.
        List <D> listSteps = new List <D>();
        while (true)
        {
            try
            {
                int c = listSteps.Count;
                listSteps.Add(-gradient[c] / hess[c, c]); // first-gradient divided by second-gradient
            }
            catch
            {
                break; //index out of range: every component has been processed
            }
        }
        DV fullStep = new DV(listSteps.ToArray());

        //Step 3 - Division method, to compute the new x[i] and fx[i]
        DV xPrev = x[i - 1];
        Func <D, D> objFAlpha = delegate(D a)
        {
            DV xNext = xPrev + (a * fullStep);
            return(f(xNext));
        };
        //Shrink the initial trial step each outer iteration.
        alpha = alpha * 0.8;
        double beta = UnimodalMinimization.DivisionSearch(objFAlpha, fx[i - 1], alpha, out fx[i], ref calcsF);
        x[i] = x[i - 1] + (beta * fullStep);

        //Check if accuracy has been met.
        //NOTE(review): magnitude assumes a 2-component DV, although the step loop
        //above handles arbitrary dimensions — confirm intended dimensionality.
        double magGradient = Math.Sqrt(AD.Pow(gradient[0], 2) + AD.Pow(gradient[1], 2));
        if (magGradient < accuracy)
        {
            break;
        }
        //Secondary stop: the iterate barely moved (tighter tolerance than the gradient check).
        DV dx = AD.Abs(x[i] - x[i - 1]);
        if ((dx[0] < accuracy * 0.1) && (dx[1] < accuracy * 0.1))
        {
            break;
        }
    }

    //Return the minimization point (trim unused preallocated slots).
    x = x.Take(i + 1).ToArray();
    fx = fx.Take(i + 1).ToArray();
    return(x[i]);
}
/// <summary>
/// Driver: runs the three one-dimensional minimization methods (direct uniform,
/// dichotomy, golden ratio) on the class objective f(x) = 10*x*ln(x) - x^2/2 over
/// [0.2, 1] at several accuracy levels, printing a results table for each.
/// </summary>
static void Main(string[] args)
{
    //Objective function for class: f(x) = 10*x*ln(x) - x^2/2, x in [0.2, 1].
    //(An earlier test objective, (x-7)^2 over [-100, 100], was removed.)
    List <double> epsValues = new List <double> { 0.1, 0.01, 0.001 }; //accuracy levels
    double a = 0.2, b = 1;                                            //search interval
    Func <D, D> f = delegate(D x)
    {
        double Fx = 10 * x * AD.Log(x) - AD.Pow(x, 2) / 2;
        return(Fx);
    };

    #region Direct Uniform Search
    //Table header, then one row per (accuracy, interval-count) combination.
    Console.WriteLine("-----Direct Uniform Search-----");
    Console.WriteLine((new UMM.SearchResult()).getTableHeader()); // console
    List <int> intervals = new List <int> { 6, 10, 15, 20, 25, 30, 35, 40, 45, 50 };
    foreach (double eps in epsValues)
    {
        foreach (int n in intervals)
        {
            UMM.SearchResult uniformResult = UMM.directUniformSearch(f, a, b, n, eps);
            Console.WriteLine(uniformResult.getTabbedResults());
        }
    }
    #endregion

    #region Dichotomy Search
    //One row per accuracy level.
    Console.WriteLine();
    Console.WriteLine("-----Dichotomy Search-----");
    Console.WriteLine((new UMM.SearchResult()).getTableHeader()); // console
    foreach (double eps in epsValues)
    {
        UMM.SearchResult dichotomyResult = UMM.dichotomySearch(f, a, b, eps);
        Console.WriteLine(dichotomyResult.getTabbedResults());
    }
    #endregion

    #region Golden Ratio Search
    //One row per accuracy level.
    Console.WriteLine();
    Console.WriteLine("-----Golden Ratio Search-----");
    Console.WriteLine((new UMM.SearchResult()).getTableHeader()); // console
    foreach (double eps in epsValues)
    {
        UMM.SearchResult goldenResult = UMM.goldenRatioSearch(f, a, b, eps);
        Console.WriteLine(goldenResult.getTabbedResults());
    }
    #endregion

    //Wait for user to exit
    Console.ReadKey();
}