public static void quad(int n, Func<double[], int, double> f, ref double[] x, double t,
        double h, double[] v, ref double[] q0, ref double[] q1, ref int nl, ref int nf,
        double dmin, double ldt, ref double fx, ref double qf1, ref double qa,
        ref double qb, ref double qc, ref double qd0, ref double qd1)

    //****************************************************************************80
    //
    //  Purpose:
    //
    //    QUAD seeks to minimize the scalar function F along a particular curve.
    //
    //  Discussion:
    //
    //    The minimizer to be sought is required to lie on a curve defined
    //    by Q0, Q1 and X.
    //
    //  Licensing:
    //
    //    This code is distributed under the GNU LGPL license.
    //
    //  Modified:
    //
    //    04 August 2016
    //
    //  Author:
    //
    //    Original FORTRAN77 version by Richard Brent.
    //    C++ version by John Burkardt.
    //
    //  Reference:
    //
    //    Richard Brent,
    //    Algorithms for Minimization with Derivatives,
    //    Prentice Hall, 1973,
    //    Reprinted by Dover, 2002.
    //
    //  Parameters:
    //
    //    Input, int N, the number of variables.
    //
    //    Input, double F ( double X[], int N ), the name of the function to
    //    be minimized.
    //
    //    Input/output, double X[N], ?
    //
    //    Input, double T, ?
    //
    //    Input, double H, ?
    //
    //    Input, double V[N,N], the matrix of search directions.
    //
    //    Input/output, double Q0[N], Q1[N], two auxiliary points used to define
    //    a curve through X.
    //
    //    Input/output, ref int NL, the number of linear searches.
    //
    //    Input/output, ref int NF, the number of function evaluations.
    //
    //    Input, double DMIN, an estimate for the smallest eigenvalue.
    //
    //    Input, double LDT, the length of the step.
    //
    //    Input/output, ref double FX, the value of F(X,N).
    //
    //    Input/output, ref double QF1, &QA, &QB, &QC, &QD0, &QD1 ?
    //
{
    int i;
    double s;

    double temp = fx;
    fx = qf1;
    qf1 = temp;

    for (i = 0; i < n; i++)
    {
        temp = x[i];
        x[i] = q1[i];
        q1[i] = temp;
    }

    qd1 = 0.0;
    for (i = 0; i < n; i++)
    {
        qd1 += (x[i] - q1[i]) * (x[i] - q1[i]);
    }

    qd1 = Math.Sqrt(qd1);

    if (qd0 <= 0.0 || qd1 <= 0.0 || nl < 3 * n * n)
    {
        fx = qf1;
        qa = 0.0;
        qb = 0.0;
        qc = 1.0;
    }
    else
    {
        const int jsearch = -1;
        const int nits = 2;
        s = 0.0;
        double l = qd1;
        double value = qf1;
        const bool fk = true;

        MINNY.minny(n, jsearch, nits, ref s, ref l, ref value, fk, f, x, t, h, v,
            q0, q1, ref nl, ref nf, dmin, ldt, ref fx, ref qa, ref qb, ref qc,
            ref qd0, ref qd1);

        qa = l * (l - qd1) / (qd0 + qd1) / qd0;
        qb = -(l + qd0) * (l - qd1) / qd1 / qd0;
        qc = (l + qd0) * l / qd1 / (qd0 + qd1);
    }

    qd0 = qd1;

    for (i = 0; i < n; i++)
    {
        s = q0[i];
        q0[i] = x[i];
        x[i] = qa * s + qb * x[i] + qc * q1[i];
    }
}
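//
//  A minimal usage sketch for praxis() below, assuming this file's methods all
//  live in one static class with "using System;" in scope (the enclosing class
//  and namespace are not shown here).  The helper name, the test function, and
//  the tolerance/step values are illustrative choices, not part of the original
//  library.
//
public static double praxis_usage_sketch()
{
    //
    //  Minimize F(X,N) = sum ( 1 <= I <= N ) I * X(I)^2, whose minimizer is X = 0.
    //
    Func<double[], int, double> f = (xx, nn) =>
    {
        double s = 0.0;
        for (int k = 0; k < nn; k++)
        {
            s += (k + 1) * xx[k] * xx[k];
        }
        return s;
    };

    int n = 3;
    double[] x = { -1.0, 1.5, 2.0 };
    //
    //  T0 is a tolerance on the minimizer, H0 the maximum expected step,
    //  PRIN = 0 suppresses printing.  On return, X holds the estimated
    //  minimizer and the return value is F evaluated there.
    //
    return praxis(1.0E-5, 1.0, n, 0, ref x, f);
}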
public static double praxis(double t0, double h0, int n, int prin, ref double[] x,
        Func<double[], int, double> f)

    //****************************************************************************80
    //
    //  Purpose:
    //
    //    PRAXIS seeks an N-dimensional minimizer X of a scalar function F(X).
    //
    //  Discussion:
    //
    //    PRAXIS returns the minimum of the function F(X,N) of N variables
    //    using the principal axis method.  The gradient of the function is
    //    not required.
    //
    //    The approximating quadratic form is
    //
    //      Q(x') = F(x,n) + (1/2) * (x'-x)' * A * (x'-x)
    //
    //    where X is the best estimate of the minimum and
    //
    //      A = inverse(V') * D * inverse(V)
    //
    //    V(*,*) is the matrix of search directions;
    //    D(*) is the array of second differences.
    //
    //    If F(X) has continuous second derivatives near X0, then A will tend
    //    to the hessian of F at X0 as X approaches X0.
    //
    //  Licensing:
    //
    //    This code is distributed under the GNU LGPL license.
    //
    //  Modified:
    //
    //    04 August 2016
    //
    //  Author:
    //
    //    Original FORTRAN77 version by Richard Brent.
    //    C++ version by John Burkardt.
    //
    //  Reference:
    //
    //    Richard Brent,
    //    Algorithms for Minimization with Derivatives,
    //    Prentice Hall, 1973,
    //    Reprinted by Dover, 2002.
    //
    //  Parameters:
    //
    //    Input, double T0, is a tolerance.  PRAXIS attempts to return
    //    praxis = f(x) such that if X0 is the true local minimum near X, then
    //    norm ( x - x0 ) < T0 + Math.Sqrt ( EPSILON ( X ) ) * norm ( X ),
    //    where EPSILON ( X ) is the machine precision for X.
    //
    //    Input, double H0, is the maximum step size.  H0 should be
    //    set to about the maximum distance from the initial guess to the minimum.
    //    If H0 is set too large or too small, the initial rate of
    //    convergence may be slow.
    //
    //    Input, int N, the number of variables.
    //
    //    Input, int PRIN, controls printing intermediate results.
    //    0, nothing is printed.
    //    1, F is printed after every n+1 or n+2 linear minimizations.
    //       final X is printed, but intermediate X is printed only
    //       if N is at most 4.
    //    2, the scale factors and the principal values of the approximating
    //       quadratic form are also printed.
    //    3, X is also printed after every few linear minimizations.
    //    4, the principal vectors of the approximating quadratic form are
    //       also printed.
    //
    //    Input/output, double X[N], is an array containing on entry a
    //    guess of the point of minimum, on return the estimated point of minimum.
    //
    //    Input, double F ( double X[], int N ), is the name of the function to be
    //    minimized.
    //
    //    Output, double PRAXIS, the function value at the minimizer.
    //
    //  Local parameters:
    //
    //    Local, double DMIN, an estimate for the smallest eigenvalue.
    //
    //    Local, double FX, the value of F(X,N).
    //
    //    Local, bool ILLC, is TRUE if the system is ill-conditioned.
    //
    //    Local, double LDT, the length of the step.
    //
    //    Local, int NF, the number of function evaluations.
    //
    //    Local, int NL, the number of linear searches.
    //
{
    int i;
    int j;
    //
    //  Allocation.
    //
    double[] d = new double[n];
    double[] q0 = new double[n];
    double[] q1 = new double[n];
    double[] v = new double[n * n];
    double[] y = new double[n];
    double[] z = new double[n];
    //
    //  Initialization.
    //
    double machep = typeMethods.r8_epsilon();
    double small = machep * machep;
    double vsmall = small * small;
    double large = 1.0 / small;
    double vlarge = 1.0 / vsmall;
    double m2 = Math.Sqrt(machep);
    double m4;
    int seed = 123456789;
    //
    //  Heuristic numbers:
    //
    //  If the axes may be badly scaled (which is to be avoided if
    //  possible), then set SCBD = 10.  Otherwise set SCBD = 1.
    //
    //  If the problem is known to be ill-conditioned, initialize ILLC = true.
    //
    //  KTM is the number of iterations without improvement before the
    //  algorithm terminates.  KTM = 4 is very cautious; usually KTM = 1
    //  is satisfactory.
    //
    const double scbd = 1.0;
    bool illc = false;
    const int ktm = 1;

    double ldfac = illc switch
    {
        true => 0.1,
        _ => 0.01
    };

    int kt = 0;
    int nl = 0;
    int nf = 1;
    double fx = f(x, n);
    double qf1 = fx;
    double t = small + Math.Abs(t0);
    double t2 = t;
    double dmin = small;
    double h = h0;
    h = Math.Max(h, 100.0 * t);
    double ldt = h;
    //
    //  The initial set of search directions V is the identity matrix.
    //
    for (j = 0; j < n; j++)
    {
        for (i = 0; i < n; i++)
        {
            v[i + j * n] = 0.0;
        }

        v[j + j * n] = 1.0;
    }

    for (i = 0; i < n; i++)
    {
        d[i] = 0.0;
    }

    double qa = 0.0;
    double qb = 0.0;
    double qc = 0.0;
    double qd0 = 0.0;
    double qd1 = 0.0;
    typeMethods.r8vec_copy(n, x, ref q0);
    typeMethods.r8vec_copy(n, x, ref q1);

    switch (prin)
    {
        case > 0:
            print2(n, x, prin, fx, nf, nl);
            break;
    }
    //
    //  The main loop starts here.
    //
    for (;;)
    {
        double sf = d[0];
        d[0] = 0.0;
        //
        //  Minimize along the first direction V(*,1).
        //
        int jsearch = 0;
        int nits = 2;
        double d2 = d[0];
        double s = 0.0;
        double value = fx;
        bool fk = false;

        MINNY.minny(n, jsearch, nits, ref d2, ref s, ref value, fk, f, x, t, h, v,
            q0, q1, ref nl, ref nf, dmin, ldt, ref fx, ref qa, ref qb, ref qc,
            ref qd0, ref qd1);

        d[0] = d2;

        switch (s)
        {
            case <= 0.0:
            {
                for (i = 0; i < n; i++)
                {
                    v[i + 0 * n] = -v[i + 0 * n];
                }

                break;
            }
        }

        if (sf <= 0.9 * d[0] || d[0] <= 0.9 * sf)
        {
            for (i = 1; i < n; i++)
            {
                d[i] = 0.0;
            }
        }
        //
        //  The inner loop starts here.
        //
        int k;
        double sl;
        for (k = 2; k <= n; k++)
        {
            typeMethods.r8vec_copy(n, x, ref y);
            sf = fx;

            illc = kt switch
            {