public void minimize_test()
{
    #region doc_minimize
    // Example from https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm

    // In this example, the Gauss–Newton algorithm will be used to fit a model to
    // some data by minimizing the sum of squares of errors between the data and
    // the model's predictions.

    // In a biology experiment studying the relation between substrate concentration [S]
    // and reaction rate in an enzyme-mediated reaction, the data in the following table
    // were obtained:

    double[][] inputs = Jagged.ColumnVector(new[] { 0.03, 0.1947, 0.425, 0.626, 1.253, 2.500, 3.740 });
    double[] outputs = new[] { 0.05, 0.127, 0.094, 0.2122, 0.2729, 0.2665, 0.3317 };

    // It is desired to find a curve (model function) of the form
    //
    //   rate = \frac{V_{max}[S]}{K_M+[S]}
    //
    // that best fits the data in the least-squares sense, with the parameters V_max
    // and K_M to be determined. Let's start by writing the model equation below:

    LeastSquaresFunction function = (double[] parameters, double[] input) =>
    {
        return (parameters[0] * input[0]) / (parameters[1] + input[0]);
    };

    // Now, we can either write the gradient function of the model by hand or let
    // the model compute it automatically using a finite-differences approximation:

    LeastSquaresGradientFunction gradient = (double[] parameters, double[] input, double[] result) =>
    {
        result[0] = -((-input[0]) / (parameters[1] + input[0]));
        result[1] = -((parameters[0] * input[0]) / Math.Pow(parameters[1] + input[0], 2));
    };

    // Create a new Gauss-Newton algorithm
    var gn = new GaussNewton(parameters: 2)
    {
        Function = function,
        Gradient = gradient,
        Solution = new[] { 0.9, 0.2 } // starting from b1 = 0.9 and b2 = 0.2
    };

    // Find the minimum value:
    gn.Minimize(inputs, outputs);

    // The solution will be at:
    double b1 = gn.Solution[0]; // will be 0.362
    double b2 = gn.Solution[1]; // will be 0.556
    #endregion

    Assert.AreEqual(0.362, b1, 1e-3);
    Assert.AreEqual(0.556, b2, 3e-3);
}
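For reference, the hand-written gradient above is simply the pair of partial derivatives of the rate model r(x; β) = β1·x / (β2 + x) with respect to β1 (= V_max) and β2 (= K_M), which is exactly what result[0] and result[1] store:

    \frac{\partial r}{\partial \beta_1} = \frac{x}{\beta_2 + x},
    \qquad
    \frac{\partial r}{\partial \beta_2} = -\frac{\beta_1 x}{(\beta_2 + x)^2}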
static void Main(string[] args)
{
    // One-dimensional sample points
    var point1 = new Vector<double>(0d);
    var point2 = new Vector<double>(2d);
    var point3 = new Vector<double>(3d);
    var point4 = new Vector<double>(1d);

    // Least-squares (L2) functional built from (point, target value) pairs
    var functional = new L2Functional<double>((point1, 1d), (point2, 9d), (point3, 16d), (point4, 4d));

    // Gauss-Newton optimizer (presumably: maximum iterations, convergence tolerance)
    var optimizer = new GaussNewton<double>(1000, 1e-14);

    // Fit a polynomial model, starting from the coefficients { 1, 0, 1 }
    var value = optimizer.Minimize(functional, new PolynomialFunction<double>(), new Vector<double>(new[] { 1d, 0d, 1d }));

    Console.WriteLine(JsonSerializer.Serialize(value));
}
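This snippet uses a different Gauss-Newton implementation than the Accord.NET examples. Assuming the two constructor arguments are a maximum iteration count (1000) and a convergence tolerance (1e-14), which is inferred from the call site rather than from documentation, the iteration such an optimizer repeats is the classic Gauss-Newton update

    \beta^{(k+1)} = \beta^{(k)} + \left(J_k^\top J_k\right)^{-1} J_k^\top \left(y - f\!\left(\beta^{(k)}\right)\right)

where J_k is the Jacobian of the model outputs with respect to the coefficients, evaluated at β^(k), and the loop stops once the change falls below the tolerance or the iteration budget is exhausted.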
static void PolynomialTest()
{
    // y = -x*x + 2*x + 3
    double[] X = { 1, 2, 3, 8 };
    double[] Y = { 4, 3, 0, -45 };

    // f(x) = A*x*x + B*x + C
    GaussNewton.F f = delegate(double[] coefficients, double x)
    {
        return coefficients[0] * x * x + coefficients[1] * x + coefficients[2];
    };

    GaussNewton gaussNewton = new GaussNewton(3);
    gaussNewton.Initialize(Y, X, f);

    double[] answer = gaussNewton.Coefficients; // A = -1, B = 2, C = 3
}
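The expected coefficients are exact because the four sample points were generated directly from y = -x^2 + 2x + 3, as a quick check confirms:

    f(1) = -1 + 2 + 3 = 4, \quad f(2) = -4 + 4 + 3 = 3, \quad f(3) = -9 + 6 + 3 = 0, \quad f(8) = -64 + 16 + 3 = -45

which matches Y exactly, so the least-squares residual at (A, B, C) = (-1, 2, 3) is zero.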
static void _Main()
{
    double[] Y =
    {
        9999992.348, 9999992.35,  9999992.354, 9999992.359, 9999992.361, 9999992.365,
        9999992.366, 9999992.37,  9999992.371, 9999992.374, 9999992.376, 9999992.377,
        9999992.379, 9999992.38,  9999992.382, 9999992.384, 9999992.386, 9999992.387,
        9999992.389, 9999992.39,  9999992.39,  9999992.392, 9999992.392
    };

    double[] X = new double[Y.Length];
    for (int i = 0; i < X.Length; i++)
    {
        X[i] = i + 1;
    }

    // f(x) = A ln(x) + B
    GaussNewton.F f = delegate(double[] coefficients, double x)
    {
        return coefficients[0] * Math.Log(x) + coefficients[1];
    };

    GaussNewton gaussNewton = new GaussNewton(2);
    gaussNewton.Initialize(Y, X, f);

    double[] answer = gaussNewton.Coefficients; // answer[0] = 0.016, answer[1] = 9999992.3386
}
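Since f(x) = A·ln(x) + B is linear in its coefficients, this least-squares problem also has a closed-form solution, and Gauss-Newton should reach it essentially in a single step. A minimal sketch of that closed-form cross-check, using only plain .NET (the helper name is made up for illustration):

// Closed-form least squares for f(x) = A*ln(x) + B, which is a straight line
// in the transformed variable t = ln(x). Hypothetical helper for cross-checking
// the GaussNewton result above.
static double[] FitLogModel(double[] Y, double[] X)
{
    int n = X.Length;
    double sumT = 0, sumY = 0, sumTT = 0, sumTY = 0;
    for (int i = 0; i < n; i++)
    {
        double t = Math.Log(X[i]);   // regressor: ln(x)
        sumT  += t;
        sumY  += Y[i];
        sumTT += t * t;
        sumTY += t * Y[i];
    }

    // Normal-equation solution for the line y = A*t + B
    double A = (n * sumTY - sumT * sumY) / (n * sumTT - sumT * sumT);
    double B = (sumY - A * sumT) / n;

    // Should agree with the GaussNewton answer above (≈ 0.016 and ≈ 9999992.339)
    return new[] { A, B };
}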
public void RunTest1()
{
    // Example from https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm
    double[,] data =
    {
        { 0.03, 0.1947, 0.425, 0.626, 1.253, 2.500, 3.740 },
        { 0.05, 0.127, 0.094, 0.2122, 0.2729, 0.2665, 0.3317 }
    };

    double[][] inputs = data.GetRow(0).ToJagged();
    double[] outputs = data.GetRow(1);

    // Michaelis-Menten rate model: r = β1 * x / (β2 + x)
    RegressionFunction rate = (double[] weights, double[] xi) =>
    {
        double x = xi[0];
        return (weights[0] * x) / (weights[1] + x);
    };

    // Analytic gradient; a finite-differences gradient is also computed here
    // so it can be compared against the hand-written derivatives
    RegressionGradientFunction grad = (double[] weights, double[] xi, double[] result) =>
    {
        double x = xi[0];

        FiniteDifferences diff = new FiniteDifferences(2);
        diff.Function = (bla) => rate(bla, xi);
        double[] compare = diff.Compute(weights);

        result[0] = -((-x) / (weights[1] + x));
        result[1] = -((weights[0] * x) / Math.Pow(weights[1] + x, 2));
    };

    NonlinearRegression regression = new NonlinearRegression(2, rate, grad);

    var gn = new GaussNewton(2);
    gn.ParallelOptions.MaxDegreeOfParallelism = 1;

    NonlinearLeastSquares nls = new NonlinearLeastSquares(regression, gn);

    Assert.IsTrue(nls.Algorithm is GaussNewton);

    regression.Coefficients[0] = 0.9; // β1
    regression.Coefficients[1] = 0.2; // β2

    // Run the fitting procedure several times; the sum-of-squares error
    // should never increase between successive runs
    int iterations = 10;
    double[] errors = new double[iterations];
    for (int i = 0; i < errors.Length; i++)
    {
        errors[i] = nls.Run(inputs, outputs);
    }

    double b1 = regression.Coefficients[0];
    double b2 = regression.Coefficients[1];

    Assert.AreEqual(0.362, b1, 1e-3);
    Assert.AreEqual(0.556, b2, 3e-3);

    for (int i = 1; i < errors.Length; i++)
    {
        Assert.IsTrue(errors[i - 1] >= errors[i]);
    }

    Assert.AreEqual(1.23859, regression.StandardErrors[0], 1e-3);
    Assert.AreEqual(6.06352, regression.StandardErrors[1], 3e-3);
}