Example #1
        private void EvaluateCovariance(IObjectiveModel objective)
        {
            objective.EvaluateAt(objective.Point); // re-evaluate: the Hessian may not be up to date yet.

            var Hessian = objective.Hessian;

            if (Hessian == null || objective.DegreeOfFreedom < 1)
            {
                Covariance     = null;
                Correlation    = null;
                StandardErrors = null;
                return;
            }

            Covariance = Hessian.PseudoInverse() * objective.Value / objective.DegreeOfFreedom;

            if (Covariance != null)
            {
                StandardErrors = Covariance.Diagonal().PointwiseSqrt();

                var correlation = Covariance.Clone();
                var d           = correlation.Diagonal().PointwiseSqrt();
                var dd          = d.OuterProduct(d);
                Correlation = correlation.PointwiseDivide(dd);
            }
            else
            {
                StandardErrors = null;
                Correlation    = null;
            }
        }
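As a side note, the recipe above (covariance = H⁺ · RSS / dof, standard errors from the diagonal, correlation by dividing out the outer product of the standard deviations) can be tried in isolation. A minimal sketch with a hand-built 2×2 Hessian; the RSS and degrees-of-freedom values are made up for illustration:

using MathNet.Numerics.LinearAlgebra;

class CovarianceSketch
{
    static void Main()
    {
        // made-up approximated Hessian H = J'WJ, residual sum of squares and degrees of freedom
        var hessian = Matrix<double>.Build.DenseOfArray(new double[,] { { 4.0, 1.0 }, { 1.0, 3.0 } });
        double rss = 0.5;
        int degreeOfFreedom = 8;

        // Covariance = H⁺ * RSS / dof, exactly as in EvaluateCovariance above
        var covariance = hessian.PseudoInverse() * rss / degreeOfFreedom;

        // standard errors are the square roots of the diagonal
        var standardErrors = covariance.Diagonal().PointwiseSqrt();

        // correlation: divide each entry by the product of the two standard deviations
        var d = covariance.Diagonal().PointwiseSqrt();
        var correlation = covariance.PointwiseDivide(d.OuterProduct(d));

        System.Console.WriteLine(standardErrors);
        System.Console.WriteLine(correlation);
    }
}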
        protected double EvaluateFunction(IObjectiveModel objective, Vector <double> Pint)
        {
            var Pext = ProjectToExternalParameters(Pint);

            objective.EvaluateAt(Pext);
            return(objective.Value);
        }
Example #3
        public void Solve(IObjectiveModel objective, double delta)
        {
            var Gradient = objective.Gradient;
            var Hessian  = objective.Hessian;

            // Gauss–Newton point: the full step obtained by solving the normal equations
            var Pgn = -Hessian.PseudoInverse() * Gradient; // Hessian.Solve(Gradient) fails so many times...

            // Cauchy point: Psd is the steepest-descent direction, alpha the exact
            // minimizer of the model along it, so alpha * Psd is the Cauchy step
            var alpha = Gradient.DotProduct(Gradient) / (Hessian * Gradient).DotProduct(Gradient);
            var Psd   = -Gradient;

            // update the step and the predicted reduction
            if (Pgn.L2Norm() <= delta)
            {
                // Pgn is inside trust region radius
                HitBoundary = false;
                Pstep       = Pgn;
            }
            else if (alpha * Psd.L2Norm() >= delta)
            {
                // the Cauchy point alpha * Psd is outside the trust region radius
                HitBoundary = true;
                Pstep       = delta / Psd.L2Norm() * Psd;
            }
            else
            {
                // Pstep is the intersection of the dogleg path with the trust-region boundary
                HitBoundary = true;
                var beta = Util.FindBeta(alpha, Psd, Pgn, delta).Item2;
                Pstep = alpha * Psd + beta * (Pgn - alpha * Psd);
            }
        }
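The three branches select the classic dogleg step. A self-contained sketch of the same selection for a 2×2 quadratic model; Util.FindBeta is internal to the library, so the boundary intersection ||Pc + β(Pgn − Pc)|| = Δ is solved here with an explicit quadratic formula:

using System;
using MathNet.Numerics.LinearAlgebra;

class DogLegSketch
{
    static void Main()
    {
        var hessian  = Matrix<double>.Build.DenseOfArray(new double[,] { { 2.0, 0.0 }, { 0.0, 10.0 } });
        var gradient = Vector<double>.Build.DenseOfArray(new[] { -4.0, -10.0 });
        double delta = 1.0; // trust-region radius

        var pgn = -hessian.PseudoInverse() * gradient;            // Gauss–Newton point
        var alpha = gradient.DotProduct(gradient)
                    / (hessian * gradient).DotProduct(gradient);  // exact step length along -g
        var pc = -alpha * gradient;                               // Cauchy point

        Vector<double> pstep;
        if (pgn.L2Norm() <= delta)
        {
            pstep = pgn;                                          // full Gauss–Newton step
        }
        else if (pc.L2Norm() >= delta)
        {
            pstep = delta / pc.L2Norm() * pc;                     // clipped steepest descent
        }
        else
        {
            // solve ||pc + beta * (pgn - pc)|| = delta for the beta in (0, 1)
            var dvec = pgn - pc;
            double a = dvec.DotProduct(dvec);
            double b = 2.0 * pc.DotProduct(dvec);
            double c = pc.DotProduct(pc) - delta * delta;
            double beta = (-b + Math.Sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
            pstep = pc + beta * dvec;
        }

        Console.WriteLine(pstep); // the dogleg step, with ||pstep|| <= delta
    }
}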
        protected static Tuple <Vector <double>, Matrix <double> > EvaluateJacobian(IObjectiveModel objective, Vector <double> Pint)
        {
            var gradient = objective.Gradient;
            var hessian  = objective.Hessian;

            if (IsBounded)
            {
                var scaleFactors = ScaleFactorsOfJacobian(Pint); // the parameters argument is always internal.

                for (int i = 0; i < gradient.Count; i++)
                {
                    gradient[i] = gradient[i] * scaleFactors[i];
                }

                for (int i = 0; i < hessian.RowCount; i++)
                {
                    for (int j = 0; j < hessian.ColumnCount; j++)
                    {
                        hessian[i, j] = hessian[i, j] * scaleFactors[i] * scaleFactors[j];
                    }
                }
            }

            return(new Tuple <Vector <double>, Matrix <double> >(gradient, hessian));
        }
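EvaluateJacobian rescales from external to internal coordinates via the chain rule: with Pext = T(Pint) and scale factors sᵢ = dTᵢ/dPintᵢ, each gradient entry picks up a factor sᵢ and each Hessian entry a factor sᵢ·sⱼ, exactly as in the two loops above. A hedged sketch for a two-sided bound using a sine transform; the library's actual ScaleFactorsOfJacobian may use a different mapping:

using System;
using MathNet.Numerics.LinearAlgebra;

static class BoundScaling
{
    // hypothetical two-sided bound transform: Pext = lb + (ub - lb) / 2 * (sin(Pint) + 1)
    static double ToExternal(double pint, double lb, double ub)
        => lb + (ub - lb) / 2.0 * (Math.Sin(pint) + 1.0);

    // the scale factor is the derivative dPext/dPint of that transform
    static double ScaleFactor(double pint, double lb, double ub)
        => (ub - lb) / 2.0 * Math.Cos(pint);

    static void Main()
    {
        double lb = 0.0, ub = 10.0, pint = 0.3;
        double s = ScaleFactor(pint, lb, ub);

        // a one-parameter example: rescale gradient and Hessian as EvaluateJacobian does
        var gradient = Vector<double>.Build.Dense(1, 2.5);
        var hessian  = Matrix<double>.Build.Dense(1, 1, 4.0);
        gradient[0]   *= s;
        hessian[0, 0] *= s * s;

        Console.WriteLine($"Pext = {ToExternal(pint, lb, ub)}, gint = {gradient[0]}, Hint = {hessian[0, 0]}");
    }
}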
Example #5
        public NonlinearMinimizationResult(IObjectiveModel modelInfo, int iterations, ExitCondition reasonForExit)
        {
            ModelInfoAtMinimum = modelInfo;
            Iterations         = iterations;
            ReasonForExit      = reasonForExit;

            EvaluateCovariance(modelInfo);
        }
Example #6
        public NonlinearMinimizationResult FindMinimum(IObjectiveModel objective, double[] initialGuess,
                                                       double[] lowerBound = null, double[] upperBound = null, double[] scales = null, bool[] isFixed = null)
        {
            var lb = (lowerBound == null) ? null : CreateVector.Dense <double>(lowerBound);
            var ub = (upperBound == null) ? null : CreateVector.Dense <double>(upperBound);
            var sc = (scales == null) ? null : CreateVector.Dense <double>(scales);
            var fx = (isFixed == null) ? null : isFixed.ToList();

            return(Minimum(Subproblem, objective, CreateVector.DenseOfArray <double>(initialGuess), lb, ub, sc, fx,
                           GradientTolerance, StepTolerance, FunctionTolerance, RadiusTolerance, MaximumIterations));
        }
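A hedged end-to-end usage sketch for this wrapper, assuming the library's dogleg trust-region minimizer (TrustRegionDogLegMinimizer) and the NonlinearModel overload with explicit derivatives shown elsewhere in these examples; the model y = a * exp(b * x) and the data values are illustrative:

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.Optimization;

class TrustRegionUsage
{
    static void Main()
    {
        var x = Vector<double>.Build.DenseOfArray(new[] { 0.0, 1.0, 2.0, 3.0 });
        var y = Vector<double>.Build.DenseOfArray(new[] { 2.0, 1.2, 0.7, 0.4 });

        var objective = ObjectiveFunction.NonlinearModel(
            new Func<Vector<double>, Vector<double>, Vector<double>>(
                (p, t) => p[0] * (p[1] * t).PointwiseExp()),                   // f(x; p) = a * exp(b * x)
            new Func<Vector<double>, Vector<double>, Matrix<double>>(
                (p, t) =>
                {
                    var j = Matrix<double>.Build.Dense(t.Count, 2);
                    j.SetColumn(0, (p[1] * t).PointwiseExp());                 // df/da
                    j.SetColumn(1, p[0] * t.PointwiseMultiply((p[1] * t).PointwiseExp())); // df/db
                    return j;
                }),
            x, y);

        var solver = new TrustRegionDogLegMinimizer();
        var result = solver.FindMinimum(objective, new[] { 1.0, -1.0 });

        Console.WriteLine(result.MinimizingPoint);
        Console.WriteLine(result.ReasonForExit);
    }
}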
Example #7
        public void Solve(IObjectiveModel objective, double delta)
        {
            var Gradient = objective.Gradient;
            var Hessian  = objective.Hessian;

            // define tolerance
            var gnorm     = Gradient.L2Norm();
            var tolerance = Math.Min(0.5, Math.Sqrt(gnorm)) * gnorm;

            // initialize internal variables
            var z = Vector <double> .Build.Dense(Hessian.RowCount);

            var r = Gradient;
            var d = -r;

            while (true)
            {
                var Bd  = Hessian * d;
                var dBd = d.DotProduct(Bd);

                if (dBd <= 0)
                {
                    // negative curvature along d: step along d to the trust-region boundary
                    var t = Util.FindBeta(1, z, d, delta);
                    Pstep       = z + t.Item1 * d;
                    HitBoundary = true;
                    return;
                }

                var r_sq  = r.DotProduct(r);
                var alpha = r_sq / dBd;
                var znext = z + alpha * d;
                // the next CG iterate would leave the trust region: clip to the boundary
                if (znext.L2Norm() >= delta)
                {
                    var t = Util.FindBeta(1, z, d, delta);
                    Pstep       = z + t.Item2 * d;
                    HitBoundary = true;
                    return;
                }

                var rnext    = r + alpha * Bd;
                var rnext_sq = rnext.DotProduct(rnext);
                // the residual is small enough: converged strictly inside the region
                if (Math.Sqrt(rnext_sq) < tolerance)
                {
                    Pstep       = znext;
                    HitBoundary = false;
                    return;
                }

                z = znext;
                r = rnext;
                d = -rnext + rnext_sq / r_sq * d;
            }
        }
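The loop above is Steihaug-Toint truncated conjugate gradients: plain CG on the quadratic model, stopped early at negative curvature or at the trust-region boundary. A self-contained rendering for a small dense system, with the boundary intersection ||z + t·d|| = Δ solved explicitly instead of through Util.FindBeta:

using System;
using MathNet.Numerics.LinearAlgebra;

class SteihaugSketch
{
    // returns the positive t with ||z + t * d|| = delta
    static double BoundaryStep(Vector<double> z, Vector<double> d, double delta)
    {
        double a = d.DotProduct(d);
        double b = 2.0 * z.DotProduct(d);
        double c = z.DotProduct(z) - delta * delta;
        return (-b + Math.Sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
    }

    static Vector<double> Solve(Matrix<double> hessian, Vector<double> gradient, double delta)
    {
        var tolerance = Math.Min(0.5, Math.Sqrt(gradient.L2Norm())) * gradient.L2Norm();
        var z = Vector<double>.Build.Dense(hessian.RowCount);
        var r = gradient.Clone();
        var d = -r;

        while (true)
        {
            var Bd  = hessian * d;
            var dBd = d.DotProduct(Bd);
            if (dBd <= 0)
                return z + BoundaryStep(z, d, delta) * d; // negative curvature: go to the boundary

            var alpha = r.DotProduct(r) / dBd;
            var znext = z + alpha * d;
            if (znext.L2Norm() >= delta)
                return z + BoundaryStep(z, d, delta) * d; // iterate left the region: clip

            var rnext = r + alpha * Bd;
            if (rnext.L2Norm() < tolerance)
                return znext;                             // converged inside the region

            d = -rnext + (rnext.DotProduct(rnext) / r.DotProduct(r)) * d;
            z = znext;
            r = rnext;
        }
    }

    static void Main()
    {
        var H = Matrix<double>.Build.DenseOfArray(new double[,] { { 4.0, 1.0 }, { 1.0, 3.0 } });
        var g = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0 });
        Console.WriteLine(Solve(H, g, 0.5)); // a step with norm <= 0.5
    }
}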
        public NonlinearMinimizationResult FindMinimum(IObjectiveModel objective, double[] initialGuess,
                                                       double[] lowerBound = null, double[] upperBound = null, double[] scales = null, bool[] isFixed = null)
        {
            if (objective == null)
            {
                throw new ArgumentNullException("objective");
            }
            if (initialGuess == null)
            {
                throw new ArgumentNullException("initialGuess");
            }

            var lb = (lowerBound == null) ? null : CreateVector.Dense <double>(lowerBound);
            var ub = (upperBound == null) ? null : CreateVector.Dense <double>(upperBound);
            var sc = (scales == null) ? null : CreateVector.Dense <double>(scales);
            var fx = (isFixed == null) ? null : isFixed.ToList();

            return(Minimum(objective, CreateVector.DenseOfArray <double>(initialGuess), lb, ub, sc, fx, InitialMu, GradientTolerance, StepTolerance, FunctionTolerance, MaximumIterations));
        }
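For reference, a hedged end-to-end call of this FindMinimum overload against a straight-line model y = p0 + p1 * x with an exact Jacobian; the data values are illustrative:

using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.Optimization;

class LevenbergMarquardtUsage
{
    static void Main()
    {
        var x = Vector<double>.Build.DenseOfArray(new[] { 1.0, 2.0, 3.0, 4.0 });
        var y = Vector<double>.Build.DenseOfArray(new[] { 3.1, 4.9, 7.2, 8.8 });

        var objective = ObjectiveFunction.NonlinearModel(
            new Func<Vector<double>, Vector<double>, Vector<double>>(
                (p, t) => (p[1] * t).Add(p[0])),                              // f(x; p) = p0 + p1 * x
            new Func<Vector<double>, Vector<double>, Matrix<double>>(
                (p, t) =>
                {
                    var j = Matrix<double>.Build.Dense(t.Count, 2);
                    j.SetColumn(0, Vector<double>.Build.Dense(t.Count, 1.0)); // df/dp0
                    j.SetColumn(1, t);                                        // df/dp1
                    return j;
                }),
            x, y);

        var lm = new LevenbergMarquardtMinimizer(maximumIterations: 50);
        var result = lm.FindMinimum(objective, new[] { 0.0, 1.0 });

        Console.WriteLine(result.MinimizingPoint); // ≈ (intercept, slope)
        Console.WriteLine(result.ReasonForExit);
    }
}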
Example #9
        internal static double Fit(IEnumerable <GeoPoint2D> points, ref GeoPoint2D center2d, ref double radius)
        {
            GeoPoint2D[] pnts = null;
            if (points is GeoPoint2D[] a)
            {
                pnts = a;
            }
            else if (points is List <GeoPoint2D> l)
            {
                pnts = l.ToArray();
            }
            else
            {
                List <GeoPoint2D> lp = new List <GeoPoint2D>();
                foreach (GeoPoint2D point2D in points)
                {
                    lp.Add(point2D);
                }
                pnts = lp.ToArray();
            }
            Vector <double>             observedX = new DenseVector(pnts.Length); // there is no need to set values
            Vector <double>             observedY = new DenseVector(pnts.Length); // this is the data we want to achieve, namely 0.0
            LevenbergMarquardtMinimizer lm        = new LevenbergMarquardtMinimizer(gradientTolerance: 1e-12, maximumIterations: 20);
            IObjectiveModel             iom       = ObjectiveFunction.NonlinearModel(
                new Func <Vector <double>, Vector <double>, Vector <double> >(delegate(Vector <double> vd, Vector <double> ox) // function
            {
                // parameters: 0: cx, 1: cy, 2: radius
                GeoPoint2D cnt  = new GeoPoint2D(vd[0], vd[1]);
                DenseVector res = new DenseVector(pnts.Length);
                for (int i = 0; i < pnts.Length; i++)
                {
                    res[i] = (pnts[i] | cnt) - vd[2]; // '|' is the distance between the two points
                }
#if DEBUG
                double err = 0.0;
                for (int i = 0; i < pnts.Length; i++)
                {
                    err += res[i] * res[i];
                }
#endif
                return(res);
            }),
                new Func <Vector <double>, Vector <double>, Matrix <double> >(delegate(Vector <double> vd, Vector <double> ox) // derivatives
            {
                // parameters: 0: cx, 1: cy, 2: radius
                GeoPoint2D cnt = new GeoPoint2D(vd[0], vd[1]);
                var prime      = new DenseMatrix(pnts.Length, 3);
                for (int i = 0; i < pnts.Length; i++)
                {
                    double d    = pnts[i] | cnt;
                    prime[i, 0] = -(pnts[i].x - vd[0]) / d;
                    prime[i, 1] = -(pnts[i].y - vd[1]) / d;
                    prime[i, 2] = -1;
                }
                return(prime);
            }), observedX, observedY);
            NonlinearMinimizationResult mres = lm.FindMinimum(iom, new DenseVector(new double[] { center2d.x, center2d.y, radius }));

            if (mres.ReasonForExit == ExitCondition.Converged || mres.ReasonForExit == ExitCondition.RelativeGradient)
            {
                center2d = new GeoPoint2D(mres.MinimizingPoint[0], mres.MinimizingPoint[1]);
                radius   = mres.MinimizingPoint[2];
                double err = 0.0;
                for (int i = 0; i < pnts.Length; i++)
                {
                    err += Math.Abs((pnts[i] | center2d) - radius);
                }
                return(err);
            }
            else
            {
                return(double.MaxValue);
            }
        }
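A hypothetical caller of this Fit helper, assuming the surrounding CADability code base (GeoPoint2D and its '|' distance operator); the sample points are synthetic:

        // a hypothetical caller inside the same CADability code base
        internal static void FitExample()
        {
            GeoPoint2D center = new GeoPoint2D(0.1, -0.1); // rough initial guess
            double radius = 1.0;                           // rough initial guess
            GeoPoint2D[] sample = new GeoPoint2D[]
            {
                new GeoPoint2D(1.01, 0.02), new GeoPoint2D(-0.02, 0.99),
                new GeoPoint2D(-1.0, 0.01), new GeoPoint2D(0.03, -1.02)
            };
            double error = Fit(sample, ref center, ref radius);
            if (error < double.MaxValue)
            {
                // center and radius now hold the refined circle;
                // error is the sum of absolute radial deviations
            }
        }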
        /// <summary>
        /// Non-linear least squares fitting by the Levenberg-Marquardt algorithm.
        /// </summary>
        /// <param name="objective">The objective function, including model, observations, and parameter bounds.</param>
        /// <param name="initialGuess">The initial guess values.</param>
        /// <param name="initialMu">The initial damping parameter of mu.</param>
        /// <param name="gradientTolerance">The stopping threshold for infinity norm of the gradient vector.</param>
        /// <param name="stepTolerance">The stopping threshold for L2 norm of the change of parameters.</param>
        /// <param name="functionTolerance">The stopping threshold for L2 norm of the residuals.</param>
        /// <param name="maximumIterations">The max iterations.</param>
        /// <returns>The result of the Levenberg-Marquardt minimization</returns>
        public static NonlinearMinimizationResult Minimum(IObjectiveModel objective, Vector <double> initialGuess,
                                                          Vector <double> lowerBound = null, Vector <double> upperBound = null, Vector <double> scales = null, List <bool> isFixed      = null,
                                                          double initialMu           = 1E-3, double gradientTolerance   = 1E-15, double stepTolerance = 1E-15, double functionTolerance = 1E-15, int maximumIterations = -1)
        {
            // Non-linear least squares fitting by the Levenberg-Marquardt algorithm.
            //
            // Levenberg-Marquardt is finding the minimum of a function F(p) that is a sum of squares of nonlinear functions.
            //
            // For given data pairs (x, y), uncertainties σ (or weighting W = 1/σ^2) and a model function f = f(x; p),
            // find the parameters of the model so that the sum of the squares of the deviations is minimized.
            //
            //    F(p) = 1/2 * ∑{ Wi * (yi - f(xi; p))^2 }
            //    pbest = argmin F(p)
            //
            // We will use the following terms:
            //    The weighting W is a diagonal matrix and can be decomposed as LL', so L = 1/σ
            //    Residuals, R = L(y - f(x; p))
            //    Residual sum of squares, RSS = ||R||^2 = R.DotProduct(R)
            //    Jacobian J = df(x; p)/dp
            //    Gradient g = -J'W(y − f(x; p)) = -J'LR
            //    Approximated Hessian H = J'WJ
            //
            // The Levenberg-Marquardt algorithm is summarized as follows:
            //    initially let μ = τ * max(diag(H)).
            //    repeat
            //       solve linear equations: (H + μI)ΔP = -g
            //       let ρ = (||R||^2 - ||Rnew||^2) / (Δp'(μΔp - g)).
            //       if ρ > ε, P = P + ΔP; μ = μ * max(1/3, 1 - (2ρ - 1)^3); ν = 2;
            //       otherwise μ = μ*ν; ν = 2*ν;
            //
            // References:
            // [1]. Madsen, K., H. B. Nielsen, and O. Tingleff.
            //    "Methods for Non-Linear Least Squares Problems. Technical University of Denmark, 2004. Lecture notes." (2004).
            //    Available Online from: http://orbit.dtu.dk/files/2721358/imm3215.pdf
            // [2]. Gavin, Henri.
            //    "The Levenberg-Marquardt method for nonlinear least squares curve-fitting problems."
            //    Department of Civil and Environmental Engineering, Duke University (2017): 1-19.
            //    Available Online from: http://people.duke.edu/~hpgavin/ce281/lm.pdf

            if (objective == null)
            {
                throw new ArgumentNullException("objective");
            }

            ValidateBounds(initialGuess, lowerBound, upperBound, scales);

            objective.SetParameters(initialGuess, isFixed);

            ExitCondition exitCondition = ExitCondition.None;

            // First, calculate function values and setup variables
            var P     = ProjectToInternalParameters(initialGuess); // current internal parameters
            var Pstep = Vector <double> .Build.Dense(P.Count);     // the change of parameters

            var RSS = EvaluateFunction(objective, P);              // Residual Sum of Squares = R'R

            if (maximumIterations < 0)
            {
                maximumIterations = 200 * (initialGuess.Count + 1);
            }

            // if RSS == NaN, stop
            if (double.IsNaN(RSS))
            {
                exitCondition = ExitCondition.InvalidValues;
                return(new NonlinearMinimizationResult(objective, -1, exitCondition));
            }

            // When only a function evaluation is needed, set maximumIterations to zero.
            if (maximumIterations == 0)
            {
                exitCondition = ExitCondition.ManuallyStopped;
            }

            // if RSS <= fTol, stop
            if (RSS <= functionTolerance)
            {
                exitCondition = ExitCondition.Converged; // SmallRSS
            }

            // Evaluate gradient and Hessian
            var jac               = EvaluateJacobian(objective, P);
            var Gradient          = jac.Item1;          // objective.Gradient;
            var Hessian           = jac.Item2;          // objective.Hessian;
            var diagonalOfHessian = Hessian.Diagonal(); // diag(H)

            // if ||g||oo <= gtol, found and stop
            if (Gradient.InfinityNorm() <= gradientTolerance)
            {
                exitCondition = ExitCondition.RelativeGradient;
            }

            if (exitCondition != ExitCondition.None)
            {
                return(new NonlinearMinimizationResult(objective, -1, exitCondition));
            }

            double mu         = initialMu * diagonalOfHessian.Max(); // μ
            double nu         = 2;                                   //  ν
            int    iterations = 0;

            while (iterations < maximumIterations && exitCondition == ExitCondition.None)
            {
                iterations++;

                while (true)
                {
                    Hessian.SetDiagonal(Hessian.Diagonal() + mu); // hessian[i, i] = hessian[i, i] + mu;

                    // solve normal equations
                    Pstep = Hessian.Solve(-Gradient);

                    // if ||ΔP|| <= xTol * (||P|| + xTol), found and stop
                    if (Pstep.L2Norm() <= stepTolerance * (stepTolerance + P.L2Norm()))
                    {
                        exitCondition = ExitCondition.RelativePoints;
                        break;
                    }

                    var Pnew = P + Pstep; // new parameters to test
                    // evaluate function at Pnew
                    var RSSnew = EvaluateFunction(objective, Pnew);

                    if (double.IsNaN(RSSnew))
                    {
                        exitCondition = ExitCondition.InvalidValues;
                        break;
                    }

                    // calculate the ratio of the actual to the predicted reduction.
                    // ρ = (RSS - RSSnew) / (Δp'(μΔp - g))
                    var predictedReduction = Pstep.DotProduct(mu * Pstep - Gradient);
                    var rho = (predictedReduction != 0)
                            ? (RSS - RSSnew) / predictedReduction
                            : 0;

                    if (rho > 0.0)
                    {
                        // accepted
                        Pnew.CopyTo(P);
                        RSS = RSSnew;

                        // update gradient and Hessian
                        jac               = EvaluateJacobian(objective, P);
                        Gradient          = jac.Item1; // objective.Gradient;
                        Hessian           = jac.Item2; // objective.Hessian;
                        diagonalOfHessian = Hessian.Diagonal();

                        // if ||g||_oo <= gtol, found and stop
                        if (Gradient.InfinityNorm() <= gradientTolerance)
                        {
                            exitCondition = ExitCondition.RelativeGradient;
                        }

                        // if ||R||^2 < fTol, found and stop
                        if (RSS <= functionTolerance)
                        {
                            exitCondition = ExitCondition.Converged; // SmallRSS
                        }

                        mu = mu * Math.Max(1.0 / 3.0, 1.0 - Math.Pow(2.0 * rho - 1.0, 3));
                        nu = 2;

                        break;
                    }
                    else
                    {
                        // rejected, increase μ
                        mu = mu * nu;
                        nu = 2 * nu;

                        Hessian.SetDiagonal(diagonalOfHessian);
                    }
                }
            }

            if (iterations >= maximumIterations)
            {
                exitCondition = ExitCondition.ExceedIterations;
            }

            return(new NonlinearMinimizationResult(objective, iterations, exitCondition));
        }
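The μ/ν bookkeeping in the accept/reject branch is easy to trace in isolation. A minimal sketch of just that update rule, fed with a made-up sequence of gain ratios ρ:

using System;

class DampingUpdateSketch
{
    static void Main()
    {
        double mu = 1e-3, nu = 2;

        // an illustrative sequence of gain ratios ρ; the values are made up
        foreach (var rho in new[] { -0.5, 0.1, 0.9 })
        {
            if (rho > 0)
            {
                // accepted: shrink μ, reset ν
                mu *= Math.Max(1.0 / 3.0, 1.0 - Math.Pow(2.0 * rho - 1.0, 3));
                nu = 2;
            }
            else
            {
                // rejected: grow μ geometrically
                mu *= nu;
                nu *= 2;
            }
            Console.WriteLine($"rho = {rho}: mu = {mu}, nu = {nu}");
        }
    }
}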
 public NonlinearMinimizationResult FindMinimum(IObjectiveModel objective, Vector <double> initialGuess,
                                                Vector <double> lowerBound = null, Vector <double> upperBound = null, Vector <double> scales = null, List <bool> isFixed = null)
 {
     return(Minimum(objective, initialGuess, lowerBound, upperBound, scales, isFixed, InitialMu, GradientTolerance, StepTolerance, FunctionTolerance, MaximumIterations));
 }
Example #12
        /// <summary>
        /// Non-linear least squares fitting by the trust-region algorithm.
        /// </summary>
        /// <param name="objective">The objective model, including function, jacobian, observations, and parameter bounds.</param>
        /// <param name="subproblem">The subproblem</param>
        /// <param name="initialGuess">The initial guess values.</param>
        /// <param name="functionTolerance">The stopping threshold for L2 norm of the residuals.</param>
        /// <param name="gradientTolerance">The stopping threshold for infinity norm of the gradient vector.</param>
        /// <param name="stepTolerance">The stopping threshold for L2 norm of the change of parameters.</param>
        /// <param name="radiusTolerance">The stopping threshold for trust region radius</param>
        /// <param name="maximumIterations">The max iterations.</param>
        /// <returns></returns>
        public static NonlinearMinimizationResult Minimum(ITrustRegionSubproblem subproblem, IObjectiveModel objective, Vector <double> initialGuess,
                                                          Vector <double> lowerBound = null, Vector <double> upperBound = null, Vector <double> scales   = null, List <bool> isFixed    = null,
                                                          double gradientTolerance   = 1E-8, double stepTolerance       = 1E-8, double functionTolerance = 1E-8, double radiusTolerance = 1E-18, int maximumIterations = -1)
        {
            // Non-linear least squares fitting by the trust-region algorithm.
            //
            // For given data pairs (x, y), uncertainties σ (or weighting W = 1/σ^2) and a model function f = f(x; p),
            // find the parameters of the model so that the sum of the squares of the deviations is minimized.
            //
            //    F(p) = 1/2 * ∑{ Wi * (yi - f(xi; p))^2 }
            //    pbest = argmin F(p)
            //
            // Here, we will use the following terms:
            //    The weighting W is a diagonal matrix and can be decomposed as LL', so L = 1/σ
            //    Residuals, R = L(y - f(x; p))
            //    Residual sum of squares, RSS = ||R||^2 = R.DotProduct(R)
            //    Jacobian J = df(x; p)/dp
            //    Gradient g = -J'W(y − f(x; p)) = -J'LR
            //    Approximated Hessian H = J'WJ
            //
            // The trust region algorithm is summarized as follows:
            //    initially set trust-region radius, Δ
            //    repeat
            //       solve subproblem
            //       update Δ:
            //          let ρ = (RSS - RSSnew) / predRed
            //          if ρ > 0.75, Δ = 2Δ
            //          if ρ < 0.25, Δ = Δ/4
            //          if ρ > eta, P = P + ΔP
            //
            // References:
            // [1]. Madsen, K., H. B. Nielsen, and O. Tingleff.
            //    "Methods for Non-Linear Least Squares Problems. Technical University of Denmark, 2004. Lecture notes." (2004).
            //    Available Online from: http://orbit.dtu.dk/files/2721358/imm3215.pdf
            // [2]. Nocedal, Jorge, and Stephen J. Wright.
            //    Numerical optimization (2006): 101-134.
            // [3]. SciPy
            //    Available Online from: https://github.com/scipy/scipy/blob/master/scipy/optimize/_trustregion.py

            double maxDelta = 1000;
            double eta      = 0;

            if (objective == null)
            {
                throw new ArgumentNullException("objective");
            }

            ValidateBounds(initialGuess, lowerBound, upperBound, scales);

            objective.SetParameters(initialGuess, isFixed);

            ExitCondition exitCondition = ExitCondition.None;

            // First, calculate function values and setup variables
            var P     = ProjectToInternalParameters(initialGuess); // current internal parameters
            var Pstep = Vector <double> .Build.Dense(P.Count);     // the change of parameters

            var RSS = EvaluateFunction(objective, P);              // Residual Sum of Squares

            if (maximumIterations < 0)
            {
                maximumIterations = 200 * (initialGuess.Count + 1);
            }

            // if RSS == NaN, stop
            if (double.IsNaN(RSS))
            {
                exitCondition = ExitCondition.InvalidValues;
                return(new NonlinearMinimizationResult(objective, -1, exitCondition));
            }

            // When only a function evaluation is needed, set maximumIterations to zero.
            if (maximumIterations == 0)
            {
                exitCondition = ExitCondition.ManuallyStopped;
            }

            // if ||R||^2 <= fTol, stop
            if (RSS <= functionTolerance)
            {
                exitCondition = ExitCondition.Converged; // SmallRSS
            }

            // evaluate projected gradient and Hessian
            var jac      = EvaluateJacobian(objective, P);
            var Gradient = jac.Item1; // objective.Gradient;
            var Hessian  = jac.Item2; // objective.Hessian;

            // if ||g||_oo <= gtol, found and stop
            if (Gradient.InfinityNorm() <= gradientTolerance)
            {
                exitCondition = ExitCondition.RelativeGradient; // SmallGradient
            }

            if (exitCondition != ExitCondition.None)
            {
                return(new NonlinearMinimizationResult(objective, -1, exitCondition));
            }

            // initialize trust-region radius, Δ
            double delta = Gradient.DotProduct(Gradient) / (Hessian * Gradient).DotProduct(Gradient);

            delta = Math.Max(1.0, Math.Min(delta, maxDelta));

            int  iterations  = 0;
            bool hitBoundary = false;

            while (iterations < maximumIterations && exitCondition == ExitCondition.None)
            {
                iterations++;

                // solve the subproblem
                subproblem.Solve(objective, delta);
                Pstep       = subproblem.Pstep;
                hitBoundary = subproblem.HitBoundary;

                // predicted reduction = L(0) - L(Δp) = -Δp'g - 1/2 * Δp'HΔp
                var predictedReduction = -Gradient.DotProduct(Pstep) - 0.5 * Pstep.DotProduct(Hessian * Pstep);

                if (Pstep.L2Norm() <= stepTolerance * (stepTolerance + P.L2Norm()))
                {
                    exitCondition = ExitCondition.RelativePoints; // SmallRelativeParameters
                    break;
                }

                var Pnew = P + Pstep; // parameters to test
                // evaluate function at Pnew
                var RSSnew = EvaluateFunction(objective, Pnew);

                // if RSS == NaN, stop
                if (double.IsNaN(RSSnew))
                {
                    exitCondition = ExitCondition.InvalidValues;
                    break;
                }

                // calculate the ratio of the actual to the predicted reduction.
                double rho = (predictedReduction != 0)
                        ? (RSS - RSSnew) / predictedReduction
                        : 0.0;

                if (rho > 0.75 && hitBoundary)
                {
                    delta = Math.Min(2.0 * delta, maxDelta);
                }
                else if (rho < 0.25)
                {
                    delta = delta * 0.25;
                    if (delta <= radiusTolerance * (radiusTolerance + P.L2Norm()))
                    {
                        exitCondition = ExitCondition.LackOfProgress;
                        break;
                    }
                }

                if (rho > eta)
                {
                    // accepted
                    Pnew.CopyTo(P);
                    RSS = RSSnew;

                    // evaluate projected gradient and Hessian
                    jac      = EvaluateJacobian(objective, P);
                    Gradient = jac.Item1; // objective.Gradient;
                    Hessian  = jac.Item2; // objective.Hessian;

                    // if ||g||_oo <= gtol, found and stop
                    if (Gradient.InfinityNorm() <= gradientTolerance)
                    {
                        exitCondition = ExitCondition.RelativeGradient;
                    }

                    // if ||R||^2 < fTol, found and stop
                    if (RSS <= functionTolerance)
                    {
                        exitCondition = ExitCondition.Converged; // SmallRSS
                    }
                }
            }

            if (iterations >= maximumIterations)
            {
                exitCondition = ExitCondition.ExceedIterations;
            }

            return(new NonlinearMinimizationResult(objective, iterations, exitCondition));
        }
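The radius update in the loop above, extracted for clarity: expand only when the step performed well and hit the boundary, shrink aggressively on a poor gain ratio, and give up once Δ falls below the radius tolerance. A minimal sketch:

using System;

class RadiusUpdateSketch
{
    // returns the new trust-region radius, or -1 when progress has stalled
    static double UpdateDelta(double delta, double rho, bool hitBoundary,
                              double maxDelta, double radiusTolerance, double pNorm)
    {
        if (rho > 0.75 && hitBoundary)
            return Math.Min(2.0 * delta, maxDelta); // the model is trustworthy: expand
        if (rho < 0.25)
        {
            delta *= 0.25;                          // the model is poor: shrink
            if (delta <= radiusTolerance * (radiusTolerance + pNorm))
                return -1;                          // LackOfProgress in the loop above
        }
        return delta;
    }

    static void Main()
    {
        Console.WriteLine(UpdateDelta(1.0, 0.9, true, 1000, 1e-18, 2.0));  // 2 (expanded)
        Console.WriteLine(UpdateDelta(1.0, 0.1, false, 1000, 1e-18, 2.0)); // 0.25 (shrunk)
    }
}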
Example #13
        public static Ellipse2D FromPoints(IEnumerable <GeoPoint2D> points)
        {
            GeoPoint2D[] pnts = null;
            if (points is GeoPoint2D[] a)
            {
                pnts = a;
            }
            else if (points is List <GeoPoint2D> l)
            {
                pnts = l.ToArray();
            }
            else
            {
                List <GeoPoint2D> lp = new List <GeoPoint2D>();
                foreach (GeoPoint2D point2D in points)
                {
                    lp.Add(point2D);
                }
                pnts = lp.ToArray();
            }
            Vector <double> observedX = new DenseVector(pnts.Length + 1); // there is no need to set values
            Vector <double> observedY = new DenseVector(pnts.Length + 1); // the target values: 1.0 per point (squared distance from the origin after mapping to the unit circle)

            for (int i = 0; i < observedY.Count; i++)
            {
                observedY[i] = 1;
            }
            observedY[pnts.Length] = 0; // the scalar product of minor and major axis
            LevenbergMarquardtMinimizer lm  = new LevenbergMarquardtMinimizer(gradientTolerance: 1e-12, maximumIterations: 20);
            IObjectiveModel             iom = ObjectiveFunction.NonlinearModel(
                new Func <Vector <double>, Vector <double>, Vector <double> >(delegate(Vector <double> vd, Vector <double> ox) // function
            {
                // parameters: the homogeneous matrix that projects the ellipse to the unit circle
                ModOp2D m       = new ModOp2D(vd[0], vd[1], vd[2], vd[3], vd[4], vd[5]);
                DenseVector res = new DenseVector(pnts.Length + 1);
                for (int i = 0; i < pnts.Length; i++)
                {
                    GeoPoint2D pp = m * pnts[i];
                    res[i]        = pp.x * pp.x + pp.y * pp.y;
                }
                res[pnts.Length] = m[0, 0] * m[0, 1] + m[1, 0] * m[1, 1];     // the axes should be perpendicular
#if DEBUG
                double err = 0.0;
                for (int i = 0; i < pnts.Length; i++)
                {
                    err += (res[i] - 1) * (res[i] - 1);
                }
                err += res[pnts.Length] * res[pnts.Length];
#endif
                return(res);
            }),
                new Func <Vector <double>, Vector <double>, Matrix <double> >(delegate(Vector <double> vd, Vector <double> ox) // derivatives
            {
                // parameters: the homogeneous matrix that projects the ellipse to the unit circle
                ModOp2D m = new ModOp2D(vd[0], vd[1], vd[2], vd[3], vd[4], vd[5]);
                var prime = new DenseMatrix(pnts.Length + 1, 6);
                for (int i = 0; i < pnts.Length; i++)
                {
                    GeoPoint2D pp = m * pnts[i];
                    prime[i, 0]   = 2 * pnts[i].x * pp.x;
                    prime[i, 1]   = 2 * pnts[i].y * pp.x;
                    prime[i, 2]   = 2 * pp.x;
                    prime[i, 3]   = 2 * pnts[i].x * pp.y;
                    prime[i, 4]   = 2 * pnts[i].y * pp.y;
                    prime[i, 5]   = 2 * pp.y;
                }
                prime[pnts.Length, 0] = m[0, 1];
                prime[pnts.Length, 1] = m[0, 0];
                prime[pnts.Length, 2] = 0;
                prime[pnts.Length, 3] = m[1, 1];
                prime[pnts.Length, 4] = m[1, 0];
                prime[pnts.Length, 5] = 0;
                return(prime);
            }), observedX, observedY);

            BoundingRect ext = new BoundingRect(pnts);
            NonlinearMinimizationResult mres = lm.FindMinimum(iom, new DenseVector(new double[] { 2.0 / ext.Width, 0, -ext.GetCenter().x, 0, 2.0 / ext.Height, -ext.GetCenter().y }));

            if (mres.ReasonForExit == ExitCondition.Converged || mres.ReasonForExit == ExitCondition.RelativeGradient)
            {
                Vector <double> vd = mres.MinimizingPoint;
                ModOp2D         m  = new ModOp2D(vd[0], vd[1], vd[2], vd[3], vd[4], vd[5]);
                m = m.GetInverse();
                return(new Ellipse2D(m * GeoPoint2D.Origin, m * GeoVector2D.XAxis, m * GeoVector2D.YAxis));
            }
            else
            {
                return(null);
            }
        }
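A hypothetical caller of FromPoints, again assuming the CADability types (GeoPoint2D, Ellipse2D); the sample points lie roughly on an axis-aligned ellipse with semi-axes 2 and 1:

        // a hypothetical caller inside the same CADability code base
        internal static void FromPointsExample()
        {
            GeoPoint2D[] sample = new GeoPoint2D[]
            {
                new GeoPoint2D(2.0, 0.0),   new GeoPoint2D(0.0, 1.0),
                new GeoPoint2D(-2.0, 0.0),  new GeoPoint2D(0.0, -1.0),
                new GeoPoint2D(1.41, 0.71), new GeoPoint2D(-1.41, -0.71)
            };
            Ellipse2D e = Ellipse2D.FromPoints(sample);
            if (e != null)
            {
                // e now describes the fitted ellipse via its center and axes
            }
        }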