Code Example #1
        public static double Distance(List <MDHParameters> dht, double[] target, double[] angles)
        {
            var currentPose = GetPositionVector(ForwardKinematics(dht, angles.ToList()));
            var result      = Norm.Euclidean(target.Subtract(currentPose));

            return(result);
        }
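The pattern above reduces to taking the Euclidean norm of a difference vector. Here is a minimal standalone sketch of the same idea, with invented values; it assumes only the `Subtract` extension and `Norm.Euclidean` already used in the snippet:

        // Minimal sketch: Euclidean distance as the norm of a difference vector.
        // The values below are invented for illustration.
        double[] target  = { 1.0, 2.0, 3.0 };
        double[] current = { 0.5, 2.5, 2.0 };
        double distance = Norm.Euclidean(target.Subtract(current)); // sqrt(1.5) ≈ 1.2247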
Code Example #2
        private void iterate(double[][] seeds, ConcurrentStack <double[]> maxcandidates, int index)
        {
            double[] point = seeds[index];
            double[] mean  = new double[point.Length];
            double[] delta = new double[point.Length];

            // we will keep moving it in the
            // direction of the density modes

            int iterations = 0;

            // until convergence or max iterations reached
            while (iterations < MaxIterations)
            {
                iterations++;

                // compute the shifted mean
                computeMeanShift(point, mean);

                // extract the mean shift vector
                for (int j = 0; j < mean.Length; j++)
                {
                    delta[j] = point[j] - mean[j];
                }

                // update the point towards a mode
                for (int j = 0; j < mean.Length; j++)
                {
                    point[j] = mean[j];
                }

                // Check if we are already near any maximum point
                if (cut && nearest(point, maxcandidates) != null)
                {
                    break;
                }

                // check for convergence: magnitude of the mean shift
                // vector converges to zero (Comaniciu 2002, page 606)
                if (Norm.Euclidean(delta) < Tolerance * Bandwidth)
                {
                    break;
                }
            }

            if (cut)
            {
                double[] match = nearest(point, maxcandidates);

                if (match != null)
                {
                    seeds[index] = match;
                }

                else
                {
                    maxcandidates.Push(point);
                }
            }
        }
Code Example #3
File: NormTest.cs Project: haf/Accord.Net
        public void EuclideanTest()
        {
            double[,] a =
            {
                { 15.4457, 0.4187, 15.6093 },
                {  0.0000, 2.5708,  0.6534 }
            };


            double[] expected =
            {
                21.9634, 2.6525
            };

            double[] actual = Norm.Euclidean(a, 1);
            Assert.IsTrue(expected.IsEqual(actual, 0.001));


            double[] expected2 =
            {
                15.4457, 2.6047, 15.6229
            };

            double[] actual2 = Norm.Euclidean(a, 0);
            Assert.IsTrue(expected2.IsEqual(actual2, 0.001));

            double actual3 = Norm.Euclidean(a.GetRow(0));

            Assert.AreEqual(21.9634, actual3, 0.001);
        }
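Note what the second argument selects in these assertions: `Norm.Euclidean(a, 1)` returns one norm per row of `a` (two values for this 2x3 matrix), while `Norm.Euclidean(a, 0)` returns one norm per column (three values), as the expected vectors show.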
Code Example #4
File: NormTest.cs Project: haf/Accord.Net
        public void EuclideanTest1()
        {
            float[,] a =
            {
                { 15.4457f, 0.4187f, 15.6093f },
                {  0.0000f, 2.5708f,  0.6534f }
            };


            float[] expected =
            {
                21.9634f, 2.6525f
            };

            float[] actual = Norm.Euclidean(a, 1);
            Assert.IsTrue(expected.IsEqual(actual, 0.001f));


            float[] expected2 =
            {
                15.4457f, 2.6047f, 15.6229f
            };

            float[] actual2 = Norm.Euclidean(a, 0);
            Assert.IsTrue(expected2.IsEqual(actual2, 0.001f));

            float actual3 = Norm.Euclidean(a.GetRow(0));

            Assert.AreEqual(21.9634f, actual3, 0.001);
        }
Code Example #5
        private double[][] householder(double[] a)
        {
            double[] v = a.Divide((a[0] + Special.Sign(Norm.Euclidean(a), a[0])));
            v[0] = 1;
            var H  = Jagged.Identity(a.Length);
            var vr = Jagged.RowVector(v);
            var vc = Jagged.ColumnVector(v);
            var t  = vc.Dot(vr);

            H.Subtract((2 / v.Dot(v)).Multiply(t), result: H);
            return(H);
        }
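The method above builds the Householder reflector H = I - (2 / v'v) * vv', which maps `a` onto a multiple of the first basis vector. A quick sanity check, using invented values and the `Dot` extension already present in the snippet:

        // Sanity-check sketch for the reflector above (values invented)
        double[] a = { 3.0, 4.0 };
        double[][] H = householder(a);
        double[] r = H.Dot(a); // expected ≈ { -5, 0 }: the norm is kept, trailing entries vanish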
Code Example #6
        double lmerr()
        {
            double err = 0;

            for (int i = 0; i < patternCounter; i++)
            {
                var sample = (input.GetColumn(i));
                var target = output.GetColumn(i);
                err += Math.Sqrt(Norm.Euclidean((evallm(sample).Subtract(target))));
            }
            return(err);
        }
Code Example #7
        // Returns the Jacobian and the error for every pattern
        public Tuple <double[, ], double[]> jacobi()
        {
            double[,] j = Matrix.Create(patternCounter, weightNum, 0d);
            double [] err = Vector.Create(patternCounter, 0d);
            for (int i = 0; i < patternCounter; i++)
            {
                var sample = (input.GetColumn(i));
                var target = output.GetColumn(i);
                var gr     = gradient(target, sample);
                err[i] = Math.Sqrt(Norm.Euclidean(lastLayer().fneo().Subtract(target)));
                j.SetRow(i, gr);
            }

            return(new Tuple <double[, ], double[]>(j, err));
        }
Code Example #8
        /// <summary>
        ///   Initializes a new instance of the <see cref="GramSchmidtOrthogonalization"/> class.
        /// </summary>
        ///
        /// <param name="value">The matrix <c>A</c> to be decomposed.</param>
        /// <param name="modified">True to use modified Gram-Schmidt; false
        ///   otherwise. Default is true (and is the recommended setup).</param>
        ///
        public GramSchmidtOrthogonalization(double[,] value, bool modified)
        {
            if (value.GetLength(0) != value.GetLength(1))
            {
                throw new DimensionMismatchException("value", "Matrix must be square.");
            }

            int size = value.GetLength(0);

            q = new double[size, size];
            r = new double[size, size];

            if (modified)
            {
                for (int j = 0; j < size; j++)
                {
                    double[] v = value.GetColumn(j);

                    for (int i = 0; i < j; i++)
                    {
                        r[i, j] = q.GetColumn(i).Dot(v);
                        var t = r[i, j].Multiply(q.GetColumn(i));
                        v.Subtract(t, result: v);
                    }

                    r[j, j] = Norm.Euclidean(v);
                    q.SetColumn(j, v.Divide(r[j, j]));
                }
            }

            else
            {
                for (int j = 0; j < size; j++)
                {
                    double[] v = value.GetColumn(j);
                    double[] a = value.GetColumn(j);

                    for (int i = 0; i < j; i++)
                    {
                        r[i, j] = q.GetColumn(i).Dot(a); // project onto the i-th orthonormal column
                        v       = v.Subtract(r[i, j].Multiply(q.GetColumn(i)));
                    }

                    r[j, j] = Norm.Euclidean(v);
                    q.SetColumn(j, v.Divide(r[j, j]));
                }
            }
        }
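A hypothetical construction call for the class above; the matrix values are invented, and only members visible in this snippet are used:

        // Hypothetical usage: factor a small square matrix with modified Gram-Schmidt
        double[,] A =
        {
            { 1.0, 2.0 },
            { 0.0, 3.0 }
        };
        var gs = new GramSchmidtOrthogonalization(A, modified: true);
        // internally A = q*r, with orthonormal columns in q and upper-triangular r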
Code Example #9
        public double Similarity(Dictionary <string, double> mainDict, Dictionary <string, double> dct2)
        {
            var norm1 = Norm.Euclidean(mainDict.Values.ToArray());
            var norm2 = Norm.Euclidean(dct2.Values.ToArray());

            var dot = 0.0;

            foreach (var label in mainDict)
            {
                if (dct2.ContainsKey(label.Key))
                {
                    dot += mainDict[label.Key] * dct2[label.Key];
                }
            }

            return(dot / (norm1 * norm2));
        }
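This is the cosine similarity of two sparse label-to-weight vectors, so proportional dictionaries should score exactly 1.0. A hypothetical call:

        // Hypothetical usage (dictionary contents invented): d2 = 2 * d1
        var d1 = new Dictionary<string, double> { { "cat", 1.0 }, { "dog", 2.0 } };
        var d2 = new Dictionary<string, double> { { "cat", 2.0 }, { "dog", 4.0 } };
        double sim = Similarity(d1, d2); // dot = 10, norms = sqrt(5) and sqrt(20), so sim = 1.0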
Code Example #10
            protected bool Equals(KLRotation other)
            {
                var innerProduct = Math.Abs(Math.Abs(Matrix.InnerProduct(RotationAxis, other.RotationAxis)) - 1);

                if (innerProduct < 0.01)
                {
                    if (RotationPoint != null && other.RotationPoint != null)
                    {
                        var pointDifferent = new double[]
                        {
                            RotationPoint[0] - other.RotationPoint[0], RotationPoint[1] - other.RotationPoint[1],
                            RotationPoint[2] - other.RotationPoint[2]
                        };
                        var norm = Norm.Euclidean(pointDifferent);
                        if (norm != 0.0)
                        {
                            pointDifferent = pointDifferent.Divide(norm);
                        }
                        else
                        {
                            return(true);
                        }
                        var innerProduct2 = Math.Abs(Math.Abs(Matrix.InnerProduct(RotationAxis, pointDifferent)) - 1);

                        //if (FolderUtilities.FolderUtilities.KLIsVectorNull(pointDifferent, 0.01))
                        //{
                        //    return true;
                        //}
                        //else
                        if (innerProduct2 < 0.01)
                        {
                            return(true);
                        }
                    }
                    else if (RotationPoint == null || other.RotationPoint == null)
                    {
                        return(true);
                    }
                }

                return(false);
            }
Code Example #11
        public double[] gradient(double[] target, double[] current)
        {
            double[] gr        = Vector.Create(weightNum, 0d);
            int      pr        = weightNum - 1;
            var      res       = eval(current);
            var      lastdelta = Elementwise.Multiply(target.Subtract(res), lastLayer().gneo());
            var      err       = 2 * Math.Sqrt(Norm.Euclidean(target.Subtract(res)));

            double [,] wd = lastdelta.Outer(layers[layers.Count - 2].fneo()).Divide(err);
            for (int i = wd.GetLength(0) - 1; i >= 0; i--)
            {
                for (int j = wd.GetLength(1) - 1; j >= 0; j--)
                {
                    gr[pr--] = wd[i, j];
                }
            }
            for (int j = layers.Count - 2; j > 0; j--)
            {
                var delta = Elementwise.Multiply(layers[j + 1].weights.Transpose().Dot(lastdelta), layers[j].gneo());
                wd = lastdelta.Outer(layers[j - 1].fneo()).Divide(err);
                for (int i = wd.GetLength(0) - 1; i >= 0; i--)
                {
                    for (int jj = wd.GetLength(1) - 1; jj >= 0; jj--)
                    {
                        gr[pr--] = wd[i, jj];
                    }
                }
                lastdelta = delta;
            }
            var delta2 = Elementwise.Multiply(layers[1].weights.Transpose().Dot(lastdelta), layers[0].gneo());

            wd = delta2.Outer(current).Divide(err);
            for (int i = wd.GetLength(0) - 1; i >= 0; i--)
            {
                for (int j = wd.GetLength(1) - 1; j >= 0; j--)
                {
                    gr[pr--] = wd[i, j];
                }
            }

            return(gr);
        }
Code Example #12
        public IActionResult TestAI()
        {
            var a = GetCategoriesFromDescription("Google Home enables users to speak voice commands to interact with services through the Home's intelligent personal assistant called Google Assistant. A large number of services, both in-house and third-party, are integrated, allowing users to listen to music, look at videos or photos, or receive news updates entirely by voice.");
            var b = GetCategoriesFromDescription("Android is a mobile operating system developed by Google, based on the Linux kernel and designed primarily for touchscreen mobile devices such as smartphones and tablets.");
            var c = GetCategoriesFromDescription("Google Cloud Platform, offered by Google, is a suite of cloud computing services that runs on the same infrastructure that Google uses internally for its end-user products, such as Google Search and YouTube. Alongside a set of management tools, it provides a series of modular cloud services including computing, data storage, data analytics and machine learning.");
            var d = GetCategoriesFromDescription("Google is an American multinational technology company that specializes in Internet-related services and products. These include online advertising technologies, search, cloud computing, software, and hardware.");

            var norm1 = Norm.Euclidean(a.Values.ToArray());
            var norm2 = Norm.Euclidean(d.Values.ToArray());
            var dot   = 0.0;

            foreach (var label in a)
            {
                if (d.ContainsKey(label.Key))
                {
                    dot += a[label.Key] * d[label.Key];
                }
            }
            //return Ok(d);
            return(Ok(dot / (norm1 * norm2)));
        }
Code Example #13
        /// <summary>
        ///   Constructs a Von-Mises Fisher distribution with unit mean.
        /// </summary>
        ///
        /// <param name="mean">The mean direction vector (with unit length).</param>
        /// <param name="concentration">The concentration value κ (kappa).</param>
        ///
        public VonMisesFisherDistribution(double[] mean, double concentration)
            : base(mean.Length)
        {
            if (concentration < 0)
            {
                throw new ArgumentOutOfRangeException("concentration", "Concentration parameter kappa must be non-negative.");
            }

            if (!Norm.Euclidean(mean).IsRelativelyEqual(1, 1e-10))
            {
                throw new ArgumentOutOfRangeException("mean", "The mean vector must have unit length.");
            }

            this.mean  = mean;
            this.kappa = concentration;

            int    p   = Dimension;
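            // Note: p is an int, so p / 2 below uses integer division.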
            double num = Math.Pow(concentration, p / 2 - 1);
            double den = Math.Pow(2 * Math.PI, p / 2) * Bessel.I(p / 2 - 1, concentration);

            this.constant = num / den;
        }
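A hypothetical construction of the distribution above; the direction is chosen to have unit length so the validation check passes:

        // Hypothetical usage of the constructor above
        double[] direction = { 0.0, 0.0, 1.0 };  // unit length by construction
        var vmf = new VonMisesFisherDistribution(direction, concentration: 2.0);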
Code Example #14
        /// <summary>
        ///   Implements the actual optimization algorithm. This
        ///   method should try to minimize the objective function.
        /// </summary>
        ///
        protected override bool Optimize()
        {
            // This code has been adapted from the original
            // FORTRAN function CGFAM by Jorge Nocedal, 1992.

            int irest = 1;
            int n     = NumberOfVariables;

            double[] x = Solution;

            double f = Function(x);

            g = Gradient(x);

            int method = (int)Method;

            iterations  = 0;
            evaluations = 1;
            bool bnew = true;
            int  nrst = 0;
            int  im   = 0; // Number of times betapr was negative for method 2 or 3

            searches = 0;  // Number of line search iterations after Wolfe conditions were satisfied.
            double dg0 = 0;

            for (int i = 0; i < g.Length; ++i)
            {
                d[i] = -g[i];
            }

            double gnorm = Norm.Euclidean(g);
            double xnorm = Math.Max(1.0, Norm.Euclidean(x));
            double stp1  = 1.0 / gnorm;
            double f_old = f;


            bool finish = false;

            // Make initial progress report with initialization parameters
            if (Progress != null)
            {
                Progress(this, new OptimizationProgressEventArgs
                             (iterations, evaluations, g, gnorm, Solution, xnorm, f, stp1, finish));
            }



            // Main iteration
            while (!finish)
            {
                iterations++;

                nrst++;

                // Call the line search routine of Moré and Thuente
                // (modified for Nocedal's CG method)
                // -------------------------------------------------
                //
                //  J.J. Moré and D. Thuente, "Line Search Algorithms with Guaranteed
                //  Sufficient Decrease". ACM Transactions on Mathematical
                //  Software 20 (1994), pp 286-307.
                //

                int    nfev  = 0;
                int    info  = 0;
                double dgout = 0;

                // Save original gradient
                for (int i = 0; i < g.Length; i++)
                {
                    gold[i] = g[i];
                }

                double dg    = d.Dot(g);
                double dgold = dg;
                double stp   = 1.0;

                // Shanno-Phua's formula for trial step
                if (!bnew)
                {
                    stp = dg0 / dg;
                }

                if (iterations == 1)
                {
                    stp = stp1;
                }

                int ides = 0;
                bnew = false;

L72:

                // Call to the line search subroutine
                Status = cvsmod(ref f, d, ref stp, ref info, ref nfev, w, ref dg, ref dgout);

                if (Status != ConjugateGradientCode.Success)
                {
                    return(false);
                }

                // Test if descent direction is obtained for methods 2 and 3
                double gg     = Matrix.Dot(g, g);
                double gg0    = Matrix.Dot(g, gold);
                double betapr = (gg - gg0) / (gnorm * gnorm);

                // When nrst > n and irest == 1 then restart.
                if (irest == 1 && nrst > n)
                {
                    nrst = 0;
                    bnew = true;
                }
                else
                {
                    if (method != 1)
                    {
                        double dg1 = -gg + betapr * dgout;

                        if (dg1 >= 0.0)
                        {
                            ides++;

                            if (ides > 5)
                            {
                                Status = ConjugateGradientCode.DescentNotObtained;
                                return(false);
                            }

                            goto L72; // retry
                        }
                    }
                }

                evaluations += nfev;
                searches    += ides;

                // Determine correct beta value for method chosen
                double betafr = gg / (gnorm * gnorm);
                double beta   = 0;

                if (nrst == 0)
                {
                    beta = 0.0;
                }
                else
                {
                    if (method == 1)
                    {
                        beta = betafr;
                    }
                    else if (method == 2)
                    {
                        beta = betapr;
                    }
                    else if (method == 3)
                    {
                        beta = Math.Max(0.0, betapr);
                    }

                    if ((method == 2 || method == 3) && betapr < 0.0)
                    {
                        im++;
                    }
                }

                // Compute the new direction
                for (int i = 0; i < g.Length; i++)
                {
                    d[i] = -g[i] + beta * d[i];
                }

                dg0 = dgold * stp;

                // Check for termination
                gnorm = Norm.Euclidean(g);
                xnorm = Math.Max(1.0, Norm.Euclidean(x));

                // Convergence test
                if (gnorm / xnorm <= epsilon)
                {
                    finish = true;
                }

                // Stopping criteria by function delta
                if (tolerance > 0 && iterations > 1)
                {
                    double delta = (f_old - f) / f;
                    f_old = f;

                    if (delta < tolerance)
                    {
                        finish = true;
                    }
                }

                // Stopping criteria by max iterations
                if (maxIterations > 0)
                {
                    if (iterations > maxIterations)
                    {
                        finish = true;
                    }
                }

                if (Progress != null)
                {
                    Progress(this, new OptimizationProgressEventArgs(iterations,
                                                                     evaluations, g, gnorm, Solution, xnorm, f, stp, finish));
                }
            }

            return(Status == ConjugateGradientCode.Success);
        }
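A hypothetical driver for the optimizer above, assuming Accord's public `ConjugateGradient` wrapper exposes `Function`, `Gradient`, `Minimize()` and `Solution` (those member names are not shown in this snippet, so treat them as assumptions):

        // Minimize a convex quadratic; the solution should approach { 1, -2 }
        var cg = new ConjugateGradient(numberOfVariables: 2)
        {
            Function = x => Math.Pow(x[0] - 1, 2) + Math.Pow(x[1] + 2, 2),
            Gradient = x => new[] { 2 * (x[0] - 1), 2 * (x[1] + 2) }
        };
        bool converged = cg.Minimize();  // drives the Optimize() loop shown above
        double[] solution = cg.Solution;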
Code Example #15
        /// <summary>
        ///   Deflation iterative algorithm.
        /// </summary>
        ///
        /// <returns>
        ///   Returns a matrix in which each row contains
        ///   the mixing coefficients for each component.
        /// </returns>
        ///
        private double[][] deflation(double[][] X, int components, double[][] init)
        {
            // References:
            // - Hyvärinen, A (1999). Fast and Robust Fixed-Point
            //   Algorithms for Independent Component Analysis.

            // There are two ways to apply the one-unit fixed-point algorithm to compute
            // the whole ICA iteration. The simpler one is to perform deflation as in the
            // Gram-Schmidt orthogonalization process [Hyvärinen]. In this scheme, the
            // independent components are estimated one-by-one. See the referenced paper
            // for details.

            int n = X.Rows();
            int m = X.Columns();

            // Algorithm initialization
            var W    = Jagged.Zeros(components, m);
            var wx   = new double[n];
            var gwx  = new double[n];
            var dgwx = new double[n];


            // For each component to be computed,
            for (int i = 0; i < components; i++)
            {
                // Will compute each of the basis vectors
                //  individually and sequentially, re-using
                //  previous computations to form basis W.
                //

                // Initialization
                int  iterations = 0;
                bool stop       = false;
                //double lastChange = 1;

                double[] w  = init.GetRow(i);
                double[] w0 = init.GetRow(i);


                do // until convergence
                {
                    // Start with deflation
                    for (int u = 0; u < i; u++)
                    {
                        double proj = 0;
                        for (int j = 0; j < w0.Length; j++)
                        {
                            proj += w0[j] * W[u][j];
                        }

                        for (int j = 0; j < w0.Length; j++)
                        {
                            w[j] = w0[j] - proj * W[u][j];
                        }
                    }

                    // Normalize
                    w = w.Divide(Norm.Euclidean(w));


                    // Gets the maximum parameter absolute change
                    double delta = getMaximumAbsoluteChange(w, w0);

                    // Check for convergence
                    if (!(delta > tolerance && iterations < maxIterations) || Token.IsCancellationRequested)
                    {
                        stop = true;
                    }
                    else
                    {
                        // Advance to the next iteration
                        w0 = w; w = new double[m];
                        //lastChange = delta;
                        iterations++;

                        // Compute wx = w*x
                        for (int j = 0; j < n; j++)
                        {
                            double s = 0;
                            for (int k = 0; k < w0.Length; k++)
                            {
                                s += w0[k] * X[j][k];
                            }
                            wx[j] = s;
                        }

                        // Compute g(w*x) and g'(w*x)
                        contrast.Evaluate(wx, gwx, dgwx);

                        // Compute E{ x*g(w*x) }
                        double[] means = new double[m];
                        for (int j = 0; j < means.Length; j++)
                        {
                            for (int k = 0; k < gwx.Length; k++)
                            {
                                means[j] += X[k][j] * gwx[k];
                            }
                            means[j] /= n;
                        }

                        // Compute E{ g'(w*x) }
                        double mean = Measures.Mean(dgwx);


                        // Compute next update for w according
                        //  to Hyvärinen paper's equation (20).

                        // w+ = E{ xg(w*x)} - E{ g'(w*x)}*w
                        for (int j = 0; j < means.Length; j++)
                        {
                            w[j] = means[j] - mean * w0[j];
                        }

                        // The normalization to w* will be performed
                        //  in the beginning of the next iteration.
                    }
                } while (!stop);

                // Store the just computed component
                // in the resulting component matrix.
                W.SetRow(i, w);
            }

            // Return the component basis matrix
            return(W); // vectors stored as rows.
        }
Code Example #16
        /// <summary>
        ///   Computes PLS parameters using SIMPLS algorithm.
        /// </summary>
        private void simpls(double[,] X0, double[,] Y0, int factors)
        {
            // Reference: Sijmen de Jong
            // "SIMPLS: an alternative approach to partial least squares regression"

            // Initialize and prepare the data
            int rows  = sourceX.GetLength(0);
            int xcols = sourceX.GetLength(1);
            int ycols = sourceY.GetLength(1);

            // Initialize storage variables
            double[,] P = new double[xcols, factors];
            double[,] Q = new double[ycols, factors];
            double[,] T = new double[rows, factors];
            double[,] U = new double[rows, factors];
            double[,] R = new double[xcols, factors];

            double[] varX = new double[factors];
            double[] varY = new double[factors];

            // Orthogonal basis
            double[,] V = new double[xcols, factors];


            // Create covariance matrix X0'Y0
            double[,] covariance = new double[xcols, ycols];
            for (int i = 0; i < xcols; i++)
            {
                for (int j = 0; j < ycols; j++)
                {
                    for (int k = 0; k < rows; k++)
                    {
                        covariance[i, j] += X0[k, i] * Y0[k, j];
                    }
                }
            }


            #region SIMPLS
            for (int iteration = 0; iteration < factors; iteration++)
            {
                // Perform SVD on the covariance matrix
                SingularValueDecomposition svd = new SingularValueDecomposition(covariance);
                double[] r = svd.LeftSingularVectors.GetColumn(0);
                double[] c = svd.RightSingularVectors.GetColumn(0);
                double   s = svd.Diagonal[0];

                // t = X0*r;
                double[] t = new double[rows];
                for (int i = 0; i < rows; i++)
                {
                    for (int j = 0; j < xcols; j++)
                    {
                        t[i] += X0[i, j] * r[j];
                    }
                }

                // Normalize t
                double norm_t = Norm.Euclidean(t);
                for (int i = 0; i < t.Length; i++)
                {
                    t[i] /= norm_t;
                }

                // p = X0'*t;
                double[] p = new double[xcols];
                for (int i = 0; i < xcols; i++)
                {
                    for (int j = 0; j < rows; j++)
                    {
                        p[i] += X0[j, i] * t[j];
                    }
                }

                // q = s*c/norm(t);
                double[] q = new double[ycols];
                for (int j = 0; j < ycols; j++)
                {
                    q[j] = s * c[j] / norm_t;
                }

                // u = Y0*q;
                double[] u = new double[rows];
                for (int i = 0; i < rows; i++)
                {
                    for (int j = 0; j < ycols; j++)
                    {
                        u[i] += Y0[i, j] * q[j];
                    }
                }

                // Normalize r using norm(t)
                for (int i = 0; i < r.Length; i++)
                {
                    r[i] /= norm_t;
                }


                // Update the orthonormal basis V
                double[] v = (double[])p.Clone();
                for (int i = 0; i < 2; i++)
                {
                    // Modified Gram-Schmidt to deal with numerical instabilities
                    //  http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process

                    for (int j = 0; j < iteration; j++)
                    {
                        double sum = 0.0;
                        for (int k = 0; k < v.Length; k++)
                        {
                            sum += v[k] * V[k, j];
                        }
                        for (int k = 0; k < v.Length; k++)
                        {
                            v[k] -= sum * V[k, j];
                        }
                    }
                }

                // Normalize v
                double norm_v = Norm.Euclidean(v);
                for (int i = 0; i < v.Length; i++)
                {
                    v[i] /= norm_v;
                }


                // Save iteration
                R.SetColumn(iteration, r);
                U.SetColumn(iteration, u);
                Q.SetColumn(iteration, q);
                T.SetColumn(iteration, t);
                P.SetColumn(iteration, p);
                V.SetColumn(iteration, v);

                // Explained variance
                varX[iteration] = p.InnerProduct(p);
                varY[iteration] = q.InnerProduct(q);


                // Covariance matrix deflation
                // Cov = Cov - vi*(vi'*Cov);
                double[,] d = new double[xcols, ycols];
                for (int i = 0; i < xcols; i++)
                {
                    for (int j = 0; j < xcols; j++)
                    {
                        for (int k = 0; k < ycols; k++)
                        {
                            d[i, k] += v[i] * v[j] * covariance[j, k];
                        }
                    }
                }

                for (int i = 0; i < xcols; i++)
                {
                    for (int j = 0; j < ycols; j++)
                    {
                        covariance[i, j] -= d[i, j];
                    }
                }

                // Vi = V(:,1:i);
                // Cov = Cov - Vi*(Vi'*Cov);
                d = new double[iteration + 1, ycols];
                for (int i = 0; i < iteration + 1; i++)
                {
                    for (int j = 0; j < ycols; j++)
                    {
                        for (int k = 0; k < xcols; k++)
                        {
                            d[i, j] += V[k, i] * covariance[k, j];
                        }
                    }
                }

                for (int i = 0; i < iteration + 1; i++)
                {
                    for (int j = 0; j < ycols; j++)
                    {
                        for (int k = 0; k < ycols; k++)
                        {
                            covariance[j, k] -= V[j, i] * d[i, k];
                        }
                    }
                }
            }
            #endregion


            // Orthogonalize scores (by convention)
            for (int i = 0; i < factors; i++)
            {
                for (int s = 0; s < 2; s++)
                {
                    for (int j = 0; j < i; j++)
                    {
                        double b = 0;
                        for (int k = 0; k < rows; k++)
                        {
                            b += U[k, i] * T[k, j];
                        }
                        for (int k = 0; k < rows; k++)
                        {
                            U[k, i] -= b * T[k, j];
                        }
                    }
                }
            }


            // Set class variables
            this.scoresX   = T;    // factor score matrix T
            this.scoresY   = U;    // factor score matrix U
            this.loadingsX = P;    // loading matrix P, the loadings for X such that X = TP + F
            this.loadingsY = Q;    // loading matrix Q, the loadings for Y such that Y = TQ + E
            this.weights   = R;    // the columns of R are weight vectors
            this.coeffbase = R;


            // Calculate variance explained proportions
            this.componentProportionX = new double[factors];
            this.componentProportionY = new double[factors];

            double sumX = 0.0, sumY = 0.0;
            for (int i = 0; i < rows; i++)
            {
                // Sum of squares for matrix X
                for (int j = 0; j < xcols; j++)
                {
                    sumX += X0[i, j] * X0[i, j];
                }

                // Sum of squares for matrix Y
                for (int j = 0; j < ycols; j++)
                {
                    sumY += Y0[i, j] * Y0[i, j];
                }
            }

            // Calculate variance proportions
            for (int i = 0; i < factors; i++)
            {
                componentProportionY[i] = varY[i] / sumY;
                componentProportionX[i] = varX[i] / sumX;
            }
        }
Code Example #17
        //---------------------------------------------


        #region Partial Least Squares Algorithms
        /// <summary>
        ///   Computes PLS parameters using NIPALS algorithm.
        /// </summary>
        private void nipals(double[,] X0, double[,] Y0, int factors, double tolerance)
        {
            // Reference: Hervé Abdi, 2010
            // http://www.utdallas.edu/~herve/abdi-wireCS-PLS2010.pdf


            // Initialize and prepare the data
            int rows  = sourceX.GetLength(0);
            int xcols = sourceX.GetLength(1);
            int ycols = sourceY.GetLength(1);


            // Initialize storage variables
            double[,] X = (double[, ])X0.Clone();
            double[,] Y = (double[, ])Y0.Clone();

            double[,] T = new double[rows, factors];
            double[,] U = new double[rows, factors];
            double[,] P = new double[xcols, factors];
            double[,] Q = new double[ycols, factors];
            double[,] W = new double[xcols, xcols];
            double[] B = new double[xcols];

            double[] varX = new double[factors];
            double[] varY = new double[factors];


            // Initialize the algorithm
            bool stop = false;

            #region NIPALS
            for (int iteration = 0; iteration < factors && !stop; iteration++)
            {
                // Select t as the largest column from X,
                double[] t = X.GetColumn(largest(X));

                // Select u as the largest column from Y.
                double[] u = Y.GetColumn(largest(Y));

                // Will store weights for X and Y
                double[] w = new double[xcols];
                double[] q = new double[ycols];


                double norm_t = Norm.Euclidean(t);

                #region Outer iteration
                while (norm_t > 10e-15)
                {
                    // Store initial t to check convergence
                    double[] t0 = (double[])t.Clone();

                    // Step 1. w ∝ X'u (estimate X weights).
                    // - w = X'*u;
                    w = new double[xcols];
                    for (int j = 0; j < xcols; j++)
                    {
                        for (int i = 0; i < rows; i++)
                        {
                            w[j] += X[i, j] * u[i];
                        }
                    }

                    // - Normalize w [w = w/norm(w)]
                    double norm_w = Norm.Euclidean(w);
                    w = w.Divide(norm_w);



                    // Step 2. t ∝ Xw (estimate X factor scores).
                    // - t = X*w
                    t = new double[rows];
                    for (int i = 0; i < rows; i++)
                    {
                        for (int j = 0; j < xcols; j++)
                        {
                            t[i] += X[i, j] * w[j];
                        }
                    }

                    // - Normalize t [t = t/norm(t)]
                    t = t.Divide(Norm.Euclidean(t));



                    // Step 3. q ∝ Y't (estimate Y weights).
                    // - q = Y'*t;
                    q = new double[ycols];
                    for (int j = 0; j < ycols; j++)
                    {
                        for (int i = 0; i < rows; i++)
                        {
                            q[j] += Y[i, j] * t[i];
                        }
                    }

                    // - Normalize q [q = q/norm(q)]
                    double norm_q = Norm.Euclidean(q);
                    q = q.Divide(norm_q);



                    // Step 4. u = Yq (estimate Y scores).
                    // - u = Y*q;
                    u = new double[rows];
                    for (int i = 0; i < rows; i++)
                    {
                        for (int j = 0; j < ycols; j++)
                        {
                            u[i] += Y[i, j] * q[j];
                        }
                    }



                    // Recalculate norm of the difference
                    norm_t = 0.0;
                    for (int i = 0; i < rows; i++)
                    {
                        double d = (t0[i] - t[i]);
                        norm_t += d * d;
                    }
                    norm_t = System.Math.Sqrt(norm_t);
                }
                #endregion


                // Compute factor loadings as p = X'*t
                double[] p = new double[xcols];
                for (int j = 0; j < xcols; j++)
                {
                    for (int i = 0; i < rows; i++)
                    {
                        p[j] += X[i, j] * t[i];
                    }
                }


                // Compute the regression in latent space as b = t'u
                double b = u.InnerProduct(t);


                // Calculate explained variances
                varY[iteration] = b * b;
                varX[iteration] = p.InnerProduct(p);


                // Perform deflation of X and Y
                for (int i = 0; i < rows; i++)
                {
                    // Deflate X as X = X - t*p';
                    for (int j = 0; j < xcols; j++)
                    {
                        X[i, j] -= t[i] * p[j];
                    }

                    // Deflate Y as Y = Y - b*t*q';
                    for (int j = 0; j < ycols; j++)
                    {
                        Y[i, j] -= b * t[i] * q[j];
                    }
                }


                // Save iteration results
                T.SetColumn(iteration, t);
                P.SetColumn(iteration, p);
                U.SetColumn(iteration, u);
                Q.SetColumn(iteration, q);
                W.SetColumn(iteration, w);
                B[iteration] = b;


                // Check for residuals as stop criteria
                double[] norm_x = Norm.Euclidean(X);
                double[] norm_y = Norm.Euclidean(Y);

                stop = true;
                for (int i = 0; i < norm_x.Length && stop == true; i++)
                {
                    // If any of the residuals is higher than the tolerance
                    if (norm_x[i] > tolerance || norm_y[i] > tolerance)
                    {
                        stop = false;
                    }
                }
            }
            #endregion


            // R = pinv(P') * diag(B)
            this.coeffbase = Matrix.PseudoInverse(P.Transpose()).Multiply(Matrix.Diagonal(B));


            // Set class variables
            this.scoresX   = T;    // factor score matrix T
            this.scoresY   = U;    // factor score matrix U
            this.loadingsX = P;    // loading matrix P, the loadings for X such that X = TP + F
            this.loadingsY = Q;    // loading matrix Q, the loadings for Y such that Y = TQ + E
            this.weights   = W;    // the columns of W are weight vectors


            // Calculate variance explained proportions
            this.componentProportionX = new double[factors];
            this.componentProportionY = new double[factors];

            double sumX = 0.0, sumY = 0.0;
            for (int i = 0; i < rows; i++)
            {
                // Sum of squares for matrix X
                for (int j = 0; j < xcols; j++)
                {
                    sumX += X0[i, j] * X0[i, j];
                }

                // Sum of squares for matrix Y
                for (int j = 0; j < ycols; j++)
                {
                    sumY += Y0[i, j] * Y0[i, j];
                }
            }

            // Calculate variance proportions
            for (int i = 0; i < factors; i++)
            {
                componentProportionY[i] = varY[i] / sumY;
                componentProportionX[i] = varX[i] / sumX;
            }
        }
Code Example #18
File: MeanShift.cs Project: RitterRBC/framework
        private double[] move(KDTree <int> tree, double[][] points, int index,
                              ConcurrentStack <double[]> modes,
                              Action <ICollection <NodeDistance <KDTreeNode <int> > >, double[]> computeMean)
        {
            double[] current = points[index];
            double[] mean    = new double[current.Length];
            double[] shift   = new double[current.Length];

            // we will keep moving it in the
            // direction of the density modes

            int iterations = 0;

            // until convergence or max iterations reached
            while (iterations < MaxIterations)
            {
                iterations++;

                // Get points near the current point
                var neighbors = tree.Nearest(current, Bandwidth * 3, maximum);

                // compute the mean on the region
                computeMean(neighbors, mean);

                // extract the mean shift vector
                for (int j = 0; j < mean.Length; j++)
                {
                    shift[j] = current[j] - mean[j];
                }

                // move the point towards a mode
                for (int j = 0; j < mean.Length; j++)
                {
                    current[j] = mean[j];
                }

                // Check if we are near a maximum point
                if (cut)
                {
                    // Check if we are near a known mode
                    foreach (double[] mode in modes)
                    {
                        // compute the distance between points
                        // if they are near, they are duplicates
                        if (Distance.Distance(points[index], mode) < Bandwidth)
                        {
                            // Yes, we are close to a known mode. Let's substitute
                            // this point with a reference to this nearest mode
                            return(points[index] = mode); // and stop moving this point
                        }
                    }
                }

                // check for convergence: magnitude of the mean shift
                // vector converges to zero (Comaniciu 2002, page 606)
                if (Norm.Euclidean(shift) < Tolerance * Bandwidth)
                {
                    break;
                }
            }

            return(supress(points, index, modes));
        }
Code Example #19
File: MeanShift.cs Project: zhenyao2008/ai4unity
        /// <summary>
        ///   Divides the input data into clusters.
        /// </summary>
        ///
        /// <param name="points">The data where to compute the algorithm.</param>
        /// <param name="threshold">The relative convergence threshold
        /// for the algorithm. Default is 1e-3.</param>
        /// <param name="maxIterations">The maximum number of iterations. Default is 100.</param>
        ///
        public int[] Compute(double[][] points, double threshold, int maxIterations = 100)
        {
            // first, select initial points
            double[][] seeds         = createSeeds(points, 2 * Bandwidth);
            var        maxcandidates = new ConcurrentStack <double[]>();

            // construct map of the data
            tree = KDTree.FromData <int>(points, distance);

            // now, for each initial point
            global::Accord.Threading.Tasks.Parallel.For(0, seeds.Length,
#if DEBUG
                                                        new ParallelOptions()
            {
                MaxDegreeOfParallelism = 1
            },
#endif

                                                        (index) =>
            {
                double[] point = seeds[index];
                double[] mean  = new double[point.Length];
                double[] delta = new double[point.Length];

                // we will keep moving it in the
                // direction of the density modes

                int iterations = 0;

                // until convergence or max iterations reached
                while (iterations < maxIterations)
                {
                    iterations++;

                    // compute the shifted mean
                    computeMeanShift(point, mean);

                    // extract the mean shift vector
                    for (int j = 0; j < mean.Length; j++)
                    {
                        delta[j] = point[j] - mean[j];
                    }

                    // update the point towards a mode
                    for (int j = 0; j < mean.Length; j++)
                    {
                        point[j] = mean[j];
                    }

                    // Check if we are already near any maximum point
                    if (cut && nearest(point, maxcandidates) != null)
                    {
                        break;
                    }

                    // check for convergence: magnitude of the mean shift
                    // vector converges to zero (Comaniciu 2002, page 606)
                    if (Norm.Euclidean(delta) < threshold * Bandwidth)
                    {
                        break;
                    }
                }

                if (cut)
                {
                    double[] match = nearest(point, maxcandidates);

                    if (match != null)
                    {
                        seeds[index] = match;
                    }

                    else
                    {
                        maxcandidates.Push(point);
                    }
                }
            });


            // suppress non-maximum points
            double[][] maximum = cut ? maxcandidates.ToArray() : supress(seeds);

            // create a decision map using seeds
            int[] seedLabels = classifySeeds(seeds, maximum);
            tree = KDTree.FromData(seeds, seedLabels, distance);

            // create the cluster structure
            clusters = new MeanShiftClusterCollection(tree, maximum);

            // label each point
            return(clusters.Nearest(points));
        }
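A hypothetical driver for the `Compute` overload above. The `Compute` signature is taken from the snippet itself, while the `MeanShift` constructor arguments (dimension, kernel, bandwidth) and the `UniformKernel` type are assumptions about the surrounding Accord API:

        // Cluster two well-separated groups of 2-D points (values invented)
        double[][] points =
        {
            new[] { 0.0, 0.1 }, new[] { 0.1, 0.0 },
            new[] { 5.0, 5.1 }, new[] { 5.1, 5.0 }
        };
        var ms = new MeanShift(2, new UniformKernel(), 2.0);
        int[] labels = ms.Compute(points, threshold: 1e-3, maxIterations: 100);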
Code Example #20
        private void radButton1_Click(object sender, EventArgs e)
        {
            if (input != null && output != null)
            {
                chart1.Series.Clear();
                Error_Fun = new Series()
                {
                    ChartType = SeriesChartType.Spline
                };
                chart1.Series.Add(Error_Fun);

                List <DataPoint> err_function = new List <DataPoint>();


                //double[,] andoutput = Matrix.Create(1, 4, 0d);
                //oroutput[0, 0] = 1;
                int           nurons_num    = (int)radTrackBar1.Value;
                int           max_iteration = Convert.ToInt16(radTextBox4.Text);
                neuralNetwork net           = new neuralNetwork(input, output, new int[] { nurons_num }, new string[] { radDropDownList1.Text.ToString(), radDropDownList2.Text.ToString() });
                if (Intro.BP || Intro.BPM)
                {
                    net.change_paramaters(Convert.ToDouble(radTextBox1.Text), Convert.ToDouble(radTextBox2.Text), Convert.ToDouble(radTextBox5.Text));
                }
                else
                {
                    net.change_paramaters_levenberg(Convert.ToDouble(radTextBox1.Text), Convert.ToDouble(radTextBox2.Text), Convert.ToDouble(radTextBox3.Text), Convert.ToDouble(radTextBox6.Text));
                }


                net.init();


                for (int i = 0; i < max_iteration; i++)
                {
                    if (!Intro.Levenberg)
                    {
                        if (Intro.BP)
                        {
                            net.train("traingd", "onLine");
                        }
                        else if (Intro.BPM)
                        {
                            net.train("traingdm", "onLine");
                        }

                        net.get_class(new double[, ] {
                            { 1.1, 2.1 }, { 5.1, 3.1 }, { 3.1, 6.1 }
                        });

                        double err = 0;
                        for (int ii = 0; ii < input.GetLength(1); ii++)
                        {
                            err += Norm.Euclidean(net.eval(input.GetColumn(ii)).Subtract(output.GetColumn(ii)));
                        }
                        err_function.Add(new DataPoint(i, err));
                    }
                    else
                    {
                        net.levenbergMarqardt();
                        double err = 0;
                        for (int ii = 0; ii < input.GetLength(1); ii++)
                        {
                            err += Norm.Euclidean(net.eval(input.GetColumn(ii)).Subtract(output.GetColumn(ii)));
                        }
                        err_function.Add(new DataPoint(i, err));
                    }
                }


                foreach (DataPoint p in err_function)
                {
                    chart1.Series[0].Points.Add(p);
                }
            }
            else
            {
                MessageBox.Show("Enter Input !");
            }
        }
Code Example #21
File: Program.cs Project: xiubjarne/framework
        private static void TestLinearASGD()
        {
            // http://leon.bottou.org/projects/sgd

            string codebookPath = "codebook.bin";
            string x_train_fn   = "x_train.txt.gz";
            string x_test_fn    = "x_test.txt.gz";

            Sparse <double>[] xTrain = null, xTest = null;
            bool[]            yTrain = null, yTest = null;

            // Check if we have the precomputed dataset on disk
            if (!File.Exists(x_train_fn) || !File.Exists(x_test_fn))
            {
                Console.WriteLine("Downloading dataset");
                RCV1v2 rcv1v2 = new RCV1v2(@"C:\Temp\");

                // Note: Leon Bottou's SGD inverts training and
                // testing when benchmarking in this dataset
                var trainWords = rcv1v2.Testing.Item1;
                var testWords  = rcv1v2.Training.Item1;

                string positiveClass = "CCAT";
                yTrain = rcv1v2.Testing.Item2.Apply(x => x.Contains(positiveClass));
                yTest  = rcv1v2.Training.Item2.Apply(x => x.Contains(positiveClass));

                TFIDF tfidf;
                if (!File.Exists(codebookPath))
                {
                    Console.WriteLine("Learning TF-IDF");
                    // Create a TF-IDF considering only words that
                    // exist in both the training and testing sets
                    tfidf = new TFIDF(testWords)
                    {
                        Tf  = TermFrequency.Log,
                        Idf = InverseDocumentFrequency.Default,
                    };

                    // Learn the training set
                    tfidf.Learn(trainWords);

                    Console.WriteLine("Saving codebook");
                    tfidf.Save(codebookPath);
                }
                else
                {
                    Console.WriteLine("Loading codebook");
                    Serializer.Load(codebookPath, out tfidf);
                }

                if (!File.Exists(x_train_fn))
                {
                    // Transform and normalize training set
                    Console.WriteLine("Pre-processing training set");
                    xTrain = tfidf.Transform(trainWords, out xTrain);

                    Console.WriteLine("Post-processing training set");
                    xTrain = xTrain.Divide(Norm.Euclidean(xTrain, dimension: 1), result: xTrain);

                    Console.WriteLine("Saving training set to disk");
                    SparseFormat.Save(xTrain, yTrain, x_train_fn, compression: SerializerCompression.GZip);
                }

                if (!File.Exists(x_test_fn))
                {
                    // Transform and normalize testing set
                    Console.WriteLine("Pre-processing testing set");
                    xTest = tfidf.Transform(testWords, out xTest);

                    Console.WriteLine("Post-processing testing set");
                    xTest = xTest.Divide(Norm.Euclidean(xTest, dimension: 1), result: xTest);

                    Console.WriteLine("Saving testing set to disk");
                    SparseFormat.Save(xTest, yTest, x_test_fn, compression: SerializerCompression.GZip);
                }
            }
            else
            {
                Console.WriteLine("Loading dataset from disk");
                if (xTrain == null || yTrain == null)
                {
                    SparseFormat.Load(x_train_fn, out xTrain, out yTrain, compression: SerializerCompression.GZip);
                }
                if (xTest == null || yTest == null)
                {
                    SparseFormat.Load(x_test_fn, out xTest, out yTest, compression: SerializerCompression.GZip);
                }
            }

            int positiveTrain = yTrain.Count(x => x);
            int positiveTest  = yTest.Count(x => x);
            int negativeTrain = yTrain.Length - positiveTrain;
            int negativeTest  = yTest.Length - positiveTest;

            Console.WriteLine("Training samples: {0} [{1}+, {2}-]", positiveTrain + negativeTrain, positiveTrain, negativeTrain);
            Console.WriteLine("Testing samples: {0} [{1}+, {2}-]", positiveTest + negativeTest, positiveTest, negativeTest);

            // Create and learn a linear sparse binary support vector machine
            var learn = new AveragedStochasticGradientDescent <Linear, Sparse <double> >()
            {
                MaxIterations = 5,
                Tolerance     = 0,
            };

            Console.WriteLine("Learning training set");
            Stopwatch sw  = Stopwatch.StartNew();
            var       svm = learn.Learn(xTrain, yTrain);

            Console.WriteLine(sw.Elapsed);


            Console.WriteLine("Predicting training set");
            sw = Stopwatch.StartNew();
            bool[] trainPred = svm.Decide(xTrain);
            Console.WriteLine(sw.Elapsed);

            var train = new ConfusionMatrix(trainPred, yTrain);

            Console.WriteLine("Train acc: " + train.Accuracy);


            Console.WriteLine("Predicting testing set");
            sw = Stopwatch.StartNew();
            bool[] testPred = svm.Decide(xTest);
            Console.WriteLine(sw.Elapsed);

            var test = new ConfusionMatrix(testPred, yTest);

            Console.WriteLine("Test acc: " + test.Accuracy);
        }
Code Example #22
        //---------------------------------------------


        #region FastICA Algorithms

        /// <summary>
        ///   Deflation iterative algorithm.
        /// </summary>
        /// <returns>
        ///   Returns a matrix in which each row contains
        ///   the mixing coefficients for each component.
        /// </returns>
        private double[,] deflation(double[,] X, int components, double[,] init)
        {
            int n = X.GetLength(0);
            int m = X.GetLength(1);

            // Algorithm initialization
            double[,] W = new double[components, m];
            double[] wx = new double[n];
            double[] gwx = new double[n];
            double[] dgwx = new double[n];


            // For each component to be computed,
            for (int i = 0; i < components; i++)
            {
                // Will compute each of the basis vectors
                //  individually and sequentially, re-using
                //  previous computations to form the basis W.

                // Initialization
                int iterations = 0;
                bool stop = false;

                double[] w = init.GetRow(i);
                double[] w0 = init.GetRow(i);


                do // until convergence
                {
                    // Deflation: orthogonalize against the components found so far
                    for (int u = 0; u < i; u++)
                    {
                        double k = 0;
                        for (int j = 0; j < m; j++)
                            k += w0[j] * W[u, j];

                        for (int j = 0; j < m; j++)
                            w[j] = w0[j] - k * W[u, j];
                    }

                    // Normalize
                    w = w.Divide(Norm.Euclidean(w));


                    // Measure convergence: for unit vectors w and w0,
                    // |<w, w0>| approaches 1 as the iterates align
                    double delta = System.Math.Abs(System.Math.Abs(Matrix.Sum(w.ElementwiseMultiply(w0))) - 1);

                    // Check for convergence
                    if (!(delta > tolerance && iterations < maxIterations))
                    {
                        stop = true;
                    }
                    else
                    {
                        // Advance to the next iteration
                        w0 = w; w = new double[m];
                        iterations++;

                        // Compute wx = w*x
                        for (int j = 0; j < n; j++)
                        {
                            double s = 0;
                            for (int k = 0; k < m; k++)
                                s += w0[k] * X[j, k];
                            wx[j] = s;
                        }

                        // Compute g(w*x) and g'(w*x)
                        contrast.Evaluate(wx, gwx, dgwx);

                        // Compute E{ x*g(w*x) }
                        double[] means = new double[m];
                        for (int j = 0; j < m; j++)
                        {
                            for (int k = 0; k < n; k++)
                                means[j] += X[k, j] * gwx[k];
                            means[j] /= n;
                        }

                        // Compute E{ g'(w*x) }
                        double mean = GABIZ.Base.Statistics.Tools.Mean(dgwx);


                        // Compute next update for w
                        for (int j = 0; j < m; j++)
                            w[j] = means[j] - w0[j] * mean;

                    }

                } while (!stop);

                // Store the just computed component
                // in the resulting component matrix.
                W.SetRow(i, w);
            }

            // Return the component basis matrix
            return W; // vectors stored as rows.
        }
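
For reference, the loop above is the standard FastICA one-unit fixed-point iteration with Gram-Schmidt deflation (Hyvärinen & Oja). In the usual notation, with $g$ the contrast function and $W$ holding the components already found, each pass computes:

    \begin{aligned}
    w &\leftarrow w_0 - \sum_{u<i} (w_0^\top w_u)\, w_u  && \text{deflation} \\
    w &\leftarrow w / \lVert w \rVert_2                   && \text{renormalization} \\
    \delta &= \bigl|\,\lvert w^\top w_0 \rvert - 1\,\bigr| && \text{stop when } \delta < \text{tolerance} \\
    w^{+} &= \mathrm{E}\{\, x\, g(w^\top x) \,\} - \mathrm{E}\{\, g'(w^\top x) \,\}\, w && \text{fixed-point update}
    \end{aligned}

The convergence test works because $w$ and $w_0$ are unit vectors: $|w^\top w_0|$ is the cosine of the angle between successive iterates, which approaches 1 (up to sign) at a fixed point.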
Code example #23
0
File: L-BFGS.cs Project: isadorasalvetti/Hatching
    private unsafe double minimize()
    {
        if (Function == null)
        {
            throw new InvalidOperationException(
                      "The function to be minimized has not been defined.");
        }

        if (Gradient == null)
        {
            throw new InvalidOperationException(
                      "The gradient function has not been defined.");
        }


        // Initialization
        int n = numberOfVariables, m = corrections;

        // Make initial evaluation
        f = getFunction(x);
        g = getGradient(x);

        this.iterations  = 0;
        this.evaluations = 1;


        // Obtain initial Hessian
        double[] diagonal = null;

        if (Diagonal != null)
        {
            diagonal = getDiagonal();
        }
        else
        {
            diagonal = new double[n];
            for (int i = 0; i < diagonal.Length; i++)
            {
                diagonal[i] = 1.0;
            }
        }

        fixed(double *w = work)
        {
            // The first N locations of the work vector are used to
            //  store the gradient and other temporary information.

            double *rho   = &w[n];                 // Stores the scalars rho.
            double *alpha = &w[n + m];             // Stores the alphas in computation of H*g.
            double *steps = &w[n + 2 * m];         // Stores the last M search steps.
            double *delta = &w[n + 2 * m + n * m]; // Stores the last M gradient differences.


            // Initialize work vector
            for (int i = 0; i < g.Length; i++)
            {
                steps[i] = -g[i] * diagonal[i];
            }

            // Initialize statistics
            double gnorm = Norm.Euclidean(g);
            double xnorm = Norm.Euclidean(x);
            double stp   = 1.0 / gnorm;
            double stp1  = stp;

            // Initialize loop
            int  nfev, point = 0;
            int  npt = 0, cp = 0;
            bool finish = false;

            // Make initial progress report with initialization parameters
            if (Progress != null)
            {
                Progress(this, new OptimizationProgressEventArgs
                             (iterations, evaluations, g, gnorm, x, xnorm, f, stp, finish));
            }


            // Start the main loop
            while (!finish)
            {
                iterations++;
                int bound = iterations - 1;

                if (iterations != 1)
                {
                    if (iterations > m)
                    {
                        bound = m;
                    }

                    double ys = 0;
                    for (int i = 0; i < n; i++)
                    {
                        ys += delta[npt + i] * steps[npt + i];
                    }

                    // Compute the diagonal of the Hessian
                    // or use an approximation by the user.

                    if (Diagonal != null)
                    {
                        diagonal = getDiagonal();
                    }
                    else
                    {
                        double yy = 0;
                        for (int i = 0; i < n; i++)
                        {
                            yy += delta[npt + i] * delta[npt + i];
                        }
                        double d = ys / yy;

                        for (int i = 0; i < n; i++)
                        {
                            diagonal[i] = d;
                        }
                    }


                    // Compute -H*g using the formula given in:
                    //   Nocedal, J. 1980, "Updating quasi-Newton matrices with limited storage",
                    //   Mathematics of Computation, Vol.24, No.151, pp. 773-782.

                    cp          = (point == 0) ? m : point;
                    rho[cp - 1] = 1.0 / ys;
                    for (int i = 0; i < n; i++)
                    {
                        w[i] = -g[i];
                    }

                    cp = point;
                    for (int i = 1; i <= bound; i += 1)
                    {
                        if (--cp == -1)
                        {
                            cp = m - 1;
                        }

                        double sq = 0;
                        for (int j = 0; j < n; j++)
                        {
                            sq += steps[cp * n + j] * w[j];
                        }

                        double beta = alpha[cp] = rho[cp] * sq;
                        for (int j = 0; j < n; j++)
                        {
                            w[j] -= beta * delta[cp * n + j];
                        }
                    }

                    for (int i = 0; i < diagonal.Length; i++)
                    {
                        w[i] *= diagonal[i];
                    }

                    for (int i = 1; i <= bound; i += 1)
                    {
                        double yr = 0;
                        for (int j = 0; j < n; j++)
                        {
                            yr += delta[cp * n + j] * w[j];
                        }

                        double beta = alpha[cp] - rho[cp] * yr;
                        for (int j = 0; j < n; j++)
                        {
                            w[j] += beta * steps[cp * n + j];
                        }

                        if (++cp == m)
                        {
                            cp = 0;
                        }
                    }

                    npt = point * n;

                    // Store the search direction
                    for (int i = 0; i < n; i++)
                    {
                        steps[npt + i] = w[i];
                    }

                    stp = 1;
                }

                // Save original gradient
                for (int i = 0; i < g.Length; i++)
                {
                    w[i] = g[i];
                }


                // Obtain the one-dimensional minimizer of f by computing a line search
                mcsrch(x, ref f, ref g, &steps[point * n], ref stp, out nfev, diagonal);

                // Register evaluations
                evaluations += nfev;

                // Compute the new step and
                // new gradient differences
                for (int i = 0; i < g.Length; i++)
                {
                    steps[npt + i] *= stp;
                    delta[npt + i]  = g[i] - w[i];
                }

                if (++point == m)
                {
                    point = 0;
                }


                // Check for termination
                gnorm = Norm.Euclidean(g);
                xnorm = Norm.Euclidean(x);
                xnorm = Math.Max(1.0, xnorm);

                if (gnorm / xnorm <= tolerance)
                {
                    finish = true;
                }

                if (Progress != null)
                {
                    Progress(this, new OptimizationProgressEventArgs
                                 (iterations, evaluations, g, gnorm, x, xnorm, f, stp, finish));
                }
            }
        }

        return(f); // return the minimum value found (at solution x)
    }
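
The inner `cp` loops above are the limited-memory two-loop recursion from the Nocedal (1980) reference cited in the code, evaluated in place over the circular `steps`/`delta` buffers. For readers who find the pointer arithmetic hard to follow, here is a minimal dense sketch of the same recursion on plain arrays; `TwoLoopDemo`, `MinusHg`, `Dot`, and the `List<double[]>` histories are illustrative names, and the sketch assumes at least one stored pair (s, y):

    using System;
    using System.Collections.Generic;

    static class TwoLoopDemo
    {
        // Two-loop recursion (Nocedal 1980): computes d = -H*g from the last
        // m step pairs s_k = x_{k+1} - x_k and y_k = g_{k+1} - g_k (newest
        // last), taking H0 = gamma*I with gamma = s'y / y'y as the code does.
        static double[] MinusHg(double[] g, List<double[]> s, List<double[]> y)
        {
            int m = s.Count, n = g.Length;
            var q = new double[n];
            for (int i = 0; i < n; i++) q[i] = -g[i];

            var alpha = new double[m];
            var rho   = new double[m];
            for (int k = 0; k < m; k++) rho[k] = 1.0 / Dot(y[k], s[k]);

            // First loop: newest to oldest
            for (int k = m - 1; k >= 0; k--)
            {
                alpha[k] = rho[k] * Dot(s[k], q);
                for (int i = 0; i < n; i++) q[i] -= alpha[k] * y[k][i];
            }

            // Scale by the initial Hessian approximation gamma*I
            double gamma = Dot(s[m - 1], y[m - 1]) / Dot(y[m - 1], y[m - 1]);
            for (int i = 0; i < n; i++) q[i] *= gamma;

            // Second loop: oldest to newest
            for (int k = 0; k < m; k++)
            {
                double beta = rho[k] * Dot(y[k], q);
                for (int i = 0; i < n; i++) q[i] += (alpha[k] - beta) * s[k][i];
            }

            return q; // the search direction -H*g
        }

        static double Dot(double[] a, double[] b)
        {
            double sum = 0;
            for (int i = 0; i < a.Length; i++) sum += a[i] * b[i];
            return sum;
        }
    }

Because every operation in the recursion is linear in `q`, seeding it with `-g` instead of `g` directly yields the descent direction `-H*g`, which is what the unsafe code stores into `steps[npt + i]` before the line search.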