Example #1
        public void InverseTest()
        {
            // 3x3 system with a known inverse (entries rounded to 4 decimal places).
            double[,] matrix =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            double[,] knownInverse =
            {
                { 0.3043, -0.3913,  0.1304 },
                { 0.1304,  0.2609, -0.0870 },
                { 0.0435,  0.0870,  0.3043 },
            };

            // Multidimensional-array implementation: Inverse() should match the
            // known values, and Reverse() should reconstruct the original matrix.
            var lu = new LuDecomposition(matrix);
            Assert.IsTrue(Matrix.IsEqual(knownInverse, lu.Inverse(), 0.001));
            Assert.IsTrue(Matrix.IsEqual(matrix, lu.Reverse()));

            // Jagged-array implementation must agree with the multidimensional one.
            var jaggedLu = new JaggedLuDecomposition(matrix.ToJagged());
            Assert.IsTrue(Matrix.IsEqual(knownInverse, jaggedLu.Inverse().ToMatrix(), 0.001));
            Assert.IsTrue(Matrix.IsEqual(matrix, jaggedLu.Reverse()));
        }
Example #2
        public void InverseTestNaN()
        {
            // Poison each entry of a magic square with NaN in turn; solving
            // against the identity must give the same result as Inverse()
            // (i.e. both paths propagate NaN consistently).
            const int size = 5;

            var identity = Matrix.Identity(size);

            for (int row = 0; row < size; row++)
            {
                for (int col = 0; col < size; col++)
                {
                    double[,] matrix = Matrix.Magic(size);
                    matrix[row, col] = double.NaN;

                    var lu = new LuDecomposition(matrix);
                    Assert.IsTrue(Matrix.IsEqual(lu.Solve(identity), lu.Inverse()));

                    var jaggedLu = new JaggedLuDecomposition(matrix.ToJagged());
                    Assert.IsTrue(Matrix.IsEqual(jaggedLu.Solve(identity.ToJagged()), jaggedLu.Inverse()));
                }
            }
        }
Example #3
        public void InverseTestNaN()
        {
            // For every position of a magic square, replace that entry with NaN
            // and check Solve(I) agrees with Inverse() on the poisoned matrix.
            const int size = 5;

            var identity = Matrix.Identity(size);

            for (int row = 0; row < size; row++)
            {
                for (int col = 0; col < size; col++)
                {
                    double[,] matrix = Matrix.Magic(size);
                    matrix[row, col] = double.NaN;

                    var lu = new LuDecomposition(matrix);

                    var fromSolve   = lu.Solve(identity);
                    var fromInverse = lu.Inverse();

                    Assert.IsTrue(Matrix.IsEqual(fromSolve, fromInverse));
                }
            }
        }
Example #4
        /// <summary>
        ///   Iterates one pass of the optimization algorithm trying to find
        ///   the best regression coefficients for the logistic model.
        /// </summary>
        /// <remarks>
        ///   An iterative Newton-Raphson algorithm is used to calculate
        ///   the maximum likelihood values of the parameters.  This procedure
        ///   uses the partial second derivatives of the parameters in the
        ///   Hessian matrix to guide incremental parameter changes in an effort
        ///   to maximize the log likelihood value for the likelihood function.
        /// </remarks>
        /// <param name="input">
        ///   The input vectors, one observation per row. Each vector is expected
        ///   to have Coefficients.Length - 1 elements (an intercept column of 1s
        ///   is prepended internally).
        /// </param>
        /// <param name="output">
        ///   The observed outcomes, one per input vector.
        /// </param>
        /// <returns>
        ///   The absolute value of the largest parameter change.
        /// </returns>
        public double Regress(double[][] input, double[] output)
        {
            // Regress using Iterative Reweighted Least Squares estimation.

            // Initial definitions and memory allocations.
            // N = number of observations, M = number of coefficients
            // (intercept plus one per input feature).
            int N = input.Length;
            int M = this.Coefficients.Length;

            double[,] regression = new double[N, M];   // design matrix (with intercept column)
            double[,] hessian    = new double[M, M];
            double[,] inverse;                         // assigned in both branches below
            double[] gradient = new double[M];
            double[] errors   = new double[N];
            double[] R        = new double[N];         // IRLS weights: variance of each prediction
            double[] deltas;


            // Compute the regression matrix, errors and diagonal
            for (int i = 0; i < N; i++)
            {
                double y = this.Compute(input[i]);
                double o = output[i];

                // Calculate error vector (prediction minus observation)
                errors[i] = y - o;

                // Calculate R diagonal: y*(1-y) is the Bernoulli variance,
                // i.e. the derivative of the logistic function at y.
                R[i] = y * (1.0 - y);

                // Compute the regression matrix: column 0 is the intercept,
                // remaining columns are the input features shifted by one.
                regression[i, 0] = 1;
                for (int j = 1; j < M; j++)
                {
                    regression[i, j] = input[i][j - 1];
                }
            }


            // Compute error gradient and "Hessian" matrix (with diagonal R)
            for (int i = 0; i < M; i++)
            {
                // Compute error gradient: regression' * errors
                for (int j = 0; j < N; j++)
                {
                    gradient[i] += regression[j, i] * errors[j];
                }

                // Compute "Hessian" matrix (regression'*R*regression).
                // Note the index order: entry [j, i] accumulates column i
                // against column j of the design matrix; the result is
                // symmetric because R is diagonal.
                for (int j = 0; j < M; j++)
                {
                    for (int k = 0; k < N; k++)
                    {
                        hessian[j, i] += regression[k, i] * (R[k] * regression[k, j]);
                    }
                }
            }


            // Decompose to solve the linear system. Usually the hessian will
            // be invertible and LU will succeed. However, sometimes the hessian
            // may be singular and a Singular Value Decomposition may be needed.

            LuDecomposition lu = new LuDecomposition(hessian);

            // The SVD is very stable, but is quite expensive, being on average
            // about 10-15 times more expensive than LU decomposition. There are
            // other ways to avoid a singular Hessian. For a very interesting
            // reading on the subject, please see:
            //
            //  - Jeff Gill & Gary King, "What to Do When Your Hessian Is Not Invertible",
            //    Sociological Methods & Research, Vol 33, No. 1, August 2004, 54-87.
            //    Available in: http://gking.harvard.edu/files/help.pdf
            //
            //
            // Moreover, the computation of the inverse is optional, as it will
            // be used only to compute the standard errors of the regression.

            if (lu.Nonsingular)
            {
                // Solve using LU decomposition
                deltas  = lu.Solve(gradient);
                inverse = lu.Inverse(); // optional
            }
            else
            {
                // Hessian Matrix is singular, try pseudo-inverse solution
                SingularValueDecomposition svd = new SingularValueDecomposition(hessian);
                deltas  = svd.Solve(gradient);
                inverse = svd.Inverse(); // optional
            }


            // Update coefficients using the calculated deltas. Subtraction is the
            // Newton step: coefficients - H^-1 * gradient (errors were computed
            // as prediction minus observation above).
            for (int i = 0; i < coefficients.Length; i++)
            {
                this.coefficients[i] -= deltas[i];
            }

            // Calculate Coefficients standard errors (optional): the square roots
            // of the diagonal of the inverse Hessian (the asymptotic covariance).
            for (int i = 0; i < standardErrors.Length; i++)
            {
                standardErrors[i] = System.Math.Sqrt(inverse[i, i]);
            }


            // Return the absolute value of the largest parameter change
            // so callers can test for convergence.
            return(Matrix.Max(Matrix.Abs(deltas)));
        }