Example #1
        public void SolveTest5()
        {
            double[,] value =
            {
                { 2.1, 3.1 },
                { 1.6, 4.2 },
                { 2.1, 5.1 },
            };

            double[] rhs = { 6.1, 4.3, 2.1 };

            double[] expected = { 3.1839, -0.1891 };

            LuDecomposition target = new LuDecomposition(value);

            bool thrown = false;

            try
            {
                double[] actual = target.Solve(rhs);
            }
            catch (InvalidOperationException)
            {
                thrown = true;
            }

            Assert.IsTrue(thrown);
        }
Example #2
        public void InverseTest()
        {
            double[,] value =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            double[,] expectedInverse =
            {
                { 0.3043, -0.3913,  0.1304 },
                { 0.1304,  0.2609, -0.0870 },
                { 0.0435,  0.0870,  0.3043 },
            };

            var target = new LuDecomposition(value);

            double[,] actualInverse = target.Inverse();
            Assert.IsTrue(Matrix.IsEqual(expectedInverse, actualInverse, 0.001));
            Assert.IsTrue(Matrix.IsEqual(value, target.Reverse()));

            var target2 = new JaggedLuDecomposition(value.ToJagged());

            actualInverse = target2.Inverse().ToMatrix();
            Assert.IsTrue(Matrix.IsEqual(expectedInverse, actualInverse, 0.001));
            Assert.IsTrue(Matrix.IsEqual(value, target2.Reverse()));
        }
Example #3
        public void SolveTransposeTest()
        {
            double[,] a =
            {
                { 2, 1, 4 },
                { 6, 2, 2 },
                { 0, 1, 6 },
            };

            double[,] b =
            {
                { 1, 0, 7 },
                { 5, 2, 1 },
                { 1, 5, 2 },
            };

            double[,] expected =
            {
                { 0.5062,  0.2813,  0.0875 },
                { 0.1375,  1.1875, -0.0750 },
                { 0.8063, -0.2188,  0.2875 },
            };

            double[,] actual = new LuDecomposition(b, true).SolveTranspose(a);
            Assert.IsTrue(Matrix.IsEqual(expected, actual, 0.001));
        }
Example #4
        public void SolveTest()
        {
            double[,] value =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            double[,] rhs =
            {
                { 1, 2, 3 },
                { 3, 2, 1 },
                { 5, 0, 1 },
            };

            double[,] expected =
            {
                { -0.2174, -0.1739, 0.6522 },
                {  0.4783,  0.7826, 0.5652 },
                {  1.8261,  0.2609, 0.5217 },
            };

            LuDecomposition target = new LuDecomposition(value);

            double[,] actual = target.Solve(rhs);

            Assert.IsTrue(Matrix.IsEqual(expected, actual, 0.001));
        }
Example #5
        public void SolveTest3()
        {
            double[,] value =
            {
                {  2.000, 3.000, 0.000 },
                { -1.000, 2.000, 1.000 },
            };

            LuDecomposition target = new LuDecomposition(value);

            double[,] L = target.LowerTriangularFactor;
            double[,] U = target.UpperTriangularFactor;

            double[,] expectedL =
            {
                {  1.000, 0.000 },
                { -0.500, 1.000 },
            };

            double[,] expectedU =
            {
                { 2.000, 3.000, 0.000 },
                { 0.000, 3.500, 1.000 },
            };


            Assert.IsTrue(Matrix.IsEqual(expectedL, L, 0.001));
            Assert.IsTrue(Matrix.IsEqual(expectedU, U, 0.001));
        }
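
A quick check (not part of the original test): multiplying the two factors above reproduces the decomposed matrix,

    L \cdot U =
    \begin{pmatrix} 1 & 0 \\ -0.5 & 1 \end{pmatrix}
    \begin{pmatrix} 2 & 3 & 0 \\ 0 & 3.5 & 1 \end{pmatrix}
    =
    \begin{pmatrix} 2 & 3 & 0 \\ -1 & 2 & 1 \end{pmatrix}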
Example #6
        public List <double> GetAnswer()
        {
            var lu = new LuDecomposition(matrix);

            vectorB = SwapVector(vectorB, lu.SwappedElements);
            var vectorZ = new List <double>(vectorB);

            for (int i = 0; i < dim - 1; i++)
            {
                for (int j = i + 1; j < dim; j++)
                {
                    vectorZ[j] -= lu.L[j, i] * vectorZ[i];
                }
            }

            for (int i = dim - 1; i >= 0; i--)
            {
                vectorZ[i] /= lu.U[i, i];
                for (int j = i - 1; j >= 0; j--)
                {
                    vectorZ[j] -= lu.U[j, i] * vectorZ[i];
                }
            }


            return(vectorZ);
        }
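
The loops above implement the standard two-stage LU solve. In matrix form, with P the row permutation recorded in SwappedElements:

    P A = L U, \qquad L z = P b \ \text{(forward substitution)}, \qquad U x = z \ \text{(back substitution)}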
Example #7
        public void SolveTest1()
        {
            double[,] value =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            double[] rhs = { 5, 0, 1 };

            double[] expected =
            {
                1.6522,
                0.5652,
                0.5217,
            };

            var target = new LuDecomposition(value);

            double[] actual = target.Solve(rhs);
            Assert.IsTrue(Matrix.IsEqual(expected, actual, 1e-3));
            Assert.IsTrue(Matrix.IsEqual(value, target.Reverse()));

            var target2 = new JaggedLuDecomposition(value.ToJagged());

            actual = target2.Solve(rhs);
            Assert.IsTrue(Matrix.IsEqual(expected, actual, 1e-3));
            Assert.IsTrue(Matrix.IsEqual(value, target2.Reverse()));
        }
Example #8
        public void LogDeterminantTest()
        {
            LuDecomposition lu = new LuDecomposition(CholeskyDecompositionTest.bigmatrix);

            Assert.AreEqual(0, lu.Determinant);
            Assert.AreEqual(-2224.8931093738875, lu.LogDeterminant, 1e-12);
            Assert.IsTrue(lu.Nonsingular);
        }
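
An LU-based log-determinant is typically accumulated as the sum of the logarithms of the absolute pivots,

    \log\lvert\det A\rvert = \sum_i \log\lvert u_{ii}\rvert,

so it stays finite even when the determinant itself underflows: e^{-2224.89} is far below the smallest representable double, which is why Determinant reports 0 while the matrix is still reported as nonsingular.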
Example #9
        public void DeterminantTest()
        {
            double[,] value =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            LuDecomposition lu = new LuDecomposition(value);

            Assert.AreEqual(23, lu.Determinant);
            Assert.IsTrue(lu.Nonsingular);
        }
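
The expected value follows from a cofactor expansion along the first row:

    \det A = 2\,(2 \cdot 3 - 1 \cdot (-1)) - 3\,((-1) \cdot 3 - 1 \cdot 0) + 0 = 2 \cdot 7 + 3 \cdot 3 = 23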
Example #10
        /// <summary>
        /// Intersection point of two lines in 3D space.
        /// </summary>
        /// <returns>If the lines corresponding to the two rays intersect, returns the intersection point; otherwise returns null.</returns>
        /// <remarks>
        /// The parametric equation of a line in 3D space is:
        ///    x = x0 + m*t;
        ///    y = y0 + n*t;
        ///    z = z0 + p*t;
        /// where {x0, y0, z0} is a point on the line and {m, n, p} is its direction vector.</remarks>
        private static XYZ GetIntersectPoint(Line3D line1, Line3D line2)
        {
            // Build a linear system of six equations in five unknowns: x, y, z, t, k.

            // First solve for the five unknowns from the first five equations.
            double[][] dataA = new double[][]
            {
                new double[] { 1, 0, 0, -line1.Direction.X, 0 },
                new double[] { 0, 1, 0, -line1.Direction.Y, 0 },
                new double[] { 0, 0, 1, -line1.Direction.Z, 0 },
                new double[] { 1, 0, 0, 0, -line2.Direction.X },
                new double[] { 0, 1, 0, 0, -line2.Direction.Y },
            };

            double[][] dataB = new double[][]
            {
                new double[] { line1.Origin.X },
                new double[] { line1.Origin.Y },
                new double[] { line1.Origin.Z },
                new double[] { line2.Origin.X },
                new double[] { line2.Origin.Y },
            };

            Matrix A = new Matrix(dataA);
            Matrix b = new Matrix(dataB);

            // Solve the linear system Ax = b via LU decomposition
            LuDecomposition d = new LuDecomposition(A);

            Matrix x = d.Solve(b);

            // Values of the five unknowns x, y, z, t, k
            var col = x.GetColumn(0);

            // Substitute these values into the sixth equation, z = z2 + p2 * k.
            // If its two sides agree, the lines intersect; otherwise they do not.
            var left  = col[2];
            var right = line2.Origin.Z + line2.Direction.Z * col[4];

            // Relative error between the two sides
            double relative = Math.Abs((left - right) / Math.Max(left, right));

            if (relative > 0.0001)
            {
                return(null);
            }
            return(new XYZ(col[0], col[1], col[2]));
        }
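
Written out, the five equations assembled into dataA are (with (m1, n1, p1), (m2, n2, p2) the direction vectors and (x1, y1, z1), (x2, y2, z2) the origins):

    x - m_1 t = x_1, \quad y - n_1 t = y_1, \quad z - p_1 t = z_1, \quad x - m_2 k = x_2, \quad y - n_2 k = y_2

The sixth equation, z = z_2 + p_2 k, is the consistency check applied after solving.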
Example #11
        public void SolveTest4()
        {
            double[,] value =
            {
                { 2.1, 3.1 },
                { 1.6, 4.2 },
            };

            double[] rhs = { 6.1, 4.3 };

            double[] expected = { 3.1839, -0.1891 };

            var target1 = new LuDecomposition(value);
            var target2 = new JaggedLuDecomposition(value.ToJagged());

            Assert.IsTrue(Matrix.IsEqual(expected, target1.Solve(rhs), 1e-3));
            Assert.IsTrue(Matrix.IsEqual(expected, target2.Solve(rhs), 1e-3));
        }
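
Substituting the expected solution back into the system confirms it (up to rounding):

    2.1 \cdot 3.1839 + 3.1 \cdot (-0.1891) \approx 6.1, \qquad 1.6 \cdot 3.1839 + 4.2 \cdot (-0.1891) \approx 4.3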
Example #12
        public void LogDeterminantTest2()
        {
            double[,] value =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            LuDecomposition lu = new LuDecomposition(value);

            Assert.AreEqual(23, lu.Determinant);

            double expected = System.Math.Log(23);
            double actual   = lu.LogDeterminant;

            Assert.AreEqual(expected, actual);
        }
Example #13
        public void SolveTest4()
        {
            double[,] value =
            {
                { 2.1, 3.1 },
                { 1.6, 4.2 },
            };

            double[] rhs = { 6.1, 4.3 };

            double[] expected = { 3.1839, -0.1891 };

            LuDecomposition target = new LuDecomposition(value);

            double[] actual = target.Solve(rhs);

            Assert.IsTrue(Matrix.IsEqual(expected, actual, 0.001));
        }
Example #14
        public void LuDecompositionConstructorTest()
        {
            double[,] value =
            {
                {  2, -1,  0 },
                { -1,  2, -1 },
                {  0, -1,  2 }
            };


            double[,] expectedL =
            {
                {  1.0000,       0,      0 },
                { -0.5000,  1.0000,      0 },
                {       0, -0.6667, 1.0000 },
            };


            double[,] expectedU =
            {
                { 2.0000, -1.0000,       0 },
                {      0,  1.5000, -1.0000 },
                {      0,       0,  1.3333 },
            };


            LuDecomposition target = new LuDecomposition(value);

            double[,] actualL = target.LowerTriangularFactor;
            double[,] actualU = target.UpperTriangularFactor;

            Assert.IsTrue(Matrix.IsEqual(expectedL, actualL, 0.001));
            Assert.IsTrue(Matrix.IsEqual(expectedU, actualU, 0.001));


            target = new LuDecomposition(value.Transpose(), true);

            actualL = target.LowerTriangularFactor;
            actualU = target.UpperTriangularFactor;

            Assert.IsTrue(Matrix.IsEqual(expectedL, actualL, 0.001));
            Assert.IsTrue(Matrix.IsEqual(expectedU, actualU, 0.001));
        }
Example #15
File: ExeTest.cs  Project: zfybs/eZstd
        public static void m2()
        {
            double[][] dataA = new double[][]
            {
                new double[] { 0.03, 58.9 },
                new double[] { 5.31, -6.10 },
            };

            double[][] dataB = new double[][]
            {
                new double[] { 59.2 },
                new double[] { 47.0 },
            };

            Matrix A = new Matrix(dataA);
            Matrix b = new Matrix(dataB);
            // Solve the linear system Ax = b via LU decomposition
            LuDecomposition d = new LuDecomposition(A);
            Matrix          x = d.Solve(b);
        }
Example #16
        public void InverseTestNaN()
        {
            int n = 5;

            var I = Matrix.Identity(n);

            for (int i = 0; i < n; i++)
            {
                for (int j = 0; j < n; j++)
                {
                    double[,] value = Matrix.Magic(n);

                    value[i, j] = double.NaN;

                    var target = new LuDecomposition(value);
                    Assert.IsTrue(Matrix.IsEqual(target.Solve(I), target.Inverse()));

                    var target2 = new JaggedLuDecomposition(value.ToJagged());
                    Assert.IsTrue(Matrix.IsEqual(target2.Solve(I.ToJagged()), target2.Inverse()));
                }
            }
        }
Example #17
        public void SolveTransposeTest()
        {
            double[,] a =
            {
                { 2, 1, 4 },
                { 6, 2, 2 },
                { 0, 1, 6 },
            };

            double[,] b =
            {
                { 1, 0, 7 },
                { 5, 2, 1 },
                { 1, 5, 2 },
            };

            double[,] expected =
            {
                { 0.5062,  0.2813,  0.0875 },
                { 0.1375,  1.1875, -0.0750 },
                { 0.8063, -0.2188,  0.2875 },
            };

            Assert.IsTrue(Matrix.IsEqual(expected, new LuDecomposition(b, true).SolveTranspose(a), 1e-3));
            Assert.IsTrue(Matrix.IsEqual(expected, new JaggedLuDecomposition(b.ToJagged(), true).SolveTranspose(a.ToJagged()), 1e-3));

            var target = new LuDecomposition(b, true);
            var p      = target.PivotPermutationVector;

            int[] idx = p.ArgSort();

            var r = target.LowerTriangularFactor.Dot(target.UpperTriangularFactor)
                    .Submatrix(idx, null).Transpose();

            Assert.IsTrue(Matrix.IsEqual(b, r, 1e-3));
            Assert.IsTrue(Matrix.IsEqual(b.Transpose(), target.Reverse(), 1e-3));
            Assert.IsTrue(Matrix.IsEqual(b.Transpose(), new JaggedLuDecomposition(b.ToJagged(), true).Reverse(), 1e-3));
        }
Example #18
        public void InverseTestNaN()
        {
            int n = 5;

            var I = Matrix.Identity(n);

            for (int i = 0; i < n; i++)
            {
                for (int j = 0; j < n; j++)
                {
                    double[,] value = Matrix.Magic(n);

                    value[i, j] = double.NaN;

                    var target = new LuDecomposition(value);

                    var solution = target.Solve(I);
                    var inverse  = target.Inverse();

                    Assert.IsTrue(Matrix.IsEqual(solution, inverse));
                }
            }
        }
Example #19
    /// <summary>
    /// Finds the coefficients of a polynomial p(x) of degree n that fits the data,
    /// p(x(i)) to y(i), in a least squares sense. The result p is a row vector of
    /// length n+1 containing the polynomial coefficients in incremental powers.
    /// </summary>
    /// <param name="x">x axis values</param>
    /// <param name="y">y axis values</param>
    /// <param name="order">polynomial order including the constant</param>
    internal PolyFit(double[] x, double[] y, int order)
    {
        // increment the order to match the MATLAB convention
        var matrixX = new double[x.Length, ++order];
        var matrixY = new double[x.Length, 1];

        if (x.Length != y.Length)
        {
            throw new ArgumentException("x and y array lengths do not match!");
        }

        // copy y matrix
        for (int i = 0; i < y.Length; i++)
        {
            matrixY[i, 0] = y[i];
        }

        // create the X matrix
        for (int row = 0; row < x.Length; row++)
        {
            var nVal = 1.0;

            for (int col = 0; col < order; col++)
            {
                matrixX[row, col] = nVal;
                nVal *= x[row];
            }
        }

        var matrixXt  = matrixX.Transpose();
        var matrixXtX = matrixXt.Product(matrixX);
        var matrixXtY = matrixXt.Product(matrixY);

        var lu = new LuDecomposition(matrixXtX);

        Coeff = lu.Solve(matrixXtY).GetColumn(0).ToArray();
    }
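
The constructor above fits the polynomial through the normal equations: with the design matrix X whose i-th row is (1, x_i, x_i^2, ..., x_i^n), the coefficient vector c stored in Coeff solves

    X^{\mathsf T} X \, c = X^{\mathsf T} y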
Example #20
        public void SolveTest1()
        {
            double[,] value =
            {
                {  2,  3, 0 },
                { -1,  2, 1 },
                {  0, -1, 3 }
            };

            double[] rhs = { 5, 0, 1 };

            double[] expected =
            {
                1.6522,
                0.5652,
                0.5217,
            };

            LuDecomposition target = new LuDecomposition(value);

            double[] actual = target.Solve(rhs);

            Assert.IsTrue(Matrix.IsEqual(expected, actual, 0.001));
        }
Example #21
        public static CubicBezier[] BuildC2Spline(Vector2[] pts, Vector2? m0, Vector2? mn, double tension)
        {
            if (pts.Length < 2)
            {
                throw new ArgumentException("There must be at least 2 points to construct cardinal spline", "pts");
            }

            // compute reasonable starting and ending tangents if not supplied
            if (m0 == null)
            {
                m0 = tension * (pts[1] - pts[0]);
            }

            if (mn == null)
            {
                mn = tension * (pts[pts.Length - 1] - pts[pts.Length - 2]);
            }

            if (pts.Length == 2)
            {
                return(new CubicBezier[] { CubicBezier.FromCubicHermite(pts[0], m0.Value, pts[1], mn.Value) });
            }

            int n = pts.Length - 1;

            // we have > 2 points, so we will have n-1 beziers
            CubicBezier[] beziers = new CubicBezier[n];

            // build up constraint matrix
            Matrix A = new Matrix(2 * n, 2 * n, 0);

            Matrix b = new Matrix(2 * n, 2);

            // matrix row index value
            int idx = 0;

            // add starting/ending tangent constraint
            Vector2 p01 = m0.Value / 3.0 + pts[0];

            A[idx, 0] = 1;
            b[idx, 0] = p01.X; b[idx, 1] = p01.Y;
            idx++;

            Vector2 pn2 = -mn.Value / 3.0 + pts[n];

            A[idx, 2 * n - 1] = 1;
            b[idx, 0]         = pn2.X; b[idx, 1] = pn2.Y;
            idx++;

            // add C1 constraints
            for (int i = 0; i < n - 1; i++)
            {
                A[idx, i * 2 + 1]   = 1;
                A[idx, (i + 1) * 2] = 1;
                b[idx, 0]           = pts[i + 1].X * 2; b[idx, 1] = pts[i + 1].Y * 2;
                idx++;
            }

            // add C2 constraints
            for (int i = 0; i < n - 1; i++)
            {
                A[idx, i * 2]           = 1;
                A[idx, i * 2 + 1]       = -2;
                A[idx, (i + 1) * 2]     = 2;
                A[idx, (i + 1) * 2 + 1] = -1;
                b[idx, 0] = 0; b[idx, 1] = 0;
                idx++;
            }

            // build the LUDecomposition of A to solve system A*P = b;
            LuDecomposition lu = new LuDecomposition(A);
            Matrix          P  = lu.Solve(b);

            // work back the Parameters
            for (int i = 0; i < n; i++)
            {
                beziers[i] = new CubicBezier(pts[i], new Vector2(P[2 * i, 0], P[2 * i, 1]), new Vector2(P[2 * i + 1, 0], P[2 * i + 1, 1]), pts[i + 1]);
            }

            return(beziers);
        }
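
For reference, the constraint rows assembled above encode C1 and C2 continuity between neighbouring cubic Béziers. Writing a_i and b_i for the two interior control points of Bézier i (stored at columns 2i and 2i+1 of A; this notation is not from the source), the rows added in the two loops are

    b_i + a_{i+1} = 2 P_{i+1} \quad (C^1), \qquad a_i - 2 b_i + 2 a_{i+1} - b_{i+1} = 0 \quad (C^2)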
Example #22
        /// <summary>
        ///   Iterates one pass of the optimization algorithm trying to find
        ///   the best regression coefficients for the logistic model.
        /// </summary>
        /// <remarks>
        ///   An iterative Newton-Raphson algorithm is used to calculate
        ///   the maximum likelihood values of the parameters.  This procedure
        ///   uses the partial second derivatives of the parameters in the
        ///   Hessian matrix to guide incremental parameter changes in an effort
        ///   to maximize the log likelihood value for the likelihood function.
        /// </remarks>
        /// <returns>
        ///   The absolute value of the largest parameter change.
        /// </returns>
        public double Regress(double[][] input, double[] output)
        {
            // Regress using Iterative Reweighted Least Squares estimation.

            // Initial definitions and memory allocations
            int N = input.Length;
            int M = this.Coefficients.Length;

            double[,] regression = new double[N, M];
            double[,] hessian    = new double[M, M];
            double[,] inverse;
            double[] gradient = new double[M];
            double[] errors   = new double[N];
            double[] R        = new double[N];
            double[] deltas;


            // Compute the regression matrix, errors and diagonal
            for (int i = 0; i < N; i++)
            {
                double y = this.Compute(input[i]);
                double o = output[i];

                // Calculate error vector
                errors[i] = y - o;

                // Calculate R diagonal
                R[i] = y * (1.0 - y);

                // Compute the regression matrix
                regression[i, 0] = 1;
                for (int j = 1; j < M; j++)
                {
                    regression[i, j] = input[i][j - 1];
                }
            }


            // Compute error gradient and "Hessian" matrix (with diagonal R)
            for (int i = 0; i < M; i++)
            {
                // Compute error gradient
                for (int j = 0; j < N; j++)
                {
                    gradient[i] += regression[j, i] * errors[j];
                }

                // Compute "Hessian" matrix (regression'*R*regression)
                for (int j = 0; j < M; j++)
                {
                    for (int k = 0; k < N; k++)
                    {
                        hessian[j, i] += regression[k, i] * (R[k] * regression[k, j]);
                    }
                }
            }


            // Decompose to solve the linear system. Usually the hessian will
            // be invertible and LU will succeed. However, sometimes the hessian
            // may be singular and a Singular Value Decomposition may be needed.

            LuDecomposition lu = new LuDecomposition(hessian);

            // The SVD is very stable, but is quite expensive, being on average
            // about 10-15 times more expensive than LU decomposition. There are
            // other ways to avoid a singular Hessian. For a very interesting
            // reading on the subject, please see:
            //
            //  - Jeff Gill & Gary King, "What to Do When Your Hessian Is Not Invertible",
            //    Sociological Methods & Research, Vol 33, No. 1, August 2004, 54-87.
            //    Available in: http://gking.harvard.edu/files/help.pdf
            //

            // Moreover, the computation of the inverse is optional, as it will
            // be used only to compute the standard errors of the regression.

            if (lu.Nonsingular)
            {
                // Solve using LU decomposition
                deltas  = lu.Solve(gradient);
                inverse = lu.Inverse(); // optional
            }
            else
            {
                // Hessian Matrix is singular, try pseudo-inverse solution
                SingularValueDecomposition svd = new SingularValueDecomposition(hessian);
                deltas  = svd.Solve(gradient);
                inverse = svd.Inverse(); // optional
            }


            // Update coefficients using the calculated deltas
            for (int i = 0; i < coefficients.Length; i++)
            {
                this.coefficients[i] -= deltas[i];
            }

            // Calculate Coefficients standard errors (optional)
            for (int i = 0; i < standardErrors.Length; i++)
            {
                standardErrors[i] = System.Math.Sqrt(inverse[i, i]);
            }


            // Return the absolute value of the largest parameter change
            return(Matrix.Max(Matrix.Abs(deltas)));
        }
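
In matrix notation, the loop above performs the usual IRLS / Newton-Raphson step: with design matrix X, diagonal weight matrix R (r_ii = y_i (1 - y_i)) and error vector e,

    g = X^{\mathsf T} e, \qquad H = X^{\mathsf T} R X, \qquad w \leftarrow w - H^{-1} g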
Example #23
        public void LuDecompositionConstructorTest()
        {
            #region doc_ctor
            // Let's say we would like to compute the
            // LU decomposition of the following matrix:
            double[,] matrix =
            {
                {  2, -1,  0 },
                { -1,  2, -1 },
                {  0, -1,  2 }
            };

            // Compute the LU decomposition with:
            var lu = new LuDecomposition(matrix);


            // Retrieve the lower triangular factor L:
            double[,] L = lu.LowerTriangularFactor;

            // Should be equal to
            double[,] expectedL =
            {
                {  1.0000,       0,      0 },
                { -0.5000,  1.0000,      0 },
                {       0, -0.6667, 1.0000 },
            };


            // Retrieve the upper triangular factor U:
            double[,] U = lu.UpperTriangularFactor;

            // Should be equal to
            double[,] expectedU =
            {
                { 2.0000, -1.0000,       0 },
                {      0,  1.5000, -1.0000 },
                {      0,       0,  1.3333 },
            };


            // Certify that the decomposition has worked as expected by
            // trying to reconstruct the original matrix with R = L * U:
            double[,] reconstruction = L.Dot(U);

            // reconstruction should be equal to
            // {
            //     {  2, -1,  0 },
            //     { -1,  2, -1 },
            //     {  0, -1,  2 }
            // };
            #endregion


            Assert.IsTrue(Matrix.IsEqual(matrix, reconstruction, 1e-4));
            Assert.IsTrue(Matrix.IsEqual(expectedL, L, 1e-4));
            Assert.IsTrue(Matrix.IsEqual(expectedU, U, 1e-4));


            lu = new LuDecomposition(matrix.Transpose(), true);

            L = lu.LowerTriangularFactor;
            U = lu.UpperTriangularFactor;

            Assert.IsTrue(Matrix.IsEqual(expectedL, L, 0.001));
            Assert.IsTrue(Matrix.IsEqual(expectedU, U, 0.001));
        }
Example #24
        /// <summary>
        ///   Runs one iteration of the Reweighted Least Squares algorithm.
        /// </summary>
        /// <param name="inputs">The input data.</param>
        /// <param name="outputs">The outputs associated with each input vector.</param>
        /// <returns>The maximum relative change in the parameters after the iteration.</returns>
        ///
        public double Run(double[][] inputs, double[] outputs)
        {
            // Regress using Iteratively Reweighted Least Squares estimation.

            // References:
            //  - Bishop, Christopher M.; Pattern Recognition
            //    and Machine Learning. Springer; 1st ed. 2006.


            // Initial definitions and memory allocations
            int N = inputs.Length;

            double[][] design       = new double[N][];
            double[]   errors       = new double[N];
            double[]   weights      = new double[N];
            double[]   coefficients = this.regression.Coefficients;
            double[]   deltas;

            // Compute the regression matrix
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] row = design[i] = new double[parameterCount];

                row[0] = 1; // for intercept
                for (int j = 0; j < inputs[i].Length; j++)
                {
                    row[j + 1] = inputs[i][j];
                }
            }


            // Compute errors and weighting matrix
            for (int i = 0; i < inputs.Length; i++)
            {
                double y = regression.Compute(inputs[i]);

                // Calculate error vector
                errors[i] = y - outputs[i];

                // Calculate weighting matrix
                weights[i] = y * (1.0 - y);
            }


            // Reset Hessian matrix and gradient
            for (int i = 0; i < gradient.Length; i++)
            {
                gradient[i] = 0;
                for (int j = 0; j < gradient.Length; j++)
                {
                    hessian[i, j] = 0;
                }
            }


            // (Re-) Compute error gradient
            for (int j = 0; j < design.Length; j++)
            {
                for (int i = 0; i < gradient.Length; i++)
                {
                    gradient[i] += design[j][i] * errors[j];
                }
            }

            // (Re-) Compute weighted "Hessian" matrix
            for (int k = 0; k < weights.Length; k++)
            {
                double[] rk = design[k];

                for (int j = 0; j < rk.Length; j++)
                {
                    for (int i = 0; i < rk.Length; i++)
                    {
                        hessian[j, i] += rk[i] * rk[j] * weights[k];
                    }
                }
            }


            // Decompose to solve the linear system. Usually the hessian will
            // be invertible and LU will succeed. However, sometimes the hessian
            // may be singular and a Singular Value Decomposition may be needed.

            LuDecomposition lu = new LuDecomposition(hessian);

            // The SVD is very stable, but is quite expensive, being on average
            // about 10-15 times more expensive than LU decomposition. There are
            // other ways to avoid a singular Hessian. For a very interesting
            // reading on the subject, please see:
            //
            //  - Jeff Gill & Gary King, "What to Do When Your Hessian Is Not Invertible",
            //    Sociological Methods & Research, Vol 33, No. 1, August 2004, 54-87.
            //    Available in: http://gking.harvard.edu/files/help.pdf
            //

            // Moreover, the computation of the inverse is optional, as it will
            // be used only to compute the standard errors of the regression.

            if (lu.Nonsingular)
            {
                // Solve using LU decomposition
                deltas        = lu.Solve(gradient);
                decomposition = lu;
            }
            else
            {
                // Hessian Matrix is singular, try pseudo-inverse solution
                decomposition = new SingularValueDecomposition(hessian);
                deltas        = decomposition.Solve(gradient);
            }

            previous = (double[])coefficients.Clone();

            // Update coefficients using the calculated deltas
            for (int i = 0; i < coefficients.Length; i++)
            {
                coefficients[i] -= deltas[i];
            }


            if (computeStandardErrors)
            {
                // Grab the regression information matrix
                double[,] inverse = decomposition.Inverse();

                // Calculate coefficients' standard errors
                double[] standardErrors = regression.StandardErrors;
                for (int i = 0; i < standardErrors.Length; i++)
                {
                    standardErrors[i] = Math.Sqrt(inverse[i, i]);
                }
            }


            // Return the relative maximum parameter change
            for (int i = 0; i < deltas.Length; i++)
            {
                deltas[i] = Math.Abs(deltas[i]) / Math.Abs(previous[i]);
            }

            return(Matrix.Max(deltas));
        }
Example #25
        /// <summary>
        /// Compute the thin plate spline over the region and return the interpolated heights as a 2D array.
        /// </summary>
        /// <param name="control_points">Control points</param>
        /// <param name="AssociatedRegion">Region providing the output grid dimensions (SizeX, SizeY)</param>
        /// <param name="Regularization">Regularization parameter (lambda)</param>
        public double[,] calc_tps(List<cPoint3D> control_points, cDRC_Region AssociatedRegion, double Regularization)
        {
            int p = control_points.Count;
            if (p < 3) return null;
            double[,] grid = new double[AssociatedRegion.SizeX, AssociatedRegion.SizeY];
            Matrix mtx_l = new Matrix(p + 3, p + 3);
            Matrix mtx_v = new Matrix(p + 3, 1);
            Matrix mtx_orig_k = new Matrix(p, p);
            double a = 0.0;
            for (int i = 0; i < p; ++i)
            {
                for (int j = i + 1; j < p; ++j)
                {
                    cPoint3D pt_i = new cPoint3D(control_points[i].X, control_points[i].Y, control_points[i].Z);
                    cPoint3D pt_j = new cPoint3D(control_points[j].X, control_points[j].Y, control_points[j].Z);

                    pt_i.Y = pt_j.Y = 0;

                    //double elen = Math.Sqrt((pt_i.X - pt_j.X) * (pt_i.X - pt_j.X) + (pt_i.Z - pt_j.Z) * (pt_i.Z - pt_j.Z));
                    double elen = pt_i.DistTo(pt_j);
                    mtx_l[i, j] = mtx_l[j, i] = mtx_orig_k[i, j] = mtx_orig_k[j, i] = tps_base_func(elen);
                    a += elen * 2; // same for upper & lower tri
                }
            }
            a /= (double)(p * p);
            //regularization = 0.3f;
            //Fill the rest of L
            for (int i = 0; i < p; ++i)
            {
                //diagonal: regularization parameters (lambda * a^2)

                mtx_l[i, i] = mtx_orig_k[i, i] = Regularization * (a * a);

                // P (p x 3, upper right)
                mtx_l[i, p + 0] = 1.0;
                mtx_l[i, p + 1] = control_points[i].X;
                mtx_l[i, p + 2] = control_points[i].Z;

                // P transposed (3 x p, bottom left)
                mtx_l[p + 0, i] = 1.0;
                mtx_l[p + 1, i] = control_points[i].X;
                mtx_l[p + 2, i] = control_points[i].Z;
            }
            // O (3 x 3, lower right)
            for (int i = p; i < p + 3; ++i)
                for (int j = p; j < p + 3; ++j)
                    mtx_l[i, j] = 0.0;

            // Fill the right hand vector V
            for (int i = 0; i < p; ++i)
                mtx_v[i, 0] = control_points[i].Y;

            mtx_v[p + 0, 0] = mtx_v[p + 1, 0] = mtx_v[p + 2, 0] = 0.0;
            // Solve the linear system "inplace"
            Matrix mtx_v_res = new Matrix(p + 3, 1);

            LuDecomposition ty = new LuDecomposition(mtx_l);

            mtx_v_res = ty.Solve(mtx_v);
            if (mtx_v_res == null)
            {
                return null;
            }

            // Interpolate grid heights
            for (int x = 0; x < AssociatedRegion.SizeX; ++x)
            {
                for (int z = 0; z < AssociatedRegion.SizeY; ++z)
                {

                    //float x = 0f; float z = 0.5f;
                    double h = mtx_v_res[p + 0, 0] + mtx_v_res[p + 1, 0] * (float)x / (float)AssociatedRegion.SizeX + mtx_v_res[p + 2, 0] * (float)z / (float)AssociatedRegion.SizeY;
                    //double h = mtx_v[p + 0, 0] + mtx_v[p + 1, 0] * (float)x + mtx_v[p + 2, 0] * (float)z ;
                    cPoint3D pt_ia;
                    cPoint3D pt_cur = new cPoint3D((float)x / (float)AssociatedRegion.SizeX, 0, (float)z / (float)AssociatedRegion.SizeY);
                    //Vector3 pt_cur = new Vector3((float)x , 0, (float)z);
                    for (int i = 0; i < p; ++i)
                    {
                        pt_ia = control_points[i];
                        pt_ia.Y = 0;
                        h += mtx_v_res[i, 0] * tps_base_func(pt_ia.DistTo(pt_cur));
                    }

                    grid[x, z] = h;
                }
            }
            // Calc bending energy
            Matrix w = new Matrix(p, 1);
            for (int i = 0; i < p; ++i)
                w[i, 0] = mtx_v_res[i, 0];

            Matrix be;

            be = Matrix.Multiply(Matrix.Multiply(w.Transpose(), mtx_orig_k), w);
            bending_energy = be[0, 0];

            Console.WriteLine("be= " + be[0, 0]);
            return grid;
        }
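
tps_base_func is not shown in this example. A minimal sketch, assuming the usual thin-plate-spline radial basis U(r) = r^2 log r (the exact form used by the original code is an assumption):

        // Hypothetical helper, not from the original source:
        // the common thin-plate-spline kernel U(r) = r^2 * log(r), with U(0) defined as 0.
        private static double tps_base_func(double r)
        {
            return (r == 0.0) ? 0.0 : r * r * Math.Log(r);
        }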
Example #26
        private double run(double[][] inputs, double[][] outputs)
        {
            // Regress using Lower-Bound Newton-Raphson estimation
            //
            // The main idea is to replace the Hessian matrix with a
            //   suitable lower bound. Indeed, the Hessian is lower
            //   bounded by a negative definite matrix that does not
            //   even depend on w [Krishnapuram et al].
            //
            //   - http://www.lx.it.pt/~mtf/Krishnapuram_Carin_Figueiredo_Hartemink_2005.pdf
            //


            // Initial definitions and memory allocations
            int N = inputs.Length;

            double[][] design       = new double[N][];
            double[][] coefficients = this.regression.Coefficients;

            // Compute the regression matrix
            for (int i = 0; i < inputs.Length; i++)
            {
                double[] row = design[i] = new double[M];

                row[0] = 1; // for intercept
                for (int j = 0; j < inputs[i].Length; j++)
                {
                    row[j + 1] = inputs[i][j];
                }
            }


            // Reset Hessian matrix and gradient
            for (int i = 0; i < gradient.Length; i++)
            {
                gradient[i] = 0;
            }

            if (UpdateLowerBound)
            {
                for (int i = 0; i < gradient.Length; i++)
                {
                    for (int j = 0; j < gradient.Length; j++)
                    {
                        lowerBound[i, j] = 0;
                    }
                }
            }

            // In the multinomial logistic regression, the objective
            // function is the log-likelihood function l(w). As given
            // by Krishnapuram et al and Böhning, this is a concave
            // function with Hessian given by:
            //
            //       H(w) = -sum(P(w) - p(w)p(w)')  (x)  xx'
            //      (see referenced paper for proper indices)
            //
            // In which (x) denotes the Kronecker product. By using
            // the lower bound principle, Krishnapuram has shown that
            // we can replace H(w) with a lower bound approximation B
            // which does not depend on w (eq. 8 in the aforementioned paper):
            //
            //      B = -(1/2) [I - 11/M]  (x)  sum(xx')
            //
            // Thus we can compute and invert this matrix only once.
            //


            // For each input sample in the dataset
            for (int i = 0; i < inputs.Length; i++)
            {
                // Grab variables related to the sample
                double[] x = design[i];
                double[] y = outputs[i];

                // Compute and estimate outputs
                this.compute(inputs[i], output);

                // Compute errors for the sample
                for (int j = 0; j < errors.Length; j++)
                {
                    errors[j] = y[j + 1] - output[j];
                }


                // Compute current gradient and Hessian
                //   We can take advantage of the block structure of the
                //   Hessian matrix and gradient vector by employing the
                //   Kronecker product. See [Böhning, 1992]

                // (Re-) Compute error gradient
                double[] g = Matrix.KroneckerProduct(errors, x);
                for (int j = 0; j < g.Length; j++)
                {
                    gradient[j] += g[j];
                }

                if (UpdateLowerBound)
                {
                    // Compute xxt matrix
                    for (int k = 0; k < x.Length; k++)
                    {
                        for (int j = 0; j < x.Length; j++)
                        {
                            xxt[k, j] = x[k] * x[j];
                        }
                    }

                    // (Re-) Compute weighted "Hessian" matrix
                    double[,] h = Matrix.KroneckerProduct(weights, xxt);
                    for (int j = 0; j < parameterCount; j++)
                    {
                        for (int k = 0; k < parameterCount; k++)
                        {
                            lowerBound[j, k] += h[j, k];
                        }
                    }
                }
            }


            if (UpdateLowerBound)
            {
                UpdateLowerBound = false;

                // Decompose to solve the linear system. Usually the hessian will
                // be invertible and LU will succeed. However, sometimes the hessian
                // may be singular and a Singular Value Decomposition may be needed.

                LuDecomposition lu = new LuDecomposition(lowerBound);

                // The SVD is very stable, but is quite expensive, being on average
                // about 10-15 times more expensive than LU decomposition. There are
                // other ways to avoid a singular Hessian. For a very interesting
                // reading on the subject, please see:
                //
                //  - Jeff Gill & Gary King, "What to Do When Your Hessian Is Not Invertible",
                //    Sociological Methods & Research, Vol 33, No. 1, August 2004, 54-87.
                //    Available in: http://gking.harvard.edu/files/help.pdf
                //

                // Moreover, the computation of the inverse is optional, as it will
                // be used only to compute the standard errors of the regression.

                if (lu.Nonsingular)
                {
                    // Solve using LU decomposition
                    deltas        = lu.Solve(gradient);
                    decomposition = lu;
                }
                else
                {
                    // Hessian Matrix is singular, try pseudo-inverse solution
                    decomposition = new SingularValueDecomposition(lowerBound);
                    deltas        = decomposition.Solve(gradient);
                }
            }
            else
            {
                deltas = decomposition.Solve(gradient);
            }


            previous = coefficients.Reshape(1);

            // Update coefficients using the calculated deltas
            for (int i = 0, k = 0; i < coefficients.Length; i++)
            {
                for (int j = 0; j < coefficients[i].Length; j++)
                {
                    coefficients[i][j] -= deltas[k++];
                }
            }

            solution = coefficients.Reshape(1);


            if (computeStandardErrors)
            {
                // Grab the regression information matrix
                double[,] inverse = decomposition.Inverse();

                // Calculate coefficients' standard errors
                double[][] standardErrors = regression.StandardErrors;
                for (int i = 0, k = 0; i < standardErrors.Length; i++)
                {
                    for (int j = 0; j < standardErrors[i].Length; j++, k++)
                    {
                        standardErrors[i][j] = Math.Sqrt(Math.Abs(inverse[k, k]));
                    }
                }
            }



            // Return the relative maximum parameter change
            for (int i = 0; i < deltas.Length; i++)
            {
                deltas[i] = Math.Abs(deltas[i]) / Math.Abs(previous[i]);
            }

            return(Matrix.Max(deltas));
        }
Example #27
        static void Main(string[] args)
        {
            #region 1. Declaring matrices

            // 1.1 Using standard .NET declaration
            double[,] A =
            {
                { 1, 2, 3 },
                { 6, 2, 0 },
                { 0, 0, 1 }
            };

            double[,] B =
            {
                { 2, 0, 0 },
                { 0, 2, 0 },
                { 0, 0, 2 }
            };

            {
                // 1.2 Using Accord extension methods
                double[,] Bi = Matrix.Identity(3).Multiply(2);
                double[,] Bj = Matrix.Diagonal(3, 2.0); // both are equal to B

                // 1.3 Using Accord extension methods with implicit typing
                var I = Matrix.Identity(3);
            }
            #endregion



            #region 2. Matrix Operations
            {
                // 2.1 Addition
                var C = A.Add(B);

                // 2.2 Subtraction
                var D = A.Subtract(B);

                // 2.3 Multiplication
                {
                    // 2.3.1 By a scalar
                    var halfM = A.Multiply(0.5);

                    // 2.3.2 By a vector
                    double[] m = A.Multiply(new double[] { 1, 2, 3 });

                    // 2.3.3 By a matrix
                    var M = A.Multiply(B);

                    // 2.4 Transposing
                    var At = A.Transpose();
                }
            }


            // 2.5 Elementwise operations

            // 2.5.1 Elementwise multiplication
            A.ElementwiseMultiply(B); // A.*B

            // 2.5.2 Elementwise division
            A.ElementwiseDivide(B); // A./B

            #endregion



            #region 3. Matrix characteristics
            {
                // 3.1 Calculating the determinant
                double det = A.Determinant();

                // 3.2 Calculating the trace
                double tr = A.Trace();

                // 3.3 Computing the sum vector
                {
                    double[] sumVector = A.Sum();

                    // 3.3.1 Computing the total sum of elements
                    double sum = sumVector.Sum();

                    // 3.3.2 Computing the sum along the rows
                    sumVector = A.Sum(0); // Equivalent to Octave's sum(A, 1)

                    // 3.3.3 Computing the sum along the columns
                    sumVector = A.Sum(1); // Equivalent to Octave's sum(A, 2)
                }
            }
            #endregion



            #region 4. Linear Algebra
            {
                // 4.1 Computing the inverse
                var invA = A.Inverse();

                // 4.2 Computing the pseudo-inverse
                var pinvA = A.PseudoInverse();

                // 4.3 Solving a linear system (Ax = B)
                var x = A.Solve(B);
            }
            #endregion



            #region 5. Special operators
            {
                // 5.1 Finding the indices of elements
                double[] v   = { 5, 2, 2, 7, 1, 0 };
                int[]    idx = v.Find(e => e > 2); // finding the index of every element in v higher than 2.

                // 5.2 Selecting elements by index
                double[] u = v.Submatrix(idx); // u is { 5, 7 }

                // 5.3 Converting between different matrix representations
                double[][] jaggedA = A.ToArray(); // from multidimensional to jagged array

                // 5.4 Extracting a column or row from the matrix
                double[] a = A.GetColumn(0); // retrieves the first column
                double[] b = B.GetRow(1);    // retrieves the second row

                // 5.5 Taking the absolute of a matrix
                var absA = A.Abs();

                // 5.6 Applying some function to every element
                var newv = v.Apply(e => e + 1);
            }
            #endregion



            #region 7. Vector operations
            {
                double[] u = { 1, 2, 3 };
                double[] v = { 4, 5, 6 };

                var w1 = u.InnerProduct(v);
                var w2 = u.OuterProduct(v);
                var w3 = u.CartesianProduct(v);


                double[] m = { 1, 2, 3, 4 };
                double[,] M = Matrix.Reshape(m, 2, 2);
            }
            #endregion


            #region Decompositions
            {
                // Singular value decomposition
                {
                    SingularValueDecomposition svd = new SingularValueDecomposition(A);
                    var U = svd.LeftSingularVectors;
                    var S = svd.Diagonal;
                    var V = svd.RightSingularVectors;
                }
                // or (please see documentation for details)
                {
                    SingularValueDecomposition svd = new SingularValueDecomposition(A.Transpose());
                    var U = svd.RightSingularVectors;
                    var S = svd.Diagonal;
                    var V = svd.LeftSingularVectors;
                }

                // Eigenvalue decomposition
                {
                    EigenvalueDecomposition eig = new EigenvalueDecomposition(A);
                    var V = eig.Eigenvectors;
                    var D = eig.DiagonalMatrix;
                }

                // QR decomposition
                {
                    QrDecomposition qr = new QrDecomposition(A);
                    var             Q  = qr.OrthogonalFactor;
                    var             R  = qr.UpperTriangularFactor;
                }

                // Cholesky decomposition
                {
                    CholeskyDecomposition chol = new CholeskyDecomposition(A);
                    var R = chol.LeftTriangularFactor;
                }

                // LU decomposition
                {
                    LuDecomposition lu = new LuDecomposition(A);
                    var             L  = lu.LowerTriangularFactor;
                    var             U  = lu.UpperTriangularFactor;
                }
            }
            #endregion
        }
Example #28
 public LuDecompositionWrapper(LuDecomposition lu)
 {
     _lu = lu;
 }