Inheritance: ICloneable, ISolverDecomposition
Example #1
        public void MahalanobisTest3()
        {
            // Example from Statistical Distance Calculator
            // http://maplepark.com/~drf5n/cgi-bin/dist.cgi

            double[,] cov = 
            {
                { 1.030303, 2.132728, 0.576716 },
                { 2.132728, 4.510515, 1.185771 },
                { 0.576716, 1.185771, 0.398922 }
            };



            double[] x, y;
            double actual, expected;

            var svd = new SingularValueDecomposition(cov, true, true, true);

            var inv = cov.Inverse();
            var pinv = svd.Inverse();
            Assert.IsTrue(inv.IsEqual(pinv, 1e-6));

            x = new double[] { 2, 4, 1 };
            y = new double[] { 0, 0, 0 };

            {
                var bla = cov.Solve(x);
                var blo = svd.Solve(x);
                var ble = inv.Multiply(x);
                var bli = pinv.Multiply(x);

                Assert.IsTrue(bla.IsEqual(blo, 1e-6));
                Assert.IsTrue(bla.IsEqual(ble, 1e-6));
                Assert.IsTrue(bla.IsEqual(bli, 1e-6));
            }

            expected = 2.0773536867741504;
            actual = Distance.Mahalanobis(x, y, inv);
            Assert.AreEqual(expected, actual, 1e-6);

            actual = Distance.Mahalanobis(x, y, svd);
            Assert.AreEqual(expected, actual, 1e-6);


            x = new double[] { 7, 5, 1 };
            y = new double[] { 1, 0.52, -79 };

            expected = 277.8828871106366;
            actual = Distance.Mahalanobis(x, y, inv);
            Assert.AreEqual(expected, actual, 1e-5);
            actual = Distance.Mahalanobis(x, y, svd);
            Assert.AreEqual(expected, actual, 1e-5);
        }
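
The SVD overload of Distance.Mahalanobis used above is mainly useful when the covariance matrix is singular and a plain inverse does not exist. A minimal sketch (not part of the original test), assuming the same using directives as the test above; Example #2 below shows the computation it relies on:

        public static double MahalanobisWithSingularCovariance()
        {
            double[,] cov =
            {
                { 1, 1 },
                { 1, 1 }   // rank 1: the ordinary inverse does not exist
            };

            // The SVD still yields a pseudo-inverse for the singular matrix
            var svd = new SingularValueDecomposition(cov, true, true, true);

            double[] x = { 2, 4 };
            double[] y = { 0, 0 };

            // Internally solves cov * z = (x - y) using the pseudo-inverse
            return Distance.Mahalanobis(x, y, svd);
        }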
Example #2
        public static double SquareMahalanobis(this double[] x, double[] y, SingularValueDecomposition covariance)
        {
            double[] d = new double[x.Length];
            for (int i = 0; i < x.Length; i++)
                d[i] = x[i] - y[i];

            double[] z = covariance.Solve(d);

            double r = 0.0;
            for (int i = 0; i < d.Length; i++)
                r += d[i] * z[i];
            return Math.Abs(r);
        }
Example #3
        /// <summary>
        ///   Constructs a multivariate Gaussian distribution
        ///   with given mean vector and covariance matrix.
        /// </summary>
        public NormalDistribution(double[] mean, double[,] covariance)
            : base(mean.Length)
        {
            int k = mean.Length;

            this.mean = mean;
            this.covariance = covariance;
            variance = covariance.Diagonal();

            double detSqrt = System.Math.Sqrt(System.Math.Abs(covariance.Determinant()));
            constant = 1.0/(System.Math.Pow(2.0*System.Math.PI, k/2.0)*detSqrt);

            chol = new CholeskyDecomposition(covariance, true);

            if (chol.Determinant == 0)
            {
                // The covariance matrix is singular, use pseudo-inverse
                svd = new SingularValueDecomposition(covariance);
            }
        }
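
The constructor above switches to an SVD pseudo-inverse when the Cholesky factorization reports a zero determinant. A hedged sketch of that selection in isolation, assuming CholeskyDecomposition also implements the ISolverMatrixDecomposition<double> interface that appears in Example #12 below:

        static ISolverMatrixDecomposition<double> ChooseSolver(double[,] covariance)
        {
            var chol = new CholeskyDecomposition(covariance, true);

            // Positive-definite covariance: the cheap Cholesky solve is enough
            if (chol.Determinant != 0)
                return chol;

            // Singular covariance: fall back to the SVD, whose Solve and
            // Inverse act as the Moore-Penrose pseudo-inverse
            return new SingularValueDecomposition(covariance);
        }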
        public void InverseTest()
        {
            double[,] value = new double[,]
            { 
                  { 1.0, 1.0 },
                  { 2.0, 2.0 }
            };

            SingularValueDecomposition target = new SingularValueDecomposition(value);

            double[,] expected = new double[,]
            {
               { 0.1, 0.2 },
               { 0.1, 0.2 }
            };

            double[,] actual = target.Solve(Matrix.Identity(2));
            Assert.IsTrue(Matrix.IsEqual(expected, actual, 0.001));

            actual = target.Inverse();
            Assert.IsTrue(Matrix.IsEqual(expected, actual, 0.001));
        }
        public void InverseTest2()
        {
            int n = 5;

            var I = Matrix.Identity(n);

            for (int i = 0; i < n; i++)
            {
                for (int j = 0; j < n; j++)
                {
                    double[,] value = Matrix.Magic(n);

                    var target = new SingularValueDecomposition(value);

                    double[,] solution = target.Solve(I);
                    double[,] inverse = target.Inverse();
                    double[,] reverse = target.Reverse();

                    Assert.IsTrue(Matrix.IsEqual(solution, inverse, 1e-4));
                    Assert.IsTrue(Matrix.IsEqual(value, reverse, 1e-4));
                }
            }
        }
        public void ConstructorTest1()
        {
            double[,] value = 
            {
               {  4, -2 },
               {  3,  1 },
            };

            var svd = new SingularValueDecomposition(value);
            Assert.AreEqual(2, svd.Rank);


            var target = new GramSchmidtOrthogonalization(value);

            var Q = target.OrthogonalFactor;
            var R = target.UpperTriangularFactor;

            double[,] inv = Q.Inverse();
            double[,] transpose = Q.Transpose();
            double[,] m = Matrix.Multiply(Q, transpose);

            Assert.IsTrue(Matrix.IsEqual(m, Matrix.Identity(2), 1e-10));
            Assert.IsTrue(Matrix.IsEqual(inv, transpose, 1e-10));
        }
        public void SingularValueDecompositionConstructorTest7()
        {
            int count = 100;
            double[,] value = new double[count, 3];
            double[] output = new double[count];

            for (int i = 0; i < count; i++)
            {
                double x = i + 1;
                double y = 2 * (i + 1) - 1;
                value[i, 0] = x;
                value[i, 1] = y;
                value[i, 2] = 1;
                output[i] = 4 * x - y + 3;
            }



            SingularValueDecomposition target = new SingularValueDecomposition(value,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true);

            {
                double[,] expected = value;
                double[,] actual = target.LeftSingularVectors.Multiply(
                    Matrix.Diagonal(target.Diagonal)).Multiply(target.RightSingularVectors.Transpose());

                // Checking the decomposition
                Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
            }

            {
                double[] solution = target.Solve(output);

                double[] expected = output;
                double[] actual = value.Multiply(solution);

                Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
            }
        }
        public void SingularValueDecompositionConstructorTest5()
        {
            // Test using SVD assumption auto-correction feature
            // without computing the left singular vectors.

            double[,] value = new double[,]
             { 
                 { 1, 2 },
                 { 3, 4 },
                 { 5, 6 },
                 { 7, 8 }
             }.Transpose(); // value is 2x4, having fewer rows than columns.

            SingularValueDecomposition target = new SingularValueDecomposition(value, false, true, true);


            // Checking values
            double[,] U =
            {
                { 0.0, 0.0 },
                { 0.0, 0.0 },
            };

            // U should not have been computed
            Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U));


            double[,] V = // economy svd
            {
                {  0.152483233310201,  0.822647472225661,  },
                {  0.349918371807964,  0.421375287684580,  },
                {  0.547353510305727,  0.0201031031435023, },
                {  0.744788648803490, -0.381169081397574,  },
            };

            // V can be different, but for the economy SVD it is often equal
            Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));



            double[,] S = 
            {
                { 14.2690954992615, 0.000000000000000 },
                {  0.0000000000000,  0.626828232417543 },
            };

            // The diagonal values should be equal
            Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
        }
        public void SingularValueDecompositionConstructorTest6()
        {
            // Test using SVD assumption auto-correction feature in place

            double[,] value1 =
            {
                { 2.5,  2.4 },
                { 0.5,  0.7 },
                { 2.2,  2.9 },
                { 1.9,  2.2 },
                { 3.1,  3.0 },
                { 2.3,  2.7 },
                { 2.0,  1.6 },
                { 1.0,  1.1 },
                { 1.5,  1.6 },
                { 1.1,  0.9 }
            };

            double[,] value2 = value1.Transpose();

            var target1 = new SingularValueDecomposition(value1, true, true, true, true);
            var target2 = new SingularValueDecomposition(value2, true, true, true, true);

            Assert.IsTrue(target1.LeftSingularVectors.IsEqual(target2.RightSingularVectors));
            Assert.IsTrue(target1.RightSingularVectors.IsEqual(target2.LeftSingularVectors));
            Assert.IsTrue(target1.DiagonalMatrix.IsEqual(target2.DiagonalMatrix));

        }
        public void SingularValueDecompositionConstructorTest2()
        {
            // test for m-x-n matrices where m > n (4 > 2)

            double[,] value = new double[,]
             { 
                 { 1, 2 },
                 { 3, 4 },
                 { 5, 6 },
                 { 7, 8 }
             }; // value is 4x2, thus having more rows than columns


            SingularValueDecomposition target = new SingularValueDecomposition(value, true, true, false);

            double[,] actual = target.LeftSingularVectors.Multiply(
                Matrix.Diagonal(target.Diagonal)).Multiply(target.RightSingularVectors.Transpose());

            // Checking the decomposition
            Assert.IsTrue(Matrix.IsEqual(actual, value, 0.01));

            double[,] U = // economy svd
            {
                {  0.152483233310201,  0.822647472225661,  },
                {  0.349918371807964,  0.421375287684580,  },
                {  0.547353510305727,  0.0201031031435023, },
                {  0.744788648803490, -0.381169081397574,  },
            };

            // U should be equal except for some sign changes
            Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));



            // Checking values
            double[,] V =
            {
                {  0.641423027995072, -0.767187395072177 },
                {  0.767187395072177,  0.641423027995072 },
            };

            // V should be equal except for some sign changes
            Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));


            double[,] S = 
            {
                { 14.2690954992615, 0.000000000000000 },
                {  0.0000000000000,  0.626828232417543 },
            };

            // The diagonal values should be equal
            Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
        }
        public void SingularValueDecompositionConstructorTest4()
        {
            // Test using SVD assumption auto-correction feature
            // without computing the right singular vectors.

            double[,] value = new double[,]
             { 
                 { 1, 2 },
                 { 3, 4 },
                 { 5, 6 },
                 { 7, 8 }
             }.Transpose(); // value is 2x4, having fewer rows than columns.

            SingularValueDecomposition target = new SingularValueDecomposition(value, true, false, true);


            // Checking values
            double[,] U =
            {
                { 0.641423027995072, -0.767187395072177 },
                { 0.767187395072177,  0.641423027995072 },
            };

            // U should be equal except for some sign changes
            Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));


            // Checking values
            double[,] V =
            {
                {  0.0, 0.0 },
                {  0.0, 0.0 },
                {  0.0, 0.0 },
                {  0.0, 0.0 },
            };

            // V should not have been computed.
            Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V));


            double[,] S = 
            {
                { 14.2690954992615, 0.000000000000000 },
                {  0.0000000000000,  0.626828232417543 },
            };

            // The diagonal values should be equal
            Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
        }
Example #12
        private double regress(double[][] inputs, double[] outputs, out double[,] designMatrix, bool robust)
        {
            if (inputs.Length != outputs.Length)
                throw new ArgumentException("Number of input and output samples does not match", "outputs");

            int parameters = Inputs;
            int rows = inputs.Length;   // number of instance points
            int cols = Inputs;          // dimension of each point

            for (int i = 0; i < inputs.Length; i++)
            {
                if (inputs[i].Length != parameters)
                {
                    throw new DimensionMismatchException("inputs", String.Format(
                        "Input vectors should have length {0}. The row at index {1} of the" +
                        " inputs matrix has length {2}.", parameters, i, inputs[i].Length));
                }
            }

            ISolverMatrixDecomposition<double> solver;


            // Create the problem's design matrix. If we
            //  have to add an intercept term, add a new
            //  extra column at the end and fill with 1s.

            if (!addIntercept)
            {
                // Just copy values over
                designMatrix = new double[rows, cols];
                for (int i = 0; i < inputs.Length; i++)
                    for (int j = 0; j < inputs[i].Length; j++)
                        designMatrix[i, j] = inputs[i][j];
            }
            else
            {
                // Add an intercept term
                designMatrix = new double[rows, cols + 1];
                for (int i = 0; i < inputs.Length; i++)
                {
                    for (int j = 0; j < inputs[i].Length; j++)
                        designMatrix[i, j] = inputs[i][j];
                    designMatrix[i, cols] = 1;
                }
            }

            // Check if we have an overdetermined or underdetermined
            //  system to select an appropriate matrix solver method.

            if (robust || cols >= rows)
            {
                // Either robustness was requested or there are at least as
                // many variables as equations (a square or underdetermined
                // system). Solve using an SVD:
                solver = new SingularValueDecomposition(designMatrix,
                    computeLeftSingularVectors: true,
                    computeRightSingularVectors: true,
                    autoTranspose: true);
            }
            else
            {
                // We have more equations than variables, an
                // overdetermined system. Solve using the QR:
                solver = new QrDecomposition(designMatrix);
            }


            // Solve designMatrix * C = outputs for C (the coefficients)
            coefficients = solver.Solve(outputs);


            // Calculate Sum-Of-Squares error
            double error = 0.0;
            double e;
            for (int i = 0; i < outputs.Length; i++)
            {
                e = outputs[i] - Compute(inputs[i]);
                error += e * e;
            }

            return error;
        }
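
The comments above summarize the solver choice: QR for tall, full-rank design matrices; the SVD when robustness is requested or the system is square or underdetermined. The same decision, distilled into a stand-alone hedged sketch that assumes the design matrix has already been built:

        static double[] SolveLeastSquares(double[,] designMatrix, double[] outputs, bool robust)
        {
            int rows = designMatrix.GetLength(0);
            int cols = designMatrix.GetLength(1);

            ISolverMatrixDecomposition<double> solver;

            if (robust || cols >= rows)
            {
                // The SVD also handles rank-deficient or wide systems
                solver = new SingularValueDecomposition(designMatrix,
                    computeLeftSingularVectors: true,
                    computeRightSingularVectors: true,
                    autoTranspose: true);
            }
            else
            {
                // QR is cheaper for overdetermined, full-rank systems
                solver = new QrDecomposition(designMatrix);
            }

            return solver.Solve(outputs);
        }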
        public void SingularValueDecompositionConstructorTest3()
        {
            // Test using SVD assumption auto-correction feature.

            // Test for m-x-n matrices where m < n. The available SVD
            // routine was not meant to be used in this case.

            double[,] value = new double[,]
             { 
                 { 1, 2 },
                 { 3, 4 },
                 { 5, 6 },
                 { 7, 8 }
             }.Transpose(); // value is 2x4, having fewer rows than columns.

            SingularValueDecomposition target = new SingularValueDecomposition(value, true, true, true);

            double[,] actual = target.LeftSingularVectors.Multiply(
                Matrix.Diagonal(target.Diagonal)).Multiply(target.RightSingularVectors.Transpose());

            // Checking the decomposition
            Assert.IsTrue(Matrix.IsEqual(actual, value, 0.01));

            // Checking values
            double[,] U =
            {
                { 0.641423027995072, -0.767187395072177 },
                { 0.767187395072177,  0.641423027995072 },
            };

            // U should be equal except for some sign changes
            Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));


            double[,] V = // economy svd
            {
                {  0.152483233310201,  0.822647472225661,  },
                {  0.349918371807964,  0.421375287684580,  },
                {  0.547353510305727,  0.0201031031435023, },
                {  0.744788648803490, -0.381169081397574,  },
            };

            // V can be different, but for the economy SVD it is often equal
            Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));


            double[,] S = 
            {
                { 14.2690954992615, 0.000000000000000 },
                {  0.0000000000000,  0.626828232417543 },
            };

            // The diagonal values should be equal
            Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
        }
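
What the auto-transpose tests above verify, condensed into one hedged helper: a wide matrix is decomposed through its transpose, the roles of U and V swap internally, but U*S*V' still reconstructs the input:

        public static bool SvdReconstructs(double[,] value)
        {
            var svd = new SingularValueDecomposition(value,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true);

            double[,] reconstruction = svd.LeftSingularVectors
                .Multiply(Matrix.Diagonal(svd.Diagonal))
                .Multiply(svd.RightSingularVectors.Transpose());

            return Matrix.IsEqual(reconstruction, value, 0.01);
        }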
Example #14
        public void InverseTest3x3()
        {
            double[,] value = 
            { 
                { 6.0, 1.0, 2.0 },
                { 0.0, 8.0, 1.0 },  
                { 2.0, 4.0, 5.0 }  
            };

            Assert.IsFalse(value.IsSingular());

            double[,] expected = new SingularValueDecomposition(value).Inverse();

            double[,] actual = Matrix.Inverse(value);

            Assert.IsTrue(Matrix.IsEqual(expected, actual, 1e-6));
        }
Example #15
        // ------------------------------------------------------------
        /// <summary>
        ///   Computes the whitening transform for the given data, making
        ///   its covariance matrix equal to the identity matrix.
        /// </summary>
        /// <param name="value">A matrix where each column represent a
        ///   variable and each row represent a observation.</param>
        /// <param name="transformMatrix">The base matrix used in the
        ///   transformation.</param>
        /// <returns>
        ///   The transformed source data (which now has unit variance).
        /// </returns>
        /// 
        public static double[,] Whitening(double[,] value, out double[,] transformMatrix)
        {
            if (value == null)
                throw new ArgumentNullException("value");

            int cols = value.GetLength(1);

            double[,] cov = Tools.Covariance(value);

            // Diagonalizes the covariance matrix
            SingularValueDecomposition svd = new SingularValueDecomposition(cov,
                true,  // compute left vectors (to become a transformation matrix)
                false, // do not compute right vectors since they aren't necessary
                true,  // transpose if necessary to avoid erroneous assumptions in SVD
                true); // perform operation in-place, reducing memory usage

            // Retrieve the transformation matrix
            transformMatrix = svd.LeftSingularVectors;

            // Perform scaling to have unit variance
            double[] singularValues = svd.Diagonal;
            for (int i = 0; i < cols; i++)
                for (int j = 0; j < singularValues.Length; j++)
                    transformMatrix[i, j] /= Math.Sqrt(singularValues[j]);

            // Return the transformed data
            return value.Multiply(transformMatrix);
        }
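
A hedged usage sketch for the Whitening helper above, assuming it is placed in the same class; the data layout follows its documentation (variables in columns, observations in rows), and after the transform the covariance of the result should be close to the identity matrix:

        public static void WhiteningUsage()
        {
            double[,] data =
            {
                { 2.5, 2.4 },
                { 0.5, 0.7 },
                { 2.2, 2.9 },
                { 1.9, 2.2 },
                { 3.1, 3.0 },
            };

            double[,] transform;
            double[,] whitened = Whitening(data, out transform);

            // Approximately the 2x2 identity matrix
            double[,] cov = Tools.Covariance(whitened);
        }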
Example #16
    private void HOSVDdecompos(float[,] pixels, List<PointI> patches, out double[,] u1, out double[,] u2, out double[,] u3, int P, int K, int KP, int PP)
    {
        double[,] C1 = new double[P, KP];
        double[,] C2 = new double[P, KP];
        double[,] C3 = new double[K, PP];

        for (int k = 0; k < K; k++)
        {
            for (int x = 0; x < P; x++)
            {
                for (int y = 0; y < P; y++)
                {
                    C1[x, k * P + y] = pixels[patches[k].X + x, patches[k].Y + y];
                    C2[x, K * y + k] = pixels[patches[k].X + y, patches[k].Y + x];
                    C3[k, x * P + y] = pixels[patches[k].X + x, patches[k].Y + y];
                }
            }
        }

        Accord.Math.Decompositions.SingularValueDecomposition svd1 = new Accord.Math.Decompositions.SingularValueDecomposition(C1, true, false, true, true);
        u1 = svd1.LeftSingularVectors;

        Accord.Math.Decompositions.SingularValueDecomposition svd2 = new Accord.Math.Decompositions.SingularValueDecomposition(C2, true, false, true, true);
        u2 = svd2.LeftSingularVectors;

        Accord.Math.Decompositions.SingularValueDecomposition svd3 = new Accord.Math.Decompositions.SingularValueDecomposition(C3, true, false, true, true);
        u3 = svd3.LeftSingularVectors;
    }
Example #17
        /// <summary>
        /// Applies the rotation operator to the given dataset according to the reference dataset
        /// </summary>
        /// <param name="p">Procrusted dataset to rotate</param>
        /// <param name="p_reference">Reference procrusted dataset</param>
        /// <returns>The rotated dataset</returns>
        double[,] Rotate(ProcrustedDataset p, ProcrustedDataset p_reference)
        {
            // Rotation computed as in Amy Ross, Procrustes Analysis: http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.119.2686&rep=rep1&type=pdf

            SingularValueDecomposition svd = new SingularValueDecomposition(p_reference.Dataset.TransposeAndDot(p.Dataset));

            double[,] Q = svd.RightSingularVectors.TransposeAndDot(svd.LeftSingularVectors);

            p.RotationMatrix = Q;

            return p.Dataset.Dot(Q);
        }
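
The Procrustes rotation above builds Q from the two orthogonal SVD factors, so Q itself is orthogonal: it rotates the dataset without scaling it. A small hedged check of that property, using only the extension methods already shown on this page:

        static bool RotationIsOrthogonal(double[,] referenceDataset, double[,] dataset)
        {
            var svd = new SingularValueDecomposition(referenceDataset.TransposeAndDot(dataset));

            double[,] Q = svd.RightSingularVectors.TransposeAndDot(svd.LeftSingularVectors);

            // Q * Q' should be the identity matrix (up to rounding)
            double[,] product = Q.Dot(Q.Transpose());

            return Matrix.IsEqual(product, Matrix.Identity(Q.GetLength(0)), 1e-8);
        }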
Example #18
        public static double Bhattacharyya(
            double[] meanX, double[,] covX, double lnDetCovX,
            double[] meanY, double[,] covY, double lnDetCovY)
        {
            int n = meanX.Length;

            // P = (covX + covY) / 2
            var P = new double[n, n];
            for (int i = 0; i < n; i++)
                for (int j = 0; j < n; j++)
                    P[i, j] = (covX[i, j] + covY[i, j]) / 2.0;

            var svd = new SingularValueDecomposition(P);
            
            double detP = svd.LogPseudoDeterminant;

            double mahalanobis = SquareMahalanobis(meanY, meanX, svd);

            double a = (1.0 / 8.0) * mahalanobis;
            double b = (0.5) * (detP - 0.5 * (lnDetCovX + lnDetCovY));

            return a + b;
        }
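
The overload above expects the log-determinants of the two covariance matrices to be precomputed. A short hedged sketch, assumed to sit next to the method above, showing how they can be obtained from the same decomposition type via LogPseudoDeterminant, which also tolerates rank-deficient covariances:

        public static double BhattacharyyaUsage(
            double[] meanX, double[,] covX,
            double[] meanY, double[,] covY)
        {
            double lnDetCovX = new SingularValueDecomposition(covX).LogPseudoDeterminant;
            double lnDetCovY = new SingularValueDecomposition(covY).LogPseudoDeterminant;

            return Bhattacharyya(meanX, covX, lnDetCovX, meanY, covY, lnDetCovY);
        }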
Example #19
 public static double Mahalanobis(this double[] x, double[] y, SingularValueDecomposition covariance)
 {
     return System.Math.Sqrt(SquareMahalanobis(x, y, covariance));
 }
        /// <summary>
        ///   Creates a new object that is a copy of the current instance.
        /// </summary>
        /// <returns>
        ///   A new object that is a copy of this instance.
        /// </returns>
        public object Clone()
        {
            var svd = new SingularValueDecomposition();
            svd.m = m;
            svd.n = n;
            svd.s = (double[]) s.Clone();
            svd.si = (int[]) si.Clone();
            svd.swapped = swapped;
            if (u != null) svd.u = (double[,]) u.Clone();
            if (v != null) svd.v = (double[,]) v.Clone();

            return svd;
        }
Example #21
 protected static void svd(double[,] m, out double[,] U, out double[] S, out double[,] V)
 {
     var svd = new SingularValueDecomposition(m, true, true, true);
     U = svd.LeftSingularVectors;
     S = svd.Diagonal;
     V = svd.RightSingularVectors;
 }
Example #22
 // decompositions
 #region svd
 protected double[] svd(double[,] m)
 {
     var svd = new SingularValueDecomposition(m, false, false, true);
     return svd.Diagonal;
 }
        /// <summary>
        ///   Reverts a set of projected data into its original form. Complete reverse
        ///   transformation is not always possible and is not even guaranteed to exist.
        /// </summary>
        /// <remarks>
        ///   This method works using a closed-form MDS approach as suggested by
        ///   Kwok and Tsang. It is currently a direct implementation of the algorithm
        ///   without any kind of optimization.
        ///   
        ///   Reference:
        ///   - http://cmp.felk.cvut.cz/cmp/software/stprtool/manual/kernels/preimage/list/rbfpreimg3.html
        /// </remarks>
        /// <param name="data">The kpca-transformed data.</param>
        /// <param name="neighbors">The number of nearest neighbors to use while constructing the pre-image.</param>
        public double[,] Revert(double[,] data, int neighbors)
        {
            int rows = data.GetLength(0);
            int cols = data.GetLength(1);

            double[,] reversion = new double[rows, sourceCentered.GetLength(1)];

            // number of neighbors cannot exceed the number of training vectors.
            int nn = System.Math.Min(neighbors, sourceCentered.GetLength(0));

            // For each point to be reversed
            for (int p = 0; p < rows; p++)
            {
                // 1. Get the point in feature space
                double[] y = data.GetRow(p);

                // 2. Select nn nearest neighbors of the feature space
                double[,] X = sourceCentered;
                double[] d2 = new double[Result.GetLength(0)];
                int[] inx = new int[Result.GetLength(0)];

                // 2.1 Calculate distances
                for (int i = 0; i < X.GetLength(0); i++)
                {
                    inx[i] = i;
                    d2[i] = kernel.Distance(y, Result.GetRow(i).Submatrix(y.Length));
                }

                // 2.2 Order them
                Array.Sort(d2, inx);

                // 2.3 Select nn neighbors
                inx = inx.Submatrix(nn);
                X = X.Submatrix(inx).Transpose(); // X is in input space
                d2 = d2.Submatrix(nn);       // distances in input space

                // 3. Create centering matrix
                // H = eye(nn, nn) - 1 / nn * ones(nn, nn);
                double[,] H = Matrix.Identity(nn).Subtract(Matrix.Create(nn, 1.0 / nn));

                // 4. Perform SVD
                //    [U,L,V] = svd(X*H);
                SingularValueDecomposition svd = new SingularValueDecomposition(X.Multiply(H));
                double[,] U = svd.LeftSingularVectors;
                double[,] L = Matrix.Diagonal(nn, svd.Diagonal);
                double[,] V = svd.RightSingularVectors;

                // 5. Compute projections
                //    Z = L*V';
                double[,] Z = L.Multiply(V.Transpose());

                // 6. Calculate distances
                //    d02 = sum(Z.^2)';
                double[] d02 = Matrix.Sum(Matrix.DotPow(Z,2));

                // 7. Get the pre-image using z = -0.5*inv(Z')*(d2-d02)
                double[,] inv = Matrix.PseudoInverse(Z.Transpose());
                double[] z = (-0.5).Multiply(inv).Multiply(d2.Subtract(d02));

                // 8. Project the pre-image on the original basis
                //    using x = U*z + sum(X,2)/nn;
                double[] x = (U.Multiply(z)).Add(Matrix.Sum(X.Transpose()).Multiply(1.0 / nn));

                // 9. Store the computed pre-image.
                for (int i = 0; i < reversion.GetLength(1); i++)
                    reversion[p, i] = x[i];
            }

            // if the data has been standardized or centered,
            //  we need to revert those operations as well
            if (this.Method == AnalysisMethod.Correlation)
            {
                // multiply by standard deviation and add the mean
                for (int i = 0; i < reversion.GetLength(0); i++)
                    for (int j = 0; j < reversion.GetLength(1); j++)
                        reversion[i, j] = (reversion[i, j] * StandardDeviations[j]) + Means[j];
            }
            else
            {
                // only add the mean
                for (int i = 0; i < reversion.GetLength(0); i++)
                    for (int j = 0; j < reversion.GetLength(1); j++)
                        reversion[i, j] = reversion[i, j] + Means[j];
            }

            return reversion;
        }
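
Steps 3 to 5 of the pre-image computation above, condensed into a hedged stand-alone sketch: center the selected neighbors (stored one per column, as X is after the transpose above), factor them with an SVD, and read their coordinates in the local basis from L*V':

        static double[,] LocalProjection(double[,] neighbors)
        {
            int nn = neighbors.GetLength(1);

            // H = eye(nn) - ones(nn, nn) / nn  (centering matrix)
            double[,] H = Matrix.Identity(nn).Subtract(Matrix.Create(nn, 1.0 / nn));

            var svd = new SingularValueDecomposition(neighbors.Multiply(H));

            double[,] L = Matrix.Diagonal(nn, svd.Diagonal);
            double[,] V = svd.RightSingularVectors;

            // Z = L * V' holds the neighbor coordinates in the local basis
            return L.Multiply(V.Transpose());
        }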
Example #24
        /// <summary>
        ///   Performs the regression using the input vectors and output
        ///   data, returning the sum of squared errors of the fit.
        /// </summary>
        /// 
        /// <param name="inputs">The input vectors to be used in the regression.</param>
        /// <param name="outputs">The output values for each input vector.</param>
        /// <param name="informationMatrix">Gets the Fisher's information matrix.</param>
        /// <param name="robust">
        ///    Set to <c>true</c> to force the use of the <see cref="SingularValueDecomposition"/>.
        ///    This will avoid any rank exceptions, but might be more computationally intensive.</param>
        /// 
        /// <returns>The Sum-Of-Squares error of the regression.</returns>
        /// 
        public double Regress(double[][] inputs, double[] outputs, 
            out double[,] informationMatrix, bool robust = true)
        {
            if (inputs.Length != outputs.Length)
                throw new ArgumentException("Number of input and output samples does not match", "outputs");

            double[,] design;

            double error = regress(inputs, outputs, out design, robust);

            double[,] cov = design.TransposeAndMultiply(design);
            informationMatrix = new SingularValueDecomposition(cov,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true, inPlace: true).Inverse();

            return error;
        }
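
The information matrix returned above is the inverse of X'X, so a common follow-up is to turn it into standard errors for the coefficients. A hedged sketch using the classical OLS variance estimate; the helper and its parameters are illustrative and not part of the class shown above:

        static double[] StandardErrors(double sumOfSquaredErrors, int samples,
            double[,] informationMatrix)
        {
            int parameters = informationMatrix.GetLength(0);

            // Classical unbiased estimate of the residual variance
            double sigma2 = sumOfSquaredErrors / (samples - parameters);

            double[] se = new double[parameters];
            for (int i = 0; i < parameters; i++)
                se[i] = Math.Sqrt(sigma2 * informationMatrix[i, i]);

            return se;
        }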
Example #25
        public void InverseTest2x2()
        {
            double[,] value = 
            { 
                { 3.0, 1.0 },
                { 2.0, 2.0 }  
            };

            double[,] expected = new SingularValueDecomposition(value).Inverse();

            double[,] actual = Matrix.Inverse(value);

            Assert.IsTrue(Matrix.IsEqual(expected, actual, 1e-6));
        }
        /// <summary>
        ///   Reverts a set of projected data into its original form. Complete reverse
        ///   transformation is not always possible and is not even guaranteed to exist.
        /// </summary>
        /// 
        /// <remarks>
        /// <para>
        ///   This method works using a closed-form MDS approach as suggested by
        ///   Kwok and Tsang. It is currently a direct implementation of the algorithm
        ///   without any kind of optimization.
        /// </para>
        /// <para>
        ///   Reference:
        ///   - http://cmp.felk.cvut.cz/cmp/software/stprtool/manual/kernels/preimage/list/rbfpreimg3.html
        /// </para>
        /// </remarks>
        /// 
        /// <param name="data">The kpca-transformed data.</param>
        /// <param name="neighbors">The number of nearest neighbors to use while constructing the pre-image.</param>
        /// 
        public double[,] Revert(double[,] data, int neighbors)
        {
            if (data == null)
                throw new ArgumentNullException("data");

            if (sourceCentered == null)
                throw new InvalidOperationException("The analysis must have been computed first.");

            if (neighbors < 2)
                throw new ArgumentOutOfRangeException("neighbors", "At least two neighbors are necessary.");

            // Verify if the current kernel supports
            // distance calculation in feature space.
            var distance = kernel as IReverseDistance;

            if (distance == null)
                throw new NotSupportedException(
                    "Current kernel does not support distance calculation in feature space.");


            int rows = data.GetLength(0);


            var result = Result;

            double[,] reversion = new double[rows, sourceCentered.GetLength(1)];

            // number of neighbors cannot exceed the number of training vectors.
            int nn = System.Math.Min(neighbors, sourceCentered.GetLength(0));


            // For each point to be reversed
            for (int p = 0; p < rows; p++)
            {
                // 1. Get the point in feature space
                double[] y = data.GetRow(p);

                // 2. Select nn nearest neighbors of the feature space
                double[,] X = sourceCentered;
                double[] d2 = new double[Result.GetLength(0)];
                int[] inx = new int[Result.GetLength(0)];

                // 2.1 Calculate distances
                for (int i = 0; i < X.GetLength(0); i++)
                {
                    inx[i] = i;
                    d2[i] = distance.ReverseDistance(y, result.GetRow(i).Submatrix(y.Length));

                    if (Double.IsNaN(d2[i]))
                        d2[i] = Double.PositiveInfinity;
                }

                // 2.2 Order them
                Array.Sort(d2, inx);

                // 2.3 Select nn neighbors

                int def = 0;
                for (int i = 0; i < d2.Length && i < nn; i++, def++)
                    if (Double.IsInfinity(d2[i]))
                        break;

                inx = inx.Submatrix(def);
                X = X.Submatrix(inx).Transpose(); // X is in input space
                d2 = d2.Submatrix(def);       // distances in input space


                // 3. Perform SVD
                //    [U,L,V] = svd(X*H);

                // TODO: If X has more columns than rows, the SV decomposition should be
                //  computed on the transpose of X and the left and right vectors should
                //  be swapped. This should be fixed after more unit tests are elaborated.
                SingularValueDecomposition svd = new SingularValueDecomposition(X,
                    computeLeftSingularVectors: true, computeRightSingularVectors: true, 
                    autoTranspose: false);

                double[,] U = svd.LeftSingularVectors;
                double[,] L = Matrix.Diagonal(def, svd.Diagonal);
                double[,] V = svd.RightSingularVectors;


                // 4. Compute projections
                //    Z = L*V';
                double[,] Z = L.Multiply(V.Transpose());


                // 5. Calculate distances
                //    d02 = sum(Z.^2)';
                double[] d02 = Matrix.Sum(Matrix.ElementwisePower(Z, 2));


                // 6. Get the pre-image using z = -0.5*inv(Z')*(d2-d02)
                double[,] inv = Matrix.PseudoInverse(Z.Transpose());

                double[] w = (-0.5).Multiply(inv).Multiply(d2.Subtract(d02));
                double[] z = w.Submatrix(U.GetLength(1));


                // 8. Project the pre-image on the original basis
                //    using x = U*z + sum(X,2)/nn;
                double[] x = (U.Multiply(z)).Add(Matrix.Sum(X.Transpose()).Multiply(1.0 / nn));


                // 9. Store the computed pre-image.
                for (int i = 0; i < reversion.GetLength(1); i++)
                    reversion[p, i] = x[i];
            }



            // if the data has been standardized or centered,
            //  we need to revert those operations as well
            if (this.Method == AnalysisMethod.Standardize)
            {
                // multiply by standard deviation and add the mean
                for (int i = 0; i < reversion.GetLength(0); i++)
                    for (int j = 0; j < reversion.GetLength(1); j++)
                        reversion[i, j] = (reversion[i, j] * StandardDeviations[j]) + Means[j];
            }
            else if (this.Method == AnalysisMethod.Center)
            {
                // only add the mean
                for (int i = 0; i < reversion.GetLength(0); i++)
                    for (int j = 0; j < reversion.GetLength(1); j++)
                        reversion[i, j] = reversion[i, j] + Means[j];
            }


            return reversion;
        }
        /// <summary>
        ///   Performs the regression using the input vectors and output
        ///   vectors, returning the sum of squared errors of the fit.
        /// </summary>
        /// 
        /// <param name="inputs">The input vectors to be used in the regression.</param>
        /// <param name="outputs">The output values for each input vector.</param>
        /// <returns>The Sum-Of-Squares error of the regression.</returns>
        /// 
        public virtual double Regress(double[][] inputs, double[][] outputs)
        {
            if (inputs.Length != outputs.Length)
                throw new ArgumentException("Number of input and output samples does not match", "outputs");

            int cols = inputs[0].Length;      // inputs
            int rows = inputs.Length;         // points

            if (insertConstant) cols++;

            for (int c = 0; c < coefficients.GetLength(1); c++)
            {
                double[] B = new double[cols];
                double[,] V = new double[cols, cols];


                // Compute V and B matrices
                for (int i = 0; i < cols; i++)
                {
                    // Least Squares Matrix
                    for (int j = 0; j < cols; j++)
                    {
                        for (int k = 0; k < rows; k++)
                        {
                            if (insertConstant)
                            {
                                double a = (i == cols - 1) ? 1 : inputs[k][i];
                                double b = (j == cols - 1) ? 1 : inputs[k][j];

                                V[i, j] += a * b;
                            }
                            else
                            {
                                V[i, j] += inputs[k][i] * inputs[k][j];
                            }
                        }
                    }

                    // Function to minimize
                    for (int k = 0; k < rows; k++)
                    {
                        if (insertConstant && (i == cols - 1))
                        {
                            B[i] += outputs[k][c];
                        }
                        else
                        {
                            B[i] += inputs[k][i] * outputs[k][c];
                        }
                    }
                }


                // Solve V*C = B to find C (the coefficients)
                double[] coef = new SingularValueDecomposition(V).Solve(B);

                if (insertConstant)
                {
                    intercepts[c] = coef[coef.Length - 1];
                    for (int i = 0; i < cols - 1; i++)
                        coefficients[i, c] = coef[i];
                }
                else
                {
                    for (int i = 0; i < cols; i++)
                        coefficients[i, c] = coef[i];
                }
            }


            // Calculate Sum-Of-Squares error
            double error = 0.0, e;
            for (int i = 0; i < outputs.Length; i++)
            {
                double[] y = Compute(inputs[i]);

                for (int c = 0; c < y.Length; c++)
                {
                    e = outputs[i][c] - y[c];
                    error += e * e;
                }
            }

            return error;
        }
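
The nested loops above build the normal equations explicitly: V = X'X and, for each output column, B = X'y, followed by an SVD solve of V*C = B. The same solve written with the matrix extensions used elsewhere on this page, as a hedged sketch for a single output vector:

        static double[] SolveNormalEquations(double[,] designMatrix, double[] outputs)
        {
            // V = X'X
            double[,] V = designMatrix.TransposeAndMultiply(designMatrix);

            // B = X'y
            double[] B = new double[V.GetLength(0)];
            designMatrix.TransposeAndMultiply(outputs, result: B);

            // The SVD tolerates a singular or ill-conditioned V
            return new SingularValueDecomposition(V).Solve(B);
        }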
Example #28
        static void Main(string[] args)
        {
            Console.WriteLine("Please take a look at the source for examples!");
            Console.ReadKey();

            #region 1. Declaring matrices

            // 1.1 Using standard .NET declaration
            double[,] A = 
            {
                {1, 2, 3},
                {6, 2, 0},
                {0, 0, 1}
            };

            double[,] B = 
            {
                {2, 0, 0},
                {0, 2, 0},
                {0, 0, 2}
            };

            {
                // 1.2 Using Accord extension methods
                double[,] Bi = Matrix.Identity(3).Multiply(2);
                double[,] Bj = Matrix.Diagonal(3, 2.0); // both are equal to B

                // 1.3 Using Accord extension methods with implicit typing
                var I = Matrix.Identity(3);
            }
            #endregion



            #region 2. Matrix Operations
            {
                // 2.1 Addition
                var C = A.Add(B);

                // 2.2 Subtraction
                var D = A.Subtract(B);

                // 2.3 Multiplication
                {
                    // 2.3.1 By a scalar
                    var halfM = A.Multiply(0.5);

                    // 2.3.2 By a vector
                    double[] m = A.Multiply(new double[] { 1, 2, 3 });

                    // 2.3.3 By a matrix
                    var M = A.Multiply(B);

                    // 2.4 Transposing
                    var At = A.Transpose();
                }
            }


            // 2.5 Elementwise operations

            // 2.5.1 Elementwise multiplication
            A.ElementwiseMultiply(B); // A.*B

            // 2.5.2 Elementwise division
            A.ElementwiseDivide(B); // A./B

            #endregion



            #region 3. Matrix characteristics
            {
                // 3.1 Calculating the determinant
                double det = A.Determinant();

                // 3.2 Calculating the trace
                double tr = A.Trace();

                // 3.3 Computing the sum vector
                {
                    double[] sumVector = A.Sum();

                    // 3.3.1 Computing the total sum of elements
                    double sum = sumVector.Sum();

                    // 3.3.2 Computing the sum along the rows
                    sumVector = A.Sum(0); // Equivalent to Octave's sum(A, 1)

                    // 3.3.3 Computing the sum along the columns
                    sumVector = A.Sum(1); // Equivalent to Octave's sum(A, 2)
                }
            }
            #endregion



            #region 4. Linear Algebra
            {
                // 4.1 Computing the inverse
                var invA = A.Inverse();

                // 4.2 Computing the pseudo-inverse
                var pinvA = A.PseudoInverse();

                // 4.3 Solving a linear system (Ax = B)
                var x = A.Solve(B);
            }
            #endregion



            #region 5. Special operators
            {
                // 5.1 Finding the indices of elements
                double[] v = { 5, 2, 2, 7, 1, 0 };
                int[] idx = v.Find(e => e > 2); // finding the index of every element in v higher than 2.

                // 5.2 Selecting elements by index
                double[] u = v.Submatrix(idx); // u is { 5, 7 }

                // 5.3 Converting between different matrix representations
                double[][] jaggedA = A.ToArray(); // from multidimensional to jagged array

                // 5.4 Extracting a column or row from the matrix
                double[] a = A.GetColumn(0); // retrieves the first column
                double[] b = B.GetRow(1); // retrieves the second row

                // 5.5 Taking the absolute of a matrix
                var absA = A.Abs();

                // 5.6 Applying some function to every element
                var newv = v.Apply(e => e + 1);
            }
            #endregion



            #region 7. Vector operations
            {
                double[] u = { 1, 2, 3 };
                double[] v = { 4, 5, 6 };

                var w1 = u.InnerProduct(v);
                var w2 = u.OuterProduct(v);
                var w3 = u.CartesianProduct(v);


                double[] m = { 1, 2, 3, 4 };
                double[,] M = Matrix.Reshape(m, 2, 2);
            }
            #endregion


            #region Decompositions
            {
                // Singular value decomposition
                {
                    SingularValueDecomposition svd = new SingularValueDecomposition(A);
                    var U = svd.LeftSingularVectors;
                    var S = svd.Diagonal;
                    var V = svd.RightSingularVectors;
                }
                // or (please see documentation for details)
                {
                    SingularValueDecomposition svd = new SingularValueDecomposition(A.Transpose());
                    var U = svd.RightSingularVectors;
                    var S = svd.Diagonal;
                    var V = svd.LeftSingularVectors;
                }

                // Eigenvalue decomposition
                {
                    EigenvalueDecomposition eig = new EigenvalueDecomposition(A);
                    var V = eig.Eigenvectors;
                    var D = eig.DiagonalMatrix;
                }

                // QR decomposition
                {
                    QrDecomposition qr = new QrDecomposition(A);
                    var Q = qr.OrthogonalFactor;
                    var R = qr.UpperTriangularFactor;
                }

                // Cholesky decomposition
                {
                    CholeskyDecomposition chol = new CholeskyDecomposition(A);
                    var R = chol.LeftTriangularFactor;
                }

                // LU decomposition
                {
                    LuDecomposition lu = new LuDecomposition(A);
                    var L = lu.LowerTriangularFactor;
                    var U = lu.UpperTriangularFactor;
                }

            }
            #endregion

        }
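
Tying the decomposition region above back to section 4.2: the pseudo-inverse can be obtained directly from the SVD, which is what the earlier inverse tests on this page rely on. A brief hedged sketch:

        static double[,] PseudoInverseViaSvd(double[,] matrix)
        {
            var svd = new SingularValueDecomposition(matrix, true, true, true);

            // For a square, nonsingular matrix this matches matrix.Inverse();
            // in general it matches matrix.PseudoInverse() up to numerical tolerance
            return svd.Inverse();
        }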
Example #29
 // decompositions
 #region svd
 /// <summary>Singular value decomposition.</summary>
 protected List<mat> svd(double[,] m)
 {
     var svd = new SingularValueDecomposition(m, true, true, true);
     return new List<mat> 
     {
         svd.LeftSingularVectors, svd.DiagonalMatrix, svd.RightSingularVectors 
     };
 }
Example #30
        /// <summary>
        ///   Attempts to find the best values for the parameter vector
        ///   minimizing the discrepancy between the generated outputs
        ///   and the expected outputs for a given set of input data.
        /// </summary>
        /// 
        /// <param name="inputs">A set of input data.</param>
        /// <param name="outputs">The values associated with each 
        ///   vector in the <paramref name="inputs"/> data.</param>
        /// 
        public double Minimize(double[][] inputs, double[] outputs)
        {
            Array.Clear(hessian, 0, hessian.Length);
            Array.Clear(gradient, 0, gradient.Length);


            errors = new double[inputs.Length];
            jacobian = new double[inputs.Length, numberOfParameters];


            for (int i = 0; i < inputs.Length; i++)
                errors[i] = outputs[i] - Function(weights, inputs[i]);

            double[] g = new double[numberOfParameters];
            for (int i = 0; i < inputs.Length; i++)
            {
                Gradient(weights, inputs[i], result: g);

                for (int j = 0; j < gradient.Length; j++)
                    jacobian[i, j] = -g[j];
            }


            // Compute error gradient using Jacobian
            jacobian.TransposeAndMultiply(errors, result: gradient);

            // Compute Quasi-Hessian Matrix approximation
            jacobian.TransposeAndMultiply(jacobian, result: hessian);

            decomposition = new SingularValueDecomposition(hessian,
                computeLeftSingularVectors: true, computeRightSingularVectors: true, autoTranspose: true);

            deltas = decomposition.Solve(gradient);

            for (int i = 0; i < deltas.Length; i++)
                weights[i] -= deltas[i];

            return ComputeError(inputs, outputs);
        }
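
The Minimize method above performs one Gauss-Newton style update: it assembles the quasi-Hessian J'J and the gradient J'e from the Jacobian of the residuals, then solves for the step with the SVD. The core of that update, isolated as a hedged sketch:

        static void GaussNewtonStep(double[,] jacobian, double[] errors, double[] weights)
        {
            int parameters = weights.Length;

            double[,] hessian = new double[parameters, parameters];
            double[] gradient = new double[parameters];

            // gradient = J'e, hessian = J'J
            jacobian.TransposeAndMultiply(errors, result: gradient);
            jacobian.TransposeAndMultiply(jacobian, result: hessian);

            // Solve (J'J) * delta = J'e; the SVD copes with a rank-deficient J'J
            var decomposition = new SingularValueDecomposition(hessian,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true);

            double[] deltas = decomposition.Solve(gradient);

            for (int i = 0; i < deltas.Length; i++)
                weights[i] -= deltas[i];
        }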