/// <summary>
///   SVD with both autoTranspose and inPlace enabled: decomposing a matrix
///   and its transpose must yield swapped singular vectors and identical
///   singular values, and Reverse() must reconstruct the inputs.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest6()
{
    // Test using SVD assumption auto-correction feature in place
    double[][] value1 =
    {
        new double[] { 2.5, 2.4 },
        new double[] { 0.5, 0.7 },
        new double[] { 2.2, 2.9 },
        new double[] { 1.9, 2.2 },
        new double[] { 3.1, 3.0 },
        new double[] { 2.3, 2.7 },
        new double[] { 2.0, 1.6 },
        new double[] { 1.0, 1.1 },
        new double[] { 1.5, 1.6 },
        new double[] { 1.1, 0.9 }
    };

    var value2 = value1.Transpose();

    // Work on copies so we can check afterwards which inputs were destroyed.
    var cvalue1 = value1.Copy();
    var cvalue2 = value2.Copy();

    var target1 = new JaggedSingularValueDecomposition(cvalue1, true, true, true, true);
    var target2 = new JaggedSingularValueDecomposition(cvalue2, true, true, true, true);

    // The 10x2 input is consumed in place (no longer equal to the original)...
    Assert.IsFalse(value1.IsEqual(cvalue1, 1e-3));
    // ...while the 2x10 input survives because it gets auto-transposed first.
    Assert.IsTrue(value2.IsEqual(cvalue2, 1e-3)); // due to auto-transpose

    // Decomposing the transpose swaps the roles of U and V.
    Assert.IsTrue(target1.LeftSingularVectors.IsEqual(target2.RightSingularVectors));
    Assert.IsTrue(target1.RightSingularVectors.IsEqual(target2.LeftSingularVectors));
    Assert.IsTrue(target1.DiagonalMatrix.IsEqual(target2.DiagonalMatrix));

    // Reverse() must rebuild the original matrices from the factors.
    Assert.IsTrue(Matrix.IsEqual(value1, target1.Reverse(), 1e-2));
    Assert.IsTrue(Matrix.IsEqual(value2, target2.Reverse(), 1e-2));
}
/// <summary>
///   Regression test for https://github.com/accord-net/framework/issues/614:
///   solving a consistent 2x1 system must work both through the Matrix facade
///   and directly through the SVD's Solve method.
/// </summary>
public void issue_614()
{
    // https://github.com/accord-net/framework/issues/614
    double[][] A =
    {
        new double[] { 1 },
        new double[] { 0 }
    };

    double[][] B =
    {
        new double[] { 1 },
        new double[] { 0 }
    };

    double[][] expected =
    {
        new double[] { 1 }
    };

    // Least-squares solve through the Matrix facade...
    double[][] X = Accord.Math.Matrix.Solve(A, B, true);
    Assert.IsTrue(expected.IsEqual(X));

    // ...and directly through the decomposition itself.
    X = new JaggedSingularValueDecomposition(A).Solve(B);
    Assert.IsTrue(expected.IsEqual(X));
}
/// <summary>
///   SVD of an m-x-n matrix with m &lt; n (2x4), without auto-transposition.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest1()
{
    // This test catches the bug in SingularValueDecomposition in the line
    //   for (int j = k + 1; j < nu; j++)
    // where it should be
    //   for (int j = k + 1; j < n; j++)

    // Test for m-x-n matrices where m < n. The available SVD
    // routine was not meant to be used in this case.

    var value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }.Transpose(); // value is 2x4, having less rows than columns.

    var target = new JaggedSingularValueDecomposition(value, true, true, false);

    // Reconstruct the matrix as U * S * V'.
    double[][] actual = target.LeftSingularVectors
        .Multiply(Matrix.Diagonal(target.Diagonal).ToArray()
        .Multiply(target.RightSingularVectors.Transpose()));

    // Checking the decomposition
    Assert.IsTrue(Matrix.IsEqual(actual, value, 0.01));

    // Checking values
    var U = new double[][]
    {
        new double[] { -0.641423027995072, -0.767187395072177 },
        new double[] { -0.767187395072177,  0.641423027995072 },
    };

    // U should be equal
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));

    double[][] V = new double[][] // economy svd
    {
        new double[] { -0.152483233310201,  0.822647472225661, },
        new double[] { -0.349918371807964,  0.421375287684580, },
        new double[] { -0.547353510305727,  0.0201031031435023, },
        new double[] { -0.744788648803490, -0.381169081397574, },
    };

    // V can be different, but for the economy SVD it is often equal
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors.Submatrix(0, 3, 0, 1), V, 0.0001));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal.Submatrix(2), Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   SolveForDiagonal: solving A * X = diag(d) through the SVD must
///   reproduce the diagonal right-hand side; also checks reconstruction
///   and factor-wise inversion.
/// </summary>
public void solve_for_diagonal()
{
    int count = 3;
    double[][] value = new double[count][];
    double[] output = new double[count];

    // Build a small full-rank 3x3 system: columns are x, 2x-1 and x^2.
    for (int i = 0; i < count; i++)
    {
        value[i] = new double[3];
        double x = i + 1;
        double y = 2 * (i + 1) - 1;
        value[i][0] = x;
        value[i][1] = y;
        value[i][2] = System.Math.Pow(x, 2);
        output[i] = 4 * x - y + 3;
    }

    var target = new JaggedSingularValueDecomposition(value,
        computeLeftSingularVectors: true,
        computeRightSingularVectors: true);

    {
        // A = U * S * V' must reconstruct the original matrix.
        double[][] expected = value;
        double[][] actual = Matrix.Multiply(
            Matrix.Multiply(target.LeftSingularVectors, target.DiagonalMatrix),
            target.RightSingularVectors.Transpose());

        // Checking the decomposition
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }

    {
        // The inverse can also be built factor by factor: A^-1 = (V')^-1 * S^-1 * U^-1.
        double[][] expected = value.Inverse();
        double[][] actual = Matrix.Multiply(Matrix.Multiply(
            target.RightSingularVectors.Transpose().Inverse(),
            target.DiagonalMatrix.Inverse()),
            target.LeftSingularVectors.Inverse());

        // Checking the inverse decomposition
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }

    {
        // A * SolveForDiagonal(d) must equal diag(d).
        double[][] solution = target.SolveForDiagonal(output);
        double[][] expected = Jagged.Diagonal(output);
        double[][] actual = value.Dot(solution);
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }
}
/// <summary>
///   SVD of a 2x4 matrix with the autoTranspose assumption auto-correction
///   enabled; checks reconstruction, Reverse() and the expected factors.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest3()
{
    // Test using SVD assumption auto-correction feature.

    // Test for m-x-n matrices where m < n. The available SVD
    // routine was not meant to be used in this case.

    double[][] value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }.Transpose(); // value is 2x4, having less rows than columns.

    var target = new JaggedSingularValueDecomposition(value, true, true, true);

    // Reconstruct as U * S * V'.
    double[][] actual = Matrix.Multiply(
        Matrix.Multiply(target.LeftSingularVectors, target.DiagonalMatrix),
        target.RightSingularVectors.Transpose());

    // Checking the decomposition
    Assert.IsTrue(Matrix.IsEqual(actual, value, 1e-2));
    Assert.IsTrue(Matrix.IsEqual(value, target.Reverse(), 1e-2));

    // Checking values
    double[][] U =
    {
        new double[] { 0.641423027995072, -0.767187395072177 },
        new double[] { 0.767187395072177,  0.641423027995072 },
    };

    // U should be equal despite some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));

    double[][] V = // economy svd
    {
        new double[] { 0.152483233310201,  0.822647472225661, },
        new double[] { 0.349918371807964,  0.421375287684580, },
        new double[] { 0.547353510305727,  0.0201031031435023, },
        new double[] { 0.744788648803490, -0.381169081397574, },
    };

    // V can be different, but for the economy SVD it is often equal
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   SVD of a tall 4x2 matrix (m &gt; n); checks reconstruction, Reverse()
///   and the expected economy-size factors.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest2()
{
    // test for m-x-n matrices where m > n (4 > 2)
    double[][] value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }; // value is 4x2, thus having more rows than columns

    var target = new JaggedSingularValueDecomposition(value, true, true, false);

    // Reconstruct as U * S * V'.
    double[][] actual = Matrix.Multiply(
        Matrix.Multiply(target.LeftSingularVectors, target.DiagonalMatrix),
        target.RightSingularVectors.Transpose());

    // Checking the decomposition
    Assert.IsTrue(Matrix.IsEqual(actual, value, 1e-2));
    Assert.IsTrue(Matrix.IsEqual(value, target.Reverse(), 1e-5));

    double[][] U = // economy svd
    {
        new double[] { 0.152483233310201,  0.822647472225661, },
        new double[] { 0.349918371807964,  0.421375287684580, },
        new double[] { 0.547353510305727,  0.0201031031435023, },
        new double[] { 0.744788648803490, -0.381169081397574, },
    };

    // U should be equal except for some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));

    // Checking values
    double[][] V =
    {
        new double[] { 0.641423027995072, -0.767187395072177 },
        new double[] { 0.767187395072177,  0.641423027995072 },
    };

    // V should be equal except for some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   Auto-transposed SVD with right singular vectors disabled: V must stay
///   an all-zeros placeholder while U and the singular values are computed.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest4()
{
    // Test using SVD assumption auto-correction feature
    // without computing the right singular vectors.

    double[][] value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }.Transpose(); // value is 2x4, having less rows than columns.

    var target = new JaggedSingularValueDecomposition(value, true, false, true);

    // Checking values
    double[][] U =
    {
        new double[] { 0.641423027995072, -0.767187395072177 },
        new double[] { 0.767187395072177,  0.641423027995072 },
    };

    // U should be equal despite some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));

    // Checking values
    double[][] V =
    {
        new double[] { 0.0, 0.0 },
        new double[] { 0.0, 0.0 },
        new double[] { 0.0, 0.0 },
        new double[] { 0.0, 0.0 },
    };

    // V should not have been computed.
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   Auto-transposed SVD with left singular vectors disabled: U must stay
///   an all-zeros placeholder while V and the singular values are computed.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest5()
{
    // Test using SVD assumption auto-correction feature
    // without computing the left singular vectors.

    var value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }.Transpose(); // value is 2x4, having less rows than columns.

    var target = new JaggedSingularValueDecomposition(value, false, true, true);

    // Checking values
    double[][] U =
    {
        new double[] { 0.0, 0.0 },
        new double[] { 0.0, 0.0 },
    };

    // U should not have been computed
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U));

    double[][] V = // economy svd
    {
        new double[] { 0.152483233310201,  0.822647472225661, },
        new double[] { 0.349918371807964,  0.421375287684580, },
        new double[] { 0.547353510305727,  0.0201031031435023, },
        new double[] { 0.744788648803490, -0.381169081397574, },
    };

    // V can be different, but for the economy SVD it is often equal
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   Least-squares Solve on a tall 100x3 system: the right-hand side lies
///   exactly in the column space, so A * Solve(b) must equal b.
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest7()
{
    int count = 100;
    double[][] value = new double[count][];
    double[] output = new double[count];

    // Rows are (x, 2x-1, 1); outputs are an exact linear combination of them.
    for (int i = 0; i < count; i++)
    {
        value[i] = new double[3];
        double x = i + 1;
        double y = 2 * (i + 1) - 1;
        value[i][0] = x;
        value[i][1] = y;
        value[i][2] = 1;
        output[i] = 4 * x - y + 3;
    }

    var target = new JaggedSingularValueDecomposition(value,
        computeLeftSingularVectors: true,
        computeRightSingularVectors: true);

    {
        // A = U * S * V' must reconstruct the original matrix.
        double[][] expected = value;
        double[][] actual = Matrix.Multiply(
            Matrix.Multiply(target.LeftSingularVectors, target.DiagonalMatrix),
            target.RightSingularVectors.Transpose());

        // Checking the decomposition
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }

    {
        // The consistent system must be solved exactly.
        double[] solution = target.Solve(output);
        double[] expected = output;
        double[] actual = value.Multiply(solution);
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }
}
/// <summary>
///   Computes a distance between two Gaussian distributions given their means,
///   covariance matrices and log-determinants. The formula matches the
///   Bhattacharyya distance, 1/8 * d' P^-1 d + 1/2 * ln(det P / sqrt(det covX * det covY))
///   with P = (covX + covY) / 2 — NOTE(review): confirm intended semantics with callers.
/// </summary>
/// <param name="meanX">Mean vector of the first distribution.</param>
/// <param name="covX">Covariance matrix of the first distribution.</param>
/// <param name="lnDetCovX">Log-determinant of <paramref name="covX"/>.</param>
/// <param name="meanY">Mean vector of the second distribution.</param>
/// <param name="covY">Covariance matrix of the second distribution.</param>
/// <param name="lnDetCovY">Log-determinant of <paramref name="covY"/>.</param>
/// <returns>The computed distance value.</returns>
public double Distance(
    double[] meanX, double[][] covX, double lnDetCovX,
    double[] meanY, double[][] covY, double lnDetCovY)
{
    int n = meanX.Length;

    // P = (covX + covY) / 2
    var P = Jagged.Zeros(n, n);
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            P[i][j] = (covX[i][j] + covY[i][j]) / 2.0;
        }
    }

    // Use an SVD so singular P is tolerated: log pseudo-determinant
    // and a least-squares solve instead of a direct inverse.
    var svd = new JaggedSingularValueDecomposition(P);
    double detP = svd.LogPseudoDeterminant;

    // d = meanX - meanY
    double[] d = new double[meanX.Length];
    for (int i = 0; i < meanX.Length; i++)
    {
        d[i] = meanX[i] - meanY[i];
    }

    // z = P^-1 * d, so r = d' P^-1 d (squared Mahalanobis-like term).
    double[] z = svd.Solve(d);

    double r = 0.0;
    for (int i = 0; i < d.Length; i++)
    {
        r += d[i] * z[i];
    }

    // Guard against tiny negative values from numerical error.
    double mahalanobis = Math.Abs(r);

    double a = (1.0 / 8.0) * mahalanobis;
    // detP is already a log, so this is 1/2 * ln(det P / sqrt(det covX * det covY)).
    double b = (0.5) * (detP - 0.5 * (lnDetCovX + lnDetCovY));

    return (a + b);
}
/// <summary>
///   On a singular 2x2 matrix, solving against the identity must produce
///   the same pseudo-inverse as calling Inverse() directly.
/// </summary>
public void InverseTest()
{
    var matrix = new double[][]
    {
        new double[] { 1.0, 1.0 },
        new double[] { 2.0, 2.0 }
    };

    // Expected Moore-Penrose pseudo-inverse of the singular matrix above.
    var pseudoInverse = new double[][]
    {
        new double[] { 0.1, 0.2 },
        new double[] { 0.1, 0.2 }
    };

    var svd = new JaggedSingularValueDecomposition(matrix);

    // Solving A * X = I yields the pseudo-inverse of A.
    var result = svd.Solve(Matrix.JaggedIdentity(2));
    Assert.IsTrue(Matrix.IsEqual(pseudoInverse, result, 0.001));

    // Inverse() must agree with the Solve-based result.
    result = svd.Inverse();
    Assert.IsTrue(Matrix.IsEqual(pseudoInverse, result, 0.001));
}
/// <summary>
///   The QR-based inverse must agree with the SVD-based pseudo-inverse,
///   both for the jagged and the multidimensional QR implementations.
/// </summary>
public void InverseTest1()
{
    double[][] value =
    {
        new double[] { 41.9, 29.1, 1 },
        new double[] { 43.4, 29.3, 1 },
        new double[] { 43.9, 29.5, 0 },
        new double[] { 44.5, 29.7, 0 },
        new double[] { 47.3, 29.9, 0 },
        new double[] { 47.5, 30.3, 0 },
        new double[] { 47.9, 30.5, 0 },
        new double[] { 50.2, 30.7, 0 },
        new double[] { 52.8, 30.8, 0 },
        new double[] { 53.2, 30.9, 0 },
        new double[] { 56.7, 31.5, 0 },
        new double[] { 57.0, 31.7, 0 },
        new double[] { 63.5, 31.9, 0 },
        new double[] { 65.3, 32.0, 0 },
        new double[] { 71.1, 32.1, 0 },
        new double[] { 77.0, 32.5, 0 },
        new double[] { 77.8, 32.9, 0 }
    };

    // Reference result: pseudo-inverse computed through the SVD.
    double[][] expected = new JaggedSingularValueDecomposition(value,
        computeLeftSingularVectors: true,
        computeRightSingularVectors: true,
        autoTranspose: true).Inverse();

    var target = new JaggedQrDecomposition(value);
    double[][] actual = target.Inverse();
    Assert.IsTrue(Matrix.IsEqual(expected, actual, atol: 1e-4));

    // BUG FIX: the original assigned actualMat from target.Inverse() again,
    // so the multidimensional QrDecomposition (targetMat) was never exercised
    // and the second assertion merely repeated the first.
    var targetMat = new QrDecomposition(value.ToMatrix());
    var actualMat = targetMat.Inverse();
    Assert.IsTrue(Matrix.IsEqual(expected, actualMat, atol: 1e-4));
}
/// <summary>
///   For a non-singular 5x5 magic square, Solve(identity), Inverse()
///   and Reverse() must all be mutually consistent.
/// </summary>
public void InverseTest2()
{
    int size = 5;
    var identity = Jagged.Identity(size);

    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            double[][] magic = Jagged.Magic(size);
            var svd = new JaggedSingularValueDecomposition(magic);

            double[][] fromSolve = svd.Solve(identity);
            double[][] fromInverse = svd.Inverse();
            double[][] rebuilt = svd.Reverse();

            // Solving against the identity is the same as inverting.
            Assert.IsTrue(Matrix.IsEqual(fromSolve, fromInverse, 1e-4));

            // Reverse() must reconstruct the decomposed matrix.
            Assert.IsTrue(Matrix.IsEqual(magic, rebuilt, 1e-4));
        }
    }
}
/// <summary>
///   Even when the input matrix contains a NaN entry, Solve(identity)
///   and Inverse() must return the same result.
/// </summary>
public void InverseTestNaN()
{
    int size = 5;
    var identity = Matrix.Identity(size).ToJagged();

    // Poison every position of the magic square with NaN, one at a time.
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            double[][] poisoned = Matrix.Magic(size).ToJagged();
            poisoned[row][col] = double.NaN;

            var svd = new JaggedSingularValueDecomposition(poisoned);

            double[][] fromSolve = svd.Solve(identity);
            double[][] fromInverse = svd.Inverse();

            Assert.IsTrue(Matrix.IsEqual(fromSolve, fromInverse));
        }
    }
}
/// <summary>
///   With a NaN entry anywhere in the input, Solve(identity) and Inverse()
///   must still agree with each other (variant using ToArray conversions).
/// </summary>
public void InverseTestNaN()
{
    int n = 5;
    var I = Matrix.Identity(n).ToArray();

    // Try poisoning each position of the magic square in turn.
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            double[][] value = Matrix.Magic(n).ToArray();
            value[i][j] = double.NaN;

            var target = new JaggedSingularValueDecomposition(value);

            double[][] solution = target.Solve(I);
            double[][] inverse = target.Inverse();

            // Both paths must propagate/handle the NaN identically.
            Assert.IsTrue(Matrix.IsEqual(solution, inverse));
        }
    }
}
/// <summary>
///   On a singular 2x2 matrix: Solve(identity), Inverse() and Reverse()
///   must all be consistent with the known pseudo-inverse.
/// </summary>
public void InverseTest()
{
    var value = new double[][]
    {
        new double[] { 1.0, 1.0 },
        new double[] { 2.0, 2.0 }
    };

    var target = new JaggedSingularValueDecomposition(value);

    // Expected Moore-Penrose pseudo-inverse of the singular matrix above.
    var expected = new double[][]
    {
        new double[] { 0.1, 0.2 },
        new double[] { 0.1, 0.2 }
    };

    // Solving A * X = I yields the pseudo-inverse of A.
    var actual = target.Solve(Matrix.JaggedIdentity(2));
    Assert.IsTrue(Matrix.IsEqual(expected, actual, 1e-3));

    // Reverse() must reconstruct the decomposed matrix.
    Assert.IsTrue(Matrix.IsEqual(value, target.Reverse(), 1e-5));

    // Inverse() must agree with the Solve-based result.
    actual = target.Inverse();
    Assert.IsTrue(Matrix.IsEqual(expected, actual, 1e-3));
}
/// <summary>
///   Computes the whitening transform for the given data, making
///   its covariance matrix equals the identity matrix.
/// </summary>
/// <param name="value">A matrix where each column represent a
///   variable and each row represent a observation.</param>
/// <param name="transformMatrix">The base matrix used in the
///   transformation.</param>
/// <returns>
///   The transformed source data (which now has unit variance).
/// </returns>
///
public static double[][] Whitening(double[][] value, out double[][] transformMatrix)
{
    // TODO: Move into PCA and mark as obsolete
    if (value == null)
    {
        throw new ArgumentNullException("value");
    }

    int cols = value.Columns();

    // Covariance of the data; its left singular vectors give a rotation
    // into decorrelated axes.
    double[][] cov = value.Covariance();

    // Diagonalizes the covariance matrix
    var svd = new JaggedSingularValueDecomposition(cov,
        true,   // compute left vectors (to become a transformation matrix)
        false,  // do not compute right vectors since they aren't necessary
        true,   // transpose if necessary to avoid erroneous assumptions in SVD
        true);  // perform operation in-place, reducing memory usage

    // Retrieve the transformation matrix
    transformMatrix = svd.LeftSingularVectors;

    // Perform scaling to have unit variance
    double[] singularValues = svd.Diagonal;
    for (int i = 0; i < cols; i++)
    {
        for (int j = 0; j < singularValues.Length; j++)
        {
            // Divide each column by sqrt of its variance along that axis.
            transformMatrix[i][j] /= Math.Sqrt(singularValues[j]);
        }
    }

    // Return the transformed data
    return (Matrix.Dot(value, transformMatrix));
}
/// <summary>
///   Least-squares Solve on a tall, consistent 100x3 system (variant using
///   the Multiply-chain reconstruction).
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest7()
{
    int count = 100;
    double[][] value = new double[count][];
    double[] output = new double[count];

    // Rows are (x, 2x-1, 1); outputs are an exact linear combination of them.
    for (int i = 0; i < count; i++)
    {
        value[i] = new double[3];
        double x = i + 1;
        double y = 2 * (i + 1) - 1;
        value[i][0] = x;
        value[i][1] = y;
        value[i][2] = 1;
        output[i] = 4 * x - y + 3;
    }

    var target = new JaggedSingularValueDecomposition(value,
        computeLeftSingularVectors: true,
        computeRightSingularVectors: true);

    {
        // A = U * S * V' must reconstruct the original matrix.
        double[][] expected = value;
        double[][] actual = target.LeftSingularVectors
            .Multiply(Matrix.Diagonal(target.Diagonal).ToArray()
            .Multiply(target.RightSingularVectors.Transpose()));

        // Checking the decomposition
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }

    {
        // The consistent system must be solved exactly.
        double[] solution = target.Solve(output);
        double[] expected = output;
        double[] actual = value.Multiply(solution);
        Assert.IsTrue(Matrix.IsEqual(actual, expected, 1e-8));
    }
}
/// <summary>
///   SVD of a 2x4 matrix with autoTranspose (variant using the
///   Multiply-chain reconstruction).
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest3()
{
    // Test using SVD assumption auto-correction feature.

    // Test for m-x-n matrices where m < n. The available SVD
    // routine was not meant to be used in this case.

    double[][] value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }.Transpose(); // value is 2x4, having less rows than columns.

    var target = new JaggedSingularValueDecomposition(value, true, true, true);

    // Reconstruct as U * S * V'.
    double[][] actual = target.LeftSingularVectors
        .Multiply(Matrix.Diagonal(target.Diagonal).ToArray()
        .Multiply(target.RightSingularVectors.Transpose()));

    // Checking the decomposition
    Assert.IsTrue(Matrix.IsEqual(actual, value, 0.01));

    // Checking values
    double[][] U =
    {
        new double[] { 0.641423027995072, -0.767187395072177 },
        new double[] { 0.767187395072177,  0.641423027995072 },
    };

    // U should be equal despite some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));

    double[][] V = // economy svd
    {
        new double[] { 0.152483233310201,  0.822647472225661, },
        new double[] { 0.349918371807964,  0.421375287684580, },
        new double[] { 0.547353510305727,  0.0201031031435023, },
        new double[] { 0.744788648803490, -0.381169081397574, },
    };

    // V can be different, but for the economy SVD it is often equal
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   SVD of a tall 4x2 matrix (variant using the Multiply-chain
///   reconstruction, without the Reverse() check).
/// </summary>
public void JaggedSingularValueDecompositionConstructorTest2()
{
    // test for m-x-n matrices where m > n (4 > 2)
    double[][] value = new double[][]
    {
        new double[] { 1, 2 },
        new double[] { 3, 4 },
        new double[] { 5, 6 },
        new double[] { 7, 8 }
    }; // value is 4x2, thus having more rows than columns

    var target = new JaggedSingularValueDecomposition(value, true, true, false);

    // Reconstruct as U * S * V'.
    double[][] actual = target.LeftSingularVectors
        .Multiply(Matrix.Diagonal(target.Diagonal).ToArray()
        .Multiply(target.RightSingularVectors.Transpose()));

    // Checking the decomposition
    Assert.IsTrue(Matrix.IsEqual(actual, value, 0.01));

    double[][] U = // economy svd
    {
        new double[] { 0.152483233310201,  0.822647472225661, },
        new double[] { 0.349918371807964,  0.421375287684580, },
        new double[] { 0.547353510305727,  0.0201031031435023, },
        new double[] { 0.744788648803490, -0.381169081397574, },
    };

    // U should be equal except for some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.LeftSingularVectors, U, 0.001));

    // Checking values
    double[][] V =
    {
        new double[] { 0.641423027995072, -0.767187395072177 },
        new double[] { 0.767187395072177,  0.641423027995072 },
    };

    // V should be equal except for some sign changes
    Assert.IsTrue(Matrix.IsEqual(target.RightSingularVectors, V, 0.0001));

    double[][] S =
    {
        new double[] { 14.2690954992615,  0.000000000000000 },
        new double[] {  0.0000000000000,  0.626828232417543 },
    };

    // The diagonal values should be equal
    Assert.IsTrue(Matrix.IsEqual(target.Diagonal, Matrix.Diagonal(S), 0.001));
}
/// <summary>
///   Performs one Gauss-Newton-style iteration: builds the residual vector
///   and Jacobian, forms the gradient and quasi-Hessian, solves for the
///   parameter step through an SVD and updates the current solution.
/// </summary>
/// <param name="inputs">Input points, one per observation.</param>
/// <param name="outputs">Expected output value for each input.</param>
/// <returns>The error after the update, as computed by ComputeError.</returns>
private double iterate(double[][] inputs, double[] outputs)
{
    // (Re)allocate the residual vector and Jacobian rows when the
    // number of observations changes.
    if (Residuals is null || inputs.Length != Residuals.Length)
    {
        Residuals = new double[inputs.Length];
        for (var i = 0; i < Jacobian.Length; i++)
        {
            Jacobian[i] = new double[inputs.Length];
        }
    }

    // Residuals: r_i = y_i - f(solution, x_i)
    for (var i = 0; i < inputs.Length; i++)
    {
        Residuals[i] = outputs[i] - Function(Solution, inputs[i]);
    }

    if (ParallelOptions.MaxDegreeOfParallelism == 1)
    {
        // Sequential Jacobian: column i holds the negated gradient at input i.
        for (var i = 0; i < inputs.Length; i++)
        {
            Gradient(Solution, inputs[i], gradient);
            for (var j = 0; j < gradient.Length; j++)
            {
                Jacobian[j][i] = -gradient[j];
            }
            if (Token.IsCancellationRequested)
            {
                break;
            }
        }
    }
    else
    {
        // Parallel Jacobian: each worker thread keeps its own gradient buffer.
        Parallel.For(0, inputs.Length, ParallelOptions,
            () => new double[NumberOfParameters],
            (i, state, grad) =>
            {
                Gradient(Solution, inputs[i], grad);
                for (var j = 0; j < grad.Length; j++)
                {
                    Jacobian[j][i] = -grad[j];
                }
                return (grad);
            },
            grad => { }
        );
    }

    // Compute error gradient using Jacobian
    Jacobian.Dot(Residuals, gradient);

    // Compute Quasi-Hessian Matrix approximation
    Jacobian.DotWithTransposed(Jacobian, Hessian);

    // Solve H * delta = g through an SVD (tolerates a singular Hessian).
    decomposition = new JaggedSingularValueDecomposition(Hessian, true, true, true);

    Deltas = decomposition.Solve(gradient);

    // Step the solution by the computed deltas.
    for (var i = 0; i < Deltas.Length; i++)
    {
        Solution[i] -= Deltas[i];
    }

    return (ComputeError(inputs, outputs));
}
/// <summary>
///   Computes the analysis: via SVD of the adjusted data when the source
///   matrix is available, or via an Eigenvalue decomposition when only a
///   covariance matrix was given.
/// </summary>
public virtual void Compute()
{
    if (!onlyCovarianceMatrixAvailable)
    {
        int rows;

        if (this.array != null)
        {
            rows = array.Length;

            // Adjust (e.g. center/standardize) and decompose the jagged data.
            double[][] matrix = Adjust(array, Overwrite);
            var svd = new JaggedSingularValueDecomposition(matrix,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true, inPlace: true);

            SingularValues = svd.Diagonal;

            // The right singular vectors hold the component directions.
            ComponentVectors = svd.RightSingularVectors.Transpose();
        }
        else
        {
            rows = source.GetLength(0);

            // Adjust and decompose the multidimensional data (obsolete path).
#pragma warning disable 612, 618
            double[,] matrix = Adjust(source, Overwrite);
#pragma warning restore 612, 618

            var svd = new SingularValueDecomposition(matrix,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true, inPlace: true);

            SingularValues = svd.Diagonal;
            ComponentVectors = svd.RightSingularVectors.ToArray().Transpose();
        }

        // Eigenvalues of the covariance are squared singular values over (rows - 1).
        Eigenvalues = new double[SingularValues.Length];
        for (int i = 0; i < SingularValues.Length; i++)
        {
            Eigenvalues[i] = SingularValues[i] * SingularValues[i] / (rows - 1);
        }
    }
    else
    {
        // Only a covariance matrix: take its Eigenvalue decomposition instead.
        var evd = new EigenvalueDecomposition(covarianceMatrix,
            assumeSymmetric: true, sort: true);

        Eigenvalues = evd.RealEigenvalues;
        var eigenvectors = evd.Eigenvectors.ToJagged();
        SingularValues = Eigenvalues.Sqrt();
        ComponentVectors = eigenvectors.Transpose();
    }

    if (Whiten)
    {
        // Rescale components by the eigenvalues to whiten transformed data.
        ComponentVectors = ComponentVectors.Transpose()
            .Divide(Eigenvalues, dimension: 0).Transpose();
    }

    CreateComponents();

    if (!onlyCovarianceMatrixAvailable)
    {
        // Cache the projection of the training data onto the components.
        if (array != null)
        {
            result = Transform(array).ToMatrix();
        }
        else if (source != null)
        {
            result = Transform(source.ToJagged()).ToMatrix();
        }
    }
}
/// <summary>
///   Computes the Principal Component Analysis: via SVD of the standardized
///   source data when available, or via an Eigenvalue decomposition when
///   only a covariance matrix was given.
/// </summary>
public virtual void Compute()
{
    if (!onlyCovarianceMatrixAvailable)
    {
        int rows;

        if (this.array != null)
        {
            rows = array.Length;

            // Center and standardize the source matrix
            double[][] matrix = Adjust(array, Overwrite);

            // Perform the Singular Value Decomposition (SVD) of the matrix
            var svd = new JaggedSingularValueDecomposition(matrix,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true, inPlace: true);

            SingularValues = svd.Diagonal;

            // The principal components of 'Source' are the eigenvectors of Cov(Source). Thus if we
            // calculate the SVD of 'matrix' (which is Source standardized), the columns of matrix V
            // (right side of SVD) will be the principal components of Source.

            // The right singular vectors contains the principal components of the data matrix
            ComponentVectors = svd.RightSingularVectors.Transpose();
        }
        else
        {
            rows = source.GetLength(0);

            // Center and standardize the source matrix
#pragma warning disable 612, 618
            double[,] matrix = Adjust(source, Overwrite);
#pragma warning restore 612, 618

            // Perform the Singular Value Decomposition (SVD) of the matrix
            var svd = new SingularValueDecomposition(matrix,
                computeLeftSingularVectors: true,
                computeRightSingularVectors: true,
                autoTranspose: true, inPlace: true);

            SingularValues = svd.Diagonal;

            // The principal components of 'Source' are the eigenvectors of Cov(Source). Thus if we
            // calculate the SVD of 'matrix' (which is Source standardized), the columns of matrix V
            // (right side of SVD) will be the principal components of Source.

            // The right singular vectors contains the principal components of the data matrix
            ComponentVectors = svd.RightSingularVectors.ToArray().Transpose();

            // The left singular vectors contains the factor scores for the principal components
        }

        // Eigenvalues are the square of the singular values
        Eigenvalues = new double[SingularValues.Length];
        for (int i = 0; i < SingularValues.Length; i++)
        {
            Eigenvalues[i] = SingularValues[i] * SingularValues[i] / (rows - 1);
        }
    }
    else
    {
        // We only have the covariance matrix. Compute the Eigenvalue decomposition
        var evd = new EigenvalueDecomposition(covarianceMatrix,
            assumeSymmetric: true, sort: true);

        // Gets the Eigenvalues and corresponding Eigenvectors
        Eigenvalues = evd.RealEigenvalues;
        var eigenvectors = evd.Eigenvectors.ToJagged();
        SingularValues = Eigenvalues.Sqrt();
        ComponentVectors = eigenvectors.Transpose();
    }

    if (Whiten)
    {
        // Rescale components by the eigenvalues to whiten transformed data.
        ComponentVectors = ComponentVectors.Transpose()
            .Divide(Eigenvalues, dimension: 0).Transpose();
    }

    CreateComponents();

    if (!onlyCovarianceMatrixAvailable)
    {
        // Cache the projection of the training data onto the components.
        if (array != null)
        {
            result = Transform(array).ToMatrix();
        }
        else if (source != null)
        {
            result = Transform(source.ToJagged()).ToMatrix();
        }
    }
}
/// <summary>
///   Parallel (symmetric) iterative algorithm.
/// </summary>
///
/// <returns>
///   Returns a matrix in which each row contains
///   the mixing coefficients for each component.
/// </returns>
///
private double[][] parallel(double[][] X, int components, double[][] winit)
{
    // References:
    // - Hyvärinen, A (1999). Fast and Robust Fixed-Point
    //   Algorithms for Independent Component Analysis.

    // There are two ways to apply the fixed-unit algorithm to compute the whole
    // ICA iteration. The second approach is to perform orthogonalization at once
    // using an Eigendecomposition [Hyvärinen]. The Eigendecomposition can in turn
    // be converted to a more stable singular value decomposition and be used to
    // create a projection basis in the same way as in Principal Component Analysis.

    int n = X.Rows();
    int m = X.Columns();

    // Algorithm initialization
    double[][] W0 = winit;
    double[][] W = winit;
    double[][] K = Jagged.Zeros(components, components);

    bool stop = false;
    int iterations = 0;

    do // until convergence
    {
        // [Hyvärinen, 1997]'s paper suggests the use of the Eigendecomposition
        // to orthogonalize W (after equation 10). However, [E, D] = eig(W'W)
        // can be replaced by [U, S] = svd(W), which is more stable and avoids
        // computing W'W. Since the singular values are already the square roots
        // of the eigenvalues of W'W, the computation of E'*D^(-1/2)*E' reduces
        // to U*S^(-1)*U'.

        // Perform simultaneous decorrelation of all components at once
        var svd = new JaggedSingularValueDecomposition(W,
            computeLeftSingularVectors: true,
            computeRightSingularVectors: false,
            autoTranspose: true);

        double[] S = svd.Diagonal;
        double[][] U = svd.LeftSingularVectors;

        // Form orthogonal projection basis K = U * S^(-1) * U'
        for (int i = 0; i < components; i++)
        {
            for (int j = 0; j < components; j++)
            {
                double s = 0;
                for (int k = 0; k < S.Length; k++)
                {
                    // Skip zero singular values (pseudo-inverse behavior).
                    if (S[k] != 0.0)
                    {
                        s += U[i][k] * U[j][k] / S[k];
                    }
                }
                K[i][j] = s;
            }
        }

        // Orthogonalize
        W = Matrix.Dot(K, W);

        // Gets the maximum parameter absolute change
        double delta = getMaximumAbsoluteChange(W0, W);

        // Check for convergence
        if (delta < tolerance || iterations >= maxIterations || Token.IsCancellationRequested)
        {
            stop = true;
        }
        else
        {
            // Advance to the next iteration
            W0 = W;
            W = Jagged.Zeros(components, m);
            iterations++;

            // For each component (in parallel)
            Parallel.For(0, components, parallelOptions, i =>
            {
                double[] wx = new double[n];
                double[] dgwx = new double[n];
                double[] gwx = new double[n];
                double[] means = new double[m];

                // Compute wx = w*x
                for (int j = 0; j < wx.Length; j++)
                {
                    double s = 0;
                    for (int k = 0; k < m; k++)
                    {
                        s += W0[i][k] * X[j][k];
                    }
                    wx[j] = s;
                }

                // Compute g(wx) and g'(wx)
                contrast.Evaluate(wx, gwx, dgwx);

                // Compute E{ x*g(w*x) }
                for (int j = 0; j < means.Length; j++)
                {
                    for (int k = 0; k < gwx.Length; k++)
                    {
                        means[j] += X[k][j] * gwx[k];
                    }
                    means[j] /= n;
                }

                // Compute E{ g'(w*x) }
                double mean = Measures.Mean(dgwx);

                // Compute next update for w according
                // to Hyvärinen paper's equation (20):
                //   w+ = E{ xg(w*x)} - E{ g'(w*x)}*w
                for (int j = 0; j < means.Length; j++)
                {
                    W[i][j] = means[j] - mean * W0[i][j];
                }

                // The normalization to w* will be performed
                // in the beginning of the next iteration.
            });
        }
    } while (!stop);

    // Return the component basis matrix
    return (W); // vectors stored as rows.
}
/// <summary>
///   Performs one Gauss-Newton-style iteration: builds the error vector and
///   Jacobian, forms the gradient and quasi-Hessian, solves for the parameter
///   step through an auto-transposing SVD and updates the current solution.
/// </summary>
/// <param name="inputs">Input points, one per observation.</param>
/// <param name="outputs">Expected output value for each input.</param>
/// <returns>The error after the update, as computed by ComputeError.</returns>
private double iterate(double[][] inputs, double[] outputs)
{
    // (Re)allocate the error vector and Jacobian rows when the
    // number of observations changes.
    if (errors == null || inputs.Length != errors.Length)
    {
        this.errors = new double[inputs.Length];
        for (int i = 0; i < jacobian.Length; i++)
        {
            this.jacobian[i] = new double[inputs.Length];
        }
    }

    // Errors: e_i = y_i - f(solution, x_i)
    for (int i = 0; i < inputs.Length; i++)
    {
        this.errors[i] = outputs[i] - Function(Solution, inputs[i]);
    }

    if (ParallelOptions.MaxDegreeOfParallelism == 1)
    {
        // Sequential Jacobian: column i holds the negated gradient at input i.
        for (int i = 0; i < inputs.Length; i++)
        {
            Gradient(Solution, inputs[i], result: gradient);
            for (int j = 0; j < gradient.Length; j++)
            {
                jacobian[j][i] = -gradient[j];
            }
            if (Token.IsCancellationRequested)
            {
                break;
            }
        }
    }
    else
    {
        // Parallel Jacobian: each worker thread keeps its own gradient buffer.
        Parallel.For(0, inputs.Length, ParallelOptions,
            () => new double[NumberOfParameters],
            (i, state, gradient) =>
            {
                Gradient(Solution, inputs[i], result: gradient);
                for (int j = 0; j < gradient.Length; j++)
                {
                    jacobian[j][i] = -gradient[j];
                }
                return (gradient);
            },
            (gradient) => { }
        );
    }

    // Compute error gradient using Jacobian
    this.jacobian.Dot(errors, result: gradient);

    // Compute Quasi-Hessian Matrix approximation
    this.jacobian.DotWithTransposed(jacobian, result: hessian);

    // Solve H * delta = g through an SVD (tolerates a singular Hessian).
    this.decomposition = new JaggedSingularValueDecomposition(hessian,
        computeLeftSingularVectors: true,
        computeRightSingularVectors: true,
        autoTranspose: true);

    this.deltas = decomposition.Solve(gradient);

    // Step the solution by the computed deltas.
    for (int i = 0; i < deltas.Length; i++)
    {
        Solution[i] -= this.deltas[i];
    }

    return (ComputeError(inputs, outputs));
}
/// <summary>
///   Learns a model that can map the given inputs to the desired outputs.
/// </summary>
///
/// <param name="x">The model inputs.</param>
/// <param name="weights">The weight of importance for each input sample. Weights
///   are only supported when the analysis is computed from the source data itself
///   (<see cref="PrincipalComponentMethod.Center"/> or
///   <see cref="PrincipalComponentMethod.Standardize"/>); passing weights together
///   with a pre-computed covariance/correlation matrix throws.</param>
///
/// <returns>
///   A model that has learned how to produce suitable outputs
///   given the input data <paramref name="x" />.
/// </returns>
///
/// <exception cref="ArgumentException">Thrown when <paramref name="weights"/> is
///   given but <see cref="Method"/> is a pre-computed matrix method.</exception>
///
public MultivariateLinearRegression Learn(double[][] x, double[] weights = null)
{
    this.NumberOfInputs = x.Columns();

    if (Method == PrincipalComponentMethod.Center || Method == PrincipalComponentMethod.Standardize)
    {
        if (weights == null)
        {
            this.Means = x.Mean(dimension: 0);

            // Center the data; when Overwrite is set, x itself is used as the
            // destination buffer and is modified in place.
            double[][] matrix = Overwrite ? x : Jagged.CreateAs(x);
            x.Subtract(Means, dimension: (VectorType)0, result: matrix);

            if (Method == PrincipalComponentMethod.Standardize)
            {
                // NOTE(review): when Overwrite is true, x has already been
                // centered in place at this point, so computing the standard
                // deviation from x against the original Means looks suspect —
                // confirm against Overwrite semantics before relying on it.
                this.StandardDeviations = x.StandardDeviation(Means);
                matrix.Divide(StandardDeviations, dimension: (VectorType)0, result: matrix);
            }

            // The principal components are the right singular vectors of the
            // (standardized) data matrix; computing left vectors is unnecessary.
            var svd = new JaggedSingularValueDecomposition(matrix,
                computeLeftSingularVectors: false,
                computeRightSingularVectors: true,
                autoTranspose: true, inPlace: true);

            SingularValues = svd.Diagonal;

            // Eigenvalues of the covariance are the squared singular values
            // scaled by the (n - 1) sample-covariance denominator.
            Eigenvalues = SingularValues.Pow(2);
            Eigenvalues.Divide(x.Rows() - 1, result: Eigenvalues);
            ComponentVectors = svd.RightSingularVectors.Transpose();
        }
        else
        {
            this.Means = x.WeightedMean(weights: weights);

            double[][] matrix = Overwrite ? x : Jagged.CreateAs(x);
            x.Subtract(Means, dimension: (VectorType)0, result: matrix);

            if (Method == PrincipalComponentMethod.Standardize)
            {
                this.StandardDeviations = x.WeightedStandardDeviation(weights, Means);
                matrix.Divide(StandardDeviations, dimension: (VectorType)0, result: matrix);
            }

            // Weighted case: go through the weighted covariance matrix and its
            // eigendecomposition instead of an SVD of the data.
            double[,] cov = x.WeightedCovariance(weights, Means);
            var evd = new EigenvalueDecomposition(cov, assumeSymmetric: true, sort: true);

            Eigenvalues = evd.RealEigenvalues;
            SingularValues = Eigenvalues.Sqrt();
            ComponentVectors = Jagged.Transpose(evd.Eigenvectors);
        }
    }
    else if (Method == PrincipalComponentMethod.CovarianceMatrix || Method == PrincipalComponentMethod.CorrelationMatrix)
    {
        // Weights cannot be applied after the covariance/correlation matrix has
        // already been aggregated. Fail with a specific exception instead of the
        // previous bare 'throw new Exception()'.
        if (weights != null)
            throw new ArgumentException("Sample weights are not supported when the analysis is computed from a pre-computed covariance or correlation matrix.", nameof(weights));

        // x is interpreted here as the covariance (or correlation) matrix itself.
        var evd = new JaggedEigenvalueDecomposition(x, assumeSymmetric: true, sort: true);

        Eigenvalues = evd.RealEigenvalues;
        SingularValues = Eigenvalues.Sqrt();
        ComponentVectors = evd.Eigenvectors.Transpose();
    }
    else
    {
        // The method type should have been validated before this point.
        throw new InvalidOperationException("Invalid method, this should never happen: {0}".Format(Method));
    }

    if (Whiten)
    {
        // Whitening: scale each component vector by 1 / singular value.
        ComponentVectors.Divide(SingularValues, dimension: (VectorType)1, result: ComponentVectors);
    }

    // Computes additional information about the analysis and creates the
    // object-oriented structure to hold the principal components found.
    CreateComponents();

    return (CreateRegression());
}
public void JaggedSingularValueDecompositionConstructorTest6()
{
    // Test using SVD assumption auto-correction feature in place.
    //
    // The constructors below are called with inPlace: true (5th argument), so
    // they may destroy their input. Decompose defensive copies and keep the
    // originals around, so we can both verify which inputs were actually
    // overwritten and check the reconstruction against untouched data.
    double[][] value1 =
    {
        new double[] { 2.5,  2.4 },
        new double[] { 0.5,  0.7 },
        new double[] { 2.2,  2.9 },
        new double[] { 1.9,  2.2 },
        new double[] { 3.1,  3.0 },
        new double[] { 2.3,  2.7 },
        new double[] { 2.0,  1.6 },
        new double[] { 1.0,  1.1 },
        new double[] { 1.5,  1.6 },
        new double[] { 1.1,  0.9 }
    };

    double[][] value2 = value1.Transpose();

    var cvalue1 = value1.Copy();
    var cvalue2 = value2.Copy();

    var target1 = new JaggedSingularValueDecomposition(cvalue1, true, true, true, true);
    var target2 = new JaggedSingularValueDecomposition(cvalue2, true, true, true, true);

    // value1 (10x2) is decomposed directly and gets overwritten in place;
    // value2 (2x10) triggers the auto-transpose path, which works on an
    // internal transposed copy and leaves the original input untouched.
    Assert.IsFalse(value1.IsEqual(cvalue1, 1e-3));
    Assert.IsTrue(value2.IsEqual(cvalue2, 1e-3)); // due to auto-transpose

    // Since value2 = value1', the left/right singular vectors must swap
    // between the two decompositions while the singular values agree.
    Assert.IsTrue(target1.LeftSingularVectors.IsEqual(target2.RightSingularVectors));
    Assert.IsTrue(target1.RightSingularVectors.IsEqual(target2.LeftSingularVectors));
    Assert.IsTrue(target1.DiagonalMatrix.IsEqual(target2.DiagonalMatrix));

    // Both decompositions must reconstruct their (original) inputs.
    Assert.IsTrue(Matrix.IsEqual(value1, target1.Reverse(), 1e-2));
    Assert.IsTrue(Matrix.IsEqual(value2, target2.Reverse(), 1e-2));
}
/// <summary>
///   Learns a model that can map the given inputs to the desired outputs.
/// </summary>
///
/// <param name="x">The model inputs.</param>
/// <param name="weights">The weight of importance for each input sample.
///   Weighted learning is not supported by this implementation; passing a
///   non-null value throws an <see cref="ArgumentException"/>.</param>
///
/// <returns>
///   A model that has learned how to produce suitable outputs
///   given the input data <paramref name="x" />.
/// </returns>
///
public MultivariateLinearRegression Learn(double[][] x, double[] weights = null)
{
    // Fail loudly instead of silently ignoring sample weights: this
    // implementation always computes an unweighted analysis, so accepting a
    // non-null weights vector would produce misleading results.
    if (weights != null)
        throw new ArgumentException("Sample weights are not supported by this implementation.", nameof(weights));

    this.NumberOfInputs = x.Columns();

    if (Method == PrincipalComponentMethod.Center || Method == PrincipalComponentMethod.Standardize)
    {
        this.Means = x.Mean(dimension: 0);

        // Center the data; when Overwrite is set, x itself is used as the
        // destination buffer and is modified in place.
        double[][] matrix = Overwrite ? x : Jagged.CreateAs(x);
        x.Subtract(Means, dimension: 0, result: matrix);

        if (Method == PrincipalComponentMethod.Standardize)
        {
            // NOTE(review): when Overwrite is true, x has already been centered
            // in place at this point, so computing the standard deviation from
            // x against the original Means looks suspect — confirm against the
            // Overwrite semantics.
            this.StandardDeviations = x.StandardDeviation(Means);
            matrix.Divide(StandardDeviations, dimension: 0, result: matrix);
        }

        // The principal components of 'Source' are the eigenvectors of Cov(Source). Thus if we
        // calculate the SVD of 'matrix' (which is Source standardized), the columns of matrix V
        // (right side of SVD) will be the principal components of Source.

        // Perform the Singular Value Decomposition (SVD) of the matrix
        var svd = new JaggedSingularValueDecomposition(matrix,
            computeLeftSingularVectors: false,
            computeRightSingularVectors: true,
            autoTranspose: true, inPlace: true);

        SingularValues = svd.Diagonal;

        // Eigenvalues of the covariance are the squared singular values scaled
        // by the (n - 1) sample-covariance denominator.
        Eigenvalues = SingularValues.Pow(2);
        Eigenvalues.Divide(x.Rows() - 1, result: Eigenvalues);
        ComponentVectors = svd.RightSingularVectors.Transpose();
    }
    else if (Method == PrincipalComponentMethod.CovarianceMatrix || Method == PrincipalComponentMethod.CorrelationMatrix)
    {
        // We only have the covariance matrix. Compute the Eigenvalue decomposition
        var evd = new JaggedEigenvalueDecomposition(x, assumeSymmetric: true, sort: true);

        // Gets the Eigenvalues and corresponding Eigenvectors
        Eigenvalues = evd.RealEigenvalues;
        SingularValues = Eigenvalues.Sqrt();
        ComponentVectors = evd.Eigenvectors.Transpose();
    }
    else
    {
        // The method type should have been validated before we even entered this section
        throw new InvalidOperationException("Invalid method, this should never happen: {0}".Format(Method));
    }

    if (Whiten)
    {
        // Whitening: scale each component vector by 1 / singular value.
        ComponentVectors.Divide(SingularValues, dimension: 1, result: ComponentVectors);
    }

    // Computes additional information about the analysis and creates the
    // object-oriented structure to hold the principal components found.
    CreateComponents();

    return (CreateRegression());
}
/// <summary>
///   Reverts a set of projected data into it's original form. Complete reverse
///   transformation is not always possible and is not even guaranteed to exist.
/// </summary>
///
/// <remarks>
///   <para>
///     This method works using a closed-form MDS approach as suggested by
///     Kwok and Tsang. It is currently a direct implementation of the algorithm
///     without any kind of optimization.
///   </para>
///   <para>
///     Reference:
///     - http://cmp.felk.cvut.cz/cmp/software/stprtool/manual/kernels/preimage/list/rbfpreimg3.html
///   </para>
/// </remarks>
///
/// <param name="data">The kpca-transformed data.</param>
/// <param name="neighbors">The number of nearest neighbors to use while constructing the pre-image.</param>
///
/// <returns>The pre-images of <paramref name="data"/> in the original input space,
///   one row per input row, with centering/standardization reverted.</returns>
///
public double[][] Revert(double[][] data, int neighbors = 10)
{
    if (data == null)
    {
        throw new ArgumentNullException("data");
    }

    if (sourceCentered == null)
    {
        throw new InvalidOperationException("The analysis must have been computed first.");
    }

    if (neighbors < 2)
    {
        throw new ArgumentOutOfRangeException("neighbors", "At least two neighbors are necessary.");
    }

    // Verify if the current kernel supports
    // distance calculation in feature space.
    var distance = kernel as IReverseDistance;

    if (distance == null)
    {
        throw new NotSupportedException(
            "Current kernel does not support distance calculation in feature space.");
    }

    int rows = data.Rows();

    var result = this.result;

    double[][] reversion = Jagged.Zeros(rows, sourceCentered.Columns());

    // number of neighbors cannot exceed the number of training vectors.
    int nn = System.Math.Min(neighbors, sourceCentered.Rows());

    // For each point to be reversed
    for (int p = 0; p < rows; p++)
    {
        // 1. Get the point in feature space
        double[] y = data.GetRow(p);

        // 2. Select nn nearest neighbors of the feature space
        double[][] X = sourceCentered;
        double[] d2 = new double[result.GetLength(0)];
        int[] inx = new int[result.GetLength(0)];

        // 2.1 Calculate distances
        //     NaN distances (kernel could not invert) are mapped to +inf so the
        //     sort below pushes them past every usable neighbor.
        for (int i = 0; i < X.GetLength(0); i++)
        {
            inx[i] = i;
            d2[i] = distance.ReverseDistance(y, result.GetRow(i).First(y.Length));

            if (Double.IsNaN(d2[i]))
            {
                d2[i] = Double.PositiveInfinity;
            }
        }

        // 2.2 Order them (co-sorting the index array so we can recover which
        //     training vectors the smallest distances belong to)
        Array.Sort(d2, inx);

        // 2.3 Select nn neighbors — stop early at the first infinite distance,
        //     so 'def' is the count of actually usable neighbors (def <= nn)
        int def = 0;
        for (int i = 0; i < d2.Length && i < nn; i++, def++)
        {
            if (Double.IsInfinity(d2[i]))
            {
                break;
            }
        }

        inx = inx.First(def);
        X = X.Get(inx).Transpose(); // X is in input space
        d2 = d2.First(def); // distances in input space

        // 3. Perform SVD
        //    [U,L,V] = svd(X*H);

        // TODO: If X has more columns than rows, the SV decomposition should be
        // computed on the transpose of X and the left and right vectors should
        // be swapped. This should be fixed after more unit tests are elaborated.
        var svd = new JaggedSingularValueDecomposition(X,
            computeLeftSingularVectors: true,
            computeRightSingularVectors: true,
            autoTranspose: false);

        double[][] U = svd.LeftSingularVectors;
        double[][] L = Jagged.Diagonal(def, svd.Diagonal);
        double[][] V = svd.RightSingularVectors;

        // 4. Compute projections
        //    Z = L*V';
        double[][] Z = Matrix.DotWithTransposed(L, V);

        // 5. Calculate distances
        //    d02 = sum(Z.^2)';
        double[] d02 = Matrix.Sum(Elementwise.Pow(Z, 2), 0);

        // 6. Get the pre-image using
        //    z = -0.5*inv(Z')*(d2-d02)
        //    (PseudoInverse is used so a rank-deficient Z' still yields a
        //    least-squares solution)
        double[][] inv = Matrix.PseudoInverse(Z.Transpose());

        double[] w = (-0.5).Multiply(inv).Dot(d2.Subtract(d02));

        double[] z = w.First(U.Columns());

        // 8. Project the pre-image on the original basis using
        //    x = U*z + sum(X,2)/nn;
        double[] x = (U.Dot(z)).Add(Matrix.Sum(X.Transpose(), 0).Multiply(1.0 / nn));

        // 9. Store the computed pre-image.
        for (int i = 0; i < reversion.Columns(); i++)
        {
            reversion[p][i] = x[i];
        }
    }

    // if the data has been standardized or centered,
    //  we need to revert those operations as well
    if (this.Method == PrincipalComponentMethod.Standardize)
    {
        // multiply by standard deviation and add the mean
        reversion.Multiply(StandardDeviations, dimension: 0, result: reversion)
            .Add(Means, dimension: 0, result: reversion);
    }
    else if (this.Method == PrincipalComponentMethod.Center)
    {
        // only add the mean
        reversion.Add(Means, dimension: 0, result: reversion);
    }

    return (reversion);
}