/// <summary>
/// Computes the eigenvalues and eigenvectors of the matrix.
/// </summary>
/// <returns>A representation of the eigenvalues and eigenvectors of the matrix.</returns>
/// <remarks>
/// <para>For a generic vector v and matrix M, Mv = u will point in some direction with no particular relationship to v.
/// The eigenvectors of a matrix M are vectors z that satisfy Mz = λz, i.e. multiplying an eigenvector by a
/// matrix reproduces the same vector, up to a proportionality constant λ called the eigenvalue.</para>
/// <para>For z to be an eigenvector of M with eigenvalue λ, (M - λI)z = 0. But for a matrix to
/// annihilate a non-zero vector, that matrix must have zero determinant, so det(M - λI) = 0. For a matrix of
/// order N, this is an equation for the roots of a polynomial of order N. Since an order-N polynomial always has exactly
/// N roots, an order-N matrix always has exactly N eigenvalues.</para>
/// <para>An alternative way of expressing the same relationship is to say that the eigenvalues of a matrix are its
/// diagonal elements when the matrix is expressed in a basis that diagonalizes it. That is, given Z such that Z<sup>-1</sup>MZ = D,
/// where D is diagonal, the columns of Z are the eigenvectors of M and the diagonal elements of D are the eigenvalues.</para>
/// <para>Note that the eigenvectors of a matrix are not entirely unique. Given an eigenvector z, any scaled vector αz
/// is an eigenvector with the same eigenvalue, so eigenvectors are at most unique up to a rescaling. If an eigenvalue
/// is degenerate, i.e. there are two or more linearly independent eigenvectors with the same eigenvalue, then any linear
/// combination of the eigenvectors is also an eigenvector with that eigenvalue, and in fact any set of vectors that span the
/// same subspace could be taken as the eigenvector set corresponding to that eigenvalue.</para>
/// <para>The eigenvectors of a symmetric matrix are always orthogonal and the eigenvalues are always real. The transformation
/// matrix Z is thus orthogonal (Z<sup>-1</sup> = Z<sup>T</sup>).</para>
/// <para>Finding the eigenvalues and eigenvectors of a symmetric matrix is an O(N<sup>3</sup>) operation.</para>
/// <para>If you require only the eigenvalues, not the eigenvectors, of the matrix, the <see cref="Eigenvalues"/> method
/// will produce them faster than this method.</para>
/// </remarks>
public RealEigensystem Eigensystem() {
    // Diagonalize a copy so that the Jacobi sweeps do not destroy this matrix's own storage.
    double[][] workspace = SymmetricMatrixAlgorithms.Copy(values, dimension);
    // Starts as the identity and accumulates the product of the Jacobi rotations,
    // which becomes the eigenvector (transformation) matrix.
    double[] rotations = SquareMatrixAlgorithms.CreateUnitMatrix(dimension);
    SymmetricMatrixAlgorithms.JacobiEigensystem(workspace, rotations, dimension);
    // After the sweeps the workspace is (numerically) diagonal; its diagonal holds the eigenvalues.
    return (new RealEigensystem(dimension, SymmetricMatrixAlgorithms.GetDiagonal(workspace, dimension), rotations));
}
// Storage is in lower-triangular form, i.e. values[r][c] with c <= r.

/// <summary>
/// Initializes a new symmetric matrix.
/// </summary>
/// <param name="dimension">The dimension of the matrix, which must be positive.</param>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="dimension"/> is less than one.</exception>
public SymmetricMatrix(int dimension) {
    // Use nameof so the exception's parameter name survives renames (consistent with the operator * overload).
    if (dimension < 1) {
        throw new ArgumentOutOfRangeException(nameof(dimension));
    }
    this.dimension = dimension;
    values = SymmetricMatrixAlgorithms.InitializeStorage(dimension);
}
/// <summary>
/// Computes the Cholesky decomposition of the matrix.
/// </summary>
/// <returns>The Cholesky decomposition of the matrix, or null if the matrix is not positive definite.</returns>
/// <remarks>
/// <para>A Cholesky decomposition is a special decomposition that is possible only for positive definite matrices.
/// (A positive definite matrix M has x<sup>T</sup>Mx > 0 for any vector x. Equivalently, M is positive definite if
/// all its eigenvalues are positive.)</para>
/// <para>The Cholesky decomposition represents M = C C<sup>T</sup>, where C is lower-left triangular (and thus C<sup>T</sup>
/// is upper-right triangular). It is basically an LU decomposition where the L and U factors are related by transposition.
/// Since M is produced by multiplying C "by itself", the matrix C is sometimes called the "square root" of M.</para>
/// <para>Cholesky decomposition is an O(N<sup>3</sup>) operation. It is about a factor of two faster than LU decomposition,
/// so it is a faster way to obtain inverses, determinants, etc. if you know that M is positive definite.</para>
/// <para>The fastest way to test whether your matrix is positive definite is to attempt a Cholesky decomposition. If this
/// method returns null, M is not positive definite.</para>
/// </remarks>
/// <seealso cref="Meta.Numerics.Matrices.CholeskyDecomposition"/>
public CholeskyDecomposition CholeskyDecomposition() {
    double[][] factor = SymmetricMatrixAlgorithms.CholeskyDecomposition(values, dimension);
    // The algorithm signals a non-positive-definite matrix by returning null storage.
    if (factor == null) {
        return (null);
    }
    return (new CholeskyDecomposition(new SymmetricMatrix(factor, dimension)));
}
/// <summary>
/// Copies the matrix.
/// </summary>
/// <returns>An independent copy of the matrix.</returns>
public SymmetricMatrix Copy() {
    // Delegate to the shared copy routine used by Eigensystem and Eigenvalues,
    // instead of duplicating the lower-triangular copy loop here.
    return (new SymmetricMatrix(SymmetricMatrixAlgorithms.Copy(values, dimension), dimension));
}
/// <summary>
/// Negates a symmetric matrix.
/// </summary>
/// <param name="A">The matrix.</param>
/// <returns>The matrix -A.</returns>
/// <exception cref="ArgumentNullException"><paramref name="A"/> is null.</exception>
public static SymmetricMatrix operator -(SymmetricMatrix A) {
    // nameof keeps the reported parameter name refactor-safe (consistent with the operator * overload).
    if (A == null) {
        throw new ArgumentNullException(nameof(A));
    }
    double[][] resultStore = SymmetricMatrixAlgorithms.InitializeStorage(A.dimension);
    // Only the lower triangle (c <= r) is stored, so only it needs negating.
    for (int r = 0; r < A.dimension; r++) {
        for (int c = 0; c <= r; c++) {
            resultStore[r][c] = -A.values[r][c];
        }
    }
    return (new SymmetricMatrix(resultStore, A.dimension));
}
/// <summary>
/// Multiplies a symmetric matrix by a real factor.
/// </summary>
/// <param name="alpha">The factor.</param>
/// <param name="A">The matrix.</param>
/// <returns>The product of the matrix and the factor.</returns>
/// <exception cref="ArgumentNullException"><paramref name="A"/> is null.</exception>
public static SymmetricMatrix operator *(double alpha, SymmetricMatrix A) {
    if (A == null) {
        throw new ArgumentNullException(nameof(A));
    }
    int n = A.dimension;
    double[][] scaled = SymmetricMatrixAlgorithms.InitializeStorage(n);
    // Scale the stored lower triangle (c <= r); symmetry supplies the upper triangle.
    for (int r = 0; r < n; r++) {
        for (int c = 0; c <= r; c++) {
            scaled[r][c] = alpha * A.values[r][c];
        }
    }
    return (new SymmetricMatrix(scaled, n));
}
/// <summary>
/// Subtracts two symmetric matrices.
/// </summary>
/// <param name="A">The first matrix.</param>
/// <param name="B">The second matrix.</param>
/// <returns>The difference <paramref name="A"/> - <paramref name="B"/>.</returns>
/// <exception cref="ArgumentNullException"><paramref name="A"/> or <paramref name="B"/> is null.</exception>
/// <exception cref="DimensionMismatchException">The dimensions of the two matrices differ.</exception>
public static SymmetricMatrix operator -(SymmetricMatrix A, SymmetricMatrix B) {
    // nameof keeps the reported parameter names refactor-safe (consistent with the operator * overload).
    if (A == null) {
        throw new ArgumentNullException(nameof(A));
    }
    if (B == null) {
        throw new ArgumentNullException(nameof(B));
    }
    if (A.dimension != B.dimension) {
        throw new DimensionMismatchException();
    }
    double[][] differenceStore = SymmetricMatrixAlgorithms.InitializeStorage(A.dimension);
    // Only the stored lower triangle (c <= r) needs the element-wise subtraction.
    for (int r = 0; r < A.dimension; r++) {
        for (int c = 0; c <= r; c++) {
            differenceStore[r][c] = A.values[r][c] - B.values[r][c];
        }
    }
    return (new SymmetricMatrix(differenceStore, A.dimension));
}
/// <summary>
/// Computes the eigenvalues of the matrix.
/// </summary>
/// <returns>An array containing the matrix eigenvalues.</returns>
/// <remarks>
/// <para>If you require only the eigenvalues of the matrix, not its eigenvectors, this method will return them faster than
/// the <see cref="Eigensystem"/> method. If you do need the eigenvectors as well as the eigenvalues, use the <see cref="Eigensystem"/>
/// method instead.</para>
/// </remarks>
public double[] Eigenvalues() {
    // Work on a copy so the in-place Jacobi sweeps leave this matrix unchanged.
    double[][] workspace = SymmetricMatrixAlgorithms.Copy(values, dimension);
    // A null rotation accumulator skips the eigenvector bookkeeping, which is what makes this faster than Eigensystem.
    SymmetricMatrixAlgorithms.JacobiEigensystem(workspace, null, dimension);
    return (SymmetricMatrixAlgorithms.GetDiagonal(workspace, dimension));
}