/**
 * <p>
 * Creates a randomly generated set of orthonormal vectors. At most it can generate the same
 * number of vectors as the dimension of the vectors.
 * </p>
 *
 * <p>
 * This is done by creating random vectors then ensuring that they are orthogonal
 * to all the ones previously created with reflectors.
 * </p>
 *
 * <p>
 * NOTE: This employs a brute force O(N<sup>3</sup>) algorithm.
 * </p>
 *
 * @param dimen dimension of the space which the vectors will span.
 * @param numVectors How many vectors it should generate.
 * @param rand Used to create random vectors.
 * @return Array of N random orthogonal vectors of unit length.
 */
// is there a faster algorithm out there? This one is a bit sluggish
public static DMatrixRMaj[] span(int dimen, int numVectors, IMersenneTwister rand)
{
    if (dimen < numVectors)
    {
        throw new ArgumentException("The number of vectors must be less than or equal to the dimension");
    }

    DMatrixRMaj[] u = new DMatrixRMaj[numVectors];

    u[0] = RandomMatrices_DDRM.rectangle(dimen, 1, -1, 1, rand);
    NormOps_DDRM.normalizeF(u[0]);

    for (int i = 1; i < numVectors; i++)
    {
        // Console.WriteLine(" i = "+i);
        DMatrixRMaj a = new DMatrixRMaj(dimen, 1);
        DMatrixRMaj r = null;

        for (int j = 0; j < i; j++)
        {
            // Console.WriteLine("j = "+j);
            if (j == 0)
            {
                r = RandomMatrices_DDRM.rectangle(dimen, 1, -1, 1, rand);
            }

            // find a vector that is normal to vector j
            // u[i] = (1/2)*(r + Q[j]*r)
            a.set(r);
            VectorVectorMult_DDRM.householder(-2.0, u[j], r, a);
            CommonOps_DDRM.add(r, a, a);
            CommonOps_DDRM.scale(0.5, a);
            // UtilEjml.print(a);

            DMatrixRMaj t = a;
            a = r;
            r = t;

            // normalize it so it doesn't get too small
            double val = NormOps_DDRM.normF(r);
            if (val == 0 || double.IsNaN(val) || double.IsInfinity(val))
            {
                throw new InvalidOperationException("Failed sanity check");
            }
            CommonOps_DDRM.divide(r, val);
        }

        u[i] = r;
    }

    return(u);
}
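// A minimal usage sketch of span(). The hosting class name (RandomMatrices_DDRM) and the
// IMersenneTwister implementation (MersenneTwisterFast) are assumptions, not confirmed by
// this source; the check simply verifies unit norms and pairwise orthogonality of the output.
/*
IMersenneTwister rand = new MersenneTwisterFast(234);      // assumed implementation

// generate 3 orthonormal vectors in R^5
DMatrixRMaj[] u = RandomMatrices_DDRM.span(5, 3, rand);    // assumed hosting class

for (int i = 0; i < u.Length; i++)
{
    // each vector should have unit length
    Console.WriteLine("||u[" + i + "]|| = " + NormOps_DDRM.normF(u[i]));

    for (int j = i + 1; j < u.Length; j++)
    {
        // and be orthogonal to every other vector (inner product ~ 0)
        Console.WriteLine("u[" + i + "] . u[" + j + "] = "
                          + VectorVectorMult_DDRM.innerProd(u[i], u[j]));
    }
}
*/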
/**
 * <p>
 * Creates a reflector from the provided vector.<br>
 * <br>
 * Q = I - γ u u<sup>T</sup><br>
 * γ = 2/||u||<sup>2</sup>
 * </p>
 *
 * <p>
 * In practice {@link VectorVectorMult_DDRM#householder(double, DMatrixD1, DMatrixD1, DMatrixD1) multHouseholder}
 * should be used for performance reasons since there is no need to calculate Q explicitly.
 * </p>
 *
 * @param u A vector. Not modified.
 * @return An orthogonal reflector.
 */
public static DMatrixRMaj createReflector(DMatrix1Row u)
{
    if (!MatrixFeatures_DDRM.isVector(u))
    {
        throw new ArgumentException("u must be a vector");
    }

    double norm = NormOps_DDRM.fastNormF(u);
    double gamma = -2.0 / (norm * norm);

    DMatrixRMaj Q = CommonOps_DDRM.identity(u.getNumElements());
    CommonOps_DDRM.multAddTransB(gamma, u, u, Q);

    return(Q);
}
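// A quick sketch of the reflector's defining property, Q u = -u. The hosting class name
// (SpecializedOps_DDRM) and the random-number generator class are assumptions here.
/*
IMersenneTwister rand = new MersenneTwisterFast(24);       // assumed implementation

DMatrixRMaj u = RandomMatrices_DDRM.rectangle(4, 1, -1, 1, rand);
DMatrixRMaj Q = SpecializedOps_DDRM.createReflector(u);    // assumed hosting class

// reflecting u through the hyperplane normal to u flips its sign: Q*u = -u
DMatrixRMaj Qu = new DMatrixRMaj(4, 1);
CommonOps_DDRM.mult(Q, u, Qu);

// so u + Q*u should be (numerically) the zero vector
DMatrixRMaj sum = new DMatrixRMaj(4, 1);
CommonOps_DDRM.add(u, Qu, sum);
Console.WriteLine("||u + Q*u|| = " + NormOps_DDRM.normF(sum));
*/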
/**
 * <p>
 * Computes the F norm of the difference between the two Matrices:<br>
 * <br>
 * Sqrt{∑<sub>i=1:m</sub> ∑<sub>j=1:n</sub> ( a<sub>ij</sub> - b<sub>ij</sub>)<sup>2</sup>}
 * </p>
 * <p>
 * This is often used as a cost function.
 * </p>
 *
 * @see NormOps_DDRM#fastNormF
 *
 * @param a m by n matrix. Not modified.
 * @param b m by n matrix. Not modified.
 *
 * @return The F norm of the difference matrix.
 */
public static double diffNormF(DMatrixD1 a, DMatrixD1 b)
{
    if (a.numRows != b.numRows || a.numCols != b.numCols)
    {
        throw new ArgumentException("Both matrices must have the same shape.");
    }

    int size = a.getNumElements();

    DMatrixRMaj diff = new DMatrixRMaj(size, 1);

    for (int i = 0; i < size; i++)
    {
        diff.set(i, b.get(i) - a.get(i));
    }

    return(NormOps_DDRM.normF(diff));
}
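// A brief illustration of diffNormF() as a scalar cost between two matrices. The hosting
// class name and the inline-data constructor (assumed to mirror the Java EJML signature
// DMatrixRMaj(rows, cols, rowMajor, data...)) are assumptions.
/*
DMatrixRMaj a = new DMatrixRMaj(2, 2, true, 1, 2, 3, 4);
DMatrixRMaj b = new DMatrixRMaj(2, 2, true, 1, 2, 3, 5);

// the matrices differ only in the last element, so the cost is sqrt((5-4)^2) = 1
double cost = SpecializedOps_DDRM.diffNormF(a, b);          // assumed hosting class
Console.WriteLine("diffNormF = " + cost);
*/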
/**
 * <p>
 * Given an eigenvalue it computes an eigenvector using inverse iteration:
 * <br>
 * for i=1:MAX {<br>
 *   (A - μI)z<sup>(i)</sup> = q<sup>(i-1)</sup><br>
 *   q<sup>(i)</sup> = z<sup>(i)</sup> / ||z<sup>(i)</sup>||<br>
 *   λ<sup>(i)</sup> = q<sup>(i)T</sup> A q<sup>(i)</sup><br>
 * }<br>
 * </p>
 * <p>
 * NOTE: If there is another eigenvalue that is very similar to the provided one then there
 * is a chance of it converging towards that one instead. The larger a matrix is the more
 * likely this is to happen.
 * </p>
 * @param A Matrix whose eigenvector is being computed. Not modified.
 * @param eigenvalue The eigenvalue in the eigen pair.
 * @return The eigenvector or null if none could be found.
 */
public static DEigenpair computeEigenVector(DMatrixRMaj A, double eigenvalue)
{
    if (A.numRows != A.numCols)
    {
        throw new ArgumentException("Must be a square matrix.");
    }

    DMatrixRMaj M = new DMatrixRMaj(A.numRows, A.numCols);

    DMatrixRMaj x = new DMatrixRMaj(A.numRows, 1);
    DMatrixRMaj b = new DMatrixRMaj(A.numRows, 1);

    CommonOps_DDRM.fill(b, 1);

    // perturb the eigenvalue slightly so that it's not an exact solution the first time
    // eigenvalue -= eigenvalue*UtilEjml.EPS*10;

    double origEigenvalue = eigenvalue;

    SpecializedOps_DDRM.addIdentity(A, M, -eigenvalue);

    double threshold = NormOps_DDRM.normPInf(A) * UtilEjml.EPS;

    double prevError = double.MaxValue;
    bool hasWorked = false;

    LinearSolverDense<DMatrixRMaj> solver = LinearSolverFactory_DDRM.linear(M.numRows);

    double perp = 0.0001;

    for (int i = 0; i < 200; i++)
    {
        bool failed = false;

        // if the matrix is singular then the eigenvalue is within machine precision
        // of the true value, meaning that x must also be.
        if (!solver.setA(M))
        {
            failed = true;
        }
        else
        {
            solver.solve(b, x);
        }

        // see if solve silently failed
        if (MatrixFeatures_DDRM.hasUncountable(x))
        {
            failed = true;
        }

        if (failed)
        {
            if (!hasWorked)
            {
                // if it failed on the first trial try perturbing it some more
                double val = i % 2 == 0 ? 1.0 - perp : 1.0 + perp;
                // maybe this should be turned into a parameter allowing the user
                // to configure the size of each step
                eigenvalue = origEigenvalue * Math.Pow(val, i / 2 + 1);
                SpecializedOps_DDRM.addIdentity(A, M, -eigenvalue);
            }
            else
            {
                // otherwise assume that it was so accurate that the matrix was singular
                // and return that result
                return(new DEigenpair(eigenvalue, b));
            }
        }
        else
        {
            hasWorked = true;

            b.set(x);
            NormOps_DDRM.normalizeF(b);

            // compute the residual
            CommonOps_DDRM.mult(M, b, x);
            double error = NormOps_DDRM.normPInf(x);

            if (error - prevError > UtilEjml.EPS * 10)
            {
                // if the error increased it is probably converging towards a different
                // eigenvalue
                // CommonOps.set(b,1);
                prevError = double.MaxValue;
                hasWorked = false;
                double val = i % 2 == 0 ? 1.0 - perp : 1.0 + perp;
                eigenvalue = origEigenvalue * Math.Pow(val, 1);
            }
            else
            {
                // see if it has converged
                if (error <= threshold || Math.Abs(prevError - error) <= UtilEjml.EPS)
                {
                    return(new DEigenpair(eigenvalue, b));
                }

                // update everything
                prevError = error;
                eigenvalue = VectorVectorMult_DDRM.innerProdA(b, A, b);
            }

            SpecializedOps_DDRM.addIdentity(A, M, -eigenvalue);
        }
    }

    return(null);
}
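// A hedged end-to-end sketch of inverse iteration on a small symmetric matrix. The hosting
// class (EigenOps_DDRM) and the DEigenpair field names (value, vector) are assumptions based
// on the Java EJML API; the residual A*v - λ*v should come out close to zero.
/*
DMatrixRMaj A = new DMatrixRMaj(2, 2, true, 2, 1, 1, 2);   // eigenvalues are 3 and 1

DEigenpair pair = EigenOps_DDRM.computeEigenVector(A, 3.0); // assumed hosting class

if (pair != null)
{
    // residual A*v - lambda*v should be close to zero
    DMatrixRMaj Av = new DMatrixRMaj(2, 1);
    CommonOps_DDRM.mult(A, pair.vector, Av);

    DMatrixRMaj lv = new DMatrixRMaj(2, 1);
    CommonOps_DDRM.scale(pair.value, pair.vector, lv);

    Console.WriteLine("residual = " + SpecializedOps_DDRM.diffNormF(Av, lv));
}
*/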