/**
         * <p>
         * Computes a metric which measures the quality of an eigenvalue decomposition.
         * If the returned value is close to or smaller than 1e-15 then the
         * decomposition is accurate to within machine precision.
         * </p>
         * <p>
         * EVD quality is defined as:<br>
         * <br>
         * Quality = ||A*V - V*D|| / ||A*V||.
         * </p>
         *
         * @param orig The original matrix. Not modified.
         * @param eig EVD of the original matrix. Not modified.
         * @return The quality of the decomposition.
         */
        public static float quality(FMatrixRMaj orig, EigenDecomposition_F32<FMatrixRMaj> eig)
        {
            FMatrixRMaj A = orig;
            FMatrixRMaj V = EigenOps_FDRM.createMatrixV(eig);
            FMatrixRMaj D = EigenOps_FDRM.createMatrixD(eig);

            // Left-hand side of the eigen identity: left = A*V
            FMatrixRMaj left = new FMatrixRMaj(A.numRows, V.numCols);
            CommonOps_FDRM.mult(A, V, left);

            // Right-hand side of the eigen identity: right = V*D
            FMatrixRMaj right = new FMatrixRMaj(V.numRows, D.numCols);
            CommonOps_FDRM.mult(V, D, right);

            // Residual A*V - V*D; its relative Frobenius norm is the quality metric.
            FMatrixRMaj residual = new FMatrixRMaj(left.numRows, left.numCols);
            CommonOps_FDRM.subtract(left, right, residual);

            return NormOps_FDRM.normF(residual) / NormOps_FDRM.normF(left);
        }
        /**
         * Computes a metric which measures the quality of a singular value
         * decomposition by reconstructing the original matrix from its factors:
         * Quality = ||A - U*W*Vt|| / ||U*W*Vt|| (Frobenius norms).
         *
         * @param orig The original matrix. Not modified.
         * @param U Left singular vectors. Not modified.
         * @param W Diagonal matrix of singular values. Not modified.
         * @param Vt Transposed right singular vectors. Not modified.
         * @return The quality of the decomposition.
         */
        public static float quality(FMatrixRMaj orig, FMatrixRMaj U, FMatrixRMaj W, FMatrixRMaj Vt)
        {
            // Reconstruct the original matrix: foundA = (U*W)*Vt
            FMatrixRMaj product = new FMatrixRMaj(U.numRows, W.numCols);
            CommonOps_FDRM.mult(U, W, product);

            FMatrixRMaj reconstructed = new FMatrixRMaj(product.numRows, Vt.numCols);
            CommonOps_FDRM.mult(product, Vt, reconstructed);

            // Relative error between the original and the reconstruction.
            float denominator = NormOps_FDRM.normF(reconstructed);

            return SpecializedOps_FDRM.diffNormF(orig, reconstructed) / denominator;
        }
        /**
         * Solves for the eigenvector(s) associated with the real eigenvalue
         * {@code real}, handling the case where that eigenvalue is duplicated.
         *
         * NOTE(review): relies on instance state declared elsewhere in this class
         * (N, Q, _implicit, eigenvectorTemp, eigenvectors, and the two solve helpers).
         *
         * @param real The real eigenvalue being solved for.
         * @param first Index of the first element to process; elements below it are
         *              solved via the triangular/LU helpers.
         * @param isTriangle If true a direct triangular solve is used for the leading
         *                   portion, otherwise an LU-based solve.
         */
        private void solveEigenvectorDuplicateEigenvalue(float real, int first, bool isTriangle)
        {
            // Scale for the relative eigenvalue comparison below; fall back to 1
            // so a zero eigenvalue does not cause a division by zero.
            float scale = Math.Abs(real);

            if (scale == 0)
            {
                scale = 1;
            }

            // Work vector of length N used to build each eigenvector.
            eigenvectorTemp.reshape(N, 1, false);
            eigenvectorTemp.zero();

            // Solve for the leading 'first' elements of the work vector.
            if (first > 0)
            {
                if (isTriangle)
                {
                    solveUsingTriangle(real, first, eigenvectorTemp);
                }
                else
                {
                    solveWithLU(real, first, eigenvectorTemp);
                }
            }

            // NOTE(review): this reshape repeats the one above with identical
            // arguments and appears redundant — confirm before removing.
            eigenvectorTemp.reshape(N, 1, false);

            for (int i = first; i < N; i++)
            {
                Complex_F32 c = _implicit.eigenvalues[N - i - 1];

                // Only real eigenvalues matching 'real' within ~100*eps relative
                // tolerance are assigned an eigenvector here.
                if (c.isReal() && Math.Abs(c.real - real) / scale < 100.0f * UtilEjml.F_EPS)
                {
                    // Select component i in the working basis...
                    eigenvectorTemp.data[i] = 1;

                    // ...and back-transform it: v = Q^T * eigenvectorTemp.
                    FMatrixRMaj v = new FMatrixRMaj(N, 1);
                    CommonOps_FDRM.multTransA(Q, eigenvectorTemp, v);
                    eigenvectors[N - i - 1] = v;
                    // normalizeF modifies v in place; the reference stored above
                    // points at the same matrix, so it sees the normalized values.
                    NormOps_FDRM.normalizeF(v);

                    // Reset the work vector for the next matching eigenvalue.
                    eigenvectorTemp.data[i] = 0;
                }
            }
        }
        // Example #4 ("예제 #4") — artifact from the code-sharing site this was scraped from.
        // 0
        /**
         * Computes the eigenvector with the largest eigenvalue using the direct
         * power method.  This technique is the easiest to implement, but the
         * slowest to converge.  Works only if all the eigenvalues are real.
         *
         * @param A The matrix. Not modified.
         * @return If it converged or not.
         */
        public bool computeDirect(FMatrixRMaj A)
        {
            initPower(A);

            bool hasConverged = false;

            for (int iteration = 0; iteration < maxIterations && !hasConverged; iteration++)
            {
                // Power step: q1 = A*q0, then rescale by the infinity norm.
                CommonOps_FDRM.mult(A, q0, q1);
                float norm = NormOps_FDRM.normPInf(q1);
                CommonOps_FDRM.divide(q1, norm, q2);

                hasConverged = checkConverged(A);
            }

            return hasConverged;
        }
        // Example #5 ("예제 #5") — artifact from the code-sharing site this was scraped from.
        // 0
        /**
         * Computes the most dominant eigenvector of A using an inverted shifted
         * matrix.  The inverted shifted matrix is defined as
         * <b>B = (A - &alpha;I)<sup>-1</sup></b> and can converge faster if
         * &alpha; is chosen wisely.
         *
         * @param A An invertible square matrix.
         * @param alpha Shifting factor.
         * @return If it converged or not.
         */
        public bool computeShiftInvert(FMatrixRMaj A, float alpha)
        {
            initPower(A);

            // Form B = A - alpha*I; solving B*x = q is equivalent to x = B^-1 * q,
            // so no explicit inverse is ever computed.
            LinearSolverDense<FMatrixRMaj> solver = LinearSolverFactory_FDRM.linear(A.numCols);
            SpecializedOps_FDRM.addIdentity(A, B, -alpha);
            solver.setA(B);

            bool hasConverged = false;

            for (int iteration = 0; iteration < maxIterations && !hasConverged; iteration++)
            {
                // Inverse power step: q1 = B^-1 * q0, then rescale by the infinity norm.
                solver.solve(q0, q1);
                float norm = NormOps_FDRM.normPInf(q1);
                CommonOps_FDRM.divide(q1, norm, q2);

                hasConverged = checkConverged(A);
            }

            return hasConverged;
        }