Example 1
        /**
         * Computes the eigenvector with the largest eigenvalue using the direct
         * power method. This technique is the easiest to implement, but the slowest
         * to converge. It works only if all the eigenvalues are real.
         *
         * @param A The matrix. Not modified.
         * @return true if it converged, false otherwise.
         */
        public bool computeDirect(FMatrixRMaj A)
        {
            initPower(A);

            bool converged = false;

            for (int i = 0; i < maxIterations && !converged; i++)
            {
                // One power step: q1 = A * q0.
                CommonOps_FDRM.mult(A, q0, q1);

                // Renormalize with the infinity norm to keep the iterate from
                // overflowing or underflowing.
                float s = NormOps_FDRM.normPInf(q1);
                CommonOps_FDRM.divide(q1, s, q2);

                converged = checkConverged(A);
            }

            return converged;
        }
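
To see the iteration without the EJML scaffolding, here is a minimal self-contained sketch of the same direct power method on a plain 2D array. Everything here (class and method names, starting vector, tolerance) is illustrative and not part of the library used above:

        using System;

        static class PowerIterationSketch
        {
            // Approximates the dominant eigenvector of A. Assumes the
            // largest-magnitude eigenvalue is real and strictly dominant.
            public static float[] Dominant(float[,] A, int maxIterations = 1000, float tol = 1e-5f)
            {
                int n = A.GetLength(0);
                var q = new float[n];
                q[0] = 1f;  // arbitrary nonzero starting vector

                for (int it = 0; it < maxIterations; it++)
                {
                    // q1 = A * q
                    var q1 = new float[n];
                    for (int r = 0; r < n; r++)
                        for (int c = 0; c < n; c++)
                            q1[r] += A[r, c] * q[c];

                    // Renormalize by the infinity norm, mirroring normPInf above.
                    float s = 0f;
                    for (int r = 0; r < n; r++) s = Math.Max(s, Math.Abs(q1[r]));
                    for (int r = 0; r < n; r++) q1[r] /= s;

                    // Naive convergence test on the change between iterates. A
                    // sign-insensitive test would be needed if the dominant
                    // eigenvalue were negative, since the iterate then flips
                    // sign on every step.
                    float diff = 0f;
                    for (int r = 0; r < n; r++) diff = Math.Max(diff, Math.Abs(q1[r] - q[r]));
                    q = q1;
                    if (diff < tol) return q;
                }
                return q;
            }
        }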
Example 2
        /**
         * Computes the most dominant eigenvector of A using an inverted shifted matrix.
         * The inverted shifted matrix is defined as <b>B = (A - &alpha;I)<sup>-1</sup></b> and
         * can converge much faster when &alpha; is chosen close to the eigenvalue being sought.
         *
         * @param A An invertible square matrix. Not modified.
         * @param alpha Shifting factor.
         * @return true if it converged, false otherwise.
         */
        public bool computeShiftInvert(FMatrixRMaj A, float alpha)
        {
            initPower(A);

            LinearSolverDense<FMatrixRMaj> solver = LinearSolverFactory_FDRM.linear(A.numCols);

            // B = A - alpha*I; each solve() below then applies B^-1 implicitly.
            SpecializedOps_FDRM.addIdentity(A, B, -alpha);
            solver.setA(B);

            bool converged = false;

            for (int i = 0; i < maxIterations && !converged; i++)
            {
                // One inverse-power step: solve B * q1 = q0, i.e. q1 = B^-1 * q0.
                solver.solve(q0, q1);

                // Renormalize with the infinity norm.
                float s = NormOps_FDRM.normPInf(q1);
                CommonOps_FDRM.divide(q1, s, q2);

                converged = checkConverged(A);
            }

            return converged;
        }
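
The speedup comes from the spectrum of B: if A has eigenvalues λ_i, then B = (A - αI)^-1 has eigenvalues 1/(λ_i - α), so a shift close to the eigenvalue being sought makes its image in B strongly dominant. A tiny numeric illustration (the eigenvalues and the shift below are made up for this sketch):

        using System;

        class ShiftInvertRatioDemo
        {
            static void Main()
            {
                float[] lambda = { 1f, 2f, 10f };  // eigenvalues of A (illustrative)
                float alpha = 9.5f;                // shift near the eigenvalue we want

                // B = (A - alpha*I)^-1 has eigenvalues 1/(lambda_i - alpha):
                // prints -0.118, -0.133, and 2.000.
                foreach (float l in lambda)
                    Console.WriteLine($"lambda = {l,4}  ->  1/(lambda - alpha) = {1f / (l - alpha):F3}");

                // Direct power method dominance ratio:  |10 / 2|     =  5
                // Shift-invert dominance ratio:         |2 / -0.133| = 15
                // The error contracts by roughly the inverse of this ratio on
                // each iteration, so the shifted iteration needs noticeably
                // fewer steps to converge.
            }
        }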