public static float quality(FMatrixRMaj orig, FMatrixRMaj U, FMatrixRMaj W, FMatrixRMaj Vt)
        {
            // Rebuild the matrix from its SVD factors: reconstructed = U*W*Vt
            FMatrixRMaj product = new FMatrixRMaj(U.numRows, W.numCols);
            CommonOps_FDRM.mult(U, W, product);

            FMatrixRMaj reconstructed = new FMatrixRMaj(product.numRows, Vt.numCols);
            CommonOps_FDRM.mult(product, Vt, reconstructed);

            // Relative error of the reconstruction:
            // ||orig - U*W*Vt||_F / ||U*W*Vt||_F
            float denom = NormOps_FDRM.normF(reconstructed);
            return SpecializedOps_FDRM.diffNormF(orig, reconstructed) / denom;
        }
Example #2
0
        /**
         * Another implementation of solve() that processes the matrices in a different order.
         * It seems to have the same runtime performance as {@link #solve} and is more complicated.
         * It is being kept around to avoid future replication of work.
         *
         * @param b A matrix that is n by m.  Not modified.
         * @param x An n by m matrix where the solution is written to.  Modified.
         */
        //@Override
        public override void solve(FMatrixRMaj b, FMatrixRMaj x)
        {
            // Validate dimensions: b must be (numRows x m) and x must be (numCols x m).
            if (b.numCols != x.numCols || b.numRows != numRows || x.numRows != numCols)
            {
                throw new ArgumentException("Unexpected matrix size");
            }

            // Copy b into x with the LU row pivoting applied; solving in place
            // (passing the same instance for b and x) is not supported.
            if (b != x)
            {
                SpecializedOps_FDRM.copyChangeRow(pivot, b, x);
            }
            else
            {
                throw new ArgumentException("Current doesn't support using the same matrix instance");
            }

            // Copy right hand side with pivoting
            int nx = b.numCols;

            // Work directly on x's backing array for speed.
            float[] dataX = x.data;

            // Solve L*Y = B(piv,:) by forward substitution. No division is performed,
            // consistent with L having a unit diagonal in the stored LU factorization.
            for (int k = 0; k < numCols; k++)
            {
                for (int i = k + 1; i < numCols; i++)
                {
                    for (int j = 0; j < nx; j++)
                    {
                        dataX[i * nx + j] -= dataX[k * nx + j] * dataLU[i * numCols + k];
                    }
                }
            }
            // Solve U*X = Y by back substitution.
            for (int k = numCols - 1; k >= 0; k--)
            {
                // Scale row k by the reciprocal of U's diagonal element.
                for (int j = 0; j < nx; j++)
                {
                    dataX[k * nx + j] /= dataLU[k * numCols + k];
                }
                // Eliminate the solved row k from every row above it.
                for (int i = 0; i < k; i++)
                {
                    for (int j = 0; j < nx; j++)
                    {
                        dataX[i * nx + j] -= dataX[k * nx + j] * dataLU[i * numCols + k];
                    }
                }
            }
        }
        /**
         * Solves the shifted upper-triangular system (A - real*I)[0:index,0:index] * x = -A[0:index,index],
         * writing the solution into r.  The diagonal shift on A is applied temporarily
         * and undone before returning, leaving _implicit.A unchanged.
         */
        private void solveUsingTriangle(float real, int index, FMatrixRMaj r)
        {
            FMatrixRMaj A = _implicit.A;

            // Temporarily shift the leading diagonal: A <- A - real*I.
            for (int row = 0; row < index; row++)
            {
                A.add(row, row, -real);
            }

            // r = -A[0:index, index]
            SpecializedOps_FDRM.subvector(A, 0, index, index, false, 0, r);
            CommonOps_FDRM.changeSign(r);

            // Back-substitute against the upper-triangular shifted block.
            TriangularSolver_FDRM.solveU(A.data, r.data, A.numRows, 0, index);

            // Undo the diagonal shift, restoring A to its original values.
            for (int row = 0; row < index; row++)
            {
                A.add(row, row, real);
            }
        }
Example #4
0
        /**
         * Computes the most dominant eigen vector of A using an inverted shifted matrix.
         * The inverted shifted matrix is defined as <b>B = (A - &alpha;I)<sup>-1</sup></b> and
         * can converge faster if &alpha; is chosen wisely.
         *
         * @param A An invertible square matrix matrix.
         * @param alpha Shifting factor.
         * @return If it converged or not.
         */
        public bool computeShiftInvert(FMatrixRMaj A, float alpha)
        {
            initPower(A);

            LinearSolverDense <FMatrixRMaj> solver = LinearSolverFactory_FDRM.linear(A.numCols);

            // B = A - alpha*I; each solve against B applies (A - alpha*I)^-1,
            // which is the shift-invert operator described in the method doc.
            SpecializedOps_FDRM.addIdentity(A, B, -alpha);
            solver.setA(B);

            bool converged = false;

            for (int i = 0; i < maxIterations && !converged; i++)
            {
                // q1 = B^-1 * q0
                solver.solve(q0, q1);
                // Normalize with the infinity norm to keep the iterate well scaled.
                float s = NormOps_FDRM.normPInf(q1);
                CommonOps_FDRM.divide(q1, s, q2);

                // NOTE(review): q0 is never reassigned in this loop — presumably
                // checkConverged() rotates the q0/q1/q2 work vectors internally;
                // confirm against initPower()/checkConverged().
                converged = checkConverged(A);
            }

            return(converged);
        }
        /**
         * Solves the shifted system (A - real*I)[0:index,0:index] * x = -A[0:index,index]
         * with a general dense solver, writing the solution into r.  Works on a copy
         * of the leading block, so _implicit.A is never modified.
         */
        private void solveWithLU(float real, int index, FMatrixRMaj r)
        {
            // Copy out the leading index-by-index block so the shift below
            // does not touch _implicit.A.
            FMatrixRMaj shifted = new FMatrixRMaj(index, index);
            CommonOps_FDRM.extract(_implicit.A, 0, index, 0, index, shifted, 0, 0);

            // shifted <- shifted - real*I
            for (int d = 0; d < index; d++)
            {
                shifted.add(d, d, -real);
            }

            r.reshape(index, 1, false);

            // r = -A[0:index, index]
            SpecializedOps_FDRM.subvector(_implicit.A, 0, index, index, false, 0, r);
            CommonOps_FDRM.changeSign(r);

            // TODO this must be very inefficient
            if (!solver.setA(shifted))
            {
                throw new InvalidOperationException("Solve failed");
            }
            solver.solve(r, r);
        }
Example #6
0
 /**
  * Constructs the row pivot matrix of this LU decomposition from the stored
  * pivot indices.
  *
  * @param pivot Storage for the result — presumably may be null, in which case
  *              pivotMatrix allocates a new matrix (EJML convention; TODO confirm).
  * @return The LU.numRows-by-LU.numRows pivot matrix.
  */
 public virtual FMatrixRMaj getRowPivot(FMatrixRMaj pivot)
 {
     // Note: the parameter shadows this.pivot (the stored pivot index array).
     return(SpecializedOps_FDRM.pivotMatrix(pivot, this.pivot, LU.numRows, false));
 }
Example #7
0
        /**
         * Computes the most dominant eigen vector of A using a shifted matrix.
         * The shifted matrix is defined as <b>B = A - &alpha;I</b> and can converge faster
         * if &alpha; is chosen wisely.  In general it is easier to choose a value for &alpha;
         * that will converge faster with the shift-invert strategy than this one.
         *
         * @param A The matrix.
         * @param alpha Shifting factor.
         * @return If it converged or not.
         */
        public bool computeShiftDirect(FMatrixRMaj A, float alpha)
        {
            // Form the shifted matrix B = A - alpha*I, then run the ordinary
            // (direct) power iteration on it.
            SpecializedOps_FDRM.addIdentity(A, B, -alpha);

            bool converged = computeDirect(B);
            return converged;
        }