Example #1
        /**
         * <p>
         * Creates a randomly generated set of orthonormal vectors.  At most it can generate as many
         * vectors as the dimension of the vectors.
         * </p>
         *
         * <p>
         * This is done by creating random vectors, then using Householder reflectors to make each one
         * orthogonal to all of the previously created vectors.
         * </p>
         *
         * <p>
         * NOTE: This employs a brute force O(N<sup>3</sup>) algorithm.
         * </p>
         *
         * @param dimen dimension of the space which the vectors will span.
         * @param numVectors How many vectors it should generate.
         * @param rand Used to create random vectors.
         * @return Array of numVectors random orthogonal vectors of unit length.
         */
        // is there a faster algorithm out there? This one is a bit sluggish
        public static FMatrixRMaj[] span(int dimen, int numVectors, IMersenneTwister rand)
        {
            if (dimen < numVectors)
            {
                throw new ArgumentException("The number of vectors must be less than or equal to the dimension");
            }

            FMatrixRMaj[] u = new FMatrixRMaj[numVectors];

            u[0] = RandomMatrices_FDRM.rectangle(dimen, 1, -1, 1, rand);
            NormOps_FDRM.normalizeF(u[0]);

            for (int i = 1; i < numVectors; i++)
            {
//            Console.WriteLine(" i = "+i);
                FMatrixRMaj a = new FMatrixRMaj(dimen, 1);
                FMatrixRMaj r = null;

                for (int j = 0; j < i; j++)
                {
//                Console.WriteLine("j = "+j);
                    if (j == 0)
                    {
                        r = RandomMatrices_FDRM.rectangle(dimen, 1, -1, 1, rand);
                    }

                    // find a vector that is normal to vector j
                    // u[i] = (1/2)*(r + Q[j]*r)
                    a.set(r);
                    VectorVectorMult_FDRM.householder(-2.0f, u[j], r, a);
                    CommonOps_FDRM.add(r, a, a);
                    CommonOps_FDRM.scale(0.5f, a);

//                UtilEjml.print(a);

                    FMatrixRMaj t = a;
                    a = r;
                    r = t;

                    // normalize it so it doesn't get too small
                    float val = NormOps_FDRM.normF(r);
                    if (val == 0 || float.IsNaN(val) || float.IsInfinity(val))
                    {
                        throw new InvalidOperationException("Failed sanity check");
                    }
                    CommonOps_FDRM.divide(r, val);
                }

                u[i] = r;
            }

            return(u);
        }
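
A minimal usage sketch of span follows. It assumes it is called from (or qualified with) the class that defines span, that the FDRM operation classes above are in scope, and that some implementation of IMersenneTwister is available; the MersenneTwisterFast name used here is an assumption and may differ in a given port, and using directives are omitted since the port's namespaces are not shown. The sketch builds an orthonormal basis of R^3 and prints the norms and pairwise inner products as a sanity check.

        // Usage sketch (see assumptions above): build a random orthonormal basis of R^3
        // and verify that each vector is unit length and orthogonal to the others.
        public static void SpanExample()
        {
            IMersenneTwister rand = new MersenneTwisterFast(234); // assumed IMersenneTwister implementation
            FMatrixRMaj[] basis = span(3, 3, rand);

            for (int i = 0; i < basis.Length; i++)
            {
                // each vector should have a Frobenius norm of ~1
                Console.WriteLine("||u[" + i + "]|| = " + NormOps_FDRM.normF(basis[i]));

                for (int j = i + 1; j < basis.Length; j++)
                {
                    // inner products between distinct vectors should be ~0
                    float dot = VectorVectorMult_FDRM.innerProd(basis[i], basis[j]);
                    Console.WriteLine("dot(u[" + i + "], u[" + j + "]) = " + dot);
                }
            }
        }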
Example #2
        /**
         * <p>
         * Creates a reflector from the provided vector.<br>
         * <br>
         * Q = I - &gamma; u u<sup>T</sup><br>
         * &gamma; = 2/||u||<sup>2</sup>
         * </p>
         *
         * <p>
         * In practice {@link VectorVectorMult_FDRM#householder(float, FMatrixD1, FMatrixD1, FMatrixD1) multHouseholder}
         * should be used for performance reasons since there is no need to calculate Q explicitly.
         * </p>
         *
         * @param u A vector. Not modified.
         * @return An orthogonal reflector.
         */
        public static FMatrixRMaj createReflector(FMatrix1Row u)
        {
            if (!MatrixFeatures_FDRM.isVector(u))
            {
                throw new ArgumentException("u must be a vector");
            }

            float norm  = NormOps_FDRM.fastNormF(u);
            float gamma = -2.0f / (norm * norm);

            FMatrixRMaj Q = CommonOps_FDRM.identity(u.getNumElements());

            CommonOps_FDRM.multAddTransB(gamma, u, u, Q);

            return(Q);
        }
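
The sketch below exercises createReflector under the same assumptions as the previous sketch (the method and the FDRM classes are in scope). It checks the defining property of a Householder reflector, Q*u = -u, and that Q*Q is the identity since Q is symmetric and orthogonal.

        // Usage sketch: build a reflector from u = [1, 2, 3]^T and check Q*u == -u
        // and Q*Q == I (up to floating point error).
        public static void ReflectorExample()
        {
            FMatrixRMaj u = new FMatrixRMaj(3, 1);
            u.set(0, 0, 1); u.set(1, 0, 2); u.set(2, 0, 3);

            FMatrixRMaj Q = createReflector(u);

            FMatrixRMaj Qu = new FMatrixRMaj(3, 1);
            CommonOps_FDRM.mult(Q, u, Qu);   // expect Qu == -u

            FMatrixRMaj QQ = new FMatrixRMaj(3, 3);
            CommonOps_FDRM.mult(Q, Q, QQ);   // expect QQ == identity
        }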
Example #3
        /**
         * <p>
         * Computes the Frobenius norm (F norm) of the difference between the two matrices:<br>
         * <br>
         * Sqrt{&sum;<sub>i=1:m</sub> &sum;<sub>j=1:n</sub> ( a<sub>ij</sub> - b<sub>ij</sub>)<sup>2</sup>}
         * </p>
         * <p>
         * This is often used as a cost function.
         * </p>
         *
         * @see NormOps_FDRM#fastNormF
         *
         * @param a m by n matrix. Not modified.
         * @param b m by n matrix. Not modified.
         *
         * @return The F norm of the difference matrix.
         */
        public static float diffNormF(FMatrixD1 a, FMatrixD1 b)
        {
            if (a.numRows != b.numRows || a.numCols != b.numCols)
            {
                throw new ArgumentException("Both matrices must have the same shape.");
            }

            int size = a.getNumElements();

            FMatrixRMaj diff = new FMatrixRMaj(size, 1);

            for (int i = 0; i < size; i++)
            {
                diff.set(i, b.get(i) - a.get(i));
            }
            return(NormOps_FDRM.normF(diff));
        }
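
A short sketch of diffNormF, again assuming the method is in scope. Since the two matrices differ in a single entry by 0.5, the Frobenius norm of the difference is exactly 0.5; the copy() call is assumed to mirror EJML's DMatrixRMaj.copy().

        // Usage sketch: two 2x2 matrices differing in one entry by 0.5,
        // so diffNormF should return 0.5.
        public static void DiffNormExample()
        {
            FMatrixRMaj a = new FMatrixRMaj(2, 2);
            a.set(0, 0, 1); a.set(0, 1, 2);
            a.set(1, 0, 3); a.set(1, 1, 4);

            FMatrixRMaj b = a.copy();              // copy() assumed to mirror EJML's API
            b.set(0, 0, b.get(0, 0) + 0.5f);

            float err = diffNormF(a, b);           // expect 0.5f
            Console.WriteLine("diffNormF = " + err);
        }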
Example #4
        /**
         * <p>
         * Given an eigenvalue it computes an eigenvector using inverse iteration:
         * <br>
         * for i=1:MAX {<br>
         *   (A - &mu;I)z<sup>(i)</sup> = q<sup>(i-1)</sup><br>
         *   q<sup>(i)</sup> = z<sup>(i)</sup> / ||z<sup>(i)</sup>||<br>
         * &lambda;<sup>(i)</sup> =  q<sup>(i)</sup><sup>T</sup> A  q<sup>(i)</sup><br>
         * }<br>
         * </p>
         * <p>
         * NOTE: If another eigenvalue is very similar to the provided one then there is a chance of the
         * iteration converging towards that eigenvector instead.  The larger the matrix is, the more
         * likely this is to happen.
         * </p>
         * @param A Matrix whose eigenvector is being computed.  Not modified.
         * @param eigenvalue The eigenvalue in the eigen pair.
         * @return The eigenvector or null if none could be found.
         */
        public static FEigenpair computeEigenVector(FMatrixRMaj A, float eigenvalue)
        {
            if (A.numRows != A.numCols)
            {
                throw new ArgumentException("Must be a square matrix.");
            }

            FMatrixRMaj M = new FMatrixRMaj(A.numRows, A.numCols);

            FMatrixRMaj x = new FMatrixRMaj(A.numRows, 1);
            FMatrixRMaj b = new FMatrixRMaj(A.numRows, 1);

            CommonOps_FDRM.fill(b, 1);

            // perturb the eigenvalue slightly so that it's not an exact solution the first time
//        eigenvalue -= eigenvalue*UtilEjml.F_EPS*10;

            float origEigenvalue = eigenvalue;

            SpecializedOps_FDRM.addIdentity(A, M, -eigenvalue);

            float threshold = NormOps_FDRM.normPInf(A) * UtilEjml.F_EPS;

            float prevError = float.MaxValue;
            bool  hasWorked = false;

            LinearSolverDense <FMatrixRMaj> solver = LinearSolverFactory_FDRM.linear(M.numRows);

            float perp = 0.0001f;

            for (int i = 0; i < 200; i++)
            {
                bool failed = false;
                // if the matrix is singular then the eigenvalue is within machine precision
                // of the true value, meaning that x must also be.
                if (!solver.setA(M))
                {
                    failed = true;
                }
                else
                {
                    solver.solve(b, x);
                }

                // see if solve silently failed
                if (MatrixFeatures_FDRM.hasUncountable(x))
                {
                    failed = true;
                }

                if (failed)
                {
                    if (!hasWorked)
                    {
                        // if it failed on the first trial try perturbing it some more
                        float val = i % 2 == 0 ? 1.0f - perp : 1.0f + perp;
                        // maybe this should be turned into a parameter allowing the user
                        // to configure the size of each step

                        eigenvalue = origEigenvalue * (float)Math.Pow(val, i / 2 + 1);
                        SpecializedOps_FDRM.addIdentity(A, M, -eigenvalue);
                    }
                    else
                    {
                        // otherwise assume that it was so accurate that the matrix was singular
                        // and return that result
                        return(new FEigenpair(eigenvalue, b));
                    }
                }
                else
                {
                    hasWorked = true;

                    b.set(x);
                    NormOps_FDRM.normalizeF(b);

                    // compute the residual
                    CommonOps_FDRM.mult(M, b, x);
                    float error = NormOps_FDRM.normPInf(x);

                    if (error - prevError > UtilEjml.F_EPS * 10)
                    {
                        // if the error increased it is probably converging towards a different
                        // eigenvalue
//                    CommonOps.set(b,1);
                        prevError = float.MaxValue;
                        hasWorked = false;
                        float val = i % 2 == 0 ? 1.0f - perp : 1.0f + perp;
                        eigenvalue = origEigenvalue * (float)Math.Pow(val, 1);
                    }
                    else
                    {
                        // see if it has converged
                        if (error <= threshold || Math.Abs(prevError - error) <= UtilEjml.F_EPS)
                        {
                            return(new FEigenpair(eigenvalue, b));
                        }

                        // update everything
                        prevError  = error;
                        eigenvalue = VectorVectorMult_FDRM.innerProdA(b, A, b);
                    }

                    SpecializedOps_FDRM.addIdentity(A, M, -eigenvalue);
                }
            }

            return(null);
        }
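
A hedged usage sketch of computeEigenVector: the symmetric 2x2 matrix [[2, 1], [1, 2]] has eigenvalues 3 and 1, so passing 3 should recover an eigenvector proportional to [1, 1]^T. The FEigenpair field name vector used below is an assumption based on EJML's eigenpair class and may differ in a given port.

        // Usage sketch: recover the eigenvector of a small symmetric matrix whose
        // eigenvalues (3 and 1) are known in closed form, then verify A*v ~= lambda*v.
        public static void EigenVectorExample()
        {
            FMatrixRMaj A = new FMatrixRMaj(2, 2);
            A.set(0, 0, 2); A.set(0, 1, 1);
            A.set(1, 0, 1); A.set(1, 1, 2);

            FEigenpair pair = computeEigenVector(A, 3.0f);
            if (pair != null)
            {
                FMatrixRMaj Av = new FMatrixRMaj(2, 1);
                CommonOps_FDRM.mult(A, pair.vector, Av);   // 'vector' field name is an assumption
                // Av should be approximately 3 * pair.vector, i.e. proportional to [3, 3]^T
            }
        }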