Example #1
        /// <summary>
        /// Returns the result of dividing each element by 'val':
        /// <code>b[i,j] = a[i,j]/val</code>
        /// </summary>
        /// <param name="val">Divisor</param>
        /// <returns>Matrix with its elements divided by the specified value.</returns>
        /// <see cref="CommonOps_DDRM.divide(DMatrixD1, double)"/>
        public override SimpleMatrixD divide(double val)
        {
            SimpleMatrixD ret = copy();
            var           rm  = ret.getMatrix();

            CommonOps_DDRM.divide(rm, val);
            return(ret);
        }
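A minimal usage sketch for the method above. Building the matrix through SimpleMatrixD.wrap is an assumption (mirroring EJML's SimpleMatrix.wrap); only divide(double) itself comes from the snippet.

        // build a 2x2 matrix; wrap(...) mirroring EJML's SimpleMatrix.wrap() is assumed here
        DMatrixRMaj m = new DMatrixRMaj(2, 2);
        m.set(0, 0, 2.0); m.set(0, 1, 4.0);
        m.set(1, 0, 6.0); m.set(1, 1, 8.0);

        SimpleMatrixD a = SimpleMatrixD.wrap(m);
        SimpleMatrixD b = a.divide(2.0);   // b[i,j] = a[i,j]/2.0; 'a' is left unchanged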
Example #2
        /**
         * <p>
         * Creates a randomly generated set of orthonormal vectors.  At most it can generate the same
         * number of vectors as the dimension of the vectors.
         * </p>
         *
         * <p>
         * This is done by creating random vectors and then using Householder reflectors to make
         * each new vector orthogonal to all of the previously created ones.
         * </p>
         *
         * <p>
         * NOTE: This employs a brute force O(N<sup>3</sup>) algorithm.
         * </p>
         *
         * @param dimen dimension of the space which the vectors will span.
         * @param numVectors How many vectors it should generate.
         * @param rand Used to create random vectors.
         * @return Array of N random orthogonal vectors of unit length.
         */
        // is there a faster algorithm out there? This one is a bit sluggish
        public static DMatrixRMaj[] span(int dimen, int numVectors, IMersenneTwister rand)
        {
            if (dimen < numVectors)
            {
                throw new ArgumentException("The number of vectors must be less than or equal to the dimension");
            }

            DMatrixRMaj[] u = new DMatrixRMaj[numVectors];

            u[0] = RandomMatrices_DDRM.rectangle(dimen, 1, -1, 1, rand);
            NormOps_DDRM.normalizeF(u[0]);

            for (int i = 1; i < numVectors; i++)
            {
//            Console.WriteLine(" i = "+i);
                DMatrixRMaj a = new DMatrixRMaj(dimen, 1);
                DMatrixRMaj r = null;

                for (int j = 0; j < i; j++)
                {
//                Console.WriteLine("j = "+j);
                    if (j == 0)
                    {
                        r = RandomMatrices_DDRM.rectangle(dimen, 1, -1, 1, rand);
                    }

                    // find a vector that is normal to vector j
                    // u[i] = (1/2)*(r + Q[j]*r)
                    a.set(r);
                    VectorVectorMult_DDRM.householder(-2.0, u[j], r, a);
                    CommonOps_DDRM.add(r, a, a);
                    CommonOps_DDRM.scale(0.5, a);

//                UtilEjml.print(a);

                    DMatrixRMaj t = a;
                    a = r;
                    r = t;

                    // normalize it so it doesn't get too small
                    double val = NormOps_DDRM.normF(r);
                    if (val == 0 || double.IsNaN(val) || double.IsInfinity(val))
                    {
                        throw new InvalidOperationException("Failed sanity check");
                    }
                    CommonOps_DDRM.divide(r, val);
                }

                u[i] = r;
            }

            return(u);
        }
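A short usage sketch for span(). The snippet does not show the containing class or a concrete random generator, so calling it as RandomMatrices_DDRM.span (where this method lives in EJML) and using MersenneTwisterFast as the IMersenneTwister implementation are assumptions here.

        IMersenneTwister rand = new MersenneTwisterFast(234);   // concrete RNG type assumed

        // three orthonormal vectors spanning part of R^5
        DMatrixRMaj[] basis = RandomMatrices_DDRM.span(5, 3, rand);

        double len = NormOps_DDRM.normF(basis[0]);                          // ~1.0 (unit length)
        double dot = VectorVectorMult_DDRM.innerProd(basis[0], basis[1]);   // ~0.0 (orthogonal)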
Example #3
        /**
         * This method computes the eigenvector with the largest eigenvalue by using the
         * direct power method. This technique is the easiest to implement, but the slowest to converge.
         * It only works if all of the eigenvalues are real.
         *
         * @param A The matrix. Not modified.
         * @return If it converged or not.
         */
        public bool computeDirect(DMatrixRMaj A)
        {
            initPower(A);

            bool converged = false;

            for (int i = 0; i < maxIterations && !converged; i++)
            {
//            q0.print();

                CommonOps_DDRM.mult(A, q0, q1);
                double s = NormOps_DDRM.normPInf(q1);
                CommonOps_DDRM.divide(q1, s, q2);

                converged = checkConverged(A);
            }

            return(converged);
        }
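A hedged usage sketch for the power method above. The containing class is not shown in the snippet; the class name EigenPowerMethod_DDRM, its size-taking constructor, and the getEigenVector() accessor below are assumed from EJML.

        var power = new EigenPowerMethod_DDRM(2);   // assumed: size of the square matrix

        DMatrixRMaj A = new DMatrixRMaj(2, 2);      // eigenvalues 3 and 1, both real
        A.set(0, 0, 3.0); A.set(0, 1, 0.0);
        A.set(1, 0, 0.0); A.set(1, 1, 1.0);

        if (power.computeDirect(A))
        {
            DMatrixRMaj v = power.getEigenVector(); // dominant eigenvector, here ~[1, 0]^T
        }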
Example #4
        /**
         * Computes the dominant eigenvector of A using an inverted, shifted matrix.
         * The inverted shifted matrix is defined as <b>B = (A - &alpha;I)<sup>-1</sup></b> and
         * can converge faster if &alpha; is chosen wisely.
         *
         * @param A An invertible square matrix.
         * @param alpha Shifting factor.
         * @return If it converged or not.
         */
        public bool computeShiftInvert(DMatrixRMaj A, double alpha)
        {
            initPower(A);

            LinearSolverDense <DMatrixRMaj> solver = LinearSolverFactory_DDRM.linear(A.numCols);

            SpecializedOps_DDRM.addIdentity(A, B, -alpha);
            solver.setA(B);

            bool converged = false;

            for (int i = 0; i < maxIterations && !converged; i++)
            {
                solver.solve(q0, q1);
                double s = NormOps_DDRM.normPInf(q1);
                CommonOps_DDRM.divide(q1, s, q2);

                converged = checkConverged(A);
            }

            return(converged);
        }
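The shift-invert variant converges toward the eigenvector whose eigenvalue is closest to alpha, so alpha is usually picked near an estimate of the eigenvalue of interest. As above, the containing class name EigenPowerMethod_DDRM is an assumption taken from EJML.

        var power = new EigenPowerMethod_DDRM(2);   // assumed class name, as in the previous sketch

        DMatrixRMaj A = new DMatrixRMaj(2, 2);      // eigenvalues 3 and 1
        A.set(0, 0, 3.0); A.set(0, 1, 0.0);
        A.set(1, 0, 0.0); A.set(1, 1, 1.0);

        // shifting close to the dominant eigenvalue (3) typically speeds up convergence
        bool ok = power.computeShiftInvert(A, 2.9);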
Example #5
 public void divide(Matrix A, double val, Matrix output)
 {
     CommonOps_DDRM.divide((DMatrixRMaj)A, (double)val, (DMatrixRMaj)output);
 }
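The wrapper above just forwards to the three-argument overload of CommonOps_DDRM.divide, which writes a[i,j]/val into a separate output matrix instead of dividing in place. A direct call, for comparison:

 DMatrixRMaj a = new DMatrixRMaj(2, 2);
 a.set(0, 0, 1.0); a.set(0, 1, 2.0);
 a.set(1, 0, 3.0); a.set(1, 1, 4.0);

 DMatrixRMaj output = new DMatrixRMaj(2, 2);
 CommonOps_DDRM.divide(a, 2.0, output);   // output[i,j] = a[i,j]/2.0; 'a' is unchanged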
Example #6
        /**
         * Computes the QR decomposition of the provided matrix.
         *
         * @param A Matrix which is to be decomposed.  Not modified.
         */
        public void decompose(DMatrixRMaj A)
        {
            this.QR = (DMatrixRMaj)A.copy();

            int N = Math.Min(A.numCols, A.numRows);

            gammas = new double[A.numCols];

            DMatrixRMaj A_small = new DMatrixRMaj(A.numRows, A.numCols);
            DMatrixRMaj A_mod   = new DMatrixRMaj(A.numRows, A.numCols);
            DMatrixRMaj v       = new DMatrixRMaj(A.numRows, 1);
            DMatrixRMaj Q_k     = new DMatrixRMaj(A.numRows, A.numRows);

            for (int i = 0; i < N; i++)
            {
                // reshape temporary variables
                A_small.reshape(QR.numRows - i, QR.numCols - i, false);
                A_mod.reshape(A_small.numRows, A_small.numCols, false);
                v.reshape(A_small.numRows, 1, false);
                Q_k.reshape(v.getNumElements(), v.getNumElements(), false);

                // use extract matrix to get the column that is to be zeroed
                CommonOps_DDRM.extract(QR, i, QR.numRows, i, i + 1, v, 0, 0);

                double max = CommonOps_DDRM.elementMaxAbs(v);

                if (max > 0 && v.getNumElements() > 1)
                {
                    // normalize to reduce overflow issues
                    CommonOps_DDRM.divide(v, max);

                    // compute the magnitude of the vector
                    double tau = NormOps_DDRM.normF(v);

                    if (v.get(0) < 0)
                    {
                        tau *= -1.0;
                    }

                    double u_0   = v.get(0) + tau;
                    double gamma = u_0 / tau;

                    CommonOps_DDRM.divide(v, u_0);
                    v.set(0, 1.0);

                    // extract the submatrix of A which is being operated on
                    CommonOps_DDRM.extract(QR, i, QR.numRows, i, QR.numCols, A_small, 0, 0);

                    // A = (I - &gamma;*u*u<sup>T</sup>)A
                    CommonOps_DDRM.setIdentity(Q_k);
                    CommonOps_DDRM.multAddTransB(-gamma, v, v, Q_k);
                    CommonOps_DDRM.mult(Q_k, A_small, A_mod);

                    // save the results
                    CommonOps_DDRM.insert(A_mod, QR, i, i);
                    CommonOps_DDRM.insert(v, QR, i, i);
                    QR.unsafe_set(i, i, -tau * max);

                    // save gamma for recomputing Q later on
                    gammas[i] = gamma;
                }
            }
        }
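A hedged usage sketch for the decomposition above. The containing class is not shown in the snippet; the name QRExampleOperations and its getQ()/getR() accessors are assumed from EJML's example code, and MersenneTwisterFast is an assumed IMersenneTwister implementation. Only decompose(A) itself comes from the snippet.

        IMersenneTwister rand = new MersenneTwisterFast(234);
        DMatrixRMaj      A    = RandomMatrices_DDRM.rectangle(5, 3, -1, 1, rand);

        var qr = new QRExampleOperations();   // assumed class name
        qr.decompose(A);

        DMatrixRMaj Q = qr.getQ();            // assumed accessors
        DMatrixRMaj R = qr.getR();

        // Q*R should reproduce A up to floating-point error
        DMatrixRMaj found = new DMatrixRMaj(5, 3);
        CommonOps_DDRM.mult(Q, R, found);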