Example #1
        static MixedMatrixTest()
        {
            R = new RectangularMatrix(2, 3);
            R[0, 0] = 0;
            R[0, 1] = 1;
            R[0, 2] = 2;
            R[1, 0] = 3;
            R[1, 1] = 4;
            R[1, 2] = 5;

            M = new SquareMatrix(3);
            M[0, 0] = 0;
            M[0, 1] = 1;
            M[0, 2] = 2;
            M[1, 0] = 3;
            M[1, 1] = 4;
            M[1, 2] = 5;
            M[2, 0] = 6;
            M[2, 1] = 7;
            M[2, 2] = 8;

            S = new SymmetricMatrix(3);
            S[0, 0] = 0;
            S[0, 1] = 1;
            S[0, 2] = 2;
            S[1, 1] = 3;
            S[1, 2] = 4;
            S[2, 2] = 5;
        }
        public MultivariateSample CreateMultivariateNormalSample(ColumnVector M, SymmetricMatrix C, int n)
        {
            int d = M.Dimension;

            MultivariateSample S = new MultivariateSample(d);

            SquareMatrix A = C.CholeskyDecomposition().SquareRootMatrix();

            Random rng = new Random(1);
            Distribution normal = new NormalDistribution();

            for (int i = 0; i < n; i++) {

                // create a vector of normal deviates
                ColumnVector V = new ColumnVector(d);
                for (int j = 0; j < d; j++) {
                    double y = rng.NextDouble();
                    double z = normal.InverseLeftProbability(y);
                    V[j] = z;
                }

                // form the multivariate distributed vector
                ColumnVector X = M + A * V;

                // add it to the sample
                S.Add(X);

            }

            return (S);
        }
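
        // Why the Cholesky square root works: if V has independent standard-normal components,
        // so that E[V V^T] = I, and A satisfies A A^T = C, then
        //   Cov(M + A V) = A E[V V^T] A^T = A A^T = C
        // so each generated X has exactly the requested mean M and covariance C.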
        public void RealEigenvalueOrdering()
        {
            int d = 10;

            Random rng = new Random(d + 1);
            SymmetricMatrix A = new SymmetricMatrix(d);
            A.Fill((int r, int c) => -1.0 + 2.0 * rng.NextDouble());
            RealEigensystem E = A.Eigensystem();

            for (int i = 0; i < E.Dimension; i++) Console.WriteLine(E.Eigenvalue(i));

            E.Sort(OrderBy.ValueAscending);
            for (int i = 1; i < E.Dimension; i++) {
                Assert.IsTrue(E.Eigenvalue(i - 1) <= E.Eigenvalue(i));
                Assert.IsTrue(TestUtilities.IsNearlyEigenpair(A, E.Eigenvector(i), E.Eigenvalue(i)));
             }

            E.Sort(OrderBy.ValueDescending);
            for (int i = 1; i < E.Dimension; i++) {
                Assert.IsTrue(E.Eigenvalue(i - 1) >= E.Eigenvalue(i));
                Assert.IsTrue(TestUtilities.IsNearlyEigenpair(A, E.Eigenvector(i), E.Eigenvalue(i)));
            }

            E.Sort(OrderBy.MagnitudeAscending);
            for (int i = 1; i < E.Dimension; i++) {
                Assert.IsTrue(Math.Abs(E.Eigenvalue(i - 1)) <= Math.Abs(E.Eigenvalue(i)));
                Assert.IsTrue(TestUtilities.IsNearlyEigenpair(A, E.Eigenvector(i), E.Eigenvalue(i)));
            }

            E.Sort(OrderBy.MagnitudeDescending);
            for (int i = 1; i < E.Dimension; i++) {
                Assert.IsTrue(Math.Abs(E.Eigenvalue(i - 1)) >= Math.Abs(E.Eigenvalue(i)));
                Assert.IsTrue(TestUtilities.IsNearlyEigenpair(A, E.Eigenvector(i), E.Eigenvalue(i)));
            }
        }
        /// <summary>
        /// Computes the inverse of the original matrix.
        /// </summary>
        /// <returns>M<sup>-1</sup></returns>
        public SymmetricMatrix Inverse()
        {
            SymmetricMatrix MI = new SymmetricMatrix(Dimension);

            // do each column as a RHS
            for (int c = 0; c < Dimension; c++) {
                MI[c, c] = 1.0 / sqrtM[c, c];
                for (int r = c + 1; r < Dimension; r++) {
                    MI[r, c] = 0.0;
                    for (int i = c; i < r; i++) {
                        MI[r, c] -= sqrtM[r, i] * MI[i, c];
                    }
                    MI[r, c] = MI[r, c] / sqrtM[r, r];
                }
            }

            // unnecessary?
            for (int c = 0; c < Dimension; c++) {
                for (int r = (Dimension - 1); r >= c; r--) {
                    for (int i = r + 1; i < Dimension; i++) {
                        MI[r, c] -= sqrtM[r, i] * MI[i, c];
                    }
                    MI[r, c] = MI[r, c] / sqrtM[r, r];
                }
            }
            // end unnecessary?

            return (MI);
        }
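
The two passes above implement M^{-1} = (L L^T)^{-1} = L^{-T} L^{-1}, where L is the lower-triangular Cholesky factor stored (symmetrically) in sqrtM: for each column c, the first loop solves L y = e_c by forward substitution and the second solves L^T x = y by back substitution. Read this way, the pass marked "unnecessary?" is the back-substitution step, which is needed to complete the inverse rather than stop at L^{-1}.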
Example #5
 // one-parameter constructor
 internal FitResult(double p1, double dp1, TestResult test)
 {
     this.parameters = new ColumnVector(new double[] {p1}, 0, 1, 1, true);
     this.covarianceMatrix = new SymmetricMatrix(1);
     this.covarianceMatrix[0, 0] = dp1 * dp1;
     this.covarianceMatrix.IsReadOnly = true;
     this.test = test;
 }
Example #6
 public static SymmetricMatrix CreateSymmetricRandomMatrix(int n, int seed)
 {
     SymmetricMatrix M = new SymmetricMatrix(n);
     Random rng = new Random(seed);
     for (int r = 0; r < n; r++) {
         for (int c = 0; c <= r; c++) {
             M[r, c] = 2.0 * rng.NextDouble() - 1.0;
         }
     }
     return (M);
 }
Example #7
        // n-parameter constructor
        internal FitResult(IList<double> parameters, SymmetricMatrix covariance, TestResult test)
        {
            Debug.Assert(parameters != null);
            Debug.Assert(covariance != null);
            Debug.Assert(parameters.Count == covariance.Dimension);

            // This is a bit of a hack to ensure we store read-only ColumnVectors and SymmetricMatrix objects.
            this.parameters = ConvertListToReadOnlyVector(parameters);
            this.covarianceMatrix = covariance;
            this.covarianceMatrix.IsReadOnly = true;

            this.test = test;
        }
        public void CatalanHankelMatrixDeterminant()
        {
            for (int d = 1; d <= 8; d++) {

                SymmetricMatrix S = new SymmetricMatrix(d);
                for (int r = 0; r < d; r++) {
                    for (int c = 0; c <= r; c++) {
                        int n = r + c;
                        S[r, c] = AdvancedIntegerMath.BinomialCoefficient(2*n, n) / (n + 1);
                    }
                }

                CholeskyDecomposition CD = S.CholeskyDecomposition();
                Assert.IsTrue(TestUtilities.IsNearlyEqual(CD.Determinant(), 1.0));

            }
        }
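
Why the determinant is always one: the entries form the Hankel matrix of Catalan numbers, S[r, c] = C_{r+c} with C_n = \binom{2n}{n} / (n+1), and a classical identity gives

    \det \left( C_{i+j} \right)_{0 \le i, j < d} = 1 \quad \text{for every } d

so the test can assert that the Cholesky determinant equals 1, up to roundoff, at every dimension.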
Example #9
        /// <summary>
        /// Computes the Beta distribution that best fits the given sample.
        /// </summary>
        /// <param name="sample">The sample to fit.</param>
        /// <returns>The best fit parameters.</returns>
        /// <remarks>
        /// <para>The returned fit parameters are the &#x3B1; (<see cref="Alpha"/>) and  &#x3B2; (<see cref="Beta"/>) parameters, in that order.
        /// These are the same parameters, in the same order, that are required by the <see cref="BetaDistribution(double,double)"/> constructor to
        /// specify a new Beta distribution.</para>
        /// </remarks>
        /// <exception cref="ArgumentNullException"><paramref name="sample"/> is null.</exception>
        /// <exception cref="InsufficientDataException"><paramref name="sample"/> contains fewer than three values.</exception>
        /// <exception cref="InvalidOperationException">Not all the entries in <paramref name="sample" /> lie between zero and one.</exception>
        public static FitResult FitToSample(Sample sample)
        {
            if (sample == null) throw new ArgumentNullException("sample");
            if (sample.Count < 3) throw new InsufficientDataException();

            // maximum likelihood calculation
            //   \log L = \sum_i \left[ (\alpha-1) \log x_i + (\beta-1) \log (1-x_i) - \log B(\alpha,\beta) \right]
            // using \frac{\partial \log B(a,b)}{\partial a} = \psi(a) - \psi(a+b), we have
            //   \frac{\partial \log L}{\partial \alpha} = \sum_i \log x_i -     N \left[ \psi(\alpha) - \psi(\alpha+\beta) \right]
            //   \frac{\partial \log L}{\partial \beta}  = \sum_i \log (1-x_i) - N \left[ \psi(\beta)  - \psi(\alpha+\beta) \right]
            // set equal to zero to get equations for \alpha, \beta
            //   \psi(\alpha) - \psi(\alpha+\beta) = <\log x>
            //   \psi(\beta) - \psi(\alpha+\beta) = <\log (1-x)>

            // compute the mean log of x and (1-x)
            // these are the logs of the geometric means
            double ga = 0.0; double gb = 0.0;
            foreach (double value in sample) {
                if ((value <= 0.0) || (value >= 1.0)) throw new InvalidOperationException();
                ga += Math.Log(value); gb += Math.Log(1.0 - value);
            }
            ga /= sample.Count; gb /= sample.Count;

            // define the function to zero
            Func<IList<double>, IList<double>> f = delegate(IList<double> x) {
                double pab = AdvancedMath.Psi(x[0] + x[1]);
                return (new double[] {
                    AdvancedMath.Psi(x[0]) - pab - ga,
                    AdvancedMath.Psi(x[1]) - pab - gb
                });
            };

            // guess initial values using the method of moments
            //   M1 = \frac{\alpha}{\alpha+\beta}, \qquad C2 = \frac{\alpha\beta}{(\alpha+\beta)^2 (\alpha+\beta+1)}
            // implies
            //   \alpha = M1 \left( \frac{M1 (1-M1)}{C2} - 1 \right)
            //   \beta = (1 - M1) \left( \frac{M1 (1-M1)}{C2} -1 \right)
            double m = sample.Mean; double mm = 1.0 - m;
            double q = m * mm / sample.Variance - 1.0;
            double[] x0 = new double[] { m * q, mm * q };

            // find the parameter values that zero the two equations
            IList<double> x1 = MultiFunctionMath.FindZero(f, x0);
            double a = x1[0]; double b = x1[1];

            // take more derivatives of \log L to get curvature matrix
            //   \frac{\partial^2 \log L}{\partial\alpha^2} = - N \left[ \psi'(\alpha) - \psi'(\alpha+\beta) \right]
            //   \frac{\partial^2 \log L}{\partial\beta^2}  = - N \left[ \psi'(\beta)  - \psi'(\alpha+\beta) \right]
            //   \frac{\partial^2 \log L}{\partial \alpha \partial \beta} = - N \psi'(\alpha+\beta)
            // covariance matrix is inverse of curvature matrix
            SymmetricMatrix CI = new SymmetricMatrix(2);
            CI[0, 0] = sample.Count * (AdvancedMath.Psi(1, a) - AdvancedMath.Psi(1, a + b));
            CI[1, 1] = sample.Count * (AdvancedMath.Psi(1, b) - AdvancedMath.Psi(1, a + b));
            CI[0, 1] = sample.Count * AdvancedMath.Psi(1, a + b);
            CholeskyDecomposition CD = CI.CholeskyDecomposition();
            SymmetricMatrix C = CD.Inverse();

            // do a KS test on the result
            TestResult test = sample.KolmogorovSmirnovTest(new BetaDistribution(a, b));

            // return the results
            FitResult result = new FitResult(x1, C, test);
            return (result);
        }
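
        // A minimal usage sketch for the fit above, assuming a parameterless Sample constructor
        // and a Sample.Add(double) method; the remaining calls appear elsewhere in these examples.
        public void FitBetaUsageSketch()
        {
            Sample sample = new Sample();                         // parameterless constructor assumed
            Distribution beta = new BetaDistribution(2.0, 3.0);
            Random rng = new Random(1);
            for (int i = 0; i < 100; i++) {
                sample.Add(beta.GetRandomValue(rng));             // Sample.Add(double) assumed
            }
            FitResult fit = BetaDistribution.FitToSample(sample);
            // the parameters come back in the order (alpha, beta), per the remarks above
            Console.WriteLine("alpha = {0}, beta = {1}", fit.Parameters[0], fit.Parameters[1]);
        }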
        public void SymmetricMatrixNorms()
        {
            SymmetricMatrix Z = new SymmetricMatrix(3);
            Assert.IsTrue(Z.OneNorm() == 0.0);
            Assert.IsTrue(Z.InfinityNorm() == 0.0);
            Assert.IsTrue(Z.FrobeniusNorm() == 0.0);

            SymmetricMatrix A = CreateSymmetricRandomMatrix(4, 1);
            Assert.IsTrue(A.OneNorm() > 0.0);
            Assert.IsTrue(A.InfinityNorm() > 0.0);
            Assert.IsTrue(A.FrobeniusNorm() > 0.0);

            SymmetricMatrix B = CreateSymmetricRandomMatrix(4, 2);
            Assert.IsTrue(B.OneNorm() > 0.0);
            Assert.IsTrue(B.InfinityNorm() > 0.0);
            Assert.IsTrue(B.FrobeniusNorm() > 0.0);

            SymmetricMatrix S = A + B;
            Assert.IsTrue(S.OneNorm() <= A.OneNorm() + B.OneNorm());
            Assert.IsTrue(S.InfinityNorm() <= A.InfinityNorm() + B.InfinityNorm());
            Assert.IsTrue(S.FrobeniusNorm() <= A.FrobeniusNorm() + B.FrobeniusNorm());

            SquareMatrix P = A * B;
            Assert.IsTrue(P.FrobeniusNorm() <= A.FrobeniusNorm() * B.FrobeniusNorm());
            // Frobenius norm is sub-multiplicative
        }
        public void MultivariateNormalSummaryStatistics()
        {
            ColumnVector V = new ColumnVector( new double[] { 1.0, 2.0} );
            SymmetricMatrix C = new SymmetricMatrix(2);
            C[0, 0] = 1.0;
            C[1, 1] = 2.0;
            C[0, 1] = 0.5;
            int N = 100;
            MultivariateSample S = CreateMultivariateNormalSample(V, C, N);

            Assert.IsTrue(S.Count == N);

            // check the population means
            Assert.IsTrue(S.Column(0).PopulationMean.ConfidenceInterval(0.95).ClosedContains(1.0));
            Assert.IsTrue(S.Column(1).PopulationMean.ConfidenceInterval(0.95).ClosedContains(2.0));

            // check the population variances
            Assert.IsTrue(S.Column(0).PopulationVariance.ConfidenceInterval(0.95).ClosedContains(C[0, 0]));
            //Assert.IsTrue(S.PopulationCovariance(0, 1).ConfidenceInterval(0.95).ClosedContains(C[0, 1]));
            //Assert.IsTrue(S.PopulationCovariance(1, 0).ConfidenceInterval(0.95).ClosedContains(C[1, 0]));
            Assert.IsTrue(S.Column(1).PopulationVariance.ConfidenceInterval(0.95).ClosedContains(C[1, 1]));
            //Console.WriteLine(S.PopulationCovariance(0, 0));
            //Console.WriteLine(S.PopulationCovariance(1, 1));
            //Console.WriteLine(S.PopulationCovariance(0, 1));

            Console.WriteLine("--");
            // add tests of known higher moments for multivariate normal distribution
            // at the moment that is hard because we don't have uncertainty estimates for them
            //Console.WriteLine(S.Moment(0, 0));
            //Console.WriteLine(S.Mean(0));
            //Console.WriteLine(S.Moment(1, 0));
            //Console.WriteLine(S.Variance(0));
            //Console.WriteLine(S.MomentAboutMean(2, 0));
            //Console.WriteLine(S.MomentAboutMean(3, 0));
            //Console.WriteLine(S.MomentAboutMean(4, 0));
        }
Example #12
 internal AasenDecomposition(SymmetricMatrix D)
 {
     this.D = D;
 }
Example #13
        public void MinimizeQuadratic2D()
        {
            Func<IList<double>, double> f = (IList<double> x) => 1.0 + 2.0 * MoreMath.Sqr(x[0] - 3.0) + 4.0 * (x[0] - 3.0) * (x[1] - 5.0) + 6.0 * MoreMath.Sqr(x[1] - 5.0);
            MultiExtremum m = MultiFunctionMath.FindLocalMinimum(f, new double[] { 1.0, 1.0 });

            Console.WriteLine(m.Value);

            SymmetricMatrix H = new SymmetricMatrix(2);
            H[0, 0] = 4.0;
            H[0, 1] = 4.0;
            H[1, 1] = 12.0;

            Assert.IsTrue(m.Dimension == 2);
            Assert.IsTrue(TestUtilities.IsNearlyEqual(m.Value, 1.0));
            Assert.IsTrue(TestUtilities.IsNearlyEqual(m.Location, new ColumnVector(3.0, 5.0), new EvaluationSettings() { RelativePrecision = Math.Sqrt(TestUtilities.TargetPrecision) }));
            Assert.IsTrue(TestUtilities.IsNearlyEqual(m.HessianMatrix, H, new EvaluationSettings() { RelativePrecision = 8.0 * Math.Sqrt(Math.Sqrt(TestUtilities.TargetPrecision)) }));
        }
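
Where the expected results come from: for f(x, y) = 1 + 2 (x-3)^2 + 4 (x-3)(y-5) + 6 (y-5)^2 the second partial derivatives are constant,

    \frac{\partial^2 f}{\partial x^2} = 4, \quad \frac{\partial^2 f}{\partial x \partial y} = 4, \quad \frac{\partial^2 f}{\partial y^2} = 12

so the matrix H above is the exact Hessian, and the minimum value 1 is attained at (3, 5), which is what the asserts verify.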
Example #14
        public static AasenDecomposition LTLDecompose3(SymmetricMatrix M)
        {
            // Aasen's method, now with pivoting

            int n = M.Dimension;

            double[] a = new double[n];
            double[] b = new double[n - 1];
            SquareMatrix L = new SquareMatrix(n);
            for (int i = 0; i < n; i++) L[i, i] = 1.0;

            // working space for d'th column of H = T L^T
            double[] h = new double[n];

            // first row
            a[0] = M[0, 0];
            if (n > 1) b[0] = M[1, 0];
            for (int i = 2; i < n; i++) L[i, 1] = M[i, 0] / b[0];

            PrintLTLMatrices(h, a, b, L);

            // second row
            if (n > 1) {
                a[1] = M[1, 1];
                if (n > 2) b[1] = M[2, 1] - L[2, 1] * a[1];
                for (int i = 3; i < n; i++) L[i, 2] = (M[i, 1] - L[i, 1] * a[1]) / b[1];
            }

            PrintLTLMatrices(h, a, b, L);

            for (int d = 0; d < n; d++) {

                Console.WriteLine("d = {0}", d);

                // compute h (d'th row of T L^T)
                if (d == 0) {
                    h[0] = M[0, 0];
                } else if (d == 1) {
                    h[0] = b[0];
                    h[1] = M[1, 1];
                } else {
                    h[0] = b[0] * L[d, 1];
                    h[1] = b[1] * L[d, 2] + a[1] * L[d, 1];
                    h[d] = M[d, d] - L[d, 1] * h[1];
                    for (int i = 2; i < d; i++) {
                        h[i] = b[i] * L[d, i + 1] + a[i] * L[d, i] + b[i - 1] * L[d, i - 1];
                        h[d] -= L[d, i] * h[i];
                    }
                }

                // compute alpha (d'th diagonal element of T)
                if ((d == 0) || (d == 1)) {
                    a[d] = h[d];
                } else {
                    a[d] = h[d] - b[d - 1] * L[d, d - 1];
                }

                Console.WriteLine("before pivot");
                PrintMatrix(M);
                PrintMatrix(L);

                // find the pivot
                if (d < (n - 1)) {
                    int p = d + 1;
                    double q = M[p, d];
                    for (int i = d + 2; i < n; i++) {
                        if (Math.Abs(M[i, d]) > Math.Abs(q)) {
                            p = i;
                            q = M[i, d];
                        }
                    }

                    Console.WriteLine("pivot = {0}", p);

                    // symmetrically permute the pivot element to M[d+1,d]
                    if (p != d + 1) {

                        // symmetrically permute the pivot element to M[d+1, d]
                        // we have to be a bit careful here, because some permutations will be done
                        // automatically by our SymmetricMatrix class due to symmetry
                        for (int i = 0; i < n; i++) {
                            if ((i == p) || (i == d + 1)) continue;
                            double t = M[d + 1, i];
                            M[d + 1, i] = M[p, i];
                            M[p, i] = t;
                        }
                        double tt = M[d + 1, d + 1];
                        M[d + 1, d + 1] = M[p, p];
                        M[p, p] = tt;

                        // also reorder the affected previously computed elements of L
                        for (int i = 1; i <= d; i++) {
                            double t = L[d + 1, i];
                            L[d + 1, i] = L[p, i];
                            L[p, i] = t;
                        }

                        Console.WriteLine("after pivot");
                        PrintMatrix(M);
                        PrintMatrix(L);

                    }

                }

                // compute beta (d'th subdiagonal element of T)
                if (d < (n - 1)) {
                    b[d] = M[d + 1, d];
                    for (int i = 0; i <= d; i++) {
                        Console.WriteLine("n={0} d={1} i={2}", n, d, i);
                        b[d] -= L[d + 1, i] * h[i];
                    }
                }

                // compute (d+1)'th column of L
                for (int i = d + 2; i < n; i++) {
                    L[i, d + 1] = M[i, d];
                    for (int j = 0; j <= d; j++) L[i, d + 1] -= L[i, j] * h[j];
                    L[i, d + 1] = L[i, d + 1] / b[d];
                }

                PrintLTLMatrices(h, a, b, L);

            }

            Console.WriteLine("Reconstruct");
            SymmetricMatrix T = new SymmetricMatrix(n);
            for (int i = 0; i < n; i++) {
                T[i, i] = a[i];
            }
            for (int i = 0; i < (n - 1); i++) {
                T[i + 1, i] = b[i];
            }
            SquareMatrix A = L * T * L.Transpose();
            PrintMatrix(A);

            SymmetricMatrix D = new SymmetricMatrix(n);
            for (int i = 0; i < n; i++) {
                D[i, i] = a[i];
            }
            for (int i = 0; i < (n - 1); i++) {
                D[i + 1, i] = b[i];
            }
            for (int c = 1; c < (n - 1); c++) {
                for (int r = c + 1; r < n; r++) {
                    D[r, c - 1] = L[r, c];
                }
            }
            AasenDecomposition LTL = new AasenDecomposition(D);
            return (LTL);
        }
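
Stated compactly, the factorization this routine computes (with the symmetric pivots applied to M in place) is

    M = L T L^T

where L is unit lower triangular with first column e_1 and T is symmetric tridiagonal with diagonal a and subdiagonal b. The returned SymmetricMatrix D packs everything into one triangle: a on the diagonal, b on the first subdiagonal, and the computed columns of L shifted one column to the left below that.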
Example #15
        // routines for maximum likelihood fitting
        /// <summary>
        /// Computes the Gamma distribution that best fits the given sample.
        /// </summary>
        /// <param name="sample">The sample to fit.</param>
        /// <returns>The best fit parameters.</returns>
        /// <remarks>
        /// <para>The returned fit parameters are the <see cref="ShapeParameter"/> and <see cref="ScaleParameter"/>, in that order.
        /// These are the same parameters, in the same order, that are required by the <see cref="GammaDistribution(double,double)"/> constructor to
        /// specify a new Gamma distribution.</para>
        /// </remarks>
        /// <exception cref="ArgumentNullException"><paramref name="sample"/> is null.</exception>
        /// <exception cref="InvalidOperationException"><paramref name="sample"/> contains non-positive values.</exception>
        /// <exception cref="InsufficientDataException"><paramref name="sample"/> contains fewer than three values.</exception>
        public static FitResult FitToSample(Sample sample)
        {
            if (sample == null) throw new ArgumentNullException("sample");
            if (sample.Count < 3) throw new InsufficientDataException();

            // The log likelihood of a sample given k and s is
            //   \log L = (k-1) \sum_i \log x_i - \frac{1}{s} \sum_i x_i - N \log \Gamma(k) - N k \log s
            // Differentiating,
            //   \frac{\partial \log L}{\partial s} = \frac{1}{s^2} \sum_i x_i - \frac{Nk}{s}
            //   \frac{\partial \log L}{\partial k} = \sum_i \log x_i - N \psi(k) - N \log s
            // Setting the first equal to zero gives
            //   k s = N^{-1} \sum_i x_i = <x>
            //   \psi(k) + \log s = N^{-1} \sum_i \log x_i = <log x>
            // Inserting the first into the second gives a single equation for k
            //   \log k - \psi(k) = \log <x> - <\log x>
            // Note the RHS need only be computed once.
            // \log k > \psi(k) for all k, so the RHS had better be positive. The two functions
            // approach each other for large k, so a smaller RHS produces a larger k.

            double s = 0.0;
            foreach (double x in sample) {
                if (x <= 0.0) throw new InvalidOperationException();
                s += Math.Log(x);
            }
            s = Math.Log(sample.Mean) - s / sample.Count;

            // We can get an initial guess for k from the method of moments
            //   \frac{\mu^2}{\sigma^2} = k

            double k0 = MoreMath.Sqr(sample.Mean) / sample.Variance;

            // Since 1/(2k) < \log(k) - \psi(k) < 1/k, we could get a bound, which
            // might be better: it would keep the solver out of k < 0 territory.
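            // Concretely, evaluating that inequality at the solution of \log k - \psi(k) = s
            // brackets the root: 1/(2s) < k < 1/s, so the interval [1/(2s), 1/s] could be
            // handed to a bracketing root-finder instead of starting an open search from k0.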

            double k1 = FunctionMath.FindZero(k => (Math.Log(k) - AdvancedMath.Psi(k) - s), k0);

            double s1 = sample.Mean / k1;

            // Curvature of the log likelihood is straightforward
            //   \frac{\partial^2 \log L}{\partial s^2} = -\frac{2}{s^3} \sum_i x_i + \frac{Nk}{s^2} = - \frac{Nk}{s^2}
            //   \frac{\partial^2 \log L}{\partial k \partial s} = - \frac{N}{s}
            //   \frac{\partial^2 \log L}{\partial k^2} = - N \psi'(k)
            // This gives the curvature matrix and thus via inversion the covariance matrix.

            SymmetricMatrix B = new SymmetricMatrix(2);
            B[0, 0] = sample.Count * AdvancedMath.Psi(1, k1);
            B[0, 1] = sample.Count / s1;
            B[1, 1] = sample.Count * k1 / MoreMath.Sqr(s1);
            SymmetricMatrix C = B.CholeskyDecomposition().Inverse();

            // Do a KS test for goodness-of-fit
            TestResult test = sample.KolmogorovSmirnovTest(new GammaDistribution(k1, s1));

            return (new FitResult(new double[] { k1, s1 }, C, test));
        }
        public void SymmetricRandomMatrixCholeskyDecomposition()
        {
            int d = 100;
            Random rng = new Random(d);
            ColumnVector[] V = new ColumnVector[d];
            for (int i=0; i < d; i++) {
                V[i] = new ColumnVector(d);
                for (int j = 0; j < d; j++) {
                    V[i][j] = rng.NextDouble();
                }
            }

            SymmetricMatrix A = new SymmetricMatrix(d);
            for (int i = 0; i < d; i++) {
                for (int j = 0; j <= i; j++) {
                    A[i, j] = V[i].Transpose() * V[j];
                }
            }

            Stopwatch s = Stopwatch.StartNew();
            CholeskyDecomposition CD = A.CholeskyDecomposition();
            s.Stop();
            Console.WriteLine("{0} {1}", d, s.ElapsedMilliseconds);

            Assert.IsTrue(CD != null);
        }
Example #17
 public static SymmetricMatrix CreateSymmetricHilbertMatrix(int n)
 {
     SymmetricMatrix H = new SymmetricMatrix(n);
     H.Fill((r, c) => 1.0 / (r + c + 1));
     return (H);
 }
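
 // The filled matrix is the Hilbert matrix, H[r, c] = 1 / (r + c + 1) with zero-based indices.
 // It is symmetric and positive definite, but its condition number grows exponentially with n,
 // which makes it a standard stress test for decomposition and inversion code.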
        /// <summary>
        /// Computes the Weibull distribution that best fits the given sample.
        /// </summary>
        /// <param name="sample">The sample to fit.</param>
        /// <returns>The best fit parameters.</returns>
        /// <remarks>
        /// <para>The returned fit parameters are the <see cref="ScaleParameter"/> and <see cref="ShapeParameter"/>, in that order.
        /// These are the same parameters, in the same order, that are required by the <see cref="WeibullDistribution(double,double)"/> constructor to
        /// specify a new Weibull distribution.</para>
        /// </remarks>
        /// <exception cref="ArgumentNullException"><paramref name="sample"/> is null.</exception>
        /// <exception cref="InvalidOperationException"><paramref name="sample"/> contains non-positive values.</exception>
        /// <exception cref="InsufficientDataException"><paramref name="sample"/> contains fewer than three values.</exception>
        public static FitResult FitToSample(Sample sample)
        {
            if (sample == null) throw new ArgumentNullException("sample");
            if (sample.Count < 3) throw new InsufficientDataException();
            if (sample.Minimum <= 0.0) throw new InvalidOperationException();

            // The log likelihood function is
            //   \log L = N \log k + (k-1) \sum_i \log x_i - N k \log \lambda - \sum_i \left(\frac{x_i}{\lambda}\right)^k
            // Taking derivatives, we get
            //   \frac{\partial \log L}{\partial \lambda} = - \frac{N k}{\lambda} + \sum_i \frac{k}{\lambda} \left(\frac{x_i}{\lambda}\right)^k
            //   \frac{\partial \log L}{\partial k} =\frac{N}{k} + \sum_i \left[ 1 - \left(\frac{x_i}{\lambda}\right)^k \right] \log \left(\frac{x_i}{\lambda}\right)
            // Setting the first expression to zero and solving for \lambda gives
            //   \lambda = \left( N^{-1} \sum_i x_i^k \right)^{1/k} = ( < x^k > )^{1/k}
            // which allows us to reduce the problem from 2D to 1D.
            // By the way, using the expression for the moment < x^k > of the Weibull distribution, you can show there is
            // no bias to this result even for finite samples.
            // Setting the second expression to zero gives
            //   \frac{1}{k} = \frac{1}{N} \sum_i \left[ \left( \frac{x_i}{\lambda} \right)^k - 1 \right] \log \left(\frac{x_i}{\lambda}\right)
            // which, given the equation for \lambda as a function of k derived from the first expression, is an implicit equation for k.
            // It cannot be solved in closed form, but we have now reduced our problem to finding a root in one-dimension.

            // We need a starting guess for k.
            // The method of moments equations are not solvable for the parameters in closed form
            // but the scale parameter drops out of the ratio of the 1/3 and 2/3 quantile points
            // and the result is easily solved for the shape parameter
            //   k = \frac{\log 2}{\log\left(\frac{x_{2/3}}{x_{1/3}}\right)}
            double x1 = sample.InverseLeftProbability(1.0 / 3.0);
            double x2 = sample.InverseLeftProbability(2.0 / 3.0);
            double k0 = Global.LogTwo / Math.Log(x2 / x1);
            // Given the shape parameter, we could invert the expression for the mean to get
            // the scale parameter, but since we have an expression for \lambda from k, we
            // don't need it.
            //double s0 = sample.Mean / AdvancedMath.Gamma(1.0 + 1.0 / k0);

            // Simply handing our 1D function to a root-finder works fine until we start to encounter large k. For large k,
            // even just computing \lambda goes wrong because we are taking x_i^k which overflows. Horst Rinne, "The Weibull
            // Distribution: A Handbook" describes a way out. Basically, we first move to variables z_i = \log(x_i) and
            // then w_i = z_i - \bar{z}. Then lots of factors of e^{k \bar{z}} cancel out and, even though we still do
            // have some e^{k w_i}, the w_i are small and centered around 0 instead of large and centered around \lambda.

            Sample transformedSample = sample.Copy();
            transformedSample.Transform(x => Math.Log(x));
            double zbar = transformedSample.Mean;
            transformedSample.Transform(z => z - zbar);

            // After this change of variable the 1D function to zero becomes
            //   g(k) = \sum_i ( 1 - k w_i ) e^{k w_i}
            // It's easy to show that g(0) = n and g(\infty) = -\infty (for large k the term with the
            // largest w_i, which is positive since the w_i are centered, dominates and is negative),
            // so g must cross zero. It's also easy to take a derivative
            //   g'(k) = - k \sum_i w_i^2 e^{k w_i}
            // so we can apply Newton's method.

            int i = 0;
            double k1 = k0;
            while (true) {
                i++;
                double g = 0.0;
                double gp = 0.0;
                foreach (double w in transformedSample) {
                    double e = Math.Exp(k1 * w);
                    g += (1.0 - k1 * w) * e;
                    gp -= k1 * w * w * e;
                }
                double dk = -g / gp;
                k1 += dk;
                if (Math.Abs(dk) <= Global.Accuracy * Math.Abs(k1)) break;
                if (i >= Global.SeriesMax) throw new NonconvergenceException();
            }

            // The corresponding lambda can also be expressed in terms of zbar and w's.

            double t = 0.0;
            foreach (double w in transformedSample) {
                t += Math.Exp(k1 * w);
            }
            t /= transformedSample.Count;
            double lambda1 = Math.Exp(zbar) * Math.Pow(t, 1.0 / k1);

            // We need the curvature matrix at the maximum of our log likelihood function
            // to determine the covariance matrix. Taking more derivatives...
            //    \frac{\partial^2 \log L}{\partial \lambda^2} = \frac{N k}{\lambda^2} - \sum_i \frac{k(k+1) x_i^k}{\lambda^{k+2}}
            //    = - \frac{N k^2}{\lambda^2}
            // The second expression follows by inserting the first-derivative-equal-zero relation into the first.
            // For k=1, this agrees with the variance formula for the mean of the best-fit exponential.

            // Derivatives involving k are less simple.

            // We end up needing the means < (x/lambda)^k log(x/lambda) > and < (x/lambda)^k log^2(x/lambda) >

            double mpl = 0.0; double mpl2 = 0.0;
            foreach (double x in sample) {
                double r = x / lambda1;
                double p = Math.Pow(r, k1);
                double l = Math.Log(r);
                double pl = p * l;
                double pl2 = pl * l;
                mpl += pl;
                mpl2 += pl2;
            }
            mpl = mpl / sample.Count;
            mpl2 = mpl2 / sample.Count;

            // See if we can't do any better here. Transforming to zbar and w's looked ugly, but perhaps it
            // can be simplified? One interesting observation: if we take expectation values (which gives
            // the Fisher information matrix) the entries become simple:
            //   B_{\lambda \lambda} = \frac{N k^2}{\lambda^2}
            //   B_{\lambda k} = -\Gamma'(2) \frac{N}{\lambda}
            //   B_{k k } = [1 + \Gamma''(2)] \frac{N}{k^2}
            // Would it be bad to just use these directly?

            // Construct the curvature matrix and invert it.
            SymmetricMatrix B = new SymmetricMatrix(2);
            B[0, 0] = sample.Count * MoreMath.Sqr(k1 / lambda1);
            B[0, 1] = -sample.Count * k1 / lambda1 * mpl;
            B[1, 1] = sample.Count * (1.0 / MoreMath.Pow2(k1) + mpl2);
            SymmetricMatrix C = B.CholeskyDecomposition().Inverse();

            // Do a KS test to compare sample to best-fit distribution
            Distribution distribution = new WeibullDistribution(lambda1, k1);
            TestResult test = sample.KolmogorovSmirnovTest(distribution);

            // return the result
            return (new FitResult(new double[] {lambda1, k1}, C, test));
        }
Example #19
        public void GaussianIntegrals()
        {
            Random rng = new Random(1);
            for (int d = 2; d < 4; d++) {
                Console.WriteLine(d);

                // Create a symmetric matrix
                SymmetricMatrix A = new SymmetricMatrix(d);
                for (int r = 0; r < d; r++) {
                    for (int c = 0; c < r; c++) {
                        A[r, c] = rng.NextDouble();
                    }
                    // Ensure it is positive definite by diagonal dominance
                    A[r, r] = r + 1.0;
                }

                // Compute its determinant, which appears in the analytic value of the integral
                CholeskyDecomposition CD = A.CholeskyDecomposition();
                double detA = CD.Determinant();

                // Compute the integral
                Func<IList<double>, double> f = (IList<double> x) => {
                    ColumnVector v = new ColumnVector(x);
                    double s = v.Transpose() * (A * v);
                    return (Math.Exp(-s));
                };

                Interval[] volume = new Interval[d];
                for (int i = 0; i < d; i++) volume[i] = Interval.FromEndpoints(Double.NegativeInfinity, Double.PositiveInfinity);

                IntegrationResult I = MultiFunctionMath.Integrate(f, volume);

                // Compare to the analytic result
                Console.WriteLine("{0} ({1}) {2}", I.Value, I.Precision, Math.Sqrt(MoreMath.Pow(Math.PI, d) / detA));
                Assert.IsTrue(TestUtilities.IsNearlyEqual(I.Value, Math.Sqrt(MoreMath.Pow(Math.PI, d) / detA), new EvaluationSettings() { AbsolutePrecision = 2.0 * I.Precision }));
            }
        }
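
        // The analytic value used in the assert above is the standard multivariate Gaussian
        // integral, valid for positive-definite A (guaranteed here by diagonal dominance):
        //   \int_{R^d} e^{-x^T A x} \, d^d x = \sqrt{ \pi^d / \det A }
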
        // numerical approximation of Hessian
        // requires 3 evaluations for diagonals, there are d of those
        // requires 4 evaluations for off-diagonals; there are d(d-1)/2 of those
        // total of 2d^2 + d evaluations required
        private static SymmetricMatrix ComputeCurvature(Func<double[], double> f, double[] x)
        {
            int d = x.Length;

            double e = Math.Pow(2.0, -15.0);
            double[] dx = new double[d];
            for (int i = 0; i < d; i++) {
                double h = e * (Math.Abs(x[i]) + 1.0);
                // ensure that step is exactly representable
                double xh = x[i] + h;
                h = xh - x[i];
                // record d
                dx[i] = h;
            }

            SymmetricMatrix H = new SymmetricMatrix(d);
            for (int i = 0; i < d; i++) {
                double[] xp = (double[]) x.Clone(); xp[i] += dx[i];
                double[] xm = (double[]) x.Clone(); xm[i] -= dx[i];
                double f0 = f(x);
                double fp = f(xp);
                double fm = f(xm);
                H[i, i] = (fm - 2.0 * f0 + fp) / (dx[i]*dx[i]);
                for (int j = 0; j < i; j++) {
                    double[] xpp = (double[]) x.Clone(); xpp[i] += dx[i]; xpp[j] += dx[j];
                    double[] xpm = (double[]) x.Clone(); xpm[i] += dx[i]; xpm[j] -= dx[j];
                    double[] xmm = (double[]) x.Clone(); xmm[i] -= dx[i]; xmm[j] -= dx[j];
                    double[] xmp = (double[]) x.Clone(); xmp[i] -= dx[i]; xmp[j] += dx[j];
                    double fpp = f(xpp);
                    double fpm = f(xpm);
                    double fmm = f(xmm);
                    double fmp = f(xmp);
                    H[i, j] = (fpp - fpm - fmp + fmm) / dx[i] / dx[j] / 4.0;
                }
            }

            return (H);
        }
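
Written out, the difference formulas the routine implements are (with h_i = dx[i] and e_i the i'th unit vector)

    \frac{\partial^2 f}{\partial x_i^2} \approx \frac{f(x + h_i e_i) - 2 f(x) + f(x - h_i e_i)}{h_i^2}
    \frac{\partial^2 f}{\partial x_i \partial x_j} \approx \frac{f(x + h_i e_i + h_j e_j) - f(x + h_i e_i - h_j e_j) - f(x - h_i e_i + h_j e_j) + f(x - h_i e_i - h_j e_j)}{4 h_i h_j}

The step h_i \approx 2^{-15} (|x_i| + 1) trades truncation error against roundoff, and the xh - x[i] trick snaps the step to an exactly representable value, so the divisions use the step actually taken.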
Example #21
        /// <summary>
        /// Computes the inverse of the matrix.
        /// </summary>
        /// <returns>The matrix inverse M<sup>-1</sup>.</returns>
        public SymmetricMatrix Inverse()
        {
            // this implementation re-uses the generic square matrix algorithm
            // replace it with one based on LDL decomposition, which exploits the symmetry

            SquareMatrix M = new SquareMatrix(dimension);
            for (int r = 0; r < dimension; r++) {
                for (int c = 0; c < dimension; c++) {
                    M[r, c] = this[r, c];
                }
            }

            SquareMatrix MI = M.Inverse();

            SymmetricMatrix I = new SymmetricMatrix(dimension);
            for (int r = 0; r < dimension; r++) {
                for (int c = 0; c <= r; c++) {
                    I[r, c] = MI[r, c];
                }
            }

            return (I);
        }
 internal SpaceExtremum(double[] x, double f, SymmetricMatrix f2)
 {
     this.x = x;
     this.f = f;
     this.f2 = f2;
 }
Example #23
 internal AasenDecomposition(SymmetricMatrix D)
 {
     this.D = D;
 }
 internal CholeskyDecomposition(SymmetricMatrix sqrtM)
 {
     this.sqrtM = sqrtM;
 }
        public void BivariatePolynomialRegression()
        {
            // do a set of polynomial regression fits
            // make sure not only that the fit parameters are what they should be, but that their variances/covariances are as claimed

            Random rng = new Random(271828);

            // define logistic parameters
            double[] a = new double[] { 0.0, -1.0, 2.0, -3.0 };

            // keep track of sample of returned a and b fit parameters
            MultivariateSample A = new MultivariateSample(a.Length);

            // also keep track of returned covariance estimates
            // since these vary slightly from fit to fit, we will average them
            SymmetricMatrix C = new SymmetricMatrix(a.Length);

            // also keep track of test statistics
            Sample F = new Sample();

            // do 100 fits
            for (int k = 0; k < 100; k++) {

                // we should be able to draw x's from any distribution; noise should be drawn from a normal distribution
                Distribution xd = new CauchyDistribution();
                Distribution nd = new NormalDistribution(0.0, 4.0);

                // generate a synthetic data set
                BivariateSample s = new BivariateSample();
                for (int j = 0; j < 20; j++) {
                    double x = xd.GetRandomValue(rng);
                    double y = nd.GetRandomValue(rng);
                    for (int i = 0; i < a.Length; i++) {
                        y += a[i] * MoreMath.Pow(x, i);
                    }
                    s.Add(x, y);
                }

                // do the regression
                FitResult r = s.PolynomialRegression(a.Length - 1);

                ColumnVector ps = r.Parameters;
                //Console.WriteLine("{0} {1} {2}", ps[0], ps[1], ps[2]);

                // record best fit parameters
                A.Add(ps);

                // record estimated covariances
                C += r.CovarianceMatrix;

                // record the fit statistic
                F.Add(r.GoodnessOfFit.Statistic);
                //Console.WriteLine("F={0}", r.GoodnessOfFit.Statistic);

            }

            C = (1.0 / A.Count) * C; // average the accumulated covariance estimates; scalar multiplication stands in for matrix division by a real number

            // check that mean parameter estimates are what they should be: the underlying population parameters
            for (int i = 0; i < A.Dimension; i++) {
                Console.WriteLine("{0} {1}", A.Column(i).PopulationMean, a[i]);
                Assert.IsTrue(A.Column(i).PopulationMean.ConfidenceInterval(0.95).ClosedContains(a[i]));
            }

            // check that parameter covariances are what they should be: the reported covariance estimates
            for (int i = 0; i < A.Dimension; i++) {
                for (int j = i; j < A.Dimension; j++) {
                    Console.WriteLine("{0} {1} {2} {3}", i, j, C[i, j], A.TwoColumns(i, j).PopulationCovariance);
                    Assert.IsTrue(A.TwoColumns(i, j).PopulationCovariance.ConfidenceInterval(0.95).ClosedContains(C[i, j]));
                }
            }

            // check that F is distributed as it should be
            //Console.WriteLine(fs.KolmogorovSmirnovTest(new FisherDistribution(2, 48)).LeftProbability);
        }
Example #26
        public static AasenDecomposition LTLDecompose3(SymmetricMatrix M)
        {
            // Aasen's method, now with pivoting

            int n = M.Dimension;

            double[]     a = new double[n];
            double[]     b = new double[n - 1];
            SquareMatrix L = new SquareMatrix(n);

            for (int i = 0; i < n; i++)
            {
                L[i, i] = 1.0;
            }

            // working space for d'th column of H = T L^T
            double[] h = new double[n];

            // first row
            a[0] = M[0, 0];
            if (n > 1)
            {
                b[0] = M[1, 0];
            }
            for (int i = 2; i < n; i++)
            {
                L[i, 1] = M[i, 0] / b[0];
            }

            PrintLTLMatrices(h, a, b, L);

            // second row
            if (n > 1)
            {
                a[1] = M[1, 1];
                if (n > 2)
                {
                    b[1] = M[2, 1] - L[2, 1] * a[1];
                }
                for (int i = 3; i < n; i++)
                {
                    L[i, 2] = (M[i, 1] - L[i, 1] * a[1]) / b[1];
                }
            }

            PrintLTLMatrices(h, a, b, L);

            for (int d = 0; d < n; d++)
            {
                Console.WriteLine("d = {0}", d);

                // compute h (d'th row of T L^T)
                if (d == 0)
                {
                    h[0] = M[0, 0];
                }
                else if (d == 1)
                {
                    h[0] = b[0];
                    h[1] = M[1, 1];
                }
                else
                {
                    h[0] = b[0] * L[d, 1];
                    h[1] = b[1] * L[d, 2] + a[1] * L[d, 1];
                    h[d] = M[d, d] - L[d, 1] * h[1];
                    for (int i = 2; i < d; i++)
                    {
                        h[i]  = b[i] * L[d, i + 1] + a[i] * L[d, i] + b[i - 1] * L[d, i - 1];
                        h[d] -= L[d, i] * h[i];
                    }
                }

                // compute alpha (d'th diagonal element of T)
                if ((d == 0) || (d == 1))
                {
                    a[d] = h[d];
                }
                else
                {
                    a[d] = h[d] - b[d - 1] * L[d, d - 1];
                }

                Console.WriteLine("before pivot");
                PrintMatrix(M);
                PrintMatrix(L);

                // find the pivot
                if (d < (n - 1))
                {
                    int    p = d + 1;
                    double q = M[p, d];
                    for (int i = d + 2; i < n; i++)
                    {
                        if (Math.Abs(M[i, d]) > Math.Abs(q))
                        {
                            p = i;
                            q = M[i, d];
                        }
                    }

                    Console.WriteLine("pivot = {0}", p);

                    // symmetrically permute the pivot element to M[d+1,d]
                    if (p != d + 1)
                    {
                        // symmetrically permute the pivot element to M[d+1, d]
                        // we have to be a bit careful here, because some permutations will be done
                        // automatically by our SymmetricMatrix class due to symmetry
                        for (int i = 0; i < n; i++)
                        {
                            if ((i == p) || (i == d + 1))
                            {
                                continue;
                            }
                            double t = M[d + 1, i];
                            M[d + 1, i] = M[p, i];
                            M[p, i]     = t;
                        }
                        double tt = M[d + 1, d + 1];
                        M[d + 1, d + 1] = M[p, p];
                        M[p, p]         = tt;

                        // also reorder the affected previously computed elements of L
                        for (int i = 1; i <= d; i++)
                        {
                            double t = L[d + 1, i];
                            L[d + 1, i] = L[p, i];
                            L[p, i]     = t;
                        }

                        Console.WriteLine("after pivot");
                        PrintMatrix(M);
                        PrintMatrix(L);
                    }
                }

                // compute beta (d'th subdiagonal element of T)
                if (d < (n - 1))
                {
                    b[d] = M[d + 1, d];
                    for (int i = 0; i <= d; i++)
                    {
                        Console.WriteLine("n={0} d={1} i={2}", n, d, i);
                        b[d] -= L[d + 1, i] * h[i];
                    }
                }

                // compute (d+1)'th column of L
                for (int i = d + 2; i < n; i++)
                {
                    L[i, d + 1] = M[i, d];
                    for (int j = 0; j <= d; j++)
                    {
                        L[i, d + 1] -= L[i, j] * h[j];
                    }
                    L[i, d + 1] = L[i, d + 1] / b[d];
                }

                PrintLTLMatrices(h, a, b, L);
            }


            Console.WriteLine("Reconstruct");
            SymmetricMatrix T = new SymmetricMatrix(n);

            for (int i = 0; i < n; i++)
            {
                T[i, i] = a[i];
            }
            for (int i = 0; i < (n - 1); i++)
            {
                T[i + 1, i] = b[i];
            }
            SquareMatrix A = L * T * L.Transpose();

            PrintMatrix(A);


            SymmetricMatrix D = new SymmetricMatrix(n);

            for (int i = 0; i < n; i++)
            {
                D[i, i] = a[i];
            }
            for (int i = 0; i < (n - 1); i++)
            {
                D[i + 1, i] = b[i];
            }
            for (int c = 1; c < (n - 1); c++)
            {
                for (int r = c + 1; r < n; r++)
                {
                    D[r, c - 1] = L[r, c];
                }
            }
            AasenDecomposition LTL = new AasenDecomposition(D);

            return (LTL);
        }