// Durbin derived a matrix formulation, later programmed by Marsaglia, that gives P by taking matrix powers.
        // If t = n D = k - h, where k is an integer and h is a fraction, the matrix is (2k - 1) X (2k - 1) and takes the form:
        //        { (1-h)/1!    1           0           0           0        }
        //        { (1-h^2)/2!  1/1!        1           0           0        }
        //    H = { (1-h^3)/3!  1/2!        1/1!        1           0        }
        //        { (1-h^4)/4!  1/3!        1/2!        1/1!        1        }
        //        { *           (1-h^4)/4!  (1-h^3)/3!  (1-h^2)/2!  (1-h)/1! }
        // The lower-left element is special; writing m = 2k - 1 for the dimension of the matrix, it depends on the size of h:
        //    0 < h < 1/2:  * = (1 - 2 h^m) / m!
        //    1/2 < h < 1:  * = (1 - 2 h^m + (2h-1)^m) / m!
        // Note the factor 2 in the first case and the additional term in the second case. The left probability is then given by
        // the middle element of H to the nth power:
        //    P = \frac{n!}{n^n} \left( H^n \right)_{k,k}
        // Note that, for a given value of t, the matrix H does not depend on n; n dependence only enters as the power to which H is taken.

        // While astounding, the matrix method is clearly quite onerous. Because all the entries of H are positive, though, it is not subject to
        // the cancellation errors that make the series solution practically useless in the region t < n/2.

        // See Durbin, "Distribution Theory for Tests Based on the Sample Distribution Function", 1973, and
        // Marsaglia, Tsang, & Wang, "Evaluating Kolmogorov's Distribution", Journal of Statistical Software 8 (2003)

        private double DurbinMatrixP(double t)
        {
            int k; double h;

            DecomposeInteger(t, out k, out h);

            SquareMatrix H = GetDurbinMatrix(k, h);

            SquareMatrix Hn = H.Power(n);

            double f = AdvancedIntegerMath.Factorial(n) / MoreMath.Pow(n, n);

            return(f * Hn[k - 1, k - 1]);
        }
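
        // The GetDurbinMatrix helper invoked above is not shown in this excerpt. Purely as an
        // illustration of the structure described in the comments (not necessarily the library's
        // actual implementation), such a matrix could be assembled along these lines:
        private static SquareMatrix GetDurbinMatrixSketch(int k, double h)
        {
            int m = 2 * k - 1;
            SquareMatrix H = new SquareMatrix(m);
            // generic entries: H[i, j] = 1 / (i - j + 1)! wherever i - j + 1 >= 0
            for (int i = 0; i < m; i++)
            {
                for (int j = 0; (j <= i + 1) && (j < m); j++)
                {
                    H[i, j] = 1.0 / AdvancedIntegerMath.Factorial(i - j + 1);
                }
            }
            // first column: H[i, 0] = (1 - h^{i+1}) / (i+1)!
            for (int i = 0; i < m; i++)
            {
                H[i, 0] = (1.0 - MoreMath.Pow(h, i + 1)) / AdvancedIntegerMath.Factorial(i + 1);
            }
            // last row: H[m-1, j] = (1 - h^{m-j}) / (m-j)!
            for (int j = 1; j < m; j++)
            {
                H[m - 1, j] = (1.0 - MoreMath.Pow(h, m - j)) / AdvancedIntegerMath.Factorial(m - j);
            }
            // lower-left element, with the extra term when h > 1/2
            double g = 1.0 - 2.0 * MoreMath.Pow(h, m);
            if (h > 0.5) g += MoreMath.Pow(2.0 * h - 1.0, m);
            H[m - 1, 0] = g / AdvancedIntegerMath.Factorial(m);
            return (H);
        }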
Example #2
        /// <summary>
        /// Converts central moments to cumulants.
        /// </summary>
        /// <param name="mu">The mean.</param>
        /// <param name="C">A set of central moments.</param>
        /// <returns>The corresponding set of cumulants.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="C"/> is null.</exception>
        /// <exception cref="ArgumentOutOfRangeException">The zeroth central moment is not one, or the first central moment is not zero.</exception>
        public static double[] CentralToCumulant(double mu, double[] C)
        {
            if (C == null)
            {
                throw new ArgumentNullException("C");
            }

            double[] K = new double[C.Length];
            if (K.Length == 0)
            {
                return(K);
            }

            // C0 = 1 and K0 = 0
            if (C[0] != 1.0)
            {
                throw new ArgumentOutOfRangeException("C");
            }
            K[0] = 0.0;
            if (K.Length == 1)
            {
                return(K);
            }

            // C1 = 0 and K1 = M1
            if (C[1] != 0.0)
            {
                throw new ArgumentOutOfRangeException("C");
            }
            K[1] = mu;
            if (K.Length == 2)
            {
                return(K);
            }

            // Determine higher K
            // s = 0 term involves K1 = M1, so ignore
            // s = r - 1 term involves C1 = 0, so ignore
            for (int r = 1; r < K.Length - 1; r++)
            {
                double t = C[r + 1];
                for (int s = 1; s < r - 1; s++)
                {
                    t -= AdvancedIntegerMath.BinomialCoefficient(r, s) * K[s + 1] * C[r - s];
                }
                K[r + 1] = t;
            }

            return(K);
        }
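
        // Illustrative usage only (not part of the library): for a normal distribution with mean 1.5
        // and variance 2, the central moments {1, 0, 2, 0, 12} should convert to the cumulants
        // {0, 1.5, 2, 0, 0}, since all normal cumulants beyond the second vanish.
        public static void CentralToCumulantExample()
        {
            double[] C = new double[] { 1.0, 0.0, 2.0, 0.0, 12.0 };
            double[] K = CentralToCumulant(1.5, C);
            // expect K = { 0.0, 1.5, 2.0, 0.0, 0.0 }
        }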
Example #3
        // Privault, "Generalized Bell polynomials and the combinatorics of Poisson central moments",
        // The Electronic Journal of Combinatorics, 2011
        // (http://www.ntu.edu.sg/home/nprivault/papers/central_moments.pdf),
        // derives the recurrence
        //   C_{r+1} = \mu \sum_{s=0}^{r-1} { r \choose s } C_s
        // for central moments of the Poisson distribution, directly from the definition.

        // For the record, here is the derivation, which I have not found elsewhere. By
        // Poisson PMF, mean, and definition of central moment:
        //   C_{r+1} = e^{-\mu} \sum_{k=0}^{\infty} \frac{\mu^k}{k!} (k - \mu)^{r+1}
        // Expand one factor of (k - \mu) to get
        //   C_{r+1} = e^{-\mu} \sum_{k=1}^{\infty} \frac{\mu^k (k - \mu)^{r}}{(k-1)!}
        //            -e^{-\mu} \sum_{k=0}^{\infty} \frac{\mu^{k+1} (k - \mu)^{r}}{k!}
        // Note k=0 does not contribute to first term because of multiplication by k.
        // Now in first term, redefine dummy summation variable k -> k + 1 to get
        //   C_{r+1} = e^{-\mu} \sum_{k=0}^{\infty} \frac{\mu^{k+1}}{k!}
        //             \left[ (k + 1 - \mu)^{r} - (k - \mu)^{r} \right]
        // Now use the binomial theorem to expand (k + 1 - \mu)^{r} in powers of (k - \mu) and 1.
        //   C_{r+1} = e^{-\mu} \sum_{k=0}^{\infty} \frac{\mu^{k+1}}{k!}
        //             \sum_{s=0}^{r-1} { r \choose s } (k - \mu)^{s}
        // The s=r term is canceled by the subtracted (k - \mu)^{r}, so the binomial sum only goes to s=r-1.
        // Switch the order of the sums and notice e^{-\mu} \sum_{k} \frac{\mu^{k}}{k!} (k - \mu)^{s} = C_{s}
        // to obtain the result. Wow.

        // This is almost, but not quite, the same recurrence as for the raw moments.
        // For the raw moment recursion, the bottom binomial argument runs over its full range.
        // For the central moment recursion, the final value of the bottom binomial argument is left out.
        // Also, for raw moments start the recursion with M_0 = 1, M_1 = \mu. For central moments,
        // start the recursion with C_0 = 1, C_1 = 0.
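
        // As a quick hand check of the recurrence, starting from C_0 = 1 and C_1 = 0:
        //   C_2 = \mu (C_0)                 = \mu
        //   C_3 = \mu (C_0 + 2 C_1)         = \mu
        //   C_4 = \mu (C_0 + 3 C_1 + 3 C_2) = \mu + 3 \mu^2
        // which agree with the well-known low-order central moments of the Poisson distribution.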

        private void ComputePoissonCentralMoments(double[] C)
        {
            for (int r = 2; r < C.Length; r++)
            {
                IEnumerator <double> binomial = AdvancedIntegerMath.BinomialCoefficients(r - 1).GetEnumerator();
                double t = 0.0;
                for (int s = 0; s < r - 1; s++)
                {
                    binomial.MoveNext();
                    t += binomial.Current * C[s];
                }
                C[r] = mu * t;
            }
        }
Example #4
 /// <inheritdoc/>
 public override double ProbabilityMass(int k)
 {
     if ((k < Support.LeftEndpoint) || (k > Support.RightEndpoint))
     {
         return(0.0);
     }
     else
     {
         return(
             AdvancedIntegerMath.BinomialCoefficient(nSuccessPopulation, k) /
             AdvancedIntegerMath.BinomialCoefficient(nPopulation, nDraws) *
             AdvancedIntegerMath.BinomialCoefficient(nFailurePopulation, nDraws - k)
             );
     }
 }
Example #5
 /// <inheritdoc/>
 public override double CentralMoment(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(r));
     }
     else if (r % 2 == 0)
     {
         return(AdvancedIntegerMath.Factorial(r) * Math.Pow(b, r));
     }
     else
     {
         return(0.0);
     }
 }
Example #6
 /// <inheritdoc />
 public override double Cumulant(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(r));
     }
     else if (r == 0)
     {
         return(0.0);
     }
     else
     {
         return(MoreMath.Pow(2, r - 1) * AdvancedIntegerMath.Factorial(r - 1) * nu);
     }
 }
        public void BellNumberAsPoissonMoment()
        {
            // The Bell numbers are the moments of the Poisson distribution
            // with \mu = 1.
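            // (This is Dobinski's formula: B_r = e^{-1} \sum_{k=0}^{\infty} k^r / k!,
            // which is exactly the statement that B_r is the rth raw moment of a Poisson(1) variable.)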

            UnivariateDistribution d = new PoissonDistribution(1.0);

            foreach (int r in TestUtilities.GenerateIntegerValues(1, 50, 4))
            {
                Assert.IsTrue(TestUtilities.IsNearlyEqual(
                                  AdvancedIntegerMath.BellNumber(r),
                                  d.RawMoment(r)
                                  ));
            }
        }
Example #8
 public override double LeftInclusiveProbability(int k)
 {
     if (k < Minimum)
     {
         return(0.0);
     }
     else if (k >= Maximum)
     {
         return(1.0);
     }
     else
     {
         return(LatticePathSum(k * ((int)AdvancedIntegerMath.GCF(n, m))) / AdvancedIntegerMath.BinomialCoefficient(n + m, n));
     }
 }
Example #9
 /// <inheritdoc />
 public override double ProbabilityMass(int k)
 {
     if ((k < 0) || (k > n))
     {
         return(0.0);
     }
     else
     {
         // for small enough integers, use the exact binomial coefficient,
         // which can be evaluated quickly and exactly
         return(AdvancedIntegerMath.BinomialCoefficient(n, k) * MoreMath.Pow(p, k) * MoreMath.Pow(q, n - k));
         // this could fail if the binomial overflows; should we go do factorials or
         // use an expansion around the normal approximation?
     }
 }
 /// <inheritdoc />
 public override double RawMoment(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(r));
     }
     else if (r == 0)
     {
         return(1.0);
     }
     else
     {
         return(AdvancedIntegerMath.Factorial(r) * MoreMath.Pow(mu, r));
     }
 }
Example #11
        // Compute central moments from raw moments via the binomial expansion
        //   C_n = \sum_{k=0}^{n} \binom{n}{k} (-\mu)^k M_{n-k}
        // This is subject to loss of precision from cancellation, so be careful.

        internal virtual double CentralMomentFromRawMoment(int n)
        {
            double m = Mean;

            double mm = 1.0;
            double C  = RawMoment(n);

            for (int k = 1; k <= n; k++)
            {
                mm = mm * (-m);
                C += AdvancedIntegerMath.BinomialCoefficient(n, k) * mm * RawMoment(n - k);
            }

            return(C);
        }
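
        // As a quick check, for n = 2 the loop above produces
        //   C_2 = M_2 - 2 \mu M_1 + \mu^2 M_0 = M_2 - \mu^2
        // (using M_1 = \mu and M_0 = 1), which is the familiar expression for the variance.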
 public void IntegerPartitionSums()
 {
     foreach (int n in TestUtilities.GenerateIntegerValues(1, 100, 5))
     {
         foreach (int[] partition in AdvancedIntegerMath.Partitions(n))
         {
             int s = 0;
             foreach (int i in partition)
             {
                 s += i;
             }
             Assert.IsTrue(s == n);
         }
     }
 }
Example #13
 public override double Cumulant(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException("r");
     }
     else if (r == 0)
     {
         return(0.0);
     }
     else
     {
         return(MoreMath.Pow(2.0, r - 1) * AdvancedIntegerMath.Factorial(r - 1) * (nu + r * lambda));
     }
 }
        public void AssociatedLaguerreOrthonormality()
        {
            // don't let orders get too big, or (1) the Gamma function will overflow and (2) our integral will become highly oscillatory
            foreach (int n in TestUtilities.GenerateIntegerValues(1, 10, 3))
            {
                foreach (int m in TestUtilities.GenerateIntegerValues(1, 10, 3))
                {
                    foreach (double a in TestUtilities.GenerateRealValues(0.1, 10.0, 5))
                    {
                        //int n = 2;
                        //int m = 4;
                        //double a = 3.5;

                        Console.WriteLine("n={0} m={1} a={2}", n, m, a);

                        // evaluate the orthonormal integral
                        Func <double, double> f = delegate(double x) {
                            return(Math.Pow(x, a) * Math.Exp(-x) *
                                   OrthogonalPolynomials.LaguerreL(m, a, x) *
                                   OrthogonalPolynomials.LaguerreL(n, a, x)
                                   );
                        };
                        Interval r = Interval.FromEndpoints(0.0, Double.PositiveInfinity);

                        // need to loosen default evaluation settings in order to get convergence in some of these cases
                        // seems to have most convergence problems for large a
                        IntegrationSettings e = new IntegrationSettings();
                        e.AbsolutePrecision = TestUtilities.TargetPrecision;
                        e.RelativePrecision = TestUtilities.TargetPrecision;

                        double I = FunctionMath.Integrate(f, r, e).Value;
                        Console.WriteLine(I);

                        // test for orthonormality
                        if (n == m)
                        {
                            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                                              I, AdvancedMath.Gamma(n + a + 1) / AdvancedIntegerMath.Factorial(n)
                                              ));
                        }
                        else
                        {
                            Assert.IsTrue(Math.Abs(I) < TestUtilities.TargetPrecision);
                        }
                    }
                }
            }
        }
        // the chi2 test fails when the expected values of some entries are small, because the entry values are not normally distributed
        // in the 2 X 2 case, the probability of any given entry value can be computed; it is given by
        //        N_R1! N_R2! N_C1! N_C2!
        // P = ----------------------------
        //      N_11! N_12! N_21! N_22! N!
        // the exact test computes this probability for the actual table and for all other tables having the same row and column totals
        // the test statistic is the total probability of all tables that are no more probable than the actual table; it is uniformly distributed
        // on [0,1]. if this probability is very small, the actual table is particularly unlikely under the null hypothesis of no correlation
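
        // As a concrete example, for the table {{3, 1}, {1, 3}} all row and column totals are 4 and N = 8, so
        //    P = (4!)^4 / (3! 1! 1! 3! 8!) = 331776 / 1451520 = 8/35 ≈ 0.229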

        /// <summary>
        /// Performs a Fisher exact test.
        /// </summary>
        /// <returns>The results of the test. The test statistic is the summed probability of all tables exhibiting equal or stronger correlations,
        /// and its likelihood under the null hypothesis is the (left) probability of obtaining a smaller value. Note that, in this case, the test
        /// statistic itself is the likelihood.</returns>
        /// <remarks><para>The Fisher exact test tests for correlations between row and column entries. It is a robust, non-parametric test,
        /// which, unlike the &#x3C7;<sup>2</sup> test (see <see cref="ContingencyTable.PearsonChiSquaredTest"/>), can safely be used for tables
        /// with small, even zero-valued, entries.</para>
        /// <para>The Fisher test computes, under the null hypothesis of no correlation, the exact probability of all 2 X 2 tables with the
        /// same row and column totals as the given table. It then sums the probabilities of all tables that are as or less probable than
        /// the given table. In this way it determines the total probability of obtaining a 2 X 2 table which is at least as improbable
        /// as the given one.</para>
        /// <para>The test is two-sided, i.e. when considering less probable tables it does not distinguish between tables exhibiting
        /// the same and the opposite correlation as the given one.</para>
        /// </remarks>
        /// <seealso href="http://en.wikipedia.org/wiki/Fisher_exact_test"/>
        public TestResult FisherExactTest()
        {
            // store row and column totals
            int[] R = this.RowTotals;
            int[] C = this.ColumnTotals;
            int   N = this.Total;

            // compute the critical probability
            double x =
                AdvancedIntegerMath.LogFactorial(R[0]) +
                AdvancedIntegerMath.LogFactorial(R[1]) +
                AdvancedIntegerMath.LogFactorial(C[0]) +
                AdvancedIntegerMath.LogFactorial(C[1]) -
                AdvancedIntegerMath.LogFactorial(N);
            double lnPc = x -
                          AdvancedIntegerMath.LogFactorial(this[0, 0]) -
                          AdvancedIntegerMath.LogFactorial(this[0, 1]) -
                          AdvancedIntegerMath.LogFactorial(this[1, 0]) -
                          AdvancedIntegerMath.LogFactorial(this[1, 1]);

            // compute all possible 2 X 2 matrices with these row and column totals
            // compute the total probability of getting a matrix as or less probable than the measured one
            double P = 0.0;

            int[,] test = new int[2, 2];
            int min = Math.Max(C[0] - R[1], 0);
            int max = Math.Min(R[0], C[0]);

            for (int i = min; i <= max; i++)
            {
                test[0, 0] = i;
                test[0, 1] = R[0] - i;
                test[1, 0] = C[0] - i;
                test[1, 1] = R[1] - test[1, 0];
                double lnP = x -
                             AdvancedIntegerMath.LogFactorial(test[0, 0]) -
                             AdvancedIntegerMath.LogFactorial(test[0, 1]) -
                             AdvancedIntegerMath.LogFactorial(test[1, 0]) -
                             AdvancedIntegerMath.LogFactorial(test[1, 1]);
                if (lnP <= lnPc)
                {
                    P += Math.Exp(lnP);
                }
            }

            // return the result
            return(new TestResult(P, new UniformDistribution(Interval.FromEndpoints(0.0, 1.0))));
        }
Example #16
        private void FirstStepSeriesExpansion(
            out Complex wave,
            out Complex deriv
            )
        {
            int MAX = 10;

            double[] A        = new double[MAX];
            double   DebyeSum = 0;
            double   dx       = WaveVector_fm * StepSize_fm;
            double   DK       = DebyeMass_MeV / WaveVector_fm / Constants.HbarC_MeV_fm;
            double   ZK       = Potential_fm.AlphaEff * Param.QuarkMass_MeV / WaveVector_fm / Constants.HbarC_MeV_fm;
            double   SK       = DebyeMass_MeV == 0 ? 0
                                : SigmaEff_MeV / DebyeMass_MeV / Constants.HbarC_MeV_fm / WaveVector_fm / WaveVector_fm;

            A[0] = 1.0;
            A[1] = 0.5 * ZK / (Param.QuantumNumberL + 1.0);
            for (int j = 2; j < MAX; j++)
            {
                // the convolution sum is specific to each j, so reset it before accumulating
                DebyeSum = 0;
                for (int k = 0; k <= j - 2; k++)
                {
                    DebyeSum += Math.Pow(-DK, k) / AdvancedIntegerMath.Factorial(k)
                                * (SK + DK * ZK / (k + 1.0)) * A[j - 2 - k];
                }

                A[j] = (ZK * A[j - 1] - A[j - 2] - DebyeSum)
                       / (j + 2.0 * Param.QuantumNumberL + 1.0) / j;
            }

            if (Math.Abs(A[MAX - 1] * Math.Pow(dx, MAX - 1)) > 1e-14)
            {
                throw new Exception("Last term is suspiciously large.");
            }

            double sum  = 0;
            double dsum = 0;

            for (int j = 0; j < MAX; j++)
            {
                // x = WaveVector*Radius
                sum  += A[j] * Math.Pow(dx, j + Param.QuantumNumberL + 1);
                dsum += (j + Param.QuantumNumberL + 1) * A[j]
                        * Math.Pow(dx, j + Param.QuantumNumberL);
            }

            wave  = new Complex(sum, 0);
            deriv = new Complex(dsum * WaveVector_fm, 0);             // <- factor of WaveVector is important
        }
        public void BellNumberRecurrence()
        {
            // B_{n+1} = \sum_{k=0}^{n} {n \choose k} B_k

            foreach (int r in TestUtilities.GenerateIntegerValues(50, 100, 2))
            {
                double s = 0.0;
                IEnumerator <double> b = AdvancedIntegerMath.BinomialCoefficients(r).GetEnumerator();
                for (int k = 0; k <= r; k++)
                {
                    b.MoveNext();
                    s += b.Current * AdvancedIntegerMath.BellNumber(k);
                }
                Assert.IsTrue(TestUtilities.IsNearlyEqual(s, AdvancedIntegerMath.BellNumber(r + 1)));
            }
        }
Example #18
 /// <inheritdoc/>
 public override double Cumulant(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(r));
     }
     else if (r == 0)
     {
         return(0.0);
     }
     else
     {
         // This expression for the cumulant appears in the Wikipedia article.
         return(MoreMath.Pow(2.0, r - 1) * (nu + r * lambda) * AdvancedIntegerMath.Factorial(r - 1));
     }
 }
 public void ComplexReimannZetaPrimesTest () {
      // pick values with real part high enough that 1 - p^{-z} == 1 within double precision before we reach the end of our list of primes
     foreach (Complex z in TestUtilities.GenerateComplexValues(1.0, 100.0, 8)) {
         Complex zz = z;
         if (zz.Re < 0.0) zz = -zz;
         zz += 10.0;
         Console.WriteLine(zz);
         Complex f = 1.0;
         for (int p = 2; p < 100; p++) {
             if (!AdvancedIntegerMath.IsPrime(p)) continue;
             Complex t = Complex.One - ComplexMath.Pow(p, -zz);
             if (t == Complex.One) break;
             f = f * t;
         }
         Assert.IsTrue(TestUtilities.IsNearlyEqual(1.0 / AdvancedComplexMath.RiemannZeta(zz), f));
     }
 }
        public void PrimeSpecialCases()
        {
            // small numbers
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(1));
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(2));
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(3));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(4));
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(5));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(6));
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(7));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(8));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(9));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(10));

            // Mersenne prime candidates
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(3));          // 2^2 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(7));          // 2^3 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(31));         // 2^5 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(127));        // 2^7 - 1
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(2047));      // 2^11 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(8191));       // 2^13 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(131071));     // 2^17 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(524287));     // 2^19 - 1
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(8388607));   // 2^23 - 1
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(536870911)); // 2^29 - 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(2147483647)); // 2^31 - 1

            // Pseudoprimes
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(2047));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(3277));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(4033));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(121));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(703));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(1891));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(781));
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(1541));

            // Fermat primes
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(5));     // 2^2 + 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(17));    // 2^4 + 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(257));   // 2^8 + 1
            Assert.IsTrue(AdvancedIntegerMath.IsPrime(65537)); // 2^16 + 1

            // Euler's nonprime
            Assert.IsFalse(AdvancedIntegerMath.IsPrime(1000009)); // 1000009 = 293 * 3413
        }
Example #21
        public static void ComputeSpecialFunctions()
        {
            // Compute the value x at which erf(x) is just 10^{-15} from 1.
            double x = AdvancedMath.InverseErfc(1.0E-15);

            // The Gamma function at 1/2 is sqrt(pi)
            double y = AdvancedMath.Gamma(0.5);

            // Compute a Coulomb Wave Function in the quantum tunneling region
            SolutionPair s = AdvancedMath.Coulomb(2, 4.5, 3.0);

            // Compute the Riemann Zeta function at a complex value
            Complex z = AdvancedComplexMath.RiemannZeta(new Complex(0.75, 6.0));

            // Compute the central binomial coefficient C(100, 50)
            double c = AdvancedIntegerMath.BinomialCoefficient(100, 50);
        }
 //[TestMethod]
 // Cancellation too bad to be useful
 public void BernoulliStirlingRelationship()
 {
     // This involves significant cancellation, so don't pick n too high
     foreach (int n in TestUtilities.GenerateIntegerValues(2, 16, 4))
     {
         double S = 0.0;
         for (int k = 0; k <= n; k++)
         {
             double dS = AdvancedIntegerMath.Factorial(k) / (k + 1) * AdvancedIntegerMath.StirlingNumber2(n, k);
             if (k % 2 != 0)
             {
                 dS = -dS;
             }
             S += dS;
         }
         Assert.IsTrue(TestUtilities.IsNearlyEqual(S, AdvancedIntegerMath.BernoulliNumber(n)));
     }
 }
Example #23
        public void CatalanHankelMatrixDeterminant()
        {
            for (int d = 1; d <= 8; d++)
            {
                SymmetricMatrix S = new SymmetricMatrix(d);
                for (int r = 0; r < d; r++)
                {
                    for (int c = 0; c <= r; c++)
                    {
                        int n = r + c;
                        S[r, c] = AdvancedIntegerMath.BinomialCoefficient(2 * n, n) / (n + 1);
                    }
                }

                CholeskyDecomposition CD = S.CholeskyDecomposition();
                Assert.IsTrue(TestUtilities.IsNearlyEqual(CD.Determinant(), 1.0));
            }
        }
        public void StirlingNumbers2ColumnSum()
        {
            foreach (int n in TestUtilities.GenerateIntegerValues(1, 100, 4))
            {
                foreach (int k in TestUtilities.GenerateIntegerValues(1, n, 4))
                {
                    double sum = 0.0;
                    for (int m = k; m <= n; m++)
                    {
                        sum +=
                            AdvancedIntegerMath.BinomialCoefficient(n, m) *
                            AdvancedIntegerMath.StirlingNumber2(m, k);
                    }

                    Assert.IsTrue(TestUtilities.IsNearlyEqual(sum, AdvancedIntegerMath.StirlingNumber2(n + 1, k + 1)));
                }
            }
        }
 public void BinomialCoefficientRecurrence()
 {
     // this is the Pascal triangle recurrence
     foreach (int n in TestUtilities.GenerateIntegerValues(1, 100, 5))
     {
         foreach (int m in TestUtilities.GenerateUniformIntegerValues(0, n - 1, 5))
         {
             Console.WriteLine("n = {0}, m = {1}", n, m);
             Console.WriteLine(AdvancedIntegerMath.BinomialCoefficient(n + 1, m + 1));
             Console.WriteLine(AdvancedIntegerMath.BinomialCoefficient(n, m + 1));
             Console.WriteLine(AdvancedIntegerMath.BinomialCoefficient(n, m));
             Assert.IsTrue(TestUtilities.IsNearlyEqual(
                               AdvancedIntegerMath.BinomialCoefficient(n + 1, m + 1),
                               AdvancedIntegerMath.BinomialCoefficient(n, m) + AdvancedIntegerMath.BinomialCoefficient(n, m + 1)
                               ));
         }
     }
 }
        public void StirlingNumberMatrixInverse()
        {
            int n = 8;

            SquareMatrix S1 = new SquareMatrix(n);

            for (int i = 0; i < n; i++)
            {
                double[] s = AdvancedIntegerMath.StirlingNumbers1(i);
                for (int j = 0; j < s.Length; j++)
                {
                    if ((i - j) % 2 == 0)
                    {
                        S1[i, j] = s[j];
                    }
                    else
                    {
                        S1[i, j] = -s[j];
                    }
                }
            }

            SquareMatrix S2 = new SquareMatrix(n);

            for (int i = 0; i < n; i++)
            {
                double[] s = AdvancedIntegerMath.StirlingNumbers2(i);
                for (int j = 0; j < s.Length; j++)
                {
                    S2[i, j] = s[j];
                }
            }

            SquareMatrix S12 = S1 * S2;

            SquareMatrix I = new SquareMatrix(n);

            for (int i = 0; i < n; i++)
            {
                I[i, i] = 1.0;
            }

            Assert.IsTrue(TestUtilities.IsNearlyEqual(S12, I));
        }
Example #27
 /// <inheritdoc />
 public override double CentralMoment(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(r));
     }
     else if (r == 0)
     {
         return(1.0);
     }
     else if (r % 2 != 0)
     {
         return(0.0);
     }
     else
     {
         return(2.0 * AdvancedIntegerMath.Factorial(r) * AdvancedMath.DirichletEta(r) * MoreMath.Pow(s, r));
     }
 }
Example #28
        public void TwoSampleKolmogorovNullDistributionTest()
        {
            ContinuousDistribution population = new ExponentialDistribution();

            int[] sizes = new int[] { 23, 30, 175 };

            foreach (int na in sizes)
            {
                foreach (int nb in sizes)
                {
                    Console.WriteLine("{0} {1}", na, nb);

                    Sample d = new Sample();
                    ContinuousDistribution nullDistribution = null;
                    for (int i = 0; i < 128; i++)
                    {
                        Sample a = TestUtilities.CreateSample(population, na, 31415 + na + i);
                        Sample b = TestUtilities.CreateSample(population, nb, 27182 + nb + i);

                        TestResult r = Sample.KolmogorovSmirnovTest(a, b);
                        d.Add(r.Statistic);
                        nullDistribution = r.Distribution;
                    }
                    // Only do full KS test if the number of bins is larger than the sample size, otherwise we are going to fail
                    // because the KS test detects the granularity of the distribution
                    TestResult mr = d.KolmogorovSmirnovTest(nullDistribution);
                    Console.WriteLine(mr.LeftProbability);
                    if (AdvancedIntegerMath.LCM(na, nb) > d.Count)
                    {
                        Assert.IsTrue(mr.LeftProbability < 0.99);
                    }
                    // But always test that mean and standard deviation are as expected
                    Console.WriteLine("{0} {1}", nullDistribution.Mean, d.PopulationMean.ConfidenceInterval(0.99));
                    Assert.IsTrue(d.PopulationMean.ConfidenceInterval(0.99).ClosedContains(nullDistribution.Mean));
                    Console.WriteLine("{0} {1}", nullDistribution.StandardDeviation, d.PopulationStandardDeviation.ConfidenceInterval(0.99));
                    Assert.IsTrue(d.PopulationStandardDeviation.ConfidenceInterval(0.99).ClosedContains(nullDistribution.StandardDeviation));
                    Console.WriteLine("{0} {1}", nullDistribution.CentralMoment(3), d.PopulationCentralMoment(3).ConfidenceInterval(0.99));
                    //Assert.IsTrue(d.PopulationMomentAboutMean(3).ConfidenceInterval(0.99).ClosedContains(nullDistribution.MomentAboutMean(3)));

                    //Console.WriteLine("m {0} {1}", nullDistribution.Mean, d.PopulationMean);
                }
            }
        }
 /// <inheritdoc />
 public override double ProbabilityMass(int k)
 {
     if (k < 0)
     {
         return(0.0);
     }
     else
     {
         // These are the same expression, but the form for small arguments is faster,
         // while the form for large arguments avoids overflow and cancellation errors.
         if (k < 16)
         {
             return(Math.Exp(-mu) * MoreMath.Pow(mu, k) / AdvancedIntegerMath.Factorial(k));
         }
         else
         {
             return(Stirling.PoissonProbability(mu, k));
         }
     }
 }
 /// <inheritdoc />
 public override double CentralMoment(int r)
 {
     if (r < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(r));
     }
     else if (r == 0)
     {
         return(1.0);
     }
     else if ((r % 2) == 0)
     {
         // (r-1)!! \sigma^r
         return(AdvancedIntegerMath.DoubleFactorial(r - 1) * MoreMath.Pow(sigma, r));
     }
     else
     {
         return(0.0);
     }
 }