Example #1
        private static double PolyLog_BernoulliSum(int n, double w)
        {
            double s = 0.0;

            for (int k = n; k > 1; k--)
            {
                double ds = (1.0 - MoreMath.Pow(2.0, 1 - k)) * Math.Abs(AdvancedIntegerMath.BernoulliNumber(k)) *
                            MoreMath.Pow(Global.TwoPI, k) / AdvancedIntegerMath.Factorial(k) *
                            MoreMath.Pow(w, n - k) / AdvancedIntegerMath.Factorial(n - k);
                s -= ds;
            }
            s -= MoreMath.Pow(w, n) / AdvancedIntegerMath.Factorial(n);
            return(s);
        }
Example #2
        public long Order()
        {
            // Since the identity is obtained when every cycle completes at the same time, this is just the LCM of all the cycle lengths.
            // There is no known simple closed expression for this, but it is known to grow like e^{\sqrt{n \ln n}} for large n.
            // See http://mathworld.wolfram.com/LandausFunction.html.
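            // For example, a permutation consisting of a 2-cycle and a 3-cycle has order LCM(2, 3) = 6.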
            long order = 1;

            foreach (int[] cycle in cycles)
            {
                order = AdvancedIntegerMath.LCM(order, cycle.Length);
            }
            // Will overflow for Int32 around n ~ 100, for Int64 around n ~ 300.
            return(order);
        }
Example #3
        // Li_n(x) = \sum_{k=0, k \ne n-1}^{\infty} \zeta(n-k) \frac{\log^k x}{k!} + \frac{\log^{n-1} x}{(n-1)!} \left( H_{n-1} - \log(-\log x) \right)
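        // For example, at x = 1 (\log x = 0) only the k = 0 term survives and the second term vanishes,
        // so Li_n(1) = \zeta(n), e.g. Li_2(1) = \pi^2 / 6; the lnx == 0 early return below handles exactly this case.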

        private static double PolyLog_LogSeries(int n, double x)
        {
            double lnx = Math.Log(x);

            double f = AdvancedMath.RiemannZeta(n);

            if (lnx == 0.0)
            {
                return(f);
            }

            // c stores [log(x)]^k / k!
            double c = 1.0;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double f_old = f;
                c *= lnx / k;
                // argument of zeta
                int m = n - k;
                if (m < 0)
                {
                    // For negative integer arguments, use \zeta(-q) = -\frac{B_{q+1}}{q+1}; since odd Bernoulli numbers (beyond B_1) vanish, \zeta vanishes at negative even integers and those terms can be skipped.
                    if (-m % 2 == 0)
                    {
                        continue;
                    }
                    // This could theoretically overrun our stored Bernoulli values, but if we haven't converged after 32 negative terms, we are in trouble.
                    //f += c * AdvancedIntegerMath.Bernoulli[(-m + 1) / 2] / (-m + 1);
                    f += c * AdvancedMath.RiemannZeta(m);
                }
                else if (m == 1)
                {
                    // Special term in place of \zeta(1).
                    f += c * (AdvancedIntegerMath.HarmonicNumber(n - 1) - Math.Log(-lnx));
                }
                else
                {
                    // Otherwise just compute \zeta(m).
                    // We could reduce even m to Bernoulli references but then we would be in trouble for n > 32.
                    f += c * AdvancedMath.RiemannZeta(m);
                }
                if (f == f_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
Example #4
        // ( J1  J2    J1+J2  )
        // ( M1  M2  -(M1+M2) )
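        // This is the "stretched" case J3 = J1 + J2, for which the 3j symbol has the standard closed form
        //   ( j1 j2 j1+j2 ; m1 m2 -(m1+m2) ) = (-1)^{j1 - j2 + m1 + m2}
        //     \sqrt{ \frac{(2 j1)! (2 j2)! (j1 + j2 + m1 + m2)! (j1 + j2 - m1 - m2)!}{(2 j1 + 2 j2 + 1)! (j1 + m1)! (j1 - m1)! (j2 + m2)! (j2 - m2)!} }
        // The log-space evaluation below computes the magnitude of this expression; any overall phase is assumed to be applied by the caller.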
        private static double ThreeJ_MaxJ(SpinState j1, SpinState j2)
        {
            double r = AdvancedIntegerMath.LogFactorial(j1.TwoJ) +
                       AdvancedIntegerMath.LogFactorial(j2.TwoJ) +
                       AdvancedIntegerMath.LogFactorial(j1.JPlusM + j2.JPlusM) +
                       AdvancedIntegerMath.LogFactorial(j1.JMinusM + j2.JMinusM) -
                       AdvancedIntegerMath.LogFactorial(j1.TwoJ + j2.TwoJ + 1) -
                       AdvancedIntegerMath.LogFactorial(j1.JPlusM) -
                       AdvancedIntegerMath.LogFactorial(j1.JMinusM) -
                       AdvancedIntegerMath.LogFactorial(j2.JPlusM) -
                       AdvancedIntegerMath.LogFactorial(j2.JMinusM);

            r = Math.Exp(r / 2.0);
            return(r);
        }
        private static void FactorByPollardsRhoMethod(List <Element> factors, ref int n)
        {
            int x = 5; int y = 2; int k = 1; int l = 1;

            for (int c = 0; c < Global.SeriesMax; c++)
            {
                //while (true) {
                int g = AdvancedIntegerMath.GCD(Math.Abs(y - x), n);
                if (g == n)
                {
                    // the factor n will repeat itself indefinitely; either n is prime or the method has failed
                    return;
                }
                else if (g == 1)
                {
                    k--;
                    if (k == 0)
                    {
                        y = x;
                        l = 2 * l;
                        k = l;
                    }
                    // take x <- (x^2 + 1) mod n
                    x = AdvancedIntegerMath.PowMod(x, 2, n) + 1;
                    if (x == n)
                    {
                        x = 0;
                    }
                }
                else
                {
                    // g is a factor of n; in all likelihood, it is prime, although this isn't guaranteed
                    // for our current approximate-factoring purposes, we will assume it is prime
                    // it is at least co-prime to all other recognized factors
                    int m = 0;
                    while (n % g == 0)
                    {
                        n = n / g;
                        x = x % n;
                        y = y % n;
                        m++;
                    }
                    factors.Add(new Element(g, m));
                }
            }
        }
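        // A minimal standalone sketch of the same idea, using Floyd's tortoise-and-hare cycle detection in place of
        // the Brent-style doubling (the k/l bookkeeping) above. Illustrative only, not part of the library; n is
        // assumed to be a composite greater than 1.
        private static int PollardRhoSketch(int n)
        {
            int x = 2; int y = 2; int d = 1;

            while (d == 1)
            {
                // advance x once and y twice through the map x <- (x^2 + 1) mod n
                x = (int)((1L * x * x + 1) % n);
                y = (int)((1L * y * y + 1) % n);
                y = (int)((1L * y * y + 1) % n);
                d = AdvancedIntegerMath.GCD(Math.Abs(x - y), n);
            }
            // d is a non-trivial factor of n, or n itself if the cycle closed without exposing a factor (method failed)
            return(d);
        }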
        // Legendre polynomials normalized for their use in the spherical harmonics

        /// <summary>
        /// Computes the value of an associated Legendre polynomial.
        /// </summary>
        /// <param name="l">The order, which must be non-negative.</param>
        /// <param name="m">The associated order, which must lie between -l and l inclusive.</param>
        /// <param name="x">The argument, which must lie on the closed interval between -1 and +1.</param>
        /// <returns>The value of P<sub>l,m</sub>(x).</returns>
        /// <remarks>
        /// <para>Associated Legendre polynomials appear in the definition of the <see cref="AdvancedMath.SphericalHarmonic"/> functions.</para>
        /// <para>For values of l and m over about 150, values of this polynomial can exceed the representable range of double-precision floating point numbers.</para>
        /// </remarks>
        /// <seealso href="http://en.wikipedia.org/wiki/Associated_Legendre_polynomials"/>
        public static double LegendreP(int l, int m, double x)
        {
            if (l < 0)
            {
                throw new ArgumentOutOfRangeException(nameof(l));
            }

            //if (l < 0) {
            //    return (LegendreP(-l + 1, m, x));
            //}

            if (Math.Abs(m) > l)
            {
                throw new ArgumentOutOfRangeException(nameof(m));
            }

            double f;

            if (l < 10)
            {
                // for low enough orders, we can get the factorial quickly from a table look-up and without danger of overflow
                f = Math.Sqrt(AdvancedIntegerMath.Factorial(l + m) / AdvancedIntegerMath.Factorial(l - m));
            }
            else
            {
                // for higher orders, we must move into log space to avoid overflow
                f = Math.Exp((AdvancedIntegerMath.LogFactorial(l + m) - AdvancedIntegerMath.LogFactorial(l - m)) / 2.0);
            }

            if (m < 0)
            {
                m = -m;
                if (m % 2 != 0)
                {
                    f = -f;
                }
            }

            if (Math.Abs(x) > 1.0)
            {
                throw new ArgumentOutOfRangeException(nameof(x));
            }

            return(f * LegendrePe(l, m, x));
        }
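        // A minimal usage sketch (assuming, as the cref above suggests, that this method is exposed on AdvancedMath):
        // for m = 0 the associated polynomial reduces to the ordinary Legendre polynomial, e.g. P_{2,0}(x) = (3 x^2 - 1) / 2, so
        //   double p = AdvancedMath.LegendreP(2, 0, 0.5);   // = (3 * 0.25 - 1) / 2 = -0.125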
        // The Borwein coefficients e_k = \sum_{j=k}^{n} \binom{n}{j}, i.e.
        //   e_n = \binom{n}{n}
        //   e_{n-1} = \binom{n}{n} + \binom{n}{n-1}
        // etc.
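        // For example, for n = 3 the binomial coefficients are 1, 3, 3, 1 and the routine below returns
        // e = { 7/8, 4/8, 1/8 }; relative to the definition above, the stored array is shifted down one index
        // (e[k] holds e_{k+1}) and normalized by 2^n.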

        private static double[] ComputeBorweinEtaCoefficients(int n)
        {
            double norm = MoreMath.Pow(2.0, n);

            double[] e   = new double[n];
            double   sum = 0.0;

            IEnumerator <double> binomials = AdvancedIntegerMath.BinomialCoefficients(n).GetEnumerator();

            for (int k = n - 1; k >= 0; k--)
            {
                binomials.MoveNext();
                sum += binomials.Current;
                e[k] = sum / norm;
            }

            return(e);
        }
        /// <summary>
        /// Computes a Stirling number of the second kind.
        /// </summary>
        /// <param name="n">The upper argument, which must be non-negative.</param>
        /// <param name="k">The lower argument, which must lie between 0 and n.</param>
        /// <returns>The value of the Stirling number of the second kind.</returns>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative, or <paramref name="k"/>
        /// lies outside [0, n].</exception>
        /// <seealso href="https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind"/>
        public static double StirlingNumber2(int n, int k)
        {
            if (n < 0)
            {
                throw new ArgumentOutOfRangeException(nameof(n));
            }
            if ((k < 0) || (k > n))
            {
                throw new ArgumentOutOfRangeException(nameof(k));
            }

            if ((k == 1) || (k == n))
            {
                return(1.0);
            }
            else if (k == 0)
            {
                return(0.0);
                // The exceptional value 1 for n = k = 0 will already have been returned by the previous case.
            }
            else if (k == 2)
            {
                return(Math.Round(MoreMath.Pow(2.0, n - 1) - 1.0));
            }
            else if (k == (n - 1))
            {
                return(AdvancedIntegerMath.BinomialCoefficient(n, 2));
            }
            else
            {
                double[] s = Stirling2_Recursive(n, k);
                return(s[k]);
            }

            // There is an explicit formula for Stirling numbers
            //   { n \brace k } = \frac{1}{k!} \sum_{j=0}^{k} (-1)^j { k \choose j } (k - j)^n
            // which would be faster than recursion, but it has large cancellations between
            // terms. We could try to use it when all values are less than 2^52, for which
            // double arithmetic is exact for integers. For k!, that means k < 18. For the
            // largest term in the sum, for all allowed k, that means n < 14.
        }
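        // A minimal sketch of the explicit-sum alternative mentioned in the comment above. Illustrative only;
        // it suffers exactly the large cancellations between terms described there, so it is not used here.
        private static double Stirling2_ExplicitSumSketch(int n, int k)
        {
            double sum = 0.0;

            for (int j = 0; j <= k; j++)
            {
                double term = AdvancedIntegerMath.BinomialCoefficient(k, j) * MoreMath.Pow(k - j, n);
                if (j % 2 != 0)
                {
                    term = -term;
                }
                sum += term;
            }
            return(sum / AdvancedIntegerMath.Factorial(k));
        }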
Example #9
        // ( J1  J2  J3 )
        // ( 0   0   0  )
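        // With all m's zero, the 3j symbol has the standard closed form (J = j1 + j2 + j3; the symbol vanishes unless J is even)
        //   ( j1 j2 j3 ; 0 0 0 ) = (-1)^{J/2} \sqrt{ \frac{(J - 2 j1)! (J - 2 j2)! (J - 2 j3)!}{(J + 1)!} } \frac{(J/2)!}{(J/2 - j1)! (J/2 - j2)! (J/2 - j3)!}
        // which is what the log-space evaluation below computes.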
        private static double ThreeJ_ZeroM(SpinState s1, SpinState s2, SpinState s3)
        {
            Debug.Assert(s1.TwoM == 0);
            Debug.Assert(s2.TwoM == 0);
            Debug.Assert(s3.TwoM == 0);

            int j = (s1.TwoJ + s2.TwoJ + s3.TwoJ) / 2;

            if (j % 2 != 0)
            {
                // zero by symmetry
                return(0.0);
            }
            else
            {
                double f = -AdvancedIntegerMath.LogFactorial(j + 1)
                           + AdvancedIntegerMath.LogFactorial(j - s1.TwoJ)
                           + AdvancedIntegerMath.LogFactorial(j - s2.TwoJ)
                           + AdvancedIntegerMath.LogFactorial(j - s3.TwoJ);
                f = Math.Exp(f / 2.0);

                int    k = j / 2;
                double g = AdvancedIntegerMath.LogFactorial(k)
                           - AdvancedIntegerMath.LogFactorial(k - s1.TwoJ / 2)
                           - AdvancedIntegerMath.LogFactorial(k - s2.TwoJ / 2)
                           - AdvancedIntegerMath.LogFactorial(k - s3.TwoJ / 2);
                g = Math.Exp(g);

                if (k % 2 != 0)
                {
                    return(-f * g);
                }
                else
                {
                    return(f * g);
                }
            }
        }
Example #10
        private static double ThreeJ_Any(SpinState j1, SpinState j2, SpinState j3)
        {
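            // This evaluates the standard Racah single-sum formula for a general 3j symbol:
            //   ( j1 j2 j3 ; m1 m2 m3 ) = (-1)^{j1 - j2 - m3}
            //     \sqrt{ \frac{(j1 + j2 - j3)! (j1 - j2 + j3)! (-j1 + j2 + j3)!}{(j1 + j2 + j3 + 1)!} (j1 + m1)! (j1 - m1)! (j2 + m2)! (j2 - m2)! (j3 + m3)! (j3 - m3)! }
            //     \sum_{k} \frac{(-1)^k}{k! (j1 + j2 - j3 - k)! (j1 - m1 - k)! (j2 + m2 - k)! (j3 - j2 + m1 + k)! (j3 - j1 - m2 + k)!}
            // The prefactor f below is the signed square root; the sum over k is accumulated in g.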
            // compute prefactor

            double f1 = 0.0
                        + AdvancedIntegerMath.LogFactorial((j1.TwoJ + j2.TwoJ - j3.TwoJ) / 2)
                        + AdvancedIntegerMath.LogFactorial((j1.TwoJ - j2.TwoJ + j3.TwoJ) / 2)
                        + AdvancedIntegerMath.LogFactorial((-j1.TwoJ + j2.TwoJ + j3.TwoJ) / 2)
                        - AdvancedIntegerMath.LogFactorial((j1.TwoJ + j2.TwoJ + j3.TwoJ) / 2 + 1);
            double f2 = 0.0
                        + AdvancedIntegerMath.LogFactorial(j1.JPlusM)
                        + AdvancedIntegerMath.LogFactorial(j1.JMinusM)
                        + AdvancedIntegerMath.LogFactorial(j2.JPlusM)
                        + AdvancedIntegerMath.LogFactorial(j2.JMinusM)
                        + AdvancedIntegerMath.LogFactorial(j3.JPlusM)
                        + AdvancedIntegerMath.LogFactorial(j3.JMinusM);
            double f = Math.Exp((f1 + f2) / 2.0);

            if ((j1.JPlusM - j2.JMinusM) % 2 != 0)
            {
                f = -f;
            }
            Console.WriteLine("f={0}", f);

            // determine maximum and minimum values of k in sum

            int kmin = 0;
            int k23  = j2.JPlusM - j3.JMinusM;

            if (kmin < k23)
            {
                kmin = k23;
            }
            int k13 = j1.JMinusM - j3.JPlusM;

            if (kmin < k13)
            {
                kmin = k13;
            }

            int k123 = (j1.TwoJ + j2.TwoJ - j3.TwoJ) / 2;
            int kmax = k123;
            int k1   = j1.JMinusM;

            if (k1 < kmax)
            {
                kmax = k1;
            }
            int k2 = j2.JPlusM;

            if (k2 < kmax)
            {
                kmax = k2;
            }


            Console.WriteLine("{0} <= k <= {1}", kmin, kmax);
            Debug.Assert(kmin <= kmax);

            // compute the sum

            double g = 0.0;

            for (int k = kmin; k <= kmax; k++)
            {
                double gt = AdvancedIntegerMath.LogFactorial(k)
                            + AdvancedIntegerMath.LogFactorial(k123 - k)
                            + AdvancedIntegerMath.LogFactorial(k1 - k)
                            + AdvancedIntegerMath.LogFactorial(k2 - k)
                            + AdvancedIntegerMath.LogFactorial(k - k13)
                            + AdvancedIntegerMath.LogFactorial(k - k23);
                gt = Math.Exp(-gt);
                if (k % 2 != 0)
                {
                    gt = -gt;
                }
                g += gt;
            }
            Console.WriteLine("g={0}", g);

            return(f * g);
        }
Example #11
        /// <summary>
        /// Computes the polygamma function.
        /// </summary>
        /// <param name="n">The order, which must be non-negative.</param>
        /// <param name="x">The argument.</param>
        /// <returns>The value of &#968;<sub>n</sub>(x).</returns>
        /// <remarks>
        /// <para>The polygamma function gives higher logarithmic derivatives of the Gamma function.</para>
        /// </remarks>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
        /// <seealso href="http://en.wikipedia.org/wiki/Polygamma_function"/>
        /// <seealso href="http://mathworld.wolfram.com/PolygammaFunction.html"/>
        public static double Psi(int n, double x)
        {
            if (n < 0)
            {
                throw new ArgumentOutOfRangeException(nameof(n));
            }

            // for n=0, use normal digamma algorithm
            if (n == 0)
            {
                return(Psi(x));
            }

            // for non-positive x, use the reflection formula
            if (x <= 0.0)
            {
                if (x == Math.Ceiling(x))
                {
                    return(Double.NaN);
                }
                else
                {
                    // use the reflection formula
                    // this requires that we compute the nth derivative of cot(pi x)
                    // I was able to do this, but my algorithm is O(n^2), so for large n and not-so-negative x this is probably sub-optimal
                    double y = Math.PI * x;
                    double d = -EvaluateCotDerivative(ComputeCotDerivative(n), y) * MoreMath.Pow(Math.PI, n + 1);
                    if (n % 2 == 0)
                    {
                        return(d + Psi(n, 1.0 - x));
                    }
                    else
                    {
                        return(d - Psi(n, 1.0 - x));
                    }
                }
            }

            // compute the minimum x required for our asymptotic series to converge
            // my original approximation was to say that, for 1 << k << n, the factorials approach e^(2k),
            // so to achieve convergence to 10^(-16) at the 8th term we should need x ~ 10 e ~ 27;
            // unfortunately this estimate is both crude and an underestimate, so for now I use an empirical relationship instead
            double xm = 16.0 + 2.0 * n;

            // by repeatedly using \psi(n,z) = \psi(n,z+1) - (-1)^n n! / z^(n+1),
            // increase x until it is large enough to use the asymptotic series
            // keep track of the accumulated shifts in the variable s
            double s = 0.0;

            while (x < xm)
            {
                s += 1.0 / MoreMath.Pow(x, n + 1);
                x += 1.0;
            }

            // now that x is big enough, use the asymptotic series
            // \psi(n,z) = - (-1)^n (n-1)! / z^n [ 1 + n / 2 z + \sum_{k=1}^{\infty} (2k+n-1)! / (2k)! / (n-1)! * B_{2k} / z^{2k} ]
            double t  = 1.0 + n / (2.0 * x);
            double x2 = x * x;
            double t1 = n * (n + 1) / 2.0 / x2;

            for (int i = 1; i < AdvancedIntegerMath.Bernoulli.Length; i++)
            {
                double t_old = t;
                t += AdvancedIntegerMath.Bernoulli[i] * t1;
                if (t == t_old)
                {
                    double g = AdvancedIntegerMath.Factorial(n - 1) * (t / MoreMath.Pow(x, n) + n * s);
                    if (n % 2 == 0)
                    {
                        g = -g;
                    }
                    return(g);
                }
                int i2 = 2 * i;
                t1 *= 1.0 * (n + i2) * (n + i2 + 1) / (i2 + 2) / (i2 + 1) / x2;
            }
            throw new NonconvergenceException();

            // for small x, the s-part strongly dominates the t-part, so it isn't actually necessary for us to determine t
            // very accurately; in the future, we should modify this code to allow the t-series to converge when its overall
            // contribution no longer matters, rather than requiring t to converge to full precision
        }
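        // A minimal usage sketch (assuming this overload is exposed on AdvancedMath alongside the digamma Psi(x) used above):
        //   double t = AdvancedMath.Psi(1, 1.0);   // trigamma at 1, which equals \zeta(2) = \pi^2 / 6 = 1.6449...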
        private static double Hypergeometric2F1_Series_OneMinusX(double a, double b, int m, double e, double x1)
        {
            Debug.Assert(m >= 0);
            Debug.Assert(Math.Abs(e) <= 0.5);
            Debug.Assert(Math.Abs(x1) <= 0.75);

            double c = a + b + m + e;

            // Compute all the gammas we will use.
            double g_c    = AdvancedMath.Gamma(c);
            double rg_am  = 1.0 / AdvancedMath.Gamma(a + m);
            double rg_bm  = 1.0 / AdvancedMath.Gamma(b + m);
            double rg_ame = 1.0 / AdvancedMath.Gamma(a + m + e);
            double rg_bme = 1.0 / AdvancedMath.Gamma(b + m + e);
            double rg_m1e = 1.0 / AdvancedMath.Gamma(m + 1 + e);

            // Pochhammer product, keeps track of (a)_m (b)_m (x')^m
            double p = 1.0;

            // First compute the finite sum, which contains no divergent terms even for e = 0.
            double f0 = 0.0;

            if (m > 0)
            {
                double t0 = 1.0;
                f0 = t0;
                for (int k = 1; k < m; k++)
                {
                    int km1 = k - 1;
                    p  *= (a + km1) * (b + km1) * x1;
                    t0 *= 1.0 / (1.0 - m - e + km1) / k;
                    f0 += t0 * p;
                }

                f0 *= g_c * rg_bme * rg_ame * AdvancedMath.Gamma(m + e);
                p  *= (a + (m - 1)) * (b + (m - 1)) * x1;
            }

            // Now compute the remaining terms with analytically canceled divergent parts.

            double t = rg_bme * rg_ame * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
                       rg_m1e * (NewG(a + m, e) * rg_bme + NewG(b + m, e) * rg_am) -
                       MoreMath.ReducedExpMinusOne(Math.Log(x1), e) * rg_am * rg_bm * rg_m1e;


            t *= p;
            double f1 = t;
            double u  = p * rg_bme * rg_ame / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

            for (int k = 0; k < Global.SeriesMax; k++)
            {
                double f1_old = f1;

                // Compute a bunch of sums we will use.
                int    k1   = k + 1;
                int    mk   = m + k;
                int    mk1  = mk + 1;
                double k1e  = k1 - e;
                double amk  = a + mk;
                double bmk  = b + mk;
                double amke = amk + e;
                double bmke = bmk + e;
                double mk1e = mk1 + e;

                // Compute the ratios of each term. These are close, but not equal for e != 0.
                double r = amk * bmk / mk1 / k1e;
                double s = amke * bmke / mk1e / k1;

                // Compute (r - s) / e, with O(1) terms of (r - s) analytically canceled.
                double d = (amk * bmk / mk1 - (amk + bmk + e) + amke * bmke / k1) / mk1e / k1e;

                // Advance to the next term, including the correction for r != s.
                t = (s * t + d * u) * x1;

                f1 += t;

                if (f1 == f1_old)
                {
                    f1 *= ReciprocalSincPi(e) * g_c;
                    if (m % 2 != 0)
                    {
                        f1 = -f1;
                    }
                    return(f0 + f1);
                }

                // Advance the u term, which we will need for the next iteration.
                u *= r * x1;
            }

            throw new NonconvergenceException();
        }
        // Our approach to evaluating the transformed series is taken from Michel & Stoitsov, "Fast computation of the
        // Gauss hypergeometric function with all its parameters complex with application to the Poschl-Teller-Ginocchio
        // potential wave functions" (https://arxiv.org/abs/0708.0116). Michel & Stoitsov had a great idea, but their
        // exposition leaves much to be desired, so I'll put in a lot of detail here.

        // The basic idea is an old one: use the linear transformation formulas (A&S 15.3.3-15.3.9) to map all x into
        // the region [0, 1/2]. The x -> (1-x) transformation, for example, looks like
        //   F(a, b, c, x) =
        //     \frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a) \Gamma(c-b)} F(a, b, a+b-c+1, 1-x) +
        //     \frac{\Gamma(c) \Gamma(a+b-c)}{\Gamma(a) \Gamma(b)} F(c-a, c-b, c-a-b, 1-x) (1-x)^{c-a-b}

        // When c-a-b is close to an integer, though, there is a problem. Write c = a + b + m + e, where m is a non-negative integer
        // and |e| <= 1/2. The transformed expression becomes:
        //   \frac{F(a, b, c, x)}{\Gamma(c)} =
        //     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} F(a, b, 1 - m - e, 1 - x) +
        //     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} F(b + m + e, a + m + e, 1 + m + e, 1 - x) (1-x)^{m + e}
        // In the first term, the F-function blows up as e->0 (or, if m=0, \Gamma(m+e) blows up), and in the second term
        // \Gamma(-m-e) blows up in that limit. By finding the divergent O(1/e) and the sub-leading O(1) terms, it's not too
        // hard to show that the divergences cancel, leaving a finite result, and to derive that finite result for e=0.
        // (A&S gives the result, and similar ones for the divergent limits of other linear transformations.)
        // But we still have a problem for e small-but-not-zero. The pre-limit expressions will have large cancellations.
        // We can't ignore O(e) and higher terms, but developing a series in e is unworkable -- the higher derivatives
        // rapidly become complicated and unwieldy. No expressions in A&S get around this problem, but we will now
        // develop an approach that does.

        // Notice the divergence of F(a, b, 1 - m - e, 1 - x) is at the mth term, where (1 - m - e)_{m} ~ e. Pull out
        // the finite sum up to the (m-1)th term
        //   \frac{F_0}{\Gamma(c)} = \frac{\Gamma(m+e)}{\Gamma(b + m + e) \Gamma(a + m + e)}
        //     \sum_{k=0}^{m-1} \frac{(a)_k (b)_k}{(1 - m - e)_{k}} \frac{(1-x)^k}{k!}
        // The remainder, which contains the divergences, is:
        //   \frac{F_1}{\Gamma(c)} =
        //     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(1 + m + k)}
        //     \frac{\Gamma(a + m + k) \Gamma(b + m + k) \Gamma(1 - m - e)}{\Gamma(a) \Gamma(b) \Gamma(1 - e + k)} +
        //     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + e + k}}{\Gamma(1 + k)}
        //     \frac{\Gamma(b + m + e + k) \Gamma(a + m + e + k) \Gamma(1 + m + e)}{\Gamma(b + m + e) \Gamma(a + m + e) \Gamma(1 + m + e + k)}
        // where we have shifted k by m in the first sum. Use the \Gamma reflection formulae
        //   \Gamma(m + e) \Gamma(1 - m - e) = \frac{\pi}{\sin(\pi(m + e))} = \frac{(-1)^m \pi}{\sin(\pi e)}
        //   \Gamma(-m - e) \Gamma(1 + m + e) = \frac{\pi}{\sin(-\pi(m + e))} = -\frac{(-1)^m \pi}{\sin(\pi e)}
        // to make this
        //   \frac{F_1}{\Gamma(c)} =
        //     \frac{(-1)^m \pi}{\sin(\pi e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
        //     \left[ \frac{\Gamma(a + m + k) \Gamma(b + m + k)}{\Gamma(1 + k - e) \Gamma(1 + m + k)} -
        //            \frac{\Gamma(a + m + k + e) \Gamma(b + m + k + e)}{\Gamma(1 + k) \Gamma(1 + m + k + e)} (1-x)^e \right]
        // Notice that \frac{\pi}{\sin(\pi e)} diverges like ~1/e. And that the two terms in parentheses contain exactly the
        // same products of \Gamma functions, except for having their arguments shifted by e. Therefore in the e->0
        // limit their leading terms must cancel, leaving terms ~e, which will cancel the ~1/e divergence, leaving a finite result.

        // We would like to accomplish this cancellation analytically. This isn't too hard to do for e=0. Just write out a Taylor
        // series for \Gamma(z + e), keeping only terms up to O(e). The O(1) terms cancel, the e in front of the O(e) terms gets
        // absorbed into a finite prefactor \frac{\pi e}{\sin(\pi e)}, and we have a finite result. A&S gives the resulting expression.
        // The trouble is for e small-but-not-zero. If we try to evaluate the terms directly, we get cancellations between large terms,
        // leading to catastrophic loss of precision. If we try to use Taylor expansion, we need all the higher derivatives,
        // not just the first one, and the expressions rapidly become so complex and unwieldy as to be unworkable.

        // A good solution, introduced by Forrey, and refined by Michel & Stoitsov, is to use finite differences instead
        // of derivatives. If we can express the difference between \Gamma(z) and \Gamma(z + e) as a function of z and e that
        // we can compute, then we can analytically cancel the divergent parts and be left with a finite expression involving
        // our finite difference function instead of an infinite series of Taylor series terms. For e=0, the finite difference
        // is just the first derivative, but for non-zero e, it implicitly sums the contributions of all Taylor series terms.

        // The finite difference function to use is:
        //   e G_{e}(z) = \frac{1}{\Gamma(z)} - \frac{1}{\Gamma(z+e)}
        // I played around with a few others, e.g. the perhaps more obvious choice \frac{\Gamma(z+e)}{\Gamma(z)} = 1 + e P_{e}(z),
        // but the key advantage of G_{e}(z) is that it is perfectly finite even for non-positive-integer values of z and z+e,
        // because it uses the reciprocal \Gamma function. (I actually had a mostly-working algorithm using P_{e}(z), but it
        // broke down at non-positive-integer z, because P_{e}(z) itself still diverged for those values.)

        // For a discussion of how to actually compute G_{e}(z), refer to the method notes.
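        // As an illustration only: directly from the definition above, for e not close to zero one could evaluate it naively as
        //   double NaiveG(double z, double e) {
        //       return((1.0 / AdvancedMath.Gamma(z) - 1.0 / AdvancedMath.Gamma(z + e)) / e);
        //   }
        // (a hypothetical helper; for small e this direct form reproduces exactly the cancellation problem we are
        // trying to avoid, which is why the real computation is done differently).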

        // The next trick Michel & Stoitsov use is to first concentrate just on the k=0 term. The relevant factor is
        //   t = \frac{1}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
        //       \left[ \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(1 - e) \Gamma(1 + m)} -
        //              \frac{\Gamma(a + m + e) \Gamma(b + m + e)}{\Gamma(1) \Gamma(1 + m + e)} (1-x)^e \right]
        //     = \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(a) \Gamma(b)}
        //       \left[ \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} -
        //              \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e) \Gamma(1)} (1-x)^e \right]
        // In the second step, we have put all the \Gamma functions we will need to compute for e-shifted arguments
        // in the denominator, which makes it easier to apply our definition of G_e(z), since there they are also
        // in the denominator.

        // Before we begin using G_e(z), let's isolate the e-dependence of the (1-x)^e factor. Write
        //   (1-x)^e = \exp(e \ln(1-x) ) = 1 + [ \exp(e \ln(1-x)) - 1 ] = 1 + e E_e(\ln(1-x))
        // where e E_e(z) = \exp(e z) - 1. We could continue like this, using G_e(z) to
        // eliminate every \Gamma(z + e) in favor of G_e(z) and \Gamma(z), but by doing so we
        // would end up with terms containing two and more explicit powers of e, and products
        // of different G_e(z). That would be perfectly correct, but we end up with a nicer
        // expression if we instead "peel off" only one e-shifted function at a time, like this...
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1)} +
        //     \frac{e G_{-e}(1)}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)}
        //   \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} +
        //     \frac{e G_e(a + m)}{\Gamma(b + m) \Gamma(1 + m + e)}
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} +
        //     \frac{e G_e(b + m)}{\Gamma(a + m + e) \Gamma(1 + m + e)}
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)} -
        //     \frac{e G_e(1 + m)}{\Gamma(a + m + e) \Gamma(b + m + e)}
        // Putting this all together, we have
        //   t = \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e)} \left[ \frac{e G_{-e}(1)}{\Gamma(1 + m)} + e G_e(1 + m) \right]
        //     - \frac{1}{\Gamma(1 + m + e)} \left[ \frac{e G_e(a + m)}{\Gamma(b + m)} + \frac{e G_e(b+m)}{\Gamma(a + m + e)} \right]
        //     - \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} e E_e(\ln(1-x))
        // which is, as promised, proportional to e. For some a, b, m, and e, some of these \Gamma functions will blow up, but they are
        // all in the denominator, so that will just zero some terms. The G_e(z) that appear in the numerator are finite for all z.

        // So now we have the k=0 term. What about higher k terms? We could repeat this analysis, carrying along the k's,
        // and get an expression involving G_e(z) and E_e(z) for each k. Michel & Stoitsov's last trick is to realize
        // we don't have to do this, but can instead use our original expressions for each term as a ratio of \Gamma
        // functions to derive a recurrence. Let u_k be the first term, v_k be the second term, so t_k = u_k + v_k.
        // Let r_k = u_{k+1} / u_{k} and s_k = v_{k+1} / v_{k}. It's easy to write down r_k and s_k because they
        // follow immediately from the \Gamma function recurrence.
        //    r_{k} = \frac{(a + m + k)(b + m + k)}{(1 + k - e)(1 + m + k)}
        //    s_{k} = \frac{(a + m + k + e)(b + m + k + e)}{(1 + k)(1 + m + k + e)}
        // Notice that r_k and s_k are almost equal, but not quite: they differ by O(e). To advance t_k, use
        //    t_{k+1} = u_{k+1} + v_{k+1} = r_k u_k + s_k v_k = s_k (u_k + v_k) + (r_k - s_k) u_k
        //            = s_k t_k + d_k * u_k
        // where d_k = r_k - s_k, which will be O(e), since r_k and s_k only differ by e-shifted arguments.

        // In the x -> (1-x), x -> 1/x, x -> x / (1-x), and x -> 1 - 1/x linear transformations, canceling divergences
        // appear when some arguments of the transformed functions are non-positive-integers.

        private static double Hypergeometric2F1_Series_OneOverOneMinusX(double a, int m, double e, double c, double x1)
        {
            Debug.Assert(m >= 0);
            Debug.Assert(Math.Abs(e) <= 0.5);
            Debug.Assert(Math.Abs(x1) <= 0.75);

            double b = a + m + e;

            double g_c = AdvancedMath.Gamma(c);
            //double rg_a = 1.0 / AdvancedMath.Gamma(a);
            double rg_b   = 1.0 / AdvancedMath.Gamma(b);
            double rg_cma = 1.0 / AdvancedMath.Gamma(c - a);
            //double rg_cmb = 1.0 / AdvancedMath.Gamma(c - b);

            // Pochhammer product, keeps track of (a)_k (c-b)_k (x')^{a + k}
            double p = Math.Pow(x1, a);

            double f0 = 0.0;

            if (m > 0)
            {
                f0 = p;

                double q = 1.0;
                for (int k = 1; k < m; k++)
                {
                    int km1 = k - 1;
                    p  *= (a + km1) * (c - b + km1) * x1;
                    q  *= (k - m - e) * k;
                    f0 += p / q;
                }

                f0 *= g_c * rg_b * rg_cma * AdvancedMath.Gamma(m + e);
                p  *= (a + (m - 1)) * (c - b + (m - 1)) * x1;
            }

            // Now compute the remaining terms with analytically canceled divergent parts.

            double t = rg_b * rg_cma * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
                       1.0 / AdvancedMath.Gamma(1 + m + e) * (NewG(a + m, e) / AdvancedMath.Gamma(c - a - e) + NewG(c - a, -e) / AdvancedMath.Gamma(b)) -
                       MoreMath.ReducedExpMinusOne(Math.Log(x1), e) / AdvancedMath.Gamma(a + m) / AdvancedMath.Gamma(c - a - e) / AdvancedMath.Gamma(m + 1 + e);

            t *= p;

            double f1 = t;

            double u = p * rg_b * rg_cma / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

            for (int k = 0; k < Global.SeriesMax; k++)
            {
                double f1_old = f1;

                int    k1   = k + 1;
                int    mk1  = m + k1;
                double amk  = a + m + k;
                double amke = amk + e;
                double cak  = c - a + k;
                double cake = cak - e;
                double k1e  = k1 - e;
                double mk1e = mk1 + e;

                double r = amk * cake / k1e / mk1;
                double s = amke * cak / mk1e / k1;

                // Compute (r - s) / e analytically because leading terms cancel
                double d = (amk * cake / mk1 - amk - cake - e + amke * cak / k1) / mk1e / k1e;

                t = (s * t + d * u) * x1;

                f1 += t;

                if (f1 == f1_old)
                {
                    f1 *= ReciprocalSincPi(e) * g_c;
                    if (m % 2 != 0)
                    {
                        f1 = -f1;
                    }
                    return(f0 + f1);
                }

                u *= r * x1;
            }

            throw new NonconvergenceException();
        }
 /// <summary>
 /// Enumerates all partitions of the given integer.
 /// </summary>
 /// <param name="n">The integer to partition, which must be positive.</param>
 /// <returns>An enumeration of all partitions of the given integer.</returns>
 /// <remarks>
 /// <para>Integer partitions are ways to write an integer as a sum of smaller integers. For example, the integer 4 has 5 partitions: 4,
 /// 3 + 1, 2 + 2, 2 + 1 + 1, and 1 + 1 + 1 + 1.</para>
 /// <para>Integer partitions appear in combinatoric problems and solutions to problems that may be mapped into combinatoric problems.
 /// For example, the terms which appear in <a href="http://en.wikipedia.org/wiki/Fa%C3%A0_di_Bruno%27s_formula">Faà di Bruno's formula</a>
 /// correspond to integer partitions.</para>
 /// <para>The number of partitions grows very rapidly with n. Since enumerating through partitions does not require us to count them,
 /// no overflows will occur even for large values of <paramref name="n"/>. However, completing the enumeration of
 /// such a large number of partitions will take a long time, even though our algorithm produces each partition very quickly. For
 /// example, there are about two hundred million partitions of the integer 100.
 /// </para>
 /// </remarks>
 /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is not positive.</exception>
 /// <seealso href="http://en.wikipedia.org/wiki/Integer_partition"/>
 public static IEnumerable <IntegerPartition> GetPartitions(int n)
 {
     return(AdvancedIntegerMath.Partitions(n));
 }
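 // A minimal usage sketch: count the partitions of 4, which per the remarks above number 5.
 //   int count = 0;
 //   foreach (IntegerPartition partition in GetPartitions(4)) count++;
 //   // count == 5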