Example #1
 internal static double PowerOverFactorial(double x, double nu)
 {
     if (nu < 16.0)
     {
         return(Math.Pow(x, nu) / AdvancedMath.Gamma(nu + 1.0));
     }
     else
     {
         return(Stirling.PowerFactor(x, nu));
     }
 }
Example #2
        // This function computes x^{\nu} / \Gamma(\nu + 1), which can easily become Infinity/Infinity=NaN for large \nu if computed naively.

        internal static double PowOverGammaPlusOne(double x, double nu)
        {
            if (nu < 16.0)
            {
                return(Math.Pow(x, nu) / AdvancedMath.Gamma(nu + 1.0));
            }
            else
            {
                return(Stirling.PowOverGammaPlusOne(x, nu));
            }
        }
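
        // For illustration only (a hypothetical helper, not part of the library): the same quantity can be
        // computed in log space, which avoids the Infinity/Infinity problem described above, but loses a few
        // digits when nu * ln(x) and ln Gamma(nu + 1) largely cancel; that loss is why the dedicated Stirling
        // branch above is preferred for large nu.
        internal static double PowOverGammaPlusOne_ViaLogs(double x, double nu)
        {
            return(Math.Exp(nu * Math.Log(x) - AdvancedMath.LogGamma(nu + 1.0)));
        }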
Example #3
        private static SolutionPair Airy_Series(double x)
        {
            // compute k = 0 terms in f' and g' series, and in f and g series
            // compute terms to get k = 0 terms in a' and b' and a and b series

            double g  = 1.0 / Math.Pow(3.0, 1.0 / 3.0) / AdvancedMath.Gamma(1.0 / 3.0);
            double ap = -g;
            double bp = g;

            double f = 1.0 / Math.Pow(3.0, 2.0 / 3.0) / AdvancedMath.Gamma(2.0 / 3.0);

            g *= x;
            double a = f - g;
            double b = f + g;

            // we will need to multiply by x^2 to produce higher terms, so remember it
            double x2 = x * x;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                // remember old values
                double a_old  = a;
                double b_old  = b;
                double ap_old = ap;
                double bp_old = bp;

                // compute 3k
                double tk = 3 * k;

                // kth term in f' and g' series, and corresponding a' and b' series
                f  *= x2 / (tk - 1);
                g  *= x2 / tk;
                ap += (f - g);
                bp += (f + g);

                // kth term in f and g series, and corresponding a and b series
                f *= x / tk;
                g *= x / (tk + 1);
                a += (f - g);
                b += (f + g);

                // check for convergence
                if ((a == a_old) && (b == b_old) && (ap == ap_old) && (bp == bp_old))
                {
                    return(new SolutionPair(
                               a, ap,
                               Global.SqrtThree * b, Global.SqrtThree * bp
                               ));
                }
            }

            throw new NonconvergenceException();
        }
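
        // A small check (hypothetical helper) of the leading coefficients used above. The numerical values
        // are standard Airy-function values at the origin, quoted here as checks rather than derived from this code.
        private static void Airy_Series_OriginCheck()
        {
            double g = 1.0 / Math.Pow(3.0, 1.0 / 3.0) / AdvancedMath.Gamma(1.0 / 3.0);   // = -Ai'(0) ~ 0.2588194038
            double f = 1.0 / Math.Pow(3.0, 2.0 / 3.0) / AdvancedMath.Gamma(2.0 / 3.0);   // =  Ai(0)  ~ 0.3550280539
            Debug.Assert(Math.Abs(f - 0.3550280539) < 1.0E-9);
            Debug.Assert(Math.Abs(g - 0.2588194038) < 1.0E-9);
            // Bi(0) = Sqrt(3) Ai(0) ~ 0.6149266274 and Bi'(0) = -Sqrt(3) Ai'(0) ~ 0.4482883574, which is why
            // the returned SolutionPair above multiplies b and bp by Global.SqrtThree.
        }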
        /// <summary>
        /// Computes the generalized exponential integral.
        /// </summary>
        /// <param name="n">The order parameter.</param>
        /// <param name="x">The argument, which must be non-negative.</param>
        /// <returns>The value of E<sub>n</sub>(x).</returns>
        /// <remarks>
        /// <para>The generalized exponential integral is defined as:</para>
        /// <img src="../images/EIntegral.png" />
        /// <para>It is related to the incomplete Gamma function (<see cref="Gamma(double, double)"/>)
        /// for negative, integer shape parameters.</para>
        /// <img src="../images/EnGammaRelation.png" />
        /// <para>For n=1, it is expressible as a simple power series.</para>
        /// <img src="../images/E1Series.png" />
        /// <para>For negative x, E<sub>1</sub>(x) develops an imaginary part, but its real part is given by the Ei(x) function
        /// (<see cref="IntegralEi(double)"/>).</para>
        /// <img src="../images/E1EiRelation.png" />
        /// <para>To compute E<sub>1</sub>(z) in the entire complex plane, use <see cref="AdvancedComplexMath.Ein(Complex)"/>.</para>
        /// <para>Sometimes the function E<sub>1</sub>(z) is called the exponential integral, and sometimes that name is used
        /// for Ei(x). In hydrology, E<sub>1</sub>(x) is sometimes called the Well function.</para>
        /// </remarks>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="x"/> is negative.</exception>
        /// <seealso href="http://mathworld.wolfram.com/En-Function.html"/>
        public static double IntegralE(int n, double x)
        {
            if (x < 0.0)
            {
                throw new ArgumentOutOfRangeException(nameof(x));
            }

            // Special case x = 0.
            if (x == 0.0)
            {
                if (n <= 1)
                {
                    return(Double.PositiveInfinity);
                }
                else
                {
                    return(1.0 / (n - 1));
                }
            }

            // Special case n negative and zero.
            if (n < 0)
            {
                // negative n is expressible using incomplete Gamma
                return(AdvancedMath.Gamma(1 - n, x) / MoreMath.Pow(x, 1 - n));
            }
            else if (n == 0)
            {
                // special case n=0
                return(Math.Exp(-x) / x);
            }

            // Now we are sure x > 0 and n > 0.
            if (x < 2.0)
            {
                return(IntegralE_Series(n, x));
            }
            else if (x < expLimit)
            {
                // Since E_n(x) < e^{-x}, we can short-cut to zero if x is big enough.
                // This nicely avoids our continued fraction's bad behavior for infinite x.
                return(IntegralE_ContinuedFraction(n, x));
            }
            else if (x <= Double.PositiveInfinity)
            {
                return(0.0);
            }
            else
            {
                Debug.Assert(Double.IsNaN(x));
                return(x);
            }
        }
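
        // A brief usage sketch (hypothetical helper); the numbers in the comments are standard reference
        // values for E_n(x), quoted as checks rather than derived from this code.
        private static void IntegralE_Examples()
        {
            double e1 = AdvancedMath.IntegralE(1, 2.0);   // E_1(2) ~ 0.0489005107
            double e2 = AdvancedMath.IntegralE(2, 0.0);   // E_2(0) = 1/(2-1) = 1
            double e0 = AdvancedMath.IntegralE(0, 1.0);   // E_0(1) = e^{-1}/1 ~ 0.3678794412
        }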
Example #5
        // series near the origin; this is entirely analogous to the Bessel series near the origin
        // it has a corresponding radius of rapid convergence, x < 4 + 2 Sqrt(nu)

        // This is exactly the same as BesselJ_Series with xx -> -xx.
        // We could even factor this out into a common method with an additional parameter.

        private static void ModifiedBesselI_Series(double nu, double x, out double I, out double IP)
        {
            if (x == 0.0)
            {
                if (nu == 0.0)
                {
                    I  = 1.0;
                    IP = 0.0;
                }
                else if (nu < 1.0)
                {
                    I  = 0.0;
                    IP = Double.PositiveInfinity;
                }
                else if (nu == 1.0)
                {
                    I  = 0.0;
                    IP = 0.5;
                }
                else
                {
                    I  = 0.0;
                    IP = 0.0;
                }
            }
            else
            {
                double x2 = x / 2.0;
                double xx = x2 * x2;
                double dI;
                if (nu < 128.0)
                {
                    dI = Math.Pow(x2, nu) / AdvancedMath.Gamma(nu + 1.0);
                }
                else
                {
                    dI = Math.Exp(nu * Math.Log(x2) - AdvancedMath.LogGamma(nu + 1.0));
                }
                I = dI; IP = nu * dI;
                for (int k = 1; k < Global.SeriesMax; k++)
                {
                    double I_old = I; double IP_old = IP;
                    dI *= xx / k / (nu + k);
                    I  += dI; IP += (nu + 2 * k) * dI;
                    if ((I == I_old) && (IP == IP_old))
                    {
                        IP = IP / x;
                        return;
                    }
                }
            }
        }
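
        // A sketch of the factoring suggested in the comment above (the names here are hypothetical):
        // s = -1.0 reproduces the Bessel J series and s = +1.0 the modified Bessel I series.
        // The x == 0 special cases handled above are omitted for brevity.
        private static void BesselSeries(double nu, double x, double s, out double F, out double FP)
        {
            double x2 = x / 2.0;
            double xx = s * x2 * x2;
            double dF = AdvancedMath.PowerOverFactorial(x2, nu);
            F = dF; FP = nu * dF;
            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double F_old = F; double FP_old = FP;
                dF *= xx / (k * (nu + k));
                F  += dF; FP += (nu + 2 * k) * dF;
                if ((F == F_old) && (FP == FP_old))
                {
                    FP = FP / x;
                    return;
                }
            }
            throw new NonconvergenceException();
        }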
Example #6
        // Li_n(x) = \sum_{k=0, k \ne n-1}^{\infty} \zeta(n-k) \frac{\log^k x}{k!} + \frac{\log^{n-1} x}{(n-1)!} \left( H_{n-1} - \log(-\log x) \right)

        private static double PolyLog_LogSeries(int n, double x)
        {
            double lnx = Math.Log(x);

            double f = AdvancedMath.RiemannZeta(n);

            if (lnx == 0.0)
            {
                return(f);
            }

            // c stores [log(x)]^k / k!
            double c = 1.0;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double f_old = f;
                c *= lnx / k;
                // argument of zeta
                int m = n - k;
                if (m < 0)
                {
                    // For negative arguments, use \zeta(-n) = -\frac{B_{n+1}}{n+1} and the fact that odd Bernoulli numbers (beyond B_1) vanish.
                    if (-m % 2 == 0)
                    {
                        continue;
                    }
                    // This could theoretically overrun our stored Bernoulli values, but if we haven't converged after 32 negative terms, we are in trouble.
                    //f += c * AdvancedIntegerMath.Bernoulli[(-m + 1) / 2] / (-m + 1);
                    f += c * AdvancedMath.RiemannZeta(m);
                }
                else if (m == 1)
                {
                    // Special term in place of \zeta(1).
                    f += c * (AdvancedIntegerMath.HarmonicNumber(n - 1) - Math.Log(-lnx));
                }
                else
                {
                    // Otherwise just compute \zeta(m).
                    // We could reduce even m to Bernoulli references but then we would be in trouble for n > 32.
                    f += c * AdvancedMath.RiemannZeta(m);
                }
                if (f == f_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
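
        // A quick sanity check (hypothetical helper). \zeta(2) = \pi^2 / 6 is a standard value quoted here,
        // not derived from this code; at x = 1 the series above reduces to its lnx == 0 branch.
        private static void PolyLog_LogSeries_Check()
        {
            double li2 = PolyLog_LogSeries(2, 1.0);
            Debug.Assert(Math.Abs(li2 - Math.PI * Math.PI / 6.0) < 1.0E-12);
        }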
 /// <summary>
 /// Computes the logarithm of the factorial of an integer.
 /// </summary>
 /// <param name="n">The argument, which must be non-negative.</param>
 /// <returns>The value of ln(n!).</returns>
 /// <remarks>
 /// <para>This function provides accurate values of ln(n!) even for values of n which would cause n! to overflow.</para>
 /// </remarks>
 /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
 /// <seealso cref="Factorial"/>
 public static double LogFactorial(int n)
 {
     if (n < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(n));
     }
     else if (n < factorialTable.Length)
     {
         return(Math.Log((double)factorialTable[n]));
     }
     else
     {
         return(AdvancedMath.LogGamma(n + 1));
     }
 }
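
 // A brief usage sketch (hypothetical helper); ln(100!) ~ 363.7393755556 is a standard reference value,
 // and 200! overflows a double even though its logarithm does not.
 private static void LogFactorial_Examples()
 {
     double a = LogFactorial(100);   // ~ 363.7393755556
     double b = LogFactorial(200);   // finite, although 200! itself overflows a double
 }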
Example #8
 /// <summary>
 /// Computes the factorial of an integer.
 /// </summary>
 /// <param name="n">The argument, which must be non-negative.</param>
 /// <returns>The value of n!.</returns>
 /// <remarks>
 /// <para>The factorial of an integer n is the product of all integers from 1 to n. For example, 4! = 4 * 3 * 2 * 1 = 24.</para>
 /// <para>n! also has a combinatorial interpretation as the number of permutations of n objects. For example, a set of 3
 /// objects (abc) has 3! = 6 permutations: (abc), (bac), (cba), (acb), (cab), (bca).</para>
 /// <para>Because n! grows extremely quickly with increasing n, we return the result as a double, even though
 /// the value is always an integer. (13! would overflow an int, 21! would overflow a long, 171! overflows even a double.)</para>
 /// <para>In order to deal with factorials of larger numbers, you can use the <see cref="LogFactorial"/> method, which
 /// returns accurate values of ln(n!) even for values of n for which n! would overflow a double.</para>
 /// <para>The factorial is generalized to non-integer arguments by the &#x393; function (<see cref="AdvancedMath.Gamma(double)"/>).</para>
 /// </remarks>
 /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
 /// <seealso cref="LogFactorial"/>
 /// <seealso cref="AdvancedMath.Gamma(double)"/>
 /// <seealso href="http://en.wikipedia.org/wiki/Factorial"/>
 public static double Factorial(int n)
 {
     if (n < 0)
     {
          throw new ArgumentOutOfRangeException(nameof(n));
     }
     else if (n < factorialTable.Length)
     {
         return((double)factorialTable[n]);
     }
     else
     {
         return(Math.Round(AdvancedMath.Gamma(n + 1)));
     }
 }
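
 // A brief usage sketch (hypothetical helper); the values in the comments follow from the remarks above.
 private static void Factorial_Examples()
 {
     double f4   = Factorial(4);     // 24
     double f20  = Factorial(20);    // 2432902008176640000, the largest factorial that fits in a long
     double f171 = Factorial(171);   // Double.PositiveInfinity, since 171! overflows a double
 }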
 private static double LogDoubleFactorial_Gamma(int n)
 {
     if (n % 2 == 0)
     {
         // m = n/2, n!! = 2^m Gamma(m+1)
         int m = n / 2;
         return(m * Global.LogTwo + AdvancedMath.LogGamma(m + 1.0));
     }
     else
     {
         // m = (n+1)/2, n!! = 2^m Gamma(m+1/2) / Sqrt(PI)
         int m = (n + 1) / 2;
         return(m * Global.LogTwo + AdvancedMath.LogGamma(m + 0.5) - Math.Log(Math.PI) / 2.0);
     }
 }
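
 // A small check (hypothetical helper) of the identities above against the direct-product definition of n!!:
 // 8!! = 8 * 6 * 4 * 2 = 384 and 7!! = 7 * 5 * 3 * 1 = 105.
 private static void LogDoubleFactorial_Gamma_Check()
 {
     Debug.Assert(Math.Abs(LogDoubleFactorial_Gamma(8) - Math.Log(384.0)) < 1.0E-12);
     Debug.Assert(Math.Abs(LogDoubleFactorial_Gamma(7) - Math.Log(105.0)) < 1.0E-12);
 }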
Example #10
        private static double JacobiSn_ViaRangeReduction(double u, double k, out long u0, out double u1)
        {
            Debug.Assert(u >= 0.0);

            double m = k * k;

            // First we compare u to a quick-to-compute lower bound for K / 2.
            // If it's below the bound, we can move directly to series for sn without having
            // to compute K or perform range reduction.
            double K = LowerBoundK(m);

            if (u <= K / 2.0)
            {
                u0 = 0;
                u1 = u;
            }
            else
            {
                // Too bad, we need to actually compute K and range reduce.
                K = AdvancedMath.EllipticK(k);
                double v  = u / K;
                double v0 = Math.Round(v);
                if (v < Int64.MaxValue)
                {
                    u0 = (long)v0;
                    u1 = (v - v0) * K;
                }
                else
                {
                    u0 = Int64.MaxValue;
                    u1 = 0.0;
                }
            }
            Debug.Assert(Math.Abs(u1) <= K / 2.0);
            // Note that for u >> K, this has the same problem as naïve trig function calculations
            // for x >> 2 \pi: we lose a lot of accuracy for u1 because it is computed via subtraction.
            // There is not much we can do about this, though, short of moving to arbitrary-precision
            // arithmetic, because K is different for each value of m.

            // Compute sn of the reduced -K/2 < u1 < K/2
            return(JacobiSn_ReduceToSeries(u1, m));

            // We should be able to do even better: |u1| <= K / 4. Our first attempt had some problems,
            // and the series still work for the larger range, so leave it as is for now.
        }
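
        // A hedged sketch of the kind of quick lower bound used above (not necessarily the library's actual
        // LowerBoundK): since K(m) = \pi / (2 AGM(1, k')) and the AGM never exceeds the arithmetic mean,
        // \pi / (1 + k') is a cheap lower bound on K(m), exact at m = 0.
        private static double LowerBoundK_Sketch(double m)
        {
            double kp = Math.Sqrt(1.0 - m);    // complementary modulus k'
            return(Math.PI / (1.0 + kp));      // <= K(m)
        }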
Example #11
        /// <summary>
        /// Computes the complex Faddeeva function.
        /// </summary>
        /// <param name="z">The complex argument.</param>
        /// <returns>The complex value of w(z).</returns>
        /// <remarks>
        /// <para>The Faddeeva function w(z) is related to the error function with a complex argument.</para>
        /// <img src="../images/FaddeevaErfcRelation.png" />
        /// <para>It also has an integral representation.</para>
        /// <img src="../images/FaddeevaIntegral.png" />
        /// <para>For purely imaginary values, it reduces to the complementary error function (<see cref="AdvancedMath.Erfc"/>).
        /// For purely real values, it reduces to Dawson's integral (<see cref="AdvancedMath.Dawson"/>).</para>
        /// <para>It appears in the computation of the Voigt line profile function V(x;&#x3C3;,&#x3B3;).</para>
        /// <img src="../images/Voigt.png" />
        /// <para>Near the origin, w(z) &#x2248; 1. To accurately determine w(z) - 1 in this region, use the <see cref="Erf"/>
        /// function. Away from the origin near the large negative imaginary axis, the magnitude w(z) increases rapidly and
        /// may overflow.</para>
        /// <para>The image below shows the complex Faddeeva function near the origin, using domain coloring.</para>
        /// <img src="../images/ComplexFaddeevaPlot.png" />
        /// </remarks>
        /// <seealso cref="AdvancedComplexMath.Erf"/>
        /// <seealso cref="AdvancedMath.Erf" />
        /// <seealso cref="AdvancedMath.Erfc" />
        /// <seealso cref="AdvancedMath.Dawson"/>
        /// <seealso href="http://en.wikipedia.org/wiki/Voigt_profile" />
        public static Complex Faddeeva(Complex z)
        {
            // use reflection formulae to ensure that we are in the first quadrant
            if (z.Im < 0.0)
            {
                return(2.0 * ComplexMath.Exp(-z * z) - Faddeeva(-z));
            }
            if (z.Re < 0.0)
            {
                return(Faddeeva(-z.Conjugate).Conjugate);
            }

            double r = ComplexMath.Abs(z);

            if (r < 2.0)
            {
                // use series for small z
                return(ComplexMath.Exp(-z * z) * (1.0 - Erf_Series(-ComplexMath.I * z)));
                //return (Faddeeva_Series(z));
            }
            else if ((z.Im < 0.1) && (z.Re < 30.0))
            {
                // this is a special, awkward region
                // along the real axis, Re{w(x)} ~ e^{-x^2}; the Weideman algorithm doesn't compute this small number
                // well and the Laplace continued fraction misses it entirely; therefore very close to the real axis
                // we will use an analytic result on the real axis and Taylor expand to where we need to go.
                // unfortunately the Taylor expansion converges poorly for large x, so we drop this workaround near x~30,
                // when this real part becomes too small to represent as a double anyway
                double x = z.Re;
                double y = z.Im;
                return(Faddeeva_Taylor(new Complex(x, 0.0),
                                       Math.Exp(-x * x) + 2.0 * AdvancedMath.Dawson(x) / Global.SqrtPI * ComplexMath.I,
                                       new Complex(0.0, y)));
            }
            else if (r > 7.0)
            {
                // use Laplace continued fraction for large z
                return(Faddeeva_ContinuedFraction(z));
            }
            else
            {
                // use Weideman algorithm for intermediate region
                return(Faddeeva_Weideman(z));
            }
        }
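
        // A brief usage sketch (hypothetical helper). The identity w(i y) = e^{y^2} erfc(y) for purely
        // imaginary arguments is standard, so the quoted value is a check, not something derived here.
        private static void Faddeeva_Examples()
        {
            Complex w0 = Faddeeva(new Complex(0.0, 0.0));   // w(0) = 1
            Complex wi = Faddeeva(new Complex(0.0, 1.0));   // w(i) = e * erfc(1) ~ 0.4275836, purely real
        }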
        private static double ModifiedBesselI_Series(double nu, double x)
        {
            double dI = AdvancedMath.PowOverGammaPlusOne(x / 2.0, nu);
            double I  = dI;
            double xx = x * x / 4.0;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double I_old = I;
                dI = dI * xx / (nu + k) / k;
                I += dI;
                if (I == I_old)
                {
                    return(I);
                }
            }
            throw new NonconvergenceException();
        }
        private static double ModifiedBesselI_Series(double nu, double x)
        {
            double x2 = 0.5 * x;
            double xx = x2 * x2;
            double dI = AdvancedMath.PowerOverFactorial(x2, nu);
            double I  = dI;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double I_old = I;
                dI = dI * xx / (k * (nu + k));
                I += dI;
                if (I == I_old)
                {
                    return(I);
                }
            }
            throw new NonconvergenceException();
        }
Example #14
        // **** Real order Bessel functions ****

        // Series development of Bessel J
        //   J_{\nu}(x) = \sum_{k=0}^{\infty} \frac{(-1)^k}{k! \Gamma(\nu + k + 1)} \left( \frac{x}{2} \right)^{\nu + 2k}
        // As can be seen by comparing leading term and first correction, this is good for z <~ \max(1 , \sqrt{\nu})

        // For nu=0, it requires 10 terms at x~1.0, 25 terms at x~10.0, but accuracy in the last few digits suffers that far out
        // Gets better for higher nu (for nu=10, only 20 terms are required at x~10.0 and all digits are good), but the region of rapid convergence grows only like sqrt(nu)

        private static double BesselJ_Series(double nu, double x)
        {
            double z  = 0.5 * x;
            double dJ = AdvancedMath.PowerOverFactorial(z, nu);
            double J  = dJ;
            double zz = -z * z;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double J_old = J;
                dJ *= zz / (k * (nu + k));
                J  += dJ;
                if (J == J_old)
                {
                    return(J);
                }
            }
            throw new NonconvergenceException();
        }
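
        // A self-contained illustration (hypothetical helper) of the convergence behavior noted above,
        // re-implementing the nu = 0 series directly rather than calling the private method.
        private static int BesselJ0_SeriesTermCount(double x, out double J)
        {
            double z  = 0.5 * x;
            double zz = -z * z;
            double dJ = 1.0;            // (x/2)^0 / Gamma(1) for nu = 0
            J = dJ;
            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double J_old = J;
                dJ *= zz / (k * k);     // (nu + k) = k for nu = 0
                J  += dJ;
                if (J == J_old)
                {
                    return(k);
                }
            }
            throw new NonconvergenceException();
        }
        // At x ~ 1 this converges in roughly 10 terms to J_0(1) ~ 0.7651976866; at x ~ 10 it needs roughly
        // 25 terms and the last few digits suffer, consistent with the comments above.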
Example #15
        private static double ModifiedBesselI_Series(double nu, double x)
        {
            double x2 = x / 2.0;
            double dI = Math.Pow(x2, nu) / AdvancedMath.Gamma(nu + 1.0);
            double I  = dI;
            double xx = x2 * x2;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double I_old = I;
                dI = dI * xx / (nu + k) / k;
                I += dI;
                if (I == I_old)
                {
                    return(I);
                }
            }
            throw new NonconvergenceException();
        }
Example #16
        private static double BerSeries(double nu, double x)
        {
            double c = Math.Cos(3.0 * nu * Math.PI / 4.0);
            double s = Math.Sin(3.0 * nu * Math.PI / 4.0);
            double xh = x / 2.0; double xh2 = xh * xh;

            double df    = Math.Pow(xh, nu) / AdvancedMath.Gamma(nu + 1.0);
            double f_old = 0.0;
            double f     = c * df;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                // we look two values back because for some values of nu (e.g. 0.0) only alternating
                // powers contribute; for these values we would always have f_old == f for non-contributing
                // powers and the series would terminate early
                double f_old_old = f_old; f_old = f;
                df *= xh2 / k / (nu + k);
                switch (k % 4)
                {
                case 0:
                    f += c * df;
                    break;

                case 1:
                    f -= s * df;
                    break;

                case 2:
                    f -= c * df;
                    break;

                case 3:
                    f += s * df;
                    break;
                }
                if (f == f_old_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
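
        // A quick check (hypothetical helper); ber(1) ~ 0.9843817812 is a standard tabulated value of the
        // Kelvin function ber(x) = 1 - (x/2)^4/(2!)^2 + (x/2)^8/(4!)^2 - ..., quoted here rather than derived.
        private static void BerSeries_Check()
        {
            double ber1 = BerSeries(0.0, 1.0);
            Debug.Assert(Math.Abs(ber1 - 0.9843817812) < 1.0E-9);
        }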
Example #17
        /// <summary>
        /// Computes the exponential integral.
        /// </summary>
        /// <param name="n">The order parameter.</param>
        /// <param name="x">The argument, which must be non-negative.</param>
        /// <returns>The value of E<sub>n</sub>(x).</returns>
        /// <remarks>
        /// <para>The exponential integral is defined as:</para>
        /// <img src="../images/EIntegral.png" />
        /// <para>It is related to the incomplete Gamma function for negative, integer shape parameters by &#x393;(-k, x) = E<sub>k+1</sub>(x) / x<sup>k</sup>.</para>
        /// <para>In hydrology, E<sub>1</sub>(x) is sometimes called the Well function.</para>
        /// </remarks>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="x"/> is negative.</exception>
        public static double IntegralE(int n, double x)
        {
            if (x < 0.0)
            {
                throw new ArgumentOutOfRangeException("x");
            }

            // special case x = 0
            if (x == 0.0)
            {
                if (n <= 1)
                {
                    return(Double.PositiveInfinity);
                }
                else
                {
                    return(1.0 / (n - 1));
                }
            }

            if (n < 0)
            {
                // negative n is expressible using incomplete Gamma
                return(AdvancedMath.Gamma(1 - n, x) / MoreMath.Pow(x, 1 - n));
            }
            else if (n == 0)
            {
                // special case n=0
                return(Math.Exp(-x) / x);
            }
            else if (x < 2.0)
            {
                // use series for small x (x < 2)
                return(IntegralE_Series(n, x));
            }
            else
            {
                // use continued fraction for larger x
                return(IntegralE_ContinuedFraction(n, x));
            }
        }
Example #18
        // this is just an integer version of the series we implement below for doubles;
        // having an integer-specific version is slightly faster

        private static double BesselJ_Series(int n, double x)
        {
            Debug.Assert(n >= 0);
            Debug.Assert(x >= 0.0);
            double z  = 0.5 * x;
            double dJ = AdvancedMath.PowerOverFactorial(z, n);
            double J  = dJ;
            double zz = -z * z;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double J_old = J;
                dJ *= zz / ((n + k) * k);
                J  += dJ;
                if (J == J_old)
                {
                    return(J);
                }
            }
            throw new NonconvergenceException();
        }
Example #19
        private static double AiryBi_Series(double x)
        {
            double p = 1.0 / (Math.Pow(3.0, 1.0 / 6.0) * AdvancedMath.Gamma(2.0 / 3.0));
            double q = x * (Math.Pow(3.0, 1.0 / 6.0) / AdvancedMath.Gamma(1.0 / 3.0));
            double f = p + q;

            double x3 = x * x * x;

            for (int k = 0; k < Global.SeriesMax; k += 3)
            {
                double f_old = f;
                p *= x3 / ((k + 2) * (k + 3));
                q *= x3 / ((k + 3) * (k + 4));
                f += p + q;
                if (f == f_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
Example #20
 /// <summary>
 /// Compute the complex log Gamma function.
 /// </summary>
 /// <param name="z">The complex argument.</param>
 /// <returns>The principal complex value y for which exp(y) = &#x393;(z).</returns>
 /// <seealso cref="AdvancedMath.LogGamma" />
 /// <seealso href="http://mathworld.wolfram.com/LogGammaFunction.html"/>
 public static Complex LogGamma(Complex z)
 {
     if (z.Im == 0.0 && z.Re < 0.0)
     {
         // Handle the pure negative case explicitly.
         double re = Math.Log(Math.PI / Math.Abs(MoreMath.SinPi(z.Re))) - AdvancedMath.LogGamma(1.0 - z.Re);
         double im = Math.PI * Math.Floor(z.Re);
         return(new Complex(re, im));
     }
     else if (z.Re > 16.0 || Math.Abs(z.Im) > 16.0)
     {
         // According to https://dlmf.nist.gov/5.11, the Stirling asymptotic series is valid everywhere
         // except on the negative real axis. So at first I tried to use it for |z.Im| > 0, |z| > 16. But in practice,
         // it exhibits false convergence close to the negative real axis, e.g. z = -16 + i. So I have
         // moved to requiring |z| large and reasonably far from the negative real axis.
         return(Stirling.LogGamma(z));
     }
     else if (z.Re >= 0.125)
     {
         return(Lanczos.LogGamma(z));
     }
     else
     {
         // For the remaining z < 0, we need to use the reflection formula.
         // For large z.Im, SinPi(z) \propto e^{\pi |z.Im|} overflows even though its log does not.
         // It's possible to do some algebra to get around that problem, but it's not necessary
         // because for z.Im that big we would have used the Stirling series.
         Complex f = ComplexMath.Log(Math.PI / ComplexMath.SinPi(z));
         Complex g = Lanczos.LogGamma(1.0 - z);
         // The reflection formula doesn't stay on the principal branch, so we need to add a multiple of 2 \pi i
         // to fix it up. See Hare, "Computing the Principal Branch of Log Gamma" for how to do this.
         // https://pdfs.semanticscholar.org/1c9d/8865836a312836500126cb47c3cbbed3043e.pdf
         Complex h = new Complex(0.0, 2.0 * Math.PI * Math.Floor(0.5 * (z.Re + 0.5)));
         if (z.Im < 0.0)
         {
             h = -h;
         }
         return(f - g + h);
     }
 }
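
 // A small consistency sketch (hypothetical helper): per the summary above, exp(LogGamma(z)) should
 // reproduce Gamma(z) away from the poles; AdvancedComplexMath.Gamma is the complex Gamma function
 // referenced elsewhere in these docs.
 private static void LogGamma_ConsistencyCheck(Complex z)
 {
     Complex viaLog = ComplexMath.Exp(LogGamma(z));
     Complex direct = AdvancedComplexMath.Gamma(z);
     // For moderate |z| away from the negative real axis, viaLog and direct should agree to within a few ulps.
 }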
 /// <summary>
 /// Computes the given harmonic number.
 /// </summary>
 /// <param name="n">The index of the harmonic number to compute, which must be non-negative.</param>
 /// <returns>The harmonic number H<sub>n</sub>.</returns>
 /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
 /// <remarks>
 /// <para>H<sub>n</sub> is the nth partial sum of the harmonic series.</para>
 /// <img src="..\images\HarmonicSeries.png" />
 /// <para>Since the harmonic series diverges, H<sub>n</sub> grows without bound as n increases, but
 /// it does so extremely slowly, approximately as log(n).</para>
 /// </remarks>
 /// <seealso href="http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)"/>
 public static double HarmonicNumber(int n)
 {
     if (n < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(n));
     }
     else if (n < 32)
     {
         // for small values, just add up the harmonic series
         double H = 0.0;
         for (int i = 1; i <= n; i++)
         {
             H += 1.0 / i;
         }
         return(H);
     }
     else
     {
         // for large values, use the digamma function
         // this will route to the Stirling asymptotic expansion
         return(AdvancedMath.Psi(n + 1) + AdvancedMath.EulerGamma);
     }
 }
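
 // A brief usage sketch (hypothetical helper); H_4 = 25/12 is exact, and the large-n value follows from
 // the standard approximation H_n ~ ln(n) + EulerGamma.
 private static void HarmonicNumber_Examples()
 {
     double h4 = HarmonicNumber(4);         // 1 + 1/2 + 1/3 + 1/4 = 25/12 ~ 2.0833333
     double hM = HarmonicNumber(1000000);   // ~ ln(10^6) + 0.5772157 ~ 14.3927267
 }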
Example #22
 /// <summary>
 /// Computes the Riemann zeta function.
 /// </summary>
 /// <param name="x">The argument.</param>
 /// <returns>The value &#x3B6;(x).</returns>
 /// <remarks>
 /// <para>The Riemann &#x3B6; function can be defined as the sum of the <paramref name="x"/>th inverse power of the natural numbers.</para>
 /// <img src="../images/ZetaSeries.png" />
 /// </remarks>
 /// <seealso href="http://en.wikipedia.org/wiki/Riemann_zeta_function"/>
 /// <seealso href="https://mathworld.wolfram.com/RiemannZetaFunction.html"/>
 /// <seealso href="https://dlmf.nist.gov/25"/>
 public static double RiemannZeta(double x)
 {
     if (x < 0.0)
     {
         // for negative numbers, use the reflection formula
         double t = 1.0 - x;
         return(2.0 * Math.Pow(2.0 * Math.PI, -t) * MoreMath.CosPi(0.5 * t) * AdvancedMath.Gamma(t) * RiemannZeta(t));
     }
     else
     {
         double xm1 = x - 1.0;
         if (Math.Abs(xm1) < 0.25)
         {
              // near the singularity, use the Stieltjes expansion
             return(RiemannZeta_LaurentSeries(xm1));
         }
         else
         {
             // call Dirichlet function, which converges faster
             return(DirichletEta(x) / (1.0 - Math.Pow(2.0, 1.0 - x)));
         }
     }
 }
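
 // A brief usage sketch (hypothetical helper); \zeta(2) = \pi^2/6 and \zeta(-1) = -1/12 are standard values,
 // the latter exercising the reflection-formula branch above.
 private static void RiemannZeta_Examples()
 {
     double z2  = RiemannZeta(2.0);    // ~ 1.6449340668
     double zm1 = RiemannZeta(-1.0);   // = -1/12 ~ -0.0833333333
 }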
        /// <summary>
        /// Computes the given Bernoulli number.
        /// </summary>
        /// <param name="n">The index of the Bernoulli number to compute, which must be non-negative.</param>
        /// <returns>The Bernoulli number B<sub>n</sub>.</returns>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
        /// <remarks>
        /// <para>B<sub>n</sub> vanishes for all odd n except n=1. For n about 260 or larger, B<sub>n</sub> overflows a double.</para>
        /// </remarks>
        /// <seealso href="http://en.wikipedia.org/wiki/Bernoulli_number"/>
        /// <seealso href="http://mathworld.wolfram.com/BernoulliNumber.html"/>
        public static double BernoulliNumber(int n)
        {
            if (n < 0)
            {
                throw new ArgumentOutOfRangeException(nameof(n));
            }

            // B_1 is the only odd Bernoulli number.
            if (n == 1)
            {
                return(-1.0 / 2.0);
            }

            // For all other odd arguments, return zero.
            if (n % 2 != 0)
            {
                return(0.0);
            }

            // If the argument is small enough, look up the answer in our stored array.
            int m = n / 2;

            if (m < Bernoulli.Length)
            {
                return(Bernoulli[m]);
            }

            // Otherwise, use the relationship with the Riemann zeta function to get the Bernoulli number.
            // Since this is only done for large n, it would probably be faster to just sum the zeta series explicitly here.
            double B = 2.0 * AdvancedMath.RiemannZeta(n) / AdvancedMath.PowOverGammaPlusOne(Global.TwoPI, n);

            if (m % 2 == 0)
            {
                B = -B;
            }
            return(B);
        }
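
        // A brief usage sketch (hypothetical helper); B_2 = 1/6, B_3 = 0, and B_4 = -1/30 are standard values.
        private static void BernoulliNumber_Examples()
        {
            double b2 = BernoulliNumber(2);   // 1/6
            double b3 = BernoulliNumber(3);   // 0
            double b4 = BernoulliNumber(4);   // -1/30
        }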
Example #24
 /// <summary>
 /// Computes the Gamma function.
 /// </summary>
 /// <param name="x">The argument.</param>
 /// <returns>The value of &#x393;(x).</returns>
 /// <remarks>
 /// <para>The Gamma function is a generalization of the factorial (see <see cref="AdvancedIntegerMath.Factorial"/>) to arbitrary real values.</para>
 /// <img src="../images/GammaIntegral.png" />
 /// <para>For positive integer arguments, this integral evaluates to &#x393;(n+1)=n!, but it can also be evaluated for non-integer z.</para>
 /// <para>Because &#x393;(x) grows beyond the largest value that can be represented by a <see cref="System.Double" /> at quite
 /// moderate values of x, you may find it useful to work with the <see cref="LogGamma" /> method, which returns ln(&#x393;(x)).</para>
 /// <para>To evaluate the Gamma function for a complex argument, use <see cref="AdvancedComplexMath.Gamma" />.</para>
 /// <h2>Domain, Range, and Accuracy</h2>
 /// <para>The function is defined for all x. It has poles at all negative integers and at zero; the method returns <see cref="Double.NaN"/> for these arguments. For positive
 /// arguments, the value of the function increases rapidly with increasing argument. For values of x greater than about 170, the value of the function exceeds
 /// <see cref="Double.MaxValue"/>; for these arguments the method returns <see cref="Double.PositiveInfinity"/>. The method is accurate to full precision over its entire
 /// domain.</para>
 /// </remarks>
 /// <seealso cref="AdvancedIntegerMath.Factorial" />
 /// <seealso cref="LogGamma" />
 /// <seealso cref="AdvancedComplexMath.Gamma" />
 /// <seealso href="http://en.wikipedia.org/wiki/Gamma_function" />
 /// <seealso href="http://mathworld.wolfram.com/GammaFunction.html" />
 /// <seealso href="http://dlmf.nist.gov/5">DLMF on the Gamma Function</seealso>
 public static double Gamma(double x)
 {
     if (x <= 0.0)
     {
         if (x == Math.Ceiling(x))
         {
             // poles at zero and negative integers
             return(Double.NaN);
         }
         else
         {
             return(Math.PI / Gamma(-x) / (-x) / AdvancedMath.Sin(0.0, x / 2.0));
         }
     }
     else if (x < 16.0)
     {
         return(Lanczos.Gamma(x));
     }
     else
     {
         return(Stirling.Gamma(x));
         //return (Math.Exp(LogGamma_Stirling(x)));
     }
 }
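
 // A brief usage sketch (hypothetical helper); the values in the comments follow from Gamma(n+1) = n!,
 // Gamma(1/2) = Sqrt(pi), and the domain remarks above.
 private static void Gamma_Examples()
 {
     double g5 = Gamma(5.0);     // Gamma(5) = 4! = 24
     double gh = Gamma(0.5);     // Gamma(1/2) = Sqrt(pi) ~ 1.7724539
     double gp = Gamma(-2.0);    // NaN (pole at a non-positive integer)
     double gb = Gamma(200.0);   // Double.PositiveInfinity (overflow)
 }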
Example #25
        private static void Gamma_Temme(double a, double x, out double P, out double Q)
        {
            double u = Math.Log(x / a);

            // compute argument of error function, which is almost (x-a)/sqrt(a)

            double dz = 1.0;
            double z  = dz;

            for (int i = 3; true; i++)
            {
                if (i > Global.SeriesMax)
                {
                    throw new NonconvergenceException();
                }
                double z_old = z;
                dz *= u / i;
                z  += dz;
                if (z == z_old)
                {
                    break;
                }
            }
            z = u * Math.Sqrt(a * z / 2.0);

            // the first approximation is just the almost-Gaussian one

            if (z > 0)
            {
                Q = AdvancedMath.Erfc(z) / 2.0;
                P = 1.0 - Q;
            }
            else
            {
                P = AdvancedMath.Erfc(-z) / 2.0;
                Q = 1.0 - P;
            }

            // compute Temme's correction to the Gaussian approximation

            double R0 = Math.Exp(-z * z) / Math.Sqrt(Global.TwoPI * a);

            double S0 = 0.0;
            double ai = 1.0;

            for (int i = 0; i < TemmeD.Length; i++)
            {
                double dS = 0.0;
                double uj = 1.0;
                for (int j = 0; j < TemmeD[i].Length; j++)
                {
                    dS += TemmeD[i][j] * uj;
                    uj *= u;
                }
                S0 += dS / ai;
                ai *= a;
            }

            double R = R0 * S0;

            Q = Q + R;
            P = P - R;
        }
Example #26
 /// <summary>
 /// Compute the Riemann zeta function.
 /// </summary>
 /// <param name="s">The argument.</param>
 /// <returns>The value &#x3B6;(s).</returns>
 /// <remarks>
 /// <para>The Riemann &#x3B6; function can be defined as the sum of the <paramref name="s"/>th inverse power of the natural numbers.</para>
 /// <img src="../images/ZetaSeries.png" />
 /// </remarks>
 /// <seealso href="http://en.wikipedia.org/wiki/Riemann_zeta_function"/>
 public static double RiemannZeta(double s)
 {
     if (s < 0.0)
     {
         // for negative numbers, use the reflection formula
         double t = 1.0 - s;
         double z = 2.0 * Math.Pow(Global.TwoPI, -t) * Math.Cos(Global.HalfPI * t) * AdvancedMath.Gamma(t) * RiemannZeta(t);
         return(z);
     }
     else
     {
         if (Math.Abs(s - 1.0) < 0.25)
         {
              // near the singularity, use the Stieltjes expansion
             return(RiemannZeta_Series(s - 1.0));
         }
         else
         {
             // call Dirichlet function, which converges faster
             return(DirichletEta(s) / (1.0 - Math.Pow(2.0, 1.0 - s)));
         }
     }
 }
        // The point of this method is to compute
        //   e G_{e}(x) = \frac{1}{\Gamma(x)} - \frac{1}{\Gamma(x + e)}
        // To do this, we will re-use machinery that we developed to accurately compute the Pochhammer symbol
        //   (x)_e = \frac{\Gamma(x + e)}{\Gamma(x)}
        // Specifically, we use the reduced log Pochhammer function L_{e}(x), defined by
        //   \ln((x)_e) = e L_{e}(x)
        // To see why we developed this function, see the Pochhammer code. The Lanczos apparatus allows us to compute
        // L_{e}(x) accurately, even in the small-e limit; to see how, look at the Pochhammer code.

        // To connect G_{e}(x) to L_{e}(x), write
        //   e G_{e}(x) = \frac{(x)_e - 1}{\Gamma(x + e)}
        //              = \frac{\exp(\ln((x)_e)) - 1}{\Gamma(x + e)}
        //              = \frac{\exp(e L_{e}(x)) - 1}{\Gamma(x + e)}
        //     G_{e}(x) = \frac{E_{e}(L_{e}(x))}{\Gamma(x + e)}
        // where e E_{e}(x) = \exp(e x) - 1, which we also know how to compute accurately even in the small-e limit.

        // This deals with G_{e}(x) for positive x. But L_{e}(x) and \Gamma(x + e) still blow up for x or x + e
        // near a non-positive integer, and our Lanczos machinery for L_{e}(x) assumes positive x. To deal with
        // the left half-plane, use the reflection formula
        //   \Gamma(z) \Gamma(1 - z) = \frac{\pi}{\sin(\pi z)}
        // on both Gamma functions in the definition of G_{e}(x) to get
        //   e G_{e}(x) = \frac{\sin(\pi x)}{\pi} \Gamma(1 - x) - \frac{\sin(\pi x + \pi e)}{\pi} \Gamma(1 - x - e)
        // Use the angle addition formula on the second \sin and the definition of the Pochhammer symbol
        // to get all terms proportional to one Gamma function with a guaranteed positive argument.
        //   \frac{e G_{e}(x)}{\Gamma(1 - x - e)} =
        //     \frac{\sin(\pi x)}{\pi} \left[ (1 - x - e)_{e} - \cos(\pi e) \right] -  \frac{\cos(\pi x) \sin(\pi e)}{\pi}
        // We need the RHS to be ~e for small e. That's manifestly true for the second term because of the factor \sin(\pi e).
        // It's true for the first term because (1 - x - e)_{e} and \cos(\pi e) are both 1 + O(e), but to avoid cancelation
        // we need to make it manifest. Write
        //   (y)_{e} = \exp(e L_{e}(y)) - 1 + 1 = e E_{e}(L_{e}(y)) + 1
        // and
        //   1 - \cos(\pi e) = 2 \sin^2(\half \pi e)
        // Now we can divide through by e.
        //   \frac{G_{e}(x)}{\Gamma(y)} =
        //     \sin(\pi x) \left[ \frac{E_{e}(L_{e}(y))}{\pi} + \frac{\sin^2(\half \pi e)}{\half \pi e} \right] -
        //     \cos(\pi x) \frac{\sin(\pi e)}{\pi e}
        // and everything can be safely computed.

        // This is a different approach than the one in the Michel & Stoitsov paper. Their approach also used
        // the Lanczos evaluation of the Pochhammer symbol, but had some deficiencies.
        // For example, for e <~ 1.0E-15 and x near a negative integer, it gives totally wrong
        // answers, and the answers lose accuracy for even larger e. This is because the
        // computation relies on a ratio of h to Gamma, both of which blow up in this region.

        private static double NewG(double x, double e)
        {
            Debug.Assert(Math.Abs(e) <= 0.5);

            // It would be better to compute G outright from Lanczos, rather than via h. Can we do this?

            // Also, we should probably pick larger of 1 - x and 1 - x - e to use as argument of
            // Gamma function factor.

            double y = x + e;

            if ((x < 0.5) || (y < 0.5))
            {
                double h = MoreMath.ReducedExpMinusOne(Lanczos.ReducedLogPochhammer(1.0 - y, e), e);

                if (e == 0.0)
                {
                    double t = MoreMath.SinPi(x) * h / Math.PI - MoreMath.CosPi(x);
                    return(AdvancedMath.Gamma(1.0 - y) * t);
                }
                else
                {
                    double s  = MoreMath.SinPi(e) / (Math.PI * e);
                    double s2 = MoreMath.Sqr(MoreMath.SinPi(e / 2.0)) / (Math.PI * e / 2.0);
                    double t  = MoreMath.SinPi(x) * (h / Math.PI + s2) - MoreMath.CosPi(x) * s;
                    return(AdvancedMath.Gamma(1.0 - y) * t);
                }
            }

            return(MoreMath.ReducedExpMinusOne(Lanczos.ReducedLogPochhammer(x, e), e) / AdvancedMath.Gamma(x + e));
        }
        // Our approach to evaluating the transformed series is taken from Michel & Stoitsov, "Fast computation of the
        // Gauss hypergeometric function with all its parameters complex with application to the Poschl-Teller-Ginocchio
        // potential wave functions" (https://arxiv.org/abs/0708.0116). Michel & Stoitsov had a great idea, but their
        // exposition leaves much to be desired, so I'll put in a lot of detail here.

        // The basic idea is an old one: use the linear transformation formulas (A&S 15.3.3-15.3.9) to map all x into
        // the region [0, 1/2]. The x -> (1-x) transformation, for example, looks like
        //   F(a, b, c, x) =
        //     \frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a) \Gamma(c-b)} F(a, b, a+b-c+1, 1-x) +
        //     \frac{\Gamma(c) \Gamma(a+b-c)}{\Gamma(a) \Gamma(b)} F(c-a, c-b, c-a-b, 1-x) (1-x)^{c-a-b}

        // When c-a-b is close to an integer, though, there is a problem. Write c = a + b + m + e, where m is a positive integer
        // and |e| <= 1/2. The transformed expression becomes:
        //   \frac{F(a, b, c, x)}{\Gamma(c)} =
        //     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} F(a, b, 1 - m - e, 1 - x) +
        //     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} F(b + m + e, a + m + e, 1 + m + e, 1 - x) (1-x)^{m + e}
        // In the first term, the F-function blows up as e->0 (or, if m=0, \Gamma(m+e) blows up), and in the second term
        // \Gamma(-m-e) blows up in that limit. By finding the divergent O(1/e) and the sub-leading O(1) terms, it's not too
        // hard to show that the divergences cancel, leaving a finite result, and to derive that finite result for e=0.
        // (A&S gives the result, and similar ones for the divergent limits of other linear transformations.)
        // But we still have a problem for e small-but-not-zero. The pre-limit expressions will have large cancelations.
        // We can't ignore O(e) and higher terms, but developing a series in e is unworkable -- the higher derivatives
        // rapidly become complicated and unwieldy. No expressions in A&S get around this problem, but we will now
        // develop an approach that does.

        // Notice the divergence of F(a, b, 1 - m - e, 1 - x) is at the mth term, where (1 - m - e)_{m} ~ e. Pull out
        // the finite sum up to the (m-1)th term
        //   \frac{F_0}{\Gamma(c)} = \frac{\Gamma(m+e)}{\Gamma(b + m + e) \Gamma(a + m + e)}
        //     \sum_{k=0}^{m-1} \frac{(a)_k (b)_k}{(1 - m - e)_{k}} \frac{(1-x)^k}{k!}
        // The remainder, which contains the divergences, is:
        //   \frac{F_1}{\Gamma(c)} =
        //     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(1 + m + k)}
        //     \frac{\Gamma(a + m + k) \Gamma(b + m + k) \Gamma(1 - m - e)}{\Gamma(a) \Gamma(b) \Gamma(1 - e + k)} +
        //     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + e + k}}{\Gamma(1 + k)}
        //     \frac{\Gamma(b + m + e + k) \Gamma(a + m + e + k) \Gamma(1 + m + e)}{\Gamma(b + m + e) \Gamma(a + m + e) \Gamma(1 + m + e + k)}
        // where we have shifted k by m in the first sum. Use the \Gamma reflection formulae
        //   \Gamma(m + e) \Gamma(1 - m - e) = \frac{\pi}{\sin(\pi(m + e))} = \frac{(-1)^m \pi}{\sin(\pi e)}
        //   \Gamma(-m - e) \Gamma(1 + m + e) = \frac{\pi}{\sin(-\pi(m + e))} = -\frac{(-1)^m \pi}{\sin(\pi e)}
        // to make this
        //   \frac{F_1}{\Gamma(c)} =
        //     \frac{(-1)^m \pi}{\sin(\pi e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
        //     \left[ \frac{\Gamma(a + m + k) \Gamma(b + m + k)}{\Gamma(1 + k - e) \Gamma(1 + m + k)} -
        //            \frac{\Gamma(a + m + k + e) \Gamma(b + m + k + e)}{\Gamma(1 + k) \Gamma(1 + m + k + e)} (1-x)^e \right]
        // Notice that \frac{\pi}{\sin(\pi e)} diverges like ~1/e. And that the two terms in brackets contain exactly the
        // same products of \Gamma functions, except for having their arguments shifted by e. Therefore in the e->0
        // limit their leading terms must cancel, leaving terms ~e, which will cancel the ~1/e divergence, leaving a finite result.

        // We would like to accomplish this cancelation analytically. This isn't too hard to do for e=0. Just write out a Taylor
        // series for \Gamma(z + e), keeping only terms up to O(e). The O(1) terms cancel, the e in front of the O(e) terms gets
        // absorbed into a finite prefactor \frac{\pi e}{\sin(\pi e)}, and we have a finite result. A&S gives the resulting expression.
        // The trouble is for e small-but-not-zero. If we try to evaluate the terms directly, we get cancelations between large terms,
        // leading to catastrophic loss of precision. If we try to use Taylor expansion, we need all the higher derivatives,
        // not just the first one, and the expressions rapidly become so complex and unwieldy as to be unworkable.

        // A good solution, introduced by Forrey, and refined by Michel & Stoitsov, is to use finite differences instead
        // of derivatives. If we can express the difference between \Gamma(z) and \Gamma(z + e) as a function of z and e that
        // we can compute, then we can analytically cancel the divergent parts and be left with a finite expression involving
        // our finite difference function instead of an infinite series of Taylor series terms. For e=0, the finite difference
        // is just the first derivative, but for non-zero e, it implicitly sums the contributions of all Taylor series terms.

        // The finite difference function to use is:
        //   e G_{e}(z) = \frac{1}{\Gamma(z)} - \frac{1}{\Gamma(z+e)}
        // I played around with a few others, e.g. the perhaps more obvious choice \frac{\Gamma(z+e)}{\Gamma(z)} = 1 + e P_{e}(z),
        // but the key advantage of G_{e}(z) is that it is perfectly finite even for non-positive-integer values of z and z+e,
        // because it uses the reciprocal \Gamma function. (I actually had a mostly-working algorithm using P_{e}(z), but it
        // broke down at non-positive-integer z, because P_{e}(z) itself still diverged for those values.)

        // For a discussion of how to actually compute G_{e}(z), refer to the method notes.

        // The next trick Michel & Stoitsov use is to first concentrate just on the k=0 term. The relevant factor is
        //   t = \frac{1}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
        //       \left[ \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(1 - e) \Gamma(1 + m)} -
        //              \frac{\Gamma(a + m + e) \Gamma(b + m + e)}{\Gamma(1) \Gamma(1 + m + e)} (1-x)^e \right]
        //     = \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(a) \Gamma(b)}
        //       \left[ \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} -
        //              \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e) \Gamma(1)} (1-x)^e \right]
        // In the second step, we have put all the \Gamma functions we will need to compute for e-shifted arguments
        // in the denominator, which makes it easier to apply our definition of G_e(z), since there they are also
        // in the denominator.

        // Before we begin using G_e(z), let's isolate the e-dependence of the (1-x)^e factor. Write
        //   (1-x)^e = \exp(e \ln(1-x) ) = 1 + [ \exp(e \ln(1-x)) - 1 ] = 1 + e E_e(\ln(1-x))
        // where e E_e(z) = \exp(e z) - 1. We could continue like this, using G_e(z) to
        // eliminate every \Gamma(z + e) in favor of G_e(z) and \Gamma(z), but by doing so we
        // would end up with terms containing two and more explicit powers of e, and products
        // of different G_e(z). That would be perfectly correct, but we end up with a nicer
        // expression if we instead "peel off" only one e-shifted function at a time, like this...
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1)} +
        //     \frac{e G_{-e}(1)}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)}
        //   \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} +
        //     \frac{e G_e(a + m)}{\Gamma(b + m) \Gamma(1 + m + e)}
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} +
        //     \frac{e G_e(b + m)}{\Gamma(a + m + e) \Gamma(1 + m + e)}
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)} -
        //     \frac{e G_e(1 + m)}{\Gamma(a + m + e) \Gamma(b + m + e)}
        // Putting this all together, we have
        //   t = \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e)} \left[ \frac{e G_{-e}(1)}{\Gamma(1 + m)} + e G_e(1 + m) \right]
        //     - \frac{1}{\Gamma(1 + m + e)} \left[ \frac{e G_e(a + m)}{\Gamma(b + m)} + \frac{e G_e(b+m)}{\Gamma(a + m + e)} \right]
        //     - \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} e E_e(\ln(1-x))
        // which is, as promised, proportional to e. For some a, b, m, and e, some of these \Gamma functions will blow up, but they are
        // all in the denominator, so that will just zero some terms. The G_e(z) that appear in the numerator are finite for all z.

        // So now we have the k=0 term. What about higher k terms? We could repeat this analysis, carrying along the k's,
        // and get an expression involving G_e(z) and E_e(z) for each k. Michel & Stoitsov's last trick is to realize
        // we don't have to do this, but can instead use our original expressions for each term as a ratio of \Gamma
        // functions to derive a recurrence. Let u_k be the first term, v_k be the second term, so t_k = u_k + v_k.
        // Let r_k = u_{k+1} / u_{k} and s_k = v_{k+1} / v_{k}. It's easy to write down r_k and s_k because they
        // follow immediately from the \Gamma function recurrence.
        //    r_{k} = \frac{(a + m + k)(b + m + k)}{(1 + k - e)(1 + m + k)}
        //    s_{k} = \frac{(a + m + k + e)(b + m + k + e)}{(1 + k)(1 + m + k + e)}
        // Notice that r_k and s_k are almost equal, but not quite: they differ by O(e). To advance t_k, use
        //    t_{k+1} = u_{k+1} + v_{k+1} = r_k u_k + s_k v_k = s_k (u_k + v_k) + (r_k - s_k) u_k
        //            = s_k t_k + d_k * u_k
        // where d_k = r_k - s_k, which will be O(e), since r_k and s_k only differ by e-shifted arguments.

        // In the x -> (1-x), x -> 1/x, x -> x / (1-x), and x -> 1 - 1/x linear transformations, canceling divergences
        // appear when some arguments of the transformed functions are non-positive-integers.

        private static double Hypergeometric2F1_Series_OneOverOneMinusX(double a, int m, double e, double c, double x1)
        {
            Debug.Assert(m >= 0);
            Debug.Assert(Math.Abs(e) <= 0.5);
            Debug.Assert(Math.Abs(x1) <= 0.75);

            double b = a + m + e;

            double g_c = AdvancedMath.Gamma(c);
            //double rg_a = 1.0 / AdvancedMath.Gamma(a);
            double rg_b   = 1.0 / AdvancedMath.Gamma(b);
            double rg_cma = 1.0 / AdvancedMath.Gamma(c - a);
            //double rg_cmb = 1.0 / AdvancedMath.Gamma(c - b);

            // Pochhammer product, keeps track of (a)_k (c-b)_k (x')^{a + k}
            double p = Math.Pow(x1, a);

            double f0 = 0.0;

            if (m > 0)
            {
                f0 = p;

                double q = 1.0;
                for (int k = 1; k < m; k++)
                {
                    int km1 = k - 1;
                    p  *= (a + km1) * (c - b + km1) * x1;
                    q  *= (k - m - e) * k;
                    f0 += p / q;
                }

                f0 *= g_c * rg_b * rg_cma * AdvancedMath.Gamma(m + e);
                p  *= (a + (m - 1)) * (c - b + (m - 1)) * x1;
            }

            // Now compute the remaining terms with analytically canceled divergent parts.

            double t = rg_b * rg_cma * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
                       1.0 / AdvancedMath.Gamma(1 + m + e) * (NewG(a + m, e) / AdvancedMath.Gamma(c - a - e) + NewG(c - a, -e) / AdvancedMath.Gamma(b)) -
                       MoreMath.ReducedExpMinusOne(Math.Log(x1), e) / AdvancedMath.Gamma(a + m) / AdvancedMath.Gamma(c - a - e) / AdvancedMath.Gamma(m + 1 + e);

            t *= p;

            double f1 = t;

            double u = p * rg_b * rg_cma / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

            for (int k = 0; k < Global.SeriesMax; k++)
            {
                double f1_old = f1;

                int    k1   = k + 1;
                int    mk1  = m + k1;
                double amk  = a + m + k;
                double amke = amk + e;
                double cak  = c - a + k;
                double cake = cak - e;
                double k1e  = k1 - e;
                double mk1e = mk1 + e;

                double r = amk * cake / k1e / mk1;
                double s = amke * cak / mk1e / k1;

                // Compute (r - s) / e analytically because leading terms cancel
                double d = (amk * cake / mk1 - amk - cake - e + amke * cak / k1) / mk1e / k1e;

                t = (s * t + d * u) * x1;

                f1 += t;

                if (f1 == f1_old)
                {
                    f1 *= ReciprocalSincPi(e) * g_c;
                    if (m % 2 != 0)
                    {
                        f1 = -f1;
                    }
                    return(f0 + f1);
                }

                u *= r * x1;
            }

            throw new NonconvergenceException();
        }
        private static double Hypergeometric2F1_Series_OneMinusX(double a, double b, int m, double e, double x1)
        {
            Debug.Assert(m >= 0);
            Debug.Assert(Math.Abs(e) <= 0.5);
            Debug.Assert(Math.Abs(x1) <= 0.75);

            double c = a + b + m + e;

            // Compute all the gammas we will use.
            double g_c    = AdvancedMath.Gamma(c);
            double rg_am  = 1.0 / AdvancedMath.Gamma(a + m);
            double rg_bm  = 1.0 / AdvancedMath.Gamma(b + m);
            double rg_ame = 1.0 / AdvancedMath.Gamma(a + m + e);
            double rg_bme = 1.0 / AdvancedMath.Gamma(b + m + e);
            double rg_m1e = 1.0 / AdvancedMath.Gamma(m + 1 + e);

            // Pochhammer product, keeps track of (a)_m (b)_m (x')^m
            double p = 1.0;

            // First compute the finite sum, which contains no divergent terms even for e = 0.
            double f0 = 0.0;

            if (m > 0)
            {
                double t0 = 1.0;
                f0 = t0;
                for (int k = 1; k < m; k++)
                {
                    int km1 = k - 1;
                    p  *= (a + km1) * (b + km1) * x1;
                    t0 *= 1.0 / (1.0 - m - e + km1) / k;
                    f0 += t0 * p;
                }

                f0 *= g_c * rg_bme * rg_ame * AdvancedMath.Gamma(m + e);
                p  *= (a + (m - 1)) * (b + (m - 1)) * x1;
            }

            // Now compute the remaining terms with analytically canceled divergent parts.

            double t = rg_bme * rg_ame * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
                       rg_m1e * (NewG(a + m, e) * rg_bme + NewG(b + m, e) * rg_am) -
                       MoreMath.ReducedExpMinusOne(Math.Log(x1), e) * rg_am * rg_bm * rg_m1e;


            t *= p;
            double f1 = t;
            double u  = p * rg_bme * rg_ame / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

            for (int k = 0; k < Global.SeriesMax; k++)
            {
                double f1_old = f1;

                // Compute a bunch of sums we will use.
                int    k1   = k + 1;
                int    mk   = m + k;
                int    mk1  = mk + 1;
                double k1e  = k1 - e;
                double amk  = a + mk;
                double bmk  = b + mk;
                double amke = amk + e;
                double bmke = bmk + e;
                double mk1e = mk1 + e;

                // Compute the ratios of each term. These are close, but not equal for e != 0.
                double r = amk * bmk / mk1 / k1e;
                double s = amke * bmke / mk1e / k1;

                // Compute (r - s) / e, with O(1) terms of (r - s) analytically canceled.
                double d = (amk * bmk / mk1 - (amk + bmk + e) + amke * bmke / k1) / mk1e / k1e;

                // Advance to the next term, including the correction for s != t.
                t = (s * t + d * u) * x1;

                f1 += t;

                if (f1 == f1_old)
                {
                    f1 *= ReciprocalSincPi(e) * g_c;
                    if (m % 2 != 0)
                    {
                        f1 = -f1;
                    }
                    return(f0 + f1);
                }

                // Advance the u term, which we will need for the next iteration.
                u *= r * x1;
            }

            throw new NonconvergenceException();
        }