        // The point of this method is to compute
        //   e G_{e}(x) = \frac{1}{\Gamma(x)} - \frac{1}{\Gamma(x + e)}
        // To do this, we will re-use machinery that we developed to accurately compute the Pochhammer symbol
        //   (x)_e = \frac{\Gamma(x + e)}{\Gamma(x)}
        // Specifically, we use the reduced log Pochhammer function L_{e}(x), defined by
        //   \ln((x)_e) = e L_{e}(x)
        // The Lanczos apparatus allows us to compute L_{e}(x) accurately, even in the small-e limit; see the
        // Pochhammer code for why we introduced this function and how it is computed.

        // To connect G_{e}(x) to L_{e}(x), write
        //   e G_{e}(x) = \frac{(x)_e - 1}{\Gamma(x + e)}
        //              = \frac{\exp(\ln((x)_e)) - 1}{\Gamma(x + e)}
        //              = \frac{\exp(e L_{e}(x)) - 1}{\Gamma(x + e)}
        //     G_{e}(x) = \frac{E_{e}(L_{e}(x))}{\Gamma(x + e)}
        // where e E_{e}(x) = \exp(e x) - 1, which we also know how to compute accurately even in the small-e limit.
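        // As a consistency check (my note, not part of the original comment): in the e -> 0 limit, E_0(z) = z and
        // L_0(x) = \psi(x), so the formula above gives G_0(x) = \psi(x) / \Gamma(x), which agrees with the limit of
        // the definition,
        //   G_0(x) = \lim_{e \to 0} \frac{1}{e} \left[ \frac{1}{\Gamma(x)} - \frac{1}{\Gamma(x + e)} \right]
        //          = -\frac{d}{dx} \frac{1}{\Gamma(x)} = \frac{\psi(x)}{\Gamma(x)}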

        // This deals with G_{e}(x) for positive x. But L_{e}(x) and \Gamma(x + e) still blow up for x or x + e
        // near a non-positive integer, and our Lanczos machinery for L_{e}(x) assumes positive x. To deal with
        // the left half-plane, use the reflection formula
        //   \Gamma(z) \Gamma(1 - z) = \frac{\pi}{\sin(\pi z)}
        // on both Gamma functions in the definition of G_{e}(x) to get
        //   e G_{e}(x) = \frac{\sin(\pi x)}{\pi} \Gamma(1 - x) - \frac{\sin(\pi x + \pi e)}{\pi} \Gamma(1 - x - e)
        // Use the angle addition formula on the second \sin and the definition of the Pochhammer symbol
        // to get all terms proportional to one Gamma function with a guaranteed positive argument.
        //   \frac{e G_{e}(x)}{\Gamma(1 - x - e)} =
        //     \frac{\sin(\pi x)}{\pi} \left[ (1 - x - e)_{e} - \cos(\pi e) \right] -  \frac{\cos(\pi x) \sin(\pi e)}{\pi}
        // We need the RHS to be ~e for small e. That's manifestly true for the second term because of the factor \sin(\pi e).
        // It's also true for the first term, because (1 - x - e)_{e} and \cos(\pi e) are both 1 + O(e), but to avoid cancelation
        // we need to make it manifest. Write
        //   (y)_{e} = \exp(e L_{e}(y)) - 1 + 1 = e E_{e}(L_{e}(y)) + 1, with y = 1 - x - e,
        // and
        //   1 - \cos(\pi e) = 2 \sin^2(\half \pi e)
        // Now we can divide through by e.
        //   \frac{G_{e}(x)}{\Gamma(y)} =
        //     \sin(\pi x) \left[ \frac{E_{e}(L_{e}(y))}{\pi} + \frac{\sin^2(\half \pi e)}{\half \pi e} \right] -
        //     \cos(\pi x) \frac{\sin(\pi e)}{\pi e}
        // and everything can be safely computed.

        // This is a different approach than the one in the Michel & Stoitsov paper. Their approach also used
        // the Lanczos evaluation of the Pochhammer symbol, but had some deficiencies.
        // For example, for e <~ 1.0E-15 and x near a negative integer, it gives totally wrong
        // answers, and the answers lose accuracy even for somewhat larger e. This is because the
        // computation relies on a ratio of h to Gamma, both of which blow up in this region.

        private static double NewG(double x, double e)
        {
            Debug.Assert(Math.Abs(e) <= 0.5);

            // It would be better to compute G outright from Lanczos, rather than via h. Can we do this?

            // Also, we should probably pick larger of 1 - x and 1 - x - e to use as argument of
            // Gamma function factor.

            double y = x + e;

            if ((x < 0.5) || (y < 0.5))
            {
                double h = MoreMath.ReducedExpMinusOne(Lanczos.ReducedLogPochhammer(1.0 - y, e), e);

                if (e == 0.0)
                {
                    double t = MoreMath.SinPi(x) * h / Math.PI - MoreMath.CosPi(x);
                    return(AdvancedMath.Gamma(1.0 - y) * t);
                }
                else
                {
                    double s  = MoreMath.SinPi(e) / (Math.PI * e);
                    double s2 = MoreMath.Sqr(MoreMath.SinPi(e / 2.0)) / (Math.PI * e / 2.0);
                    double t  = MoreMath.SinPi(x) * (h / Math.PI + s2) - MoreMath.CosPi(x) * s;
                    return(AdvancedMath.Gamma(1.0 - y) * t);
                }
            }

            return(MoreMath.ReducedExpMinusOne(Lanczos.ReducedLogPochhammer(x, e), e) / AdvancedMath.Gamma(x + e));
        }
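
        // A minimal numerical check sketch (my addition, not part of the library; it assumes an AdvancedMath.Psi(double)
        // digamma method is available): the e = 0 path should reproduce psi(x) / Gamma(x), per the limit noted above.
        //
        //   Debug.Assert(Math.Abs(NewG(1.0, 0.0) - AdvancedMath.Psi(1.0)) < 1.0E-14);   // psi(1) = -EulerGamma ~= -0.5772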
Example #2
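 // Power series about the origin (standard series, noted here for reference; not part of the original snippet):
 //   I_{\nu}(x) = \sum_{k=0}^{\infty} \frac{(x/2)^{\nu + 2k}}{k! \Gamma(\nu + k + 1)}
 // Each term follows from the previous one by multiplying by (x^2/4) / (k (\nu + k)), which is what the loop below does.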
 private static double BesselI_Series(double nu, double x)
 {
     if (x == 0.0)
     {
         if (nu == 0.0)
         {
             return(1.0);
         }
         else
         {
             return(0.0);
         }
     }
     else
     {
         double dI = Math.Pow(x / 2.0, nu) / AdvancedMath.Gamma(nu + 1.0);
         double I  = dI;
         double xx = x * x / 4.0;
         for (int k = 1; k < Global.SeriesMax; k++)
         {
             double I_old = I;
             dI = dI * xx / (nu + k) / k;
             I += dI;
             if (I == I_old)
             {
                 return(I);
             }
         }
         throw new NonconvergenceException();
     }
 }
Example #3
        // problematic for positive x once exponential decay sets in; don't use for x > 2

        //private static readonly double AiryNorm1 = 1.0 / (Math.Pow(3.0, 2.0 / 3.0) * AdvancedMath.Gamma(2.0 / 3.0));

        //private static readonly double AiryNorm2 = 1.0 / (Math.Pow(3.0, 1.0 / 3.0) * AdvancedMath.Gamma(1.0 / 3.0));
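
        // Maclaurin series being summed (standard result, noted here for reference): Ai(x) = c_1 f(x) - c_2 g(x), where
        //   c_1 = 3^{-2/3} / \Gamma(2/3), c_2 = 3^{-1/3} / \Gamma(1/3)
        //   f(x) = 1 + \frac{x^3}{6} + \frac{x^6}{180} + \cdots  and  g(x) = x + \frac{x^4}{12} + \frac{x^7}{504} + \cdots
        // Below, p and q track successive terms of c_1 f(x) and c_2 g(x), and f accumulates their difference.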

        private static double AiryAi_Series(double x)
        {
            double AiryNorm1 = 1.0 / (Math.Pow(3.0, 2.0 / 3.0) * AdvancedMath.Gamma(2.0 / 3.0));
            double AiryNorm2 = 1.0 / (Math.Pow(3.0, 1.0 / 3.0) * AdvancedMath.Gamma(1.0 / 3.0));

            double p = AiryNorm1;
            double q = AiryNorm2 * x;
            double f = p - q;
            //double fp = -AiryNorm2;

            double x3 = x * x * x;

            for (int k = 0; k < Global.SeriesMax; k += 3)
            {
                double f_old = f;
                p *= x3 / ((k + 2) * (k + 3));
                q *= x3 / ((k + 3) * (k + 4));
                f += p - q;
                //fp += (k + 3) * p / x - (k + 4) * q / x;
                if (f == f_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
Example #4
        // series near the origin; this is entirely analogous to the Bessel series near the origin
        // it has a corresponding radius of rapid convergence, x < 4 + 2 Sqrt(nu)

        // This is exactly the same as BesselJ_Series with xx -> -xx.
        // We could even factor this out into a common method with an additional parameter.

        private static void ModifiedBesselI_Series(double nu, double x, out double I, out double IP)
        {
            Debug.Assert(x > 0.0);

            double x2 = x / 2.0;
            double xx = x2 * x2;
            double dI = Math.Pow(x2, nu) / AdvancedMath.Gamma(nu + 1.0);

            //double dI = AdvancedMath.PowOverGammaPlusOne(x2, nu);
            I  = dI;
            IP = nu * dI;
            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double I_old  = I;
                double IP_old = IP;
                dI *= xx / k / (nu + k);
                I  += dI;
                IP += (nu + 2 * k) * dI;
                if ((I == I_old) && (IP == IP_old))
                {
                    IP = IP / x;
                    return;
                }
            }

            throw new NonconvergenceException();
        }
Example #5
 internal static double PowerOverFactorial(double x, double nu)
 {
     if (nu < 16.0)
     {
         return(Math.Pow(x, nu) / AdvancedMath.Gamma(nu + 1.0));
     }
     else
     {
         return(Stirling.PowerFactor(x, nu));
     }
 }
Example #6
        // This function computes x^{\nu} / \Gamma(\nu + 1), which can easily become Infinity/Infinity=NaN for large \nu if computed naively.
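        // For example (illustrative numbers of my own, not from the original): with x = 100 and nu = 200, Math.Pow(x, nu)
        // and AdvancedMath.Gamma(nu + 1.0) both overflow to Infinity, so the naive ratio is NaN, even though
        // x^{\nu} / \Gamma(\nu + 1) ~ 1.3E25 is perfectly representable; hence the hand-off to Stirling below.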

        internal static double PowOverGammaPlusOne(double x, double nu)
        {
            if (nu < 16.0)
            {
                return(Math.Pow(x, nu) / AdvancedMath.Gamma(nu + 1.0));
            }
            else
            {
                return(Stirling.PowOverGammaPlusOne(x, nu));
            }
        }
Example #7
        private static SolutionPair Airy_Series(double x)
        {
            // compute k = 0 terms in f' and g' series, and in f and g series
            // compute terms to get k = 0 terms in a' and b' and a and b series

            double g  = 1.0 / Math.Pow(3.0, 1.0 / 3.0) / AdvancedMath.Gamma(1.0 / 3.0);
            double ap = -g;
            double bp = g;

            double f = 1.0 / Math.Pow(3.0, 2.0 / 3.0) / AdvancedMath.Gamma(2.0 / 3.0);

            g *= x;
            double a = f - g;
            double b = f + g;

            // we will need to multiply by x^2 to produce higher terms, so remember it
            double x2 = x * x;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                // remember old values
                double a_old  = a;
                double b_old  = b;
                double ap_old = ap;
                double bp_old = bp;

                // compute 3k
                double tk = 3 * k;

                // kth term in f' and g' series, and corresponding a' and b' series
                f  *= x2 / (tk - 1);
                g  *= x2 / tk;
                ap += (f - g);
                bp += (f + g);

                // kth term in f and g series, and corresponding a and b series
                f *= x / tk;
                g *= x / (tk + 1);
                a += (f - g);
                b += (f + g);

                // check for convergence
                if ((a == a_old) && (b == b_old) && (ap == ap_old) && (bp == bp_old))
                {
                    return(new SolutionPair(
                               a, ap,
                               Global.SqrtThree * b, Global.SqrtThree * bp
                               ));
                }
            }

            throw new NonconvergenceException();
        }
        /// <summary>
        /// Computes the generalized exponential integral.
        /// </summary>
        /// <param name="n">The order parameter.</param>
        /// <param name="x">The argument, which must be non-negative.</param>
        /// <returns>The value of E<sub>n</sub>(x).</returns>
        /// <remarks>
        /// <para>The generalized exponential integral is defined as:</para>
        /// <img src="../images/EIntegral.png" />
        /// <para>It is related to the incomplete Gamma function (<see cref="Gamma(double, double)"/>)
        /// for negative, integer shape parameters.</para>
        /// <img src="../images/EnGammaRelation.png" />
        /// <para>For n=1, it is expressible as a simple power series.</para>
        /// <img src="../images/E1Series.png" />
        /// <para>For negative x, E<sub>1</sub>(x) develops an imaginary part, but its real part is given by the Ei(x) function
        /// (<see cref="IntegralEi(double)"/>).</para>
        /// <img src="../images/E1EiRelation.png" />
        /// <para>To compute E<sub>1</sub>(z) in the entire complex plane, use <see cref="AdvancedComplexMath.Ein(Complex)"/>.</para>
        /// <para>Sometimes the function E<sub>1</sub>(z) is called the exponential integral, and sometimes that name is used
        /// for Ei(x). In hydrology, E<sub>1</sub>(x) is sometimes called the Well function.</para>
        /// </remarks>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="x"/> is negative.</exception>
        /// <seealso href="http://mathworld.wolfram.com/En-Function.html"/>
        public static double IntegralE(int n, double x)
        {
            if (x < 0.0)
            {
                throw new ArgumentOutOfRangeException(nameof(x));
            }

            // Special case x = 0.
            if (x == 0.0)
            {
                if (n <= 1)
                {
                    return(Double.PositiveInfinity);
                }
                else
                {
                    return(1.0 / (n - 1));
                }
            }

            // Special cases: negative n and n = 0.
            if (n < 0)
            {
                // negative n is expressible using incomplete Gamma
                return(AdvancedMath.Gamma(1 - n, x) / MoreMath.Pow(x, 1 - n));
            }
            else if (n == 0)
            {
                // special case n=0
                return(Math.Exp(-x) / x);
            }

            // Now we are sure x > 0 and n > 0.
            if (x < 2.0)
            {
                return(IntegralE_Series(n, x));
            }
            else if (x < expLimit)
            {
                // Since E_n(x) < e^{-x}, we can short-cut to zero if x is big enough.
                // This nicely avoids our continued fraction's bad behavior for infinite x.
                return(IntegralE_ContinuedFraction(n, x));
            }
            else if (x <= Double.PositiveInfinity)
            {
                return(0.0);
            }
            else
            {
                Debug.Assert(Double.IsNaN(x));
                return(x);
            }
        }
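
        // Example of use (my addition; the value is from standard tables): AdvancedMath.IntegralE(1, 1.0) should return
        // E_1(1) = \int_1^{\infty} e^{-t} / t \, dt = 0.2193839...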
Example #9
        // series near the origin; this is entirely analogous to the Bessel series near the origin
        // it has a corresponding radius of rapid convergence, x < 4 + 2 Sqrt(nu)

        // This is exactly the same as BesselJ_Series with xx -> -xx.
        // We could even factor this out into a common method with an additional parameter.

        private static void ModifiedBesselI_Series(double nu, double x, out double I, out double IP)
        {
            if (x == 0.0)
            {
                if (nu == 0.0)
                {
                    I  = 1.0;
                    IP = 0.0;
                }
                else if (nu < 1.0)
                {
                    I  = 0.0;
                    IP = Double.PositiveInfinity;
                }
                else if (nu == 1.0)
                {
                    I  = 0.0;
                    IP = 0.5;
                }
                else
                {
                    I  = 0.0;
                    IP = 0.0;
                }
            }
            else
            {
                double x2 = x / 2.0;
                double xx = x2 * x2;
                double dI;
                if (nu < 128.0)
                {
                    dI = Math.Pow(x2, nu) / AdvancedMath.Gamma(nu + 1.0);
                }
                else
                {
                    dI = Math.Exp(nu * Math.Log(x2) - AdvancedMath.LogGamma(nu + 1.0));
                }
                I = dI; IP = nu * dI;
                for (int k = 1; k < Global.SeriesMax; k++)
                {
                    double I_old = I; double IP_old = IP;
                    dI *= xx / k / (nu + k);
                    I  += dI; IP += (nu + 2 * k) * dI;
                    if ((I == I_old) && (IP == IP_old))
                    {
                        IP = IP / x;
                        return;
                    }
                }
                // If the series fails to converge within the iteration limit, throw, as the other series methods here do.
                throw new NonconvergenceException();
            }
        }
 /// <summary>
 /// Computes the factorial of an integer.
 /// </summary>
 /// <param name="n">The argument, which must be non-negative.</param>
 /// <returns>The value of n!.</returns>
 /// <remarks>
 /// <para>The factorial of an integer n is the product of all integers from 1 to n. For example, 4! = 4 * 3 * 2 * 1 = 24.</para>
 /// <para>n! also has a combinatorial interpretation as the number of permutations of n objects. For example, a set of 3
 /// objects (abc) has 3! = 6 permutations: (abc), (bac), (cba), (acb), (cab), (bca).</para>
 /// <para>Because n! grows extremely quickly with increasing n, we return the result as a double, even though
 /// the value is always an integer. (13! would overflow an int, 21! would overflow a long, 171! overflows even a double.)
 /// <para>In order to deal with factorials of larger numbers, you can use the <see cref="LogFactorial"/> method, which
 /// returns accurate values of ln(n!) even for values of n for which n! would overflow a double.</para>
 /// <para>The factorial is generalized to non-integer arguments by the &#x393; function (<see cref="AdvancedMath.Gamma(double)"/>).</para>
 /// </remarks>
 /// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
 /// <seealso cref="LogFactorial"/>
 /// <seealso cref="AdvancedMath.Gamma(double)"/>
 /// <seealso href="http://en.wikipedia.org/wiki/Factorial"/>
 public static double Factorial(int n)
 {
     if (n < 0)
     {
         throw new ArgumentOutOfRangeException(nameof(n));
     }
     else if (n < factorialTable.Length)
     {
         return((double)factorialTable[n]);
     }
     else
     {
         return(Math.Round(AdvancedMath.Gamma(n + 1)));
     }
 }
Example #11
        private static double ModifiedBesselI_Series(double nu, double x)
        {
            double x2 = x / 2.0;
            double dI = Math.Pow(x2, nu) / AdvancedMath.Gamma(nu + 1.0);
            double I  = dI;
            double xx = x2 * x2;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                double I_old = I;
                dI = dI * xx / (nu + k) / k;
                I += dI;
                if (I == I_old)
                {
                    return(I);
                }
            }
            throw new NonconvergenceException();
        }
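
        // Kelvin function series being summed (standard result, noted here for reference):
        //   \mathrm{ber}_{\nu}(x) = \sum_{k=0}^{\infty} \cos\left[ \left( \tfrac{3\nu}{4} + \tfrac{k}{2} \right) \pi \right]
        //                           \frac{(x/2)^{\nu + 2k}}{k! \Gamma(\nu + k + 1)}
        // The cosine factor cycles through c, -s, -c, s as k mod 4 runs through 0, 1, 2, 3, which is what the switch below does.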
        private static double BerSeries(double nu, double x)
        {
            double c = Math.Cos(3.0 * nu * Math.PI / 4.0);
            double s = Math.Sin(3.0 * nu * Math.PI / 4.0);
            double xh = x / 2.0; double xh2 = xh * xh;

            double df    = Math.Pow(xh, nu) / AdvancedMath.Gamma(nu + 1.0);
            double f_old = 0.0;
            double f     = c * df;

            for (int k = 1; k < Global.SeriesMax; k++)
            {
                // we look two values back because for some values of nu (e.g. 0.0) only alternating
                // powers contribute; for these values we would always have f_old == f for non-contributing
                // powers and the series would terminate early
                double f_old_old = f_old; f_old = f;
                df *= xh2 / k / (nu + k);
                switch (k % 4)
                {
                case 0:
                    f += c * df;
                    break;

                case 1:
                    f -= s * df;
                    break;

                case 2:
                    f -= c * df;
                    break;

                case 3:
                    f += s * df;
                    break;
                }
                if (f == f_old_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
Example #13
        /// <summary>
        /// Computes the exponential integral.
        /// </summary>
        /// <param name="n">The order parameter.</param>
        /// <param name="x">The argument, which must be non-negative.</param>
        /// <returns>The value of E<sub>n</sub>(x).</returns>
        /// <remarks>
        /// <para>The exponential integral is defined as:</para>
        /// <img src="../images/EIntegral.png" />
        /// <para>It is related to the incomplete Gamma function for negative, integer shape parameters by &#x393;(-k, x) = E<sub>k+1</sub>(x) / x<sup>k</sup>.</para>
        /// <para>In hydrology, E<sub>1</sub>(x) is sometimes called the Well function.</para>
        /// </remarks>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="x"/> is negative.</exception>
        public static double IntegralE(int n, double x)
        {
            if (x < 0.0)
            {
                throw new ArgumentOutOfRangeException("x");
            }

            // special case x = 0
            if (x == 0.0)
            {
                if (n <= 1)
                {
                    return(Double.PositiveInfinity);
                }
                else
                {
                    return(1.0 / (n - 1));
                }
            }

            if (n < 0)
            {
                // negative n is expressible using incomplete Gamma
                return(AdvancedMath.Gamma(1 - n, x) / MoreMath.Pow(x, 1 - n));
            }
            else if (n == 0)
            {
                // special case n=0
                return(Math.Exp(-x) / x);
            }
            else if (x < 2.0)
            {
                // use series for x < 2
                return(IntegralE_Series(n, x));
            }
            else
            {
                // use continued fraction for x >= 2
                return(IntegralE_ContinuedFraction(n, x));
            }
        }
Example #14
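        // Maclaurin series being summed (standard result, noted here for reference):
        //   Bi(x) = \sqrt{3} \left[ \frac{3^{-2/3}}{\Gamma(2/3)} f(x) + \frac{3^{-1/3}}{\Gamma(1/3)} g(x) \right]
        // with f(x) = 1 + x^3/6 + x^6/180 + ... and g(x) = x + x^4/12 + x^7/504 + ...; folding in the \sqrt{3} gives
        // the starting values of p and q below.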
        private static double AiryBi_Series(double x)
        {
            double p = 1.0 / (Math.Pow(3.0, 1.0 / 6.0) * AdvancedMath.Gamma(2.0 / 3.0));
            double q = x * (Math.Pow(3.0, 1.0 / 6.0) / AdvancedMath.Gamma(1.0 / 3.0));
            double f = p + q;

            double x3 = x * x * x;

            for (int k = 0; k < Global.SeriesMax; k += 3)
            {
                double f_old = f;
                p *= x3 / ((k + 2) * (k + 3));
                q *= x3 / ((k + 3) * (k + 4));
                f += p + q;
                if (f == f_old)
                {
                    return(f);
                }
            }
            throw new NonconvergenceException();
        }
 /// <summary>
 /// Computes the Riemann zeta function.
 /// </summary>
 /// <param name="x">The argument.</param>
 /// <returns>The value &#x3B6;(s).</returns>
 /// <remarks>
 /// <para>The Riemann &#x3B6; function can be defined as the sum of the <paramref name="x"/>th inverse power of the natural numbers.</para>
 /// <img src="../images/ZetaSeries.png" />
 /// </remarks>
 /// <seealso href="http://en.wikipedia.org/wiki/Riemann_zeta_function"/>
 /// <seealso href="https://mathworld.wolfram.com/RiemannZetaFunction.html"/>
 /// <seealso href="https://dlmf.nist.gov/25"/>
 public static double RiemannZeta(double x)
 {
     if (x < 0.0)
     {
         // for negative numbers, use the reflection formula
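          //   zeta(x) = 2 (2 pi)^(x - 1) sin(pi x / 2) Gamma(1 - x) zeta(1 - x)
          // (the standard functional equation, written out here for reference; with t = 1 - x it is the line below)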
         double t = 1.0 - x;
         return(2.0 * Math.Pow(2.0 * Math.PI, -t) * MoreMath.CosPi(0.5 * t) * AdvancedMath.Gamma(t) * RiemannZeta(t));
     }
     else
     {
         double xm1 = x - 1.0;
         if (Math.Abs(xm1) < 0.25)
         {
              // near the singularity, use the Stieltjes expansion
             return(RiemannZeta_LaurentSeries(xm1));
         }
         else
         {
             // call Dirichlet function, which converges faster
             return(DirichletEta(x) / (1.0 - Math.Pow(2.0, 1.0 - x)));
         }
     }
 }
        // Our approach to evaluating the transformed series is taken from Michel & Stoitsov, "Fast computation of the
        // Gauss hypergeometric function with all its parameters complex with application to the Poschl-Teller-Ginocchio
        // potential wave functions" (https://arxiv.org/abs/0708.0116). Michel & Stoitsov had a great idea, but their
        // exposition leaves much to be desired, so I'll put in a lot of detail here.

        // The basic idea is an old one: use the linear transformation formulas (A&S 15.3.3-15.3.9) to map all x into
        // the region [0, 1/2]. The x -> (1-x) transformation, for example, looks like
        //   F(a, b, c, x) =
        //     \frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a) \Gamma(c-b)} F(a, b, a+b-c+1, 1-x) +
        //     \frac{\Gamma(c) \Gamma(a+b-c)}{\Gamma(a) \Gamma(b)} F(c-a, c-b, c-a-b, 1-x) (1-x)^{c-a-b}

        // When c-a-b is close to an integer, though, there is a problem. Write c = a + b + m + e, where m is a positive integer
        // and |e| <= 1/2. The transformed expression becomes:
        //   \frac{F(a, b, c, x)}{\Gamma(c)} =
        //     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} F(a, b, 1 - m - e, 1 - x) +
        //     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} F(b + m + e, a + m + e, 1 + m + e, 1 - x) (1-x)^{m + e}
        // In the first term, the F-function blows up as e->0 (or, if m=0, \Gamma(m+e) blows up), and in the second term
        // \Gamma(-m-e) blows up in that limit. By finding the divergent O(1/e) and the sub-leading O(1) terms, it's not too
        // hard to show that the divergences cancel, leaving a finite result, and to derive that finite result for e=0.
        // (A&S gives the result, and similar ones for the divergent limits of other linear transformations.)
        // But we still have a problem for e small-but-not-zero. The pre-limit expressions will have large cancelations.
        // We can't ignore O(e) and higher terms, but developing a series in e is unworkable -- the higher derivatives
        // rapidly become complicated and unwieldy. No expressions in A&S get around this problem, but we will now
        // develop an approach that does.
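        // (A concrete instance, added for illustration: take a = b = 1/2 and c = 1 + 1.0E-10, so m = 0 and e = 1.0E-10.
        //  The first term then carries \Gamma(e) ~ 1.0E10 and the second carries \Gamma(-e) ~ -1.0E10, yet their sum,
        //  the actual value of F / \Gamma(c), is O(1).)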

        // Notice the divergence of F(a, b, 1 - m - e, 1 - x) is at the mth term, where (1 - m - e)_{m} ~ e. Pull out
        // the finite sum up to the (m-1)th term
        //   \frac{F_0}{\Gamma(c)} = \frac{\Gamma(m+e)}{\Gamma(b + m + e) \Gamma(a + m + e)}
        //     \sum_{k=0}^{m-1} \frac{(a)_k (b)_k}{(1 - m - e)_{k}} \frac{(1-x)^k}{k!}
        // The remainder, which contains the divergences, is:
        //   \frac{F_1}{\Gamma(c)} =
        //     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(1 + m + k)}
        //     \frac{\Gamma(a + m + k) \Gamma(b + m + k) \Gamma(1 - m - e)}{\Gamma(a) \Gamma(b) \Gamma(1 - e + k)} +
        //     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + e + k}}{\Gamma(1 + k)}
        //     \frac{\Gamma(b + m + e + k) \Gamma(a + m + e + k) \Gamma(1 + m + e)}{\Gamma(b + m + e) \Gamma(a + m + e) \Gamma(1 + m + e + k)}
        // where we have shifted k by m in the first sum. Use the \Gamma reflection formulae
        //   \Gamma(m + e) \Gamma(1 - m - e) = \frac{\pi}{\sin(\pi(m + e))} = \frac{(-1)^m \pi}{\sin(\pi e)}
        //   \Gamma(-m - e) \Gamma(1 + m + e) = \frac{\pi}{\sin(-\pi(m + e))} = -\frac{(-1)^m \pi}{\sin(\pi e)}
        // to make this
        //   \frac{F_1}{\Gamma(c)} =
        //     \frac{(-1)^m \pi}{\sin(\pi e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
        //     \left[ \frac{\Gamma(a + m + k) \Gamma(b + m + k)}{\Gamma(1 + k - e) \Gamma(1 + m + k)} -
        //            \frac{\Gamma(a + m + k + e) \Gamma(b + m + k + e)}{\Gamma(1 + k) \Gamma(1 + m + k + e)} (1-x)^e \right]
        // Notice that \frac{\pi}{\sin(\pi e)} diverges like ~1/e, and that the two terms in brackets contain exactly the
        // same products of \Gamma functions, except for having their arguments shifted by e. Therefore in the e->0
        // limit their leading terms must cancel, leaving terms ~e, which will cancel the ~1/e divergence, leaving a finite result.

        // We would like to accomplish this cancelation analytically. This isn't too hard to do for e=0. Just write out a Taylor
        // series for \Gamma(z + e), keeping only terms up to O(e). The O(1) terms cancel, the e in front of the O(e) terms gets
        // absorbed into a finite prefactor \frac{\pi e}{\sin(\pi e)}, and we have a finite result. A&S gives the resulting expression.
        // The trouble is for e small-but-not-zero. If we try to evaluate the terms directly, we get cancelations between large terms,
        // leading to catastrophic loss of precision. If we try to use Taylor expansion, we need all the higher derivatives,
        // not just the first one, and the expressions rapidly become so complex and unwieldy as to be unworkable.

        // A good solution, introduced by Forrey, and refined by Michel & Stoitsov, is to use finite differences instead
        // of derivatives. If we can express the difference between \Gamma(z) and \Gamma(z + e) as a function of z and e that
        // we can compute, then we can analytically cancel the divergent parts and be left with a finite expression involving
        // our finite difference function instead of an infinite series of Taylor series terms. For e=0, the finite difference
        // is just the first derivative, but for non-zero e, it implicitly sums the contributions of all Taylor series terms.

        // The finite difference function to use is:
        //   e G_{e}(z) = \frac{1}{\Gamma(z)} - \frac{1}{\Gamma(z+e)}
        // I played around with a few others, e.g. the perhaps more obvious choice \frac{\Gamma(z+e)}{\Gamma(z)} = 1 + e P_{e}(z),
        // but the key advantage of G_{e}(z) is that it is perfectly finite even for non-positive-integer values of z and z+e,
        // because it uses the reciprocal \Gamma function. (I actually had a mostly-working algorithm using P_{e}(z), but it
        // broke down at non-positive-integer z, because P_{e}(z) itself still diverged for those values.)

        // For a discussion of how to actually compute G_{e}(z), refer to the method notes.

        // The next trick Michel & Stoitsov use is to first concentrate just on the k=0 term. The relevant factor is
        //   t = \frac{1}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
        //       \left[ \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(1 - e) \Gamma(1 + m)} -
        //              \frac{\Gamma(a + m + e) \Gamma(b + m + e)}{\Gamma(1) \Gamma(1 + m + e)} (1-x)^e \right]
        //     = \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(a) \Gamma(b)}
        //       \left[ \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} -
        //              \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e) \Gamma(1)} (1-x)^e \right]
        // In the second step, we have put all the \Gamma functions we will need to compute for e-shifted arguments
        // in the denominator, which makes it easier to apply our definition of G_e(z), since there they are also
        // in the denominator.

        // Before we begin using G_e(z), let's isolate the e-dependence of the (1-x)^e factor. Write
        //   (1-x)^e = \exp(e \ln(1-x) ) = 1 + [ \exp(e \ln(1-x)) - 1 ] = 1 + e E_e(\ln(1-x))
        // where e E_e(z) = \exp(e z) - 1, here evaluated at z = \ln(1-x). We could continue like this, using G_e(z) to
        // eliminate every \Gamma(z + e) in favor of G_e(z) and \Gamma(z), but by doing so we
        // would end up with terms containing two or more explicit powers of e, and products
        // of different G_e(z). That would be perfectly correct, but we end up with a nicer
        // expression if we instead "peel off" only one e-shifted function at a time, like this...
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1)} +
        //     \frac{e G_{-e}(1)}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)}
        //   \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} +
        //     \frac{e G_e(a + m)}{\Gamma(b + m) \Gamma(1 + m + e)}
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} +
        //     \frac{e G_e(b + m)}{\Gamma(a + m + e) \Gamma(1 + m + e)}
        //   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} =
        //     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)} -
        //     \frac{e G_e(1 + m)}{\Gamma(a + m + e) \Gamma(b + m + e)}
        // Putting this all together, we have
        //   t = \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e)} \left[ \frac{e G_{-e}(1)}{\Gamma(1 + m)} + e G_e(1 + m) \right]
        //     - \frac{1}{\Gamma(1 + m + e)} \left[ \frac{e G_e(a + m)}{\Gamma(b + m)} + \frac{e G_e(b+m)}{\Gamma(a + m + e)} \right]
        //     - \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e)} e E_e(\ln(1-x))
        // which is, as promised, proportional to e. For some a, b, m, and e, some of these \Gamma functions will blow up, but they are
        // all in the denominator, so that will just zero some terms. The G_e(z) that appear in the numerator are finite for all z.

        // So now we have the k=0 term. What about higher k terms? We could repeat this analysis, carrying along the k's,
        // and get an expression involving G_e(z) and E_e(z) for each k. Michel & Stoitsov's last trick is to realize
        // we don't have to do this, but can instead use our original expressions for each term as a ratio of \Gamma
        // functions to derive a recurrence. Let u_k be the first term, v_k be the second term, so t_k = u_k + v_k.
        // Let r_k = u_{k+1} / u_{k} and s_k = v_{k+1} / v_{k}. It's easy to write down r_k and s_k because they
        // follow immediately from the \Gamma function recurrence.
        //    r_{k} = \frac{(a + m + k)(b + m + k)}{(1 + k - e)(1 + m + k)}
        //    s_{k} = \frac{(a + m + k + e)(b + m + k + e)}{(1 + k)(1 + m + k + e)}
        // Notice that r_k and s_k are almost equal, but not quite: they differ by O(e). To advance t_k, use
        //    t_{k+1} = u_{k+1} + v_{k+1} = r_k u_k + s_k v_k = s_k (u_k + v_k) + (r_k - s_k) u_k
        //            = s_k t_k + d_k * u_k
        // where d_k = r_k - s_k, which will be O(e), since r_k and s_k only differ by e-shifted arguments.

        // In the x -> (1-x), x -> 1/x, x -> x / (1-x), and x -> 1 - 1/x linear transformations, canceling divergences
        // appear when some arguments of the transformed functions are non-positive-integers.

        private static double Hypergeometric2F1_Series_OneOverOneMinusX(double a, int m, double e, double c, double x1)
        {
            Debug.Assert(m >= 0);
            Debug.Assert(Math.Abs(e) <= 0.5);
            Debug.Assert(Math.Abs(x1) <= 0.75);

            double b = a + m + e;

            double g_c = AdvancedMath.Gamma(c);
            //double rg_a = 1.0 / AdvancedMath.Gamma(a);
            double rg_b   = 1.0 / AdvancedMath.Gamma(b);
            double rg_cma = 1.0 / AdvancedMath.Gamma(c - a);
            //double rg_cmb = 1.0 / AdvancedMath.Gamma(c - b);

            // Pochhammer product, keeps track of (a)_k (c-b)_k (x')^{a + k}
            double p = Math.Pow(x1, a);

            double f0 = 0.0;

            if (m > 0)
            {
                f0 = p;

                double q = 1.0;
                for (int k = 1; k < m; k++)
                {
                    int km1 = k - 1;
                    p  *= (a + km1) * (c - b + km1) * x1;
                    q  *= (k - m - e) * k;
                    f0 += p / q;
                }

                f0 *= g_c * rg_b * rg_cma * AdvancedMath.Gamma(m + e);
                p  *= (a + (m - 1)) * (c - b + (m - 1)) * x1;
            }

            // Now compute the remaining terms with analytically canceled divergent parts.

            double t = rg_b * rg_cma * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
                       1.0 / AdvancedMath.Gamma(1 + m + e) * (NewG(a + m, e) / AdvancedMath.Gamma(c - a - e) + NewG(c - a, -e) / AdvancedMath.Gamma(b)) -
                       MoreMath.ReducedExpMinusOne(Math.Log(x1), e) / AdvancedMath.Gamma(a + m) / AdvancedMath.Gamma(c - a - e) / AdvancedMath.Gamma(m + 1 + e);

            t *= p;

            double f1 = t;

            double u = p * rg_b * rg_cma / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

            for (int k = 0; k < Global.SeriesMax; k++)
            {
                double f1_old = f1;

                int    k1   = k + 1;
                int    mk1  = m + k1;
                double amk  = a + m + k;
                double amke = amk + e;
                double cak  = c - a + k;
                double cake = cak - e;
                double k1e  = k1 - e;
                double mk1e = mk1 + e;

                double r = amk * cake / k1e / mk1;
                double s = amke * cak / mk1e / k1;

                // Compute (r - s) / e analytically because leading terms cancel
                double d = (amk * cake / mk1 - amk - cake - e + amke * cak / k1) / mk1e / k1e;

                t = (s * t + d * u) * x1;

                f1 += t;

                if (f1 == f1_old)
                {
                    f1 *= ReciprocalSincPi(e) * g_c;
                    if (m % 2 != 0)
                    {
                        f1 = -f1;
                    }
                    return(f0 + f1);
                }

                u *= r * x1;
            }

            throw new NonconvergenceException();
        }
        private static double Hypergeometric2F1_Series_OneMinusX(double a, double b, int m, double e, double x1)
        {
            Debug.Assert(m >= 0);
            Debug.Assert(Math.Abs(e) <= 0.5);
            Debug.Assert(Math.Abs(x1) <= 0.75);

            double c = a + b + m + e;

            // Compute all the gammas we will use.
            double g_c    = AdvancedMath.Gamma(c);
            double rg_am  = 1.0 / AdvancedMath.Gamma(a + m);
            double rg_bm  = 1.0 / AdvancedMath.Gamma(b + m);
            double rg_ame = 1.0 / AdvancedMath.Gamma(a + m + e);
            double rg_bme = 1.0 / AdvancedMath.Gamma(b + m + e);
            double rg_m1e = 1.0 / AdvancedMath.Gamma(m + 1 + e);

            // Pochhammer product, keeps track of (a)_m (b)_m (x')^m
            double p = 1.0;

            // First compute the finite sum, which contains no divergent terms even for e = 0.
            double f0 = 0.0;

            if (m > 0)
            {
                double t0 = 1.0;
                f0 = t0;
                for (int k = 1; k < m; k++)
                {
                    int km1 = k - 1;
                    p  *= (a + km1) * (b + km1) * x1;
                    t0 *= 1.0 / (1.0 - m - e + km1) / k;
                    f0 += t0 * p;
                }

                f0 *= g_c * rg_bme * rg_ame * AdvancedMath.Gamma(m + e);
                p  *= (a + (m - 1)) * (b + (m - 1)) * x1;
            }

            // Now compute the remaining terms with analytically canceled divergent parts.

            double t = rg_bme * rg_ame * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
                       rg_m1e * (NewG(a + m, e) * rg_bme + NewG(b + m, e) * rg_am) -
                       MoreMath.ReducedExpMinusOne(Math.Log(x1), e) * rg_am * rg_bm * rg_m1e;


            t *= p;
            double f1 = t;
            double u  = p * rg_bme * rg_ame / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

            for (int k = 0; k < Global.SeriesMax; k++)
            {
                double f1_old = f1;

                // Compute a bunch of sums we will use.
                int    k1   = k + 1;
                int    mk   = m + k;
                int    mk1  = mk + 1;
                double k1e  = k1 - e;
                double amk  = a + mk;
                double bmk  = b + mk;
                double amke = amk + e;
                double bmke = bmk + e;
                double mk1e = mk1 + e;

                // Compute the ratios of each term. These are close, but not equal for e != 0.
                double r = amk * bmk / mk1 / k1e;
                double s = amke * bmke / mk1e / k1;

                // Compute (r - s) / e, with O(1) terms of (r - s) analytically canceled.
                double d = (amk * bmk / mk1 - (amk + bmk + e) + amke * bmke / k1) / mk1e / k1e;

                // Advance to the next term, including the correction for r != s.
                t = (s * t + d * u) * x1;

                f1 += t;

                if (f1 == f1_old)
                {
                    f1 *= ReciprocalSincPi(e) * g_c;
                    if (m % 2 != 0)
                    {
                        f1 = -f1;
                    }
                    return(f0 + f1);
                }

                // Advance the u term, which we will need for the next iteration.
                u *= r * x1;
            }

            throw new NonconvergenceException();
        }
Example #18
 /// <summary>
 /// Compute the Riemann zeta function.
 /// </summary>
 /// <param name="s">The argument.</param>
 /// <returns>The value &#x3B6;(s).</returns>
 /// <remarks>
 /// <para>The Riemann &#x3B6; function can be defined as the sum of the <paramref name="s"/>th inverse power of the natural numbers.</para>
 /// <img src="../images/ZetaSeries.png" />
 /// </remarks>
 /// <seealso href="http://en.wikipedia.org/wiki/Riemann_zeta_function"/>
 public static double RiemannZeta(double s)
 {
     if (s < 0.0)
     {
         // for negative numbers, use the reflection formula
         double t = 1.0 - s;
         double z = 2.0 * Math.Pow(Global.TwoPI, -t) * Math.Cos(Global.HalfPI * t) * AdvancedMath.Gamma(t) * RiemannZeta(t);
         return(z);
     }
     else
     {
         if (Math.Abs(s - 1.0) < 0.25)
         {
              // near the singularity, use the Stieltjes expansion
             return(RiemannZeta_Series(s - 1.0));
         }
         else
         {
             // call Dirichlet function, which converges faster
             return(DirichletEta(s) / (1.0 - Math.Pow(2.0, 1.0 - s)));
         }
     }
 }