// The expansions in which they [the Bernoulli numbers] appear are asymptotic; the numbers grow rapidly after ~B_16

/// <summary>
/// Computes a Stirling number of the first kind.
/// </summary>
/// <param name="n">The upper argument, which must be non-negative.</param>
/// <param name="k">The lower argument, which must lie between 0 and n.</param>
/// <returns>The value of the unsigned Stirling number of the first kind.</returns>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative, or <paramref name="k"/>
/// lies outside [0, n].</exception>
/// <seealso href="https://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind"/>
public static double StirlingNumber1(int n, int k) {

    if (n < 0) { throw new ArgumentOutOfRangeException(nameof(n)); }
    if ((k < 0) || (k > n)) { throw new ArgumentOutOfRangeException(nameof(k)); }

    if (k == n) {
        return(1.0);
    } else if (k == 0) {
        return(0.0);
    } else if (k == 1) {
        return(AdvancedIntegerMath.Factorial(n - 1));
    } else if (k == (n - 1)) {
        return(AdvancedIntegerMath.BinomialCoefficient(n, 2));
    } else {
        double[] s = Stirling1_Recursive(n, k);
        return(s[k]);
    }

}
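// Stirling1_Recursive is referenced above but not shown in this section. As a rough sketch of what
// such a helper might look like (the name and signature are taken from the call site, not from the
// library's actual source), one can build the row for n from the standard recurrence for unsigned
// Stirling numbers of the first kind, [n, k] = [n-1, k-1] + (n-1) [n-1, k]:
private static double[] Stirling1_Recursive(int n, int k) {
    // Row n = 0 is [0, 0] = 1; build each subsequent row in place, working backwards in k.
    double[] s = new double[k + 1];
    s[0] = 1.0;
    for (int i = 1; i <= n; i++) {
        int kMax = Math.Min(i, k);
        for (int j = kMax; j > 0; j--) {
            s[j] = s[j - 1] + (i - 1) * s[j];
        }
        s[0] = 0.0; // [i, 0] = 0 for i > 0
    }
    return(s);
}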
// This function computes x^n / n! or x^{\nu} / \Gamma(\nu + 1), which can easily become
// Infinity/Infinity=NaN for large n if computed naively.
internal static double PowerOverFactorial(double x, int n) {
    if (n <= 16) {
        // For maximum range, we should evaluate this using Lanczos, but
        // since we know we don't call it for x large enough for x^n to overflow,
        // this is safer and faster.
        return(MoreMath.Pow(x, n) / AdvancedIntegerMath.Factorial(n));
    } else {
        return(Stirling.PowerFactor(x, n));
    }
}
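// Stirling.PowerFactor is defined elsewhere and not shown in this section. For orientation only, a
// log-space fallback with the same intent (avoiding Infinity/Infinity for large n) might look like
// the sketch below; the actual implementation presumably evaluates the Stirling series directly and
// will be more accurate than round-tripping through Exp and Log.
private static double PowerOverFactorial_ViaLogs(double x, int n) {
    // x^n / n! = exp(n ln(x) - ln(n!)); valid for x > 0.
    return(Math.Exp(n * Math.Log(x) - AdvancedIntegerMath.LogFactorial(n)));
}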
// Bernoulli-number sum used by the polylogarithm code. (Note B_k = 0 for odd k > 1, so only
// even-k terms contribute to the loop below.)
private static double PolyLog_BernoulliSum(int n, double w) {
    double s = 0.0;
    for (int k = n; k > 1; k--) {
        double ds = (1.0 - MoreMath.Pow(2.0, 1 - k)) *
            Math.Abs(AdvancedIntegerMath.BernoulliNumber(k)) * MoreMath.Pow(Global.TwoPI, k) / AdvancedIntegerMath.Factorial(k) *
            MoreMath.Pow(w, n - k) / AdvancedIntegerMath.Factorial(n - k);
        s -= ds;
    }
    s -= MoreMath.Pow(w, n) / AdvancedIntegerMath.Factorial(n);
    return(s);
}
// Legendre polynomials normalized for their use in the spherical harmonics

/// <summary>
/// Computes the value of an associated Legendre polynomial.
/// </summary>
/// <param name="l">The order, which must be non-negative.</param>
/// <param name="m">The associated order, which must lie between -l and l inclusive.</param>
/// <param name="x">The argument, which must lie on the closed interval between -1 and +1.</param>
/// <returns>The value of P<sub>l,m</sub>(x).</returns>
/// <remarks>
/// <para>Associated Legendre polynomials appear in the definition of the <see cref="AdvancedMath.SphericalHarmonic"/> functions.</para>
/// <para>For values of l and m over about 150, values of this polynomial can exceed the capacity of double-wide floating point numbers.</para>
/// </remarks>
/// <seealso href="http://en.wikipedia.org/wiki/Associated_Legendre_polynomials"/>
public static double LegendreP(int l, int m, double x) {

    if (l < 0) { throw new ArgumentOutOfRangeException(nameof(l)); }
    //if (l < 0) {
    //    return (LegendreP(-l + 1, m, x));
    //}
    if (Math.Abs(m) > l) { throw new ArgumentOutOfRangeException(nameof(m)); }
    if (Math.Abs(x) > 1.0) { throw new ArgumentOutOfRangeException(nameof(x)); }

    double f;
    if (l < 10) {
        // For low enough orders, we can get the factorial quickly from a table look-up and without danger of overflow.
        f = Math.Sqrt(AdvancedIntegerMath.Factorial(l + m) / AdvancedIntegerMath.Factorial(l - m));
    } else {
        // For higher orders, we must move into log space to avoid overflow.
        f = Math.Exp((AdvancedIntegerMath.LogFactorial(l + m) - AdvancedIntegerMath.LogFactorial(l - m)) / 2.0);
    }
    // Use P_{l,-m} = (-1)^m [(l-m)!/(l+m)!] P_{l,m} to reduce to m >= 0.
    if (m < 0) {
        m = -m;
        if (m % 2 != 0) { f = -f; }
    }

    return(f * LegendrePe(l, m, x));

}
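// LegendrePe, the renormalized polynomial used above, is defined elsewhere. Judging from the prefactor
// f, it computes Pe_{l,m}(x) = sqrt((l-m)!/(l+m)!) P_{l,m}(x). A minimal sketch of the standard stable
// upward recurrence for that quantity, under that assumption (the library's actual implementation and
// its sign convention may differ):
private static double LegendrePe_Sketch(int l, int m, double x) {
    Debug.Assert((0 <= m) && (m <= l) && (Math.Abs(x) <= 1.0));
    double s = Math.Sqrt((1.0 - x) * (1.0 + x));
    // Seed with Pe_{m,m} = \prod_{k=1}^{m} s \sqrt{(2k-1)/(2k)}, which stays in range
    // even when the unnormalized P_{m,m} = (2m-1)!! s^m would overflow.
    double p0 = 1.0;
    for (int k = 1; k <= m; k++) {
        p0 *= s * Math.Sqrt((2 * k - 1) / (2.0 * k));
    }
    if (l == m) { return(p0); }
    // Upward recurrence in degree:
    //   \sqrt{(j+1)^2 - m^2} Pe_{j+1,m} = (2j+1) x Pe_{j,m} - \sqrt{j^2 - m^2} Pe_{j-1,m}
    double pm1 = 0.0;
    for (int j = m; j < l; j++) {
        double p1 = ((2 * j + 1) * x * p0 - Math.Sqrt(j * j - m * m) * pm1) / Math.Sqrt((j + 1) * (j + 1) - m * m);
        pm1 = p0;
        p0 = p1;
    }
    return(p0);
}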
/// <summary>
/// Computes the polygamma function.
/// </summary>
/// <param name="n">The order, which must be non-negative.</param>
/// <param name="x">The argument.</param>
/// <returns>The value of ψ<sub>n</sub>(x).</returns>
/// <remarks>
/// <para>The polygamma function gives higher logarithmic derivatives of the Gamma function.</para>
/// </remarks>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="n"/> is negative.</exception>
/// <seealso href="http://en.wikipedia.org/wiki/Polygamma_function"/>
/// <seealso href="http://mathworld.wolfram.com/PolygammaFunction.html"/>
public static double Psi(int n, double x) {

    if (n < 0) { throw new ArgumentOutOfRangeException(nameof(n)); }

    // For n = 0, use the normal digamma algorithm.
    if (n == 0) { return(Psi(x)); }

    // For non-positive x, use the reflection formula.
    if (x <= 0.0) {
        if (x == Math.Ceiling(x)) {
            // Psi_n has poles at the non-positive integers.
            return(Double.NaN);
        } else {
            // The reflection formula requires the nth derivative of cot(pi x).
            // I was able to compute this, but my algorithm is O(n^2), so for large n
            // and not-so-negative x this is probably sub-optimal.
            double y = Math.PI * x;
            double d = -EvaluateCotDerivative(ComputeCotDerivative(n), y) * MoreMath.Pow(Math.PI, n + 1);
            if (n % 2 == 0) {
                return(d + Psi(n, 1.0 - x));
            } else {
                return(d - Psi(n, 1.0 - x));
            }
        }
    }

    // Compute the minimum x required for our asymptotic series to converge.
    // My original approximation was to say that, for 1 << k << n, the factorials approach e^(2k),
    // so to achieve convergence to 10^(-16) at the 8th term we should need x ~ 10 e ~ 27.
    // Unfortunately, that estimate is both crude and an underestimate; for now I use an empirical
    // relationship instead.
    double xm = 16.0 + 2.0 * n;

    // By repeatedly using \psi(n, z) = \psi(n, z+1) - (-1)^n n! / z^(n+1),
    // increase x until it is large enough to use the asymptotic series.
    // Keep track of the accumulated shifts in the variable s.
    double s = 0.0;
    while (x < xm) {
        s += 1.0 / MoreMath.Pow(x, n + 1);
        x += 1.0;
    }

    // Now that x is big enough, use the asymptotic series
    //   \psi(n, z) = - (-1)^n \frac{(n-1)!}{z^n} \left[ 1 + \frac{n}{2 z} +
    //     \sum_{k=1}^{\infty} \frac{(2k+n-1)!}{(2k)! \, (n-1)!} \frac{B_{2k}}{z^{2k}} \right]
    double t = 1.0 + n / (2.0 * x);
    double x2 = x * x;
    double t1 = n * (n + 1) / 2.0 / x2;
    for (int i = 1; i < AdvancedIntegerMath.Bernoulli.Length; i++) {
        double t_old = t;
        t += AdvancedIntegerMath.Bernoulli[i] * t1;
        if (t == t_old) {
            double g = AdvancedIntegerMath.Factorial(n - 1) * (t / MoreMath.Pow(x, n) + n * s);
            if (n % 2 == 0) { g = -g; }
            return(g);
        }
        int i2 = 2 * i;
        t1 *= 1.0 * (n + i2) * (n + i2 + 1) / (i2 + 2) / (i2 + 1) / x2;
    }
    throw new NonconvergenceException();

    // For small x, the s-part strongly dominates the t-part, so it isn't actually necessary for us to determine t
    // very accurately. In the future, we should modify this code to allow the t-series to converge when its overall
    // contribution no longer matters, rather than requiring t to converge to full precision.

}
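// An illustrative spot check (not part of the library) against the standard closed forms
// \psi_n(1) = (-1)^{n+1} n! \zeta(n+1); the tolerances here are loose guesses.
private static void Psi_SpotCheck() {
    Debug.Assert(Math.Abs(Psi(1, 1.0) - Math.PI * Math.PI / 6.0) < 1.0E-12);   // psi_1(1) = zeta(2) = pi^2/6
    Debug.Assert(Math.Abs(Psi(2, 1.0) + 2.0 * 1.2020569031595943) < 1.0E-12);  // psi_2(1) = -2 zeta(3)
}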
private static double Hypergeometric2F1_Series_OneMinusX(double a, double b, int m, double e, double x1) {

    Debug.Assert(m >= 0);
    Debug.Assert(Math.Abs(e) <= 0.5);
    Debug.Assert(Math.Abs(x1) <= 0.75);

    double c = a + b + m + e;

    // Compute all the gammas we will use.
    double g_c = AdvancedMath.Gamma(c);
    double rg_am = 1.0 / AdvancedMath.Gamma(a + m);
    double rg_bm = 1.0 / AdvancedMath.Gamma(b + m);
    double rg_ame = 1.0 / AdvancedMath.Gamma(a + m + e);
    double rg_bme = 1.0 / AdvancedMath.Gamma(b + m + e);
    double rg_m1e = 1.0 / AdvancedMath.Gamma(m + 1 + e);

    // Pochhammer product, keeps track of (a)_m (b)_m (x')^m
    double p = 1.0;

    // First compute the finite sum, which contains no divergent terms even for e = 0.
    double f0 = 0.0;
    if (m > 0) {
        double t0 = 1.0;
        f0 = t0;
        for (int k = 1; k < m; k++) {
            int km1 = k - 1;
            p *= (a + km1) * (b + km1) * x1;
            t0 *= 1.0 / (1.0 - m - e + km1) / k;
            f0 += t0 * p;
        }
        f0 *= g_c * rg_bme * rg_ame * AdvancedMath.Gamma(m + e);
        p *= (a + (m - 1)) * (b + (m - 1)) * x1;
    }

    // Now compute the remaining terms with analytically canceled divergent parts.
    double t = rg_bme * rg_ame * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
        rg_m1e * (NewG(a + m, e) * rg_bme + NewG(b + m, e) * rg_am) -
        MoreMath.ReducedExpMinusOne(Math.Log(x1), e) * rg_am * rg_bm * rg_m1e;
    t *= p;
    double f1 = t;

    double u = p * rg_bme * rg_ame / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

    for (int k = 0; k < Global.SeriesMax; k++) {

        double f1_old = f1;

        // Compute a bunch of sums we will use.
        int k1 = k + 1;
        int mk = m + k;
        int mk1 = mk + 1;
        double k1e = k1 - e;
        double amk = a + mk;
        double bmk = b + mk;
        double amke = amk + e;
        double bmke = bmk + e;
        double mk1e = mk1 + e;

        // Compute the ratios of each term. These are close, but not equal, for e != 0.
        double r = amk * bmk / mk1 / k1e;
        double s = amke * bmke / mk1e / k1;

        // Compute (r - s) / e, with O(1) terms of (r - s) analytically canceled.
        double d = (amk * bmk / mk1 - (amk + bmk + e) + amke * bmke / k1) / mk1e / k1e;

        // Advance to the next term, including the correction for s != r.
        t = (s * t + d * u) * x1;
        f1 += t;

        if (f1 == f1_old) {
            f1 *= ReciprocalSincPi(e) * g_c;
            if (m % 2 != 0) { f1 = -f1; }
            return(f0 + f1);
        }

        // Advance the u term, which we will need for the next iteration.
        u *= r * x1;

    }
    throw new NonconvergenceException();

}
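// An illustrative cross-check (not part of the library), assuming this helper returns F(a, b, c, x)
// with c = a + b + m + e and x1 = 1 - x, as the notes below describe: for x in the transformed region,
// the result should agree with the defining power series F = \sum_k (a)_k (b)_k / (c)_k x^k / k!
// summed directly. The parameter values are arbitrary test choices, with e deliberately tiny so that
// the near-integer cancelations are exercised.
private static void Hypergeometric2F1_OneMinusX_SpotCheck() {
    double a = 1.0 / 3.0;
    double b = 1.0 / 5.0;
    int m = 1;
    double e = 1.0E-6;
    double c = a + b + m + e;
    double x = 0.4;
    double f1 = Hypergeometric2F1_Series_OneMinusX(a, b, m, e, 1.0 - x);
    // Direct power series; 100 terms of x^k at x = 0.4 puts the truncation error far below machine precision.
    double f2 = 0.0;
    double term = 1.0;
    for (int k = 0; k < 100; k++) {
        f2 += term;
        term *= (a + k) * (b + k) / (c + k) * x / (k + 1);
    }
    Debug.Assert(Math.Abs(f1 - f2) <= 1.0E-12 * Math.Abs(f2));
}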
// Our approach to evaluating the transformed series is taken from Michel & Stoitsov, "Fast computation of the
// Gauss hypergeometric function with all its parameters complex with application to the Poschl-Teller-Ginocchio
// potential wave functions" (https://arxiv.org/abs/0708.0116). Michel & Stoitsov had a great idea, but their
// exposition leaves much to be desired, so I'll put in a lot of detail here.

// The basic idea is an old one: use the linear transformation formulas (A&S 15.3.3-15.3.9) to map all x into
// the region [0, 1/2]. The x -> (1-x) transformation, for example, looks like
//   F(a, b, c, x) =
//     \frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a) \Gamma(c-b)} F(a, b, a+b-c+1, 1-x) +
//     \frac{\Gamma(c) \Gamma(a+b-c)}{\Gamma(a) \Gamma(b)} F(c-a, c-b, c-a-b+1, 1-x) (1-x)^{c-a-b}
// When c-a-b is close to an integer, though, there is a problem. Write c = a + b + m + e, where m is a
// non-negative integer and |e| <= 1/2. The transformed expression becomes:
//   \frac{F(a, b, c, x)}{\Gamma(c)} =
//     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} F(a, b, 1 - m - e, 1 - x) +
//     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} F(b + m + e, a + m + e, 1 + m + e, 1 - x) (1-x)^{m + e}
// In the first term, the F-function blows up as e -> 0 (or, if m = 0, \Gamma(m + e) blows up), and in the
// second term \Gamma(-m - e) blows up in that limit. By finding the divergent O(1/e) and the sub-leading O(1)
// terms, it's not too hard to show that the divergences cancel, leaving a finite result, and to derive that
// finite result for e = 0. (A&S gives the result, and similar ones for the divergent limits of other linear
// transformations.) But we still have a problem for e small-but-not-zero. The pre-limit expressions will have
// large cancelations. We can't ignore O(e) and higher terms, but developing a series in e is unworkable -- the
// higher derivatives rapidly become complicated and unwieldy. No expressions in A&S get around this problem,
// but we will now develop an approach that does.

// Notice the divergence of F(a, b, 1 - m - e, 1 - x) is at the mth term, where (1 - m - e)_{m} ~ e. Pull out
// the finite sum up to the (m-1)th term
//   \frac{F_0}{\Gamma(c)} = \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)}
//     \sum_{k=0}^{m-1} \frac{(a)_k (b)_k}{(1 - m - e)_{k}} \frac{(1-x)^k}{k!}
// The remainder, which contains the divergences, is:
//   \frac{F_1}{\Gamma(c)} =
//     \frac{\Gamma(m + e)}{\Gamma(b + m + e) \Gamma(a + m + e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(1 + m + k)}
//       \frac{\Gamma(a + m + k) \Gamma(b + m + k) \Gamma(1 - m - e)}{\Gamma(a) \Gamma(b) \Gamma(1 - e + k)} +
//     \frac{\Gamma(-m - e)}{\Gamma(a) \Gamma(b)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + e + k}}{\Gamma(1 + k)}
//       \frac{\Gamma(b + m + e + k) \Gamma(a + m + e + k) \Gamma(1 + m + e)}{\Gamma(b + m + e) \Gamma(a + m + e) \Gamma(1 + m + e + k)}
// where we have shifted k by m in the first sum.
// Use the \Gamma reflection formulae
//   \Gamma(m + e) \Gamma(1 - m - e) = \frac{\pi}{\sin(\pi(m + e))} = \frac{(-1)^m \pi}{\sin(\pi e)}
//   \Gamma(-m - e) \Gamma(1 + m + e) = \frac{\pi}{\sin(-\pi(m + e))} = -\frac{(-1)^m \pi}{\sin(\pi e)}
// to make this
//   \frac{F_1}{\Gamma(c)} =
//     \frac{(-1)^m \pi}{\sin(\pi e)} \sum_{k=0}^{\infty} \frac{(1-x)^{m + k}}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
//       \left[ \frac{\Gamma(a + m + k) \Gamma(b + m + k)}{\Gamma(1 + k - e) \Gamma(1 + m + k)} -
//              \frac{\Gamma(a + m + k + e) \Gamma(b + m + k + e)}{\Gamma(1 + k) \Gamma(1 + m + k + e)} (1-x)^e \right]
// Notice that \frac{\pi}{\sin(\pi e)} diverges like ~1/e, and that the two terms in brackets contain exactly
// the same products of \Gamma functions, except for having their arguments shifted by e. Therefore in the
// e -> 0 limit their leading terms must cancel, leaving terms ~e, which will cancel the ~1/e divergence,
// leaving a finite result.

// We would like to accomplish this cancelation analytically. This isn't too hard to do for e = 0. Just write
// out a Taylor series for \Gamma(z + e), keeping only terms up to O(e). The O(1) terms cancel, the e in front
// of the O(e) terms gets absorbed into a finite prefactor \frac{\pi e}{\sin(\pi e)}, and we have a finite
// result. A&S gives the resulting expression. The trouble is for e small-but-not-zero. If we try to evaluate
// the terms directly, we get cancelations between large terms, leading to catastrophic loss of precision. If
// we try to use a Taylor expansion, we need all the higher derivatives, not just the first one, and the
// expressions rapidly become so complex and unwieldy as to be unworkable.

// A good solution, introduced by Forrey, and refined by Michel & Stoitsov, is to use finite differences
// instead of derivatives. If we can express the difference between \Gamma(z) and \Gamma(z + e) as a function
// of z and e that we can compute, then we can analytically cancel the divergent parts and be left with a
// finite expression involving our finite difference function instead of an infinite series of Taylor series
// terms. For e = 0, the finite difference is just the first derivative, but for non-zero e, it implicitly
// sums the contributions of all Taylor series terms.

// The finite difference function to use is:
//   e G_{e}(z) = \frac{1}{\Gamma(z)} - \frac{1}{\Gamma(z + e)}
// I played around with a few others, e.g. the perhaps more obvious choice \frac{\Gamma(z+e)}{\Gamma(z)} = 1 + e P_{e}(z),
// but the key advantage of G_{e}(z) is that it is perfectly finite even for non-positive-integer values of z
// and z + e, because it uses the reciprocal \Gamma function. (I actually had a mostly-working algorithm using
// P_{e}(z), but it broke down at non-positive-integer z, because P_{e}(z) itself still diverged for those
// values.) For a discussion of how to actually compute G_{e}(z), refer to the method notes; a naive sketch
// also appears after the last method below.

// The next trick Michel & Stoitsov use is to first concentrate just on the k = 0 term.
// The relevant factor is
//   t = \frac{1}{\Gamma(a) \Gamma(b) \Gamma(a + m + e) \Gamma(b + m + e)}
//       \left[ \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(1 - e) \Gamma(1 + m)} -
//              \frac{\Gamma(a + m + e) \Gamma(b + m + e)}{\Gamma(1) \Gamma(1 + m + e)} (1-x)^e \right]
//     = \frac{\Gamma(a + m) \Gamma(b + m)}{\Gamma(a) \Gamma(b)}
//       \left[ \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} -
//              \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e) \Gamma(1)} (1-x)^e \right]
// In the second step, we have put all the \Gamma functions we will need to compute for e-shifted arguments
// in the denominator, which makes it easier to apply our definition of G_e(z), since there they are also
// in the denominator.

// Before we begin using G_e(z), let's isolate the e-dependence of the (1-x)^e factor. Write
//   (1-x)^e = \exp(e \ln(1-x)) = 1 + [ \exp(e \ln(1-x)) - 1 ] = 1 + e E_e(\ln(1-x))
// where e E_e(z) = \exp(e z) - 1. We could continue like this, using G_e(z) to
// eliminate every \Gamma(z + e) in favor of G_e(z) and \Gamma(z), but by doing so we
// would end up with terms containing two and more explicit powers of e, and products
// of different G_e(z). That would be perfectly correct, but we end up with a nicer
// expression if we instead "peel off" only one e-shifted function at a time, like this...
//   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1 - e)} =
//     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m) \Gamma(1)} +
//     \frac{e G_{-e}(1)}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)}
//   \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e)} =
//     \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} +
//     \frac{e G_e(a + m)}{\Gamma(b + m) \Gamma(1 + m + e)}
//   \frac{1}{\Gamma(a + m + e) \Gamma(b + m) \Gamma(1 + m + e)} =
//     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} +
//     \frac{e G_e(b + m)}{\Gamma(a + m + e) \Gamma(1 + m + e)}
//   \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m + e)} =
//     \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e) \Gamma(1 + m)} -
//     \frac{e G_e(1 + m)}{\Gamma(a + m + e) \Gamma(b + m + e)}
// Putting this all together, we have
//   t = \frac{1}{\Gamma(a + m + e) \Gamma(b + m + e)} \left[ \frac{e G_{-e}(1)}{\Gamma(1 + m)} + e G_e(1 + m) \right]
//     - \frac{1}{\Gamma(1 + m + e)} \left[ \frac{e G_e(a + m)}{\Gamma(b + m)} + \frac{e G_e(b + m)}{\Gamma(a + m + e)} \right]
//     - \frac{1}{\Gamma(a + m) \Gamma(b + m) \Gamma(1 + m + e)} e E_e(\ln(1-x))
// which is, as promised, proportional to e. For some a, b, m, and e, some of these \Gamma functions will blow
// up, but they are all in the denominator, so that will just zero some terms. The G_e(z) that appear in the
// numerators are finite for all z.

// So now we have the k = 0 term. What about higher k terms? We could repeat this analysis, carrying along the
// k's, and get an expression involving G_e(z) and E_e(z) for each k. Michel & Stoitsov's last trick is to
// realize that we don't have to do this, but can instead use our original expressions for each term as a
// ratio of \Gamma functions to derive a recurrence. Let u_k be the first term, v_k be the second term, so
// t_k = u_k + v_k. Let r_k = u_{k+1} / u_k and s_k = v_{k+1} / v_k. It's easy to write down r_k and s_k
// because they follow immediately from the \Gamma function recurrence.
//   r_k = \frac{(a + m + k)(b + m + k)}{(1 + k - e)(1 + m + k)}
//   s_k = \frac{(a + m + k + e)(b + m + k + e)}{(1 + k)(1 + m + k + e)}
// Notice that r_k and s_k are almost equal, but not quite: they differ by O(e). To advance t_k, use
//   t_{k+1} = u_{k+1} + v_{k+1} = r_k u_k + s_k v_k = s_k (u_k + v_k) + (r_k - s_k) u_k
//           = s_k t_k + d_k u_k
// where d_k = r_k - s_k, which will be O(e), since r_k and s_k only differ by e-shifted arguments.

// In the x -> (1-x), x -> 1/x, x -> x / (1-x), and x -> 1 - 1/x linear transformations, canceling divergences
// appear when some arguments of the transformed functions are non-positive integers.

private static double Hypergeometric2F1_Series_OneOverOneMinusX(double a, int m, double e, double c, double x1) {

    Debug.Assert(m >= 0);
    Debug.Assert(Math.Abs(e) <= 0.5);
    Debug.Assert(Math.Abs(x1) <= 0.75);

    double b = a + m + e;

    double g_c = AdvancedMath.Gamma(c);
    //double rg_a = 1.0 / AdvancedMath.Gamma(a);
    double rg_b = 1.0 / AdvancedMath.Gamma(b);
    double rg_cma = 1.0 / AdvancedMath.Gamma(c - a);
    //double rg_cmb = 1.0 / AdvancedMath.Gamma(c - b);

    // Pochhammer product, keeps track of (a)_k (c-b)_k (x')^{a + k}
    double p = Math.Pow(x1, a);

    double f0 = 0.0;
    if (m > 0) {
        f0 = p;
        double q = 1.0;
        for (int k = 1; k < m; k++) {
            int km1 = k - 1;
            p *= (a + km1) * (c - b + km1) * x1;
            q *= (k - m - e) * k;
            f0 += p / q;
        }
        f0 *= g_c * rg_b * rg_cma * AdvancedMath.Gamma(m + e);
        p *= (a + (m - 1)) * (c - b + (m - 1)) * x1;
    }

    // Now compute the remaining terms with analytically canceled divergent parts.
    double t = rg_b * rg_cma * (NewG(1.0, -e) / AdvancedIntegerMath.Factorial(m) + NewG(m + 1, e)) -
        1.0 / AdvancedMath.Gamma(1 + m + e) * (NewG(a + m, e) / AdvancedMath.Gamma(c - a - e) + NewG(c - a, -e) / AdvancedMath.Gamma(b)) -
        MoreMath.ReducedExpMinusOne(Math.Log(x1), e) / AdvancedMath.Gamma(a + m) / AdvancedMath.Gamma(c - a - e) / AdvancedMath.Gamma(m + 1 + e);
    t *= p;
    double f1 = t;

    double u = p * rg_b * rg_cma / AdvancedMath.Gamma(1.0 - e) / AdvancedIntegerMath.Factorial(m);

    for (int k = 0; k < Global.SeriesMax; k++) {

        double f1_old = f1;

        int k1 = k + 1;
        int mk1 = m + k1;
        double amk = a + m + k;
        double amke = amk + e;
        double cak = c - a + k;
        double cake = cak - e;
        double k1e = k1 - e;
        double mk1e = mk1 + e;

        double r = amk * cake / k1e / mk1;
        double s = amke * cak / mk1e / k1;

        // Compute (r - s) / e analytically, because the leading terms cancel.
        double d = (amk * cake / mk1 - amk - cake - e + amke * cak / k1) / mk1e / k1e;

        t = (s * t + d * u) * x1;
        f1 += t;

        if (f1 == f1_old) {
            f1 *= ReciprocalSincPi(e) * g_c;
            if (m % 2 != 0) { f1 = -f1; }
            return(f0 + f1);
        }

        u *= r * x1;

    }
    throw new NonconvergenceException();

}
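// NewG and ReciprocalSincPi are referenced above but defined elsewhere; per the notes, NewG(z, e) is
// G_e(z), where e G_e(z) = 1/\Gamma(z) - 1/\Gamma(z + e). The naive sketches below convey what these
// helpers compute, but are not the library's implementations: a real NewG must switch to a series for
// small e, where the direct subtraction loses everything to cancelation, and must treat the poles of
// \Gamma, where the reciprocals should be taken as zero.
private static double NewG_Naive(double z, double e) {
    Debug.Assert(Math.Abs(e) > 1.0E-4); // direct subtraction is hopeless for tiny e
    return((1.0 / AdvancedMath.Gamma(z) - 1.0 / AdvancedMath.Gamma(z + e)) / e);
}

private static double ReciprocalSincPi_Naive(double e) {
    // \pi e / \sin(\pi e), the finite prefactor left after the ~1/e divergence cancels; -> 1 as e -> 0.
    return((e == 0.0) ? 1.0 : Math.PI * e / Math.Sin(Math.PI * e));
}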