Example #1
        public void RootOfEi()
        {
            double x = FunctionMath.FindZero(AdvancedMath.IntegralEi, Interval.FromEndpoints(0.1, 1.0));

            Assert.IsTrue(TestUtilities.IsNearlyEqual(x, 0.37250741078136663446));
            Assert.IsTrue(Math.Abs(AdvancedMath.IntegralEi(x)) < TestUtilities.TargetPrecision);
        }
Example #2
        public void RootOfPsi()
        {
            double x = FunctionMath.FindZero(AdvancedMath.Psi, 1.5);

            Assert.IsTrue(TestUtilities.IsNearlyEqual(x, 1.46163214496836234126));
            Assert.IsTrue(Math.Abs(AdvancedMath.Psi(x)) < TestUtilities.TargetPrecision);
        }
Example #3
        public void IntegralEiZero()
        {
            // Value of zero documented at https://dlmf.nist.gov/6.13
            double x0 = FunctionMath.FindZero(AdvancedMath.IntegralEi, 1.0);

            Assert.IsTrue(TestUtilities.IsNearlyEqual(x0, 0.37250741078136663446));
        }
Example #4
        public void Bug5505()
        {
            // finding the root of x^3 would fail with a NonconvergenceException because our termination criterion tested only
            // for small relative changes in x; adding a test for small absolute changes too solved the problem
            double x0 = FunctionMath.FindZero(delegate(double x) { return(x * x * x); }, 1.0);

            Assert.IsTrue(Math.Abs(x0) < TestUtilities.TargetPrecision);
        }
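The comment above explains the fix: the termination test must accept a small absolute step as well as a small relative one, because a root at x = 0 makes any purely relative test unreachable. A minimal sketch of such a combined criterion follows; the method and tolerance names are illustrative, not Meta.Numerics internals.

        // Illustrative convergence test combining relative and absolute step criteria.
        private static bool HasConverged(double xOld, double xNew, double relativeTolerance, double absoluteTolerance)
        {
            double dx = Math.Abs(xNew - xOld);
            // The relative test alone can never be satisfied as xNew -> 0; the absolute test covers that case.
            return((dx <= relativeTolerance * Math.Abs(xNew)) || (dx <= absoluteTolerance));
        }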
Example #5
        public void RootOfJ0()
        {
            Func <double, double> f = delegate(double x) {
                return(AdvancedMath.BesselJ(0, x));
            };
            double y = FunctionMath.FindZero(f, Interval.FromEndpoints(2.0, 4.0));

            Assert.IsTrue(TestUtilities.IsNearlyEqual(y, 2.40482555769577276862));
        }
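The anonymous delegate above can equally be written as a lambda, as later examples do; an equivalent form of the same test body:

        // Equivalent lambda form of the delegate used in RootOfJ0; same bracket, same expected zero of J0.
        double y = FunctionMath.FindZero(x => AdvancedMath.BesselJ(0, x), Interval.FromEndpoints(2.0, 4.0));
        Assert.IsTrue(TestUtilities.IsNearlyEqual(y, 2.40482555769577276862));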
Example #6
        /// <summary>
        /// Returns the point at which the right probability function attains the given value.
        /// </summary>
        /// <param name="Q">The right cumulative probability, which must lie between 0 and 1.</param>
        /// <returns>The value x for which <see cref="RightProbability"/> equals Q.</returns>
        public virtual double InverseRightProbability(double Q)
        {
            if ((Q < 0.0) || (Q > 1.0))
            {
                throw new ArgumentOutOfRangeException(nameof(Q));
            }
            Func <double, double> f = delegate(double x) {
                return(RightProbability(x) - Q);
            };
            double y = FunctionMath.FindZero(f, Mean);

            return(y);
        }
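Since the right probability of a continuous distribution is the complement of the left probability, inverting Q from the right should agree with inverting 1 - Q from the left. A small illustrative check; `distribution` is a placeholder for any distribution instance exposing these methods, not something defined in the excerpt above.

        // Cross-check of the two inversions, up to the tolerance of the underlying root finder.
        double q = 0.25;
        double fromRight = distribution.InverseRightProbability(q);
        double fromLeft  = distribution.InverseLeftProbability(1.0 - q);
        Assert.IsTrue(TestUtilities.IsNearlyEqual(fromRight, fromLeft));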
Example #7
        /// <summary>
        /// Returns the point at which the cumulative distribution function attains a given value.
        /// </summary>
        /// <param name="P">The left cumulative probability P, which must lie between 0 and 1.</param>
        /// <returns>The value x at which <see cref="LeftProbability"/> equals P.</returns>
        /// <remarks>
        /// <para>The inverse left probability is commonly called the quantile function. Given a
        /// probability P, it returns the value x below which the fraction P of the distribution lies.</para>
        /// </remarks>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="P"/> lies outside [0,1].</exception>
        /// <seealso href="https://en.wikipedia.org/wiki/Quantile_function"/>
        public virtual double InverseLeftProbability(double P)
        {
            // find x where LeftProbability(x) = P
            if ((P < 0.0) || (P > 1.0))
            {
                throw new ArgumentOutOfRangeException(nameof(P));
            }
            Func <double, double> f = delegate(double x) {
                return(LeftProbability(x) - P);
            };
            double y = FunctionMath.FindZero(f, Mean);

            return(y);
            // Since the PDF is the derivative of the CDF, this could be changed to use Newton's method.
        }
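The trailing comment suggests switching to Newton's method, using the fact that the PDF is the derivative of the CDF. A minimal sketch of that idea follows, assuming a hypothetical ProbabilityDensity(x) accessor for the PDF; it illustrates the suggestion and is not the library's implementation.

        private double InverseLeftProbabilityByNewton(double P)
        {
            double x = Mean; // start from the mean, as the FindZero call above does
            for (int i = 0; i < 250; i++)
            {
                double f      = LeftProbability(x) - P;  // function whose root we seek
                double fPrime = ProbabilityDensity(x);   // PDF = CDF' (hypothetical accessor)
                if (fPrime == 0.0)
                {
                    break;                               // give up where the density vanishes
                }
                double dx = f / fPrime;
                x -= dx;
                if (Math.Abs(dx) <= 1.0E-14 * Math.Abs(x))
                {
                    break;
                }
            }
            return(x);
        }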
Example #8
        public static double AiryBiZero(int k)
        {
            if (k < 1)
            {
                throw new ArgumentOutOfRangeException(nameof(k));
            }

            double tm = 3.0 / 8.0 * Math.PI * (4 * k - 4);
            double tp = 3.0 / 8.0 * Math.PI * (4 * k - 2);

            double bm = (tm == 0.0) ? 0.0 : -T(tm);
            double bp = -T(tp);

            double b1 = FunctionMath.FindZero(AdvancedMath.AiryBi, Interval.FromEndpoints(bp, bm));

            return(b1);
        }
        // **** Spherical Bessel functions ****



        // Functions needed for uniform asymptotic expansions

        /// <summary>
        /// Computes the requested zero of the regular Bessel function.
        /// </summary>
        /// <param name="nu">The order, which must be non-negative.</param>
        /// <param name="k">The index of the zero, which must be positive.</param>
        /// <returns>The <paramref name="k"/>th value of x for which J<sub>&#x3BD;</sub>(x) = 0.</returns>
        /// <exception cref="ArgumentOutOfRangeException"><paramref name="nu"/> is negative or <paramref name="k"/> is non-positive.</exception>
        public static double BesselJZero(double nu, int k)
        {
            if (nu < 0.0)
            {
                throw new ArgumentOutOfRangeException(nameof(nu));
            }
            if (k < 1)
            {
                throw new ArgumentOutOfRangeException(nameof(k));
            }

            double jMin, jMax;

            if (k == 1)
            {
                // Rayleigh inequalities
                // Cited in Ismail and Muldoon, Bounds for the small real and purely imaginary zeros of
                // Bessel and related functions, Methods and Applications of Analysis 2 (1995) p. 1-21
                // http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.140.288&rep=rep1&type=pdf
                // They cite Watson p. 502, but I don't find them there.
                //jMin = 2.0 * Math.Sqrt((nu + 1.0) * Math.Sqrt(nu + 2.0));
                // Lee Lorch, "Some Inequalities for the First Positive Zeros of the Bessel Functions",
                // SIAM J. Math. Anal. 24 (1993) 814
                jMin = Math.Sqrt((nu + 1.0) * (nu + 5.0));
                jMax = ApproximateBesselJZero(nu + 1, k);
                double tt = Math.Sqrt(2.0 * (nu + 1.0) * (nu + 3.0));
                if (tt < jMax)
                {
                    jMax = tt;
                }
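                // For example, for nu = 0 the closed-form bounds are sqrt(5) ~ 2.236 and sqrt(6) ~ 2.449,
                // which indeed bracket the first zero of J0, 2.40482555769577276862 (cf. RootOfJ0 above).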
            }
            else
            {
                // Use the interlacing property j_{\nu + 1, k - 1} < j_{\nu, k} < j_{\nu + 1, k}
                // and the approximation function to get a bound. The interlacing property is exact,
                // but our approximation functions are not, so it is conceivable this could go wrong.
                jMin = ApproximateBesselJZero(nu + 1, k - 1);
                jMax = ApproximateBesselJZero(nu + 1, k);
            }
            Debug.Assert(jMin < jMax);
            // We should use an internal method that takes a best guess. Also, add the derivative and use Newton's method.
            double j = FunctionMath.FindZero(x => BesselJ(nu, x), Interval.FromEndpoints(jMin, jMax));

            return(j);
        }
        public void ComplexRiemannZetaZeros()
        {
            // Zeros from http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros2
            double[] zeros = new double[] {
                14.134725141734693790,
                21.022039638771554993,
                25.010857580145688763,
                30.424876125859513210
            };

            foreach (double zero in zeros)
            {
                double rho = FunctionMath.FindZero((double x) => {
                    Complex f = AdvancedComplexMath.RiemannZeta(new Complex(0.5, x));
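                    // Both the real and imaginary parts of zeta vanish at a zero on the critical
                    // line, so their sum is a real-valued function of x that vanishes there too.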
                    return(f.Re + f.Im);
                }, Math.Round(zero));
                Assert.IsTrue(TestUtilities.IsNearlyEqual(rho, zero));
            }
        }
Example #11
        public static double AiryAiZero(int k)
        {
            if (k < 1)
            {
                throw new ArgumentOutOfRangeException(nameof(k));
            }

            double tm = 3.0 / 8.0 * Math.PI * (4 * k - 2);
            //double t0 = 3.0 / 8.0 * Math.PI * (4 * k - 1);
            double tp = 3.0 / 8.0 * Math.PI * (4 * k - 0);

            double am = -T(tm);
            //double a0 = -T(t0);
            double ap = -T(tp);

            double a1 = FunctionMath.FindZero(AdvancedMath.AiryAi, Interval.FromEndpoints(ap, am));

            return(a1);
        }
Example #12
        // routines for maximum likelihood fitting

        /// <summary>
        /// Computes the Gamma distribution that best fits the given sample.
        /// </summary>
        /// <param name="sample">The sample to fit.</param>
        /// <returns>The best fit parameters.</returns>
        /// <remarks>
        /// <para>The returned fit parameters are the <see cref="ShapeParameter"/> and <see cref="ScaleParameter"/>, in that order.
        /// These are the same parameters, in the same order, that are required by the <see cref="GammaDistribution(double,double)"/> constructor to
        /// specify a new Gamma distribution.</para>
        /// </remarks>
        /// <exception cref="ArgumentNullException"><paramref name="sample"/> is null.</exception>
        /// <exception cref="InvalidOperationException"><paramref name="sample"/> contains non-positive values.</exception>
        /// <exception cref="InsufficientDataException"><paramref name="sample"/> contains fewer than three values.</exception>
        public static FitResult FitToSample(Sample sample)
        {
            if (sample == null)
            {
                throw new ArgumentNullException("sample");
            }
            if (sample.Count < 3)
            {
                throw new InsufficientDataException();
            }

            // The log likelihood of a sample given k and s is
            //   \log L = (k-1) \sum_i \log x_i - \frac{1}{s} \sum_i x_i - N \log \Gamma(k) - N k \log s
            // Differentiating,
            //   \frac{\partial \log L}{\partial s} = \frac{1}{s^2} \sum_i x_i - \frac{Nk}{s}
            //   \frac{\partial \log L}{\partial k} = \sum_i \log x_i - N \psi(k) - N \log s
            // Setting the first equal to zero gives
            //   k s = N^{-1} \sum_i x_i = <x>
            //   \psi(k) + \log s = N^{-1} \sum_i \log x_i = <log x>
            // Inserting the first into the second gives a single equation for k
            //   \log k - \psi(k) = \log <x> - <\log x>
            // Note the RHS need only be computed once.
            // \log k > \psi(k) for all k, so the RHS had better be positive. They get
            // closer for large k, so smaller RHS will produce a larger k.

            double s = 0.0;

            foreach (double x in sample)
            {
                if (x <= 0.0)
                {
                    throw new InvalidOperationException();
                }
                s += Math.Log(x);
            }
            s = Math.Log(sample.Mean) - s / sample.Count;

            // We can get an initial guess for k from the method of moments
            //   \frac{\mu^2}{\sigma^2} = k

            double k0 = MoreMath.Sqr(sample.Mean) / sample.Variance;

            // Since 1/(2k) < \log(k) - \psi(k) < 1/k, we could get a bound; that
            // might be better to avoid the solver running into k < 0 territory
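            // Concretely, the inequality implies that log(k) - psi(k) - s is positive at k = 1/(2s)
            // and negative at k = 1/s, so Interval.FromEndpoints(0.5 / s, 1.0 / s) would be a
            // guaranteed sign-changing bracket for the FindZero call below. (This is a suggested
            // refinement, not something the code currently does.)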

            double k1 = FunctionMath.FindZero(k => (Math.Log(k) - AdvancedMath.Psi(k) - s), k0);

            double s1 = sample.Mean / k1;

            // Curvature of the log likelihood is straightforward
            //   \frac{\partial^2 \log L}{\partial s^2} = -\frac{2}{s^3} \sum_i x_i + \frac{Nk}{s^2} = - \frac{Nk}{s^2}
            //   \frac{\partial^2 \log L}{\partial k \partial s} = - \frac{N}{s}
            //   \frac{\partial^2 \log L}{\partial k^2} = - N \psi'(k)
            // This gives the curvature matrix and thus via inversion the covariance matrix.

            SymmetricMatrix B = new SymmetricMatrix(2);

            B[0, 0] = sample.Count * AdvancedMath.Psi(1, k1);
            B[0, 1] = sample.Count / s1;
            B[1, 1] = sample.Count * k1 / MoreMath.Sqr(s1);
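            // B is the negated curvature matrix described above, with index 0 corresponding to k
            // (shape) and index 1 to s (scale); its inverse is the covariance estimate.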
            SymmetricMatrix C = B.CholeskyDecomposition().Inverse();

            // Do a KS test for goodness-of-fit
            TestResult test = sample.KolmogorovSmirnovTest(new GammaDistribution(k1, s1));

            return(new FitResult(new double[] { k1, s1 }, C, test));
        }
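A minimal usage sketch of FitToSample; the data values are arbitrary positive placeholders, and Sample.Add is assumed to behave as an ordinary collection add.

        Sample sample = new Sample();
        foreach (double value in new double[] { 1.2, 0.7, 3.4, 2.1, 0.9, 1.8, 2.6, 0.5 })
        {
            sample.Add(value);
        }
        FitResult fit = GammaDistribution.FitToSample(sample);
        // Per the remarks above, the first fitted parameter is the shape and the second is the scale.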
Example #13
        /// <summary>
        /// Finds the Gumbel distribution that best fits the given sample.
        /// </summary>
        /// <param name="sample">The sample to fit.</param>
        /// <returns>The fit result.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="sample"/> is <see langword="null"/>.</exception>
        /// <exception cref="InsufficientDataException"><paramref name="sample"/> contains fewer than three values.</exception>
        public static GumbelFitResult FitToGumbel(this IReadOnlyList <double> sample)
        {
            if (sample == null)
            {
                throw new ArgumentNullException(nameof(sample));
            }
            if (sample.Count < 3)
            {
                throw new InsufficientDataException();
            }

            // To do a maximum likelihood fit, start from the log probability of each data point and aggregate to
            // obtain the log likelihood of the sample
            //   z_i = \frac{x_i - m}{s}
            //   -\ln p_i = \ln s + ( z_i + e^{-z_i})
            //   \ln L = \sum_i \ln p_i

            // Take derivatives wrt m and s.
            //   \frac{\partial \ln L}{\partial m} = \frac{1}{s} \sum_i ( 1 - e^{-z_i} )
            //   \frac{\partial \ln L}{\partial s} = \frac{1}{s} \sum_i ( -1 + z_i - z_i e^{-z_i} )

            // Set derivatives to zero to get a system of equations for the maximum.
            //    n = \sum_i e^{-z_i}
            //    n = \sum_i ( z_i - z_i e^{-z_i} )
            // that is, <e^{-z}> = 1 and <z> - <z e^{-z}> = 1.

            // To solve this system, pull e^{m/s} out of the sum in the first equation and solve for m
            //    n = e^{m / s} \sum_i e^{-x_i / s}
            //    m = -s \ln \left( \frac{1}{n} \sum_i e^{-x_i / s} \right) = -s \ln <e^{-x/s}>
            // Substituting this result into the second equation gets us to
            //    s = \bar{x} - \frac{ <x e^{-x/s}> }{ <e^{-x/s}> }
            // which involves only s. We can use a one-dimensional root-finder to determine s, then determine m
            // from the first equation.

            // To avoid exponentiating potentially large x_i, it's better to write the problem in terms
            // of d_i, where x_i = \bar{x} + d_i.
            //    m = \bar{x} - s \ln <e^{-d/s}>
            //    s = -\frac{ <d e^{-d/s}> }{ <e^{-d/s}> }

            // To get the covariance matrix, we need the curvature matrix at the minimum, so take more derivatives
            //    \frac{\partial^2 \ln L}{\partial m^2} = - \frac{1}{s^2} \sum_i e^{-z_i} = - \frac{n}{s^2}
            //    \frac{\partial^2 \ln L}{\partial m \partial s} = - \frac{n}{s^2} <z e^{-z}>
            //    \frac{\partial^2 \ln L}{\partial s^2} = - \frac{n}{s^2} ( <z^2 e^{-z}> + 1 )

            // Several crucial pieces of this analysis are taken from Mahdi and Cenac, "Estimating Parameters of Gumbel Distribution
            // using the method of moments, probability weighted moments, and maximum likelihood", Revista de Matematica:
            // Teoria y Aplicaciones 12 (2005) 151-156 (http://revistas.ucr.ac.cr/index.php/matematica/article/viewFile/259/239)

            // We will need the sample mean and standard deviation
            int    n;
            double mean, stdDev;

            Univariate.ComputeMomentsUpToSecond(sample, out n, out mean, out stdDev);
            stdDev = Math.Sqrt(stdDev / n);

            // Use the method of moments to get an initial estimate of s.
            double s0 = Math.Sqrt(6.0) / Math.PI * stdDev;

            // Define the function to zero
            Func <double, double> fnc = (double s) => {
                double u, v;
                MaximumLikelihoodHelper(sample, n, mean, s, out u, out v);
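                // u and v are assumed to be the sample averages <e^{-d/s}> and <d e^{-d/s}>;
                // zeroing s + v / u enforces s = - <d e^{-d/s}> / <e^{-d/s}>, as derived above.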
                return(s + v / u);
            };

            // Zero it to compute the best-fit s
            double s1 = FunctionMath.FindZero(fnc, s0);

            // Compute the corresponding best-fit m
            double u1, v1;

            MaximumLikelihoodHelper(sample, n, mean, s1, out u1, out v1);
            double m1 = mean - s1 * Math.Log(u1);

            // Compute the curvature matrix
            double w1 = 0.0;
            double w2 = 0.0;

            foreach (double x in sample)
            {
                double z = (x - m1) / s1;
                double e = Math.Exp(-z);
                w1 += z * e;
                w2 += z * z * e;
            }
            w1 /= sample.Count;
            w2 /= sample.Count;
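            // w1 and w2 are now the sample averages <z e^{-z}> and <z^2 e^{-z}> that appear in the
            // mixed and second s-derivatives of the log likelihood above.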
            SymmetricMatrix C = new SymmetricMatrix(2);

            C[0, 0] = (n - 2) / (s1 * s1);
            C[0, 1] = (n - 2) / (s1 * s1) * w1;
            C[1, 1] = (n - 2) / (s1 * s1) * (w2 + 1.0);
            SymmetricMatrix CI = C.CholeskyDecomposition().Inverse();
            // The use of (n-2) here in place of n is a very ad hoc attempt to increase accuracy.


            // Compute goodness-of-fit
            GumbelDistribution dist = new GumbelDistribution(m1, s1);
            TestResult         test = sample.KolmogorovSmirnovTest(dist);

            return(new GumbelFitResult(m1, s1, CI[0, 0], CI[1, 1], CI[0, 1], test));
        }
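A minimal usage sketch of the FitToGumbel extension method on a plain list of values; the numbers are placeholders, not real data.

        List<double> maxima = new List<double>() { 3.1, 4.7, 2.9, 5.6, 3.8, 4.2, 6.1, 3.5 };
        GumbelFitResult result = maxima.FitToGumbel();
        // Per the constructor call above, the result carries the fitted location and scale, their
        // variances and covariance, and the Kolmogorov-Smirnov goodness-of-fit test.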