private static UncertainValue Integrate_Adaptive(MultiFunctor f, CoordinateTransform[] map, IntegrationRegion r, EvaluationSettings settings)
        {
            // Create an evaluation rule
            GenzMalik75Rule rule = new GenzMalik75Rule(r.Dimension);

            // Use it on the whole region and put the answer in a linked list
            rule.Evaluate(f, map, r);
            LinkedList<IntegrationRegion> regionList = new LinkedList<IntegrationRegion>();
            regionList.AddFirst(r);

            // Iterate until convergence
            while (f.EvaluationCount < settings.EvaluationBudget) {

                // Add up value and errors in all regions.
                // While we are at it, take note of the region with the largest error.
                double value = 0.0;
                double error = 0.0;
                LinkedListNode<IntegrationRegion> regionNode = regionList.First;
                double maxError = 0.0;
                LinkedListNode<IntegrationRegion> maxErrorNode = null;
                while (regionNode != null) {
                    IntegrationRegion region = regionNode.Value;
                    value += region.Value;
                    error += region.Error;
                    //error += MoreMath.Sqr(region.Error);
                    if (region.Error > maxError) {
                        maxError = region.Error;
                        maxErrorNode = regionNode;
                    }
                    regionNode = regionNode.Next;
                }

                // Check for convergence.
                if ((error <= settings.AbsolutePrecision) || (error <= settings.RelativePrecision * Math.Abs(value))) {
                    return (new UncertainValue(value, error));
                }

                // Split the region with the largest error, and evaluate each subregion.
                IntegrationRegion maxErrorRegion = maxErrorNode.Value;
                regionList.Remove(maxErrorNode);
                IList<IntegrationRegion> subRegions = maxErrorRegion.Split(maxErrorRegion.SplitIndex);
                /*
                Countdown cnt = new Countdown(2);
                ThreadPool.QueueUserWorkItem((object state) => { rule.Evaluate(f, subRegions[0]); cnt.Signal(); });
                ThreadPool.QueueUserWorkItem((object state) => { rule.Evaluate(f, subRegions[1]); cnt.Signal(); });
                cnt.Wait();
                foreach (IntegrationRegion subRegion in subRegions) {
                    regionList.AddLast(subRegion);
                }
                */

                foreach (IntegrationRegion subRegion in subRegions) {
                    rule.Evaluate(f, map, subRegion);
                    regionList.AddLast(subRegion);
                }

            }

            throw new NonconvergenceException();
        }
Example #2
 internal EvaluationResult(int count, EvaluationSettings settings)
 {
     Debug.Assert(count > 0);
     Debug.Assert(settings != null);
     this.count = count;
     this.settings = settings;
 }
 /// <summary>
 /// Finds a local maximum of a multi-dimensional function in the vicinity of the given starting location, subject to the given evaluation constraints.
 /// </summary>
 /// <param name="function">The multi-dimensional function to maximize.</param>
 /// <param name="start">The starting location for the search.</param>
 /// <param name="settings">The evaluation settings that govern the search for the maximum.</param>
 /// <returns>The local maximum.</returns>
 public static MultiExtremum FindLocalMaximum(Func<IList<double>, double> function, IList<double> start, EvaluationSettings settings)
 {
     if (function == null) throw new ArgumentNullException("function");
     if (start == null) throw new ArgumentNullException("start");
     if (settings == null) throw new ArgumentNullException("settings");
     return(FindLocalExtremum(function, start, settings, true));
 }
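A minimal usage sketch for FindLocalMaximum (hypothetical, not taken from the library's own samples; it assumes the Meta.Numerics types MultiFunctionMath, MultiExtremum, and EvaluationSettings shown elsewhere on this page):

 // Hypothetical example: maximize f(x, y) = -(x^2 + y^2), which peaks at the origin with value 0.
 Func<IList<double>, double> g = (IList<double> x) => -(x[0] * x[0] + x[1] * x[1]);
 EvaluationSettings s = new EvaluationSettings() { RelativePrecision = 1.0E-8, EvaluationBudget = 10000 };
 MultiExtremum maximum = MultiFunctionMath.FindLocalMaximum(g, new double[] { 1.0, 1.0 }, s);
 Console.WriteLine("{0} at ({1}, {2})", maximum.Value, maximum.Location[0], maximum.Location[1]);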
Example #4
 internal Extremum(double x, double f, double f2, int count, EvaluationSettings settings)
     : base(count, settings)
 {
     this.x = x;
     this.f = f;
     this.f2 = f2;
 }
Example #5
 internal MultiExtremum(int count, EvaluationSettings settings, double[] point, double value, double precision, double[][] hessian)
     : base(count, settings)
 {
     Debug.Assert(point != null);
     this.point = point;
     this.value = value;
     this.precision = precision;
     this.hessian = hessian;
 }
 private static MultiExtremum FindGlobalExtremum(Func<IList<double>, double> function, IList<Interval> volume, EvaluationSettings settings, bool negate)
 {
     if (function == null) throw new ArgumentNullException("function");
     if (volume == null) throw new ArgumentNullException("volume");
     MultiFunctor f = new MultiFunctor(function, negate);
     DifferentialEvolutionSettings deSettings = GetDefaultSettings(settings, volume.Count);
     MultiExtremum extremum = FindGlobalExtremum(f, volume, deSettings);
     return (extremum);
 }
        public void BoxIntegrals()
        {
            // The box integrals
            //   B_n(r) = \int_{0}^{1} dx_1 \cdots dx_n ( x_1^2 + \cdots + x_n^2 )^{r/2}
            //   D_n(r) = \int_{0}^{1} dx_1 \cdots dx_n dy_1 \cdots dy_n \left[ (x_1 - y_1)^2 + \cdots + (x_n - y_n)^2 \right]^{r/2}
            // give moments of the distance of a random point in the unit box from a vertex (B_n) or from another random point (D_n). Various of these are known analytically.
            // see Bailey, Borwein, Crandall, "Box Integrals", Journal of Computational and Applied Mathematics 206 (2007) 196
            // http://www.davidhbailey.com/dhbpapers/boxintegrals.pdf and http://www.davidhbailey.com/dhbpapers/bbbz-conmath.pdf

            EvaluationSettings settings = new EvaluationSettings() { EvaluationBudget = 1000000, RelativePrecision = 1.0E-3 };

            // 2D integrals

            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralB(2, -1, settings), 2.0 * Math.Log(1.0 + Math.Sqrt(2.0)), settings.RelativePrecision * 2
            ));
            // Note 2 \ln(1 + \sqrt{2}) = \ln(3 + 2 \sqrt{2}) because (1 + \sqrt{2})^2 = 3 + 2 \sqrt{2}

            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralB(2, 1, settings), (Math.Sqrt(2.0) + Math.Log(Math.Sqrt(2.0) + 1.0)) / 3.0, settings.RelativePrecision * 2
            ));

            /*
            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralD(1, -1, new EvaluationSettings() { EvaluationBudget = 100000, RelativePrecision = 1.0E-2 }),
                (2.0 - 4.0 * Math.Sqrt(2.0)) / 3.0 + 4.0 * Math.Log(1.0 + Math.Sqrt(2.0)), settings.RelativePrecision * 2
            ));
            */

            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralD(1, 1, settings), 1.0 / 3.0, settings.RelativePrecision * 2
            ));

            // 3D integrals

            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralB(3, -1, settings), Math.Log(5.0 + 3.0 * Math.Sqrt(3.0)) - Math.Log(2.0) / 2.0 - Math.PI / 4.0, settings.RelativePrecision * 4
            ));

            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralB(3, 1, settings), Math.Log(2.0 + Math.Sqrt(3.0)) / 2.0 + Math.Sqrt(3.0) / 4.0 - Math.PI / 24.0, settings.RelativePrecision * 2
            ));

            // 4D integrals

            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralD(2, 1, settings), (2.0 + Math.Sqrt(2.0) + 5.0 * Math.Log(1.0 + Math.Sqrt(2.0))) / 15.0, settings.RelativePrecision * 4
            ));

            // 6D integrals
            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                BoxIntegralD(3, 1, settings),
                4.0 / 105.0 + 17.0 * Math.Sqrt(2.0) / 105.0 - 2.0 * Math.Sqrt(3.0) / 35.0 + Math.Log(1.0 + Math.Sqrt(2.0)) / 5.0 + 2.0 * Math.Log(2.0 + Math.Sqrt(3.0)) / 5.0 - Math.PI / 15.0,
                settings.RelativePrecision * 2
            ));
        }
 public double BoxIntegralB(int d, int r, EvaluationSettings settings)
 {
     return (MultiFunctionMath.Integrate((IList<double> x) => {
         double s = 0.0;
         for (int k = 0; k < d; k++) {
             s += x[k] * x[k];
         }
         return (Math.Pow(s, r / 2.0));
     }, UnitCube(d), settings).Value);
 }
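The BoxIntegralD helper called by the test above does not appear in this listing. A sketch consistent with the D_n(r) definition in the comments (an integral over a 2d-dimensional unit cube) might look like the following; treat it as an illustrative reconstruction, not the actual test code:

 public double BoxIntegralD(int d, int r, EvaluationSettings settings)
 {
     // Hypothetical reconstruction: integrate [ (x_1 - y_1)^2 + \cdots + (x_d - y_d)^2 ]^{r/2}
     // over the 2d-dimensional unit cube, packing x into the first d coordinates and y into the last d.
     return (MultiFunctionMath.Integrate((IList<double> z) => {
         double s = 0.0;
         for (int k = 0; k < d; k++) {
             double delta = z[k] - z[d + k];
             s += delta * delta;
         }
         return (Math.Pow(s, r / 2.0));
     }, UnitCube(2 * d), settings).Value);
 }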
Example #9
 public BaseOdeStepper(Func <double, T, T> rhs, double x0, T y0, EvaluationSettings settings)
 {
     if (rhs == null)
     {
         throw new ArgumentNullException("rhs");
     }
     if (settings == null)
     {
         throw new ArgumentNullException("settings");
     }
     this.rhs      = rhs;
     this.X        = x0;
     this.Y        = y0;
     this.Settings = settings;
 }
 private static void SetDefaultOptimizationSettings(EvaluationSettings settings, int d)
 {
     if (settings.RelativePrecision < 0.0)
     {
         settings.RelativePrecision = Math.Pow(10.0, -(10.0 + 4.0 / d));
     }
     if (settings.AbsolutePrecision < 0.0)
     {
         settings.AbsolutePrecision = Math.Pow(10.0, -(10.0 + 4.0 / d));
     }
     if (settings.EvaluationBudget < 0)
     {
         settings.EvaluationBudget = 16 * (d + 1) * (d + 2) * (d + 3);
     }
 }
Example #11
 internal static void SetOdeDefaults(EvaluationSettings settings)
 {
     if (settings.RelativePrecision < 0)
     {
         settings.RelativePrecision = 1.0E-12;
     }
     if (settings.AbsolutePrecision < 0)
     {
         settings.AbsolutePrecision = 1.0E-24;
     }
     if (settings.EvaluationBudget < 0)
     {
         settings.EvaluationBudget = 8192;
     }
 }
        private static MultiExtremum FindLocalExtremum(Func<IList<double>, double> function, IList<double> start, EvaluationSettings settings, bool negate)
        {
            MultiFunctor f = new MultiFunctor(function, negate);

            // Pick an initial radius; we need to do this better.
            /*
            double s = Double.MaxValue;
            foreach (double x in start) s = Math.Min((Math.Abs(x) + 1.0 / 8.0) / 8.0, s);
            */

            double s = 0.0;
            foreach (double x in start) s += (Math.Abs(x) + 1.0 / 4.0) / 4.0;
            s = s / start.Count;

            //double s = 0.2;
            Debug.WriteLine("s={0}", s);

            return (FindMinimum_ModelTrust(f, start, s, settings));
        }
Example #13
        private static DifferentialEvolutionSettings GetDefaultSettings(EvaluationSettings settings, int d)
        {
            DifferentialEvolutionSettings deSettings = new DifferentialEvolutionSettings();

            deSettings.Population           = 8 * d + 4;
            deSettings.CrossoverProbability = 1.0 - 1.0 / 8.0 - 1.0 / d;

            if (settings == null)
            {
                deSettings.RelativePrecision = Math.Pow(10.0, -(2.0 + 4.0 / d));
                deSettings.AbsolutePrecision = MoreMath.Sqr(deSettings.RelativePrecision);
                deSettings.EvaluationBudget  = 128 * d * d * d * d;
            }
            else
            {
                deSettings.RelativePrecision = settings.RelativePrecision;
                deSettings.AbsolutePrecision = settings.AbsolutePrecision;
                deSettings.EvaluationBudget  = settings.EvaluationBudget;
            }

            return(deSettings);
        }
Example #14
        public void Ackley()
        {
            // Ackley's function has many local minima and a global minimum of 0 at the origin.

            Func<IList<double>, double> function = (IList<double> x) => {
                double s = 0.0;
                double c = 0.0;
                for (int i = 0; i < x.Count; i++) {
                    s += x[i] * x[i];
                    c += Math.Cos(2.0 * Math.PI * x[i]);
                }
                return (-20.0 * Math.Exp(-0.2 * Math.Sqrt(s / x.Count)) - Math.Exp(c / x.Count) + 20.0 + Math.E);
            };

            EvaluationSettings settings = new EvaluationSettings() { AbsolutePrecision = 1.0E-8, EvaluationBudget = 10000000 };

            for (int n = 2; n < 16; n = (int) Math.Round(AdvancedMath.GoldenRatio * n)) {
                Console.WriteLine("n={0}", n);

                Interval[] box = new Interval[n];
                for (int i = 0; i < box.Length; i++) box[i] = Interval.FromEndpoints(-32.0, 32.0);

                MultiExtremum minimum = MultiFunctionMath.FindGlobalMinimum(function, box, settings);

                Console.WriteLine(minimum.EvaluationCount);
                Console.WriteLine(minimum.Value);
                foreach (double coordinate in minimum.Location) Console.WriteLine(coordinate);

                Assert.IsTrue(minimum.Dimension == n);

                Assert.IsTrue(TestUtilities.IsNearlyEqual(minimum.Value, 0.0, new EvaluationSettings() { AbsolutePrecision = 2.0 * minimum.Precision }));

                ColumnVector solution = new ColumnVector(n);
                Assert.IsTrue(TestUtilities.IsNearlyEqual(minimum.Location, solution, new EvaluationSettings() { AbsolutePrecision = 2.0 * Math.Sqrt(minimum.Precision) }));

            }
        }
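Written out, the function coded above is the standard n-dimensional Ackley function,

    f(x) = -20 \exp\left( -0.2 \sqrt{ \frac{1}{n} \sum_{i=1}^{n} x_i^2 } \right) - \exp\left( \frac{1}{n} \sum_{i=1}^{n} \cos(2 \pi x_i) \right) + 20 + e ,

which vanishes at the origin.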
Example #15
        public static double SolveConservativeOde(Func <double, double, double> rhs, double x0, double y0, double yp0, double x1, EvaluationSettings settings)
        {
            if (rhs == null)
            {
                throw new ArgumentNullException("rhs");
            }
            BaseOdeStepper <double> stepper = new StoermerStepper(rhs, x0, y0, yp0, settings);

            stepper.Integrate(x1);
            return(stepper.Y);
        }
 /// <summary>
 /// Maximizes a function on a multi-dimensional space in the vicinity of a given point, subject to the given settings.
 /// </summary>
 /// <param name="f">The function.</param>
 /// <param name="x">The starting point for the search.</param>
 /// <param name="settings">The evaluation settings.</param>
 /// <returns>The maximum.</returns>
 /// <exception cref="ArgumentNullException"><paramref name="f"/>, <paramref name="x"/>, or <paramref name="settings"/> is null.</exception>
 /// <exception cref="NonconvergenceException">The maximum was not found to the required precision within the budgeted number of function evaluations.</exception>
 internal static SpaceExtremum FindMaximum(Func <double[], double> f, double[] x, EvaluationSettings settings)
 {
     return(FindMinimum((double[] p) => - f(p), x, settings));
 }
Example #17
        public void DistributionCentralMomentIntegral()
        {
            foreach (Distribution distribution in distributions) {
                foreach (int n in TestUtilities.GenerateIntegerValues(2, 24, 8)) {

                    // get the predicted central moment
                    double C = distribution.MomentAboutMean(n);

                    // don't try to integrate infinite moments
                    if (Double.IsInfinity(C) || Double.IsNaN(C)) continue;

                    if (C == 0.0) continue;

                    EvaluationSettings settings = new EvaluationSettings();
                    if (C == 0.0) {
                        // if moment is zero, use absolute precision
                        settings.AbsolutePrecision = TestUtilities.TargetPrecision;
                        settings.RelativePrecision = 0.0;
                    } else {
                        // if moment is non-zero, use relative precision
                        settings.AbsolutePrecision = 0.0;
                        settings.RelativePrecision = TestUtilities.TargetPrecision;
                    }

                    // do the integral
                    double m = distribution.Mean;
                    Func<double, double> f = delegate(double x) {
                        return (distribution.ProbabilityDensity(x) * MoreMath.Pow(x - m, n));
                    };
                    try {
                        double CI = FunctionMath.Integrate(f, distribution.Support, settings).Value;
                        Console.WriteLine("{0} {1} {2} {3}", distribution.GetType().Name, n, C, CI);
                        if (C == 0.0) {
                            Assert.IsTrue(Math.Abs(CI) < TestUtilities.TargetPrecision);
                        } else {
                            double e = TestUtilities.TargetPrecision;
                            // reduce required precision, because some distributions (e.g. Kolmogorov, Weibull)
                            // have no analytic expressions for central moments, which must therefore be
                            // determined via raw moments and are thus subject to cancellation error
                            // can we revisit this later?
                            if (distribution is WeibullDistribution) e = Math.Sqrt(Math.Sqrt(e));
                            if (distribution is KolmogorovDistribution) e = Math.Sqrt(e);
                            if (distribution is KuiperDistribution) e = Math.Sqrt(Math.Sqrt(e));
                            if (distribution is TriangularDistribution) e = Math.Sqrt(e);
                            Assert.IsTrue(TestUtilities.IsNearlyEqual(C, CI, e));
                        }
                    } catch (NonconvergenceException) {
                        Console.WriteLine("{0} {1} {2} {3}", distribution.GetType().Name, n, C, "NC");
                        // deal with these later; they are integration problems, not distribution problems
                    }
                }
            }
        }
Example #18
        private static double FindMinimum(
            Func<double, double> f,
            double a, double b
        )
        {
            // evaluate three points within the bracket
            double u = (3.0 * a + b) / 4.0;
            double v = (a + b) / 2.0;
            double w = (a + 3.0 * b) / 4.0;

            double fu = f(u); double fv = f(v); double fw = f(w);

            Console.WriteLine("f({0})={1}  f({2})={3}  f({4})={5}", u, fu, v, fv, w, fw);

            // move in the bracket boundaries, if possible
            if (fv < fu) { a = u; if (fw < fv) a = v; }
            if (fv < fw) { b = w; if (fu < fv) b = v; }

            Console.WriteLine("a={0} b={1}", a, b);

            // sort u, v, w by fu, fv, fw values
            // these three comparisons are the most efficient three-item sort
            if (fv < fu) { double t = v; v = u; u = t; t = fv; fv = fu; fu = t; }
            if (fw < fu) { double t = w; w = u; u = t; t = fw; fw = fu; fu = t; }
            if (fw < fv) { double t = w; w = v; v = t; t = fw; fw = fv; fv = t; }

            // An evaluation budget of 32 is sufficient for all our test cases except for |x|, which requires 82 (!) evaluations to converge. Parabolic fitting just does a very poor job
            // for this function (at all scales, since it is scale invariant). We should look into cubic fitting.

            EvaluationSettings settings = new EvaluationSettings() { EvaluationBudget = 128, AbsolutePrecision = 0.0, RelativePrecision = 0.0 };
            return (FindMinimum(f, a, b, u, fu, v, fv, w, fw, settings, 3));
        }
Example #19
        private double FindMinimumWithDerivative(
            FuncWithDerivative f,
            double a, double b,
            double u, double fu, double fpu,
            double v, double fv, double fpv,
            EvaluationSettings settings
        )
        {
            double tol = 0.0;

            int count = 0;
            while (count < settings.EvaluationBudget) {

                Console.WriteLine("n = {0}, tol = {1}", count, tol);
                Console.WriteLine("[{0} f({1})={2}({3}) f({4})={5}({6}) {7}]", a, u, fu, fpu, v, fv, fpv, b);

                // a, b bracket minimum a < u, v < b and f(u), f(v) <= f(a), f(b)
                Debug.Assert(a < b);
                Debug.Assert((a <= u) && (u <= b));
                //Debug.Assert((a <= v) && (v <= b));
                Debug.Assert(fu <= fv);

                if ((b - a) <= 4.0 * tol) return (u);

                // compute the minimum of the interpolating Hermite cubic
                double x, fpp; CubicHermiteMinimum(u, fu, fpu, v, fv, fpv, out x, out fpp);

                Console.WriteLine("cubic x = {0}, fpp = {1}", x, fpp);

                // if the cubic had no minimum, or the minimum lies outside our bounds, fall back to bisection
                if (Double.IsNaN(x) || (x <= a) || (x >= b)) {

                    // the derivative tells us which side to choose
                    if (fpu > 0.0) {
                        x = (a + u) / 2.0;
                    } else {
                        x = (u + b) / 2.0;
                    }

                    Console.WriteLine("bisection x = {0}", x);

                }

                // ensure we don't evaluate within tolerance of an existing point
                if (Math.Abs(x - u) < tol) { Console.WriteLine("shift from u (x={0})", x); x = (x > u) ? u + tol : u - tol; }
                if ((x - a) < tol) { Console.WriteLine("shift from a (x={0})", x); x = a + tol; }
                if ((b - x) < tol) { Console.WriteLine("shift from b (x={0})", x); x = b - tol; }

                // evaluate the function plus derivative at the predicted minimum
                double fx, fpx;
                f(x, out fx, out fpx);
                count++;

                Console.WriteLine("f({0}) = {1}({2})", x, fx, fpx);

                // check if we have converged
                double df = fu - fx;
                Console.WriteLine("df={0}", df);
                if ((Math.Abs(df) < settings.AbsolutePrecision) || (2.0 * Math.Abs(df) < settings.RelativePrecision * (Math.Abs(fu) + Math.Abs(fx)))) {
                    Console.WriteLine("count = {0}", count);
                    return (x);
                }

                if (fx < fu) {

                    // x is the new lowest point: f(x) < f(u) < f(v)
                    // this is the expected outcome

                    // move the bracket
                    if (x < u) {
                        b = u;
                    } else {
                        a = u;
                    }

                    // x -> u -> v
                    v = u; fv = fu; fpv = fpu;
                    u = x; fu = fx; fpu = fpx;

                } else {

                    // move the bracket
                    if (x < u) {
                        a = x;
                    } else {
                        b = x;
                    }

                    if (fx < fv) {

                        // x lies between other two known points: f(u) < f(x) < f(v)

                        // x -> v
                        v = x; fv = fx; fpv = fpx;

                    } else {

                        // x is higher than both other points: f(u) < f(v) < f(x)
                        // this is a really poor outcome; we expected to get a point lower than our other two and we got a point higher than both
                        // next time we should bisect
                        Console.WriteLine("bad point");
                        //throw new NotImplementedException();

                        //v = x; fv = fx; fpv = fpx;

                    }

                }

                // if the user has specified a tolerance, use it
                if ((settings.RelativePrecision > 0.0 || settings.AbsolutePrecision > 0.0)) {
                    tol = Math.Max(Math.Abs(u) * settings.RelativePrecision, settings.AbsolutePrecision);
                } else {
                    // otherwise, try to get the tolerance from the curvature
                    if (fpp > 0.0) {
                        tol = Math.Sqrt(2.0 * 1.0E-14 * (Math.Abs(fu) + 1.0E-14) / fpp);
                    } else {
                        // but if we don't have a usable curvature either, wing it
                        if (tol == 0.0) tol = 1.0E-7;
                    }
                }

            }

            throw new NonconvergenceException();
        }
Example #20
        public void ThomsonLocal()
        {
            for (int n = 2; n < 8; n++)
            {
                Console.WriteLine(n);

                // define the thompson metric
                Func<IList<double>, double> f = GetThompsonFunction(n);

                // random distribution to start
                // using antipodal pairs gives us a better starting configuration
                Random r = new Random(1001110000);
                double[] start = new double[2 * (n - 1)];
                for (int i = 0; i < (n - 1) / 2; i++) {
                    int j = 4 * i;
                    start[j] = -Math.PI + 2.0 * r.NextDouble() * Math.PI;
                    start[j + 1] = Math.Asin(2.0 * r.NextDouble() - 1.0);
                    start[j + 2] = -(Math.PI - start[j]);
                    start[j + 3] = -start[j + 1];
                }
                // add one more point if necessary
                if (n % 2 == 0) {
                    start[2 * n - 4] = -Math.PI + 2.0 * r.NextDouble() * Math.PI;
                    start[2 * n - 3] = Math.Asin(2.0 * r.NextDouble() - 1.0);
                }

                EvaluationSettings set = new EvaluationSettings() { RelativePrecision = 1.0E-9 };
                MultiExtremum min = MultiFunctionMath.FindLocalMinimum(f, start, set);

                Console.WriteLine(min.Dimension);
                Console.WriteLine(min.EvaluationCount);
                Console.WriteLine("{0} ({1}) ?= {2}", min.Value, min.Precision, thompsonSolutions[n]);

                Assert.IsTrue(min.Dimension == 2 * (n - 1));
                Assert.IsTrue(TestUtilities.IsNearlyEqual(min.Value, thompsonSolutions[n], new EvaluationSettings() { AbsolutePrecision = 4.0 * min.Precision }));

            }
        }
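GetThompsonFunction is not shown in this listing; presumably it returns the Coulomb energy of the Thomson problem for n unit charges on the unit sphere,

    E = \sum_{i < j} \frac{1}{ | \hat{r}_i - \hat{r}_j | } , \qquad \hat{r}_i = ( \cos\varphi_i \cos\theta_i , \; \cos\varphi_i \sin\theta_i , \; \sin\varphi_i ) ,

with one charge held fixed, which is why the search space above has 2(n-1) coordinates (an azimuth and a latitude per free charge). Treat this as an assumption about the helper, not its actual definition.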
Example #21
        private static UncertainValue Integrate_MonteCarlo(MultiFunctor f, CoordinateTransform[] map, IList <Interval> box, EvaluationSettings settings)
        {
            int d = box.Count;

            // Use a Sobol quasi-random sequence. This gives us 1/N accuracy instead of 1/\sqrt{N} accuracy.
            //VectorGenerator g = new RandomVectorGenerator(d, new Random(314159265));
            VectorGenerator g = new SobolVectorGenerator(d);

            // Start with a trivial Lepage grid.
            // We will increase the grid size every few cycles.
            // My tests indicate that trying to increase every cycle or even every other cycle is too often.
            // This makes sense, because we have no reason to believe our new grid will be better until we
            // have substantially more evaluations per grid cell than we did for the previous grid.
            LePageGrid grid        = new LePageGrid(box, 1);
            int        refineCount = 0;

            // Start with a reasonable number of evaluations per cycle that increases with the dimension.
            int cycleCount = 8 * d;

            //double lastValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);

            // Each cycle consists of three sets of evaluations.
            // At first I did this with just two sets and used the difference between the two sets as an error estimate.
            // I found that it was pretty common for that difference to be low just by chance, causing error underestimation.
            double value1 = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
            double value2 = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
            double value3 = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);

            while (f.EvaluationCount < settings.EvaluationBudget)
            {
                // Take the largest deviation as the error.
                double value = (value1 + value2 + value3) / 3.0;
                double error = Math.Max(Math.Abs(value1 - value3), Math.Max(Math.Abs(value1 - value2), Math.Abs(value2 - value3)));
                Debug.WriteLine("{0} {1} {2}", f.EvaluationCount, value, error);

                // Check for convergence.
                if ((error <= settings.AbsolutePrecision) || (error <= Math.Abs(value) * settings.RelativePrecision))
                {
                    return(new UncertainValue(value, error));
                }

                // Do more cycles. In order for new sets to be equal-sized, one of those must be at the current count and the next at twice that.
                double smallValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
                cycleCount *= 2;
                double bigValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);

                // Combine all the cycles into new ones with twice the number of evaluations each.
                value1 = (value1 + value2) / 2.0;
                value2 = (value3 + smallValue) / 2.0;
                value3 = bigValue;

                //double currentValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
                //double error = Math.Abs(currentValue - lastValue);
                //double value = (currentValue + lastValue) / 2.0;



                //lastValue = value;

                // Increase the number of evaluations for the next cycle.
                //cycleCount *= 2;

                // Refine the grid for the next cycle.
                refineCount++;
                if (refineCount == 2)
                {
                    Debug.WriteLine("Replacing grid with {0} bins after {1} evaluations", grid.BinCount, grid.EvaluationCount);
                    grid        = grid.ComputeNewGrid(grid.BinCount * 2);
                    refineCount = 0;
                }
            }

            throw new NonconvergenceException();
        }
Example #22
 private double RambleIntegral(int d, int s, EvaluationSettings settings)
 {
     return (MultiFunctionMath.Integrate((IList<double> x) => {
         Complex z = 0.0;
         for (int k = 0; k < d; k++) {
             z += ComplexMath.Exp(2.0 * Math.PI * Complex.I * x[k]);
         }
         return (MoreMath.Pow(ComplexMath.Abs(z), s));
     }, UnitCube(d), settings).Value);
 }
Example #23
        public static ColumnVector SolveConservativeOde(Func <double, IList <double>, IList <double> > rhs, double x0, IList <double> y0, IList <double> yp0, double x1, EvaluationSettings settings)
        {
            if (rhs == null)
            {
                throw new ArgumentNullException("rhs");
            }
            if (y0 == null)
            {
                throw new ArgumentNullException("y0");
            }

            MultiOdeStepper stepper = new MultiStoermerStepper(rhs, x0, y0, yp0, settings);

            stepper.Integrate(x1);
            return(new ColumnVector(stepper.Y));
        }
Example #24
        public void SteinmetzVolume()
        {
            // The Steinmetz solid is the intersection of unit cylinders along all axes. This is another hard-edged integral. Analytic values are known for d = 2 through 5.
            // http://www.math.illinois.edu/~hildebr/ugresearch/cylinder-spring2013report.pdf
            // http://www.math.uiuc.edu/~hildebr/igl/nvolumes-fall2012report.pdf

            EvaluationSettings settings = new EvaluationSettings() { EvaluationBudget = 1000000, RelativePrecision = 1.0E-2 };

            for (int d = 2; d <= 5; d++) {

                IntegrationResult v1 = MultiFunctionMath.Integrate((IList<double> x) => {
                    for (int i = 0; i < d; i++) {
                        double s = 0.0;
                        for (int j = 0; j < d; j++) {
                            if (j != i) s += x[j] * x[j];
                        }
                        if (s > 1.0) return (0.0);
                    }
                    return (1.0);
                }, SymmetricUnitCube(d), settings);

                double v2 = 0.0;
                switch (d) {
                    case 2:
                        // trivial square
                        v2 = 4.0;
                        break;
                    case 3:
                        v2 = 16.0 - 8.0 * Math.Sqrt(2.0);
                        break;
                    case 4:
                        v2 = 48.0 * (Math.PI / 4.0 - Math.Atan(Math.Sqrt(2.0)) / Math.Sqrt(2.0));
                        break;
                    case 5:
                        v2 = 256.0 * (Math.PI / 12.0 - Math.Atan(1.0 / (2.0 * Math.Sqrt(2.0))) / Math.Sqrt(2.0));
                        break;
                }

                Console.WriteLine("{0} {1} {2}", d, v1.Value, v2);
                Assert.IsTrue(TestUtilities.IsNearlyEqual(v1.Value, v2, new EvaluationSettings() { AbsolutePrecision = 4.0 * v1.Precision }));

            }
        }
Example #25
        public void WatsonIntegrals()
        {
            // Watson defined and analytically integrated three complicated triple integrals related to random walks in three dimensions
            // See http://mathworld.wolfram.com/WatsonsTripleIntegrals.html

            // These integrals are difficult, so up the budget to about 1,000,000 and reduce the target accuracy to about 10^{-4}
            EvaluationSettings settings = new EvaluationSettings() { RelativePrecision = 1.0E-4, EvaluationBudget = 1000000 };

            Interval watsonWidth = Interval.FromEndpoints(0.0, Math.PI);
            Interval[] watsonBox = new Interval[] { watsonWidth, watsonWidth, watsonWidth };

            Assert.IsTrue(
                MultiFunctionMath.Integrate(
                    (IList<double> x) => 1.0 / (1.0 - Math.Cos(x[0]) * Math.Cos(x[1]) * Math.Cos(x[2])), watsonBox, settings
                ).Estimate.ConfidenceInterval(0.99).ClosedContains(
                    MoreMath.Pow(AdvancedMath.Gamma(1.0 / 4.0), 4) / 4.0
                )
            );

            Assert.IsTrue(
                MultiFunctionMath.Integrate(
                    (IList<double> x) => 1.0 / (3.0 - Math.Cos(x[0]) * Math.Cos(x[1]) - Math.Cos(x[1]) * Math.Cos(x[2]) - Math.Cos(x[0]) * Math.Cos(x[2])), watsonBox, settings
                ).Estimate.ConfidenceInterval(0.99).ClosedContains(
                    3.0 * MoreMath.Pow(AdvancedMath.Gamma(1.0 / 3.0), 6) / Math.Pow(2.0, 14.0 / 3.0) / Math.PI
                )
            );

            Assert.IsTrue(
                MultiFunctionMath.Integrate(
                    (IList<double> x) => 1.0 / (3.0 - Math.Cos(x[0]) - Math.Cos(x[1]) - Math.Cos(x[2])), watsonBox, settings
                ).Estimate.ConfidenceInterval(0.99).ClosedContains(
                    Math.Sqrt(6.0) / 96.0 * AdvancedMath.Gamma(1.0 / 24.0) * AdvancedMath.Gamma(5.0 / 24.0) * AdvancedMath.Gamma(7.0 / 24.0) * AdvancedMath.Gamma(11.0 / 24.0)
                )
            );
        }
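For reference, the first of Watson's integrals as coded above is

    \int_0^{\pi} \int_0^{\pi} \int_0^{\pi} \frac{dx \, dy \, dz}{1 - \cos x \cos y \cos z} = \frac{\Gamma(1/4)^4}{4} ,

which is exactly the closed form the assertion checks against.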
        private static void FindExtremum_Amobea(MultiFunctor f, Vertex[] vertexes, EvaluationSettings settings)
        {
            int d = vertexes.Length - 1;

            while (f.EvaluationCount < settings.EvaluationBudget)
            {
                // Identify the best and worst vertexes.
                int minVertex = 0; double minY = vertexes[0].Y;
                int maxVertex = 0; double maxY = vertexes[0].Y;
                int nextMaxVertex = 0; double nextMaxY = vertexes[0].Y;
                for (int i = 1; i < vertexes.Length; i++)
                {
                    double y = vertexes[i].Y;
                    if (y < minY)
                    {
                        minVertex = i; minY = y;
                    }
                    if (y > nextMaxY)
                    {
                        if (y > maxY)
                        {
                            nextMaxVertex = maxVertex; nextMaxY = maxY;
                            maxVertex     = i; maxY = y;
                        }
                        else
                        {
                            nextMaxVertex = i; nextMaxY = y;
                        }
                    }
                }

                // Terminate based on spread between vertexes.
                if ((maxY - minY) <= Math.Abs(maxY) * settings.RelativePrecision)
                {
                    Debug.WriteLine(minY);
                    return;
                }

                // Produce a new candidate vertex by reflecting the worst vertex through the opposite face.
                double[] centroid = new double[d];
                for (int i = 0; i < vertexes.Length; i++)
                {
                    if (i != maxVertex)
                    {
                        for (int j = 0; j < d; j++)
                        {
                            centroid[j] += vertexes[i].X[j] / d;
                        }
                    }
                }
                double[] newX = new double[d];
                for (int j = 0; j < d; j++)
                {
                    newX[j] = centroid[j] + alpha * (centroid[j] - vertexes[maxVertex].X[j]);
                }
                double newY = f.Evaluate(newX);

                if (newY < nextMaxY)
                {
                    // As long as the new point is not terrible, we are going to replace the worst point with it.
                    vertexes[maxVertex] = new Vertex()
                    {
                        X = newX, Y = newY
                    };
                    Debug.WriteLine("Reflect");

                    if (newY < minY)
                    {
                        // If the new point was very good, we will try to extend the simplex further in that direction.
                        double[] extendedX = new double[d];
                        for (int j = 0; j < d; j++)
                        {
                            extendedX[j] = centroid[j] + 2.0 * (centroid[j] - vertexes[maxVertex].X[j]);
                        }
                        double extendedY = f.Evaluate(extendedX);
                        if (extendedY < minY)
                        {
                            // If the extension is also very good, we replace the reflected point with it.
                            vertexes[maxVertex] = new Vertex()
                            {
                                X = extendedX, Y = extendedY
                            };
                            Debug.WriteLine("No, Extend");
                        }
                    }
                }
                else
                {
                    // The reflected point was pretty terrible, so we will try to produce a new candidate
                    // point by contracting the worst point toward the centroid instead.
                    for (int j = 0; j < d; j++)
                    {
                        newX[j] = centroid[j] + (vertexes[maxVertex].X[j] - centroid[j]) / 2.0;
                    }
                    newY = f.Evaluate(newX);

                    if (newY < nextMaxY)
                    {
                        // If that candidate is not terrible, accept it.
                        vertexes[maxVertex] = new Vertex()
                        {
                            X = newX, Y = newY
                        };
                        Debug.WriteLine("Contract");
                    }
                    else
                    {
                        // Otherwise, we give up and simply shrink our simplex down toward the minimum.
                        for (int i = 0; i < vertexes.Length; i++)
                        {
                            if (i != minVertex)
                            {
                                double[] shrunkX = new double[d];
                                for (int j = 0; j < d; j++)
                                {
                                    shrunkX[j] = vertexes[minVertex].X[j] + (vertexes[i].X[j] - vertexes[minVertex].X[j]) / 2.0;
                                }
                                double shrunkY = f.Evaluate(shrunkX);
                                vertexes[i] = new Vertex()
                                {
                                    X = shrunkX, Y = shrunkY
                                };
                            }
                        }
                        Debug.WriteLine("Shrink");
                    }
                }
            }
        }
        public static void FindExtremum_Amobea(Func <IList <double>, double> function, IList <double> x0, EvaluationSettings settings)
        {
            MultiFunctor f = new MultiFunctor(function);

            int d = x0.Count;

            Vertex[] vertexes = new Vertex[d + 1];
            for (int i = 0; i < d; i++)
            {
                double[] x = new double[d];
                x0.CopyTo(x, 0);
                x[i] = x[i] + 1.0 + Math.Abs(x[i]);
                double y = f.Evaluate(x);
                vertexes[i] = new Vertex()
                {
                    X = x, Y = y
                };
            }
            double[] x00 = new double[d];
            x0.CopyTo(x00, 0);
            double y00 = f.Evaluate(x00);

            vertexes[d] = new Vertex()
            {
                X = x00, Y = y00
            };

            FindExtremum_Amobea(f, vertexes, settings);
        }
Example #29
        public BulrischStoerStepper(Func <double, IList <double>, IList <double> > rhs, double x0, IList <double> y0, EvaluationSettings settings) : base(rhs, x0, y0, settings)
        {
            YPrime = Evaluate(x0, y0);
            //DeltaX = 1.0;
            DeltaX = 0.1;
            //ComputeInitialSetp();

            extrapolators = new NevilleExtrapolator[Dimension];
            for (int i = 0; i < Dimension; i++)
            {
                extrapolators[i] = new NevilleExtrapolator(N.Length);
            }
            errorExtrapolator = new NevilleExtrapolator(N.Length);
        }
Example #30
 public MultiOdeStepper(Func <double, IList <double>, IList <double> > rhs, double x0, IList <double> y0, EvaluationSettings settings) : base(rhs, x0, y0, settings)
 {
     this.Dimension = y0.Count;
 }
Example #32
        public static double SolveOde(Func <double, double, double> rhs, double x0, double y0, double x1, EvaluationSettings settings)
        {
            if (rhs == null)
            {
                throw new ArgumentNullException("rhs");
            }

            MultiOdeStepper stepper = new BulrischStoerStepper((double x, IList <double> y) => new double[] { rhs(x, y[0]) }, x0, new double[] { y0 }, settings);

            //MultiOdeStepper stepper = new RungeKutta54Stepper((double x, IList<double> y) => new double[] { rhs(x, y[0]) }, range.LeftEndpoint, new double[] { start }, settings);
            stepper.Integrate(x1);
            return(stepper.Y[0]);
        }
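A minimal usage sketch (hypothetical, not from the library's test suite): integrate y' = -y from x = 0 to x = 1 and compare the result with the exact value e^{-1}, using the SolveOde overload shown above.

 // Hypothetical example: y' = -y with y(0) = 1, so y(1) = 1/e.
 EvaluationSettings odeSettings = new EvaluationSettings() { RelativePrecision = 1.0E-12, AbsolutePrecision = 1.0E-24, EvaluationBudget = 8192 };
 double y1 = SolveOde((double x, double y) => -y, 0.0, 1.0, 1.0, odeSettings);
 Console.WriteLine("{0} ?= {1}", y1, Math.Exp(-1.0));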
        public void AssociatedLaguerreOrthonormality()
        {
            // don't let orders get too big, or (1) the Gamma function will overflow and (2) our integral will become highly oscillatory
            foreach (int n in TestUtilities.GenerateIntegerValues(1, 10, 3)) {
                foreach (int m in TestUtilities.GenerateIntegerValues(1, 10, 3)) {
                    foreach (double a in TestUtilities.GenerateRealValues(0.1, 10.0, 5)) {

                        //int n = 2;
                        //int m = 4;
                        //double a = 3.5;

                        Console.WriteLine("n={0} m={1} a={2}", n, m, a);

                        // evaluate the orthonormal integral
                        Func<double, double> f = delegate(double x) {
                            return (Math.Pow(x, a) * Math.Exp(-x) *
                                OrthogonalPolynomials.LaguerreL(m, a, x) *
                                OrthogonalPolynomials.LaguerreL(n, a, x)
                            );
                        };
                        Interval r = Interval.FromEndpoints(0.0, Double.PositiveInfinity);

                        // need to loosen default evaluation settings in order to get convergence in some of these cases
                        // seems to have most convergence problems for large a
                        EvaluationSettings e = new EvaluationSettings();
                        e.AbsolutePrecision = TestUtilities.TargetPrecision;
                        e.RelativePrecision = TestUtilities.TargetPrecision;

                        double I = FunctionMath.Integrate(f, r, e).Value;
                        Console.WriteLine(I);

                        // test for orthonormality
                        if (n == m) {
                            Assert.IsTrue(TestUtilities.IsNearlyEqual(
                                I, AdvancedMath.Gamma(n + a + 1) / AdvancedIntegerMath.Factorial(n)
                            ));
                        } else {
                            Assert.IsTrue(Math.Abs(I) < TestUtilities.TargetPrecision);
                        }

                    }
                }
            }
        }
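The relation being verified is the associated Laguerre orthogonality

    \int_0^{\infty} x^{a} e^{-x} \, L_n^{a}(x) \, L_m^{a}(x) \, dx = \frac{\Gamma(n + a + 1)}{n!} \, \delta_{nm} ,

which is why the diagonal case is compared against Gamma(n + a + 1) / n! and the off-diagonal case against zero.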
Example #34
 internal MultiOdeResult(double x, double[] y, double[] yPrime, int count, EvaluationSettings settings) : base(count, settings)
 {
     this.x      = x;
     this.y      = y;
     this.yPrime = yPrime;
 }
Example #35
        // This method is due to Powell (http://en.wikipedia.org/wiki/Michael_J._D._Powell), but it is not what
        // is usually called Powell's Method (http://en.wikipedia.org/wiki/Powell%27s_method); Powell
        // developed that method in the 1960s; it was included in Numerical Recipes and is very popular.
        // This is a model trust algorithm developed by Powell in the 2000s. It typically uses many
        // fewer function evaluations, but does more intensive calculations between each evaluation.

        // This is basically the UOBYQA variant of Powell's new methods. It maintains a quadratic model
        // that interpolates between (d + 1) (d + 2) / 2 points. The model is trusted
        // within a given radius. At each step, it moves to the minimum of the model (or the boundary of
        // the trust region in that direction) and evaluates the function. The new value is incorporated
        // into the model and the trust region expanded or contracted depending on how accurate its
        // prediction of the function value was.

        // Papers on these methods are collected at http://mat.uc.pt/~zhang/software.html#powell_software.
        // The UOBYQA paper is here: http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.28.1756.
        // The NEWUOA paper is here: http://www.damtp.cam.ac.uk/user/na/NA_papers/NA2004_08.pdf.
        // The CONDOR system (http://www.applied-mathematics.net/optimization/CONDORdownload.html) is based on these same ideas.
        // The thesis of CONDOR's author (http://www.applied-mathematics.net/mythesis/index.html) was also helpful.

        // It should be very easy to extend this method to constrained optimization, either by incorporating the bounds into
        // the step limits or by mapping hyper-space into a hyper-cube.

        private static MultiExtremum FindMinimum_ModelTrust(MultiFunctor f, IList <double> x, double s, EvaluationSettings settings)
        {
            // Construct an initial model.
            QuadraticInterpolationModel model = QuadraticInterpolationModel.Construct(f, x, s);
            double trustRadius = s;

            while (f.EvaluationCount < settings.EvaluationBudget)
            {
                // Find the minimum point of the model within the trust radius
                double[] z             = model.FindMinimum(trustRadius);
                double   expectedValue = model.Evaluate(z);

                double deltaExpected = model.MinimumValue - expectedValue;

                // Evaluate the function at the suggested minimum
                double[] point = model.ConvertPoint(z);
                double   value = f.Evaluate(point);

                double delta = model.MinimumValue - value;
                double tol   = settings.ComputePrecision(value);

                // To terminate, we demand: a reduction, that the reduction be small, that the reduction be in line with its expected value, that we have not simply run up against the trust boundary,
                // and that the gradient is small.
                // I had wanted to demand delta > 0, but we run into some cases where delta keeps being very slightly negative, typically orders of magnitude less than tol,
                // causing the trust radius to shrink in an endless cycle that causes our approximation to ultimately go sour, even though terminating on the original
                // very slightly negative delta would have produced an accurate estimate. So we tolerate this case for now.
                if ((-tol / 4.0 <= delta) && (delta <= tol))
                {
                    // We demand that the model be decent, i.e. that the expected delta was within tol of the measured delta.
                    if (Math.Abs(delta - deltaExpected) <= tol)
                    {
                        // We demand that the step not just be small because it ran up against the trust radius. If it ran up against the trust radius,
                        // there is probably more to be had by continuing.
                        double zm = Blas1.dNrm2(z, 0, 1, z.Length);
                        if (zm < trustRadius)
                        {
                            // Finally, we demand that the gradient be small. You might think this was obvious since z was small, but if the Hessian is not positive definite
                            // the interplay of the Hessian and the gradient can produce a small z even if the model looks nothing like a quadratic minimum.
                            double gm = Blas1.dNrm2(model.GetGradient(), 0, 1, z.Length);
                            if (gm * zm <= tol)
                            {
                                if (f.IsNegated)
                                {
                                    value = -value;
                                }
                                return(new MultiExtremum(f.EvaluationCount, settings, point, value, Math.Max(Math.Abs(delta), 0.75 * tol), model.GetHessian()));
                            }
                        }
                    }
                }


                // There are now three decisions to be made:
                //   1. How to change the trust radius
                //   2. Whether to accept the new point
                //   3. Which existing point to replace

                // If the actual change was very far from the expected change, reduce the trust radius.
                // If the expected change did a good job of predicting the actual change, increase the trust radius.
                if ((delta < 0.25 * deltaExpected) /*|| (8.0 * deltaExpected < delta)*/)
                {
                    trustRadius = trustRadius / 2.0;
                }
                else if ((0.75 * deltaExpected <= delta) /*&& (delta <= 2.0 * deltaExpected)*/)
                {
                    trustRadius = 2.0 * trustRadius;
                }
                // It appears that the limits on delta being too large don't help, and even hurt if made too stringent.

                // Replace an old point with the new point.
                int iMax = 0; double fMax = model.values[0];
                int iBad = 0; double fBad = model.ComputeBadness(0, z, point, value);
                for (int i = 1; i < model.values.Length; i++)
                {
                    if (model.values[i] > fMax)
                    {
                        iMax = i; fMax = model.values[i];
                    }
                    double bad = model.ComputeBadness(i, z, point, value);
                    if (bad > fBad)
                    {
                        iBad = i; fBad = bad;
                    }
                }
                if (value < fMax)
                {
                    Debug.WriteLine("iMax={0}, iBad={1}", iMax, iBad);
                    model.ReplacePoint(iBad, point, z, value);
                }
                // There is some question about how best to choose which point to replace.
                // The largest value? The furthest away? The one closest to new min?
            }

            throw new NonconvergenceException();
        }
        /// <summary>
        /// Evaluates a definite integral with the given evaluation settings.
        /// </summary>
        /// <param name="integrand">The function to be integrated.</param>
        /// <param name="range">The range of integration.</param>
        /// <param name="settings">The settings which control the evaulation of the integal.</param>
        /// <returns>The result of the integral, which includes an estimated value and estimated uncertainty of that value.</returns>
        public static IntegrationResult Integrate(Func<double,double> integrand, Interval range, EvaluationSettings settings)
        {
            if (integrand == null) throw new ArgumentNullException("integrand");

            // remap infinite integrals to finite integrals

            if (Double.IsNegativeInfinity(range.LeftEndpoint) && Double.IsPositiveInfinity(range.RightEndpoint)) {

                // -infinity to +infinity

                // remap to (-pi/2,pi/2)
                Func<double, double> f0 = integrand;
                Func<double, double> f1 = delegate (double t) {
                    double x = Math.Tan(t);
                    return (f0(x) * (1.0 + x * x));
                };
                Interval r1 = Interval.FromEndpoints(-Global.HalfPI, Global.HalfPI);

                return (Integrate(f1, r1, settings));

            } else if (Double.IsPositiveInfinity(range.RightEndpoint)) {

                // finite to +infinity

                // remap to interval (-1,1)
                double a0 = range.LeftEndpoint;
                Func<double, double> f0 = integrand;
                Func<double, double> f1 = delegate (double t) {
                    double q = 1.0 - t;
                    double x = a0 + (1 + t) / q;
                    return (f0(x) * (2.0 / q / q));
                };
                Interval r1 = Interval.FromEndpoints(-1.0, 1.0);

                return (Integrate(f1, r1, settings));

            } else if (Double.IsNegativeInfinity(range.LeftEndpoint)) {

                // -infinity to finite

                // remap to interval (-1,1)
                double b0 = range.RightEndpoint;
                Func<double, double> f0 = integrand;
                Func<double, double> f1 = delegate (double t) {
                    double q = t + 1.0;
                    double x = b0 + (t - 1.0) / q;
                    return(f0(x) * (2.0 / q / q));
                };
                Interval r1 = Interval.FromEndpoints(-1.0, 1.0);

                return(Integrate(f1, r1, settings));

            }

            // normal integral over a finite range

            IAdaptiveIntegrator integrator = new GaussKronrodIntegrator(integrand, range);
            IntegrationResult result = Integrate_Adaptive(integrator, settings);
            return (result);
        }
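        // Usage sketch (illustrative only; the method name IntegrateGaussianExample is hypothetical): the
        // remappings above let an infinite range be passed directly. The Gaussian integral over
        // (-infinity, +infinity) should come out close to Sqrt(pi).
        public static void IntegrateGaussianExample()
        {
            EvaluationSettings settings = new EvaluationSettings() { RelativePrecision = 1.0E-10, EvaluationBudget = 10000 };
            Interval range = Interval.FromEndpoints(Double.NegativeInfinity, Double.PositiveInfinity);
            IntegrationResult result = Integrate(x => Math.Exp(-x * x), range, settings);
            Console.WriteLine("{0} ~= {1}", result.Value, Math.Sqrt(Math.PI));
        }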
Example #37
        private static double FindMinimum(
            Func<double,double> f,
            double a, double b,
            double u, double fu, double v, double fv, double w, double fw,
            EvaluationSettings settings, int count
        )
        {
            double tol = 0.0;

            while (count < settings.EvaluationBudget) {

                Console.WriteLine("n={0} tol={1}", count, tol);
                Console.WriteLine("[{0}  f({1})={2}  f({3})={4}  f({5})={6}  {7}]", a, u, fu, v, fv, w, fw, b);

                Debug.Assert(a < b);
                Debug.Assert((a <= u) && (u <= b));
                Debug.Assert((fu <= fv) && (fv <= fw));

                // Expected final situation is a<tol><tol>u<tol><tol>b, leaving no point left to evaluate that is not within tol of an existing point.

                if ((b - a) <= 4.0 * tol) return (u);

                // While a < u < b is guaranteed, a < v, w < b is not guaranteed, since the bracket can sometimes be made tight enough to exclude v or w.
                // For example, if u < v < w, then we can set b = v, placing w outside the bracket.

                double x, fpp;
                ParabolicFit(u, fu, v, fv, w, fw, out x, out fpp);
                Console.WriteLine("parabolic x={0} f''={1}", x, fpp);

                if (Double.IsNaN(fpp) || (fpp <= 0.0) || (x < a) || (x > b)) {

                    // the parabolic fit didn't work out, so do a golden section reduction instead

                    // to get the most reduction of the bracket, pick the larger of au and ub
                    // for self-similarity, pick a point inside it that divides it into two segments in the golden section ratio,
                    // i.e. 0.3820 = \frac{1}{\phi + 1} and 0.6180 = \frac{\phi}{\phi+1}
                    // put the smaller segment closer to u so that x is closer to u, the best minimum so far

                    double au = u - a;
                    double ub = b - u;

                    if (au > ub) {
                        x = u - au / (AdvancedMath.GoldenRatio + 1.0);
                    } else {
                        x = u + ub / (AdvancedMath.GoldenRatio + 1.0);
                    }

                    Console.WriteLine("golden section x={0}", x);

                }

                // ensure we don't evaluate within tolerance of an existing point
                if (Math.Abs(x - u) < tol) { Console.WriteLine("shift from u (x={0})", x); x = (x > u) ? u + tol : u - tol; }
                if ((x - a) < tol) { Console.WriteLine("shift from a (x={0})", x); x = a + tol; }
                if ((b - x) < tol) { Console.WriteLine("shift from b (x={0})", x); x = b - tol; }

                count++;
                double fx = f(x);
                Console.WriteLine("f({0}) = {1}", x, fx);

                Console.WriteLine("delta={0}", fu - fx);

                if (fx < fu) {

                    // the new point is lower than all the others; this is success

                    // u now becomes a bracket point
                    if (u < x) {
                        a = u;
                    } else {
                        b = u;
                    }

                    // x -> u -> v -> w
                    w = v; fw = fv;
                    v = u; fv = fu;
                    u = x; fu = fx;

                } else {

                    // x now becomes a bracket point
                    if (x < u) {
                        a = x;
                    } else {
                        b = x;
                    }

                    if (fx < fv) {

                        // the new point is higher than u, but still lower than v and w
                        // this isn't what we expected, but we have lower points than before

                        // x -> v -> w
                        w = v; fw = fv;
                        v = x; fv = fx;

                    } else if (fx < fw) {

                        // x -> w
                        w = x; fw = fx;

                    } else {

                        // the new point is higher than all our other points; this is the worst case
                        // we might still want to replace w with x because
                        // (i) otherwise a parabolic fit will reproduce the same x and
                        // (ii) w is quite likely far outside the new bracket and not telling us much about the behavior near u

                        Console.WriteLine("bad point");
                        //throw new NotImplementedException();
                    }

                }

                // if the user has specified a tolerance, use it
                if ((settings.RelativePrecision > 0.0 || settings.AbsolutePrecision > 0.0)) {
                    tol = Math.Max(Math.Abs(u) * settings.RelativePrecision, settings.AbsolutePrecision);
                } else {
                    // otherwise, try to get the tolerance from the curvature
                    if (fpp > 0.0) {
                        tol = Math.Sqrt(2.0 * (Math.Abs(fu) * 1.0E-14 + 1.0E-28) / fpp);
                    } else {
                        // but if we don't have a usable curvature either, wing it
                    }
                }

            }

            throw new NonconvergenceException();
        }
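        // The routine above leans on ParabolicFit, which is not shown in this snippet. Below is a plausible
        // sketch of what such a helper computes: the vertex x and second derivative fpp of the unique parabola
        // through (u,fu), (v,fv), (w,fw). This is an assumption about its role, not the library's actual code.
        private static void ParabolicFitSketch(
            double u, double fu, double v, double fv, double w, double fw,
            out double x, out double fpp
        )
        {
            // divided differences of the interpolating quadratic
            double d1 = (fv - fu) / (v - u);
            double d2 = (fw - fv) / (w - v);
            // estimated curvature; a NaN or non-positive value signals a failed fit to the caller
            fpp = 2.0 * (d2 - d1) / (w - u);
            // vertex of the parabola q(t) = fu + d1 (t - u) + (fpp / 2) (t - u) (t - v)
            x = 0.5 * (u + v) - d1 / fpp;
        }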
        // the drivers
        private static IntegrationResult Integrate_Adaptive(IAdaptiveIntegrator integrator, EvaluationSettings s)
        {
            LinkedList<IAdaptiveIntegrator> list = new LinkedList<IAdaptiveIntegrator>();
            list.AddFirst(integrator);

            int n = integrator.EvaluationCount;

            while (true) {

                // go through the intervals, adding estimates (and errors)
                // and noting which contributes the most error
                // keep track of the total value and uncertainty
                UncertainValue vTotal = new UncertainValue();
                //double v = 0.0;
                //double u = 0.0;

                // keep track of which node contributes the most error
                LinkedListNode<IAdaptiveIntegrator> maxNode = null;
                double maxError = 0.0;

                LinkedListNode<IAdaptiveIntegrator> node = list.First;
                while (node != null) {

                    IAdaptiveIntegrator i = node.Value;

                    UncertainValue v = i.Estimate;
                    vTotal += v;
                    //UncertainValue uv = i.Estimate;
                    //v += uv.Value;
                    //u += uv.Uncertainty;

                    if (v.Uncertainty > maxError) {
                        maxNode = node;
                        maxError = v.Uncertainty;
                    }

                    node = node.Next;

                }

                // if our error is small enough, return
                if ((vTotal.Uncertainty <= Math.Abs(vTotal.Value) * s.RelativePrecision) || (vTotal.Uncertainty <= s.AbsolutePrecision)) {
                    return (new IntegrationResult(vTotal, n, s));
                }
                //if ((vTotal.Uncertainty <= Math.Abs(vTotal.Value) * s.RelativePrecision) || (vTotal.Uncertainty <= s.AbsolutePrecision)) {
                //    return (new IntegrationResult(vTotal.Value, n));
                //}

                // if our evaluation count is too big, throw
                if (n > s.EvaluationBudget) throw new NonconvergenceException();

                // subdivide the interval with the largest error
                IEnumerable<IAdaptiveIntegrator> divisions = maxNode.Value.Divide();
                foreach (IAdaptiveIntegrator division in divisions) {
                    list.AddBefore(maxNode, division);
                    n += division.EvaluationCount;
                    //v2 += division.Estimate;
                }
                list.Remove(maxNode);

            }
        }
Example #39
        public double FindMinimum(Func<double, double> f, double x, double d, EvaluationSettings settings)
        {
            // evaluate at x and x + d
            double fx = f(x);
            double y = x + d;
            double fy = f(y);
            int count = 2;

            // if we stepped uphill, reverse direction of steps and exchange x & y
            if (fy > fx) {
                double t = x; x = y; y = t;
                t = fx; fx = fy; fy = t;
                d = -d;
            }

            // we now know f(x) >= f(y) and we are stepping downhill
            // continue stepping until we step uphill
            double z, fz;
            while (true) {

                if (count >= settings.EvaluationBudget) throw new NonconvergenceException();

                z = y + d;
                fz = f(z);
                count++;

                Console.WriteLine("f({0})={1} f({2})={3} f({4})={5} d={6}", x, fx, y, fy, z, fz, d);

                if (fz > fy) break;

                // increase the step size each time
                d = AdvancedMath.GoldenRatio * d;

                // x <- y <- z
                x = y; fx = fy; y = z; fy = fz;

            }

            // x and z now bracket a local minimum, with y the lowest point evaluated so far
            double a = Math.Min(x, z); double b = Math.Max(x, z);
            if (fz < fx) { double t = x; x = z; z = t; t = fx; fx = fz; fz = t; }

            return (FindMinimum(f, a, b, y, fy, x, fx, z, fz, settings, count));
        }
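        // Usage sketch (illustrative only): the enclosing class is not shown in this snippet, so the name
        // Minimizer below is hypothetical. Start at x = 0 with an initial step of 1 and walk downhill toward
        // the minimum of a simple quadratic at x = 2.
        public static void FindMinimumExample()
        {
            EvaluationSettings settings = new EvaluationSettings() { RelativePrecision = 1.0E-6, EvaluationBudget = 1000 };
            Minimizer minimizer = new Minimizer(); // hypothetical enclosing type of the method above
            double xMin = minimizer.FindMinimum(x => (x - 2.0) * (x - 2.0) + 1.0, 0.0, 1.0, settings);
            Console.WriteLine("minimum near x = {0}", xMin);
        }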
 // the public API
 /// <summary>
 /// Evaluates a definite integral.
 /// </summary>
 /// <param name="integrand">The function to be integrated.</param>
 /// <param name="range">The range of integration.</param>
 /// <returns>A numerical estimate of the given integral.</returns>
 /// <remarks>
 /// <para>Integral values are accurate to within about a digit of full double precision.</para>
 /// <para>To do integrals over infinite regions, simply set the lower bound of the <paramref name="range"/>
 /// to <see cref="System.Double.NegativeInfinity"/> or the upper bound to <see cref="System.Double.PositiveInfinity"/>.</para>
 /// <para>Our numerical integrator uses a Gauss-Kronrod rule that can integrate efficiently,
 /// combined with an adaptive strategy that limits function
 /// evaluations to those regions required to achieve the desired accuracy.</para>
 /// <para>Our integrator handles smooth functions extremely efficiently. It handles integrands with
 /// discontinuities, or discontinuities of derivatives, at the price of slightly more evaluations
 /// of the integrand. It handles oscillatory functions, so long as not too many periods contribute
 /// significantly to the integral. It can integrate logarithmic and mild power-law singularities.
 /// </para>
 /// <para>Strong power-law singularities will cause the algorithm to fail with a NonconvergenceException.
 /// This is unavoidable for essentially any double-precision numerical integrator. Consider, for example,
 /// the integrable singularity 1/&#x221A;x. Since
 /// &#x3B5; = &#x222B;<sub>0</sub><sup>&#x3B4;</sup> x<sup>-1/2</sup> dx = 2 &#x3B4;<sup>1/2</sup>,
 /// points within &#x3B4; &#x223C; 10<sup>-16</sup> of the end-points, which is as close as you can get to
 /// a point in double precision without being on top of it, contribute at the &#x3B5; &#x223C; 10<sup>-8</sup>
 /// level to our integral, well beyond the limit that nearly-full double precision requires. Said differently,
 /// to know the value of the integral to &#x3B5; &#x223C; 10<sup>-16</sup> precision, we would need to
 /// evaluate the contributions of points within &#x3B4; &#x223C; 10<sup>-32</sup> of the endpoints,
 /// far closer than we can get.</para>
 /// <para>If you need to evaluate an integral with such a strong singularity, make an analytic
 /// change of variable to absorb the singularity before attempting numerical integration. For example,
 /// to evaluate I = &#x222B;<sub>0</sub><sup>b</sup> f(x) x<sup>-1/2</sup> dx, substitute y = x<sup>1/2</sup>
 /// to obtain I = 2 &#x222B;<sub>0</sub><sup>&#x221A;b</sup> f(y<sup>2</sup>) dy.</para>
 /// </remarks>
 public static double Integrate(Func<double, double> integrand, Interval range)
 {
     EvaluationSettings settings = new EvaluationSettings();
     return (Integrate(integrand, range, settings).Value);
 }
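 // Worked sketch of the change of variable recommended in the remarks above (illustrative only; the name
 // IntegrateWithSqrtSingularity is hypothetical): I = &#x222B; f(x) / &#x221A;x dx over (0, b) becomes, with
 // y = &#x221A;x, I = 2 &#x222B; f(y&#xB2;) dy over (0, &#x221A;b), which has no singularity and can be handed
 // to Integrate directly.
 public static double IntegrateWithSqrtSingularity(Func<double, double> f, double b)
 {
     Func<double, double> g = y => 2.0 * f(y * y);
     return (Integrate(g, Interval.FromEndpoints(0.0, Math.Sqrt(b))));
 }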
        /// <summary>
        /// Minimizes a function on a multi-dimensional space in the vicinity of a given point, subject to the given settings.
        /// </summary>
        /// <param name="f">The function.</param>
        /// <param name="x">The starting point for the search.</param>
        /// <param name="settings">The evaluation settings.</param>
        /// <returns>The minimum.</returns>
        /// <exception cref="ArgumentNullException"><paramref name="f"/>, <paramref name="x"/>, or <paramref name="settings"/> is null.</exception>
        /// <exception cref="NonconvergenceException">The minimum was not found to the required precision within the budgeted number of function evaluations.</exception>
        internal static SpaceExtremum FindMinimum(Func <double[], double> f, double[] x, EvaluationSettings settings)
        {
            if (f == null)
            {
                throw new ArgumentNullException("f");
            }
            if (x == null)
            {
                throw new ArgumentNullException("x");
            }
            if (settings == null)
            {
                throw new ArgumentNullException("settings");
            }

            int d = x.Length;

            // put the function into a Functor we will use for line searches
            LineSearchFunctor fo = new LineSearchFunctor(f);

            // keep track of the (conjugate) minimization directions
            double[][] Q = new double[d][];
            for (int i = 0; i < d; i++)
            {
                Q[i] = new double[d];
                for (int j = 0; j < d; j++)
                {
                    Q[i][j] = 0.0;
                }
                // pick a step size in each direction that represents a fraction of the input value
                Q[i][i] = (1.0 / 16.0) * (Math.Abs(x[i]) + (1.0 / 16.0));
            }
            // keep track of the curvature in these directions
            double[] r = new double[d];

            // keep track of the function value
            double y = f(x);

            // keep track of the total number of evaluations
            //int count = 0;

            bool skip = false;

            // iterate until convergence
            while (fo.EvaluationCount < settings.EvaluationBudget)
            {
                // remember our starting position
                double[] x0 = new double[d];
                Array.Copy(x, x0, d);
                double y0 = y;

                // keep track of direction of largest decrease
                double max_dy = 0.0;
                int    max_i  = 0;

                // now minimize in each direction
                for (int i = 0; i < d; i++)
                {
                    // if we placed the net direction in the first slot last time,
                    // we are already at the minimum along that direction, so don't
                    // minimize along it
                    if (skip)
                    {
                        skip = false;
                        continue;
                    }
                    //if ((n > 0) && (i == 0) && (max_i == 0)) continue;

                    //Console.WriteLine("i = {0}", i);
                    //WriteVector(Q[i]);

                    // form a line function
                    //LineFunction f1 = new LineFunction(f, x, Q[i]);
                    fo.Origin    = x;
                    fo.Direction = Q[i];

                    // minimize it
                    Extremum m = FindMinimum(fo, 0.0, y, 1.0, new ExtremumSettings()
                    {
                        EvaluationBudget = settings.EvaluationBudget, AbsolutePrecision = 0.0, RelativePrecision = 0.0
                    });
                    //LineExtremum m = FindMinimum(new Func<double,double>(f1.Evaluate), 0.0, y, 1.0);

                    // add to the evaluation count
                    //count += f1.Count;

                    // update the current position
                    x = fo.ComputeLocation(m.Location);
                    //x = f1.Position(m.Location);
                    //WriteVector(x);
                    r[i] = m.Curvature;

                    // keep track of how much the function dropped, and
                    // if this is the direction of largest decrease
                    double dy = y - m.Value;
                    //Console.WriteLine("dy = {0}", dy);
                    if (dy > max_dy)
                    {
                        max_dy = dy;
                        max_i  = i;
                    }
                    y = m.Value;
                }

                //Console.WriteLine("max_i = {0}, max_dy = {1}", max_i, max_dy);
                //Console.WriteLine("y0 = {0}, y = {1}", y0, y);

                // figure out the net direction we have moved
                double[] dx = new double[d];
                for (int i = 0; i < d; i++)
                {
                    dx[i] = x[i] - x0[i];
                }
                //Console.WriteLine("Finish:");
                //WriteVector(x);
                //Console.WriteLine("Net direction:");
                //WriteVector(dx);

                // check termination criteria
                // we do this before minimizing in the net direction because if dx=0 it loops forever
                double Dy = Math.Abs(y0 - y);
                if ((Dy < settings.AbsolutePrecision) || (2.0 * Dy < (Math.Abs(y) + Math.Abs(y0)) * settings.RelativePrecision))
                {
                    SymmetricMatrix A = ComputeCurvature(f, x);
                    return(new SpaceExtremum(x, y, A));
                }

                // attempt a minimization in the net direction
                fo.Origin    = x;
                fo.Direction = dx;
                //LineFunction f2 = new LineFunction(f, x, dx);
                //LineExtremum mm = FindMinimum(new Func<double,double>(f2.Evaluate), 0.0, y, 1.0);
                Extremum mm = FindMinimum(fo, 0.0, y, 1.0, new ExtremumSettings()
                {
                    EvaluationBudget = settings.EvaluationBudget, RelativePrecision = 0.0, AbsolutePrecision = 0.0
                });
                //count += f2.Count;
                //x = f2.Position(mm.Location);
                x = fo.ComputeLocation(mm.Location);
                y = mm.Value;

                // rotate this direction into the direction set

                /*
                 * for (int i = 0; i < (d - 1); i++) {
                 *  Q[i] = Q[i + 1];
                 *  r[i] = r[i + 1];
                 * }
                 * Q[d - 1] = dx;
                 * r[d - 1] = mm.Curvature;
                 */
                // this is the basic Powell procedure, and it leads to linear dependence

                // replace the direction of largest decrease with the net direction
                Q[max_i] = dx;
                r[max_i] = mm.Curvature;
                if (max_i == 0)
                {
                    skip = true;
                }
                // this is Powell's modification to avoid linear dependence

                // reset
            }

            throw new NonconvergenceException();
        }
Example #42
 /// <summary>
 /// Finds the maximum of a function within the given volume, subject to the given evaluation constraints.
 /// </summary>
 /// <param name="function">The function.</param>
 /// <param name="volume">The volume to search.</param>
 /// <param name="settings">The evaluation constraints to apply.</param>
 /// <returns>The global maximum.</returns>
 public static MultiExtremum FindGlobalMaximum(Func <IList <double>, double> function, IList <Interval> volume, EvaluationSettings settings)
 {
     return(FindGlobalExtremum(function, volume, settings, true));
 }
Example #43
        public void DistributionProbabilityIntegral()
        {
            Random rng = new Random(4);

            // if integral is very small, we still want to get it very accurately
            EvaluationSettings settings = new EvaluationSettings();
            settings.AbsolutePrecision = 0.0;

            foreach (Distribution distribution in distributions) {

                if (distribution is TriangularDistribution) continue;

                for (int i = 0; i < 3; i++) {
                    double x;
                    if (Double.IsNegativeInfinity(distribution.Support.LeftEndpoint) && Double.IsPositiveInfinity(distribution.Support.RightEndpoint)) {
                        // pick an exponentially distributed random point with a random sign
                        double y = rng.NextDouble();
                        x = - Math.Log(y);
                        if (rng.NextDouble() < 0.5) x = -x;
                    } else if (Double.IsPositiveInfinity(distribution.Support.RightEndpoint)) {
                        // pick an exponentially distributed random point
                        double y = rng.NextDouble();
                        x = distribution.Support.LeftEndpoint - Math.Log(y);
                    } else {
                        // pick a random point within the support
                        x = distribution.Support.LeftEndpoint + rng.NextDouble() * distribution.Support.Width;
                    }
                    Console.WriteLine("{0} {1}", distribution.GetType().Name, x);
                    double P = FunctionMath.Integrate(distribution.ProbabilityDensity, Interval.FromEndpoints(distribution.Support.LeftEndpoint, x), settings).Value;
                    double Q = FunctionMath.Integrate(distribution.ProbabilityDensity, Interval.FromEndpoints(x, distribution.Support.RightEndpoint), settings).Value;
                    if (!TestUtilities.IsNearlyEqual(P + Q, 1.0)) {
                        // the numerical integral for the triangular distribution can be inaccurate, because
                        // its locally low-polynomial behavior fools the integration routine into thinking it need
                        // not integrate as much near the inflection point as it must; this is a problem
                        // of the integration routine (or arguably the integral), not the triangular distribution,
                        // so skip it here
                        Console.WriteLine("skip (P={0}, Q={1})", P, Q);
                        continue;
                    }

                    Console.WriteLine("  {0} v. {1}", P, distribution.LeftProbability(x));
                    Console.WriteLine("  {0} v. {1}", Q, distribution.RightProbability(x));

                    Assert.IsTrue(TestUtilities.IsNearlyEqual(P, distribution.LeftProbability(x)));
                    Assert.IsTrue(TestUtilities.IsNearlyEqual(Q, distribution.RightProbability(x)));
                }

            }
        }
Example #44
        public void Bukin()
        {
            // The Bukin function has a narrow valley, not aligned with any axis, punctuated with many tiny "wells" along its bottom.
            // The deepest well is at (-10, 1), where the function value is 0.
            Func<IList<double>, double> function = (IList<double> x) => 100.0 * Math.Sqrt(Math.Abs(x[1] - 0.01 * x[0] * x[0])) + 0.01 * Math.Abs(x[0] + 10.0);

            IList<Interval> box = new Interval[] { Interval.FromEndpoints(-15.0, 0.0), Interval.FromEndpoints(-3.0, 3.0) };

            EvaluationSettings settings = new EvaluationSettings() { RelativePrecision = 0.0, AbsolutePrecision = 1.0E-4, EvaluationBudget = 1000000 };
            /*
            settings.Update += (object result) => {
                MultiExtremum e = (MultiExtremum) result;
                Console.WriteLine("After {0} evaluations, best value {1}", e.EvaluationCount, e.Value);
            };
            */
            MultiExtremum minimum = MultiFunctionMath.FindGlobalMinimum(function, box, settings);

            Console.WriteLine(minimum.EvaluationCount);
            Console.WriteLine("{0} ({1})", minimum.Value, minimum.Precision);
            Console.WriteLine("{0} {1}", minimum.Location[0], minimum.Location[1]);

            // We do not end up finding the global minimum.
        }
        private static UncertainValue Integrate_MonteCarlo(MultiFunctor f, CoordinateTransform[] map, IList<Interval> box, EvaluationSettings settings)
        {
            int d = box.Count;

            // Use a Sobol quasi-random sequence. This gives us 1/N accuracy instead of 1/\sqrt{N} accuracy.
            //VectorGenerator g = new RandomVectorGenerator(d, new Random(314159265));
            VectorGenerator g = new SobolVectorGenerator(d);

            // Start with a trivial Lepage grid.
            // We will increase the grid size every few cycles.
            // My tests indicate that trying to increase every cycle or even every other cycle is too often.
            // This makes sense, because we have no reason to believe our new grid will be better until we
            // have substantially more evaluations per grid cell than we did for the previous grid.
            LePageGrid grid = new LePageGrid(box, 1);
            int refineCount = 0;

            // Start with a reasonable number of evaluations per cycle that increases with the dimension.
            int cycleCount = 8 * d;

            //double lastValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);

            // Each cycle consists of three sets of evaluations.
            // At first I did this with just two sets and used the difference between the two sets as an error estimate.
            // I found that it was pretty common for that difference to be low just by chance, causing error underestimation.
            double value1 = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
            double value2 = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
            double value3 = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);

            while (f.EvaluationCount < settings.EvaluationBudget) {

                // Take the largest deviation as the error.
                double value = (value1 + value2 + value3) / 3.0;
                double error = Math.Max(Math.Abs(value1 - value3), Math.Max(Math.Abs(value1 - value2), Math.Abs(value2 - value3)));
                Debug.WriteLine("{0} {1} {2}", f.EvaluationCount, value, error);

                // Check for convergence.
                if ((error <= settings.AbsolutePrecision) || (error <= Math.Abs(value) * settings.RelativePrecision)) {
                    return (new UncertainValue(value, error));
                }

                // Do more cycles. In order for new sets to be equal-sized, one of those must be at the current count and the next at twice that.
                double smallValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
                cycleCount *= 2;
                double bigValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);

                // Combine all the cycles into new ones with twice the number of evaluations each.
                value1 = (value1 + value2) / 2.0;
                value2 = (value3 + smallValue) / 2.0;
                value3 = bigValue;

                //double currentValue = Integrate_MonteCarlo_Cycle(f, map, g, grid, cycleCount);
                //double error = Math.Abs(currentValue - lastValue);
                //double value = (currentValue + lastValue) / 2.0;

                //lastValue = value;

                // Increase the number of evaluations for the next cycle.
                //cycleCount *= 2;

                // Refine the grid for the next cycle.
                refineCount++;
                if (refineCount == 2) {
                    Debug.WriteLine("Replacing grid with {0} bins after {1} evaluations", grid.BinCount, grid.EvaluationCount);
                    grid = grid.ComputeNewGrid(grid.BinCount * 2);
                    refineCount = 0;
                }

            }

            throw new NonconvergenceException();
        }
Example #46
 public RungeKutta54Stepper(Func <double, IList <double>, IList <double> > rhs, double x0, IList <double> y0, EvaluationSettings settings) : base(rhs, x0, y0, settings)
 {
     YPrime = Evaluate(X, Y);
     DeltaX = 1.0;
 }
Example #47
        private static MultiExtremum FindGlobalExtremum(Func <IList <double>, double> function, IList <Interval> volume, EvaluationSettings settings, bool negate)
        {
            if (function == null)
            {
                throw new ArgumentNullException("function");
            }
            if (volume == null)
            {
                throw new ArgumentNullException("volume");
            }
            MultiFunctor f = new MultiFunctor(function, negate);
            DifferentialEvolutionSettings deSettings = GetDefaultSettings(settings, volume.Count);
            MultiExtremum extremum = FindGlobalExtremum(f, volume, deSettings);

            return(extremum);
        }
Example #48
        public void MinimizePerturbedQuadratic2D()
        {
            EvaluationSettings s = new EvaluationSettings() { EvaluationBudget = 100, RelativePrecision = 1.0E-10 };

            Func<IList<double>, double> f = (IList<double> x) =>
                1.0 + 2.0 * MoreMath.Sqr(x[0] - 3.0) + 4.0 * (x[0] - 3.0) * (x[1] - 5.0) + 6.0 * MoreMath.Sqr(x[1] - 5.0) +
                7.0 * MoreMath.Pow(x[0] - 3.0, 4) + 8.0 * MoreMath.Pow(x[1] - 5.0, 4);

            MultiExtremum m = MultiFunctionMath.FindLocalMinimum(f, new double[] { 1.0, 1.0 }, s);

            Assert.IsTrue(m.EvaluationCount <= s.EvaluationBudget);
            Assert.IsTrue(m.Dimension == 2);
            Assert.IsTrue(TestUtilities.IsNearlyEqual(m.Value, 1.0, s));
            Assert.IsTrue(TestUtilities.IsNearlyEqual(m.Location, new ColumnVector(3.0, 5.0), new EvaluationSettings() { RelativePrecision = Math.Sqrt(s.RelativePrecision) }));
        }
Example #49
        public void Griewank()
        {
            // See http://mathworld.wolfram.com/GriewankFunction.html

            for (int n = 2; n < 8; n++) {

                Console.WriteLine(n);

                Func<IList<double>, double> function = (IList<double> x) => {
                    double s = 0.0;
                    double p = 1.0;
                    for (int i = 0; i < x.Count; i++) {
                        s += MoreMath.Sqr(x[i]);
                        p *= Math.Cos(x[i] / Math.Sqrt(i + 1.0));
                    }
                    return (1.0 + s / 4000.0 - p);
                };

                Interval[] box = new Interval[n];
                for (int i = 0; i < n; i++) box[i] = Interval.FromEndpoints(-100.0, 100.0);

                EvaluationSettings settings = new EvaluationSettings() { AbsolutePrecision = 1.0E-6, EvaluationBudget = 1000000 };

                MultiExtremum minimum = MultiFunctionMath.FindGlobalMinimum(function, box, settings);

                Console.WriteLine(minimum.Dimension);
                Console.WriteLine(minimum.EvaluationCount);
                Console.WriteLine("{0} ({1}) ?= 0.0", minimum.Value, minimum.Precision);
                Console.WriteLine("{0} {1} ...", minimum.Location[0], minimum.Location[1]);

                // We usually find the minimum at 0, but sometimes land a valley or two over.

            }
        }
Example #50
        /// <summary>
        /// Finds a local minimum of a multi-dimensional function in the vicinity of the given starting location, subject to the given evaluation constraints.
        /// </summary>
        /// <param name="function">The multi-dimensional function to minimize.</param>
        /// <param name="start">The starting location for the search.</param>
        /// <param name="settings">The evaluation settings that govern the search for the minimum.</param>
        /// <returns>The local minimum.</returns>
        /// <remarks>
        /// <para>The Hessian (matrix of second derivatives) returned with the minimum is an approximation that is constructed in the course of search. It should be
        /// considered a crude approximation, and may not even be that if the minimum is highly non-quadratic.</para>
        /// <para>If you have a constrained minimization problem, require a high-precision solution, and do not have a good initial guess, consider first feeding
        /// your constrained problem into <see cref="FindGlobalMinimum"/>, which supports constraints but gives relatively lower precision solutions, then
        /// feeding the result of that method into this method, which finds relatively high-precision solutions but does not support constraints.</para>
        /// </remarks>
        /// <exception cref="NonconvergenceException">The number of function evaluations required exceeded the evaluation budget.</exception>
        public static MultiExtremum FindLocalMinimum(Func <IList <double>, double> function, IList <double> start, EvaluationSettings settings)
        {
            if (function == null)
            {
                throw new ArgumentNullException("function");
            }
            if (start == null)
            {
                throw new ArgumentNullException("start");
            }
            if (settings == null)
            {
                throw new ArgumentNullException("settings");
            }

            return(FindLocalExtremum(function, start, settings, false));
        }
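        // Usage sketch of the workflow suggested in the remarks above (illustrative only; the method name
        // GlobalThenLocalExample is hypothetical): run the lower-precision global search over a box first,
        // then polish its result with the local search.
        public static void GlobalThenLocalExample()
        {
            Func<IList<double>, double> f = (IList<double> x) => MoreMath.Sqr(x[0] - 1.0) + MoreMath.Sqr(x[1] + 2.0);
            IList<Interval> box = new Interval[] { Interval.FromEndpoints(-10.0, 10.0), Interval.FromEndpoints(-10.0, 10.0) };

            EvaluationSettings globalSettings = new EvaluationSettings() { AbsolutePrecision = 1.0E-2, EvaluationBudget = 100000 };
            MultiExtremum rough = MultiFunctionMath.FindGlobalMinimum(f, box, globalSettings);

            // hand the rough location to the local minimizer as its starting point
            double[] start = new double[] { rough.Location[0], rough.Location[1] };
            EvaluationSettings localSettings = new EvaluationSettings() { RelativePrecision = 1.0E-10, EvaluationBudget = 100000 };
            MultiExtremum polished = MultiFunctionMath.FindLocalMinimum(f, start, localSettings);

            Console.WriteLine("{0} at ({1}, {2})", polished.Value, polished.Location[0], polished.Location[1]);
        }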
Example #51
        public void PackCirclesInSquare()
        {
            // Put n points into the unit square. Place them so as to maximize the minimum distance between them.
            // See http://en.wikipedia.org/wiki/Circle_packing_in_a_square, http://hydra.nat.uni-magdeburg.de/packing/csq/csq.html
            // This is a great test problem because it is simple to understand, hard to solve,
            // solutions are known/proven for many n, and it covers many dimensions.

            double[] solutions = new double[] {
                Double.NaN,
                Double.NaN,
                Math.Sqrt(2.0), /* at opposite corners */
                Math.Sqrt(6.0) - Math.Sqrt(2.0),
                1.0, /* at each corner */
                Math.Sqrt(2.0) / 2.0 /* at each corner and in center */,
                Math.Sqrt(13.0) / 6.0,
                4.0 - 2.0 * Math.Sqrt(3.0),
                (Math.Sqrt(6.0) - Math.Sqrt(2.0)) / 2.0,
                1.0 / 2.0, /* 3 X 3 grid */
                0.421279543983903432768821760651,
                0.398207310236844165221512929748,
                Math.Sqrt(34.0) / 15.0,
                0.366096007696425085295389370603,
                2.0 / (4.0 + Math.Sqrt(3.0)),
                2.0 / (Math.Sqrt(6.0) + 2.0 + Math.Sqrt(2.0)),
                1.0 / 3.0
            };

            //int n = 11;
            for (int n = 2; n < 8; n++)
            {

                Console.WriteLine("n={0}", n);

                Func<IList<double>, double> function = (IList<double> x) => {
                    // Interpret coordinates as (x_1, y_1, x_2, y_2, \cdots, x_n, y_n)
                    // Iterate over all pairs of points, finding the smallest distance between any pair of points.
                    double sMin = Double.MaxValue;
                    for (int i = 0; i < n; i++) {
                        for (int j = 0; j < i; j++) {
                            double s = MoreMath.Hypot(x[2 * i] - x[2 * j], x[2 * i + 1] - x[2 * j + 1]);
                            if (s < sMin) sMin = s;
                        }
                    }
                    return (sMin);
                };

                IList<Interval> box = new Interval[2 * n];
                for (int i = 0; i < box.Count; i++) box[i] = Interval.FromEndpoints(0.0, 1.0);

                EvaluationSettings settings = new EvaluationSettings() { RelativePrecision = 1.0E-4, AbsolutePrecision = 1.0E-6, EvaluationBudget = 10000000 };

                MultiExtremum maximum = MultiFunctionMath.FindGlobalMaximum(function, box, settings);

                Console.WriteLine(maximum.EvaluationCount);
                Console.WriteLine("{0} {1}", solutions[n], maximum.Value);

                Assert.IsTrue(maximum.Dimension == 2 * n);
                Assert.IsTrue(TestUtilities.IsNearlyEqual(maximum.Value, solutions[n], new EvaluationSettings() { AbsolutePrecision = 2.0 * maximum.Precision }));

            }
        }
Example #52
        // the public API

        /// <summary>
        /// Evaluates a definite integral.
        /// </summary>
        /// <param name="integrand">The function to be integrated.</param>
        /// <param name="range">The range of integration.</param>
        /// <returns>A numerical estimate of the given integral.</returns>
        /// <remarks>
        /// <para>Integral values are accurate to within about a digit of full double precision.</para>
        /// <para>To do integrals over infinite regions, simply set the lower bound of the <paramref name="range"/>
        /// to <see cref="System.Double.NegativeInfinity"/> or the upper bound to <see cref="System.Double.PositiveInfinity"/>.</para>
        /// <para>Our numerical integrator uses a Gauss-Kronrod rule that can integrate efficiently,
        /// combined with an adaptive strategy that limits function
        /// evaluations to those regions required to achieve the desired accuracy.</para>
        /// <para>Our integrator handles smooth functions extremely efficiently. It handles integrands with
        /// discontinuities, or discontinuities of derivatives, at the price of slightly more evaluations
        /// of the integrand. It handles oscillatory functions, so long as not too many periods contribute
        /// significantly to the integral. It can integrate logarithmic and mild power-law singularities.
        /// </para>
        /// <para>Strong power-law singularities will cause the algorithm to fail with a NonconvergenceException.
        /// This is unavoidable for essentially any double-precision numerical integrator. Consider, for example,
        /// the integrable singularity 1/&#x221A;x. Since
        /// &#x3B5; = &#x222B;<sub>0</sub><sup>&#x3B4;</sup> x<sup>-1/2</sup> dx = 2 &#x3B4;<sup>1/2</sup>,
        /// points within &#x3B4; &#x223C; 10<sup>-16</sup> of the end-points, which is as close as you can get to
        /// a point in double precision without being on top of it, contribute at the &#x3B5; &#x223C; 10<sup>-8</sup>
        /// level to our integral, well beyond the limit that nearly-full double precision requires. Said differently,
        /// to know the value of the integral to &#x3B5; &#x223C; 10<sup>-16</sup> precision, we would need to
        /// evaluate the contributions of points within &#x3B4; &#x223C; 10<sup>-32</sup> of the endpoints,
        /// far closer than we can get.</para>
        /// <para>If you need to evaluate an integral with such a strong singularity, make an analytic
        /// change of variable to absorb the singularity before attempting numerical integration. For example,
        /// to evaluate I = &#x222B;<sub>0</sub><sup>b</sup> f(x) x<sup>-1/2</sup> dx, substitute y = x<sup>1/2</sup>
        /// to obtain I = 2 &#x222B;<sub>0</sub><sup>&#x221A;b</sup> f(y<sup>2</sup>) dy.</para>
        /// </remarks>
        public static double Integrate(Func <double, double> integrand, Interval range)
        {
            EvaluationSettings settings = new EvaluationSettings();

            return(Integrate(integrand, range, settings).Value);
        }
Example #53
        public void SumOfPowers()
        {
            // This test is difficult because the minimum is emphatically not quadratic.
            // We do get close to the minimum but we massively underestimate our error.

            Func<IList<double>, double> function = (IList<double> x) => {
                double s = 0.0;
                for (int i = 0; i < x.Count; i++) {
                    s += MoreMath.Pow(Math.Abs(x[i]), i + 2);
                }
                return (s);
            };

            for (int n = 2; n < 8; n++) {

                Console.WriteLine(n);

                ColumnVector start = new ColumnVector(n);
                for (int i = 0; i < n; i++) start[i] = 1.0;

                EvaluationSettings settings = new EvaluationSettings() { AbsolutePrecision = 1.0E-8, EvaluationBudget = 32 * n * n * n };

                MultiExtremum minimum = MultiFunctionMath.FindLocalMinimum(function, start, settings);

                Console.WriteLine(minimum.EvaluationCount);
                Console.WriteLine("{0} {1}", minimum.Value, minimum.Precision);
                Console.WriteLine("|| {0} {1} ... || = {2}", minimum.Location[0], minimum.Location[1], minimum.Location.FrobeniusNorm());

                Assert.IsTrue(TestUtilities.IsNearlyEqual(minimum.Value, 0.0, new EvaluationSettings() { AbsolutePrecision = 1.0E-4 }));
                //Assert.IsTrue(TestUtilities.IsNearlyEqual(minimum.Location, new ColumnVector(n), new EvaluationSettings() { AbsolutePrecision = 1.0E-2 }));

            }
        }
Example #54
        /// <summary>
        /// Evaluates a definite integral with the given evaluation settings.
        /// </summary>
        /// <param name="integrand">The function to be integrated.</param>
        /// <param name="range">The range of integration.</param>
        /// <param name="settings">The settings which control the evaulation of the integal.</param>
        /// <returns>The result of the integral, which includes an estimated value and estimated uncertainty of that value.</returns>
        public static IntegrationResult Integrate(Func <double, double> integrand, Interval range, EvaluationSettings settings)
        {
            if (integrand == null)
            {
                throw new ArgumentNullException("integrand");
            }

            // remap infinite integrals to finite integrals

            if (Double.IsNegativeInfinity(range.LeftEndpoint) && Double.IsPositiveInfinity(range.RightEndpoint))
            {
                // -infinity to +infinity

                // remap to (-pi/2,pi/2)
                Func <double, double> f0 = integrand;
                Func <double, double> f1 = delegate(double t) {
                    double x = Math.Tan(t);
                    return(f0(x) * (1.0 + x * x));
                };
                Interval r1 = Interval.FromEndpoints(-Global.HalfPI, Global.HalfPI);

                return(Integrate(f1, r1, settings));
            }
            else if (Double.IsPositiveInfinity(range.RightEndpoint))
            {
                // finite to +infinity

                // remap to interval (-1,1)
                double a0 = range.LeftEndpoint;
                Func <double, double> f0 = integrand;
                Func <double, double> f1 = delegate(double t) {
                    double q = 1.0 - t;
                    double x = a0 + (1 + t) / q;
                    return(f0(x) * (2.0 / q / q));
                };
                Interval r1 = Interval.FromEndpoints(-1.0, 1.0);

                return(Integrate(f1, r1, settings));
            }
            else if (Double.IsNegativeInfinity(range.LeftEndpoint))
            {
                // -infinity to finite

                // remap to interval (-1,1)
                double b0 = range.RightEndpoint;
                Func <double, double> f0 = integrand;
                Func <double, double> f1 = delegate(double t) {
                    double q = t + 1.0;
                    double x = b0 + (t - 1.0) / q;
                    return(f0(x) * (2.0 / q / q));
                };
                Interval r1 = Interval.FromEndpoints(-1.0, 1.0);

                return(Integrate(f1, r1, settings));
            }

            // normal integral over a finite range

            IAdaptiveIntegrator integrator = new GaussKronrodIntegrator(integrand, range);
            IntegrationResult   result     = Integrate_Adaptive(integrator, settings);

            return(result);
        }
Example #55
        /// <summary>
        /// Integrates the given function over the given volume.
        /// </summary>
        /// <param name="integrand">The function to integrate, which maps R<sup>d</sup> to R.</param>
        /// <param name="volume">The box defining the volume over with to integrate.</param>
        /// <param name="settings">The evaluation settings to use.</param>
        /// <returns>The value of the integral.</returns>
        public static double Integrate(Func <IList <double>, double> integrand, IList <Interval> volume, EvaluationSettings settings)
        {
            if (integrand == null)
            {
                throw new ArgumentNullException("integrand");
            }
            if (volume == null)
            {
                throw new ArgumentNullException("volume");
            }

            // get the dimension of the problem from the volume
            int d = volume.Count;

            if ((d < 1) || (d > sobolParameters.Length))
            {
                throw new InvalidOperationException();
            }

            // if no settings were provided, use defaults
            if (settings == null)
            {
                // compute the relative precision first so the absolute precision can be derived from it
                double relativePrecision = 1.0 / (1 << (14 - 3 * d / 2));
                settings = new EvaluationSettings()
                {
                    RelativePrecision = relativePrecision,
                    AbsolutePrecision = relativePrecision / 128.0,
                    EvaluationBudget  = 1 << (18 + 3 * d / 2)
                };
            }

            // compute the volume of the box
            double V = 1.0;

            for (int j = 0; j < volume.Count; j++)
            {
                V *= volume[j].Width;
            }

            // generate the appropriate number of Sobol sequences
            SobolSequence[] s = new SobolSequence[d];
            for (int j = 0; j < d; j++)
            {
                SobolSequenceParameters p = sobolParameters[j];
                s[j] = new SobolSequence(p.Dimension, p.Coefficients, p.Seeds);
            }

            // create one vector to store the argument
            // we don't want to recreate the vector in the
            // heap on each iteration
            double[] x = new double[d];

            // keep track of the average sampled value
            double M = 0.0; double M_old = Double.NaN;

            // set the first convergence checkpoint to ~4000 points for d=2, increasing
            // for higher d
            int i_next = 1 << (10 + d);

            for (int i = 1; i <= settings.EvaluationBudget; i++)
            {
                // move to the next value in each dimension's Sobol sequence
                // and construct the argument vector
                for (int j = 0; j < d; j++)
                {
                    s[j].MoveNext();
                    x[j] = volume[j].LeftEndpoint + s[j].Current * volume[j].Width;
                }

                // evaluate the function
                double y = integrand(x);

                // update the mean
                M += (y - M) / i;

                // check for convergence at regular intervals
                if (i == i_next)
                {
                    // estimate error as change since last check
                    double dM = 2.0 * Math.Abs(M - M_old);

                    if (dM < settings.RelativePrecision * Math.Abs(M) || Math.Abs(V) * dM < settings.AbsolutePrecision)
                    {
                        return(V * M);
                    }

                    // no convergence, so remember current value and set next checkpoint
                    M_old  = M;
                    i_next = 2 * i_next;

                    // Consider how to do this better:
                    // 1. Is there any way we can do better than doubling? Say increasing by 4/3 and then 3/2, for an average increase of
                    // 40% instead of 100%? If so, how do we estimate error at each step?
                    // 2. Can we use Romberg extrapolation? I tried using both M and M_old, assuming an error term of 1/N. This gives
                    // the extrapolated value 2 M - M_old, but experimentally this is usually a worse value than M. I should try out
                    // 3-value extrapolation.
                }
            }

            throw new NonconvergenceException();
        }
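        // Usage sketch (illustrative only; the method name IntegrateUnitSquareExample is hypothetical):
        // integrate x*y over the unit square, whose exact value is 1/4. Passing null for the settings
        // accepts the defaults constructed inside the method above.
        public static void IntegrateUnitSquareExample()
        {
            Func<IList<double>, double> f = (IList<double> x) => x[0] * x[1];
            IList<Interval> box = new Interval[] { Interval.FromEndpoints(0.0, 1.0), Interval.FromEndpoints(0.0, 1.0) };
            double I = Integrate(f, box, null);
            Console.WriteLine("{0} ~= 0.25", I);
        }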
Example #56
        private static Extremum FindMinimum(Functor f, double x, double fx, double d, EvaluationSettings settings)
        {
            // This function brackets a minimum by starting from x and taking increasing steps downhill until it moves uphill again.

            // We write this function assuming f(x) has already been evaluated because when it is called
            // to do a line fit for Powell's multi-dimensional minimization routine, that is the case and
            // we don't want to do a superfluous evaluation.

            Debug.Assert(f != null); Debug.Assert(d > 0.0); Debug.Assert(settings != null);

            // evaluate at x + d
            double y  = x + d;
            double fy = f.Evaluate(y);

            // if we stepped uphill, reverse direction of steps and exchange x & y
            if (fy > fx)
            {
                Global.Swap(ref x, ref y); Global.Swap(ref fx, ref fy);
                d = -d;
            }

            // we now know f(x) >= f(y) and we are stepping downhill
            // continue stepping until we step uphill
            double z, fz;

            while (true)
            {
                if (f.EvaluationCount >= settings.EvaluationBudget)
                {
                    throw new NonconvergenceException();
                }

                z  = y + d;
                fz = f.Evaluate(z);

                Debug.WriteLine(String.Format("f({0})={1} f({2})={3} f({4})={5} d={6}", x, fx, y, fy, z, fz, d));

                if (fz > fy)
                {
                    break;
                }

                // increase the step size each time
                d = AdvancedMath.GoldenRatio * d;

                // x <- y <- z
                x = y; fx = fy; y = z; fy = fz;
            }

            // x and z now bracket a local minimum, with y the lowest point evaluated so far
            double a = Math.Min(x, z); double b = Math.Max(x, z);

            if (fz < fx)
            {
                Global.Swap(ref x, ref z); Global.Swap(ref fx, ref fz);
            }

            return(FindMinimum(f, a, b, y, fy, x, fx, z, fz, settings));
        }
Example #57
 internal IntegrationResult(UncertainValue estimate, int evaluationCount, EvaluationSettings settings) : base(evaluationCount, settings)
 {
     this.estimate = estimate;
 }
Example #58
        // Brent's algorithm: use 3-point parabolic interpolation,
        // switching to golden section if interval does not shrink fast enough
        // see Richard Brent, "Algorithms for Minimization Without Derivatives"

        // The bracket is [a, b] and the three lowest points are (u,fu), (v,fv), (w, fw)
        // Note that a or b may be u, v, or w.

        private static Extremum FindMinimum(
            Functor f,
            double a, double b,
            double u, double fu, double v, double fv, double w, double fw,
            EvaluationSettings settings
            )
        {
            double tol = 0.0; double fpp = Double.NaN;

            while (f.EvaluationCount < settings.EvaluationBudget)
            {
                Debug.WriteLine(String.Format("n={0} tol={1}", f.EvaluationCount, tol));
                Debug.WriteLine(String.Format("[{0}  f({1})={2}  f({3})={4}  f({5})={6}  {7}]", a, u, fu, v, fv, w, fw, b));

                // While a < u < b is guaranteed, a < v, w < b is not guaranteed, since the bracket can sometimes be made tight enough to exclude v or w.
                // For example, if u < v < w, then we can set b = v, placing w outside the bracket.

                Debug.Assert(a < b);
                Debug.Assert((a <= u) && (u <= b));
                Debug.Assert((fu <= fv) && (fv <= fw));

                // Expected final situation is a<tol><tol>u<tol><tol>b, leaving no point left to evaluate that is not within tol of an existing point.

                if ((b - a) <= 4.0 * tol)
                {
                    return(new Extremum(u, fu, fpp, f.EvaluationCount, settings));
                }

                double x; ParabolicFit(u, fu, v, fv, w, fw, out x, out fpp);
                Debug.WriteLine(String.Format("parabolic x={0} f''={1}", x, fpp));

                if (Double.IsNaN(fpp) || (fpp <= 0.0) || (x < a) || (x > b))
                {
                    // the parabolic fit didn't work out, so do a golden section reduction instead

                    // to get the most reduction of the bracket, pick the larger of au and ub
                    // for self-similarity, pick a point inside it that divides it into two segments in the golden section ratio,
                    // i.e. 0.3820 = \frac{1}{\phi + 1} and 0.6180 = \frac{\phi}{\phi+1}
                    // put the smaller segment closer to u so that x is closer to u, the best minimum so far

                    double au = u - a;
                    double ub = b - u;

                    if (au > ub)
                    {
                        x = u - au / (AdvancedMath.GoldenRatio + 1.0);
                    }
                    else
                    {
                        x = u + ub / (AdvancedMath.GoldenRatio + 1.0);
                    }

                    Debug.WriteLine(String.Format("golden section x={0}", x));
                }

                // ensure we don't evaluate within tolerance of an existing point
                if (Math.Abs(x - u) < tol)
                {
                    Debug.WriteLine(String.Format("shift from u (x={0})", x)); x = (x > u) ? u + tol : u - tol;
                }
                if ((x - a) < tol)
                {
                    Debug.WriteLine(String.Format("shift from a (x={0})", x)); x = a + tol;
                }
                if ((b - x) < tol)
                {
                    Debug.WriteLine(String.Format("shift from b (x={0})", x)); x = b - tol;
                }

                // evaluate the function at the new point x
                double fx = f.Evaluate(x);
                Debug.WriteLine(String.Format("f({0}) = {1}", x, fx));
                Debug.WriteLine(String.Format("delta={0}", fu - fx));

                // update a, b and u, v, w based on new point x

                if (fx < fu)
                {
                    // the new point is lower than all the others; this is success

                    // u now becomes a bracket point
                    if (u < x)
                    {
                        a = u;
                    }
                    else
                    {
                        b = u;
                    }

                    // x -> u -> v -> w
                    w = v; fw = fv;
                    v = u; fv = fu;
                    u = x; fu = fx;
                }
                else
                {
                    // x now becomes a bracket point
                    if (x < u)
                    {
                        a = x;
                    }
                    else
                    {
                        b = x;
                    }

                    if (fx < fv)
                    {
                        // the new point is higher than u, but still lower than v and w
                        // this isn't what we expected, but we have lower points than before

                        // x -> v -> w
                        w = v; fw = fv;
                        v = x; fv = fx;
                    }
                    else if (fx < fw)
                    {
                        // x -> w
                        w = x; fw = fx;
                    }
                    else
                    {
                        // the new point is higher than all our other points; this is the worst case

                        // we might still want to replace w with x because
                        // (i) otherwise a parabolic fit will reproduce the same x and
                        // (ii) w is quite likely far outside the new bracket and not telling us much about the behavior near u
                        // w = x; fw = fx;
                        // but tests with the Rosenbrock function indicate that doing so increases the evaluation count

                        Debug.WriteLine("bad point");
                    }
                }

                // if the user has specified a tolerance, use it
                if (settings.RelativePrecision > 0.0 || settings.AbsolutePrecision > 0.0)
                {
                    tol = Math.Max(Math.Abs(u) * settings.RelativePrecision, settings.AbsolutePrecision);
                }
                else
                {
                    // otherwise, try to get the tolerance from the curvature
                    if (fpp > 0.0)
                    {
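                        // (near the minimum f(x) ~ f(u) + (1/2) f'' (x - u)^2, so a displacement dx becomes numerically
                        // invisible once (1/2) f'' dx^2 falls below ~ Accuracy * (|f(u)| + Accuracy); solving for dx
                        // gives the tolerance below)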
                        tol = Math.Sqrt(2.0 * Global.Accuracy * (Math.Abs(fu) + Global.Accuracy) / fpp);
                    }
                    else
                    {
                        // but if we don't have a usable curvature either, wing it
                        if (tol == 0.0)
                        {
                            tol = Math.Sqrt(Global.Accuracy);
                        }
                    }
                }
            }

            throw new NonconvergenceException();
        }
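
For orientation, here is a usage sketch of how a bracketing minimizer like the one above might be driven. The wrapper name FindMinimumSketch, its (function, a, b, settings) signature, the settability of the EvaluationSettings properties, and the Extremum property names Location and Value are all illustrative assumptions, not the library's confirmed public API.

        // Usage sketch only; the names noted above as assumptions are hypothetical.
        public static void MinimizeSketch()
        {
            // a smooth function whose minimum is at x = 1 with f(1) = 3
            Func<double, double> fn = x => (x - 1.0) * (x - 1.0) + 3.0;

            EvaluationSettings settings = new EvaluationSettings();
            settings.RelativePrecision = 1.0E-10;
            settings.AbsolutePrecision = 0.0;
            settings.EvaluationBudget = 200;

            // FindMinimumSketch is a stand-in for whatever public wrapper ultimately calls the loop above
            Extremum minimum = FindMinimumSketch(fn, -2.0, 4.0, settings);
            Console.WriteLine("x* = {0}, f(x*) = {1}", minimum.Location, minimum.Value);
        }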
Example #59
        // the drivers

        private static IntegrationResult Integrate_Adaptive(IAdaptiveIntegrator integrator, EvaluationSettings s)
        {
            LinkedList<IAdaptiveIntegrator> list = new LinkedList<IAdaptiveIntegrator>();

            list.AddFirst(integrator);

            int n = integrator.EvaluationCount;

            while (true)
            {
                // go through the intervals, adding estimates (and errors)
                // and noting which contributes the most error
                // keep track of the total value and uncertainty
                UncertainValue vTotal = new UncertainValue();
                //double v = 0.0;
                //double u = 0.0;

                // keep track of which node contributes the most error
                LinkedListNode<IAdaptiveIntegrator> maxNode = null;
                double maxError = 0.0;

                LinkedListNode<IAdaptiveIntegrator> node = list.First;
                while (node != null)
                {
                    IAdaptiveIntegrator i = node.Value;

                    UncertainValue v = i.Estimate;
                    vTotal += v;
                    //UncertainValue uv = i.Estimate;
                    //v += uv.Value;
                    //u += uv.Uncertainty;

                    if (v.Uncertainty > maxError)
                    {
                        maxNode  = node;
                        maxError = v.Uncertainty;
                    }

                    node = node.Next;
                }

                // if our error is small enough, return
                if ((vTotal.Uncertainty <= Math.Abs(vTotal.Value) * s.RelativePrecision) || (vTotal.Uncertainty <= s.AbsolutePrecision))
                {
                    return (new IntegrationResult(vTotal, n, s));
                }
                //if ((vTotal.Uncertainty <= Math.Abs(vTotal.Value) * s.RelativePrecision) || (vTotal.Uncertainty <= s.AbsolutePrecision)) {
                //    return (new IntegrationResult(vTotal.Value, n));
                //}

                // if our evaluation count is too big, throw
                if (n > s.EvaluationBudget)
                {
                    throw new NonconvergenceException();
                }

                // subdivide the interval with the largest error
                IEnumerable<IAdaptiveIntegrator> divisions = maxNode.Value.Divide();
                foreach (IAdaptiveIntegrator division in divisions)
                {
                    list.AddBefore(maxNode, division);
                    n += division.EvaluationCount;
                    //v2 += division.Estimate;
                }
                list.Remove(maxNode);
            }
        }
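
The driver above relies on only three members of IAdaptiveIntegrator: an Estimate (an UncertainValue), an EvaluationCount, and a Divide() method that yields subregion integrators. The sketch below is a deliberately crude midpoint-rule interval written only to illustrate that contract; the member signatures are inferred from the calls above, the error estimate (midpoint versus trapezoid difference) is an arbitrary choice, and the real interface may declare additional members not shown here.

        // Illustrative sketch of the contract the driver above relies on; not a drop-in implementation.
        internal class MidpointIntervalSketch : IAdaptiveIntegrator
        {
            private readonly Func<double, double> f;
            private readonly double a, b;
            private readonly UncertainValue estimate;

            public MidpointIntervalSketch(Func<double, double> f, double a, double b)
            {
                this.f = f; this.a = a; this.b = b;
                double h = b - a;
                double midpoint = h * f(a + 0.5 * h);
                double trapezoid = 0.5 * h * (f(a) + f(b));
                // use the midpoint estimate as the value and the midpoint-trapezoid gap as a crude error bound
                this.estimate = new UncertainValue(midpoint, Math.Abs(midpoint - trapezoid));
                this.EvaluationCount = 3;
            }

            // the estimate and its uncertainty for this interval
            public UncertainValue Estimate { get { return estimate; } }

            // number of function evaluations spent on this interval
            public int EvaluationCount { get; private set; }

            // split the interval in half; the driver adds the halves to its list and removes this node
            public IEnumerable<IAdaptiveIntegrator> Divide()
            {
                double m = 0.5 * (a + b);
                return new IAdaptiveIntegrator[] {
                    new MidpointIntervalSketch(f, a, m),
                    new MidpointIntervalSketch(f, m, b)
                };
            }
        }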
Example #60
        private static MultiExtremum FindLocalExtremum(Func<IList<double>, double> function, IList<double> start, EvaluationSettings settings, bool negate)
        {
            MultiFunctor f = new MultiFunctor(function, negate);

            // Pick an initial radius; we need to do this better.

            /*
             * double s = Double.MaxValue;
             * foreach (double x in start) s = Math.Min((Math.Abs(x) + 1.0 / 8.0) / 8.0, s);
             */

            double s = 0.0;

            foreach (double x in start)
            {
                s += (Math.Abs(x) + 1.0 / 4.0) / 4.0;
            }
            s = s / start.Count;
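            // (for example, start = (1, 2, 3) gives s = (1.25 / 4 + 2.25 / 4 + 3.25 / 4) / 3 = 0.5625,
            // i.e. roughly a quarter of the typical coordinate magnitude)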

            //double s = 0.2;
            Debug.WriteLine("s={0}", s);

            return (FindMinimum_ModelTrust(f, start, s, settings));
        }