/// <summary>
/// Calculate (kth derivative of LogisticGaussian)*exp(0.5*mean^2/variance)
/// </summary>
/// <param name="mean">Mean of the Gaussian.</param>
/// <param name="variance">Variance of the Gaussian.</param>
/// <param name="k">Order of the derivative (0, 1, or 2).</param>
/// <returns>The scaled derivative.</returns>
public static double LogisticGaussianRatio(double mean, double variance, int k)
{
    if (k < 0 || k > 2) { throw new ArgumentException("invalid k (" + k + ")"); }
    double a = mean / variance;
    // int 0.5 cosh(x(m/v+1/2))/cosh(x/2) N(x;0,v) dx
    double f(double x)
    {
        double logSigma = MMath.LogisticLn(x);
        double extra = 0;
        double s = 1;
        if (k > 0) { extra += MMath.LogisticLn(-x); }
        if (k > 1) { s = -Math.Tanh(x / 2); }
        return s * Math.Exp(logSigma + extra + x * a + Gaussian.GetLogProb(x, 0, variance));
    }
    double upperBound = (Math.Abs(a + 0.5) - 0.5) * variance + Math.Sqrt(variance);
    upperBound = Math.Max(upperBound, 10);
    return Quadrature.AdaptiveClenshawCurtis(f, upperBound, 32, 1e-10);
}
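// Usage sketch (not from the original source): a quick consistency check of the k = 0 case,
// assuming these routines are exposed as MMath.LogisticGaussianRatio and MMath.LogisticGaussian,
// as in Infer.NET's Microsoft.ML.Probabilistic.Math namespace. For k = 0 the integrand above
// reduces to sigma(x) N(x;m,v) exp(0.5*m^2/v), so multiplying the ratio by exp(-0.5*m^2/v)
// should approximately recover LogisticGaussian(m, v).
public static void LogisticGaussianRatioSketch()
{
    double mean = 0.5, variance = 2.0;
    double ratio0 = MMath.LogisticGaussianRatio(mean, variance, 0);
    double viaRatio = ratio0 * Math.Exp(-0.5 * mean * mean / variance);
    double direct = MMath.LogisticGaussian(mean, variance);
    Console.WriteLine($"via ratio = {viaRatio}, direct = {direct}");
}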
protected override void OnBarUpdate()
{
    if (CurrentBar < 50) { return; }
    smooth.Set((4 * Median[0] + 3 * Median[1] + 2 * Median[2] + Median[3]) / 10);
    detrender.Set((0.0962 * smooth[0] + 0.5769 * smooth[2] - 0.5769 * smooth[4] - 0.0962 * smooth[6]) * (0.075 * period[1] + .54));

    //InPhase and Quadrature components
    q1.Set((0.0962 * detrender[0] + 0.5769 * detrender[2] - 0.5769 * detrender[4] - 0.0962 * detrender[6]) * (0.075 * period[1] + 0.54));
    i1.Set(detrender[3]);

    //Advance the phase of I1 and Q1 by 90 degrees
    jI.Set((0.0962 * i1[0] + 0.5769 * i1[2] - 0.5769 * i1[4] - 0.0962 * i1[6]) * (0.075 * period[1] + .54));
    jQ.Set((0.0962 * q1[0] + 0.5769 * q1[2] - 0.5769 * q1[4] - 0.0962 * q1[6]) * (0.075 * period[1] + .54));

    //Phasor Addition
    i2.Set(i1[0] - jQ[0]);
    q2.Set(q1[0] + jI[0]);

    //Smooth the I and Q components before applying the discriminator
    i2.Set(0.2 * i2[0] + 0.8 * i2[1]);
    q2.Set(0.2 * q2[0] + 0.8 * q2[1]);

    //Homodyne Discriminator
    re.Set(i2[0] * i2[1] + q2[0] * q2[1]);
    im.Set(i2[0] * q2[1] - q2[0] * i2[1]);
    re.Set(0.2 * re[0] + 0.8 * re[1]);
    im.Set(0.2 * im[0] + 0.8 * im[1]);

    double rad2Deg = 180.0 / (4.0 * Math.Atan(1));
    if (Math.Abs(im[0]) > double.Epsilon && Math.Abs(re[0]) > double.Epsilon)
    {
        period.Set(360 / (Math.Atan(im[0] / re[0]) * rad2Deg));
    }
    if (period[0] > (1.5 * period[1])) { period.Set(1.5 * period[1]); }
    if (period[0] < (0.67 * period[1])) { period.Set(0.67 * period[1]); }
    if (period[0] < 6) { period.Set(6); }
    if (period[0] > 50) { period.Set(50); }
    period.Set(0.2 * period[0] + 0.8 * period[1]);
    smoothPeriod.Set(0.33 * period[0] + 0.67 * smoothPeriod[1]);
    InPhase.Set(i1[0]);
    Quadrature.Set(q1[0]);
}
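// Illustration (not part of the original indicator): the homodyne step above estimates the per-bar
// phase advance as atan(im/re), where re and im act as the cosine and sine of the phase step formed
// from consecutive phasors (the sign convention depends on the phasor orientation in the real code).
// The dominant cycle period is 360 degrees divided by that advance. A hypothetical check:
public static double PeriodFromPhaseStep(double degreesPerBar)
{
    double rad2Deg = 180.0 / (4.0 * Math.Atan(1));   // 4*atan(1) = pi, so this is 180/pi
    double phaseStepRad = degreesPerBar / rad2Deg;
    double re = Math.Cos(phaseStepRad);              // stands in for the dot product of consecutive phasors
    double im = Math.Sin(phaseStepRad);              // stands in for the cross product
    return 360 / (Math.Atan(im / re) * rad2Deg);     // e.g. PeriodFromPhaseStep(18) returns 20 (a 20-bar cycle)
}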
/// <summary>
/// Calculate <c>\sigma'(m,v)=\int N(x;m,v)logistic'(x) dx</c>
/// </summary>
/// <param name="mean">Mean.</param>
/// <param name="variance">Variance.</param>
/// <returns>The value of this special function.</returns>
/// <remarks><para>
/// For large v we can use the big v approximation <c>\sigma'(m,v)=N(m,0,v+pi^2/3)</c>.
/// For small and moderate v we use Gauss-Hermite quadrature.
/// For moderate v we first find the mode of the (log concave) function since this may be quite far from m.
/// </para></remarks>
public static double LogisticGaussianDerivative(double mean, double variance)
{
    double halfVariance = 0.5 * variance;
    mean = Math.Abs(mean);
    // use the upper bound exp(-|m|+v/2) to prune cases that must be zero
    if (-mean + halfVariance < log0) { return 0.0; }
    // use the upper bound 0.5 exp(-0.5 m^2/v) to prune cases that must be zero
    double q = -0.5 * mean * mean / variance - MMath.Ln2;
    if (mean <= variance && q < log0) { return 0.0; }
    if (double.IsPositiveInfinity(variance)) { return 0.0; }
    // Handle the tail cases using the following exact formula:
    // sigma'(m,v) = exp(-m+v/2) -2 exp(-2m+2v) +3 exp(-3m+9v/2) sigma(m-3v,v) - exp(-3m+9v/2) sigma'(m-3v,v)
    if (-mean + 1.5 * variance < logEpsilon) { return Math.Exp(halfVariance - mean); }
    if (-2 * mean + 4 * variance < logEpsilon) { return Math.Exp(halfVariance - mean) - 2 * Math.Exp(2 * (variance - mean)); }
    if (variance > LogisticGaussianVarianceThreshold)
    {
        double f(double x)
        {
            return Math.Exp(MMath.LogisticLn(x) + MMath.LogisticLn(-x) + Gaussian.GetLogProb(x, mean, variance));
        }
        return Quadrature.AdaptiveClenshawCurtis(f, 10, 32, 1e-10);
    }
    else
    {
        Vector nodes = Vector.Zero(LogisticGaussianQuadratureNodeCount);
        Vector weights = Vector.Zero(LogisticGaussianQuadratureNodeCount);
        double m_p, v_p;
        BigvProposal(mean, variance, out m_p, out v_p);
        Quadrature.GaussianNodesAndWeights(m_p, v_p, nodes, weights);
        double weightedIntegrand(double z)
        {
            return Math.Exp(MMath.LogisticLn(z) + MMath.LogisticLn(-z) + Gaussian.GetLogProb(z, mean, variance) - Gaussian.GetLogProb(z, m_p, v_p));
        }
        return Integrate(weightedIntegrand, nodes, weights);
    }
}
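// Numerical sanity check (not from the original source): for large v the remarks above give the
// approximation sigma'(m,v) ~= N(m;0,v+pi^2/3). Assuming the routine is exposed as
// MMath.LogisticGaussianDerivative and Gaussian.GetLogProb is the static log-density used above,
// the two values should agree to a few digits for a variance like 100.
public static void LogisticGaussianDerivativeSketch()
{
    double mean = 1.0, variance = 100.0;
    double viaQuadrature = MMath.LogisticGaussianDerivative(mean, variance);
    double bigV = Math.Exp(Gaussian.GetLogProb(mean, 0, variance + Math.PI * Math.PI / 3));
    Console.WriteLine($"quadrature = {viaQuadrature}, big-v approximation = {bigV}");
}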
public void TruncatedGaussianNormaliser()
{
    double a = 0, b = 2;
    var g = new TruncatedGaussian(3, 1, a, b);
    double Z = Quadrature.AdaptiveTrapeziumRule(x => System.Math.Exp(g.GetLogProb(x)), 32, a, b, 1e-10, 10000);
    // the truncated density should integrate to 1; compare |1 - Z| rather than the signed difference
    Assert.True(System.Math.Abs(1.0 - Z) < 1e-4);
}
public void UniformQuadrature2()
{
    for (int count = 3; count <= 20; count++)
    {
        Vector nodes = Vector.Zero(count);
        Vector weights = Vector.Zero(count);
        Quadrature.UniformNodesAndWeights(0, 1, nodes, weights);
        // int_0^1 x^5 dx = 1/6; a Gaussian rule with count >= 3 nodes reproduces this to rounding error
        double result = (weights * (nodes ^ 5.0)).Sum();
        Assert.True(MMath.AbsDiff(1.0 / 6, result, 1e-10) < 1e-10);
    }
}
public void GammaQuadrature()
{
    Vector nodes = Vector.Zero(15);
    Vector logWeights = Vector.Zero(15);
    Quadrature.GammaNodesAndWeights(2, 3, nodes, logWeights);
    Vector weights = Vector.Zero(logWeights.Count);
    weights.SetToFunction(logWeights, System.Math.Exp);
    // the expected second moment 4/3 is consistent with a Gamma density of shape 3 and rate 3,
    // since shape*(shape+1)/rate^2 = 3*4/9 = 4/3
    double result = (weights * nodes * nodes).Sum();
    Assert.True(MMath.AbsDiff(4.0 / 3, result, 1e-4) < 1e-4);
}
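// A similar hedged check (not part of the original tests) for the Gaussian rule used elsewhere in
// this file: Quadrature.GaussianNodesAndWeights(m, v, nodes, weights) should reproduce the second
// moment E[x^2] = m^2 + v of N(m, v). This assumes the Vector overload returns linear (not log)
// weights, as its usage above suggests.
public void GaussianQuadratureSketch()
{
    double m = 1.5, v = 2.0;
    Vector nodes = Vector.Zero(15);
    Vector weights = Vector.Zero(15);
    Quadrature.GaussianNodesAndWeights(m, v, nodes, weights);
    double secondMoment = (weights * nodes * nodes).Sum();
    Assert.True(MMath.AbsDiff(m * m + v, secondMoment, 1e-8) < 1e-8);
}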
private static void test02()
//****************************************************************************80
//
//  Purpose:
//
//    TEST02 tests the code for the even case N = 4.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    03 August 2010
//
//  Author:
//
//    John Burkardt
//
{
    int i;
    const int n = 4;

    Console.WriteLine("");
    Console.WriteLine("TEST02");
    Console.WriteLine("  Request KRONROD to compute the Gauss rule");
    Console.WriteLine("  of order 4, and the Kronrod extension of");
    Console.WriteLine("  order 4+5=9.");

    double eps = 0.000001;
    double[] w1 = new double[n + 1];
    double[] w2 = new double[n + 1];
    double[] x = new double[n + 1];
    Quadrature.kronrod(n, eps, ref x, ref w1, ref w2);

    Console.WriteLine("");
    // parenthesize (n + 1) so the length prints as a number rather than concatenated digits
    Console.WriteLine("  KRONROD returns 3 vectors of length " + (n + 1) + "");
    Console.WriteLine("");
    Console.WriteLine("     I       X               WK              WG");
    Console.WriteLine("");
    for (i = 1; i <= n + 1; i++)
    {
        Console.WriteLine("  " + i.ToString(CultureInfo.InvariantCulture).PadLeft(4)
            + "  " + x[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + w1[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + w2[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14) + "");
    }
}
/// <summary>
/// Evaluates E[log(1+exp(x))] under a Gaussian distribution with specified mean and variance.
/// </summary>
/// <param name="mean">Mean of the Gaussian.</param>
/// <param name="variance">Variance of the Gaussian.</param>
/// <returns>The expected value, computed by Gauss-Hermite quadrature.</returns>
public static double Log1PlusExpGaussian(double mean, double variance)
{
    double[] nodes = new double[11];
    double[] weights = new double[11];
    Quadrature.GaussianNodesAndWeights(mean, variance, nodes, weights);
    double z = 0;
    for (int i = 0; i < nodes.Length; i++)
    {
        double x = nodes[i];
        double f = MMath.Log1PlusExp(x);
        z += weights[i] * f;
    }
    return z;
}
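// Hedged usage sketch (not from the original source): log(1+exp(x)) is convex, so by Jensen's
// inequality the quadrature value should be at least log(1+exp(mean)), and for mean much larger
// than sqrt(variance) it should be close to mean. Assumes the method sits alongside MMath as above.
public static void Log1PlusExpGaussianSketch()
{
    double value = Log1PlusExpGaussian(0.0, 1.0);
    Console.WriteLine($"E[log(1+exp(x))] under N(0,1) = {value}, Jensen lower bound log(2) = {Math.Log(2)}");
    Console.WriteLine($"large-mean case: {Log1PlusExpGaussian(10.0, 1.0)} (should be close to 10)");
}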
/// <summary>
/// Evidence message for EP
/// </summary>
/// <param name="exp">Incoming message from 'exp'.</param>
/// <param name="d">Incoming message from 'd'.</param>
/// <param name="to_d">Previous outgoing message to 'd'.</param>
/// <returns>Logarithm of the factor's average value across the given argument distributions</returns>
/// <remarks><para>
/// The formula for the result is <c>log(sum_(exp,d) p(exp,d) factor(exp,d))</c>.
/// </para></remarks>
public static double LogAverageFactor(Gamma exp, Gaussian d, Gaussian to_d)
{
    if (d.IsPointMass) { return LogAverageFactor(exp, d.Point); }
    if (d.IsUniform()) { return exp.GetLogAverageOf(new Gamma(0, 0)); }
    if (exp.IsPointMass) { return LogAverageFactor(exp.Point, d); }
    if (exp.IsUniform()) { return 0.0; }
    double[] nodes = new double[QuadratureNodeCount];
    double[] weights = new double[QuadratureNodeCount];
    double mD, vD;
    Gaussian dMarginal = d * to_d;
    dMarginal.GetMeanAndVariance(out mD, out vD);
    Quadrature.GaussianNodesAndWeights(mD, vD, nodes, weights);
    if (!to_d.IsUniform())
    {
        // modify the weights to include q(y_k)/N(y_k;m,v)
        for (int i = 0; i < weights.Length; i++)
        {
            weights[i] *= Math.Exp(d.GetLogProb(nodes[i]) - Gaussian.GetLogProb(nodes[i], mD, vD));
        }
    }
    double Z = 0;
    for (int i = 0; i < weights.Length; i++)
    {
        double y = nodes[i];
        double f = weights[i] * Math.Exp((exp.Shape - 1) * y - exp.Rate * Math.Exp(y));
        Z += f;
    }
    return Math.Log(Z) - exp.GetLogNormalizer();
}
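// Illustration (not from the original source) of the weight correction used above: Gauss-Hermite
// nodes and weights target the proposal N(m,v), so multiplying each weight by p(y_k)/N(y_k;m,v)
// turns sum_k w_k g(y_k) into an estimate of int g(y) p(y) dy for a different density p.
// A hypothetical check where p = N(m2,v2) and g(y) = y, so the corrected sum should return roughly m2.
public static double ImportanceCorrectedQuadratureSketch()
{
    double m = 0, v = 1;        // proposal used to build the rule
    double m2 = 0.3, v2 = 0.8;  // target density p
    double[] nodes = new double[QuadratureNodeCount];
    double[] weights = new double[QuadratureNodeCount];
    Quadrature.GaussianNodesAndWeights(m, v, nodes, weights);
    double sum = 0;
    for (int i = 0; i < nodes.Length; i++)
    {
        double correction = Math.Exp(Gaussian.GetLogProb(nodes[i], m2, v2) - Gaussian.GetLogProb(nodes[i], m, v));
        sum += weights[i] * correction * nodes[i];
    }
    return sum; // approximately m2
}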
/// <summary>
/// Initializes a new instance of the ComplexAdaptiveIntegrator class.
/// </summary>
/// <param name="quadrature">A complex function representing the quadrature formula of integration.</param>
public ComplexAdaptiveIntegrator(Quadrature quadrature) : this()
{
    _quadr = quadrature;
}
/// <summary>
/// Called on each bar update event (incoming tick)
/// </summary>
protected override void OnBarUpdate()
{
    if (this.CurrentBar < 50) { return; }
    Smooth.Set((4 * Median[0] + 3 * Median[1] + 2 * Median[2] + Median[3]) / 10);
    Detrender.Set((0.0962 * Smooth[0] + 0.5769 * Smooth[2] - 0.5769 * Smooth[4] - 0.0962 * Smooth[6]) * (0.075 * Period[1] + .54));

    //InPhase and Quadrature components
    Q1.Set((0.0962 * Detrender[0] + 0.5769 * Detrender[2] - 0.5769 * Detrender[4] - 0.0962 * Detrender[6]) * (0.075 * Period[1] + 0.54));
    I1.Set(Detrender[3]);

    //Advance the phase of I1 and Q1 by 90 degrees
    jI.Set((0.0962 * I1[0] + 0.5769 * I1[2] - 0.5769 * I1[4] - 0.0962 * I1[6]) * (0.075 * Period[1] + .54));
    jQ.Set((0.0962 * Q1[0] + 0.5769 * Q1[2] - 0.5769 * Q1[4] - 0.0962 * Q1[6]) * (0.075 * Period[1] + .54));

    //Phasor Addition
    I2.Set(I1[0] - jQ[0]);
    Q2.Set(Q1[0] + jI[0]);

    //Smooth the I and Q components before applying the discriminator
    I2.Set(0.2 * I2[0] + 0.8 * I2[1]);
    Q2.Set(0.2 * Q2[0] + 0.8 * Q2[1]);

    //Homodyne Discriminator
    Re.Set(I2[0] * I2[1] + Q2[0] * Q2[1]);
    Im.Set(I2[0] * Q2[1] - Q2[0] * I2[1]);
    Re.Set(0.2 * Re[0] + 0.8 * Re[1]);
    Im.Set(0.2 * Im[0] + 0.8 * Im[1]);

    double rad2Deg = 180.0 / (4.0 * Math.Atan(1));
    if (Im[0] != 0 && Re[0] != 0)
    {
        Period.Set(360 / (Math.Atan(Im[0] / Re[0]) * rad2Deg));
    }
    if (Period[0] > (1.5 * Period[1])) { Period.Set(1.5 * Period[1]); }
    if (Period[0] < (0.67 * Period[1])) { Period.Set(0.67 * Period[1]); }
    if (Period[0] < 6) { Period.Set(6); }
    if (Period[0] > 50) { Period.Set(50); }
    Period.Set(0.2 * Period[0] + 0.8 * Period[1]);
    SmoothPeriod.Set(0.33 * Period[0] + 0.67 * SmoothPeriod[1]);
    InPhase.Set(I1[0]);
    Quadrature.Set(Q1[0]);
}
/// <summary>
/// Computes the cumulative bivariate normal distribution.
/// </summary>
/// <param name="x">First upper limit. Must be finite.</param>
/// <param name="y">Second upper limit. Must be finite.</param>
/// <param name="r">Correlation coefficient.</param>
/// <returns><c>phi(x,y,r)</c></returns>
/// <remarks>
/// The double integral is transformed into a single integral which is approximated by quadrature.
/// Reference:
/// "Numerical Computation of Rectangular Bivariate and Trivariate Normal and t Probabilities"
/// Alan Genz, Statistics and Computing, 14 (2004), pp. 151-160
/// http://www.math.wsu.edu/faculty/genz/genzhome/research.html
/// </remarks>
private static double NormalCdf_Quadrature(double x, double y, double r)
{
    double absr = System.Math.Abs(r);
    Vector nodes, weights;
    int count = 20;
    if (absr < 0.3) { count = 6; }
    else if (absr < 0.75) { count = 12; }
    nodes = Vector.Zero(count);
    weights = Vector.Zero(count);
    double result = 0.0;
    if (absr < 0.925)
    {
        // use equation (3)
        double asinr = System.Math.Asin(r);
        Quadrature.UniformNodesAndWeights(0, asinr, nodes, weights);
        double sq = 0.5 * (x * x + y * y), xy = x * y;
        for (int i = 0; i < nodes.Count; i++)
        {
            double sin = System.Math.Sin(nodes[i]);
            double cos2 = 1 - sin * sin;
            result += weights[i] * System.Math.Exp((xy * sin - sq) / cos2);
        }
        result /= 2 * System.Math.PI;
        result += MMath.NormalCdf(x, y, 0);
    }
    else
    {
        double sy = (r < 0) ? -y : y;
        if (absr < 1)
        {
            // use equation (6) modified by (7)
            // quadrature part
            double cos2asinr = (1 - r) * (1 + r), sqrt1mrr = System.Math.Sqrt(cos2asinr);
            Quadrature.UniformNodesAndWeights(0, sqrt1mrr, nodes, weights);
            double sxy = x * sy;
            double diff2 = (x - sy) * (x - sy);
            double c = (4 - sxy) / 8, d = (12 - sxy) / 16;
            for (int i = 0; i < nodes.Count; i++)
            {
                double cos2 = nodes[i] * nodes[i];
                double sin = System.Math.Sqrt(1 - cos2);
                double series = 1 + c * cos2 * (1 + d * cos2);
                double exponent = -0.5 * (diff2 / cos2 + sxy);
                double f = System.Math.Exp(-0.5 * sxy * (1 - sin) / (1 + sin)) / sin;
                result += weights[i] * System.Math.Exp(exponent) * (f - series);
            }
            // Taylor expansion part
            double exponentr = -0.5 * (diff2 / cos2asinr + sxy);
            double absdiff = System.Math.Sqrt(diff2);
            if (exponentr > -800)
            {
                // avoid 0*Inf problems
                result += sqrt1mrr * System.Math.Exp(exponentr) * (1 - c * (diff2 - cos2asinr) * (1 - d * diff2 / 5) / 3 + c * d * cos2asinr * cos2asinr / 5);
                // for large absdiff, NormalCdfLn(-absdiff / sqrt1mrr) =approx -0.5*diff2/cos2asinr
                // so (-0.5*sxy + NormalCdfLn) =approx exponentr
                result -= System.Math.Exp(-0.5 * sxy + MMath.NormalCdfLn(-absdiff / sqrt1mrr)) * absdiff * (1 - c * diff2 * (1 - d * diff2 / 5) / 3) * MMath.Sqrt2PI;
            }
            result /= -2 * System.Math.PI;
        }
        if (r > 0)
        {
            // exact value for r=1
            result += MMath.NormalCdf(x, y, 1);
        }
        else
        {
            // exact value for r=-1
            result = -result;
            result += MMath.NormalCdf(x, y, -1);
        }
    }
    if (result < 0) { result = 0.0; }
    else if (result > 1) { result = 1.0; }
    return result;
}
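// Hedged spot check (not from the original source): a classical identity gives
// Phi_2(0, 0, r) = 1/4 + asin(r)/(2*pi). Assuming the public entry point is MMath.NormalCdf(x, y, r)
// (the three-argument form is already called above for the r = 0, 1, -1 special cases), the
// quadrature-based result can be compared against this closed form.
public static void BivariateNormalCdfSketch()
{
    foreach (double r in new[] { -0.8, -0.3, 0.0, 0.5, 0.9 })
    {
        double viaQuadrature = MMath.NormalCdf(0.0, 0.0, r);
        double closedForm = 0.25 + System.Math.Asin(r) / (2 * System.Math.PI);
        Console.WriteLine($"r = {r}: quadrature = {viaQuadrature}, closed form = {closedForm}");
    }
}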
private static double NormalCdfLn_Quadrature(double x, double y, double r)
{
    double absr = System.Math.Abs(r);
    Vector nodes, weights;
    int count = 20;
    if (absr < 0.3) { count = 6; }
    else if (absr < 0.75) { count = 12; }
    nodes = Vector.Zero(count);
    weights = Vector.Zero(count);
    // hasInfiniteLimit is true if NormalCdf(x,y,-1) is 0
    bool hasInfiniteLimit = false;
    if (r < -0.5)
    {
        if (x > 0)
        {
            // NormalCdf(y) <= NormalCdf(-x) iff y <= -x
            if (y < 0) { hasInfiniteLimit = (y <= -x); }
        }
        else
        {
            // NormalCdf(x) <= NormalCdf(-y) iff x <= -y
            if (y > 0) { hasInfiniteLimit = (x <= -y); }
            else { hasInfiniteLimit = true; }
        }
    }
    if (absr < 0.925 && !hasInfiniteLimit)
    {
        // use equation (3)
        double asinr = System.Math.Asin(r);
        Quadrature.UniformNodesAndWeights(0, asinr, nodes, weights);
        double sq = 0.5 * (x * x + y * y), xy = x * y;
        double logResult = double.NegativeInfinity;
        bool useLogWeights = true;
        if (useLogWeights)
        {
            for (int i = 0; i < nodes.Count; i++)
            {
                double sin = System.Math.Sin(nodes[i]);
                double cos2 = 1 - sin * sin;
                logResult = MMath.LogSumExp(logResult, System.Math.Log(System.Math.Abs(weights[i])) + (xy * sin - sq) / cos2);
            }
            logResult -= 2 * MMath.LnSqrt2PI;
        }
        else
        {
            double result = 0.0;
            for (int i = 0; i < nodes.Count; i++)
            {
                double sin = System.Math.Sin(nodes[i]);
                double cos2 = 1 - sin * sin;
                result += weights[i] * System.Math.Exp((xy * sin - sq) / cos2);
            }
            result /= 2 * System.Math.PI;
            logResult = System.Math.Log(System.Math.Abs(result));
        }
        double r0 = MMath.NormalCdfLn(x, y, 0);
        if (asinr > 0) { return MMath.LogSumExp(r0, logResult); }
        else { return MMath.LogDifferenceOfExp(r0, logResult); }
    }
    else
    {
        double result = 0.0;
        double sy = (r < 0) ? -y : y;
        if (absr < 1)
        {
            // use equation (6) modified by (7)
            // quadrature part
            double cos2asinr = (1 - r) * (1 + r), sqrt1mrr = System.Math.Sqrt(cos2asinr);
            Quadrature.UniformNodesAndWeights(0, sqrt1mrr, nodes, weights);
            double sxy = x * sy;
            double diff2 = (x - sy) * (x - sy);
            double c = (4 - sxy) / 8, d = (12 - sxy) / 16;
            for (int i = 0; i < nodes.Count; i++)
            {
                double cos2 = nodes[i] * nodes[i];
                double sin = System.Math.Sqrt(1 - cos2);
                double series = 1 + c * cos2 * (1 + d * cos2);
                double exponent = -0.5 * (diff2 / cos2 + sxy);
                double f = System.Math.Exp(-0.5 * sxy * (1 - sin) / (1 + sin)) / sin;
                result += weights[i] * System.Math.Exp(exponent) * (f - series);
            }
            // Taylor expansion part
            double exponentr = -0.5 * (diff2 / cos2asinr + sxy);
            double absdiff = System.Math.Sqrt(diff2);
            if (exponentr > -800)
            {
                double taylor = sqrt1mrr * (1 - c * (diff2 - cos2asinr) * (1 - d * diff2 / 5) / 3 + c * d * cos2asinr * cos2asinr / 5);
                // avoid 0*Inf problems
                //result -= Math.Exp(-0.5*sxy + NormalCdfLn(-absdiff/sqrt1mrr))*absdiff*(1 - c*diff2*(1 - d*diff2/5)/3)*Sqrt2PI;
                taylor -= MMath.NormalCdfRatio(-absdiff / sqrt1mrr) * absdiff * (1 - c * diff2 * (1 - d * diff2 / 5) / 3);
                result += System.Math.Exp(exponentr) * taylor;
            }
            result /= -2 * System.Math.PI;
        }
        if (r > 0)
        {
            // result += NormalCdf(x, y, 1);
            double r1 = MMath.NormalCdfLn(x, y, 1);
            if (result > 0)
            {
                result = System.Math.Log(result);
                return MMath.LogSumExp(result, r1);
            }
            else
            {
                return MMath.LogDifferenceOfExp(r1, System.Math.Log(-result));
            }
        }
        else
        {
            // return NormalCdf(x, y, -1) - result;
            double r1 = MMath.NormalCdfLn(x, y, -1);
            if (result > 0)
            {
                return MMath.LogDifferenceOfExp(r1, System.Math.Log(result));
            }
            else
            {
                return MMath.LogSumExp(r1, System.Math.Log(-result));
            }
        }
    }
}
private static void test03()
//****************************************************************************80
//
//  Purpose:
//
//    TEST03 uses the program to estimate an integral.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    24 April 2012
//
//  Author:
//
//    John Burkardt
//
{
    const double exact = 1.5643964440690497731;

    Console.WriteLine("");
    Console.WriteLine("TEST03");
    Console.WriteLine("  Call Kronrod to estimate the integral of a function.");
    Console.WriteLine("  Keep trying until the error is small.");
    //
    //  EPS just tells KRONROD how carefully it must compute X, W1 and W2.
    //  It is NOT a statement about the accuracy of your integral estimate!
    //
    double eps = 0.000001;
    //
    //  Start the process with a 1 point rule.
    //
    int n = 1;

    for (;;)
    {
        //
        //  Make space.
        //
        double[] w1 = new double[n + 1];
        double[] w2 = new double[n + 1];
        double[] x = new double[n + 1];

        Quadrature.kronrod(n, eps, ref x, ref w1, ref w2);
        //
        //  Compute the estimates.
        //  There are two complications here:
        //
        //  1) Both rules use all the points. However, the lower order rule uses
        //     a zero weight for the points it doesn't need.
        //
        //  2) The points X are all positive, and are listed in descending order.
        //     This means that 0 is always in the list, and always occurs as the
        //     last member. Therefore, the integral estimates should use the
        //     function value at 0 once, and the function values at the other
        //     X values "twice", that is, once at X and once at -X.
        //
        double i1 = w1[n] * f(x[n]);
        double i2 = w2[n] * f(x[n]);

        int i;
        for (i = 0; i < n; i++)
        {
            i1 += w1[i] * (f(-x[i]) + f(x[i]));
            i2 += w2[i] * (f(-x[i]) + f(x[i]));
        }

        if (Math.Abs(i1 - i2) < 0.0001)
        {
            Console.WriteLine("");
            Console.WriteLine("  Error tolerance satisfied with N = " + n + "");
            Console.WriteLine("  Coarse integral estimate = " + i1.ToString("0.########") + "");
            Console.WriteLine("  Fine integral estimate   = " + i2 + "");
            Console.WriteLine("  Error estimate = " + Math.Abs(i2 - i1) + "");
            Console.WriteLine("  Actual error = " + Math.Abs(exact - i2) + "");
            break;
        }

        if (25 < n)
        {
            Console.WriteLine("");
            Console.WriteLine("  Error tolerance failed even for n = " + n + "");
            Console.WriteLine("  Canceling iteration, and accepting bad estimates!");
            Console.WriteLine("  Coarse integral estimate = " + i1 + "");
            Console.WriteLine("  Fine integral estimate   = " + i2 + "");
            Console.WriteLine("  Error estimate = " + Math.Abs(i2 - i1) + "");
            Console.WriteLine("  Actual error = " + Math.Abs(exact - i2) + "");
            break;
        }
        n = 2 * n + 1;
    }
}
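// Hedged usage sketch (not part of the Burkardt tests): the node layout described in TEST03
// (zero is the last abscissa, the other points are used at both +x and -x, and the Gauss rule W2
// carries zero weights at the Kronrod-only points) applied to a hypothetical integrand
// g(x) = exp(-x*x) on [-1,1].
private static double KronrodEstimate(int n)
{
    double g(double t) { return Math.Exp(-t * t); }
    double eps = 0.000001;
    double[] w1 = new double[n + 1];
    double[] w2 = new double[n + 1];
    double[] x = new double[n + 1];
    Quadrature.kronrod(n, eps, ref x, ref w1, ref w2);
    double estimate = w1[n] * g(x[n]);   // Kronrod rule, point at zero counted once
    for (int i = 0; i < n; i++)
    {
        estimate += w1[i] * (g(-x[i]) + g(x[i]));
    }
    return estimate;   // should approach sqrt(pi)*erf(1) ~= 1.49365 as n grows
}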
private static void test01()
//****************************************************************************80
//
//  Purpose:
//
//    TEST01 tests the code for the odd case N = 3.
//
//  Licensing:
//
//    This code is distributed under the GNU LGPL license.
//
//  Modified:
//
//    03 August 2010
//
//  Author:
//
//    John Burkardt
//
{
    int i;
    int i2;
    const int n = 3;
    double s;
    double[] wg =
    {
        0.555555555555555555556,
        0.888888888888888888889,
        0.555555555555555555556
    };
    double[] wk =
    {
        0.104656226026467265194,
        0.268488089868333440729,
        0.401397414775962222905,
        0.450916538658474142345,
        0.401397414775962222905,
        0.268488089868333440729,
        0.104656226026467265194
    };
    double[] xg =
    {
        -0.77459666924148337704,
        0.0,
        0.77459666924148337704
    };
    double[] xk =
    {
        -0.96049126870802028342,
        -0.77459666924148337704,
        -0.43424374934680255800,
        0.0,
        0.43424374934680255800,
        0.77459666924148337704,
        0.96049126870802028342
    };

    Console.WriteLine("");
    Console.WriteLine("TEST01");
    Console.WriteLine("  Request KRONROD to compute the Gauss rule");
    Console.WriteLine("  of order 3, and the Kronrod extension of");
    Console.WriteLine("  order 3+4=7.");
    Console.WriteLine("");
    Console.WriteLine("  Compare to exact data.");

    double eps = 0.000001;
    double[] w1 = new double[n + 1];
    double[] w2 = new double[n + 1];
    double[] x = new double[n + 1];
    Quadrature.kronrod(n, eps, ref x, ref w1, ref w2);

    Console.WriteLine("");
    // parenthesize (n + 1) so the length prints as a number rather than concatenated digits
    Console.WriteLine("  KRONROD returns 3 vectors of length " + (n + 1) + "");
    Console.WriteLine("");
    Console.WriteLine("     I       X               WK              WG");
    Console.WriteLine("");
    for (i = 1; i <= n + 1; i++)
    {
        Console.WriteLine("  " + i.ToString(CultureInfo.InvariantCulture).PadLeft(4)
            + "  " + x[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + w1[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + w2[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14) + "");
    }

    Console.WriteLine("");
    Console.WriteLine("  Gauss Abscissas");
    Console.WriteLine("            Exact           Computed");
    Console.WriteLine("");
    for (i = 1; i <= n; i++)
    {
        if (2 * i <= n + 1)
        {
            i2 = 2 * i;
            s = -1.0;
        }
        else
        {
            i2 = 2 * (n + 1) - 2 * i;
            s = +1.0;
        }
        Console.WriteLine("  " + i.ToString(CultureInfo.InvariantCulture).PadLeft(4)
            + "  " + xg[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + (s * x[i2 - 1]).ToString(CultureInfo.InvariantCulture).PadLeft(14) + "");
    }

    Console.WriteLine("");
    Console.WriteLine("  Gauss Weights");
    Console.WriteLine("            Exact           Computed");
    Console.WriteLine("");
    for (i = 1; i <= n; i++)
    {
        if (2 * i <= n + 1)
        {
            i2 = 2 * i;
        }
        else
        {
            i2 = 2 * (n + 1) - 2 * i;
        }
        Console.WriteLine("  " + i.ToString(CultureInfo.InvariantCulture).PadLeft(4)
            + "  " + wg[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + w2[i2 - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14) + "");
    }

    Console.WriteLine("");
    Console.WriteLine("  Gauss Kronrod Abscissas");
    Console.WriteLine("            Exact           Computed");
    Console.WriteLine("");
    for (i = 1; i <= 2 * n + 1; i++)
    {
        if (i <= n + 1)
        {
            i2 = i;
            s = -1.0;
        }
        else
        {
            i2 = 2 * (n + 1) - i;
            s = +1.0;
        }
        Console.WriteLine("  " + i.ToString(CultureInfo.InvariantCulture).PadLeft(4)
            + "  " + xk[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + (s * x[i2 - 1]).ToString(CultureInfo.InvariantCulture).PadLeft(14) + "");
    }

    Console.WriteLine("");
    Console.WriteLine("  Gauss Kronrod Weights");
    Console.WriteLine("            Exact           Computed");
    Console.WriteLine("");
    for (i = 1; i <= 2 * n + 1; i++)
    {
        if (i <= n + 1)
        {
            i2 = i;
        }
        else
        {
            i2 = 2 * (n + 1) - i;
        }
        Console.WriteLine("  " + i.ToString(CultureInfo.InvariantCulture).PadLeft(4)
            + "  " + wk[i - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14)
            + "  " + w1[i2 - 1].ToString(CultureInfo.InvariantCulture).PadLeft(14) + "");
    }
}
//internal static Gaussian DAverageConditional_slow([SkipIfUniform] Gamma exp, [Proper] Gaussian d)
//{
//    Gaussian to_d = exp.Shape<=1 || exp.Rate==0 ?
//        Gaussian.Uniform()
//        : new Gaussian(MMath.Digamma(exp.Shape-1) - Math.Log(exp.Rate), MMath.Trigamma(exp.Shape));
//    //var to_d = Gaussian.Uniform();
//    for (int i = 0; i < QuadratureIterations; i++) {
//        to_d = DAverageConditional(exp, d, to_d);
//    }
//    return to_d;
//}
// to_d does not need to be Fresh. it is only used for quadrature proposal.
/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="ExpOp"]/message_doc[@name="DAverageConditional(Gamma, Gaussian, Gaussian)"]/*'/>
public static Gaussian DAverageConditional([SkipIfUniform] Gamma exp, [Proper] Gaussian d, Gaussian result)
{
    if (exp.IsUniform() || d.IsUniform() || d.IsPointMass || exp.IsPointMass || exp.Rate <= 0)
    {
        return ExpOp_Slow.DAverageConditional(exp, d);
    }
    // We use moment matching to find the best Gaussian message.
    // The moments are computed via quadrature.
    // Z = int_y f(x,y) q(y) dy =approx sum_k w_k f(x,y_k) q(y_k)/N(y_k;m,v)
    // f(x,y) = Ga(exp(y); shape, rate) = exp(y*(shape-1) -rate*exp(y))
    double[] nodes = new double[QuadratureNodeCount];
    double[] weights = new double[QuadratureNodeCount];
    double moD, voD;
    d.GetMeanAndVariance(out moD, out voD);
    double mD, vD;
    if (result.IsUniform() && exp.Shape > 1)
    {
        result = new Gaussian(MMath.Digamma(exp.Shape - 1) - Math.Log(exp.Rate), MMath.Trigamma(exp.Shape - 1));
    }
    Gaussian dMarginal = d * result;
    dMarginal.GetMeanAndVariance(out mD, out vD);
    if (vD == 0) { return ExpOp_Slow.DAverageConditional(exp, d); }
    Quadrature.GaussianNodesAndWeights(mD, vD, nodes, weights);
    if (!result.IsUniform())
    {
        // modify the weights to include q(y_k)/N(y_k;m,v)
        for (int i = 0; i < weights.Length; i++)
        {
            weights[i] *= Math.Exp(d.GetLogProb(nodes[i]) - Gaussian.GetLogProb(nodes[i], mD, vD));
        }
    }
    double Z = 0;
    double sumy = 0;
    double sumy2 = 0;
    double maxLogF = Double.NegativeInfinity;
    for (int i = 0; i < weights.Length; i++)
    {
        double y = nodes[i];
        double logf = Math.Log(weights[i]) + (exp.Shape - 1) * y - exp.Rate * Math.Exp(y);
        if (logf > maxLogF) { maxLogF = logf; }
        weights[i] = logf;
    }
    for (int i = 0; i < weights.Length; i++)
    {
        double y = nodes[i];
        double f = Math.Exp(weights[i] - maxLogF);
        double f_y = f * y;
        double fyy = f_y * y;
        Z += f;
        sumy += f_y;
        sumy2 += fyy;
    }
    if (Z == 0) { return Gaussian.Uniform(); }
    double s = 1.0 / Z;
    double mean = sumy * s;
    double var = sumy2 * s - mean * mean;
    // TODO: explain this
    if (var <= 0.0)
    {
        double quadratureGap = 0.1;
        var = 2 * vD * quadratureGap * quadratureGap;
    }
    result = new Gaussian(mean, var);
    result.SetToRatio(result, d, ForceProper);
    if (result.Precision < -1e10) { throw new InferRuntimeException("result has negative precision"); }
    if (Double.IsPositiveInfinity(result.Precision)) { throw new InferRuntimeException("result is point mass"); }
    if (Double.IsNaN(result.Precision) || Double.IsNaN(result.MeanTimesPrecision))
    {
        return ExpOp_Slow.DAverageConditional(exp, d);
    }
    return result;
}
/// <summary>
/// EP message to 'd'
/// </summary>
/// <param name="exp">Incoming message from 'exp'. Must be a proper distribution. If uniform, the result will be uniform.</param>
/// <param name="d">Incoming message from 'd'. Must be a proper distribution. If uniform, the result will be uniform.</param>
/// <param name="result">Modified to contain the outgoing message</param>
/// <returns><paramref name="result"/></returns>
/// <remarks><para>
/// The outgoing message is a distribution matching the moments of 'd' as the random arguments are varied.
/// The formula is <c>proj[p(d) sum_(exp) p(exp) factor(exp,d)]/p(d)</c>.
/// </para></remarks>
/// <exception cref="ImproperMessageException"><paramref name="exp"/> is not a proper distribution</exception>
/// <exception cref="ImproperMessageException"><paramref name="d"/> is not a proper distribution</exception>
//internal static Gaussian DAverageConditional_slow([SkipIfUniform] Gamma exp, [Proper] Gaussian d)
//{
//    Gaussian to_d = exp.Shape<=1 || exp.Rate==0 ?
//        Gaussian.Uniform()
//        : new Gaussian(MMath.Digamma(exp.Shape-1) - Math.Log(exp.Rate), MMath.Trigamma(exp.Shape));
//    //var to_d = Gaussian.Uniform();
//    for (int i = 0; i < QuadratureIterations; i++) {
//        to_d = DAverageConditional(exp, d, to_d);
//    }
//    return to_d;
//}
// to_d does not need to be Fresh. it is only used for quadrature proposal.
public static Gaussian DAverageConditional([SkipIfUniform] Gamma exp, [Proper] Gaussian d, Gaussian result)
{
    if (exp.IsUniform() || d.IsPointMass) { return Gaussian.Uniform(); }
    if (exp.IsPointMass) { return DAverageConditional(exp.Point); }
    if (exp.Rate < 0) { throw new ImproperMessageException(exp); }
    if (d.IsUniform())
    {
        // posterior for d is a shifted log-Gamma distribution:
        // exp((a-1)*d - b*exp(d)) =propto exp(a*(d+log(b)) - exp(d+log(b)))
        // we find the Gaussian with same moments.
        // u = d+log(b)
        // E[u] = digamma(a-1)
        // E[d] = E[u]-log(b) = digamma(a-1)-log(b)
        // var(d) = var(u) = trigamma(a-1)
        double lnRate = Math.Log(exp.Rate);
        return new Gaussian(MMath.Digamma(exp.Shape - 1) - lnRate, MMath.Trigamma(exp.Shape - 1));
    }
    // We use moment matching to find the best Gaussian message.
    // The moments are computed via quadrature.
    // Z = int_y f(x,y) q(y) dy =approx sum_k w_k f(x,y_k) q(y_k)/N(y_k;m,v)
    // f(x,y) = Ga(exp(y); shape, rate) = exp(y*(shape-1) -rate*exp(y))
    double[] nodes = new double[QuadratureNodeCount];
    double[] weights = new double[QuadratureNodeCount];
    double moD, voD;
    d.GetMeanAndVariance(out moD, out voD);
    double mD, vD;
    if (result.IsUniform() && exp.Shape > 1)
    {
        result = new Gaussian(MMath.Digamma(exp.Shape - 1) - Math.Log(exp.Rate), MMath.Trigamma(exp.Shape - 1));
    }
    Gaussian dMarginal = d * result;
    dMarginal.GetMeanAndVariance(out mD, out vD);
    Quadrature.GaussianNodesAndWeights(mD, vD, nodes, weights);
    if (!result.IsUniform())
    {
        // modify the weights to include q(y_k)/N(y_k;m,v)
        for (int i = 0; i < weights.Length; i++)
        {
            weights[i] *= Math.Exp(d.GetLogProb(nodes[i]) - Gaussian.GetLogProb(nodes[i], mD, vD));
        }
    }
    double Z = 0;
    double sumy = 0;
    double sumy2 = 0;
    double maxLogF = Double.NegativeInfinity;
    for (int i = 0; i < weights.Length; i++)
    {
        double y = nodes[i];
        double logf = Math.Log(weights[i]) + (exp.Shape - 1) * y - exp.Rate * Math.Exp(y);
        if (logf > maxLogF) { maxLogF = logf; }
        weights[i] = logf;
    }
    for (int i = 0; i < weights.Length; i++)
    {
        double y = nodes[i];
        double f = Math.Exp(weights[i] - maxLogF);
        double f_y = f * y;
        double fyy = f_y * y;
        Z += f;
        sumy += f_y;
        sumy2 += fyy;
    }
    if (Z == 0) { return Gaussian.Uniform(); }
    double s = 1.0 / Z;
    double mean = sumy * s;
    double var = sumy2 * s - mean * mean;
    if (var <= 0.0)
    {
        double quadratureGap = 0.1;
        var = 2 * vD * quadratureGap * quadratureGap;
    }
    result = new Gaussian(mean, var);
    if (ForceProper) { result.SetToRatioProper(result, d); }
    else { result.SetToRatio(result, d); }
    if (result.Precision < -1e10) { throw new ApplicationException("result has negative precision"); }
    if (Double.IsPositiveInfinity(result.Precision)) { throw new ApplicationException("result is point mass"); }
    if (Double.IsNaN(result.Precision) || Double.IsNaN(result.MeanTimesPrecision)) { throw new ApplicationException("result is nan"); }
    return result;
}
/// <summary>
/// EP message to 'exp'
/// </summary>
/// <param name="exp">Incoming message from 'exp'.</param>
/// <param name="d">Incoming message from 'd'. Must be a proper distribution. If uniform, the result will be uniform.</param>
/// <param name="to_d">Previous outgoing message to 'd'.</param>
/// <returns>The outgoing EP message to the 'exp' argument</returns>
/// <remarks><para>
/// The outgoing message is a distribution matching the moments of 'exp' as the random arguments are varied.
/// The formula is <c>proj[p(exp) sum_(d) p(d) factor(exp,d)]/p(exp)</c>.
/// </para></remarks>
/// <exception cref="ImproperMessageException"><paramref name="d"/> is not a proper distribution</exception>
public static Gamma ExpAverageConditional(Gamma exp, [Proper] Gaussian d, Gaussian to_d)
{
    if (d.IsPointMass) { return Gamma.PointMass(Math.Exp(d.Point)); }
    if (d.IsUniform()) { return Gamma.FromShapeAndRate(0, 0); }
    if (exp.IsPointMass)
    {
        // Z = int_y delta(x - exp(y)) N(y; my, vy) dy
        //   = int_u delta(x - u) N(log(u); my, vy)/u du
        //   = N(log(x); my, vy)/x
        // logZ = -log(x) -0.5/vy*(log(x)-my)^2
        // dlogZ/dx = -1/x -1/vy*(log(x)-my)/x
        // d2logZ/dx2 = -dlogZ/dx/x -1/vy/x^2
        // log Ga(x;a,b) = (a-1)*log(x) - bx
        // dlogGa/dx = (a-1)/x - b
        // d2logGa/dx2 = -(a-1)/x^2
        // match derivatives and solve for (a,b)
        double shape = (1 + d.GetMean() - Math.Log(exp.Point)) * d.Precision;
        double rate = d.Precision / exp.Point;
        return Gamma.FromShapeAndRate(shape, rate);
    }
    if (exp.IsUniform()) { return ExpAverageLogarithm(d); }
    if (to_d.IsUniform() && exp.Shape > 1)
    {
        to_d = new Gaussian(MMath.Digamma(exp.Shape - 1) - Math.Log(exp.Rate), MMath.Trigamma(exp.Shape - 1));
    }
    double mD, vD;
    Gaussian dMarginal = d * to_d;
    dMarginal.GetMeanAndVariance(out mD, out vD);
    double Z = 0;
    double sumy = 0;
    double sumexpy = 0;
    if (vD < 1e-6)
    {
        double m, v;
        d.GetMeanAndVariance(out m, out v);
        return Gamma.FromLogMeanAndMeanLog(m + v / 2.0, m);
    }
    //if (vD < 10)
    if (true)
    {
        // Use Gauss-Hermite quadrature
        double[] nodes = new double[QuadratureNodeCount];
        double[] weights = new double[QuadratureNodeCount];
        Quadrature.GaussianNodesAndWeights(mD, vD, nodes, weights);
        for (int i = 0; i < weights.Length; i++)
        {
            weights[i] = Math.Log(weights[i]);
        }
        if (!to_d.IsUniform())
        {
            // modify the weights to include q(y_k)/N(y_k;m,v)
            for (int i = 0; i < weights.Length; i++)
            {
                weights[i] += d.GetLogProb(nodes[i]) - dMarginal.GetLogProb(nodes[i]);
            }
        }
        double maxLogF = Double.NegativeInfinity;
        // f(x,y) = Ga(exp(y); shape, rate) = exp(y*(shape-1) -rate*exp(y))
        // Z E[x] = int_y int_x x Ga(x;a,b) delta(x - exp(y)) N(y;my,vy) dx dy
        //        = int_y exp(y) Ga(exp(y);a,b) N(y;my,vy) dy
        // Z E[log(x)] = int_y y Ga(exp(y);a,b) N(y;my,vy) dy
        for (int i = 0; i < weights.Length; i++)
        {
            double y = nodes[i];
            double logf = weights[i] + (exp.Shape - 1) * y - exp.Rate * Math.Exp(y);
            if (logf > maxLogF) { maxLogF = logf; }
            weights[i] = logf;
        }
        for (int i = 0; i < weights.Length; i++)
        {
            double y = nodes[i];
            double f = Math.Exp(weights[i] - maxLogF);
            double f_y = f * y;
            double fexpy = f * Math.Exp(y);
            Z += f;
            sumy += f_y;
            sumexpy += fexpy;
        }
    }
    else
    {
        Converter<double, double> p = delegate(double y)
        {
            return d.GetLogProb(y) + (exp.Shape - 1) * y - exp.Rate * Math.Exp(y);
        };
        double sc = Math.Sqrt(vD);
        double offset = p(mD);
        Z = Quadrature.AdaptiveClenshawCurtis(z => Math.Exp(p(sc * z + mD) - offset), 1, 16, 1e-6);
        sumy = Quadrature.AdaptiveClenshawCurtis(z => (sc * z + mD) * Math.Exp(p(sc * z + mD) - offset), 1, 16, 1e-6);
        sumexpy = Quadrature.AdaptiveClenshawCurtis(z => Math.Exp(sc * z + mD + p(sc * z + mD) - offset), 1, 16, 1e-6);
    }
    if (Z == 0) { throw new ApplicationException("Z==0"); }
    double s = 1.0 / Z;
    if (Double.IsPositiveInfinity(s)) { throw new ApplicationException("s is -inf"); }
    double meanLog = sumy * s;
    double mean = sumexpy * s;
    Gamma result = Gamma.FromMeanAndMeanLog(mean, meanLog);
    if (ForceProper) { result.SetToRatioProper(result, exp); }
    else { result.SetToRatio(result, exp); }
    if (Double.IsNaN(result.Shape) || Double.IsNaN(result.Rate)) { throw new ApplicationException("result is nan"); }
    return result;
}
/// <summary>
/// Calculate sigma(m,v) = \int N(x;m,v) logistic(x) dx
/// </summary>
/// <param name="mean">Mean</param>
/// <param name="variance">Variance</param>
/// <returns>The value of this special function.</returns>
/// <remarks><para>
/// Note <c>1-LogisticGaussian(m,v) = LogisticGaussian(-m,v)</c> which is more accurate.
/// </para><para>
/// For large v we can use the big v approximation <c>\sigma(m,v)=normcdf(m/sqrt(v+pi^2/3))</c>.
/// For small and moderate v we use Gauss-Hermite quadrature.
/// For moderate v we first find the mode of the (log concave) function since this may be quite far from m.
/// </para></remarks>
public static double LogisticGaussian(double mean, double variance)
{
    double halfVariance = 0.5 * variance;
    // use the upper bound exp(m+v/2) to prune cases that must be zero or one
    if (mean + halfVariance < log0) { return 0.0; }
    if (-mean + halfVariance < logEpsilon) { return 1.0; }
    // use the upper bound 0.5 exp(-0.5 m^2/v) to prune cases that must be zero or one
    double q = -0.5 * mean * mean / variance - MMath.Ln2;
    if (mean <= 0 && mean + variance >= 0 && q < log0) { return 0.0; }
    if (mean >= 0 && variance - mean >= 0 && q < logEpsilon) { return 1.0; }
    // sigma(|m|,v) <= 0.5 + |m| sigma'(0,v)
    // sigma'(0,v) <= N(0;0,v+8/pi)
    double d0Upper = MMath.InvSqrt2PI / Math.Sqrt(variance + 8 / Math.PI);
    if (mean * mean / (variance + 8 / Math.PI) < 2e-20 * Math.PI)
    {
        double deriv = LogisticGaussianDerivative(mean, variance);
        return 0.5 + mean * deriv;
    }
    // Handle tail cases using the following exact formulas:
    // sigma(m,v) = 1 - exp(-m+v/2) + exp(-2m+2v) - exp(-3m+9v/2) sigma(m-3v,v)
    if (-mean + variance < logEpsilon) { return 1.0 - Math.Exp(halfVariance - mean); }
    if (-3 * mean + 9 * halfVariance < logEpsilon) { return 1.0 - Math.Exp(halfVariance - mean) + Math.Exp(2 * (variance - mean)); }
    // sigma(m,v) = exp(m+v/2) - exp(2m+2v) + exp(3m + 9v/2) (1 - sigma(m+3v,v))
    if (mean + 1.5 * variance < logEpsilon) { return Math.Exp(mean + halfVariance); }
    if (2 * mean + 4 * variance < logEpsilon) { return Math.Exp(mean + halfVariance) * (1 - Math.Exp(mean + 1.5 * variance)); }
    if (variance > LogisticGaussianVarianceThreshold)
    {
        double f(double x)
        {
            return Math.Exp(MMath.LogisticLn(x) + Gaussian.GetLogProb(x, mean, variance));
        }
        double upperBound = mean + Math.Sqrt(variance);
        upperBound = Math.Max(upperBound, 10);
        return Quadrature.AdaptiveClenshawCurtis(f, upperBound, 32, 1e-10);
    }
    else
    {
        Vector nodes = Vector.Zero(LogisticGaussianQuadratureNodeCount);
        Vector weights = Vector.Zero(LogisticGaussianQuadratureNodeCount);
        double m_p, v_p;
        BigvProposal(mean, variance, out m_p, out v_p);
        Quadrature.GaussianNodesAndWeights(m_p, v_p, nodes, weights);
        double weightedIntegrand(double z)
        {
            return Math.Exp(MMath.LogisticLn(z) + Gaussian.GetLogProb(z, mean, variance) - Gaussian.GetLogProb(z, m_p, v_p));
        }
        return Integrate(weightedIntegrand, nodes, weights);
    }
}
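// Hedged usage sketch (not from the original source), assuming the routines are exposed as
// MMath.LogisticGaussian and MMath.NormalCdf as in Infer.NET. It exercises two facts stated in the
// remarks above: 1 - sigma(m,v) = sigma(-m,v), and for large v, sigma(m,v) ~= normcdf(m/sqrt(v+pi^2/3)).
public static void LogisticGaussianSketch()
{
    double m = 1.2, v = 50.0;
    double sigma = MMath.LogisticGaussian(m, v);
    Console.WriteLine($"complement check: {1 - sigma} vs {MMath.LogisticGaussian(-m, v)}");
    Console.WriteLine($"big-v check: {sigma} vs {MMath.NormalCdf(m / Math.Sqrt(v + Math.PI * Math.PI / 3))}");
}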
static void Main(string[] args)
{
    var result = Noise.EmulateNoiseOverNPeriods(300, 9, 64);
    var strResult = Noise.GetPointsAsString();
    var quadrature = Quadrature.GetQuadratureMatrix(21, 0.01, 0.01);
}