// Example 1
        /// <summary>
        /// Calculate (kth derivative of LogisticGaussian)*exp(0.5*mean^2/variance)
        /// </summary>
        /// <param name="mean">Mean of the Gaussian.</param>
        /// <param name="variance">Variance of the Gaussian.</param>
        /// <param name="k">Derivative order; must be 0, 1, or 2.</param>
        /// <returns>The scaled derivative value.</returns>
        /// <exception cref="ArgumentOutOfRangeException">If <paramref name="k"/> is not 0, 1, or 2.</exception>
        public static double LogisticGaussianRatio(double mean, double variance, int k)
        {
            if (k < 0 || k > 2)
            {
                // ArgumentOutOfRangeException (a subclass of ArgumentException) is the
                // idiomatic exception for an argument outside its valid range.
                throw new ArgumentOutOfRangeException(nameof(k), k, "invalid k (" + k + ")");
            }
            double a = mean / variance;

            // int 0.5 cosh(x(m/v+1/2))/cosh(x/2) N(x;0,v) dx
            // k = 0 integrand factor: sigma(x); k = 1: sigma(x)sigma(-x); k = 2: -tanh(x/2)sigma(x)sigma(-x).
            double f(double x)
            {
                double logSigma = MMath.LogisticLn(x);
                // Log of the extra logistic factor required for k > 0.
                double extra    = 0;
                // Sign/odd factor required for the second derivative (k = 2).
                double s        = 1;

                if (k > 0)
                {
                    extra += MMath.LogisticLn(-x);
                }
                if (k > 1)
                {
                    s = -Math.Tanh(x / 2);
                }
                // The x*a term tilts N(x;0,v); the exp(0.5*mean^2/variance) scaling in the
                // method's contract is implicit in using N(x;0,v) rather than N(x;mean,v).
                return(s * Math.Exp(logSigma + extra + x * a + Gaussian.GetLogProb(x, 0, variance)));
            }

            // Integration limit: past the integrand's mode plus one standard deviation,
            // but never below 10 so the logistic tail is fully covered.
            double upperBound = (Math.Abs(a + 0.5) - 0.5) * variance + Math.Sqrt(variance);

            upperBound = Math.Max(upperBound, 10);
            return(Quadrature.AdaptiveClenshawCurtis(f, upperBound, 32, 1e-10));
        }
// Example 2
        /// <summary>
        /// Calculate <c>\sigma'(m,v)=\int N(x;m,v)logistic'(x) dx</c>
        /// </summary>
        /// <param name="mean">Mean of the Gaussian.</param>
        /// <param name="variance">Variance of the Gaussian.</param>
        /// <returns>The value of this special function.</returns>
        /// <remarks><para>
        /// For large v we can use the big v approximation <c>\sigma'(m,v)=N(m,0,v+pi^2/3)</c>.
        /// For small and moderate v we use Gauss-Hermite quadrature.
        /// For moderate v we first find the mode of the (log concave) function since this may be quite far from m.
        /// </para></remarks>
        public static double LogisticGaussianDerivative(double mean, double variance)
        {
            double halfVariance = 0.5 * variance;

            // logistic'(x) = sigma(x)sigma(-x) is even in x, so the integral is even in m;
            // work with |m| from here on.
            mean = Math.Abs(mean);

            // use the upper bound exp(-|m|+v/2) to prune cases that must be zero
            if (-mean + halfVariance < log0)
            {
                return(0.0);
            }

            // use the upper bound 0.5 exp(-0.5 m^2/v) to prune cases that must be zero
            double q = -0.5 * mean * mean / variance - MMath.Ln2;

            if (mean <= variance && q < log0)
            {
                return(0.0);
            }
            // The integrand spreads out without bound as v -> infinity, so the integral is 0.
            if (double.IsPositiveInfinity(variance))
            {
                return(0.0);
            }

            // Handle the tail cases using the following exact formula:
            // sigma'(m,v) = exp(-m+v/2) -2 exp(-2m+2v) +3 exp(-3m+9v/2) sigma(m-3v,v) - exp(-3m+9v/2) sigma'(m-3v,v)
            // When the higher-order terms fall below logEpsilon they are dropped, giving
            // one- and two-term truncations of the series above.
            if (-mean + 1.5 * variance < logEpsilon)
            {
                return(Math.Exp(halfVariance - mean));
            }
            if (-2 * mean + 4 * variance < logEpsilon)
            {
                return(Math.Exp(halfVariance - mean) - 2 * Math.Exp(2 * (variance - mean)));
            }

            if (variance > LogisticGaussianVarianceThreshold)
            {
                // Large v: integrate sigma(x)sigma(-x) N(x;m,v) directly with adaptive Clenshaw-Curtis.
                double f(double x)
                {
                    return(Math.Exp(MMath.LogisticLn(x) + MMath.LogisticLn(-x) + Gaussian.GetLogProb(x, mean, variance)));
                }

                return(Quadrature.AdaptiveClenshawCurtis(f, 10, 32, 1e-10));
            }
            else
            {
                // Small/moderate v: importance-weighted Gaussian quadrature. BigvProposal
                // supplies a proposal N(m_p, v_p) (the log-concave integrand's mode can be
                // far from m), and the integrand is divided by the proposal density.
                Vector nodes = Vector.Zero(LogisticGaussianQuadratureNodeCount);
                Vector weights = Vector.Zero(LogisticGaussianQuadratureNodeCount);
                double m_p, v_p;
                BigvProposal(mean, variance, out m_p, out v_p);
                Quadrature.GaussianNodesAndWeights(m_p, v_p, nodes, weights);
                double weightedIntegrand(double z)
                {
                    return(Math.Exp(MMath.LogisticLn(z) + MMath.LogisticLn(-z) + Gaussian.GetLogProb(z, mean, variance) - Gaussian.GetLogProb(z, m_p, v_p)));
                }

                return(Integrate(weightedIntegrand, nodes, weights));
            }
        }
// Example 3
        /// <summary>
        /// Calculate sigma(m,v) = \int N(x;m,v) logistic(x) dx
        /// </summary>
        /// <param name="mean">Mean of the Gaussian.</param>
        /// <param name="variance">Variance of the Gaussian.</param>
        /// <returns>The value of this special function.</returns>
        /// <remarks><para>
        /// Note <c>1-LogisticGaussian(m,v) = LogisticGaussian(-m,v)</c> which is more accurate.
        /// </para><para>
        /// For large v we can use the big v approximation <c>\sigma(m,v)=normcdf(m/sqrt(v+pi^2/3))</c>.
        /// For small and moderate v we use Gauss-Hermite quadrature.
        /// For moderate v we first find the mode of the (log concave) function since this may be quite far from m.
        /// </para></remarks>
        public static double LogisticGaussian(double mean, double variance)
        {
            double halfVariance = 0.5 * variance;

            // use the upper bound exp(m+v/2) to prune cases that must be zero or one
            if (mean + halfVariance < log0)
            {
                return(0.0);
            }
            if (-mean + halfVariance < logEpsilon)
            {
                return(1.0);
            }

            // use the upper bound 0.5 exp(-0.5 m^2/v) to prune cases that must be zero or one
            double q = -0.5 * mean * mean / variance - MMath.Ln2;

            if (mean <= 0 && mean + variance >= 0 && q < log0)
            {
                return(0.0);
            }
            if (mean >= 0 && variance - mean >= 0 && q < logEpsilon)
            {
                return(1.0);
            }

            // For tiny m^2/(v+8/pi), linearize around m=0: sigma(m,v) ~= 0.5 + m*sigma'(0,v).
            // The threshold is justified by the bounds:
            // sigma(|m|,v) <= 0.5 + |m| sigma'(0,v)
            // sigma'(0,v) <= N(0;0,v+8/pi)
            if (mean * mean / (variance + 8 / Math.PI) < 2e-20 * Math.PI)
            {
                double deriv = LogisticGaussianDerivative(mean, variance);
                return(0.5 + mean * deriv);
            }

            // Handle tail cases using the following exact formulas, truncated when the
            // dropped terms fall below logEpsilon:
            // sigma(m,v) = 1 - exp(-m+v/2) + exp(-2m+2v) - exp(-3m+9v/2) sigma(m-3v,v)
            if (-mean + variance < logEpsilon)
            {
                return(1.0 - Math.Exp(halfVariance - mean));
            }
            if (-3 * mean + 9 * halfVariance < logEpsilon)
            {
                return(1.0 - Math.Exp(halfVariance - mean) + Math.Exp(2 * (variance - mean)));
            }
            // sigma(m,v) = exp(m+v/2) - exp(2m+2v) + exp(3m + 9v/2) (1 - sigma(m+3v,v))
            if (mean + 1.5 * variance < logEpsilon)
            {
                return(Math.Exp(mean + halfVariance));
            }
            if (2 * mean + 4 * variance < logEpsilon)
            {
                return(Math.Exp(mean + halfVariance) * (1 - Math.Exp(mean + 1.5 * variance)));
            }

            if (variance > LogisticGaussianVarianceThreshold)
            {
                // Large v: integrate sigma(x) N(x;m,v) directly with adaptive Clenshaw-Curtis.
                double f(double x)
                {
                    return(Math.Exp(MMath.LogisticLn(x) + Gaussian.GetLogProb(x, mean, variance)));
                }

                double upperBound = mean + Math.Sqrt(variance);
                upperBound = Math.Max(upperBound, 10);
                return(Quadrature.AdaptiveClenshawCurtis(f, upperBound, 32, 1e-10));
            }
            else
            {
                // Small/moderate v: importance-weighted Gaussian quadrature. BigvProposal
                // supplies a proposal N(m_p, v_p) (the log-concave integrand's mode can be
                // far from m), and the integrand is divided by the proposal density.
                Vector nodes = Vector.Zero(LogisticGaussianQuadratureNodeCount);
                Vector weights = Vector.Zero(LogisticGaussianQuadratureNodeCount);
                double m_p, v_p;
                BigvProposal(mean, variance, out m_p, out v_p);
                Quadrature.GaussianNodesAndWeights(m_p, v_p, nodes, weights);
                double weightedIntegrand(double z)
                {
                    return(Math.Exp(MMath.LogisticLn(z) + Gaussian.GetLogProb(z, mean, variance) - Gaussian.GetLogProb(z, m_p, v_p)));
                }

                return(Integrate(weightedIntegrand, nodes, weights));
            }
        }