/// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GaussianFromMeanAndVarianceOp"]/message_doc[@name="SampleAverageLogarithm(Gaussian, double)"]/*'/>
 public static Gaussian SampleAverageLogarithm([SkipIfUniform] Gaussian mean, double variance)
 {
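     // A known variance corresponds to a known precision of 1/variance, so delegate to GaussianOp.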
     return(GaussianOp.SampleAverageLogarithm(mean, 1.0 / variance));
 }
        /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="ArrayFromVectorOp"]/message_doc[@name="ArrayAverageConditional{GaussianList}(IList{Gaussian}, VectorGaussian, GaussianList)"]/*'/>
        /// <typeparam name="GaussianList">The type of the resulting array.</typeparam>
        public static GaussianList ArrayAverageConditional <GaussianList>(
            [NoInit] IList <Gaussian> array, [SkipIfUniform] VectorGaussian vector, GaussianList result)
            where GaussianList : IList <Gaussian>
        {
            if (result.Count != vector.Dimension)
            {
                throw new ArgumentException("vector.Dimension (" + vector.Dimension + ") != result.Count (" + result.Count + ")");
            }
            int  length       = result.Count;
            bool allPointMass = array.All(g => g.IsPointMass);

            if (allPointMass)
            {
                // efficient special case
                for (int i = 0; i < length; i++)
                {
                    double x = array[i].Point;
                    // -prec*(x-m) = -prec*x + prec*m
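                    // i.e. dlogp_i = d/dx_i log N(x; m, V) = [Precision*(m - x)]_i
                    //              = MeanTimesPrecision[i] - sum_j Precision[i,j]*x[j]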
                    double dlogp = vector.MeanTimesPrecision[i];
                    for (int j = 0; j < length; j++)
                    {
                        dlogp -= vector.Precision[i, j] * array[j].Point;
                    }
                    double ddlogp = -vector.Precision[i, i];
                    result[i] = Gaussian.FromDerivatives(x, dlogp, ddlogp, false);
                }
            }
            else if (vector.IsPointMass)
            {
                // efficient special case
                Vector mean = vector.Point;
                for (int i = 0; i < length; i++)
                {
                    result[i] = Gaussian.PointMass(mean[i]);
                }
            }
            else if (vector.IsUniform())
            {
                for (int i = 0; i < length; i++)
                {
                    result[i] = Gaussian.Uniform();
                }
            }
            else if (array.Any(g => g.IsPointMass))
            {
                // Z = N(m1; m2, V1+V2)
                // logZ = -0.5 (m1-m2)'inv(V1+V2)(m1-m2)
                // dlogZ = (m1-m2)'inv(V1+V2) dm2
                // ddlogZ = -dm2'inv(V1+V2) dm2
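                // In this notation m1,V1 come from vector and m2,V2 from the array elements, so below
                // alpha_i = dlogZ/dm2_i = meanTimesPrecision[i] and beta_i = -ddlogZ/dm2_i^2 = precision[i,i],
                // which GaussianFromAlphaBeta converts into the EP message to array[i].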
                Vector mean = Vector.Zero(length);
                PositiveDefiniteMatrix variance = new PositiveDefiniteMatrix(length, length);
                vector.GetMeanAndVariance(mean, variance);
                for (int i = 0; i < length; i++)
                {
                    if (array[i].IsUniform())
                    {
                        continue;
                    }
                    double m, v;
                    array[i].GetMeanAndVariance(out m, out v);
                    variance[i, i] += v;
                    mean[i]        -= m;
                }
                PositiveDefiniteMatrix precision = variance.Inverse();
                Vector meanTimesPrecision        = precision * mean;
                for (int i = 0; i < length; i++)
                {
                    if (array[i].IsUniform())
                    {
                        result[i] = Gaussian.FromMeanAndVariance(mean[i], variance[i, i]);
                    }
                    else
                    {
                        double alpha = meanTimesPrecision[i];
                        double beta  = precision[i, i];
                        result[i] = GaussianOp.GaussianFromAlphaBeta(array[i], alpha, beta, false);
                    }
                }
            }
            else
            {
                // Compute inv(V1+V2)*(m1-m2) as inv(V2)*inv(inv(V1) + inv(V2))*(inv(V1)*m1 + inv(V2)*m2) - inv(V2)*m2 = inv(V2)*(m - m2)
                // Compute inv(V1+V2) as inv(V2)*inv(inv(V1) + inv(V2))*inv(V2) - inv(V2)
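                // Equivalently: form the natural parameters of the product (posterior) distribution,
                // recover its mean and variance, and divide out each incoming message below.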
                PositiveDefiniteMatrix precision = (PositiveDefiniteMatrix)vector.Precision.Clone();
                Vector meanTimesPrecision        = vector.MeanTimesPrecision.Clone();
                for (int i = 0; i < length; i++)
                {
                    Gaussian g = array[i];
                    precision[i, i]       += g.Precision;
                    meanTimesPrecision[i] += g.MeanTimesPrecision;
                }
                bool fastMethod = true;
                if (fastMethod)
                {
                    bool isPosDef;
                    // this destroys precision
                    LowerTriangularMatrix precisionChol = precision.CholeskyInPlace(out isPosDef);
                    if (!isPosDef)
                    {
                        throw new PositiveDefiniteMatrixException();
                    }
                    // variance = inv(precisionChol*precisionChol') = inv(precisionChol)'*inv(precisionChol) = varianceChol*varianceChol'
                    // this destroys meanTimesPrecision
                    var mean = meanTimesPrecision.PredivideBy(precisionChol);
                    mean = mean.PredivideByTranspose(precisionChol);
                    var varianceCholTranspose = precisionChol;
                    // this destroys precisionChol
                    varianceCholTranspose.SetToInverse(precisionChol);
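                    // Since variance = varianceCholTranspose' * varianceCholTranspose,
                    // variance[i,i] is the squared length of column i of varianceCholTranspose.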
                    for (int i = 0; i < length; i++)
                    {
                        Gaussian g           = array[i];
                        double   variance_ii = GetSquaredLengthOfColumn(varianceCholTranspose, i);
                        // works when g is uniform, but not when g is point mass
                        result[i] = Gaussian.FromMeanAndVariance(mean[i], variance_ii) / g;
                    }
                }
                else
                {
                    // equivalent to above, but slower
                    PositiveDefiniteMatrix variance = precision.Inverse();
                    var mean = variance * meanTimesPrecision;
                    for (int i = 0; i < length; i++)
                    {
                        Gaussian g = array[i];
                        // works when g is uniform, but not when g is point mass
                        result[i] = Gaussian.FromMeanAndVariance(mean[i], variance[i, i]) / g;
                    }
                }
            }
            return(result);
        }
        /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="ArrayFromVectorOp"]/message_doc[@name="ArrayAverageConditional{GaussianList}(IList{Gaussian}, VectorGaussianMoments, GaussianList)"]/*'/>
        /// <typeparam name="GaussianList">The type of the resulting array.</typeparam>
        public static GaussianList ArrayAverageConditional <GaussianList>(
            [NoInit] IList <Gaussian> array, [SkipIfUniform] VectorGaussianMoments vector, GaussianList result)
            where GaussianList : IList <Gaussian>
        {
            if (result.Count != vector.Dimension)
            {
                throw new ArgumentException("vector.Dimension (" + vector.Dimension + ") != result.Count (" + result.Count + ")");
            }
            int length = result.Count;

            if (array.All(g => g.IsUniform()) || vector.IsUniform() || vector.IsPointMass)
            {
                for (int i = 0; i < length; i++)
                {
                    result[i] = Gaussian.FromMeanAndVariance(vector.Mean[i], vector.Variance[i, i]);
                }
            }
            else
            {
                // Z = N(m1; m2, V1+V2)
                // logZ = -0.5 (m1-m2)'inv(V1+V2)(m1-m2)
                // dlogZ = (m1-m2)'inv(V1+V2) dm2
                // ddlogZ = -dm2'inv(V1+V2) dm2
                VectorGaussianMoments product = new VectorGaussianMoments(length);
                Vector mean = product.Mean;
                PositiveDefiniteMatrix variance = product.Variance;
                vector.GetMeanAndVariance(mean, variance);
                for (int i = 0; i < length; i++)
                {
                    double m, v;
                    array[i].GetMeanAndVariance(out m, out v);
                    variance[i, i] += v;
                    mean[i]        -= m;
                }
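                // Add a small diagonal jitter before inverting, to keep the summed covariance numerically invertible.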
                double offset = 1e-10;
                for (int i = 0; i < length; i++)
                {
                    variance[i, i] += offset;
                }
                PositiveDefiniteMatrix precision = variance.Inverse();
                Vector meanTimesPrecision        = precision * mean;
                double precisionThreshold        = 1;
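                // Elements with weak (low-precision) incoming messages use the product-then-divide route,
                // which fails near point masses; high-precision elements use the alpha/beta route instead.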
                if (array.Any(g => g.Precision <= precisionThreshold))
                {
                    // compute the posterior mean and variance
                    product.SetToProduct(vector, array);
                    mean     = product.Mean;
                    variance = product.Variance;
                }
                for (int i = 0; i < length; i++)
                {
                    if (array[i].Precision <= precisionThreshold)
                    {
                        result[i] = Gaussian.FromMeanAndVariance(mean[i], variance[i, i]) / array[i];
                    }
                    else
                    {
                        double alpha = meanTimesPrecision[i];
                        double beta  = precision[i, i];
                        if (double.IsNaN(alpha))
                        {
                            throw new Exception("alpha is NaN");
                        }
                        if (double.IsNaN(beta))
                        {
                            throw new Exception("beta is NaN");
                        }
                        result[i] = GaussianOp.GaussianFromAlphaBeta(array[i], alpha, beta, false);
                        if (double.IsNaN(result[i].MeanTimesPrecision))
                        {
                            throw new Exception("result is NaN");
                        }
                        if (result[i].Precision < 0)
                        {
                            throw new Exception("result is improper");
                        }
                    }
                }
            }
            return(result);
        }
        public void GaussianOpPrecision()
        {
            Gamma    precMsg, precMsg2;
            Gaussian X, Mean;
            Gamma    Precision;

            X         = Gaussian.FromNatural(-1.5098177152950143E-09, 1.061649960537027E-168);
            Mean      = Gaussian.FromNatural(-3.6177299471249587, 0.11664740799025652);
            Precision = Gamma.FromShapeAndRate(306.39423695125572, 1.8326832031565403E+170);
            precMsg   = Gamma.PointMass(0);
            precMsg2  = GaussianOp_Slow.PrecisionAverageConditional(X, Mean, Precision);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);
            precMsg2 = GaussianOp.PrecisionAverageConditional(X, Mean, Precision);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);

            X         = Gaussian.FromNatural(-0.55657497231637854, 6.6259783218464713E-141);
            Mean      = Gaussian.FromNatural(-2.9330116542965374, 0.07513822741674292);
            Precision = Gamma.FromShapeAndRate(308.8184220331475, 4.6489382805051884E+142);
            precMsg   = Gamma.FromShapeAndRate(1.5000000000000628, 3.5279086383286634E+279);
            precMsg2  = GaussianOp_Slow.PrecisionAverageConditional(X, Mean, Precision);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);
            precMsg2 = GaussianOp.PrecisionAverageConditional(X, Mean, Precision);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);

            X         = Gaussian.FromNatural(0, 1.0705890985886898E-153);
            Mean      = Gaussian.PointMass(0);
            Precision = Gamma.FromShapeAndRate(1.6461630749684018, 1.0021354807958952E+153);
            precMsg   = Gamma.FromShapeAndRate(1.3230815374839406, 5.7102212927459039E+151);
            precMsg2  = GaussianOp_Slow.PrecisionAverageConditional(X, Mean, Precision);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);
            precMsg2 = GaussianOp.PrecisionAverageConditional(X, Mean, Precision);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);

            GaussianOp.PrecisionAverageConditional(Gaussian.FromNatural(0, 0.00849303091340374), Gaussian.FromNatural(0.303940178036662, 0.0357912415805232),
                                                   Gamma.FromNatural(0.870172077263786 - 1, 0.241027170904459));
            GaussianOp.PrecisionAverageConditional(Gaussian.FromNatural(0, 0.932143343115292), Gaussian.FromNatural(0.803368837946732, 0.096549750816333),
                                                   Gamma.FromNatural(0.63591693650741 - 1, 0.728459389753854));
            GaussianOp.PrecisionAverageConditional(Gaussian.FromNatural(0, 0.799724777601531), Gaussian.FromNatural(0.351882387116497, 0.0795619408970522),
                                                   Gamma.FromNatural(0.0398852019756498 - 1, 0.260567798400562));
            GaussianOp.PrecisionAverageConditional(Gaussian.FromNatural(0, 0.826197353576402), Gaussian.FromNatural(0.655970732055591, 0.125333868956814),
                                                   Gamma.FromNatural(0.202543332801453 - 1, 0.147645744563847));

            precMsg = GaussianOp.PrecisionAverageConditional(new Gaussian(-6.235e+207, 1.947e+209), Gaussian.PointMass(11), Gamma.PointMass(7));
            Assert.True(!double.IsNaN(precMsg.Rate));

            Gaussian X0         = Gaussian.FromMeanAndVariance(3, 0.5);
            Gaussian Mean0      = Gaussian.FromMeanAndVariance(7, 1.0 / 3);
            Gamma    Precision0 = Gamma.FromShapeAndScale(3, 3);

            precMsg = GaussianOp_Slow.PrecisionAverageConditional(Gaussian.FromNatural(0.010158033515400506, 0.0041117304509528533),
                                                                  Gaussian.FromNatural(33.157651455559929, 13.955304749880149),
                                                                  Gamma.FromShapeAndRate(7.1611372018172794, 1.8190207317123008));

            precMsg = GaussianOp_Slow.PrecisionAverageConditional(Gaussian.FromNatural(-0.020177353724675218, 0.0080005002339157711),
                                                                  Gaussian.FromNatural(-12.303440746896294, 4.6439574387849714),
                                                                  Gamma.FromShapeAndRate(5.6778922774773992, 1.0667129560350435));

            precMsg = GaussianOp_Slow.PrecisionAverageConditional(Gaussian.PointMass(248), Gaussian.FromNatural(0.099086933095776319, 0.00032349393599347853),
                                                                  Gamma.FromShapeAndRate(0.001, 0.001));
            precMsg2 = GaussianOp.PrecisionAverageConditional_slow(Gaussian.PointMass(248), Gaussian.FromNatural(0.099086933095776319, 0.00032349393599347853),
                                                                   Gamma.FromShapeAndRate(0.001, 0.001));
            Assert.True(precMsg.MaxDiff(precMsg2) < 0.3);

            precMsg =
                GaussianOp_Slow.PrecisionAverageConditional(Gaussian.FromNatural(-0.21769764449791806, 0.0000024898838689952023),
                                                            Gaussian.FromNatural(0, 0.5), Gamma.FromShapeAndRate(5, 5));
            precMsg2 =
                GaussianOp.PrecisionAverageConditional_slow(Gaussian.FromNatural(-0.21769764449791806, 0.0000024898838689952023),
                                                            Gaussian.FromNatural(0, 0.5), Gamma.FromShapeAndRate(5, 5));
            //Console.WriteLine("{0} should be {1}", precMsg2, precMsg);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);
            Gamma precMsg3 =
                GaussianOp_Laplace.PrecisionAverageConditional_slow(Gaussian.FromNatural(-0.21769764449791806, 0.0000024898838689952023),
                                                                    Gaussian.FromNatural(0, 0.5), Gamma.FromShapeAndRate(5, 5));

            precMsg2 =
                GaussianOp.PrecisionAverageConditional(Gaussian.FromNatural(-0.21769764449791806, 0.0000024898838689952023),
                                                       Gaussian.FromNatural(0, 0.5), Gamma.FromShapeAndRate(5, 5));
            //Console.WriteLine("{0} should be {1}", precMsg2, precMsg);
            Assert.True(precMsg.MaxDiff(precMsg2) < 1e-4);

            Assert.True(GaussianOp.PrecisionAverageConditional_slow(Gaussian.FromNatural(-2.3874057896477092, 0.0070584383295080044),
                                                                    Gaussian.FromNatural(1.3999879871144227, 0.547354438587195), Gamma.FromShapeAndRate(3, 1))
                        .MaxDiff(Gamma.FromShapeAndRate(1.421, 55546)) < 10);

            // Unknown precision
            if (GaussianOp.ForceProper)
            {
                Assert.True(GaussianOp.PrecisionAverageConditional_slow(X0, Mean0, Precision0).MaxDiff(Gamma.FromShapeAndRate(1, 0.3632)) < 1e-4);
            }
            else
            {
                Assert.True(GaussianOp.PrecisionAverageConditional_slow(X0, Mean0, Precision0).MaxDiff(Gamma.FromShapeAndRate(-0.96304, -0.092572)) < 1e-4);
            }
            if (GaussianOp.ForceProper)
            {
                Assert.True(GaussianOp.PrecisionAverageConditional_slow(Gaussian.PointMass(3.0), Mean0, Precision0).MaxDiff(Gamma.FromShapeAndRate(1, 4.13824)) < 1e-4);
            }
            else
            {
                Assert.True(GaussianOp.PrecisionAverageConditional_slow(Gaussian.PointMass(3.0), Mean0, Precision0).MaxDiff(Gamma.FromShapeAndRate(-0.24693, 2.2797)) < 1e-4);
            }
            Assert.True(GaussianOp.PrecisionAverageConditional(3.0, 7.0).MaxDiff(Gamma.FromShapeAndRate(1.5, 8.0)) < 1e-4);
            Assert.True(GaussianOp.PrecisionAverageConditional_slow(new Gaussian(), Gaussian.PointMass(7.0), Precision0).MaxDiff(Gamma.FromShapeAndRate(1.0, 0.0)) < 1e-4);
        }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="MeanAverageLogarithm(ISparseList{double}, ISparseList{double}, SparseGaussianList)"]/*'/>
 public static SparseGaussianList MeanAverageLogarithm(ISparseList <double> sample, ISparseList <double> precision, SparseGaussianList result)
 {
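     // Apply the scalar GaussianOp update elementwise; SetToFunction preserves sparsity,
     // as verified by the SparseCount assertions in SparseGaussianListFactor below.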
     result.SetToFunction(sample, precision, (s, p) => GaussianOp.MeanAverageLogarithm(s, p));
     return(result);
 }
        public void SparseGaussianListFactor()
        {
            SparseGaussianList.DefaultTolerance = 1e-10;
            var calcSuffix     = ": calculation differs between sparse and dense";
            var sparsitySuffix = ": result is not sparse as expected";
            var calcErrMsg     = "";
            var sparsityErrMsg = "";
            var tolerance      = 1e-10;

            Rand.Restart(12347);
            int listSize = 50;

            // True distribution for the means
            var sparseMeanDist = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(1, 2), tolerance);

            sparseMeanDist[3] = Gaussian.FromMeanAndPrecision(4, 5);
            sparseMeanDist[6] = Gaussian.FromMeanAndPrecision(7, 8);
            var meanDist        = sparseMeanDist.ToArray();
            var sparseMeanPoint = SparseList <double> .Constant(listSize, 0.1);

            sparseMeanPoint[3] = 0.7;
            sparseMeanPoint[6] = 0.8;
            var meanPoint = sparseMeanPoint.ToArray();

            // True distribution for the precisions
            var sparsePrecDist = SparseGammaList.Constant(listSize, Gamma.FromShapeAndRate(1.1, 1.2), tolerance);

            sparsePrecDist[3] = Gamma.FromShapeAndRate(2.3, 2.4);
            sparsePrecDist[6] = Gamma.FromShapeAndRate(3.4, 4.5);
            var precDist        = sparsePrecDist.ToArray();
            var sparsePrecPoint = SparseList <double> .Constant(listSize, 0.1);

            sparsePrecPoint[3] = 5.6;
            sparsePrecPoint[6] = 0.5;
            var precPoint = sparsePrecPoint.ToArray();

            var sparseSampleDist = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 1.5), tolerance);

            sparseSampleDist[3] = Gaussian.FromMeanAndPrecision(-0.5, 2.0);
            sparseSampleDist[9] = Gaussian.FromMeanAndPrecision(1.6, 0.4);
            var sampleDist        = sparseSampleDist.ToArray();
            var sparseSamplePoint = SparseList <double> .Constant(listSize, 0.5);

            sparseSamplePoint[3] = 0.1;
            sparseSamplePoint[9] = 2.3;
            var samplePoint = sparseSamplePoint.ToArray();

            var toSparseSampleDist = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(-0.2, 0.3), tolerance);

            toSparseSampleDist[3] = Gaussian.FromMeanAndPrecision(2.1, 3.2);
            toSparseSampleDist[4] = Gaussian.FromMeanAndPrecision(1.3, 0.7);
            var toSampleDist = toSparseSampleDist.ToArray();

            var toSparsePrecDist = SparseGammaList.Constant(listSize, Gamma.FromShapeAndRate(2.3, 3.4), tolerance);

            toSparsePrecDist[3] = Gamma.FromShapeAndRate(3.4, 4.5);
            toSparsePrecDist[4] = Gamma.FromShapeAndRate(5.6, 6.7);
            var toPrecDist = toSparsePrecDist.ToArray();

            // ---------------------------
            // Check average log factor
            // ---------------------------
            calcErrMsg = "Average log factor" + calcSuffix;
            // Dist, dist, dist
            var sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSampleDist, sparseMeanDist, sparsePrecDist);
            var avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(sampleDist[i], meanDist[i], precDist[i])).Sum();

            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Dist, dist, point
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSampleDist, sparseMeanDist, sparsePrecPoint);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(sampleDist[i], meanDist[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Dist, point, dist
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSampleDist, sparseMeanPoint, sparsePrecDist);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(sampleDist[i], meanPoint[i], precDist[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Dist, point, point
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSampleDist, sparseMeanPoint, sparsePrecPoint);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(sampleDist[i], meanPoint[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Point, dist, dist
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSamplePoint, sparseMeanDist, sparsePrecDist);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(samplePoint[i], meanDist[i], precDist[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Point, dist, point
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSamplePoint, sparseMeanDist, sparsePrecPoint);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(samplePoint[i], meanDist[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Point, point, dist
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSamplePoint, sparseMeanPoint, sparsePrecDist);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(samplePoint[i], meanPoint[i], precDist[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);
            // Point, point, point
            sparseAvgLog = SparseGaussianListOp.AverageLogFactor(sparseSamplePoint, sparseMeanPoint, sparsePrecPoint);
            avgLog       = Util.ArrayInit(listSize, i => GaussianOp.AverageLogFactor(samplePoint[i], meanPoint[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(avgLog - sparseAvgLog) < tolerance, calcErrMsg);

            // ---------------------------
            // Check log average factor
            // ---------------------------
            calcErrMsg = "Log average factor" + calcSuffix;
            var sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSampleDist, sparseMeanDist, sparsePrecDist, toSparsePrecDist);
            var logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(sampleDist[i], meanDist[i], precDist[i], toPrecDist[i])).Sum();

            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Dist, dist, point
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSampleDist, sparseMeanDist, sparsePrecPoint);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(sampleDist[i], meanDist[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Dist, point, dist
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSampleDist, sparseMeanPoint, sparsePrecDist, toSparsePrecDist);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(sampleDist[i], meanPoint[i], precDist[i], toPrecDist[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Dist, point, point
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSampleDist, sparseMeanPoint, sparsePrecPoint);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(sampleDist[i], meanPoint[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Point, dist, dist
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSamplePoint, sparseMeanDist, sparsePrecDist, toSparsePrecDist);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(samplePoint[i], meanDist[i], precDist[i], toPrecDist[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Point, dist, point
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSamplePoint, sparseMeanDist, sparsePrecPoint);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(samplePoint[i], meanDist[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Point, point, dist
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSamplePoint, sparseMeanPoint, sparsePrecDist);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(samplePoint[i], meanPoint[i], precDist[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);
            // Point, point, point
            sparseLogAvg = SparseGaussianListOp.LogAverageFactor(sparseSamplePoint, sparseMeanPoint, sparsePrecPoint);
            logAvg       = Util.ArrayInit(listSize, i => GaussianOp.LogAverageFactor(samplePoint[i], meanPoint[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(logAvg - sparseLogAvg) < tolerance, calcErrMsg);

            // ---------------------------
            // Check log evidence ratio
            // ---------------------------
            calcErrMsg = "Log evidence ratio" + calcSuffix;
            var sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSampleDist, sparseMeanDist, sparsePrecDist, toSparseSampleDist, toSparsePrecDist);
            var evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(sampleDist[i], meanDist[i], precDist[i], toSampleDist[i], toPrecDist[i])).Sum();

            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Dist, dist, point
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSampleDist, sparseMeanDist, sparsePrecPoint);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(sampleDist[i], meanDist[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Dist, point, dist
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSampleDist, sparseMeanPoint, sparsePrecDist, toSparseSampleDist, toSparsePrecDist);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(sampleDist[i], meanPoint[i], precDist[i], toSampleDist[i], toPrecDist[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Dist, point, point
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSampleDist, sparseMeanPoint, sparsePrecPoint);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(sampleDist[i], meanPoint[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Point, dist, dist
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSamplePoint, sparseMeanDist, sparsePrecDist, toSparsePrecDist);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(samplePoint[i], meanDist[i], precDist[i], toPrecDist[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Point, dist, point
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSamplePoint, sparseMeanDist, sparsePrecPoint);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(samplePoint[i], meanDist[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Point, point, dist
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSamplePoint, sparseMeanPoint, sparsePrecDist);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(samplePoint[i], meanPoint[i], precDist[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);
            // Point, point, point
            sparseEvidRat = SparseGaussianListOp.LogEvidenceRatio(sparseSamplePoint, sparseMeanPoint, sparsePrecPoint);
            evidRat       = Util.ArrayInit(listSize, i => GaussianOp.LogEvidenceRatio(samplePoint[i], meanPoint[i], precPoint[i])).Sum();
            TAssert.True(System.Math.Abs(evidRat - sparseEvidRat) < tolerance, calcErrMsg);

            // ---------------------------
            // Check SampleAverageConditional
            // ---------------------------
            calcErrMsg     = "SampleAverageConditional" + calcSuffix;
            sparsityErrMsg = "SampleAverageConditional" + sparsitySuffix;
            // Use different common value to ensure this gets properly set
            var sparseSampleAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);

            sparseSampleAvgConditional = SparseGaussianListOp.SampleAverageConditional(sparseSampleDist, sparseMeanDist, sparsePrecDist, toSparsePrecDist, sparseSampleAvgConditional);
            var sampleAvgConditional = Util.ArrayInit(listSize, i => GaussianOp.SampleAverageConditional(sampleDist[i], meanDist[i], precDist[i], toPrecDist[i]));

            TAssert.True(3 == sparseSampleAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseSampleAvgConditional.MaxDiff(sampleAvgConditional) < tolerance, calcErrMsg);

            sparseSampleAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);
            sparseSampleAvgConditional = SparseGaussianListOp.SampleAverageConditional(sparseSampleDist, sparseMeanPoint, sparsePrecDist, toSparsePrecDist, sparseSampleAvgConditional);
            sampleAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.SampleAverageConditional(sampleDist[i], meanPoint[i], precDist[i], toPrecDist[i]));
            TAssert.True(3 == sparseSampleAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseSampleAvgConditional.MaxDiff(sampleAvgConditional) < tolerance, calcErrMsg);

            sparseSampleAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);
            sparseSampleAvgConditional = SparseGaussianListOp.SampleAverageConditional(sparseMeanDist, sparsePrecPoint, sparseSampleAvgConditional);
            sampleAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.SampleAverageConditional(meanDist[i], precPoint[i]));
            TAssert.True(2 == sparseSampleAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseSampleAvgConditional.MaxDiff(sampleAvgConditional) < tolerance, calcErrMsg);

            sparseSampleAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);
            sparseSampleAvgConditional = SparseGaussianListOp.SampleAverageConditional(sparseMeanPoint, sparsePrecPoint, sparseSampleAvgConditional);
            sampleAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.SampleAverageConditional(meanPoint[i], precPoint[i]));
            TAssert.True(2 == sparseSampleAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseSampleAvgConditional.MaxDiff(sampleAvgConditional) < tolerance, calcErrMsg);

            // ---------------------------
            // Check MeanAverageConditional
            // ---------------------------
            calcErrMsg     = "MeanAverageConditional" + calcSuffix;
            sparsityErrMsg = "MeanAverageConditional" + sparsitySuffix;
            // Use different common value to ensure this gets properly set
            var sparseMeanAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);

            sparseMeanAvgConditional = SparseGaussianListOp.MeanAverageConditional(sparseSampleDist, sparseMeanDist, sparsePrecDist, toSparsePrecDist, sparseMeanAvgConditional);
            var meanAvgConditional = Util.ArrayInit(listSize, i => GaussianOp.MeanAverageConditional(sampleDist[i], meanDist[i], precDist[i], toPrecDist[i]));

            TAssert.True(3 == sparseMeanAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseMeanAvgConditional.MaxDiff(meanAvgConditional) < tolerance, calcErrMsg);

            sparseMeanAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);
            sparseMeanAvgConditional = SparseGaussianListOp.MeanAverageConditional(sparseSamplePoint, sparseMeanDist, sparsePrecDist, toSparsePrecDist, sparseMeanAvgConditional);
            meanAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.MeanAverageConditional(samplePoint[i], meanDist[i], precDist[i], toPrecDist[i]));
            TAssert.True(3 == sparseMeanAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseMeanAvgConditional.MaxDiff(meanAvgConditional) < tolerance, calcErrMsg);

            sparseMeanAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);
            sparseMeanAvgConditional = SparseGaussianListOp.MeanAverageConditional(sparseSampleDist, sparsePrecPoint, sparseMeanAvgConditional);
            meanAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.MeanAverageConditional(sampleDist[i], precPoint[i]));
            TAssert.True(3 == sparseMeanAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseMeanAvgConditional.MaxDiff(meanAvgConditional) < tolerance, calcErrMsg);

            sparseMeanAvgConditional = SparseGaussianList.Constant(listSize, Gaussian.FromMeanAndPrecision(0.5, 0.6), tolerance);
            sparseMeanAvgConditional = SparseGaussianListOp.MeanAverageConditional(sparseSamplePoint, sparsePrecPoint, sparseMeanAvgConditional);
            meanAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.MeanAverageConditional(samplePoint[i], precPoint[i]));
            TAssert.True(3 == sparseMeanAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparseMeanAvgConditional.MaxDiff(meanAvgConditional) < tolerance, calcErrMsg);

            // ---------------------------
            // Check PrecisionAverageConditional
            // ---------------------------
            calcErrMsg     = "PrecisionAverageConditional" + calcSuffix;
            sparsityErrMsg = "PrecisionAverageConditional" + sparsitySuffix;
            // Use different common value to ensure this gets properly set
            var sparsePrecAvgConditional = SparseGammaList.Constant(listSize, Gamma.FromShapeAndRate(2.1, 3.2), tolerance);

            sparsePrecAvgConditional = SparseGaussianListOp.PrecisionAverageConditional(sparseSampleDist, sparseMeanDist, sparsePrecDist, sparsePrecAvgConditional);
            var precAvgConditional = Util.ArrayInit(listSize, i => GaussianOp.PrecisionAverageConditional(sampleDist[i], meanDist[i], precDist[i]));

            TAssert.True(3 == sparsePrecAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparsePrecAvgConditional.MaxDiff(precAvgConditional) < tolerance, calcErrMsg);

            sparsePrecAvgConditional = SparseGammaList.Constant(listSize, Gamma.FromShapeAndRate(2.1, 3.2), tolerance);
            sparsePrecAvgConditional = SparseGaussianListOp.PrecisionAverageConditional(sparseSamplePoint, sparseMeanDist, sparsePrecDist, sparsePrecAvgConditional);
            precAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.PrecisionAverageConditional(Gaussian.PointMass(samplePoint[i]), meanDist[i], precDist[i]));
            TAssert.True(3 == sparsePrecAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparsePrecAvgConditional.MaxDiff(precAvgConditional) < tolerance, calcErrMsg);

            sparsePrecAvgConditional = SparseGammaList.Constant(listSize, Gamma.FromShapeAndRate(2.1, 3.2), tolerance);
            sparsePrecAvgConditional = SparseGaussianListOp.PrecisionAverageConditional(sparseSampleDist, sparseMeanPoint, sparsePrecDist, sparsePrecAvgConditional);
            precAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.PrecisionAverageConditional(sampleDist[i], Gaussian.PointMass(meanPoint[i]), precDist[i]));
            TAssert.True(3 == sparsePrecAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparsePrecAvgConditional.MaxDiff(precAvgConditional) < tolerance, calcErrMsg);

            sparsePrecAvgConditional = SparseGammaList.Constant(listSize, Gamma.FromShapeAndRate(2.1, 3.2), tolerance);
            sparsePrecAvgConditional = SparseGaussianListOp.PrecisionAverageConditional(sparseSamplePoint, sparseMeanPoint, sparsePrecAvgConditional);
            precAvgConditional       = Util.ArrayInit(listSize, i => GaussianOp.PrecisionAverageConditional(samplePoint[i], meanPoint[i]));
            TAssert.True(3 == sparsePrecAvgConditional.SparseCount, sparsityErrMsg);
            TAssert.True(sparsePrecAvgConditional.MaxDiff(precAvgConditional) < tolerance, calcErrMsg);
        }
        public void GaussianOpLogAverageFactor()
        {
            Gaussian uniform    = new Gaussian();
            Gaussian X0         = Gaussian.FromMeanAndVariance(3, 0.5);
            Gaussian Mean0      = Gaussian.FromMeanAndVariance(7, 1.0 / 3);
            Gamma    Precision0 = Gamma.FromShapeAndScale(3, 3);

            // Fixed precision
            Gamma    Precision = Gamma.PointMass(3);
            Gaussian X         = X0;
            Gaussian Mean      = uniform;

            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), 0, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), 0, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), 0, 1e-4) < 1e-4);
            Mean = Mean0;
            // in matlab: normpdfln(3,7,[],0.5+1/3+1/3)
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -7.8532, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), -7.8532, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), -7.8532, 1e-4) < 1e-4);
            Mean = Gaussian.PointMass(Mean0.GetMean());
            // in matlab: normpdfln(3,7,[],0.5+1/3)
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -10.42777775, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), -10.42777775, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), -10.42777775, 1e-4) < 1e-4);
            X = uniform;
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), 0, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), 0, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), 0, 1e-4) < 1e-4);

            // Unknown precision
            Precision = Precision0;
            X         = X0;
            Mean      = Mean0;
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) PrecisionAverageConditional(precisionMessage);
            // in matlab: log(t_normal_exact(mx-my,vx+vy,a+1,b))
            //            log(t_normal_exact(3-7,0.5+1/3,3,1/3))
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -8.4363, 1e-4) < 1e-4);
            Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), -8.4363, 1e-4) < 1e-4);
            Mean = Gaussian.PointMass(Mean0.GetMean());
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) PrecisionAverageConditional(precisionMessage);
            // in matlab: log(t_normal_exact(3-7,0.5,3,1/3))
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -9.9890, 1e-4) < 1e-4);
            X    = Gaussian.PointMass(X0.GetMean());
            Mean = Mean0;
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) PrecisionAverageConditional(precisionMessage);
            // in matlab: log(t_normal_exact(3-7,1/3,3,1/3))
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -10.478382, 1e-4) < 1e-4);
            X    = Gaussian.PointMass(X0.GetMean());
            Mean = Gaussian.PointMass(Mean0.GetMean());
            // in matlab: log(t_normal_exact(3-7,1e-4,3,1/3)) or tpdfln(3-7,0,2*1/3,2*3+1)
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -11.1278713, 1e-4) < 1e-4);
            X = uniform;
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), 0, 1e-4) < 1e-4);

            // uniform precision
            // the answer should always be Double.PositiveInfinity
            Precision = Gamma.Uniform();
            X         = X0;
            Mean      = Mean0;
            Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), Double.PositiveInfinity, 1e-4) < 1e-4);

            Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(new Gaussian(-0.641, 9.617e-22), Gaussian.PointMass(-1), new Gamma(1, 1)), -1.133394734344457, 1e-8) <
                        1e-4);
            GaussianOp_Slow.LogAverageFactor(new Gaussian(8.156, 9.653), Gaussian.PointMass(-1), new Gamma(1, 1));
        }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="MeanAverageConditional(SparseGaussianList, ISparseList{double}, SparseGaussianList)"]/*'/>
 public static SparseGaussianList MeanAverageConditional([SkipIfUniform] SparseGaussianList sample, ISparseList <double> precision, SparseGaussianList result)
 {
     result.SetToFunction(sample, precision, (s, p) => GaussianOp.MeanAverageConditional(s, p));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="MeanAverageConditional(ISparseList{double}, SparseGaussianList, SparseGammaList, SparseGammaList, SparseGaussianList)"]/*'/>
 public static SparseGaussianList MeanAverageConditional(
     ISparseList <double> sample, [SkipIfUniform] SparseGaussianList mean, [SkipIfUniform] SparseGammaList precision, SparseGammaList to_precision, SparseGaussianList result)
 {
     result.SetToFunction(sample, mean, precision, to_precision, (s, m, p, tp) => GaussianOp.MeanAverageConditional(s, m, p, tp));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="PrecisionAverageConditional(ISparseList{double}, ISparseList{double}, SparseGammaList)"]/*'/>
 public static SparseGammaList PrecisionAverageConditional(ISparseList <double> sample, ISparseList <double> mean, SparseGammaList result)
 {
     result.SetToFunction(sample, mean, (s, m) => GaussianOp.PrecisionAverageConditional(s, m));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="SampleAverageConditional(SparseGaussianList, ISparseList{double}, SparseGaussianList)"]/*'/>
 public static SparseGaussianList SampleAverageConditional([SkipIfUniform] SparseGaussianList mean, ISparseList <double> precision, SparseGaussianList result)
 {
     result.SetToFunction(mean, precision, (m, p) => GaussianOp.SampleAverageConditional(m, p));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="PrecisionAverageLogarithm(ISparseList{double}, SparseGaussianList, SparseGammaList)"]/*'/>
 public static SparseGammaList PrecisionAverageLogarithm(ISparseList <double> sample, [Proper] SparseGaussianList mean, SparseGammaList result)
 {
     result.SetToFunction(sample, mean, (s, m) => GaussianOp.PrecisionAverageLogarithm(s, m));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="SampleAverageLogarithm(ISparseList{double}, SparseGammaList, SparseGaussianList)"]/*'/>
 public static SparseGaussianList SampleAverageLogarithm(ISparseList <double> mean, [Proper] SparseGammaList precision, SparseGaussianList result)
 {
     result.SetToFunction(mean, precision, (m, p) => GaussianOp.SampleAverageLogarithm(m, p));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="MeanAverageLogarithm(SparseGaussianList, SparseGammaList, SparseGaussianList)"]/*'/>
 public static SparseGaussianList MeanAverageLogarithm([Proper] SparseGaussianList sample, [Proper] SparseGammaList precision, SparseGaussianList result)
 {
     result.SetToFunction(sample, precision, (s, p) => GaussianOp.MeanAverageLogarithm(s, p));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GaussianFromMeanAndVarianceOp"]/message_doc[@name="SampleAverageConditional(Gaussian, double)"]/*'/>
 public static Gaussian SampleAverageConditional([SkipIfUniform] Gaussian mean, double variance)
 {
     return(GaussianOp.SampleAverageConditional(mean, 1 / variance));
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="SparseGaussianListOp"]/message_doc[@name="PrecisionAverageConditional(ISparseList{double}, SparseGaussianList, SparseGammaList, SparseGammaList)"]/*'/>
 public static SparseGammaList PrecisionAverageConditional(
     ISparseList <double> sample, [SkipIfUniform] SparseGaussianList mean, [SkipIfUniform] SparseGammaList precision, SparseGammaList result)
 {
     result.SetToFunction(sample, mean, precision, (s, m, p) => GaussianOp.PrecisionAverageConditional_slow(Gaussian.PointMass(s), m, p));
     return(result);
 }
 /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="GaussianFromMeanAndVarianceOp"]/message_doc[@name="AverageLogFactor(Gaussian, double, double)"]/*'/>
 public static double AverageLogFactor([Proper] Gaussian sample, double mean, double variance)
 {
     return(GaussianOp.AverageLogFactor(sample, mean, 1.0 / variance));
 }
        /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="BernoulliFromLogOddsOp"]/message_doc[@name="LogOddsAverageConditional(bool, Gaussian)"]/*'/>
        public static Gaussian LogOddsAverageConditional(bool sample, [SkipIfUniform] Gaussian logOdds)
        {
            double m, v;

            logOdds.GetMeanAndVariance(out m, out v);
            double s = sample ? 1 : -1;

            m *= s;
            // catch cases when sigma0 would evaluate to 0
            if (m + 1.5 * v < -38)  // this check catches sigma0=0 when v <= 200
            {
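                // Here sigma(x) is approximately exp(x), so the tilted density N(x; m, v) sigma(x) is close to
                // N(x; m + v, v); beta2 = exp(m + 1.5*v) supplies a small correction to the variance.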
                double beta2 = Math.Exp(m + 1.5 * v);
                return(Gaussian.FromMeanAndVariance(s * (m + v), v * (1 - v * beta2)) / logOdds);
            }
            else if (m + v < 0)
            {
                // Factor out exp(m+v/2) in the following formulas:
                // sigma(m,v) = exp(m+v/2)(1-sigma(m+v,v))
                // sigma'(m,v) = d/dm sigma(m,v) = exp(m+v/2)(1-sigma(m+v,v) - sigma'(m+v,v))
                // sigma''(m,v) = d/dm sigma'(m,v) = exp(m+v/2)(1-sigma(m+v,v) - 2 sigma'(m+v,v) - sigma''(m+v,v))
                // This approach is always safe if sigma(-m-v,v)>0, which is guaranteed by m+v<0
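                // The common factor exp(m+v/2) cancels in the ratios alpha = sigma'/sigma and sigma''/sigma,
                // so only the bracketed terms are computed below.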
                double sigma0 = MMath.LogisticGaussian(-m - v, v);
                double sd     = MMath.LogisticGaussianDerivative(m + v, v);
                double sigma1 = sigma0 - sd;
                double sigma2 = sigma1 - sd - MMath.LogisticGaussianDerivative2(m + v, v);
                double alpha  = sigma1 / sigma0; // 1 - sd/sigma0
                if (Double.IsNaN(alpha))
                {
                    throw new Exception("alpha is NaN");
                }
                double beta = alpha * alpha - sigma2 / sigma0;
                if (Double.IsNaN(beta))
                {
                    throw new Exception("beta is NaN");
                }
                return(GaussianOp.GaussianFromAlphaBeta(logOdds, s * alpha, beta, ForceProper));
            }
            else if (v > 1488 && m < 0)
            {
                double sigma0 = MMath.LogisticGaussianRatio(m, v, 0);
                double sigma1 = MMath.LogisticGaussianRatio(m, v, 1);
                double sigma2 = MMath.LogisticGaussianRatio(m, v, 2);
                double alpha, beta;
                alpha = sigma1 / sigma0;
                if (Double.IsNaN(alpha))
                {
                    throw new Exception("alpha is NaN");
                }
                beta = alpha * alpha - sigma2 / sigma0;
                if (Double.IsNaN(beta))
                {
                    throw new Exception("beta is NaN");
                }
                return(GaussianOp.GaussianFromAlphaBeta(logOdds, s * alpha, beta, ForceProper));
            }
            else
            {
                // the following code only works when sigma0 > 0
                // sigma0=0 can only happen here if v > 1488
                double sigma0 = MMath.LogisticGaussian(m, v);
                double sigma1 = MMath.LogisticGaussianDerivative(m, v);
                double sigma2 = MMath.LogisticGaussianDerivative2(m, v);
                double alpha, beta;
                alpha = sigma1 / sigma0;
                if (Double.IsNaN(alpha))
                {
                    throw new Exception("alpha is NaN");
                }
                if (m + 2 * v < -19)
                {
                    beta = Math.Exp(3 * m + 2.5 * v) / (sigma0 * sigma0);
                }
                else
                {
                    //beta = (sigma1*sigma1 - sigma2*sigma0)/(sigma0*sigma0);
                    beta = alpha * alpha - sigma2 / sigma0;
                }
                if (Double.IsNaN(beta))
                {
                    throw new Exception("beta is NaN");
                }
                return(GaussianOp.GaussianFromAlphaBeta(logOdds, s * alpha, beta, ForceProper));
            }
        }
        /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="MaxGaussianOp"]/message_doc[@name="AAverageConditional(Gaussian, Gaussian, Gaussian)"]/*'/>
        public static Gaussian AAverageConditional([SkipIfUniform] Gaussian max, [Proper] Gaussian a, [Proper] Gaussian b)
        {
            if (max.IsUniform())
            {
                return(Gaussian.Uniform());
            }
            if (!b.IsProper())
            {
                throw new ImproperMessageException(b);
            }

            double logw1, alpha1, vx1, mx1;
            double logw2, alpha2, vx2, mx2;
            double logz;

            ComputeStats(max, a, b, out logz, out logw1, out alpha1, out vx1, out mx1,
                         out logw2, out alpha2, out vx2, out mx2);
            double w1, w2;

            if (logz < double.MinValue)
            {
                w1 = (alpha1 > 0) ? 1.0 : 0.0;
                w2 = (alpha2 > 0) ? 1.0 : 0.0;
            }
            else
            {
                w1 = Math.Exp(logw1 - logz);
                w2 = Math.Exp(logw2 - logz);
            }
            bool checkDerivatives = false;

            if (a.IsPointMass)
            {
                // f(a) = int p(x = max(a,b)) p(b) db
                // f(a) = p(x = a) p(a >= b) + p(x = b) p(a < b | x = b)
                // f(a) = N(a; mx, vx) phi((a - m2)/sqrt(v2)) + N(m2; mx, vx + v2) phi((mx2 - a)/sqrt(vx2))
                // f'(a) = (mx-a)/vx N(a; mx, vx) phi((a - m2)/sqrt(v2)) + N(a; mx, vx) N(a; m2, v2) - N(m2; mx, vx + v2) N(a; mx2, vx2)
                //       = (mx-a)/vx N(a; mx, vx) phi((a - m2)/sqrt(v2))
                // f''(a) = -1/vx N(a; mx, vx) phi((a - m2)/sqrt(v2)) + (mx-a)^2/vx^2 N(a; mx, vx) phi((a - m2)/sqrt(v2)) + (mx-a)/vx N(a; mx, vx) N(a; m2, v2)
                // ddlogf = f''(a)/f(a) - (f'(a)/f(a))^2 = (f''(a) f(a) - f'(a)^2)/f(a)^2
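                // Below, alpha = dlogf(a) = f'(a)/f(a) = w1*(mx - a)/vx and beta = ddlogf(a);
                // Gaussian.FromDerivatives turns these log-derivatives at the point a into the outgoing message.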
                double aPoint = a.Point;
                if (max.IsPointMass)
                {
                    return(max);
                }
                double z     = max.MeanTimesPrecision - aPoint * max.Precision;
                double alpha = z * w1;
                double beta  = (z * alpha1 - max.Precision + z * z) * w1 - alpha * alpha;
                if (b.IsPointMass && b.Point != aPoint)
                {
                    beta -= max.Precision * w2 * alpha2 * (b.Point - aPoint);
                }
                if (checkDerivatives)
                {
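                    // Check alpha and beta against central finite differences of logZ with respect to the mean of a.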
                    double m1 = a.GetMean();
                    double delta = m1 * 1e-6;
                    double logzd, logw1d, alpha1d, vx1d, mx1d, logw2d, alpha2d, vx2d, mx2d;
                    ComputeStats(max, Gaussian.FromMeanAndPrecision(m1 + delta, a.Precision), b, out logzd, out logw1d, out alpha1d, out vx1d, out mx1d, out logw2d, out alpha2d, out vx2d, out mx2d);
                    double logzd2;
                    ComputeStats(max, Gaussian.FromMeanAndPrecision(m1 - delta, a.Precision), b, out logzd2, out logw1d, out alpha1d, out vx1d, out mx1d, out logw2d, out alpha2d, out vx2d, out mx2d);
                    double alphaCheck = (logzd - logzd2) / (2 * delta);
                    double alphaError = Math.Abs(alpha - alphaCheck);
                    double betaCheck  = (logzd + logzd2 - 2 * logz) / (delta * delta);
                    double betaError  = Math.Abs(beta - betaCheck);
                    Console.WriteLine($"alpha={alpha} check={alphaCheck} error={alphaError} beta={beta} check={betaCheck} error={betaError}");
                }
                return(Gaussian.FromDerivatives(aPoint, alpha, beta, ForceProper));
            }
            bool useMessage = (w1 > 0) || (logw2 > -100);

            if (useMessage)
            {
                // vx1 = 1/(1/vx + 1/v1) = vx*v1/(vx + v1)
                double z, alpha, beta;
                if (max.IsPointMass)
                {
                    if (w2 == 0)
                    {
                        return(max);
                    }
                    z     = max.Point * a.Precision - a.MeanTimesPrecision;
                    alpha = z * w1 - w2 * alpha2;
                    beta  = -z * alpha2 * w2 - alpha * alpha;
                    if (w1 != 0) // avoid 0 * inf
                    {
                        beta += (z * z - a.Precision) * w1;
                    }
                }
                else
                {
                    //z = vx1 * (max.MeanTimesPrecision * a.Precision - a.MeanTimesPrecision * max.Precision);
                    z     = (max.MeanTimesPrecision - a.GetMean() * max.Precision) / (max.Precision / a.Precision + 1);
                    alpha = z * w1 - vx1 * max.Precision * w2 * alpha2;
                    if (w2 == 0)
                    {
                        //beta = (z * alpha1 - max.Precision) / (max.Precision / a.Precision + 1);
                        //double resultPrecision = a.Precision * beta / (-a.Precision - beta);
                        //resultPrecision = beta / (-1 - beta/a.Precision);
                        //resultPrecision = 1 / (-1/beta - 1 / a.Precision);
                        //resultPrecision = a.Precision / (-a.Precision / beta - 1);
                        //resultPrecision = a.Precision / (-(a.Precision + max.Precision) / (z * alpha1 - max.Precision) - 1);
                        //resultPrecision = -a.Precision / ((a.Precision + z*alpha1) / (z * alpha1 - max.Precision));
                        double zalpha1         = z * alpha1;
                        double denom           = (1 + zalpha1 / a.Precision);
                        double resultPrecision = (max.Precision - zalpha1) / denom;
                        //double weight = (max.Precision - z * alpha1) / (a.Precision + z * alpha1);
                        //double weightPlus1 = (max.Precision + a.Precision) / (a.Precision + z * alpha1);
                        //double resultMeanTimesPrecision = weight * (a.MeanTimesPrecision + alpha) + alpha;
                        //resultMeanTimesPrecision = weight * a.MeanTimesPrecision + weightPlus1 * alpha;
                        //double resultMeanTimesPrecision = (a.Precision * max.MeanTimesPrecision - z * alpha1 * a.MeanTimesPrecision) / (a.Precision + z * alpha1);
                        double resultMeanTimesPrecision = (max.MeanTimesPrecision - zalpha1 * a.GetMean()) / denom;
                        return(Gaussian.FromNatural(resultMeanTimesPrecision, resultPrecision));
                    }
                    else
                    {
                        beta = ((z * alpha1 - max.Precision) * vx1 * a.Precision + z * z) * w1
                               - max.Precision * vx1 * w2 * alpha2 * (mx2 * a.Precision - a.MeanTimesPrecision) / (vx2 * a.Precision + 1) - alpha * alpha;
                    }
                }
                //Console.WriteLine($"z={z} w1={w1:r} w2={w2:r} logw2={logw2} alpha1={alpha1} alpha2={alpha2} alpha={alpha:r} beta={beta:r}");
                if (checkDerivatives)
                {
                    double m1 = a.GetMean();
                    double delta = m1 * 1e-6;
                    double logzd, logw1d, alpha1d, vx1d, mx1d, logw2d, alpha2d, vx2d, mx2d;
                    ComputeStats(max, Gaussian.FromMeanAndPrecision(m1 + delta, a.Precision), b, out logzd, out logw1d, out alpha1d, out vx1d, out mx1d, out logw2d, out alpha2d, out vx2d, out mx2d);
                    double logzd2;
                    ComputeStats(max, Gaussian.FromMeanAndPrecision(m1 - delta, a.Precision), b, out logzd2, out logw1d, out alpha1d, out vx1d, out mx1d, out logw2d, out alpha2d, out vx2d, out mx2d);
                    double alphaCheck = (logzd - logzd2) / (2 * delta);
                    double alphaError = Math.Abs(alpha - alphaCheck);
                    double betaCheck  = (logzd + logzd2 - 2 * logz) / (delta * delta);
                    double betaError  = Math.Abs(beta - betaCheck);
                    Console.WriteLine($"alpha={alpha} check={alphaCheck} error={alphaError} beta={beta} check={betaCheck} error={betaError}");
                }
                return(GaussianOp.GaussianFromAlphaBeta(a, alpha, -beta, ForceProper));
            }
            else // w1==0 && logw2 <= -100
            {
                double m1, v1, m2, v2;
                a.GetMeanAndVariance(out m1, out v1);
                b.GetMeanAndVariance(out m2, out v2);
                // the posterior is a mixture model with weights exp(logw1-logz), exp(logw2-logz) and distributions
                // N(a; mx1, vx1) phi((a - m2)/sqrt(v2)) / phi((mx1 - m2)/sqrt(vx1 + v2))
                // N(a; m1, v1) phi((mx2 - a)/sqrt(vx2)) / phi((mx2 - m1)/sqrt(vx2 + v1))
                // the moments of the posterior are computed via the moments of these two components.
                if (vx1 == 0)
                {
                    alpha1 = 0;
                }
                if (vx2 == 0)
                {
                    alpha2 = 0;
                }
                double mc1 = mx1;
                if (alpha1 != 0) // avoid 0*infinity
                {
                    mc1 += alpha1 * vx1;
                }
                alpha2 = -alpha2;
                double mc2 = m1;
                if (alpha2 != 0) // avoid 0*infinity
                {
                    mc2 += alpha2 * v1;
                }
                double       z2 = 0, Y = 0, dY = 0, d2YiY = 0;
                const double z2small = 0;
                if (vx2 == 0)
                {
                    double sqrtPrec = Math.Sqrt(a.Precision);
                    z2 = (mx2 - m1) * sqrtPrec;
                    // When max.IsPointMass, logw2 <= -100 implies NormalCdfLn(z2) <= -100 implies z2 < -13
                    if (z2 < z2small)
                    {
                        // dY = 1 + z2*Y
                        // d2Y = Y + z2*dY
                        //     = Y + z2*(1 + z2*Y)
                        //     = Y + z2^2*Y + z2
                        double d2Y = 2 * MMath.NormalCdfMomentRatio(2, z2);
                        // Y = (dY-1)/z2
                        // d2Y = (dY-1)/z2 + z2*dY
                        //     = dY*(z2 + 1/z2) - 1/z2
                        // dY = (d2Y+1/z2)/(z2 + 1/z2)
                        double invz2 = 1 / z2;
                        //dY = MMath.NormalCdfMomentRatio(1, z2);
                        dY = (d2Y + invz2) / (z2 + invz2);
                        //Y = MMath.NormalCdfRatio(z2);
                        // Y = (d2Y - z2)/(1 + z2^2)
                        Y     = (dY - 1) / z2;
                        d2YiY = d2Y / Y;
                        // logw2 = MMath.NormalCdfLn(z2);
                        // alpha2 = -Math.Exp(Gaussian.GetLogProb(mx2, m1, vx2 + v1) - logw2);
                        //        = -1/sqrt(vx2+v1)/NormalCdfRatio(z2)
                        // m1 = mx2 - sqrt(vx2 + v1)*z2
                        // z2 + 1/NormalCdfRatio(z2) = z2 + 1/Y = (z2*Y + 1)/Y = dY/Y
                        // mc2 = m1 + alpha2*v1
                        //     = sqrt(vx2+v1)*(m1/sqrt(vx2+v1) - v1/(vx2+v1)/NormalCdfRatio(z2))
                        //     = sqrt(vx2+v1)*(mx2/sqrt(vx2+v1) - z2 - v1/(vx2+v1)/NormalCdfRatio(z2))
                        //     = sqrt(vx2+v1)*(mx2/sqrt(vx2+v1) - dY/Y)  if vx2=0
                        //     = mx2 - sqrt(v1)*dY/Y
                        // sqrt(v1)*dY/Y = sqrt(v)*(d2Y/Y - 1)/z2   (see IsPositiveOp)
                        mc2 = mx2 - (d2YiY - 1) / (z2 * sqrtPrec);
                    }
                }
                double m = w1 * mc1 + w2 * mc2;
                double beta1;
                if (alpha1 == 0)
                {
                    beta1 = 0;  // avoid 0*infinity
                }
                else
                {
                    double r1 = (mx1 - m2) / (vx1 + v2);
                    beta1 = alpha1 * (alpha1 + r1);
                }
                double beta2;
                if (alpha2 == 0)
                {
                    beta2 = 0;  // avoid 0*infinity
                }
                else
                {
                    double r2 = (mx2 - m1) / (vx2 + v1);
                    beta2 = alpha2 * (alpha2 - r2);
                }
                double vc1 = vx1 * (1 - vx1 * beta1);
                double vc2;
                if (vx2 == 0 && z2 < z2small)
                {
                    // Since vx2 == 0,
                    // beta2 = alpha2 * (alpha2 - z2/sqrt(v1))
                    // vc2 = v1 - v1^2 * alpha2 * (alpha2 - z2/sqrt(v1))
                    //     = v1 - v1^2 * -1/sqrt(v1)/Y * (-1/sqrt(v1)/Y - z2/sqrt(v1))
                    //     = v1 - v1 * 1/Y * (1/Y + z2)
                    //     = v1 - v1*dY/Y^2
                    //     = v1 * (1 - dY/Y^2)
                    // Since vc1 == 0 and mx2 == 0,
                    // posterior E[x^2] = v - v*dY/Y^2 + v*dY^2/Y^2
                    //                  = v - v*dY/Y^2*(1 - dY)
                    //                  = v + v*z*dY/Y
                    //                  = v * (1 + z*dY/Y)
                    //                  = v * d2Y/Y
                    if (z2 < -1e8)
                    {
                        // Y =approx -1/z2
                        // dY =approx 1/z2^2
                        // d2Y =approx -2/z2^3
                        // d2Y/Y - (dY/Y)^2 =approx 2/z2^2 - 1/z2^2 = 1/z2^2
                        // The exact formulas are:
                        // d2Y/Y = 1 + z*dY/Y
                        // dY = (d2Y - Y)/z
                        // dY/Y = d2Y/Y/z - 1/z
                        // d2Y = (d3Y - 2dY)/z
                        // d2Y/Y = (d3Y/Y - 2dY/Y)/z
                        //       = d3Y/Y/z - 2(d2Y/Y/z - 1/z)/z
                        // d2Y/Y = (d3Y/Y + 2)/(2z + z^2)
                        // (d2Y/Y - 1) = z*dY/Y
                        // d2Y/Y - (dY/Y)^2 = (d3Y/Y + 2 - (2z+z^2)(dY/Y)^2)/(2z + z^2)
                        //                  = (d3Y/Y + 2 - (2/z + 1)(d2Y/Y - 1)^2)/(2z + z^2)
                        // d2Y/Y - (dY/Y)^2 = d3Y/Y/z - 2 d2Y/Y/z^2 + 2/z^2 - (d2Y^2/Y^2/z^2 - 2 d2Y/Y/z^2 + 1/z^2)
                        //                  = d3Y/Y/z - d2Y^2/Y^2/z^2 + 1/z^2
                        // Could rewrite using the method of NormalCdfRatioSqrMinusDerivative
                        vc2 = dY / a.Precision;
                    }
                    else
                    {
                        double dYiY = dY / Y;
                        vc2 = (d2YiY - dYiY * dYiY) / a.Precision;
                    }
                }
                else if (beta2 == 0)
                {
                    vc2 = v1;
                }
                else
                {
                    vc2 = (1 - beta2 / a.Precision) / a.Precision;
                }
                double   diff   = mc1 - mc2;
                double   v      = w1 * vc1 + w2 * vc2 + w1 * w2 * diff * diff;
                Gaussian result = new Gaussian(m, v);
                //Console.WriteLine($"z2={z2:g17} m={m:g17} v={v:g17} vc2={vc2} diff={diff}");
                result.SetToRatio(result, a, ForceProper);
                if (Double.IsNaN(result.Precision) || Double.IsNaN(result.MeanTimesPrecision))
                {
                    throw new InferRuntimeException($"result is NaN.  max={max}, a={a}, b={b}");
                }
                return(result);
            }
        }
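
As a sanity check, the message returned by AAverageConditional can be compared against importance sampling of the exact posterior over a: draw (a, b) from their priors, weight by the max message evaluated at max(a, b), moment-match, and divide out the prior. A rough sketch, assuming the Microsoft.ML.Probabilistic.Distributions and .Factors namespaces; the sample count and test point are arbitrary:

using System;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Factors;

public static class MaxFactorCheck
{
    public static void Main()
    {
        Gaussian max = Gaussian.FromMeanAndVariance(1.0, 0.5);
        Gaussian a = Gaussian.FromMeanAndVariance(0.0, 1.0);
        Gaussian b = Gaussian.FromMeanAndVariance(0.5, 2.0);
        Gaussian message = MaxGaussianOp.AAverageConditional(max, a, b);

        // Importance sampling of the tilted posterior:
        // p(a) proportional to prior(a) * E_b[ density of max message at max(a, b) ].
        int n = 1000000;
        double sumW = 0, sumWa = 0, sumWaa = 0;
        for (int i = 0; i < n; i++)
        {
            double aSample = a.Sample();
            double bSample = b.Sample();
            double w = Math.Exp(max.GetLogProb(Math.Max(aSample, bSample)));
            sumW += w;
            sumWa += w * aSample;
            sumWaa += w * aSample * aSample;
        }
        double postMean = sumWa / sumW;
        double postVar = sumWaa / sumW - postMean * postMean;
        // EP projects the tilted posterior to a Gaussian, then divides by the prior.
        Gaussian mcMessage = Gaussian.FromMeanAndVariance(postMean, postVar) / a;
        Console.WriteLine($"message = {message}");
        Console.WriteLine($"sampled = {mcMessage}");
    }
}
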
Example #20
        public void Sample(Options options, Matrix data)
        {
            if (options.numParams > 2)
            {
                throw new Exception("numParams > 2");
            }
            int numStudents  = data.Rows;
            int numQuestions = data.Cols;
            // initialize the sampler at the mean of the priors (not sampling from the priors)
            double abilityMean        = abilityMeanPrior.GetMean();
            double abilityPrec        = abilityPrecPrior.GetMean();
            double difficultyMean     = difficultyMeanPrior.GetMean();
            double difficultyPrec     = difficultyPrecPrior.GetMean();
            double discriminationMean = discriminationMeanPrior.GetMean();
            double discriminationPrec = discriminationPrecPrior.GetMean();

            double[]            ability             = new double[numStudents];
            double[]            difficulty          = new double[numQuestions];
            List <double>[]     difficultySamples   = new List <double> [numQuestions];
            GaussianEstimator[] difficultyEstimator = new GaussianEstimator[numQuestions];
            for (int question = 0; question < numQuestions; question++)
            {
                difficultyEstimator[question] = new GaussianEstimator();
                difficultySamples[question]   = new List <double>();
                if (difficultyObserved != null)
                {
                    difficulty[question] = difficultyObserved[question];
                    difficultyEstimator[question].Add(difficultyObserved[question]);
                    difficultySamples[question].Add(difficultyObserved[question]);
                }
            }
            List <double>[]     abilitySamples   = new List <double> [numStudents];
            GaussianEstimator[] abilityEstimator = new GaussianEstimator[ability.Length];
            for (int student = 0; student < abilityEstimator.Length; student++)
            {
                abilityEstimator[student] = new GaussianEstimator();
                abilitySamples[student]   = new List <double>();
                if (abilityObserved != null)
                {
                    ability[student] = abilityObserved[student];
                    abilityEstimator[student].Add(abilityObserved[student]);
                    abilitySamples[student].Add(abilityObserved[student]);
                }
            }
            double[]         discrimination          = new double[numQuestions];
            List <double>[]  discriminationSamples   = new List <double> [numQuestions];
            GammaEstimator[] discriminationEstimator = new GammaEstimator[numQuestions];
            for (int question = 0; question < numQuestions; question++)
            {
                discriminationEstimator[question] = new GammaEstimator();
                discriminationSamples[question]   = new List <double>();
                discrimination[question]          = 1;
                if (discriminationObserved != null)
                {
                    discrimination[question] = discriminationObserved[question];
                    discriminationEstimator[question].Add(discriminationObserved[question]);
                    discriminationSamples[question].Add(discriminationObserved[question]);
                }
            }
            responseProbMean = new Matrix(numStudents, numQuestions);
            int    niters           = options.numberOfSamples;
            int    burnin           = options.burnIn;
            double logisticVariance = Math.PI * Math.PI / 3;
            double shape            = 4.5;
            Gamma  precPrior        = Gamma.FromShapeAndRate(shape, (shape - 1) * logisticVariance);

            // override the logistic-variance prior above: fix the auxiliary precision at 1
            precPrior      = Gamma.PointMass(1);
            double[,] prec = new double[numStudents, numQuestions];
            double[,] x    = new double[numStudents, numQuestions];
            int numRejected = 0, numAttempts = 0;

            for (int iter = 0; iter < niters; iter++)
            {
                for (int student = 0; student < numStudents; student++)
                {
                    for (int question = 0; question < numQuestions; question++)
                    {
                        // sample prec given ability, difficulty, discrimination, x
                        // as a function of prec: N(x; xMean, 1/prec) is proportional to Gamma(prec; 1.5, (x - xMean)^2/2)
                        // where xMean = discrimination*(ability - difficulty)
                        Gamma  precPost = precPrior;
                        double xMean    = (ability[student] - difficulty[question]) * discrimination[question];
                        double delta    = x[student, question] - xMean;
                        Gamma  like     = Gamma.FromShapeAndRate(1.5, 0.5 * delta * delta);
                        precPost.SetToProduct(precPost, like);
                        prec[student, question] = precPost.Sample();
                        // sample x given ability, difficulty, prec, data
                        // using an independence chain MH
                        bool     y      = (data[student, question] > 0);
                        double   sign   = y ? 1.0 : -1.0;
                        Gaussian xPrior = Gaussian.FromMeanAndPrecision(xMean, prec[student, question]);
                        // we want to sample from xPrior restricted to the observed sign, i.e. xPrior*I(sign*x > 0)
                        // instead we sample from the EP approximation xPost and correct with the MH step below
                        Gaussian xPost = xPrior * IsPositiveOp.XAverageConditional(y, xPrior);
                        double   oldx  = x[student, question];
                        double   newx  = xPost.Sample();
                        numAttempts++;
                        if (newx * sign < 0)
                        {
                            newx = oldx; // rejected
                            numRejected++;
                        }
                        else
                        {
                            // importance weights
                            double oldw = xPrior.GetLogProb(oldx) - xPost.GetLogProb(oldx);
                            double neww = xPrior.GetLogProb(newx) - xPost.GetLogProb(newx);
                            // acceptance ratio
                            double paccept = Math.Exp(neww - oldw);
                            if (paccept < 1 && Rand.Double() > paccept)
                            {
                                newx = oldx; // rejected
                                numRejected++;
                            }
                        }
                        x[student, question] = newx;
                        if (iter >= burnin)
                        {
                            double responseProb = MMath.Logistic(xMean);
                            responseProbMean[student, question] += responseProb;
                        }
                    }
                }
                if (abilityObserved == null)
                {
                    // sample ability given difficulty, prec, x
                    for (int student = 0; student < numStudents; student++)
                    {
                        Gaussian post = Gaussian.FromMeanAndPrecision(abilityMean, abilityPrec);
                        for (int question = 0; question < numQuestions; question++)
                        {
                            // N(x; disc*(ability-difficulty), 1/prec) =propto N(x/disc; ability-difficulty, 1/disc^2/prec) = N(ability; x/disc+difficulty, 1/disc^2/prec)
                            Gaussian abilityLike = Gaussian.FromMeanAndPrecision(x[student, question] / discrimination[question] + difficulty[question], prec[student, question] * discrimination[question] * discrimination[question]);
                            post.SetToProduct(post, abilityLike);
                        }
                        ability[student] = post.Sample();
                        if (iter >= burnin)
                        {
                            abilityEstimator[student].Add(post);
                            abilitySamples[student].Add(ability[student]);
                        }
                    }
                }
                // sample difficulty given ability, prec, x
                for (int question = 0; question < numQuestions; question++)
                {
                    Gaussian post = Gaussian.FromMeanAndPrecision(difficultyMean, difficultyPrec);
                    for (int student = 0; student < numStudents; student++)
                    {
                        // N(x; disc*(ability-difficulty), 1/prec) =propto N(x/disc; ability-difficulty, 1/disc^2/prec) = N(difficulty; ability-x/disc, 1/disc^2/prec)
                        if (discrimination[question] > 0)
                        {
                            Gaussian like = Gaussian.FromMeanAndPrecision(ability[student] - x[student, question] / discrimination[question], prec[student, question] * discrimination[question] * discrimination[question]);
                            post.SetToProduct(post, like);
                        }
                    }
                    difficulty[question] = post.Sample();
                    if (iter >= burnin)
                    {
                        //if (difficulty[question] > 100)
                        //    Console.WriteLine("difficulty[{0}] = {1}", question, difficulty[question]);
                        difficultyEstimator[question].Add(post);
                        difficultySamples[question].Add(difficulty[question]);
                    }
                }
                if (options.numParams > 1 && discriminationObserved == null)
                {
                    // sample discrimination given ability, difficulty, prec, x
                    for (int question = 0; question < numQuestions; question++)
                    {
                        // approximate the lognormal prior on discrimination by a moment-matched Gaussian
                        Gaussian approxPrior = Gaussian.FromMeanAndVariance(Math.Exp(discriminationMean + 0.5 / discriminationPrec), Math.Exp(2 * discriminationMean + 1 / discriminationPrec) * (Math.Exp(1 / discriminationPrec) - 1));
                        Gaussian post        = approxPrior;
                        for (int student = 0; student < numStudents; student++)
                        {
                            // N(x; disc*delta, 1/prec) =propto N(x/delta; disc, 1/prec/delta^2)
                            double delta = ability[student] - difficulty[question];
                            if (delta > 0)
                            {
                                Gaussian like = Gaussian.FromMeanAndPrecision(x[student, question] / delta, prec[student, question] * delta * delta);
                                post.SetToProduct(post, like);
                            }
                        }
                        TruncatedGaussian postTrunc = new TruncatedGaussian(post, 0, double.PositiveInfinity);
                        double            olddisc   = discrimination[question];
                        double            newdisc   = postTrunc.Sample();
                        // importance weights
                        Func <double, double> priorLogProb = delegate(double d)
                        {
                            double logd = Math.Log(d);
                            return(Gaussian.GetLogProb(logd, discriminationMean, 1 / discriminationPrec) - logd);
                        };
                        double oldw = priorLogProb(olddisc) - approxPrior.GetLogProb(olddisc);
                        double neww = priorLogProb(newdisc) - approxPrior.GetLogProb(newdisc);
                        // acceptance ratio
                        double paccept = Math.Exp(neww - oldw);
                        if (paccept < 1 && Rand.Double() > paccept)
                        {
                            // rejected
                        }
                        else
                        {
                            discrimination[question] = newdisc;
                        }
                        if (iter >= burnin)
                        {
                            discriminationEstimator[question].Add(discrimination[question]);
                            discriminationSamples[question].Add(discrimination[question]);
                        }
                    }
                }
                // sample abilityMean given ability, abilityPrec
                Gaussian abilityMeanPost = abilityMeanPrior;
                for (int student = 0; student < numStudents; student++)
                {
                    Gaussian like = GaussianOp.MeanAverageConditional(ability[student], abilityPrec);
                    abilityMeanPost *= like;
                }
                abilityMean = abilityMeanPost.Sample();
                // sample abilityPrec given ability, abilityMean
                Gamma abilityPrecPost = abilityPrecPrior;
                for (int student = 0; student < numStudents; student++)
                {
                    Gamma like = GaussianOp.PrecisionAverageConditional(ability[student], abilityMean);
                    abilityPrecPost *= like;
                }
                abilityPrec = abilityPrecPost.Sample();
                // sample difficultyMean given difficulty, difficultyPrec
                Gaussian difficultyMeanPost = difficultyMeanPrior;
                for (int question = 0; question < numQuestions; question++)
                {
                    Gaussian like = GaussianOp.MeanAverageConditional(difficulty[question], difficultyPrec);
                    difficultyMeanPost *= like;
                }
                difficultyMean = difficultyMeanPost.Sample();
                // sample difficultyPrec given difficulty, difficultyMean
                Gamma difficultyPrecPost = difficultyPrecPrior;
                for (int question = 0; question < numQuestions; question++)
                {
                    Gamma like = GaussianOp.PrecisionAverageConditional(difficulty[question], difficultyMean);
                    difficultyPrecPost *= like;
                }
                difficultyPrec = difficultyPrecPost.Sample();
                // sample discriminationMean given discrimination, discriminationPrec
                Gaussian discriminationMeanPost = discriminationMeanPrior;
                for (int question = 0; question < numQuestions; question++)
                {
                    Gaussian like = GaussianOp.MeanAverageConditional(Math.Log(discrimination[question]), discriminationPrec);
                    discriminationMeanPost *= like;
                }
                discriminationMean = discriminationMeanPost.Sample();
                // sample discriminationPrec given discrimination, discriminationMean
                Gamma discriminationPrecPost = discriminationPrecPrior;
                for (int question = 0; question < numQuestions; question++)
                {
                    Gamma like = GaussianOp.PrecisionAverageConditional(Math.Log(discrimination[question]), discriminationMean);
                    discriminationPrecPost *= like;
                }
                discriminationPrec = discriminationPrecPost.Sample();
                //if (iter % 1 == 0)
                //    Console.WriteLine("iter = {0}", iter);
            }
            //Console.WriteLine("abilityMean = {0}, abilityPrec = {1}", abilityMean, abilityPrec);
            //Console.WriteLine("difficultyMean = {0}, difficultyPrec = {1}", difficultyMean, difficultyPrec);
            int numSamplesUsed = niters - burnin;

            responseProbMean.Scale(1.0 / numSamplesUsed);
            //Console.WriteLine("acceptance rate = {0}", ((double)numAttempts - numRejected)/numAttempts);
            difficultyPost = Array.ConvertAll(difficultyEstimator, est => est.GetDistribution(Gaussian.Uniform()));
            abilityPost    = Array.ConvertAll(abilityEstimator, est => est.GetDistribution(Gaussian.Uniform()));
            if (options.numParams > 1)
            {
                discriminationPost = Array.ConvertAll(discriminationEstimator, est => est.GetDistribution(new Gamma()));
            }
            abilityCred    = GetCredibleIntervals(options.credibleIntervalProbability, abilitySamples);
            difficultyCred = GetCredibleIntervals(options.credibleIntervalProbability, difficultySamples);
            bool saveSamples = false;

            if (saveSamples)
            {
                using (MatlabWriter writer = new MatlabWriter(@"..\..\samples.mat"))
                {
                    int q = 11;
                    writer.Write("difficulty", difficultySamples[q]);
                    writer.Write("discrimination", discriminationSamples[q]);
                }
            }
        }
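
The core of the inner loop above is an independence-chain Metropolis-Hastings update of the latent response x: the proposal is the EP approximation of the prior restricted to the observed sign, and the importance-weight ratio corrects for the approximation error. A minimal standalone version of that single step, assuming the usual Infer.NET namespaces (Distributions, Factors, Math); the class and method names are illustrative:

using System;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Factors;
using Microsoft.ML.Probabilistic.Math;

public static class LatentResponseSampler
{
    // One independence-chain MH update of the latent response x, whose target is
    // xPrior restricted to the sign implied by the binary outcome y.
    public static double SampleLatent(Gaussian xPrior, bool y, double oldX)
    {
        // EP approximation to xPrior * I(sign of x matches y), used as the proposal.
        Gaussian xPost = xPrior * IsPositiveOp.XAverageConditional(y, xPrior);
        double newX = xPost.Sample();
        double sign = y ? 1.0 : -1.0;
        if (newX * sign < 0) return oldX; // wrong sign: reject immediately
        // Importance weights (target / proposal) of the old and new states.
        double oldW = xPrior.GetLogProb(oldX) - xPost.GetLogProb(oldX);
        double newW = xPrior.GetLogProb(newX) - xPost.GetLogProb(newX);
        double pAccept = Math.Exp(newW - oldW);
        return (pAccept >= 1 || Rand.Double() < pAccept) ? newX : oldX;
    }
}
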
Example #21
        public void GaussianOpX()
        {
            Gaussian uniform = Gaussian.Uniform();
            Gaussian X0 = Gaussian.FromMeanAndVariance(3, 0.5);
            Gaussian Mean0 = Gaussian.FromMeanAndVariance(7, 1.0 / 3);
            double   Precision0 = 3;
            Gaussian X, Mean;
            Gamma    Precision, to_precision;
            Gaussian xActual, xExpected;

            bool testImproper = false;

            if (testImproper)
            {
                // Test the case where precisionIsBetween = false
                X            = Gaussian.FromNatural(1, 2);
                Mean         = Gaussian.FromNatural(3, -1);
                Precision    = Gamma.FromShapeAndRate(4, 5);
                to_precision = Gamma.FromShapeAndRate(6, 7);
                xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);
            }

            X            = Gaussian.FromNatural(-2.7793306963303595, 0.050822473645365768);
            Mean         = Gaussian.FromNatural(-5.9447032851878134E-09, 3.2975231004586637E-204);
            Precision    = Gamma.FromShapeAndRate(318.50907574398883, 9.6226982361933746E+205);
            to_precision = Gamma.PointMass(0);
            xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);

            X            = Gaussian.FromNatural(0.1559599323109816, 8.5162535450918462);
            Mean         = Gaussian.PointMass(0.57957597647840942);
            Precision    = Gamma.FromShapeAndRate(7.8308812008325587E+30, 8.2854255911709925E+30);
            to_precision = Gamma.FromShapeAndRate(1.4709139487775529, 0.14968339171493822);
            xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);

            X            = Gaussian.FromNatural(0.15595993233964134, 8.5162535466550349);
            Mean         = Gaussian.PointMass(0.57957597647840942);
            Precision    = Gamma.FromShapeAndRate(3.9206259406339067E+20, 4.1481991194547565E+20);
            to_precision = Gamma.FromShapeAndRate(1.4709139487806249, 0.14968339171413536);
            xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);

            X            = Gaussian.FromNatural(0.15595993261634511, 8.5162535617468418);
            Mean         = Gaussian.PointMass(0.57957597647840942);
            Precision    = Gamma.FromShapeAndRate(1.825759224425317E+19, 1.9317356258150703E+19);
            to_precision = Gamma.FromShapeAndRate(1.4709139487887679, 0.14968339176002607);
            xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);

            X            = Gaussian.FromNatural(0.16501264432785923, 9.01);
            Mean         = Gaussian.PointMass(0.57957597647840942);
            Precision    = Gamma.FromShapeAndRate(1.6965139612477539E+21, 1.6965139612889427E+21);
            to_precision = Gamma.FromShapeAndRate(1.4695136363119978, 0.14707291154227081);
            xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);

            // initialized in a bad place, gets stuck in a flat region
            X            = Gaussian.FromNatural(3.9112579392580757, 11.631097473681082);
            Mean         = Gaussian.FromNatural(10.449696977834144, 5.5617978202886995);
            Precision    = Gamma.FromShapeAndRate(1.0112702817305146, 0.026480506235719053);
            to_precision = Gamma.FromShapeAndRate(1, 0.029622790537514355);
            xActual      = GaussianOp.SampleAverageConditional(X, Mean, Precision, to_precision);

            X         = Gaussian.FromNatural(57788.170908674481, 50207.150004827061);
            Mean      = Gaussian.PointMass(0);
            Precision = Gamma.FromShapeAndRate(19764.051194189466, 0.97190264412377791);
            xActual   = GaussianOp.SampleAverageConditional_slow(X, Mean, Precision);

            // integration bounds should be [-36,4]
            X         = Gaussian.FromNatural(1.696828485456396, 0.71980672726406147);
            Mean      = Gaussian.PointMass(-1);
            Precision = new Gamma(1, 1);
            xActual   = GaussianOp.SampleAverageConditional_slow(X, Mean, Precision);
            xExpected = GaussianOp_Slow.SampleAverageConditional(X, Mean, Precision);
            Assert.True(xExpected.MaxDiff(xActual) < 1e-4);

            X         = new Gaussian(-1.565, 0.8466);
            Mean      = new Gaussian(0.0682, 0.3629);
            Precision = new Gamma(103.2, 0.009786);
            xActual   = GaussianOp.SampleAverageConditional_slow(X, Mean, Precision);
            xExpected = GaussianOp_Slow.SampleAverageConditional(X, Mean, Precision);
            Assert.True(xExpected.MaxDiff(xActual) < 1e-4);

            // Fixed precision
            X    = X0;
            Mean = uniform;
            Assert.True(GaussianOp.SampleAverageConditional(Mean, Precision0).MaxDiff(uniform) < 1e-10);
            Mean = Mean0;
            Assert.True(GaussianOp.SampleAverageConditional(Mean, Precision0).MaxDiff(new Gaussian(Mean.GetMean(), Mean.GetVariance() + 1 / Precision0)) < 1e-10);
            Mean = Gaussian.PointMass(Mean0.GetMean());
            Assert.True(GaussianOp.SampleAverageConditional(Mean, Precision0).MaxDiff(new Gaussian(Mean.GetMean(), 1 / Precision0)) < 1e-10);

            // Uniform precision
            // the answer should always be uniform
            Precision = Gamma.Uniform();
            Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(uniform) < 1e-10);

            // Unknown precision
            Precision = Gamma.FromShapeAndScale(3, 3);
            // Known X
            X    = Gaussian.PointMass(X0.GetMean());
            Mean = uniform;
            Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(uniform) < 1e-10);
            // Unknown X
            X    = X0;
            Mean = uniform;
            //Console.WriteLine(XAverageConditional2(result));
            Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(uniform) < 1e-10);
            X    = X0;
            Mean = Mean0;
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) GaussianOp.PrecisionAverageConditional(X, Mean, Precision, precisionMessage);
            // in matlab: test_t_msg
            if (GaussianOp.ForceProper)
            {
                Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(Gaussian.FromNatural(3.1495, 0)) < 1e-4);
            }
            else
            {
                Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(new Gaussian(-9.9121, -4.5998)) < 1e-4);
            }
            X    = X0;
            Mean = Gaussian.PointMass(Mean0.GetMean());
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) GaussianOp.PrecisionAverageConditional(X, Mean, Precision, precisionMessage);
            // in matlab: test_t_msg
            if (GaussianOp.ForceProper)
            {
                Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(Gaussian.FromNatural(2.443, 0)) < 1e-4);
            }
            else
            {
                Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(new Gaussian(0.81394, -1.3948)) < 1e-4);
            }
            // Uniform X
            X    = uniform;
            Mean = Mean0;
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) GaussianOp.PrecisionAverageConditional(X, Mean, Precision, precisionMessage);
            Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(new Gaussian(7, 0.5)) < 1e-10);
            X    = uniform;
            Mean = Gaussian.PointMass(Mean0.GetMean());
            // converge the precision message.  (only matters if KeepLastMessage is set).
            //for (int i = 0; i < 10; i++) GaussianOp.PrecisionAverageConditional(X, Mean, Precision, precisionMessage);
            Assert.True(GaussianOp.SampleAverageConditional_slow(X, Mean, Precision).MaxDiff(new Gaussian(7, 1.0 / 6)) < 1e-10);
        }
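
The fixed-precision assertions in the test above rely on the identity that marginalizing N(x; mean, 1/precision) over a Gaussian mean simply adds the variances. A small Monte Carlo confirmation of that identity, assuming the Microsoft.ML.Probabilistic.Distributions and .Factors namespaces; the sample count is arbitrary:

using System;
using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Factors;

public static class FixedPrecisionCheck
{
    public static void Main()
    {
        Gaussian meanDist = Gaussian.FromMeanAndVariance(7, 1.0 / 3);
        double precision = 3;
        Gaussian message = GaussianOp.SampleAverageConditional(meanDist, precision);

        // Monte Carlo: draw the mean, then draw the sample; the marginal of the sample
        // should match the message N(E[mean], Var[mean] + 1/precision).
        int n = 1000000;
        double sum = 0, sumSq = 0;
        for (int i = 0; i < n; i++)
        {
            double m = meanDist.Sample();
            double x = Gaussian.Sample(m, precision);
            sum += x;
            sumSq += x * x;
        }
        double mcMean = sum / n;
        double mcVar = sumSq / n - mcMean * mcMean;
        Console.WriteLine($"message = {message}");
        Console.WriteLine($"sampled = {Gaussian.FromMeanAndVariance(mcMean, mcVar)}");
    }
}
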
Example #22
        /// <summary>Computations that depend on the observed value of numberOfIterationsDecreased and vGamma__1 and vGaussian__1 and vDirichlet1</summary>
        /// <param name="numberOfIterations">The number of times to iterate each loop</param>
        private void Changed_numberOfIterationsDecreased_vGamma__1_vGaussian__1_vDirichlet1(int numberOfIterations)
        {
            if (this.Changed_numberOfIterationsDecreased_vGamma__1_vGaussian__1_vDirichlet1_iterationsDone == numberOfIterations)
            {
                return;
            }
            // Create array for 'vdouble__3_marginal' Forwards messages.
            this.vdouble__3_marginal_F = new DistributionStructArray <Gaussian, double>(2);
            for (int index2 = 0; index2 < 2; index2++)
            {
                this.vdouble__3_marginal_F[index2] = Gaussian.Uniform();
            }
            this.vVector1_marginal_F = ArrayHelper.MakeUniform <Dirichlet>(this.VDirichlet1);
            Discrete vint9_F = ArrayHelper.MakeUniform <Discrete>(Discrete.Uniform(2));

            Discrete[] vint9_selector_uses_B = default(Discrete[]);
            // Create array for 'vint9_selector_uses' Backwards messages.
            vint9_selector_uses_B = new Discrete[3];
            for (int _ind = 0; _ind < 3; _ind++)
            {
                vint9_selector_uses_B[_ind] = ArrayHelper.MakeUniform <Discrete>(Discrete.Uniform(2));
            }
            DistributionStructArray <Bernoulli, bool>[] vint9_selector_cases_uses_B = default(DistributionStructArray <Bernoulli, bool>[]);
            // Create array for 'vint9_selector_cases_uses' Backwards messages.
            vint9_selector_cases_uses_B = new DistributionStructArray <Bernoulli, bool> [2];
            for (int _ind = 0; _ind < 2; _ind++)
            {
                // Create array for 'vint9_selector_cases_uses' Backwards messages.
                vint9_selector_cases_uses_B[_ind] = new DistributionStructArray <Bernoulli, bool>(2);
                for (int _iv = 0; _iv < 2; _iv++)
                {
                    vint9_selector_cases_uses_B[_ind][_iv] = Bernoulli.Uniform();
                }
            }
            DistributionStructArray <Bernoulli, bool>[] vint9_selector_cases_depth1_uses_B = default(DistributionStructArray <Bernoulli, bool>[]);
            // Create array for 'vint9_selector_cases_depth1_uses' Backwards messages.
            vint9_selector_cases_depth1_uses_B = new DistributionStructArray <Bernoulli, bool> [2];
            for (int _iv = 0; _iv < 2; _iv++)
            {
                // Create array for 'vint9_selector_cases_depth1_uses' Backwards messages.
                vint9_selector_cases_depth1_uses_B[_iv] = new DistributionStructArray <Bernoulli, bool>(10);
                for (int _ind = 0; _ind < 10; _ind++)
                {
                    vint9_selector_cases_depth1_uses_B[_iv][_ind] = Bernoulli.Uniform();
                }
            }
            // Create array for replicates of 'vdouble17__index2__F'
            DistributionStructArray <Gaussian, double> vdouble17__index2__F = new DistributionStructArray <Gaussian, double>(2);

            for (int index2 = 0; index2 < 2; index2++)
            {
                vdouble17__index2__F[index2] = Gaussian.Uniform();
            }
            // Message from use of 'vdouble17'
            Gaussian vdouble17_use_B = Gaussian.Uniform();
            // Message to marginal of 'vdouble17__index2_'
            // Create array for replicates of 'vdouble17__index2__marginal_F'
            DistributionStructArray <Gaussian, double> vdouble17__index2__marginal_F = new DistributionStructArray <Gaussian, double>(2);

            for (int index2 = 0; index2 < 2; index2++)
            {
                vdouble17__index2__marginal_F[index2] = Gaussian.Uniform();
            }
            // Create array for replicates of 'vdouble__4_index2__B'
            DistributionStructArray <Gamma, double> vdouble__4_index2__B = new DistributionStructArray <Gamma, double>(2);

            for (int index2 = 0; index2 < 2; index2++)
            {
                vdouble__4_index2__B[index2] = Gamma.Uniform();
            }
            // Create array for 'vdouble__4_marginal' Forwards messages.
            this.vdouble__4_marginal_F = new DistributionStructArray <Gamma, double>(2);
            for (int index2 = 0; index2 < 2; index2++)
            {
                this.vdouble__4_marginal_F[index2] = Gamma.Uniform();
            }
            DistributionStructArray <Bernoulli, bool> vint9_selector_cases_depth1_B = default(DistributionStructArray <Bernoulli, bool>);

            // Create array for 'vint9_selector_cases_depth1' Backwards messages.
            vint9_selector_cases_depth1_B = new DistributionStructArray <Bernoulli, bool>(2);
            for (int _iv = 0; _iv < 2; _iv++)
            {
                vint9_selector_cases_depth1_B[_iv] = Bernoulli.Uniform();
            }
            DistributionStructArray <Bernoulli, bool> vint9_selector_cases_B = default(DistributionStructArray <Bernoulli, bool>);

            // Create array for 'vint9_selector_cases' Backwards messages.
            vint9_selector_cases_B = new DistributionStructArray <Bernoulli, bool>(2);
            for (int _iv = 0; _iv < 2; _iv++)
            {
                vint9_selector_cases_B[_iv] = Bernoulli.Uniform();
            }
            this.vint9_marginal_F = ArrayHelper.MakeUniform <Discrete>(Discrete.Uniform(2));
            // Create array for replicates of 'vdouble__3_index2__B'
            DistributionStructArray <Gaussian, double> vdouble__3_index2__B = new DistributionStructArray <Gaussian, double>(2);

            for (int index2 = 0; index2 < 2; index2++)
            {
                vdouble__3_index2__B[index2] = Gaussian.Uniform();
                // Message to 'vdouble__3_marginal' from Variable factor
                this.vdouble__3_marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gaussian>(this.vdouble__3_use_B[index2], this.VGaussian__1[index2], this.vdouble__3_marginal_F[index2]);
                // Message to 'vdouble__4_marginal' from Variable factor
                this.vdouble__4_marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gamma>(this.vdouble__4_use_B[index2], this.VGamma__1[index2], this.vdouble__4_marginal_F[index2]);
                // Message to 'vdouble17__index2_' from Gaussian factor
                vdouble17__index2__F[index2] = GaussianOp.SampleAverageLogarithm(this.vdouble__3_marginal_F[index2], this.vdouble__4_marginal_F[index2]);
                // Message to 'vdouble17__index2__marginal' from Variable factor
                vdouble17__index2__marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gaussian>(vdouble17_use_B, vdouble17__index2__F[index2], vdouble17__index2__marginal_F[index2]);
            }
            // Message to 'vVector1_marginal' from Variable factor
            this.vVector1_marginal_F = VariableVmpOp.MarginalAverageLogarithm <Dirichlet>(this.vVector1_use_B, this.VDirichlet1, this.vVector1_marginal_F);
            // Message to 'vint9' from Discrete factor
            vint9_F = DiscreteFromDirichletOp.SampleAverageLogarithm(this.vVector1_marginal_F, vint9_F);
            // Message to 'vint9_marginal' from Variable factor
            this.vint9_marginal_F = VariableVmpOp.MarginalAverageLogarithm <Discrete>(this.vint9_selector_B, vint9_F, this.vint9_marginal_F);
            for (int iteration = this.Changed_numberOfIterationsDecreased_vGamma__1_vGaussian__1_vDirichlet1_iterationsDone; iteration < numberOfIterations; iteration++)
            {
                for (int index2 = 0; index2 < 2; index2++)
                {
                    // Message to 'vdouble__4_marginal' from Variable factor
                    this.vdouble__4_marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gamma>(this.vdouble__4_use_B[index2], this.VGamma__1[index2], this.vdouble__4_marginal_F[index2]);
                    // Message to 'vdouble__3_index2_' from Gaussian factor
                    vdouble__3_index2__B[index2] = GaussianOp.MeanAverageLogarithm(vdouble17__index2__marginal_F[index2], this.vdouble__4_marginal_F[index2]);
                    // Message to 'vdouble__3_use' from EnterOne factor
                    this.vdouble__3_use_B[index2] = GateEnterOneOp <double> .ValueAverageLogarithm <Gaussian>(vdouble__3_index2__B[index2], this.vint9_marginal_F, index2, this.vdouble__3_use_B[index2]);

                    // Message to 'vdouble__3_marginal' from Variable factor
                    this.vdouble__3_marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gaussian>(this.vdouble__3_use_B[index2], this.VGaussian__1[index2], this.vdouble__3_marginal_F[index2]);
                    // Message to 'vdouble17__index2_' from Gaussian factor
                    vdouble17__index2__F[index2] = GaussianOp.SampleAverageLogarithm(this.vdouble__3_marginal_F[index2], this.vdouble__4_marginal_F[index2]);
                    // Message to 'vdouble17__index2__marginal' from Variable factor
                    vdouble17__index2__marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gaussian>(vdouble17_use_B, vdouble17__index2__F[index2], vdouble17__index2__marginal_F[index2]);
                    // Message to 'vint9_selector_cases_depth1_uses' from Variable factor
                    vint9_selector_cases_depth1_uses_B[index2][8] = Bernoulli.FromLogOdds(VariableVmpOp.AverageLogFactor <Gaussian>(vdouble17__index2__marginal_F[index2]));
                    // Message to 'vint9_selector_cases_depth1_uses' from Gaussian factor
                    vint9_selector_cases_depth1_uses_B[index2][7] = Bernoulli.FromLogOdds(GaussianOp.AverageLogFactor(vdouble17__index2__marginal_F[index2], this.vdouble__3_marginal_F[index2], this.vdouble__4_marginal_F[index2]));
                }
                for (int _iv = 0; _iv < 2; _iv++)
                {
                    // Message to 'vint9_selector_cases_depth1' from Replicate factor
                    vint9_selector_cases_depth1_B[_iv] = ReplicateOp.DefAverageLogarithm <Bernoulli>(vint9_selector_cases_depth1_uses_B[_iv], vint9_selector_cases_depth1_B[_iv]);
                    // Message to 'vint9_selector_cases_uses' from Copy factor
                    vint9_selector_cases_uses_B[0][_iv] = ArrayHelper.SetTo <Bernoulli>(vint9_selector_cases_uses_B[0][_iv], vint9_selector_cases_depth1_B[_iv]);
                }
                // Message to 'vint9_selector_cases' from Replicate factor
                vint9_selector_cases_B = ReplicateOp.DefAverageLogarithm <DistributionStructArray <Bernoulli, bool> >(vint9_selector_cases_uses_B, vint9_selector_cases_B);
                // Message to 'vint9_selector_uses' from CasesInt factor
                vint9_selector_uses_B[0] = IntCasesOp.IAverageLogarithm(vint9_selector_cases_B, vint9_selector_uses_B[0]);
                // Message to 'vint9_selector' from Replicate factor
                this.vint9_selector_B = ReplicateOp.DefAverageLogarithm <Discrete>(vint9_selector_uses_B, this.vint9_selector_B);
                // Message to 'vVector1_marginal' from Variable factor
                this.vVector1_marginal_F = VariableVmpOp.MarginalAverageLogarithm <Dirichlet>(this.vVector1_use_B, this.VDirichlet1, this.vVector1_marginal_F);
                // Message to 'vint9' from Discrete factor
                vint9_F = DiscreteFromDirichletOp.SampleAverageLogarithm(this.vVector1_marginal_F, vint9_F);
                // Message to 'vint9_marginal' from Variable factor
                this.vint9_marginal_F = VariableVmpOp.MarginalAverageLogarithm <Discrete>(this.vint9_selector_B, vint9_F, this.vint9_marginal_F);
                for (int index2 = 0; index2 < 2; index2++)
                {
                    // Message to 'vdouble__4_index2_' from Gaussian factor
                    vdouble__4_index2__B[index2] = GaussianOp.PrecisionAverageLogarithm(vdouble17__index2__marginal_F[index2], this.vdouble__3_marginal_F[index2]);
                    // Message to 'vdouble__4_use' from EnterOne factor
                    this.vdouble__4_use_B[index2] = GateEnterOneOp <double> .ValueAverageLogarithm <Gamma>(vdouble__4_index2__B[index2], this.vint9_marginal_F, index2, this.vdouble__4_use_B[index2]);
                }
                // Message to 'vVector1_use' from Discrete factor
                this.vVector1_use_B = DiscreteFromDirichletOp.ProbsAverageLogarithm(this.vint9_marginal_F, this.vVector1_use_B);
                this.OnProgressChanged(new ProgressChangedEventArgs(iteration));
            }
            for (int index2 = 0; index2 < 2; index2++)
            {
                // Message to 'vdouble__3_index2_' from Gaussian factor
                vdouble__3_index2__B[index2] = GaussianOp.MeanAverageLogarithm(vdouble17__index2__marginal_F[index2], this.vdouble__4_marginal_F[index2]);
                // Message to 'vdouble__3_use' from EnterOne factor
                this.vdouble__3_use_B[index2] = GateEnterOneOp <double> .ValueAverageLogarithm <Gaussian>(vdouble__3_index2__B[index2], this.vint9_marginal_F, index2, this.vdouble__3_use_B[index2]);

                // Message to 'vdouble__3_marginal' from Variable factor
                this.vdouble__3_marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gaussian>(this.vdouble__3_use_B[index2], this.VGaussian__1[index2], this.vdouble__3_marginal_F[index2]);
                // Message to 'vint9_selector_cases_depth1_uses' from Gaussian factor
                vint9_selector_cases_depth1_uses_B[index2][7] = Bernoulli.FromLogOdds(GaussianOp.AverageLogFactor(vdouble17__index2__marginal_F[index2], this.vdouble__3_marginal_F[index2], this.vdouble__4_marginal_F[index2]));
            }
            for (int _iv = 0; _iv < 2; _iv++)
            {
                // Message to 'vint9_selector_cases_depth1' from Replicate factor
                vint9_selector_cases_depth1_B[_iv] = ReplicateOp.DefAverageLogarithm <Bernoulli>(vint9_selector_cases_depth1_uses_B[_iv], vint9_selector_cases_depth1_B[_iv]);
                // Message to 'vint9_selector_cases_uses' from Copy factor
                vint9_selector_cases_uses_B[0][_iv] = ArrayHelper.SetTo <Bernoulli>(vint9_selector_cases_uses_B[0][_iv], vint9_selector_cases_depth1_B[_iv]);
            }
            // Message to 'vint9_selector_cases' from Replicate factor
            vint9_selector_cases_B = ReplicateOp.DefAverageLogarithm <DistributionStructArray <Bernoulli, bool> >(vint9_selector_cases_uses_B, vint9_selector_cases_B);
            // Message to 'vint9_selector_uses' from CasesInt factor
            vint9_selector_uses_B[0] = IntCasesOp.IAverageLogarithm(vint9_selector_cases_B, vint9_selector_uses_B[0]);
            // Message to 'vint9_selector' from Replicate factor
            this.vint9_selector_B = ReplicateOp.DefAverageLogarithm <Discrete>(vint9_selector_uses_B, this.vint9_selector_B);
            // Message to 'vint9_marginal' from Variable factor
            this.vint9_marginal_F = VariableVmpOp.MarginalAverageLogarithm <Discrete>(this.vint9_selector_B, vint9_F, this.vint9_marginal_F);
            for (int index2 = 0; index2 < 2; index2++)
            {
                // Message to 'vdouble__4_index2_' from Gaussian factor
                vdouble__4_index2__B[index2] = GaussianOp.PrecisionAverageLogarithm(vdouble17__index2__marginal_F[index2], this.vdouble__3_marginal_F[index2]);
                // Message to 'vdouble__4_use' from EnterOne factor
                this.vdouble__4_use_B[index2] = GateEnterOneOp <double> .ValueAverageLogarithm <Gamma>(vdouble__4_index2__B[index2], this.vint9_marginal_F, index2, this.vdouble__4_use_B[index2]);

                // Message to 'vdouble__4_marginal' from Variable factor
                this.vdouble__4_marginal_F[index2] = VariableVmpOp.MarginalAverageLogarithm <Gamma>(this.vdouble__4_use_B[index2], this.VGamma__1[index2], this.vdouble__4_marginal_F[index2]);
            }
            // Message to 'vVector1_use' from Discrete factor
            this.vVector1_use_B = DiscreteFromDirichletOp.ProbsAverageLogarithm(this.vint9_marginal_F, this.vVector1_use_B);
            // Message to 'vVector1_marginal' from Variable factor
            this.vVector1_marginal_F = VariableVmpOp.MarginalAverageLogarithm <Dirichlet>(this.vVector1_use_B, this.VDirichlet1, this.vVector1_marginal_F);
            this.vdouble17_F         = Gaussian.Uniform();
            DistributionStructArray <Bernoulli, bool> vint9_selector_cases_F = default(DistributionStructArray <Bernoulli, bool>);

            // Create array for 'vint9_selector_cases' Forwards messages.
            vint9_selector_cases_F = new DistributionStructArray <Bernoulli, bool>(2);
            for (int _iv = 0; _iv < 2; _iv++)
            {
                vint9_selector_cases_F[_iv] = Bernoulli.Uniform();
            }
            // Message to 'vint9_selector_cases' from CasesInt factor
            vint9_selector_cases_F = IntCasesOp.CasesAverageLogarithm <DistributionStructArray <Bernoulli, bool> >(this.vint9_marginal_F, vint9_selector_cases_F);
            // Message to 'vdouble17' from Exit factor
            this.vdouble17_F = GateExitOp <double> .ExitAverageLogarithm <Gaussian>(vint9_selector_cases_F, vdouble17__index2__marginal_F, this.vdouble17_F);

            this.Changed_numberOfIterationsDecreased_vGamma__1_vGaussian__1_vDirichlet1_iterationsDone = numberOfIterations;
        }
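
Generated inference classes like the one above are normally driven through the IGeneratedAlgorithm interface rather than called directly; change-tracked methods such as this one are re-run only when their dependencies (the observed values named in the method) change. A hedged sketch of typical usage follows; the observed-value and marginal names are model-specific placeholders, and the namespace of IGeneratedAlgorithm may differ between Infer.NET versions:

using Microsoft.ML.Probabilistic.Distributions;
using Microsoft.ML.Probabilistic.Models;   // assumed location of IGeneratedAlgorithm; adjust for your version

public static class GeneratedAlgorithmUsage
{
    public static Gaussian Run(IGeneratedAlgorithm algorithm, int numberOfIterations)
    {
        // Set any observed values the generated class exposes (names are model-specific), e.g.
        // algorithm.SetObservedValue("data", observedData);

        // Run the message-passing schedule; change-tracked methods like
        // Changed_numberOfIterationsDecreased_vGamma__1_vGaussian__1_vDirichlet1 fire as needed.
        algorithm.Execute(numberOfIterations);

        // Read back a marginal by the original model variable's name (model-specific).
        return algorithm.Marginal<Gaussian>("vdouble17");
    }
}
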
Example #23
        /// <include file='FactorDocs.xml' path='factor_docs/message_op_class[@name="MaxGaussianOp"]/message_doc[@name="MaxAverageConditional(Gaussian, Gaussian, Gaussian)"]/*'/>
        public static Gaussian MaxAverageConditional(Gaussian max, [Proper] Gaussian a, [Proper] Gaussian b)
        {
            // the following code works correctly even if max is uniform or improper.
            if (!a.IsProper())
            {
                throw new ImproperMessageException(a);
            }
            if (!b.IsProper())
            {
                throw new ImproperMessageException(b);
            }
            double m1, v1, m2, v2;

            a.GetMeanAndVariance(out m1, out v1);
            b.GetMeanAndVariance(out m2, out v2);

            double logw1, alpha1, vx1, mx1;
            double logw2, alpha2, vx2, mx2;
            double logz;

            ComputeStats(max, a, b, out logz, out logw1, out alpha1, out vx1, out mx1,
                         out logw2, out alpha2, out vx2, out mx2);
            double w1 = Math.Exp(logw1 - logz);
            double w2 = Math.Exp(logw2 - logz);

            if (max.IsPointMass)
            {
                double z1 = a.MeanTimesPrecision - max.Point * a.Precision;
                double z2 = b.MeanTimesPrecision - max.Point * b.Precision;
                if (a.IsPointMass)
                {
                    if (max.Point == a.Point)
                    {
                        return(Gaussian.PointMass(a.Point));
                    }
                    else
                    {
                        z1 = 0;
                    }
                }
                if (b.IsPointMass)
                {
                    if (max.Point == b.Point)
                    {
                        return(Gaussian.PointMass(b.Point));
                    }
                    else
                    {
                        z2 = 0;
                    }
                }
                double alpha = (z1 + alpha1) * w1 + (z2 + alpha2) * w2;
                double beta  = -alpha * alpha;
                if (w1 > 0)
                {
                    beta += (z1 * z1 - a.Precision + (2 * z1 + z2) * alpha1) * w1;
                }
                if (w2 > 0)
                {
                    beta += (z2 * z2 - b.Precision + (2 * z2 + z1) * alpha2) * w2;
                }
                //Console.WriteLine($"z1={z1} w1={w1} alpha1={alpha1} z2={z2} w2={w2} alpha2={alpha2} alpha={alpha} beta={beta:r}");
                return GaussianOp.GaussianFromAlphaBeta(max, alpha, -beta, ForceProper);
            }
            bool useMessage = max.Precision > 10;

            if (useMessage)
            {
                // We want to avoid computing 1/max.Precision or max.MeanTimesPrecision/max.Precision, since these lead to roundoff errors.
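                // z1 = (m1 - mMax) / (v1 + vMax), where mMax = max.MeanTimesPrecision / max.Precision and vMax = 1 / max.Precision,
                // written with numerator and denominator multiplied through by max.Precision; likewise for z2.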
                double z1    = (m1 * max.Precision - max.MeanTimesPrecision) / (v1 * max.Precision + 1);
                double z2    = (m2 * max.Precision - max.MeanTimesPrecision) / (v2 * max.Precision + 1);
                double alpha = 0;
                if (w1 > 0)
                {
                    alpha += (z1 + vx1 * max.Precision * alpha1) * w1;
                }
                if (w2 > 0)
                {
                    alpha += (z2 + vx2 * max.Precision * alpha2) * w2;
                }
                double beta = -alpha * alpha;
                if (w1 > 0)
                {
                    double diff;
                    // compute diff to avoid roundoff errors
                    if (a.IsPointMass)
                    {
                        diff = m2 - mx1;
                    }
                    else
                    {
                        diff = (m2 * (max.Precision + a.Precision) - (max.MeanTimesPrecision + a.MeanTimesPrecision)) * vx1;
                    }
                    if (!b.IsPointMass)
                    {
                        diff *= vx1 / (v2 + vx1);
                    }
                    beta += (z1 * z1 - max.Precision / (v1 * max.Precision + 1) + (2 * z1 + diff * max.Precision) * vx1 * max.Precision * alpha1) * w1;
                }
                if (w2 > 0)
                {
                    double diff;
                    if (b.IsPointMass)
                    {
                        diff = m1 - mx2;
                    }
                    else
                    {
                        diff = (m1 * (max.Precision + b.Precision) - (max.MeanTimesPrecision + b.MeanTimesPrecision)) * vx2;
                    }
                    if (!a.IsPointMass)
                    {
                        diff *= vx2 / (v1 + vx2);
                    }
                    beta += (z2 * z2 - max.Precision / (v2 * max.Precision + 1) + (2 * z2 + diff * max.Precision) * vx2 * max.Precision * alpha2) * w2;
                }
                bool check = false;
                if (check)
                {
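                    // Sanity check: compare alpha and beta against central finite differences of logZ
                    // with respect to the mean of max, holding max.Precision fixed.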
                    double mx = max.GetMean();
                    double delta = mx * 1e-4;
                    double logzd, logw1d, alpha1d, vx1d, mx1d, logw2d, alpha2d, vx2d, mx2d;
                    ComputeStats(Gaussian.FromMeanAndPrecision(mx + delta, max.Precision), a, b, out logzd, out logw1d, out alpha1d, out vx1d, out mx1d, out logw2d, out alpha2d, out vx2d, out mx2d);
                    double logzd2;
                    ComputeStats(Gaussian.FromMeanAndPrecision(mx - delta, max.Precision), a, b, out logzd2, out logw1d, out alpha1d, out vx1d, out mx1d, out logw2d, out alpha2d, out vx2d, out mx2d);
                    double alphaCheck = (logzd - logzd2) / (2 * delta);
                    double alphaError = Math.Abs(alpha - alphaCheck);
                    double betaCheck  = (logzd + logzd2 - 2 * logz) / (delta * delta);
                    double betaError  = Math.Abs(beta - betaCheck);
                    Console.WriteLine($"alpha={alpha} check={alphaCheck} error={alphaError} beta={beta} check={betaCheck} error={betaError}");
                }
                //Console.WriteLine($"z1={z1} w1={w1} vx1={vx1} alpha1={alpha1} z2={z2} w2={w2} vx2={vx2} alpha2={alpha2} alpha={alpha} beta={beta:r} {max.Precision:r}");
                return GaussianOp.GaussianFromAlphaBeta(max, alpha, -beta, ForceProper);
            }
            // the posterior is a mixture model with weights exp(logw1-logz), exp(logw2-logz) and distributions
            // N(x; mx1, vx1) phi((x - m2)/sqrt(v2)) / phi((mx1 - m2)/sqrt(vx1 + v2))
            // the moments of the posterior are computed via the moments of these two components.
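            // For a two-component mixture with weights w1 + w2 = 1, component means mc1, mc2 and variances vc1, vc2,
            // the mixture mean is w1*mc1 + w2*mc2 and the mixture variance is w1*vc1 + w2*vc2 + w1*w2*(mc1 - mc2)^2,
            // which is what the code below computes.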
            if (vx1 == 0)
            {
                alpha1 = 0;
            }
            if (vx2 == 0)
            {
                alpha2 = 0;
            }
            double mc1 = mx1 + alpha1 * vx1;
            double mc2 = mx2 + alpha2 * vx2;
            double m, v;

            if (w1 == 0)
            {
                // avoid dealing with infinities.
                m = mx2;
                v = vx2;
            }
            else if (w2 == 0)
            {
                // avoid dealing with infinities.
                m = mx1;
                v = vx1;
            }
            else
            {
                m = w1 * mc1 + w2 * mc2;
                double beta1;
                if (alpha1 == 0)
                {
                    beta1 = 0;
                }
                else
                {
                    double r1 = (mx1 - m2) / (vx1 + v2);
                    beta1 = alpha1 * (alpha1 + r1);
                }
                double vc1 = vx1 * (1 - vx1 * beta1);
                double beta2;
                if (alpha2 == 0)
                {
                    beta2 = 0;
                }
                else
                {
                    double r2 = (mx2 - m1) / (vx2 + v1);
                    beta2 = alpha2 * (alpha2 + r2);
                }
                double vc2  = vx2 * (1 - vx2 * beta2);
                double diff = mc1 - mc2;
                v = w1 * vc1 + w2 * vc2 + w1 * w2 * diff * diff;
            }
            Gaussian result = new Gaussian(m, v);

            result.SetToRatio(result, max, ForceProper);
            if (Double.IsNaN(result.Precision) || Double.IsNaN(result.MeanTimesPrecision))
            {
                throw new InferRuntimeException($"result is NaN.  max={max}, a={a}, b={b}");
            }
            return result;
        }
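
For context, here is a minimal usage sketch of this operator, assuming the standard Microsoft.ML.Probabilistic.Distributions and Microsoft.ML.Probabilistic.Factors namespaces; the wrapper class name and the input distributions are illustrative, not taken from the example above.

        using System;
        using Microsoft.ML.Probabilistic.Distributions;
        using Microsoft.ML.Probabilistic.Factors;

        internal static class MaxMessageSketch   // hypothetical wrapper, for illustration only
        {
            internal static void Run()
            {
                // Two proper Gaussian inputs to the Max factor (arbitrary illustrative values).
                Gaussian a = Gaussian.FromMeanAndVariance(0.0, 1.0);
                Gaussian b = Gaussian.FromMeanAndVariance(1.0, 2.0);
                // With a uniform incoming message on max, the result is the Gaussian (EP) projection
                // of the distribution of max(a, b).
                Gaussian toMax = MaxGaussianOp.MaxAverageConditional(Gaussian.Uniform(), a, b);
                Console.WriteLine(toMax);
            }
        }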
Example #24
        internal void StudentIsPositiveTest2()
        {
            GaussianOp.ForceProper = false;
            double   shape     = 1;
            double   mean      = -1;
            Gamma    precPrior = Gamma.FromShapeAndRate(shape, shape);
            Gaussian meanPrior = Gaussian.PointMass(mean);
            double   evExpected;
            Gaussian xExpected = StudentIsPositiveExact(mean, precPrior, out evExpected);

            Gaussian xF2 = Gaussian.FromMeanAndVariance(-1, 1);
            // the energy has a stationary point here (min in both dimensions), even though xF0 is improper
            Gaussian xB0 = new Gaussian(2, 1);

            xF2 = Gaussian.FromMeanAndVariance(-4.552, 6.484);
            //xB0 = new Gaussian(1.832, 0.9502);
            //xB0 = new Gaussian(1.792, 1.558);
            //xB0 = new Gaussian(1.71, 1.558);
            //xB0 = new Gaussian(1.792, 1.5);
            Gaussian xF0 = GaussianOp_Slow.SampleAverageConditional(xB0, meanPrior, precPrior);

            //Console.WriteLine("xB0 = {0} xF0 = {1}", xB0, xF0);
            //Console.WriteLine(xF0*xB0);
            //Console.WriteLine(xF2*xB0);

            xF2 = new Gaussian(0.8651, 1.173);
            xB0 = new Gaussian(-4, 2);
            xB0 = new Gaussian(7, 7);
            if (false)
            {
                xF2 = new Gaussian(mean, 1);
                double[] xs      = EpTests.linspace(0, 100, 1000);
                double[] logTrue = Util.ArrayInit(xs.Length, i => GaussianOp.LogAverageFactor(xs[i], mean, precPrior));
                Normalize(logTrue);
                xF2 = FindxF4(xs, logTrue, xF2);
                xF2 = Gaussian.FromNatural(-0.85, 0);
                xB0 = IsPositiveOp.XAverageConditional(true, xF2);
                Console.WriteLine("xF = {0} xB = {1}", xF2, xB0);
                Console.WriteLine("x = {0} should be {1}", xF2 * xB0, xExpected);
                Console.WriteLine("proj[T*xB] = {0}", GaussianOp_Slow.SampleAverageConditional(xB0, meanPrior, precPrior) * xB0);
                double ev = System.Math.Exp(IsPositiveOp.LogAverageFactor(true, xF2) + GaussianOp_Slow.LogAverageFactor(xB0, meanPrior, precPrior) - xF2.GetLogAverageOf(xB0));
                Console.WriteLine("evidence = {0} should be {1}", ev, evExpected);
                return;
            }
            if (false)
            {
                xF2 = new Gaussian(mean, 1);
                xF2 = FindxF3(xExpected, evExpected, meanPrior, precPrior, xF2);
                xB0 = IsPositiveOp.XAverageConditional(true, xF2);
                Console.WriteLine("xF = {0} xB = {1}", xF2, xB0);
                Console.WriteLine("x = {0} should be {1}", xF2 * xB0, xExpected);
                //double ev = Math.Exp(IsPositiveOp.LogAverageFactor(true, xF2) + GaussianOp.LogAverageFactor_slow(xB0, meanPrior, precPrior) - xF2.GetLogAverageOf(xB0));
                //Console.WriteLine("evidence = {0} should be {1}", ev, evExpected);
                return;
            }
            if (false)
            {
                xF2 = new Gaussian(-2, 10);
                xF2 = FindxF2(meanPrior, precPrior, xF2);
                xB0 = IsPositiveOp.XAverageConditional(true, xF2);
                xF0 = GaussianOp_Slow.SampleAverageConditional(xB0, meanPrior, precPrior);
                Console.WriteLine("xB = {0}", xB0);
                Console.WriteLine("xF = {0} should be {1}", xF0, xF2);
                return;
            }
            if (false)
            {
                xF2 = new Gaussian(-3998, 4000);
                xF2 = new Gaussian(0.8651, 1.173);
                xB0 = new Gaussian(-4, 2);
                xB0 = new Gaussian(2000, 1e-5);
                xB0 = FindxB(xB0, meanPrior, precPrior, xF2);
                xF0 = GaussianOp_Slow.SampleAverageConditional(xB0, meanPrior, precPrior);
                Console.WriteLine("xB = {0}", xB0);
                Console.WriteLine("xF = {0} should be {1}", xF0, xF2);
                return;
            }
            if (false)
            {
                //xF2 = new Gaussian(-7, 10);
                //xF2 = new Gaussian(-50, 52);
                xB0 = new Gaussian(-1.966, 5.506e-08);
                //xF2 = new Gaussian(-3998, 4000);
                xF0 = FindxF(xB0, meanPrior, precPrior, xF2);
                Gaussian xB2 = IsPositiveOp.XAverageConditional(true, xF0);
                Console.WriteLine("xF = {0}", xF0);
                Console.WriteLine("xB = {0} should be {1}", xB2, xB0);
                return;
            }
            if (true)
            {
                xF0 = new Gaussian(-3.397e+08, 5.64e+08);
                xF0 = new Gaussian(-2.373e+04, 2.8e+04);
                xB0 = new Gaussian(2.359, 1.392);
                xF0 = Gaussian.FromNatural(-0.84, 0);
                //xF0 = Gaussian.FromNatural(-0.7, 0);
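                // Alternate between solving for the backward message xB consistent with the current xF,
                // and the forward message xF consistent with the current xB, printing how far each is
                // from the message the standard operators would produce.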
                for (int iter = 0; iter < 10; iter++)
                {
                    xB0 = FindxB(xB0, meanPrior, precPrior, xF0);
                    Gaussian xFt = GaussianOp_Slow.SampleAverageConditional(xB0, meanPrior, precPrior);
                    Console.WriteLine("xB = {0}", xB0);
                    Console.WriteLine("xF = {0} should be {1}", xFt, xF0);
                    xF0 = FindxF0(xB0, meanPrior, precPrior, xF0);
                    Gaussian xBt = IsPositiveOp.XAverageConditional(true, xF0);
                    Console.WriteLine("xF = {0}", xF0);
                    Console.WriteLine("xB = {0} should be {1}", xBt, xB0);
                }
                Console.WriteLine("x = {0} should be {1}", xF0 * xB0, xExpected);
                double ev = System.Math.Exp(IsPositiveOp.LogAverageFactor(true, xF0) + GaussianOp_Slow.LogAverageFactor(xB0, meanPrior, precPrior) - xF0.GetLogAverageOf(xB0));
                Console.WriteLine("evidence = {0} should be {1}", ev, evExpected);
                return;
            }

            //var precs = EpTests.linspace(1e-6, 1e-5, 200);
            var precs = EpTests.linspace(xB0.Precision / 11, xB0.Precision, 100);

            //var precs = EpTests.linspace(xF0.Precision/20, xF0.Precision/3, 100);
            precs = EpTests.linspace(1e-9, 1e-5, 100);
            //precs = new double[] { xB0.Precision };
            var ms = EpTests.linspace(xB0.GetMean() - 1, xB0.GetMean() + 1, 100);

            //var ms = EpTests.linspace(xF0.GetMean()-1, xF0.GetMean()+1, 100);
            //precs = EpTests.linspace(1.0/10, 1.0/8, 200);
            ms = EpTests.linspace(2000, 4000, 100);
            //ms = new double[] { xB0.GetMean() };
            Matrix result  = new Matrix(precs.Length, ms.Length);
            Matrix result2 = new Matrix(precs.Length, ms.Length);

            //ms = new double[] { 0.7 };
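            // Scan a grid of candidate message means and precisions, recording the surrogate log-evidence
            // surface in 'result' (and a second diagnostic surface in 'result2'); both are written to
            // student.mat below for inspection in MATLAB.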
            for (int j = 0; j < ms.Length; j++)
            {
                double   maxZ  = double.NegativeInfinity;
                double   minZ  = double.PositiveInfinity;
                Gaussian maxxF = Gaussian.Uniform();
                Gaussian minxF = Gaussian.Uniform();
                Gaussian maxxB = Gaussian.Uniform();
                Gaussian minxB = Gaussian.Uniform();
                Vector   v     = Vector.Zero(3);
                for (int i = 0; i < precs.Length; i++)
                {
                    Gaussian xF = Gaussian.FromMeanAndPrecision(ms[j], precs[i]);
                    xF = xF2;
                    Gaussian xB = IsPositiveOp.XAverageConditional(true, xF);
                    xB = Gaussian.FromMeanAndPrecision(ms[j], precs[i]);
                    //xB = xB0;
                    v[0] = IsPositiveOp.LogAverageFactor(true, xF);
                    v[1] = GaussianOp.LogAverageFactor_slow(xB, meanPrior, precPrior);
                    //v[1] = GaussianOp_Slow.LogAverageFactor(xB, meanPrior, precPrior);
                    v[2] = -xF.GetLogAverageOf(xB);
                    double logZ = v.Sum();
                    double Z    = logZ; // note: despite the name, Z holds the log-evidence logZ, not exp(logZ)
                    if (Z > maxZ)
                    {
                        maxZ  = Z;
                        maxxF = xF;
                        maxxB = xB;
                    }
                    if (Z < minZ)
                    {
                        minZ  = Z;
                        minxF = xF;
                        minxB = xB;
                    }
                    result[i, j]  = Z;
                    result2[i, j] = IsPositiveOp.LogAverageFactor(true, xF) + xF0.GetLogAverageOf(xB) - xF.GetLogAverageOf(xB);
                    //Gaussian xF3 = GaussianOp.SampleAverageConditional_slower(xB, meanPrior, precPrior);
                    //result[i, j] = Math.Pow(xF3.Precision - xF.Precision, 2);
                    //result2[i, j] = Math.Pow((xF2*xB).Precision - (xF*xB).Precision, 2);
                    //result2[i, j] = -xF.GetLogAverageOf(xB);
                    //Gaussian xF2 = GaussianOp.SampleAverageConditional_slow(xB, Gaussian.PointMass(0), precPrior);
                    Gaussian xMarginal = xF * xB;
                    //Console.WriteLine("xF = {0} Z = {1} x = {2}", xF, Z.ToString("g4"), xMarginal);
                }
                double delta = v[1] - v[2];
                //Console.WriteLine("xF = {0} xB = {1} maxZ = {2} x = {3}", maxxF, maxxB, maxZ.ToString("g4"), maxxF*maxxB);
                //Console.WriteLine("xF = {0} maxZ = {1} delta = {2}", maxxF, maxZ.ToString("g4"), delta.ToString("g4"));
                Console.WriteLine("xF = {0} xB = {1} minZ = {2} x = {3}", minxF, minxB, minZ.ToString("g4"), minxF * minxB);
            }
            // TODO: make this output path cross-platform
            using (var writer = new MatlabWriter(@"..\..\..\Tests\student.mat"))
            {
                writer.Write("z", result);
                writer.Write("z2", result2);
                writer.Write("precs", precs);
                writer.Write("ms", ms);
            }
        }
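
The recombination pattern used repeatedly in this test follows the standard EP identities: the marginal on x is the product of the forward and backward messages, and the evidence combines the per-factor log-averages minus the log overlap of the two messages. Below is a minimal sketch, assuming the usual Microsoft.ML.Probabilistic namespaces and that the operator classes used above (IsPositiveOp, GaussianOp_Slow) are accessible from the calling assembly; the wrapper class name and the starting messages are illustrative only.

        using System;
        using Microsoft.ML.Probabilistic.Distributions;
        using Microsoft.ML.Probabilistic.Factors;

        internal static class StudentIsPositiveSketch   // hypothetical wrapper, for illustration only
        {
            internal static void Run()
            {
                Gaussian meanPrior = Gaussian.PointMass(-1);
                Gamma precPrior = Gamma.FromShapeAndRate(1, 1);
                // Start from some backward message into the Gaussian factor (illustrative value).
                Gaussian xB = new Gaussian(2, 1);
                // Forward message into x from the Gaussian factor.
                Gaussian xF = GaussianOp_Slow.SampleAverageConditional(xB, meanPrior, precPrior);
                // Backward message from the IsPositive constraint (at an EP fixed point this equals xB).
                Gaussian xB2 = IsPositiveOp.XAverageConditional(true, xF);
                // EP marginal: product of the forward and backward messages.
                Gaussian xMarginal = xF * xB2;
                // Evidence: per-factor log-averages minus the double-counted message overlap.
                double logEvidence = IsPositiveOp.LogAverageFactor(true, xF)
                                     + GaussianOp_Slow.LogAverageFactor(xB2, meanPrior, precPrior)
                                     - xF.GetLogAverageOf(xB2);
                Console.WriteLine("x = {0}, evidence = {1}", xMarginal, Math.Exp(logEvidence));
            }
        }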