Example #1
		/// <summary>
		/// Gibbs message to 'precision'
		/// </summary>
		/// <param name="sample">Constant value for 'sample'.</param>
		/// <param name="mean">Constant value for 'mean'.</param>
		/// <returns>The outgoing Gibbs message to the 'precision' argument</returns>
		/// <remarks><para>
		/// The outgoing message is the factor viewed as a function of 'precision' conditioned on the given values.
		/// </para></remarks>
		public static Gamma PrecisionConditional(double sample, double mean)
		{
			Gamma result = new Gamma();
			double diff = sample - mean;
			result.Rate = 0.5 * diff * diff;
			result.Shape = 1.5;
			return result;
		}
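The message above encodes the exact conditional for a Normal precision given one observation: a Gamma with shape 3/2 and rate (sample − mean)²/2. A minimal usage sketch under that reading (the call and the printed values are illustrative, not part of the original source):

		// Hypothetical usage sketch for the factor above.
		Gamma precMsg = PrecisionConditional(1.3, 1.0);
		Console.WriteLine("Shape = {0}, Rate = {1}", precMsg.Shape, precMsg.Rate);
		// prints Shape = 1.5, Rate = 0.045 (0.5 * 0.3 * 0.3)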
Example #2
        private static void ARSTest(int sampleCount = 100000)
        {
            List<double> acceptedSamples = new List<double>();

            double shape = 2;
            double scale = 2;
            Gamma gamma = new Gamma(shape, scale);
            DiscreteEnvelope envelope = new DiscreteEnvelope(0, 1000, gamma, new double[] { 1, 5 });

            int rejectedCount = 0;
            while (acceptedSamples.Count < sampleCount)
            {
                var sampleRegion = envelope.SampleDiscrete();
                double sample = envelope.SampleContinuous(sampleRegion);

                double ratio = Math.Exp(gamma.GetLogProb(sample) - envelope.GetLogProb(sampleRegion, sample));
                double u = Utils.SRandom.GetUniform();

                if (u < ratio)
                {
                    Console.WriteLine("Sample accepted {0}/{1} : {2}", acceptedSamples.Count + 1, sampleCount, sample);
                    acceptedSamples.Add(sample);
                }
                else
                {
                    Console.WriteLine("Rejected, adding cut at {0}", sample);
                    rejectedCount++;
                    envelope.AddCutPoint(sample);
                }
            }
            double mean = acceptedSamples.Sum() / acceptedSamples.Count;
            double variance = acceptedSamples.Select(s => (s - mean) * (s - mean)).Sum() / (sampleCount - 1);

            Console.WriteLine("Total Rejected = {0}", rejectedCount);
            Console.WriteLine("Sample Mean = {0}, Sample Variance = {1}", mean, variance);
            Console.WriteLine("True Mean = {0},     True Variance = {1}", shape * scale, shape * scale * scale);
        }
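For a Gamma distribution with shape k and scale θ the mean is kθ and the variance is kθ²; with shape = scale = 2 as above, the closing consistency check should print a true mean of 4 and a true variance of 8, which the empirical moments of the accepted samples should approach as sampleCount grows.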
Example #3
        public void ValidateMinimum()
        {
            var n = new Gamma(1.0, 1.0);

            Assert.AreEqual(0.0, n.Minimum);
        }
Example #4
        private void Gamma_Click(object sender, EventArgs e)
        {
            Gamma dlg = new Gamma();
            dlg.rouge = dlg.vert = dlg.bleu = 1;

            if (DialogResult.OK == dlg.ShowDialog())
            {
                if (filtres.Gamma(m_Bitmap, dlg.rouge, dlg.vert, dlg.bleu))
                {
                    this.Invalidate();
                    this.Refresh();
                }
            }
        }
Example #5
        public void ValidateMode(double shape, double invScale, double mode)
        {
            var n = new Gamma(shape, invScale);

            Assert.AreEqual(mode, n.Mode);
        }
Example #6
		/// <summary>
		/// Evidence message for EP
		/// </summary>
		/// <param name="d">Constant value for 'd'.</param>
		/// <param name="log">Incoming message from 'log'.</param>
		/// <returns>Logarithm of the factor's contribution the EP model evidence</returns>
		/// <remarks><para>
		/// The formula for the result is <c>log(sum_(log) p(log) factor(log,d) / sum_log p(log) messageTo(log))</c>.
		/// Adding up these values across all factors and variables gives the log-evidence estimate for EP.
		/// </para></remarks>
		public static double LogEvidenceRatio(double log, Gamma d)
		{
			return LogAverageFactor(log, d);
		}
Example #7
		/// <summary>
		/// VMP message to 'd'
		/// </summary>
		/// <param name="log">Incoming message from 'log'. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="d">Incoming message from 'd'. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="to_D">Previous outgoing message to 'D'.</param>
		/// <returns>The outgoing VMP message to the 'd' argument</returns>
		/// <remarks><para>
		/// The outgoing message is the factor viewed as a function of 'd' with 'log' integrated out.
		/// The formula is <c>sum_log p(log) factor(log,d)</c>.
		/// </para></remarks>
		/// <exception cref="ImproperMessageException"><paramref name="d"/> is not a proper distribution</exception>
		/// <exception cref="ImproperMessageException"><paramref name="log"/> is not a proper distribution</exception>
		public static Gamma DAverageLogarithm([SkipIfUniform] Gaussian log, [SkipIfUniform] Gamma d, Gamma to_D)
		{
			if (log.IsPointMass)
				return DAverageLogarithm(log.Point, d, to_D);
			Vector grad = Vector.Zero(2);
			double meanLog = d.GetMeanLog();
			double m,v;
			log.GetMeanAndVariance(out m, out v);
			grad[0] = -MMath.Tetragamma(d.Shape) / (2 * v) - MMath.Trigamma(d.Shape) / v * (meanLog-m);
			grad[1] = (meanLog - m) / (v * d.Rate);
			Gamma approximateFactor = GammaFromShapeAndRateOp.NonconjugateProjection(d, grad);
			if (damping == 0.0)
				return approximateFactor;
			else
				return (approximateFactor ^ (1 - damping)) * (to_D ^ damping);
		}
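The damped return value is the usual exponential-family trick: powers and products of Gammas act linearly on the natural parameters, so

\[ q_{\text{new}}^{\,1-\gamma}\, q_{\text{old}}^{\,\gamma} \quad\Longleftrightarrow\quad \lambda = (1-\gamma)\,\lambda_{\text{new}} + \gamma\,\lambda_{\text{old}}, \]

i.e. the outgoing message is a convex combination, in natural-parameter space, of the undamped projection and the previous message (with γ the damping field used above).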
Example #8
 public void ValidateMinimum()
 {
     var n = new Gamma(1.0, 1.0);
     Assert.AreEqual(0.0, n.Minimum);
 }
Example #9
 public void ValidateToString()
 {
     var n = new Gamma(1.0, 2.0);
     Assert.AreEqual("Gamma(Shape = 1, Inverse Scale = 2)", n.ToString());
 }
Example #10
        /// <summary>
        /// Computes the theoretical probabilities (Magic)
        /// </summary>
        private double getProbability(int u, double eta)
        {
            int    l;
            double sum, p;

            if (u == 0)
            {
                p = Math.Exp(-eta);
            }
            else
            {
                sum = 0.0;

                for (l = 1; l <= u; l++)
                {
                    sum += Math.Exp(-eta - u * Math.Log(2) + l * Math.Log(eta) - Gamma.Log(l + 1) + Gamma.Log(u) - Gamma.Log(l) - Gamma.Log(u - l + 1));
                }

                p = sum;
            }

            return(p);
        }
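Reading off the log-domain terms (Gamma.Log(l + 1) = ln l!, Gamma.Log(u) = ln (u − 1)!, and so on), the loop appears to implement the theoretical match-count probabilities of the NIST overlapping-template test:

\[ P(U = 0) = e^{-\eta}, \qquad P(U = u) = \frac{e^{-\eta}}{2^{u}} \sum_{l=1}^{u} \frac{\eta^{l}}{l!} \binom{u-1}{l-1} \quad (u \ge 1). \]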
Example #11
        /// <summary>
        /// Computes the p-value from the number of template matches per block
        /// </summary>
        /// <param name="hash">Pseudorandom sequence</param>
        /// <returns>p-value</returns>
        public double getPValue(BitArray hash)
        {
            var blocksCount = hash.Length / _blockSize;
            var blocks      = new int[blocksCount][];
            // lambda and eta are used to compute the theoretical probabilities
            var lambda = (_blockSize - _template.Length + 1) / Math.Pow(2, _template.Length);
            var eta    = lambda / 2;
            var k      = Convert.ToInt32(2 * lambda); // number of degrees of freedom

            var pi = new double[k + 1];               // theoretical probabilities
            var numberOfOccurrencesInBlocks = new int[k + 1];
            var sum = 0.0;

            // compute the theoretical probabilities
            for (int i = 0; i < k; i++)
            {
                pi[i] = getProbability(i, eta);
                sum  += pi[i];
            }

            pi[k] = 1 - sum;

            // split the sequence into blocks
            for (int i = 0; i < blocksCount; i++)
            {
                blocks[i] = new int[_blockSize];

                for (int j = 0; j < _blockSize; j++)
                {
                    blocks[i][j] = hash[i * _blockSize + j] ? 1 : 0;
                }
            }

            // search for the template within each block
            for (int i = 0; i < blocksCount; i++)
            {
                var numberOfOccurrences = 0;

                for (int j = 0; j < _blockSize - _template.Length + 1; j++)
                {
                    bool match = true;

                    for (int l = 0; l < _template.Length; l++)
                    {
                        if (_template[l] != hash[i * _blockSize + j + l])
                        {
                            match = false;
                        }
                    }

                    if (match)
                    {
                        numberOfOccurrences++;
                    }
                }

                if (numberOfOccurrences < k)
                {
                    numberOfOccurrencesInBlocks[numberOfOccurrences]++;
                }
                else
                {
                    numberOfOccurrencesInBlocks[k]++;
                }
            }

            // measure of how well the observed number of template matches
            // per block fits the expected proportions
            var x2 = 0.0;

            for (int i = 0; i < k + 1; i++)
            {
                x2 += Math.Pow(numberOfOccurrencesInBlocks[i] - blocksCount * pi[i], 2) / (blocksCount * pi[i]);
            }

            var pValue = 1 - Gamma.LowerIncomplete(k / 2.0, x2 / 2.0); // regularized upper incomplete gamma function

            return(pValue);
        }
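With ν_i the observed bin counts and N the number of blocks, the statistic and p-value computed above are

\[ \chi^2 = \sum_{i=0}^{k} \frac{(\nu_i - N\pi_i)^2}{N\pi_i}, \qquad p = 1 - P\!\left(\tfrac{k}{2}, \tfrac{\chi^2}{2}\right) = Q\!\left(\tfrac{k}{2}, \tfrac{\chi^2}{2}\right), \]

where P and Q are the regularized lower and upper incomplete gamma functions.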
Example #12
        public void GammaUpperRTest()
        {
            // Example values from
            // http://opensource.zyba.com/code/maths/special/gamma/gamma_upper_reg.php

            double expected, actual;

            actual   = Gamma.UpperIncomplete(0.000000, 2);
            expected = 1.000000;
            Assert.AreEqual(expected, actual);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(0.250000, 2);
            expected = 0.017286;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(0.500000, 2);
            expected = 0.045500;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(0.750000, 2);
            expected = 0.085056;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(1.000000, 2);
            expected = 0.135335;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(1.250000, 2);
            expected = 0.194847;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(1.500000, 2);
            expected = 0.261464;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(1.750000, 2);
            expected = 0.332706;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(2.000000, 2);
            expected = 0.406006;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(2.250000, 2);
            expected = 0.478944;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(2.500000, 2);
            expected = 0.549416;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));

            actual   = Gamma.UpperIncomplete(2.750000, 2);
            expected = 0.615734;
            Assert.AreEqual(expected, actual, 1e-6);
            Assert.IsFalse(double.IsNaN(actual));
        }
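Since the regularized upper and lower incomplete gamma functions are complementary, the values above can also be cross-checked without reference tables. A hedged sketch, assuming the same Gamma.UpperIncomplete/Gamma.LowerIncomplete pair used elsewhere on this page:

        public void GammaUpperLowerComplementTest()
        {
            // Q(a, x) + P(a, x) should equal 1 for a > 0.
            foreach (double a in new[] { 0.25, 0.5, 1.0, 2.0, 2.75 })
            {
                double upper = Gamma.UpperIncomplete(a, 2);
                double lower = Gamma.LowerIncomplete(a, 2);
                Assert.AreEqual(1.0, upper + lower, 1e-10);
            }
        }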
Example #13
        /// <summary>
        ///   Initializes a new instance of the <see cref="TDistribution"/> class.
        /// </summary>
        ///
        /// <param name="degreesOfFreedom">The degrees of freedom.</param>
        ///
        public TDistribution(double degreesOfFreedom)
        {
            if (degreesOfFreedom < 1)
            {
                throw new ArgumentOutOfRangeException("degreesOfFreedom");
            }

            this.DegreesOfFreedom = degreesOfFreedom;

            double v = degreesOfFreedom;

            // TODO: Use LogGamma instead.
            this.constant = Gamma.Function((v + 1) / 2.0) / (Math.Sqrt(v * Math.PI) * Gamma.Function(v / 2.0));
        }
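The constant being computed is the Student-t normalizer Γ((ν+1)/2) / (√(νπ) Γ(ν/2)), whose two gamma factors overflow for large ν even though their ratio stays moderate. The TODO could be resolved in log space; a sketch, assuming Gamma.Log is the log-gamma function used in the other examples on this page:

            // Sketch: evaluate the normalizing constant in log space so the
            // intermediate gamma values cannot overflow for large v.
            this.constant = Math.Exp(Gamma.Log((v + 1) / 2.0)
                - 0.5 * Math.Log(v * Math.PI) - Gamma.Log(v / 2.0));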
Example #14
        public void CanSampleSequenceStatic()
        {
            var ied = Gamma.Samples(new Random(0), 1.0, 1.0);

            ied.Take(5).ToArray();
        }
Example #15
 public void CanSampleStatic()
 {
     Gamma.Sample(new Random(0), 1.0, 1.0);
 }
Example #16
        public void ValidateMaximum()
        {
            var n = new Gamma(1.0, 1.0);

            Assert.AreEqual(Double.PositiveInfinity, n.Maximum);
        }
Example #17
 public void ValidateEntropy(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale, 
     [Values(Double.NaN, 3.3025850929940456285068402234265387271634735938763824, 1.0, 0.23346908548693395836262094490967812177376750477943892, 2.5360541784809796423806123995940423293748689934081866, 0.0)] double entropy)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(entropy, n.Entropy, 13);
 }
Example #18
 /// <summary>
 ///   Gets the cumulative distribution function (cdf) for
 ///   this distribution evaluated at point <c>x</c>.
 /// </summary>
 ///
 /// <param name="x">A single point in the distribution range.</param>
 ///
 /// <remarks>
 ///   The Cumulative Distribution Function (CDF) describes the cumulative
 ///   probability that a given value or any value smaller than it will occur.
 /// </remarks>
 ///
 public override double DistributionFunction(double x)
 {
     return(Gamma.LowerIncomplete(shape, x / scale));
 }
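For shape k and scale θ this is the standard gamma CDF, \( F(x) = P(k, x/\theta) = \gamma(k, x/\theta)/\Gamma(k) \): the regularized lower incomplete gamma function is the CDF itself, with the argument rescaled by the scale parameter.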
Example #19
 public void ValidateMean(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale, 
     [Values(Double.NaN, 10.0, 1.0, 1.0, 10.0, 10.0)] double mean)
 {
     var n = new Gamma(shape, invScale);
     Assert.AreEqual(mean, n.Mean);
 }
Example #20
 public ImmutableColor GetValue(Gamma gamma)
 => Values[(int)gamma];
Example #21
 public void ValidateSkewness(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale, 
     [Values(Double.NaN, 2.0, 2.0, 0.63245553203367586639977870888654370674391102786504337, 0.63245553203367586639977870888654370674391102786504337, 0.0)] double skewness)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(skewness, n.Skewness, 15);
 }
Example #22
 public ImmutableColor this[Gamma g]
 => GetValue(g);
Example #23
 public void CanCreateGamma(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale)
 {
     var n = new Gamma(shape, invScale);
     Assert.AreEqual(shape, n.Shape);
     Assert.AreEqual(invScale, n.InvScale);
 }
Example #24
        static private ClickModelMarginals Model1(int numLabels, bool allowNoExams)
        {
            // Inference engine must be EP because of the ConstrainBetween constraint
            InferenceEngine engine = new InferenceEngine();

            if (!(engine.Algorithm is Algorithms.ExpectationPropagation))
            {
                Console.WriteLine("This example only runs with Expectation Propagation");
                return(null);
            }

            engine.NumberOfIterations = 10;  // Restrict the number of iterations

            // Includes lower and upper bounds
            int numThresholds = numLabels + 1;

            //-------------------------------------------------------------
            // Specify prior distributions
            //-------------------------------------------------------------
            Gaussian priorScoreMean = Gaussian.FromMeanAndVariance(0.5, 1.0);
            Gamma    priorScorePrec = Gamma.FromMeanAndVariance(2.0, 0.0);
            Gamma    priorJudgePrec = Gamma.FromMeanAndVariance(2.0, 1.0);
            Gamma    priorClickPrec = Gamma.FromMeanAndVariance(2.0, 1.0);

            Gaussian[] priorThresholds;
            CreateThresholdPriors(numLabels, out priorThresholds);

            //-------------------------------------------------------------
            // Variables to infer
            //-------------------------------------------------------------
            Variable <double> scoreMean = Variable.Random(priorScoreMean).Named("scoreMean");
            Variable <double> scorePrec = Variable.Random(priorScorePrec).Named("scorePrec");
            Variable <double> judgePrec = Variable.Random(priorJudgePrec).Named("judgePrec");
            Variable <double> clickPrec = Variable.Random(priorClickPrec).Named("clickPrec");

            Variable <double>[] thresholds = new Variable <double> [numLabels + 1];
            for (int i = 0; i < thresholds.Length; i++)
            {
                thresholds[i] = Variable.Random(priorThresholds[i]).Named("thresholds" + i);
            }

            //----------------------------------------------------------------------------------
            // The model
            //----------------------------------------------------------------------------------
            VariableArray <Gaussian>[] observationDistribs  = new VariableArray <Gaussian> [numLabels];
            Variable <int>[]           numberOfObservations = new Variable <int> [numLabels];
            for (int i = 0; i < numLabels; i++)
            {
                numberOfObservations[i] = Variable.New <int>().Named("NumObs" + i);
                Range r = new Range(numberOfObservations[i]).Named("N" + i);
                observationDistribs[i] = Variable.Array <Gaussian>(r).Named("Obs" + i);
                VariableArray <double> scores  = Variable.Array <double>(r).Named("Scores" + i);
                VariableArray <double> scoresJ = Variable.Array <double>(r).Named("ScoresJ" + i);
                VariableArray <double> scoresC = Variable.Array <double>(r).Named("ScoresC" + i);
                scores[r]  = Variable.GaussianFromMeanAndPrecision(scoreMean, scorePrec).ForEach(r);
                scoresJ[r] = Variable.GaussianFromMeanAndPrecision(scores[r], judgePrec);
                scoresC[r] = Variable.GaussianFromMeanAndPrecision(scores[r], clickPrec);
                Variable.ConstrainBetween(scoresJ[r], thresholds[i], thresholds[i + 1]);
                Variable.ConstrainEqualRandom(scoresC[r], observationDistribs[i][r]);
            }

            // Get the arrays of human judgement labels, clicks, and examinations
            int[]  labels;
            int[]  clicks;
            int[]  exams;
            string fileName = Path.Combine(
#if NETCORE
                Path.GetDirectoryName(typeof(ClickModel).Assembly.Location),     // work dir is not the one with Microsoft.ML.Probabilistic.Tests.dll on netcore and neither is .Location on netfull
#endif
                "TutorialData", "ClickModel.txt");

            if (!File.Exists(fileName))
            {
                fileName = Path.Combine(
#if NETCORE
                    Path.GetDirectoryName(typeof(ClickModel).Assembly.Location), // work dir is not the one with Microsoft.ML.Probabilistic.Tests.dll on netcore and neither is .Location on netfull
#endif
                    "..", "Samples", "C#", "ExamplesBrowser", "TutorialData", "ClickModel.txt");
            }

            LoadData(fileName, allowNoExams, out labels, out clicks, out exams);

            // Convert the raw click data into uncertain Gaussian observations chunk-by-chunk
            Gaussian[][] allObs = getClickObservations(numLabels, labels, clicks, exams);

            // (a) Set the observation and observation count parameters in the model
            for (int i = 0; i < numLabels; i++)
            {
                numberOfObservations[i].ObservedValue = allObs[i].Length;
                observationDistribs[i].ObservedValue  = allObs[i];
            }

            // (b) Request the marginals
            ClickModelMarginals marginals = new ClickModelMarginals(numLabels);
            marginals.marginalScoreMean = engine.Infer <Gaussian>(scoreMean);
            marginals.marginalScorePrec = engine.Infer <Gamma>(scorePrec);
            marginals.marginalJudgePrec = engine.Infer <Gamma>(judgePrec);
            marginals.marginalClickPrec = engine.Infer <Gamma>(clickPrec);
            for (int i = 0; i < numThresholds; i++)
            {
                marginals.marginalThresh[i] = engine.Infer <Gaussian>(thresholds[i]);
            }

            Console.WriteLine("Training: sample size: " + labels.Length + "\n");
            Console.WriteLine("scoreMean = {0}", marginals.marginalScoreMean);
            Console.WriteLine("scorePrec = {0}", marginals.marginalScorePrec);
            Console.WriteLine("judgePrec = {0}", marginals.marginalJudgePrec);
            Console.WriteLine("clickPrec = {0}", marginals.marginalClickPrec);
            for (int t = 0; t < numThresholds; t++)
            {
                Console.WriteLine("threshMean {0} = {1}", t, marginals.marginalThresh[t]);
            }

            return(marginals);
        }
Example #25
		/// <summary>
		/// EP message to 'log'
		/// </summary>
		/// <param name="log">Incoming message from 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <param name="result">Modified to contain the outgoing message</param>
		/// <returns><paramref name="result"/></returns>
		/// <remarks><para>
		/// The outgoing message is a distribution matching the moments of 'log' as the random arguments are varied.
		/// The formula is <c>proj[p(log) sum_(d) p(d) factor(log,d)]/p(log)</c>.
		/// </para></remarks>
		public static Gaussian LogAverageConditional(Gaussian log, Gamma d, Gaussian result)
		{
			var g = Gamma.FromShapeAndRate(d.Shape + 1, d.Rate);
			return ExpOp.DAverageConditional(g, log, result);
		}
Example #26
        static private ClickModelMarginals Model2(int numLabels, bool allowNoExams)
        {
            // Inference engine must be EP because of the ConstrainBetween constraint
            InferenceEngine engine = new InferenceEngine();

            if (!(engine.Algorithm is Algorithms.ExpectationPropagation))
            {
                Console.WriteLine("This example only runs with Expectation Propagation");
                return(null);
            }

            engine.NumberOfIterations = 10;

            // Includes lower and upper bounds
            int numThresholds = numLabels + 1;

            // Partition the data into chunks to improve the schedule
            int chunkSize = 200;

            // Maximum number of passes through the data
            int maxPasses = 5;

            // The marginals at any given stage.
            ClickModelMarginals marginals = new ClickModelMarginals(numLabels);

            // Compare the marginals with the previous marginals to create
            // a convergence criterion
            Gaussian prevMargScoreMean;
            Gamma    prevMargJudgePrec;
            Gamma    prevMargClickPrec;
            double   convergenceThresh = 0.01;

            // Get the arrays of human judgement labels, clicks, and examinations
            int[]  labels;
            int[]  clicks;
            int[]  exams;
            string fileName = Path.Combine(
#if NETCORE
                Path.GetDirectoryName(typeof(ClickModel).Assembly.Location),     // work dir is not the one with Microsoft.ML.Probabilistic.Tests.dll on netcore and neither is .Location on netfull
#endif
                "TutorialData", "ClickModel.txt");

            if (!File.Exists(fileName))
            {
                fileName = Path.Combine(
#if NETCORE
                    Path.GetDirectoryName(typeof(ClickModel).Assembly.Location), // work dir is not the one with Microsoft.ML.Probabilistic.Tests.dll on netcore and neither is .Location on netfull
#endif
                    "..", "Samples", "C#", "ExamplesBrowser", "TutorialData", "ClickModel.txt");
            }

            LoadData(fileName, allowNoExams, out labels, out clicks, out exams);

            // Convert the raw click data into uncertain Gaussian observations chunk-by-chunk
            Gaussian[][][] allObs    = getClickObservations(numLabels, chunkSize, labels, clicks, exams);
            int            numChunks = allObs.Length;

            //-------------------------------------------------------------
            // Specify prior distributions
            //-------------------------------------------------------------
            Gaussian   priorScoreMean = Gaussian.FromMeanAndVariance(0.5, 1.0);
            Gamma      priorScorePrec = Gamma.FromMeanAndVariance(2.0, 0.0);
            Gamma      priorJudgePrec = Gamma.FromMeanAndVariance(2.0, 1.0);
            Gamma      priorClickPrec = Gamma.FromMeanAndVariance(2.0, 1.0);
            Gaussian[] priorThresholds;
            CreateThresholdPriors(numLabels, out priorThresholds);

            //-----------------------------------------------------
            // Create shared variables - these are the variables
            // which are shared between all chunks
            //-----------------------------------------------------
            Model model = new Model(numChunks);
            SharedVariable <double> scoreMean = SharedVariable <double> .Random(priorScoreMean).Named("scoreMean");

            SharedVariable <double> scorePrec = SharedVariable <double> .Random(priorScorePrec).Named("scorePrec");

            SharedVariable <double> judgePrec = SharedVariable <double> .Random(priorJudgePrec).Named("judgePrec");

            SharedVariable <double> clickPrec = SharedVariable <double> .Random(priorClickPrec).Named("clickPrec");

            SharedVariable <double>[] thresholds = new SharedVariable <double> [numThresholds];
            for (int t = 0; t < numThresholds; t++)
            {
                thresholds[t] = SharedVariable <double> .Random(priorThresholds[t]).Named("threshold" + t);
            }

            //----------------------------------------------------------------------------------
            // The model
            //----------------------------------------------------------------------------------

            // Gaussian click observations are given to the model - one set of observations
            // per label class. Also the number of observations per label class is given to the model
            VariableArray <Gaussian>[] observationDistribs  = new VariableArray <Gaussian> [numLabels];
            Variable <int>[]           numberOfObservations = new Variable <int> [numLabels];

            // For each label, and each observation (consisting of a human judgement and
            // a Gaussian click observation), there is a latent score variable, a judgement
            // score variable, and a click score variable
            for (int i = 0; i < numLabels; i++)
            {
                numberOfObservations[i] = Variable.New <int>().Named("NumObs" + i);
                Range r = new Range(numberOfObservations[i]).Named("N" + i);
                observationDistribs[i] = Variable.Array <Gaussian>(r).Named("Obs" + i);
                VariableArray <double> scores  = Variable.Array <double>(r).Named("Scores" + i);
                VariableArray <double> scoresJ = Variable.Array <double>(r).Named("ScoresJ" + i);
                VariableArray <double> scoresC = Variable.Array <double>(r).Named("ScoresC" + i);
                scores[r]  = Variable.GaussianFromMeanAndPrecision(scoreMean.GetCopyFor(model), scorePrec.GetCopyFor(model)).ForEach(r);
                scoresJ[r] = Variable.GaussianFromMeanAndPrecision(scores[r], judgePrec.GetCopyFor(model));
                scoresC[r] = Variable.GaussianFromMeanAndPrecision(scores[r], clickPrec.GetCopyFor(model));
                Variable.ConstrainEqualRandom(scoresC[r], observationDistribs[i][r]);
                Variable.ConstrainBetween(scoresJ[r], thresholds[i].GetCopyFor(model), thresholds[i + 1].GetCopyFor(model));
            }

            //----------------------------------------------------------
            // Outer loop iterates over a number of passes
            // Inner loop iterates over the unique labels
            //----------------------------------------------------------
            Console.WriteLine("Training: sample size: " + labels.Length + "\n");
            for (int pass = 0; pass < maxPasses; pass++)
            {
                prevMargScoreMean = marginals.marginalScoreMean;
                prevMargJudgePrec = marginals.marginalJudgePrec;
                prevMargClickPrec = marginals.marginalClickPrec;
                for (int c = 0; c < numChunks; c++)
                {
                    for (int i = 0; i < numLabels; i++)
                    {
                        numberOfObservations[i].ObservedValue = allObs[c][i].Length;
                        observationDistribs[i].ObservedValue  = allObs[c][i];
                    }

                    model.InferShared(engine, c);

                    // Retrieve marginals
                    marginals.marginalScoreMean = scoreMean.Marginal <Gaussian>();
                    marginals.marginalScorePrec = scorePrec.Marginal <Gamma>();
                    marginals.marginalJudgePrec = judgePrec.Marginal <Gamma>();
                    marginals.marginalClickPrec = clickPrec.Marginal <Gamma>();
                    for (int i = 0; i < numThresholds; i++)
                    {
                        marginals.marginalThresh[i] = thresholds[i].Marginal <Gaussian>();
                    }

                    Console.WriteLine("\n****** Pass {0}, chunk {1} ******", pass, c);
                    Console.WriteLine("----- Marginals -----");
                    Console.WriteLine("scoreMean = {0}", marginals.marginalScoreMean);
                    Console.WriteLine("scorePrec = {0}", marginals.marginalScorePrec);
                    Console.WriteLine("judgePrec = {0}", marginals.marginalJudgePrec);
                    Console.WriteLine("clickPrec = {0}", marginals.marginalClickPrec);
                    for (int t = 0; t < numThresholds; t++)
                    {
                        Console.WriteLine("threshMean {0} = {1}", t, marginals.marginalThresh[t]);
                    }
                }

                // Test for convergence
                if (marginals.marginalScoreMean.MaxDiff(prevMargScoreMean) < convergenceThresh &&
                    marginals.marginalJudgePrec.MaxDiff(prevMargJudgePrec) < convergenceThresh &&
                    marginals.marginalClickPrec.MaxDiff(prevMargClickPrec) < convergenceThresh)
                {
                    Console.WriteLine("\n****** Inference converged ******\n");
                    break;
                }
            }

            return(marginals);
        }
Example #27
		//-- EP -------------------------------------------------------------------------------------------

		/// <summary>
		/// Evidence message for EP
		/// </summary>
		/// <param name="log">Constant value for 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <returns>Logarithm of the factor's average value across the given argument distributions</returns>
		/// <remarks><para>
		/// The formula for the result is <c>log(sum_(d) p(d) factor(log,d))</c>.
		/// </para></remarks>
		public static double LogAverageFactor(double log, Gamma d)
		{
			return d.GetLogProb(Math.Exp(log));
		}
Example #28
 public void SetShapeFailsWithNegativeShape()
 {
     var n = new Gamma(1.0, 1.0);
     Assert.Throws<ArgumentOutOfRangeException>(() => n.Shape = -1.0);
 }
Example #29
		/// <summary>
		/// VMP message to 'totalCount'
		/// </summary>
		/// <param name="mean">Incoming message from 'mean'. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="totalCount">Incoming message from 'totalCount'. Must be a proper distribution.  If uniform, the result will be uniform. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="prob">Incoming message from 'prob'. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="to_totalCount">Previous outgoing message to 'TotalCount'.</param>
		/// <returns>The outgoing VMP message to the 'totalCount' argument</returns>
		/// <remarks><para>
		/// The outgoing message is the exponential of the average log-factor value, where the average is over all arguments except 'totalCount'.
		/// The formula is <c>exp(sum_(mean,prob) p(mean,prob) log(factor(prob,mean,totalCount)))</c>.
		/// </para></remarks>
		/// <exception cref="ImproperMessageException"><paramref name="mean"/> is not a proper distribution</exception>
		/// <exception cref="ImproperMessageException"><paramref name="totalCount"/> is not a proper distribution</exception>
		/// <exception cref="ImproperMessageException"><paramref name="prob"/> is not a proper distribution</exception>
		public static Gamma TotalCountAverageLogarithm([Proper] Beta mean, [Proper] Gamma totalCount, [SkipIfUniform] Beta prob, Gamma to_totalCount)
		{
			double ELogP, ELogOneMinusP;
			prob.GetMeanLogs(out ELogP, out ELogOneMinusP);
			Gamma approximateFactor = DirichletOp.TotalCountAverageLogarithmHelper(
				Vector.FromArray(new double[] { mean.TrueCount, mean.FalseCount }),
				totalCount,
				Vector.FromArray(new double[] { ELogP, ELogOneMinusP }));
			if (damping == 0.0)
				return approximateFactor;
			else
				return (approximateFactor^(1-damping)) * (to_totalCount ^ damping);
		}
Example #30
 public void ValidateDensity(
     [Values(0, 0, 0, 1, 1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10)] int shape, 
     [Values(0.0, 0.0, 0.0, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 1.0, 1.0, 1.0, Double.PositiveInfinity, Double.PositiveInfinity, Double.PositiveInfinity)] double invScale, 
     [Values(0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0)] double x, 
     [Values(0.0, 0.0, 0.0, 0.10000000000000000555111512312578270211815834045410156, 0.090483741803595961836995913651194571475319347018875963, 0.036787944117144234201693506390001264039984687455876246, 1.0, 0.36787944117144232159552377016146086744581113103176804, 0.000045399929762484851535591515560550610237918088866564953, 0.0, 1.2511003572113329898476497894772544708420990097708588, 1.0251532120868705806216092933926141802686541811003037e-30, 0.0, 0.0000010137771196302974029859010421116095333052555418644397, 0.12511003572113329898476497894772544708420990097708601, 0.0, 0.0, Double.PositiveInfinity)] double pdf)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(pdf, n.Density(x), 14);
 }
Example #31
 public static double GetGamma(double x)
 {
     return(System.Math.Exp(Gamma.Log(x)));
 }
Example #32
        public void ValidateMedian()
        {
            var n = new Gamma(0.0, 0.0);

            Assert.Throws <NotSupportedException>(() => { var median = n.Median; });
        }
Example #33
 public void ValidateCumulativeDistribution(
     [Values(0, 0, 0, 1, 1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10)] int shape, 
     [Values(0.0, 0.0, 0.0, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 1.0, 1.0, 1.0, Double.PositiveInfinity, Double.PositiveInfinity, Double.PositiveInfinity)] double invScale, 
     [Values(0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0)] double x, 
     [Values(0.0, 0.0, 0.0, 0.0, 0.095162581964040431858607615783064404690935346242622848, 0.63212055882855767840447622983853913255418886896823196, 0.0, 0.63212055882855767840447622983853913255418886896823196, 0.99995460007023751514846440848443944938976208191113396, 0.0, 0.54207028552814779168583514294066541824736464003242184, 0.99999999999999999999999999999988746526039157266114706, 0.0, 0.00000011142547833872067735305068724025236288094949815466035, 0.54207028552814779168583514294066541824736464003242184, 0.0, 0.0, 1.0)] double cdf)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(cdf, n.CumulativeDistribution(x), 14);
 }
Example #34
        public void DirichletOpQuadratureTest()
        {
            var matlabResults
                = new double[]
                    {
                        0.625872049875551,
                        0.866057568760984,
                        -0.266065360660541,
                        -1.227320719860393,
                        1.280900246404125
                    };
            var matlabResults2
                = new double[]
                    {
                        0.843302107208523,
                        0.610546297106219,
                        -2.182481855300747,
                        -0.254011377373013,
                        -0.217430057568389
                    };
            double am = 2;
            double bm = 1;
            double at = 3;
            double bt = 1;
            Dirichlet meanQ = new Dirichlet(new double[] {am, bm});
            Gamma totalCountQ = new Gamma(at, 1/bt);
            double[] EELogGamma;
            double[] EELogMLogGamma;
            double[] EELogOneMinusMLogGamma;
            double[] EELogSLogGamma;
            double[] EEMSDigamma;
            DirichletOp.MeanMessageExpectations(
                meanQ.PseudoCount,
                totalCountQ,
                out EELogGamma,
                out EELogMLogGamma,
                out EELogOneMinusMLogGamma);

            Console.WriteLine(System.Math.Abs(EELogGamma[0] - matlabResults[0]));
            Console.WriteLine(System.Math.Abs(EELogMLogGamma[0] - matlabResults[2]));
            Console.WriteLine(System.Math.Abs(EELogOneMinusMLogGamma[0] - matlabResults[3]));

            Console.WriteLine(System.Math.Abs(EELogGamma[1] - matlabResults2[0]));
            Console.WriteLine(System.Math.Abs(EELogMLogGamma[1] - matlabResults2[2]));
            Console.WriteLine(System.Math.Abs(EELogOneMinusMLogGamma[1] - matlabResults2[3]));

            DirichletOp.TotalCountMessageExpectations(
                meanQ.PseudoCount,
                totalCountQ,
                out EELogGamma,
                out EELogSLogGamma,
                out EEMSDigamma);

            Console.WriteLine(System.Math.Abs(EELogGamma[0] - matlabResults[0]));
            Console.WriteLine(System.Math.Abs(EELogSLogGamma[0] - matlabResults[1]));
            Console.WriteLine(System.Math.Abs(EEMSDigamma[0] - matlabResults[4]));

            Console.WriteLine(System.Math.Abs(EELogGamma[1] - matlabResults2[0]));
            Console.WriteLine(System.Math.Abs(EELogSLogGamma[1] - matlabResults2[1]));
            Console.WriteLine(System.Math.Abs(EEMSDigamma[1] - matlabResults2[4]));
        }
Example #35
 public void ValidateDensityLn(
     [Values(0, 0, 0, 1, 1, 1, 1, 1, 1, 10, 10, 10, 10, 10, 10, 10, 10, 10)] int shape, 
     [Values(0.0, 0.0, 0.0, 0.1, 0.1, 0.1, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 1.0, 1.0, 1.0, Double.PositiveInfinity, Double.PositiveInfinity, Double.PositiveInfinity)] double invScale, 
     [Values(0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0, 0.0, 1.0, 10.0)] double x, 
     [Values(Double.NegativeInfinity, Double.NegativeInfinity, Double.NegativeInfinity, -2.3025850929940456285068402234265387271634735938763824, -2.402585092994045634057955346552321429281631934330484, -3.3025850929940456285068402234265387271634735938763824, 0.0, -1.0, -10.0, Double.NegativeInfinity, 0.22402344985898722897219667227693591172986563062456522, -69.052710713194601614865880235563786219860220971716511, Double.NegativeInfinity, -13.801827480081469611207717874566706164281149255663166, -2.0785616431350584550457947824074282958712358580042068, Double.NegativeInfinity, Double.NegativeInfinity, Double.PositiveInfinity)] double pdfln)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(pdfln, n.DensityLn(x), 14);
 }
Example #36
 public static double GammaFromShapeAndRate(double shape, double rate)
 {
     return(Gamma.Sample(shape, 1 / rate));
 }
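This wrapper bridges the two common gamma parameterizations: the second argument here is a rate r, while the underlying sampler (as the 1 / rate conversion suggests) expects a scale θ = 1/r, so in both views \( \mathbb{E}[X] = k\theta = k/r \) and \( \operatorname{Var}[X] = k\theta^{2} = k/r^{2} \) for shape k.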
Example #37
 public void ValidateMaximum()
 {
     var n = new Gamma(1.0, 1.0);
     Assert.AreEqual(Double.PositiveInfinity, n.Maximum);
 }
Example #38
 public static double TruncatedGammaFromShapeAndRate(double shape, double rate, double lowerBound, double upperBound)
 {
     return(TruncatedGamma.Sample(Gamma.FromShapeAndRate(shape, rate), lowerBound, upperBound));
 }
Example #39
 public void ValidateMedian()
 {
     var n = new Gamma(0.0, 0.0);
     Assert.Throws<NotSupportedException>(() => { var median = n.Median; });
 }
Example #40
        static void Main(string[] args)
        {
            //
            // Challenger O-ring data
            // Taken from: http://archive.ics.uci.edu/ml/machine-learning-databases/space-shuttle/o-ring-erosion-only.data
            //

            double[] temp     = { 66, 70, 69, 68, 67, 72, 73, 70, 57, 63, 70, 78, 67, 53, 67, 75, 70, 81, 76, 79, 75, 76, 58 };
            double[] distress = { 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1 };

            // put features into array of Vectors
            Vector[] xdata = new Vector[temp.Length];
            for (int i = 0; i < temp.Length; i++)
            {
                xdata[i] = Vector.FromArray(temp[i], 1);    // including bias
            }
            //
            // Model variables
            //

            // define a prior distribution and attach that to "w" random variable
            VectorGaussian    wPrior = new VectorGaussian(Vector.Zero(2), PositiveDefiniteMatrix.Identity(2));
            Variable <Vector> w      = Variable.Random(wPrior);

            // put a Gamma prior on the noise variance
            Gamma             noiseDist = new Gamma(1, 2);
            Variable <double> noise     = Variable.Random(noiseDist);
            //double noise = 0.1;    // alternative: hard-code the variance

            // set features "x" and observations "y" as observed in the model
            VariableArray <double> y = Variable.Observed(distress);
            Range n = y.Range;
            VariableArray <Vector> x = Variable.Observed(xdata, n);

            // define "y" statistically: Gaussian RV array. Mean is defined by dot-product of param vector "w" and the feature vector x[n]
            y[n] = Variable.GaussianFromMeanAndVariance(Variable.InnerProduct(w, x[n]), noise);

            //
            // Training: parameter inference
            //

            InferenceEngine engine = new InferenceEngine();

            engine.Compiler.RecommendedQuality = Microsoft.ML.Probabilistic.Factors.Attributes.QualityBand.Experimental;

            // infer "w" posterior as a distribution
            VectorGaussian wPosterior     = engine.Infer <VectorGaussian>(w);
            Gamma          noisePosterior = engine.Infer <Gamma>(noise);

            Console.WriteLine("Distribution over w = \n" + wPosterior);
            Console.WriteLine("Distribution over noise = \n" + noisePosterior);

            //
            // Prediction: temp = 31
            //

            // one data point
            double tempTest = 31;

            // RV for feature vector
            Vector            xdataTest = Vector.FromArray(tempTest, 1);
            Variable <Vector> xTest     = Variable.Observed(xdataTest);

            // set w distribution that was obtained from training
            Variable <Vector> wParam = Variable.Random(wPosterior);

            Variable <double> noiseParam = Variable.Random(noisePosterior);

            // RV for prediction
            Variable <double> distressTest = Variable.GaussianFromMeanAndVariance(Variable.InnerProduct(wParam, xTest), noiseParam);

            // infer and print prediction distribution
            Console.WriteLine("Test distress = \n" + engine.Infer(distressTest));

            Console.WriteLine("Press any key ...");
            Console.ReadKey();
        }
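The model built in Main is Bayesian linear regression with an inferred noise level: \( y_i \sim \mathcal{N}(w^{\top} x_i, \sigma^2) \) with \( w \sim \mathcal{N}(0, I) \) and the Gamma(1, 2) prior above on the variance σ²; prediction then reuses the same likelihood with the learned posteriors over w and σ² plugged in as priors.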
Example #41
 public void ValidateMode(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale, 
     [Values(Double.NaN, 0.0, 0.0, 0.9, 9.0, 10.0)] double mode)
 {
     var n = new Gamma(shape, invScale);
     Assert.AreEqual(mode, n.Mode);
 }
Example #42
 /// <summary>
 ///   Gets the complementary cumulative distribution function
 ///   (ccdf) for the χ² distribution evaluated at point <c>x</c>.
 ///   This function is also known as the Survival function.
 /// </summary>
 ///
 /// <remarks>
 /// <para>
 ///   The Complementary Cumulative Distribution Function (CCDF) is
 ///   the complement of the Cumulative Distribution Function, or 1
 ///   minus the CDF.</para>
 ///
 /// <para>
 ///   The χ² complementary distribution function is defined in terms of the
 ///   <see cref="Gamma.UpperIncomplete">Complemented Incomplete Gamma
 ///   Function Γc(a, x)</see> as CDF(x; df) = Γc(df/2, x/2). </para>
 /// </remarks>
 ///
 protected internal override double InnerComplementaryDistributionFunction(double x)
 {
     return(Gamma.UpperIncomplete(degreesOfFreedom / 2.0, x / 2.0));
 }
Example #43
 public void ValidateStdDev(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale, 
     [Values(Double.NaN, 10.0, 1.0, 0.31622776601683794197697302588502426416723164097476643, 3.1622776601683793319988935444327185337195551393252168, 0.0)] double sdev)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(sdev, n.StdDev, 15);
 }
Example #44
 /// <summary>
 ///   Gets the inverse of the cumulative distribution function (icdf) for
 ///   this distribution evaluated at probability <c>p</c>. This function
 ///   is also known as the Quantile function.
 /// </summary>
 ///
 /// <param name="p">A probability value between 0 and 1.</param>
 ///
 /// <returns>
 ///   A sample which could originate the given probability
 ///   value when applied to the <see cref="UnivariateContinuousDistribution.DistributionFunction(double)"/>.
 /// </returns>
 ///
 protected internal override double InnerInverseDistributionFunction(double p)
 {
     return(Gamma.InverseLowerIncomplete(degreesOfFreedom / 2.0, p) * 2.0);
 }
Example #45
 public void ValidateVariance(
     [Values(0.0, 1.0, 1.0, 10.0, 10.0, 10.0)] double shape, 
     [Values(0.0, 0.1, 1.0, 10.0, 1.0, Double.PositiveInfinity)] double invScale, 
     [Values(Double.NaN, 100.0, 1.0, 0.1, 10.0, 0.0)] double var)
 {
     var n = new Gamma(shape, invScale);
     AssertHelpers.AlmostEqual(var, n.Variance, 15);
 }
Example #46
 /// <summary>
 ///   Gets the inverse of the cumulative distribution function (icdf) for
 ///   this distribution evaluated at probability <c>p</c>. This function
 ///   is also known as the Quantile function.
 /// </summary>
 ///
 /// <param name="p">A probability value between 0 and 1.</param>
 /// <param name="degreesOfFreedom">
 ///   The degrees of freedom of the Chi-Square distribution.
 /// </param>
 ///
 /// <returns>
 ///   A sample which could originate the given probability
 ///   value when applied to the <see cref="UnivariateContinuousDistribution.DistributionFunction(double)"/>.
 /// </returns>
 ///
 public static double Inverse(double p, int degreesOfFreedom)
 {
     return(Gamma.InverseLowerIncomplete(degreesOfFreedom / 2.0, p) * 2.0);
 }
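Because the χ² CDF and this quantile function are inverses of each other through the regularized lower incomplete gamma function, the pair can be sanity-checked with a round trip. A hedged sketch, using the Gamma.LowerIncomplete call the other examples use for CDFs:

 public static void CheckInverseRoundTrip(double x, int degreesOfFreedom)
 {
     // CDF(x; df) = P(df/2, x/2); Inverse should map the probability back to x.
     double p         = Gamma.LowerIncomplete(degreesOfFreedom / 2.0, x / 2.0);
     double recovered = Inverse(p, degreesOfFreedom);
     Console.WriteLine("x = {0}, recovered = {1}", x, recovered);
 }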
Example #47
		/// <summary>
		/// Evidence message for EP
		/// </summary>
		/// <param name="log">Incoming message from 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <param name="to_log">Outgoing message to 'log'.</param>
		/// <returns>Logarithm of the factor's contribution the EP model evidence</returns>
		/// <remarks><para>
		/// The formula for the result is <c>log(sum_(d,log) p(d,log) factor(log,d) / sum_log p(log) messageTo(log))</c>.
		/// Adding up these values across all factors and variables gives the log-evidence estimate for EP.
		/// </para></remarks>
		public static double LogEvidenceRatio(Gaussian log, Gamma d, [Fresh] Gaussian to_log)
		{
			return LogAverageFactor(log, d, to_log) - to_log.GetAverageLog(log);
		}
Example #48
        /// <summary>
        ///   Computes the cumulative probability at <c>t</c> of the
        ///   non-central T-distribution with DF degrees of freedom
        ///   and non-centrality parameter.
        /// </summary>
        ///
        /// <remarks>
        ///   This function is based on the original work done by
        ///   Russell Lenth and John Burkardt, shared under the
        ///   LGPL license. Original FORTRAN code can be found at:
        ///   http://people.sc.fsu.edu/~jburkardt/f77_src/asa243/asa243.html
        /// </remarks>
        ///
        private static double distributionFunctionLowerTail(double t, double df, double delta)
        {
            double alnrpi = 0.57236494292470008707;
            double errmax = 1.0E-10;
            int    itrmax = 100;
            double r2pi   = 0.79788456080286535588;

            if (df <= 0.0)
            {
                throw new ArgumentOutOfRangeException("df",
                                                      "Degrees of freedom must be positive.");
            }

            double tt;
            double del;
            bool   negdel;

            if (t < 0.0)
            {
                tt     = -t;
                del    = -delta;
                negdel = true;
            }
            else
            {
                tt     = t;
                del    = delta;
                negdel = false;
            }

            // Initialize twin series.
            double en    = 1.0;
            double x     = t * t / (t * t + df);
            double value = 0;

            if (x <= 0.0)
            {
                // upper tail of normal cumulative function
                value += Normal.Complemented(del);

                if (negdel)
                {
                    value = 1.0 - value;
                }
                return(value);
            }

            double lambda = del * del;
            double p      = 0.5 * Math.Exp(-0.5 * lambda);
            double q      = r2pi * p * del;
            double s      = 0.5 - p;
            double a      = 0.5;
            double b      = 0.5 * df;
            double rxb    = Math.Pow(1.0 - x, b);
            double albeta = alnrpi + Gamma.Log(b) - Gamma.Log(a + b);
            double xodd   = Beta.Incomplete(a, b, x);
            double godd   = 2.0 * rxb * Math.Exp(a * Math.Log(x) - albeta);
            double xeven  = 1.0 - rxb;
            double geven  = b * x * rxb;

            value = p * xodd + q * xeven;

            // Repeat until convergence.
            while (true)
            {
                a     = a + 1.0;
                xodd  = xodd - godd;
                xeven = xeven - geven;
                godd  = godd * x * (a + b - 1.0) / a;
                geven = geven * x * (a + b - 0.5) / (a + 0.5);
                p     = p * lambda / (2.0 * en);
                q     = q * lambda / (2.0 * en + 1.0);
                s     = s - p;
                en    = en + 1.0;
                value = value + p * xodd + q * xeven;
                double errbd = 2.0 * s * (xodd - godd);

                if (errbd <= errmax)
                {
                    break;
                }

                if (itrmax < en)
                {
                    throw new ConvergenceException("Maximum number of iterations reached.");
                }
            }

            // upper tail of normal cumulative function
            value = value + Normal.Complemented(del);

            if (negdel)
            {
                value = 1.0 - value;
            }

            return(value);
        }
Example #49
		/// <summary>
		/// EP message to 'd'
		/// </summary>
		/// <param name="log">Incoming message from 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <param name="to_log">Previous outgoing message to 'log'.</param>
		/// <returns>The outgoing EP message to the 'd' argument</returns>
		/// <remarks><para>
		/// The outgoing message is a distribution matching the moments of 'd' as the random arguments are varied.
		/// The formula is <c>proj[p(d) sum_(log) p(log) factor(log,d)]/p(d)</c>.
		/// </para></remarks>
		public static Gamma DAverageConditional(Gaussian log, Gamma d, Gaussian to_log)
		{
			var g = Gamma.FromShapeAndRate(d.Shape + 1, d.Rate);
			return ExpOp.ExpAverageConditional(g, log, to_log);
		}
Example #50
        public override void Process(double[] values)
        {
            double lnsum = 0;
            int    count = values.Length;

            for (int i = 0; i < count; ++i)
            {
                lnsum += System.Math.Log(values[i]);
            }

            double mean = values.Average();

            double s = System.Math.Log(mean) - lnsum / count;

            double newK = (3 - s + System.Math.Sqrt((s - 3) * (s - 3) + 24 * s)) / (12 * s);

            double oldK;

            do
            {
                oldK = newK;
                newK = oldK - (System.Math.Log(newK) - Gamma.Digamma(newK) - s) / ((1 / newK) - Gamma.Trigamma(newK));
            } while (System.Math.Abs(oldK - newK) / System.Math.Abs(oldK) > double.Epsilon);

            double theta = mean / newK;

            m_lambda = 1 / theta;
            m_k      = (int)newK;

            mLnConstant = -(m_k * System.Math.Log(theta) + Gamma.Log(m_k));

            mMean   = mean;
            mStdDev = System.Math.Sqrt(m_k) / m_lambda;
        }
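The fit in Process is the standard maximum-likelihood recipe for the gamma shape: a closed-form initial guess followed by Newton refinement of \( \ln k - \psi(k) = s \), with Gamma.Digamma and Gamma.Trigamma supplying ψ and ψ′:

\[ s = \ln \bar{x} - \overline{\ln x}, \qquad k_0 \approx \frac{3 - s + \sqrt{(s-3)^2 + 24s}}{12s}, \qquad k \leftarrow k - \frac{\ln k - \psi(k) - s}{1/k - \psi'(k)}, \qquad \theta = \bar{x}/k. \]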
Example #51
		/// <summary>
		/// VMP message to 'd'
		/// </summary>
		/// <param name="log">Constant value for 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <param name="result">Modified to contain the outgoing message</param>
		/// <returns><paramref name="result"/></returns>
		/// <remarks><para>
		/// The outgoing message is the factor viewed as a function of 'd' conditioned on the given values.
		/// </para></remarks>
		public static Gamma DAverageLogarithm(double log, Gamma d, Gamma result)
		{
			result.Point = Math.Exp(log);
			return result;
		}
Example #52
        /// <summary>
        /// Infer.NET definition of the Semi Parametric Latent Factor Model of
        /// Teh, Y., Seeger, M., and Jordan, M. (AISTATS 2005).
        /// </summary>
        /// <param name="inputs">Covariates X</param>
        /// <param name="data">Outputs Y</param>
        /// <param name="Q">Number of latent functions</param>
        /// <param name="missing">Which elements of Y are missing</param>
        /// <param name="nodeFunctionNoise">Whether to include node noise</param>
        public void SPLFM(
            Vector[] inputs,
            double[,] data,
            int Q,
            bool[,] missing        = null,
            bool nodeFunctionNoise = false)
        {
            var             toInfer = new List <IVariable>();
            SummationKernel kf_node = new SummationKernel(new SquaredExponential(0));
            var             K_node  = Utils.GramMatrix(kf_node, inputs);

            var D    = Variable.Observed <int>(data.GetLength(0)).Named("D");
            var d    = new Range(D).Named("d");
            var Qvar = Variable.Observed <int>(Q).Named("Q");
            var q    = new Range(Qvar).Named("q");
            var N    = Variable.Observed <int>(data.GetLength(1)).Named("N");
            var n    = new Range(N).Named("n");

            if (missing == null)
            {
                missing = new bool[D.ObservedValue, N.ObservedValue]; // check this is all false
            }
            var ev         = Variable.Bernoulli(.5).Named("ev");
            var modelBlock = Variable.If(ev);

            var nodeSignalPrecisions = Variable.Array <double>(q).Named("nodeSignalPrecisions");
            // set this to 1 if not learning signal variance
            var nodeSignalPrecisionsPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .1)).ToArray(), q).Named("nodeSignalPrecisionsPrior");

            nodeSignalPrecisions[q] = Variable.Random <double, Gamma>(nodeSignalPrecisionsPrior[q]);

            var nodeFunctions  = Variable.Array <Vector>(q).Named("nodeFunctions");
            var K_node_inverse = Variable.Observed(K_node.Inverse()).Named("K_node_inverse");

            nodeFunctions[q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, nodeSignalPrecisions[q], K_node_inverse);

            nodeFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var nodeFunctionValues           = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValues");
            var nodeFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesPredictive");

            VariableArray <double> nodeNoisePrecisions = null;

            if (nodeFunctionNoise)
            {
                var nodeFunctionValuesClean = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesClean");
                nodeFunctionValuesClean[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeNoisePrecisions        = Variable.Array <double>(q).Named("nodeNoisePrecisions");
                var nodeNoisePrecisionPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .01)).ToArray(), q).Named("nodeNoisePrecisionPrior");
                nodeNoisePrecisions[q] = Variable.Random <double, Gamma>(nodeNoisePrecisionPrior[q]);
                toInfer.Add(nodeNoisePrecisions);
                nodeFunctionValues[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

                nodeFunctionValuesPredictive[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);
            }
            else
            {
                nodeFunctionValues[q]           = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeFunctionValuesPredictive[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
            }

            var weights = Variable.Array <double>(d, q).Named("weights");

            weights[d, q] = Variable.GaussianFromMeanAndPrecision(0, 1).ForEach(d, q);
            var observedData        = Variable.Array <double>(d, n).Named("observedData");
            var noisePrecisionPrior = Variable.Observed(Gamma.FromShapeAndRate(1, .1)).Named("noisePrecisionPrior");
            var noisePrecision      = Variable.Random <double, Gamma>(noisePrecisionPrior).Named("noisePrecision");

            var isMissing = Variable.Array <bool>(d, n).Named("isMissing");

            isMissing.ObservedValue = missing;

            var noiseLessY = Variable.Array <double>(d, n).Named("noiseLessY");

            using (Variable.ForEach(n))
                using (Variable.ForEach(d))
                {
                    var temp = Variable.Array <double>(q).Named("temp");
                    temp[q]          = weights[d, q] * nodeFunctionValues[q][n];
                    noiseLessY[d, n] = Variable.Sum(temp);
                    using (Variable.IfNot(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecision);
                    using (Variable.If(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(0, 1);
                }
            observedData.ObservedValue = data;
            var nodeFunctionsInit = Enumerable.Range(0, Q).Select(i =>
                                                                  VectorGaussian.FromMeanAndVariance(
                                                                      VectorGaussian.Sample(Vector.Zero(N.ObservedValue), PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100)),
                                                                      PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100))).ToArray(); // should put this manually in generated code

            var distArray = Distribution <Vector> .Array(nodeFunctionsInit);

            var nodeFunctionsInitVar = Variable.Observed(distArray).Named("nodeFunctionsInitVar");

            nodeFunctions.InitialiseTo(nodeFunctionsInitVar);

            modelBlock.CloseBlock();

            toInfer.AddRange(new List <IVariable>()
            {
                ev, noiseLessY, noisePrecision, nodeFunctionValues, nodeSignalPrecisions, nodeFunctionValuesPredictive, weights
            });

            var ie = new InferenceEngine(new VariationalMessagePassing());

            ie.ModelName = "SPLFM";
            var ca = ie.GetCompiledInferenceAlgorithm(toInfer.ToArray());

            ca.Execute(100);
            var fvals      = ca.Marginal <Gaussian[][]>(nodeFunctionValues.NameInGeneratedCode)[0]; // [q][n]
            var x          = inputs.Select(i => i[0]).ToArray();
            var mplWrapper = new MatplotlibWrapper();

            mplWrapper.AddArray("x", x);
            mplWrapper.AddArray("y", fvals.Select(i => i.GetMean()).ToArray());
            mplWrapper.AddArray("s", fvals.Select(i => Math.Sqrt(i.GetVariance())).ToArray());

            mplWrapper.Plot(new string[] {
                "fill_between(x,y-s,y+s,color=\"gray\")",
                "ylabel(\"node (fitted)\")"
            });
        }
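A hypothetical driver for SPLFM on a toy dataset (the enclosing class is not shown in this excerpt, so the GPRNTests name below is an assumption; note that the method plots the first fitted node function through MatplotlibWrapper, so a working Python bridge is also assumed):

var rng = new Random(0);
int numOutputs = 3, numPoints = 20;
Vector[] inputs = Enumerable.Range(0, numPoints)
                            .Select(i => Vector.FromArray(i / (double)numPoints))
                            .ToArray();
var data = new double[numOutputs, numPoints];
for (int i = 0; i < numOutputs; i++)
    for (int j = 0; j < numPoints; j++)
        data[i, j] = Math.Sin(2 * Math.PI * j / numPoints + i) + 0.1 * rng.NextDouble();

new GPRNTests().SPLFM(inputs, data, 2, nodeFunctionNoise: true); // Q = 2 latent functions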
Beispiel #53
0
		/// <summary>
		/// Evidence message for VMP
		/// </summary>
		/// <param name="log">Incoming message from 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <returns>Average of the factor's log-value across the given argument distributions</returns>
		/// <remarks><para>
		/// The formula for the result is <c>sum_(log,d) p(log,d) log(factor(log,d))</c>.
		/// Adding up these values across all factors and variables gives the log-evidence estimate for VMP.
		/// </para></remarks>
		public static double AverageLogFactor(Gaussian log, Gamma d)
		{
			double m, v;
			log.GetMeanAndVariance(out m, out v);
			double Elogd = d.GetMeanLog();
			double Elogd2; // E[(log d)^2] = Var[log d] + E[log d]^2, with Var[log d] = trigamma(shape) for a Gamma
			if (!d.IsPointMass)
				Elogd2 = MMath.Trigamma(d.Shape) + Elogd * Elogd;
			else
				Elogd2 = Math.Log(d.Point) * Math.Log(d.Point);
			// E_q[log N(log d; m, v)], expanding the square in the Gaussian exponent
			return -Elogd2 / (2 * v) + m * Elogd / v - m * m / (2 * v) - MMath.LnSqrt2PI - .5 * Math.Log(v);
		}
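In other words, the method returns the expectation of the Gaussian log-density of 'log' evaluated at log d, expanded term by term. With a and b the shape and rate of d:

\[
\mathbb{E}\bigl[\log \mathcal{N}(\log d;\, m, v)\bigr]
  = -\frac{\mathbb{E}[(\log d)^2]}{2v}
    + \frac{m\,\mathbb{E}[\log d]}{v}
    - \frac{m^2}{2v}
    - \tfrac{1}{2}\log(2\pi v),
\qquad
\mathbb{E}[\log d] = \psi(a) - \log b,
\quad
\mathbb{E}[(\log d)^2] = \psi_1(a) + \mathbb{E}[\log d]^2,
\]

which is exactly the Trigamma branch above; for a point mass the variance term psi_1(a) drops out and only the squared log remains.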
Beispiel #54
0
        /// <summary>
        /// An implementation of GPRN specialised for one-step-ahead multivariate volatility experiments
        /// </summary>
        /// <param name="inputs">Covariates X</param>
        /// <param name="data">Outputs Y</param>
        /// <param name="nodeSignalPrecs">Fixed signal precision for each latent node function</param>
        /// <param name="nodeNoisePrecs">Fixed noise precision for each latent node function</param>
        /// <param name="obsNoisePrec">Fixed observation noise precision</param>
        /// <param name="finit">Initial marginals for the node functions; updated to the fitted posteriors on return</param>
        /// <param name="winit">Initial marginals for the weight functions; updated to the fitted posteriors on return</param>
        /// <param name="nodeKernel">Kernel for the node functions</param>
        /// <param name="weightKernel">Kernel for the weight functions</param>
        /// <returns>Predicted covariance for the next time point</returns>
        public VectorGaussian GPRN_MultivariateVolatility(
            Vector[] inputs,
            double[,] data,
            double[] nodeSignalPrecs,
            double[] nodeNoisePrecs,
            double obsNoisePrec,
            ref VectorGaussian[] finit,
            ref VectorGaussian[,] winit,
            KernelFunction nodeKernel,
            KernelFunction weightKernel)
        {
            var missing = new bool[data.GetLength(0), data.GetLength(1)];

            for (int i = 0; i < data.GetLength(0); i++)
            {
                missing[i, data.GetLength(1) - 1] = true; // hold out the last time point of every output for one-step-ahead prediction
            }
            int Q = nodeSignalPrecs.Length;

            var toInfer   = new List <IVariable>();
            var K_node    = Utils.GramMatrix(nodeKernel, inputs);
            var K_weights = Utils.GramMatrix(weightKernel, inputs);

            var D    = Variable.Observed <int>(data.GetLength(0)).Named("D");
            var d    = new Range(D).Named("d");
            var Qvar = Variable.Observed <int>(Q).Named("Q");
            var q    = new Range(Qvar).Named("q");
            var N    = Variable.Observed <int>(data.GetLength(1)).Named("N");
            var n    = new Range(N).Named("n");

            var ev         = Variable.Bernoulli(.5).Named("ev");
            var modelBlock = Variable.If(ev);

            var nodeSignalPrecisions = Variable.Array <double>(q).Named("nodeSignalPrecisions");

            nodeSignalPrecisions.ObservedValue = nodeSignalPrecs;

            var nodeFunctions  = Variable.Array <Vector>(q).Named("nodeFunctions");
            var K_node_inverse = Variable.Observed(K_node.Inverse()).Named("K_node_inverse");

            nodeFunctions[q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, nodeSignalPrecisions[q], K_node_inverse);

            nodeFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var nodeFunctionValues           = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValues");
            var nodeFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesPredictive");

            var nodeFunctionValuesClean = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesClean");

            nodeFunctionValuesClean[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
            var nodeNoisePrecisions = Variable.Array <double>(q).Named("nodeNoisePrecisions");

            nodeNoisePrecisions.ObservedValue = nodeNoisePrecs;
            nodeFunctionValues[q][n]          = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

            nodeFunctionValuesPredictive[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

            var weightFunctions   = Variable.Array <Vector>(d, q).Named("weightFunctions");
            var K_weights_inverse = Variable.Observed(K_weights.Inverse()).Named("K_weights_inverse");

            weightFunctions[d, q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, Variable.Constant <double>(1), K_weights_inverse).ForEach(d, q);

            weightFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var weightFunctionValues           = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValues");
            var weightFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValuesPredictive");

            weightFunctionValues[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);

            weightFunctionValuesPredictive[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);
            var observedData   = Variable.Array <double>(d, n).Named("observedData");
            var noisePrecision = Variable.Observed(obsNoisePrec).Named("noisePrecision");

            var isMissing = Variable.Array <bool>(d, n).Named("isMissing");

            isMissing.ObservedValue = missing;

            var noiseLessY = Variable.Array <double>(d, n).Named("noiseLessY");

            using (Variable.ForEach(n))
                using (Variable.ForEach(d))
                {
                    var temp = Variable.Array <double>(q).Named("temp");
                    temp[q]          = weightFunctionValues[d, q][n] * nodeFunctionValues[q][n];
                    noiseLessY[d, n] = Variable.Sum(temp);
                    using (Variable.IfNot(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecision);
                    using (Variable.If(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(0, 1);
                }
            observedData.ObservedValue = data;
            var nodeFunctionsInit = Enumerable.Range(0, Q).Select(i =>
                                                                  VectorGaussian.FromMeanAndVariance(
                                                                      VectorGaussian.Sample(Vector.Zero(N.ObservedValue), PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100)),
                                                                      PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100))).ToArray(); // should put this manually in generated code

            var distArray = Distribution <Vector> .Array(nodeFunctionsInit);

            var nodeFunctionsInitVar = Variable.Observed(distArray).Named("nodeFunctionsInitVar");

            nodeFunctions.InitialiseTo(nodeFunctionsInitVar);

            var finitNew = finit.Select(i => Utils.extendByOneDimension(i, Gaussian.FromMeanAndVariance(0, 1))).ToArray();

            nodeFunctions.InitialiseTo(Distribution <Vector> .Array(finitNew));

            var winitNew = new VectorGaussian[data.GetLength(0), Q];

            for (int i = 0; i < data.GetLength(0); i++)
            {
                for (int j = 0; j < Q; j++)
                {
                    winitNew[i, j] = Utils.extendByOneDimension(winit[i, j], Gaussian.FromMeanAndVariance(0, 1));
                }
            }

            weightFunctions.InitialiseTo(Distribution <Vector> .Array(winitNew));

            modelBlock.CloseBlock();

            toInfer.AddRange(new List <IVariable>()
            {
                ev, noiseLessY, nodeFunctions, weightFunctions, nodeFunctionValuesPredictive, weightFunctionValues, weightFunctionValuesPredictive                                      /* is this redundant? */
            });

            var ie = new InferenceEngine(new VariationalMessagePassing());
            var ca = ie.GetCompiledInferenceAlgorithm(toInfer.ToArray());

            ca.SetObservedValue(K_node_inverse.NameInGeneratedCode, Utils.GramMatrix(nodeKernel, inputs).Inverse());
            ca.SetObservedValue(K_weights_inverse.NameInGeneratedCode, Utils.GramMatrix(weightKernel, inputs).Inverse());
            ca.Reset();

            double oldML = double.NegativeInfinity;
            double ml    = 0;
            int    it    = 0;

            for (; it < 30; it++)
            {
                ca.Update(1);
                ml = ca.Marginal <Bernoulli>(ev.NameInGeneratedCode).LogOdds;
                Console.WriteLine(ml);
                if (Math.Abs(oldML - ml) < .1)
                {
                    break;
                }
                oldML = ml;
            }

            var f = ca.Marginal <Gaussian[][]>("nodeFunctionValuesPredictive");
            var W = ca.Marginal <Gaussian[, ][]>("weightFunctionValuesPredictive");

            finit = ca.Marginal <VectorGaussian[]>(nodeFunctions.NameInGeneratedCode);
            winit = ca.Marginal <VectorGaussian[, ]>(weightFunctions.NameInGeneratedCode);
            return(Utils.CorrelatedPredictionsHelper(f, W, Gamma.PointMass(obsNoisePrec), Q, data.GetLength(0), data.GetLength(1) - 1));
        }
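A hypothetical one-step-ahead call (class name and warm-start initialisation assumed; in the original experiments finit and winit would carry the posterior from the previous window, whereas here they start as unit Gaussians of dimension N - 1 so that extendByOneDimension lifts them to N):

var rng = new Random(1);
int D = 2, N = 15, Q = 2;
Vector[] inputs = Enumerable.Range(0, N)
                            .Select(t => Vector.FromArray(t / (double)N))
                            .ToArray();
var data = new double[D, N];
for (int i = 0; i < D; i++)
    for (int t = 0; t < N; t++)
        data[i, t] = Math.Sin(2 * Math.PI * t / N + i) + 0.1 * rng.NextDouble();

KernelFunction nodeKernel   = new SummationKernel(new SquaredExponential(0)) + new WhiteNoise(-3);
KernelFunction weightKernel = new SummationKernel(new SquaredExponential(1)) + new WhiteNoise(-3);

VectorGaussian[] finit = Enumerable.Range(0, Q)
    .Select(_ => VectorGaussian.FromMeanAndVariance(
        Vector.Zero(N - 1), PositiveDefiniteMatrix.Identity(N - 1)))
    .ToArray();
var winit = new VectorGaussian[D, Q];
for (int i = 0; i < D; i++)
    for (int j = 0; j < Q; j++)
        winit[i, j] = VectorGaussian.FromMeanAndVariance(
            Vector.Zero(N - 1), PositiveDefiniteMatrix.Identity(N - 1));

VectorGaussian predictedCov = new GPRNTests().GPRN_MultivariateVolatility(
    inputs, data,
    new[] { 1.0, 1.0 },   // nodeSignalPrecs
    new[] { 10.0, 10.0 }, // nodeNoisePrecs
    10.0,                 // obsNoisePrec
    ref finit, ref winit, nodeKernel, weightKernel);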
Beispiel #55
0
		/// <summary>
		/// Evidence message for EP
		/// </summary>
		/// <param name="log">Incoming message from 'log'.</param>
		/// <param name="d">Incoming message from 'd'.</param>
		/// <param name="to_log">Previous outgoing message to 'log'.</param>
		/// <returns>Logarithm of the factor's average value across the given argument distributions</returns>
		/// <remarks><para>
		/// The formula for the result is <c>log(sum_(d,log) p(d,log) factor(log,d))</c>.
		/// </para></remarks>
		public static double LogAverageFactor(Gaussian log, Gamma d, [Fresh] Gaussian to_log)
		{
			Gamma g = Gamma.FromShapeAndRate(d.Shape + 1, d.Rate); // shape + 1 absorbs the Jacobian of the change of variables d = exp(log)
			return d.Shape / d.Rate * ExpOp.LogAverageFactor(g, log, to_log);
		}
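The shifted shape in g comes from the change of variables d = e^x; writing a = Shape and b = Rate,

\[
e^{x}\,\mathrm{Ga}(e^{x};\, a, b)
  = \frac{b^{a} e^{a x} e^{-b e^{x}}}{\Gamma(a)}
  = \frac{a}{b}\,\mathrm{Ga}(e^{x};\, a + 1, b),
\]

so the evidence of the Log factor under Ga(d; a, b) is a/b times the Exp factor's evidence under the shifted Gamma, which is what the return statement computes.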
Beispiel #56
0
        /// <summary>
        /// Primary definition of the GPRN model as an Infer.NET model.
        /// </summary>
        /// <param name="inputs">Covariates X</param>
        /// <param name="data">Outputs Y</param>
        /// <param name="Q">Number of latent functions</param>
        /// <param name="missing">Which elements of Y are missing</param>
        /// <param name="nodeFunctionNoise">Whether to include node noise</param>
        /// <param name="constrainWpositive">Whether to constrain W to be positive [experimental]</param>
        /// <param name="isotropicNoise">Whether to use isotropic observation noise</param>
        /// <param name="meanFunctions">Whether to include a per output mean function</param>
        /// <param name="initLoglengthscales">Initial values for the length scales of the kernels</param>
        /// <param name="sw">An output file for logging</param>
        public void GPRN_InferNET_model(Vector[] inputs,
                                        double[,] data,
                                        int Q,
                                        bool grid                    = false,
                                        bool[,] missing              = null,
                                        bool nodeFunctionNoise       = false,
                                        bool constrainWpositive      = false,
                                        bool isotropicNoise          = true,
                                        bool meanFunctions           = false,
                                        double[] initLoglengthscales = null,
                                        StreamWriter sw              = null)
        {
            var             toInfer = new List <IVariable>();
            SummationKernel kf_node = new SummationKernel(new SquaredExponential(0)) + new WhiteNoise(-3);
            var             K_node  = Utils.GramMatrix(kf_node, inputs);

            SummationKernel kf_weights = new SummationKernel(new SquaredExponential(1)) + new WhiteNoise(-3);
            var             K_weights  = Utils.GramMatrix(kf_weights, inputs);

            var D    = Variable.Observed <int>(data.GetLength(0)).Named("D");
            var d    = new Range(D).Named("d");
            var Qvar = Variable.Observed <int>(Q).Named("Q");
            var q    = new Range(Qvar).Named("q");
            var N    = Variable.Observed <int>(data.GetLength(1)).Named("N");
            var n    = new Range(N).Named("n");

            if (missing == null)
            {
                missing = new bool[D.ObservedValue, N.ObservedValue]; // a freshly allocated bool[,] defaults to all false
            }
            var ev         = Variable.Bernoulli(.5).Named("ev");
            var modelBlock = Variable.If(ev);

            var nodeSignalPrecisions = Variable.Array <double>(q).Named("nodeSignalPrecisions");
            // set this to 1 if not learning signal variance
            var nodeSignalPrecisionsPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .1)).ToArray(), q).Named("nodeSignalPrecisionsPrior");

            nodeSignalPrecisions[q] = Variable.Random <double, Gamma>(nodeSignalPrecisionsPrior[q]);

            var nodeFunctions  = Variable.Array <Vector>(q).Named("nodeFunctions");
            var K_node_inverse = Variable.Observed(K_node.Inverse()).Named("K_node_inverse");

            nodeFunctions[q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, nodeSignalPrecisions[q], K_node_inverse);

            nodeFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var nodeFunctionValues           = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValues");
            var nodeFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesPredictive");

            VariableArray <double> nodeNoisePrecisions = null;

            if (nodeFunctionNoise)
            {
                var nodeFunctionValuesClean = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesClean");
                nodeFunctionValuesClean[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeNoisePrecisions        = Variable.Array <double>(q).Named("nodeNoisePrecisions");
                var nodeNoisePrecisionPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .01)).ToArray(), q).Named("nodeNoisePrecisionPrior");
                nodeNoisePrecisions[q] = Variable.Random <double, Gamma>(nodeNoisePrecisionPrior[q]);
                toInfer.Add(nodeNoisePrecisions);
                nodeFunctionValues[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

                nodeFunctionValuesPredictive[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);
            }
            else
            {
                nodeFunctionValues[q]           = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeFunctionValuesPredictive[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
            }

            var weightFunctions   = Variable.Array <Vector>(d, q).Named("weightFunctions");
            var K_weights_inverse = Variable.Observed(K_weights.Inverse()).Named("K_weights_inverse");

            weightFunctions[d, q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, Variable.Constant <double>(1), K_weights_inverse).ForEach(d, q);

            weightFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var weightFunctionValues  = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValues");
            var weightFunctionValues2 = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValuesPredictive");

            weightFunctionValues[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);
            if (constrainWpositive)
            {
                var weightFunctionValuesCopy = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValuesCopy");
                weightFunctionValuesCopy[d, q][n] = Variable.GaussianFromMeanAndPrecision(weightFunctionValues[d, q][n], 100);
                Variable.ConstrainPositive(weightFunctionValuesCopy[d, q][n]);
            }
            weightFunctionValues2[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);
            var observedData        = Variable.Array <double>(d, n).Named("observedData");
            var noisePrecisionPrior = Variable.Observed(Gamma.FromShapeAndRate(1, .1)).Named("noisePrecisionPrior");
            Variable <double>      noisePrecision      = null;
            VariableArray <double> noisePrecisionArray = null;

            if (isotropicNoise)
            {
                noisePrecision = Variable.Random <double, Gamma>(noisePrecisionPrior).Named("noisePrecision");
                toInfer.Add(noisePrecision);
            }
            else
            {
                noisePrecisionArray    = Variable.Array <double>(d).Named("noisePrecision");
                noisePrecisionArray[d] = Variable.Random <double, Gamma>(noisePrecisionPrior).ForEach(d);
                toInfer.Add(noisePrecisionArray);
            }

            var isMissing = Variable.Array <bool>(d, n).Named("isMissing");

            isMissing.ObservedValue = missing;

            var noiseLessY = Variable.Array <double>(d, n).Named("noiseLessY");

            VariableArray <VariableArray <double>, double[][]> meanFunctionValues = null;

            if (meanFunctions)
            {
                GPFactor.settings = new Settings
                {
                    solverMethod = Settings.SolverMethod.GradientDescent,
                };

                VariableArray <KernelFunction> kf = Variable.Array <KernelFunction>(d);
                kf.ObservedValue = Enumerable.Range(0, D.ObservedValue).Select(
                    o => new SummationKernel(new SquaredExponential()) + new WhiteNoise(-3)).ToArray();

                var mf = Variable.Array <Vector>(d).Named("meanFunctions");
                mf[d] = Variable <Vector> .Factor <double, Vector[], int[], KernelFunction>(MyFactors.GP, 1.0 /*Variable.GammaFromShapeAndRate(1,1)*/, inputs, new int[] { 0 },
                                                                                            kf[d]);

                mf.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
                meanFunctionValues    = Variable.Array(Variable.Array <double>(n), d).Named("meanFunctionValues");
                meanFunctionValues[d] = Variable.ArrayFromVector(mf[d], n);
                toInfer.Add(meanFunctionValues);
            }

            using (Variable.ForEach(n))
                using (Variable.ForEach(d))
                {
                    var temp = Variable.Array <double>(q).Named("temp");
                    temp[q] = weightFunctionValues[d, q][n] * nodeFunctionValues[q][n];
                    if (meanFunctions)
                    {
                        noiseLessY[d, n] = Variable.Sum(temp) + meanFunctionValues[d][n];
                    }
                    else
                    {
                        noiseLessY[d, n] = Variable.Sum(temp);
                    }
                    using (Variable.IfNot(isMissing[d, n]))
                        if (isotropicNoise)
                        {
                            observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecision);
                        }
                        else
                        {
                            observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecisionArray[d]);
                        }
                    using (Variable.If(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(0, 1);
                }
            observedData.ObservedValue = data;
            var nodeFunctionsInit = Enumerable.Range(0, Q).Select(i =>
                                                                  VectorGaussian.FromMeanAndVariance(
                                                                      VectorGaussian.Sample(Vector.Zero(N.ObservedValue), PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100)),
                                                                      PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100))).ToArray(); // should put this manually in generated code

            var distArray = Distribution <Vector> .Array(nodeFunctionsInit);

            var nodeFunctionsInitVar = Variable.Observed(distArray).Named("nodeFunctionsInitVar");

            nodeFunctions.InitialiseTo(nodeFunctionsInitVar);

            modelBlock.CloseBlock();

            toInfer.AddRange(new List <IVariable>()
            {
                ev, noiseLessY, nodeFunctionValues, nodeSignalPrecisions, nodeFunctionValuesPredictive, weightFunctionValues, weightFunctionValues2
            });

            var infer = new InferenceEngine(new VariationalMessagePassing());

            infer.ModelName = "MeanFunction";
            var ca = infer.GetCompiledInferenceAlgorithm(toInfer.ToArray());

            var kernel = new SummationKernel(new SquaredExponential(initLoglengthscales[0]));

            kernel += new WhiteNoise(-3);
            ca.SetObservedValue(K_node_inverse.NameInGeneratedCode, Utils.GramMatrix(kernel, inputs).Inverse());

            kernel  = new SummationKernel(new SquaredExponential(initLoglengthscales[1]));
            kernel += new WhiteNoise(-3);
            ca.SetObservedValue(K_weights_inverse.NameInGeneratedCode, Utils.GramMatrix(kernel, inputs).Inverse());

            ca.Reset();
            double oldML = double.NegativeInfinity;
            double ml    = 0;
            int    it    = 0;

            for (; it < 100; it++)
            {
                ca.Update(1);
                ml = ca.Marginal <Bernoulli>(ev.NameInGeneratedCode).LogOdds;
                Console.WriteLine(ml);
                if (Math.Abs(oldML - ml) < .1)
                {
                    break;
                }
                oldML = ml;
            }
            Console.WriteLine("Finished after " + it);
        }
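A hypothetical invocation, re-using the toy inputs and data from the SPLFM sketch above (class name assumed). Note that the kernel set-up after compilation indexes initLoglengthscales[0] and initLoglengthscales[1] unconditionally, so despite the null default the argument must always carry two log length scales, one for the node kernel and one for the weight kernels:

new GPRNTests().GPRN_InferNET_model(inputs, data, 2,
    nodeFunctionNoise: true,
    isotropicNoise: false,
    initLoglengthscales: new[] { 0.0, 0.0 });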
		/// <summary>
		/// VMP message to 'totalCount'
		/// </summary>
		/// <param name="mean">Incoming message from 'mean'. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="totalCount">Incoming message from 'totalCount'. Must be a proper distribution.  If uniform, the result will be uniform. Must be a proper distribution.  If uniform, the result will be uniform.</param>
		/// <param name="prob">Constant value for 'prob'.</param>
		/// <param name="to_totalCount">Previous outgoing message to 'TotalCount'.</param>
		/// <returns>The outgoing VMP message to the 'totalCount' argument</returns>
		/// <remarks><para>
		/// The outgoing message is the exponential of the average log-factor value, where the average is over all arguments except 'totalCount'.
		/// The formula is <c>exp(sum_(mean) p(mean) log(factor(prob,mean,totalCount)))</c>.
		/// </para></remarks>
		/// <exception cref="ImproperMessageException"><paramref name="mean"/> is not a proper distribution</exception>
		/// <exception cref="ImproperMessageException"><paramref name="totalCount"/> is not a proper distribution</exception>
		public static Gamma TotalCountAverageLogarithm([Proper] Beta mean, [Proper] Gamma totalCount, double prob, Gamma to_totalCount)
		{
			return TotalCountAverageLogarithm(mean, totalCount, Beta.PointMass(prob), to_totalCount);
		}
Beispiel #58
0
 public static Gamma BAverageLogarithm(double ratio, Gamma A)
 {
     throw new NotSupportedException(NotSupportedMessage);
 }
Beispiel #59
0
	/* mcs can't compile this (#646744) */
#if FALSE
	static void InitMe (out Gamma noMercyWithTheStack) {
		noMercyWithTheStack = new Gamma ();
	}
Beispiel #60
0
        public void ValidateSkewness(double shape, double invScale, double skewness)
        {
            var n = new Gamma(shape, invScale);

            AssertHelpers.AlmostEqualRelative(skewness, n.Skewness, 15);
        }
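For reference, the skewness of Gamma(shape, rate) is 2/sqrt(shape), independent of the rate, so illustrative rows for this parameterized test (assuming NUnit-style TestCase attributes, as the Assert helpers suggest) could be:

[TestCase(1.0, 1.0, 2.0)]        // 2 / sqrt(1)
[TestCase(4.0, 0.5, 1.0)]        // 2 / sqrt(4)
[TestCase(9.0, 2.0, 2.0 / 3.0)]  // 2 / sqrt(9)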