Example #1
        public void TestKernelSummation()
        {
            double          log_length = System.Math.Log(1.234);
            double          log_sig_sd = System.Math.Log(2.345);
            double          log_nse_sd = System.Math.Log(3.456);
            SummationKernel kf         = new SummationKernel(new SquaredExponential(log_length, log_sig_sd));

            kf += new WhiteNoise(log_nse_sd);

            Assert.Equal(log_length, kf[0]);
            Assert.Equal(log_sig_sd, kf[1]);
            Assert.Equal(log_nse_sd, kf[2]);

            // Try resetting directly on the summation, using the name indexer
            log_length = 4.321;
            log_sig_sd = 5.432;
            log_nse_sd = 6.543;

            kf["Length"]   = log_length;
            kf["SignalSD"] = log_sig_sd;
            kf["NoiseSD"]  = log_nse_sd;

            Assert.Equal(log_length, kf[0]);
            Assert.Equal(log_sig_sd, kf[1]);
            Assert.Equal(log_nse_sd, kf[2]);
        }
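The summation kernel lays its hyperparameters out flat, in the order the summands were added: indices 0 and 1 above come from the SquaredExponential and index 2 from the WhiteNoise, which is why the name indexer and the integer indexer address the same slots. A minimal sketch extending this to the three-component kernel of Example #4 (assuming one slot per LinearKernel variance entry, consistent with the layout above):

            SummationKernel kf = new SummationKernel(new SquaredExponential(0.0, 0.0));
            kf += new LinearKernel(new double[] { 0.0 });
            kf += new WhiteNoise(0.0);

            // Indices 0-1: SquaredExponential (log length scale, log signal SD);
            // index 2: the LinearKernel's single log variance;
            // index 3: the WhiteNoise log SD.
            kf["Length"]  = System.Math.Log(0.5);  // same slot as kf[0]
            kf["NoiseSD"] = System.Math.Log(0.1);  // now the last slot, kf[3]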
Example #2
        public static void TestWithNoise(Vector[] inputs, double[] data)
        {
            var n = new Range(data.Length);
            //var kf = new SummationKernel(new ARD(new double[]{ 0 }, 0))+new WhiteNoise();
            var kf = new SummationKernel(new SquaredExponential()) + new WhiteNoise(-3);
            var y  = Variable <Vector> .Factor <double, Vector[], int[], KernelFunction>(MyFactors.GP, 1.0 /*Variable.GammaFromShapeAndRate(1,1)*/, inputs, new int[] { 0, 1 },
                                                                                         kf);

            GPFactor.settings = new Settings
            {
                solverMethod = Settings.SolverMethod.GradientDescent,
            };

            var kf_noise      = new SummationKernel(new SquaredExponential()) + new WhiteNoise(-3);
            var noiseFunction = Variable <Vector> .Factor <double, Vector[], int[], KernelFunction>(MyFactors.GP, 1.0 /*Variable.GammaFromShapeAndRate(1,1)*/, inputs, new int[] { 0, 1 },
                                                                                                    kf_noise);

            GPFactor.settings = new Settings
            {
                solverMethod = Settings.SolverMethod.GradientDescent,
            };
            noiseFunction.AddAttribute(new MarginalPrototype(new VectorGaussian(n.SizeAsInt)));
            var noiseFunctionValues  = Variable.ArrayFromVector(noiseFunction, n);
            var noisePrecisionValues = Variable.Array <double>(n);

            //noisePrecisionValues[n] = Variable.Exp(noiseFunctionValues[n] + 2.0);
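            // The observation precision is the exponential of a second GP (plus a
            // Gaussian offset), i.e. a log-GP prior giving input-dependent
            // (heteroskedastic) noise.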
            noisePrecisionValues[n] = Variable.Exp(noiseFunctionValues[n] + Variable.GaussianFromMeanAndPrecision(0, 1));

            y.AddAttribute(new MarginalPrototype(new VectorGaussian(n.SizeAsInt)));
            var y2noiseless = Variable.ArrayFromVector(y, n);
            var y2          = Variable.Array <double>(n);

            y2[n]            = Variable.GaussianFromMeanAndPrecision(y2noiseless[n], noisePrecisionValues[n]);
            y2.ObservedValue = data;
            var ypredictiveNoiseless = Variable.ArrayFromVector(y, n);
            var ypredictive          = Variable.Array <double>(n);

            ypredictive[n] = Variable.GaussianFromMeanAndPrecision(ypredictiveNoiseless[n], noisePrecisionValues[n]);
            var ie   = new InferenceEngine(new VariationalMessagePassing());
            var post = ie.Infer <Gaussian[]>(ypredictive);

            var mplWrapper = new MatplotlibWrapper();

            mplWrapper.AddArray("x", inputs.Select(j => j[0]).ToArray());
            mplWrapper.AddArray("y", data);
            var f = post.Select(i => i.GetMean()).ToArray();
            var e = post.Select(i => 2.0 * Math.Sqrt(i.GetVariance())).ToArray();

            mplWrapper.AddArray("f", f);
            mplWrapper.AddArray("e", e);

            mplWrapper.Plot(new string[] {
                "fill_between(x,f-e,f+e,color=\"gray\")",
                "plot(x,f,'k')",
                "scatter(x,y)"
            });
        }
Example #3
        public static (Vector[] dataX, double[] dataY) GenerateRandomData(int numData, double proportionCorrupt)
        {
            int randomSeed = 9876;

            Random rng = new Random(randomSeed);

            Rand.Restart(randomSeed);

            InferenceEngine engine = Utilities.GetInferenceEngine();

            // The points to evaluate
            Vector[] randomInputs = Utilities.VectorRange(0, 1, numData, null);

            var gaussianProcessGenerator = new GaussianProcessRegressor(randomInputs);

            // The basis
            Vector[] basis = Utilities.VectorRange(0, 1, 6, rng);

            // The kernel
            var kf = new SummationKernel(new SquaredExponential(-1)) + new WhiteNoise();

            // Fill in the sparse GP prior
            GaussianProcess gp = new GaussianProcess(new ConstantFunction(0), kf);

            gaussianProcessGenerator.Prior.ObservedValue = new SparseGP(new SparseGPFixed(gp, basis));

            // Infer the posterior Sparse GP, and sample a random function from it
            SparseGP sgp        = engine.Infer <SparseGP>(gaussianProcessGenerator.F);
            var      randomFunc = sgp.Sample();

            double[] randomOutputs = new double[randomInputs.Length];
            int      numCorrupted  = (int)Math.Ceiling(numData * proportionCorrupt);
            // materialise the random subset: a lazy OrderBy would reshuffle on every Contains call
            var subset = Enumerable.Range(0, randomInputs.Length).OrderBy(x => rng.Next()).Take(numCorrupted).ToList();

            // get random data
            for (int i = 0; i < randomInputs.Length; i++)
            {
                double post = randomFunc.Evaluate(randomInputs[i]);
                // corrupt this data point if it was chosen for the corrupted subset
                if (subset.Contains(i))
                {
                    double sign     = rng.NextDouble() > 0.5 ? 1 : -1;
                    double distance = rng.NextDouble();
                    post = (sign * distance) + post;
                }

                randomOutputs[i] = post;
            }

            Console.WriteLine("Model complete: Generated {0} points with {1} corrupted", numData, numCorrupted);

            return(randomInputs, randomOutputs);
        }
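A minimal usage sketch (names are illustrative; the tuple deconstruction needs C# 7 or later):

            (Vector[] xs, double[] ys) = GenerateRandomData(50, 0.1);
            Console.WriteLine("Generated {0} points; first: x = {1}, y = {2:g4}", xs.Length, xs[0][0], ys[0]);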
Example #4
        public void TestSummmationReadWrite()
        {
            double log_length = System.Math.Log(1.234);
            double log_sig_sd = System.Math.Log(2.345);
            double log_nse_sd = System.Math.Log(3.456);

            double[]        log_var = { System.Math.Log(0.987), System.Math.Log(0.876), System.Math.Log(0.765) };
            SummationKernel kf      = new SummationKernel(new SquaredExponential(log_length, log_sig_sd));

            kf += new LinearKernel(log_var);
            kf += new WhiteNoise(log_nse_sd);
            TestReadWrite(kf);
        }
Example #5
        static void FitDataset(bool useSynthetic)
        {
            Vector[] trainingInputs;
            double[] trainingOutputs;

            if (!useSynthetic)
            {
                var trainingData = Utilities.LoadAISDataset();
                trainingInputs = trainingData.Select(tup => Vector.FromArray(new double[1] {
                    tup.x
                })).ToArray();
                trainingOutputs = trainingData.Select(tup => tup.y).ToArray();
            }
            else
            {
                (trainingInputs, trainingOutputs) = GaussianProcessDataGenerator.GenerateRandomData(30, 0.3);
            }

            InferenceEngine engine = Utilities.GetInferenceEngine();

            // First fit standard GP, then fit Student-T GP
            foreach (var useStudentTLikelihood in new[] { false, true })
            {
                var gaussianProcessRegressor = new GaussianProcessRegressor(trainingInputs, useStudentTLikelihood, trainingOutputs);

                // Log length scale estimated as -1
                var             noiseVariance = 0.8;
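                // WhiteNoise takes the log of the noise standard deviation,
                // hence Math.Log(noiseVariance) / 2 (= log of the noise SD) below.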
                var             kf            = new SummationKernel(new SquaredExponential(-1)) + new WhiteNoise(Math.Log(noiseVariance) / 2);
                GaussianProcess gp            = new GaussianProcess(new ConstantFunction(0), kf);

                // Use all the training points as the basis, so the sparse GP is effectively a full GP
                gaussianProcessRegressor.Prior.ObservedValue = new SparseGP(new SparseGPFixed(gp, trainingInputs.ToArray()));
                double logOdds = engine.Infer <Bernoulli>(gaussianProcessRegressor.Evidence).LogOdds;
                Console.WriteLine("{0} evidence = {1}", kf, logOdds.ToString("g4"));

                // Infer the posterior Sparse GP
                SparseGP sgp = engine.Infer <SparseGP>(gaussianProcessRegressor.F);

#if NETFULL
                string datasetName = useSynthetic ? "Synthetic" : "AIS";
                Utilities.PlotPredictions(sgp, trainingInputs, trainingOutputs, useStudentTLikelihood, datasetName);
#endif
            }
        }
Example #6
        public void TestSumKDerivs()
        {
            double[] log_length = { System.Math.Log(0.543), System.Math.Log(0.432), System.Math.Log(0.321) };
            double   log_sig_sd = System.Math.Log(2.345);

            double[]        log_var    = { System.Math.Log(0.987), System.Math.Log(0.876), System.Math.Log(0.765) };
            double          log_nse_sd = System.Math.Log(3.456);
            SummationKernel kf         = new SummationKernel(new ARD(log_length, log_sig_sd));

            kf += new LinearKernel(log_var);
            kf += new WhiteNoise(log_nse_sd);
            double[] x1    = { 0.1, 0.2, 0.3 };
            double[] x2    = { 0.9, 0.7, 0.5 };
            Vector   x1Vec = Vector.FromArray(x1);
            Vector   x2Vec = Vector.FromArray(x2);

            TestDerivatives(kf, x1Vec, x1Vec);
            TestDerivatives(kf, x1Vec, x2Vec);
        }
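TestDerivatives is not shown in this listing; below is a finite-difference sketch of the kind of check it presumably performs. It assumes KernelFunction exposes a ThetaCount property, a settable integer indexer over the log hyperparameters, and the EvaluateX1X2 overloads (the ref overload returning derivatives with respect to the log hyperparameters):

        static void CheckDerivatives(KernelFunction kf, Vector x1, Vector x2)
        {
            double eps           = 1e-6;
            Vector logThetaDeriv = Vector.Zero(kf.ThetaCount);
            Vector xDeriv        = Vector.Zero(x1.Count);

            kf.EvaluateX1X2(x1, x2, ref logThetaDeriv, ref xDeriv);
            for (int i = 0; i < kf.ThetaCount; i++)
            {
                // central difference in the i-th log hyperparameter
                double saved = kf[i];
                kf[i] = saved + eps;
                double kPlus = kf.EvaluateX1X2(x1, x2);
                kf[i] = saved - eps;
                double kMinus = kf.EvaluateX1X2(x1, x2);
                kf[i] = saved;
                Console.WriteLine("theta[{0}]: analytic = {1}, numeric = {2}", i, logThetaDeriv[i], (kPlus - kMinus) / (2 * eps));
            }
        }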
Example #7
        public void BasicGPC()
        {
            Vector[] inputs = new Vector[]
            {
                Vector.FromArray(new double[2] {
                    0, 0
                }),
                Vector.FromArray(new double[2] {
                    0, 1
                }),
                Vector.FromArray(new double[2] {
                    1, 0
                }),
                Vector.FromArray(new double[2] {
                    0, 0.5
                }),
                Vector.FromArray(new double[2] {
                    1.5, 0
                }),
                Vector.FromArray(new double[2] {
                    0.5, 1.0
                })
            };
            bool[] outputs = { true, true, false, true, false, false };

            var kf = new SummationKernel(new SquaredExponential(0));

            kf += new WhiteNoise(System.Math.Log(0.1));
            var K = GramMatrix(kf, inputs);

            var n = new Range(inputs.Length);
            var x = Variable.VectorGaussianFromMeanAndVariance(Vector.Zero(inputs.Length), K);
            var g = Variable.ArrayFromVector(x, n);
            var p = Variable.Array <bool>(n);

            p[n]            = Variable.IsPositive(g[n]);
            p.ObservedValue = outputs;
            var ie = new InferenceEngine();

            Console.WriteLine(ie.Infer(x));
        }
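GramMatrix here (and Utils.GramMatrix in later examples) is a helper that does not appear in this listing; a minimal sketch of what it might look like, assuming the standard KernelFunction.EvaluateX1X2(Vector, Vector) overload:

        static PositiveDefiniteMatrix GramMatrix(KernelFunction kf, Vector[] inputs)
        {
            int n = inputs.Length;
            var K = new PositiveDefiniteMatrix(n, n);

            // K[i, j] = k(x_i, x_j); symmetric because the kernel is
            for (int i = 0; i < n; i++)
            {
                for (int j = 0; j < n; j++)
                {
                    K[i, j] = kf.EvaluateX1X2(inputs[i], inputs[j]);
                }
            }
            return K;
        }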
Example #8
        public static void Test()
        {
            var inputs = Enumerable.Range(0, 50).Select(i => Vector.Constant(1, i)).ToArray();
            var data   = inputs.Select(j => Math.Cos(2 * j[0] / 10.0)).ToArray();
            var n      = new Range(data.Length);
            //var kf = new SummationKernel(new ARD(new double[]{ 0 }, 0))+new WhiteNoise();
            var kf = new SummationKernel(new SquaredExponential()) + new WhiteNoise();
            var y  = Variable <Vector> .Factor <double, Vector[], int[], KernelFunction>(MyFactors.GP, 1.0 /*Variable.GammaFromShapeAndRate(1,1)*/, inputs, new int[] { 0, 1 },
                                                                                         kf);

            GPFactor.settings = new Settings
            {
                solverMethod = Settings.SolverMethod.GradientDescent,
            };
            y.AddAttribute(new MarginalPrototype(new VectorGaussian(n.SizeAsInt)));
            var y2 = Variable.ArrayFromVector(y, n);

            y2.ObservedValue = data;
            var ypredictive = Variable.ArrayFromVector(y, n);
            var ie          = new InferenceEngine(new VariationalMessagePassing());
            var post        = ie.Infer <Gaussian[]>(ypredictive);

            var mplWrapper = new MatplotlibWrapper();

            mplWrapper.AddArray("x", inputs.Select(j => j[0]).ToArray());
            mplWrapper.AddArray("y", data);
            var f = post.Select(i => i.GetMean()).ToArray();
            var e = post.Select(i => Math.Sqrt(i.GetVariance())).ToArray();

            mplWrapper.AddArray("f", f);
            mplWrapper.AddArray("e", e);

            mplWrapper.Plot(new string[] {
                "fill_between(x,f-e,f+e,color=\"gray\")",
                "scatter(x,y)"
            });
        }
Example #9
        /// <summary>
        /// Primary definition of the GPRN model as an Infer.NET model.
        /// </summary>
        /// <param name="inputs">Covariates X</param>
        /// <param name="data">Outputs Y</param>
        /// <param name="Q">Number of latent functions</param>
        /// <param name="grid">Whether the inputs lie on a grid (not used in this snippet)</param>
        /// <param name="missing">Which elements of Y are missing</param>
        /// <param name="nodeFunctionNoise">Whether to include node noise</param>
        /// <param name="constrainWpositive">Whether to constrain W to be positive [experimental]</param>
        /// <param name="isotropicNoise">Whether to use isotropic observation noise</param>
        /// <param name="meanFunctions">Whether to include a per-output mean function</param>
        /// <param name="initLoglengthscales">Initial values for the length scales of the kernels</param>
        /// <param name="sw">An output file for logging</param>
        public void GPRN_InferNET_model(Vector[] inputs,
                                        double[,] data,
                                        int Q,
                                        bool grid                    = false,
                                        bool[,] missing              = null,
                                        bool nodeFunctionNoise       = false,
                                        bool constrainWpositive      = false,
                                        bool isotropicNoise          = true,
                                        bool meanFunctions           = false,
                                        double[] initLoglengthscales = null,
                                        StreamWriter sw              = null)
        {
            var             toInfer = new List <IVariable>();
            SummationKernel kf_node = new SummationKernel(new SquaredExponential(0)) + new WhiteNoise(-3);
            var             K_node  = Utils.GramMatrix(kf_node, inputs);

            SummationKernel kf_weights = new SummationKernel(new SquaredExponential(1)) + new WhiteNoise(-3);
            var             K_weights  = Utils.GramMatrix(kf_weights, inputs);

            var D    = Variable.Observed <int>(data.GetLength(0)).Named("D");
            var d    = new Range(D).Named("d");
            var Qvar = Variable.Observed <int>(Q).Named("Q");
            var q    = new Range(Qvar).Named("q");
            var N    = Variable.Observed <int>(data.GetLength(1)).Named("N");
            var n    = new Range(N).Named("n");

            if (missing == null)
            {
                missing = new bool[D.ObservedValue, N.ObservedValue]; // a freshly allocated bool array defaults to all false
            }
            var ev         = Variable.Bernoulli(.5).Named("ev");
            var modelBlock = Variable.If(ev);

            var nodeSignalPrecisions = Variable.Array <double>(q).Named("nodeSignalPrecisions");
            // set this to 1 if not learning signal variance
            var nodeSignalPrecisionsPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .1)).ToArray(), q).Named("nodeSignalPrecisionsPrior");

            nodeSignalPrecisions[q] = Variable.Random <double, Gamma>(nodeSignalPrecisionsPrior[q]);

            var nodeFunctions  = Variable.Array <Vector>(q).Named("nodeFunctions");
            var K_node_inverse = Variable.Observed(K_node.Inverse()).Named("K_node_inverse");

            nodeFunctions[q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, nodeSignalPrecisions[q], K_node_inverse);

            nodeFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var nodeFunctionValues           = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValues");
            var nodeFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesPredictive");

            VariableArray <double> nodeNoisePrecisions = null;

            if (nodeFunctionNoise)
            {
                var nodeFunctionValuesClean = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesClean");
                nodeFunctionValuesClean[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeNoisePrecisions        = Variable.Array <double>(q).Named("nodeNoisePrecisions");
                var nodeNoisePrecisionPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .01)).ToArray(), q).Named("nodeNoisePrecisionPrior");
                nodeNoisePrecisions[q] = Variable.Random <double, Gamma>(nodeNoisePrecisionPrior[q]);
                toInfer.Add(nodeNoisePrecisions);
                nodeFunctionValues[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

                nodeFunctionValuesPredictive[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);
            }
            else
            {
                nodeFunctionValues[q]           = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeFunctionValuesPredictive[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
            }

            var weightFunctions   = Variable.Array <Vector>(d, q).Named("weightFunctions");
            var K_weights_inverse = Variable.Observed(K_weights.Inverse()).Named("K_weights_inverse");

            weightFunctions[d, q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, Variable.Constant <double>(1), K_weights_inverse).ForEach(d, q);

            weightFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var weightFunctionValues  = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValues");
            var weightFunctionValues2 = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValuesPredictive");

            weightFunctionValues[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);
            if (constrainWpositive)
            {
                var weightFunctionValuesCopy = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValuesCopy");
                weightFunctionValuesCopy[d, q][n] = Variable.GaussianFromMeanAndPrecision(weightFunctionValues[d, q][n], 100);
                Variable.ConstrainPositive(weightFunctionValuesCopy[d, q][n]);
            }
            weightFunctionValues2[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);
            var observedData        = Variable.Array <double>(d, n).Named("observedData");
            var noisePrecisionPrior = Variable.Observed(Gamma.FromShapeAndRate(1, .1)).Named("noisePrecisionPrior");
            Variable <double>      noisePrecision      = null;
            VariableArray <double> noisePrecisionArray = null;

            if (isotropicNoise)
            {
                noisePrecision = Variable.Random <double, Gamma>(noisePrecisionPrior).Named("noisePrecision");
                toInfer.Add(noisePrecision);
            }
            else
            {
                noisePrecisionArray    = Variable.Array <double>(d).Named("noisePrecision");
                noisePrecisionArray[d] = Variable.Random <double, Gamma>(noisePrecisionPrior).ForEach(d);
                toInfer.Add(noisePrecisionArray);
            }

            var isMissing = Variable.Array <bool>(d, n).Named("isMissing");

            isMissing.ObservedValue = missing;

            var noiseLessY = Variable.Array <double>(d, n).Named("noiseLessY");

            VariableArray <VariableArray <double>, double[][]> meanFunctionValues = null;

            if (meanFunctions)
            {
                GPFactor.settings = new Settings
                {
                    solverMethod = Settings.SolverMethod.GradientDescent,
                };

                VariableArray <KernelFunction> kf = Variable.Array <KernelFunction>(d);
                kf.ObservedValue = Enumerable.Range(0, D.ObservedValue).Select(
                    o => new SummationKernel(new SquaredExponential()) + new WhiteNoise(-3)).ToArray();

                var mf = Variable.Array <Vector>(d).Named("meanFunctions");
                mf[d] = Variable <Vector> .Factor <double, Vector[], int[], KernelFunction>(MyFactors.GP, 1.0 /*Variable.GammaFromShapeAndRate(1,1)*/, inputs, new int[] { 0 },
                                                                                            kf[d]);

                mf.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
                meanFunctionValues    = Variable.Array(Variable.Array <double>(n), d).Named("meanFunctionValues");
                meanFunctionValues[d] = Variable.ArrayFromVector(mf[d], n);
                toInfer.Add(meanFunctionValues);
            }

            using (Variable.ForEach(n))
                using (Variable.ForEach(d))
                {
                    var temp = Variable.Array <double>(q).Named("temp");
                    temp[q] = weightFunctionValues[d, q][n] * nodeFunctionValues[q][n];
                    if (meanFunctions)
                    {
                        noiseLessY[d, n] = Variable.Sum(temp) + meanFunctionValues[d][n];
                    }
                    else
                    {
                        noiseLessY[d, n] = Variable.Sum(temp);
                    }
                    using (Variable.IfNot(isMissing[d, n]))
                        if (isotropicNoise)
                        {
                            observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecision);
                        }
                        else
                        {
                            observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecisionArray[d]);
                        }
                    using (Variable.If(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(0, 1);
                }
            observedData.ObservedValue = data;
            var nodeFunctionsInit = Enumerable.Range(0, Q).Select(i =>
                                                                  VectorGaussian.FromMeanAndVariance(
                                                                      VectorGaussian.Sample(Vector.Zero(N.ObservedValue), PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100)),
                                                                      PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100))).ToArray(); // should put this manually in generated code

            var distArray = Distribution <Vector> .Array(nodeFunctionsInit);

            var nodeFunctionsInitVar = Variable.Observed(distArray).Named("nodeFunctionsInitVar");

            nodeFunctions.InitialiseTo(nodeFunctionsInitVar);

            modelBlock.CloseBlock();

            toInfer.AddRange(new List <IVariable>()
            {
                ev, noiseLessY, nodeFunctionValues, nodeSignalPrecisions, nodeFunctionValuesPredictive, weightFunctionValues, weightFunctionValues2
            });

            var infer = new InferenceEngine(new VariationalMessagePassing());

            infer.ModelName = "MeanFunction";
            var ca = infer.GetCompiledInferenceAlgorithm(toInfer.ToArray());

            // initLoglengthscales is indexed below, so default to zero log length scales if none were supplied
            initLoglengthscales = initLoglengthscales ?? new double[] { 0, 0 };
            var kernel = new SummationKernel(new SquaredExponential(initLoglengthscales[0]));

            kernel += new WhiteNoise(-3);
            ca.SetObservedValue(K_node_inverse.NameInGeneratedCode, Utils.GramMatrix(kernel, inputs).Inverse());

            kernel  = new SummationKernel(new SquaredExponential(initLoglengthscales[1]));
            kernel += new WhiteNoise(-3);
            ca.SetObservedValue(K_weights_inverse.NameInGeneratedCode, Utils.GramMatrix(kernel, inputs).Inverse());

            ca.Reset();
            double oldML = double.NegativeInfinity;
            double ml    = 0;
            int    it    = 0;

            for (; it < 100; it++)
            {
                ca.Update(1);
                ml = ca.Marginal <Bernoulli>(ev.NameInGeneratedCode).LogOdds;
                Console.WriteLine(ml);
                if (Math.Abs(oldML - ml) < .1)
                {
                    break;
                }
                oldML = ml;
            }
            Console.WriteLine("Finished after " + it);
        }
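A minimal usage sketch (shapes follow the doc comment: data is D x N; initLoglengthscales is passed explicitly since the body indexes two entries; `model` stands for an instance of the enclosing class, whose name is not shown here):

            var inputs = Enumerable.Range(0, 20).Select(i => Vector.Constant(1, i / 20.0)).ToArray();
            var data   = new double[2, 20]; // fill with D = 2 output series of length N = 20
            model.GPRN_InferNET_model(inputs, data, 2, initLoglengthscales: new double[] { 0.0, 0.0 });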
Example #10
        /// <summary>
        /// Infer.NET definition of the Semiparametric Latent Factor Model of
        /// Teh, Y., Seeger, M., and Jordan, M. (AISTATS 2005).
        /// </summary>
        /// <param name="inputs">Covariates X</param>
        /// <param name="data">Outputs Y</param>
        /// <param name="Q">Number of latent functions</param>
        /// <param name="missing">Which elements of Y are missing</param>
        /// <param name="nodeFunctionNoise">Whether to include node noise</param>
        public void SPLFM(
            Vector[] inputs,
            double[,] data,
            int Q,
            bool[,] missing        = null,
            bool nodeFunctionNoise = false)
        {
            var             toInfer = new List <IVariable>();
            SummationKernel kf_node = new SummationKernel(new SquaredExponential(0));
            var             K_node  = Utils.GramMatrix(kf_node, inputs);

            var D    = Variable.Observed <int>(data.GetLength(0)).Named("D");
            var d    = new Range(D).Named("d");
            var Qvar = Variable.Observed <int>(Q).Named("Q");
            var q    = new Range(Qvar).Named("q");
            var N    = Variable.Observed <int>(data.GetLength(1)).Named("N");
            var n    = new Range(N).Named("n");

            if (missing == null)
            {
                missing = new bool[D.ObservedValue, N.ObservedValue]; // a freshly allocated bool array defaults to all false
            }
            var ev         = Variable.Bernoulli(.5).Named("ev");
            var modelBlock = Variable.If(ev);

            var nodeSignalPrecisions = Variable.Array <double>(q).Named("nodeSignalPrecisions");
            // set this to 1 if not learning signal variance
            var nodeSignalPrecisionsPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .1)).ToArray(), q).Named("nodeSignalPrecisionsPrior");

            nodeSignalPrecisions[q] = Variable.Random <double, Gamma>(nodeSignalPrecisionsPrior[q]);

            var nodeFunctions  = Variable.Array <Vector>(q).Named("nodeFunctions");
            var K_node_inverse = Variable.Observed(K_node.Inverse()).Named("K_node_inverse");

            nodeFunctions[q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, nodeSignalPrecisions[q], K_node_inverse);

            nodeFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var nodeFunctionValues           = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValues");
            var nodeFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesPredictive");

            VariableArray <double> nodeNoisePrecisions = null;

            if (nodeFunctionNoise)
            {
                var nodeFunctionValuesClean = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesClean");
                nodeFunctionValuesClean[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeNoisePrecisions        = Variable.Array <double>(q).Named("nodeNoisePrecisions");
                var nodeNoisePrecisionPrior = Variable.Observed(Enumerable.Range(0, Q).Select(_ => Gamma.FromShapeAndRate(.1, .01)).ToArray(), q).Named("nodeNoisePrecisionPrior");
                nodeNoisePrecisions[q] = Variable.Random <double, Gamma>(nodeNoisePrecisionPrior[q]);
                toInfer.Add(nodeNoisePrecisions);
                nodeFunctionValues[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

                nodeFunctionValuesPredictive[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);
            }
            else
            {
                nodeFunctionValues[q]           = Variable.ArrayFromVector(nodeFunctions[q], n);
                nodeFunctionValuesPredictive[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
            }
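
            // Unlike the GPRN in Example #9, where the weights are functions of the
            // input, the mixing weights here are scalar random variables.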

            var weights = Variable.Array <double>(d, q).Named("weights");

            weights[d, q] = Variable.GaussianFromMeanAndPrecision(0, 1).ForEach(d, q);
            var observedData        = Variable.Array <double>(d, n).Named("observedData");
            var noisePrecisionPrior = Variable.Observed(Gamma.FromShapeAndRate(1, .1)).Named("noisePrecisionPrior");
            var noisePrecision      = Variable.Random <double, Gamma>(noisePrecisionPrior).Named("noisePrecision");

            var isMissing = Variable.Array <bool>(d, n).Named("isMissing");

            isMissing.ObservedValue = missing;

            var noiseLessY = Variable.Array <double>(d, n).Named("noiseLessY");

            using (Variable.ForEach(n))
                using (Variable.ForEach(d))
                {
                    var temp = Variable.Array <double>(q).Named("temp");
                    temp[q]          = weights[d, q] * nodeFunctionValues[q][n];
                    noiseLessY[d, n] = Variable.Sum(temp);
                    using (Variable.IfNot(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecision);
                    using (Variable.If(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(0, 1);
                }
            observedData.ObservedValue = data;
            var nodeFunctionsInit = Enumerable.Range(0, Q).Select(i =>
                                                                  VectorGaussian.FromMeanAndVariance(
                                                                      VectorGaussian.Sample(Vector.Zero(N.ObservedValue), PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100)),
                                                                      PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100))).ToArray(); // should put this manually in generated code

            var distArray = Distribution <Vector> .Array(nodeFunctionsInit);

            var nodeFunctionsInitVar = Variable.Observed(distArray).Named("nodeFunctionsInitVar");

            nodeFunctions.InitialiseTo(nodeFunctionsInitVar);

            modelBlock.CloseBlock();

            toInfer.AddRange(new List <IVariable>()
            {
                ev, noiseLessY, noisePrecision, nodeFunctionValues, nodeSignalPrecisions, nodeFunctionValuesPredictive, weights
            });

            var ie = new InferenceEngine(new VariationalMessagePassing());

            ie.ModelName = "SPLFM";
            var ca = ie.GetCompiledInferenceAlgorithm(toInfer.ToArray());

            ca.Execute(100);
            var fvals      = ca.Marginal <Gaussian[][]>(nodeFunctionValues.NameInGeneratedCode)[0]; // [q][n]
            var x          = inputs.Select(i => i[0]).ToArray();
            var mplWrapper = new MatplotlibWrapper();

            mplWrapper.AddArray("x", x);
            mplWrapper.AddArray("y", fvals.Select(i => i.GetMean()).ToArray());
            mplWrapper.AddArray("s", fvals.Select(i => Math.Sqrt(i.GetVariance())).ToArray());

            mplWrapper.Plot(new string[] {
                "fill_between(x,y-s,y+s,color=\"gray\")",
                "ylabel(\"node (fitted)\")"
            });
        }