internal override void Minimum(Image originalImage, KernelFunction kernelFunction, uint neighborhoodSize, uint x, uint y, Image outputImage)
        {
            ImageGray <T> castedOriginalImage = (ImageGray <T>)originalImage;
            ImageGray <T> castedOutputImage   = (ImageGray <T>)outputImage;

            castedOutputImage.Gray.Minimum(castedOriginalImage.Gray, kernelFunction, x, y, neighborhoodSize);
        }
Example #2
        public static BestWindow FindBestWindowType(DistanceFunction distance, KernelFunction kernel, DataSet dataSet, Double maxDistance, Boolean oneHot)
        {
            (Double maxMicro, Double maxMacro, Window bestWindow) = (Double.MinValue, Double.MinValue, null);

            for (Int32 i = 1; i < (Int32)(dataSet.Count / 2); i++) // this also needs hand-tuning based on the dataset size
            {
                CheckWindow(new Window(i));
            }

            for (Double i = 0; i < maxDistance; i += 0.5) // the step should scale with the dataset size
            {
                CheckWindow(new Window(i));
            }

            void CheckWindow(Window window)
            {
                ConfusionMatrix confusion = dataSet.GetConfusionMatrix(distance, kernel, window, oneHot);

                (Double micro, Double macro) = (confusion.MicroF1Score(1), confusion.MacroF1Score(1));

                if (micro > maxMicro && macro > maxMacro)
                {
                    (maxMacro, maxMicro, bestWindow) = (macro, micro, window);
                }
            }

            Console.WriteLine(distance.Method.Name + " | " + kernel.Method.Name + " | " + bestWindow.IsFixed + " | Micro : " + maxMicro + " | Macro : " + maxMacro);

            return(new BestWindow(bestWindow, maxMicro, maxMacro));
        }
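A hypothetical call site for the selector above; `Euclidean`, `Gaussian`, and `dataSet` are illustrative names that do not appear in the source:

        // Hypothetical usage; Euclidean, Gaussian and dataSet are assumed names.
        DistanceFunction distance = Euclidean; // Double[] a, Double[] b -> Double
        KernelFunction   kernel   = Gaussian;  // Double u -> Double
        BestWindow best = FindBestWindowType(distance, kernel, dataSet,
                                             maxDistance: 10.0, oneHot: true);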
Example #3
        public ConfusionMatrix GetConfusionMatrix(DistanceFunction distance, KernelFunction kernel, Window window, Boolean oneHot)
        {
            Int32[,] matrix = new Int32[_classCount, _classCount];

            for (Int32 i = 0; i < _dataSet.Length; i++)
            {
                Int32 predict;
                if (oneHot)
                {
                    predict = OneHotLeaveOneOutStep(distance, kernel, window, i);
                }
                else
                {
                    predict = (Int32)Math.Round(LeaveOneOutStep(distance, kernel, window, i));
                }

                if (predict >= _classCount)
                {
                    predict = _classCount - 1;
                }

                Int32 actual = _dataSet[i].Label;

                matrix[actual, predict]++;
            }

            return(new ConfusionMatrix(matrix));
        }
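MicroF1Score and MacroF1Score are not shown in these examples; below is a minimal sketch of the standard macro-averaged F1 over a matrix laid out as above (row = actual, column = predicted). The method name and layout assumption are mine, not the library's:

        // Sketch: macro-averaged F1 over matrix[actual, predicted] (assumed layout).
        public static Double MacroF1(Int32[,] matrix)
        {
            Int32  classCount = matrix.GetLength(0);
            Double sum        = 0.0;
            for (Int32 c = 0; c < classCount; c++)
            {
                Double tp = matrix[c, c], fp = 0.0, fn = 0.0;
                for (Int32 k = 0; k < classCount; k++)
                {
                    if (k == c) { continue; }
                    fp += matrix[k, c]; // predicted c, actually k
                    fn += matrix[c, k]; // actually c, predicted k
                }
                Double precision = tp + fp > 0 ? tp / (tp + fp) : 0.0;
                Double recall    = tp + fn > 0 ? tp / (tp + fn) : 0.0;
                sum += precision + recall > 0 ? 2 * precision * recall / (precision + recall) : 0.0;
            }
            return sum / classCount; // average per-class F1
        }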
Example #4
        public static Gaussian GPPrediction(Vector x, Vector[] xData, Gaussian[] y, KernelFunction kf, PositiveDefiniteMatrix spec)
        {
            var    KxD      = Vector.FromArray(xData.Select(o => kf.EvaluateX1X2(x, o)).ToArray());
            double mean     = spec.QuadraticForm(KxD, Vector.FromArray(y.Select(o => o.GetMean()).ToArray()));
            double variance = kf.EvaluateX1X2(x, x) - spec.QuadraticForm(KxD);

            return(Gaussian.FromMeanAndVariance(mean, variance));
        }
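For reference, this is the standard Gaussian process posterior predictive computation, with `spec` standing in for the (noise-adjusted) inverse Gram matrix built in PredictionsOnGrid below:

$$\mu(x_*) = k_*^\top K^{-1} \bar{y}, \qquad \sigma^2(x_*) = k(x_*, x_*) - k_*^\top K^{-1} k_*$$

where $k_*$ is the `KxD` vector with entries $k(x_*, x_i)$ and $\bar{y}$ collects the means of the Gaussian observations `y`.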
Example #5
        internal override void Maximum(Image originalImage, KernelFunction kernelFunction, uint neighborhoodSize, uint x, uint y, Image outputImage)
        {
            ImageRGB <T> castedOriginalImage = (ImageRGB <T>)originalImage;
            ImageRGB <T> castedOutputImage   = (ImageRGB <T>)outputImage;

            castedOutputImage.R.Maximum(castedOriginalImage.R, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.G.Maximum(castedOriginalImage.G, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.B.Maximum(castedOriginalImage.B, kernelFunction, x, y, neighborhoodSize);
        }
Example #6
        internal override void Range(Image originalImage, KernelFunction kernelFunction, uint neighborhoodSize, uint x, uint y, Image outputImage)
        {
            ImageHSV <T> castedOriginalImage = (ImageHSV <T>)originalImage;
            ImageHSV <T> castedOutputImage   = (ImageHSV <T>)outputImage;

            castedOutputImage.H.Range(castedOriginalImage.H, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.S.Range(castedOriginalImage.S, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.V.Range(castedOriginalImage.V, kernelFunction, x, y, neighborhoodSize);
        }
Example #7
 public static GPBuffer BufferInit([IgnoreDependency] KernelFunction initialKernel, [IgnoreDependency] Vector[] x)
 {
     return(new GPBuffer
     {
         Precision = Utils.GramMatrix(initialKernel, x).Inverse(),
         //kernel = ObjectCloner.Clone(initialKernel),
         kernel = initialKernel
     });
 }
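Utils.GramMatrix itself is not among these examples; a plausible sketch, assuming the Infer.NET KernelFunction and PositiveDefiniteMatrix types used above:

 // Hypothetical sketch of Utils.GramMatrix: builds K with K[i, j] = k(x_i, x_j).
 public static PositiveDefiniteMatrix GramMatrix(KernelFunction kf, Vector[] x)
 {
     var K = new PositiveDefiniteMatrix(x.Length, x.Length);
     for (int i = 0; i < x.Length; i++)
     {
         for (int j = 0; j < x.Length; j++)
         {
             K[i, j] = kf.EvaluateX1X2(x[i], x[j]);
         }
     }
     return K;
 }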
Example #8
        internal override void Minimum(Image originalImage, KernelFunction kernelFunction, uint neighborhoodSize, uint x, uint y, Image outputImage)
        {
            ImageCMYK <T> castedOriginalImage = (ImageCMYK <T>)originalImage;
            ImageCMYK <T> castedOutputImage   = (ImageCMYK <T>)outputImage;

            castedOutputImage.C.Minimum(castedOriginalImage.C, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.M.Minimum(castedOriginalImage.M, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.Y.Minimum(castedOriginalImage.Y, kernelFunction, x, y, neighborhoodSize);
            castedOutputImage.K.Minimum(castedOriginalImage.K, kernelFunction, x, y, neighborhoodSize);
        }
Example #9
        public static Gaussian[] PredictionsOnGrid(Vector[] xgrid, KernelFunction kf, Vector[] X, Gaussian[] f)
        {
            var Kclone = Utils.GramMatrix(kf, X);

            for (int i = 0; i < X.Length; i++)
            {
                Kclone[i, i] += f[i].GetVariance();
            }
            var spec = Kclone.Inverse();

            return(xgrid.Select(x =>
                                GPPrediction(x, X, f, kf, spec)).ToArray());
        }
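The diagonal adjustment above folds each observation's variance into the Gram matrix before inversion; as a restatement of the code (not from the source):

$$\text{spec} = \bigl(K + \mathrm{diag}(v_1, \dots, v_n)\bigr)^{-1}, \qquad v_i = \operatorname{Var}[f_i]$$

so the same inverse is reused for every grid point passed to GPPrediction.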
Example #10
        /// <summary>
        ///   Computes the raw (unthresholded) decision value for the prediction;
        ///   the sign of the result gives the predicted class.
        /// </summary>
        /// <param name="x">Input vector to score.</param>
        /// <returns>The raw decision value.</returns>
        public double PredictRaw(Vector x)
        {
            var prediction = 0d;

            Preprocess(x);

            if (KernelFunction.IsLinear)
            {
                prediction = Theta.Dot(x) + Bias;
            }
            else
            {
                for (var j = 0; j < X.Rows; j++)
                {
                    prediction = prediction + Alpha[j] * Y[j] * KernelFunction.Compute(X[j, VectorType.Row], x);
                }
                prediction += Bias;
            }

            return(prediction);
        }
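Since PredictRaw returns the signed margin rather than a calibrated probability, a hypothetical caller would typically threshold at zero, matching the -1/+1 label encoding used in Generate below (`model` and `x` are illustrative):

        // Hypothetical usage: classify by the sign of the raw decision value.
        double score = model.PredictRaw(x);
        int    label = score >= 0.0 ? 1 : -1; // matches y.ToBinary(k => k == 1d, falseValue: -1.0)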
Example #11
 /// <summary>
 /// Uses the KernelOptimiser class to optimise the hyperparameters given the current variational
 /// posterior on the function values (which has mean SampleMean and covariance SampleVariance).
 /// </summary>
 public static GPBuffer BufferHelper(int[] hypersToOptimise, GPBuffer Buffer, Vector[] x, Vector SampleMean, PositiveDefiniteMatrix SampleVariance, Gamma scaling)
 {
     if (SampleMean.All(o => o == 0.0))
     {
         Buffer.Precision = Utils.GramMatrix(Buffer.kernel, x).Inverse();
     }
     else
     {
         //Console.WriteLine(Utils.KernelToArray(Buffer.kernel).Select(o => o.ToString()).Aggregate((p, q) => p + " " + q));
         var helper = new KernelOptimiser(settings);
         helper.kernel           = Buffer.kernel;
         helper.xData            = x;
         helper.hypersToOptimise = hypersToOptimise;
         helper.Optimise((prec, gradK, gradientVector) =>
                         helperFunction(prec, gradK, gradientVector, scaling, SampleMean,
                                        SampleVariance), ref Buffer.Precision);
         Buffer.ESamplePrecisionSample = VectorGaussianScaledPrecisionOp.ESamplePrecisionSample(SampleMean, SampleVariance, Buffer.Precision);
         Buffer.PrecisionMeanLogDet    = VectorGaussianScaledPrecisionOp.PrecisionMeanLogDet(Buffer.Precision);
         //Console.WriteLine(Utils.KernelToArray(Buffer.kernel).Select(o => o.ToString()).Aggregate((p, q) => p + " " + q));
         rememberKernel = Buffer.kernel;
     }
     return(Buffer);
 }
Example #12
        public Int32 OneHotLeaveOneOutStep(DistanceFunction distance, KernelFunction kernel, Window window, Int32 controlIndex)
        {
            Double maxClassValue = Double.MinValue;
            Int32  index         = Int32.MinValue;

            for (Int32 i = 0; i < _classCount; i++)
            {
                Int32 currentClass = i;

                DataSetObject[] currentClassRows = new DataSetObject[_dataSet.Length - 1];
                Int32           newRowsIndex     = 0;
                for (Int32 j = 0; j < _dataSet.Length; j++) // TODO: optimise; this relabelled copy is rebuilt for every class
                {
                    if (j == controlIndex)
                    {
                        continue;
                    }

                    var rowObject = _dataSet[j];

                    currentClassRows[newRowsIndex] = new DataSetObject(rowObject.Features, rowObject.Label == currentClass ? 1 : 0);
                    newRowsIndex++;
                }

                var result = new DataSet(currentClassRows)
                             .GetPredictkNN(_dataSet[controlIndex].Features, kernel, distance, window);

                if (result > maxClassValue)
                {
                    maxClassValue = result;
                    index         = i;
                }
            }

            return(index);
        }
Example #13
        public override Image Filter(Kernel kernel, FilterType filter = FilterType.AVERAGE, EdgeHandling edgeHandling = EdgeHandling.MIRROR_EXTENSION)
        {
            uint size  = kernel.Size;
            uint range = size / 2;

            if (Width < range + 1 || Height < range + 1)
            {
                throw new IndexOutOfRangeException("Neighborhood range cannot be greater than image size.");
            }

            Image result;

            if (edgeHandling == EdgeHandling.SKIP_UNDEFINED)
            {
                result = Clone();
            }
            else
            {
                result = ImageFactory.Create(Width, Height, GetColorModel(), GetDataType());
            }

            uint lowerX, upperX, lowerY, upperY;

            GetFilterArea(edgeHandling, range, out lowerX, out upperX, out lowerY, out upperY);
            KernelFunction  kernelFunction  = GetKernelFunction(kernel, edgeHandling, (int)range);
            FilterOperation filterOperation = GetFilterOperation(filter);

            for (uint x = lowerX; x < upperX; x++)
            {
                for (uint y = lowerY; y < upperY; y++)
                {
                    filterOperation(this, kernelFunction, size, x, y, result);
                }
            }
            return(result);
        }
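A hypothetical call into the filtering pipeline above; the Kernel constructor and the concrete `image` instance are assumptions, and only enum members visible in this snippet are used:

        // Hypothetical usage; 'image' and the Kernel(3) constructor are assumed.
        Kernel kernel   = new Kernel(3); // 3x3 neighborhood
        Image  averaged = image.Filter(kernel, FilterType.AVERAGE, EdgeHandling.MIRROR_EXTENSION);
        Image  skipped  = image.Filter(kernel, edgeHandling: EdgeHandling.SKIP_UNDEFINED);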
Example #14
 public static Vector GP(double scaling, Vector[] x, int[] hypersToOptimise, KernelFunction initialKernel)
 {
     return(VectorGaussian.Uniform(x.Length).Sample());
 }
Example #15
 internal abstract void Range(Image originalImage, KernelFunction kernelFunction, uint neighborhoodSize, uint x, uint y, Image outputImage);
Example #16
        public Double GetPredictkNN(Double[] requestValues, KernelFunction kernel, DistanceFunction distance, Window window)
        {
            var orderedDataSet = GetOrderedByDistance(requestValues, distance);

            return(GetPredictLabel(orderedDataSet, kernel, window.GetFixedWindow(orderedDataSet)));
        }
Example #17
 private Double LeaveOneOutStep(DistanceFunction distance, KernelFunction kernel, Window window, Int32 controlIndex)
 => new DataSet(_dataSet.ExceptIndex(controlIndex))
 .GetPredictkNN(_dataSet[controlIndex].Features, kernel, distance, window);
Example #18
        /// <summary>Generates a SVM model based on a set of examples.</summary>
        /// <param name="X">The Matrix to process.</param>
        /// <param name="y">The Vector to process.</param>
        /// <returns>Model.</returns>
        public override IModel Generate(Matrix X, Vector y)
        {
            Preprocess(X);

            // expect truth = 1 and false = -1
            y = y.ToBinary(k => k == 1d, falseValue: -1.0);

            // initialise variables
            int m = X.Rows, n = X.Cols, i = -1, j = -1;
            var iterations = 0;

            Vector gradient = Vector.Zeros(m), alpha = Vector.Zeros(m);

            // precompute kernel matrix (using similarity function)
            var K = KernelFunction.Compute(X);

            // synchronise SVM parameters with working set selection function.
            SelectionFunction.Bias    = Bias;
            SelectionFunction.C       = C;
            SelectionFunction.Epsilon = Epsilon;
            SelectionFunction.K       = K;
            SelectionFunction.Y       = y;

            var finalise = false;

            SelectionFunction.Initialize(alpha, gradient);

            while (finalise == false && iterations < MaxIterations)
            {
                var changes = 0;

                #region Training

                for (var p = 0; p < m; p++)
                {
                    // get new working set selection using heuristic function
                    var newPair = SelectionFunction.GetWorkingSet(i, j, gradient, alpha);

                    // check for valid i, j pairs
                    if (newPair.Item1 >= 0 && newPair.Item2 >= 0 && newPair.Item1 != newPair.Item2)
                    {
                        i = newPair.Item1;
                        j = newPair.Item2;
                        // compute new gradients
                        gradient[i] = Bias + (alpha * y * K[i, VectorType.Col]).Sum() - y[i];

                        if ((!(y[i] * gradient[i] < -Epsilon) || !(alpha[i] < C)) &&
                            (!(y[i] * gradient[i] > Epsilon) || !(alpha[i] > 0)))
                        {
                            continue;
                        }
                        gradient[j] = Bias + (alpha * y * K[j, VectorType.Col]).Sum() - y[j];

                        // store temp working copies of alpha from both pairs (i, j)
                        var tempAI = alpha[i];
                        var tempAJ = alpha[j];

                        // update lower and upper bounds of lagrange multipliers
                        double lagHigh;
                        double lagLow;
                        if (y[i] == y[j])
                        {
                            // pairs are same class don't apply large margin
                            lagLow  = System.Math.Max(0.0, alpha[j] + alpha[i] - C);
                            lagHigh = System.Math.Min(C, alpha[j] + alpha[i]);
                        }
                        else
                        {
                            // pairs are not same class, apply large margin
                            lagLow  = System.Math.Max(0.0, alpha[j] - alpha[i]);
                            lagHigh = System.Math.Min(C, C + alpha[j] - alpha[i]);
                        }

                        // if lagrange constraints are not diverse then get new working set
                        if (lagLow == lagHigh)
                        {
                            continue;
                        }

                        // compute the cost; only proceed when it is negative, since
                        // cost should optimise large margin where fit line intercepts <= 0
                        var cost = 2.0 * K[i, j] - K[i, i] - K[j, j];
                        if (cost < 0.0)
                        {
                            // update alpha of (j) w.r.t to the relative cost difference of the i-th and j-th gradient
                            alpha[j] = alpha[j] - y[j] * (gradient[i] - gradient[j]) / cost;

                            // clip alpha with lagrange multipliers
                            alpha[j] = System.Math.Min(lagHigh, alpha[j]);
                            alpha[j] = System.Math.Max(lagLow, alpha[j]);

                            // check alpha tolerance factor
                            if (System.Math.Abs(alpha[j] - tempAJ) < Epsilon)
                            {
                                // we're optimising large margins so skip small ones
                                alpha[j] = tempAJ;
                                continue;
                            }

                            // update alpha of i if we have a large margin w.r.t to alpha (j)
                            alpha[i] = alpha[i] + y[i] * y[j] * (tempAJ - alpha[j]);

                            // precompute i, j into feasible region for Bias
                            var yBeta = (alpha[i] - tempAI) * K[i, j] - y[j] * (alpha[j] - tempAJ);
                            // store temp beta with gradient for i, j pairs
                            var beta_i = Bias - gradient[i] - y[i] * yBeta * K[i, j];
                            var beta_j = Bias - gradient[j] - y[i] * yBeta * K[j, j];

                            // update new bias with constrained alpha limits (0 < alpha < C)
                            if (0.0 < alpha[i] && alpha[i] < C)
                            {
                                Bias = beta_i;
                            }
                            else if (0.0 < alpha[j] && alpha[j] < C)
                            {
                                Bias = beta_j;
                            }
                            else
                            {
                                Bias = (beta_i + beta_j) / 2.0;
                            }

                            changes++;
                        }
                    }
                    else if (newPair.Item1 == -1 || newPair.Item2 == -1)
                    {
                        // unable to find suitable sub problem (j) to optimise
                        finalise = true;
                        break;
                    }
                }

                if (changes == 0)
                {
                    iterations++;
                }
                else
                {
                    iterations = 0;
                }

                #endregion
            }

            // keep only the support vectors, i.e. the examples where alpha > 0
            var fitness = (alpha > 0d).ToArray();

            // return initialised model
            return(new SVMModel
            {
                Descriptor = Descriptor,
                FeatureNormalizer = FeatureNormalizer,
                FeatureProperties = FeatureProperties,
                Theta = (alpha * y * X).ToVector(),
                Alpha = alpha.Slice(fitness),
                Bias = Bias,
                X = X.Slice(fitness, VectorType.Row),
                Y = y.Slice(fitness),
                KernelFunction = KernelFunction
            });
        }
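A hypothetical training call, assuming this Generate is exposed on an SVM generator whose C, Epsilon, MaxIterations, KernelFunction and SelectionFunction are already configured (the `generator` instance is illustrative):

        // Hypothetical usage: train on feature matrix X and labels y (coerced to -1/+1).
        var model = (SVMModel)generator.Generate(X, y);
        double score = model.PredictRaw(X[0, VectorType.Row]); // sign gives the class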
Example #19
        /// <summary>
        /// An implementation of GPRN specialised for one-step-look-ahead multivariate volatility experiments.
        /// </summary>
        /// <param name="inputs">Covariates X</param>
        /// <param name="data">Outputs Y</param>
        /// <returns>Predicted covariance for the next time point</returns>
        public VectorGaussian GPRN_MultivariateVolatility(
            Vector[] inputs,
            double[,] data,
            double[] nodeSignalPrecs,
            double[] nodeNoisePrecs,
            double obsNoisePrec,
            ref VectorGaussian[] finit,
            ref VectorGaussian[,] winit,
            KernelFunction nodeKernel,
            KernelFunction weightKernel)
        {
            var missing = new bool[data.GetLength(0), data.GetLength(1)];

            for (int i = 0; i < data.GetLength(0); i++)
            {
                missing[i, data.GetLength(1) - 1] = true; // last data point is missing
            }
            int Q = nodeSignalPrecs.Length;

            var toInfer   = new List <IVariable>();
            var K_node    = Utils.GramMatrix(nodeKernel, inputs);
            var K_weights = Utils.GramMatrix(weightKernel, inputs);

            var D    = Variable.Observed <int>(data.GetLength(0)).Named("D");
            var d    = new Range(D).Named("d");
            var Qvar = Variable.Observed <int>(Q).Named("Q");
            var q    = new Range(Qvar).Named("q");
            var N    = Variable.Observed <int>(data.GetLength(1)).Named("N");
            var n    = new Range(N).Named("n");

            var ev         = Variable.Bernoulli(.5).Named("ev");
            var modelBlock = Variable.If(ev);

            var nodeSignalPrecisions = Variable.Array <double>(q).Named("nodeSignalPrecisions");

            nodeSignalPrecisions.ObservedValue = nodeSignalPrecs;

            var nodeFunctions  = Variable.Array <Vector>(q).Named("nodeFunctions");
            var K_node_inverse = Variable.Observed(K_node.Inverse()).Named("K_node_inverse");

            nodeFunctions[q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, nodeSignalPrecisions[q], K_node_inverse);

            nodeFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var nodeFunctionValues           = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValues");
            var nodeFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesPredictive");

            var nodeFunctionValuesClean = Variable.Array(Variable.Array <double>(n), q).Named("nodeFunctionValuesClean");

            nodeFunctionValuesClean[q] = Variable.ArrayFromVector(nodeFunctions[q], n);
            var nodeNoisePrecisions = Variable.Array <double>(q).Named("nodeNoisePrecisions");

            nodeNoisePrecisions.ObservedValue = nodeNoisePrecs;
            nodeFunctionValues[q][n]          = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

            nodeFunctionValuesPredictive[q][n] = Variable.GaussianFromMeanAndPrecision(nodeFunctionValuesClean[q][n], nodeNoisePrecisions[q]);

            var weightFunctions   = Variable.Array <Vector>(d, q).Named("weightFunctions");
            var K_weights_inverse = Variable.Observed(K_weights.Inverse()).Named("K_weights_inverse");

            weightFunctions[d, q] = Variable <Vector> .Factor(MyFactors.VectorGaussianScaled, Variable.Constant <double>(1), K_weights_inverse).ForEach(d, q);

            weightFunctions.AddAttribute(new MarginalPrototype(new VectorGaussian(N.ObservedValue)));
            var weightFunctionValues           = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValues");
            var weightFunctionValuesPredictive = Variable.Array(Variable.Array <double>(n), d, q).Named("weightFunctionValuesPredictive");

            weightFunctionValues[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);

            weightFunctionValuesPredictive[d, q] = Variable.ArrayFromVector(weightFunctions[d, q], n);
            var observedData   = Variable.Array <double>(d, n).Named("observedData");
            var noisePrecision = Variable.Observed(obsNoisePrec).Named("noisePrecision");

            var isMissing = Variable.Array <bool>(d, n).Named("isMissing");

            isMissing.ObservedValue = missing;

            var noiseLessY = Variable.Array <double>(d, n).Named("noiseLessY");

            using (Variable.ForEach(n))
                using (Variable.ForEach(d))
                {
                    var temp = Variable.Array <double>(q).Named("temp");
                    temp[q]          = weightFunctionValues[d, q][n] * nodeFunctionValues[q][n];
                    noiseLessY[d, n] = Variable.Sum(temp);
                    using (Variable.IfNot(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(noiseLessY[d, n], noisePrecision);
                    using (Variable.If(isMissing[d, n]))
                        observedData[d, n] = Variable.GaussianFromMeanAndPrecision(0, 1);
                }
            observedData.ObservedValue = data;
            var nodeFunctionsInit = Enumerable.Range(0, Q).Select(i =>
                                                                  VectorGaussian.FromMeanAndVariance(
                                                                      VectorGaussian.Sample(Vector.Zero(N.ObservedValue), PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100)),
                                                                      PositiveDefiniteMatrix.IdentityScaledBy(N.ObservedValue, 100))).ToArray(); // should put this manually in generated code

            var distArray = Distribution <Vector> .Array(nodeFunctionsInit);

            var nodeFunctionsInitVar = Variable.Observed(distArray).Named("nodeFunctionsInitVar");

            nodeFunctions.InitialiseTo(nodeFunctionsInitVar);

            var finitNew = finit.Select(i => Utils.extendByOneDimension(i, Gaussian.FromMeanAndVariance(0, 1))).ToArray();

            nodeFunctions.InitialiseTo(Distribution <Vector> .Array(finitNew));

            var winitNew = new VectorGaussian[data.GetLength(0), Q];

            for (int i = 0; i < data.GetLength(0); i++)
            {
                for (int j = 0; j < Q; j++)
                {
                    winitNew[i, j] = Utils.extendByOneDimension(winit[i, j], Gaussian.FromMeanAndVariance(0, 1));
                }
            }

            weightFunctions.InitialiseTo(Distribution <Vector> .Array(winitNew));

            modelBlock.CloseBlock();

            toInfer.AddRange(new List <IVariable>()
            {
                ev, noiseLessY, nodeFunctions, weightFunctions, nodeFunctionValuesPredictive, weightFunctionValues, weightFunctionValuesPredictive                                      /* is this redundant? */
            });

            var ie = new InferenceEngine(new VariationalMessagePassing());
            var ca = ie.GetCompiledInferenceAlgorithm(toInfer.ToArray());

            ca.SetObservedValue(K_node_inverse.NameInGeneratedCode, Utils.GramMatrix(nodeKernel, inputs).Inverse());
            ca.SetObservedValue(K_weights_inverse.NameInGeneratedCode, Utils.GramMatrix(weightKernel, inputs).Inverse());
            ca.Reset();

            double oldML = double.NegativeInfinity;
            double ml    = 0;
            int    it    = 0;

            for (; it < 30; it++)
            {
                ca.Update(1);
                ml = ca.Marginal <Bernoulli>(ev.NameInGeneratedCode).LogOdds;
                Console.WriteLine(ml);
                if (Math.Abs(oldML - ml) < .1)
                {
                    break;
                }
                oldML = ml;
            }

            var f = ca.Marginal <Gaussian[][]>("nodeFunctionValuesPredictive");
            var W = ca.Marginal <Gaussian[, ][]>("weightFunctionValuesPredictive");

            finit = ca.Marginal <VectorGaussian[]>(nodeFunctions.NameInGeneratedCode);
            winit = ca.Marginal <VectorGaussian[, ]>(weightFunctions.NameInGeneratedCode);
            return(Utils.CorrelatedPredictionsHelper(f, W, Gamma.PointMass(obsNoisePrec), Q, data.GetLength(0), data.GetLength(1) - 1));
        }
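For orientation, the factor graph assembled above instantiates the standard GPRN generative model (this summary restates the construction; it is not taken from the file): each node function $f_q$ and each weight function $w_{dq}$ receives a GP prior, and the observed output is

$$y(x_n) = W(x_n)\bigl(f(x_n) + \sigma_f \varepsilon\bigr) + \sigma_y z$$

with `nodeNoisePrecisions` playing $\sigma_f^{-2}$, `noisePrecision` playing $\sigma_y^{-2}$, and the last time point flagged missing so that the `*Predictive` marginals yield the one-step-ahead covariance returned via Utils.CorrelatedPredictionsHelper.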