/// <summary>
/// Tests sparse GP regression: fits a SparseGP posterior to 10 noisy scalar observations,
/// then checks the posterior Alpha/Beta statistics, the predictive marginals at three test
/// points, and the model evidence against reference values (predictions computed by
/// matlab/MNT/GP/test_gpr.m).
/// </summary>
public void GPRegressionTest()
{
    // Observed regression targets for the 10 training inputs below.
    double[] yData = new double[]
        {
            -0.06416828853982412, -0.6799959810206935, -0.4541652863622044, 0.155770359928991, 1.036659040456137,
            0.7353821980830825, 0.8996680933259047, -0.05368704705684217, -0.7905775695015919, -0.1436284683992815
        };
    // 10 equally spaced scalar inputs on [-2, 2].
    double[] xData = new double[]
        {
            -2, -1.555555555555556, -1.111111111111111, -0.6666666666666667, -0.2222222222222223,
            0.2222222222222223, 0.6666666666666665, 1.111111111111111, 1.555555555555555, 2
        };
    // Wrap each scalar input as a 1-dimensional Vector, as required by the kernel/GP API.
    Vector[] xVec = Array.ConvertAll(xData, v => Vector.Constant(1, v));
    // The sparse GP uses only 3 of the 10 inputs as basis (inducing) points.
    Vector[] basis = new Vector[] { xVec[1], xVec[4], xVec[8] };
    // Squared-exponential kernel; the argument is the log length-scale, i.e. length-scale = 2.
    IKernelFunction kf = new SquaredExponential(System.Math.Log(2.0));
    SparseGPFixed sgpf = new SparseGPFixed(kf, basis);
    // Wrap the model in an If(evidence) block so the model evidence can be inferred from
    // evidence's posterior LogOdds at the end of the test.
    Variable<bool> evidence = Variable.Bernoulli(0.5).Named("evidence");
    IfBlock block = Variable.If(evidence);
    // Latent function drawn from the sparse GP prior.
    Variable<IFunction> f = Variable.Random<IFunction>(new SparseGP(sgpf)).Named("f");
    Range item = new Range(xVec.Length).Named("item");
    VariableArray<Vector> x = Variable.Array<Vector>(item).Named("x");
    x.ObservedValue = xVec;
    VariableArray<double> y = Variable.Array<double>(item).Named("y");
    y.ObservedValue = yData;
    // h[i] = f(x[i]); y[i] is h[i] plus Gaussian observation noise with variance 0.1.
    VariableArray<double> h = Variable.Array<double>(item).Named("h");
    h[item] = Variable.FunctionEvaluate(f, x[item]);
    y[item] = Variable.GaussianFromMeanAndVariance(h[item], 0.1);
    block.CloseBlock();

    InferenceEngine engine = new InferenceEngine();
    SparseGP sgp = engine.Infer<SparseGP>(f);
    // Expected posterior statistics of the sparse GP (one entry per basis point).
    Vector alphaExpected = Vector.FromArray(new double[] { -3.250044160725389, 4.579296091435270, -2.227005562666341 });
    PositiveDefiniteMatrix betaExpected = new PositiveDefiniteMatrix(new double[,]
        {
            { 3.187555652658986, -3.301824438047169, 1.227566907279797 },
            { -3.30182443804717, 5.115027119603418, -2.373085083966294 },
            { 1.227566907279797, -2.373085083966294, 2.156308696222915 }
        });
    Console.WriteLine("alpha = {0} should be {1}", sgp.Alpha, alphaExpected);
    Console.WriteLine(StringUtil.JoinColumns("beta = ", sgp.Beta, " should be ", betaExpected));

    // Check predictive marginals at three held-out inputs.
    double[] xTest = new double[] { -2, -1, 0.0 };
    Vector[] xTestVec = Array.ConvertAll(xTest, v => Vector.Constant(1, v));
    // computed by matlab/MNT/GP/test_gpr.m
    double[] yMeanTest = new double[] { -0.544583265595561, 0.134323399801302, 0.503623822120711 };
    double[] yVarTest = new double[] { 0.058569682375201, 0.022695532903985, 0.024439582002951 };
    for (int i = 0; i < xTestVec.Length; i++)
    {
        Gaussian pred = sgp.Marginal(xTestVec[i]);
        Gaussian predExpected = new Gaussian(yMeanTest[i], yVarTest[i]);
        Console.WriteLine("f({0}) = {1} should be {2}", xTest[i], pred, predExpected);
        Assert.True(predExpected.MaxDiff(pred) < 1e-4);
    }

    // Model evidence, read off as the log-odds of the evidence variable.
    double evExpected = -13.201173794945003;
    double evActual = engine.Infer<Bernoulli>(evidence).LogOdds;
    Console.WriteLine("evidence = {0} should be {1}", evActual, evExpected);
    Assert.True(MMath.AbsDiff(evExpected, evActual, 1e-6) < 1e-4);
}
/// <summary>
/// Tests sparse GP binary classification: fits a SparseGP posterior to 10 boolean labels
/// via a threshold link (y = (f(x) > 0)), then checks the posterior Alpha statistics, the
/// predictive marginals at three test points, and the model evidence against reference
/// values (predictions computed by matlab/MNT/GP/test_gpc.m).
/// </summary>
public void GPClassificationTest()
{
    // Observed binary labels for the 10 training inputs below.
    bool[] yData = new bool[] { false, false, false, true, true, true, true, false, false, false };
    // 10 equally spaced scalar inputs on [-2, 2].
    double[] xData = new double[]
        {
            -2, -1.555555555555556, -1.111111111111111, -0.6666666666666667, -0.2222222222222223,
            0.2222222222222223, 0.6666666666666665, 1.111111111111111, 1.555555555555555, 2
        };
    // Wrap each scalar input as a 1-dimensional Vector, as required by the kernel/GP API.
    Vector[] xVec = Array.ConvertAll(xData, v => Vector.Constant(1, v));
    // The sparse GP uses only 3 of the 10 inputs as basis (inducing) points.
    Vector[] basis = new Vector[] { xVec[1], xVec[4], xVec[8] };
    //basis = xVec;
    // Squared-exponential kernel; the argument is the log length-scale, i.e. length-scale = 1.
    IKernelFunction kf = new SquaredExponential(0.0);
    SparseGPFixed sgpf = new SparseGPFixed(kf, basis);
    // Wrap the model in an If(evidence) block so the model evidence can be inferred from
    // evidence's posterior LogOdds at the end of the test.
    Variable<bool> evidence = Variable.Bernoulli(0.5).Named("evidence");
    IfBlock block = Variable.If(evidence);
    // Latent function drawn from the sparse GP prior.
    Variable<IFunction> f = Variable.Random<IFunction>(new SparseGP(sgpf)).Named("f");
    Range item = new Range(xVec.Length).Named("item");
    VariableArray<Vector> x = Variable.Array<Vector>(item).Named("x");
    x.ObservedValue = xVec;
    VariableArray<bool> y = Variable.Array<bool>(item).Named("y");
    y.ObservedValue = yData;
    // h[i] = f(x[i]); the label is the sign of the latent function value.
    VariableArray<double> h = Variable.Array<double>(item).Named("h");
    h[item] = Variable.FunctionEvaluate(f, x[item]);
    y[item] = (h[item] > 0);
    block.CloseBlock();

    InferenceEngine engine = new InferenceEngine();
    SparseGP sgp = engine.Infer<SparseGP>(f);
    // Expected posterior statistics of the sparse GP (one entry per basis point).
    Vector alphaExpected = Vector.FromArray(new double[] { -1.410457563120709, 1.521306076273262, -1.008600221619413 });
    Console.WriteLine("alpha = {0} should be {1}", sgp.Alpha, alphaExpected);

    // Check predictive marginals at three held-out inputs.
    double[] xTest = new double[] { -2, -1, 0.0 };
    Vector[] xTestVec = Array.ConvertAll(xTest, v => Vector.Constant(1, v));
    // computed by matlab/MNT/GP/test_gpc.m
    double[] yMeanTest = new double[] { -0.966351175090184, -0.123034591744284, 0.762757400008960 };
    double[] yVarTest = new double[] { 0.323871157983366, 0.164009511251333, 0.162068482365962 };
    for (int i = 0; i < xTestVec.Length; i++)
    {
        Gaussian pred = sgp.Marginal(xTestVec[i]);
        Gaussian predExpected = new Gaussian(yMeanTest[i], yVarTest[i]);
        // FIX: this format string was previously broken by a raw line break inside the
        // literal; restored to a single line, matching GPRegressionTest.
        Console.WriteLine("f({0}) = {1} should be {2}", xTest[i], pred, predExpected);
        Assert.True(predExpected.MaxDiff(pred) < 1e-4);
    }

    // Model evidence, read off as the log-odds of the evidence variable.
    double evExpected = -4.907121241357144;
    double evActual = engine.Infer<Bernoulli>(evidence).LogOdds;
    Console.WriteLine("evidence = {0} should be {1}", evActual, evExpected);
    Assert.True(MMath.AbsDiff(evExpected, evActual, 1e-6) < 1e-4);
}
/// <summary>
/// Fits a 2-component mixture of 2-D Gaussians with VMP to data produced by GenerateData,
/// breaking symmetry by randomly initialising the component-assignment messages, and checks
/// that one of the inferred mixture weights is close to the true mixing proportion.
/// </summary>
public void MixtureOfMultivariateGaussians()
{
    // Define a range for the number of mixture components
    Range k = new Range(2).Named("k");

    // Mixture component means
    VariableArray<Vector> means = Variable.Array<Vector>(k).Named("means");
    means[k] = Variable.VectorGaussianFromMeanAndPrecision(Vector.Zero(2), PositiveDefiniteMatrix.IdentityScaledBy(2, 0.01)).ForEach(k);

    // Mixture component precisions
    VariableArray<PositiveDefiniteMatrix> precs = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs");
    precs[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(2, 0.01)).ForEach(k);

    // Mixture weights
    Variable<Vector> weights = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights");

    // Create a variable array which will hold the data
    Range n = new Range(300).Named("n");
    VariableArray<Vector> data = Variable.Array<Vector>(n).Named("x");
    // Create latent indicator variable for each data point
    VariableArray<int> z = Variable.Array<int>(n).Named("z");

    // The mixture of Gaussians model: each point picks a component via z[n], and the
    // Switch block selects that component's mean and precision.
    using (Variable.ForEach(n))
    {
        z[n] = Variable.Discrete(weights);
        using (Variable.Switch(z[n]))
        {
            data[n] = Variable.VectorGaussianFromMeanAndPrecision(means[z[n]], precs[z[n]]);
        }
    }

    // Attach some generated data
    // NOTE(review): GenerateData is defined elsewhere in this file; presumably it samples
    // from a 2-component mixture with mixing proportion truePi - verify against its definition.
    double truePi = 0.6;
    data.ObservedValue = GenerateData(n.SizeAsInt, truePi);

    // Initialise messages randomly so as to break symmetry
    Discrete[] zinit = new Discrete[n.SizeAsInt];
    for (int i = 0; i < zinit.Length; i++)
    {
        zinit[i] = Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt);
    }
    z.InitialiseTo(Distribution<int>.Array(zinit));

    // The inference
    InferenceEngine ie = new InferenceEngine();
    ie.Algorithm = new VariationalMessagePassing();
    //ie.Compiler.GenerateInMemory = false;
    //ie.NumberOfIterations = 200;
    Dirichlet wDist = (Dirichlet)ie.Infer(weights);
    Vector wEstMean = wDist.GetMean();

    object meansActual = ie.Infer(means);
    Console.WriteLine("means = ");
    Console.WriteLine(meansActual);
    var precsActual = ie.Infer<IList<Wishart>>(precs);
    Console.WriteLine("precs = ");
    Console.WriteLine(precsActual);
    Console.WriteLine("w = {0} should be {1}", wEstMean, Vector.FromArray(truePi, 1 - truePi));
    //Console.WriteLine(StringUtil.JoinColumns("z = ", ie.Infer(z)));
    // Component labels are exchangeable, so accept truePi appearing in either slot.
    Assert.True(
        MMath.AbsDiff(wEstMean[0], truePi) < 0.05 ||
        MMath.AbsDiff(wEstMean[1], truePi) < 0.05);
}
/// <summary>
/// Fits a 2-component mixture of 2-D Poisson distributions to data sampled from known
/// rates, checks that the inferred rate means are close to the true rates, and verifies
/// that setting NumberOfIterations back to 1 resets inference to its one-iteration state.
/// </summary>
public void PoissonMixtureTest()
{
    // Fixed seed so the sampled counts (and therefore the expected posterior) are reproducible.
    Rand.Restart(1);
    int N = 40, D = 2, K = 2;
    Range n = new Range(N).Named("n");
    Range k = new Range(K).Named("k");
    Range d = new Range(D).Named("d");
    // Per-component, per-dimension Poisson rates with a broad Gamma prior.
    VariableArray2D<double> p = Variable.Array<double>(k, d).Named("p");
    p[k, d] = Variable.GammaFromMeanAndVariance(10, 100).ForEach(k, d);
    VariableArray2D<int> x = Variable.Array<int>(n, d).Named("x");
    // Latent component assignment for each data point, uniform over the two components.
    VariableArray<int> c = Variable.Array<int>(n).Named("c");
    using (Variable.ForEach(n))
    {
        c[n] = Variable.Discrete(k, 0.5, 0.5);
        using (Variable.Switch(c[n]))
        {
            x[n, d] = Variable.Poisson(p[c[n], d]);
        }
    }
    //n.AddAttribute(new Sequential());
    //c.AddAttribute(new DivideMessages(false));
    InferenceEngine engine = new InferenceEngine();
    //engine.Algorithm = new VariationalMessagePassing();

    // True rates: component i, dimension j has rate (i + j + 1) * 10, i.e. {10,20} and {20,30}.
    int[,] data = new int[N, D];
    double[,] mean = new double[K, D];
    for (int i = 0; i < K; i++)
    {
        for (int j = 0; j < D; j++)
        {
            //mean[i, j] = i+j;
            mean[i, j] = (i + j + 1) * 10;
        }
    }
    // Sample counts, alternating points between the two components, and initialise each
    // assignment message to the generating component to break symmetry.
    Discrete[] cInit = new Discrete[N];
    for (int i = 0; i < N; i++)
    {
        int cluster = i % 2;
        for (int j = 0; j < D; j++)
        {
            data[i, j] = Rand.Poisson(mean[cluster, j]);
        }
        double r = cluster;
        cInit[i] = new Discrete(1 - r, r);
    }
    x.ObservedValue = data;
    c.InitialiseTo(Distribution<int>.Array(cInit));

    // Capture the posterior after a single iteration, for the reset check below.
    engine.NumberOfIterations = 1;
    var pPost1 = engine.Infer(p);
    engine.NumberOfIterations = 200;
    Gamma[,] pPost = engine.Infer<Gamma[,]>(p);
    for (int i = 0; i < pPost.GetLength(0); i++)
    {
        for (int j = 0; j < pPost.GetLength(1); j++)
        {
            double mActual = pPost[i, j].GetMean();
            double mExpected = mean[i, j];
            Console.WriteLine(String.Format("pPost[{0}][{1}] = {2} should be {3}", i, j, mActual, mExpected));
            Assert.True(MMath.AbsDiff(mExpected, mActual, 1e-6) < 0.3);
        }
    }

    // test resetting inference: dropping back to 1 iteration must reproduce pPost1 exactly.
    engine.NumberOfIterations = 1;
    var pPost2 = engine.Infer<Diffable>(p);
    Assert.True(pPost2.MaxDiff(pPost1) < 1e-10);
}
/// <summary>
/// Fits a 2-component mixture of 2-D Gaussians with VMP, breaking symmetry by initialising
/// the per-point assignment distributions through an observed VariableArray of Discrete
/// initialisers, and checks that one inferred mixture weight matches the true proportion.
/// NOTE(review): a method with this same name appears earlier in this file chunk - confirm
/// the two live in different classes, or rename one, since duplicates in one class will not compile.
/// </summary>
public void MixtureOfMultivariateGaussians()
{
    // Define a range for the number of mixture components
    Range k = new Range(2).Named("k");

    // Mixture component means
    VariableArray<Vector> means = Variable.Array<Vector>(k).Named("means");
    means[k] = Variable.VectorGaussianFromMeanAndPrecision(Vector.Zero(2), PositiveDefiniteMatrix.IdentityScaledBy(2, 0.01)).ForEach(k);

    // Mixture component precisions
    VariableArray<PositiveDefiniteMatrix> precs = Variable.Array<PositiveDefiniteMatrix>(k).Named("precs");
    precs[k] = Variable.WishartFromShapeAndScale(100.0, PositiveDefiniteMatrix.IdentityScaledBy(2, 0.01)).ForEach(k);

    // Mixture weights
    Variable<Vector> weights = Variable.Dirichlet(k, new double[] { 1, 1 }).Named("weights");

    // Create a variable array which will hold the data
    Range n = new Range(300).Named("n");
    VariableArray<Vector> data = Variable.Array<Vector>(n).Named("x");
    // Create latent indicator variable for each data point
    VariableArray<int> z = Variable.Array<int>(n).Named("z");

    // The mixture of Gaussians model: each point picks a component via z[n], and the
    // Switch block selects that component's mean and precision.
    using (Variable.ForEach(n))
    {
        z[n] = Variable.Discrete(weights);
        using (Variable.Switch(z[n]))
        {
            data[n] = Variable.VectorGaussianFromMeanAndPrecision(means[z[n]], precs[z[n]]);
        }
    }

    // Attach some generated data
    // NOTE(review): GenerateData is defined elsewhere in this file; presumably it samples
    // from a 2-component mixture with mixing proportion truePi - verify against its definition.
    double truePi = 0.6;
    data.ObservedValue = GenerateData(n.SizeAsInt, truePi);

    // Initialise messages randomly to break symmetry
    VariableArray<Discrete> zInit = Variable.Array<Discrete>(n).Named("zInit");
    bool useObservedValue = true;
    if (useObservedValue)
    {
        // Random point-mass initialisers supplied as an observed array.
        zInit.ObservedValue = Util.ArrayInit(n.SizeAsInt, i => Discrete.PointMass(Rand.Int(k.SizeAsInt), k.SizeAsInt));
    }
    else
    {
        // This approach doesn't work, because Infer.NET notices that Rand.Int is stochastic and thinks that it should perform message-passing here.
        using (Variable.ForEach(n))
        {
            var randk = Variable<int>.Factor(new Func<int, int>(Rand.Int), (Variable<int>)k.Size);
            randk.SetValueRange(k);
            zInit[n] = Variable<Discrete>.Factor(Discrete.PointMass, randk, (Variable<int>)k.Size);
        }
    }
    z[n].InitialiseTo(zInit[n]);

    // The inference
    InferenceEngine ie = new InferenceEngine();
    ie.Algorithm = new VariationalMessagePassing();
    //ie.Compiler.GenerateInMemory = false;
    //ie.NumberOfIterations = 200;
    Dirichlet wDist = (Dirichlet)ie.Infer(weights);
    Vector wEstMean = wDist.GetMean();

    object meansActual = ie.Infer(means);
    Console.WriteLine("means = ");
    Console.WriteLine(meansActual);
    var precsActual = ie.Infer<IList<Wishart>>(precs);
    Console.WriteLine("precs = ");
    Console.WriteLine(precsActual);
    Console.WriteLine("w = {0} should be {1}", wEstMean, Vector.FromArray(truePi, 1 - truePi));
    //Console.WriteLine(StringUtil.JoinColumns("z = ", ie.Infer(z)));
    // Component labels are exchangeable, so accept truePi appearing in either slot.
    Assert.True(
        MMath.AbsDiff(wEstMean[0], truePi) < 0.05 ||
        MMath.AbsDiff(wEstMean[1], truePi) < 0.05);
}
/// <summary>
/// Minimize a multidimensional scalar function starting at x, using Powell's
/// direction-set method (line minimizations along a maintained set of directions,
/// periodically replacing the direction of greatest decrease with the net step).
/// Modifies x to be the minimum.
/// </summary>
/// <param name="func">The objective function to minimize.</param>
/// <param name="x">On entry, the starting point; on exit, the located minimum.</param>
/// <exception cref="InvalidOperationException">
/// If a line minimization increases the objective, or the iteration limit is exceeded.
/// </exception>
internal static void MinimizePowell(Func<Vector, double> func, Vector x)
{
    double fTol = 1e-15;
    Vector old_x = Vector.Copy(x);
    Vector ext_x = Vector.Copy(x);
    int d = x.Count;
    /* Initialize the directions to the unit vectors */
    Vector[] dirs = Util.ArrayInit(d, i => Vector.FromArray(Util.ArrayInit(d, j => (i == j) ? 1.0 : 0.0)));
    double fmin = func(x);
    int maxIter = 100;
    int iter;
    for (iter = 0; iter < maxIter; iter++)
    {
        double fx = fmin;
        int i_max = 0;
        double delta_max = 0;
        /* Minimize along each direction, remembering the direction of greatest
         * function decrease. */
        for (int i = 0; i < d; i++)
        {
            double old_min = fmin;
            Vector dir = dirs[i];
            double a = MinimizeLine(func, x, dir, out fmin);
            dir.Scale(a);
            if (fmin > old_min)
            {
                // A line minimization must never make things worse; if it does, the
                // line search is broken, so fail loudly rather than looping.
                throw new InvalidOperationException("objective increased");
            }
            double delta = System.Math.Abs(old_min - fmin);
            if (delta > delta_max)
            {
                delta_max = delta;
                i_max = i;
            }
        }
        // Converged when a full sweep changes the objective by less than fTol (relative).
        if (MMath.AbsDiff(fx, fmin, 1e-14) < fTol)
        {
            break;
        }
        /* Construct new direction from old_x to x. */
        Vector dir2 = x - old_x;
        old_x.SetTo(x);
        /* And extrapolate it. */
        ext_x.SetTo(x);
        x.SetToSum(x, dir2);
        /* Good extrapolation? */
        double fex = func(x);
        x.SetTo(ext_x);
        if (fex < fx)
        {
            // Acceptance test for replacing a direction with the net step dir2
            // (Powell's criterion); only replace when t < 0.
            double t = fx - fmin - delta_max;
            double delta = fx - fex;
            t = 2 * (fx - 2 * fmin + fex) * t * t - delta_max * delta * delta;
            if (t < 0)
            {
                double a = MinimizeLine(func, x, dir2, out fmin);
                dir2.Scale(a);
                /* Replace i_max with the new dir. */
                dirs[i_max] = dir2;
            }
        }
        Console.WriteLine("x = {0} f = {1}", x, fmin);
    }
    if (iter == maxIter)
    {
        throw new InvalidOperationException("exceeded maximum number of iterations");
    }
}
/// <summary>
/// Checks GaussianOp.LogAverageFactor against reference values (from matlab, noted inline)
/// for combinations of uniform/point-mass/proper X, Mean, and Precision, comparing the
/// _slow, GaussianOp_Slow, and fixed-precision code paths where applicable.
/// </summary>
public void GaussianOpLogAverageFactor()
{
    Gaussian uniform = new Gaussian();
    Gaussian X0 = Gaussian.FromMeanAndVariance(3, 0.5);
    Gaussian Mean0 = Gaussian.FromMeanAndVariance(7, 1.0 / 3);
    Gamma Precision0 = Gamma.FromShapeAndScale(3, 3);

    // Fixed precision
    Gamma Precision = Gamma.PointMass(3);
    Gaussian X = X0;
    // Uniform mean: the factor integrates to 1, so the log-average is 0.
    Gaussian Mean = uniform;
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), 0, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), 0, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), 0, 1e-4) < 1e-4);
    Mean = Mean0;
    // in matlab: normpdfln(3,7,[],0.5+1/3+1/3)
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -7.8532, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), -7.8532, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), -7.8532, 1e-4) < 1e-4);
    Mean = Gaussian.PointMass(Mean0.GetMean());
    // in matlab: normpdfln(3,7,[],0.5+1/3)
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -10.42777775, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), -10.42777775, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), -10.42777775, 1e-4) < 1e-4);
    // Uniform X: again the factor integrates to 1.
    X = uniform;
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), 0, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), 0, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor(X, Mean, Precision.Point), 0, 1e-4) < 1e-4);

    // Unknown precision
    Precision = Precision0;
    X = X0;
    Mean = Mean0;
    // converge the precision message.  (only matters if KeepLastMessage is set).
    //for (int i = 0; i < 10; i++) PrecisionAverageConditional(precisionMessage);
    // in matlab: log(t_normal_exact(mx-my,vx+vy,a+1,b))
    // log(t_normal_exact(3-7,0.5+1/3,3,1/3))
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -8.4363, 1e-4) < 1e-4);
    Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(X, Mean, Precision), -8.4363, 1e-4) < 1e-4);
    Mean = Gaussian.PointMass(Mean0.GetMean());
    // converge the precision message.  (only matters if KeepLastMessage is set).
    //for (int i = 0; i < 10; i++) PrecisionAverageConditional(precisionMessage);
    // in matlab: log(t_normal_exact(3-7,0.5,3,1/3))
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -9.9890, 1e-4) < 1e-4);
    X = Gaussian.PointMass(X0.GetMean());
    Mean = Mean0;
    // converge the precision message.  (only matters if KeepLastMessage is set).
    //for (int i = 0; i < 10; i++) PrecisionAverageConditional(precisionMessage);
    // in matlab: log(t_normal_exact(3-7,1/3,3,1/3))
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -10.478382, 1e-4) < 1e-4);
    X = Gaussian.PointMass(X0.GetMean());
    Mean = Gaussian.PointMass(Mean0.GetMean());
    // in matlab: log(t_normal_exact(3-7,1e-4,3,1/3)) or tpdfln(3-7,0,2*1/3,2*3+1)
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), -11.1278713, 1e-4) < 1e-4);
    X = uniform;
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), 0, 1e-4) < 1e-4);

    // uniform precision
    // the answer should always be Double.PositiveInfinity
    Precision = Gamma.Uniform();
    X = X0;
    Mean = Mean0;
    Assert.True(MMath.AbsDiff(GaussianOp.LogAverageFactor_slow(X, Mean, Precision), Double.PositiveInfinity, 1e-4) < 1e-4);

    // Regression case with an extremely small variance on X.
    Assert.True(MMath.AbsDiff(GaussianOp_Slow.LogAverageFactor(new Gaussian(-0.641, 9.617e-22), Gaussian.PointMass(-1), new Gamma(1, 1)), -1.133394734344457, 1e-8) < 1e-4);
    // Smoke test: should run without throwing (result not checked).
    GaussianOp_Slow.LogAverageFactor(new Gaussian(8.156, 9.653), Gaussian.PointMass(-1), new Gamma(1, 1));
}