public void LearnTest3()
{
    // Train a model that recognizes sequences which start with a zero
    // and are followed by any number of ones.
    int[][] sequences = new int[][]
    {
        new int[] { 0, 1, 1, 1, 1, 0, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 0, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    };

    // A Hidden Markov Model with 3 states over a
    // two-character output alphabet (zero and one).
    HiddenMarkovModel hmm = new HiddenMarkovModel(3, 2);

    // Fit the model until the average log-likelihood improves
    // by less than 0.0001, with no cap on iterations.
    var teacher = new BaumWelchLearning(hmm) { Tolerance = 0.0001, Iterations = 0 };
    double ll = teacher.Run(sequences);

    // Viterbi log-likelihoods for sequences drawn from the model...
    double l1; hmm.Decode(new int[] { 0, 1 }, out l1);       // 0.4999
    double l2; hmm.Decode(new int[] { 0, 1, 1, 1 }, out l2); // 0.1145

    // ...and for sequences which do not start with zero
    // (these should be far less probable).
    double l3; hmm.Decode(new int[] { 1, 1 }, out l3);       // 0.0000
    double l4; hmm.Decode(new int[] { 1, 0, 0, 0 }, out l4); // 0.0000

    // Sequences containing a few errors still score higher than the
    // ones not starting with zero, illustrating the temporal
    // elasticity and error tolerance of HMMs.
    double l5; hmm.Decode(new int[] { 0, 1, 0, 1, 1, 1, 1, 1, 1 }, out l5); // 0.0002
    double l6; hmm.Decode(new int[] { 0, 1, 1, 1, 1, 1, 1, 0, 1 }, out l6); // 0.0002

    // Convert the log-likelihoods back into probabilities.
    ll = System.Math.Exp(ll);
    l1 = System.Math.Exp(l1);
    l2 = System.Math.Exp(l2);
    l3 = System.Math.Exp(l3);
    l4 = System.Math.Exp(l4);
    l5 = System.Math.Exp(l5);
    l6 = System.Math.Exp(l6);

    Assert.AreEqual(1.2114235662225779, ll, 1e-4);
    Assert.AreEqual(0.4999419764097881, l1, 1e-4);
    Assert.AreEqual(0.1145702973735144, l2, 1e-4);
    Assert.AreEqual(0.0000529972606821, l3, 1e-4);
    Assert.AreEqual(0.0000000000000001, l4, 1e-4);
    Assert.AreEqual(0.0002674509390361, l5, 1e-4);
    Assert.AreEqual(0.0002674509390361, l6, 1e-4);

    Assert.IsTrue(l1 > l3 && l1 > l4);
    Assert.IsTrue(l2 > l3 && l2 > l4);
}
// Verifies the structure of the classifier's threshold (rejection) model and
// that the classifier reproduces the training labels while rejecting an
// unrelated sequence. Returns the last likelihood computed by the classifier.
private static double testThresholdModel(int[][] inputs, int[] outputs, HiddenMarkovClassifier classifier, double likelihood)
{
    HiddenMarkovModel threshold = classifier.Threshold;

    // The threshold model stacks the states of both class models (3 + 3).
    Assert.AreEqual(6, threshold.States);

    // Self-transition probabilities are copied from each class model...
    Assert.AreEqual(classifier.Models[0].Transitions[0, 0], threshold.Transitions[0, 0], 1e-10);
    Assert.AreEqual(classifier.Models[0].Transitions[1, 1], threshold.Transitions[1, 1], 1e-10);
    Assert.AreEqual(classifier.Models[0].Transitions[2, 2], threshold.Transitions[2, 2], 1e-10);

    Assert.AreEqual(classifier.Models[1].Transitions[0, 0], threshold.Transitions[3, 3], 1e-10);
    Assert.AreEqual(classifier.Models[1].Transitions[1, 1], threshold.Transitions[4, 4], 1e-10);
    Assert.AreEqual(classifier.Models[1].Transitions[2, 2], threshold.Transitions[5, 5], 1e-10);

    // ...while cross-model transitions are impossible
    // (negative infinity in log-space).
    for (int i = 0; i < 3; i++)
    {
        for (int j = 3; j < 6; j++)
        {
            Assert.AreEqual(Double.NegativeInfinity, threshold.Transitions[i, j]);
        }
    }

    for (int i = 3; i < 6; i++)
    {
        for (int j = 0; j < 3; j++)
        {
            Assert.AreEqual(Double.NegativeInfinity, threshold.Transitions[i, j]);
        }
    }

    Assert.IsFalse(Matrix.HasNaN(threshold.LogTransitions));

    classifier.Sensitivity = 0.5;

    // Will assert the models have learned the sequences correctly.
    for (int i = 0; i < inputs.Length; i++)
    {
        int expected = outputs[i];
        int actual = classifier.Compute(inputs[i], out likelihood);
        Assert.AreEqual(expected, actual);
    }

    // A sequence unlike the training data should be rejected (class -1).
    int[] r0 = new int[] { 1, 1, 0, 0, 2 };

    double logRejection;
    int c = classifier.Compute(r0, out logRejection);

    Assert.AreEqual(-1, c);
    // FIX: compare doubles with an explicit tolerance instead of exact
    // equality, consistent with the other floating-point asserts below.
    Assert.AreEqual(0.99994164708402866, logRejection, 1e-10);

    logRejection = threshold.Evaluate(r0);
    Assert.AreEqual(-5.6077079936209504, logRejection, 1e-10);

    threshold.Decode(r0, out logRejection);
    Assert.AreEqual(-9.3103554170761686, logRejection, 1e-10);

    // Each row of every model's transition matrix must sum to one
    // (transitions are stored in log-space, hence the Exp calls).
    foreach (var model in classifier.Models)
    {
        double[,] A = model.Transitions;

        for (int i = 0; i < A.GetLength(0); i++)
        {
            double[] row = A.Exp().GetRow(i);
            double sum = row.Sum();
            Assert.AreEqual(1, sum, 1e-10);
        }
    }

    // The threshold model's rows must also be stochastic
    // (looser tolerance — NOTE(review): presumably because the threshold
    // model is assembled rather than trained; confirm against the learner).
    {
        double[,] A = classifier.Threshold.Transitions;

        for (int i = 0; i < A.GetLength(0); i++)
        {
            double[] row = A.GetRow(i);
            double sum = row.Exp().Sum();
            Assert.AreEqual(1, sum, 1e-6);
        }
    }

    return likelihood;
}
public void LearnTest2()
{
    // Declare some testing data
    int[][] inputs = new int[][]
    {
        new int[] { 0, 0, 1, 2 },    // Class 0
        new int[] { 0, 1, 1, 2 },    // Class 0
        new int[] { 0, 0, 0, 1, 2 }, // Class 0
        new int[] { 0, 1, 2, 2, 2 }, // Class 0

        new int[] { 2, 2, 1, 0 },    // Class 1
        new int[] { 2, 2, 2, 1, 0 }, // Class 1
        new int[] { 2, 2, 2, 1, 0 }, // Class 1
        new int[] { 2, 2, 2, 2, 1 }, // Class 1
    };

    int[] outputs = new int[]
    {
        0, 0, 0, 0, // First four sequences are of class 0
        1, 1, 1, 1, // Last four sequences are of class 1
    };

    // We are trying to predict two different classes
    int classes = 2;

    // Each sequence may have up to 3 symbols (0,1,2)
    int symbols = 3;

    // Nested models will have 3 states each
    int[] states = new int[] { 3, 3 };

    // Creates a new Hidden Markov Model Classifier with the given parameters
    HiddenMarkovClassifier classifier = new HiddenMarkovClassifier(classes, states, symbols);

    // Create a new learning algorithm to train the sequence classifier
    var teacher = new HiddenMarkovClassifierLearning(classifier,

        // Train each model until the log-likelihood changes less than 0.001
        modelIndex => new BaumWelchLearning(classifier.Models[modelIndex])
        {
            Tolerance = 0.001,
            Iterations = 0
        }
    );

    // Enable support for sequence rejection
    teacher.Rejection = true;

    // Train the sequence classifier using the algorithm
    double likelihood = teacher.Run(inputs, outputs);

    // Will assert the models have learned the sequences correctly.
    for (int i = 0; i < inputs.Length; i++)
    {
        int expected = outputs[i];
        int actual = classifier.Compute(inputs[i], out likelihood);
        Assert.AreEqual(expected, actual);
    }

    // The threshold model stacks the states of both class models (3 + 3),
    // copying the self-transitions from each of them.
    HiddenMarkovModel threshold = classifier.Threshold;

    Assert.AreEqual(6, threshold.States);

    Assert.AreEqual(classifier.Models[0].Transitions[0, 0], threshold.Transitions[0, 0], 1e-10);
    Assert.AreEqual(classifier.Models[0].Transitions[1, 1], threshold.Transitions[1, 1], 1e-10);
    Assert.AreEqual(classifier.Models[0].Transitions[2, 2], threshold.Transitions[2, 2], 1e-10);

    Assert.AreEqual(classifier.Models[1].Transitions[0, 0], threshold.Transitions[3, 3], 1e-10);
    Assert.AreEqual(classifier.Models[1].Transitions[1, 1], threshold.Transitions[4, 4], 1e-10);
    Assert.AreEqual(classifier.Models[1].Transitions[2, 2], threshold.Transitions[5, 5], 1e-10);

    Assert.IsFalse(Matrix.HasNaN(threshold.Transitions));

    // A sequence unlike the training data should be rejected (class -1).
    int[] r0 = new int[] { 1, 1, 0, 0, 2 };

    double logRejection;
    int c = classifier.Compute(r0, out logRejection);

    Assert.AreEqual(-1, c);
    // FIX: compare doubles with an explicit tolerance instead of exact
    // equality, consistent with the other floating-point asserts here.
    Assert.AreEqual(0.99569011079012049, logRejection, 1e-10);
    Assert.IsFalse(double.IsNaN(logRejection));

    logRejection = threshold.Evaluate(r0);
    Assert.AreEqual(-6.7949285513628528, logRejection, 1e-10);
    Assert.IsFalse(double.IsNaN(logRejection));

    threshold.Decode(r0, out logRejection);
    Assert.AreEqual(-8.902077561009957, logRejection, 1e-10);
    Assert.IsFalse(double.IsNaN(logRejection));
}
public void LearnTest3()
{
    #region doc_learn
    // We will create a Hidden Markov Model to detect
    // whether a given sequence starts with a zero.
    int[][] sequences = new int[][]
    {
        new int[] { 0, 1, 1, 1, 1, 0, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 0, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    };

    // Create a new Hidden Markov Model with 3 states for
    // an output alphabet of two characters (zero and one)
    var hmm = new HiddenMarkovModel(states: 3, symbols: 2);

    // Create the learning algorithm
    var teacher = new BaumWelchLearning(hmm)
    {
        Tolerance = 0.0001, // until log-likelihood changes less than 0.0001
        Iterations = 0      // and use as many iterations as needed
    };

    // Estimate the model
    teacher.Learn(sequences);

    // Now we can calculate the probability that the given
    // sequences originated from the model. We can compute
    // those probabilities using the Viterbi algorithm:
    double vl1; hmm.Decode(new int[] { 0, 1 }, out vl1);       // -0.69317855
    double vl2; hmm.Decode(new int[] { 0, 1, 1, 1 }, out vl2); // -2.16644878

    // Sequences which do not start with zero have much lesser probability.
    double vl3; hmm.Decode(new int[] { 1, 1 }, out vl3);       // -11.3580034
    double vl4; hmm.Decode(new int[] { 1, 0, 0, 0 }, out vl4); // -38.6759130

    // Sequences which contains few errors have higher probability
    // than the ones which do not start with zero. This shows some
    // of the temporal elasticity and error tolerance of the HMMs.
    double vl5; hmm.Decode(new int[] { 0, 1, 0, 1, 1, 1, 1, 1, 1 }, out vl5); // -8.22665
    double vl6; hmm.Decode(new int[] { 0, 1, 1, 1, 1, 1, 1, 0, 1 }, out vl6); // -8.22665

    // Additionally, we can also compute the probability
    // of those sequences using the forward algorithm:
    double fl1 = hmm.LogLikelihood(new int[] { 0, 1 });       // -0.000031369
    double fl2 = hmm.LogLikelihood(new int[] { 0, 1, 1, 1 }); // -0.087005121

    // Sequences which do not start with zero have much lesser probability.
    double fl3 = hmm.LogLikelihood(new int[] { 1, 1 });       // -10.66485629
    double fl4 = hmm.LogLikelihood(new int[] { 1, 0, 0, 0 }); // -36.61788687

    // Sequences which contains few errors have higher probability
    // than the ones which do not start with zero. This shows some
    // of the temporal elasticity and error tolerance of the HMMs.
    double fl5 = hmm.LogLikelihood(new int[] { 0, 1, 0, 1, 1, 1, 1, 1, 1 }); // -3.3744416
    double fl6 = hmm.LogLikelihood(new int[] { 0, 1, 1, 1, 1, 1, 1, 0, 1 }); // -3.3744416
    #endregion

    // Pin the Viterbi (decoding) log-likelihoods...
    Assert.AreEqual(-0.69317855044301457, vl1, 1e-4);
    Assert.AreEqual(-2.166448784882073, vl2, 1e-4);
    Assert.AreEqual(-11.358003471944887, vl3, 1e-4);
    Assert.AreEqual(-38.675913006221506, vl4, 1e-4);
    Assert.AreEqual(-8.22664996599565, vl5, 1e-4);
    Assert.AreEqual(-8.2266499659956516, vl6, 1e-4);

    Assert.IsTrue(vl1 > vl3 && vl1 > vl4);
    Assert.IsTrue(vl2 > vl3 && vl2 > vl4);

    // ...and the forward-algorithm log-likelihoods.
    Assert.AreEqual(-0.000031369883069287674, fl1, 1e-4);
    Assert.AreEqual(-0.087005121634496585, fl2, 1e-4);
    Assert.AreEqual(-10.664856291384941, fl3, 1e-4);
    Assert.AreEqual(-36.617886878165528, fl4, 1e-4);
    Assert.AreEqual(-3.3744415883604058, fl5, 1e-4);
    Assert.AreEqual(-3.3744426259067066, fl6, 1e-4);
}
public void learn_test()
{
    #region doc_learn
    Accord.Math.Random.Generator.Seed = 0;

    // We will try to create a Hidden Markov Model which
    // can detect if a given sequence starts with a zero
    // and has any number of ones after that.
    //
    int[][] sequences = new int[][]
    {
        new int[] { 0, 1, 1, 1, 1, 0, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 0, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
        new int[] { 0, 1, 1, 1, 1, 1, 1, 1, 1, 1 },
    };

    // Creates a new Hidden Markov Model with 3 states for
    // an output alphabet of two characters (zero and one)
    //
    HiddenMarkovModel hmm = new HiddenMarkovModel(new Forward(3), 2);

    // Try to fit the model to the data until the difference in
    // the average log-likelihood changes only by as little as 0.0001
    //
    var teacher = new ViterbiLearning(hmm) { Tolerance = 0.0001, Iterations = 0 };

    // Learn the model
    teacher.Learn(sequences);

    // Calculate the probability that the given
    // sequences originated from the model
    //
    double l1; hmm.Decode(new int[] { 0, 1 }, out l1);       // 0.5394
    double l2; hmm.Decode(new int[] { 0, 1, 1, 1 }, out l2); // 0.4485

    // Sequences which do not start with zero have much lesser probability.
    double l3; hmm.Decode(new int[] { 1, 1 }, out l3);       // 0.0864
    double l4; hmm.Decode(new int[] { 1, 0, 0, 0 }, out l4); // 0.0004

    // Sequences which contains few errors have higher probability
    // than the ones which do not start with zero. This shows some
    // of the temporal elasticity and error tolerance of the HMMs.
    //
    double l5; hmm.Decode(new int[] { 0, 1, 0, 1, 1, 1, 1, 1, 1 }, out l5); // 0.0154
    double l6; hmm.Decode(new int[] { 0, 1, 1, 1, 1, 1, 1, 0, 1 }, out l6); // 0.0154
    #endregion

    // Convert the decoded log-likelihoods into probabilities.
    l1 = System.Math.Exp(l1);
    l2 = System.Math.Exp(l2);
    l3 = System.Math.Exp(l3);
    l4 = System.Math.Exp(l4);
    l5 = System.Math.Exp(l5);
    l6 = System.Math.Exp(l6);

    Assert.AreEqual(0.53946360153256712, l1, 1e-6);
    Assert.AreEqual(0.44850249229903377, l2, 1e-6);
    Assert.AreEqual(0.08646414524833077, l3, 1e-6);
    Assert.AreEqual(0.00041152263374485, l4, 1e-6);
    Assert.AreEqual(0.01541807695931400, l5, 1e-6);
    Assert.AreEqual(0.01541807695931400, l6, 1e-6);

    Assert.IsTrue(l1 > l3 && l1 > l4);
    Assert.IsTrue(l2 > l3 && l2 > l4);
}
// Example: trains a two-state continuous (Gaussian) HMM on sequences that
// alternate between low (0..1) and high (5..7) values, then evaluates and
// decodes sequences against the learned model.
public static void BaumWelchLearningContinuous()
{
    // Create continuous sequences. In the sequences below, there
    // seems to be two states, one for values between 0 and 1 and
    // another for values between 5 and 7. The states seems to be
    // switched on every observation.
    double[][] sequences = new double[][]
    {
        new double[] { 0.1, 5.2, 0.3, 6.7, 0.1, 6.0 },
        new double[] { 0.2, 6.2, 0.3, 6.3, 0.1, 5.0 },
        new double[] { 0.1, 7.0, 0.1, 7.0, 0.2, 5.6 },
    };

    // Specify a initial normal distribution for the samples.
    Gaussian density = new Gaussian();

    // Creates a continuous hidden Markov Model with two states organized in a forward
    // topology and an underlying univariate Normal distribution as probability density.
    var model = new HiddenMarkovModel(new Ergodic(2), density);

    // Configure the learning algorithms to train the sequence classifier until the
    // difference in the average log-likelihood changes only by as little as 0.0001
    var teacher = new BaumWelchLearning(model)
    {
        Tolerance = 0.0001,
        Iterations = 0,
    };

    // Fit the model
    double logLikelihood = teacher.Run(sequences);

    // See the log-probability of the sequences learned
    double a1 = model.Evaluate(new double[] { 0.1, 5.2, 0.3, 6.7, 0.1, 6.0 }); // -0.12799388666109757
    double a2 = model.Evaluate(new double[] { 0.2, 6.2, 0.3, 6.3, 0.1, 5.0 }); // 0.01171157434400194

    Console.WriteLine("a1 = {0}", a1);
    Console.WriteLine("a2 = {0}", a2);

    // See the log-probability of an unrelated sequence
    double a3 = model.Evaluate(new[] { 1.1, 2.2, 1.3, 3.2, 4.2, 1.0 }); // -298.7465244473417

    Console.WriteLine("a3 = {0}", a3);

    // We can transform the log-probabilities to actual probabilities:
    double likelihood = System.Math.Exp(logLikelihood);
    a1 = System.Math.Exp(a1); // 0.879
    a2 = System.Math.Exp(a2); // 1.011
    a3 = System.Math.Exp(a3); // 0.000

    Console.WriteLine("a1 = {0}", a1);
    Console.WriteLine("a2 = {0}", a2);
    Console.WriteLine("a3 = {0}", a3);

    // FIX: in the original, this comment was split so that the word "After"
    // fell outside the comment marker, leaving a stray token that broke
    // compilation. Restored as a proper comment:
    // We can also ask the model to decode one of the sequences. After
    // this step the state variable will contain: { 0, 1, 0, 1, 0, 1 }
    double lll;
    int[] states = model.Decode(new double[] { 0.1, 5.2, 0.3, 6.7, 0.1, 6.0 }, out lll);

    Console.WriteLine("states: {{{0}}}", string.Join(", ", states));
    Console.WriteLine("lll: {0}", lll);
}