Example #1
        public void RunTest()
        {
            var inputs  = QuasiNewtonHiddenLearningTest.inputs;
            var outputs = QuasiNewtonHiddenLearningTest.outputs;

            // Initialize the HCRF from a previously trained discrete HMM classifier
            HiddenMarkovClassifier hmm = DiscreteHiddenMarkovClassifierPotentialFunctionTest.CreateModel1();
            var function = new MarkovDiscreteFunction(hmm);

            var model  = new HiddenConditionalRandomField<int>(function);
            var target = new HiddenResilientGradientLearning<int>(model);

            double[] actual   = new double[inputs.Length];
            double[] expected = new double[inputs.Length];

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i]   = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }

            for (int i = 0; i < inputs.Length; i++)
            {
                Assert.AreEqual(expected[i], actual[i]);
            }

            // Log-likelihood before training
            double ll0 = model.LogLikelihood(inputs, outputs);

            double error = Double.NegativeInfinity;

            // Run 50 epochs of Resilient Backpropagation learning
            for (int i = 0; i < 50; i++)
            {
                error = target.RunEpoch(inputs, outputs);
            }

            // Log-likelihood after training
            double ll1 = model.LogLikelihood(inputs, outputs);

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i]   = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }

            Assert.AreEqual(-0.0019419916698781847, ll0, 1e-10);
            Assert.AreEqual(0, error, 1e-10);
            Assert.AreEqual(error, ll1);
            Assert.IsFalse(Double.IsNaN(ll0));
            Assert.IsFalse(Double.IsNaN(error));

            for (int i = 0; i < inputs.Length; i++)
            {
                Assert.AreEqual(expected[i], actual[i]);
            }

            Assert.IsTrue(ll1 > ll0);
        }
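Note that this test reads its data from shared fixtures that are not shown on this page. As a rough, hypothetical sketch of the shape that data takes (integer observation sequences over two symbols with one class label per sequence, matching the MarkovDiscreteFunction(2, 2, 2) used in Example #4; the actual fixture values may differ):

        // Hypothetical stand-in for the shared fixture data referenced above;
        // the real QuasiNewtonHiddenLearningTest.inputs/outputs are defined in
        // another test class. Only the shapes matter here: integer sequences
        // paired with one class label per sequence.
        static readonly int[][] inputs =
        {
            new[] { 0, 1, 1, 0 }, // sequences assumed to be from class 0
            new[] { 0, 0, 1, 0 },
            new[] { 1, 1, 1, 1 }, // sequences assumed to be from class 1
            new[] { 1, 0, 1, 1 },
        };
        static readonly int[] outputs = { 0, 0, 1, 1 };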
Example #2
        private static void resilientgradienthiddenlearning()
        {
            // Suppose we would like to learn how to classify the
            // following set of sequences among three class labels:
            int[][] inputSequences =
            {
                // First class of sequences: starts and
                // ends with zeros, ones in the middle:
                new[] { 0, 1, 1, 1, 0 },
                new[] { 0, 0, 1, 1, 0, 0 },
                new[] { 0, 1, 1, 1, 1, 0 },

                // Second class of sequences: starts with
                // twos and switches to ones until the end.
                new[] { 2, 2, 2, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 1, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 2, 2, 2, 1, 1, 1, 1 },

                // Third class of sequences: can start
                // with any symbols, but ends with three.
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 0, 0, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 2, 2, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 2, 2, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 3, 3, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
            };

            // Now consider their respective class labels
            int[] outputLabels =
            {
                /* Sequences  1-3 are from class 0: */ 0, 0, 0,
                /* Sequences  4-6 are from class 1: */ 1, 1, 1,
                /* Sequences 7-14 are from class 2: */ 2, 2, 2, 2, 2, 2, 2, 2
            };


            // Create the Hidden Conditional Random Field using a set of discrete features
            var function   = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3);
            var classifier = new HiddenConditionalRandomField<int>(function);

            // Create a learning algorithm
            var teacher = new HiddenResilientGradientLearning<int>(classifier)
            {
                Iterations = 50
            };

            // Run the algorithm and learn the models
            teacher.Run(inputSequences, outputLabels);

            int[] answers = inputSequences.Apply(classifier.Compute);
        }
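The snippet stops after computing answers. A minimal follow-up sketch (reusing the names defined above) that checks how many of the training sequences come back with the correct label:

            // Count how many training sequences the learned classifier labels
            // correctly; answers and outputLabels are defined in the example above.
            int correct = 0;
            for (int i = 0; i < answers.Length; i++)
                if (answers[i] == outputLabels[i])
                    correct++;
            Console.WriteLine("training accuracy = {0:P}", correct / (double)answers.Length);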
Example #3
        private void btnLearnHCRF_Click(object sender, EventArgs e)
        {
            if (gridSamples.Rows.Count == 0)
            {
                MessageBox.Show("Please load or insert some data first.");
                return;
            }

            var samples = database.Samples;
            var classes = database.Classes;

            double[][][] inputs  = new double[samples.Count][][];
            int[]        outputs = new int[samples.Count];

            for (int i = 0; i < inputs.Length; i++)
            {
                inputs[i]  = samples[i].Input;
                outputs[i] = samples[i].Output;
            }

            int    iterations = 100;
            double tolerance  = 0.01;


            hcrf = new HiddenConditionalRandomField<double[]>(
                new MarkovMultivariateFunction(hmm));


            // Create the learning algorithm for the ensemble classifier
            var teacher = new HiddenResilientGradientLearning<double[]>(hcrf)
            {
                Iterations = iterations,
                Tolerance  = tolerance
            };


            // Run the learning algorithm
            teacher.Learn(inputs, outputs);


            foreach (var sample in database.Samples)
            {
                sample.RecognizedAs = hcrf.Compute(sample.Input);
            }

            foreach (DataGridViewRow row in gridSamples.Rows)
            {
                var sample = row.DataBoundItem as Sequence;
                row.DefaultCellStyle.BackColor = (sample.RecognizedAs == sample.Output) ?
                                                 Color.LightGreen : Color.White;
            }
        }
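This handler refers to hmm, hcrf, and database members that are declared elsewhere in the form class. A sketch of what those declarations could look like, inferred from how the fields are used here (the types are assumptions, not the sample's actual code):

        // Assumed surrounding declarations (hypothetical; the real sample
        // defines these elsewhere in the form class):
        private HiddenMarkovClassifier<Independent<NormalDistribution>> hmm;
        private HiddenConditionalRandomField<double[]> hcrf;
        private SamplesDatabase database; // hypothetical type exposing Samples and Classes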
Example #4
        public void RunTest2()
        {
            var inputs  = QuasiNewtonHiddenLearningTest.inputs;
            var outputs = QuasiNewtonHiddenLearningTest.outputs;


            Accord.Math.Tools.SetupGenerator(0);

            var function = new MarkovDiscreteFunction(2, 2, 2);

            var model  = new HiddenConditionalRandomField<int>(function);
            var target = new HiddenResilientGradientLearning<int>(model);

            double[] actual   = new double[inputs.Length];
            double[] expected = new double[inputs.Length];

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i]   = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }


            double ll0 = model.LogLikelihood(inputs, outputs);

            double error = Double.PositiveInfinity;

            for (int i = 0; i < 50; i++)
            {
                error = target.RunEpoch(inputs, outputs);
            }

            double ll1 = model.LogLikelihood(inputs, outputs);

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i]   = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }


            Assert.AreEqual(-5.5451774444795623, ll0, 1e-10);
            Assert.AreEqual(0, error, 1e-10);
            Assert.IsFalse(double.IsNaN(error));

            for (int i = 0; i < inputs.Length; i++)
            {
                Assert.AreEqual(expected[i], actual[i]);
            }

            Assert.IsTrue(ll1 > ll0);
        }
Example #5
        private static void hcrf(int[][] inputs, int[] outputs)
        {
            // Create a learning algorithm
            var teacher = new HiddenResilientGradientLearning<int>()
            {
                Function      = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3),
                MaxIterations = 50
            };

            // Run the algorithm and learn the models
            var hcrf = teacher.Learn(inputs, outputs);

            // Compute the classifier answers for the given inputs
            int[] answers = hcrf.Decide(inputs);
        }
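A minimal sketch of how this helper could be invoked, assuming the discrete sequences and labels shown in Example #2 (the call below is illustrative and not part of the original snippet):

            // Illustrative call, reusing the inputSequences/outputLabels
            // arrays defined in Example #2:
            hcrf(inputSequences, outputLabels);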
Example #6
        public void RunTest()
        {
            var inputs = QuasiNewtonHiddenLearningTest.inputs;
            var outputs = QuasiNewtonHiddenLearningTest.outputs;

            HiddenMarkovClassifier hmm = HiddenMarkovClassifierPotentialFunctionTest.CreateModel1();
            var function = new MarkovDiscreteFunction(hmm);

            var model = new HiddenConditionalRandomField<int>(function);
            var target = new HiddenResilientGradientLearning<int>(model);

            double[] actual = new double[inputs.Length];
            double[] expected = new double[inputs.Length];

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i] = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }

            for (int i = 0; i < inputs.Length; i++)
                Assert.AreEqual(expected[i], actual[i]);

            double ll0 = model.LogLikelihood(inputs, outputs);

            double error = Double.NegativeInfinity;
            for (int i = 0; i < 50; i++)
                error = target.RunEpoch(inputs, outputs);

            double ll1 = model.LogLikelihood(inputs, outputs);

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i] = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }

            Assert.AreEqual(-0.00046872579976353634, ll0, 1e-10);
            Assert.AreEqual(0, error, 1e-10);
            Assert.AreEqual(error, ll1);
            Assert.IsFalse(Double.IsNaN(ll0));
            Assert.IsFalse(Double.IsNaN(error));

            for (int i = 0; i < inputs.Length; i++)
                Assert.AreEqual(expected[i], actual[i]);

            Assert.IsTrue(ll1 > ll0);
        }
Example #7
        public void learn_test()
        {
            Accord.Math.Random.Generator.Seed = 0;

            #region doc_learn_1
            // Let's say we would like to build a very simple mechanism for gesture recognition.
            // In this example, we will be trying to create a classifier that can distinguish
            // between the words "hello", "car", and "wardrobe".

            // Let's say we decided to acquire some data, and we asked some people to perform
            // those words in front of a Kinect camera, and, using Microsoft's SDK, we were able
            // to capture the x and y coordinates of each hand while the word was being performed.

            // Let's say we decided to represent our frames as:
            //
            //    double[] frame = { leftHandX, leftHandY, rightHandX, rightHandY }; // 4 dimensions
            //
            // Since we captured words, this means we captured sequences of frames as we described
            // above. Let's write some of those as rough examples to explain how gesture recognition
            // can be done:

            double[][] hello =
            {
                new double[] { 1.0, 0.1, 0.0, 0.0 }, // let's say the word
                new double[] { 0.0, 1.0, 0.1, 0.1 }, // hello took 6 frames
                new double[] { 0.0, 1.0, 0.1, 0.1 }, // to be recorded.
                new double[] { 0.0, 0.0, 1.0, 0.0 },
                new double[] { 0.0, 0.0, 1.0, 0.0 },
                new double[] { 0.0, 0.0, 0.1, 1.1 },
            };

            double[][] car =
            {
                new double[] { 0.0, 0.0, 0.0, 1.0 }, // the car word
                new double[] { 0.1, 0.0, 1.0, 0.1 }, // took only 4.
                new double[] { 0.0, 0.0, 0.1, 0.0 },
                new double[] { 1.0, 0.0, 0.0, 0.0 },
            };

            double[][] wardrobe =
            {
                new double[] { 0.0, 0.0, 1.0, 0.0 }, // same for the
                new double[] { 0.1, 0.0, 1.0, 0.1 }, // wardrobe word.
                new double[] { 0.0, 0.1, 1.0, 0.0 },
                new double[] { 0.1, 0.0, 1.0, 0.1 },
            };

            // Please note that a real-world example would involve *lots* of samples for each word.
            // Here, we are considering just one from each class, which is clearly sub-optimal and
            // should _never_ be done in practice. Please keep in mind that we are doing it this way
            // only to simplify this example of how to create and use HCRFs.

            // These are the words we have in our vocabulary:
            double[][][] words = { hello, car, wardrobe };

            // Now, let's associate integer labels with them. This is needed
            // for the case where there are multiple samples for each word.
            int[] labels = { 0, 1, 2 };

            // Create a new learning algorithm to train the hidden Markov model sequence classifier
            var teacher = new HiddenMarkovClassifierLearning<Independent<NormalDistribution>, double[]>()
            {
                // Train each model until the log-likelihood changes less than 0.001
                Learner = (i) => new BaumWelchLearning<Independent<NormalDistribution>, double[]>()
                {
                    Topology = new Forward(5), // this value can be found by trial-and-error

                    // We will create our classifiers assuming an independent Gaussian distribution
                    // for each component in our feature vectors (assuming a Naive Bayes assumption).
                    Emissions = (s) => new Independent<NormalDistribution>(dimensions: 4), // 4 dimensions

                    Tolerance  = 0.001,
                    Iterations = 100,

                    // This is necessary so the code doesn't blow up when it realizes there is only one
                    // sample per word class. But this could also be needed in normal situations as well:
                    FittingOptions = new IndependentOptions()
                    {
                        InnerOption = new NormalOptions()
                        {
                            Regularization = 1e-5
                        }
                    }
                }
            };

            // PS: In case you find exceptions trying to configure your model, you might want
            //     to try disabling parallel processing to get more descriptive error messages:
            // teacher.ParallelOptions.MaxDegreeOfParallelism = 1;

            // Finally, we can run the learning algorithm!
            var    hmm           = teacher.Learn(words, labels);
            double logLikelihood = teacher.LogLikelihood;

            // At this point, the classifier should be successfully
            // able to distinguish between our three word classes:
            //
            int tc1 = hmm.Decide(hello);    // should be 0
            int tc2 = hmm.Decide(car);      // should be 1
            int tc3 = hmm.Decide(wardrobe); // should be 2
            #endregion

            Assert.AreEqual(0, tc1);
            Assert.AreEqual(1, tc2);
            Assert.AreEqual(2, tc3);

            #region doc_learn_2
            // Now, we can use the Markov classifier to initialize a HCRF
            var baseline = HiddenConditionalRandomField.FromHiddenMarkov(hmm);

            // We can check that both are equivalent, although they have
            // formulations that can be learned with different methods:
            int[] predictedLabels = baseline.Decide(words);

            #endregion

            // We can check that both are equivalent, although they have
            // formulations that can be learned with different methods
            //
            for (int i = 0; i < words.Length; i++)
            {
                // Should be the same
                int expected = hmm.Decide(words[i]);
                int actual   = baseline.Decide(words[i]);

                // Should be the same
                double h0 = hmm.LogLikelihood(words[i], 0);
                double c0 = baseline.LogLikelihood(words[i], 0);

                double h1 = hmm.LogLikelihood(words[i], 1);
                double c1 = baseline.LogLikelihood(words[i], 1);

                double h2 = hmm.LogLikelihood(words[i], 2);
                double c2 = baseline.LogLikelihood(words[i], 2);

                Assert.AreEqual(expected, predictedLabels[i]);
                Assert.AreEqual(expected, actual);
                Assert.AreEqual(h0, c0, 1e-10);
                Assert.IsTrue(h1.IsRelativelyEqual(c1, 1e-10));
                Assert.IsTrue(h2.IsRelativelyEqual(c2, 1e-10));
            }

            Accord.Math.Random.Generator.Seed = 0;

            #region doc_learn_3
            // Now we can learn the HCRF using one of the best learning
            // algorithms available, Resilient Backpropagation learning:

            // Create the Resilient Backpropagation learning algorithm
            var rprop = new HiddenResilientGradientLearning<double[]>()
            {
                Function = baseline.Function, // use the same HMM function

                Iterations = 50,
                Tolerance  = 1e-5
            };

            // Run the algorithm and learn the models
            var hcrf = rprop.Learn(words, labels);

            // At this point, the HCRF should be successfully
            // able to distinguish between our three word classes:
            //
            int hc1 = hcrf.Decide(hello);    // should be 0
            int hc2 = hcrf.Decide(car);      // should be 1
            int hc3 = hcrf.Decide(wardrobe); // should be 2
            #endregion

            Assert.AreEqual(0, hc1);
            Assert.AreEqual(1, hc2);
            Assert.AreEqual(2, hc3);
        }
Example #8
        public void SimpleGestureRecognitionTest()
        {
            // Let's say we would like to build a very simple mechanism for
            // gesture recognition. In this example, we will be trying to
            // create a classifier that can distinguish between the words
            // "hello", "car", and "wardrobe".

            // Let's say we decided to acquire some data, and we asked some
            // people to perform those words in front of a Kinect camera, and,
            // using Microsoft's SDK, we were able to capture the x and y
            // coordinates of each hand while the word was being performed.

            // Let's say we decided to represent our frames as:
            //
            //    double[] frame = { leftHandX, leftHandY, rightHandX, rightHandY };
            //
            // Since we captured words, this means we captured sequences of
            // frames as we described above. Let's write some of those as
            // rough examples to explain how gesture recognition can be done:

            double[][] hello =
            {
                new double[] { 1.0, 0.1, 0.0, 0.0 }, // let's say the word
                new double[] { 0.0, 1.0, 0.1, 0.1 }, // hello took 6 frames
                new double[] { 0.0, 1.0, 0.1, 0.1 }, // to be recorded.
                new double[] { 0.0, 0.0, 1.0, 0.0 },
                new double[] { 0.0, 0.0, 1.0, 0.0 },
                new double[] { 0.0, 0.0, 0.1, 1.1 },
            };

            double[][] car =
            {
                new double[] { 0.0, 0.0, 0.0, 1.0 }, // the car word
                new double[] { 0.1, 0.0, 1.0, 0.1 }, // took only 4.
                new double[] { 0.0, 0.0, 0.1, 0.0 },
                new double[] { 1.0, 0.0, 0.0, 0.0 },
            };

            double[][] wardrobe =
            {
                new double[] { 0.0, 0.0, 1.0, 0.0 }, // same for the
                new double[] { 0.1, 0.0, 1.0, 0.1 }, // wardrobe word.
                new double[] { 0.0, 0.1, 1.0, 0.0 },
                new double[] { 0.1, 0.0, 1.0, 0.1 },
            };

            // Please note that a real-world example would involve *lots*
            // of samples for each word. Here, we are considering just one from
            // each class, which is clearly sub-optimal and should _never_ be done
            // in practice. For example purposes, however, please disregard this.

            // Those are the words we have in our vocabulary:
            //
            double[][][] words = { hello, car, wardrobe };

            // Now, let's associate integer labels with them. This is needed
            // for the case where there are multiple samples for each word.
            //
            int[] labels = { 0, 1, 2 };


            // We will create our classifiers assuming an independent
            // Gaussian distribution for each component in our feature
            // vectors (like assuming a Naive Bayes assumption).

            var initial = new Independent<NormalDistribution>
            (
                new NormalDistribution(0, 1),
                new NormalDistribution(0, 1),
                new NormalDistribution(0, 1),
                new NormalDistribution(0, 1)
            );


            // Now, we can proceed and create our classifier.
            //
            int numberOfWords  = 3; // we are trying to distinguish between 3 words
            int numberOfStates = 5; // this value can be found by trial-and-error

            var hmm = new HiddenMarkovClassifier<Independent<NormalDistribution>>
            (
                classes: numberOfWords,
                topology: new Forward(numberOfStates), // word classifiers should use a forward topology
                initial: initial
            );

            // Create a new learning algorithm to train the sequence classifier
            var teacher = new HiddenMarkovClassifierLearning<Independent<NormalDistribution>>(hmm,

                // Train each model until the log-likelihood changes less than 0.001
                modelIndex => new BaumWelchLearning<Independent<NormalDistribution>>(hmm.Models[modelIndex])
                {
                    Tolerance = 0.001,
                    Iterations = 100,

                    // This is necessary so the code doesn't blow up when it realizes
                    // there is only one sample per word class. But this could also be
                    // needed in normal situations as well.
                    //
                    FittingOptions = new IndependentOptions()
                    {
                        InnerOption = new NormalOptions()
                        {
                            Regularization = 1e-5
                        }
                    }
                }
            );

            // Finally, we can run the learning algorithm!
            double logLikelihood = teacher.Run(words, labels);

            // At this point, the classifier should be successfully
            // able to distinguish between our three word classes:
            //
            int tc1 = hmm.Compute(hello);
            int tc2 = hmm.Compute(car);
            int tc3 = hmm.Compute(wardrobe);

            Assert.AreEqual(0, tc1);
            Assert.AreEqual(1, tc2);
            Assert.AreEqual(2, tc3);

            // Now, we can use the Markov classifier to initialize a HCRF
            var function = new MarkovMultivariateFunction(hmm);
            var hcrf = new HiddenConditionalRandomField<double[]>(function);


            // We can check that both are equivalent, although they have
            // formulations that can be learned with different methods
            //
            for (int i = 0; i < words.Length; i++)
            {
                // Should be the same
                int expected = hmm.Compute(words[i]);
                int actual   = hcrf.Compute(words[i]);

                // Should be the same
                double h0 = hmm.LogLikelihood(words[i], 0);
                double c0 = hcrf.LogLikelihood(words[i], 0);

                double h1 = hmm.LogLikelihood(words[i], 1);
                double c1 = hcrf.LogLikelihood(words[i], 1);

                double h2 = hmm.LogLikelihood(words[i], 2);
                double c2 = hcrf.LogLikelihood(words[i], 2);

                Assert.AreEqual(expected, actual);
                Assert.AreEqual(h0, c0, 1e-10);
                Assert.IsTrue(h1.IsRelativelyEqual(c1, 1e-10));
                Assert.IsTrue(h2.IsRelativelyEqual(c2, 1e-10));

                Assert.IsFalse(double.IsNaN(c0));
                Assert.IsFalse(double.IsNaN(c1));
                Assert.IsFalse(double.IsNaN(c2));
            }


            // Now we can learn the HCRF using one of the best learning
            // algorithms available, Resilient Backpropagation learning:

            // Create a learning algorithm
            var rprop = new HiddenResilientGradientLearning<double[]>(hcrf)
            {
                Iterations = 50,
                Tolerance  = 1e-5
            };

            // Run the algorithm and learn the models
            double error = rprop.Run(words, labels);

            // At this point, the HCRF should be successfully
            // able to distinguish between our three word classes:
            //
            int hc1 = hcrf.Compute(hello);
            int hc2 = hcrf.Compute(car);
            int hc3 = hcrf.Compute(wardrobe);

            Assert.AreEqual(0, hc1);
            Assert.AreEqual(1, hc2);
            Assert.AreEqual(2, hc3);
        }
Example #9
        private static void resilientgradienthiddenlearning()
        {
            // Suppose we would like to learn how to classify the
            // following set of sequences among three class labels: 
            int[][] inputSequences =
            {
                // First class of sequences: starts and
                // ends with zeros, ones in the middle:
                new[] { 0, 1, 1, 1, 0 },        
                new[] { 0, 0, 1, 1, 0, 0 },     
                new[] { 0, 1, 1, 1, 1, 0 },     
 
                // Second class of sequences: starts with
                // twos and switches to ones until the end.
                new[] { 2, 2, 2, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 1, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 2, 2, 2, 1, 1, 1, 1 },
 
                // Third class of sequences: can start
                // with any symbols, but ends with three.
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 0, 0, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 2, 2, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 2, 2, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 3, 3, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
            };

            // Now consider their respective class labels
            int[] outputLabels =
            {
                /* Sequences  1-3 are from class 0: */ 0, 0, 0,
                /* Sequences  4-6 are from class 1: */ 1, 1, 1,
                /* Sequences 7-14 are from class 2: */ 2, 2, 2, 2, 2, 2, 2, 2
            };


            // Create the Hidden Conditional Random Field using a set of discrete features
            var function = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3);
            var classifier = new HiddenConditionalRandomField<int>(function);

            // Create a learning algorithm
            var teacher = new HiddenResilientGradientLearning<int>(classifier)
            {
                Iterations = 50
            };

            // Run the algorithm and learn the models
            teacher.Run(inputSequences, outputLabels);

            int[] answers = inputSequences.Apply(classifier.Compute);

        }
Example #10
        private void btnLearnHCRF_Click(object sender, EventArgs e)
        {
            if (gridSamples.Rows.Count == 0)
            {
                MessageBox.Show("Please load or insert some data first.");
                return;
            }

            var samples = database.Samples;
            var classes = database.Classes;

            double[][][] inputs = new double[samples.Count][][];
            int[] outputs = new int[samples.Count];

            for (int i = 0; i < inputs.Length; i++)
            {
                inputs[i] = samples[i].Input;
                outputs[i] = samples[i].Output;
            }

            int iterations = 100;
            double tolerance = 0.01;


            hcrf = new HiddenConditionalRandomField<double[]>(
                new MarkovMultivariateFunction(hmm));


            // Create the learning algorithm for the ensemble classifier
            var teacher = new HiddenResilientGradientLearning<double[]>(hcrf)
            {
                Iterations = iterations,
                Tolerance = tolerance
            };


            // Run the learning algorithm
            double error = teacher.Run(inputs, outputs);


            foreach (var sample in database.Samples)
            {
                sample.RecognizedAs = hcrf.Compute(sample.Input);
            }

            foreach (DataGridViewRow row in gridSamples.Rows)
            {
                var sample = row.DataBoundItem as Sequence;
                row.DefaultCellStyle.BackColor = (sample.RecognizedAs == sample.Output) ?
                    Color.LightGreen : Color.White;
            }
        }
Example #11
        public void SimpleGestureRecognitionTest()
        {
            // Let's say we would like to build a very simple mechanism for
            // gesture recognition. In this example, we will be trying to
            // create a classifier that can distinguish between the words
            // "hello", "car", and "wardrobe". 
            
            // Let's say we decided to acquire some data, and we asked some
            // people to perform those words in front of a Kinect camera, and,
            // using Microsoft's SDK, we were able to capture the x and y
            // coordinates of each hand while the word was being performed.

            // Let's say we decided to represent our frames as:
            // 
            //    double[] frame = { leftHandX, leftHandY, rightHandX, rightHandY };
            //
            // Since we captured words, this means we captured sequences of
            // frames as we described above. Let's write some of those as 
            // rough examples to explain how gesture recognition can be done:

            double[][] hello =
            {
                new double[] { 1.0, 0.1, 0.0, 0.0 }, // let's say the word
                new double[] { 0.0, 1.0, 0.1, 0.1 }, // hello took 6 frames
                new double[] { 0.0, 1.0, 0.1, 0.1 }, // to be recorded.
                new double[] { 0.0, 0.0, 1.0, 0.0 },
                new double[] { 0.0, 0.0, 1.0, 0.0 },
                new double[] { 0.0, 0.0, 0.1, 1.1 },
            };

            double[][] car =
            {
                new double[] { 0.0, 0.0, 0.0, 1.0 }, // the car word
                new double[] { 0.1, 0.0, 1.0, 0.1 }, // took only 4.
                new double[] { 0.0, 0.0, 0.1, 0.0 },
                new double[] { 1.0, 0.0, 0.0, 0.0 },
            };

            double[][] wardrobe =
            {
                new double[] { 0.0, 0.0, 1.0, 0.0 }, // same for the
                new double[] { 0.1, 0.0, 1.0, 0.1 }, // wardrobe word.
                new double[] { 0.0, 0.1, 1.0, 0.0 },
                new double[] { 0.1, 0.0, 1.0, 0.1 },
            };

            // Please note that a real-world example would involve *lots*
            // of samples for each word. Here, we are considering just one from
            // each class, which is clearly sub-optimal and should _never_ be done
            // in practice. For example purposes, however, please disregard this.

            // Those are the words we have in our vocabulary:
            //
            double[][][] words = { hello, car, wardrobe }; 

            // Now, let's associate integer labels with them. This is needed
            // for the case where there are multiple samples for each word.
            //
            int[] labels = { 0, 1, 2 };


            // We will create our classifiers assuming an independent
            // Gaussian distribution for each component in our feature
            // vectors (like assuming a Naive Bayes assumption).

            var initial = new Independent<NormalDistribution>
            (
                new NormalDistribution(0, 1), 
                new NormalDistribution(0, 1), 
                new NormalDistribution(0, 1), 
                new NormalDistribution(0, 1)  
            );


            // Now, we can proceed and create our classifier. 
            //
            int numberOfWords = 3;  // we are trying to distinguish between 3 words
            int numberOfStates = 5; // this value can be found by trial-and-error

            var hmm = new HiddenMarkovClassifier<Independent<NormalDistribution>>
            (
                classes: numberOfWords, 
                topology: new Forward(numberOfStates), // word classifiers should use a forward topology
                initial: initial
            );

            // Create a new learning algorithm to train the sequence classifier
            var teacher = new HiddenMarkovClassifierLearning<Independent<NormalDistribution>>(hmm,

                // Train each model until the log-likelihood changes less than 0.001
                modelIndex => new BaumWelchLearning<Independent<NormalDistribution>>(hmm.Models[modelIndex])
                {
                    Tolerance = 0.001,
                    Iterations = 100,

                    // This is necessary so the code doesn't blow up when it realizes
                    // there is only one sample per word class. But this could also be
                    // needed in normal situations as well.
                    //
                    FittingOptions = new IndependentOptions()
                    {
                        InnerOption = new NormalOptions() { Regularization = 1e-5 }
                    }
                }
            );

            // Finally, we can run the learning algorithm!
            double logLikelihood = teacher.Run(words, labels);

            // At this point, the classifier should be successfully 
            // able to distinguish between our three word classes:
            //
            int tc1 = hmm.Compute(hello);
            int tc2 = hmm.Compute(car);
            int tc3 = hmm.Compute(wardrobe);

            Assert.AreEqual(0, tc1);
            Assert.AreEqual(1, tc2);
            Assert.AreEqual(2, tc3);

            // Now, we can use the Markov classifier to initialize a HCRF
            var function = new MarkovMultivariateFunction(hmm);
            var hcrf = new HiddenConditionalRandomField<double[]>(function);


            // We can check that both are equivalent, although they have
            // formulations that can be learned with different methods
            //
            for (int i = 0; i < words.Length; i++)
            {
                // Should be the same
                int expected = hmm.Compute(words[i]);
                int actual = hcrf.Compute(words[i]);

                // Should be the same
                double h0 = hmm.LogLikelihood(words[i], 0);
                double c0 = hcrf.LogLikelihood(words[i], 0);

                double h1 = hmm.LogLikelihood(words[i], 1);
                double c1 = hcrf.LogLikelihood(words[i], 1);

                double h2 = hmm.LogLikelihood(words[i], 2);
                double c2 = hcrf.LogLikelihood(words[i], 2);

                Assert.AreEqual(expected, actual);
                Assert.AreEqual(h0, c0, 1e-10);
                Assert.IsTrue(h1.IsRelativelyEqual(c1, 1e-10));
                Assert.IsTrue(h2.IsRelativelyEqual(c2, 1e-10));

                Assert.IsFalse(double.IsNaN(c0));
                Assert.IsFalse(double.IsNaN(c1));
                Assert.IsFalse(double.IsNaN(c2));
            }


            // Now we can learn the HCRF using one of the best learning
            // algorithms available, Resilient Backpropagation learning:

            // Create a learning algorithm
            var rprop = new HiddenResilientGradientLearning<double[]>(hcrf)
            {
                Iterations = 50,
                Tolerance = 1e-5
            };

            // Run the algorithm and learn the models
            double error = rprop.Run(words, labels);

            // At this point, the HCRF should be successfully 
            // able to distinguish between our three word classes:
            //
            int hc1 = hcrf.Compute(hello);
            int hc2 = hcrf.Compute(car);
            int hc3 = hcrf.Compute(wardrobe);

            Assert.AreEqual(0, hc1);
            Assert.AreEqual(1, hc2);
            Assert.AreEqual(2, hc3);
        }
Example #12
        //[Ignore("Intensive")]
        public void learn_pendigits_normalization()
        {
            #region doc_learn_pendigits
            // Ensure we get reproducible results
            Accord.Math.Random.Generator.Seed = 0;

            // Download the PENDIGITS dataset from UCI ML repository
            var pendigits = new Pendigits(path: Path.GetTempPath());

            // Get and pre-process the training set
            double[][][] trainInputs  = pendigits.Training.Item1;
            int[]        trainOutputs = pendigits.Training.Item2;

            // Pre-process the digits so each of them is centered and scaled
            trainInputs = trainInputs.Apply(Accord.Statistics.Tools.ZScores);
            trainInputs = trainInputs.Apply((x) => x.Subtract(x.Min())); // make them positive

            // Create some prior distributions to help initialize our parameters
            var priorC = new WishartDistribution(dimension: 2, degreesOfFreedom: 5);
            var priorM = new MultivariateNormalDistribution(dimension: 2);

            // Create a template Markov classifier that we can use as a base for the HCRF
            var hmmc = new HiddenMarkovClassifier<MultivariateNormalDistribution, double[]>(
                classes: pendigits.NumberOfClasses, topology: new Forward(5),
                initial: (i, j) => new MultivariateNormalDistribution(mean: priorM.Generate(), covariance: priorC.Generate()));

            // Create a new learning algorithm for creating HCRFs
            var teacher = new HiddenResilientGradientLearning<double[]>()
            {
                Function = new MarkovMultivariateFunction(hmmc),

                MaxIterations = 10
            };

            // The following line is only needed to ensure reproducible results. Please remove it to enable full parallelization
            teacher.ParallelOptions.MaxDegreeOfParallelism = 1; // (Remove, comment, or change this line to enable full parallelism)

            // Use the learning algorithm to create a classifier
            var hcrf = teacher.Learn(trainInputs, trainOutputs);

            // Compute predictions for the training set
            int[] trainPredicted = hcrf.Decide(trainInputs);

            // Check the performance of the classifier by comparing with the ground-truth:
            var    m1       = new GeneralConfusionMatrix(predicted: trainPredicted, expected: trainOutputs);
            double trainAcc = m1.Accuracy; // should be 0.83476272155517439


            // Prepare the testing set
            double[][][] testInputs  = pendigits.Testing.Item1;
            int[]        testOutputs = pendigits.Testing.Item2;

            // Apply the same normalizations
            testInputs = testInputs.Apply(Accord.Statistics.Tools.ZScores);
            testInputs = testInputs.Apply((x) => x.Subtract(x.Min())); // make them positive

            // Compute predictions for the test set
            int[] testPredicted = hcrf.Decide(testInputs);

            // Check the performance of the classifier by comparing with the ground-truth:
            var    m2      = new GeneralConfusionMatrix(predicted: testPredicted, expected: testOutputs);
            double testAcc = m2.Accuracy; // should be 0.81932212436615959
            #endregion
#if NET35
            Assert.AreEqual(0.89594053744997137d, trainAcc, 1e-5);
            Assert.AreEqual(0.89605017347211102d, testAcc, 1e-5);
#else
            Assert.IsTrue(trainAcc.IsEqual(0.83476272155517439d, 1e-5) || trainAcc.IsEqual(0.85162950257289882d, 1e-5));
            Assert.IsTrue(testAcc.IsEqual(0.86028823058446757d, 1e-5) || testAcc.IsEqual(0.81932212436615959, 1e-5));
#endif
        }
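As a cross-check, the same accuracy can also be derived from a zero-one loss, as the longer variant of this test in Example #13 does:

            // Equivalent accuracy via the zero-one loss (one minus the
            // misclassification rate), mirroring the check in Example #13:
            double loss = new Accord.Math.Optimization.Losses.ZeroOneLoss(testOutputs).Loss(testPredicted);
            Console.WriteLine("test accuracy = {0}", 1.0 - loss);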
        [Ignore("Intensive")] // reproducible parallelization of this test requires #870
        public void learn_pendigits_normalization()
        {
            Console.WriteLine("Starting ResilientGradientHiddenLearningTest.learn_pendigits_normalization");
            string localDownloadPath = Path.Combine(NUnit.Framework.TestContext.CurrentContext.TestDirectory, "pendigits3");

            using (var travis = new KeepTravisAlive())
            {
                #region doc_learn_pendigits
                // Ensure we get reproducible results
                Accord.Math.Random.Generator.Seed = 0;

                // Download the PENDIGITS dataset from UCI ML repository
                var pendigits = new Pendigits(path: localDownloadPath);

                // Get and pre-process the training set
                double[][][] trainInputs  = pendigits.Training.Item1;
                int[]        trainOutputs = pendigits.Training.Item2;

                // Pre-process the digits so each of them is centered and scaled
                trainInputs = trainInputs.Apply(Accord.Statistics.Tools.ZScores);
                trainInputs = trainInputs.Apply((x) => x.Subtract(x.Min())); // make them positive

                // Create some prior distributions to help initialize our parameters
                var priorC = new WishartDistribution(dimension: 2, degreesOfFreedom: 5);
                var priorM = new MultivariateNormalDistribution(dimension: 2);

                // Create a new learning algorithm for creating continuous hidden Markov model classifiers
                var teacher1 = new HiddenMarkovClassifierLearning<MultivariateNormalDistribution, double[]>()
                {
                    // This tells the generative algorithm how to train each of the component models. Note: The learning
                    // algorithm is more efficient if all generic parameters are specified, including the fitting options
                    Learner = (i) => new BaumWelchLearning<MultivariateNormalDistribution, double[], NormalOptions>()
                    {
                        Topology = new Forward(5), // Each model will have a forward topology with 5 states

                        // Their emissions will be multivariate Normal distributions initialized using the prior distributions
                        Emissions = (j) => new MultivariateNormalDistribution(mean: priorM.Generate(), covariance: priorC.Generate()),

                        // We will train until the relative change in the average log-likelihood is less than 1e-6 between iterations
                        Tolerance     = 1e-6,
                        MaxIterations = 1000, // or until we perform 1000 iterations (which is unlikely for this dataset)

                        // We will prevent our covariance matrices from becoming degenerate by adding a small
                        // regularization value to their diagonal until they become positive-definite again:
                        FittingOptions = new NormalOptions()
                        {
                            Regularization = 1e-6
                        }
                    }
                };

                //// The following line is only needed to ensure reproducible results. Please remove it to enable full parallelization
                //teacher1.ParallelOptions.MaxDegreeOfParallelism = 1; // (Remove, comment, or change this line to enable full parallelism)

                // Use the learning algorithm to create a classifier
                var hmmc = teacher1.Learn(trainInputs, trainOutputs);

                // Create a new learning algorithm for creating HCRFs
                var teacher2 = new HiddenResilientGradientLearning<double[]>()
                {
                    Function = new MarkovMultivariateFunction(hmmc),

                    MaxIterations = 10
                };

                //// The following line is only needed to ensure reproducible results. Please remove it to enable full parallelization
                //teacher2.ParallelOptions.MaxDegreeOfParallelism = 1; // (Remove, comment, or change this line to enable full parallelism)

                // Use the learning algorithm to create a classifier
                var hcrf = teacher2.Learn(trainInputs, trainOutputs);

                // Compute predictions for the training set
                int[] trainPredicted = hcrf.Decide(trainInputs);

                // Check the performance of the classifier by comparing with the ground-truth:
                var    m1       = new GeneralConfusionMatrix(predicted: trainPredicted, expected: trainOutputs);
                double trainAcc = m1.Accuracy; // should be 0.81532304173813608


                // Prepare the testing set
                double[][][] testInputs  = pendigits.Testing.Item1;
                int[]        testOutputs = pendigits.Testing.Item2;

                // Apply the same normalizations
                testInputs = testInputs.Apply(Accord.Statistics.Tools.ZScores);
                testInputs = testInputs.Apply((x) => x.Subtract(x.Min())); // make them positive

                // Compute predictions for the test set
                int[] testPredicted = hcrf.Decide(testInputs);

                // Check the performance of the classifier by comparing with the ground-truth:
                var    m2      = new GeneralConfusionMatrix(predicted: testPredicted, expected: testOutputs);
                double testAcc = m2.Accuracy; // should be 0.77061649319455561
                #endregion

                var loss = new Accord.Math.Optimization.Losses.ZeroOneLoss(testOutputs).Loss(testPredicted);
                Assert.AreEqual(1.0 - loss, m2.Accuracy);

                Assert.AreEqual(10, m1.Classes);
                Assert.AreEqual(10, m2.Classes);

#if NET35
                Assert.AreEqual(0.89594053744997137d, trainAcc, 1e-5);
                Assert.AreEqual(0.89605017347211102d, testAcc, 1e-5);
#else
                Assert.IsTrue(trainAcc.IsEqual(0.81532304173813608, 1e-5));
                Assert.IsTrue(testAcc.IsEqual(0.77061649319455561, 1e-5));
#endif
            }
        }
Example #14
        public void ComputeTest2()
        {
            // Suppose we would like to learn how to classify the
            // following set of sequences among three class labels: 

            int[][] inputSequences =
            {
                // First class of sequences: starts and
                // ends with zeros, ones in the middle:
                new[] { 0, 1, 1, 1, 0 },        
                new[] { 0, 0, 1, 1, 0, 0 },     
                new[] { 0, 1, 1, 1, 1, 0 },     
 
                // Second class of sequences: starts with
                // twos and switches to ones until the end.
                new[] { 2, 2, 2, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 1, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 2, 2, 2, 1, 1, 1, 1 },
 
                // Third class of sequences: can start
                // with any symbols, but ends with three.
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 0, 0, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 2, 2, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 2, 2, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 3, 3, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
            };

            // Now consider their respective class labels
            int[] outputLabels =
            {
                /* Sequences  1-3 are from class 0: */ 0, 0, 0,
                /* Sequences  4-6 are from class 1: */ 1, 1, 1,
                /* Sequences 7-14 are from class 2: */ 2, 2, 2, 2, 2, 2, 2, 2
            };


            // Create the Hidden Conditional Random Field using a set of discrete features
            var function = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3);
            var classifier = new HiddenConditionalRandomField<int>(function);

            // Create a learning algorithm
            var teacher = new HiddenResilientGradientLearning<int>(classifier)
            {
                Iterations = 50
            };

            // Run the algorithm and learn the models
            teacher.Run(inputSequences, outputLabels);


            // After training has finished, we can check the 
            // output classification label for some sequences. 

            int y1 = classifier.Compute(new[] { 0, 1, 1, 1, 0 });    // output is y1 = 0
            int y2 = classifier.Compute(new[] { 0, 0, 1, 1, 0, 0 }); // output is y2 = 0

            int y3 = classifier.Compute(new[] { 2, 2, 2, 2, 1, 1 }); // output is y3 = 1
            int y4 = classifier.Compute(new[] { 2, 2, 1, 1 });       // output is y4 = 1

            int y5 = classifier.Compute(new[] { 0, 0, 1, 3, 3, 3 }); // output is y5 = 2
            int y6 = classifier.Compute(new[] { 2, 0, 2, 2, 3, 3 }); // output is y6 = 2

            Assert.AreEqual(0, y1);
            Assert.AreEqual(0, y2);
            Assert.AreEqual(1, y3);
            Assert.AreEqual(1, y4);
            Assert.AreEqual(2, y5);
            Assert.AreEqual(2, y6);
        }
Example #15
        public void RunTest2()
        {
            var inputs = QuasiNewtonHiddenLearningTest.inputs;
            var outputs = QuasiNewtonHiddenLearningTest.outputs;


            Accord.Math.Tools.SetupGenerator(0);

            var function = new MarkovDiscreteFunction(2, 2, 2);

            var model = new HiddenConditionalRandomField<int>(function);
            var target = new HiddenResilientGradientLearning<int>(model);

            double[] actual = new double[inputs.Length];
            double[] expected = new double[inputs.Length];

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i] = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }


            double ll0 = model.LogLikelihood(inputs, outputs);

            double error = Double.PositiveInfinity;
            for (int i = 0; i < 50; i++)
            {
                error = target.RunEpoch(inputs, outputs);
            }

            double ll1 = model.LogLikelihood(inputs, outputs);

            for (int i = 0; i < inputs.Length; i++)
            {
                actual[i] = model.Compute(inputs[i]);
                expected[i] = outputs[i];
            }


            Assert.AreEqual(-5.5451774444795623, ll0, 1e-10);
            Assert.AreEqual(0, error, 1e-10);
            Assert.IsFalse(double.IsNaN(error));

            for (int i = 0; i < inputs.Length; i++)
                Assert.AreEqual(expected[i], actual[i]);

            Assert.IsTrue(ll1 > ll0);
        }
Example #16
        static void runHiddenConditionalRandomFieldLearningExample()
        {
            // Observation sequences should only contain symbols that are greater than or equal to 0, and less than the number of symbols.
            int[][] observationSequences =
            {
                // First class of sequences: starts and ends with zeros, ones in the middle.
                new[] { 0, 1, 1, 1, 0 },
                new[] { 0, 0, 1, 1, 0, 0 },
                new[] { 0, 1, 1, 1, 1, 0 },

                // Second class of sequences: starts with twos and switches to ones until the end.
                new[] { 2, 2, 2, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 1, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 2, 2, 2, 1, 1, 1, 1 },

                // Third class of sequences: can start with any symbols, but ends with three.
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 0, 0, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 2, 2, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 2, 2, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 3, 3, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
            };

            // Consider their respective class labels.
            // Class labels have to be zero-based and successive integers.
            int[] classLabels =
            {
                0, 0, 0,               // Sequences 1-3 are from class 0.
                1, 1, 1,               // Sequences 4-6 are from class 1.
                2, 2, 2, 2, 2, 2, 2, 2 // Sequences 7-14 are from class 2.
            };

            // Create the Hidden Conditional Random Field using a set of discrete features.
            var function = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3);
            var hcrf     = new HiddenConditionalRandomField<int>(function);

            // Create a learning algorithm.
            var trainer = new HiddenResilientGradientLearning<int>(hcrf)
            {
                Iterations = 50
            };

            // Run the algorithm and learn the models.
            double error = trainer.Run(observationSequences, classLabels);

            Console.WriteLine("the error in the last iteration = {0}", error);

            // Check the output classification label for some sequences.
            int y1 = hcrf.Compute(new[] { 0, 1, 1, 1, 0 });  // output is y1 = 0.
            Console.WriteLine("output class = {0}", y1);
            int y2 = hcrf.Compute(new[] { 0, 0, 1, 1, 0, 0 });  // output is y2 = 0.
            Console.WriteLine("output class = {0}", y2);

            int y3 = hcrf.Compute(new[] { 2, 2, 2, 2, 1, 1 });  // output is y3 = 1.
            Console.WriteLine("output class = {0}", y3);
            int y4 = hcrf.Compute(new[] { 2, 2, 1, 1 });  // output is y4 = 1.
            Console.WriteLine("output class = {0}", y4);

            int y5 = hcrf.Compute(new[] { 0, 0, 1, 3, 3, 3 });  // output is y5 = 2.
            Console.WriteLine("output class = {0}", y5);
            int y6 = hcrf.Compute(new[] { 2, 0, 2, 2, 3, 3 });  // output is y6 = 2.
            Console.WriteLine("output class = {0}", y6);
        }
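Given the symbol-range constraint stated at the top of this example, a small validation sketch (using the same names and the symbols: 4 setting from above; the check itself is not part of the original example):

            // Sketch: verify every observation symbol lies in [0, symbols)
            // before training, per the constraint noted above (symbols = 4 here).
            const int symbols = 4;
            foreach (int[] sequence in observationSequences)
                foreach (int symbol in sequence)
                    if (symbol < 0 || symbol >= symbols)
                        throw new ArgumentOutOfRangeException("observationSequences",
                            "Symbol " + symbol + " is outside the range [0, " + symbols + ").");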
Example #17
        public void ComputeTest2()
        {
            // Suppose we would like to learn how to classify the
            // following set of sequences among three class labels:

            int[][] inputSequences =
            {
                // First class of sequences: starts and
                // ends with zeros, ones in the middle:
                new[] { 0, 1, 1, 1, 0 },
                new[] { 0, 0, 1, 1, 0, 0 },
                new[] { 0, 1, 1, 1, 1, 0 },

                // Second class of sequences: starts with
                // twos and switches to ones until the end.
                new[] { 2, 2, 2, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 1, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 2, 2, 2, 1, 1, 1, 1 },

                // Third class of sequences: can start
                // with any symbols, but ends with three.
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 0, 0, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 2, 2, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 2, 2, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 3, 3, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
            };

            // Now consider their respective class labels
            int[] outputLabels =
            {
                /* Sequences  1-3 are from class 0: */ 0, 0, 0,
                /* Sequences  4-6 are from class 1: */ 1, 1, 1,
                /* Sequences 7-14 are from class 2: */ 2, 2, 2, 2, 2, 2, 2, 2
            };


            // Create the Hidden Conditional Random Field using a set of discrete features
            var function   = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3);
            var classifier = new HiddenConditionalRandomField<int>(function);

            // Create a learning algorithm
            var teacher = new HiddenResilientGradientLearning<int>(classifier)
            {
                Iterations = 50
            };

            // Run the algorithm and learn the models
            teacher.Run(inputSequences, outputLabels);


            // After training has finished, we can check the
            // output classification label for some sequences.

            int y1 = classifier.Compute(new[] { 0, 1, 1, 1, 0 });    // output is y1 = 0
            int y2 = classifier.Compute(new[] { 0, 0, 1, 1, 0, 0 }); // output is y2 = 0

            int y3 = classifier.Compute(new[] { 2, 2, 2, 2, 1, 1 }); // output is y3 = 1
            int y4 = classifier.Compute(new[] { 2, 2, 1, 1 });       // output is y4 = 1

            int y5 = classifier.Compute(new[] { 0, 0, 1, 3, 3, 3 }); // output is y5 = 2
            int y6 = classifier.Compute(new[] { 2, 0, 2, 2, 3, 3 }); // output is y6 = 2

            Assert.AreEqual(0, y1);
            Assert.AreEqual(0, y2);
            Assert.AreEqual(1, y3);
            Assert.AreEqual(1, y4);
            Assert.AreEqual(2, y5);
            Assert.AreEqual(2, y6);
        }
Example #18
        static void runHiddenConditionalRandomFieldLearningExample()
        {
            // Observation sequences should only contain symbols that are greater than or equal to 0, and less than the number of symbols.
            int[][] observationSequences =
            {
                // First class of sequences: starts and ends with zeros, ones in the middle.
                new[] { 0, 1, 1, 1, 0 },
                new[] { 0, 0, 1, 1, 0, 0 },
                new[] { 0, 1, 1, 1, 1, 0 },

                // Second class of sequences: starts with twos and switches to ones until the end.
                new[] { 2, 2, 2, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 1, 2, 1, 1, 1, 1, 1 },
                new[] { 2, 2, 2, 2, 2, 1, 1, 1, 1 },

                // Third class of sequences: can start with any symbols, but ends with three.
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 0, 0, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 2, 2, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
                new[] { 0, 0, 1, 1, 3, 3, 3, 3 },
                new[] { 2, 2, 0, 3, 3, 3, 3 },
                new[] { 1, 0, 1, 2, 3, 3, 3, 3 },
                new[] { 1, 1, 2, 3, 3, 3, 3 },
            };

            // Consider their respective class labels.
            // Class labels have to be zero-based and successive integers.
            int[] classLabels =
            {
                0, 0, 0,  // Sequences 1-3 are from class 0.
                1, 1, 1,  // Sequences 4-6 are from class 1.
                2, 2, 2, 2, 2, 2, 2, 2  // Sequences 7-14 are from class 2.
            };

            // Create the Hidden Conditional Random Field using a set of discrete features.
            var function = new MarkovDiscreteFunction(states: 3, symbols: 4, outputClasses: 3);
            var hcrf = new HiddenConditionalRandomField<int>(function);

            // Create a learning algorithm.
            var trainer = new HiddenResilientGradientLearning<int>(hcrf)
            {
                Iterations = 50
            };

            // Run the algorithm and learn the models.
            double error = trainer.Run(observationSequences, classLabels);
            Console.WriteLine("the error in the last iteration = {0}", error);

            // Check the output classification label for some sequences.
            int y1 = hcrf.Compute(new[] { 0, 1, 1, 1, 0 });  // output is y1 = 0.
            Console.WriteLine("output class = {0}", y1);
            int y2 = hcrf.Compute(new[] { 0, 0, 1, 1, 0, 0 });  // output is y2 = 0.
            Console.WriteLine("output class = {0}", y2);

            int y3 = hcrf.Compute(new[] { 2, 2, 2, 2, 1, 1 });  // output is y3 = 1.
            Console.WriteLine("output class = {0}", y3);
            int y4 = hcrf.Compute(new[] { 2, 2, 1, 1 });  // output is y4 = 1.
            Console.WriteLine("output class = {0}", y4);

            int y5 = hcrf.Compute(new[] { 0, 0, 1, 3, 3, 3 });  // output is y5 = 2.
            Console.WriteLine("output class = {0}", y5);
            int y6 = hcrf.Compute(new[] { 2, 0, 2, 2, 3, 3 });  // output is y6 = 2.
            Console.WriteLine("output class = {0}", y6);
        }