Example #1
        public void ComputeTest()
        {
            HiddenMarkovModel hmm = DiscreteHiddenMarkovModelFunctionTest.CreateModel2();

            int states = hmm.States;


            var    function = new MarkovDiscreteFunction(hmm);
            var    target = new ConditionalRandomField <int>(states, function);
            double p1, p2;

            int[] observations, expected, actual;

            observations = new int[] { 0, 0, 1, 1, 1, 2 };
            expected     = hmm.Decode(observations, out p1);
            actual       = target.Compute(observations, out p2);

            Assert.IsTrue(expected.IsEqual(actual));
            Assert.AreEqual(p1, p2, 1e-6);


            observations = new int[] { 0, 1, 2, 2, 2 };
            expected     = hmm.Decode(observations, out p1);
            actual       = target.Compute(observations, out p2);

            Assert.IsTrue(expected.IsEqual(actual));
            Assert.AreEqual(p1, p2, 1e-6);
        }
Example #2
        public void LikelihoodTest()
        {
            HiddenMarkovModel hmm = DiscreteHiddenMarkovModelFunctionTest.CreateModel2();

            int states  = hmm.States;
            int symbols = hmm.Symbols;


            var function1 = new MarkovDiscreteFunction(hmm);
            var target1   = new ConditionalRandomField <int>(states, function1);

            var function2 = new MarkovDiscreteFunction(states, symbols);
            var target2   = new ConditionalRandomField <int>(states, function2);


            int[] observations;

            double a, b, la, lb;

            // a and b hold plain (non-log) likelihoods, so they can be compared
            // against their logarithms (la, lb) below.
            observations = new int[] { 0, 0, 1, 1, 1, 2 };
            a            = System.Math.Exp(target1.LogLikelihood(observations, observations));
            b            = System.Math.Exp(target2.LogLikelihood(observations, observations));
            Assert.IsTrue(a > b);

            observations = new int[] { 0, 0, 1, 1, 1, 2 };
            la           = target1.LogLikelihood(observations, observations);
            lb           = target2.LogLikelihood(observations, observations);
            Assert.IsTrue(la > lb);

            double lla = System.Math.Log(a);
            double llb = System.Math.Log(b);

            Assert.AreEqual(lla, la, 1e-6);
            Assert.AreEqual(llb, lb, 1e-6);
        }
Example #3
        public void LikelihoodTest()
        {
            var hmm = DiscreteHiddenMarkovModelFunctionTest.CreateModel2();

            int states  = hmm.States;
            int symbols = hmm.Symbols;

            var hcrf = new ConditionalRandomField <int>(states,
                                                        new MarkovDiscreteFunction(hmm));

            var hmm0  = new HiddenMarkovModel(states, symbols);
            var hcrf0 = new ConditionalRandomField <int>(states,
                                                         new MarkovDiscreteFunction(hmm0));


            int[]  observations = new int[] { 0, 0, 1, 1, 1, 2 };
            double la           = hcrf.LogLikelihood(observations, observations);
            double lb           = hcrf0.LogLikelihood(observations, observations);

            Assert.IsTrue(la > lb);

            double lc = hmm.Evaluate(observations, observations);
            double ld = hmm0.Evaluate(observations, observations);

            Assert.IsTrue(lc > ld);

            double za = hcrf.LogPartition(observations);
            double zb = hcrf0.LogPartition(observations);

            la += za;
            lb += zb;

            Assert.AreEqual(la, lc, 1e-6);
            Assert.AreEqual(lb, ld, 1e-6);
        }
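The relationship this test checks can be written out compactly: for a linear-chain CRF built from an HMM, the conditional log-likelihood returned by LogLikelihood and the joint log-likelihood returned by hmm.Evaluate differ exactly by the log-partition function, which is why adding LogPartition back recovers the HMM value. A standard identity, stated here only for reference:

\[
\log p(y \mid x) \;=\; \log p(x, y) \;-\; \log Z(x)
\qquad\Longrightarrow\qquad
\underbrace{\log p(y \mid x)}_{\texttt{LogLikelihood}} \;+\; \underbrace{\log Z(x)}_{\texttt{LogPartition}} \;=\; \underbrace{\log p(x, y)}_{\texttt{Evaluate}}
\]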
Example #4
        public void RunTest()
        {
            Accord.Math.Random.Generator.Seed = 0;

            int nstates = 3;
            int symbols = 3;

            int[][] sequences = new int[][]
            {
                new int[] { 0, 1, 1, 1, 2 },
                new int[] { 0, 1, 1, 1, 2, 2, 2 },
                new int[] { 0, 0, 1, 1, 2, 2 },
                new int[] { 0, 1, 1, 1, 2, 2, 2 },
                new int[] { 0, 1, 1, 1, 2, 2 },
                new int[] { 0, 1, 1, 2, 2 },
                new int[] { 0, 0, 1, 1, 1, 2, 2 },
                new int[] { 0, 0, 0, 1, 1, 1, 2, 2 },
                new int[] { 0, 1, 1, 2, 2, 2 },
            };


            var function = new MarkovDiscreteFunction(nstates, symbols, new NormalDistribution());
            var model    = new ConditionalRandomField <int>(nstates, function);


            for (int i = 0; i < sequences.Length; i++)
            {
                double p;
                int[]  s = sequences[i];
                int[]  r = model.Compute(s, out p);
                Assert.IsFalse(s.IsEqual(r));
            }

            var target = new QuasiNewtonLearning <int>(model);

            target.ParallelOptions.MaxDegreeOfParallelism = 1;

            int[][] labels       = sequences;
            int[][] observations = sequences;

            double ll0 = model.LogLikelihood(observations, labels);

            double actual = target.Run(observations, labels);

            double ll1 = model.LogLikelihood(observations, labels);

            Assert.IsTrue(ll1 > ll0);


            Assert.AreEqual(-0.0010766857305242183, actual, 1e-6);

            for (int i = 0; i < sequences.Length; i++)
            {
                double p;
                int[]  s = sequences[i];
                int[]  r = model.Compute(s, out p);
                Assert.IsTrue(s.IsEqual(r));
            }
        }
Example #5

        public void RunTest()
        {
            int nstates = 3;
            int symbols = 3;

            int[][] sequences = new int[][]
            {
                new int[] { 0, 1, 1, 1, 2 },
                new int[] { 0, 1, 1, 1, 2, 2, 2 },
                new int[] { 0, 0, 1, 1, 2, 2 },
                new int[] { 0, 1, 1, 1, 2, 2, 2 },
                new int[] { 0, 1, 1, 1, 2, 2 },
                new int[] { 0, 1, 1, 2, 2 },
                new int[] { 0, 0, 1, 1, 1, 2, 2 },
                new int[] { 0, 0, 0, 1, 1, 1, 2, 2 },
                new int[] { 0, 1, 1, 2, 2, 2 },
            };


            var function = new MarkovDiscreteFunction(nstates, symbols);
            var model    = new ConditionalRandomField <int>(nstates, function);


            for (int i = 0; i < sequences.Length; i++)
            {
                double p;
                int[]  s = sequences[i];
                int[]  r = model.Compute(s, out p);
                Assert.IsFalse(s.IsEqual(r));
            }

            var target = new QuasiNewtonLearning <int>(model);

            int[][] labels       = sequences;
            int[][] observations = sequences;

            double ll0 = model.LogLikelihood(observations, labels);

            double actual = target.Run(observations, labels);

            double ll1 = model.LogLikelihood(observations, labels);

            Assert.IsTrue(ll1 > ll0);


            Assert.AreEqual(0, actual, 1e-8);

            for (int i = 0; i < sequences.Length; i++)
            {
                double p;
                int[]  s = sequences[i];
                int[]  r = model.Compute(s, out p);
                Assert.IsTrue(s.IsEqual(r));
            }
        }
Example #6
        public void ConditionalRandomFieldConstructorTest()
        {
            HiddenMarkovModel hmm = DiscreteHiddenMarkovModelFunctionTest.CreateModel1();

            int states   = 2;
            var function = new MarkovDiscreteFunction(hmm);
            var target   = new ConditionalRandomField <int>(states, function);


            Assert.AreEqual(function, target.Function);
            Assert.AreEqual(2, target.States);
        }
Example #7
        /// <summary>
        ///   Constructs a new L-BFGS learning algorithm.
        /// </summary>
        ///
        public QuasiNewtonLearning(ConditionalRandomField <T> model)
        {
            this.model = model;
            this.lbfgs = new BoundedBroydenFletcherGoldfarbShanno(model.Function.Weights.Length);
            this.lbfgs.FunctionTolerance = 1e-3;

            for (int i = 0; i < lbfgs.UpperBounds.Length; i++)
            {
                lbfgs.UpperBounds[i] = 1e10;
                lbfgs.LowerBounds[i] = -1e100;
            }
        }
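For context, here is a minimal standalone sketch of the optimizer this constructor wires up, Accord's BoundedBroydenFletcherGoldfarbShanno (an L-BFGS-B implementation). The member names used below (Function, Gradient, Minimize, Solution) are assumed from that class rather than taken from this page, and the quadratic objective is a toy chosen only to show the call pattern:

    using Accord.Math.Optimization;

    // Toy objective: f(w) = (w0 - 1)^2 + (w1 + 2)^2, minimized at w = (1, -2)
    var lbfgs = new BoundedBroydenFletcherGoldfarbShanno(2)
    {
        Function = w => (w[0] - 1) * (w[0] - 1) + (w[1] + 2) * (w[1] + 2),
        Gradient = w => new[] { 2 * (w[0] - 1), 2 * (w[1] + 2) }
    };

    // Box constraints on every variable, mirroring the loop in the constructor above
    for (int i = 0; i < lbfgs.UpperBounds.Length; i++)
    {
        lbfgs.UpperBounds[i] = 1e10;
        lbfgs.LowerBounds[i] = -1e100;
    }

    lbfgs.Minimize();               // run the bounded quasi-Newton search
    double[] best = lbfgs.Solution; // should land near { 1, -2 }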
Example #8
        public void learn_test()
        {
            #region doc_learn
            Accord.Math.Random.Generator.Seed = 0;

            int[][] input =
            {
                new int[] { 0, 1, 1, 1, 0, 0 },
                new int[] { 0, 1, 1, 0 },
                new int[] { 0, 1, 1, 0, 0, 0 },
                new int[] { 0, 1, 1, 1, 1, 0 },
                new int[] { 0, 1, 1, 1, 0, 0, 0, 0 },
                new int[] { 0, 1, 1, 1, 0, 0 },
                new int[] { 0, 1, 1, 0 },
                new int[] { 0, 1, 1, 1, 0 },
            };

            int[][] output =
            {
                new int[] { 0, 0, 1, 1, 1, 2 },
                new int[] { 0, 0, 1, 2 },
                new int[] { 0, 0, 1, 1, 1, 2 },
                new int[] { 0, 0, 1, 1, 1, 2 },
                new int[] { 0, 0, 1, 1, 1, 1, 2, 2 },
                new int[] { 0, 0, 1, 1, 1, 2 },
                new int[] { 0, 0, 1, 2 },
                new int[] { 0, 1, 1, 1, 2 },
            };

            // Create a new L-BFGS learning algorithm
            var teacher = new QuasiNewtonLearning <int>()
            {
                Function = new MarkovDiscreteFunction(states: 3, symbols: 2, initialization: new NormalDistribution())
            };

            // Learn the Conditional Random Field model
            ConditionalRandomField <int> crf = teacher.Learn(input, output);

            // Use the model to classify the samples
            int[][] prediction = crf.Decide(input);

            var    cm  = new ConfusionMatrix(predicted: prediction.Reshape(), expected: output.Reshape());
            double acc = cm.Accuracy;
            #endregion

            Assert.IsTrue(acc > 0.7);
        }
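A short usage sketch building on the crf learned above: the same Compute member shown in Examples #1 and #4 can decode a single sequence and report its score. The input sequence here is hypothetical:

    double logLikelihood;
    int[] newSequence = new int[] { 0, 1, 1, 0 };                    // hypothetical unseen input
    int[] predicted   = crf.Compute(newSequence, out logLikelihood); // most likely label sequence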
Example #9
        /// <summary>
        /// Learns a model that can map the given inputs to the given outputs.
        /// </summary>
        /// <param name="x">The model inputs.</param>
        /// <param name="y">The desired outputs associated with each <paramref name="x">inputs</paramref>.</param>
        /// <param name="weights">The weight of importance for each input-output pair (if supported by the learning algorithm).</param>
        /// <returns>
        /// A model that has learned how to produce <paramref name="y" /> given <paramref name="x" />.
        /// </returns>
        public ConditionalRandomField <T> Learn(T[][] x, int[][] y, double[] weights = null)
        {
            if (weights != null)
            {
                throw new ArgumentException(Accord.Properties.Resources.NotSupportedWeights, "weights");
            }

            if (Model == null)
            {
                Model = new ConditionalRandomField <T>(states: y.Max() + 1, function: Function);
                init();
            }

            run(x, y);

            return(Model);
        }
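A minimal usage sketch for this Learn overload, mirroring Example #8 (the input and output arrays are assumed to be the labeled sequences from that example): when no Model has been assigned, the method builds the ConditionalRandomField itself, sizing the number of states from the largest label in y, and a non-null weights argument is rejected by the guard clause at the top.

    var teacher = new QuasiNewtonLearning<int>()
    {
        Function = new MarkovDiscreteFunction(states: 3, symbols: 2)
    };

    // Model is created on first use with states = y.Max() + 1; weights stays null
    ConditionalRandomField<int> crf = teacher.Learn(input, output);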
Example #10
        public void chunking_dataset_crf()
        {
            Chunking chunking = new Chunking(path: Path.GetTempPath());

            // Learn a mapping between each word to an integer class label:
            var wordMap = new Codification().Learn(chunking.Words);

            // Learn a mapping between each tag to an integer class labels:
            var tagMap = new Codification().Learn(chunking.Tags);

            // Convert the training and testing sets into integer labels:
            int[][] trainX = wordMap.Transform(chunking.Training.Item1);
            int[][] testX  = wordMap.Transform(chunking.Testing.Item1);

            // Convert the training and testing tags into integer labels:
            int[][] trainY = tagMap.Transform(chunking.Training.Item2);
            int[][] testY  = tagMap.Transform(chunking.Testing.Item2);


            int numberOfClasses = chunking.Tags.Length;
            int numberOfSymbols = chunking.Words.Length;

            // Learn one Markov model using the training data
            var teacher = new QuasiNewtonLearning <int>()
            {
                Function = new MarkovDiscreteFunction(states: numberOfClasses, symbols: numberOfSymbols)
            };

            // Use the teacher to learn a Conditional Random Field model
            ConditionalRandomField <int> crf = teacher.Learn(trainX, trainY);

            // Use the crf to predict instances:
            int[][] predY = crf.Decide(testX);

            // Check the accuracy of the model:
            var cm = new ConfusionMatrix(
                predicted: predY.Concatenate(),
                expected: testY.Concatenate());

            double acc = cm.Accuracy;

            Assert.AreEqual(0.99983114169322662, acc, 1e-10);
        }
Example #11
        private static void crf(int[][] trainX, int[][] trainY, int[][] testX, int[][] testY)
        {
            int numberOfClasses = 44;    // chunking.Tags.Length;
            int numberOfSymbols = 21589; // chunking.Words.Length;

            // Learn one Markov model using the training data
            var teacher = new QuasiNewtonLearning <int>()
            {
                Function = new MarkovDiscreteFunction(states: numberOfClasses, symbols: numberOfSymbols)
            };

            // Use the teacher to learn a Conditional Random Field model
            ConditionalRandomField <int> crf = teacher.Learn(trainX, trainY);

            // Use the crf to predict instances:
            int[][] predY = crf.Decide(testX);

            // Check the accuracy of the model:
            var cm = new ConfusionMatrix(predicted: predY.Concatenate(), expected: testY.Concatenate());

            double acc = cm.Accuracy;
        }
Example #12
 /// <summary>
 ///   Constructs a new L-BFGS learning algorithm.
 /// </summary>
 ///
 public QuasiNewtonLearning(ConditionalRandomField <T> model)
     : this()
 {
     this.Model = model;
     init();
 }
Example #13
 /// <summary>
 ///   Constructs a new L-BFGS learning algorithm.
 /// </summary>
 ///
 public QuasiNewtonLearning(ConditionalRandomField <T> model)
 {
     this.model           = model;
     this.lbfgs           = new BroydenFletcherGoldfarbShanno(model.Function.Weights.Length);
     this.lbfgs.Tolerance = 1e-3;
 }
Example #14
 /// <summary>
 ///   Constructs a new L-BFGS learning algorithm.
 /// </summary>
 ///
 public QuasiNewtonLearning(ConditionalRandomField model)
 {
     this.model           = model;
     this.lbfgs           = new LbfgsOptimization(model.Function.Weights.Length);
     this.lbfgs.Tolerance = 1e-3;
 }