/// <summary>
/// Click handler: runs the trained BoW + SVM pipeline over the edited images
/// and toggles the selection rectangle on the canvas based on the prediction
/// for the current image index <c>j</c>.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Routed event data (unused).</param>
private void PredictImages(object sender, RoutedEventArgs e)
        {
            imageInfo.Text = "Predicting";

            // Guard clauses: both models must have been trained/loaded first.
            if (bow == null)
            {
                MessageBox.Show("No BoW model!");
                return;
            }
            if (svmIm == null)
            {
                MessageBox.Show("No SVM model!");
                return;
            }

            // Convert the WPF BitmapImages into GDI+ Bitmaps for the BoW transform.
            Bitmap[] predictIms = new Bitmap[imagesEdited.Count];

            double[][] features;
            try
            {
                int z = 0;
                foreach (BitmapImage b in imagesEdited)
                {
                    predictIms[z++] = UtilFn.BitmapImage2Bitmap(b);
                }

                // Translate each image into a fixed-length feature vector.
                features = bow.Transform(predictIms);
            }
            finally
            {
                // Bitmaps wrap unmanaged GDI+ handles; the originals were never
                // disposed, leaking a handle per image on every prediction.
                foreach (Bitmap bmp in predictIms)
                {
                    bmp?.Dispose();
                }
            }

            output = svmIm.Decide(features);
            if (output != null)
            {
                // Show the selection rectangle only when the current image
                // (index j, maintained elsewhere) is classified positive.
                if (output[j] && !cvs.Children.Contains(rectSel2))
                {
                    cvs.Children.Add(rectSel2);
                }
                else if (!output[j] && cvs.Children.Contains(rectSel2))
                {
                    cvs.Children.Remove(rectSel2);
                }
            }
            imageInfo.Text       = "Done";
            btnCorrect.IsEnabled = true;
        }
        // Beispiel #2 (example marker and vote count "0" from the code-listing site)
        /// <summary>
        /// Learns a 10-word Bag-of-Visual-Words model (SURF + K-means) from the
        /// sample images, verifies the clustering and the extracted feature
        /// vectors, then trains a hard-margin linear SVM on the features and
        /// asserts zero classification error.
        /// </summary>
        public void learn_new()
        {
            #region doc_learn
            // Ensure results are reproducible
            Accord.Math.Random.Generator.Seed = 0;

            // The Bag-of-Visual-Words model converts images of arbitrary
            // size into fixed-length feature vectors. In this example, we
            // will be setting the codebook size to 10. This means all feature
            // vectors that will be generated will have the same length of 10.

            // By default, the BoW object will use the sparse SURF as the
            // feature extractor and K-means as the clustering algorithm.

            // Create a new Bag-of-Visual-Words (BoW) model
            BagOfVisualWords bow = new BagOfVisualWords(10);
            // Note: the BoW model can also be created using
            // var bow = BagOfVisualWords.Create(10);

            // Get some training images
            Bitmap[] images = GetImages();

            // Compute the model
            bow.Learn(images);

            // After this point, we will be able to translate
            // images into double[] feature vectors using
            double[][] features = bow.Transform(images);
            #endregion

            // SURF descriptors are 64-dimensional; the codebook has 10 words.
            var kmeans = bow.Clustering as KMeans;
            Assert.AreEqual(64, kmeans.Clusters.NumberOfInputs);
            Assert.AreEqual(10, kmeans.Clusters.NumberOfOutputs);
            Assert.AreEqual(10, kmeans.Clusters.NumberOfClasses);

            string   str = kmeans.Clusters.Proportions.ToCSharp();
            double[] expectedProportions = new double[] { 0.0960793804453049, 0.0767182962245886, 0.103823814133591, 0.0738141335914811, 0.0997095837366893, 0.0815585672797677, 0.0788964181994192, 0.090513068731849, 0.117376573088093, 0.181510164569216 };

            Assert.IsTrue(kmeans.Clusters.Proportions.IsEqual(expectedProportions, 1e-10));
            Assert.IsTrue(kmeans.Clusters.Covariances.All(x => x == null));

            // AreEqual takes (expected, actual); previously the arguments were
            // swapped here, which produces misleading failure messages.
            Assert.AreEqual(new[] { 6, 10 }, features.GetLength());

            str = features.ToCSharp();

            double[][] expected = new double[][]
            {
                new double[] { 47, 44, 42, 4, 23, 22, 28, 53, 50, 96 },
                new double[] { 26, 91, 71, 49, 99, 70, 59, 28, 155, 79 },
                new double[] { 71, 34, 51, 33, 53, 25, 44, 64, 32, 145 },
                new double[] { 49, 41, 31, 24, 54, 19, 41, 63, 66, 72 },
                new double[] { 137, 16, 92, 115, 39, 75, 24, 92, 41, 88 },
                new double[] { 67, 91, 142, 80, 144, 126, 130, 74, 141, 270 }
            };

            // Word counts may come back in a different order depending on the
            // clustering run, so only membership is checked, not position.
            for (int i = 0; i < features.Length; i++)
            {
                for (int j = 0; j < features[i].Length; j++)
                {
                    Assert.IsTrue(expected[i].Contains(features[i][j]));
                }
            }

            #region doc_classification

            // Now, the features can be used to train any classification
            // algorithm as if they were the images themselves. For example,
            // let's assume the first three images belong to a class and
            // the second three to another class. We can train an SVM using

            int[] labels = { -1, -1, -1, +1, +1, +1 };

            // Create the SMO algorithm to learn a Linear kernel SVM
            var teacher = new SequentialMinimalOptimization <Linear>()
            {
                Complexity = 10000 // make a hard margin SVM
            };

            // Obtain a learned machine
            var svm = teacher.Learn(features, labels);

            // Use the machine to classify the features
            bool[] output = svm.Decide(features);

            // Compute the error between the expected and predicted labels
            double error = new ZeroOneLoss(labels).Loss(output);
            #endregion

            Assert.IsTrue(new ZeroOneLoss(labels).IsBinary);
            // Expected value first (was reversed): a hard-margin SVM must
            // separate the training set perfectly.
            Assert.AreEqual(0, error);
        }
        // Beispiel #3 (example marker and vote count "0" from the code-listing site)
        /// <summary>
        /// Learns a 10-word Bag-of-Visual-Words model (SURF + K-means) from the
        /// sample images with parallelism disabled for reproducibility, verifies
        /// the extracted feature vectors, then trains a hard-margin linear SVM
        /// on them and asserts zero classification error.
        /// </summary>
        public void learn_new()
        {
            #region doc_learn
            // Ensure results are reproducible
            Accord.Math.Random.Generator.Seed = 0;

            // The Bag-of-Visual-Words model converts images of arbitrary
            // size into fixed-length feature vectors. In this example, we
            // will be setting the codebook size to 10. This means all feature
            // vectors that will be generated will have the same length of 10.

            // By default, the BoW object will use the sparse SURF as the
            // feature extractor and K-means as the clustering algorithm.

            // Create a new Bag-of-Visual-Words (BoW) model
            BagOfVisualWords bow = new BagOfVisualWords(10);
            // Note: the BoW model can also be created using
            // var bow = BagOfVisualWords.Create(10);

            // Ensure results are reproducible
            bow.ParallelOptions.MaxDegreeOfParallelism = 1;

            // Get some training images
            Bitmap[] images = GetImages();

            // Compute the model
            bow.Learn(images);

            // After this point, we will be able to translate
            // images into double[] feature vectors using
            double[][] features = bow.Transform(images);
            #endregion

            // AreEqual takes (expected, actual); previously the arguments were
            // swapped here, which produces misleading failure messages.
            Assert.AreEqual(new[] { 6, 10 }, features.GetLength());

            string str = features.ToCSharp();

            double[][] expected = new double[][]
            {
                new double[] { 4, 28, 24, 68, 51, 97, 60, 35, 18, 24 },
                new double[] { 53, 111, 89, 70, 24, 80, 130, 46, 50, 74 },
                new double[] { 31, 29, 57, 102, 63, 142, 40, 18, 37, 33 },
                new double[] { 24, 52, 57, 78, 56, 69, 65, 22, 21, 16 },
                new double[] { 124, 35, 33, 145, 90, 83, 31, 4, 95, 79 },
                new double[] { 97, 110, 127, 131, 71, 264, 139, 58, 116, 152 }
            };

            // Word counts may come back in a different order depending on the
            // clustering run, so only membership is checked, not position.
            for (int i = 0; i < features.Length; i++)
            {
                for (int j = 0; j < features[i].Length; j++)
                {
                    Assert.IsTrue(expected[i].Contains(features[i][j]));
                }
            }

            #region doc_classification

            // Now, the features can be used to train any classification
            // algorithm as if they were the images themselves. For example,
            // let's assume the first three images belong to a class and
            // the second three to another class. We can train an SVM using

            int[] labels = { -1, -1, -1, +1, +1, +1 };

            // Create the SMO algorithm to learn a Linear kernel SVM
            var teacher = new SequentialMinimalOptimization <Linear>()
            {
                Complexity = 10000 // make a hard margin SVM
            };

            // Obtain a learned machine
            var svm = teacher.Learn(features, labels);

            // Use the machine to classify the features
            bool[] output = svm.Decide(features);

            // Compute the error between the expected and predicted labels
            double error = new ZeroOneLoss(labels).Loss(output);
            #endregion

            Assert.IsTrue(new ZeroOneLoss(labels).IsBinary);
            // Expected value first (was reversed): a hard-margin SVM must
            // separate the training set perfectly.
            Assert.AreEqual(0, error);
        }