Exemplo n.º 1
0
        /// <summary>
        ///   Builds a Bag-of-Visual-Words (BoW) codebook from the training
        ///   images on disk, then uses it to compute a fixed-length feature
        ///   vector for every training and testing image.
        /// </summary>
        /// <remarks>
        ///   Populates the class-level <c>trainingImages</c>, <c>testingImages</c>,
        ///   <c>trainingFeatures</c> and <c>testingFeatures</c> dictionaries,
        ///   keyed by file path. Assumes those dictionaries are empty on entry —
        ///   TODO confirm against callers.
        /// </remarks>
        private static void CreateBoW()
        {
            // Codebook size: every feature vector will have this length.
            var numberOfWords = 36;

            // Load the training images, keyed by their file path.
            foreach (var file in Directory.EnumerateFiles(@"C:\Temp\TLLCamerasTestData\37_Training", "*.jpg"))
            {
                var trainingImage = (Bitmap)Bitmap.FromFile(file);

                trainingImages.Add(file, trainingImage);
            }

            // Load the testing images, keyed by their file path.
            foreach (var file in Directory.EnumerateFiles(@"C:\Temp\TLLCamerasTestData\37_Testing", "*.jpg"))
            {
                var testImage = (Bitmap)Bitmap.FromFile(file);

                testingImages.Add(file, testImage);
            }

            // We will use SURF, so we can use a standard clustering
            // algorithm that is based on Euclidean distances. A good
            // algorithm for clustering codewords is the Binary Split
            // variant of the K-Means algorithm.

            // Create a Binary-Split clustering algorithm
            BinarySplit binarySplit = new BinarySplit(numberOfWords);

            // Create bag-of-words (BoW) with the given algorithm
            BagOfVisualWords surfBow = new BagOfVisualWords(binarySplit);

            // Compute the BoW codebook using training images only
            IBagOfWords <Bitmap> bow = surfBow.Learn(trainingImages.Values.ToArray());

            // Use the learned codebook to represent each training image
            // as a fixed-length feature vector.
            foreach (var trainingImage in trainingImages.Keys)
            {
                var asBitmap = trainingImages[trainingImage] as Bitmap;

                var featureVector = (bow as ITransform <Bitmap, double[]>).Transform(asBitmap);

                trainingFeatures.Add(trainingImage, featureVector);
            }

            // And do the same for each testing image.
            foreach (var testingImage in testingImages.Keys)
            {
                var asBitmap = testingImages[testingImage] as Bitmap;

                var featureVector = (bow as ITransform <Bitmap, double[]>).Transform(asBitmap);

                testingFeatures.Add(testingImage, featureVector);
            }
        }
Exemplo n.º 2
0
        /// <summary>
        ///   Lets the user pick a folder of images and trains the
        ///   Bag-of-Visual-Words model on (at most 2000 of) them.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Routed event data (unused).</param>
        private void trainBoW(object sender, RoutedEventArgs e)
        {
            //train bow
            imageInfo.Text = "Training BoW";

            WPFFolderBrowserDialog dd = new WPFFolderBrowserDialog();

            dd.Title = "Select a folder";
            if (dd.ShowDialog() == true)
            {
                String[] names = UtilFn.GetFilesFrom(dd.FileName, filtersDir, false);
                if (names.Length == 0)
                {
                    MessageBox.Show("NO IMAGES IN SELECTED FOLDER.");

                    // Bug fix: previously execution fell through and called
                    // bow.Learn() on an empty image set; bail out instead.
                    imageInfo.Text = "Done";
                    return;
                }

                // Cap the training set at 2000 images to bound memory and time.
                ushort        c   = 0;
                List <Bitmap> ims = new List <Bitmap>();
                foreach (String fn in names)
                {
                    if (c++ == 2000)
                    {
                        break;
                    }

                    ims.Add(new Bitmap(fn));
                }

                bow = new BagOfVisualWords(20); // 20-word codebook
                bow.Learn(ims.ToArray());
            }
            else
            {
                MessageBox.Show("Something went wrong.");
            }
            imageInfo.Text = "Done";
        }
Exemplo n.º 3
0
        /// <summary>
        ///   This methods computes the Bag-of-Visual-Words with the training images.
        ///   The feature detector (SURF, FREAK, or HOG) is chosen from the radio
        ///   buttons; the learned model is then used to compute a feature vector
        ///   for every image listed in <c>listView1</c>, which is shown onscreen
        ///   and stored back into each item's <c>Tag</c> alongside its class label.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Event data (unused).</param>
        private void btnBagOfWords_Click(object sender, EventArgs e)
        {
            // Codebook size taken from the UI; all feature vectors get this length.
            int numberOfWords = (int)numWords.Value;


            // Time the codebook construction separately from feature extraction.
            Stopwatch sw1 = Stopwatch.StartNew();

            IBagOfWords <Bitmap> bow;

            // Check if we will use SURF or FREAK as the feature detector
            if (rbSurf.Checked)
            {
                // We will use SURF, so we can use a standard clustering
                // algorithm that is based on Euclidean distances. A good
                // algorithm for clustering codewords is the Binary Split
                // variant of the K-Means algorithm.

                // Create a Binary-Split clustering algorithm
                BinarySplit binarySplit = new BinarySplit(numberOfWords);

                // Create bag-of-words (BoW) with the given algorithm
                BagOfVisualWords surfBow = new BagOfVisualWords(binarySplit);

                // Compute the BoW codebook using training images only
                bow = surfBow.Learn(originalTrainImages.Values.ToArray());
            }
            else if (rbFreak.Checked)
            {
                // We will use the FREAK detector. The features generated by FREAK
                // are represented as bytes. While it is possible to transform those
                // to standard double vectors, we will demonstrate how to use a non-
                // Euclidean distance based algorithm to generate codewords for it.

                // Note: Using Binary-Split with normalized FREAK features would
                // possibly work better than the k-modes. This is just an example.

                // Create a k-Modes clustering algorithm (Hamming distance on bytes)
                var kmodes = new KModes <byte>(numberOfWords, new Hamming());

                // Create a FREAK detector explicitly (if no detector was passed,
                // the BagOfVisualWords would be using a SURF detector by default).
                var freak = new FastRetinaKeypointDetector();

                // Create bag-of-words (BoW) with the k-modes clustering and FREAK detector
                var freakBow = new BagOfVisualWords <FastRetinaKeypoint, byte[]>(freak, kmodes);

                // Compute the BoW codebook using training images only
                bow = freakBow.Learn(originalTrainImages.Values.ToArray());
            }
            else
            {
                // We will use HOG, so we can use a standard clustering
                // algorithm that is based on Euclidean distances. A good
                // algorithm for clustering codewords is the Binary Split
                // variant of the K-Means algorithm.

                // Create a Binary-Split clustering algorithm
                BinarySplit binarySplit = new BinarySplit(numberOfWords);

                // Create a HOG detector explicitly (if no detector was passed,
                // the BagOfVisualWords would be using a SURF detector by default).
                var hog = new HistogramsOfOrientedGradients();

                // Create bag-of-words (BoW) with the given algorithm
                var hogBow = BagOfVisualWords.Create(hog, binarySplit);

                // Compute the BoW codebook using training images only
                bow = hogBow.Learn(originalTrainImages.Values.ToArray());
            }

            sw1.Stop();

            // Now that we have already created and computed the BoW model, we
            // will use it to extract representations for each of the images in
            // both training and testing sets.

            Stopwatch sw2 = Stopwatch.StartNew();

            // Extract features for all images
            foreach (ListViewItem item in listView1.Items)
            {
                // Get item image
                Bitmap image = originalImages[item.ImageKey] as Bitmap;

                // Get a feature vector representing this image
                double[] featureVector = (bow as ITransform <Bitmap, double[]>).Transform(image);

                // Represent it as a string so we can show it onscreen
                string featureString = featureVector.ToString(DefaultArrayFormatProvider.InvariantCulture);

                // Show it in the visual grid (update the existing sub-item if the
                // feature column was already added on a previous run).
                if (item.SubItems.Count == 2)
                {
                    item.SubItems[1].Text = featureString;
                }
                else
                {
                    item.SubItems.Add(featureString);
                }

                // Retrieve the class labels, that we had stored in the Tag
                int classLabel = (item.Tag as Tuple <double[], int>).Item2;

                // Now, use the Tag to store the feature vector too
                item.Tag = Tuple.Create(featureVector, classLabel);
            }

            sw2.Stop();

            lbStatus.Text = "BoW constructed in " + sw1.Elapsed + "s. Features extracted in " + sw2.Elapsed + "s.";
            btnSampleRunAnalysis.Enabled = true;
        }
        /// <summary>
        /// This method is only for admin use: it recomputes the
        /// bagOfContourFragments codebook and the multi-class SVM from the
        /// images under Resources/Res, and persists both to disk.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Event data (unused).</param>
        private void buttonCompute_Click(object sender, EventArgs e)
        {
            //Accord.Math.Random.Generator.Seed = 1;

            DirectoryInfo path = new DirectoryInfo(Path.Combine(Application.StartupPath, "Resources/Res"));

            // Dictionary of training images, keyed by a running image index.
            originalTrainImages = new Dictionary <int, Bitmap>();

            int j = 0; // running image index

            int k = 0; // running class (folder) index

            // Each sub-directory of Resources/Res is one class.
            foreach (DirectoryInfo classFolder in path.EnumerateDirectories())
            {
                // Collect all supported image files of this class.
                FileInfo[] files = GetFilesByExtensions(classFolder, ".jpg", ".tif").ToArray();

                // Shuffle so the 70% training split is a random sample.
                Vector.Shuffle(files);

                for (int i = 0; i < files.Length; i++)
                {
                    // Use only the first 70% of each class for training.
                    if ((i / (double)files.Length) < 0.7)
                    {
                        FileInfo file = files[i];

                        // Create image from file
                        Bitmap image = (Bitmap)Bitmap.FromFile(file.FullName);

                        // Reduce the image to its contours with a Canny edge
                        // detector, applied in place.
                        CannyEdgeDetector filterCanny = new CannyEdgeDetector();
                        filterCanny.ApplyInPlace(image);

                        // Add image to the training dictionary.
                        originalTrainImages.Add(j, image);

                        // Remember the correct class label for this image.
                        outputsResult[j] = k;
                        j++;
                    }
                }
                // Next class folder gets the next label.
                k++;
            }

            // Teacher for the SVM, using the Histogram Intersection kernel.
            var teacher = new MulticlassSupportVectorLearning <HistogramIntersection>()
            {
                Learner = (param) => new SequentialMinimalOptimization <HistogramIntersection>()
                {
                    // Kernel with empirically chosen parameters.
                    Kernel = new HistogramIntersection(0.25, 1),
                }
            };

            // Create the Bag-of-Visual-Words codebook (default SURF detector,
            // numberOfContour codewords) and learn it from the training images.
            // NOTE(review): the original code also constructed a KModes<byte>
            // clusterer and a FastRetinaKeypointDetector here but never used
            // them — that dead code has been removed.
            bagOfContourFragments = new BagOfVisualWords(numberOfContour);

            bagOfContourFragments.Learn(originalTrainImages.Values.ToArray());

            // Transform every training image into its feature vector.
            for (int i = 0; i < originalTrainImages.Count; i++)
            {
                Bitmap image = originalTrainImages[i] as Bitmap;

                inputsInfo[i] = (bagOfContourFragments as ITransform <Bitmap, double[]>).Transform(image);
            }

            // Persist the learned codebook.
            BinarySave.WriteBinary(bagOfContourFragments);

            // Train the SVM on the extracted features.
            multiSVM = teacher.Learn(inputsInfo, outputsResult);

            // Persist the trained SVM.
            BinarySave.WriteBinary(multiSVM);
        }
Exemplo n.º 5
0
        /// <summary>
        ///   Learns a Bag-of-Visual-Words model with default settings (sparse
        ///   SURF features clustered by K-means), checks the learned cluster
        ///   structure and transformed feature vectors, and verifies that a
        ///   hard-margin linear SVM trained on those vectors classifies them
        ///   with zero error.
        /// </summary>
        public void learn_new()
        {
            #region doc_learn
            // Ensure results are reproducible
            Accord.Math.Random.Generator.Seed = 0;

            // The Bag-of-Visual-Words model converts images of arbitrary
            // size into fixed-length feature vectors. In this example, we
            // will be setting the codebook size to 10. This means all feature
            // vectors that will be generated will have the same length of 10.

            // By default, the BoW object will use the sparse SURF as the
            // feature extractor and K-means as the clustering algorithm.

            // Create a new Bag-of-Visual-Words (BoW) model
            BagOfVisualWords bow = new BagOfVisualWords(10);
            // Note: the BoW model can also be created using
            // var bow = BagOfVisualWords.Create(10);

            // Get some training images
            Bitmap[] images = GetImages();

            // Compute the model
            bow.Learn(images);

            // After this point, we will be able to translate
            // images into double[] feature vectors using
            double[][] features = bow.Transform(images);
            #endregion

            // The default clustering algorithm should be K-means, with
            // 64-dimensional SURF descriptors and 10 output clusters.
            var kmeans = bow.Clustering as KMeans;
            Assert.AreEqual(64, kmeans.Clusters.NumberOfInputs);
            Assert.AreEqual(10, kmeans.Clusters.NumberOfOutputs);
            Assert.AreEqual(10, kmeans.Clusters.NumberOfClasses);

            double[] expectedProportions = new double[] { 0.0960793804453049, 0.0767182962245886, 0.103823814133591, 0.0738141335914811, 0.0997095837366893, 0.0815585672797677, 0.0788964181994192, 0.090513068731849, 0.117376573088093, 0.181510164569216 };

            Assert.IsTrue(kmeans.Clusters.Proportions.IsEqual(expectedProportions, 1e-10));
            Assert.IsTrue(kmeans.Clusters.Covariances.All(x => x == null));

            // Six images, ten codewords each.
            Assert.AreEqual(features.GetLength(), new[] { 6, 10 });

            double[][] expected = new double[][]
            {
                new double[] { 47, 44, 42, 4, 23, 22, 28, 53, 50, 96 },
                new double[] { 26, 91, 71, 49, 99, 70, 59, 28, 155, 79 },
                new double[] { 71, 34, 51, 33, 53, 25, 44, 64, 32, 145 },
                new double[] { 49, 41, 31, 24, 54, 19, 41, 63, 66, 72 },
                new double[] { 137, 16, 92, 115, 39, 75, 24, 92, 41, 88 },
                new double[] { 67, 91, 142, 80, 144, 126, 130, 74, 141, 270 }
            };

            // Cluster label assignment may be permuted, so only check that each
            // vector contains the expected counts rather than exact positions.
            for (int i = 0; i < features.Length; i++)
            {
                for (int j = 0; j < features[i].Length; j++)
                {
                    Assert.IsTrue(expected[i].Contains(features[i][j]));
                }
            }

            #region doc_classification

            // Now, the features can be used to train any classification
            // algorithm as if they were the images themselves. For example,
            // let's assume the first three images belong to a class and
            // the second three to another class. We can train an SVM using

            int[] labels = { -1, -1, -1, +1, +1, +1 };

            // Create the SMO algorithm to learn a Linear kernel SVM
            var teacher = new SequentialMinimalOptimization <Linear>()
            {
                Complexity = 10000 // make a hard margin SVM
            };

            // Obtain a learned machine
            var svm = teacher.Learn(features, labels);

            // Use the machine to classify the features
            bool[] output = svm.Decide(features);

            // Compute the error between the expected and predicted labels
            double error = new ZeroOneLoss(labels).Loss(output);
            #endregion

            Assert.IsTrue(new ZeroOneLoss(labels).IsBinary);
            Assert.AreEqual(error, 0);
        }
Exemplo n.º 6
0
        /// <summary>
        ///   Learns a Bag-of-Visual-Words model with default settings and a
        ///   single worker thread (for reproducibility), checks the transformed
        ///   feature vectors, and verifies that a hard-margin linear SVM trained
        ///   on those vectors classifies them with zero error.
        /// </summary>
        public void learn_new()
        {
            #region doc_learn
            // Ensure results are reproducible
            Accord.Math.Random.Generator.Seed = 0;

            // The Bag-of-Visual-Words model converts images of arbitrary
            // size into fixed-length feature vectors. In this example, we
            // will be setting the codebook size to 10. This means all feature
            // vectors that will be generated will have the same length of 10.

            // By default, the BoW object will use the sparse SURF as the
            // feature extractor and K-means as the clustering algorithm.

            // Create a new Bag-of-Visual-Words (BoW) model
            BagOfVisualWords bow = new BagOfVisualWords(10);
            // Note: the BoW model can also be created using
            // var bow = BagOfVisualWords.Create(10);

            // Ensure results are reproducible
            bow.ParallelOptions.MaxDegreeOfParallelism = 1;

            // Get some training images
            Bitmap[] images = GetImages();

            // Compute the model
            bow.Learn(images);

            // After this point, we will be able to translate
            // images into double[] feature vectors using
            double[][] features = bow.Transform(images);
            #endregion

            // Six images, ten codewords each.
            Assert.AreEqual(features.GetLength(), new[] { 6, 10 });

            double[][] expected = new double[][]
            {
                new double[] { 4, 28, 24, 68, 51, 97, 60, 35, 18, 24 },
                new double[] { 53, 111, 89, 70, 24, 80, 130, 46, 50, 74 },
                new double[] { 31, 29, 57, 102, 63, 142, 40, 18, 37, 33 },
                new double[] { 24, 52, 57, 78, 56, 69, 65, 22, 21, 16 },
                new double[] { 124, 35, 33, 145, 90, 83, 31, 4, 95, 79 },
                new double[] { 97, 110, 127, 131, 71, 264, 139, 58, 116, 152 }
            };

            // Cluster label assignment may be permuted, so only check that each
            // vector contains the expected counts rather than exact positions.
            for (int i = 0; i < features.Length; i++)
            {
                for (int j = 0; j < features[i].Length; j++)
                {
                    Assert.IsTrue(expected[i].Contains(features[i][j]));
                }
            }

            #region doc_classification

            // Now, the features can be used to train any classification
            // algorithm as if they were the images themselves. For example,
            // let's assume the first three images belong to a class and
            // the second three to another class. We can train an SVM using

            int[] labels = { -1, -1, -1, +1, +1, +1 };

            // Create the SMO algorithm to learn a Linear kernel SVM
            var teacher = new SequentialMinimalOptimization <Linear>()
            {
                Complexity = 10000 // make a hard margin SVM
            };

            // Obtain a learned machine
            var svm = teacher.Learn(features, labels);

            // Use the machine to classify the features
            bool[] output = svm.Decide(features);

            // Compute the error between the expected and predicted labels
            double error = new ZeroOneLoss(labels).Loss(output);
            #endregion

            Assert.IsTrue(new ZeroOneLoss(labels).IsBinary);
            Assert.AreEqual(error, 0);
        }
Exemplo n.º 7
0
        /// <summary>
        ///   Trains a Bag-of-Visual-Words + multi-class SVM pipeline on the
        ///   images under <c>dataPath\BoW</c> (one sub-directory per class,
        ///   split 70/30 into train/test) and logs the resubstitution accuracy
        ///   on the training set.
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Event data (unused).</param>
        private void button4_Click(object sender, EventArgs e)
        {
            var path = new DirectoryInfo(dataPath + "BoW");

            Dictionary <string, Bitmap> train      = new Dictionary <string, Bitmap>();
            Dictionary <string, Bitmap> test       = new Dictionary <string, Bitmap>();
            Dictionary <string, Bitmap> all        = new Dictionary <string, Bitmap>();
            Dictionary <string, int>    labelIndex = new Dictionary <string, int>();

            int labelCount = 0;

            // Each sub-directory is one class; shuffle its images and split
            // them 70/30 into the training and test sets.
            foreach (DirectoryInfo classFolder in path.EnumerateDirectories())
            {
                string name = classFolder.Name;
                labelIndex[name] = ++labelCount; // class labels start at 1
                logDebug(name + " " + labelIndex[name]);
                FileInfo[] files = Utils.GetFilesByExtensions(classFolder, ".jpg", ".png").ToArray();
                Vector.Shuffle(files);
                for (int i = 0; i < files.Length; i++)
                {
                    FileInfo file  = files[i];
                    Bitmap   image = (Bitmap)Bitmap.FromFile(file.FullName);
                    if ((i / (double)files.Length) < 0.7)
                    {
                        // Put the first 70% in the training set
                        train.Add(file.FullName, image);
                    }
                    else
                    {
                        // Put the remaining 30% in the test set
                        test.Add(file.FullName, image);
                    }
                    all.Add(file.FullName, image);
                    logDebug(file.FullName);
                }
            }

            // Build the BoW codebook with Binary-Split clustering,
            // using training images only.
            int numberOfWords = 36;
            IBagOfWords <Bitmap> bow;
            BinarySplit          binarySplit = new BinarySplit(numberOfWords);

            // Create bag-of-words (BoW) with the given algorithm
            BagOfVisualWords surfBow = new BagOfVisualWords(binarySplit);

            // Compute the BoW codebook using training images only
            bow = surfBow.Learn(train.Values.ToArray());
            logDebug("BOW Done");

            List <double[]> lstInput  = new List <double[]>();
            List <int>      lstOutput = new List <int>();

            // Extract a feature vector for each training image; the class
            // label is derived from the image's parent directory name.
            foreach (String fileName  in train.Keys)
            {
                double[] featureVector = (bow as ITransform <Bitmap, double[]>).Transform(train[fileName]);
                lstInput.Add(featureVector);
                String labelString = Path.GetFileName(Path.GetDirectoryName(fileName));
                lstOutput.Add(labelIndex[labelString]);
            }

            double[][] inputs  = lstInput.ToArray();
            int[]      outputs = lstOutput.ToArray();

            // Train a one-vs-one multi-class SVM with a Chi-Square kernel.
            IKernel kernel = new ChiSquare();

            MulticlassSupportVectorLearning <IKernel> teacher = new MulticlassSupportVectorLearning <IKernel>()
            {
                Kernel  = kernel,
                Learner = (param) =>
                {
                    return(new SequentialMinimalOptimization <IKernel>()
                    {
                        Kernel = kernel,
                        Complexity = 1.0,
                        Tolerance = 0.01,
                        CacheSize = 500,
                        Strategy = SelectionStrategy.Sequential,
                    });
                }
            };
            var ksvm = teacher.Learn(inputs, outputs);

            logDebug("ksvm Done");
            double error = new ZeroOneLoss(outputs).Loss(ksvm.Decide(inputs));

            logDebug("error=" + error);

            // Evaluate on the training set itself (resubstitution accuracy).
            int trainingHit  = 0;
            int trainingMiss = 0;
            Dictionary <string, Bitmap> data = train;

            foreach (String fileName in data.Keys)
            {
                double[] input       = (bow as ITransform <Bitmap, double[]>).Transform(data[fileName]);
                String   labelString = Path.GetFileName(Path.GetDirectoryName(fileName));
                int      label       = labelIndex[labelString];

                int actual = ksvm.Decide(input);
                if (label == actual)
                {
                    trainingHit++;
                }
                else
                {
                    trainingMiss++;
                    logDebug(labelString + " " + String.Format("{0} {1}", label, actual));
                }
            }
            logDebug(String.Format("Result {0}/{1}", trainingHit, data.Count));
        }