Example No. 1
 /// <summary>
 ///   Initializes this instance.
 /// </summary>
 ///
 protected void Init(IFeatureDetector<TPoint, TFeature> detector, IClusteringAlgorithm<TFeature> algorithm)
 {
     this.NumberOfWords   = algorithm.Clusters.Count;
     this.Clustering      = algorithm;
     this.Detector        = detector;
     this.ParallelOptions = new ParallelOptions();
 }
Example No. 2
        private void FindKeypoint(int id, InputFileModel inputFile, IFeatureDetector detector, bool addToList = true)
        {
            WindowsFormHelper.AddLogToConsole($"Start finding key points for: {inputFile.fileInfo.Name}\n");

            var detectedKeyPoints = detector.DetectKeyPoints(new Mat(inputFile.fileInfo.FullName));

            if (addToList)
            {
                DetectedKeyPoints.Add(id, new KeyPointModel()
                {
                    DetectedKeyPoints = new VectorOfKeyPoint(detectedKeyPoints),
                    InputFile         = inputFile,
                    ID                = id
                });
            }

            WindowsFormHelper.AddLogToConsole($"FINISH finding key points for: {inputFile.fileInfo.Name}\n");

            // Draw the detected keypoints and save the visualization image.
            var drawKeypointDir = $@"{tempDirectory}\DrawKeypoint";
            var outputPath      = Path.Combine(drawKeypointDir, $"{Path.GetFileNameWithoutExtension(inputFile.fileInfo.Name)}.JPG");

            Mat output = new Mat();
            Directory.CreateDirectory(drawKeypointDir);
            Features2DToolbox.DrawKeypoints(new Mat(inputFile.fileInfo.FullName), new VectorOfKeyPoint(detectedKeyPoints), output, new Bgr(0, 0, 255), KeypointDrawType.DrawRichKeypoints);
            output.Save(outputPath);
            fileManager.listViewerModel._lastDrawnKeypoint = new Image<Bgr, byte>(output.Bitmap);

            // Register the drawn image in the "drawn keypoints" list view.
            var file       = new InputFileModel(outputPath);
            var imageList  = _winForm.ImageList[(int)EListViewGroup.DrawnKeyPoint];
            var listViewer = _winForm.ListViews[(int)EListViewGroup.DrawnKeyPoint];

            fileManager.AddInputFileToList(file, fileManager.listViewerModel.ListOfListInputFolder[(int)EListViewGroup.DrawnKeyPoint], imageList, listViewer);
        }
Example No. 3
 /// <summary>
 /// Adapts a detector to partition the source image into a grid and detect points in each cell.
 /// </summary>
 /// <param name="detector">Detector that will be adapted</param>
 /// <param name="maxTotalKeyPoints">Maximum count of keypoints detected on the image. Only the strongest keypoints</param>
 /// <param name="gridRows">Grid rows count</param>
 /// <param name="gridCols">Grid column count</param>
 public GridAdaptedFeatureDetector(IFeatureDetector detector, int maxTotalKeyPoints, int gridRows, int gridCols)
 {
     _baseDetector     = detector;
     MaxTotalKeyPoints = maxTotalKeyPoints;
     GridRows          = gridRows;
     GridCols          = gridCols;
     _ptr = GridAdaptedFeatureDetectorCreate(detector.FeatureDetectorPtr, maxTotalKeyPoints, gridRows, gridCols);
 }
Example No. 4
 /// <summary>
 /// Detect the keypoints from the image
 /// </summary>
 /// <param name="detector">The keypoint detector</param>
 /// <param name="image">The image to extract keypoints from</param>
 /// <param name="mask">The optional mask.</param>
 /// <returns>An array of key points</returns>
 public static MKeyPoint[] Detect(this IFeatureDetector detector, IInputArray image, IInputArray mask = null)
 {
     using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
     {
         detector.DetectRaw(image, keypoints, mask);
         return keypoints.ToArray();
     }
 }
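For context, a minimal calling sketch for this extension. Everything not shown above is an assumption: the file name is a placeholder, and using ORBDetector as the IFeatureDetector matches Emgu CV 3.0-era builds, where Feature2D implements that interface.

 using System;
 using Emgu.CV;
 using Emgu.CV.Features2D;
 using Emgu.CV.Structure;

 static class DetectExtensionDemo
 {
     static void Main()
     {
         // Load a grayscale image (placeholder path) and detect keypoints
         // through the Detect extension method shown above.
         using (Image<Gray, byte> image = new Image<Gray, byte>("scene.jpg"))
         using (ORBDetector orb = new ORBDetector(500))
         {
             MKeyPoint[] keyPoints = orb.Detect(image, null);
             Console.WriteLine("Detected {0} keypoints", keyPoints.Length);
         }
     }
 }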
Example No. 5
        /// <summary>
        /// Detects keypoints in the specified input image.
        /// </summary>
        /// <param name="detector">The feature detector used to find image keypoints.</param>
        /// <param name="image">The image on which to detect keypoints.</param>
        /// <param name="mask">The optional operation mask used to specify where to look for keypoints.</param>
        /// <returns>The collection of detected keypoints.</returns>
        public static KeyPointCollection Detect(this IFeatureDetector detector, Arr image, Arr mask = null)
        {
            if (detector == null)
            {
                throw new ArgumentNullException("detector");
            }

            var keyPoints = new KeyPointCollection();

            detector.Detect(image, keyPoints, mask);
            return keyPoints;
        }
Example No. 6
        /// <summary>
        /// Process image looking for interest points.
        /// </summary>
        /// <typeparam name="TPoint">The type of returned feature points.</typeparam>
        /// <typeparam name="TFeature">The type of extracted features.</typeparam>
        /// <param name="featureDetector">Feature detector.</param>
        /// <param name="image">Source image data to process.</param>
        /// <returns>Returns list of found interest points.</returns>
        public static List<TPoint> ProcessImage<TPoint, TFeature>(this IFeatureDetector<TPoint, TFeature> featureDetector, Gray<byte>[,] image)
            where TPoint : IFeatureDescriptor<TFeature>
        {
            List<TPoint> points;

            using (var uImg = image.Lock())
            {
                points = featureDetector.ProcessImage(uImg.AsAForgeImage());
            }

            return points;
        }
Example No. 7
        public void ContinueInComputingSFM(IFeatureDetector detector, IFeatureDescriptor descriptor, IFeatureMatcher matcher, List<InputFileModel> listOfInput)
        {
            var iterMatches = FoundedMatches.Count;

            countInputFile = DetectedKeyPoints.Count;

            StartDetectingKeyPoint(countInputFile, listOfInput, detector);
            StartComputingDescriptor(countInputFile, descriptor);
            StartMatching(countInputFile, matcher);

            WriteAddedImages(listOfInput);
            AppendMatches(FoundedMatches, iterMatches);
            ContinueVisualSFM();
        }
Example No. 8
        public void ComputeSfM(IFeatureDetector detector, IFeatureDescriptor descriptor, IFeatureMatcher matcher, List<InputFileModel> listOfInput)
        {
            countInputFile = 0;
            DetectedKeyPoints.Clear();
            ComputedDescriptors.Clear();
            FoundedMatches.Clear();

            switch (fileManager._inputType)
            {
                case EInput.ListView:
                    StartDetectingKeyPoint(0, listOfInput, detector);
                    StartComputingDescriptor(0, descriptor);
                    StartMatching(0, matcher);
                    break;

                case EInput.ConnectedStereoCamera:
                    // Prime the pipeline with the first stereo pair, then keep
                    // grabbing frames until the user stops the reconstruction.
                    countInputFile = DetectedKeyPoints.Count;
                    listOfInput    = GetInputFromStereoCamera(countInputFile);

                    StartDetectingKeyPoint(countInputFile, listOfInput, detector);
                    StartComputingDescriptor(countInputFile, descriptor);

                    while (!stopSFM)
                    {
                        countInputFile = DetectedKeyPoints.Count;
                        listOfInput    = GetInputFromStereoCamera(countInputFile);

                        StartDetectingKeyPoint(countInputFile, listOfInput, detector);
                        StartComputingDescriptor(countInputFile, descriptor);
                        StartStereoMatching(countInputFile, matcher);
                    }
                    break;
            }

            WriteAllMatches(FoundedMatches);
            RunVisualSFM();
        }
Example No. 9
        public void SetFeatureDetector(object sender, EventArgs e)
        {
            var currentItem = sender as ToolStripComboBox;
            var enumItem    = EnumExtension.ReturnEnumValue<EFeaturesDetector>(currentItem.SelectedItem.ToString());

            IFeatureDetector tempItem = null;

            switch (enumItem)
            {
                case EFeaturesDetector.ORB: tempItem = new OrientedFastAndRotatedBrief(); break;
                case EFeaturesDetector.FAST: tempItem = new FAST(); break;
                case EFeaturesDetector.FREAK: tempItem = new FREAK(); break;
                case EFeaturesDetector.BRIEF: tempItem = new BRIEF(); break;
                case EFeaturesDetector.CudaORB: tempItem = new CudaOrientedFastAndRotatedBrief(); break;
            }

            _sfmManager._detector = tempItem;
        }
Example No. 10
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="algorithm">The clustering algorithm to use.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint, TFeature> detector, IClusteringAlgorithm<TFeature> algorithm)
 {
     this.NumberOfWords = algorithm.Clusters.Count;
     this.Clustering    = algorithm;
     this.Detector      = detector;
 }
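A minimal end-to-end sketch of the pipeline these constructors feed. This uses the non-generic convenience form of the classic Accord.NET API (an assumption here: it defaults to a SURF detector clustered with k-means); the image paths and the word count are placeholders.

 using System.Drawing;
 using Accord.Imaging;

 static class BowDemo
 {
     static void Main()
     {
         // Placeholder training images.
         Bitmap[] images =
         {
             (Bitmap)Image.FromFile("img1.jpg"),
             (Bitmap)Image.FromFile("img2.jpg"),
         };

         // Learn a 36-word visual codebook, then project an image onto it
         // as a histogram of visual-word occurrences.
         var bow = new BagOfVisualWords(36);
         bow.Compute(images);
         double[] histogram = bow.GetFeatureVector(images[0]);
     }
 }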
Example No. 11
 /// <summary>
 /// Detect the features in the image
 /// </summary>
 /// <param name="detector">The feature detector</param>
 /// <param name="keypoints">The result vector of keypoints</param>
 /// <param name="image">The image from which the features will be detected from</param>
 /// <param name="mask">The optional mask.</param>
 public static void DetectRaw(this IFeatureDetector detector, IInputArray image, VectorOfKeyPoint keypoints, IInputArray mask = null)
 {
     using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
             CvFeatureDetectorDetectKeyPoints(detector.FeatureDetectorPtr, iaImage, keypoints.Ptr, iaMask);
 }
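The raw variant is useful when the keypoint vector should be allocated once and refilled on every call. A hedged sketch (placeholder paths; ORBDetector standing in for IFeatureDetector is an Emgu CV 3.0-era assumption):

 using System;
 using Emgu.CV;
 using Emgu.CV.Features2D;
 using Emgu.CV.Structure;
 using Emgu.CV.Util;

 static class DetectRawDemo
 {
     static void Main()
     {
         using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
         using (ORBDetector orb = new ORBDetector())
         {
             foreach (string path in new[] { "frame0.jpg", "frame1.jpg" })
             using (Image<Gray, byte> frame = new Image<Gray, byte>(path))
             {
                 // DetectRaw overwrites the vector's contents on each call,
                 // so the same buffer serves every frame.
                 orb.DetectRaw(frame, keypoints);
                 Console.WriteLine("{0}: {1} keypoints", path, keypoints.Size);
             }
         }
     }
 }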
Example No. 12
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="algorithm">The clustering algorithm to use.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint, TFeature> detector,
                         //IClusteringAlgorithm<TFeature>
                         IUnsupervisedLearning<IClassifier<TFeature, int>, TFeature, int> algorithm)
 {
     Init(detector, algorithm);
 }
Example No. 13
 /// <summary>
 /// Initializes a new instance of the <see cref="PointsOfInterestFeatureBuilder"/> class.
 /// </summary>
 /// <param name="detector">The feature detector.</param>
 /// <exception cref="ArgumentNullException">
 /// <paramref name="detector"/> is <b>null</b>.
 /// </exception>
 public PointsOfInterestFeatureBuilder(IFeatureDetector detector)
 {
     this.detector = detector ?? throw new ArgumentNullException(nameof(detector));
 }
Example No. 14
 /// <summary>
 /// Process image looking for interest points.
 /// </summary>
 /// <typeparam name="TPoint">The type of returned feature points.</typeparam>
 /// <typeparam name="TFeature">The type of extracted features.</typeparam>
 /// <param name="featureDetector">Feature detector.</param>
 /// <param name="image">Source image data to process.</param>
 /// <returns>Returns list of found interest points.</returns>
 public static List<TPoint> ProcessImage<TPoint, TFeature>(this IFeatureDetector<TPoint, TFeature> featureDetector, Image<Gray, byte> image)
     where TPoint : IFeatureDescriptor<TFeature>
 {
     return featureDetector.ProcessImage(image.ToAForgeImage(copyAlways: false, failIfCannotCast: true));
 }
Example No. 15
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="algorithm">The clustering algorithm to use.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint> detector, //IClusteringAlgorithm<double[]>
                         IUnsupervisedLearning<IClassifier<double[], int>, double[], int> algorithm)
 {
     base.Init(detector, algorithm);
 }
Example No. 16
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="numberOfWords">The number of codewords.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint> detector, int numberOfWords)
 {
     base.Init(detector, base.KMeans(numberOfWords));
 }
Example No. 17
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="algorithm">The clustering algorithm to use.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint, TFeature> detector, IClusteringAlgorithm<TFeature> algorithm)
 {
     Init(detector, algorithm);
 }
Example No. 18
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="numberOfWords">The number of codewords.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint> detector, int numberOfWords)
     : base(detector, new KMeans(numberOfWords))
 {
 }
Example No. 19
        private void StartDetectingKeyPoint(int countOfInput, List<InputFileModel> listOfInput, IFeatureDetector detector)
        {
            _winForm.SetMaximumProgressBar("Detecting keypoints", listOfInput.Count - countInputFile);

            if (_useParallel)
            {
                Parallel.For(0, listOfInput.Count, x => { FindKeypoint(countOfInput + x, listOfInput[x], detector); });
            }
            else
            {
                for (int i = 0; i < listOfInput.Count; i++)
                {
                    FindKeypoint(countOfInput + i, listOfInput[i], detector);
                }
            }

            _winForm.IncrementValueProgressBar();
        }
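A self-contained sketch of the same bounded-parallelism pattern: Example No. 1 stores a ParallelOptions instance, and the overload used below shows how it could cap the worker count. DetectOne is a hypothetical stand-in for FindKeypoint.

 using System;
 using System.Threading.Tasks;

 static class BoundedParallelDemo
 {
     static void Main()
     {
         var inputs  = new[] { "a.jpg", "b.jpg", "c.jpg" };
         var options = new ParallelOptions { MaxDegreeOfParallelism = Environment.ProcessorCount };

         // Same shape as the Parallel.For call above, with an explicit cap.
         Parallel.For(0, inputs.Length, options, i => DetectOne(i, inputs[i]));
     }

     static void DetectOne(int id, string path) =>
         Console.WriteLine("Detecting keypoints for #{0}: {1}", id, path);
 }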
Example No. 20
 /// <summary>
 ///   Constructs a new <see cref="BagOfVisualWords"/>.
 /// </summary>
 ///
 /// <param name="detector">The feature detector to use.</param>
 /// <param name="algorithm">The clustering algorithm to use.</param>
 ///
 public BagOfVisualWords(IFeatureDetector<TPoint> detector, IClusteringAlgorithm<double[]> algorithm)
     : base(detector, algorithm)
 {
 }
Example No. 21
        public static bool TestFeature2DTracker(IFeatureDetector keyPointDetector, IDescriptorExtractor descriptorGenerator)
        {
            //for (int k = 0; k < 1; k++)
            {
                Feature2D feature2D = null;
                if (keyPointDetector == descriptorGenerator)
                {
                    feature2D = keyPointDetector as Feature2D;
                }

                Image<Gray, Byte> modelImage = EmguAssert.LoadImage<Gray, byte>("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                Mat modelDescriptors            = new Mat();
                if (feature2D != null)
                {
                    feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
                }
                else
                {
                    keyPointDetector.DetectRaw(modelImage, modelKeypoints);
                    descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
                }
                stopwatch.Stop();
                EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image<Gray, Byte> observedImage = EmguAssert.LoadImage<Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset();
                stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                using (Mat observedDescriptors = new Mat())
                {
                    if (feature2D != null)
                    {
                        feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
                    }
                    else
                    {
                        keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                        descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
                    }

                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                    #endregion

                    //Merge the object image and the observed image into one big image for display
                    Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

                    Rectangle rect = modelImage.ROI;
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    HomographyMatrix homography = null;

                    stopwatch.Reset();
                    stopwatch.Start();

                    int          k  = 2;
                    DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
                    //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
                    //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                    using (BruteForceMatcher matcher = new BruteForceMatcher(dt))
                    {
                        matcher.Add(modelDescriptors);
                        matcher.KnnMatch(observedDescriptors, matches, k, null);

                        Matrix<byte> mask = new Matrix<byte>(matches.Size, 1);
                        mask.SetValue(255);
                        Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                            }
                        }
                    }
                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                    bool success = false;
                    if (homography != null)
                    {
                        PointF[] points = pts.Clone() as PointF[];
                        homography.ProjectPoints(points);

                        for (int i = 0; i < points.Length; i++)
                        {
                            points[i].Y += modelImage.Height;
                        }

                        res.DrawPolyline(
#if NETFX_CORE
                            Extensions.
#else
                            Array.
#endif
                            ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                        success = true;
                    }
                    //Emgu.CV.UI.ImageViewer.Show(res);
                    return success;
                }



                /*
                 * stopwatch.Reset(); stopwatch.Start();
                 * //set the initial region to be the whole image
                 * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                 * {
                 * priorMask.SetValue(1.0);
                 * homography = tracker.CamShiftTrack(
                 *    observedFeatures,
                 *    (RectangleF)observedImage.ROI,
                 *    priorMask);
                 * }
                 * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                 *
                 * if (homography != null) //set the initial tracking window to be the whole image
                 * {
                 * PointF[] points = pts.Clone() as PointF[];
                 * homography.ProjectPoints(points);
                 *
                 * for (int i = 0; i < points.Length; i++)
                 *    points[i].Y += modelImage.Height;
                 * res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                 * return true;
                 * }
                 * else
                 * {
                 * return false;
                 * }*/
            }
        }
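A hedged invocation sketch: in Emgu CV 3.0-era builds ORB implements both IFeatureDetector and IDescriptorExtractor, so passing the same instance twice exercises the DetectAndCompute fast path at the top of the method.

 // ORBDetector serves as both detector and descriptor generator here.
 using (ORBDetector orb = new ORBDetector(1000))
 {
     bool matched = TestFeature2DTracker(orb, orb);
     Console.WriteLine("Homography found: {0}", matched);
 }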