Wrapping class for feature detection using the goodFeaturesToTrack() function.
Inheritance: Emgu.Util.UnmanagedObject, IKeyPointDetector
Code Example #1
0
File: AutoTestImage.cs  Project: neutmute/emgucv
      /// <summary>
      /// Verifies GFTT keypoint detection: asserts that no detected keypoint has a
      /// NaN coordinate, then benchmarks the average detection time over several runs.
      /// </summary>
      public void TestGoodFeature()
      {
         using (GFTTDetector detector = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04))
         using (Mat img = EmguAssert.LoadMat("stuff.jpg"))
         {
            var keypoints = detector.Detect(img);
            int nanCount = 0;
            foreach (MKeyPoint p in keypoints)
            {
               // Check for NaN BEFORE drawing: Point.Round on a NaN coordinate
               // throws, which would abort the loop and mask the very defect
               // (NaN keypoints) this test is supposed to count.
               if (float.IsNaN(p.Point.X) || float.IsNaN(p.Point.Y))
               {
                  nanCount++;
                  continue;
               }
               CvInvoke.Circle(img, Point.Round(p.Point), 3, new Bgr(255, 0, 0).MCvScalar, 1);
            }

            System.Diagnostics.Debug.WriteLine(String.Format("NanCount: {0}", nanCount));
            EmguAssert.IsTrue(nanCount == 0);
            //ImageViewer.Show(img);
         }

         using (GFTTDetector detector = new GFTTDetector())
         using (Image<Bgr, Byte> img = EmguAssert.LoadImage<Bgr, Byte>("stuff.jpg"))
         {
            Stopwatch watch = Stopwatch.StartNew();
            int runs = 10;
            for (int i = 0; i < runs; i++)
            {
               var pts = detector.Detect(img);
            }
            watch.Stop();
            // Integer division is fine here: a rough per-run millisecond average.
            EmguAssert.WriteLine(String.Format("Avg time to extract good features from image of {0}: {1}", img.Size, watch.ElapsedMilliseconds / runs));
         }
      }
Code Example #2
0
File: AutoTestFeatures2d.cs  Project: Delaley/emgucv
 /// <summary>
 /// Runs the Feature2D tracking test using GFTT keypoints paired with SIFT descriptors.
 /// </summary>
 public void TestGFTTDetector()
 {
    // Both the detector and the descriptor generator wrap unmanaged OpenCV
    // objects; dispose them deterministically instead of leaking to the finalizer.
    using (GFTTDetector keyPointDetector = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04))
    using (SIFT descriptorGenerator = new SIFT())
    {
       //ParamDef[] parameters = keyPointDetector.GetParams();
       TestFeature2DTracker(keyPointDetector, descriptorGenerator);
    }
 }
Code Example #3
-2
        /// <summary>
        /// Extracts grayscale frames from the given video file and computes
        /// Lucas-Kanade optical flow between consecutive frames, persisting
        /// detected features and propagated point sets as JSON ".dat" files
        /// alongside each frame image.
        /// </summary>
        /// <param name="initFile">Path of the video file to initialize from.</param>
        public void InitOriginalVideo(string initFile)
        {
            //Capture Image
            OutputPath = _defaultInitVideoPath;

            List<string> grayImgList = CatchImages(initFile, 0, OutputPath);

            // Need at least three frames to form consecutive pairs for flow.
            if (grayImgList.Count < 3)
            {
                return;
            }

            //Get the Optical flow of L-K feature
            // Mask built from the first frame; reused for every detection below.
            // NOTE(review): the original leaked this image and both per-iteration
            // frames (unmanaged memory) — all are now disposed deterministically.
            using (Image<Gray, Byte> mask = new Image<Gray, Byte>(grayImgList.First()))
            {
                for (int i = 1; i < grayImgList.Count - 1; i++)
                {
                    using (Image<Gray, Byte> grayImage1 = new Image<Gray, Byte>(grayImgList[i]))
                    using (Image<Gray, Byte> grayImage2 = new Image<Gray, Byte>(grayImgList[i + 1]))
                    {
                        EmguType features1 = SURFFeatureDetect(grayImage1, mask);

                        Utils.WriteJsonFile(features1, grayImgList[i] + ".dat");

                        Size winsize = new Size(grayImage1.Width, grayImage1.Height);
                        int maxLevel = 1; // if 0, winsize is not used
                        MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

                        try
                        {
                            // The two branches only differ in how the input point set
                            // is obtained; the flow computation itself is shared.
                            VectorOfPointF gdvp1;
                            if (i % Constants.DETECTIVE_GROUP_COUNT == 1)
                            {
                                // Start of a detection group: detect fresh GFTT corners.
                                using (GFTTDetector gd = new GFTTDetector())
                                {
                                    MKeyPoint[] gdkp = gd.Detect(grayImage1, mask);
                                    gdvp1 = new VectorOfPointF(gdkp.Select(x => x.Point).ToArray());
                                }
                            }
                            else
                            {
                                // Mid-group: continue tracking the points propagated
                                // from the previous frame's "pp.dat" output.
                                gdvp1 = Utils.ReadJsonFile<VectorOfPointF>(grayImgList[i - 1] + "pp.dat");
                            }

                            using (gdvp1)
                            using (VectorOfPointF gdvp2 = new VectorOfPointF(gdvp1.Size))
                            using (VectorOfByte vstatus = new VectorOfByte(gdvp1.Size))
                            using (VectorOfFloat verr = new VectorOfFloat(gdvp1.Size))
                            {
                                CvInvoke.CalcOpticalFlowPyrLK(grayImage1, grayImage2, gdvp1, gdvp2, vstatus, verr, winsize, maxLevel, criteria);
                                Utils.WriteJsonFile(gdvp2, grayImgList[i] + "pp.dat");
                            }
                        }
                        catch (Exception e)
                        {
                            // Log the full exception (type + message + stack trace),
                            // not just Message, so failures are diagnosable.
                            _log.Debug("error: " + e);
                        }
                    }
                }
            }

            /*
            //Get SIFT Feature
            foreach (string grayImgPath in grayImgList)
            {
                Image<Gray, Byte> grayImage = new Image<Gray, Byte>(grayImgPath);
                //List<SiftFeature> features = SiftFeatureDetect(image: grayImage, showDetail: true);

                //Image<Gray, float> grayImage = new Image<Gray, float>(grayImgPath);
                //List<Feature> features = SiftFeatureDetect(grayImage);

                EmguType features = SURFFeatureDetect(grayImage);

                Utils.WriteJsonFile(features, grayImgPath + ".dat");
            }
            */

            _initSuccess = true;
            OutputPath = string.Empty;
        }