public static extern void cvExtractSURF(
  IntPtr image, IntPtr mask,
  ref IntPtr keypoints,
  ref IntPtr descriptors,
  IntPtr storage,
  MCvSURFParams parameters,
  int useProvidedKeyPoints);
Example #2
        static void Run()
        {
            MCvSURFParams surfParam = new MCvSURFParams(500, false);

            Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");
            //extract features from the object image
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref surfParam);

            //Create a SURF Tracker
            SURFTracker tracker = new SURFTracker(modelFeatures);

            Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

            Stopwatch watch = Stopwatch.StartNew();
            //extract features from the observed image
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref surfParam);

            //match observed features to the model, then filter the matches by uniqueness, size and orientation
            SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2, 20);
            matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, 0.8);
            matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
            HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);
            watch.Stop();

            //Merge the object image and the observed image into one image for display
            Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

            #region draw lines between the matched features
            foreach (SURFTracker.MatchedSURFFeature matchedFeature in matchedFeatures)
            {
                PointF p = matchedFeature.ObservedFeature.Point.pt;
                p.Y += modelImage.Height;
                res.Draw(new LineSegment2DF(matchedFeature.SimilarFeatures[0].Feature.Point.pt, p), new Gray(0), 1);
            }
            #endregion

            #region draw the projected region on the image
            if (homography != null)
            {
                //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                for (int i = 0; i < pts.Length; i++)
                    pts[i].Y += modelImage.Height;

                res.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Gray(255.0), 5);
            }
            #endregion

            ImageViewer.Show(res, String.Format("Matched in {0} milliseconds", watch.ElapsedMilliseconds));
        }
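Note: the MatchFeature / VoteForUniqueness / VoteForSizeAndOrientation / GetHomographyMatrixFromMatchedFeatures chain above can also be driven through SURFTracker.Detect (used in Example #14 below), which bundles the same match-and-vote steps; a minimal sketch, assuming the tracker and imageFeatures from this example:

            //0.8 is the same uniqueness threshold used above
            HomographyMatrix homography2 = tracker.Detect(imageFeatures, 0.8);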
Example #3
 public StopSignDetector()
 {
     _surfParam = new MCvSURFParams(500, false);
     using (Image<Bgr, Byte> stopSignModel = new Image<Bgr, Byte>("stop-sign-model.png"))
     using (Image<Gray, Byte> redMask = GetRedPixelMask(stopSignModel))
     {
         _tracker = new SURFTracker(redMask.ExtractSURF(ref _surfParam));
     }
     _octagonStorage = new MemStorage();
     _octagon = new Contour<Point>(_octagonStorage);
     _octagon.PushMulti(new Point[] {
         new Point(1, 0),
         new Point(2, 0),
         new Point(3, 1),
         new Point(3, 2),
         new Point(2, 3),
         new Point(1, 3),
         new Point(0, 2),
         new Point(0, 1)},
         Emgu.CV.CvEnum.BACK_OR_FRONT.FRONT);
 }
Example #4
      /// <summary>
      /// Finds robust features in the image. For each feature it returns its location, size, orientation and, optionally, the descriptor (basic or extended). The function can be used for object tracking and localization, image stitching, etc.
      /// </summary>
      /// <param name="image">The input 8-bit grayscale image</param>
      /// <param name="mask">The optional input 8-bit mask. The features are only found in the areas that contain more than 50% of non-zero mask pixels</param>
      /// <param name="keypoints">The output parameter; double pointer to the sequence of keypoints. This will be the sequence of MCvSURFPoint structures</param>
      /// <param name="descriptors">The optional output parameter; double pointer to the sequence of descriptors; Depending on the params.extended value, each element of the sequence will be either 64-element or 128-element floating-point (CV_32F) vector. If the parameter is IntPtr.Zero, the descriptors are not computed</param>
      /// <param name="storage">Memory storage where keypoints and descriptors will be stored</param>
      /// <param name="parameters">Various algorithm parameters put to the structure CvSURFParams</param>
      /// <param name="useProvidedKeyPoints">If 1, the provided key points are locations for computing SURF descriptors</param>
#if ANDROID
      public static void cvExtractSURF(
         IntPtr image, IntPtr mask,
         ref IntPtr keypoints,
         ref IntPtr descriptors,
         IntPtr storage,
         MCvSURFParams parameters,
         int useProvidedKeyPoints)
      {
         cvExtractSURF(image, mask, ref keypoints, ref descriptors, storage, 
            parameters.Extended, parameters.Upright, parameters.HessianThreshold, parameters.NOctaves, parameters.NOctaveLayers,
            useProvidedKeyPoints);
      }
#endif
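A minimal calling sketch for this entry point, assuming Emgu CV 2.x where cvExtractSURF is exposed on CvInvoke and "box.png" stands in for any 8-bit grayscale input:

using (Image<Gray, Byte> img = new Image<Gray, byte>("box.png"))
using (MemStorage storage = new MemStorage())
{
   MCvSURFParams surfParams = new MCvSURFParams(500, false);
   IntPtr keypointsPtr = IntPtr.Zero;   //filled with a CvSeq of MCvSURFPoint
   IntPtr descriptorsPtr = IntPtr.Zero; //filled with a parallel CvSeq of float descriptor vectors
   //no mask, no pre-supplied key points; both sequences are allocated inside 'storage'
   CvInvoke.cvExtractSURF(img.Ptr, IntPtr.Zero, ref keypointsPtr, ref descriptorsPtr, storage.Ptr, surfParams, 0);
}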
Example #5
 public void TestRTreeClassifier()
 {
     using (Image<Bgr, Byte> image = new Image<Bgr, byte>("box_in_scene.png"))
     using (Image<Gray, Byte> gray = image.Convert<Gray, byte>())
     using (RTreeClassifier<Bgr> classifier = new RTreeClassifier<Bgr>())
     {
         MCvSURFParams surf = new MCvSURFParams(300, false);
         MKeyPoint[] keypoints = surf.DetectKeyPoints(gray, null);
         Point[] points = Array.ConvertAll<MKeyPoint, Point>(keypoints, delegate(MKeyPoint kp) { return Point.Round(kp.Point); });
         Stopwatch watch = Stopwatch.StartNew();
         classifier.Train(image, points, 48, 9, 50, 176, 4);
         watch.Stop();
         Trace.WriteLine(String.Format("Training time: {0} milliseconds", watch.ElapsedMilliseconds));
         float[] signiture = classifier.GetSigniture(image, points[0], 15);
         Assert.AreEqual(signiture.Length, classifier.NumberOfClasses);
     }
 }
Example #6
      static void Run()
      {
         Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");

         #region extract features from the object image
         MCvSURFParams param1 = new MCvSURFParams(500, false);
         SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
         SURFFeature[] modelFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] modelFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });

         //Create feature trees for the given features
         FeatureTree featureTreePositiveLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesPositiveLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         FeatureTree featureTreeNegativeLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesNegativeLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         #endregion

         Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

         #region extract features from the observed image
         MCvSURFParams param2 = new MCvSURFParams(500, false);
         SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref param2);
         SURFFeature[] imageFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] imageFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });
         #endregion

         #region Merge the object image and the observed image into one image for display
         Image<Gray, Byte> res = new Image<Gray, byte>(Math.Max(modelImage.Width, observedImage.Width), modelImage.Height + observedImage.Height);
         res.ROI = new System.Drawing.Rectangle(0, 0, modelImage.Width, modelImage.Height);
         modelImage.Copy(res, null);
         res.ROI = new System.Drawing.Rectangle(0, modelImage.Height, observedImage.Width, observedImage.Height);
         observedImage.Copy(res, null);
         res.ROI = Rectangle.Empty;
         #endregion

         double matchDistanceRatio = 0.8;
         List<PointF> modelPoints = new List<PointF>();
         List<PointF> observePoints = new List<PointF>();

         #region using Feature Tree to match feature
         Matrix<float>[] imageFeatureDescriptorsPositiveLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesPositiveLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<float>[] imageFeatureDescriptorsNegativeLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesNegativeLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<Int32> result1;
         Matrix<double> dist1;

         featureTreePositiveLaplacian.FindFeatures(imageFeatureDescriptorsPositiveLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
           modelFeaturesPositiveLaplacian,
           imageFeaturesPositiveLaplacian,
           matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);

         featureTreeNegativeLaplacian.FindFeatures(imageFeatureDescriptorsNegativeLaplacian, out result1, out dist1, 2, 20);
          MatchSURFFeatureWithFeatureTree(
            modelFeaturesNegativeLaplacian,
            imageFeaturesNegativeLaplacian,
            matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);
         #endregion

         Matrix<float> homographyMatrix = CameraCalibration.FindHomography(
            modelPoints.ToArray(), //points on the object image
            observePoints.ToArray(), //points on the observed image
            HOMOGRAPHY_METHOD.RANSAC,
            3).Convert<float>();

         #region draw the projected object in observed image
         for (int i = 0; i < modelPoints.Count; i++)
         {
            PointF p = observePoints[i];
            p.Y += modelImage.Height;
            res.Draw(new LineSegment2DF(modelPoints[i], p), new Gray(0), 1);
         }

         System.Drawing.Rectangle rect = modelImage.ROI;
          Matrix<float> originalCornerCoordinate = new Matrix<float>(new float[,] 
            {{  rect.Left, rect.Bottom, 1.0f},
               { rect.Right, rect.Bottom, 1.0f},
               { rect.Right, rect.Top, 1.0f},
               { rect.Left, rect.Top, 1.0f}});

          Matrix<float> destCornerCoordinate = homographyMatrix * originalCornerCoordinate.Transpose();
         float[,] destCornerCoordinateArray = destCornerCoordinate.Data;

         Point[] destCornerPoints = new Point[4];
         for (int i = 0; i < destCornerPoints.Length; i++)
         {
            float denominator = destCornerCoordinateArray[2, i];
            destCornerPoints[i] = new Point(
               (int)(destCornerCoordinateArray[0, i] / denominator),
               (int)(destCornerCoordinateArray[1, i] / denominator) + modelImage.Height);
         }

         res.DrawPolyline(destCornerPoints, true, new Gray(255.0), 5);
         #endregion

         ImageViewer.Show(res);
      }
 /// <summary>
 /// Create a SURF detector with the specified SURF parameters
 /// </summary>
 /// <param name="surfParams">The surf parameters</param>
 public SURFDetector(MCvSURFParams surfParams)
 {
     _surfParams = surfParams;
     _featureDetectorPtr = CvSURFGetFeatureDetector(ref _surfParams);
     _descriptorExtractorPtr = CvSURFGetDescriptorExtractor(ref _surfParams);
 }
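A hedged usage sketch for this constructor, assuming the Emgu CV 2.x SURFDetector that exposes key point detection ("box.png" is a placeholder image):

 MCvSURFParams surfParams = new MCvSURFParams(500, false);
 SURFDetector surfDetector = new SURFDetector(surfParams);
 using (Image<Gray, Byte> img = new Image<Gray, byte>("box.png"))
 {
     //detect key points through the wrapped native feature detector
     MKeyPoint[] keyPoints = surfDetector.DetectKeyPoints(img, null);
 }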
 //P/Invoke entry points into the native Emgu CV SURF wrapper (DllImport attributes omitted in this excerpt)
 private static extern IntPtr CvSURFGetFeatureDetector(ref MCvSURFParams detector);
 private static extern IntPtr CvSURFGetDescriptorExtractor(ref MCvSURFParams detector);
 private static extern void CvSURFDetectorComputeDescriptorsBGR(
  ref MCvSURFParams detector,
  IntPtr image,
  IntPtr keypoints,
  IntPtr descriptors);
 /// <summary>
 /// Create a GPU SURF detector whose parameters are borrowed from the given SURF detector
 /// </summary>
 /// <param name="detector">The SURF detector from which the parameters will be borrowed</param>
 /// <param name="featuresRatio">Max features = featuresRatio * img.size().area(); use 0.01 for the default</param>
 public GpuSURFDetector(MCvSURFParams detector, float featuresRatio)
     : this((float)detector.HessianThreshold, detector.NOctaves, detector.NOctaveLayers, detector.Extended, featuresRatio, detector.Upright)
 {
 }
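A hedged construction sketch, assuming Emgu CV's GPU module and a CUDA-capable device are available:

 MCvSURFParams cpuParams = new MCvSURFParams(500, false);
 //borrow the CPU parameters; 0.01f is the documented default features ratio
 GpuSURFDetector gpuSurf = new GpuSURFDetector(cpuParams, 0.01f);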
Example #12
        private static bool MatchImages(Image<Gray, Byte> observedImage, Bitmap reference)
        {
            MCvSURFParams surfParam = new MCvSURFParams(500, false);
            Image<Gray, Byte> modelImage = new Image<Gray, byte>(reference);

            //extract features from the object image
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref surfParam);

            //Create a SURF Tracker
            SURFTracker tracker = new SURFTracker(modelFeatures);

            // extract features from the observed image
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref surfParam);

            SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2, 20);
            matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, accuracy); //'accuracy' is a threshold field defined outside this snippet (comparable examples use 0.8)
            matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
            HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);

            //a homography can only be computed when the images match
            return homography != null;
        }
Example #13
        public void TestSURFDetector()
        {
            Image<Gray, byte> box = new Image<Gray, byte>("box.png");
            MCvSURFParams detector = new MCvSURFParams(400, false);

            Stopwatch watch = Stopwatch.StartNew();
            ImageFeature[] features1 = detector.DetectFeatures(box, null);
            watch.Stop();
            Trace.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset(); watch.Start();
            MKeyPoint[] keypoints = detector.DetectKeyPoints(box, null);
            ImageFeature[] features2 = detector.ComputeDescriptors(box, null, keypoints);
            watch.Stop();
            Trace.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset(); watch.Start();
            SURFFeature[] features3 = box.ExtractSURF(ref detector);
            watch.Stop();
            Trace.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            PointF[] pts = Array.ConvertAll<MKeyPoint, PointF>(keypoints, delegate(MKeyPoint mkp) { return mkp.Point; });
            //SURFFeature[] features = box.ExtractSURF(pts, null, ref detector);
            //int count = features.Length;

            //the one-shot and the two-step detection paths should agree
            for (int i = 0; i < features1.Length; i++)
            {
                Assert.AreEqual(features1[i].KeyPoint.Point, features2[i].KeyPoint.Point);
                float[] d1 = features1[i].Descriptor;
                float[] d2 = features2[i].Descriptor;

                for (int j = 0; j < d1.Length; j++)
                    Assert.AreEqual(d1[j], d2[j]);
            }

            foreach (MKeyPoint kp in keypoints)
            {
                box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
            }
        }
Example #14
        public void TestSURF()
        {
            for (int k = 0; k < 1; k++)
            {
                Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch stopwatch = Stopwatch.StartNew();
                MCvSURFParams param1 = new MCvSURFParams(500, false);
                SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
                SURFTracker tracker = new SURFTracker(modelFeatures);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time to extract features from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("left.jpg");
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();

                #region extract features from the observed image
                stopwatch.Reset(); stopwatch.Start();
                MCvSURFParams param2 = new MCvSURFParams(500, false);
                SURFFeature[] observedFeatures = observedImage.ExtractSURF(ref param2);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time to extract features from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Merge the object image and the observed image into one big image for display
                Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)};

                HomographyMatrix homography;

                stopwatch.Reset(); stopwatch.Start();
                homography = tracker.Detect(observedFeatures, 0.8);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                if (homography != null)
                {
                    PointF[] points = pts.Clone() as PointF[];
                    homography.ProjectPoints(points);

                    for (int i = 0; i < points.Length; i++)
                        points[i].Y += modelImage.Height;
                    res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                }

                stopwatch.Reset(); stopwatch.Start();
                //set the initial tracking region to be the whole image
                using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                {
                    priorMask.SetValue(1.0);
                    homography = tracker.CamShiftTrack(
                        observedFeatures,
                        (RectangleF)observedImage.ROI,
                        priorMask);
                }
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                if (homography != null)
                {
                    PointF[] points = pts.Clone() as PointF[];
                    homography.ProjectPoints(points);

                    for (int i = 0; i < points.Length; i++)
                        points[i].Y += modelImage.Height;
                    res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                }
            }
        }