 // P/Invoke bindings into Emgu CV's native extern library. Note: extern
 // methods cannot compile without a DllImport attribute; the attribute below
 // uses CvInvoke.EXTERN_LIBRARY, assumed here per Emgu CV's convention.
 [DllImport(CvInvoke.EXTERN_LIBRARY)]
 private extern static void CvSURFDetectorComputeDescriptors(
     ref MCvSURFParams detector,
     IntPtr image,
     IntPtr mask,
     IntPtr keypoints,
     IntPtr descriptors);

 [DllImport(CvInvoke.EXTERN_LIBRARY)]
 private extern static IntPtr CvSURFGetDescriptorExtractor(ref MCvSURFParams detector);

 [DllImport(CvInvoke.EXTERN_LIBRARY)]
 private extern static IntPtr CvSURFGetFeatureDetector(ref MCvSURFParams detector);
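 // These externs are normally consumed through a managed wrapper that marshals
 // Emgu CV objects to the native pointers. Below is a minimal sketch, not the
 // library's confirmed API: the wrapper name, the use of VectorOfKeyPoint
 // (Emgu.CV.Util), and the null return for an empty keypoint set are
 // illustrative assumptions.
 public Matrix<float> ComputeDescriptorsRaw(Image<Gray, Byte> image, Image<Gray, Byte> mask, VectorOfKeyPoint keyPoints)
 {
     // SURF descriptors are 64 floats wide, or 128 when the Extended flag is set
     int descriptorSize = (_surfParams.Extended == 0) ? 64 : 128;
     if (keyPoints.Size == 0)
         return null;
     Matrix<float> descriptors = new Matrix<float>(keyPoints.Size, descriptorSize);
     CvSURFDetectorComputeDescriptors(
         ref _surfParams,
         image.Ptr,
         mask == null ? IntPtr.Zero : mask.Ptr,
         keyPoints.Ptr,
         descriptors.Ptr);
     return descriptors;
 }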
 /// <summary>
 /// Create an OpenCL SURF detector, borrowing its parameters from an existing SURF detector
 /// </summary>
 /// <param name="detector">The SURF detector from which the parameters will be borrowed</param>
 /// <param name="featuresRatio">Max features = featuresRatio * img.size().area(). Use 0.01 for the default</param>
 public OclSURFDetector(MCvSURFParams detector, float featuresRatio)
     : this((float)detector.HessianThreshold, detector.NOctaves, detector.NOctaveLayers, (detector.Extended != 0), featuresRatio, (detector.Upright != 0))
 {
 }
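 // Illustrative usage sketch (an assumption, not from the original source),
 // shown as comments since statements do not belong at class scope: reuse a
 // CPU SURF configuration on the OpenCL detector with the documented default
 // feature ratio of 0.01f.
 //
 //     MCvSURFParams surfParam = new MCvSURFParams(500, false);
 //     OclSURFDetector oclDetector = new OclSURFDetector(surfParam, 0.01f);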
        static void Run()
        {
             MCvSURFParams surfParam = new MCvSURFParams(500, false);   // hessian threshold 500, basic (64-dimensional) descriptors

             Image<Gray, Byte> modelImage = new Image<Gray, Byte>("box.png");

            //extract features from the object image
            SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref surfParam);

            //Create a SURF Tracker
            SURFTracker tracker = new SURFTracker(modelFeatures);

             Image<Gray, Byte> observedImage = new Image<Gray, Byte>("box_in_scene.png");

            Stopwatch watch = Stopwatch.StartNew();

            // extract features from the observed image
            SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref surfParam);

             // match each observed feature to its 2 nearest model features (emax = 20 for the k-d tree search)
             SURFTracker.MatchedSURFFeature[] matchedFeatures = tracker.MatchFeature(imageFeatures, 2, 20);
             // keep only matches whose best candidate clearly beats the second best (uniqueness threshold 0.8)
             matchedFeatures = SURFTracker.VoteForUniqueness(matchedFeatures, 0.8);
             // drop matches that disagree with the dominant scale (increment 1.5) and rotation (20 bins)
             matchedFeatures = SURFTracker.VoteForSizeAndOrientation(matchedFeatures, 1.5, 20);
             // estimate the homography mapping the model onto the observed scene
             HomographyMatrix homography = SURFTracker.GetHomographyMatrixFromMatchedFeatures(matchedFeatures);

            watch.Stop();

            //Merge the object image and the observed image into one image for display
             Image<Gray, Byte> res = modelImage.ConcateVertical(observedImage);

            #region draw lines between the matched features
            foreach (SURFTracker.MatchedSURFFeature matchedFeature in matchedFeatures)
            {
                 PointF p = matchedFeature.ObservedFeature.Point.pt;
                 p.Y += modelImage.Height; // shift into the observed image's half of the stacked result
                 res.Draw(new LineSegment2DF(matchedFeature.SimilarFeatures[0].Feature.Point.pt, p), new Gray(0), 1);
            }
            #endregion

            #region draw the project region on the image
            if (homography != null)
            { //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                 PointF[] pts = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                 // shift the projected corners into the observed image's half of the stacked result
                 for (int i = 0; i < pts.Length; i++)
                 {
                     pts[i].Y += modelImage.Height;
                 }

                 res.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Gray(255.0), 5);
            }
            #endregion

            ImageViewer.Show(res, String.Format("Matched in {0} milliseconds", watch.ElapsedMilliseconds));
        }
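         // The Run() example above assumes the following using directives; the
         // exact namespaces are an assumption based on Emgu CV 2.x conventions:
         //
         //     using System;
         //     using System.Diagnostics;
         //     using System.Drawing;
         //     using Emgu.CV;
         //     using Emgu.CV.Structure;
         //     using Emgu.CV.UI;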
 /// <summary>
 /// Create a SURF detector with the specified SURF parameters
 /// </summary>
 /// <param name="surfParams">The SURF parameters</param>
 public SURFDetector(MCvSURFParams surfParams)
 {
     _surfParams = surfParams;
     _ptr        = CvInvoke.CvSURFDetectorCreate(ref surfParams, ref _featureDetectorPtr, ref _descriptorExtractorPtr);
 }
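 // Illustrative usage sketch, shown as comments: construct the detector from
 // the same parameters used in the Run() example. DetectKeyPoints is an
 // assumed Emgu CV keypoint-detection method, not a confirmed signature.
 //
 //     MCvSURFParams surfParam = new MCvSURFParams(500, false);
 //     SURFDetector surfDetector = new SURFDetector(surfParam);
 //     MKeyPoint[] keyPoints = surfDetector.DetectKeyPoints(modelImage, null);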