Example #1
 // P/Invoke binding for the native CUDA FAST constructor; in the Emgu CV
 // sources this declaration carries the standard DllImport attribute:
 [DllImport(CvInvoke.ExternLibrary, CallingConvention = CvInvoke.CvCallingConvention)]
 internal static extern IntPtr cveCudaFastFeatureDetectorCreate(
    int threshold,
    [MarshalAs(CvInvoke.BoolMarshalType)]
    bool nonmaxSupression,
    FastDetector.DetectorType type,
    int maxPoints,
    ref IntPtr feature2D,
    ref IntPtr feature2DAsync);
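For reference, the managed wrapper shown in Example #3 below calls this entry point roughly as follows; the two ref parameters receive pointers to the underlying Feature2D and Feature2DAsync interfaces (a sketch of the call, not a complete class):

 IntPtr feature2D = IntPtr.Zero;
 IntPtr feature2DAsync = IntPtr.Zero;
 // mirrors the call made inside CudaFastFeatureDetector's constructor (Example #3)
 IntPtr ptr = CudaInvoke.cveCudaFastFeatureDetectorCreate(
    10, true, FastDetector.DetectorType.Type9_16, 5000,
    ref feature2D, ref feature2DAsync);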
Example #2
        public bool testFAST(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage)
        {
            bool isFound = false;

            HomographyMatrix homography = null;

            FastDetector fastCPU = new FastDetector(10, true); // threshold = 10, non-max suppression enabled
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            BriefDescriptorExtractor descriptor = new BriefDescriptorExtractor();

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;

            //extract features from the object image
            modelKeyPoints = fastCPU.DetectKeyPointsRaw(modelImage, null);
            Matrix<Byte> modelDescriptors = descriptor.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            // extract features from the observed image
            observedKeyPoints = fastCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix<Byte> observedDescriptors = descriptor.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
            BruteForceMatcher<Byte> matcher = new BruteForceMatcher<Byte>(DistanceType.L2); // L2 works, but see the note after this example
            matcher.Add(modelDescriptors);

            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(
                        modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
               indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                 new PointF(rect.Left, rect.Bottom),
                 new PointF(rect.Right, rect.Bottom),
                 new PointF(rect.Right, rect.Top),
                 new PointF(rect.Left, rect.Top)};
                homography.ProjectPoints(pts);

                if (CvInvoke.cvCountNonZero(mask) >= 10)
                    isFound = true;

                result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.LightGreen), 5);
            }
            #endregion
            return isFound;
        }
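Note that BRIEF produces binary descriptors, so Hamming distance is usually a better match metric than the L2 used above (Example #6 takes that route on the GPU). In the Emgu 2.x API of this example the change is a single line (a sketch, assuming the same DistanceType enum):

            // Hamming distance compares binary BRIEF descriptors bit-by-bit,
            // which is both faster and more discriminative than L2 here
            BruteForceMatcher<Byte> matcher = new BruteForceMatcher<Byte>(DistanceType.Hamming);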
Example #3
 /// <summary>
 /// Create a FAST detector with the specified parameters
 /// </summary>
 /// <param name="threshold">Threshold on the difference between the intensity of the center pixel and pixels on a circle around
 /// this pixel. Use 10 as the default.</param>
 /// <param name="nonmaxSupression">Specify whether non-maximum suppression should be used.</param>
 /// <param name="maxNKeypoints">The maximum number of keypoints to be extracted.</param>
 /// <param name="type">The detector type</param>
 public CudaFastFeatureDetector(int threshold = 10, bool nonmaxSupression = true, FastDetector.DetectorType type = FastDetector.DetectorType.Type9_16, int maxNKeypoints = 5000)
 {
    _ptr = CudaInvoke.cveCudaFastFeatureDetectorCreate(threshold, nonmaxSupression, type, maxNKeypoints, ref _feature2D, ref _feature2DAsyncPtr);
 }
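A minimal usage sketch for this constructor (assuming a CUDA-capable device; the file name and the use of the inherited DetectRaw overload are illustrative, not taken from the Emgu sources):

 if (CudaInvoke.HasCuda)
 {
    using (CudaFastFeatureDetector detector = new CudaFastFeatureDetector(20, true))
    using (GpuMat gpuImage = new GpuMat())
    using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
    {
       // upload the CPU image to GPU memory, then run FAST on-device
       gpuImage.Upload(new Image<Gray, byte>("box.png"));
       detector.DetectRaw(gpuImage, keypoints);
    }
 }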
Example #4
 public void TestFreak()
 {
    FastDetector fast = new FastDetector(10, true);
    Freak freak = new Freak(true, true, 22.0f, 4); // orientationNormalized, scaleNormalized, patternScale, nOctaves
    //ParamDef[] parameters = freak.GetParams();
    //int nOctaves = freak.GetInt("nbOctave");
    EmguAssert.IsTrue(TestFeature2DTracker(fast, freak), "Unable to find homography matrix");
 }
Example #5
 public void TestFAST()
 {
    FastDetector fast = new FastDetector(10, true);
    //GridAdaptedFeatureDetector fastGrid = new GridAdaptedFeatureDetector(fast, 2000, 4, 4);
    BriefDescriptorExtractor brief = new BriefDescriptorExtractor(32);
    //ParamDef[] parameters = fastGrid.GetParams();
    EmguAssert.IsTrue(TestFeature2DTracker(fast, brief), "Unable to find homography matrix");
 }
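Both this test and Example #4 delegate to TestFeature2DTracker, a shared helper in the Emgu test suite that, judging by Example #2, runs the usual detect/describe/KNN-match/vote pipeline and asserts that a homography can be recovered.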
Example #6
      public void TestBruteForceHammingDistance()
      {
         if (CudaInvoke.HasCuda)
         {
            Image<Gray, byte> box = new Image<Gray, byte>("box.png");
            FastDetector fast = new FastDetector(100, true);
            BriefDescriptorExtractor brief = new BriefDescriptorExtractor(32);

            #region extract features from the object image
            Stopwatch stopwatch = Stopwatch.StartNew();
            VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
            fast.DetectRaw(box, modelKeypoints);
            Mat modelDescriptors = new Mat();
            brief.Compute(box, modelKeypoints, modelDescriptors);
            stopwatch.Stop();
            Trace.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            #endregion

            Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

            #region extract features from the observed image
            stopwatch.Reset(); stopwatch.Start();
            VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
            fast.DetectRaw(observedImage, observedKeypoints);
            Mat observedDescriptors = new Mat();
            brief.Compute(observedImage, observedKeypoints, observedDescriptors);
            stopwatch.Stop();
            Trace.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
            #endregion

            Mat homography = null;
            using (GpuMat<Byte> gpuModelDescriptors = new GpuMat<byte>(modelDescriptors)) //initialization of GPU code might take longer
            {
               stopwatch.Reset(); stopwatch.Start();
               CudaBFMatcher hammingMatcher = new CudaBFMatcher(DistanceType.Hamming);

               //BFMatcher hammingMatcher = new BFMatcher(BFMatcher.DistanceType.Hamming, modelDescriptors);
               int k = 2;
               // trainIdx/distance are only used here to size the mask below;
               // the actual match indices come back in the VectorOfVectorOfDMatch
               Matrix<int> trainIdx = new Matrix<int>(observedKeypoints.Size, k);
               Matrix<float> distance = new Matrix<float>(trainIdx.Size);

               using (GpuMat<Byte> gpuObservedDescriptors = new GpuMat<byte>(observedDescriptors))
               //using (GpuMat<int> gpuTrainIdx = new GpuMat<int>(trainIdx.Rows, trainIdx.Cols, 1, true))
               //using (GpuMat<float> gpuDistance = new GpuMat<float>(distance.Rows, distance.Cols, 1, true))
               using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
               {
                  Stopwatch w2 = Stopwatch.StartNew();
                  //hammingMatcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);
                  hammingMatcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k, null, true);
                  w2.Stop();
                  Trace.WriteLine(String.Format("Time for feature matching (excluding data transfer): {0} milli-sec",
                     w2.ElapsedMilliseconds));
                  //gpuTrainIdx.Download(trainIdx);
                  //gpuDistance.Download(distance);

                  Mat mask = new Mat(distance.Rows, 1, DepthType.Cv8U, 1);
                  mask.SetTo(new MCvScalar(255));
                  Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                  int nonZeroCount = CvInvoke.CountNonZero(mask);
                  if (nonZeroCount >= 4)
                  {
                     nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints,
                        matches, mask, 1.5, 20);
                     if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints,
                           observedKeypoints, matches, mask, 2);
                     nonZeroCount = CvInvoke.CountNonZero(mask);
                  }

                  stopwatch.Stop();
                  Trace.WriteLine(String.Format("Time for feature matching (including data transfer): {0} milli-sec",
                     stopwatch.ElapsedMilliseconds));
               }
            }

            if (homography != null)
            {
               Rectangle rect = box.ROI;
               PointF[] pts = new PointF[] { 
               new PointF(rect.Left, rect.Bottom),
               new PointF(rect.Right, rect.Bottom),
               new PointF(rect.Right, rect.Top),
               new PointF(rect.Left, rect.Top)};

               PointF[] points = CvInvoke.PerspectiveTransform(pts, homography);
               //homography.ProjectPoints(points);

               //Merge the object image and the observed image into one big image for display
               Image<Gray, Byte> res = box.ConcateVertical(observedImage);

               for (int i = 0; i < points.Length; i++)
                  points[i].Y += box.Height;
               res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
               //ImageViewer.Show(res);
            }
         }
      }
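Conceptually, the VoteForUniqueness call above applies Lowe's ratio test: a k = 2 match survives only when its best distance is clearly smaller than its second-best. A hand-rolled equivalent over the matches vector might look like this (illustrative only, not the toolbox implementation):

                  // keep a match only when best < 0.8 * secondBest (Lowe's ratio test)
                  MDMatch[][] knn = matches.ToArrayOfArray();
                  byte[] keep = new byte[knn.Length];
                  for (int i = 0; i < knn.Length; i++)
                     keep[i] = (knn[i][0].Distance < 0.8 * knn[i][1].Distance) ? (byte)255 : (byte)0;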
Example #7
    public void DetectFeatures()
    {
        //const int numFeatures = 15;
        //const double quality = 5000;
        //const double minDistance = 50;
        //const int blockSize = 11;

        // note, the histogram is a property of the cloud itself and is updated when the cloud is created
        // load depth image into something that emgucv likes
        Image<Gray, Byte> depthImage = new Image<Gray, byte>(depthX, depthY);

        // have to convert this to gray via luminosity
        byte[] bytes = colorizedDepth.Select(x => (byte)(0.21*x.r + 0.71*x.g + 0.07*x.b)).ToArray();

        depthImage.Bytes = bytes;

        // detect features of depth image using the fast detector
        // I don't really feel like implementing a Harris detector

        FastDetector fast = new FastDetector(10, true);

        var keyPoints = fast.DetectKeyPoints(depthImage, null); // null mask: detect over the whole image

        List<CloudPoint> cloudFeatures = new List<CloudPoint>();

        foreach (var p in keyPoints)
        {
            cloudFeatures.Add(findCloudPoint((int)p.Point.X, (int)p.Point.Y));
        }

        FeatureTree = KdTree<CloudPoint>.Construct(4, cloudFeatures, x => x.ColorLocation());
    }
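For reference, the weights in the grayscale conversion above approximate the Rec. 709 luma coefficients; the exact values would be (a sketch, assuming the same colorizedDepth element type):

        // exact Rec. 709 luma: Y = 0.2126 R + 0.7152 G + 0.0722 B
        byte[] bytes = colorizedDepth
            .Select(x => (byte)(0.2126 * x.r + 0.7152 * x.g + 0.0722 * x.b))
            .ToArray();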