Inheritance: Emgu.Util.UnmanagedObject, IInputOutputArray, ISerializable
Example #1
      public StopSignDetector(IInputArray stopSignModel)
      {
         _detector = new SURF(500);
         using (Mat redMask = new Mat())
         {
            GetRedPixelMask(stopSignModel, redMask);
            _modelKeypoints = new VectorOfKeyPoint();
            _modelDescriptors = new Mat();
            _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
            if (_modelKeypoints.Size == 0)
               throw new Exception("No image feature has been found in the stop sign model");
         }

         _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
         _modelDescriptorMatcher.Add(_modelDescriptors);

         _octagon = new VectorOfPoint(
            new Point[]
            {
               new Point(1, 0),
               new Point(2, 0),
               new Point(3, 1),
               new Point(3, 2),
               new Point(2, 3),
               new Point(1, 3),
               new Point(0, 2),
               new Point(0, 1)
            });
      }
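A minimal construction sketch for the detector above; the file name is hypothetical:

    // Load a stop-sign template image (hypothetical path) and build the detector.
    using (Mat stopSignModel = CvInvoke.Imread("stop-sign-model.png", ImreadModes.Color))
    {
        StopSignDetector detector = new StopSignDetector(stopSignModel);
        // The detector now holds the model's SURF keypoints and a BFMatcher ready for matching.
    }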
Example #2
      public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
      {
         int k = 2;
         double uniquenessThreshold = 0.80;

         Stopwatch watch;
         homography = null;

         modelKeyPoints = new VectorOfKeyPoint();
         observedKeyPoints = new VectorOfKeyPoint();

         using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
         using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
         {
            KAZE featureDetector = new KAZE();

            //extract features from the object image
            Mat modelDescriptors = new Mat();
            featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            Mat observedDescriptors = new Mat();
            featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            
            matcher.Add(modelDescriptors);

            matcher.KnnMatch(observedDescriptors, matches, k, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);
            if (nonZeroCount >= 4)
            {
               nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                  matches, mask, 1.5, 20);
               if (nonZeroCount >= 4)
                  homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                     observedKeyPoints, matches, mask, 2);
            }

            watch.Stop();

         }
         matchTime = watch.ElapsedMilliseconds;
      }
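A minimal calling sketch for the method above; the image paths and the surrounding class are assumptions:

    using (Mat model = CvInvoke.Imread("model.png", ImreadModes.Grayscale))
    using (Mat observed = CvInvoke.Imread("scene.png", ImreadModes.Grayscale))
    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
    {
        long matchTime;
        VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
        Mat mask, homography;
        FindMatch(model, observed, out matchTime, out modelKeyPoints, out observedKeyPoints,
            matches, out mask, out homography);
        // homography stays null when fewer than 4 matches survive the voting steps.
    }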
Example #3
        private static void FindMatch(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, SurfSettings surfSettings, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix<int> indices, out Matrix<byte> mask, out HomographyMatrix homography)
        {
            #region SURF Detector Region
            double hessianThresh = 500;
            double uniquenessThreshold = 0.8;

            if (surfSettings != null)
            {
                hessianThresh = surfSettings.HessianThresh.Value;
                uniquenessThreshold = surfSettings.UniquenessThreshold.Value;
            }

            SURFDetector surfCPU = new SURFDetector(hessianThresh, false);
            #endregion

            int k = 2;
            Stopwatch watch;
            homography = null;

            //extract features from the object image
            modelKeyPoints = new VectorOfKeyPoint();
            Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = new VectorOfKeyPoint();
            Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
            BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(modelDescriptors);

            indices = new Matrix<int>(observedDescriptors.Rows, k);
            using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix<byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }

            watch.Stop();

            matchTime = watch.ElapsedMilliseconds;
        }
Example #4
        public float classify(Image<Bgr, Byte> predImg)
        {
            using (SURF detector = new SURF(30))
            using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
            using (Image<Gray, Byte> testImgGray = predImg.Convert<Gray, Byte>())
            using (VectorOfKeyPoint testKeyPoints = new VectorOfKeyPoint())
            using (Mat testBOWDescriptor = new Mat())
            using (bowDE = new BOWImgDescriptorExtractor(detector, matcher))
            {
                float result = 0;
                bowDE.SetVocabulary(vocabulary);
                detector.DetectRaw(predImg, testKeyPoints, null);
                bowDE.Compute(predImg, testKeyPoints, testBOWDescriptor);
                if (!testBOWDescriptor.IsEmpty)
                    result = svmClassifier.Predict(testBOWDescriptor);

                //the result indicates which training label (e.g. 1 or 2) the test image belongs to
                return result;
            }
        }
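A hypothetical call site, assuming the instance has already been trained (vocabulary and svmClassifier populated):

    using (Image<Bgr, Byte> testImage = new Image<Bgr, Byte>("test.jpg"))
    {
        float label = classify(testImage);
        // label corresponds to one of the training labels (e.g. 1 or 2).
    }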
Example #5
        public Mat Calculate(Bitmap referenceBitmap, Bitmap currentBitmap)
        {
            Mat homography;
            using (var detector = new SURF(threshold))
            using (var model = new Image<Gray, byte>(referenceBitmap))
            using (var modelMat = model.Mat.ToUMat(AccessType.Read))
            using (var modelKeyPoints = new VectorOfKeyPoint())
            using (var modelDescriptors = new UMat())
            using (var observed = new Image<Gray, byte>(currentBitmap))
            using (var observedMat = observed.Mat.ToUMat(AccessType.Read))
            using (var observedKeyPoints = new VectorOfKeyPoint())
            using (var observedDescriptors = new UMat())
            using (var matcher = new BFMatcher(DistanceType.L2))
            using (var matches = new VectorOfVectorOfDMatch())
            {
                detector.DetectAndCompute(modelMat, null, modelKeyPoints, modelDescriptors, false);
                detector.DetectAndCompute(observedMat, null, observedKeyPoints, observedDescriptors, false);

                matcher.Add(modelDescriptors);
                matcher.KnnMatch(observedDescriptors, matches, k, null);

                homography = TryFindHomography(modelKeyPoints, observedKeyPoints, matches);
            }

            return homography;
        }
Example #6
 public static Matrix<byte> ExtractBriefFeatureDescriptors(Emgu.CV.Image<Gray, byte> im, MKeyPoint kp)
 {
     var f = new VectorOfKeyPoint();
     f.Push(new MKeyPoint[] { kp });
     //I'm invoking this with a single point because otherwise I cannot tell which points failed to get descriptors
     return new BriefDescriptorExtractor().ComputeDescriptorsRaw(im, (Emgu.CV.Image<Gray, byte>)null, f);
 }
Example #7
 public SurfRecognizer(Image<Gray, Byte> modelImage)
 {
     surfCPU = new SURFDetector(HessianThreshold, extendedflag);
     this.modelImage = modelImage;
     modelKeyPoints = new VectorOfKeyPoint();
     Matrix<float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints); // extract information from the model image
 }
Example #8
        private Mat TryFindHomography(VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches)
        {
            var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));

            try
            {
                Features2DToolbox.VoteForUniqueness(matches, threshold, mask);

                var nonZeroCount = CvInvoke.CountNonZero(mask);

                if (nonZeroCount < 4)
                {
                    return null;
                }

                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                    matches, mask, 1.5, 20);

                if (nonZeroCount >= 4)
                {
                    return Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                        observedKeyPoints, matches, mask, 2);
                }
            }
            catch (Exception)
            {
                return null;
            }
            return null;
        }
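When a homography comes back non-null, it can be used to project the model outline into the observed image, as Example #31 below does; a hedged sketch (modelSize is an assumption):

    Mat homography = TryFindHomography(modelKeyPoints, observedKeyPoints, matches);
    if (homography != null)
    {
        PointF[] corners =
        {
            new PointF(0, 0),
            new PointF(modelSize.Width, 0),
            new PointF(modelSize.Width, modelSize.Height),
            new PointF(0, modelSize.Height)
        };
        // Map the model's corners into the observed image's coordinate frame.
        PointF[] projected = CvInvoke.PerspectiveTransform(corners, homography);
    }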
Example #9
        public Mat Calculate(Bitmap referenceBitmap, Bitmap currentBitmap)
        {
            Mat homography;

            using (var detector = new CudaSURF(threshold))
            using (var model = new Image<Gray, byte>(referenceBitmap))
            using (var observed = new Image<Gray, byte>(currentBitmap))
            using (var modelMat = new GpuMat(model))
            using (var modelKeyPointsRaw = detector.DetectKeyPointsRaw(modelMat))
            using (var modelKeyPoints = new VectorOfKeyPoint())
            using (var modelDescriptorsRaw = detector.ComputeDescriptorsRaw(modelMat, null, modelKeyPointsRaw))
            using (var observedMat = new GpuMat(observed))
            using (var observedKeyPointsRaw = detector.DetectKeyPointsRaw(observedMat))
            using (var observedKeyPoints = new VectorOfKeyPoint())
            using (var observedDescriptorsRaw = detector.ComputeDescriptorsRaw(observedMat, null, observedKeyPointsRaw))
            using (var matcher = new CudaBFMatcher(DistanceType.L2))
            using (var matches = new VectorOfVectorOfDMatch())
            {
                matcher.KnnMatch(observedDescriptorsRaw, modelDescriptorsRaw, matches, k);

                detector.DownloadKeypoints(modelKeyPointsRaw, modelKeyPoints);
                detector.DownloadKeypoints(observedKeyPointsRaw, observedKeyPoints);

                homography = TryFindHomography(modelKeyPoints, observedKeyPoints, matches);
            }

            return homography;
        }
Example #10
 /// <summary>
 /// Detect keypoints in an image and compute the descriptors on the image from the keypoint locations.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The detected keypoints will be stored in this vector</param>
 /// <param name="descriptors">The descriptors from the keypoints</param>
 /// <param name="useProvidedKeyPoints">If true, the method will skip the detection phase and will compute descriptors for the provided keypoints</param>
 public void DetectAndCompute(IInputArray image, IInputArray mask, VectorOfKeyPoint keyPoints, IOutputArray descriptors, bool useProvidedKeyPoints)
 {
    using (InputArray iaImage = image.GetInputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
    using (OutputArray oaDescriptors = descriptors.GetOutputArray())
       Feature2DInvoke.CvFeature2DDetectAndCompute(_ptr, iaImage, iaMask, keyPoints, oaDescriptors, useProvidedKeyPoints);
 }
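A short usage sketch for DetectAndCompute; the detector choice and file name are assumptions:

    using (Mat image = CvInvoke.Imread("input.png", ImreadModes.Grayscale))
    using (KAZE detector = new KAZE())
    using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
    using (Mat descriptors = new Mat())
    {
        // null mask: search the whole image; false: detect keypoints rather than reuse provided ones.
        detector.DetectAndCompute(image, null, keyPoints, descriptors, false);
    }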
Example #11
 /// <summary>
 /// Detect the Lepetit keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract Lepetit keypoints</param>
 /// <param name="maxCount">The maximum number of keypoints to be extracted, use 0 to ignore the max count</param>
 /// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
 /// <returns>The array of Lepetit keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, int maxCount, bool scaleCoords)
 {
      using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
      {
          CvLDetectorDetectKeyPoints(ref this, image, kpts, maxCount, scaleCoords);
          return kpts.ToArray();
      }
  }
Example #12
 /// <summary>
 /// Detect the Fast keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract keypoints from</param>
 /// <returns>The array of fast keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, byte> image)
 {
      using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
      {
          CvFASTKeyPoints(image, keypoints, Threshold, NonmaxSupression);
          return keypoints.ToArray();
      }
  }
Example #13
 public static void Convert(this IFeature2DAsync feature2DAsync, IInputArray gpuKeypoints,
    VectorOfKeyPoint keypoints)
 {
    using (InputArray iaGpuKeypoints = gpuKeypoints.GetInputArray())
    {
       CudaInvoke.cveCudaFeature2dAsyncConvert(feature2DAsync.Feature2DAsyncPtr, iaGpuKeypoints, keypoints);
    }
 }
Example #14
 private void calculatedescriptors(Mat image, UMat imageDescriptors, VectorOfKeyPoint imageKeyPoints)
 {
      using (UMat mImage = image.ToUMat(Emgu.CV.CvEnum.AccessType.Read))
      using (SIFT sift = new SIFT()) // dispose the detector once descriptors are computed
      {
          sift.DetectAndCompute(mImage, null, imageKeyPoints, imageDescriptors, false);
      }
  }
Example #15
 /// <summary>
 /// Detect keypoints in the CudaImage
 /// </summary>
 /// <param name="img">The image where keypoints will be detected from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(GpuMat img, GpuMat mask)
 {
    using (GpuMat tmp = DetectKeyPointsRaw(img, mask))
    using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
    {
       DownloadKeypoints(tmp, kpts);
       return kpts.ToArray();
    }
 }
Example #16
        public SURFEngine(Emgu.CV.Image<Gray, byte> roi)
        {
            surfDetector = new SURFDetector(500, false);
            itemImage = roi;

            itemKP = surfDetector.DetectKeyPointsRaw(itemImage, null);
            itemDescriptors = surfDetector.ComputeDescriptorsRaw(itemImage, null, itemKP);

            matcher = new BruteForceMatcher<float>(DistanceType.L2);
            matcher.Add(itemDescriptors);
        }
Example #17
 /// <summary>
 /// Compute the descriptor given the image and the point location
 /// </summary>
 /// <param name="image">The image where the descriptor will be computed from</param>
 /// <param name="keyPoints">The keypoint where the descriptor will be computed from. Keypoints for which a descriptor cannot be computed are removed.</param>
 /// <returns>The descriptors found at the keypoint locations</returns>
 public Matrix<Byte> ComputeDescriptorsRaw(Image<Gray, Byte> image, VectorOfKeyPoint keyPoints)
 {
     const float epsilon = 1.192092896e-07f; // smallest such that 1.0 + epsilon != 1.0
     keyPoints.FilterByImageBorder(image.Size, 48 / 2 + 9 / 2); //this value comes from opencv's BriefDescriptorExtractor::computeImpl implementation
     keyPoints.FilterByKeypointSize(epsilon, float.MaxValue);
     int count = keyPoints.Size;
     if (count == 0) return null;
     Matrix<Byte> descriptors = new Matrix<Byte>(count, DescriptorSize, 1);
     CvBriefDescriptorComputeDescriptors(_ptr, image, keyPoints, descriptors);
     Debug.Assert(keyPoints.Size == descriptors.Rows);
     return descriptors;
 }
Example #18
 /// <summary>
 /// Draw the keypoints found on the image.
 /// </summary>
 /// <param name="image">The image</param>
 /// <param name="keypoints">The keypoints to be drawn</param>
 /// <param name="color">The color used to draw the keypoints</param>
 /// <param name="type">The drawing type</param>
 /// <param name="outImage">The image with the keypoints drawn</param> 
 public static void DrawKeypoints(
    IInputArray image,
    VectorOfKeyPoint keypoints,
    IInputOutputArray outImage,
    Bgr color,
    Features2DToolbox.KeypointDrawType type)
 {
    MCvScalar c = color.MCvScalar;
    using (InputArray iaImage = image.GetInputArray())
    using (InputOutputArray ioaOutImage = outImage.GetInputOutputArray())
    CvInvoke.drawKeypoints(iaImage, keypoints, ioaOutImage, ref c, type);
 }
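A usage sketch, assuming a detector has already filled a VectorOfKeyPoint (variable names are assumptions):

    using (Mat annotated = new Mat())
    {
        // Render each detected keypoint in red on top of the source image.
        Features2DToolbox.DrawKeypoints(image, keyPoints, annotated,
            new Bgr(0, 0, 255), Features2DToolbox.KeypointDrawType.Default);
    }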
Example #19
        private void button2_Click(object sender, EventArgs e)
        {
            loadImage(textBox1.Text);
            grayImage = new Mat();
            CvInvoke.CvtColor(loadedImage, grayImage, ColorConversion.Bgr2Gray);
            originalImageDescriptors = new UMat();

            originalImageKeyPoints = new VectorOfKeyPoint();
            calculatedescriptors(loadedImage, originalImageDescriptors, originalImageKeyPoints);

            videoCompute();
        }
Example #20
 /// <summary>
  /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observerdImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 /// <param name="result">The image where model and observed image is displayed side by side. Matches are drawn as indicated by the flag</param>
 /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
 public static void DrawMatches(
    IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
    IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
    VectorOfVectorOfDMatch matches,
    IInputOutputArray result,
    MCvScalar matchColor, MCvScalar singlePointColor,
    IInputArray mask = null,
    KeypointDrawType flags = KeypointDrawType.Default)
 {
    using (InputArray iaModelImage = modelImage.GetInputArray())
    using (InputArray iaObserverdImage = observerdImage.GetInputArray())
    using (InputOutputArray ioaResult = result.GetInputOutputArray())
    using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
     CvInvoke.drawMatchedFeatures(iaObserverdImage, observedKeyPoints, iaModelImage,
        modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, iaMask, flags);
 }
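A usage sketch that visualizes the matches produced by a pipeline like Example #2 (variable names are assumptions):

    using (Mat result = new Mat())
    {
        Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
            matches, result,
            new MCvScalar(0, 255, 0),     // correspondence lines in green
            new MCvScalar(255, 255, 255), // keypoints in white
            mask);                        // draw only matches that survived the uniqueness vote
    }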
Example #21
 public bool Recognize(Image<Gray, Byte> observedImage, out PointF[] Region)
 {
     // extract features from the observed image
     observedKeyPoints = new VectorOfKeyPoint();
     Matrix<float> observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
     BruteForceMatcher<float> matcher = new BruteForceMatcher<float>(DistanceType.L2);
     matcher.Add(modelDescriptors);
     indices = new Matrix<int>(observedDescriptors.Rows, k);
     using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
     {
         matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
         mask = new Matrix<byte>(dist.Rows, 1);
         mask.SetValue(255);
         Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
     }
     int nonZeroCount = CvInvoke.cvCountNonZero(mask);
     if (nonZeroCount >= requiredNonZeroCount)
     {
         nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, scaleIncrement, RotationBins);
         if (nonZeroCount >= requiredNonZeroCount)
             homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, ransacReprojThreshold);
     }
     bool ObjectFound;
      if (homography != null)
      {
          //draw a rectangle along the projected model
          Rectangle rect = modelImage.ROI;
          Region = new PointF[] {
              new PointF(rect.Left, rect.Bottom),
              new PointF(rect.Right, rect.Bottom),
              new PointF(rect.Right, rect.Top),
              new PointF(rect.Left, rect.Top)};
         homography.ProjectPoints(Region);
         ObjectFound = true;
     }
     else
     {
         Region = null;
         ObjectFound = false;
     }
     return ObjectFound;
 }
Example #22
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            using (VectorOfFloat descs = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                CvSURFDetectorComputeDescriptors(ref this, image, mask, kpts, descs);

                int n = keyPoints.Length;
                long address = descs.StartAddress.ToInt64();

                ImageFeature[] features = new ImageFeature[n];
                int sizeOfdescriptor = extended == 0 ? 64 : 128;
                for (int i = 0; i < n; i++, address += sizeOfdescriptor * sizeof(float))
                {
                    features[i].KeyPoint = keyPoints[i];
                    float[] desc = new float[sizeOfdescriptor];
                    Marshal.Copy(new IntPtr(address), desc, 0, sizeOfdescriptor);
                    features[i].Descriptor = desc;
                }
                return features;
            }
        }
Example #23
        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The image features found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image<Gray, Byte> image, Image<Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0) return new ImageFeature[0];
            using (VectorOfFloat descVec = new VectorOfFloat())
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                CvSIFTDetectorComputeDescriptors(_ptr, image, mask, kpts, descVec);

                int n = keyPoints.Length;
                float[] descs = descVec.ToArray();

                ImageFeature[] features = new ImageFeature[n];
                int sizeOfdescriptor = DescriptorSize;
                for (int i = 0; i < n; i++)
                {
                    features[i].KeyPoint = keyPoints[i];
                    float[] d = new float[sizeOfdescriptor];
                    Array.Copy(descs, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
                    features[i].Descriptor = d;
                }
                return features;
            }
        }
Example #24
 /// <summary>
 /// Detect the keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract features from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of key points</returns>
 public VectorOfKeyPoint DetectKeyPointsRaw(Image<Gray, byte> image, Image<Gray, byte> mask)
  {
      VectorOfKeyPoint kpts = new VectorOfKeyPoint();
      CvInvoke.CvFeatureDetectorDetectKeyPoints(_ptr, image, mask, kpts);
      return kpts;
  }
Example #25
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image<Bgr, Byte> Draw(Image<Gray, Byte> modelImage, Image<Gray, byte> observedImage, out long matchTime)
        {
            Stopwatch watch;
            HomographyMatrix homography = null;

            SURFDetector surfCPU = new SURFDetector (500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix<int> indices;

            Matrix<byte> mask;
            int k = 2;
            double uniquenessThreshold = 0.8;
            if (GpuInvoke.HasCuda) {
                GpuSURFDetector surfGPU = new GpuSURFDetector (surfCPU.SURFParams, 0.01f);
                using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte> (modelImage))
                    //extract features from the object image
                using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw (gpuModelImage, null))
                using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw (gpuModelImage, null, gpuModelKeyPoints))
                using (GpuBruteForceMatcher<float> matcher = new GpuBruteForceMatcher<float> (DistanceType.L2)) {
                    modelKeyPoints = new VectorOfKeyPoint ();
                    surfGPU.DownloadKeypoints (gpuModelKeyPoints, modelKeyPoints);
                    watch = Stopwatch.StartNew ();

                    // extract features from the observed image
                    using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte> (observedImage))
                    using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw (gpuObservedImage, null))
                    using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw (gpuObservedImage, null, gpuObservedKeyPoints))
                    using (GpuMat<int> gpuMatchIndices = new GpuMat<int> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<float> gpuMatchDist = new GpuMat<float> (gpuObservedDescriptors.Size.Height, k, 1, true))
                    using (GpuMat<Byte> gpuMask = new GpuMat<byte> (gpuMatchIndices.Size.Height, 1, 1))
                    using (Stream stream = new Stream ()) {
                        matcher.KnnMatchSingle (gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                        indices = new Matrix<int> (gpuMatchIndices.Size);
                        mask = new Matrix<byte> (gpuMask.Size);

                        //GPU implementation of VoteForUniqueness
                        using (GpuMat<float> col0 = gpuMatchDist.Col (0))
                        using (GpuMat<float> col1 = gpuMatchDist.Col (1)) {
                            GpuInvoke.Multiply (col1, new MCvScalar (uniquenessThreshold), col1, stream);
                            GpuInvoke.Compare (col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                        }

                        observedKeyPoints = new VectorOfKeyPoint ();
                        surfGPU.DownloadKeypoints (gpuObservedKeyPoints, observedKeyPoints);

                        //wait for the stream to complete its tasks
                        //We can perform other CPU-intensive work here while waiting for the stream to complete.
                        stream.WaitForCompletion ();

                        gpuMask.Download (mask);
                        gpuMatchIndices.Download (indices);

                        if (GpuInvoke.CountNonZero (gpuMask) >= 4) {
                            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }

                        watch.Stop ();
                    }
                }
            } else {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw (modelImage, null);
                Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw (modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew ();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw (observedImage, null);
                Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw (observedImage, null, observedKeyPoints);
                BruteForceMatcher<float> matcher = new BruteForceMatcher<float> (DistanceType.L2);
                matcher.Add (modelDescriptors);

                indices = new Matrix<int> (observedDescriptors.Rows, k);
                using (Matrix<float> dist = new Matrix<float> (observedDescriptors.Rows, k)) {
                    matcher.KnnMatch (observedDescriptors, indices, dist, k, null);
                    mask = new Matrix<byte> (dist.Rows, 1);
                    mask.SetValue (255);
                    Features2DToolbox.VoteForUniqueness (dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero (mask);
                if (nonZeroCount >= 4) {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation (modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures (modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }

                watch.Stop ();
            }

            //Draw the matched keypoints
            Image<Bgr, Byte> result = Features2DToolbox.DrawMatches (modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                          indices, new Bgr (255, 255, 255), new Bgr (255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null) {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[] pts = new PointF[] {
                    new PointF (rect.Left, rect.Bottom),
                    new PointF (rect.Right, rect.Bottom),
                    new PointF (rect.Right, rect.Top),
                    new PointF (rect.Left, rect.Top)
                };
                homography.ProjectPoints (pts);

                result.DrawPolyline (Array.ConvertAll<PointF, Point> (pts, Point.Round), true, new Bgr (Color.Red), 5);
            }
            #endregion

            matchTime = watch.ElapsedMilliseconds;

            return result;
        }
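A hypothetical call site for the Draw helper above; the file names are assumptions:

    using (Image<Gray, Byte> model = new Image<Gray, Byte>("model.png"))
    using (Image<Gray, Byte> scene = new Image<Gray, Byte>("scene.png"))
    {
        long matchTime;
        Image<Bgr, Byte> result = Draw(model, scene, out matchTime);
        // result shows both images side by side with match lines and,
        // if a homography was found, the projected model outline in red.
    }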
Example #26
 /// <summary>
 /// Computes an image descriptor using the set visual vocabulary.
 /// </summary>
 /// <param name="image">Image, for which the descriptor is computed</param>
 /// <param name="keypoints">Key points detected in the input image.</param>
 /// <param name="imgDescriptors">The output image descriptors.</param>
 public void Compute(IInputArray image, VectorOfKeyPoint keypoints, Mat imgDescriptors)
 {
    using (InputArray iaImage = image.GetInputArray())
       BOWImgDescriptorExtractorInvoke.CvBOWImgDescriptorExtractorCompute(_ptr, iaImage, keypoints, imgDescriptors);
 }
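A short sketch of the surrounding bag-of-words flow, assuming the vocabulary was set beforehand via SetVocabulary and that detector and bowExtractor already exist (both names are assumptions):

    using (Mat image = CvInvoke.Imread("input.png", ImreadModes.Grayscale))
    using (VectorOfKeyPoint keyPoints = new VectorOfKeyPoint())
    using (Mat bowDescriptor = new Mat())
    {
        detector.DetectRaw(image, keyPoints);                  // any Feature2D detector
        bowExtractor.Compute(image, keyPoints, bowDescriptor); // 1 x vocabularySize histogram
    }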
Example #27
        /// <summary>
        /// Detect image features from the given image
        /// </summary>
        /// <param name="image">The image to detect features from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <returns>The Image features detected from the given image</returns>
        public ImageFeature[] DetectFeatures(Image<Gray, Byte> image, Image<Gray, byte> mask)
        {
            using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
            using (VectorOfFloat descVec = new VectorOfFloat())
            {
                CvSIFTDetectorDetectFeature(_ptr, image, mask, pts, descVec);
                MKeyPoint[] kpts = pts.ToArray();
                float[] desc = descVec.ToArray();
                int n = kpts.Length;
                int sizeOfdescriptor = DescriptorSize;

                ImageFeature[] features = new ImageFeature[n];
                for (int i = 0; i < n; i++)
                {
                    features[i].KeyPoint = kpts[i];
                    float[] d = new float[sizeOfdescriptor];
                    Array.Copy(desc, i * sizeOfdescriptor, d, 0, sizeOfdescriptor);
                    features[i].Descriptor = d;
                }
                return features;
            }
        }
Example #28
 public EmguType()
 {
     KeyPoints = new VectorOfKeyPoint();
     Descriptors = new UMat();
 }
Example #29
 public DebuggerProxy(VectorOfKeyPoint v)
 {
     _v = v;
 }
Example #30
 /// <summary>
 /// Detect the SIFT keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract SIFT features from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of SIFT key points</returns>
 public MKeyPoint[] DetectKeyPoints(Image<Gray, Byte> image, Image<Gray, byte> mask)
 {
      using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
      {
          CvSIFTDetectorDetectKeyPoints(_ptr, image, mask, kpts);
          return kpts.ToArray();
      }
  }
Example #31
        private void timer1_Tick(object sender, EventArgs e)
        {
            if (time == 10)
            {
                Mat frame = new Mat();
                capture.Retrieve(frame, 0);
                Mat grayVideo = new Mat();
                CvInvoke.CvtColor(frame, grayVideo, ColorConversion.Bgr2Gray);
                UMat videoDescriptors = new UMat();
                VectorOfKeyPoint videoKeyPoints = new VectorOfKeyPoint();
                calculatedescriptors(grayVideo, videoDescriptors, videoKeyPoints);
                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

                BFMatcher matcher = new BFMatcher(DistanceType.L2);
                matcher.Add(originalImageDescriptors);
                matcher.KnnMatch(videoDescriptors, matches, 2, null);
                Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));

                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
                Mat homography = null;
                int nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(originalImageKeyPoints, videoKeyPoints,
                       matches, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(originalImageKeyPoints, videoKeyPoints, matches, mask, 2);
                }

                Mat result = new Mat();
                Features2DToolbox.DrawMatches(grayImage, originalImageKeyPoints, grayVideo, videoKeyPoints,
                   matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
                if (homography != null)
                {
                    //draw a rectangle along the projected model
                    Rectangle rect = new Rectangle(Point.Empty, grayImage.Size);
                    PointF[] pts = new PointF[]
                    {
                              new PointF(rect.Left, rect.Bottom),
                              new PointF(rect.Right, rect.Bottom),
                              new PointF(rect.Right, rect.Top),
                              new PointF(rect.Left, rect.Top)
                    };
                    pts = CvInvoke.PerspectiveTransform(pts, homography);

                    Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
                    using (VectorOfPoint vp = new VectorOfPoint(points))
                    {
                        CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                    }
                    viewer.Image = result;
                }

                time = 0;
            }
            else
            {
                time++;
            }
        }
Example #32
 /// <summary>
 /// Push multiple values from the other vector into this vector
 /// </summary>
 /// <param name="other">The other vector, from which the values will be pushed to the current vector</param>
 public void Push(VectorOfKeyPoint other)
 {
     VectorOfKeyPointPushVector(_ptr, other);
 }