public void Bootstrap(Mat img)
        {
            ValidateImages(null, img);

            _bootstrapKp.Clear();
            _detector.DetectRaw(img, _bootstrapKp);

            _trackedFeatures = new VectorOfKeyPoint(_bootstrapKp.ToArray());

            _trackedFeatures3D.Clear();

            CvInvoke.CvtColor(img, _prevGray, ColorConversion.Bgr2Gray);
        }
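
A minimal usage sketch for the bootstrap step above (Emgu CV); the `SfmTracker` wrapper class and the initialization of its fields are assumed, not part of the original code:

// Hypothetical caller: feed the first BGR frame to Bootstrap() to seed the tracked features.
using (Mat firstFrame = CvInvoke.Imread("frame_000.png", ImreadModes.Color))
{
    var tracker = new SfmTracker();   // assumed class that owns _detector, _bootstrapKp, _prevGray, ...
    tracker.Bootstrap(firstFrame);    // detects initial keypoints and caches the grayscale frame
}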
Example #2
        /// <summary>
        /// Convert the raw keypoints and descriptors to ImageFeature
        /// </summary>
        /// <param name="keyPointsVec">The raw keypoints vector</param>
        /// <param name="descriptors">The raw descriptor matrix</param>
        /// <returns>An array of image features</returns>
        public static ImageFeature <TDescriptor>[] ConvertFromRaw(VectorOfKeyPoint keyPointsVec, Matrix <TDescriptor> descriptors)
        {
            if (keyPointsVec.Size == 0)
            {
                return(new ImageFeature <TDescriptor> [0]);
            }
            Debug.Assert(keyPointsVec.Size == descriptors.Rows, "Size of keypoints vector do not match the rows of the descriptors matrix.");
            int sizeOfdescriptor = descriptors.Cols;

            MKeyPoint[] keyPoints = keyPointsVec.ToArray();
            ImageFeature <TDescriptor>[] features = new ImageFeature <TDescriptor> [keyPoints.Length];
            MCvMat header        = descriptors.MCvMat;
            long   address       = header.data.ToInt64();
            int    rowSizeInByte = sizeOfdescriptor * Marshal.SizeOf(typeof(TDescriptor));

            for (int i = 0; i < keyPoints.Length; i++, address += header.step)
            {
                features[i].KeyPoint = keyPoints[i];
                TDescriptor[] desc    = new TDescriptor[sizeOfdescriptor];
                GCHandle      handler = GCHandle.Alloc(desc, GCHandleType.Pinned);
                Toolbox.memcpy(handler.AddrOfPinnedObject(), new IntPtr(address), rowSizeInByte);
                handler.Free();
                features[i].Descriptor = desc;
            }
            return(features);
        }
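
For float descriptors (for example SURF or SIFT), the same row-by-row copy can be written without pinning by using Marshal.Copy; a minimal sketch, not part of the library, assuming a legacy Emgu CV Matrix&lt;float&gt; whose rows are separated by MCvMat.step bytes:

// Sketch only: copy each descriptor row of a Matrix<float> into a managed float[].
private static float[][] CopyDescriptorRows(Matrix<float> descriptors)
{
    MCvMat header  = descriptors.MCvMat;
    long   address = header.data.ToInt64();
    float[][] rows = new float[descriptors.Rows][];

    for (int i = 0; i < descriptors.Rows; i++, address += header.step)
    {
        rows[i] = new float[descriptors.Cols];
        Marshal.Copy(new IntPtr(address), rows[i], 0, descriptors.Cols); // one row = one descriptor
    }
    return rows;
}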
Example #3
        private void button_Detect_Click(object sender, EventArgs e)
        {
            Mat scr    = imagemat;
            Mat result = imagemat.Clone();

            #region Detect() code

            /*
             * GFTTDetector _gftd = new GFTTDetector();                         // Create a GFTTDetector with default parameters.
             * MKeyPoint[] keypoints = _gftd.Detect(scr, null);                 // Detect keypoints, returned as MKeyPoint[].
             * foreach (MKeyPoint keypoint in keypoints)                        // Iterate over the MKeyPoint[] array.
             * {
             *  Point point = Point.Truncate(keypoint.Point);                   // Get the keypoint coordinates as a Point.
             *  CvInvoke.Circle(result, point, 3, new MCvScalar(0, 0, 255), 1); // Draw each keypoint location as a circle.
             * }
             */
            #endregion
            #region DetectRaw() code
            GFTTDetector     _gftd            = new GFTTDetector();               // Create a GFTTDetector with default parameters.
            VectorOfKeyPoint vector_keypoints = new VectorOfKeyPoint();           // Create a VectorOfKeyPoint to store the detected keypoints.
            _gftd.DetectRaw(scr, vector_keypoints, null);                         // Detect keypoints.
            foreach (MKeyPoint keypoint in vector_keypoints.ToArray())            // Iterate over the MKeyPoint[] array.
            {
                Point point = Point.Truncate(keypoint.Point);                     // Get the keypoint coordinates as a Point.
                CvInvoke.Circle(result, point, 3, new MCvScalar(255, 255, 0), 1); // Draw each keypoint location as a circle.
            }
            #endregion
            imageBox1.Image = scr;    // Display the input image.
            imageBox2.Image = result; // Display the corner-detection result.
        }
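
Instead of drawing each circle by hand, Emgu CV's Features2DToolbox.DrawKeypoints can render the detected keypoints in one call; a brief sketch under the same setup as the handler above (scr, vector_keypoints, result), with the overload assumed from Emgu CV 3.x:

// Sketch: let Emgu CV draw the detected keypoints (Emgu.CV.Features2D).
Features2DToolbox.DrawKeypoints(scr, vector_keypoints, result,
    new Bgr(255, 255, 0), Features2DToolbox.KeypointDrawType.Default);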
Example #4
 /// <summary>
 /// Detect the keypoints in the image
 /// </summary>
 /// <param name="image">The image from which the key point will be detected from</param>
 /// <returns>The key points in the image</returns>
 public MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image)
 {
     using (VectorOfKeyPoint kpts = DetectKeyPointsRaw(image, null))
     {
         return(kpts.ToArray());
     }
 }
Example #5
 /// <summary>
 /// Detect the SURF keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract SURF features from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of SURF key points</returns>
 public MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image, Image <Gray, byte> mask)
 {
     using (VectorOfKeyPoint keypoints = DetectKeyPointsRaw(image, mask))
     {
         return(keypoints.ToArray());
     }
 }
Example #6
 /// <summary>
 /// Detect the Lepetit keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract Lepetit keypoints from</param>
 /// <param name="maxCount">The maximum number of keypoints to be extracted, use 0 to ignore the max count</param>
 /// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
 /// <returns>The array of Lepetit keypoints</returns>
 public        MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image, int maxCount, bool scaleCoords)
 {
     using (VectorOfKeyPoint kpts = DetectKeyPointsRaw(image, maxCount, scaleCoords))
     {
         return(kpts.ToArray());
     }
 }
Example #7
        /// <summary>
        /// Detects keypoints and computes descriptors for them.
        /// </summary>
        /// <param name="image">Image where keypoints (corners) are detected.
        /// Only 8-bit grayscale images are supported.</param>
        /// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
        /// <param name="keypoints">The output vector of keypoints.</param>
        /// <param name="descriptors">The output descriptors computed for the keypoints.</param>
        public void Run(GpuMat image, GpuMat mask, out KeyPoint[] keypoints, GpuMat descriptors)
        {
            if (disposed)
            {
                throw new ObjectDisposedException(GetType().Name);
            }
            if (image == null)
            {
                throw new ArgumentNullException("image");
            }
            if (mask == null)
            {
                throw new ArgumentNullException("mask");
            }
            if (descriptors == null)
            {
                throw new ArgumentNullException("descriptors");
            }

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_ORB_GPU_operator4(ptr, image.CvPtr, mask.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
                keypoints = keypointsVec.ToArray();
            }

            GC.KeepAlive(image);
            GC.KeepAlive(mask);
            GC.KeepAlive(descriptors);
        }
Example #8
        public void TestCudaOrbDetector()
        {
            if (!CudaInvoke.HasCuda)
            {
                return;
            }
            using (Image <Bgr, Byte> img = new Image <Bgr, byte>("box.png"))
                using (GpuMat cudaImage = new GpuMat(img))
                    using (GpuMat grayCudaImage = new GpuMat())
                        using (CudaORBDetector detector = new CudaORBDetector(500))
                            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
                                using (GpuMat keyPointMat = new GpuMat())
                                    using (GpuMat descriptorMat = new GpuMat())
                                    {
                                        CudaInvoke.CvtColor(cudaImage, grayCudaImage, ColorConversion.Bgr2Gray);
                                        detector.DetectAsync(grayCudaImage, keyPointMat);
                                        detector.Convert(keyPointMat, kpts);
                                        //detector.ComputeRaw(grayCudaImage, null, keyPointMat, descriptorMat);
                                        //detector.DownloadKeypoints(keyPointMat, kpts);

                                        foreach (MKeyPoint kpt in kpts.ToArray())
                                        {
                                            img.Draw(new CircleF(kpt.Point, 3.0f), new Bgr(0, 255, 0), 1);
                                        }

                                        //ImageViewer.Show(img);
                                    }
        }
Example #9
 /// <summary>
 /// Detect the keypoints from the image
 /// </summary>
 /// <param name="detector">The keypoint detector</param>
 /// <param name="image">The image to extract keypoints from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of key points</returns>
 public static MKeyPoint[] DetectKeyPoints(this IKeyPointDetector detector, Image <Gray, Byte> image, Image <Gray, byte> mask)
 {
     using (VectorOfKeyPoint keypoints = detector.DetectKeyPointsRaw(image, mask))
     {
         return(keypoints.ToArray());
     }
 }
Example #10
        public void TestCudaFASTDetector()
        {
            if (!CudaInvoke.HasCuda)
            {
                return;
            }
            using (Image <Bgr, Byte> img = new Image <Bgr, byte>("box.png"))
                using (CudaImage <Bgr, Byte> CudaImage = new CudaImage <Bgr, byte>(img))
                    using (CudaImage <Gray, Byte> grayCudaImage = CudaImage.Convert <Gray, Byte>())
                        using (CudaFastFeatureDetector featureDetector = new CudaFastFeatureDetector(10, true, FastDetector.DetectorType.Type9_16, 1000))
                            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
                                using (GpuMat keyPointsMat = new GpuMat())
                                {
                                    featureDetector.DetectAsync(grayCudaImage, keyPointsMat);
                                    featureDetector.Convert(keyPointsMat, kpts);
                                    //featureDetector.DetectKeyPointsRaw(grayCudaImage, null, keyPointsMat);

                                    //featureDetector.DownloadKeypoints(keyPointsMat, kpts);

                                    foreach (MKeyPoint kpt in kpts.ToArray())
                                    {
                                        img.Draw(new CircleF(kpt.Point, 3.0f), new Bgr(0, 255, 0), 1);
                                    }

                                    //ImageViewer.Show(img);
                                }
        }
Example #11
 /// <summary>
 /// Detect the SURF keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract SURF features from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of SURF key points</returns>
 public        MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image, Image <Gray, byte> mask)
 {
     using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
     {
         CvSURFDetectorDetectKeyPoints(ref this, image, mask, keypoints);
         return(keypoints.ToArray());
     }
 }
 /// <summary>
 /// Detect the keypoints in the image
 /// </summary>
 /// <param name="image">The image from which the key point will be detected from</param>
 /// <returns>The key points in the image</returns>
 public     MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image)
 {
     using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
     {
         CvStarDetectorDetectKeyPoints(ref this, image, kpts);
         return(kpts.ToArray());
     }
 }
 /// <summary>
 /// Detect the Fast keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract keypoints from</param>
 /// <returns>The array of fast keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(Image <Gray, byte> image)
 {
     using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
     {
         CvFASTKeyPoints(image, keypoints, Threshold, NonmaxSupression);
         return(keypoints.ToArray());
     }
 }
Example #14
 /// <summary>
 /// Detect the MSER keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract MSER keypoints from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of MSER key points</returns>
 public        MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image, Image <Gray, byte> mask)
 {
     using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
     {
         CvMSERKeyPoints(image, mask, kpts, ref this);
         return(kpts.ToArray());
     }
 }
Example #15
 /// <summary>
 /// Detect the Lepetit keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract Lepetit keypoints from</param>
 /// <param name="maxCount">The maximum number of keypoints to be extracted, use 0 to ignore the max count</param>
 /// <param name="scaleCoords">Indicates if the coordinates should be scaled</param>
 /// <returns>The array of Lepetit keypoints</returns>
 public        MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image, int maxCount, bool scaleCoords)
 {
     using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
     {
         CvLDetectorDetectKeyPoints(ref this, image, kpts, maxCount, scaleCoords);
         return(kpts.ToArray());
     }
 }
Example #16
 /// <summary>
 /// Detect the keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract keypoints from</param>
 /// <param name="mask">The optional mask.</param>
 /// <returns>An array of key points</returns>
 public MKeyPoint[] Detect(IInputArray image, IInputArray mask = null)
 {
     using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
     {
         DetectRaw(image, keypoints, mask);
         return(keypoints.ToArray());
     }
 }
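
A short usage sketch for the Detect helper above, assuming an Emgu CV ORBDetector (any Feature2D-derived detector works the same way):

// Sketch: detect keypoints on a grayscale image with ORB (Emgu CV).
using (ORBDetector orb = new ORBDetector(500))
using (Mat gray = CvInvoke.Imread("box.png", ImreadModes.Grayscale))
{
    MKeyPoint[] corners = orb.Detect(gray);   // mask omitted (null)
}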
Example #17
        private static VectorOfKeyPoint GetBestKeypointsCount(VectorOfKeyPoint keyPoints, int count)
        {
            List <MKeyPoint> kpList = keyPoints.ToArray().ToList();

            // Sort by response, strongest first, so that Take(count) keeps the best keypoints.
            kpList.Sort((x, y) => y.Response.CompareTo(x.Response));
            kpList = kpList.Take(count).ToList();
            return(new VectorOfKeyPoint(kpList.ToArray()));
        }
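
The same selection can be written more compactly with LINQ; a sketch, assuming System.Linq is imported:

// Sketch: keep the `count` strongest keypoints by response using LINQ.
private static VectorOfKeyPoint GetBestKeypointsLinq(VectorOfKeyPoint keyPoints, int count)
{
    MKeyPoint[] best = keyPoints.ToArray()
                                .OrderByDescending(kp => kp.Response)
                                .Take(count)
                                .ToArray();
    return new VectorOfKeyPoint(best);
}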
 /// <summary>
 /// Detect the SIFT keypoints from the image
 /// </summary>
 /// <param name="image">The image to extract SIFT features from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of SIFT key points</returns>
 public MKeyPoint[] DetectKeyPoints(Image <Gray, Byte> image, Image <Gray, byte> mask)
 {
     using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
     {
         CvSIFTDetectorDetectKeyPoints(_ptr, image, mask, kpts);
         return(kpts.ToArray());
     }
 }
Example #19
 /// <summary>
 /// Detect the keypoints from the image
 /// </summary>
 /// <param name="detector">The keypoint detector</param>
 /// <param name="image">The image to extract keypoints from</param>
 /// <param name="mask">The optional mask.</param>
 /// <returns>An array of key points</returns>
 public static MKeyPoint[] Detect(this IFeatureDetector detector, IInputArray image, IInputArray mask = null)
 {
     using (VectorOfKeyPoint keypoints = new VectorOfKeyPoint())
     {
         detector.DetectRaw(image, keypoints, mask);
         return(keypoints.ToArray());
     }
 }
Example #20
 /// <summary>
 /// Detect keypoints in the GpuImage
 /// </summary>
 /// <param name="img">The image where keypoints will be detected from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(GpuImage <Gray, Byte> img, GpuImage <Gray, Byte> mask)
 {
     using (GpuMat <float> tmp = DetectKeyPointsRaw(img, mask))
         using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
         {
             DownloadKeypoints(tmp, kpts);
             return(kpts.ToArray());
         }
 }
Example #21
 /// <summary>
 /// Detect keypoints in the CudaImage
 /// </summary>
 /// <param name="img">The image where keypoints will be detected from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <returns>An array of keypoints</returns>
 public MKeyPoint[] DetectKeyPoints(GpuMat img, GpuMat mask)
 {
     using (GpuMat tmp = DetectKeyPointsRaw(img, mask))
         using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
         {
             DownloadKeypoints(tmp, kpts);
             return(kpts.ToArray());
         }
 }
Example #22
 /// <summary>
 /// Detects corners using the FAST algorithm by E. Rosten.
 /// </summary>
 /// <param name="image">Grayscale image where keypoints (corners) are detected.</param>
 /// <param name="keypoints">The detected keypoints.</param>
 /// <param name="threshold">Threshold on difference between intensity of the central pixel and pixels of a circle around this pixel.</param>
 /// <param name="nonmaxSupression">If true, non-maximum suppression is applied to detected corners (keypoints).</param>
 /// <param name="type">One of the three neighborhoods as defined in the paper.</param>
 public static void FASTX(InputArray image, out KeyPoint[] keypoints, int threshold, bool nonmaxSupression, int type)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     image.ThrowIfDisposed();
     using (var kp = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_FASTX(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, type);
         keypoints = kp.ToArray();
     }
 }
Example #23
        /// <summary>
        /// Remove keypoints within borderPixels of an image edge.
        /// </summary>
        /// <param name="keypoints">Keypoints to filter.</param>
        /// <param name="imageSize">Size of the image.</param>
        /// <param name="borderSize">Border size in pixels.</param>
        /// <returns>The filtered keypoints.</returns>
        public static KeyPoint[] RunByImageBorder(IEnumerable<KeyPoint> keypoints, Size imageSize, int borderSize)
        {
            if (keypoints == null) 
                throw new ArgumentNullException("keypoints");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_runByImageBorder(
                    keypointsVec.CvPtr, imageSize, borderSize);
                return keypointsVec.ToArray();
            }
        }
Example #24
        /// <summary>
        /// Remove keypoints of sizes out of range.
        /// </summary>
        /// <param name="keypoints">Keypoints to filter.</param>
        /// <param name="minSize">Minimum keypoint size to keep.</param>
        /// <param name="maxSize">Maximum keypoint size to keep.</param>
        /// <returns>The filtered keypoints.</returns>
        public static KeyPoint[] RunByKeypointSize(IEnumerable<KeyPoint> keypoints, float minSize,
            float maxSize = Single.MaxValue)
        {
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_runByKeypointSize(
                    keypointsVec.CvPtr, minSize, maxSize);
                return keypointsVec.ToArray();
            }
        }
Example #25
        /// <summary>
        /// Detects corners using the FAST algorithm
        /// </summary>
        /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
        /// <param name="threshold">threshold on difference between intensity of the central pixel 
        /// and pixels of a circle around this pixel.</param>
        /// <param name="nonmaxSupression">if true, non-maximum suppression is applied to 
        /// detected corners (keypoints).</param>
        /// <param name="type">one of the three neighborhoods as defined in the paper</param>
        /// <returns>keypoints detected on the image.</returns>
        public static KeyPoint[] FAST(InputArray image, int threshold, bool nonmaxSupression, FASTType type)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfDisposed();

            using (var kp = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_FAST2(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, (int)type);
                GC.KeepAlive(image);
                return kp.ToArray();
            }
        }
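
A one-line usage sketch for the wrapper above (OpenCvSharp), assuming gray is an 8-bit grayscale Mat and that the FASTType member is named Type_9_16:

// Sketch: FAST corners with non-maximum suppression and the 9/16 neighborhood.
KeyPoint[] corners = Cv2.FAST(gray, 40, true, FASTType.Type_9_16);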
Example #26
 /// <summary>
 /// Detects corners using the AGAST algorithm
 /// </summary>
 /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
 /// <param name="threshold">threshold on difference between intensity of the central pixel 
 /// and pixels of a circle around this pixel.</param>
 /// <param name="nonmaxSuppression">if true, non-maximum suppression is applied to 
 /// detected corners (keypoints).</param>
 /// <param name="type">one of the four neighborhoods as defined in the paper</param>
 /// <returns>keypoints detected on the image.</returns>
 public static KeyPoint[] AGAST(InputArray image, int threshold, bool nonmaxSuppression, AGASTType type)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     image.ThrowIfDisposed();
     
     using (var vector = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_AGAST(image.CvPtr, vector.CvPtr, threshold, nonmaxSuppression ? 1 : 0,
             (int) type);
         GC.KeepAlive(image);
         return vector.ToArray();
     }
 }
Example #27
        public static Bitmap VisualizeFeatures(Image <Bgr, byte> img, VectorOfKeyPoint features, Color color)
        {
            Bitmap bmp = new Bitmap(img.Width, img.Height);

            // Dispose the GDI+ objects deterministically instead of leaking them.
            using (Graphics g = Graphics.FromImage(bmp))
            using (SolidBrush brush = new SolidBrush(color))
            {
                g.DrawImage(img.ToBitmap(), 0, 0, img.Width, img.Height);

                foreach (MKeyPoint kp in features.ToArray())
                {
                    g.FillEllipse(brush, kp.Point.X - 5, kp.Point.Y - 5, 11, 11);
                }
            }

            return(bmp);
        }
Example #28
        public void Bootstrap(Mat img)
        {
            //Detect first features in the image (clear any current tracks)
            if (img.IsEmpty || img.NumberOfChannels != 3)
            {
                throw new Exception("Image is not appropriate (empty or wrong number of channels).");
            }

            _bootstrapKp.Clear();
            _detector.DetectRaw(img, _bootstrapKp);

            _trackedFeatures = new VectorOfKeyPoint(_bootstrapKp.ToArray());

            #region Trace

            Trace.Indent();
            Trace.WriteLine($"Bootstrap keypoints: {_trackedFeatures.Size}.");
            Trace.Unindent();
            Trace.WriteLine("--------------------------");

            #endregion

            CvInvoke.CvtColor(img, _prevGray, ColorConversion.Bgr2Gray);
        }
Example #29
        /// <summary>
        /// Remove keypoints whose corresponding pixel in the mask is zero.
        /// </summary>
        /// <param name="keypoints">Keypoints to filter.</param>
        /// <param name="mask">Mask with non-zero values in the region of interest.</param>
        /// <returns>The filtered keypoints.</returns>
        public static KeyPoint[] RunByPixelsMask(IEnumerable<KeyPoint> keypoints, Mat mask)
        {
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");
            if (mask == null) 
                throw new ArgumentNullException("mask");
            mask.ThrowIfDisposed();

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_runByPixelsMask(
                    keypointsVec.CvPtr, mask.CvPtr);
                GC.KeepAlive(mask);
                return keypointsVec.ToArray();
            }
        }
Example #30
        public Mat FindHomography(VectorOfKeyPoint keypointsModel, VectorOfKeyPoint keypointsTest, List <MDMatch[]> matches, Mat Mask)
        {
            MKeyPoint[] kptsModel = keypointsModel.ToArray();
            MKeyPoint[] kptsTest  = keypointsTest.ToArray();

            PointF[] srcPoints  = new PointF[matches.Count];
            PointF[] destPoints = new PointF[matches.Count];

            for (int i = 0; i < matches.Count; i++)
            {
                srcPoints[i]  = kptsModel[matches[i][0].TrainIdx].Point;
                destPoints[i] = kptsTest[matches[i][0].QueryIdx].Point;
            }

            Mat homography = CvInvoke.FindHomography(srcPoints, destPoints, Emgu.CV.CvEnum.HomographyMethod.Ransac, 10, Mask);

            return(homography);
        }
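
A hedged sketch of how the matches argument above is typically produced with Emgu CV's BFMatcher.KnnMatch; the descriptor Mats (descriptorsModel, descriptorsTest) and the call site inside the same class are assumptions:

// Sketch: build List<MDMatch[]> from a brute-force k-NN match, then estimate the homography.
using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
using (VectorOfVectorOfDMatch knnMatches = new VectorOfVectorOfDMatch())
{
    matcher.Add(descriptorsModel);                        // descriptors of the model image
    matcher.KnnMatch(descriptorsTest, knnMatches, 2, null);
    List<MDMatch[]> matches = knnMatches.ToArrayOfArray().ToList();

    Mat mask       = new Mat();
    Mat homography = FindHomography(keypointsModel, keypointsTest, matches, mask);
}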
Example #31
        /// <summary>
        /// Finds features in the given image using the provided feature finder.
        /// </summary>
        /// <param name="featuresFinder">The Feature2D used to detect and describe features.</param>
        /// <param name="image">The source image.</param>
        /// <param name="features">The found image features.</param>
        /// <param name="mask">Optional mask that marks where features should be detected.</param>
        public static void ComputeImageFeatures(
            Feature2D featuresFinder,
            InputArray image,
            out ImageFeatures features,
            InputArray? mask = null)
        {
            if (featuresFinder == null)
            {
                throw new ArgumentNullException(nameof(featuresFinder));
            }
            if (image == null)
            {
                throw new ArgumentNullException(nameof(image));
            }
            featuresFinder.ThrowIfDisposed();
            image.ThrowIfDisposed();

            var descriptorsMat = new Mat();
            var keypointsVec   = new VectorOfKeyPoint();
            var wImageFeatures = new WImageFeatures
            {
                Keypoints   = keypointsVec.CvPtr,
                Descriptors = descriptorsMat.CvPtr
            };

            unsafe
            {
                NativeMethods.HandleException(
                    NativeMethods.stitching_computeImageFeatures2(
                        featuresFinder.CvPtr, image.CvPtr, &wImageFeatures, mask?.CvPtr ?? IntPtr.Zero));
            }

            features = new ImageFeatures(
                wImageFeatures.ImgIdx,
                wImageFeatures.ImgSize,
                keypointsVec.ToArray(),
                descriptorsMat);

            GC.KeepAlive(featuresFinder);
            GC.KeepAlive(image);
            GC.KeepAlive(mask);
            GC.KeepAlive(descriptorsMat);
        }
Example #32
        public List <Keypoint> usingSift(Bitmap image)
        {
            SIFTDetector       sift           = new SIFTDetector();
            Image <Gray, Byte> modelImage     = new Image <Gray, byte>(new Bitmap(image));
            VectorOfKeyPoint   modelKeyPoints = sift.DetectKeyPointsRaw(modelImage, null);

            MKeyPoint[] keypoints = modelKeyPoints.ToArray();

            Keypoint        key;
            List <Keypoint> keypointsList = new List <Keypoint>();

            foreach (MKeyPoint keypoint in keypoints)
            {
                key = new Keypoint(keypoint.Point.X, keypoint.Point.Y, keypoint.Size);
                keypointsList.Add(key);
            }

            return(keypointsList);
        }
Example #33
        /// <summary>
        /// Download keypoints from GPU to CPU memory.
        /// </summary>
        /// <param name="dKeypoints"></param>
        /// <returns></returns>
        public KeyPoint[] DownloadKeyPoints(GpuMat dKeypoints)
        {
            if (disposed)
            {
                throw new ObjectDisposedException(GetType().Name);
            }
            if (dKeypoints == null)
            {
                throw new ArgumentNullException("dKeypoints");
            }

            KeyPoint[] result;
            using (var keypoints = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_ORB_GPU_downloadKeyPoints(ptr, dKeypoints.CvPtr, keypoints.CvPtr);
                result = keypoints.ToArray();
            }

            GC.KeepAlive(dKeypoints);
            return(result);
        }
Example #34
        /// <summary>
        /// Converts keypoints from GPU representation to vector of KeyPoint.
        /// </summary>
        /// <param name="hKeypoints"></param>
        /// <returns></returns>
        public KeyPoint[] ConvertKeypoints(Mat hKeypoints)
        {
            if (disposed)
            {
                throw new ObjectDisposedException(GetType().Name);
            }
            if (hKeypoints == null)
            {
                throw new ArgumentNullException("hKeypoints");
            }

            KeyPoint[] result;
            using (var keypoints = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_FAST_GPU_convertKeypoints(ptr, hKeypoints.CvPtr, keypoints.CvPtr);
                result = keypoints.ToArray();
            }

            GC.KeepAlive(hKeypoints);
            return(result);
        }
Example #35
        private MKeyPoint[] RemoveFakeKeyPoint(VectorOfKeyPoint MainVecor, VectorOfKeyPoint InputVecor, double Compression, double Radius)
        {
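            // Keep only those input keypoints that lie within Radius of at least one main keypoint;
            // the main keypoint coordinates are scaled by Compression before the distance test.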
            List <MKeyPoint> InputListKeyPoint = new List <MKeyPoint>(InputVecor.ToArray());
            List <MKeyPoint> OutputVector      = new List <MKeyPoint>();

            for (int i = 0; i < MainVecor.Size; i++)
            {
                for (int j = InputListKeyPoint.Count - 1; j >= 0; j--)
                {
                    PointF InputLocate = InputListKeyPoint[j].Point;
                    PointF MainLocate  = MainVecor[i].Point;
                    if (Math.Pow(MainLocate.X * Compression - InputLocate.X, 2) +
                        Math.Pow(MainLocate.Y * Compression - InputLocate.Y, 2) <= Math.Pow(Radius, 2))
                    {
                        OutputVector.Add(InputListKeyPoint[j]);
                        InputListKeyPoint.RemoveAt(j);
                    }
                }
            }
            return(OutputVector.ToArray());
        }
Example #36
        /// <summary>
        /// Detect image features from the given image
        /// </summary>
        /// <param name="image">The image to detect features from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <returns>The Image features detected from the given image</returns>
        public        ImageFeature[] DetectFeatures(Image <Gray, Byte> image, Image <Gray, byte> mask)
        {
            using (VectorOfKeyPoint pts = new VectorOfKeyPoint())
                using (VectorOfFloat descs = new VectorOfFloat())
                {
                    CvSURFDetectorDetectFeature(ref this, image, mask, pts, descs);
                    MKeyPoint[] kpts = pts.ToArray();
                    int         n    = kpts.Length;
                    long        add  = descs.StartAddress.ToInt64();

                    ImageFeature[] features         = new ImageFeature[n];
                    int            sizeOfdescriptor = extended == 0 ? 64 : 128;
                    for (int i = 0; i < n; i++, add += sizeOfdescriptor * sizeof(float))
                    {
                        features[i].KeyPoint = kpts[i];
                        float[] desc = new float[sizeOfdescriptor];
                        Marshal.Copy(new IntPtr(add), desc, 0, sizeOfdescriptor);
                        features[i].Descriptor = desc;
                    }
                    return(features);
                }
        }
Example #37
        /// <summary>
        /// Compute the descriptors for a set of keypoints in an image.
        /// </summary>
        /// <param name="image">The image.</param>
        /// <param name="inKeypoints">The input keypoints. Keypoints for which a descriptor cannot be computed are removed.</param>
        /// <param name="outKeypoints"></param>
        /// <param name="descriptors">Computed descriptors. Row i is the descriptor for keypoint i.</param>
        public virtual void Compute(InputArray image, KeyPoint[] inKeypoints, out KeyPoint[] outKeypoints,
            OutputArray descriptors)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);

            using (var keypointsVec = new VectorOfKeyPoint(inKeypoints))
            {
                NativeMethods.features2d_Feature2D_compute1(ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
                outKeypoints = keypointsVec.ToArray();
            }
        }
Example #38
 /// <summary>
 /// 
 /// </summary>
 /// <param name="image"></param>
 /// <param name="keypoints"></param>
 /// <param name="descriptors"></param>
 public void Compute(Mat image, out KeyPoint[] keypoints, Mat descriptors)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_Feature2D_compute(ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
         keypoints = keypointsVec.ToArray();
     }
 }
Example #39
        /// <summary>
        /// Computes an image descriptor using the set visual vocabulary.
        /// </summary>
        /// <param name="image">Image, for which the descriptor is computed.</param>
        /// <param name="keypoints">Keypoints detected in the input image.</param>
        /// <param name="imgDescriptor">Computed output image descriptor.</param>
        /// <param name="pointIdxsOfClusters">Indices of keypoints that belong to each cluster, 
        /// i.e. pointIdxsOfClusters[i] contains the keypoint indices that belong to the i-th cluster (word of the vocabulary); returned if it is non-zero.</param>
        /// <param name="descriptors">Descriptors of the image keypoints that are returned if they are non-zero.</param>
        public void Compute(InputArray image, out KeyPoint[] keypoints, OutputArray imgDescriptor,
            out int[][] pointIdxsOfClusters, Mat descriptors = null)
        {
            if (IsDisposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            if (imgDescriptor == null)
                throw new ArgumentNullException(nameof(imgDescriptor));

            using (var keypointsVec = new VectorOfKeyPoint())
            using (var pointIdxsOfClustersVec = new VectorOfVectorInt())
            {
                NativeMethods.features2d_BOWImgDescriptorExtractor_compute11(ptr, image.CvPtr, keypointsVec.CvPtr, 
                    imgDescriptor.CvPtr, pointIdxsOfClustersVec.CvPtr, Cv2.ToPtr(descriptors));
                keypoints = keypointsVec.ToArray();
                pointIdxsOfClusters = pointIdxsOfClustersVec.ToArray();
            }
            GC.KeepAlive(image);
            GC.KeepAlive(imgDescriptor);
            GC.KeepAlive(descriptors);
        }
Example #40
 /// <summary>
 /// Detect keypoints in an image.
 /// </summary>
 /// <param name="image">The image.</param>
 /// <param name="mask">Mask specifying where to look for keypoints (optional). 
 /// Must be a char matrix with non-zero values in the region of interest.</param>
 /// <returns>The detected keypoints.</returns>
 public KeyPoint[] Detect(Mat image, Mat mask = null)
 {
     if(image == null)
         throw new ArgumentNullException("image");
     using (var keypoints = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_FeatureDetector_detect(ptr, image.CvPtr, keypoints.CvPtr, Cv2.ToPtr(mask));
         return keypoints.ToArray();
     }
 }
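
A short usage sketch for the wrapper above (OpenCvSharp), assuming a 3.x-style ORB instance created through ORB.Create and the ImreadModes enum naming of recent OpenCvSharp releases:

// Sketch: detect ORB keypoints on a grayscale image.
using (var gray = Cv2.ImRead("box.png", ImreadModes.Grayscale))
using (var orb = ORB.Create(500))
{
    KeyPoint[] keypoints = orb.Detect(gray);   // mask omitted
}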
Example #41
        /// <summary>
        /// Extracts features and computes their descriptors using the SIFT algorithm
        /// </summary>
        /// <param name="img">Input 8-bit grayscale image</param>
        /// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
        /// <returns>The output vector of keypoints</returns>
#else
        /// <summary>
        /// Extracts features and computes their descriptors using the SIFT algorithm
        /// </summary>
        /// <param name="img">Input 8-bit grayscale image</param>
        /// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
        /// <returns>The output vector of keypoints</returns>
#endif
        public KeyPoint[] Run(InputArray img, InputArray mask)
        {
            ThrowIfDisposed();
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.nonfree_SIFT_run1(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr);
                return keypointsVec.ToArray();
            }
        }
Example #42
        /// <summary>
        /// Computes an image descriptor using the set visual vocabulary.
        /// </summary>
        /// <param name="image">Image, for which the descriptor is computed.</param>
        /// <param name="keypoints">Keypoints detected in the input image.</param>
        /// <param name="imgDescriptor">Computed output image descriptor.</param>
        public void Compute2(Mat image, out KeyPoint[] keypoints, Mat imgDescriptor)
        {
            if (IsDisposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            if (imgDescriptor == null)
                throw new ArgumentNullException(nameof(imgDescriptor));

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_BOWImgDescriptorExtractor_compute2(
                    ptr, image.CvPtr, keypointsVec.CvPtr, imgDescriptor.CvPtr);
                keypoints = keypointsVec.ToArray();
            }
            GC.KeepAlive(image);
            GC.KeepAlive(imgDescriptor);
        }
Example #43
        /// <summary>
        /// Remove duplicated keypoints.
        /// </summary>
        /// <param name="keypoints">Keypoints to filter.</param>
        /// <returns>The keypoints with duplicates removed.</returns>
        public static KeyPoint[] RemoveDuplicated(IEnumerable<KeyPoint> keypoints)
        {
            if (keypoints == null)
                throw new ArgumentNullException(nameof(keypoints));

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_removeDuplicated(keypointsVec.CvPtr);
                return keypointsVec.ToArray();
            }
        }
Example #44
        /// <summary>
        /// Retain the specified number of the best keypoints (according to the response)
        /// </summary>
        /// <param name="keypoints">Keypoints to filter.</param>
        /// <param name="npoints">The number of best keypoints to retain.</param>
        /// <returns>The retained keypoints.</returns>
        public static KeyPoint[] RetainBest(IEnumerable<KeyPoint> keypoints, int npoints)
        {
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_retainBest(
                    keypointsVec.CvPtr, npoints);
                return keypointsVec.ToArray();
            }
        }
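
The filter helpers above are typically chained after detection; a minimal sketch (OpenCvSharp), assuming gray is an 8-bit grayscale Mat:

// Sketch: detect, then prune keypoints with the KeyPointsFilter helpers.
KeyPoint[] kps = Cv2.FAST(gray, 20, true);
kps = KeyPointsFilter.RunByImageBorder(kps, gray.Size(), 16);   // drop points near the border
kps = KeyPointsFilter.RemoveDuplicated(kps);                    // drop exact duplicates
kps = KeyPointsFilter.RetainBest(kps, 500);                     // keep the 500 strongest responses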
Example #45
        /// <summary>
        /// Detects keypoints and computes their SURF descriptors. [useProvidedKeypoints = true]
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="keypoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
#else
        /// <summary>
        /// detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
        /// </summary>
        /// <param name="img"></param>
        /// <param name="mask"></param>
        /// <param name="keypoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
#endif
        public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints, OutputArray descriptors,
            bool useProvidedKeypoints = false)
        {
            ThrowIfDisposed();
            if (img == null)
                throw new ArgumentNullException("img");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            img.ThrowIfDisposed();
            descriptors.ThrowIfNotReady();

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.nonfree_SURF_run2_OutputArray(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr,
                    descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
                keypoints = keypointsVec.ToArray();
            }
        }
Example #46
        /// <summary>
        /// Converts keypoints from GPU representation to vector of KeyPoint.
        /// </summary>
        /// <param name="hKeypoints"></param>
        /// <returns></returns>
        public KeyPoint[] ConvertKeypoints(Mat hKeypoints)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (hKeypoints == null)
                throw new ArgumentNullException("hKeypoints");

            KeyPoint[] result;
            using (var keypoints = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_FAST_GPU_convertKeypoints(ptr, hKeypoints.CvPtr, keypoints.CvPtr);
                result = keypoints.ToArray();
            }

            GC.KeepAlive(hKeypoints);
            return result;
        }
Example #47
        /// <summary>
        /// Download keypoints from GPU to CPU memory.
        /// </summary>
        /// <param name="dKeypoints"></param>
        /// <returns></returns>
        public KeyPoint[] DownloadKeyPoints(GpuMat dKeypoints)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (dKeypoints == null)
                throw new ArgumentNullException("dKeypoints");

            KeyPoint[] result;
            using (var keypoints = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_ORB_GPU_downloadKeyPoints(ptr, dKeypoints.CvPtr, keypoints.CvPtr);
                result = keypoints.ToArray();
            }

            GC.KeepAlive(dKeypoints);
            return result;
        }
Example #48
        /// <summary>
        /// 
        /// </summary>
        /// <param name="img1"></param>
        /// <param name="img2"></param>
        /// <param name="H1to2"></param>
        /// <param name="keypoints1"></param>
        /// <param name="keypoints2"></param>
        /// <param name="repeatability"></param>
        /// <param name="correspCount"></param>
        public static void EvaluateFeatureDetector(
            Mat img1, Mat img2, Mat H1to2,
            ref KeyPoint[] keypoints1, ref KeyPoint[] keypoints2,
            out float repeatability, out int correspCount)
        {
            if (img1 == null) 
                throw new ArgumentNullException(nameof(img1));
            if (img2 == null) 
                throw new ArgumentNullException(nameof(img2));
            if (H1to2 == null) 
                throw new ArgumentNullException(nameof(H1to2));
            if (keypoints1 == null) 
                throw new ArgumentNullException(nameof(keypoints1));
            if (keypoints2 == null) 
                throw new ArgumentNullException(nameof(keypoints2));

            using (var keypoints1Vec = new VectorOfKeyPoint(keypoints1))
            using (var keypoints2Vec = new VectorOfKeyPoint(keypoints2))
            {
                NativeMethods.features2d_evaluateFeatureDetector(
                    img1.CvPtr, img2.CvPtr, H1to2.CvPtr,
                    keypoints1Vec.CvPtr, keypoints2Vec.CvPtr, 
                    out repeatability, out correspCount);
                keypoints1 = keypoints1Vec.ToArray();
                keypoints2 = keypoints2Vec.ToArray();
            }
        }
Example #49
 /// <summary>
 /// 
 /// </summary>
 /// <returns></returns>
 public KeyPoint[] ReadKeyPoints()
 {
     using (var valueVector = new VectorOfKeyPoint())
     {
         NativeMethods.core_FileNode_read_vectorOfKeyPoint(ptr, valueVector.CvPtr);
         return valueVector.ToArray();
     }
 }
Example #50
        /// <summary>
        /// Compute the BRISK features on an image
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <returns></returns>
        public KeyPoint[] Run(InputArray image, InputArray mask = null)
        {
            ThrowIfDisposed();
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_BRISK_run1(ptr, image.CvPtr, Cv2.ToPtr(mask), keyPointsVec.CvPtr);
                return keyPointsVec.ToArray();
            }
        }
Example #51
        /// <summary>
        /// Finds the keypoints using FAST detector.
        /// </summary>
        /// <param name="image">Image where keypoints (corners) are detected. 
        /// Only 8-bit grayscale images are supported.</param>
        /// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
        /// <param name="keypoints">The output vector of keypoints.</param>
        public void Run(GpuMat image, GpuMat mask, out KeyPoint[] keypoints)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException("image");
            if (mask == null)
                throw new ArgumentNullException("mask");

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_FAST_GPU_operator2(ptr, image.CvPtr, mask.CvPtr, keypointsVec.CvPtr);
                keypoints = keypointsVec.ToArray();
            }

            GC.KeepAlive(image);
            GC.KeepAlive(mask);
        }
Example #52
        /// <summary>
        /// Compute the BRISK features and descriptors on an image
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <param name="keyPoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
        public void Run(InputArray image, InputArray mask, out KeyPoint[] keyPoints,
            OutputArray descriptors, bool useProvidedKeypoints = false)
        {
            ThrowIfDisposed();
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            image.ThrowIfDisposed();
            descriptors.ThrowIfNotReady();

            using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_BRISK_run2(ptr, image.CvPtr, Cv2.ToPtr(mask), keyPointsVec.CvPtr,
                    descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
                keyPoints = keyPointsVec.ToArray();
            }
            descriptors.Fix();
        }
Example #53
        /// <summary>
        /// Detects keypoints and computes the descriptors
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <param name="keypoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
        public virtual void DetectAndCompute(
            InputArray image,
            InputArray mask,
            out KeyPoint[] keypoints,
            OutputArray descriptors,
            bool useProvidedKeypoints = false)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            image.ThrowIfDisposed();
            if (mask != null)
                mask.ThrowIfDisposed();

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_Feature2D_detectAndCompute(
                    ptr, image.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr, descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
                keypoints = keypointsVec.ToArray();
            }

            GC.KeepAlive(image);
            GC.KeepAlive(mask);
            descriptors.Fix();
        }
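
A usage sketch for DetectAndCompute (OpenCvSharp), assuming an ORB detector created via ORB.Create and a grayscale input Mat named gray:

// Sketch: detect keypoints and compute their descriptors in one call.
using (var orb = ORB.Create(500))
using (var descriptors = new Mat())
{
    KeyPoint[] keypoints;
    orb.DetectAndCompute(gray, null, out keypoints, descriptors);
    // descriptors.Rows matches keypoints.Length; each row is one 32-byte ORB descriptor.
}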
Example #54
        /// <summary>
        /// Compute the descriptors for a set of keypoints in an image.
        /// </summary>
        /// <param name="image">The image.</param>
        /// <param name="keypoints">The input keypoints. Keypoints for which a descriptor cannot be computed are removed.</param>
        /// <param name="descriptors">Computed descriptors. Row i is the descriptor for keypoint i.</param>
        public virtual void Compute(Mat image, ref KeyPoint[] keypoints, Mat descriptors)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_DescriptorExtractor_compute1(
                    ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
                keypoints = keypointsVec.ToArray();
            }
        }
Example #55
        /// <summary>
        /// 
        /// </summary>
        /// <param name="gaussPyr"></param>
        /// <param name="dogPyr"></param>
        /// <returns></returns>
        public KeyPoint[] FindScaleSpaceExtrema(IEnumerable<Mat> gaussPyr, IEnumerable<Mat> dogPyr)
        {
            ThrowIfDisposed();
            if (gaussPyr == null)
                throw new ArgumentNullException("gaussPyr");
            if (dogPyr == null)
                throw new ArgumentNullException("dogPyr");

            IntPtr[] gaussPyrPtrs = EnumerableEx.SelectPtrs(gaussPyr);
            IntPtr[] dogPyrPtrs = EnumerableEx.SelectPtrs(dogPyr);

            using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.nonfree_SIFT_findScaleSpaceExtrema(ptr, gaussPyrPtrs, gaussPyrPtrs.Length,
                    dogPyrPtrs, dogPyrPtrs.Length, keyPointsVec.CvPtr);
                return keyPointsVec.ToArray();
            }
        }
Example #56
        /// <summary>
        /// Retrieves keypoints using the StarDetector algorithm.
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Retrieves keypoints using the StarDetector algorithm.
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image</param>
        /// <returns></returns>
#endif
        public KeyPoint[] Run(Mat image)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            IntPtr keypoints;
            NativeMethods.features2d_StarDetector_detect(ptr, image.CvPtr, out keypoints);

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                return keypointsVec.ToArray();
            }
        }
Example #57
        /// <summary>
        /// Detect keypoints in an image.
        /// </summary>
        /// <param name="image">The image.</param>
        /// <param name="mask">Mask specifying where to look for keypoints (optional). 
        /// Must be a char matrix with non-zero values in the region of interest.</param>
        /// <returns>The detected keypoints.</returns>
        public KeyPoint[] Detect(InputArray image, Mat mask = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);

            image.ThrowIfDisposed();
            try
            {
                using (var keypoints = new VectorOfKeyPoint())
                {
                    NativeMethods.features2d_Feature2D_detect_InputArray(ptr, image.CvPtr, keypoints.CvPtr,
                        Cv2.ToPtr(mask));
                    return keypoints.ToArray();
                }
            }
            finally
            {
                GC.KeepAlive(image);
                GC.KeepAlive(mask);
            }
        }