Example #1
 /// <summary>
 /// Detects corners using the FAST algorithm by E. Rosten
 /// </summary>
 /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
 /// <param name="keypoints">keypoints detected on the image.</param>
 /// <param name="threshold">threshold on difference between intensity of the central pixel 
 /// and pixels of a circle around this pixel.</param>
 /// <param name="nonmaxSupression">if true, non-maximum suppression is applied to detected corners (keypoints).</param>
 /// <param name="type">one of the three neighborhoods as defined in the paper</param>
 public static void FASTX(InputArray image, out KeyPoint[] keypoints, int threshold, bool nonmaxSupression, int type)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     image.ThrowIfDisposed();
     using (var kp = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_FASTX(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, type);
         keypoints = kp.ToArray();
     }
 }
        /// <summary>
        /// Remove keypoints within borderPixels of an image edge.
        /// </summary>
        /// <param name="keypoints"></param>
        /// <param name="imageSize"></param>
        /// <param name="borderSize"></param>
        /// <returns></returns>
        public static KeyPoint[] RunByImageBorder(IEnumerable<KeyPoint> keypoints, Size imageSize, int borderSize)
        {
            if (keypoints == null) 
                throw new ArgumentNullException("keypoints");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_runByImageBorder(
                    keypointsVec.CvPtr, imageSize, borderSize);
                return keypointsVec.ToArray();
            }
        }
Example #3
        /// <summary>
        /// Detects corners using the FAST algorithm
        /// </summary>
        /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
        /// <param name="threshold">threshold on difference between intensity of the central pixel 
        /// and pixels of a circle around this pixel.</param>
        /// <param name="nonmaxSupression">if true, non-maximum suppression is applied to 
        /// detected corners (keypoints).</param>
        /// <param name="type">one of the three neighborhoods as defined in the paper</param>
        /// <returns>keypoints detected on the image.</returns>
        public static KeyPoint[] FAST(InputArray image, int threshold, bool nonmaxSupression, FASTType type)
        {
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            image.ThrowIfDisposed();

            using (var kp = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_FAST2(image.CvPtr, kp.CvPtr, threshold, nonmaxSupression ? 1 : 0, (int)type);
                GC.KeepAlive(image);
                return kp.ToArray();
            }
        }
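A minimal usage sketch for the FAST wrapper above; this is not part of the original listing. It assumes OpenCvSharp is referenced, that the helpers are exposed on the Cv2 class as in the stock library, and that a file named scene.png exists; enum member names can vary slightly between OpenCvSharp versions.

        // Hedged sketch: load an image, convert it to grayscale, and run FAST corner detection.
        using (var src = Cv2.ImRead("scene.png"))              // hypothetical input file
        using (var gray = new Mat())
        {
            Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY);
            KeyPoint[] corners = Cv2.FAST(gray, 20, true);     // threshold = 20, non-max suppression enabled
            Console.WriteLine("FAST found " + corners.Length + " corners");
        }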
        /// <summary>
        /// Remove keypoints of sizes out of range.
        /// </summary>
        /// <param name="keypoints"></param>
        /// <param name="minSize"></param>
        /// <param name="maxSize"></param>
        /// <returns></returns>
        public static KeyPoint[] RunByKeypointSize(IEnumerable<KeyPoint> keypoints, float minSize,
            float maxSize = Single.MaxValue)
        {
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_runByKeypointSize(
                    keypointsVec.CvPtr, minSize, maxSize);
                return keypointsVec.ToArray();
            }
        }
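A short hedged sketch chaining the two filters above, reusing the gray Mat and corners array from the previous sketch; it assumes the methods are exposed as static members of a KeyPointsFilter class, as the snippets suggest.

        // Hedged sketch: drop corners that lie near the border, then drop very small keypoints.
        KeyPoint[] awayFromBorder = KeyPointsFilter.RunByImageBorder(corners, new Size(gray.Width, gray.Height), 16);
        KeyPoint[] filtered       = KeyPointsFilter.RunByKeypointSize(awayFromBorder, 2f);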
 /// <summary>
 /// Detects corners using the AGAST algorithm
 /// </summary>
 /// <param name="image">grayscale image where keypoints (corners) are detected.</param>
 /// <param name="threshold">threshold on difference between intensity of the central pixel 
 /// and pixels of a circle around this pixel.</param>
 /// <param name="nonmaxSuppression">if true, non-maximum suppression is applied to 
 /// detected corners (keypoints).</param>
 /// <param name="type">one of the four neighborhoods as defined in the paper</param>
 /// <returns>keypoints detected on the image.</returns>
 public static KeyPoint[] AGAST(InputArray image, int threshold, bool nonmaxSuppression, AGASTType type)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     image.ThrowIfDisposed();
     
     using (var vector = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_AGAST(image.CvPtr, vector.CvPtr, threshold, nonmaxSuppression ? 1 : 0,
             (int) type);
         GC.KeepAlive(image);
         return vector.ToArray();
     }
 }
Example #6
        public static UMat Run(Mat img)
        {
            var modelKeyPoints = new VectorOfKeyPoint();
            var result         = new UMat();

            using (UMat uModelImage = img.ToUMat(AccessType.Read))
            {
                SIFT siftfCPU         = new SIFT();
                UMat modelDescriptors = new UMat();
                siftfCPU.DetectRaw(uModelImage, modelKeyPoints);
                Features2DToolbox.DrawKeypoints(img, modelKeyPoints, result, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
            }

            return(result);
        }
Example #7
        public static Bitmap VisualizeFeatures(Image <Bgr, byte> img, VectorOfKeyPoint features, Color color)
        {
            Bitmap bmp = new Bitmap(img.Width, img.Height);

            // Dispose the Graphics and brush deterministically; only the Bitmap is returned to the caller.
            using (Graphics g = Graphics.FromImage(bmp))
            using (SolidBrush brush = new SolidBrush(color))
            {
                g.DrawImage(img.ToBitmap(), 0, 0, img.Width, img.Height);

                foreach (MKeyPoint kp in features.ToArray())
                {
                    g.FillEllipse(brush, kp.Point.X - 5, kp.Point.Y - 5, 11, 11);
                }
            }

            return(bmp);
        }
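A hedged usage sketch for the helper above (Emgu CV). The ORB detector, the file names, and the color are placeholders and not part of the original example.

            // Hedged sketch: detect ORB keypoints and overlay them with VisualizeFeatures.
            using (Image<Bgr, byte> img = new Image<Bgr, byte>("scene.jpg"))
            using (ORBDetector orb = new ORBDetector())
            using (VectorOfKeyPoint kps = new VectorOfKeyPoint())
            {
                kps.Push(orb.Detect(img));
                Bitmap overlay = VisualizeFeatures(img, kps, Color.Yellow);
                overlay.Save("scene_features.png");
            }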
Example #8
        private void button_akaz_Click(object sender, EventArgs e)
        {
            Mat scr    = imagemat;
            Mat result = imagemat.Clone();
            VectorOfKeyPoint vector_keypoints = new VectorOfKeyPoint();// Create a VectorOfKeyPoint to store the set of keypoints.

            #region akaz
            //AKAZE _akaze = new AKAZE();// Create the AKAZE detector with default parameters.
            // _akaze.DetectRaw(scr, vector_keypoints,null);
            #endregion
            #region Brisk
            // Brisk _brisk = new Brisk(30, 1, 1f);// Create the Brisk detector with default parameters.
            // _brisk.DetectRaw(scr, vector_keypoints, null);// Detect keypoints.
            #endregion
            #region ORBDetector - adjusting scaleFactor up or down changes how many keypoints are detected
        /*
         * /// <summary>
         * /// Compute the descriptor given the bgr image and the point location, using opponent color (CGIV 2008 "Color Descriptors for Object Category Recognition").
         * /// </summary>
         * /// <param name="image">The image where the descriptor will be computed from</param>
         * /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
         * /// <returns>The descriptors found at the keypoint locations</returns>
         * public Matrix<float> ComputeDescriptorsRaw(Image<Bgr, Byte> image, VectorOfKeyPoint keyPoints)
         * {
         * int count = keyPoints.Size;
         * if (count == 0) return null;
         * Matrix<float> descriptors = new Matrix<float>(count, DescriptorSize * 3, 1);
         * CvSIFTDetectorComputeDescriptorsBGR(_ptr, image, keyPoints, descriptors);
         * return descriptors;
         * }*/

        /// <summary>
        /// Compute the descriptor given the image and the point location
        /// </summary>
        /// <param name="image">The image where the descriptor will be computed from</param>
        /// <param name="mask">The optional mask, can be null if not needed</param>
        /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
        /// <returns>The descriptors found at the keypoint locations</returns>
        public ImageFeature[] ComputeDescriptors(Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
        {
            if (keyPoints.Length == 0)
            {
                return(new ImageFeature[0]);
            }
            using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
            {
                kpts.Push(keyPoints);
                using (Matrix <float> descriptor = ComputeDescriptorsRaw(image, mask, kpts))
                {
                    return(Features2DTracker.ConvertToImageFeature(kpts, descriptor));
                }
            }
        }
 /// <summary>
 /// LOGOS (Local geometric support for high-outlier spatial verification) feature matching strategy
 /// </summary>
 /// <param name="keypoints1">Input keypoints of image1.</param>
 /// <param name="keypoints2">Input keypoints of image2.</param>
 /// <param name="nn1">Index to the closest BoW centroid for each descriptors of image1.</param>
 /// <param name="nn2">Index to the closest BoW centroid for each descriptors of image2.</param>
 /// <param name="matches1to2">Matches returned by the LOGOS matching strategy.</param>
 public static void MatchLOGOS(
     VectorOfKeyPoint keypoints1,
     VectorOfKeyPoint keypoints2,
     VectorOfInt nn1,
     VectorOfInt nn2,
     VectorOfDMatch matches1to2)
 {
     cveMatchLOGOS(
         keypoints1,
         keypoints2,
         nn1,
         nn2,
         matches1to2
         );
 }
Example #11
        private static SURFData ExecuteSurfDetection(Mat scene)
        {
            using (SURF surfDetector = new SURF(300, 4, 2, false, true))
            {
                Mat sceneDescriptors            = new Mat();
                VectorOfKeyPoint sceneKeyPoints = new VectorOfKeyPoint();
                surfDetector.DetectAndCompute(scene, null, sceneKeyPoints, sceneDescriptors, false);

                return(new SURFData
                {
                    KeyPoints = sceneKeyPoints,
                    Descriptors = sceneDescriptors
                });
            }
        }
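A hedged call-site sketch for the helper above. SURFData is the example's own container type (KeyPoints, Descriptors); the file name is a placeholder, and the single-argument Imread overload assumes a reasonably recent Emgu CV.

            // Hedged sketch: run SURF detection on a scene image and inspect the result.
            Mat scene = CvInvoke.Imread("scene.jpg");
            SURFData data = ExecuteSurfDetection(scene);
            Console.WriteLine("SURF keypoints: " + data.KeyPoints.Size);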
Example #12
        /// <summary>
        /// Recover the homography matrix using RANSAC. If the matrix cannot be recovered, null is returned.
        /// </summary>
        /// <param name="model">The model keypoints</param>
        /// <param name="observed">The observed keypoints</param>
        /// <param name="matchIndices">The match indices</param>
        /// <param name="ransacReprojThreshold">
        /// The maximum allowed reprojection error to treat a point pair as an inlier.
        /// If srcPoints and dstPoints are measured in pixels, it usually makes sense to set this parameter somewhere in the range 1 to 10.
        /// </param>
        /// <param name="mask">
        /// The mask matrix of which the value might be modified by the function.
        /// As input, if the value is 0, the corresponding match will be ignored when computing the homography matrix.
        /// If the value is 1 and RANSAC determines the match to be an outlier, the value will be set to 0.
        /// </param>
        /// <returns>The homography matrix, if it cannot be found, null is returned</returns>
        public static HomographyMatrix GetHomographyMatrixFromMatchedFeatures(VectorOfKeyPoint model, VectorOfKeyPoint observed, Matrix <int> matchIndices, Matrix <Byte> mask, double ransacReprojThreshold)
        {
            HomographyMatrix homography = new HomographyMatrix();
            bool             found      = CvInvoke.getHomographyMatrixFromMatchedFeatures(model, observed, matchIndices, mask, ransacReprojThreshold, homography);

            if (found)
            {
                return(homography);
            }
            else
            {
                homography.Dispose();
                return(null);
            }
        }
        public static void FindMatch(string pageFile, string templateFile)
        {
            Image <Rgb, byte> page     = getPreprocessedImage(pageFile);
            Image <Rgb, byte> template = getPreprocessedImage(templateFile);

            var detector = new ORBDetector();
            VectorOfKeyPoint templateKeyPoints = new VectorOfKeyPoint();
            Mat templateDescriptors            = new Mat();

            detector.DetectAndCompute(template, null, templateKeyPoints, templateDescriptors, false);

            VectorOfKeyPoint pageKeyPoints = new VectorOfKeyPoint();
            Mat pageDescriptors            = new Mat();

            detector.DetectAndCompute(page, null, pageKeyPoints, pageDescriptors, false);
            using (var matcher = new BFMatcher(DistanceType.L1))
            {
                matcher.Add(templateDescriptors);
                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

                //VectorOfDMatch matches2 = new VectorOfDMatch();
                //matcher.Match(pageDescriptors, matches2);


                matcher.KnnMatch(pageDescriptors, matches, 2, null);

                Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
                Mat homography   = new Mat();
                int nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(templateKeyPoints, pageKeyPoints, matches, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(templateKeyPoints, pageKeyPoints, matches, mask, 2);
                    }
                }

                Mat result = new Mat();
                Features2DToolbox.DrawMatches(template, templateKeyPoints, page, pageKeyPoints, matches, result, new MCvScalar(0, 255, 0), new MCvScalar(255, 0, 0), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

                //Features2DToolbox.DrawMatches(template, templateKeyPoints, page, pageKeyPoints, matches2, result, new MCvScalar(0, 255, 0), new MCvScalar(255, 0, 0), null, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

                MainForm.This.PageBox.Image = result.ToBitmap();
            }
        }
Example #14
        public void FindMatch(Image <Gray, byte> modelImage, Image <Gray, byte> observedImage, double hessianThresh, int k,
                              double uniquenessThreshold, VectorOfVectorOfDMatch matches, out VectorOfKeyPoint modelKeyPoints,
                              out VectorOfKeyPoint observedKeyPoints, out Mat mask, out Mat homography)
        {
            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            CudaSURFDetector surfCuda = new CudaSURFDetector((float)hessianThresh);

            using (GpuMat gpuModelImage = new GpuMat(modelImage))
                //extract features from the object image
                using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                    using (
                        GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                        using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                        {
                            surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                            // extract features from the observed image
                            using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                                using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                                    using (
                                        GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null,
                                                                                                       gpuObservedKeyPoints))
                                    {
                                        matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                                        surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                        mask.SetTo(new MCvScalar(255));
                                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                                        if (nonZeroCount >= 4)
                                        {
                                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                                       matches, mask, 1.5, 20);
                                            if (nonZeroCount >= 4)
                                            {
                                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                      observedKeyPoints, matches, mask, 2);
                                            }
                                        }
                                    }
                        }
        }
Example #15
        public static void FindMatchWM(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, Feature2D computer, Feature2D detector)
        {
            Stopwatch watch;

            modelKeyPoints    = new VectorOfKeyPoint(); // keypoints of the model image
            observedKeyPoints = new VectorOfKeyPoint(); // keypoints of the larger (observed) image
            homography        = null;
            int k = 2;


            using (Mat uModelImage = modelImage.Clone())
                using (Mat uObservedImage = observedImage.Clone())
                {
                    // compute descriptors for the first (model) image
                    Mat modelDescriptors = new Mat();
                    DetectAndCompute(uModelImage, out modelKeyPoints, out modelDescriptors, detector, computer);

                    watch = Stopwatch.StartNew();

                    // ... and for the second (observed) image
                    Mat observedDescriptors = new Mat();
                    DetectAndCompute(uObservedImage, out observedKeyPoints, out observedDescriptors, detector, computer);


                    BFMatcher matcher = new BFMatcher(DistanceType.L2); // brute-force matcher that compares descriptors of the two images
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null); // k-NN matching
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, 0.8, mask); // build the uniqueness mask (see below)

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, // estimate the region where the model is expected to appear
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
        public KeyPoints SIFTDescriptor()
        {
            KeyPoints result = new KeyPoints();
            //SiFT Descriptor
            SIFT             siftAlgo           = null;
            VectorOfKeyPoint modelKeyPointsSift = null;

            try
            {
                siftAlgo           = new SIFT();
                modelKeyPointsSift = new VectorOfKeyPoint();

                MKeyPoint[] siftPoints = siftAlgo.Detect(preProcessedImageInGrayScale);
                modelKeyPointsSift.Push(siftPoints);
                UMat siftDescriptors = new UMat();
                siftAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPointsSift, siftDescriptors, true);
                Image <Gray, Byte> outputImage = new Image <Gray, byte>(
                    preProcessedImageInGrayScale.Width,
                    preProcessedImageInGrayScale.Height);
                Features2DToolbox.DrawKeypoints(
                    preProcessedImageInGrayScale,
                    modelKeyPointsSift,
                    outputImage,
                    new Bgr(255, 255, 255),
                    Features2DToolbox.KeypointDrawType.Default);

                string folderName = @"C:\Projects\LeafService\SiftImage";
                string pathString = System.IO.Path.Combine(folderName, "Sift" + DateTime.UtcNow.Ticks);
                System.IO.Directory.CreateDirectory(pathString);
                if (Directory.Exists(pathString))
                {
                    string newFilePath = Path.Combine(pathString, "SiftImage" + DateTime.UtcNow.Ticks);
                    outputImage.Save(newFilePath + ".jpg");
                    outputImage.Save(@"C:\Projects\LeafService\SIFTgray.jpg");
                }


                //outputImage.Save("sift.jpg");
                result.Descriptor = siftDescriptors;
                result.Points     = siftPoints;
                return(result);
            }
            finally
            {
                siftAlgo.Dispose();
                modelKeyPointsSift.Dispose();
            }
        }
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
                {
                    SIFT surfCPU = new SIFT();

                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);
                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
        public bool ConfigureImageTrainROI(VectorOfKeyPoint keypointsImageTrain, Mat roiTrain)
        {
            if (keypointsImageTrain == null)
            {
                return(false);
            }

            VectorOfVectorOfPoint contoursImageTrainROI = new VectorOfVectorOfPoint();
            VectorOfPointF        hierarchyContours     = new VectorOfPointF();

            // Extract contours from the image defined as ROI (mask) and the hierarchy of these contours
            CvInvoke.FindContours(roiTrain.Clone(), contoursImageTrainROI, hierarchyContours, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

            if (contoursImageTrainROI.Size == 0)
            {
                return(false);
            }

            int numberKeypointsImageTrain = keypointsImageTrain.Size;

            var pop = new ParallelOptions {
                MaxDegreeOfParallelism = 5
            };

            for (int indexKeypointImageTrain = 0; indexKeypointImageTrain < numberKeypointsImageTrain; ++indexKeypointImageTrain)
            {
                Parallel.For(0, contoursImageTrainROI.Size, pop, i =>
                {
                    PointF pointXY = keypointsImageTrain[indexKeypointImageTrain].Point;
                    if (CvInvoke.PointPolygonTest(contoursImageTrainROI[i], pointXY, false) >= 0)
                    {
                        _indexKeypointsImageTrainAssociatedROI[_LODIndex][indexKeypointImageTrain] = i;
                        return;
                    }
                });
            }

            _numberKeypointsImageTrainInContour[_LODIndex].Clear();
            _numberKeypointsImageTrainInContour[_LODIndex] = new List <int>(contoursImageTrainROI.Size);

            for (int i = 0; i < _indexKeypointsImageTrainAssociatedROI[_LODIndex].Count; ++i)
            {
                var indexContour = _indexKeypointsImageTrainAssociatedROI[_LODIndex][i];
                ++_numberKeypointsImageTrainInContour[_LODIndex][indexContour];
            }

            return(true);
        }
        public SimpleAdHocTracker(CameraCalibrationInfo calibrationInfo)
        {
            _calibrationInfo = calibrationInfo;

            _detector = new ORBDetector();

            _prevGray = new Mat();
            _currGray = new Mat();

            _raux = new Mat();
            _taux = new Mat();

            _bootstrapKp       = new VectorOfKeyPoint();
            _trackedFeatures   = new VectorOfKeyPoint();
            _trackedFeatures3D = new VectorOfPoint3D32F();
        }
 /// <summary>
 /// Compute the descriptor given the image and the point location
 /// </summary>
 /// <param name="extractor">The descriptor extractor</param>
 /// <param name="image">The image where the descriptor will be computed from</param>
 /// <param name="mask">The optional mask, can be null if not needed</param>
 /// <param name="keyPoints">The keypoint where the descriptor will be computed from</param>
 /// <returns>The descriptors found at the keypoint locations</returns>
 public static ImageFeature <TDepth>[] ComputeDescriptors <TDepth>(this IDescriptorExtractor <TDepth> extractor, Image <Gray, Byte> image, Image <Gray, byte> mask, MKeyPoint[] keyPoints)
     where TDepth : struct
 {
     if (keyPoints.Length == 0)
     {
         return(new ImageFeature <TDepth> [0]);
     }
     using (VectorOfKeyPoint kpts = new VectorOfKeyPoint())
     {
         kpts.Push(keyPoints);
         using (Matrix <TDepth> descriptor = extractor.ComputeDescriptorsRaw(image, mask, kpts))
         {
             return(ImageFeature <TDepth> .ConvertFromRaw(kpts, descriptor));
         }
     }
 }
        /// <summary>
        /// Remove keypoints from some image by mask for pixels of this image.
        /// </summary>
        /// <param name="keypoints"></param>
        /// <param name="mask"></param>
        /// <returns></returns>
        public static KeyPoint[] RunByPixelsMask(IEnumerable<KeyPoint> keypoints, Mat mask)
        {
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");
            if (mask == null) 
                throw new ArgumentNullException("mask");
            mask.ThrowIfDisposed();

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_runByPixelsMask(
                    keypointsVec.CvPtr, mask.CvPtr);
                GC.KeepAlive(mask);
                return keypointsVec.ToArray();
            }
        }
Example #22
        private static VectorOfKeyPoint GetBestKeypointsPercent(VectorOfKeyPoint keyPoints, float percent)
        {
            if (percent < 0 || percent > 1)
            {
                throw new ArgumentOutOfRangeException(nameof(percent));
            }

            if (percent == 1)
            {
                return(keyPoints);
            }

            int count = (int)Math.Round(keyPoints.Size * percent);

            return(GetBestKeypointsCount(keyPoints, count));
        }
Example #23
        public void TestCudaSURFKeypointDetection()
        {
            if (CudaInvoke.HasCuda)
            {
                Image <Gray, byte> image = new Image <Gray, byte>(200, 100);
                image.SetRandUniform(new MCvScalar(), new MCvScalar(255));
                GpuMat gpuMat = new GpuMat(image);

                EmguAssert.IsTrue(gpuMat.ToMat().Equals(image.Mat));

                CudaSURF         cudaSurf = new CudaSURF(100.0f, 2, 4, false, 0.01f, false);
                GpuMat           cudaKpts = cudaSurf.DetectKeyPointsRaw(gpuMat, null);
                VectorOfKeyPoint kpts     = new VectorOfKeyPoint();
                cudaSurf.DownloadKeypoints(cudaKpts, kpts);
            }
        }
Example #24
        /// <summary>
        /// Draws the feature points onto an image
        /// </summary>
        /// <param name="surf">SURF feature data</param>
        /// <param name="drawImg">The image to draw on</param>
        /// <returns>Returns the image with the feature points drawn on it</returns>
        public static Image <Bgr, Byte> DrawSURFFeature(SURFFeatureData surf, Image <Bgr, Byte> drawImg)
        {
            VectorOfKeyPoint keyPoints  = surf.GetKeyPoints();
            Bitmap           imgForDraw = drawImg.ToBitmap();

            // Draw with System.Drawing.Graphics (the using block disposes it; no explicit Dispose needed)
            using (Graphics g = Graphics.FromImage(imgForDraw))
            {
                for (int i = 0; i < keyPoints.Size; i++)
                {
                    g.DrawEllipse(new Pen(new SolidBrush(Color.White), 2), (int)keyPoints[i].Point.X, (int)keyPoints[i].Point.Y, 15, 15);
                }
            }
            return(new Image <Bgr, Byte>(imgForDraw).Resize(320, 240, INTER.CV_INTER_LINEAR));
        }
Example #25
        public static Mat ClassifyAndShowResult(Mat modelImage, Mat observedImage, double uniquenessThreshold, int k, out long score)
        {
            VectorOfKeyPoint       modelKeyPoints    = null;
            VectorOfKeyPoint       observedKeyPoints = null;
            VectorOfVectorOfDMatch matches           = null;
            Mat homography = null;

            score = 0;
            var mask   = ClassifyForDrawing(modelImage, observedImage, uniquenessThreshold, k, out modelKeyPoints, out observedKeyPoints, out matches, out homography, out score);
            var result = new Mat();

            Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                          matches, result, new MCvScalar(0, 0, 0), new MCvScalar(0, 0, 0), mask);
            Draw(homography, result, modelImage);
            return(result);
        }
 /// <summary>
 /// Draw the matched keypoints between the model image and the observed image.
 /// </summary>
 /// <param name="modelImage">The model image</param>
 /// <param name="modelKeypoints">The keypoints in the model image</param>
 /// <param name="observerdImage">The observed image</param>
 /// <param name="observedKeyPoints">The keypoints in the observed image</param>
 /// <param name="matchColor">The color for the match correspondence lines</param>
 /// <param name="singlePointColor">The color for highlighting the keypoints</param>
 /// <param name="mask">The mask for the matches. Use null for all matches.</param>
 /// <param name="flags">The drawing type</param>
 /// <param name="result">The image where the model and observed images are displayed side by side. Matches are drawn as indicated by the flag.</param>
 /// <param name="matches">Matches. Each matches[i] is k or fewer matches for the same query descriptor.</param>
 public static void DrawMatches(
     IInputArray modelImage, VectorOfKeyPoint modelKeypoints,
     IInputArray observerdImage, VectorOfKeyPoint observedKeyPoints,
     VectorOfVectorOfDMatch matches,
     IInputOutputArray result,
     MCvScalar matchColor, MCvScalar singlePointColor,
     IInputArray mask       = null,
     KeypointDrawType flags = KeypointDrawType.Default)
 {
     using (InputArray iaModelImage = modelImage.GetInputArray())
         using (InputArray iaObserverdImage = observerdImage.GetInputArray())
             using (InputOutputArray ioaResult = result.GetInputOutputArray())
                 using (InputArray iaMask = mask == null ? InputArray.GetEmpty() : mask.GetInputArray())
                     Features2DInvoke.drawMatchedFeatures(iaObserverdImage, observedKeyPoints, iaModelImage,
                                                          modelKeypoints, matches, ioaResult, ref matchColor, ref singlePointColor, iaMask, flags);
 }
Example #27
        /// <summary>
        /// Detects the feature set and caches it
        /// </summary>
        /// <param name="featureDetector"></param>
        /// <param name="image"></param>
        /// <returns></returns>
        public static FeatureSet Detect(KAZE featureDetector, Image <Gray, byte> image)
        {
            using (UMat uModelImage = image.ToUMat())
            {
                Mat descriptors = new Mat();
                var keyPoints   = new VectorOfKeyPoint();
                featureDetector.DetectAndCompute(uModelImage, null, keyPoints, descriptors, false);

                return(new FeatureSet()
                {
                    image = image,
                    descriptors = descriptors,
                    keyPoints = keyPoints,
                });
            }
        }
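A hedged usage sketch of the caching helper above. FeatureSet is the example's own type; the KAZE defaults and the file name are placeholders.

            // Hedged sketch: build a FeatureSet for a model image with default KAZE parameters.
            using (KAZE kaze = new KAZE())
            using (Image<Gray, byte> model = new Image<Gray, byte>("model.png"))
            {
                FeatureSet set = Detect(kaze, model);
                Console.WriteLine("KAZE keypoints: " + set.keyPoints.Size);
            }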
Example #28
        public static void KeepVectorsByStatus(ref VectorOfKeyPoint f1, ref VectorOfPoint3D32F f2, VectorOfByte status)
        {
            var newF1 = new VectorOfKeyPoint();
            var newF2 = new VectorOfPoint3D32F();

            for (int i = 0; i < status.Size; i++)
            {
                if (status[i] > 0)
                {
                    newF1.Push(new[] { f1[i] });
                    newF2.Push(new[] { f2[i] });
                }
            }

            f1 = newF1;
            f2 = newF2;
        }
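A tiny hedged sketch of the helper above with a hand-built status vector; in practice the status usually comes from an optical-flow or matching step, and all values here are made up for illustration.

            // Hedged sketch: keep only the entries whose status byte is non-zero.
            var kps    = new VectorOfKeyPoint();
            kps.Push(new[] { new MKeyPoint { Point = new PointF(10, 10) }, new MKeyPoint { Point = new PointF(20, 20) } });
            var pts3d  = new VectorOfPoint3D32F(new[] { new MCvPoint3D32f(0, 0, 1), new MCvPoint3D32f(1, 1, 1) });
            var status = new VectorOfByte(new byte[] { 1, 0 });   // keep the first pair, drop the second

            KeepVectorsByStatus(ref kps, ref pts3d, status);      // afterwards kps.Size == 1 and pts3d.Size == 1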
Example #29
        public Matrix <float> ComputeSingleDescriptors(Image <Gray, byte> image) // old return Matrix<float>
        {
            Mat descsTmp = new Mat();

            #region deprecated
            //VectorOfKeyPoint keyPoints = detector.DetectKeyPointsRaw(img, null);
            //descs = detector.ComputeDescriptorsRaw(img, null, keyPoints);
            #endregion

            VectorOfKeyPoint keyPoints = new VectorOfKeyPoint();
            detector.DetectAndCompute(image, null, keyPoints, descsTmp, false);

            Matrix <float> descs = new Matrix <float>(descsTmp.Rows, descsTmp.Cols);
            descsTmp.CopyTo(descs);

            return(descs);
        }
        /// <summary>
        /// Convert the image features to keypoint vector and descriptor matrix
        /// </summary>
        private static void ConvertFromImageFeature(ImageFeature[] features, out VectorOfKeyPoint keyPoints, out Matrix <float> descriptors)
        {
            keyPoints = new VectorOfKeyPoint();
            keyPoints.Push(Array.ConvertAll <ImageFeature, MKeyPoint>(features, delegate(ImageFeature feature) { return(feature.KeyPoint); }));
            descriptors = new Matrix <float>(features.Length, features[0].Descriptor.Length);

            int descriptorLength = features[0].Descriptor.Length;

            float[,] data = descriptors.Data;
            for (int i = 0; i < features.Length; i++)
            {
                for (int j = 0; j < descriptorLength; j++)
                {
                    data[i, j] = features[i].Descriptor[j];
                }
            }
        }
        public UMat SURFDescriptor()
        {
            double hessianThresh = 800;
            // public SURF(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true, bool upright = false)
            SURF             surfAlgo       = new SURF(hessianThresh, 4, 2, true, false);
            VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();

            MKeyPoint[] mKeyPoints = surfAlgo.Detect(preProcessedImageInGrayScale);
            modelKeyPoints.Push(mKeyPoints);
            VectorOfKeyPoint observedKeyPoints = new VectorOfKeyPoint();
            UMat             SurfDescriptors   = new UMat();

            surfAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPoints, SurfDescriptors, true);
            //image2.Source = BitmapSourceConvert.ToBitmapSource(modelDescriptors);
            SurfDescriptors.Save("SURFDetection.jpg");
            return(SurfDescriptors);
        }
        /// <summary>
        /// Recover the homography matrix using RANSAC. If the matrix cannot be recovered, null is returned.
        /// </summary>
        /// <param name="model">The model keypoints</param>
        /// <param name="observed">The observed keypoints</param>
        /// <param name="ransacReprojThreshold">
        /// The maximum allowed reprojection error to treat a point pair as an inlier.
        /// If srcPoints and dstPoints are measured in pixels, it usually makes sense to set this parameter somewhere in the range 1 to 10.
        /// </param>
        /// <param name="mask">
        /// The mask matrix of which the value might be modified by the function.
        /// As input, if the value is 0, the corresponding match will be ignored when computing the homography matrix.
        /// If the value is 1 and RANSAC determines the match to be an outlier, the value will be set to 0.
        /// </param>
        /// <returns>The homography matrix, if it cannot be found, null is returned</returns>
        /// <param name="matches">Matches. Each matches[i] is k or less matches for the same query descriptor.</param>
        public static Mat GetHomographyMatrixFromMatchedFeatures(VectorOfKeyPoint model,
                                                                 VectorOfKeyPoint observed, VectorOfVectorOfDMatch matches, Mat mask, double ransacReprojThreshold)
        {
            Mat  homography = new Mat();
            bool found      = Features2DInvoke.getHomographyMatrixFromMatchedFeatures(model, observed, matches, mask,
                                                                                      ransacReprojThreshold, homography);

            if (found)
            {
                return(homography);
            }
            else
            {
                homography.Dispose();
                return(null);
            }
        }
        // Check whether the two images match
        // Input parameters
        public HomographyMatrix GetTwoImageHomographyMatrix(VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints, Matrix <int> indices, Matrix <byte> mask)
        {
            int k = 2;
            //indices = new Matrix<int>(observedDescriptors.Rows, k);
            HomographyMatrix homography = null;
            int nonZeroCount            = CvInvoke.cvCountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }
            }
            return(homography);
        }
Example #34
        public static UMat Run(Mat img)
        {
            var modelKeyPoints = new VectorOfKeyPoint();
            var result         = new UMat();

            using (UMat uModelImage = img.ToUMat(AccessType.Read))
            {
                FastDetector fastCPU          = new FastDetector(10, true);
                Freak        freakCPU         = new Freak();
                UMat         modelDescriptors = new UMat();
                fastCPU.DetectRaw(uModelImage, modelKeyPoints);
                freakCPU.Compute(uModelImage, modelKeyPoints, modelDescriptors);
                Features2DToolbox.DrawKeypoints(img, modelKeyPoints, result, new Bgr(Color.Red), Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);
            }

            return(result);
        }
 public Result(int trainValue, VectorOfPoint trainContour, MCvScalar trainContourColor, float bestROIMatch,
               Mat referenceTrainImage, VectorOfKeyPoint referenceTrainKeyPoints, VectorOfKeyPoint keypointsEvalImage, ref
               VectorOfDMatch matches, ref VectorOfDMatch inliers, ref VectorOfInt inliersMatcheMask, ref Mat homography)
 {
     this._trainValue              = trainValue;
     this._trainContour            = trainContour;
     this._trainContourColor       = trainContourColor;
     this._bestROIMatch            = bestROIMatch;
     this._referenceTrainImage     = referenceTrainImage;
     this._referenceTrainKeyPoints = referenceTrainKeyPoints;
     this._keypointsEvalImag       = keypointsEvalImage;
     this._matches           = matches;
     this._inliers           = inliers;
     this._inliersMatcheMask = inliersMatcheMask;
     this._homography        = homography;
     this._inliersKeyPoints  = new VectorOfKeyPoint();
 }
Example #36
        public IDrawer FindMatch(KeyFrame keyFrame, Image <Bgr, Byte> observedImage, List <KeyFrame> keyframes = null)
        {
            if (keyFrame.KeyPoints == null)
            {
                keyFrame.KeyPoints = new VectorOfKeyPoint(CPU.Detect(keyFrame.Frame));
            }
            if (keyFrame.Descriptors == null)
            {
                keyFrame.Descriptors = new Mat();
                descriptor.Compute(keyFrame.Frame, keyFrame.KeyPoints, keyFrame.Descriptors);
            }
            // extract features from the observed image
            observedKeyPoints = new VectorOfKeyPoint(CPU.Detect(observedImage));
            descriptor.Compute(observedImage, observedKeyPoints, observedDescriptors);
            matcher.Add(keyFrame.Descriptors);

            matcher.KnnMatch(observedDescriptors, matches, k, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(keyFrame.KeyPoints, observedKeyPoints,
                                                                           matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(keyFrame.KeyPoints, observedKeyPoints, matches, mask, 2);
                }

                nonZeroCount = CvInvoke.CountNonZero(mask);

                if (nonZeroCount < 9)
                {
                    homography = null;
                }

                //if (keyframes != null && homography == null)
                //    keyframes.Add(new KeyFrame() { Frame = observedImage, KeyPoints = observedKeyPoints, Descriptors = observedDescriptors });
            }

            return(this);
        }
Example #37
 /// <summary>
 /// Writes a sequence of KeyPoint objects to the file storage under the given node name.
 /// </summary>
 /// <param name="name">The node name.</param>
 /// <param name="value">The keypoints to write.</param>
 public void Write(string name, IEnumerable<KeyPoint> value)
 {
     if (name == null)
         throw new ArgumentNullException(nameof(name));
     if (value == null)
         throw new ArgumentNullException(nameof(value));
     using (var valueVector = new VectorOfKeyPoint(value))
     {
         NativeMethods.core_FileStorage_write_vectorOfKeyPoint(ptr, name, valueVector.CvPtr);
     }
 }
Example #38
 /// <summary>
 /// Computes the descriptors for an image and returns the corresponding keypoints.
 /// </summary>
 /// <param name="image">The input image.</param>
 /// <param name="keypoints">The keypoints for which descriptors have been computed.</param>
 /// <param name="descriptors">The computed descriptors.</param>
 public void Compute(Mat image, out KeyPoint[] keypoints, Mat descriptors)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_Feature2D_compute(ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
         keypoints = keypointsVec.ToArray();
     }
 }
Example #39
        /// <summary>
        /// Evaluates a feature detector by measuring keypoint repeatability between two images related by a known homography.
        /// </summary>
        /// <param name="img1">First image.</param>
        /// <param name="img2">Second image.</param>
        /// <param name="H1to2">Homography that maps img1 to img2.</param>
        /// <param name="keypoints1">Keypoints detected in img1.</param>
        /// <param name="keypoints2">Keypoints detected in img2.</param>
        /// <param name="repeatability">The computed repeatability score.</param>
        /// <param name="correspCount">The number of correspondences found.</param>
        public static void EvaluateFeatureDetector(
            Mat img1, Mat img2, Mat H1to2,
            ref KeyPoint[] keypoints1, ref KeyPoint[] keypoints2,
            out float repeatability, out int correspCount)
        {
            if (img1 == null) 
                throw new ArgumentNullException(nameof(img1));
            if (img2 == null) 
                throw new ArgumentNullException(nameof(img2));
            if (H1to2 == null) 
                throw new ArgumentNullException(nameof(H1to2));
            if (keypoints1 == null) 
                throw new ArgumentNullException(nameof(keypoints1));
            if (keypoints2 == null) 
                throw new ArgumentNullException(nameof(keypoints2));

            using (var keypoints1Vec = new VectorOfKeyPoint(keypoints1))
            using (var keypoints2Vec = new VectorOfKeyPoint(keypoints2))
            {
                NativeMethods.features2d_evaluateFeatureDetector(
                    img1.CvPtr, img2.CvPtr, H1to2.CvPtr,
                    keypoints1Vec.CvPtr, keypoints2Vec.CvPtr, 
                    out repeatability, out correspCount);
                keypoints1 = keypoints1Vec.ToArray();
                keypoints2 = keypoints2Vec.ToArray();
            }
        }
Example #40
        /// <summary>
        /// Finds scale-space extrema (SIFT keypoint candidates) in the difference-of-Gaussians pyramid.
        /// </summary>
        /// <param name="gaussPyr">The Gaussian pyramid.</param>
        /// <param name="dogPyr">The difference-of-Gaussians pyramid.</param>
        /// <returns>The detected keypoints.</returns>
        public KeyPoint[] FindScaleSpaceExtrema(IEnumerable<Mat> gaussPyr, IEnumerable<Mat> dogPyr)
        {
            ThrowIfDisposed();
            if (gaussPyr == null)
                throw new ArgumentNullException("gaussPyr");
            if (dogPyr == null)
                throw new ArgumentNullException("dogPyr");

            IntPtr[] gaussPyrPtrs = EnumerableEx.SelectPtrs(gaussPyr);
            IntPtr[] dogPyrPtrs = EnumerableEx.SelectPtrs(dogPyr);

            using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.nonfree_SIFT_findScaleSpaceExtrema(ptr, gaussPyrPtrs, gaussPyrPtrs.Length,
                    dogPyrPtrs, dogPyrPtrs.Length, keyPointsVec.CvPtr);
                return keyPointsVec.ToArray();
            }
        }
Example #41
        /// <summary>
        /// Extracts features and computes their descriptors using the SIFT algorithm
        /// </summary>
        /// <param name="img">Input 8-bit grayscale image</param>
        /// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
        /// <returns>The output vector of keypoints</returns>
        public KeyPoint[] Run(InputArray img, InputArray mask)
        {
            ThrowIfDisposed();
            if (img == null)
                throw new ArgumentNullException("img");
            img.ThrowIfDisposed();

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.nonfree_SIFT_run1(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr);
                return keypointsVec.ToArray();
            }
        }
        /// <summary>
        /// Compute the descriptors for a set of keypoints in an image.
        /// </summary>
        /// <param name="image">The image.</param>
        /// <param name="keypoints">The input keypoints. Keypoints for which a descriptor cannot be computed are removed.</param>
        /// <param name="descriptors">Copmputed descriptors. Row i is the descriptor for keypoint i.</param>param>
        public virtual void Compute(Mat image, ref KeyPoint[] keypoints, Mat descriptors)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_DescriptorExtractor_compute1(
                    ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
                keypoints = keypointsVec.ToArray();
            }
        }
Example #43
 /// <summary>
 /// Reads a sequence of KeyPoint objects from the file node.
 /// </summary>
 /// <returns>The keypoints stored in the node.</returns>
 public KeyPoint[] ReadKeyPoints()
 {
     using (var valueVector = new VectorOfKeyPoint())
     {
         NativeMethods.core_FileNode_read_vectorOfKeyPoint(ptr, valueVector.CvPtr);
         return valueVector.ToArray();
     }
 }
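A hedged round-trip sketch combining this reader with the Write(string, IEnumerable&lt;KeyPoint&gt;) overload from Example #37. The file and node names are placeholders, gray stands for any previously loaded grayscale Mat, and the FileStorage mode enum is spelled slightly differently across OpenCvSharp versions.

      // Hedged sketch: persist detected keypoints to YAML and read them back.
      KeyPoint[] detected = Cv2.FAST(gray, 20, true);            // any detector output will do

      using (var fs = new FileStorage("keypoints.yml", FileStorage.Mode.Write))
      {
          fs.Write("pts", detected);
      }

      using (var fs = new FileStorage("keypoints.yml", FileStorage.Mode.Read))
      {
          KeyPoint[] restored = fs["pts"].ReadKeyPoints();
      }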
Example #44
        /// <summary>
        /// Detects keypoints and computes the SURF descriptors for them. [useProvidedKeypoints = true]
        /// </summary>
        /// <param name="img">The input image.</param>
        /// <param name="mask">Optional mask specifying where to look for keypoints.</param>
        /// <param name="keypoints">The detected keypoints.</param>
        /// <param name="descriptors">The computed SURF descriptors.</param>
        /// <param name="useProvidedKeypoints">If true, the provided keypoints are used instead of detecting new ones.</param>
        public void Run(InputArray img, InputArray mask, out KeyPoint[] keypoints, OutputArray descriptors,
            bool useProvidedKeypoints = false)
        {
            ThrowIfDisposed();
            if (img == null)
                throw new ArgumentNullException("img");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            img.ThrowIfDisposed();
            descriptors.ThrowIfNotReady();

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.nonfree_SURF_run2_OutputArray(ptr, img.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr,
                    descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
                keypoints = keypointsVec.ToArray();
            }
        }
Example #45
        /// <summary>
        /// Compute the BRISK features on an image
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <returns></returns>
        public KeyPoint[] Run(InputArray image, InputArray mask = null)
        {
            ThrowIfDisposed();
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_BRISK_run1(ptr, image.CvPtr, Cv2.ToPtr(mask), keyPointsVec.CvPtr);
                return keyPointsVec.ToArray();
            }
        }
        /// <summary>
        /// Computes an image descriptor using the set visual vocabulary.
        /// </summary>
        /// <param name="image">Image, for which the descriptor is computed.</param>
        /// <param name="keypoints">Keypoints detected in the input image.</param>
        /// <param name="imgDescriptor">Computed output image descriptor.</param>
        public void Compute2(Mat image, out KeyPoint[] keypoints, Mat imgDescriptor)
        {
            if (IsDisposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            if (imgDescriptor == null)
                throw new ArgumentNullException(nameof(imgDescriptor));

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_BOWImgDescriptorExtractor_compute2(
                    ptr, image.CvPtr, keypointsVec.CvPtr, imgDescriptor.CvPtr);
                keypoints = keypointsVec.ToArray();
            }
            GC.KeepAlive(image);
            GC.KeepAlive(imgDescriptor);
        }
        /// <summary>
        /// Computes an image descriptor using the set visual vocabulary.
        /// </summary>
        /// <param name="image">Image, for which the descriptor is computed.</param>
        /// <param name="keypoints">Keypoints detected in the input image.</param>
        /// <param name="imgDescriptor">Computed output image descriptor.</param>
        /// <param name="pointIdxsOfClusters">pointIdxsOfClusters Indices of keypoints that belong to the cluster. 
        /// This means that pointIdxsOfClusters[i] are keypoint indices that belong to the i -th cluster(word of vocabulary) returned if it is non-zero.</param>
        /// <param name="descriptors">Descriptors of the image keypoints that are returned if they are non-zero.</param>
        public void Compute(InputArray image, out KeyPoint[] keypoints, OutputArray imgDescriptor,
            out int[][] pointIdxsOfClusters, Mat descriptors = null)
        {
            if (IsDisposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException(nameof(image));
            if (imgDescriptor == null)
                throw new ArgumentNullException(nameof(imgDescriptor));

            using (var keypointsVec = new VectorOfKeyPoint())
            using (var pointIdxsOfClustersVec = new VectorOfVectorInt())
            {
                NativeMethods.features2d_BOWImgDescriptorExtractor_compute11(ptr, image.CvPtr, keypointsVec.CvPtr, 
                    imgDescriptor.CvPtr, pointIdxsOfClustersVec.CvPtr, Cv2.ToPtr(descriptors));
                keypoints = keypointsVec.ToArray();
                pointIdxsOfClusters = pointIdxsOfClustersVec.ToArray();
            }
            GC.KeepAlive(image);
            GC.KeepAlive(imgDescriptor);
            GC.KeepAlive(descriptors);
        }
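A compact hedged sketch of the bag-of-visual-words flow these overloads belong to. The detector, matcher, training descriptors, and input image are placeholders, and constructor and enum names may differ slightly across OpenCvSharp versions.

            // Hedged sketch: build a visual vocabulary, then describe an image against it.
            var trainer = new BOWKMeansTrainer(100);                 // 100 visual words
            trainer.Add(trainingDescriptors);                        // Mat of descriptors from training images (placeholder)
            Mat vocabulary = trainer.Cluster();

            var extractor = new BOWImgDescriptorExtractor(detector, new BFMatcher(NormTypes.L2));
            extractor.SetVocabulary(vocabulary);

            var imgDescriptor = new Mat();
            KeyPoint[] keypoints;
            extractor.Compute2(image, out keypoints, imgDescriptor); // imgDescriptor: 1 x vocabulary-size histogram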
Example #48
        /// <summary>
        /// Detect keypoints in an image.
        /// </summary>
        /// <param name="image">The image.</param>
        /// <param name="mask">Mask specifying where to look for keypoints (optional). 
        /// Must be a char matrix with non-zero values in the region of interest.</param>
        /// <returns>The detected keypoints.</returns>
        public KeyPoint[] Detect(InputArray image, Mat mask = null)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);

            image.ThrowIfDisposed();
            try
            {
                using (var keypoints = new VectorOfKeyPoint())
                {
                    NativeMethods.features2d_Feature2D_detect_InputArray(ptr, image.CvPtr, keypoints.CvPtr,
                        Cv2.ToPtr(mask));
                    return keypoints.ToArray();
                }
            }
            finally
            {
                GC.KeepAlive(image);
                GC.KeepAlive(mask);
            }
        }
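A short usage sketch for the Detect overload above; ORB.Create() is just one possible Feature2D-derived detector and, like the file name, is an assumption rather than part of this snippet.

using (var detector = ORB.Create())
using (var image = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
{
    // Detect keypoints over the whole image (no mask).
    KeyPoint[] keypoints = detector.Detect(image);
    Console.WriteLine($"detected {keypoints.Length} keypoints");
}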
Exemple #49
        /// <summary>
        /// Compute the descriptors for a set of keypoints in an image.
        /// </summary>
        /// <param name="image">The image.</param>
        /// <param name="inKeypoints">The input keypoints. Keypoints for which a descriptor cannot be computed are removed.</param>
        /// <param name="outKeypoints"></param>
        /// <param name="descriptors">Copmputed descriptors. Row i is the descriptor for keypoint i.</param>param>
        public virtual void Compute(InputArray image, KeyPoint[] inKeypoints, out KeyPoint[] outKeypoints,
            OutputArray descriptors)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);

            using (var keypointsVec = new VectorOfKeyPoint(inKeypoints))
            {
                NativeMethods.features2d_Feature2D_compute1(ptr, image.CvPtr, keypointsVec.CvPtr, descriptors.CvPtr);
                outKeypoints = keypointsVec.ToArray();
            }
        }
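The typical flow is Detect followed by Compute on the same image. A hedged sketch, assuming the detector in use exposes the overload above; ORB.Create() and the image path are illustrative.

using (var f2d = ORB.Create())
using (var image = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
using (var descriptors = new Mat())
{
    KeyPoint[] detected = f2d.Detect(image);

    KeyPoint[] kept;
    f2d.Compute(image, detected, out kept, descriptors);

    // Keypoints for which no descriptor could be computed are dropped,
    // so kept.Length matches descriptors.Rows.
}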
Exemple #50
        /// <summary>
        /// Download keypoints from GPU to CPU memory.
        /// </summary>
        /// <param name="dKeypoints"></param>
        /// <returns></returns>
        public KeyPoint[] DownloadKeyPoints(GpuMat dKeypoints)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (dKeypoints == null)
                throw new ArgumentNullException("dKeypoints");

            KeyPoint[] result;
            using (var keypoints = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_ORB_GPU_downloadKeyPoints(ptr, dKeypoints.CvPtr, keypoints.CvPtr);
                result = keypoints.ToArray();
            }

            GC.KeepAlive(dKeypoints);
            return result;
        }
Exemple #51
        /// <summary>
        /// Detects keypoints and computes the descriptors
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <param name="keypoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
        public virtual void DetectAndCompute(
            InputArray image,
            InputArray mask,
            out KeyPoint[] keypoints,
            OutputArray descriptors,
            bool useProvidedKeypoints = false)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            image.ThrowIfDisposed();
            if (mask != null)
                mask.ThrowIfDisposed();

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_Feature2D_detectAndCompute(
                    ptr, image.CvPtr, Cv2.ToPtr(mask), keypointsVec.CvPtr, descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
                keypoints = keypointsVec.ToArray();
            }

            GC.KeepAlive(image);
            GC.KeepAlive(mask);
            descriptors.Fix();
        }
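DetectAndCompute performs detection and description in one pass, which is usually cheaper than calling Detect and Compute separately. A hedged sketch; ORB.Create() and the file name are assumptions, and passing null for the mask searches the whole image.

using (var f2d = ORB.Create())
using (var image = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
using (var descriptors = new Mat())
{
    KeyPoint[] keypoints;
    f2d.DetectAndCompute(image, null, out keypoints, descriptors);

    // One descriptor row per keypoint; the Mat can be fed directly to a matcher.
}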
Exemple #52
        /// <summary>
        /// Finds the keypoints using FAST detector.
        /// </summary>
        /// <param name="image">Image where keypoints (corners) are detected. 
        /// Only 8-bit grayscale images are supported.</param>
        /// <param name="mask">Optional input mask that marks the regions where we should detect features.</param>
        /// <param name="keypoints">The output vector of keypoints.</param>
        public void Run(GpuMat image, GpuMat mask, out KeyPoint[] keypoints)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (image == null)
                throw new ArgumentNullException("image");
            if (mask == null)
                throw new ArgumentNullException("mask");

            using (var keypointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_FAST_GPU_operator2(ptr, image.CvPtr, mask.CvPtr, keypointsVec.CvPtr);
                keypoints = keypointsVec.ToArray();
            }

            GC.KeepAlive(image);
            GC.KeepAlive(mask);
        }
 /// <summary>
 /// Detect keypoints in an image.
 /// </summary>
 /// <param name="image">The image.</param>
 /// <param name="mask">Mask specifying where to look for keypoints (optional). 
 /// Must be a char matrix with non-zero values in the region of interest.</param>
 /// <returns>The detected keypoints.</returns>
 public KeyPoint[] Detect(Mat image, Mat mask = null)
 {
     if (image == null)
         throw new ArgumentNullException("image");
     using (var keypoints = new VectorOfKeyPoint())
     {
         NativeMethods.features2d_FeatureDetector_detect(ptr, image.CvPtr, keypoints.CvPtr, Cv2.ToPtr(mask));
         return keypoints.ToArray();
     }
 }
Exemple #54
        /// <summary>
        /// Converts keypoints from GPU representation to vector of KeyPoint.
        /// </summary>
        /// <param name="hKeypoints"></param>
        /// <returns></returns>
        public KeyPoint[] ConvertKeypoints(Mat hKeypoints)
        {
            if (disposed)
                throw new ObjectDisposedException(GetType().Name);
            if (hKeypoints == null)
                throw new ArgumentNullException("hKeypoints");

            KeyPoint[] result;
            using (var keypoints = new VectorOfKeyPoint())
            {
                NativeMethods.gpu_FAST_GPU_convertKeypoints(ptr, hKeypoints.CvPtr, keypoints.CvPtr);
                result = keypoints.ToArray();
            }

            GC.KeepAlive(hKeypoints);
            return result;
        }
Exemple #55
        /// <summary>
        /// Compute the BRISK features and descriptors on an image
        /// </summary>
        /// <param name="image"></param>
        /// <param name="mask"></param>
        /// <param name="keyPoints"></param>
        /// <param name="descriptors"></param>
        /// <param name="useProvidedKeypoints"></param>
        public void Run(InputArray image, InputArray mask, out KeyPoint[] keyPoints,
            OutputArray descriptors, bool useProvidedKeypoints = false)
        {
            ThrowIfDisposed();
            if (image == null)
                throw new ArgumentNullException("image");
            if (descriptors == null)
                throw new ArgumentNullException("descriptors");
            image.ThrowIfDisposed();
            descriptors.ThrowIfNotReady();

            using (VectorOfKeyPoint keyPointsVec = new VectorOfKeyPoint())
            {
                NativeMethods.features2d_BRISK_run2(ptr, image.CvPtr, Cv2.ToPtr(mask), keyPointsVec.CvPtr,
                    descriptors.CvPtr, useProvidedKeypoints ? 1 : 0);
                keyPoints = keyPointsVec.ToArray();
            }
            descriptors.Fix();
        }
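A hedged usage sketch of the BRISK Run overload above. How the BRISK instance is constructed (new BRISK() vs. BRISK.Create()) depends on the OpenCvSharp version, so brisk is assumed to exist already; the file name is illustrative.

using (var image = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
using (var descriptors = new Mat())
{
    KeyPoint[] keypoints;
    brisk.Run(image, null, out keypoints, descriptors);   // null mask: search the whole image

    // Each row of 'descriptors' is a binary BRISK descriptor; match with a
    // Hamming-distance matcher rather than L2.
}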
Exemple #56
        /// <summary>
        /// Remove duplicated keypoints.
        /// </summary>
        /// <param name="keypoints"></param>
        /// <returns></returns>
        public static KeyPoint[] RemoveDuplicated(IEnumerable<KeyPoint> keypoints)
        {
            if (keypoints == null)
                throw new ArgumentNullException(nameof(keypoints));

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_removeDuplicated(keypointsVec.CvPtr);
                return keypointsVec.ToArray();
            }
        }
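RemoveDuplicated is useful when keypoints from several detectors (or several runs) are merged for a single image. A hedged sketch; fromFast and fromBrisk are illustrative KeyPoint[] arrays, and Concat requires using System.Linq.

// Merge keypoints from two detectors, then drop exact duplicates.
IEnumerable<KeyPoint> merged = fromFast.Concat(fromBrisk);
KeyPoint[] unique = KeyPointsFilter.RemoveDuplicated(merged);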
Exemple #57
        /// <summary>
        /// Retrieves keypoints using the StarDetector algorithm.
        /// </summary>
        /// <param name="image">The input 8-bit grayscale image.</param>
        /// <returns>The detected keypoints.</returns>
        public KeyPoint[] Run(Mat image)
        {
            if (image == null)
                throw new ArgumentNullException("image");
            image.ThrowIfDisposed();

            IntPtr keypoints;
            NativeMethods.features2d_StarDetector_detect(ptr, image.CvPtr, out keypoints);

            using (VectorOfKeyPoint keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                return keypointsVec.ToArray();
            }
        }
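A hedged usage sketch for the StarDetector Run method above; the parameterless constructor (detector parameters vary between OpenCvSharp versions) and the file name are assumptions.

using (var star = new StarDetector())
using (var image = Cv2.ImRead("scene.png", ImreadModes.Grayscale))
{
    KeyPoint[] keypoints = star.Run(image);
    Console.WriteLine($"StarDetector found {keypoints.Length} keypoints");
}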
        /// <summary>
        /// Retain the specified number of the best keypoints (according to the response)
        /// </summary>
        /// <param name="keypoints"></param>
        /// <param name="npoints"></param>
        /// <returns></returns>
        public static KeyPoint[] RetainBest(IEnumerable<KeyPoint> keypoints, int npoints)
        {
            if (keypoints == null)
                throw new ArgumentNullException("keypoints");

            using (var keypointsVec = new VectorOfKeyPoint(keypoints))
            {
                NativeMethods.features2d_KeyPointsFilter_retainBest(
                    keypointsVec.CvPtr, npoints);
                return keypointsVec.ToArray();
            }
        }
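RetainBest is typically applied right after detection to cap the number of keypoints before the more expensive descriptor stage. A hedged sketch; detected is an illustrative KeyPoint[] from any detector and 500 is an arbitrary budget.

// Keep only the 500 strongest keypoints by response.
KeyPoint[] best = KeyPointsFilter.RetainBest(detected, 500);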