Example #1
        public bool MatchDescriptorsWithRatioTest(BFMatcher descriptorMatcher, ref Mat descriptorsEvalImage, Mat trainDescriptors, ref VectorOfDMatch matchesFilteredOut, float maxDistanceRatio)
        {
            if (trainDescriptors.Rows < 4)
            {
                return(false);
            }

            matchesFilteredOut.Clear();
            descriptorMatcher.Add(trainDescriptors);

            VectorOfVectorOfDMatch matchesKNN = new VectorOfVectorOfDMatch();

            descriptorMatcher.KnnMatch(descriptorsEvalImage, matchesKNN, 2, null);
            for (int matchPos = 0; matchPos < matchesKNN.Size; ++matchPos)
            {
                if (matchesKNN[matchPos].Size >= 2)
                {
                    if (matchesKNN[matchPos][0].Distance <= maxDistanceRatio * matchesKNN[matchPos][1].Distance)
                    {
                        matchesFilteredOut.Push(new MDMatch[] { matchesKNN[matchPos][0] });
                    }
                }
            }

            return(!(matchesFilteredOut.Size == 0));
        }
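
A minimal call-site sketch for the helper above (a sketch only: it assumes the call is made from inside the containing class, that `evalDescriptors` and `trainDescriptors` are descriptor Mats computed elsewhere, e.g. by DetectAndCompute, and that 0.75f, Lowe's commonly used ratio, is an illustrative choice rather than a value from this snippet):

        BFMatcher matcher = new BFMatcher(DistanceType.L2);
        VectorOfDMatch filteredMatches = new VectorOfDMatch();
        bool found = MatchDescriptorsWithRatioTest(matcher, ref evalDescriptors, trainDescriptors, ref filteredMatches, 0.75f);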
Example #2
        public Image <Bgr, byte> pointComp(Image <Bgr, byte> baseImg, Image <Bgr, byte> twistedImg)
        {
            Image <Gray, byte> baseImgGray    = baseImg.Convert <Gray, byte>();
            Image <Gray, byte> twistedImgGray = twistedImg.Convert <Gray, byte>();
            Brisk            descriptor       = new Brisk();
            GFTTDetector     detector         = new GFTTDetector(40, 0.01, 5, 3, true);
            VectorOfKeyPoint GFP1             = new VectorOfKeyPoint();
            UMat             baseDesc         = new UMat();
            UMat             bimg             = twistedImgGray.Mat.GetUMat(AccessType.Read);
            VectorOfKeyPoint GFP2             = new VectorOfKeyPoint();
            UMat             twistedDesc      = new UMat();
            UMat             timg             = baseImgGray.Mat.GetUMat(AccessType.Read);
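            // NOTE: the variable names are swapped relative to their contents:
            // "bimg" wraps the twisted image and "timg" the base image, so
            // GFP1/baseDesc below actually describe the twisted image.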

            detector.DetectRaw(bimg, GFP1);
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            matcher.Add(baseDesc);
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            //int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP1, matches, mask, 1.5, 20);
            Image <Bgr, byte> res = baseImg.CopyBlank();

            Features2DToolbox.DrawMatches(twistedImg, GFP1, baseImg, GFP2, matches, res, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), mask);
            return(res);
        }
Example #3
        public StopSignDetector(IInputArray stopSignModel)
        {
            _detector = new KAZE();
            using (Mat redMask = new Mat())
            {
                GetRedPixelMask(stopSignModel, redMask);
                _modelKeypoints   = new VectorOfKeyPoint();
                _modelDescriptors = new Mat();
                _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
                if (_modelKeypoints.Size == 0)
                {
                    throw new Exception("No image feature has been found in the stop sign model");
                }
            }

            _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
            _modelDescriptorMatcher.Add(_modelDescriptors);

            _octagon = new VectorOfPoint(
                new Point[]
            {
                new Point(1, 0),
                new Point(2, 0),
                new Point(3, 1),
                new Point(3, 2),
                new Point(2, 3),
                new Point(1, 3),
                new Point(0, 2),
                new Point(0, 1)
            });
        }
Example #4
        private VectorOfPoint _octagon;             // The region being sought

        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="brickSingModel">The image to be processed. An Image<Bgr, Byte> is expected.</param>
        public SingDetectorMethodCanny(IInputArray brickSingModel)
        {
            _detector = new SURF(500);

            using (Mat redMask = new Mat())
            {
                GetRedPixelMask(brickSingModel, redMask);
                _modelKeypoints   = new VectorOfKeyPoint();
                _modelDescriptors = new Mat();
                _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
                if (_modelKeypoints.Size == 0)
                {
                    //throw new Exception("The image to be processed has not been loaded");
                }
            }

            _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
            _modelDescriptorMatcher.Add(_modelDescriptors);

            _octagon = new VectorOfPoint(
                new Point[] {
                new Point(1, 0),
                new Point(2, 0),
                new Point(3, 1),
                new Point(3, 2),
                new Point(2, 3),
                new Point(1, 3),
                new Point(0, 2),
                new Point(0, 1)
            });
        }
Example #5
        public void FindFeaturePointsBetweenTwoImages()
        {
            var filename            = "";
            var filename2           = "";
            var orb                 = new ORBDetector(2000);
            Image <Bgr, byte> left  = new Image <Bgr, byte>(filename);
            Image <Bgr, byte> right = new Image <Bgr, byte>(filename2);
            var vectorLeft          = new VectorOfKeyPoint();
            var vectorRight         = new VectorOfKeyPoint();
            var matLeft             = new Mat();
            var matRight            = new Mat();

            orb.DetectAndCompute(left, null, vectorLeft, matLeft, false);
            orb.DetectAndCompute(right, null, vectorRight, matRight, false);

            var matcher = new BFMatcher(DistanceType.Hamming2, true);
            var matches = new VectorOfVectorOfDMatch();

            matcher.Add(matLeft);
            matcher.KnnMatch(matRight, matches, 1, null);


            CalculateEssentialMAtrix(vectorLeft, vectorRight, camera.CameraMatrix);
            CalculateFundamentalMatrix(vectorLeft, vectorRight);
        }
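
The k-NN matches computed above are never consumed. If the essential/fundamental-matrix steps need corresponding point pairs, they could be pulled out of `matches` along these lines (a sketch; the `List<PointF>` containers are an assumption, not part of the original code):

            var leftPoints  = new List<PointF>();
            var rightPoints = new List<PointF>();
            for (int i = 0; i < matches.Size; i++)
            {
                if (matches[i].Size == 0)
                {
                    continue; // cross-checked matching can leave empty entries
                }
                MDMatch m = matches[i][0];
                leftPoints.Add(vectorLeft[m.TrainIdx].Point);   // train side: the descriptors added to the matcher (left image)
                rightPoints.Add(vectorRight[m.QueryIdx].Point); // query side: the right image
            }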
Example #6
        private void FindMatch(Mat observedImage)
        {
            int k = 2;

            mask           = new Mat();
            homography     = null;
            matches        = new VectorOfVectorOfDMatch();
            uObservedImage = observedImage.GetUMat(AccessType.ReadWrite);

            // extract features from the observed image
            ORBCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
            matcher = new BFMatcher(DistanceType.L2);
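            // NOTE: ORB yields binary descriptors, for which DistanceType.Hamming is
            // the usual metric; L2 is kept here as in the original snippet.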
            matcher.Add(objDescriptors);
            if (objDescriptors.Size.Height > 3 && observedDescriptors.Size.Height > 3)
            {
                matcher.KnnMatch(observedDescriptors, matches, k, null);
                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                int nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    //nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(objKeyPoints, observedKeyPoints,
                    //matches, mask, 1, 2);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(objKeyPoints,
                                                                                              observedKeyPoints, matches, mask, 3);
                    }
                }
            }
        }
Example #7
        public static void FindMatch(string modelFileName, string observedFileName, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();
            {
                using (UMat uModelImage = CvInvoke.Imread(modelFileName, ImreadModes.Color).GetUMat(AccessType.Read))
                    using (UMat uObservedImage = CvInvoke.Imread(observedFileName, ImreadModes.Color).GetUMat(AccessType.Read))
                    {
                        SIFT sift             = new SIFT();
                        UMat modelDescriptors = new UMat();
                        sift.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                        UMat observedDescriptors = new UMat();
                        sift.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                        BFMatcher matcher = new BFMatcher(DistanceType.L2);
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                    }
            }
        }
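
Note that `matches` is an ordinary parameter rather than an `out` parameter, so the caller has to construct it first. A call-site sketch (the file names are placeholders):

        VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
        FindMatch("model.png", "observed.png", out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, matches, out Mat mask);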
Example #8
        public static void GetMatches(VectorOfKeyPoint imageKeypoints, IInputArray imageDescriptors, VectorOfKeyPoint patternKeypoints, IInputArray patternDescriptors, out VectorOfVectorOfDMatch matches, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;

            homography = null;

            matches = new VectorOfVectorOfDMatch();

            var matcher = new BFMatcher(DistanceType.L2);

            matcher.Add(patternDescriptors);
            matcher.KnnMatch(imageDescriptors, matches, k, null);

            var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(patternKeypoints, imageKeypoints, matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(patternKeypoints, imageKeypoints, matches, mask, 2);
                }
            }
        }
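
A sketch of driving GetMatches (the keypoint/descriptor inputs are assumed to come from a DetectAndCompute call as in the surrounding examples; a non-null homography means at least 4 geometrically consistent matches survived the voting):

        GetMatches(imageKeypoints, imageDescriptors, patternKeypoints, patternDescriptors, out VectorOfVectorOfDMatch matches, out Mat homography);
        if (homography != null)
        {
            // the pattern was probably found; the homography maps pattern coordinates into the image
        }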
Example #9
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;
            double hessianThresh       = 100;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    SIFT siftCPU = new SIFT();


                    //extract features from the object image
                    UMat modelDescriptors = new UMat();

                    //surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                    siftCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();

                    //surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    siftCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));

                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                    //Features2DToolbox.VoteForUniqueness(matches, 1, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #10
        private void button3_Click(object sender, EventArgs e)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            var baseImgGray    = baseImg.Convert <Gray, byte>();
            var twistedImgGray = twistedImg.Convert <Gray, byte>();

            // keypoint descriptor extractor
            Brisk descriptor = new Brisk();

            // since in this case we need to compute the inverse transform,
            // the twisted image will serve as the base
            VectorOfKeyPoint GFP1     = new VectorOfKeyPoint();
            UMat             baseDesc = new UMat();
            UMat             bimg     = twistedImgGray.Mat.GetUMat(AccessType.Read);

            VectorOfKeyPoint GFP2        = new VectorOfKeyPoint();
            UMat             twistedDesc = new UMat();
            UMat             timg        = baseImgGray.Mat.GetUMat(AccessType.Read);

            // get the raw keypoint data for the images
            detector.DetectRaw(bimg, GFP1);

            // compute descriptors for the images' keypoints
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);

            // class that lets us compare descriptor sets of keypoints
            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            // container for storing the keypoint matches
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            // add the descriptors of the base points
            matcher.Add(baseDesc);
            // match against the descriptors of the twisted image
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            // 3rd parameter: the number of nearest neighbours among which matches are searched
            // 4th parameter: a mask, not needed in this case

            // mask used to mark values to discard (outliers and non-unique matches)
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            // keep only the unique matches
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            Mat homography;

            // obtain the homography matrix
            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(GFP1, GFP2, matches, mask, 2);

            var destImage = new Image <Bgr, byte>(baseImg.Size);

            CvInvoke.WarpPerspective(twistedImg, destImage, homography, destImage.Size);
            twistedImg      = destImage;
            imageBox2.Image = destImage.Resize(640, 480, Inter.Linear);
        }
Example #11
        //-------------------------------------------------------------------------------------------------------

        /// <summary>
        /// This is the function
        /// </summary>
        /// <param name="modelImage"> The reference image, as a Mat </param>
        /// <param name="observedImage"> The image coming from the camera, after preprocessing, as a Mat </param>
        /// <param name="matchTime"> Returns the time the function spent analysing the observed image. </param>
        /// <param name="modelKeyPoints"> Keypoints that KAZE places on the reference image </param>
        /// <param name="observedKeyPoints"> Keypoints that KAZE places on the observed image </param>
        /// <param name="matches">  </param>
        /// <param name="mask"></param>
        /// <param name="homography"></param>
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    KAZE featureDetector = new KAZE();

                    //extract features from the object image
                    Mat modelDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    Mat observedDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // Bruteforce, slower but more accurate
                    using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                        using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
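                            // NOTE: the Flann index/search params above are unused leftovers;
                            // the brute-force matcher below does not take them.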
                            using (BFMatcher matcher = new BFMatcher(DistanceType.L1, false))
                            {
                                matcher.Add(modelDescriptors);

                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1); // Cv8U is the only one that works?
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                int nonZeroCount = CvInvoke.CountNonZero(mask);
                                if (nonZeroCount >= 4)
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                               matches, mask, 1.5, 20);

                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                              observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }

                    watch.Stop();
                }

            matchTime = watch.ElapsedMilliseconds;
        }
Example #12
        public static int SiftComparison(string img1, string img2)
        {
            var sift = new Emgu.CV.XFeatures2D.SIFT();

            var modelKeyPoints   = new VectorOfKeyPoint();
            Mat modelDescriptors = new Mat();

            var observedKeyPoints   = new VectorOfKeyPoint();
            Mat observedDescriptors = new Mat();
            Mat mask = new Mat();

            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
            int    k = 2;
            double uniquenessThreshold = 0.80;

            using (Mat modelImage = CvInvoke.Imread(img1, ImreadModes.Grayscale))
                using (Mat observedImage = CvInvoke.Imread(img2, ImreadModes.Grayscale))
                {
                    sift.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);
                    sift.DetectAndCompute(observedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L1);

                    matcher.Add(modelDescriptors);
                    //matcher.Add(observedDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    try
                    {
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                    }
                    catch (Exception ex)
                    {
                        Log(ex.Message);
                        Log("Error with SIFT algorithm, unable to compare images..");
                        return(0);
                    }
                }

            int score = 0;

            for (int i = 0; i < matches.Size; i++)
            {
                if (mask.GetData(i)[0] == 0)
                {
                    continue;
                }
                foreach (var e in matches[i].ToArray())
                {
                    ++score;
                }
            }

            return(score);
        }
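
Usage is a single call (the paths are placeholders); the returned score is just the number of matches that survive the uniqueness vote, so it is only meaningful when comparing across image pairs of similar size and texture:

        int score = SiftComparison(@"images\a.jpg", @"images\b.jpg");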
Example #13
        public static Mat Draw(Mat modelImage, Mat observedImage)
        {
            var sift = new SIFT();

            var modelKeyPoints    = new VectorOfKeyPoint();
            var observedKeyPoints = new VectorOfKeyPoint();

            UMat modelDescriptors    = new UMat();
            UMat observedDescriptors = new UMat();

            sift.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);
            sift.DetectAndCompute(observedImage, null, observedKeyPoints, observedDescriptors, false);

            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            matcher.Add(modelDescriptors);

            var matches = new VectorOfVectorOfDMatch();

            matcher.KnnMatch(observedDescriptors, matches, 2, null);

            var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);

            var homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 10);

            var result = new Mat();

            Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, matches, result,
                                          new MCvScalar(255, 255, 255),
                                          new MCvScalar(0, 0, 0),
                                          mask,
                                          Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

            Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);

            PointF[] pts =
            {
                new PointF(rect.Left,  rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left,  rect.Top)
            };
            pts = CvInvoke.PerspectiveTransform(pts, homography);

            Point[] points = Array.ConvertAll <PointF, Point>(pts, Point.Round);
            using (VectorOfPoint vp = new VectorOfPoint(points))
            {
                CvInvoke.Polylines(result, vp, true, new MCvScalar(0, 255, 0, 55), 2);
            }

            return(result);
        }
Example #14
        public static List <ImageSearchResult> SearchImageForObjects(WorldObject modelObject, string imageToSearch)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            int nonZeroThreshold = 10;

            ObjectFeatures targetImageFeatures = DetectFeatures_Brisk(imageToSearch);

            Mat mask;

            List <ImageSearchResult> searchResults = new List <ImageSearchResult>();

            foreach (ObjectView view in modelObject.Views)
            {
                if (view == null)
                {
                    continue;
                }

                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

                BFMatcher matcher = new BFMatcher(DistanceType.L2);
                matcher.Add(view.Features.Descriptors);

                matcher.KnnMatch(targetImageFeatures.Descriptors, matches, 2, null);

                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

                mask.SetTo(new MCvScalar(255));

                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                int nonZeroCount = CvInvoke.CountNonZero(mask);

                if (nonZeroCount >= nonZeroThreshold)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(view.Features.KeyPoints,
                                                                               targetImageFeatures.KeyPoints, matches, mask, 1.5, 20);

                    if (nonZeroCount >= nonZeroThreshold)
                    {
                        Mat homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(view.Features.KeyPoints,
                                                                                                  targetImageFeatures.KeyPoints, matches, mask, 2);

                        searchResults.Add(new ImageSearchResult(view, homography, matches, targetImageFeatures, mask));
                    }
                }
            }

            return(searchResults);
        }
Example #15
        public Image <Bgr, byte> ReturnCompared(out Image <Bgr, byte> def, out Image <Bgr, byte> twistdef)
        {
            var image      = sourceImage.Copy();
            var twistedImg = additionalImage.Copy();
            // keypoint descriptor extractor
            Brisk        descriptor = new Brisk();
            GFTTDetector detector   = new GFTTDetector(40, 0.01, 5, 3, true);
            // since in this case we need to compute the inverse transform,
            // the twisted image will serve as the base
            VectorOfKeyPoint GFP1           = new VectorOfKeyPoint();
            UMat             baseDesc       = new UMat();
            var              twistedImgGray = twistedImg.Convert <Gray, byte>();
            var              baseImgGray    = image.Convert <Gray, byte>();
            UMat             bimg           = twistedImgGray.Mat.GetUMat(AccessType.Read);
            VectorOfKeyPoint GFP2           = new VectorOfKeyPoint();
            UMat             twistedDesc    = new UMat();
            UMat             timg           = baseImgGray.Mat.GetUMat(AccessType.Read);

            // get the raw keypoint data for the images
            detector.DetectRaw(bimg, GFP1);
            // compute descriptors for the images' keypoints
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);


            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            // container for storing the keypoint matches
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            // add the descriptors of the base points
            matcher.Add(baseDesc);
            // match against the descriptors of the twisted image
            matcher.KnnMatch(twistedDesc, matches, 2, null);


            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            // keep only the unique matches
            Mat resM = new Mat(image.Height, image.Width * 2, DepthType.Cv8U, 3);
            var res  = resM.ToImage <Bgr, byte>();

            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP2, matches, mask, 1.5, 20);

            Features2DToolbox.DrawMatches(twistedImg, GFP1, image, GFP2, matches, res, new MCvScalar(255, 0,
                                                                                                     0), new MCvScalar(255, 0, 0), mask);
            def      = image;
            twistdef = twistedImg;
            return(res);
        }
Example #16
        /// <summary>
        ///
        ///
        /// TODO: thresholds must be set
        /// </summary>
        /// <param name="model"></param>
        /// <param name="imageToSearch"></param>
        /// <returns></returns>
        public static bool SearchImageForObjects(List <ObjectFeatures> model, Bitmap image)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            ObjectFeatures targetImageFeatures = DetectFeatures(image);

            Mat mask;

            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            foreach (ObjectFeatures of in model)
            {
                matcher.Add(of.Descriptors);

                matcher.KnnMatch(targetImageFeatures.Descriptors, matches, 2, null);

                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

                mask.SetTo(new MCvScalar(255));

                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                int nonZeroCount = CvInvoke.CountNonZero(mask);

                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(of.KeyPoints,
                                                                               targetImageFeatures.KeyPoints, matches, mask, 1.5, 20);

                    if (nonZeroCount >= 4)
                    {
                        return(true);

                        /*
                         * Mat homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(view.Features.KeyPoints,
                         *  targetImageFeatures.KeyPoints, matches, mask, 2);
                         *
                         * searchResults.Add(new ImageSearchResult(view, homography, matches));
                         */
                    }
                }
            }

            return(false);
        }
Example #17
File: SURF.cs  Project: okeanz/IPS
        public static void FindMatchWM(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, Feature2D computer, Feature2D detector)
        {
            Stopwatch watch;

            modelKeyPoints    = new VectorOfKeyPoint(); // keypoints on the model
            observedKeyPoints = new VectorOfKeyPoint(); // keypoints on the larger (observed) image
            homography        = null;
            int k = 2;


            using (Mat uModelImage = modelImage.Clone())
                using (Mat uObservedImage = observedImage.Clone())
                {
                    // get the descriptors from the first image
                    Mat modelDescriptors = new Mat();
                    DetectAndCompute(uModelImage, out modelKeyPoints, out modelDescriptors, detector, computer);

                    watch = Stopwatch.StartNew();

                    // ... from the second image
                    Mat observedDescriptors = new Mat();
                    DetectAndCompute(uObservedImage, out observedKeyPoints, out observedDescriptors, detector, computer);


                    BFMatcher matcher = new BFMatcher(DistanceType.L2); // matcher that compares the descriptors of the two images
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null); // perform the matching
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, 0.8, mask); // builds the mask (see below)

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, // estimate the region where the model is expected to land
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #18
        public static void FindMatch(string pageFile, string templateFile)
        {
            Image <Rgb, byte> page     = getPreprocessedImage(pageFile);
            Image <Rgb, byte> template = getPreprocessedImage(templateFile);

            var detector = new ORBDetector();
            VectorOfKeyPoint templateKeyPoints = new VectorOfKeyPoint();
            Mat templateDescriptors            = new Mat();

            detector.DetectAndCompute(template, null, templateKeyPoints, templateDescriptors, false);

            VectorOfKeyPoint pageKeyPoints = new VectorOfKeyPoint();
            Mat pageDescriptors            = new Mat();

            detector.DetectAndCompute(page, null, pageKeyPoints, pageDescriptors, false);
            using (var matcher = new BFMatcher(DistanceType.L1))
            {
                matcher.Add(templateDescriptors);
                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

                //VectorOfDMatch matches2 = new VectorOfDMatch();
                //matcher.Match(pageDescriptors, matches2);


                matcher.KnnMatch(pageDescriptors, matches, 2, null);

                Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
                Mat homography   = new Mat();
                int nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(templateKeyPoints, pageKeyPoints, matches, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(templateKeyPoints, pageKeyPoints, matches, mask, 2);
                    }
                }

                Mat result = new Mat();
                Features2DToolbox.DrawMatches(template, templateKeyPoints, page, pageKeyPoints, matches, result, new MCvScalar(0, 255, 0), new MCvScalar(255, 0, 0), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

                //Features2DToolbox.DrawMatches(template, templateKeyPoints, page, pageKeyPoints, matches2, result, new MCvScalar(0, 255, 0), new MCvScalar(255, 0, 0), null, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

                MainForm.This.PageBox.Image = result.ToBitmap();
            }
        }
Example #19
        private void detectImgFeatures()
        {
            ORBDetector detector = new ORBDetector(100, 1.2f, 8);

            MKeyPoint[]      img0_keyPoints        = detector.Detect(imgs[0]);
            VectorOfKeyPoint img0_vector_keypoints = new VectorOfKeyPoint(img0_keyPoints);
            Matrix <Byte>    img0_descriptors      = new Matrix <Byte>(img0_vector_keypoints.Size, detector.DescriptorSize);

            MKeyPoint[]      img1_keyPoints        = detector.Detect(imgs[1]);
            VectorOfKeyPoint img1_vector_keypoints = new VectorOfKeyPoint(img1_keyPoints);
            Matrix <Byte>    img1_descriptors      = new Matrix <Byte>(img1_vector_keypoints.Size, detector.DescriptorSize);

            detector.Compute(imgs[0], img0_vector_keypoints, img0_descriptors);
            detector.Compute(imgs[1], img1_vector_keypoints, img1_descriptors);

            // display keypoints in red
            Image <Bgr, Byte> newImg = new Image <Bgr, Byte>(imgs[0].Width, imgs[0].Height);

            Features2DToolbox.DrawKeypoints(imgs[0], img0_vector_keypoints, newImg, new Bgr(255, 0, 255),
                                            Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
            imgbox_original.Image = newImg;

            Image <Bgr, Byte> newImg2 = new Image <Bgr, Byte>(imgs[1].Width, imgs[1].Height);

            Features2DToolbox.DrawKeypoints(imgs[1], img1_vector_keypoints, newImg2, new Bgr(255, 0, 255),
                                            Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
            imgbox_second.Image = newImg2;

            // apply BFMatcher to find matches in two images
            BFMatcher bfMatcher            = new BFMatcher(DistanceType.Hamming, true);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            bfMatcher.Add(img0_descriptors);
            bfMatcher.KnnMatch(img1_descriptors, matches, 1, null);
            numberFoundPairs = matches.Size; // read the match count after matching, not before

            // display final image as two merged images with keypoints
            Mat matched_image = new Mat();

            Features2DToolbox.DrawMatches(imgs[0], img0_vector_keypoints, imgs[1], img1_vector_keypoints,
                                          matches, matched_image, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 0));
            img_final = matched_image.ToImage <Bgr, Byte>();
        }
Example #20
        private static Mat FindMatchWithoutCuda(Mat modelImage, Mat observedImage, VectorOfKeyPoint modelKeyPoints, VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, Mat homography, int k, double uniquenessThreshold, double hessianThresh)
        {
            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh, upright: true);

                    UMat modelDescriptors;
                    if (!FindDescriptors(surfCPU, modelKeyPoints, uModelImage, out modelDescriptors))
                    {
                        Logger.Log(LogType.Error, "Feature Descriptor for Model image is empty. Is the image too small?");
                        return(mask = null);
                    }

                    UMat observedDescriptors;
                    if (!FindDescriptors(surfCPU, observedKeyPoints, uObservedImage, out observedDescriptors))
                    {
                        Logger.Log(LogType.Error, "Feature Descriptor for Observed image is empty. Is the image too small?");
                        return(mask = null);
                    }

                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);
                    matcher.KnnMatch(observedDescriptors, matches, k, null);

                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                        }
                    }
                }

            return(homography);
        }
Example #21
        public override FeatureMatchResult Match(Mat sourceMat, Mat searchMat, FeatureMatchArgument argument)
        {
            // create the SIFT detector
            using var sift = SIFT.Create();

            // create the descriptor objects, preparing for the feature matching below
            using var sourceDescriptors = new Mat();
            using var searchDescriptors = new Mat();

            // detect keypoints and compute their descriptors
            sift.DetectAndCompute(sourceMat, null, out var sourceKeyPoints, sourceDescriptors);
            sift.DetectAndCompute(searchMat, null, out var searchKeyPoints, searchDescriptors);

            // create the brute-force descriptor matcher
            using var bfMatcher = new BFMatcher();
            // add the source image's descriptors as training data
            bfMatcher.Add(new List <Mat>()
            {
                sourceDescriptors
            });
            bfMatcher.Train();
            // get the matched keypoints and extract the best pairings
            var matches = bfMatcher.KnnMatch(sourceDescriptors, searchDescriptors, (int)argument.MatchPoints);

            argument.OutputDebugMessage($"[FeatureMatch] [SIFT] The number of matching points is ({matches.Length}).");

            // even with SIFT, the unfiltered matches are still poor at this point, so extract the good ones
            var goodMatches = SelectGoodMatches(matches, argument, sourceKeyPoints, searchKeyPoints);

            // build the match result
            var matchResult = GetMatchResult(goodMatches, sourceKeyPoints, searchKeyPoints);

            argument.OutputDebugMessage($"[FeatureMatch] [SIFT] The result of the match is ({matchResult.Success}) ({matchResult.MatchItems.Count}).");
            if (matchResult.Success)
            {
                var bestMatch = matchResult.MatchItems[0];
                argument.OutputDebugMessage($"[FeatureMatch] [SIFT] The center point of the best match is ({bestMatch.Point}), and the rect is {bestMatch.Rectangle}.");
            }
            argument.PreviewDebugFeatureMatchResult(matchResult, sourceMat, searchMat, sourceKeyPoints, searchKeyPoints, goodMatches);
            return(matchResult);
        }
Example #22
        public int  FindMatch1(Mat modelImage, Mat observedImage, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k                   = 2;
            int    nonZeroCount        = 0;
            double uniquenessThreshold = 0.80;

            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();
            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))

                {
                    var featureDetector  = new ORBDetector(9000);
                    Mat modelDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                    Mat observedDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    using (var matcher = new BFMatcher(DistanceType.Hamming, false))
                    {
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                       matches, mask, 1.5, 20);
                            //if (nonZeroCount >= 4)
                            //    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                            //        observedKeyPoints, matches, mask, 2);
                        }
                    }
                }
            return(nonZeroCount);
        }
Example #23
        public Image <Bgr, byte> PointHomo(Image <Bgr, byte> image, Image <Bgr, byte> image2)
        {
            Image <Gray, byte> baseImgGray    = image.Convert <Gray, byte>();
            Image <Gray, byte> twistedImgGray = image2.Convert <Gray, byte>();
            Brisk            descriptor       = new Brisk();
            GFTTDetector     detector         = new GFTTDetector(40, 0.01, 5, 3, true);
            VectorOfKeyPoint GFP1             = new VectorOfKeyPoint();
            UMat             baseDesc         = new UMat();
            UMat             bimg             = twistedImgGray.Mat.GetUMat(AccessType.Read);
            VectorOfKeyPoint GFP2             = new VectorOfKeyPoint();
            UMat             twistedDesc      = new UMat();
            UMat             timg             = baseImgGray.Mat.GetUMat(AccessType.Read);

            detector.DetectRaw(bimg, GFP1);
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            matcher.Add(baseDesc);
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            int nonZeroCount      = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP2, matches, mask, 1.5, 20);
            Image <Bgr, byte> res = image.CopyBlank();

            Features2DToolbox.DrawMatches(image2, GFP1, image, GFP2, matches, res, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), mask);

            Mat homography;

            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(GFP1, GFP2, matches, mask, 2);
            var destImage = new Image <Bgr, byte>(image2.Size);

            CvInvoke.WarpPerspective(image2, destImage, homography, destImage.Size);

            return(destImage);
        }
Example #24
        private double DetectTemplate(Mat observedImage, TemplateContainer.ImageData template)
        {
            orbDetector.DetectAndCompute(observedImage, null, observedKeyPoints, observedDescriptor, false);

            if (template.keyPointsOrb.Size > 0 && observedKeyPoints.Size > 0)
            {
                BFMatcher matcher = new BFMatcher(DistanceType.L2);
                matcher.Add(template.descriptorOrb);

                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
                matcher.KnnMatch(observedDescriptor, matches, 2, null);

                //Copied
                Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                if (matches.Size == 0)
                {
                    return(0.0);
                }
                else
                {
                    int    nonZeroCount           = CvInvoke.CountNonZero(mask);
                    double nonZeroCountNormalized = 1.0 * nonZeroCount / template.keyPointsOrb.Size;
                    if (nonZeroCount > 3)
                    {
                        nonZeroCount           = Features2DToolbox.VoteForSizeAndOrientation(template.keyPointsOrb, observedKeyPoints, matches, mask, 1.8, 18);
                        nonZeroCountNormalized = 1.0 * nonZeroCount / template.keyPointsOrb.Size;
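                        // NOTE: the normalized value is recomputed here, but the raw
                        // count is what gets returned below.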
                        return(nonZeroCount);
                    }
                    return(0.0);
                }
            }
            else
            {
                return(0.0);
            }
        }
Example #25
    public void AddFrame(Image <Gray, byte> frame)
    {
        Mat observedDescriptors = new Mat();
        Mat mask;
        VectorOfKeyPoint observedKeyPoints = new VectorOfKeyPoint();


        if (_isFirst)
        {
            _detector.DetectRaw(frame, modelKeyPoints);
            _descriptor.Compute(frame, modelKeyPoints, _modelDescriptors);
            if (modelKeyPoints.Size == 0)
            {
                return;
            }

            _width  = frame.Width;
            _height = frame.Height;

            _matcher = new BFMatcher(DistanceType.L2);
            _matcher.Add(_modelDescriptors);
            _isFirst = false;
            return;
        }
        else
        {
            _detector.DetectRaw(frame, observedKeyPoints);
            _descriptor.Compute(frame, observedKeyPoints, observedDescriptors);
        }

        _matches.Clear();
        _matcher.KnnMatch(observedDescriptors, _matches, k, null);

        _matcher = new BFMatcher(DistanceType.L2);         //clear it
        _matcher.Add(observedDescriptors);

        mask = new  Mat(_matches.Size, 1, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
        mask.SetTo(new MCvScalar(255));
        Features2DToolbox.VoteForUniqueness(_matches, uniquenessThresh, mask);

        Stopwatch stopwatch = Stopwatch.StartNew();

        stopwatch.Reset();
        stopwatch.Start();
        int nonZeroCount = CvInvoke.CountNonZero(mask);

        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, _matches, mask, 1.5, 20);

            if (nonZeroCount >= 4)
            {
                _homography   = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, _matches, mask, 2);
                double[,] arr = new double[3, 3];
                _homography.CopyTo(arr);
                Homography.SetRow(0, new Vector4((float)arr[0, 0], (float)arr[0, 1], 0, 0));
                Homography.SetRow(1, new Vector4((float)arr[1, 0], (float)arr[1, 1], 0, 0));
                Homography.SetRow(2, new Vector4(0, 0, 1, 0));

                Translation.Set((float)arr [0, 2] / (float)_width, (float)arr [1, 2] / (float)_height, 0);
            }
        }
        stopwatch.Stop();
        UnityEngine.Debug.Log("Matcher required time:" + stopwatch.ElapsedMilliseconds + " Count: " + nonZeroCount + "/" + _matches.Size);

        List <int> kp = new List <int>();

        _matchesPointsA.Clear();
        _matchesPointsB.Clear();

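        // NOTE: this loop steps by 2 and stops near _matches.Size / 2, so it only
        // inspects a subset of the k-NN pairs; a full ratio test would walk every
        // entry of _matches.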
        for (int i = 0; i < _matches.Size / 2 - 1; i += 2)
        {
            if (_matches [i] [0].Distance < _matches [i] [1].Distance * 0.7f)
            {
                try{
                    int idx = _matches [i] [0].TrainIdx;
                    _matchesPointsA.Add(new Vector2(modelKeyPoints [idx].Point.X, modelKeyPoints [idx].Point.Y));
                    idx = _matches [i] [0].QueryIdx;
                    if (idx < observedKeyPoints.Size)
                    {
                        _matchesPointsB.Add(new Vector2(observedKeyPoints [idx].Point.X, observedKeyPoints [idx].Point.Y));
                    }
                    else
                    {
                        UnityEngine.Debug.Log("Exceed length!");
                    }
                }catch (Exception e)
                {
                    UnityEngine.Debug.Log(e.Message);
                }
            }
            //	kp.Add (_matches [i][0].ImgIdx);
        }        /**/

        /*
         * for (int i = 0; i < observedKeyPoints.Size; ++i) {
         *      _matchesPoints.Add (new Vector2 (observedKeyPoints [i].Point.X, observedKeyPoints [i].Point.Y));
         * }*/

        if (ShowImage)
        {
            ShowImage = false;

            Image <Bgr, Byte> result = frame.Mat.ToImage <Bgr, Byte> ();
            //	Features2DToolbox.DrawMatches (frame, modelKeyPoints, _storedImage, observedKeyPoints, _matches, result, new MCvScalar (255, 255, 255), new MCvScalar (0, 0, 255), mask, Features2DToolbox.KeypointDrawType.Default);

            var kpts = observedKeyPoints.ToArray();
            for (int i = 0; i < kpts.Length; ++i)
            {
                var p = kpts [i];
                result.Draw(new CircleF(p.Point, p.Size), new Bgr(255, 0, 0), 1);
            }

            //Emgu.CV.UI.ImageViewer.Show(result,"Result");
        }

        modelKeyPoints    = observedKeyPoints;
        _modelDescriptors = observedDescriptors;

        _storedImage = frame.Mat.Clone();
    }
Example #26
        public AlgorithmResult DetectFeatureMatch(
            string modelName,
            string observedeName,
            FeatureDetectType detectType,
            FeatureMatchType matchType,
            int k,
            double uniquenessThreshold)
        {
            AlgorithmResult   result        = new AlgorithmResult();
            Image <Bgr, byte> modelImage    = ImageHelper.GetImage(modelName);
            Image <Bgr, byte> observedImage = ImageHelper.GetImage(observedeName);
            Mat resultImage = new Mat();

            // Get features from modelImage
            var modelKeyPoints   = new VectorOfKeyPoint();
            var modelDescriptors = new UMat();

            GetDetector(detectType).DetectAndCompute(
                modelImage.Convert <Gray, byte>(),
                null,
                modelKeyPoints,
                modelDescriptors,
                false);

            // Get features from observedImage
            var observedKeyPoints   = new VectorOfKeyPoint();
            var observedDescriptors = new UMat();

            GetDetector(detectType).DetectAndCompute(
                observedImage.Convert <Gray, byte>(),
                null,
                observedKeyPoints,
                observedDescriptors,
                false);

            // Perform match with selected matcher
            var matches = new VectorOfVectorOfDMatch();

            if (matchType == FeatureMatchType.Flann)
            {
                // TODO: Add parameters for GetIndexParams
                var flannMatcher = new FlannBasedMatcher(new AutotunedIndexParams(), new SearchParams());
                flannMatcher.Add(modelDescriptors);
                flannMatcher.KnnMatch(
                    observedDescriptors,
                    matches,
                    k,
                    null);
            }
            else
            {
                // TODO: Add parameters for DistanceType
                var bfMatcher = new BFMatcher(DistanceType.L2);
                bfMatcher.Add(modelDescriptors);
                bfMatcher.KnnMatch(
                    observedDescriptors,
                    matches,
                    k,
                    null);
            }

            // Find homography
            Mat homography = null;
            var mask       = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));

            VoteForUniqueness(matches, uniquenessThreshold, mask);

            // If 4 or more patches continue
            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                // Filter for majority scale and rotation
                nonZeroCount = VoteForSizeAndOrientation(
                    modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);

                // Continue only if at least 4 matches remain
                if (nonZeroCount >= 4)
                {
                    homography = GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                        observedKeyPoints, matches, mask, 2);
                }
            }

            // Draw the matched keypoints
            DrawMatches(
                modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                matches, resultImage, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

            // Draw the projected region on the image
            if (homography != null)
            {
                // Draw a rectangle along the projected model
                Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                PointF[]  pts  = new PointF[]
                {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };

                // Transform the perspective of the points array based on the homography
                // And get a rotated rectangle for the homography
                pts = CvInvoke.PerspectiveTransform(pts, homography);

                Point[] points = Array.ConvertAll(pts, Point.Round);
                using (VectorOfPoint vp = new VectorOfPoint(points))
                {
                    CvInvoke.Polylines(resultImage, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                }
            }

            result.ImageArray = ImageHelper.SetImage(resultImage.ToImage <Bgr, byte>());

            return(result);
        }
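A minimal usage sketch for the method above, assuming it is called from within the same class. The file names and the enum members (FeatureDetectType.Kaze, FeatureMatchType.Flann) are illustrative assumptions, since their definitions are not part of this listing.

        // Hypothetical call site; file names and enum members are assumed
        public void DetectFeatureMatchExample()
        {
            AlgorithmResult result = DetectFeatureMatch(
                "box.png",              // model image, resolved by ImageHelper
                "scene.png",            // observed image
                FeatureDetectType.Kaze, // assumed enum member
                FeatureMatchType.Flann, // takes the FLANN branch above
                2,                      // k: nearest neighbours per descriptor
                0.8);                   // uniqueness (ratio) threshold

            // result.ImageArray now holds the annotated match visualization
        }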
Example #27
0
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                    //extract features from the object image
                    using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2)) {
                                surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                        //using (GpuMat tmp = new GpuMat())
                                        //using (Stream stream = new Stream())
                                        {
                                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                                            surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                            mask.SetTo(new MCvScalar(255));
                                            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                                            if (nonZeroCount >= 4)
                                            {
                                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                                           matches, mask, 1.5, 20);
                                                if (nonZeroCount >= 4)
                                                {
                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                          observedKeyPoints, matches, mask, 2);
                                                }
                                            }
                                        }
                                watch.Stop();
                            }
            }
            else
            {
                using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                    using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read)) {
                        SURF surfCPU = new SURF(hessianThresh);
                        //extract features from the object image
                        UMat modelDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                        watch = Stopwatch.StartNew();

                        // extract features from the observed image
                        UMat observedDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                        BFMatcher matcher = new BFMatcher(DistanceType.L2);
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        // Estimate the homography once enough matches survive the votes,
                        // mirroring the CUDA branch above
                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                       matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                      observedKeyPoints, matches, mask, 2);
                            }
                        }

                        watch.Stop();
                    }
            }
            matchTime = watch.ElapsedMilliseconds;
        }
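For context, a sketch of calling this helper and visualizing the result. The image paths are placeholders; the drawing call mirrors the Features2DToolbox API used elsewhere in this listing.

        // Hypothetical call site for the helper above; image paths are placeholders
        public static void FindMatchExample()
        {
            Mat model = CvInvoke.Imread("model.png", ImreadModes.Grayscale);
            Mat scene = CvInvoke.Imread("scene.png", ImreadModes.Grayscale);
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                FindMatch(model, scene, out long matchTime, out VectorOfKeyPoint modelKp,
                          out VectorOfKeyPoint sceneKp, matches, out Mat mask, out Mat homography);

                Mat visualization = new Mat();
                Features2DToolbox.DrawMatches(model, modelKp, scene, sceneKp, matches, visualization,
                                              new MCvScalar(255, 255, 255), new MCvScalar(0, 0, 255), mask);
            }
        }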
Example #28
0
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, out long score)
        {
            int       k = 2;
            double    uniquenessThreshold = 0.80;
            Stopwatch watch;

            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    //   KAZE featureDetector = new KAZE();
                    SURF featureDetector = new SURF(100);
                    //    SIFT featureDetector = new SIFT();
                    Mat modelDescriptors = new Mat();
                    // Detect and describe in a single call; OpenCV's two separate steps are combined here, though they can also be used independently
                    featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                    watch = Stopwatch.StartNew();

                    Mat observedDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // KdTree index params for a FLANN matcher (faster results, less accuracy);
                    // unused here while the brute-force matcher below is active
                    using (var ip = new Emgu.CV.Flann.KdTreeIndexParams())
                        using (var sp = new SearchParams())
                            //  using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp)) // start matching
                            using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
                            {
                                matcher.Add(modelDescriptors);
                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask); // remove ambiguous (non-unique) matches

                                // Calculate score based on matches size
                                // ---------------------------------------------->
                                score = 0;
                                for (int i = 0; i < matches.Size; i++)
                                {
                                    if (mask.GetData(i)[0] == 0)
                                    {
                                        continue;
                                    }
                                    score += matches[i].Size; // count every surviving k-NN entry
                                }
                                // <----------------------------------------------

                                int nonZeroCount = CvInvoke.CountNonZero(mask); // surviving matches, used to locate the template in the image
                                if (nonZeroCount >= 4)
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
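Note that the score counts every k-NN entry on rows that survive the uniqueness vote, so with k = 2 it is roughly twice the number of retained matches. A sketch of using it to rank candidate templates; the file paths and the surrounding method are illustrative only.

        // Ranking several templates by the returned score; paths are placeholders
        public static string FindBestTemplate()
        {
            Mat scene = CvInvoke.Imread("scene.png", ImreadModes.Grayscale);
            long bestScore = 0;
            string bestTemplate = null;

            foreach (string path in new[] { "template_a.png", "template_b.png" }) // assumed files
            {
                Mat template = CvInvoke.Imread(path, ImreadModes.Grayscale);
                using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                {
                    FindMatch(template, scene, out _, out _, out _, matches,
                              out Mat mask, out Mat homography, out long score);
                    if (score > bestScore)
                    {
                        bestScore    = score;
                        bestTemplate = path;
                    }
                }
            }
            return bestTemplate;
        }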
Example #29
0
    public void ExtractFeatures(Image <Gray, byte> modelImage, Image <Gray, byte> observed)
    {
        Mat       modelDescriptors    = new Mat();
        Mat       observedDescriptors = new Mat();
        BFMatcher matcher             = new BFMatcher(DistanceType.L2);
        VectorOfVectorOfDMatch matches;
        VectorOfKeyPoint       observedKeyPoints = new VectorOfKeyPoint();
        Mat mask;

        Mat homography = null;


        _detector.DetectRaw(modelImage, modelKeyPoints);
        _descriptor.Compute(modelImage, modelKeyPoints, modelDescriptors);

        _detector.DetectRaw(observed, observedKeyPoints);
        _descriptor.Compute(observed, observedKeyPoints, observedDescriptors);

        matcher.Add(modelDescriptors);

        matches = new VectorOfVectorOfDMatch();
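        // Query the k nearest model descriptors per observed descriptor (k is a class field, typically 2)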
        matcher.KnnMatch(observedDescriptors, matches, k, null);

        mask = new Mat(matches.Size, 1, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
        mask.SetTo(new MCvScalar(255));
        Features2DToolbox.VoteForUniqueness(matches, uniquenessThresh, mask);

        Stopwatch stopwatch = Stopwatch.StartNew();

        int nonZeroCount = CvInvoke.CountNonZero(mask);

        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);

            if (nonZeroCount >= 4)
            {
                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
            }
        }
        stopwatch.Stop();
        UnityEngine.Debug.Log("Geometric verification time (ms): " + stopwatch.ElapsedMilliseconds);

        Image <Bgr, Byte> result = modelImage.Mat.ToImage <Bgr, Byte> ();

        Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observed, observedKeyPoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(0, 0, 255), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

        var kpts = modelKeyPoints.ToArray();

        foreach (var p in kpts)
        {
            result.Draw(new CircleF(p.Point, p.Size), new Bgr(255, 0, 0), 1);
        }

        if (homography != null)
        {
            Rectangle rec = modelImage.ROI;

            PointF[] pts = new PointF[] {
                new PointF(rec.Left, rec.Bottom),
                new PointF(rec.Right, rec.Bottom),
                new PointF(rec.Right, rec.Top),
                new PointF(rec.Left, rec.Top)
            };

            pts = CvInvoke.PerspectiveTransform(pts, homography);

            result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(255, 0, 0), 5);
        }

        //	Emgu.CV.UI.ImageViewer.Show(result,"Result");
    }
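This method depends on class-level state that the listing does not show (_detector, _descriptor, modelKeyPoints, k, uniquenessThresh). One plausible set of declarations, consistent with the DetectRaw/Compute calls above but purely an assumption:

    // Assumed field declarations (GFTT corners + BRISK descriptors, a pairing that
    // appears elsewhere in this listing). Note BRISK descriptors are binary, so
    // DistanceType.Hamming would normally suit the BFMatcher better than L2.
    private GFTTDetector _detector = new GFTTDetector(500, 0.01, 5, 3, true);
    private Brisk _descriptor = new Brisk();
    private VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();
    private int k = 2;                      // nearest neighbours per k-NN query
    private double uniquenessThresh = 0.8;  // ratio threshold for VoteForUniqueness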
Example #30
0
        public override void Process(Image <Bgr, byte> image, out Image <Bgr, byte> annotatedImage, out List <object> data)
        {
            base.Process(image, out annotatedImage, out data);

            using (var detector = GetDetector())
                using (var modelKeyPoints = new VectorOfKeyPoint())
                    using (var imageKeyPoints = new VectorOfKeyPoint())
                        using (var modelDescriptors = new Mat())
                            using (var imageDescriptors = new Mat())
                                using (var flannMatcher = new FlannBasedMatcher(GetIndexParams(), new SearchParams()))
                                    using (var bfMatcher = new BFMatcher(_distanceType))
                                        using (var matches = new VectorOfVectorOfDMatch())
                                        {
                                            // get features from image
                                            detector.DetectAndCompute(
                                                image.Convert <Gray, byte>(),
                                                null,
                                                imageKeyPoints,
                                                imageDescriptors,
                                                false);

                                            // optionally view image keypoints and return
                                            if (_showKeypoints)
                                            {
                                                Features2DToolbox.DrawKeypoints(
                                                    annotatedImage,
                                                    imageKeyPoints,
                                                    annotatedImage,
                                                    new Bgr(_annoColor.Color()),
                                                    Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
                                                data = new List <object>();
                                                data.AddRange(imageKeyPoints.ToArray().Select(k => new KeyPoint(k)));
                                                return;
                                            }

                                            // do not proceed if there is no template
                                            if (Template == null)
                                            {
                                                return;
                                            }

                                            // get features from object
                                            detector.DetectAndCompute(
                                                Template.Convert <Gray, byte>(),
                                                null,
                                                modelKeyPoints,
                                                modelDescriptors,
                                                false);

                                            // perform match with selected matcher
                                            if (_matcherType == MatcherType.Flann)
                                            {
                                                flannMatcher.Add(modelDescriptors);
                                                flannMatcher.KnnMatch(
                                                    imageDescriptors,
                                                    matches,
                                                    2,
                                                    null);
                                            }
                                            else
                                            {
                                                bfMatcher.Add(modelDescriptors);
                                                bfMatcher.KnnMatch(
                                                    imageDescriptors,
                                                    matches,
                                                    2,
                                                    null);
                                            }

                                            // find homography
                                            using (var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1))
                                            {
                                                Mat homography = null;

                                                // filter for unique matches
                                                mask.SetTo(new MCvScalar(255));
                                                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                                                // continue only if at least 4 matches remain (minimum for a homography)
                                                var nonZeroCount = CvInvoke.CountNonZero(mask);
                                                if (nonZeroCount >= 4)
                                                {
                                                    // filter for majority scale and rotation
                                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, imageKeyPoints,
                                                                                                               matches, mask, 1.5, 20);

                                                    // continue only if at least 4 matches remain
                                                    if (nonZeroCount >= 4)
                                                    {
                                                        // get the homography
                                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                              imageKeyPoints, matches, mask, 2);
                                                    }
                                                }

                                                // if no homography, return
                                                if (homography == null)
                                                {
                                                    return;
                                                }

                                                // initialize a rectangle of the template size
                                                var rect = new Rectangle(Point.Empty, Template.Size);

                                                // create points array for the vertices of the template
                                                var pts = new[]
                                                {
                                                    new PointF(rect.Left, rect.Bottom),
                                                    new PointF(rect.Right, rect.Bottom),
                                                    new PointF(rect.Right, rect.Top),
                                                    new PointF(rect.Left, rect.Top)
                                                };

                                                // transform the perspective of the points array based on the homography
                                                // and get a rotated rectangle for the homography
                                                pts = CvInvoke.PerspectiveTransform(pts, homography);
                                                var rotRect = CvInvoke.MinAreaRect(pts);

                                                // annotate the image and return the rotated rectangle model
                                                annotatedImage.Draw(rotRect, new Bgr(_annoColor.Color()), _lineThick);
                                                data = new List <object> {
                                                    new RotatedBox(rotRect)
                                                };
                                            }
                                        }
        }
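Process relies on members defined elsewhere in the class (GetDetector, GetIndexParams, _matcherType, _distanceType, _showKeypoints, Template, _annoColor, _lineThick). A minimal sketch of what the two factory helpers could look like; the _detectorType field and DetectorType enum are hypothetical:

        // Hypothetical factory helpers; _detectorType and DetectorType are assumed
        private Feature2D GetDetector()
        {
            switch (_detectorType)
            {
                case DetectorType.Kaze: return new KAZE();
                case DetectorType.Brisk: return new Brisk();
                default: return new KAZE();
            }
        }

        private Emgu.CV.Flann.IIndexParams GetIndexParams()
        {
            // KD-tree suits float descriptors (e.g. KAZE); binary descriptors
            // such as BRISK would need an LSH index instead
            return new Emgu.CV.Flann.KdTreeIndexParams();
        }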