Example #1
        private void SURFfeature(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix <int> indices, out Matrix <byte> mask, out HomographyMatrix homography)
        {
            int          k = 2;
            double       uniquenessThreshold = 0.8;
            SURFDetector surfCPU             = new SURFDetector(300, false);

            homography = null;

            //extract features from the object image
            modelKeyPoints = new VectorOfKeyPoint();
            Matrix <float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

            // extract features from the observed image
            observedKeyPoints = new VectorOfKeyPoint();
            Matrix <float>            observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
            BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);

            matcher.Add(modelDescriptors);

            indices = new Matrix <int>(observedDescriptors.Rows, k);
            using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix <byte>(dist.Rows, 1);
                mask.SetValue(255);
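                // VoteForUniqueness applies Lowe's ratio test: with k = 2, a match is kept only
                // when its best distance is below uniquenessThreshold times the second-best distance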
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);

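            // at least 4 point correspondences are needed to estimate a homography;
            // VoteForSizeAndOrientation keeps matches whose relative scale (increment 1.5) and
            // rotation (20 histogram bins) agree with the majority, and the trailing 2 in
            // GetHomographyMatrixFromMatchedFeatures is the RANSAC reprojection threshold in pixels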
            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }
            }
        }
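
A minimal caller sketch for the method above (an assumption rather than part of the original; it reuses the Emgu CV 2.x drawing calls that appear in Examples #12 and #15, with placeholder file names):

        // Hedged usage sketch (assumed Emgu CV 2.x API; "box.png" and "box_in_scene.png" are placeholders)
        private void MatchAndDraw()
        {
            Image <Gray, Byte> model    = new Image <Gray, Byte>("box.png");
            Image <Gray, Byte> observed = new Image <Gray, Byte>("box_in_scene.png");

            VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
            Matrix <int>     indices;
            Matrix <byte>    mask;
            HomographyMatrix homography;

            SURFfeature(model, observed, out modelKeyPoints, out observedKeyPoints, out indices, out mask, out homography);

            Image <Bgr, Byte> result = Features2DToolbox.DrawMatches(model, modelKeyPoints, observed, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            if (homography != null)
            {
                // project the model rectangle into the observed image and outline it
                Rectangle rect = model.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);
                result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
            }
        }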
Example #2
        private void find(Mat modelImage, out VectorOfKeyPoint modelKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.70;

            homography = null;

            // detector (the feature detector) and current (the current frame) are
            // assumed to be fields of the enclosing class
            VectorOfKeyPoint currentKeyPoints = new VectorOfKeyPoint();
            Mat currentDescriptors            = new Mat();

            detector.DetectAndCompute(current, null, currentKeyPoints, currentDescriptors, false);

            modelKeyPoints = new VectorOfKeyPoint();
            Mat modelDescriptors = new Mat();

            detector.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);

            LinearIndexParams ip      = new LinearIndexParams();
            SearchParams      sp      = new SearchParams();
            DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp);

            matcher.Add(modelDescriptors);

            matcher.KnnMatch(currentDescriptors, matches, k, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, currentKeyPoints, matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, currentKeyPoints, matches, mask, 2);
                }
            }
        }
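
Note that FLANN's linear and KD-tree indices expect CV_32F descriptors (KAZE, SIFT and SURF produce these). For binary descriptors such as ORB, BRISK or BRIEF, an LSH index is needed instead; a hedged sketch, assuming Emgu's Flann wrappers and using illustrative, untuned LSH parameters:

            // Hedged sketch: FLANN matching for binary (CV_8U) descriptors via an LSH index.
            // The parameters (20 hash tables, key size 10, multi-probe level 2) are illustrative.
            using (LshIndexParams lsh = new LshIndexParams(20, 10, 2))
            using (SearchParams sp = new SearchParams())
            using (DescriptorMatcher matcher = new FlannBasedMatcher(lsh, sp))
            {
                matcher.Add(modelDescriptors);
                matcher.KnnMatch(currentDescriptors, matches, k, null);
            }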
Example #3
        public IDrawer FindMatch(Image <Bgr, Byte> modelImage, Image <Bgr, Byte> observedImage, List <KeyFrame> keyframes)
        {
            // CPU (the detector), descriptor, matcher, matches, mask, homography, k,
            // uniquenessThreshold and the keypoint/descriptor holders are assumed to be class fields
            modelKeyPoints = new VectorOfKeyPoint(CPU.Detect(modelImage));
            descriptor.Compute(modelImage, modelKeyPoints, modelDescriptors);

            observedKeyPoints = new VectorOfKeyPoint(CPU.Detect(observedImage));
            descriptor.Compute(observedImage, observedKeyPoints, observedDescriptors);

            matcher.Add(modelDescriptors);

            matcher.KnnMatch(observedDescriptors, matches, k, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                }

                // GetHomographyMatrixFromMatchedFeatures refines the mask to the RANSAC inliers;
                // re-count and require at least 9 inliers to accept the homography
                nonZeroCount = CvInvoke.CountNonZero(mask);
                if (nonZeroCount < 9)
                {
                    homography = null;
                }

                //if (keyframes != null && homography == null)
                //    keyframes.Add(new KeyFrame() { Frame = observedImage, KeyPoints = observedKeyPoints });
            }
            return(this);
        }
Example #4
        public void TestBruteForceHammingDistance()
        {
            if (CudaInvoke.HasCuda)
            {
                Image <Gray, byte>       box   = new Image <Gray, byte>("box.png");
                FastDetector             fast  = new FastDetector(100, true);
                BriefDescriptorExtractor brief = new BriefDescriptorExtractor(32);

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                fast.DetectRaw(box, modelKeypoints);
                Mat modelDescriptors = new Mat();
                brief.Compute(box, modelKeypoints, modelDescriptors);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                Image <Gray, Byte> observedImage = new Image <Gray, byte>("box_in_scene.png");

                #region extract features from the observed image
                stopwatch.Reset(); stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                fast.DetectRaw(observedImage, observedKeypoints);
                Mat observedDescriptors = new Mat();
                brief.Compute(observedImage, observedKeypoints, observedDescriptors);
                stopwatch.Stop();
                Trace.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                Mat homography = null;
                using (GpuMat <Byte> gpuModelDescriptors = new GpuMat <byte>(modelDescriptors)) //initialization of GPU code might take a longer time
                {
                    stopwatch.Reset(); stopwatch.Start();
                    CudaBFMatcher hammingMatcher = new CudaBFMatcher(DistanceType.Hamming);

                    //BFMatcher hammingMatcher = new BFMatcher(BFMatcher.DistanceType.Hamming, modelDescriptors);
                    int            k        = 2;
                    // trainIdx and distance remain from the commented-out explicit download path below;
                    // only distance.Rows (the observed keypoint count) is used, to size the mask
                    Matrix <int>   trainIdx = new Matrix <int>(observedKeypoints.Size, k);
                    Matrix <float> distance = new Matrix <float>(trainIdx.Size);

                    using (GpuMat <Byte> gpuObservedDescriptors = new GpuMat <byte>(observedDescriptors))
                        //using (GpuMat<int> gpuTrainIdx = new GpuMat<int>(trainIdx.Rows, trainIdx.Cols, 1, true))
                        //using (GpuMat<float> gpuDistance = new GpuMat<float>(distance.Rows, distance.Cols, 1, true))
                        using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        {
                            Stopwatch w2 = Stopwatch.StartNew();
                            //hammingMatcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);
                            hammingMatcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k, null, true);
                            w2.Stop();
                            Trace.WriteLine(String.Format("Time for feature matching (excluding data transfer): {0} milli-sec",
                                                          w2.ElapsedMilliseconds));
                            //gpuTrainIdx.Download(trainIdx);
                            //gpuDistance.Download(distance);


                            Mat mask = new Mat(distance.Rows, 1, DepthType.Cv8U, 1);
                            mask.SetTo(new MCvScalar(255));
                            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints,
                                                                                           matches, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                {
                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints,
                                                                                                          observedKeypoints, matches, mask, 2);
                                }
                                nonZeroCount = CvInvoke.CountNonZero(mask);
                            }

                            stopwatch.Stop();
                            Trace.WriteLine(String.Format("Time for feature matching (including data transfer): {0} milli-sec",
                                                          stopwatch.ElapsedMilliseconds));
                        }
                }

                if (homography != null)
                {
                    Rectangle rect = box.ROI;
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    PointF[] points = CvInvoke.PerspectiveTransform(pts, homography);
                    //homography.ProjectPoints(points);

                    //Merge the object image and the observed image into one big image for display
                    Image <Gray, Byte> res = box.ConcateVertical(observedImage);

                    for (int i = 0; i < points.Length; i++)
                    {
                        points[i].Y += box.Height;
                    }
                    res.DrawPolyline(Array.ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                    //ImageViewer.Show(res);
                }
            }
        }
Example #5
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, out long score)
        {
            int       k = 2;
            double    uniquenessThreshold = 0.80;
            Stopwatch watch;

            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    //   KAZE featureDetector = new KAZE();
                    SURF featureDetector = new SURF(100);
                    //    SIFT featureDetector = new SIFT();
                    Mat modelDescriptors = new Mat();
                    // detect and compute in one call; OpenCV's separate detect and compute steps are combined here, but they can also be used individually
                    featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                    watch = Stopwatch.StartNew();

                    Mat observedDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // a KD-tree gives faster results at some cost in accuracy;
                    // ip and sp are only needed by the commented-out FlannBasedMatcher alternative
                    using (var ip = new Emgu.CV.Flann.KdTreeIndexParams())
                        using (var sp = new SearchParams())
                            //  using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp)) // start matching
                            using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
                            {
                                matcher.Add(modelDescriptors);
                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask); // discard non-unique matches

                                // Calculate score: each row that survives the uniqueness vote
                                // contributes its k matches
                                // ---------------------------------------------->
                                score = 0;
                                for (int i = 0; i < matches.Size; i++)
                                {
                                    if (mask.GetData(i)[0] == 0)
                                    {
                                        continue;
                                    }
                                    foreach (var e in matches[i].ToArray())
                                    {
                                        ++score;
                                    }
                                }
                                // <----------------------------------------------

                                int nonZeroCount = CvInvoke.CountNonZero(mask); // used to locate the model within the observed image
                                if (nonZeroCount >= 4)
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #6
        public static void FindMatch(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out Matrix <int> indices, out Matrix <byte> mask, out HomographyMatrix homography)
        {
            int          k = 2;
            double       uniquenessThreshold = 0.8;
            SURFDetector surfCPU             = new SURFDetector(500, false);
            Stopwatch    watch;

            homography = null;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //GPU implementation of VoteForUniqueness: keep a match when the best
                                                            //distance (col 0) is at most uniquenessThreshold times the second-best (col 1)
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //we could perform other CPU-intensive work here while waiting for the stream to complete
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                            }

                                                            watch.Stop();
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = new VectorOfKeyPoint();
                Matrix <float> modelDescriptors = surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = new VectorOfKeyPoint();
                Matrix <float>            observedDescriptors = surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                watch.Stop();
            }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #7
    public void ExtractFeatures(Image <Gray, byte> modelImage, Image <Gray, byte> observed)
    {
        Mat       modelDescriptors    = new Mat();
        Mat       observedDescriptors = new Mat();
        BFMatcher matcher             = new BFMatcher(DistanceType.L2);
        VectorOfVectorOfDMatch matches;
        VectorOfKeyPoint       observedKeyPoints = new VectorOfKeyPoint();
        Mat mask;

        Mat homography = null;


        // _detector, _descriptor, modelKeyPoints, k and uniquenessThresh are assumed to be class fields
        _detector.DetectRaw(modelImage, modelKeyPoints);
        _descriptor.Compute(modelImage, modelKeyPoints, modelDescriptors);

        _detector.DetectRaw(observed, observedKeyPoints);
        _descriptor.Compute(observed, observedKeyPoints, observedDescriptors);

        matcher.Add(modelDescriptors);

        matches = new VectorOfVectorOfDMatch();
        matcher.KnnMatch(observedDescriptors, matches, k, null);

        mask = new  Mat(matches.Size, 1, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
        mask.SetTo(new MCvScalar(255));
        Features2DToolbox.VoteForUniqueness(matches, uniquenessThresh, mask);

        Stopwatch stopwatch = Stopwatch.StartNew();

        stopwatch.Reset();
        stopwatch.Start();
        int nonZeroCount = CvInvoke.CountNonZero(mask);

        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);

            if (nonZeroCount >= 4)
            {
                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
            }
        }
        stopwatch.Stop();
        UnityEngine.Debug.Log("Matcher required time:" + stopwatch.ElapsedMilliseconds);

        Image <Bgr, Byte> result = modelImage.Mat.ToImage <Bgr, Byte> ();

        Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observed, observedKeyPoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(0, 0, 255), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

        var kpts = modelKeyPoints.ToArray();

        foreach (var p in kpts)
        {
            result.Draw(new CircleF(p.Point, p.Size), new Bgr(255, 0, 0), 1);
        }

        if (homography != null && false) // "&& false" intentionally disables this drawing block
        {
            Rectangle rec = modelImage.ROI;

            PointF[] pts = new PointF[] {
                new PointF(rec.Left, rec.Bottom),
                new PointF(rec.Right, rec.Bottom),
                new PointF(rec.Right, rec.Top),
                new PointF(rec.Left, rec.Top)
            };

            pts = CvInvoke.PerspectiveTransform(pts, homography);

            result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(255, 0, 0), 5);
        }

        //	Emgu.CV.UI.ImageViewer.Show(result,"Result");
    }
Example #8
        private void button3_Click(object sender, EventArgs e)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            var baseImgGray    = baseImage.Convert <Gray, byte>();
            var twistedImgGray = twistedImage.Convert <Gray, byte>();

            //descriptor extractor for the keypoints
            Brisk descriptor = new Brisk();

            //since in this case the inverse transformation is needed,
            //the twisted image serves as the base
            VectorOfKeyPoint GFP1     = new VectorOfKeyPoint();
            UMat             baseDesc = new UMat();
            UMat             bimg     = twistedImgGray.Mat.GetUMat(AccessType.Read);

            VectorOfKeyPoint GFP2        = new VectorOfKeyPoint();
            UMat             twistedDesc = new UMat();
            UMat             timg        = baseImgGray.Mat.GetUMat(AccessType.Read);

            //detect raw keypoint data in the images
            detector.DetectRaw(bimg, GFP1);
            //compute descriptors for the detected keypoints
            descriptor.Compute(bimg, GFP1, baseDesc);

            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);

            //matcher that compares sets of keypoint descriptors
            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            //container for the keypoint matches
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            //add the descriptors of the base keypoints
            matcher.Add(baseDesc);
            //match against the descriptors of the twisted image
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            //3rd parameter: the number of nearest neighbours to search among
            //4th parameter: a mask, not needed in this case

            //mask marking matches to discard (outliers and non-unique matches)
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            //keep only the unique matches
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            var res = new Image <Bgr, byte>(baseImage.Size);
            //discard matches whose scale and rotation disagree with those of the majority
            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP2, matches, mask, 1.5, 20);

            Features2DToolbox.DrawMatches(twistedImage, GFP1, baseImage, GFP2, matches, res, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), mask);

            imageBox3.Image = res.Resize(1280, 480, Inter.Linear);

            Mat homography;

            //obtain the homography matrix
            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(GFP1, GFP2, matches, mask, 2);

            var destImage = new Image <Bgr, byte>(baseImage.Size);

            CvInvoke.WarpPerspective(twistedImage, destImage, homography, destImage.Size);

            imageBox2.Image = destImage.Resize(640, 480, Inter.Linear);
        }
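
An equivalent alternative to swapping the two images (a sketch, not part of the original): match in the natural order, with the base image as the model, and invert the resulting homography before warping, since WarpPerspective treats its matrix as the forward src-to-dst mapping:

            // Hedged sketch: with baseImage as the model and twistedImage as the observed image,
            // the homography maps base -> twisted, so invert it before warping twisted back onto base
            Mat inverseHomography = new Mat();
            CvInvoke.Invert(homography, inverseHomography, DecompMethod.LU);
            CvInvoke.WarpPerspective(twistedImage, destImage, inverseHomography, destImage.Size);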
Example #9
        public static void FindMatch(
            Mat modelImage,
            Mat observedImage,
            Feature2D featureDetectorExtractor,
            out long matchTime,
            out VectorOfKeyPoint modelKeyPoints,
            out VectorOfKeyPoint observedKeyPoints,
            VectorOfVectorOfDMatch matches,
            out Mat mask,
            out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    //extract features from the object image
                    Mat modelDescriptors = new Mat();
                    featureDetectorExtractor.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    Mat observedDescriptors = new Mat();
                    featureDetectorExtractor.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // Brute force, slower but more accurate
                    // You can use KDTree for faster matching with slight loss in accuracy
                    using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                        using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                            using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                            {
                                matcher.Add(modelDescriptors);

                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                int nonZeroCount = CvInvoke.CountNonZero(mask);
                                if (nonZeroCount >= 4)
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                               matches, mask, 1.5, 20);
                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                              observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
Example #10
        public static void FindMatch(Mat modelImage, Mat modelImage2, Mat observedImage,
                                     out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography,
                                     out VectorOfKeyPoint modelKeyPoints2, VectorOfVectorOfDMatch matches2, out Mat mask2, out Mat homography2)
        {
            int    k = 2;
            double uniquenessThreshold = 0.80;

            Stopwatch watch;

            homography  = null;
            homography2 = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            modelKeyPoints2   = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read)) // create a UMat view of the model image
                using (UMat uModelImage2 = modelImage2.GetUMat(AccessType.Read))
                    using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                    {
                        KAZE featureDetector = new KAZE();

                        //extract interest points from the object image
                        Mat modelDescriptors = new Mat();
                        featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                        Mat modelDescriptors2 = new Mat();
                        featureDetector.DetectAndCompute(uModelImage2, null, modelKeyPoints2, modelDescriptors2, false);

                        watch = Stopwatch.StartNew();

                        // extract interest points from the observed image
                        Mat observedDescriptors = new Mat();
                        featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);


                        // Bruteforce, slower but more accurate
                        // You can use KDTree for faster matching with slight loss in accuracy
                        using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                            using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                                using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                                {
                                    matcher.Add(modelDescriptors);

                                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                    mask.SetTo(new MCvScalar(255));
                                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                                    if (nonZeroCount >= 4)
                                    {
                                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                                   matches, mask, 1.5, 20);
                                        if (nonZeroCount >= 4)
                                        {
                                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                  observedKeyPoints, matches, mask, 2);
                                        }
                                    }
                                }

                        using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                            using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                                using (DescriptorMatcher matcher2 = new FlannBasedMatcher(ip, sp))
                                {
                                    matcher2.Add(modelDescriptors2);

                                    matcher2.KnnMatch(observedDescriptors, matches2, k, null);
                                    mask2 = new Mat(matches2.Size, 1, DepthType.Cv8U, 1);
                                    mask2.SetTo(new MCvScalar(255));
                                    Features2DToolbox.VoteForUniqueness(matches2, uniquenessThreshold, mask2);

                                    int nonZeroCount = CvInvoke.CountNonZero(mask2);
                                    if (nonZeroCount >= 4)
                                    {
                                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints2, observedKeyPoints,
                                                                                                   matches2, mask2, 1.5, 20);
                                        if (nonZeroCount >= 4)
                                        {
                                            homography2 = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints2,
                                                                                                                   observedKeyPoints, matches2, mask2, 2);
                                        }
                                    }
                                }


                        watch.Stop();
                    }
            matchTime = watch.ElapsedMilliseconds;
        }
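
Example #10 runs an identical match/vote/homography block once per model image; a hedged refactoring sketch (the helper name is illustrative) assembled only from the calls used above:

        // Hedged sketch: shared match-and-estimate step, applied once per model.
        private static Mat MatchModel(Mat modelDescriptors, VectorOfKeyPoint modelKeyPoints,
                                      Mat observedDescriptors, VectorOfKeyPoint observedKeyPoints,
                                      VectorOfVectorOfDMatch matches, out Mat mask,
                                      int k, double uniquenessThreshold)
        {
            Mat homography = null;
            using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                    using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                    {
                        matcher.Add(modelDescriptors);
                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                            }
                        }
                    }
            return homography;
        }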
Example #11
        private void CaptureOnImageGrabbed(object sender, EventArgs eventArgs)
        {
            var capture = (Capture)sender;

            var frame = new Mat();

            capture.Retrieve(frame);

            // 1. get key points
            var keyPoints = new VectorOfKeyPoint(_detector.Detect(frame));

            _tempCloudPoints.SetKeyFeatures(_selectedFrameIndex, keyPoints);

            // 2. get descriptors
            var descripters = new Mat();

            _descripter.Compute(frame, keyPoints, descripters);

            // draw keypoints
            var imageFrame = new Mat();

            Features2DToolbox.DrawKeypoints(frame, keyPoints, imageFrame, new Bgr(Color.DarkBlue),
                                            Features2DToolbox.KeypointDrawType.NotDrawSinglePoints);

            if (_selectedFrameIndex != 0)
            {
                var previousKeyPoints      = _tempCloudPoints.GetKeyFeatures(_selectedFrameIndex - 1);
                var previousKeyDescripters = _previousDescripters;

                const int    k = 2;
                const double uniquenessThreshold = 0.8;

                // 3. compute all matches with previous frame
                var matches = new VectorOfVectorOfDMatch();
                var matcher = GetNativeMatcher(SelectedMatcher);
                matcher.Add(previousKeyDescripters);

                matcher.KnnMatch(descripters, matches, k, null);

                var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                Features2DToolbox.VoteForSizeAndOrientation(previousKeyPoints, keyPoints,
                                                            matches, mask, 1.5, 20);
                Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(previousKeyPoints,
                                                                         keyPoints, matches, mask, 2);

                var managedMask = mask.GetData();

                // 4. separate good matches
                var currentKeys = keyPoints;

                for (int i = 0; i < matches.Size; i++)
                {
                    var match = matches[i][0];
                    // filter wrong matches
                    if (managedMask[i] == 1)
                    {
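                        // DMatch.QueryIdx indexes the query set (the current frame's descriptors),
                        // while DMatch.TrainIdx indexes the train set added via matcher.Add (the previous frame)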
                        var previousIndex = match.TrainIdx;
                        var currentIndex  = match.QueryIdx;

                        var previousPoint = previousKeyPoints[previousIndex].Point;
                        var currentPoint  = currentKeys[currentIndex].Point;

                        _tempCloudPoints.Unite(_selectedFrameIndex - 1, previousIndex,
                                               _selectedFrameIndex, currentIndex);

                        CvInvoke.Line(imageFrame,
                                      Point.Round(previousPoint),
                                      Point.Round(currentPoint),
                                      new Bgr(Color.Red).MCvScalar,
                                      2);
                    }
                }
            }

            _previousDescripters = descripters;

            PreviewImageSource = imageFrame;

            _selectedFrameIndex++;
            RaisePropertyChanged("Progress");
            RaisePropertyChanged("ProgressText");
            if (_selectedFrameIndex == _framesCount)
            {
                GeneratingStates = FeatureGeneratingStates.Finished;
            }
        }
Example #12
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image <Bgr, Byte> Draw(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, int state, out long matchTime, out int p)
        {
            Stopwatch        watch;
            HomographyMatrix homography = null;


            SURFDetector     surfCPU = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;

            Matrix <byte> mask;
            int           k = 2;
            double        uniquenessThreshold = 0.8;

            if (state == 1)
            {
                uniquenessThreshold = 0.8; // no-op: uniquenessThreshold is already 0.8
            }



            //extract features from the object image
            modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
            Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

            watch = Stopwatch.StartNew();

            // extract features from the observed image
            observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
            Matrix <float>            observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
            BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);

            matcher.Add(modelDescriptors);

            indices = new Matrix <int>(observedDescriptors.Rows, k);
            using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix <byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }



            int nonZeroCount = CvInvoke.cvCountNonZero(mask);

            // note: unlike the other examples, a single surviving match is accepted here,
            // although a homography estimate generally needs at least 4 correspondences
            if (nonZeroCount >= 1)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 1)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }
            }

            watch.Stop();

            p = mask.ManagedArray.OfType <byte>().Count(q => q > 0);


            //Draw the matched keypoints
            Image <Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            matchTime = watch.ElapsedMilliseconds;



            return(result);
        }
Example #13
        static void Main(string[] args)
        {
            //Read Images
            String modelImageLocation    = @"C:\EMT\Image\EMT_Lab7\model.png";
            String observedImageLocation = @"C:\EMT\Image\EMT_Lab7\observed3.png";

            Mat modelImage    = CvInvoke.Imread(modelImageLocation, ImreadModes.Grayscale);
            Mat observedImage = CvInvoke.Imread(observedImageLocation, ImreadModes.Grayscale);

            //Create a sift variable
            int  threshold = 170;
            SIFT siftCPU   = new SIFT(threshold);

            //Extract features from the model image
            //Store Keypoint and Descriptors
            VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();
            Mat modelDescriptors            = new Mat();

            siftCPU.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);

            //Extract features from the observed image
            //Store Keypoint and Descriptors
            VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
            Mat observedDescriptors            = new Mat();

            siftCPU.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);

            //Create a Brute-Force Matcher to match modelDescriptors to observedDescriptors
            //using DistanceType.L2 - squared Euclidean distance
            //Stores results of matching to matches
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
            int k = 2;             // number of nearest neighbours to search

            matcher.Add(modelDescriptors);
            matcher.KnnMatch(observedDescriptors, matches, k, null);

            //Eliminate the matched features whose scale and rotation
            // do not agree with the majority's scale and rotation

            //Create a mask to store the matches
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));

            //Create Uniqueness threshold to set limit to uniqueness
            //Store results in mask
            double uniquenessThreshold = 0.8;

            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            //Calculate the Homography of the model and observed images
            Mat homography   = null;
            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeypoints, matches, mask, 1.5, 20);

                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeypoints, matches, mask, 2);
                }
            }

            //Re-read the images in color format
            modelImage    = CvInvoke.Imread(modelImageLocation, ImreadModes.AnyColor);
            observedImage = CvInvoke.Imread(observedImageLocation, ImreadModes.AnyColor);

            // Draw Match lines between matched points in the model and observed image
            Mat result = new Mat();

            Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeypoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);

            //draw a rectangle of the matched object at the observed image
            if (homography != null)
            {
                Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);

                PointF[] pts = new PointF[]
                {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };

                pts = CvInvoke.PerspectiveTransform(pts, homography);

                Point[] points = Array.ConvertAll <PointF, Point>(pts, Point.Round);

                using (VectorOfPoint vp = new VectorOfPoint(points))
                {
                    CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
                }
            }

            CvInvoke.Imshow("Keypoint Image", result);
            CvInvoke.WaitKey(0);
        }
Example #14
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints,
                                     out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            matchTime = 1; // placeholder: the Stopwatch timing below is commented out
            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            // using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))

                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    //watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);
                    try
                    {
                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                    }
                    catch (Exception exp)
                    {
                        MessageBox.Show(exp.Message);
                    }

                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    // watch.Stop();
                }


            // matchTime = watch.ElapsedMilliseconds;
        }
Example #15
        public Image <Bgr, float> alignment(Image <Bgr, float> fImage, Image <Bgr, float> lImage)
        {
            HomographyMatrix homography = null;
            SURFDetector     surfCPU    = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;

            Matrix <byte> mask;

            int    k = 2;
            double uniquenessThreshold = 0.8;


            Image <Gray, Byte> fImageG = fImage.Convert <Gray, Byte>();
            Image <Gray, Byte> lImageG = lImage.Convert <Gray, Byte>();

            //extract features from the object image
            modelKeyPoints = new VectorOfKeyPoint();
            Matrix <float> modelDescriptors = surfCPU.DetectAndCompute(fImageG, null, modelKeyPoints);


            // extract features from the observed image
            observedKeyPoints = new VectorOfKeyPoint();
            Matrix <float>            observedDescriptors = surfCPU.DetectAndCompute(lImageG, null, observedKeyPoints);
            BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);

            matcher.Add(modelDescriptors);

            indices = new Matrix <int>(observedDescriptors.Rows, k);
            using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
            {
                matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                mask = new Matrix <byte>(dist.Rows, 1);
                mask.SetValue(255);
                Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
            }

            int nonZeroCount = CvInvoke.cvCountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                }
            }
            Image <Bgr, Byte> result = Features2DToolbox.DrawMatches(fImageG, modelKeyPoints, lImageG, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            if (homography != null)
            {
                //draw a rectangle along the projected model
                Rectangle rect = fImageG.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);

                // mosaic, warp_image and result3 are allocated but never used below
                Image <Bgr, byte>  mosaic     = new Image <Bgr, byte>(fImageG.Width + fImageG.Width, fImageG.Height);
                Image <Bgr, byte>  warp_image = mosaic.Clone();
                Image <Bgr, float> result2    = new Image <Bgr, float>(fImage.Size);
                Image <Gray, Byte> result3    = new Image <Gray, Byte>(fImage.Size);
                CvInvoke.cvWarpPerspective(fImage.Ptr, result2, homography.Ptr, (int)INTER.CV_INTER_CUBIC + (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
                return(result2);
            }
            return(null);
        }
Example #16
        public static Mat ClassifyForDrawing(Mat modelImage, Mat observedImage, double uniquenessThreshold, int k, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, out VectorOfVectorOfDMatch matches, out Mat homography, out long score)
        {
            var detectionType = 0;

            score = 0L;
            Mat mask = null;

            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();
            var modelDescriptors    = new Mat();
            var observedDescriptors = new Mat();
            var mdlImage            = new Mat();
            var obsImage            = new Mat();

            matches = new VectorOfVectorOfDMatch();
            CvInvoke.Threshold(modelImage, mdlImage, 100.0, 255.0, ThresholdType.BinaryInv);
            CvInvoke.Threshold(observedImage, obsImage, 100.0, 255.0, ThresholdType.BinaryInv);
            using (UMat uModelImage = mdlImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = obsImage.GetUMat(AccessType.Read))
                {
                    switch (detectionType)
                    {
                    default:
                        using (var featureDetector = new SIFT(0, 3, 0.04, 10.0, 1.6))
                        {
                            featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                            featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                            using (var ip = new KdTreeIndexParams())
                                using (var sp = new SearchParams())
                                    using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                                    {
                                        matcher.Add(modelDescriptors);
                                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                        mask.SetTo(new MCvScalar(255));
                                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                                        score = 0;  // count the k-NN entries that survived the uniqueness vote
                                        for (int i = 0; i < matches.Size; i++)
                                        {
                                            if (mask.GetData(i)[0] == 0)
                                            {
                                                continue;
                                            }
                                            score += matches[i].Size;
                                        }
                                        var nonZeroCount = CvInvoke.CountNonZero(mask);
                                        if (nonZeroCount >= 4)
                                        {
                                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                                            if (nonZeroCount >= 4)
                                            {
                                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                            }
                                        }
                                    }
                        }
                        break;

                    case 1:
                        using (var featureDetector = new KAZE())
                        {
                            // descriptors are needed for both images: without this first call,
                            // modelDescriptors stays empty when matcher.Add() runs below
                            featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                            featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                            using (var ip = new KdTreeIndexParams())
                                using (var sp = new SearchParams())
                                    using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                                    {
                                        matcher.Add(modelDescriptors);
                                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                        mask.SetTo(new MCvScalar(255));
                                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                                        score = 0;  // count the k-NN entries that survived the uniqueness vote
                                        for (int i = 0; i < matches.Size; i++)
                                        {
                                            if (mask.GetData(i)[0] == 0)
                                            {
                                                continue;
                                            }
                                            score += matches[i].Size;
                                        }
                                        var nonZeroCount = CvInvoke.CountNonZero(mask);
                                        if (nonZeroCount >= 4)
                                        {
                                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                                            if (nonZeroCount >= 4)
                                            {
                                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                            }
                                        }
                                    }
                        }
                        break;
                    }
                }
            return(mask);
        }
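
A hedged usage sketch for ClassifyForDrawing; the file names are placeholders and the Image-based grayscale load is an assumption:

        Mat model = new Image<Gray, byte>("model.png").Mat;
        Mat scene = new Image<Gray, byte>("scene.png").Mat;
        Mat mask = ClassifyForDrawing(model, scene, 0.80, 2,
                                      out VectorOfKeyPoint modelKp, out VectorOfKeyPoint sceneKp,
                                      out VectorOfVectorOfDMatch matches, out Mat homography, out long score);
        // homography stays null when fewer than 4 consistent matches survive;
        // score counts the k-NN entries that passed the uniqueness vote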
Example #17
        /// <summary>
        /// Draw the model image and observed image, the matched features and homography projection.
        /// </summary>
        /// <param name="modelImage">The model image</param>
        /// <param name="observedImage">The observed image</param>
        /// <param name="matchTime">The output total time for computing the homography matrix.</param>
        /// <returns>The model image and observed image, the matched features and homography projection.</returns>
        public static Image <Bgr, Byte> Draw(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage, out long matchTime)
        {
            Stopwatch        watch;
            HomographyMatrix homography = null;

            SURFDetector     surfCPU = new SURFDetector(500, false);
            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;

            Matrix <byte> mask;
            int           k = 2;
            double        uniquenessThreshold = 0.8;

            if (GpuInvoke.HasCuda)
            {
                GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                using (GpuImage <Gray, Byte> gpuModelImage = new GpuImage <Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat <float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat <float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (GpuBruteForceMatcher <float> matcher = new GpuBruteForceMatcher <float>(DistanceType.L2))
                            {
                                modelKeyPoints = new VectorOfKeyPoint();
                                surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuImage <Gray, Byte> gpuObservedImage = new GpuImage <Gray, byte>(observedImage))
                                    using (GpuMat <float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat <float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                            using (GpuMat <int> gpuMatchIndices = new GpuMat <int>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                using (GpuMat <float> gpuMatchDist = new GpuMat <float>(gpuObservedDescriptors.Size.Height, k, 1, true))
                                                    using (GpuMat <Byte> gpuMask = new GpuMat <byte>(gpuMatchIndices.Size.Height, 1, 1))
                                                        using (Stream stream = new Stream())
                                                        {
                                                            matcher.KnnMatchSingle(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, k, null, stream);
                                                            indices = new Matrix <int>(gpuMatchIndices.Size);
                                                            mask    = new Matrix <byte>(gpuMask.Size);

                                                            //GPU implementation of VoteForUniqueness
                                                            using (GpuMat <float> col0 = gpuMatchDist.Col(0))
                                                                using (GpuMat <float> col1 = gpuMatchDist.Col(1))
                                                                {
                                                                    GpuInvoke.Multiply(col1, new MCvScalar(uniquenessThreshold), col1, stream);
                                                                    GpuInvoke.Compare(col0, col1, gpuMask, CMP_TYPE.CV_CMP_LE, stream);
                                                                }

                                                            observedKeyPoints = new VectorOfKeyPoint();
                                                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                                            //wait for the stream to complete its tasks
                                                            //we could do other CPU-intensive work here while waiting for the stream to complete
                                                            stream.WaitForCompletion();

                                                            gpuMask.Download(mask);
                                                            gpuMatchIndices.Download(indices);

                                                            if (GpuInvoke.CountNonZero(gpuMask) >= 4)
                                                            {
                                                                int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                                                if (nonZeroCount >= 4)
                                                                {
                                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                                                                }
                                                            }

                                                            watch.Stop();
                                                        }
                            }
            }
            else
            {
                //extract features from the object image
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                watch = Stopwatch.StartNew();

                // extract features from the observed image
                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                Matrix <float>            observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
                BruteForceMatcher <float> matcher             = new BruteForceMatcher <float>(DistanceType.L2);
                matcher.Add(modelDescriptors);

                indices = new Matrix <int>(observedDescriptors.Rows, k);
                using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                {
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                    mask = new Matrix <byte>(dist.Rows, 1);
                    mask.SetValue(255);
                    Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                }

                int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                if (nonZeroCount >= 4)
                {
                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                    if (nonZeroCount >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                    }
                }

                watch.Stop();
            }

            //Draw the matched keypoints
            Image <Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                                     indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

            #region draw the projected region on the image
            if (homography != null)
            {  //draw a rectangle along the projected model
                Rectangle rect = modelImage.ROI;
                PointF[]  pts  = new PointF[] {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                homography.ProjectPoints(pts);

                result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
            }
            #endregion

            matchTime = watch.ElapsedMilliseconds;

            return(result);
        }
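
A hedged usage sketch for Draw; the image paths are placeholders:

        Image<Gray, Byte> model = new Image<Gray, Byte>("model.png");
        Image<Gray, Byte> scene = new Image<Gray, Byte>("scene.png");
        long matchTime;
        Image<Bgr, Byte> annotated = Draw(model, scene, out matchTime);
        Console.WriteLine("Homography computed in " + matchTime + " ms");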
Example #18
    public Image <Gray, byte> ObjectTrackingSurf(Image <Gray, byte> liveImg, Image <Gray, byte> templateImg, bool showOnLiveImg)
    {
        vkpLiveKeyPoint    = surfDetector.DetectKeyPointsRaw(liveImg, null);
        mtxLiveDescriptors = surfDetector.ComputeDescriptorsRaw(liveImg, null, vkpLiveKeyPoint);

        vkpTemplateKeyPoint    = surfDetector.DetectKeyPointsRaw(templateImg, null);
        mtxTemplateDescriptors = surfDetector.ComputeDescriptorsRaw(templateImg, null, vkpTemplateKeyPoint);

        bruteForceMatcher = new BruteForceMatcher <Single> (DistanceType.L2);
        bruteForceMatcher.Add(mtxTemplateDescriptors);

        mtxMatchIndices = new Matrix <int> (mtxLiveDescriptors.Rows, KNumNearestNeighbors);
        mtxDistance     = new Matrix <Single> (mtxLiveDescriptors.Rows, KNumNearestNeighbors);

        bruteForceMatcher.KnnMatch(mtxLiveDescriptors, mtxMatchIndices, mtxDistance, KNumNearestNeighbors, null);

        mtxMask = new Matrix <Byte> (mtxDistance.Rows, 1);
        mtxMask.SetValue(255);
        Features2DToolbox.VoteForUniqueness(mtxDistance, UniquenessThreshold, mtxMask);

        NumNonZeroElements = CvInvoke.cvCountNonZero(mtxMask);
        if (NumNonZeroElements >= 4)
        {
            NumNonZeroElements = Features2DToolbox.VoteForSizeAndOrientation(vkpTemplateKeyPoint,
                                                                             vkpLiveKeyPoint,
                                                                             mtxMatchIndices,
                                                                             mtxMask,
                                                                             ScaleIncrement,
                                                                             RotationBins);
            if (NumNonZeroElements >= 4)
            {
                homographyMatrix = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(vkpTemplateKeyPoint,
                                                                                            vkpLiveKeyPoint,
                                                                                            mtxMatchIndices,
                                                                                            mtxMask,
                                                                                            RansacReprojectionThreshold);
            }
        }
        // templateImgCopy must be assigned here, otherwise the showOnLiveImg == false
        // branch below returns a stale (or null) image
        templateImgCopy = templateImg.Copy();
        templateImgCopy.Draw(new Rectangle(1, 1, templateImgCopy.Width - 3, templateImgCopy.Height - 3), boxGray, 2);
        liveImgCopy = liveImg.Copy();         //.ConcateHorizontal(templateImgCopy);

        if (homographyMatrix != null)
        {
            rect.X       = 0;
            rect.Y       = 0;
            rect.Width   = templateImg.Width;
            rect.Height  = templateImg.Height;
            pointsF[0].X = rect.Left; pointsF[0].Y = rect.Top;
            pointsF[1].X = rect.Right; pointsF[1].Y = rect.Top;
            pointsF[2].X = rect.Right; pointsF[2].Y = rect.Bottom;
            pointsF[3].X = rect.Left; pointsF[3].Y = rect.Bottom;

            homographyMatrix.ProjectPoints(pointsF);
            //Debug.Log("live w: "+ liveImgCopy.Width + "live h: " + liveImgCopy.Height);
            //Debug.Log ("pf0: " + pointsF[0] + "pf1: "+ pointsF[1] + " pf2: " + pointsF[2] + " pf3: " + pointsF[3]);

            centerPointF.X = 0;
            centerPointF.Y = 0;
            for (int i = 0; i < pointsF.Length; ++i)
            {
                centerPointF.X += pointsF[i].X;
                centerPointF.Y += pointsF[i].Y;
            }
            centerPointF.X = centerPointF.X / 4f;
            centerPointF.Y = centerPointF.Y / 4f;
            //Debug.Log("centerF: " + centerPointF);
            points[0] = Point.Round(pointsF[0]);
            points[1] = Point.Round(pointsF[1]);
            points[2] = Point.Round(pointsF[2]);
            points[3] = Point.Round(pointsF[3]);

            liveImgCopy.DrawPolyline(points, true, boxGray, 4);
        }
        if (showOnLiveImg)
        {
            return(liveImgCopy);
        }
        else
        {
            return(templateImgCopy);
        }
    }
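
ObjectTrackingSurf relies on class-level fields that this example does not show (the keypoint vectors, matcher, matrices, and several constants). A hedged sketch of the constant declarations its calls imply; the values are illustrative, chosen to match the other examples:

    SURFDetector surfDetector = new SURFDetector(500, false);   // hessian threshold, extended flag
    const int    KNumNearestNeighbors        = 2;               // k for KnnMatch
    const double UniquenessThreshold         = 0.8;             // distance-ratio cutoff
    const double ScaleIncrement              = 1.5;             // VoteForSizeAndOrientation scale step
    const int    RotationBins                = 20;              // rotation bins over 360 degrees
    const double RansacReprojectionThreshold = 2.0;             // RANSAC inlier threshold (pixels)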
Example #19
        static void Main(string[] args)
        {
            Console.Write("image1:");
            var fa = Console.ReadLine().Replace("\"", "");

            Console.Write("image2:");
            var fb = Console.ReadLine().Replace("\"", "");

            var a = (new Image <Bgr, byte>(fa).Resize(0.2, Inter.Area)).SubR(new Bgr(255, 255, 255));   //downscale, then invert (255 - pixel)
            var b = new Image <Bgr, byte>(fb).Resize(0.2, Inter.Area);                                  //downscale only

            Mat homography        = null;
            Mat mask              = null;
            var modelKeyPoints    = new VectorOfKeyPoint();
            var observedKeyPoints = new VectorOfKeyPoint();
            var matches           = new VectorOfVectorOfDMatch();

            UMat a1 = a.Mat.ToUMat(AccessType.Read);
            UMat b1 = b.Mat.ToUMat(AccessType.Read);

            SURF surf                = new SURF(300);
            UMat modelDescriptors    = new UMat();
            UMat observedDescriptors = new UMat();

            surf.DetectAndCompute(a1, null, modelKeyPoints, modelDescriptors, false);       //detect and compute in one call; OpenCV exposes these as two separate steps, which can also be used on their own
            surf.DetectAndCompute(b1, null, observedKeyPoints, observedDescriptors, false);

            var matcher = new BFMatcher(DistanceType.L2);       //start the matching

            matcher.Add(modelDescriptors);
            matcher.KnnMatch(observedDescriptors, matches, 2, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask); //reject non-unique matches

            int Count = CvInvoke.CountNonZero(mask);                 //used to locate the template in the image

            if (Count >= 4)
            {
                Count = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                if (Count >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                }
            }

            //CvInvoke.Imshow("a", a);
            //CvInvoke.Imshow("b", b);

            Mat result = new Mat();

            //Features2DToolbox.DrawMatches(a.Convert<Gray, byte>().Mat, modelKeyPoints, b.Convert<Gray, byte>().Mat, observedKeyPoints, matches, result, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 255), mask);
            Features2DToolbox.DrawMatches(a, modelKeyPoints, b, observedKeyPoints, matches, result, new MCvScalar(0, 0, 255), new MCvScalar(0, 255, 255), mask);
            //draw the match visualization
            //if (homography != null)     //if the template was found in the image, draw its outline
            //{
            //    Rectangle rect = new Rectangle(Point.Empty, a.Size);
            //    PointF[] points = new PointF[]
            //    {
            //      new PointF(rect.Left, rect.Bottom),
            //      new PointF(rect.Right, rect.Bottom),
            //      new PointF(rect.Right, rect.Top),
            //      new PointF(rect.Left, rect.Top)
            //    };
            //    points = CvInvoke.PerspectiveTransform(points, homography);
            //    Point[] points2 = Array.ConvertAll<PointF, Point>(points, Point.Round);
            //    VectorOfPoint vp = new VectorOfPoint(points2);
            //    CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 15);
            //}

            CvInvoke.Imshow("result", result);
            CvInvoke.WaitKey();

            //Console.ReadLine();
        }
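
The commented-out block in the middle of Main sketches how to outline the located template. A cleaned-up, hedged version of that step, using the example's own names and assuming homography came back non-null:

            if (homography != null)
            {
                Rectangle rect = new Rectangle(Point.Empty, a.Size);
                PointF[] corners =
                {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                corners = CvInvoke.PerspectiveTransform(corners, homography);   // map the model corners into the scene
                using (VectorOfPoint vp = new VectorOfPoint(Array.ConvertAll(corners, Point.Round)))
                {
                    CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0), 15);
                }
            }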
Example #20
        public static void FindMatches(Mat modelImage, Mat observedImage, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, MatchingTechnique matchingTechnique, float keyPointFilter = 1, double detectorParameter = -1)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;

            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();


            Feature2D    detector;
            Feature2D    descriptor;
            DistanceType distanceType;

            if (matchingTechnique == MatchingTechnique.FAST)
            {
                if (detectorParameter <= 0)
                {
                    detectorParameter = 20;
                }

                detector     = new FastDetector((int)detectorParameter);
                descriptor   = new BriefDescriptorExtractor();
                distanceType = DistanceType.Hamming;
            }
            else if (matchingTechnique == MatchingTechnique.ORB)
            {
                if (detectorParameter <= 0)
                {
                    detectorParameter = 100000;
                }

                detector     = new ORBDetector((int)detectorParameter);
                descriptor   = detector;
                distanceType = DistanceType.Hamming;
            }
            else if (matchingTechnique == MatchingTechnique.SURF)
            {
                if (detectorParameter <= 0)
                {
                    detectorParameter = 300;
                }

                detector     = new SURF(detectorParameter);
                descriptor   = detector;
                distanceType = DistanceType.L2;
            }
            else
            {
                throw new NotImplementedException($"{matchingTechnique} not supported.");
            }

            // Extract features from model image.
            UMat modelDescriptors = new UMat();

            detector.DetectRaw(modelImage, modelKeyPoints, null);
            Console.WriteLine($"modelKeyPoints: {modelKeyPoints.Size}");
            if (keyPointFilter < 2)
            {
                modelKeyPoints = GetBestKeypointsPercent(modelKeyPoints, keyPointFilter);
            }
            else
            {
                modelKeyPoints = GetBestKeypointsCount(modelKeyPoints, (int)keyPointFilter);
            }
            descriptor.Compute(modelImage, modelKeyPoints, modelDescriptors);

            // Extract features from observed image.
            UMat observedDescriptors = new UMat();

            detector.DetectRaw(observedImage, observedKeyPoints, null);
            Console.WriteLine($"observedKeyPoints: {observedKeyPoints.Size}");
            if (keyPointFilter < 2)
            {
                observedKeyPoints = GetBestKeypointsPercent(observedKeyPoints, keyPointFilter);
            }
            else
            {
                observedKeyPoints = GetBestKeypointsCount(observedKeyPoints, (int)keyPointFilter);
            }
            descriptor.Compute(observedImage, observedKeyPoints, observedDescriptors);

            // Match keypoints.
            BFMatcher matcher = new BFMatcher(distanceType);

            matcher.Add(modelDescriptors);
            matcher.KnnMatch(observedDescriptors, matches, k, null);

            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

            int nonZeroCount = CvInvoke.CountNonZero(mask);

            if (nonZeroCount >= 4)
            {
                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                if (nonZeroCount >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                }
            }
        }
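
A hedged usage sketch for FindMatches; MatchingTechnique is the example's own enum, and the paths and filter value are placeholders:

        var matches = new VectorOfVectorOfDMatch();
        FindMatches(new Image<Gray, byte>("model.png").Mat,
                    new Image<Gray, byte>("scene.png").Mat,
                    out VectorOfKeyPoint modelKp, out VectorOfKeyPoint observedKp,
                    matches, out Mat mask, out Mat homography,
                    MatchingTechnique.ORB, keyPointFilter: 500);   // keep the 500 strongest keypoints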
Example #21
    public void AddFrame(Image <Gray, byte> frame)
    {
        Mat observedDescriptors = new Mat();
        Mat mask;
        VectorOfKeyPoint observedKeyPoints = new VectorOfKeyPoint();


        if (_isFirst)
        {
            _detector.DetectRaw(frame, modelKeyPoints);
            _descriptor.Compute(frame, modelKeyPoints, _modelDescriptors);
            if (modelKeyPoints.Size == 0)
            {
                return;
            }

            _width  = frame.Width;
            _height = frame.Height;

            _matcher = new BFMatcher(DistanceType.L2);
            _matcher.Add(_modelDescriptors);
            _isFirst = false;
            return;
        }
        else
        {
            _detector.DetectRaw(frame, observedKeyPoints);
            _descriptor.Compute(frame, observedKeyPoints, observedDescriptors);
        }

        _matches.Clear();
        _matcher.KnnMatch(observedDescriptors, _matches, k, null);

        _matcher = new BFMatcher(DistanceType.L2);         //re-create the matcher so the next frame is matched against the current one
        _matcher.Add(observedDescriptors);

        mask = new  Mat(_matches.Size, 1, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
        mask.SetTo(new MCvScalar(255));
        Features2DToolbox.VoteForUniqueness(_matches, uniquenessThresh, mask);

        Stopwatch stopwatch = Stopwatch.StartNew();

        int nonZeroCount = CvInvoke.CountNonZero(mask);

        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, _matches, mask, 1.5, 20);

            if (nonZeroCount >= 4)
            {
                _homography   = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, _matches, mask, 2);
                double[,] arr = new double[3, 3];
                _homography.CopyTo(arr);
                Homography.SetRow(0, new Vector4((float)arr[0, 0], (float)arr[0, 1], 0, 0));
                Homography.SetRow(1, new Vector4((float)arr[1, 0], (float)arr[1, 1], 0, 0));
                Homography.SetRow(2, new Vector4(0, 0, 1, 0));

                Translation.Set((float)arr [0, 2] / (float)_width, (float)arr [1, 2] / (float)_height, 0);
            }
        }
        stopwatch.Stop();
        UnityEngine.Debug.Log("Matcher required time:" + stopwatch.ElapsedMilliseconds + " Count: " + nonZeroCount + "/" + _matches.Size);

        List <int> kp = new List <int>();

        _matchesPointsA.Clear();
        _matchesPointsB.Clear();

        // note: this loop only visits every other row in the first half of _matches;
        // see the ratio-test sketch after this method
        for (int i = 0; i < _matches.Size / 2 - 1; i += 2)
        {
            if (_matches [i] [0].Distance < _matches [i] [1].Distance * 0.7f)
            {
                try{
                    int idx = _matches [i] [0].TrainIdx;
                    _matchesPointsA.Add(new Vector2(modelKeyPoints [idx].Point.X, modelKeyPoints [idx].Point.Y));
                    idx = _matches [i] [0].QueryIdx;
                    if (idx < observedKeyPoints.Size)
                    {
                        _matchesPointsB.Add(new Vector2(observedKeyPoints [idx].Point.X, observedKeyPoints [idx].Point.Y));
                    }
                    else
                    {
                        UnityEngine.Debug.Log("Exceed length!");
                    }
                }catch (Exception e)
                {
                    UnityEngine.Debug.Log(e.Message);
                }
            }
            //	kp.Add (_matches [i][0].ImgIdx);
        }

        /*
         * for (int i = 0; i < observedKeyPoints.Size; ++i) {
         *      _matchesPoints.Add (new Vector2 (observedKeyPoints [i].Point.X, observedKeyPoints [i].Point.Y));
         * }*/

        if (ShowImage)
        {
            ShowImage = false;

            Image <Bgr, Byte> result = frame.Mat.ToImage <Bgr, Byte> ();
            //	Features2DToolbox.DrawMatches (frame, modelKeyPoints, _storedImage, observedKeyPoints, _matches, result, new MCvScalar (255, 255, 255), new MCvScalar (0, 0, 255), mask, Features2DToolbox.KeypointDrawType.Default);

            var kpts = observedKeyPoints.ToArray();
            for (int i = 0; i < kpts.Length; ++i)
            {
                var p = kpts [i];
                result.Draw(new CircleF(p.Point, p.Size), new Bgr(255, 0, 0), 1);
            }

            //Emgu.CV.UI.ImageViewer.Show(result,"Result");
        }

        modelKeyPoints    = observedKeyPoints;
        _modelDescriptors = observedDescriptors;

        _storedImage = frame.Mat.Clone();
    }
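
As noted above, the ratio-test loop in AddFrame only visits every other row in the first half of _matches. A hedged sketch of a conventional Lowe ratio test over the same fields, visiting each row once:

    // minimal sketch, assuming the same _matches / keypoint fields as above
    for (int i = 0; i < _matches.Size; i++)
    {
        MDMatch[] pair = _matches[i].ToArray();
        if (pair.Length < 2)
        {
            continue;                                    // the ratio test needs two neighbours
        }
        if (pair[0].Distance < 0.7f * pair[1].Distance)  // Lowe ratio
        {
            // pair[0].TrainIdx indexes modelKeyPoints,
            // pair[0].QueryIdx indexes observedKeyPoints
        }
    }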
Example #22
        /// <summary>
        /// The method used to discover similarities amongst the images, and populating arrays.
        /// </summary>
        /// <param name="m_modelImage"> The model image (library basic). </param>
        /// <param name="m_observedImage"> The observed image (test).  </param>
        /// <param name="d_matchTime"> The output total time for computing the homography matrix. </param>
        /// <param name="v_modelKeyPoints"></param>
        /// <param name="v_observedKeyPoints"></param>
        /// <param name="v_matches"></param>
        /// <param name="m_mask"></param>
        /// <param name="m_homography"></param>
        /// <param name="l_score"> Field contains the score of matching. </param>
        public static void FindMatch(Mat m_modelImage, Mat m_observedImage, out double d_matchTime, out VectorOfKeyPoint v_modelKeyPoints,
                                     out VectorOfKeyPoint v_observedKeyPoints, VectorOfVectorOfDMatch v_matches, out Mat m_mask,
                                     out Mat m_homography, out long l_score)
        {
            ErrInfLogger.LockInstance.InfoLog("Start of the FindMatch");

            TimerAbstraction _tim = new TimerRefinedAbstraction();

            _tim._iTimer = new TimerFractional();

            m_homography = null;

            v_modelKeyPoints    = new VectorOfKeyPoint();
            v_observedKeyPoints = new VectorOfKeyPoint();

            KAZE featureDetector = new KAZE();

            Mat modelDescriptors = new Mat();

            featureDetector.DetectAndCompute(m_modelImage, null, v_modelKeyPoints, modelDescriptors, false);

            _tim.MeasureStart();

            Mat observedDescriptors = new Mat();

            featureDetector.DetectAndCompute(m_observedImage, null, v_observedKeyPoints, observedDescriptors, false);

            // KdTree for faster results / less accuracy
            using (KdTreeIndexParams ip = new KdTreeIndexParams())
                using (SearchParams sp = new SearchParams())
                    using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                    {
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, v_matches, SettingsContainer.Instance.i_K, null);
                        m_mask = new Mat(v_matches.Size, 1, DepthType.Cv8U, 1);
                        m_mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(v_matches, SettingsContainer.Instance.d_UniquenessThreshold, m_mask);

                        // Calculate score based on matches size
                        // ---------------------------------------------->
                        l_score = 0;
                        for (int i = 0; i < v_matches.Size; i++)
                        {
                            if (m_mask.GetData(i)[0] == 0)
                            {
                                continue;
                            }
                            l_score += v_matches[i].Size;
                        }
                        // <----------------------------------------------

                        int nonZeroCount = CvInvoke.CountNonZero(m_mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(v_modelKeyPoints, v_observedKeyPoints, v_matches,
                                                                                       m_mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                m_homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(v_modelKeyPoints, v_observedKeyPoints,
                                                                                                        v_matches, m_mask, 2);
                            }
                        }
                    }
            _tim.MeasureStop();
            d_matchTime = Math.Round(_tim.MeasureResult().TotalMilliseconds, 2);
            _tim.MeasureRestart();

            ErrInfLogger.LockInstance.InfoLog("End of the FindMatch");
        }
Example #23
        public override void Process(Image <Bgr, byte> image, out Image <Bgr, byte> annotatedImage, out List <object> data)
        {
            base.Process(image, out annotatedImage, out data);

            using (var detector = GetDetector())
                using (var modelKeyPoints = new VectorOfKeyPoint())
                    using (var imageKeyPoints = new VectorOfKeyPoint())
                        using (var modelDescriptors = new Mat())
                            using (var imageDescriptors = new Mat())
                                using (var flannMatcher = new FlannBasedMatcher(GetIndexParams(), new SearchParams()))
                                    using (var bfMatcher = new BFMatcher(_distanceType))
                                        using (var matches = new VectorOfVectorOfDMatch())
                                        {
                                            // get features from image
                                            detector.DetectAndCompute(
                                                image.Convert <Gray, byte>(),
                                                null,
                                                imageKeyPoints,
                                                imageDescriptors,
                                                false);

                                            // optionally view image keypoints and return
                                            if (_showKeypoints)
                                            {
                                                Features2DToolbox.DrawKeypoints(
                                                    annotatedImage,
                                                    imageKeyPoints,
                                                    annotatedImage,
                                                    new Bgr(_annoColor.Color()),
                                                    Features2DToolbox.KeypointDrawType.DrawRichKeypoints);
                                                data = new List <object>();
                                                data.AddRange(imageKeyPoints.ToArray().Select(k => new KeyPoint(k)));
                                                return;
                                            }

                                            // do not proceed if there is no template
                                            if (Template == null)
                                            {
                                                return;
                                            }

                                            // get features from object
                                            detector.DetectAndCompute(
                                                Template.Convert <Gray, byte>(),
                                                null,
                                                modelKeyPoints,
                                                modelDescriptors,
                                                false);

                                            // perform match with selected matcher
                                            if (_matcherType == MatcherType.Flann)
                                            {
                                                flannMatcher.Add(modelDescriptors);
                                                flannMatcher.KnnMatch(
                                                    imageDescriptors,
                                                    matches,
                                                    2,
                                                    null);
                                            }
                                            else
                                            {
                                                bfMatcher.Add(modelDescriptors);
                                                bfMatcher.KnnMatch(
                                                    imageDescriptors,
                                                    matches,
                                                    2,
                                                    null);
                                            }

                                            // find homography
                                            using (var mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1))
                                            {
                                                Mat homography = null;

                                                // filter for unique matches
                                                mask.SetTo(new MCvScalar(255));
                                                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                                                // if 4 or more patches continue
                                                var nonZeroCount = CvInvoke.CountNonZero(mask);
                                                if (nonZeroCount >= 4)
                                                {
                                                    // filter for majority scale and rotation
                                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, imageKeyPoints,
                                                                                                               matches, mask, 1.5, 20);

                                                    // if 4 or more patches continue
                                                    if (nonZeroCount >= 4)
                                                    {
                                                        // get the homography
                                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                              imageKeyPoints, matches, mask, 2);
                                                    }
                                                }

                                                // if no homography, return
                                                if (homography == null)
                                                {
                                                    return;
                                                }

                                                // initialize a rectangle of the template size
                                                var rect = new Rectangle(Point.Empty, Template.Size);

                                                // create points array for the vertices of the template
                                                var pts = new[]
                                                {
                                                    new PointF(rect.Left, rect.Bottom),
                                                    new PointF(rect.Right, rect.Bottom),
                                                    new PointF(rect.Right, rect.Top),
                                                    new PointF(rect.Left, rect.Top)
                                                };

                                                // transform the perspective of the points array based on the homography
                                                // and get a rotated rectangle for the homography
                                                pts = CvInvoke.PerspectiveTransform(pts, homography);
                                                var rotRect = CvInvoke.MinAreaRect(pts);

                                                // annotate the image and return the rotated rectangle model
                                                annotatedImage.Draw(rotRect, new Bgr(_annoColor.Color()), _lineThick);
                                                data = new List <object> {
                                                    new RotatedBox(rotRect)
                                                };
                                            }
                                        }
        }
Example #24
        public static Tuple <Image <Bgr, byte>, HomographyMatrix> DrawHomography(Image <Gray, byte> model, Image <Gray, byte> observed, double uniquenessThreshold, int TM, int hessianThreshould)
        {
            HomographyMatrix  homography = null;
            Image <Bgr, Byte> result     = null;
            /////surf
            SURFDetector surfCPU = new SURFDetector(hessianThreshould, false);

            VectorOfKeyPoint modelKeyPoints;
            VectorOfKeyPoint observedKeyPoints;
            Matrix <int>     indices;
            Matrix <byte>    mask;
            int k = 2;

            try
            {
                result = observed.Convert <Bgr, byte>();
            }
            catch (Exception)
            {
                // swallow conversion failures; result stays null and matching continues
            }

            try
            {
                modelKeyPoints = surfCPU.DetectKeyPointsRaw(model, null); // Extract features from the object image
                Matrix <float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(model, null, modelKeyPoints);

                observedKeyPoints = surfCPU.DetectKeyPointsRaw(observed, null); // Extract features from the observed image

                if (modelKeyPoints.Size <= 0)
                {
                    throw new System.ArgumentException("Can't find any keypoints in your model image!");
                }

                if (observedKeyPoints.Size > 0)
                {
                    Matrix <float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observed, null, observedKeyPoints);

                    BruteForceMatcher <float> matcher = new BruteForceMatcher <float>(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    indices = new Matrix <int>(observedDescriptors.Rows, k);

                    using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
                    {
                        matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
                        mask = new Matrix <byte>(dist.Rows, 1);
                        mask.SetValue(255);
                        Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
                    }



                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= TM)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= TM)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 2);
                        }
                    }

                    result = Features2DToolbox.DrawMatches(model, modelKeyPoints, observed, observedKeyPoints,
                                                           indices, new Bgr(12, 200, 214), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);
                }
                return(new Tuple <Image <Bgr, byte>, HomographyMatrix>(result, homography));
            }
            catch (Exception)
            {
                throw;
            }
        }
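
A hedged usage sketch for DrawHomography; modelGray and sceneGray are hypothetical grayscale inputs, and the thresholds are illustrative:

        Tuple<Image<Bgr, byte>, HomographyMatrix> pair =
            DrawHomography(modelGray, sceneGray, 0.8, 4, 500);   // uniqueness, TM, hessian threshold
        Image<Bgr, byte> visualization = pair.Item1;             // side-by-side match drawing
        HomographyMatrix H             = pair.Item2;             // null when the model was not located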
//..........method that performs the SURF detection........................................................................................
        public void DoSURFDetectAndUpdateForm(object sender, EventArgs e)
        {
            try
            {
                imgSceneColor             = captureWebcam.QueryFrame(); //try to grab a single frame from the camera
                lbPreparingCamera.Visible = false;
            }
            catch (Exception ex)                                    //if that fails, display the error
            {
                this.Text = ex.Message;
            }


            if (imgSceneColor == null)
            {
                this.Text = "error: no frame was read from the camera";      //when the next frame could not be read into the image variable
            }
            if (imgToFindColor == null)                                 //if we do not yet have an image to find...
            {
                ibResult.Image = imgSceneColor.ToBitmap();              //...just show the scene image in the image box
            }
            //if we got this far, both images are OK and we can start the SURF detection

            SURFDetector surfDetector = new SURFDetector(500, false);   //SURF object; parameters: the hessian threshold (how strong a keypoint must be) and the extended flag

            Image <Gray, Byte> imgSceneGray  = null;                    //grayscale scene image
            Image <Gray, Byte> imgToFindGray = null;                    //grayscale image to find

            VectorOfKeyPoint vkpSceneKeyPoints;                         //vector of keypoints in the scene image
            VectorOfKeyPoint vkpToFindKeyPoints;                        //vector of keypoints in the image to find

            Matrix <Single> mtxSceneDescriptors;                        //descriptor matrix used for the nearest-neighbour queries
            Matrix <Single> mtxToFindDescriptor;                        //descriptor matrix of the image being searched for

            Matrix <int>    mtxMatchIndices;                            //matrix of descriptor indices, filled while training the descriptors (KnnMatch())
            Matrix <Single> mtxDistance;                                //matrix of distance values, filled by the same training step
            Matrix <Byte>   mtxMask;                                    //input/output for VoteForUniqueness(), marks which rows match

            BruteForceMatcher <Single> bruteForceMatcher;               //for every descriptor in the first set, the matcher finds...
                                                                        //...the closest descriptor in the second set, trained one by one

            HomographyMatrix homographyMatrix = null;                   //for ProjectPoints(), to place the found image within the scene
            int    intKNumNearestNeighbors    = 2;                      //k, the number of nearest neighbours to search
            double dblUniquenessThreshold     = 0.8;                    //distance-ratio threshold for a match to count as unique

            int intNumNonZeroElements;                                  //return value for the number of non-zero elements in the mask matrix,...
                                                                        //...also returned by GetHomographyMatrixFromMatchedFeatures()

            //parameters used in the VoteForSizeAndOrientation() calls

            double dblScareIncrement = 1.5;                      //defines the scale difference between neighbouring bins
            int    intRotationBins   = 20;                       //number of bins covering 360 degrees of rotation (with 20 bins, each covers 18 degrees (20*18=360))

            double dblRansacReprojectionThreshold = 2.0;         //for GetHomographyMatrixFromMatchedFeatures(); the max. allowed reprojection error...
                                                                 //...for a point pair to be treated as an inlier

            Rectangle rectImageToFind = new Rectangle();         //rectangle spanning the whole image to find

            PointF [] pointsF;                                   //4 points framing the location of the found image in the scene (float)
            Point []  points;                                    //4 points, the same, but (int)

            imgSceneGray = imgSceneColor.Convert <Gray, Byte>(); //convert the scene to grayscale

            if (isImgToFind == true)
            {
                try
                {
                    imgToFindGray = imgToFindColor.Convert <Gray, Byte>();       //convert the image to find to grayscale
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.ToString());
                }

                vkpSceneKeyPoints   = surfDetector.DetectKeyPointsRaw(imgSceneGray, null);                       //detect keypoints in the scene; the 2nd param. is a mask, null since none is needed
                mtxSceneDescriptors = surfDetector.ComputeDescriptorsRaw(imgSceneGray, null, vkpSceneKeyPoints); //compute the scene descriptors; params: scene image,...
                //...mask, scene keypoints

                vkpToFindKeyPoints = surfDetector.DetectKeyPointsRaw(imgToFindGray, null);                          //detect keypoints in the image to find; the 2nd param. is the...
                //...mask, null since none is needed

                mtxToFindDescriptor = surfDetector.ComputeDescriptorsRaw(imgToFindGray, null, vkpToFindKeyPoints);                //compute the descriptors of the image to find (image, mask, its keypoints)

                bruteForceMatcher = new BruteForceMatcher <Single>(DistanceType.L2);                                              //brute-force matcher with L2, the squared Euclidean distance
                bruteForceMatcher.Add(mtxToFindDescriptor);                                                                       //add the matrix of searched descriptors to the brute-force matcher

                if (mtxSceneDescriptors != null)                                                                                  //the scene image may have no features, e.g. a blank wall
                {
                    mtxMatchIndices = new Matrix <int>(mtxSceneDescriptors.Rows, intKNumNearestNeighbors);                        //matrix of match indices (rows, columns)
                    mtxDistance     = new Matrix <Single>(mtxSceneDescriptors.Rows, intKNumNearestNeighbors);                     //the same for the distances

                    bruteForceMatcher.KnnMatch(mtxSceneDescriptors, mtxMatchIndices, mtxDistance, intKNumNearestNeighbors, null); //find the k nearest matches (null = no mask needed)

                    mtxMask = new Matrix <Byte>(mtxDistance.Rows, 1);                                                             //mask matrix object
                    mtxMask.SetValue(255);                                                                                        //set all elements of the mask matrix

                    Features2DToolbox.VoteForUniqueness(mtxDistance, dblUniquenessThreshold, mtxMask);                            //filter the matched features: any match that is NOT unique is rejected

                    intNumNonZeroElements = CvInvoke.cvCountNonZero(mtxMask);                                                     //get the number of non-zero elements in the mask matrix
                    if (intNumNonZeroElements >= 4)
                    {
                        //eliminate matched features whose scale and rotation do not agree with the scale and rotation of the majority
                        intNumNonZeroElements = Features2DToolbox.VoteForSizeAndOrientation(vkpToFindKeyPoints, vkpSceneKeyPoints, mtxMatchIndices, mtxMask, dblScareIncrement, intRotationBins);
                        if (intNumNonZeroElements >= 4)             //if there are still at least 4 non-zero elements

                        //get the homography matrix using RANSAC (random sample consensus)
                        {
                            homographyMatrix = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(vkpToFindKeyPoints, vkpSceneKeyPoints, mtxMatchIndices, mtxMask, dblRansacReprojectionThreshold);
                        }
                    }

                    imgCopyOfImageToFindWithBorder = imgToFindColor.Copy();     //copy the image to find so we can draw on the copy without altering the original

                    //draw a 2px border around the copy of the image to find, in the same colour as the box drawn around the found image
                    imgCopyOfImageToFindWithBorder.Draw(new Rectangle(1, 1, imgCopyOfImageToFindWithBorder.Width - 3, imgCopyOfImageToFindWithBorder.Height - 3), bgrFoundImageColor, 2);

                    //rysowanie obrazu sceny i obrazka do znalezienia razem na obrazie rezultatu
                    //3 warunki w zależności od tego, który checkBox jest zaznaczony (rysuj punkty i/lub rysuj linie)
                    if (ckDrawKeyPoints.Checked == true && ckDrawMatchingLines.Checked == true)
                    {
                        //używa DrawMatches() aby połączyć obraz sceny z obrazkiem do znalezienia, potem rysuje punkty i linie
                        imgResult = Features2DToolbox.DrawMatches(imgCopyOfImageToFindWithBorder,
                                                                  vkpToFindKeyPoints,
                                                                  imgSceneColor,
                                                                  vkpSceneKeyPoints,
                                                                  mtxMatchIndices,
                                                                  bgrMatchingLineColor,
                                                                  bgrKeyPointColor,
                                                                  mtxMask,
                                                                  Features2DToolbox.KeypointDrawType.DEFAULT);
                    }
                    else if (ckDrawKeyPoints.Checked == true && ckDrawMatchingLines.Checked == false)
                    {
                        //rysuje scenę z punktami na obrazie rezultatu
                        imgResult = Features2DToolbox.DrawKeypoints(imgSceneColor,
                                                                    vkpSceneKeyPoints,
                                                                    bgrKeyPointColor,
                                                                    Features2DToolbox.KeypointDrawType.DEFAULT);
                        //potem rysuje punkty na kopi obrazka do znalezienia
                        imgCopyOfImageToFindWithBorder = Features2DToolbox.DrawKeypoints(imgCopyOfImageToFindWithBorder,
                                                                                         vkpToFindKeyPoints,
                                                                                         bgrKeyPointColor,
                                                                                         Features2DToolbox.KeypointDrawType.DEFAULT);
                        // then append the copy of the image to find to the result image
                        imgResult = imgResult.ConcateHorizontal(imgCopyOfImageToFindWithBorder);
                    }
                    else if (!ckDrawKeyPoints.Checked && !ckDrawMatchingLines.Checked)
                    {
                        imgResult = imgSceneColor;                                                  // start the result image from the scene
                        imgResult = imgResult.ConcateHorizontal(imgCopyOfImageToFindWithBorder);    // append the copy of the sought image to the result image
                    }
                    else
                    {
                        MessageBox.Show("Error");     // we should never get here (matching lines cannot be drawn without key points)
                    }
                }
                else
                {
                    imgResult = imgSceneColor;                                                  // start the result image from the scene
                    imgResult = imgResult.ConcateHorizontal(imgCopyOfImageToFindWithBorder);    // append the copy of the sought image to the result image
                }

                if (homographyMatrix != null)    // make sure the homography matrix actually contains something
                {
                    // draw a frame on the scene portion of the result image where the sought image was found
                    rectImageToFind.X      = 0;     // start by setting the rectangle to the full size of the image to find
                    rectImageToFind.Y      = 0;
                    rectImageToFind.Width  = imgToFindGray.Width;
                    rectImageToFind.Height = imgToFindGray.Height;

                    // create an array of PointF corresponding to the corners of the rectangle
                    pointsF = new PointF[] { new PointF(rectImageToFind.Left, rectImageToFind.Top),
                                             new PointF(rectImageToFind.Right, rectImageToFind.Top),
                                             new PointF(rectImageToFind.Right, rectImageToFind.Bottom),
                                             new PointF(rectImageToFind.Left, rectImageToFind.Bottom) };

                    // ProjectPoints() updates pointsF (by reference) to the location of the frame on the scene fragment where the sought image was found
                    homographyMatrix.ProjectPoints(pointsF);

                    // convert from PointF to Point, because ProjectPoints() works with PointF while DrawPolyline() takes Point
                    points = new Point[] { Point.Round(pointsF[0]),
                                           Point.Round(pointsF[1]),
                                           Point.Round(pointsF[2]),
                                           Point.Round(pointsF[3]) };

                    // draw the frame around the found image on the scene fragment of the result image
                    imgResult.DrawPolyline(points, true, new Bgr(0, 255, 0), 2);

                    // draw a red dash at the center of the object
                    int x, y, x1, y1, xW, yW;

                    x  = points[0].X;
                    y  = points[0].Y;
                    x1 = points[2].X;
                    y1 = points[2].Y;

                    xW = x + (x1 - x) / 2;    // horizontal midpoint of the diagonal
                    yW = y + (y1 - y) / 2;    // vertical midpoint of the diagonal
                    Point[] pp = new Point[] { new Point(xW, yW), new Point(xW + 10, yW) };    // dash marking the center of the detected object
                    imgResult.DrawPolyline(pp, true, new Bgr(0, 0, 255), 5);

                    XX = xW.ToString();
                    YY = yW.ToString();
                    ////////// when the object leaves the field of view
                    if (xW == 0 || yW == 0 || xW < -200 || yW < -200 || xW > 800 || yW > 800)
                    {
                        targetLost(-1);
                    }
                    else
                    {
                        targetLost(1);
                    }
                    //////////
                }
                else
                {
                    targetLost(-1);     // no homography found - target lost
                }
                // end of SURF; update the whole form

                ibResult.Image = imgResult.ToBitmap();          // show the result in the image box
            }
        }
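The center computation above halves the diagonal between two opposite projected corners. As a hedged alternative (a hypothetical helper, not part of the original form), averaging all four projected corners gives a center that degrades more gracefully when the projected quadrilateral is strongly skewed:

        // Hypothetical helper: center of the projected quadrilateral as the mean of its four corners.
        private static Point QuadCenter(PointF[] corners)
        {
            float cx = 0, cy = 0;
            foreach (PointF p in corners)
            {
                cx += p.X;
                cy += p.Y;
            }
            return Point.Round(new PointF(cx / corners.Length, cy / corners.Length));
        }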
Example #26
        private static VectorOfPoint ProcessImageFLANN(Image <Gray, byte> template, Image <Gray, byte> sceneImage)
        {
            try
            {
                // initialization
                VectorOfPoint    finalPoints        = null;
                Mat              homography         = null;
                VectorOfKeyPoint templateKeyPoints  = new VectorOfKeyPoint();
                VectorOfKeyPoint sceneKeyPoints     = new VectorOfKeyPoint();
                Mat              templateDescriptor = new Mat();
                Mat              sceneDescriptor    = new Mat();

                Mat    mask;
                int    k = 2;
                double uniquenessThreshold     = 0.80;
                VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

                // feature detection and description
                KAZE featureDetector = new KAZE();
                featureDetector.DetectAndCompute(template, null, templateKeyPoints, templateDescriptor, false);
                featureDetector.DetectAndCompute(sceneImage, null, sceneKeyPoints, sceneDescriptor, false);


                // Matching

                //KdTreeIndexParams ip = new KdTreeIndexParams();
                //var ip = new AutotunedIndexParams();
                var               ip      = new LinearIndexParams();
                SearchParams      sp      = new SearchParams();
                FlannBasedMatcher matcher = new FlannBasedMatcher(ip, sp);


                matcher.Add(templateDescriptor);
                matcher.KnnMatch(sceneDescriptor, matches, k);

                mask = new Mat(matches.Size, 1, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));

                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                int count = Features2DToolbox.VoteForSizeAndOrientation(templateKeyPoints, sceneKeyPoints, matches, mask, 1.5, 20);

                if (count >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(templateKeyPoints,
                                                                                          sceneKeyPoints, matches, mask, 5);
                }

                if (homography != null)
                {
                    System.Drawing.Rectangle rect = new System.Drawing.Rectangle(System.Drawing.Point.Empty, template.Size);
                    PointF[] pts = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    pts = CvInvoke.PerspectiveTransform(pts, homography);
                    System.Drawing.Point[] points = Array.ConvertAll <PointF, System.Drawing.Point>(pts, System.Drawing.Point.Round);
                    finalPoints = new VectorOfPoint(points);
                }

                return(finalPoints);
            }
            catch (Exception)
            {
                throw;    // rethrow without wrapping, preserving the original stack trace
            }
        }
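A minimal caller sketch for ProcessImageFLANN() (the file names below are placeholders, not part of the original example): load a template and a scene, run the matcher, and outline the located region on a colour copy of the scene.

        // Hypothetical usage; "template.png" and "scene.png" are placeholder paths.
        Image<Gray, byte> template = new Image<Gray, byte>("template.png");
        Image<Gray, byte> scene    = new Image<Gray, byte>("scene.png");
        VectorOfPoint region       = ProcessImageFLANN(template, scene);
        if (region != null)
        {
            Image<Bgr, byte> vis = scene.Convert<Bgr, byte>();    // colour copy so the outline is visible
            CvInvoke.Polylines(vis, region.ToArray(), true, new MCvScalar(0, 255, 0), 2);
        }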
Example #27
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                    //extract features from the object image
                    using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2)) {
                                surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                        //using (GpuMat tmp = new GpuMat())
                                        //using (Stream stream = new Stream())
                                        {
                                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                                            surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                            mask.SetTo(new MCvScalar(255));
                                            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                                            if (nonZeroCount >= 4)
                                            {
                                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                                           matches, mask, 1.5, 20);
                                                if (nonZeroCount >= 4)
                                                {
                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                          observedKeyPoints, matches, mask, 2);
                                                }
                                            }
                                        }
                                watch.Stop();
                            }
            }
            else
            {
                using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                    using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read)) {
                        SURF surfCPU = new SURF(hessianThresh);
                        //extract features from the object image
                        UMat modelDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                        watch = Stopwatch.StartNew();

                        // extract features from the observed image
                        UMat observedDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                        BFMatcher matcher = new BFMatcher(DistanceType.L2);
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        // mirror the CUDA branch: without this block the CPU path would never produce a homography
                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                       matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                      observedKeyPoints, matches, mask, 2);
                            }
                        }

                        watch.Stop();
                    }
            }
            matchTime = watch.ElapsedMilliseconds;
        }
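A minimal caller sketch for FindMatch() (a usage assumption; "box.png" and "box_in_scene.png" are the stock Emgu sample images used elsewhere in these examples, and ImreadModes.Grayscale is assumed available in this Emgu build):

        // Hypothetical usage of FindMatch(); grayscale loading assumed suitable for SURF.
        Mat model    = CvInvoke.Imread("box.png", ImreadModes.Grayscale);
        Mat observed = CvInvoke.Imread("box_in_scene.png", ImreadModes.Grayscale);
        VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
        long matchTime;
        VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
        Mat mask, homography;
        FindMatch(model, observed, out matchTime, out modelKeyPoints, out observedKeyPoints, matches, out mask, out homography);
        Console.WriteLine("Matched in " + matchTime + " ms; homography " + (homography == null ? "not found" : "found"));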
Example #28
    public Image <Bgr, Byte> Drawtwo(Image <Gray, Byte> modelImage, Image <Gray, byte> observedImage)
    {
        HomographyMatrix homography = null;

        FastDetector     fastCPU = new FastDetector(10, true);
        VectorOfKeyPoint modelKeyPoints;
        VectorOfKeyPoint observedKeyPoints;
        Matrix <int>     indices;

        BriefDescriptorExtractor descriptor = new BriefDescriptorExtractor();

        Matrix <byte> mask;
        int           k = 2;
        double        uniquenessThreshold = 0.8;

        //extract features from the object image
        modelKeyPoints = fastCPU.DetectKeyPointsRaw(modelImage, null);
        Matrix <Byte> modelDescriptors = descriptor.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

        // extract features from the observed image
        observedKeyPoints = fastCPU.DetectKeyPointsRaw(observedImage, null);
        Matrix <Byte>            observedDescriptors = descriptor.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);
        BruteForceMatcher <Byte> matcher             = new BruteForceMatcher <Byte>(DistanceType.L2);

        matcher.Add(modelDescriptors);

        indices = new Matrix <int>(observedDescriptors.Rows, k);
        using (Matrix <float> dist = new Matrix <float>(observedDescriptors.Rows, k))
        {
            matcher.KnnMatch(observedDescriptors, indices, dist, k, null);
            mask = new Matrix <byte>(dist.Rows, 1);
            mask.SetValue(255);
            Features2DToolbox.VoteForUniqueness(dist, uniquenessThreshold, mask);
        }

        nonZeroCount = CvInvoke.cvCountNonZero(mask);
        //print("nonZeroCount is "+nonZeroCount);
        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
            if (nonZeroCount >= 4)
            {
                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(
                    modelKeyPoints, observedKeyPoints, indices, mask, 2);
            }
        }

        //Draw the matched keypoints
        Image <Bgr, Byte> result = Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                                                                 indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.DEFAULT);

        #region draw the projected region on the image
        if (homography != null)
        {  //draw a rectangle along the projected model
            Rectangle rect = modelImage.ROI;
            PointF[]  pts  = new PointF[] {
                new PointF(rect.Left, rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left, rect.Top)
            };
            homography.ProjectPoints(pts);
            //area = Math.Abs((rect.Top - rect.Bottom) * (rect.Right - rect.Left));
            result.DrawPolyline(Array.ConvertAll <PointF, Point>(pts, Point.Round), true, new Bgr(System.Drawing.Color.Red), 5);
        }
        #endregion



        return(result);
    }
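BRIEF produces binary descriptors, so Hamming distance is the conventional metric; the example above matches them with L2, which works but is slower and less discriminative on binary data. A one-line variant (same API, different DistanceType) would be:

        // Hedged variant: Hamming distance for binary BRIEF descriptors.
        BruteForceMatcher<Byte> matcher = new BruteForceMatcher<Byte>(DistanceType.Hamming);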
Example #29
        private void testToolStripMenuItem_Click(object sender, EventArgs e)
        {
            lstMat.Clear();
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
            Mat testImage  = CvInvoke.Imread(@"Linage2\Main\PartyAuto\2e35av2fwbk.png", ImreadModes.Color);
            Mat modelImage = CVUtil.crop_color_frame(testImage, new Rectangle(842, 646, 70, 70));

            log(modelImage.ToString());
            Image <Bgr, Byte> img = modelImage.ToImage <Bgr, Byte>();

            img.ROI = new Rectangle(0, 0, 35, 35);    // set the ROI via the managed property (equivalent of the legacy cvSetImageROI call)

            //UMat uModelImage = modelImage.GetUMat(AccessType.Read);
            var featureDetector             = new SIFT();
            Mat modelDescriptors            = new Mat();
            VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();

            VectorOfKeyPoint observedKeyPoints = new VectorOfKeyPoint();

            featureDetector.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);
            log("model size = " + modelKeyPoints.Size);
            Mat observedDescriptors = new Mat();

            featureDetector.DetectAndCompute(testImage, null, observedKeyPoints, observedDescriptors, false);

            int    k = 2;
            double uniquenessThreshold = 0.80;
            Mat    mask;
            Mat    homography = null;

            // Bruteforce, slower but more accurate
            // You can use KDTree for faster matching with slight loss in accuracy
            using (Emgu.CV.Flann.LinearIndexParams ip = new Emgu.CV.Flann.LinearIndexParams())
                using (Emgu.CV.Flann.SearchParams sp = new SearchParams())
                    using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                    {
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                       matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                PointF[] src =
                                {
                                    new PointF(0, 0), new PointF(0, modelImage.Height - 1), new PointF(modelImage.Width - 1, modelImage.Height - 1), new PointF(modelImage.Width - 1, 0)
                                };
                                PointF[] points = CvInvoke.PerspectiveTransform(src, homography);
                                foreach (var p in points)
                                {
                                    Console.WriteLine(p.ToString());
                                }
                                Point[] ap = Array.ConvertAll(points,
                                                              new Converter <PointF, Point>(CVUtil.PointFToPoint));

                                CvInvoke.Polylines(testImage, ap, true, new MCvScalar(255, 0, 0));
                                CvInvoke.Rectangle(testImage, new Rectangle(0, 0, 100, 100), new MCvScalar(255, 255, 0));
                                CvInvoke.Circle(testImage, new Point(100, 100), 50, new MCvScalar(255, 255, 0), -1);
                                lstMat.Add(testImage);
                            }
                            //Mat modelMatches = new Mat();
                            //Features2DToolbox.DrawKeypoints(modelImage, modelKeyPoints, result, new Bgr(Color.Red));
                            //Features2DToolbox.DrawKeypoints(testImage, observedKeyPoints, result, new Bgr(Color.Red));
                            //Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, testImage, observedKeyPoints, matches, modelMatches,
                            //    new MCvScalar(255, 0, 0), new MCvScalar(0, 255, 0));
                            //lstMat.Add(modelMatches);

                            //Mat model1 = new Mat();
                            //Features2DToolbox.DrawKeypoints(modelImage, modelKeyPoints, model1, new Bgr(Color.Red));
                            //lstMat.Add(model1);
                            //modelMatches = crop_color_frame(testImage,new Rectangle(842,646,70,70));
                        }
                    }
            log("Done " + mask.Size);

            Refresh();
        }
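CVUtil.PointFToPoint and CVUtil.crop_color_frame are project-specific helpers whose source is not shown here. A plausible sketch of the point converter (an assumption, not the project's actual code) is just a rounding wrapper compatible with the Converter<PointF, Point> delegate used above:

        // Hypothetical reconstruction of the missing helper.
        public static Point PointFToPoint(PointF p)
        {
            return Point.Round(p);
        }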
Example #30
        public static bool TestFeature2DTracker(Feature2D keyPointDetector, Feature2D descriptorGenerator)
        {
            //for (int k = 0; k < 1; k++)
            {
                Feature2D feature2D = null;
                if (keyPointDetector == descriptorGenerator)
                {
                    feature2D = keyPointDetector as Feature2D;
                }

                Mat modelImage = EmguAssert.LoadMat("box.png");
                //Image<Gray, Byte> modelImage = new Image<Gray, byte>("stop.jpg");
                //modelImage = modelImage.Resize(400, 400, true);

                //modelImage._EqualizeHist();

                #region extract features from the object image
                Stopwatch        stopwatch      = Stopwatch.StartNew();
                VectorOfKeyPoint modelKeypoints = new VectorOfKeyPoint();
                Mat modelDescriptors            = new Mat();
                if (feature2D != null)
                {
                    feature2D.DetectAndCompute(modelImage, null, modelKeypoints, modelDescriptors, false);
                }
                else
                {
                    keyPointDetector.DetectRaw(modelImage, modelKeypoints);
                    descriptorGenerator.Compute(modelImage, modelKeypoints, modelDescriptors);
                }
                stopwatch.Stop();
                EmguAssert.WriteLine(String.Format("Time to extract feature from model: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                #endregion

                //Image<Gray, Byte> observedImage = new Image<Gray, byte>("traffic.jpg");
                Image <Gray, Byte> observedImage = EmguAssert.LoadImage <Gray, byte>("box_in_scene.png");
                //Image<Gray, Byte> observedImage = modelImage.Rotate(45, new Gray(0.0));
                //image = image.Resize(400, 400, true);

                //observedImage._EqualizeHist();
                #region extract features from the observed image
                stopwatch.Reset();
                stopwatch.Start();
                VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint();
                using (Mat observedDescriptors = new Mat())
                {
                    if (feature2D != null)
                    {
                        feature2D.DetectAndCompute(observedImage, null, observedKeypoints, observedDescriptors, false);
                    }
                    else
                    {
                        keyPointDetector.DetectRaw(observedImage, observedKeypoints);
                        descriptorGenerator.Compute(observedImage, observedKeypoints, observedDescriptors);
                    }

                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time to extract feature from image: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                    #endregion

                    //Merge the object image and the observed image into one big image for display
                    Image <Gray, Byte> res = modelImage.ToImage <Gray, Byte>().ConcateVertical(observedImage);

                    Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                    PointF[]  pts  = new PointF[] {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };

                    Mat homography = null;

                    stopwatch.Reset();
                    stopwatch.Start();

                    int          k  = 2;
                    DistanceType dt = modelDescriptors.Depth == CvEnum.DepthType.Cv8U ? DistanceType.Hamming : DistanceType.L2;
                    //using (Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k))
                    //using (Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k))
                    using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        using (BFMatcher matcher = new BFMatcher(dt))
                        {
                            //ParamDef[] parameterDefs = matcher.GetParams();
                            matcher.Add(modelDescriptors);
                            matcher.KnnMatch(observedDescriptors, matches, k, null);

                            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                            mask.SetTo(new MCvScalar(255));
                            //mask.SetValue(255);
                            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeypoints, observedKeypoints, matches, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                {
                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeypoints, observedKeypoints, matches, mask, 2);
                                }
                            }
                        }
                    stopwatch.Stop();
                    EmguAssert.WriteLine(String.Format("Time for feature matching: {0} milli-sec", stopwatch.ElapsedMilliseconds));

                    bool success = false;
                    if (homography != null)
                    {
                        PointF[] points = pts.Clone() as PointF[];
                        points = CvInvoke.PerspectiveTransform(points, homography);
                        //homography.ProjectPoints(points);

                        for (int i = 0; i < points.Length; i++)
                        {
                            points[i].Y += modelImage.Height;
                        }

                        res.DrawPolyline(
#if NETFX_CORE
                            Extensions.
#else
                            Array.
#endif
                            ConvertAll <PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);

                        success = true;
                    }
                    //Emgu.CV.UI.ImageViewer.Show(res);
                    return(success);
                }



                /*
                 * stopwatch.Reset(); stopwatch.Start();
                 * //set the initial region to be the whole image
                 * using (Image<Gray, Single> priorMask = new Image<Gray, float>(observedImage.Size))
                 * {
                 * priorMask.SetValue(1.0);
                 * homography = tracker.CamShiftTrack(
                 *    observedFeatures,
                 *    (RectangleF)observedImage.ROI,
                 *    priorMask);
                 * }
                 * Trace.WriteLine(String.Format("Time for feature tracking: {0} milli-sec", stopwatch.ElapsedMilliseconds));
                 *
                 * if (homography != null) //set the initial tracking window to be the whole image
                 * {
                 * PointF[] points = pts.Clone() as PointF[];
                 * homography.ProjectPoints(points);
                 *
                 * for (int i = 0; i < points.Length; i++)
                 *    points[i].Y += modelImage.Height;
                 * res.DrawPolyline(Array.ConvertAll<PointF, Point>(points, Point.Round), true, new Gray(255.0), 5);
                 * return true;
                 * }
                 * else
                 * {
                 * return false;
                 * }*/
            }
        }
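A minimal invocation sketch for the test above (one plausible pairing; the same KAZE instance serves as both detector and descriptor generator, so the method takes the single DetectAndCompute() path):

        // Hypothetical call: KAZE fills both roles, triggering the feature2D fast path.
        KAZE kaze = new KAZE();
        bool success = TestFeature2DTracker(kaze, kaze);
        Console.WriteLine("Feature2D tracking " + (success ? "succeeded" : "failed"));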