Example #1
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 500;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    watch.Stop();
                }

            matchTime = watch.ElapsedMilliseconds;
        }
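A minimal, hypothetical caller for the FindMatch above. The file names are placeholders, and the DrawMatches call mirrors the one that appears in Example #13 below; treat it as a sketch rather than part of the original sample:

        public static void FindMatchUsageSketch()
        {
            using (Image<Gray, byte> model = new Image<Gray, byte>("model.png")) //placeholder file names
            using (Image<Gray, byte> scene = new Image<Gray, byte>("scene.png"))
            using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
            {
                long matchTime;
                VectorOfKeyPoint modelKeyPoints, observedKeyPoints;
                Mat mask, homography;
                FindMatch(model.Mat, scene.Mat, out matchTime, out modelKeyPoints,
                          out observedKeyPoints, matches, out mask, out homography);

                //visualize the surviving matches; a non-null homography means the model was located
                Mat result = new Mat();
                Features2DToolbox.DrawMatches(model, modelKeyPoints, scene, observedKeyPoints,
                                              matches, result, new MCvScalar(0, 0, 255), new MCvScalar(0, 255, 255), mask);
                CvInvoke.Imshow("result", result);
                CvInvoke.WaitKey();
            }
        }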
Example #2
        public void TestBOWKmeansTrainer()
        {
            Image <Gray, byte> box    = EmguAssert.LoadImage <Gray, byte>("box.png");
            SURF             detector = new SURF(500);
            VectorOfKeyPoint kpts     = new VectorOfKeyPoint();
            Mat descriptors           = new Mat();

            detector.DetectAndCompute(box, null, kpts, descriptors, false);

            BOWKMeansTrainer trainer = new BOWKMeansTrainer(100, new MCvTermCriteria(), 3, CvEnum.KMeansInitType.PPCenters);

            trainer.Add(descriptors);
            Mat vocabulary = new Mat();

            trainer.Cluster(vocabulary);

            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            BOWImgDescriptorExtractor extractor = new BOWImgDescriptorExtractor(detector, matcher);

            extractor.SetVocabulary(vocabulary);

            Mat descriptors2 = new Mat();

            extractor.Compute(box, kpts, descriptors2);
        }
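A note on what the test produces: BOWImgDescriptorExtractor.Compute yields a single-row histogram with one column per vocabulary word (100 here, the cluster count passed to BOWKMeansTrainer). A hedged sanity check, reusing the names from the test above:

            //descriptors2 describes the whole image: one row, one column per visual word
            EmguAssert.IsTrue(descriptors2.Rows == 1);
            EmguAssert.IsTrue(descriptors2.Cols == 100);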
Example #3
        private VectorOfPoint _octagon;             //target region (octagon) to search for

        /// <summary>
        /// Constructor.
        /// </summary>
        /// <param name="brickSingModel">Image to process. Expects an Image<Bgr, Byte></param>
        public SingDetectorMethodCanny(IInputArray brickSingModel)
        {
            _detector = new SURF(500);

            using (Mat redMask = new Mat())
            {
                GetRedPixelMask(brickSingModel, redMask);
                _modelKeypoints   = new VectorOfKeyPoint();
                _modelDescriptors = new Mat();
                _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
                if (_modelKeypoints.Size == 0)
                {
                    //throw new Exception("Изображение для обработки не загружено");
                }
            }

            _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
            _modelDescriptorMatcher.Add(_modelDescriptors);

            _octagon = new VectorOfPoint(
                new Point[] {
                new Point(1, 0),
                new Point(2, 0),
                new Point(3, 1),
                new Point(3, 2),
                new Point(2, 3),
                new Point(1, 3),
                new Point(0, 2),
                new Point(0, 1)
            });
        }
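GetRedPixelMask is not shown in this example. A plausible implementation, modelled on the GetRedPixelMask from Emgu CV's StopSignDetector sample (the thresholds are illustrative): keep pixels whose hue falls outside the [20, 160] band (i.e. red) and whose saturation is at least 10:

        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            using (Mat hsv = new Mat())
            using (Mat s = new Mat())
            {
                CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                CvInvoke.ExtractChannel(hsv, mask, 0); //hue channel
                CvInvoke.ExtractChannel(hsv, s, 1);    //saturation channel

                //non-red hues lie between 20 and 160; invert to keep the red ones
                using (ScalarArray lower = new ScalarArray(20))
                using (ScalarArray upper = new ScalarArray(160))
                    CvInvoke.InRange(mask, lower, upper, mask);
                CvInvoke.BitwiseNot(mask, mask);

                //suppress nearly unsaturated (gray) pixels
                using (ScalarArray lower = new ScalarArray(10))
                using (ScalarArray upper = new ScalarArray(255))
                    CvInvoke.InRange(s, lower, upper, s);
                CvInvoke.BitwiseAnd(mask, s, mask, null);
            }
        }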
Example #4
        public StopSignDetector(IInputArray stopSignModel)
        {
            _detector = new SURF(500);
            using (Mat redMask = new Mat())
            {
                GetRedPixelMask(stopSignModel, redMask);
                _modelKeypoints   = new VectorOfKeyPoint();
                _modelDescriptors = new Mat();
                _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false);
                if (_modelKeypoints.Size == 0)
                {
                    throw new Exception("No image feature has been found in the stop sign model");
                }
            }

            _modelDescriptorMatcher = new BFMatcher(DistanceType.L2);
            _modelDescriptorMatcher.Add(_modelDescriptors);

            _octagon = new VectorOfPoint(
                new Point[]
            {
                new Point(1, 0),
                new Point(2, 0),
                new Point(3, 1),
                new Point(3, 2),
                new Point(2, 3),
                new Point(1, 3),
                new Point(0, 2),
                new Point(0, 1)
            });
        }
Example #5
        public static Tuple <UMat, VectorOfKeyPoint> DetectAndCompute
            (SURF surf, Image <Bgr, byte> image, bool b, IInputArray inputArray = null)
        {
            var keypoints = new VectorOfKeyPoint();
            var desc      = new UMat();

            surf.DetectAndCompute(image, inputArray, keypoints, desc, b);
            return(new Tuple <UMat, VectorOfKeyPoint>(desc, keypoints));
        }
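A hypothetical call site for the helper above; "scene.jpg" is a placeholder file name. Note that the returned tuple is ordered (descriptors, keypoints), even though the keypoints are produced first:

            var surf = new SURF(500);
            using (var image = new Image<Bgr, byte>("scene.jpg"))
            {
                Tuple<UMat, VectorOfKeyPoint> result = DetectAndCompute(surf, image, false);
                UMat descriptors = result.Item1;           //one row per key point
                VectorOfKeyPoint keypoints = result.Item2;
            }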
Example #6
        public void TestSURFBlankImage()
        {
            SURF detector          = new SURF(500);
            Image <Gray, Byte> img = new Image <Gray, byte>(1024, 900);
            VectorOfKeyPoint   vp  = new VectorOfKeyPoint();
            Mat descriptors        = new Mat();

            detector.DetectAndCompute(img, null, vp, descriptors, false);
        }
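Since the input is a uniform blank image, SURF should detect nothing; a follow-up assertion along these lines (an addition, not part of the original test) would make that explicit:

            EmguAssert.IsTrue(vp.Size == 0);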
Example #7
        /// <summary>
        /// Detects the keypoints and computes the features for a sign
        /// </summary>
        /// <param name="sign"> Sign to update </param>
        private void UpdateDescriptors(TrafficSign sign)
        {
            Mat desc            = new Mat();
            VectorOfKeyPoint kp = new VectorOfKeyPoint();

            detector.DetectAndCompute(sign.ImageGray, null, kp, desc, false);
            sign.Features  = desc;
            sign.KeyPoints = kp;
        }
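TrafficSign is defined elsewhere; for the method above to compile, a minimal shape along the following lines is assumed:

        public class TrafficSign
        {
            public Image<Gray, byte> ImageGray { get; set; } //grayscale image used for detection
            public Mat Features { get; set; }                //SURF descriptors
            public VectorOfKeyPoint KeyPoints { get; set; }  //detected key points
        }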
Example #8
        private static SURFData ExecuteSurfDetection(Mat scene)
        {
            using (SURF surfDetector = new SURF(300, 4, 2, false, true))
            {
                Mat sceneDescriptors            = new Mat();
                VectorOfKeyPoint sceneKeyPoints = new VectorOfKeyPoint();
                surfDetector.DetectAndCompute(scene, null, sceneKeyPoints, sceneDescriptors, false);

                return(new SURFData
                {
                    KeyPoints = sceneKeyPoints,
                    Descriptors = sceneDescriptors
                });
            }
        }
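SURFData is a small carrier type defined elsewhere; a minimal definition consistent with its use here would be:

        public class SURFData
        {
            public VectorOfKeyPoint KeyPoints { get; set; }
            public Mat Descriptors { get; set; }
        }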
Example #9
        public UMat SURFDescriptor()
        {
            double hessianThresh = 800;
            // public SURF(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true, bool upright = false)
            SURF             surfAlgo       = new SURF(hessianThresh, 4, 2, true, false);
            VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();

            MKeyPoint[] mKeyPoints = surfAlgo.Detect(preProcessedImageInGrayScale);
            modelKeyPoints.Push(mKeyPoints);
            VectorOfKeyPoint observedKeyPoints = new VectorOfKeyPoint();
            UMat             SurfDescriptors   = new UMat();

            surfAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPoints, SurfDescriptors, true);
            //image2.Source = BitmapSourceConvert.ToBitmapSource(modelDescriptors);
            SurfDescriptors.Save("SURFDetection.jpg");
            return(SurfDescriptors);
        }
Example #10
        public Matrix <float> ComputeSingleDescriptors(string fileName) // old return Matrix<float>
        {
            Mat descsTmp = new Mat();


            using (Image <Gray, byte> img = new Image <Gray, byte> (fileName))
            {
                #region deprecated
                //VectorOfKeyPoint keyPoints = detector.DetectKeyPointsRaw(img, null);
                //descs = detector.ComputeDescriptorsRaw(img, null, keyPoints);
                #endregion

                VectorOfKeyPoint keyPoints = new VectorOfKeyPoint();
                detector.DetectAndCompute(img, null, keyPoints, descsTmp, false);
            }

            Matrix <float> descs = new Matrix <float>(descsTmp.Rows, descsTmp.Cols);
            descsTmp.CopyTo(descs);

            return(descs);
        }
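The final CopyTo relies on SURF producing single-channel CV_32F descriptors, which matches the Matrix<float> target. A defensive check (an assumption added here, not in the original) could read:

            //requires: using System.Diagnostics;
            Debug.Assert(descsTmp.Depth == DepthType.Cv32F && descsTmp.NumberOfChannels == 1);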
Example #11
        public override void Init()
        {
            base.Init();
            String filename = @".//Parameter/Model/Match Model/model.jpg";

            try
            {
                Mat mat = CvInvoke.Imread(filename, Emgu.CV.CvEnum.ImreadModes.AnyColor);
                _imageTemple = new Image <Gray, byte>(mat.Bitmap);
            }
            catch (Exception)
            {
            }
            if (actionMatchData.ModelAOIWidth != 0 && actionMatchData.ModelAOIHeight != 0)
            {
                Image <Gray, byte> image = imageModel.Clone();
                for (int i = 0; i < actionMatchData.time; i++)
                {
                    image = image.PyrDown();
                }
                image.ROI = new Rectangle(actionMatchData.ModelAOIX, actionMatchData.ModelAOIY, actionMatchData.ModelAOIWidth, actionMatchData.ModelAOIHeight);
                try
                {
                    _imageTempleAOI = new Image <Gray, byte>(new Size(actionMatchData.ModelAOIWidth, actionMatchData.ModelAOIHeight));
                    image.CopyTo(_imageTempleAOI);
                    CvInvoke.cvResetImageROI(_imageTemple);
                    a1             = _imageTempleAOI.ToUMat();
                    modelKeyPoints = new VectorOfKeyPoint();

                    surf             = new SURF(actionMatchData.iKeyPointNumber, 1, 1); //NOTE: the first argument is SURF's hessian threshold, despite the field name
                    modelDescriptors = new UMat();
                    surf.DetectAndCompute(a1, null, modelKeyPoints, modelDescriptors, false);
                }
                catch (Exception ex)
                {
                    MessageBox.Show(ex.Message);
                }
            }
        }
Example #12
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints,
                                     out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, double hessianThresh)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            //double hessianThresh = 300; //threshold: the larger this value, the fewer features survive

            Stopwatch sw;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

#if !__IOS__
            //If an NVIDIA GPU is available, use it for the computation
            if (CudaInvoke.HasCuda)
            {
                //SURF algorithm
                //Create a CUDA SURF detector
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                //On the GPU, GpuMat is used in place of cv::Mat
                using (GpuMat gpuModelImage = new GpuMat(modelImage))

                    //extract key points from the model image
                    using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                        //compute descriptors for the model key points
                        using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            //create the matcher
                            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                            {
                                surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                sw = Stopwatch.StartNew();

                                using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))

                                        //using (GpuMat tmp = new GpuMat())
                                        //using (Stream stream = new Stream())
                                        {
                                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                                            surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);
                                            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                            mask.SetTo(new MCvScalar(255));

                                            //filter the matches: discard match candidates that are not unique enough
                                            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                                            //count the non-zero entries of the mask
                                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                                            if (nonZeroCount >= 4)
                                            {
                                                //prune matches whose scale and rotation disagree with the majority
                                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                                                if (nonZeroCount >= 4)
                                                {
                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                                }
                                            }
                                        }
                                sw.Stop();
                            }
            }
            else
#endif
            {
                using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                    using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read)) {
                        //create the SURF detector
                        SURF surfCPU = new SURF(hessianThresh);

                        //extract descriptors from the model image
                        UMat modelDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                        sw = Stopwatch.StartNew();

                        //extract descriptors from the observed image
                        UMat observedDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                        //brute-force matching
                        BFMatcher matcher = new BFMatcher(DistanceType.L2);
                        matcher.Add(modelDescriptors);
                        //matches: VectorOfVectorOfDMatch
                        //observedDescriptors: UMat
                        matcher.KnnMatch(observedDescriptors, matches, k, null);

                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        //filter the matches: discard match candidates that are not unique enough
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                        //count the non-zero entries of the mask
                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            //discard features whose scale and rotation disagree with the majority of matches
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                //estimate the homography with RANSAC; it stays null if it cannot be recovered
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                            }
                        }
                        sw.Stop();
                    }
            }
            matchTime = sw.ElapsedMilliseconds;
        }
Example #13
        static void Main(string[] args)
        {
            Console.Write("image1:");
            var fa = Console.ReadLine().Replace("\"", "");

            Console.Write("image2:");
            var fb = Console.ReadLine().Replace("\"", "");

            var a = (new Image <Bgr, byte>(fa).Resize(0.2, Inter.Area)).SubR(new Bgr(255, 255, 255));
            var b = new Image <Bgr, byte>(fb).Resize(0.2, Inter.Area);

            Mat homography        = null;
            Mat mask              = null;
            var modelKeyPoints    = new VectorOfKeyPoint();
            var observedKeyPoints = new VectorOfKeyPoint();
            var matches           = new VectorOfVectorOfDMatch();

            UMat a1 = a.Mat.ToUMat(AccessType.Read);
            UMat b1 = b.Mat.ToUMat(AccessType.Read);

            SURF surf                = new SURF(300);
            UMat modelDescriptors    = new UMat();
            UMat observedDescriptors = new UMat();

            surf.DetectAndCompute(a1, null, modelKeyPoints, modelDescriptors, false);       //detect and compute in one call; OpenCV's two separate steps can also be used individually
            surf.DetectAndCompute(b1, null, observedKeyPoints, observedDescriptors, false);

            var matcher = new BFMatcher(DistanceType.L2);       //start matching

            matcher.Add(modelDescriptors);
            matcher.KnnMatch(observedDescriptors, matches, 2, null);
            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask); //remove non-unique matches

            int Count = CvInvoke.CountNonZero(mask);                 //used to locate the template in the image

            if (Count >= 4)
            {
                Count = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                if (Count >= 4)
                {
                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                }
            }

            //CvInvoke.Imshow("a", a);
            //CvInvoke.Imshow("b", b);

            Mat result = new Mat();

            //Features2DToolbox.DrawMatches(a.Convert<Gray, byte>().Mat, modelKeyPoints, b.Convert<Gray, byte>().Mat, observedKeyPoints, matches, result, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 255), mask);
            Features2DToolbox.DrawMatches(a, modelKeyPoints, b, observedKeyPoints, matches, result, new MCvScalar(0, 0, 255), new MCvScalar(0, 255, 255), mask);
            //draw the match relationships
            //if (homography != null)     //if the template was found in the image, draw its outline
            //{
            //    Rectangle rect = new Rectangle(Point.Empty, a.Size);
            //    PointF[] points = new PointF[]
            //    {
            //      new PointF(rect.Left, rect.Bottom),
            //      new PointF(rect.Right, rect.Bottom),
            //      new PointF(rect.Right, rect.Top),
            //      new PointF(rect.Left, rect.Top)
            //    };
            //    points = CvInvoke.PerspectiveTransform(points, homography);
            //    Point[] points2 = Array.ConvertAll<PointF, Point>(points, Point.Round);
            //    VectorOfPoint vp = new VectorOfPoint(points2);
            //    CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 15);
            //}

            CvInvoke.Imshow("result", result);
            CvInvoke.WaitKey();

            //Console.ReadLine();
        }
Example #14
        /// <summary>
        /// Sign search. Canny method (edge-based search)
        /// </summary>
        /// <param name="img">Source image</param>
        /// <param name="brickSingList">List of signs found in the image</param>
        /// <param name="boxList">List of regions containing a sign</param>
        /// <param name="contours">Contours</param>
        /// <param name="hierachy"></param>
        /// <param name="idx"></param>
        private void FindBrickSing(Mat img, List <Mat> brickSingList, List <Rectangle> boxList, VectorOfVectorOfPoint contours, int[,] hierachy, int idx)
        {
            for (; idx >= 0; idx = hierachy[idx, 0])
            {
                using (VectorOfPoint c = contours[idx])
                    using (VectorOfPoint approx = new VectorOfPoint())
                    {
                        CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * 0.02, true);
                        double area = CvInvoke.ContourArea(approx);

                        if (area > 200)
                        {
                            double ratio = CvInvoke.MatchShapes(_octagon, approx, ContoursMatchType.I3);

                            if (ratio > 0.1) //no suitable match of the contour shape
                            {
                                if (hierachy[idx, 2] >= 0)
                                {
                                    FindBrickSing(img, brickSingList, boxList, contours, hierachy, hierachy[idx, 2]);
                                }
                                continue;
                            }

                            Rectangle box = CvInvoke.BoundingRectangle(c);

                            Mat candidate = new Mat();

                            //extract the candidate region for the match
                            using (Mat tmp = new Mat(img, box))
                            {
                                CvInvoke.CvtColor(tmp, candidate, ColorConversion.Bgr2Gray);
                            }

                            //set pixels outside the contour to zero
                            using (Mat mask = new Mat(candidate.Size.Height, candidate.Width, DepthType.Cv8U, 1))
                            {
                                mask.SetTo(new MCvScalar(0));
                                CvInvoke.DrawContours(mask, contours, idx, new MCvScalar(255), -1, LineType.EightConnected, null, int.MaxValue, new Point(-box.X, -box.Y));
                                double mean = CvInvoke.Mean(candidate, mask).V0;

                                CvInvoke.Threshold(candidate, candidate, mean, 255, ThresholdType.Binary);
                                CvInvoke.BitwiseNot(candidate, candidate);
                                CvInvoke.BitwiseNot(mask, mask);

                                candidate.SetTo(new MCvScalar(0), mask);
                            }

                            int              minMatchCount        = 0;
                            double           uniquenessThreshold  = 0.0;
                            VectorOfKeyPoint observaredKeyPoint   = new VectorOfKeyPoint();
                            Mat              observeredDescriptor = new Mat();
                            _detector.DetectAndCompute(candidate, null, observaredKeyPoint, observeredDescriptor, false);

                            //match the candidate against the model
                            if (observaredKeyPoint.Size >= minMatchCount)
                            {
                                int i = 2;
                                Mat mask;

                                using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                                {
                                    _modelDescriptorMatcher.KnnMatch(observeredDescriptor, matches, i, null);
                                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                    mask.SetTo(new MCvScalar(255)); //initialize the mask to all-255 before voting (cf. Example #16)
                                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                                }

                                int nonZeroCount = CvInvoke.CountNonZero(mask);

                                if (nonZeroCount >= minMatchCount)
                                {
                                    boxList.Add(box);
                                    brickSingList.Add(candidate);
                                }
                            }
                        }
                    }
            }
        }
Example #15
        public void TestSURFDetector2()
        {
            //Trace.WriteLine("Size of MCvSURFParams: " + Marshal.SizeOf(typeof(MCvSURFParams)));
            Image <Gray, byte> box = EmguAssert.LoadImage <Gray, byte>("box.png");
            SURF detector          = new SURF(400);

            Stopwatch        watch = Stopwatch.StartNew();
            VectorOfKeyPoint vp1   = new VectorOfKeyPoint();
            Mat descriptors1       = new Mat();

            detector.DetectAndCompute(box, null, vp1, descriptors1, false);
            watch.Stop();
            EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset();
            watch.Start();
            MKeyPoint[] keypoints = detector.Detect(box, null);
            //ImageFeature<float>[] features2 = detector.Compute(box, keypoints);
            watch.Stop();
            EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset();
            watch.Start();
            //MCvSURFParams p = detector.SURFParams;

            //SURFFeature[] features3 = box.ExtractSURF(ref p);
            //watch.Stop();
            //EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            // EmguAssert.IsTrue(features1.Length == features2.Length);
            //EmguAssert.IsTrue(features2.Length == features3.Length);

            PointF[] pts =
#if NETFX_CORE
                Extensions.
#else
                Array.
#endif
                ConvertAll <MKeyPoint, PointF>(keypoints, delegate(MKeyPoint mkp)
            {
                return(mkp.Point);
            });
            //SURFFeature[] features = box.ExtractSURF(pts, null, ref detector);
            //int count = features.Length;

            /*
             * for (int i = 0; i < features1.Length; i++)
             * {
             * Assert.AreEqual(features1[i].KeyPoint.Point, features2[i].KeyPoint.Point);
             * float[] d1 = features1[i].Descriptor;
             * float[] d2 = features2[i].Descriptor;
             *
             * for (int j = 0; j < d1.Length; j++)
             *    Assert.AreEqual(d1[j], d2[j]);
             * }*/

            foreach (MKeyPoint kp in keypoints)
            {
                box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
            }
        }
Example #16
        private void FindStopSign(Mat img, List <Mat> stopSignList, List <Rectangle> boxList, VectorOfVectorOfPoint contours, int[,] hierachy, int idx)
        {
            for (; idx >= 0; idx = hierachy[idx, 0])
            {
                using (VectorOfPoint c = contours[idx])
                    using (VectorOfPoint approx = new VectorOfPoint())
                    {
                        CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * 0.02, true);
                        double area = CvInvoke.ContourArea(approx);
                        if (area > 200)
                        {
                            double ratio = CvInvoke.MatchShapes(_octagon, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);

                            if (ratio > 0.1) //not a good match of contour shape
                            {
                                //check children
                                if (hierachy[idx, 2] >= 0)
                                {
                                    FindStopSign(img, stopSignList, boxList, contours, hierachy, hierachy[idx, 2]);
                                }
                                continue;
                            }

                            Rectangle box = CvInvoke.BoundingRectangle(c);

                            Mat candidate = new Mat();
                            using (Mat tmp = new Mat(img, box))
                                CvInvoke.CvtColor(tmp, candidate, ColorConversion.Bgr2Gray);

                            //set the value of pixels not in the contour region to zero
                            using (Mat mask = new Mat(candidate.Size.Height, candidate.Width, DepthType.Cv8U, 1))
                            {
                                mask.SetTo(new MCvScalar(0));
                                CvInvoke.DrawContours(mask, contours, idx, new MCvScalar(255), -1, LineType.EightConnected, null, int.MaxValue, new Point(-box.X, -box.Y));

                                double mean = CvInvoke.Mean(candidate, mask).V0;
                                CvInvoke.Threshold(candidate, candidate, mean, 255, ThresholdType.Binary);
                                CvInvoke.BitwiseNot(candidate, candidate);
                                CvInvoke.BitwiseNot(mask, mask);

                                candidate.SetTo(new MCvScalar(0), mask);
                            }

                            int              minMatchCount         = 8;
                            double           uniquenessThreshold   = 0.8;
                            VectorOfKeyPoint _observeredKeypoint   = new VectorOfKeyPoint();
                            Mat              _observeredDescriptor = new Mat();
                            _detector.DetectAndCompute(candidate, null, _observeredKeypoint, _observeredDescriptor, false);

                            if (_observeredKeypoint.Size >= minMatchCount)
                            {
                                int k = 2;

                                Mat mask;

                                using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                                {
                                    _modelDescriptorMatcher.KnnMatch(_observeredDescriptor, matches, k, null);
                                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                    mask.SetTo(new MCvScalar(255));
                                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);
                                }

                                int nonZeroCount = CvInvoke.CountNonZero(mask);
                                if (nonZeroCount >= minMatchCount)
                                {
                                    boxList.Add(box);
                                    stopSignList.Add(candidate);
                                }
                            }
                        }
                    }
            }
        }
Example #17
        /// <summary>
        /// Detect image using SURF
        /// </summary>
        /// <param name="modelImage"></param>
        /// <param name="observedImage"></param>
        /// <param name="modelKeyPoints"></param>
        /// <param name="observedKeyPoints"></param>
        /// <param name="matches"></param>
        /// <param name="mask"></param>
        /// <param name="homography"></param>
        public static void FindMatch(Mat modelImage, Mat observedImage, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, out int score)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                    //extract features from the object image
                    using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                            {
                                surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);

                                // extract features from the observed image
                                using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                        //using (GpuMat tmp = new GpuMat())
                                        //using (Stream stream = new Stream())
                                        {
                                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                                            surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                            mask.SetTo(new MCvScalar(255));
                                            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                            score = 0;
                                            for (int i = 0; i < matches.Size; i++)
                                            {
                                                if ((byte)mask.GetData().GetValue(i, 0) == 0)
                                                {
                                                    continue;
                                                }
                                                foreach (var e in matches[i].ToArray())
                                                {
                                                    ++score;
                                                }
                                            }

                                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                                            if (nonZeroCount >= 4)
                                            {
                                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                                           matches, mask, 1.5, 20);
                                                if (nonZeroCount >= 4)
                                                {
                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                          observedKeyPoints, matches, mask, 2);
                                                }
                                            }
                                        }
                            }
            }
            else
            {
                using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                    using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                    {
                        SURF surfCPU = new SURF(hessianThresh);
                        //extract features from the object image
                        UMat modelDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);

                        // extract features from the observed image
                        UMat observedDescriptors = new UMat();
                        surfCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);
                        BFMatcher matcher = new BFMatcher(DistanceType.L2);
                        matcher.Add(modelDescriptors);

                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                        mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                        mask.SetTo(new MCvScalar(255));
                        Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                        score = 0;
                        for (int i = 0; i < matches.Size; i++)
                        {
                            //NOTE: the mask check is commented out here, so every k-NN match contributes to the score
                            //if (mask.GetData(true)[0] == 0) continue;
                            foreach (var e in matches[i].ToArray())
                            {
                                ++score;
                            }
                        }

                        int nonZeroCount = CvInvoke.CountNonZero(mask);
                        if (nonZeroCount >= 4)
                        {
                            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                       matches, mask, 1.5, 20);
                            if (nonZeroCount >= 4)
                            {
                                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                      observedKeyPoints, matches, mask, 2);
                            }
                        }
                    }
            }
        }
Example #18
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography)
        {
            int    k = 2;
            double uniquenessThreshold = 0.8;
            double hessianThresh       = 300;

            Stopwatch watch;

            homography = null;

            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

#if !__IOS__
            if (CudaInvoke.HasCuda)
            {
                CudaSURF surfCuda = new CudaSURF((float)hessianThresh);
                using (GpuMat gpuModelImage = new GpuMat(modelImage))
                    //extract features from the object image
                    using (GpuMat gpuModelKeyPoints = surfCuda.DetectKeyPointsRaw(gpuModelImage, null))
                        using (GpuMat gpuModelDescriptors = surfCuda.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                            using (CudaBFMatcher matcher = new CudaBFMatcher(DistanceType.L2))
                            {
                                surfCuda.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                                watch = Stopwatch.StartNew();

                                // extract features from the observed image
                                using (GpuMat gpuObservedImage = new GpuMat(observedImage))
                                    using (GpuMat gpuObservedKeyPoints = surfCuda.DetectKeyPointsRaw(gpuObservedImage, null))
                                        using (GpuMat gpuObservedDescriptors = surfCuda.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                                        //using (GpuMat tmp = new GpuMat())
                                        //using (Stream stream = new Stream())
                                        {
                                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, matches, k);

                                            surfCuda.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                                            mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                            mask.SetTo(new MCvScalar(255));
                                            Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                                            int nonZeroCount = CvInvoke.CountNonZero(mask);
                                            if (nonZeroCount >= 4)
                                            {
                                                nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                                           matches, mask, 1.5, 20);
                                                if (nonZeroCount >= 4)
                                                {
                                                    homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                                          observedKeyPoints, matches, mask, 2);
                                                }
                                            }
                                        }
                                watch.Stop();
                            }
            }
            else
#endif
            {
                //using (UMat uModelImage = modelImage.ToUMat(AccessType.Read))
                //using (UMat uObservedImage = observedImage.ToUMat(AccessType.Read))
                {
                    SURF surfCPU = new SURF(hessianThresh);
                    //extract features from the object image
                    UMat modelDescriptors = new UMat();
                    surfCPU.DetectAndCompute(modelImage, null, modelKeyPoints, modelDescriptors, false);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    UMat observedDescriptors = new UMat();
                    surfCPU.DetectAndCompute(observedImage, null, observedKeyPoints, observedDescriptors, false);
                    BFMatcher matcher = new BFMatcher(DistanceType.L2);
                    matcher.Add(modelDescriptors);

                    matcher.KnnMatch(observedDescriptors, matches, k, null);
                    mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                    mask.SetTo(new MCvScalar(255));
                    Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);

                    int nonZeroCount = CvInvoke.CountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints,
                                                                                   matches, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                        {
                            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints,
                                                                                                  observedKeyPoints, matches, mask, 2);
                        }
                    }

                    watch.Stop();
                }
            }
            matchTime = watch.ElapsedMilliseconds;
        }
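A sketch of how the homography produced by FindMatch is typically consumed (the same pattern appears live in Example #19 and commented out in Example #13): project the model image's corners into the observed image and draw the resulting quadrilateral. Here "result" is assumed to be an image produced by Features2DToolbox.DrawMatches:

            if (homography != null)
            {
                Rectangle rect = new Rectangle(Point.Empty, modelImage.Size);
                PointF[] points =
                {
                    new PointF(rect.Left, rect.Bottom),
                    new PointF(rect.Right, rect.Bottom),
                    new PointF(rect.Right, rect.Top),
                    new PointF(rect.Left, rect.Top)
                };
                //map the model corners into observed-image coordinates
                points = CvInvoke.PerspectiveTransform(points, homography);
                Point[] corners = Array.ConvertAll<PointF, Point>(points, Point.Round);
                using (VectorOfPoint vp = new VectorOfPoint(corners))
                    CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
            }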
Example #19
        public override void ActionExcute()
        {
            Stopwatch sw = new Stopwatch();

            sw.Start();
            Mat homography = null;
            Mat mask       = null;

            VectorOfKeyPoint       observedKeyPoints = new VectorOfKeyPoint();
            VectorOfVectorOfDMatch matches           = new VectorOfVectorOfDMatch();

            Image <Gray, byte> image = imageInput.Clone();

            for (int i = 0; i < actionMatchData.time; i++)
            {
                image = image.PyrDown();
            }
            if (0 != actionMatchData.InputAOIWidth && 0 != actionMatchData.InputAOIHeight)
            {
                image.ROI = new Rectangle(actionMatchData.InputAOIX, actionMatchData.InputAOIY, actionMatchData.InputAOIWidth, actionMatchData.InputAOIHeight);
            }
            PointF center;

            if (null != modelDescriptors)
            {
                UMat b1 = image.ToUMat();


                UMat observedDescriptors = new UMat();

                //detect and compute in one call; OpenCV's two separate steps can also be used individually
                surf.DetectAndCompute(b1, null, observedKeyPoints, observedDescriptors, false);


                BFMatcher matcher = new BFMatcher(DistanceType.L2Sqr);       //start matching
                matcher.Add(modelDescriptors);
                matcher.KnnMatch(observedDescriptors, matches, 2, null);
                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                mask.SetTo(new MCvScalar(255));
                Features2DToolbox.VoteForUniqueness(matches, 0.8, mask); //remove non-unique matches

                int Count = CvInvoke.CountNonZero(mask);                 //used to locate the template in the image
                if (Count >= 4)
                {
                    Count = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                    if (Count >= 4)
                    {
                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                    }
                }

                Mat result1 = new Mat();
                Features2DToolbox.DrawMatches(_imageTempleAOI.Convert <Gray, byte>().Mat, modelKeyPoints, image.Convert <Gray, byte>().Mat, observedKeyPoints, matches, result1, new MCvScalar(255, 0, 255), new MCvScalar(0, 255, 255), mask);
                //draw the match relationships
                if (homography != null)     //if the template was found in the image, draw it
                {
                    Rectangle rect   = new Rectangle(Point.Empty, _imageTempleAOI.Size);
                    PointF[]  points = new PointF[]
                    {
                        new PointF(rect.Left, rect.Bottom),
                        new PointF(rect.Right, rect.Bottom),
                        new PointF(rect.Right, rect.Top),
                        new PointF(rect.Left, rect.Top)
                    };
                    points = CvInvoke.PerspectiveTransform(points, homography);
                    Point[]       points2 = Array.ConvertAll <PointF, Point>(points, Point.Round);
                    VectorOfPoint vp      = new VectorOfPoint(points2);
                    CvInvoke.Polylines(result1, vp, true, new MCvScalar(255, 0, 0, 255), 15);
                    dResultX = (points[0].X + points[1].X + points[2].X + points[3].X) / 4 * ((float)Math.Pow(2, actionMatchData.time));
                    dResultY = (points[0].Y + points[1].Y + points[2].Y + points[3].Y) / 4 * ((float)Math.Pow(2, actionMatchData.time));

                    Point point1 = new Point(Convert.ToInt32((points[0].X + points[3].X) / 2), Convert.ToInt32((points[0].Y + points[3].Y) / 2));
                    Point point2 = new Point(Convert.ToInt32((points[1].X + points[2].X) / 2), Convert.ToInt32((points[1].Y + points[2].Y) / 2));
                    CvInvoke.Line(result1, point1, point2, new MCvScalar(255, 0, 0), 1, Emgu.CV.CvEnum.LineType.EightConnected, 0);

                    dResultAngle  = Math.Atan2((point2.Y - point1.Y), (point2.X - point1.X)) * 180 / Math.PI;
                    imageDescript = result1;
                }
                else
                {
                    actionRes = ActionResponse.NG;
                    return;
                }
                center = new PointF((float)dResultX, (float)dResultY);
            }

            else
            {
                center = new PointF(imageInput.Width / 2, imageInput.Height / 2);
            }


            Mat rotation = new Mat();

            CvInvoke.GetRotationMatrix2D(center, dResultAngle + actionMatchData.fOffsetAngle, 1, rotation);

            Image <Gray, float> mat = new Image <Gray, float>(new Size(3, 2));

            CvInvoke.cvSet2D(mat, 0, 2, new MCvScalar(actionMatchData.fOffsetX - dResultX));
            CvInvoke.cvSet2D(mat, 1, 2, new MCvScalar(actionMatchData.fOffsetY - dResultY));
            CvInvoke.cvSet2D(mat, 0, 0, new MCvScalar(1));
            CvInvoke.cvSet2D(mat, 1, 1, new MCvScalar(1));
            System.Drawing.Size roisize = new Size(imageInput.Bitmap.Width, imageInput.Bitmap.Height);
            try
            {
                if (null == imageResult)
                {
                    imageResult = new Image <Gray, byte>(imageInput.Size);
                }
                imageInput.Draw(new CircleF(center, 3), new Gray(255), 3);
                CvInvoke.WarpAffine(imageInput, imageResult, rotation, roisize);
                CvInvoke.WarpAffine(imageResult, imageResult, mat, roisize);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }


            actionRes = ActionResponse.OK;
            sw.Stop();
        }
Example #20
        public static void FindMatch(Mat modelImage, Mat observedImage, out long matchTime, out VectorOfKeyPoint modelKeyPoints, out VectorOfKeyPoint observedKeyPoints, VectorOfVectorOfDMatch matches, out Mat mask, out Mat homography, out long score)
        {
            int       k = 2;
            double    uniquenessThreshold = 0.80;
            Stopwatch watch;

            homography        = null;
            modelKeyPoints    = new VectorOfKeyPoint();
            observedKeyPoints = new VectorOfKeyPoint();

            using (UMat uModelImage = modelImage.GetUMat(AccessType.Read))
                using (UMat uObservedImage = observedImage.GetUMat(AccessType.Read))
                {
                    //   KAZE featureDetector = new KAZE();
                    SURF featureDetector = new SURF(100);
                    //    SIFT featureDetector = new SIFT();
                    Mat modelDescriptors = new Mat();
                    //detect and compute in one call; OpenCV's two separate steps can also be used individually
                    featureDetector.DetectAndCompute(uModelImage, null, modelKeyPoints, modelDescriptors, false);
                    watch = Stopwatch.StartNew();

                    Mat observedDescriptors = new Mat();
                    featureDetector.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false);

                    // KdTree for faster results / less accuracy
                    using (var ip = new Emgu.CV.Flann.KdTreeIndexParams())
                        using (var sp = new SearchParams())
                            //  using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp)) //start matching
                            using (BFMatcher matcher = new BFMatcher(DistanceType.L2))
                            {
                                matcher.Add(modelDescriptors);
                                matcher.KnnMatch(observedDescriptors, matches, k, null);
                                mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
                                mask.SetTo(new MCvScalar(255));
                                Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask);//remove non-unique matches

                                // Calculate score based on matches size
                                // ---------------------------------------------->
                                score = 0;
                                for (int i = 0; i < matches.Size; i++)
                                {
                                    if (mask.GetData(i)[0] == 0)
                                    {
                                        continue;
                                    }
                                    foreach (var e in matches[i].ToArray())
                                    {
                                        ++score;
                                    }
                                }
                                // <----------------------------------------------

                                int nonZeroCount = CvInvoke.CountNonZero(mask);//used to locate the template in the image
                                if (nonZeroCount >= 4)
                                {
                                    nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, matches, mask, 1.5, 20);
                                    if (nonZeroCount >= 4)
                                    {
                                        homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, matches, mask, 2);
                                    }
                                }
                            }
                    watch.Stop();
                }
            matchTime = watch.ElapsedMilliseconds;
        }
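The commented-out FLANN variant in Example #20 only changes how the matcher is constructed; the rest of the pipeline stays the same. A sketch reusing the names from the example above:

            using (var ip = new Emgu.CV.Flann.KdTreeIndexParams())
                using (var sp = new SearchParams())
                    using (DescriptorMatcher matcher = new FlannBasedMatcher(ip, sp))
                    {
                        matcher.Add(modelDescriptors);
                        matcher.KnnMatch(observedDescriptors, matches, k, null);
                    }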
Example #21
        private void Worker_DoWork(object sender, DoWorkEventArgs e)
        {
            //Invoke(new Action(delegate { Progress(int, string); }));

            #region Puzzle identification

            Invoke(new Action(delegate { Progress(0, "Loading the image."); }));
            var q1 = new Image <Bgr, byte>(ExtensionMethods.ImagePath);

            Invoke(new Action(delegate { Progress(33, "Pre-processing the image."); }));
            var w3 = ExtensionMethods.FindContours
                         (q1.Copy().Convert <Gray, byte>().GaussBlur().AdaptiveThreshold().Dilate(8).Erode());

            var avg = ExtensionMethods.CalculateAvreage(w3.Item1, Prog);
            var e4  = new VectorOfVectorOfPoint();
            for (var i = 0; i < w3.Item1.Size; i++)
            {
                if (CvInvoke.ContourArea(w3.Item1[i]) > avg)
                {
                    e4.Push(w3.Item1[i]);
                }
            }
            var boundRect = new List <Rectangle>();
            for (var i = 0; i < e4.Size; i++)
            {
                boundRect.Add(CvInvoke.BoundingRectangle(e4[i]));
            }

            var puzzels = new List <Image <Bgr, byte> >();

            Invoke(new Action(delegate { Progress(50, "Locating the puzzle pieces"); }));

            var puzzleCount = 0;
            foreach (var r in boundRect)
            {
                puzzleCount++;
                var img = q1.Copy();

                img.ROI = r;
                puzzels.Add(img.Copy());

                q1 = q1.Rectangle(r, new MCvScalar(255, 0, 255));
                q1 = q1.PutText
                     (
                    puzzleCount.ToString()
                    , new Point(r.X + r.Width / 2, r.Y + r.Height / 2)
                    , new MCvScalar(255, 0, 255)
                    , FontFace.HersheySimplex
                    , 10
                    , 20);
            }

            #endregion Puzzle identification

            #region Common features

            Invoke(new Action(delegate { Progress(66, "Detecting common features."); }));
            var surf             = new SURF(920);
            var puzzelCounter    = puzzels.Count;
            var avgPuzellXPoints = new double[puzzelCounter];
            var avgPuzellYPoints = new double[puzzelCounter];
            puzzelCounter = 0;

            Invoke(new Action(delegate { Progress(70, "Finding key points of the original image."); }));
            var orginal          = new Image <Bgr, byte>(ExtensionMethods.OrginalImagePath);
            var copyOrginal      = orginal.Copy();
            var orginalFeatures  = ExtensionMethods.DetectAndCompute(surf, copyOrginal, false);
            var odesc            = orginalFeatures.Item1;
            var orginalKeypoints = orginalFeatures.Item2;

            #endregion Common features

            #region Matching

            Invoke(new Action(delegate { Progress(80, "Trying to match the puzzle pieces."); }));

            var matcher = new BFMatcher(DistanceType.L2);
            matcher.Add(odesc);

            foreach (var puzzel in puzzels)
            {
                var pdesc         = surf.DetectAndCompute(puzzel);
                var puzzelmatches = matcher.KnnMatch(pdesc, 3);

                double x     = 0;
                double y     = 0;
                var    count = 0;

                for (var i = 0; i < puzzelmatches.Size; i++)
                {
                    var arrayOfMatches = puzzelmatches[i].ToArray();

                    foreach (var match in arrayOfMatches)
                    {
                        if (!(match.Distance > MatchDistance))
                        {
                            continue;
                        }
                        x += orginalKeypoints[match.TrainIdx].Point.X;
                        y += orginalKeypoints[match.TrainIdx].Point.Y;
                        count++;
                    }
                }

                x = x / count;
                y = y / count;

                avgPuzellXPoints[puzzelCounter] = x;
                avgPuzellYPoints[puzzelCounter] = y;
                puzzelCounter++;
            }

            #endregion Matching

            #region Arranging the puzzle

            Invoke(new Action(delegate { Progress(90, "Układanie puzzli w właściwej kolejnoścu."); }));

            var resultTab = ExtensionMethods.PlacePuzzels(XAx, YAx, avgPuzellXPoints, avgPuzellYPoints);

            Invoke(new Action(delegate { Progress(90, "Tworzenie obrazka końcowego."); }));
            var fp = ExtensionMethods.GenerateFinalpicture(XAx, YAx, resultTab, puzzels);

            var finalword = "Puzzle należy ułożyć w kolejności:" + Environment.NewLine;
            for (var i = 0; i < puzzelCounter; i++)
            {
                resultTab[i]++;                 //processing was zero-based, but the puzzle numbering starts at 1
                finalword += resultTab[i];
                finalword += " ";
                if (i != 0 && (i + 1) % XAx == 0)
                {
                    finalword += Environment.NewLine;
                }
            }

            var solution = new Bitmap(q1.Width / 2, q1.Height / 2);
            solution.DrawSymbol
                (finalword, new SolidBrush(Color.Gray), new Font(FontFamily.GenericSerif, 40), new SolidBrush(Color.Black));

            #endregion Arranging the puzzle

            #region Saving

            Invoke(new Action(delegate { Progress(99, "Saving the results."); }));

            ExtensionMethods.ImageOut.Add(q1.ToBitmap());
            ExtensionMethods.ImageOut.Add(solution);
            ExtensionMethods.ImageOut.Add(fp.ToBitmap());

            Invoke(new Action(delegate { Progress(100, "DONE"); }));

            Thread.Sleep(100);

            #endregion Saving
        }