static void Main(string[] args)
        {
            var img1 = new Mat(@"..\..\Images\left.png", LoadMode.GrayScale);
            Cv2.ImShow("Left", img1);
            Cv2.WaitKey(1); // do events

            var img2 = new Mat(@"..\..\Images\right.png", LoadMode.GrayScale);
            Cv2.ImShow("Right", img2);
            Cv2.WaitKey(1); // do events

            // detecting keypoints
            // FastFeatureDetector, StarDetector, SIFT, SURF, ORB, BRISK, MSER, GFTTDetector, DenseFeatureDetector, SimpleBlobDetector
            // SURF = Speeded Up Robust Features
            var detector = new SURF(hessianThreshold: 400); // a good value is typically 300 to 500, depending on the image contrast
            var keypoints1 = detector.Detect(img1);
            var keypoints2 = detector.Detect(img2);

            // computing descriptors, BRIEF, FREAK
            // BRIEF = Binary Robust Independent Elementary Features
            var extractor = new BriefDescriptorExtractor();
            var descriptors1 = new Mat();
            var descriptors2 = new Mat();
            extractor.Compute(img1, ref keypoints1, descriptors1);
            extractor.Compute(img2, ref keypoints2, descriptors2);

            // matching descriptors; BRIEF produces binary descriptors, so match with Hamming distance
            var matcher = new BFMatcher(NormType.Hamming);
            var matches = matcher.Match(descriptors1, descriptors2);
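
            // A possible refinement, not in the original sample: k-NN matching with
            // Lowe's ratio test usually rejects false matches better than plain Match.
            // Assumes "using System.Linq;" at the top of the file; the 0.75 threshold
            // is a common choice, not a fixed constant.
            var knnMatches = matcher.KnnMatch(descriptors1, descriptors2, k: 2);
            var goodMatches = knnMatches
                .Where(m => m.Length == 2 && m[0].Distance < 0.75f * m[1].Distance)
                .Select(m => m[0])
                .ToArray();
            // goodMatches can be passed to Cv2.DrawMatches below in place of matches.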

            // drawing the results
            var imgMatches = new Mat();
            Cv2.DrawMatches(img1, keypoints1, img2, keypoints2, matches, imgMatches);
            Cv2.ImShow("Matches", imgMatches);
            Cv2.WaitKey(1); // do events

            Cv2.WaitKey(0);

            Cv2.DestroyAllWindows();
            img1.Dispose();
            img2.Dispose();
        }
Example No. 2
        static void test_svm()
        {
            FileStorage fs   = new FileStorage("test.yaml", FileStorage.Mode.Read);
            FileNode    n    = fs["voca"];
            Mat         voca = new Mat();

            n.ReadMat(voca);

            SURF      surf    = new SURF(400);
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            BOWImgDescriptorExtractor bowDex = new BOWImgDescriptorExtractor(surf, matcher);

            bowDex.SetVocabulary(voca);

            SVM svm = new SVM();

            // train_svm (Example No. 11) writes the SVM parameters and the vocabulary into a single file
            svm.Read(fs.GetRoot());

            foreach (string s in System.IO.Directory.GetFiles(@"C:\projects\local\testMQ\testMQ\bin\Debug\icons"))
            {
                Image <Bgr, Byte> test_img = new Image <Bgr, byte>(s);
                Mat ii = new Mat();
                CvInvoke.CvtColor(test_img, ii, ColorConversion.Bgr2Gray);
                MKeyPoint[] kp   = surf.Detect(ii);
                Mat         desc = new Mat();
                bowDex.Compute(ii, new VectorOfKeyPoint(kp), desc);
                float r = svm.Predict(desc);
                Program.logIt(string.Format("{0}={1}", s, r));
            }
        }
Example No. 3
        private void FrmMain_Shown(object sender, EventArgs e)
        {
            this.list = Ptma.LoadFromPath("config.xml");
            for (int i = 0; i < list.Count; i++)
            {
                TrainedTemplate trainedTemplate = new TrainedTemplate();
                trainedTemplate.templateImage = Cv2.ImRead(list[i].imageSrc, OpenCvSharp.LoadMode.Color);

                SURF surf = new SURF();
                // detect the template image's keypoints
                KeyPoint[] templateKeyPoints = surf.Detect(trainedTemplate.templateImage);
                // compute the template image's descriptors (Compute allocates the output Mat itself)
                Mat templateDescriptors = new Mat();
                surf.Compute(trainedTemplate.templateImage, ref templateKeyPoints, templateDescriptors);
                trainedTemplate.templateDescriptors = templateDescriptors;
                trainedTemplate.templateKeyPoints   = templateKeyPoints;
                this.toolList.Add(trainedTemplate);
            }
            this.dgvMain.DataSource = this.list;

            Thread bgThread = new Thread(CaptureAndAnalyse);

            bgThread.IsBackground = true;
            bgThread.Start();
        }
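
A purely hypothetical sketch, since CaptureAndAnalyse is not included in this snippet: one plausible shape for the worker, assuming a screen-capture helper and the Match helper from Example No. 15 (both assumptions, not the original code):

        private void CaptureAndAnalyse()
        {
            while (true)
            {
                Bitmap frame = CaptureScreen(); // hypothetical capture helper, not in the original
                foreach (TrainedTemplate t in this.toolList)
                {
                    MatchResult r = Match(frame, t); // Match as in Example No. 15
                    if (r != null)
                    {
                        // a template was localized; react here (e.g. update the UI)
                    }
                }
                Thread.Sleep(100); // throttle the background loop
            }
        }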
Example No. 5
        private void MatchBySurf(Mat src1, Mat src2)
        {
            var gray1 = new Mat();
            var gray2 = new Mat();

            Cv2.CvtColor(src1, gray1, ColorConversion.BgrToGray);
            Cv2.CvtColor(src2, gray2, ColorConversion.BgrToGray);

            var surf = new SURF(500, 4, 2, true);

            // Detect the keypoints and generate their descriptors using SURF
            // (Run does both in one pass, so no separate Detect calls are needed)
            KeyPoint[] keypoints1, keypoints2;
            var descriptors1 = new MatOfFloat();
            var descriptors2 = new MatOfFloat();
            surf.Run(gray1, null, out keypoints1, descriptors1);
            surf.Run(gray2, null, out keypoints2, descriptors2);

            // Match descriptor vectors 
            var bfMatcher = new BFMatcher(NormType.L2, false);
            var flannMatcher = new FlannBasedMatcher();
            DMatch[] bfMatches = bfMatcher.Match(descriptors1, descriptors2);
            DMatch[] flannMatches = flannMatcher.Match(descriptors1, descriptors2);

            // Draw matches
            var bfView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, bfMatches, bfView);
            var flannView = new Mat();
            Cv2.DrawMatches(gray1, keypoints1, gray2, keypoints2, flannMatches, flannView);

            using (new Window("SURF matching (by BFMatcher)", WindowMode.AutoSize, bfView))
            using (new Window("SURF matching (by FlannBasedMatcher)", WindowMode.AutoSize, flannView))
            {
                Cv2.WaitKey();
            }
        }
Example No. 6
        static void Main(string[] args)
        {
            var image        = new Image <Bgr, byte>("RGB.jpg").Resize(0.4, Inter.Area);
            var image_gray   = image.Convert <Gray, byte>();
            var surfDetector = new SURF(1000);
            var keyPoints    = surfDetector.Detect(image_gray);

            foreach (var point in keyPoints)
            {
                CvInvoke.Circle(image, new Point((int)point.Point.X, (int)point.Point.Y), 1, new MCvScalar(0, 0, 255, 255), 2);
            }
            CvInvoke.Imshow("result", image);
            CvInvoke.WaitKey();
        }
Example No. 7
        //Set the target image and extract its keypoints and descriptor (SURF)
        //Register the target and extract its descriptor
        public void setTarget(string filename)
        {
            target = Cv2.ImRead(filename, LoadMode.GrayScale);

            t_keypoints = surfobj.Detect(target);                   //SURF keypoint
            surfobj.Compute(target, ref t_keypoints, t_descriptor); //SURF descriptor
            obj_corners = new Point2d[4];

            //rectangle matching the target's size (for the RANSAC homography step)
            obj_corners[0] = new Point2d(0, 0);
            obj_corners[1] = new Point2d(target.Cols, 0);
            obj_corners[2] = new Point2d(target.Cols, target.Rows);
            obj_corners[3] = new Point2d(0, target.Rows);
        }
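
The fields used above (surfobj, target, t_keypoints, t_descriptor, obj_corners) belong to the surrounding class, which is not shown. A minimal companion sketch, assuming those fields plus "using System.Linq;", of how the registered target might then be located in a scene; this method is an illustration, not part of the original class:

        public Point2d[] LocateTarget(Mat scene)
        {
            // detect and describe the scene with the same SURF instance
            KeyPoint[] s_keypoints = surfobj.Detect(scene);
            Mat s_descriptor = new Mat();
            surfobj.Compute(scene, ref s_keypoints, s_descriptor);

            // match the target descriptors against the scene descriptors
            var matcher = new BFMatcher(NormType.L2);
            DMatch[] matches = matcher.Match(t_descriptor, s_descriptor);

            // fit a homography with RANSAC and project the target's corner
            // rectangle (obj_corners) into the scene
            Point2d[] src = matches.Select(m => new Point2d(t_keypoints[m.QueryIdx].Pt.X, t_keypoints[m.QueryIdx].Pt.Y)).ToArray();
            Point2d[] dst = matches.Select(m => new Point2d(s_keypoints[m.TrainIdx].Pt.X, s_keypoints[m.TrainIdx].Pt.Y)).ToArray();
            Mat H = Cv2.FindHomography(src, dst, HomographyMethod.Ransac);
            return Cv2.PerspectiveTransform(obj_corners, H);
        }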
Example No. 8
        public UMat SURFDescriptor()
        {
            double hessianThresh = 800;
            // public SURF(double hessianThresh, int nOctaves = 4, int nOctaveLayers = 2, bool extended = true, bool upright = false)
            SURF             surfAlgo       = new SURF(hessianThresh, 4, 2, true, false);
            VectorOfKeyPoint modelKeyPoints = new VectorOfKeyPoint();

            MKeyPoint[] mKeyPoints = surfAlgo.Detect(preProcessedImageInGrayScale);
            modelKeyPoints.Push(mKeyPoints);
            UMat SurfDescriptors = new UMat();

            surfAlgo.DetectAndCompute(preProcessedImageInGrayScale, null, modelKeyPoints, SurfDescriptors, true);
            //image2.Source = BitmapSourceConvert.ToBitmapSource(modelDescriptors);
            SurfDescriptors.Save("SURFDetection.jpg"); // dumps the raw descriptor matrix to disk as an image for inspection
            return(SurfDescriptors);
        }
Example No. 9
        public ImageModel ConvertImagePathToImageModel(string imagePath)
        {
            Mat imgObject = Cv2.ImRead(imagePath, ImreadModes.Grayscale);

            Mat    descriptorsObject = new Mat();
            double minHessian        = 400;

            SURF detector = SURF.Create(minHessian, extended: false);

            var keypointsObject = detector.Detect(imgObject).Take(1000).ToArray();

            detector.Compute(imgObject, ref keypointsObject, descriptorsObject);

            var imageModel = new ImageModel()
            {
                ImagePath  = imagePath,
                ImageHash  = ImageHelper.GetImageHash(imagePath),
                Descriptor = ConvertOpenCVTypes.ConvertMatToFloatArray(descriptorsObject)
            };

            return(imageModel);
        }
Example No. 10
        public AlgorithmResult DetectSurf(
            string filename,
            KeypointType kpsType,
            double hessianThresh,
            int octaves,
            int octaveLayers)
        {
            AlgorithmResult   result      = new AlgorithmResult();
            Image <Bgr, byte> image       = ImageHelper.GetImage(filename);
            Image <Bgr, byte> resultImage = new Image <Bgr, byte>(filename);

            // Get features from image
            var surf      = new SURF(hessianThresh, octaves, octaveLayers);
            var keyPoints = surf.Detect(image);

            DrawKeypoints(
                image,
                new VectorOfKeyPoint(keyPoints),
                resultImage,
                new Bgr(Color.FromArgb(255, 77, 77)),
                GetKeypointDraw(kpsType));

            result.ImageArray = ImageHelper.SetImage(resultImage);
            result.KeyDatas   = new List <KeyPointModel>();
            result.KeyDatas.AddRange(keyPoints.Select(k => new KeyPointModel()
            {
                X        = k.Point.X,
                Y        = k.Point.Y,
                Size     = k.Size,
                Angle    = k.Angle,
                Response = k.Response,
                Octave   = k.Octave,
                ClassId  = k.ClassId
            }));
            return(result);
        }
Example No. 11
        static void train_svm()
        {
            int           n_samples          = 0;
            SURF          surf               = new SURF(400);
            List <Bitmap> samples            = new List <Bitmap>();
            List <Tuple <Bitmap, int> > data = new List <Tuple <Bitmap, int> >();

            /*
             * foreach (string s in System.IO.Directory.GetFiles("mail_samples"))
             * {
             *  Bitmap f1 = new Bitmap(s);//ImageDecoder.DecodeFromFile(s);
             *  data.Add(new Tuple<Bitmap, int>(f1, +1));
             * }
             * foreach (string s in System.IO.Directory.GetFiles("phone_icons"))
             * {
             *  Bitmap f1 = new Bitmap(s); //ImageDecoder.DecodeFromFile(s);
             *  data.Add(new Tuple<Bitmap, int>(f1, -1));
             * }
             */
            foreach (string s in System.IO.Directory.GetFiles(@"C:\test\iphone_icon"))
            {
                Bitmap f1 = new Bitmap(s);//ImageDecoder.DecodeFromFile(s);
                if (string.Compare(System.IO.Path.GetFileNameWithoutExtension(s), "temp_1") == 0 ||
                    string.Compare(System.IO.Path.GetFileNameWithoutExtension(s), "scoll_selected_icon") == 0
                    )
                {
                    data.Add(new Tuple <Bitmap, int>(f1, +1));
                }
                else
                {
                    data.Add(new Tuple <Bitmap, int>(f1, 0));
                }
            }

            n_samples = data.Count;

            // compute the BoW input: stack every image's SURF descriptors into one matrix
            Mat m = new Mat();

            foreach (Tuple <Bitmap, int> v in data)
            {
                Image <Bgr, Byte> i = new Image <Bgr, byte>(v.Item1);
                Mat ii = new Mat();
                CvInvoke.CvtColor(i, ii, ColorConversion.Bgr2Gray);
                MKeyPoint[] kp   = surf.Detect(ii);
                Mat         desc = new Mat();
                surf.Compute(ii, new VectorOfKeyPoint(kp), desc);
                m.PushBack(desc);
            }
            // Create the vocabulary with k-means: 16 clusters, so each BoW descriptor has 16 dimensions.
            MCvTermCriteria  tc         = new MCvTermCriteria(100, 0.00001);
            BOWKMeansTrainer bowTrainer = new BOWKMeansTrainer(16, tc, 3, KMeansInitType.PPCenters);

            bowTrainer.Add(m);
            Mat voca = new Mat();

            bowTrainer.Cluster(voca);
            //
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            BOWImgDescriptorExtractor bowDex = new BOWImgDescriptorExtractor(surf, matcher);

            bowDex.SetVocabulary(voca);

            //
            Mat tDesc = new Mat();
            Matrix <int> tLabel = new Matrix <int>(n_samples, 1);

            for (int j = 0; j < data.Count; j++)
            {
                Image <Bgr, Byte> i = new Image <Bgr, byte>(data[j].Item1);
                Mat ii = new Mat();
                CvInvoke.CvtColor(i, ii, ColorConversion.Bgr2Gray);
                MKeyPoint[] kp   = surf.Detect(ii);
                Mat         desc = new Mat();
                bowDex.Compute(ii, new VectorOfKeyPoint(kp), desc);
                tDesc.PushBack(desc);
                tLabel[j, 0] = data[j].Item2;
            }
            //
            //SVM model = new SVM();
            //model.SetKernel(Emgu.CV.ML.SVM.SvmKernelType.Linear);
            //model.Type = SVM.SvmType.CSvc;
            //model.C = 1;
            //model.TermCriteria = new MCvTermCriteria(100, 0.00001);

            SVM svm = new SVM();

            svm.C     = 312.5;
            svm.Gamma = 0.50625000000000009;
            svm.SetKernel(SVM.SvmKernelType.Rbf);
            svm.Type = SVM.SvmType.CSvc;
            svm.Nu   = 0.5;

            TrainData td      = new TrainData(tDesc, Emgu.CV.ML.MlEnum.DataLayoutType.RowSample, tLabel);
            bool      trained = svm.TrainAuto(td);

            using (FileStorage fs = new FileStorage("voca.yaml", FileStorage.Mode.Write))
            {
                svm.Write(fs);
                fs.Write(voca, "voca");
            }
            // test
            {
                Image <Bgr, Byte> test_img = new Image <Bgr, byte>(@"C:\test\iphone_icon\temp_1.jpg");
                Mat ii = new Mat();
                CvInvoke.CvtColor(test_img, ii, ColorConversion.Bgr2Gray);
                MKeyPoint[] kp   = surf.Detect(ii);
                Mat         desc = new Mat();
                bowDex.Compute(ii, new VectorOfKeyPoint(kp), desc);
                float r = svm.Predict(desc);
            }
        }
Example No. 12
        public void TestSURFDetector2()
        {
            //Trace.WriteLine("Size of MCvSURFParams: " + Marshal.SizeOf(typeof(MCvSURFParams)));
            Image <Gray, byte> box = EmguAssert.LoadImage <Gray, byte>("box.png");
            SURF detector          = new SURF(400);

            Stopwatch        watch = Stopwatch.StartNew();
            VectorOfKeyPoint vp1   = new VectorOfKeyPoint();
            Mat descriptors1       = new Mat();

            detector.DetectAndCompute(box, null, vp1, descriptors1, false);
            watch.Stop();
            EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset();
            watch.Start();
            MKeyPoint[] keypoints = detector.Detect(box, null);
            //ImageFeature<float>[] features2 = detector.Compute(box, keypoints);
            watch.Stop();
            EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            watch.Reset();
            watch.Start();
            //MCvSURFParams p = detector.SURFParams;

            //SURFFeature[] features3 = box.ExtractSURF(ref p);
            //watch.Stop();
            //EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds.", watch.ElapsedMilliseconds));

            // EmguAssert.IsTrue(features1.Length == features2.Length);
            //EmguAssert.IsTrue(features2.Length == features3.Length);

            PointF[] pts =
#if NETFX_CORE
                Extensions.
#else
                Array.
#endif
                ConvertAll <MKeyPoint, PointF>(keypoints, delegate(MKeyPoint mkp)
            {
                return(mkp.Point);
            });
            //SURFFeature[] features = box.ExtractSURF(pts, null, ref detector);
            //int count = features.Length;

            /*
             * for (int i = 0; i < features1.Length; i++)
             * {
             * Assert.AreEqual(features1[i].KeyPoint.Point, features2[i].KeyPoint.Point);
             * float[] d1 = features1[i].Descriptor;
             * float[] d2 = features2[i].Descriptor;
             *
             * for (int j = 0; j < d1.Length; j++)
             *    Assert.AreEqual(d1[j], d2[j]);
             * }*/

            foreach (MKeyPoint kp in keypoints)
            {
                box.Draw(new CircleF(kp.Point, kp.Size), new Gray(255), 1);
            }
        }
Example No. 13
        private void стартToolStripMenuItem_Click(object sender, EventArgs e)
        {
            try
            {
                labelstrip.Text = "Acquiring image";


                // take the images from the PictureBoxes and convert the Bitmaps to Mats
                Bitmap image1b = (Bitmap)objectPictureBox.Image;
                Bitmap image2b = (Bitmap)scenePictureBox.Image;
                Mat    image1  = BitmapConverter.ToMat(image1b);
                Mat    image2  = BitmapConverter.ToMat(image2b);

                //create the SURF detector
                SURF surf = new SURF(form.getThreshold(), form.getOctaves(), form.getOctavesLayer(), form.getDecriptors(), false);

                labelstrip.Text = "Detecting keypoints and their descriptors";

                //two variables each for the keypoints and the descriptors
                Mat        descriptors1 = new Mat();
                Mat        descriptors2 = new Mat();
                KeyPoint[] points1, points2;

                //detect the keypoints
                points1 = surf.Detect(image1);
                points2 = surf.Detect(image2);


                //compute the keypoint descriptors
                surf.Compute(image1, ref points1, descriptors1);
                surf.Compute(image2, ref points2, descriptors2);

                //match the descriptor arrays
                FlannBasedMatcher matcher = new FlannBasedMatcher();
                DMatch[]          matches;
                matches = matcher.Match(descriptors1, descriptors2);



                //compute the maximum and minimum distance over all matches
                double max_dist = 0; double min_dist = 100;

                for (int i = 0; i < matches.Length; i++)
                {
                    double dist = matches[i].Distance;
                    if (dist < min_dist)
                    {
                        min_dist = dist;
                    }
                    if (dist > max_dist)
                    {
                        max_dist = dist;
                    }
                }

                labelstrip.Text = "Filtering matches";

                // keep only the good matches, with distance below getMinDinst() * min_dist (3 is a typical factor)

                List <DMatch> good_matches = new List <DMatch>();

                for (int i = 0; i < matches.Length; i++)
                {
                    if (matches[i].Distance < form.getMinDinst() * min_dist)
                    {
                        good_matches.Add(matches[i]);
                    }
                }



                Mat image3 = new Mat();
                Cv2.DrawMatches(image1, points1, image2, points2, good_matches, image3, Scalar.RandomColor(), Scalar.RandomColor(), null, DrawMatchesFlags.NotDrawSinglePoints);

                labelstrip.Text = "Localizing the object";

                //use a homography to localize the object

                Point2f[] vector  = new Point2f[good_matches.Count];
                Point2d[] vector1 = new Point2d[good_matches.Count];
                Point2d[] vector2 = new Point2d[good_matches.Count];
                for (int i = 0; i < good_matches.Count; i++)
                {
                    vector[i]    = points1[good_matches[i].QueryIdx].Pt;
                    vector1[i].X = vector[i].X;
                    vector1[i].Y = vector[i].Y;
                    vector[i]    = points2[good_matches[i].TrainIdx].Pt;
                    vector2[i].X = vector[i].X;
                    vector2[i].Y = vector[i].Y;
                }

                Mat H = Cv2.FindHomography(vector1, vector2, HomographyMethod.Ransac);

                //get the corners of the image containing the target object
                Point2d[] vector3 = new Point2d[4];
                vector3[0].X = 0; vector3[0].Y = 0;
                vector3[1].X = image1.Cols; vector3[1].Y = 0;
                vector3[2].X = image1.Cols; vector3[2].Y = image1.Rows;
                vector3[3].X = 0; vector3[3].Y = image1.Rows;
                Point2d pointtest; pointtest.X = 0; pointtest.Y = 0;


                List <Point2d> vector4 = new List <Point2d>()
                {
                    pointtest, pointtest, pointtest, pointtest
                };

                //project the target object's corners into the scene using the found transform
                Cv2.PerspectiveTransform(InputArray.Create(vector3), OutputArray.Create(vector4), H);

                Point2d point1;
                Point2d point2;
                int     k;
                for (int i = 0; i < 4; i++)
                {
                    if (i == 3)
                    {
                        k = 0;
                    }
                    else
                    {
                        k = i + 1;
                    }
                    point1.X = vector4[i].X + image1.Cols;
                    point1.Y = vector4[i].Y + 0;
                    point2.X = vector4[k].X + image1.Cols;
                    point2.Y = vector4[k].Y + 0;
                    Cv2.Line(image3, point1, point2, Scalar.RandomColor(), 4);
                }

                labelstrip.Text = "Object localized";

                Bitmap image3b = BitmapConverter.ToBitmap(image3);
                imageForm.getresultimage(image3b);

                imageForm.ShowDialog();
            }
            catch (Exception ex)
            {
                MessageBox.Show(Convert.ToString(ex));
                labelstrip.Text = "An error occurred";
            }
        }
Example No. 14
        /// <summary>
        /// Image matching
        /// </summary>
        /// <param name="templateImage">template image</param>
        /// <param name="originalImage">original (scene) image</param>
        /// <param name="nndrRatio">nearest-neighbour distance ratio threshold, typically 0.5</param>
        public static void matchImage(Mat templateImage, Mat originalImage, float nndrRatio)
        {
            DateTime start = DateTime.Now;
            //use SURF as the feature algorithm
            SURF surf = new SURF();


            //detect the template image's keypoints
            KeyPoint[] templateKeyPoints = surf.Detect(templateImage);
            //compute the template image's descriptors (Compute allocates the output Mat itself)
            Mat templateDescriptors = new Mat();

            surf.Compute(templateImage, ref templateKeyPoints, templateDescriptors);



            //detect the original image's keypoints
            KeyPoint[] originalKeyPoints = surf.Detect(originalImage);
            //compute the original image's descriptors
            Mat originalDescriptors = new Mat();

            surf.Compute(originalImage, ref originalKeyPoints, originalDescriptors);


            //start matching
            DescriptorMatcher descriptorMatcher = DescriptorMatcher.Create("FlannBased");

            /**
             * KnnMatch finds the best matches for each query descriptor in the given set.
             * With K = 2, every query yields its two nearest descriptors; the ratio of the
             * closest to the second-closest distance is computed, and a match is accepted
             * only when that ratio passes the threshold (Lowe's ratio test).
             */
            DMatch[][]    matches         = descriptorMatcher.KnnMatch(templateDescriptors, originalDescriptors, 2);
            List <DMatch> goodMatchesList = new List <DMatch>();

            foreach (DMatch[] match in matches)
            {
                DMatch m1 = match[0];
                DMatch m2 = match[1];
                if (m1.Distance <= m2.Distance * nndrRatio)
                {
                    goodMatchesList.Add(m1);
                }
            }
            //if at least 4 good matches remain, the template is considered present in the original image; this threshold can be tuned
            if (goodMatchesList.Count >= 4)
            {
                //Console.WriteLine("Template matched in the original image!");
                List <KeyPoint> templateKeyPointList = templateKeyPoints.ToList();
                List <KeyPoint> originalKeyPointList = originalKeyPoints.ToList();
                List <Point2f>  objectPoints         = new List <Point2f>();
                List <Point2f>  scenePoints          = new List <Point2f>();
                foreach (DMatch goodMatch in goodMatchesList)
                {
                    objectPoints.Add(templateKeyPointList[goodMatch.QueryIdx].Pt);
                    scenePoints.Add(originalKeyPointList[goodMatch.TrainIdx].Pt);
                }
                MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
                foreach (Point2f p in objectPoints)
                {
                    objMatOfPoint2f.Add(p);
                }

                MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
                foreach (Point2f p in scenePoints)
                {
                    scnMatOfPoint2f.Add(p);
                }
                //use FindHomography to estimate the transform between the matched keypoints
                Mat homography = Cv2.FindHomography(objMatOfPoint2f, scnMatOfPoint2f, OpenCvSharp.HomographyMethod.Ransac, 3);

                /**
                 * A perspective transformation projects the image onto a new viewing plane,
                 * also known as a projective mapping.
                 */
                Mat templateCorners         = new Mat(4, 1, MatType.CV_32FC2);
                Mat templateTransformResult = new Mat(4, 1, MatType.CV_32FC2);
                templateCorners.Set <Point2f>(0, 0, new Point2f(0, 0));
                templateCorners.Set <Point2f>(1, 0, new Point2f(templateImage.Cols, 0));
                templateCorners.Set <Point2f>(2, 0, new Point2f(templateImage.Cols, templateImage.Rows));
                templateCorners.Set <Point2f>(3, 0, new Point2f(0, templateImage.Rows));

                //use PerspectiveTransform to map the template's corners into the scene through the homography
                Cv2.PerspectiveTransform(templateCorners, templateTransformResult, homography);

                //the four corners of the located rectangle
                Point2f pointA = templateTransformResult.Get <Point2f>(0, 0);
                Point2f pointB = templateTransformResult.Get <Point2f>(1, 0);
                Point2f pointC = templateTransformResult.Get <Point2f>(2, 0);
                Point2f pointD = templateTransformResult.Get <Point2f>(3, 0);

                //outline the matched region with four lines
                Cv2.Line(originalImage, pointA, pointB, new Scalar(0, 255, 0), 1); //top    A->B
                Cv2.Line(originalImage, pointB, pointC, new Scalar(0, 255, 0), 1); //right  B->C
                Cv2.Line(originalImage, pointC, pointD, new Scalar(0, 255, 0), 1); //bottom C->D
                Cv2.Line(originalImage, pointD, pointA, new Scalar(0, 255, 0), 1); //left   D->A

                Cv2.PutText(originalImage, "time:" + DateTime.Now.Subtract(start).TotalMilliseconds + "ms", new Point(10, originalImage.Height - 10), FontFace.HersheySimplex, 0.5, new Scalar(255, 255, 255));
                Cv2.ImWrite(@"C:\Users\Administrator\Desktop\result.jpg", originalImage);
            }
        }
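
A minimal usage sketch for matchImage above; the file names are placeholders, and 0.5f follows the nndrRatio guidance in the doc comment:

            Mat template = Cv2.ImRead("template.png");
            Mat scene    = Cv2.ImRead("scene.png");
            matchImage(template, scene, 0.5f);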
Example No. 15
        public static MatchResult Match(Bitmap src, TrainedTemplate trainedTemplate)
        {
            try
            {
                Mat      originalImage = Bitmap2Mat(src);
                DateTime begin         = DateTime.Now;
                SURF     surf          = new SURF();
                //detect the original image's keypoints
                KeyPoint[] originalKeyPoints = surf.Detect(originalImage);
                //compute the original image's descriptors
                Mat originalDescriptors = new Mat();
                surf.Compute(originalImage, ref originalKeyPoints, originalDescriptors);


                //start matching
                DescriptorMatcher descriptorMatcher = DescriptorMatcher.Create("FlannBased");

                /**
                 * KnnMatch finds the best matches for each query descriptor in the given set.
                 * With K = 2, every query yields its two nearest descriptors, and a match is
                 * accepted only when the nearest/second-nearest distance ratio passes nndrRatio.
                 */
                DMatch[][]    matches         = descriptorMatcher.KnnMatch(trainedTemplate.templateDescriptors, originalDescriptors, 2);
                List <DMatch> goodMatchesList = new List <DMatch>();
                foreach (DMatch[] match in matches)
                {
                    DMatch m1 = match[0];
                    DMatch m2 = match[1];
                    if (m1.Distance <= m2.Distance * nndrRatio)
                    {
                        goodMatchesList.Add(m1);
                    }
                }
                //if at least 4 good matches remain, the template is considered present in the original image; this threshold can be tuned
                if (goodMatchesList.Count >= 4)
                {
                    //Console.WriteLine("Template matched in the original image!");
                    List <KeyPoint> templateKeyPointList = trainedTemplate.templateKeyPoints.ToList();
                    List <KeyPoint> originalKeyPointList = originalKeyPoints.ToList();
                    List <Point2f>  objectPoints         = new List <Point2f>();
                    List <Point2f>  scenePoints          = new List <Point2f>();
                    foreach (DMatch goodMatch in goodMatchesList)
                    {
                        objectPoints.Add(templateKeyPointList[goodMatch.QueryIdx].Pt);
                        scenePoints.Add(originalKeyPointList[goodMatch.TrainIdx].Pt);
                    }
                    MatOfPoint2f objMatOfPoint2f = new MatOfPoint2f();
                    foreach (Point2f p in objectPoints)
                    {
                        objMatOfPoint2f.Add(p);
                    }

                    MatOfPoint2f scnMatOfPoint2f = new MatOfPoint2f();
                    foreach (Point2f p in scenePoints)
                    {
                        scnMatOfPoint2f.Add(p);
                    }
                    //use FindHomography to estimate the transform between the matched keypoints
                    Mat homography = Cv2.FindHomography(objMatOfPoint2f, scnMatOfPoint2f, OpenCvSharp.HomographyMethod.Ransac, 3);

                    /**
                     * A perspective transformation projects the image onto a new viewing plane,
                     * also known as a projective mapping.
                     */
                    Mat templateCorners         = new Mat(4, 1, MatType.CV_32FC2);
                    Mat templateTransformResult = new Mat(4, 1, MatType.CV_32FC2);
                    templateCorners.Set <Point2f>(0, 0, new Point2f(0, 0));
                    templateCorners.Set <Point2f>(1, 0, new Point2f(trainedTemplate.templateImage.Cols, 0));
                    templateCorners.Set <Point2f>(2, 0, new Point2f(trainedTemplate.templateImage.Cols, trainedTemplate.templateImage.Rows));
                    templateCorners.Set <Point2f>(3, 0, new Point2f(0, trainedTemplate.templateImage.Rows));

                    //use PerspectiveTransform to map the template's corners into the scene through the homography
                    Cv2.PerspectiveTransform(templateCorners, templateTransformResult, homography);

                    //the four corners of the located rectangle
                    Point2f pointA = templateTransformResult.Get <Point2f>(0, 0);
                    Point2f pointB = templateTransformResult.Get <Point2f>(1, 0);
                    Point2f pointC = templateTransformResult.Get <Point2f>(2, 0);
                    Point2f pointD = templateTransformResult.Get <Point2f>(3, 0);

                    MatchResult matchResult = new MatchResult();
                    //bounding box from the projected corners (assumes a roughly axis-aligned match)
                    matchResult.top    = (int)pointA.Y;
                    matchResult.left   = (int)pointA.X;
                    matchResult.right  = (int)pointB.X;
                    matchResult.bottom = (int)pointD.Y;
                    matchResult.time   = DateTime.Now.Subtract(begin).TotalMilliseconds;
                    return(matchResult);
                }
                else
                {
                    return(null);
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                return(null);
            }
        }