Example #1
0
        private void GetCorners(Mat newFrame, Rectangle roi)
        {
            using (GFTTDetector detector = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04))
            {
                Mat img = newFrame.Clone();

                // Create SPECIAL Mask with ROI
                Image <Gray, byte> maskImg = new Image <Gray, byte>(img.Size);

                roi = new Rectangle((int)(roi.X + roi.Width * 0.25), (int)(roi.Y + roi.Height * 0.1), (int)(roi.Width * 0.5), (int)(roi.Height * 0.4));

                maskImg.Draw(roi, new Gray(255), -1, LineType.FourConnected);



                var keypoints = detector.Detect(img, maskImg);
                //prePoints = keypoints;

                prePoints = new PointF[keypoints.Length];

                for (int i = 0; i < keypoints.Length; i++)
                {
                    prePoints[i] = (keypoints[i].Point);
                }
            }
        }
Example #2
0
        private void button_Detect_Click(object sender, EventArgs e)
        {
            Mat scr    = imagemat;
            Mat result = imagemat.Clone();

            #region Detect() code

            /*
             * GFTTDetector _gftd = new GFTTDetector();//Create a GFTTDetector with default parameters.
             * MKeyPoint[] keypoints = _gftd.Detect(scr, null);//Detect key points and return them as MKeyPoint[].
             * foreach (MKeyPoint keypoint in keypoints)//Iterate over the MKeyPoint[] array.
             * {
             *  Point point = Point.Truncate(keypoint.Point);//Get the key point's coordinates as a Point.
             *  CvInvoke.Circle(result, point, 3, new MCvScalar(0, 0, 255), 1);//Draw the key point's position as a circle.
             * }
             */
            #endregion
            #region DetectRaw() code
            GFTTDetector     _gftd            = new GFTTDetector();               //Create a GFTTDetector with default parameters.
            VectorOfKeyPoint vector_keypoints = new VectorOfKeyPoint();           //Create a VectorOfKeyPoint to hold the detected key points.
            _gftd.DetectRaw(scr, vector_keypoints, null);                         //Detect the key points.
            foreach (MKeyPoint keypoint in vector_keypoints.ToArray())            //Iterate over the MKeyPoint[] array.
            {
                Point point = Point.Truncate(keypoint.Point);                     //Get the key point's coordinates as a Point.
                CvInvoke.Circle(result, point, 3, new MCvScalar(255, 255, 0), 1); //Draw the key point's position as a circle.
            }
            }
            #endregion
            imageBox1.Image = scr;    //Display the input image.
            imageBox2.Image = result; //Display the corner detection result.
        }
Example #3
0
        public void TestGFTTDetector()
        {
            GFTTDetector             keyPointDetector    = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04);
            BriefDescriptorExtractor descriptorGenerator = new BriefDescriptorExtractor(32);

            TestFeature2DTracker(keyPointDetector, descriptorGenerator);
        }
Example #4
0
        public Image <Bgr, byte> pointComp(Image <Bgr, byte> baseImg, Image <Bgr, byte> twistedImg)
        {
            Image <Gray, byte> baseImgGray    = baseImg.Convert <Gray, byte>();
            Image <Gray, byte> twistedImgGray = twistedImg.Convert <Gray, byte>();
            Brisk            descriptor       = new Brisk();
            GFTTDetector     detector         = new GFTTDetector(40, 0.01, 5, 3, true);
            VectorOfKeyPoint GFP1             = new VectorOfKeyPoint();
            UMat             baseDesc         = new UMat();
            UMat             bimg             = twistedImgGray.Mat.GetUMat(AccessType.Read);
            VectorOfKeyPoint GFP2             = new VectorOfKeyPoint();
            UMat             twistedDesc      = new UMat();
            UMat             timg             = baseImgGray.Mat.GetUMat(AccessType.Read);

            detector.DetectRaw(bimg, GFP1);
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);
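            // Brute-force match the descriptors (KNN, k = 2) and keep only sufficiently unique matches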
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            matcher.Add(baseDesc);
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            //int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP1, matches, mask, 1.5, 20);
            Image <Bgr, byte> res = baseImg.CopyBlank();

            Features2DToolbox.DrawMatches(twistedImg, GFP1, baseImg, GFP2, matches, res, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), mask);
            return(res);
        }
Example #5
0
        public MKeyPoint[] Maspointer(Image <Bgr, byte> image, int mode)
        {
            switch (mode)
            {
            case 0:
            {
                GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);
                MKeyPoint[]  GFP1     = detector.Detect(image.Convert <Gray, byte>().Mat);
                return(GFP1);
            }

            case 1:
            {
                Brisk       detector = new Brisk();
                MKeyPoint[] GFP1     = detector.Detect(image.Convert <Gray, byte>().Mat);
                return(GFP1);
            }

            case 2:
            {
                FastFeatureDetector detector = new FastFeatureDetector();
                MKeyPoint[]         GFP1     = detector.Detect(image.Convert <Gray, byte>().Mat);
                return(GFP1);
            }
            }
            return(null);
        }
Example #6
0
        private void button3_Click(object sender, EventArgs e)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            var baseImgGray    = baseImg.Convert <Gray, byte>();
            var twistedImgGray = twistedImg.Convert <Gray, byte>();

            //key point descriptor generator
            Brisk descriptor = new Brisk();

            //since in this case we need to compute the inverse transformation,
            //the twisted image will serve as the base
            VectorOfKeyPoint GFP1     = new VectorOfKeyPoint();
            UMat             baseDesc = new UMat();
            UMat             bimg     = twistedImgGray.Mat.GetUMat(AccessType.Read);

            VectorOfKeyPoint GFP2        = new VectorOfKeyPoint();
            UMat             twistedDesc = new UMat();
            UMat             timg        = baseImgGray.Mat.GetUMat(AccessType.Read);

            //get the raw key point data for both images
            detector.DetectRaw(bimg, GFP1);

            //generate descriptors for the images' key points
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);

            //class for comparing descriptions of key point sets
            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            //array for storing the key point matches
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            //add the descriptors of the base points
            matcher.Add(baseDesc);
            //compare them against the descriptors of the twisted image
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            //3rd parameter: the number of nearest neighbours among which matches are searched
            //4th parameter: the mask, not needed in this case

            //mask marking the values to discard (outliers and non-unique matches)
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            //keep only the unique matches
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

            Mat homography;

            //compute the homography matrix
            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(GFP1, GFP2, matches, mask, 2);

            var destImage = new Image <Bgr, byte>(baseImg.Size);

            CvInvoke.WarpPerspective(twistedImg, destImage, homography, destImage.Size);
            twistedImg      = destImage;
            imageBox2.Image = destImage.Resize(640, 480, Inter.Linear);
        }
Example #7
0
        public void TestGFTTDetector()
        {
            GFTTDetector keyPointDetector    = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04);
            SIFT         descriptorGenerator = new SIFT();

            //ParamDef[] parameters = keyPointDetector.GetParams();
            TestFeature2DTracker(keyPointDetector, descriptorGenerator);
        }
Example #8
0
        private void button2_Click(object sender, EventArgs e)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            MKeyPoint[] GFP1 = detector.Detect(baseImg.Convert <Gray, byte>().Mat);


            //build an array with the source image's key points (positions only)
            PointF[] srcPoints = new PointF[GFP1.Length];
            for (int i = 0; i < GFP1.Length; i++)
            {
                srcPoints[i] = GFP1[i].Point;
            }
            PointF[] destPoints;                       //array for the point positions on the twisted image
            byte[]   status;                           //point status (found / not found)
            float[]  trackErrors;                      //tracking errors
                                                       //compute the key point positions on the new image with the Lucas-Kanade method
            CvInvoke.CalcOpticalFlowPyrLK(
                baseImg.Convert <Gray, byte>().Mat,    //source image
                twistedImg.Convert <Gray, byte>().Mat, //twisted image
                srcPoints,                             //key points of the source image
                new Size(20, 20),                      //search window size
                5,                                     //pyramid levels
                new MCvTermCriteria(20, 1),            //termination criteria for the optical flow computation
                out destPoints,                        //key point positions on the new image
                out status,                            //contains 1 for the elements where the flow was found
                out trackErrors                        //contains the tracking errors
                );

            //compute the homography matrix
            Mat homographyMatrix = CvInvoke.FindHomography(destPoints, srcPoints,
                                                           RobustEstimationAlgorithm.LMEDS);
            var destImage = new Image <Bgr, byte>(baseImg.Size);

            CvInvoke.WarpPerspective(twistedImg, destImage, homographyMatrix, destImage.Size);


            //var output1 = baseImg.Clone();

            //foreach (MKeyPoint p in GFP1)
            //{
            //    CvInvoke.Circle(output1, Point.Round(p.Point), 3, new Bgr(Color.Blue).MCvScalar, 2);
            //}
            //imageBox1.Image = output1.Resize(640, 480, Inter.Linear);

            ////var output2 = twistedImg.Clone();

            //foreach (PointF p in destPoints)
            //{
            //    CvInvoke.Circle(destImage, Point.Round(p), 3, new Bgr(Color.Blue).MCvScalar, 2);
            //}
            imageBox2.Image = destImage.Resize(640, 480, Inter.Linear);
        }
Example #9
0
        public Image <Bgr, byte> ReturnCompared(out Image <Bgr, byte> def, out Image <Bgr, byte> twistdef)
        {
            var image      = sourceImage.Copy();
            var twistedImg = additionalImage.Copy();
            //key point descriptor generator
            Brisk        descriptor = new Brisk();
            GFTTDetector detector   = new GFTTDetector(40, 0.01, 5, 3, true);
            //since in this case we need to compute the inverse transformation,
            //the twisted image will serve as the base
            VectorOfKeyPoint GFP1           = new VectorOfKeyPoint();
            UMat             baseDesc       = new UMat();
            var              twistedImgGray = twistedImg.Convert <Gray, byte>();
            var              baseImgGray    = image.Convert <Gray, byte>();
            UMat             bimg           = twistedImgGray.Mat.GetUMat(AccessType.Read);
            VectorOfKeyPoint GFP2           = new VectorOfKeyPoint();
            UMat             twistedDesc    = new UMat();
            UMat             timg           = baseImgGray.Mat.GetUMat(AccessType.Read);

            //get the raw key point data for both images
            detector.DetectRaw(bimg, GFP1);
            //generate descriptors for the images' key points
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);


            BFMatcher matcher = new BFMatcher(DistanceType.L2);

            //array for storing the key point matches
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            //add the descriptors of the base points
            matcher.Add(baseDesc);
            //compare them against the descriptors of the twisted image
            matcher.KnnMatch(twistedDesc, matches, 2, null);


            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            //keep only the unique matches
            Mat resM = new Mat(image.Height, image.Width * 2, DepthType.Cv8U, 3);
            var res  = resM.ToImage <Bgr, byte>();

            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            int nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP2, matches, mask, 1.5, 20);

            Features2DToolbox.DrawMatches(twistedImg, GFP1, image, GFP2, matches, res, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), mask);
            def      = image;
            twistdef = twistedImg;
            return(res);
        }
Example #10
0
 public Form1()
 {
     InitializeComponent();
     //set the maximum number of good features to track
     gFTTDetector = new GFTTDetector(500);
     sWatch       = new Stopwatch();
     try
     {
         //Choose the camera by changing the parameter 0 or 1
         capture = new VideoCapture(0);
         capture.ImageGrabbed += ProcessFrame;
     }
     catch (NullReferenceException excpt)
     {
         MessageBox.Show(excpt.Message);
     }
 }
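The ProcessFrame handler itself is not part of this listing. As a rough sketch, assuming the capture, gFTTDetector and sWatch fields from the constructor above (the imageBox1 display control is hypothetical), it could look like this:

 private void ProcessFrame(object sender, EventArgs e)
 {
     Mat frame = new Mat();
     capture.Retrieve(frame);                            // get the frame that was just grabbed

     Mat gray = new Mat();
     CvInvoke.CvtColor(frame, gray, ColorConversion.Bgr2Gray);

     sWatch.Restart();
     MKeyPoint[] corners = gFTTDetector.Detect(gray);    // up to 500 good features to track (see constructor)
     sWatch.Stop();

     foreach (MKeyPoint kp in corners)
     {
         CvInvoke.Circle(frame, Point.Round(kp.Point), 3, new MCvScalar(0, 0, 255), 1);
     }

     //imageBox1.Image = frame;                          // hypothetical ImageBox for display
 }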
Example #11
0
        public Image <Bgr, byte> GFTT()
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            MKeyPoint[] GFP1 = detector.Detect(baseImage.Convert <Gray, byte>().Mat);

            //build an array with the source image's key points (positions only)
            PointF[] srcPoints = new PointF[GFP1.Length];
            for (int i = 0; i < GFP1.Length; i++)
            {
                srcPoints[i] = GFP1[i].Point;
            }
            PointF[] destPoints;                         //array for the point positions on the twisted image
            byte[]   status;                             //point status (found / not found)
            float[]  trackErrors;                        //tracking errors
                                                         //compute the key point positions on the new image with the Lucas-Kanade method
            CvInvoke.CalcOpticalFlowPyrLK(
                baseImage.Convert <Gray, byte>().Mat,    //source image
                twistedImage.Convert <Gray, byte>().Mat, //twisted image
                srcPoints,                               //key points of the source image
                new Size(20, 20),                        //search window size
                5,                                       //pyramid levels
                new MCvTermCriteria(20, 1),              //termination criteria for the optical flow computation
                out destPoints,                          //key point positions on the new image
                out status,                              //contains 1 for the elements where the flow was found
                out trackErrors                          //contains the tracking errors
                );


            var output = baseImage.Clone();

            foreach (MKeyPoint p in GFP1)
            {
                CvInvoke.Circle(output, Point.Round(p.Point), 3, new Bgr(Color.Blue).MCvScalar, 2);
            }

            var output2 = twistedImage.Clone();

            foreach (PointF p in destPoints)
            {
                CvInvoke.Circle(output2, Point.Round(p), 3, new Bgr(Color.Blue).MCvScalar, 2);
            }

            return(output.Resize(640, 480, Inter.Linear));
        }
Example #12
0
        public int Init(Bitmap bm)
        {
            GFTTDetector gftt    = new GFTTDetector(maxPoints, 0.1, 1, 3, false, 0.04);
            Rectangle    rect    = new Rectangle(0, 0, bm.Width, bm.Height);
            BitmapData   bmpData = bm.LockBits(rect, ImageLockMode.ReadWrite, bm.PixelFormat);

            Mat m = new Mat(bm.Height, bm.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 3, bmpData.Scan0, bmpData.Stride);

            Rectangle roi = new Rectangle(searchLocation, searchSize);

            // MKeyPoint[] keyPoints;
            keyPoints = gftt.Detect(new Mat(m, roi));
            bm.UnlockBits(bmpData);
            PrevImg = m;

            prevPoints = new PointF[keyPoints.Count()];
            for (int j = 0; j < keyPoints.Count(); j++)
            {
                prevPoints[j] = new PointF(keyPoints[j].Point.X + roi.Left, keyPoints[j].Point.Y + roi.Top);
            }

            /*            unsafe
             *          {
             *              fixed (byte* pArray = fr.Buffer)
             *              {
             *                  //IntPtr = pArray;
             *                  Mat m = new Mat((int)fr.Height, (int)fr.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 3, (IntPtr)pArray, (int)fr.Width*3);
             *                  keyPoints = gftt.Detect(m);
             *                  foreach (var kp in keyPoints)
             *                  {
             *                      CvInvoke.Circle(m, new Point((int)kp.Point.X, (int)kp.Point.Y), 5, new MCvScalar(100, 200, 200));
             *
             *                  }
             *
             *                  }
             *              }
             */



            //CvInvoke.CalcOpticalFlowPyrLK()
            return(0);
        }
Example #13
0
        public Image <Bgr, byte> ReturnLucas(Image <Bgr, byte> image, Image <Bgr, byte> twistedImg, out Image <Bgr, byte> defImg)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            MKeyPoint[] GFP1 = detector.Detect(image.Convert <Gray, byte>().Mat);
            foreach (MKeyPoint p in GFP1)
            {
                CvInvoke.Circle(image, Point.Round(p.Point), 5, new Bgr(Color.LawnGreen).MCvScalar, 2);
            }
            defImg = image;

            PointF[] srcPoints = new PointF[GFP1.Length];

            for (int i = 0; i < GFP1.Length; i++)
            {
                srcPoints[i] = GFP1[i].Point;
            }
            PointF[] destPoints;                       //array for the point positions on the twisted image
            byte[]   status;                           //point status (found / not found)
            float[]  trackErrors;                      //tracking errors
                                                       //compute the key point positions on the new image with the Lucas-Kanade method
            CvInvoke.CalcOpticalFlowPyrLK(
                image.Convert <Gray, byte>().Mat,      //source image
                twistedImg.Convert <Gray, byte>().Mat, //twisted image
                srcPoints,                             //key points of the source image
                new Size(20, 20),                      //search window size
                5,                                     //pyramid levels
                new MCvTermCriteria(20, 1),            //termination criteria for the optical flow computation
                out destPoints,                        //key point positions on the new image
                out status,                            //contains 1 for the elements where the flow was found
                out trackErrors                        //contains the tracking errors
                );

            //for (int i = 0; i < destPoints.Length; i++)
            //    srcPoints[i] = GFP1[i].Point;
            foreach (PointF p in destPoints)
            {
                CvInvoke.Circle(twistedImg, Point.Round(p), 5, new Bgr(Color.LawnGreen).MCvScalar, 2);
            }
            return(twistedImg);
        }
Example #14
0
        private void DrawKeypoints()
        {
            try
            {
                if (imgList["Input"] == null)
                {
                    return;
                }

                var img  = imgList["Input"].Clone();
                var gray = img.Convert <Gray, byte>();

                GFTTDetector detector = new GFTTDetector(2000, 0.06);
                var          corners  = detector.Detect(gray);
                dt.Rows.Clear();
                foreach (MKeyPoint pt in corners)
                {
                    dt.Rows.Add(pt.ClassId,
                                pt.Point.ToString(),
                                pt.Angle,
                                pt.Size,
                                pt.Octave,
                                pt.Response

                                );
                }



                Mat outimg = new Mat();
                Features2DToolbox.DrawKeypoints(img, new VectorOfKeyPoint(corners), outimg, new Bgr(0, 0, 255));

                imageBoxEx1.Image        = outimg.ToBitmap();
                dataGridView1.DataSource = dt;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #15
0
        public Image <Bgr, byte> PointHomo(Image <Bgr, byte> image, Image <Bgr, byte> image2)
        {
            Image <Gray, byte> baseImgGray    = image.Convert <Gray, byte>();
            Image <Gray, byte> twistedImgGray = image2.Convert <Gray, byte>();
            Brisk            descriptor       = new Brisk();
            GFTTDetector     detector         = new GFTTDetector(40, 0.01, 5, 3, true);
            VectorOfKeyPoint GFP1             = new VectorOfKeyPoint();
            UMat             baseDesc         = new UMat();
            UMat             bimg             = twistedImgGray.Mat.GetUMat(AccessType.Read);
            VectorOfKeyPoint GFP2             = new VectorOfKeyPoint();
            UMat             twistedDesc      = new UMat();
            UMat             timg             = baseImgGray.Mat.GetUMat(AccessType.Read);

            detector.DetectRaw(bimg, GFP1);
            descriptor.Compute(bimg, GFP1, baseDesc);
            detector.DetectRaw(timg, GFP2);
            descriptor.Compute(timg, GFP2, twistedDesc);
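            // Brute-force match the descriptors (KNN, k = 2), vote for uniqueness and size/orientation consistency, then estimate the homography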
            BFMatcher matcher = new BFMatcher(DistanceType.L2);
            VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();

            matcher.Add(baseDesc);
            matcher.KnnMatch(twistedDesc, matches, 2, null);
            Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);

            mask.SetTo(new MCvScalar(255));
            Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);
            int nonZeroCount      = Features2DToolbox.VoteForSizeAndOrientation(GFP1, GFP2, matches, mask, 1.5, 20);
            Image <Bgr, byte> res = image.CopyBlank();

            Features2DToolbox.DrawMatches(image2, GFP1, image, GFP2, matches, res, new MCvScalar(255, 0, 0), new MCvScalar(255, 0, 0), mask);

            Mat homography;

            homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(GFP1, GFP2, matches, mask, 2);
            var destImage = new Image <Bgr, byte>(image2.Size);

            CvInvoke.WarpPerspective(image2, destImage, homography, destImage.Size);

            return(destImage);
        }
Example #16
0
        // Calculate Optical Flow Using PyrLk Algorithm
        public void PyrLkOpticalFlow(Image <Gray, byte> prevFrame, Image <Gray, byte> nextFrame)
        {
            //Get the Optical flow of L-K feature
            Image <Gray, Byte> mask     = prevFrame.Clone();
            GFTTDetector       detector = new GFTTDetector(30, 0.01, 10, 3, false, 0.04);

            MKeyPoint[]     fp1      = detector.Detect(prevFrame, null);
            VectorOfPointF  vp1      = new VectorOfPointF(fp1.Select(x => x.Point).ToArray());
            VectorOfPointF  vp2      = new VectorOfPointF(vp1.Size);
            VectorOfByte    vstatus  = new VectorOfByte(vp1.Size);
            VectorOfFloat   verr     = new VectorOfFloat(vp1.Size);
            Size            winsize  = new Size(prevFrame.Width, prevFrame.Height);
            int             maxLevel = 1; // if 0, winsize is not used
            MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

            try
            {
                CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
Example #17
0
        /// <summary>
        /// Find the 4 strongest corners using the GFTT algorithm.
        /// </summary>
        /// <param name="pieceID">ID of the piece</param>
        /// <param name="pieceImgBw">Black white image of piece</param>
        /// <param name="pieceImgColor">Color image of piece</param>
        /// <returns>List with corner points</returns>
        /// see: http://docs.opencv.org/doc/tutorials/features2d/trackingmotion/corner_subpixeles/corner_subpixeles.html
        public override List <Point> FindCorners(string pieceID, Bitmap pieceImgBw, Bitmap pieceImgColor)
        {
            PluginFactory.LogHandle.Report(new LogEventInfo(pieceID + " Finding corners with GFTT algorithm"));

            double minDistance = PluginFactory.GetGeneralSettingsPlugin().PuzzleMinPieceSize;    //How close can 2 corners be?

            double min = 0;
            double max = 1;
            bool   found_all_corners = false;

            Image <Gray, byte> bw_clone = new Image <Gray, byte>(pieceImgBw);

            List <Point> corners = new List <Point>();

            //Binary search, altering quality until exactly 4 corners are found. Usually done in 1 or 2 iterations
            while (0 < MaxIterations--)
            {
                if (PluginFactory.CancelToken.IsCancellationRequested)
                {
                    PluginFactory.CancelToken.ThrowIfCancellationRequested();
                }

                double qualityLevel = (min + max) / 2;

                VectorOfKeyPoint keyPoints       = new VectorOfKeyPoint();
                GFTTDetector     featureDetector = new GFTTDetector(100, qualityLevel, minDistance, BlockSize, true, HarrisDetectorParameterK);

                featureDetector.DetectRaw(bw_clone, keyPoints);

                if (keyPoints.Size > 4)
                {
                    min = qualityLevel;     //Found too many corners; increase the quality level
                }
                else if (keyPoints.Size < 4)
                {
                    max = qualityLevel;     //Found too few corners; decrease the quality level
                }
                else
                {
                    for (int i = 0; i < keyPoints.Size; i++)
                    {
                        corners.Add(Point.Round(keyPoints[i].Point));
                    }

                    found_all_corners = true;       //found all corners
                    break;
                }
            }

            //Find the sub-pixel locations of the corners.
            //Size winSize = new Size(blockSize, blockSize);
            //Size zeroZone = new Size(-1, -1);
            //MCvTermCriteria criteria = new MCvTermCriteria(40, 0.001);

            // Calculate the refined corner locations
            //CvInvoke.CornerSubPix(bw_clone, corners, winSize, zeroZone, criteria);

            if (PluginFactory.GetGeneralSettingsPlugin().SolverShowDebugResults)
            {
                Image <Rgb, byte> corner_img = new Image <Rgb, byte>(pieceImgColor);
                for (int i = 0; i < corners.Count; i++)
                {
                    CvInvoke.Circle(corner_img, Point.Round(corners[i]), 7, new MCvScalar(255, 0, 0), -1);
                }
                PluginFactory.LogHandle.Report(new LogEventImage(pieceID + " Found Corners (" + corners.Count.ToString() + ")", corner_img.Bitmap));
                corner_img.Dispose();
            }

            if (!found_all_corners)
            {
                PluginFactory.LogHandle.Report(new LogEventError(pieceID + " Failed to find correct number of corners. " + corners.Count + " found."));
            }
            return(corners);
        }
Example #18
0
        /// <summary>
        /// Get the piece corners by finding the biggest rectangle of the contour points
        /// </summary>
        /// <param name="pieceID">ID of the piece</param>
        /// <param name="pieceImgBw">Black white image of piece</param>
        /// <param name="pieceImgColor">Color image of piece</param>
        /// <returns>List with corner points</returns>
        public override List <Point> FindCorners(string pieceID, Bitmap pieceImgBw, Bitmap pieceImgColor)
        {
            PluginFactory.LogHandle.Report(new LogEventInfo(pieceID + " Finding corners by finding the maximum rectangle within candidate points"));

            List <Point> corners = new List <Point>();

            // Find all dominant corner points using the GFTTDetector (this uses the Harris corner detector)
            GFTTDetector detector = new GFTTDetector(500, 0.01, 5, 2, true, 0.04);

            MKeyPoint[]  keyPoints       = detector.Detect(new Image <Gray, byte>(pieceImgBw));
            List <Point> possibleCorners = keyPoints.Select(k => Point.Round(k.Point)).ToList();

            if (possibleCorners.Count > 0)
            {
                // Sort the dominant corners by the distance to upper left corner of the bounding rectangle (0, 0) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedUpperLeft = new List <Point>(possibleCorners);
                possibleCornersSortedUpperLeft.Sort(new DistanceToPointComparer(new Point(0, 0), DistanceOrders.NEAREST_FIRST));
                double minCornerDistUpperLeft = Utils.Distance(possibleCornersSortedUpperLeft[0], new PointF(0, 0));
                possibleCornersSortedUpperLeft = possibleCornersSortedUpperLeft.Where(c => Utils.Distance(c, new PointF(0, 0)) < minCornerDistUpperLeft * PieceFindCornersMaxCornerDistRatio).ToList();

                // Sort the dominant corners by the distance to upper right corner of the bounding rectangle (ImageWidth, 0) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedUpperRight = new List <Point>(possibleCorners);
                possibleCornersSortedUpperRight.Sort(new DistanceToPointComparer(new Point(pieceImgBw.Width, 0), DistanceOrders.NEAREST_FIRST));
                double minCornerDistUpperRight = Utils.Distance(possibleCornersSortedUpperRight[0], new PointF(pieceImgBw.Width, 0));
                possibleCornersSortedUpperRight = possibleCornersSortedUpperRight.Where(c => Utils.Distance(c, new PointF(pieceImgBw.Width, 0)) < minCornerDistUpperRight * PieceFindCornersMaxCornerDistRatio).ToList();

                // Sort the dominant corners by the distance to lower right corner of the bounding rectangle (ImageWidth, ImageHeight) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedLowerRight = new List <Point>(possibleCorners);
                possibleCornersSortedLowerRight.Sort(new DistanceToPointComparer(new Point(pieceImgBw.Width, pieceImgBw.Height), DistanceOrders.NEAREST_FIRST));
                double minCornerDistLowerRight = Utils.Distance(possibleCornersSortedLowerRight[0], new PointF(pieceImgBw.Width, pieceImgBw.Height));
                possibleCornersSortedLowerRight = possibleCornersSortedLowerRight.Where(c => Utils.Distance(c, new PointF(pieceImgBw.Width, pieceImgBw.Height)) < minCornerDistLowerRight * PieceFindCornersMaxCornerDistRatio).ToList();

                // Sort the dominant corners by the distance to lower left corner of the bounding rectangle (0, ImageHeight) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedLowerLeft = new List <Point>(possibleCorners);
                possibleCornersSortedLowerLeft.Sort(new DistanceToPointComparer(new Point(0, pieceImgBw.Height), DistanceOrders.NEAREST_FIRST));
                double minCornerDistLowerLeft = Utils.Distance(possibleCornersSortedLowerLeft[0], new PointF(0, pieceImgBw.Height));
                possibleCornersSortedLowerLeft = possibleCornersSortedLowerLeft.Where(c => Utils.Distance(c, new PointF(0, pieceImgBw.Height)) < minCornerDistLowerLeft * PieceFindCornersMaxCornerDistRatio).ToList();

                // Combine all possibleCorners from the four lists and discard all combination with too bad angle differences
                List <FindCornerRectangleScore> scores = new List <FindCornerRectangleScore>();
                for (int indexUpperLeft = 0; indexUpperLeft < possibleCornersSortedUpperLeft.Count; indexUpperLeft++)
                {
                    for (int indexUpperRight = 0; indexUpperRight < possibleCornersSortedUpperRight.Count; indexUpperRight++)
                    {
                        for (int indexLowerRight = 0; indexLowerRight < possibleCornersSortedLowerRight.Count; indexLowerRight++)
                        {
                            for (int indexLowerLeft = 0; indexLowerLeft < possibleCornersSortedLowerLeft.Count; indexLowerLeft++)
                            {
                                if (PluginFactory.CancelToken.IsCancellationRequested)
                                {
                                    PluginFactory.CancelToken.ThrowIfCancellationRequested();
                                }

                                // Possible corner combination
                                Point[] tmpCorners = new Point[]
                                {
                                    possibleCornersSortedUpperLeft[indexUpperLeft],         // the corners are ordered beginning in the upper left corner and going counter-clockwise
                                    possibleCornersSortedLowerLeft[indexLowerLeft],
                                    possibleCornersSortedLowerRight[indexLowerRight],
                                    possibleCornersSortedUpperRight[indexUpperRight]
                                };
                                double angleDiff = RectangleDifferenceAngle(tmpCorners);
                                if (angleDiff > PieceFindCornersMaxAngleDiff)
                                {
                                    continue;
                                }

                                double area = CvInvoke.ContourArea(new VectorOfPoint(tmpCorners));
                                FindCornerRectangleScore score = new FindCornerRectangleScore()
                                {
                                    AngleDiff = angleDiff, RectangleArea = area, PossibleCorners = tmpCorners
                                };
                                scores.Add(score);
                            }
                        }
                    }
                }

                // Order the scores by rectangle area (biggest first) and take the PossibleCorners of the biggest rectangle as corners
                scores = scores.OrderByDescending(s => s.RectangleArea).ToList();
                if (scores.Count > 0)
                {
                    corners.AddRange(scores[0].PossibleCorners);
                }
            }

            if (corners.Count != 4)
            {
                PluginFactory.LogHandle.Report(new LogEventError(pieceID + " Failed to find correct number of corners. " + corners.Count + " found."));
            }

            if (PluginFactory.GetGeneralSettingsPlugin().SolverShowDebugResults)
            {
                using (Image <Rgb, byte> imgCorners = new Image <Rgb, byte>(pieceImgColor))
                {
                    Features2DToolbox.DrawKeypoints(imgCorners, new VectorOfKeyPoint(keyPoints), imgCorners, new Bgr(0, 0, 255));       // Draw the dominant key points

                    for (int i = 0; i < corners.Count; i++)
                    {
                        CvInvoke.Circle(imgCorners, Point.Round(corners[i]), 4, new MCvScalar(0, Math.Max(255 - i * 50, 50), 0), 3);
                    }
                    PluginFactory.LogHandle.Report(new LogEventImage(pieceID + " Corners", imgCorners.Bitmap));
                    imgCorners.Dispose();
                }
            }
            return(corners);
        }
Example #19
0
        void ProcessFrame(object sender, EventArgs e)
        {
            Mat frame         = _cameraCapture.QueryFrame();
            Mat smoothedFrame = new Mat();

            CvInvoke.GaussianBlur(frame, smoothedFrame, new Size(3, 3), 1);    //Gaussian blur
            CvInvoke.CvtColor(frame, curgray, ColorConversion.Bgr2Gray);       //convert to grayscale
            goodFeaturesToTrack = new GFTTDetector(maxCount, qLevel, minDist); //initialize the key point detector
            frame.CopyTo(KeyPointPic);

            MKeyPoint[] keypoint = goodFeaturesToTrack.Detect(curgray);//detect key points
            for (int i = 0; i < keypoint.Count(); i++)
            {
                System.Drawing.Point point = System.Drawing.Point.Truncate(keypoint[i].Point);//get the key point's coordinates as a Point
                CvInvoke.Circle(KeyPointPic, point, 3, new MCvScalar(0, 0, 255), 1);
            }

            if (prevFeature.Count() < 10)                                    //too few feature points, detect them again
            {
                MKeyPoint[] keypoints = goodFeaturesToTrack.Detect(curgray); //detect key points
                AddNewPoint = keypoints.Count();
                Array.Resize(ref prevFeature, keypoints.Count());
                Array.Resize(ref initial, keypoints.Count());
                for (int i = 0; i < keypoints.Count(); i++)
                {
                    System.Drawing.Point point = System.Drawing.Point.Truncate(keypoints[i].Point);//get the key point's coordinates as a Point
                    prevFeature[i] = point;
                    initial[i]     = point;
                    CvInvoke.Circle(curgray, point, 3, new MCvScalar(0, 0, 255), 1);
                }
            }
            if (pregray.Size.IsEmpty)
            {
                curgray.CopyTo(pregray);                      //first frame
            }
            MCvTermCriteria termcrit = new MCvTermCriteria(6);

            CvInvoke.CalcOpticalFlowPyrLK(pregray, curgray, prevFeature, curgray.Size, 2, termcrit, out currFeature, out status, out err, 0, 0.0001);
            AddNewPoint = prevFeature.Count();
            // discard feature points that were tracked poorly
            int k = 0;

            for (int i = 0; i < currFeature.Count(); i++)
            {
                try
                {
                    if (acceptTrackedPoint(i))
                    {
                        initial[k]       = initial[i];
                        currFeature[k++] = currFeature[i];
                    }
                }
                catch { }
            }

            Array.Resize(ref currFeature, k);
            Array.Resize(ref initial, k);

            frame.CopyTo(Flow);
            for (int i = 0; i < currFeature.Count(); i++)
            {
                //CvInvoke.Circle(Flow, Point.Truncate(currFeature[i]), 3, new MCvScalar(0, 0, 255),1);
                CvInvoke.Line(Flow, Point.Truncate(initial[i]), Point.Truncate(currFeature[i]), new Bgr(Color.DarkOrange).MCvScalar, 2);
            }



            imageBox1.Image = frame;
            imageBox2.Image = KeyPointPic;
            imageBox3.Image = Flow;

            curgray.CopyTo(pregray);
            Array.Resize(ref prevFeature, currFeature.Count());
            for (int i = 0; i < currFeature.Count(); i++)
            {
                prevFeature[i] = currFeature[i];
            }
            //Thread t = new Thread(() =>
            //{
            //    this.mainPages.Invoke(new Action(delegate ()
            //    {


            //    }));
            //});
            //t.Start();
        }
Example #20
0
        void OnHarris()
        {
            Mat image01 = Cv2.ImRead(Application.streamingAssetsPath + "/bryce_01.jpg");
            Mat image02 = Cv2.ImRead(Application.streamingAssetsPath + "/bryce_02.jpg");

            Mat image1 = new Mat(), image2 = new Mat();

            Cv2.CvtColor(image01, image1, ColorConversionCodes.RGB2GRAY);
            Cv2.CvtColor(image02, image2, ColorConversionCodes.RGB2GRAY);

            KeyPoint[] keyPoint1 = null, keyPoint2 = null;

            using (var gFTTDetector = GFTTDetector.Create(500))
                using (var orb = ORB.Create(20))
                    using (Mat descriptor1 = new Mat())
                        using (Mat descriptor2 = new Mat())
                            using (var matcher = new BFMatcher(NormTypes.L2))
                            {
                                keyPoint1 = gFTTDetector.Detect(image1);
                                keyPoint2 = gFTTDetector.Detect(image2);

                                orb.Compute(image1, ref keyPoint1, descriptor1);
                                orb.Compute(image2, ref keyPoint2, descriptor2);

                                List <DMatch> goodMatchePoints = new List <DMatch>();
                                DMatch[][]    dm = matcher.KnnMatch(descriptor1, descriptor2, 2);

                                #region matched 30
                                //for (int i = 0; i < dm.Length; i++)
                                //{
                                //    if (dm[i][0].Distance < 0.6 * dm[i][1].Distance)
                                //    {
                                //        goodMatchePoints.Add(dm[i][0]);
                                //    }
                                //}
                                #endregion
                                #region matched 48
                                float minRatio = 1.0f / 1.5f;
                                for (int i = 0; i < dm.Length; i++)
                                {
                                    DMatch bestMatch   = dm[i][0];
                                    DMatch betterMatch = dm[i][1];

                                    float distanceRatio = bestMatch.Distance / betterMatch.Distance;

                                    if (distanceRatio < minRatio)
                                    {
                                        goodMatchePoints.Add(bestMatch);
                                    }
                                }
                                #endregion

                                var dstMat = new Mat();
                                Debug.Log(string.Format("matchePoints has {0} items", goodMatchePoints.Count));
                                Cv2.DrawMatches(image01, keyPoint1, image02, keyPoint2, goodMatchePoints, dstMat);
                                t2d = VideoDetectorExample.Utils.MatToTexture2D(dstMat);
                            }
            Sprite dst_sp = Sprite.Create(t2d, new UnityEngine.Rect(0, 0, t2d.width, t2d.height), Vector2.zero);

            SrcSprite.sprite = dst_sp;
        }
Example #21
0
 /// <summary>
 /// Initialize the related parameters
 /// </summary>
 /// <param name="sender"></param>
 /// <param name="e"></param>
 private void _3DMeasure_Load(object sender, EventArgs e)
 {
     sgbm = new StereoSGBM(0, 64, 15, 0, 0, 1, 60, 10, 100, 32);
     gFTT = new GFTTDetector(4, 0.01, 1, 3, false, 0.04);                               //initialize the GFTT detector
     ObjectPointsCal_Timer = new System.Threading.Timer(ObjectPointsCal, null, 0, 100); //initialize and start the timer
 }
Example #22
0
    public override void CalculateWeights(Mat image, ImageFeatureMap target)
    {
        DetectionTime = 0;
        if (!Enabled)
        {
            return;
        }
        byte[]   status;
        float[]  errTracker;
        PointF[] features;



        float W = image.Width;
        float H = image.Height;

        if (_isFirstFrame ||
            _prevImage.Width != image.Width ||
            _prevImage.Height != image.Height)
        {
            _prevImage    = image.Clone();
            _isFirstFrame = false;
            return;
        }

        DateTime t = DateTime.Now;

        if (_currPoints == null || _currPoints.Length < 50 ||
            (t - _time).TotalSeconds > Params.OFParameters.FeaturesUpdateTime)
        {
            _time = t;
            UnityEngine.Debug.Log("Recalculating feature points");

            GFTTDetector _GFTTdetector = new GFTTDetector(Params.OFParameters.MaxFeaturesCount);
            MKeyPoint[]  featPoints    = _GFTTdetector.Detect(image, null);

            _prevPoints = new PointF[featPoints.Length];
            int i = 0;
            foreach (var k in featPoints)
            {
                _prevPoints [i] = k.Point;
                ++i;
            }

            _currPoints = _prevPoints;
        }

        Stopwatch watch;

        watch = Stopwatch.StartNew();
        try{
            _criteria.Type    = Params.OFParameters.CriteriaType;
            _criteria.MaxIter = Params.OFParameters.Iterations;
            _criteria.Epsilon = Params.OFParameters.Epsilon;
            CvInvoke.CalcOpticalFlowPyrLK(_prevImage, image, _prevPoints, new Size((int)Params.OFParameters.SearchWindow.x, (int)Params.OFParameters.SearchWindow.y),
                                          Params.OFParameters.Level, _criteria, out features, out status, out errTracker);

            //calculate homography matrix
            CvInvoke.FindHomography(_prevPoints, features, _homography, Emgu.CV.CvEnum.HomographyMethod.Default);
        }catch (Exception e) {
            UnityEngine.Debug.Log(e.Message);
            return;
        }
        watch.Stop();
        DetectionTime = watch.ElapsedMilliseconds;

        //calculate homography transformation, and remove it from points
        Matrix4x4 m = new Matrix4x4();

        m.SetRow(0, new Vector4((float)_homography[0, 0], (float)_homography[0, 1], 0, (float)_homography[0, 2]));
        m.SetRow(1, new Vector4((float)_homography[1, 0], (float)_homography[1, 1], 0, (float)_homography[1, 2]));
        m.SetRow(2, new Vector4(0, 0, 1, 0));
        m.SetRow(3, new Vector4((float)_homography[2, 0], (float)_homography[2, 1], 0, (float)_homography[2, 2]));
        Matrix4x4 homographyInverse = Matrix4x4.Inverse(m);         //get the inverse


        //next, fill weight map


        Vector2 direction = new Vector2((float)_homography [0, 2], (float)_homography [1, 2]);

        direction.Normalize();
        _opticalFlow.Clear();
        int count = 0;

        for (int i = 0; i < features.Length; ++i)
        {
            Vector3 dp   = m * new Vector3(features [i].X, features [i].Y, 0);
            float   dist = (dp.x - _prevPoints [i].X) * (dp.x - _prevPoints [i].X) +
                           (dp.y - _prevPoints [i].Y) * (dp.y - _prevPoints [i].Y);
            if (dist > Params.OFParameters.MinDistance * Params.OFParameters.MinDistance &&
                dist < Params.OFParameters.MaxDistance * Params.OFParameters.MaxDistance)
            {
                //check if the calculated point belongs to the object motion or to camera motion
                //Vector3 d = new Vector3 (features [i].X - dp.x, features [i].Y - dp.y,0);

                /*	float len= Mathf.Sqrt(dist);//dp.magnitude;
                 *      if (len < Params.OFParameters.FeatureSimilarityThreshold) {
                 *              continue;//skip this point, correlated with camera motion
                 *      }*/
                /*
                 * Vector3 d = new Vector3 (features [i].X - _currPoints [i].X, features [i].Y - _currPoints [i].Y,0);
                 * d.Normalize ();
                 * float dp = Vector2.Dot (d, direction);
                 * if (dp > Params.OFParameters.FeatureSimilarityThreshold) {
                 *      continue;//skip this point, correlated with camera motion
                 * }*/
                // add this point
                ++count;
                float x = features [i].X / (float)W;
                float y = (features [i].Y / (float)H);
                if (x > 1 || x < 0 || y > 1 || y < 0)
                {
                    continue;
                }
                float w = 20 / W;              // Mathf.Abs(_currPoints [i].X - features [i].X)/W;
                float h = 20 / H;              //Mathf.Abs(_currPoints [i].Y - features [i].Y)/H;
                Rect  r = new Rect(x - w / 2.0f, y - h / 2.0f /*1-y-h*/, w, h);
                //target.SetWeight (x,1-y,1.0f);
                target.FillRectangle(r.x, r.y, r.width, r.height, 1);

                TrackedFeature f = new TrackedFeature();
                f.v1 = new Vector2(_currPoints[i].X / W, _currPoints[i].Y / H);
                f.v2 = new Vector2(features [i].X / W, features [i].Y / H);
                _opticalFlow.Add(f);
            }
        }

        if (count > features.Length / 10)
        {
            _featuresDetected = true;
        }
        else
        {
            _featuresDetected = false;
        }


        if (features != null)
        {
            lock (_objectsLock) {
                _prevPoints = _currPoints;
                _currPoints = features;
            }
        }

        _prevImage = image.Clone();
    }