Example #1
        private void GetCorners(Mat newFrame, Rectangle roi)
        {
            using (GFTTDetector detector = new GFTTDetector(1000, 0.01, 1, 3, false, 0.04))
            {
                Mat img = newFrame.Clone();

                // Build a mask that restricts detection to the central part of the ROI
                Image<Gray, byte> maskImg = new Image<Gray, byte>(img.Size);

                roi = new Rectangle((int)(roi.X + roi.Width * 0.25), (int)(roi.Y + roi.Height * 0.1), (int)(roi.Width * 0.5), (int)(roi.Height * 0.4));

                maskImg.Draw(roi, new Gray(255), -1, LineType.FourConnected);

                var keypoints = detector.Detect(img, maskImg);

                prePoints = new PointF[keypoints.Length];
                for (int i = 0; i < keypoints.Length; i++)
                {
                    prePoints[i] = keypoints[i].Point;
                }
            }
        }
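The positional arguments in these GFTTDetector constructor calls are easy to misread. Here is an annotated version of the call above; the parameter names follow OpenCV's goodFeaturesToTrack, which Emgu CV's GFTTDetector wraps (if your Emgu build names the parameters differently, pass them positionally):

        GFTTDetector detector = new GFTTDetector(
            maxCorners: 1000,         // keep at most this many corners
            qualityLevel: 0.01,       // minimum accepted corner quality, relative to the best corner
            minDistance: 1,           // minimum Euclidean distance between returned corners (pixels)
            blockSize: 3,             // neighborhood size for the gradient covariance matrix
            useHarrisDetector: false, // false = Shi-Tomasi min-eigenvalue score, true = Harris score
            k: 0.04);                 // free parameter of the Harris score (unused when false)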
Example #2
        public MKeyPoint[] Maspointer(Image<Bgr, byte> image, int mode)
        {
            // Convert once; all three detectors operate on the grayscale Mat
            Mat gray = image.Convert<Gray, byte>().Mat;

            switch (mode)
            {
            case 0:
            {
                GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);
                return detector.Detect(gray);
            }

            case 1:
            {
                Brisk detector = new Brisk();
                return detector.Detect(gray);
            }

            case 2:
            {
                FastFeatureDetector detector = new FastFeatureDetector();
                return detector.Detect(gray);
            }
            }
            return null;
        }
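A hypothetical call site (the image path is illustrative): mode 0 selects GFTT, 1 BRISK, 2 FAST, and anything else returns null:

        // Hypothetical usage of Maspointer; path and mode are illustrative.
        Image<Bgr, byte> img = new Image<Bgr, byte>("scene.png");
        MKeyPoint[] keypoints = Maspointer(img, 0); // 0 = GFTT, 1 = BRISK, 2 = FAST
        foreach (MKeyPoint kp in keypoints)
        {
            CvInvoke.Circle(img, Point.Round(kp.Point), 3, new MCvScalar(255, 0, 0), 2);
        }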
Example #3
File: Form1.cs Project: swishposh/aoci7
        private void button2_Click(object sender, EventArgs e)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            MKeyPoint[] GFP1 = detector.Detect(baseImg.Convert<Gray, byte>().Mat);

            // build the array of source-image feature points (positions only)
            PointF[] srcPoints = new PointF[GFP1.Length];
            for (int i = 0; i < GFP1.Length; i++)
            {
                srcPoints[i] = GFP1[i].Point;
            }
            PointF[] destPoints;                       // positions of the points in the transformed image
            byte[]   status;                           // per-point status (found / not found)
            float[]  trackErrors;                      // tracking errors

            // locate the feature points in the new image with the pyramidal Lucas-Kanade method
            CvInvoke.CalcOpticalFlowPyrLK(
                baseImg.Convert<Gray, byte>().Mat,     // source image
                twistedImg.Convert<Gray, byte>().Mat,  // transformed image
                srcPoints,                             // feature points of the source image
                new Size(20, 20),                      // search window size
                5,                                     // pyramid levels
                new MCvTermCriteria(20, 1),            // termination criteria for the optical flow computation
                out destPoints,                        // positions of the feature points in the new image
                out status,                            // contains 1 for points whose flow was found
                out trackErrors                        // contains the tracking errors
                );

            // estimate the homography that maps the tracked points back onto the source points
            Mat homographyMatrix = CvInvoke.FindHomography(destPoints, srcPoints,
                                                           RobustEstimationAlgorithm.LMEDS);
            var destImage = new Image<Bgr, byte>(baseImg.Size);

            // warp the twisted image back into the base image's frame
            CvInvoke.WarpPerspective(twistedImg, destImage, homographyMatrix, destImage.Size);

            imageBox2.Image = destImage.Resize(640, 480, Inter.Linear);
        }
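CalcOpticalFlowPyrLK reports per-point success in status; feeding unmatched pairs into FindHomography degrades the estimate even with a robust method like LMEDS. A minimal filtering sketch using the variables from the example above (assumes System.Collections.Generic is imported):

        // Keep only pairs whose flow was found (status[i] == 1) before
        // estimating the homography.
        List<PointF> goodSrc = new List<PointF>();
        List<PointF> goodDst = new List<PointF>();
        for (int i = 0; i < status.Length; i++)
        {
            if (status[i] == 1)
            {
                goodSrc.Add(srcPoints[i]);
                goodDst.Add(destPoints[i]);
            }
        }
        Mat homographyMatrix = CvInvoke.FindHomography(goodDst.ToArray(), goodSrc.ToArray(),
                                                       RobustEstimationAlgorithm.LMEDS);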
Example #4
File: Func.cs Project: Sidsand/AOCI_lab7
        public Image <Bgr, byte> GFTT()
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            MKeyPoint[] GFP1 = detector.Detect(baseImage.Convert <Gray, byte>().Mat);

            //создание массива характерных точек исходного изображения (только позиции)
            PointF[] srcPoints = new PointF[GFP1.Length];
            for (int i = 0; i < GFP1.Length; i++)
            {
                srcPoints[i] = GFP1[i].Point;
            }
            PointF[] destPoints;                         //массив для хранения позиций точек на изменённом изображении
            byte[]   status;                             //статус точек (найдены/не найдены)
            float[]  trackErrors;                        //ошибки
                                                         //вычисление позиций характерных точек на новом изображении методом Лукаса-Канаде
            CvInvoke.CalcOpticalFlowPyrLK(
                baseImage.Convert <Gray, byte>().Mat,    //исходное изображение
                twistedImage.Convert <Gray, byte>().Mat, //изменённое изображение
                srcPoints,                               //массив характерных точек исходного изображения
                new Size(20, 20),                        //размер окна поиска
                5,                                       //уровни пирамиды
                new MCvTermCriteria(20, 1),              //условие остановки вычисления оптического потока
                out destPoints,                          //позиции характерных точек на новом изображении
                out status,                              //содержит 1 в элементах, для которых поток был найден
                out trackErrors                          //содержит ошибки
                );


            var output = baseImage.Clone();

            foreach (MKeyPoint p in GFP1)
            {
                CvInvoke.Circle(output, Point.Round(p.Point), 3, new Bgr(Color.Blue).MCvScalar, 2);
            }

            var output2 = twistedImage.Clone();

            foreach (MKeyPoint p in GFP1)
            {
                CvInvoke.Circle(output2, Point.Round(p.Point), 3, new Bgr(Color.Blue).MCvScalar, 2);
            }

            return(output.Resize(640, 480, Inter.Linear));
        }
Example #5
        public int Init(Bitmap bm)
        {
            GFTTDetector gftt    = new GFTTDetector(maxPoints, 0.1, 1, 3, false, 0.04);
            Rectangle    rect    = new Rectangle(0, 0, bm.Width, bm.Height);
            BitmapData   bmpData = bm.LockBits(rect, ImageLockMode.ReadWrite, bm.PixelFormat);

            // Wrap the locked bitmap memory in a Mat without copying
            // (assumes a 24-bit, 3-channel pixel format).
            Mat m = new Mat(bm.Height, bm.Width, Emgu.CV.CvEnum.DepthType.Cv8U, 3, bmpData.Scan0, bmpData.Stride);

            Rectangle roi = new Rectangle(searchLocation, searchSize);

            keyPoints = gftt.Detect(new Mat(m, roi));

            // Clone before unlocking: after UnlockBits the memory behind
            // bmpData.Scan0 (and therefore behind m) is no longer valid.
            PrevImg = m.Clone();
            bm.UnlockBits(bmpData);

            // Shift the detected points from ROI coordinates back to image coordinates
            prevPoints = new PointF[keyPoints.Length];
            for (int j = 0; j < keyPoints.Length; j++)
            {
                prevPoints[j] = new PointF(keyPoints[j].Point.X + roi.Left, keyPoints[j].Point.Y + roi.Top);
            }

            return 0;
        }
Example #6
        public Image <Bgr, byte> ReturnLucas(Image <Bgr, byte> image, Image <Bgr, byte> twistedImg, out Image <Bgr, byte> defImg)
        {
            GFTTDetector detector = new GFTTDetector(40, 0.01, 5, 3, true);

            MKeyPoint[] GFP1 = detector.Detect(image.Convert <Gray, byte>().Mat);
            foreach (MKeyPoint p in GFP1)
            {
                CvInvoke.Circle(image, Point.Round(p.Point), 5, new Bgr(Color.LawnGreen).MCvScalar, 2);
            }
            defImg = image;

            PointF[] srcPoints = new PointF[GFP1.Length];

            for (int i = 0; i < GFP1.Length; i++)
            {
                srcPoints[i] = GFP1[i].Point;
            }
            PointF[] destPoints;                       //массив для хранения позиций точек на изменённом изображении
            byte[]   status;                           //статус точек (найдены/не найдены)
            float[]  trackErrors;                      //ошибки
                                                       //вычисление позиций характерных точек на новом изображении методом Лукаса-Канаде
            CvInvoke.CalcOpticalFlowPyrLK(
                image.Convert <Gray, byte>().Mat,      //исходное изображение
                twistedImg.Convert <Gray, byte>().Mat, //изменённое изображение
                srcPoints,                             //массив характерных точек исходного изображения
                new Size(20, 20),                      //размер окна поиска
                5,                                     //уровни пирамиды
                new MCvTermCriteria(20, 1),            //условие остановки вычисления оптического потока
                out destPoints,                        //позиции характерных точек на новом изображении
                out status,                            //содержит 1 в элементах, для которых поток был найден
                out trackErrors                        //содержит ошибки
                );

            //for (int i = 0; i < destPoints.Length; i++)
            //    srcPoints[i] = GFP1[i].Point;
            foreach (PointF p in destPoints)
            {
                CvInvoke.Circle(twistedImg, Point.Round(p), 5, new Bgr(Color.LawnGreen).MCvScalar, 2);
            }
            return(twistedImg);
        }
Example #7
        private void DrawKeypoints()
        {
            try
            {
                if (imgList["Input"] == null)
                {
                    return;
                }

                var img  = imgList["Input"].Clone();
                var gray = img.Convert<Gray, byte>();

                GFTTDetector detector = new GFTTDetector(2000, 0.06);
                var          corners  = detector.Detect(gray);

                dt.Rows.Clear();
                foreach (MKeyPoint pt in corners)
                {
                    dt.Rows.Add(pt.ClassId,
                                pt.Point.ToString(),
                                pt.Angle,
                                pt.Size,
                                pt.Octave,
                                pt.Response);
                }

                Mat outimg = new Mat();
                Features2DToolbox.DrawKeypoints(img, new VectorOfKeyPoint(corners), outimg, new Bgr(0, 0, 255));

                imageBoxEx1.Image        = outimg.ToBitmap();
                dataGridView1.DataSource = dt;
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #8
        // Calculate optical flow using the pyramidal Lucas-Kanade algorithm
        public void PyrLkOpticalFlow(Image<Gray, byte> prevFrame, Image<Gray, byte> nextFrame)
        {
            // Detect good features to track in the previous frame
            GFTTDetector detector = new GFTTDetector(30, 0.01, 10, 3, false, 0.04);

            MKeyPoint[]     fp1      = detector.Detect(prevFrame, null);
            VectorOfPointF  vp1      = new VectorOfPointF(fp1.Select(x => x.Point).ToArray());
            VectorOfPointF  vp2      = new VectorOfPointF(vp1.Size);
            VectorOfByte    vstatus  = new VectorOfByte(vp1.Size);
            VectorOfFloat   verr     = new VectorOfFloat(vp1.Size);
            Size            winsize  = new Size(prevFrame.Width, prevFrame.Height); // unusually large; see the note below
            int             maxLevel = 1; // if 0, only the original image is used (no pyramid)
            MCvTermCriteria criteria = new MCvTermCriteria(10, 1);

            try
            {
                CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);
            }
            catch (Exception e)
            {
                Console.WriteLine(e.Message);
            }
        }
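A search window the size of the whole frame, as above, is atypical: the window is the local neighborhood matched at each pyramid level, so large windows are slow and blur the motion estimate. Conventional values (illustrative, not from the original project) look like this:

            // More conventional PyrLK parameters (illustrative values):
            Size            winsize  = new Size(21, 21);     // small local search window
            int             maxLevel = 3;                    // let the pyramid absorb large motions
            MCvTermCriteria criteria = new MCvTermCriteria(30, 0.01);
            CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, vp1, vp2, vstatus, verr, winsize, maxLevel, criteria);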
Example #9
    public override void CalculateWeights(Mat image, ImageFeatureMap target)
    {
        DetectionTime = 0;
        if (!Enabled)
        {
            return;
        }
        byte[]   status;
        float[]  errTracker;
        PointF[] features;

        float W = image.Width;
        float H = image.Height;

        if (_isFirstFrame ||
            _prevImage.Width != image.Width ||
            _prevImage.Height != image.Height)
        {
            _prevImage    = image.Clone();
            _isFirstFrame = false;
            return;
        }

        DateTime t = DateTime.Now;

        if (_currPoints == null || _currPoints.Length < 50 ||
            (t - _time).TotalSeconds > Params.OFParameters.FeaturesUpdateTime)
        {
            _time = t;
            UnityEngine.Debug.Log("Recalculating feature points");

            GFTTDetector _GFTTdetector = new GFTTDetector(Params.OFParameters.MaxFeaturesCount);
            MKeyPoint[]  featPoints    = _GFTTdetector.Detect(image, null);

            _prevPoints = new PointF[featPoints.Length];
            int i = 0;
            foreach (var k in featPoints)
            {
                _prevPoints [i] = k.Point;
                ++i;
            }

            _currPoints = _prevPoints;
        }

        Stopwatch watch = Stopwatch.StartNew();
        try{
            _criteria.Type    = Params.OFParameters.CriteriaType;
            _criteria.MaxIter = Params.OFParameters.Iterations;
            _criteria.Epsilon = Params.OFParameters.Epsilon;
            CvInvoke.CalcOpticalFlowPyrLK(_prevImage, image, _prevPoints, new Size((int)Params.OFParameters.SearchWindow.x, (int)Params.OFParameters.SearchWindow.y),
                                          Params.OFParameters.Level, _criteria, out features, out status, out errTracker);

            //calculate homography matrix
            CvInvoke.FindHomography(_prevPoints, features, _homography, Emgu.CV.CvEnum.HomographyMethod.Default);
        }catch (Exception e) {
            UnityEngine.Debug.Log(e.Message);
            return;
        }
        watch.Stop();
        DetectionTime = watch.ElapsedMilliseconds;

        //calculate homography transformation, and remove it from points
        Matrix4x4 m = new Matrix4x4();

        m.SetRow(0, new Vector4((float)_homography[0, 0], (float)_homography[0, 1], 0, (float)_homography[0, 2]));
        m.SetRow(1, new Vector4((float)_homography[1, 0], (float)_homography[1, 1], 0, (float)_homography[1, 2]));
        m.SetRow(2, new Vector4(0, 0, 1, 0));
        m.SetRow(3, new Vector4((float)_homography[2, 0], (float)_homography[2, 1], 0, (float)_homography[2, 2]));
        Matrix4x4 homographyInverse = Matrix4x4.Inverse(m);         //get the inverse


        // Next, fill the weight map.
        _opticalFlow.Clear();
        int count = 0;

        for (int i = 0; i < features.Length; ++i)
        {
            Vector3 dp   = m * new Vector3(features [i].X, features [i].Y, 0);
            float   dist = (dp.x - _prevPoints [i].X) * (dp.x - _prevPoints [i].X) +
                           (dp.y - _prevPoints [i].Y) * (dp.y - _prevPoints [i].Y);
            if (dist > Params.OFParameters.MinDistance * Params.OFParameters.MinDistance &&
                dist < Params.OFParameters.MaxDistance * Params.OFParameters.MaxDistance)
            {
                // The displacement passed the distance gates: treat it as object
                // motion rather than camera motion and record the point
                // (earlier similarity-threshold experiments removed).
                float x = features[i].X / W;
                float y = features[i].Y / H;
                if (x > 1 || x < 0 || y > 1 || y < 0)
                {
                    continue;
                }
                ++count;                       // count only points that land inside the frame
                float w = 20 / W;              // fixed-size cell around the feature, in normalized coordinates
                float h = 20 / H;
                Rect  r = new Rect(x - w / 2.0f, y - h / 2.0f, w, h);
                target.FillRectangle(r.x, r.y, r.width, r.height, 1);

                TrackedFeature f = new TrackedFeature();
                f.v1 = new Vector2(_currPoints[i].X / W, _currPoints[i].Y / H);
                f.v2 = new Vector2(features [i].X / W, features [i].Y / H);
                _opticalFlow.Add(f);
            }
        }

        _featuresDetected = count > features.Length / 10;


        if (features != null)
        {
            lock (_objectsLock) {
                _prevPoints = _currPoints;
                _currPoints = features;
            }
        }

        _prevImage = image.Clone();
    }
Example #10
 private void ObjectPointsCal(object state)
 {
     if (BM_CAL_FLAG)                                // compute the BM disparity map
     {
         // grab the source images
         Data.leftImg.CopyTo(bm_lsrc);
         Data.rightImg.CopyTo(bm_rsrc);
         // convert to grayscale: the BM algorithm only works on grayscale images
         CvInvoke.CvtColor(bm_lsrc, bm_lsrc, ColorConversion.Bgr2Gray);
         CvInvoke.CvtColor(bm_rsrc, bm_rsrc, ColorConversion.Bgr2Gray);
         bm.Compute(bm_lsrc, bm_rsrc, bm_distImg);
         bm_distImg.ConvertTo(bm_distImg, DepthType.Cv32F, 1.0 / 16);                           // divide by 16 to get the true disparity map (this data is reused below)
         bm_distImg8U = new Mat(bm_distImg.Size, DepthType.Cv8U, 1);
         CvInvoke.Normalize(bm_distImg, bm_distImg8U, 0, 255, NormType.MinMax, DepthType.Cv8U); // normalize for display
         this.imageBox1.Image = bm_distImg8U;
     }
     if (SHOWCONTOURS_FLAG)                         // contour and corner computation
     {
         contourImg.SetTo(new MCvScalar(0, 0, 0));
         Data.leftImg.CopyTo(contourSrc);
         CvInvoke.CvtColor(contourSrc, grayImg, ColorConversion.Bgr2Gray);
         CvInvoke.Canny(grayImg, cannyImg, 100, 200, 3, false);                            // binary edge image
         CvInvoke.FindContours(cannyImg, contours, hierarchy, RetrType.External, ChainApproxMethod.ChainApproxNone);
         CvInvoke.DrawContours(contourImg, contours, -1, new MCvScalar(255, 255, 255), 2); // draw all contours
         cornerPoints = gFTT.Detect(grayImg);                                              // detect corners
         for (int i = 0; i < cornerPoints.Length; i++)
         {
             Point pt = new Point();
             pt.X = (int)cornerPoints[i].Point.X;
             pt.Y = (int)cornerPoints[i].Point.Y;
             CvInvoke.Circle(contourImg, pt, 3, new MCvScalar(0, 0, 255), -1); // draw all corners
         }
         this.imageBox2.Image = contourImg;                                    // show the image
     }
     if (OBJPOINTS_CAL_FLAG)                        // compute the 3-D coordinates of the corners and show them in the TextBox
     {
         // two possible methods
         Image<Gray, Single> bm_distImg_C = bm_distImg.ToImage<Gray, Single>();
         try
         {
             for (int i = 0; i < cornerPoints.Length; i++)
             {
                 MCvPoint3D32f[] ptf = new MCvPoint3D32f[1];                     // converted corner (remember to clear the vector afterwards)
                 ptf[0].X = cornerPoints[i].Point.X;
                 ptf[0].Y = cornerPoints[i].Point.Y;
                 if (ptf[0].X >= bm_distImg_C.Width)                             // bounds check
                 {
                     ptf[0].X = bm_distImg_C.Width - 1;
                 }
                 if (ptf[0].Y >= bm_distImg_C.Height)
                 {
                     ptf[0].Y = bm_distImg_C.Height - 1;
                 }
                 // Image<,>.Data is indexed [row, column, channel], i.e. [y, x, 0]
                 ptf[0].Z = bm_distImg_C.Data[(int)ptf[0].Y, (int)ptf[0].X, 0];  // the corner's disparity value (in pixels)
                 cornerPoints_vec.Push(ptf);                                     // store the converted point
             }
             CvInvoke.PerspectiveTransform(cornerPoints_vec, objXYZ, Data.Q);    // perspective transform: sparse feature points in camera coordinates
             // method 2 (note the same [row, column, channel] indexing applies):
             //CvInvoke.ReprojectImageTo3D(bm_distImg, bm_image3D, Data.Q, true);
             //for (int i = 0; i < cornerPoints.Length; i++)
             //{
             //    MCvPoint3D32f[] ptf = new MCvPoint3D32f[1];
             //    ptf[0].X = this.bm_image3D.Data[(int)cornerPoints[i].Point.Y, (int)cornerPoints[i].Point.X, 0];
             //    ptf[0].Y = this.bm_image3D.Data[(int)cornerPoints[i].Point.Y, (int)cornerPoints[i].Point.X, 1];
             //    ptf[0].Z = this.bm_image3D.Data[(int)cornerPoints[i].Point.Y, (int)cornerPoints[i].Point.X, 2];
             //    objXYZ.Push(ptf);               // store the computed 3-D point
             //}
             // write the data; must go through Invoke on the UI thread
             this.Invoke(new UpdateTextBox(UpdateTextBoxFunc), new object[] { });
         }
         catch (Exception e)
         {
             Data.LogString = "[error]  " + e.Message;
         }
     }
     cornerPoints_vec.Clear();                      // clear the vector, otherwise it keeps accumulating
 }
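For reference, PerspectiveTransform with the 4x4 Q matrix from stereoRectify applies the standard reprojection [X Y Z W]^T = Q * [x y d 1]^T followed by division by W. A hand-rolled equivalent for a single point (a sketch; assumes Q is stored as an Emgu Matrix<double>):

 // Manual equivalent of the Q-matrix reprojection for one (x, y, disparity) point.
 static MCvPoint3D32f ReprojectPoint(Matrix<double> Q, double x, double y, double d)
 {
     double X = Q[0, 0] * x + Q[0, 1] * y + Q[0, 2] * d + Q[0, 3];
     double Y = Q[1, 0] * x + Q[1, 1] * y + Q[1, 2] * d + Q[1, 3];
     double Z = Q[2, 0] * x + Q[2, 1] * y + Q[2, 2] * d + Q[2, 3];
     double W = Q[3, 0] * x + Q[3, 1] * y + Q[3, 2] * d + Q[3, 3];
     return new MCvPoint3D32f((float)(X / W), (float)(Y / W), (float)(Z / W));
 }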
Example #11
        void ProcessFrame(object sender, EventArgs e)
        {
            Mat frame         = _cameraCapture.QueryFrame();
            Mat smoothedFrame = new Mat();

            CvInvoke.GaussianBlur(frame, smoothedFrame, new Size(3, 3), 1);    // Gaussian blur
            CvInvoke.CvtColor(frame, curgray, ColorConversion.Bgr2Gray);       // grayscale
            goodFeaturesToTrack = new GFTTDetector(maxCount, qLevel, minDist); // initialize the keypoint detector
            frame.CopyTo(KeyPointPic);

            MKeyPoint[] keypoint = goodFeaturesToTrack.Detect(curgray);        // detect keypoints
            for (int i = 0; i < keypoint.Length; i++)
            {
                System.Drawing.Point point = System.Drawing.Point.Truncate(keypoint[i].Point); // keypoint position as a Point
                CvInvoke.Circle(KeyPointPic, point, 3, new MCvScalar(0, 0, 255), 1);
            }

            if (prevFeature.Count() < 10)                                    // too few feature points left; re-detect
            {
                MKeyPoint[] keypoints = goodFeaturesToTrack.Detect(curgray);
                AddNewPoint = keypoints.Length;
                Array.Resize(ref prevFeature, keypoints.Length);
                Array.Resize(ref initial, keypoints.Length);
                for (int i = 0; i < keypoints.Length; i++)
                {
                    System.Drawing.Point point = System.Drawing.Point.Truncate(keypoints[i].Point);
                    prevFeature[i] = point;
                    initial[i]     = point;
                    CvInvoke.Circle(curgray, point, 3, new MCvScalar(0, 0, 255), 1);
                }
            }
            if (pregray.Size.IsEmpty)
            {
                curgray.CopyTo(pregray);                      // first frame
            }
            MCvTermCriteria termcrit = new MCvTermCriteria(6);

            CvInvoke.CalcOpticalFlowPyrLK(pregray, curgray, prevFeature, curgray.Size, 2, termcrit, out currFeature, out status, out err, 0, 0.0001);
            AddNewPoint = prevFeature.Count();

            // drop badly tracked feature points
            int k = 0;
            for (int i = 0; i < currFeature.Count(); i++)
            {
                try
                {
                    if (acceptTrackedPoint(i))
                    {
                        initial[k]       = initial[i];
                        currFeature[k++] = currFeature[i];
                    }
                }
                catch { }
            }

            Array.Resize(ref currFeature, k);
            Array.Resize(ref initial, k);

            frame.CopyTo(Flow);
            for (int i = 0; i < currFeature.Count(); i++)
            {
                CvInvoke.Line(Flow, Point.Truncate(initial[i]), Point.Truncate(currFeature[i]), new Bgr(Color.DarkOrange).MCvScalar, 2);
            }

            imageBox1.Image = frame;
            imageBox2.Image = KeyPointPic;
            imageBox3.Image = Flow;

            curgray.CopyTo(pregray);
            Array.Resize(ref prevFeature, currFeature.Count());
            for (int i = 0; i < currFeature.Count(); i++)
            {
                prevFeature[i] = currFeature[i];
            }
        }
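acceptTrackedPoint is not shown in this example. A plausible implementation (hypothetical, modeled on the classic OpenCV tracking demo this code resembles) keeps points whose flow was found and which actually moved:

            // Hypothetical helper: keep points whose flow was found (status[i] == 1)
            // and which moved more than ~2 pixels since the previous frame.
            bool acceptTrackedPoint(int i)
            {
                return status[i] == 1 &&
                       (Math.Abs(prevFeature[i].X - currFeature[i].X) +
                        Math.Abs(prevFeature[i].Y - currFeature[i].Y)) > 2;
            }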
Example #12
        private MKeyPoint[] GetGFTPoints(Image<Bgr, byte> img)
        {
            // Convert allocates a new image, so no defensive Copy() is needed
            MKeyPoint[] points = detector.Detect(img.Convert<Gray, byte>().Mat);

            return points;
        }
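GetGFTPoints relies on a detector field that the snippet does not show; a minimal declaration consistent with the other examples would be (constructor arguments are illustrative):

        // Hypothetical field backing GetGFTPoints; the original project's
        // constructor arguments are not shown.
        private readonly GFTTDetector detector = new GFTTDetector(500, 0.01, 5, 3, false, 0.04);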
Example #13
        private void ProcessFrame(object sender, EventArgs arg)
        {
            if (capture != null && capture.Ptr != IntPtr.Zero)
            {
                // Retrieve video from the camera and store it in frame
                Mat frame = new Mat();
                capture.Retrieve(frame);

                // Copy frame to the result frame
                Mat ResultFrame = new Mat();
                frame.CopyTo(ResultFrame);

                // Convert the captured frame to grayscale
                nextFrame = new Mat();
                CvInvoke.CvtColor(frame, nextFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);

                // If there is a previous frame, detect motion against it
                if (!prevFrameEmpty)
                {
                    // Detect keypoints with the good-features-to-track detector
                    VectorOfKeyPoint prevFeatures = new VectorOfKeyPoint(gFTTDetector.Detect(prevFrame));

                    // Store the keypoint positions as floating-point coordinates
                    PointF[] prevPts = new PointF[prevFeatures.Size];
                    for (int i = 0; i < prevFeatures.Size; i++)
                    {
                        prevPts[i] = prevFeatures[i].Point;
                    }

                    // Variables that receive the optical flow results
                    PointF[] nextPts;
                    byte[]   status;
                    float[]  errors;

                    // Run pyramidal Lucas-Kanade optical flow between the previous and the next frame
                    CvInvoke.CalcOpticalFlowPyrLK(prevFrame, nextFrame, prevPts, new Size(25, 25), 1, new MCvTermCriteria(20, 0.03), out nextPts, out status, out errors);

                    // Visualize motion by drawing circles at points that moved
                    for (int i = 0; i < status.Length; i++)
                    {
                        if (status[i] != 1)
                        {
                            continue; // flow for this point was not found
                        }
                        Point  prevPt = new Point((int)prevPts[i].X, (int)prevPts[i].Y);
                        Point  nextPt = new Point((int)nextPts[i].X, (int)nextPts[i].Y);
                        double length = Math.Sqrt(Math.Pow(prevPt.X - nextPt.X, 2) + Math.Pow(prevPt.Y - nextPt.Y, 2));
                        if (length > 3)
                        {
                            CvInvoke.Circle(ResultFrame, nextPt, 1, new MCvScalar(0, 255, 0), 2);
                        }
                    }
                }

                // Keep the current frame as the previous frame for the next call
                prevFrame      = nextFrame.Clone();
                prevFrameEmpty = false;

                // Display the results
                captureImageBox.Image = frame.ToImage<Bgr, byte>().Bitmap;
                resultImageBox.Image  = ResultFrame.ToImage<Bgr, byte>().Bitmap;
            }
        }
Example #14
        /// <summary>
        /// Get the piece corners by finding the biggest rectangle of the contour points
        /// </summary>
        /// <param name="pieceID">ID of the piece</param>
        /// <param name="pieceImgBw">Black white image of piece</param>
        /// <param name="pieceImgColor">Color image of piece</param>
        /// <returns>List with corner points</returns>
        public override List <Point> FindCorners(string pieceID, Bitmap pieceImgBw, Bitmap pieceImgColor)
        {
            PluginFactory.LogHandle.Report(new LogEventInfo(pieceID + " Finding corners by finding the maximum rectangle within candidate points"));

            List <Point> corners = new List <Point>();

            // Find all dominant corner points using the GFTTDetector (this uses the Harris corner detector)
            GFTTDetector detector = new GFTTDetector(500, 0.01, 5, 2, true, 0.04);

            MKeyPoint[]  keyPoints       = detector.Detect(new Image <Gray, byte>(pieceImgBw));
            List <Point> possibleCorners = keyPoints.Select(k => Point.Round(k.Point)).ToList();

            if (possibleCorners.Count > 0)
            {
                // Sort the dominant corners by the distance to upper left corner of the bounding rectangle (0, 0) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedUpperLeft = new List <Point>(possibleCorners);
                possibleCornersSortedUpperLeft.Sort(new DistanceToPointComparer(new Point(0, 0), DistanceOrders.NEAREST_FIRST));
                double minCornerDistUpperLeft = Utils.Distance(possibleCornersSortedUpperLeft[0], new PointF(0, 0));
                possibleCornersSortedUpperLeft = possibleCornersSortedUpperLeft.Where(c => Utils.Distance(c, new PointF(0, 0)) < minCornerDistUpperLeft * PieceFindCornersMaxCornerDistRatio).ToList();

                // Sort the dominant corners by the distance to upper right corner of the bounding rectangle (ImageWidth, 0) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedUpperRight = new List <Point>(possibleCorners);
                possibleCornersSortedUpperRight.Sort(new DistanceToPointComparer(new Point(pieceImgBw.Width, 0), DistanceOrders.NEAREST_FIRST));
                double minCornerDistUpperRight = Utils.Distance(possibleCornersSortedUpperRight[0], new PointF(pieceImgBw.Width, 0));
                possibleCornersSortedUpperRight = possibleCornersSortedUpperRight.Where(c => Utils.Distance(c, new PointF(pieceImgBw.Width, 0)) < minCornerDistUpperRight * PieceFindCornersMaxCornerDistRatio).ToList();

                // Sort the dominant corners by the distance to lower right corner of the bounding rectangle (ImageWidth, ImageHeight) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedLowerRight = new List <Point>(possibleCorners);
                possibleCornersSortedLowerRight.Sort(new DistanceToPointComparer(new Point(pieceImgBw.Width, pieceImgBw.Height), DistanceOrders.NEAREST_FIRST));
                double minCornerDistLowerRight = Utils.Distance(possibleCornersSortedLowerRight[0], new PointF(pieceImgBw.Width, pieceImgBw.Height));
                possibleCornersSortedLowerRight = possibleCornersSortedLowerRight.Where(c => Utils.Distance(c, new PointF(pieceImgBw.Width, pieceImgBw.Height)) < minCornerDistLowerRight * PieceFindCornersMaxCornerDistRatio).ToList();

                // Sort the dominant corners by the distance to lower left corner of the bounding rectangle (0, ImageHeight) and keep only the corners that are near enough to this point
                List <Point> possibleCornersSortedLowerLeft = new List <Point>(possibleCorners);
                possibleCornersSortedLowerLeft.Sort(new DistanceToPointComparer(new Point(0, pieceImgBw.Height), DistanceOrders.NEAREST_FIRST));
                double minCornerDistLowerLeft = Utils.Distance(possibleCornersSortedLowerLeft[0], new PointF(0, pieceImgBw.Height));
                possibleCornersSortedLowerLeft = possibleCornersSortedLowerLeft.Where(c => Utils.Distance(c, new PointF(0, pieceImgBw.Height)) < minCornerDistLowerLeft * PieceFindCornersMaxCornerDistRatio).ToList();

                // Combine all possibleCorners from the four lists and discard all combination with too bad angle differences
                List <FindCornerRectangleScore> scores = new List <FindCornerRectangleScore>();
                for (int indexUpperLeft = 0; indexUpperLeft < possibleCornersSortedUpperLeft.Count; indexUpperLeft++)
                {
                    for (int indexUpperRight = 0; indexUpperRight < possibleCornersSortedUpperRight.Count; indexUpperRight++)
                    {
                        for (int indexLowerRight = 0; indexLowerRight < possibleCornersSortedLowerRight.Count; indexLowerRight++)
                        {
                            for (int indexLowerLeft = 0; indexLowerLeft < possibleCornersSortedLowerLeft.Count; indexLowerLeft++)
                            {
                                if (PluginFactory.CancelToken.IsCancellationRequested)
                                {
                                    PluginFactory.CancelToken.ThrowIfCancellationRequested();
                                }

                                // Possible corner combination
                                Point[] tmpCorners = new Point[]
                                {
                                    possibleCornersSortedUpperLeft[indexUpperLeft],         // the corners are ordered beginning in the upper left corner and going counter clock wise
                                    possibleCornersSortedLowerLeft[indexLowerLeft],
                                    possibleCornersSortedLowerRight[indexLowerRight],
                                    possibleCornersSortedUpperRight[indexUpperRight]
                                };
                                double angleDiff = RectangleDifferenceAngle(tmpCorners);
                                if (angleDiff > PieceFindCornersMaxAngleDiff)
                                {
                                    continue;
                                }

                                double area = CvInvoke.ContourArea(new VectorOfPoint(tmpCorners));
                                FindCornerRectangleScore score = new FindCornerRectangleScore()
                                {
                                    AngleDiff = angleDiff, RectangleArea = area, PossibleCorners = tmpCorners
                                };
                                scores.Add(score);
                            }
                        }
                    }
                }

                // Order the scores by rectangle area (biggest first) and take the PossibleCorners of the biggest rectangle as corners
                scores = scores.OrderByDescending(s => s.RectangleArea).ToList();
                if (scores.Count > 0)
                {
                    corners.AddRange(scores[0].PossibleCorners);
                }
            }

            if (corners.Count != 4)
            {
                PluginFactory.LogHandle.Report(new LogEventError(pieceID + " Failed to find correct number of corners. " + corners.Count + " found."));
            }

            if (PluginFactory.GetGeneralSettingsPlugin().SolverShowDebugResults)
            {
                using (Image<Rgb, byte> imgCorners = new Image<Rgb, byte>(pieceImgColor))
                {
                    Features2DToolbox.DrawKeypoints(imgCorners, new VectorOfKeyPoint(keyPoints), imgCorners, new Bgr(0, 0, 255));       // draw the dominant key points

                    for (int i = 0; i < corners.Count; i++)
                    {
                        CvInvoke.Circle(imgCorners, Point.Round(corners[i]), 4, new MCvScalar(0, Math.Max(255 - i * 50, 50), 0), 3);
                    }
                    PluginFactory.LogHandle.Report(new LogEventImage(pieceID + " Corners", imgCorners.Bitmap));
                    // note: the using block disposes imgCorners; no explicit Dispose() call is needed
                }
            }
            return corners;
        }
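RectangleDifferenceAngle is referenced above but not shown; from its use, it scores how far the candidate quadrilateral deviates from a rectangle. A hypothetical implementation summing each interior angle's deviation from 90 degrees:

        // Hypothetical helper: total deviation of the quadrilateral's interior
        // angles from 90 degrees (the original implementation is not shown).
        private static double RectangleDifferenceAngle(Point[] c)
        {
            double totalDeviation = 0;
            for (int i = 0; i < 4; i++)
            {
                Point prev = c[(i + 3) % 4], cur = c[i], next = c[(i + 1) % 4];
                double a1 = Math.Atan2(prev.Y - cur.Y, prev.X - cur.X);
                double a2 = Math.Atan2(next.Y - cur.Y, next.X - cur.X);
                double angle = Math.Abs(a1 - a2) * 180.0 / Math.PI;
                if (angle > 180)
                {
                    angle = 360 - angle;
                }
                totalDeviation += Math.Abs(90 - angle);
            }
            return totalDeviation;
        }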