Example #1
        public void TestBlobTracking()
        {
            Capture capture = new Capture();

            ImageViewer viewer = new ImageViewer();

            BlobTrackerAutoParam <Gray> param = new BlobTrackerAutoParam <Gray>();

            //param.BlobDetector = new BlobDetector(Emgu.CV.CvEnum.BLOB_DETECTOR_TYPE.CC);
            param.FGDetector = new FGDetector <Gray>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            //param.BlobTracker = new BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.CCMSPF);
            param.FGTrainFrames = 10;
            BlobTrackerAuto <Gray> tracker = new BlobTrackerAuto <Gray>(param);

            MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0);

            Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
            {
                tracker.Process(capture.QuerySmallFrame().PyrUp().Convert <Gray, Byte>());

                Image <Gray, Byte> img = tracker.ForgroundMask;
                //viewer.Image = tracker.GetForgroundMask();
                foreach (MCvBlob blob in tracker)
                {
                    img.Draw(Rectangle.Round(blob), new Gray(255.0), 2);
                    img.Draw(blob.ID.ToString(), ref font, Point.Round(blob.Center), new Gray(255.0));
                }
                viewer.Image = img;
            });
            viewer.ShowDialog();
        }
Example #2
        void Run()
        {
            try
            {
                _cameraCapture = new Capture();
            }
            catch (Exception e)
            {
                MessageBox.Show(e.Message);
                return;
            }

            _detector = new FGDetector <Bgr>(FORGROUND_DETECTOR_TYPE.FGD);

            _tracker = new BlobTrackerAuto <Bgr>();

            Application.Idle += ProcessFrame;
        }
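
Note: the Run() methods in Examples 2 through 6 only wire up an Application.Idle handler named ProcessFrame, which is not part of these snippets. The following is a minimal sketch of what such a handler could look like with the Emgu CV 2.x VideoSurveillance API, assuming the _cameraCapture, _detector and _tracker fields declared above and a hypothetical imageBox1 control for display; the body is illustrative and not taken from any of the original sources.

        void ProcessFrame(object sender, EventArgs e)
        {
            // grab the next frame; stop if the capture has ended or failed
            Image<Bgr, Byte> frame = _cameraCapture.QueryFrame();
            if (frame == null)
                return;

            // update the background model and fetch the foreground mask
            // (the property is spelled ForegroundMask in some later Emgu releases)
            _detector.Update(frame);
            Image<Gray, Byte> foregroundMask = _detector.ForgroundMask;

            // detect and track blobs in the current foreground
            _tracker.Process(frame, foregroundMask);

            // draw a bounding box around every tracked blob
            foreach (MCvBlob blob in _tracker)
            {
                frame.Draw(Rectangle.Round(blob), new Bgr(Color.Red), 2);
            }

            imageBox1.Image = frame;  // hypothetical Emgu.CV.UI.ImageBox used for display
        }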
Example #3
      void Run()
      {
         try
         {
            _cameraCapture = new Capture(@"C:\Users\Tom\Documents\Artemis\New Florence Wood Products\video_grab40.avi");
         }
         catch (Exception e)
         {
            MessageBox.Show(e.Message);
            return;
         }
         
         _detector = new FGDetector<Bgr>(FORGROUND_DETECTOR_TYPE.FGD);

         _tracker = new BlobTrackerAuto<Bgr>();

         Application.Idle += ProcessFrame;
      }
Example #4
        void Run()
        {
            try
            {
                _cameraCapture = new Capture();
            }
            catch (Exception e)
            {
                MessageBox.Show(e.Message);
                return;
            }

            _detector = new FGDetector <Bgr>(ForgroundDetectorType.Fgd);

            _tracker = new BlobTrackerAuto <Bgr>();

            Application.Idle += ProcessFrame;
        }
Example #5
        void Run()
        {
            try
            {
                _cameraCapture = new Capture(@"C:\Users\Tom\Documents\Artemis\New Florence Wood Products\video_grab40.avi");
            }
            catch (Exception e)
            {
                MessageBox.Show(e.Message);
                return;
            }

            _detector = new FGDetector <Bgr>(FORGROUND_DETECTOR_TYPE.FGD);

            _tracker = new BlobTrackerAuto <Bgr>();

            Application.Idle += ProcessFrame;
        }
Example #6
        void Run()
        {
            try
            {
                _cameraCapture = new Capture();
            }
            catch (Exception e)
            {
                MessageBox.Show(e.Message);
                return;
            }

            _detector = new FGDetector<Bgr>(FORGROUND_DETECTOR_TYPE.FGD);

            _tracker = new BlobTrackerAuto<Bgr>();

            Application.Idle += ProcessFrame;
        }
Example #7
        public void TestBlobColor()
        {
            int width = 300;
            int height = 400;
            Image<Bgr, Byte> bg = new Image<Bgr, byte>(width, height);
            bg.SetRandNormal(new MCvScalar(), new MCvScalar(100, 100, 100));

            Size size = new Size(width / 10, height / 10);
            Point topLeft = new Point((width >> 1) - (size.Width >> 1), (height >> 1) - (size.Height >> 1));

            Rectangle rect = new Rectangle(topLeft, size);

            BlobTrackerAutoParam<Bgr> param = new BlobTrackerAutoParam<Bgr>();
            param.BlobDetector = new BlobDetector(Emgu.CV.CvEnum.BLOB_DETECTOR_TYPE.CC);
            //param.FGDetector = new FGDetector<Gray>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            param.BlobTracker = new BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.MSFGS);
            param.FGTrainFrames = 5;
            BlobTrackerAuto<Bgr> tracker = new BlobTrackerAuto<Bgr>(param);

            //ImageViewer viewer = new ImageViewer();
            //viewer.Show();
            for (int i = 0; i < 20; i++)
            {
                using (Image<Bgr, Byte> img1 = bg.Copy())
                {
                    rect.Offset(5, 0); // shift the rectangle 5 pixels horizontally
                    img1.Draw(rect, new Bgr(Color.Red), -1);
                    tracker.Process(img1);
                    //viewer.Image = img1;
                }
            }

            //MCvBlob blob = tracker[0];
            //int id = blob.ID;
            //ImageViewer.Show(forground);
        }
Example #8
        public IEnumerator <ITask> ProcessImageFrame()
        {
            DateTime started = DateTime.Now;

            // default RGB image size is 640 x 480
            // setting another value (e.g. 320x240) in ...\TrackRoamer\TrackRoamerServices\Config\TrackRoamer.TrackRoamerBot.Kinect.Config.xml does not seem to work (it causes an NUI initialization failure).

            byte[] srcImageBits          = this.RawFrames.RawColorFrameData;
            int    srcImageBytesPerPixel = this.RawFrames.RawColorFrameInfo.BytesPerPixel;
            int    srcImageWidth         = this.RawFrames.RawColorFrameInfo.Width;
            int    srcImageHeight        = this.RawFrames.RawColorFrameInfo.Height;

            //if (ProcessedImageWidth != this.RawFrames.RawImageFrameData.Image.Width || ProcessedImageHeight != this.RawFrames.RawImageFrameData.Image.Height)
            //{
            //    ProcessedImageWidth = this.RawFrames.RawImageFrameData.Image.Width;
            //    ProcessedImageHeight = this.RawFrames.RawImageFrameData.Image.Height;

            //    ImageBitsProcessed = new byte[ProcessedImageWidth * ProcessedImageHeight * 4];
            //}

            // we need to convert the Kinect/MRDS service Image to an OpenCV Image - that means converting first to a BitmapSource and then to a System.Drawing.Bitmap:
            BitmapSource srcBitmapSource = BitmapSource.Create(srcImageWidth, srcImageHeight, 96, 96, PixelFormats.Bgr32, null, srcImageBits, srcImageWidth * srcImageBytesPerPixel);

            if (doSaveOneImage)
            {
                doSaveOneImage = false;

                SaveBitmapSource(srcBitmapSource);
            }

            Image <Bgr, byte>  img      = new Image <Bgr, byte>(BitmapSourceToBitmap(srcBitmapSource));
            Image <Gray, byte> gimg     = null;
            Image <Bgr, byte>  filtered = null;

            img._SmoothGaussian(11); // filter out noise

            // from here we can operate OpenCV / Emgu Image, at the end converting Image to BitmapProcessed:

            if (videoSurveillanceDecider == null)
            {
                videoSurveillanceDecider = new VideoSurveillanceDecider(img.Width, img.Height);
            }

            videoSurveillanceDecider.Init();

            if (doColorRecognition)
            {
                // color detection (T-shirt, cone...):

                //lock (videoSurveillanceDecider)
                //{
                //    videoSurveillanceDecider.purgeColorBlobs();
                //}

                filtered = img.Clone().SmoothBlur(13, 13);       //.SmoothGaussian(9);

                byte[, ,] data = filtered.Data;
                int    nRows = filtered.Rows;
                int    nCols = filtered.Cols;
                double averageBrightnessTmp = 0.0d;

                colorTresholdMain = averageBrightness / 2.0d;
                double colorFactorMain = 256.0d * colorFactor / averageBrightness;

                // leave only pixels with distinct red color in the "filtered":
                for (int i = nRows - 1; i >= 0; i--)
                {
                    for (int j = nCols - 1; j >= 0; j--)
                    {
                        // R component (2) must be greater than B (0) and G (1) by the colorFactor; dark areas are excluded:
                        double compR = data[i, j, 2];
                        double compG = data[i, j, 1];
                        double compB = data[i, j, 0];

                        double compSum = compR + compG + compB;         // brightness
                        averageBrightnessTmp += compSum;

                        if (compR > colorTresholdMain)                   //&& compG > colorTreshold && compB > colorTreshold)
                        {
                            compR = (compR / compSum) / colorFactorMain; // adjusted for brightness
                            compG = compG / compSum;
                            compB = compB / compSum;
                            if (compR > compG && compR > compB)
                            {
                                data[i, j, 0] = data[i, j, 1] = 0;    // B, G
                                data[i, j, 2] = 255;                  // R
                            }
                            else
                            {
                                data[i, j, 0] = data[i, j, 1] = data[i, j, 2] = 0;
                            }
                        }
                        else
                        {
                            // too dark.
                            data[i, j, 0] = data[i, j, 1] = data[i, j, 2] = 0;
                        }
                    }
                }

                averageBrightness = averageBrightnessTmp / (nRows * nCols * 3.0d); // save it for the next cycle

                gimg = filtered.Split()[2];                                        // make a grey image out of the Red channel, supposedly containing all red objects.

                // contour detection:

                int areaTreshold = 300;     // do not consider red contours with area in pixels less than areaTreshold.

                Contour <System.Drawing.Point> contours;
                MemStorage store = new MemStorage();

                // make a linked list of contours from the red spots on the screen:
                contours = gimg.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, store);
                CvInvoke.cvZero(gimg.Ptr);

                if (contours != null)
                {
                    CvInvoke.cvDrawContours(img.Ptr, contours.Ptr, new MCvScalar(255, 0, 0), new MCvScalar(255, 255, 255), 2, 2, Emgu.CV.CvEnum.LINE_TYPE.CV_AA, System.Drawing.Point.Empty);

                    List <ContourContainer> contourContainers = new List <ContourContainer>();

                    for (; contours != null; contours = contours.HNext)
                    {
                        contours.ApproxPoly(contours.Perimeter * 0.02, 0, contours.Storage);
                        if (contours.Area > areaTreshold)
                        {
                            contourContainers.Add(new ContourContainer()
                            {
                                contour = contours
                            });

                            //int centerX = contours.BoundingRectangle.X + contours.BoundingRectangle.Width / 2;
                            //int centerY = contours.BoundingRectangle.Y + contours.BoundingRectangle.Height / 2;
                            //img.Draw(contours.BoundingRectangle, new Bgr(64.0, 64.0, 255.0), 2);
                            //img.Draw(Math.Round((double)(((int)contours.Area) / 100) * 100).ToString(), ref _font, new System.Drawing.Point(centerX, centerY), new Bgr(64.0, 64.0, 255.0));
                        }
                    }

                    // for the VideoSurveillanceDecider to work, we need to supply blob IDs - generate them as numbers in a size-ordered list, offset by 1000:
                    var ccs = from cc in contourContainers
                              orderby cc.contour.Area descending
                              select cc;

                    int ccId        = 0;
                    int goodCounter = 0;
                    lock (videoSurveillanceDecider)
                    {
                        videoSurveillanceDecider.purgeColorBlobs();

                        foreach (ContourContainer cc in ccs)
                        {
                            cc.ID = 1000 + ccId;      // offset not to overlap with VideoSurveillance-generated blobs
                            VideoSurveillanceTarget target = videoSurveillanceDecider.Update(cc, currentPanKinect, currentTiltKinect);
                            ccId++;
                            if (target != null && target.Rank > 1.0d)
                            {
                                goodCounter++;
                                if (goodCounter > 10000)  // take 10 largest good ones
                                {
                                    break;
                                }
                            }
                        }

                        if (!doSurveillance)
                        {
                            videoSurveillanceDecider.Commit();
                            videoSurveillanceDecider.ComputeMainColorTarget();
                            videoSurveillanceDecider.Draw(img);             // must run under lock
                        }
                    }
                }
            }

            if (doSurveillance)
            {
                // blob detection by Emgu.CV.VideoSurveillance:

                if (_tracker == null)
                {
                    _tracker  = new BlobTrackerAuto <Bgr>();
                    _detector = new FGDetector <Bgr>(FORGROUND_DETECTOR_TYPE.FGD);
                }

                Image <Bgr, byte> imgSmall = img.Resize(0.5d, INTER.CV_INTER_NN);      // for the full image - _tracker.Process() fails to allocate 91Mb of memory

                #region use the BG/FG detector to find the forground mask
                _detector.Update(imgSmall);
                Image <Gray, byte> forgroundMask = _detector.ForgroundMask;
                #endregion

                _tracker.Process(imgSmall, forgroundMask);

                lock (videoSurveillanceDecider)
                {
                    videoSurveillanceDecider.PurgeAndCommit();      // make sure that obsolete Surveillance targets are removed

                    foreach (MCvBlob blob in _tracker)
                    {
                        // keep in mind that we were working on the scaled down (to 1/2 size) image. So all points should be multiplied by two.
                        VideoSurveillanceTarget target = videoSurveillanceDecider.Update(blob, currentPanKinect, currentTiltKinect);
                    }

                    videoSurveillanceDecider.ComputeMainColorTarget();
                    videoSurveillanceDecider.Draw(img);             // must run under lock
                }
            }

            Bgr color = new Bgr(0.0, 128.0, 128.0);

            // draw center vertical line:
            System.Drawing.Point[] pts = new System.Drawing.Point[2];
            pts[0] = new System.Drawing.Point(img.Width / 2, 0);
            pts[1] = new System.Drawing.Point(img.Width / 2, img.Height);

            img.DrawPolyline(pts, false, color, 1);

            // draw center horizontal line:
            pts[0] = new System.Drawing.Point(0, img.Height / 2);
            pts[1] = new System.Drawing.Point(img.Width, img.Height / 2);

            img.DrawPolyline(pts, false, color, 1);

            // draw a sighting frame for precise alignment:
            // Horizontally the frame spans 16.56 degrees on every side and 12.75 degrees either up or down (at 74" the size it covers is 44"W by 33.5"H, i.e. 33.12 degrees by 25.5 degrees)
            System.Drawing.Point[] pts1 = new System.Drawing.Point[5];
            pts1[0] = new System.Drawing.Point(img.Width / 4, img.Height / 4);
            pts1[1] = new System.Drawing.Point(img.Width * 3 / 4, img.Height / 4);
            pts1[2] = new System.Drawing.Point(img.Width * 3 / 4, img.Height * 3 / 4);
            pts1[3] = new System.Drawing.Point(img.Width / 4, img.Height * 3 / 4);
            pts1[4] = new System.Drawing.Point(img.Width / 4, img.Height / 4);

            img.DrawPolyline(pts1, false, color, 1);

            // end of OpenCV / Emgu Image processing, converting the Image to BitmapProcessed:

            BitmapProcessed = img.ToBitmap();     // image with all processing marked
            //BitmapProcessed = filtered.ToBitmap();  // red image out of the Red channel
            //BitmapProcessed = gimg.ToBitmap();      // grey image; is CvZero'ed by this point
            //BitmapProcessed = forgroundMask.ToBitmap();

            //Tracer.Trace("Video processed in " + (DateTime.Now - started).TotalMilliseconds + " ms");       // usually 40...70ms

            yield break;
        }
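
Note: the BitmapSourceToBitmap helper called near the top of this example is not included in the snippet. The sketch below shows a common WPF-to-GDI+ conversion that matches that call site; it is an assumption about what the helper does, not the original code, and it needs System.IO, System.Drawing and System.Windows.Media.Imaging.

        static System.Drawing.Bitmap BitmapSourceToBitmap(BitmapSource source)
        {
            using (MemoryStream stream = new MemoryStream())
            {
                // encode the BitmapSource into an in-memory BMP, then decode it with GDI+
                BitmapEncoder encoder = new BmpBitmapEncoder();
                encoder.Frames.Add(BitmapFrame.Create(source));
                encoder.Save(stream);
                stream.Position = 0;

                using (System.Drawing.Bitmap temp = new System.Drawing.Bitmap(stream))
                {
                    // copy into a standalone bitmap so it does not depend on the disposed stream
                    return new System.Drawing.Bitmap(temp);
                }
            }
        }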
Example #9
        /// <summary>
        /// PROCESS EACH FRAME
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="arg"></param>
        private void ProcessFrame(object sender, EventArgs arg)
        {
            try
            {
                frame    = _capture1.QueryFrame();
                Framesno = _capture1.GetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_POS_FRAMES);

                framecopy = frame.Rotate(180, new Bgr());

                #region framming

                if (frame != null)
                {
                    if (_forgroundDetector == null)
                    {
                        _forgroundDetector = new FGDetector <Bgr>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD_SIMPLE);
                        //_forgroundDetector = new BGStatModel<Bgr>(frame, Emgu.CV.CvEnum.BG_STAT_TYPE.FGD_STAT_MODEL);
                        _tracker = new BlobTrackerAuto <Bgr>();
                    }

                    _forgroundDetector.Update(frame);
                    Image <Gray, Byte> todetectF = FillHoles(_forgroundDetector.ForegroundMask);

                    Image <Gray, Byte> todetect = todetectF.Rotate(180, new Gray(255));

                    _tracker.Process(todetect.Convert <Bgr, Byte>(), todetect);

                    {
                        Contour <Point> contours = todetect.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_LIST, stor);

                        #region contours
                        while (contours != null)
                        {
                            if (contours.Area > 200)// && (contours.BoundingRectangle.Width > 50 && contours.BoundingRectangle.Width < 100) && contours.BoundingRectangle.Height > 200)
                            {
                                Rectangle rect = contours.BoundingRectangle;
                                rect.X      = rect.X - 5;
                                rect.Y      = rect.Y - 5;
                                rect.Height = (rect.Height + 10);
                                rect.Width  = (rect.Width + 10);
                                Bitmap frame22 = framecopy.Bitmap;

                                framecopy.Draw(rect, new Bgr(Color.Red), 2);

                                label1.Text = "Vehicle Count:" + (_tracker.Count);

                                foreach (MCvBlob blob in _tracker)
                                {
                                    int blob_id = (blob.ID + 1);
                                    // framecopy.Draw(blob_id.ToString(), ref _font, Point.Round(blob.Center), new Bgr(Color.Red));

                                    if (Point.Round(blob.Center).X > rectangleShape2.Location.X)
                                    {
                                        if (!rectCnt.Contains(blob_id))
                                        {
                                            rectCnt.Add(blob_id);
                                        }
                                    }
                                }
                                label1.Text = "Vehicle Count:" + rectCnt.Count;
                                SqlCommand cmd = new SqlCommand("", Connection);
                                cmd.CommandType = CommandType.Text;
                                SqlDataAdapter dap = new SqlDataAdapter(cmd);
                                DataTable      dt  = new DataTable();
                                dap.Fill(dt);
                                //MessageBox.Show(contours.Area.ToString());
                            }
                            contours = contours.HNext;
                        }
                        #endregion contours

                        pictureBox3.Image = framecopy.ToBitmap();
                    }
                    if (cam == 0)
                    {
                        double time_index = _capture1.GetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_POS_MSEC);
                        //Time_Label.Text = "Time: " + TimeSpan.FromMilliseconds(time_index).ToString().Substring(0, 8);

                        double framenumber = _capture1.GetCaptureProperty(Emgu.CV.CvEnum.CAP_PROP.CV_CAP_PROP_POS_FRAMES);
                        // Frame_lbl.Text = "Frame: " + framenumber.ToString();
                    }

                    if (cam == 1)
                    {
                        // Frame_lbl.Text = "Frame: " + (webcam_frm_cnt++).ToString();
                    }
                }
                #endregion framming
            }
            catch (Exception ex)
            {
                //MessageBox.Show(ex.StackTrace);
            }
        }
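
Note: the FillHoles helper used on the foreground mask above is not shown in this example. The sketch below is an assumption about the original helper, using the same Emgu CV 2.x contour API that appears elsewhere on this page: it redraws every external contour filled so that interior holes in the mask disappear.

        static Image<Gray, Byte> FillHoles(Image<Gray, Byte> mask)
        {
            Image<Gray, Byte> filled = mask.CopyBlank();
            using (MemStorage storage = new MemStorage())
            {
                // walk the list of external contours and draw each one filled
                for (Contour<Point> contour = mask.FindContours(
                         CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                         RETR_TYPE.CV_RETR_EXTERNAL,
                         storage);
                     contour != null;
                     contour = contour.HNext)
                {
                    filled.Draw(contour, new Gray(255), -1);   // thickness -1 fills the contour
                }
            }
            return filled;
        }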
Example #10
        public void TestBlobTracking()
        {
            Capture capture = new Capture();

            ImageViewer viewer = new ImageViewer();

            BlobTrackerAutoParam<Gray> param = new BlobTrackerAutoParam<Gray>();
            //param.BlobDetector = new BlobDetector(Emgu.CV.CvEnum.BLOB_DETECTOR_TYPE.CC);
            param.FGDetector = new FGDetector<Gray>(Emgu.CV.CvEnum.FORGROUND_DETECTOR_TYPE.FGD);
            //param.BlobTracker = new BlobTracker(Emgu.CV.CvEnum.BLOBTRACKER_TYPE.CCMSPF);
            param.FGTrainFrames = 10;
            BlobTrackerAuto<Gray> tracker = new BlobTrackerAuto<Gray>(param);

            MCvFont font = new MCvFont(Emgu.CV.CvEnum.FONT.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0);

            Application.Idle += new EventHandler(delegate(object sender, EventArgs e)
            {
                tracker.Process(capture.QuerySmallFrame().PyrUp().Convert<Gray, Byte>());

                Image<Gray, Byte> img = tracker.ForgroundMask;
                //viewer.Image = tracker.GetForgroundMask();
                foreach (MCvBlob blob in tracker)
                {
                    img.Draw(Rectangle.Round(blob), new Gray(255.0), 2);
                    img.Draw(blob.ID.ToString(), ref font, Point.Round(blob.Center), new Gray(255.0));
                }
                viewer.Image = img;
            });
            viewer.ShowDialog();
        }