Code Example #1
        private VideoSurveillanceTarget(VideoSurveillanceDecider decider, int id, VideoTargetType targetType)
        {
            _decider   = decider;
            TargetType = targetType;
            ID         = id;

            // prepare drawing colors used to render this target on the image:
            this.imgWidth  = _decider.imgWidth;
            this.imgHeight = _decider.imgHeight;
            switch (targetType)
            {
            case VideoTargetType.ColorBlob:
                drawingRectangeColor     = new Bgr(64.0, 64.0, 255.0);   // red (Bgr channel order is Blue, Green, Red)
                drawingRectangeColorMain = new Bgr(32.0, 32.0, 255.0);   // more saturated red for the main target
                drawingLabelColor        = new Bgr(255.0, 255.0, 128.0); // light cyan for the text label
                break;

            case VideoTargetType.SurveillanceBlob:
                drawingRectangeColor     = new Bgr(64.0, 255.0, 64.0);   // green
                drawingRectangeColorMain = new Bgr(32.0, 32.0, 255.0);   // saturated red for the main target
                drawingLabelColor        = new Bgr(255.0, 255.0, 128.0); // light cyan for the text label
                break;
            }
        }
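The constructor switches on a VideoTargetType that the listing does not define. A minimal sketch of that enum, assuming just the two members referenced by the switch above:

 // Hypothetical sketch - only these two members are known from the listing:
 public enum VideoTargetType
 {
     ColorBlob,          // target produced by the color (red) detector in Code Example #2
     SurveillanceBlob    // target produced by the Emgu.CV.VideoSurveillance blob tracker
 }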
Code Example #2
        public IEnumerator <ITask> ProcessImageFrame()
        {
            DateTime started = DateTime.Now;

            // the default RGB image size is 640 x 480;
            // setting another value (e.g. 320x240) in ...\TrackRoamer\TrackRoamerServices\Config\TrackRoamer.TrackRoamerBot.Kinect.Config.xml does not seem to work (it causes NUI initialization failure).

            byte[] srcImageBits          = this.RawFrames.RawColorFrameData;
            int    srcImageBytesPerPixel = this.RawFrames.RawColorFrameInfo.BytesPerPixel;
            int    srcImageWidth         = this.RawFrames.RawColorFrameInfo.Width;
            int    srcImageHeight        = this.RawFrames.RawColorFrameInfo.Height;

            //if (ProcessedImageWidth != this.RawFrames.RawImageFrameData.Image.Width || ProcessedImageHeight != this.RawFrames.RawImageFrameData.Image.Height)
            //{
            //    ProcessedImageWidth = this.RawFrames.RawImageFrameData.Image.Width;
            //    ProcessedImageHeight = this.RawFrames.RawImageFrameData.Image.Height;

            //    ImageBitsProcessed = new byte[ProcessedImageWidth * ProcessedImageHeight * 4];
            //}

            // we need to convert the Kinect/MRDS service Image to an OpenCV Image - that takes converting it first to a BitmapSource and then to a System.Drawing.Bitmap:
            BitmapSource srcBitmapSource = BitmapSource.Create(srcImageWidth, srcImageHeight, 96, 96, PixelFormats.Bgr32, null, srcImageBits, srcImageWidth * srcImageBytesPerPixel);

            if (doSaveOneImage)
            {
                doSaveOneImage = false;

                SaveBitmapSource(srcBitmapSource);
            }

            Image <Bgr, byte>  img      = new Image <Bgr, byte>(BitmapSourceToBitmap(srcBitmapSource));
            Image <Gray, byte> gimg     = null;
            Image <Bgr, byte>  filtered = null;

            img._SmoothGaussian(11); // filter out noise

            // from here we can operate OpenCV / Emgu Image, at the end converting Image to BitmapProcessed:

            if (videoSurveillanceDecider == null)
            {
                videoSurveillanceDecider = new VideoSurveillanceDecider(img.Width, img.Height);
            }

            videoSurveillanceDecider.Init();

            if (doColorRecognition)
            {
                // color detection (T-shirt, cone...):

                //lock (videoSurveillanceDecider)
                //{
                //    videoSurveillanceDecider.purgeColorBlobs();
                //}

                filtered = img.Clone().SmoothBlur(13, 13);       //.SmoothGaussian(9);

                byte[, ,] data = filtered.Data;
                int    nRows = filtered.Rows;
                int    nCols = filtered.Cols;
                double averageBrightnessTmp = 0.0d;

                colorTresholdMain = averageBrightness / 2.0d;
                double colorFactorMain = 256.0d * colorFactor / averageBrightness;
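                // A worked example with hypothetical values: if averageBrightness ≈ 384 and colorFactor = 1.2,
                // then colorTresholdMain = 192 and colorFactorMain = 256 * 1.2 / 384 = 0.8. A pixel (B=40, G=50, R=210)
                // has compSum = 300 and passes the threshold; its adjusted compR = (210/300)/0.8 = 0.875 beats
                // compG = 50/300 ≈ 0.167 and compB ≈ 0.133, so the pixel is painted pure red by the loop below.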

                // leave only pixels with distinct red color in the "filtered":
                for (int i = nRows - 1; i >= 0; i--)
                {
                    for (int j = nCols - 1; j >= 0; j--)
                    {
                        // R component (2) must be greater than B (0) and G (1) by the colorFactor; dark areas are excluded:
                        double compR = data[i, j, 2];
                        double compG = data[i, j, 1];
                        double compB = data[i, j, 0];

                        double compSum = compR + compG + compB;         // brightness
                        averageBrightnessTmp += compSum;

                        if (compR > colorTresholdMain)                   //&& compG > colorTreshold && compB > colorTreshold)
                        {
                            compR = (compR / compSum) / colorFactorMain; // adjusted for brightness
                            compG = compG / compSum;
                            compB = compB / compSum;
                            if (compR > compG && compR > compB)
                            {
                                data[i, j, 0] = data[i, j, 1] = 0;    // B, G
                                data[i, j, 2] = 255;                  // R
                            }
                            else
                            {
                                data[i, j, 0] = data[i, j, 1] = data[i, j, 2] = 0;
                            }
                        }
                        else
                        {
                            // too dark.
                            data[i, j, 0] = data[i, j, 1] = data[i, j, 2] = 0;
                        }
                    }
                }

                averageBrightness = averageBrightnessTmp / (nRows * nCols * 3.0d); // save it for the next cycle

                gimg = filtered.Split()[2];                                        // make a grey image out of the Red channel (Split() returns B, G, R, so index 2), supposedly containing all red objects.

                // contour detection:

                int areaTreshold = 300;     // do not consider red contours with area in pixels less than areaTreshold.

                Contour <System.Drawing.Point> contours;
                MemStorage store = new MemStorage();

                // make a linked list of contours from the red spots on the screen:
                contours = gimg.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, store);
                CvInvoke.cvZero(gimg.Ptr);

                if (contours != null)
                {
                    CvInvoke.cvDrawContours(img.Ptr, contours.Ptr, new MCvScalar(255, 0, 0), new MCvScalar(255, 255, 255), 2, 2, Emgu.CV.CvEnum.LINE_TYPE.CV_AA, System.Drawing.Point.Empty);

                    List <ContourContainer> contourContainers = new List <ContourContainer>();

                    for (; contours != null; contours = contours.HNext)
                    {
                        contours.ApproxPoly(contours.Perimeter * 0.02, 0, contours.Storage);
                        if (contours.Area > areaTreshold)
                        {
                            contourContainers.Add(new ContourContainer()
                            {
                                contour = contours
                            });

                            //int centerX = contours.BoundingRectangle.X + contours.BoundingRectangle.Width / 2;
                            //int centerY = contours.BoundingRectangle.Y + contours.BoundingRectangle.Height / 2;
                            //img.Draw(contours.BoundingRectangle, new Bgr(64.0, 64.0, 255.0), 2);
                            //img.Draw(Math.Round((double)(((int)contours.Area) / 100) * 100).ToString(), ref _font, new System.Drawing.Point(centerX, centerY), new Bgr(64.0, 64.0, 255.0));
                        }
                    }

                    // for the VideoSurveillanceDecider to work, we need to supply blob IDs - generate them as numbers in a size-ordered list, offset by 1000:
                    var ccs = from cc in contourContainers
                              orderby cc.contour.Area descending
                              select cc;

                    int ccId        = 0;
                    int goodCounter = 0;
                    lock (videoSurveillanceDecider)
                    {
                        videoSurveillanceDecider.purgeColorBlobs();

                        foreach (ContourContainer cc in ccs)
                        {
                            cc.ID = 1000 + ccId;      // offset so the IDs do not overlap with VideoSurveillance-generated blobs
                            VideoSurveillanceTarget target = videoSurveillanceDecider.Update(cc, currentPanKinect, currentTiltKinect);
                            ccId++;
                            if (target != null && target.Rank > 1.0d)
                            {
                                goodCounter++;
                                if (goodCounter > 10000)  // cap on the number of high-rank targets; effectively unlimited here (set to 10 to keep only the 10 largest)
                                {
                                    break;
                                }
                            }
                        }

                        if (!doSurveillance)
                        {
                            videoSurveillanceDecider.Commit();
                            videoSurveillanceDecider.ComputeMainColorTarget();
                            videoSurveillanceDecider.Draw(img);             // must run under lock
                        }
                    }
                }
            }

            if (doSurveillance)
            {
                // blob detection by Emgu.CV.VideoSurveillance:

                if (_tracker == null)
                {
                    _tracker  = new BlobTrackerAuto <Bgr>();
                    _detector = new FGDetector <Bgr>(FORGROUND_DETECTOR_TYPE.FGD);
                }

                Image <Bgr, byte> imgSmall = img.Resize(0.5d, INTER.CV_INTER_NN);      // at full size _tracker.Process() fails, trying to allocate ~91 MB of memory

                #region use the BG/FG detector to find the foreground mask
                _detector.Update(imgSmall);
                Image <Gray, byte> forgroundMask = _detector.ForgroundMask;
                #endregion

                _tracker.Process(imgSmall, forgroundMask);

                lock (videoSurveillanceDecider)
                {
                    videoSurveillanceDecider.PurgeAndCommit();      // make sure that obsolete Surveillance targets are removed

                    foreach (MCvBlob blob in _tracker)
                    {
                        // keep in mind that we were working on a half-size image, so all points must be multiplied by two.
                        VideoSurveillanceTarget target = videoSurveillanceDecider.Update(blob, currentPanKinect, currentTiltKinect);
                    }

                    videoSurveillanceDecider.ComputeMainColorTarget();
                    videoSurveillanceDecider.Draw(img);             // must run under lock
                }
            }

            Bgr color = new Bgr(0.0, 128.0, 128.0);

            // draw center vertical line:
            System.Drawing.Point[] pts = new System.Drawing.Point[2];
            pts[0] = new System.Drawing.Point(img.Width / 2, 0);
            pts[1] = new System.Drawing.Point(img.Width / 2, img.Height);

            img.DrawPolyline(pts, false, color, 1);

            // draw center horizontal line:
            pts[0] = new System.Drawing.Point(0, img.Height / 2);
            pts[1] = new System.Drawing.Point(img.Width, img.Height / 2);

            img.DrawPolyline(pts, false, color, 1);

            // draw a sighting frame for precise alignment:
            // Horizontally the frame spans 16.56 degrees on each side and 12.75 degrees up or down (at 74" it covers 44"W by 33.5"H, i.e. 33.12 degrees by 25.5 degrees)
            System.Drawing.Point[] pts1 = new System.Drawing.Point[5];
            pts1[0] = new System.Drawing.Point(img.Width / 4, img.Height / 4);
            pts1[1] = new System.Drawing.Point(img.Width * 3 / 4, img.Height / 4);
            pts1[2] = new System.Drawing.Point(img.Width * 3 / 4, img.Height * 3 / 4);
            pts1[3] = new System.Drawing.Point(img.Width / 4, img.Height * 3 / 4);
            pts1[4] = new System.Drawing.Point(img.Width / 4, img.Height / 4);

            img.DrawPolyline(pts1, false, color, 1);

            // end of OpenCV / Emgu Image processing, converting the Image to BitmapProcessed:

            BitmapProcessed = img.ToBitmap();     // image with all processing marked
            //BitmapProcessed = filtered.ToBitmap();  // red image out of the Red channel
            //BitmapProcessed = gimg.ToBitmap();      // grey image; is CvZero'ed by this point
            //BitmapProcessed = forgroundMask.ToBitmap();

            //Tracer.Trace("Video processed in " + (DateTime.Now - started).TotalMilliseconds + " ms");       // usually 40...70ms

            yield break;
        }
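Code Example #2 relies on a small ContourContainer helper that is not part of the listing. A minimal sketch, inferred from the only members the code touches (contour and ID); the Area used for ordering comes from the wrapped contour itself:

 // Hypothetical sketch of the ContourContainer helper; only the members
 // referenced in Code Example #2 are known from the listing.
 public class ContourContainer
 {
     public Contour<System.Drawing.Point> contour;   // red-blob contour returned by FindContours()
     public int ID;                                  // assigned as 1000 + index in descending-area order
 }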
Code Example #3
 public VideoSurveillanceTarget(VideoSurveillanceDecider decider, int id, ContourContainer contour, double currentPanKinect, double currentTiltKinect)
     : this(decider, id, VideoTargetType.ColorBlob)
 {
     Update(contour, currentPanKinect, currentTiltKinect);
 }
Code Example #4
 public VideoSurveillanceTarget(VideoSurveillanceDecider decider, int id, MCvBlob blob, double currentPanKinect, double currentTiltKinect)
     : this(decider, id, VideoTargetType.SurveillanceBlob)
 {
     Update(blob, currentPanKinect, currentTiltKinect);
 }
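Both convenience constructors just record the target type and delegate to the matching Update() overload. In Code Example #2 they are presumably invoked from inside VideoSurveillanceDecider.Update(); a direct call would look roughly like this (the decider size, IDs, and pan/tilt values are made up for illustration):

 var decider = new VideoSurveillanceDecider(640, 480);

 // wrap a red contour found by the color detector:
 var colorTarget = new VideoSurveillanceTarget(decider, 1000, contourContainer, currentPanKinect, currentTiltKinect);

 // wrap an MCvBlob produced by BlobTrackerAuto (blob and blobId assumed to come from the tracker loop):
 var blobTarget = new VideoSurveillanceTarget(decider, blobId, blob, currentPanKinect, currentTiltKinect);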