Example #1
 /// <summary>
 /// Constructor
 /// </summary>
 /// <param name="Configuration">Search configuration parameters</param>
 public Board(SearchConfigurator Configuration)
 {
     this.ImageBlobs      = new Emgu.CV.Cvb.CvBlobs();
     this.BlobDetector    = new Emgu.CV.Cvb.CvBlobDetector();
     this.BoardSize       = new Size(400, 400);
     this.dW              = (int)(this.BoardSize.Width / 8);
     this.dH              = (int)(this.BoardSize.Height / 8);
     this.BoxScaling      = 15;
     this.OutputImageSize = new Size(640, 480);
     this.SC              = Configuration;
 }
Example #2
        /// <summary>
        /// Process rocks in the image.
        /// </summary>
        private void ProcessSand()
        {
            // TODO: Save the image in MyDocs/Pictures.
            if (this.inputImage == null)
            {
                return;
            }

            Thread workerThread = new Thread(() =>
            {
                /*
                 * string path = @"C:\Users\POLYGONTeam\Documents\GitHub\Androbot\Androbot\Androbot\bin\Debug\Images\2D_20160728170358.PNG";
                 */
                //Emgu.CV.Image<Bgr, byte> inpImg = new Emgu.CV.Image<Bgr, byte>(this.inputImage);
                Emgu.CV.Image <Bgr, byte> inpImg = new Emgu.CV.Image <Bgr, byte>(inputImage);

                Emgu.CV.Image <Gray, byte> water = inpImg.InRange(new Bgr(0, 100, 0), new Bgr(255, 255, 255));
                //TODO: Check whether we still need the mask.
                //water = water.Add(mask);
                //water._Dilate(1);

                // Create the blobs.
                Emgu.CV.Cvb.CvBlobs blobs = new Emgu.CV.Cvb.CvBlobs();
                // Create blob detector.
                Emgu.CV.Cvb.CvBlobDetector dtk = new Emgu.CV.Cvb.CvBlobDetector();
                // Detect blobs.
                uint state = dtk.Detect(water, blobs);
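                // Detect labels the connected white regions in "water" and fills "blobs";
                // the returned count is not used further here.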

                foreach (Emgu.CV.Cvb.CvBlob blob in blobs.Values)
                {
                    //Console.WriteLine("Center: X:{0:F3} Y:{1:F3}", blob.Centroid.X, blob.Centroid.Y);
                    //Console.WriteLine("{0}", blob.Area);
                    if (blob.Area >= 4500 && blob.Area < 34465)
                    {
                        //Console.WriteLine("{0}", blob.Area);
                        inpImg.Draw(new CircleF(blob.Centroid, 5), new Bgr(Color.Red), 2);
                        inpImg.Draw(blob.BoundingBox, new Bgr(Color.Blue), 2);
                    }
                }

                if (this.outputImage != null)
                {
                    this.outputImage.Dispose();
                }
                // Dump the image.
                this.outputImage = inpImg.ToBitmap();
                // Show the new image.
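                // NOTE: pbMain is updated directly from the worker thread here; in WinForms
                // this would normally go through Control.Invoke to reach the UI thread.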
                this.pbMain.Image = this.FitImage(this.outputImage, this.pbMain.Size);
            });

            workerThread.Start();
        }
Example #3
        public int FindCorner()
        {
            Emgu.CV.Cvb.CvBlobDetector bDetect    = new Emgu.CV.Cvb.CvBlobDetector();
            Emgu.CV.Cvb.CvBlobs        markerBlob = new Emgu.CV.Cvb.CvBlobs();

            List <Rectangle> blobs = new List <Rectangle>();

            Image <Gray, Byte> preprocessImage = originalImage.Convert <Gray, Byte>();

            preprocessImage = preprocessImage.ThresholdBinary(new Gray(100), new Gray(255));
            preprocessImage = preprocessImage.Not();



            markerBlob.Clear();

            bDetect.Detect(preprocessImage, markerBlob);
            //preprocessImage.Dispose();
            //preprocessImage = null;

            //markerBlob.FilterByArea(250, 1800);

            foreach (Emgu.CV.Cvb.CvBlob targetBlob in markerBlob.Values)
            {
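                // Corner markers are assumed to be roughly 29-32 px squares.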
                if (targetBlob.BoundingBox.Width <= 32 && targetBlob.BoundingBox.Width >= 29)
                {
                    if (targetBlob.BoundingBox.Height <= 32 && targetBlob.BoundingBox.Height >= 29)
                    {
                        Rectangle r = new Rectangle(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y, targetBlob.BoundingBox.Width, targetBlob.BoundingBox.Height);
                        outerMarker.Add(r);
                    }
                }
                CvInvoke.PutText(preprocessImage, targetBlob.BoundingBox.Width + " " + targetBlob.BoundingBox.Height, new Point(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y), FontFace.HersheyComplex, 1, new Bgr(Color.Red).MCvScalar, 2);
            }

            return(outerMarker.Count);
        }
Example #4
        /// <summary>
        /// Gets the product contours; camera = 0 is the right-hand camera, camera = 1 is the front camera.
        /// </summary>
        /// <param name="img">Product image</param>
        /// <param name="cameraID">Camera ID</param>
        /// <returns>The list of product contours</returns>
        private List <VectorOfPoint> GetContours(Bitmap img, int cameraID, int spongeH)
        {
            #region Grayscale processing
            //Convert to grayscale
            Image <Gray, byte> grayImg  = new Image <Gray, byte>(img).PyrDown().PyrUp();
            Image <Gray, byte> resImg   = grayImg.CopyBlank();
            Image <Gray, byte> remapImg = grayImg.CopyBlank();//image after remapping
            //Get the distortion parameters
            if (cameraID == 0)
            {
                GetUpperCamParams(spongeH);
                resImg = GetROI(grayImg, new Rectangle(new Point(1080, 0), new Size(4250 - 1080, 3400)));
            }
            else
            {
                GetLowerCamParams(spongeH);
                resImg = GetROI(grayImg, new Rectangle(new Point(1150, 0), new Size(4390 - 1150, 3500 - 150)));
            }

            //Distortion correction
            try
            {
                CvInvoke.InitUndistortRectifyMap(cameraMatrix, distCoeffs, null, cameraMatrix, imageSize, DepthType.Cv32F, mapx, mapy);
                CvInvoke.Remap(resImg, remapImg, mapx, mapy, Inter.Linear, BorderType.Reflect101, new MCvScalar(0));
            }
            catch (Exception)
            {
                throw;
            }
            //Binarization
            Image <Gray, byte> binaryImg = grayImg.CopyBlank();                  //create a blank canvas the same size as the grayscale image

            CvInvoke.Threshold(remapImg, binaryImg, 0, 255, ThresholdType.Otsu); //controls whether distortion correction is needed
            //Store the result in the field
            BinaryImage = binaryImg;
            //Closing [the closing operation was removed 2019-01-25]
            Image <Gray, byte> closingImg = binaryImg.CopyBlank();//image after the morphological operation
            CvInvoke.MorphologyEx(binaryImg, closingImg, MorphOp.Open, kernelClosing, new Point(-1, -1), 5, BorderType.Default, new MCvScalar(255, 0, 0, 255));
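            // NOTE: despite the "closing" names, MorphOp.Open is applied here, with 5 iterations.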
            #endregion
            List <VectorOfPoint> myContours = new List <VectorOfPoint>();//index, contour
            try
            {
                #region Remove irrelevant white regions
                VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();                                           //collection of regions
                Image <Gray, byte>    dnc      = new Image <Gray, byte>(binaryImg.Width, binaryImg.Height);
                CvInvoke.FindContours(closingImg, contours, dnc, RetrType.External, ChainApproxMethod.ChainApproxNone); //contour set
                Emgu.CV.Cvb.CvBlobs        bbs = new Emgu.CV.Cvb.CvBlobs();
                Emgu.CV.Cvb.CvBlobDetector bd  = new Emgu.CV.Cvb.CvBlobDetector();
                uint n = bd.Detect(closingImg, bbs);
                //Filter out blobs with small areas
                //bbs.FilterByArea(10000, 50000);
                uint      lb        = bbs[(uint)bbs.Keys.ToArray()[0]].Label; //get the label
                Point[]   bbsPoints = bbs[(uint)bbs.Keys.ToArray()[0]].GetContour();
                Rectangle rect      = bbs[lb].BoundingBox;                    //get the bounding rectangle
                PointF    iCentroid = bbs[lb].Centroid;                       //get the centroid of the current blob
                Trace.WriteLine("Current centroid: " + iCentroid.ToString());
                Trace.WriteLine("Current blob contour point count: " + bbsPoints.Length.ToString());
                Trace.WriteLine("Current blob bounding rectangle: " + rect.ToString());


                myContours.Clear();
                for (int k = 0; k < contours.Size; k++)
                {
                    double area = CvInvoke.ContourArea(contours[k]); //get the area of each connected region
                    if (area < 1000000)                              //filter by area (specified minimum area; maximum 2500000):
                    {
                        CvInvoke.FillConvexPoly(binaryImg, contours[k], new MCvScalar(0));
                    }
                    if (area > 1500000)//3000000
                    {
                        myContours.Add(contours[k]);
                    }
                }
            }
            catch (Exception)
            {
                throw;
            }
            #endregion
            return(myContours);
        }
        private void doImageProcessing()
        {
            // Translate our most recent color coordinates - done before the bg worker as
            // we can't access the sensor inside another thread

            // Clear the green screen
            Array.Clear(_greenScreenPixelData, 0, _greenScreenPixelData.Length);
            // Emgu CV Image
            using (Image <Emgu.CV.Structure.Gray, byte> emguOriginal = new Image <Emgu.CV.Structure.Gray, byte>(640, 480))
            {
                byte[, ,] emguData = emguOriginal.Data;

                // We have to iterate the whole depth image
                for (int y = 0; y < _depthStreamFrameHeight; ++y)
                {
                    for (int x = 0; x < _depthStreamFrameWidth; ++x)
                    {
                        // calculate index into depth array
                        int depthIndex = x + (y * _sensorRef.DepthStream.FrameWidth);

                        DepthImagePixel depthPixel = _depthPixels[depthIndex];

                        // retrieve the depth to color mapping for the current depth pixel
                        ColorImagePoint colorImagePoint = _colorCoordinates[depthIndex];

                        // scale color coordinates to depth resolution
                        int colorInDepthX = colorImagePoint.X;
                        int colorInDepthY = colorImagePoint.Y;

                        // make sure the depth pixel maps to a valid point in color space
                        // check y > 0 and y < depthHeight to make sure we don't write outside of the array
                        // check x > 0 instead of >= 0 since to fill gaps we set opaque current pixel plus the one to the left
                        // because of how the sensor works it is more correct to do it this way than to set to the right
                        if (colorInDepthX > 0 && colorInDepthX < _depthStreamFrameWidth && colorInDepthY >= 0 && colorInDepthY < _depthStreamFrameHeight)
                        {
                            // calculate index into the green screen pixel array
                            int greenScreenIndex = colorInDepthX + (colorInDepthY * _depthStreamFrameWidth);

                            // OK emgu needs a black and white only image.
                            if (depthPixel.Depth < _depthThreshold && depthPixel.Depth != 0)
                            {
                                // set opaque
                                _greenScreenPixelData[greenScreenIndex] = opaquePixelValue;

                                // compensate for depth/color not corresponding exactly by setting the pixel
                                // to the left to opaque as well
                                _greenScreenPixelData[greenScreenIndex - 1] = opaquePixelValue;

                                // Emgu needs an all black image with pure white where the depth data is
                                emguData[colorInDepthY, colorInDepthX, 0] = 255;

                                // set the pixel before this white too. We dont need this in blob detection as the blobs will fill in
                                // it just ends up adding extra on all the left edges

                                /*
                                 * if (colorInDepthX - 1 > -1)
                                 * {
                                 *  emguData[colorInDepthY, colorInDepthX - 1, 0] = 255;
                                 * }
                                 */
                            }
                        }
                    }
                }

                // emguCV work
                Emgu.CV.Cvb.CvBlobs        resultingBlobs = new Emgu.CV.Cvb.CvBlobs();
                Emgu.CV.Cvb.CvBlobDetector bDetect        = new Emgu.CV.Cvb.CvBlobDetector();
                uint numLabeledPixels = bDetect.Detect(emguOriginal, resultingBlobs);

                Image <Emgu.CV.Structure.Bgra, double> blobImg = new Image <Emgu.CV.Structure.Bgra, double>(emguOriginal.Width, emguOriginal.Height, new Emgu.CV.Structure.Bgra(0, 0, 0, 0));
                foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingBlobs.Values)
                {
                    using (MemStorage mem_BlobContours = new MemStorage())
                    {
                        Contour <System.Drawing.Point> allContourPointsInBlob = targetBlob.GetContour(mem_BlobContours);

                        // If there are more than five points, smooth them
                        if (allContourPointsInBlob.Total > 5)
                        {
                            System.Drawing.Point[] originalPoints = allContourPointsInBlob.ToArray();
                            System.Drawing.Point[] smoothedPoints = EmguUtilities.getSmoothedContour(originalPoints, 6, (float)0.5, Properties.Settings.Default.kinectGreenScreenMaskXPixelShift);

                            //------------- FILL -----------------------------------
                            // Sweet, shove them back into a contour collection

                            MemStorage finalFillStorage = new MemStorage();
                            Contour <System.Drawing.Point> finalFillContours = new Contour <System.Drawing.Point>(finalFillStorage);
                            finalFillContours.PushMulti(smoothedPoints, Emgu.CV.CvEnum.BACK_OR_FRONT.BACK);
                            blobImg.Draw(finalFillContours, black, -1);

                            // ------------ END FILL ------------------------------
                        }
                    }
                }

                // Converts an emgu cv image to a bitmapsource
                BitmapSource finalRef = EmguUtilities.ToBitmapSource(blobImg);
                finalRef.Freeze();
                // Ensure the greenScreenMask is locked before doing this
                // copy pixels - I get the feeling this isn't supposed to be used on bigger areas, but it seems like the fastest way to do it
                finalRef.CopyPixels(_copyArea, _pBackBuffer, _gsBufferSize, _gsStride);
                // Just in case dispose of the image
                blobImg.Dispose();
                //emguEroded.Dispose();
            }

            // make a copy to be more thread-safe - we really don't need this anymore, but oh well

            /*
             * EventHandler handler = frameReadyForDisplay;
             * if (handler != null)
             * {
             *    // invoke the subscribed event-handler(s)
             *    handler(this, EventArgs.Empty);
             * }
             */
        }
Example #6
        public KeyValuePair <BitmapSource, List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > > processBlobs(DepthFrameReader depthFrameReader, InfraredFrame frame, double blobSizeThreshold)
        {
            int width  = frame.FrameDescription.Width;
            int height = frame.FrameDescription.Height;

            ushort[] imageDataArray = new ushort[width * height];
            frame.CopyFrameDataToArray(imageDataArray);


            byte[] pixelData = new byte[width * height * (PixelFormats.Bgr32.BitsPerPixel + 7) / 8];

            int colorIndex = 0;

            for (int i = 0; i < imageDataArray.Length; i++)
            {
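                // Scale the 16-bit infrared intensity down to 8 bits for display.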
                ushort d = (ushort)(imageDataArray[i] >> 8);
                byte   b = (byte)d;
                pixelData[colorIndex++] = b;
                pixelData[colorIndex++] = b;
                pixelData[colorIndex++] = b;
                pixelData[colorIndex++] = 255;
            }

            Image <Bgr, short> openCvImg      = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));
            Image <Bgr, short> openCvDepthImg = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));

            int stride = width * ((PixelFormats.Bgr32.BitsPerPixel) / 8);
            //Create a BitmapSource at 96 DPI
            BitmapSource sBitmap = System.Windows.Media.Imaging.BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);

            openCvImg.Bitmap = CameraImage.BmsToBm(sBitmap);

            // copy this image as the debug image on which will be drawn
            var gray_image = openCvImg.Convert <Gray, byte>();

            gray_image._GammaCorrect(0.3);

            var greyThreshImg = gray_image.ThresholdBinary(new Gray(220), new Gray(255));

            greyThreshImg = greyThreshImg.Dilate(5);

            var rgb = new Rgb(255, 0, 0);
            //var depthFrameReader = _kSensor.DepthFrameSource.OpenReader();
            var depthFrame = depthFrameReader.AcquireLatestFrame();

            if (depthFrame == null)
            {
                return(new KeyValuePair <BitmapSource, List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > >(null, null));
            }
            ushort[] depthData = new ushort[width * height];

            depthFrame.CopyFrameDataToArray(depthData);
            depthFrame.Dispose();

            Emgu.CV.Cvb.CvBlobs        resultingImgBlobs = new Emgu.CV.Cvb.CvBlobs();
            Emgu.CV.Cvb.CvBlobDetector bDetect           = new Emgu.CV.Cvb.CvBlobDetector();
            var nBlobs = bDetect.Detect(greyThreshImg, resultingImgBlobs);


            List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > mappedPoints = new List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> >();

            if (nBlobs > 0)
            {
                var             blobImg = greyThreshImg;
                DepthSpacePoint dsp     = new DepthSpacePoint();
                foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingImgBlobs.Values)
                {
                    if (targetBlob.Area > blobSizeThreshold)
                    {
                        blobImg.Draw(targetBlob.BoundingBox, new Gray(255), 1);
                        dsp.X = targetBlob.Centroid.X;
                        dsp.Y = targetBlob.Centroid.Y;
                        int depthIndex  = (int)this.blobDetector.getDepth((int)dsp.X, (int)dsp.Y, width, depthData); //index into depthData (roughly width * dsp.Y + dsp.X)
                        var mappedPoint = _kSensor.CoordinateMapper.MapDepthPointToCameraSpace(dsp, depthData[depthIndex]);
                        if (!float.IsInfinity(mappedPoint.X) && !float.IsInfinity(mappedPoint.Y) && !float.IsInfinity(mappedPoint.Z))
                        {
                            mappedPoints.Add(new KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint>(targetBlob, mappedPoint));
                        }
                    }
                }
            }


            //return BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);
            var bitmap = BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);

            return(new KeyValuePair <BitmapSource, List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > >(bitmap, mappedPoints));
        }
Example #7
        private void ProcessFrame(object sender, EventArgs arg)
        {
            Image <Bgr, Byte> frame = _capture.RetrieveBgrFrame();

            var minHsv = new Hsv(_minHue, _minSat, _minVal);
            var maxHsv = new Hsv(_maxHue, _maxSat, _maxVal);


            Image <Gray, Byte> grayFrame         = frame.Convert <Hsv, Byte>().InRange(minHsv, maxHsv);
            Image <Gray, Byte> smallGrayFrame    = grayFrame.PyrDown();
            Image <Gray, Byte> smoothedGrayFrame = smallGrayFrame.PyrUp();
            Image <Gray, Byte> cannyFrame        = smoothedGrayFrame.Canny(100, 60);

            // This takes our nice-looking input and blows out or cuts off values
            // above or below a brightness threshold
            Image <Gray, byte> webcamThreshImg = smoothedGrayFrame.ThresholdBinary(new Gray(150), new Gray(255));

            // The magic blob detection code
            Emgu.CV.Cvb.CvBlobs        resultingWebcamBlobs = new Emgu.CV.Cvb.CvBlobs();
            Emgu.CV.Cvb.CvBlobDetector bDetect              = new Emgu.CV.Cvb.CvBlobDetector();
            uint numWebcamBlobsFound = bDetect.Detect(webcamThreshImg, resultingWebcamBlobs);

            Emgu.CV.Cvb.CvBlob largestBlob = null;
            foreach (var blob in resultingWebcamBlobs)
            {
                if (blob.Value.Area > MINIMUM_BLOB_SIZE_IN_PX)
                {
                    if (largestBlob == null || (largestBlob.Area < blob.Value.Area))
                    {
                        largestBlob = blob.Value;
                    }
                }
            }

            if (largestBlob != null)
            {
                // Draw bounding box around target
                frame.Draw(largestBlob.BoundingBox, new Bgr(0, 255, 255), 5);

                System.Drawing.Point center = new System.Drawing.Point(frame.Width / 2, frame.Height / 2);

                double thresholdWidth  = MOVEMENT_THRESHOLD_PERCENTAGE * frame.Width;
                double thresholdHeight = MOVEMENT_THRESHOLD_PERCENTAGE * frame.Height;

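                // The threshold values define a dead zone around the image centre: the gun
                // only moves when the blob centroid leaves this zone.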
                if (largestBlob.Centroid.X < (center.X - thresholdWidth))
                {
                    XGunState = XGunState.Left;
                }
                else if (largestBlob.Centroid.X > (center.X + thresholdWidth))
                {
                    XGunState = XGunState.Right;
                }
                else
                {
                    XGunState = XGunState.Idle;
                }

                if (largestBlob.Centroid.Y > (center.Y + thresholdHeight))
                {
                    YGunState = YGunState.Down;
                }
                else if (largestBlob.Centroid.Y < (center.Y - thresholdHeight))
                {
                    YGunState = YGunState.Up;
                }
                else
                {
                    YGunState = YGunState.Idle;
                }

                if (YGunState != PrevYGunState && _serialConnected && !_manualMode)
                {
                    PrevYGunState = YGunState;
                    SerialPort.Write(_yGunStateCommands[YGunState]);
                }

                if (XGunState != PrevXGunState && _serialConnected && !_manualMode)
                {
                    PrevXGunState = XGunState;
                    SerialPort.Write(_xGunStateCommands[XGunState]);
                }
            }
            else if (_serialConnected)
            {
                PrevXGunState = XGunState = XGunState.Idle;
                PrevYGunState = YGunState = YGunState.Idle;
                SerialPort.Write(_xGunStateCommands[XGunState.Idle] + _yGunStateCommands[YGunState.Idle]);
            }

            // Camera frame callback is not on the UI thread. Need to get there to set the image source
            this.Dispatcher.InvokeAsync(() =>
            {
                commandLabel.Content = string.Format("XGunState: {0}, YGunState: {1}", XGunState, YGunState);

                if (!_test)
                {
                    image.Source = ToBitmapSource(frame);
                }
                else
                {
                    image.Source = ToBitmapSource(smoothedGrayFrame);
                }
            });
        }
Example #12
        public int Validate()
        {
            Emgu.CV.Cvb.CvBlobDetector bDetect    = new Emgu.CV.Cvb.CvBlobDetector();
            Emgu.CV.Cvb.CvBlobs        markerBlob = new Emgu.CV.Cvb.CvBlobs();

            Image <Gray, Byte> preprocessImage = originalImage.Convert <Gray, Byte>();

            UMat pyr = new UMat();

            CvInvoke.PyrDown(preprocessImage, pyr);
            CvInvoke.PyrUp(pyr, preprocessImage);

            preprocessImage = preprocessImage.ThresholdBinary(new Gray(200), new Gray(255));
            preprocessImage = preprocessImage.Not();


            markerBlob.Clear();

            bDetect.Detect(preprocessImage, markerBlob);

            preprocessImage.Dispose();
            preprocessImage = null;

            markerBlob.FilterByArea(400, 1800);


            foreach (Emgu.CV.Cvb.CvBlob targetBlob in markerBlob.Values)
            {
                if (vBoundary.Contains(targetBlob.BoundingBox))
                {
                    if (targetBlob.BoundingBox.Width >= targetBlob.BoundingBox.Height - 5)
                    {
                        Rectangle r = new Rectangle(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y, targetBlob.BoundingBox.Width, targetBlob.BoundingBox.Height);
                        vBlobs.Add(r);
                    }
                }
                else if (hBoundary.Contains(targetBlob.BoundingBox))
                {
                    if (targetBlob.BoundingBox.Height >= targetBlob.BoundingBox.Width - 5)
                    {
                        Rectangle r = new Rectangle(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y, targetBlob.BoundingBox.Width, targetBlob.BoundingBox.Height);
                        hBlobs.Add(r);
                    }
                }
                else if (idBoundary.Contains(targetBlob.BoundingBox))
                {
                    Rectangle r = new Rectangle(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y, targetBlob.BoundingBox.Width, targetBlob.BoundingBox.Height);
                    idBlobs.Add(r);
                }

                else if (answerBoundary.Contains(targetBlob.BoundingBox))
                {
                    Rectangle r = new Rectangle(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y, targetBlob.BoundingBox.Width, targetBlob.BoundingBox.Height);
                    answerBlobs.Add(r);
                }
            }

            vBlobs.RemoveAt(0);
            int bCount = 0;

            hBlobs = hBlobs.OrderBy(r => r.X).ToList();
            vBlobs = vBlobs.OrderBy(r => r.Y).ToList();

            markerBlob.Dispose();
            markerBlob = null;
            return(bCount);
        }
Example #13
        public void DeSkew()
        {
            Rectangle vBoundary = new Rectangle(new Point(0, 0), new Size(140, originalImage.Height));

            Emgu.CV.Cvb.CvBlobDetector bDetect    = new Emgu.CV.Cvb.CvBlobDetector();
            Emgu.CV.Cvb.CvBlobs        markerBlob = new Emgu.CV.Cvb.CvBlobs();

            List <Rectangle> blobs = new List <Rectangle>();

            Image <Gray, Byte> preprocessImage = originalImage.Convert <Gray, Byte>();

            preprocessImage = preprocessImage.ThresholdBinary(new Gray(200), new Gray(255));
            preprocessImage = preprocessImage.Not();

            markerBlob.Clear();

            bDetect.Detect(preprocessImage, markerBlob);
            preprocessImage.Dispose();
            preprocessImage = null;

            markerBlob.FilterByArea(250, 1800);

            foreach (Emgu.CV.Cvb.CvBlob targetBlob in markerBlob.Values)
            {
                if (vBoundary.Contains(targetBlob.BoundingBox))
                {
                    if (targetBlob.BoundingBox.Width >= targetBlob.BoundingBox.Height - 5)
                    {
                        Rectangle r = new Rectangle(targetBlob.BoundingBox.X, targetBlob.BoundingBox.Y, targetBlob.BoundingBox.Width, targetBlob.BoundingBox.Height);
                        blobs.Add(r);
                    }
                }
            }

            RectangleF temp  = blobs.First();
            RectangleF temp2 = blobs.Last();

            double dY = Math.Abs(temp.Y - temp2.Y);
            double dX = Math.Abs(temp.X - temp2.X);

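            // Atan2(dX, dY) measures the marker line's skew angle from the vertical axis.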
            double angle = Math.Atan2(dX, dY);

            angle = angle * (180 / Math.PI);

            if (temp2.X > temp.X)
            {
                angle = angle * -1;
            }

            RotatedRect rot_rec = new RotatedRect();

            rot_rec.Center = new PointF(temp.X, temp.Y);
            RotationMatrix2D  rot_mat  = new RotationMatrix2D(rot_rec.Center, angle, 1);
            Image <Bgr, Byte> outimage = originalImage.CopyBlank();

            CvInvoke.WarpAffine(originalImage, outimage, rot_mat, originalImage.Size, Inter.Cubic, Warp.Default, BorderType.Constant, new Bgr(Color.White).MCvScalar);

            int xOffset = 80 - (int)temp.X;
            int yOffset = 45 - (int)temp.Y;

            originalImage = outimage.Copy();

            Bitmap     a      = originalImage.ToBitmap();
            CanvasMove filter = new CanvasMove(new AForge.IntPoint(xOffset, yOffset), Color.White);

            a             = filter.Apply(a);
            originalImage = new Image <Bgr, Byte>(a);

            a.Dispose();
            a = null;
            outimage.Dispose();
            outimage = null;
            blobs    = null;
        }
Example #14
 /// <summary>
 /// Dispose; called from the destructor.
 /// </summary>
 public void Dispose()
 {
     this.BlobDetector = null;
     this.ImageBlobs   = null;
 }
Example #15
        /// <summary>
        /// Finds all blobs in the given image, using either CvBlob detection or contour search.
        /// </summary>
        /// <param name="openCVImg">Input image to search for blobs</param>
        /// <param name="masterBlobs">Blobs known from the previous frame, used for tracking</param>
        /// <param name="colorBS">Color image from which the blob crops are taken</param>
        /// <param name="mode">false = CvBlob detection, true = contour search</param>
        /// <returns>The list of blobs found</returns>
        public static List <BlobObject> FindAllBlob(Image <Gray, Int32> openCVImg,
                                                    List <BlobObject> masterBlobs,
                                                    Image <Bgra, byte> colorBS,
                                                    bool mode)
        {
            List <BlobObject> retList = new List <BlobObject>();

            try
            {
                Image <Gray, byte> gray_image = openCVImg.Convert <Gray, byte>();
                List <BlobObject>  newBlobs   = new List <BlobObject>();
                if (mode == false)
                {
                    #region using cvBlob
                    Emgu.CV.Cvb.CvBlobs        resultingBlobs = new Emgu.CV.Cvb.CvBlobs();
                    Emgu.CV.Cvb.CvBlobDetector bDetect        = new Emgu.CV.Cvb.CvBlobDetector();
                    uint numWebcamBlobsFound = bDetect.Detect(gray_image, resultingBlobs);

                    using (MemStorage stor = new MemStorage())
                    {
                        foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingBlobs.Values)
                        {
                            if (targetBlob.Area > 200)
                            {
                                var contour = targetBlob.GetContour(stor);

                                MCvBox2D box = contour.GetMinAreaRect();

                                PointF[] boxCorner = UtilitiesImage.ToPercent(contour.GetMinAreaRect().GetVertices(), gray_image.Width, gray_image.Height);

                                PointF center = UtilitiesImage.ToPercent(contour.GetMinAreaRect().center, gray_image.Width, gray_image.Height);

                                RectangleF rect = UtilitiesImage.ToPercent(targetBlob.BoundingBox, gray_image.Width, gray_image.Height);

                                Image <Gray, byte> newCropImg = UtilitiesImage.CropImage(colorBS.Convert <Gray, byte>(), rect);
                                newBlobs.Add(new BlobObject(newCropImg, null, boxCorner, rect, center, 0, 0, 0 + ""));
                                //stor.Clear();
                            }
                        }
                    }
                    #endregion
                }
                else
                {
                    #region using contour
                    using (MemStorage storage = new MemStorage())
                    {
                        //Find only the outermost contours (CV_RETR_EXTERNAL ignores holes)
                        Contour <System.Drawing.Point> contours = gray_image.FindContours(
                            Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                            Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_EXTERNAL,
                            storage);

                        for (int i = 0; contours != null; contours = contours.HNext)
                        {
                            i++;

                            //double area = contours.Area;
                            if (contours.Area > 200)
                            {
                                PointF[]           boxCorner  = UtilitiesImage.ToPercent(contours.GetMinAreaRect().GetVertices(), gray_image.Width, gray_image.Height);
                                PointF             center     = UtilitiesImage.ToPercent(contours.GetMinAreaRect().center, gray_image.Width, gray_image.Height);
                                RectangleF         rect       = UtilitiesImage.ToPercent(contours.BoundingRectangle, gray_image.Width, gray_image.Height);
                                Image <Bgra, byte> newCropImg = UtilitiesImage.CropImage(colorBS, rect);
                                newBlobs.Add(new BlobObject(newCropImg.Convert <Gray, byte>(), null, boxCorner, rect, center, 0, 0, 0 + ""));
                            }
                        }
                    }
                    #endregion
                }

                // read objects from database now
                List <TrackableObject> objects = DatabaseManager.Instance.Objects.ToList();

                if (objects.Count == 0)
                {
                    foreach (BlobObject b in newBlobs)
                    {
                        retList.Add(new BlobObject(b.Image, null, b.CornerPoints, b.Rect, b.Center, 0, 0, 0 + ""));
                    }
                }
                else
                {
                    #region

                    // size and position are compared
                    List <Tuple <double, double, int, int> > trackInfo = new List <Tuple <double, double, int, int> >();
                    for (int newblob = 0; newblob < newBlobs.Count; newblob++)
                    {
                        for (int master = 0; master < masterBlobs.Count; master++)
                        {
                            double d = UtilitiesImage.Distance(newBlobs[newblob].Center, masterBlobs[master].Center);
                            double s = UtilitiesImage.DiffSize(newBlobs[newblob].Rect.Size, masterBlobs[master].Rect.Size);
                            trackInfo.Add(new Tuple <double, double, int, int>(d, s, master, newblob));
                        }
                    }


                    trackInfo.Sort((x, y) => x.Item1.CompareTo(y.Item1));
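                    // Greedy matching: after sorting by distance, pair each master blob with the
                    // nearest new blob, accepting a pair only when the size difference is below 0.2.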
                    List <int> newItem = new List <int>();

                    if (!m_LastWasCorrupted)
                    {
                        while (trackInfo.Count != 0)
                        {
                            if (trackInfo[0].Item2 < 0.2)
                            {
                                int        masterId  = trackInfo[0].Item3;
                                int        newId     = trackInfo[0].Item4;
                                BlobObject newObject = new BlobObject(newBlobs[newId].Image, newBlobs[newId].DepthStructur, newBlobs[newId].CornerPoints, newBlobs[newId].Rect, newBlobs[newId].Center, masterBlobs[masterId].Hits, masterBlobs[masterId].Id, masterBlobs[masterId].Name);
                                retList.Add(newObject);
                                trackInfo.RemoveAll(item => item.Item3 == masterId);
                                trackInfo.RemoveAll(item => item.Item4 == newId);
                                newItem.Add(newId);
                            }
                            else
                            {
                                trackInfo.RemoveAt(0);
                            }
                        }
                        newItem.Sort();


                        // check the images based on their features
                        for (int i = newBlobs.Count - 1; i >= 0; i--)
                        {
                            if (newItem.Count != 0 && i == newItem.Last())
                            {
                                newItem.RemoveAt(newItem.Count - 1);
                            }
                            else
                            {
                                // set the name according to the recognized object
                                string currentBlobId = RecognizeObject(newBlobs[i], objects);
                                newBlobs[i].Name = currentBlobId;
                                retList.Add(newBlobs[i]);
                            }
                        }
                    }
                }
            }
            catch (CvException e)
            {
                Logger.Instance.Log(e.Message, Logger.LoggerState.ERROR);
                return(retList);
            }
            catch (Exception e)
            {
                Logger.Instance.Log(e.Message, Logger.LoggerState.ERROR);
                //mark a flag that the last frame was corrupt
                m_LastWasCorrupted = true;
            }
            #endregion

            return(retList);
        }
Example #16
        public ImageSource detectRetroreflectiveBlob(int width, int height, byte[] pixelData)
        {
            //Create color and depth images to process
            Image <Bgr, short> openCvImg      = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));
            Image <Bgr, short> openCvDepthImg = new Image <Bgr, short>(CameraImage.DEPTH_IMAGE_WIDTH, CameraImage.DEPTH_IMAGE_HEIGHT, new Bgr(0, 0, 0));

            int stride = width * ((PixelFormats.Bgr32.BitsPerPixel) / 8);
            //Create a BitmapSource at 96 DPI
            BitmapSource sBitmap = System.Windows.Media.Imaging.BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);

            openCvImg.Bitmap = CameraImage.BmsToBm(sBitmap);

            // copy this image as the debug image on which will be drawn
            var gray_image = openCvImg.Convert <Gray, byte>();

            gray_image._GammaCorrect(0.3);

            var greyThreshImg = gray_image.ThresholdBinary(new Gray(220), new Gray(255));

            //greyThreshImg = greyThreshImg.Dilate(5);

            Emgu.CV.Cvb.CvBlobs        resultingImgBlobs = new Emgu.CV.Cvb.CvBlobs();
            Emgu.CV.Cvb.CvBlobDetector bDetect           = new Emgu.CV.Cvb.CvBlobDetector();
            var nBlobs = bDetect.Detect(greyThreshImg, resultingImgBlobs);

            int _blobSizeThreshold = 1;
            var rgb = new Rgb(255, 0, 0);
            var depthFrameReader = _kSensor.DepthFrameSource.OpenReader();
            var depthFrame       = depthFrameReader.AcquireLatestFrame();
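            // NOTE: AcquireLatestFrame can return null when no new frame is ready; unlike
            // processBlobs in Example #6, this path does not guard against that before copying.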

            ushort[] depthData = new ushort[width * height];

            depthFrame.CopyFrameDataToArray(depthData);

            List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> > detectedBlobs = new List <KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint> >();

            if (nBlobs > 0)
            {
                var blobImg = greyThreshImg;

                foreach (Emgu.CV.Cvb.CvBlob targetBlob in resultingImgBlobs.Values)
                {
                    if (targetBlob.Area > _blobSizeThreshold)
                    {
                        blobImg.Draw(targetBlob.BoundingBox, new Gray(255), 1);
                        float centroidX = targetBlob.Centroid.X;
                        float centroidY = targetBlob.Centroid.Y;

                        DepthSpacePoint dsp = new DepthSpacePoint();
                        dsp.X = targetBlob.Centroid.X; //targetBlob.BoundingBox.X;
                        dsp.Y = targetBlob.Centroid.Y; //targetBlob.BoundingBox.Y;
                        int depth       = (int)(width * dsp.Y + dsp.X);
                        var mappedPoint = _kSensor.CoordinateMapper.MapDepthPointToCameraSpace(dsp, depthData[depth]);
                        detectedBlobs.Add(new KeyValuePair <Emgu.CV.Cvb.CvBlob, CameraSpacePoint>(targetBlob, mappedPoint));
                    }
                }
            }

            depthFrame.Dispose();
            //return BitmapSource.Create(width, height, 96, 96, PixelFormats.Bgr32, null, pixelData, stride);
            //return detectedBlobs;
            return(CameraImage.BmToBms(greyThreshImg.Bitmap));
        }
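
All of the examples above follow the same core pattern: threshold to a binary image, run CvBlobDetector.Detect, then filter the resulting blobs by area or bounding box. Below is a minimal sketch of that pattern, assuming Emgu CV 3.x and a pre-thresholded binary image; the class and method names are only illustrative:

        using System;
        using Emgu.CV;
        using Emgu.CV.Structure;

        static class BlobSketch
        {
            public static void PrintBlobs(Image<Gray, byte> mask)
            {
                // Label connected white regions in the binary mask.
                Emgu.CV.Cvb.CvBlobs blobs = new Emgu.CV.Cvb.CvBlobs();
                Emgu.CV.Cvb.CvBlobDetector detector = new Emgu.CV.Cvb.CvBlobDetector();
                detector.Detect(mask, blobs);

                foreach (Emgu.CV.Cvb.CvBlob blob in blobs.Values)
                {
                    // Area gate, as in Example #15; tune the constant per application.
                    if (blob.Area > 200)
                    {
                        Console.WriteLine("Blob at {0}, area {1}, bounding box {2}",
                                          blob.Centroid, blob.Area, blob.BoundingBox);
                    }
                }
            }
        }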