/// <summary>
/// Picks the better of two note-extraction bitmaps. The inversions' pixel sums
/// measure how much content each extraction holds; if the extractions differ a
/// lot relative to the smaller one, the smaller (cleaner) one wins, otherwise
/// the one with more content is returned.
/// </summary>
/// <param name="bmp1">First candidate extraction.</param>
/// <param name="bmp2">Second candidate extraction.</param>
/// <returns>Whichever input bitmap is judged the better extraction.</returns>
public static Bitmap GetTheBestNoteExtraction(Bitmap bmp1, Bitmap bmp2)
{
    lock (sync)
    {
        // fix: the Image<> temporaries were never disposed, and the pre-Not()
        // instances were additionally leaked by reassignment; use using blocks.
        using (Image<Gray, byte> extraction1 = new Image<Gray, byte>(bmp1))
        using (Image<Gray, byte> extraction2 = new Image<Gray, byte>(bmp2))
        using (Image<Gray, byte> dif = extraction1.AbsDiff(extraction2))
        using (Image<Gray, byte> inverted1 = extraction1.Not())
        using (Image<Gray, byte> inverted2 = extraction2.Not())
        {
            // Sum of the inverted images = amount of "ink" in each extraction.
            Gray sum1 = inverted1.GetSum();
            Gray sum2 = inverted2.GetSum();
            Gray sumDif = dif.GetSum();

            Bitmap big = sum1.Intensity >= sum2.Intensity ? bmp1 : bmp2;
            Bitmap small = sum1.Intensity < sum2.Intensity ? bmp1 : bmp2;
            Gray smallSum = sum1.Intensity < sum2.Intensity ? sum1 : sum2;

            // An empty extraction cannot be compared by ratio; keep the fuller one.
            if (smallSum.Intensity == 0)
            {
                return big;
            }

            // Large relative difference -> prefer the smaller (likely cleaner)
            // extraction; otherwise keep the one with more content.
            return sumDif.Intensity / smallSum.Intensity > 0.3 ? small : big;
        }
    }
}
        /// <summary>
        /// Computes the absolute difference of two images, thresholds/inverts it and
        /// writes a pure black-and-white result: unchanged regions become black,
        /// changed regions white (identical across all three channels).
        /// </summary>
        /// <param name="imagePath0">Path of the first input image.</param>
        /// <param name="imagePath1">Path of the second input image.</param>
        /// <param name="resultPath">Path the binary result image is saved to.</param>
        public void CalculateAbsoluteDifference(string imagePath0, string imagePath1, string resultPath)
        {
            using (Image<Bgr, byte> image0 = new Image<Bgr, byte>(imagePath0))
            using (Image<Bgr, byte> image1 = new Image<Bgr, byte>(imagePath1))
            // fix: the original pre-allocated a hard-coded 288x352 buffer that was
            // immediately overwritten (dead allocation, leaked, and wrong for any
            // other frame size); AbsDiff allocates the result itself.
            using (Image<Bgr, byte> result1 = image0.AbsDiff(image1))
            {
                result1._ThresholdBinaryInv(new Bgr(50, 50, 50), new Bgr(255, 255, 255)); // threshold and invert (in place)
                result1._Erode(3); // grows the black (unchanged) regions

                // Snap every pixel to pure black or pure white in all channels.
                for (int i = 0; i < result1.Cols; i++)
                {
                    for (int j = 0; j < result1.Rows; j++)
                    {
                        if (result1.Data[j, i, 0] < 50 && result1.Data[j, i, 1] < 50 && result1.Data[j, i, 2] < 50)
                        {
                            result1.Data[j, i, 0] = 0;
                            result1.Data[j, i, 1] = 0;
                            result1.Data[j, i, 2] = 0;
                        }
                        else
                        {
                            result1.Data[j, i, 0] = 255;
                            result1.Data[j, i, 1] = 255;
                            result1.Data[j, i, 2] = 255;
                        }
                    }
                }

                result1.Save(resultPath);
            }
        }
Exemple #3
0
        /// <summary>
        /// Detects the region that changed between two screenshots (e.g. a context
        /// menu that popped up): diffs the images, smooths via pyramid down/up,
        /// Otsu-binarizes and unions the bounding boxes of all contours with area
        /// greater than 500. Returns (found, region, crop of the second image).
        /// </summary>
        /// <param name="b1">Screenshot before the change.</param>
        /// <param name="b2">Screenshot after the change.</param>
        public static Tuple <bool, Rectangle, Bitmap> extrac_context_menu(Bitmap b1, Bitmap b2)
        {
            bool retB = false;
            Rectangle retR = Rectangle.Empty;
            Bitmap retImg = null;

            // fix: none of these temporaries were disposed in the original
            // (img1/img2/diff/pyramid images/UMats all leaked per call).
            using (Image<Bgra, byte> img1 = new Image<Bgra, byte>(b1))
            using (Image<Bgra, byte> img2 = new Image<Bgra, byte>(b2))
            using (Image<Bgra, byte> rawDiff = img2.AbsDiff(img1))
            using (Image<Bgra, byte> down = rawDiff.PyrDown())
            using (Image<Bgra, byte> diff = down.PyrUp())   // down+up = cheap denoise
            using (UMat uimage = new UMat())
            using (UMat bimage = new UMat())
            {
                // Bgr2Gray accepts 3- or 4-channel input; alpha is ignored.
                CvInvoke.CvtColor(diff, uimage, ColorConversion.Bgr2Gray);
                CvInvoke.Threshold(uimage, bimage, 0, 255, ThresholdType.Binary | ThresholdType.Otsu);

                Rectangle roi = Rectangle.Empty;
                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    CvInvoke.FindContours(bimage, contours, null, RetrType.External, ChainApproxMethod.ChainApproxNone);
                    int count = contours.Size;
                    for (int i = 0; i < count; i++)
                    {
                        // Only sizeable contours count as part of the changed region.
                        if (CvInvoke.ContourArea(contours[i], false) > 500.0)
                        {
                            Rectangle r = CvInvoke.BoundingRectangle(contours[i]);
                            roi = roi.IsEmpty ? r : Rectangle.Union(roi, r);
                        }
                    }
                }

                // A full-frame change is not a menu; require the ROI to be smaller
                // than the whole image.
                if (!roi.IsEmpty && img2.Width > roi.Width && img2.Height > roi.Height)
                {
                    retB = true;
                    retR = roi;
                    // fix: ToBitmap() makes an independent copy, so the returned
                    // bitmap stays valid after img2 is disposed (the .Bitmap
                    // property can share memory with the image).
                    retImg = img2.GetSubRect(retR).ToBitmap();
                }
            }
            return new Tuple <bool, Rectangle, Bitmap>(retB, retR, retImg);
        }
Exemple #4
0
        /// <summary>
        /// Background subtraction: diffs <paramref name="frame"/> against the stored
        /// background <c>bg</c>, cleans the mask morphologically, binarizes it and
        /// draws a box around every contour whose area exceeds <c>Area</c>.
        /// </summary>
        /// <param name="frame">Current video frame.</param>
        /// <returns>A colour copy of the frame with motion boxes drawn on it.</returns>
        private Image <Bgr, byte> Process2(Mat frame)
        {
            using (Image<Gray, byte> cur = frame.ToImage<Gray, byte>())
            using (Image<Gray, byte> rawDiff = bg.AbsDiff(cur))
            // fix: Erode/Dilate return NEW images; the original discarded the
            // results, so the morphological clean-up was a no-op.
            using (Image<Gray, byte> eroded = rawDiff.Erode(4))
            using (Image<Gray, byte> cleaned = eroded.Dilate(6))
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                cleaned._ThresholdBinary(new Gray(10), new Gray(255));

                CvInvoke.FindContours(cleaned, contours, null, RetrType.External,
                                      ChainApproxMethod.ChainApproxTc89L1);

                // ToImage already allocates a fresh image; the original's extra
                // .Copy() doubled the allocation for nothing.
                var output = frame.ToImage<Bgr, byte>();

                for (int i = 0; i < contours.Size; i++)
                {
                    if (CvInvoke.ContourArea(contours[i]) > Area)
                    {
                        Rectangle boundingRect = CvInvoke.BoundingRectangle(contours[i]);
                        output.Draw(boundingRect, new Bgr(Color.GreenYellow), 2);
                    }
                }
                return output;
            }
        }
Exemple #5
0
        /// <summary>
        /// Returns the colour frame with rectangles drawn around every moving region
        /// whose contour area exceeds <paramref name="minArea"/>. The first frame
        /// seen becomes the background reference.
        /// </summary>
        /// <param name="frame">Current video frame.</param>
        /// <param name="minArea">Minimum contour area; smaller contours are ignored.</param>
        public Image <Bgr, byte> ReturnMovingArea2(Mat frame, int minArea)
        {
            Image<Gray, byte> cur = frame.ToImage<Gray, byte>();
            Image<Bgr, byte> curBgr = frame.ToImage<Bgr, byte>();

            // First call: this frame becomes the background (diff will be zero).
            if (bg == null)
            {
                bg = cur;
            }

            using (Image<Gray, byte> rawDiff = bg.AbsDiff(cur))
            {
                rawDiff._ThresholdBinary(new Gray(100), new Gray(255));

                // fix: Erode/Dilate return NEW images; the original discarded the
                // results, so the morphological clean-up never took effect.
                using (Image<Gray, byte> eroded = rawDiff.Erode(3))
                using (Image<Gray, byte> cleaned = eroded.Dilate(4))
                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    CvInvoke.FindContours(cleaned, contours, null, RetrType.External, ChainApproxMethod.ChainApproxTc89L1);

                    for (int i = 0; i < contours.Size; i++)
                    {
                        if (CvInvoke.ContourArea(contours[i]) > minArea) // ignore small contours
                        {
                            Rectangle rect = CvInvoke.BoundingRectangle(contours[i]);
                            curBgr.Draw(rect, new Bgr(Color.LawnGreen), 2);
                        }
                    }
                }
            }
            return curBgr;
        }
Exemple #6
0
        /// <summary>
        /// Frame-to-frame motion detector: diffs the new frame against the last one,
        /// binarizes per-channel with <c>PixelThreshold</c>, and publishes a
        /// <c>MotionDetected</c> message when the summed difference bytes exceed
        /// <c>MotionThreshold</c>. The incoming frame is copied as the new reference.
        /// </summary>
        /// <param name="currentFrame">The freshly captured frame.</param>
        public void ComputeFrame(Image <Bgr, byte> currentFrame)
        {
            // First call only seeds the reference frame.
            if (_lastFrame == null)
            {
                _lastFrame = currentFrame.Copy();
                return;
            }

            var difference = _lastFrame.AbsDiff(currentFrame);
            difference = difference.ThresholdBinary(
                new Bgr(PixelThreshold, PixelThreshold, PixelThreshold),
                new Bgr(255, 255, 255));

            _lastFrame.Dispose();
            _lastFrame = currentFrame.Copy();

            // Total of all difference bytes across every channel.
            int totalChange = difference.Bytes.Sum(b => b);

            if (totalChange > MotionThreshold)
            {
                Mediator.Default.Publish(new MotionDetected());
            }

            difference.Dispose();
        }
        /// <summary>
        /// Finds the significant contours between a background frame and the
        /// current frame: absolute difference, binarization, then erode (remove
        /// speckle) and dilate (fill holes) before external contour extraction.
        /// </summary>
        /// <param name="background">Reference frame.</param>
        /// <param name="source">Current frame.</param>
        /// <param name="threshold">Binarization threshold.</param>
        /// <param name="maxValue">Value assigned to pixels above the threshold.</param>
        /// <param name="erode">Erosion iteration count.</param>
        /// <param name="dilate">Dilation iteration count.</param>
        /// <returns>The external contours of the cleaned difference mask.</returns>
        private VectorOfVectorOfPoint GetContours(
            Image <Gray, TDepth> background,
            Image <Gray, TDepth> source,
            Gray threshold,
            Gray maxValue,
            int erode,
            int dilate)
        {
            // Build the motion mask in one fluent pipeline.
            var mask = background.AbsDiff(source)
                                 .ThresholdBinary(threshold, maxValue)
                                 .Erode(erode)
                                 .Dilate(dilate);

            var result = new VectorOfVectorOfPoint();
            var hierarchy = new Mat();
            CvInvoke.FindContours(mask, result, hierarchy, RetrType.External, ChainApproxMethod.ChainApproxSimple);

            return result;
        }
Exemple #8
0
        /// <summary>
        /// Compares the ground-truth depth image against the current cropped depth
        /// image and searches the masked depth delta for blobs above the value/area
        /// thresholds. Shows two intermediate debug windows along the way.
        /// </summary>
        public void detect()
        {
            // Mask of pixels where the ground truth is >= the current depth value.
            //new pDepthImageCropped diff on m_GroundTruthImg
            //Image<Gray, Byte> imageDeltaMaskByte = m_GroundTruthImg.Cmp(m_DepthImageCropped.Add(new Gray(2*KinectManager.SCALE_FACTOR)), Emgu.CV.CvEnum.CMP_TYPE.CV_CMP_GT);
            Image <Gray, Byte> imageDeltaMaskByte = m_GroundTruthImg.Cmp(m_DepthImageCropped, Emgu.CV.CvEnum.CMP_TYPE.CV_CMP_GE);

            CvInvoke.cvShowImage("imageDeltaMask", imageDeltaMaskByte.Ptr);
            // Widen the 8-bit mask to Int32 (0 stays 0, anything else saturates to
            // Int32.MaxValue) so it can be AND-ed with the Int32 depth delta below.
            Image <Gray, Int32> imageDeltaMaskInt = imageDeltaMaskByte.Convert <Int32>(delegate(Byte b)
            {
                if (b == 0)
                {
                    return(0);
                }
                else
                {
                    return(Int32.MaxValue);
                }
            });

            CvInvoke.cvShowImage("cimageDeltaMask ", imageDeltaMaskInt.Ptr);

            // Depth delta restricted to the masked (changed) pixels.
            Image <Gray, Int32> imageDelta = m_GroundTruthImg.AbsDiff(m_DepthImageCropped).And(imageDeltaMaskInt);

            double valueThreshold;
            int    areaThreshold;

            // Minimum per-pixel depth change and minimum blob area to count as a hit.
            valueThreshold = 50;
            areaThreshold  = 30;

            // NOTE(review): the blob list is computed but never used here — it looks
            // like callers rely on side effects inside FindAllBlob; verify.
            List <Vector2> listBlobBounds = FindAllBlob(imageDelta, areaThreshold, valueThreshold);
        }
Exemple #9
0
        /// <summary>
        /// Applies a Sobel edge filter to the current image: combines the absolute
        /// horizontal and vertical gradient responses, rescales to 8 bits, stores a
        /// binarized edge map in <c>sobelEdges</c> and pushes the result to the UI.
        /// </summary>
        private static void SobelFilter()
        {
            // First-derivative responses in x and y (3x3 Sobel kernels).
            Image<Gray, Byte> gray = new Image<Gray, byte>(show_img);
            Image<Gray, float> gradX = gray.Sobel(1, 0, 3);
            Image<Gray, float> gradY = gray.Sobel(0, 1, 3);

            // AbsDiff against zero folds negative responses into magnitudes.
            gradX = gradX.AbsDiff(new Gray(0));
            gradY = gradY.AbsDiff(new Gray(0));

            Image<Gray, float> magnitude = gradX + gradY;

            // Find the extreme response so the map can be stretched to 0..255.
            double[] minValues, maxValues;
            Point[] minLocations, maxLocations;
            magnitude.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);

            // Rescale to an 8-bit image.
            Image<Gray, Byte> sobelImage = magnitude.ConvertScale<byte>(255 / maxValues[0], 0);

            sobelEdges = new UMat();
            CvInvoke.Threshold(sobelImage, sobelEdges, 70, 255, ThresholdType.Binary);
            //sobelEdges.Save("sobel_" + count + ".png");

            pre_img  = show_img;
            show_img = sobelImage.Bitmap;
            rform.UpdateImage(show_img);
        }
        /// <summary>
        /// Checks a fixed ROI of the frame against the background: if the mean
        /// absolute grey difference exceeds 17 a device arrival is assumed — its
        /// size and colour are sampled, the operator is asked to label it, and the
        /// ROI images are saved as training samples. Otherwise a removal is logged.
        /// Returns true while a device is considered in place.
        /// NOTE(review): the <c>idx</c> parameter is unused — confirm with callers.
        /// </summary>
        static bool handle_motion(Image <Bgr, Byte> frane, Image <Gray, Byte> bg, int idx)
        {
            bool device_in_place = false;
            // Fixed region of interest covering the device cradle.
            //Rectangle r = new Rectangle(196, 665, 269, 628);
            Rectangle          r     = new Rectangle(334, 774, 452, 1016);
            Image <Bgr, Byte>  img1  = frane.Copy(r);
            Image <Gray, Byte> imgg  = frane.Mat.ToImage <Gray, Byte>().Copy(r);
            Image <Gray, Byte> imgbg = bg.Copy(r);

            // Mean absolute grey-level difference inside the ROI.
            imgg = imgg.AbsDiff(imgbg);
            Gray g = imgg.GetAverage();

            // Empirical motion threshold.
            if (g.MCvScalar.V0 > 17)
            {
                Rectangle sz  = detect_size_old(imgg);
                Bgr       rgb = sample_color(img1);
                Program.logIt($"Device arrival. size: {sz.Size}, color: {rgb} ({g.MCvScalar.V0})");
                // Interactive labelling: operator names the device so the captured
                // images can be saved under that name.
                Console.WriteLine("Enter device model and color:");
                string info = System.Console.ReadLine();
                img1.Save($"temp_{info}_2.jpg");
                imgbg.Save($"temp_{info}_1.jpg");
                imgg.Save($"temp_{info}_3.jpg");
                Console.WriteLine($"{info}: size={sz.Size}, color={rgb}");
                Program.logIt($"{info}: size={sz.Size}, color={rgb}");
                device_in_place = true;
            }
            else
            {
                Program.logIt($"Device removal. ({g.MCvScalar.V0})");
                device_in_place = false;
            }
            return(device_in_place);
        }
Exemple #11
0
        /// <summary>
        /// Grabs the next frame from the capture device. The very first call only
        /// initialises the current/previous frames; subsequent calls flip and smooth
        /// the input, then build a thresholded difference image against the previous
        /// frame for motion detection.
        /// </summary>
        private void RetriveAndInitFrames()
        {
            //using (Capture capture = new Emgu.CV.Capture(0))
            //{
            if (CurrentFrame == null)     //we need at least one frame to work out running average so acquire one before doing anything
            {
                capture.Start();

                //display the frame acquired
                Image <Bgr, Byte> imageFrameTemp = capture.RetrieveBgrFrame();    //we could use RetrieveGrayFrame if we didn't care about displaying colour image

                InputImageFrame = imageFrameTemp.Convert <Bgr, Byte>();
                CurrentFrame    = InputImageFrame.Convert <Gray, Byte>();
                PreviousFrame   = CurrentFrame.Copy();   //copy the frame to act as the previous
            }
            else
            {
                //acquire the frame, mirror it and blur out sensor noise
                Image <Bgr, Byte> imageFrameTemp = capture.QueryFrame();    //we could use RetrieveGrayFrame if we didn't care about displaying colour image
                InputImageFrame = imageFrameTemp.Convert <Bgr, Byte>();
                InputImageFrame = InputImageFrame.Flip(Emgu.CV.CvEnum.FLIP.HORIZONTAL);
                InputImageFrame = InputImageFrame.SmoothGaussian(3, 0, 0, 0);

                CurrentFrame    = InputImageFrame.Convert <Gray, Byte>();
                DifferenceFrame = PreviousFrame.AbsDiff(CurrentFrame);                                     //find the absolute difference
                                                                                                           //
                                                                                                           /*Play with the Threshold value to tune motion sensitivity*/
                DifferenceFrame     = DifferenceFrame.ThresholdBinary(new Gray(Threshold), new Gray(255)); //if value > Threshold set to 255, 0 otherwise
                DifferenceFrameGray = DifferenceFrame.Convert <Gray, Byte>();
                PreviousFrame       = CurrentFrame.Copy();                                                 //copy the frame to act as the previous frame
            }
            //}
        }
Exemple #12
0
        /// <summary>
        /// Builds the background mask for the current <c>rgb</c> frame: absolute
        /// Sobel edge magnitude, then Canny, then a morphological close, finally
        /// handed to <c>createMask</c>.
        /// </summary>
        public override async Task extractBackground()
        {
            Image <Gray, Byte> gray  = rgb.Convert <Gray, Byte>();
            Image <Gray, Byte> canny = gray.CopyBlank();

            this.mask = gray.CopyBlank();

            // Border detection: absolute Sobel responses in x and y.
            // fix: the raw Sobel images were leaked when reassigned; dispose them.
            Image <Gray, float> sobX, sobY;
            using (Image<Gray, float> rawX = gray.Sobel(1, 0, 3))
            {
                sobX = rawX.AbsDiff(new Gray(0));
            }
            using (Image<Gray, float> rawY = gray.Sobel(0, 1, 3))
            {
                sobY = rawY.AbsDiff(new Gray(0));
            }
            Image <Gray, float> borders = sobX + sobY;

            // fix: the original overwrote `gray` here and leaked the first instance.
            gray.Dispose();
            gray = borders.Convert <Gray, Byte>();

            // Canny on the edge magnitude, then close small gaps in the edge map.
            CvInvoke.Canny(gray, canny, 20, 200);
            // fix: the structuring element Mat was never disposed.
            using (Mat kernel = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Ellipse, new Size(3, 3), new Point(-1, -1)))
            {
                canny._MorphologyEx(Emgu.CV.CvEnum.MorphOp.Close, kernel, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(1.0));
            }
            await createMask(canny);

            gray.Dispose();
            borders.Dispose();
            canny.Dispose();
            sobX.Dispose();
            sobY.Dispose();
        }
        //List<Contour<Point>> contoursList = new List<Contour<Point>>();



        /// <summary>
        /// Sets up the form: loads the face-detection cascade, loads the two
        /// "spot the difference" game images, computes their thresholded absolute
        /// difference and records the bounding rectangle of every sufficiently
        /// large difference contour into the <c>distination</c> table.
        /// NOTE(review): image paths are hard-coded absolute paths — confirm they
        /// exist on the deployment machine.
        /// </summary>
        public Form1()
        {
            InitializeComponent();
            haar_obj = new HaarCascade(@"haarcascade_frontalface_default.xml");

            ///// game

            image1 = new Image <Bgr, byte>(@"D:\GitHubGeneral\FaceShapeDetectionThenPlayDifferenceGame\FaceShapeDetectionThenPlayDifferenceGame\Resources\Image\copy5.bmp");
            image2 = new Image <Bgr, byte>(@"D:\GitHubGeneral\FaceShapeDetectionThenPlayDifferenceGame\FaceShapeDetectionThenPlayDifferenceGame\Resources\Image\original5.bmp");



            imageOriginal.Image = image1;
            imageCopy.Image     = image2;
            Difference          = image1.AbsDiff(image2);

            Difference = Difference.ThresholdBinary(new Bgr(Threshold, Threshold, Threshold), new Bgr(255, 255, 255)); //if value > Threshold set to 255, 0 otherwise

            #region Draw the contours of difference
            //this is taken from the ShapeDetection Example


            using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
                //detect the contours and loop through each of them

                for (Contour <Point> contours = Difference.Convert <Gray, Byte>().FindContours(
                         Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                         Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, storage);
                     contours != null;
                     contours = contours.HNext)
                {
                    //Create a contour for the current variable for us to work with
                    Contour <Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);


                    //Record the bounding rectangle of each big-enough difference
                    if (currentContour.Area > ContourThresh) //only consider contours with area greater than 100 as default then take from form control
                    {
                        //contoursList.Add(currentContour);
                        // Row i of `distination` holds x, y, w, h, top, bottom,
                        // left, right and a "found" flag (0 = not yet clicked).
                        distination[i, 0] = currentContour.BoundingRectangle.X;
                        distination[i, 1] = currentContour.BoundingRectangle.Y;
                        distination[i, 2] = currentContour.BoundingRectangle.Width;
                        distination[i, 3] = currentContour.BoundingRectangle.Height;
                        distination[i, 4] = currentContour.BoundingRectangle.Top;
                        distination[i, 5] = currentContour.BoundingRectangle.Bottom;
                        distination[i, 6] = currentContour.BoundingRectangle.Left;
                        distination[i, 7] = currentContour.BoundingRectangle.Right;
                        distination[i, 8] = 0;


                        /*  Rectangle rec = new Rectangle(distination[i, 0], distination[i, 1], distination[i, 2], distination[i, 3]);
                         * image2.Draw(rec, new Bgr(Color.Red), 3);*/
                        i++;

                        //   image2.Draw(currentContour.BoundingRectangle, new Bgr(Color.Red), 2);
                    }
                }
            #endregion
        }
        /// <summary>
        /// Diffs <paramref name="newFrame"/> against the previously stored frame and
        /// returns, for every changed region, its (slightly inflated) bounding
        /// rectangle together with a bitmap crop of the new frame.
        /// </summary>
        /// <param name="newFrame">The freshly captured frame.</param>
        /// <param name="compressRate">Unused; kept for interface compatibility.</param>
        public DiffContainer GetDiffs(Bitmap newFrame, int compressRate)
        {
            Image <Bgr, Byte> Frame = new Image <Bgr, byte>(newFrame);

            // (Re)initialise the reference image when missing or the size changed.
            if (_oldImage == null || _oldImage.Height != newFrame.Height)
            {
                _oldImage = new Bitmap(newFrame.Width, newFrame.Height);
            }
            Previous_Frame?.Dispose(); // fix: the previous instance leaked on every call
            Previous_Frame = new Image <Bgr, byte>(_oldImage);

            Image <Bgr, Byte> Difference = Previous_Frame.AbsDiff(Frame);

            // PyrDown/PyrUp smooths the difference to suppress pixel noise.
            Image <Gray, Byte> gray = Difference.Convert <Gray, Byte>().PyrDown().PyrUp();

            DiffContainer container = new DiffContainer();

            using (MemStorage storage = new MemStorage())
                for (Contour <System.Drawing.Point> contours = gray.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, RETR_TYPE.CV_RETR_EXTERNAL, storage);
                     contours != null; contours = contours.HNext)
                {
                    Contour <Point> contour = contours.ApproxPoly(contours.Perimeter * 0.001);
                    if (contour.Area > 100)
                    {
                        Rectangle rect = contour.BoundingRectangle;

                        // Add a 1 px margin around the region...
                        rect.Inflate(1, 1);
                        // ...but clamp it to the frame (fix: the original expanded the
                        // rectangle unconditionally, so contours touching the border
                        // produced out-of-bounds rectangles and GetSubRect threw).
                        rect.Intersect(new Rectangle(0, 0, Frame.Width, Frame.Height));
                        if (rect.Width <= 0 || rect.Height <= 0)
                        {
                            continue;
                        }

                        var part = Frame.GetSubRect(rect);
                        container.Data.Add(rect, part.ToBitmap());
                    }
                }

            UpdateOldFrame(container);

            // fix: intermediates leaked per call in the original.
            gray.Dispose();
            Difference.Dispose();
            Frame.Dispose();

            return(container);
        }
Exemple #15
0
        /// <summary>
        /// Renders the current video frame: binarizes it inside the ROI, subtracts
        /// the binary background, outlines the largest remaining contour (taken to
        /// be the tracked mouse) and the enabled boundaries, then publishes the
        /// annotated frame as <c>DisplayImage</c>.
        /// </summary>
        private void UpdateDisplayImage()
        {
            Video.SetFrame(FrameNumber);

            using (Image <Bgr, Byte> displayImage = Video.GetFrameImage())
            {
                displayImage.ROI = Roi;
                using (Image <Gray, Byte> grayImage = displayImage.Convert <Gray, Byte>())
                    using (Image <Gray, Byte> binaryImage = grayImage.ThresholdBinary(new Gray(VideoSettings.ThresholdValue), new Gray(255)))
                        using (Image <Gray, Byte> finalMouseImage = binaryImage.AbsDiff(BinaryBackground))
                        {
                            // Keep only the largest contour — assumed to be the mouse.
                            Point[] mousePoints = null;
                            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                            {
                                CvInvoke.FindContours(finalMouseImage, contours, null, RetrType.External, ChainApproxMethod.ChainApproxNone);

                                int    count   = contours.Size;
                                double maxArea = 0;
                                for (int j = 0; j < count; j++)
                                {
                                    using (VectorOfPoint contour = contours[j])
                                    {
                                        double contourArea = CvInvoke.ContourArea(contour);
                                        if (contourArea >= maxArea)
                                        {
                                            maxArea     = contourArea;
                                            mousePoints = contour.ToArray();
                                        }
                                    }
                                }
                            }

                            displayImage.DrawPolyline(mousePoints, true, new Bgr(Color.Yellow), 2);

                            //foreach (var boundary in VideoSettings.Boundries)
                            //{
                            //    displayImage.DrawPolyline(VideoSettings.Boundries.ToArray(), true, new Bgr(Color.Red), 2);
                            //}


                            // Overlay every enabled user-defined boundary.
                            foreach (BoundaryBaseViewModel boundry in Boundries)
                            {
                                if (boundry.Enabled)
                                {
                                    displayImage.DrawPolyline(boundry.Points, true, boundry.Color, 2);
                                }
                            }

                            // NOTE(review): ROI was already set above and nothing
                            // visible resets it — this second assignment looks
                            // redundant; confirm before removing.
                            displayImage.ROI = Roi;

                            DisplayImage = ImageService.ToBitmapSource(displayImage);
                            //displayImage.Save(@"C:\Users\10488835\Desktop\PhD\Papers\Software Practise and Experience\Latex\images\BackgroundSub\5-Result.png");
                        }
            }
        }
Exemple #16
0
        /// <summary>
        /// Background-subtracts the grayscale of <paramref name="sourceImage"/> from
        /// <paramref name="background"/>, cleans the result morphologically and
        /// returns the binarized motion mask (threshold 120).
        /// </summary>
        /// <param name="sourceImage">Current colour frame.</param>
        /// <param name="background">Grayscale background reference.</param>
        public Image <Gray, byte> editNegative(Image <Bgr, byte> sourceImage, Image <Gray, byte> background)
        {
            using (Image<Gray, byte> sourceGray = sourceImage.Convert<Gray, byte>())
            using (Image<Gray, byte> diff = background.AbsDiff(sourceGray))
            // fix: Erode/Dilate return NEW images; the original discarded the
            // results, so the morphological clean-up never happened.
            using (Image<Gray, byte> eroded = diff.Erode(4))
            using (Image<Gray, byte> cleaned = eroded.Dilate(6))
            {
                return cleaned.ThresholdBinary(new Gray(120), new Gray(255));
            }
        }
Exemple #17
0
        /// <summary>
        /// Returns a binarized motion mask between this frame and the previous one
        /// (threshold 20). The first call only seeds the previous frame and returns
        /// the blank image.
        /// </summary>
        /// <param name="image">Current grayscale frame.</param>
        Image <Gray, Byte> Motion(Image <Gray, Byte> image)
        {
            if (_previousImage == null)
            {
                _previousImage = image.Clone();
                return(_blankImage);
            }

            using (Image<Gray, Byte> motionImage = image.AbsDiff(_previousImage))
            {
                // fix: the old previous frame and the raw diff both leaked on
                // every call in the original.
                _previousImage.Dispose();
                _previousImage = image.Clone();
                return motionImage.ThresholdBinary(new Gray(20), new Gray(255));
            }
        }
Exemple #18
0
        // Subtraction: show the absolute per-pixel difference of the two images.
        private void button3_Click(object sender, EventArgs e)
        {
            Image <Gray, Byte> curBitmapSrc_1 = new Image <Gray, Byte>(curBitmapSrc);
            Image <Gray, Byte> curBitmapDst_1 = new Image <Gray, Byte>(curBitmapDst);

            // fix: the original pre-allocated `result` from curBitmapSrc and then
            // immediately overwrote (and leaked) it; AbsDiff allocates the result.
            Image <Gray, Byte> result = curBitmapSrc_1.AbsDiff(curBitmapDst_1);
            // `result` must stay alive: its Bitmap can share the image's memory.
            pictureBox3.Image = result.Bitmap;

            curBitmapSrc_1.Dispose();
            curBitmapDst_1.Dispose();
        }
        /// <summary>
        /// One tracking iteration: on the first call the captured frame becomes the
        /// background; afterwards each frame is smoothed, diffed against the
        /// background, binarized, and the resulting moving objects are matched to
        /// the tracked-object list and displayed.
        /// </summary>
        private void DetectionAndDisplay()
        {
            // set background
            if (backgroundFrame == null)
            {
                // sets the first frame as background
                Image <Bgr, byte> image = (capture.QueryFrame()).ToImage <Bgr, byte>();
                CurrentFrame_Image.Image    = image;
                backgroundFrame             = image.Convert <Gray, byte>();
                BackgroundFrame_Image.Image = backgroundFrame;
                // start the compareing for testing
                Image <Bgr, byte> cimage = (compareCapture.QueryFrame()).ToImage <Bgr, byte>();
                Object_Image.Image = cimage;
                return;
            }

            // get the current frame (grayscale + Gaussian blur to cut sensor noise)
            currentFrame     = (capture.QueryFrame()).ToImage <Bgr, byte>();
            currentFrameGray = currentFrame.Convert <Gray, byte>();
            currentFrameGray = currentFrameGray.SmoothGaussian(7);

            // get the object image window that will be used for testing
            objectImage        = (compareCapture.QueryFrame()).ToImage <Bgr, byte>();
            Object_Image.Image = objectImage;

            // get Difference: |background - frame|, smoothed, then binarized at 50
            differenceFrame = backgroundFrame.AbsDiff(currentFrameGray);
            differenceFrame = differenceFrame.SmoothGaussian(7);
            differenceFrame = differenceFrame.ThresholdBinary(new Gray(50), new Gray(255));

            //call the find object function
            List <CircleF> objects = GetMovingObjects();

            // determine if moving objects are what is already being tracked or something new to track
            DetermineTrackedObjects(objects);

            // will be replaced with method to clean tracked objects.
            foreach (TrackedObject t in trackedObjects)
            {
                t.Update(false);
            }

            //display the tracked objects
            DisplayTrackedObjects();

            //display
            CurrentFrame_Image.Image    = currentFrame;
            DifferenceFrame_Image.Image = differenceFrame;
            Object_Image.Image          = objectImage;
        }
Exemple #20
0
        /// <summary>
        /// Asserts that two grayscale images are pixel-for-pixel identical by
        /// checking that their absolute difference is zero everywhere.
        /// </summary>
        /// <param name="expected">The reference image.</param>
        /// <param name="actual">The image under test.</param>
        private void AssertImagesEqual(Image <Gray, Byte> expected, Image <Gray, Byte> actual)
        {
            // fix: dispose the temporary diff image (it leaked on every call).
            using (Image<Gray, Byte> imageDiff = expected.AbsDiff(actual))
            {
                Gray expectedPixelVal = new Gray(0);

                for (int row = 0; row < imageDiff.Rows; row++)
                {
                    for (int col = 0; col < imageDiff.Cols; col++)
                    {
                        Assert.AreEqual(expectedPixelVal, imageDiff[row, col], String.Format("Mismatch found at entry with row = {0}, col = {1}.", row, col));
                    }
                }
            }
        }
Exemple #21
0
        /// <summary>
        /// Heuristic "is the user active" check: counts the pixels that changed
        /// between this frame and the previous one, keeps a rolling window of these
        /// counts, and flips the working flag whenever the window median jumps by
        /// more than 300 — with a one-second cooldown before declaring inactivity.
        /// </summary>
        /// <param name="image">Current screen capture frame.</param>
        /// <returns>True while the user is considered to be working.</returns>
        private bool isUserWorking2(Image <Bgra, byte> image)
        {
            if (m_LastImage == null)
            {
                m_LastImage = image.Clone();
                return(false);
            }

            int moves;
            // fix: the per-frame diff image leaked; dispose it once counted.
            using (Image<Bgra, byte> diffFrame = image.AbsDiff(m_LastImage))
            {
                // Non-zero count of the first channel = number of changed pixels.
                moves = diffFrame.CountNonzero()[0];
            }

            m_PreviousMoves[m_Cnt] = moves;
            m_Cnt++;
            if (m_Cnt > m_ARRAYSIZE - 1)
            {
                // Window full: compare the median change against the previous window.
                m_Cnt = 0;
                int median = calculateMedian();
                int diff   = Math.Abs(median - m_PreviousMed);

                if (diff > 300)
                {
                    m_Currentlyworking = true;
                    m_LastWorkingStamp = DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond;
                }
                else
                {
                    long now = DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond;
                    if (now - m_LastWorkingStamp > 1000) // 1 second cooldown
                    {
                        m_Currentlyworking = false;
                    }
                }

                m_PreviousMed = median;
            }

            // fix: the old reference frame also leaked on every call.
            m_LastImage.Dispose();
            m_LastImage = image.Clone();

            return(m_Currentlyworking);
        }
        /// <summary>
        /// Station watcher: crops the frame to the cradle ROI, checks whether a
        /// device is in place and, on arrival, measures its size from the diff
        /// against the background and samples its colour; the predicted colour/size
        /// ids are printed for the caller. Returns true while a device is present.
        /// NOTE(review): img1 is the ROI crop but img0 is bg.Copy() of the whole
        /// background — AbsDiff requires equal sizes, so `bg` is presumably already
        /// cropped to the ROI by the caller; verify.
        /// </summary>
        static bool handle_motion(Image <Bgr, Byte> frane, Image <Bgr, Byte> bg)
        {
            bool      ret = false;
            // Fixed region of interest covering the device cradle.
            Rectangle roi = new Rectangle(744, 266, 576, 1116);

            Image <Bgr, Byte> img0 = bg.Copy();
            Image <Bgr, Byte> img1 = frane.Copy(roi);

            //img1.Save("temp_1.jpg");
            // img0 becomes the diff between the cropped frame and the background.
            img0 = img1.AbsDiff(img0);


            Tuple <bool, bool> device_inplace = check_device_inplace(img1);

            // Item1 = detection valid, Item2 = device present.
            if (device_inplace.Item1)
            {
                ret = device_inplace.Item2;
                if (ret)
                {
                    Program.logIt("Device Arrival");
                    Size sz = detect_size(img0.Mat.ToImage <Gray, Byte>());
                    if (sz.IsEmpty)
                    {
                        // error: keep the inputs on disk for offline debugging
                        frane.Save("temp_1.jpg");
                        bg.Save("temp_2.jpg");
                    }
                    else
                    {
                        Bgr rgb = sample_color(img1);
                        Program.logIt($"device: size={sz}, color={rgb}");
                        Tuple <bool, int, int> res = predict_color_and_size(rgb, sz);
                        if (res.Item1)
                        {
                            // Machine-readable handshake consumed by the caller.
                            Console.WriteLine($"device=ready");
                            Console.WriteLine($"colorid={res.Item2}");
                            Console.WriteLine($"sizeid={res.Item3}");
                        }
                    }
                }
                else
                {
                    Program.logIt("Device Removal");
                    Console.WriteLine($"device=removed");
                }
            }
            return(ret);
        }
Exemple #23
0
        /// <summary>
        /// Builds a motion mask as the absolute difference between the stored background and
        /// the incoming frame, denoised with an erode/dilate pass. The background is replaced
        /// by the current frame on every call; the first call yields an (almost) blank mask.
        /// </summary>
        /// <param name="frame">Current grayscale frame.</param>
        /// <returns>Denoised difference mask.</returns>
        private Image <Gray, byte> GetMaskFromDiff(Image <Gray, byte> frame)
        {
            if (backGround == null)
            {
                // First frame: seed the background so the diff below is well-defined.
                backGround = frame.Copy();
            }

            var mask = backGround
                       .AbsDiff(frame)
                       .Erode(3)    // suppress small speckle noise
                       .Dilate(4);  // re-grow the surviving regions

            // Update the reference for the next call.
            backGround = frame.Copy();
            return(mask);
        }
Exemple #24
0
        /// <summary>
        /// GP node: evaluates both children on the same starting image and replaces the
        /// problem's current image with the absolute difference of their two results
        /// (computed in float, converted back to byte).
        /// </summary>
        public override void Eval(IEvolutionState state, int thread, GPData input, ADFStack stack, GPIndividual individual, IProblem problem)
        {
            var p = (FeatureExtractionProblem2)problem;
            // Snapshot the input so the second child sees the same starting image.
            var snapshot = p.currentImage[thread].Copy();

            // First child transforms the image in place; capture its result.
            // (The original pre-allocated a 64x64 image here that was immediately
            // overwritten — a wasted allocation, removed.)
            Children[0].Eval(state, thread, input, stack, individual, problem);
            Image <Gray, float> firstResult = p.currentImage[thread].Convert <Gray, float>();

            // Restore the snapshot, then evaluate the second child.
            snapshot.CopyTo(p.currentImage[thread]);
            Children[1].Eval(state, thread, input, stack, individual, problem);
            Image <Gray, float> secondResult = p.currentImage[thread].Convert <Gray, float>();

            // Combine: |second - first|, back to byte.
            p.currentImage[thread] = secondResult.AbsDiff(firstResult).Convert <Gray, Byte>();
            //CvInvoke.AbsDiff(p.currentImage[thread], c1, p.currentImage[thread]);
        }
Exemple #25
0
    /// <summary>
    /// Demo entry point: loads two HSV images, displays their (saturated) pixel-wise
    /// difference, and computes an in-range mask that is currently not displayed.
    /// </summary>
    static void Main(string[] args)
    {
        Image <Hsv, byte> bitmap     = new Image <Hsv, byte>(@"D:\red3.bmp");
        Image <Hsv, byte> bitmap1    = new Image <Hsv, byte>(@"D:\testc.bmp");
        // Operator '-' is saturated per-channel subtraction, NOT absolute difference.
        Image <Hsv, byte> bitmapDIFF = bitmap - bitmap1;
        Hsv lowerLimit = new Hsv(0, 0, 200);
        Hsv upperLimit = new Hsv(5, 255, 255);

        // Mask of pixels within [lowerLimit, upperLimit]; kept for experimentation.
        var imageHSVDest = bitmap.InRange(lowerLimit, upperLimit);

        //CvInvoke.cvShowImage("imageHSVDest", imageHSVDest);

        // NOTE: the window title says "imageHSVDest" but the difference image is shown.
        CvInvoke.cvShowImage("imageHSVDest", bitmapDIFF);

        // Removed dead code: `bitmap.AbsDiff(bitmap1);` — its return value was discarded,
        // so the call had no effect.
        CvInvoke.cvWaitKey(0);
    }
        /// <summary>
        /// Average Difference (AD): the mean absolute per-pixel difference between two
        /// equally-sized grayscale images: AD = (1/(M*N)) * sum |I(m,n) - K(m,n)|.
        /// </summary>
        /// <param name="image1">First image (I).</param>
        /// <param name="image2">Second image (K); must have the same dimensions as image1.</param>
        /// <returns>Mean absolute difference.</returns>
        public static double AD(Image <Gray, double> image1, Image <Gray, double> image2)
        {
            int rows = image1.Rows;
            int cols = image1.Cols;

            var absDiff = image1.AbsDiff(image2);

            double total = 0;
            for (int r = 0; r < rows; r++)
            {
                for (int c = 0; c < cols; c++)
                {
                    total += absDiff.Data[r, c, 0]; // accumulate |I - K|
                }
            }
            // Note: original comment claimed |I-K|^2, but no squaring happens here.
            total = total * (1.0 / (rows * cols));
            return(total);
        }
Exemple #27
0
        /// <summary>
        /// Compares two images on disk and returns the contours of the regions that changed
        /// (absolute difference thresholded at 60 per channel).
        /// </summary>
        /// <param name="previousImage">Path to the earlier image.</param>
        /// <param name="currentImage">Path to the later image.</param>
        /// <returns>Linked list of change contours, or null when an input file is missing.</returns>
        public Contour <Point> CompareOld(String previousImage, String currentImage)
        {
            // Guard both inputs — the original only checked the first path and would
            // have thrown on a missing second file.
            if (!File.Exists(previousImage) || !File.Exists(currentImage))
            {
                return(null);
            }

            // Dispose intermediates deterministically; Emgu images wrap native memory.
            using (Image <Bgr, Byte> img1 = new Image <Bgr, Byte>(previousImage))
                using (Image <Bgr, Byte> img2 = new Image <Bgr, Byte>(currentImage))
                    using (Image <Bgr, Byte> rawDiff = img1.AbsDiff(img2))
                        using (Image <Bgr, Byte> diff = rawDiff.ThresholdBinary(new Bgr(60, 60, 60), new Bgr(255, 255, 255)))
                            using (Image <Gray, Byte> diffGray = diff.Convert <Gray, Byte>())
                            {
                                return(FindAllContours(diffGray));
                            }
        }
Exemple #28
0
        /// <summary>
        /// Applies the selected edge operator to a grayscale copy of the image and displays
        /// the result. Median and Gaussian modes are not implemented; for those, whatever
        /// CurrentMat already holds is shown unchanged.
        /// </summary>
        /// <param name="img">Source image; it is cloned, not modified.</param>
        /// <param name="blurMode">One of the Constants.* operator identifiers.</param>
        /// <param name="kSize">Kernel/aperture size for the operator (default 3).</param>
        private void manageBlur(Mat img, int blurMode, int kSize = 3)
        {
            Mat clone = img.Clone();
            Image <Gray, Byte> grayImage = clone.ToImage <Gray, Byte>();

            switch (blurMode)
            {
            case Constants.Sobel:
            {
                // Horizontal and vertical gradient responses.
                Image <Gray, float> gradX = grayImage.Sobel(1, 0, kSize);
                Image <Gray, float> gradY = grayImage.Sobel(0, 1, kSize);

                // AbsDiff against zero == element-wise absolute value.
                gradX = gradX.AbsDiff(new Gray(0));
                gradY = gradY.AbsDiff(new Gray(0));

                Image <Gray, float> magnitude = gradX + gradY;

                double[] minVals, maxVals;
                // Locate the extreme responses so the 8-bit conversion uses the full range.
                System.Drawing.Point[] minLocs, maxLocs;
                magnitude.MinMax(out minVals, out maxVals, out minLocs, out maxLocs);
                // Scale so the strongest edge maps to 255.
                Image <Gray, Byte> scaled = magnitude.ConvertScale <byte>(255 / maxVals[0], 0);
                CurrentMat = scaled.Mat;
                break;
            }

            case Constants.Laplace:
            {
                Image <Gray, float> laplacian = grayImage.Laplace(kSize);
                // In-place absolute value plus 8-bit conversion.
                CvInvoke.ConvertScaleAbs(laplacian, laplacian, 1, 0);

                CurrentMat = laplacian.Mat;
                break;
            }

            case Constants.Median:
                // Not implemented yet.
                break;

            case Constants.Gaussian:
                // Not implemented yet.
                break;
            }
            showImg(CurrentMat);
        }
Exemple #29
0
        /*
         * public void OpticalFlowAlign(Image<Bgr, Byte> imgA, Image<Bgr,Byte> imgB)
         * {
         *   Image<Gray,Byte> grayA = imgA.Convert<Gray, Byte>();
         *   Image<Gray,Byte> grayB = imgB.Convert<Gray,Byte>();
         *   Image<Gray,Byte> pyrBufferA = new Image<Gray,Byte>(imgA.Size);
         *   Image<Gray,Byte> pyrBufferB = new Image<Gray,Byte>(imgA.Size);
         *
         *   featuresA = grayA.GoodFeaturesToTrack(100, 0.01, 25, 3)
         *   grayA.FindCornerSubPix(featuresA, New System.Drawing.Size(10, 10),
         *                          New System.Drawing.Size(-1, -1),
         *                          New Emgu.CV.Structure.MCvTermCriteria(20, 0.03))
         *   features = featuresA(0).Length
         *   Emgu.CV.OpticalFlow.PyrLK(grayA, grayB, pyrBufferA, pyrBufferB, _
         *                             featuresA(0), New Size(25, 25), 3, _
         *                             New Emgu.CV.Structure.MCvTermCriteria(20, 0.03D),
         *                             flags, featuresB(0), status, errors)
         *   pointsA = New Matrix(Of Single)(features, 2)
         *   pointsB = New Matrix(Of Single)(features, 2)
         *   For i As Integer = 0 To features - 1
         *       pointsA(i, 0) = featuresA(0)(i).X
         *       pointsA(i, 1) = featuresA(0)(i).Y
         *       pointsB(i, 0) = featuresB(0)(i).X
         *       pointsB(i, 1) = featuresB(0)(i).Y
         *   Next
         *   Dim Homography As New Matrix(Of Double)(3, 3)
         *   cvFindHomography(pointsA.Ptr, pointsB.Ptr, Homography, HOMOGRAPHY_METHOD.RANSAC, 1, 0);
         * }
         */
        /// <summary>
        /// Computes the thresholded absolute difference between two frames and draws the
        /// filled contours of the changed regions in red, saving the result to D:\asdf.jpg.
        /// </summary>
        /// <param name="fImage">First frame.</param>
        /// <param name="lImage">Second frame; must match the first frame's size.</param>
        public void abDifference(Image <Bgr, float> fImage, Image <Bgr, float> lImage)
        {
            Image <Bgr, Byte> fImageB = fImage.Convert <Bgr, Byte>();
            Image <Bgr, Byte> lImageB = lImage.Convert <Bgr, Byte>();

            double ContourThresh = 0.003; // minimum contour area to draw (NOT an alpha, despite the original comment)
            int    Threshold     = 60;    // per-channel movement threshold

            Image <Bgr, Byte> contourImage = new Image <Bgr, Byte>(fImageB.Size);

            // Absolute difference, then binarize: > Threshold -> 255, otherwise 0.
            // (The original pre-allocated a throwaway Difference image here; AbsDiff
            // returns a fresh image, so we assign directly. The unused `Point p` local
            // was also removed.)
            Image <Bgr, Byte> Difference = fImageB.AbsDiff(lImageB)
                                           .ThresholdBinary(new Bgr(Threshold, Threshold, Threshold), new Bgr(255, 255, 255));

            #region Draw the contours of difference
            // This is taken from the ShapeDetection example.
            using (MemStorage storage = new MemStorage()) // allocate storage for contour approximation
                // detect the contours and loop through each of them
                for (Contour <Point> contours = Difference.Convert <Gray, Byte>().FindContours(
                         Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                         Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                         storage);
                     contours != null;
                     contours = contours.HNext)
                {
                    // Approximate the contour to reduce the point count before measuring it.
                    Contour <Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.00005, storage);

                    // Draw (filled, thickness -1) only contours above the area threshold.
                    if (currentContour.Area > ContourThresh)
                    {
                        contourImage.Draw(currentContour, new Bgr(Color.Red), -1);
                    }
                }
            #endregion
            contourImage.Save("D:\\asdf.jpg");
        }
Exemple #30
0
        /// <summary>
        /// Extracts a feature vector from a frame: the frame is resized, a foreground mask is
        /// derived by background subtraction, both are split into an N x M grid of cells, and
        /// a cell-level extractor fills the corresponding sub-vector for every cell.
        /// </summary>
        /// <param name="frame">Input grayscale frame.</param>
        /// <returns>Dense vector of length Length holding all per-cell feature vectors.</returns>
        public Math::Vector <double> ExtractFeatures(Image <Gray, Byte> frame)
        {
            Math::Vector <double> result = new DenseVector(Length);

            Image <Gray, Byte> grayFrame      = frame.Resize(frameWidth, frameHeight, INTER.CV_INTER_AREA);
            // Foreground mask: pixels differing from the background by more than 40,
            // minus pixels brighter than 200 in the frame itself.
            Image <Gray, Byte> foregroundMask =
                grayFrame.AbsDiff(background).ThresholdBinary(new Gray(40), new Gray(255)) -
                grayFrame.ThresholdBinary(new Gray(200), new Gray(255));


            // Dilate / double-erode / dilate sequence to close holes and drop speckle noise.
            CvInvoke.cvDilate(foregroundMask, foregroundMask, structuringElement, 1);
            CvInvoke.cvErode(foregroundMask, foregroundMask, structuringElement, 2);
            CvInvoke.cvDilate(foregroundMask, foregroundMask, structuringElement, 1);

            //Image<Gray, byte> outline = foregroundMask - foregroundMask.Erode(1);
            //Image<Gray, byte> edges = grayFrame.Canny(80, 150);
            //Image<Gray, byte> foregroundEdges = edges.Min(foregroundMask.Dilate(1));
            //outline.Save(@"D:\! Egyetem\! RWTH\Semester 2\Seminar CV\Datasets\UCSD\derived images\outlines\" + i + ".png");
            //foregroundEdges.Save(@"D:\! Egyetem\! RWTH\Semester 2\Seminar CV\Datasets\UCSD\derived images\edges\" + i + ".png");
            //foregroundMask.Save(@"D:\! Egyetem\! RWTH\Semester 2\Seminar CV\Datasets\UCSD\derived images\masks\" + i + ".png");
            //i++;

            // Split the resized frame and mask into an N-row by M-column grid of cells.
            IList <Image <Gray, Byte> > frameCells          = ImageSplitter.SplitImage(grayFrame, N, M);
            IList <Image <Gray, Byte> > foregroundMaskCells = ImageSplitter.SplitImage(foregroundMask, N, M);

            // NOTE(review): cell offsets derive from the ORIGINAL frame size, while the cells
            // come from the resized grayFrame — confirm frame is already frameWidth x frameHeight.
            int cellHeight = frame.Height / N;
            int cellWidth  = frame.Width / M;

            for (int cellId = 0; cellId < frameCells.Count; ++cellId)
            {
                // Pixel offset of this cell within the frame (row-major cell order).
                int offsetX = (cellId % M) * cellWidth;
                int offsetY = (cellId / M) * cellHeight;

                // Extract per-cell features, scaled by the perspective-correction function
                // for this cell's position.
                Math::Vector <double> featureVector =
                    cellFeatureExtractor.ExtractFeatures(
                        frameCells[cellId], foregroundMaskCells[cellId],
                        perspectiveCorrector.GetScaleFunction(offsetX, offsetY, frameHeight));

                // Place this cell's features into the corresponding slice of the result.
                result.SetSubVector(featureVector.Count * cellId, featureVector.Count, featureVector);
            }

            return(result);
        }
Exemple #31
0
        /// <summary>
        /// Manual test: computes the absolute difference between two saved captures and
        /// accumulates it into the background via a running average; saves both results to C:\.
        /// </summary>
        public void Logitech_test1()
        {
            double alpha = 0.003; // running-average weight
            // Removed unused local `Threshold` (declared but never referenced).


            //var shooter = new CameraShooter();
            //shooter.ClickLogitechShootButton();

            var Background = new Image <Bgr, byte>("C:\\Picture 70.jpg").Convert <Gray, Byte>().Convert <Gray, float>();
            var Frame      = new Image <Bgr, byte>("C:\\Picture 71.jpg");
            var Gray_Frame = Frame.Convert <Gray, Byte>().Convert <Gray, float>();
            var Difference = Background.AbsDiff(Gray_Frame);

            Difference.ToBitmap().Save("C:\\test_Difference.jpg");

            // Accumulate the difference into the background (cvRunningAvg).
            Background.RunningAvg(Difference, alpha);
            Background.ToBitmap().Save("C:\\test_Background.jpg");
        }
Exemple #32
0
        /// <summary>
        /// Captures two frames from the default camera, uses the first as the background,
        /// computes the absolute difference to the second, accumulates that difference into
        /// the background with a running average, and saves both results to C:\.
        /// </summary>
        public void SubtractImages()
        {
            double alpha = 0.003; // running-average weight
            // Removed unused locals `Threshold` and `Previous_Frame`; the original's
            // `if (Frame == null)` was always true because Frame had just been set to null.

            using (var _capture = new Capture())
            {
                // First frame seeds the background.
                // (We could use RetrieveGrayFrame if we didn't care about colour.)
                Image <Bgr, Byte> Frame = _capture.RetrieveBgrFrame();
                // We can only convert one aspect at a time, so Convert is called twice.
                Image <Gray, float> Background = Frame.Convert <Gray, Byte>().Convert <Gray, float>();

                // Second frame is the one compared against the background.
                Frame = _capture.RetrieveBgrFrame();
                Image <Gray, float> Gray_Frame = Frame.Convert <Gray, Byte>().Convert <Gray, float>();

                // cvAbsDiff(pFrameMat, pBkMat, pFrMat) equivalent.
                Image <Gray, float> Difference = Background.AbsDiff(Gray_Frame);

                Difference.ToBitmap().Save("C:\\test_Difference.jpg");

                /* Play with the alpha weighting 0.001 */
                Background.RunningAvg(Difference, alpha); // perform the cvRunningAvg accumulation
                Background.ToBitmap().Save("C:\\test_Background.jpg");
            }
        }
        //Find the difference between 2 pictures to be tried with BIKE PICTURES!!
        // Test method: attempts to find the difference between two pictures.
        /// <summary>
        /// Returns a binary image of the regions that differ between two frames and draws a
        /// red bounding box on <paramref name="Frame"/> around each sufficiently large change.
        /// </summary>
        /// <param name="Frame">Current frame; annotated in place with bounding boxes.</param>
        /// <param name="Previous_Frame">Previous frame to compare against.</param>
        /// <returns>Thresholded difference image, or null when either frame is missing.</returns>
        public Image<Bgr, Byte> FindDifference(Image<Bgr, Byte> Frame, Image<Bgr,Byte> Previous_Frame)
        {
            double ContourThresh = 0.003; // minimum contour area to annotate
            int Threshold = 60;           // per-channel movement threshold

            // BUG FIX: the original executed `Previous_Frame = Frame.Copy();` inside
            // `if (Frame == null)` — a guaranteed NullReferenceException (and a no-op
            // anyway, since the parameter is passed by value). Without two frames there
            // is no difference to compute, so return the declared default, null.
            if (Frame == null || Previous_Frame == null)
            {
                return null;
            }

            // Absolute difference, then binarize: > Threshold -> 255, otherwise 0.
            Image<Bgr, Byte> Difference = Frame.AbsDiff(Previous_Frame);
            /*Play with the value 60 to set a threshold for movement*/
            Difference = Difference.ThresholdBinary(new Bgr(Threshold, Threshold, Threshold), new Bgr(255, 255, 255));

            #region Draw the contours of difference
            //this is taken from the ShapeDetection Example
            using (MemStorage storage = new MemStorage()) //allocate storage for contour approximation
                                                          //detect the contours and loop through each of them
                for (Contour<Point> contours = Difference.Convert<Gray, Byte>().FindContours(
                      Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE,
                      Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST,
                      storage);
                    contours != null;
                    contours = contours.HNext)
                {
                    // Approximate the contour to reduce the point count before measuring it.
                    Contour<Point> currentContour = contours.ApproxPoly(contours.Perimeter * 0.05, storage);

                    // Annotate only contours above the area threshold.
                    if (currentContour.Area > ContourThresh)
                    {
                        Frame.Draw(currentContour.BoundingRectangle, new Bgr(Color.Red), 2);
                    }
                }
            #endregion

            return Difference;
        }
        /// <summary>
        /// Loads two images from disk, computes their inverse-thresholded absolute
        /// difference (channels above 50 -> 0, others -> 255), converts the result to
        /// grayscale, and returns it as a BitmapImage.
        /// </summary>
        /// <param name="imagePath0">Path of the first image.</param>
        /// <param name="imagePath1">Path of the second image; must match the first's size.</param>
        /// <returns>The processed difference as a BitmapImage.</returns>
        public BitmapImage CalculateAbsoluteDifference(string imagePath0, string imagePath1)
        {
            Image<Bgr, byte> image0 = new Image<Bgr, byte>(imagePath0);
            Image<Bgr, byte> image1 = new Image<Bgr, byte>(imagePath1);

            // AbsDiff allocates its own result — the original pre-allocated a throwaway
            // 288x352 buffer here that was immediately overwritten.
            Image<Bgr, byte> result1 = image0.AbsDiff(image1);
            result1._ThresholdBinaryInv(new Bgr(50, 50, 50), new Bgr(255, 255, 255));
            CvInvoke.CvtColor(result1, result1, ColorConversion.Bgr2Gray);

            return BitmapToBitmapImage(result1.Bitmap);
        }
        /// <summary>
        /// Computes the inverse-thresholded absolute difference between two BitmapImages,
        /// erodes it, then snaps every pixel to pure black or pure white.
        /// </summary>
        /// <param name="bitmapImage0">First image.</param>
        /// <param name="bitmapImage1">Second image; must match the first's size.</param>
        /// <returns>The black-and-white difference mask as a BitmapImage.</returns>
        public BitmapImage CalculateAbsoluteDifference(BitmapImage bitmapImage0, BitmapImage bitmapImage1)
        {
            Image<Bgr, byte> image0 = new Image<Bgr, byte>(BitmapImageToBitmap(bitmapImage0));
            Image<Bgr, byte> image1 = new Image<Bgr, byte>(BitmapImageToBitmap(bitmapImage1));

            // AbsDiff allocates its own result — the original pre-allocated a throwaway
            // 288x352 buffer here that was immediately overwritten.
            Image<Bgr, byte> result1 = image0.AbsDiff(image1);
            result1._ThresholdBinaryInv(new Bgr(50, 50, 50), new Bgr(255, 255, 255));
            // CvInvoke.CvtColor(result1, result1, ColorConversion.Bgr2Gray);

            //result1._Dilate(1); // makes the black area smaller
            result1._Erode(3); // makes the black area larger

            // Force a strict black/white image across all three channels.
            for (int i = 0; i < result1.Cols; i++)
            {
                for (int j = 0; j < result1.Rows; j++)
                {
                    // Dark in every channel -> black; otherwise white.
                    if (result1.Data[j, i, 0] < 50 && result1.Data[j, i, 1] < 50 && result1.Data[j, i, 2] < 50)
                    {
                        result1.Data[j, i, 0] = 0;
                        result1.Data[j, i, 1] = 0;
                        result1.Data[j, i, 2] = 0;
                    }
                    else
                    {
                        result1.Data[j, i, 0] = 255;
                        result1.Data[j, i, 1] = 255;
                        result1.Data[j, i, 2] = 255;
                    }
                }
            }

            return BitmapToBitmapImage(result1.Bitmap);
        }
Exemple #36
0
        /// <summary>
        /// Returns a binary motion mask: pixels whose absolute difference from the previous
        /// frame exceeds 20 become 255. The first call only primes the previous frame and
        /// returns the blank image.
        /// </summary>
        /// <param name="image">Current grayscale frame.</param>
        /// <returns>Thresholded motion mask (or the blank image on the first call).</returns>
        Image<Gray, Byte> Motion(Image<Gray, Byte> image)
        {
            if (_previousImage == null)
            {
                // No reference frame yet — prime it and report no motion.
                _previousImage = image.Clone();
                return _blankImage;
            }

            var delta = image.AbsDiff(_previousImage);
            // Update the reference for the next call before thresholding.
            _previousImage = image.Clone();
            return delta.ThresholdBinary(new Gray(20), new Gray(255));
        }
Exemple #37
-1
 /// <summary>
 /// Simple grayscale background subtraction: pixels whose absolute difference from the
 /// background exceeds the frame's mean intensity are set to 255, the rest to 0.
 /// </summary>
 /// <param name="bkImg">Background reference image.</param>
 /// <param name="frame">Current frame.</param>
 /// <returns>Binary foreground mask.</returns>
 private Image<Gray, byte> GrayColorSubstraction(Image<Gray,byte> bkImg, Image<Gray,byte> frame)
 {
     // Use the frame's mean intensity as an adaptive threshold.
     Gray meanIntensity = frame.GetAverage(null);
     Image<Gray, byte> difference = frame.AbsDiff(bkImg);
     return difference.ThresholdBinary(meanIntensity, new Gray(255));
 }