Example #1
        public List <BlockData> DetectBlocks(Bitmap inputImg, int width, int height)
        {
            Image <Bgra, Byte> img = new Image <Bgra, Byte>(inputImg);

            width  = img.Width;
            height = img.Height;

            // Threshold out background
            Image <Gray, Byte> grayImg         = img.Convert <Gray, Byte>();
            Image <Gray, Byte> backgroundMask  = new Image <Gray, Byte>(width, height);
            double             threshold_value = CvInvoke.Threshold(grayImg, backgroundMask, 0, 255, ThresholdType.Otsu);

            Image <Gray, Byte> filledBackground = this.FillMask(backgroundMask);

            VectorOfVectorOfPoint allContours = new VectorOfVectorOfPoint();

            CvInvoke.FindContours(backgroundMask, allContours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);

            // Remove all contours except table
            int tableContourIdx          = this.FindLargestContourIdx(allContours);
            Image <Gray, Byte> tableMask = new Image <Gray, Byte>(width, height);
            int fillInterior             = -1;

            CvInvoke.DrawContours(tableMask, allContours, tableContourIdx, new MCvScalar(255), fillInterior);
            IInputArray structElem = CvInvoke.GetStructuringElement(ElementShape.Rectangle, STRUCT_ELEM_SIZE, STRUCT_ELEM_ANCHOR);

            CvInvoke.Erode(tableMask, tableMask, structElem, new Point(-1, -1), 1, BorderType.Constant, new MCvScalar(255));

            // Grab objects on table that are in foreground
            Image <Gray, Byte> foregroundMask = new Image <Gray, Byte>(width, height);

            CvInvoke.BitwiseNot(backgroundMask, foregroundMask);

            Image <Gray, Byte> tableForegroundMask = new Image <Gray, Byte>(width, height);

            CvInvoke.BitwiseAnd(foregroundMask, tableMask, tableForegroundMask);

            // Save table mask and mask applied to original image for access as shared resource
            Image <Bgra, Byte> processedImg = new Image <Bgra, Byte>(width, height);

            CvInvoke.BitwiseOr(img, processedImg, processedImg, tableMask);

            // Find contours for blocks on table
            VectorOfVectorOfPoint possibleBlocks = new VectorOfVectorOfPoint();

            CvInvoke.FindContours(tableForegroundMask, possibleBlocks, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
            VectorOfVectorOfPoint filteredBlocks = this.FilterSmallAreaContours(possibleBlocks);

            // Find block centers
            Point[] blockCenters = this.FindAndMarkContourCenters(filteredBlocks, processedImg);
            return(this.FindColorAtCenters(blockCenters, img));
        }
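The helpers used above (FillMask, FindLargestContourIdx, FilterSmallAreaContours, FindAndMarkContourCenters, FindColorAtCenters) are not shown. As a rough, hypothetical sketch — not the original implementation — FindLargestContourIdx can be done by comparing contour areas:

        // Hypothetical sketch: return the index of the contour with the largest area, or -1 if there is none.
        private int FindLargestContourIdx(VectorOfVectorOfPoint contours)
        {
            int    largestIdx  = -1;
            double largestArea = 0;

            for (int i = 0; i < contours.Size; i++)
            {
                double area = CvInvoke.ContourArea(contours[i]);
                if (area > largestArea)
                {
                    largestArea = area;
                    largestIdx  = i;
                }
            }
            return(largestIdx);
        }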
Example #2
        private void ProcessFrame(object sender, EventArgs e)
        {
            if (cap != null && cap.Ptr != IntPtr.Zero)
            {
                cap.Retrieve(frame);

                IImage hsv = (IImage) new Mat();
                CvInvoke.CvtColor(frame, hsv, ColorConversion.Bgr2Hsv);
                IOutputArray mask = (IImage) new Mat();
                //CvInvoke.ExtractChannel(hsv, mask, 0);
                //IImage s = (IImage)new Mat();
                //CvInvoke.ExtractChannel(hsv, s, 1);


                ScalarArray lower = new ScalarArray(new MCvScalar(low1, 40, 40));
                ScalarArray upper = new ScalarArray(new MCvScalar(up1, 255, 255));
                //ScalarArray lower = new ScalarArray(170);
                //ScalarArray upper = new ScalarArray(180);
                CvInvoke.InRange(hsv, lower, upper, mask);

                //CvInvoke.BitwiseNot(mask, mask);
                //CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
                //CvInvoke.BitwiseAnd(mask, s, mask, null);


                IOutputArray result = (IImage) new Mat();
                CvInvoke.BitwiseAnd(frame, frame, result, mask);
                IInputArray kernel = new Matrix <byte>(new byte[5, 5] {
                    { 1, 1, 1, 1, 1 },
                    { 1, 1, 1, 1, 1 },
                    { 1, 1, 1, 1, 1 },
                    { 1, 1, 1, 1, 1 },
                    { 1, 1, 1, 1, 1 }
                });
                IOutputArray opening = (IImage) new Mat();
                CvInvoke.MorphologyEx(result, opening, MorphOp.Open, kernel, new System.Drawing.Point(-1, -1), 1, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                IOutputArray smooth = (IImage) new Mat();
                CvInvoke.MedianBlur(opening, smooth, 35);
                IOutputArray framegray = (IImage) new Mat();
                CvInvoke.CvtColor(smooth, framegray, ColorConversion.Bgr2Gray);
                IInputOutputArray threshold = (IImage) new Mat();
                CvInvoke.Threshold(framegray, threshold, 10, 255, ThresholdType.Binary);
                VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
                CvInvoke.FindContours(threshold, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
                //CvInvoke.DrawContours(frame, contours, -1, new MCvScalar(0,0,255), 1);

                if (contours.Size > 0)
                {
                    //Console.WriteLine(CvInvoke.ContourArea(contours[0]));
                    CircleF cf = CvInvoke.MinEnclosingCircle(contours[0]);
                    points.Add(new Vertex((cf.Center.X - 320), -(cf.Center.Y - 240), ((float)cf.Area / 50)));
                    //Console.WriteLine(cf.Center.X.ToString() + " " + cf.Center.Y.ToString() + " " + cf.Area.ToString());
                    //xa.Text = cf.Center.X.ToString();
                    //ya.Text = cf.Center.Y.ToString();
                    //za.Text = cf.Area.ToString();
                }

                imgBox.Image = (IImage)frame;
            }
        }
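A side note on the kernel above: the hand-built 5×5 Matrix<byte> of ones is the same element a rectangular GetStructuringElement call produces, so the following shorter form (a sketch, not part of the original handler) is interchangeable:

                // 5x5 all-ones rectangular kernel, equivalent to the Matrix<byte> built by hand above.
                Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(5, 5), new Point(-1, -1));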
Example #3
        public Mat GetHandContours(Mat image)
        {
            var copy = new Mat();

            CvInvoke.GaussianBlur(image, copy, new Size(5, 5), 1.5, 1.5);
            var  mask = new Mat();
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(copy, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    using (ScalarArray lower = new ScalarArray(9))
                        using (ScalarArray upper = new ScalarArray(14))
                            CvInvoke.InRange(mask, lower, upper, mask);
                    //CvInvoke.BitwiseNot(mask, mask);

                    //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }

            //Use Dilate followed by Erode (morphological closing) to eliminate small gaps in the contours.
            CvInvoke.Dilate(mask, mask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            CvInvoke.Erode(mask, mask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);

            return(mask);
        }
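The Dilate-then-Erode pair at the end of GetHandContours is a morphological closing; assuming a 3×3 rectangular element (the default implied by the null kernel above), the same step can be written as one MorphologyEx call:

            // Sketch: closing (dilate then erode) expressed as a single call, assuming a 3x3 rectangular kernel.
            Mat closeKernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
            CvInvoke.MorphologyEx(mask, mask, MorphOp.Close, closeKernel, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);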
        public static Bitmap BackProject(Bitmap bmp, int[] HueRange, int[] SaturationRange)
        {
            Emgu.CV.Image <Bgr, Byte> Mask = new Image <Bgr, Byte>(bmp);                                    //Image Datatype switch
            Mat  Copy = new Mat();                                                                          //Result Mat type
            bool useUMat;                                                                                   //bool for Mat Check

            using (InputOutputArray ia = Copy.GetInputOutputArray())                                        //Determine Mask type
                useUMat = ia.IsUMat;                                                                        //If the backing store is a UMat, use UMat copies below
            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())                         //Mat Image Copies (Hue)
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())                       //Mat Image Copies (Saturation)
                {
                    CvInvoke.CvtColor(Mask, hsv, ColorConversion.Bgr2Hsv);                                  //Convert Image to Hsv
                    CvInvoke.ExtractChannel(hsv, Copy, 0);                                                  //Extract Hue channel from Hsv
                    CvInvoke.ExtractChannel(hsv, s, 1);                                                     //Extract Saturation channel from Hsv
                                                                                                            //the mask keeps hue values outside HueRange[0]..HueRange[1]
                    using (ScalarArray lower = new ScalarArray(HueRange[0]))                                //hue min
                        using (ScalarArray upper = new ScalarArray(HueRange[1]))                            //hue max
                            CvInvoke.InRange(Copy, lower, upper, Copy);                                     //Check Ranges
                    CvInvoke.BitwiseNot(Copy, Copy);                                                        //invert so that hues outside the range are kept
                                                                                                            //s is the mask for saturation of at least SaturationRange[0], mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, SaturationRange[0], SaturationRange[1], ThresholdType.Binary); //saturation check
                    CvInvoke.BitwiseAnd(Copy, s, Copy, null);                                               //If saturation and hue match requirements, place in mask
                }
            return(Copy.Bitmap);
        }
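A possible call site for BackProject; the file names and ranges below are illustrative only (hue outside 20..160 is kept because of the BitwiseNot, and saturation must exceed 10):

        // Hypothetical usage of BackProject with placeholder file names.
        public static void BackProjectDemo()
        {
            Bitmap source  = new Bitmap("input.png");
            Bitmap redMask = BackProject(source, new int[] { 20, 160 }, new int[] { 10, 255 });
            redMask.Save("red_mask.png");
        }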
Example #5
        private static void GetLabColorPixelMask(IInputArray image, IInputOutputArray mask, int lightLower, int lightUpper, int aLower, int aUpper, int bLower, int bUpper)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage lab = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage l = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    using (IImage a = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                        using (IImage b = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                        {
                            CvInvoke.CvtColor(image, lab, ColorConversion.Bgr2Lab);
                            CvInvoke.ExtractChannel(lab, mask, 0);
                            CvInvoke.ExtractChannel(lab, a, 1);
                            CvInvoke.ExtractChannel(lab, b, 2);

                            //threshold on lightness
                            //CvInvoke.Threshold(lab, l, lightLower, lightUpper, ThresholdType.Binary);
                            //CvInvoke.BitwiseAnd(mask, s, mask, null);

                            using (ScalarArray lower = new ScalarArray(lightLower))
                                using (ScalarArray upper = new ScalarArray(lightUpper))
                                    CvInvoke.InRange(mask, lower, upper, mask);

                            //threshold on the A channel and AND it into the mask (which holds the L-channel result)
                            CvInvoke.Threshold(a, a, aLower, aUpper, ThresholdType.Binary);
                            CvInvoke.BitwiseAnd(mask, a, mask, null);

                            //threshold on the B channel and AND it into the previous mask
                            CvInvoke.Threshold(b, b, bLower, bUpper, ThresholdType.Binary);
                            CvInvoke.BitwiseAnd(mask, b, mask, null);
                        }
        }
Example #6
        /// <summary>
        /// Compute a color pixel mask for the given image.
        /// A pixel is kept when: hueLower &lt; hue &lt; hueUpper AND saturation &gt; satLower AND luminosity &gt; lumLower
        /// </summary>
        /// <param name="image">The color image to compute the mask from</param>
        /// <param name="mask">The resulting pixel mask</param>
        private static void GetColorPixelMask(IInputArray image, IInputOutputArray mask, int hueLower, int hueUpper, int satLower, int satUpper, int lumLower, int lumUpper)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    using (IImage lum = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                    {
                        CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                        CvInvoke.ExtractChannel(hsv, mask, 0);
                        CvInvoke.ExtractChannel(hsv, s, 1);
                        CvInvoke.ExtractChannel(hsv, lum, 2);

                        //the mask for hue between hueLower and hueUpper
                        using (ScalarArray lower = new ScalarArray(hueLower))
                            using (ScalarArray upper = new ScalarArray(hueUpper))
                                CvInvoke.InRange(mask, lower, upper, mask);
                        //CvInvoke.BitwiseNot(mask, mask); //invert results to "round the corner" of the hue scale

                        //s is the mask for saturation of at least satLower, this is mainly used to filter out white pixels
                        CvInvoke.Threshold(s, s, satLower, satUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, s, mask, null);

                        // mask luminosity
                        CvInvoke.Threshold(lum, lum, lumLower, lumUpper, ThresholdType.Binary);
                        CvInvoke.BitwiseAnd(mask, lum, mask, null);
                    }
        }
        /// <summary>
        /// Finds the pixels that are red
        /// </summary>
        /// <param name="image">The image to process</param>
        /// <param name="mask">The resulting pixel mask</param>
        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
            {
                useUMat = ia.IsUMat;
            }

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    //the hue mask for 20 to 160 (inverted below so that red is kept)
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                        {
                            CvInvoke.InRange(mask, lower, upper, mask);
                        }

                    CvInvoke.BitwiseNot(mask, mask);

                    //the mask for saturation of at least 15, used to filter out white pixels
                    CvInvoke.Threshold(s, s, 15, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }
        }
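Because red sits at both ends of the 0..180 hue scale, this method selects hue 20..160 and then inverts. An equivalent sketch (not from the original) selects the two red bands directly and ORs them, operating on the same hue channel held in mask:

                    // Sketch: keep hue <= 20 and hue >= 160 explicitly instead of InRange + BitwiseNot.
                    using (Mat lowRed = new Mat())
                    using (Mat highRed = new Mat())
                    using (ScalarArray zero = new ScalarArray(0))
                    using (ScalarArray low = new ScalarArray(20))
                    using (ScalarArray high = new ScalarArray(160))
                    using (ScalarArray max = new ScalarArray(180))
                    {
                        CvInvoke.InRange(mask, zero, low, lowRed);
                        CvInvoke.InRange(mask, high, max, highRed);
                        CvInvoke.BitwiseOr(lowRed, highRed, mask);
                    }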
Example #8
        // Creates a rectangular mask surrounding the main line and cuts away the remaining noise.
        // WORKS
        public void ValidWindowMask(Mat thresholdImage, Mat imageDiff, out Mat binaryImage, out Mat mask)
        {
            binaryImage = null;
            mask        = null;

            try
            {
                if (thresholdImage == null)
                {
                    throw new InvalidOperationException("BinaryImage == null");
                }

                // pixel range boundaries.
                int[] minValuesCL, maxValuesCL;

                // pixel binarization range.
                GetMinMaxValuesCL(thresholdImage, out maxValuesCL, out minValuesCL, Window);

                // create the mask.
                Matrix <Byte> mask_cl;
                GenerateImageMaskCL(thresholdImage, minValuesCL, maxValuesCL, out mask_cl);

                // cut away the unneeded clutter.
                binaryImage = new Mat();
                CvInvoke.BitwiseAnd(imageDiff, mask_cl, binaryImage);
                mask = mask_cl.Mat.Clone();
            } catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
        //##############################################################################################################################################################################################

        private void rotate_ccw(int id, int times)
        {
            int direction = times % 4;

            for (int i = 0; i < direction; i++)
            {
                sets[id].locations = Utils.RotateMatrixCounterClockwise(sets[id].locations);
                sets[id].rotations = Utils.RotateMatrixCounterClockwise(sets[id].rotations);
            }
            sets[id].rotations += direction;

            //If there is no piece at the location, the rotation needs to be set back to zero
            for (int i = 0; i < sets[id].locations.Size.Width; i++)
            {
                for (int j = 0; j < sets[id].locations.Size.Height; j++)
                {
                    if (sets[id].locations[j, i] == -1)
                    {
                        sets[id].rotations[j, i] = 0;
                    }
                }
            }

            //basically rotations % 4 (OpenCV does not have an operator for this). Luckily the last 2 bits are all that is needed
            CvInvoke.BitwiseAnd(sets[id].rotations, new ScalarArray(3), sets[id].rotations);
            return;
        }
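The final BitwiseAnd with 3 works because x & 3 == x % 4 for any non-negative x: 4 is a power of two, so the remainder is exactly the low two bits. A tiny self-check, independent of OpenCV:

        // For non-negative x, masking the low two bits equals taking the remainder mod 4,
        // e.g. 7 & 3 == 3 == 7 % 4 and 12 & 3 == 0 == 12 % 4.
        private static void CheckMod4Identity()
        {
            for (int x = 0; x < 16; x++)
            {
                System.Diagnostics.Debug.Assert((x & 3) == x % 4);
            }
        }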
Example #10
        /// <summary>
        /// Get the ROI
        /// </summary>
        /// <param name="image">The original image to crop</param>
        /// <param name="cameraID">Camera index that selects which ROI rectangle is kept</param>
        /// <returns>ROI</returns>
        private Image <Gray, byte> GetROI(Image <Gray, byte> image, int cameraID)
        {
            Rectangle rightROI = new Rectangle(new Point(950, 50), new Size(4050 - 850, 2800));
            Rectangle frontROI = new Rectangle(new Point(720, 100), new Size(4300 - 720, 3400));
            //image is the original Image<Gray, byte>, the rectangle defines the ROI, and resImag is the cropped result.
            Image <Gray, byte> resImag = image.CopyBlank();

            using (var mask = new Image <Gray, Byte>(image.Size))
            {
                mask.SetZero();//set all pixel values to 0
                if (cameraID == 0)
                {
                    mask.ROI = rightROI;
                }
                else
                {
                    mask.ROI = frontROI;
                }
                mask.SetValue(255);         //set the ROI pixels to 255
                mask.ROI = Rectangle.Empty; //clear the ROI
                //resImag(I) = image(I) & mask(I); the mask is 255 inside the ROI and 0 elsewhere, so only the ROI pixels survive
                CvInvoke.BitwiseAnd(image, mask, resImag);
            }
            return(resImag);
        }
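Setting mask.ROI and then SetValue(255) paints a white rectangle into the mask. An alternative sketch with the same effect, reusing the locals of GetROI, draws the filled rectangle directly:

            // Sketch: build the same mask by drawing a filled white rectangle (thickness -1 fills it).
            using (var mask = new Image <Gray, Byte>(image.Size))
            {
                Rectangle roi = cameraID == 0 ? rightROI : frontROI;
                CvInvoke.Rectangle(mask, roi, new MCvScalar(255), -1);
                CvInvoke.BitwiseAnd(image, mask, resImag);
            }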
Example #11
        /// <summary>
        /// Compute the red pixel mask for the given image.
        /// A red pixel is a pixel where:  hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
        /// </summary>
        /// <param name="image">The color image to find red mask from</param>
        /// <param name="mask">The red pixel mask</param>
        private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
        {
            bool useUMat;

            using (InputOutputArray ia = mask.GetInputOutputArray())
                useUMat = ia.IsUMat;

            using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
                {
                    CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
                    CvInvoke.ExtractChannel(hsv, mask, 0);
                    CvInvoke.ExtractChannel(hsv, s, 1);

                    //the mask for hue less than 20 or larger than 160
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                            CvInvoke.InRange(mask, lower, upper, mask);
                    CvInvoke.BitwiseNot(mask, mask);

                    //s is the mask for saturation of at least 10, this is mainly used to filter out white pixels
                    CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
                    CvInvoke.BitwiseAnd(mask, s, mask, null);
                }
        }
Example #12
        public static Image <Gray, Byte> GetPixelMask(Image <Bgr, byte> image, int lowerRange, int upperRange)
        {
            using (Image <Hsv, Byte> hsv = image.Convert <Hsv, Byte>())
            {
                Image <Gray, Byte>[] channels = hsv.Split();

                try
                {
                    ScalarArray SA1 = new ScalarArray(new MCvScalar(lowerRange));
                    ScalarArray SA2 = new ScalarArray(new MCvScalar(upperRange));

                    CvInvoke.InRange(channels[0], SA1, SA2, channels[0]);
                    channels[0]._Not();

                    channels[1]._ThresholdBinary(new Gray(10), new Gray(255.0));

                    CvInvoke.BitwiseAnd(channels[0], channels[1], channels[0]);
                }
                finally
                {
                    channels[1].Dispose();
                    channels[2].Dispose();
                }
                return(channels[0]);
            }
        }
Example #13
        /// <summary>
        /// Compute the red pixel mask for the given image.
        /// A red pixel is a pixel where:  hue &lt; 20 OR hue &gt; 160, AND saturation &gt; 10
        /// </summary>
        /// <param name="image">The color image to find red mask from</param>
        /// <returns>The red pixel mask</returns>
        private static Image <Gray, Byte> GetRedPixelMask(Image <Bgr, byte> image)
        {
            using (Image <Hsv, Byte> hsv = image.Convert <Hsv, Byte>())
            {
                Image <Gray, Byte>[] channels = hsv.Split();

                try
                {
                    //channels[0] is the mask for hue less than 20 or larger than 160
                    using (ScalarArray lower = new ScalarArray(20))
                        using (ScalarArray upper = new ScalarArray(160))
                            CvInvoke.InRange(channels[0], lower, upper, channels[0]);
                    channels[0]._Not();

                    //channels[1] is the mask for saturation of at least 10, this is mainly used to filter out white pixels
                    channels[1]._ThresholdBinary(new Gray(10), new Gray(255.0));

                    CvInvoke.BitwiseAnd(channels[0], channels[1], channels[0], null);
                }
                finally
                {
                    channels[1].Dispose();
                    channels[2].Dispose();
                }
                return(channels[0]);
            }
        }
Example #14
        /// <summary>
        /// Checks if two contours intersects
        /// </summary>
        /// <param name="contour1">Contour 1</param>
        /// <param name="contour2">Contour 2</param>
        /// <returns></returns>
        public static bool ContoursIntersect(VectorOfVectorOfPoint contour1, VectorOfVectorOfPoint contour2)
        {
            var contour1Rect = CvInvoke.BoundingRectangle(contour1[0]);
            var contour2Rect = CvInvoke.BoundingRectangle(contour2[0]);

            /* early exit if the bounding rectangles don't intersect */
            if (!contour1Rect.IntersectsWith(contour2Rect))
            {
                return(false);
            }

            var minX = contour1Rect.X < contour2Rect.X ? contour1Rect.X : contour2Rect.X;
            var minY = contour1Rect.Y < contour2Rect.Y ? contour1Rect.Y : contour2Rect.Y;

            var maxX = (contour1Rect.X + contour1Rect.Width) < (contour2Rect.X + contour2Rect.Width) ? (contour2Rect.X + contour2Rect.Width) : (contour1Rect.X + contour1Rect.Width);
            var maxY = (contour1Rect.Y + contour1Rect.Height) < (contour2Rect.Y + contour2Rect.Height) ? (contour2Rect.Y + contour2Rect.Height) : (contour1Rect.Y + contour1Rect.Height);

            var totalRect = new Rectangle(minX, minY, maxX - minX, maxY - minY);

            using var contour1Mat = EmguExtensions.InitMat(totalRect.Size);
            using var contour2Mat = EmguExtensions.InitMat(totalRect.Size);

            var inverseOffset = new Point(minX * -1, minY * -1);

            CvInvoke.DrawContours(contour1Mat, contour1, -1, EmguExtensions.WhiteColor, -1, Emgu.CV.CvEnum.LineType.EightConnected, null, int.MaxValue, inverseOffset);
            CvInvoke.DrawContours(contour2Mat, contour2, -1, EmguExtensions.WhiteColor, -1, Emgu.CV.CvEnum.LineType.EightConnected, null, int.MaxValue, inverseOffset);

            CvInvoke.BitwiseAnd(contour1Mat, contour2Mat, contour1Mat);

            return(!contour1Mat.IsZeroed());
        }
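EmguExtensions.InitMat and IsZeroed are project-specific helpers. With plain Emgu, and assuming the drawn masks are single-channel, the same overlap decision can end with CountNonZero:

            // Sketch: any non-zero pixel left after the BitwiseAnd means the two contours overlap.
            return(CvInvoke.CountNonZero(contour1Mat) > 0);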
Example #15
        private Image <Gray, Byte> GetRedPixelMask(Image <Bgr, byte> image)
        {
            using (Image <Hsv, Byte> hsv = image.Convert <Hsv, Byte>())
            {
                Image <Gray, Byte>[] channels = hsv.Split();
                try
                {
                    //channels[0] is the mask for hue less than 80 or larger than 180
                    CvInvoke.InRange(channels[0], new ScalarArray(new MCvScalar(80)), new ScalarArray(new MCvScalar(180)), channels[0]);
                    channels[0]._Not();

                    //channels[1] is the mask for saturation above the binary threshold (0 here), mainly used to filter out white pixels
                    channels[1]._ThresholdBinary(new Gray(0), new Gray(255));

                    // Convert image to binary
                    CvInvoke.BitwiseAnd(channels[0], channels[1], channels[1]);

                    channels[1]._Not();
                }
                finally
                {
                    channels[0].Dispose();
                    channels[2].Dispose();
                }
                return(channels[1]);
            }
        }
Example #16
        public static Image <Gray, byte> FilterCups(Image <Hsv, byte> input, bool ShowFiltered)
        {
//      ScalarArray lower = new ScalarArray( new Hsv( 0, (255*0.5), 255 ).MCvScalar );
//      ScalarArray upper = new ScalarArray( new Hsv( (180* ( 360 / 328) ), 255, (255*0.75) ).MCvScalar );
//
//      ScalarArray lower = new ScalarArray( new Hsv( 130, 0, 0 ).MCvScalar );
//      ScalarArray upper = new ScalarArray( new Hsv( 180, 200, 255 ).MCvScalar );

            ScalarArray lower = new ScalarArray(new Bgr(Color.Red).MCvScalar);
            ScalarArray upper = new ScalarArray(new Bgr(Color.Purple).MCvScalar);

            Image <Gray, byte>[] channels = input.Split();

            CvInvoke.InRange(channels[0], new ScalarArray(20), new ScalarArray(160), channels[0]);

            channels[0]._Not();
//      channels[0]._ThresholdBinary( new Gray(200), new Gray(255.0));
            CvInvoke.BitwiseAnd(channels[0], channels[1], channels[0], null);

            channels[0]._ThresholdToZero(new Gray(150));

            if (ShowFiltered)
            {
                CvInvoke.Imshow("Cup Filter", channels[0]);
            }

//      CvInvoke.InRange( input, lower, upper, output );

            return(channels[0]);
        }
Example #17
        private void ProcessImage(List <string> lines)
        {
            Mat imageOriginal = CvInvoke.Imread(ImageRecievedName, LoadImageType.AnyColor);

            var imageWithHitsBgr = CreateHitImage(imageOriginal.Size, lines);

            // create mask to have white circles wherever hits exist and to be black on all other parts
            var mask = new Mat();

            CvInvoke.Threshold(imageWithHitsBgr, mask, 1, 255, ThresholdType.Binary);
            var inverseMask = new Mat();

            CvInvoke.BitwiseNot(mask, inverseMask);

            // mapping level of gray to ColorMap
            CvInvoke.ApplyColorMap(imageWithHitsBgr, imageWithHitsBgr, ColorMapType.Jet);
            // from mapped image remove everything except hits
            var imageWithHitsWithoutBackground = new Mat();

            CvInvoke.BitwiseAnd(imageWithHitsBgr, imageWithHitsBgr, imageWithHitsWithoutBackground, mask);

            // from original image remove only parts where hits happened
            var imageOriginalWithoutHits = new Mat();

            CvInvoke.BitwiseAnd(imageOriginal, imageOriginal, imageOriginalWithoutHits, inverseMask);
            // result is combination of original image without hits and image with hits mapped to certain ColorMap
            var result = new Mat();

            CvInvoke.Add(imageOriginalWithoutHits, imageWithHitsWithoutBackground, result);
            result.Save(ImageProcessedName);
        }
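The two masked BitwiseAnd calls plus Add build a composite: original pixels where there are no hits, colormapped pixels where there are. Assuming the Emgu build in use exposes Mat.CopyTo with a mask, a shorter sketch of the same composite is:

            // Sketch: overwrite only the hit pixels of the original with the colormapped hits.
            var composite = imageOriginal.Clone();
            imageWithHitsBgr.CopyTo(composite, mask);
            composite.Save(ImageProcessedName);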
Example #18
 /// <summary>Face detection</summary>
 public Bitmap FaceVerify(Bitmap bmpImage)
 {
     try
     {
         using (var currentFrame = new Image <Bgr, Byte>(bmpImage))
         {
             //this is the only way to do the conversion here
             invert = new Mat();
             CvInvoke.BitwiseAnd(currentFrame, currentFrame, invert);
             int c = 0;
             _rf     = new RrFaceT();
             _w      = invert.Width; _h = invert.Height;
             _itData = invert.DataPointer;
             var    lp = new IntPtr();
             IntPtr it = FaceverifyDll.rr_fd_detect(_ip, _itData, RrImageType.RR_IMAGE_BGR8UC3, _w, _h, ref lp, ref c);
             if (it.ToInt32() != 0)
             {
                 return(bmpImage);
             }
             if (c > 0)
             {
                 _rf    = (RrFaceT)Marshal.PtrToStructure(lp, typeof(RrFaceT));
                 Rrface = _rf;
             }
             FaceverifyDll.rr_fd_release_detect_result(lp);
         }
     }
     catch (Exception ex)
     {
         ex.ToSaveLog("FaceData.FaceVerify:");
     }
     return(bmpImage);
 }
Example #19
        /// <summary>
        /// Convert a Bitmap to a Mat using Emgu's Image conversion constructor
        /// </summary>
        /// <param name="bitmap"></param>
        /// <returns></returns>
        public static Mat BitmapToMat(Bitmap bitmap)
        {
            Image <Bgr, Byte> currentFrame = new Image <Bgr, Byte>(bitmap);
            Mat invert = new Mat();

            CvInvoke.BitwiseAnd(currentFrame, currentFrame, invert);
            return(invert);
        }
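This example and Example #18 both use BitwiseAnd(x, x, dst) purely as a copy from Image<Bgr, Byte> into a Mat. If a copy is not required, Emgu's Image type also exposes the underlying buffer through its Mat property; a sketch under that assumption:

        // Sketch: obtain a Mat view without the BitwiseAnd copy.
        // Image<,>.Mat wraps the same pixel buffer, so only use this when sharing the data is acceptable.
        public static Mat BitmapToMatView(Bitmap bitmap)
        {
            Image <Bgr, Byte> currentFrame = new Image <Bgr, Byte>(bitmap);
            return(currentFrame.Mat);
        }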
Example #20
        private void button_segment_Click(object sender, EventArgs e)
        {
            Mat segmented = new Mat();

            //convert the binary ROI image to an RGB ROI using a bitwise AND
            CvInvoke.BitwiseAnd(img, img, segmented, imgThreshed);
            pictureBox_segmented.Image = segmented.Bitmap;
            segment = segmented;//save to segment, a global variable used by the other steps
        }
Example #21
        static Mat MaskImg(Mat edges, String maskFileName = "edgesmask.png")
        {
            var edgesmask   = LoadGray(maskFileName);
            var maskedEdges = new Mat();

            CvInvoke.BitwiseAnd(edges, edgesmask, maskedEdges);
            return(maskedEdges);
            //CvInvoke.BitwiseOr(edges, maskedEdges, edgesmask);
        }
    public void SkeletonExtractorHandle(Image <Rgb, byte> _img, Image <Rgb, byte> _handle_img)
    {
        this.label_forname = Label.Handle;
        this.body_img      = _handle_img.Convert <Gray, byte>();
        this.attach_img    = _img.Convert <Gray, byte>();
        noface             = true;
        // guess top face
        List <Vector2> body_points   = IExtension.GetBoundary(this.body_img);
        List <Vector2> attach_points = IExtension.GetBoundary(this.attach_img);
        Vector2        attach_center = Utility.Average(attach_points);

        double mindis_center = double.MaxValue;
        //int counter = 0;

        List <OrderItem> dis_body = new List <OrderItem>();

        Vector2 closest_body_point_to_center = new Vector2();

        for (int i = 0; i < body_points.Count; i++)
        {
            double dis = Utility.DistancePoint2Set(body_points[i], attach_points);
            dis_body.Add(new OrderItem(dis, i));
        }
        dis_body = dis_body.OrderBy(x => x.value).ToList();
        for (int i = 0; i < dis_body.Count * 0.1; i++)
        {
            double dis_to_center = Vector2.Distance(body_points[dis_body[i].index], attach_center);
            if (dis_to_center < mindis_center)
            {
                mindis_center = dis_to_center;
                closest_body_point_to_center = body_points[dis_body[i].index];
            }
        }

        Image <Gray, byte> guess_face = this.body_img.CopyBlank();
        //float guess_radius = 20f * body_img.Height / 600f;
        float guess_radius = 10f * body_img.Height / 600f;

        do
        {
            guess_radius *= 1.1f;
            guess_face    = this.body_img.CopyBlank();
            guess_face.Draw(new CircleF(new PointF(closest_body_point_to_center.x, closest_body_point_to_center.y), guess_radius), new Gray(255), -1);
            guess_face.Save("skeleton1guessface" + guess_radius + ".png");
        }while (IExtension.GetBoundary(guess_face).Count == 0);


        CvInvoke.BitwiseAnd(guess_face, body_img, guess_face);
        if (debug)
        {
            guess_face.Save("skeleton1guessface.png");
        }


        this.face_img = new List <Image <Gray, byte> >();
        this.face_img.Add(guess_face);
    }
Example #23
        public static Bitmap bitwies_and(Bitmap src, Bitmap mask)
        {
            Bitmap            ret  = null;
            Image <Bgr, Byte> img1 = new Image <Bgr, byte>(src);
            Image <Bgr, Byte> img2 = new Image <Bgr, byte>(mask);
            Mat r = new Mat();

            CvInvoke.BitwiseAnd(img1, img2, r);
            ret = r.Bitmap;
            return(ret);
        }
Example #24
        public Image <Gray, Byte> deleteCircle()
        {
            Image <Gray, Byte> outputImage = new Image <Gray, byte>(this.grayImage.Size);
            Image <Gray, Byte> tmp         = this.grayImage.Copy();

            this.correctErrors();
            CvInvoke.Circle(outputImage, this.center, this.radius, new MCvScalar(255), -1);
            // CvInvoke.BitwiseNot(tmp, tmp);
            CvInvoke.BitwiseAnd(outputImage, tmp, outputImage);
            return(outputImage);
        }
    private Image <Gray, byte> HitOrMiss(Image <Gray, byte> skel, Mat c, Mat d)
    {
        Image <Gray, byte> skel_hm = skel.Copy();
        Image <Gray, byte> temp2   = new Image <Gray, byte>(skel.Size);
        Image <Gray, byte> temp    = new Image <Gray, byte>(skel.Size);

        CvInvoke.Erode(skel_hm, temp, c, new Point(-1, -1), 1, BorderType.Reflect, default(MCvScalar));
        CvInvoke.BitwiseNot(skel_hm, temp2);
        CvInvoke.Erode(temp2, temp2, d, new Point(-1, -1), 1, BorderType.Reflect, default(MCvScalar));
        CvInvoke.BitwiseAnd(temp, temp2, skel_hm);
        return(skel_hm);
    }
Example #26
        public override bool Execute(Mat mat, params object[] arguments)
        {
            var target = GetRoiOrDefault(mat);

            using var mask = GetMask(mat);
            if (Mask.Size != target.Size)
            {
                return(false);
            }
            CvInvoke.BitwiseAnd(target, Mask, target, mask);
            return(true);
        }
        /// <summary>
        /// The two color segmentations ('Minimum' and 'HSV') are combined to give a better prediction.
        /// </summary>
        /// <param name="inputImage">A standard BGR image.</param>
        /// <returns>A grayscale image with skin being white pixels.</returns>
        private static Image <Gray, byte> ColorSegment(Image <Bgr, byte> inputImage)
        {
            Image <Gray, byte> minimumSegment = MinimumSegment(inputImage);
            Image <Gray, byte> hsvSegment     = HsvSegment(inputImage);

            Image <Gray, byte> segmentedImage = new Image <Gray, byte>(inputImage.Size);

            CvInvoke.BitwiseAnd(minimumSegment, hsvSegment, segmentedImage);

            minimumSegment.Dispose();
            hsvSegment.Dispose();
            return(segmentedImage);
        }
Example #28
        public List <Rectangle> DetectText(Image <Bgr, byte> img, decimal dilate, int minFontSize = 6, int maxFontSize = 300, int minWidth = 35, int maxWidth = 500)
        {
            //ProcessedImageStep1 = img.Mat;
            var img2gray = img.Convert <Gray, byte>();

            _RemoveVerticalLines(img2gray);

            Mat mask = new Mat();

            CvInvoke.Threshold(img2gray, mask, 180, 255, ThresholdType.Binary);
            Mat image_final = new Mat();

            CvInvoke.BitwiseAnd(img2gray, img2gray, image_final, mask);
            Mat new_img = new Mat();

            CvInvoke.Threshold(image_final, new_img, 180, 255, ThresholdType.BinaryInv);
            // 12x12 rectangular structuring element used for the dilation below
            Mat kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(12, 12), new Point(-1, -1));
            Mat dilated = new Mat();

            //DetectionImage = kernel;
            CvInvoke.Dilate(new_img, dilated, kernel, new Point(0, 0), (int)dilate, BorderType.Default, new MCvScalar(0, 0, 0));

            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();

            CvInvoke.FindContours(dilated, contours, null, RetrType.External, ChainApproxMethod.ChainApproxNone);

            List <Rectangle> rectangles = new List <Rectangle>();

            for (int i = 0; i < contours.Size; i++)
            {
                Rectangle br = CvInvoke.BoundingRectangle(contours[i]);
                if (br.Width < minWidth || br.Width > maxWidth || br.Height > maxFontSize || br.Height < minFontSize)
                {
                    continue;
                }

                CvInvoke.Rectangle(img, br, new MCvScalar(255, 0, 255), 3);
                rectangles.Add(br);
            }

            ProcessedImageStep2 = img2gray.Mat;
            ProcessedImageStep3 = image_final;
            ProcessedImageStep4 = new_img;
            ProcessedImageStep5 = dilated;

            ExtractedImage = img.Mat;

            return(rectangles);
        }
        public static Bitmap SuperPositionedImage(Bitmap Mask, Bitmap StandardImage)
        {
            Image <Hsv, Byte>  GrayOrigin = new Image <Gray, Byte>(StandardImage).Convert <Hsv, Byte>();
            Image <Hsv, Byte>  HSVOrigin  = new Image <Hsv, Byte>(StandardImage);
            Image <Gray, Byte> MaskImage  = new Image <Gray, Byte>(Mask);
            Mat ResultHolder = new Mat();

            CvInvoke.BitwiseAnd(GrayOrigin, HSVOrigin, ResultHolder, MaskImage);
            Mat TrueResultHolder = new Mat();

            CvInvoke.Add(GrayOrigin, ResultHolder, TrueResultHolder, MaskImage);

            return(TrueResultHolder.Bitmap);
        }
        public static Mat ApplyMask(Mat img, Mat mask)
        {
            Mat output = new Mat(img.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 1);

            output.SetTo(new MCvScalar(0));
            if (img.Height < mask.Height)
            {
                CvInvoke.Resize(img, img, mask.Size);
            }
            CvInvoke.Threshold(mask, mask, 127, 255, ThresholdType.Binary);

            CvInvoke.BitwiseAnd(img, img, output, mask);
            return(output);
        }
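A possible call site for ApplyMask; the file names are placeholders, and the mask is loaded as grayscale so that the Threshold and masked BitwiseAnd inside the method receive a single-channel image:

        // Hypothetical usage of ApplyMask with placeholder file names.
        public static void ApplyMaskDemo()
        {
            Mat photo   = CvInvoke.Imread("photo.png", LoadImageType.Color);
            Mat maskImg = CvInvoke.Imread("mask.png", LoadImageType.Grayscale);
            Mat masked  = ApplyMask(photo, maskImg);
            masked.Save("masked.png");
        }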