コード例 #1
5
ファイル: Form1.cs プロジェクト: alecrudd/GocatorImager
        /// <summary>
        /// Detects square tiles in <paramref name="image"/> and outlines the accepted
        /// ones on it in white. Pipeline: grayscale -> optional Canny edge
        /// subtraction -> optional threshold subtraction -> distance transform ->
        /// normalize -> threshold -> erode -> labelled contour markers -> watershed
        /// -> contour filtering by area and shape-match against a square template.
        /// Intermediate stages are shown in Imshow windows "0".."6" for debugging;
        /// elapsed time and tile count are appended to dataTextBox.
        /// </summary>
        /// <param name="image">Input BGR image; accepted tile outlines are drawn onto it.</param>
        /// <param name="modifiedMat">Never read or written in this method. NOTE(review): confirm it can be removed.</param>
        public void FilterTiles(Mat image, Mat modifiedMat)
        {
            CvInvoke.Imshow("0", image);
            
            Stopwatch sw1 = new Stopwatch();
            sw1.Start();
            Mat laplaced = new Mat();
            CvInvoke.CvtColor(image, laplaced, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
            // Two grayscale working copies: greyResult is mutated stage by stage,
            // greySource holds the current "source" that later stages subtract from.
            Mat greyResult = laplaced.Clone();
            Mat greySource = laplaced.Clone();

            // NOTE(review): cannySrc is allocated but never used anywhere below.
            Mat cannySrc = new Mat();

            //if not half inch, do canny and subtract to separate tiles better. Basically "sharpens" the edge
            if (scan.TileSettings.CannyEdges)
            {
                //create canny image, these parameters could be adjusted probably?
                CvInvoke.Canny(greySource, greyResult, 50, 150);
                //dilate then erode the canny edges (morphological close)

                CvInvoke.Dilate(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                
                CvInvoke.Imshow("1a", greyResult);

                //subtract dilated canny from source to get separation
                CvInvoke.Subtract(greySource, greyResult, greyResult);
                greySource = greyResult.Clone();
                CvInvoke.Imshow("1b", greyResult);
            }

            if (scan.TileSettings.ThresholdEdges)
            {
                Mat edges = new Mat();
                // Keep only pixels above the trackbar threshold (ToZero), then
                // subtract them from the source to suppress the strongest edges.
                CvInvoke.Threshold(greyResult, edges, (float)thresholdTrackbar.Value, 0, ThresholdType.ToZero);
                CvInvoke.Subtract(greySource, edges, greyResult);
                CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
                CvInvoke.Imshow("pres-1c", greyResult);
             }
            //perform distance transform
            CvInvoke.DistanceTransform(greyResult, greyResult, null, DistType.L2, 5);
            //normalize the image to [0, 1] to bring out the peaks
            CvInvoke.Normalize(greyResult, greyResult, 0, 1, NormType.MinMax);
            CvInvoke.Imshow("2", greyResult);

            //threshold the image, different thresholds for different tiles

            CvInvoke.Threshold(greyResult, greyResult, scan.TileSettings.ThresholdVal, 1, ThresholdType.Binary);

            CvInvoke.Imshow("3", greyResult);

            //erode to split the blobs
            CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(-1, -1), scan.TileSettings.ThresholdErode, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);

            //convert to 8 bit unsigned needed for canny
            greyResult.ConvertTo(greyResult, DepthType.Cv8U);

            VectorOfVectorOfPoint markers = new VectorOfVectorOfPoint();

            //create 32bit, single channel image for result of markers
            Mat markerImage = new Mat(greyResult.Size, DepthType.Cv32S, 1);

            //set image to 0
            markerImage.SetTo(new MCvScalar(0, 0, 0));

            //find the contours
            CvInvoke.FindContours(greyResult, markers, null, RetrType.External, ChainApproxMethod.LinkRuns);

            //label the markers from 1 -> n, the rest of the image should remain 0
            for (int i = 0; i < markers.Size; i++)
                CvInvoke.DrawContours(markerImage, markers, i, new MCvScalar(i + 1, i + 1, i + 1), -1);

            // Scale the small label values up purely so they are visible when shown.
            ScalarArray mult = new ScalarArray(5000);
            Mat markerVisual = new Mat();

            CvInvoke.Multiply(markerImage, mult, markerVisual);

            CvInvoke.Imshow("4", markerVisual);

            //draw the background marker near the top-left corner
            CvInvoke.Circle(markerImage,
                new System.Drawing.Point(5, 5),
                3,
                new MCvScalar(255, 255, 255),
                -1);

            //convert to 3 channel (Watershed requires a BGR input image)
            Mat convertedOriginal = new Mat();
            
            //use canny modified if 3/4", or use the gray image for others

            CvInvoke.CvtColor(greySource, convertedOriginal, ColorConversion.Gray2Bgr);

            //watershed!!
            CvInvoke.Watershed(convertedOriginal, markerImage);
            //visualize
            CvInvoke.Multiply(markerImage, mult, markerVisual);
            CvInvoke.Imshow("5", markerVisual);

            //get contours to get the actual tiles now that they are separate...
            VectorOfVectorOfPoint tilesContours = new VectorOfVectorOfPoint();

            markerVisual.ConvertTo(markerVisual, DepthType.Cv8U);
          
            CvInvoke.BitwiseNot(markerVisual, markerVisual);
            CvInvoke.Imshow("6", markerVisual);
            CvInvoke.Dilate(markerVisual, markerVisual, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);

            CvInvoke.FindContours(markerVisual, tilesContours, null, RetrType.External, ChainApproxMethod.LinkRuns);
            
            // Accepted tile centroids; used below to reject near-duplicate detections.
            List<System.Drawing.Point> tiles = new List<System.Drawing.Point>();
            for (int i = 0; i < tilesContours.Size; i++)
            {
                using(VectorOfPoint c = tilesContours[i])
                using (VectorOfPoint approx = new VectorOfPoint())
                {
                    //epsilon = arclength * .05 to get rid of convex areas
                    CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * .05, true);
                    double area = CvInvoke.ContourArea(approx);
                  
                    //filter out the small contours...
                    if (area > scan.TileSettings.MinArea && area < scan.TileSettings.MaxArea)
                    {
                        //match the shape to the square template; lower ratio = more similar
                        double ratio = CvInvoke.MatchShapes(_square, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);

                        if (ratio < .05)
                        {
                            // Contour centroid from image moments.
                            var M = CvInvoke.Moments(c);
                            int cx = (int)(M.M10 / M.M00);
                            int cy = (int)(M.M01 / M.M00);

                            //filter out any that are too close (within 50 px on both axes)
                            if (!tiles.Any(x => Math.Abs(x.X - cx) < 50 && Math.Abs(x.Y - cy) < 50))
                            {
                                tiles.Add(new System.Drawing.Point(cx, cy));
                                for (int j = 0; j < approx.Size; j++)
                                {
                                    // Wrap to vertex 0 so the last segment closes the polygon.
                                    int second = j+1 == approx.Size ? 0 : j + 1;

                                    //do some detection for upsidedown/right side up here....

                                    CvInvoke.Line(image,
                                        new System.Drawing.Point(approx[j].X, approx[j].Y),
                                        new System.Drawing.Point(approx[second].X, approx[second].Y),
                                        new MCvScalar(255, 255, 255,255), 4);
                                }
                            }
                        }
                    }
                }
            }
            sw1.Stop();

            dataTextBox.AppendText(String.Format("Took {0} ms to detect {1} tiles{2}", sw1.ElapsedMilliseconds, tiles.Count, Environment.NewLine));

       //     dataTextBox.AppendText(String.Format("Found {0} tiles{1}", tiles.Count, Environment.NewLine));
         
            // NOTE(review): intermediate Mats (laplaced, greyResult, greySource,
            // cannySrc, markerImage, convertedOriginal, mult, ...) are never disposed.
            this.originalBox.Image = image;
            resultBox.Image = markerVisual;
        }
コード例 #2
1
ファイル: Form1.cs プロジェクト: Neths/ReStudio
        /// <summary>
        /// Finds the contour with the largest area in <paramref name="cannyEdges"/>,
        /// draws it onto <paramref name="result"/> (thin blue outline plus a thick
        /// red outline using the hierarchy), and returns a copy of its points.
        /// </summary>
        /// <param name="cannyEdges">Binary edge image to search for contours.</param>
        /// <param name="result">Image the largest contour is drawn onto.</param>
        /// <returns>The largest contour, or an empty vector when no contour is found.</returns>
        public static VectorOfPoint FindLargestContour(IInputOutputArray cannyEdges, IInputOutputArray result)
        {
            int largest_contour_index = 0;
            double largest_area = 0;

            using (Mat hierachy = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(cannyEdges, contours, hierachy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);

                // Fix: the original indexed contours[0] even when no contour was
                // found, which threw; return an empty vector instead.
                if (contours.Size == 0)
                    return new VectorOfPoint();

                for (int i = 0; i < contours.Size; i++)
                {
                    double a = CvInvoke.ContourArea(contours[i], false);  //  Find the area of contour
                    if (a > largest_area)
                    {
                        largest_area = a;
                        largest_contour_index = i;                //Store the index of largest contour
                    }
                }

                // Fix: the original redrew the running-largest contour on every
                // loop iteration; draw once after the scan instead. (Also removed
                // an unused local IOutputArray and an unused per-iteration color.)
                CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(255, 0, 0));
                CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierachy);

                return new VectorOfPoint(contours[largest_contour_index].ToArray());
            }
        }
コード例 #3
0
 /// <summary>
 /// Builds an art generator over the supplied contour set. Both random
 /// sources are seeded from the configured seed so output is reproducible.
 /// </summary>
 /// <param name="settings">Generation settings, including the random seed.</param>
 /// <param name="objects">Contours the generator will draw from.</param>
 public ArtGenerator(Settings settings, VectorOfVectorOfPoint objects)
 {
     this.settings = settings;
     this.objects = objects;

     // Seed both generators identically for deterministic runs.
     randomGen = new Random(settings.Seed);
     gausRandomGen = new GaussianRandom(settings.Seed);
 }
コード例 #4
0
ファイル: MatImage.cs プロジェクト: gw-sd-2016/TornRepair
 /// <summary>
 /// Extracts all contours from <c>matImage</c> (list retrieval mode, no point
 /// approximation) and returns them.
 /// </summary>
 /// <returns>Every contour found; the caller owns and should dispose the vector.</returns>
 public VectorOfVectorOfPoint FindContours()
 {
     VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
     // Fix: the hierarchy output Mat was previously leaked (never disposed).
     using (Mat hierarchy = new Mat())
     {
         CvInvoke.FindContours(matImage, contours, hierarchy, RetrType.List, ChainApproxMethod.ChainApproxNone);
     }
     return contours;
 }
コード例 #5
0
        /// <summary>
        /// Draws (filled) every contour of <paramref name="image"/> whose area lies
        /// strictly between <paramref name="minArea"/> and <paramref name="maxArea"/>
        /// onto a blank copy of the input, and returns that copy.
        /// </summary>
        /// <param name="image">Single-channel source image to take contours from.</param>
        /// <param name="minArea">Exclusive lower bound on contour area.</param>
        /// <param name="maxArea">Exclusive upper bound on contour area.</param>
        /// <returns>A blank copy of the input with the selected contours filled in Gray(0).</returns>
        private Image <Gray, byte> imFillHoles(Image <Gray, byte> image, int minArea, int maxArea)
        {
            var resultImage = image.CopyBlank();

            // NOTE(review): the original declared an unused Gray(255) local —
            // possibly the intended fill color, since drawing Gray(0) onto a blank
            // (presumably all-zero) copy has no visible effect; confirm intent.

            // Fix: the contour vector and hierarchy Mat were previously leaked;
            // also removed a pointless bare-brace block around this section.
            using (Emgu.CV.Util.VectorOfVectorOfPoint vetordeVetdePontos = new Emgu.CV.Util.VectorOfVectorOfPoint())
            using (Mat hierarquia = new Mat())
            {
                CvInvoke.FindContours(
                    image                                              // input image
                    , vetordeVetdePontos                               // output contour points
                    , hierarquia                                       // output hierarchy
                    , Emgu.CV.CvEnum.RetrType.Tree                     // full contour tree
                    , Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone // keep every contour point
                    , new Point(0, 0)                                  // no offset
                    );

                for (int iter = 0; iter < vetordeVetdePontos.Size; iter++)
                {
                    double areaAtual = CvInvoke.ContourArea(vetordeVetdePontos[iter]);

                    if ((areaAtual < maxArea) && (areaAtual > minArea))
                    {
                        // Gray(0): single channel; -1 thickness fills the contour.
                        resultImage.Draw(vetordeVetdePontos[iter].ToArray(), new Gray(0), -1, LineType.AntiAlias);
                    }
                }
            }
            return(resultImage);
        }
コード例 #6
0
        /// <summary>
        /// Per-frame handler wired to the capture's ImageGrabbed event: retrieves
        /// the frame, resizes it to 640x480, runs MOG2 background subtraction,
        /// finds external contours of the foreground mask, draws a green rectangle
        /// around those inside the configured area/position window, updates the
        /// image box and frame counters, and stops/disposes the capture shortly
        /// before the reported last frame. On a null frame it pauses the capture,
        /// unsubscribes itself, and notifies the user.
        /// </summary>
        private void ProcessFrame(object sender, EventArgs arg)
        {
            // NOTE(review): the bool returned by Retrieve (grab success) is ignored;
            // frame may hold stale data if the grab failed.
            _capture.Retrieve(frame);
            if (frame != null)
            {
                //CvInvoke.Resize(frame, frame, new Size(imageBox1.Width, imageBox1.Height), 0, 0, Inter.Linear);    //This resizes the image to the size of Imagebox1
                CvInvoke.Resize(frame, frame, new Size(640, 480), 0, 0, Inter.Linear);
                //CvInvoke.GaussianBlur(frame, smoothedFrame, new Size(3, 3), 1); //filter out noises
                // Lazily create the background subtractor on the first frame.
                if (_forgroundDetector == null)
                {
                    _forgroundDetector = new BackgroundSubtractorMOG2();
                }

                _forgroundDetector.Apply(frame, _forgroundMask);
                //CvInvoke.Canny(_forgroundMask, smoothedFrame, 100, 60);

                //CvInvoke.Threshold(_forgroundMask, smoothedFrame, 0, 255, Emgu.CV.CvEnum.ThresholdType.Otsu | Emgu.CV.CvEnum.ThresholdType.Binary);
                // NOTE(review): a fresh vector is assigned to the field every frame
                // without disposing the previous one.
                contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
                CvInvoke.FindContours(_forgroundMask, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                if (contours.Size > 0)
                {
                    for (int i = 0; i < contours.Size; i++)
                    {
                        double    area = CvInvoke.ContourArea(contours[i]);
                        Rectangle rect = CvInvoke.BoundingRectangle(contours[i]);

                        // Magic size/position window — presumably tuned to this
                        // camera setup; confirm before reuse elsewhere.
                        if (area > 300 && rect.Y > 200 && area < 3500 && rect.X > 100 && rect.X < 400)
                        {
                            CvInvoke.Rectangle(frame, rect, new MCvScalar(0, 255, 0), 6);
                        }
                    }
                }


                //CvInvoke.DrawContours(frame, contours, -1, new MCvScalar(255, 0, 0));



                imageBox1.Image = frame;
                curFrameCount++;
                // Stop 10 frames before the reported total — presumably to avoid
                // grabbing past the end of the video; confirm.
                if (curFrameCount >= TotalFrame - 10)
                {
                    updateButton2Text("Load Video");
                    _capture.Stop();
                    _capture.Dispose();
                    _captureInProgress = false;
                    _capture           = null;
                }
                updateCurFrameCount("Current Frame # " + curFrameCount);
            }
            else
            {
                // Null frame: pause, unhook this handler, and notify the user.
                _capture.Pause();
                _capture.ImageGrabbed -= ProcessFrame;
                MessageBox.Show("null frame");
            }
        }
コード例 #7
0
ファイル: Form1.cs プロジェクト: Neths/ReStudio
        /// <summary>
        /// Finds contours in <paramref name="cannyEdges"/> and draws each contour's
        /// minimum-area bounding rectangle onto <paramref name="result"/> in red,
        /// 3 px thick, then returns the contours.
        /// </summary>
        /// <param name="cannyEdges">Edge image to search for contours.</param>
        /// <param name="result">Image the bounding rectangles are drawn onto.</param>
        /// <param name="areaSize">Retained for interface compatibility; the
        /// area/angle rectangle filter that consumed it is currently disabled.</param>
        /// <returns>All detected contours; the caller owns and should dispose the vector.</returns>
        public static VectorOfVectorOfPoint FindRectangle(IInputOutputArray cannyEdges, IInputOutputArray result, int areaSize = 250)
        {
            // Fix: the original created the returned vector inside a using block,
            // so callers received an already-disposed object.
            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
            CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);

            // The original also approximated each contour and computed an
            // "is rectangle within [80, 100] degrees" test whose result was never
            // used (the code consuming it was commented out); that dead work has
            // been removed — restore it from history if the filter is re-enabled.
            int count = contours.Size;
            for (int i = 0; i < count; i++)
            {
                var rect = CvInvoke.MinAreaRect(contours[i]).MinAreaRect();
                CvInvoke.Rectangle(result, rect, new MCvScalar(0, 0, 255), 3);
            }

            return contours;
        }
コード例 #8
0
        /// <summary>
        /// Binarizes the loaded image (threshold 200), finds its external contours,
        /// draws them onto the binarized image, and shows the result in pictureBox2.
        /// </summary>
        private void detectarToolStripMenuItem_Click(object sender, EventArgs e)
        {
            // Binarize: everything above 200 becomes white (255).
            Image <Gray, byte> _imgOutput = _imgInput.Convert <Gray, byte>().ThresholdBinary(new Gray(200), new Gray(255));

            // Fix: the contour vector and hierarchy Mat were previously leaked.
            using (Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint())
            using (Mat hier = new Mat())
            {
                // Find and draw all external contours in one pass.
                CvInvoke.FindContours(_imgOutput, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
                CvInvoke.DrawContours(_imgOutput, contours, -1, new MCvScalar(255, 0, 0));
            }

            pictureBox2.Image = _imgOutput.Bitmap;
        }
コード例 #9
0
        /// <summary>
        /// Localizes candidate text regions in <paramref name="img"/>:
        ///  1. Sobel edge detection + binary threshold,
        ///  2. dilation with a wide rectangular kernel (merges character edges),
        ///  3. contour extraction,
        ///  4. geometric filtering of the bounding boxes,
        ///  5. display of the highlighted regions in the picture boxes.
        /// </summary>
        /// <param name="img">Source BGR image; accepted regions are outlined on it in red.</param>
        private void DetectText(Image <Bgr, byte> img)
        {
            // 1 Sobel edges, thresholded to a binary image.
            Image <Gray, byte> sobel = img.Convert <Gray, byte>().Sobel(1, 0, 3).AbsDiff(new Gray(0.0)).Convert <Gray, byte>().ThresholdBinary(new Gray(50), new Gray(255));

            List <Rectangle> list = new List <Rectangle>();

            // Fix: the structuring element, hierarchy Mat and contour vector were
            // previously leaked.
            using (Mat SE = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(10, 2), new Point(-1, -1)))
            using (Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint())
            using (Mat m = new Mat())
            {
                // 2 Dilation with a 10x2 kernel joins adjacent glyph edges into word blobs.
                sobel = sobel.MorphologyEx(Emgu.CV.CvEnum.MorphOp.Dilate, SE, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255));

                // 3 Contours
                CvInvoke.FindContours(sobel, contours, m, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                // 4 Geometric constraints: wide aspect ratio, minimum size, not full-width.
                for (int i = 0; i < contours.Size; i++)
                {
                    Rectangle brect = CvInvoke.BoundingRectangle(contours[i]);
                    // Fix: Width / Height was integer division, truncating the
                    // aspect ratio (e.g. 2.9 -> 2) and wrongly rejecting regions
                    // with a true ratio in (2, 3).
                    double ar = (double)brect.Width / brect.Height;

                    if (ar > 2 && brect.Width > 15 && brect.Height > 8 && brect.Width < 650)
                    {
                        list.Add(brect);
                    }
                }
            }

            // 5 Display: outline on the original, solid mask ANDed with it for the cut-out view.
            Image <Bgr, byte> imgout = img.CopyBlank();

            foreach (var item in list)
            {
                CvInvoke.Rectangle(img, item, new MCvScalar(0, 0, 255), 2);
                CvInvoke.Rectangle(imgout, item, new MCvScalar(0, 255, 255), -1);
            }

            imgout._And(img);
            pictureBox1.Image = img.ToBitmap();
            pictureBox2.Image = imgout.ToBitmap();
        }
コード例 #10
0
        /// <summary>
        /// Binarizes the loaded image (threshold 100), finds its external contours,
        /// draws them onto a blank same-size image, and shows it in imageBox2.
        /// </summary>
        private void button9_Click(object sender, EventArgs e)
        {
            Image <Gray, byte> imgOutput = _inputImage.Convert <Gray, byte>().ThresholdBinary(new Gray(100), new Gray(255));

            // Blank (all-black) canvas the contours are drawn onto.
            Image <Gray, byte> imgout = new Image <Gray, byte>(_inputImage.Width, _inputImage.Height, new Gray(0));

            // Fix: the contour vector and hierarchy Mat were previously leaked.
            using (Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint())
            using (Mat hier = new Mat())
            {
                CvInvoke.FindContours(imgOutput, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
                CvInvoke.DrawContours(imgout, contours, -1, new MCvScalar(255, 0, 0));
            }

            imageBox2.Image = imgout;
        }
コード例 #11
0
ファイル: DrawSubdivision.cs プロジェクト: neutmute/emgucv
      /// <summary>
      /// Draw the planar subdivision: Voronoi facets filled with random dark
      /// colors (edges in black, facet points in red) and the Delaunay
      /// triangulation overlaid in white.
      /// </summary>
      /// <param name="maxValue">The points contain values between [0, maxValue)</param>
      /// <param name="pointCount">The total number of points</param>
      /// <returns>An image representing the planar subdivision of the points</returns>
      public static Mat Draw(float maxValue, int pointCount)
      {
         Triangle2DF[] delaunayTriangles;
         VoronoiFacet[] voronoiFacets;
         // Seed from the clock so each call produces a different random layout.
         Random r = new Random((int)(DateTime.Now.Ticks & 0x0000ffff));

         CreateSubdivision(maxValue, pointCount, out delaunayTriangles, out voronoiFacets);

         //create an image for display purpose (maxValue x maxValue, 8-bit, 3 channels)
         Mat img = new Mat((int)maxValue, (int)maxValue, DepthType.Cv8U, 3);

         //Draw the voronoi Facets
         foreach (VoronoiFacet facet in voronoiFacets)
         {
#if NETFX_CORE
            Point[] polyline = Extensions.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);
#else
            Point[] polyline = Array.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);
#endif
            using (VectorOfPoint vp = new VectorOfPoint(polyline))
            using (VectorOfVectorOfPoint vvp = new VectorOfVectorOfPoint(vp))
            {
               //Draw the facet in a random dark color (each channel < 120)
               CvInvoke.FillPoly(
                  img, vvp, 
                  new Bgr(r.NextDouble()*120, r.NextDouble()*120, r.NextDouble()*120).MCvScalar);

               //highlight the edge of the facet in black
               CvInvoke.Polylines(img, vp, true, new Bgr(0, 0, 0).MCvScalar, 2);
            }
            //draw the points associated with each facet in red
            CvInvoke.Circle(img, Point.Round( facet.Point ), 5, new Bgr(0, 0, 255).MCvScalar, -1);
         }

         //Draw the Delaunay triangulation in white
         foreach (Triangle2DF triangle in delaunayTriangles)
         {
#if NETFX_CORE
            Point[] vertices = Extensions.ConvertAll<PointF, Point>(triangle.GetVertices(), Point.Round);
#else
            Point[] vertices = Array.ConvertAll<PointF, Point>(triangle.GetVertices(), Point.Round);
#endif
            using (VectorOfPoint vp = new VectorOfPoint(vertices))
            {
               CvInvoke.Polylines(img, vp, true, new Bgr(255, 255, 255).MCvScalar);
            }
         }

         return img;
      }
コード例 #12
0
        /*
         * public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
         * {
         * TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
         * }
         *
         * [DllImport(CvInvoke.EXTERN_LIBRARY, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
         * private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);
         */

        /// <summary>
        /// Implements the chamfer matching algorithm on images taking into account both distance from
        /// the template pixels to the nearest pixels and orientation alignment between template and image
        /// contours.
        /// </summary>
        /// <param name="img">The edge image where search is performed</param>
        /// <param name="templ">The template (an edge image)</param>
        /// <param name="contours">The output contours</param>
        /// <param name="cost">The cost associated with the matching</param>
        /// <param name="templScale">The template scale, use 1 for default</param>
        /// <param name="maxMatches">The maximum number of matches, use 20 for default</param>
        /// <param name="minMatchDistance">The minimum match distance. use 1.0 for default</param>
        /// <param name="padX">PadX, use 3 for default</param>
        /// <param name="padY">PadY, use 3 for default</param>
        /// <param name="scales">Scales, use 5 for default</param>
        /// <param name="minScale">Minimum scale, use 0.6 for default</param>
        /// <param name="maxScale">Maximum scale, use 1.6 for default</param>
        /// <param name="orientationWeight">Orientation weight, use 0.5 for default</param>
        /// <param name="truncate">Truncate, use 20 for default</param>
        /// <returns>The number of matches</returns>
        public static int cvChamferMatching(Image <Gray, Byte> img, Image <Gray, Byte> templ,
                                            out Point[][] contours, out float[] cost,
                                            double templScale, int maxMatches,
                                            double minMatchDistance, int padX,
                                            int padY, int scales, double minScale, double maxScale,
                                            double orientationWeight, double truncate)
        {
            // Native interop buffers: filled by _cvChamferMatching, copied out to
            // managed arrays, then released by the using blocks.
            using (Emgu.CV.Util.VectorOfVectorOfPoint contourBuffer = new Util.VectorOfVectorOfPoint())
            using (Emgu.CV.Util.VectorOfFloat costBuffer = new Util.VectorOfFloat())
            {
                int matchCount = _cvChamferMatching(
                    img, templ, contourBuffer, costBuffer,
                    templScale, maxMatches, minMatchDistance,
                    padX, padY, scales, minScale, maxScale,
                    orientationWeight, truncate);

                contours = contourBuffer.ToArray();
                cost = costBuffer.ToArray();
                return matchCount;
            }
        }
コード例 #13
0
ファイル: Form1.cs プロジェクト: Neths/ReStudio
        /// <summary>
        /// Cleans up a candidate plate image: inverse-thresholds it, then keeps
        /// only the areas covered by Canny contours taller than half the plate
        /// (presumably the character strokes), zeroing everything else, and
        /// finally erodes/dilates once to drop small specks.
        /// </summary>
        /// <param name="plate">Grayscale candidate plate image.</param>
        /// <returns>The filtered binary plate image; the caller owns it.</returns>
        private static UMat FilterPlate(UMat plate)
        {
            UMat thresh = new UMat();
            CvInvoke.Threshold(plate, thresh, 120, 255, ThresholdType.BinaryInv);
            //Image<Gray, Byte> thresh = plate.ThresholdBinaryInv(new Gray(120), new Gray(255));

            Size plateSize = plate.Size;
            using (Mat plateMask = new Mat(plateSize.Height, plateSize.Width, DepthType.Cv8U, 1))
            using (Mat plateCanny = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                // Mask starts all white; rects around tall contours are painted
                // black so the masked SetTo below leaves those areas untouched.
                plateMask.SetTo(new MCvScalar(255.0));
                CvInvoke.Canny(plate, plateCanny, 100, 50);
                CvInvoke.FindContours(plateCanny, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);

                int count = contours.Size;
                // NOTE(review): the loop starts at 1, so contour 0 is never
                // examined — confirm whether skipping the first contour is intentional.
                for (int i = 1; i < count; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                    {

                        Rectangle rect = CvInvoke.BoundingRectangle(contour);
                        // Taller than half the plate height: keep this region.
                        if (rect.Height > (plateSize.Height >> 1))
                        {
                            // Grow the rect by 1 px on each side, clipped to the image.
                            rect.X -= 1;
                            rect.Y -= 1;
                            rect.Width += 2;
                            rect.Height += 2;
                            Rectangle roi = new Rectangle(Point.Empty, plate.Size);
                            rect.Intersect(roi);
                            // Default MCvScalar() is black; -1 thickness fills the rect.
                            CvInvoke.Rectangle(plateMask, rect, new MCvScalar(), -1);
                            //plateMask.Draw(rect, new Gray(0.0), -1);
                        }
                    }

                }

                // Zero thresh wherever the mask is still non-zero (i.e. everywhere
                // outside the tall-contour rectangles).
                thresh.SetTo(new MCvScalar(), plateMask);
            }

            // Morphological open (erode then dilate) to remove remaining specks.
            CvInvoke.Erode(thresh, thresh, null, new Point(-1, -1), 1, BorderType.Constant,
                CvInvoke.MorphologyDefaultBorderValue);
            CvInvoke.Dilate(thresh, thresh, null, new Point(-1, -1), 1, BorderType.Constant,
                CvInvoke.MorphologyDefaultBorderValue);

            return thresh;
        }
コード例 #14
0
ファイル: Program.cs プロジェクト: Neths/ReStudio
        /// <summary>
        /// Detect license plates in the given image.
        /// </summary>
        /// <param name="img">The image to search license plates in</param>
        /// <param name="licensePlateImagesList">A list of images where the detected license plate regions are stored</param>
        /// <param name="filteredLicensePlateImagesList">A list of images where the detected license plate regions (with noise removed) are stored</param>
        /// <param name="detectedLicensePlateRegionList">A list where the regions of license plate (defined by an MCvBox2D) are stored</param>
        /// <returns>The list of words for each license plate</returns>
        public List<String> DetectLicensePlate(
            IInputArray img,
            List<IInputOutputArray> licensePlateImagesList,
            List<IInputOutputArray> filteredLicensePlateImagesList,
            List<RotatedRect> detectedLicensePlateRegionList)
        {
            var licenses = new List<String>();

            using (Mat gray = new Mat())
            using (Mat canny = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                // Grayscale then Canny edges feed the contour-tree extraction.
                CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
                CvInvoke.Canny(gray, canny, 100, 50, 3, false);

                int[,] hierachy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);

                // Walk the contour tree from index 0, collecting plate candidates.
                FindLicensePlate(
                    contours, hierachy, 0, gray, canny,
                    licensePlateImagesList, filteredLicensePlateImagesList,
                    detectedLicensePlateRegionList, licenses);
            }

            return licenses;
        }
コード例 #15
0
        // - PROCESSING FILTERS End -

        // - CONTORNOS -
        // Detect Objects
        /// <summary>
        /// Binarizes the loaded image (threshold 200), finds its external contours,
        /// draws them, and shows the original/result pair in the two picture boxes.
        /// Does nothing when no image is loaded.
        /// </summary>
        private void DetectarToolStripMenuItem_Click(object sender, EventArgs e)
        {
            if (_ImgInput != null)
            {
                // Make another ImageBox appear to compare the original state with the applied effect
                pictureBox1.Image = _ImgInput.Bitmap;

                // Binarize: everything above 200 becomes white (255).
                Image <Gray, byte> _imgOutput = _ImgInput.Convert <Gray, byte>().ThresholdBinary(new Gray(200), new Gray(255));

                // Fix: the contour vector and hierarchy Mat were previously leaked.
                using (Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint())
                using (Mat hier = new Mat())
                {
                    // Find and draw all external contours in one pass.
                    CvInvoke.FindContours(_imgOutput, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
                    CvInvoke.DrawContours(_imgOutput, contours, -1, new MCvScalar(255, 0, 0));
                }

                pictureBox2.Image = _imgOutput.Bitmap;
            }
        }
コード例 #16
0
        /// <summary>
        /// Detects puzzle-piece contours: converts the loaded puzzle image to
        /// grayscale, adaptive-thresholds it, finds external contours, keeps those
        /// with area between 600 and half the image, draws the kept contours onto
        /// the mask, shows it, and updates the contour-count label (UI strings are
        /// in Polish).
        /// </summary>
        /// <param name="t1">Control value; the adaptive-threshold block size is t1 * 10 - 1.</param>
        /// <param name="t2">Constant subtracted by the adaptive threshold.</param>
        public void wykrywanie_konturow(int t1, int t2)
        {
            // NOTE(review): purpose of this delay is unclear — presumably lets the
            // UI settle between control events; confirm before removing.
            Thread.Sleep(50);
            try
            {
                Image <Gray, byte> img  = imgInPuzzle.Convert <Gray, byte>();
                Image <Gray, byte> img1 = img;

                Mat graymat = new Mat();
                Mat mask    = new Mat();

                // NOTE(review): the Mat allocated above is discarded here; graymat
                // simply aliases the grayscale image's Mat.
                graymat = img1.Mat;

                CvInvoke.AdaptiveThreshold(graymat, mask, 255, AdaptiveThresholdType.MeanC, ThresholdType.BinaryInv, t1 * 10 - 1, t2);

                // NOTE(review): new vectors are assigned to the fields on every call
                // without disposing the previous ones.
                contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
                con      = new Emgu.CV.Util.VectorOfVectorOfPoint();

                // NOTE(review): hier is pre-sized here, but FindContours writes its
                // own output into it — the pre-sizing is likely unnecessary; confirm.
                Mat hier = new Mat(mask.Rows, mask.Cols, DepthType.Cv8U, 0);

                CvInvoke.FindContours(mask, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                // Keep contours with area above 600 but below half the image area.
                for (int i = 0; i < contours.Size; i++)
                {
                    double a = CvInvoke.ContourArea(contours[i], false);
                    if (a > 600 && a < ((mask.Cols * mask.Rows) / 2))
                    {
                        con.Push(contours[i]);
                    }
                }

                // Draw all kept contours in white, 1 px thick, onto the mask.
                CvInvoke.DrawContours(mask, con, -1, new MCvScalar(255, 255, 255), 1);

                // "Number of contours: N" (Polish UI string — do not translate).
                ilosc_konturow.Text = "Liczba konturów: " + con.Size.ToString();

                zdj.Source = ToBitmapSource(mask);
            }
            catch (System.NullReferenceException)
            {
                // NOTE(review): catching NullReferenceException as a "no image
                // loaded" signal is fragile — prefer an explicit null check.
                System.Windows.MessageBox.Show("Wczytaj zdjęcia", "Podpowiadarka do puzzli - Błąd", MessageBoxButton.OK, MessageBoxImage.Error);
            }
        }
コード例 #17
0
        /// <summary>
        /// Finds license-plate candidates in <paramref name="image"/>: vertical Sobel
        /// edges -> binary threshold -> horizontal dilation to fuse character edges ->
        /// contour filtering by aspect ratio and area. Each hit is outlined on the
        /// input image and its crop (from inputImage) is shown in a new ImageBox.
        /// </summary>
        /// <param name="image">BGR frame to scan; candidate rectangles are drawn onto it.</param>
        private void FindLicensePlate(Image <Bgr, byte> image)
        {
            // Horizontal Sobel emphasises the dense vertical strokes of plate text.
            Image <Gray, byte> sobel = image.Convert <Gray, byte>().Sobel(1, 0, 3).AbsDiff(new Gray(0.0)).Convert <Gray, byte>();

            Image <Gray, byte> binImg = sobel.ThresholdBinary(new Gray(50), new Gray(255));
            //Image<Gray, byte> binImg = sobel.ThresholdAdaptive(new Gray(255), AdaptiveThresholdType.MeanC, ThresholdType.Binary, 7, new Gray(0));

            // Wide flat kernel (10x2) merges neighbouring character edges into one
            // blob per plate.
            Mat structuringElem = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(10, 2), new Point(-1, -1));

            binImg = binImg.MorphologyEx(Emgu.CV.CvEnum.MorphOp.Dilate, structuringElem, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255));

            Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
            Mat m = new Mat();

            CvInvoke.FindContours(binImg, contours, m, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

            for (int i = 0; i < contours.Size; i++)
            {
                Rectangle rectangle = CvInvoke.BoundingRectangle(contours[i]);
                double    area      = CvInvoke.ContourArea(contours[i]);
                // BUGFIX: Width / Height was integer division, so the ratio was
                // truncated (e.g. 2.9 -> 2.0) before the assignment, wrongly
                // rejecting borderline plate shapes.
                double    ratio     = (double)rectangle.Width / rectangle.Height;

                // Plate-like blobs: wide aspect ratio and a plausible pixel area.
                if (ratio > 2.5 && ratio < 6 && area > 2000 && area < 4500)
                {
                    CvInvoke.Rectangle(image, rectangle, new MCvScalar(255, 0, 0), 3);
                    Image <Bgr, byte> outputImage = inputImage.Copy(rectangle);
                    try
                    {
                        // FIX: removed imgbox2 -- it was constructed but never added
                        // to any container or assigned an image.
                        ImageBox imgbox = new ImageBox();
                        tableLayoutPanel3.Controls.Add(imgbox);
                        imgbox.ClientSize = outputImage.Size;
                        imgbox.Image      = outputImage;
                    }
                    catch
                    {
                        // Best-effort UI update; the candidate rectangle is already
                        // drawn even if the ImageBox cannot be shown.
                    }
                }
            }
        }
コード例 #18
0
        /// <summary>
        /// Highlights text-line regions in <paramref name="img"/>: vertical Sobel edges
        /// -> binary threshold -> horizontal dilation -> bounding boxes filtered by
        /// size. Boxes are painted on the input and a masked copy is shown in the UI.
        /// </summary>
        /// <param name="img">BGR image to scan; rectangles are drawn onto it in place.</param>
        private void DetectarTexto2
            (Image <Bgr, byte> img)
        {
            // Vertical-edge map, binarised: text strokes produce strong x-gradients.
            Image <Gray, byte> sobel = img.Convert <Gray, byte>().Sobel(1, 0, 3).AbsDiff(new Gray(0.0)).Convert <Gray, byte>().ThresholdBinary(new Gray(100), new Gray(255));

            // Wide 10x1 kernel fuses adjacent character edges into line blobs.
            Mat SE = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(10, 1), new Point(-1, -1));

            sobel = sobel.MorphologyEx(Emgu.CV.CvEnum.MorphOp.Dilate, SE, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255));
            Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
            Mat m = new Mat();

            CvInvoke.FindContours(sobel, contours, m, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

            // Keep boxes with plausible text-line dimensions.
            // FIX: removed the unused local "ar" -- it was computed with truncating
            // integer division and never read.
            List <Rectangle> list = new List <Rectangle>();

            for (int i = 0; i < contours.Size; i++)
            {
                Rectangle brect = CvInvoke.BoundingRectangle(contours[i]);

                if (brect.Width > 1 && brect.Height > 1 && brect.Height < 50)
                {
                    list.Add(brect);
                }
            }

            Image <Bgr, byte> imgout = img.CopyBlank();

            foreach (var r in list)
            {
                CvInvoke.Rectangle(img, r, new MCvScalar(0, 0, 255), 2);
                CvInvoke.Rectangle(img, r, new MCvScalar(0, 255, 255), -1);
                CvInvoke.Rectangle(imgout, r, new MCvScalar(0, 255, 255), -1);

                // NOTE(review): AND-ing and republishing every iteration is wasteful,
                // but it is kept in the loop on purpose: img gains a filled rectangle
                // per pass, so hoisting _And out of the loop would change the
                // composited output.
                imgout._And(img);


                pictureBox3.Image = img.Bitmap;
                imagen            = img.Mat;
                pictureBox2.Image = imgout.Bitmap;
            }
        }
コード例 #19
0
        // Detección de Letras

        /*
         * private async void detectarTextoToolStripMenuItem_Click(object sender, EventArgs e)
         * {
         *  if (_ImgInput != null)
         *  {
         *      Image<Gray, byte> imgOutput = _ImgInput.Convert<Gray, byte>().Not().ThresholdBinary(new Gray(50), new Gray(255));
         *      VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
         *      Mat hier = new Mat();
         *
         *      pictureBox1.Image = _ImgInput.Bitmap;
         *
         *      CvInvoke.FindContours(imgOutput, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
         *      show = true;
         *      if (contours.Size > 0)
         *      {
         *          for (int i = 0; i < contours.Size; i++)
         *          {
         *              Rectangle rect = CvInvoke.BoundingRectangle(contours[i]);
         *              _ImgInput.ROI = rect;
         *
         *              image = _ImgInput.Copy().Bitmap;
         *              _ImgInput.ROI = Rectangle.Empty;
         *              this.Invalidate();
         *
         *              await Task.Delay(1500);
         *          }
         *          show = false;
         *      }
         *  }
         * }
         */

        // Detección de Palabras
        /// <summary>
        /// Word detection over _ImgInput: Sobel edge map -> binary threshold ->
        /// horizontal dilation -> bounding boxes filtered by aspect ratio and size.
        /// Draws red boxes on the input (pictureBox1) and the masked words (pictureBox2).
        /// </summary>
        private void detectarTextoToolStripMenuItem_Click(object sender, EventArgs e)
        {
            // Edge detection with horizontal Sobel, then binarise.
            Image <Gray, byte> sobel = _ImgInput.Convert <Gray, byte>().Sobel(1, 0, 3).AbsDiff(new Gray(0.0)).Convert <Gray, byte>().ThresholdBinary(new Gray(200), new Gray(255));
            Mat SE = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(10, 2), new Point(-1, -1));

            // Dilation merges neighbouring character edges into word blobs.
            sobel = sobel.MorphologyEx(Emgu.CV.CvEnum.MorphOp.Dilate, SE, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255));
            Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();

            // Find the outer contours of the dilated blobs.
            Mat m = new Mat();

            CvInvoke.FindContours(sobel, contours, m, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

            // Geometrical constraints: wide, word-sized boxes only.
            List <Rectangle> list = new List <Rectangle>();

            for (int i = 0; i < contours.Size; i++)
            {
                Rectangle brect = CvInvoke.BoundingRectangle(contours[i]);
                // BUGFIX: Width / Height was integer division; the truncated ratio
                // (e.g. 2.9 -> 2) failed the "ar > 2" test and dropped valid words.
                double    ar    = (double)brect.Width / brect.Height;

                if (ar > 2 && brect.Width > 25 && brect.Height > 8 && brect.Height < 100)
                {
                    list.Add(brect);
                }
            }

            Image <Bgr, byte> imgout = _ImgInput.CopyBlank();

            foreach (var r in list)
            {
                CvInvoke.Rectangle(_ImgInput, r, new MCvScalar(0, 0, 255), 2);
                CvInvoke.Rectangle(imgout, r, new MCvScalar(0, 255, 255), -1);
            }

            // Mask: keep only the pixels inside detected word boxes.
            imgout._And(_ImgInput);
            pictureBox1.Image = _ImgInput.Bitmap;
            pictureBox2.Image = imgout.Bitmap;
        }
コード例 #20
0
    /// <summary>
    /// HSV-thresholds <paramref name="src"/> against the configured m_h/s/v min-max
    /// range, erodes and Canny-edges the mask, draws every detected contour onto a
    /// fresh single-channel image and hands it to the m_onFrame callback.
    /// NOTE: converts src to HSV in place, i.e. the caller's Mat is mutated.
    /// </summary>
    /// <param name="src">BGR input frame; converted to HSV in place.</param>
    public void ApplyFilter(Mat src)
    {
        CvInvoke.CvtColor(src, src, ColorConversion.Bgr2Hsv);

        Mat threshold = new Mat(src.Height, src.Width, src.Depth, src.NumberOfChannels);
        MCvScalar min = new MCvScalar(m_hmin, m_smin, m_vmin);
        MCvScalar max = new MCvScalar(m_hmax, m_smax, m_vmax);

        CvInvoke.InRange(src, new ScalarArray(min), new ScalarArray(max), threshold);

        // Light 3x3 erosion removes speckle noise before edge detection.
        Mat element = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3,3), Point.Empty);
        CvInvoke.Erode(threshold, threshold, element, Point.Empty, 1, BorderType.Constant, new MCvScalar(1.0f));
        CvInvoke.Canny(threshold, threshold, 100, 255);

        VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
        Mat hierarchy = new Mat();

        CvInvoke.FindContours(threshold, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple, Point.Empty);

        // Single-channel canvas for the contour rendering.
        Mat draw = new Mat(src.Height, src.Width, src.Depth, 1);
        draw.SetTo(new MCvScalar(0.0));

        //Debug.Log("CONTOURS");

        // FIX: indexed for-loop replaces foreach over ToArrayOfArray() with a
        // hand-maintained counter; the per-contour VectorOfPoint is now disposed
        // instead of leaked each iteration.
        for (int i = 0; i < contours.Size; i++)
        {
            CvInvoke.DrawContours(draw, contours, i, new MCvScalar(255.0), 1, LineType.EightConnected, null, int.MaxValue, Point.Empty);

            using (VectorOfPoint contour = contours[i])
            {
                double a = CvInvoke.ContourArea(contour);
                //Debug.Log("Contour: " + a);
            }
        }

        //Emgu.CV.UI.ImageViewer.Show(draw, "test");

        // Publish the contour image to the registered frame callback, if any.
        if(m_onFrame != null) m_onFrame.Invoke(draw);
    }
コード例 #21
0
ファイル: LaserTracker.cs プロジェクト: akx/ltag
        /// <summary>
        /// Processes one camera frame of the laser tracker: resize (optionally
        /// perspective-warp), hue-threshold in HSV, optionally dilate, then collect
        /// the bounding rectangles of all blobs above the minimum pixel count.
        /// </summary>
        /// <param name="frame">Raw camera frame (BGR, any size).</param>
        /// <returns>
        /// A LaserTrackerResult bundling the resized camera bitmap, the threshold
        /// bitmap, the detected rectangles (sorted by ascending area) and the
        /// per-frame processing time.
        /// </returns>
        public LaserTrackerResult UpdateFromFrame(Mat frame)
        {
            _timer.Reset();
            _timer.Start();
            Bitmap camBitmap, threshBitmap;

            var rects = new List<Rectangle>();
            using (var threshFrame = new Mat())
            {
                using (var hsvFrame = new Mat())
                {
                    using (var resizeFrame = new Mat())
                    {
                        // Work at the configured tracker resolution, not camera resolution.
                        var size = new Size(_width, _height);
                        CvInvoke.Resize(frame, resizeFrame, size);
                        if (_warp)
                        {
                            // Apply the calibrated homography, then copy back so the
                            // rest of the pipeline sees the warped frame.
                            using (var warpedFrame = new Mat())
                            {
                                CvInvoke.WarpPerspective(resizeFrame, warpedFrame, _homographyMat, size);
                                warpedFrame.CopyTo(resizeFrame);
                            }
                        }
                        CvInvoke.CvtColor(resizeFrame, hsvFrame, ColorConversion.Bgr2Hsv);
                        // Clone the bitmap: resizeFrame (and its backing buffer) is
                        // disposed when this using block ends.
                        camBitmap = resizeFrame.Bitmap.Clone(new Rectangle(0, 0, _width, _height), PixelFormat.Format32bppArgb);
                    }
                    // Threshold a symmetric hue band around the configured center.
                    float hueMin = _hueCenter - _hueWidth;
                    float hueMax = _hueCenter + _hueWidth;
                    HueThreshold(hueMin, hueMax, hsvFrame, threshFrame);
                    if (_dilate > 0)
                    {
                        // Grow the thresholded blobs so small/broken laser dots merge.
                        CvInvoke.Dilate(threshFrame, threshFrame, null, new Point(-1, -1), _dilate, BorderType.Default, new MCvScalar());
                    }

                }
                threshBitmap = threshFrame.Bitmap.Clone(new Rectangle(0, 0, _width, _height), PixelFormat.Format32bppArgb);

                // FindContours modifies its input, so run it on a throwaway clone to
                // keep threshFrame (already captured above) intact.
                using (var dummyFrame = threshFrame.Clone())
                {
                    using (var contours = new VectorOfVectorOfPoint())
                    {
                        CvInvoke.FindContours(dummyFrame, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
                        for (var i = 0; i < contours.Size; i++)
                        {
                            // Area filter on the bounding box, not the contour itself.
                            var rect = CvInvoke.BoundingRectangle(contours[i]);
                            if (rect.Width*rect.Height < _minPixels) continue;
                            rects.Add(rect);
                        }
                    }
                }
            }
            // Sort rectangles by ascending bounding-box area.
            rects.Sort((r1, r2) =>
            {
                var s1 = r1.Width * r1.Height;
                var s2 = r2.Width * r2.Height;
                return s1.CompareTo(s2);
            });
            return new LaserTrackerResult(camBitmap, threshBitmap, rects, _timer.Elapsed);
        }
コード例 #22
0
ファイル: AutoTestVarious.cs プロジェクト: Delaley/emgucv
      /// <summary>
      /// Builds a concave 5-vertex polygon, computes its convex hull and convexity
      /// defects, and draws the contour (thick white) plus each defect's start-end
      /// chord (thin red) onto a 300x300 canvas.
      /// </summary>
      public void TestConvexityDefacts()
      {
         // Concave polygon: the inward vertex at (100,100) creates convexity defects.
         Point[] polyline = new Point[] {
            new Point(10, 10),
            new Point(10, 250),
            new Point(100, 100),
            new Point(250, 250),
            new Point(250, 10)};
         Image<Bgr, Byte> image = new Image<Bgr, byte>(300, 300);
         using (VectorOfPoint vp = new VectorOfPoint(polyline))
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(vp))
         using (VectorOfInt convexHull = new VectorOfInt())
         using (Mat convexityDefect = new Mat())
         {
            // Outline the contour in thick white, then compute hull + defects.
            CvInvoke.DrawContours(image, contours, -1, new MCvScalar(255, 255, 255), 3);
            CvInvoke.ConvexHull(vp, convexHull);
            CvInvoke.ConvexityDefects(vp, convexHull, convexityDefect);

            // The defect output is a 4-channel Mat with one row per defect; copy it
            // into a Matrix<> so the data becomes directly addressable.
            if (!convexityDefect.IsEmpty)
            {
               Matrix<int> defectData = new Matrix<int>(convexityDefect.Rows, convexityDefect.Cols,
                  convexityDefect.NumberOfChannels);
               convexityDefect.CopyTo(defectData);

               for (int row = 0; row < defectData.Rows; row++)
               {
                  // Channels 0 and 1 hold the start/end vertex indices of the defect;
                  // connect those vertices with a thin red line.
                  Point defectStart = polyline[defectData.Data[row, 0]];
                  Point defectEnd = polyline[defectData.Data[row, 1]];
                  CvInvoke.Line(image, defectStart, defectEnd, new MCvScalar(0, 0, 255));
               }
            }

            //Emgu.CV.UI.ImageViewer.Show(image);
         }
      }
コード例 #23
0
ファイル: AutoTestVarious.cs プロジェクト: Delaley/emgucv
      /// <summary>
      /// Draws a filled white rectangle on a black 100x100 canvas, verifies that
      /// FindContours recovers it as a convex contour, then sanity-checks the layout
      /// of the legacy MCvSeq/MCvContour interop structs.
      /// </summary>
      public void TestContour()
      {
         //Application.EnableVisualStyles();
         //Application.SetCompatibleTextRenderingDefault(false);
         using (Image<Gray, Byte> img = new Image<Gray, Byte>(100, 100, new Gray()))
         {
            // Filled (thickness -1) white rectangle on the black canvas.
            Rectangle rect = new Rectangle(10, 10, 80 - 10, 50 - 10);
            img.Draw(rect, new Gray(255.0), -1);
            //ImageViewer.Show(img);
            // Sample points inside / outside the rectangle; only referenced by the
            // commented-out legacy MemStorage assertions below.
            PointF pIn = new PointF(60, 40);
            PointF pOut = new PointF(80, 100);

            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint() )
            
            {
               // Hierarchy output is not needed, so null is passed for it.
               CvInvoke.FindContours(img, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
               using (VectorOfPoint firstContour = contours[0])
               {
                  // A filled axis-aligned rectangle must yield a convex contour.
                  EmguAssert.IsTrue( CvInvoke.IsContourConvex(firstContour )  );
               }
            }
            /*
            using (MemStorage stor = new MemStorage())
            {
               //Contour<Point> cs = img.FindContours(CvEnum.ChainApproxMethod.ChainApproxSimple, CvEnum.RetrType.List, stor);
               //EmguAssert.IsTrue(cs.MCvContour.elem_size == Marshal.SizeOf(typeof(Point)));
               //EmguAssert.IsTrue(rect.Width * rect.Height == cs.Area);

               //EmguAssert.IsTrue(cs.Convex);
               //EmguAssert.IsTrue(rect.Width * 2 + rect.Height * 2 == cs.Perimeter);
               Rectangle rect2 = cs.BoundingRectangle;
               rect2.Width -= 1;
               rect2.Height -= 1;
               //rect2.Center.X -= 0.5;
               //rect2.Center.Y -= 0.5;
               //EmguAssert.IsTrue(rect2.Equals(rect));
               EmguAssert.IsTrue(cs.InContour(pIn) > 0);
               EmguAssert.IsTrue(cs.InContour(pOut) < 0);
               //EmguAssert.IsTrue(cs.Distance(pIn) == 10);
               //EmguAssert.IsTrue(cs.Distance(pOut) == -50);
               img.Draw(cs, new Gray(100), new Gray(100), 0, 1);

               MCvPoint2D64f rectangleCenter = new MCvPoint2D64f(rect.X + rect.Width / 2.0, rect.Y + rect.Height / 2.0);

               using (VectorOfPoint vp = new VectorOfPoint(cs.ToArray()))
               {    
                  MCvMoments moment = CvInvoke.Moments(vp, false);
                  MCvPoint2D64f center = moment.GravityCenter;
                  //EmguAssert.IsTrue(center.Equals(rectangleCenter));
               }
               
            }

            using (MemStorage stor = new MemStorage())
            {
               Image<Gray, Byte> img2 = new Image<Gray, byte>(300, 200);
               Contour<Point> c = img2.FindContours(Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple, Emgu.CV.CvEnum.RetrType.List, stor);
               EmguAssert.IsTrue(c == null);
            }*/
         }

         // Interop layout check: MCvContour is expected to be MCvSeq plus a
         // Rectangle plus four ints.
         int s1 = Marshal.SizeOf(typeof(MCvSeq));
         int s2 = Marshal.SizeOf(typeof(MCvContour));
         int sizeRect = Marshal.SizeOf(typeof(Rectangle));
         EmguAssert.IsTrue(s1 + sizeRect + 4 * Marshal.SizeOf(typeof(int)) == s2);
      }
コード例 #24
0
ファイル: AutoTestVarious.cs プロジェクト: neutmute/emgucv
      /// <summary>
      /// Finds contours in the gray version of lena.jpg, simplifies each with
      /// ApproxPolyDP, then computes its convex hull and convexity defects and
      /// demonstrates how to read the per-defect fields out of the result Mat.
      /// </summary>
      public void TestConvecityDefect()
      {
         Mat frame = EmguAssert.LoadMat("lena.jpg");
         // NOTE(review): despite the variable name "canny", this is just the gray
         // conversion -- no edge detection is applied before FindContours.
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
         using (Image<Gray, byte> canny = frame.ToImage<Gray, byte>())
         {
            // Hierarchy output is not needed; passing null skips it.
            IOutputArray hierarchy = null;
            CvInvoke.FindContours(canny, contours, hierarchy, RetrType.List, ChainApproxMethod.ChainApproxSimple);

            for (int i = 0; i < contours.Size; i++)
            {
               // Simplify the contour (5px tolerance, treated as an open polyline).
               CvInvoke.ApproxPolyDP(contours[i], contours[i], 5, false);
               using (VectorOfInt hull = new VectorOfInt())
               using (Mat defects = new Mat())
               using (VectorOfPoint c = contours[i])
               {
                  CvInvoke.ConvexHull(c, hull, false, false);
                  CvInvoke.ConvexityDefects(c, hull, defects);
                  if (!defects.IsEmpty)
                  {
                     // Defects arrive as a 4-channel Mat (one row per defect); copy
                     // into a Matrix<int> so the fields are directly readable.
                     using (Matrix<int> value = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels))
                     {
                        defects.CopyTo(value);
                        //you can iterate through the defect here:
                        for (int j = 0; j < value.Rows; j++)
                        {
                           // Channel layout per defect: start vertex index, end vertex
                           // index, farthest-point index, fixed-point depth (1/256 px).
                           int startIdx = value.Data[j, 0];
                           int endIdx = value.Data[j, 1];
                           int farthestPtIdx = value.Data[j, 2];
                           double fixPtDepth = value.Data[j, 3]/256.0;
                           
                        }
                     }
                  }
               }
            }
         }
      }
コード例 #25
0
ファイル: AutoTestImage.cs プロジェクト: neutmute/emgucv
      /// <summary>
      /// Loads stuff.jpg, smooths and Canny-edges it, then exercises FindContours in
      /// two modes (Tree with a hierarchy Mat, List with null hierarchy) and redraws
      /// every contour as a polyline on a white canvas.
      /// </summary>
      public void TestContour()
      {
         Image<Gray, Byte> img = EmguAssert.LoadImage<Gray, byte>("stuff.jpg");
         // BUGFIX: SmoothGaussian returns a NEW image; the original call discarded
         // the result, so the 3x3 Gaussian smoothing silently never happened.
         img = img.SmoothGaussian(3);
         img = img.Canny(80, 50);
         Image<Gray, Byte> res = img.CopyBlank();
         res.SetValue(255);

         // First pass: Tree retrieval with an explicit hierarchy output (API smoke
         // test only; the results are not used).
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
         using (Mat hierachy = new Mat())
         {
            CvInvoke.FindContours(img, contours, hierachy, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
            
         }

         // Second pass: List retrieval, null hierarchy; draw each contour on res.
         using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
         //using (VectorOfVectorOfInt hierarchy = new VectorOfVectorOfInt())
         {
            CvInvoke.FindContours(img, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
            for (int i = 0; i < contours.Size; i++)
            {
               using (VectorOfPoint contour = contours[i])
               {
                  Point[] pts = contour.ToArray();
                  CvInvoke.Polylines(res, contour, true, new MCvScalar());
               }
            }
         }
         /*
         Contour<Point> contour = img.FindContours();

         while (contour != null)
         {
            Contour<Point> approx = contour.ApproxPoly(contour.Perimeter * 0.05);

            if (approx.Convex && approx.Area > 20.0)
            {
               Point[] vertices = approx.ToArray();

               LineSegment2D[] edges = PointCollection.PolyLine(vertices, true);

               res.DrawPolyline(vertices, true, new Gray(200), 1);
            }
            contour = contour.HNext;
         }*/
         //Emgu.CV.UI.ImageViewer.Show(res);
      }
コード例 #26
0
        /// <summary>
        /// Per-frame camera callback: diffs the current frame against the previous
        /// one, thresholds the difference, and (when tracking is enabled) flags
        /// motion if the thresholded diff contains any contours.
        /// </summary>
        private void ProcessFrame(Object sender, EventArgs args)
        {
            try
            {
                Image <Bgr, Byte> Previous_Frame = new Image <Bgr, Byte>(imgCamUser.Image.Bitmap); //Previiousframe aquired
                Image <Bgr, Byte> Difference;                                                      //Difference between the two frames
                Image <Bgr, Byte> thresholdImage = null;
                imgCamUser.Image = _capture.QueryFrame();
                Image <Bgr, Byte> Frame         = new Image <Bgr, Byte>(imgCamUser.Image.Bitmap);
                double            ContourThresh = 0.003; //stores alpha for thread access
                int Threshold = 60;                      //stores threshold for thread access
                // NOTE(review): Convert<Gray, Byte>() returns a NEW image -- these two
                // results are discarded, so both lines are no-ops and the diff below
                // still runs on the BGR frames. Looks like grayscale diffing was the
                // intent; confirm before "fixing", as it would change downstream types.
                Frame.Convert <Gray, Byte>();
                Previous_Frame.Convert <Gray, Byte>();

                Difference = Previous_Frame.AbsDiff(Frame);                                                                    //find the absolute difference
                                                                                                                               /*Play with the value 60 to set a threshold for movement*/
                thresholdImage = Difference.ThresholdBinary(new Bgr(Threshold, Threshold, Threshold), new Bgr(255, 255, 255)); //if value > 60 set to 255, 0 otherwise

                picCapturedUser.Image = thresholdImage.Convert <Gray, byte>().Copy();

                if (trackingEnabled)
                {
                    //check for motion in the video feed
                    //the detectMotion function will return true if motion is detected, else it will return false.
                    //set motionDetected boolean to the returned value.

                    Image <Gray, byte> imgOutput = thresholdImage.Convert <Gray, byte>().ThresholdBinary(new Gray(100), new Gray(255));

                    label1.Text = "idle";
                    Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
                    Mat hier = new Mat();
                    CvInvoke.FindContours(imgOutput, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
                    // Any contour in the thresholded diff counts as motion.
                    if (contours.Size > 0)
                    {
                        motionDetected = true;
                        label2.Text    = contours.Size.ToString();
                    }
                    else
                    {
                        //reset our variables if tracking is disabled
                        motionDetected = false;
                        label1.Text    = "Idle";
                    }

                    if (motionDetected)
                    {
                        label1.Text = "motion detected";
                    }
                }

                /* using (var imageFrame = _capture.QueryFrame().ToImage<Bgr, Byte>())
                 * {
                 *   if (imageFrame != null)
                 *   {
                 *       var grayframe = imageFrame.Convert<Gray, byte>();
                 *       var faces = _cascadeClassifier.DetectMultiScale(grayframe, 1.1, 10, Size.Empty);
                 *       foreach (var face in faces)
                 *       {
                 *           imageFrame.Draw(face, new Bgr(Color.BurlyWood), 3);
                 *           //render the image to the picture box
                 *           picCapturedUser.Image = imageFrame.Copy(face);
                 *       }
                 *   }
                 *   imgCamUser.Image = imageFrame;
                 *
                 *
                 * }*/
            }
            catch (Exception e)
            {
                // NOTE(review): swallow-all catch -- any per-frame failure (camera
                // gone, first frame with no image yet) is silently ignored. At
                // minimum this should log; confirm before changing behavior.
                //MessageBox.Show(e.ToString());
            }
        }
コード例 #27
0
        /// <summary>
        /// Kinect all-frames callback: slices the depth frame to the UI-selected
        /// depth range, finds blobs in that slice, draws a red box around each blob
        /// within the size limits, and separately renders the raw color frame.
        /// </summary>
        private void sensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
        {
            BitmapSource depthBmp = null;
            blobCount = 0;

            using (ColorImageFrame colorFrame = e.OpenColorImageFrame())
            {
                using (DepthImageFrame depthFrame = e.OpenDepthImageFrame())
                {
                    if (depthFrame != null)
                    {

                        blobCount = 0;

                        // Keep only depth pixels between the min/max slider values.
                        depthBmp = depthFrame.SliceDepthImage((int)sliderMin.Value, (int)sliderMax.Value);

                        Image<Bgr, Byte> openCVImg = new Image<Bgr, byte>(depthBmp.ToBitmap());
                        Image<Gray, byte> gray_image = openCVImg.Convert<Gray, byte>();

                        //Find contours
                        using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                        {
                            CvInvoke.FindContours(gray_image, contours, new Mat(), Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                            for (int i = 0; i < contours.Size; i++)
                            {
                                VectorOfPoint contour = contours[i];
                                double area = CvInvoke.ContourArea(contour, false);

                                // Size gate: sliders give side lengths, so compare the
                                // contour area against their squares.
                                if ((area > Math.Pow(sliderMinSize.Value, 2)) && (area < Math.Pow(sliderMaxSize.Value, 2)))
                                {
                                    System.Drawing.Rectangle box = CvInvoke.BoundingRectangle(contour);
                                    openCVImg.Draw(box, new Bgr(System.Drawing.Color.Red), 2);
                                    blobCount++;
                                }
                            }
                        }

                        // Publish the annotated depth slice and the blob count.
                        this.outImg.Source = ImageHelpers.ToBitmapSource(openCVImg);
                        txtBlobCount.Text = blobCount.ToString();
                    }
                }

                if (colorFrame != null)
                {

                    // Copy raw color pixels straight into the WPF writable bitmap
                    // (stride = width * 4 bytes per BGRA pixel).
                    colorFrame.CopyPixelDataTo(this.colorPixels);
                    this.colorBitmap.WritePixels(
                        new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
                        this.colorPixels,
                        this.colorBitmap.PixelWidth * sizeof(int),
                        0);

                }
            }
        }
コード例 #28
0
 /// <summary>
 /// Push multiple values from the other vector into this vector
 /// </summary>
 /// <param name="other">The other vector, from which the values will be pushed to the current vector</param>
 /// <remarks>Thin wrapper that delegates to the native <c>VectorOfVectorOfPointPushVector</c> export, passing this vector's unmanaged pointer.</remarks>
 public void Push(VectorOfVectorOfPoint other)
 {
     VectorOfVectorOfPointPushVector(_ptr, other);
 }
コード例 #29
0
ファイル: MainForm.cs プロジェクト: Lerbytech/ShapeDetection
        public void PerformShapeDetection()
        {
            if (fileNameTextBox.Text != String.Empty)
            {
                Stopwatch watch = Stopwatch.StartNew();
                watch.Start();
                StringBuilder msgBuilder = new StringBuilder("Performance: ");

                #region get image

                img = new Image<Bgr, byte>(fileNameTextBox.Text);
                img = img.Resize(0.5, Inter.Linear).SmoothMedian(5);
                #endregion

                #region HSV magic
                //min.Hue = MinHueTB.Value; min.Satuation = MinSatTB.Value; min.Value = MinValTB.Value;
                //max.Hue = MaxHueTB.Value; max.Satuation = MaxSatTB.Value; max.Value = MaxValTB.Value;

                HsvMagic(img, maskHsvBlack, maskHsvBlue);

                circleImageBox.Image = maskHsvBlack;
                originalImageBox.Image = img;

                img.ToBitmap().Save("C:\\Emgu\\Dump\\Img.png",System.Drawing.Imaging.ImageFormat.Png);
                maskHsvBlack.ToBitmap().Save("C:\\Emgu\\Dump\\maskHsvBlack.png",  System.Drawing.Imaging.ImageFormat.Png);
                maskHsvBlue.ToBitmap().Save("C:\\Emgu\\Dump\\maskHsvBlue.png",  System.Drawing.Imaging.ImageFormat.Png);
                #endregion

                #region Canny and edge detection

                double cannyThreshold = 1.0;
                double cannyThresholdLinking = 500.0;

                Image<Gray, Byte> cannyBlue = maskHsvBlue.Canny(cannyThreshold, cannyThresholdLinking);
                Image<Gray, Byte> cannyBlack = maskHsvBlack.Canny(cannyThreshold, cannyThresholdLinking);

                watch.Stop();
                msgBuilder.Append(String.Format("Hsv and Canny - {0} ms; ", watch.ElapsedMilliseconds));
                #endregion
                cannyBlue.ToBitmap().Save("C:\\Emgu\\Dump\\cannyBlue.png", System.Drawing.Imaging.ImageFormat.Png);
                cannyBlack.ToBitmap().Save("C:\\Emgu\\Dump\\cannyBlack.png", System.Drawing.Imaging.ImageFormat.Png);

                #region Find  rectangles

                #region detect black borders
                VectorOfVectorOfPoint blackborders = new VectorOfVectorOfPoint();//list of black borders
                List<RotatedRect> Black_boxList = new List<RotatedRect>(); //a box is a rotated rectangle
                VectorOfVectorOfPoint othercontours_black = new VectorOfVectorOfPoint();
                getBlackContours(cannyBlack, blackborders, Black_boxList, othercontours_black);
                resultImg = cannyBlack.Convert<Bgr, Byte>();
                #endregion

                #region blue borders

                VectorOfVectorOfPoint blueborders = new VectorOfVectorOfPoint();//list of blue borders
                List<RotatedRect> Blue_boxList = new List<RotatedRect>(); //a box is a rotated rectangle
                VectorOfVectorOfPoint othercontours_blue = new VectorOfVectorOfPoint();
                getBlueContours(cannyBlue, blueborders, Blue_boxList, othercontours_blue);

                #endregion

              #region clear duplicate boxes

                List<RotatedRect> fltrBlue_boxList = new List<RotatedRect>();
                SizeF TMP_SizeF = new SizeF(0,0);
                PointF TMP_PointF = new PointF(0, 0);
                float TMP_Angle = 0;

                if (Blue_boxList.Count >= 2)
                {
                  for (int i = 1; i < Blue_boxList.Count; i++)
                  {
                    if (Blue_boxList[i - 1].Size.Width * Blue_boxList[i - 1].Size.Height > 750)
                    {
                      if (Math.Abs(Blue_boxList[i - 1].Angle - Blue_boxList[i].Angle) < 1)
                      {
                        if (Math.Abs(Blue_boxList[i - 1].Center.X - Blue_boxList[i].Center.X) < 1 && Math.Abs(Blue_boxList[i - 1].Center.Y - Blue_boxList[i].Center.Y) < 1)
                          if (Math.Abs(Blue_boxList[i - 1].Size.Width - Blue_boxList[i].Size.Width) < 1 && Math.Abs(Blue_boxList[i - 1].Size.Height - Blue_boxList[i].Size.Height) < 1)
                          {
                            TMP_PointF.X = (float)(0.5 * (Blue_boxList[i - 1].Center.X + Blue_boxList[i].Center.X));
                            TMP_PointF.Y = (float)(0.5 * (Blue_boxList[i - 1].Center.Y + Blue_boxList[i].Center.Y));
                            TMP_SizeF.Width = (float)(0.5 * (Blue_boxList[i - 1].Size.Width + Blue_boxList[i].Size.Width));
                            TMP_SizeF.Height = (float)(0.5 * (Blue_boxList[i - 1].Size.Height + Blue_boxList[i].Size.Height));
                            TMP_Angle = (float)(0.5 * (Blue_boxList[i - 1].Angle + Blue_boxList[i].Angle));
                            fltrBlue_boxList.Add(new RotatedRect(TMP_PointF, TMP_SizeF, TMP_Angle));

                          }
                      }
                      else fltrBlue_boxList.Add(Blue_boxList[i]);
                    }
                  }
                }
                else { fltrBlue_boxList = Blue_boxList; } //Blue_boxList.Clear(); }

                List<RotatedRect> fltrBlack_boxList = new List<RotatedRect>();
              VectorOfVectorOfPoint fltr_blackborders = new VectorOfVectorOfPoint();
                TMP_SizeF.Width = 0;
                TMP_SizeF.Height = 0;
                TMP_PointF.X = 0;
                TMP_PointF.Y = 0;
                TMP_Angle = 0;

                if (Black_boxList.Count >= 2)
                {
                  for (int i = 1; i < Black_boxList.Count; i++)
                  {
                    if (Black_boxList[i - 1].Size.Width * Black_boxList[i - 1].Size.Height > 10)
                    {
                      if (Math.Abs(Black_boxList[i - 1].Angle - Black_boxList[i].Angle) < 1)
                      {
                        if (Math.Abs(Black_boxList[i - 1].Center.X - Black_boxList[i].Center.X) < 1 && Math.Abs(Black_boxList[i - 1].Center.Y - Black_boxList[i].Center.Y) < 1)
                          if (Math.Abs(Black_boxList[i - 1].Size.Width - Black_boxList[i].Size.Width) < 1 && Math.Abs(Black_boxList[i - 1].Size.Height - Black_boxList[i].Size.Height) < 1)
                          {
                            TMP_PointF.X = (float)(0.5 * (Black_boxList[i - 1].Center.X + Black_boxList[i].Center.X));
                            TMP_PointF.Y = (float)(0.5 * (Black_boxList[i - 1].Center.Y + Black_boxList[i].Center.Y));
                            TMP_SizeF.Width = (float)(0.5 * (Black_boxList[i - 1].Size.Width + Black_boxList[i].Size.Width));
                            TMP_SizeF.Height = (float)(0.5 * (Black_boxList[i - 1].Size.Height + Black_boxList[i].Size.Height));
                            TMP_Angle = (float)(0.5 * (Black_boxList[i - 1].Angle + Black_boxList[i].Angle));
                            fltrBlack_boxList.Add(new RotatedRect(TMP_PointF, TMP_SizeF, TMP_Angle));
                            //fltr_blackborders.Push();
                          }
                      }
                      else fltrBlack_boxList.Add(Black_boxList[i]);
                    }
                  }
                }
                else { fltrBlack_boxList = Black_boxList; }//Black_boxList.Clear(); }
                #endregion

              //////////
                circleImageBox.Image = maskHsvBlack;
              ////////////

                CvInvoke.DrawContours(resultImg, blackborders, -1, new Bgr(Color.Green).MCvScalar);
                CvInvoke.DrawContours(resultImg, othercontours_black, -1, new Bgr(Color.Red).MCvScalar);
                CvInvoke.DrawContours(resultImg, blueborders, -1, new Bgr(Color.Blue).MCvScalar);

                foreach (RotatedRect box in fltrBlack_boxList)
                {
                    CvInvoke.Polylines(resultImg, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Aqua).MCvScalar, 1);
                }
                foreach (RotatedRect box in Black_boxList)
                {
                  CvInvoke.Polylines(img, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Pink).MCvScalar, 1);
                }
                foreach (RotatedRect box in Blue_boxList)
                {
                  CvInvoke.Polylines(img, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.DarkViolet).MCvScalar, 1);
                }
                foreach (RotatedRect box in fltrBlue_boxList)
                {
                  CvInvoke.Polylines(resultImg, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Yellow).MCvScalar, 1);
                }
                triangleRectangleImageBox.Image = resultImg;
                originalImageBox.Image = img;

                #region save to files
                Image<Bgr, Byte> TMPImageforSaving = new Image<Bgr, byte>(maskHsvBlack.Width, maskHsvBlack.Height, new Bgr(Color.Black));
                CvInvoke.DrawContours(TMPImageforSaving, blackborders, -1, new Bgr(Color.Green).MCvScalar);
                CvInvoke.DrawContours(TMPImageforSaving, othercontours_black, -1, new Bgr(Color.Red).MCvScalar);

                foreach (RotatedRect box in Black_boxList)
                {
                  CvInvoke.Polylines(TMPImageforSaving, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Pink).MCvScalar, 1);
                }
                TMPImageforSaving.ToBitmap().Save("C:\\Emgu\\Dump\\NonFltrBlack.png", System.Drawing.Imaging.ImageFormat.Png);

                TMPImageforSaving = new Image<Bgr, byte>(TMPImageforSaving.Width, TMPImageforSaving.Height, new Bgr(Color.Black));
                CvInvoke.DrawContours(TMPImageforSaving, blackborders, -1, new Bgr(Color.Green).MCvScalar);
                CvInvoke.DrawContours(TMPImageforSaving, othercontours_black, -1, new Bgr(Color.Red).MCvScalar);
                foreach (RotatedRect box in Blue_boxList)
                {
                  CvInvoke.Polylines(TMPImageforSaving, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.DarkViolet).MCvScalar, 1);
                }
                TMPImageforSaving.ToBitmap().Save("C:\\Emgu\\Dump\\NonFltrBlue.png", System.Drawing.Imaging.ImageFormat.Png);
                TMPImageforSaving = new Image<Bgr, byte>(maskHsvBlack.Width, maskHsvBlack.Height, new Bgr(Color.Black));
                CvInvoke.DrawContours(TMPImageforSaving, blackborders, -1, new Bgr(Color.Green).MCvScalar);
                CvInvoke.DrawContours(TMPImageforSaving, othercontours_black, -1, new Bgr(Color.Red).MCvScalar);

                foreach (RotatedRect box in fltrBlack_boxList)
                {
                  CvInvoke.Polylines(TMPImageforSaving, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Aqua).MCvScalar, 1);
                }
                TMPImageforSaving.ToBitmap().Save("C:\\Emgu\\Dump\\FltrBlack.png", System.Drawing.Imaging.ImageFormat.Png);

                TMPImageforSaving = new Image<Bgr, byte>(TMPImageforSaving.Width, TMPImageforSaving.Height, new Bgr(Color.Black));
                CvInvoke.DrawContours(TMPImageforSaving, blackborders, -1, new Bgr(Color.Green).MCvScalar);
                CvInvoke.DrawContours(TMPImageforSaving, othercontours_black, -1, new Bgr(Color.Red).MCvScalar);
                foreach (RotatedRect box in fltrBlue_boxList)
                {
                  CvInvoke.Polylines(TMPImageforSaving, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.Yellow).MCvScalar, 1);
                }
                TMPImageforSaving.ToBitmap().Save("C:\\Emgu\\Dump\\FltrBlue.png", System.Drawing.Imaging.ImageFormat.Png);
                #endregion

              /*
                List<VectorOfPoint> contours_for_work = new List<VectorOfPoint>();
                using (VectorOfVectorOfPoint contours = blackborders)
                {
                  for (int i = 0; i < contours.Size; i++)
                  {
                    contours_for_work.Add(contours[i]);
                  }
                }
                contours_for_work.Sort((VectorOfPoint cont1, VectorOfPoint cont2) =>
                 (bool) (CvInvoke.ContourArea(cont1) > CvInvoke.ContourArea(cont1)) );
              */

                VectorOfVectorOfPoint Big = new VectorOfVectorOfPoint();
                bool ready = false;
                using (VectorOfVectorOfPoint contours = blackborders)
                {
                    for (int i = 0; i < contours.Size && !ready; i++)
                    {

                        VectorOfPoint contourI = contours[i];
                        for (int j = i + 1; j < contours.Size && !ready; j++)
                        {
                            if (0.38 * CvInvoke.ContourArea(contours[j]) > CvInvoke.ContourArea(contourI) && 0.26 * CvInvoke.ContourArea(contours[j]) < CvInvoke.ContourArea(contourI))
                            {
                                Big.Push(contours[j]);
                                Big.Push(contours[i]);
                                ready = !ready;
                            }
                        }
                    }
                }

                TMPImageforSaving = new Image<Bgr, Byte>(resultImg.Width, resultImg.Height, new Bgr(Color.Black));
                CvInvoke.DrawContours(TMPImageforSaving, Big, -1, new Bgr(Color.White).MCvScalar);
                TMPImageforSaving.ToBitmap().Save("C:\\Emgu\\Dump\\DetectedContours.png", System.Drawing.Imaging.ImageFormat.Png);
              imgHsv[0].ToBitmap().Save("C:\\Emgu\\Dump\\ImgHsv - Hue.png", System.Drawing.Imaging.ImageFormat.Png);
              imgHsv[1].ToBitmap().Save("C:\\Emgu\\Dump\\ImgHsv - Sat.png", System.Drawing.Imaging.ImageFormat.Png);
              imgHsv[2].ToBitmap().Save("C:\\Emgu\\Dump\\ImgHsv - Val.png", System.Drawing.Imaging.ImageFormat.Png);
              Image<Hls, byte> HlsImg = img.Convert<Hls, Byte>();

              HlsImg[0].ToBitmap().Save("C:\\Emgu\\Dump\\Img HLS - Hue.png", System.Drawing.Imaging.ImageFormat.Png);
              HlsImg[1].ToBitmap().Save("C:\\Emgu\\Dump\\Img HLS - Light.png", System.Drawing.Imaging.ImageFormat.Png);
              HlsImg[2].ToBitmap().Save("C:\\Emgu\\Dump\\Img HLS - Sat.png", System.Drawing.Imaging.ImageFormat.Png);

                lineImageBox.Image = TMPImageforSaving;

                watch.Stop();
                msgBuilder.Append(String.Format("Triangles & Rectangles - {0} ms; ", watch.ElapsedMilliseconds));
                #endregion
                /*

                  lineImageBox.Image = resultImg;
                  originalImageBox.Image = img;
                  this.Text = msgBuilder.ToString();

                  #region draw and rectangles
                  Mat triangleRectangleImage = new Mat(img.Size, DepthType.Cv8U, 3);
                  triangleRectangleImage.SetTo(new MCvScalar(0));

                  foreach (RotatedRect box in boxList)
                  {
                      CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(box.GetVertices(), Point.Round), true, new Bgr(Color.DarkOrange).MCvScalar, 2);
                  }

                  triangleRectangleImageBox.Image = triangleRectangleImage;
                  #endregion

                  #region draw lines
                  /*Mat lineImage = new Mat(img.Size, DepthType.Cv8U, 3);
                  lineImage.SetTo(new MCvScalar(0));
                 foreach (LineSegment2D line in lines)
                   CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(Color.Green).MCvScalar, 2);

                  lineImageBox.Image = lineImage;
                  #endregion
              }
              }

              #region draw
              //foreach (LineSegment2D line in lines)
              //CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(Color.Green).MCvScalar, 2);

              #endregion
               * */
            }
        }
コード例 #30
0
ファイル: MainForm.cs プロジェクト: Lerbytech/ShapeDetection
        /// <summary>
        /// Finds contours in a binary image and classifies them: contours that approximate
        /// to a quadrilateral are recorded as blue tile borders (plus their min-area rotated
        /// rect), every other sufficiently large contour goes into the "other" bucket.
        /// </summary>
        /// <param name="src">Binary grayscale source image (e.g. a thresholded/edge mask).</param>
        /// <param name="blueborders">Receives the raw contours classified as quadrilaterals.</param>
        /// <param name="Blue_boxList">Receives the min-area rotated rectangle of each quadrilateral.</param>
        /// <param name="othercontours_blue">Receives large contours that are not quadrilaterals.</param>
        public void getBlueContours(Image<Gray, Byte> src, VectorOfVectorOfPoint blueborders, List<RotatedRect> Blue_boxList, VectorOfVectorOfPoint othercontours_blue)
        {
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(src, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);

                for (int i = 0; i < contours.Size; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                    using (VectorOfPoint approxContour = new VectorOfPoint())
                    {
                        // Approximate with 5% of the perimeter as tolerance; a clean
                        // quadrilateral collapses to exactly 4 vertices.
                        CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);

                        // Hoisted: BoundingRectangle was previously computed twice in the same condition.
                        Rectangle bounds = CvInvoke.BoundingRectangle(approxContour);

                        // Only consider contours with area greater than 250 and a reasonably large bounding box.
                        if (CvInvoke.ContourArea(approxContour, false) > 250 && bounds.Width * bounds.Height > 1000)
                        {
                            if (approxContour.Size == 4)
                            {
                                Blue_boxList.Add(CvInvoke.MinAreaRect(approxContour));
                                blueborders.Push(contour);
                            }
                            else
                            {
                                othercontours_blue.Push(contour);
                            }
                        }
                    }
                }
            }
        }
コード例 #31
0
ファイル: Form1.cs プロジェクト: alecrudd/GocatorImager
         /// <summary>
         /// Walks the root level of a contour hierarchy, keeps contours whose shape matches
         /// the reference square (_square), fills each accepted tile into a result image,
         /// outlines its approximated polygon and marks a 100x100 box around its centroid.
         /// The result is displayed in resultBox.
         /// </summary>
         /// <param name="image">Source image; only its size/depth/channel count are used for the result canvas.</param>
         /// <param name="contours">All contours found in the image.</param>
         /// <param name="hierachy">Contour hierarchy from FindContours; [i,0] is the next-sibling index.</param>
         /// <param name="hIndex">Index of the first root contour to visit (negative terminates immediately).</param>
         public void FindTiles(Mat image, VectorOfVectorOfPoint contours, int[,] hierachy, int hIndex)
         {
             Mat resultContours = new Mat(image.Size, image.Depth, image.NumberOfChannels);

             // Visit every root-level contour by following the "next sibling" links.
             for (; hIndex >= 0; hIndex = hierachy[hIndex, 0])
             {
                 using (VectorOfPoint c = contours[hIndex])
                 using (VectorOfPoint approx = new VectorOfPoint())
                 {
                     CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * .02, true);
                     double area = CvInvoke.ContourArea(approx);

                     // Filter out degenerate and oversized contours.
                     if (area > 0 && area < 100000)
                     {
                         // Hu-moment based shape match against the reference square; smaller is a closer match.
                         double ratio = CvInvoke.MatchShapes(_square, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);

                         if (ratio < .1)
                         {
                             CvInvoke.FillConvexPoly(resultContours, c, new MCvScalar(255), LineType.AntiAlias);

                             // Outline the approximated polygon, wrapping from the last vertex back to the first.
                             for (int i = 0; i < approx.Size; i++)
                             {
                                 int second = (i + 1 == approx.Size) ? 0 : i + 1;
                                 CvInvoke.Line(resultContours,
                                     new System.Drawing.Point(approx[i].X, approx[i].Y),
                                     new System.Drawing.Point(approx[second].X, approx[second].Y),
                                     new MCvScalar(128), 10);
                             }

                             // Fix: guard against a zero moment (degenerate contour). The original divided
                             // by M.M00 unconditionally, producing NaN centroids cast to int and a marker
                             // rectangle drawn at a garbage position.
                             var M = CvInvoke.Moments(c);
                             if (M.M00 != 0)
                             {
                                 int cx = (int)(M.M10 / M.M00);
                                 int cy = (int)(M.M01 / M.M00);
                                 CvInvoke.Rectangle(resultContours, new Rectangle(new System.Drawing.Point(cx - 50, cy - 50), new Size(100, 100)), new MCvScalar(128), 2);
                             }
                         }
                     }
                 }
                 // NOTE: the original assigned an unused local `color` before and after this body;
                 // it was never read, so it has been removed as dead code.
             }
             resultBox.Image = resultContours;
         }
コード例 #32
0
      /// <summary>
      /// Recursively scans a contour hierarchy for license-plate candidate regions:
      /// a candidate must enclose at least 3 child contours (characters), have area > 400,
      /// and a width/height ratio between 3 and 10. Each candidate is rectified with an
      /// affine warp, resized, edge-trimmed, filtered and OCR'd; results are appended to
      /// the output lists.
      /// </summary>
      /// <param name="contours">All contours detected in the image.</param>
      /// <param name="hierachy">Contour hierarchy; [i,0] = next sibling index, [i,2] = first child index.</param>
      /// <param name="idx">Index of the first contour to examine at this hierarchy level.</param>
      /// <param name="gray">Grayscale source image the plate region is warped out of.</param>
      /// <param name="canny">Canny edge image; only forwarded on recursive calls here.</param>
      /// <param name="licensePlateImagesList">Receives the rectified plate images.</param>
      /// <param name="filteredLicensePlateImagesList">Receives the filtered plate images.</param>
      /// <param name="detectedLicensePlateRegionList">Receives the rotated rect of each detected plate.</param>
      /// <param name="licenses">Receives the OCR text of each detected plate.</param>
      private void FindLicensePlate(
         VectorOfVectorOfPoint contours, int[,] hierachy, int idx, IInputArray gray, IInputArray canny,
         List<IInputOutputArray> licensePlateImagesList, List<IInputOutputArray> filteredLicensePlateImagesList, List<RotatedRect> detectedLicensePlateRegionList,
         List<String> licenses)
      {
         // Walk the sibling chain at this hierarchy level.
         for (; idx >= 0;  idx = hierachy[idx,0])
         {
            int numberOfChildren = GetNumberOfChildren(hierachy, idx);      
            //if it does not contains any children (charactor), it is not a license plate region
            if (numberOfChildren == 0) continue;

            using (VectorOfPoint contour = contours[idx])
            {
               if (CvInvoke.ContourArea(contour) > 400)
               {
                  if (numberOfChildren < 3)
                  {
                     //If the contour has less than 3 children, it is not a license plate (assuming license plate has at least 3 charactor)
                     //However we should search the children of this contour to see if any of them is a license plate
                     // NOTE(review): unlike the recursion below, this call does not check
                     // hierachy[idx, 2] >= 0 first — confirm a -1 child index is harmless here.
                     FindLicensePlate(contours, hierachy, hierachy[idx, 2], gray, canny, licensePlateImagesList,
                        filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                     continue;
                  }

                  // Normalize the box so its angle lies within [-45, 45] degrees, swapping
                  // width/height to compensate (RotatedRect.Size/.Angle are mutable fields).
                  RotatedRect box = CvInvoke.MinAreaRect(contour);
                  if (box.Angle < -45.0)
                  {
                     float tmp = box.Size.Width;
                     box.Size.Width = box.Size.Height;
                     box.Size.Height = tmp;
                     box.Angle += 90.0f;
                  }
                  else if (box.Angle > 45.0)
                  {
                     float tmp = box.Size.Width;
                     box.Size.Width = box.Size.Height;
                     box.Size.Height = tmp;
                     box.Angle -= 90.0f;
                  }

                  double whRatio = (double) box.Size.Width/box.Size.Height;
                  if (!(3.0 < whRatio && whRatio < 10.0))
                     //if (!(1.0 < whRatio && whRatio < 2.0))
                  {
                     //if the width height ratio is not in the specific range,it is not a license plate 
                     //However we should search the children of this contour to see if any of them is a license plate
                     //Contour<Point> child = contours.VNext;
                     if (hierachy[idx, 2] > 0)
                        FindLicensePlate(contours, hierachy, hierachy[idx, 2], gray, canny, licensePlateImagesList,
                           filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                     continue;
                  }

                  using (UMat tmp1 = new UMat())
                  using (UMat tmp2 = new UMat())
                  {
                     // Map the rotated box corners onto an axis-aligned rectangle.
                     // NOTE(review): cv::getAffineTransform is defined for exactly 3 point
                     // pairs but 4 are supplied here — verify the Emgu wrapper tolerates this.
                     PointF[] srcCorners = box.GetVertices();
                     
                     PointF[] destCorners = new PointF[] {
                        new PointF(0, box.Size.Height - 1),
                        new PointF(0, 0),
                        new PointF(box.Size.Width - 1, 0), 
                        new PointF(box.Size.Width - 1, box.Size.Height - 1)};
                     
                     using (Mat rot = CvInvoke.GetAffineTransform(srcCorners, destCorners))
                     {
                        CvInvoke.WarpAffine(gray, tmp1, rot, Size.Round(box.Size));           
                     }

                     //resize the license plate such that the font is ~ 10-12. This font size results in better accuracy from tesseract
                     Size approxSize = new Size(240, 180);
                     double scale = Math.Min(approxSize.Width/box.Size.Width, approxSize.Height/box.Size.Height);
                     Size newSize = new Size( (int)Math.Round(box.Size.Width*scale),(int) Math.Round(box.Size.Height*scale));
                     CvInvoke.Resize(tmp1, tmp2, newSize, 0, 0, Inter.Cubic);

                     //removes some pixels from the edge
                     int edgePixelSize = 2;
                     Rectangle newRoi = new Rectangle(new Point(edgePixelSize, edgePixelSize),
                        tmp2.Size - new Size(2*edgePixelSize, 2*edgePixelSize));
                     UMat plate = new UMat(tmp2, newRoi);

                     UMat filteredPlate = FilterPlate(plate);

                     // OCR the filtered plate and concatenate the recognized characters.
                     Tesseract.Character[] words;
                     StringBuilder strBuilder = new StringBuilder();
                     using (UMat tmp = filteredPlate.Clone())
                     {
                        _ocr.Recognize(tmp);
                        words = _ocr.GetCharacters();

                        if (words.Length == 0) continue;

                        for (int i = 0; i < words.Length; i++)
                        {
                           strBuilder.Append(words[i].Text);
                        }
                     }

                     licenses.Add(strBuilder.ToString());
                     licensePlateImagesList.Add(plate);
                     filteredLicensePlateImagesList.Add(filteredPlate);
                     detectedLicensePlateRegionList.Add(box);

                  }
               }
            }
         }
      }
コード例 #33
0
ファイル: MainForm.cs プロジェクト: Lerbytech/ShapeDetection
 /// <summary>
 /// Intended to filter/de-duplicate black border contours.
 /// NOTE(review): this method is an empty stub — it performs no work and all
 /// parameters are ignored. Confirm whether callers rely on it being a no-op
 /// or whether the implementation was lost.
 /// </summary>
 public void FilterBlackBorders(VectorOfVectorOfPoint blackborders, List<RotatedRect> Black_boxlist, VectorOfVectorOfPoint othercontours_black)
 {
 }
コード例 #34
0
        /// <summary>
        /// Loads a grayscale image, binarizes it with a coarse adaptive threshold, detects
        /// grid cells as contours, links neighbouring cells into a 2D grid (via the Cell and
        /// GridEstimator helpers), extracts a sub-image per cell, matches every cell against
        /// a template cell and displays/saves several diagnostic images. Blocks until a key
        /// is pressed in the OpenCV window loop at the end.
        /// </summary>
        /// <param name="imagePath">Path of the image file to process.</param>
        public void Process(string imagePath)
        {
            var im = CvInvoke.Imread(imagePath, Emgu.CV.CvEnum.ImreadModes.Grayscale);
            Mat imx = new Mat(), th = new Mat();

            CvInvoke.MedianBlur(im, imx, 3);

            // Coarse adaptive threshold: shrink, blur, re-expand to get a smoothed local
            // mean, then mark pixels brighter than that mean as foreground (255).
            CvInvoke.Resize(imx, th, new Size(50, 50), 0, 0, Inter.Area);
            CvInvoke.GaussianBlur(th, im, new Size(7, 7), 0);
            CvInvoke.Resize(im, th, imx.Size, 0, 0, Inter.Linear);
            CvInvoke.Compare(imx, th, im, CmpType.GreaterThan);

            var imrgb = new Mat(im.Rows, im.Cols, DepthType.Cv8U, 3);

            CvInvoke.CvtColor(im, imrgb, ColorConversion.Gray2Bgr);

            var contours = new Emgu.CV.Util.VectorOfVectorOfPoint();

            CvInvoke.FindContours(im, contours, null, RetrType.Ccomp, ChainApproxMethod.ChainApproxSimple);
            var ca = contours.ToArrayOfArray();

            // Wrap each contour in a Cell and collect its signed area for sorting.
            var           cells = new List <Cell>();
            List <double> ca_aa = new List <double>(ca.Length);

            for (int i = 0; i < ca.Length; i++)
            {
                var c = new Cell(ca[i]);
                cells.Add(c);
                ca_aa.Add(c.sarea);
            }
            ca_aa.Sort();

            // Estimate the plausible cell-area band by scanning outward from index 100
            // while adjacent sorted areas differ by less than 1%.
            // NOTE(review): assumes at least 102 contours exist — index 100/101 would throw
            // IndexOutOfRangeException otherwise. TODO confirm input images guarantee this.
            // NOTE(review): the negation of ca_aa[i] suggests Cell.sarea is stored negated;
            // verify against the Cell class.
            double ca_max = 0, ca_min = 0;

            for (int i = 100; i >= 0 && ca_aa[i] / ca_aa[i + 1] < 1.01; i--)
            {
                ca_max = -ca_aa[i];
            }

            for (int i = 100; i < ca_aa.Count && ca_aa[i] / ca_aa[i + 1] < 1.01; i++)
            {
                ca_min = -ca_aa[i];
            }

            double side_avg = Math.Sqrt((ca_min + ca_max) / 2);

            // Keep only cells within the area band whose contour points all stay outside
            // a minimum radius from the center (i.e. roughly convex, cell-shaped blobs).
            List <Cell> gridCells = new List <Cell>();

            for (int i = 0; i < ca.Length; i++)
            {
                var col = colors[i % colors.Count];
                var c   = cells[i];
                if (c.area > ca_max || c.area < ca_min)
                {
                    continue;
                }

                c.FindCenter();
                double minR = c.sidelength / 2.25;
                //CvInvoke.Circle(imrgb, c.cp, (int)minR, col, 1);
                bool ok    = true;
                var  minR2 = minR * minR;
                for (int j = 0; j < c.ca.Length; j++)
                {
                    if (c.Dist2(c.ca[j]) < minR2)
                    {
                        ok = false;
                    }
                }

                //for (int j = 1; j < c.ca.Length; j++)
                //    CvInvoke.Line(imrgb, c.ca[j - 1], c.ca[j], col, ok ? 9 : 3);

                if (ok)
                {
                    gridCells.Add(c);
                }
            }


            // Link each pair of cells whose centers are close enough, classifying the
            // link direction (Left/Right/Up/Down) by the dominant axis of the offset.
            double maxCentDist2 = 2 * side_avg * side_avg;

            for (int i = 0; i < gridCells.Count; i++)
            {
                var a = gridCells[i];
                for (int j = 0; j < gridCells.Count; j++)
                {
                    var b = gridCells[j];
                    if (i == j)
                    {
                        continue;
                    }
                    if (a.Dist2(b) < maxCentDist2)
                    {
                        var      d   = a.Diff(b);
                        bool     isX = Math.Abs(d.X) > Math.Abs(d.Y);
                        Cell.Dir dir = isX ? d.X < 0 ? Cell.Dir.Left : Cell.Dir.Right : d.Y < 0 ? Cell.Dir.Up : Cell.Dir.Down;
                        a.Link(b, dir);

                        //CvInvoke.Line(imrgb, a.cp, b.cp, new MCvScalar(0, 255, isX ? 255 : 0), 3);
                    }
                }
            }

            // Breadth-first propagation of grid indices starting from an arbitrary origin.
            // NOTE(review): assumes gridCells is non-empty — TODO confirm.
            Queue <Cell> qc = new Queue <Cell>();

            gridCells[0].SetGridIndex(new Point()); // Arbitrary Origo
            qc.Enqueue(gridCells[0]);
            while (qc.Count > 0)
            {
                var c = qc.Dequeue();
                foreach (var nc in c.neighbours)
                {
                    if (nc != null && !nc.hasGridIndex)
                    {
                        qc.Enqueue(nc);
                    }
                }
                c.CalcNeighboursGridIndex();
            }

            // Shift grid indices so they start at (0,0); note this offsets every cell in
            // `cells`, not just the filtered grid cells.
            int gixr = (from c in gridCells select c.gi.X).Min();
            int giyr = (from c in gridCells select c.gi.Y).Min();

            foreach (var c in cells)
            {
                c.gi = new Point(c.gi.X - gixr, c.gi.Y - giyr);
            }
            gixr = (from c in gridCells select c.gi.X).Max() + 1;
            giyr = (from c in gridCells select c.gi.Y).Max() + 1;

            // Fit a grid model to the observed (index -> pixel center) pairs and draw it.
            var gridEst = new GridEstimator();

            foreach (var c in gridCells)
            {
                gridEst.Add(c.gi, c.cpf);
            }

            gridEst.Process();
            for (int Xi = 0; Xi < gixr; Xi++)
            {
                Point p1 = gridEst.GetP(Xi + 0.5f, 0.5f);
                Point p2 = gridEst.GetP(Xi + 0.5f, giyr - 0.5f);
                CvInvoke.Line(imrgb, p1, p2, new MCvScalar(0, 100, 255), 5);
            }
            for (int Yi = 0; Yi < giyr; Yi++)
            {
                Point p1 = gridEst.GetP(0.5f, Yi + 0.5f);
                Point p2 = gridEst.GetP(gixr - 0.5f, Yi + 0.5f);
                CvInvoke.Line(imrgb, p1, p2, new MCvScalar(0, 100, 255), 5);
            }


            // Place each grid cell into a 2D lookup array indexed by its grid coordinates.
            Cell[,] cg = new Cell[gixr, giyr];
            foreach (var c in gridCells)
            {
                cg[c.gi.X, c.gi.Y] = c;
            }

            for (int xi = 0; xi < gixr; xi++)
            {
                for (int yi = 0; yi < giyr; yi++)
                {
                    var c = cg[xi, yi];
                    if (c == null)
                    {
                        continue;
                    }
                    var    col  = colors[(xi + yi) % colors.Count];
                    double minR = c.sidelength / 2.25;
                    //CvInvoke.Circle(imrgb, c.cp, (int)minR, col, 4);
                }
            }

            // Extract a fixed-size sub-image (and mask) for every occupied grid position.
            var sidesize = (int)(side_avg * 0.8);

            for (int xi = 0; xi < gixr; xi++)
            {
                for (int yi = 0; yi < giyr; yi++)
                {
                    var c = cg[xi, yi];
                    if (c == null)
                    {
                        continue;
                    }
                    c.image  = c.ExtractImage(imx, sidesize);
                    c.imMask = c.ExtractImage(im, sidesize);
                }
            }

            // Match every cell against the cell at grid position (10,10) to find the
            // score range.
            // NOTE(review): (10,10) is a hard-coded template index — this throws or
            // null-refs when the grid is smaller than 11x11 or that position is empty.
            // TODO confirm or pick the template dynamically.
            List <float> diffs = new List <float>();

            for (int xi = 0; xi < gixr; xi++)
            {
                for (int yi = 0; yi < giyr; yi++)
                {
                    var c = cg[xi, yi];
                    if (c == null)
                    {
                        continue;
                    }
                    var t = cg[10, 10];
                    var r = c.Match(t);

                    diffs.Add(r);
                }
            }

            float diffMax = diffs.Max();

            // Build two mosaics: the raw cell images and a red/green match heat map.
            Mat gridImage = new Mat(sidesize * giyr, sidesize * gixr, DepthType.Cv8U, 3);

            gridImage.SetTo(new MCvScalar(128, 128, 128));
            Mat gridMatchImage = new Mat(sidesize * giyr, sidesize * gixr, DepthType.Cv8U, 3);

            gridMatchImage.SetTo(new MCvScalar(128, 128, 128));

            var templateCell = cg[10, 10];

            for (int xi = 0; xi < gixr; xi++)
            {
                for (int yi = 0; yi < giyr; yi++)
                {
                    var c = cg[xi, yi];
                    if (c == null)
                    {
                        continue;
                    }
                    var r = c.Match(templateCell);

                    var imrgb2 = new Mat();
                    CvInvoke.CvtColor(c.image, imrgb2, ColorConversion.Gray2Bgr);
                    // `w` is a view into the mosaic; CopyTo writes the cell in place.
                    Mat w = new Mat(gridImage, new Rectangle(new Point(xi * sidesize, yi * sidesize), c.image.Size));
                    imrgb2.CopyTo(w);

                    w = new Mat(gridMatchImage, new Rectangle(new Point(xi * sidesize, yi * sidesize), c.image.Size));
                    int rr = (int)(r / diffMax * 255);
                    w.SetTo(new MCvScalar(0, 255 - rr, rr));
                    imrgb2.CopyTo(w, c.imMask);
                }
            }

            new Emgu.CV.UI.ImageViewer(gridImage, "gridImage").Show(); // Allows zoom and pan
            //CvInvoke.Imshow("gridImage", gridImage);
            CvInvoke.Imshow("gridMatchImage", gridMatchImage);

            new Emgu.CV.UI.ImageViewer(imrgb, "work grid").Show(); // Allows zoom and pan
                                                                   //            CvInvoke.Imshow("work grid", imrgb);
            CvInvoke.Imwrite("work grid.png", imrgb);

            // Busy-wait until any key is pressed in an OpenCV window.
            while (CvInvoke.WaitKey(100) == -1)
            {
                ;
            }
        }
コード例 #35
0
        // get all of the valid contour maps; "valid" means area >= Constants.MIN_AREA
        // this was not in their code, I added this feature, but I used their logic
        /// <summary>
        /// Finds every sufficiently large contour in <paramref name="input"/> and builds a
        /// ColorfulContourMap for it: the raw contour points and an ApproxPolyDP-simplified
        /// polygon, each point paired with its color sampled from the original image.
        /// </summary>
        /// <param name="input">Source color image.</param>
        /// <param name="index">Identifier stored on each resulting ColorfulContourMap.</param>
        /// <param name="mode">0 = black background (threshold 245), otherwise white background (threshold 100).</param>
        /// <returns>List of contour maps; empty when no contour meets the area threshold.</returns>
        public static List<ColorfulContourMap> getAllContourMap(Mat input, int index, int mode = 0)
        {
            List<ColorfulContourMap> result = new List<ColorfulContourMap>();

            // Convert the input to grayscale.
            MatImage m1 = new MatImage(input);
            m1.Convert();
            Mat gray = m1.Out();

            // Smooth and inverse-binarize; the threshold depends on the expected background.
            // (The two branches previously duplicated everything except the threshold value.)
            MatImage m2 = new MatImage(gray);
            m2.SmoothGaussian(3);
            if (mode == 0)
                m2.ThresholdBinaryInv(245, 255); // black background
            else
                m2.ThresholdBinaryInv(100, 255); // white background
            gray = m2.Out();

            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
            CvInvoke.FindContours(gray, contours, new Mat(), RetrType.List, ChainApproxMethod.ChainApproxNone);

            // Fix: the original read contours[0] unconditionally before the loop, which
            // throws when the image produces no contours at all. It also cloned `gray`
            // into an unused temporary; both removed.
            for (int i = 0; i < contours.Size; i++)
            {
                VectorOfPoint contour = contours[i];

                // Skip contours below the minimum area.
                if (Math.Abs(CvInvoke.ContourArea(contour, false)) < Constants.MIN_AREA)
                    continue;

                // Simplify the contour to a polygon with 1px tolerance.
                VectorOfPoint poly = new VectorOfPoint();
                CvInvoke.ApproxPolyDP(contour, poly, 1.0, true);

                // Pair every raw contour point with its color from the source image.
                List<ColorfulPoint> cps = new List<ColorfulPoint>();
                foreach (Point p in contour.ToArray())
                {
                    cps.Add(new ColorfulPoint { X = p.X, Y = p.Y, color = extractPointColor(p, input) });
                }

                // Same for the simplified polygon points.
                List<ColorfulPoint> pcps = new List<ColorfulPoint>();
                foreach (Point p in poly.ToArray())
                {
                    pcps.Add(new ColorfulPoint { X = p.X, Y = p.Y, color = extractPointColor(p, input) });
                }

                result.Add(new ColorfulContourMap(cps, pcps, index));
            }

            return result;
        }
コード例 #36
0
        /// <summary>
        /// Runs one complete hand-tracking pass over <c>_currentFrame</c>:
        /// background subtraction, skin filtering, morphological clean-up,
        /// contour / convex-hull / convexity-defect analysis to locate fingertips,
        /// and finally optional mouse movement derived from the tracked control
        /// point. Publishes the intermediate frames via <c>Messanger</c>.
        /// NOTE(review): reads and writes class-level state (_currentFrame,
        /// _frameHistoryBuffer, _lastControlPoint, _currentControlPoint) —
        /// presumably invoked from a single capture thread; confirm before
        /// calling concurrently.
        /// </summary>
        private void ProcessFrame()
        {
            try
            {
                #region Background/Foreground
                // Difference against the frame history approximates the moving foreground.
                Image<Bgr, byte> difference = BackgroundSubstractionOptions.Substract(_currentFrame, _frameHistoryBuffer);

                // Bounding rectangle of the foreground (the hand), if any was found.
                Rectangle? handArea = ForegoundExtractionOptions.HighlightForeground(difference);
                Image<Bgr, byte> skinDetectionFrame = _currentFrame.Copy();

                // Blank out everything outside the hand area so skin filtering only sees the hand.
                if (handArea.HasValue)
                    ForegoundExtractionOptions.CutBackground(skinDetectionFrame, handArea.Value);
                #endregion

                #region Skin filtering / Morphological / Smooth filtering
                Image<Gray, byte> skinDetectionFrameGray = SkinFilteringOptions.ActiveItem.FilterFrame(skinDetectionFrame);

                // The operation stack is shared with the UI; hold the read lock while iterating.
                MorphologicalFilteringOptions.StackSync.EnterReadLock();
                foreach (var operation in MorphologicalFilteringOptions.OperationStack)
                {
                    if (operation.FilterType == Model.Enums.MorphologicalFilterType.Dilatation)
                    {
                        CvInvoke.Dilate(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(),
                            new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType,
                            new MCvScalar(operation.KernelBorderThickness));
                    }
                    else
                    {
                        CvInvoke.Erode(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(),
                            new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType,
                            new MCvScalar(operation.KernelBorderThickness));
                    }
                }
                MorphologicalFilteringOptions.StackSync.ExitReadLock();

                skinDetectionFrameGray = SmoothFilteringOptions.FilterFrame(skinDetectionFrameGray);
                #endregion

                #region Contours / ConvexHull / ConvexityDefects
                Image<Bgr, byte> fingerTrackerFrame = _currentFrame.Copy();

                // Candidate fingertip points collected from the convexity defects below.
                List<Point> fingers = new List<Point>();

                using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
                {
                    // FindContours mutates its input, hence the .Copy().
                    CvInvoke.FindContours(skinDetectionFrameGray.Copy(), contours, null, RetrType.List, FingerTrackingOptions.ApproxMethod);

                    if (contours.Size > 0)
                    {
                        // Assume the largest contour is the hand.
                        VectorOfPoint biggestContour = contours[0];

                        if (contours.Size > 1)
                        {
                            for (int i = 1; i < contours.Size; i++)
                            {
                                if (CvInvoke.ContourArea(contours[i], false) > CvInvoke.ContourArea(biggestContour, false))
                                    biggestContour = contours[i];
                            }
                        }

                        if (CvInvoke.ContourArea(biggestContour, false) > FingerTrackingOptions.MinContourArea)
                        {
                            // NOTE(review): this using-block disposes a vector still owned by
                            // `contours` — presumably safe because `contours` is not touched
                            // again afterwards, but confirm with the Emgu CV docs.
                            using (VectorOfPoint contour = biggestContour)
                            {
                                using (VectorOfPoint approxContour = new VectorOfPoint())
                                {
                                    // Simplify the contour; epsilon is a fraction of its perimeter.
                                    CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * FingerTrackingOptions.PerimeterScalingFactor.Value, true);

                                    fingerTrackerFrame.Draw(approxContour.ToArray(), new Bgr(FingerTrackingOptions.ContourHighlightColor), 2);
                                    // Hull is computed twice: once as points (for drawing),
                                    // once as indices (required by ConvexityDefects).
                                    VectorOfPoint convexHull = new VectorOfPoint();
                                    VectorOfInt intHull = new VectorOfInt();
                                    CvInvoke.ConvexHull(approxContour, convexHull, FingerTrackingOptions.ConvexHullCW);
                                    CvInvoke.ConvexHull(approxContour, intHull, FingerTrackingOptions.ConvexHullCW);
                                    fingerTrackerFrame.DrawPolyline(convexHull.ToArray(), true, new Bgr(FingerTrackingOptions.ConvexHullColor), 2);

                                    // Rotated bounding rect; its centre approximates the hand centre.
                                    var countourRect = CvInvoke.MinAreaRect(approxContour);
                                    fingerTrackerFrame.Draw(new CircleF(new PointF(countourRect.Center.X, countourRect.Center.Y), 3), new Bgr(FingerTrackingOptions.DefectLinesColor), 2);

                                    Mat defects = new Mat();
                                    CvInvoke.ConvexityDefects(approxContour, intHull, defects);

                                    if (!defects.IsEmpty)
                                    {
                                        var contourPoints = approxContour.ToArray();

                                        // Copy the defect Mat into a typed matrix; each row holds
                                        // indices into contourPoints: [0]=start, [1]=end, [2]=farthest point.
                                        Matrix<int> m = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels);
                                        defects.CopyTo(m);

                                        for (int i = 0; i < m.Rows; i++)
                                        {
                                            int startIdx = m.Data[i, 0];
                                            int endIdx = m.Data[i, 1];
                                            int depthIdx = m.Data[i, 2];

                                            Point startPoint = contourPoints[startIdx];
                                            Point endPoint = contourPoints[endIdx];
                                            Point depthPoint = contourPoints[depthIdx];

                                            LineSegment2D startDepthLine = new LineSegment2D(startPoint, depthPoint);
                                            LineSegment2D depthEndLine = new LineSegment2D(depthPoint, endPoint);

                                            LineSegment2D startCenterLine = new LineSegment2D(startPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));
                                            LineSegment2D depthCenterLine = new LineSegment2D(depthPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));
                                            LineSegment2D endCenterLine = new LineSegment2D(endPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y));

                                            CircleF startCircle = new CircleF(startPoint, 5);
                                            CircleF depthCircle = new CircleF(depthPoint, 5);
                                            CircleF endCircle = new CircleF(endPoint, 5);

                                            // Defect start points above the hand centre (smaller Y = higher
                                            // in image coordinates) are treated as fingertip candidates.
                                            if (startPoint.Y < countourRect.Center.Y)
                                                fingers.Add(startPoint);

                                            if (!FingerTrackingOptions.TrackOnlyControlPoint)
                                            {
                                                fingerTrackerFrame.Draw(startCircle, new Bgr(FingerTrackingOptions.DefectStartPointHighlightColor), 2);
                                                fingerTrackerFrame.Draw(depthCircle, new Bgr(FingerTrackingOptions.DefectDepthPointHighlightColor), 2);
                                                fingerTrackerFrame.Draw(endCircle, new Bgr(FingerTrackingOptions.DefectEndPointHighlightColor), 2);

                                                fingerTrackerFrame.Draw(startDepthLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                                //fingerTrackerFrame.Draw(depthEndLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);

                                                fingerTrackerFrame.Draw(startCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                                //fingerTrackerFrame.Draw(depthCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                               // fingerTrackerFrame.Draw(endCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2);
                                            }
                                        }

                                        // Control point = hand centre, or else the highest fingertip.
                                        // NOTE(review): the Min is recomputed inside the predicate for
                                        // every element (O(n²)); harmless for a handful of fingertips.
                                        _lastControlPoint = _currentControlPoint;
                                        _currentControlPoint = MouseControlOptions.UseHandCenter ? new Point((int)countourRect.Center.X, (int)countourRect.Center.Y)
                                                    : fingers.FirstOrDefault(f => f.Y == fingers.Min(line => line.Y));
                                        fingers.Clear();

                                        if (FingerTrackingOptions.TrackOnlyControlPoint)
                                        {
                                            // Debug view: black frame with only the control point drawn.
                                            fingerTrackerFrame = new Image<Bgr, byte>(fingerTrackerFrame.Width, fingerTrackerFrame.Height, new Bgr(Color.Black));
                                            fingerTrackerFrame.Draw(new CircleF(_currentControlPoint, 5), new Bgr(Color.Red), 2);
                                        }

                                    }
                                }
                            }
                        }
                    }
                }
                #endregion

                #region Mouse control
                // Move the cursor only when both control points are valid (-1 marks
                // "unset"), the point actually moved, and the jump is small enough
                // (< 1/10 of the frame) to reject tracking glitches.
                if (_currentControlPoint.X != -1 && _currentControlPoint.Y != -1 && _lastControlPoint.X != -1 && _lastControlPoint.Y != -1
                         && _currentControlPoint.X != _lastControlPoint.X && _currentControlPoint.Y != _lastControlPoint.Y
                            && Math.Abs(_currentControlPoint.X - _lastControlPoint.X) < (MouseControlOptions.FrameWidth / 10)
                                 && Math.Abs(_currentControlPoint.Y - _lastControlPoint.Y) < (MouseControlOptions.FrameHeight / 10))
                {
                    int frameX = _currentControlPoint.X;
                    int frameY = _currentControlPoint.Y;

                    int moveX = _currentControlPoint.X - _lastControlPoint.X;
                    int moveY = _currentControlPoint.Y - _lastControlPoint.Y;

                    int sensitiveX = 1;
                    int sensitiveY = 1;

                    // Positive sensitivity scales frame movement up to screen
                    // coordinates; negative scales it down.
                    if (MouseControlOptions.MouseSensitive.Value > 0)
                    {
                        sensitiveX = (int)(((double)MouseControlOptions.ScreenWidth / MouseControlOptions.FrameWidth) * MouseControlOptions.MouseSensitive.Value);
                        sensitiveY = (int)(((double)MouseControlOptions.ScreenHeight / MouseControlOptions.FrameHeight) * MouseControlOptions.MouseSensitive.Value);
                    }
                    else if (MouseControlOptions.MouseSensitive.Value < 0)
                    {
                        sensitiveX = (int)(((double)MouseControlOptions.FrameWidth / MouseControlOptions.ScreenWidth) * MouseControlOptions.MouseSensitive.Value * -1);
                        sensitiveY = (int)(((double)MouseControlOptions.FrameHeight / MouseControlOptions.ScreenHeight) * MouseControlOptions.MouseSensitive.Value * -1);
                    }

                    // X is inverted — presumably because the webcam image mirrors
                    // the user; TODO confirm.
                    moveX *= sensitiveX * -1;
                    moveY *= sensitiveY;

                    Point currentMousePosition = GetMousePosition();

                    int destinationX = currentMousePosition.X + moveX;
                    int destinationY = currentMousePosition.Y + moveY;

                    Messanger.PublishOnCurrentThread(new FingerMovedMessage(MouseControlOptions.ControlMouse, frameX, frameY, destinationX, destinationY));

                    if (MouseControlOptions.ControlMouse && MouseControlOptions.MouseSensitive.Value != 0 && destinationX >= 0 && destinationY >= 0)
                        SetCursorPos(destinationX, destinationY);
                }
                #endregion

                Messanger.PublishOnCurrentThread(new FrameProcessedMessage(_currentFrame, difference, skinDetectionFrameGray, fingerTrackerFrame));
            }
            // NOTE(review): swallows every exception so one bad frame never kills
            // the capture loop — deliberate best-effort, but consider logging.
            catch { }
        }
コード例 #37
0
        /// <summary>
        /// Frame handler: grabs a webcam frame, binarises it with the two
        /// threshold controls, finds the external contours, classifies their
        /// approximated shapes, and annotates each contour with its bounding box
        /// and estimated real-world width/height. Results are shown in
        /// imageBox1 (annotated) and imageBox2 (thresholded).
        /// </summary>
        /// <param name="sender">Event source (unused).</param>
        /// <param name="e">Event data (unused).</param>
        private void processFrameAndUpdateGUI(object sender, EventArgs e)
        {
            threshold1 = Convert.ToInt32(numericUpDown1.Value);
            threshold2 = Convert.ToInt32(numericUpDown2.Value);

            imgOriginal = capWebcam.QueryFrame();

            img          = imgOriginal.ToImage<Bgr, byte>();
            imgGrayScale = img.Convert<Gray, byte>().ThresholdBinary(new Gray(threshold1), new Gray(threshold2));

            Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();

            Mat hier = new Mat();

            CvInvoke.FindContours(imgGrayScale, contours, hier, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

            // Loop-invariant: update the contour-count label once, so it also
            // refreshes (to "0") on frames with no contours.
            label4.Text = contours.Size.ToString();

            for (int i = 0; i < contours.Size; i++) // Loop over contours
            {
                double        perimeter = CvInvoke.ArcLength(contours[i], true);
                VectorOfPoint approx    = new VectorOfPoint();
                CvInvoke.ApproxPolyDP(contours[i], approx, 0.04 * perimeter, true);

                rect = CvInvoke.BoundingRectangle(contours[i]);

                // BUG FIX: Width and Height are ints, so the original
                // (double)(rect.Width / rect.Height) truncated to 0.0 or 1.0 before
                // the cast (and threw DivideByZeroException on zero-height rects).
                // Cast before dividing; guard the degenerate case.
                double ar = rect.Height != 0 ? (double)rect.Width / rect.Height : 0.0;

                // Centroid of the contour; fall back to the bounding-box centre for
                // degenerate contours where M00 (the area moment) is zero.
                var moment = CvInvoke.Moments(contours[i]);
                int x = moment.M00 != 0 ? (int)(moment.M10 / moment.M00) : rect.X + rect.Width / 2;
                int y = moment.M00 != 0 ? (int)(moment.M01 / moment.M00) : rect.Y + rect.Height / 2;

                if (approx.Size == 3) // 3 vertices: triangle.
                {
                    //CvInvoke.PutText(imgOriginal,"Triangle",new Point(x,y),Emgu.CV.CvEnum.FontFace.HersheySimplex,0.5, new MCvScalar(255,0,0),4);
                }

                if (approx.Size == 4) // 4 vertices: square vs. rectangle by aspect ratio.
                {
                    if ((ar >= 0.95) && (ar <= 1.05))
                    {
                        //CvInvoke.PutText(imgOriginal, "Square", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1, new MCvScalar(255, 0, 0), 3);
                    }
                    else
                    {
                        //CvInvoke.PutText(imgOriginal, "Rectangle", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 1, new MCvScalar(255, 0, 0), 3);
                    }
                }

                if (approx.Size > 5) // More than 5 vertices: treat as a circle.
                {
                    //CvInvoke.PutText(imgOriginal, "Circle", new Point(x + 100, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(255, 0, 0), 3);
                }

                CvInvoke.Rectangle(imgOriginal, rect, new MCvScalar(0, 255, 0), 3);

                // Convert the pixel box into real-world dimensions (_width/_height).
                FindDistanceA(rect.Width);
                FindDistanceB(rect.Height);

                CvInvoke.PutText(imgOriginal, "Width:" + _width.ToString("n2") + "cm", new Point(x, (y - rect.Height / 2) - 2), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.70, new MCvScalar(0, 0, 255), 2);
                CvInvoke.PutText(imgOriginal, "Height:" + _height.ToString("n2") + "cm", new Point((x + rect.Width / 2) + 2, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.70, new MCvScalar(0, 0, 255), 2);
            }

            imageBox1.Image = imgOriginal;
            imageBox2.Image = imgGrayScale;
        }
コード例 #38
0
        // Maximum Value, Minimum Value and their locations
        // Mean Color or Mean Intensity

        /// <summary>
        /// Computes regionprops-style metrics for the first contour of
        /// <paramref name="inputRegionIMG"/> whose area reaches
        /// <paramref name="AreaMin"/>: extreme points, centroid, aspect ratio,
        /// bounding box, extent, convex hull and solidity, equivalent diameter,
        /// enclosing circle, perimeter, circularity, convexity, polygonal
        /// approximation, rotated bounding rectangle and fitted ellipse.
        /// Results are stored in the class-level properties
        /// (Area, Centroid, BoundingBox, Extreme, ...).
        /// </summary>
        /// <param name="inputRegionIMG">Binary region image to analyse.</param>
        /// <param name="AreaMin">Minimum contour area (px²) to accept.</param>
        public void calcularRegionProps(Image<Gray, byte> inputRegionIMG, double AreaMin)
        {
            // Contour container and hierarchy output (hierarchy is not used afterwards).
            Emgu.CV.Util.VectorOfVectorOfPoint contours = new Emgu.CV.Util.VectorOfVectorOfPoint();
            Mat hierarquia = new Mat();

            CvInvoke.FindContours(
                inputRegionIMG                                       // input binary image
                , contours                                           // receives one point-vector per contour
                , hierarquia                                         // receives the contour hierarchy
                , Emgu.CV.CvEnum.RetrType.Tree                       // build the full hierarchy tree
                , Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone   // keep every contour point
                , new Point(0, 0)                                    // no offset
                );

            // There should normally be exactly one contour, but scan defensively
            // and analyse the first one whose area clears the threshold.

            // Extreme-point accumulators, seeded with the opposite corners so the
            // first comparison always wins.
            PointF leftmost   = new PointF(inputRegionIMG.Width, inputRegionIMG.Height);
            PointF rightmost  = new PointF(0, 0);
            PointF topmost    = new PointF(inputRegionIMG.Width, inputRegionIMG.Height);
            PointF bottommost = new PointF(0, 0);

            for (int i = 0; i < contours.Size; i++)
            {
                // Oriented contour area; Abs() makes winding order irrelevant.
                Area = Math.Abs(CvInvoke.ContourArea(contours[i], true));

                if (Area >= AreaMin)
                {
                    // ----------------- Extreme points -----------------
                    for (int iter = 0; iter < contours[i].Size; iter++)
                    {
                        Point p = contours[i][iter];
                        if (p.X < leftmost.X)
                        {
                            leftmost = p;       // leftmost point
                        }
                        if (p.X > rightmost.X)
                        {
                            rightmost = p;      // rightmost point
                        }
                        if (p.Y < topmost.Y)
                        {
                            topmost = p;        // topmost point (smallest Y)
                        }
                        if (p.Y > bottommost.Y)
                        {
                            bottommost = p;     // bottommost point (largest Y)
                        }
                    }

                    // ------------- Centroid ---------------------
                    Moments momento = CvInvoke.Moments(contours[i]);
                    // NOTE(review): M00 == 0 would divide by zero here; with
                    // Area >= AreaMin > 0 this cannot happen — confirm AreaMin is
                    // never passed as 0.
                    int X = (int)(momento.M10 / momento.M00);
                    int Y = (int)(momento.M01 / momento.M00);
                    Centroid = new PointF(X, Y);

                    // ------------ Aspect ratio ------------------
                    // BUG FIX: Width/Height are ints, so the original integer
                    // division truncated the ratio (e.g. 640/480 -> 1). Cast first.
                    // (Assumes AspectRatio is a floating-point property — confirm.)
                    AspectRatio = (float)inputRegionIMG.Width / inputRegionIMG.Height;

                    // ------------- Bounding box ------------------
                    BoundingBox = CvInvoke.BoundingRectangle(contours[i]);

                    // ------------   Extent: contour area / bounding-box area ----
                    float rect_area = BoundingBox.Width * BoundingBox.Height;
                    Extent = (float)Area / rect_area;

                    // --------------- Convex hull, its area, solidity ------------
                    CvInvoke.ConvexHull(contours[i], ConvexHull, false);
                    ConvexHull_area = CvInvoke.ContourArea(ConvexHull);
                    Solidity = Area / ConvexHull_area;

                    // -------------- Equivalent diameter -------------------------
                    // Diameter of the circle having the same area as the contour.
                    EquivalentDiameter = Math.Sqrt(4 * Area / Math.PI);

                    // --------------- Enclosing circle ---------------------------
                    CirculoEnvolvente = CvInvoke.MinEnclosingCircle(contours[i]);

                    // --------------- Perimeter ----------------------------------
                    perimetro = CvInvoke.ArcLength(contours[i], true);

                    // -------------- Circularity (shape factor, 1.0 = circle) ----
                    Circularity = (4 * Math.PI * Area) / (perimetro * perimetro);

                    // --------------- Convexity test -----------------------------
                    isConvex = CvInvoke.IsContourConvex(contours[i]);

                    // ------------- Polygonal approximation ----------------------
                    CvInvoke.ApproxPolyDP(
                        contours[i],                        // contour to simplify
                        ContourApproximation,               // receives the approximation
                        0.1 * perimetro,                    // epsilon: 10% of the perimeter
                        true                                // treat the contour as closed
                        );

                    // ------------- Expose the analysed contour ------------------
                    Contorno = contours[i];

                    // ------------  Rotated bounding rectangle  ------------------
                    RotatedRect retanguloRodado = CvInvoke.MinAreaRect(contours[i]);
                    PointF[]    vetorPontos     = CvInvoke.BoxPoints(retanguloRodado);
                    BoundingBoxRectRodado = new Point[vetorPontos.Length];
                    for (int j = 0; j < vetorPontos.Length; j++)
                    {
                        BoundingBoxRectRodado[j].X = (int)vetorPontos[j].X;
                        BoundingBoxRectRodado[j].Y = (int)vetorPontos[j].Y;
                    }
                    // ------------ Rotated-rectangle angle -----------------------
                    AnguloRectExterior = retanguloRodado.Angle;

                    // ------------ Fitted ellipse --------------------------------
                    EllipseValores = CvInvoke.FitEllipseAMS(contours[i]);

                    // Only the first qualifying contour is analysed (the original
                    // faked a break by setting the index past Size).
                    break;
                }
            }

            Extreme.Mais_a_esquerda = leftmost;
            Extreme.Mais_a_Direita  = rightmost;
            Extreme.Mais_em_baixo   = bottommost;
            Extreme.Mais_em_cima    = topmost;
        }
コード例 #39
0
ファイル: ImageExtension.cs プロジェクト: BrettHewitt/MWA
        /// <summary>
        /// Finds the contours of <paramref name="image"/>, simplifies each with
        /// ApproxPolyDP, and returns those whose area exceeds
        /// <paramref name="minimumArea"/>.
        /// </summary>
        /// <param name="image">Source grayscale image (not modified).</param>
        /// <param name="apxMethod">Chain approximation method for FindContours.</param>
        /// <param name="retrievalType">Contour retrieval mode.</param>
        /// <param name="accuracy">Epsilon passed to ApproxPolyDP.</param>
        /// <param name="minimumArea">Minimum area a simplified contour must exceed.</param>
        /// <returns>The simplified contours that pass the area filter.</returns>
        public static List<VectorOfPoint> GetContours(Image<Gray, Byte> image, ChainApproxMethod apxMethod = ChainApproxMethod.ChainApproxSimple, RetrType retrievalType = RetrType.List, double accuracy = 0.001d, double minimumArea = 10)
        {
            List<VectorOfPoint> accepted = new List<VectorOfPoint>();

            using (VectorOfVectorOfPoint rawContours = new VectorOfVectorOfPoint())
            {
                // FindContours mutates its input, so run it on a throwaway copy.
                using (Image<Gray, Byte> scratch = image.Copy())
                {
                    CvInvoke.FindContours(scratch, rawContours, null, retrievalType, apxMethod);
                }

                for (int index = 0; index < rawContours.Size; index++)
                {
                    using (VectorOfPoint raw = rawContours[index])
                    {
                        VectorOfPoint simplified = new VectorOfPoint();
                        CvInvoke.ApproxPolyDP(raw, simplified, accuracy, false);

                        // Keep only polygons whose (unoriented) area clears the threshold.
                        if (CvInvoke.ContourArea(simplified, false) > minimumArea)
                        {
                            accepted.Add(simplified);
                        }
                    }
                }
            }

            return accepted;
        }
コード例 #40
0
    // Update is called once per frame
    void Update()
    {
        IsAvailable = _sensor.IsAvailable;

        if (depthFrameReader != null)
        {
            var frame = depthFrameReader.AcquireLatestFrame();

            if (frame != null)
            {
                frame.CopyFrameDataToArray(rawDepthPixels);

                //Primero acoto los limites de la mesa
                if (!edgesDetected)
                {
                    //Grafico las profundidades para detectar los bordes de la mesa
                    for (int depth = 0; depth < rawDepthPixels.Length; depth++)
                    {
                        depthPixel = rawDepthPixels[depth];
                        if (depthPixel > MIN_DEPTH && depthPixel < MAX_DEPTH)
                        {
                            colorImage[depth * 3]     = 255;
                            colorImage[depth * 3 + 1] = 255;
                            colorImage[depth * 3 + 2] = 255;
                        }
                        else
                        {
                            colorImage[depth * 3]     = 0;
                            colorImage[depth * 3 + 1] = 0;
                            colorImage[depth * 3 + 2] = 0;
                        }
                    }
                    frameOpenCV.SetTo(colorImage);
                    UMat uimage = new UMat();
                    CvInvoke.CvtColor(frameOpenCV, uimage, ColorConversion.Bgr2Gray);

                    //Suavizo los puntos pequeños
                    Mat erodeElement  = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(3, 3), new System.Drawing.Point(-1, -1));
                    Mat dilateElement = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(10, 10), new System.Drawing.Point(-1, -1));

                    MCvScalar scalarD = new MCvScalar(5, 5);
                    CvInvoke.Erode(uimage, uimage, erodeElement, new System.Drawing.Point(-1, -1), 4, BorderType.Constant, scalarD);
                    CvInvoke.Dilate(uimage, uimage, dilateElement, new System.Drawing.Point(-1, -1), 2, BorderType.Constant, scalarD);


                    //Busco contornos
                    edgesTable = new Emgu.CV.Util.VectorOfVectorOfPoint();
                    Mat heir = new Mat();
                    Image <Rgb, byte> imgout = new Image <Rgb, byte>(frameOpenCV.Width, frameOpenCV.Height, new Rgb(200, 200, 200));
                    CvInvoke.FindContours(uimage, edgesTable, heir, Emgu.CV.CvEnum.RetrType.Ccomp, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

                    double maxArea = 0;
                    for (int i = 0; i < edgesTable.Size; i++)
                    {
                        var moment = CvInvoke.Moments(edgesTable[i]);
                        area = moment.M00;
                        //Me quedo con el area mas grande que es la mesa de ping pong
                        if (area > maxArea)
                        {
                            //PERO tengo que descartar el area que es todo el cuadrado(el frame de la imagen)
                            if (area < WIDTH * HEIGHT * 0.22 && area > WIDTH * HEIGHT * 0.18)
                            {
                                maxArea   = area;
                                indexArea = i;
                            }
                        }
                    }

                    for (int i = 0; i < edgesTable[indexArea].Size; i++)
                    {
                        //Encuentro el X mas bajo y alto, lo mismo para la Y
                        if (edgesTable[indexArea][i].X > maxX)
                        {
                            maxX = edgesTable[indexArea][i].X + desborde;
                        }
                        if (edgesTable[indexArea][i].X < minX)
                        {
                            minX = edgesTable[indexArea][i].X - desborde;
                        }
                        if (edgesTable[indexArea][i].Y > maxY)
                        {
                            maxY = edgesTable[indexArea][i].Y + desborde;
                        }
                        if (edgesTable[indexArea][i].Y < minY)
                        {
                            minY = edgesTable[indexArea][i].Y - desborde;
                        }
                    }

                    CvInvoke.DrawContours(imgout, edgesTable, indexArea, new MCvScalar(255, 0, 0), 1);
                    CvInvoke.Circle(imgout, new System.Drawing.Point(minX, minY), 2, colorDetected, 2);
                    CvInvoke.Circle(imgout, new System.Drawing.Point(minX, maxY), 2, colorDetected, 2);
                    CvInvoke.Circle(imgout, new System.Drawing.Point(maxX, minY), 2, colorDetected, 2);
                    CvInvoke.Circle(imgout, new System.Drawing.Point(maxX, maxY), 2, colorDetected, 2);
                    edgesDetected = true;
                }


                //Despues mapeo la profundidad de la mesa
                if (!depthMapped && edgesDetected)
                {
                    //Cargo por unica vez la matriz de configuracion de profundidad
                    if (listConfig.Count < CONFIG_ITERACIONES)
                    {
                        var configDepth = new int[WIDTH * HEIGHT];
                        for (int row = minY; row < maxY; row++)
                        {
                            for (int col = minX; col < maxX; col++)
                            {
                                //transformo un fila columna en su equivalente de vector
                                depthPixel = rawDepthPixels[(row * WIDTH) + (col)];
                                if (depthPixel > MIN_DEPTH && depthPixel < MAX_DEPTH)
                                {
                                    configDepth[(row * WIDTH) + (col)] = depthPixel;
                                }
                                else
                                {
                                    //Le pongo 700 para que no se vaya a valor muy bajo con el -1 y no arruine el prom
                                    configDepth[(row * WIDTH) + (col)] = MAX_DEPTH - 200;
                                }
                            }
                        }

                        listConfig.Add(configDepth);
                        if (frame != null)
                        {
                            frame.Dispose();
                            frame = null;
                        }
                        return;
                    }


                    //Una vez que hizo las pasadas de configuracion saco el promedio
                    if (listConfig.Count == CONFIG_ITERACIONES)
                    {
                        //Saco el promedio para cada punto.
                        foreach (var item in listConfig)
                        {
                            for (int depth = 0; depth < averageDepthConfig.Length; depth++)
                            {
                                averageDepthConfig[depth] += item[depth];
                            }
                        }

                        for (int depth = 0; depth < averageDepthConfig.Length; depth++)
                        {
                            averageDepthConfig[depth] /= CONFIG_ITERACIONES;
                        }

                        depthMapped = true;
                        //Y limpio la matriz para que quede todo en negro.
                        for (int i = 0; i < colorImage.Length; i += 3)
                        {
                            colorImage[i + 0] = 0;
                            colorImage[i + 1] = 0;
                            colorImage[i + 2] = 0;
                        }
                    }
                }
                //Recien ahora puedo empezar a detectar profundidades y piques
                if (edgesDetected && depthMapped)
                {
                    for (int row = minY; row < maxY; row++)
                    {
                        for (int col = minX; col < maxX; col++)
                        {
                            //transformo un fila columna en su equivalente de vector
                            depthPixel = rawDepthPixels[(row * WIDTH) + (col)];
                            if (depthPixel > MIN_DEPTH && depthPixel < MAX_DEPTH && depthPixel < averageDepthConfig[(row * WIDTH) + (col)] - 5)
                            {
                                colorImage[(row * WIDTH * 3) + (col * 3) + 0] = 255;
                                colorImage[(row * WIDTH * 3) + (col * 3) + 1] = 255;
                                colorImage[(row * WIDTH * 3) + (col * 3) + 2] = 255;
                            }
                            else
                            {
                                colorImage[(row * WIDTH * 3) + (col * 3) + 0] = 0;
                                colorImage[(row * WIDTH * 3) + (col * 3) + 1] = 0;
                                colorImage[(row * WIDTH * 3) + (col * 3) + 2] = 0;
                            }
                        }
                    }

                    //Transformo mis pixeles en un formato OPENCV
                    frameOpenCV.SetTo(colorImage);
                    UMat uimage = new UMat();
                    CvInvoke.CvtColor(frameOpenCV, uimage, ColorConversion.Bgr2Gray);

                    //CvInvoke.Imshow("kinect camera", frameOpenCV);
                    //Suavizo los puntos pequeños
                    Mat erodeElement  = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(3, 3), new System.Drawing.Point(-1, -1));
                    Mat dilateElement = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new System.Drawing.Size(5, 5), new System.Drawing.Point(-1, -1));

                    MCvScalar scalarD = new MCvScalar(5, 5);
                    CvInvoke.Erode(uimage, uimage, erodeElement, new System.Drawing.Point(-1, -1), 2, BorderType.Constant, scalarD);
                    CvInvoke.Dilate(uimage, uimage, dilateElement, new System.Drawing.Point(-1, -1), 4, BorderType.Constant, scalarD);

                    //CvInvoke.Imshow("Vision OPENCV", uimage);


                    //Busco contornos
                    Emgu.CV.Util.VectorOfVectorOfPoint countors = new Emgu.CV.Util.VectorOfVectorOfPoint();
                    Mat heir = new Mat();
                    Image <Rgb, byte> imgout = new Image <Rgb, byte>(frameOpenCV.Width, frameOpenCV.Height, new Rgb(200, 200, 200));
                    CvInvoke.FindContours(uimage, countors, heir, Emgu.CV.CvEnum.RetrType.Ccomp, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);



                    for (int i = 0; i < countors.Size; i++)
                    {
                        var moment = CvInvoke.Moments(countors[i]);
                        area = moment.M00;
                        //PENSAR ALGO MAS SELECTIVO QUE DESCARTE OBJETOS QUE NO SEAN CIRCULOS
                        if (area > MIN_OBJECT_AREA && area < MAX_OBJECT_AREA)
                        {
                            x = (int)(moment.M10 / area);
                            y = (int)(moment.M01 / area);
                            CvInvoke.DrawContours(imgout, countors, i, new MCvScalar(255, 0, 0), 1);
                            break;
                        }
                    }

                    if (x != 0 && y != 0)
                    {
                        int centerDepth = rawDepthPixels[y * WIDTH + x];


                        control.Add(centerDepth - averageDepthConfig[y * WIDTH + x]);

                        // AddTrajectory(false, new System.Drawing.Point(x, y));

                        //Es un pique solo si la diferencia entre la mesa y la pelota es minima, la mesa puede estar inclinada
                        if (centerDepth < averageDepthConfig[y * WIDTH + x] - 5 && centerDepth > averageDepthConfig[y * WIDTH + x] - DEPTH_TOL)
                        {
                            //Se detecto un pique
                            if (centerDepth - beforeCenterDepth >= 0)
                            {
                                if (centerDepth - beforeCenterDepth != 0)
                                {
                                    confirmacion = false;
                                }

                                System.Console.WriteLine("NO Pico" + " BF " + beforeCenterDepth + " CD " + centerDepth + " confirmacion: " + confirmacion);
                            }
                            else
                            {
                                if (!confirmacion)
                                {
                                    System.Console.WriteLine("Pico" + " BF " + beforeCenterDepth + " CD " + centerDepth + " confirmacion: " + confirmacion);
                                    debugBounces.Add(new System.Drawing.Point(beforeX, beforeY));
                                    confirmacion = true;
                                }
                            }
                            beforeCenterDepth = centerDepth;
                            beforeX           = x;
                            beforeY           = y;
                            ball.position     = new Vector3(beforeX + 25, beforeY + 25, 0);
                        }

                        CvInvoke.Circle(imgout, new System.Drawing.Point(x, y), 20, colorDetected, 6);
                        CvInvoke.PutText(imgout, ((double)centerDepth / 1000).ToString("F") + "m", new System.Drawing.Point(x - 38, y + 50), FontFace.HersheyPlain, 1.3, colorBounce, 2);

                        x = 0;
                        y = 0;
                    }

                    foreach (var item in debugBounces)
                    {
                        CvInvoke.Circle(imgout, new System.Drawing.Point(item.X, item.Y), 10, colorBounce, 2);
                    }


                    if (debugBounces.Count > 1)
                    {
                        debugBounces.RemoveAt(0);
                    }
                    foreach (var item in trajectory)
                    {
                        CvInvoke.Circle(imgout, new System.Drawing.Point(item.X, item.Y), 10, colorBounce, 2);
                    }
                    CvInvoke.DrawContours(imgout, edgesTable, indexArea, new MCvScalar(255, 0, 0), 1);
                    CvInvoke.Imshow("Deteccion", imgout);
                }
                if (frame != null)
                {
                    frame.Dispose();
                    frame = null;
                }
            }

            if (frame != null)
            {
                frame.Dispose();
                frame = null;
            }
        }
    }
コード例 #41
0
        private Drawing.Bitmap GetMaskedBitmap(string imagePath, IList<Point> pointCollection)
        {
            Mat matrix = new Mat(imagePath, LoadImageType.AnyColor);
            UMat uMatrix = matrix.ToUMat(AccessType.ReadWrite);

            // Scale Polygon
            List<Point> scaledPoints = GetScaledPoints(pointCollection, uMatrix.Rows, uMatrix.Cols);

            polygonPoints = GetPolygonPoints(scaledPoints, uMatrix.Rows, uMatrix.Cols);

            // Apply Polygon
            using (VectorOfPoint vPoint = new VectorOfPoint(polygonPoints.ToArray()))
            using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
            {
                CvInvoke.FillPoly(uMatrix, vvPoint, new Bgr(0, 0, 0).MCvScalar);
            }

            // Crop Bitmap
            int left = (int)scaledPoints.Min(p => p.X);
            int top = (int)scaledPoints.Min(p => p.Y);
            int width = (int)scaledPoints.Max(p => p.X) - left;
            int height = (int)scaledPoints.Max(p => p.Y) - top;

            Image<Bgr, byte> image = new Image<Bgr, byte>(uMatrix.Bitmap);
            image.ROI = new Drawing.Rectangle(left, top, width, height);

            return image.Bitmap;
        }
コード例 #42
0
ファイル: CvInvokeCvextern.cs プロジェクト: KaganRoman/Eval
      /*
      public static void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, MCvScalar color)
      {
         TestDrawLine(img, startX, startY, endX, endY, color.v0, color.v1, color.v2, color.v3);
      }

      [DllImport(CvInvoke.EXTERN_LIBRARY, CallingConvention = CvInvoke.CvCallingConvention, EntryPoint="testDrawLine")]
      private static extern void TestDrawLine(IntPtr img, int startX, int startY, int endX, int endY, double v0, double v1, double v2, double v3);
      */

      /// <summary>
      /// Runs chamfer matching of an edge template against an edge image, scoring candidate
      /// locations by both the distance from template pixels to the nearest image pixels and
      /// the orientation alignment between template and image contours.
      /// </summary>
      /// <param name="img">The edge image where search is performed</param>
      /// <param name="templ">The template (an edge image)</param>
      /// <param name="contours">The output contours</param>
      /// <param name="cost">The cost associated with the matching</param>
      /// <param name="templScale">The template scale, use 1 for default</param>
      /// <param name="maxMatches">The maximum number of matches, use 20 for default</param>
      /// <param name="minMatchDistance">The minimum match distance. use 1.0 for default</param>
      /// <param name="padX">PadX, use 3 for default</param>
      /// <param name="padY">PadY, use 3 for default</param>
      /// <param name="scales">Scales, use 5 for default</param>
      /// <param name="minScale">Minimum scale, use 0.6 for default</param>
      /// <param name="maxScale">Maximum scale, use 1.6 for default</param>
      /// <param name="orientationWeight">Orientation weight, use 0.5 for default</param>
      /// <param name="truncate">Truncate, use 20 for default</param>
      /// <returns>The number of matches</returns>
      public static int cvChamferMatching(Image<Gray, Byte> img, Image<Gray, Byte> templ,
         out Point[][] contours, out float[] cost,
         double templScale, int maxMatches,
         double minMatchDistance, int padX,
         int padY, int scales, double minScale, double maxScale,
         double orientationWeight, double truncate)
      {
         using (Emgu.CV.Util.VectorOfVectorOfPoint contourVec = new Util.VectorOfVectorOfPoint())
         using (Emgu.CV.Util.VectorOfFloat costVec = new Util.VectorOfFloat())
         {
            int matchCount = _cvChamferMatching(
               img, templ, contourVec, costVec,
               templScale, maxMatches, minMatchDistance,
               padX, padY, scales, minScale, maxScale,
               orientationWeight, truncate);
            // Copy the native vectors into managed arrays before the using blocks dispose them.
            contours = contourVec.ToArray();
            cost = costVec.ToArray();
            return matchCount;
         }
      }
コード例 #43
0
      /// <summary>
      /// Walks the contour hierarchy starting at <paramref name="idx"/> and collects contours
      /// that look like stop-sign candidates: octagon-shaped by contour matching, then confirmed
      /// by keypoint matching against the model descriptors.
      /// </summary>
      /// <param name="img">The source image the contours were extracted from.</param>
      /// <param name="stopSignList">Receives the cropped grayscale candidate patches that matched.</param>
      /// <param name="boxList">Receives the bounding boxes of the matched candidates.</param>
      /// <param name="contours">All contours found in the image.</param>
      /// <param name="hierachy">Contour hierarchy: [i,0] = next sibling, [i,2] = first child.</param>
      /// <param name="idx">Index of the first contour to examine at this level.</param>
      private void FindStopSign(Mat img, List<Mat> stopSignList, List<Rectangle> boxList, VectorOfVectorOfPoint contours, int[,] hierachy, int idx)
      {
         // Iterate over this contour and all of its siblings.
         for (; idx >= 0; idx = hierachy[idx, 0])
         {
            using (VectorOfPoint c = contours[idx])
            using (VectorOfPoint approx = new VectorOfPoint())
            {
               CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * 0.02, true);
               double area = CvInvoke.ContourArea(approx);
               if (area > 200)
               {
                  double ratio = CvInvoke.MatchShapes(_octagon, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);

                  if (ratio > 0.1) //not a good match of contour shape
                  {
                     //check children
                     if (hierachy[idx, 2] >= 0)
                        FindStopSign(img, stopSignList, boxList, contours, hierachy, hierachy[idx, 2]);
                     continue;
                  }

                  Rectangle box = CvInvoke.BoundingRectangle(c);

                  // Crop the candidate region and convert it to grayscale.
                  Mat candidate = new Mat();
                  using (Mat tmp = new Mat(img, box))
                     CvInvoke.CvtColor(tmp, candidate, ColorConversion.Bgr2Gray);

                  //set the value of pixels not in the contour region to zero
                  using (Mat mask = new Mat(candidate.Size.Height, candidate.Width, DepthType.Cv8U, 1))
                  {
                     mask.SetTo(new MCvScalar(0));
                     CvInvoke.DrawContours(mask, contours, idx, new MCvScalar(255), -1, LineType.EightConnected, null, int.MaxValue, new Point(-box.X, -box.Y));

                     double mean = CvInvoke.Mean(candidate, mask).V0;
                     CvInvoke.Threshold(candidate, candidate, mean, 255, ThresholdType.Binary);
                     CvInvoke.BitwiseNot(candidate, candidate);
                     CvInvoke.BitwiseNot(mask, mask);

                     candidate.SetTo(new MCvScalar(0), mask);
                  }

                  bool accepted = false;
                  int minMatchCount = 8;
                  double uniquenessThreshold = 0.8;
                  // Keypoints and descriptors are disposed when done (they leaked before).
                  using (VectorOfKeyPoint observedKeypoints = new VectorOfKeyPoint())
                  using (Mat observedDescriptor = new Mat())
                  {
                     _detector.DetectAndCompute(candidate, null, observedKeypoints, observedDescriptor, false);

                     if (observedKeypoints.Size >= minMatchCount)
                     {
                        int k = 2;
                        using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch())
                        {
                           _modelDescriptorMatcher.KnnMatch(observedDescriptor, matches, k, null);
                           // Vote mask starts all-on; VoteForUniqueness clears ambiguous matches.
                           // (This mask was never disposed before.)
                           using (Mat voteMask = new Mat(matches.Size, 1, DepthType.Cv8U, 1))
                           {
                              voteMask.SetTo(new MCvScalar(255));
                              Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, voteMask);

                              if (CvInvoke.CountNonZero(voteMask) >= minMatchCount)
                              {
                                 boxList.Add(box);
                                 stopSignList.Add(candidate);
                                 accepted = true;
                              }
                           }
                        }
                     }
                  }

                  // Rejected candidates leaked native memory before; release them now.
                  // Accepted candidates are owned by stopSignList and must stay alive.
                  if (!accepted)
                     candidate.Dispose();
               }
            }
         }
      }
コード例 #44
0
      /// <summary>
      /// Detects stop-sign candidates in <paramref name="img"/>: smooths the image, masks the
      /// red pixels, closes small contour gaps with dilate/erode, extracts Canny edges and hands
      /// the resulting contour tree to <see cref="FindStopSign"/>.
      /// </summary>
      /// <param name="img">The BGR input image.</param>
      /// <param name="stopSignList">Receives the cropped candidate patches.</param>
      /// <param name="boxList">Receives the bounding boxes of the candidates.</param>
      public void DetectStopSign(Mat img, List<Mat> stopSignList, List<Rectangle> boxList)
      {
         // The smoothed image and the red mask are native Mats; dispose them (they leaked before).
         using (Mat smoothImg = new Mat())
         using (Mat smoothedRedMask = new Mat())
         {
            CvInvoke.GaussianBlur(img, smoothImg, new Size(5, 5), 1.5, 1.5);

            GetRedPixelMask(smoothImg, smoothedRedMask);

            //Use Dilate followed by Erode to eliminate small gaps in some contour.
            CvInvoke.Dilate(smoothedRedMask, smoothedRedMask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            CvInvoke.Erode(smoothedRedMask, smoothedRedMask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);

            using (Mat canny = new Mat())
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
               // NOTE: the thresholds are given in reverse order; OpenCV uses the smaller of the
               // two for edge linking, so this behaves the same as Canny(..., 50, 100).
               CvInvoke.Canny(smoothedRedMask, canny, 100, 50);
               int[,] hierachy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);

               if (hierachy.GetLength(0) > 0)
                  FindStopSign(img, stopSignList, boxList, contours, hierachy, 0);
            }
         }
      }
コード例 #45
0
ファイル: MainForm.cs プロジェクト: Lerbytech/ShapeDetection
        /// <summary>
        /// Finds contours in a binary image and sorts the large ones into quadrilateral
        /// "black borders" and everything else, while dumping debug images of the raw and
        /// approximated contour corner points to disk.
        /// </summary>
        /// <param name="src">Binary (grayscale) source image to find contours in.</param>
        /// <param name="blackborders">Receives contours whose polygon approximation has exactly 4 corners.</param>
        /// <param name="Black_boxList">Receives the minimum-area rotated rectangle of each 4-corner contour.</param>
        /// <param name="othercontours_black">Receives large contours that are not quadrilaterals.</param>
        public void getBlackContours(Image<Gray, Byte> src, VectorOfVectorOfPoint blackborders, List<RotatedRect> Black_boxList, VectorOfVectorOfPoint othercontours_black)
        {
            // All GDI objects are now disposed (the bitmaps, graphics objects and brushes leaked
            // before, including the two intermediate bitmaps returned by src.ToBitmap()).
            using (Bitmap srcBitmapGood = src.ToBitmap())
            using (Bitmap srcBitmapBad = src.ToBitmap())
            using (Bitmap TMPGood = new Bitmap(srcBitmapGood, src.Width, src.Height))
            using (Bitmap TMPBad = new Bitmap(srcBitmapBad, src.Width, src.Height))
            using (Graphics gGood = Graphics.FromImage(TMPGood))
            using (Graphics gBad = Graphics.FromImage(TMPBad))
            using (Brush RedBrush = new SolidBrush(Color.Red))
            using (Brush GreenBrush = new SolidBrush(Color.Green))
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                CvInvoke.FindContours(src, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
                for (int i = 0; i < contours.Size; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                    using (VectorOfPoint approxContour = new VectorOfPoint())
                    {
                        // Debug: mark every raw contour point in red on the "BAD" image.
                        Point[] ptsContour = contour.ToArray();
                        for (int k = 0; k < ptsContour.Length; k++)
                        {
                            gBad.FillEllipse(RedBrush, ptsContour[k].X, ptsContour[k].Y, 6, 6);
                        }

                        CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                        if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with area greater than 250
                        {
                            // Debug: mark the approximated corners in green on the "GOOD" image.
                            Point[] ptsApprox = approxContour.ToArray();
                            for (int k = 0; k < ptsApprox.Length; k++)
                            {
                                gGood.FillEllipse(GreenBrush, ptsApprox[k].X, ptsApprox[k].Y, 6, 6);
                            }

                            // Area was already checked above, so only the corner count decides here
                            // (the original repeated the identical area test redundantly).
                            if (approxContour.Size == 4)
                            {
                                Black_boxList.Add(CvInvoke.MinAreaRect(approxContour));
                                blackborders.Push(contour);
                            }
                            else
                            {
                                othercontours_black.Push(contour);
                            }
                        }
                    }
                }

                TMPGood.Save("C:\\Emgu\\Dump\\Black contour corners GOOD.png", System.Drawing.Imaging.ImageFormat.Png);
                TMPBad.Save("C:\\Emgu\\Dump\\Black contour corners BAD.png", System.Drawing.Imaging.ImageFormat.Png);
            }
        }
コード例 #46
-1
ファイル: ERFilter.cs プロジェクト: Delaley/emgucv
      /// <summary>
      /// Find groups of Extremal Regions that are organized as text blocks.
      /// </summary>
      /// <param name="image">The image where ER grouping is to be perform on</param>
      /// <param name="channels">Array of single channel images from which the regions were extracted</param>
      /// <param name="erstats">Vector of ER’s retrieved from the ERFilter algorithm from each channel</param>
      /// <param name="groupingTrainedFileName">The XML or YAML file with the classifier model (e.g. trained_classifier_erGrouping.xml)</param>
      /// <param name="minProbability">The minimum probability for accepting a group.</param>
      /// <param name="groupMethods">The grouping methods</param>
      /// <returns>The output of the algorithm that indicates the text regions</returns>
      public static System.Drawing.Rectangle[] ERGrouping(IInputArray image, IInputArrayOfArrays channels, VectorOfERStat[] erstats, GroupingMethod groupMethods = GroupingMethod.OrientationHoriz, String groupingTrainedFileName = null, float minProbability = 0.5f)
      {
         // Collect the native pointers of the per-channel ER stat vectors.
         IntPtr[] erstatPtrs = new IntPtr[erstats.Length];

         for (int i = 0; i < erstatPtrs.Length; i++)
         {
            erstatPtrs[i] = erstats[i].Ptr;
         }

         using (VectorOfVectorOfPoint regionGroups = new VectorOfVectorOfPoint())
         using (VectorOfRect groupsBoxes = new VectorOfRect())
         using (InputArray iaImage = image.GetInputArray())
         using (InputArray iaChannels = channels.GetInputArray())
         using (CvString s = (groupingTrainedFileName == null ? new CvString() : new CvString(groupingTrainedFileName)))
         {
            // Pin the pointer array for the native call; the finally block guarantees the
            // handle is released even if the native call throws (it leaked on exception before).
            GCHandle erstatsHandle = GCHandle.Alloc(erstatPtrs, GCHandleType.Pinned);
            try
            {
               CvERGrouping(
                  iaImage, iaChannels,
                  erstatsHandle.AddrOfPinnedObject(), erstatPtrs.Length,
                  regionGroups, groupsBoxes,
                  groupMethods,
                  s, minProbability);
            }
            finally
            {
               erstatsHandle.Free();
            }
            return groupsBoxes.ToArray();
         }
      }
コード例 #47
-1
        /// <summary>
        /// Computes the average BGR color inside each configured polygon area for two reference
        /// images ("snow" and "normal") and persists the serialized results on the polygon records.
        /// </summary>
        /// <param name="reference0">Path of the "snow" reference image.</param>
        /// <param name="reference1">Path of the "normal" reference image.</param>
        /// <param name="dataContext">Data context used to load image/polygon records and save results.</param>
        public void CalculateAverageBrightessForArea(string reference0, string reference1, StrassenbilderMetaDataContext dataContext)
        {
            // Load image metadata by file name (without extension).
            string name0 = Path.GetFileNameWithoutExtension(reference0);
            string name1 = Path.GetFileNameWithoutExtension(reference1);
            Image image0 = dataContext.Images.Where(i => i.Name == name0).FirstOrDefault();
            // NOTE(review): image1 is loaded but never used below — confirm whether the query can be removed.
            Image image1 = dataContext.Images.Where(i => i.Name == name1).FirstOrDefault();

            // Load all polygons configured for the camera that took image0.
            IEnumerable<Polygon> polygons = dataContext.Polygons.Where(p => p.CameraName == image0.Place);

            // Process each polygon/mask.
            foreach (var polygon in polygons)
            {
                IList<Point> polygonPoints = JsonConvert.DeserializeObject<Media.PointCollection>(polygon.PolygonPointCollection);

                // Load the masked images; all bitmap/image/mask resources are disposed per
                // iteration now (they all leaked before).
                using (Drawing.Bitmap bitmap0 = GetMaskedBitmap(reference0, polygonPoints))
                using (Drawing.Bitmap bitmap1 = GetMaskedBitmap(reference1, polygonPoints))
                using (Image<Bgr, byte> cvImage0 = new Image<Bgr, byte>(bitmap0))
                using (Image<Bgr, byte> cvImage1 = new Image<Bgr, byte>(bitmap1))
                using (Mat matMask = new Mat(new Drawing.Size(cvImage0.Cols, cvImage0.Rows), DepthType.Cv8U, 3))
                {
                    // Scale the polygon to the image size and draw it in white on the mask.
                    List<Point> scaledPoints = GetScaledPoints(polygonPoints, cvImage0.Rows, cvImage0.Cols);
                    List<Drawing.Point> scaledDrawingPoints = GetPolygonPoints(scaledPoints, cvImage0.Rows, cvImage0.Cols);
                    using (VectorOfPoint vPoint = new VectorOfPoint(scaledDrawingPoints.ToArray()))
                    using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
                    {
                        CvInvoke.FillPoly(matMask, vvPoint, new Bgr(255, 255, 255).MCvScalar);
                    }

                    // The intermediate bitmap from matMask.Bitmap is disposed as well.
                    using (var maskBitmap = matMask.Bitmap)
                    using (Image<Gray, byte> imageMask = new Image<Gray, byte>(maskBitmap))
                    {
                        // Average color under the mask for both reference images.
                        Bgr result0 = cvImage0.GetAverage(imageMask);
                        Bgr result1 = cvImage1.GetAverage(imageMask);
                        // Persist the results on the polygon record.
                        polygon.BgrSnow = JsonConvert.SerializeObject(result0);
                        polygon.BgrNormal = JsonConvert.SerializeObject(result1);
                        dataContext.SubmitChanges();
                    }
                }
            }
        }
コード例 #48
-1
        /// <summary>
        /// Classifies the polygon area of an image as snow, normal or undecided by comparing its
        /// average color against the polygon's stored snow/normal reference colors.
        /// </summary>
        /// <param name="imageFilePath">Path of the image to classify.</param>
        /// <param name="polygon">Polygon carrying the serialized reference colors (BgrSnow / BgrNormal).</param>
        /// <param name="pointCollection">Polygon points describing the area of interest.</param>
        /// <returns>1 = closer to snow, -1 = closer to normal, 0 = inconclusive (difference &lt; 10).</returns>
        public short Calculate(string imageFilePath, Polygon polygon, Media.PointCollection pointCollection)
        {
            Bgr result;
            // All image/mask resources are disposed now (they leaked before).
            using (Image<Bgr, byte> cvImage = new Image<Bgr, byte>(imageFilePath))
            using (Mat matMask = new Mat(new Drawing.Size(cvImage.Cols, cvImage.Rows), DepthType.Cv8U, 3))
            {
                // Scale the polygon to the image size and draw it in white on the mask.
                List<Point> scaledPoints = GetScaledPoints(pointCollection, cvImage.Rows, cvImage.Cols);
                List<Drawing.Point> scaledDrawingPoints = GetPolygonPoints(scaledPoints, cvImage.Rows, cvImage.Cols);
                using (VectorOfPoint vPoint = new VectorOfPoint(scaledDrawingPoints.ToArray()))
                using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
                {
                    CvInvoke.FillPoly(matMask, vvPoint, new Bgr(255, 255, 255).MCvScalar);
                }

                // The intermediate bitmap from matMask.Bitmap is disposed as well.
                using (var maskBitmap = matMask.Bitmap)
                using (Image<Gray, byte> imageMask = new Image<Gray, byte>(maskBitmap))
                {
                    // Average color inside the masked region.
                    result = cvImage.GetAverage(imageMask);
                }
            }

            // Compare against the stored reference colors (Manhattan distance in BGR space).
            Bgr snow = JsonConvert.DeserializeObject<Bgr>(polygon.BgrSnow);
            Bgr normal = JsonConvert.DeserializeObject<Bgr>(polygon.BgrNormal);

            double resultSnow = Math.Abs(snow.Blue - result.Blue) + Math.Abs(snow.Green - result.Green) + Math.Abs(snow.Red - result.Red);
            double resultNormal = Math.Abs(normal.Blue - result.Blue) + Math.Abs(normal.Green - result.Green) + Math.Abs(normal.Red - result.Red);

            if (Math.Abs(resultSnow - resultNormal) < 10)
            {
                return 0;
            }
            else if (resultSnow < resultNormal)
            {
                return 1;
            }
            else
            {
                return -1;
            }
        }