FindContours() public static method

Finds contours in a binary image.
public static FindContours ( InputOutputArray image, out Mat[] contours, OutputArray hierarchy, RetrievalModes mode, ContourApproximationModes method, Point? offset = null ) : void
image InputOutputArray Source, an 8-bit single-channel image. Non-zero pixels are treated as 1’s. Zero pixels remain 0’s, so the image is treated as binary. The function modifies the image while extracting the contours.
contours Mat[] Detected contours. Each contour is stored as a vector of points.
hierarchy OutputArray Optional output vector, containing information about the image topology. It has as many elements as the number of contours. For each i-th contour contours[i], the elements of hierarchy[i] are set to the 0-based indices in contours of the next and previous contours at the same hierarchical level, the first child contour and the parent contour, respectively. If for contour i there are no next, previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
mode RetrievalModes Contour retrieval mode
method ContourApproximationModes Contour approximation method
offset Point Optional offset by which every contour point is shifted. This is useful if the contours are extracted from the image ROI and then they should be analyzed in the whole image context.
return void
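
A minimal usage sketch (a hedged example, not taken from the projects below: it uses the OpenCvSharp overload that returns Point[][] and HierarchyIndex[], as the examples below do, and "input.png" is a placeholder file name):

using System;
using OpenCvSharp;

class FindContoursExample
{
    static void Main()
    {
        // FindContours expects an 8-bit binary image, so load as grayscale and threshold.
        using (Mat src = Cv2.ImRead("input.png", ImreadModes.Grayscale))
        using (Mat bin = new Mat())
        {
            Cv2.Threshold(src, bin, 128, 255, ThresholdTypes.Binary);

            // This overload returns the contours as Point[][] and the topology as HierarchyIndex[].
            // Note that the input image may be modified while the contours are extracted.
            Point[][] contours;
            HierarchyIndex[] hierarchy;
            Cv2.FindContours(bin, out contours, out hierarchy,
                RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);

            // hierarchy[i].Next / Previous / Child / Parent hold 0-based indices into
            // contours, or -1 when the corresponding contour does not exist.
            for (int i = 0; i < contours.Length; i++)
            {
                Console.WriteLine($"contour {i}: {contours[i].Length} points, " +
                                  $"parent = {hierarchy[i].Parent}, child = {hierarchy[i].Child}");
            }
        }
    }
}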
Code Example #1
File: ImageEngine.cs Project: ultranoobian/Rosetta
        public static int ExtractTables(Image img)
        {
            //Delete this after
            Bitmap bit;


            List <Rectangle> AreasOfInterest = new List <Rectangle>();
            Bitmap           bitmap          = (Bitmap)img;

            Mat srcImg = OCS.Extensions.BitmapConverter.ToMat(bitmap);

            if (srcImg.Data == null)
            {
                throw new NullReferenceException("Image has nothing?");
            }

            //Resize into smaller size
            Mat rsz = new Mat();

            OpenCvSharp.Size size = new OpenCvSharp.Size(4000, 2828);
            Cv2.Resize(srcImg, rsz, size);

            // Convert to greyscale (the resized image is assumed to be BGR;
            // a single-channel input could be used directly instead)
            Mat grey = new Mat();

            Cv2.CvtColor(rsz, grey, ColorConversionCodes.BGR2GRAY);

#if IMG_DEBUG
            Cv2.ImShow("grey", grey);
            Cv2.WaitKey(0);
#endif

            //Apply adaptive thresholding to get negative
            Mat bw = new Mat();
            Cv2.AdaptiveThreshold(~grey, bw, 255, AdaptiveThresholdTypes.MeanC, ThresholdTypes.Binary, 15, -2);
            bit = OCS.Extensions.BitmapConverter.ToBitmap(bw);
            bit.Save("bw.tiff", System.Drawing.Imaging.ImageFormat.Tiff);

            // Create two new masks cloned from bw.

            Mat horizontal = bw.Clone();
            Mat vertical   = bw.Clone();

            // adjust this for number of lines
            int scale = 10;

            /////////////////////////
            /////////////////////////
            /////////////////////////

            // Specify size on horizontal axis
            int horizontalsize = horizontal.Cols / scale;

            // Create structure element for extracting horizontal lines through morphology operations
            //Mat horizontalStructure = getStructuringElement(MORPH_RECT, Size(horizontalsize, 1));
            Mat horizontalStructure = Cv2.GetStructuringElement(MorphShapes.Rect, new OCS.Size(horizontalsize, 1));

            // Apply morphology operations
            //erode(horizontal, horizontal, horizontalStructure, Point(-1, -1));
            Cv2.Erode(horizontal, horizontal, horizontalStructure, new OCS.Point(-1, -1));
            //dilate(horizontal, horizontal, horizontalStructure, Point(-1, -1));
            Cv2.Dilate(horizontal, horizontal, horizontalStructure, new OCS.Point(-1, -1));

            //    dilate(horizontal, horizontal, horizontalStructure, Point(-1, -1)); // expand horizontal lines

            // Show extracted horizontal lines
#if IMG_DEBUG
            Cv2.ImShow("horizontal", horizontal);
            Cv2.WaitKey(0);
#endif
            bit = OCS.Extensions.BitmapConverter.ToBitmap(horizontal);
            bit.Save("horizontal.tiff", System.Drawing.Imaging.ImageFormat.Tiff);

            // Specify size on vertical axis
            int verticalsize = vertical.Rows / scale;

            // Create structure element for extracting vertical lines through morphology operations
            //Mat verticalStructure = getStructuringElement(MORPH_RECT, Size(1, verticalsize));
            Mat verticalStructure = Cv2.GetStructuringElement(MorphShapes.Rect, new OCS.Size(1, verticalsize));

            // Apply morphology operations
            //erode(vertical, vertical, verticalStructure, Point(-1, -1));
            Cv2.Erode(vertical, vertical, verticalStructure, new OCS.Point(-1, -1));
            //dilate(vertical, vertical, verticalStructure, Point(-1, -1));
            Cv2.Dilate(vertical, vertical, verticalStructure, new OCS.Point(-1, -1));

            // Show extracted vertical lines
#if IMG_DEBUG
            Cv2.ImShow("vertical", vertical);
            Cv2.WaitKey(0);
#endif
            bit = OCS.Extensions.BitmapConverter.ToBitmap(vertical);
            bit.Save("vertical.tiff", System.Drawing.Imaging.ImageFormat.Tiff);


            // create a mask which includes the tables
            Mat mask = horizontal + vertical;
#if IMG_DEBUG
            Cv2.ImShow("mask", mask);
            Cv2.WaitKey(0);
#endif

            // Find the joints between the lines of the tables. We will use this information to
            // discriminate tables from pictures (a table contains more than 4 joints, while a
            // picture has only 4, one at each corner).
            Mat joints = new Mat();
            //bitwise_and(horizontal, vertical, joints);
            Cv2.BitwiseAnd(horizontal, vertical, joints);

            //Cv2.ImShow("joints", joints);
            bit = OCS.Extensions.BitmapConverter.ToBitmap(joints);
            bit.Save("joints.tiff", System.Drawing.Imaging.ImageFormat.Tiff);
#if IMG_DEBUG
            Cv2.ImShow("a", joints);
            Cv2.WaitKey(0);
#endif

            //Thread.Sleep(2000);


            // Find external contours from the mask, which most probably will belong to tables or to images
            //vector<Vec4i> hierarchy;
            //std::vector<std::vector<cv::Point>> contours;
            OCS.HierarchyIndex[] hierarchy;
            //List<List<OCS.Point>> contours = new List<List<OCS.Point>>;
            OCS.Point[][] contours;
            //cv::findContours(mask, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
            Cv2.FindContours(mask, out contours, out hierarchy, OCS.RetrievalModes.External, OCS.ContourApproximationModes.ApproxSimple, new OCS.Point(0, 0));

            //////vector<vector<Point>> contours_poly(contours.size() );
            //////vector<Rect> boundRect(contours.size() );
            //////vector<Mat> rois;
            List <List <OCS.Point> > contours_poly = new List <List <OCS.Point> >(contours.Length);
            List <OCS.Rect>          boundRect     = new List <OCS.Rect>(contours.Length);
            List <Mat> rois = new List <Mat>();


            ////for (size_t i = 0; i < contours.size(); i++)
            ////{
            ////    // find the area of each contour
            ////    double area = contourArea(contours[i]);
            ///
            ////    //        // filter individual lines of blobs that might exist and they do not represent a table
            ////    if (area < 100) // value is randomly chosen, you will need to find that by yourself with trial and error procedure
            ////        continue;
            ////    approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
            ////    boundRect[i] = boundingRect(Mat(contours_poly[i]));
            ////    // find the number of joints that each table has
            ////    Mat roi = joints(boundRect[i]);
            ////    vector<vector<Point>> joints_contours;
            ////    findContours(roi, joints_contours, RETR_CCOMP, CHAIN_APPROX_SIMPLE);
            ////    // if the number is not more than 5 then most likely it not a table
            ////    if (joints_contours.size() <= 4)
            ////        continue;
            ////    rois.push_back(rsz(boundRect[i]).clone());
            ////    //drawContours( rsz, contours, i, Scalar(0, 0, 255), CV_FILLED, 8, vector<Vec4i>(), 0, Point() );
            ////    rectangle(rsz, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 1, 8, 0);
            ////}

            for (int i = 0; i < contours.Length; i++)
            {
                double area = Cv2.ContourArea(contours[i]);
                if (area < 100.0)
                {
                    // Skip: such a small area is unlikely to be a table
                    continue;
                }
                // Approximate the contour and record its bounding rectangle.
                // Indexing contours_poly by i would go out of range once a contour has been
                // skipped above, so build the approximated polygon in a local list instead.
                List <OCS.Point> poly = new List <OCS.Point>();
                OutputArray contour_poly_output = OutputArray.Create(poly);
                Cv2.ApproxPolyDP(InputArray.Create(contours[i]), contour_poly_output, 3.0, true);
                contours_poly.Add(poly);

                Rect boundingRect = Cv2.BoundingRect(InputArray.Create(poly));
                boundRect.Add(boundingRect);
                // The joint-count filter from the commented-out C++ above is not ported here;
                // a sketch of that step follows this example.
                //OCS.Mat roi = Cv2.joints()
            }
#if IMG_DEBUG
            Cv2.NamedWindow("Output", WindowMode.KeepRatio);
            Cv2.Rectangle(rsz, boundRect.ElementAt(0), Scalar.Red, 10);
            Cv2.ImShow("Output", rsz);
            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();
#endif
            ////for (size_t i = 0; i < rois.size(); ++i)
            ////{
            ////    /* Now you can do whatever post process you want
            ////     * with the data within the rectangles/tables. */
            ////    imshow("roi", rois[i]);
            ////    waitKey();
            ////}

            return(boundRect.Count);
        }
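
The loop above skips the joint-count filter that the commented-out C++ applies: a candidate rectangle counts as a table only if it contains more than 4 line intersections. A hedged sketch of that step, reusing the example's joints, boundingRect, rois, and rsz variables and meant to sit at the end of the loop body, could look like this:

                // Count the joints inside the candidate rectangle; a real table has more
                // than 4 intersections, a picture usually only its 4 corners.
                using (Mat roi = new Mat(joints, boundingRect))
                {
                    OCS.Point[][] jointContours;
                    OCS.HierarchyIndex[] jointHierarchy;
                    // Clone the ROI because FindContours may modify its input image.
                    Cv2.FindContours(roi.Clone(), out jointContours, out jointHierarchy,
                        OCS.RetrievalModes.CComp, OCS.ContourApproximationModes.ApproxSimple);

                    if (jointContours.Length <= 4)
                    {
                        continue;   // too few joints: most likely not a table
                    }

                    rois.Add(new Mat(rsz, boundingRect).Clone());
                    Cv2.Rectangle(rsz, boundingRect, new Scalar(0, 255, 0), 1);
                }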
Code Example #2
        //Use OpenCV to find the coordinates.
        void OpenCVTexture(Texture2D texture)
        {
            Mat newMat = Unity.TextureToMat(texture);

            //Tried to detect whether the image had changed by comparing it with the global oldMat via Cv2.Equals, but could not get it to work (an alternative is sketched after this example).

            //Convert image to grayscale
            Mat imgGray = new Mat();

            Cv2.CvtColor(newMat, imgGray, ColorConversionCodes.BGR2GRAY);

            //Debug.Log(Cv2.Equals(imgGray, imgGray));


            // Clean up image using Gaussian Blur
            Mat imgGrayBlur = new Mat();

            Cv2.GaussianBlur(imgGray, imgGrayBlur, new Size(5, 5), 0);

            //Extract edges
            Mat cannyEdges = new Mat();

            Cv2.Canny(imgGrayBlur, cannyEdges, 10.0, 70.0);

            //Do an invert binarize the image
            Mat mask = new Mat();

            Cv2.Threshold(cannyEdges, mask, 70.0, 255.0, ThresholdTypes.BinaryInv);

            // Extract Contours
            Point[][]        contours;                                                                                                    //Receives the detected contours.
            HierarchyIndex[] hierarchy;                                                                                                   //Receives the contour hierarchy.
            Cv2.FindContours(cannyEdges, out contours, out hierarchy, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, null); //Detect the contours.


            PointChangeNumSendUDP(contours);  //Work in progress: intended to send only when the Points have not changed.
            //StartCoroutine(udpSendCoroutine(contours));


            //Draw the contours
            int width    = (int)transform.GetComponent <RectTransform>().sizeDelta.x;
            int height   = (int)transform.GetComponent <RectTransform>().sizeDelta.y;
            Mat Contours = new Mat(height, width, MatType.CV_8UC3, new Scalar(0, 0, 0)); //Start from a black image (the Mat constructor takes rows, then cols).

            Cv2.DrawContours(Contours, contours, -1, new Scalar(0, 255, 0, 255), 1);     //Draw the contours onto the Mat.
            Texture2D changedTex = Unity.MatToTexture(Contours);                         //Convert the Mat to a Texture2D.

            GetComponent <RawImage>().texture = changedTex;                              //Write the Texture2D to the RawImage.

            //Dispose the Mats.
            newMat.Dispose();
            imgGray.Dispose();
            imgGrayBlur.Dispose();
            cannyEdges.Dispose();
            mask.Dispose();
            Contours.Dispose();

            //Destroy the Texture; otherwise a memory leak occurred.
            MonoBehaviour.Destroy(texture);
            if (changedTex != oldChangedTex)
            {
                MonoBehaviour.Destroy(oldChangedTex);
                oldChangedTex = changedTex;
            }
        }
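
The comment about Cv2.Equals above notes that simple equality checking did not work for detecting changes between frames. As an alternative (a hedged sketch, not part of the original project: the HasChanged name and the pixel threshold are placeholders), the two Mats could be compared with Cv2.Absdiff and Cv2.CountNonZero:

        // Returns true when newMat differs from oldMat in more than minChangedPixels pixels.
        // Both Mats are assumed to have the same size and type (e.g. BGR frames from Unity.TextureToMat).
        bool HasChanged(Mat oldMat, Mat newMat, int minChangedPixels = 10)
        {
            using (Mat diff = new Mat())
            using (Mat diffGray = new Mat())
            {
                Cv2.Absdiff(oldMat, newMat, diff);
                // CountNonZero needs a single-channel image, so reduce the difference to grayscale.
                Cv2.CvtColor(diff, diffGray, ColorConversionCodes.BGR2GRAY);
                return Cv2.CountNonZero(diffGray) > minChangedPixels;
            }
        }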