Code Example #1
        public Mat BarcodeRegion(Mat src_)
        {
            //Cv2.Resize(src, src, new Size(src.Size().Width / 2, src.Size().Height / 2));
            Mat src = src_.Clone();

            Cv2.CvtColor(src, src, ColorConversionCodes.RGB2GRAY);
            Cv2.GaussianBlur(src, src, new Size(3, 3), 0);
            Mat img_X = new Mat();
            Mat img_Y = new Mat();

            Cv2.Sobel(src, img_X, MatType.CV_16S, 1, 0);
            Cv2.Sobel(src, img_Y, MatType.CV_16S, 0, 1);

            Cv2.ConvertScaleAbs(img_X, img_X, 1, 0);
            Cv2.ConvertScaleAbs(img_Y, img_Y, 1, 0);

            Mat margin = img_X - img_Y;

            //Cv2.ImShow("img_Y", margin);
            //Cv2.WaitKey();
            Cv2.Resize(margin, margin, new Size(margin.Width * 0.3, margin.Height * 1.5), 0, 0, InterpolationFlags.Area);
            Cv2.Blur(margin, margin, new Size(3, 3));
            Cv2.MedianBlur(margin, margin, 3);

            Mat imgthreshold = new Mat();

            Cv2.Threshold(margin, imgthreshold, 80, 255, ThresholdTypes.Binary);
            //Cv2.AdaptiveThreshold(margin, imgthreshold, 255, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.Binary, 3, -1);
            Cv2.ImShow("thresh", imgthreshold);
            Cv2.WaitKey();

            //First dilate horizontally to fill the gaps between the barcode bars
            Mat element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(5, 1));

            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Dilate, element);
            //Erode vertically to separate the barcode from the text characters
            element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(1, 5));
            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Erode, element);

            //Remove the text characters
            element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(10, 10));
            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Open, element);
            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Close, element);


            element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(10, 10));
            Cv2.Erode(imgthreshold, imgthreshold, element);
            Cv2.Erode(imgthreshold, imgthreshold, element);
            Cv2.Dilate(imgthreshold, imgthreshold, element);
            Cv2.Resize(imgthreshold, imgthreshold, new Size(src.Width, src.Height), 0, 0, InterpolationFlags.Area);
            Cv2.ImShow("thresh", imgthreshold);
            Cv2.WaitKey();

            return(imgthreshold);


            //Compute the largest inscribed rectangle of each region, then measure the black/white pixel ratio of the image inside it

            //Cv2.Dilate(imgthreshold, imgthreshold, element);
        }
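The trailing comment in this example describes a step that was never implemented. As a minimal sketch of one way to continue (not part of the original sample; the helper name LocateBarcode is hypothetical), the mask returned by BarcodeRegion can be reduced to the bounding rectangle of its largest blob:

        public Rect LocateBarcode(Mat mask)
        {
            // External contours of the white regions left in the mask.
            Cv2.FindContours(mask, out OpenCvSharp.Point[][] contours, out HierarchyIndex[] _,
                             RetrievalModes.External, ContourApproximationModes.ApproxSimple);

            // Keep the contour whose bounding rect covers the largest area.
            Rect best = new Rect();
            double bestArea = 0;
            foreach (OpenCvSharp.Point[] contour in contours)
            {
                Rect r = Cv2.BoundingRect(contour);
                double area = (double)r.Width * r.Height;
                if (area > bestArea)
                {
                    bestArea = area;
                    best     = r;
                }
            }
            return(best); // empty Rect when no candidate region was found
        }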
Code Example #2
        void Recognize()
        {
            OpenCvSharp.Point[][] contours;

            HierarchyIndex[] hierarchy;

            Mat after_blur  = new Mat();
            Mat frame_range = new Mat();

            Cv2.Blur(frame_in, after_blur, new OpenCvSharp.Size(3, 3));
            Cv2.InRange(after_blur, new Scalar(0, 0, 0), new Scalar(70, 70, 70), frame_range);

            Cv2.FindContours(frame_range, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);

            for (int i = 0; i < contours.Length; i++)
            {
                Rect bR = Cv2.BoundingRect(contours[i]);

                if ((bR.Width > 20) && (bR.Height > 20))
                {
                    Cv2.DrawContours(frame_out, contours, i, new Scalar(0, 0, 255), 2);

                    try
                    {
                        Point2d[] GetTempPoints = FindTemp(contours[i]);

                        ShowTemp(GetTempPoints);
                    }
                    catch { }
                }
            }
        }
Code Example #3
File: matchTemplate.cs Project: Cecil-yj/Yj_Opencv
        public static void cv_06()  // edge contour detection / finding
        {
            Mat srcImage = Cv2.ImRead(@"G:\pics\123.jpg", ImreadModes.Color);
            Mat src_gray = new Mat();

            Cv2.CvtColor(srcImage, src_gray, ColorConversionCodes.BGR2GRAY); //convert to grayscale (ImRead returns BGR)
            Cv2.Blur(src_gray, src_gray, new OpenCvSharp.Size(2, 2));        //smoothing

            Mat canny_Image = new Mat();

            Cv2.Canny(src_gray, canny_Image, 100, 200);      //Canny edge detection

            OpenCvSharp.Point[][] contours;
            HierarchyIndex[]      hierarchly;
            Cv2.FindContours(canny_Image, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0)); //get the contours

            Mat    dst_Image = Mat.Zeros(canny_Image.Size(), srcImage.Type());                                                                                     // zero-initialized output image
            Random rnd       = new Random();

            for (int i = 0; i < contours.Length; i++)
            {
                Scalar color = new Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
                Cv2.DrawContours(dst_Image, contours, i, color, 2, LineTypes.Link8, hierarchly);       //draw the contour
            }
            //return dst_Image;   //return the result
            Cv2.ImShow("dst_Image", dst_Image);
            Cv2.ImShow("canny_Image", canny_Image);
            Cv2.WaitKey();
            //return canny_Image;
        }
Code Example #4
        public string SolveThePuzzle(string filename, string key, string endpoint)
        {
            try
            {
                Mat imgSource = Cv2.ImRead(filename);
                Mat img       = new Mat();
                Cv2.CvtColor(imgSource, img, ColorConversionCodes.BGR2GRAY);
                Cv2.Blur(img, img, new Size(3, 3));
                Cv2.AdaptiveThreshold(img, img, 255, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.Binary, 15, 15);
                SegmentPuzzle(img, out img, out Rect puzzleRect);

                SplitToDigits(img, out List <Mat> digits, out List <int> puzzle, new Size(50, 50));
                Mat        digitsAll   = new Mat();
                List <Mat> blkNonEmpty = new List <Mat>();
                for (int k = 0; k != puzzle.Count; ++k)
                {
                    if (puzzle[k] != 0)
                    {
                        blkNonEmpty.Add(digits[k]);
                    }
                }
                Cv2.HConcat(blkNonEmpty.ToArray(), digitsAll);
                recognizeDigits(digitsAll, key, endpoint);

                using (new Window("Image", WindowMode.Normal, digitsAll))
                {
                    Cv2.WaitKey();
                }
                return("solved");
            }
            catch (Exception e)
            {
                return(e.Message);
            }
        }
Code Example #5
    public void find_circle(Mat img_mat, double threshold)
    {
        Cv2.Blur(img_mat, img_mat, new Size(5, 5));
        CircleSegment[] circle_seg = Cv2.HoughCircles(img_mat, HoughMethods.Gradient, 1, 100, 200, 40, 20); // dp=1, minDist=100, param1 (Canny high threshold)=200, param2 (accumulator threshold)=40, minRadius=20
        for (int i = 0; i < circle_seg.Length; i++)
        {
            bool bfound = false;
            foreach (Polygon_T s in polygon_array)
            {
                if (circle_seg[i].Center.X > 0 && circle_seg[i].Center.Y > 0)
                {
                    double dist = Math.Sqrt((circle_seg[i].Center.X - s.center_pos.X) *
                                            (circle_seg[i].Center.X - s.center_pos.X) +
                                            (circle_seg[i].Center.Y - s.center_pos.Y) *
                                            (circle_seg[i].Center.Y - s.center_pos.Y));
                    if (dist < threshold)
                    {
                        bfound = true;
                    }
                }
            }

            if (bfound == false)
            {
                Polygon_T p = new Polygon_T();
                p.center_pos = circle_seg[i].Center;
                p.gt         = GraphicType.GT_Sphere;
                polygon_array.Add(p);

                Cv2.Circle(img_mat, circle_seg[i].Center, 5, Scalar.Red, 2);
                Cv2.Circle(img_mat, (int)circle_seg[i].Center.X, (int)circle_seg[i].Center.Y, (int)circle_seg[i].Radius, Scalar.Red, 2);
            }
        }
    }
Code Example #6
        private Mat FindContours(Mat srcImage)
        {
            Mat src_gray = new Mat();

            Cv2.CvtColor(srcImage, src_gray, ColorConversionCodes.RGB2GRAY);
            Cv2.Blur(src_gray, src_gray, new OpenCvSharp.Size(2, 2));

            Mat cannyImage = new Mat();

            Cv2.Canny(src_gray, cannyImage, 100, 200);

            OpenCvSharp.Point[][] contours;
            HierarchyIndex[]      hierarchly;
            Cv2.FindContours(cannyImage, out contours, out hierarchly, RetrievalModes.Tree,
                             ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0));

            Mat dstImage = Mat.Zeros(cannyImage.Size(), srcImage.Type());

            Random rnd = new Random();

            for (int i = 0; i < contours.Length; i++)
            {
                Scalar color = new Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
                Cv2.DrawContours(dstImage, contours, i, color, 2, LineTypes.Link8, hierarchly);
            }

            return(dstImage);
        }
コード例 #7
0
        ///////////////////////////////////////////////////////////////////////////////////

        #region Filter processing

        private void mnuFilterSmoothBlur_Click(object sender, EventArgs e)
        {
            // smoothing filter (5x5 kernel)
            Cv2.Blur(_matDisp, _matDisp, new OpenCvSharp.CPlusPlus.Size(5, 5));
            // draw the image
            DrawMatImage(_matDisp);
        }
Code Example #8
 /// <summary>
 /// Mean (box) filtering
 /// </summary>
 /// <param name="image">image object</param>
 /// <returns></returns>
 public static Image AverageFiltering(this Image image)
 {
     using (Mat src = new Bitmap(image).ToMat())
     {
         Cv2.Blur(src, src, new OpenCvSharp.Size(5, 5));
         return(src.ToBitmap());
     }
 }
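Because AverageFiltering is declared as an extension method on System.Drawing.Image, a caller can apply it fluently. A minimal usage sketch (not part of the original sample; the file names are placeholders):

 Image original = Image.FromFile("input.png");
 Image smoothed = original.AverageFiltering();   // 5x5 normalized box filter
 smoothed.Save("output.png");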
Code Example #9
 public void Blur_Example()
 {
     using Mat WindowImage = new Mat("./Resource.png", ImreadModes.AnyColor);
     using Mat blurImage   = new Mat(WindowImage.Size(), WindowImage.Type());
     Cv2.Blur(WindowImage, blurImage, new Size(5, 5));
     using var openCloseWindow = new Window("OpenCVWindow", WindowMode.AutoSize, blurImage); // show the blurred result
     Debug.WriteLine(Cv2.WaitKey());
 }
Code Example #10
        /// <summary>
        /// Image contour recognition
        /// </summary>
        /// <param name="src"></param>
        public static List <Point2f[]> Findarea(Mat src)
        {
            Mat img   = src;
            Mat gray  = new Mat();
            Mat black = new Mat();

            Point[][]        contours;
            HierarchyIndex[] hierarchy;
            Point2f[]        point2Fs = new Point2f[] { };
            List <Point2f[]> point2 = new List <Point2f[]>();
            Point            p0 = new Point(0, 0), p1 = new Point(0, 0), p2 = new Point(0, 0), p3 = new Point(0, 0);
            Mat soX = new Mat(), soY = new Mat();

            Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY, 0);
            Cv2.Blur(gray, gray, new Size(10, 10));
            int thresh_size = (100 / 4) * 2 + 1;//adaptive threshold block size (odd)

            Cv2.AdaptiveThreshold(gray, black, 255, 0, ThresholdTypes.Binary, thresh_size, thresh_size / 3);
            new Window("二值图", WindowMode.FreeRatio, black);
            Cv2.FindContours(black, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple, null);
            int resultnum = 0;

            Point[][] Excontours = contours;
            for (int i = 0; i < hierarchy.Length; i++)
            {
                if (contours[i].Length < 100)
                {
                    continue;
                }
                RotatedRect rect = Cv2.MinAreaRect(contours[i]);
                point2Fs = rect.Points();
                Point[] po = change(rect.Points());
                //point2.Add(point2Fs);
                Excontours[resultnum] = po;
                for (int z = 0; z < point2Fs.Length; z++)//round to 2 decimal places
                {
                    point2Fs[z].X = (float)Math.Round(point2Fs[z].X, 2);
                    point2Fs[z].Y = (float)Math.Round(point2Fs[z].Y, 2);
                }
                point2.Add(point2Fs);
                for (int j = 0; j < 3; j++)
                {
                    p0 = new Point(point2Fs[j].X, point2Fs[j].Y);
                    p1 = new Point(point2Fs[j + 1].X, point2Fs[j + 1].Y);
                    Cv2.Line(img, p0, p1, Scalar.Red, 1, LineTypes.Link8);
                }
                p2 = new Point(point2Fs[3].X, point2Fs[3].Y);
                p3 = new Point(point2Fs[0].X, point2Fs[0].Y);
                Point TP = new Point((((p0.X + p1.X) / 2)), ((p1.Y + p2.Y) / 2));
                Cv2.Line(img, p2, p3, Scalar.Red, 1, LineTypes.Link8);
                resultnum++;
            }
            Console.WriteLine("剔除后的轮廓数:" + resultnum);
            return(point2);
            //Console.WriteLine(js);
            //new Window("result", WindowMode.FreeRatio, img);
            //Window.WaitKey(0);
        }
Code Example #11
        private static void example02()
        {
            var src = new Mat(@"..\..\Images\fruits.jpg", LoadMode.AnyDepth | LoadMode.AnyColor);

            Cv2.ImShow("Source", src);
            Cv2.WaitKey(1); // do events

            Cv2.Blur(src, src, new Size(15, 15));
            Cv2.ImShow("Blurred Image", src);
            Cv2.WaitKey(1); // do events

            // Converts the MxNx3 image into a Kx3 matrix where K=MxN and
            // each row is now a vector in the 3-D space of RGB.
            // change to a Mx3 column vector (M is number of pixels in image)
            var columnVector = src.Reshape(cn: 3, rows: src.Rows * src.Cols);

            // convert to floating point, it is a requirement of the k-means method of OpenCV.
            var samples = new Mat();

            columnVector.ConvertTo(samples, MatType.CV_32FC3);

            for (var clustersCount = 2; clustersCount <= 8; clustersCount += 2)
            {
                var bestLabels = new Mat();
                var centers    = new Mat();
                Cv2.Kmeans(
                    data: samples,
                    k: clustersCount,
                    bestLabels: bestLabels,
                    criteria:
                    new TermCriteria(type: CriteriaType.Epsilon | CriteriaType.Iteration, maxCount: 10, epsilon: 1.0),
                    attempts: 3,
                    flags: KMeansFlag.PpCenters,
                    centers: centers);


                var clusteredImage = new Mat(src.Rows, src.Cols, src.Type());
                for (var size = 0; size < src.Cols * src.Rows; size++)
                {
                    var clusterIndex = bestLabels.At <int>(0, size);
                    var newPixel     = new Vec3b
                    {
                        Item0 = (byte)(centers.At <float>(clusterIndex, 0)), // B
                        Item1 = (byte)(centers.At <float>(clusterIndex, 1)), // G
                        Item2 = (byte)(centers.At <float>(clusterIndex, 2))  // R
                    };
                    clusteredImage.Set(size / src.Cols, size % src.Cols, newPixel);
                }

                Cv2.ImShow(string.Format("Clustered Image [k:{0}]", clustersCount), clusteredImage);
                Cv2.WaitKey(1); // do events
            }

            Cv2.WaitKey();
            Cv2.DestroyAllWindows();
        }
Code Example #12
        /// <summary>
        /// Blurring
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private void 블러링ToolStripMenuItem_Click(object sender, EventArgs e)
        {
            // the algorithm determines the size of outCvImage
            int oH, oW;

            oH         = inCvImage.Height;
            oW         = inCvImage.Width;
            outCvImage = new Mat();

            Cv2.Blur(inCvImage, outCvImage, new OpenCvSharp.Size(15, 15));
            Cv2ToOutImage();
        }
Code Example #13
File: Form1.cs Project: emfprhs119/ImageGenerator
        public Image Apply(Image image)
        {
            int blurSize = random.Next(range);

            blurSize = blurSize < 3 ? 3 : blurSize;  // enforce a minimum kernel size of 3
            blurSize = blurSize / 2 * 2 + 1;         // round up to an odd kernel size
            Mat outImg = new Mat();
            Mat img    = OpenCvSharp.Extensions.BitmapConverter.ToMat((Bitmap)image);

            Cv2.Blur(img, outImg, new OpenCvSharp.Size(blurSize, blurSize));
            return(OpenCvSharp.Extensions.BitmapConverter.ToBitmap(outImg));
        }
Code Example #14
File: Form4.cs Project: zxn09007/RoboPerception
        private void button2_Click(object sender, EventArgs e)
        {
            using (Mat src = new Mat("F:\\Microsoft Visual Studio\\project\\yoloaforge\\yoloaforge\\a.jpg", ImreadModes.AnyColor | ImreadModes.AnyDepth))
            {
                //convert to grayscale
                Mat dst = new Mat();
                Cv2.CvtColor(src, dst, ColorConversionCodes.BGR2GRAY);

                //convert to a binary image

                /*
                 * API AdaptiveThreshold:
                 * parameters: 1: input grayscale image ('~' inverts it, flipping the background)
                 *             2: output binary image
                 *             3: maximum value used for binarization
                 *             4: adaptive method (enum; currently only two algorithms)
                 *             5: threshold type (enum; Binary here)
                 *             6: block size
                 *             7: constant (may be positive, zero, or negative)
                 */
                Mat binImage = new Mat();
                Cv2.AdaptiveThreshold(~dst, binImage, 255, AdaptiveThresholdTypes.MeanC, ThresholdTypes.Binary, 15, -2);

                int xSize = dst.Cols / 16; //width
                int ySize = dst.Rows / 16; //height

                //structuring elements: new Size(xSize, 1) is a horizontal line; new Size(1, ySize) is a vertical line
                InputArray kernelX = Cv2.GetStructuringElement(MorphShapes.Rect, new OpenCvSharp.Size(xSize, 1), new OpenCvSharp.Point(-1, -1));
                InputArray kernelY = Cv2.GetStructuringElement(MorphShapes.Rect, new OpenCvSharp.Size(1, ySize), new OpenCvSharp.Point(-1, -1));

                Mat result = new Mat();
                ////erosion
                //Cv2.Erode(binImage, result, kernelY);
                ////dilation
                //Cv2.Dilate(result, result, kernelY);

                //opening replaces the erode + dilate pair
                Cv2.MorphologyEx(binImage, result, MorphTypes.Open, kernelY);
                Cv2.Blur(result, result, new OpenCvSharp.Size(3, 3), new OpenCvSharp.Point(-1, -1)); //smooth with a normalized box filter
                Cv2.BitwiseNot(result, result);                                                      //make the background white (invert the background value)


                using (new Window("result", WindowMode.Normal, result))
                    using (new Window("binImage", WindowMode.Normal, binImage))
                        using (new Window("dst", WindowMode.Normal, dst))
                            using (new Window("SRC", WindowMode.Normal, src))
                            {
                                Cv2.WaitKey(0);
                            }
            }
        }
Code Example #15
        static List <Point[][]> canny_test(Mat Src, List <OpenCvSharp.Point[]> contours_final, string fileindex)
        {
            Mat Canny_Src = Mat.Zeros(Src.Size(), MatType.CV_8UC1);

            //use adaptive threshold to filter out the defects
            Cv2.Blur(Src, Canny_Src, new OpenCvSharp.Size(5, 7));

            Cv2.Canny(Canny_Src, Canny_Src, 90, 0);


            Mat kernel = Mat.Ones(5, 5, MatType.CV_8UC1);//adjusts the size of concave corners

            Canny_Src = Canny_Src.MorphologyEx(MorphTypes.Close, kernel);

            OpenCvSharp.Point[][] temp = new Point[1][];

            temp[0] = contours_final[0];
            Cv2.DrawContours(Canny_Src, temp, -1, 0, 3);
            temp[0] = contours_final[1];
            Cv2.DrawContours(Canny_Src, temp, -1, 0, 3);


            Canny_Src.SaveImage("./result/canny/test" + fileindex);

            // denoise
            Point[][]        contours;
            HierarchyIndex[] hierarchly;
            Cv2.FindContours(Canny_Src, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);

            List <OpenCvSharp.Point[][]> final_area = new List <OpenCvSharp.Point[][]>();
            Mat img_temp = Mat.Zeros(Canny_Src.Size(), MatType.CV_8UC1);

            foreach (OpenCvSharp.Point[] contour_now in contours)
            {
                //use the bounding rect to filter out white noise
                RotatedRect BoundingRectangle = Cv2.MinAreaRect(contour_now);
                if (BoundingRectangle.Size.Height * BoundingRectangle.Size.Width > 500)
                {
                    OpenCvSharp.Point[][] temp_final = new Point[1][];//remember to declare this inside the loop
                    //Console.WriteLine("Arc Length: " + (Cv2.ArcLength(contour_now, true) + " Area: " + Cv2.ContourArea(contour_now))+" Length/Area:" +(Cv2.ArcLength(contour_now, true) / Cv2.ContourArea(contour_now)));
                    OpenCvSharp.Point[] approx = Cv2.ApproxPolyDP(contour_now, 0.000, true);
                    temp_final[0] = approx;
                    Cv2.DrawContours(img_temp, temp_final, -1, 255, -1);
                    final_area.Add(temp_final);
                }
            }


            //img_temp.SaveImage("./result/canny/test" + fileindex);
            return(final_area);
        }
Code Example #16
File: OcvOp.cs Project: Noemata/OCVSharpTest
        public void Blur(SoftwareBitmap input, SoftwareBitmap output, Algorithm algorithm)
        {
            if (algorithm.AlgorithmName == "Blur")
            {
                using Mat mInput  = SoftwareBitmap2Mat(input);
                using Mat mOutput = new Mat(mInput.Rows, mInput.Cols, MatType.CV_8UC4);

                Cv2.Blur(mInput, mOutput,
                         (Size)algorithm.AlgorithmProperties[0].CurrentValue,
                         (Point)algorithm.AlgorithmProperties[1].CurrentValue,
                         (BorderTypes)algorithm.AlgorithmProperties[2].CurrentValue);
                Mat2SoftwareBitmap(mOutput, output);
            }
        }
Code Example #17
        private void blur_Click(object sender, System.Windows.RoutedEventArgs e)
        {
            if (load == true)
            {
                Mat src = new Mat(fileName);
                Mat dst = new Mat();

                int size = (int)blue_ksize.Value;
                Cv2.Blur(src, dst, new OpenCvSharp.Size(size, size));

                showWindow(dst, "blur");
            }
            else
            {
                textLoad.Foreground = System.Windows.Media.Brushes.OrangeRed;
            }
        }
Code Example #18
 //     blur(edges, edges, Size(7, 7));//blur
 //Canny(edges, edges, 0, 30, 3);//edge detection
 //blur
 private void BtnChange8_Click(object sender, RoutedEventArgs e)
 {
     using (var src = new Mat(@"..\..\Images\ocv02.jpg", ImreadModes.AnyDepth | ImreadModes.AnyColor))
     {
         using (var dst = new Mat())//work on a copy
         {
             Cv2.Blur(src, dst, new OpenCvSharp.Size(7, 7));
             var         mem = dst.ToMemoryStream();
             BitmapImage bmp = new BitmapImage();
             bmp.BeginInit();
             bmp.StreamSource = mem;
             bmp.EndInit();
             imgOutput.Source = bmp;
         }
     }
     SetSource(@"..\..\Images\ocv02.jpg");
 }
Code Example #19
    // Update is called once per frame
    void Update()
    {
        webcam.Read(frameMat);



        // Convert webcam texture to mat object.
        //WebCamToMat(frameMat);
        using (Mat gray = new Mat())
        {
            Cv2.CvtColor(frameMat, gray, ColorConversionCodes.RGB2GRAY);
            // Blur the image.
            Cv2.Blur(gray, gray, new Size(3, 3));
            // Detect edges using canny - Lower number = more contours.
            Cv2.Canny(gray, gray, 85, 255);
            // Cv2.Threshold(gray, cannyOutput,100,255,ThresholdTypes.BinaryInv);
            Cv2.ImShow("Contours", gray);
            currentFrame = MatToTexture(frameMat);
            GetComponent <Renderer>().material.mainTexture = currentFrame;
        }
    }
Code Example #20
        public void ColourLoop(VideoCapture cam_test, Window colourWindow)
        {
            src = cam_test.RetrieveMat();
            Cv2.Resize(src, src, new Size(CamCalibration.GetFrameArray()[0], CamCalibration.GetFrameArray()[1]), 1, 1, InterpolationFlags.Cubic);

            if (colourPicked)
            {
                HSV = new Mat();
                RGB = new Mat(src, new Rect(colourPoint.X, colourPoint.Y, 1, 1));

                Cv2.CvtColor(RGB, HSV, ColorConversionCodes.BGR2HSV);
                hsv          = HSV.At <Vec3b>(0, 0);
                newColor_hsv = new Scalar(hsv[0], hsv[1], hsv[2]);
                Hue          = hsv[0];

                rgb          = RGB.At <Vec3b>(0, 0);
                newColor_rgb = new Scalar(rgb[0], rgb[1], rgb[2]);

                colourPicked = false;
                AddNewColour = true;

                HSV.Dispose();
                RGB.Dispose();
            }

            if (blobView)
            {
                Cv2.Blur(src, src, new Size(3, 3));
                Cv2.CvtColor(src, src, ColorConversionCodes.BGR2HSV);
                Cv2.InRange(src, new Scalar(hsv[0] - 10, elementSm, elementVm), new Scalar(hsv[0] + 10, elementSM, elementVM), src);
            }

            Cv2.ImShow(colourWindow.Name, src);

            Cv2.WaitKey(1);

            src.Dispose();
        }
Code Example #21
        //Filter
        private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
        {
            int index = comboBox1.SelectedIndex;

            switch (index)
            {
            case 0:
                // simple blur
                //input, output, kernel size, anchor ((-1, -1) is the center), border type
                Cv2.Blur(MyImage, Filter, new OpenCvSharp.Size(5, 5), new OpenCvSharp.Point(-1, -1),
                         BorderTypes.Default);
                break;

            case 1:
                //box filter
                Cv2.BoxFilter(MyImage, Filter, MatType.CV_8UC3, new OpenCvSharp.Size(7, 7),
                              new OpenCvSharp.Point(-1, -1), true, BorderTypes.Default);
                break;

            case 2:
                //median blur
                Cv2.MedianBlur(MyImage, Filter, 9);
                break;

            case 3:
                //Gaussian blur
                Cv2.GaussianBlur(MyImage, Filter, new OpenCvSharp.Size(3, 3), 1, 0, BorderTypes.Default);
                break;

            case 4:
                //bilateral filter
                //for offline use with heavy noise filtering, d = 9 is recommended
                //the larger the sigma values, the more cartoon-like the result looks (50, 50)
                Cv2.BilateralFilter(MyImage, Filter, 9, 50, 50, BorderTypes.Default);
                break;
            }
            pictureBox2.Image = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(Filter);
        }
Code Example #22
File: matchTemplate.cs Project: Cecil-yj/Yj_Opencv
        public static void cv_09()  // image filtering: mean / Gaussian / median
        {
            //  Purpose of filtering: pre-processing before image processing proper, reducing noise and smoothing the image.
            //  Median filtering takes the median value inside the kernel window. It suppresses salt-and-pepper noise well, because such noise consists of extreme pixel values in a small region; the median skips those extremes and replaces the contaminated pixel with a suitable neighbor while preserving sharp edges, so it handles this noise well.
            //  Gaussian filtering (Gaussian blur / Gaussian smoothing) weights neighborhood pixels differently by position when smoothing, which preserves more of the image's overall gray-level distribution.
            //  Mean filtering replaces each pixel with the average of its N neighbors. It smooths the image, is fast, and the algorithm is simple, but it cannot remove noise, only weaken it slightly.

            Mat src_img = Cv2.ImRead(@"G:\pics\5.jpg");

            Mat meanBlur   = new Mat();
            Mat gaussBlur  = new Mat();
            Mat medianBlur = new Mat();

            Cv2.Blur(src_img, meanBlur, new OpenCvSharp.Size(15, 15), new OpenCvSharp.Point(-1, -1)); //mean blur. Params: 1 input; 2 output; 3 kernel size; 4 anchor (kernel center)
            Cv2.GaussianBlur(src_img, gaussBlur, new OpenCvSharp.Size(15, 15), 11, 11);               //Gaussian blur. Params: 1 input; 2 output; 3 kernel size (positive odd); 4 sigma X; 5 sigma Y
            Cv2.MedianBlur(src_img, medianBlur, 15);                                                  //median blur. Params: 1 input; 2 output; 3 aperture size (odd, greater than 1)

            Cv2.ImShow("src_img", src_img);
            Cv2.ImShow("meanBlur", meanBlur);
            Cv2.ImShow("gaussBlur", gaussBlur);
            Cv2.ImShow("medianBlur", medianBlur);
            Cv2.WaitKey();
        }
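The comment block in this example claims that median filtering removes salt-and-pepper noise that mean filtering only attenuates. A minimal sketch demonstrating that claim (not part of the original sample; the image path is a placeholder):

        Mat img   = Cv2.ImRead(@"G:\pics\5.jpg", ImreadModes.Grayscale);
        Mat noisy = img.Clone();
        var rnd   = new Random();
        for (int n = 0; n < 5000; n++) // sprinkle extreme-valued (salt/pepper) pixels
        {
            noisy.Set<byte>(rnd.Next(noisy.Rows), rnd.Next(noisy.Cols), (byte)(rnd.Next(2) == 0 ? 0 : 255));
        }
        Mat byMedian = new Mat(), byMean = new Mat();
        Cv2.MedianBlur(noisy, byMedian, 3);                   // noise points vanish, edges survive
        Cv2.Blur(noisy, byMean, new OpenCvSharp.Size(3, 3));  // noise is only smeared, not removed
        Cv2.ImShow("noisy", noisy);
        Cv2.ImShow("median", byMedian);
        Cv2.ImShow("mean", byMean);
        Cv2.WaitKey();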
Code Example #23
File: Samples.cs Project: kideath/BarrierX_NagiBot
        // https://github.com/VahidN/OpenCVSharp-Samples/blob/master/OpenCVSharpSample19/Program.cs
        public static void Gradient(Mat src)
        {
            var gray     = new Mat();
            var channels = src.Channels();

            if (channels > 1)
            {
                Cv2.CvtColor(src, gray, ColorConversionCodes.BGRA2GRAY);
            }
            else
            {
                src.CopyTo(gray);
            }


            // compute the Scharr gradient magnitude representation of the images
            // in both the x and y direction
            var gradX = new Mat();

            Cv2.Sobel(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0, ksize: -1);
            //Cv2.Scharr(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0);
            Cv2.ImShow("gradX", gradX);

            var gradY = new Mat();

            Cv2.Sobel(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1, ksize: -1);
            //Cv2.Scharr(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1);
            Cv2.ImShow("gradY", gradY);

            // subtract the y-gradient from the x-gradient
            var gradient = new Mat();

            Cv2.Subtract(gradX, gradY, gradient);
            Cv2.ConvertScaleAbs(gradient, gradient);

            Cv2.ImShow("Gradient", gradient);


            // blur and threshold the image
            var blurred = new Mat();

            Cv2.Blur(gradient, blurred, new Size(9, 9));

            double thresh      = 127.0;
            var    threshImage = new Mat();

            Cv2.Threshold(blurred, threshImage, thresh, 255, ThresholdTypes.Binary);


            bool debug = true;

            if (debug)
            {
                Cv2.ImShow("Thresh", threshImage);
                Cv2.WaitKey(1); // do events
            }


            // construct a closing kernel and apply it to the thresholded image
            var kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(21, 7));
            var closed = new Mat();

            Cv2.MorphologyEx(threshImage, closed, MorphTypes.Close, kernel);

            if (debug)
            {
                Cv2.ImShow("Closed", closed);
                Cv2.WaitKey(1); // do events
            }


            // perform a series of erosions and dilations
            Cv2.Erode(closed, closed, null, iterations: 4);
            Cv2.Dilate(closed, closed, null, iterations: 4);

            if (debug)
            {
                Cv2.ImShow("Erode & Dilate", closed);
                Cv2.WaitKey(1); // do events
            }


            //find the contours in the thresholded image, then sort the contours
            //by their area, keeping only the largest one

            Point[][]        contours;
            HierarchyIndex[] hierarchyIndexes;
            Cv2.FindContours(
                closed,
                out contours,
                out hierarchyIndexes,
                mode: RetrievalModes.CComp,
                method: ContourApproximationModes.ApproxSimple);

            if (contours.Length == 0)
            {
                throw new NotSupportedException("Couldn't find any object in the image.");
            }

            var contourIndex       = 0;
            var previousArea       = 0;
            var biggestContourRect = Cv2.BoundingRect(contours[0]);

            while ((contourIndex >= 0))
            {
                var contour = contours[contourIndex];

                var boundingRect     = Cv2.BoundingRect(contour); //Find bounding rect for each contour
                var boundingRectArea = boundingRect.Width * boundingRect.Height;
                if (boundingRectArea > previousArea)
                {
                    biggestContourRect = boundingRect;
                    previousArea       = boundingRectArea;
                }

                contourIndex = hierarchyIndexes[contourIndex].Next;
            }
        }
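Note that Gradient() computes biggestContourRect but never returns or draws it. A minimal follow-up sketch (not part of the original sample) that could be appended before the end of the method to visualize the result:

            // Draw the largest bounding rect on a copy of the source and show it.
            var annotated = src.Clone();
            Cv2.Rectangle(annotated, biggestContourRect.TopLeft, biggestContourRect.BottomRight,
                          new Scalar(0, 255, 0), 2); // green box around the largest blob
            Cv2.ImShow("Biggest contour", annotated);
            Cv2.WaitKey(1); // do events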
Code Example #24
File: FindBody.cs Project: jerryfekky/muqiangshibie
        public static Point[][]  Findarea(Mat src)
        {
            Mat img   = src;
            Mat gray  = new Mat();
            Mat black = new Mat();

            //Point[][] contours;
            //HierarchyIndex[] hierarchy;
            Point2f[]        point2Fs = new Point2f[] { };
            List <Point2f[]> point2 = new List <Point2f[]>();
            Point            p0 = new Point(0, 0), p1 = new Point(0, 0), p2 = new Point(0, 0), p3 = new Point(0, 0);
            Mat soX = new Mat(), soY = new Mat();

            Cv2.Laplacian(img, gray, 0, 1, 1, 0, BorderTypes.Default); // result is immediately overwritten by the CvtColor below
            Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY, 0);
            //Cv2.Sobel(gray, soX, MatType.CV_8U, 1, 0);
            //Cv2.Sobel(gray, soY,MatType.CV_8U, 0,1);
            //Cv2.AddWeighted(soX,0.5,soY,0.5,0,gray);
            Cv2.Blur(gray, gray, new Size(10, 10));
            int thresh_size = (100 / 4) * 2 + 1;//adaptive threshold block size (odd)

            Cv2.AdaptiveThreshold(gray, black, 255, 0, ThresholdTypes.Binary, thresh_size, thresh_size / 3);
            //Cv2.Threshold(gray,black,100,255,ThresholdTypes.Binary);
            Mat x = Cv2.GetStructuringElement(MorphShapes.Ellipse, new Size(5, 5)); //!!! tune the size

            Cv2.MorphologyEx(black, black, MorphTypes.Open, x);                     //!!! tune the MorphTypes
            //new Window("binary image", WindowMode.FreeRatio, black);
            Cv2.FindContours(black, out contours, out hierarchys, RetrievalModes.External, ContourApproximationModes.ApproxSimple, null);
            int resultnum = 0;

            Point[][] Excontours = contours;
            for (int i = 0; i < hierarchys.Length; i++)
            {
                if (contours[i].Length < 100)
                {
                    continue;
                }
                RotatedRect rect = Cv2.MinAreaRect(contours[i]);
                point2Fs = rect.Points();
                Point[] po = change(rect.Points());
                //point2.Add(point2Fs);
                Excontours[resultnum] = po;
                for (int j = 0; j < 3; j++)
                {
                    p0 = new Point(point2Fs[j].X, point2Fs[j].Y);
                    p1 = new Point(point2Fs[j + 1].X, point2Fs[j + 1].Y);
                    Cv2.Line(img, p0, p1, Scalar.Red, 1, LineTypes.Link8);
                }
                p2 = new Point(point2Fs[3].X, point2Fs[3].Y);
                p3 = new Point(point2Fs[0].X, point2Fs[0].Y);
                Point TP = new Point((((p0.X + p1.X) / 2)), ((p1.Y + p2.Y) / 2));
                Cv2.Line(img, p2, p3, Scalar.Red, 1, LineTypes.Link8);
                //Cv2.PutText(img, resultnum.ToString(), TP, HersheyFonts.HersheyComplex, 2, Scalar.Blue, 1, LineTypes.Link8, false);
                resultnum++;
            }
            Console.WriteLine("剔除后的轮廓数:" + resultnum);

            new Window("results", WindowMode.FreeRatio, img);
            return(Excontours);

            //Window.WaitKey(0); // unreachable after the return above
        }
Code Example #25
        public double MeasureArea(Mat img, bool showImage = false)   // (Mat img,bool showImage = false)
        {
            RNG g_rng = new RNG(12345);

            Mat[] g_vContours;
            Mat   g_vHierarchy = new Mat();

            //Mat img = new Mat(Images.picMainRef, ImreadModes.Color);
            //Cv2.ImShow("SRC", img);

            //grayscale
            Mat g_grayImage = img.CvtColor(ColorConversionCodes.BGR2GRAY);

            //blur
            Cv2.Blur(g_grayImage, g_grayImage, new Size(3, 3));
            //Cv2.ImShow("grayblur",g_grayImage);

            ////canny
            //Mat g_cannyMat_output = new Mat();
            //Cv2.Canny(g_grayImage, g_cannyMat_output, g_nThresh, g_nThresh * 2, 3);
            //Cv2.ImShow("canny", g_cannyMat_output);

            //binarize
            Mat binary = new Mat();

            Cv2.Threshold(g_grayImage, binary, 20, 255, ThresholdTypes.Tozero);
            //Cv2.ImShow("binary", binary);

            //morphological operations
            Mat morphImg = new Mat();
            Mat kernel   = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 5), new Point(-1, -1));

            Cv2.MorphologyEx(binary, morphImg, MorphTypes.Close, kernel, new Point(-1, -1));
            //Cv2.ImShow("morph Image", morphImg);

            //Contours
            Mat Contours = Mat.Zeros(img.Size(), MatType.CV_8UC3);//img.EmptyClone();

            Cv2.FindContours(binary, out g_vContours, g_vHierarchy, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new Point(0, 0));
            //Cv2.ImShow("test", Contours);

            double g_ContourArea = 0;

            for (int i = 0; i < g_vContours.Length; i++)
            {
                Scalar color = new Scalar(g_rng.Uniform(0, 255), g_rng.Uniform(0, 255), g_rng.Uniform(0, 255));//generate a random color

                Cv2.DrawContours(Contours, g_vContours, i, color, -1, LineTypes.Link8, null, int.MaxValue, null);

                double dContourArea = Cv2.ContourArea(g_vContours[i]);
                g_ContourArea += dContourArea;
            }
            if (showImage == true)
            {
                Cv2.ImShow("Contours", Contours);
                Cv2.WaitKey();
                Cv2.DestroyAllWindows();
            }

            return(g_ContourArea);
        }
Code Example #26
        // Main body: the image-processing method APIs
        private Mat myOPENCV_run(Mat image_in, Mat image_out)
        {
            image_out = image_in;                          // pass the input image through to the output
            for (int i = 0; i < listBox2.Items.Count; i++) //run the methods queued in listBox2
            {
                switch ((MyOPENCV)myOPENCV_runlist[i, 0])  // the method selected in listBox2
                {
                case MyOPENCV.cvt_color:                   //color conversion (input, output, conversion code, ...)
                {
                    Cv2.CvtColor(image_out, image_out, (ColorConversionCodes)myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2]);
                    break;
                }

                case MyOPENCV.boxfilter:    //box filter
                {
                    OpenCvSharp.Size size;
                    size.Width  = myOPENCV_runlist[i, 2];
                    size.Height = myOPENCV_runlist[i, 3];
                    Cv2.BoxFilter(image_out, image_out, myOPENCV_runlist[i, 1], size);
                    break;
                }

                case MyOPENCV.blur:     //mean blur
                {
                    OpenCvSharp.Size size;
                    size.Width  = myOPENCV_runlist[i, 1];
                    size.Height = myOPENCV_runlist[i, 2];
                    Cv2.Blur(image_out, image_out, size);
                    break;
                }

                case MyOPENCV.gaussianblur:      // Gaussian blur
                {
                    OpenCvSharp.Size size;
                    double           sigmaX, sigmaY;
                    size.Width  = myOPENCV_runlist[i, 1];
                    size.Height = myOPENCV_runlist[i, 2];
                    sigmaX      = (double)myOPENCV_runlist[i, 3];
                    sigmaY      = (double)myOPENCV_runlist[i, 4];

                    Cv2.GaussianBlur(image_out, image_out, size, sigmaX, sigmaY);
                    break;
                }

                case MyOPENCV.medianblur:    //median blur
                {
                    Cv2.MedianBlur(image_in, image_out, myOPENCV_runlist[i, 1]);
                    break;
                }

                case MyOPENCV.bilateralfilter:    //bilateral filter
                {
                    Mat    image_out2 = new Mat();
                    double sigmaColor, sigmaSpace;
                    sigmaColor = (double)myOPENCV_runlist[i, 2] * 2;
                    sigmaSpace = (double)myOPENCV_runlist[i, 3] / 2;
                    Cv2.BilateralFilter(image_out, image_out2, myOPENCV_runlist[i, 1], sigmaColor, sigmaSpace);
                    image_out = image_out2;
                    break;
                }

                case MyOPENCV.dilate:    //dilate
                {
                    Mat image_element = new Mat();
                    OpenCvSharp.Size size;
                    size.Width    = myOPENCV_runlist[i, 2];
                    size.Height   = myOPENCV_runlist[i, 3];
                    image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 1], size);
                    Cv2.Dilate(image_out, image_out, image_element);
                    break;
                }

                case MyOPENCV.erode:    //erode
                {
                    Mat image_element = new Mat();
                    OpenCvSharp.Size size;
                    size.Width    = myOPENCV_runlist[i, 2];
                    size.Height   = myOPENCV_runlist[i, 3];
                    image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 1], size);
                    Cv2.Erode(image_out, image_out, image_element);
                    break;
                }

                case MyOPENCV.morphologyex:    //advanced morphological transform
                {
                    Mat image_element = new Mat();
                    OpenCvSharp.Size size;
                    size.Width    = myOPENCV_runlist[i, 3];
                    size.Height   = myOPENCV_runlist[i, 4];
                    image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 2], size);
                    Cv2.MorphologyEx(image_out, image_out, (MorphTypes)myOPENCV_runlist[i, 1], image_element);
                    break;
                }

                case MyOPENCV.floodfill:    //flood fill
                {
                    OpenCvSharp.Point point;
                    point.X = myOPENCV_runlist[i, 1];
                    point.Y = myOPENCV_runlist[i, 2];
                    OpenCvSharp.Scalar scalar;
                    scalar = myOPENCV_runlist[i, 3];
                    Cv2.FloodFill(image_out, point, scalar);
                    break;
                }

                case MyOPENCV.pyrup:    //scale up (pyramid)
                {
                    OpenCvSharp.Size size;
                    size.Width  = image_out.Cols * 2;
                    size.Height = image_out.Rows * 2;
                    Cv2.PyrUp(image_out, image_out, size);
                    break;
                }

                case MyOPENCV.pyrdown:    //scale down (pyramid)
                {
                    OpenCvSharp.Size size;
                    size.Width  = image_out.Cols / 2;
                    size.Height = image_out.Rows / 2;
                    Cv2.PyrDown(image_out, image_out, size);
                    break;
                }

                case MyOPENCV.resize:    //resize
                {
                    OpenCvSharp.Size   size;
                    InterpolationFlags interpolationFlags;
                    size.Width         = image_out.Cols * myOPENCV_runlist[i, 1] / 10;
                    size.Height        = image_out.Rows * myOPENCV_runlist[i, 2] / 10;
                    interpolationFlags = (InterpolationFlags)myOPENCV_runlist[i, 3];
                    Cv2.Resize(image_out, image_out, size, 0, 0, interpolationFlags);
                    break;
                }

                case MyOPENCV.threshold:    //fixed threshold
                {
                    Cv2.Threshold(image_out, image_out, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], (ThresholdTypes)myOPENCV_runlist[i, 3]);
                    break;
                }

                case MyOPENCV.canny:    //Canny edge detection
                {
                    Mat image_out2 = new Mat();
                    Cv2.Canny(image_out, image_out2, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    image_out = image_out2;
                    break;
                }

                case MyOPENCV.sobel:    //Sobel edge detection
                {
                    Cv2.Sobel(image_out, image_out, -1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    break;
                }

                case MyOPENCV.laplacian:    //Laplacian edge detection
                {
                    myOPENCV_runlist[i, 1] = 0;
                    Cv2.Laplacian(image_out, image_out, 0, myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    break;
                }

                case MyOPENCV.scharr:    //Scharr edge detection
                {
                    Cv2.Scharr(image_out, image_out, -1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2]);
                    break;
                }

                case MyOPENCV.convertscaleabs:    //fast image enhancement
                {
                    double alpha, beta;
                    alpha = (double)myOPENCV_runlist[i, 1] / 10;
                    beta  = (double)myOPENCV_runlist[i, 2] / 10;
                    Cv2.ConvertScaleAbs(image_out, image_out, alpha, beta);
                    break;
                }

                case MyOPENCV.addweighted:    //image blending
                {
                    Mat    image_in2 = new Mat(my_imagesource2);
                    double alpha, beta, gamma;
                    alpha = (double)myOPENCV_runlist[i, 1] / 10;
                    beta  = (double)myOPENCV_runlist[i, 2] / 10;
                    gamma = (double)myOPENCV_runlist[i, 3] / 10;
                    Cv2.AddWeighted(image_out, alpha, image_in2, beta, gamma, image_out);
                    break;
                }

                case MyOPENCV.houghlines:                                     //standard Hough transform
                {
                    Scalar             scalar = new Scalar(0x00, 0xFF, 0x00); //green
                    LineSegmentPolar[] lines;
                    OpenCvSharp.Size   size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                    Mat image_out3          = new Mat(size, MatType.CV_8UC3);
                    lines = Cv2.HoughLines(image_out, 1, Cv2.PI / 180, myOPENCV_runlist[i, 1]);
                    for (int ii = 0; ii < lines.Length; ii++)
                    {
                        //double rho, theta;
                        OpenCvSharp.Point pt1, pt2;
                        double            a = Math.Cos(lines[ii].Theta), b = Math.Sin(lines[ii].Theta);
                        double            x0 = a * lines[ii].Rho, y0 = b * lines[ii].Rho;
                        pt1.X = (int)Math.Round(x0 + 1000 * (-b));
                        pt1.Y = (int)Math.Round(y0 + 1000 * (a));
                        pt2.X = (int)Math.Round(x0 - 1000 * (-b));
                        pt2.Y = (int)Math.Round(y0 - 1000 * (a));
                        Cv2.Line(image_out3, pt1, pt2, scalar, 1, LineTypes.AntiAlias);
                    }
                    if (myOPENCV_runlist[i, 2] == 0)
                    {
                        Cv2.AddWeighted(image_out3, (double)myOPENCV_runlist[i, 3] / 10, image_in, (double)myOPENCV_runlist[i, 4] / 10, 0, image_out);
                    }
                    else
                    {
                        image_out = image_out3;
                    }
                    break;
                }

                case MyOPENCV.houghlinep:                                     //probabilistic Hough transform
                {
                    Scalar             scalar = new Scalar(0x00, 0xFF, 0x00); //green
                    LineSegmentPoint[] lines;                                 // detected line segments
                    OpenCvSharp.Size   size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                    Mat image_out3          = new Mat(size, MatType.CV_8UC3);
                    lines = Cv2.HoughLinesP(image_out, 1, Cv2.PI / 180, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 3], myOPENCV_runlist[i, 4]);
                    for (int ii = 0; ii < lines.Length; ii++)
                    {
                        OpenCvSharp.Point point1, point2;
                        point1.X = lines[ii].P1.X;
                        point1.Y = lines[ii].P1.Y;
                        point2.X = lines[ii].P2.X;
                        point2.Y = lines[ii].P2.Y;
                        Cv2.Line(image_out3, point1, point2, scalar, 1, LineTypes.AntiAlias);
                    }
                    if (myOPENCV_runlist[i, 2] == 0)
                    {
                        Cv2.AddWeighted(image_out3, 1, image_in, 0.8, 0, image_out);
                    }
                    else
                    {
                        image_out = image_out3;
                    }
                    break;
                }

                case MyOPENCV.houghcircles:                                 //Hough circle transform
                {
                    Scalar           scalar = new Scalar(0x00, 0xFF, 0x00); //green
                    CircleSegment[]  circles;
                    OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                    Mat image_out3        = new Mat(size, MatType.CV_8UC3);
                    circles = Cv2.HoughCircles(image_out, HoughMethods.Gradient, 1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3], 0, myOPENCV_runlist[i, 4]);
                    for (int ii = 0; ii < circles.Length; ii++)
                    {
                        OpenCvSharp.Point center;
                        center.X = (int)Math.Round(circles[ii].Center.X);
                        center.Y = (int)Math.Round(circles[ii].Center.Y);
                        int radius = (int)Math.Round(circles[ii].Radius);
                        Cv2.Circle(image_out3, center.X, center.Y, radius, scalar);
                        Cv2.Circle(image_out3, center, radius, scalar);
                    }
                    Cv2.AddWeighted(image_out3, 1, image_in, 0.6, 0, image_out);

                    break;
                }

                case MyOPENCV.remap:    //remap
                {
                    OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);

                    Mat map_x = new Mat(size, MatType.CV_32FC1), map_y = new Mat(size, MatType.CV_32FC1);
                    for (int ii = 0; ii < image_out.Rows; ii++)
                    {
                        for (int jj = 0; jj < image_out.Cols; jj++)
                        {
                            if (myOPENCV_runlist[i, 1] == 0)
                            {
                                map_x.Set <float>(ii, jj, jj);                  //flip vertically
                                map_y.Set <float>(ii, jj, image_out.Rows - ii); //flip vertically
                            }
                            else if (myOPENCV_runlist[i, 1] == 1)
                            {
                                map_x.Set <float>(ii, jj, image_out.Cols - jj); //flip horizontally
                                map_y.Set <float>(ii, jj, ii);                  //flip horizontally
                            }
                            else if (myOPENCV_runlist[i, 1] == 2)
                            {
                                map_x.Set <float>(ii, jj, image_out.Cols - jj);       //flip both ways
                                map_y.Set <float>(ii, jj, image_out.Rows - ii);       //flip both ways
                            }
                            else if (myOPENCV_runlist[i, 1] == 3)
                            {
                                map_x.Set <float>(ii, jj, (float)myOPENCV_runlist[i, 2] / 10 * jj);       //scale up/down
                                map_y.Set <float>(ii, jj, (float)myOPENCV_runlist[i, 2] / 10 * ii);       //scale up/down
                            }
                        }
                    }
                    Cv2.Remap(image_out, image_out, map_x, map_y);
                    break;
                }

                case MyOPENCV.warpaffine:    //affine transform
                {
                    if (0 == myOPENCV_runlist[i, 1])
                    {
                        Mat rot_mat = new Mat(2, 3, MatType.CV_32FC1);
                        OpenCvSharp.Point center = new OpenCvSharp.Point(image_out.Cols / 2, image_out.Rows / 2);
                        double            angle  = myOPENCV_runlist[i, 2];
                        double            scale  = (double)myOPENCV_runlist[i, 3] / 10;
                        ///// compute the rotation matrix from the rotation parameters above
                        rot_mat = Cv2.GetRotationMatrix2D(center, angle, scale);
                        ///// rotate/warp the image
                        Cv2.WarpAffine(image_out, image_out, rot_mat, image_out.Size());
                    }
                    else
                    {
                        Point2f[] srcTri   = new Point2f[3];
                        Point2f[] dstTri   = new Point2f[3];
                        Mat       warp_mat = new Mat(2, 3, MatType.CV_32FC1);
                        Mat       warp_dst;
                        warp_dst  = Mat.Zeros(image_out.Rows, image_out.Cols, image_out.Type());
                        srcTri[0] = new Point2f(0, 0);
                        srcTri[1] = new Point2f(image_out.Cols, 0);
                        srcTri[2] = new Point2f(0, image_out.Rows);
                        dstTri[0] = new Point2f((float)(image_out.Cols * myOPENCV_runlist[i, 2] / 100), (float)(image_out.Rows * myOPENCV_runlist[i, 2] / 100));
                        dstTri[1] = new Point2f((float)(image_out.Cols * (1 - (float)myOPENCV_runlist[i, 3] / 100)), (float)(image_out.Rows * myOPENCV_runlist[i, 3] / 100));
                        dstTri[2] = new Point2f((float)(image_out.Cols * myOPENCV_runlist[i, 4] / 100), (float)(image_out.Rows * (1 - (float)myOPENCV_runlist[i, 4] / 100)));
                        warp_mat  = Cv2.GetAffineTransform(srcTri, dstTri);
                        Cv2.WarpAffine(image_out, image_out, warp_mat, image_out.Size());
                    }
                    break;
                }

                case MyOPENCV.equalizehist:    //histogram equalization
                {
                    Cv2.EqualizeHist(image_out, image_out);
                    break;
                }

                case MyOPENCV.facedetection:         //face detection
                {
                    if (0 == myOPENCV_runlist[i, 1]) // parameter 1 == 0 selects the Haar cascade, any other value selects LBP
                    {
                        var haarCascade = new CascadeClassifier(@"haarcascade_frontalface_alt.xml");
                        Mat haarResult  = DetectFace(image_out, haarCascade);
                        image_out = haarResult;
                    }
                    else
                    {
                        var lbpCascade = new CascadeClassifier(@"lbpcascade_frontalface.xml");
                        Mat lbpResult  = DetectFace(image_out, lbpCascade);
                        image_out = lbpResult;
                    }

                    break;
                }

                case MyOPENCV.matchtemplate:                                             // template matching
                {
                    Mat originalMat = Cv2.ImRead(my_imagesource, ImreadModes.AnyColor);  //source image
                    Mat modelMat    = Cv2.ImRead(my_imagesource2, ImreadModes.AnyColor); //template
                    Mat resultMat   = new Mat();                                         // match result

                    //resultMat.Create(mat1.Cols - modelMat.Cols + 1, mat1.Rows - modelMat.Cols + 1, MatType.CV_32FC1);//create the result Mat, i.e. the third argument of MatchTemplate
                    if (0 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.SqDiff);        //run the match (1 source, 2 template sub-image, 3 returned result, 4 match mode)
                    }
                    else if (1 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.SqDiffNormed);
                    }
                    else if (2 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCorr);
                    }
                    else if (3 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCorrNormed);
                    }
                    else if (4 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCoeff);
                    }
                    else if (5 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCoeffNormed);
                    }
                    OpenCvSharp.Point minLocation, maxLocation, matchLocation;
                    Cv2.MinMaxLoc(resultMat, out minLocation, out maxLocation);
                    matchLocation = maxLocation;
                    Mat mask = originalMat.Clone();

                    Cv2.Rectangle(mask, minLocation, new OpenCvSharp.Point(minLocation.X + modelMat.Cols, minLocation.Y + modelMat.Rows), Scalar.Green, 2);         //draw the matched rect (image, min point, max point, color, thickness)

                    image_out = mask;
                    break;
                }

                case MyOPENCV.find_draw_contours:                                      // find and draw contours
                {
                    Cv2.CvtColor(image_out, image_out, ColorConversionCodes.RGB2GRAY); // convert to grayscale
                    //Cv2.Blur(image_out, image_out, new OpenCvSharp.Size(2, 2));  // optional smoothing

                    Cv2.Canny(image_out, image_out, 100, 200);              // Canny edge detection

                    OpenCvSharp.Point[][] contours;
                    HierarchyIndex[]      hierarchy;
                    Cv2.FindContours(image_out, out contours, out hierarchy, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0)); // get the contours
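                    // RetrievalModes.Tree keeps the full nesting hierarchy; ApproxSimple compresses straight segments to their endpoints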

                    Mat    dst_Image = Mat.Zeros(image_out.Size(), image_out.Type());                                                                                    // black canvas of the same size and type
                    Random rnd       = new Random();
                    for (int j = 0; j < contours.Length; j++)
                    {
                        Scalar color = new Scalar(myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                        //Scalar color = new Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
                        Cv2.DrawContours(dst_Image, contours, j, color, myOPENCV_runlist[i, 4], LineTypes.Link8, hierarchy);             // draw the contour
                    }
                    image_out = dst_Image;
                    break;
                }

                case MyOPENCV.componentdefectdetecting:                                // component defect detection
                {
                    Cv2.CvtColor(image_out, image_out, ColorConversionCodes.RGB2GRAY); // convert to grayscale
                    //Cv2.Blur(image_out, image_out, new OpenCvSharp.Size(2, 2));  // optional smoothing

                    Cv2.Canny(image_out, image_out, 100, 200);              // Canny edge detection

                    OpenCvSharp.Point[][] contours;
                    HierarchyIndex[]      hierarchy;
                    Cv2.FindContours(image_out, out contours, out hierarchy, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0)); // get the contours

                    Mat    dst_Image = Mat.Zeros(image_out.Size(), image_out.Type());                                                                                    // black canvas of the same size and type
                    Random rnd       = new Random();
                    for (int j = 0; j < contours.Length; j++)
                    {
                        Scalar color = new Scalar(myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                        //Scalar color = new Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
                        Cv2.DrawContours(dst_Image, contours, j, color, myOPENCV_runlist[i, 4], LineTypes.Link8, hierarchy);               // draw the contour
                    }


                    // ConvexHull expects a point set, not an image, so compute the hull per contour;
                    // the gap between a contour and its hull marks a candidate defect
                    for (int j = 0; j < contours.Length; j++)
                    {
                        OpenCvSharp.Point[] hull = Cv2.ConvexHull(contours[j]);
                        Cv2.Polylines(dst_Image, new[] { hull }, true, Scalar.White, 1);
                    }
                    image_out = dst_Image;

                    break;
                }

                default: break;
                }
            }
            return(image_out);
        }
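The run list that drives the switch above is a 2-D int array: column 0 presumably selects the operation and columns 1-4 carry its parameters (used variously as mode selectors, BGR color components, or a line width). A hedged sketch of filling it, assuming an int[,] layout and enum casts (the array shape and the dispatch on column 0 are assumptions; the declaration lies outside this excerpt):

        // hypothetical two-step run list: equalize, then draw contours in green
        int[,] myOPENCV_runlist = new int[2, 5];
        myOPENCV_runlist[0, 0] = (int)MyOPENCV.equalizehist;
        myOPENCV_runlist[1, 0] = (int)MyOPENCV.find_draw_contours;
        myOPENCV_runlist[1, 1] = 0;   // B
        myOPENCV_runlist[1, 2] = 255; // G
        myOPENCV_runlist[1, 3] = 0;   // R
        myOPENCV_runlist[1, 4] = 2;   // contour line thickness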
Code example #27
        private static string detectBarcode(string fileName, double thresh, bool debug = false, double rotation = 0)
        {
            Console.WriteLine("\nProcessing: {0}", fileName);

            // load the image and convert it to grayscale
            var image = new Mat(fileName);

            if (rotation != 0)
            {
                rotateImage(image, image, rotation, 1);
            }

            if (debug)
            {
                Cv2.ImShow("Source", image);
                Cv2.WaitKey(1); // do events
            }

            var gray     = new Mat();
            var channels = image.Channels();

            if (channels > 1)
            {
                Cv2.CvtColor(image, gray, ColorConversion.BgrToGray);
            }
            else
            {
                image.CopyTo(gray);
            }


            // compute the Scharr gradient magnitude representation of the images
            // in both the x and y direction
            var gradX = new Mat();

            Cv2.Sobel(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0, ksize: -1);
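            // ksize = -1 selects the 3x3 Scharr kernel (equivalent to the commented-out Scharr call below)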
            //Cv2.Scharr(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0);

            var gradY = new Mat();

            Cv2.Sobel(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1, ksize: -1);
            //Cv2.Scharr(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1);

            // subtract the y-gradient from the x-gradient
            var gradient = new Mat();

            Cv2.Subtract(gradX, gradY, gradient);
            Cv2.ConvertScaleAbs(gradient, gradient);

            if (debug)
            {
                Cv2.ImShow("Gradient", gradient);
                Cv2.WaitKey(1); // do events
            }


            // blur and threshold the image
            var blurred = new Mat();

            Cv2.Blur(gradient, blurred, new Size(9, 9));

            var threshImage = new Mat();

            Cv2.Threshold(blurred, threshImage, thresh, 255, ThresholdType.Binary);

            if (debug)
            {
                Cv2.ImShow("Thresh", threshImage);
                Cv2.WaitKey(1); // do events
            }


            // construct a closing kernel and apply it to the thresholded image
            var kernel = Cv2.GetStructuringElement(StructuringElementShape.Rect, new Size(21, 7));
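            // a kernel much wider (21) than tall (7) lets the closing bridge the gaps between the barcode's vertical bars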
            var closed = new Mat();

            Cv2.MorphologyEx(threshImage, closed, MorphologyOperation.Close, kernel);

            if (debug)
            {
                Cv2.ImShow("Closed", closed);
                Cv2.WaitKey(1); // do events
            }


            // perform a series of erosions and dilations
            Cv2.Erode(closed, closed, null, iterations: 4);
            Cv2.Dilate(closed, closed, null, iterations: 4);

            if (debug)
            {
                Cv2.ImShow("Erode & Dilate", closed);
                Cv2.WaitKey(1); // do events
            }


            //find the contours in the thresholded image, then sort the contours
            //by their area, keeping only the largest one

            Point[][]       contours;
            HiearchyIndex[] hierarchyIndexes;
            Cv2.FindContours(
                closed,
                out contours,
                out hierarchyIndexes,
                mode: ContourRetrieval.CComp,
                method: ContourChain.ApproxSimple);

            if (contours.Length == 0)
            {
                throw new NotSupportedException("Couldn't find any object in the image.");
            }

            var contourIndex       = 0;
            var previousArea       = 0;
            var biggestContourRect = Cv2.BoundingRect(contours[0]);

            // walk this hierarchy level via the Next links, keeping the contour with the largest bounding box
            while (contourIndex >= 0)
            {
                var contour = contours[contourIndex];

                var boundingRect     = Cv2.BoundingRect(contour); //Find bounding rect for each contour
                var boundingRectArea = boundingRect.Width * boundingRect.Height;
                if (boundingRectArea > previousArea)
                {
                    biggestContourRect = boundingRect;
                    previousArea       = boundingRectArea;
                }

                contourIndex = hierarchyIndexes[contourIndex].Next;
            }


            /*biggestContourRect.Width += 10;
             * biggestContourRect.Height += 10;
             * biggestContourRect.Left -= 5;
             * biggestContourRect.Top -= 5;*/


            var barcode = new Mat(image, biggestContourRect); //Crop the image

            Cv2.CvtColor(barcode, barcode, ColorConversion.BgrToGray);

            Cv2.ImShow("Barcode", barcode);
            Cv2.WaitKey(1); // do events

            var barcodeClone = barcode.Clone();
            var barcodeText  = getBarcodeText(barcodeClone);

            if (string.IsNullOrWhiteSpace(barcodeText))
            {
                Console.WriteLine("Enhancing the barcode...");
                //Cv2.AdaptiveThreshold(barcode, barcode, 255,
                //AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 9, 1);
                //var th = 119;
                var th = 100;
                Cv2.Threshold(barcode, barcode, th, 255, ThresholdType.ToZero);
                Cv2.Threshold(barcode, barcode, th, 255, ThresholdType.Binary);
                barcodeText = getBarcodeText(barcode);
            }

            Cv2.Rectangle(image,
                          new Point(biggestContourRect.X, biggestContourRect.Y),
                          new Point(biggestContourRect.X + biggestContourRect.Width, biggestContourRect.Y + biggestContourRect.Height),
                          new Scalar(0, 255, 0),
                          2);

            if (debug)
            {
                Cv2.ImShow("Segmented Source", image);
                Cv2.WaitKey(1); // do events
            }

            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();

            return(barcodeText);
        }
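The getBarcodeText helper called above is not included in this snippet. A minimal sketch of what it might look like, assuming the ZXing.Net package and OpenCvSharp.Extensions are referenced (both the package choice and the AutoRotate setting are assumptions, not the original code):

        // Hypothetical decoder for the cropped barcode Mat, via ZXing.Net
        private static string getBarcodeText(Mat barcode)
        {
            // ZXing consumes System.Drawing bitmaps, so convert the Mat first
            using (var bmp = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(barcode))
            {
                var reader = new ZXing.BarcodeReader { AutoRotate = true };
                var result = reader.Decode(bmp);
                return result?.Text ?? string.Empty;
            }
        }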
Code example #28
 //--------------------------------------------------------------
 public static void blur(Mat src, Mat dst, OpenCvSharp.Size k)
 {
     Cv2.Blur(src, dst, k);
 }
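A box filter replaces each pixel with the unweighted mean of its k-by-k neighbourhood. A typical call of the wrapper (src being any loaded Mat; the variable names are illustrative):

 Mat dst = new Mat();
 blur(src, dst, new OpenCvSharp.Size(5, 5)); // each output pixel = mean of a 5x5 window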
Code example #29
        static void Stop3_Detector(Mat Src, string filename)
        {
            //============================threshold===================
            Int64 OK_NG_Flag = 0;
            //=========================================================
            int threshold_1phase   = 130;
            int threshold_2phase_1 = 38; //35
            int threshold_2phase_2 = 22; //20
            int threshold_2phase_3 = 65;
            int blur_size          = 3;
            int neighbor_degree    = 5;
            //==========================algorithm====================
            Mat vis_rgb = Src.CvtColor(ColorConversionCodes.GRAY2RGB);

            Mat thresh1 = Src.Threshold(70, 255, ThresholdTypes.Binary);

            Mat kernel = Cv2.GetStructuringElement(MorphShapes.Ellipse, new Size(4, 3));

            thresh1 = thresh1.MorphologyEx(MorphTypes.Open, kernel);

            //=========================find contours================
            Point[][]        contours;
            HierarchyIndex[] hierarchly;
            Cv2.FindContours(thresh1, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);

            // find final circle
            List <Point[]> contours_final = new List <Point[]>();
            List <int>     index          = new List <int>();
            int            count          = 0; // running index (foreach provides no loop index)

            foreach (Point[] contour_now in contours)
            {
                //if (np.array(contours[i]).shape[0] > 1500 and cv2.contourArea(contours[i]) < 5000000):
                if (Cv2.ContourArea(contour_now) > 300000 && Cv2.ContourArea(contour_now) < 500000)
                {
                    // NOTE: epsilon is in pixels, so 0.005 barely simplifies anything;
                    // 0.005 * Cv2.ArcLength(contour_now, true) was probably intended
                    Point[] approx = Cv2.ApproxPolyDP(contour_now, 0.005, true);
                    contours_final.Add(approx);
                    index.Add(count);
                }
                count++;
            }
            //==========================================find outer circle==============================================
            Point2f center;
            float   radius;

            if (contours_final.Count == 0)
            {
                Console.WriteLine("No candidate contour found, skipping: " + filename);
                return;
            }
            Cv2.MinEnclosingCircle(contours_final[0], out center, out radius);

            Mat mask_img = Mat.Zeros(Src.Size(), MatType.CV_8UC1);

            Cv2.Circle(mask_img, (OpenCvSharp.Point)center, (int)(radius + 7), 255, thickness: -1);

            // second mask: complement of the disc, used to force everything outside the ROI to 255
            Mat mask_img2 = new Mat(Src.Size(), MatType.CV_8UC1, new Scalar(255)); // initialize the Mat with the value 255

            Cv2.Circle(mask_img2, (OpenCvSharp.Point)center, (int)(radius + 7), 0, thickness: -1);


            Mat image = Mat.Zeros(Src.Size(), MatType.CV_8UC1);

            Src.CopyTo(image, mask_img);

            // force the area outside the ROI to 255 (white), so it cannot register as a valley
            image = image + mask_img2;
            //image.SaveImage("./mask.jpg");
            //============================ ROI isolated; start the measurement algorithm =====================================
            Cv2.Blur(image, image, ksize: new Size(blur_size, blur_size));
            //image.SaveImage("./mask2.jpg");

            //========================== build one ray every 0.5 degrees ===================================================
            double       factor       = Math.PI / 180; // degrees -> radians
            List <Point> outer_index  = new List <Point>();
            List <Point> inner_index  = new List <Point>();
            double       degree_delta = 0.5;
            int          cx           = (int)center.X;
            int          cy           = (int)center.Y;
            int          r            = (int)radius;

            //Console.WriteLine(r + " "+ cx + " "+ cy);

            for (int degree = 0; degree < (360 / degree_delta); degree++)
            {
                double degree_real = degree * degree_delta;
                int    now_x       = (int)((r + 2) * Math.Sin(degree_real * factor)) + cx;
                int    now_y       = (int)((r + 2) * Math.Cos(degree_real * factor)) + cy;
                int    now_inner_x = (int)((r - 50) * Math.Sin(degree_real * factor)) + cx;
                int    now_inner_y = (int)((r - 50) * Math.Cos(degree_real * factor)) + cy;

                inner_index.Add(new Point(now_inner_x, now_inner_y));
                outer_index.Add(new Point(now_x, now_y));
            }
            //========================== scan each ray from inside the rim outward =========================================
            List <int>  all_valley_list         = new List <int>();
            List <int>  all_peak_list           = new List <int>();
            List <int>  all_diff_list           = new List <int>();
            List <int>  Candidate_1_phase_index = new List <int>();
            List <byte> value = new List <byte>();

            for (int degree = 0; degree < (360 / degree_delta); degree++)
            {
                LineIterator Line = new LineIterator(image, inner_index[degree], outer_index[degree]);
                foreach (var lip in Line)
                {
                    value.Add(lip.GetValue <byte>());
                }

                int peak         = 255;
                int valley       = 255;
                int peak_index   = 0;
                int valley_index = 0;


                int temp_valley       = 255;
                int temp_valley_index = 0;
                int max_diff          = 0;

                for (int pts_index = 1; pts_index < value.Count - 1; pts_index++) // neither peak nor valley can sit at the first or last sample
                {
                    if (max_diff < value[pts_index] - temp_valley)
                    {
                        max_diff   = value[pts_index] - temp_valley;
                        peak_index = pts_index;
                        //peak = (value[pts_index]+ value[pts_index-1]+ value[pts_index]+1)/3;
                        peak         = value[pts_index];
                        valley       = temp_valley;
                        valley_index = temp_valley_index;
                    }
                    if (temp_valley > value[pts_index])
                    {
                        //temp_valley = (value[pts_index]+ value[pts_index-1]+ value[pts_index + 1])/3;
                        temp_valley       = (value[pts_index]);
                        temp_valley_index = pts_index;
                    }
                }

                all_peak_list.Add(peak);
                all_valley_list.Add(valley);
                all_diff_list.Add(peak - valley);
                // phase 1: keep rays whose darkest point is bright (valley > 120) or whose peak-valley contrast is below threshold_1phase
                if (valley > 120 || peak - valley < threshold_1phase)
                {
                    Candidate_1_phase_index.Add(degree);
                }

                //Console.WriteLine("Count:"+Candidate_1_phase_index.Count);
                value.Clear();
            }
            // phase 2: compare each candidate with the rays ±neighbor_degree away (720 bins because degree_delta = 0.5);
            // a defect ray has a brighter valley (value1 > 0) and a smaller peak-valley swing (value2 > 0) than both neighbours
            foreach (var candidate_degree in Candidate_1_phase_index)
            {
                //Console.WriteLine(candidate_degree);
                int now_valley_value  = all_valley_list[candidate_degree];
                int prev_valley_value = all_valley_list[(candidate_degree + neighbor_degree + 720) % 720];
                int next_valley_value = all_valley_list[(candidate_degree - neighbor_degree + 720) % 720];

                int now_peak_value  = all_peak_list[candidate_degree];
                int prev_peak_value = all_peak_list[(candidate_degree + neighbor_degree + 720) % 720];
                int next_peak_value = all_peak_list[(candidate_degree - neighbor_degree + 720) % 720];

                int now_peak_valley_difference  = all_diff_list[candidate_degree];
                int prev_peak_valley_difference = all_diff_list[(candidate_degree + neighbor_degree + 720) % 720];
                int next_peak_valley_difference = all_diff_list[(candidate_degree - neighbor_degree + 720) % 720];

                float value1 = ((float)now_valley_value - (float)prev_valley_value) + ((float)now_valley_value - (float)next_valley_value);
                float value2 = ((float)prev_peak_valley_difference - (float)now_peak_valley_difference) + ((float)next_peak_valley_difference - (float)now_peak_valley_difference);
                if ((((value1 > threshold_2phase_1)) && (value2 > threshold_2phase_2)) && (value2 > 0) && (value1 > 0))
                {
                    Console.WriteLine(candidate_degree + " " + now_peak_value + " " + now_valley_value + " " + (now_peak_value - now_valley_value) + " " + value1 + " " + value2);

                    Cv2.Circle(vis_rgb, outer_index[candidate_degree], 30, new Scalar(0, 255, 255), thickness: 5);
                    OK_NG_Flag = 1;
                }
            }

            Console.WriteLine(OK_NG_Flag == 1 ? "NG: defect detected - " + filename : "OK - " + filename);
            vis_rgb.SaveImage("./result/test" + filename);
        }
Code example #30
        static void Main(string[] args)
        {
            Mat flow, cflow, gray, prevgray, img_bgr;

            Point[][]        contours;
            HierarchyIndex[] hierarchy;
            prevgray = new Mat();

            VideoCapture cap = new VideoCapture();

            cap.Open(0);
            int sleepTime = (int)Math.Round(1000 / cap.Fps); // frame interval in ms (not used below)

            using (Window window = new Window("capture"))
                using (Mat frame = new Mat()) // Frame image buffer
                {
                    while (true)
                    {
                        cap.Read(frame);
                        if (frame.Empty())
                        {
                            break;
                        }
                        gray    = new Mat();
                        flow    = new Mat();
                        cflow   = new Mat();
                        img_bgr = new Mat();
                        Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
                        if (prevgray.Empty())
                        {
                            prevgray = gray;
                        }
                        else
                        {
                            Cv2.CalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 5, 16, 3, 5, 1.2, OpticalFlowFlags.FarnebackGaussian);
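                            // Farneback parameters: pyramid scale 0.5, 5 levels, window 16, 3 iterations, polyN 5, polySigma 1.2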
                            Cv2.CvtColor(prevgray, cflow, ColorConversionCodes.GRAY2BGR);
                            drawOptFlowMap(ref flow, ref cflow, 1.5, 16, new Scalar(0, 0, 255));
                            drawHsv(flow, out img_bgr);
                            Mat gray_bgr = new Mat();
                            gray_bgr = Mat.Zeros(frame.Rows, frame.Cols, MatType.CV_8UC1);
                            Cv2.CvtColor(img_bgr, gray_bgr, ColorConversionCodes.BGR2GRAY);
                            Cv2.Normalize(gray_bgr, gray_bgr, 0, 255, NormTypes.MinMax, MatType.CV_8UC1);
                            Cv2.Blur(gray_bgr, gray_bgr, new Size(3, 3));

                            // Detect edges using Threshold
                            Mat img_thresh = new Mat();
                            img_thresh = Mat.Zeros(frame.Rows, frame.Cols, MatType.CV_8UC1);
                            Cv2.Threshold(gray_bgr, img_thresh, 155, 255, ThresholdTypes.BinaryInv);
                            Cv2.FindContours(img_thresh, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);

                            // no moving object in this frame: the loop below simply draws nothing

                            for (int i = 0; i < contours.Length; i++)
                            {
                                Rect box = Cv2.BoundingRect(contours[i]);
                                if (box.Width > 50 && box.Height > 50 && box.Width < 900 && box.Height < 680)
                                {
                                    Cv2.Rectangle(frame,
                                                  box.TopLeft, box.BottomRight,
                                                  new Scalar(0, 255, 0), 4);
                                }
                            }
                            window.Image = frame;
                            Char c = (Char)Cv2.WaitKey(1);
                            if (c == 27) // ESC quits
                            {
                                break;
                            }
                            Swap <Mat>(ref gray, ref prevgray);
                        }
                    }
                }
        }
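The helpers Swap, drawOptFlowMap, and drawHsv referenced above are not part of this snippet. Minimal sketches under the same OpenCvSharp API (the bodies are assumptions reconstructing the usual Farneback demo, not the original code):

        // swap two references (used to roll the frame buffers)
        static void Swap<T>(ref T a, ref T b)
        {
            T tmp = a; a = b; b = tmp;
        }

        // draw a sparse grid of flow vectors over the grayscale preview
        static void drawOptFlowMap(ref Mat flow, ref Mat cflow, double scale, int step, Scalar color)
        {
            for (int y = 0; y < cflow.Rows; y += step)
            {
                for (int x = 0; x < cflow.Cols; x += step)
                {
                    Vec2f fxy = flow.At<Vec2f>(y, x);
                    var p1 = new OpenCvSharp.Point(x, y);
                    var p2 = new OpenCvSharp.Point(x + (int)(fxy.Item0 * scale), y + (int)(fxy.Item1 * scale));
                    Cv2.Line(cflow, p1, p2, color);
                    Cv2.Circle(cflow, p1, 1, color, -1);
                }
            }
        }

        // visualize flow as HSV: hue = direction, value = magnitude
        static void drawHsv(Mat flow, out Mat bgr)
        {
            Mat[] xy = Cv2.Split(flow);
            Mat magnitude = new Mat(), angle = new Mat();
            Cv2.CartToPolar(xy[0], xy[1], magnitude, angle, angleInDegrees: true);

            Cv2.Normalize(magnitude, magnitude, 0, 255, NormTypes.MinMax);
            angle = angle * 0.5; // 8-bit hue range in OpenCV is 0..179

            Mat saturation = new Mat(angle.Size(), MatType.CV_32FC1, new Scalar(255));
            Mat hsv = new Mat();
            Cv2.Merge(new[] { angle, saturation, magnitude }, hsv);
            hsv.ConvertTo(hsv, MatType.CV_8UC3);

            bgr = new Mat();
            Cv2.CvtColor(hsv, bgr, ColorConversionCodes.HSV2BGR);
        }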