Exemple #1
0
        /// <summary>
        /// Reads out the background image.
        /// </summary>
        public Mat backgroundImage()
        {
            double scale = 1.0;

            Cv2.ConvertScaleAbs(background_image, imgR, scale);
            return(this.imgR);
        }
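For reference, a minimal sketch of what Cv2.ConvertScaleAbs computes (the values below are illustrative, not from the example above): dst(x, y) = saturate_cast<byte>(|src(x, y) * alpha + beta|), i.e. scale, shift, take the absolute value, then saturate to the 8-bit range.

        Mat src = new Mat(1, 3, MatType.CV_32FC1, new Scalar(0));
        src.Set<float>(0, 0, -300f);                 // |-300| saturates to 255
        src.Set<float>(0, 1, 100f);                  // passes through unchanged
        src.Set<float>(0, 2, 42.4f);                 // rounds to 42
        Mat dst = new Mat();
        Cv2.ConvertScaleAbs(src, dst, 1.0, 0.0);     // dst is CV_8U: [255, 100, 42]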
Exemple #2
0
        public static void cv_11()  // Edge detection series: the Laplacian operator
        {
            //The Sobel operator takes the first derivative; edge pixels show up as maxima (peaks).
            //This rests on the fact that in edge regions the pixel intensity "jumps", i.e. changes sharply.
            //Taking the first derivative of the intensity, edges appear as maxima.
            //The Laplacian takes the second derivative, which is zero at those same edges.
            //So we can also use this criterion to detect edges in an image.
            //Note, however, that zeros do not occur only at edges (they can appear at other, meaningless locations);
            //this can be handled by filtering where needed.
            //The Laplacian is sensitive to noise, so the image is usually smoothed first.
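            //For reference, the classic 3x3 Laplacian stencil (the aperture OpenCV uses when ksize = 1):
            //    [ 0   1   0 ]
            //    [ 1  -4   1 ]
            //    [ 0   1   0 ]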
            Mat srcImg       = Cv2.ImRead(@"G:\pics\3.jpg");
            Mat LaplacianImg = new Mat();

            Mat gussImage = new Mat();

            Cv2.GaussianBlur(srcImg, gussImage, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);    //Gaussian blur

            Mat grayImage = new Mat();

            Cv2.CvtColor(gussImage, grayImage, ColorConversionCodes.BGR2GRAY); //convert to grayscale (ImRead loads BGR, so BGR2GRAY, not RGB2GRAY)

            Cv2.Laplacian(grayImage, LaplacianImg, MatType.CV_16S, 3);         //Laplacian: compute the second derivative. Parameters: 1, source; 2, destination; 3, desired destination depth (CV_16S keeps the negative responses that the 8U default would clip); 4, aperture size of the second-derivative filter, must be odd.

            Cv2.ConvertScaleAbs(LaplacianImg, LaplacianImg);                   //scale, take the absolute value and convert the result to 8 bits; ImShow can only display 8U images

            Cv2.ImShow("srcImg", srcImg);
            Cv2.ImShow("gussImage", gussImage);
            Cv2.ImShow("dstImg", LaplacianImg);

            Cv2.WaitKey();
        }
Exemple #3
0
        //Watershed segmentation wrapped as a function
        private Mat waterShed(Mat src, int MEDIANBLUR_KSIZE, Size ELEMENT_SIZE)
        {
            var imageGray   = new Mat();
            var thresh      = new Mat();
            var fg          = new Mat();
            var bgt         = new Mat();
            var bg          = new Mat();
            var marker      = new Mat();
            var marker32    = new Mat();
            var m           = new Mat();
            var res         = new Mat();
            var threshOpen  = new Mat();
            var threshClose = new Mat();

            Cv2.CvtColor(src, imageGray, ColorConversionCodes.BGR2GRAY);
            Cv2.EqualizeHist(imageGray, imageGray);                 //histogram equalization
            Cv2.MedianBlur(imageGray, imageGray, MEDIANBLUR_KSIZE); //median filtering
            Cv2.Threshold(imageGray, thresh, 0, 255, ThresholdTypes.Otsu);
            Cv2.Erode(thresh, fg, null, null, 2);
            Cv2.Dilate(thresh, bgt, null, null, 3);
            Cv2.Threshold(bgt, bg, 1, 128, ThresholdTypes.BinaryInv);
            marker = fg + bg;
            marker.ConvertTo(marker32, MatType.CV_32SC1);
            Cv2.Watershed(src, marker32);
            Cv2.ConvertScaleAbs(marker32, m);
            Cv2.Threshold(m, thresh, 0, 255, ThresholdTypes.Otsu);
            var element = Cv2.GetStructuringElement(MorphShapes.Rect, ELEMENT_SIZE); //build the custom structuring element

            Cv2.MorphologyEx(thresh, threshOpen, MorphTypes.Open, element);          //opening
            Cv2.MorphologyEx(threshOpen, threshClose, MorphTypes.Close, element);    //closing
            Cv2.BitwiseAnd(src, src, res, threshClose);
            return(res);
        }
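A minimal usage sketch for the wrapper above (the file name and parameter values are illustrative assumptions):

        Mat src = Cv2.ImRead("coins.jpg");                    // hypothetical input image
        Mat segmented = waterShed(src, 7, new Size(5, 5));    // median ksize and kernel size chosen arbitrarily
        Cv2.ImShow("watershed", segmented);
        Cv2.WaitKey();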
Exemple #4
0
        public Mat BarcodeRegion(Mat src_)
        {
            //Cv2.Resize(src, src, new Size(src.Size().Width / 2, src.Size().Height / 2));
            Mat src = src_.Clone();

            Cv2.CvtColor(src, src, ColorConversionCodes.BGR2GRAY); //assume a BGR input, the OpenCV default
            Cv2.GaussianBlur(src, src, new Size(3, 3), 0);
            Mat img_X = new Mat();
            Mat img_Y = new Mat();

            Cv2.Sobel(src, img_X, MatType.CV_16S, 1, 0);
            Cv2.Sobel(src, img_Y, MatType.CV_16S, 0, 1);

            Cv2.ConvertScaleAbs(img_X, img_X, 1, 0);
            Cv2.ConvertScaleAbs(img_Y, img_Y, 1, 0);

            Mat margin = img_X - img_Y;

            //Cv2.ImShow("img_Y", margin);
            //Cv2.WaitKey();
            Cv2.Resize(margin, margin, new Size(margin.Width * 0.3, margin.Height * 1.5), 0, 0, InterpolationFlags.Area);
            Cv2.Blur(margin, margin, new Size(3, 3));
            Cv2.MedianBlur(margin, margin, 3);

            Mat imgthreshold = new Mat();

            Cv2.Threshold(margin, imgthreshold, 80, 255, ThresholdTypes.Binary);
            //Cv2.AdaptiveThreshold(margin, imgthreshold, 255, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.Binary, 3, -1);
            Cv2.ImShow("thresh", imgthreshold);
            Cv2.WaitKey();

            //Dilate horizontally first to fill the gaps between the bars of the barcode
            Mat element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(5, 1));

            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Dilate, element);
            //Erode vertically to separate the barcode from the characters
            element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(1, 5));
            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Erode, element);

            //Remove the characters
            element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(10, 10));
            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Open, element);
            Cv2.MorphologyEx(imgthreshold, imgthreshold, MorphTypes.Close, element);


            element = Cv2.GetStructuringElement(MorphShapes.Cross, new Size(10, 10));
            Cv2.Erode(imgthreshold, imgthreshold, element);
            Cv2.Erode(imgthreshold, imgthreshold, element);
            Cv2.Dilate(imgthreshold, imgthreshold, element);
            Cv2.Resize(imgthreshold, imgthreshold, new Size(src.Width, src.Height), 0, 0, InterpolationFlags.Area);
            Cv2.ImShow("thresh", imgthreshold);
            Cv2.WaitKey();

            return(imgthreshold);


            //Compute the largest inscribed rectangle of each region, then the black/white area ratio of the image it contains

            //Cv2.Dilate(imgthreshold, imgthreshold, element);
        }
Exemple #5
0
        public static void cv_10()  // Edge detection series: the Sobel operator
        {
            //  The Sobel operator is one of the most important operators in image edge detection.
            //  It is a discrete differentiation operator used to approximate the gradient of the image intensity.
            //  It combines Gaussian smoothing with differentiation,
            //  and is also called a first-order derivative operator: differentiating in the horizontal and vertical directions yields the X- and Y-direction gradient images.
            //  Sobel weights the differences between pixels to amplify them and find edges more reliably. The two kernels below are symmetric.
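            //      Gx = [ -1  0  +1 ]          Gy = [ -1  -2  -1 ]
            //           [ -2  0  +2 ]               [  0   0   0 ]
            //           [ -1  0  +1 ]               [ +1  +2  +1 ]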
            Mat src_img = Cv2.ImRead(@"G:\pics\4.jpg");

            Mat dst = new Mat();

            Cv2.GaussianBlur(src_img, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);   //Gaussian smoothing; Sobel is sensitive to noise, so denoise first

            Mat grayImage = new Mat();

            Cv2.CvtColor(dst, grayImage, ColorConversionCodes.BGR2GRAY);   //convert to grayscale

            Mat X = new Mat();
            Mat Y = new Mat();

            Cv2.Sobel(grayImage, X, MatType.CV_16S, 1, 0, 3); //Sobel edge detection. Parameters: 1, input; 2, output X-direction gradient image; 3, output depth; 4, derivative order in X; 5, derivative order in Y; 6, kernel size, must be odd.
            Cv2.Sobel(grayImage, Y, MatType.CV_16S, 0, 1, 3); //output Y-direction gradient image

            Cv2.ConvertScaleAbs(X, X);                        //scale, take the absolute value and convert to 8 bits; ImShow can only display 8U images
            Cv2.ConvertScaleAbs(Y, Y);

            Cv2.ImShow("src_img", src_img);
            Cv2.ImShow("X方向梯度图", X);
            Cv2.ImShow("Y方向梯度图", Y);

            int width  = X.Cols;
            int height = Y.Rows;
            Mat output = new Mat(X.Size(), X.Type());

            for (int x = 0; x < height; x++)    //merge X and Y: G = sqrt(Gx*Gx + Gy*Gy)
            {
                for (int y = 0; y < width; y++)
                {
                    int xg = X.At <byte>(x, y);
                    int yg = Y.At <byte>(x, y);

                    double v1  = Math.Pow(xg, 2);         //square
                    double v2  = Math.Pow(yg, 2);
                    int    val = (int)Math.Sqrt(v1 + v2); //square root
                    if (val > 255)                        //clamp the pixel value to the 0..255 range
                    {
                        val = 255;
                    }
                    if (val < 0)
                    {
                        val = 0;
                    }
                    byte xy = (byte)val;
                    output.Set <byte>(x, y, xy);
                }
            }
            Cv2.ImShow("outputX+Y", output);
            Cv2.WaitKey();
        }
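The per-pixel loop above can be replaced by a single vectorized call; a minimal sketch, assuming the gradients are recomputed in float precision (Cv2.Magnitude requires CV_32F inputs):

        Mat Xf = new Mat(), Yf = new Mat(), mag = new Mat();
        Cv2.Sobel(grayImage, Xf, MatType.CV_32F, 1, 0, 3);
        Cv2.Sobel(grayImage, Yf, MatType.CV_32F, 0, 1, 3);
        Cv2.Magnitude(Xf, Yf, mag);        // per-element sqrt(Gx*Gx + Gy*Gy)
        Cv2.ConvertScaleAbs(mag, mag);     // back to 8U for display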
Exemple #6
0
 public void SaveAvgImage()
 {
     using (Mat img_avg = img_mask.Clone())
     {
         Cv2.ConvertScaleAbs(imgAvg, img_avg, 1.0, 0.0);
         img_avg.SaveImage("AvgImage.png");
     }
 }
Exemple #7
0
        private void CreatImg()
        {
            if (Filter.SelectedIndex == 0 || Filter.SelectedIndex == -1)
            {
                for (int i = 0; i < 8640; i = i + 4)
                {
                    _iniImage[i / 4] = BitConverter.ToSingle(_recvBuf, i);
                }

                for (int i = _imGray.Height / 2; i < _imGray.Height; i++)
                {
                    for (int j = 0; j < _imGray.Width; j++)
                    {
                        float temp = _iniImage[(i - 27) + j * 27] * 255 / 50000;
                        //float temp = iniImage[i - 27 + (j + 8) * 27] * iniImage[i - 27 + (j + 8) * 27] * iniImage[i - 27 + (j + 8) * 27];
                        _imGray.Set <float>(i, j, temp);
                    }
                }

                Cv2.Resize(_imGray, imGrayResult2, new OpenCvSharp.Size(600, 300));
            }
            else
            {
                for (int i = 0; i < 8640; i = i + 4)
                {
                    _iniImage[i / 4] = BitConverter.ToSingle(_recvBuf, i);
                }

                for (int i = _imGray.Height / 2; i < _imGray.Height; i++)
                {
                    for (int j = 0; j < _imGray.Width; j++)
                    {
                        float temp = _iniImage[(i - 27) + j * 27] * 255 / 10000;

                        _imGray.Set <float>(i, j, temp);
                    }
                }

                Cv2.Resize(_imGray, imGrayResult2, new OpenCvSharp.Size(600, 300));
            }

            Cv2.ConvertScaleAbs(imGrayResult2, imGrayResult1);

            Point2f point2F = new Point2f(imGrayResult1.Width / 2, imGrayResult1.Height);

            Cv2.LinearPolar(imGrayResult1, recover, point2F, 300, InterpolationFlags.WarpInverseMap);
            Cv2.ApplyColorMap(recover, dst, ColormapTypes.Jet);

            Cv2.ImShow("Demo", dst);
        }
Exemple #8
0
    private void MakeEdgeMat()
    {
        Mat sobelX = new Mat();

        Cv2.Sobel(plantSegmentasionImage, sobelX, MatType.CV_16S, 1, 0, 3);
        Cv2.ConvertScaleAbs(sobelX, sobelX);

        Mat sobelY = new Mat();

        Cv2.Sobel(plantSegmentasionImage, sobelY, MatType.CV_16S, 0, 1, 3);
        Cv2.ConvertScaleAbs(sobelY, sobelY);

        plantEdges = new Mat();
        Cv2.AddWeighted(sobelX, 0.5, sobelY, 0.5, 0, plantEdges);
    }
Exemple #9
0
        static Mat DogContrastBinalize(Mat image, int kernel = 51, int threshold = 100, ThresholdType thtype = ThresholdType.Binary)
        {
            Mat img = DifferenceOfGaussian(image, kernel);

            double Max_kido;
            double Min_kido;

            OpenCvSharp.CPlusPlus.Point maxloc;
            OpenCvSharp.CPlusPlus.Point minloc;
            Cv2.MinMaxLoc(img, out Min_kido, out Max_kido, out minloc, out maxloc);

            Cv2.ConvertScaleAbs(img, img, 255 / (Max_kido - Min_kido), -255 * Min_kido / (Max_kido - Min_kido));
            Cv2.Threshold(img, img, threshold, 1, thtype);

            return(img);
        }
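In the ConvertScaleAbs call above, alpha = 255 / (Max_kido - Min_kido) and beta = -255 * Min_kido / (Max_kido - Min_kido), so every value v maps to 255 * (v - Min_kido) / (Max_kido - Min_kido): a min-max stretch to the full 8-bit range. A sketch of the same stretch using the built-in normalization (written against the newer OpenCvSharp API used elsewhere on this page):

        Mat stretched = new Mat();
        Cv2.Normalize(img, stretched, 0, 255, NormTypes.MinMax);  // same linear map onto [0, 255]
        stretched.ConvertTo(stretched, MatType.CV_8U);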
Exemple #10
0
        private void mnuFilterEdgeSobel_Click(object sender, EventArgs e)
        {
            // Sobel filter

            using (var xSobel = new Mat(_matDisp.Size(), MatType.CV_16S))
                using (var ySobel = new Mat(_matDisp.Size(), MatType.CV_16S))
                {
                    // derivative in the X direction
                    Cv2.Sobel(_matDisp, xSobel, MatType.CV_16S, 1, 0);
                    // derivative in the Y direction
                    Cv2.Sobel(_matDisp, ySobel, MatType.CV_16S, 0, 1);
                    // add the X and Y components together
                    Cv2.ConvertScaleAbs((xSobel + ySobel), _matDisp);
                }
            // draw the image
            DrawMatImage(_matDisp);
        }
Exemple #11
0
        /// <summary>
        /// Saves the image.
        /// </summary>
        /// <remarks>
        /// Releases the video writer.
        /// </remarks>
        public void VideoWriterRelease()
        {
            if (vw != null)
            {
                double scale = 1.0;
                Cv2.ConvertScaleAbs(background_image, imgR, scale);
                vw.Write(imgR);
                vw.Dispose();
            }
            System.IO.File.AppendAllText(log_fn, appendText);
            appendText = "";

            //     if (writer != null)
            //     {
            //         writer.Close();
            //     }
        }
Exemple #12
0
        public void Run()
        {
            //https://drive.google.com/file/d/1rc13wZ9zC03ObG5zB3uccUtsg_rsI8hC/view
            VideoCapture cap = VideoCapture.FromFile("Input.mp4");

            Mat avg    = new Mat();
            Mat output = new Mat();

            while (true)
            {
                Mat frame = new Mat();
                // Capture frame-by-frame
                cap.Read(frame);

                // If the frame is empty, break immediately
                if (frame.Empty())
                {
                    break;
                }

                if (cap.Get(CaptureProperty.PosFrames) == 1)
                {
                    frame.ConvertTo(avg, MatType.CV_32F);
                }

                Cv2.AccumulateWeighted(frame, avg, 0.0005, null);
                Cv2.ConvertScaleAbs(avg, output);

                Cv2.ImShow("output", output);
                // Press  ESC on keyboard to exit
                char c = (char)Cv2.WaitKey(25);
                if (c == 27)
                {
                    break;
                }
                frame.Release();
            }

            // When everything done, release the video capture object
            cap.Release();
            avg.Release();
            output.Release();
            // Closes all the frames
            Cv2.DestroyAllWindows();
        }
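The running average kept by AccumulateWeighted acts as a background model. A sketch of how a foreground mask could be derived from it inside the loop (the threshold value of 25 is an illustrative assumption):

        Mat diff = new Mat();
        Cv2.Absdiff(frame, output, diff);                          // distance from the background model
        Cv2.CvtColor(diff, diff, ColorConversionCodes.BGR2GRAY);
        Cv2.Threshold(diff, diff, 25, 255, ThresholdTypes.Binary); // hypothetical threshold
        Cv2.ImShow("foreground", diff);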
Exemple #13
0
            public void EdgeDetect(string fileName)
            {
                Mat srcMat = Cv2.ImRead(fileName);

                Mat srcBlue = srcMat.Split()[0];

                Mat cannyMat  = new Mat();
                Mat sobelMat  = new Mat();
                Mat LaplasMat = new Mat();

                Cv2.Canny(srcBlue, cannyMat, 50, 200, 3);

                //Cv2.ConvertScaleAbs()

                Cv2.Sobel(srcBlue, sobelMat, -1, 1, 1, 3); // mixed first derivative; ksize must be >= 3 when dx and dy are both 1, and -1 keeps the source depth

                Cv2.Laplacian(srcBlue, LaplasMat, 3);
                Cv2.ConvertScaleAbs(LaplasMat, LaplasMat, 1, 0);

                ImShow(srcBlue);
                ImShow(cannyMat);
                ImShow(sobelMat);
                ImShow(LaplasMat);

                Mat merge = Mat.Zeros(srcMat.Size(), MatType.CV_8U); // must start from zeros, BitwiseOr accumulates into it

                foreach (Mat tm in srcMat.Split())
                {
                    Mat tmm = new Mat();
                    //Cv2.Canny(tm, tmm, 10, 250, 3);
                    Cv2.Laplacian(tm, tmm, 3);
                    Cv2.ConvertScaleAbs(tmm, tmm, 1, 0);
                    ImShow(tmm);
                    Cv2.Threshold(tmm, tmm, 0.5, 255, ThresholdTypes.Binary);

                    Cv2.BitwiseOr(merge, tmm, merge);
                }

                Cv2.MorphologyEx(merge, merge, MorphTypes.Erode, Cv2.GetStructuringElement(MorphShapes.Rect, new OpenCvSharp.Size(3, 3), new OpenCvSharp.Point(1, 1)));

                ImShow(merge);

                Cv2.WaitKey();
            }
Exemple #14
0
        private static void Sobel(string path)
        {
            using (Mat src = new Mat(path, ImreadModes.AnyColor | ImreadModes.AnyDepth))
            {
                //1: Gaussian blur smoothing
                Mat dst = new Mat();
                Cv2.GaussianBlur(src, dst, new OpenCvSharp.Size(3, 3), 0, 0, BorderTypes.Default);
                //convert to grayscale
                //Mat gray = new Mat();
                //Cv2.CvtColor(dst, gray, ColorConversionCodes.BGR2GRAY);

                MatType m = src.Type();

                //compute the X- and Y-direction gradients (Sobel and Scharr)
                Mat xgrad = new Mat();
                Mat ygrad = new Mat();
                Cv2.Sobel(src, xgrad, MatType.CV_16S, 1, 0, 3);
                Cv2.Sobel(src, ygrad, MatType.CV_16S, 0, 1, 3);

                Cv2.ConvertScaleAbs(xgrad, xgrad);//scale, take the absolute value and convert to 8 bits; without the conversion nothing shows, ImShow can only display 8U images
                Cv2.ConvertScaleAbs(ygrad, ygrad);

                //stronger edge response
                //Cv2.Scharr(gray, xgrad, -1, 1, 0, 3);
                //Cv2.Scharr(gray, ygrad, -1, 0, 1, 3);

                Mat output = new Mat(xgrad.Size(), xgrad.Type());
                //blend the images with weights 0.5; not precise
                //Cv2.AddWeighted(xgrad, 0.5, ygrad, 0.5, 0, output);

                //based on G = |Gx| + |Gy|
                int width  = xgrad.Cols;
                int height = xgrad.Rows;

                //based on G = sqrt(Gx*Gx + Gy*Gy)
                for (int x = 0; x < height; x++)
                {
                    for (int y = 0; y < width; y++)
                    {
                        int xg = xgrad.At <byte>(x, y);
                        int yg = ygrad.At <byte>(x, y);
                        //byte xy =(byte) (xg + yg);
                        double v1  = Math.Pow(xg, 2);
                        double v2  = Math.Pow(yg, 2);
                        int    val = (int)Math.Sqrt(v1 + v2);
                        if (val > 255) //clamp the pixel value to the 0..255 range
                        {
                            val = 255;
                        }
                        if (val < 0)
                        {
                            val = 0;
                        }
                        byte xy = (byte)val;
                        output.Set <byte>(x, y, xy);
                    }
                }
                using (new OpenCvSharp.Window("X Image", WindowMode.Normal, xgrad));
                //using (new Window("Y Image", WindowMode.Normal, ygrad))
                //using (new Window("OUTPUT Image", WindowMode.Normal, output))
                //using (new Window("SRC", WindowMode.Normal, src))
            }
        }
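The comment above names the cheaper L1 form G = |Gx| + |Gy|, but the loop implements the L2 form. On the already-rectified 8-bit gradients the L1 form is a single saturating call; a sketch:

        Cv2.Add(xgrad, ygrad, output);   // per-element |Gx| + |Gy|, saturating at 255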
Exemple #15
0
        // https://github.com/VahidN/OpenCVSharp-Samples/blob/master/OpenCVSharpSample19/Program.cs
        public static void Gradient(Mat src)
        {
            var gray     = new Mat();
            var channels = src.Channels();

            if (channels > 1)
            {
                Cv2.CvtColor(src, gray, ColorConversionCodes.BGRA2GRAY);
            }
            else
            {
                src.CopyTo(gray);
            }


            // compute the Scharr gradient magnitude representation of the images
            // in both the x and y direction
            var gradX = new Mat();

            Cv2.Sobel(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0, ksize: -1);
            //Cv2.Scharr(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0);
            Cv2.ImShow("gradX", gradX);

            var gradY = new Mat();

            Cv2.Sobel(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1, ksize: -1);
            //Cv2.Scharr(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1);
            Cv2.ImShow("gradY", gradY);

            // subtract the y-gradient from the x-gradient
            var gradient = new Mat();

            Cv2.Subtract(gradX, gradY, gradient);
            Cv2.ConvertScaleAbs(gradient, gradient);

            Cv2.ImShow("Gradient", gradient);


            // blur and threshold the image
            var blurred = new Mat();

            Cv2.Blur(gradient, blurred, new Size(9, 9));

            double thresh      = 127.0;
            var    threshImage = new Mat();

            Cv2.Threshold(blurred, threshImage, thresh, 255, ThresholdTypes.Binary);


            bool debug = true;

            if (debug)
            {
                Cv2.ImShow("Thresh", threshImage);
                Cv2.WaitKey(1); // do events
            }


            // construct a closing kernel and apply it to the thresholded image
            var kernel = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(21, 7));
            var closed = new Mat();

            Cv2.MorphologyEx(threshImage, closed, MorphTypes.Close, kernel);

            if (debug)
            {
                Cv2.ImShow("Closed", closed);
                Cv2.WaitKey(1); // do events
            }


            // perform a series of erosions and dilations
            Cv2.Erode(closed, closed, null, iterations: 4);
            Cv2.Dilate(closed, closed, null, iterations: 4);

            if (debug)
            {
                Cv2.ImShow("Erode & Dilate", closed);
                Cv2.WaitKey(1); // do events
            }


            //find the contours in the thresholded image, then sort the contours
            //by their area, keeping only the largest one

            Point[][]        contours;
            HierarchyIndex[] hierarchyIndexes;
            Cv2.FindContours(
                closed,
                out contours,
                out hierarchyIndexes,
                mode: RetrievalModes.CComp,
                method: ContourApproximationModes.ApproxSimple);

            if (contours.Length == 0)
            {
                throw new NotSupportedException("Couldn't find any object in the image.");
            }

            var contourIndex       = 0;
            var previousArea       = 0;
            var biggestContourRect = Cv2.BoundingRect(contours[0]);

            while ((contourIndex >= 0))
            {
                var contour = contours[contourIndex];

                var boundingRect     = Cv2.BoundingRect(contour); //Find bounding rect for each contour
                var boundingRectArea = boundingRect.Width * boundingRect.Height;
                if (boundingRectArea > previousArea)
                {
                    biggestContourRect = boundingRect;
                    previousArea       = boundingRectArea;
                }

                contourIndex = hierarchyIndexes[contourIndex].Next;
            }
        }
Exemple #16
0
        private void Form1_Load(object sender, EventArgs e)
        {
            OpenFileDialog openFileDialog = new OpenFileDialog();

            openFileDialog.Filter = "Image Files(*.PNG;*.JPG)|*.PNG;*.JPG|All Files(*.*)|*.*";
            if (openFileDialog.ShowDialog() == DialogResult.OK)
            {
                inputImage        = new Mat(openFileDialog.FileName);
                pictureBox1.Image = BitmapConverter.ToBitmap(inputImage);
            }

            Cv2.CvtColor(inputImage, grayscaleImage, ColorConversionCodes.BGR2GRAY); // Mat(fileName) loads 3-channel BGR, so BGRA2GRAY would throw
            pictureBox2.Image = BitmapConverter.ToBitmap(grayscaleImage);

            Cv2.Sobel(grayscaleImage, gradientXImage, MatType.CV_16S, 1, 0);
            Cv2.Sobel(grayscaleImage, gradientYImage, MatType.CV_16S, 0, 1);

            Cv2.ConvertScaleAbs(gradientXImage, gradientXImage);
            Cv2.ConvertScaleAbs(gradientYImage, gradientYImage);

            Cv2.AddWeighted(gradientXImage, 1.0, gradientYImage, 1.0, 0, gradientImage);

            Cv2.Canny(grayscaleImage, cannyImage, 50, 200);
            pictureBox3.Image = BitmapConverter.ToBitmap(cannyImage);

            Cv2.DistanceTransform(1 - cannyImage, distance, DistanceTypes.L2, DistanceMaskSize.Mask3);

            Cv2.Normalize(distance, distance, 0, 1.0, NormTypes.MinMax);
            pictureBox4.Image = BitmapConverter.ToBitmap(distance);

            Cv2.Integral(inputImage, integrateImage, MatType.CV_32F);
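            // With the integral image, the sum over any axis-aligned rectangle costs four lookups:
            //   sum(rect) = II(bottom-right) + II(top-left) - II(bottom-left) - II(top-right)
            // The loop below uses this to box-blur each pixel with a radius driven by the distance map.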

            for (int i = 0; i < filterImage.Width; i++)
            {
                for (int j = 0; j < filterImage.Height; j++)
                {
                    int size = (int)(10 * distance.Get <float>(i, j));
                    if (size >= 1)
                    {
                        int pixelsCount = ((Clamp(i + size, 0, integrateImage.Width - 1) - Clamp(i - size, 0, integrateImage.Width - 1)) *
                                           (Clamp(j + size, 0, integrateImage.Height - 1) - Clamp(j - size, 0, integrateImage.Height - 1)));

                        var p0 = new OpenCvSharp.Point(Clamp(i - size, 0, integrateImage.Width - 1), Clamp(j - size, 0, integrateImage.Height - 1));
                        var p1 = new OpenCvSharp.Point(Clamp(i + size, 0, integrateImage.Width - 1), Clamp(j + size, 0, integrateImage.Height - 1));
                        var p2 = new OpenCvSharp.Point(Clamp(i - size, 0, integrateImage.Width - 1), Clamp(j + size, 0, integrateImage.Height - 1));
                        var p3 = new OpenCvSharp.Point(Clamp(i + size, 0, integrateImage.Width - 1), Clamp(j - size, 0, integrateImage.Height - 1));

                        filterImage.Set <Vec3b>(i, j, new Vec3b((byte)((integrateImage.Get <Vec3f>(p0.X, p0.Y).Item0
                                                                        + integrateImage.Get <Vec3f>(p1.X, p1.Y).Item0 - integrateImage.Get <Vec3f>(p2.X, p2.Y).Item0
                                                                        - integrateImage.Get <Vec3f>(p3.X, p3.Y).Item0) / pixelsCount),
                                                                (byte)((integrateImage.Get <Vec3f>(p0.X, p0.Y).Item1 + integrateImage.Get <Vec3f>(p1.X, p1.Y).Item1
                                                                        - integrateImage.Get <Vec3f>(p2.X, p2.Y).Item1 - integrateImage.Get <Vec3f>(p3.X, p3.Y).Item1)
                                                                       / pixelsCount),
                                                                (byte)((integrateImage.Get <Vec3f>(p0.X, p0.Y).Item2 + integrateImage.Get <Vec3f>(p1.X, p1.Y).Item2
                                                                        - integrateImage.Get <Vec3f>(p2.X, p2.Y).Item2 - integrateImage.Get <Vec3f>(p3.X, p3.Y).Item2)
                                                                       / pixelsCount)));
                    }
                    else
                    {
                        filterImage.Set <Vec3b>(i, j, inputImage.Get <Vec3b>(i, j));
                    }
                }
            }
            pictureBox5.Image = BitmapConverter.ToBitmap(filterImage);
        }
Exemple #17
0
        // Main body: the API dispatch for the image-processing operations
        private Mat myOPENCV_run(Mat image_in, Mat image_out)
        {
            image_out = image_in;                          // pass the input image through to the output
            for (int i = 0; i < listBox2.Items.Count; i++) //run the operations queued in listBox2
            {
                switch ((MyOPENCV)myOPENCV_runlist[i, 0])  // the operation selected in listBox2
                {
                case MyOPENCV.cvt_color:                   //color conversion (input, output, conversion code)
                {
                    Cv2.CvtColor(image_out, image_out, (ColorConversionCodes)myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2]);
                    break;
                }

                case MyOPENCV.boxfilter:    //box filter
                {
                    OpenCvSharp.Size size;
                    size.Width  = myOPENCV_runlist[i, 2];
                    size.Height = myOPENCV_runlist[i, 3];
                    Cv2.BoxFilter(image_out, image_out, myOPENCV_runlist[i, 1], size);
                    break;
                }

                case MyOPENCV.blur:     //mean filter
                {
                    OpenCvSharp.Size size;
                    size.Width  = myOPENCV_runlist[i, 1];
                    size.Height = myOPENCV_runlist[i, 2];
                    Cv2.Blur(image_out, image_out, size);
                    break;
                }

                case MyOPENCV.gaussianblur:      // Gaussian filter
                {
                    OpenCvSharp.Size size;
                    double           sigmaX, sigmaY;
                    size.Width  = myOPENCV_runlist[i, 1];
                    size.Height = myOPENCV_runlist[i, 2];
                    sigmaX      = (double)myOPENCV_runlist[i, 3];
                    sigmaY      = (double)myOPENCV_runlist[i, 4];

                    Cv2.GaussianBlur(image_out, image_out, size, sigmaX, sigmaY);
                    break;
                }

                case MyOPENCV.medianblur:    //median filter
                {
                    Cv2.MedianBlur(image_in, image_out, myOPENCV_runlist[i, 1]);
                    break;
                }

                case MyOPENCV.bilateralfilter:    //bilateral filter
                {
                    Mat    image_out2 = new Mat();
                    double sigmaColor, sigmaSpace;
                    sigmaColor = (double)myOPENCV_runlist[i, 2] * 2;
                    sigmaSpace = (double)myOPENCV_runlist[i, 3] / 2;
                    Cv2.BilateralFilter(image_out, image_out2, myOPENCV_runlist[i, 1], sigmaColor, sigmaSpace);
                    image_out = image_out2;
                    break;
                }

                case MyOPENCV.dilate:    //dilation
                {
                    Mat image_element = new Mat();
                    OpenCvSharp.Size size;
                    size.Width    = myOPENCV_runlist[i, 2];
                    size.Height   = myOPENCV_runlist[i, 3];
                    image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 1], size);
                    Cv2.Dilate(image_out, image_out, image_element);
                    break;
                }

                case MyOPENCV.erode:    //erosion
                {
                    Mat image_element = new Mat();
                    OpenCvSharp.Size size;
                    size.Width    = myOPENCV_runlist[i, 2];
                    size.Height   = myOPENCV_runlist[i, 3];
                    image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 1], size);
                    Cv2.Erode(image_out, image_out, image_element);
                    break;
                }

                case MyOPENCV.morphologyex:    //advanced morphological transform
                {
                    Mat image_element = new Mat();
                    OpenCvSharp.Size size;
                    size.Width    = myOPENCV_runlist[i, 3];
                    size.Height   = myOPENCV_runlist[i, 4];
                    image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 2], size);
                    Cv2.MorphologyEx(image_out, image_out, (MorphTypes)myOPENCV_runlist[i, 1], image_element);
                    break;
                }

                case MyOPENCV.floodfill:    //flood fill
                {
                    OpenCvSharp.Point point;
                    point.X = myOPENCV_runlist[i, 1];
                    point.Y = myOPENCV_runlist[i, 2];
                    OpenCvSharp.Scalar scalar;
                    scalar = myOPENCV_runlist[i, 3];
                    Cv2.FloodFill(image_out, point, scalar);
                    break;
                }

                case MyOPENCV.pyrup:    //pyramid upscale
                {
                    OpenCvSharp.Size size;
                    size.Width  = image_out.Cols * 2;
                    size.Height = image_out.Rows * 2;
                    Cv2.PyrUp(image_out, image_out, size);
                    break;
                }

                case MyOPENCV.pyrdown:    //pyramid downscale
                {
                    OpenCvSharp.Size size;
                    size.Width  = image_out.Cols / 2;
                    size.Height = image_out.Rows / 2;
                    Cv2.PyrDown(image_out, image_out, size);
                    break;
                }

                case MyOPENCV.resize:    //resize
                {
                    OpenCvSharp.Size   size;
                    InterpolationFlags interpolationFlags;
                    size.Width         = image_out.Cols * myOPENCV_runlist[i, 1] / 10;
                    size.Height        = image_out.Rows * myOPENCV_runlist[i, 2] / 10;
                    interpolationFlags = (InterpolationFlags)myOPENCV_runlist[i, 3];
                    Cv2.Resize(image_out, image_out, size, 0, 0, interpolationFlags);
                    break;
                }

                case MyOPENCV.threshold:    //fixed thresholding
                {
                    Cv2.Threshold(image_out, image_out, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], (ThresholdTypes)myOPENCV_runlist[i, 3]);
                    break;
                }

                case MyOPENCV.canny:    //Canny edge detection
                {
                    Mat image_out2 = new Mat();
                    Cv2.Canny(image_out, image_out2, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    image_out = image_out2;
                    break;
                }

                case MyOPENCV.sobel:    //Sobel edge detection
                {
                    Cv2.Sobel(image_out, image_out, -1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    break;
                }

                case MyOPENCV.laplacian:    //Laplacian edge detection
                {
                    myOPENCV_runlist[i, 1] = 0;
                    Cv2.Laplacian(image_out, image_out, 0, myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    break;
                }

                case MyOPENCV.scharr:    //Scharr edge detection
                {
                    Cv2.Scharr(image_out, image_out, -1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2]);
                    break;
                }

                case MyOPENCV.convertscaleabs:    //fast image enhancement
                {
                    double alpha, beta;
                    alpha = (double)myOPENCV_runlist[i, 1] / 10;
                    beta  = (double)myOPENCV_runlist[i, 2] / 10;
                    Cv2.ConvertScaleAbs(image_out, image_out, alpha, beta);
                    break;
                }

                case MyOPENCV.addweighted:    //image blending
                {
                    Mat    image_in2 = new Mat(my_imagesource2);
                    double alpha, beta, gamma;
                    alpha = (double)myOPENCV_runlist[i, 1] / 10;
                    beta  = (double)myOPENCV_runlist[i, 2] / 10;
                    gamma = (double)myOPENCV_runlist[i, 3] / 10;
                    Cv2.AddWeighted(image_out, alpha, image_in2, beta, gamma, image_out);
                    break;
                }

                case MyOPENCV.houghlines:                                     //standard Hough transform
                {
                    Scalar             scalar = new Scalar(0x00, 0xFF, 0x00); //green
                    LineSegmentPolar[] lines;
                    OpenCvSharp.Size   size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                    Mat image_out3          = new Mat(size, MatType.CV_8UC3);
                    lines = Cv2.HoughLines(image_out, 1, Cv2.PI / 180, myOPENCV_runlist[i, 1]);
                    for (int ii = 0; ii < lines.Length; ii++)
                    {
                        //double rho, theta;
                        OpenCvSharp.Point pt1, pt2;
                        double            a = Math.Cos(lines[ii].Theta), b = Math.Sin(lines[ii].Theta);
                        double            x0 = a * lines[ii].Rho, y0 = b * lines[ii].Rho;
                        pt1.X = (int)Math.Round(x0 + 1000 * (-b));
                        pt1.Y = (int)Math.Round(y0 + 1000 * (a));
                        pt2.X = (int)Math.Round(x0 - 1000 * (-b));
                        pt2.Y = (int)Math.Round(y0 - 1000 * (a));
                        Cv2.Line(image_out3, pt1, pt2, scalar, 1, LineTypes.AntiAlias);
                    }
                    if (myOPENCV_runlist[i, 2] == 0)
                    {
                        Cv2.AddWeighted(image_out3, (double)myOPENCV_runlist[i, 3] / 10, image_in, (double)myOPENCV_runlist[i, 4] / 10, 0, image_out);
                    }
                    else
                    {
                        image_out = image_out3;
                    }
                    break;
                }

                case MyOPENCV.houghlinep:                                     //probabilistic Hough transform
                {
                    Scalar             scalar = new Scalar(0x00, 0xFF, 0x00); //green
                    LineSegmentPoint[] lines;                                 // detected line segments
                    OpenCvSharp.Size   size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                    Mat image_out3          = new Mat(size, MatType.CV_8UC3);
                    lines = Cv2.HoughLinesP(image_out, 1, Cv2.PI / 180, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 3], myOPENCV_runlist[i, 4]);
                    for (int ii = 0; ii < lines.Length; ii++)
                    {
                        OpenCvSharp.Point point1, point2;
                        point1.X = lines[ii].P1.X;
                        point1.Y = lines[ii].P1.Y;
                        point2.X = lines[ii].P2.X;
                        point2.Y = lines[ii].P2.Y;
                        Cv2.Line(image_out3, point1, point2, scalar, 1, LineTypes.AntiAlias);
                    }
                    if (myOPENCV_runlist[i, 2] == 0)
                    {
                        Cv2.AddWeighted(image_out3, 1, image_in, 0.8, 0, image_out);
                    }
                    else
                    {
                        image_out = image_out3;
                    }
                    break;
                }

                case MyOPENCV.houghcircles:                                 //Hough circle transform
                {
                    Scalar           scalar = new Scalar(0x00, 0xFF, 0x00); //green
                    CircleSegment[]  circles;
                    OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                    Mat image_out3        = new Mat(size, MatType.CV_8UC3);
                    circles = Cv2.HoughCircles(image_out, HoughMethods.Gradient, 1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3], 0, myOPENCV_runlist[i, 4]);
                    for (int ii = 0; ii < circles.Length; ii++)
                    {
                        OpenCvSharp.Point center;
                        center.X = (int)Math.Round(circles[ii].Center.X);
                        center.Y = (int)Math.Round(circles[ii].Center.Y);
                        int radius = (int)Math.Round(circles[ii].Radius);
                        Cv2.Circle(image_out3, center, radius, scalar);
                    }
                    Cv2.AddWeighted(image_out3, 1, image_in, 0.6, 0, image_out);

                    break;
                }

                case MyOPENCV.remap:    //remapping
                {
                    OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);

                    Mat map_x = new Mat(size, MatType.CV_32FC1), map_y = new Mat(size, MatType.CV_32FC1);
                    for (int ii = 0; ii < image_out.Rows; ii++)
                    {
                        for (int jj = 0; jj < image_out.Cols; jj++)
                        {
                            if (myOPENCV_runlist[i, 1] == 0)
                            {
                                map_x.Set <float>(ii, jj, jj);                  //flip vertically
                                map_y.Set <float>(ii, jj, image_out.Rows - ii); //flip vertically
                            }
                            else if (myOPENCV_runlist[i, 1] == 1)
                            {
                                map_x.Set <float>(ii, jj, image_out.Cols - jj); //flip horizontally
                                map_y.Set <float>(ii, jj, ii);                  //flip horizontally
                            }
                            else if (myOPENCV_runlist[i, 1] == 2)
                            {
                                map_x.Set <float>(ii, jj, image_out.Cols - jj);       //flip both vertically and horizontally
                                map_y.Set <float>(ii, jj, image_out.Rows - ii);       //flip both vertically and horizontally
                            }
                            else if (myOPENCV_runlist[i, 1] == 3)
                            {
                                map_x.Set <float>(ii, jj, (float)myOPENCV_runlist[i, 2] / 10 * jj);       //scale up or down
                                map_y.Set <float>(ii, jj, (float)myOPENCV_runlist[i, 2] / 10 * ii);       //scale up or down
                            }
                        }
                    }
                    Cv2.Remap(image_out, image_out, map_x, map_y);
                    break;
                }

                case MyOPENCV.warpaffine:    //affine transform
                {
                    if (0 == myOPENCV_runlist[i, 1])
                    {
                        Mat rot_mat = new Mat(2, 3, MatType.CV_32FC1);
                        OpenCvSharp.Point center = new OpenCvSharp.Point(image_out.Cols / 2, image_out.Rows / 2);
                        double            angle  = myOPENCV_runlist[i, 2];
                        double            scale  = (double)myOPENCV_runlist[i, 3] / 10;
                        // compute the rotation matrix from the center, angle and scale above
                        rot_mat = Cv2.GetRotationMatrix2D(center, angle, scale);
                        // apply the rotation to the image
                        Cv2.WarpAffine(image_out, image_out, rot_mat, image_out.Size());
                    }
                    else
                    {
                        Point2f[] srcTri   = new Point2f[3];
                        Point2f[] dstTri   = new Point2f[3];
                        Mat       warp_mat = new Mat(2, 3, MatType.CV_32FC1);
                        Mat       warp_dst;
                        warp_dst  = Mat.Zeros(image_out.Rows, image_out.Cols, image_out.Type());
                        srcTri[0] = new Point2f(0, 0);
                        srcTri[1] = new Point2f(image_out.Cols, 0);
                        srcTri[2] = new Point2f(0, image_out.Rows);
                        dstTri[0] = new Point2f((float)(image_out.Cols * myOPENCV_runlist[i, 2] / 100), (float)(image_out.Rows * myOPENCV_runlist[i, 2] / 100));
                        dstTri[1] = new Point2f((float)(image_out.Cols * (1 - (float)myOPENCV_runlist[i, 3] / 100)), (float)(image_out.Rows * myOPENCV_runlist[i, 3] / 100));
                        dstTri[2] = new Point2f((float)(image_out.Cols * myOPENCV_runlist[i, 4] / 100), (float)(image_out.Rows * (1 - (float)myOPENCV_runlist[i, 4] / 100)));
                        warp_mat  = Cv2.GetAffineTransform(srcTri, dstTri);
                        Cv2.WarpAffine(image_out, image_out, warp_mat, image_out.Size());
                    }
                    break;
                }

                case MyOPENCV.equalizehist:    //histogram equalization
                {
                    Cv2.EqualizeHist(image_out, image_out);
                    break;
                }

                case MyOPENCV.facedetection:         //face detection
                {
                    if (0 == myOPENCV_runlist[i, 1]) // parameter 1 == 0 uses the Haar cascade, any other value uses LBP
                    {
                        var haarCascade = new CascadeClassifier(@"haarcascade_frontalface_alt.xml");
                        Mat haarResult  = DetectFace(image_out, haarCascade);
                        image_out = haarResult;
                    }
                    else
                    {
                        var lbpCascade = new CascadeClassifier(@"lbpcascade_frontalface.xml");
                        Mat lbpResult  = DetectFace(image_out, lbpCascade);
                        image_out = lbpResult;
                    }

                    break;
                }

                case MyOPENCV.matchtemplate:                                             // template matching
                {
                    Mat originalMat = Cv2.ImRead(my_imagesource, ImreadModes.AnyColor);  //source image
                    Mat modelMat    = Cv2.ImRead(my_imagesource2, ImreadModes.AnyColor); //template
                    Mat resultMat   = new Mat();                                         // match result

                    //resultMat.Create(mat1.Cols - modelMat.Cols + 1, mat1.Rows - modelMat.Cols + 1, MatType.CV_32FC1);//create the result Mat, the third argument of MatchTemplate
                    if (0 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.SqDiff);        //run the match (1 source, 2 template, 3 result, 4 match mode)
                    }
                    else if (1 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.SqDiffNormed);
                    }
                    else if (2 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCorr);
                    }
                    else if (3 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCorrNormed);
                    }
                    else if (4 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCoeff);
                    }
                    else if (5 == myOPENCV_runlist[i, 1])
                    {
                        Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCoeffNormed);
                    }
                    OpenCvSharp.Point minLocation, maxLocation, matchLocation;
                    Cv2.MinMaxLoc(resultMat, out minLocation, out maxLocation);
                    matchLocation = maxLocation;
                    Mat mask = originalMat.Clone();

                    Cv2.Rectangle(mask, minLocation, new OpenCvSharp.Point(minLocation.X + modelMat.Cols, minLocation.Y + modelMat.Rows), Scalar.Green, 2);         //draw the matched rectangle (image, top-left, bottom-right, color, thickness)

                    image_out = mask;
                    break;
                }

                case MyOPENCV.find_draw_contours:                                      // find and draw contours
                {
                    Cv2.CvtColor(image_out, image_out, ColorConversionCodes.RGB2GRAY); //convert to grayscale
                    //Cv2.Blur(image_out, image_out, new OpenCvSharp.Size(2, 2));  //smoothing

                    Cv2.Canny(image_out, image_out, 100, 200);              //Canny edge detection

                    OpenCvSharp.Point[][] contours;
                    HierarchyIndex[]      hierarchly;
                    Cv2.FindContours(image_out, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0)); //get the contours

                    Mat    dst_Image = Mat.Zeros(image_out.Size(), image_out.Type());                                                                                    // zero-initialized canvas
                    Random rnd       = new Random();
                    for (int j = 0; j < contours.Length; j++)
                    {
                        Scalar color = new Scalar(myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                        //Scalar color = new Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
                        Cv2.DrawContours(dst_Image, contours, j, color, myOPENCV_runlist[i, 4], LineTypes.Link8, hierarchly);             //draw the contour
                    }
                    image_out = dst_Image;
                    break;
                }

                case MyOPENCV.componentdefectdetecting:                                // part defect detection
                {
                    Cv2.CvtColor(image_out, image_out, ColorConversionCodes.RGB2GRAY); //convert to grayscale
                    //Cv2.Blur(image_out, image_out, new OpenCvSharp.Size(2, 2));  //smoothing

                    Cv2.Canny(image_out, image_out, 100, 200);              //Canny edge detection

                    OpenCvSharp.Point[][] contours;
                    HierarchyIndex[]      hierarchly;
                    Cv2.FindContours(image_out, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0)); //get the contours

                    Mat    dst_Image = Mat.Zeros(image_out.Size(), image_out.Type());                                                                                    // zero-initialized canvas
                    Random rnd       = new Random();
                    for (int j = 0; j < contours.Length; j++)
                    {
                        Scalar color = new Scalar(myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                        //Scalar color = new Scalar(rnd.Next(0, 255), rnd.Next(0, 255), rnd.Next(0, 255));
                        Cv2.DrawContours(dst_Image, contours, j, color, myOPENCV_runlist[i, 4], LineTypes.Link8, hierarchly);               //draw the contour
                    }


                    Mat cnt = new Mat();
                    Cv2.ConvexHull(image_out, cnt);



                    break;
                }

                default: break;
                }
            }
            return(image_out);
        }
Exemple #18
0
        private static string detectBarcode(string fileName, double thresh, bool debug = false, double rotation = 0)
        {
            Console.WriteLine("\nProcessing: {0}", fileName);

            // load the image and convert it to grayscale
            var image = new Mat(fileName);

            if (rotation != 0)
            {
                rotateImage(image, image, rotation, 1);
            }

            if (debug)
            {
                Cv2.ImShow("Source", image);
                Cv2.WaitKey(1); // do events
            }

            var gray     = new Mat();
            var channels = image.Channels();

            if (channels > 1)
            {
                Cv2.CvtColor(image, gray, ColorConversion.BgrToGray);
            }
            else
            {
                image.CopyTo(gray);
            }


            // compute the Scharr gradient magnitude representation of the images
            // in both the x and y direction
            var gradX = new Mat();

            Cv2.Sobel(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0, ksize: -1);
            //Cv2.Scharr(gray, gradX, MatType.CV_32F, xorder: 1, yorder: 0);

            var gradY = new Mat();

            Cv2.Sobel(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1, ksize: -1);
            //Cv2.Scharr(gray, gradY, MatType.CV_32F, xorder: 0, yorder: 1);

            // subtract the y-gradient from the x-gradient
            var gradient = new Mat();

            Cv2.Subtract(gradX, gradY, gradient);
            Cv2.ConvertScaleAbs(gradient, gradient);

            if (debug)
            {
                Cv2.ImShow("Gradient", gradient);
                Cv2.WaitKey(1); // do events
            }


            // blur and threshold the image
            var blurred = new Mat();

            Cv2.Blur(gradient, blurred, new Size(9, 9));

            var threshImage = new Mat();

            Cv2.Threshold(blurred, threshImage, thresh, 255, ThresholdType.Binary);

            if (debug)
            {
                Cv2.ImShow("Thresh", threshImage);
                Cv2.WaitKey(1); // do events
            }


            // construct a closing kernel and apply it to the thresholded image
            var kernel = Cv2.GetStructuringElement(StructuringElementShape.Rect, new Size(21, 7));
            var closed = new Mat();

            Cv2.MorphologyEx(threshImage, closed, MorphologyOperation.Close, kernel);

            if (debug)
            {
                Cv2.ImShow("Closed", closed);
                Cv2.WaitKey(1); // do events
            }


            // perform a series of erosions and dilations
            Cv2.Erode(closed, closed, null, iterations: 4);
            Cv2.Dilate(closed, closed, null, iterations: 4);

            if (debug)
            {
                Cv2.ImShow("Erode & Dilate", closed);
                Cv2.WaitKey(1); // do events
            }


            //find the contours in the thresholded image, then sort the contours
            //by their area, keeping only the largest one

            Point[][]       contours;
            HiearchyIndex[] hierarchyIndexes;
            Cv2.FindContours(
                closed,
                out contours,
                out hierarchyIndexes,
                mode: ContourRetrieval.CComp,
                method: ContourChain.ApproxSimple);

            if (contours.Length == 0)
            {
                throw new NotSupportedException("Couldn't find any object in the image.");
            }

            var contourIndex       = 0;
            var previousArea       = 0;
            var biggestContourRect = Cv2.BoundingRect(contours[0]);

            while ((contourIndex >= 0))
            {
                var contour = contours[contourIndex];

                var boundingRect     = Cv2.BoundingRect(contour); //Find bounding rect for each contour
                var boundingRectArea = boundingRect.Width * boundingRect.Height;
                if (boundingRectArea > previousArea)
                {
                    biggestContourRect = boundingRect;
                    previousArea       = boundingRectArea;
                }

                contourIndex = hierarchyIndexes[contourIndex].Next;
            }


            /*biggestContourRect.Width += 10;
             * biggestContourRect.Height += 10;
             * biggestContourRect.Left -= 5;
             * biggestContourRect.Top -= 5;*/


            var barcode = new Mat(image, biggestContourRect); //Crop the image

            Cv2.CvtColor(barcode, barcode, ColorConversion.BgrToGray);

            Cv2.ImShow("Barcode", barcode);
            Cv2.WaitKey(1); // do events

            var barcodeClone = barcode.Clone();
            var barcodeText  = getBarcodeText(barcodeClone);

            if (string.IsNullOrWhiteSpace(barcodeText))
            {
                Console.WriteLine("Enhancing the barcode...");
                //Cv2.AdaptiveThreshold(barcode, barcode, 255,
                //AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 9, 1);
                //var th = 119;
                var th = 100;
                Cv2.Threshold(barcode, barcode, th, 255, ThresholdType.ToZero);
                Cv2.Threshold(barcode, barcode, th, 255, ThresholdType.Binary);
                barcodeText = getBarcodeText(barcode);
            }

            Cv2.Rectangle(image,
                          new Point(biggestContourRect.X, biggestContourRect.Y),
                          new Point(biggestContourRect.X + biggestContourRect.Width, biggestContourRect.Y + biggestContourRect.Height),
                          new Scalar(0, 255, 0),
                          2);

            if (debug)
            {
                Cv2.ImShow("Segmented Source", image);
                Cv2.WaitKey(1); // do events
            }

            Cv2.WaitKey(0);
            Cv2.DestroyAllWindows();

            return(barcodeText);
        }
Exemple #19
0
        public void FindContours(string sLeftPictureFile, string sRightPictureFile)
        {
            Mat tokuLeft  = new Mat();
            Mat tokuRight = new Mat();
            Mat output    = new Mat();

            AKAZE akaze = AKAZE.Create();

            KeyPoint[] keyPointsLeft;
            KeyPoint[] keyPointsRight;

            Mat descriptorLeft  = new Mat();
            Mat descriptorRight = new Mat();

            DescriptorMatcher matcher; //matching method

            DMatch[] matches;          //array holding the matches between the two sets of feature vectors

            //load the left image (in color)
            Mat Lsrc = new Mat(sLeftPictureFile, ImreadModes.Color);

            //load the right image (in color)
            Mat Rsrc = new Mat(sRightPictureFile, ImreadModes.Color);

            //detect the keypoints and compute their descriptors
            akaze.DetectAndCompute(Lsrc, null, out keyPointsLeft, descriptorLeft);
            akaze.DetectAndCompute(Rsrc, null, out keyPointsRight, descriptorRight);


            //draw the keypoints of image 1
            Cv2.DrawKeypoints(Lsrc, keyPointsLeft, tokuLeft);
            Image imageLeftToku = BitmapConverter.ToBitmap(tokuLeft);

            pictureBox3.SizeMode = PictureBoxSizeMode.Zoom;
            pictureBox3.Image    = imageLeftToku;
            tokuLeft.SaveImage("result/LeftToku.jpg");



            //draw the keypoints of image 2
            Cv2.DrawKeypoints(Rsrc, keyPointsRight, tokuRight);
            Image imageRightToku = BitmapConverter.ToBitmap(tokuRight);

            pictureBox4.SizeMode = PictureBoxSizeMode.Zoom;
            pictureBox4.Image    = imageRightToku;
            tokuRight.SaveImage("result/RightToku.jpg");

            //brute-force matching
            matcher = DescriptorMatcher.Create("BruteForce");
            matches = matcher.Match(descriptorLeft, descriptorRight);

            Cv2.DrawMatches(Lsrc, keyPointsLeft, Rsrc, keyPointsRight, matches, output);
            output.SaveImage(@"result\output.jpg");

            int size         = matches.Count();
            var getPtsSrc    = new Vec2f[size];
            var getPtsTarget = new Vec2f[size];

            int count = 0;

            foreach (var item in matches)
            {
                var ptSrc    = keyPointsLeft[item.QueryIdx].Pt;
                var ptTarget = keyPointsRight[item.TrainIdx].Pt;
                getPtsSrc[count][0]    = ptSrc.X;
                getPtsSrc[count][1]    = ptSrc.Y;
                getPtsTarget[count][0] = ptTarget.X;
                getPtsTarget[count][1] = ptTarget.Y;
                count++;
            }

            // Obtain the homography hom that maps Src onto Target; the robust estimator is RANSAC.
            var hom = Cv2.FindHomography(
                InputArray.Create(getPtsSrc),
                InputArray.Create(getPtsTarget),
                HomographyMethods.Ransac);

            // Apply a perspective transform to Src using the matrix hom.
            Mat WarpedSrcMat = new Mat();

            Cv2.WarpPerspective(
                Lsrc, WarpedSrcMat, hom,
                new OpenCvSharp.Size(Rsrc.Width, Rsrc.Height));

            WarpedSrcMat.SaveImage(@"result\Warap.jpg");

            // Show the warped left image
            Image imageLeftSyaei = BitmapConverter.ToBitmap(WarpedSrcMat);

            pictureBox5.SizeMode = PictureBoxSizeMode.Zoom;
            pictureBox5.Image    = imageLeftSyaei;


            // Show the right image for comparison
            Image imageRightSyaei = BitmapConverter.ToBitmap(Rsrc);

            pictureBox6.SizeMode = PictureBoxSizeMode.Zoom;
            pictureBox6.Image    = imageRightSyaei;


            Mat LmatFloat = new Mat();

            WarpedSrcMat.ConvertTo(LmatFloat, MatType.CV_16SC3);
            Mat[] LmatPlanes = LmatFloat.Split();

            Mat RmatFloat = new Mat();

            Rsrc.ConvertTo(RmatFloat, MatType.CV_16SC3);
            Mat[] RmatPlanes = RmatFloat.Split();

            Mat diff0 = new Mat();
            Mat diff1 = new Mat();
            Mat diff2 = new Mat();


            Cv2.Absdiff(LmatPlanes[0], RmatPlanes[0], diff0);
            Cv2.Absdiff(LmatPlanes[1], RmatPlanes[1], diff1);
            Cv2.Absdiff(LmatPlanes[2], RmatPlanes[2], diff2);

            Cv2.MedianBlur(diff0, diff0, 5);
            Cv2.MedianBlur(diff1, diff1, 5);
            Cv2.MedianBlur(diff2, diff2, 5);

            diff0.SaveImage("result/diff0.jpg");
            diff1.SaveImage("result/diff1.jpg");
            diff2.SaveImage("result/diff2.jpg");

            Mat wiseMat = new Mat();

            Cv2.BitwiseOr(diff0, diff1, wiseMat);
            Cv2.BitwiseOr(wiseMat, diff2, wiseMat);

            wiseMat.SaveImage("result/wiseMat.jpg");

            Mat openingMat = new Mat();

            Cv2.MorphologyEx(wiseMat, openingMat, MorphTypes.Open, new Mat());

            Mat dilationMat = new Mat();

            Cv2.Dilate(openingMat, dilationMat, new Mat());
            Cv2.Threshold(dilationMat, dilationMat, 100, 255, ThresholdTypes.Binary);
            dilationMat.SaveImage(@"result\dilationMat.jpg");

            Mat LaddMat = new Mat();
            Mat RaddMat = new Mat();

            Console.WriteLine(dilationMat.GetType());
            Console.WriteLine(Rsrc.GetType());

            // dilationMat is grayscale, so convert it to the same color space as the Mats it is blended with
            Mat dilationScaleMat = new Mat();
            Mat dilationColorMat = new Mat();

            Cv2.ConvertScaleAbs(dilationMat, dilationScaleMat);
            Cv2.CvtColor(dilationScaleMat, dilationColorMat, ColorConversionCodes.GRAY2RGB);

            Cv2.AddWeighted(WarpedSrcMat, 0.3, dilationColorMat, 0.7, 0, LaddMat);
            Cv2.AddWeighted(Rsrc, 0.3, dilationColorMat, 0.7, 0, RaddMat);

            Image LaddImage = BitmapConverter.ToBitmap(LaddMat);

            pictureBox7.SizeMode = PictureBoxSizeMode.Zoom;
            pictureBox7.Image    = LaddImage;

            Image RaddImage = BitmapConverter.ToBitmap(RaddMat);

            pictureBox8.SizeMode = PictureBoxSizeMode.Zoom;
            pictureBox8.Image    = RaddImage;

            RaddMat.SaveImage(@"result\Result.jpg");

            MessageBox.Show("Done!");
        }
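The brute-force matches above are passed to FindHomography unfiltered; RANSAC will reject outliers, but a common refinement is to pre-filter with Lowe's ratio test over 2-nearest-neighbour matches. A sketch (not part of the original code, assuming System.Linq is available):

        // Keep a match only when its best distance is clearly smaller than the second best.
        var bf = DescriptorMatcher.Create("BruteForce");
        DMatch[][] knn = bf.KnnMatch(descriptorLeft, descriptorRight, 2);
        DMatch[] good = knn
            .Where(pair => pair.Length == 2 && pair[0].Distance < 0.75f * pair[1].Distance)
            .Select(pair => pair[0])
            .ToArray();
        // 'good' can then replace 'matches' when building getPtsSrc / getPtsTarget.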
Exemple #20
        public Mat fourier(Mat img)
        {
            Mat padded = new Mat();
            int m      = Cv2.GetOptimalDFTSize(img.Rows);
            int n      = Cv2.GetOptimalDFTSize(img.Cols); // on the border add zero values

            Cv2.CopyMakeBorder(img, padded, 0, m - img.Rows, 0, n - img.Cols, BorderTypes.Constant, Scalar.All(0));

            // Add another plane filled with zeros to the expanded image
            Mat paddedF32 = new Mat();

            padded.ConvertTo(paddedF32, MatType.CV_32F);
            Mat[] planes  = { paddedF32, Mat.Zeros(padded.Size(), MatType.CV_32F) };
            Mat   complex = new Mat();

            Cv2.Merge(planes, complex);

            // this way the result may fit in the source matrix
            Mat dft = new Mat();

            Cv2.Dft(complex, dft);

            // compute the magnitude and switch to logarithmic scale
            // => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
            Mat[] dftPlanes;
            Cv2.Split(dft, out dftPlanes);  // dftPlanes[0] = Re(DFT(I)), dftPlanes[1] = Im(DFT(I))

            // magnitude = sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
            Mat magnitude = new Mat();

            Cv2.Magnitude(dftPlanes[0], dftPlanes[1], magnitude);

            magnitude += Scalar.All(1);  // switch to logarithmic scale
            Cv2.Log(magnitude, magnitude);

            // crop the spectrum, if it has an odd number of rows or columns
            Mat spectrum = magnitude[
                new OpenCvSharp.Rect(0, 0, magnitude.Cols & -2, magnitude.Rows & -2)];

            // rearrange the quadrants of Fourier image  so that the origin is at the image center
            int cx = spectrum.Cols / 2;
            int cy = spectrum.Rows / 2;

            Mat q0 = new Mat(spectrum, new OpenCvSharp.Rect(0, 0, cx, cy));   // Top-Left - Create a ROI per quadrant
            Mat q1 = new Mat(spectrum, new OpenCvSharp.Rect(cx, 0, cx, cy));  // Top-Right
            Mat q2 = new Mat(spectrum, new OpenCvSharp.Rect(0, cy, cx, cy));  // Bottom-Left
            Mat q3 = new Mat(spectrum, new OpenCvSharp.Rect(cx, cy, cx, cy)); // Bottom-Right

            // swap quadrants (Top-Left with Bottom-Right)
            Mat tmp = new Mat();

            q0.CopyTo(tmp);
            q3.CopyTo(q0);
            tmp.CopyTo(q3);

            // swap quadrant (Top-Right with Bottom-Left)
            q1.CopyTo(tmp);
            q2.CopyTo(q1);
            tmp.CopyTo(q2);

            // Transform the matrix with float values into a viewable image form (floats between 0 and 1)
            Cv2.Normalize(spectrum, spectrum, 0, 1, NormTypes.MinMax);

            // calculating the idft
            Mat inverseTransform = new Mat();

            Cv2.Dft(dft, inverseTransform, DftFlags.Inverse | DftFlags.RealOutput);
            Cv2.Normalize(inverseTransform, inverseTransform, 0, 1, NormTypes.MinMax);

            double minVal = 0.0, maxVal = 0.0;

            Cv2.MinMaxIdx(inverseTransform, out minVal, out maxVal);
            Cv2.ConvertScaleAbs(inverseTransform, inverseTransform, 255.0 / (maxVal - minVal), -minVal * 255.0 / (maxVal - minVal));

            return(inverseTransform);
        }
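A minimal usage sketch for the method above (the file name is hypothetical); the input should be a single-channel 8-bit image, since it is padded and converted to CV_32F before the DFT:

        Mat img = Cv2.ImRead("input.png", ImreadModes.Grayscale);  // hypothetical input file
        Mat restored = fourier(img);                               // forward DFT, log-spectrum, inverse DFT
        Cv2.ImShow("inverse transform", restored);
        Cv2.WaitKey();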
Exemple #21
        static void Main(string[] args)
        {
            #region
            Mat[] splitall;
            Mat   src;
            Mat   channel_depth = new Mat();
            Mat   channel_gray  = new Mat();
            Mat   channel_three = new Mat();
            Mat   element3      = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(3, 1));
            Mat   element5      = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(3, 3));

            // Read the stored matrix (alternative data file: 101161kk.yaml)
            using (var fs = new FileStorage("0924.yaml", FileStorage.Mode.Read))
            {
                src = fs["vocabulary"].ReadMat();
            }
            Cv2.Split(src, out splitall);

            splitall[2].ConvertTo(channel_depth, MatType.CV_32FC1);
            //var window1 = new Window("depth",channel_depth);
            //Cv2.WaitKey();

            splitall[3].ConvertTo(channel_gray, MatType.CV_8UC1);
            //using (var window = new Window("原始图", WindowMode.Normal, channel_gray))
            //{
            //    Cv2.WaitKey();
            //}


            int imgcols = channel_depth.Cols, imgrows = channel_depth.Rows;

            Mat model_calc_gray = Mat.Zeros(channel_depth.Rows, channel_depth.Cols, MatType.CV_32FC1);
            Mat model_gray      = Mat.Zeros(channel_depth.Rows, channel_depth.Cols, MatType.CV_8UC1);
            Mat model_step1     = Mat.Zeros(channel_depth.Rows, channel_depth.Cols, MatType.CV_32FC1);

            for (int i = 0; i < channel_depth.Rows; i++)
            {
                for (int j = 0; j < channel_depth.Cols; j++)
                {
                    if (channel_depth.At <float>(i, j) < 900)                            // 900 is the cutoff depth (==> 0943)
                    {
                        model_calc_gray.Set <float>(i, j, channel_gray.At <Byte>(i, j)); // convert the byte value to float so it can be used in calculations
                    }// this makes the sigmoid computation below easier
                    else
                    {
                        continue;
                    }
                }
            }
            Mat Edge_one = model_calc_gray.Clone();
            for (int i = 0; i < 100; i++)
            {
                for (int j = 0; j < model_calc_gray.Cols; j++)
                {
                    Edge_one.Set <float>(i, j, 0);
                }
            }


            // Invert the image
            Mat Edge = Mat.Zeros(channel_depth.Rows, channel_depth.Cols, MatType.CV_32FC1);
            Edge = new Scalar(255) - Edge_one;

            int    zero_cout = Cv2.CountNonZero(Edge);           // number of non-zero values in the matrix
            Scalar zero_sum  = Cv2.Sum(Edge);                    // per-channel sum of the Mat
            float  matMean   = (float)(zero_sum[0] / zero_cout); // mean of the non-zero pixels
            float  angle     = 0.2f;

            for (int i = 0; i < imgrows; i++)
            {
                for (int j = 0; j < imgcols; j++)
                {
                    if (Edge.At <float>(i, j) != 0)
                    {
                        model_step1.Set <float>(i, j, sigmod(Edge.At <float>(i, j), matMean, angle));
                    }
                }
            }

            Mat show_change_two = Mat.Zeros(channel_depth.Rows, channel_depth.Cols, MatType.CV_8UC1);
            Cv2.Normalize(model_step1, show_change_two, 0, 255, NormTypes.MinMax, MatType.CV_8UC1);
            using (var window = new Window("转换展示图", WindowMode.Normal, show_change_two))
            {
                Cv2.WaitKey();
            }

            ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

            Cv2.MorphologyEx(show_change_two, show_change_two, MorphTypes.Close, element5, new Point(-1, -1), 10);
            using (var window = new Window("5次模糊", WindowMode.Normal, show_change_two))
            {
                Cv2.WaitKey();
            }


            Cv2.MorphologyEx(show_change_two, show_change_two, MorphTypes.Dilate, element5, new Point(-1, -1), 8);
            using (var window = new Window("5次模糊", WindowMode.Normal, show_change_two))
            {
                Cv2.WaitKey();
            }
            #region Commented-out experiments
            //Cv2.MorphologyEx(show_change_two, show_change_two, MorphTypes.Erode, element5, new Point(-1, -1), 2);
            //using (var window = new Window("eroded x2", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}
            //Cv2.MorphologyEx(show_change_two, show_change_two, MorphTypes.Open, element5, new Point(-1, -1), 10);
            //using (var window = new Window("opened x10", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}

            //for (int num1 = 0; num1 < 5; num1++)
            //    //Cv2.MedianBlur(show_change_two, show_change_two, 5);
            // Cv2.GaussianBlur(show_change_two, show_change_two, new Size(3, 1), MatType.CV_8UC1);
            //using (var window = new Window("median x10", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}
            //Cv2.MorphologyEx(show_change_two, show_change_two, MorphTypes.Close, element5, new Point(-1, -1), 3);
            //using (var window = new Window("compared with original", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}
            //for (int num1 = 0; num1 < 10; num1++)
            //    Cv2.GaussianBlur(show_change_two, show_change_two, new Size(1, 3), MatType.CV_8UC1);
            //using (var window = new Window("blurred x10", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}
            //Cv2.MorphologyEx(show_change_two, show_change_two, MorphTypes.Dilate, element3, new Point(-1, -1), 3);
            ////Debugging showed better results this way; don't filter the image before binarization, or the boundaries are lost
            //Cv2.Dilate(show_change_two, show_change_two, element3, new Point(-1, -1), 5);
            //using (var window = new Window("dilated x5", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}
            // Cv2.Dilate(show_change_two, show_change_two, element5, new Point(-1, -1), 5);
            //using (var window = new Window("eroded x5", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}

            // for (int num1 = 0; num1 < 10; num1++)
            //     Cv2.MedianBlur(show_change_two, show_change_two, 5);
            //// Cv2.GaussianBlur(show_change_two, show_change_two, new Size(1, 3), MatType.CV_8UC1);
            // using (var window = new Window("median x10", WindowMode.Normal, show_change_two))
            // {
            //     Cv2.WaitKey();
            // }
            // int a = 0;


            //Cv2.Threshold(show_change_two, show_change_two, 0, 255,ThresholdTypes.Otsu);
            //PixelConnectivity pixelConnectivity =new PixelConnectivity();

            //Cv2.ConnectedComponents(show_change_two, show_change_two, 4);
            //////Cv2.MedianBlur(show_change_two, show_change_two, 5);
            //using (var window = new Window("binarized result", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}

            //for (int num1 = 0; num1 < 10; num1++)
            //    Cv2.MedianBlur(show_change_two, show_change_two, 5);
            //// Cv2.GaussianBlur(show_change_two, show_change_two, new Size(1, 3), MatType.CV_8UC1);
            //using (var window = new Window("median x10", WindowMode.Normal, show_change_two))
            //{
            //    Cv2.WaitKey();
            //}
            //int a = 0;
            #endregion
            #endregion
            ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

            Mat Sobel_Edge   = new Mat();
            Mat Sobel_result = Mat.Zeros(imgrows, imgcols, MatType.CV_8UC1);
            Cv2.Sobel(show_change_two, Sobel_Edge, MatType.CV_16SC1, 4, 0, 5, 1, 0, BorderTypes.Default);
            Cv2.ConvertScaleAbs(Sobel_Edge, Sobel_result);
            Cv2.Threshold(Sobel_result, Sobel_result, 20, 255, ThresholdTypes.Otsu);
            using (var window = new Window("sobel结果二值", WindowMode.Normal, Sobel_result))
            {
                Cv2.WaitKey();
            }



            //for (int num1 = 0; num1 < 3; num1++)
            ////Cv2.MedianBlur(Sobel_result, Sobel_result, 3);
            //  Cv2.GaussianBlur(Sobel_result, Sobel_result, new Size(1, 3), MatType.CV_8UC1);
            //using (var window = new Window("blurred x10", WindowMode.Normal, Sobel_result))
            //{
            //    Cv2.WaitKey();
            //}

            //Mat img_step2 = new Mat();
            //Cv2.MorphologyEx(Sobel_result, Sobel_result, MorphTypes.Open, element5, new Point(-1, -1), 1);
            //using (var window = new Window("opened after Sobel", WindowMode.Normal, Sobel_result))
            //{
            //    Cv2.WaitKey();
            //}

            Mat              result = Mat.Zeros(imgrows, imgcols, MatType.CV_8UC1);
            Point[][]        contours_one;
            HierarchyIndex[] hierarchy_one;
            Cv2.FindContours(Sobel_result.Clone(), out contours_one, out hierarchy_one, RetrievalModes.External, ContourApproximationModes.ApproxSimple, new Point(0, 0));


            List <Point[]> afterFilter = new List <Point[]>();
            Console.WriteLine("Number of contours: " + contours_one.Length);
            for (int c = 0; c < contours_one.Length; c++)
            {
                Console.WriteLine("Contour " + c + " length: " + contours_one[c].Length);
            }

            for (int c = 0; c < contours_one.Length; c++)
            {
                double area = Cv2.ContourArea(contours_one[c]);

                Console.WriteLine(area);
                if (area > 800)
                {
                    afterFilter.Add(contours_one[c]);
                }
            }

            Cv2.DrawContours(result, afterFilter, -1, new Scalar(255), -1);
            using (var window = new Window("去除小面积结果图", WindowMode.Normal, result))
            {
                Cv2.WaitKey();
            }


            ////for (int num = 0; num < 5; num++)
            ////    Cv2.Dilate(result, result, element3);
            //using (var window = new Window("连接下面的部分5次膨胀结果图", WindowMode.Normal, result))
            //{
            //    Cv2.WaitKey();
            //}

            Cv2.MorphologyEx(result, result, MorphTypes.Close, element3, new Point(-1, -1), 5);
            using (var window = new Window("闭运算再次迭代10次结果图", result))
            {
                Cv2.WaitKey();
            }


            Mat              result1 = Mat.Zeros(imgrows, imgcols, MatType.CV_8UC1);
            Point[][]        contours_one1;
            HierarchyIndex[] hierarchy_one1;
            Cv2.FindContours(result.Clone(), out contours_one1, out hierarchy_one1, RetrievalModes.External, ContourApproximationModes.ApproxSimple, new Point(0, 0));
            List <Point[]> afterFilter1 = new List <Point[]>();
            Console.WriteLine(contours_one1.Length);
            for (int c = 0; c < contours_one1.Length; c++)
            {
                double area = Cv2.ContourArea(contours_one1[c]);
                Console.WriteLine(area);
                if (area > 3000)
                {
                    afterFilter1.Add(contours_one1[c]);
                }
            }
            Cv2.DrawContours(result1, afterFilter1, -1, new Scalar(255), -1);
            using (var window = new Window("再次去除小面积结果图", WindowMode.Normal, result1))
            {
                Cv2.WaitKey();
            }


            Mat result_uchar = Mat.Zeros(imgrows, imgcols, MatType.CV_8UC1);
            result1.ConvertTo(result_uchar, MatType.CV_8UC1);
            Point[][]        contours_three;
            HierarchyIndex[] hierarchy_three;
            Cv2.FindContours(result_uchar.Clone(), out contours_three, out hierarchy_three, RetrievalModes.External, ContourApproximationModes.ApproxSimple, new Point(0, 0));
            Mat           rectangle_one = Mat.Zeros(imgrows, imgcols, MatType.CV_8UC3);
            Rect[]        boundRect_one = new Rect[contours_three.Length]; // upright bounding rectangles, one per contour
            RotatedRect[] box_one       = new RotatedRect[contours_three.Length];
            Point2f[]     rect_one      = new Point2f[4];
            Console.WriteLine("Final contour count: " + contours_three.Length);

            List <Point2f[]> rec_vec      = new List <Point2f[]>(contours_three.Length);
            float[]          center_one_x = new float[contours_three.Length];
            float[]          center_one_y = new float[contours_three.Length];
            for (int i = 0; i < contours_three.Length; i++)
            {
                box_one[i]       = Cv2.MinAreaRect(contours_three[i]);                                                        // minimum-area rotated rect
                boundRect_one[i] = Cv2.BoundingRect(contours_three[i]);                                                       // upright bounding rect of each contour
                Cv2.Circle(rectangle_one, new Point(box_one[i].Center.X, box_one[i].Center.Y), 5, new Scalar(0, 255, 0), -1); // draw the center of the rotated rect
                rect_one = box_one[i].Points();                                                                               // copy the four corners of the rotated rect into the array
                Cv2.Rectangle(rectangle_one, boundRect_one[i], new Scalar(0, 255, 0), 5);                                     // draw the upright bounding rect
                center_one_x[i] = box_one[i].Center.X;
                center_one_y[i] = box_one[i].Center.Y;
                //cout << "end" <<center_one.size() << endl;
                for (int j = 0; j < 4; j++)
                {
                    Cv2.Line(rectangle_one, (Point)rect_one[j], (Point)rect_one[(j + 1) % 4], new Scalar(0, 0, 255), 2);  // draw each edge of the rotated rect
                    // rec_vec[i].push_back(rect_one[j]);          /*cout << "corner " << j << ": " << rect[j] << endl;*/
                }
                using (var window = new Window("min-area rectangles", WindowMode.Normal, rectangle_one))
                {
                    Cv2.WaitKey();
                }
            }

            int[] ind = new int[center_one_x.Length];
            BubbleSort(center_one_x, ind);
            for (int i = 0; i < contours_three.Length - 1; i++)
            {
                // cout << "ind" << ind[i] << endl;
                Point point_one;
                point_one.X = boundRect_one[ind[i]].X + boundRect_one[ind[i]].Width / 2;
                point_one.Y = boundRect_one[ind[i]].Y;
                Point point_two;
                point_two.X = boundRect_one[ind[i + 1]].X + boundRect_one[ind[i + 1]].Width / 2;
                point_two.Y = boundRect_one[ind[i + 1]].Y + boundRect_one[ind[i + 1]].Height;
                Point point_three;
                point_three   = point_two - point_one;
                point_three.X = Math.Abs(point_three.X);
                point_three.Y = Math.Abs(point_three.Y);
                Rect rect        = new Rect(point_one.X, point_one.Y, point_three.X, point_three.Y);
                Mat  capture_one = channel_gray[rect];
                //imshow("截图第一幅图结果图", capture_one);
                using (var window = new Window("截图第一幅图结果图", WindowMode.Normal, capture_one))
                {
                    Cv2.WaitKey();
                }
                Point point_four;
                point_four.X = point_one.X + point_three.X / 2;
                point_four.Y = point_one.Y + 100;
                Cv2.Circle(channel_gray, point_four, 9, new Scalar(0, 0, 255));
                Console.WriteLine("贴标X:" + splitall[0].At <float>(point_four.Y, point_four.X));
                Console.WriteLine("贴标Y:" + splitall[1].At <float>(point_four.Y, point_four.X));
                Console.WriteLine("贴标Z:" + splitall[2].At <float>(point_four.Y, point_four.X));
                using (var window = new Window("截图第一幅圈圈", WindowMode.Normal, channel_gray))
                {
                    Cv2.WaitKey();
                }

                Point point_five;
                point_five.X = point_one.X + point_three.Y / 2;  // NOTE: point_three.Y here may be a slip for point_three.X, mirroring point_four above
                point_five.Y = point_one.Y + 200;
                Cv2.Circle(channel_gray, point_five, 9, new Scalar(0, 0, 255));
                Console.WriteLine("Inkjet-code X: " + splitall[0].At <float>(point_five.Y, point_five.X));
                Console.WriteLine("Inkjet-code Y: " + splitall[1].At <float>(point_five.Y, point_five.X));
                Console.WriteLine("Inkjet-code Z: " + splitall[2].At <float>(point_five.Y, point_five.X));
                using (var window = new Window("inkjet-code point marked", WindowMode.Normal, channel_gray))
                {
                    Cv2.WaitKey();
                }
            }
        }
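The sigmod and BubbleSort helpers are called above but not defined in this excerpt. Plausible implementations consistent with the call sites (both hypothetical sketches, not the original code):

        // Hypothetical: logistic squash around the mean; 'angle' sets the slope of the curve.
        static float sigmod(float x, float mean, float angle)
        {
            return 255.0f / (1.0f + (float)Math.Exp(-angle * (x - mean)));
        }

        // Hypothetical: ascending bubble sort over 'values' that records the original indices in 'ind'.
        static void BubbleSort(float[] values, int[] ind)
        {
            for (int i = 0; i < ind.Length; i++) ind[i] = i;       // start from the identity permutation
            for (int i = 0; i < values.Length - 1; i++)
                for (int j = 0; j < values.Length - 1 - i; j++)
                    if (values[j] > values[j + 1])
                    {
                        (values[j], values[j + 1]) = (values[j + 1], values[j]);
                        (ind[j], ind[j + 1]) = (ind[j + 1], ind[j]);
                    }
        }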
Exemple #22
        static void Main()
        {
            Mat mat    = new Mat("lenna.png");
            Mat result = mat.EmptyClone();

            Mat matGray = new Mat();

            Cv2.CvtColor(mat, matGray, ColorConversionCodes.BGR2GRAY); // the PNG loads as 3-channel BGR, so BGR2GRAY is the correct code (BGRA2GRAY expects 4 channels)

            Mat edges = new Mat();

            Mat gradX    = new Mat();
            Mat gradY    = new Mat();
            Mat absGradX = new Mat();
            Mat absGradY = new Mat();

            Cv2.Sobel(matGray, gradX, MatType.CV_16S, 1, 0);
            Cv2.Sobel(matGray, gradY, MatType.CV_16S, 0, 1);

            Cv2.ConvertScaleAbs(gradX, absGradX);
            Cv2.ConvertScaleAbs(gradY, absGradY);

            Mat grad = new Mat();

            Cv2.AddWeighted(absGradX, 1.0, absGradY, 1.0, 0, grad);

            Mat dist = new Mat();

            Cv2.Canny(matGray, edges, 50, 200);

            Cv2.DistanceTransform(1 - edges, dist, DistanceTypes.L2, DistanceMaskSize.Mask3);

            Mat normDist = new Mat();

            Cv2.Normalize(dist, normDist, 0, 1.0, NormTypes.MinMax);

            Mat integralImage = new Mat();

            Cv2.Integral(mat, integralImage, MatType.CV_32F);

            // NOTE: i runs over the width and j over the height, while Mat.Get/Set take (row, col);
            // the consistently swapped indices below only work because lenna.png is square.
            for (int i = 0; i < result.Width; i++)
            {
                for (int j = 0; j < result.Height; j++)
                {
                    int size = (int)(10 * dist.Get <float>(i, j));
                    if (size >= 1)
                    {
                        int pixelsCount = ((Clamp(i + size, 0, integralImage.Width - 1) - Clamp(i - size, 0, integralImage.Width - 1)) *
                                           (Clamp(j + size, 0, integralImage.Height - 1) - Clamp(j - size, 0, integralImage.Height - 1)));

                        var p0 = new Point(Clamp(i - size, 0, integralImage.Width - 1), Clamp(j - size, 0, integralImage.Height - 1));
                        var p1 = new Point(Clamp(i + size, 0, integralImage.Width - 1), Clamp(j + size, 0, integralImage.Height - 1));
                        var p2 = new Point(Clamp(i - size, 0, integralImage.Width - 1), Clamp(j + size, 0, integralImage.Height - 1));
                        var p3 = new Point(Clamp(i + size, 0, integralImage.Width - 1), Clamp(j - size, 0, integralImage.Height - 1));

                        result.Set <Vec3b>(i, j, new Vec3b(
                                               (byte)((
                                                          integralImage.Get <Vec3f>(p0.X, p0.Y).Item0
                                                          + integralImage.Get <Vec3f>(p1.X, p1.Y).Item0
                                                          - integralImage.Get <Vec3f>(p2.X, p2.Y).Item0
                                                          - integralImage.Get <Vec3f>(p3.X, p3.Y).Item0
                                                          ) / pixelsCount),
                                               (byte)((
                                                          integralImage.Get <Vec3f>(p0.X, p0.Y).Item1
                                                          + integralImage.Get <Vec3f>(p1.X, p1.Y).Item1
                                                          - integralImage.Get <Vec3f>(p2.X, p2.Y).Item1
                                                          - integralImage.Get <Vec3f>(p3.X, p3.Y).Item1
                                                          ) / pixelsCount),
                                               (byte)((
                                                          integralImage.Get <Vec3f>(p0.X, p0.Y).Item2
                                                          + integralImage.Get <Vec3f>(p1.X, p1.Y).Item2
                                                          - integralImage.Get <Vec3f>(p2.X, p2.Y).Item2
                                                          - integralImage.Get <Vec3f>(p3.X, p3.Y).Item2
                                                          ) / pixelsCount)));
                    }
                    else
                    {
                        result.Set <Vec3b>(i, j, mat.Get <Vec3b>(i, j));
                    }
                }
            }
            using (new Window("src image", mat))
                using (new Window("matGray image", matGray))
                    using (new Window("grad image", grad))
                        using (new Window("edges image", edges))
                            using (new Window("normDist image", normDist))
                                using (new Window("result image", result))
                                {
                                    Cv2.WaitKey();
                                }
        }
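The Clamp helper used in the integral-image lookups above is not defined in this excerpt; a standard saturating implementation matching the call sites (hypothetical):

        // Hypothetical: constrain 'value' to the inclusive range [min, max].
        static int Clamp(int value, int min, int max)
        {
            return value < min ? min : (value > max ? max : value);
        }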