public void findBarOne()
{
    var imageCopy = image.Copy();
    var sobelX = image.Sobel(1, 0, -1);
    showImage(sobelX, "sobelX");
    var sobelY = image.Sobel(0, 1, -1);
    showImage(sobelY, "sobelY");
    var gradient = sobelX - sobelY;
    CvInvoke.ConvertScaleAbs(gradient, gradient, 1, 0);
    showImage(gradient, "gradient");
    // Keep only the strongest gradients; a threshold of 255 with max 255 would discard
    // every pixel, so 225 is used here (the exact cutoff is a tuning choice).
    gradient._ThresholdBinary(new Gray(225), new Gray(255));
    showImage(gradient, "thresholdImage");
    var element = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
    var dilatedImage = gradient.Dilate(2);
    showImage(dilatedImage, "dilatedImage");
    var closeImage = new Image<Gray, byte>(dilatedImage.Width, dilatedImage.Height);
    CvInvoke.MorphologyEx(dilatedImage, closeImage, Emgu.CV.CvEnum.MorphOp.Close, element, new Point(-1, -1), 2, Emgu.CV.CvEnum.BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
    showImage1(closeImage, "closeImage");
}
static void Main(string[] args)
{
    var img = CvInvoke.Imread(Path.Join("resources", "ZeroSweater.jpg"));
    var gray = new Mat(img.Rows, img.Cols, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
    var gradX = new Mat(gray.Rows, gray.Cols, Emgu.CV.CvEnum.DepthType.Cv16S, 1);
    var gradY = new Mat(gray.Rows, gray.Cols, Emgu.CV.CvEnum.DepthType.Cv16S, 1);
    var absGradX = new Mat(gray.Rows, gray.Cols, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
    var absGradY = new Mat(gray.Rows, gray.Cols, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
    var sobelGrad = new Mat(gray.Rows, gray.Cols, Emgu.CV.CvEnum.DepthType.Cv8U, 1);

    CvInvoke.CvtColor(img, gray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
    CvInvoke.GaussianBlur(gray, gray, new System.Drawing.Size(3, 3), 0);
    // A signed depth keeps the negative gradient responses until ConvertScaleAbs folds
    // them into 0..255; Cv8U here would clip every negative response to zero.
    CvInvoke.Sobel(gray, gradX, Emgu.CV.CvEnum.DepthType.Cv16S, 1, 0, 3);
    CvInvoke.Sobel(gray, gradY, Emgu.CV.CvEnum.DepthType.Cv16S, 0, 1, 3);
    CvInvoke.ConvertScaleAbs(gradX, absGradX, 1, 0);
    CvInvoke.ConvertScaleAbs(gradY, absGradY, 1, 0);
    CvInvoke.AddWeighted(absGradX, .5, absGradY, .5, 0, sobelGrad);

    CvInvoke.Imshow("sobel x", absGradX);
    CvInvoke.Imshow("sobel y", absGradY);
    CvInvoke.Imshow("sobel", sobelGrad);
    CvInvoke.Imwrite("sobelX.jpg", absGradX);
    CvInvoke.Imwrite("sobelY.jpg", absGradY);
    CvInvoke.Imwrite("sobel.jpg", sobelGrad);
    CvInvoke.Imshow("gray", gray);
    CvInvoke.WaitKey(0);
}
private static Tensor ReadTensorFromMatBgr(Mat image, float inputMean, float scale, DataType type)
{
    if (type == DataType.Float)
    {
        Tensor t = new Tensor(type, new int[] { 1, image.Height, image.Width, 3 });
        // Wrap the tensor's buffer in a Mat header so ConvertTo writes straight into it
        using (Mat matF = new Mat(image.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 3, t.DataPointer, sizeof(float) * 3 * image.Width))
        {
            image.ConvertTo(matF, Emgu.CV.CvEnum.DepthType.Cv32F, scale, -inputMean * scale);
        }
        return t;
    }
    else if (type == DataType.Uint8)
    {
        Tensor t = new Tensor(type, new int[] { 1, image.Height, image.Width, 3 });
        using (Mat matB = new Mat(image.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 3, t.DataPointer, sizeof(byte) * 3 * image.Width))
        {
            if (scale == 1.0f && inputMean == 0)
            {
                image.CopyTo(matB); // identity transform: a plain copy is cheaper
            }
            else
            {
                CvInvoke.ConvertScaleAbs(image, matB, scale, -inputMean * scale);
            }
        }
        return t;
    }
    else
    {
        throw new Exception(String.Format("Data Type of {0} is not supported.", type));
    }
}
// Laplacian sharpening
private void button4_Click(object sender, EventArgs e)
{
    Image<Bgr, byte> srcimage = src.Copy();
    // [0] Variable definitions
    Mat src_gray = new Mat();
    Mat dst = new Mat();
    Mat abs_dst = new Mat();
    // [1] Suppress noise with a Gaussian blur
    CvInvoke.GaussianBlur(srcimage, srcimage, new Size(3, 3), 0, 0);
    imageBox2.Image = srcimage;
    // [2] Convert to grayscale
    CvInvoke.CvtColor(srcimage, src_gray, ColorConversion.Bgr2Gray);
    imageBox3.Image = src_gray;
    // [3] Apply the Laplacian
    CvInvoke.Laplacian(src_gray, dst, DepthType.Cv16S, 3, 1, 0);
    // Parameter 1: the input image; a single-channel 8-bit Mat.
    // Parameter 2: the output edge map, same size and channel count as the source.
    // Parameter 3: ddepth, the depth of the destination image.
    // Parameter 4: ksize, the aperture used to compute the second derivatives; must be a positive odd number, default 1.
    // Parameter 5: scale, an optional scale factor for the computed Laplacian values, default 1.
    // Parameter 6: delta, an optional value added to the result before it is stored in dst, default 0.
    // Parameter 7: borderType, the pixel extrapolation mode, default BORDER_DEFAULT; see borderInterpolate() in the official docs.
    // [4] Take absolute values and convert the result back to 8 bits
    CvInvoke.ConvertScaleAbs(dst, abs_dst, 1, 0);
    imageBox4.Image = abs_dst;
}
/// <summary>
/// Private interface to convert a BGR Mat image to a Tensor.
/// Actually performs the Emgu.CV.Mat to Emgu.TF.Lite.Tensor conversion.
/// Stolen from https://github.com/emgucv/emgutf/blob/master/Emgu.TF.Example/CVInterop/TensorConvert.cs
/// </summary>
/// <param name="image">The input Emgu CV Mat</param>
/// <param name="inputMean">The mean; if it is not 0, the value will be subtracted from the pixel values</param>
/// <param name="scale">The optional scale</param>
/// <param name="t">The pre-allocated output tensor. Dimension must match (1, inputHeight, inputWidth, 3)</param>
private static void ReadTensorFromMatBgr(Mat image, float inputMean, float scale, Tensor t)
{
    DataType type = t.Type;
    if (type == DataType.Float32)
    {
        using (Mat matF = new Mat(image.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 3, t.DataPointer, sizeof(float) * 3 * image.Width))
        {
            // Apply the same mean/scale handling as the byte branch, so the behaviour
            // matches the documentation above.
            image.ConvertTo(matF, Emgu.CV.CvEnum.DepthType.Cv32F, scale, -inputMean * scale);
        }
    }
    else if (type == DataType.UInt8)
    {
        using (Mat matB = new Mat(image.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 3, t.DataPointer, sizeof(byte) * 3 * image.Width))
        {
            if (scale == 1.0f && inputMean == 0)
            {
                image.CopyTo(matB);
            }
            else
            {
                CvInvoke.ConvertScaleAbs(image, matB, scale, -inputMean * scale);
            }
        }
    }
    else
    {
        throw new Exception(String.Format("Data Type of {0} is not supported.", type));
    }
}
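A minimal call-site sketch for the TF Lite variant above; the image path, the 224x224 input size, and the pre-obtained inputTensor are illustrative assumptions, not part of the snippet.

Mat bgr = CvInvoke.Imread("model_input.jpg", Emgu.CV.CvEnum.ImreadModes.Color); // hypothetical path
CvInvoke.Resize(bgr, bgr, new Size(224, 224)); // match the assumed (1, 224, 224, 3) tensor shape
// `inputTensor` is assumed to have been obtained from a TF Lite interpreter's inputs;
// mean 0 and scale 1 take the fast CopyTo path inside the helper.
ReadTensorFromMatBgr(bgr, 0f, 1.0f, inputTensor);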
private void sobelToolStripMenuItem_Click(object sender, EventArgs e)
{
    try
    {
        if (pictureBox1.Image == null)
        {
            return;
        }
        float[,] data = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
        Matrix<float> SEx = new Matrix<float>(data);
        Matrix<float> SEy = SEx.Transpose();
        var img = new Bitmap(pictureBox1.Image).ToImage<Bgr, float>();
        var Gx = new Mat();
        var Gy = new Mat();
        CvInvoke.Sobel(img, Gx, Emgu.CV.CvEnum.DepthType.Cv32F, 1, 0);
        CvInvoke.Sobel(img, Gy, Emgu.CV.CvEnum.DepthType.Cv32F, 0, 1);
        var gx = Gx.ToImage<Gray, float>();
        var gy = Gy.ToImage<Gray, float>();
        var Gxx = new Mat(Gx.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 1);
        var Gyy = new Mat(Gx.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 1);
        // ConvertScaleAbs computes |src * alpha + beta|; alpha must be 1 here,
        // since a scale of 0 would blank the whole image.
        CvInvoke.ConvertScaleAbs(Gx, Gxx, 1, 0);
        CvInvoke.ConvertScaleAbs(Gy, Gyy, 1, 0);
        var mag = new Mat(Gx.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
        CvInvoke.AddWeighted(Gxx, 0.5, Gyy, 0.5, 0, mag);
        AddImage(mag.ToImage<Bgr, byte>(), "Mag Absolute");
        gx._Mul(gx);
        gy._Mul(gy);
        var M = new Mat(gx.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
        CvInvoke.Sqrt(gx + gy, M);
        AddImage(M.ToImage<Bgr, byte>(), "Mag Squared");
        //CvInvoke.Filter2D(img, Gx, SEx, new Point(-1, -1));
        //CvInvoke.Filter2D(img, Gy, SEy, new Point(-1, -1));
        pictureBox1.Image = M.ToBitmap();
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
}
static Rectangle get_rectangle_by_sobel(string filename)
{
    Rectangle ret = Rectangle.Empty;
    //string filename = @"C:\Tools\avia\images\Final270\iphone6 Gold\0123.6.bmp";
    Mat m = CvInvoke.Imread(filename, Emgu.CV.CvEnum.ImreadModes.Grayscale);
    Image<Gray, Byte> img = m.ToImage<Gray, Byte>().GetSubRect(new Rectangle(new Point(5, 5), new Size(m.Width - 10, m.Height - 10)));
    Mat b1 = new Mat();
    CvInvoke.GaussianBlur(img, b1, new Size(3, 3), 0, 0, BorderType.Default);
    Mat dx = new Mat();
    Mat dy = new Mat();
    CvInvoke.Sobel(b1, dx, DepthType.Cv16S, 1, 0);
    CvInvoke.ConvertScaleAbs(dx, dx, 1, 0);
    CvInvoke.Sobel(b1, dy, DepthType.Cv16S, 0, 1);
    CvInvoke.ConvertScaleAbs(dy, dy, 1, 0);
    dx.Save("temp_x.bmp");
    dy.Save("temp_y.bmp");
    Mat[] ms = new Mat[] { dx, dy };
    Rectangle[] rs = new Rectangle[] { Rectangle.Empty, Rectangle.Empty };
    for (int idx = 0; idx < ms.Length; idx++)
    {
        double otsu = CvInvoke.Threshold(ms[idx], ms[idx], 0, 255, ThresholdType.Binary | ThresholdType.Otsu);
        Rectangle roi = new Rectangle();
        using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
        {
            CvInvoke.FindContours(ms[idx], contours, null, RetrType.External, ChainApproxMethod.ChainApproxNone);
            int count = contours.Size;
            for (int i = 0; i < count; i++)
            {
                //double a1 = CvInvoke.ContourArea(contours[i], false);
                //if (a1 > 1)
                {
                    //Program.logIt($"area: {a1}");
                    Rectangle rt = CvInvoke.BoundingRectangle(contours[i]);
                    if (roi.IsEmpty)
                    {
                        roi = rt;
                    }
                    else
                    {
                        roi = Rectangle.Union(roi, rt);
                    }
                }
            }
        }
        rs[idx] = roi;
        //Program.logIt($"RectX: {roi}, size={toFloat(roi)}");
    }
    ret = new Rectangle(rs[0].X, rs[1].Y, rs[0].Width, rs[1].Height);
    //Program.logIt($"Rect: {ret}, size={toFloat(ret)}");
    return ret;
}
//public void findBarTwo()
//{
//    var imagecopy = image.Copy();
//    // var smoothImage = imagecopy.SmoothGaussian(3);
//    var barLineImage = imagecopy.InRange(new Gray(0), new Gray(100));
//    var lines = barLineImage.HoughLinesBinary(1, Math.PI / 45, 10, 10, 10)[0];
//    foreach (LineSegment2D line in lines)
//    {
//        barLineImage.Draw(line, new Gray(60), 3);
//        line.
//    }
//    imageBox.Image = barLineImage;
//}
public void findBar3()
{
    var imagecopy = image.Copy();
    var sobelX = image.Sobel(1, 0, -1);
    var sobelY = image.Sobel(0, 1, -1);
    var gradient = sobelX.Sub(sobelY);
    CvInvoke.ConvertScaleAbs(gradient, gradient, 1, 0);
    var blurredImage = gradient.SmoothBlur(9, 9);
    var byteImage = blurredImage.Convert<Gray, byte>();
    var thresholdImage = byteImage.ThresholdBinary(new Gray(180), new Gray(255));
    var element = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(21, 7), new Point(-1, -1));
    var closed = new Image<Gray, byte>(byteImage.Width, byteImage.Height);
    closed = thresholdImage.Dilate(2);
    //CvInvoke.MorphologyEx(closed, closed, Emgu.CV.CvEnum.MorphOp.Close, element, new Point(-1, -1), 2, Emgu.CV.CvEnum.BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
    //closed = closed.Dilate(2);
    //closed = closed.Erode(4);
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    //showImage1(closed, "Threshold");
    CvInvoke.FindContours(closed, contours, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);
    double maxArea = 0;
    int maxIndex = -1;
    for (int i = 0; i < contours.Size; i++)
    {
        double tempArea = CvInvoke.ContourArea(contours[i]);
        if (tempArea > maxArea)
        {
            maxArea = tempArea;
            maxIndex = i;
        }
    }
    // Note: assumes at least one contour was found; maxIndex stays -1 otherwise.
    var rect = CvInvoke.BoundingRectangle(contours[maxIndex]);
    //CvInvoke.DrawContours(imagecopy, contours, maxIndex, new MCvScalar(60), 3);
    var imagecopy1 = rgbImage.Copy();
    imagecopy1.Draw(rect, new Bgra(0, 255, 0, 100), 3);
    CvInvoke.PutText(imagecopy1, "Found!", rect.Location, Emgu.CV.CvEnum.FontFace.HersheyComplex, 14, new MCvScalar(0, 255, 0));
    imageBox.Image = imagecopy1;
    showImage(sobelX, "SobelX");
    showImage(sobelY, "SobelY");
    showImage(gradient, "Gradient");
    showImage(blurredImage, "BlurredImage");
    showImage1(byteImage, "ByteImage");
    showImage1(thresholdImage, "ThresholdImage");
    showImage1(closed, "Closed");
}
public Rectangle DetectBarcode(Image<Bgr, byte> capturedFrame, int a = 21, int b = 7)
{
    Mat grayscaleFrame = new Mat();
    Mat gradX = new Mat();
    Mat gradY = new Mat();
    Mat absGradX = new Mat();
    Mat absGradY = new Mat();
    Mat fullGrad = new Mat();
    Mat bluredFrame = new Mat();
    Mat thresholdFrame = new Mat();
    //Mat verticalRectangle = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(16, 11), new Point(1, 1));
    //Mat horizontalRectangle = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 6), new Point(1, 1));
    Mat verticalRectangle = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(Math.Max(a, 1), Math.Max(b, 1)), new Point(1, 1));
    Mat horizontalRectangle = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(6, 6), new Point(1, 1));

    // Convert to grayscale
    CvInvoke.CvtColor(capturedFrame, grayscaleFrame, ColorConversion.Bgr2Gray);
    // Gradients in X and Y
    CvInvoke.Sobel(grayscaleFrame, gradX, Emgu.CV.CvEnum.DepthType.Cv8U, 1, 0);
    CvInvoke.Sobel(grayscaleFrame, gradY, Emgu.CV.CvEnum.DepthType.Cv8U, 0, 1);
    // Absolute values of the gradients
    CvInvoke.ConvertScaleAbs(gradX, absGradX, 2, 0);
    CvInvoke.ConvertScaleAbs(gradY, absGradY, 2, 0);
    // Detection of vertical lines
    CvInvoke.Subtract(absGradX, absGradY, fullGrad);
    // Blur
    CvInvoke.Blur(fullGrad, bluredFrame, new Size(2, 2), new Point(-1, -1));
    // Binarization
    CvInvoke.Threshold(bluredFrame, thresholdFrame, 80, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
    // Morphological closing
    CvInvoke.MorphologyEx(thresholdFrame, thresholdFrame, Emgu.CV.CvEnum.MorphOp.Close, verticalRectangle, new Point(-1, -1), 2, Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(0));
    // Erosion
    CvInvoke.MorphologyEx(thresholdFrame, thresholdFrame, Emgu.CV.CvEnum.MorphOp.Erode, horizontalRectangle, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(0));

    ProcessedImageStep1 = gradX;
    ProcessedImageStep2 = gradY;
    ProcessedImageStep3 = fullGrad;
    ProcessedImageStep4 = bluredFrame;
    ProcessedImageStep5 = thresholdFrame;

    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(thresholdFrame, contours, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);
    Rectangle r = _FindLargestRectFromContours(contours);
    // Grow the detected rectangle by 25% to leave a margin around the barcode
    Rectangle newrectangle = new Rectangle(r.X - r.Width / 8, r.Y - r.Height / 8, (int)(r.Width * 1.25m), (int)(r.Height * 1.25m));
    return newrectangle;
}
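A possible call site for DetectBarcode, sketched under the assumption that the surrounding class exposes it as above; the frame path and the drawing step are illustrative only.

var frame = new Image<Bgr, byte>("barcode_frame.jpg"); // hypothetical test frame
Rectangle barcodeRegion = DetectBarcode(frame);        // default 21x7 closing kernel
if (barcodeRegion.Width > 0 && barcodeRegion.Height > 0)
{
    CvInvoke.Rectangle(frame, barcodeRegion, new MCvScalar(0, 255, 0), 2);
}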
int max_thresh = 175; // maximum threshold

private void button2_Click(object sender, EventArgs e)
{
    //---------------------------[1] Local variables--------------------------------
    Image<Gray, float> dstImage = new Image<Gray, float>(src.Size); // destination image
    Mat normImage = new Mat(); // normalized image
    Image<Gray, byte> scaledImage = new Image<Gray, byte>(src.Size); // 8-bit unsigned image after linear rescaling
    //---------------------------[2] Initialization---------------------------------
    // Reset the images to be displayed, clearing the values left over from the previous call
    Image<Gray, byte> g_srcImage1 = src.Clone();
    //---------------------------[3] Detection--------------------------------------
    // Harris corner detection; the response map comes back as float data
    CvInvoke.CornerHarris(src, dstImage, 2);
    // Normalization and conversion
    CvInvoke.Normalize(dstImage, normImage, 0, 255, Emgu.CV.CvEnum.NormType.MinMax);
    double min = 0, max = 0;
    Point minp = new Point(0, 0);
    Point maxp = new Point(0, 0);
    CvInvoke.MinMaxLoc(normImage, ref min, ref max, ref minp, ref maxp);
    double scale = 255 / (max - min);
    double shift = -min * scale; // shift so the minimum response maps to 0
    CvInvoke.ConvertScaleAbs(normImage, scaledImage, scale, shift); // linearly rescale the normalized map to 8-bit unsigned
    //---------------------------[4] Drawing----------------------------------------
    // Draw the detected corners that pass the threshold
    byte[] data = scaledImage.Bytes;
    for (int j = 0; j < normImage.Rows; j++)
    {
        for (int i = 0; i < normImage.Cols; i++)
        {
            // Row-major indexing: row (j) * width + column (i)
            int k = j * src.Width + i;
            if (k < data.Length)
            {
                if (data[k] > thresh)
                {
                    CvInvoke.Circle(g_srcImage1, new Point(i, j), 5, new MCvScalar(10, 10, 255), 2);
                    CvInvoke.Circle(scaledImage, new Point(i, j), 5, new MCvScalar(0, 10, 255), 2);
                }
            }
        }
    }
    imageBox1.Image = g_srcImage1;
    imageBox2.Image = scaledImage;
}
private void scharrToolStripMenuItem_Click(object sender, EventArgs e)
{
    try
    {
        if (pictureBox1.Image == null)
        {
            return;
        }
        float[,] data = { { -3, 0, 3 }, { -10, 0, 10 }, { -3, 0, 3 } };
        Matrix<float> SEx = new Matrix<float>(data);
        Matrix<float> SEy = SEx.Transpose();
        var img = new Bitmap(pictureBox1.Image).ToImage<Gray, float>();
        var Gx = new Mat();
        var Gy = new Mat();
        CvInvoke.Scharr(img, Gx, Emgu.CV.CvEnum.DepthType.Cv16S, 1, 0);
        CvInvoke.Scharr(img, Gy, Emgu.CV.CvEnum.DepthType.Cv16S, 0, 1);
        // The scale factor must be 1 (a scale of 0 would blank the image), and the
        // squaring has to happen in 32-bit floats: squaring the 8-bit output of
        // ConvertScaleAbs directly would saturate at 255.
        CvInvoke.ConvertScaleAbs(Gx, Gx, 1, 0);
        CvInvoke.ConvertScaleAbs(Gy, Gy, 1, 0);
        Gx.ConvertTo(Gx, Emgu.CV.CvEnum.DepthType.Cv32F);
        Gy.ConvertTo(Gy, Emgu.CV.CvEnum.DepthType.Cv32F);
        CvInvoke.Multiply(Gx, Gx, Gx);
        CvInvoke.Multiply(Gy, Gy, Gy);
        var M = new Mat(Gx.Size, Emgu.CV.CvEnum.DepthType.Cv32F, 1);
        CvInvoke.Sqrt(Gx + Gy, M);
        var imgout = M.ToImage<Bgr, byte>();
        AddImage(imgout, "Scharr");
        pictureBox1.Image = imgout.ToBitmap();
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
}
private void Btn_Scharr_Click(object sender, EventArgs e)
{
    var bitmap = this.pic_src.GetFirstRegionRect();
    Image<Gray, byte> image = new Image<Gray, byte>(bitmap);
    Mat matX = new Mat(image.Size, DepthType.Cv64F, 1);
    Mat matY = new Mat(image.Size, DepthType.Cv64F, 1);
    CvInvoke.Scharr(image, matX, DepthType.Cv64F, 1, 0);
    CvInvoke.ConvertScaleAbs(matX, matX, 1, 0);
    CvInvoke.Scharr(image, matY, DepthType.Cv64F, 0, 1);
    CvInvoke.ConvertScaleAbs(matY, matY, 1, 0);
    this.ibX.Image = matX;
    this.ibY.Image = matY;
}
private void button2_Click(object sender, EventArgs e)
{
    if (srcImg == null)
    {
        MessageBox.Show("Please select a source image first!");
        return;
    }
    Image<Bgr, Byte> dstImg = srcImg.CopyBlank();
    Image<Bgr, Byte> dstImg2 = srcImg.CopyBlank();
    CvInvoke.Laplacian(srcImg, dstImg, DepthType.Default);
    imageBox2.Image = dstImg; // gradient map
    CvInvoke.ConvertScaleAbs(dstImg, dstImg2, 1, 0); // same result as below, just trying out this approach
    dstImg2 = srcImg.Add(dstImg2);
    imageBox3.Image = dstImg2;
}
private void manageBlur(Mat img, int blurMode, int kSize = 3)
{
    Mat store = img.Clone();
    Image<Gray, Byte> gray = store.ToImage<Gray, Byte>();
    switch (blurMode)
    {
        case Constants.Sobel:
        {
            Image<Gray, float> sobelX = gray.Sobel(1, 0, kSize);
            Image<Gray, float> sobelY = gray.Sobel(0, 1, kSize);
            sobelX = sobelX.AbsDiff(new Gray(0));
            sobelY = sobelY.AbsDiff(new Gray(0));
            Image<Gray, float> sobel = sobelX + sobelY;
            double[] mins, maxs;
            //Find sobel min or max value position
            System.Drawing.Point[] minLoc, maxLoc;
            sobel.MinMax(out mins, out maxs, out minLoc, out maxLoc);
            //Conversion to 8-bit image
            Image<Gray, Byte> sobelImage = sobel.ConvertScale<byte>(255 / maxs[0], 0);
            CurrentMat = sobelImage.Mat;
            break;
        }
        case Constants.Laplace:
        {
            Image<Gray, float> targetImage = gray.Laplace(kSize);
            CvInvoke.ConvertScaleAbs(targetImage, targetImage, 1, 0);
            CurrentMat = targetImage.Mat;
            break;
        }
        case Constants.Median:
            break;
        case Constants.Gaussian:
            break;
    }
    showImg(CurrentMat);
}
public Mat FingerprintDescriptor(Mat input)
{
    var harris_normalised = PrepareImage(input);
    float threshold = 125.0f;
    List<MKeyPoint> mKeyPoints = new List<MKeyPoint>();
    Mat rescaled = new Mat();
    VectorOfKeyPoint keypoints = new VectorOfKeyPoint();
    double scale = 1.0, shift = 0.0;
    CvInvoke.ConvertScaleAbs(harris_normalised, rescaled, scale, shift);
    // Replicate the rescaled corner response into a 3-channel image
    Mat[] mat = new Mat[] { rescaled, rescaled, rescaled };
    VectorOfMat vectorOfMat = new VectorOfMat(mat);
    int[] from_to = { 0, 0, 1, 1, 2, 2 };
    Mat harris_c = new Mat(rescaled.Size, DepthType.Cv8U, 3);
    CvInvoke.MixChannels(vectorOfMat, harris_c, from_to);
    // Collect a keypoint wherever the corner response exceeds the threshold
    for (int x = 0; x < harris_c.Width; x++)
    {
        for (int y = 0; y < harris_c.Height; y++)
        {
            if (GetFloatValue(harris_c, x, y) > threshold)
            {
                MKeyPoint m = new MKeyPoint
                {
                    Size = 1,
                    Point = new PointF(x, y)
                };
                mKeyPoints.Add(m);
            }
        }
    }
    keypoints.Push(mKeyPoints.ToArray());
    Mat descriptors = new Mat();
    ORBDetector ORBCPU = new ORBDetector();
    ORBCPU.Compute(_input_thinned, keypoints, descriptors);
    return descriptors;
}
private void AddNoise(Mat image)
{
    const double noiseLevel = 0.75;
    var mean = new MCvScalar(0);
    var std = new MCvScalar(255);
    const int gaussSize = 13;
    const double scale = 0.5;
    const double shift = 100;
    var noise = new Mat(image.Size, DepthType.Cv8U, 1);
    using (ScalarArray scalarArray1 = new ScalarArray(mean))
    using (ScalarArray scalarArray2 = new ScalarArray(std))
    {
        CvInvoke.Randn(noise, scalarArray1, scalarArray2);
    }
    CvInvoke.GaussianBlur(noise, noise, new Size(gaussSize, gaussSize), 0.0);
    CvInvoke.AddWeighted(image, 1 - noiseLevel, noise, noiseLevel, 0, image, image.Depth);
    CvInvoke.ConvertScaleAbs(image, image, scale, shift);
}
private void button1_Click(object sender, EventArgs e)
{
    Mat kernel;
    Image<Gray, float> grayImage = new Image<Gray, float>(img.Bitmap);
    if (radioButton1.Checked)
    {
        //kernel = new Matrix<float>(mask1);
        kernel = mask1.Mat;
    }
    else if (radioButton2.Checked)
    {
        kernel = mask2.Mat;
    }
    else
    {
        kernel = mask3.Mat;
    }
    Point anchor = new Point(-1, -1);
    Mat srcImg = grayImage.Mat;
    Mat imgDst = grayImage.Mat; // aliases srcImg, so the filtering happens in place
    Mat m = new Mat();
    kernel.ConvertTo(m, Emgu.CV.CvEnum.DepthType.Cv8S);
    CvInvoke.Filter2D(srcImg, imgDst, m, anchor);
    var img3 = imgDst;
    CvInvoke.ConvertScaleAbs(imgDst, img3, 1, 0); // in-place conversion to 8-bit
    img = new Image<Gray, float>(img3.Bitmap);
    imgt = new Image<Bgr, byte>(img3.Bitmap);
    form.SetImage(new Image<Bgr, byte>(img3.Bitmap));
    form.Refresh();
}
public static Mat sobelEdgeDetection(ref Mat src_roi)
{
    Mat roi_gray = new Mat();
    Mat grad_x = new Mat();
    Mat grad_y = new Mat();
    Mat grad_absx = new Mat();
    Mat grad_absy = new Mat();
    Mat roi_sobel = new Mat();
    // Note: Rgb2Gray assumes RGB channel order; images loaded with Imread are BGR and would need Bgr2Gray.
    CvInvoke.CvtColor(src_roi, roi_gray, ColorConversion.Rgb2Gray);
    CvInvoke.Sobel(roi_gray, grad_x, DepthType.Cv16S, 1, 0, 3, 1, 1, BorderType.Default); // Sobel in the x direction
    CvInvoke.ConvertScaleAbs(grad_x, grad_absx, 1, 0);
    CvInvoke.Sobel(roi_gray, grad_y, DepthType.Cv16S, 0, 1, 3, 1, 1, BorderType.Default); // Sobel in the y direction
    CvInvoke.ConvertScaleAbs(grad_y, grad_absy, 1, 0);
    CvInvoke.AddWeighted(grad_absx, 0.5, grad_absy, 0.5, 0, roi_sobel);
    return roi_sobel;
}
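A short, hypothetical call site for the helper above; the ROI path is a placeholder.

Mat roi = CvInvoke.Imread("plate_roi.jpg"); // hypothetical path; 3-channel input expected by CvtColor
Mat edges = sobelEdgeDetection(ref roi);
CvInvoke.Imshow("sobel edges", edges);
CvInvoke.WaitKey(0);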
public void CornerDetection(VisionProfile profile, Mat img)
{
    var output = new Mat();
    var outputNormalized = new Mat();
    var outputNormalizedScaled = new Mat();
    CvInvoke.CornerHarris(img, output, profile.HarrisCornerBlockSize, profile.HarrisCornerAperture, profile.HarrisCornerK, BorderType.Default);
    CvInvoke.Normalize(output, outputNormalized, 0, 255, NormType.MinMax, DepthType.Cv32F, null);
    CvInvoke.ConvertScaleAbs(outputNormalized, outputNormalizedScaled, 5, 5);
    for (int j = 0; j < outputNormalized.Rows; j++)
    {
        for (int i = 0; i < outputNormalized.Cols; i++)
        {
            //if ((int)outputNormalized.GetData(j, i) > profile.HarrisCornerThreshold)
            //{
            //    circle(outputNormalizedScaled, Point(i, j), 5, Scalar(0), 2, 8, 0);
            //}
        }
    }
}
/// <summary>
/// Calculates the approximation of the image gradient.
/// </summary>
/// <param name="srcGray">Source image.</param>
/// <param name="scale">Scale of the Sobel operator.</param>
/// <param name="delta">Delta of the Sobel operator.</param>
/// <param name="kernelSize">Kernel size of the Sobel operator.</param>
/// <returns>The gradient magnitude approximation as a Mat.</returns>
private static Mat GetGradient(Mat srcGray, int scale = 1, int delta = 0, int kernelSize = 5)
{
    Mat gradX = new Mat();
    Mat gradY = new Mat();
    Mat absGradX = new Mat();
    Mat absGradY = new Mat();
    DepthType ddepth = DepthType.Cv32F;
    // Calculate the x and y gradients using the Sobel operator
    CvInvoke.Sobel(srcGray, gradX, ddepth, 1, 0, kernelSize, scale, delta, BorderType.Default);
    CvInvoke.ConvertScaleAbs(gradX, absGradX, scale, delta);
    CvInvoke.Sobel(srcGray, gradY, ddepth, 0, 1, kernelSize, scale, delta, BorderType.Default);
    CvInvoke.ConvertScaleAbs(gradY, absGradY, scale, delta);
    // Combine the two gradients
    Mat grad = new Mat();
    CvInvoke.AddWeighted(absGradX, 0.5, absGradY, 0.5, 0, grad);
    return grad;
}
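A minimal sketch of calling GetGradient, e.g. to preview the gradient magnitude of a frame; the file name is an assumption.

Mat gray = CvInvoke.Imread("frame.png", ImreadModes.Grayscale); // hypothetical path
Mat grad = GetGradient(gray); // defaults: 5x5 Sobel, scale 1, delta 0
CvInvoke.Imshow("gradient magnitude", grad);
CvInvoke.WaitKey(0);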
private void button3_Click(object sender, EventArgs e)
{
    if (srcImg == null)
    {
        MessageBox.Show("Please select a source image first!");
        return;
    }
    Image<Bgr, Byte> dstImg = srcImg.CopyBlank();
    Image<Bgr, Byte> dstImg2 = srcImg.CopyBlank();
    CvInvoke.Sobel(srcImg, dstImg, DepthType.Default, 1, 0);
    imageBox2.Image = dstImg;
    dstImg2 = srcImg.Add(dstImg);
    imageBox3.Image = dstImg2;
    // Second gradient image
    Image<Bgr, Byte> grad_x = srcImg.CopyBlank();
    Image<Bgr, Byte> grad_y = srcImg.CopyBlank();
    Image<Bgr, Byte> abs_x = srcImg.CopyBlank();
    Image<Bgr, Byte> abs_y = srcImg.CopyBlank();
    Image<Bgr, Byte> gradImg = srcImg.CopyBlank();
    CvInvoke.Sobel(srcImg, grad_x, DepthType.Default, 1, 0);
    CvInvoke.Sobel(srcImg, grad_y, DepthType.Default, 0, 1);
    // Absolute values
    CvInvoke.ConvertScaleAbs(grad_x, abs_x, 1, 0);
    CvInvoke.ConvertScaleAbs(grad_y, abs_y, 1, 0);
    // Square both directions; Pow returns a new image rather than modifying
    // in place, so the result must be assigned back.
    abs_x = abs_x.Pow(2);
    abs_y = abs_y.Pow(2);
    // The sum is the squared gradient magnitude
    CvInvoke.Add(abs_x, abs_y, gradImg);
    imageBox4.Image = gradImg;
}
public List<Vector2> GetCornerPoints()
{
    Image<Gray, float> cornerimg = new Image<Gray, float>(this.img.Size);
    Image<Gray, Byte> cornerthrimg = new Image<Gray, Byte>(this.img.Size);
    Image<Gray, Byte> cannyimg = this.img.Canny(60, 100);
    CvInvoke.CornerHarris(cannyimg, cornerimg, 3, 3, 0.04);
    //CvInvoke.cvNormalize(cornerimg, cornerimg, 0, 255, Emgu.CV.CvEnum.NORM_TYPE.CV_MINMAX, IntPtr.Zero); // normalization
    double min = 0, max = 0;
    System.Drawing.Point minp = new System.Drawing.Point(0, 0);
    System.Drawing.Point maxp = new System.Drawing.Point(0, 0);
    CvInvoke.MinMaxLoc(cornerimg, ref min, ref max, ref minp, ref maxp);
    double scale = 255 / (max - min);
    double shift = -min * scale; // shift so the minimum response maps to 0
    CvInvoke.ConvertScaleAbs(cornerimg, cornerthrimg, scale, shift); // rescale and convert to byte
    byte[] data = cornerthrimg.Bytes;
    List<Vector2> corners = new List<Vector2>();
    List<Vector3> corners_3 = new List<Vector3>();
    for (int i = 0; i < cornerimg.Height; i++)
    {
        for (int j = 0; j < cornerimg.Width; j++)
        {
            int k = i * cornerthrimg.Width + j;
            if (data[k] > 80) // threshold on the scaled corner response
            {
                corners.Add(new Vector2(j, i));
                corners_3.Add(m_projector.ImageToWorld(corners.Last()));
            }
        }
    }
    m_renderEngine.DrawPoints(corners_3);
    return corners;
}
private void Bt_filter_Click(object sender, EventArgs e)
{
    var bitmap = this.pic_src.GetFirstRegionRect();
    // Laplacian kernel (kept for reference, unused below)
    float[,] arr = new float[3, 3] { { 0, -1, 0 }, { -1, 4, -1 }, { 0, -1, 0 } };
    // Sobel x kernel:
    // |-1 0 1|
    // |-2 0 2|
    // |-1 0 1|
    float[,] arr1 = new float[3, 3] { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
    Matrix<float> kernel = new Matrix<float>(arr1);
    var image = new Image<Gray, byte>(bitmap);
    Mat matX = new Mat(image.Size, DepthType.Cv64F, 1);
    CvInvoke.Filter2D(image, matX, kernel, new Point(-1, -1));
    CvInvoke.ConvertScaleAbs(matX, matX, 1, 0);
    this.ibX.Image = matX;
}
private Mat PreProcessImage(Mat image)
{
    ShowImage("Image", image);
    Mat tmpImg = new Mat();
    Mat sob1 = new Mat();
    Mat sob2 = new Mat();
    CvInvoke.CvtColor(image, tmpImg, Emgu.CV.CvEnum.ColorConversion.Bgra2Gray);
    CvInvoke.Sobel(tmpImg, sob1, Emgu.CV.CvEnum.DepthType.Cv8U, 1, 0, 1);
    CvInvoke.Sobel(tmpImg, sob2, Emgu.CV.CvEnum.DepthType.Cv8U, 0, 1, 1);
    CvInvoke.Subtract(sob1, sob2, tmpImg);
    ShowImage("AfterSobel", tmpImg);
    CvInvoke.ConvertScaleAbs(tmpImg, tmpImg, 1, 0);
    CvInvoke.Blur(tmpImg, tmpImg, new Size(9, 9), new Point(0, 0));
    CvInvoke.Threshold(tmpImg, tmpImg, 45, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
    //CvInvoke.AdaptiveThreshold(tmpImg, tmpImg, 255, Emgu.CV.CvEnum.AdaptiveThresholdType.MeanC, Emgu.CV.CvEnum.ThresholdType.Binary, 3, 10);
    //CvInvoke.BitwiseNot(tmpImg, tmpImg);
    ShowImage("AfterThresh", tmpImg);
    var kernel = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(21, 7), new Point(0, 0));
    CvInvoke.MorphologyEx(tmpImg, tmpImg, Emgu.CV.CvEnum.MorphOp.Close, kernel, new Point(0, 0), 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(255));
    ShowImage("After Morphology", tmpImg);
    CvInvoke.Erode(tmpImg, tmpImg, null, new Point(0, 0), 7, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(255));
    CvInvoke.Dilate(tmpImg, tmpImg, null, new Point(0, 0), 7, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(255));
    Mat cts = new Mat(tmpImg.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
    ShowImage("After Erode/Dilate", tmpImg);
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(tmpImg, contours, null, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
    DrawContours(contours, cts);
    ShowImage("contours", cts);
    Rectangle rect = FindGlobalContour(contours, tmpImg);
    CvInvoke.Rectangle(image, rect, new MCvScalar(255), 3);
    return new Mat(image, rect);
}
/// <summary>
/// Sobel edge detection
/// </summary>
/// <param name="sender"></param>
/// <param name="e"></param>
private void button3_Click(object sender, EventArgs e)
{
    Image<Bgr, byte> srcimage = src.Copy();
    // [1] Create the grad_x and grad_y matrices
    Mat grad_x = new Mat();
    Mat grad_y = new Mat();
    Mat abs_grad_x = new Mat();
    Mat abs_grad_y = new Mat();
    Mat dst = new Mat();
    // [2] Gradient in the X direction
    CvInvoke.Sobel(srcimage, grad_x, DepthType.Default, 1, 0);
    CvInvoke.ConvertScaleAbs(grad_x, abs_grad_x, 1, 0);
    imageBox2.Image = abs_grad_x;
    // [3] Gradient in the Y direction
    CvInvoke.Sobel(srcimage, grad_y, DepthType.Default, 0, 1);
    CvInvoke.ConvertScaleAbs(grad_y, abs_grad_y, 1, 0);
    imageBox3.Image = abs_grad_y;
    // [4] Merge the gradients (approximation)
    CvInvoke.AddWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, dst);
    imageBox4.Image = dst;
}
static void Main(string[] args)
{
    // Read image
    Mat img = CvInvoke.Imread(@"C:\EMT\Image\EMT_Lab2\ConnectImage3.png", ImreadModes.Grayscale);
    // Create window and show image
    CvInvoke.NamedWindow("Original", NamedWindowType.KeepRatio);
    CvInvoke.Imshow("Original", img);
    // Create new cv image same size as img
    Mat imglabel = new Mat(img.Rows, img.Cols, DepthType.Cv16U, 1);
    // Count number of connected components
    int label = CvInvoke.ConnectedComponents(img, imglabel, LineType.FourConnected, DepthType.Cv16U);
    // Create new cv image with unsigned 8 bit
    Mat imglabelscale = new Mat(img.Rows, img.Cols, DepthType.Cv8U, 1);
    // Find max and min values in cv image array
    CvInvoke.MinMaxIdx(imglabel, out double minVal, out double maxVal, null, null);
    // Scale the labels so the full [min, max] range maps onto 0..255
    CvInvoke.ConvertScaleAbs(imglabel, imglabelscale, 255 / (maxVal - minVal), 0);
    // Write image to path as png
    CvInvoke.Imwrite(@"C:\EMT\Image\EMT_Lab2\Results1.png", imglabelscale);
    // The component count excludes the background label 0
    String imgtitle = "Connected Objects = " + Convert.ToString(label - 1);
    CvInvoke.NamedWindow(imgtitle, NamedWindowType.KeepRatio);
    CvInvoke.Imshow(imgtitle, imglabelscale);
    CvInvoke.WaitKey(0);
    CvInvoke.DestroyAllWindows();
}
//DetectEdges
public void DetectEdges()
{
    CvInvoke.Normalize(CameraMat24, CameraMat24, 1, 254, NormType.MinMax);
    Debug.Log("Kauel: DetectEdges()");
    PauseCamera();
    if (EdgeMap != null)
    {
        EdgeMap.Dispose();
    }
    EdgeMap = new Mat(CameraMat24.Rows + 2, CameraMat24.Cols + 2, DepthType.Cv8U, 1);
    Rectangle roi = new Rectangle(1, 1, CameraMat24.Width, CameraMat24.Height);
    Mat EdgeMapCenter = new Mat(EdgeMap, roi);
    Mat img1 = CameraMat24.Clone();
    Mat img2 = img1.Clone();
    Mat img3 = img1.Clone();
    CvInvoke.FastNlMeansDenoising(img1, img1); // Removes the noise.
    CvInvoke.GaussianBlur(img1, img2, new Size(9, 9), 9); // Blur
    CvInvoke.AddWeighted(img1, 1.5, img2, -0.5, 0, img1, DepthType.Cv8U); // unsharp mask
    //img1.Save("C:/dnn/Filter.png");
    Mat imgCanny = img1.Clone();
    CvInvoke.Canny(img1, imgCanny, CannyLow, CannyHigh, CannyAperture);
    CvInvoke.CvtColor(img1, img1, ColorConversion.Bgr2Gray); // Edges in grayscale.
    CvInvoke.Sobel(img1, img2, DepthType.Cv32F, 1, 0, BorderAperture, 1);
    CvInvoke.Sobel(img1, img3, DepthType.Cv32F, 0, 1, BorderAperture, 1);
    CvInvoke.ConvertScaleAbs(img2, img2, 1, 0);
    CvInvoke.ConvertScaleAbs(img3, img3, 1, 0);
    CvInvoke.AddWeighted(img2, 1, img3, 1, 0, img3);
    img3.ConvertTo(img3, DepthType.Cv8U);
    //img3.Save("C:/dnn/SobelEroded.png");
    CvInvoke.AdaptiveThreshold(img3, img3, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, ContrastAperture, -Contrast);
    //img3.Save("C:/dnn/Adaptive.png");
    CvInvoke.BitwiseOr(imgCanny, img3, img3);
    img3.CopyTo(img2);
    LineSegment2D[] lines = CvInvoke.HoughLinesP(img2, HoughLineRho, HoughLineAngle, HoughLineThreshold, HoughLineMinLineLength, HoughLineMaxGap);
    //img2.SetTo(Black);
    for (int i = 0; i < lines.Length; i++)
    {
        CvInvoke.Line(img3, lines[i].P1, lines[i].P2, White, 1);
    }
    lines = null;
    img3.CopyTo(EdgeMapCenter);
    img1.Dispose();
    img2.Dispose();
    img3.Dispose();
    imgCanny.Dispose();
    EdgeMapCenter.Dispose();
}
/// <summary>
/// Convert raw data to bitmap
/// </summary>
/// <param name="scan0">The pointer to the raw data</param>
/// <param name="step">The step</param>
/// <param name="size">The size of the image</param>
/// <param name="srcColorType">The source image color type</param>
/// <param name="numberOfChannels">The number of channels</param>
/// <param name="srcDepthType">The source image depth type</param>
/// <param name="tryDataSharing">Try to create Bitmap that shares the data with the image</param>
/// <returns>The Bitmap</returns>
public static Bitmap RawDataToBitmap(IntPtr scan0, int step, Size size, Type srcColorType, int numberOfChannels, Type srcDepthType, bool tryDataSharing = false)
{
    if (tryDataSharing)
    {
        if (srcColorType == typeof(Gray) && srcDepthType == typeof(Byte))
        {
            //Grayscale of Bytes
            Bitmap bmpGray = new Bitmap(size.Width, size.Height, step, PixelFormat.Format8bppIndexed, scan0)
            {
                Palette = GrayscalePalette
            };
            return bmpGray;
        }
        // Mono in Linux doesn't support scan0 constructor with Format24bppRgb, use ToBitmap instead
        // See https://bugzilla.novell.com/show_bug.cgi?id=363431
        // TODO: check Mono bugzilla bug 363431 to see when it will be fixed
        if (Platform.OperationSystem == Platform.OS.Windows &&
            Platform.ClrType == Platform.Clr.DotNet &&
            srcColorType == typeof(Bgr) && srcDepthType == typeof(Byte) &&
            (step & 3) == 0)
        {
            //Bgr byte
            return new Bitmap(size.Width, size.Height, step, PixelFormat.Format24bppRgb, scan0);
        }
        if (srcColorType == typeof(Bgra) && srcDepthType == typeof(Byte))
        {
            //Bgra byte
            return new Bitmap(size.Width, size.Height, step, PixelFormat.Format32bppArgb, scan0);
        }
        //PixelFormat.Format16bppGrayScale is not supported in .NET
        //else if (typeof(TColor) == typeof(Gray) && typeof(TDepth) == typeof(UInt16))
        //{
        //   return new Bitmap(
        //      size.width,
        //      size.height,
        //      step,
        //      PixelFormat.Format16bppGrayScale,
        //      scan0);
        //}
    }

    PixelFormat format; //= System.Drawing.Imaging.PixelFormat.Undefined;
    if (srcColorType == typeof(Gray)) // if this is a gray scale image
    {
        format = PixelFormat.Format8bppIndexed;
    }
    else if (srcColorType == typeof(Bgra)) // if this is a Bgra image
    {
        format = PixelFormat.Format32bppArgb;
    }
    else if (srcColorType == typeof(Bgr)) // if this is a Bgr Byte image
    {
        format = PixelFormat.Format24bppRgb;
    }
    else
    {
        using (Mat m = new Mat(size.Height, size.Width, CvInvoke.GetDepthType(srcDepthType), numberOfChannels, scan0, step))
        using (Mat m2 = new Mat())
        {
            CvInvoke.CvtColor(m, m2, srcColorType, typeof(Bgr));
            return RawDataToBitmap(m2.DataPointer, m2.Step, m2.Size, typeof(Bgr), 3, srcDepthType);
        }
    }

    Bitmap bmp = new Bitmap(size.Width, size.Height, format);
    BitmapData data = bmp.LockBits(new Rectangle(Point.Empty, size), ImageLockMode.WriteOnly, format);
    using (Mat bmpMat = new Mat(size.Height, size.Width, DepthType.Cv8U, numberOfChannels, data.Scan0, data.Stride))
    using (Mat dataMat = new Mat(size.Height, size.Width, CvInvoke.GetDepthType(srcDepthType), numberOfChannels, scan0, step))
    {
        if (srcDepthType == typeof(Byte))
        {
            dataMat.CopyTo(bmpMat);
        }
        else
        {
            double scale = 1.0, shift = 0.0;
            RangeF range = dataMat.GetValueRange();
            if (range.Max > 255.0 || range.Min < 0)
            {
                scale = range.Max.Equals(range.Min) ? 0.0 : 255.0 / (range.Max - range.Min);
                shift = scale.Equals(0) ? range.Min : -range.Min * scale;
            }
            CvInvoke.ConvertScaleAbs(dataMat, bmpMat, scale, shift);
        }
    }
    bmp.UnlockBits(data);
    if (format == PixelFormat.Format8bppIndexed)
    {
        bmp.Palette = GrayscalePalette;
    }
    return bmp;
}
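A hedged usage sketch for RawDataToBitmap wrapping an existing Mat's buffer; the path is a placeholder, and with tryDataSharing left false the Bitmap owns a copy rather than aliasing the Mat.

Mat frame = CvInvoke.Imread("frame.jpg"); // hypothetical path; loads an 8-bit BGR image
Bitmap bmp = RawDataToBitmap(
    frame.DataPointer, frame.Step, frame.Size,
    typeof(Bgr), frame.NumberOfChannels, typeof(byte),
    tryDataSharing: false);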
public void generateChannels(Image<Bgr, byte> img_original, string image_name, string destination_folder)
{
    Directory.CreateDirectory(destination_folder);
    //string destination_folder = tbDestination.Text;
    img_original = img_original.SmoothGaussian(3); // smooth gaussian

    Image<Luv, byte> luv; // will contain the Luv image to extract the LUV channels
    luv = img_original.Convert<Luv, byte>(); // convert from BGR to Luv
    VectorOfUMat channels = new VectorOfUMat(); // contains the Luv channels
    CvInvoke.Split(luv, channels); // split them
    Image<Gray, double> image_channel_L = channels[0].ToImage<Gray, double>(); // L channel
    image_channel_L = image_channel_L.SmoothGaussian(3);
    Image<Gray, double> image_channel_U = channels[1].ToImage<Gray, double>(); // U channel
    image_channel_U = image_channel_U.SmoothGaussian(3);
    Image<Gray, double> image_channel_V = channels[2].ToImage<Gray, double>(); // V channel
    image_channel_V = image_channel_V.SmoothGaussian(3);
    CvInvoke.Imwrite(@destination_folder + "__L.jpg", image_channel_L);
    CvInvoke.Imwrite(@destination_folder + "__U.jpg", image_channel_U);
    CvInvoke.Imwrite(@destination_folder + "__V.jpg", image_channel_V);

    Mat gray = new Mat(); // gray version of the original image
    Mat grad = new Mat(); // will contain the gradient magnitude
    Mat grad_x = new Mat(); // sobel x
    Mat grad_y = new Mat(); // sobel y
    Mat abs_grad_x = new Mat(); // abs
    Mat abs_grad_y = new Mat();
    Mat angles = new Mat(); // will contain the angle of every edge in the gradient magnitude channel
    CvInvoke.CvtColor(img_original, gray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray); // get gray image from bgr

    // the channels defined below will contain the edges at different angles
    Image<Gray, UInt16> C1 = new Image<Gray, UInt16>(img_original.Cols, img_original.Rows);
    Image<Gray, UInt16> C2 = new Image<Gray, UInt16>(img_original.Cols, img_original.Rows);
    Image<Gray, UInt16> C3 = new Image<Gray, UInt16>(img_original.Cols, img_original.Rows);
    Image<Gray, UInt16> C4 = new Image<Gray, UInt16>(img_original.Cols, img_original.Rows);
    Image<Gray, UInt16> C5 = new Image<Gray, UInt16>(img_original.Cols, img_original.Rows);
    Image<Gray, UInt16> C6 = new Image<Gray, UInt16>(img_original.Cols, img_original.Rows);

    // apply sobel
    CvInvoke.Sobel(gray, grad_x, Emgu.CV.CvEnum.DepthType.Cv32F, 1, 0, 3);
    CvInvoke.ConvertScaleAbs(grad_x, abs_grad_x, 1, 0);
    CvInvoke.Sobel(gray, grad_y, Emgu.CV.CvEnum.DepthType.Cv32F, 0, 1, 3);
    CvInvoke.ConvertScaleAbs(grad_y, abs_grad_y, 1, 0);
    CvInvoke.AddWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);

    Image<Gray, UInt16> img_gradient = grad.ToImage<Gray, UInt16>(); // will store the gradient magnitude as an image
    img_gradient = normalize(img_gradient);
    CvInvoke.Imwrite(@destination_folder + "__G.jpg", img_gradient);

    Emgu.CV.Cuda.CudaInvoke.Phase(grad_x, grad_y, angles, true); // get angles
    Image<Gray, double> img_angles = angles.ToImage<Gray, double>(); // stores the angles as a gray image
    // loop through the angles
    for (int i = 0; i < img_angles.Height; i++)
    {
        for (int j = 0; j < img_angles.Width; j++)
        {
            double current_angle = img_angles.Data[i, j, 0]; // current angle value in degrees
            if (current_angle > 180) // fold angles greater than 180 back into [0, 180]
            {
                img_angles.Data[i, j, 0] = (double)(img_angles.Data[i, j, 0] - 180);
            }
            current_angle = img_angles.Data[i, j, 0]; // update the current value
            // according to the value of the angle, add the edge to the corresponding channel
            if (current_angle >= 0 && current_angle <= 30)
            {
                addEdgeToChannel(i, j, img_gradient.Data[i, j, 0], C1);
            }
            else if (current_angle > 30 && current_angle <= 60)
            {
                addEdgeToChannel(i, j, img_gradient.Data[i, j, 0], C2);
            }
            else if (current_angle > 60 && current_angle <= 90)
            {
                addEdgeToChannel(i, j, img_gradient.Data[i, j, 0], C3);
            }
            else if (current_angle > 90 && current_angle <= 120)
            {
                addEdgeToChannel(i, j, img_gradient.Data[i, j, 0], C4);
            }
            else if (current_angle > 120 && current_angle <= 150)
            {
                addEdgeToChannel(i, j, img_gradient.Data[i, j, 0], C5);
            }
            else if (current_angle > 150 && current_angle <= 180)
            {
                addEdgeToChannel(i, j, img_gradient.Data[i, j, 0], C6);
            }
        }
    }
    // smooth the channels
    C1 = C1.SmoothGaussian(3);
    C2 = C2.SmoothGaussian(3);
    C3 = C3.SmoothGaussian(3);
    C4 = C4.SmoothGaussian(3);
    C5 = C5.SmoothGaussian(3);
    C6 = C6.SmoothGaussian(3);
    CvInvoke.Imwrite(@destination_folder + "__C1.jpg", C1);
    CvInvoke.Imwrite(@destination_folder + "__C2.jpg", C2);
    CvInvoke.Imwrite(@destination_folder + "__C3.jpg", C3);
    CvInvoke.Imwrite(@destination_folder + "__C4.jpg", C4);
    CvInvoke.Imwrite(@destination_folder + "__C5.jpg", C5);
    CvInvoke.Imwrite(@destination_folder + "__C6.jpg", C6);
}
public static void GetBarcodeFromImageEmgu(string fname, out string format, out string code)
{
    Image<Bgr, Byte> orgimage = new Image<Bgr, byte>(fname);
    double scaleFactor = 1;
    if (orgimage.Height > 2048)
    {
        scaleFactor = 2048 / (double)orgimage.Height;
    }
    Image<Bgr, Byte> image = new Image<Bgr, byte>((int)(orgimage.Width * scaleFactor), (int)(orgimage.Height * scaleFactor));
    //image = cv2.resize(image, (0, 0), fx = scaleFactor, fy = scaleFactor, interpolation = cv2.INTER_AREA)
    CvInvoke.Resize(orgimage, image, new Size(0, 0), scaleFactor, scaleFactor, Inter.Area);
    orgimage.Dispose();

    UMat gray = new UMat();
    CvInvoke.CvtColor(image, gray, ColorConversion.Bgr2Gray);

    /*
     * gradX = cv2.Sobel(gray, ddepth = cv2.cv.CV_32F, dx = 1, dy = 0, ksize = -1)
     * gradY = cv2.Sobel(gray, ddepth = cv2.cv.CV_32F, dx = 0, dy = 1, ksize = -1)
     */
    UMat gradX = new UMat();
    UMat gradY = new UMat();
    // Cv32F keeps the negative gradients for the subtraction below, matching the quoted Python
    CvInvoke.Sobel(gray, gradX, DepthType.Cv32F, 1, 0, -1);
    CvInvoke.Sobel(gray, gradY, DepthType.Cv32F, 0, 1, -1);
    gray.Dispose();
    //pictureBox1.Image = gradY.Bitmap;

    /*
     * # subtract the y-gradient from the x-gradient
     * # gradient = cv2.subtract(gradX, gradY)
     * # gradient = cv2.convertScaleAbs(gradient)
     */
    UMat gradient = new UMat();
    CvInvoke.Subtract(gradX, gradY, gradient);
    CvInvoke.ConvertScaleAbs(gradient, gradient, 1, 0);
    gradX.Dispose();
    gradY.Dispose();
    //pictureBox1.Image = gradient.Bitmap;

    /*
     * # blur and threshold the image
     * # blurred = cv2.blur(gradient, (9, 9))
     * # (_, thresh) = cv2.threshold(blurred, 225, 255, cv2.THRESH_BINARY)
     */
    UMat blurred = new UMat();
    UMat thresh = new UMat();
    CvInvoke.Blur(gradient, blurred, new Size(9, 9), new Point(-1, -1));
    CvInvoke.Threshold(blurred, thresh, 88, 255, ThresholdType.Binary);
    //pictureBox1.Image = thresh.Bitmap;
    //return;

    /*
     * # construct a closing kernel and apply it to the thresholded image
     * # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (21, 7))
     * # closed = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
     */
    UMat closed = new UMat();
    var kernel = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(21, 7), new Point(-1, -1));
    CvInvoke.MorphologyEx(thresh, closed, MorphOp.Close, kernel, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
    blurred.Dispose();
    thresh.Dispose();
    //pictureBox1.Image = closed.Bitmap;
    //return;

    /*
     * # perform a series of erosions and dilations
     * # closed = cv2.erode(closed, None, iterations = 4)
     * # closed = cv2.dilate(closed, None, iterations = 4)
     */
    UMat eroded = new UMat();
    UMat dilated = new UMat();
    CvInvoke.Erode(closed, eroded, null, new Point(-1, -1), 4, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
    CvInvoke.Dilate(eroded, dilated, null, new Point(-1, -1), 4, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
    //pictureBox1.Image = dilated.Bitmap;
    //return;

    /*
     * (cnts, _) = cv2.findContours(closed.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
     * c = sorted(cnts, key = cv2.contourArea, reverse = True)[0]
     */
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(dilated, contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
    eroded.Dispose();
    dilated.Dispose();
    double largest_area = 0;
    int largest_contour_index = -1;
    for (int i = 0; i < contours.Size; i++)
    {
        var rect = CvInvoke.MinAreaRect(contours[i]);
        PointF[] points = rect.GetVertices();
        Rectangle BBox = GetBoundingBox(points);
        // Keep the largest bounding box that is wider than it is tall
        if (BBox.Width > BBox.Height)
        {
            double a = CvInvoke.ContourArea(contours[i], false);
            if (a > largest_area)
            {
                largest_area = a;
                largest_contour_index = i;
            }
        }
    }
    // Guard: indexing contours with -1 below would throw when no candidate was found
    if (largest_contour_index < 0)
    {
        format = "";
        code = "";
        contours.Dispose();
        image.Dispose();
        return;
    }
    //PointF[] points = rect.GetVertices();
    var ROIrect = CvInvoke.MinAreaRect(contours[largest_contour_index]);
    PointF[] ROIpoints = ROIrect.GetVertices();
    Rectangle ROIBBox = GetBoundingBox(ROIpoints);
    var extraWidth = (int)(ROIBBox.Width * 0.2);
    var extraHeight = (int)(ROIBBox.Height * 0.2);
    ROIBBox.X -= extraWidth;
    ROIBBox.Y -= extraHeight;
    ROIBBox.Width += extraWidth * 2;
    ROIBBox.Height += extraHeight * 2;
    Bitmap ROIbmp = new Bitmap(ROIBBox.Width, ROIBBox.Height);
    Graphics g = Graphics.FromImage(ROIbmp);
    g.DrawImage(image.ToBitmap(), 0, 0, ROIBBox, GraphicsUnit.Pixel);
    IBarcodeReader reader = new BarcodeReader();
    var result = reader.Decode(ROIbmp);
    // do something with the result
    if (result != null)
    {
        format = result.BarcodeFormat.ToString();
        code = result.Text;
    }
    else
    {
        format = "";
        code = "";
    }
    //ROIbmp.Dispose();
    contours.Dispose();
    image.Dispose();
}
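A hedged call-site sketch for the barcode reader above; the image path is a placeholder, and ZXing's BarcodeReader is assumed referenced, as in the snippet.

GetBarcodeFromImageEmgu(@"C:\temp\shelf.jpg", out string format, out string code); // hypothetical path
Console.WriteLine(code.Length > 0 ? $"{format}: {code}" : "no barcode found");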