public Mat Ball_only(Mat a)
{
    Mat bwmat = new Mat();
    CvInvoke.CvtColor(a, bwmat, Emgu.CV.CvEnum.ColorConversion.Bgr2Hsv);
    // The image is HSV at this point, so despite their B/G/R names these
    // globals act as lower/upper bounds on the H, S and V channels.
    CvInvoke.InRange(bwmat,
        new ScalarArray(new MCvScalar((double)Global.colors.B1, (double)Global.colors.G1, (double)Global.colors.R1)),
        new ScalarArray(new MCvScalar((double)Global.colors.B2, (double)Global.colors.G2, (double)Global.colors.R2)),
        bwmat);
    CvInvoke.MedianBlur(bwmat, bwmat, 7);
    CvInvoke.Dilate(bwmat, bwmat, s, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, sk);
    CvInvoke.Erode(bwmat, bwmat, s, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, sk);
    // Each blur-then-rethreshold pass smooths the outline of the mask a little
    // more; the original unrolled these seven identical passes by hand.
    for (int i = 0; i < 7; i++)
    {
        CvInvoke.Blur(bwmat, bwmat, new Size(10, 10), new Point(-1, -1), Emgu.CV.CvEnum.BorderType.Default);
        CvInvoke.Threshold(bwmat, bwmat, 20, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
    }
    return bwmat;
}
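An aside on the Dilate/Erode pair above: dilation followed by erosion with the same kernel is a morphological closing, so the two calls can be collapsed into one. A minimal sketch, assuming s and sk are the structuring element and border value the method already uses:

// Closing = dilate then erode; fills small holes in the white mask.
CvInvoke.MorphologyEx(bwmat, bwmat, Emgu.CV.CvEnum.MorphOp.Close, s,
    new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, sk);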
// Smooths the edges of the thresholded image.
// blur_size is the window used for averaging; srcthreshold is the value used
// to re-binarize the averaged image.
private static void imageblur(Mat roi_threshold, Mat roi_blur, Size blur_size, int srcthreshold)
{
    int height = roi_threshold.Rows;
    int width = roi_threshold.Cols;
    //CvInvoke.Blur(roi_threshold, roi_blur, blur_size);
    CvInvoke.Blur(roi_threshold, roi_blur, blur_size, new Point(-1, -1));
    Image<Gray, byte> src = roi_blur.ToImage<Gray, byte>();
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            src.Data[i, j, 0] = src.Data[i, j, 0] < srcthreshold ? (byte)0 : (byte)255;
        }
    }
    // Assigning src.Mat to the roi_blur parameter would only rebind the local
    // reference, so copy the result back into the caller's Mat instead.
    src.Mat.CopyTo(roi_blur);
    //imshow("Blur", dst);
}
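The per-pixel loop above is just a fixed binary threshold, so under that reading a single Threshold call produces the same mask and writes straight into roi_blur. Note the off-by-one: Threshold keeps pixels strictly greater than the threshold, while the loop zeroes pixels strictly below it.

CvInvoke.Blur(roi_threshold, roi_blur, blur_size, new Point(-1, -1));
// 255 where the blurred value is >= srcthreshold, 0 elsewhere, same as the loop.
CvInvoke.Threshold(roi_blur, roi_blur, srcthreshold - 1, 255, Emgu.CV.CvEnum.ThresholdType.Binary);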
// Update is called once per frame
void Update()
{
    biggestContour = new VectorOfPoint();
    contours = new VectorOfVectorOfPoint();
    biggestContourArea = 0;

    Mat image, imgGray, imgHSV, imgBlur, imgMedianBlur, imgGaussianBlur;
    image = _webcam.QueryFrame();
    CvInvoke.Flip(image, image, FlipType.Horizontal);

    imgBlur = image.Clone();
    imgMedianBlur = image.Clone();
    imgGaussianBlur = image.Clone();
    // Anchor (-1, -1) means the kernel centre; the original's (-1, 1) pinned
    // the anchor to row 1, which looks like a typo. Note that GaussianBlur and
    // MedianBlur both require an odd sizeBlur.
    CvInvoke.Blur(image, imgBlur, new Size(sizeBlur, sizeBlur), new Point(-1, -1));
    CvInvoke.GaussianBlur(image, imgGaussianBlur, new Size(sizeBlur, sizeBlur), sizeBlur / 2.0);
    CvInvoke.MedianBlur(image, imgMedianBlur, sizeBlur);

    imgGray = image.Clone();
    CvInvoke.CvtColor(image, imgGray, ColorConversion.Bgr2Gray);

    imgHSV = image.Clone();
    // QueryFrame returns BGR, so Rgb2Hsv swaps red and blue before the hue
    // test; the 60 +/- intensity window was presumably tuned around that.
    CvInvoke.CvtColor(image, imgHSV, ColorConversion.Rgb2Hsv);
    Image<Hsv, byte> ImgHSV = imgHSV.ToImage<Hsv, byte>();
    Image<Gray, byte> hsv = ImgHSV.InRange(new Hsv(60 - intensity, sMin, vMin), new Hsv(60 + intensity, sMax, vMax));
    CvInvoke.Imshow("HSV", hsv);

    Image<Gray, byte> dilate = hsv.Clone();
    Mat structuringElement = CvInvoke.GetStructuringElement(ElementShape.Ellipse, new Size(2 * sizeStruct + 1, 2 * sizeStruct + 1), new Point(sizeStruct, sizeStruct));
    CvInvoke.Dilate(hsv, dilate, structuringElement, new Point(-1, -1), nbOfIter, BorderType.Constant, new MCvScalar(0));
    CvInvoke.Erode(dilate, dilate, structuringElement, new Point(-1, -1), nbOfIter, BorderType.Constant, new MCvScalar(0));

    Mat hierarchy = new Mat();
    CvInvoke.FindContours(dilate, contours, hierarchy, RetrType.List, ChainApproxMethod.ChainApproxNone);
    for (int i = 0; i < contours.Size; i++)
    {
        if (CvInvoke.ContourArea(contours[i]) > biggestContourArea)
        {
            biggestContour = contours[i];
            biggestContourIndex = i;
            biggestContourArea = CvInvoke.ContourArea(contours[i]);
        }
    }

    var moments = CvInvoke.Moments(biggestContour);
    // M00 is zero when no contour was found; guard against dividing by it
    // and against drawing with a stale biggestContourIndex.
    if (moments.M00 > 0)
    {
        int cx = (int)(moments.M10 / moments.M00);
        int cy = (int)(moments.M01 / moments.M00);
        Point centroid = new Point(cx, cy);
        CvInvoke.Circle(image, centroid, 2, new MCvScalar(0, 0, 255), 2);
        CvInvoke.DrawContours(image, contours, biggestContourIndex, new MCvScalar(0, 0, 255), 5);
    }

    CvInvoke.Imshow("DILATATION", dilate);
    CvInvoke.Imshow("Webcam View", image);
    CvInvoke.WaitKey(24);
}
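One caveat for per-frame use: every Mat and Image allocated above wraps native memory that the garbage collector cannot see, so an Update loop like this builds up native buffers quickly. A minimal sketch of the same frame acquisition with deterministic cleanup; the elided steps are the ones above:

using (Mat frame = _webcam.QueryFrame())
using (Mat hsvFrame = new Mat())
{
    CvInvoke.Flip(frame, frame, FlipType.Horizontal);
    CvInvoke.CvtColor(frame, hsvFrame, ColorConversion.Bgr2Hsv);
    // ... InRange, morphology and FindContours as in Update() ...
}   // native buffers are released here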
private ImageData Operation(ImageForm_Service service, List<int> args)
{
    if (args == null)
    {
        return null;
    }
    if (args.Count < 4)
    {
        return null;
    }
    Size k = new Size(args[0], args[1]);
    Point anchor = new Point(args[2], args[3]);
    try
    {
        Image<Bgra, byte> image = new Image<Bgra, byte>(service.data.LastData().Bitmap);
        //Image<Gray, byte> gray = image.Convert<Gray, byte>();
        //Image<Gray, byte> blur = new Image<Gray, byte>(gray.Width, gray.Height, new Gray(0));
        //CvInvoke.Blur(gray, blur, k, anchor);
        Image<Bgra, byte> blur = new Image<Bgra, byte>(image.Width, image.Height);
        CvInvoke.Blur(image, blur, k, anchor);
        return new ImageData(blur.Bitmap, service.data.LastData().ID);
    }
    catch
    {
        return null;
    }
}
public static RotatedRect[] detectBarcodes(Bitmap inputImage)
{
    RotatedRect[] toReturn = new RotatedRect[2];
    Image<Gray, Byte> grayImage = new Image<Bgr, byte>(inputImage).Convert<Gray, Byte>();
    // Max value 255, not 1000: an 8-bit image saturates at 255 anyway.
    CvInvoke.Threshold(grayImage, grayImage, 120, 255, Emgu.CV.CvEnum.ThresholdType.BinaryInv);
    CvInvoke.Blur(grayImage, grayImage, new Size(27, 9), new Point(-1, -1));
    Mat kernel = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(21, 7), new Point(-1, -1));
    CvInvoke.MorphologyEx(grayImage, grayImage, Emgu.CV.CvEnum.MorphOp.Close, kernel, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(1));
    CvInvoke.Erode(grayImage, grayImage, null, new Point(-1, -1), 40, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(1));
    CvInvoke.Dilate(grayImage, grayImage, null, new Point(-1, -1), 40, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(1));
    grayImage.ToBitmap().Save("backscatter.png");
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(grayImage, contours, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
    Graphics boxGraphics = Graphics.FromImage(inputImage);
    // Guard: indexing contours[1] throws when fewer than two regions survive.
    if (contours.Size < 2)
    {
        return toReturn;
    }
    toReturn[0] = CvInvoke.MinAreaRect(contours[0]);
    toReturn[1] = CvInvoke.MinAreaRect(contours[1]);
    Debug.WriteLine("Barcode Size: " + contours.Size);
    return toReturn;
}
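FindContours returns contours in no guaranteed order, so contours[0] and contours[1] may be arbitrary blobs rather than the two barcodes. A sketch, assuming System.Linq is available, that keeps the two largest regions instead:

// Rank candidate regions by area and fit boxes to the two biggest.
RotatedRect[] twoLargest = Enumerable.Range(0, contours.Size)
    .OrderByDescending(i => CvInvoke.ContourArea(contours[i]))
    .Take(2)
    .Select(i => CvInvoke.MinAreaRect(contours[i]))
    .ToArray();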
// Smooths the edges of the image.
// blur_size is the window used for averaging; srcthreshold is the value used
// to re-binarize the averaged image.
private static void imageblur(Mat roi_threshold, Mat roi_blur, Size blur_size, int srcthreshold)
{
    int height = roi_threshold.Rows;
    int width = roi_threshold.Cols;
    //CvInvoke.Blur(roi_threshold, roi_blur, blur_size);
    CvInvoke.Blur(roi_threshold, roi_blur, blur_size, new Point(-1, -1));
    // GetData() returns a copy, so fetch it once, binarize it, and copy it
    // back (requires System.Runtime.InteropServices; assumes a continuous
    // single-channel 8-bit Mat). The original fetched a fresh copy per row
    // and discarded every write.
    byte[] p = roi_blur.GetData();
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            int idx = i * width + j;
            p[idx] = p[idx] < srcthreshold ? (byte)0 : (byte)255;
        }
    }
    Marshal.Copy(p, 0, roi_blur.DataPointer, p.Length);
    //imshow("Blur", dst);
}
/// <summary>
/// Tries to find the line in the image and returns a group of lines at the edges of the detected line.
/// </summary>
/// <param name="img"></param>
/// <returns></returns>
private LineSegment2D[] filterLines(Image<Bgr, Byte> img)
{
    if (!Directory.Exists(_path))
    {
        System.IO.Directory.CreateDirectory(_path);
    }
    Image<Hsv, Byte> hsvImage = img.Convert<Hsv, Byte>();
    Image<Gray, Byte>[] channels = hsvImage.Split();
    // Channel 2 of HSV is V (brightness): keep only the dark pixels.
    Image<Gray, byte> grayImg = channels[2].InRange(new Gray(0), new Gray(30));

    // Decrease noise in the images.
    CvInvoke.Dilate(grayImg, grayImg, null, new Point(), 1, BorderType.Default, default(MCvScalar));
    CvInvoke.Erode(grayImg, grayImg, null, new Point(), 2, BorderType.Default, default(MCvScalar));
    CvInvoke.Blur(grayImg, grayImg, new Size(20, 20), new Point());

    LineSegment2D[] lines = grayImg.HoughLines(0, 0, 5, 5, 10, 7, 1)[0];

    // Show lines in the image for logging and testing purposes.
    img = grayImg.Convert<Bgr, Byte>();
    foreach (LineSegment2D line in lines)
    {
        img.Draw(line, new Bgr(Color.Green), 3);
    }
    img.Save(_path + "OutputImage" + _count + ".png");
    return lines;
}
Mat preprocess(Mat momel)
{
    Mat squirrel = new Mat();
    CvInvoke.CvtColor(momel, squirrel, ColorConversion.Bgr2Bgra);
    // Anchor (-1, -1) is the kernel centre; for a 3x3 kernel the original's
    // (-1, 1) resolved to the centre anyway, so this only makes the intent explicit.
    CvInvoke.Blur(squirrel, squirrel, new Size(3, 3), new Point(-1, -1));
    return squirrel;
}
private void buttonBlurOpenCV_Click(object sender, EventArgs e)
{
    // Clamp the kernel to at least 3x3.
    if (blurFactor < 3)
    {
        blurFactor = 3;
    }
    // Note the anchor here is the top-left corner (0, 0), not the centre.
    CvInvoke.Blur(imageOriginal, imageConverted, new Size(blurFactor, blurFactor), new Point(0, 0), BorderType.Default);
    imageBoxShapen.Image = imageConverted;
}
// Converts the image to black and white.
// seuilb / seuilh ("seuil bas/haut") are the low / high HSV thresholds.
Image<Gray, byte> Convert(Vector3 seuilb, Vector3 seuilh)
{
    CvInvoke.CvtColor(imageMat, imageHSV, ColorConversion.Bgr2Hsv);
    CvInvoke.Blur(imageHSV, imageHSV, new Size(4, 4), new Point(1, 1));
    seuilbasHsv = new Hsv(seuilb.x, seuilb.y, seuilb.z);
    seuilhautHsv = new Hsv(seuilh.x, seuilh.y, seuilh.z);
    Image<Hsv, byte> imgConverti = imageHSV.ToImage<Hsv, byte>();
    Image<Gray, byte> imgseuil = imgConverti.InRange(seuilbasHsv, seuilhautHsv);
    return imgseuil;
}
private void MeanToolStripMenuItem_Click(object sender, EventArgs e)
{
    if (!src.IsEmpty)
    {
        // The FilterSize dialog sets the kernel size field par.
        FilterSize fs = new FilterSize();
        fs.ShowDialog();
        dst = src.Clone();
        CvInvoke.Blur(src, dst, new Size(par, par), new Point(-1, -1));
        imageBox1.Image = dst;
    }
}
private void button3_Click(object sender, EventArgs e)
{
    Image<Bgr, byte> dst = src.CopyBlank();
    CvInvoke.Blur(src, dst, new Size(g_nBlurValue, g_nBlurValue), new Point(-1, -1));
    // Parameter 1: src, the input image (a Mat). Channels are processed independently and
    //   any channel count works, but the depth must be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
    // Parameter 2: dst, the output image, which must match the source in size and type;
    //   e.g. initialize it from the source with Mat::Clone.
    // Parameter 3: ksize, the kernel size, written Size(w, h) where w is the width and h the
    //   height in pixels; Size(3, 3) is a 3x3 kernel, Size(5, 5) a 5x5 kernel.
    // Parameter 4: anchor, the point being smoothed. The default Point(-1, -1) means any
    //   negative coordinate places the anchor at the kernel centre.
    // Parameter 5: borderType, the mode used to extrapolate pixels outside the image.
    //   It defaults to BORDER_DEFAULT and can usually be left alone.
    imageBox2.Image = dst;
}
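Blur is the normalized box filter, so the handler above can be written with BoxFilter as well; a small sketch of the equivalence, reusing the same names:

// Same result as CvInvoke.Blur: a box filter with normalize = true,
// so each output pixel is the plain mean of its ksize neighbourhood.
CvInvoke.BoxFilter(src, dst, Emgu.CV.CvEnum.DepthType.Default,
    new Size(g_nBlurValue, g_nBlurValue), new Point(-1, -1),
    true, Emgu.CV.CvEnum.BorderType.Default);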
private void ResetBackground(IImage background)
{
    Log.Info("(Re)initializing background");
    var blurredBackground = new Image<Gray, byte>(background.Size);
    CvInvoke.Blur(background, blurredBackground, new Size(10, 10), new Point(-1, -1));
    blurredBackground.Save($@"C:\Thermobox\background{++_backgroundIndex}.jpg");
    _backgroundMean = CvInvoke.Mean(blurredBackground).V0;
    _lastBackgroundReset = _timeProvider.Now;
    _noBoundingBox = null;
    _resetBackground = null;
    _foundNothingCount = 0;
}
private void button7_Click(object sender, EventArgs e)
{
    if (sourceImg == null)
    {
        MessageBox.Show("Please select an image!");
        return;
    }
    Image<Bgr, Byte> blurImg = new Image<Bgr, byte>(imageBox1.Image.Bitmap);
    //blurImg = blurImg.SmoothBlur(blurImg.Width, blurImg.Height);
    Point anchor = new Point(-1, -1);
    // The trackbar maps to odd kernel sizes: 3, 5, 7, ...
    int size = trackBar2.Value * 2 + 3;
    CvInvoke.Blur(blurImg, blurImg, new Size(size, size), anchor);
    imageBox3.Image = blurImg;
    label3.Text = "Mean filter";
}
private void FrmBlurAverage_PassValuesEvent(object sender, FunctionArgs.BlurAverageArgs e)
{
    Size ksize = new Size(e.KernelSize, e.KernelSize);
    Point anchor = new Point(-1, -1);
    //ToDo: add a BorderType option (open issue: BorderType.Constant)
    BorderType borderType = BorderType.Default;
    switch (e.BlurType)
    {
        case FilterType.Average:
            CvInvoke.Blur(mCurrentImage, mTempImage, ksize, anchor, borderType);
            break;
        case FilterType.Box:
            CvInvoke.BoxFilter(mCurrentImage, mTempImage, DepthType.Default, ksize, anchor, e.Normalize, borderType);
            break;
        case FilterType.Gaussian:
            //ToDo: add a SigmaX option
            CvInvoke.GaussianBlur(mCurrentImage, mTempImage, ksize, e.SigmaX, 0, borderType);
            break;
        case FilterType.Median:
            CvInvoke.MedianBlur(mCurrentImage, mTempImage, e.KernelSize);
            break;
        case FilterType.Bilateral:
            //ToDo: bilateral filter options
            //CvInvoke.BilateralFilter(mCurrentImage,mTempImage)
            break;
        default:
            break;
    }
    // Preview disabled: restore the current image; otherwise show the filtered one.
    if (!e.PreviewEnabled)
    {
        mFrmMainImage.SetImageSource(mCurrentImage);
    }
    else
    {
        mFrmMainImage.SetImageSource(mTempImage);
    }
}
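For the Bilateral branch left as a ToDo above, the call could look like the sketch below. The diameter and sigma values are illustrative placeholders, not values from the original project, and BilateralFilter cannot run in place, so the separate mTempImage buffer matters here:

// Hypothetical completion of the Bilateral case: 9 is the pixel
// neighbourhood diameter; the two 75s (sigmaColor, sigmaSpace) control how
// different in colour and position pixels may be and still be averaged.
CvInvoke.BilateralFilter(mCurrentImage, mTempImage, 9, 75, 75, borderType);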
private void button2_Click(object sender, EventArgs e)
{
    double threshold1 = Convert.ToDouble(numericUpDown1.Value);
    double threshold2 = Convert.ToDouble(numericUpDown2.Value);
    Image<Gray, byte> dst_gray = src.Convert<Gray, byte>();
    // Reduce noise with a 3x3 kernel first.
    CvInvoke.Blur(dst_gray, dst_gray, new Size(3, 3), new Point(1, 1));
    CvInvoke.Canny(dst_gray, dst_gray, threshold1, threshold2);
    // Parameter 1: image, the input, which must be a single-channel 8-bit image.
    // Parameter 2: edges, the output edge map, same size and type as the source.
    // Parameter 3: threshold1, the first hysteresis threshold.
    // Parameter 4: threshold2, the second hysteresis threshold.
    // Parameter 5: apertureSize, the aperture of the Sobel operator, default 3.
    // Parameter 6: L2gradient, whether to use the more exact L2 norm for the
    //   gradient magnitude, default false.
    imageBox2.Image = dst_gray;
}
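The OpenCV documentation recommends an upper:lower hysteresis ratio between 2:1 and 3:1; a sketch of deriving both values from a single control (the 3:1 ratio is an illustrative choice, not from the original form):

// Derive both hysteresis thresholds from one user value at a 3:1 ratio.
double low = Convert.ToDouble(numericUpDown1.Value);
double high = low * 3;
CvInvoke.Canny(dst_gray, dst_gray, low, high);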
public static byte[] Canny(IntPtr buffer, int width, int height, bool smooth = false)
{
    // Wrap the caller's buffer without copying; step = width assumes a
    // densely packed single-channel 8-bit image.
    Mat source = new Mat(height, width, DepthType.Cv8U, 1, buffer, width);
    Mat blurred = new Mat(height, width, DepthType.Cv8U, 1);
    if (smooth)
    {
        CvInvoke.Blur(source, blurred, new Size(3, 3), new Point(-1, -1));
    }
    Mat cannyEdges = new Mat(height, width, DepthType.Cv8U, 1);
    double cannyThreshold = 180.0;
    double cannyThresholdLinking = 60.0;
    CvInvoke.Canny(smooth ? blurred : source, cannyEdges, cannyThreshold, cannyThresholdLinking);
    return cannyEdges.GetData();
}
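A hypothetical caller for the helper above, pinning a managed buffer so its address stays valid for the duration of the call; the array name and sizes are illustrative:

using System.Runtime.InteropServices;

byte[] pixels = new byte[width * height];   // 8-bit grayscale, no row padding
GCHandle handle = GCHandle.Alloc(pixels, GCHandleType.Pinned);
try
{
    byte[] edges = Canny(handle.AddrOfPinnedObject(), width, height, smooth: true);
}
finally
{
    handle.Free();   // unpin so the GC can move the array again
}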