/// <summary>
/// Gets contours from a thresholded original image.
/// </summary>
/// <param name="imgThresh">Threshold of the original image.</param>
/// <param name="errorCode">Error code; set to 6 on failure.</param>
/// <returns>Vector of vector of point contours (empty on failure).</returns>
private static VectorOfVectorOfPoint GetImageContours(Mat imgThresh, ref int errorCode)
{
    try
    {
        // Edge image produced by the Canny detector.
        Mat imgCanny = new Mat();

        // Container receiving the detected contours.
        VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();

        // Find edges using the Canny algorithm. FIX: thresholds were passed
        // as (150, 50); OpenCV uses the smaller value for edge linking and
        // the larger for strong edges regardless of order, so (50, 150) is
        // behavior-identical but reads in the conventional (low, high) order.
        CvInvoke.Canny(imgThresh, imgCanny, 50, 150, 7);

        // Extract all contours from the Canny edge image.
        CvInvoke.FindContours(imgCanny, contours, null, RetrType.Ccomp, ChainApproxMethod.ChainApproxNone);

        // Show the edge image when debugging is enabled.
        if (Properties.Settings.Default.debug)
        {
            CvInvoke.Imshow("Contours", imgCanny);
        }

        return (contours);
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
        errorCode = 6;
        return (new VectorOfVectorOfPoint());
    }
}
/// <summary>
/// Button handler: finds line segments in the first selected region of the
/// source picture box and draws them on the target picture box.
/// </summary>
private void Btn_findLine_Click(object sender, EventArgs e)
{
    //LineSegment2D line1 = new LineSegment2D(new Point(1, 1), new Point(1, 20));
    //LineSegment2D line2 = new LineSegment2D(new Point(1, 1), new Point(20, 1));
    //MessageBox.Show(line1.GetExteriorAngleDegree(line2).ToString());
    var bitmap = this.picSrc.GetFirstRegionRect();
    var image = new Image <Bgr, byte>(bitmap);
    UMat grayImage = new UMat();
    CvInvoke.CvtColor(image, grayImage, ColorConversion.Bgr2Gray);
    // Use a Gaussian filter to remove noise (currently disabled).
    //CvInvoke.GaussianBlur(grayImage, grayImage, new Size(5, 5), 3);
    //CvInvoke.Imshow("Blur Image", grayImage);
    UMat cannyEdges = new UMat();
    CvInvoke.Canny(grayImage, cannyEdges, 100, 120);
    CvInvoke.Imshow("Canny Image", cannyEdges);
    CvInvoke.WaitKey(2);
    // Probabilistic Hough transform: rho = 1 px, theta = PI/40 rad,
    // accumulator threshold 20, min length 8, max gap 3.
    var lines = CvInvoke.HoughLinesP(cannyEdges, 1, Math.PI / 40.0, 20, 8, 3);
    for (int i = 0; i < lines.Length; i++)
    {
        var line = lines[i];
        // Draw each detected segment in red (BGR scalar order) on the source image.
        CvInvoke.Line(image, line.P1, line.P2, new MCvScalar(0, 0, 255));
    }
    this.picTarget.LoadImage(image.ToBitmap());
}
/// <summary>
/// Builds a Canny edge image of the current ROI frame: grayscale conversion,
/// median blur, then Canny with either fixed or Otsu-derived thresholds.
/// </summary>
/// <returns>The Canny edge image.</returns>
private Mat getCannyImage()
{
    Mat GrayFrame = new Mat();
    Mat BlurFrame = new Mat();
    Mat CannyFrame = new Mat();
    CvInvoke.CvtColor(ROIFrame, GrayFrame, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
    CvInvoke.MedianBlur(GrayFrame, BlurFrame, parameters.blurValue);
    // NOTE(review): these assignments cross the parameter names
    // (Thresh2 <- cannyThreshold1, Thresh1 <- cannyThreshold2) — confirm the
    // swap is intentional and not a copy/paste bug.
    double CannyThresh2 = parameters.cannyThreshold1;
    double CannyThresh1 = parameters.cannyThreshold2;
    if (parameters.isAdaptive)
    {
        // Adaptive mode: Otsu picks the high threshold automatically, the low
        // threshold becomes 10% of it, and the UI sliders mirror both values.
        CannyThresh2 = CvInvoke.Threshold(BlurFrame, CannyFrame, 0, 255, Emgu.CV.CvEnum.ThresholdType.Binary | Emgu.CV.CvEnum.ThresholdType.Otsu);
        CannyThresh1 = 0.1 * CannyThresh2;
        if (InvokeRequired)
        {
            // Marshal the slider updates onto the UI thread.
            Invoke(new MethodInvoker(() =>
            {
                cannyThreshold1.Value = (int)CannyThresh1;
                cannyThreshold2.Value = (int)CannyThresh2;
            }));
        }
    }
    CvInvoke.Canny(BlurFrame, CannyFrame, CannyThresh1, CannyThresh2);
    return (CannyFrame);
}
/// <summary>
/// Extracts contours from a bitmap via a grayscale → Gaussian blur → Canny →
/// FindContours pipeline.
/// </summary>
/// <param name="bitmap">Input image.</param>
/// <returns>The contour hierarchy detected in the Canny edge map.</returns>
public VectorOfVectorOfPoint GetContours(Bitmap bitmap)
{
    using var colorImage = bitmap.ToImage <Bgr, byte>();

    // Single-channel grayscale copy of the input.
    var gray = colorImage.Convert <Gray, byte>();

    // Suppress noise with a 5x5 Gaussian kernel (sigma chosen automatically).
    var blurred = new Image <Gray, byte>(gray.Size);
    CvInvoke.GaussianBlur(gray, blurred, new Size(5, 5), 0);

    // Edge detection with hysteresis thresholds 50/100.
    using var edges = new UMat();
    CvInvoke.Canny(blurred, edges, 50, 100);

    // Full contour tree, with chains simplified to their endpoints.
    var result = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(edges, result, null, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
    return result;
}
/// <summary>
/// Exercises indexing: builds a 2x-upscaled Canny edge image of sample13 and
/// runs the pipeline on both the raw sample and the saved edge image.
/// </summary>
public void Can_use_indexes()
{
    var bmp = Samples.sample13;
    var width = bmp.Width;
    var height = bmp.Height;

    // Mirror the bitmap into a UMat for accelerated processing.
    using var src = new UMat();
    bmp.ToMat().CopyTo(src);

    // Gray → 2x upscale → Canny edges.
    using var gray = new UMat();
    using var resized = new UMat();
    using var canny = new UMat();
    CvInvoke.CvtColor(src, gray, ColorConversion.Bgra2Gray);
    CvInvoke.Resize(gray, resized, new Size(width * 2, height * 2));
    CvInvoke.Canny(resized, canny, 100, 40);

    // Run once on the original sample, then on the persisted edge image.
    Run("samples/sample13.png");
    canny.Save("canny.png");
    Run("canny.png");
}
/// <summary>
/// Per-frame capture handler: grabs a frame from capture 0, builds grayscale /
/// pyramid-smoothed / Canny views, displays them, and optionally records video.
/// </summary>
private void ProcessFrame(object sender, EventArgs arg)
{
    if (_capture0 != null && _capture0.Ptr != IntPtr.Zero)
    {
        _capture0.Retrieve(_frame0, 0);
        CvInvoke.CvtColor(_frame0, _grayFrame, ColorConversion.Bgr2Gray);
        // PyrDown followed by PyrUp acts as a cheap smoothing filter.
        CvInvoke.PyrDown(_grayFrame, _smallGrayFrame);
        CvInvoke.PyrUp(_smallGrayFrame, _smoothedGrayFrame);
        CvInvoke.Canny(_smoothedGrayFrame, _cannyFrame, 100, 60);
        captureImageBox.Image = _frame0;
        //grayscaleImageBox.Image = _grayFrame;
        smoothedGrayscaleImageBox.Image = _smoothedGrayFrame;
        cannyImageBox.Image = _cannyFrame;
        ILog log = log4net.LogManager.GetLogger("visClient.Logging");
        log.Info(DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff") + "frame0 captured");
        // When recording is enabled, append the raw frame to the video writer.
        if (flag)
        {
            vw.Write(_frame0);
            log.Info(DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff") + "video writing...");
        }
    }
}
/// <summary>
/// Runs the Canny edge detector on the given image.
/// </summary>
/// <param name="img">Source image.</param>
/// <returns>Single-channel edge map.</returns>
private Mat Canny(Mat img)
{
    Mat output = new Mat();
    // FIX: thresholds were passed as (100, 50). OpenCV uses the smaller value
    // for edge linking and the larger for strong edges regardless of order,
    // so (50, 100) is behavior-identical but reads in conventional
    // (low, high) order.
    CvInvoke.Canny(img, output, 50, 100, 3);
    return (output);
}
/// <summary>
/// Button handler: replaces the displayed image with its Canny edge map
/// (thresholds 30/200).
/// </summary>
private void btnGetCanny_Click(object sender, EventArgs e)
{
    var edges = new Mat();
    CvInvoke.Canny(imgClone.Image, edges, 30, 200);
    imgClone.Image = edges;
}
/// <summary>
/// Rebuilds the preview: optionally overlays Canny edges and detected Hough
/// circles on a BGR copy of the grayscale source, then shows the result.
/// </summary>
private void UpdateImage()
{
    Mat displayImage = new Mat(m_image.Size, DepthType.Cv8U, 3);
    CvInvoke.CvtColor(m_image, displayImage, ColorConversion.Gray2Bgr);
    if (cbEnableCanny.IsChecked.Value)
    {
        // BUG FIX: Canny reallocates its destination as a single-channel
        // image, which silently turned displayImage grayscale and broke the
        // red circle drawing below. Run Canny into a temporary and convert
        // the edge map back to BGR so later color drawing still works.
        using (Mat edges = new Mat())
        {
            CvInvoke.Canny(m_image, edges, scThresh1.Value, scThresh2.Value);
            CvInvoke.CvtColor(edges, displayImage, ColorConversion.Gray2Bgr);
        }
    }
    if (cbEnableCircles.IsChecked.Value)
    {
        try
        {
            var circs = CvInvoke.HoughCircles(m_image, HoughType.Gradient, 1, (int)scHoughDist.Value, (int)scThresh1.Value, (int)scThresh2.Value, (int)scHoughMin.Value, (int)scHoughMax.Value);
            foreach (var circle in circs)
            {
                // Draw each detected circle in red (BGR) with a 3 px outline.
                CvInvoke.Circle(displayImage, new System.Drawing.Point((int)circle.Center.X, (int)circle.Center.Y), (int)circle.Radius, new Emgu.CV.Structure.MCvScalar(0, 0, 255), 3);
            }
        }
        catch (Exception exc)
        {
            // HoughCircles can throw on degenerate parameter combinations.
            Console.WriteLine("HoughCircle Exception: " + exc.Message);
        }
    }
    imgPreview.Source = ConvertImage.ToBitmapSource(displayImage);
}
/// <summary>
/// Edge detection. NOTE(review): the method is named Sobel and the original
/// comment said "sobel edge detection", but the body actually runs the Canny
/// detector — confirm which operator was intended.
/// </summary>
/// <param name="mat">Source image.</param>
/// <returns>Edge map produced by Canny with thresholds 90/120, aperture 3.</returns>
public static Mat Sobel(Mat mat)
{
    Mat outMat = new Mat();
    CvInvoke.Canny(mat, outMat, 90, 120, 3);
    return (outMat);
}
/// <summary>
/// Removes regions connected to the image border by flood-filling from near
/// the corner, with the Canny edge map (padded by 1 px) acting as the fill
/// barrier mask.
/// </summary>
/// <param name="inMat">Input image, treated as single-channel gray.</param>
/// <param name="color">Fill color.</param>
/// <returns>The cleaned image with the edge map subtracted.</returns>
public static Mat ClearBorder(Mat inMat, Color color)
{
    // FIX: the original allocated a throwaway Image<Gray, byte> and then
    // immediately overwrote the reference with ToImage(), leaking the first
    // allocation; the dead allocation is removed here.
    Image <Gray, byte> src = inMat.ToImage <Gray, byte>();
    Image <Gray, byte> cannyOut = new Image <Gray, byte>(src.Size);
    CvInvoke.Canny(src, cannyOut, 5, 5 * 3);
    // Edge extension: FloodFill requires a mask two pixels wider and taller
    // than the image, so copy the edge map into a 1 px-padded mask via ROI.
    Image <Gray, byte> mask = new Image <Gray, byte>(new Size(src.Width + 2, src.Height + 2));
    CvInvoke.cvSetImageROI(mask, new Rectangle(1, 1, src.Width, src.Height));
    cannyOut.CopyTo(mask);
    CvInvoke.cvResetImageROI(mask);
    Rectangle rect = new Rectangle();
    CvInvoke.FloodFill(
        src,                                               // 1 source image (filled in place)
        mask,                                              // 2 mask: edges block the fill
        new Point(1, 1),                                   // 3 seed point
        new MCvScalar(color.Blue, color.Green, color.Red), // 4 fill color value
        out rect,                                          // 5 bounding rect of the filled area
        new MCvScalar(25, 25, 25),                         // 6 lower brightness difference
        new MCvScalar(0, 0, 0),                            // 7 upper brightness difference
        Connectivity.EightConnected,                       // 8 connectivity setting
        FloodFillType.FixedRange                           // 9 fixed-range comparison
        );
    // Subtract the edge map so the Canny outlines do not remain in the result.
    src = src - cannyOut;
    return (src.Mat);
}
/// <summary>
/// Prepare the image: optional downscale, grayscale conversion, pyramid
/// denoising, then Canny edge detection.
/// </summary>
/// <returns>The <see cref="UMat"/> instance holding the edge map.</returns>
private UMat PrepareImage()
{
    // Resize image when both dimensions exceed MaxSize.
    // NOTE(review): the height argument scales by Width / Height, which looks
    // like an inverted aspect ratio — confirm the intended target size.
    if (_image.Width > MaxSize && _image.Height > MaxSize)
    {
        _image = _image.Resize(MaxSize, MaxSize * _image.Width / _image.Height, Inter.Linear, true);
    }

    // Convert the image to gray-scale and filter out the noise.
    using var uimage = new UMat();
    CvInvoke.CvtColor(_image, uimage, ColorConversion.Bgr2Gray);

    // Use image pyramid (down then up) to remove noise.
    using var pyrDown = new UMat();
    CvInvoke.PyrDown(uimage, pyrDown);
    CvInvoke.PyrUp(pyrDown, uimage);

    var cannyEdges = new UMat();
    CvInvoke.Canny(uimage, cannyEdges, ThresholdMin, ThresholdMax, l2Gradient: L2Gradient);

    // Another way to process image, but worse. Use only one!
    //CvInvoke.Threshold(uimage, cannyEdges, 50.0, 100.0, ThresholdType.Binary);
    //CvInvoke.AdaptiveThreshold(uimage, cannyEdges, 50, AdaptiveThresholdType.MeanC, ThresholdType.Binary, 7, 1);
    return cannyEdges;
}
/// <summary>
/// Frame-grab handler: shows the captured frame plus its grayscale,
/// pyramid-smoothed, and Canny-edge derivations in their image boxes.
/// </summary>
public override void ImageGrabbedHandler(object sender, EventArgs e)
{
    var captured = new Mat();
    CameraCapture.Retrieve(captured);

    // Grayscale conversion of the captured frame.
    var gray = new Mat();
    CvInvoke.CvtColor(captured, gray, ColorConversion.Bgr2Gray);

    // PyrDown followed by PyrUp acts as an inexpensive smoothing filter.
    var half = new Mat();
    CvInvoke.PyrDown(gray, half);
    var smoothed = new Mat();
    CvInvoke.PyrUp(half, smoothed);

    // Edge map of the smoothed frame (thresholds 100/60).
    var edges = new Mat();
    CvInvoke.Canny(smoothed, edges, 100, 60);

    imageBoxCaptured.Image = captured;
    imageBoxGray.Image = gray;
    imageBoxSmoothedGray.Image = smoothed;
    imageBoxCanny.Image = edges;

    NotifyStatus(string.Empty);
}
/// <summary>
/// Capture callback: retrieves a frame and shows it alongside its grayscale,
/// pyramid-smoothed, and Canny-edge versions.
/// </summary>
private void ProcessFrame(object sender, EventArgs arg)
{
    Mat captured = new Mat();
    _capture.Retrieve(captured, 0);

    // Grayscale version of the frame.
    Mat gray = new Mat();
    CvInvoke.CvtColor(captured, gray, ColorConversion.Bgr2Gray);

    // Pyramid down/up pass = inexpensive smoothing.
    Mat half = new Mat();
    CvInvoke.PyrDown(gray, half);
    Mat smoothed = new Mat();
    CvInvoke.PyrUp(half, smoothed);

    // Edge map of the smoothed frame (thresholds 100/60).
    Mat edges = new Mat();
    CvInvoke.Canny(smoothed, edges, 100, 60);

    this.ImgNormalZm.Image = captured;
    this.ImgGrayZm.Image = gray;
    this.ImgSmoothedGrayZm.Image = smoothed;
    this.ImgCannyZm.Image = edges;
}
/// <summary>
/// Interactive demo: lets the user pick an image, then shows the original,
/// its grayscale conversion, and its Canny edge map in debug windows.
/// </summary>
public static void TestPreprocess()
{
    // Ask the user to pick a single image file.
    OpenFileDialog ofd = new OpenFileDialog();
    ofd.Multiselect = false;
    if (!(ofd.ShowDialog() == DialogResult.OK))
    {
        return;
    }
    // Simple image processing demo.
    Mat img = CvInvoke.Imread(ofd.FileName, ImreadModes.Unchanged);
    if (img.IsEmpty)
    {
        Console.WriteLine("can not load the image \n");
    }
    CvInvoke.Imshow("Image", img);
    Mat grayImg = new Mat();
    // Convert to a grayscale image.
    CvInvoke.CvtColor(img, grayImg, ColorConversion.Rgb2Gray);
    CvInvoke.Imshow("Gray Image", grayImg);
    // Sobel x-gradient (computed but not displayed).
    Mat sobelImg = new Mat();
    CvInvoke.Sobel(grayImg, sobelImg, grayImg.Depth, 1, 0);
    // Find edges with the Canny operator.
    Mat cannyImg = new Mat();
    CvInvoke.Canny(grayImg, cannyImg, 20, 40);
    CvInvoke.Imshow("Canny Image", cannyImg);
    CvInvoke.WaitKey(0);
}
/// <summary>
/// Capture callback: retrieves a frame, derives grayscale / pyramid-smoothed /
/// Canny images, and displays only the raw capture (the other preview boxes
/// are intentionally disabled).
/// </summary>
private void ProcessFrame(object sender, EventArgs arg)
{
    Mat captured = new Mat();
    _capture.Retrieve(captured, 0);

    // Grayscale version of the frame.
    Mat gray = new Mat();
    CvInvoke.CvtColor(captured, gray, ColorConversion.Bgr2Gray);

    // PyrDown + PyrUp acts as an inexpensive smoothing filter.
    Mat half = new Mat();
    CvInvoke.PyrDown(gray, half);
    Mat smoothed = new Mat();
    CvInvoke.PyrUp(half, smoothed);

    // Edge map of the smoothed frame (computed, currently not shown).
    Mat edges = new Mat();
    CvInvoke.Canny(smoothed, edges, 100, 60);

    captureImageBox.Image = captured;
    // Remaining preview boxes deliberately left unassigned, as in the
    // original (their assignments were commented out).
}
/// <summary>
/// Menu handler: runs Canny (with a k-means-derived threshold) on the current
/// image and shows the edge map in the result picture box.
/// </summary>
private void cannyToolStripMenuItem_Click(object sender, EventArgs e)
{
    // Operate on the processed result when one exists, otherwise the source.
    Bitmap bm;
    if (!processed)
    {
        bm = (Bitmap)srcPicBox.Image;
    }
    else
    {
        bm = (Bitmap)resPicBox.Image;
    }
    Image <Bgr, Byte> img = new Image <Bgr, Byte>(bm);
    var uimage = new UMat();
    CvInvoke.CvtColor(img, uimage, ColorConversion.Bgr2Gray);
    var grayimage = new Image <Gray, byte>(bm);
    CvInvoke.CvtColor(img, grayimage, ColorConversion.Bgr2Gray);
    BlackBG(grayimage);
    // FIX: GetKMeansThreshold was called twice in a row with the same
    // argument and the first result discarded; the redundant call is removed.
    var cannyThreshold = GetKMeansThreshold(grayimage);
    var cannyEdges = new UMat();
    // Both hysteresis thresholds use the k-means value.
    CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThreshold);
    resPicBox.Image = cannyEdges.Bitmap;
    processed = true;
}
/// <summary>
/// Runs Canny on the given image, detects line segments with a probabilistic
/// Hough transform (stored in the <c>lines</c> field), and returns the
/// contours found in the edge map. Timing is appended to <c>msgBuilder</c>.
/// </summary>
/// <param name="uimage">Preprocessed grayscale image.</param>
/// <param name="cannyThreshold">First Canny hysteresis threshold.</param>
/// <param name="cannyThresholdLinking">Second (linking) Canny threshold.</param>
/// <param name="rho">Hough distance resolution in pixel-related units.</param>
/// <param name="thresh">Hough accumulator threshold.</param>
/// <param name="minwidth">Minimum line width.</param>
/// <param name="gap">Maximum gap between collinear segments.</param>
/// <returns>Contours of the Canny edge image.</returns>
public VectorOfVectorOfPoint Canny(ref UMat uimage, double cannyThreshold, double cannyThresholdLinking, double rho, int thresh, double minwidth, double gap)
{
    Stopwatch watch = Stopwatch.StartNew();
    UMat cannyEdges = new UMat();
    CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);
    lines = CvInvoke.HoughLinesP(
        cannyEdges,
        rho,          //Distance resolution in pixel-related units
        Math.PI / 45, //Angle resolution measured in radians.
        // Math.PI / 45, //Angle resolution measured in radians.
        thresh,       //threshold 100
        minwidth,     //min Line width 2
        gap);         //gap between lines
    // 20,  //threshold
    // 30,  //min Line width
    // 10); //gap between lines
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
    watch.Stop();
    msgBuilder.Append(String.Format("Canny & Hough lines - {0} ms; ", watch.ElapsedMilliseconds));
    // lineImageBox.Image = lineImage;
    return (contours);
}
/// <summary>
/// Benchmark setup: loads the sample bitmap and precomputes grayscale,
/// 2x-resized, Sobel-gradient, and Canny UMats plus an MSER detector.
/// </summary>
public void Setup()
{
    var sourceBitmap = Samples.sample13;
    _width = sourceBitmap.Width;
    _height = sourceBitmap.Height;
    _source = new UMat();
    _gray = new UMat();
    _resized = new UMat();
    _canny = new UMat();
    _dx = new UMat();
    _dy = new UMat();
    var source = sourceBitmap.ToMat();
    source.CopyTo(_source);
    CvInvoke.CvtColor(_source, _gray, ColorConversion.Bgra2Gray);
    // Upscale 2x before edge detection.
    CvInvoke.Resize(_gray, _resized, new Size(_width * 2, _height * 2), interpolation: Inter.Linear);
    // 16-bit signed Sobel derivatives feed the gradient-based Canny overload.
    CvInvoke.Sobel(_resized, _dx, DepthType.Cv16S, 1, 0);
    CvInvoke.Sobel(_resized, _dy, DepthType.Cv16S, 0, 1);
    // This Canny overload takes precomputed x/y gradients instead of an image.
    CvInvoke.Canny(_dx, _dy, _canny, 80, 40);
    _mser = new MSER(
        minArea: 5, maxArea: 80, edgeBlurSize: 5);
}
/// <summary>
/// Detects line segments in the given image via Canny + probabilistic Hough
/// transform and returns a copy of the input with the segments drawn on it.
/// </summary>
/// <param name="redImg2">Source image.</param>
/// <returns>Annotated copy of the input as an Rgba image.</returns>
public Image <Rgba, byte> Hough(ref Image <Rgb, byte> redImg2)
{
    // WrongHough(redImg2);
    Mat input = redImg2.Mat;

    // Edge map with tight hysteresis thresholds (95/100).
    Mat edges = new Mat();
    CvInvoke.Canny(input, edges, 95, 100);

    // HoughLinesP: 1 px distance resolution, 1 degree angular resolution,
    // accumulator threshold 100, min length 100, max gap 10.
    double theta = Math.PI / 180;
    LineSegment2D[] segments = CvInvoke.HoughLinesP(edges, 1, theta, 100, 100, 10);

    // Draw every segment in black with anti-aliasing on a clone of the input.
    Mat annotated = input.Clone();
    MCvScalar black = new MCvScalar(0);
    foreach (LineSegment2D segment in segments)
    {
        CvInvoke.Line(annotated, segment.P1, segment.P2, black, 1, Emgu.CV.CvEnum.LineType.AntiAlias, 0);
    }
    return (annotated.ToImage <Rgba, Byte>());
}
/// <summary>
/// Button handler: Canny edge detection on a fixed test image, Hough line
/// extraction, and drawing of the detected lines; all stages are written to
/// disk and the line overlay is shown in the UI.
/// </summary>
private void Canny_Click(object sender, RoutedEventArgs e)
{
    Image <Bgr, Byte> sourceImg = new Image <Bgr, Byte>("C:\\Users\\Chase\\Desktop\\camoSample\\testSubjectOne.jpg");
    Mat source = sourceImg.Mat;
    Mat gray = new Mat();
    CvInvoke.CvtColor(source, gray, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
    // Edge map stored in the class-level 'edges' Mat.
    CvInvoke.Canny(gray, edges, 80, 250);
    Image <Bgr, Byte> cannyEdges = edges.ToImage <Bgr, Byte>();
    CvInvoke.Imwrite("C:\\Users\\Chase\\Desktop\\camoSample\\cannyEdges.jpg", cannyEdges);
    MCvScalar m = new MCvScalar(0, 0, 255);
    // NOTE(review): threshold, minLineLength, and maxGap are all 0, which
    // accepts essentially every candidate line and can produce a very dense
    // result — confirm these parameters are intentional.
    lines = CvInvoke.HoughLinesP(edges, 1, Math.PI / 180, 0, 0, 0);
    Image <Bgr, Byte> lineImage = sourceImg.CopyBlank();
    foreach (LineSegment2D line in lines)
    {
        // Draw each segment in red on both a blank canvas and the original.
        lineImage.Draw(line, new Bgr(System.Drawing.Color.Red), 1);
        sourceImg.Draw(line, new Bgr(System.Drawing.Color.Red), 1);
    }
    CvInvoke.Imwrite("C:\\Users\\Chase\\Desktop\\camoSample\\cannyHoughPattern.jpg", lineImage);
    CvInvoke.Imwrite("C:\\Users\\Chase\\Desktop\\camoSample\\originalHoughPattern.jpg", sourceImg);
    // Reload the saved overlay from disk for display in the WPF image control.
    BitmapImage bitmapLineImage = new BitmapImage(new Uri("C:\\Users\\Chase\\Desktop\\camoSample\\cannyHoughPattern.jpg"));
    imageControlFour.Source = bitmapLineImage;
}
/// <summary>
/// Detects license plates: grayscale conversion, Gaussian blur, Canny edge
/// detection, then a contour-tree search for plate-shaped regions. Each stage
/// is displayed in a debug window (window titles are Indonesian).
/// </summary>
/// <param name="img">Input BGR image.</param>
/// <param name="licensePlateImagesList">Receives plate region images.</param>
/// <param name="filteredLicensePlateImagesList">Receives filtered plate images.</param>
/// <param name="detectedLicensePlateRegionList">Receives plate regions.</param>
/// <returns>The recognized license strings.</returns>
public List <String> DetectLicensePlate(
    IInputArray img,
    List <IInputOutputArray> licensePlateImagesList,
    List <IInputOutputArray> filteredLicensePlateImagesList,
    List <RotatedRect> detectedLicensePlateRegionList)
{
    List <String> licenses = new List <String>();
    using (Mat gray = new Mat())
    using (Mat canny = new Mat())
    using (Mat gaussian = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.Imshow("Citra Berwarna", img);
        CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
        CvInvoke.Imshow("Citra Abu-abu", gray);
        CvInvoke.GaussianBlur(gray, gaussian, new Size(5, 5), 1, 0, BorderType.Default);
        CvInvoke.Imshow("Hasil Gaussian Filter", gaussian);
        // Thresholds given as (240, 100); OpenCV uses the smaller value for
        // edge linking and the larger for strong edges regardless of order.
        CvInvoke.Canny(gaussian, canny, 240, 100, 3, false);
        CvInvoke.Imshow("Hasil Deteksi Tepi Canny", canny);
        int[,] hierachy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);
        // Walk the contour tree starting at index 0 for plate candidates.
        FindLicensePlate(contours, hierachy, 0, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
    }
    return (licenses);
}
/// <summary>
/// Detects stop signs: blurs the frame, masks red pixels, closes small contour
/// gaps with dilate/erode, then searches the contour tree of the Canny edge map.
/// </summary>
/// <param name="img">Input BGR frame.</param>
/// <param name="stopSignList">Receives images of candidate stop signs.</param>
/// <param name="boxList">Receives bounding rectangles of candidates.</param>
public void DetectStopSign(Mat img, List <Mat> stopSignList, List <Rectangle> boxList)
{
    Mat smoothImg = new Mat();
    CvInvoke.GaussianBlur(img, smoothImg, new Size(5, 5), 1.5, 1.5);
    //Image<Bgr, Byte> smoothImg = img.SmoothGaussian(5, 5, 1.5, 1.5);
    Mat smoothedRedMask = new Mat();
    GetRedPixelMask(smoothImg, smoothedRedMask);
    //Use Dilate followed by Erode to eliminate small gaps in some contour.
    CvInvoke.Dilate(smoothedRedMask, smoothedRedMask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
    CvInvoke.Erode(smoothedRedMask, smoothedRedMask, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
    using (Mat canny = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.Canny(smoothedRedMask, canny, 100, 50);
        int[,] hierachy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);
        //Image<Gray, Byte> tmp = new Image<Gray, byte>(canny.Size);
        //CvInvoke.DrawContours(tmp, contours, -1, new MCvScalar(255, 255, 255));
        //Emgu.CV.UI.ImageViewer.Show(tmp);
        // Only recurse into the hierarchy when at least one contour exists.
        if (hierachy.GetLength(0) > 0)
        {
            FindStopSign(img, stopSignList, boxList, contours, hierachy, 0);
        }
    }
}
/// <summary>
/// Button handler: loads an image, runs blur / threshold / morphology / Canny,
/// then attempts to scan the result as a barcode with ZBar.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    OpenFileDialog Openfile = new OpenFileDialog();
    if (Openfile.ShowDialog() == DialogResult.OK)
    {
        Image <Bgr, Byte> My_Image = new Image <Bgr, byte>(Openfile.FileName);
        //Mat My_Image = CvInvoke.Imread(Openfile.FileName, Emgu.CV.CvEnum.ImreadModes.Unchanged);
        // Gaussian blur to suppress noise.
        CvInvoke.GaussianBlur(My_Image, My_Image, new Size(3, 3), 0, 0);
        CvInvoke.Imshow("GaussianBlur.", My_Image);
        // Binarize. NOTE(review): Threshold is applied to the BGR image
        // directly (per channel); the original comment mentioned a grayscale
        // conversion, but none is performed — confirm intent.
        CvInvoke.Threshold(My_Image, My_Image, 100, 255, Emgu.CV.CvEnum.ThresholdType.Binary);
        CvInvoke.Imshow("Gray.", My_Image);
        // Elliptical structuring element for the erosion steps.
        Emgu.CV.Mat StructingElement = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Ellipse, new Size(7, 7), new Point(2, 2));
        CvInvoke.Erode(My_Image, My_Image, StructingElement, new Point(-1, -1), 10, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(0));
        Image <Bgr, Byte> image1 = My_Image.Clone();
        // Morphological gradient: the once-more-eroded image subtracted from
        // the current image leaves the boundary pixels.
        CvInvoke.Erode(My_Image, image1, StructingElement, new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(0));
        image1 = My_Image - image1;
        CvInvoke.Imshow("GetStructuringElement.", image1);
        // Canny with the low threshold from the track bar and high = 3x low.
        CvInvoke.Canny(image1, image1, trackBar1.Value, trackBar1.Value * 3, 3);
        imageBox1.Image = image1;
        ScanBarCodeZbar(image1.ToBitmap());
    }
}
/// <summary>
/// Runs Canny on the frame, finds triangular contours in the edge map, and
/// draws the detected triangles back onto the returned edge image.
/// </summary>
/// <param name="frame">Input frame.</param>
/// <returns>Edge image with detected triangles outlined.</returns>
private Mat CannyShapeDetection(Mat frame)
{
    Mat returnImg = new Mat(frame.Rows, frame.Cols, frame.Depth, frame.NumberOfChannels);
    CvInvoke.Canny(frame, returnImg, cannyThreshold, cannyThresholdLinking);
    List <Triangle2DF> triangleList = new List <Triangle2DF>();
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(returnImg, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        int count = contours.Size;
        for (int i = 0; i < count; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                // Simplify each contour to within 5% of its arc length.
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                // Ignore tiny contours (area <= 250).
                if (CvInvoke.ContourArea(approxContour, false) > 250)
                {
                    // Exactly three vertices after simplification => triangle.
                    if (approxContour.Size == 3)
                    {
                        var pts = approxContour.ToArray();
                        triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
                    }
                }
            }
        }
    }
    // Outline every detected triangle in white on the edge image.
    foreach (var triangle in triangleList)
    {
        CvInvoke.Polylines(returnImg, Array.ConvertAll(triangle.GetVertices(), System.Drawing.Point.Round), true, new MCvScalar(255));
    }
    return (returnImg);
}
/// <summary>
/// Get the Canny Image Of the Gray Image. The result is cached and only
/// recomputed when the angle or the Canny parameters change; callers always
/// receive a clone they own.
/// </summary>
/// <param name="getCannyBase">Factory producing the gray base image; the produced Mat is disposed here.</param>
/// <param name="angle">Rotation angle associated with the requested image.</param>
/// <param name="cannyParameter">Canny parameters; must not be null.</param>
/// <exception cref="ArgumentNullException">When cannyParameter is null.</exception>
public Mat GetCannyImage(Func <Mat> getCannyBase, double angle, CannyParam cannyParameter)
{
    if (cannyParameter == null)
    {
        throw new ArgumentNullException("cannyParameter");
    }
    // Cache miss: no image yet, or the angle/parameters differ from the cached ones.
    if (_cannyImage.image == null || _cannyImage.angle != angle || !_cannyImage.cannyParam.Equals(cannyParameter))
    {
        // Drop the stale cached image before recomputing.
        if (_cannyImage.image != null)
        {
            _cannyImage.image.Dispose();
            _cannyImage.image = null;
        }
        using (Mat grayImage = getCannyBase())
        {
            using (Mat cannyImage = new Mat())
            {
                CvInvoke.Canny(grayImage, cannyImage, threshold1: cannyParameter.Threshold1, threshold2: cannyParameter.Threshold2, apertureSize: cannyParameter.Aperture, l2Gradient: cannyParameter.L2Graident);
                // Cache a clone because the using block disposes cannyImage.
                _cannyImage.image = cannyImage.Clone();
                _cannyImage.angle = angle;
                _cannyImage.cannyParam = cannyParameter;
            }
        }
    }
    return (_cannyImage.image.Clone());
}
/// <summary>
/// Converts the source image to grayscale and stores its Canny edge map
/// (thresholds 50/150) in processedImage.
/// </summary>
void Canny()
{
    UMat gray = new UMat();
    CvInvoke.CvtColor(sourceImage, gray, ColorConversion.Bgr2Gray);
    CvInvoke.Canny(gray, processedImage, 50, 150);
}
/// <summary>
/// Detects license plates from the input image via Canny edges and a
/// contour-tree search.
/// </summary>
/// <param name="img">Input BGR image.</param>
/// <param name="licensePlateImagesList">Receives plate region images.</param>
/// <param name="filteredLicensePlateImagesList">Receives filtered plate images.</param>
/// <param name="detectedLicensePlateRegionList">Receives plate regions.</param>
/// <returns>The recognized license strings (possibly empty on failure).</returns>
public List <String> DetectLicensePlate(
    IInputArray img,
    List <IInputOutputArray> licensePlateImagesList,
    List <IInputOutputArray> filteredLicensePlateImagesList,
    List <RotatedRect> detectedLicensePlateRegionList)
{
    List <String> licenses = new List <String>();
    using (Mat gray = new Mat())
    using (Mat smallGray = new Mat())
    using (Mat smoothedGray = new Mat())
    using (Mat canny = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        try
        {
            CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
            CvInvoke.Canny(gray, canny, 100, 50, 3, false);
            int[,] hierachy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);
            FindLicensePlate(contours, hierachy, 0, gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
        }
        catch (Exception ex)
        {
            // FIX: the original empty catch silently swallowed every failure.
            // Keep the best-effort contract (return whatever was collected)
            // but log the error so detection failures are diagnosable.
            Console.WriteLine(ex);
        }
    }
    return (licenses);
}
/// <summary>
/// Per-frame capture handler: grabs a frame from capture 0, derives grayscale /
/// pyramid-smoothed / Canny views, displays them, and optionally records video.
/// </summary>
private void ProcessFrame(object sender, EventArgs arg)
{
    if (_capture0 != null && _capture0.Ptr != IntPtr.Zero)
    {
        _capture0.Retrieve(_frame0, 0);
        CvInvoke.CvtColor(_frame0, _grayFrame, ColorConversion.Bgr2Gray);
        // PyrDown followed by PyrUp acts as a cheap smoothing filter.
        CvInvoke.PyrDown(_grayFrame, _smallGrayFrame);
        CvInvoke.PyrUp(_smallGrayFrame, _smoothedGrayFrame);
        CvInvoke.Canny(_smoothedGrayFrame, _cannyFrame, 100, 60);
        captureImageBox.Image = _frame0;
        //grayscaleImageBox.Image = _grayFrame;
        smoothedGrayscaleImageBox.Image = _smoothedGrayFrame;
        cannyImageBox.Image = _cannyFrame;
        // When recording is enabled, append the raw frame to the video writer.
        if (flag)
        {
            vw.Write(_frame0);
        }
    }
}
/// <summary>
/// Detects character/plate regions from the given image.
/// </summary>
/// <param name="img">Input image; used as-is (the grayscale conversion is commented out, so presumably already gray — confirm with callers).</param>
/// <param name="PlateImagesList">Receives images of detected string regions.</param>
/// <param name="filterePlateImagesList">Receives filtered region images.</param>
/// <param name="detectedPlateRegionList">Receives the detected regions.</param>
/// <returns>The list of words, one per detected string.</returns>
public List <String> DetectPlate(
    IInputArray img,
    List <IInputOutputArray> PlateImagesList,
    List <IInputOutputArray> filterePlateImagesList,
    List <RotatedRect> detectedPlateRegionList
    )
{
    List <String> word = new List <string>();
    using (Mat gray = img.GetInputArray().GetMat())
    using (Mat canny = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        //CvInvoke.CvtColor(img, gray, ColorConversion.Bgr2Gray);
        // Find edges with the Canny detector.
        CvInvoke.Canny(gray, canny, 100, 50, 3, false);
        // Build the full contour hierarchy. For each contour i:
        // hierachy[0,i,0] = index of the next contour at the same level
        // hierachy[0,i,1] = index of the previous contour at the same level, or -1 if none
        // hierachy[0,i,2] = index of the first child contour, or -1 if none
        // hierachy[0,i,3] = index of the parent contour, or -1 if none
        int[,] hierachy = CvInvoke.FindContourTree(canny, contours, ChainApproxMethod.ChainApproxSimple);
        FindPlate(contours, hierachy, 0, gray, canny, PlateImagesList, filterePlateImagesList, detectedPlateRegionList, word);
    }
    return (word);
}