public void FilterTiles(Mat image, Mat modifiedMat)
{
    CvInvoke.Imshow("0", image);
    Stopwatch sw1 = new Stopwatch();
    sw1.Start();
    Mat laplaced = new Mat();
    CvInvoke.CvtColor(image, laplaced, Emgu.CV.CvEnum.ColorConversion.Bgr2Gray);
    Mat greyResult = laplaced.Clone();
    Mat greySource = laplaced.Clone();

    //if not half inch, do Canny and subtract to separate the tiles better; this effectively "sharpens" the edge
    if (scan.TileSettings.CannyEdges)
    {
        //create the Canny image; these thresholds may need tuning
        CvInvoke.Canny(greySource, greyResult, 50, 150);
        //dilate, then erode, the Canny edges
        CvInvoke.Dilate(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
        CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), scan.TileSettings.CannyDilate, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
        CvInvoke.Imshow("1a", greyResult);
        //subtract the dilated Canny edges from the source to get separation
        CvInvoke.Subtract(greySource, greyResult, greyResult);
        greySource = greyResult.Clone();
        CvInvoke.Imshow("1b", greyResult);
    }

    if (scan.TileSettings.ThresholdEdges)
    {
        Mat edges = new Mat();
        CvInvoke.Threshold(greyResult, edges, (float)thresholdTrackbar.Value, 0, ThresholdType.ToZero);
        CvInvoke.Subtract(greySource, edges, greyResult);
        CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
        CvInvoke.Imshow("pres-1c", greyResult);
    }

    //perform the distance transform
    CvInvoke.DistanceTransform(greyResult, greyResult, null, DistType.L2, 5);
    //normalize the image to bring out the peaks
    CvInvoke.Normalize(greyResult, greyResult, 0, 1, NormType.MinMax);
    CvInvoke.Imshow("2", greyResult);
    //threshold the image; different tiles need different thresholds
    CvInvoke.Threshold(greyResult, greyResult, scan.TileSettings.ThresholdVal, 1, ThresholdType.Binary);
    CvInvoke.Imshow("3", greyResult);
    //erode to split the blobs
    CvInvoke.Erode(greyResult, greyResult, null, new System.Drawing.Point(-1, -1), scan.TileSettings.ThresholdErode, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
    //convert to 8-bit unsigned, which FindContours needs
    greyResult.ConvertTo(greyResult, DepthType.Cv8U);

    VectorOfVectorOfPoint markers = new VectorOfVectorOfPoint();
    //create a 32-bit, single-channel image to hold the markers
    Mat markerImage = new Mat(greyResult.Size, DepthType.Cv32S, 1);
    //set the image to 0
    markerImage.SetTo(new MCvScalar(0, 0, 0));
    //find the contours
    CvInvoke.FindContours(greyResult, markers, null, RetrType.External, ChainApproxMethod.LinkRuns);
    //label the markers from 1 to n; the rest of the image remains 0
    for (int i = 0; i < markers.Size; i++)
        CvInvoke.DrawContours(markerImage, markers, i, new MCvScalar(i + 1, i + 1, i + 1), -1);
    ScalarArray mult = new ScalarArray(5000);
    Mat markerVisual = new Mat();
    CvInvoke.Multiply(markerImage, mult, markerVisual);
    CvInvoke.Imshow("4", markerVisual);
    //draw the background marker
    CvInvoke.Circle(markerImage, new System.Drawing.Point(5, 5), 3, new MCvScalar(255, 255, 255), -1);
    //convert to 3 channels: the Canny-modified image is used for 3/4", the gray image for others
    Mat convertedOriginal = new Mat();
    CvInvoke.CvtColor(greySource, convertedOriginal, ColorConversion.Gray2Bgr);

    //watershed!
    CvInvoke.Watershed(convertedOriginal, markerImage);
    //visualize
    CvInvoke.Multiply(markerImage, mult, markerVisual);
    CvInvoke.Imshow("5", markerVisual);

    //get contours again to pick up the actual tiles now that they are separated
    VectorOfVectorOfPoint tilesContours = new VectorOfVectorOfPoint();
    markerVisual.ConvertTo(markerVisual, DepthType.Cv8U);
    CvInvoke.BitwiseNot(markerVisual, markerVisual);
    CvInvoke.Imshow("6", markerVisual);
    CvInvoke.Dilate(markerVisual, markerVisual, null, new System.Drawing.Point(1, 1), 2, BorderType.Default, CvInvoke.MorphologyDefaultBorderValue);
    CvInvoke.FindContours(markerVisual, tilesContours, null, RetrType.External, ChainApproxMethod.LinkRuns);
    List<System.Drawing.Point> tiles = new List<System.Drawing.Point>();
    for (int i = 0; i < tilesContours.Size; i++)
    {
        using (VectorOfPoint c = tilesContours[i])
        using (VectorOfPoint approx = new VectorOfPoint())
        {
            //epsilon = arc length * .05 to get rid of convex areas
            CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * .05, true);
            double area = CvInvoke.ContourArea(approx);
            //filter out the small contours...
            if (area > scan.TileSettings.MinArea && area < scan.TileSettings.MaxArea)
            {
                //match the shape to the square
                double ratio = CvInvoke.MatchShapes(_square, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);
                if (ratio < .05)
                {
                    var M = CvInvoke.Moments(c);
                    int cx = (int)(M.M10 / M.M00);
                    int cy = (int)(M.M01 / M.M00);
                    //filter out any centers that are too close together
                    if (!tiles.Any(x => Math.Abs(x.X - cx) < 50 && Math.Abs(x.Y - cy) < 50))
                    {
                        tiles.Add(new System.Drawing.Point(cx, cy));
                        for (int j = 0; j < approx.Size; j++)
                        {
                            int second = j + 1 == approx.Size ? 0 : j + 1;
                            //do some detection for upside-down/right-side-up here...
                            CvInvoke.Line(image, new System.Drawing.Point(approx[j].X, approx[j].Y), new System.Drawing.Point(approx[second].X, approx[second].Y), new MCvScalar(255, 255, 255, 255), 4);
                        }
                    }
                }
            }
        }
    }
    sw1.Stop();
    dataTextBox.AppendText(String.Format("Took {0} ms to detect {1} tiles{2}", sw1.ElapsedMilliseconds, tiles.Count, Environment.NewLine));
    this.originalBox.Image = image;
    resultBox.Image = markerVisual;
}
public StopSignDetector(IInputArray stopSignModel) { _detector = new SURF(500); using (Mat redMask = new Mat()) { GetRedPixelMask(stopSignModel, redMask); _modelKeypoints = new VectorOfKeyPoint(); _modelDescriptors = new Mat(); _detector.DetectAndCompute(redMask, null, _modelKeypoints, _modelDescriptors, false); if (_modelKeypoints.Size == 0) throw new Exception("No image feature has been found in the stop sign model"); } _modelDescriptorMatcher = new BFMatcher(DistanceType.L2); _modelDescriptorMatcher.Add(_modelDescriptors); _octagon = new VectorOfPoint( new Point[] { new Point(1, 0), new Point(2, 0), new Point(3, 1), new Point(3, 2), new Point(2, 3), new Point(1, 3), new Point(0, 2), new Point(0, 1) }); }
public static VectorOfPoint FindLargestContour(IInputOutputArray cannyEdges, IInputOutputArray result)
{
    int largestContourIndex = 0;
    double largestArea = 0;
    VectorOfPoint largestContour;
    using (Mat hierarchy = new Mat())
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(cannyEdges, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);
        for (int i = 0; i < contours.Size; i++)
        {
            double a = CvInvoke.ContourArea(contours[i], false); //find the area of the contour
            if (a > largestArea)
            {
                largestArea = a;
                largestContourIndex = i; //store the index of the largest contour
            }
            //draw each candidate contour in blue
            CvInvoke.DrawContours(result, contours, i, new MCvScalar(255, 0, 0));
        }
        //draw the winning contour in red with a thicker line
        CvInvoke.DrawContours(result, contours, largestContourIndex, new MCvScalar(0, 0, 255), 3, LineType.EightConnected, hierarchy);
        largestContour = new VectorOfPoint(contours[largestContourIndex].ToArray());
    }
    return largestContour;
}
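// A minimal usage sketch for FindLargestContour (not from the original source; assumes
// the usual Emgu.CV / System.Drawing usings and that the method above is in scope).
// Two filled rectangles are drawn on a synthetic image; the larger one should win.
public static void FindLargestContourExample()
{
    using (Mat scene = new Mat(200, 200, DepthType.Cv8U, 1))
    using (Mat edges = new Mat())
    using (Mat result = new Mat(200, 200, DepthType.Cv8U, 3))
    {
        scene.SetTo(new MCvScalar(0));
        result.SetTo(new MCvScalar(0, 0, 0));
        CvInvoke.Rectangle(scene, new Rectangle(10, 10, 30, 30), new MCvScalar(255), -1);
        CvInvoke.Rectangle(scene, new Rectangle(60, 60, 100, 100), new MCvScalar(255), -1);
        CvInvoke.Canny(scene, edges, 50, 150);
        using (VectorOfPoint largest = FindLargestContour(edges, result))
            Console.WriteLine("Largest contour has {0} points", largest.Size);
    }
}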
private void ProcessImage(IInputOutputArray image)
{
    Stopwatch watch = Stopwatch.StartNew(); //time the detection process
    List<IInputOutputArray> licensePlateImagesList = new List<IInputOutputArray>();
    List<IInputOutputArray> filteredLicensePlateImagesList = new List<IInputOutputArray>();
    List<RotatedRect> licenseBoxList = new List<RotatedRect>();
    List<string> words = _licensePlateDetector.DetectLicensePlate(
        image,
        licensePlateImagesList,
        filteredLicensePlateImagesList,
        licenseBoxList);
    watch.Stop(); //stop the timer
    processTimeLabel.Text = String.Format("License Plate Recognition time: {0} milliseconds", watch.Elapsed.TotalMilliseconds);

    panel1.Controls.Clear();
    Point startPoint = new Point(10, 10);
    for (int i = 0; i < words.Count; i++)
    {
        Mat dest = new Mat();
        CvInvoke.VConcat(licensePlateImagesList[i], filteredLicensePlateImagesList[i], dest);
        AddLabelAndImage(ref startPoint, String.Format("License: {0}", words[i]), dest);
        PointF[] verticesF = licenseBoxList[i].GetVertices();
        Point[] vertices = Array.ConvertAll(verticesF, Point.Round);
        using (VectorOfPoint pts = new VectorOfPoint(vertices))
            CvInvoke.Polylines(image, pts, true, new Bgr(Color.Red).MCvScalar, 2);
    }
}
/// <summary>
/// Draw the planar subdivision
/// </summary>
/// <param name="maxValue">The points contain values between [0, maxValue)</param>
/// <param name="pointCount">The total number of points</param>
/// <returns>An image representing the planar subdivision of the points</returns>
public static Mat Draw(float maxValue, int pointCount)
{
    Triangle2DF[] delaunayTriangles;
    VoronoiFacet[] voronoiFacets;
    Random r = new Random((int)(DateTime.Now.Ticks & 0x0000ffff));
    CreateSubdivision(maxValue, pointCount, out delaunayTriangles, out voronoiFacets);

    //create an image for display purposes
    Mat img = new Mat((int)maxValue, (int)maxValue, DepthType.Cv8U, 3);

    //draw the Voronoi facets
    foreach (VoronoiFacet facet in voronoiFacets)
    {
#if NETFX_CORE
        Point[] polyline = Extensions.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);
#else
        Point[] polyline = Array.ConvertAll<PointF, Point>(facet.Vertices, Point.Round);
#endif
        using (VectorOfPoint vp = new VectorOfPoint(polyline))
        using (VectorOfVectorOfPoint vvp = new VectorOfVectorOfPoint(vp))
        {
            //draw the facet in color
            CvInvoke.FillPoly(img, vvp, new Bgr(r.NextDouble() * 120, r.NextDouble() * 120, r.NextDouble() * 120).MCvScalar);
            //highlight the edge of the facet in black
            CvInvoke.Polylines(img, vp, true, new Bgr(0, 0, 0).MCvScalar, 2);
        }
        //draw the point associated with each facet in red
        CvInvoke.Circle(img, Point.Round(facet.Point), 5, new Bgr(0, 0, 255).MCvScalar, -1);
    }

    //draw the Delaunay triangulation
    foreach (Triangle2DF triangle in delaunayTriangles)
    {
#if NETFX_CORE
        Point[] vertices = Extensions.ConvertAll<PointF, Point>(triangle.GetVertices(), Point.Round);
#else
        Point[] vertices = Array.ConvertAll<PointF, Point>(triangle.GetVertices(), Point.Round);
#endif
        using (VectorOfPoint vp = new VectorOfPoint(vertices))
        {
            CvInvoke.Polylines(img, vp, true, new Bgr(255, 255, 255).MCvScalar);
        }
    }
    return img;
}
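// Hypothetical usage of Draw above (window name and parameters are illustrative):
Mat subdivision = Draw(400f, 30);
CvInvoke.Imshow("Planar Subdivision", subdivision);
CvInvoke.WaitKey(0);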
public void FindTiles(Mat image, VectorOfVectorOfPoint contours, int[,] hierarchy, int hIndex)
{
    Mat resultContours = new Mat(image.Size, image.Depth, image.NumberOfChannels);
    //for all of the root hierarchies...
    for (; hIndex >= 0; hIndex = hierarchy[hIndex, 0])
    {
        using (VectorOfPoint c = contours[hIndex])
        using (VectorOfPoint approx = new VectorOfPoint())
        {
            CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * .02, true);
            double area = CvInvoke.ContourArea(approx);
            //filter out the small contours...
            //if (area > 20000 && area < 30000) //3/4" tiles
            if (area > 0 && area < 100000)
            {
                //match the shape to the square
                double ratio = CvInvoke.MatchShapes(_square, approx, Emgu.CV.CvEnum.ContoursMatchType.I3);
                if (ratio < .1)
                {
                    CvInvoke.FillConvexPoly(resultContours, c, new MCvScalar(255), LineType.AntiAlias);
                    var M = CvInvoke.Moments(c);
                    int cx = (int)(M.M10 / M.M00);
                    int cy = (int)(M.M01 / M.M00);
                    for (int i = 0; i < approx.Size; i++)
                    {
                        int second = i + 1 == approx.Size ? 0 : i + 1;
                        CvInvoke.Line(resultContours, new System.Drawing.Point(approx[i].X, approx[i].Y), new System.Drawing.Point(approx[second].X, approx[second].Y), new MCvScalar(128), 10);
                    }
                    CvInvoke.Rectangle(resultContours, new Rectangle(new System.Drawing.Point(cx - 50, cy - 50), new Size(100, 100)), new MCvScalar(128), 2);
                }
            }
        }
    }
    resultBox.Image = resultContours;
}
/// <summary> /// Get the contour that defines the blob /// </summary> /// <returns>The contour of the blob</returns> public Point[] GetContour() { using (VectorOfPoint vp = new VectorOfPoint()) { cvbCvBlobGetContour(_ptr, vp.Ptr); return vp.ToArray(); } }
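// A hedged usage sketch for GetContour, assuming the Emgu.CV.Cvb blob API
// (CvBlobDetector/CvBlobs) that this class belongs to; usings omitted as elsewhere.
using (Image<Gray, byte> mask = new Image<Gray, byte>(320, 240))
using (CvBlobs blobs = new CvBlobs())
using (CvBlobDetector detector = new CvBlobDetector())
{
    mask.Draw(new CircleF(new PointF(160, 120), 40), new Gray(255), -1);
    detector.Detect(mask, blobs);
    foreach (KeyValuePair<uint, CvBlob> pair in blobs)
    {
        Point[] contour = pair.Value.GetContour();
        Console.WriteLine("Blob {0}: {1} contour points", pair.Key, contour.Length);
    }
}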
public void TestConvexityDefects()
{
    Image<Bgr, Byte> image = new Image<Bgr, byte>(300, 300);
    Point[] polyline = new Point[]
    {
        new Point(10, 10),
        new Point(10, 250),
        new Point(100, 100),
        new Point(250, 250),
        new Point(250, 10)
    };

    using (VectorOfPoint vp = new VectorOfPoint(polyline))
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(vp))
    using (VectorOfInt convexHull = new VectorOfInt())
    using (Mat convexityDefect = new Mat())
    {
        //draw the contour in a thick white line
        CvInvoke.DrawContours(image, contours, -1, new MCvScalar(255, 255, 255), 3);
        CvInvoke.ConvexHull(vp, convexHull);
        CvInvoke.ConvexityDefects(vp, convexHull, convexityDefect);

        //the convexity defects are returned as a four-channel Mat with k rows and 1 column,
        //where k is the number of convexity defects
        if (!convexityDefect.IsEmpty)
        {
            //data in a Mat is not directly readable, so copy it to a Matrix<>
            Matrix<int> m = new Matrix<int>(convexityDefect.Rows, convexityDefect.Cols, convexityDefect.NumberOfChannels);
            convexityDefect.CopyTo(m);
            for (int i = 0; i < m.Rows; i++)
            {
                int startIdx = m.Data[i, 0];
                int endIdx = m.Data[i, 1];
                Point startPoint = polyline[startIdx];
                Point endPoint = polyline[endIdx];
                //draw a thin red line connecting the defect's start and end points
                CvInvoke.Line(image, startPoint, endPoint, new MCvScalar(0, 0, 255));
            }
        }
        //Emgu.CV.UI.ImageViewer.Show(image);
    }
}
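// The other two channels of each defect row are useful as well: index 2 is the
// farthest (deepest) point on the contour and index 3 is its distance to the hull
// in 8.8 fixed-point format. A hedged helper sketch that marks those far points
// (assumes the same usings as the test above):
private static void DrawDefectDepthPoints(Matrix<int> defects, Point[] contour, Image<Bgr, Byte> canvas)
{
    for (int i = 0; i < defects.Rows; i++)
    {
        Point farPoint = contour[defects.Data[i, 2]]; //farthest-point index
        double depth = defects.Data[i, 3] / 256.0;    //fixed-point depth in pixels
        if (depth > 1.0) //skip negligible defects
            CvInvoke.Circle(canvas, farPoint, 3, new MCvScalar(0, 255, 0), -1);
    }
}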
/// <summary> /// Approximates an elliptic arc with a polyline. /// The function ellipse2Poly computes the vertices of a polyline that /// approximates the specified elliptic arc. It is used by cv::ellipse. /// </summary> /// <param name="center">Center of the arc.</param> /// <param name="axes">Half of the size of the ellipse main axes. See the ellipse for details.</param> /// <param name="angle">Rotation angle of the ellipse in degrees. See the ellipse for details.</param> /// <param name="arcStart">Starting angle of the elliptic arc in degrees.</param> /// <param name="arcEnd">Ending angle of the elliptic arc in degrees.</param> /// <param name="delta">Angle between the subsequent polyline vertices. It defines the approximation</param> /// <returns>Output vector of polyline vertices.</returns> public static Point[] Ellipse2Poly(Point center, Size axes, int angle, int arcStart, int arcEnd, int delta) { using (var vec = new VectorOfPoint()) { NativeMethods.imgproc_ellipse2Poly(center, axes, angle, arcStart, arcEnd, delta, vec.CvPtr); return vec.ToArray(); } }
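// Hypothetical usage (OpenCvSharp naming assumed, matching the wrapper above):
// rasterize a quarter arc and draw it as an open polyline.
using (var canvas = new Mat(200, 200, MatType.CV_8UC3, Scalar.All(0)))
{
    Point[] arc = Cv2.Ellipse2Poly(new Point(100, 100), new Size(80, 50), 0, 0, 90, 5);
    Cv2.Polylines(canvas, new[] { arc }, false, new Scalar(0, 255, 255), 2);
}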
public void getBlueContours(Image<Gray, Byte> src, VectorOfVectorOfPoint blueborders, List<RotatedRect> Blue_boxList, VectorOfVectorOfPoint othercontours_blue)
{
    //blueborders: list of blue borders
    //Blue_boxList: a box is a rotated rectangle
    //othercontours_blue: everything that is not a blue border
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(src, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        for (int i = 0; i < contours.Size; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                Rectangle bounds = CvInvoke.BoundingRectangle(approxContour);
                //only consider contours with an area greater than 250 and a bounding box larger than 1000 px
                if (CvInvoke.ContourArea(approxContour, false) > 250 && bounds.Width * bounds.Height > 1000)
                {
                    if (approxContour.Size == 4)
                    {
                        Blue_boxList.Add(CvInvoke.MinAreaRect(approxContour));
                        blueborders.Push(contour);
                    }
                    else
                    {
                        othercontours_blue.Push(contour);
                    }
                }
            }
        }
    }
}
public void getBlackContours(Image<Gray, Byte> src, VectorOfVectorOfPoint blackborders, List<RotatedRect> Black_boxList, VectorOfVectorOfPoint othercontours_black)
{
    //blackborders: list of black borders
    //Black_boxList: a box is a rotated rectangle
    //othercontours_black: everything that is not a black border
    Bitmap TMPGood = new Bitmap(src.ToBitmap(), src.Width, src.Height);
    Bitmap TMPBad = new Bitmap(src.ToBitmap(), src.Width, src.Height);
    Graphics gGood = Graphics.FromImage(TMPGood);
    Graphics gBad = Graphics.FromImage(TMPBad);
    Brush RedBrush = new SolidBrush(Color.Red);
    Brush GreenBrush = new SolidBrush(Color.Green);

    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(src, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        for (int i = 0; i < contours.Size; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                //mark every raw contour point in red on the "bad" debug bitmap
                Point[] ptsContour = contour.ToArray();
                for (int k = 0; k < ptsContour.Length; k++)
                {
                    gBad.FillEllipse(RedBrush, ptsContour[k].X, ptsContour[k].Y, 6, 6);
                }
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with an area greater than 250
                {
                    //mark the approximated corners in green on the "good" debug bitmap
                    Point[] ptsApprox = approxContour.ToArray();
                    for (int k = 0; k < ptsApprox.Length; k++)
                    {
                        gGood.FillEllipse(GreenBrush, ptsApprox[k].X, ptsApprox[k].Y, 6, 6);
                    }
                    if (approxContour.Size == 4)
                    {
                        Black_boxList.Add(CvInvoke.MinAreaRect(approxContour));
                        blackborders.Push(contour);
                    }
                    else
                    {
                        othercontours_black.Push(contour);
                    }
                }
            }
        }
    }
    TMPGood.Save("C:\\Emgu\\Dump\\Black contour corners GOOD.png", System.Drawing.Imaging.ImageFormat.Png);
    TMPBad.Save("C:\\Emgu\\Dump\\Black contour corners BAD.png", System.Drawing.Imaging.ImageFormat.Png);
}
protected override void DisposeObject() { if (_modelKeypoints != null) { _modelKeypoints.Dispose(); _modelKeypoints = null; } if (_modelDescriptors != null) { _modelDescriptors.Dispose(); _modelDescriptors = null; } if (_modelDescriptorMatcher != null) { _modelDescriptorMatcher.Dispose(); _modelDescriptorMatcher = null; } if (_octagon != null) { _octagon.Dispose(); _octagon = null; } }
private void timer1_Tick(object sender, EventArgs e)
{
    if (time == 10)
    {
        Mat frame = new Mat();
        capture.Retrieve(frame, 0);
        Mat grayVideo = new Mat();
        CvInvoke.CvtColor(frame, grayVideo, ColorConversion.Bgr2Gray);
        UMat videoDescriptors = new UMat();
        VectorOfKeyPoint videoKeyPoints = new VectorOfKeyPoint();
        calculatedescriptors(grayVideo, videoDescriptors, videoKeyPoints);

        VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch();
        BFMatcher matcher = new BFMatcher(DistanceType.L2);
        matcher.Add(originalImageDescriptors);
        matcher.KnnMatch(videoDescriptors, matches, 2, null);

        Mat mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1);
        mask.SetTo(new MCvScalar(255));
        Features2DToolbox.VoteForUniqueness(matches, 0.8, mask);

        Mat homography = new Mat();
        int nonZeroCount = CvInvoke.CountNonZero(mask);
        if (nonZeroCount >= 4)
        {
            nonZeroCount = Features2DToolbox.VoteForSizeAndOrientation(originalImageKeyPoints, videoKeyPoints, matches, mask, 1.5, 20);
            if (nonZeroCount >= 4)
                homography = Features2DToolbox.GetHomographyMatrixFromMatchedFeatures(originalImageKeyPoints, videoKeyPoints, matches, mask, 2);
        }

        Mat result = new Mat();
        Features2DToolbox.DrawMatches(grayImage, originalImageKeyPoints, grayVideo, videoKeyPoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask);
        if (!homography.IsEmpty) //a null check alone never fires, since homography starts out as an empty Mat
        {
            //draw a rectangle along the projected model
            Rectangle rect = new Rectangle(Point.Empty, grayImage.Size);
            PointF[] pts = new PointF[]
            {
                new PointF(rect.Left, rect.Bottom),
                new PointF(rect.Right, rect.Bottom),
                new PointF(rect.Right, rect.Top),
                new PointF(rect.Left, rect.Top)
            };
            pts = CvInvoke.PerspectiveTransform(pts, homography);
            Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round);
            using (VectorOfPoint vp = new VectorOfPoint(points))
            {
                CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5);
            }
            viewer.Image = result;
        }
        time = 0;
    }
    else
    {
        time++;
    }
}
//********************************************************************************************************************************************************************************************** /// <summary> /// Push a single point to a vector of points /// </summary> /// <param name="vector">Vector to push the point to</param> /// <param name="point">Point to push to the vector</param> public static void Push(this VectorOfPoint vector, Point point) { vector.Push(new Point[] { point }); }
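// Usage sketch for the extension method above:
using (VectorOfPoint corners = new VectorOfPoint())
{
    corners.Push(new Point(0, 0));
    corners.Push(new Point(100, 0));
    corners.Push(new Point(100, 100));
    Console.WriteLine(corners.Size); //prints 3
}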
private void ExtractContourAndHull(Image<Gray, byte> skin) //find the contour and the outer hull
{
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(skin, contours, new Mat(), Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
    VectorOfPoint biggestContour = new VectorOfPoint(); //holds the largest contour
    Double Result1 = 0; //area currently under consideration
    Result = 0;
    for (int i = 0; i < contours.Size; i++)
    {
        VectorOfPoint contour = contours[i];
        double area = CvInvoke.ContourArea(contour, false); //compute the area
        Result1 = area;
        if (Result1 > Result)
        {
            Result = Result1;
            biggestContour = contour;
        }
    }
    label8.Text = "Size Rect :" + Result.ToString();
    if (biggestContour.Size > 0) //the original null check never fired; the vector starts out empty instead
    {
        CvInvoke.ApproxPolyDP(biggestContour, biggestContour, 0.00025, false);
        points = biggestContour.ToArray();
        currentFrame.Draw(points, new Bgr(255, 0, 255), 4);
        VectorOfPoint hull = new VectorOfPoint();
        VectorOfInt convexHull = new VectorOfInt();
        CvInvoke.ConvexHull(biggestContour, hull, false); //hull as points
        box = CvInvoke.MinAreaRect(hull);
        currentFrame.Draw(new CircleF(box.Center, 5), new Bgr(Color.Black), 4);
        CvInvoke.ConvexHull(biggestContour, convexHull); //hull as indices
        currentFrame.Draw(box, new Bgr(200, 0, 0), 1); //draw the bounding frame around the hand
        currentFrame.DrawPolyline(hull.ToArray(), true, new Bgr(200, 125, 75), 4);
        currentFrame.Draw(new CircleF(new PointF(box.Center.X, box.Center.Y), 3), new Bgr(200, 125, 75));
        //find the convexity defects
        CvInvoke.ConvexityDefects(biggestContour, convexHull, defect);
        //copy into a Matrix<> so the data can be read
        if (!defect.IsEmpty)
        {
            mDefect = new Matrix<int>(defect.Rows, defect.Cols, defect.NumberOfChannels);
            defect.CopyTo(mDefect);
        }
    }
}
/// <summary>
/// Count the number of fingers on skinMask and draw debug information
/// </summary>
/// <param name="skinMask">Skin mask to count fingers on</param>
/// <returns>Mat with detection debug information</returns>
public Mat FindFingersCount(Mat skinMask)
{
    Mat contoursImage = Mat.Ones(skinMask.Height, skinMask.Width, DepthType.Cv8U, 3);
    if (skinMask.IsEmpty || skinMask.NumberOfChannels != 1)
    {
        return contoursImage;
    }

    //find the biggest contour in the skin mask
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    Mat hierarchy = new Mat();
    CvInvoke.FindContours(skinMask, contours, hierarchy, RetrType.External, ChainApproxMethod.ChainApproxNone);
    if (contours.Size <= 0)
    {
        return contoursImage;
    }
    int biggestContourIndex = -1;
    double biggestArea = 0;
    for (int i = 0; i < contours.Size; i++)
    {
        double area = CvInvoke.ContourArea(contours[i], false);
        if (area > biggestArea)
        {
            biggestArea = area;
            biggestContourIndex = i;
        }
    }
    if (biggestContourIndex < 0)
    {
        return contoursImage;
    }

    //compute the convex hull, as points for drawing and as indices for the defects
    VectorOfPoint hullPoints = new VectorOfPoint();
    VectorOfInt hullInts = new VectorOfInt();
    CvInvoke.ConvexHull(contours[biggestContourIndex], hullPoints, true);
    CvInvoke.ConvexHull(contours[biggestContourIndex], hullInts, false);
    Mat defects = new Mat();
    if (hullInts.Size > 3)
    {
        CvInvoke.ConvexityDefects(contours[biggestContourIndex], hullInts, defects);
    }
    else
    {
        return contoursImage;
    }

    Rectangle boundingRectangle = CvInvoke.BoundingRectangle(hullPoints);
    Point centerBoundingRectangle = new Point((boundingRectangle.X + boundingRectangle.Right) / 2, (boundingRectangle.Y + boundingRectangle.Bottom) / 2);

    //collect the defect start points, and the far points that lie close enough to the hand center
    VectorOfPoint startPoints = new VectorOfPoint();
    VectorOfPoint farPoints = new VectorOfPoint();
    int[,,] defectsData = (int[,,])defects.GetData();
    for (int i = 0; i < defectsData.Length / 4; i++)
    {
        Point startPoint = contours[biggestContourIndex][defectsData[i, 0, 0]];
        if (!startPoints.ToArray().ToList().Any(p => Math.Abs(p.X - startPoint.X) < 30 && Math.Abs(p.Y - startPoint.Y) < 30))
        {
            startPoints.Push(new Point[] { startPoint });
        }
        Point farPoint = contours[biggestContourIndex][defectsData[i, 0, 2]];
        if (findPointsDistance(farPoint, centerBoundingRectangle) < boundingRectangle.Height * BOUNDING_RECT_FINGER_SIZE_SCALING)
        {
            farPoints.Push(new Point[] { farPoint });
        }
    }
    VectorOfPoint filteredStartPoints = CompactOnNeighborhoodMedian(startPoints, boundingRectangle.Height * BOUNDING_RECT_NEIGHBOR_DISTANCE_SCALING);
    VectorOfPoint filteredFarPoints = CompactOnNeighborhoodMedian(farPoints, boundingRectangle.Height * BOUNDING_RECT_NEIGHBOR_DISTANCE_SCALING);

    //decide which start points are actual fingertips
    VectorOfPoint filteredFingerPoints = new VectorOfPoint();
    if (filteredFarPoints.Size > 1)
    {
        VectorOfPoint fingerPoints = new VectorOfPoint();
        for (int i = 0; i < filteredStartPoints.Size; i++)
        {
            VectorOfPoint closestPoints = findClosestOnX(filteredFarPoints, filteredStartPoints[i]);
            if (isFinger(closestPoints[0], filteredStartPoints[i], closestPoints[1], LIMIT_ANGLE_INF, LIMIT_ANGLE_SUP, centerBoundingRectangle, boundingRectangle.Height * BOUNDING_RECT_FINGER_SIZE_SCALING))
            {
                fingerPoints.Push(new Point[] { filteredStartPoints[i] });
            }
        }
        if (fingerPoints.Size > 0)
        {
            while (fingerPoints.Size > 5)
            {
                //remove extra fingers: convert to a list and remove the last item
                List<Point> points = new List<Point>(fingerPoints.ToArray());
                points.Remove(points.Last());
                fingerPoints = new VectorOfPoint(points.ToArray());
            }
            filteredFingerPoints = fingerPoints;
            this.NumberOfFingersRaised = filteredFingerPoints.Size;
        }
    }

    Bgr colorRed = new Bgr(Color.Red);
    Bgr colorGreen = new Bgr(Color.Green);
    Bgr colorBlue = new Bgr(Color.Blue);
    Bgr colorYellow = new Bgr(Color.Yellow);
    Bgr colorPurple = new Bgr(Color.Purple);
    Bgr colorWhite = new Bgr(Color.White);

    //debug: draw the defects
    defectsData = (int[,,])defects.GetData();
    for (int i = 0; i < defectsData.Length / 4; i++)
    {
        Point start = contours[biggestContourIndex][defectsData[i, 0, 0]];
        Point far = contours[biggestContourIndex][defectsData[i, 0, 2]];
        Point end = contours[biggestContourIndex][defectsData[i, 0, 1]];
        CvInvoke.Polylines(contoursImage, new Point[] { start, far, end }, true, colorPurple.MCvScalar, DRAW_THICKNESS / 2);
        CvInvoke.Circle(contoursImage, start, 5, colorWhite.MCvScalar);
        CvInvoke.Circle(contoursImage, far, 5, colorRed.MCvScalar, 10);
        CvInvoke.Circle(contoursImage, end, 5, colorBlue.MCvScalar);
    }

    //draw information about what was detected (contours, key points, how many fingers)
    CvInvoke.DrawContours(contoursImage, contours, 0, colorGreen.MCvScalar, DRAW_THICKNESS, LineType.AntiAlias);
    CvInvoke.Polylines(contoursImage, hullPoints, true, colorBlue.MCvScalar, DRAW_THICKNESS);
    CvInvoke.Rectangle(contoursImage, boundingRectangle, colorRed.MCvScalar, DRAW_THICKNESS);
    CvInvoke.Circle(contoursImage, centerBoundingRectangle, 5, colorYellow.MCvScalar, DRAW_THICKNESS);
    drawVectorPoints(contoursImage, filteredStartPoints, colorRed.MCvScalar, true, 3);
    drawVectorPoints(contoursImage, filteredFarPoints, colorWhite.MCvScalar, true, 3);
    drawVectorPoints(contoursImage, filteredFingerPoints, colorYellow.MCvScalar, false, 3);
    CvInvoke.PutText(contoursImage, filteredFingerPoints.Size.ToString(), centerBoundingRectangle, FontFace.HersheyComplex, 2, colorYellow.MCvScalar);

    return contoursImage;
}
/// <summary> /// Draw the model image and observed image, the matched features and homography projection. /// </summary> /// <param name="modelImage">The model image</param> /// <param name="observedImage">The observed image</param> /// <param name="matchTime">The output total time for computing the homography matrix.</param> /// <returns>The model image and observed image, the matched features and homography projection.</returns> public static Mat Draw(Mat modelImage, Mat observedImage, out long matchTime) { Mat homography; VectorOfKeyPoint modelKeyPoints; VectorOfKeyPoint observedKeyPoints; using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch()) { Mat mask; FindMatch(modelImage, observedImage, out matchTime, out modelKeyPoints, out observedKeyPoints, matches, out mask, out homography); //Draw the matched keypoints Mat result = new Mat(); Features2DToolbox.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask); #region draw the projected region on the image if (homography != null) { //draw a rectangle along the projected model Rectangle rect = new Rectangle(Point.Empty, modelImage.Size); PointF[] pts = new PointF[] { new PointF(rect.Left, rect.Bottom), new PointF(rect.Right, rect.Bottom), new PointF(rect.Right, rect.Top), new PointF(rect.Left, rect.Top) }; pts = CvInvoke.PerspectiveTransform(pts, homography); Point[] points = Array.ConvertAll<PointF, Point>(pts, Point.Round); using (VectorOfPoint vp = new VectorOfPoint(points)) { CvInvoke.Polylines(result, vp, true, new MCvScalar(255, 0, 0, 255), 5); } } #endregion return result; } }
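// A hedged usage sketch (file names are placeholders; the imread enum is
// ImreadModes in recent Emgu versions, LoadImageType in older ones):
long matchTime;
using (Mat modelImage = CvInvoke.Imread("model.png", ImreadModes.Grayscale))
using (Mat observedImage = CvInvoke.Imread("scene.png", ImreadModes.Grayscale))
using (Mat result = Draw(modelImage, observedImage, out matchTime))
{
    CvInvoke.Imshow(String.Format("Matched in {0} ms", matchTime), result);
    CvInvoke.WaitKey(0);
}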
static void Main(string[] args) { var files = Directory.GetFiles(@"C:\Users\lucas\OneDrive\Imagens\receipts"); foreach (var path in files) { var fileName = Path.GetFileName(path); Bitmap bmp = new Bitmap(path); bmp.SetResolution(50, 50); Image <Gray, Byte> GrayBmp; Image <Bgr, Byte> orig = new Image <Bgr, Byte>(bmp); double ratioX = (double)500 / (double)orig.Width; double ratioY = (double)500 / (double)orig.Height; double ratio = ratioX < ratioY ? ratioX : ratioY; int newHeight = Convert.ToInt32(orig.Height * ratio); int newWidth = Convert.ToInt32(orig.Width * ratio); orig = orig.Resize(newWidth, newHeight, Emgu.CV.CvEnum.Inter.Area); Bitmap output; GrayBmp = orig.Convert <Gray, byte>(); GrayBmp._SmoothGaussian(3); GrayBmp.Save(Path.Combine(@"C:\Users\lucas\OneDrive\Imagens\receipts\result\", DateTime.Now.ToString("dd-MM-yy-mm-hh-ss") + "__GRAY__" + fileName)); Gray grayCannyThreshold = new Gray(75); Gray grayThreshLinking = new Gray(200); var Cannybmp = GrayBmp.Canny(grayCannyThreshold.Intensity, grayThreshLinking.Intensity); output = Cannybmp.ToBitmap(); output.Save(Path.Combine(@"C:\Users\lucas\OneDrive\Imagens\receipts\result\", DateTime.Now.ToString("dd-MM-yy-mm-hh-ss") + "__CANNY__" + fileName)); var r = Cannybmp.HoughLinesBinary(2, Math.PI / 180.0, 100, 30, 3)[0]; var biggestLines = r.OrderByDescending(x => x.Length).Take(4).ToList(); var edges = PointCollection.PolyLine(biggestLines.Select(x => (PointF)x.P1).ToArray(), true); PointF[] f = new PointF[4]; //PointF[] srcs = new PointF[4]; //Trapezoid shape //srcs[0] = new PointF(1, 1); //srcs[1] = new PointF(300, 1); //srcs[2] = new PointF(400, 150); //srcs[3] = new PointF(100, 150); //PointF[] dests = new PointF[4]; // Rectangle Shape //dests[0] = new PointF(3, 3); //dests[1] = new PointF(150, 3); //dests[2] = new PointF(180, 200); //dests[3] = new PointF(150, 200); //CvInvoke.WarpPerspective //var wr = CvInvoke.GetPerspectiveTransform(srcs, dests); //Image<Gray, byte> transformed = Cannybmp.WarpPerspective<Mat>( // mapMatrix: wr, width: 400, height: 200, interpolationType: Emgu.CV.CvEnum.Inter.Cubic, //warpType: Emgu.CV.CvEnum.Warp.Default, borderType: Emgu.CV.CvEnum.BorderType.Default, backgroundColor: new Gray(5)); Cannybmp.DrawPolyline(biggestLines.Select(x => x.P1).ToArray(), true, new Gray(100)); //output.Save(Path.Combine(@"C:\Users\lucas\OneDrive\Imagens\receipts\result\", DateTime.Now.ToString("dd-MM-yy-mm-hh-ss") + "__DRAW__" + fileName)); Mat hierarchy = new Mat(); VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(); VectorOfPoint screenCnt; CvInvoke.FindContours(Cannybmp.Copy(), contours, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple); //VectorOfVectorOfPoint asa = new VectorOfVectorOfPoint(); //VectorOfVectorOfPoint asfa = new VectorOfVectorOfPoint(1000); //CvInvoke.ConvertPointsToHomogeneous(contours, asfa); for (int i = 0; i < contours.Size; i++) { var peri = CvInvoke.ArcLength(contours[i], true); VectorOfPoint approxCurve = new VectorOfPoint(); CvInvoke.ApproxPolyDP(contours[i], approxCurve, 0.02 * peri, true); if (approxCurve.Size == 4) { screenCnt = approxCurve; break; } } } }
private void FindLicensePlate(
    VectorOfVectorOfPoint contours, int[,] hierarchy, int idx, IInputArray gray, IInputArray canny,
    List<IInputOutputArray> licensePlateImagesList, List<IInputOutputArray> filteredLicensePlateImagesList, List<RotatedRect> detectedLicensePlateRegionList,
    List<String> licenses)
{
    for (; idx >= 0; idx = hierarchy[idx, 0])
    {
        int lettersCount = GetNumberOfChildren(hierarchy, idx);
        //if it does not contain any children (characters), it is not a license plate region
        if (lettersCount == 0)
        {
            continue;
        }
        using (VectorOfPoint contour = contours[idx])
        {
            if (CvInvoke.ContourArea(contour) > 400)
            {
                if (lettersCount < 2)
                {
                    //if the contour has too few children, it is not a license plate
                    //(assuming a license plate contains several characters);
                    //however, we should search its children in case one of them is a license plate
                    FindLicensePlate(contours, hierarchy, hierarchy[idx, 2], gray, canny, licensePlateImagesList, filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                    continue;
                }

                RotatedRect box = CvInvoke.MinAreaRect(contour);
                if (box.Angle < -45.0)
                {
                    float tmp = box.Size.Width;
                    box.Size.Width = box.Size.Height;
                    box.Size.Height = tmp;
                    box.Angle += 90.0f;
                }
                else if (box.Angle > 45.0)
                {
                    float tmp = box.Size.Width;
                    box.Size.Width = box.Size.Height;
                    box.Size.Height = tmp;
                    box.Angle -= 90.0f;
                }

                using (UMat tmp1 = new UMat())
                using (UMat tmp2 = new UMat())
                {
                    PointF[] srcCorners = box.GetVertices();
                    PointF[] destCorners = new PointF[]
                    {
                        new PointF(0, box.Size.Height - 1),
                        new PointF(0, 0),
                        new PointF(box.Size.Width - 1, 0),
                        new PointF(box.Size.Width - 1, box.Size.Height - 1)
                    };
                    using (Mat rot = CvInvoke.GetAffineTransform(srcCorners, destCorners))
                    {
                        CvInvoke.WarpAffine(gray, tmp1, rot, Size.Round(box.Size));
                    }

                    //resize the license plate so the font is roughly 10-12 pt; this font size gives better accuracy from tesseract
                    Size approxSize = new Size(240, 180);
                    double scale = Math.Min(approxSize.Width / box.Size.Width, approxSize.Height / box.Size.Height);
                    Size newSize = new Size((int)Math.Round(box.Size.Width * scale), (int)Math.Round(box.Size.Height * scale));
                    CvInvoke.Resize(tmp1, tmp2, newSize, 0, 0, Inter.Cubic);

                    //remove some pixels from the edge
                    int edgePixelSize = 3;
                    Rectangle newRoi = new Rectangle(new Point(edgePixelSize, edgePixelSize), tmp2.Size - new Size(2 * edgePixelSize, 2 * edgePixelSize));
                    UMat plate = new UMat(tmp2, newRoi);

                    ocr.SetImage(plate.Clone());
                    ocr.Recognize();
                    licenses.Add(ocr.GetUTF8Text());
                    licensePlateImagesList.Add(plate);
                    filteredLicensePlateImagesList.Add(plate);
                    detectedLicensePlateRegionList.Add(box);
                }
            }
        }
    }
}
/// <summary>
/// This comparison iterates over every point in the "edge1" contour, finds the closest point in the "edge2" contour and sums up those distances.
/// The end result is the sum divided by the combined length of both contours.
/// It also takes the difference between the endpoint distances of the two contours into account.
/// </summary>
/// <param name="edge1">First Edge to compare to edge2</param>
/// <param name="edge2">Second Edge to compare to edge1</param>
/// <returns>Similarity factor of edges. Special values are:
/// 300000000: Same piece
/// 200000000: At least one edge is a line edge
/// 150000000: The pieces have the same edge type
/// 100000000: One of the contour sizes is 0</returns>
public override double CompareEdges(Edge edge1, Edge edge2)
{
    try
    {
        //return large numbers if we already know that these shapes simply won't match...
        if (edge1.PieceID == edge2.PieceID) { return 300000000; }
        if (edge1.EdgeType == EdgeTypes.LINE || edge2.EdgeType == EdgeTypes.LINE) { return 200000000; }
        if (edge1.EdgeType == edge2.EdgeType) { return 150000000; }
        if (edge1.NormalizedContour.Size == 0 || edge2.ReverseNormalizedContour.Size == 0) { return 100000000; }

        double cost = 0;
        double total_length = CvInvoke.ArcLength(edge1.NormalizedContour, false) + CvInvoke.ArcLength(edge2.ReverseNormalizedContour, false);
        int windowSizePoints = (int)(Math.Max(edge1.NormalizedContour.Size, edge2.ReverseNormalizedContour.Size) * EdgeCompareWindowSizePercent);
        if (windowSizePoints < 1) { windowSizePoints = 1; }

        double distEndpointsContour1 = Utils.Distance(edge1.NormalizedContour[0], edge1.NormalizedContour[edge1.NormalizedContour.Size - 1]);
        double distEndpointsContour2 = Utils.Distance(edge2.ReverseNormalizedContour[0], edge2.ReverseNormalizedContour[edge2.ReverseNormalizedContour.Size - 1]);
        double distEndpointContoursDiff = Math.Abs(distEndpointsContour1 - distEndpointsContour2);
        if (distEndpointContoursDiff <= EdgeCompareEndpointDiffIgnoreThreshold) { distEndpointContoursDiff = 0; }

        for (int i = 0; i < Math.Min(edge1.NormalizedContour.Size, edge2.ReverseNormalizedContour.Size); i++)
        {
            double min = 10000000;
            for (int j = Math.Max(0, i - windowSizePoints); j < Math.Min(edge2.ReverseNormalizedContour.Size, i + windowSizePoints); j++)
            {
                if (PluginFactory.CancelToken.IsCancellationRequested) { PluginFactory.CancelToken.ThrowIfCancellationRequested(); }
                double dist = Utils.Distance(edge1.NormalizedContour[i], edge2.ReverseNormalizedContour[j]);
                if (dist < min) { min = dist; }
            }
            cost += min;
        }
        double matchResult = cost / total_length;

        if (PluginFactory.GetGeneralSettingsPlugin().SolverShowDebugResults)
        {
            Image<Rgb, byte> contourOverlay = new Image<Rgb, byte>(500, 500);
            VectorOfPoint contour1 = Utils.TranslateContour(edge1.NormalizedContour, 100, 0);
            VectorOfPoint contour2 = Utils.TranslateContour(edge2.ReverseNormalizedContour, 100, 0);
            CvInvoke.DrawContours(contourOverlay, new VectorOfVectorOfPoint(contour1), -1, new MCvScalar(0, 255, 0), 2);
            CvInvoke.DrawContours(contourOverlay, new VectorOfVectorOfPoint(contour2), -1, new MCvScalar(0, 0, 255), 2);
            PluginFactory.LogHandle.Report(new LogEventImage("Compare " + edge1.PieceID + "_Edge" + edge1.EdgeNumber + " <-->" + edge2.PieceID + "_Edge" + edge2.EdgeNumber + " ==> distEndpoint = " + distEndpointContoursDiff.ToString() + ", MatchResult = " + matchResult, contourOverlay.Bitmap));
        }

        return distEndpointContoursDiff + matchResult;
    }
    catch (OperationCanceledException)
    {
        throw;
    }
}
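// A condensed, self-contained restatement of the core metric above for two plain
// point sequences (the sliding-window limit and cancellation are omitted for
// clarity; names are illustrative):
static double ContourDistance(Point[] a, Point[] b)
{
    double cost = 0;
    int n = Math.Min(a.Length, b.Length);
    for (int i = 0; i < n; i++)
    {
        double min = double.MaxValue;
        for (int j = 0; j < b.Length; j++)
        {
            double dx = a[i].X - b[j].X, dy = a[i].Y - b[j].Y;
            min = Math.Min(min, Math.Sqrt(dx * dx + dy * dy));
        }
        cost += min; //closest point in b for this point of a
    }
    return cost; //CompareEdges divides this sum by the combined arc length
}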
private void Btn_shapFind_Click(object sender, EventArgs e)
{
    StringBuilder msgBuilder = new StringBuilder("Performance: ");

    //load the image from file and resize it for display
    var bitmap = this.picSrc.GetFirstRegionRect();
    Image<Bgr, Byte> img = new Image<Bgr, byte>(bitmap).Resize(400, 400, Emgu.CV.CvEnum.Inter.Linear, true);

    //convert the image to grayscale and filter out the noise
    UMat uimage = new UMat();
    CvInvoke.CvtColor(img, uimage, ColorConversion.Bgr2Gray);
    CvInvoke.Imshow("Image", uimage);
    CvInvoke.WaitKey(2);

    //binarize
    CvInvoke.Threshold(uimage, uimage, 230, 255, ThresholdType.Binary);
    CvInvoke.Imshow("After Threshold", uimage);
    CvInvoke.WaitKey(2);

    #region circle detection
    Stopwatch watch = Stopwatch.StartNew();
    double cannyThreshold = 180.0;
    double circleAccumulatorThreshold = 100;
    CircleF[] circles = CvInvoke.HoughCircles(uimage, HoughType.Gradient, 2.0, 20.0, cannyThreshold, circleAccumulatorThreshold, 5);
    watch.Stop();
    msgBuilder.Append(String.Format("Hough circles - {0} ms; ", watch.ElapsedMilliseconds));
    #endregion

    #region Canny and edge detection
    watch.Reset();
    watch.Start();
    double cannyThresholdLinking = 120.0;
    UMat cannyEdges = new UMat();
    CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking);
    LineSegment2D[] lines = CvInvoke.HoughLinesP(
        cannyEdges,
        1,              //distance resolution in pixel-related units
        Math.PI / 45.0, //angle resolution measured in radians
        20,             //threshold
        30,             //min line width
        10);            //gap between lines
    watch.Stop();
    msgBuilder.Append(String.Format("Canny & Hough lines - {0} ms; ", watch.ElapsedMilliseconds));
    #endregion

    #region Find triangles and rectangles
    watch.Reset();
    watch.Start();
    List<Triangle2DF> triangleList = new List<Triangle2DF>();
    List<RotatedRect> boxList = new List<RotatedRect>(); //a box is a rotated rectangle
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        int count = contours.Size;
        for (int i = 0; i < count; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                if (CvInvoke.ContourArea(approxContour, false) > 200) //only consider contours with an area greater than 200
                {
                    if (approxContour.Size == 3) //the contour has 3 vertices: it is a triangle
                    {
                        Point[] pts = approxContour.ToArray();
                        triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
                    }
                    else if (approxContour.Size == 4) //the contour has 4 vertices
                    {
                        #region determine if all the angles in the contour are within [80, 100] degrees
                        bool isRectangle = true;
                        Point[] pts = approxContour.ToArray();
                        LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                        for (int j = 0; j < edges.Length; j++)
                        {
                            double angle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                            if (angle < 80 || angle > 100)
                            {
                                isRectangle = false;
                                break;
                            }
                        }
                        #endregion
                        if (isRectangle)
                        {
                            boxList.Add(CvInvoke.MinAreaRect(approxContour));
                        }
                    }
                }
            }
        }
    }
    watch.Stop();
    msgBuilder.Append(String.Format("Triangles & Rectangles - {0} ms; ", watch.ElapsedMilliseconds));
    #endregion

    Console.WriteLine(msgBuilder.ToString());

    #region draw triangles and rectangles
    Image<Bgr, Byte> triangleRectangleImage = img.CopyBlank();
    foreach (Triangle2DF triangle in triangleList)
    {
        triangleRectangleImage.Draw(triangle, new Bgr(Color.DarkBlue), 2);
    }
    foreach (RotatedRect box in boxList)
    {
        triangleRectangleImage.Draw(box, new Bgr(Color.DarkOrange), 2);
    }
    this.picRect.LoadImage(triangleRectangleImage.ToBitmap());
    #endregion

    #region draw circles
    Image<Bgr, Byte> circleImage = img.CopyBlank();
    foreach (CircleF circle in circles)
    {
        circleImage.Draw(circle, new Bgr(Color.Brown), 2);
    }
    this.picCircle.LoadImage(circleImage.Bitmap);
    #endregion

    #region draw lines
    Image<Bgr, Byte> lineImage = img.CopyBlank();
    foreach (LineSegment2D line in lines)
    {
        lineImage.Draw(line, new Bgr(Color.Green), 2);
    }
    this.picTarget.LoadImage(lineImage.Bitmap);
    #endregion
}
/// <summary>
/// Approximates a contour or a curve using the Douglas-Peucker algorithm
/// </summary>
/// <param name="curve">The polygon or curve to approximate.</param>
/// <param name="epsilon">Specifies the approximation accuracy.
/// This is the maximum distance between the original curve and its approximation.</param>
/// <param name="closed">If true, the approximated curve is closed
/// (its first and last vertices are connected); otherwise it is not closed.</param>
/// <returns>The result of the approximation;
/// the type matches the type of the input curve.</returns>
public static Point[] ApproxPolyDP(IEnumerable<Point> curve, double epsilon, bool closed)
{
    if (curve == null)
        throw new ArgumentNullException("curve");
    Point[] curveArray = EnumerableEx.ToArray(curve);
    IntPtr approxCurvePtr;
    NativeMethods.imgproc_approxPolyDP_Point(curveArray, curveArray.Length, out approxCurvePtr, epsilon, closed ? 1 : 0);
    using (var approxCurveVec = new VectorOfPoint(approxCurvePtr))
    {
        return approxCurveVec.ToArray();
    }
}
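// Usage sketch with illustrative data, assuming this overload is exposed on Cv2 as
// in OpenCvSharp (usings omitted as elsewhere): collapse a densely sampled
// rectangle outline down to roughly its four corners.
var dense = new List<Point>();
for (int x = 0; x <= 100; x += 5) dense.Add(new Point(x, 0));
for (int y = 0; y <= 50; y += 5) dense.Add(new Point(100, y));
for (int x = 100; x >= 0; x -= 5) dense.Add(new Point(x, 50));
for (int y = 50; y >= 0; y -= 5) dense.Add(new Point(0, y));
Point[] corners = Cv2.ApproxPolyDP(dense, 2.0, true);
//with epsilon = 2.0 the result should be close to the 4 rectangle corners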
/// <summary>
/// Finds the intersection of two convex polygons
/// </summary>
/// <param name="p1">First polygon</param>
/// <param name="p2">Second polygon</param>
/// <param name="p12">Vertices of the intersection polygon</param>
/// <param name="handleNested">When true, an intersection is also found if one polygon is fully enclosed in the other</param>
/// <returns>Area of the intersection polygon</returns>
public static float IntersectConvexConvex(IEnumerable<Point> p1, IEnumerable<Point> p2, out Point[] p12, bool handleNested = true)
{
    if (p1 == null)
        throw new ArgumentNullException("p1");
    if (p2 == null)
        throw new ArgumentNullException("p2");
    Point[] p1Array = EnumerableEx.ToArray(p1);
    Point[] p2Array = EnumerableEx.ToArray(p2);
    IntPtr p12Ptr;
    float ret = NativeMethods.imgproc_intersectConvexConvex_Point(p1Array, p1Array.Length, p2Array, p2Array.Length, out p12Ptr, handleNested ? 1 : 0);
    using (var p12Vec = new VectorOfPoint(p12Ptr))
    {
        p12 = p12Vec.ToArray();
    }
    return ret;
}
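// Usage sketch, assuming this overload is exposed on Cv2 as in OpenCvSharp:
Point[] squareA = { new Point(0, 0), new Point(10, 0), new Point(10, 10), new Point(0, 10) };
Point[] squareB = { new Point(5, 5), new Point(15, 5), new Point(15, 15), new Point(5, 15) };
Point[] overlap;
float area = Cv2.IntersectConvexConvex(squareA, squareB, out overlap);
//area should be 25, and overlap should describe the 5x5 intersection square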
//searcher with a contour as input
private Point[] searcher(VectorOfPoint contour, Mat input_image1, Mat input_image2, ref Mat[] templ)
{
    Mat image1 = input_image1.Clone();
    Mat image2 = input_image2.Clone();

    //convert the images to grayscale and filter out the noise
    CvInvoke.CvtColor(image1, image1, ColorConversion.Bgr2Gray);
    CvInvoke.CvtColor(image2, image2, ColorConversion.Bgr2Gray);

    //blur
    Mat imageread = new Mat();
    Mat pyrDown1 = new Mat();
    Mat imageread2 = new Mat();
    Mat pyrDown2 = new Mat();
    CvInvoke.PyrDown(image1, pyrDown1);
    CvInvoke.PyrUp(pyrDown1, imageread);
    CvInvoke.PyrDown(image2, pyrDown2);
    CvInvoke.PyrUp(pyrDown2, imageread2);

    //unsharp mask
    double alpha = 1.5;
    double beta = -0.5;
    double gamma = 0;
    CvInvoke.AddWeighted(image1, alpha, imageread, beta, gamma, imageread);
    CvInvoke.AddWeighted(image2, alpha, imageread2, beta, gamma, imageread2);

    //find the corners
    VectorOfPoint contour2 = new VectorOfPoint();
    CvInvoke.ApproxPolyDP(contour, contour2, CvInvoke.ArcLength(contour, true) * 0.05, true); //approximate the contour with its main lines
    Point[] pts = contour2.ToArray(); //convert the vector to an array
    LineSegment2D[] edges = PointCollection.PolyLine(pts, true); //create an array of LineSegment2D, one per main line
    Point[] gwnies = new Point[edges.Length]; //where the corners of the contour are stored
    for (int i = 0; i < gwnies.Length; i++) //for every 2 consecutive lines, calculate the intersection point with the gwnia(LineSegment2D, LineSegment2D) function
    {
        if (gwnies.Length == i + 1)
        {
            gwnies[i] = gwnia(edges[i], edges[0]);
        }
        else
        {
            gwnies[i] = gwnia(edges[i], edges[i + 1]);
        }
        if (gwnies[i].X < 0)
        {
            return null;
        }
    }

    int patternWindow = 30; //the template size
    int searchWindow = 150; //the search window size
    Mat[] recs = new Mat[edges.Length]; //where the templates are stored
    Mat[] img = new Mat[edges.Length]; //where the search windows are stored
    Point[] points = new Point[edges.Length]; //where the new corners are stored
    for (int i = 0; i < gwnies.Length; i++) //for each corner of the starting contour, search for the most similar point in the next frame
    {
        recs[i] = new Mat();
        img[i] = new Mat();
        CvInvoke.GetRectSubPix(imageread, new System.Drawing.Size(patternWindow, patternWindow), gwnies[i], recs[i]); //cut the template from image 1
        CvInvoke.GetRectSubPix(imageread2, new System.Drawing.Size(searchWindow, searchWindow), gwnies[i], img[i]); //cut the search window from image 2
        Mat outp = new Mat(); //the match-template output
        CvInvoke.MatchTemplate(img[i], recs[i], outp, TemplateMatchingType.CcoeffNormed); //match the template inside the window
        //CvInvoke.MatchTemplate(img[i], recs[i], outp, TemplateMatchingType.SqdiffNormed);
        double minVal = 0;
        double maxVal = 0;
        Point minLoc = new Point();
        CvInvoke.MinMaxLoc(outp, ref minVal, ref maxVal, ref minLoc, ref points[i]); //find the locations and values of the min and max similarity
        //CvInvoke.MinMaxLoc(outp, ref minVal, ref maxVal, ref points[i], ref minLoc);
        int printpointX = gwnies[i].X + points[i].X - searchWindow / 2 + patternWindow / 2; //translate from the X coord of the search window to the X coord of image2
        int printpointY = gwnies[i].Y + points[i].Y - searchWindow / 2 + patternWindow / 2; //translate from the Y coord of the search window to the Y coord of image2
        points[i] = new Point(printpointX, printpointY); //the final point
        CvInvoke.Circle(input_image2, points[i], (int)(patternWindow / 3), new MCvScalar(132, 255, 122)); //draw a circle around the found point
    }
    templ = recs;

    //draw lines between consecutive corners
    Image<Bgr, Byte> Draw = input_image2.ToImage<Bgr, Byte>();
    if (points.Length > 0)
    {
        int i;
        for (i = 1; i < points.Length; i++)
        {
            Draw.Draw(new LineSegment2D(points[i - 1], points[i]), new Bgr(Color.White), 1);
        }
        Draw.Draw(new LineSegment2D(points[i - 1], points[0]), new Bgr(Color.White), 1);
    }
    imageBox2.Image = Draw.Mat; //the output image with circles drawn around the new corners
    return points;
}
//---------------------ExampleMethod-----------------------------------------------//
//ProcessFrame() below is our user-defined function in which we create an EmguCV
//image called ImageFrame, capture a frame from the camera, allocate it to our
//ImageFrame, and then show this image in our EmguCV imageBox
//------------------------------------------------------------------------------//
private void ProcessFrame(object sender, EventArgs arg)
{
    Mat frame = new Mat();
    capture.Retrieve(frame, 0);
    imageBox1.Image = frame;
    if (State == 0)
    {
        imageBox2.Image = frame;
        contours = Shape1(frame); //run the Shape function to find the area
        if (contours != null)
        {
            PreviousFrame = frame.Clone();
            State++;
        }
    }
    else if (State == 1)
    {
        points = searcher(contours, PreviousFrame, frame, ref templ); //template matching on the second frame after the area is found
        pointsStart = points;
        try
        {
            //------------calculate the norm of each of the corner templates of the second frame-------------//
            for (int i = 0; i < points.Length; i++)
            {
                Original[i] = new Mat();
                CvInvoke.GetRectSubPix(frame, new System.Drawing.Size(20, 20), points[i], Original[i]);
                normSquared[i] = Original[i].Dot(Original[i]);
            }
            //---------------------------------------------------------------------------------------------//
        }
        catch (System.NullReferenceException e)
        {
            Console.WriteLine("NullReferenceException in points. Line 669");
            points = null;
        }
        PreviousFrame = frame.Clone();
        if (points != null)
        {
            State++;
        }
        else
        {
            State = 0;
        }
    }
    else
    {
        points = searcher(points, PreviousFrame, frame, ref templ); //template matching on each following frame
        PreviousFrame = frame.Clone();
        if (framecount5 == 0)
        {
            points5behind = points;
        }
        else if (framecount5 == 1)
        {
            for (int i = 0; i < points.Length; i++)
            {
                Mat CurrentFrame = new Mat();
                CvInvoke.GetRectSubPix(frame, new System.Drawing.Size(20, 20), points[i], CurrentFrame);
                double ratio = CurrentFrame.Dot(Original[i]) / normSquared[i]; //ratio between the norm of the first template and the dot product of the first and new templates
                if (ratio < 0.9 || ratio > 1.1) //error checking
                {
                    State = 0;
                    Console.WriteLine(ratio);
                    Console.WriteLine("Refresh cause Norm");
                    break;
                }
                if (points.Length == i + 1)
                {
                    //calculate distances
                    double tmp3 = Math.Sqrt(Math.Pow(points[0].X - points[i].X, 2) + Math.Pow(points[0].Y - points[i].Y, 2));
                    double tmp1 = Math.Sqrt(Math.Pow(pointsStart[0].X - pointsStart[i].X, 2) + Math.Pow(pointsStart[0].Y - pointsStart[i].Y, 2));
                    double tmp2 = Math.Sqrt(Math.Pow(points5behind[0].X - points5behind[i].X, 2) + Math.Pow(points5behind[0].Y - points5behind[i].Y, 2));
                    ratio = tmp1 / tmp2;
                    double ratio2 = tmp3 / tmp2;
                    if ((ratio2 < 0.9 || ratio2 > 1.1) || (ratio < 0.9 || ratio > 1.1)) //error checking
                    {
                        State = 0;
                        Console.WriteLine(ratio);
                        Console.WriteLine("Refresh");
                        break;
                    }
                }
                else
                {
                    //calculate distances
                    double tmp3 = Math.Sqrt(Math.Pow(points[i + 1].X - points[i].X, 2) + Math.Pow(points[i + 1].Y - points[i].Y, 2));
                    double tmp1 = Math.Sqrt(Math.Pow(pointsStart[i + 1].X - pointsStart[i].X, 2) + Math.Pow(pointsStart[i + 1].Y - pointsStart[i].Y, 2));
                    double tmp2 = Math.Sqrt(Math.Pow(points5behind[i + 1].X - points5behind[i].X, 2) + Math.Pow(points5behind[i + 1].Y - points5behind[i].Y, 2));
                    ratio = tmp1 / tmp2;
                    double ratio2 = tmp3 / tmp2;
                    if ((ratio2 < 0.9 || ratio2 > 1.1) || (ratio < 0.9 || ratio > 1.1)) //error checking
                    {
                        State = 0;
                        Console.WriteLine(ratio);
                        Console.WriteLine("Refresh");
                        break;
                    }
                }
            }
            framecount5 = -1;
        }
        framecount5++;
    }
    frame.Dispose();
}
/// <summary>
/// Draws a single or multiple polygonal curves
/// </summary>
/// <param name="img">Image</param>
/// <param name="pts">Array of points</param>
/// <param name="isClosed">
/// Indicates whether the polylines must be drawn closed.
/// If true, the function draws a line from the last vertex of the contour to the first vertex.
/// </param>
/// <param name="color">Polyline color</param>
/// <param name="thickness">Thickness of the polyline edges</param>
/// <param name="lineType">Type of the line segments, see cvLine description</param>
/// <param name="shift">Number of fractional bits in the vertex coordinates</param>
public static void Polylines(IInputOutputArray img, Point[] pts, bool isClosed, MCvScalar color, int thickness = 1, CvEnum.LineType lineType = CvEnum.LineType.EightConnected, int shift = 0)
{
    using (VectorOfPoint vps = new VectorOfPoint(pts))
        Polylines(img, vps, isClosed, color, thickness, lineType, shift);
}
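// Usage sketch for the Point[] overload above:
using (Mat canvas = new Mat(100, 100, DepthType.Cv8U, 3))
{
    canvas.SetTo(new MCvScalar(0, 0, 0));
    Point[] triangle = { new Point(10, 80), new Point(50, 10), new Point(90, 80) };
    CvInvoke.Polylines(canvas, triangle, true, new MCvScalar(0, 255, 0), 2);
}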
/// <summary>
/// Performs object detection without a multi-scale window.
/// </summary>
/// <param name="img">Source image</param>
/// <param name="hitThreshold">Threshold for the distance between features and the SVM classifying plane</param>
/// <param name="winStride">Window stride; it must be a multiple of the block stride</param>
/// <param name="padding">Padding around the image</param>
/// <returns>Top-left corner points of the detected object boundaries</returns>
public virtual Point[] Detect(GpuMat img, double hitThreshold, Size winStride, Size padding)
{
    if (disposed)
        throw new ObjectDisposedException("HOGDescriptor");
    if (img == null)
        throw new ArgumentNullException("img");
    using (var flVec = new VectorOfPoint())
    {
        NativeMethods.HOGDescriptor_detect(ptr, img.CvPtr, flVec.CvPtr, hitThreshold, winStride, padding);
        //copy from the native std::vector<cv::Point>* into a CvPoint[] and return it
        return flVec.ToArray();
    }
}
private void ProcessFrame() { try { #region Background/Foreground Image<Bgr, byte> difference = BackgroundSubstractionOptions.Substract(_currentFrame, _frameHistoryBuffer); Rectangle? handArea = ForegoundExtractionOptions.HighlightForeground(difference); Image<Bgr, byte> skinDetectionFrame = _currentFrame.Copy(); if (handArea.HasValue) ForegoundExtractionOptions.CutBackground(skinDetectionFrame, handArea.Value); #endregion #region Skin filtering / Morphological / Smooth filtering Image<Gray, byte> skinDetectionFrameGray = SkinFilteringOptions.ActiveItem.FilterFrame(skinDetectionFrame); MorphologicalFilteringOptions.StackSync.EnterReadLock(); foreach (var operation in MorphologicalFilteringOptions.OperationStack) { if (operation.FilterType == Model.Enums.MorphologicalFilterType.Dilatation) { CvInvoke.Dilate(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(), new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType, new MCvScalar(operation.KernelBorderThickness)); } else { CvInvoke.Erode(skinDetectionFrameGray, skinDetectionFrameGray, operation.GetKernel(), new Point(operation.KernelAnchorX, operation.KernelAnchorY), operation.Intensity, operation.KernelBorderType, new MCvScalar(operation.KernelBorderThickness)); } } MorphologicalFilteringOptions.StackSync.ExitReadLock(); skinDetectionFrameGray = SmoothFilteringOptions.FilterFrame(skinDetectionFrameGray); #endregion #region Contours / ConvexHull / ConvexityDefects Image<Bgr, byte> fingerTrackerFrame = _currentFrame.Copy(); List<Point> fingers = new List<Point>(); using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint()) { CvInvoke.FindContours(skinDetectionFrameGray.Copy(), contours, null, RetrType.List, FingerTrackingOptions.ApproxMethod); if (contours.Size > 0) { VectorOfPoint biggestContour = contours[0]; if (contours.Size > 1) { for (int i = 1; i < contours.Size; i++) { if (CvInvoke.ContourArea(contours[i], false) > CvInvoke.ContourArea(biggestContour, false)) biggestContour = contours[i]; } } if (CvInvoke.ContourArea(biggestContour, false) > FingerTrackingOptions.MinContourArea) { using (VectorOfPoint contour = biggestContour) { using (VectorOfPoint approxContour = new VectorOfPoint()) { CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * FingerTrackingOptions.PerimeterScalingFactor.Value, true); fingerTrackerFrame.Draw(approxContour.ToArray(), new Bgr(FingerTrackingOptions.ContourHighlightColor), 2); VectorOfPoint convexHull = new VectorOfPoint(); VectorOfInt intHull = new VectorOfInt(); CvInvoke.ConvexHull(approxContour, convexHull, FingerTrackingOptions.ConvexHullCW); CvInvoke.ConvexHull(approxContour, intHull, FingerTrackingOptions.ConvexHullCW); fingerTrackerFrame.DrawPolyline(convexHull.ToArray(), true, new Bgr(FingerTrackingOptions.ConvexHullColor), 2); var countourRect = CvInvoke.MinAreaRect(approxContour); fingerTrackerFrame.Draw(new CircleF(new PointF(countourRect.Center.X, countourRect.Center.Y), 3), new Bgr(FingerTrackingOptions.DefectLinesColor), 2); Mat defects = new Mat(); CvInvoke.ConvexityDefects(approxContour, intHull, defects); if (!defects.IsEmpty) { var contourPoints = approxContour.ToArray(); Matrix<int> m = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels); defects.CopyTo(m); for (int i = 0; i < m.Rows; i++) { int startIdx = m.Data[i, 0]; int endIdx = m.Data[i, 1]; int depthIdx = m.Data[i, 2]; Point startPoint = contourPoints[startIdx]; Point endPoint = contourPoints[endIdx]; Point 
depthPoint = contourPoints[depthIdx]; LineSegment2D startDepthLine = new LineSegment2D(startPoint, depthPoint); LineSegment2D depthEndLine = new LineSegment2D(depthPoint, endPoint); LineSegment2D startCenterLine = new LineSegment2D(startPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y)); LineSegment2D depthCenterLine = new LineSegment2D(depthPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y)); LineSegment2D endCenterLine = new LineSegment2D(endPoint, new Point((int)countourRect.Center.X, (int)countourRect.Center.Y)); CircleF startCircle = new CircleF(startPoint, 5); CircleF depthCircle = new CircleF(depthPoint, 5); CircleF endCircle = new CircleF(endPoint, 5); if (startPoint.Y < countourRect.Center.Y) fingers.Add(startPoint); if (!FingerTrackingOptions.TrackOnlyControlPoint) { fingerTrackerFrame.Draw(startCircle, new Bgr(FingerTrackingOptions.DefectStartPointHighlightColor), 2); fingerTrackerFrame.Draw(depthCircle, new Bgr(FingerTrackingOptions.DefectDepthPointHighlightColor), 2); fingerTrackerFrame.Draw(endCircle, new Bgr(FingerTrackingOptions.DefectEndPointHighlightColor), 2); fingerTrackerFrame.Draw(startDepthLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2); //fingerTrackerFrame.Draw(depthEndLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2); fingerTrackerFrame.Draw(startCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2); //fingerTrackerFrame.Draw(depthCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2); // fingerTrackerFrame.Draw(endCenterLine, new Bgr(FingerTrackingOptions.DefectLinesColor), 2); } } _lastControlPoint = _currentControlPoint; _currentControlPoint = MouseControlOptions.UseHandCenter ? new Point((int)countourRect.Center.X, (int)countourRect.Center.Y) : fingers.FirstOrDefault(f => f.Y == fingers.Min(line => line.Y)); fingers.Clear(); if (FingerTrackingOptions.TrackOnlyControlPoint) { fingerTrackerFrame = new Image<Bgr, byte>(fingerTrackerFrame.Width, fingerTrackerFrame.Height, new Bgr(Color.Black)); fingerTrackerFrame.Draw(new CircleF(_currentControlPoint, 5), new Bgr(Color.Red), 2); } } } } } } } #endregion #region Mouse control if (_currentControlPoint.X != -1 && _currentControlPoint.Y != -1 && _lastControlPoint.X != -1 && _lastControlPoint.Y != -1 && _currentControlPoint.X != _lastControlPoint.X && _currentControlPoint.Y != _lastControlPoint.Y && Math.Abs(_currentControlPoint.X - _lastControlPoint.X) < (MouseControlOptions.FrameWidth / 10) && Math.Abs(_currentControlPoint.Y - _lastControlPoint.Y) < (MouseControlOptions.FrameHeight / 10)) { int frameX = _currentControlPoint.X; int frameY = _currentControlPoint.Y; int moveX = _currentControlPoint.X - _lastControlPoint.X; int moveY = _currentControlPoint.Y - _lastControlPoint.Y; int sensitiveX = 1; int sensitiveY = 1; if (MouseControlOptions.MouseSensitive.Value > 0) { sensitiveX = (int)(((double)MouseControlOptions.ScreenWidth / MouseControlOptions.FrameWidth) * MouseControlOptions.MouseSensitive.Value); sensitiveY = (int)(((double)MouseControlOptions.ScreenHeight / MouseControlOptions.FrameHeight) * MouseControlOptions.MouseSensitive.Value); } else if (MouseControlOptions.MouseSensitive.Value < 0) { sensitiveX = (int)(((double)MouseControlOptions.FrameWidth / MouseControlOptions.ScreenWidth) * MouseControlOptions.MouseSensitive.Value * -1); sensitiveY = (int)(((double)MouseControlOptions.FrameHeight / MouseControlOptions.ScreenHeight) * MouseControlOptions.MouseSensitive.Value * -1); } moveX *= sensitiveX * -1; moveY *= sensitiveY; 
Point currentMousePosition = GetMousePosition(); int destinationX = currentMousePosition.X + moveX; int destinationY = currentMousePosition.Y + moveY; Messanger.PublishOnCurrentThread(new FingerMovedMessage(MouseControlOptions.ControlMouse, frameX, frameY, destinationX, destinationY)); if (MouseControlOptions.ControlMouse && MouseControlOptions.MouseSensitive.Value != 0 && destinationX >= 0 && destinationY >= 0) SetCursorPos(destinationX, destinationY); } #endregion Messanger.PublishOnCurrentThread(new FrameProcessedMessage(_currentFrame, difference, skinDetectionFrameGray, fingerTrackerFrame)); } catch { /* deliberately swallow per-frame errors so one bad frame cannot kill the processing loop */ } }
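The GetMousePosition and SetCursorPos calls above are not defined in the snippet; a minimal Win32 interop sketch that would satisfy them (the wrapper class name and the shape of GetMousePosition are assumptions, the user32.dll imports are the standard ones) looks like this:

internal static class NativeCursor
{
    [System.Runtime.InteropServices.StructLayout(System.Runtime.InteropServices.LayoutKind.Sequential)]
    private struct POINT { public int X; public int Y; }

    // Win32: move the OS cursor to absolute screen coordinates.
    [System.Runtime.InteropServices.DllImport("user32.dll")]
    internal static extern bool SetCursorPos(int x, int y);

    [System.Runtime.InteropServices.DllImport("user32.dll")]
    private static extern bool GetCursorPos(out POINT p);

    // Wrapper matching the GetMousePosition() call in the snippet (name assumed).
    internal static System.Drawing.Point GetMousePosition()
    {
        POINT p;
        GetCursorPos(out p);
        return new System.Drawing.Point(p.X, p.Y);
    }
}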
private Image <Bgr, byte> GetVisualRepresentation(VectorOfPoint liquidContour, VectorOfPoint approxLiquidContour, VectorOfPoint glassTopContour, VectorOfPoint approxGlassTopContour, Point topLiquidPoint1, Point topLiquidPoint2, DrawOptions drawOptions = DrawOptions.TopContour | DrawOptions.TopApproxContour | DrawOptions.LiquidContour | DrawOptions.ApproxLiquidContour) { // NOTE: drawOptions is accepted but never consulted; every contour below is always drawn Size imgSize = new Size(_img.Width, _img.Height); Image <Bgr, byte> img = new Image <Bgr, byte>(imgSize); Point[] points; if (glassTopContour.Size != 0) { Point topPoint1 = new Point(topLiquidPoint1.X, glassTopContour.ToArray()[0].Y); Point topPoint2 = new Point(topLiquidPoint2.X, glassTopContour.ToArray()[1].Y); points = new Point[] { topPoint2, topLiquidPoint2, topLiquidPoint1, topPoint1 }; img.DrawPolyline(points, true, new Bgr(Color.DarkMagenta), 1); } points = liquidContour.ToArray(); img.DrawPolyline(points, true, new Bgr(Color.Aqua), 2); points = approxLiquidContour.ToArray(); img.DrawPolyline(points, true, new Bgr(Color.DeepPink), 4); points = glassTopContour.ToArray(); img.DrawPolyline(points, true, new Bgr(Color.YellowGreen), 5); points = approxGlassTopContour.ToArray(); img.DrawPolyline(points, true, new Bgr(Color.OrangeRed), 5); return(img); }
public static List<VectorOfPoint> GetContours(Image<Gray, Byte> image, ChainApproxMethod apxMethod = ChainApproxMethod.ChainApproxSimple, RetrType retrievalType = RetrType.List, double accuracy = 0.001d, double minimumArea = 10) { List<VectorOfPoint> convertedContours = new List<VectorOfPoint>(); using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint()) { using (Image<Gray, Byte> tempImage = image.Copy()) { CvInvoke.FindContours(tempImage, contours, null, retrievalType, apxMethod); } int count = contours.Size; for (int i = 0; i < count; i++) { using (VectorOfPoint contour = contours[i]) { VectorOfPoint approxContour = new VectorOfPoint(); CvInvoke.ApproxPolyDP(contour, approxContour, accuracy, false); if (CvInvoke.ContourArea(approxContour, false) > minimumArea) { convertedContours.Add(approxContour); } } } } return convertedContours; }
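A minimal, hypothetical call site for the GetContours helper above ("input.png" and the threshold values are placeholders). Note that the helper hands ownership of each returned VectorOfPoint to the caller, so the caller should dispose them:

using (Image<Gray, byte> src = new Image<Gray, byte>("input.png"))
using (Image<Gray, byte> binary = src.ThresholdBinary(new Gray(128), new Gray(255)))
{
    // keep only contours whose approximated area exceeds 25 px
    List<VectorOfPoint> contours = GetContours(binary, minimumArea: 25);
    foreach (VectorOfPoint c in contours)
    {
        Console.WriteLine("area = " + CvInvoke.ContourArea(c));
        c.Dispose();
    }
}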
private void ProcessImage() { while (_capture.IsOpened) { // frame maintenance Mat workingImage = _capture.QueryFrame(); // resize to PictureBox aspect ratio int newHeight = (workingImage.Size.Height * pictureBox1.Size.Width) / workingImage.Size.Width; Size newSize = new Size(pictureBox1.Size.Width, newHeight); CvInvoke.Resize(workingImage, workingImage, newSize); // as a test for comparison, create a copy of the image with a binary filter: var binaryImage = workingImage.ToImage <Gray, byte>().ThresholdBinary(new Gray(125), new Gray(255)).Mat; // Sample for gaussian blur: var blurredImage = new Mat(); var cannyImage = new Mat(); var decoratedImage = new Mat(); CvInvoke.GaussianBlur(workingImage, blurredImage, new Size(9, 9), 0); // convert to B/W CvInvoke.CvtColor(blurredImage, blurredImage, typeof(Bgr), typeof(Gray)); // apply canny: // NOTE: Canny function can frequently create duplicate lines on the same shape // depending on blur amount and threshold values, some tweaking might be needed. // You might also find that not using Canny and instead using FindContours on // a binary-threshold image is more accurate. CvInvoke.Canny(blurredImage, cannyImage, 150, 255); // make a copy of the canny image, convert it to color for decorating: CvInvoke.CvtColor(cannyImage, decoratedImage, typeof(Gray), typeof(Bgr)); // find contours: //Mat sourceFrameWithArt = workingImage.Clone(); using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint()) { string shape = " "; // Build list of contours CvInvoke.FindContours(cannyImage, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple); for (int i = 0; i < contours.Size; i++) { VectorOfPoint contour = contours[i]; CvInvoke.Polylines(decoratedImage, contour, true, new Bgr(Color.Black).MCvScalar); //*****************This line hides the unneeded contours*************** using (VectorOfPoint approxContour = new VectorOfPoint()) { CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true); if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with area greater than 250 { if (approxContour.Size == 3) { shape = "Triangle"; Invoke(new Action(() => { label3.Text = "Triangle"; label8.Text = $"0"; CvInvoke.Polylines(decoratedImage, contour, true, new Bgr(Color.Green).MCvScalar); })); } if (approxContour.Size == 4) { shape = "Rectangle"; Invoke(new Action(() => { label3.Text = "Rectangle"; label8.Text = $"1"; CvInvoke.Polylines(decoratedImage, contour, true, new Bgr(Color.Red).MCvScalar); })); } Invoke(new Action(() => { label2.Text = $"There are {approxContour.Size} corners detected"; })); } Rectangle boundingBox = CvInvoke.BoundingRectangle(contours[i]); //CvInvoke.Polylines(decoratedImage, contour, true, new Bgr(Color.Green).MCvScalar); MarkDetectedObject(workingImage, contours[i], boundingBox, CvInvoke.ContourArea(contour), shape); Point center = new Point(boundingBox.X + boundingBox.Width / 2, boundingBox.Y + boundingBox.Height / 2); Invoke(new Action(() => { label4.Text = $"Position: {center.X}, {center.Y}"; //coords that get sent to the Arduino textBox1.Text = $"{center.X}"; textBox2.Text = $"{center.Y}"; })); Thread.Sleep(50); } } Invoke(new Action(() => { label1.Text = $"There are {contours.Size} contours detected"; //# of total contours })); } // output images: pictureBox1.Image = workingImage.Bitmap; pictureBox2.Image = decoratedImage.Bitmap; } }
public static Image <Bgr, Byte> CropCodeFromImage(Image <Bgr, Byte> source, VectorOfVectorOfPoint Contours) { Image <Bgr, Byte> CroptedImage; int idMain = 0; int idBlue = 1; if (CvInvoke.ContourArea(Contours[0]) < CvInvoke.ContourArea(Contours[1])) { idMain = 1; idBlue = 0; } Point[] mainPoints = Contours[idMain].ToArray(); Point[] subPoints = Contours[idBlue].ToArray(); Image <Bgr, Byte> RotatedImage; // 0. Crop the rectangle circumscribed around the main contour (as an optimization) Point[] contourRectangle = new Point[4]; int bottom = mainPoints.Min(x => x.Y); contourRectangle[0] = mainPoints.Where(p => p.Y == bottom).First(); int left = mainPoints.Min(x => x.X); contourRectangle[1] = mainPoints.Where(p => p.X == left).First(); int top = mainPoints.Max(x => x.Y); contourRectangle[2] = mainPoints.Where(p => p.Y == top).First(); int right = mainPoints.Max(x => x.X); contourRectangle[3] = mainPoints.Where(p => p.X == right).First(); Rectangle box = new Rectangle(left, bottom, right - left, top - bottom); CroptedImage = source;//.GetSubRect(box); // 1. Find the rotation angle relative to the vertical, for aligning the contours vertically double Angle = 0; // 1.1. Find two points: the leftmost point of the main contour and the leftmost point of the blue contour Point mainLeft = (from p in mainPoints orderby p.X select p).FirstOrDefault(); Point mainBottom = (from p in mainPoints orderby p.Y descending select p).FirstOrDefault(); Point subLeft = (from p in subPoints orderby p.X select p).FirstOrDefault(); Point center = new Point(CroptedImage.Width / 2, CroptedImage.Height / 2); // 1.2. Find the rotation angle that places one point above the other if (mainLeft.X != subLeft.X) { double tg = -(mainLeft.Y - subLeft.Y) / (double)(mainLeft.X - subLeft.X); Angle = Math.Atan(tg) + Math.PI / 2; } #region vertical alignment // handle the upside-down image case mainLeft.X -= center.X; mainLeft.Y -= center.Y; // multiply by the rotation matrix Point old = new Point(mainLeft.X, mainLeft.Y); mainLeft.X = (int)(old.X * Math.Cos(Angle) - old.Y * Math.Sin(Angle)); mainLeft.Y = (int)(+old.X * Math.Sin(Angle) + old.Y * Math.Cos(Angle)); // convert back into the cropped image's coordinates mainLeft.X += center.X; mainLeft.Y += center.Y; // handle the upside-down image case subLeft.X -= center.X; subLeft.Y -= center.Y; // multiply by the rotation matrix old = new Point(subLeft.X, subLeft.Y); subLeft.X = (int)(old.X * Math.Cos(Angle) - old.Y * Math.Sin(Angle)); subLeft.Y = (int)(+old.X * Math.Sin(Angle) + old.Y * Math.Cos(Angle)); // convert back into the cropped image's coordinates subLeft.X += center.X; subLeft.Y += center.Y; if (mainLeft.Y > subLeft.Y) { Angle -= Math.PI; } #endregion // 1.3. Rotate the main image by this angle /*DEBUG*/ //source.Draw(contourRectangle, new Bgr(Color.Blue), 3); RotatedImage = new Image <Bgr, byte>(CroptedImage.Rotate(Angle * 180 / Math.PI, new Bgr(Color.White), false).Bitmap); // 2. Rotate the contour by this angle RotateContour(mainPoints, Angle, center, new Point(center.X + (RotatedImage.Width - CroptedImage.Width) / 2, center.Y + (RotatedImage.Height - CroptedImage.Height) / 2)); // 3. Cut the (rotated) main contour out of the cropped, rotated image bottom = mainPoints.Min(x => x.Y); top = mainPoints.Max(x => x.Y); left = mainPoints.Min(x => x.X); right = mainPoints.Max(x => x.X); box = new Rectangle(left, bottom, right - left, top - bottom); VectorOfVectorOfPoint contoursRoteated = new VectorOfVectorOfPoint(); contoursRoteated.Push(new VectorOfPoint()); contoursRoteated.Push(new VectorOfPoint()); contoursRoteated[0].Push(mainPoints); contoursRoteated[1].Push(mainPoints); // crashes when the contour is malformed if (left < 0 || right < 0 || top > RotatedImage.Height || right > RotatedImage.Width) { RotatedImage.DrawPolyline(mainPoints, true, new Bgr(Color.Red), 2); return(RotatedImage); } else { return(CropImage(RotatedImage, contoursRoteated)); // NOTE: everything from here to the end of the method is unreachable dead code CroptedImage = RotatedImage.GetSubRect(box); } for (int i = 0; i < mainPoints.Length; i++) { mainPoints[i].X -= left; mainPoints[i].Y -= bottom; if (mainPoints[i].X > box.Width) { mainPoints[i].X = box.Width; } if (mainPoints[i].Y > box.Height) { mainPoints[i].Y = box.Height; } } return(CropImage(CroptedImage, contoursRoteated)); VectorOfPoint ApproxRect = new VectorOfPoint(); Contours[idMain].Clear(); Contours[idMain].Push(mainPoints); // Approximation CvInvoke.ApproxPolyDP(Contours[idMain], ApproxRect, CvInvoke.ArcLength(Contours[idMain], true) * 0.15, true); MCvMoments moments = CvInvoke.Moments(ApproxRect); Point Code_gravityCenter = new Point((int)(moments.M10 / moments.M00), (int)(moments.M01 / moments.M00)); //corners Point[] corners_mainBox = ApproxRect.ToArray(); //sort corners List <Point> leftl = new List <Point>(); List <Point> rightl = new List <Point>(); for (int i = 0; i < corners_mainBox.Length; i++) { if (corners_mainBox[i].X < Code_gravityCenter.X) { leftl.Add(corners_mainBox[i]); } else { rightl.Add(corners_mainBox[i]); } } //matrix with starting Points // top-left top-right bottom-right bottom-left PointF[] corners = new PointF[4]; corners[0] = leftl[0].Y < leftl[1].Y ? leftl[0] : leftl[1]; //top-left corners[1] = rightl[0].Y < rightl[1].Y ? rightl[0] : rightl[1]; //top-right corners[2] = rightl[0].Y > rightl[1].Y ? rightl[0] : rightl[1]; //bottom-right corners[3] = leftl[0].Y > leftl[1].Y ? leftl[0] : leftl[1]; //bottom-left //create matrices box = CvInvoke.BoundingRectangle(ApproxRect); PointF[] targetPF = { new PointF(0, 0), new PointF(box.Width, 0), new PointF(box.Width, box.Width), new PointF(0, box.Width), }; //matrix with destination Points //rotate crop Image <Bgr, Byte> newcroppimg = CroptedImage.Clone(); //transformation matrix Mat TransformMat = CvInvoke.GetPerspectiveTransform(corners, targetPF); Size ROI = box.Size; //transformation CvInvoke.WarpPerspective(CroptedImage, newcroppimg, TransformMat, ROI); return(newcroppimg); }
public void PutDescriptions(Image <Bgr, byte> imgInput, VectorOfVectorOfPoint contours, int i, VectorOfPoint approx) { var moments = CvInvoke.Moments(contours[i]); int x = (int)(moments.M10 / moments.M00); int y = (int)(moments.M01 / moments.M00); if (approx.Size == 3) { CvInvoke.PutText(imgInput, "Triangle", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 0, 255), 2); } if (approx.Size == 4) { Rectangle rect = CvInvoke.BoundingRectangle(contours[i]); // cast to double: integer division would truncate the aspect ratio and break the square test if (Math.Abs(1 - ((double)rect.Width / rect.Height)) < 0.06) { CvInvoke.PutText(imgInput, "Square", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 0, 255), 2); } else { CvInvoke.PutText(imgInput, "Rectangle", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 0, 255), 2); } } if (approx.Size == 5) { CvInvoke.PutText(imgInput, "Pentagon", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 0, 255), 2); } }
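The cast matters because BoundingRectangle returns integer width and height, so a plain rect.Width / rect.Height truncates. A hypothetical call site for PutDescriptions, assuming a binarized image named binaryImage and a colour display image named imgInput (both names are placeholders):

using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
{
    CvInvoke.FindContours(binaryImage, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
    for (int i = 0; i < contours.Size; i++)
    {
        using (VectorOfPoint approx = new VectorOfPoint())
        {
            // 4% of the arc length is a common epsilon for shape labelling
            CvInvoke.ApproxPolyDP(contours[i], approx, CvInvoke.ArcLength(contours[i], true) * 0.04, true);
            PutDescriptions(imgInput, contours, i, approx);
        }
    }
}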
/// <summary> /// check whether a contour is a character or not /// </summary> /// <param name="plate_after_preprocessing"></param> /// <param name="suspected_contour">just a single contour</param> /// <returns>true: if yes, false: if not</returns> private static bool is_contour_a_character(Image <Gray, byte> plate_after_preprocessing, VectorOfPoint suspected_contour) { Rectangle r = CvInvoke.BoundingRectangle(suspected_contour); double c_W = r.Width; double c_H = r.Height; double i_W = plate_after_preprocessing.Width; double i_H = plate_after_preprocessing.Height; // ratio_1: image area over the contour's bounding-box area double ratio_1 = (i_W * i_H) / (c_W * c_H); // ratio_2: contour height over image height double ratio_2 = c_H / i_H; if ((ratio_1 >= 10 && ratio_1 < 43) && (ratio_2 >= 0.4) && ((c_H / c_W) > 1.2)) { return(true); } else { return(false); } }
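A sketch of how this predicate would typically be driven (the surrounding variable names are assumptions): find the external contours of the preprocessed plate, keep the ones that pass, and sort the boxes into reading order.

List<Rectangle> characterBoxes = new List<Rectangle>();
using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
{
    // Copy() so FindContours cannot disturb the original plate image
    CvInvoke.FindContours(plate_after_preprocessing.Copy(), contours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
    for (int i = 0; i < contours.Size; i++)
    {
        if (is_contour_a_character(plate_after_preprocessing, contours[i]))
            characterBoxes.Add(CvInvoke.BoundingRectangle(contours[i]));
    }
}
// left-to-right, the usual reading order of a plate
characterBoxes.Sort((a, b) => a.X.CompareTo(b.X));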
private Mat Draw(Mat observedImage) { if (button1.Text == "UnLoad") { FindMatch(observedImage); Mat result = new Mat(); if (matches.Size > 2) { //Draw the matched keypoints Features2DToolbox.DrawMatches(objImage, objKeyPoints, observedImage, observedKeyPoints, matches, result, new MCvScalar(255, 255, 255), new MCvScalar(255, 255, 255), mask, Features2DToolbox.KeypointDrawType.NotDrawSinglePoints); //Bgr rgb5 = new Bgr(250, 250, 250); //Features2DToolbox.DrawKeypoints(observedImage, observedKeyPoints, observedImage, rgb5); int i; float X = 0; float Y = 0; int count = 0; for (i = 0; i < matches.Size; i++) { if (mask.GetData(i)[0] == 1) { X += observedKeyPoints[matches[i][0].QueryIdx].Point.X - objKeyPoints[matches[i][0].TrainIdx].Point.X; Y += observedKeyPoints[matches[i][0].QueryIdx].Point.Y - objKeyPoints[matches[i][0].TrainIdx].Point.Y; count++; } } if (count > 0) { X = X / count + (float)(Math.Abs(objwidth / 2.0)); Y = Y / count + (float)(Math.Abs(objheight / 2.0)); } else { X = -1; Y = -1; } if (CvInvoke.CountNonZero(mask) > 10) { if (trackings[trackings.Length - 1].Y >= 0) { Point[] newtrackings = new Point[2 * trackings.Length + 1]; for (i = 0; i < trackings.Length; i++) { newtrackings[i] = trackings[i]; } newtrackings[i].X = (int)X; newtrackings[i].Y = (int)Y; for (i++; i < newtrackings.Length; i++) { newtrackings[i].X = -1; newtrackings[i].Y = -1; } trackings = newtrackings; } else { for (i = 0; trackings[i].Y >= 0; i++) { } trackings[i].X = (int)X; trackings[i].Y = (int)Y; } } if (homography != null) { //draw a rectangle along the projected model System.Drawing.Rectangle rect = new System.Drawing.Rectangle(Point.Empty, objImage.Size); PointF[] pts = new PointF[] { new PointF(rect.Left, rect.Bottom), new PointF(rect.Right, rect.Bottom), new PointF(rect.Right, rect.Top), new PointF(rect.Left, rect.Top) }; pts = CvInvoke.PerspectiveTransform(pts, homography); Point[] points = Array.ConvertAll <PointF, Point>(pts, Point.Round); using (VectorOfPoint vp = new VectorOfPoint(points)) { //CvInvoke.Polylines(observedImage, vp, true, new MCvScalar(255, 0, 0, 255), 5); } } float a = 2; CvInvoke.Ellipse(result, new RotatedRect(new PointF(X, Y), new SizeF(30, 30), a), new MCvScalar(0, 250, 255, 255), 4); return(result); } } else if (button1.Text == "Load") { uObservedImage = observedImage.GetUMat(AccessType.ReadWrite); // extract features from the observed image ORBCPU.DetectAndCompute(uObservedImage, null, observedKeyPoints, observedDescriptors, false); Bgr rgb1 = new Bgr(250, 250, 250); Features2DToolbox.DrawKeypoints(observedImage, observedKeyPoints, observedImage, rgb1); } return(observedImage); }
private static PadErrorDetail CheckPad(Models.PadItem padItem, PadSegmentInfo[] padSegment, double umPPixel, bool Inflate = false) { Rectangle boundPadRef = padItem.BoudingAdjust; if (Inflate) { boundPadRef.Inflate(3, 3); } double sPadRef = padItem.Area; List <int> idPadSegOverlap = new List <int>(); for (int j = 0; j < padSegment.Length; j++) { //if (boundPadRef.IntersectsWith(padSegment[j].Bouding)) //{ // idPadSegOverlap.Add(j); //} if (IntersectsContour(padItem.ContourAdjust, padSegment[j].Contours, boundPadRef)) { idPadSegOverlap.Add(j); } } boundPadRef = padItem.Bouding; PadErrorDetail padEr = new PadErrorDetail(); double scaleArea = 0; double scaleAreaAddperimeter = 0; double shiftx = 0; double shifty = 0; int inflate = 40; padEr.AreaStdHight = padItem.AreaThresh.UM_USL; padEr.AreaStdLow = padItem.AreaThresh.PERCENT_LSL; padEr.ShiftXStduM = padItem.ShiftXThresh.UM_USL; padEr.ShiftYStduM = padItem.ShiftYThresh.UM_USL; padEr.ShiftXStdArea = padItem.ShiftXThresh.PERCENT_LSL; padEr.ShiftYStdArea = padItem.ShiftYThresh.PERCENT_LSL; padEr.ROI = Rectangle.Inflate(boundPadRef, inflate, inflate); padEr.Pad = padItem; if (padItem.FOVs.Count > 0) { padEr.FOVNo = padItem.FOVs[0]; } if (idPadSegOverlap.Count > 0) { double areaAllPadSeg = 0; double perimeter = 0; Rectangle boundAllPadSeg = new Rectangle(); for (int j = 0; j < idPadSegOverlap.Count; j++) { PadSegmentInfo padSeg = padSegment[idPadSegOverlap[j]]; areaAllPadSeg += padSeg.Area; using (VectorOfPoint cnt = new VectorOfPoint(padSeg.Contours)) { perimeter += CvInvoke.ArcLength(cnt, true) / 2; } if (j == 0) { boundAllPadSeg = padSeg.Bouding; continue; } if (padSeg.Bouding.X < boundAllPadSeg.X) { boundAllPadSeg.X = padSeg.Bouding.X; } if (padSeg.Bouding.Y < boundAllPadSeg.Y) { boundAllPadSeg.Y = padSeg.Bouding.Y; } if (padSeg.Bouding.X + padSeg.Bouding.Width > boundAllPadSeg.X + boundAllPadSeg.Width) { boundAllPadSeg.Width = padSeg.Bouding.X + padSeg.Bouding.Width - boundAllPadSeg.X; } if (padSeg.Bouding.Y + padSeg.Bouding.Height > boundAllPadSeg.Y + boundAllPadSeg.Height) { boundAllPadSeg.Height = padSeg.Bouding.Y + padSeg.Bouding.Height - boundAllPadSeg.Y; } } padEr.Center = new Point(boundAllPadSeg.X + boundAllPadSeg.Width / 2, boundAllPadSeg.Y + boundAllPadSeg.Height / 2); scaleArea = areaAllPadSeg * 100 / sPadRef; scaleAreaAddperimeter = (areaAllPadSeg + perimeter) * 100 / sPadRef; shiftx = (Math.Max(Math.Abs(boundPadRef.X - boundAllPadSeg.X), Math.Abs((boundPadRef.X + boundPadRef.Width) - (boundAllPadSeg.X + boundAllPadSeg.Width))) * umPPixel); shifty = (Math.Max(Math.Abs(boundPadRef.Y - boundAllPadSeg.Y), Math.Abs((boundPadRef.Y + boundPadRef.Height) - (boundAllPadSeg.Y + boundAllPadSeg.Height))) * umPPixel); bool insert = false; double deviation = (100 - (sPadRef / umPPixel)) / 2; deviation = deviation < 0 ? 0 : deviation; if (scaleAreaAddperimeter < padItem.AreaThresh.PERCENT_LSL - deviation) { //if (sPadRef > 100 || (sPadRef < 100 && scaleArea < 5)) { insert = true; } } if (scaleArea > padItem.AreaThresh.UM_USL) { insert = true; } if (shiftx > padItem.ShiftXThresh.UM_USL) { insert = true; } if (shifty > padItem.ShiftYThresh.UM_USL) { insert = true; } if (insert) { padEr.Area = scaleArea; padEr.ShiftX = shiftx; padEr.ShiftY = shifty; return(padEr); } else { // pad passes return(null); } } else { // no solder paste found return(padEr); } }
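IntersectsContour is referenced above but not shown. One plausible implementation, with the parameter types inferred from the call site (an assumption, not the original code), is a cheap bounding-rectangle reject followed by a point-in-polygon test:

// Hypothetical sketch: true if any segment point lies on or inside the pad contour.
private static bool IntersectsContour(Point[] padContour, Point[] segContour, Rectangle padBound)
{
    using (VectorOfPoint pad = new VectorOfPoint(padContour))
    {
        foreach (Point p in segContour)
        {
            if (!padBound.Contains(p)) continue; // fast reject
            // PointPolygonTest >= 0 means on the edge or inside
            if (CvInvoke.PointPolygonTest(pad, new PointF(p.X, p.Y), false) >= 0)
                return true;
        }
    }
    return false;
}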
/// <summary> /// Performs object detection without a multi-scale window. /// </summary> /// <param name="img">Source image. CV_8UC1 and CV_8UC4 types are supported for now.</param> /// <param name="weights"></param> /// <param name="hitThreshold">Threshold for the distance between features and SVM classifying plane. /// Usually it is 0 and should be specified in the detector coefficients (as the last free coefficient). /// But if the free coefficient is omitted (which is allowed), you can specify it manually here.</param> /// <param name="winStride">Window stride. It must be a multiple of block stride.</param> /// <param name="padding">Mock parameter to keep the CPU interface compatibility. It must be (0,0).</param> /// <param name="searchLocations"></param> /// <returns>Left-top corner points of detected objects boundaries.</returns> public virtual Point[] Detect(Mat img, out double[] weights, double hitThreshold = 0, Size? winStride = null, Size? padding = null, Point[] searchLocations = null) { if (disposed) throw new ObjectDisposedException("HOGDescriptor"); if (img == null) throw new ArgumentNullException("img"); img.ThrowIfDisposed(); Size winStride0 = winStride.GetValueOrDefault(new Size()); Size padding0 = padding.GetValueOrDefault(new Size()); using (var flVec = new VectorOfPoint()) using (var weightsVec = new VectorOfDouble()) { int slLength = (searchLocations != null) ? searchLocations.Length : 0; NativeMethods.objdetect_HOGDescriptor_detect(ptr, img.CvPtr, flVec.CvPtr, weightsVec.CvPtr, hitThreshold, winStride0, padding0, searchLocations, slLength); weights = weightsVec.ToArray(); return flVec.ToArray(); } }
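A hedged usage sketch for this overload with OpenCV's stock HOG people detector (the setup calls are standard OpenCV; the exact ImreadModes spelling varies between OpenCvSharp versions):

using (var img = new Mat("people.jpg", ImreadModes.GrayScale))
using (var hog = new HOGDescriptor())
{
    // load the pre-trained 64x128 pedestrian SVM shipped with OpenCV
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());
    double[] weights;
    // defaults: hitThreshold 0 relies on the free coefficient baked into the detector
    Point[] hits = hog.Detect(img, out weights);
    for (int i = 0; i < hits.Length; i++)
        Console.WriteLine("hit at {0}, weight {1:F3}", hits[i], weights[i]);
}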
private Drawing.Bitmap GetMaskedBitmap(string imagePath, IList<Point> pointCollection) { Mat matrix = new Mat(imagePath, LoadImageType.AnyColor); UMat uMatrix = matrix.ToUMat(AccessType.ReadWrite); // Scale Polygon List<Point> scaledPoints = GetScaledPoints(pointCollection, uMatrix.Rows, uMatrix.Cols); polygonPoints = GetPolygonPoints(scaledPoints, uMatrix.Rows, uMatrix.Cols); // Apply Polygon using (VectorOfPoint vPoint = new VectorOfPoint(polygonPoints.ToArray())) using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint)) { CvInvoke.FillPoly(uMatrix, vvPoint, new Bgr(0, 0, 0).MCvScalar); } // Crop Bitmap int left = (int)scaledPoints.Min(p => p.X); int top = (int)scaledPoints.Min(p => p.Y); int width = (int)scaledPoints.Max(p => p.X) - left; int height = (int)scaledPoints.Max(p => p.Y) - top; Image<Bgr, byte> image = new Image<Bgr, byte>(uMatrix.Bitmap); image.ROI = new Drawing.Rectangle(left, top, width, height); return image.Bitmap; }
/// <summary> /// Computes the HOG descriptor vector of the given image. /// </summary> /// <param name="image">The image</param> /// <param name="winStride">Window stride. Must be a multiple of block stride. Use Size.Empty for default</param> /// <param name="padding">Padding. Use Size.Empty for default</param> /// <param name="locations">Locations for the computation. Can be null if not needed</param> /// <returns>The descriptor vector</returns> public float[] Compute(IInputArray image, Size winStride = new Size(), Size padding = new Size(), Point[] locations = null) { using (VectorOfFloat desc = new VectorOfFloat()) using (InputArray iaImage = image.GetInputArray()) { if (locations == null) { CvInvoke.cveHOGDescriptorCompute(_ptr, iaImage, desc, ref winStride, ref padding, IntPtr.Zero); } else { using (VectorOfPoint vp = new VectorOfPoint(locations)) { CvInvoke.cveHOGDescriptorCompute(_ptr, iaImage, desc, ref winStride, ref padding, vp); } } return desc.ToArray(); } }
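A minimal sketch of calling Compute on a blank 64x128 patch; with the default HOG parameters (16x16 blocks on an 8x8 stride, 9 bins) that window contains 7 x 15 = 105 blocks, so the vector has 105 x 36 = 3780 floats:

using (HOGDescriptor hog = new HOGDescriptor())
using (Mat patch = new Mat(128, 64, DepthType.Cv8U, 1))
{
    patch.SetTo(new MCvScalar(0)); // all-black test patch
    float[] descriptor = hog.Compute(patch);
    Console.WriteLine(descriptor.Length); // 3780 with the default parameters
}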
private void button1_Click(object sender, EventArgs e) { if (_frame.IsEmpty) { return; } //try //{ Mat m = new Mat(); Mat n = new Mat(); Mat o = new Mat(); Mat aux = new Mat(); Mat binaryDiffFrame = new Mat(); Mat denoisedDiffFrame = new Mat(); Mat finalFrame = new Mat(); // //GET THE COLOR // Image <Bgr, Byte> imge = _frame.ToImage <Bgr, Byte>(); Image <Bgr, byte> ret = imge.Copy(); Image <Bgr, byte> auxImge = imge.Copy(); Image <Bgr, byte> auxImge2 = imge.Copy(); Image <Bgr, byte> auxImge3 = imge.Copy(); Image <Bgr, byte> resultadoFinal = imge.Copy(); //Transform to the HSV color space Image <Hsv, Byte> hsvimg = auxImge.Convert <Hsv, Byte>(); //extract the hue and value channels Image <Gray, Byte>[] channels = hsvimg.Split(); //split into components Image <Gray, Byte> imghue = channels[0]; //hsv, channels[0] is hue. Image <Gray, Byte> imgval = channels[2]; //hsv, channels[2] is value. //BLUE filter --> 90 to 120 //Green --> 40 to 70 Image <Gray, byte> huefilter = imghue.InRange(new Gray(90), new Gray(120)); //Filter out the less bright colors Image <Gray, byte> valfilter = imgval.InRange(new Gray(100), new Gray(255)); //Saturation filter - remove whites channels[1]._ThresholdBinary(new Gray(10), new Gray(255)); // Saturation //Combine the filters to obtain the image Image <Gray, byte> colordetimg = huefilter.And(valfilter).And(channels[1]);//there used to be a Not() here //Colorize the image var mat = auxImge2.Mat; mat.SetTo(new MCvScalar(0, 0, 255), colordetimg); mat.CopyTo(ret); //Image<Bgr, byte> imgout = ret.CopyBlank();//image without the black background ret._Or(auxImge2); //Show the image with the reds highlighted pictureBox2.Image = ret.Bitmap; Mat SE2 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 2), new Point(-1, -1)); CvInvoke.MorphologyEx(colordetimg, colordetimg, MorphOp.Erode, SE2, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255)); Mat SE3 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 2), new Point(-1, -1)); CvInvoke.MorphologyEx(colordetimg, colordetimg, MorphOp.Dilate, SE3, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255)); Mat SE = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1)); CvInvoke.MorphologyEx(colordetimg, aux, Emgu.CV.CvEnum.MorphOp.Close, SE, new Point(-1, -1), 2, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255)); pictureBox2.Image = aux.Bitmap; Image <Bgr, byte> temp = aux.ToImage <Bgr, byte>(); var temp2 = temp.SmoothGaussian(5).Convert <Gray, byte>().ThresholdBinary(new Gray(230), new Gray(255)); VectorOfVectorOfPoint contorno = new VectorOfVectorOfPoint(); Mat matAux = new Mat(); CvInvoke.FindContours(temp2, contorno, matAux, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple); if (contorno.Size > 0) { for (int i = 0; i < contorno.Size; i++) { VectorOfPoint approxContour = new VectorOfPoint(); double perimetro = CvInvoke.ArcLength(contorno[i], true); VectorOfPoint approx = new VectorOfPoint(); double area = CvInvoke.ContourArea(contorno[i]); if (area > 1000) { var moments = CvInvoke.Moments(contorno[i]); int x = (int)(moments.M10 / moments.M00); int y = (int)(moments.M01 / moments.M00); CvInvoke.ApproxPolyDP(contorno[i], approx, 0.04 * perimetro, true); CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(0, 255, 255), 2); RotatedRect rectangle = CvInvoke.MinAreaRect(approx); //CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(255, 255, 255), 2, 
LineType.AntiAlias); MessageBox.Show("Tamano figura " + rectangle.Size.Width * rectangle.Size.Height); resultadoFinal.Draw(rectangle, new Bgr(Color.Cyan), 1); CvInvoke.PutText(resultadoFinal, "Marcador Pagina", new Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 255, 255), 2); pictureBox3.Image = resultadoFinal.ToBitmap(); } } } }
public static double RadiusAve(System.Drawing.Point pt, List <ConvexityDefect> cdList, VectorOfPoint vp) { // NOTE: assumes cdList is non-empty; an empty list yields 0.0 / 0.0 = NaN double sumOfDist = 0.0; double numOfSample = (double)cdList.Count; foreach (ConvexityDefect cd in cdList) { sumOfDist += Geometry.Distance(pt, vp[cd.Point]); } return(sumOfDist / numOfSample); }
private void button6_Click(object sender, EventArgs e) { SWReset(); List <Triangle2DF> triangleList = new List <Triangle2DF>(); //a box is a rotated rectangle List <RotatedRect> boxList = new List <RotatedRect>(); //contours: the outlines using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint()) { CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple); int count = contours.Size; for (int i = 0; i < count; i++) { using (VectorOfPoint contour = contours[i]) using (VectorOfPoint approxContour = new VectorOfPoint()) { CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true); if (CvInvoke.ContourArea(approxContour, false) > 10) //only use contours with area greater than 250? (the code actually checks > 10) { if (approxContour.Size == 3) //the contour has three vertices, so it is a triangle { Point[] pts = approxContour.ToArray(); triangleList.Add(new Triangle2DF( pts[0], pts[1], pts[2] )); } else if (approxContour.Size == 4) //the contour has four vertices //check whether every angle inside the contour is within [80,100] degrees { bool isRectangle = true; Point[] pts = approxContour.ToArray(); //convert the vector to an array LineSegment2D[] edges = PointCollection.PolyLine(pts, true); //turn the points into line segments for (int j = 0; j < edges.Length; j++) { double angle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));//get the angle between the lines if (angle < 80 || angle > 100) { isRectangle = false; break; } } if (isRectangle) { boxList.Add(CvInvoke.MinAreaRect(approxContour)); } } } } } SWStop("检测长方形和三角形"); //draw the triangles and rectangles Image <Bgr, byte> triangleRectangleImage = srcImage.CopyBlank(); foreach (Triangle2DF triangle in triangleList) { triangleRectangleImage.Draw(triangle, new Bgr(Color.DarkBlue), 2); } foreach (RotatedRect box in boxList) { triangleRectangleImage.Draw(box, new Bgr(Color.DarkOrange), 2); } imageBox2.Image = triangleRectangleImage; }
public BestBox(RotatedRect box, double value, VectorOfPoint vector) { Box = box; Value = value; VectorsList = vector; }
private VectorOfPointF FindTarget(Mat input) { var cannyEdges = new Mat(); var uImage = new Mat(); var gray = new Mat(); var blurred = new Mat(); // Convert to greyscale CvInvoke.CvtColor(input, uImage, ColorConversion.Bgr2Gray); CvInvoke.BilateralFilter(uImage, gray, 11, 17, 17); uImage.Dispose(); CvInvoke.MedianBlur(gray, blurred, 11); gray.Dispose(); // Get edged version const double cannyThreshold = 0.0; const double cannyThresholdLinking = 200.0; CvInvoke.Canny(blurred, cannyEdges, cannyThreshold, cannyThresholdLinking); blurred.Dispose(); if (_showEdged) { CvInvoke.Imshow("Source", cannyEdges); } // Get contours using (var contours = new VectorOfVectorOfPoint()) { CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple); var count = contours.Size; // Looping contours for (var i = 0; i < count; i++) { var approxContour = new VectorOfPoint(); using var contour = contours[i]; CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.02, true); if (approxContour.Size != 4) { continue; } var cntArea = CvInvoke.ContourArea(approxContour); if (!(cntArea / _srcArea > .15)) { continue; } var pointOut = new VectorOfPointF(SortPoints(approxContour)); _targets.Add(VPointFToVPoint(pointOut)); } if (_showEdged) { var color = new MCvScalar(255, 255, 0); CvInvoke.DrawContours(input, contours, -1, color); CvInvoke.Imshow("Edged", cannyEdges); } } var output = CountTargets(_targets); cannyEdges.Dispose(); return(output); }
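FindTarget depends on a SortPoints helper that is not shown. A plausible implementation (an assumption, not the author's code) orders the four corners top-left, top-right, bottom-right, bottom-left via coordinate sums and differences; it assumes a using directive for System.Linq:

private static PointF[] SortPoints(VectorOfPoint quad)
{
    Point[] p = quad.ToArray();
    PointF[] ordered = new PointF[4];
    ordered[0] = p.OrderBy(pt => pt.X + pt.Y).First();            // top-left: smallest x+y
    ordered[2] = p.OrderByDescending(pt => pt.X + pt.Y).First();  // bottom-right: largest x+y
    ordered[1] = p.OrderBy(pt => pt.Y - pt.X).First();            // top-right: smallest y-x
    ordered[3] = p.OrderByDescending(pt => pt.Y - pt.X).First();  // bottom-left: largest y-x
    return ordered;
}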
//------------------Main Algorithm----------------------// private VectorOfPoint Shape1(Mat FrameFormCam) { StringBuilder msgBuilder = new StringBuilder("Performance: "); //start clock to measure performance Mat imageread = FrameFormCam; int original_Height = imageread.Height; int original_Width = imageread.Width; CvInvoke.Resize(imageread, imageread, new Size(640, 480)); // Resize/ Interpolation of our image to reduce the complexity //---------Convert the image to grayscale and filter out the noise-------// Mat image = new Mat(); CvInvoke.CvtColor(imageread, image, ColorConversion.Bgr2Gray); //-----------------------------------------------------------------------// //------use image pyramid to remove noise----------// Mat lowerImage = new Mat(); Mat pyrDown = new Mat(); CvInvoke.PyrDown(image, pyrDown); CvInvoke.PyrUp(pyrDown, lowerImage); //------------------------------------------------// //----------unsharp masking to enhance contrast-------// Size ksize = new Size(3, 3); double alpha = 1.5; double beta = -0.5; double gamma = 0; CvInvoke.AddWeighted(image, alpha, lowerImage, beta, gamma, lowerImage); //-------------------------------------------------// //-----------Lower Image Specs-------------// int Height = lowerImage.Height; int Width = lowerImage.Width; //----------------------------------------// //----------Center of the image----------// double x_image = Width / 2; double y_image = Height / 2; #region Canny and edge detection Stopwatch watch = Stopwatch.StartNew(); //Time elapsed. It is useful for micro-benchmarks in code optimization. #region Read Canny Thresholds from User or take default double cannyThreshold, cannyThresholdLinking; try // read thresholds from user, if empty take default thresholds { cannyThreshold = double.Parse(textBox1.Text); cannyThresholdLinking = double.Parse(textBox2.Text); } catch (System.FormatException) { cannyThreshold = 120; cannyThresholdLinking = 60; } #endregion Mat cannyEdges = new Mat(); CvInvoke.Canny(lowerImage, cannyEdges, cannyThreshold, cannyThresholdLinking); // Canny Edge Detection watch.Stop(); msgBuilder.Append(String.Format("Canny - {0} ms; ", watch.ElapsedMilliseconds)); #endregion #region Find Approximate Rectangles watch.Reset(); watch.Start(); List <RotatedRect> boxList = new List <RotatedRect>(); // list of the Minimum Area Rectangles of contours List <VectorOfPoint> ListOfBestBoxContours = new List <VectorOfPoint>(); // list of our contours VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(); // A vector of vector of points Random rng = new Random(); Mat Dilated = new Mat(); Mat rect_6 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(5, 5), new Point(3, 3)); CvInvoke.Dilate(cannyEdges, Dilated, rect_6, new Point(-1, -1), 5, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue); // Dilate the image 5 times Mat Eroded = new Mat(); CvInvoke.Erode(Dilated, Eroded, rect_6, new Point(-1, -1), 4, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue); // Erode the image 4 times Mat cresult = new Mat(cannyEdges.Size, DepthType.Cv8U, 3); CvInvoke.FindContours(Eroded, contours, null, RetrType.List, ChainApproxMethod.ChainApproxNone); // Finds the contours of the image int count = contours.Size; for (int i = 0; i < count; i++) { using (VectorOfPoint contour = contours[i]) using (VectorOfPoint approxContour = new VectorOfPoint()) { CvInvoke.ApproxPolyDP(contour, approxContour, 5, true); if (CvInvoke.ContourArea(approxContour, true) > 6500) // only consider contours with area greater
than 6500 pixels { if (approxContour.Size > 3) //The contour has > 3 vertices. { ListOfBestBoxContours.Add(contour); // add candidate contour to the list boxList.Add(CvInvoke.MinAreaRect(approxContour)); // add the Minimum Area Rectangle of the candidate contour to the list } } } } #endregion try { #region find best rectangle and take its contour #region find the minArea,MaxArea,minDistance,maxDistance for the Rescaling double maxArea = 0; // maximum area amongst the rectangles representing the contours double minArea = 10000000000; // minimum area amongst the rectangles representing the contours double maxDistance = 0; // the distance of the rectangle which is farthest from the center of the frame double minDistance = 10000000000; // the distance of the rectangle which is closest to the center of the frame List <double> value = new List <double>(); //find the minArea,MaxArea,minDistance,maxDistance for the Rescaling for (int z = 0; z < boxList.Count; z++) { maxArea = Math.Max(maxArea, ((boxList[z].Size.Height) * (boxList[z].Size.Width))); //maxArea = max(maxArea,area[z]) minArea = Math.Min(minArea, ((boxList[z].Size.Height) * (boxList[z].Size.Width))); //minArea = min(minArea,area[z]) maxDistance = Math.Max(maxDistance, (Math.Pow(Math.Abs((boxList[z].Center.X - x_image)), 2) + Math.Pow(Math.Abs((boxList[z].Center.Y - y_image)), 2))); //maxDistance = max(maxDistance,DistanceFromTheCenterOfTheImage[z]) minDistance = Math.Min(minDistance, (Math.Pow(Math.Abs((boxList[z].Center.X - x_image)), 2) + Math.Pow(Math.Abs((boxList[z].Center.Y - y_image)), 2))); //minDistance = min(minDistance,DistanceFromTheCenterOfTheImage[z]) } #endregion #region Find the Values of each Rectangle foreach (RotatedRect y in boxList) { // Rescaled(Area[z]) = (Area[z] - MinArea )/(MaxArea-MinArea) , Rescaled(Area[z]) e [0,1] // Rescaled(DistanceFromCenterOfImage[z]) = (DistanceFromCenterOfImage[z] - minDistance )/(maxDistance-minDistance) , Rescaled(DistanceFromCenterOfImage[z]) e [0,1] // List of value = 0.5 * Rescaled(Area[z]) + 0.5 * (1-Rescaled(DistanceFromCenterOfImage[z])) value.Add(((((y.Size.Height) * (y.Size.Width)) - minArea) / (maxArea - minArea)) * 0.5 + 0.5 * (1 - (((Math.Pow(Math.Abs(y.Center.X - x_image), 2) + Math.Pow(Math.Abs(y.Center.Y - y_image), 2)) - minDistance) / (maxDistance - minDistance)))); } #endregion #region Insert in class BestBox and sort it by descending value List <BestBox> Best = new List <BestBox>(); //A Structure for finding the best contour. 
Definition on line 32 for (int i = 0; i < value.Count; i++) { // add the rectangle, its contour and its value Best.Add(new BestBox(boxList[i], value[i], ListOfBestBoxContours[i])); } Best.Sort(delegate(BestBox x, BestBox y) { //Sort Descending by value return(y.Value.CompareTo(x.Value)); }); #endregion #endregion Image <Bgr, Byte> Filling1 = lowerImage.ToImage <Bgr, Byte>().CopyBlank(); // a blank image to draw on and fill the best contour CvInvoke.FillConvexPoly(Filling1, Best[0].VectorsList, new MCvScalar(0, 0, 255)); // Fill the inside of the Best Contour to make it solid CvInvoke.Canny(Filling1, cannyEdges, cannyThreshold, cannyThresholdLinking); // Canny Edge Detection //clear the lists and structs boxList.Clear(); ListOfBestBoxContours.Clear(); Best.Clear(); value.Clear(); #region Hough Lines #region Compute Hough Lines on best contour region //------------------Compute HoughLines----------------// LineSegment2D[] lines = CvInvoke.HoughLinesP( cannyEdges, 1, //Distance resolution in pixel-related units Math.PI / 180, //Angle resolution measured in radians. 40, //threshold 0, //min Line width 15); //gap between lines //---------------------------------------------------// #endregion #region Extend the Hough Lines Point A = new Point(); Point B = new Point(); Point CB = new Point(); Point CA = new Point(); int length = 500; // extending the hough lines, allowing them to intersect with each other for (int i = 0; i < lines.Length; i++) { A = lines[i].P1; B = lines[i].P2; CB.X = (int)(B.X + (B.X - A.X) / lines[i].Length * length); CB.Y = (int)(B.Y + (B.Y - A.Y) / lines[i].Length * length); CA.X = (int)(A.X + (A.X - B.X) / lines[i].Length * length); CA.Y = (int)(A.Y + (A.Y - B.Y) / lines[i].Length * length); lines[i].P1 = CA; lines[i].P2 = CB; } #endregion #region Draw Hough Lines representing our area on a blank image Image <Bgr, Byte> DrawLines3 = lowerImage.ToImage <Bgr, Byte>().CopyBlank(); foreach (LineSegment2D line in lines) { DrawLines3.Draw(line, new Bgr(Color.White), 2); // Draw these lines on a blank image } CvInvoke.CvtColor(DrawLines3, DrawLines3, ColorConversion.Bgr2Gray); #endregion #endregion #region Find new Contours and Rectangle areas VectorOfVectorOfPoint contours1 = new VectorOfVectorOfPoint(); CvInvoke.FindContours(DrawLines3.Mat, contours1, null, RetrType.List, ChainApproxMethod.ChainApproxNone); //find the contours VectorOfPoint contour; count = contours1.Size; for (int i = 0; i < count; i++) { contour = contours1[i]; using (VectorOfPoint approxContour = new VectorOfPoint()) { CvInvoke.ApproxPolyDP(contour, approxContour, 5, true); if (CvInvoke.ContourArea(approxContour, true) > 6500) //only consider contours with area greater than 6500 { if (approxContour.Size > 3) //The contour has > 3 vertices. 
{ ListOfBestBoxContours.Add(contour); // add candidate contour to the list boxList.Add(CvInvoke.MinAreaRect(approxContour)); // add the Minimum Area Rectangle of the candidate contour to the list } } } } #endregion #region find best rectangle and take its contour maxArea = 0; minArea = 10000000000; maxDistance = 0; minDistance = 10000000000; #region find the minArea,MaxArea,minDistance,maxDistance for the Rescaling for (int z = 0; z < boxList.Count; z++) { maxArea = Math.Max(maxArea, ((boxList[z].Size.Height) * (boxList[z].Size.Width))); //maxArea = max(maxArea,area[z]) minArea = Math.Min(minArea, ((boxList[z].Size.Height) * (boxList[z].Size.Width))); //minArea = min(minArea,area[z]) maxDistance = Math.Max(maxDistance, (Math.Pow(Math.Abs((boxList[z].Center.X - x_image)), 2) + Math.Pow(Math.Abs((boxList[z].Center.Y - y_image)), 2))); //maxDistance = max(maxDistance,DistanceFromTheCenterOfTheImage[z]) minDistance = Math.Min(minDistance, (Math.Pow(Math.Abs((boxList[z].Center.X - x_image)), 2) + Math.Pow(Math.Abs((boxList[z].Center.Y - y_image)), 2))); //minDistance = min(minDistance,DistanceFromTheCenterOfTheImage[z]) } #endregion #region Find the Values of each Rectangle foreach (RotatedRect y in boxList) { // Rescaled(Area[z]) = (Area[z] - MinArea )/(MaxArea-MinArea) , Rescaled(Area[z]) e [0,1] // Rescaled(DistanceFromCenterOfImage[z]) = (DistanceFromCenterOfImage[z] - minDistance )/(maxDistance-minDistance) , Rescaled(DistanceFromCenterOfImage[z]) e [0,1] // List of value = 0.5 * Rescaled(Area[z]) + 0.5 * (1-Rescaled(DistanceFromCenterOfImage[z])) value.Add(((((y.Size.Height) * (y.Size.Width)) - minArea) / (maxArea - minArea)) * 0.5 + 0.5 * (1 - (((Math.Pow(Math.Abs(y.Center.X - x_image), 2) + Math.Pow(Math.Abs(y.Center.Y - y_image), 2)) - minDistance) / (maxDistance - minDistance)))); } #endregion #region Insert in class BestBox and sort it by descending value for (int i = 0; i < value.Count; i++) { // add the rectangle, its contour and its value Best.Add(new BestBox(boxList[i], value[i], ListOfBestBoxContours[i])); } Best.Sort(delegate(BestBox x, BestBox y) { //Sort Descending by value return(y.Value.CompareTo(x.Value)); }); #endregion #endregion contour = Best[0].VectorsList; //the first element is always the best contour region. This is the final desired area in the frame return(contour); } catch (Exception e) when(e is System.InvalidOperationException || e is System.ArgumentOutOfRangeException) { Mat result = imageread; return(null); } }
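The ranking computed twice above reduces to one score per box, 0.5 * normalizedArea + 0.5 * (1 - normalizedDistance), so large boxes near the frame centre win. Factored into a helper (a refactoring sketch, not part of the original):

// Score in [0,1]; minArea/maxArea and minDist/maxDist come from the first pass over boxList.
private static double BoxScore(RotatedRect box, double minArea, double maxArea,
                               double minDist, double maxDist, double xCenter, double yCenter)
{
    double area = box.Size.Height * box.Size.Width;
    double dist = Math.Pow(box.Center.X - xCenter, 2) + Math.Pow(box.Center.Y - yCenter, 2);
    double areaNorm = (area - minArea) / (maxArea - minArea);
    double distNorm = (dist - minDist) / (maxDist - minDist);
    return 0.5 * areaNorm + 0.5 * (1 - distNorm);
}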
private void ProcessFrame(object sender, EventArgs e) { Mat image = new Mat(); Mat diffImage = new Mat(); capture.Retrieve(image); if (lastImage != null) { CvInvoke.AbsDiff(image, lastImage, diffImage); } Image<Gray, byte> mask = new Image<Gray, byte>(image.Width, image.Height); if (lastImage != null) { VectorOfPoint vp = new VectorOfPoint(RegionOfInterestPoints.ToArray()); CvInvoke.Polylines(image, vp, true, new Bgr(0, 0, 255).MCvScalar, 2); if (vp.Size >= 3) { CvInvoke.FillConvexPoly(mask, vp, new MCvScalar(255)); overlayImage = new Mat((int)lastImage.Height, (int)lastImage.Width, DepthType.Cv8U, 3); diffImage.CopyTo(overlayImage, mask); byte[] data = new byte[overlayImage.Width * overlayImage.Height * 3]; GCHandle handle = GCHandle.Alloc(data, GCHandleType.Pinned); using (Mat m2 = new Mat(overlayImage.Size, DepthType.Cv8U, 3, handle.AddrOfPinnedObject(), overlayImage.Width * 3)) CvInvoke.BitwiseNot(overlayImage, m2); handle.Free(); CheckTrigger(data, overlayImage.Width, overlayImage.Height); } } if (FrameCallback != null) { FrameCallback.FrameUpdate( image, overlayImage ); } lastImage = image; }
/// <summary> /// Computes convex hull for a set of 2D points. /// </summary> /// <param name="points">The input 2D point set, represented by CV_32SC2 or CV_32FC2 matrix</param> /// <param name="clockwise">If true, the output convex hull will be oriented clockwise, /// otherwise it will be oriented counter-clockwise. Here, the usual screen coordinate /// system is assumed - the origin is at the top-left corner, x axis is oriented to the right, /// and y axis is oriented downwards.</param> /// <returns>The output convex hull. It is a vector of points that form /// the hull (must have the same type as the input points).</returns> public static Point[] ConvexHull(IEnumerable<Point> points, bool clockwise = false) { if (points == null) throw new ArgumentNullException("points"); Point[] pointsArray = EnumerableEx.ToArray(points); IntPtr hullPtr; NativeMethods.imgproc_convexHull_Point_ReturnsPoints(pointsArray, pointsArray.Length, out hullPtr, clockwise ? 1 : 0); using (var hullVec = new VectorOfPoint(hullPtr)) { return hullVec.ToArray(); } }
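A short usage sketch for the wrapper above; the points are arbitrary sample data, and with clockwise left at false the hull comes back counter-clockwise:

Point[] cloud =
{
    new Point(0, 0), new Point(10, 0), new Point(10, 10),
    new Point(0, 10), new Point(5, 5) // interior point, dropped from the hull
};
Point[] hull = ConvexHull(cloud);
foreach (Point p in hull)
    Console.WriteLine(p); // the four corner points remain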
private void ExtractContourAndHull(Image <Bgr, byte> originalImage, Image <Gray, byte> skin) { var contours = new VectorOfVectorOfPoint(); CvInvoke.FindContours(skin, contours, new Mat(), RetrType.List, ChainApproxMethod.ChainApproxSimple); var result2 = 0; VectorOfPoint biggestContour = null; if (contours.Size != 0) { biggestContour = contours[0]; } for (var i = 0; i < contours.Size; i++) { var result1 = contours[i].Size; if (result1 <= result2) { continue; } result2 = result1; biggestContour = contours[i]; } if (biggestContour == null) { return; } currentContour = new VectorOfPoint(); CvInvoke.ApproxPolyDP(biggestContour, currentContour, 0, true); //TODO: find out why this throws an exception //ImageFrame.Draw(biggestContour, 3, new Bgr(Color.LimeGreen)); biggestContour = currentContour; var pointsToFs = new PointF[currentContour.Size]; for (var i = 0; i < currentContour.Size; i++) { pointsToFs[i] = new PointF(currentContour[i].X, currentContour[i].Y); } var hull = CvInvoke.ConvexHull(pointsToFs, true); pointsToFs = new PointF[biggestContour.Size]; for (var i = 0; i < biggestContour.Size; i++) { pointsToFs[i] = new PointF(biggestContour[i].X, biggestContour[i].Y); } box = CvInvoke.MinAreaRect(pointsToFs); var points = box.GetVertices(); var ps = new Point[points.Length]; for (var i = 0; i < points.Length; i++) { ps[i] = new Point((int)points[i].X, (int)points[i].Y); } var hullToPoints = new Point[hull.Length]; for (var i = 0; i < hull.Length; i++) { hullToPoints[i] = Point.Round(hull[i]); } originalImage.DrawPolyline(hullToPoints, true, new Bgr(200, 125, 75), 2); originalImage.Draw(new CircleF(new PointF(box.Center.X, box.Center.Y), 3), new Bgr(200, 125, 75), 2); var convexHull = new VectorOfInt(); CvInvoke.ConvexHull(currentContour, convexHull, false, false); defects = new Mat(); CvInvoke.ConvexityDefects(currentContour, convexHull, defects); if (!defects.IsEmpty) { Matrix <int> m = new Matrix <int>(defects.Rows, defects.Cols, defects.NumberOfChannels); // copy Mat to a matrix... defects.CopyTo(m); Matrix <int>[] channels = m.Split(); if (channels.Length >= 3) // the depth channel (index 2) must exist before it is read { startIndex = channels.ElementAt(0).Data; endIndex = channels.ElementAt(1).Data; depthIndex = channels.ElementAt(2).Data; } } }
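For context on the channel split at the end: OpenCV packs each convexity defect as four integers (start index, end index, farthest-point index, and depth scaled by 256), which is why the Mat copies into a four-channel Matrix<int>. A sketch reading all four fields per defect:

// defects is N x 1 with 4 channels: (start, end, farthest, fixed-point depth)
Matrix<int> m = new Matrix<int>(defects.Rows, defects.Cols, defects.NumberOfChannels);
defects.CopyTo(m);
for (int i = 0; i < m.Rows; i++)
{
    Point start = currentContour[m.Data[i, 0]];
    Point end = currentContour[m.Data[i, 1]];
    Point farthest = currentContour[m.Data[i, 2]];
    double depth = m.Data[i, 3] / 256.0; // depth is stored as a fixed-point value
}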
private void ProcessFrame(object sender, EventArgs e) { try { Mat imageHSV = new Mat(); Mat imageGray = new Mat(); Mat imageBlured = new Mat(); fluxVideo.Retrieve(image, 0); CvInvoke.CvtColor(image, imageGray, ColorConversion.Bgr2Gray); CvInvoke.GaussianBlur(image, imageBlured, new Size(3, 3), 10); CvInvoke.CvtColor(imageBlured, imageHSV, ColorConversion.Bgr2Hsv); Image <Hsv, byte> imgHSV = imageHSV.ToImage <Hsv, byte>(); imgHSV = imgHSV.Flip(FlipType.Horizontal); Image <Gray, byte> imgGray = imgHSV.InRange(new Hsv(min.x, min.y, min.z), new Hsv(max.x, max.y, max.z)); Mat structElement = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(5, 5), new Point(-1, -1)); CvInvoke.Erode(imgGray, imgGray, structElement, new Point(-1, -1), 2, BorderType.Constant, new MCvScalar(0)); //contour detection VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(); Mat m = new Mat(); CvInvoke.FindContours(imgGray, contours, m, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple); for (int i = 0; i < contours.Size; i++) { double perimeter = CvInvoke.ArcLength(contours[i], true); VectorOfPoint approx = new VectorOfPoint(); CvInvoke.ApproxPolyDP(contours[i], approx, 0.04 * perimeter, true); CvInvoke.DrawContours(image, contours, i, new MCvScalar(0, 0, 255)); if (approx.Size == 4) { isRectangle = true; } if (approx.Size == 3) { if (perimeter > 100) { isTriangle = true; } else { isTriangle = false; } } if (approx.Size > 4) { isCircle = true; } } if (!isTriangle) { zones[0].GetComponent <Zone>().SetIsValid(true); zones[0].GetComponent <MeshRenderer>().material = validMat; } else { zones[0].GetComponent <Zone>().SetIsValid(false); zones[0].GetComponent <MeshRenderer>().material = notValidMat; } if (!isRectangle) { zones[1].GetComponent <Zone>().SetIsValid(true); zones[1].GetComponent <MeshRenderer>().material = validMat; } else { zones[1].GetComponent <Zone>().SetIsValid(false); zones[1].GetComponent <MeshRenderer>().material = notValidMat; } if (!isCircle) { zones[2].GetComponent <Zone>().SetIsValid(true); zones[2].GetComponent <MeshRenderer>().material = validMat; } else { zones[2].GetComponent <Zone>().SetIsValid(false); zones[2].GetComponent <MeshRenderer>().material = notValidMat; } if (!isWaiting) { StartCoroutine(wait()); } Image <Bgra, byte> imgToDisplay = new Image <Bgra, byte>(imgGray.Width, imgGray.Height); CvInvoke.CvtColor(imgGray, imgToDisplay, ColorConversion.Gray2Bgra); tex.LoadRawTextureData(imgToDisplay.Bytes); tex.Apply(); imageCameraBinaire.sprite = Sprite.Create(tex, new Rect(0.0f, 0.0f, tex.width, tex.height), new Vector2(0.5f, 0.5f), 1.0f); Image <Bgra, byte> imgToDisplayReal = new Image <Bgra, byte>(image.Width, image.Height); CvInvoke.CvtColor(image, imgToDisplayReal, ColorConversion.Bgr2Bgra); tex2.LoadRawTextureData(imgToDisplayReal.Bytes); tex2.Apply(); imageCameraReelle.sprite = Sprite.Create(tex2, new Rect(0.0f, 0.0f, tex2.width, tex2.height), new Vector2(0.5f, 0.5f), 1.0f); //CvInvoke.Imshow("Cam view", imgGray); } catch (Exception exception) { Debug.Log(exception.Message); } }
private void FindStopSign(Mat img, List<Mat> stopSignList, List<Rectangle> boxList, VectorOfVectorOfPoint contours, int[,] hierachy, int idx) { for (; idx >= 0; idx = hierachy[idx, 0]) { using (VectorOfPoint c = contours[idx]) using (VectorOfPoint approx = new VectorOfPoint()) { CvInvoke.ApproxPolyDP(c, approx, CvInvoke.ArcLength(c, true) * 0.02, true); double area = CvInvoke.ContourArea(approx); if (area > 200) { double ratio = CvInvoke.MatchShapes(_octagon, approx, Emgu.CV.CvEnum.ContoursMatchType.I3); if (ratio > 0.1) //not a good match of contour shape { //check children if (hierachy[idx, 2] >= 0) FindStopSign(img, stopSignList, boxList, contours, hierachy, hierachy[idx, 2]); continue; } Rectangle box = CvInvoke.BoundingRectangle(c); Mat candidate = new Mat(); using (Mat tmp = new Mat(img, box)) CvInvoke.CvtColor(tmp, candidate, ColorConversion.Bgr2Gray); //set the value of pixels not in the contour region to zero using (Mat mask = new Mat(candidate.Size.Height, candidate.Width, DepthType.Cv8U, 1)) { mask.SetTo(new MCvScalar(0)); CvInvoke.DrawContours(mask, contours, idx, new MCvScalar(255), -1, LineType.EightConnected, null, int.MaxValue, new Point(-box.X, -box.Y)); double mean = CvInvoke.Mean(candidate, mask).V0; CvInvoke.Threshold(candidate, candidate, mean, 255, ThresholdType.Binary); CvInvoke.BitwiseNot(candidate, candidate); CvInvoke.BitwiseNot(mask, mask); candidate.SetTo(new MCvScalar(0), mask); } int minMatchCount = 8; double uniquenessThreshold = 0.8; VectorOfKeyPoint _observeredKeypoint = new VectorOfKeyPoint(); Mat _observeredDescriptor = new Mat(); _detector.DetectAndCompute(candidate, null, _observeredKeypoint, _observeredDescriptor, false); if (_observeredKeypoint.Size >= minMatchCount) { int k = 2; Mat mask; using (VectorOfVectorOfDMatch matches = new VectorOfVectorOfDMatch()) { _modelDescriptorMatcher.KnnMatch(_observeredDescriptor, matches, k, null); mask = new Mat(matches.Size, 1, DepthType.Cv8U, 1); mask.SetTo(new MCvScalar(255)); Features2DToolbox.VoteForUniqueness(matches, uniquenessThreshold, mask); } int nonZeroCount = CvInvoke.CountNonZero(mask); if (nonZeroCount >= minMatchCount) { boxList.Add(box); stopSignList.Add(candidate); } } } } } }
//Mat _frame public int ObtenerFiguras(Mat _frame, string nombrePdf, int page) { Console.WriteLine("Entro en Metodo obtener figuras del dll"); int resultado = 0; //resultados.Clear(); //if (_frame.IsEmpty) //{ //return; //} Mat finalFrame = new Mat(); Mat aux = new Mat(); Mat aux2 = new Mat(); Mat aux3 = new Mat(); Image <Bgr, byte> img = _frame.ToImage <Bgr, byte>(); //Transform to the HSV color space Image <Hsv, Byte> hsvimg = img.Convert <Hsv, Byte>(); //extract the hue and value channels Image <Gray, Byte>[] channels = hsvimg.Split(); //split into components Image <Gray, Byte> imghue = channels[0]; //hsv, channels[0] is hue. Image <Gray, Byte> imgval = channels[2]; //hsv, channels[2] is value. //Color filter //140 and up //160 works Image <Gray, byte> huefilter = imghue.InRange(new Gray(150), new Gray(255)); //Filter out the less bright colors Image <Gray, byte> valfilter = imgval.InRange(new Gray(100), new Gray(255)); //Saturation filter - remove whites channels[1]._ThresholdBinary(new Gray(20), new Gray(255)); // Saturation //Combine the filters to obtain the image Image <Gray, byte> colordetimg = huefilter.And(valfilter).And(channels[1]); //there used to be a Not() here //pictureBox2.Image = colordetimg.Bitmap; //colordetimg._Erode(1); //2 and 4 Mat SE2 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 2), new System.Drawing.Point(1, 1)); CvInvoke.MorphologyEx(colordetimg, aux, MorphOp.Erode, SE2, new System.Drawing.Point(-1, -1), 2, BorderType.Default, new MCvScalar(255)); Mat SE3 = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 3), new System.Drawing.Point(1, 1)); CvInvoke.MorphologyEx(aux, aux2, MorphOp.Dilate, SE3, new System.Drawing.Point(-1, -1), 3, BorderType.Replicate, new MCvScalar(255)); Mat SE = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new Size(3, 3), new System.Drawing.Point(-1, -1)); CvInvoke.MorphologyEx(aux2, aux3, Emgu.CV.CvEnum.MorphOp.Close, SE, new System.Drawing.Point(-1, -1), 5, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255)); CvInvoke.MorphologyEx(aux3, aux3, Emgu.CV.CvEnum.MorphOp.Open, SE, new System.Drawing.Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Reflect, new MCvScalar(255)); _frame.CopyTo(finalFrame); // DetectarFiguras(aux3, finalFrame); // System.Drawing.Rectangle box = new System.Drawing.Rectangle(); Image <Bgr, byte> temp = aux3.ToImage <Bgr, byte>(); //Image<Bgr, Byte> buffer_im = displayFrame.ToImage<Bgr, Byte>(); //float a = buffer_im.Width; //float b = buffer_im.Height; //MessageBox.Show("El tamano es "+ a.ToString()+" y " + b.ToString()); rect.Clear(); circleList.Clear(); ellipseList.Clear(); //transform the image //UMat uimage = new UMat(); // CvInvoke.CvtColor(displayFrame, uimage, ColorConversion.Bgr2Gray); using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint()) { Image <Bgr, Byte> resultadoFinal = finalFrame.ToImage <Bgr, byte>(); double maxArea = 1000; //int chosen = 0; // VectorOfPoint contour = null; //Draw the red border var temp2 = temp.SmoothGaussian(5).Convert <Gray, byte>().ThresholdBinary(new Gray(20), new Gray(255)); VectorOfVectorOfPoint contorno = new VectorOfVectorOfPoint(); Mat mat = new Mat(); CvInvoke.FindContours(temp2, contorno, mat, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple); for (int i = 0; i < contorno.Size; i++) { //VectorOfPoint approxContour = new VectorOfPoint(); double perimetro = CvInvoke.ArcLength(contorno[i], true); VectorOfPoint approx = new VectorOfPoint(); // VectorOfPointF approxF = new VectorOfPointF(); double area = CvInvoke.ContourArea(contorno[i]); if (area > maxArea) { CvInvoke.ApproxPolyDP(contorno[i], approx, 0.04 * perimetro, true); //Get the centers of the shapes var moments = CvInvoke.Moments(contorno[i]); int x = (int)(moments.M10 / moments.M00); int y = (int)(moments.M01 / moments.M00); resultados.Add(approx); if (approx.Size == 3) //The contour has 3 vertices, it is a triangle { RotatedRect rectangle = CvInvoke.MinAreaRect(approx); CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(255, 255, 255), 1, LineType.AntiAlias); resultadoFinal.Draw(rectangle, new Bgr(System.Drawing.Color.Cyan), 1); rect.Add(CvInvoke.BoundingRectangle(approx)); } if (approx.Size == 4) //The contour has 4 vertices. { //RotatedRect tt = new RotatedRect(CvInvoke.MinAreaRect(approx).Center, CvInvoke.MinAreaRect(approx).Size, 270) ; //boxList.Add(tt); //Check whether it is a square System.Drawing.Rectangle rectAux = CvInvoke.BoundingRectangle(contorno[i]); double ar = (double)rectAux.Width / rectAux.Height; //Compute the circularity double perimetro2 = CvInvoke.ArcLength(contorno[i], true); double area2 = CvInvoke.ContourArea(contorno[i]); double circularidad = 4 * Math.PI * area2 / Math.Pow(perimetro2, 2); //MessageBox.Show("circularidad rect " + circularidad); if (circularidad > 0.69) { //If the circularity > 0.6 and the proportions fit, it is a square if (ar >= 0.8 && ar <= 1.0) { // MessageBox.Show("Cuadrado "); RotatedRect rectangle = CvInvoke.MinAreaRect(contorno[i]); CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(255, 255, 255), 1, LineType.AntiAlias); resultadoFinal.Draw(rectangle, new Bgr(System.Drawing.Color.Cyan), 1); CvInvoke.PutText(resultadoFinal, "Rectangle", new System.Drawing.Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 255, 255), 2); rect.Add(CvInvoke.BoundingRectangle(approx)); } //It is an ellipse else { //MessageBox.Show("parecia rectangulo pero era elipse "); Ellipse final_ellipse = new Ellipse(CvInvoke.MinAreaRect(contorno[i]).Center, CvInvoke.MinAreaRect(contorno[i]).Size, 0); Ellipse final_ellipseDibujo = new Ellipse(CvInvoke.MinAreaRect(contorno[i]).Center, CvInvoke.MinAreaRect(contorno[i]).Size, 90); ellipseList.Add(final_ellipse); //IConvexPolygonF poligono = CvInvoke.MinAreaRect(approx); //resultadoFinal.Draw(poligono, new Bgr(Color.Cyan), 1); resultadoFinal.Draw(final_ellipseDibujo, new Bgr(System.Drawing.Color.Cyan), 1); CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(255, 255, 255), 1, LineType.AntiAlias); CvInvoke.PutText(resultadoFinal, "Figura circular", new System.Drawing.Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 255, 255), 2); } } //It is a rectangle else { RotatedRect rectangle = CvInvoke.MinAreaRect(contorno[i]); CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(255, 255, 255), 1, LineType.AntiAlias); resultadoFinal.Draw(rectangle, new Bgr(System.Drawing.Color.Cyan), 1); CvInvoke.PutText(resultadoFinal, "Rectangle", new System.Drawing.Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 255, 255), 2); rect.Add(CvInvoke.BoundingRectangle(approx)); } } if (approx.Size >= 5) { //double perimetro2 = CvInvoke.ArcLength(contorno[i], true); //double area2 = CvInvoke.ContourArea(contorno[i]); // double circularidad = 4 * Math.PI * area2 / Math.Pow(perimetro2, 2); //MessageBox.Show("circularidad elipse " + circularidad); Ellipse final_ellipse = new
Ellipse(CvInvoke.MinAreaRect(contorno[i]).Center, CvInvoke.MinAreaRect(contorno[i]).Size, 0); Ellipse final_ellipseDibujo = new Ellipse(CvInvoke.MinAreaRect(contorno[i]).Center, CvInvoke.MinAreaRect(contorno[i]).Size, 90); ellipseList.Add(final_ellipse); resultadoFinal.Draw(final_ellipseDibujo, new Bgr(System.Drawing.Color.Cyan), 1); CvInvoke.DrawContours(resultadoFinal, contorno, i, new MCvScalar(255, 255, 255), 1, LineType.AntiAlias); CvInvoke.PutText(resultadoFinal, "Figura circular", new System.Drawing.Point(x, y), Emgu.CV.CvEnum.FontFace.HersheySimplex, 0.5, new MCvScalar(0, 255, 255), 2); } } } //pictureBox2.Image = resultadoFinal.Bitmap; //button2.Enabled = true; } if (rect.Count != 0 || ellipseList.Count != 0) { resultado = 1; Console.WriteLine("Rect: " + rect.Count + " y elipse: " + ellipseList.Count); Console.WriteLine("llamare a guardar figuras nombrepdf " + nombrePdf); GuardarFiguras(nombrePdf, page); } else { resultado = 0; } return(resultado); }
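// The square/ellipse split above hinges on circularity = 4*pi*area / perimeter^2:
// a perfect circle scores 1.0 and a square about pi/4 ~ 0.785, so the 0.69 cutoff
// accepts both and relies on the aspect-ratio check to tell them apart. A minimal
// helper capturing that test (the name Circularity is mine, not from the original):
static double Circularity(VectorOfPoint contour)
{
    double perimeter = CvInvoke.ArcLength(contour, true);
    double area = CvInvoke.ContourArea(contour);
    return 4 * Math.PI * area / (perimeter * perimeter);
}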
public void imageHanding()
{
    if (SoureceImage != null)
    {
        try
        {
            Mat img = SoureceImage.Mat;
            Image<Gray, byte> otsu = SoureceImage.CopyBlank();

            //Convert to grayscale (disabled)
            //CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Gray);

            //Median blur (removes salt-and-pepper noise)
            CvInvoke.MedianBlur(img, otsu, 3);

            //Otsu binarization
            CvInvoke.Threshold(otsu, img, 0, 255, ThresholdType.Otsu);

            //Gaussian blur
            CvInvoke.GaussianBlur(img, img, new Size(3, 3), 0);

            //Morphological gradient
            Mat StructingElement = CvInvoke.GetStructuringElement(ElementShape.Ellipse, new Size(5, 5), new Point(-1, -1));
            CvInvoke.MorphologyEx(img, img, MorphOp.Gradient, StructingElement, new Point(-1, -1), 6, BorderType.Default, new MCvScalar(0));

            //Edge detection
            CvInvoke.Canny(img, img, 100, 200);

            List<RotatedRect> boxList = new List<RotatedRect>();
            List<int> index = new List<int>();
            using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
            {
                //Find the contours
                CvInvoke.FindContours(img, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
                int count = contours.Size;
                for (int i = 0; i < count; i++)
                {
                    using (VectorOfPoint contour = contours[i])
                    using (VectorOfPoint approxContour = new VectorOfPoint())
                    {
                        CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                        if (CvInvoke.ContourArea(approxContour, false) > 200)
                        {
                            if (approxContour.Size == 4)
                            {
                                //keep only quadrilaterals whose corner angles are all within [80, 100] degrees
                                bool isRectangle = true;
                                Point[] pts = approxContour.ToArray();
                                LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                                for (int j = 0; j < edges.Length; j++)
                                {
                                    double angle = Math.Abs(edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                                    if (angle < 80 || angle > 100)
                                    {
                                        isRectangle = false;
                                        break;
                                    }
                                }
                                if (isRectangle)
                                {
                                    boxList.Add(CvInvoke.MinAreaRect(approxContour));
                                }
                            }
                        }
                    }
                }
            }

            Image<Bgr, Byte> lineImage = SoureceImage.Convert<Bgr, byte>().CopyBlank();
            Image<Gray, byte> testimg = SoureceImage.CopyBlank();
            foreach (RotatedRect box in boxList)
            {
                lineImage.Draw(box, new Bgr(Color.DarkOrange), 2);
                PointF[] vertices = box.GetVertices();
                Rectangle brect = box.MinAreaRect();
                lineImage.Draw(brect, new Bgr(Color.DarkOrange), 2);
            }

            //crop the second detected box, then rotate it upright
            otsu.ROI = boxList[1].MinAreaRect();
            testimg = otsu.Clone();
            otsu.ROI = Rectangle.Empty;

            Mat mapMatrix = new Mat();
            PointF poi = new PointF(testimg.Size.Width / 2, testimg.Size.Height / 2);
            CvInvoke.GetRotationMatrix2D(poi, boxList[1].Angle, 1.2, mapMatrix);
            CvInvoke.WarpAffine(testimg, testimg, mapMatrix, testimg.Size);
            SoureceImage.Bitmap = testimg.Bitmap;
            //ShowFormImage();
        }
        catch (Exception)
        {
            SimpleStatus.Image = Resources.SimpleState_False;
            mRuntime.Text = "Runtime: " + 0 + " ms";
            ShowFormImage();
        }
    }
}
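// The final deskew step above, in isolation: rotate a cropped patch about its center
// with GetRotationMatrix2D + WarpAffine. A minimal sketch (scale fixed at 1.0 here,
// where the code above uses 1.2; the helper name is mine):
static Image<Gray, byte> RotateUpright(Image<Gray, byte> src, double angleDegrees)
{
    PointF center = new PointF(src.Width / 2f, src.Height / 2f);
    using (Mat rot = new Mat())
    {
        //2x3 affine matrix for a rotation of angleDegrees about the patch center
        CvInvoke.GetRotationMatrix2D(center, angleDegrees, 1.0, rot);
        Image<Gray, byte> dst = src.CopyBlank();
        CvInvoke.WarpAffine(src, dst, rot, src.Size);
        return dst;
    }
}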
public static VectorOfVectorOfPoint DetectEdges(UIImage myImage, double th1, double th2, int aperture, bool value)
{
    //Load the image from file and resize it for display
    Image<Bgr, Byte> img = new Image<Bgr, byte>(myImage.CGImage); //.Resize(400, 400, Emgu.CV.CvEnum.Inter.Linear, true);

    //Convert the image to grayscale and filter out the noise
    UMat uimage = new UMat();
    CvInvoke.CvtColor(img, uimage, ColorConversion.Bgr2Gray);

    //use image pyr to remove noise
    UMat pyrDown = new UMat();
    CvInvoke.PyrDown(uimage, pyrDown);
    CvInvoke.PyrUp(pyrDown, uimage);

    #region circle detection (disabled)
    double cannyThreshold = th1;
    //double circleAccumulatorThreshold = 120;
    //CircleF[] circles = CvInvoke.HoughCircles(uimage, HoughType.Gradient, 2.0, 20.0, cannyThreshold, circleAccumulatorThreshold, 5);
    #endregion

    #region Canny and edge detection
    double cannyThresholdLinking = th2;
    UMat cannyEdges = new UMat();
    CvInvoke.Canny(uimage, cannyEdges, cannyThreshold, cannyThresholdLinking, aperture, true);

    //find all contours, then keep only those with more than 3000 points
    VectorOfVectorOfPoint contourEdges = new VectorOfVectorOfPoint();
    UMat hierarchy = new UMat();
    CvInvoke.FindContours(cannyEdges, contourEdges, hierarchy, 0, ChainApproxMethod.ChainApproxNone);
    VectorOfVectorOfPoint newContourEdges = new VectorOfVectorOfPoint();
    for (int i = 0; i < contourEdges.Size; i++)
    {
        if (contourEdges[i].Size > 3000)
        {
            newContourEdges.Push(contourEdges[i]);
        }
    }
    contourEdges.Dispose();

    //merge all of the remaining contours into one point set
    //(note: contour 0's points are pushed twice, once at creation and once in the loop)
    VectorOfVectorOfPoint temp = new VectorOfVectorOfPoint();
    temp.Push(newContourEdges[0]);
    for (int i = 0; i < newContourEdges.Size; i++)
    {
        temp[0].Push(newContourEdges[i].ToArray());
    }

    //take the convex hull of the merged point set
    VectorOfVectorOfPoint hull = new VectorOfVectorOfPoint(1);
    CvInvoke.ConvexHull(temp[0], hull[0], true);
    #endregion

    //Disabled experiments: HoughLinesP line detection (rho = 1, theta = pi/45,
    //threshold = 20, min line width = 30, gap = 5), a triangle/rectangle detector
    //(ApproxPolyDP at 5% of arc length, area > 250, all corner angles within
    //[80, 100] degrees), and drawing of the detected triangles, rectangles,
    //circles and lines onto the image.

    return value ? hull : newContourEdges;
}
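// A hypothetical caller from Xamarin.iOS (the file name and threshold values are
// illustrative only, not from the original code):
UIImage photo = UIImage.FromFile("sample.jpg");
using (VectorOfVectorOfPoint outline = DetectEdges(photo, 50, 150, 3, true))
{
    //with value == true, element 0 is the convex hull of the merged long contours
    Console.WriteLine("hull points: " + outline[0].Size);
}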
/// <summary>
/// evaluate specified ROI and return confidence value for each location
/// </summary>
/// <param name="img">source image to evaluate</param>
/// <param name="locations">candidate window locations to test</param>
/// <param name="foundLocations">locations whose confidence passed the hit threshold</param>
/// <param name="confidences">confidence value for each evaluated location</param>
/// <param name="hitThreshold">threshold for the distance between features and the SVM classifying plane</param>
/// <param name="winStride">window stride; must be a multiple of the block stride</param>
/// <param name="padding">padding added around the detection window</param>
public void DetectROI(
    Mat img, Point[] locations,
    out Point[] foundLocations, out double[] confidences,
    double hitThreshold = 0, Size? winStride = null, Size? padding = null)
{
    if (disposed)
        throw new ObjectDisposedException("HOGDescriptor");
    if (img == null)
        throw new ArgumentNullException("img");
    if (locations == null)
        throw new ArgumentNullException("locations");
    img.ThrowIfDisposed();

    Size winStride0 = winStride.GetValueOrDefault(new Size());
    Size padding0 = padding.GetValueOrDefault(new Size());
    using (var flVec = new VectorOfPoint())
    using (var cVec = new VectorOfDouble())
    {
        NativeMethods.objdetect_HOGDescriptor_detectROI(
            ptr, img.CvPtr, locations, locations.Length,
            flVec.CvPtr, cVec.CvPtr, hitThreshold, winStride0, padding0);
        foundLocations = flVec.ToArray();
        confidences = cVec.ToArray();
    }
}
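// A minimal usage sketch for the wrapper above (assumes the OpenCvSharp-style
// HOGDescriptor it belongs to, loaded with the default people detector; the file
// name and the 32-px grid stride are assumptions for illustration):
using (var hog = new HOGDescriptor())
using (var img = new Mat("pedestrians.png", ImreadModes.Color))
{
    hog.SetSVMDetector(HOGDescriptor.GetDefaultPeopleDetector());

    //evaluate a coarse grid of candidate window positions
    var locations = new List<Point>();
    for (int y = 0; y + hog.WinSize.Height <= img.Rows; y += 32)
        for (int x = 0; x + hog.WinSize.Width <= img.Cols; x += 32)
            locations.Add(new Point(x, y));

    hog.DetectROI(img, locations.ToArray(), out Point[] found, out double[] conf);
    for (int i = 0; i < found.Length; i++)
        Console.WriteLine(found[i] + " -> " + conf[i]);
}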
public static Tuple<VectorOfPoint, double> MarkDetection(Image<Gray, byte> ImgBinary, VectorOfPoint Template)
{
    VectorOfPoint mark = null;
    double crScore = 1;
    double areaTemplate = CvInvoke.ContourArea(Template);
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(ImgBinary, contours, null, Emgu.CV.CvEnum.RetrType.Ccomp, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);
        for (int i = 0; i < contours.Size; i++)
        {
            //shape similarity to the template (lower is better)
            double scoreMatching = CvInvoke.MatchShapes(Template, contours[i], Emgu.CV.CvEnum.ContoursMatchType.I3);

            //area similarity, mapped so that 0 means identical areas
            double scoreCurrent = CvInvoke.ContourArea(contours[i]);
            double scoreArea = Math.Min(areaTemplate, scoreCurrent) / Math.Max(areaTemplate, scoreCurrent);
            scoreArea = 1 - scoreArea;

            //overall score is the worse of the two; keep the best-scoring contour
            double score = Math.Max(scoreMatching, scoreArea);
            if (score < crScore)
            {
                if (mark != null)
                {
                    mark.Dispose();
                    mark = null;
                }
                crScore = score;
                mark = new VectorOfPoint(contours[i].ToArray());
            }
        }
    }
    return new Tuple<VectorOfPoint, double>(mark, crScore);
}
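// A usage sketch for MarkDetection (file names, the 128 binarization level and the
// 0.3 acceptance threshold are assumptions, not part of the original code):
VectorOfPoint template;
using (Image<Gray, byte> tpl = new Image<Gray, byte>("template.png").ThresholdBinary(new Gray(128), new Gray(255)))
using (VectorOfVectorOfPoint tplContours = new VectorOfVectorOfPoint())
{
    //take the first external contour of the template image as the reference shape
    CvInvoke.FindContours(tpl, tplContours, null, RetrType.External, ChainApproxMethod.ChainApproxSimple);
    template = new VectorOfPoint(tplContours[0].ToArray());
}

Image<Gray, byte> frame = new Image<Gray, byte>("frame.png").ThresholdBinary(new Gray(128), new Gray(255));
Tuple<VectorOfPoint, double> best = MarkDetection(frame, template);
if (best.Item1 != null && best.Item2 < 0.3)
{
    Rectangle where = CvInvoke.BoundingRectangle(best.Item1);
    Console.WriteLine("mark found at " + where + ", score " + best.Item2);
}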
// get all of the valid contour maps; valid means area >= Constants.MIN_AREA
// (this was not in their code; I added this feature using their logic)
public static List<ColorfulContourMap> getAllContourMap(Mat input, int index, int mode = 0)
{
    List<ColorfulContourMap> result = new List<ColorfulContourMap>();
    MatImage m1 = new MatImage(input);
    m1.Convert();
    Mat gray = m1.Out();

    // mode 0: black background; otherwise: white background
    if (mode == 0)
    {
        MatImage m2 = new MatImage(gray);
        m2.SmoothGaussian(3);
        m2.ThresholdBinaryInv(245, 255);
        gray = m2.Out();
    }
    else
    {
        MatImage m2 = new MatImage(gray);
        m2.SmoothGaussian(3);
        m2.ThresholdBinaryInv(100, 255);
        gray = m2.Out();
    }

    // per-contour working lists
    List<Point> pointList = new List<Point>();
    List<Point> polyPointList = new List<Point>();
    List<ColorfulPoint> cps = new List<ColorfulPoint>();
    List<ColorfulPoint> pcps = new List<ColorfulPoint>();

    // fetch all the contours and their polygon approximations using Emgu CV,
    // then extract the points and their colors
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(gray, contours, new Mat(), RetrType.List, ChainApproxMethod.ChainApproxNone);
    for (int i = 0; i < contours.Size; i++)
    {
        double area = Math.Abs(CvInvoke.ContourArea(contours[i], false));
        if (area >= Constants.MIN_AREA)
        {
            VectorOfPoint contour = contours[i];
            VectorOfPoint poly = new VectorOfPoint();
            CvInvoke.ApproxPolyDP(contour, poly, 1.0, true);
            pointList = contour.ToArray().ToList();
            polyPointList = poly.ToArray().ToList();
            foreach (Point p in pointList)
            {
                cps.Add(new ColorfulPoint { X = p.X, Y = p.Y, color = extractPointColor(p, input) });
            }
            foreach (Point p in polyPointList)
            {
                pcps.Add(new ColorfulPoint { X = p.X, Y = p.Y, color = extractPointColor(p, input) });
            }
            result.Add(new ColorfulContourMap(cps, pcps, index));
            // reset the per-contour lists
            pointList = new List<Point>();
            polyPointList = new List<Point>();
            cps = new List<ColorfulPoint>();
            pcps = new List<ColorfulPoint>();
        }
    }
    return result;
}
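// extractPointColor is referenced above but not shown; a minimal hypothetical
// version (assumes an 8-bit BGR input Mat and performs no bounds checking):
static Color extractPointColor(Point p, Mat input)
{
    using (Image<Bgr, byte> img = input.ToImage<Bgr, byte>())
    {
        // read the pixel under the contour point and convert it to a System.Drawing.Color
        Bgr c = img[p.Y, p.X];
        return Color.FromArgb((int)c.Red, (int)c.Green, (int)c.Blue);
    }
}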
private void FindLicensePlate(
    VectorOfVectorOfPoint contours, int[,] hierachy, int idx, IInputArray gray, IInputArray canny,
    List<IInputOutputArray> licensePlateImagesList, List<IInputOutputArray> filteredLicensePlateImagesList,
    List<RotatedRect> detectedLicensePlateRegionList, List<String> licenses)
{
    for (; idx >= 0; idx = hierachy[idx, 0])
    {
        int numberOfChildren = GetNumberOfChildren(hierachy, idx);
        //if it does not contain any children (characters), it is not a license plate region
        if (numberOfChildren == 0)
        {
            continue;
        }

        using (VectorOfPoint contour = contours[idx])
        {
            if (CvInvoke.ContourArea(contour) > 400)
            {
                if (numberOfChildren < 3)
                {
                    //If the contour has fewer than 3 children, it is not a license plate
                    //(assuming a license plate has at least 3 characters).
                    //However, we should search the children of this contour to see if any of them is a license plate.
                    FindLicensePlate(contours, hierachy, hierachy[idx, 2], gray, canny, licensePlateImagesList,
                        filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                    continue;
                }

                //normalize the box so that width is the longer side and the angle is in [-45, 45]
                RotatedRect box = CvInvoke.MinAreaRect(contour);
                if (box.Angle < -45.0)
                {
                    float tmp = box.Size.Width;
                    box.Size.Width = box.Size.Height;
                    box.Size.Height = tmp;
                    box.Angle += 90.0f;
                }
                else if (box.Angle > 45.0)
                {
                    float tmp = box.Size.Width;
                    box.Size.Width = box.Size.Height;
                    box.Size.Height = tmp;
                    box.Angle -= 90.0f;
                }

                double whRatio = (double)box.Size.Width / box.Size.Height;
                if (!(3.0 < whRatio && whRatio < 10.0))
                {
                    //if the width/height ratio is not in the specific range, it is not a license plate;
                    //however, we should search the children of this contour to see if any of them is a license plate
                    if (hierachy[idx, 2] > 0)
                    {
                        FindLicensePlate(contours, hierachy, hierachy[idx, 2], gray, canny, licensePlateImagesList,
                            filteredLicensePlateImagesList, detectedLicensePlateRegionList, licenses);
                    }
                    continue;
                }

                using (UMat tmp1 = new UMat())
                using (UMat tmp2 = new UMat())
                {
                    //warp the rotated box into an axis-aligned image
                    PointF[] srcCorners = box.GetVertices();
                    PointF[] destCorners = new PointF[]
                    {
                        new PointF(0, box.Size.Height - 1),
                        new PointF(0, 0),
                        new PointF(box.Size.Width - 1, 0),
                        new PointF(box.Size.Width - 1, box.Size.Height - 1)
                    };
                    using (Mat rot = CvInvoke.GetAffineTransform(srcCorners, destCorners))
                    {
                        CvInvoke.WarpAffine(gray, tmp1, rot, Size.Round(box.Size));
                    }

                    //resize the license plate so the font is ~10-12pt; this font size gives better accuracy from tesseract
                    Size approxSize = new Size(240, 180);
                    double scale = Math.Min(approxSize.Width / box.Size.Width, approxSize.Height / box.Size.Height);
                    Size newSize = new Size((int)Math.Round(box.Size.Width * scale), (int)Math.Round(box.Size.Height * scale));
                    CvInvoke.Resize(tmp1, tmp2, newSize, 0, 0, Inter.Cubic);

                    //remove some pixels from the edge
                    int edgePixelSize = 2;
                    Rectangle newRoi = new Rectangle(new Point(edgePixelSize, edgePixelSize),
                        tmp2.Size - new Size(2 * edgePixelSize, 2 * edgePixelSize));
                    UMat plate = new UMat(tmp2, newRoi);
                    UMat filteredPlate = FilterPlate(plate);

                    Tesseract.Character[] words;
                    StringBuilder strBuilder = new StringBuilder();
                    using (UMat tmp = filteredPlate.Clone())
                    {
                        _ocr.Recognize(tmp);
                        words = _ocr.GetCharacters();
                        if (words.Length == 0)
                        {
                            continue;
                        }
                        for (int i = 0; i < words.Length; i++)
                        {
                            strBuilder.Append(words[i].Text);
                        }
                    }
                    licenses.Add(strBuilder.ToString());
                    licensePlateImagesList.Add(plate);
                    filteredLicensePlateImagesList.Add(filteredPlate);
                    detectedLicensePlateRegionList.Add(box);
                }
            }
        }
    }
}
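// GetNumberOfChildren is used above but not shown; a sketch consistent with the
// FindContours hierarchy layout (column 0 = next sibling, column 2 = first child):
private static int GetNumberOfChildren(int[,] hierachy, int idx)
{
    idx = hierachy[idx, 2];       //move to the first child
    if (idx < 0)
        return 0;
    int count = 1;
    while (hierachy[idx, 0] >= 0) //count the child's siblings
    {
        count++;
        idx = hierachy[idx, 0];
    }
    return count;
}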
public short Calculate(string imageFilePath, Polygon polygon, Media.PointCollection pointCollection)
{
    // Load the image
    // Drawing.Bitmap maskedBitmap = GetMaskedBitmap(imageFilePath, pointCollection);
    Image<Bgr, byte> cvImage = new Image<Bgr, byte>(imageFilePath);

    // Generate the mask from the polygon
    Mat matMask = new Mat(new Drawing.Size(cvImage.Cols, cvImage.Rows), DepthType.Cv8U, 3);

    // Scale and generate the polygon points
    List<Point> scaledPoints = GetScaledPoints(pointCollection, cvImage.Rows, cvImage.Cols);
    List<Drawing.Point> scaledDrawingPoints = GetPolygonPoints(scaledPoints, cvImage.Rows, cvImage.Cols);

    // Draw the polygon in white
    using (VectorOfPoint vPoint = new VectorOfPoint(scaledDrawingPoints.ToArray()))
    using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
    {
        CvInvoke.FillPoly(matMask, vvPoint, new Bgr(255, 255, 255).MCvScalar);
    }
    Image<Gray, byte> imageMask = new Image<Gray, byte>(matMask.Bitmap);

    // Compute the average color under the mask
    Bgr result = cvImage.GetAverage(imageMask);

    // Compare against the reference images
    Bgr snow = JsonConvert.DeserializeObject<Bgr>(polygon.BgrSnow);
    Bgr normal = JsonConvert.DeserializeObject<Bgr>(polygon.BgrNormal);
    double resultSnow = Math.Abs(snow.Blue - result.Blue) + Math.Abs(snow.Green - result.Green) + Math.Abs(snow.Red - result.Red);
    double resultNormal = Math.Abs(normal.Blue - result.Blue) + Math.Abs(normal.Green - result.Green) + Math.Abs(normal.Red - result.Red);

    if (Math.Abs(resultSnow - resultNormal) < 10)
    {
        return 0;  // ambiguous: the two distances are too close
    }
    else if (resultSnow < resultNormal)
    {
        return 1;  // closer to the snow reference
    }
    else
    {
        return -1; // closer to the normal reference
    }
}
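// The comparison above is an L1 distance between average colors in BGR space;
// factored out for clarity (the helper name is mine, not from the original):
static double L1Distance(Bgr a, Bgr b)
{
    return Math.Abs(a.Blue - b.Blue) + Math.Abs(a.Green - b.Green) + Math.Abs(a.Red - b.Red);
}
// Calculate then returns 1 when the masked region is closer to the snow reference,
// -1 when it is closer to the normal reference, and 0 when the two distances differ by less than 10.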
public void CalculateAverageBrightessForArea(string reference0, string reference1, StrassenbilderMetaDataContext dataContext)
{
    // Load the image metadata
    string name0 = Path.GetFileNameWithoutExtension(reference0);
    string name1 = Path.GetFileNameWithoutExtension(reference1);
    Image image0 = dataContext.Images.Where(i => i.Name == name0).FirstOrDefault();
    Image image1 = dataContext.Images.Where(i => i.Name == name1).FirstOrDefault();

    // Load the polygons
    IEnumerable<Polygon> polygons = dataContext.Polygons.Where(p => p.CameraName == image0.Place);

    // Apply per mask
    foreach (var polygon in polygons)
    {
        IList<Point> polygonPoints = JsonConvert.DeserializeObject<Media.PointCollection>(polygon.PolygonPointCollection);

        // Load the masked images
        Drawing.Bitmap bitmap0 = GetMaskedBitmap(reference0, polygonPoints);
        Drawing.Bitmap bitmap1 = GetMaskedBitmap(reference1, polygonPoints);
        Image<Bgr, byte> cvImage0 = new Image<Bgr, byte>(bitmap0);
        Image<Bgr, byte> cvImage1 = new Image<Bgr, byte>(bitmap1);

        // Generate the mask from the polygon
        Mat matMask = new Mat(new Drawing.Size(cvImage0.Cols, cvImage0.Rows), DepthType.Cv8U, 3);

        // Scale and generate the polygon points
        List<Point> scaledPoints = GetScaledPoints(polygonPoints, cvImage0.Rows, cvImage0.Cols);
        List<Drawing.Point> scaledDrawingPoints = GetPolygonPoints(scaledPoints, cvImage0.Rows, cvImage0.Cols);

        // Draw the polygon in white
        using (VectorOfPoint vPoint = new VectorOfPoint(scaledDrawingPoints.ToArray()))
        using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
        {
            CvInvoke.FillPoly(matMask, vvPoint, new Bgr(255, 255, 255).MCvScalar);
        }
        Image<Gray, byte> imageMask = new Image<Gray, byte>(matMask.Bitmap);

        // Compute the average color under the mask for both reference images
        Bgr result0 = cvImage0.GetAverage(imageMask);
        Bgr result1 = cvImage1.GetAverage(imageMask);

        // Store the result
        polygon.BgrSnow = JsonConvert.SerializeObject(result0);
        polygon.BgrNormal = JsonConvert.SerializeObject(result1);
        dataContext.SubmitChanges();
    }
}
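// Calculate and CalculateAverageBrightessForArea build the same polygon mask; a shared
// helper would avoid the duplication. A sketch under the same Emgu CV API (a
// single-channel mask is enough for GetAverage, so this also avoids the 3-channel
// Mat + Bitmap round-trip used above; the helper name is mine):
static Image<Gray, byte> BuildPolygonMask(Drawing.Size size, Drawing.Point[] polygon)
{
    // start from an all-black single-channel image
    Image<Gray, byte> mask = new Image<Gray, byte>(size.Width, size.Height, new Gray(0));
    using (VectorOfPoint vPoint = new VectorOfPoint(polygon))
    using (VectorOfVectorOfPoint vvPoint = new VectorOfVectorOfPoint(vPoint))
    {
        // fill the polygon region with white
        CvInvoke.FillPoly(mask, vvPoint, new MCvScalar(255));
    }
    return mask;
}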