static void Main(string[] args)
{
    RotatedRect rotatedRect = new RotatedRect(new Point2f(100f, 100f), new Size2f(100, 100), 45f);
    Console.WriteLine(rotatedRect.BoundingRect());
    Console.WriteLine(rotatedRect.Points().Length);
    Console.WriteLine(rotatedRect.Points()[0]);
}
/// <summary>
/// Contour detection on an image
/// </summary>
/// <param name="src">source image (BGR)</param>
public static List<Point2f[]> Findarea(Mat src)
{
    Mat img = src;
    Mat gray = new Mat();
    Mat black = new Mat();
    Point[][] contours;
    HierarchyIndex[] hierarchy;
    Point2f[] point2Fs = new Point2f[] { };
    List<Point2f[]> point2 = new List<Point2f[]>();
    Point p0 = new Point(0, 0), p1 = new Point(0, 0), p2 = new Point(0, 0), p3 = new Point(0, 0);
    Mat soX = new Mat(), soY = new Mat();

    Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY, 0);
    Cv2.Blur(gray, gray, new Size(10, 10));

    int thresh_size = (100 / 4) * 2 + 1; // adaptive threshold block size (must be odd)
    Cv2.AdaptiveThreshold(gray, black, 255, 0, ThresholdTypes.Binary, thresh_size, thresh_size / 3);
    new Window("Binary image", WindowMode.FreeRatio, black);

    Cv2.FindContours(black, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple, null);

    int resultnum = 0;
    Point[][] Excontours = contours;
    for (int i = 0; i < hierarchy.Length; i++)
    {
        if (contours[i].Length < 100)
        {
            continue;
        }
        RotatedRect rect = Cv2.MinAreaRect(contours[i]);
        point2Fs = rect.Points();
        Point[] po = change(rect.Points());
        //point2.Add(point2Fs);
        Excontours[resultnum] = po;
        for (int z = 0; z < point2Fs.Length; z++) // round coordinates to 2 decimal places
        {
            point2Fs[z].X = (float)Math.Round(point2Fs[z].X, 2);
            point2Fs[z].Y = (float)Math.Round(point2Fs[z].Y, 2);
        }
        point2.Add(point2Fs);
        for (int j = 0; j < 3; j++)
        {
            p0 = new Point(point2Fs[j].X, point2Fs[j].Y);
            p1 = new Point(point2Fs[j + 1].X, point2Fs[j + 1].Y);
            Cv2.Line(img, p0, p1, Scalar.Red, 1, LineTypes.Link8);
        }
        p2 = new Point(point2Fs[3].X, point2Fs[3].Y);
        p3 = new Point(point2Fs[0].X, point2Fs[0].Y);
        Point TP = new Point((p0.X + p1.X) / 2, (p1.Y + p2.Y) / 2);
        Cv2.Line(img, p2, p3, Scalar.Red, 1, LineTypes.Link8);
        resultnum++;
    }
    Console.WriteLine("Contours remaining after filtering: " + resultnum);
    return point2;
    //Console.WriteLine(js);
    //new Window("result", WindowMode.FreeRatio, img);
    //Window.WaitKey(0);
}
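// Several of these Findarea/FindContours examples (including the one above) call a change(...) helper
// that converts the four Point2f corners returned by RotatedRect.Points() into integer Points, but the
// helper itself is not shown. A plausible reconstruction; the name and rounding behaviour are assumptions:
private static Point[] change(Point2f[] corners)
{
    Point[] result = new Point[corners.Length];
    for (int i = 0; i < corners.Length; i++)
    {
        // round each floating-point corner to the nearest integer pixel
        result[i] = new Point((int)Math.Round(corners[i].X), (int)Math.Round(corners[i].Y));
    }
    return result;
}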
public void RotatedRectangleIntersectionVector()
{
    var rr1 = new RotatedRect(new Point2f(100, 100), new Size2f(100, 100), 45);
    var rr2 = new RotatedRect(new Point2f(130, 100), new Size2f(100, 100), 0);
    Cv2.RotatedRectangleIntersection(rr1, rr2, out var intersectingRegion);

    if (Debugger.IsAttached)
    {
        Point[] ToPoints(IEnumerable<Point2f> enumerable)
        {
            return enumerable.Select(p => new Point(p.X, p.Y)).ToArray();
        }

        using (var img = new Mat(200, 200, MatType.CV_8UC3, 0))
        {
            img.Polylines(new[] { ToPoints(rr1.Points()) }, true, Scalar.Red);
            img.Polylines(new[] { ToPoints(rr2.Points()) }, true, Scalar.Green);
            img.Polylines(new[] { ToPoints(intersectingRegion) }, true, Scalar.White);
            Window.ShowImages(img);
        }
    }

    intersectingRegion.ToString();
}
public void draw_rotated_box(ref Mat img, ref RotatedRect box, Scalar color)
{
    Point2f[] vertices = box.Points();
    for (int j = 0; j < 4; j++)
    {
        Cv2.Line(img, vertices[j], vertices[(j + 1) % 4], color);
    }
}
public void DetectAllText(string fileName)
{
    const int InputWidth = 320;
    const int InputHeight = 320;
    const float ConfThreshold = 0.5f;
    const float NmsThreshold = 0.4f;

    // Load network.
    using (Net net = CvDnn.ReadNet(Path.GetFullPath(LocalModelPath)))
    using (Mat img = new Mat(fileName))
    // Prepare input image
    using (var blob = CvDnn.BlobFromImage(img, 1.0, new Size(InputWidth, InputHeight), new Scalar(123.68, 116.78, 103.94), true, false))
    {
        // Forward Pass
        // Now that we have prepared the input, we will pass it through the network. There are two outputs of the network.
        // One specifies the geometry of the Text-box and the other specifies the confidence score of the detected box.
        // These are given by the layers:
        //   feature_fusion/concat_3
        //   feature_fusion/Conv_7/Sigmoid
        var outputBlobNames = new string[] { "feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3" };
        var outputBlobs = outputBlobNames.Select(_ => new Mat()).ToArray();

        net.SetInput(blob);
        net.Forward(outputBlobs, outputBlobNames);
        Mat scores = outputBlobs[0];
        Mat geometry = outputBlobs[1];

        // Decode predicted bounding boxes (decode the positions of the text boxes along with their orientation)
        Decode(scores, geometry, ConfThreshold, out var boxes, out var confidences);

        // Apply non-maximum suppression procedure for filtering out the false positives and get the final predictions
        CvDnn.NMSBoxes(boxes, confidences, ConfThreshold, NmsThreshold, out var indices);

        // Render detections.
        Point2f ratio = new Point2f((float)img.Cols / InputWidth, (float)img.Rows / InputHeight);
        for (var i = 0; i < indices.Length; ++i)
        {
            RotatedRect box = boxes[indices[i]];
            Point2f[] vertices = box.Points();

            // scale the box corners from network input size back to the original image size
            for (int j = 0; j < 4; ++j)
            {
                vertices[j].X *= ratio.X;
                vertices[j].Y *= ratio.Y;
            }

            for (int j = 0; j < 4; ++j)
            {
                Cv2.Line(img, (int)vertices[j].X, (int)vertices[j].Y, (int)vertices[(j + 1) % 4].X, (int)vertices[(j + 1) % 4].Y, new Scalar(0, 255, 0), 3);
            }
        }

        ShowImagesWhenDebugMode(img);
    }
}
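// The Decode helper used above is not part of this snippet. The OpenCV EAST sample decodes the
// 1x1xHxW score map and the 1x5xHxW geometry map (four edge distances plus an angle per cell)
// into RotatedRect candidates. A minimal sketch along those lines follows; the exact signature
// and the Mat.At-based blob indexing are assumptions, not the original author's code.
private static void Decode(Mat scores, Mat geometry, float confThreshold,
                           out List<RotatedRect> boxes, out List<float> confidences)
{
    boxes = new List<RotatedRect>();
    confidences = new List<float>();

    int height = scores.Size(2);
    int width = scores.Size(3);

    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            float score = scores.At<float>(new[] { 0, 0, y, x });
            if (score < confThreshold)
            {
                continue;
            }

            // Distances from this cell to the top/right/bottom/left edges of the text box, plus its rotation angle.
            float top = geometry.At<float>(new[] { 0, 0, y, x });
            float right = geometry.At<float>(new[] { 0, 1, y, x });
            float bottom = geometry.At<float>(new[] { 0, 2, y, x });
            float left = geometry.At<float>(new[] { 0, 3, y, x });
            float angle = geometry.At<float>(new[] { 0, 4, y, x });

            // Each feature-map cell corresponds to a 4-pixel stride in the 320x320 network input.
            float offsetX = x * 4.0f;
            float offsetY = y * 4.0f;
            float cos = (float)Math.Cos(angle);
            float sin = (float)Math.Sin(angle);
            float h = top + bottom;
            float w = right + left;

            // Reconstruct two opposite corners from the offsets, then express the box as a RotatedRect.
            var offset = new Point2f(offsetX + cos * right + sin * bottom,
                                     offsetY - sin * right + cos * bottom);
            var p1 = new Point2f(-sin * h + offset.X, -cos * h + offset.Y);
            var p3 = new Point2f(-cos * w + offset.X, sin * w + offset.Y);
            var center = new Point2f(0.5f * (p1.X + p3.X), 0.5f * (p1.Y + p3.Y));

            boxes.Add(new RotatedRect(center, new Size2f(w, h), (float)(-angle * 180.0 / Math.PI)));
            confidences.Add(score);
        }
    }
}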
public static string FindContours(Mat img)
{
    Mat src = img;
    Mat gray = new Mat(), dst = new Mat(), hsvImage = new Mat();
    Point[][] contours;
    HierarchyIndex[] hierarchys;
    Point2f[] point2Fs = new Point2f[] { };
    List<Point2f[]> point2 = new List<Point2f[]>();
    Point p0 = new Point(0, 0), p1 = new Point(0, 0), p2 = new Point(0, 0), p3 = new Point(0, 0);

    //ImageMethod.HistogramEqualization(src, src);
    Cv2.CvtColor(src, gray, ColorConversionCodes.BGR2GRAY, 0);

    Mat x = Cv2.GetStructuringElement(MorphShapes.Ellipse, new Size(6, 6)); // !!! tune the kernel size
    Cv2.MorphologyEx(gray, gray, MorphTypes.Open, x);                       // !!! tune MorphTypes
    Cv2.Erode(gray, gray, x);
    //Cv2.Dilate(gray, gray, x);

    int thresh_size = (100 / 4) * 2 + 1; // adaptive threshold block size (must be odd)
    Cv2.AdaptiveThreshold(gray, gray, 255, 0, ThresholdTypes.Binary, thresh_size, thresh_size / 3);
    new Window("gray", WindowMode.FreeRatio, gray);

    Cv2.FindContours(gray, out contours, out hierarchys, RetrievalModes.External, ContourApproximationModes.ApproxSimple, null); // !! tune both modes

    int resultnum = 0;
    Point[][] Excontours = contours;
    for (int i = 0; i < hierarchys.Length; i++)
    {
        double area = Cv2.ContourArea(contours[i], false);
        if (area < 50)
        {
            continue;
        }
        RotatedRect rect = Cv2.MinAreaRect(contours[i]);
        point2Fs = rect.Points();
        Point[] po = change(rect.Points());
        //point2.Add(point2Fs);
        Excontours[resultnum] = po;
        for (int z = 0; z < point2Fs.Length; z++) // round coordinates to 2 decimal places
        {
            point2Fs[z].X = (float)Math.Round(point2Fs[z].X, 2);
            point2Fs[z].Y = (float)Math.Round(point2Fs[z].Y, 2);
        }
        point2.Add(point2Fs);
        for (int j = 0; j < 3; j++)
        {
            p0 = new Point(point2Fs[j].X, point2Fs[j].Y);
            p1 = new Point(point2Fs[j + 1].X, point2Fs[j + 1].Y);
            Cv2.Line(src, p0, p1, Scalar.Red, 3, LineTypes.Link8);
        }
        p2 = new Point(point2Fs[3].X, point2Fs[3].Y);
        p3 = new Point(point2Fs[0].X, point2Fs[0].Y);
        Point TP = new Point((p0.X + p1.X) / 2, (p1.Y + p2.Y) / 2);
        Cv2.Line(src, p2, p3, Scalar.Red, 3, LineTypes.Link8);
        resultnum++;
    }
    Console.WriteLine("Contours remaining after filtering: " + resultnum);

    string json = JsonConvert.SerializeObject(Excontours);
    //Console.WriteLine(json);
    string path = @"C:\toolkipweb\miniProgram\opencvtest\opencv\test.jpg";
    Cv2.ImWrite(path, src);
    path = "https://www.toolkip.com/miniProgram/opencvtest/opencv/test.jpg";
    new Window("result", WindowMode.FreeRatio, src);
    Window.WaitKey();
    return path + "--" + json;
}
public static Point[][] Findarea(Mat src)
{
    Mat img = src;
    Mat gray = new Mat();
    Mat black = new Mat();
    Point[][] contours;
    HierarchyIndex[] hierarchys;
    Point2f[] point2Fs = new Point2f[] { };
    List<Point2f[]> point2 = new List<Point2f[]>();
    Point p0 = new Point(0, 0), p1 = new Point(0, 0), p2 = new Point(0, 0), p3 = new Point(0, 0);
    Mat soX = new Mat(), soY = new Mat();

    Cv2.Laplacian(img, gray, 0, 1, 1, 0, BorderTypes.Default);
    Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY, 0);
    //Cv2.Sobel(gray, soX, MatType.CV_8U, 1, 0);
    //Cv2.Sobel(gray, soY, MatType.CV_8U, 0, 1);
    //Cv2.AddWeighted(soX, 0.5, soY, 0.5, 0, gray);
    Cv2.Blur(gray, gray, new Size(10, 10));

    int thresh_size = (100 / 4) * 2 + 1; // adaptive threshold block size (must be odd)
    Cv2.AdaptiveThreshold(gray, black, 255, 0, ThresholdTypes.Binary, thresh_size, thresh_size / 3);
    //Cv2.Threshold(gray, black, 100, 255, ThresholdTypes.Binary);

    Mat x = Cv2.GetStructuringElement(MorphShapes.Ellipse, new Size(5, 5)); // !!! tune the kernel size
    Cv2.MorphologyEx(black, black, MorphTypes.Open, x);                     // !!! tune MorphTypes
    //new Window("Binary image", WindowMode.FreeRatio, black);

    Cv2.FindContours(black, out contours, out hierarchys, RetrievalModes.External, ContourApproximationModes.ApproxSimple, null);

    int resultnum = 0;
    Point[][] Excontours = contours;
    for (int i = 0; i < hierarchys.Length; i++)
    {
        if (contours[i].Length < 100)
        {
            continue;
        }
        RotatedRect rect = Cv2.MinAreaRect(contours[i]);
        point2Fs = rect.Points();
        Point[] po = change(rect.Points());
        //point2.Add(point2Fs);
        Excontours[resultnum] = po;
        for (int j = 0; j < 3; j++)
        {
            p0 = new Point(point2Fs[j].X, point2Fs[j].Y);
            p1 = new Point(point2Fs[j + 1].X, point2Fs[j + 1].Y);
            Cv2.Line(img, p0, p1, Scalar.Red, 1, LineTypes.Link8);
        }
        p2 = new Point(point2Fs[3].X, point2Fs[3].Y);
        p3 = new Point(point2Fs[0].X, point2Fs[0].Y);
        Point TP = new Point((p0.X + p1.X) / 2, (p1.Y + p2.Y) / 2);
        Cv2.Line(img, p2, p3, Scalar.Red, 1, LineTypes.Link8);
        //Cv2.PutText(img, resultnum.ToString(), TP, HersheyFonts.HersheyComplex, 2, Scalar.Blue, 1, LineTypes.Link8, false);
        resultnum++;
    }
    Console.WriteLine("Contours remaining after filtering: " + resultnum);
    new Window("results", WindowMode.FreeRatio, img);
    Window.WaitKey(0);
    return Excontours;
}
/// <summary>
/// The magic is here
/// </summary>
private void CalculateOutput()
{
    Mat matGray = null;

    // instead of regular Grayscale, we use BGR -> HSV and take Hue channel as source
    if (Settings.GrayMode == ScannerSettings.ColorMode.HueGrayscale)
    {
        var matHSV = matInput_.CvtColor(ColorConversionCodes.RGB2HSV);
        Mat[] hsvChannels = matHSV.Split();
        matGray = hsvChannels[0];
    }
    // Alternative: just plain BGR -> Grayscale
    else
    {
        matGray = matInput_.CvtColor(ColorConversionCodes.BGR2GRAY);
    }

    // scale down if necessary
    var matScaled = matGray;
    float sx = 1, sy = 1;
    if (Settings.Scale != 0)
    {
        if (matGray.Width > Settings.Scale)
        {
            sx = (float)Settings.Scale / matGray.Width;
        }
        if (matGray.Height > Settings.Scale)
        {
            sy = (float)Settings.Scale / matGray.Height;
        }
        matScaled = matGray.Resize(new Size(Math.Min(matGray.Width, Settings.Scale), Math.Min(matGray.Height, Settings.Scale)));
    }

    // reduce noise
    var matBlur = matScaled;
    if (Settings.NoiseReduction != 0)
    {
        int medianKernel = 11;

        // calculate kernel scale
        double kernelScale = Settings.NoiseReduction;
        if (0 == Settings.Scale)
        {
            kernelScale *= Math.Max(matInput_.Width, matInput_.Height) / 512.0;
        }

        // apply scale
        medianKernel = (int)(medianKernel * kernelScale + 0.5);
        medianKernel = medianKernel - (medianKernel % 2) + 1;
        if (medianKernel > 1)
        {
            matBlur = matScaled.MedianBlur(medianKernel);
        }
    }

    // detect edges with our 'adaptive' algorithm that computes bounds automatically with image's mean value
    var matEdges = matBlur.AdaptiveEdges(Settings.EdgesTight);

    // now find contours
    Point[][] contours;
    HierarchyIndex[] hierarchy;
    Cv2.FindContours(matEdges, out contours, out hierarchy, RetrievalModes.List, ContourApproximationModes.ApproxNone, null);

    // check contours and drop those we consider "noise", all others put into a single huge "key points" map
    // also, detect all almost-rectangular contours with big area and try to determine whether they're exact match
    List<Point> keyPoints = new List<Point>();
    List<Point[]> goodCandidates = new List<Point[]>();
    double referenceArea = matScaled.Width * matScaled.Height;
    foreach (Point[] contour in contours)
    {
        double length = Cv2.ArcLength(contour, true);

        // drop mini-contours
        if (length >= 25.0)
        {
            Point[] approx = Cv2.ApproxPolyDP(contour, length * 0.01, true);
            keyPoints.AddRange(approx);
            if (approx.Length >= 4 && approx.Length <= 6)
            {
                double area = Cv2.ContourArea(approx);
                if (area / referenceArea >= Settings.ExpectedArea)
                {
                    goodCandidates.Add(approx);
                }
            }
        }
    }

    // compute convex hull, considering we presume having an image of a document on more or less
    // homogeneous background, this accumulated convex hull should be the document bounding contour
    Point[] hull = Cv2.ConvexHull(keyPoints);
    Point[] hullContour = Cv2.ApproxPolyDP(hull, Cv2.ArcLength(hull, true) * 0.01, true);

    // find best guess for our contour
    Point[] paperContour = GetBestMatchingContour(matScaled.Width * matScaled.Height, goodCandidates, hullContour);
    if (null == paperContour)
    {
        shape_ = null;
        dirty_ = false;
        matOutput_ = matInput_;
        return;
    }

    // exact hit - we have 4 corners
    if (paperContour.Length == 4)
    {
        paperContour = SortCorners(paperContour);
    }
    // some hit: we either have 3 points or > 4 which we can try to make a 4-corner shape
    else if (paperContour.Length > 2)
    {
        // yet the contour might contain too many points: along with calculation inaccuracies we might face a
        // bended piece of paper, missing corner etc.
        // the solution is to use the bounding box
        RotatedRect bounds = Cv2.MinAreaRect(paperContour);
        Point2f[] points = bounds.Points();
        Point[] intPoints = Array.ConvertAll(points, p => new Point(Math.Round(p.X), Math.Round(p.Y)));
        Point[] fourCorners = SortCorners(intPoints);

        // array.ClosestElement is not efficient but we can live with it since there are quite few elements to search
        System.Func<Point, Point, double> distance = (Point x, Point y) => Point.Distance(x, y);
        Point[] closest = new Point[4];
        for (int i = 0; i < fourCorners.Length; ++i)
        {
            closest[i] = paperContour.ClosestElement(fourCorners[i], distance);
        }
        paperContour = closest;
    }

    // scale contour back to input image coordinate space - if necessary
    if (sx != 1 || sy != 1)
    {
        for (int i = 0; i < paperContour.Length; ++i)
        {
            Point2f pt = paperContour[i];
            paperContour[i] = new Point2f(pt.X / sx, pt.Y / sy);
        }
    }

    // un-wrap
    var matUnwrapped = matInput_;
    bool needConvertionToBGR = true;
    if (paperContour.Length == 4)
    {
        matUnwrapped = matInput_.UnwrapShape(Array.ConvertAll(paperContour, p => new Point2f(p.X, p.Y)));

        // automatic color converter
        bool convertColor = (ScannerSettings.DecolorizationMode.Always == Settings.Decolorization);
        if (ScannerSettings.DecolorizationMode.Automatic == Settings.Decolorization)
        {
            convertColor = !IsColored(matUnwrapped);
        }

        // perform color conversion to b&w
        if (convertColor)
        {
            matUnwrapped = matUnwrapped.CvtColor(ColorConversionCodes.BGR2GRAY);

            // we have some constants for Adaptive, but this can be improved with some 'educated guess'
            // for the constants depending on input image
            if (ScannerSettings.ScanType.Adaptive == Settings.ColorThreshold)
            {
                matUnwrapped = matUnwrapped.AdaptiveThreshold(255, AdaptiveThresholdTypes.MeanC, ThresholdTypes.Binary, 47, 25);
            }
            // Otsu doesn't need our help, decent on its own
            else
            {
                matUnwrapped = matUnwrapped.Threshold(0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
            }
        }
        else
        {
            needConvertionToBGR = false;
        }
    }

    // assign result
    shape_ = paperContour;
    matOutput_ = matUnwrapped;
    if (needConvertionToBGR)
    {
        matOutput_ = matOutput_.CvtColor(ColorConversionCodes.GRAY2BGR); // to make it compatible with input texture
    }

    // mark we're good
    dirty_ = false;
}
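// The scanner above relies on several helpers that are not shown here (AdaptiveEdges,
// GetBestMatchingContour, SortCorners, UnwrapShape, ClosestElement, IsColored). As an illustration
// only, a typical corner-sorting routine in the spirit of SortCorners could look like the sketch
// below (uses System.Linq); this is an assumption, not the library's actual implementation.
private static Point[] SortCornersSketch(Point[] corners)
{
    // centroid of the quad
    double cx = corners.Average(p => (double)p.X);
    double cy = corners.Average(p => (double)p.Y);

    // order the corners clockwise around the centroid (image coordinates, y grows downwards)
    Point[] sorted = corners
        .OrderBy(p => Math.Atan2(p.Y - cy, p.X - cx))
        .ToArray();

    // rotate the sequence so it starts at the corner closest to the top-left
    int start = Array.IndexOf(sorted, sorted.OrderBy(p => p.X + p.Y).First());
    return sorted.Skip(start).Concat(sorted.Take(start)).ToArray();
}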
/// <summary>
/// Contour detection on an image
/// </summary>
/// <param name="src">source image (BGR)</param>
public static List<Point[]> Findarea(Mat src)
{
    //Mat img = src;
    Mat img = new Mat();
    src.CopyTo(img);
    Mat gray = new Mat();
    Mat black = new Mat();
    Point[][] contours;
    HierarchyIndex[] hierarchy;
    Point2f[] point2Fs = new Point2f[] { };
    List<Point2f[]> point2 = new List<Point2f[]>();
    Point p0 = new Point(0, 0), p1 = new Point(0, 0), p2 = new Point(0, 0), p3 = new Point(0, 0);

    ImageMethod.HistogramEqualization(img, img);
    Mat soX = new Mat(), soY = new Mat();
    Cv2.CvtColor(img, gray, ColorConversionCodes.BGR2GRAY, 0);
    //Cv2.Blur(gray, gray, new Size(10, 10));
    Cv2.GaussianBlur(gray, gray, new Size(5, 5), 0, 5);

    Mat k = Cv2.GetStructuringElement(MorphShapes.Rect, new Size(5, 5)); // !!! tune the kernel size
    Cv2.MorphologyEx(gray, gray, MorphTypes.Open, k);                    // !!! tune MorphTypes
    Cv2.Erode(gray, gray, k);

    int thresh_size = (100 / 4) * 2 + 1; // adaptive threshold block size (must be odd)
    Cv2.AdaptiveThreshold(gray, black, 255, 0, ThresholdTypes.Binary, thresh_size, thresh_size / 3);
    //new Window("Binary image", WindowMode.FreeRatio, black);

    Cv2.FindContours(black, out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple, null);

    int resultnum = 0;
    List<Point[]> Excontours = new List<Point[]>();
    //Point[][] Excont = contours;
    for (int i = 0; i < hierarchy.Length; i++)
    {
        double area = Cv2.ContourArea(contours[i], false);
        if (area < 50)
        {
            continue;
        }
        RotatedRect rect = Cv2.MinAreaRect(contours[i]);
        point2Fs = rect.Points();
        Point[] po = change(rect.Points());
        //point2.Add(point2Fs);
        Excontours.Add(po);
        for (int z = 0; z < point2Fs.Length; z++) // round coordinates to 2 decimal places
        {
            point2Fs[z].X = (float)Math.Round(point2Fs[z].X, 2);
            point2Fs[z].Y = (float)Math.Round(point2Fs[z].Y, 2);
        }
        point2.Add(point2Fs);
        for (int j = 0; j < 3; j++)
        {
            p0 = new Point(point2Fs[j].X, point2Fs[j].Y);
            p1 = new Point(point2Fs[j + 1].X, point2Fs[j + 1].Y);
            Cv2.Line(img, p0, p1, Scalar.Red, 1, LineTypes.Link8);
        }
        p2 = new Point(point2Fs[3].X, point2Fs[3].Y);
        p3 = new Point(point2Fs[0].X, point2Fs[0].Y);
        Point TP = new Point((p0.X + p1.X) / 2, (p1.Y + p2.Y) / 2);
        Cv2.Line(img, p2, p3, Scalar.Red, 1, LineTypes.Link8);
        resultnum++;
    }
    Console.WriteLine("Contours remaining after filtering: " + resultnum);
    //return point2;
    //new Window("result", WindowMode.FreeRatio, img);
    //Window.WaitKey(0);
    return Excontours;
}
public List<GameObject> FindByWhiteMask(Mat src, Mat preview)
{
    // thresh = cv2.inRange(image, (210, 210, 210), (255, 255, 255))
    var mask_white = new Mat();
    //Cv2.InRange(src, new Scalar(150, 150, 150), new Scalar(255, 255, 255), mask_white);
    Cv2.InRange(src, new Scalar(210, 210, 210), new Scalar(255, 255, 255), mask_white);

    // cv2.imshow("edges", thresh)
    // _, cnts, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    //thresh.DrawContours()
    Mat[] contours = null;
    Mat dst = new Mat();
    //Cv2.FindContours(edged, out contours, new Mat(), RetrievalModes.List, ContourApproximationModes.ApproxSimple);
    Cv2.FindContours(mask_white, out contours, dst, RetrievalModes.List, ContourApproximationModes.ApproxNone);
    //Cv2.ImShow("dst", dst);
    //if (config.Preview == 4) Cv2.ImShow("thresh", mask_white);
    //dst.Release();
    //thresh.Release();

    List<GameObject> list_result = new List<GameObject>();
    if (contours != null)
    {
        Console.WriteLine("contours.size= " + contours.Length);
        for (int i = 0; i < contours.Length; i++)
        {
            Mat contour = contours[i];
            double area = Cv2.ContourArea(contour);

            // if the contour is not sufficiently large, ignore it
            if (area < 9)
            {
                continue;
            }

            double peri = Cv2.ArcLength(contour, true);
            Mat approx = contour.ApproxPolyDP(0.04 * peri, true);
            // Point p2f = approx.Get<Point>(0, 1);
            // Console.WriteLine("p2x= " + p2f.X + ", " + p2f.Y);
            // Console.WriteLine("cols= " + approx.Cols + ", rows= " + approx.Rows);
            //
            // Point[] points_approx = new Point[approx.Rows];
            // for (int j = 0; j < approx.Rows; j++) {
            //     points_approx[j] = approx.Get<Point>(0, j);
            // }
            if (config.Preview > 0)
            {
                Cv2.Polylines(preview, approx, true, new Scalar(0, 255, 0), 1);
            }

            //Rect rect = Cv2.BoundingRect(contour);
            //Cv2.Rectangle(preview, rect, new Scalar(0, 255, 0));

            // https://csharp.hotexamples.com/examples/OpenCvSharp/RotatedRect/-/php-rotatedrect-class-examples.html
            // https://stackoverflow.com/questions/43342199/draw-rotated-rectangle-in-opencv-c
            RotatedRect rrect = Cv2.MinAreaRect(contour);
            Point2f[] points2f = rrect.Points();
            //points2f = ImUtils.OrderPoints(points2f);
            if (config.Preview > 0)
            {
                ImUtils.PolylinesPoints2f(preview, points2f, Colors.green);
            }

            // https://www.pyimagesearch.com/2016/03/21/ordering-coordinates-clockwise-with-python-and-opencv/
            // # loop over the original points and draw them
            //for (int i1 = 0; i1 < points2f.Length; i1++) {
            //    Cv2.Circle(preview, new Point((int)points2f[i1].X, (int)points2f[i1].Y), 5, colors[i1], -1);
            //}

            double h = ImUtils.Distance(points2f[0], points2f[1]);
            double w = ImUtils.Distance(points2f[0], points2f[3]);
            double ratio = h / w;
            if (h > w)
            {
                ratio = w / h;
            }

            // compute the center of the contour
            Moments m = Cv2.Moments(contour);
            Point pt_center = new Point((int)(m.M10 / m.M00), (int)(m.M01 / m.M00));
            if (this.config.Preview > 0)
            {
                Cv2.Circle(preview, pt_center, 2, new Scalar(255, 0, 0), -1);
            }

            //Console.Write(", " + Cv2.ContourArea(contour));
            // if (Cv2.ContourArea(contour) > max_val && Cv2.ContourArea(contour) < 408)
            // {
            //     max_val = Cv2.ContourArea(contour);
            //     max_i = i;
            //     //Cv2.DrawContours(image, contours, i, new Scalar(255, 128, 128), 5);
            // }
            //Cv2.DrawContours(image, contours, i, new Scalar(255, 128, 128), 5);

            // https://metanit.com/sharp/tutorial/7.5.php
            //Console.WriteLine($"object: area={(int)area,5}, pts_count={approx.Rows,3}, ratio= {ratio:f4}, center={pt_center} ");
            GameObject obj = new GameObject(contour, area, approx, pt_center, ratio, rrect);
            if (ratio > 0.05)
            {
                obj.type = GameObject.ObjectType.unknown;
                list_result.Add(obj);
            }
            else
            {
                //obj.type = GameObject.ObjectType.noise;
            }
        }
        dst.Release();
        //mask_white.Release();
    }
    //if (configPreview != 0) {
    //    Cv2.DrawContours(preview, contours, -1, new Scalar(0, 255, 0), 2);
    //}
    return list_result;
}
//public Mat mask_white = null;

// on the first pass, fill the list sorted by X
// on the second pass, look for digits by height
// https://answers.opencv.org/question/74777/how-to-use-approxpolydp-to-close-contours/
public void PreProcessByWhiteMask(Mat src, Mat preview, Mat mask_white)
{
    // number of giant objects
    int big_count = 0;
    int max_y = 0;
    //Point pt_tmp = new Point(0, 0);

    Mat[] contours = null;
    Mat dst = new Mat();
    //Cv2.FindContours(edged, out contours, new Mat(), RetrievalModes.List, ContourApproximationModes.ApproxSimple);
    Cv2.FindContours(mask_white, out contours, dst, RetrievalModes.External, ContourApproximationModes.ApproxNone);

    if (contours != null)
    {
        Console.WriteLine($"contours.size={contours.Length} ");
        for (int i = 0; i < contours.Length; i++)
        {
            Mat contour = contours[i];
            double area = Cv2.ContourArea(contour);

            // if the contour is not sufficiently large, ignore it
            if (area < 1)
            {
                continue;
            }

            double peri = Cv2.ArcLength(contour, true);
            Mat points_approx = contour.ApproxPolyDP(0.04 * peri, true);
            // Vec2i pt_top = points_approx.Get<Vec2i>(0, 0), pt_bottom = points_approx.Get<Vec2i>(0, 0);
            // for (int y = 0; y < points_approx.Height; y++)
            // {
            //     for (int x = 0; x < points_approx.Width; x++)
            //     {
            //         Vec2i pt = points_approx.Get<Vec2i>(y, x);
            //         //if (i==3) {
            //         //Console.WriteLine("" + y + "=" + pt.Item0 + ", " + pt.Item1 + " ");
            //         if (this.config.Preview > 0) Cv2.Circle(preview, new Point(pt.Item0, pt.Item1), 3, new Scalar(0, 255, 0), -1);
            //         //}
            //
            //         if (pt.Item1 < pt_top.Item1) { pt_top = pt; }
            //         if (pt.Item1 > pt_bottom.Item1) { pt_bottom = pt; }
            //     }
            // }
            // Point[] points_approx = new Point[approx.Rows];
            // for (int j = 0; j < approx.Rows; j++) {
            //     points_approx[j] = approx.Get<Point>(0, j);
            // }
            // if (config.Preview > 0) Cv2.Polylines(preview, points_approx, true, new Scalar(0, 0, 255), 4);

            //Rect rect = Cv2.BoundingRect(contour);
            //Cv2.Rectangle(preview, rect, new Scalar(0, 255, 0));

            // https://csharp.hotexamples.com/examples/OpenCvSharp/RotatedRect/-/php-rotatedrect-class-examples.html
            // https://stackoverflow.com/questions/43342199/draw-rotated-rectangle-in-opencv-c
            RotatedRect rrect = Cv2.MinAreaRect(contour);

            // copied from ImUtils
            Point2f[] rrpoints = ImUtils.OrderPoints(rrect.Points());
            Point2f tl = rrpoints[0];
            Point2f tr = rrpoints[1];
            Point2f br = rrpoints[2];
            Point2f bl = rrpoints[3];
            //if (config.Preview > 0) ImUtils.PolylinesPoints2f(preview, rrpoints);

            double widthA = ImUtils.Distance(br, bl);
            double widthB = ImUtils.Distance(tr, tl);
            double width = (widthA > widthB ? widthA : widthB);
            double heightA = ImUtils.Distance(tr, br);
            double heightB = ImUtils.Distance(tl, bl);
            double height = (heightA > heightB ? heightA : heightB);

            double ratio = height / width;
            if (height > width)
            {
                ratio = width / height;
            }

            int top = (int)tl.Y;
            if (top > (int)tr.Y)
            {
                top = (int)tr.Y;
            }
            int bottom = (int)bl.Y;
            if (bottom < (int)br.Y)
            {
                bottom = (int)br.Y;
            }

            // compute the center of the contour
            Moments m = Cv2.Moments(contour);
            Point pt_center = new Point((int)(m.M10 / m.M00), (int)(m.M01 / m.M00));
            //if (this.config.Preview > 0) Cv2.Circle(preview, pt_center, 2, new Scalar(255, 0, 0), -1);

            // look for big letters - game over
            if (top > 15 && top < 125 && bottom > 300 && bottom < 390 && height > 180 && area > 1000)
            {
                // draw the big digit in cyan rgb(0, 255, 255) new Scalar(51, 0, 153)
                //if (this.config.Preview > 0) Cv2.Circle(preview, pt_center, 2, new Scalar(255, 255, 0), -1);
                //if (config.Preview > 0) ImUtils.PolylinesPoints2f(preview, rrpoints, colors[1]);
                if (config.Preview > 0)
                {
                    Cv2.DrawContours(preview, contours, i, Colors.red, 2);
                }
                //if (config.Preview > 0) Cv2.Polylines(preview, points_approx, true, colors[0], 2);
                big_count++;
            }

            // check the digits
            if (
                //&& obj.center.Y < Program.screenCenter.Y // must be in the top half of the screen
                bottom < 240 &&                // must be in the top half of the screen
                points_approx.Rows >= 2 &&     // must have more than 2 points
                area > 4 && area < 1000 &&     // must be relatively large
                ratio > 0.05 &&
                height > 20 && height < 41 &&
                width > 5 && width < 126
                )
            {
                //if (config.Preview > 0) Cv2.Polylines(preview, points_approx, true, colors[1], 2);
                if (config.Preview > 0)
                {
                    ImUtils.PolylinesPoints2f(preview, rrpoints, Colors.green2);
                }
                if (config.Preview > 0)
                {
                    Cv2.DrawContours(preview, contours, i, Colors.green2, 2);
                }
                GameObject obj = new GameObject(contour, area, points_approx, pt_center, ratio, rrect);
                list_digits.Add(obj);
            }

            // look for the player
            if (player != null &&
                // ignore the run before the turn
                top > screenCenter.Y &&
                pt_center.Y > max_y &&           // must be below the other objects
                pt_center.Y > screenCenter.Y &&  // must be in the bottom half of the screen
                points_approx.Rows > 2 &&        // must have more than 2 points
                area > 100 &&                    // must be relatively large
                ratio > 0.19
                )
            {
                max_y = pt_center.Y;
                player.contour = contour;
                player.area = area;
                player.points_approx = points_approx; // contour points
                player.center = pt_center;
                //player.ratio; // width / height to skip lines
                //Console.WriteLine("PLAYER" + i);
            }

            // https://metanit.com/sharp/tutorial/7.5.php
            //Console.WriteLine($"object{i}: area={(int)area,5}, pts_count={points_approx.Rows,3}, ratio= {ratio:f4}, top={top}, bottom={bottom}, h={height}, w= {width}, center={pt_center} ");

            // https://www.pyimagesearch.com/2016/03/21/ordering-coordinates-clockwise-with-python-and-opencv/
            // # loop over the original points and draw them
            //for (int i1 = 0; i1 < points2f.Length; i1++) {
            //    Cv2.Circle(preview, new Point((int)points2f[i1].X, (int)points2f[i1].Y), 2, colors[i1], -1);
            //}
            //double h = ImUtils.Distance(points2f[0], points2f[1]);
            //double w = ImUtils.Distance(points2f[0], points2f[3]);
        }
        //mask_white.Release();
    }
    //Cv2.WaitKey(5000);
    //if (configPreview != 0) {
    //    Cv2.DrawContours(preview, contours, -1, new Scalar(0, 255, 0), 2);
    //}
    dst.Release();

    if (big_count >= 2)
    {
        isGameOver = true;
    }
    if (max_y == 0)
    {
        player = null;
    }
    Console.WriteLine($"big_count={big_count}, isGameOver={isGameOver} ");
}
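// ImUtils.OrderPoints (used here and in FindByWhiteMask) follows the pyimagesearch
// "order coordinates clockwise" recipe linked in the comments above. The real ImUtils code is
// not shown; a hedged sketch of such an ordering helper (uses System.Linq) might look like this:
public static Point2f[] OrderPointsSketch(Point2f[] pts)
{
    // smallest x+y sum is the top-left corner, largest is the bottom-right
    Point2f tl = pts.OrderBy(p => p.X + p.Y).First();
    Point2f br = pts.OrderByDescending(p => p.X + p.Y).First();

    // smallest y-x difference is the top-right corner, largest is the bottom-left
    Point2f tr = pts.OrderBy(p => p.Y - p.X).First();
    Point2f bl = pts.OrderByDescending(p => p.Y - p.X).First();

    return new[] { tl, tr, br, bl };
}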
private unsafe void OpenCV(ref Bitmap bitmap)
{
    Mat testMat = BitmapConverter.ToMat(bitmap);

    MatOfDouble mu = new MatOfDouble();
    MatOfDouble sigma = new MatOfDouble();
    Cv2.MeanStdDev(testMat, mu, sigma);
    double mean = mu.GetArray(0, 0)[0];
    mu.Dispose();
    sigma.Dispose();

    SimpleBlobDetector.Params circleParameters = new SimpleBlobDetector.Params();
    circleParameters.FilterByCircularity = true;
    circleParameters.MinCircularity = (float)0.85;
    circleParameters.MaxCircularity = (float)1;
    circleParameters.MinArea = 30; // Modify the value on the fly (TODO use bigger circle)

    SimpleBlobDetector detectCircleBlobs = new SimpleBlobDetector(circleParameters);
    fingerPoints = detectCircleBlobs.Detect(testMat);
    detectCircleBlobs.Dispose();

    // If Finger found basically
    if (fingerPoints != null)
    {
        this.fingerSize = 0;
        int fingerIndex = -1;
        for (int i = 0; i < fingerPoints.Length; i++)
        {
            if (fingerPoints[i].Size >= this.fingerSize)
            {
                this.fingerSize = (int)fingerPoints[i].Size;
                fingerIndex = i;
            }
        }

        if (fingerIndex != -1)
        {
            OpenCvSharp.CPlusPlus.Point coordinate = fingerPoints[fingerIndex].Pt;
            this.fingerSize = (int)((fingerPoints[fingerIndex].Size) * Math.Sqrt(2));
            testMat.Set<Vec3b>(coordinate.Y, coordinate.X, new Vec3b(0, 255, 0));

            RotatedRect rRect = new RotatedRect(new Point2f(coordinate.X, coordinate.Y), new Size2f(this.fingerSize, this.fingerSize), 0);
            Point2f[] circleVerticies = rRect.Points();
            //this.fingerCoordinates[0] = coordinate.X;
            //this.fingerCoordinates[1] = coordinate.Y;
            int height = (int)(circleVerticies[0].Y - circleVerticies[1].Y);
            int width = (int)(circleVerticies[2].X - circleVerticies[1].X);
            int startX = (int)(circleVerticies[0].X);
            int startY = (int)(circleVerticies[1].Y);
            this.fingerDepth = MapColortoDepth(startX, startY, this.fingerSize, this.fingerSize);
            OpenCvSharp.CPlusPlus.Rect featureRect = new OpenCvSharp.CPlusPlus.Rect(startX, startY, this.fingerSize, this.fingerSize);

            // Draw box around finger
            for (int j = 0; j < 4; j++)
            {
                Cv2.Line(testMat, circleVerticies[j], circleVerticies[(j + 1) % 4], new Scalar(0, 255, 0));
            }

            Boolean intersectOccurance = false;
            List<int> intersectIndicies = new List<int>();
            for (int i = 0; i < this.controls.Count; i++)
            {
                if (this.controls[i].boundingRect.IntersectsWith(featureRect))
                {
                    double diff = fingerDepth - this.controls[i].depth;
                    if (Math.Abs(diff) < 0.5)
                    {
                        intersectOccurance = true;
                        intersectIndicies.Add(i);
                    }
                }
            }

            System.Text.StringBuilder append = new System.Text.StringBuilder();
            if (intersectOccurance)
            {
                for (int i = 0; i < intersectIndicies.Count; i++)
                {
                    append.Append(" " + this.controls[intersectIndicies[i]].title + " " + intersectIndicies[i].ToString());
                }
                this.OutputText = "Pressed Button" + append; // TODO Make this more obvious
            }
            else
            {
                this.OutputText = "No State";
            }
        }
    }

    bitmap = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(testMat);
    testMat.Dispose();
}
public System.Drawing.Bitmap frameCut(System.Drawing.Bitmap bitmap = null)
{
    Mat frame;
    frameMutex.WaitOne();
    if (bitmap == null)
    {
        frame = frameMat.Clone();
    }
    else
    {
        frame = ConvertBitmapToMat(bitmap);
        frameMat = frame.Clone();
        framePalette = bitmap.Palette;
    }

    List<Point2f> points_userClicked = corners.GetPoints();
    if (corners.isUsabel())
    {
        // minRect
        RotatedRect minRect = Cv2.MinAreaRect(points_userClicked);
        List<Point2f> points_minRect = minRect.Points().ToList();

        // sort points so each minRect corner is paired with the nearest user-clicked corner
        for (int i = 0; i < 4; i++)
        {
            double min_distance = Math.Sqrt(frame.Width * frame.Width + frame.Height * frame.Height);
            for (int j = i; j < 4; j++)
            {
                double distance_tmp = points_minRect[j].DistanceTo(points_userClicked[i]);
                if (distance_tmp < min_distance)
                {
                    min_distance = distance_tmp;
                    var tmpPoint = points_minRect[i];
                    points_minRect[i] = points_minRect[j];
                    points_minRect[j] = tmpPoint;
                }
            }
        }

        // calc correction matrix
        Mat warpMatrix = Cv2.GetPerspectiveTransform(points_userClicked, points_minRect);
        // correct img
        Cv2.WarpPerspective(frame, frame, warpMatrix, frame.Size());

        // calc rotation matrix
        double angle = minRect.Angle;
        Size size = new Size(minRect.Size.Width, minRect.Size.Height);
        if (angle < -45.0)
        {
            angle += 90.0;
            var tmp = size.Width;
            size.Width = size.Height;
            size.Height = tmp;
        }
        Mat rotateMatrix = Cv2.GetRotationMatrix2D(minRect.Center, angle, 1.0);

        // rotate img and crop the upright rectangle around the center
        Cv2.WarpAffine(frame, frame, rotateMatrix, frame.Size(), InterpolationFlags.Cubic);
        Cv2.GetRectSubPix(frame, size, minRect.Center, frame);
    }
    else
    {
        // draw points & lines
        for (int i = 0; i < points_userClicked.Count(); i++)
        {
            // draw points
            Cv2.Circle(frame, points_userClicked[i], (int)(5 * frameScale), new Scalar(189, 73, 17), (int)(5 * frameScale));
            // draw lines
            if (i < points_userClicked.Count() - 1)
            {
                Cv2.Line(frame, points_userClicked[i], points_userClicked[i + 1], new Scalar(232, 162, 0), (int)(5 * frameScale));
            }
        }
    }

    if (standardSize.Height == 0 || standardSize.Width == 0 || !corners.isUsabel())
    {
        Cv2.Resize(frame, frame, new Size((int)(frame.Width * frameScale) / 4 * 4, (int)(frame.Height * frameScale)));
    }
    else
    {
        Cv2.Resize(frame, frame, standardSize);
    }

    System.Drawing.Bitmap bmpImgRet = ConvertMatToBitmap(frame);
    if (frame.Channels() == 1)
    {
        if (bitmap == null)
        {
            bmpImgRet.Palette = framePalette; // copy palette
        }
        else
        {
            bmpImgRet.Palette = bitmap.Palette; // copy palette
        }
    }
    frameMutex.ReleaseMutex();
    return bmpImgRet;
}