public void SetImage(BitmapImage image, double width, double height)
{
    // Remember the source path and size the form to the requested dimensions.
    mTargetImageURL = image.UriSource.LocalPath;
    contour_form.Width = width;
    contour_form.Height = height;

    // Load the original as grayscale and extract its edges.
    Mat edgeMat = new Mat(mTargetImageURL, ImreadModes.GrayScale);
    edgeMat = edgeMat.Canny(75, 200, 3, true);

    // Replace any previously cached contour image on disk.
    if (File.Exists(CONTOUR_IMAGE))
    {
        File.Delete(CONTOUR_IMAGE);
    }
    Bitmap edgeBitmap = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(edgeMat);
    edgeBitmap.Save(CONTOUR_IMAGE, System.Drawing.Imaging.ImageFormat.Png);
    edgeBitmap.Dispose();

    // Reload the saved PNG fully into memory (OnLoad) so the file is not locked,
    // then show it in the contour viewer.
    BitmapImage displayImage = new BitmapImage();
    displayImage.BeginInit();
    displayImage.CacheOption = BitmapCacheOption.OnLoad;
    displayImage.UriSource = new Uri(CONTOUR_IMAGE);
    displayImage.EndInit();
    contour_img.Source = displayImage;
}
/// <summary>
/// Color method: builds grayscale/blur/Canny/threshold intermediates from the
/// source image, then equalizes the V channel of its HSV representation.
/// NOTE(review): the intermediates (Grayimg, blurimg, Cannyimg, thimg, hsvimg,
/// mats_hsv, mat_v) are stored in class-level fields as side effects — presumably
/// read elsewhere; confirm before refactoring.
/// </summary>
/// <param name="srcImg">source BGR image; null/empty yields null</param>
/// <param name="blurSize">Gaussian kernel size (must be positive and odd)</param>
/// <param name="cannyThreshold1">lower Canny hysteresis threshold</param>
/// <param name="cannyThreshold2">upper Canny hysteresis threshold</param>
/// <param name="HSVEvenimg">destination for the V-equalized HSV image</param>
/// <returns>HSVEvenimg, or null when the input is missing/empty</returns>
public static Mat ColorMethod(Mat srcImg, int blurSize, int cannyThreshold1, int cannyThreshold2, Mat HSVEvenimg)
{
    if (srcImg == null)
    {
        return(null);
    }
    if (srcImg.Empty())
    {
        return(null);
    }
    // Grayscale
    Grayimg = srcImg.CvtColor(ColorConversionCodes.BGR2GRAY);
    // Blur
    blurimg = Grayimg.GaussianBlur(new OpenCvSharp.Size(blurSize, blurSize), 0);
    // Edges
    Cannyimg = blurimg.Canny(cannyThreshold1, cannyThreshold2);
    // Binarization (Otsu picks the threshold; the 0 argument is ignored)
    thimg = blurimg.Threshold(0, 255, ThresholdTypes.Otsu | ThresholdTypes.Binary);
    // HSV
    hsvimg = srcImg.CvtColor(ColorConversionCodes.BGR2HSV);
    // Split the HSV channels
    mats_hsv = hsvimg.Split();
    mat_v = mats_hsv[2];
    // Equalize the V (value/brightness) channel
    mats_hsv[2] = mats_hsv[2].EqualizeHist();
    // Merge the channels back together
    Cv2.Merge(mats_hsv, HSVEvenimg);
    // NOTE(review): forcing a GC here is an anti-pattern ("release memory — not
    // timely enough" per the original author); disposing the intermediate Mats
    // explicitly would be preferable, but they escape via fields.
    GC.Collect();
    return(HSVEvenimg);
}
/// <summary>
/// Produces a Canny edge map (thresholds 100/200) of <paramref name="mat"/>
/// without modifying the caller's input.
/// </summary>
/// <param name="mat">source image; copied before processing</param>
/// <param name="result">receives a newly allocated edge map owned by the caller</param>
public static void Canny(Mat mat, out Mat result)
{
    // BUG FIX: the original reassigned `result` after copying/graying into it,
    // orphaning the intermediate Mat's native memory until finalization.
    // The intermediate is now scoped and disposed deterministically.
    using (Mat working = new Mat())
    {
        mat.CopyTo(working);
        GrayScale(working); // in-place grayscale conversion (project helper)
        result = working.Canny(100, 200);
    }
}
//
// Update
// Refreshes the detection state from the given frame/mask.
//
// Parameters
//   source    matrix used for region detection. Edges are extracted from it
//             before regions are computed, so a noise-free mask is ideal.
//   callback  filter invoked per candidate rectangle; return false to reject.
//
public void Update(Mat source, isDetectedDelegate callback)
{
    // Normalize to 8-bit single channel before edge detection (in place).
    if (source.Type() != MatType.CV_8UC1)
    {
        source.ConvertTo(source, MatType.CV_8UC1);
    }

    this._detectedRects.Clear();

    // Canny followed by dilate/erode (default kernels) to close small gaps.
    var edgeMap = source
        .Canny(this._threshold.Start, this._threshold.End)
        .Dilate(null)
        .Erode(null);

    Point[][] contours;
    HierarchyIndex[] hierarchy;
    edgeMap.FindContours(out contours, out hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);

    foreach (var contour in contours)
    {
        // Ignore tiny regions (area < 100 px) as noise.
        if (Cv2.ContourArea(contour) < 100)
        {
            continue;
        }
        var candidate = Cv2.MinAreaRect(contour);
        // Keep only the rectangles the caller accepts.
        if (callback(candidate))
        {
            this._detectedRects.Add(candidate);
        }
    }
}
/// <summary>
/// Demo: computes and displays vertical/horizontal Sobel derivatives of the
/// Canny edge map of "..\image.png", saving each intermediate to disk.
/// </summary>
static void SobelEdges()
{
    // BUG FIX: the original declared two locals named `image` (the path string
    // and the Mat), which does not compile. The path now has its own name.
    string imagePath = "..\\image.png";
    Mat image = Cv2.ImRead(imagePath);

    //converting to grayscale
    Mat gray = image.CvtColor(ColorConversionCodes.BGR2GRAY);
    gray.SaveImage("..\\grey.png");

    Mat canny = gray.Canny(50, 150);
    canny.SaveImage("..\\canny.png");
    // (Removed an unused Otsu-threshold local that the original never read.)

    //sobel vertical lines
    Mat sobelVertical = canny.Sobel(MatType.CV_8U, 1, 0);
    //sobel horizontal lines
    Mat sobelHorizontal = canny.Sobel(MatType.CV_8U, 0, 1);

    //display in console window
    using (new Window("SobelVertical", WindowMode.AutoSize, sobelVertical))
    {
        Window.WaitKey(0);
    }
    using (new Window("SobelHorizontal", WindowMode.AutoSize, sobelHorizontal))
    {
        Window.WaitKey(0);
    }
}
/// Returns the Canny edge map of the shared grayscale frame using the
/// configured thresholds (t1/t2) and aperture size, without L2 gradient.
private static Mat GetTresh()
{
    return grey.Canny(t1, t2, asize, false);
}
/// Probabilistic Hough line detection driven by the algorithm's property list:
/// [0] rho, [1] theta*100, [2] threshold, [3] minLineLength, [4] maxLineGap,
/// [5] line color, [6] thickness, [7] line type.
public void HoughLines(SoftwareBitmap input, SoftwareBitmap output, Algorithm algorithm)
{
    // Only handle requests addressed to this algorithm.
    if (algorithm.AlgorithmName != "HoughLines")
    {
        return;
    }

    using Mat source = SoftwareBitmap2Mat(input);
    using Mat canvas = new Mat(source.Rows, source.Cols, MatType.CV_8UC4);
    source.CopyTo(canvas);

    // Detect edges on the grayscale version of the frame.
    using Mat gray = source.CvtColor(ColorConversionCodes.BGRA2GRAY);
    using Mat edges = gray.Canny(50, 200);

    var props = algorithm.AlgorithmProperties;
    var segments = Cv2.HoughLinesP(edges,
        rho: (double)props[0].CurrentValue,
        theta: (double)props[1].CurrentValue / 100.0,
        threshold: (int)props[2].CurrentValue,
        minLineLength: (double)props[3].CurrentValue,
        maxLineGap: (double)props[4].CurrentValue);

    // Overlay every detected segment on the copy of the input.
    foreach (var segment in segments)
    {
        Cv2.Line(canvas, segment.P1, segment.P2,
            color: (Scalar)props[5].CurrentValue,
            thickness: (int)props[6].CurrentValue,
            lineType: (LineTypes)props[7].CurrentValue);
    }

    Mat2SoftwareBitmap(canvas, output);
}
/// <summary>
/// Automatic edge detector, defines bounds on its own.
/// Presumes the input is a GRAY (single-channel) image.
/// </summary>
/// <param name="matGray">Mat to detect edges on</param>
/// <param name="sigma">Threshold strength param where [0, 1] means [tightest, widest] threshold</param>
/// <returns>Mat with edges filtered</returns>
public static Mat AdaptiveEdges(this Mat matGray, double sigma = 0.33)
{
    int upper, lower;
    // Thresholds are derived from the image's statistics by the project helper.
    CalculateThresholdBounds(matGray, out lower, out upper, sigma);
    // Aperture 3 with L2 gradient enabled for more accurate edge magnitude.
    return(matGray.Canny(lower, upper, 3, true));
}
/// Experiment: detects line segments in section-4.png via Canny + probabilistic
/// Hough transform and saves a copy with each segment drawn in a random color.
private void Test2(double canny1, double canny2, double blur)
{
    // Load the sample both in color (for drawing) and as a denoised grayscale
    // (pyramid down/up acts as a cheap low-pass filter).
    byte[] imageData = System.IO.File.ReadAllBytes(@"./wwwroot/images/section-4.png");
    Mat img1 = Mat.FromImageData(imageData, ImreadModes.Color);
    Mat gray1 = Mat.FromImageData(imageData, ImreadModes.Grayscale).PyrDown().PyrUp();

    // Edge map, then segment detection (1px rho, 1-degree theta resolution).
    var edges = gray1.Canny(canny1, canny2);
    var lines = edges.HoughLinesP(1, Math.PI / 180, 50, 50, 10);

    var rng = new Random();
    Mat copy = img1.Clone();
    foreach (var line in lines)
    {
        Scalar color = Scalar.FromRgb(rng.Next(0, 255), rng.Next(0, 255), rng.Next(0, 255));
        copy.Line(line.P1, line.P2, color);
    }
    copy.SaveImage("wwwroot/images/output.png");
}
//http://www.pyimagesearch.com/2015/04/06/zero-parameter-automatic-canny-edge-detection-with-python-and-opencv/
/// Zero-parameter Canny: derives both hysteresis thresholds from the median
/// pixel intensity, clamped to the valid byte range.
/// Assumes a single-channel 8-bit image.
public static Mat AutoCanny(this Mat image, double sigma = 0.33)
{
    double medianIntensity = image.GetAll <byte>().Median();
    int lowerBound = (int)Math.Max(0, (1.0 - sigma) * medianIntensity);
    int upperBound = (int)Math.Min(255, (1.0 + sigma) * medianIntensity);
    return image.Canny(lowerBound, upperBound);
}
// NOTE(review): this copy of Contours appears truncated in the source — the
// Cv2.FindContours call is cut off mid-argument-list and the method body never
// closes. It will not compile as-is; presumably a duplicate/partial paste of
// the complete Contours implementation found later in this file — confirm and
// remove or restore before building.
public async void Contours(SoftwareBitmap input, SoftwareBitmap output, Algorithm algorithm) { if (algorithm.AlgorithmName == "Contours") { using Mat mInput = SoftwareBitmap2Mat(input); using Mat mOutput = new Mat(mInput.Rows, mInput.Cols, MatType.CV_8UC4); mInput.CopyTo(mOutput); using Mat gray = mInput.CvtColor(ColorConversionCodes.BGRA2GRAY); using Mat edges = gray.Canny((double)algorithm.AlgorithmProperties[6].CurrentValue, (double)algorithm.AlgorithmProperties[7].CurrentValue); Cv2.FindContours( edges, out OpenCvSharp.Point[][] contours, out HierarchyIndex[] outputArray,
/// <summary>
/// Loads the image at <paramref name="imagePath"/>, computes a 50/200 Canny
/// edge map, round-trips the source through System.Drawing.Bitmap and inverts it.
/// NOTE(review): neither `dst` (the edge map) nor `file` (the inverted image)
/// is stored, returned, or displayed, so this constructor currently has no
/// observable effect beyond the conversions themselves — presumably unfinished;
/// confirm intended behavior before relying on it.
/// </summary>
public EdgeDetection(string imagePath)
{
    using (Mat src = new Mat(imagePath, ImreadModes.Unchanged))
    {
        using (Mat dst = src.Canny(50, 200))
        {
            Mat file = new Mat();
            // Round-trip through a GDI+ bitmap, then invert the result.
            Bitmap bit = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(src);
            Mat matImg = OpenCvSharp.Extensions.BitmapConverter.ToMat(bit);
            Cv2.BitwiseNot(matImg, file);
        }
    }
}
/// <summary>
/// Cartoon effect: smooths colors with a bilateral filter, then darkens the
/// Canny edges by AND-ing the inverted edge map into the smoothed image.
/// The result is written back into <paramref name="imageSource"/>.
/// </summary>
/// <param name="imageSource">image to stylize in place; null yields null</param>
/// <returns>the (modified) input image, or null</returns>
public Mat ApplyCartoonFilter(Mat imageSource)
{
    if (imageSource == null)
    {
        return(null);
    }
    Mat bilateralImage = ApplyBilateralFilter(imageSource, cartoonFilterFactor);
    // 255 - edges == inverted edge mask, so edges become black strokes.
    Mat cannyImage = 255 - imageSource.Canny(100, 200);
    // NOTE(review): the intermediate Mats are never disposed (native-memory
    // churn), and cannyImage is single-channel while bilateralImage is
    // presumably multi-channel — verify the BitwiseAnd channel expectations.
    Cv2.BitwiseAnd(bilateralImage, cannyImage, imageSource);
    return(imageSource);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
/// <summary>
/// Finds quadrilateral (square-like) contours: grayscale -> 7x7 Gaussian blur
/// -> Canny -> contour extraction, keeping convex 4-vertex approximations
/// whose corner angles are all close to 90 degrees.
/// </summary>
/// <param name="image">source BGR image</param>
/// <param name="squares">receives the accepted 4-point polygons (cleared first)</param>
/// <param name="inv">unused; kept for signature compatibility with callers</param>
public static void FindSquares(Mat image, List <List <Point> > squares, bool inv = false)
{
    squares.Clear();
    Mat grey = image.CvtColor(ColorConversionCodes.BGR2GRAY);
    Mat blur = grey.GaussianBlur(new Size(7, 7), 1.5, 1.5);
    Mat canny = blur.Canny(0, 30, 3);

    // find contours and store them all as a list
    Point[][] contours;
    HierarchyIndex[] hierarchIndex;
    canny.FindContours(out contours, out hierarchIndex, RetrievalModes.List, ContourApproximationModes.ApproxSimple);

    // test each contour
    for (int i = 0; i < contours.Length; i++)
    {
        // Approximate the contour with a fixed 9px tolerance.
        // BUG FIX: the original discarded the return value of ApproxPolyDP and
        // tested the raw contour instead, so the 4-vertex check almost never
        // passed. The approximation result is now actually used.
        var approx = new List <Point>(Cv2.ApproxPolyDP(contours[i], 9, true));

        // square contours should have 4 vertices after approximation,
        // relatively large area (to filter out noisy contours) and be convex.
        // Note: absolute value of an area is used because area may be positive
        // or negative - in accordance with the contour orientation
        if (approx.Count == 4 && Math.Abs(Cv2.ContourArea(approx)) > 5 && Cv2.IsContourConvex(approx))
        {
            double maxCosine = 0;
            for (int j = 2; j < 5; j++)
            {
                // find the maximum cosine of the angle between joint edges
                double cosine = Math.Abs(Angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                maxCosine = Math.Max(maxCosine, cosine);
            }
            // if cosines of all angles are small (all angles are ~90 degree)
            // then write quadrangle vertices to the resultant sequence
            if (maxCosine < 0.3)
            {
                squares.Add(approx);
            }
        }
    }
}
/// <summary>
/// Applies the processing method selected by <paramref name="img_index"/> to the
/// current image and shows the result, except for the Sobel/ImageOperator paths
/// (indices 2 and 3) which display their own output.
/// NOTE(review): cases 4-7 all run the same Laplacian as case 1 but do not
/// update cur_method, so ImShow uses the previous method's window title —
/// presumably placeholder cases; confirm intent.
/// </summary>
public void showImage(int img_index)
{
    switch (img_index)
    {
    case 0:
        // Canny with UI-controlled thresholds.
        cv_out_img = cv_cur_img.Canny(Canny_min.Value, Canny_max.Value);
        cur_method = 0;
        break;

    case 1:
        // Laplacian, same depth as source, 3x3 aperture.
        Cv2.Laplacian(cv_cur_img, cv_out_img, -1, 3);
        cur_method = 1;
        break;

    case 2:
        //Cv2.Sobel(cv_cur_img, cv_out_img, MatType.CV_16S, 1, 0, 3);
        Sobel(cur_img_path[sel_img.SelectedIndex]);
        cur_method = 2;
        break;

    case 3:
        cur_method = 3;
        ImageOperator(cur_img_path[sel_img.SelectedIndex]);
        break;

    case 4:
        Cv2.Laplacian(cv_cur_img, cv_out_img, -1, 3);
        break;

    case 5:
        Cv2.Laplacian(cv_cur_img, cv_out_img, -1, 3);
        break;

    case 6:
        Cv2.Laplacian(cv_cur_img, cv_out_img, -1, 3);
        break;

    case 7:
        Cv2.Laplacian(cv_cur_img, cv_out_img, -1, 3);
        break;
    }
    // Indices 2 and 3 render through their own helpers, so skip ImShow for them.
    if (img_index != 3 && img_index != 2)
    {
        Cv2.ImShow(cur_pro_method[cur_method], cv_out_img);
    }
}
/// Chamfer-matching demo: locates the eye template's edge contour inside the
/// Lenna image, paints every candidate path green, re-paints the first match
/// magenta, then displays and saves the overlay.
private static void ChamferMatchingSample()
{
    using (var scene = new Mat("data/lenna.png", LoadMode.GrayScale))
    using (var template = new Mat("data/lennas_eye.png", LoadMode.GrayScale))
    {
        Point[][] matchPaths;
        float[] matchCosts;
        using (var sceneEdges = scene.Canny(50, 200))
        using (var templateEdges = template.Canny(50, 200))
        {
            // Persist the intermediate edge maps for inspection.
            sceneEdges.SaveImage("e1.png");
            templateEdges.SaveImage("e2.png");

            var matchCount = Cv2.ChamferMatching(sceneEdges, templateEdges, out matchPaths, out matchCosts);
            Console.WriteLine(matchCount);
            Console.WriteLine(matchPaths.Length);

            using (var overlay = scene.CvtColor(ColorConversion.GrayToRgb))
            {
                // Paint every matched path green, logging its cost.
                var costIndex = 0;
                foreach (var path in matchPaths)
                {
                    foreach (var p in path)
                    {
                        overlay.Set <Vec3b>(p.Y, p.X, new Vec3b(0, 255, 0));
                    }
                    Console.WriteLine(matchCosts[costIndex]);
                    costIndex++;
                }
                // Highlight the first path in magenta on top.
                foreach (var p in matchPaths[0])
                {
                    overlay.Set <Vec3b>(p.Y, p.X, new Vec3b(255, 0, 255));
                }
                Window.ShowImages(overlay);
                overlay.SaveImage("final.png");
            }
        }
    }
}
/// Probabilistic Hough line detection; parameters are pulled from the
/// algorithm's property list: [0] rho, [1] theta*100, [2] threshold,
/// [3] minLineLength, [4] maxLineGap, [5] color, [6] thickness, [7] line type.
public void HoughLines(SoftwareBitmap input, SoftwareBitmap output, Algorithm algorithm)
{
    if (algorithm.AlgorithmName == "HoughLines")
    {
        using Mat src = SoftwareBitmap2Mat(input);
        using Mat dst = new Mat(src.Rows, src.Cols, MatType.CV_8UC4);
        src.CopyTo(dst);

        // Edge detection on the grayscale frame feeds the Hough transform.
        using Mat graySrc = src.CvtColor(ColorConversionCodes.BGRA2GRAY);
        using Mat edgeMap = graySrc.Canny(50, 200);

        var detected = Cv2.HoughLinesP(edgeMap,
                                       (double)algorithm.AlgorithmProperties[0].CurrentValue,
                                       (double)algorithm.AlgorithmProperties[1].CurrentValue / 100.0,
                                       (int)algorithm.AlgorithmProperties[2].CurrentValue,
                                       (double)algorithm.AlgorithmProperties[3].CurrentValue,
                                       (double)algorithm.AlgorithmProperties[4].CurrentValue);

        // Draw each detected segment with the configured pen settings.
        for (int i = 0; i < detected.Length; i++)
        {
            Cv2.Line(dst, detected[i].P1, detected[i].P2,
                     (Scalar)algorithm.AlgorithmProperties[5].CurrentValue,
                     (int)algorithm.AlgorithmProperties[6].CurrentValue,
                     (LineTypes)algorithm.AlgorithmProperties[7].CurrentValue);
        }

        Mat2SoftwareBitmap(dst, output);
    }
}
/// Preview filter: Canny over the Sobel gradient magnitude of a blurred
/// grayscale frame, using the instance's kernel/scale/delta/threshold settings.
public override Mat Preview(Mat frame)
{
    // Sobel/Gaussian kernels must be odd; bump an even size up by one.
    if (ksize % 2 != 1)
    {
        ksize++;
    }

    var gray = frame.CvtColor(ColorConversionCodes.BGR2GRAY);
    var smoothed = gray.GaussianBlur(new Size(ksize, ksize), 0);

    // Per-axis derivatives in 16-bit (avoids overflow), scaled back to 8-bit.
    var absGradX = smoothed.Sobel(MatType.CV_16S, 1, 0, ksize, scale, delta).ConvertScaleAbs();
    var absGradY = smoothed.Sobel(MatType.CV_16S, 0, 1, ksize, scale, delta).ConvertScaleAbs();

    // Approximate gradient magnitude as the average of the two axes.
    var magnitude = new Mat();
    Cv2.AddWeighted(absGradX, 0.5, absGradY, 0.5, 0, magnitude);

    return magnitude.Canny(LTr, HTr);
}
/// <summary>
/// Detects contours inside <paramref name="rect"/> of <paramref name="srcMat"/>
/// (grayscale + Canny on the ROI) and draws them filled in red back onto the
/// full image; when <paramref name="colormode"/> is non-zero the zones are also
/// recolored via ZoneColor. Always returns srcMat, even on failure.
/// </summary>
private static Mat findContur(Mat srcMat, Rect rect, int colormode)
{
    Mat roi = null;
    Mat edges = null;
    try
    {
        // Contour coordinates are shifted by the ROI origin so they land at
        // the right place on the full-size image.
        Point2f offset = new Point2f(rect.X, rect.Y);
        roi = new Mat(srcMat, rect);
        Cv2.CvtColor(roi, roi, ColorConversionCodes.BGR2GRAY);
        // BUG FIX: the original reassigned its single `result` variable to the
        // Canny output, orphaning the grayscale ROI Mat's native memory.
        edges = roi.Canny(100, 250, 3, true);
        OpenCvSharp.Point[][] points;
        HierarchyIndex[] indexs;
        Cv2.FindContours(edges, out points, out indexs, RetrievalModes.List, ContourApproximationModes.ApproxSimple, offset);
        if (points != null && points.Length > 0)
        {
            for (int i = 0; i < points.Length; i++)
            {
                // Filled (-1 thickness) red contours, unlimited nesting depth.
                srcMat.DrawContours(points, i, Scalar.Red, -1, LineTypes.Link8, null, 2147483647);
                if (colormode != 0)
                {
                    ZoneColor(srcMat, points, colormode);
                }
            }
        }
    }
    catch (Exception e)
    {
        // Best-effort: keep returning the (possibly partially drawn) source,
        // but surface the failure instead of silently discarding it as the
        // original's no-op `e.ToString()` did.
        System.Diagnostics.Debug.WriteLine(e);
    }
    finally
    {
        // BUG FIX: dispose on every path — the original disposed only on the
        // success path, leaking both Mats whenever an exception was thrown.
        edges?.Dispose();
        roi?.Dispose();
    }
    return srcMat;
}
/// Contour-detection experiment: loads an image, finds contours on its Canny
/// edge map, outlines them all in orange, overdraws the top-level contours in
/// random colors, and saves the result.
/// NOTE(review): the Tesseract engine is created but never used in the live
/// code path, and the ApproxPolyDP result inside the loop is unused — leftovers
/// from the OCR experiments kept in the comments below, presumably.
private void Test3(string img, double canny1, double canny2, double blur)
{
    var tess = new TesseractEngine(@"./wwwroot/tessdata", "eng", EngineMode.LstmOnly);
    byte[] imageData = System.IO.File.ReadAllBytes(@"./wwwroot/images/" + img);
    Mat img1 = Mat.FromImageData(imageData, ImreadModes.Color);
    //Convert the img1 to grayscale and then filter out the noise
    Mat gray1 = Mat.FromImageData(imageData, ImreadModes.Grayscale) /*.PyrDown().PyrUp()*/;
    //gray1 = gray1.GaussianBlur(new OpenCvSharp.Size(blur, blur), 0);
    //gray1 = gray1.AdaptiveThreshold(255, AdaptiveThresholdTypes.MeanC, ThresholdTypes.BinaryInv, (int)canny1, canny2); // 11,2 ; 75,10 ; 60,255
    // `blur` must be a positive odd kernel size — TODO confirm callers.
    gray1 = gray1.GaussianBlur(new Size(blur, blur), 0);
    //gray1 = gray1.Threshold(128, 255, ThresholdTypes.Binary);
    //Canny Edge Detector
    //Image<Gray, Byte> cannyGray = gray1.Canny(20, 50);
    //Image<Bgr, Byte> imageResult = img1.Copy();
    Mat cannyGray = gray1.Canny(canny1, canny2);
    //var cannyGray = gray1; // would also need GaussianBlur, adaptiveThreshold
    Random r = new Random();
    //int lastY = 0;
    Point[][] contours;         //vector<vector<Point>> contours;
    HierarchyIndex[] hierarchy; //vector<Vec4i> hierarchy;
    //int draw = 0;
    Cv2.FindContours(cannyGray, out contours, out hierarchy, mode: RetrievalModes.Tree, method: ContourApproximationModes.ApproxSimple);
    Debug.WriteLine("po�et - " + contours.Length);
    Mat copy = img1.Clone();
    // Outline every contour once in orange.
    Cv2.DrawContours(copy, contours, -1, Scalar.Orange);
    // Walk the sibling chain, drawing only top-level contours (Parent == -1).
    var j = 0;
    while (j != -1)
    {
        var index = hierarchy[j];
        if (index.Parent != -1)
        {
            j = index.Next;
            continue;
        }
        Scalar scalar = Scalar.FromRgb(r.Next(0, 255), r.Next(0, 255), r.Next(0, 255));
        Cv2.DrawContours(copy, contours, j, scalar);
        var edges = contours[j];
        // Polygon approximation at 1% of arc length (result currently unused).
        Point[] contoursAp = Cv2.ApproxPolyDP(edges, Cv2.ArcLength(edges, true) * 0.01, true);
        //Debug.WriteLine(j + "," + contoursAp.Length);
        /*var rect = Cv2.BoundingRect(edges);
         * var roi2 = img1.Clone(rect);
         * roi2.SaveImage("pozri-" + j + ".png");*/
        j = index.Next;
    }
    // Disabled per-contour ROI export; only the counter remains.
    var m = 0;
    foreach (var c in contours)
    {
        //var rect = Cv2.BoundingRect(c);
        //var roi2 = img1.Clone(rect);
        //roi2.SaveImage("pozri-" + m + ".png");
        m++;
    }
    copy.SaveImage("wwwroot/images/output.png");
}
/// Contour pipeline driven by the Algorithm property list:
/// [0] retrieval mode, [1] approximation method, [2] offset, [3] color,
/// [4] thickness, [5] line type, [6]/[7] Canny thresholds, [8] min length.
/// NOTE(review): `async void` means exceptions here are unobservable to the
/// caller — acceptable only if this is wired as an event handler; confirm.
public async void Contours(SoftwareBitmap input, SoftwareBitmap output, Algorithm algorithm)
{
    if (algorithm.AlgorithmName == "Contours")
    {
        using Mat mInput = SoftwareBitmap2Mat(input);
        using Mat mOutput = new Mat(mInput.Rows, mInput.Cols, MatType.CV_8UC4);
        mInput.CopyTo(mOutput);
        using Mat gray = mInput.CvtColor(ColorConversionCodes.BGRA2GRAY);
        using Mat edges = gray.Canny((double)algorithm.AlgorithmProperties[6].CurrentValue, (double)algorithm.AlgorithmProperties[7].CurrentValue);
        Cv2.FindContours(
            image: edges,
            contours: out OpenCvSharp.Point[][] contours,
            hierarchy: out HierarchyIndex[] outputArray,
            mode: (RetrievalModes)algorithm.AlgorithmProperties[0].CurrentValue,
            method: (ContourApproximationModes)algorithm.AlgorithmProperties[1].CurrentValue,
            offset: (Point)algorithm.AlgorithmProperties[2].CurrentValue);
        // Track the longest contour while drawing everything above the
        // configured minimum length.
        int maxLen = 0;
        int maxIdx = -1;
        for (int i = 0; i < contours.Length; i++)
        {
            if (contours[i].Length > maxLen)
            {
                maxIdx = i;
                maxLen = contours[i].Length;
            }
            if (contours[i].Length > (int)algorithm.AlgorithmProperties[8].CurrentValue)
            {
                Cv2.DrawContours(
                    mOutput,
                    contours,
                    contourIdx: i,
                    color: (Scalar)algorithm.AlgorithmProperties[3].CurrentValue,
                    thickness: (int)algorithm.AlgorithmProperties[4].CurrentValue,
                    lineType: (LineTypes)algorithm.AlgorithmProperties[5].CurrentValue,
                    hierarchy: outputArray,
                    maxLevel: 0);
            }
        }
        if (maxIdx != -1)
        {
            // Approximate the longest contour; `res` is currently unused —
            // presumably intended to feed the disabled DrawContours/area code
            // that used to live here. Confirm before removing.
            var res = Cv2.ApproxPolyDP(contours[maxIdx], 1, true);
        }
        Mat2SoftwareBitmap(mOutput, output);
        // Must run on UI thread. The winrt container also needs to be set.
        if (App.container != null)
        {
            await App.container.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
            {
                Cv2.ImShow("Contours", mOutput);
            });
        }
    }
}
/// Lane detection on a single still image: crops the camera's work area,
/// intersects HSV-based and gray-based Canny edge maps, extracts and filters
/// sensor groups through the pipeline stages, then draws the winning lane
/// lines back on the full frame and displays it at half size.
static void FindLaneInTheImage(string path)
{
    Mat workAreaMask = CreateMask();
    using (Window win1 = new Window("test1"))
    {
        Mat image = new Mat(path);
        // Get the work area
        Mat imageS = image.SubMat(Camera.vert_frame[0], Camera.vert_frame[1], Camera.hor_frame[0], Camera.hor_frame[1]);
        Mat workArea = new Mat();
        imageS.CopyTo(workArea, workAreaMask);
        // Get HSV, gray and canny. Only pixels that are edges in BOTH the HSV
        // and the grayscale view survive the AND below.
        // NOTE(review): RGB2HSV is applied to an OpenCV-loaded (BGR) image —
        // possibly deliberate channel-swapping; confirm.
        Mat hsvImage = workArea.CvtColor(ColorConversionCodes.RGB2HSV);
        Mat canny1 = hsvImage.Canny(40, 60);
        Mat gray = workArea.CvtColor(ColorConversionCodes.BGR2GRAY);
        Mat canny2 = gray.Canny(40, 60);
        Mat canny = new Mat();
        Cv2.BitwiseAnd(canny1, canny2, canny);
        // Get, filter and draw contours
        Mat hsvContoures = new Mat();
        hsvImage.CopyTo(hsvContoures);
        var contoures = FindContoures(canny);
        hsvContoures.DrawContours(contoures, -1, Scalar.Red);
        // Get pixel indexers for fast per-pixel reads below.
        MatOfByte3 hsvContInd = new MatOfByte3(hsvContoures);
        MatOfByte3 hsvInd = new MatOfByte3(hsvImage);
        var hsvContIndexer = hsvContInd.GetIndexer();
        var hsvIndexer = hsvInd.GetIndexer();
        // Make steps of the algorithm: sample sensors, then progressively
        // filter/group them; groups smaller than 3 sensors are dropped.
        List <Sensor> sensors = GetSensors(hsvContoures, hsvContIndexer);
        List <Sensor> filteredByContours = FilterByContours(sensors, hsvContIndexer);
        List <Sensor> filteredByColors = FilterByColorAndChangeColor(filteredByContours, hsvIndexer);
        List <Sensor> filteredByNearSensors = FilterByNearSensors(filteredByColors);
        List <List <Sensor> > groupedByAngle = GroupByAngle(filteredByNearSensors).Where(g => g.Count > 2).ToList();
        List <List <Sensor> > groupedByDistance = GroupByDistance(groupedByAngle).Where(g => g.Count > 2).ToList();
        List <List <Sensor> > groupedWithoudCovering = DeleteCovering(groupedByDistance);
        List <List <Sensor> > unionGroups = UnionGroups(groupedWithoudCovering).Where(g => g.Count > 2).ToList();
        List <List <Sensor> > resultGroups = SelectGroups(unionGroups);
        // Draw the result, translating work-area coordinates back to the
        // full frame via the camera offsets.
        foreach (var group in resultGroups)
        {
            if (group != null)
            {
                foreach (var line in GetLinesForGroup(group))
                {
                    image.Line(line.x1 + Camera.hor_frame[0], line.y1 + Camera.vert_frame[0], line.x2 + Camera.hor_frame[0], line.y2 + Camera.vert_frame[0], Scalar.Blue, 5);
                }
            }
        }
        Mat imageForDisplay = image.Resize(new Size(0, 0), 0.5, 0.5);
        win1.ShowImage(imageForDisplay);
        Cv2.WaitKey(0);
        // Free resourses
        image.Release();
        imageS.Release();
        workArea.Release();
        hsvInd.Release();
        hsvContInd.Release();
        gray.Release();
        canny1.Release();
        canny2.Release();
        canny.Release();
        hsvImage.Release();
        hsvContoures.Release();
    }
}
/// <summary>
/// Finds polygon-like shape candidates in the bitmap: Canny (thresholds scaled
/// by the image's mean intensity) -> contours -> polygon approximation.
/// Candidates with 4-19 vertices and at least 20x20 px are either exported as
/// 128x128 grayscale samples (saveShapes) or outlined in blue on the bitmap.
/// </summary>
/// <param name="bitmap">image to scan; replaced with the annotated version</param>
/// <param name="saveShapes">true = save crops to the shape_samples folder instead of drawing</param>
private void detectShapeCandidates(ref Bitmap bitmap, Boolean saveShapes)
{
    Debug.WriteLine("Running OpenCV");
    string myPhotos = Environment.GetFolderPath(Environment.SpecialFolder.MyPictures);
    Mat colorMat = BitmapConverter.ToMat(bitmap);
    // Mean intensity drives the adaptive Canny thresholds below.
    MatOfDouble mu = new MatOfDouble();
    MatOfDouble sigma = new MatOfDouble();
    Cv2.MeanStdDev(colorMat, mu, sigma);
    double mean = mu.GetArray(0, 0)[0];
    mu.Dispose();
    sigma.Dispose();
    Mat greyMat = new Mat();
    Cv2.CvtColor(colorMat, greyMat, ColorConversion.BgraToGray, 0);
    // NOTE(review): a 1x1 kernel with sigma 5 — the sigma dominates here;
    // looks like the kernel size was meant to be larger. Confirm.
    greyMat = greyMat.GaussianBlur(new OpenCvSharp.CPlusPlus.Size(1, 1), 5, 5, BorderType.Default);
    greyMat = greyMat.Canny(0.5 * mean, 1.2 * mean, 3, true);
    Mat contourMat = new Mat(greyMat.Size(), colorMat.Type());
    greyMat.CopyTo(contourMat);
    var contours = contourMat.FindContoursAsArray(ContourRetrieval.List, ContourChain.ApproxSimple);
    for (int j = 0; j < contours.Length; j++)
    {
        // Approximate at 1% of arc length; keep rough polygons only.
        var poly = Cv2.ApproxPolyDP(contours[j], 0.01 * Cv2.ArcLength(contours[j], true), true);
        int num = poly.Length;
        if (num >= 4 && num < 20)
        {
            var color = Scalar.Blue;
            var rect = Cv2.BoundingRect(poly);
            // Discard tiny candidates.
            if (rect.Height < 20 || rect.Width < 20)
            {
                continue;
            }
            if (saveShapes)
            {
                string path = Path.Combine(myPhotos, "shape_samples");
                path = Path.Combine(path, "shape_sample_" + Path.GetRandomFileName() + ".png");
                // Inflate by 10% for context, clamped to the image bounds.
                var matRect = new OpenCvSharp.CPlusPlus.Rect(0, 0, greyMat.Width, greyMat.Height);
                rect.Inflate((int)(rect.Width * 0.1), (int)(rect.Height * 0.1));
                rect = rect.Intersect(matRect);
                Mat shapeMat = greyMat.SubMat(rect);
                var size = new OpenCvSharp.CPlusPlus.Size(128, 128);
                shapeMat = shapeMat.Resize(size);
                Bitmap shape = shapeMat.ToBitmap();
                shape.Save(path);
                shape.Dispose();
                shapeMat.Dispose();
                continue;
            }
            Cv2.Rectangle(colorMat, rect, color, 2);
        }
    }
    // Hand the annotated image back to the caller and release the natives.
    bitmap = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(colorMat);
    colorMat.Dispose();
    greyMat.Dispose();
    contourMat.Dispose();
}
/// Lane detection over a video: the same per-frame pipeline as the still-image
/// variant (work-area crop, HSV&amp;gray Canny intersection, sensor grouping),
/// but every other frame is skipped and the previous frame's result groups are
/// fed back into SelectGroups for temporal smoothing. ESC exits.
static void FindLaneInTheVideo(string path)
{
    VideoCapture capture = new VideoCapture(path);
    Mat workAreaMask = CreateMask();
    using (Window win1 = new Window("test1"))
    {
        Mat image = new Mat();
        // We will save previous results here
        List <List <Sensor> > oldResultGroups = null;
        int[] countTaked = new int[2] {
            0, 0
        };
        while (true)
        {
            DateTime dt1 = DateTime.Now; // per-frame timing start
            capture.Read(image);
            if (image.Empty())
            {
                break;
            }
            // Process only every second frame.
            if (capture.PosFrames % 2 != 0)
            {
                continue;
            }
            // Get the work area
            Mat image_s = image.SubMat(Camera.vert_frame[0], Camera.vert_frame[1], Camera.hor_frame[0], Camera.hor_frame[1]);
            Mat workArea = new Mat();
            image_s.CopyTo(workArea, workAreaMask);
            // Get HSV, gray and canny; only pixels that are edges in both
            // views survive the AND below.
            // NOTE(review): RGB2HSV on a BGR frame — same caveat as the
            // still-image variant; confirm intent.
            Mat hsv_image = workArea.CvtColor(ColorConversionCodes.RGB2HSV);
            Mat canny1 = hsv_image.Canny(40, 60);
            Mat gray = workArea.CvtColor(ColorConversionCodes.BGR2GRAY);
            Mat canny2 = gray.Canny(40, 60);
            Mat canny = new Mat();
            Cv2.BitwiseAnd(canny1, canny2, canny);
            // Get, filter and draw contours
            Mat hsv_contoures = new Mat();
            hsv_image.CopyTo(hsv_contoures);
            var contoures = FindContoures(canny);
            hsv_contoures.DrawContours(contoures, -1, Scalar.Red);
            // Get indexers for fast per-pixel access.
            MatOfByte3 hsv_cont_ind = new MatOfByte3(hsv_contoures);
            MatOfByte3 hsv_ind = new MatOfByte3(hsv_image);
            var hsv_cont_indexer = hsv_cont_ind.GetIndexer();
            var hsv_indexer = hsv_ind.GetIndexer();
            // Make steps of the algorithm; groups smaller than 3 sensors drop out.
            List <Sensor> sensors = GetSensors(hsv_contoures, hsv_cont_indexer);
            List <Sensor> filteredByContours = FilterByContours(sensors, hsv_cont_indexer);
            List <Sensor> filteredByColors = FilterByColorAndChangeColor(filteredByContours, hsv_indexer);
            List <Sensor> filteredByNearSensors = FilterByNearSensors(filteredByColors);
            List <List <Sensor> > groupedByAngle = GroupByAngle(filteredByNearSensors).Where(g => g.Count > 2).ToList();
            List <List <Sensor> > groupedByDistance = GroupByDistance(groupedByAngle).Where(g => g.Count > 2).ToList();
            List <List <Sensor> > groupedWithoudCovering = DeleteCovering(groupedByDistance);
            List <List <Sensor> > unionGroups = UnionGroups(groupedWithoudCovering).Where(g => g.Count > 2).ToList();
            // Temporal smoothing: prefer groups consistent with the last frame.
            List <List <Sensor> > resultGroups = SelectGroups(unionGroups, oldResultGroups, ref countTaked);
            image.SaveImage("image.png");
            // Draw the result, translating back to full-frame coordinates.
            foreach (var group in resultGroups)
            {
                if (group != null)
                {
                    foreach (var line in GetLinesForGroup(group))
                    {
                        image.Line(line.x1 + Camera.hor_frame[0], line.y1 + Camera.vert_frame[0], line.x2 + Camera.hor_frame[0], line.y2 + Camera.vert_frame[0], Scalar.Blue, 5);
                    }
                }
            }
            image.SaveImage("res.png");
            Mat imageForDisplay = image.Resize(new Size(0, 0), 0.5, 0.5);
            win1.ShowImage(imageForDisplay);
            oldResultGroups = resultGroups;
            DateTime dt2 = DateTime.Now;
            Console.WriteLine("{0}\tms", (dt2 - dt1).TotalMilliseconds);
            int key = Cv2.WaitKey(0);
            if (key == 27)
            {
                break; //escape
            }
            // Free resourses
            image_s.Release();
            workArea.Release();
            hsv_ind.Release();
            hsv_cont_ind.Release();
            gray.Release();
            canny1.Release();
            canny2.Release();
            canny.Release();
            hsv_image.Release();
            hsv_contoures.Release();
        }
    }
}
/// <summary>
/// Document-scanner pipeline: detect edges, find the paper's 4-point contour,
/// apply a perspective ("four point") transform, then adaptively threshold to
/// a black-and-white scan. Shows each stage in a window.
/// </summary>
public static void Run(Options options)
{
    //load the image and compute the ratio of the old height
    //to the new height, clone it, and resize it
    using (var disposer = new Disposer())
    {
        Mat image = new Mat(options.Image);
        disposer.Add(image);
        Mat orig = image.Clone();
        disposer.Add(orig);
        // Work at a fixed 500px height; `ratio` maps results back to full size.
        double ratio = image.Height / 500.0;
        image = ImageUtil.Resize(image, height: 500);
        disposer.Add(image);
        Mat gray = image.CvtColor(ColorConversionCodes.BGR2GRAY);
        disposer.Add(gray);
        gray = gray.GaussianBlur(new Size(5, 5), 0);
        disposer.Add(gray);
        Mat edged = gray.Canny(75, 200);
        disposer.Add(edged);
        Console.WriteLine("STEP 1: Edge Detection");
        Cv2.ImShow("Image", image);
        Cv2.ImShow("Edged", edged);
        Cv2.WaitKey();
        Cv2.DestroyAllWindows();
        //find the contours in the edged image, keeping only the
        //largest ones, and initialize the screen contour
        Mat[] cnts;
        using (Mat edgedClone = edged.Clone())
        {
            // NOTE(review): the `new Mat()` hierarchy argument is never
            // disposed — minor native leak.
            edgedClone.FindContours(out cnts, new Mat(), RetrievalModes.List, ContourApproximationModes.ApproxSimple);
        }
        disposer.Add(cnts);
        Mat screenCnt = null;
        //loop over the contours (largest five by area first)
        foreach (Mat c in cnts.OrderByDescending(c => c.ContourArea()).Take(5))
        {
            //approximate the contour at 2% of its perimeter
            double peri = c.ArcLength(true);
            using (Mat approx = c.ApproxPolyDP(0.02 * peri, true))
            {
                //if our approximated contour has four points, then we
                //can assume that we have found our screen
                if (approx.Rows == 4)
                {
                    screenCnt = approx.Clone();
                    break;
                }
            }
        }
        if (screenCnt == null)
        {
            Console.WriteLine("Failed to find polygon with four points");
            return;
        }
        disposer.Add(screenCnt);
        //show the contour (outline) of the piece of paper
        Console.WriteLine("STEP 2: Find contours of paper");
        Cv2.DrawContours(image, new[] { screenCnt }, -1, Scalar.FromRgb(0, 255, 0), 2);
        Cv2.ImShow("Outline", image);
        Cv2.WaitKey();
        Cv2.DestroyAllWindows();
        //apply the four point transform to obtain a top-down
        //view of the original image (scaled back up via `ratio`)
        Mat warped = FourPointTransform(orig, screenCnt * ratio);
        disposer.Add(warped);
        //convert the warped image to grayscale, then threshold it
        //to give it that 'black and white' paper effect
        warped = warped.CvtColor(ColorConversionCodes.BGR2GRAY);
        disposer.Add(warped);
        // NOTE(review): maxValue 251 (not 255) with a 251-px block and offset
        // 10 — the 251 maxValue looks like a copy of the block size; confirm.
        Cv2.AdaptiveThreshold(warped, warped, 251, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.Binary, 251, 10);
        disposer.Add(warped);
        Console.WriteLine("STEP 3: Apply perspective transform");
        Mat origResized = ImageUtil.Resize(orig, height: 650);
        disposer.Add(origResized);
        Cv2.ImShow("Original", origResized);
        Mat warpedResized = ImageUtil.Resize(warped, height: 650);
        disposer.Add(warpedResized);
        Cv2.ImShow("Scanned", warpedResized);
        Cv2.WaitKey();
        Cv2.DestroyAllWindows();
    }
}
/// Contour-detection experiment: finds contours on the image's Canny edge map,
/// outlines them all in orange, overdraws top-level contours in random colors
/// and saves the result. Always returns "".
/// NOTE(review): `tess`, `lastY` and `draw` are only used by the large disabled
/// experiment that used to follow the live code (section/OCR extraction via
/// ApproxPolyDP + BoundingRect + Tesseract); they are dead in the current path.
private string Test(string img, double canny1, double canny2, double blur)
{
    var tess = new TesseractEngine(@"./wwwroot/tessdata", "eng", EngineMode.LstmOnly);
    byte[] imageData = System.IO.File.ReadAllBytes(@"./wwwroot/images/" + img);
    Mat img1 = Mat.FromImageData(imageData, ImreadModes.Color);
    //Convert the img1 to grayscale and then filter out the noise
    Mat gray1 = Mat.FromImageData(imageData, ImreadModes.Grayscale) /*.PyrDown().PyrUp()*/;
    //gray1 = gray1.GaussianBlur(new OpenCvSharp.Size(blur, blur), 0);
    //gray1 = gray1.AdaptiveThreshold(255, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.BinaryInv, 105, 2); // 11,2 ; 75,10 ; 60,255
    //gray1 = gray1.Threshold(60, 255, ThresholdTypes.BinaryInv);
    // Prints width+height as a single sum (not "WxH") — intentional? confirm.
    Console.WriteLine(img1.Width + img1.Height);
    //Canny Edge Detector
    //Image<Gray, Byte> cannyGray = gray1.Canny(20, 50);
    //Image<Bgr, Byte> imageResult = img1.Copy();
    Mat cannyGray = gray1.Canny(canny1, canny2);
    //var cannyGray = gray1; // would also need GaussianBlur, adaptiveThreshold
    Random r = new Random();
    int lastY = 0;
    Point[][] contours;         //vector<vector<Point>> contours;
    HierarchyIndex[] hierarchy; //vector<Vec4i> hierarchy;
    int draw = 0;
    Cv2.FindContours(cannyGray, out contours, out hierarchy, mode: RetrievalModes.Tree, method: ContourApproximationModes.ApproxSimple);
    Mat copy = img1.Clone();
    // Outline every contour once in orange.
    Cv2.DrawContours(copy, contours, -1, Scalar.Orange);
    /*for (int i = contours.Length - 1; i >= 0; i--)
     * {
     *  Scalar scalar = Scalar.FromRgb(r.Next(0, 255), r.Next(0, 255), r.Next(0, 255));
     *  Cv2.DrawContours(copy, contours, i, scalar);
     * }*/
    // Walk the sibling chain, drawing only top-level contours (Parent == -1).
    var j = 0;
    while (j != -1)
    {
        var index = hierarchy[j];
        if (index.Parent != -1)
        {
            j = index.Next;
            continue;
        }
        Scalar scalar = Scalar.FromRgb(r.Next(0, 255), r.Next(0, 255), r.Next(0, 255));
        Cv2.DrawContours(copy, contours, j, scalar);
        j = index.Next;
    }
    copy.SaveImage("wwwroot/images/output.png");
    Debug.WriteLine("po�et " + contours.Length);
    /* Disabled experiment (kept for reference): iterated contours newest-first,
     * approximated each (ApproxPolyDP at 5% of arc length), computed area and
     * bounding box, saved per-contour ROIs, classified thin full-width
     * contours as horizontal section separators (emitting <section> HTML with
     * random background colors, tracking lastY), and ran Tesseract OCR over
     * candidate text regions to append <span> elements to htmlBody. */
    return("");
}
/// <summary>
/// Builds a binary (8UC1) convex-hull mask of the bright object in <paramref name="img"/>
/// (edge detection -> edge-point collection -> outlier rejection -> convex hull) and
/// measures the mask geometry through the <c>out</c> parameters. Returns the mask as a
/// Bitmap, or null when nothing was detected or the object touches the image border.
/// </summary>
/// <param name="img">Source image; converted to grayscale internally.</param>
/// <param name="mask_length">Perimeter (arc length) of the mask's outer contour.</param>
/// <param name="mask_area">Absolute area of the mask's outer contour.</param>
/// <param name="mask_width">Width of the hull's bounding extent (x_max - x_min).</param>
/// <param name="mask_height">Height of the hull's bounding extent (y_max - y_min).</param>
/// <param name="mask_pvheight">Height from y_min down to the midpoint of the Y values at the extreme-x hull points.</param>
/// <param name="num_smooth">Number of median-blur passes applied before Canny (0 = none).</param>
/// <param name="contrast">Contrast value fed to CalcLut for the LUT stretch.</param>
/// <param name="canny1">Canny lower threshold.</param>
/// <param name="canny2">Canny upper threshold.</param>
/// <param name="image_mask_spc">Always set to null in this implementation.</param>
/// <param name="mask2_area">Always set to 0 in this implementation.</param>
/// <param name="filter_size">Median filter aperture used by the smoothing passes.</param>
/// <param name="brightAreaThreshold">Unused in this method body — TODO confirm intended use.</param>
/// <param name="darkAreaThreshold">Unused in this method body — TODO confirm intended use.</param>
private Bitmap CreateObjectMask(Bitmap img, /*out IplImage image_mask,*/
                                out double mask_length, out double mask_area, out double mask_width, out double mask_height,
                                out double mask_pvheight, int num_smooth, int contrast, double canny1, double canny2,
                                out Mat image_mask_spc, out double mask2_area,
                                int filter_size = 3, int brightAreaThreshold = -1, int darkAreaThreshold = -1)
{
    Bitmap dst = null;

    //IplImage img_mask = Cv.CreateImage(new CvSize(img.Width, img.Height), BitDepth.U8, 1);
    // Single-channel mask, initialised to black.
    Mat img_mask = new Mat(new OpenCvSharp.Size(img.Width, img.Height), MatType.CV_8UC1, 0);

    // These two outputs are not produced by this implementation.
    image_mask_spc = null;
    mask_length = mask_area = mask_width = mask_height = mask_pvheight = mask2_area = 0;

    Mat img_gray;
    Mat img_canny;
    Mat img_mask_copy;

    int i, x, y, offset;
    IntPtr ptr;
    Byte pixel;

    //////////////////
    // Running statistics for the edge-point cloud: weighted centre and
    // mean/stddev of point-to-centre distance (used for outlier rejection).
    var distance = new List <double>();
    double center_x = 0;
    double center_y = 0;
    double center_count = 0;
    double distance_mean = 0;
    double distance_stddev = 0;
    double sum_m = 0;
    double sum_v = 0;
    double temp = 0;
    //////////////////

    //////////////////////////////////////////////////////////// 
    ////////////////////////Mask make/////////////////////////// 
    //////////////////////////////////////////////////////////// 
    img_gray = new Mat(new OpenCvSharp.Size(img.Width, img.Height), MatType.CV_8UC1, 0);
    img_canny = new Mat(new OpenCvSharp.Size(img.Width, img.Height), MatType.CV_8UC1, 0);
    img_mask_copy = new Mat(new OpenCvSharp.Size(img.Width, img.Height), MatType.CV_8UC1, 0);

    Mat src = BitmapConverter.ToMat(img);
    Cv2.CvtColor(src, img_gray, ColorConversionCodes.BGR2GRAY);

    //Contrast -> Increase the edge contrast for transparent diamond
    byte[] lut = CalcLut(contrast, 0);
    //img_gray.LUT(img_gray, lut);
    Cv2.LUT(img_gray, lut, img_gray);

    //Median filter -> Eliminate point noise in the image
    //Elimination of big dusts should be coded here
    if (num_smooth > 0)
    {
        // Legacy (pre-Cv2) smoothing calls kept for reference:
        //for (i = 0; i < num_smooth; i++) img_gray.Smooth(img_gray, SmoothType.Median, 3, 3, 0, 0);
        //for (i = 0; i < num_smooth; i++) img_gray.Smooth(img_gray, SmoothType.Median, filter_size, filter_size, 0, 0);
        for (i = 0; i < num_smooth; i++)
        {
            Cv2.MedianBlur(img_gray, img_gray, filter_size);
        }
        img_canny = img_gray.Canny(canny1, canny2);
    }
    else
    {
        img_canny = img_gray.Canny(canny1, canny2);
    }

    ///////////////////////////////////////////////////////////// 
    //ConvexHull
    ///////////////////////////////////////////////////////////// 
    //OpenCvSharp.CvMemStorage storage = new CvMemStorage(0);
    //CvSeq points = Cv.CreateSeq(SeqType.EltypePoint, CvSeq.SizeOf, CvPoint.SizeOf, storage);
    //CvSeq<CvPoint> points = new CvSeq<CvPoint>(SeqType.EltypePoint, CvSeq.SizeOf, storage);
    //CvPoint pt;
    List <OpenCvSharp.Point> points = new List <OpenCvSharp.Point>();
    OpenCvSharp.Point pt;

    // Scan the Canny image byte-by-byte through its raw data pointer and collect
    // every edge pixel, accumulating the centroid as we go.
    // NOTE(review): offset assumes stride == width (continuous 8UC1 Mat) — TODO confirm.
    ptr = img_canny.Data;
    for (y = 0; y < img_canny.Height; y++)
    {
        for (x = 0; x < img_canny.Width; x++)
        {
            offset = (img_canny.Width * y) + (x);
            pixel = Marshal.ReadByte(ptr, offset);
            if (pixel > 0)
            {
                pt.X = x;
                pt.Y = y;
                points.Add(pt);
                ////////////////////// 
                center_x = center_x + x;
                center_y = center_y + y;
                center_count++;
                ////////////////////// 
            }
        }
    }
    // NOTE(review): divides by zero (producing NaN) when no edge pixel was found;
    // the points.Count > 0 guard below keeps the NaNs from being used.
    center_x = center_x / center_count;
    center_y = center_y / center_count;

    //CvPoint[] hull;
    //CvMemStorage storage1 = new CvMemStorage(0);
    //CvSeq<CvPoint> contours;
    //List<Mat> hull = new List<Mat>();
    MatOfPoint hull = new MatOfPoint();
    int x_min = 3000, x_max = 0, y_min = 3000, y_max = 0;   // extremes over the hull points
    int y_x_min = 3000, y_x_max = 3000;                     // Y at the leftmost / rightmost hull points

    if (points.Count > 0)
    {
        //Calcurate Ave and Std of distance from each edge points to the weighed center
        for (i = 0; i < points.Count; i++)
        {
            pt = points[i];
            temp = Math.Sqrt((pt.X - center_x) * (pt.X - center_x) + (pt.Y - center_y) * (pt.Y - center_y));
            distance.Add(temp);
            sum_m += temp;
            sum_v += temp * temp;
        }
        distance_mean = sum_m / points.Count;
        temp = (sum_v / points.Count) - distance_mean * distance_mean;
        distance_stddev = Math.Sqrt(temp);

        // Outlier elimination: drop points farther than mean + 3 sigma from the centroid
        // (iterate backwards so RemoveAt does not shift unvisited indices).
        for (i = points.Count - 1; i >= 0; i--)
        {
            if (distance[i] > (distance_mean + 3.0 * distance_stddev))
            {
                points.RemoveAt(i);
            }
        }

        Cv2.ConvexHull(MatOfPoint.FromArray(points), hull, true);

        //2014/4/14 Add calc mask_width, mask_height and mask_pvheight
        foreach (OpenCvSharp.Point item in hull)
        {
            if (x_min > item.X)
            {
                x_min = item.X;
                y_x_min = item.Y;
            }
            else if (x_min == item.X && y_x_min > item.Y)
            {
                y_x_min = item.Y;
            }

            if (x_max < item.X)
            {
                x_max = item.X;
                y_x_max = item.Y;
            }
            else if (x_max == item.X && y_x_max > item.Y)
            {
                y_x_max = item.Y;
            }

            if (y_min > item.Y)
            {
                y_min = item.Y;
            }
            if (y_max < item.Y)
            {
                y_max = item.Y;
            }
        }

        mask_width = x_max - x_min;
        mask_height = y_max - y_min;
        mask_pvheight = ((double)y_x_max + (double)y_x_min) / 2 - (double)y_min;

        ///////////////////////////////////////////////////////////// 
        // For icecream cone shape diamond, need to use triangle mask
        ///////////////////////////////////////////////////////////// 
        if (diamond_group == DIAMOND_GROUPING.RBC_HighDepth)
        {
            // NOTE(review): OpenCvSharp.Point is a value type, so assigning to the
            // local copy 'p' never writes back into 'hull' — this loop appears to
            // have no effect. Confirm whether hull points were meant to be modified.
            for (i = 0; i < hull.Count(); i++)
            {
                OpenCvSharp.Point p = hull.At <OpenCvSharp.Point>(i);
                if (y_x_max >= y_x_min)
                {
                    if (p.Y > y_x_min)
                    {
                        p.X = x_max;
                        p.Y = y_x_max;
                    }
                }
                else
                {
                    if (p.Y > y_x_max)
                    {
                        p.X = x_min;
                        p.Y = y_x_min;
                    }
                }
            }
        }
        ////////////////////////////////////////////////////////////// 

        // Rasterise the hull into the mask.
        Cv2.FillConvexPoly(img_mask, hull, Scalar.White, LineTypes.AntiAlias, 0);

        //2013/11/3 Add erode function
        // 'erode' is a field declared elsewhere in this class: number of erosion passes.
        if (erode > 0)
        {
            for (i = 0; i < erode; i++)
            {
                Cv2.Erode(img_mask, img_mask, null);
            }
        }

        //Calc length and area of img_mask -> use for fancy shape diamonds
        //Cv.FindContours(img_mask, storage1, out contours, CvContour.SizeOf, ContourRetrieval.External, ContourChain.ApproxSimple);
        //Cv.FIndCOntours overwrites img_mask, need to use copy image
        //IplImage img_mask_copy = Cv.Clone(img_mask);
        //Cv2.Copy(img_mask, img_mask_copy);
        Mat hierarchy = new Mat();
        Mat[] contours;
        img_mask.CopyTo(img_mask_copy);
        Cv2.FindContours(img_mask_copy, out contours, hierarchy, RetrievalModes.External, ContourApproximationModes.ApproxSimple);
        //Cv.ReleaseImage(img_mask_copy);

        // The mask is a single filled convex polygon, so contour[0] is its outline.
        mask_length = Cv2.ArcLength(contours[0], true);
        mask_area = Math.Abs(Cv2.ContourArea(contours[0]));
        //Cv.ClearSeq(contours);
    }
    else
    {
        mask_length = 0.0;
        mask_area = 0.0;
    }

    //Memory release
    //Cv.ReleaseImage(img_gray);
    //Cv.ReleaseImage(img_canny);
    //Cv.ReleaseImage(img_mask_copy);
    //Cv.ClearSeq(points);
    //Cv.ReleaseMemStorage(storage);
    //Cv.ReleaseMemStorage(storage1);

    //if the diamond is out of croped image, do not calc color values
    // (non-short-circuit '|' kept as-is from the original)
    if (x_min == 0 | x_max == (img.Width - 1) | y_min == 0 | y_max == (img.Height - 1))
    {
        return(dst);
    }

    //img_mask.SaveImage(@"P:\Projects\DustDetection\TestSamples\gColorFancyImages\temp\image_mask_hiroshi.jpg");

    if (mask_length > 0)
    {
        dst = BitmapConverter.ToBitmap(img_mask);
    }

    return(dst);
}
/// <summary>
/// Document-scanner pipeline: detect the document's outline in the photo, apply a
/// four-point perspective transform to get a top-down view, run the "scanned paper"
/// filters, then save the result and push it to the UI.
/// </summary>
/// <param name="OriginalImage">Source photo (BGR); not modified by this method.</param>
public void execute(Mat OriginalImage)
{
    //clone image so the caller's Mat stays untouched
    Mat modifiedImage = new Mat(OriginalImage.Rows, OriginalImage.Cols, OriginalImage.Type());
    OriginalImage.CopyTo(modifiedImage);

    //Step 1 Grayscale
    modifiedImage = modifiedImage.CvtColor(ColorConversionCodes.BGR2GRAY);

    //Step 2 Blur the image
    //modifiedImage = modifiedImage.GaussianBlur(new Size(5, 5), 0);
    modifiedImage = modifiedImage.MedianBlur(3);

    //Step 3 find edges (Canny and Dilate)
    modifiedImage = modifiedImage.Canny(75, 200);
    // dilate canny output to remove potential
    // holes between edge segments
    modifiedImage = modifiedImage.Dilate(null);

    //Step 4 Find Contour with 4 points (rectangle) with largest area (find the doc edges)
    HierarchyIndex[] hierarchyIndexes;
    Point[][] contours;
    modifiedImage.FindContours(out contours, out hierarchyIndexes, RetrievalModes.List, ContourApproximationModes.ApproxSimple);

    double largestarea = 0;
    var largestareacontourindex = -1;   // -1 = no 4-point contour found yet
    Point[] docEdgesPoints = null;

    //debug purpose, uncomment to see all contours captured by openCV
    //debug_showallcontours(OriginalImage, hierarchyIndexes, contours);

    // FIX: walk the contour array with a plain index. The old code mixed a foreach
    // over 'contours' with an index advanced via hierarchyIndexes[...].Next, so the
    // contour being approximated ('cont') and the one measured ('contours[contourIndex]')
    // could diverge, and a .Next of -1 could index out of range. Also computes
    // ContourArea once per contour instead of twice.
    for (int contourIndex = 0; contourIndex < contours.Length; contourIndex++)
    {
        var cont = contours[contourIndex];
        var peri = Cv2.ArcLength(cont, true);
        //only take contour area that are closed shape no gap:
        //keep the candidate only when it approximates to exactly 4 vertices
        var approx = Cv2.ApproxPolyDP(cont, 0.02 * peri, true);
        var area = Cv2.ContourArea(cont);
        if (approx.Length == 4 && area > largestarea)
        {
            largestarea = area;
            largestareacontourindex = contourIndex;
            docEdgesPoints = approx;
        }
    }

    //draw contour (debug purpose) — only meaningful when a candidate was found
    if (largestareacontourindex >= 0)
    {
        Mat EdgingImage = new Mat(OriginalImage.Rows, OriginalImage.Cols, OriginalImage.Type());
        OriginalImage.CopyTo(EdgingImage);
        Cv2.DrawContours(
            EdgingImage,
            contours,
            largestareacontourindex,
            color: Scalar.Yellow,
            thickness: 3,
            lineType: LineTypes.Link8,
            hierarchy: hierarchyIndexes,
            maxLevel: int.MaxValue);
    }

    //Steps 4.1 find the max size of contour area (entire image)
    //to be used to check if the largest contour area is the doc edges (ratio)
    var imageSize = OriginalImage.Size().Height * OriginalImage.Size().Width;

    // Steps 5: apply the four point transform to obtain a top-down
    // view of the original image.
    // FIX: require that a 4-point contour was actually found. The old code indexed
    // contours[largestareacontourindex] (index 0 when nothing matched), which threw
    // IndexOutOfRange on an empty contour list and NullReference on docEdgesPoints
    // whenever contour 0 happened to cover >= 50% of the image.
    Mat transformImage = null;
    if (docEdgesPoints == null || largestarea < imageSize * 0.5)
    {
        //if largest quad is missing or smaller than 50% of the picture, assume
        //document edges not found; proceed with simple filter
        transformImage = apply_doc_filters(OriginalImage);
    }
    else
    {
        //doc closed edges detected, proceed transformation
        //convert corner points to Point2f for the perspective transform
        // NOTE(review): point2Fs is a field and is not cleared here, so repeated
        // calls would accumulate corners — preserved behavior, worth confirming.
        foreach (var item in docEdgesPoints)
        {
            point2Fs.Add(new Point2f(item.X, item.Y));
        }
        transformImage = transform(OriginalImage, point2Fs);
        if (transformImage != null)
        {
            //Step 6: grayscale it to give it that 'black and white' paper effect
            transformImage = apply_doc_filters(transformImage);
        }
    }

    if (transformImage != null)
    {
        // Persist, display, and hide the loading indicators.
        transformImage.SaveImage($"output_{Guid.NewGuid()}.jpg");
        var ms = transformImage.ToMemoryStream();
        myimg.Source = ImageSource.FromStream(() => new MemoryStream(ms.ToArray()));
        DependencyService.Get <ISaveViewFile>().SaveAndViewAsync("Output.jpg", ms);
        box.IsVisible = false;
        stackloading.IsVisible = false;
        loading.IsVisible = false;
        loading.IsRunning = false;
    }
}
/// <summary>
/// WinForms handler: lets the user pick an image file, then runs a series of
/// exploratory OpenCV steps on it (Canny edges, red-hue HSV masking, Hough circles
/// and lines, channel swap, contour drawing), showing intermediate results in
/// blocking CvWindow popups. Uses the legacy OpenCvSharp 2.x (CPlusPlus) API.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    ShowVideo();

    OpenFileDialog OF = new OpenFileDialog();
    OF.InitialDirectory = Application.StartupPath;
    OF.Filter = "Image files (*.jpg, *.jpeg, *.bmap, *.bmp, *.png, *.gif) | *.jpg; *.jpeg; *.bmap; *.bmp; *.png; *.gif";//TODO set filters...

    if (OF.ShowDialog() == System.Windows.Forms.DialogResult.OK)
    {
        //filename = OF.FileName;
        var frame = new Mat(OF.FileName);

        // Grayscale copy used for Canny / Hough-circle detection.
        var prevgrayframe = new Mat();
        Cv2.CvtColor(frame, prevgrayframe, ColorConversion.BgrToGray);

        // 3x3 elliptical kernel for the morphological close (dilate then erode) below.
        var elem = Cv2.GetStructuringElement(StructuringElementShape.Ellipse, new OpenCvSharp.CPlusPlus.Size(3, 3));

        //Cv2.MedianBlur(prevgrayframe, prevgrayframe, 5);
        //Cv2.GaussianBlur(prevgrayframe, prevgrayframe, new OpenCvSharp.CPlusPlus.Size(3,3), 2, 2);
        var edges = new Mat();
        //Cv2.Threshold(prevgrayframe, edges, 240, 255, ThresholdType.BinaryInv);
        //Cv2.MorphologyEx(edges, edges, MorphologyOperation.Open, elem);
        //var skel = GetSceleton(prevgrayframe);
        //edges = skel;
        Cv2.Canny(prevgrayframe, edges, 100, 200, 3);

        /*var elem = Cv2.GetStructuringElement(StructuringElementShape.Ellipse,
         * new OpenCvSharp.CPlusPlus.Size(2 * 3 + 1, 2 * 3 + 1),
         * new Point(3, 3));*/
        // Morphological close: fill small gaps in the edge map.
        edges = edges.Dilate(elem).Erode(elem);
        //edges = edges.Erode(new Mat());
        //CvInvoke.MedianBlur(prevgrayframe, prevgrayframe, 5);
        //CvInvoke.AdaptiveThreshold(prevgrayframe, prevgrayframe, 255, AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 11, 3.5);

        // Red-colour mask in HSV: red hue wraps around 0/180, so combine the
        // two hue bands [0..10] and [170..180].
        var hsvFrame = new Mat();
        Cv2.CvtColor(frame, hsvFrame, ColorConversion.BgrToHsv);
        var mask1 = hsvFrame.InRange(new Scalar(0, 70, 50), new Scalar(10, 255, 255));
        var mask2 = hsvFrame.InRange(new Scalar(170, 70, 50), new Scalar(180, 255, 255));
        var mask = mask1 | mask2;

        var maskBgr = new Mat();
        Cv2.CvtColor(mask, maskBgr, ColorConversion.GrayToBgr);
        var maskedImage = frame.Clone();
        Cv2.BitwiseAnd(maskedImage, maskBgr, maskedImage);

        // Blocking preview of the red-masked image.
        using (CvWindow window = new CvWindow(maskedImage.ToIplImage()))
        {
            CvWindow.WaitKey();
        }

        /* using (CvWindow window = new CvWindow(prevgrayframe.ToIplImage()))
         * {
         * CvWindow.WaitKey();
         * }*/

        // Blocking preview of the edge map.
        using (CvWindow window = new CvWindow(edges.ToIplImage()))
        {
            CvWindow.WaitKey();
        }

        // Hough transforms: circles from the grayscale frame, line segments from the edges.
        var circles = Cv2.HoughCircles(prevgrayframe, HoughCirclesMethod.Gradient, 1.0, frame.Height / 8, 200, 80, 0, 0);
        var lines = Cv2.HoughLinesP(edges, 1, Cv.PI / 180, 60, 30, 2);

        //IOutputArray countours = new VectorOfVectorOfPoint();
        // IOutputArray hierarchy;
        //var contours = new List<C>();
        //frame.Convert<Hsv, byte>().InRange()
        //inRange(hsv, Scalar(0, 70, 50), Scalar(10, 255, 255), mask1);
        //inRange(hsv, Scalar(170, 70, 50), Scalar(180, 255, 255), mask2);

        // Draw detections on a copy of the original frame.
        var frameCopy = frame.Clone();
        foreach (var circle in circles)
        {
            Cv2.Circle(frameCopy, (int)circle.Center.X, (int)circle.Center.Y, (int)circle.Radius, Scalar.Green, 3);
            //Cv2.Circle(frameCopy, circle.Center, circle.Radius);
            //frameCopy.(circle, new Bgr(Color.GreenYellow), 3);
        }
        for (var i = 0; i < lines.Length; i++)
        {
            var line = lines[i];
            Cv2.Line(frameCopy, line.P1, line.P2, Scalar.Red, 2);
        }

        // Reverse the channel order (BGR -> RGB); result is built but only
        // displayed by the commented-out preview below.
        var switchedChannels = new Mat();
        var channels = frame.Split();
        Cv2.Merge(channels.Reverse().ToArray(), switchedChannels);

        /*
         * using (CvWindow window = new CvWindow(switchedChannels.ToIplImage()))
         * {
         * CvWindow.WaitKey();
         * }*/

        // Blocking preview of the annotated frame.
        using (CvWindow window = new CvWindow(frameCopy.ToIplImage()))
        {
            CvWindow.WaitKey();
        }

        // External contours of a fresh Canny pass, drawn in blue over the annotations.
        var bw = prevgrayframe.Canny(100, 2 * 100);
        Point[][] countours;
        HierarchyIndex[] indices;
        Cv2.FindContours(bw, out countours, out indices, ContourRetrieval.External, ContourChain.ApproxSimple);
        frameCopy.DrawContours(countours, -1, Scalar.Blue, 2);

        /*using (CvWindow window = new CvWindow(frameCopy.ToIplImage()))
         * {
         * CvWindow.WaitKey();
         * } */

        //CascadeClassifier a = new CascadeClassifier();
        //a.DetectMultiScale()
        //DisplayImage(prevgrayframe.ToBitmap(), pictureBox1); //thread safe display for camera cross thread errors
    }
}
/// <summary>
/// Finds UI-control shape candidates in <paramref name="bitmap"/>: Canny edges
/// (thresholds scaled from the image's mean intensity) -> contours -> polygon
/// approximation -> bounding-rect filtering. Depending on the mode, either saves
/// each candidate as a PNG training sample, classifies it with the SVM and records
/// it in this.controls, or just outlines it. The annotated image is written back
/// through the ref parameter. Uses the legacy OpenCvSharp 2.x (CPlusPlus) API.
/// </summary>
/// <param name="bitmap">In: image to scan. Out: same image with candidate rectangles drawn.</param>
/// <param name="saveShapes">True = dump candidates to the shape_samples folder instead of classifying.</param>
private void detectShapeCandidates(ref Bitmap bitmap, Boolean saveShapes)
{
    string myPhotos = Environment.GetFolderPath(Environment.SpecialFolder.MyPictures);
    Mat colorMat = BitmapConverter.ToMat(bitmap);

    // Mean intensity of the image drives the adaptive Canny thresholds below.
    // NOTE(review): GetArray(0, 0)[0] reads channel 0 (blue) of the mean only — confirm intent.
    MatOfDouble mu = new MatOfDouble();
    MatOfDouble sigma = new MatOfDouble();
    Cv2.MeanStdDev(colorMat, mu, sigma);
    double mean = mu.GetArray(0, 0)[0];
    mu.Dispose();
    sigma.Dispose();

    Mat greyMat = new Mat();
    Cv2.CvtColor(colorMat, greyMat, ColorConversion.BgraToGray, 0);
    // NOTE(review): a 1x1 Gaussian kernel is effectively a no-op regardless of sigma — confirm intent.
    greyMat = greyMat.GaussianBlur(new OpenCvSharp.CPlusPlus.Size(1, 1), 5, 5, BorderType.Default);
    greyMat = greyMat.Canny(0.5 * mean, 1.2 * mean, 3, true);

    // FindContours modifies its input, so run it on a copy of the edge image.
    Mat contourMat = new Mat(greyMat.Size(), colorMat.Type());
    greyMat.CopyTo(contourMat);
    var contours = contourMat.FindContoursAsArray(ContourRetrieval.List, ContourChain.ApproxSimple);

    // Rebuild the detected-controls list from scratch on every pass.
    this.controls.Clear();

    for (int j = 0; j < contours.Length; j++)
    {
        // Polygon approximation with 1% of the perimeter as tolerance;
        // accept only moderately complex polygons (4..19 vertices).
        var poly = Cv2.ApproxPolyDP(contours[j], 0.01 * Cv2.ArcLength(contours[j], true), true);
        int num = poly.Length;

        if (num >= 4 && num < 20)
        {
            var color = Scalar.Blue;   // default outline colour (unclassified)
            var rect = Cv2.BoundingRect(poly);

            // Discard candidates too small to be a control.
            if (rect.Height < 20 || rect.Width < 20)
            {
                continue;
            }

            if (saveShapes)
            {
                // Training-sample mode: crop/preprocess the shape and save it
                // under My Pictures\shape_samples with a random file name.
                string path = Path.Combine(myPhotos, "shape_samples");
                path = Path.Combine(path, "shape_sample_" + Path.GetRandomFileName() + ".png");
                Mat shapeMat = preprocessShape(rect, greyMat);
                Bitmap shape = shapeMat.ToBitmap();
                shape.Save(path);
                shape.Dispose();
                shapeMat.Dispose();
                continue;
            }

            if (shapeSVM != null)
            {
                // Classification mode: SVM decides the control type.
                Mat shapeMat = preprocessShape(rect, greyMat);
                float shapeClass = classifyShape(shapeMat, shapeSVM);
                if (shapeClass >= 0)
                {
                    Shape shape = null;
                    switch ((int)shapeClass)
                    {
                    case 0:
                        color = Scalar.Red;
                        shape = new Shape(Shape.ShapeType.SQUARE, rect);
                        break;

                    case 1:
                        color = Scalar.Yellow;
                        shape = new Shape(Shape.ShapeType.CIRCLE, rect);
                        break;

                    case 2:
                        color = Scalar.Green;
                        shape = new Shape(Shape.ShapeType.SLIDER, rect);
                        break;
                    }
                    // NOTE(review): a class id >= 3 leaves 'shape' null yet still
                    // adds it to this.controls — confirm whether that is intended.
                    Cv2.Rectangle(colorMat, rect, color, 2);
                    this.controls.Add(shape);
                }
                shapeMat.Dispose();
            }
            else
            {
                // No SVM loaded: just outline the candidate in blue.
                Cv2.Rectangle(colorMat, rect, color, 2);
            }
        }
    }

    // Hand the annotated image back to the caller and release the native Mats.
    bitmap = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(colorMat);
    colorMat.Dispose();
    greyMat.Dispose();
    contourMat.Dispose();
}