/// <summary>
/// Pre-processes <paramref name="image"/> in place for contour/edge extraction:
/// bilateral smoothing, adaptive threshold, median blur, 5px black border, then
/// Canny with thresholds derived from the Otsu level. The original Mat is
/// disposed and replaced by the processed copy.
/// </summary>
/// <param name="image">Input image; replaced with the processed result.</param>
/// <param name="sourceImage">Unused here; kept for interface compatibility.</param>
/// <returns>Always null.</returns>
public dynamic PreProcessImage(ref Mat image, Mat sourceImage)
{
    var copy = new Mat();
    try
    {
        Cv2.BilateralFilter(image, copy, 9, 75, 75);
        Cv2.AdaptiveThreshold(copy, copy, 255, AdaptiveThresholdTypes.GaussianC, ThresholdTypes.Binary, 115, 4);
        Cv2.MedianBlur(copy, copy, 11);
        Cv2.CopyMakeBorder(copy, copy, 5, 5, 5, 5, BorderTypes.Constant, Scalar.Black);

        // We only need the Otsu threshold VALUE; the thresholded output Mat was
        // previously leaked (created inline and never disposed) — fixed here.
        double otsu;
        using (var otsuScratch = new Mat())
        {
            otsu = Cv2.Threshold(copy, otsuScratch, 0, 255, ThresholdTypes.Binary | ThresholdTypes.Otsu);
        }
        Cv2.Canny(copy, copy, otsu, otsu * 2, 3, true);
    }
    catch
    {
        // Don't leak the working copy if any step throws.
        copy.Dispose();
        throw;
    }
    image.Dispose();
    image = copy;
    return null;
}
/// <summary>
/// Skin-smoothing ("beauty") pass: bilateral-filters <paramref name="src"/> and
/// copies the result back into it. Scratch Mats are released by the tracker.
/// </summary>
static void BeautyIt(Mat src)
{
    using (var tracker = new ResourceTracker())
    {
        Mat smoothed = tracker.NewMat();
        Cv2.BilateralFilter(src, smoothed, 15, 35, 35);
        smoothed.CopyTo(src);
    }
}
/// <summary>
/// Calibrates depth coefficients from already-decoded pattern images.
/// d0 = baseLine * focalLength / planeDistance is the reference disparity.
/// </summary>
/// <param name="width">Unused here; kept for interface compatibility.</param>
/// <param name="height">Unused here; kept for interface compatibility.</param>
public void Calibrate2(double planeDistance, double baseLine, double focalLength, int width, int height, Mat[] imgs)
{
    var d0 = baseLine * focalLength / planeDistance;
    var decoded = code.Decode(imgs)[0];
    // Dispose the locally created Mat (was leaked). Assumes CalibrateCoefficient
    // does not retain a reference to it — TODO confirm.
    using (var filtered = new Mat())
    {
        // d = -1: sigmaSpace determines the neighborhood size.
        Cv2.BilateralFilter(decoded, filtered, -1, 4, 4);
        Calib.CalibrateCoefficient(filtered, d0);
    }
}
// Background-worker body: grabs a camera frame, extracts contours from it, and
// publishes a rendered contour image plus the binary mask via instance fields
// and ReportProgress.
private void OpenCvWorker_DoWork(object sender, DoWorkEventArgs e)
{
    InitCamera();
    // Grab one frame from the camera.
    Mat camImage = cam.RetrieveMat();
    Mat gray = new Mat();
    // NOTE(review): debug snapshot written on every invocation — consider removing.
    Cv2.ImWrite("tmp.jpg", camImage);
    // Convert to gray and smooth with a bilateral filter (edge-preserving).
    Cv2.CvtColor(camImage, gray, ColorConversionCodes.BGR2GRAY);
    Mat bilateral = new Mat();
    Cv2.BilateralFilter(gray, bilateral, 25, 10, 80);
    // Binarize and find the contours of the shape.
    // NOTE(review): with ThresholdTypes.Otsu set, the explicit threshold 100 is
    // ignored — OpenCV computes the level automatically.
    Mat useless = new Mat();
    Mat binary = new Mat();
    Cv2.Threshold(bilateral, binary, 100, 255, ThresholdTypes.BinaryInv | ThresholdTypes.Otsu);
    Cv2.FindContours(binary.Clone(), out var contours, useless, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);
    // Blank single-channel canvas the size of the frame for drawing contours.
    Mat background = new Mat(camImage.Rows, camImage.Cols, MatType.CV_8U);
    // NOTE(review): new_contours is computed but never used — DrawContours below
    // draws the raw 'contours'. Dead work unless DefinePointless has side effects;
    // verify before deleting.
    Mat[] new_contours = new Mat[contours.Length];
    for (int i = 0; i < contours.Length; i++) { new_contours[i] = DefinePointless(contours[i]); }
    Cv2.DrawContours(background, contours, -1, Scalar.White);
    // Invert (contours black on white) and convert to BGR for display.
    Mat invertedBackground = new Mat();
    Cv2.BitwiseNot(background, invertedBackground);
    Mat coloredBackground = new Mat();
    Cv2.CvtColor(invertedBackground, coloredBackground, ColorConversionCodes.GRAY2BGR);
    // Publish results through instance state for the UI thread.
    this.background = coloredBackground;
    this.binary = binary;
    this.contours = contours.Length;
    // NOTE(review): camImage, gray, bilateral, useless, background,
    // invertedBackground and the new_contours Mats are never disposed — leaks on
    // every frame.
    OpenCvWorker.ReportProgress(100, coloredBackground);
}
/// <summary>
/// Applies a bilateral filter to the currently displayed image and shows the result.
/// </summary>
private void BilateralFilter()
{
    // BUG FIX: the original dereferenced the result of the 'as' cast
    // (wb.ToMat()) before any null check, risking a NullReferenceException
    // when Source is null or not a WriteableBitmap.
    if (!(ShowImage.Source is WriteableBitmap wb))
    {
        return;
    }
    Mat image = wb.ToMat();
    if (image == null)
    {
        return;
    }
    // BUG FIX: cv::bilateralFilter does not support in-place operation
    // (src and dst must be distinct Mats per the OpenCV docs); filter into
    // a separate destination and display that instead.
    Mat filtered = new Mat();
    Cv2.BilateralFilter(image, filtered, -1, 15, 15);
    DisplayImage(filtered);
}
/// <summary>
/// Calibrates depth coefficients from raw captured pattern images.
/// Converts each byte buffer to a CV_32SC1 Mat scaled by 255, decodes, filters,
/// then fits the coefficient against d0 = baseLine * focalLength / planeDistance.
/// </summary>
public void Calibrate(double planeDistance, double baseLine, double focalLength, int width, int height, byte[][] capturedImages)
{
    // NOTE(review): Math.Min(255, (int)b) is a no-op — a byte is always <= 255.
    // Kept to preserve the exact pipeline; the widening to int is what matters
    // for the CV_32SC1 Mat below.
    var imgs = capturedImages
        .Select(buff => Array.ConvertAll(buff, b => Math.Min(255, (int)b)))
        .Select(arr => (new Mat(height, width, MatType.CV_32SC1, arr) * 255).ToMat())
        .ToArray();
    var d0 = baseLine * focalLength / planeDistance;
    var decoded = code.Decode(imgs)[0];
    // Dispose the locally created Mat (was leaked). Assumes CalibrateCoefficient
    // does not retain a reference to it — TODO confirm.
    using (var filtered = new Mat())
    {
        Cv2.BilateralFilter(decoded, filtered, -1, 4, 4);
        Calib.CalibrateCoefficient(filtered, d0);
    }
}
// Beauty / skin-smoothing demo: bilateral filtering.
private void BtnChange12_Click(object sender, RoutedEventArgs e)
{
    using (var src = new Mat(@"..\..\Images\ocv02.jpg", ImreadModes.AnyDepth | ImreadModes.AnyColor))
    using (var dst = new Mat()) // filter into a copy, then display it
    {
        Cv2.BilateralFilter(src, dst, 15, 35d, 35d);
        var stream = dst.ToMemoryStream();
        var bitmap = new BitmapImage();
        bitmap.BeginInit();
        bitmap.StreamSource = stream;
        bitmap.EndInit();
        imgOutput.Source = bitmap;
    }
    SetSource(@"..\..\Images\ocv02.jpg");
}
/// <summary>
/// Bilateral filter (edge-preserving smoothing) over a bitmap.
/// </summary>
/// <param name="bmp">Source bitmap.</param>
/// <returns>The filtered image, or the original bitmap if OpenCV throws.</returns>
public static Image BilateralFilter(this Bitmap bmp)
{
    try
    {
        using (Mat src = bmp.ToMat())
        {
            using (Mat result = new Mat())
            {
                // NOTE(review): 25 / 2 is integer division (= 12, not 12.5) —
                // confirm the intended sigmaSpace value.
                Cv2.BilateralFilter(src, result, 25, 25 * 2, 25 / 2);
                return(result.ToBitmap());
            }
        }
    }
    catch (OpenCVException)
    {
        // Best-effort: fall back to the unfiltered input on OpenCV failure.
        return(bmp);
    }
}
/// <summary>
/// Cartoon-style smoothing: 7 rounds of paired bilateral filters (small kernel,
/// repeated, approximates one strong filter at much lower cost).
/// </summary>
/// <param name="image">Input image; a processed IplImage is returned.</param>
public IplImage cartoon(IplImage image)
{
    Mat mat = new Mat(image);
    int repetitions = 7;
    int kernelSize = 9;
    double sigmaColor = 9;
    double sigmaSpace = 7;
    // Scratch buffer for the ping-pong filtering; was leaked, now disposed.
    using (Mat tmp = new Mat(mat.Size(), MatType.CV_8UC3))
    {
        for (int i = 0; i < repetitions; i++)
        {
            // Ping-pong so each pass reads the previous pass's output
            // (bilateralFilter cannot run in place).
            Cv2.BilateralFilter(mat, tmp, kernelSize, sigmaColor, sigmaSpace);
            Cv2.BilateralFilter(tmp, mat, kernelSize, sigmaColor, sigmaSpace);
        }
    }
    // NOTE(review): mat is intentionally NOT disposed — ToIplImage() may share
    // its pixel buffer; confirm ownership before adding a Dispose here.
    image = mat.ToIplImage();
    return image;
}
/// <summary>
/// Verifies the CUDA bilateral filter against the CPU implementation on a
/// 128x128 zero image. All Mats/GpuMats are now disposed (src and dst_gold
/// were previously leaked).
/// </summary>
public void cuda_BilateralFilter()
{
    int kernel_size = 5;
    float sigma_color = 10f;
    float sigma_spatial = 3.5f;
    using (Mat src = Mat.Zeros(128, 128, MatType.CV_8UC1))
    {
        Size size = src.Size();
        using (GpuMat g_src = new GpuMat(size, src.Type()))
        using (GpuMat d_dst = new GpuMat())
        using (Mat dst_gold = new Mat())
        {
            g_src.Upload(src);
            // GPU result...
            Cuda.cuda.bilateralFilter(g_src, d_dst, kernel_size, sigma_color, sigma_spatial);
            // ...compared against the CPU gold result.
            Cv2.BilateralFilter(src, dst_gold, kernel_size, sigma_color, sigma_spatial);
            ImageEquals(dst_gold, d_dst);
            ShowImagesWhenDebugMode(src, d_dst);
        }
    }
}
// Filter selector: applies the blur chosen in the combo box to MyImage
// (result written into Filter) and shows it in pictureBox2.
private void comboBox1_SelectedIndexChanged(object sender, EventArgs e)
{
    switch (comboBox1.SelectedIndex)
    {
        case 0:
            // Simple blur: input, output, kernel size, anchor ((-1,-1) = center), border type.
            Cv2.Blur(MyImage, Filter, new OpenCvSharp.Size(5, 5), new OpenCvSharp.Point(-1, -1), BorderTypes.Default);
            break;
        case 1:
            // Box filter.
            Cv2.BoxFilter(MyImage, Filter, MatType.CV_8UC3, new OpenCvSharp.Size(7, 7), new OpenCvSharp.Point(-1, -1), true, BorderTypes.Default);
            break;
        case 2:
            // Median blur.
            Cv2.MedianBlur(MyImage, Filter, 9);
            break;
        case 3:
            // Gaussian blur.
            Cv2.GaussianBlur(MyImage, Filter, new OpenCvSharp.Size(3, 3), 1, 0, BorderTypes.Default);
            break;
        case 4:
            // Bilateral filter. d = 9 is recommended for offline, heavy-noise
            // filtering; larger sigma values (50, 50) give a cartoon-like look.
            Cv2.BilateralFilter(MyImage, Filter, 9, 50, 50, BorderTypes.Default);
            break;
    }
    pictureBox2.Image = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(Filter);
}
/// <summary>
/// Finds candidate click-target rectangles in a cropped screenshot: grayscale +
/// bilateral filter, Canny edges, dilation, contour detection, heuristic
/// filtering, non-max suppression, then translation into screen coordinates.
/// </summary>
/// <param name="crop">Cropped screenshot to analyze.</param>
/// <returns>Click-target rects in screen coordinates.</returns>
private List<OpenCvSharp.Rect> do_cv(Bitmap crop)
{
    // convert the given image to an openCV mat(erial)
    var image = OpenCvSharp.Extensions.BitmapConverter.ToMat(crop);

    // Grayscale, then bilateral filter to cut noise while preserving edges.
    var gray = new Mat();
    Cv2.CvtColor(image, gray, ColorConversionCodes.BGR2GRAY);
    var gray2 = new Mat();
    Cv2.BilateralFilter(gray, gray2, 10, 17, 17);

    // BUG FIX: Canny previously ran on the unfiltered 'gray', leaving the
    // bilateral-filtered 'gray2' computed but unused. Edge detection now uses
    // the filtered image as apparently intended.
    var edged = new Mat();
    Cv2.Canny(gray2, edged, 80, 200);

    // dilate / thicken the shapes in the given image
    var dilated = new Mat();
    Cv2.Dilate(edged, dilated, null);

    // Compute contours around features in the area of interest.
    OpenCvSharp.Point[][] contours;
    OpenCvSharp.HierarchyIndex[] hierarchy;
    Cv2.FindContours(dilated, out contours, out hierarchy, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple);

    //----------------------------------------------------------------
    // OBJECT CONTOUR / CLICK TARGET FILTRATION
    //----------------------------------------------------------------
    var filteredContours = new List<OpenCvSharp.Rect>();
    foreach (var c in contours)
    {
        var contourRect = Cv2.BoundingRect(c);
        // BUG FIX: was 'Width / Height' — integer division truncated every
        // ratio below 1 to 0, so the '< 0.2' test could never behave as
        // intended. Cast forces floating-point division.
        double aspectRatio = (double)contourRect.Width / contourRect.Height;

        // discard contours that look like 'lines' such as the edge of a window
        if ((aspectRatio > 3 || aspectRatio < 0.2) && (contourRect.Height < 8 || contourRect.Width < 8))
        {
            Console.WriteLine("Bad ratio... " + aspectRatio);
            continue;
        }
        // discard contours deemed 'too large'
        if (contourRect.Width * contourRect.Height > 6000)
        {
            Console.WriteLine("Bad size... " + contourRect.Width);
            continue;
        }
        filteredContours.Add(contourRect);
    }

    //----------------------------------------------------------------
    // CLICK TARGET COORDINATE TRANSLATION
    //----------------------------------------------------------------
    // TODO we should probably move this outside of this function
    var transRects = new List<Rect>();
    var goodRects = NonMaxSuppression(filteredContours, 0.3f);
    Console.WriteLine(goodRects.Count);
    foreach (var rect in goodRects)
    {
        Cv2.Rectangle(image, rect, new Scalar(0, 255, 0, 255));
        transRects.Add(new Rect(ScreenGazeSnapshot.m_GazeRect.X + rect.X, ScreenGazeSnapshot.m_GazeRect.Y + rect.Y, rect.Width, rect.Height));
    }

    // save the 'rendered' cv results incase we want to dump it to disk later
    m_LastScreenCrop = crop;
    m_LastScreenCropCV = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(image);

    return transRects;
}
// Main image-processing dispatcher: applies, in order, every operation queued
// in listBox2 (parameters encoded in myOPENCV_runlist[i, 1..4]) and returns
// the final image.
// NOTE(review): image_out initially ALIASES image_in, so in-place operations
// also modify the caller's input Mat until a step reassigns image_out.
private Mat myOPENCV_run(Mat image_in, Mat image_out)
{
    image_out = image_in; // start the pipeline from the input image
    for (int i = 0; i < listBox2.Items.Count; i++) // run each queued operation
    {
        switch ((MyOPENCV)myOPENCV_runlist[i, 0])
        {
            case MyOPENCV.cvt_color: // color conversion (code, dstCn)
            {
                Cv2.CvtColor(image_out, image_out, (ColorConversionCodes)myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2]);
                break;
            }
            case MyOPENCV.boxfilter: // box filter
            {
                OpenCvSharp.Size size;
                size.Width = myOPENCV_runlist[i, 2];
                size.Height = myOPENCV_runlist[i, 3];
                Cv2.BoxFilter(image_out, image_out, myOPENCV_runlist[i, 1], size);
                break;
            }
            case MyOPENCV.blur: // mean blur
            {
                OpenCvSharp.Size size;
                size.Width = myOPENCV_runlist[i, 1];
                size.Height = myOPENCV_runlist[i, 2];
                Cv2.Blur(image_out, image_out, size);
                break;
            }
            case MyOPENCV.gaussianblur: // Gaussian blur
            {
                OpenCvSharp.Size size;
                double sigmaX, sigmaY;
                size.Width = myOPENCV_runlist[i, 1];
                size.Height = myOPENCV_runlist[i, 2];
                sigmaX = (double)myOPENCV_runlist[i, 3];
                sigmaY = (double)myOPENCV_runlist[i, 4];
                Cv2.GaussianBlur(image_out, image_out, size, sigmaX, sigmaY);
                break;
            }
            case MyOPENCV.medianblur: // median blur
            {
                // BUG FIX: previously read from image_in, silently discarding
                // the results of all earlier pipeline steps; read from
                // image_out like every other operation.
                Cv2.MedianBlur(image_out, image_out, myOPENCV_runlist[i, 1]);
                break;
            }
            case MyOPENCV.bilateralfilter: // bilateral filter (cannot run in place)
            {
                Mat image_out2 = new Mat();
                double sigmaColor, sigmaSpace;
                sigmaColor = (double)myOPENCV_runlist[i, 2] * 2;
                sigmaSpace = (double)myOPENCV_runlist[i, 3] / 2;
                Cv2.BilateralFilter(image_out, image_out2, myOPENCV_runlist[i, 1], sigmaColor, sigmaSpace);
                image_out = image_out2;
                break;
            }
            case MyOPENCV.dilate: // dilation
            {
                OpenCvSharp.Size size;
                size.Width = myOPENCV_runlist[i, 2];
                size.Height = myOPENCV_runlist[i, 3];
                Mat image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 1], size);
                Cv2.Dilate(image_out, image_out, image_element);
                break;
            }
            case MyOPENCV.erode: // erosion
            {
                OpenCvSharp.Size size;
                size.Width = myOPENCV_runlist[i, 2];
                size.Height = myOPENCV_runlist[i, 3];
                Mat image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 1], size);
                Cv2.Erode(image_out, image_out, image_element);
                break;
            }
            case MyOPENCV.morphologyex: // advanced morphological transform
            {
                OpenCvSharp.Size size;
                size.Width = myOPENCV_runlist[i, 3];
                size.Height = myOPENCV_runlist[i, 4];
                Mat image_element = Cv2.GetStructuringElement((MorphShapes)myOPENCV_runlist[i, 2], size);
                Cv2.MorphologyEx(image_out, image_out, (MorphTypes)myOPENCV_runlist[i, 1], image_element);
                break;
            }
            case MyOPENCV.floodfill: // flood fill from (x, y) with the given color
            {
                OpenCvSharp.Point point;
                point.X = myOPENCV_runlist[i, 1];
                point.Y = myOPENCV_runlist[i, 2];
                OpenCvSharp.Scalar scalar;
                scalar = myOPENCV_runlist[i, 3];
                Cv2.FloodFill(image_out, point, scalar);
                break;
            }
            case MyOPENCV.pyrup: // upscale 2x
            {
                OpenCvSharp.Size size;
                size.Width = image_out.Cols * 2;
                size.Height = image_out.Rows * 2;
                Cv2.PyrUp(image_out, image_out, size);
                break;
            }
            case MyOPENCV.pyrdown: // downscale 2x
            {
                OpenCvSharp.Size size;
                size.Width = image_out.Cols / 2;
                size.Height = image_out.Rows / 2;
                Cv2.PyrDown(image_out, image_out, size);
                break;
            }
            case MyOPENCV.resize: // resize by tenths of the original size
            {
                OpenCvSharp.Size size;
                size.Width = image_out.Cols * myOPENCV_runlist[i, 1] / 10;
                size.Height = image_out.Rows * myOPENCV_runlist[i, 2] / 10;
                InterpolationFlags interpolationFlags = (InterpolationFlags)myOPENCV_runlist[i, 3];
                Cv2.Resize(image_out, image_out, size, 0, 0, interpolationFlags);
                break;
            }
            case MyOPENCV.threshold: // fixed-level threshold
            {
                Cv2.Threshold(image_out, image_out, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], (ThresholdTypes)myOPENCV_runlist[i, 3]);
                break;
            }
            case MyOPENCV.canny: // Canny edge detection (cannot run in place)
            {
                Mat image_out2 = new Mat();
                Cv2.Canny(image_out, image_out2, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                image_out = image_out2;
                break;
            }
            case MyOPENCV.sobel: // Sobel edge detection
            {
                Cv2.Sobel(image_out, image_out, -1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                break;
            }
            case MyOPENCV.laplacian: // Laplacian edge detection
            {
                // NOTE(review): parameter 1 is forced to 0 here (ddepth is
                // hard-coded to 0 below anyway) — confirm this is intentional.
                myOPENCV_runlist[i, 1] = 0;
                Cv2.Laplacian(image_out, image_out, 0, myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                break;
            }
            case MyOPENCV.scharr: // Scharr edge detection
            {
                Cv2.Scharr(image_out, image_out, -1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2]);
                break;
            }
            case MyOPENCV.convertscaleabs: // fast contrast/brightness enhancement
            {
                double alpha, beta;
                alpha = (double)myOPENCV_runlist[i, 1] / 10;
                beta = (double)myOPENCV_runlist[i, 2] / 10;
                Cv2.ConvertScaleAbs(image_out, image_out, alpha, beta);
                break;
            }
            case MyOPENCV.addweighted: // blend with the second source image
            {
                Mat image_in2 = new Mat(my_imagesource2);
                double alpha, beta, gamma;
                alpha = (double)myOPENCV_runlist[i, 1] / 10;
                beta = (double)myOPENCV_runlist[i, 2] / 10;
                gamma = (double)myOPENCV_runlist[i, 3] / 10;
                Cv2.AddWeighted(image_out, alpha, image_in2, beta, gamma, image_out);
                break;
            }
            case MyOPENCV.houghlines: // standard Hough line transform
            {
                Scalar scalar = new Scalar(0x00, 0xFF, 0x00); // green
                LineSegmentPolar[] lines;
                OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                Mat image_out3 = new Mat(size, MatType.CV_8UC3);
                lines = Cv2.HoughLines(image_out, 1, Cv2.PI / 180, myOPENCV_runlist[i, 1]);
                for (int ii = 0; ii < lines.Length; ii++)
                {
                    // Convert (rho, theta) to two far-apart points on the line.
                    OpenCvSharp.Point pt1, pt2;
                    double a = Math.Cos(lines[ii].Theta), b = Math.Sin(lines[ii].Theta);
                    double x0 = a * lines[ii].Rho, y0 = b * lines[ii].Rho;
                    pt1.X = (int)Math.Round(x0 + 1000 * (-b));
                    pt1.Y = (int)Math.Round(y0 + 1000 * (a));
                    pt2.X = (int)Math.Round(x0 - 1000 * (-b));
                    pt2.Y = (int)Math.Round(y0 - 1000 * (a));
                    Cv2.Line(image_out3, pt1, pt2, scalar, 1, LineTypes.AntiAlias);
                }
                if (myOPENCV_runlist[i, 2] == 0)
                {
                    // Blend the drawn lines back onto the input image.
                    Cv2.AddWeighted(image_out3, (double)myOPENCV_runlist[i, 3] / 10, image_in, (double)myOPENCV_runlist[i, 4] / 10, 0, image_out);
                }
                else
                {
                    image_out = image_out3;
                }
                break;
            }
            case MyOPENCV.houghlinep: // probabilistic Hough line transform
            {
                Scalar scalar = new Scalar(0x00, 0xFF, 0x00); // green
                LineSegmentPoint[] lines; // detected segments
                OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                Mat image_out3 = new Mat(size, MatType.CV_8UC3);
                lines = Cv2.HoughLinesP(image_out, 1, Cv2.PI / 180, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 3], myOPENCV_runlist[i, 4]);
                for (int ii = 0; ii < lines.Length; ii++)
                {
                    // BUG FIX: the segment array was indexed with the OUTER
                    // loop variable 'i' instead of 'ii', drawing the same
                    // (possibly out-of-range) segment repeatedly.
                    OpenCvSharp.Point point1, point2;
                    point1.X = lines[ii].P1.X;
                    point1.Y = lines[ii].P1.Y;
                    point2.X = lines[ii].P2.X;
                    point2.Y = lines[ii].P2.Y;
                    Cv2.Line(image_out3, point1, point2, scalar, 1, LineTypes.AntiAlias);
                }
                if (myOPENCV_runlist[i, 2] == 0)
                {
                    Cv2.AddWeighted(image_out3, 1, image_in, 0.8, 0, image_out);
                }
                else
                {
                    image_out = image_out3;
                }
                break;
            }
            case MyOPENCV.houghcircles: // Hough circle transform
            {
                Scalar scalar = new Scalar(0x00, 0xFF, 0x00); // green
                CircleSegment[] circles;
                OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                Mat image_out3 = new Mat(size, MatType.CV_8UC3);
                circles = Cv2.HoughCircles(image_out, HoughMethods.Gradient, 1, myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3], 0, myOPENCV_runlist[i, 4]);
                for (int ii = 0; ii < circles.Length; ii++)
                {
                    OpenCvSharp.Point center;
                    center.X = (int)Math.Round(circles[ii].Center.X);
                    center.Y = (int)Math.Round(circles[ii].Center.Y);
                    int radius = (int)Math.Round(circles[ii].Radius);
                    // Draw each detected circle once (the original drew every
                    // circle twice with identical geometry and color).
                    Cv2.Circle(image_out3, center, radius, scalar);
                }
                Cv2.AddWeighted(image_out3, 1, image_in, 0.6, 0, image_out);
                break;
            }
            case MyOPENCV.remap: // remap (flip / scale via coordinate maps)
            {
                OpenCvSharp.Size size = new OpenCvSharp.Size(image_out.Width, image_out.Height);
                Mat map_x = new Mat(size, MatType.CV_32FC1), map_y = new Mat(size, MatType.CV_32FC1);
                for (int ii = 0; ii < image_out.Rows; ii++)
                {
                    for (int jj = 0; jj < image_out.Cols; jj++)
                    {
                        if (myOPENCV_runlist[i, 1] == 0)
                        {
                            // vertical flip
                            map_x.Set<float>(ii, jj, jj);
                            map_y.Set<float>(ii, jj, image_out.Rows - ii);
                        }
                        else if (myOPENCV_runlist[i, 1] == 1)
                        {
                            // horizontal flip
                            map_x.Set<float>(ii, jj, image_out.Cols - jj);
                            map_y.Set<float>(ii, jj, ii);
                        }
                        else if (myOPENCV_runlist[i, 1] == 2)
                        {
                            // flip both axes
                            map_x.Set<float>(ii, jj, image_out.Cols - jj);
                            map_y.Set<float>(ii, jj, image_out.Rows - ii);
                        }
                        else if (myOPENCV_runlist[i, 1] == 3)
                        {
                            // scale by parameter / 10
                            map_x.Set<float>(ii, jj, (float)myOPENCV_runlist[i, 2] / 10 * jj);
                            map_y.Set<float>(ii, jj, (float)myOPENCV_runlist[i, 2] / 10 * ii);
                        }
                    }
                }
                Cv2.Remap(image_out, image_out, map_x, map_y);
                break;
            }
            case MyOPENCV.warpaffine: // affine transform
            {
                if (0 == myOPENCV_runlist[i, 1])
                {
                    // Rotation about the image center by angle, with scaling.
                    OpenCvSharp.Point center = new OpenCvSharp.Point(image_out.Cols / 2, image_out.Rows / 2);
                    double angle = myOPENCV_runlist[i, 2];
                    double scale = (double)myOPENCV_runlist[i, 3] / 10;
                    Mat rot_mat = Cv2.GetRotationMatrix2D(center, angle, scale);
                    Cv2.WarpAffine(image_out, image_out, rot_mat, image_out.Size());
                }
                else
                {
                    // General affine warp from three source/destination point pairs.
                    Point2f[] srcTri = new Point2f[3];
                    Point2f[] dstTri = new Point2f[3];
                    srcTri[0] = new Point2f(0, 0);
                    srcTri[1] = new Point2f(image_out.Cols, 0);
                    srcTri[2] = new Point2f(0, image_out.Rows);
                    dstTri[0] = new Point2f((float)(image_out.Cols * myOPENCV_runlist[i, 2] / 100), (float)(image_out.Rows * myOPENCV_runlist[i, 2] / 100));
                    dstTri[1] = new Point2f((float)(image_out.Cols * (1 - (float)myOPENCV_runlist[i, 3] / 100)), (float)(image_out.Rows * myOPENCV_runlist[i, 3] / 100));
                    dstTri[2] = new Point2f((float)(image_out.Cols * myOPENCV_runlist[i, 4] / 100), (float)(image_out.Rows * (1 - (float)myOPENCV_runlist[i, 4] / 100)));
                    Mat warp_mat = Cv2.GetAffineTransform(srcTri, dstTri);
                    Cv2.WarpAffine(image_out, image_out, warp_mat, image_out.Size());
                }
                break;
            }
            case MyOPENCV.equalizehist: // histogram equalization
            {
                Cv2.EqualizeHist(image_out, image_out);
                break;
            }
            case MyOPENCV.facedetection: // face detection
            {
                // Parameter 1 == 0 uses the Haar cascade, anything else uses LBP.
                if (0 == myOPENCV_runlist[i, 1])
                {
                    var haarCascade = new CascadeClassifier(@"haarcascade_frontalface_alt.xml");
                    Mat haarResult = DetectFace(image_out, haarCascade);
                    image_out = haarResult;
                }
                else
                {
                    var lbpCascade = new CascadeClassifier(@"lbpcascade_frontalface.xml");
                    Mat lbpResult = DetectFace(image_out, lbpCascade);
                    image_out = lbpResult;
                }
                break;
            }
            case MyOPENCV.matchtemplate: // template matching
            {
                Mat originalMat = Cv2.ImRead(my_imagesource, ImreadModes.AnyColor);  // source image
                Mat modelMat = Cv2.ImRead(my_imagesource2, ImreadModes.AnyColor);    // template
                Mat resultMat = new Mat();                                           // match scores
                // Parameter 1 selects the matching mode (0..5).
                if (0 == myOPENCV_runlist[i, 1])
                {
                    Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.SqDiff);
                }
                else if (1 == myOPENCV_runlist[i, 1])
                {
                    Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.SqDiffNormed);
                }
                else if (2 == myOPENCV_runlist[i, 1])
                {
                    Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCorr);
                }
                else if (3 == myOPENCV_runlist[i, 1])
                {
                    Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCorrNormed);
                }
                else if (4 == myOPENCV_runlist[i, 1])
                {
                    Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCoeff);
                }
                else if (5 == myOPENCV_runlist[i, 1])
                {
                    Cv2.MatchTemplate(originalMat, modelMat, resultMat, TemplateMatchModes.CCoeffNormed);
                }
                // NOTE(review): the rectangle is drawn at minLocation, which is
                // only the best match for the SqDiff modes; CCorr/CCoeff modes
                // peak at maxLocation — confirm intended behavior.
                OpenCvSharp.Point minLocation, maxLocation;
                Cv2.MinMaxLoc(resultMat, out minLocation, out maxLocation);
                Mat mask = originalMat.Clone();
                Cv2.Rectangle(mask, minLocation, new OpenCvSharp.Point(minLocation.X + modelMat.Cols, minLocation.Y + modelMat.Rows), Scalar.Green, 2);
                image_out = mask;
                break;
            }
            case MyOPENCV.find_draw_contours: // find and draw contours
            {
                Cv2.CvtColor(image_out, image_out, ColorConversionCodes.RGB2GRAY); // to grayscale
                Cv2.Canny(image_out, image_out, 100, 200);                         // Canny edges
                OpenCvSharp.Point[][] contours;
                HierarchyIndex[] hierarchly;
                Cv2.FindContours(image_out, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0));
                Mat dst_Image = Mat.Zeros(image_out.Size(), image_out.Type()); // blank canvas
                for (int j = 0; j < contours.Length; j++)
                {
                    // Contour color and thickness come from parameters 1-4.
                    Scalar color = new Scalar(myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    Cv2.DrawContours(dst_Image, contours, j, color, myOPENCV_runlist[i, 4], LineTypes.Link8, hierarchly);
                }
                image_out = dst_Image;
                break;
            }
            case MyOPENCV.componentdefectdetecting: // component defect detection
            {
                Cv2.CvtColor(image_out, image_out, ColorConversionCodes.RGB2GRAY); // to grayscale
                Cv2.Canny(image_out, image_out, 100, 200);                         // Canny edges
                OpenCvSharp.Point[][] contours;
                HierarchyIndex[] hierarchly;
                Cv2.FindContours(image_out, out contours, out hierarchly, RetrievalModes.Tree, ContourApproximationModes.ApproxSimple, new OpenCvSharp.Point(0, 0));
                Mat dst_Image = Mat.Zeros(image_out.Size(), image_out.Type()); // blank canvas
                for (int j = 0; j < contours.Length; j++)
                {
                    Scalar color = new Scalar(myOPENCV_runlist[i, 1], myOPENCV_runlist[i, 2], myOPENCV_runlist[i, 3]);
                    Cv2.DrawContours(dst_Image, contours, j, color, myOPENCV_runlist[i, 4], LineTypes.Link8, hierarchly);
                }
                // NOTE(review): dst_Image is drawn but never assigned to
                // image_out here, and ConvexHull is called on the whole image
                // rather than a contour — looks unfinished; confirm intent.
                Mat cnt = new Mat();
                Cv2.ConvexHull(image_out, cnt);
                break;
            }
            default:
                break;
        }
    }
    return image_out;
}
//---------------------------------------------------------------
/// <summary>
/// Thin wrapper over <see cref="Cv2.BilateralFilter"/>: filters src into dst
/// with pixel neighborhood diameter d and the given color/space sigmas.
/// </summary>
public static void bilateral_filter(Mat src, Mat dst, int d, double sigma_color, double sigma_space)
    => Cv2.BilateralFilter(src, dst, d, sigma_color, sigma_space);