/// <summary>
/// Filter: smooth the aligned image, enhance it, and publish the result
/// </summary>
private void Filter()
{
    //isWarningImage = false;
    Matrix<byte> BoxArray = this.AlignImage.Clone(); // working copy for filtering
    //Mat MedianMat = InitMiddleImage.Clone();
    //Matrix<byte> MedianMat = this.AlignImage.Clone();
    //Mat Outputtemp = new Mat();
    //BoxArray._Mul(MedianMat);

    // Median filter to suppress impulse noise
    CvInvoke.MedianBlur(BoxArray, BoxArray, 9);
    // Box filter
    CvInvoke.BoxFilter(BoxArray, BoxArray, DepthType.Cv8U, new Size(9, 9), new Point(-1, -1), true, BorderType.Reflect101);
    //CvInvoke.FastNlMeansDenoising(MedianMat, BoxArray);

    // Zero out the background
    //Matrix<byte> tempArray = new Matrix<byte>(new Size(BoxArray.Width, BoxArray.Height));
    //CvInvoke.Threshold(BoxArray, tempArray, 100, 1, ThresholdType.Binary);
    //BoxArray._Mul(tempArray);

    // ACE image enhancement
    BoxArray = ACE(BoxArray);
    //CvInvoke.MedianBlur(BoxArray, BoxArray, 9);
    CvInvoke.BoxFilter(BoxArray, BoxArray, DepthType.Cv8U, new Size(9, 9), new Point(-1, -1), true, BorderType.Reflect101);

    // The output image is the filtered image converted to three channels
    //CvInvoke.Threshold(BoxArray, Outputtemp, Th > 50 ? Th : 50, 255, ThresholdType.ToZero);
    CvInvoke.CvtColor(BoxArray, this.FinalImage, ColorConversion.Gray2Bgr); //Outputtemp, BoxArray
    this.FilterImage = BoxArray;
}
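// The ACE(...) call above is to a helper that is not part of this snippet. Below is a
// minimal sketch of one common adaptive contrast enhancement scheme (boost the
// deviation from a local mean), assuming a single-channel 8-bit Matrix<byte> input;
// the window size and gain are illustrative assumptions, not values from the original.
private static Matrix<byte> ACE(Matrix<byte> src, int window = 15, double gain = 2.0)
{
    Matrix<byte> dst = new Matrix<byte>(src.Rows, src.Cols);
    using (Mat mean = new Mat())
    using (Mat detail = new Mat())
    {
        // Local mean at float precision via a normalized box filter
        CvInvoke.BoxFilter(src, mean, DepthType.Cv32F, new Size(window, window),
                           new Point(-1, -1), true, BorderType.Reflect101);
        // detail = src - local mean (float output keeps the sign)
        CvInvoke.Subtract(src, mean, detail, null, DepthType.Cv32F);
        // dst = mean + gain * detail, saturated back to 8-bit
        CvInvoke.AddWeighted(mean, 1.0, detail, gain, 0, dst, DepthType.Cv8U);
    }
    return dst;
}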
//###################################################### YMJ camera #####################################################
/// <summary>
/// YMJ camera: roughly find a suitable contour
/// </summary>
public static void getContoursForYMJ(Image<Gray, byte> grayImage, PictureBox ptbDisplay)
{
    GLB.TitleStr = "";
    int AREA = TisCamera.width * TisCamera.height; // total image area
    Image<Gray, byte> dnc = new Image<Gray, byte>(TisCamera.width, TisCamera.height);
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(2000); // contour collection
    CvInvoke.Threshold(grayImage, grayImage, 120, 255, ThresholdType.Otsu); // binarize (Otsu picks the threshold; the 120 is ignored)
    //ptbDisplay.Image = grayImage.ToBitmap(); // display the YMJ image
    CvInvoke.BoxFilter(grayImage, grayImage, Emgu.CV.CvEnum.DepthType.Cv8U, new Size(3, 3), new Point(-1, -1)); // box filter
    CvInvoke.FindContours(grayImage, contours, dnc, RetrType.Ccomp, ChainApproxMethod.ChainApproxSimple); // find contours
    List<VectorOfPoint> myContours = new List<VectorOfPoint>(); // candidate contours
    for (int k = 0; k < contours.Size; k++)
    {
        double area = CvInvoke.ContourArea(contours[k]); // area of each connected region
        if (area > 0.15 * AREA && area < 0.75 * AREA) // filter by area (minimum and maximum bounds)
        {
            myContours.Add(contours[k]);
        }
    }
    if (myContours.Count != 0)
    {
        try
        {
            int maxSize = myContours.Max(t => t.Size);
            //VectorOfPoint productContour = (VectorOfPoint)myContours.Where(t => t.Size.Equals(maxSize));
            int index = myContours.FindIndex(t => t.Size.Equals(maxSize)); // contour with the most points, treated as the largest
            VectorOfPoint productContour = myContours[index];
            getProduceInfoForYMJ(productContour);
            if (ProduceMacth() == true) // match succeeded
            {
                // Map camera coordinates to robot coordinates with a 2x3 affine transform
                GLB.robot_device_point.X = GLB.MatYMJCam[0] * GLB.camera_device_point.X + GLB.MatYMJCam[1] * GLB.camera_device_point.Y + GLB.MatYMJCam[2];
                GLB.robot_device_point.Y = GLB.MatYMJCam[3] * GLB.camera_device_point.X + GLB.MatYMJCam[4] * GLB.camera_device_point.Y + GLB.MatYMJCam[5];
                GLB.robot_device_point.Z = 990; // platform height
                GLB.device_angl += -2f; // the YMJ camera sits at a 2-degree angle to the robot
                GLB.device_angl = (float)(GLB.device_angl * Math.PI / 180f);
                if (GLB.robot_device_point.X < 1500 || GLB.robot_device_point.X > 2500 || // limit the working range
                    GLB.robot_device_point.Y < -600 || GLB.robot_device_point.Y > 600)
                {
                    GLB.Match_success = false;
                    GLB.TitleStr += ", but out of range";
                }
                else
                {
                    GLB.Match_success = true;
                }
            }
            else
            {
                GLB.Match_success = false;
            }
        }
        catch { } // swallow matching errors; Match_success keeps its last value
    }
    myContours.Clear();
}
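// The six coefficients in GLB.MatYMJCam above form a 2x3 affine transform from camera
// coordinates to robot coordinates. A minimal standalone sketch of that mapping,
// assuming the row-major layout [a, b, tx, c, d, ty] implied by the snippet
// (PointF is System.Drawing.PointF):
static PointF CameraToRobot(double[] m, PointF cam)
{
    // | X |   | m[0] m[1] m[2] |   | x |
    // | Y | = | m[3] m[4] m[5] | * | y |
    //                              | 1 |
    return new PointF(
        (float)(m[0] * cam.X + m[1] * cam.Y + m[2]),
        (float)(m[3] * cam.X + m[4] * cam.Y + m[5]));
}
// Usage: var robot = CameraToRobot(GLB.MatYMJCam, new PointF(camX, camY));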
// Input: single-channel estimated transmission map p. Guide I: single-channel grayscale
// of the original image. Output: single-channel transmission map after guided filtering.
// The intermediate products (I*I, I*p) overflow 8-bit storage, so the arithmetic is done
// at 32-bit float precision and converted back at the end. Note that r is used directly
// as the box window size here.
private static Image<Gray, byte> GuidedFilter(Image<Gray, byte> p, Image<Gray, byte> I, int r, double e)
{
    Image<Gray, float> fp = p.Convert<Gray, float>();
    Image<Gray, float> fI = I.Convert<Gray, float>();
    Size win = new Size(r, r);
    Point anchor = new Point(-1, -1);
    Image<Gray, float> mean_p = fp.CopyBlank();
    Image<Gray, float> mean_I = fI.CopyBlank();
    Image<Gray, float> II = fI.CopyBlank();
    Image<Gray, float> Ip = fI.CopyBlank();
    Image<Gray, float> corr_II = fI.CopyBlank();
    Image<Gray, float> corr_Ip = fI.CopyBlank();
    Image<Gray, float> var_II = fI.CopyBlank();
    Image<Gray, float> cov_Ip = fI.CopyBlank();
    Image<Gray, float> a = fI.CopyBlank();
    Image<Gray, float> b = fI.CopyBlank();
    Image<Gray, float> mean_a = fI.CopyBlank();
    Image<Gray, float> mean_b = fI.CopyBlank();
    Image<Gray, float> q = fp.CopyBlank();

    // Box-filtered means: mean of p, mean of I, then the auto- and cross-correlations
    CvInvoke.BoxFilter(fp, mean_p, DepthType.Cv32F, win, anchor, true, BorderType.Reflect101);
    CvInvoke.BoxFilter(fI, mean_I, DepthType.Cv32F, win, anchor, true, BorderType.Reflect101);
    CvInvoke.Multiply(fI, fI, II);
    CvInvoke.Multiply(fI, fp, Ip);
    CvInvoke.BoxFilter(II, corr_II, DepthType.Cv32F, win, anchor, true, BorderType.Reflect101);
    CvInvoke.BoxFilter(Ip, corr_Ip, DepthType.Cv32F, win, anchor, true, BorderType.Reflect101);

    // var(I) = corr(I,I) - mean(I)^2;  cov(I,p) = corr(I,p) - mean(I)*mean(p)
    CvInvoke.Multiply(mean_I, mean_I, var_II);
    CvInvoke.Subtract(corr_II, var_II, var_II);
    CvInvoke.Multiply(mean_I, mean_p, cov_Ip);
    CvInvoke.Subtract(corr_Ip, cov_Ip, cov_Ip);

    // a = cov(I,p) / (var(I) + e);  b = mean(p) - a * mean(I)
    CvInvoke.Divide(cov_Ip, var_II + e, a);
    CvInvoke.Multiply(a, mean_I, b);
    CvInvoke.Subtract(mean_p, b, b);
    CvInvoke.BoxFilter(a, mean_a, DepthType.Cv32F, win, anchor, true, BorderType.Reflect101);
    CvInvoke.BoxFilter(b, mean_b, DepthType.Cv32F, win, anchor, true, BorderType.Reflect101);

    // q = mean_a .* I + mean_b
    CvInvoke.Multiply(mean_a, fI, q);
    CvInvoke.Add(mean_b, q, q);
    return q.Convert<Gray, byte>();
}
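// A minimal usage sketch for GuidedFilter above, in the dehazing setting the header
// comment describes: refine a coarse transmission map using the grayscale original as
// the guide. EstimateTransmission is a hypothetical placeholder for whatever produced
// the coarse map; r = 8 and e = 650 (roughly (0.1 * 255)^2 for 8-bit data) are
// illustrative assumptions.
Image<Bgr, byte> hazy = new Image<Bgr, byte>("hazy.jpg");      // hypothetical input file
Image<Gray, byte> guide = hazy.Convert<Gray, byte>();          // guide: grayscale of the original
Image<Gray, byte> coarse = EstimateTransmission(hazy);         // hypothetical coarse estimate
Image<Gray, byte> refined = GuidedFilter(coarse, guide, 8, 650);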
private void button2_Click(object sender, EventArgs e)
{
    Image<Bgr, byte> dst = src.CopyBlank();
    CvInvoke.BoxFilter(src, dst, Emgu.CV.CvEnum.DepthType.Default, new Size(g_nBoxFilterValue, g_nBoxFilterValue), new Point(-1, -1));
    // Parameter 1: src, of type InputArray, the input (source) image.
    // Parameter 2: dst, of type OutputArray, the destination image; it must have the same size and type as the source.
    // Parameter 3: ddepth, of type int, the depth of the output image; -1 means use the source depth, i.e. src.depth().
    // Parameter 4: ksize, of type Size, the kernel size, written as Size(w, h) where w is the width and h the height in pixels. Size(3, 3) means a 3x3 kernel, Size(5, 5) a 5x5 kernel; this is the size of the filter template.
    // Parameter 5: anchor, of type Point, the anchor (the point being smoothed); default Point(-1, -1). Negative coordinates mean the anchor is taken at the kernel center, so the default places it there.
    // Parameter 6: normalize, of type bool, default true; a flag indicating whether the kernel is normalized by its area.
    // Parameter 7: borderType, of type int, the border mode used to extrapolate pixels outside the image. It defaults to BORDER_DEFAULT and is usually left alone.
    imageBox2.Image = dst;
}
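// The normalize parameter documented above deserves a concrete example: with
// normalize = false, BoxFilter computes a plain window sum instead of a mean, which
// saturates an 8-bit destination almost immediately, so a wider output depth is
// needed. A minimal sketch, reusing the src image from the handler above:
Mat windowSums = new Mat();
CvInvoke.BoxFilter(src, windowSums, Emgu.CV.CvEnum.DepthType.Cv32F,
                   new Size(5, 5), new Point(-1, -1),
                   false,                           // normalize = false: sum, not mean
                   Emgu.CV.CvEnum.BorderType.Default);
// Each pixel of windowSums now holds the 5x5 neighborhood sum (25x the mean value).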
private void FrmBlurAverage_PassValuesEvent(object sender, FunctionArgs.BlurAverageArgs e)
{
    Size ksize = new Size(e.KernelSize, e.KernelSize);
    Point anchor = new Point(-1, -1);
    //ToDo: add a BorderType option (open issue: BorderType.Constant)
    BorderType borderType = BorderType.Default;
    switch (e.BlurType)
    {
        case FilterType.Average:
            CvInvoke.Blur(mCurrentImage, mTempImage, ksize, anchor, borderType);
            break;
        case FilterType.Box:
            CvInvoke.BoxFilter(mCurrentImage, mTempImage, DepthType.Default, ksize, anchor, e.Normalize, borderType);
            break;
        case FilterType.Gaussian:
            //ToDo: add a SigmaX option
            CvInvoke.GaussianBlur(mCurrentImage, mTempImage, ksize, e.SigmaX, 0, borderType);
            break;
        case FilterType.Median:
            CvInvoke.MedianBlur(mCurrentImage, mTempImage, e.KernelSize);
            break;
        case FilterType.Bilateral:
            //ToDo: bilateral filter options
            //CvInvoke.BilateralFilter(mCurrentImage, mTempImage)
            break;
        default:
            break;
    }
    // Preview disabled: restore the current image
    if (!e.PreviewEnabled)
    {
        mFrmMainImage.SetImageSource(mCurrentImage);
    }
    else
    {
        mFrmMainImage.SetImageSource(mTempImage);
    }
}
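// The Bilateral case above is left as a ToDo. A minimal sketch of the call it could
// make; the neighborhood diameter (9) and the two sigmas (75) are illustrative
// assumptions, not values from the original project. Note that BilateralFilter does
// not support in-place operation, so mTempImage must be a buffer distinct from
// mCurrentImage:
CvInvoke.BilateralFilter(mCurrentImage, mTempImage,
                         9,    // d: diameter of the pixel neighborhood
                         75,   // sigmaColor: filter sigma in the color space
                         75,   // sigmaSpace: filter sigma in the coordinate space
                         borderType);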
public Image<Bgr, byte> BoxFilter(Image<Bgr, byte> image, int filterSize)
{
    CvInvoke.BoxFilter(image, image, Emgu.CV.CvEnum.DepthType.Default,
                       new System.Drawing.Size(filterSize, filterSize),
                       new System.Drawing.Point(-1, -1));
    return image;
}
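// Note that the wrapper above filters in place: the instance passed in is overwritten
// and then returned. A minimal usage sketch that keeps the original intact by cloning
// first ("input.jpg" is a hypothetical file):
Image<Bgr, byte> original = new Image<Bgr, byte>("input.jpg");
Image<Bgr, byte> smoothed = BoxFilter(original.Clone(), 5); // original is untouched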
public Form1()
{
    InitializeComponent();
    this.imageBox1.Image = image;

    // Task 1: custom convolution (the top 3x3 block is a sharpening kernel)
    ConvolutionKernelF Kernel = new ConvolutionKernelF(
        new float[,]
        {
            { -0.1f, -0.2f, -0.1f },
            { -0.2f,  2.5f, -0.2f },
            { -0.1f, -0.2f, -0.1f },
            { -0.1f, -0.2f, -0.1f },
            { -0.1f, -0.2f, -0.1f },
            { -0.1f, -0.2f, -0.1f }
        });
    var img1 = image.Copy();
    CvInvoke.Filter2D(image, img1, Kernel, new Point() { X = -1, Y = -1 });
    this.imageBox2.Image = img1;

    // Task 2: smoothing filters
    var img2 = image.Copy();
    CvInvoke.Blur(image, img2, new Size { Height = 10, Width = 10 }, new Point() { X = 1, Y = 1 });
    this.imageBox3.Image = img2;

    var img3 = image.Copy();
    CvInvoke.BoxFilter(image, img3, Emgu.CV.CvEnum.DepthType.Default, new Size { Height = 15, Width = 15 }, new Point() { X = 1, Y = 1 });
    this.imageBox4.Image = img3;

    var img4 = image.Copy();
    CvInvoke.GaussianBlur(image, img4, new Size { Height = 201, Width = 201 }, 4);
    this.imageBox5.Image = img4;

    var img5 = image.Copy();
    CvInvoke.MedianBlur(image, img5, 7);
    this.imageBox6.Image = img5;

    // Task 3: morphology with the default 3x3 structuring element
    var img6 = image.Copy();
    CvInvoke.Erode(image, img6, new Mat(), new Point() { X = -1, Y = -1 }, 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(1));
    this.imageBox7.Image = img6;

    var img7 = image.Copy();
    CvInvoke.Dilate(image, img7, new Mat(), new Point() { X = -1, Y = -1 }, 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar(1));
    this.imageBox8.Image = img7;

    // Task 4: edge detection (Canny takes the 8-bit grayscale image, not the color one)
    var img_canny = new Image<Gray, byte>("C:/Disk D/Studying/Course_4/Multimedia/laba2/laba2/images/image.jpg");
    var img8 = img_canny.Copy();
    CvInvoke.Canny(img_canny, img8, 10, 100);
    this.imageBox9.Image = img8;

    // Task 5: histogram equalization
    var img = new Image<Gray, byte>("C:/Disk D/Studying/Course_4/Multimedia/laba2/laba2/images/image.jpg");
    var img9 = img.Copy();
    CvInvoke.EqualizeHist(img, img9);
    this.imageBox10.Image = img9;
}
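// Task 2 above applies both Blur and BoxFilter. The two are equivalent when the box
// kernel is normalized: Blur is simply a normalized BoxFilter at the source depth.
// A minimal sketch of the equivalence, reusing the image field from the form:
var viaBlur = image.Copy();
var viaBox = image.Copy();
CvInvoke.Blur(image, viaBlur, new Size(15, 15), new Point(-1, -1));
CvInvoke.BoxFilter(image, viaBox, Emgu.CV.CvEnum.DepthType.Default,
                   new Size(15, 15), new Point(-1, -1), true); // normalize = true
// viaBlur and viaBox now hold identical results (same kernel, anchor, and border mode).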
FileOperation fileOperation = new FileOperation(); // instance of the text-file helper class
/// <summary>
/// Get the contours of each region
/// </summary>
public void getContours(TextBox txtTypeName, PictureBox ptb) // find the nearest contour
{
    GLB.Match_success = false; // reset before a new detection
    Image<Gray, byte> dnc = new Image<Gray, byte>(GLB.BUFW, GLB.BUFH);
    Image<Gray, byte> threshImage = new Image<Gray, byte>(GLB.BUFW, GLB.BUFH);
    CvInvoke.CvtColor(GLB.frame, threshImage, ColorConversion.Bgra2Gray); // convert to grayscale
    //CvInvoke.BilateralFilter(threshImage, threshImage, 10, 10, 4); // bilateral filter
    //CvInvoke.GaussianBlur(threshImage, threshImage, new Size(3, 3), 4); // Gaussian filter
    CvInvoke.BoxFilter(threshImage, threshImage, Emgu.CV.CvEnum.DepthType.Cv8U, new Size(3, 3), new Point(-1, -1)); // box filter
    #region discarded preprocessing experiments
    //var kernal1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
    //CvInvoke.Dilate(threshImage, threshImage, kernal1, new Point(-1, -1), 2, BorderType.Default, new MCvScalar()); // dilate
    //var kernal1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
    //CvInvoke.Erode(threshImage, threshImage, kernal1, new Point(-1, -1), 2, BorderType.Default, new MCvScalar()); // erode

    // Approach 1
    //CvInvoke.Threshold(threshImage, threshImage, 100, 255, ThresholdType.BinaryInv | ThresholdType.Otsu); // binarize
    //if (Mainform.runMode == 6) // matching a pallet
    //{
    //    var kernal1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(9, 9), new Point(-1, -1));
    //    CvInvoke.Erode(threshImage, threshImage, kernal1, new Point(-1, -1), 1, BorderType.Default, new MCvScalar()); // erode
    //}
    //else // matching a box
    //{
    //    var kernal1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(3, 3), new Point(-1, -1));
    //    CvInvoke.Erode(threshImage, threshImage, kernal1, new Point(-1, -1), 2, BorderType.Default, new MCvScalar()); // erode
    //}

    // Approach 2
    //if (Mainform.runMode == 6) // matching a pallet
    //{
    //    var kernal1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(9, 9), new Point(-1, -1));
    //    CvInvoke.Dilate(threshImage, threshImage, kernal1, new Point(-1, -1), 1, BorderType.Default, new MCvScalar()); // dilate
    //}
    //else // dilation made the jitter worse
    //{
    //    var kernal1 = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(5, 5), new Point(-1, -1));
    //    CvInvoke.Dilate(threshImage, threshImage, kernal1, new Point(-1, -1), 1, BorderType.Default, new MCvScalar()); // dilate
    //}
    //ptb.Image = threshImage.ToBitmap();
    #endregion
    // Detect connected regions; each region is represented by a series of points
    try
    {
        VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint(2000); // contour collection
        CvInvoke.FindContours(threshImage, contours, dnc, RetrType.Ccomp, ChainApproxMethod.ChainApproxSimple); // find contours
        GLB.block_num = 0;
        Dictionary<int, VectorOfPoint> mycontours = new Dictionary<int, VectorOfPoint>(100); // index, contour
        mycontours.Clear();
        for (int k = 0; k < contours.Size; k++)
        {
            double area = CvInvoke.ContourArea(contours[k]); // area of each connected region
            if (area > 100000 && area < 800000) // filter by area (minimum and maximum bounds)
            {
                if (!mycontours.ContainsKey(k))
                {
                    mycontours.Add(k, contours[k]);
                }
            }
        }
        float my_depth_temp = GLB.myp3d[(GLB.BUFH / 2 * GLB.BUFW + GLB.BUFW / 2) * 3 + 2];
        // An empty cart has arrived: let the AGV leave automatically
        if (mycontours.Count == 0 && Mainform.ProduceArrive == true && Mainform.CarryMode == 0 && Mainform.runMode == 1 && (my_depth_temp > 1400 || double.IsNaN(my_depth_temp)))
        {
            Mainform.ProduceArrive = false;
            Mainform.SetCarryArrive(0); // mark the product as not delivered
            ArrayList array = new ArrayList(); // batch of SQL statements
            string sql = "update Agv_list set isworking =0,stowerid ='',pronum =0 where agvid in(select agvid from Agvmission_list where fstatus =7 and messionType =1 and stowerid='" + GLB.RobotId + "')"; // update the AGV status
            string sql1 = "update Agvmission_list set fstatus =6 ,actionenddate=getdate() where fstatus =7 and messionType =1 and stowerid='" + GLB.RobotId + "'"; // mark the waiting mission as finished
            array.Add(sql);
            array.Add(sql1);
            bool isok = MyDataLib.transactionOp_list(array);
            Mainform.SetRobotStatus(2, "等待送货"); // set the palletizing robot status to "waiting for delivery"
        }
        // Sort by area, largest first, into a new dictionary
        Dictionary<int, VectorOfPoint> mycontours_SortedByArea = new Dictionary<int, VectorOfPoint>(100); // index, contour
        mycontours_SortedByArea.Clear();
        mycontours_SortedByArea = mycontours.OrderByDescending(o => CvInvoke.ContourArea(o.Value)).ToDictionary(p => p.Key, o => o.Value);
        GLB.obj.Clear();
        foreach (int k in mycontours_SortedByArea.Keys)
        {
            OBJ obj = new OBJ();
            {
                if (!GLB.obj.ContainsKey(GLB.block_num))
                {
                    GLB.obj.Add(GLB.block_num, obj); // add if not already present
                }
                GLB.obj[GLB.block_num].typName = txtTypeName.Text.Replace(" ", ""); // object type name
                if (getMinAreaRect(mycontours_SortedByArea[k], GLB.block_num) == true) // get the minimum-area bounding rectangle and derived parameters
                {
                    if (GLB.img_mode == 0) // matching mode
                    {
                        if (Device_Macth(GLB.block_num) == true) // compare against the library; produce the workpiece position, normal vector, and rotation angle
                        {
                            Thread.Sleep(400);
                            break;
                        }
                    }
                }
                GLB.TitleStr += "block_num=" + GLB.block_num;
                GLB.block_num++; // region counter
            }
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error: " + ex.Message);
        throw;
    }
}
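// The largest-first Dictionary built above relies on .NET's Dictionary enumerating in
// insertion order, which holds in practice but is an implementation detail rather than
// a documented guarantee. A minimal sketch of the same ranking with an explicitly
// ordered collection:
List<KeyValuePair<int, VectorOfPoint>> ranked = mycontours
    .OrderByDescending(kv => CvInvoke.ContourArea(kv.Value))
    .ToList(); // List preserves order by contract
foreach (KeyValuePair<int, VectorOfPoint> kv in ranked)
{
    // process kv.Value here, largest contour first
}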