public void ProcessImage(Image<Gray, byte> grayFrame)
{
    if (equalizeHist)
    {
        grayFrame._EqualizeHist(); //autocontrast
    }
    //smoothed
    Image<Gray, byte> smoothedGrayFrame = grayFrame.PyrDown();
    smoothedGrayFrame = smoothedGrayFrame.PyrUp();
    //canny
    Image<Gray, byte> cannyFrame = null;
    if (noiseFilter)
    {
        cannyFrame = smoothedGrayFrame.Canny(new Gray(cannyThreshold), new Gray(cannyThreshold));
    }
    //smoothing
    if (blur)
    {
        grayFrame = smoothedGrayFrame;
    }
    //binarize
    CvInvoke.cvAdaptiveThreshold(grayFrame, grayFrame, 255,
        Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C,
        Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY,
        adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1,
        adaptiveThresholdParameter);
    //
    grayFrame._Not();
    //
    if (addCanny)
    {
        if (cannyFrame != null)
        {
            grayFrame._Or(cannyFrame);
        }
    }
    //
    this.binarizedFrame = grayFrame;

    //dilate canny contours for filtering
    if (cannyFrame != null)
    {
        cannyFrame = cannyFrame.Dilate(3);
    }
    //find contours
    var sourceContours = grayFrame.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    //filter contours
    contours = FilterContours(sourceContours, cannyFrame, grayFrame.Width, grayFrame.Height);
    //find templates
    lock (foundTemplates)
        foundTemplates.Clear();
    samples.Clear();

    lock (templates)
        Parallel.ForEach<Contour<Point>>(contours, (contour) =>
        {
            var arr = contour.ToArray();
            Template sample = new Template(arr, contour.Area, samples.templateSize);
            lock (samples)
                samples.Add(sample);

            if (!onlyFindContours)
            {
                FoundTemplateDesc desc = finder.FindTemplate(templates, sample);
                if (desc != null)
                    lock (foundTemplates)
                        foundTemplates.Add(desc);
            }
        });
    //
    FilterByIntersection(ref foundTemplates);
}
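//Note on the block-size expression above: OpenCV requires an odd block size for
//adaptive thresholding, and adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1
//always yields one (e.g. 4 -> 4 + 0 + 1 = 5, and 5 -> 5 + 1 + 1 = 7).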
public void ProcessImage(Image<Gray, byte> grayFrame)
{
    if (equalizeHist)
    {
        grayFrame._EqualizeHist(); //autocontrast
    }
    //Gaussian smoothing via pyramid down/up
    Image<Gray, byte> smoothedGrayFrame = grayFrame.PyrDown();
    smoothedGrayFrame = smoothedGrayFrame.PyrUp();
    //canny
    Image<Gray, byte> cannyFrame = null;
    if (noiseFilter)
    {
        cannyFrame = smoothedGrayFrame.Canny(cannyThreshold, cannyThreshold);
    }
    //smoothing
    if (blur)
    {
        grayFrame = smoothedGrayFrame;
    }
    //binarize with a locally adaptive threshold; keeping the threshold itself a
    //parameter makes the detection more effective
    //CvInvoke.cvAdaptiveThreshold(grayFrame, grayFrame, 255, Emgu.CV.CvEnum.ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C, Emgu.CV.CvEnum.THRESH.CV_THRESH_BINARY, adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1, adaptiveThresholdParameter);
    CvInvoke.AdaptiveThreshold(grayFrame, grayFrame, 255,
        Emgu.CV.CvEnum.AdaptiveThresholdType.MeanC,
        Emgu.CV.CvEnum.ThresholdType.Binary,
        adaptiveThresholdBlockSize + adaptiveThresholdBlockSize % 2 + 1,
        adaptiveThresholdParameter);
    //
    grayFrame._Not();
    //
    if (addCanny)
    {
        if (cannyFrame != null)
        {
            grayFrame._Or(cannyFrame); //tested: this makes the contours stand out more
        }
    }
    //
    this.binarizedFrame = grayFrame;

    //dilate canny contours for filtering
    //dilation grows the white regions by a ring of pixels, so the contours look thinner
    if (cannyFrame != null)
    {
        cannyFrame = cannyFrame.Dilate(3);
    }
    //find contours
    VectorOfVectorOfPoint sourceContours = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(grayFrame, sourceContours, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxNone);
    //var sourceContours = grayFrame.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST);
    //filter contours
    contours = FilterContours(sourceContours, cannyFrame, grayFrame.Width, grayFrame.Height);
    //find templates
    lock (foundTemplates)
        foundTemplates.Clear();
    samples.Clear();

    lock (templates)
        //multithreaded: matching runs in parallel, one task per contour
        Parallel.ForEach<VectorOfPoint>(contours, (contour) =>
        {
            var arr = contour.ToArray();
            Template sample = new Template(arr, CvInvoke.ContourArea(contour), samples.templateSize);
            lock (samples)
                samples.Add(sample);

            if (!onlyFindContours)
            {
                FoundTemplateDesc desc = finder.FindTemplate(templates, sample);
                if (desc != null)
                    lock (foundTemplates)
                        foundTemplates.Add(desc);
            }
        });
    //
    FilterByIntersection(ref foundTemplates);
}
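//A minimal driver sketch for the Emgu CV 3.x version above, assuming a webcam
//source. The ImageProcessor class name and the VideoCapture wiring are
//illustrative assumptions; only ProcessImage and foundTemplates are taken from
//the code in this section.
public static void ProcessOneFrame(ImageProcessor processor, Emgu.CV.VideoCapture capture)
{
    using (Emgu.CV.Mat frame = capture.QueryFrame())             //grab a BGR frame
    {
        if (frame == null) return;                               //end of stream
        Image<Gray, byte> gray = frame.ToImage<Bgr, byte>()      //wrap as a color image
                                      .Convert<Gray, byte>();    //then convert to grayscale
        processor.ProcessImage(gray);                            //binarize, find and match contours
        lock (processor.foundTemplates)                          //results are filled under this lock
        {
            foreach (FoundTemplateDesc found in processor.foundTemplates)
                Console.WriteLine(found.template.name + " rate=" + found.rate);
        }
    }
}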
private void DrawAugmentedReality(FoundTemplateDesc found, Graphics gr)
{
    string fileName = Path.GetDirectoryName(templateFile) + "\\" + found.template.name;
    if (!AugmentedRealityImages.ContainsKey(fileName))
    {
        if (!File.Exists(fileName)) return;
        AugmentedRealityImages[fileName] = Image.FromFile(fileName);
    }
    Image img = AugmentedRealityImages[fileName];
    //center of the found contour in source-image coordinates
    Point p = found.sample.contour.SourceBoundingRect.Center();
    //draw the overlay translated, rotated and scaled to match the found template
    var state = gr.Save();
    gr.TranslateTransform(p.X, p.Y);
    //MessageBox.Show((180f * found.angle / Math.PI).ToString()); //debug leftover: would pop a blocking dialog on every repaint
    gr.RotateTransform((float)(180f * found.angle / Math.PI)); //radians to degrees
    gr.ScaleTransform((float)found.scale, (float)found.scale);
    gr.DrawImage(img, new Point(-img.Width / 2, -img.Height / 2));
    gr.Restore(state);
}
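//Sketch of how DrawAugmentedReality would typically be driven: from a WinForms
//Paint handler, once per recognized template. The pictureBox control and the
//processor field are illustrative assumptions; only DrawAugmentedReality and
//foundTemplates come from the code in this section.
private void pictureBox_Paint(object sender, PaintEventArgs e)
{
    lock (processor.foundTemplates)
    {
        foreach (FoundTemplateDesc found in processor.foundTemplates)
            DrawAugmentedReality(found, e.Graphics);
    }
}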
public FoundTemplateDesc FindTemplate(Templates templates, Template sample)
{
    FoundTemplateDesc desc2;
    double num = 0.0;
    double angle = 0.0;
    Complex complex = new Complex();
    Template objA = null;

    foreach (Template current in templates)
    {
        //step 1: all four autocorrelation (ACF) descriptors must lie within maxACFDescriptorDeviation
        if ((Math.Abs((int)(sample.autoCorrDescriptor1 - current.autoCorrDescriptor1)) <= this.maxACFDescriptorDeviation)
            && (Math.Abs((int)(sample.autoCorrDescriptor2 - current.autoCorrDescriptor2)) <= this.maxACFDescriptorDeviation)
            && (Math.Abs((int)(sample.autoCorrDescriptor3 - current.autoCorrDescriptor3)) <= this.maxACFDescriptorDeviation)
            && (Math.Abs((int)(sample.autoCorrDescriptor4 - current.autoCorrDescriptor4)) <= this.maxACFDescriptorDeviation))
        {
            double num3 = 0.0;
            //step 2: optional ACF check via the normalized dot product
            if (!this.checkACF || (current.autoCorr.NormDot(sample.autoCorr).Norma >= this.minACF))
            {
                //step 3: optional ICF check; the peak of the inter-correlation
                //function gives the match rate and the rotation angle
                if (this.checkICF)
                {
                    complex = current.contour.InterCorrelation(sample.contour).FindMaxNorma();
                    num3 = complex.Norma / (current.contourNorma * sample.contourNorma);
                    if (num3 < this.minICF)
                        continue;
                    if (Math.Abs(complex.Angle) > this.maxRotateAngle)
                        continue;
                }
                //keep the best-rated candidate; 1.5707963267948966 is Math.PI / 2 (90 degrees)
                if ((!current.preferredAngleNoMore90 || (Math.Abs(complex.Angle) < 1.5707963267948966)) && (num3 >= num))
                {
                    num = num3;
                    objA = current;
                    angle = complex.Angle;
                }
            }
        }
    }

    //ignore anti-patterns
    if ((objA != null) && (objA.name == this.antiPatternName))
        objA = null;

    if (ReferenceEquals(objA, null))
        desc2 = null;
    else
        desc2 = new FoundTemplateDesc { template = objA, rate = num, sample = sample, angle = angle };

    return desc2;
}
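//Matching criteria, summarized from the body above: a template is accepted when
//(1) all four autocorrelation descriptors deviate from the sample's by at most
//maxACFDescriptorDeviation; (2) if checkACF is set, the norm of the normalized
//ACF dot product is at least minACF; (3) if checkICF is set, the peak of the
//inter-correlation function, normalized by both contour norms, is at least
//minICF and its phase (the rotation angle) is within maxRotateAngle. Among the
//survivors the candidate with the highest ICF rate wins, unless its name equals
//the anti-pattern name.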
public void ProcessImage(Image<Gray, byte> grayFrame)
{
    Action<Contour<Point>> body = null;
    if (this.equalizeHist)
    {
        grayFrame._EqualizeHist();
    }
    Image<Gray, byte> image = grayFrame.PyrDown().PyrUp();
    Image<Gray, byte> image2 = null;
    if (this.noiseFilter)
    {
        image2 = image.Canny(new Gray((double)this.cannyThreshold), new Gray((double)this.cannyThreshold));
    }
    if (this.blur)
    {
        grayFrame = image;
    }
    CvInvoke.cvAdaptiveThreshold((IntPtr)grayFrame, (IntPtr)grayFrame, 255.0,
        ADAPTIVE_THRESHOLD_TYPE.CV_ADAPTIVE_THRESH_MEAN_C,
        THRESH.CV_THRESH_BINARY,
        (this.adaptiveThresholdBlockSize + (this.adaptiveThresholdBlockSize % 2)) + 1,
        1.0);
    grayFrame._Not();
    if (this.addCanny && (image2 != null))
    {
        grayFrame._Or(image2);
    }
    this.binarizedFrame = grayFrame;
    if (image2 != null)
    {
        image2 = image2.Dilate(3);
    }
    Contour<Point> contours = grayFrame.FindContours(CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_NONE, RETR_TYPE.CV_RETR_LIST);
    this.contours = this.FilterContours(contours, image2, grayFrame.Width, grayFrame.Height);
    lock (this.foundTemplates)
    {
        this.foundTemplates.Clear();
    }
    this.samples.Clear();
    lock (this.templates)
    {
        if (body == null)
        {
            body = delegate (Contour<Point> contour)
            {
                Template item = new Template(contour.ToArray(), contour.Area, this.samples.templateSize);
                lock (this.samples)
                {
                    this.samples.Add(item);
                }
                if (!this.onlyFindContours)
                {
                    FoundTemplateDesc desc = this.finder.FindTemplate(this.templates, item);
                    if (desc != null)
                    {
                        lock (this.foundTemplates)
                        {
                            this.foundTemplates.Add(desc);
                        }
                    }
                }
            };
        }
        Parallel.ForEach<Contour<Point>>(this.contours, body);
    }
    FilterByIntersection(ref this.foundTemplates);
}