public void GridDetection()
{
    // clone the grayscale source image
    Mat image = originalImage.Mat.Clone();

    // blur the image
    CvInvoke.GaussianBlur(image, image, new Size(11, 11), 0);

    // threshold the image and invert it so the grid lines become white
    CvInvoke.AdaptiveThreshold(image, image, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, 5, 2);
    CvInvoke.BitwiseNot(image, image);

    // build a 3x3 cross-shaped kernel and dilate to close gaps in the grid lines
    Mat kernel = new Mat(new Size(3, 3), DepthType.Cv8U, 1);
    Marshal.Copy(new byte[] { 0, 1, 0, 1, 1, 1, 0, 1, 0 }, 0, kernel.DataPointer, 9);
    CvInvoke.Dilate(image, image, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));

    // keep only the biggest blob (the outer grid), then undo the dilation
    FindOuterGridByFloorFill(image);
    CvInvoke.Erode(image, image, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));
    ImageShowCase.ShowImage(image, "biggest blob");

    // detect lines with the standard Hough transform
    VectorOfPointF lines = new VectorOfPointF();
    CvInvoke.HoughLines(image, lines, 1, Math.PI / 180, 200);

    // merging lines
    PointF[] linesArray = lines.ToArray();
    //MergeLines(linesArray, image);
    lines = RemoveUnusedLine(linesArray);

    // Harris corner response (OpenCV reallocates the output as a 32-bit float image)
    Mat harrisResponse = new Mat(image.Size, DepthType.Cv8U, 1);
    CvInvoke.CornerHarris(image, harrisResponse, 5);

    DrawLines(lines.ToArray(), image);
    ImageShowCase.ShowImage(image, "corners");
}
private void btnHarrisApply_Click(object sender, EventArgs e)
{
    try
    {
        // the Harris response is stored in a 32-bit float image
        Image<Gray, float> response = new Image<Gray, float>(img.Size);
        CvInvoke.CornerHarris(img, response, 2, 3, 0.04); // run the Harris corner detector

        // locate the maximum response
        double max = 0;
        double min = 0;
        Point maxLoc = new Point(0, 0);
        Point minLoc = new Point(0, 0);
        CvInvoke.MinMaxLoc(response, ref min, ref max, ref minLoc, ref maxLoc);

        Bitmap result = (Bitmap)picOriginal.Image.Clone();
        double thresh = 0.01 * max;

        // for every pixel (i, j) where response[j, i] > thresh, paint the pixel red
        // (Image.Data is indexed [row, column, channel]; SetPixel takes (x, y))
        for (int j = 0; j < result.Height; j++)
        {
            for (int i = 0; i < result.Width; i++)
            {
                double r = response.Data[j, i, 0];
                if (r > thresh)
                {
                    // set the pixel to red
                    result.SetPixel(i, j, Color.FromArgb(255, 0, 0));
                }
            }
        }

        picResult.Image = result;
    }
    catch (Exception ex)
    {
        MessageBox.Show("Please load image first.");
    }
}
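// Most of these examples follow the same pattern: run CvInvoke.CornerHarris on an
// 8-bit grayscale image, normalize the 32-bit float response, then mark every pixel
// whose response exceeds a threshold. The snippet below is a minimal, self-contained
// sketch of that pattern (it is not taken from any project above); the file name,
// threshold value, and window title are placeholders.
using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

class HarrisSketch
{
    static void Main()
    {
        // load the image as grayscale ("input.png" is a placeholder path)
        var gray = new Image<Gray, byte>("input.png");

        // the Harris response is a 32-bit float image of the same size
        var response = new Image<Gray, float>(gray.Size);
        CvInvoke.CornerHarris(gray, response, 2, 3, 0.04);

        // stretch the response into [0, 255] so a fixed threshold can be used
        CvInvoke.Normalize(response, response, 0, 255, NormType.MinMax);

        // circle every pixel whose normalized response exceeds the (placeholder) threshold;
        // Image.Data is indexed [row, column, channel], Point takes (x, y)
        var annotated = gray.Convert<Bgr, byte>();
        const float threshold = 120f;
        for (int row = 0; row < response.Rows; row++)
        {
            for (int col = 0; col < response.Cols; col++)
            {
                if (response.Data[row, col, 0] > threshold)
                {
                    CvInvoke.Circle(annotated, new Point(col, row), 3, new MCvScalar(0, 0, 255), 1);
                }
            }
        }

        CvInvoke.Imshow("harris corners", annotated);
        CvInvoke.WaitKey(0);
    }
}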
private void button_CornerHarris_Click(object sender, EventArgs e)
{
    // imageBox1.Image = imagebgr;
    // Image<Gray, byte> image_CornerHarris = imagetest;
    Mat imtstoimat = imagetest.Mat;
    Mat cornmat = new Mat(); // separate output Mat so the source image is not overwritten
    //CvInvoke.CvtColor(imtstoimat, cornmat, ColorConversion.Bgr2Gray);

    //Mat scr = new Mat(@"D:\image_CornerHarris.jpg", ImreadModes.AnyColor);        // load the color image
    //Mat gray_scr = new Mat(@"D:\image_CornerHarris.jpg", ImreadModes.Grayscale);  // load the grayscale image
    ////CvInvoke.Threshold(gray_scr, gray_scr, 150, 255, ThresholdType.Otsu);       // binarize the image
    ////CvInvoke.CornerHarris(gray_scr, gray_scr, 3);                               // corner detection on the binarized image
    ////CvInvoke.Canny(gray_scr, gray_scr, 120, 150);                               // Canny edge detection
    ////CvInvoke.CornerHarris(gray_scr, gray_scr, 3);                               // corner detection on the edge map
    //CvInvoke.CornerHarris(gray_scr, gray_scr, 3);                                 // corner detection on the grayscale image
    //CvInvoke.Normalize(gray_scr, gray_scr, 0, 255, NormType.MinMax);              // map the response into the [0, 255] range
    //gray_scr = gray_scr.ToImage<Gray, byte>().Mat;                                // convert the float result to a byte image
    //imageBox1.Image = scr;                                                        // display the input image
    //imageBox2.Image = gray_scr;                                                   // display the corner detection result

    CvInvoke.CornerHarris(imtstoimat, cornmat, 3);                 // corner detection on the grayscale image
    CvInvoke.Normalize(cornmat, cornmat, 0, 255, NormType.MinMax); // map the response into the [0, 255] range
    cornmat = cornmat.ToImage<Gray, byte>().Mat;                   // convert the float result to a byte image
    imageBox1.Image = imagemat;                                    // display the input image
    imageBox2.Image = cornmat;                                     // display the corner detection result
}
private static Image<Gray, float> GetRawCornerImage(Image<Gray, byte> contourImage)
{
    var cornerImage = new Image<Gray, float>(contourImage.Size);
    CvInvoke.CornerHarris(contourImage, cornerImage, 7);
    return cornerImage;
}
static void DoHarris(string imagePath)
{
    var srcImage = new Image<Gray, byte>(imagePath);
    var cornerImage = new Image<Gray, float>(srcImage.Size);
    var thresholdImage = new Image<Gray, byte>(srcImage.Size);
    CvInvoke.CornerHarris(srcImage, cornerImage, 5, 5, 0.01);
    CvInvoke.Threshold(cornerImage, thresholdImage, 0.0001, 255.0, Emgu.CV.CvEnum.ThresholdType.BinaryInv);
    thresholdImage.Save($"corner-{imagePath}");
}
private void button20_Click(object sender, EventArgs e)
{
    // create an image to hold the corners
    Image<Gray, float> ImgFuentes = new Image<Gray, float>(PictureAnalizer.ImagenEntrada);
    Image<Gray, float> ImgEsquinas = new Image<Gray, float>(PictureAnalizer.ImagenEntrada);
    CvInvoke.CornerHarris(ImgFuentes, ImgEsquinas, 3, 3, 0.01);
    PictureAnalizer.ImagenEntrada = ImgEsquinas.ToBitmap();
    ImagenEntrada.Image = PictureAnalizer.ImagenEntrada;
}
private void processFrame(object sender, EventArgs arg)
{
    cam.Retrieve(inputImage, 3);
    CvInvoke.CvtColor(inputImage, inputGrayImage, ColorConversion.Bgr2Gray, 0);
    CvInvoke.CornerHarris(inputGrayImage, outputCornerImage, 3);
    CvInvoke.Normalize(outputCornerImage, outputCornerImage, 0, 255, NormType.MinMax, DepthType.Cv32F);
    imageBoxInput.Image = inputGrayImage;
    imageBoxOutput.Image = outputCornerImage;
}
private Mat PrepareImage(Mat Image)
{
    Mat inputBinary = new Mat();
    CvInvoke.Threshold(Image, inputBinary, 0, 255, ThresholdType.Binary | ThresholdType.Otsu);
    _input_thinned = Skelatanize(inputBinary.Bitmap).Mat;
    Mat harris_corners = Mat.Zeros(_input_thinned.Rows, _input_thinned.Cols, DepthType.Cv32F, 3);
    CvInvoke.CornerHarris(_input_thinned, harris_corners, 2, 3, 0.04, BorderType.Default);
    Mat harris_normalised = new Mat();
    CvInvoke.Normalize(harris_corners, harris_normalised, 0, 255, NormType.MinMax, DepthType.Cv32F);
    return harris_normalised;
}
int max_thresh = 175; // maximum threshold

private void button2_Click(object sender, EventArgs e)
{
    //---------------------------[1] Local variables-----------------------------
    Image<Gray, float> dstImage = new Image<Gray, float>(src.Size);   // destination (Harris response) image
    Mat normImage = new Mat();                                        // normalized image
    Image<Gray, byte> scaledImage = new Image<Gray, byte>(src.Size);  // 8-bit unsigned image after linear scaling

    //---------------------------[2] Initialization------------------------------
    // reset the image to display so values from the previous call are cleared
    Image<Gray, byte> g_srcImage1 = src.Clone();

    //---------------------------[3] Detection-----------------------------------
    // run the corner detector; the response is a 32-bit float image
    CvInvoke.CornerHarris(src, dstImage, 2);

    // normalize and convert
    CvInvoke.Normalize(dstImage, normImage, 0, 255, Emgu.CV.CvEnum.NormType.MinMax);
    double min = 0, max = 0;
    Point minp = new Point(0, 0);
    Point maxp = new Point(0, 0);
    CvInvoke.MinMaxLoc(normImage, ref min, ref max, ref minp, ref maxp);
    double scale = 255 / (max - min);
    double shift = -min * scale;
    CvInvoke.ConvertScaleAbs(normImage, scaledImage, scale, shift); // linearly scale the normalized image to 8-bit unsigned

    //---------------------------[4] Drawing-------------------------------------
    // draw the detected corners whose response exceeds the threshold
    byte[] data = scaledImage.Bytes;
    for (int j = 0; j < normImage.Rows; j++)
    {
        for (int i = 0; i < normImage.Cols; i++)
        {
            int k = j * src.Width + i; // row-major index into the byte buffer
            if (k < data.Length)
            {
                if (data[k] > thresh)
                {
                    CvInvoke.Circle(g_srcImage1, new Point(i, j), 5, new MCvScalar(10, 10, 255), 2);
                    CvInvoke.Circle(scaledImage, new Point(i, j), 5, new MCvScalar(0, 10, 255), 2);
                }
            }
        }
    }
    imageBox1.Image = g_srcImage1;
    imageBox2.Image = scaledImage;
}
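// A note on the ConvertScaleAbs calls used above and in later snippets: to map a
// response range [min, max] onto [0, 255] linearly, use scale = 255 / (max - min)
// and shift = -min * scale, so that dst = src * scale + shift sends min to 0 and
// max to 255. The values below are hypothetical and only illustrate the arithmetic.
using System;

double exampleMin = -0.002, exampleMax = 0.018;          // an assumed Harris response range
double exampleScale = 255 / (exampleMax - exampleMin);   // 255 divided by the span
double exampleShift = -exampleMin * exampleScale;        // shifts the minimum to 0
Console.WriteLine(exampleMin * exampleScale + exampleShift); // 0 (up to rounding)
Console.WriteLine(exampleMax * exampleScale + exampleShift); // 255 (up to rounding)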
public override void Process(Image<Bgr, byte> image, out Image<Bgr, byte> annotatedImage, out List<object> data)
{
    base.Process(image, out annotatedImage, out data);

    // create image for the corners
    var corners = new Image<Gray, float>(image.Size);

    // run the Harris corner detector against the image
    CvInvoke.CornerHarris(
        image.Convert<Gray, byte>(),
        corners,
        _blockSize,
        _apertureSize);

    // normalize the image
    CvInvoke.Normalize(corners, corners);

    // optionally show the corners image
    if (_viewCorners)
    {
        annotatedImage = corners.Convert<Bgr, byte>();
        return;
    }

    // create a gray byte image from the corners image
    var gray = corners.Convert<Gray, byte>();
    data = new List<object>();

    // for each pixel, annotate the corner if
    // the intensity is beyond the threshold
    for (var j = 0; j < gray.Rows; j++)
    {
        for (var i = 0; i < gray.Cols; i++)
        {
            if (!(gray[j, i].Intensity > _threshold))
            {
                continue;
            }
            var circle = new CircleF(new PointF(i, j), 1);
            annotatedImage.Draw(circle, new Bgr(_annoColor.Color()), _lineThick);
            data.Add(new Circle(circle));
        }
    }
}
static void Main(string[] args)
{
    var img = new Image<Bgr, Byte>(DataDir.Contours("multipieces.jpg"));
    var grey = new Image<Gray, byte>(img.Bitmap);
    var smooth = grey.SmoothGaussian(7);
    var thresholded = smooth.ThresholdBinary(new Gray(160), new Gray(256));

    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        // build the list of contours
        CvInvoke.FindContours(thresholded, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        var contourAreas = Enumerable.Range(0, contours.Size)
            .Select(i => CvInvoke.ContourArea(contours[i]))
            .ToArray();
        var filteredAreas = contourAreas.Where(a => a > 500).OrderBy(a => a).ToArray();
        var median = filteredAreas.Skip(filteredAreas.Length / 2).FirstOrDefault();
        const double deviation = 0.2;
        var from = median * (1 - deviation);
        var to = median * (1 + deviation);

        // outline the contours whose area is within 20% of the median
        for (int i = 0; i < contours.Size; i++)
        {
            var area = contourAreas[i];
            if (area < from || area > to)
            {
                continue;
            }
            var contour = contours[i];
            CvInvoke.Polylines(img, contour, true, new Bgr(Color.Red).MCvScalar, 4);
        }
    }

    var cornerImage = new Image<Gray, float>(thresholded.Size);
    CvInvoke.CornerHarris(thresholded, cornerImage, 3);
    var cornersThresholded = cornerImage.ThresholdBinaryInv(new Gray(160), new Gray(256));
    cornersThresholded.Bitmap.Save(DataDir.Contours("cornerImage.bmp"), ImageFormat.Bmp);
    img.Bitmap.Save(DataDir.Contours("markedContours.bmp"), ImageFormat.Bmp);
}
/// <summary>
/// Compute the Harris corner response
/// </summary>
/// <param name="img">Source image</param>
public void Detect(Image<Gray, Byte> img)
{
    this._CornerStrength = new Image<Gray, float>(img.Size);

    // Harris computation (neighborhood/block size, then aperture size, then the Harris parameter k)
    CvInvoke.CornerHarris(
        img,
        this._CornerStrength,
        this._Neighborhood,
        this._Aperture,
        this._K);

    // internal threshold computation
    double[] maxStrength;
    double[] minStrength;   // not used
    Point[] minPoints;      // not used
    Point[] maxPoints;      // not used
    this._CornerStrength.MinMax(out minStrength, out maxStrength, out minPoints, out maxPoints);
    this._MaxStrength = maxStrength[0];
}
/// <summary>
/// Compute Harris corners
/// </summary>
/// <param name="image">source image</param>
public void Detect(Image<Gray, Byte> image)
{
    this._CornerStrength = new Image<Gray, float>(image.Size);

    // Harris computation
    CvInvoke.CornerHarris(
        image,                  // source image
        this._CornerStrength,   // result image
        this._Neighborhood,     // neighborhood (block) size
        this._Aperture,         // aperture size
        this._K);               // Harris parameter

    // internal threshold computation
    double[] maxStrength;
    double[] minStrength;   // not used
    Point[] minPoints;      // not used
    Point[] maxPoints;      // not used
    this._CornerStrength.MinMax(out minStrength, out maxStrength, out minPoints, out maxPoints);
    this._MaxStrength = maxStrength[0];
}
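// The two Detect variants above differ only in the order of the CornerHarris arguments:
// in Emgu CV the signature is CvInvoke.CornerHarris(src, dst, blockSize, apertureSize, k,
// borderType), so the neighborhood (block) size comes before the Sobel aperture. A small
// wrapper with descriptive parameter names, sketched below, makes the order explicit;
// it is a hypothetical helper, not part of either class.
static Image<Gray, float> ComputeHarrisResponse(
    Image<Gray, byte> source, int neighborhoodSize, int apertureSize, double k = 0.04)
{
    // allocate the 32-bit float response image and run the detector
    var response = new Image<Gray, float>(source.Size);
    CvInvoke.CornerHarris(source, response, neighborhoodSize, apertureSize, k);
    return response;
}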
public void CornerDetection(VisionProfile profile, Mat img)
{
    var output = new Mat();
    var outputNormalized = new Mat();
    var outputNormalizedScaled = new Mat();

    CvInvoke.CornerHarris(img, output, profile.HarrisCornerBlockSize, profile.HarrisCornerAperture, profile.HarrisCornerK, BorderType.Default);
    CvInvoke.Normalize(output, outputNormalized, 0, 255, NormType.MinMax, DepthType.Cv32F, null);
    CvInvoke.ConvertScaleAbs(outputNormalized, outputNormalizedScaled, 5, 5);

    for (int j = 0; j < outputNormalized.Rows; j++)
    {
        for (int i = 0; i < outputNormalized.Cols; i++)
        {
            // if ((int)outputNormalized.GetData(j, i) > profile.HarrisCornerThreshold)
            {
                // circle(outputNormalizedScaled, Point(i, j), 5, Scalar(0), 2, 8, 0);
            }
        }
    }
}
public List<Vector2> GetCornerPoints()
{
    Image<Gray, float> cornerimg = new Image<Gray, float>(this.img.Size);
    Image<Gray, Byte> cornerthrimg = new Image<Gray, Byte>(this.img.Size);
    Image<Gray, Byte> cannyimg = this.img.Canny(60, 100);
    CvInvoke.CornerHarris(cannyimg, cornerimg, 3, 3, 0.04);
    //CvInvoke.cvNormalize(cornerimg, cornerimg, 0, 255, Emgu.CV.CvEnum.NORM_TYPE.CV_MINMAX, IntPtr.Zero); // normalization

    double min = 0, max = 0;
    System.Drawing.Point minp = new System.Drawing.Point(0, 0);
    System.Drawing.Point maxp = new System.Drawing.Point(0, 0);
    CvInvoke.MinMaxLoc(cornerimg, ref min, ref max, ref minp, ref maxp);
    double scale = 255 / (max - min);
    double shift = -min * scale;
    CvInvoke.ConvertScaleAbs(cornerimg, cornerthrimg, scale, shift); // scale and convert to a byte image

    byte[] data = cornerthrimg.Bytes;
    List<Vector2> corners = new List<Vector2>();
    List<Vector3> corners_3 = new List<Vector3>();
    for (int i = 0; i < cornerimg.Height; i++)
    {
        for (int j = 0; j < cornerimg.Width; j++)
        {
            int k = i * cornerthrimg.Width + j;
            if (data[k] > 80) // threshold test
            {
                corners.Add(new Vector2(j, i));
                corners_3.Add(m_projector.ImageToWorld(corners.Last()));
            }
        }
    }
    m_renderEngine.DrawPoints(corners_3);
    return corners;
}
private void ApplyHarisCorner(int threshold = 200)
{
    try
    {
        if (imgList["Input"] == null)
        {
            return;
        }
        var img = imgList["Input"].Clone();
        var gray = img.Convert<Gray, byte>();

        var corners = new Mat();
        CvInvoke.CornerHarris(gray, corners, 2);
        CvInvoke.Normalize(corners, corners, 255, 0, Emgu.CV.CvEnum.NormType.MinMax);

        // copy the response into a matrix so individual values can be compared to the threshold
        Matrix<float> matrix = new Matrix<float>(corners.Rows, corners.Cols);
        corners.CopyTo(matrix);

        for (int i = 0; i < matrix.Rows; i++)
        {
            for (int j = 0; j < matrix.Cols; j++)
            {
                if (matrix[i, j] > threshold)
                {
                    CvInvoke.Circle(img, new Point(j, i), 5, new MCvScalar(0, 0, 255), 3);
                }
            }
        }
        imageBoxEx1.Image = img.AsBitmap();
    }
    catch (Exception ex)
    {
        throw new Exception(ex.Message);
    }
}
public Image<Gray, Byte> harrisCornerDetection()
{
    // original source image as grayscale
    Image<Gray, Byte> m_SourceImage = null;
    // raw corner strength image (must be 32-bit float)
    Image<Gray, float> m_CornerImage = null;
    // inverted thresholded corner strengths (for display)
    Image<Gray, Byte> m_ThresholdImage = null;

    // create and show source image as grayscale
    m_SourceImage = this.preProcessedImageInGrayScale;

    // create corner strength image and do Harris
    m_CornerImage = new Image<Gray, float>(m_SourceImage.Size);
    CvInvoke.CornerHarris(m_SourceImage, m_CornerImage, 3, 3, 0.01);

    // create and show inverted threshold image
    m_ThresholdImage = new Image<Gray, Byte>(m_SourceImage.Size);
    CvInvoke.Threshold(m_CornerImage, m_ThresholdImage, 0.0001, 255.0, ThresholdType.BinaryInv);
    m_ThresholdImage.Save("harrisCornerDetection.jpg");
    return m_ThresholdImage;
}
private void button3_Click(object sender, EventArgs e)
{
    harrisImage = new Image<Gray, float>(img.Size);
    CvInvoke.CornerHarris(img, harrisImage, 2, 3, 0.01);
    picProcImage.BackgroundImage = harrisImage.ToBitmap();
}
public AlgorithmResult DetectCornerHarris(
    string filename,
    byte threshold,
    int blockSize,
    int apertureSize,
    double k,
    HarrisBorderType borderType)
{
    AlgorithmResult result = new AlgorithmResult();
    Image<Bgr, byte> image = ImageHelper.GetImage(filename);
    var resultImage = new Image<Bgr, byte>(filename);

    // create a new (gray, float) image for the corners
    var corners = new Image<Gray, float>(image.Size);

    // Harris corner on the grayscale image
    CvInvoke.CornerHarris(
        image.Convert<Gray, byte>(),
        corners,
        blockSize,
        apertureSize,
        k,
        GetBorderType(borderType));

    // normalize
    CvInvoke.Normalize(corners, corners);

    // set resultImage after normalizing
    resultImage = corners.Convert<Bgr, byte>();

    // create a new (gray, byte) image for the corners
    var gray = corners.Convert<Gray, byte>();
    result.CircleDatas = new List<CirclePointModel>();

    // for each pixel, annotate the corner
    // if the intensity is beyond the threshold
    for (var j = 0; j < gray.Rows; j++)
    {
        for (var i = 0; i < gray.Cols; i++)
        {
            if (!(gray[j, i].Intensity > threshold))
            {
                continue;
            }
            var circle = new CircleF(new PointF(i, j), 1);
            resultImage.Draw(circle, new Bgr(Color.FromArgb(255, 77, 77)), 3);
            result.CircleDatas.Add(new CirclePointModel()
            {
                CenterX = circle.Center.X,
                CenterY = circle.Center.Y,
                Radius = circle.Radius,
                Area = circle.Area
            });
        }
    }

    result.ImageArray = ImageHelper.SetImage(resultImage);
    return result;
}
public static void processImage()
{
    IImage image;
    string finalFileName;
    string finalFileNameForANN;
    string imgNumber;
    Mat grayscaleImg = new Mat();
    Mat histImg = new Mat();
    Mat downScaledImg = new Mat();
    Mat smoothedImg = new Mat();
    Mat sobelImg = new Mat();
    Mat cannyImg = new Mat();
    Mat eigenImg = new Mat();
    int xorder = 0;
    int yorder = 1;
    string folderResizedImgFaces = @"testImages/ResizedFaces";
    string folderProcessedImgFaces = @"testImages/ProcessedFaces/";
    string folderProcessedSobel = @"testImages/ProcessedSobel/";
    string folderEasyRecog = @"testImages/ProcessedSmoothEasyRecog/";
    string folderCannyRecog = @"testImages/ProcessedCannyRecog/";
    string imgExtension = ".bmp";
    int number = 0;

    // read all files in the source image folder
    var sourceImgFiles = Directory.GetFiles(folderResizedImgFaces, "*.bmp", SearchOption.AllDirectories);
    //List<string> imgFiles = new List<string>();

    foreach (string fileName in sourceImgFiles)
    {
        // load the source image in UMat format (an array class)
        image = new UMat(fileName, ImreadModes.Color);

        // turn the color image into grayscale
        CvInvoke.CvtColor(image, grayscaleImg, ColorConversion.Bgr2Gray);
        imgNumber = number.ToString();
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        grayscaleImg.Save(finalFileName);
        number = number + 1;

        // normalize brightness and contrast
        CvInvoke.EqualizeHist(grayscaleImg, histImg);
        imgNumber = number.ToString();
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        histImg.Save(finalFileName);
        number = number + 1;

        // downsample, rejecting even rows and columns
        CvInvoke.PyrDown(grayscaleImg, downScaledImg);
        imgNumber = number.ToString();
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        downScaledImg.Save(finalFileName);
        number = number + 1;

        // upsample, injecting zero rows and columns (smoothed image)
        CvInvoke.PyrUp(downScaledImg, smoothedImg);
        imgNumber = number.ToString();
        finalFileNameForANN = folderEasyRecog + imgNumber + imgExtension;
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        smoothedImg.Save(finalFileName);
        smoothedImg.Save(finalFileNameForANN);
        number = number + 1;

        // perform the Sobel operation on the image and save it to the Sobel folder
        CvInvoke.Sobel(smoothedImg, sobelImg, DepthType.Default, xorder, yorder);
        imgNumber = number.ToString();
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        finalFileNameForANN = folderProcessedSobel + imgNumber + imgExtension;
        sobelImg.Save(finalFileName);
        sobelImg.Save(finalFileNameForANN);
        number = number + 1;

        // perform Canny edge detection on the image
        CvInvoke.Canny(smoothedImg, cannyImg, 100, 60);
        imgNumber = number.ToString();
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        finalFileNameForANN = folderCannyRecog + imgNumber + imgExtension;
        cannyImg.Save(finalFileName);
        cannyImg.Save(finalFileNameForANN);
        number = number + 1;

        // perform Harris corner detection on the image
        CvInvoke.CornerHarris(smoothedImg, eigenImg, 3, 3, 0.04, BorderType.Default);
        imgNumber = number.ToString();
        finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
        eigenImg.Save(finalFileName);
        number = number + 1;
    }
}
int symm_rad_range = 5; //100D : 66 //indu 20 //hospi 35

public Particle_parameter_for_fullimg(Image<Bgr, byte> img)
{
    width = img.Width;
    height = img.Height;
    grayimg = img.Convert<Gray, byte>();

    //** Gradient
    // horizontal filter
    sobelX = grayimg.Sobel(1, 0, 3);
    // vertical filter
    sobelY = grayimg.Sobel(0, 1, 3);

    //** Saturation
    saturation = grayimg.CopyBlank();
    for (int j = 0; j < height; ++j)
    {
        for (int i = 0; i < width; ++i)
        {
            saturation[j, i] = new Gray(Color.FromArgb((int)img[j, i].Red, (int)img[j, i].Green, (int)img[j, i].Blue).GetSaturation() * 256);
        }
    }
    Image<Gray, Byte> imageL = saturation.Not();
    CvInvoke.MedianBlur(imageL.Mat, saturation.Mat, 3);
    saturation._EqualizeHist();
    double otsu;
    otsu = CvInvoke.Threshold(saturation, imageL, 0, 255, ThresholdType.Otsu);
    CvInvoke.Threshold(saturation, saturation, (otsu + 255) * 0.6, 255, ThresholdType.Binary);

    //** frst
    Mat input = grayimg.Mat;
    Mat output;
    fullfrst(ref input, alpha_symm, 0.1);
    //frst2d(ref input, out output, rad_symm, alpha_symm, 0.1);
    //frst = output.Clone();
    //for (int i = 0; i < contours.Size; i++)
    //{
    //    img.Draw(new CircleF(mc[i], 2), new Bgr(Color.Green), 0);
    //}

    //** harris
    m_SourceImage = grayimg.Clone();

    // create corner strength image and do Harris
    m_CornerImage = new Image<Gray, float>(m_SourceImage.Width, m_SourceImage.Height);
    //CvInvoke.CornerHarris(m_SourceImage, m_CornerImage, 3, 3);

    // create and show inverted threshold image
    //m_ThresholdImage = new Image<Gray, Byte>(m_SourceImage.Size);
    //CvInvoke.Threshold(m_CornerImage, m_ThresholdImage, 0.0001, 255.0, Emgu.CV.CvEnum.ThresholdType.BinaryInv);
    //Image<Gray, byte> c = new Image<Gray, byte>(m_SourceImage.Width, m_SourceImage.Height);

    CvInvoke.CornerHarris(m_SourceImage, m_CornerImage, HarrisBlockSize); // note: the corner detector returns float data
    CvInvoke.Normalize(m_CornerImage, m_CornerImage, 0, 255, NormType.MinMax, DepthType.Cv32F); // normalization

    double min = 0, max = 0;
    Point minp = new Point(0, 0);
    Point maxp = new Point(0, 0);
    CvInvoke.MinMaxLoc(m_CornerImage, ref min, ref max, ref minp, ref maxp);
    double scale = 255 / (max - min);
    double shift = -min * scale;
    CvInvoke.ConvertScaleAbs(m_CornerImage, m_SourceImage, scale, shift); // scale and convert to a byte image

    //c.Save("harris.bmp");
    //byte[] data = c.Bytes;
    //for (int i = 0; i < m_CornerImage.Height; i++)
    //{
    //    for (int j = 0; j < m_CornerImage.Width; j++)
    //    {
    //        int k = i * m_SourceImage.Width + j;
    //        if (data[k] > 100) // threshold test
    //        {
    //            CvInvoke.Circle(m_SourceImage, new Point(j, i), 1, new MCvScalar(0, 0, 255, 255), 2);
    //        }
    //    }
    //}
}
public static void processImage(string imgPath)
{
    IImage image;
    string finalFileName;
    string imgNumber;
    string imgToProcess = imgPath;
    Mat grayscaleImg = new Mat();
    Mat histImg = new Mat();
    Mat downScaledImg = new Mat();
    Mat smoothedImg = new Mat();
    Mat sobelImg = new Mat();
    Mat cannyImg = new Mat();
    Mat eigenImg = new Mat();
    int xorder = 0;
    int yorder = 1;
    string folderProcessedImgFaces = @"testImages/ShowFaces/pro";
    string imgExtension = ".bmp";
    int number = 0;

    // load the source image in UMat format (an array class)
    image = new UMat(imgToProcess, ImreadModes.Color);

    // turn the color image into grayscale
    CvInvoke.CvtColor(image, grayscaleImg, ColorConversion.Bgr2Gray);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    grayscaleImg.Save(finalFileName);
    number = number + 1;

    // normalize brightness and contrast
    CvInvoke.EqualizeHist(grayscaleImg, histImg);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    histImg.Save(finalFileName);
    number = number + 1;

    // downsample, rejecting even rows and columns
    CvInvoke.PyrDown(grayscaleImg, downScaledImg);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    downScaledImg.Save(finalFileName);
    number = number + 1;

    // upsample, injecting zero rows and columns (smoothed image)
    CvInvoke.PyrUp(downScaledImg, smoothedImg);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    smoothedImg.Save(finalFileName);
    number = number + 1;

    // perform the Sobel operation on the image
    CvInvoke.Sobel(smoothedImg, sobelImg, DepthType.Default, xorder, yorder);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    sobelImg.Save(finalFileName);
    number = number + 1;

    // perform Canny edge detection on the image
    CvInvoke.Canny(smoothedImg, cannyImg, 100, 60);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    cannyImg.Save(finalFileName);
    number = number + 1;

    // perform Harris corner detection on the image
    CvInvoke.CornerHarris(smoothedImg, eigenImg, 3, 3, 0.04, BorderType.Default);
    imgNumber = number.ToString();
    finalFileName = folderProcessedImgFaces + imgNumber + imgExtension;
    eigenImg.Save(finalFileName);
    number = number + 1;
}
public Point WVPF(String Identifier)
{
    #region setting
    if (Identifier.Last().ToString() == "R")
    {
        img = CornerR.Clone();
        this.row = CornerR.Height;
        this.col = CornerR.Width;
        rowMean = new double[row];
        colMean = new double[col];
        col_row_Mean_Black();
    }
    if (Identifier.Last().ToString() == "L")
    {
        img = CornerL.Clone();
        this.row = CornerL.Height;
        this.col = CornerL.Width;
        rowMean = new double[row];
        colMean = new double[col];
        col_row_Mean_Black();
    }
    #endregion

    Dictionary<int, Double> vVPF = new Dictionary<int, double>();
    Dictionary<int, Double> hVPF = new Dictionary<int, double>();

    #region harris
    Image<Gray, float> CornerL_harris = img.Convert<Gray, float>().CopyBlank();
    Image<Gray, float> CornerR_harris = img.Convert<Gray, float>().CopyBlank();
    double min = 0, max = 0;
    Point minP = new Point();
    Point maxP = new Point();

    if (Identifier.First().ToString() == "R" && Identifier.Last().ToString() == "L") // patient's right eye, inner corner
    {
        CornerL_harris = CornerL.Convert<Gray, float>();
        CvInvoke.CornerHarris(CornerL, CornerL_harris, 3);
        CvInvoke.Normalize(CornerL_harris, CornerL_harris, 0, 255, NormType.MinMax, DepthType.Cv32F);
        CornerL_harris = CornerL_harris.AbsDiff(new Gray(0));
        CvInvoke.MinMaxLoc(CornerL_harris, ref min, ref max, ref minP, ref maxP);
        vVPF = calculateVPF(vVPF, "Vertical", CornerR_harris);
        hVPF = calculateVPF(hVPF, "Horizontal", CornerR_harris);
    }
    else if (Identifier.First().ToString() == "L" && Identifier.Last().ToString() == "R") // patient's left eye, inner corner
    {
        CornerR_harris = CornerR.Convert<Gray, float>();
        CvInvoke.CornerHarris(CornerR, CornerR_harris, 3);
        CvInvoke.Normalize(CornerR_harris, CornerR_harris, 0, 255, NormType.MinMax, DepthType.Cv32F);
        CornerR_harris = CornerR_harris.AbsDiff(new Gray(0));
        CvInvoke.MinMaxLoc(CornerR_harris, ref min, ref max, ref minP, ref maxP);
        vVPF = calculateVPF(vVPF, "Vertical", CornerL_harris);
        hVPF = calculateVPF(hVPF, "Horizontal", CornerL_harris);
    }
    else
    {
        vVPF = calculateVPF(vVPF, "Vertical");
        hVPF = calculateVPF(hVPF, "Horizontal");
    }
    #endregion

    // draw the variance for visualization
    Image<Gray, byte> variance = img.Clone();

    // calculate the difference between the points of variance (vertical)
    Point[] p = new Point[col];
    Dictionary<int, int> diffMax = new Dictionary<int, int>();
    diffMax = VerticalDiffMax(p, diffMax, vVPF);
    var dicSort = from objDic in diffMax orderby objDic.Value descending select objDic;
    int CornerX = 0;
    if (Identifier.Last().ToString() == "L")
    {
        CornerX = dicSort.ElementAt(0).Key + CornerL.ROI.X;
    }
    if (Identifier.Last().ToString() == "R")
    {
        CornerX = dicSort.ElementAt(0).Key + CornerR.ROI.X;
    }
    variance.Draw(new LineSegment2D(new Point(dicSort.ElementAt(0).Key, 0), new Point(dicSort.ElementAt(0).Key, variance.Height)), new Gray(0), 1);
    variance.DrawPolyline(p, false, new Gray(255));
    variance.Save(Identifier.First().ToString() + "\\varianceVertical" + Identifier.Last().ToString() + ".jpg");

    // calculate the difference between the points of variance (horizontal)
    p = new Point[row];
    diffMax = new Dictionary<int, int>();
    diffMax = HorizontalDiffMax(p, diffMax, hVPF);
    dicSort = from objDic in diffMax orderby objDic.Value descending select objDic;
    int CornerY = 0;
    if (Identifier.Last().ToString() == "L")
    {
        CornerY = dicSort.ElementAt(0).Key + CornerL.ROI.Y;
    }
    else if (Identifier.Last().ToString() == "R")
    {
        CornerY = dicSort.ElementAt(0).Key + CornerR.ROI.Y;
    }

    // draw a line to segment the corner area
    variance.Draw(new LineSegment2D(new Point(0, dicSort.ElementAt(0).Key), new Point(variance.Width, dicSort.ElementAt(0).Key)), new Gray(0), 1);
    variance.DrawPolyline(p, false, new Gray(255));
    variance.Save(Identifier.First().ToString() + "\\varianceCross" + Identifier.Last().ToString() + ".jpg");

    variance = img.Clone();
    variance.Draw(new LineSegment2D(new Point(0, dicSort.ElementAt(0).Key), new Point(variance.Width, dicSort.ElementAt(0).Key)), new Gray(0), 1);
    variance.DrawPolyline(p, false, new Gray(255));
    variance.Save(Identifier.First().ToString() + "\\varianceHorizontal" + Identifier.Last().ToString() + ".jpg");

    if (Identifier.First().ToString() == "R" && Identifier.Last().ToString() == "L") // patient's right eye inner corner
    {
        return maxP;
    }
    else if (Identifier.First().ToString() == "L" && Identifier.Last().ToString() == "R") // patient's left eye inner corner
    {
        return maxP;
    }
    else
    {
        return new Point(CornerX, CornerY);
    }
}
private void harris_btn_Click(object sender, EventArgs e)
{
    harrisImage = new Image<Gray, float>(img.Size);
    CvInvoke.CornerHarris(img, harrisImage, 2, 3, 0.02);
    modified_img.BackgroundImage = harrisImage.ToBitmap();
}