/// <summary>
/// Resizes the image at this.ImagePath so its width becomes newX_size (aspect ratio preserved),
/// pads it with a constant border to a newX_size x height canvas, saves the result into
/// outputPath under the original file name, and rescales the matching XML annotations.
/// </summary>
/// <param name="outputPath">Directory the padded image is written to.</param>
/// <param name="newX_size">Target canvas width in pixels; also drives the scale factor.</param>
/// <param name="height">Target canvas height in pixels (only used for vertical padding).</param>
/// <param name="rootDirectoryName">Forwarded unchanged to ResizeAnnotations; optional.</param>
// NOTE(review): the scale ratio is derived from width only, so newY is never clamped to
// 'height'. If the scaled height exceeds 'height', delta_h (and thus top/bottom) goes
// negative and CopyMakeBorder would receive negative border sizes — TODO confirm callers
// guarantee the scaled height fits.
public void Resize(string outputPath, int newX_size, int height, string rootDirectoryName = "") { Image <Bgr, Byte> img1 = new Emgu.CV.Image <Bgr, Byte>(this.ImagePath); var oldX = img1.Width; var oldY = img1.Height; float ratio = (float)oldX / (float)newX_size; int newY = (int)Math.Round((double)((float)oldY / (float)ratio)); int newX = (int)Math.Round((double)((float)oldX / (float)ratio)); img1 = img1.Resize(newX, newY, Emgu.CV.CvEnum.Inter.LinearExact); var delta_w = newX_size - newX; var delta_h = height - newY; var top = delta_h / 2; var bottom = delta_h - top; var left = delta_w / 2; var right = delta_w - left; //img1.Save(@"C:\Users\lkathke\Desktop\EmguTest\resized.jpg"); Mat newImage = new Mat(); CvInvoke.CopyMakeBorder(img1, newImage, top, bottom, left, right, Emgu.CV.CvEnum.BorderType.Constant); newImage.Save(System.IO.Path.Combine(outputPath, System.IO.Path.GetFileName(this.ImagePath))); ResizeAnnotations(this.XMLPath, newX_size, height, newX, newY, oldX, oldY, top, left, outputPath, rootDirectoryName); }
/// <summary>
/// Scales <paramref name="inputImage"/> to fit inside <paramref name="targetSize"/> while
/// preserving its aspect ratio, then pads the remainder with a constant gray color so the
/// returned image is exactly <paramref name="targetSize"/>.
/// </summary>
/// <param name="inputImage">Source image.</param>
/// <param name="targetSize">Exact size of the returned image.</param>
/// <param name="padColor">Gray value applied to all three channels of the padding border.</param>
/// <returns>A new Mat of size <paramref name="targetSize"/>.</returns>
private Mat keepAspectRatioResize(Mat inputImage, Size targetSize, int padColor = 0)
{
    Mat scaled = new Mat();

    // Candidate dimension when fitting by width vs. fitting by height.
    double heightIfFitWidth = targetSize.Width * (inputImage.Rows / (double)inputImage.Cols);
    double widthIfFitHeight = targetSize.Height * (inputImage.Cols / (double)inputImage.Rows);

    // Choose whichever orientation keeps the scaled image inside the target box.
    Size scaledSize = heightIfFitWidth <= targetSize.Height
        ? new Size(targetSize.Width, (int)heightIfFitWidth)
        : new Size((int)widthIfFitHeight, targetSize.Height);
    CvInvoke.Resize(inputImage, scaled, scaledSize, 0, 0, Inter.Lanczos4);

    // Distribute the leftover pixels as a constant border; the odd pixel goes bottom/right.
    int padTop = (targetSize.Height - scaled.Rows) / 2;
    int padBottom = (targetSize.Height - scaled.Rows + 1) / 2;
    int padLeft = (targetSize.Width - scaled.Cols) / 2;
    int padRight = (targetSize.Width - scaled.Cols + 1) / 2;
    CvInvoke.CopyMakeBorder(scaled, scaled, padTop, padBottom, padLeft, padRight,
        BorderType.Constant, new MCvScalar(padColor, padColor, padColor));

    return scaled;
}
/// <summary>
/// Computes the forward DFT of the field 'sourceMat': zero-pads it to fftRows x fftCols,
/// builds a 2-channel complex matrix (real = padded image, imaginary = zeros), runs the
/// forward transform in place, then splits the result and stores magnitude and phase in
/// the 'magnitudeMat' / 'phaseMat' fields; the magnitude is also copied into 'magPMat'.
/// </summary>
// NOTE(review): Magnitude/Phase are computed via CudaInvoke while everything else uses
// CvInvoke — presumably intentional for GPU acceleration, but verify planes[0]/planes[1]
// are valid inputs for the CUDA variants in this build. Original inline comments are in
// Chinese: "extend rows/cols for fast DFT, pad with 0", "build imaginary part",
// "merge channels", "perform DFT", "split real/imaginary", "compute magnitude/phase".
public void DFT() { Mat padded = new Mat(); //扩展行宽进行快速,其他位置用0扩充 CvInvoke.CopyMakeBorder(sourceMat, padded, 0, fftRows - sourceMat.Rows, 0, fftCols - sourceMat.Cols, BorderType.Constant, new MCvScalar(0)); //构造虚部 Mat tmp = new Mat(padded.Size, DepthType.Cv32F, 1); tmp.SetTo(new MCvScalar(0)); VectorOfMat planes = new VectorOfMat(padded, tmp); Mat complexI = new Mat(); //多通道混合 CvInvoke.Merge(planes, complexI); //进行DFT变换 CvInvoke.Dft(complexI, complexI, DxtType.Forward, padded.Rows); //将混合的单通道分离实数域虚数域 CvInvoke.Split(complexI, planes); //计算幅度谱,相位谱 CudaInvoke.Magnitude(planes[0], planes[1], magnitudeMat); CudaInvoke.Phase(planes[0], planes[1], phaseMat); magnitudeMat.CopyTo(magPMat); }
/// <summary>
/// Zero-pads <paramref name="img"/> in place (bottom and right edges only) up to the
/// nearest sizes for which the DFT is fastest, as reported by GetOptimalDFTSize.
/// </summary>
/// <param name="img">Image to pad; modified in place.</param>
public void ZeroPadImage(UMat img)
{
    int optimalRows = CvInvoke.GetOptimalDFTSize(img.Rows);
    int optimalCols = CvInvoke.GetOptimalDFTSize(img.Cols);
    // Grow to the optimal size, filling the new area with zeros.
    CvInvoke.CopyMakeBorder(img, img, 0, optimalRows - img.Rows, 0, optimalCols - img.Cols,
        BorderType.Constant, new MCvScalar(0));
}
/// <summary>
/// Returns a copy of <paramref name="src"/> surrounded by a uniform white border of
/// <paramref name="padding"/> pixels on every side.
/// </summary>
/// <param name="src">Image to pad; returned unchanged when <paramref name="padding"/> &lt;= 0.</param>
/// <param name="padding">Border width in pixels on each side.</param>
/// <returns>The padded copy, or <paramref name="src"/> itself when no padding is requested.</returns>
public static Mat MakePadding(Mat src, int padding)
{
    if (padding <= 0)
    {
        return src;
    }

    MCvScalar paddingScalar = new MCvScalar(255, 255, 255);
    Mat paddingSrc = new Mat();
    // The original passed BorderType.Isolated alone; numerically that is the ISOLATED
    // flag on top of BORDER_CONSTANT (value 0). Spell the Constant mode out explicitly so
    // the intent (fill with paddingScalar) is clear, while keeping the ISOLATED behavior
    // for sources that are ROI sub-matrices. Behavior is value-identical to the original.
    CvInvoke.CopyMakeBorder(src, paddingSrc, padding, padding, padding, padding,
        BorderType.Constant | BorderType.Isolated, paddingScalar);
    return paddingSrc;
}
/// <summary>
/// Crops the region described by <paramref name="cmap"/> (center/width/height) out of the
/// corresponding source image, surrounds the crop with a 100-pixel constant border (white
/// for the "white" image set, black for the "black" set), appends the result to the
/// matching Form1 cropped-image list, and returns a contour map recomputed on the crop.
/// </summary>
/// <param name="blackOrWhite">True selects Form1.whiteSourceImages; false selects the black set.</param>
/// <param name="cmap">Contour map whose bounds define the crop; replaced by the recomputed map.</param>
/// <returns>The contour map recomputed on the cropped, padded image.</returns>
// NOTE(review): minX/minY can go negative (and maxX/maxY can exceed the image) when the
// contour sits near an edge; the Mat ROI constructor would then throw — TODO confirm the
// caller guarantees the contour is fully inside the image.
private ColorfulContourMap AddCroppedImages(bool blackOrWhite, ColorfulContourMap cmap) { // get the image Mat img; if (blackOrWhite) { img = Form1.whiteSourceImages[cmap.imageIndex].Clone(); } else { img = Form1.blackSourceImages[cmap.imageIndex].Clone(); } // get the min max x y int minX = cmap.Center.X - cmap.Width / 2; int minY = cmap.Center.Y - cmap.Height / 2; int maxX = cmap.Center.X + cmap.Width / 2; int maxY = cmap.Center.Y + cmap.Height / 2; // crop the corresponding image Mat result = new Mat(img, new Rectangle(new Point(minX, minY), new Size(maxX - minX, maxY - minY))); /*Mat result = new Mat(new Size(maxX-minX,maxY-minY),DepthType.Cv8U,3); * CvInvoke.cvResetImageROI(img); * CvInvoke.cvSetImageROI(img, new Rectangle(new Point(minX, minY), new Size(maxX - minX, maxY - minY))); * CvInvoke.cvCopy(img, result,IntPtr.Zero);*/ if (blackOrWhite) { CvInvoke.CopyMakeBorder(result, result, 100, 100, 100, 100, BorderType.Constant, new MCvScalar(255, 255, 255)); } else { CvInvoke.CopyMakeBorder(result, result, 100, 100, 100, 100, BorderType.Constant, new MCvScalar(0, 0, 0)); } // output the image //result = img; if (blackOrWhite) { Form1.whiteCroppedImages.Add(result); cmap = ColorfulContourMap.getAllContourMap(result, cmap.imageIndex, 0)[0]; // update the contour map for the new image } else { Form1.blackCroppedImages.Add(result); cmap = ColorfulContourMap.getAllContourMap(result, cmap.imageIndex, 1)[0]; // update the contour map for the new image } return(cmap); }
/// <summary>
/// Button handler: pads the field 'image' to optimal DFT dimensions, normalizes it to
/// [0,1] (divide by 255), builds a 2-channel complex matrix, runs a forward DFT, displays
/// the (quadrant-swapped) magnitude spectrum, then runs the scaled inverse DFT and shows
/// the round-tripped image alongside the original in imageBox1..3.
/// </summary>
// NOTE(review): the border is filled with MCvScalar(1) rather than 0 — presumably
// intentional for the divide-by-255 step, but verify; also 'forwardDft' is sized from the
// unpadded image (image.Rows x image.Cols) while the DFT input is the padded matrix —
// TODO confirm the optimal sizes equal the original sizes for the images used here.
// Original Chinese comments: "get optimal size for FFT (multiples of 2*3*5)",
// "extend the image", "create a 2-channel matrix: channel 0 = source, channel 1 = 0",
// "create 2-channel matrix to hold transform result", "display spectrum image",
// "perform inverse transform".
private void button2_Click(object sender, EventArgs e) { //获取最佳Size,以便可以使用FFT,通常2*3*5倍数 int M = CvInvoke.GetOptimalDFTSize(image.Rows); int N = CvInvoke.GetOptimalDFTSize(image.Cols); //图像扩展 Mat padded = new Mat(); CvInvoke.CopyMakeBorder(image, padded, 0, M - image.Rows, 0, N - image.Cols, BorderType.Constant, new MCvScalar(1)); //创建一个2通道矩阵,0通道为源图数据,1通道为0 Mat m = new Mat(padded.Size, DepthType.Cv32F, 1); m.SetTo(new MCvScalar(255)); CvInvoke.Divide(padded, m, padded); m.SetTo(new MCvScalar(0)); VectorOfMat matVector = new VectorOfMat(); matVector.Push(padded); matVector.Push(m); Mat matComplex = new Mat(padded.Size, DepthType.Cv32F, 2); CvInvoke.Merge(matVector, matComplex); padded.Dispose(); m.Dispose(); matVector.Dispose(); // This will hold the DFT data,创建2通道矩阵,储存变换后结果 Matrix <float> forwardDft = new Matrix <float>(image.Rows, image.Cols, 2); CvInvoke.Dft(matComplex, forwardDft, DxtType.Forward, 0); // We'll display the magnitude,显示谱图像 Matrix <float> forwardDftMagnitude = GetDftMagnitude(forwardDft); SwitchQuadrants(ref forwardDftMagnitude); // Now compute the inverse to see if we can get back the original //进行反变换 Matrix <float> reverseDft = new Matrix <float>(forwardDft.Rows, forwardDft.Cols, 2); CvInvoke.Dft(forwardDft, reverseDft, DxtType.InvScale, 0); Matrix <float> reverseDftMagnitude = GetDftMagnitude(reverseDft); imageBox1.Image = image; imageBox2.Image = Matrix2Image(forwardDftMagnitude); imageBox3.Image = Matrix2Image(reverseDftMagnitude); }
/// <summary>
/// Takes an image of a line of text and returns a list of images containing each character.
/// </summary>
/// <param name="image">Line image with dark text on a light background; temporarily inverted
/// during contour detection, then restored.</param>
/// <returns>One Mat per character, ordered left to right, each wrapped in a 5-pixel white border.</returns>
private static List <Mat> segmentCharacters(Image <Gray, byte> image)
{
    // Contouring works better on an inverted image (white text on black background).
    image._Not();

    // Find one external contour per character.
    VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
    Mat hier = new Mat();
    CvInvoke.FindContours(image, contours, hier, RetrType.External, ChainApproxMethod.ChainApproxTc89Kcos, new Point(0, 0));

    // Invert back to the original polarity.
    image._Not();

    // Gather all bounding rectangles (one for each char).
    List <Rectangle> letterBoundries = new List <Rectangle>();
    for (int i = 0; i < contours.Size; i++)
    {
        letterBoundries.Add(CvInvoke.BoundingRectangle(contours[i]));
    }

    List <Mat> characters = new List <Mat>();

    // Sort bounding rectangles from left to right so characters come out in reading order.
    letterBoundries = letterBoundries.OrderBy(p => p.Left).ToList();
    for (int i = 0; i < letterBoundries.Count; i++)
    {
        // Restrict the source to the current letter. CopyMakeBorder allocates and fills
        // 'letter' itself, so the original's image.Mat.Clone() here was a wasted copy that
        // was immediately overwritten — a fresh Mat is sufficient.
        image.ROI = letterBoundries[i];
        Mat letter = new Mat();

        // 5 pixel white border around the letter.
        CvInvoke.CopyMakeBorder(image.Mat, letter, 5, 5, 5, 5, BorderType.Constant, new MCvScalar(255, 255, 255));
        characters.Add(letter);
        image.ROI = Rectangle.Empty;
    }
    return characters;
}
/// <summary>
/// Menu handler: adds a 5-pixel black constant border around the image currently shown in
/// pictureBox1 and displays the padded result; any failure is reported in a message box.
/// </summary>
private void borderPaddingToolStripMenuItem_Click(object sender, EventArgs e)
{
    try
    {
        if (pictureBox1.Image == null)
        {
            return;
        }

        var source = new Bitmap(pictureBox1.Image).ToImage <Bgr, float>();
        var padded = new Mat();
        CvInvoke.CopyMakeBorder(source, padded, 5, 5, 5, 5,
            Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(0));
        pictureBox1.Image = padded.ToBitmap();
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
}
/// <summary>
/// Scales <paramref name="src"/> to height <paramref name="dstHeight"/> preserving aspect
/// ratio, then either right-pads with white or right-crops so the result is exactly
/// <paramref name="dstWidth"/> x <paramref name="dstHeight"/> (8-bit, 3-channel).
/// </summary>
/// <param name="src">Source image.</param>
/// <param name="dstWidth">Exact output width in pixels.</param>
/// <param name="dstHeight">Exact output height in pixels.</param>
/// <returns>The fitted image.</returns>
private Mat AdjustTargetImg(Mat src, int dstWidth, int dstHeight)
{
    Mat srcResize = new Mat();
    float scale = (float)dstHeight / (float)src.Rows;
    int angleWidth = (int)((float)src.Cols * scale);
    CvInvoke.Resize(src, srcResize, new Size(angleWidth, dstHeight));

    Mat srcFit = new Mat(dstHeight, dstWidth, DepthType.Cv8U, 3);
    if (angleWidth < dstWidth)
    {
        // Narrower than the target: fill the missing right-hand columns with white.
        // The original passed BorderType.Isolated as the mode; ISOLATED is only a flag on
        // top of BORDER_CONSTANT (value 0), and srcResize is a freshly allocated Mat —
        // never an ROI — so a plain Constant border is behavior-identical and states the
        // actual intent (constant white fill).
        CvInvoke.CopyMakeBorder(srcResize, srcFit, 0, 0, 0, dstWidth - angleWidth,
            BorderType.Constant, new MCvScalar(255, 255, 255));
    }
    else
    {
        // Wider than (or equal to) the target: keep only the leftmost dstWidth columns.
        Rectangle rect = new Rectangle(0, 0, dstWidth, dstHeight);
        Mat partAngle = new Mat(srcResize, rect);
        partAngle.CopyTo(srcFit);
    }
    return srcFit;
}
/// <summary>
/// Computes and displays the log-magnitude DFT spectrum of the blue channel of
/// <paramref name="Image"/>: crops to the field 'ROI', zero-pads to optRows x optCols,
/// runs a forward DFT, computes log(sqrt(Re^2 + Im^2)), stores it in 'finalmatrix',
/// swaps quadrants so DC is centered, normalizes to [0,255] and calls RefreshDFT.
/// </summary>
/// <param name="Image">Input RGB bitmap; only channel 0 (blue in BGR order) is transformed.</param>
// NOTE(review): imageCV_spl.Split() allocates three channel images but only index 0 is
// used and none are disposed — presumably acceptable for this app, but a using/Dispose
// would avoid leaking unmanaged memory on repeated calls.
// Original Russian comments: "received RGB", "split into 3 channels",
// "split real and complex matrices", "for proper display".
public void DFTFromMat(Bitmap Image) { Image <Bgr, Single> imageCV_spl = new Image <Bgr, Single>(Image); //приняли RGB Image <Gray, Single>[] imageCV = imageCV_spl.Split(); //разделили на 3 канала var image = new Mat(imageCV[0].Mat, ROI); var extended = new Mat(); CvInvoke.CopyMakeBorder(image, extended, 0, optRows - image.Rows, 0, optCols - image.Cols, BorderType.Constant); extended.ConvertTo(extended, DepthType.Cv32F); var vec = new VectorOfMat(extended, new Mat(extended.Size, DepthType.Cv32F, 1)); var complex = new Mat(); CvInvoke.Merge(vec, complex); CvInvoke.Dft(complex, complex, DxtType.Forward, 0); //dft CvInvoke.Split(complex, vec); //разделение реальной и комплексной матрицы var outReal = vec[0]; var outIm = vec[1]; CvInvoke.Pow(outReal, 2.0, outReal); //Re^2 CvInvoke.Pow(outIm, 2.0, outIm); //Im^2 CvInvoke.Add(outReal, outIm, outReal); //Re^2+Im^2 CvInvoke.Sqrt(outReal, outReal); //sqrt(Re^2+Im^2) CvInvoke.Log(outReal, outReal); //для нормального отображения outReal.CopyTo(finalmatrix); finalmatrix = finalmatrix.Clone(); SwitchQuadrants(ref finalmatrix); CvInvoke.Normalize(finalmatrix, finalmatrix, 0.0, 255.0, Emgu.CV.CvEnum.NormType.MinMax); RefreshDFT(finalmatrix.Mat); }
/// <summary>
/// Stitches a seg_R x seg_C grid of ordered tiles into one panorama. Pass 1 stitches each
/// row horizontally: for every adjacent pair it sweeps an assumed overlap ratio
/// (coarse 0.40..0.70 in 0.05 steps, then a fine ±0.03 sweep in 0.005 steps), crops the
/// candidate overlap strips, pads one strip with a 10% constant border, and scores the
/// alignment with normalized cross-correlation template matching; the best ratio drives
/// Stitch_image_w. Pass 2 repeats the idea vertically across the row results via
/// Stitch_image_h and returns the final mosaic.
/// </summary>
/// <remarks>
/// NOTE(review): 'good_max_val' is compared against but never updated inside the sweeps,
/// so 'good_overlap' keeps the LAST ratio whose score beat the initial 0.1 threshold, not
/// the best-scoring one — TODO confirm this is intended. The method also blocks on
/// Console.ReadLine() after each row ("for debugging purposes"), which will hang any
/// non-interactive caller. Code below left byte-identical: the minified line boundaries
/// fall mid-statement and mid-comment, so no inline comments were added.
/// </remarks>
/* Stitch all Images given together. * Assumes order of Images is given: */ public Mat Stitching_images(List <Mat> images, int seg_R, int seg_C) { /* Testing to see if an Image can be loaded in using Emgu.CV: */ /* * var path = "C:\\Users\\Kestutis\\Documents\\PSU\\Images\\Intel\\Stiched_Image.jpg"; * var image = CvInvoke.Imread(path, ImreadModes.AnyColor); * CvInvoke.NamedWindow("Image", NamedWindowType.Normal); * CvInvoke.Imshow("Image", image); * CvInvoke.WaitKey(); * CvInvoke.DestroyAllWindows(); */ var overlap = 0.4; //amount of assumed overlap for the photos var good_overlap = 0.4; var good_max_val = 0.1; List <Mat> final = new List <Mat>(); /* Stithing based on width! */ for (int k = 0; k < seg_R; k++) { var img_stitched_w = new Mat(); var done = new Mat(); for (int y = 0; y < seg_C - 1; y++) { var img2 = new Mat(); var img1 = new Mat(); CvInvoke.CvtColor(images[seg_C * k + y], img1, ColorConversion.Bgr2Gray); CvInvoke.CvtColor(images[seg_C * k + (y + 1)], img2, ColorConversion.Bgr2Gray); overlap = 0.40; double min_val = 0; double max_val = 0; Point min_loc = new Point(); Point max_loc = new Point(); /* Find the overlap ratio that gives the best match: */ while (overlap >= 0.4 && overlap <= 0.7) { Rectangle test_crop_region_img1 = new Rectangle(Convert.ToInt32(Math.Round(img1.Size.Width * (1 - overlap))), 0, Convert.ToInt32(Math.Round(overlap * img1.Size.Width)), img2.Size.Height); Rectangle test_crop_region_img2 = new Rectangle(0, 0, Convert.ToInt32(Math.Round(overlap * img2.Size.Width)), img2.Size.Height); Rectangle test_crop_region_test = new Rectangle(0, 0, Convert.ToInt32(Math.Round(overlap * img2.Size.Width)), img2.Size.Height); var test_img1_temp = Crop_image(img1, test_crop_region_img1); var test_img2_temp = Crop_image(img2, test_crop_region_img2); CvInvoke.CopyMakeBorder(test_img1_temp, test_img1_temp, Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), 
Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), BorderType.Constant); var res = new Mat(); //CvInvoke.Canny(img2_temp, img2_temp, 300, 10, 3); //CvInvoke.Canny(img1_temp, img1_temp, 300, 10, 3); CvInvoke.MatchTemplate(test_img1_temp, test_img2_temp, res, TemplateMatchingType.CcoeffNormed); CvInvoke.MinMaxLoc(res, ref min_val, ref max_val, ref min_loc, ref max_loc); Console.WriteLine("value_height: " + Convert.ToInt32(Math.Round(img1.Size.Height * (.10))) + " value width: " + Convert.ToInt32(Math.Round(img1.Size.Width * (.10)))); Console.WriteLine(max_val); Console.WriteLine(max_loc); /* * CvInvoke.NamedWindow("img1", NamedWindowType.Normal); * CvInvoke.Imshow("img1", img1); * CvInvoke.NamedWindow("img2", NamedWindowType.Normal); * CvInvoke.Imshow("img2", img2); * CvInvoke.WaitKey(); * CvInvoke.DestroyAllWindows(); */ if (good_max_val < max_val) { good_overlap = overlap; } overlap += .05; } overlap = good_overlap - .03; var temp = good_overlap; while (overlap >= temp - .03 && overlap <= temp + .03) { Rectangle test_crop_region_img1 = new Rectangle(Convert.ToInt32(Math.Round(img1.Size.Width * (1 - overlap))), 0, Convert.ToInt32(Math.Round(overlap * img1.Size.Width)), img2.Size.Height); Rectangle test_crop_region_img2 = new Rectangle(0, 0, Convert.ToInt32(Math.Round(overlap * img2.Size.Width)), img2.Size.Height); Rectangle test_crop_region_test = new Rectangle(0, 0, Convert.ToInt32(Math.Round(overlap * img2.Size.Width)), img2.Size.Height); var test_img1_temp = Crop_image(img1, test_crop_region_img1); var test_img2_temp = Crop_image(img2, test_crop_region_img2); CvInvoke.CopyMakeBorder(test_img1_temp, test_img1_temp, Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), BorderType.Constant); var res = new Mat(); //CvInvoke.Canny(img2_temp, 
img2_temp, 300, 10, 3); //CvInvoke.Canny(img1_temp, img1_temp, 300, 10, 3); CvInvoke.MatchTemplate(test_img1_temp, test_img2_temp, res, TemplateMatchingType.CcoeffNormed); CvInvoke.MinMaxLoc(res, ref min_val, ref max_val, ref min_loc, ref max_loc); Console.WriteLine("value_height: " + Convert.ToInt32(Math.Round(img1.Size.Height * (.10))) + " value width: " + Convert.ToInt32(Math.Round(img1.Size.Width * (.10)))); Console.WriteLine(max_val); Console.WriteLine(max_loc); if (good_max_val < max_val) { good_overlap = overlap; } overlap += .005; } Rectangle crop_region_img1 = new Rectangle(Convert.ToInt32(Math.Round(img1.Size.Width * (1 - good_overlap))), 0, Convert.ToInt32(Math.Round(good_overlap * img1.Size.Width)), img2.Size.Height); Rectangle crop_region_img2 = new Rectangle(0, 0, Convert.ToInt32(Math.Round(good_overlap * img2.Size.Width)), img2.Size.Height); Rectangle crop_region_test = new Rectangle(0, 0, Convert.ToInt32(Math.Round(good_overlap * img2.Size.Width)), img2.Size.Height); var img1_temp = Crop_image(img1, crop_region_img1); var img2_temp = Crop_image(img2, crop_region_img2); CvInvoke.CopyMakeBorder(img1_temp, img1_temp, Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), BorderType.Constant); var result = new Mat(); //CvInvoke.Canny(img2_temp, img2_temp, 300, 10, 3); //CvInvoke.Canny(img1_temp, img1_temp, 300, 10, 3); CvInvoke.MatchTemplate(img1_temp, img2_temp, result, TemplateMatchingType.CcoeffNormed); CvInvoke.MinMaxLoc(result, ref min_val, ref max_val, ref min_loc, ref max_loc); Mat imageResult = new Mat(); //CvInvoke.NamedWindow("img1", NamedWindowType.Normal); //CvInvoke.Imshow("img1", img1_temp); //CvInvoke.NamedWindow("img2", NamedWindowType.Normal); //CvInvoke.Imshow("img2", img2_temp); //CvInvoke.WaitKey(); //CvInvoke.DestroyAllWindows(); imageResult = Stitch_image_w(images[seg_C * 
k + y], images[seg_C * k + (y + 1)], max_loc, good_overlap); done = imageResult.Clone(); } /* For debugging purposes */ Console.WriteLine("Press enter to continue..."); Console.ReadLine(); final.Add(done); } /* check images! */ // final.Reverse(); int current = 0; for (int i = 0; i < seg_R; i++) { if (current < final[i].Size.Width) { current = final[i].Size.Width; } //CvInvoke.NamedWindow("Image_final", NamedWindowType.Normal); //CvInvoke.Imshow("Image_final", final[i]); //CvInvoke.WaitKey(); //CvInvoke.DestroyAllWindows(); } /* Stitching based on height! */ /* Reset values: */ overlap = 0.4; //amount of assumed overlap for the photos good_overlap = 0.4; good_max_val = 0.1; var finished = final[0]; for (int x = 0; x < seg_R - 1; x++) { var img2 = final[x + 1]; var img1 = finished; double min_val = 0; double max_val = 0; Point min_loc = new Point(); Point max_loc = new Point(); /* Find the overlap ratio that gives the best match: */ while (overlap >= 0.4 && overlap <= 0.7) { Rectangle test_crop_region_img1 = new Rectangle(0, Convert.ToInt32(Math.Round(img1.Size.Height * (1 - overlap))), img1.Size.Width, img1.Size.Height); Rectangle test_crop_region_img2 = new Rectangle(0, 0, img2.Size.Width, Convert.ToInt32(Math.Round((1 - overlap) * img2.Size.Height))); Rectangle test_crop_region_test = new Rectangle(0, 0, img2.Size.Width, Convert.ToInt32(Math.Round((1 - overlap) * img2.Size.Height))); var test_img1_temp = Crop_image(img1, test_crop_region_img1); var test_img2_temp = Crop_image(img2, test_crop_region_img2); CvInvoke.CopyMakeBorder(test_img1_temp, test_img1_temp, Convert.ToInt32(Math.Round(img1.Size.Height * (.15))), Convert.ToInt32(Math.Round(img1.Size.Height * (.15))), Convert.ToInt32(Math.Round(img1.Size.Width * (.15))), Convert.ToInt32(Math.Round(img1.Size.Width * (.15))), BorderType.Constant); var res = new Mat(); //CvInvoke.Canny(img2_temp, img2_temp, 300, 10, 3); //CvInvoke.Canny(img1_temp, img1_temp, 300, 10, 3); CvInvoke.MatchTemplate(test_img1_temp, 
test_img2_temp, res, TemplateMatchingType.CcoeffNormed); CvInvoke.MinMaxLoc(res, ref min_val, ref max_val, ref min_loc, ref max_loc); Console.WriteLine("value_height: " + Convert.ToInt32(Math.Round(img1.Size.Height * (.10))) + " value width: " + Convert.ToInt32(Math.Round(img1.Size.Width * (.10)))); Console.WriteLine(max_val); Console.WriteLine(max_loc); /* * CvInvoke.NamedWindow("img1", NamedWindowType.Normal); * CvInvoke.Imshow("img1", img1); * CvInvoke.NamedWindow("img2", NamedWindowType.Normal); * CvInvoke.Imshow("img2", img2); * CvInvoke.WaitKey(); * CvInvoke.DestroyAllWindows(); */ if (good_max_val < max_val) { good_overlap = overlap; } overlap += .05; } Rectangle crop_region_img1 = new Rectangle(0, Convert.ToInt32(Math.Round(img1.Size.Height * (1 - good_overlap))), img1.Size.Width, img1.Size.Height); Rectangle crop_region_img2 = new Rectangle(0, 0, img2.Size.Width, Convert.ToInt32(Math.Round((1 - good_overlap) * img2.Size.Height))); Rectangle crop_region_test = new Rectangle(0, 0, img2.Size.Width, Convert.ToInt32(Math.Round((1 - good_overlap) * img2.Size.Height))); var img1_temp = Crop_image(img1, crop_region_img1); var img2_temp = Crop_image(img2, crop_region_img2); CvInvoke.CopyMakeBorder(img1_temp, img1_temp, Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Height * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), Convert.ToInt32(Math.Round(img1.Size.Width * (.10))), BorderType.Constant); var result = new Mat(); //CvInvoke.Canny(img2_temp, img2_temp, 300, 10, 3); //CvInvoke.Canny(img1_temp, img1_temp, 300, 10, 3); CvInvoke.MatchTemplate(img1_temp, img2_temp, result, TemplateMatchingType.CcoeffNormed); CvInvoke.MinMaxLoc(result, ref min_val, ref max_val, ref min_loc, ref max_loc); Mat imageResult = new Mat(); //CvInvoke.NamedWindow("img1", NamedWindowType.Normal); //CvInvoke.Imshow("img1", img1_temp); //CvInvoke.NamedWindow("img2", NamedWindowType.Normal); //CvInvoke.Imshow("img2", img2_temp); 
//CvInvoke.WaitKey(); //CvInvoke.DestroyAllWindows(); imageResult = Stitch_image_h(img1, img2, max_loc, good_overlap); finished = imageResult.Clone(); } return(finished); }
/// <summary>
/// Stitches two vertically adjacent tiles into one image. First equalizes the two widths
/// by padding the narrower image symmetrically, then allocates a canvas sized from the
/// template-match location <paramref name="max_loc"/> (offset by the 10% border added
/// during matching) and the assumed <paramref name="overlap"/> ratio, and finally copies
/// both images pixel-by-pixel: img1 on top, img2 shifted down by (1 - overlap) of img1's
/// height.
/// </summary>
/// <param name="img1">Upper tile; may be widened in place to match img2.</param>
/// <param name="img2">Lower tile; may be widened in place to match img1.</param>
/// <param name="max_loc">Best-match location from MatchTemplate on the bordered strips.</param>
/// <param name="overlap">Overlap ratio chosen by the caller's sweep.</param>
/// <returns>The combined image.</returns>
// NOTE(review): max_loc.X / x_val is computed but never used when placing img2, so no
// horizontal correction is applied — TODO confirm that is intended. The per-pixel
// Image<Bgr,byte> indexer copies are O(w*h) and slow; ROI + CopyTo would be equivalent.
// Code below left byte-identical (minified line boundary falls inside a block comment).
/* Stitch two Images together that are assumed to be in the same Column: */ static Mat Stitch_image_h(Mat img1, Mat img2, Point max_loc, double overlap) { if (img1.Size.Width != img2.Size.Width) { var temp = 0.0; if (img1.Size.Width > img2.Size.Width) { temp = img1.Size.Width - img2.Size.Width; CvInvoke.CopyMakeBorder(img2, img2, 0, 0, Convert.ToInt32(Math.Round(temp / 2)), Convert.ToInt32(Math.Round(temp / 2)), BorderType.Constant); } else { temp = img2.Size.Width - img1.Size.Width; CvInvoke.CopyMakeBorder(img1, img1, 0, 0, Convert.ToInt32(Math.Round(temp / 2)), Convert.ToInt32(Math.Round(temp / 2)), BorderType.Constant); } } Image <Bgr, Byte> first_image = img1.ToImage <Bgr, byte>(); Image <Bgr, Byte> second_image = img2.ToImage <Bgr, byte>(); var new_width = Math.Abs(max_loc.X - Convert.ToInt32(Math.Round(img1.Size.Width * (.10)))) + img1.Size.Width + 1; var new_hight = Math.Abs(max_loc.Y - Convert.ToInt32(Math.Round(img1.Size.Height * (.10)))) + img1.Size.Height + img2.Size.Height - Convert.ToInt32(Math.Round(img2.Size.Height * overlap)); Image <Bgr, Byte> imageResult = new Image <Bgr, byte>(new_width, new_hight); var y_val = max_loc.Y - Convert.ToInt32(Math.Round(img1.Size.Height * (.10))); var x_val = max_loc.X - Convert.ToInt32(Math.Round(img1.Size.Width * (.10))); /* ******************************************************* */ /* First Image: */ /* ******************************************************* */ /* img1 doesnt start at (0,0) */ if (y_val < 0) { for (int x = 0; x < img1.Size.Width; x++) { for (int y = 0; y < img1.Size.Height; y++) { imageResult[y + Math.Abs(y_val), x] = first_image[y, x]; } } } /* img1 starts at (0,0) */ else { for (int x = 0; x < img1.Size.Width; x++) { for (int y = 0; y < img1.Size.Height; y++) { imageResult[y, x] = first_image[y, x]; } } } // CvInvoke.NamedWindow("stitched", NamedWindowType.Normal); // CvInvoke.Imshow("stitched", imageResult); // CvInvoke.WaitKey(); // CvInvoke.DestroyAllWindows(); /* 
******************************************************* */ /* Second Image: */ /* ******************************************************* */ /* img2 starts above img1 */ if (y_val < 0) { for (int x = 0; x < img2.Size.Width; x++) { for (int y = 0; y < img2.Size.Height; y++) { //imageResult[y, x+Math.Abs(x_val)+ Convert.ToInt32(Math.Round(img1.Size.Width * (1 - overlap)))] = second_image[y, x]; imageResult[y + y_val + Convert.ToInt32(Math.Round(img1.Size.Height * (1 - overlap))), x] = second_image[y, x]; } } } /* img2 starts at below img1 */ else { for (int x = 0; x < img2.Size.Width; x++) { for (int y = 0; y < img2.Size.Height; y++) { //imageResult[y+Math.Abs(y_val), x+Math.Abs(x_val)+ Convert.ToInt32(Math.Round(img1.Size.Width * (1 - overlap)))] = second_image[y, x]; imageResult[y + y_val + Convert.ToInt32(Math.Round(img1.Size.Height * (1 - overlap))), x] = second_image[y, x]; } } } return(imageResult.Mat); }
/// <summary>
/// Uploads a labeled training set to an Azure Custom Vision project. Each subdirectory of
/// <paramref name="trainingSetPath"/> becomes a tag; every image in it is uploaded,
/// optionally after augmentation (YCrCb histogram equalization, white padding to a square
/// plus a configurable margin, resize to WidthHeight, then three extra uploads rotated by
/// a random -45..45 degrees with a 50% chance of a horizontal flip first). Finally the
/// label/path pairs are written to trainingModel.csv in the training-set directory.
/// </summary>
/// <param name="trainingSetPath">Root directory whose subdirectory names are the tag labels.</param>
/// <param name="project">Custom Vision project that receives the tags and images.</param>
// NOTE(review): `new Random()` is constructed per call inside the augmentation loop — on
// fast iterations consecutive instances can share a seed, biasing the flip/rotation
// randomness; a single shared Random would be safer. Config parse failures are silently
// swallowed and fall back to defaults (augment=true, 299, pad 10) — presumably intended.
// Code below left byte-identical; the minified line boundary falls mid-statement.
private static void CreateTheModel(string trainingSetPath, Project project) { var trainingKey = ConfigurationManager.AppSettings["CustomVision_TrainingKey"]; var trainingApi = new TrainingApi() { ApiKey = trainingKey }; var trainingModel = new List <Model>(); bool performImageAugmentation = true; try { performImageAugmentation = Convert.ToBoolean(ConfigurationManager.AppSettings["PerformImageAugmentation"]); } catch { } int widthHeight = 299; try { widthHeight = Convert.ToInt32(ConfigurationManager.AppSettings["WidthHeight"]); } catch { } int padImages = 10; try { padImages = Convert.ToInt32(ConfigurationManager.AppSettings["PadImages"]); } catch { } var trainingSet = Directory.GetDirectories(trainingSetPath); foreach (var subdirectory in trainingSet) { var dir = new DirectoryInfo(subdirectory); var name = dir.Name; Console.WriteLine($"\tAdding Tag - {name}"); var tag = trainingApi.CreateTag(project.Id, name); var images = Directory.GetFiles($"{subdirectory}").Select(f => { trainingModel.Add(new Model() { Label = name, Path = f }); return(new MemoryStream(File.ReadAllBytes(f))); }).ToList(); foreach (var image in images) { try { Console.WriteLine($"\tUploading image with tag: {tag.Name}"); if (performImageAugmentation) { // flip from RGB to BGR System.Drawing.Bitmap img = new System.Drawing.Bitmap(image); Image <Bgr, byte> ogImg = new Image <Bgr, byte>(img); // perform Intensity Image Equalization Image <Ycc, byte> ycrcb = ogImg.Convert <Ycc, byte>(); ycrcb._EqualizeHist(); ogImg = ycrcb.Convert <Bgr, byte>(); //replace original image with equalized image int top = 0; int bottom = 0; int left = 0; int right = 0; if (img.Width != img.Height) { // we need to pad our image if the width and height aren't set already in a previous smart crop step if (img.Width < img.Height) { int dif = img.Height - img.Width; left = dif / 2; right = dif - left; } if (img.Height < img.Width) { int dif = img.Width - img.Height; top = dif / 2; bottom = dif - top; } } if (padImages > 0) { 
top += padImages; bottom += padImages; left += padImages; right += padImages; } if ((top > 0) || (bottom > 0) || (left > 0) || (right > 0)) { Image <Bgr, byte> padImg = new Image <Bgr, byte>(img.Width + left + right, img.Height + top + bottom); CvInvoke.CopyMakeBorder(ogImg, padImg, top, bottom, left, right, Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(255, 255, 255)); // pad the image with a white background ogImg = padImg; } if (ogImg.Width != widthHeight) { // resize the padded image ogImg = ogImg.Resize(widthHeight, widthHeight, Emgu.CV.CvEnum.Inter.Linear); } trainStream(ogImg.ToBitmap(), trainingApi, project.Id, tag.Id.ToString()); for (var i = 0; i < 3; i++) { if ((new Random().Next(1, 11)) <= 5) { // 50% of the time flip the image horizontally before rotation ogImg = ogImg.Flip(Emgu.CV.CvEnum.FlipType.Horizontal); } trainStream(ogImg.Rotate(new Random().Next(-45, 45), new Bgr(255, 255, 255)).ToBitmap(), trainingApi, project.Id, tag.Id.ToString()); // rotate with a white background } } else { trainingApi.CreateImagesFromData(project.Id, image, new List <string>() { tag.Id.ToString() }); } } catch (Exception e) { //kill exception and carry on Console.WriteLine(e); } } } try { using (TextWriter writer = new StreamWriter($"{trainingSetPath}\\trainingModel.csv")) { var csv = new CsvWriter(writer); csv.WriteRecords(trainingModel); } } catch (Exception e) { Console.WriteLine(e); } }
/// <summary>
/// Evaluates a Custom Vision project against a labeled test set. Each subdirectory of
/// <paramref name="testingSetPath"/> supplies images whose directory name is the true
/// label; each image is optionally preprocessed exactly like training (YCrCb histogram
/// equalization, white padding to a square plus margin, resize to WidthHeight) and sent
/// to the prediction endpoint. Actual-vs-predicted counts are accumulated per
/// "actual|predicted" key, testModel.csv is (re)written, and a confusion matrix is
/// printed and exported to CSV.
/// </summary>
/// <param name="testingSetPath">Root directory whose subdirectory names are the true labels.</param>
/// <param name="project">Custom Vision project whose prediction endpoint is queried.</param>
// NOTE(review): the Model records store Label = subdirectory (full path) here, whereas
// CreateTheModel stores just the directory name — TODO confirm the CSV consumer expects
// that inconsistency. Preprocessing must stay in sync with CreateTheModel; any change
// there should be mirrored here. Code below left byte-identical; the minified line
// boundary falls mid-expression.
private static void TestingTheModel(string testingSetPath, Project project) { bool performImageAugmentation = true; try { performImageAugmentation = Convert.ToBoolean(ConfigurationManager.AppSettings["PerformImageAugmentation"]); } catch { } int widthHeight = 299; try { widthHeight = Convert.ToInt32(ConfigurationManager.AppSettings["WidthHeight"]); } catch { } int padImages = 10; try { padImages = Convert.ToInt32(ConfigurationManager.AppSettings["PadImages"]); } catch { } var predictionKey = ConfigurationManager.AppSettings["CustomVision_PredictionKey"]; var predictionEndpoint = new PredictionEndpoint() { ApiKey = predictionKey }; var testModel = new List <Model>(); var testSet = Directory.GetDirectories(testingSetPath); var predictionResult = new Dictionary <string, int>(); var labels = new List <string>(); foreach (var subdirectory in testSet) { var images = Directory.GetFiles($"{subdirectory}").Select(f => { testModel.Add(new Model() { Label = subdirectory, Path = f }); return(new MemoryStream(File.ReadAllBytes(f))); }).ToList(); foreach (var testImage in images) { try { var dir = new DirectoryInfo(subdirectory); var label = dir.Name; labels.Add(label); Console.WriteLine($"\tActual tag: {label}"); Microsoft.Cognitive.CustomVision.Prediction.Models.ImagePredictionResultModel result; if (performImageAugmentation) { // flip from RGB to BGR System.Drawing.Bitmap img = new System.Drawing.Bitmap(testImage); Image <Bgr, byte> ogImg = new Image <Bgr, byte>(img); // perform Intensity Image Equalization Image <Ycc, byte> ycrcb = ogImg.Convert <Ycc, byte>(); ycrcb._EqualizeHist(); ogImg = ycrcb.Convert <Bgr, byte>(); //replace original image with equalized image int top = 0; int bottom = 0; int left = 0; int right = 0; if (img.Width != img.Height) { // we need to pad our image if the width and height aren't set already in a previous smart crop step if (img.Width < img.Height) { int dif = img.Height - img.Width; left = dif / 2; right = dif - left; } if (img.Height < 
img.Width) { int dif = img.Width - img.Height; top = dif / 2; bottom = dif - top; } } if (padImages > 0) { top += padImages; bottom += padImages; left += padImages; right += padImages; } if ((top > 0) || (bottom > 0) || (left > 0) || (right > 0)) { Image <Bgr, byte> padImg = new Image <Bgr, byte>(img.Width + left + right, img.Height + top + bottom); CvInvoke.CopyMakeBorder(ogImg, padImg, top, bottom, left, right, Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(255, 255, 255)); // pad the image with a white background ogImg = padImg; } if (ogImg.Width != widthHeight) { // resize the padded image ogImg = ogImg.Resize(widthHeight, widthHeight, Emgu.CV.CvEnum.Inter.Linear); } MemoryStream augImageStream = new MemoryStream(); ogImg.ToBitmap().Save(augImageStream, System.Drawing.Imaging.ImageFormat.Jpeg); augImageStream.Seek(0, SeekOrigin.Begin); result = predictionEndpoint.PredictImage(project.Id, augImageStream); } else { result = predictionEndpoint.PredictImage(project.Id, testImage); } var predictedClass = result.Predictions[0].Tag; var predictedProb = result.Predictions[0].Probability; var key = $"{label}|{predictedClass}"; if (!predictionResult.ContainsKey(key)) { predictionResult.Add(key, 0); } predictionResult[key] = predictionResult[key] + 1; // Loop over each prediction and write out the results foreach (var c in result.Predictions) { Console.WriteLine($"\t{c.Tag}: {c.Probability:P1}"); } } catch (Exception e) { //kill exception and carry on Console.WriteLine(e); } } } string testModelPath = $"{testingSetPath}\\testModel.csv"; try { // delete the previous confusion matrix if it exists System.IO.File.Delete(testModelPath); } catch { } try { using (TextWriter writer = new StreamWriter(testModelPath)) { var csv = new CsvWriter(writer); csv.WriteRecords(testModel); } } catch (Exception e) { Console.WriteLine(e); } var array2D = GenerateConfusionMatrix(labels, predictionResult); //pretty print PrettyPrint(array2D); ExporttoCSV(testingSetPath, array2D); }
/// <summary>
/// Determines the wafer orientation on a double-light RAW image by comparing the
/// mean spectral power of a vertical and a horizontal line scan inside a
/// configured frequency band.
/// </summary>
/// <param name="inputRawImage">4Kx8Kx8bit input RAW double light image</param>
/// <param name="cancelToken">Cancellation token checked between the expensive stages.</param>
/// <param name="orientationThresholdInAdu">Decision threshold for the vertical/horizontal mean power ratio.</param>
/// <param name="sectionWidthInPixel">Width of the image sections the line scans are averaged from.</param>
/// <param name="lowerSpatialLimitInPixel">Smallest spatial period of interest (maps to the upper frequency limit).</param>
/// <param name="upperSpatialLimitInPixel">Largest spatial period of interest (maps to the lower frequency limit).</param>
/// <returns>WaferOrientation.NormalWaferOrientation OR WaferOrientation.RotatedWafer; null if not initialized, cancelled or on failure.</returns>
public WaferOrientation?Run(byte[] inputRawImage, CancellationToken cancelToken, double orientationThresholdInAdu, int sectionWidthInPixel, int lowerSpatialLimitInPixel, int upperSpatialLimitInPixel)
{
    WaferOrientation? orientation = null;
    // The detector reuses preallocated buffers, so only one run may be active at a time.
    Monitor.Enter(_detectorLock);
    try
    {
        Stopwatch swatch = new Stopwatch();
        swatch.Start();

        // Variable initialization (and check): ----------------------------------------------------------------------
        if (!IsInitialized)
        {
            return(null);
        }
        _logger?.Info("WaferOrientationDetection started.");
        if (cancelToken.IsCancellationRequested)
        {
            _logger?.Info("Runing of WaferOrientationDetector was stopped by cancellationtoken.");
            // FIX: the original only logged here and then kept processing; abort like the
            // later cancellation checks do.
            return(null);
        }

        // frequency calculation: spatial period limits (pixels) map inversely to frequency limits (1/N).
        SectionWidthInPixel = sectionWidthInPixel;
        LowerFreqLimitIn1PerN = (LinescanEndInPixel - LinescanStartInPixel) / upperSpatialLimitInPixel;
        UpperFreqLimitIn1PerN = (LinescanEndInPixel - LinescanStartInPixel) / lowerSpatialLimitInPixel;

        bool splitResult = SplitDoubleLightImage(inputRawImage);
        if (!splitResult || _inputSectionVertical == null || _inputSectionHorizontal == null)
        {
            return(null);
        }

        // Prepare the input images: ---------------------------------------------------------------------------------
        _inputSectionVertical._Mul(1000);
        _inputSectionHorizontal._Mul(1000);

        double gaussianSigmaSize = 1.0; // MAGIC kernel size !!!!
        // Kernel width of ~5 sigma (rounded) keeps essentially the full Gaussian support.
        int gaussianKernelSize = (int)Math.Round(5 * gaussianSigmaSize, MidpointRounding.AwayFromZero);
        CvInvoke.GaussianBlur(_inputSectionVertical, _blurredInputSectionVertical, new Size(gaussianKernelSize, gaussianKernelSize), gaussianSigmaSize, gaussianSigmaSize, BorderType.Reflect);
        CvInvoke.GaussianBlur(_inputSectionHorizontal, _blurredInputSectionHorizontal, new Size(gaussianKernelSize, gaussianKernelSize), gaussianSigmaSize, gaussianSigmaSize, BorderType.Reflect);

        // create a single linescan from the image sections (row-wise average -> single column)
        CvInvoke.Reduce(_blurredInputSectionVertical, _lineScanVertical, ReduceDimension.SingleCol, ReduceType.ReduceAvg);
        CvInvoke.Reduce(_blurredInputSectionHorizontal, _lineScanHorizontal, ReduceDimension.SingleCol, ReduceType.ReduceAvg);
        LogElapsedTime(swatch, "WaferOrientationDetector - linescan creation");

        if (cancelToken.IsCancellationRequested)
        {
            _logger?.Info("Runing of WaferOrientationDetector was stopped by cancellationtoken.");
            return(null);
        }

        // create data-structures for dft ----------------------------------------------------------------------------
        _dftInVertical = new Matrix <float>(_matLineScanVertical.Rows, _matLineScanVertical.Cols, 2);
        _dftInHorizontal = new Matrix <float>(_matLineScanHorizontal.Rows, _matLineScanHorizontal.Cols, 2);

        // Zero-pad the line scans up to the preallocated (DFT-friendly) matrix sizes.
        CvInvoke.CopyMakeBorder(_lineScanVertical.Mat, _matLineScanVertical, 0, _matLineScanVertical.Height - _lineScanVertical.Height, 0, _matLineScanVertical.Width - _lineScanVertical.Width, BorderType.Constant);
        CvInvoke.CopyMakeBorder(_lineScanHorizontal.Mat, _matLineScanHorizontal, 0, _matLineScanHorizontal.Height - _lineScanHorizontal.Height, 0, _matLineScanHorizontal.Width - _lineScanHorizontal.Width, BorderType.Constant);

        // Merge each line scan with a blank plane into a 2-channel (real, imaginary) input.
        using (VectorOfMat mvVertical = new VectorOfMat(_matLineScanVertical.Mat, _dftInBlankVertical.Mat))
        {
            CvInvoke.Merge(mvVertical, _dftInVertical);
        }
        using (VectorOfMat mvHorizontal = new VectorOfMat(_matLineScanHorizontal.Mat, _dftInBlankHorizontal.Mat))
        {
            CvInvoke.Merge(mvHorizontal, _dftInHorizontal);
        }

        // perform dft:
        CvInvoke.Dft(_dftInVertical, _dftOutVertical, DxtType.Forward, 0);
        CvInvoke.Dft(_dftInHorizontal, _dftOutHorizontal, DxtType.Forward, 0);
        LogElapsedTime(swatch, "WaferOrientationDetector - dft peformed");

        if (cancelToken.IsCancellationRequested)
        {
            _logger?.Info("Runing of WaferOrientationDetector was stopped by cancellationtoken.");
            return(null);
        }

        // get data from dft-result: ---------------------------------------------------------------------------------
        using (VectorOfMat vm = new VectorOfMat())
        {
            vm.Push(_outRealVertical.Mat);
            vm.Push(_outImagVertical.Mat);
            CvInvoke.Split(_dftOutVertical, vm);
        }
        using (VectorOfMat vm2 = new VectorOfMat())
        {
            vm2.Push(_outRealHorizontal.Mat);
            vm2.Push(_outImagHorizontal.Mat);
            CvInvoke.Split(_dftOutHorizontal, vm2);
        }

        // create power spectrum: sqrt(re^2 + im^2) ------------------------------------------------------------------
        CvInvoke.Pow(_outRealVertical, 2, _outRealVerticalPow);
        CvInvoke.Pow(_outImagVertical, 2, _outImagVerticalPow);
        CvInvoke.Pow(_outRealHorizontal, 2, _outRealHorizontalPow);
        CvInvoke.Pow(_outImagHorizontal, 2, _outImagHorizontalPow);
        _powerSpectrumVertical = _outRealVerticalPow + _outImagVerticalPow;
        _powerSpectrumHorizontal = _outRealHorizontalPow + _outImagHorizontalPow;
        CvInvoke.Sqrt(_powerSpectrumVertical, _powerSpectrumVertical);
        CvInvoke.Sqrt(_powerSpectrumHorizontal, _powerSpectrumHorizontal);

        // Mean power inside the configured frequency band only (_freqRangeMask).
        MCvScalar meanPowerVertical = CvInvoke.Mean(_powerSpectrumVertical, _freqRangeMask);
        MCvScalar meanPowerHorizontal = CvInvoke.Mean(_powerSpectrumHorizontal, _freqRangeMask);

        // decision point: stronger vertical band power => normal orientation ----------------------------------------
        orientation = (meanPowerVertical.V0 / meanPowerHorizontal.V0) > orientationThresholdInAdu ?
                      WaferOrientation.NormalWaferOrientation : WaferOrientation.RotatedWafer;

        LogElapsedTime(swatch, "WaferOrientationDetector calculation finished");
    }
    catch (Exception ex)
    {
        _logger?.Error($"Exception during WaferOrientation detection: {ex}");
    }
    finally
    {
        Monitor.Exit(_detectorLock);
    }
    return(orientation);
}
//Followed this source code mostly
//https://github.com/opencv-java/fourier-transform/blob/master/src/it/polito/teaching/cv/FourierController.java
public static void DFTForward(DFTArgs args)
{
    // Load the source as grayscale, keeping its native bit depth.
    var input = CvInvoke.Imread(args.src, ImreadModes.Grayscale | ImreadModes.AnyDepth);

    // Zero-pad to DFT-friendly dimensions (products of small primes).
    int optRows = CvInvoke.GetOptimalDFTSize(input.Rows);
    int optCols = CvInvoke.GetOptimalDFTSize(input.Cols);
    CvInvoke.CopyMakeBorder(input, input, 0, optRows - input.Rows, 0, optCols - input.Cols,
                            BorderType.Constant, new MCvScalar(0));
    input.PrintInfo("1");

    // The transform is computed in 32-bit float.
    input.ConvertTo(input, DepthType.Cv32F);
    input.PrintInfo("2");

    // Build a 2-channel (real, imaginary) matrix with the image as the real part.
    var channelStack = new VectorOfMat();
    channelStack.Push(input);
    channelStack.Push(new Mat(input.Size, DepthType.Cv32F, 1));
    Mat freqDomain = new Mat();
    CvInvoke.Merge(channelStack, freqDomain);
    freqDomain.PrintInfo("3");

    // Forward Fourier transform, in place.
    CvInvoke.Dft(freqDomain, freqDomain, DxtType.Forward, 0);
    freqDomain.PrintInfo("4");

    // Separate the complex result back into real / imaginary planes.
    var parts = new VectorOfMat(2);
    CvInvoke.Split(freqDomain, parts);

    // Magnitude / phase are easier to inspect for artifacts than real / imaginary.
    Mat magnitude = new Mat();
    Mat phase = new Mat();
    CvInvoke.CartToPolar(parts[0], parts[1], magnitude, phase);
    magnitude.PrintInfo("5m");

    // Magnitude spans a huge dynamic range, so view it on a log scale: log(1 + m).
    Helpers.AddS(magnitude, 1.0, magnitude);
    CvInvoke.Log(magnitude, magnitude);
    magnitude.PrintInfo("6m");
    phase.PrintInfo("6p");

    // A plain DFT places low frequencies in the corners - swap quadrants to center them.
    RearrangeQuadrants(magnitude);
    RearrangeQuadrants(phase);

    double magMin, magMax;
    CvInvoke.MinMaxIdx(magnitude, out magMin, out magMax, null, null);
    double phsMin, phsMax;
    CvInvoke.MinMaxIdx(phase, out phsMin, out phsMax, null, null);

    // Rescale both planes into unsigned 16-bit for saving.
    Mat magnitude16 = new Mat();
    Mat phase16 = new Mat();
    CvInvoke.Normalize(magnitude, magnitude16, 0, 65535, NormType.MinMax, DepthType.Cv16U);
    CvInvoke.Normalize(phase, phase16, 0, 65535, NormType.MinMax, DepthType.Cv16U);

    string baseName = Path.GetFileNameWithoutExtension(args.dst);
    magnitude16.PrintInfo("7m");
    phase16.PrintInfo("7p");
    Console.WriteLine("-mi " + magMin + " -mx " + magMax + " -pi " + phsMin + " -px " + phsMax);
    magnitude16.Save(baseName + "-mag.png");
    phase16.Save(baseName + "-phs.png");
}
/// <summary>
/// Loads an image, pads it with a white border, converts it to RGB and computes a
/// resize whose sides are multiples of 32, then builds the model input tensor.
/// </summary>
/// <param name="imageFilePath">Path of the image to load.</param>
/// <param name="rgbMat">Caller-supplied Mat that receives the padded RGB image.</param>
/// <param name="new_h">Resulting resized height (multiple-of-32 based).</param>
/// <param name="new_w">Resulting resized width (multiple-of-32 based).</param>
/// <returns>The input tensor built from the resized image.</returns>
private Tensor <float> GetInputs(string imageFilePath, Mat rgbMat, out int new_h, out int new_w)
{
    // Read the image from disk as 3-channel BGR.
    Mat imageMat = CvInvoke.Imread(imageFilePath, ImreadModes.Color);
    Mat imgDst = new Mat();
    // Expand the border by 50 px on each side, filled with constant white.
    // FIX: the original passed BorderType.Isolated, which is only an ROI modifier
    // flag that copyMakeBorder strips (degrading to the constant mode); use
    // BorderType.Constant to state the intended constant-white fill explicitly.
    CvInvoke.CopyMakeBorder(imageMat, imgDst, 50, 50, 50, 50, BorderType.Constant, new MCvScalar(255, 255, 255));
    // Convert BGR to RGB into the caller-supplied Mat.
    // (The original comment claimed a grayscale conversion; the code converts to RGB.)
    CvInvoke.CvtColor(imgDst, rgbMat, ColorConversion.Bgr2Rgb);
    imageMat.Dispose();
    imgDst.Dispose();
    // Use the smaller image side as the short edge.
    int short_size = rgbMat.Height < rgbMat.Width ? rgbMat.Height : rgbMat.Width;
    // Recognition degrades when the short edge is below 96 px; the 50 px padding on
    // every side already guarantees a result larger than 96, so no clamp is needed here.
    // Grow the short edge, then snap it down to a multiple of 32.
    short_size += 32;
    short_size = 32 * (short_size / 32);
    double scale_h = 0, tar_w = 0, scale_w = 0, tar_h = 0;
    if (rgbMat.Height < rgbMat.Width)
    {
        // Height is the short edge: scale it to short_size, derive a multiple-of-32
        // target width (at least 32) and recompute the actual width scale from it.
        scale_h = short_size * 1.0 / rgbMat.Height;
        tar_w = rgbMat.Width * scale_h * 1.0;
        tar_w = tar_w - tar_w % 32;
        tar_w = Math.Max(32, tar_w);
        scale_w = tar_w / rgbMat.Width;
    }
    else
    {
        // Width is the short edge: mirror of the branch above.
        scale_w = short_size * 1.0 / rgbMat.Width;
        tar_h = rgbMat.Height * scale_w * 1.0;
        tar_h = tar_h - tar_h % 32;
        tar_h = Math.Max(32, tar_h);
        scale_h = tar_h / rgbMat.Height;
    }
    new_h = (int)(scale_h * rgbMat.Height);
    new_w = (int)(scale_w * rgbMat.Width);
    // NOTE(review): the destination is declared Cv32F, but Resize reallocates the
    // destination with the source type, so the declared depth is presumably ignored - confirm.
    Mat imgResized = new Mat(new_h, new_w, DepthType.Cv32F, 3);
    CvInvoke.Resize(rgbMat, imgResized, new Size(new_w, new_h));
    return(GetTensorInputFromImg(imgResized));
}
/// <summary>
/// Finds the largest convex quadrilateral (assumed to be a document) in the bitmap
/// and returns it warped to a fronto-parallel view at a fixed output size.
/// </summary>
/// <param name="bitmap">Input photo; rotated to portrait first if it is landscape.</param>
/// <returns>The perspective-corrected document region as a new Bitmap.</returns>
public static Bitmap ExtractDocumentFromBitmap(Bitmap bitmap)
{
    // Force portrait orientation so the later width/height assumptions hold.
    if (bitmap.Width > bitmap.Height)
    {
        bitmap.RotateFlip(RotateFlipType.Rotate90FlipNone);
    }
    using (var image = new Image <Bgr, byte>(bitmap))
    using (var imageGray = image.Convert <Gray, byte>())
    using (var filteredImage = new Image <Bgr, byte>(bitmap))
    using (var cannyEdges = new UMat())
    using (var contours = new VectorOfVectorOfPoint())
    {
        // Edge-preserving smoothing -> binarization -> despeckle, then a 5 px black
        // frame so document edges touching the border still close into a contour.
        CvInvoke.BilateralFilter(imageGray, filteredImage, 9, 75, 75);
        CvInvoke.AdaptiveThreshold(filteredImage, filteredImage, 255, AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 115, 4);
        CvInvoke.MedianBlur(filteredImage, filteredImage, 11);
        CvInvoke.CopyMakeBorder(filteredImage, filteredImage, 5, 5, 5, 5, BorderType.Constant, new MCvScalar(0, 0, 0));
        CvInvoke.Canny(filteredImage, cannyEdges, 200, 250);
        CvInvoke.FindContours(cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        // Accept contours covering between 50% and 100% of the (border-trimmed) frame area.
        var cannyEdgesHeight = cannyEdges.Bitmap.Height;
        var cannyEdgesWidth = cannyEdges.Bitmap.Width;
        var areaContour = (cannyEdgesHeight - 10) * (cannyEdgesWidth - 10);
        var areaCount = areaContour * 0.5;
        double areaContour2; // NOTE(review): assigned below but never read.
        var sourcePointsVector = new VectorOfPoint();
        for (int i = 0; i < contours.Size; i++)
        {
            using (var cont = contours[i])
            {
                // Simplify to a polygon; a convex quad in the accepted area range is our document.
                CvInvoke.ApproxPolyDP(cont, cont, CvInvoke.ArcLength(cont, true) * 0.05, true);
                if (cont.Size == 4 && CvInvoke.IsContourConvex(cont) && areaCount < CvInvoke.ContourArea(cont) && CvInvoke.ContourArea(cont) < areaContour)
                {
                    sourcePointsVector = cont;
                    areaContour2 = CvInvoke.ContourArea(cont);
                    // NOTE(review): result discarded - presumably sortVector mutates
                    // its argument in place; confirm against its implementation.
                    sortVector(sourcePointsVector);
                    break;
                }
            }
        }
        // NOTE(review): sortedVector is never used afterwards; and if no contour matched,
        // sourcePointsVector is empty and the [0..3] indexing below will throw.
        var sortedVector = sortVector(sourcePointsVector);
        // Shift corners back by the 5 px frame added above.
        var vectorWithOffset = addOffsetToVector(sourcePointsVector, -5);
        // Output size: the longer of each opposite-edge pair.
        var euclideanHeight = new int[] { getEuclideanDistance(vectorWithOffset[0], vectorWithOffset[1]), getEuclideanDistance(vectorWithOffset[2], vectorWithOffset[3]) }.Max();
        var euclideanWidth = new int[] { getEuclideanDistance(vectorWithOffset[0], vectorWithOffset[2]), getEuclideanDistance(vectorWithOffset[1], vectorWithOffset[3]) }.Max();
        VectorOfPoint targetPoints = new VectorOfPoint(new Point[] { new Point(0, 0), new Point(0, euclideanWidth), new Point(euclideanHeight, euclideanWidth), new Point(euclideanHeight, 0) }.ToArray());
        // Map sorted source corners onto the sorted target rectangle and warp.
        var source = sortVector(vectorWithOffset).ToArray().Select(x => new PointF(x.X, x.Y)).ToArray();
        var target = sortVector(targetPoints).ToArray().Select(x => new PointF(x.X, x.Y)).ToArray();
        var tran = CvInvoke.GetPerspectiveTransform(source, target);
        CvInvoke.WarpPerspective(image, image, tran, new Size(euclideanHeight, euclideanWidth));
        return(image.ToBitmap((int)standardDocumentWidth * 4, (int)standardDocumentHeight * 4));
    }
}
/// <summary>
/// Suppresses periodic texture: forward DFT, median-filter the centered real
/// spectrum everywhere except a cross-shaped band around the spectrum center
/// (imaginary part zeroed there), then inverse DFT back to an 8-bit image.
/// </summary>
/// <param name="inputImg">Source image (single channel expected by the DFT pipeline - TODO confirm).</param>
/// <param name="resultImg">Receives the filtered, normalized 8-bit result on success.</param>
/// <param name="ksize">Median-filter aperture; must be odd.</param>
/// <param name="crossBias">Half-width of the protected cross band, in pixels.</param>
/// <returns>true on success; false for invalid arguments or a failed median blur.</returns>
private bool DFT_Remove_Texture_Filter(Mat inputImg, ref Mat resultImg, int ksize, int crossBias)
{
    // Median blur requires an odd aperture.
    if (ksize % 2 == 0)
    {
        return(false);
    }
    // The cross band must fit inside the image.
    if (crossBias > inputImg.Width || crossBias > inputImg.Height)
    {
        return(false);
    }
    // Zero-pad to DFT-optimal dimensions.
    int m = CvInvoke.GetOptimalDFTSize(inputImg.Rows);
    int n = CvInvoke.GetOptimalDFTSize(inputImg.Cols);
    Mat padded = new Mat();
    CvInvoke.CopyMakeBorder(inputImg, padded, 0, m - inputImg.Rows, 0, n - inputImg.Cols, BorderType.Constant);
    padded.ConvertTo(padded, DepthType.Cv32F);
    Mat zeroMat = Mat.Zeros(padded.Rows, padded.Cols, DepthType.Cv32F, 1);
    VectorOfMat matVector = new VectorOfMat();
    matVector.Push(padded);
    matVector.Push(zeroMat);
    // make a complex mat (channel 0 = real, channel 1 = imaginary)
    Mat complexI = new Mat(padded.Size, DepthType.Cv32F, 2);
    CvInvoke.Merge(matVector, complexI);
    Mat fourier = new Mat(complexI.Size, DepthType.Cv32F, 2);
    // do dft
    CvInvoke.Dft(complexI, fourier, DxtType.Forward, complexI.Rows);
    /* temp is to show result of dft
     *
     * DEBUG ONLY
     *
     * Mat temp = Magnitude(fourier);
     * temp = new Mat(temp, new Rectangle(0, 0, temp.Cols & -2, temp.Rows & -2));
     * SwitchQuadrants(ref temp);
     * CvInvoke.Normalize(temp, temp, 1.0, 0.0, NormType.MinMax, DepthType.Cv32F);
     * CvInvoke.Imshow("Fourier Transform", temp);
     *
     * */
    // Split into real / imaginary planes and center the low frequencies.
    Mat Real = new Mat(fourier.Size, DepthType.Cv32F, 1);
    Mat Imaginary = new Mat(fourier.Size, DepthType.Cv32F, 1);
    VectorOfMat channels = new VectorOfMat();
    CvInvoke.Split(fourier, channels);
    Real = channels.GetOutputArray().GetMat(0);
    Imaginary = channels.GetOutputArray().GetMat(1);
    SwitchQuadrants(ref Real);
    //CvInvoke.Normalize(Real, Real, 1.0, 0.0, NormType.MinMax, DepthType.Cv32F);
    SwitchQuadrants(ref Imaginary);
    //CvInvoke.Normalize(Imaginary, Imaginary, 1.0, 0.0, NormType.MinMax, DepthType.Cv32F);
    // Array data
    // convert to image instead of using Mat's data pointer
    // NOTE(review): img_I is initialized from Real - looks like a copy-paste of the
    // line above, but it is harmless: every pixel of img_I is reassigned in the
    // cross-mask loop below before the merge.
    Image <Gray, float> img_R = Real.ToImage <Gray, float>();
    Image <Gray, float> img_I = Real.ToImage <Gray, float>();
    Array tmpR = Real.GetData();
    // make a Real Image copy (this copy gets median-filtered)
    Array realCopy = Real.GetData();
    Image <Gray, float> copy_R = Real.ToImage <Gray, float>();
    for (int i = 0; i < img_R.Width; i++)
    {
        for (int j = 0; j < img_R.Height; j++)
        {
            copy_R.Data[j, i, 0] = (float)realCopy.GetValue(j, i);
        }
    }
    try
    {
        // Median blur of the real spectrum flattens isolated texture peaks.
        CvInvoke.MedianBlur(copy_R, copy_R, ksize);
    }
    catch
    {
        return(false);
    }
    Array tmpI = Imaginary.GetData();
    int Center_w = img_R.Width / 2;
    int Center_h = img_I.Height / 2;
    // Rebuild the spectrum: keep original values inside the vertical/horizontal cross
    // band around the center, use the median-filtered real part (imaginary = 0) elsewhere.
    for (int i = 0; i < img_R.Width; i++)
    {
        for (int j = 0; j < img_R.Height; j++)
        {
            if ((i >= Center_w - crossBias && i <= Center_w + crossBias))
            {
                img_R.Data[j, i, 0] = (float)tmpR.GetValue(j, i);
                img_I.Data[j, i, 0] = (float)tmpI.GetValue(j, i);
            }
            else if ((j >= Center_h - crossBias && j <= Center_h + crossBias))
            {
                img_R.Data[j, i, 0] = (float)tmpR.GetValue(j, i);
                img_I.Data[j, i, 0] = (float)tmpI.GetValue(j, i);
            }
            else
            {
                img_R.Data[j, i, 0] = copy_R.Data[j, i, 0];
                img_I.Data[j, i, 0] = 0;
            }
        }
    }
    // Image back to Mat
    // make some merge
    Mat temp1 = new Mat(img_R.Size, DepthType.Cv32F, 1);
    Mat temp2 = new Mat(img_R.Size, DepthType.Cv32F, 1);
    temp1 = img_R.Mat;
    temp2 = img_I.Mat;
    VectorOfMat matVectorTemp = new VectorOfMat();
    matVectorTemp.Push(temp1);
    matVectorTemp.Push(temp2);
    Mat merge_mat = new Mat(img_R.Size, DepthType.Cv32F, 2);
    CvInvoke.Merge(matVectorTemp, merge_mat);
    // DFT inverse - back to the spatial domain
    CvInvoke.Dft(merge_mat, merge_mat, DxtType.Inverse, merge_mat.Rows);
    Mat magnitudeImage = MagnitudeInverse(merge_mat);
    CvInvoke.Normalize(magnitudeImage, magnitudeImage, 1.0, 0.0, NormType.MinMax, DepthType.Cv32F);
    //CvInvoke.Imshow("Fourier Transform Inverse", magnitudeImage);
    // Final rescale to displayable 8-bit.
    CvInvoke.Normalize(magnitudeImage, magnitudeImage, 0, 255, NormType.MinMax, DepthType.Cv8U);
    //CvInvoke.Imshow("Fourier Normalize", magnitudeImage);
    resultImg = magnitudeImage;
    return(true);
}
/// <summary>
/// return image bitmap and W-H (mm) of Metal stencil
/// </summary>
/// <param name="pathGerberFile">Path of the Gerber file to render.</param>
/// <param name="dpi">Render resolution.</param>
/// <param name="Foreground">Foreground render color.</param>
/// <param name="Background">Background render color.</param>
/// <returns>Result with the squared/padded grayscale image and physical size; Status is Fail on timeout.</returns>
public static GerberRenderResult Render(string pathGerberFile, float dpi, Color Foreground, Color Background)
{
    GerberRenderResult result = new GerberRenderResult();
    var log = new StandardConsoleLog();
    // Quiet, non-interactive rendering settings.
    GerberLibrary.Gerber.SaveIntermediateImages = false;
    GerberLibrary.Gerber.ShowProgress = false;
    GerberLibrary.Gerber.ExtremelyVerbose = false;
    GerberLibrary.Gerber.WaitForKey = false;
    GerberImageCreator.AA = false;
    // FIX: the original branched on Gerber.ThrowExceptions, but both branches were
    // byte-identical copy-paste; collapsed into a single code path.
    var task = Task.Run(() => GerberLibrary.Gerber.GetBitmapFromGerberFile(log, pathGerberFile, dpi, Foreground, Background));
    // Give the renderer at most 10 seconds before reporting failure.
    if (task.Wait(TimeSpan.FromSeconds(10)))
    {
        ValueTuple <Bitmap, double, double> tempVal = task.Result;
        Image <Gray, byte> imgGerber = new Image <Gray, byte>(tempVal.Item1);
        // add border: pad to a (near-)square canvas with a black constant border.
        int max = Math.Max(imgGerber.Width, imgGerber.Height) + 4;
        int addx = (max - imgGerber.Width) / 2;
        int addy = (max - imgGerber.Height) / 2;
        Image <Gray, byte> imgGerberAdd = new Image <Gray, byte>(new System.Drawing.Size(imgGerber.Width + 2 * addx, imgGerber.Height + 2 * addy));
        CvInvoke.CopyMakeBorder(imgGerber, imgGerberAdd, addy, addy, addx, addx, Emgu.CV.CvEnum.BorderType.Constant, new MCvScalar(0));
        result.GerberImage = imgGerberAdd;
        imgGerber.Dispose();
        imgGerber = null;
        result.Width = tempVal.Item2;   // physical width in mm
        result.Height = tempVal.Item3;  // physical height in mm
        result.Status = ActionStatus.Successfully;
    }
    else
    {
        result.Status = ActionStatus.Fail;
    }
    return(result);
}
/// <summary>
/// Computes full-reference quality metrics between a reference image and its filtered
/// version and stores them in the PSNRMSE, PSNRMSD, PSNRMED and Marziliano fields.
/// Higher PSNR-style values mean the images are more similar.
/// </summary>
/// <param name="imgReference">Reference (original) image.</param>
/// <param name="imgFiltered">Filtered image of the same size.</param>
void EvaluationOfFilter(Image <Bgr, Byte> imgReference, Image <Bgr, Byte> imgFiltered)
{
    double pixelCount = imgFiltered.Width * imgFiltered.Height;
    double blueSum = 0, greenSum = 0, redSum = 0;

    // ----- Measurement of Error: per-channel MSE folded into a PSNR-style score -----
    for (int i = 0; i < imgReference.Height; i++)
    {
        for (int j = 0; j < imgReference.Width; j++)
        {
            Bgr colorFiltered = imgFiltered[i, j];
            Bgr colorOriginal = imgReference[i, j];
            blueSum += Math.Pow(colorOriginal.Blue - colorFiltered.Blue, 2);
            greenSum += Math.Pow(colorOriginal.Green - colorFiltered.Green, 2);
            redSum += Math.Pow(colorOriginal.Red - colorFiltered.Red, 2);
        }
    }
    double blueMSE = blueSum / pixelCount;
    double greenMSE = greenSum / pixelCount;
    double redMSE = redSum / pixelCount;
    PSNRMSE = 10 * Math.Log10(3 / (blueMSE + greenMSE + redMSE));

    // ----- Structural distortion over 25x25 regions -----
    {
        // Pad both images with black on the bottom/right so the size is a multiple of 25.
        int PixelsOver25Height = imgReference.Height % 25;
        int PixelsOver25Width = imgReference.Width % 25;
        int AddHeight = PixelsOver25Height != 0 ? 25 - PixelsOver25Height : 0;
        int AddWidth = PixelsOver25Width != 0 ? 25 - PixelsOver25Width : 0;
        Image <Bgr, byte> OriginalResize = new Image <Bgr, byte>(imgReference.Width + AddWidth, imgReference.Height + AddHeight);
        Image <Bgr, byte> FilteredResize = new Image <Bgr, byte>(imgReference.Width + AddWidth, imgReference.Height + AddHeight);
        CvInvoke.CopyMakeBorder(imgReference, OriginalResize, 0, AddHeight, 0, AddWidth, BorderType.Constant, new MCvScalar(0));
        CvInvoke.CopyMakeBorder(imgFiltered, FilteredResize, 0, AddHeight, 0, AddWidth, BorderType.Constant, new MCvScalar(0));

        int LengthOfRegionsEdge = 25;
        int NumberOfRegionsHorizontally = OriginalResize.Width / LengthOfRegionsEdge;
        int NumberOfRegionsVertically = OriginalResize.Height / LengthOfRegionsEdge;
        // Per-channel accumulators in B, G, R order.
        double[] structSum = new double[3];
        double[] meanO = new double[3], minO = new double[3], maxO = new double[3];
        double[] meanF = new double[3], minF = new double[3], maxF = new double[3];
        for (int i = 0; i < NumberOfRegionsVertically; i++)
        {
            for (int j = 0; j < NumberOfRegionsHorizontally; j++)
            {
                RegionChannelStats(OriginalResize, 25 * i, 25 * j, LengthOfRegionsEdge, meanO, minO, maxO);
                RegionChannelStats(FilteredResize, 25 * i, 25 * j, LengthOfRegionsEdge, meanF, minF, maxF);
                // Weighted squared differences of region mean (0.5), max (0.25), min (0.25).
                for (int c = 0; c < 3; c++)
                {
                    structSum[c] += 0.5 * Math.Pow(meanO[c] - meanF[c], 2)
                                  + 0.25 * Math.Pow(maxO[c] - maxF[c], 2)
                                  + 0.25 * Math.Pow(minO[c] - minF[c], 2);
                }
            }
        }
        int regionCount = NumberOfRegionsHorizontally * NumberOfRegionsVertically;
        double blueStruct = structSum[0] / regionCount;
        double greenStruct = structSum[1] / regionCount;
        double redStruct = structSum[2] / regionCount;
        PSNRMSD = 10 * Math.Log10(3 / (blueStruct + greenStruct + redStruct));
    }

    // ----- Edge distortion: MSE between per-channel Canny edge maps -----
    {
        // FIX: the original reused the MSE accumulators from the first section without
        // resetting them, so PSNRMED silently included the plain MSE sums.
        blueSum = 0; greenSum = 0; redSum = 0;
        Image <Gray, byte> imgOriginalCannyBlue = imgReference[0].Canny(120, 50);
        Image <Gray, byte> imgOriginalCannyGreen = imgReference[1].Canny(120, 50);
        Image <Gray, byte> imgOriginalCannyRed = imgReference[2].Canny(120, 50);
        Image <Gray, byte> imgFilteredCannyBlue = imgFiltered[0].Canny(120, 50);
        Image <Gray, byte> imgFilteredCannyGreen = imgFiltered[1].Canny(120, 50);
        Image <Gray, byte> imgFilteredCannyRed = imgFiltered[2].Canny(120, 50);
        for (int i = 0; i < imgReference.Height; i++)
        {
            for (int j = 0; j < imgReference.Width; j++)
            {
                blueSum += Math.Pow(imgOriginalCannyBlue[i, j].Intensity - imgFilteredCannyBlue[i, j].Intensity, 2);
                greenSum += Math.Pow(imgOriginalCannyGreen[i, j].Intensity - imgFilteredCannyGreen[i, j].Intensity, 2);
                redSum += Math.Pow(imgOriginalCannyRed[i, j].Intensity - imgFilteredCannyRed[i, j].Intensity, 2);
            }
        }
        double blueMED = blueSum / pixelCount;
        double greenMED = greenSum / pixelCount;
        double redMED = redSum / pixelCount;
        PSNRMED = 10 * Math.Log10(3 / (blueMED + greenMED + redMED));
    }

    // ----- Marziliano blur metric: mean edge width along horizontal Sobel edges -----
    {
        Image <Ycc, byte> YccImage = new Image <Ycc, byte>(imgFiltered.Size);
        CvInvoke.CvtColor(imgFiltered, YccImage, ColorConversion.Bgr2YCrCb);
        Image <Gray, byte> Luminance = YccImage[0];
        Image <Gray, float> Sobelx = Luminance.Sobel(1, 0, 3);
        Image <Gray, byte> SobelxToByte = Sobelx.Convert <Gray, byte>();
        Image <Gray, byte> ThreshImage = new Image <Gray, byte>(YccImage.Size);
        CvInvoke.AdaptiveThreshold(SobelxToByte, ThreshImage, 255, AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 3, 3);
        float widthSum = 0;
        float edgeCount = 0;
        for (int x = 0; x < ThreshImage.Height; x++)
        {
            // Gradient profile of the current row.
            int[] row = new int[ThreshImage.Width];
            for (int i = 0; i < ThreshImage.Width; i++)
            {
                row[i] = (int)SobelxToByte[x, i].Intensity;
            }
            for (int y = 0; y < SobelxToByte.Width; y++)
            {
                // Non-255 threshold output marks an edge pixel in this row.
                if (ThreshImage[x, y].Intensity != 255)
                {
                    int edgePos = y;
                    int rp = 0, r, m2;
                    int left = 0, right = 0, width;
                    // Collect local extrema of the gradient profile (sign changes of the slope).
                    List <int> extrema = new List <int>();
                    for (int m1 = 0; m1 < row.Length - 1; m1++)
                    {
                        m2 = m1 + 1;
                        r = row[m2] - row[m1];
                        if (Math.Sign(r) != Math.Sign(rp) && (r != 0 && rp != 0))
                        {
                            extrema.Add(m1);
                        }
                        rp = r;
                    }
                    // Treat the row ends as extrema when the profile is not flat there.
                    if (row[0] != row[1])
                    {
                        extrema.Add(0);
                    }
                    if (row[row.Length - 2] != row[row.Length - 1])
                    {
                        extrema.Add(row.Length - 1);
                    }
                    extrema.Sort();
                    // The edge width is spanned by the extrema bracketing the edge position.
                    for (int i = 0; i < extrema.Count - 1; i++)
                    {
                        if (edgePos >= extrema[i] && edgePos < extrema[i + 1])
                        {
                            left = extrema[i];
                            right = extrema[i + 1];
                            break;
                        }
                    }
                    // Shrink over flat plateaus adjacent to the edge on both sides.
                    for (int i = left; i < edgePos; i++)
                    {
                        if (row[i] == row[i + 1]) { left = i + 1; } else { break; }
                    }
                    for (int i = right; i > edgePos + 1; i--)
                    {
                        if (row[i] == row[i - 1]) { right = i - 1; } else { break; }
                    }
                    width = right - left;
                    if (width <= 0) { width = 1; }
                    widthSum += width;
                    edgeCount += 1;
                }
            }
        }
        // NOTE(review): if no edge pixel is found, edgeCount is 0 and this yields float NaN
        // (no exception) - matches the original behavior.
        Marziliano = widthSum / edgeCount;
    }
    // (A large commented-out "JNB" variant of the Marziliano loop was removed here;
    // recover it from version control if it is ever needed.)
}

/// <summary>
/// Per-channel (B, G, R) mean/min/max of one edge x edge region of img starting at
/// (rowOffset, colOffset). FIX vs. the original inline code: min/max start at 255/0 and
/// are updated independently (the original initialized both to 0 and coupled the updates
/// with inverted else-if comparisons, so min and max always stayed 0).
/// </summary>
private static void RegionChannelStats(Image <Bgr, byte> img, int rowOffset, int colOffset, int edge, double[] mean, double[] min, double[] max)
{
    for (int c = 0; c < 3; c++)
    {
        mean[c] = 0;
        min[c] = 255;
        max[c] = 0;
    }
    for (int x = 0; x < edge; x++)
    {
        for (int y = 0; y < edge; y++)
        {
            Bgr p = img[rowOffset + x, colOffset + y];
            double[] v = { p.Blue, p.Green, p.Red };
            for (int c = 0; c < 3; c++)
            {
                mean[c] += v[c];
                if (v[c] < min[c]) { min[c] = v[c]; }
                if (v[c] > max[c]) { max[c] = v[c]; }
            }
        }
    }
    double area = edge * (double)edge;
    for (int c = 0; c < 3; c++)
    {
        mean[c] /= area;
    }
}