// Menu handler: run Canny edge detection on the current working image and show it.
private void edgesToolStripMenuItem_Click(object sender, EventArgs e)
{
    var detector = new CannyEdgeDetector();
    newImg = detector.Apply(newImg);
    pictureBox4.Image = newImg;
}
// Menu handler: detect edges on the grayscale image and display them in pictureBox3.
private void CannyEdgeDetectionToolStripMenuItem1_Click(object sender, EventArgs e)
{
    var detector = new CannyEdgeDetector();
    Bitmap edges = detector.Apply(grayImage);
    pictureBox3.Image = edges;
}
// Menu handler: run Canny on the grayscale image, keep an owned copy in edgeImage, show it.
private void edgeToolStripMenuItem_Click(object sender, EventArgs e)
{
    var detector = new CannyEdgeDetector();
    // Wrap the filter output in a new Bitmap so edgeImage owns an independent copy.
    edgeImage = new Bitmap(detector.Apply(grayImage));
    pictureBox3.Image = edgeImage;
}
// Menu handler: build a 36-element edge-density feature vector from the image in `fd`.
// The Canny edge image is split into a 6x6 grid; each cell's count of edge (non-black)
// pixels is appended to `featureVector`, then a normalized copy is printed and collected.
private void cannyEdgeToolStripMenuItem_Click(object sender, EventArgs e)
{
    Bitmap grayImage = Grayscale.CommonAlgorithms.BT709.Apply(fd);
    // Thresholds 0/0 keep every detected edge; 1.4 is the conventional Gaussian sigma.
    CannyEdgeDetector canny = new CannyEdgeDetector(0, 0, 1.4);
    Bitmap edgeImage = canny.Apply(grayImage);
    pictureBox6.Image = edgeImage;

    int blackArgb = Color.Black.ToArgb();
    // Integer division: any remainder pixels on the right/bottom edges are ignored.
    int cellWidth = edgeImage.Width / 6;
    int cellHeight = edgeImage.Height / 6;
    double totalEdgePixels = 0;

    for (int i = 0; i < 6; i++)
    {
        for (int j = 0; j < 6; j++)
        {
            int count = 0;
            for (int x = i * cellWidth; x < (i * cellWidth) + cellWidth; x++)
            {
                for (int y = j * cellHeight; y < (j * cellHeight) + cellHeight; y++)
                {
                    // Compare raw ARGB values instead of Color structs: cheaper,
                    // and immune to Color's named-color equality semantics.
                    if (edgeImage.GetPixel(x, y).ToArgb() != blackArgb)
                    {
                        count++;
                        totalEdgePixels++;
                    }
                }
            }
            featureVector.Add(count);
        }
    }

    // Normalize each cell count by the total edge-pixel count.
    List<double> featureNormVector = new List<double>();
    for (int z = 0; z < featureVector.Count; z++)
    {
        // Guard against division by zero: an image with no edges would otherwise yield NaN.
        double normalizedValue = totalEdgePixels > 0 ? featureVector[z] / totalEdgePixels : 0.0;
        Console.WriteLine(normalizedValue);
        featureNormVector.Add(normalizedValue);
    }
}
// Runs Canny edge detection (high threshold 255, low threshold 0) on the input,
// caches the result in imgEdgeDetected, and returns it.
public override Bitmap ApplyFilter(Bitmap img)
{
    var detector = new CannyEdgeDetector(255, 0);
    this.imgEdgeDetected = detector.Apply(img);
    return this.imgEdgeDetected;
}
// Menu handler: re-run Canny edge detection on whatever pictureBox2 currently shows.
private void reapplyToolStripMenuItem6_Click(object sender, EventArgs e)
{
    var canny = new CannyEdgeDetector();
    pictureBox2.Image = canny.Apply((Bitmap)pictureBox2.Image);
}
// Returns the Canny edge-detection result of the given bitmap, converted back to color.
public static Bitmap Canny(Bitmap bitmap)
{
    CannyEdgeDetector cannyEdgeDetector = new CannyEdgeDetector();
    // Apply() returns a fresh bitmap, so the extra Clone() the original made was
    // redundant, and the grayscale/edge intermediates can be disposed (the
    // original leaked them).
    using (Bitmap grayscale = ImageSupporter.ColorToGrayscale(bitmap))
    using (Bitmap edges = cannyEdgeDetector.Apply(grayscale))
    {
        return ImageSupporter.GrayScaleToColor(edges);
    }
}
/// <summary>
/// Applies a Canny edge-detection filter to the working bitmap and raises a
/// change notification for the displayed image.
/// </summary>
/// <param name="low">Low hysteresis threshold.</param>
/// <param name="high">High hysteresis threshold.</param>
/// <param name="sigma">Gaussian smoothing sigma.</param>
public void ApplyCannyFilter(byte low, byte high, double sigma)
{
    // Note: the original also constructed a SobelEdgeDetector that was never used.
    var filter = new CannyEdgeDetector(low, high, sigma);
    _bitmap = filter.Apply(_bitmap);
    OnPropertyChanged("ShowBitmap");
}
// Runs AForge Canny edge detection on a copy of the input bitmap.
// Thresholds are hand-tuned: high = 150, low = 10 (library defaults are 100/20).
Bitmap CannyEdge(Bitmap bmp)
{
    Bitmap copy = AForge.Imaging.Image.Clone(bmp, bmp.PixelFormat);
    var detector = new CannyEdgeDetector(150, 10);
    // Canny requires an 8bpp grayscale source; convert unless the copy already is one.
    Bitmap source = copy.PixelFormat != PixelFormat.Format8bppIndexed
        ? Grayscale.CommonAlgorithms.BT709.Apply(copy)
        : copy;
    return detector.Apply(source);
}
// Converts workingImage to grayscale, runs Canny edge detection on it, and
// displays the result in cannyImagePanel.
public void cannyImageConvert()
{
    // AForge's Grayscale constructor takes luminance weights in (red, green, blue)
    // order. The original passed (.21, .07, .72), which swapped the green and blue
    // coefficients relative to the intended BT709-style weighting.
    Grayscale grayscaleFilter = new Grayscale(0.21, 0.72, 0.07);
    CannyEdgeDetector cannyFilter = new CannyEdgeDetector();
    workingImage = grayscaleFilter.Apply(workingImage);
    workingImage = cannyFilter.Apply(workingImage);
    cannyImagePanel.Image = workingImage;
}
// Runs Canny edge detection with hysteresis thresholds derived from the image's
// average brightness (low ≈ avg/3 - 20, high ≈ 2*avg/3 + 20).
public static Bitmap AccordCanny(Bitmap src)
{
    Bitmap ret = (Bitmap)Convert(src);
    byte avg = GetAVG(ret);
    // Clamp the low threshold at 0: for dark images (avg < 60) the original
    // expression avg/3 - 20 went negative and the byte cast wrapped to ~236+,
    // which suppressed nearly all edges.
    byte low = (byte)Math.Max(0, avg / 3 - 20);
    byte high = (byte)(2 * (avg / 3) + 20);
    CannyEdgeDetector canny = new CannyEdgeDetector(low, high, 1);
    return canny.Apply(ret);
}
/// <summary>
/// Show edges on image.
/// </summary>
/// <param name="image">8bpp indexed (grayscale) bitmap to detect edges on.</param>
/// <returns>A new image containing the detected edges.</returns>
/// <exception cref="NotSupportedException">Thrown when the image is not 8bpp indexed.</exception>
public static Image CannyEdges(this Bitmap image)
{
    if (image.PixelFormat != PixelFormat.Format8bppIndexed)
    {
        // Original message referred to a "blob extractor" — a copy/paste from another filter.
        throw new NotSupportedException("Canny edge detector can be applied to grayscale 8bpp images only");
    }
    CannyEdgeDetector cannyEdge = new CannyEdgeDetector();
    return cannyEdge.Apply(image);
}
// Applies the configured edge filter (m_filter) to the source image, first
// converting it to 8bpp grayscale when necessary, and wraps the result.
protected override IImage HandleCore(IImage src)
{
    Bitmap input = src.ToBitmap();
    if (input.PixelFormat != System.Drawing.Imaging.PixelFormat.Format8bppIndexed)
    {
        input = Grayscale.CommonAlgorithms.BT709.Apply(input);
    }
    return new BitmapWrapper(m_filter.Apply(input));
}
/// <summary>
/// Detect and save the edge contours of an input image.
/// </summary>
/// <param name="filename">Input image file.</param>
/// <param name="resultFilename">Output file that receives the edge image.</param>
public static void DrawEdges(string filename, string resultFilename)
{
    var canny = new CannyEdgeDetector((byte)80, (byte)180);
    // Dispose every intermediate bitmap (the original leaked the input bitmap
    // and the grayscale copy).
    using (var inputBmp = new Bitmap(filename))
    using (Bitmap grayScaled = ToGrayscale(inputBmp))
    using (Bitmap edgedImage = canny.Apply(grayScaled))
    {
        edgedImage.Save(resultFilename);
    }
}
// Converts imageGot to grayscale, runs Canny (thresholds 0/0, sigma 1.4),
// and displays the result in pictureBox1.
private void edgeDetect()
{
    // The original allocated an unused copy (`Bitmap news`) of imageGot that was
    // never read and never disposed — removed.
    GrayscaleBT709 gs = new GrayscaleBT709();
    imageGot = gs.Apply(imageGot);
    CannyEdgeDetector cn = new CannyEdgeDetector();
    cn.LowThreshold = 0;
    cn.HighThreshold = 0;
    cn.GaussianSigma = 1.4;
    imageGot = cn.Apply(imageGot);
    pictureBox1.Image = imageGot;
}
// Button handler: grayscale img1, binarize it with Otsu's method, then run
// Canny edge detection and show the edges in pictureBox2.
private void button4_Click(object sender, EventArgs e)
{
    var grayFilter = new Grayscale(0.2125, 0.7154, 0.0721);
    Bitmap edgeImg = grayFilter.Apply(img1);
    var otsu = new OtsuThreshold();
    otsu.ApplyInPlace(edgeImg);
    var canny = new CannyEdgeDetector();
    img2 = canny.Apply(edgeImg);
    pictureBox2.Image = img2;
}
// BackgroundWorker entry point: runs the edge detector selected by the radio
// buttons (Sobel / "Prewitt" / Canny) over either the input or output picture
// box image and replaces outputImageBox.Image with the result.
public void ApplyEdge(object sender, DoWorkEventArgs e)
{
    Bitmap raw_image = null;
    if (edgeInputRB.Checked)
    {
        // Source = input picture box, converted to 8bpp grayscale for the detectors.
        raw_image = Accord.Imaging.Filters.Grayscale.CommonAlgorithms.BT709.Apply((Bitmap)input_PB.Image.Clone());
    }
    else if (edgeOutputRb.Checked)
    {
        // Source = current output image; presumably already grayscale — TODO confirm.
        raw_image = (Bitmap)outputImageBox.Image.Clone();
    }
    // NOTE(review): if neither source radio button is checked, raw_image stays
    // null and the detector branches below throw a NullReferenceException.
    if (sobelRb.Checked)
    {
        var sobel = new SobelEdgeDetector();
        Bitmap raw_img = raw_image;
        UnmanagedImage res = sobel.Apply(UnmanagedImage.FromManagedImage(raw_img));
        // Dispose the old displayed image before swapping in the new one.
        outputImageBox.Image.Dispose();
        outputImageBox.Image = res.ToManagedImage();
    }
    else if (prewittRb.Checked)
    {
        // The "Prewitt" radio button actually maps to AForge's DifferenceEdgeDetector.
        var prewitt = new DifferenceEdgeDetector();
        Bitmap raw_img = raw_image;
        UnmanagedImage res = prewitt.Apply(UnmanagedImage.FromManagedImage(raw_img));
        outputImageBox.Image.Dispose();
        outputImageBox.Image = res.ToManagedImage();
    }
    else if (CannyRb.Checked)
    {
        var canny = new CannyEdgeDetector();
        Bitmap raw_img = raw_image;
        // Canny parameters come straight from the text boxes; Parse throws
        // FormatException on non-numeric input.
        byte High = byte.Parse(textBox3.Text);
        byte Low = byte.Parse(textBox2.Text);
        double GaussSigma = double.Parse(textBox1.Text);
        int GaussSize = int.Parse(textBox4.Text);
        canny.GaussianSize = GaussSize;
        canny.HighThreshold = High;
        canny.LowThreshold = Low;
        canny.GaussianSigma = GaussSigma;
        UnmanagedImage res = canny.Apply(UnmanagedImage.FromManagedImage(raw_img));
        outputImageBox.Image.Dispose();
        outputImageBox.Image = res.ToManagedImage();
    }
}
// Runs Canny on the image at `path` (converted to grayscale first) and shows the
// result in a new "Canny" tab, closing any previously opened one.
// `sigma` is supplied in tenths: sigma = 14 means a Gaussian sigma of 1.4.
public void doCanny(int low, int high, int sigma)
{
    Bitmap imx = new Bitmap(path);
    imx = Grayscale.CommonAlgorithms.Y.Apply(imx);
    // Divide by 10.0: the original integer division (sigma / 10) truncated the
    // fraction, so e.g. 14 became 1.0 instead of the intended 1.4.
    CannyEdgeDetector gb = new CannyEdgeDetector((byte)low, (byte)high, sigma / 10.0);
    imx = gb.Apply(imx);
    // Replace any previously shown Canny view.
    if (mov != null)
    {
        this.WorkItem.Workspaces[WorkspaceNames.TabWorkspace].Close(mov);
    }
    mov = this.WorkItem.SmartParts.AddNew<ImageAView>();
    mov.panAndZoomPictureBox1.Image = imx;
    SmartPartInfo spi = new SmartPartInfo("Canny", "MyOwnDescription");
    this.WorkItem.Workspaces[WorkspaceNames.TabWorkspace].Show(mov, spi);
}
// Accept button: run Canny with the user-supplied thresholds on the source
// bitmap, then size the picture box to match the result and display it.
private void AcceptButton_Click(object sender, RoutedEventArgs e)
{
    // Guard clause: nothing to process without a source image.
    if (_gm.SourceBitmap == null)
    {
        MessageBox.Show("NO SourcePicture");
        return;
    }
    var low = LowThresholdTextBox.Text.Trim().ToByte();
    var high = HighThresholdTextBox.Text.Trim().ToByte();
    var detector = new CannyEdgeDetector(low, high);
    _gm.CannyBitmap = detector.Apply(_gm.SourceBitmap);
    PictureBox.Width = _gm.CannyBitmap.Width;
    PictureBox.Height = _gm.CannyBitmap.Height;
    PictureBox.Image = _gm.CannyBitmap;
}
// Menu handler: full hand-gesture feature-extraction pipeline.
// 1) Skin-color segmentation of pictureBox1's image, 2) grayscale + threshold +
// morphological clean-up, 3) AND-mask the skin pixels with the biggest blob to
// isolate the palm, 4) Canny edges resized to 360x360, 5) split into
// blockSize x blockSize cells and build a normalized edge-density feature vector.
private void skinColorToolStripMenuItem_Click(object sender, EventArgs e)
{
    //Extracting RGBs
    Bitmap hand = new Bitmap(pictureBox1.Image, newSize);
    Bitmap skinDetect = new Bitmap(hand.Width, hand.Height);
    //Bitmap blackWhite = new Bitmap(hand.Width, hand.Height);
    Color black = Color.Black;
    //Color white = Color.White;
    int i, j;
    for (i = 0; i < hand.Width; i++)
    {
        for (j = 0; j < hand.Height; j++)
        {
            Color pixel = hand.GetPixel(i, j);
            int red = pixel.R;
            int green = pixel.G;
            int blue = pixel.B;
            /* (R, G, B) is classified as skin if:
             * R > 95 and G > 40 and B > 20 and
             * max {R, G, B} – min{R, G, B} > 15 and |R – G| > 15 and R > G and R > B */
            if ((red > 95 && green > 40 && blue > 20) && (max(red, green, blue) - min(red, green, blue) > 15) && Math.Abs(red - green) > 15 && red > green && red > blue)
            {
                //Console.WriteLine("Success");
                // Skin pixels are copied through; non-skin pixels keep the bitmap default.
                skinDetect.SetPixel(i, j, pixel);
            }
        }
    }
    pictureBox2.Image = new Bitmap(skinDetect);
    pictureBox2.SizeMode = PictureBoxSizeMode.StretchImage;
    // Grayscale + fixed threshold turns the skin mask into a binary image.
    Grayscale filter = new Grayscale(0.2125, 0.71254, 0.0721);
    Bitmap grayImage = filter.Apply(skinDetect);
    Threshold filter2 = new Threshold(100);
    Bitmap filteredImage = filter2.Apply(grayImage);
    // Closing then opening: fill small holes, then remove speckle noise.
    Closing close = new Closing();
    close.ApplyInPlace(filteredImage);
    Opening open = new Opening();
    open.ApplyInPlace(filteredImage);
    // create filter for the filtered image
    ExtractBiggestBlob filter3 = new ExtractBiggestBlob();
    // apply the filter
    Bitmap biggestBlobsImage = filter3.Apply(filteredImage);
    AForge.IntPoint a = filter3.BlobPosition;
    Console.WriteLine(a);
    //Biggest blob for old extracted skin image
    ExtractBiggestBlob filter4 = new ExtractBiggestBlob();
    Bitmap skinBlob = new Bitmap(skinDetect);
    //apply filter
    Bitmap biggestSkinBlob = filter4.Apply(skinBlob);
    //Skin color for largest blob
    Bitmap one = new Bitmap(biggestSkinBlob);
    Bitmap two = new Bitmap(biggestBlobsImage);
    // NOTE(review): this assumes both blob crops have identical dimensions; if the
    // two biggest blobs differ, GetPixel below can throw — TODO confirm.
    for (i = 0; i < two.Width; i++)
    {
        for (j = 0; j < two.Height; j++)
        {
            Color pixelOne = one.GetPixel(i, j);
            Color pixelTwo = two.GetPixel(i, j);
            int redOne = pixelOne.R;
            int greenOne = pixelOne.G;
            int blueOne = pixelOne.B;
            int redTwo = pixelTwo.R;
            int greenTwo = pixelTwo.G;
            int blueTwo = pixelTwo.B;
            // This mask is logically AND with original image to extract only the palm which is required for feature extraction.
            two.SetPixel(i, j, Color.FromArgb(redOne & redTwo, greenOne & greenTwo, blueOne & blueTwo));
        }
    }
    //Getting a grayscae image from the recolored image
    Bitmap getGrayImage = filter.Apply(two);
    // create filter
    CannyEdgeDetector filter1 = new CannyEdgeDetector();
    filter1.LowThreshold = 0;
    filter1.HighThreshold = 0;
    filter1.GaussianSigma = 1.4;
    // apply the filter
    Bitmap cannyEdgeImage = filter1.Apply(getGrayImage);
    // Resize the edge image to a fixed 360x360 so the grid cells are uniform.
    Bitmap resizeImage = new Bitmap(360, 360);
    using (var graphics = Graphics.FromImage(resizeImage))
        graphics.DrawImage(cannyEdgeImage, 0, 0, 360, 360);
    pictureBox3.Image = new Bitmap(resizeImage);
    pictureBox3.SizeMode = PictureBoxSizeMode.StretchImage;
    int x, y;
    //Image to obtain blocks for
    Bitmap imageWithBlock = new Bitmap(resizeImage);
    Console.WriteLine("Width = " + resizeImage.Width + " Height = " + resizeImage.Height);
    int imageHeightSize = resizeImage.Height / blockSize;
    int imageWidthSize = resizeImage.Width / blockSize;
    Console.WriteLine("Width = " + imageWidthSize + " Height = " + imageHeightSize);
    List <int> featureVector = new List <int>();
    double totalPixelCount = 0;
    for (i = 0; i < blockSize; i++)
    {
        for (j = 0; j < blockSize; j++)
        {
            int whiteEdgeCount = 0, blackEdgeCount = 0;
            for (x = i * imageWidthSize; x < (i * imageWidthSize) + imageWidthSize; x++)
            {
                for (y = j * imageHeightSize; y < (j * imageHeightSize) + imageHeightSize; y++)
                {
                    // To count the edges in the range
                    Color singlePixel = imageWithBlock.GetPixel(x, y);
                    int red = singlePixel.R;
                    int green = singlePixel.G;
                    int blue = singlePixel.B;
                    // ARGB-constructed black compares equal to GetPixel's black
                    // (both carry no known-color name).
                    if (singlePixel != Color.FromArgb(Color.Black.ToArgb()))
                    {
                        whiteEdgeCount++;
                    }
                    else
                    {
                        blackEdgeCount++;
                    }
                }
            }
            //Console.WriteLine("White = " + whiteEdgeCount + " Black = " + blackEdgeCount);
            //Add value to total count
            totalPixelCount += whiteEdgeCount;
            // whiteCount = edges in range
            featureVector.Add(whiteEdgeCount);
        }
    }
    //Calculate Normalization and add the value to the featureNormVector
    List <double> featureNormVector = new List <double>();
    //Total Pixel Count
    //Console.WriteLine(totalPixelCount);
    //Normalization
    // NOTE(review): if no edge pixels were found, totalPixelCount is 0 and the
    // division below yields NaN — TODO confirm this is acceptable upstream.
    for (i = 0; i < featureVector.Count; i++)
    {
        double normalizedValue = featureVector[i] / totalPixelCount;
        Console.WriteLine(normalizedValue);
        featureNormVector.Add(normalizedValue);
    }
}
// Grabs a frame from the camera, shows each processing stage in the picture
// boxes (edges, color filter, grayscale, threshold, blob filtering), finds the
// projection quadrilateral and warps it to 384x216, then repeats the
// color/quad pipeline on the warped image to locate a bright-yellow marker and
// derive a distance reading shown in label1.
public void picshow()
{
    Bitmap temp1;
    Bitmap temp2;
    Bitmap temp3;
    Bitmap temp4;
    Bitmap temp5;
    Bitmap temp7;
    Bitmap temp8;
    Bitmap temp9;
    Bitmap sourceImage;
    // Edge-contour filter.
    CannyEdgeDetector filter = new CannyEdgeDetector();
    // Color filter tuned to pass white-ish pixels.
    ColorFiltering colorFilter = new ColorFiltering();
    colorFilter.Red = new IntRange(50, 255);
    colorFilter.Green = new IntRange(50, 255);
    colorFilter.Blue = new IntRange(50, 255);
    // Capture the current frame from the camera.
    sourceImage = videoSourcePlayer1.GetCurrentVideoFrame();
    // Clone the frame into a bitmap we own, then release the original.
    temp1 = AForge.Imaging.Image.Clone(sourceImage, sourceImage.PixelFormat);
    sourceImage.Dispose();
    sourceImage = temp1;
    int Height = sourceImage.Size.Height;
    int Width = sourceImage.Size.Width;
    // pictureBox1: original frame.
    pictureBox1.Image = temp1;
    // pictureBox2: edge contours of the original (grayscale first if needed).
    temp2 = filter.Apply(sourceImage.PixelFormat != PixelFormat.Format8bppIndexed ? Grayscale.CommonAlgorithms.BT709.Apply(sourceImage) : sourceImage);
    pictureBox2.Image = temp2;
    // pictureBox5: color-filtered frame.
    temp5 = colorFilter.Apply(temp1);
    pictureBox5.Image = temp5;
    // pictureBox3: grayscale of the color-filtered frame.
    temp3 = new Grayscale(0.2125, 0.7154, 0.0721).Apply(temp5);
    pictureBox3.Image = temp3;
    // pictureBox4: binarized image.
    temp4 = new Threshold(10).Apply(temp3);
    pictureBox4.Image = temp4;
    // pictureBox7: small blobs removed (denoised).
    temp7 = new BlobsFiltering(40, 40, temp4.Width, temp4.Height).Apply(temp4);
    pictureBox7.Image = temp7;
    Bitmap temp6 = AForge.Imaging.Image.Clone(temp7, temp1.PixelFormat);
    temp8 = temp6;
    try
    {
        QuadrilateralFinder qf = new QuadrilateralFinder(); // finds triangle/quad corner points
        List <IntPoint> corners = qf.ProcessImage(temp6);
        /*
         * Alternative blob-based corner detection (kept for reference):
         * BlobCounter extractor = new BlobCounter();
         * extractor.FilterBlobs = true;
         * extractor.MinWidth = extractor.MinHeight = 150;
         * extractor.MaxWidth = extractor.MaxHeight = 350;
         * extractor.ProcessImage(temp6);
         *
         * foreach (Blob blob in extractor.GetObjectsInformation())
         * {
         *  // get edge points
         *  List<IntPoint> edgePoints = extractor.GetBlobsEdgePoints(blob);
         *  // use the edge points to find the four corners on the original image
         *  corners = PointsCloud.FindQuadrilateralCorners(edgePoints);
         * }
         */
        corners = CornersChange(corners, temp6.Size.Width, temp6.Size.Height);
        // Warp the detected quadrilateral region of the original frame to 384x216.
        QuadrilateralTransformation filter2 = new QuadrilateralTransformation(corners, 384, 216);
        temp8 = filter2.Apply(temp1);
    }
    catch { } // best-effort: if no quadrilateral is found, temp8 stays the blob image
    // pictureBox8: the projection area from the original, after the quad warp.
    temp9 = AForge.Imaging.Image.Clone(temp8, temp1.PixelFormat);
    pictureBox8.Image = temp8;
    // Second color filter: bright yellow.
    ColorFiltering colorFilter2 = new ColorFiltering();
    colorFilter2.Red = new IntRange(100, 255);
    colorFilter2.Green = new IntRange(100, 255);
    colorFilter2.Blue = new IntRange(0, 90);
    // Extract the yellow regions from the warped image.
    temp5 = colorFilter2.Apply(temp9);
    pictureBox5.Image = temp5;
    // Grayscale conversion.
    temp3 = new Grayscale(0.2125, 0.7154, 0.0721).Apply(temp5);
    pictureBox3.Image = temp3;
    // Binarize.
    temp4 = new Threshold(10).Apply(temp3);
    // Denoise.
    temp7 = new BlobsFiltering(40, 40, temp4.Width, temp4.Height).Apply(temp4);
    temp6 = AForge.Imaging.Image.Clone(temp7, temp9.PixelFormat);
    temp9 = temp6;
    try
    {
        QuadrilateralFinder qf = new QuadrilateralFinder(); // finds the marker's corner points
        List <IntPoint> corners = qf.ProcessImage(temp6);
        corners = CornersChange(corners, temp6.Size.Width, temp6.Size.Height);
        // NOTE(review): this transformation is constructed but never applied.
        QuadrilateralTransformation filter2 = new QuadrilateralTransformation(corners, 384, 216);
        BitmapData data = temp6.LockBits(new Rectangle(0, 0, temp6.Width, temp6.Height), ImageLockMode.ReadWrite, temp6.PixelFormat);
        // Draw the found quadrilateral and its corner markers in red.
        Drawing.Polygon(data, corners, Color.Red);
        for (int i = 0; i < corners.Count; i++)
        {
            Drawing.FillRectangle(data, new Rectangle(corners[i].X - 2, corners[i].Y - 2, 10, 10), Color.Red);
        }
        // Average vertical span of the quad -> crude distance estimate for label1.
        float juli = (corners[0].Y + corners[3].Y - corners[1].Y - corners[2].Y) / 2;
        label1.Text = ((int)((400 - juli) / 7.5)).ToString();
        temp6.UnlockBits(data);
    }
    catch { } // best-effort: leave temp9 unannotated if no marker quad is found
    pictureBox9.Image = temp9;
}
// Full preprocessing pipeline for a hand image: resize to 200x200, black out
// non-skin pixels, binarize, close holes, crop to the biggest blob, mask the
// remaining background out of the crop, resize back to 200x200, and return the
// thresholded Canny edge image.
public Bitmap Apply(Bitmap originalImage)
{
    //reduce image size so that less, bicubic resizes with less breakage
    ResizeBicubic resizeObject = new ResizeBicubic(200, 200);
    Bitmap smallOriginalImage = resizeObject.Apply(originalImage);
    Bitmap copiedImage = (Bitmap)smallOriginalImage.Clone();
    // to get the colour of the pixel passed as parameter
    for (int x = 0; x < smallOriginalImage.Width; x++)
    {
        for (int y = 0; y < smallOriginalImage.Height; y++)
        {
            // Non-skin pixels are blacked out; isSkin() decides per-pixel.
            if (!isSkin(copiedImage.GetPixel(x, y)))
            {
                copiedImage.SetPixel(x, y, Color.Black);
            }
        }
    }
    copiedImage = Grayscale.CommonAlgorithms.BT709.Apply(copiedImage);
    Threshold bwObj = new Threshold(50);
    copiedImage = bwObj.Apply(copiedImage);
    //applying closing to remove small black spots(closing holes in the image) i.e dilusion followed by erosion
    AForge.Imaging.Filters.Closing filter = new Closing();
    copiedImage = filter.Apply(copiedImage);
    //pictureBox2.Image = copiedImage;
    //extracting the biggest blob or a blob to get only the palms, here we get the bounding box
    //bounding box is the smallest box having the image, hence we see only the palms
    ExtractBiggestBlob biggestblobObject = new ExtractBiggestBlob();
    copiedImage = biggestblobObject.Apply(copiedImage);
    //we need to get the coordinates of the bounding box
    IntPoint point = biggestblobObject.BlobPosition;
    //create a rectangle to pass to the crop class, it takes x,y,height,width
    Rectangle rect = new Rectangle(point.X, point.Y, copiedImage.Width, copiedImage.Height);
    Crop cropObject = new Crop(rect);
    //we pass the original image because that cohtains noise, we remove the background and have only palms
    Bitmap croppedImage = cropObject.Apply(smallOriginalImage);
    //we still have a lot of background which need to be removed as the background between the fingers have background
    //hence we do a logical and between original image and the cropped image with pixels having white pixel
    //this operation is called as masking
    // NOTE(review): the loop below assumes the blob image and the crop have the
    // same dimensions — TODO confirm.
    for (int x = 0; x < copiedImage.Width; x++)
    {
        for (int y = 0; y < copiedImage.Height; y++)
        {
            Color c = copiedImage.GetPixel(x, y);
            // Black mask pixel -> black out the corresponding crop pixel.
            if (c.R == 0 && c.G == 0 && c.B == 0)
            {
                croppedImage.SetPixel(x, y, Color.Black);
            }
        }
    }
    //it takes time because each pixel is checked and the image is huge,
    //so we need to resize, hence we do smallOriginalImage
    //we need to resize all objects to a standard size
    croppedImage = resizeObject.Apply(croppedImage);
    //pictureBox2.Image = croppedImage;
    croppedImage = Grayscale.CommonAlgorithms.BT709.Apply(croppedImage);
    // Canny with thresholds 0/0 keeps every edge; sigma 1.4 is the usual default.
    CannyEdgeDetector cannyObj = new CannyEdgeDetector(0, 0, 1.4);
    croppedImage = cannyObj.Apply(croppedImage);
    Threshold thresObj = new Threshold(20);
    croppedImage = thresObj.Apply(croppedImage);
    return(croppedImage);
}
// Extracts a normalized edge-density feature vector from a raw hand image:
// skin extraction, binarization + morphological clean-up, biggest-blob AND-mask
// to isolate the palm, Canny edges resized to 360x360, then per-cell edge
// counts over a blockSize x blockSize grid, normalized by the total edge count.
private List <double> automateFeatureNormalizationExtraction(Bitmap rawBitmapData)
{
    Bitmap afterSkinOnly = performSkinExtract(rawBitmapData);
    // Grayscale + fixed threshold turns the skin image into a binary mask.
    Grayscale filter = new Grayscale(0.2125, 0.71254, 0.0721);
    Bitmap grayImage = filter.Apply(afterSkinOnly);
    Threshold filter2 = new Threshold(100);
    Bitmap filteredImage = filter2.Apply(grayImage);
    // Closing then opening: fill small holes, then remove speckle noise.
    Closing close = new Closing();
    close.ApplyInPlace(filteredImage);
    Opening open = new Opening();
    open.ApplyInPlace(filteredImage);
    // create filter for the filtered image
    ExtractBiggestBlob filter3 = new ExtractBiggestBlob();
    // apply the filter
    Bitmap biggestBlobsImage = filter3.Apply(filteredImage);
    AForge.IntPoint a = filter3.BlobPosition;
    //Console.WriteLine(a);
    //Biggest blob for old extracted skin image
    ExtractBiggestBlob filter4 = new ExtractBiggestBlob();
    Bitmap skinBlob = new Bitmap(afterSkinOnly);
    //apply filter
    Bitmap biggestSkinBlob = filter4.Apply(skinBlob);
    //Skin color for largest blob
    Bitmap one = new Bitmap(biggestSkinBlob);
    Bitmap two = new Bitmap(biggestBlobsImage);
    int i, j;
    // NOTE(review): assumes both blob crops have identical dimensions; if the two
    // biggest blobs differ, GetPixel below can throw — TODO confirm.
    for (i = 0; i < two.Width; i++)
    {
        for (j = 0; j < two.Height; j++)
        {
            Color pixelOne = one.GetPixel(i, j);
            Color pixelTwo = two.GetPixel(i, j);
            int redOne = pixelOne.R;
            int greenOne = pixelOne.G;
            int blueOne = pixelOne.B;
            int redTwo = pixelTwo.R;
            int greenTwo = pixelTwo.G;
            int blueTwo = pixelTwo.B;
            // This mask is logically AND with original image to extract only the palm which is required for feature extraction.
            two.SetPixel(i, j, Color.FromArgb(redOne & redTwo, greenOne & greenTwo, blueOne & blueTwo));
        }
    }
    //Getting a grayscae image from the recolored image
    Bitmap getGrayImage = filter.Apply(two);
    // create filter
    CannyEdgeDetector filter1 = new CannyEdgeDetector();
    filter1.LowThreshold = 0;
    filter1.HighThreshold = 0;
    filter1.GaussianSigma = 1.4;
    // apply the filter
    Bitmap cannyEdgeImage = filter1.Apply(getGrayImage);
    // Resize the edge image to a fixed 360x360 so the grid cells are uniform.
    Bitmap resizeImage = new Bitmap(360, 360);
    using (var graphics = Graphics.FromImage(resizeImage))
        graphics.DrawImage(cannyEdgeImage, 0, 0, 360, 360);
    pictureBox3.Image = new Bitmap(resizeImage);
    pictureBox3.SizeMode = PictureBoxSizeMode.StretchImage;
    int x, y;
    //Image to obtain blocks for
    Bitmap imageWithBlock = new Bitmap(resizeImage);
    //Console.WriteLine("Width = " + resizeImage.Width + " Height = " + resizeImage.Height);
    int imageHeightSize = resizeImage.Height / blockSize;
    int imageWidthSize = resizeImage.Width / blockSize;
    //Console.WriteLine("Width = " + imageWidthSize + " Height = " + imageHeightSize);
    List <int> featureVector = new List <int>();
    double totalPixelCount = 0;
    for (i = 0; i < blockSize; i++)
    {
        for (j = 0; j < blockSize; j++)
        {
            int whiteEdgeCount = 0, blackEdgeCount = 0;
            for (x = i * imageWidthSize; x < (i * imageWidthSize) + imageWidthSize; x++)
            {
                for (y = j * imageHeightSize; y < (j * imageHeightSize) + imageHeightSize; y++)
                {
                    // To count the edges in the range
                    Color singlePixel = imageWithBlock.GetPixel(x, y);
                    int red = singlePixel.R;
                    int green = singlePixel.G;
                    int blue = singlePixel.B;
                    // ARGB-constructed black compares equal to GetPixel's black
                    // (both carry no known-color name).
                    if (singlePixel != Color.FromArgb(Color.Black.ToArgb()))
                    {
                        whiteEdgeCount++;
                    }
                    else
                    {
                        blackEdgeCount++;
                    }
                }
            }
            //Console.WriteLine("White = " + whiteEdgeCount + " Black = " + blackEdgeCount);
            //Add value to total count
            totalPixelCount += whiteEdgeCount;
            // whiteCount = edges in range
            featureVector.Add(whiteEdgeCount);
        }
    }
    //Calculate Normalization and add the value to the featureNormVector
    List <double> featureNormVector = new List <double>();
    //Total Pixel Count
    //Console.WriteLine(totalPixelCount);
    //Normalization
    // NOTE(review): if no edge pixels were found, totalPixelCount is 0 and the
    // division below yields NaN — TODO confirm this is acceptable to callers.
    for (i = 0; i < featureVector.Count; i++)
    {
        double normalizedValue = featureVector[i] / totalPixelCount;
        Console.WriteLine(normalizedValue);
        featureNormVector.Add(normalizedValue);
    }
    Console.WriteLine("Total count of norm(individual)=" + i);
    return(featureNormVector);
}
// Card detector: grayscale -> optional smoothing -> optional edge detection ->
// optional threshold -> blob extraction; convex 4-corner parallelograms and
// rectangles are outlined in blue on the output, other convex shapes in
// orange, non-convex blobs in red. The _smoothMode/_edgeMode/_drawMode strings
// and the _min* fields are configuration set elsewhere in the class.
public Bitmap Detect(Bitmap bitmap)
{
    Bitmap grayscaleBitmap = Grayscale.CommonAlgorithms.BT709.Apply(bitmap);
    // Optional smoothing stage, chosen by name.
    IFilter smoothingFilter = null;
    switch (_smoothMode)
    {
        case "None": smoothingFilter = null; break;
        case "Mean": smoothingFilter = new Mean(); break;
        case "Median": smoothingFilter = new Median(); break;
        case "Conservative": smoothingFilter = new ConservativeSmoothing(); break;
        case "Adaptive": smoothingFilter = new AdaptiveSmoothing(); break;
        case "Bilateral": smoothingFilter = new BilateralSmoothing(); break;
    }
    Bitmap smoothBitmap = smoothingFilter != null?smoothingFilter.Apply(grayscaleBitmap) : grayscaleBitmap;
    // Optional edge-detection stage, chosen by name.
    IFilter edgeFilter = null;
    switch (_edgeMode)
    {
        case "Homogenity": edgeFilter = new HomogenityEdgeDetector(); break;
        case "Difference": edgeFilter = new DifferenceEdgeDetector(); break;
        case "Sobel": edgeFilter = new SobelEdgeDetector(); break;
        case "Canny": edgeFilter = new CannyEdgeDetector(); break;
    }
    Bitmap edgeBitmap = edgeFilter != null?edgeFilter.Apply(smoothBitmap) : smoothBitmap;
    // Threshold of 0 disables the binarization stage.
    IFilter threshholdFilter = new Threshold(_threshold);
    Bitmap thresholdBitmap = _threshold == 0 ? edgeBitmap : threshholdFilter.Apply(edgeBitmap);
    // Extract blobs at least _minWidth x _minHeight in size.
    BlobCounter blobCounter = new BlobCounter();
    blobCounter.FilterBlobs = true;
    blobCounter.MinHeight = _minHeight;
    blobCounter.MinWidth = _minWidth;
    blobCounter.ProcessImage(thresholdBitmap);
    Blob[] blobs = blobCounter.GetObjectsInformation();
    Bitmap outputBitmap = new Bitmap(thresholdBitmap.Width, thresholdBitmap.Height, PixelFormat.Format24bppRgb);
    Graphics bitmapGraphics = Graphics.FromImage(outputBitmap);
    // _drawMode picks which pipeline stage is used as the output background.
    Bitmap inputBitmap = null;
    switch (_drawMode)
    {
        case "Original": inputBitmap = bitmap; break;
        case "Grayscale": inputBitmap = grayscaleBitmap; break;
        case "Smooth": inputBitmap = smoothBitmap; break;
        case "Edge": inputBitmap = edgeBitmap; break;
        case "Threshold": inputBitmap = thresholdBitmap; break;
    }
    if (inputBitmap != null)
    {
        bitmapGraphics.DrawImage(inputBitmap, 0, 0);
    }
    Pen nonConvexPen = new Pen(Color.Red, 2);
    Pen nonRectPen = new Pen(Color.Orange, 2);
    Pen cardPen = new Pen(Color.Blue, 2);
    SimpleShapeChecker shapeChecker = new SimpleShapeChecker();
    List <IntPoint> cardPositions = new List <IntPoint>();
    for (int i = 0; i < blobs.Length; i++)
    {
        List <IntPoint> edgePoints = blobCounter.GetBlobsEdgePoints(blobs[i]);
        List <IntPoint> corners;
        if (shapeChecker.IsConvexPolygon(edgePoints, out corners))
        {
            PolygonSubType subType = shapeChecker.CheckPolygonSubType(corners);
            if ((subType == PolygonSubType.Parallelogram || subType == PolygonSubType.Rectangle) && corners.Count == 4)
            {
                // Check if its sideways, if so rearrange the corners so it's vertical.
                RearrangeCorners(corners);
                // Prevent detecting the same card twice by comparing distance against other detected cards.
                bool sameCard = false;
                foreach (IntPoint point in cardPositions)
                {
                    if (corners[0].DistanceTo(point) < _minDistance)
                    {
                        sameCard = true;
                        break;
                    }
                }
                if (sameCard)
                {
                    continue;
                }
                // Hack to prevent it from detecting smaller sections of the card instead of the whole card.
                if (GetArea(corners) < _minArea)
                {
                    continue;
                }
                cardPositions.Add(corners[0]);
                bitmapGraphics.DrawPolygon(cardPen, ToPointsArray(corners));
            }
            else
            {
                // Convex but not a card shape: mark its edge points in orange
                // (capped at 300 points to bound drawing time).
                foreach (IntPoint point in edgePoints.Take(300))
                {
                    bitmapGraphics.DrawEllipse(nonRectPen, point.X, point.Y, 1, 1);
                }
            }
        }
        else
        {
            // Non-convex blob: mark its edge points in red.
            foreach (IntPoint point in edgePoints.Take(300))
            {
                bitmapGraphics.DrawEllipse(nonConvexPen, point.X, point.Y, 1, 1);
            }
        }
    }
    bitmapGraphics.Dispose();
    nonConvexPen.Dispose();
    nonRectPen.Dispose();
    cardPen.Dispose();
    return(outputBitmap);
}
// Back-projection variant of picshow(): captures a camera frame, locates the
// projection quadrilateral, warps it to 1920x1040, then finds the bright-yellow
// marker quad inside the warp and back-projects SourceInputImage.jpg onto a
// screen-sized white canvas at that quad's position (shown in pictureBox9).
public void picback()
{
    Bitmap temp1;
    Bitmap temp2;
    Bitmap temp3;
    Bitmap temp4;
    Bitmap temp5;
    Bitmap temp6;
    Bitmap temp7;
    Bitmap temp8;
    Bitmap temp9;
    Bitmap temp10;
    Bitmap sourceImage;
    // Edge-contour filter.
    CannyEdgeDetector filter = new CannyEdgeDetector();
    // Color filter configured to pass white-ish pixels.
    ColorFiltering colorFilter = new ColorFiltering();
    colorFilter.Red = new IntRange(50, 255);
    colorFilter.Green = new IntRange(50, 255);
    colorFilter.Blue = new IntRange(50, 255);
    // Capture the current frame from the camera.
    sourceImage = videoSourcePlayer1.GetCurrentVideoFrame();
    // Clone the frame into a bitmap we own.
    temp1 = AForge.Imaging.Image.Clone(sourceImage, sourceImage.PixelFormat);
    // Release the captured frame.
    sourceImage.Dispose();
    //sourceImage = temp1;
    int Height = temp1.Size.Height;
    int Width = temp1.Size.Width;
    // pictureBox1 would be the original frame.
    //pictureBox1.Image = temp1;
    // Edge contours of the frame (grayscale first if needed).
    temp2 = filter.Apply(temp1.PixelFormat != PixelFormat.Format8bppIndexed ? Grayscale.CommonAlgorithms.BT709.Apply(temp1) : temp1);
    // pictureBox2 would show the contours.
    //pictureBox2.Image = temp2;
    // Color-filter the frame.
    temp5 = colorFilter.Apply(temp1);
    //pictureBox5.Image = temp5;
    // Grayscale conversion.
    temp3 = new Grayscale(0.2125, 0.7154, 0.0721).Apply(temp5);
    //pictureBox3.Image = temp3;
    // Binarize.
    temp4 = new Threshold(10).Apply(temp3);
    //pictureBox4.Image = temp4;
    // Remove small blobs (denoise).
    temp7 = new BlobsFiltering(40, 40, temp4.Width, temp4.Height).Apply(temp4);
    //pictureBox7.Image = temp7;
    // Clone back to the original pixel format for the quad finder.
    temp6 = AForge.Imaging.Image.Clone(temp7, temp1.PixelFormat);
    temp8 = temp6;
    try
    {
        QuadrilateralFinder qf = new QuadrilateralFinder(); // finds triangle/quad corner points
        List <IntPoint> corners = qf.ProcessImage(temp6);
        // Re-map the corner points.
        corners = CornersChange(corners, temp6.Size.Width, temp6.Size.Height);
        // Warp the detected quadrilateral region of the original frame to 1920x1040.
        QuadrilateralTransformation filter2 = new QuadrilateralTransformation(corners, 1920, 1040);
        temp8 = filter2.Apply(temp1);
    }
    catch { } // best-effort: if no quadrilateral is found, temp8 stays the blob image
    // temp9 is a copy of the warped image.
    temp9 = AForge.Imaging.Image.Clone(temp8, temp1.PixelFormat);
    //pictureBox8.Image = temp8;
    // Second color filter: bright yellow.
    ColorFiltering colorFilter2 = new ColorFiltering();
    colorFilter2.Red = new IntRange(100, 255);
    colorFilter2.Green = new IntRange(100, 255);
    colorFilter2.Blue = new IntRange(0, 90);
    // Extract the yellow regions.
    temp5 = colorFilter2.Apply(temp9);
    // Grayscale conversion.
    temp3 = new Grayscale(0.2125, 0.7154, 0.0721).Apply(temp5);
    // Binarize.
    temp4 = new Threshold(10).Apply(temp3);
    // Denoise.
    temp7 = new BlobsFiltering(40, 40, temp4.Width, temp4.Height).Apply(temp4);
    temp6 = AForge.Imaging.Image.Clone(temp7, temp9.PixelFormat);
    temp10 = AForge.Imaging.Image.Clone(temp6, temp6.PixelFormat);
    pictureBox8.Image = temp10;
    try
    {
        QuadrilateralFinder qf = new QuadrilateralFinder(); // finds the marker's corner points
        List <IntPoint> corners = qf.ProcessImage(temp6);
        corners = CornersChange(corners, temp6.Size.Width, temp6.Size.Height);
        Rectangle rect = new Rectangle();
        rect = Screen.GetWorkingArea(this);
        string path = OriPath + "\\SourceInputImage.jpg";
        Bitmap bt = new Bitmap(path);
        // Allocate a screen-sized bitmap in the same pixel format as bt.
        DisplayBitmap = new Bitmap(rect.Width, rect.Height, bt.PixelFormat);
        Graphics g = Graphics.FromImage(DisplayBitmap);
        // Fill the canvas with white before back-projecting.
        g.FillRectangle(Brushes.White, new Rectangle(0, 0, rect.Width, rect.Height));
        // Back-project the source image into the detected quadrilateral.
        AForge.Imaging.Filters.BackwardQuadrilateralTransformation Bfilter = new AForge.Imaging.Filters.BackwardQuadrilateralTransformation(bt, corners);
        temp10 = Bfilter.Apply(DisplayBitmap);
        //string testsavepath = OriPath + "\\SourcePic.bmp";
        //DisplayBitmap.Save(testsavepath);
        /*
         * Debug drawing of the detected quad (kept for reference):
         * BitmapData data = temp6.LockBits(new Rectangle(0, 0, temp6.Width, temp6.Height),
         *  ImageLockMode.ReadWrite, temp6.PixelFormat);
         * Drawing.Polygon(data, corners, Color.Red);
         * for (int i = 0; i < corners.Count; i++)
         * {
         *  Drawing.FillRectangle(data,
         *   new Rectangle(corners[i].X - 2, corners[i].Y - 2, 10, 10),
         *   Color.Red);
         * }
         *
         * temp6.UnlockBits(data);
         */
    }
    catch { } // best-effort: leave temp10 as the denoised clone on failure
    pictureBox9.Image = temp10;
}
/// <summary>
/// <para>Pulls the captcha image</para>
/// <para>Edge-filters it and runs the ocr on it</para>
/// <para>fills in the blanks</para>
/// <para>submits the page</para>
/// </summary>
/// <param name="challenge">Uri of the captcha challenge page.</param>
/// <param name="cancellationToken"></param>
/// <param name="answer">The recognized text, or null when solving failed.</param>
/// <returns>true when a non-blank answer was produced.</returns>
private bool SolveCaptcha(Uri challenge, CancellationToken cancellationToken, out String answer) {
    answer = null;
    var tesseractEngine = this.TesseractEngine;
    if (null == tesseractEngine) { return(false); }
    var captchaData = this.PullCaptchaData(challenge);
    if (captchaData.ImageUri == null) {
        captchaData.Status = CaptchaStatus.NoImageFoundToBeSolved;
        this.UpdateCaptchaData(captchaData);
        return(false);
    }
    Console.WriteLine(Resources.Uber_SolveCaptcha_Attempting_OCR_on__0_, captchaData.ImageUri.AbsolutePath);
    captchaData.Status = CaptchaStatus.SolvingImage;
    this.UpdateCaptchaData(captchaData);
    // Round-trip the challenge image through a temp PNG so both AForge and
    // Tesseract can load it from disk.
    var folder = new Folder(Path.GetTempPath());
    Document document;
    folder.TryGetTempDocument(document: out document, extension: "png");
    this.PictureBoxChallenge.Image.Save(document.FullPathWithFileName, ImageFormat.Png);
    var aforgeImage = AForge.Imaging.Image.FromFile(document.FullPathWithFileName);
    var cannyEdgeDetector = new CannyEdgeDetector();
    // BUG FIX: Apply() returns a NEW bitmap — the previous code discarded the
    // result, so the unfiltered image was saved and OCR'd. Canny also requires
    // an 8bpp grayscale source, so convert first when necessary.
    if (aforgeImage.PixelFormat != PixelFormat.Format8bppIndexed) {
        aforgeImage = Grayscale.CommonAlgorithms.BT709.Apply(aforgeImage);
    }
    aforgeImage = cannyEdgeDetector.Apply(aforgeImage);
    aforgeImage.Save(document.FullPathWithFileName, ImageFormat.Png);
    this.PictureBoxChallenge.ImageLocation = document.FullPathWithFileName;
    this.PictureBoxChallenge.Load();
    this.Throttle(Seconds.Ten);
    using (var img = Pix.LoadFromFile(document.FullPathWithFileName).Deskew()) {
        using (var page = tesseractEngine.Process(img, PageSegMode.SingleLine)) {
            answer = page.GetText();
            // Collapse the OCR output into a single whitespace-separated line.
            var paragraph = new Paragraph(answer);
            answer = new Sentence(paragraph.ToStrings(" ")).ToStrings(" ");
            // Delete the temp file a minute from now; the picture box may
            // still be reading it right now.
            FluentTimers.Create(Minutes.One, () => document.Delete()).AndStart();
            if (!String.IsNullOrWhiteSpace(answer)) {
                captchaData.Status = CaptchaStatus.SolvedChallenge;
                this.UpdateCaptchaData(captchaData);
                return(true);
            }
            return(false);
        }
    }
}
// Captures the current frame into pictureBox2, masks skin-coloured pixels
// (four image quadrants processed in parallel), cleans the mask
// (grayscale -> dilatation -> biggest blob), composites the original pixels
// back over the blob, extracts a 6x6 edge-histogram feature vector from the
// Canny edges, writes it to E:\test.txt in libsvm format, and classifies it
// with an SVM, showing the predicted letter in a message box.
private void button2_Click(object sender, EventArgs e)
{
    pictureBox2.Image = (Bitmap)pictureBox1.Image.Clone();
    Bitmap src = new Bitmap(pictureBox2.Image);
    Bitmap res = new Bitmap(pictureBox2.Image);
    SaveFileDialog saveDialog = new SaveFileDialog(); // NOTE(review): never used
    // Normalise both copies to 200x200.
    src = resize(src, new Size(200, 200));
    res = resize(res, new Size(200, 200));
    pictureBox2.Image = src;
    srcImg = src;
    pictureBox2.Image = res;
    Bitmap sampleImage = new Bitmap(pictureBox2.Image);
    var rect = new Rectangle(0, 0, sampleImage.Width, sampleImage.Height);
    var data = sampleImage.LockBits(rect, ImageLockMode.ReadWrite, sampleImage.PixelFormat);
    var depth = Bitmap.GetPixelFormatSize(data.PixelFormat) / 8; // bytes per pixel
    var buffer = new byte[data.Width * data.Height * depth];
    // Copy pixels to buffer, then skin-threshold each quadrant concurrently.
    Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
    System.Threading.Tasks.Parallel.Invoke(
        () => { //upper-left
            Process(buffer, 0, 0, data.Width / 2, data.Height / 2, data.Width, depth);
        },
        () => { //upper-right
            Process(buffer, data.Width / 2, 0, data.Width, data.Height / 2, data.Width, depth);
        },
        () => { //lower-left
            Process(buffer, 0, data.Height / 2, data.Width / 2, data.Height, data.Width, depth);
        },
        () => { //lower-right
            Process(buffer, data.Width / 2, data.Height / 2, data.Width, data.Height, data.Width, depth);
        }
    );
    //Copy the buffer back to image
    Marshal.Copy(buffer, 0, data.Scan0, buffer.Length);
    sampleImage.UnlockBits(data);
    pictureBox2.Image = sampleImage;
    dstImg = sampleImage;
    // Marks skin-coloured pixels white and everything else black inside the
    // given rectangle (classic RGB skin-detection thresholds).
    // NOTE(review): the buffer1/depth1 parameters are ignored — the body uses
    // the captured locals `buffer` and `depth` instead. The call sites pass
    // the same values, so behavior is unaffected, but the parameters mislead.
    void Process(byte[] buffer1, int x, int y, int endx, int endy, int width, int depth1)
    {
        for (int i = x; i < endx; i++)
        {
            for (int j = y; j < endy; j++)
            {
                var offset = ((j * width) + i) * depth;
                var B = buffer[offset + 0];
                var G = buffer[offset + 1];
                var R = buffer[offset + 2];
                var a = Math.Max(R, Math.Max(B, G));
                var b = Math.Min(R, Math.Min(B, G));
                if (!(((R > 95) && (G > 40) && (B > 20) && ((a - b) > 15) && (Math.Abs(R - G) > 15) && (R > G) && (R > B)) || ((R > 220) && (G > 210) && (B > 170) && ((a - b) > 15) && (Math.Abs(R - G) > 15) && (R > G) && (G > B))))
                {
                    buffer[offset + 0] = buffer[offset + 1] = buffer[offset + 2] = 0;
                }
                else
                {
                    buffer[offset + 0] = buffer[offset + 1] = buffer[offset + 2] = 255;
                }
            }
        }
    }
    //Graysacle
    GrayscaleBT709 filter = new GrayscaleBT709();
    pictureBox2.Image = filter.Apply((Bitmap)pictureBox2.Image);
    dstImg = filter.Apply(dstImg);
    //Dilatation
    try
    {
        Dilatation filter1 = new Dilatation();
        pictureBox2.Image = filter1.Apply((Bitmap)pictureBox2.Image);
        dstImg = filter1.Apply(dstImg);
    }
    catch (Exception)
    {
        // Dilatation needs a grayscale source; tell the user to convert first.
        System.Windows.Forms.MessageBox.Show("Apply Grayscale");
    }
    //Biggest Blob Extraction
    ExtractBiggestBlob filter2 = new ExtractBiggestBlob();
    pictureBox2.Image = filter2.Apply((Bitmap)pictureBox2.Image);
    dstImg = filter2.Apply(dstImg);
    blob = filter2.BlobPosition;
    Bitmap newBmp = new Bitmap(dstImg.Width, dstImg.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
    using (Graphics gfx = Graphics.FromImage(newBmp))
    {
        gfx.DrawImage(dstImg, 0, 0);
    }
    // Copy the original colours back for every non-black (blob) pixel.
    for (int i = 0; i < dstImg.Width; i++)
    {
        for (int j = 0; j < dstImg.Height; j++)
        {
            System.Drawing.Color srcColor = srcImg.GetPixel(i + blob.X, j + blob.Y);
            System.Drawing.Color dstColor = dstImg.GetPixel(i, j);
            if (!(dstColor.R >= 0 && dstColor.R <= 10 && dstColor.G >= 0 && dstColor.G <= 10 && dstColor.B >= 0 && dstColor.B <= 10))
            {
                newBmp.SetPixel(i, j, srcColor);
            }
        }
    }
    dstImg = newBmp;
    pictureBox2.Image = newBmp;
    // Histogram-of-edge-frequency features: grayscale -> Canny edges.
    List <double> edgeCount = new List <double>();
    List <double> ratio = new List <double>();
    int pixelCount = 0;
    Bitmap hoefImage = new Bitmap(pictureBox2.Image);
    GrayscaleBT709 grayFilter = new GrayscaleBT709();
    hoefImage = grayFilter.Apply((Bitmap)pictureBox2.Image);
    CannyEdgeDetector cannyFilter = new CannyEdgeDetector(0, 0, 1.4);
    hoefImage = cannyFilter.Apply(hoefImage);
    pictureBox2.Image = hoefImage;
    // Split the edge image into a 6x6 grid of 40x40 tiles.
    var imgarray = new System.Drawing.Image[36];
    for (int i = 0; i < 6; i++)
    {
        for (int j = 0; j < 6; j++)
        {
            pixelCount++;
            var index = i * 6 + j;
            imgarray[index] = new Bitmap(40, 40);
            var graphics = Graphics.FromImage(imgarray[index]);
            graphics.DrawImage(hoefImage, new Rectangle(0, 0, 40, 40), new Rectangle(i * 40, j * 40, 40, 40), GraphicsUnit.Pixel);
            graphics.Dispose();
        }
    }
    // Count non-black (edge) pixels per tile.
    for (int n = 0; n < 36; n++)
    {
        int counter = 0;
        Bitmap bufferImage = new Bitmap(imgarray[n]);
        for (int i = 0; i < 40; i++)
        {
            for (int j = 0; j < 40; j++)
            {
                System.Drawing.Color hoefColor = bufferImage.GetPixel(i, j);
                if (!(hoefColor.R == 0 && hoefColor.G == 0 && hoefColor.B == 0))
                {
                    counter++;
                }
            }
        }
        edgeCount.Add(counter);
    }
    // Normalise the per-tile counts into ratios.
    double Total = edgeCount.Sum();
    foreach (double x in edgeCount)
    {
        var a = x / Total;
        ratio.Add(a);
    }
    // Write the labelled feature vector in libsvm format.
    // NOTE(review): fs/sw are not in `using` blocks — handle leaks on exception.
    FileStream fs = new FileStream(@"E:\test.txt", FileMode.Create, FileAccess.Write);
    StreamWriter sw = new StreamWriter(fs);
    int no = 0;
    sw.Write((++no) + " ");
    for (int i = 0; i < ratio.Count; ++i)
    {
        sw.Write(i + ":" + ratio[i].ToString() + " ");
    }
    sw.WriteLine();
    sw.Close();
    fs.Close();
    //Support Vector Machine
    Problem train = Problem.Read(@"E:\AI.txt");
    Problem test = Problem.Read(@"E:\test.txt");
    Parameter parameters = new Parameter();
    double C; double Gamma; // NOTE(review): unused locals
    parameters.C = 32;
    parameters.Gamma = 8;
    Model model = Training.Train(train, parameters);
    Prediction.Predict(test, @"E:\result.txt", model, false);
    // Map the predicted class index (1..11) to the letter A..K.
    FileStream fs1 = new FileStream(@"E:\result.txt", FileMode.Open, FileAccess.Read);
    StreamReader sw1 = new StreamReader(fs1);
    string w = sw1.ReadLine();
    if (w == "1") { MessageBox.Show("A"); }
    else if (w == "2") { MessageBox.Show("B"); }
    else if (w == "3") { MessageBox.Show("C"); }
    else if (w == "4") { MessageBox.Show("D"); }
    else if (w == "5") { MessageBox.Show("E"); }
    else if (w == "6") { MessageBox.Show("F"); }
    else if (w == "7") { MessageBox.Show("G"); }
    else if (w == "8") { MessageBox.Show("H"); }
    else if (w == "9") { MessageBox.Show("I"); }
    else if (w == "10") { MessageBox.Show("J"); }
    else if (w == "11") { MessageBox.Show("K"); }
}
// Computes a 36-bin histogram-of-edge-frequency feature vector from the image
// in pictureBox2 (grayscale -> Canny edges -> 6x6 grid of 40x40 tiles ->
// per-tile edge-pixel ratio) and appends it to D:\AI.txt in libsvm format.
private void hOGToolStripMenuItem_Click(object sender, EventArgs e)
{
    List <double> edgeCount = new List <double>();
    List <double> ratio = new List <double>();
    Bitmap Destimg = new Bitmap(pictureBox2.Image);
    // Grayscale (BT709) then Canny on both the displayed image and the
    // working copy.
    GrayscaleBT709 go = new GrayscaleBT709();
    pictureBox2.Image = go.Apply((Bitmap)pictureBox2.Image);
    Destimg = go.Apply(Destimg);
    CannyEdgeDetector filter = new CannyEdgeDetector(0, 0, 1.4);
    pictureBox2.Image = filter.Apply((Bitmap)pictureBox2.Image);
    Destimg = filter.Apply(Destimg);
    // Split the edge image into a 6x6 grid of 40x40 tiles.
    var imgarray = new System.Drawing.Image[36];
    for (int i = 0; i < 6; i++)
    {
        for (int j = 0; j < 6; j++)
        {
            var index = i * 6 + j;
            imgarray[index] = new Bitmap(40, 40);
            using (var graphics = Graphics.FromImage(imgarray[index]))
            {
                graphics.DrawImage(Destimg, new Rectangle(0, 0, 40, 40), new Rectangle(i * 40, j * 40, 40, 40), GraphicsUnit.Pixel);
            }
        }
    }
    // Count non-black (edge) pixels in each tile.
    for (int n = 0; n < 36; n++)
    {
        int counter = 0;
        using (Bitmap bufferImage = new Bitmap(imgarray[n]))
        {
            for (int i = 0; i < 40; i++)
            {
                for (int j = 0; j < 40; j++)
                {
                    System.Drawing.Color hoefColor = bufferImage.GetPixel(i, j);
                    if (!(hoefColor.R == 0 && hoefColor.G == 0 && hoefColor.B == 0))
                    {
                        counter++;
                    }
                }
            }
        }
        edgeCount.Add(counter);
    }
    // Normalise counts into ratios. The (float) cast is kept deliberately so
    // the exact values written to the training file are unchanged.
    double total = edgeCount.Sum();
    foreach (double x in edgeCount)
    {
        var a = (float)x / total;
        ratio.Add(a);
    }
    // FIX: the streams were previously Close()d manually and leaked the file
    // handle when an exception occurred; `using` guarantees disposal.
    using (FileStream fs = new FileStream(@"D:\AI.txt", FileMode.Append, FileAccess.Write))
    using (StreamWriter sw = new StreamWriter(fs))
    {
        for (int i = 0; i < ratio.Count; ++i)
        {
            sw.Write(i + ":" + ratio[i].ToString() + " ");
        }
        sw.WriteLine();
    }
}
// Batch feature extraction: walks every image in every sub-directory of
// `path` (one directory per gesture class, class label = `no`), runs the
// skin-mask -> dilate -> biggest-blob -> composite -> Canny -> 6x6
// edge-histogram pipeline on each image, appends each labelled feature
// vector to D:\AI.txt in libsvm format, then trains/predicts with the SVM.
private void resultGestureToolStripMenuItem_Click(object sender, EventArgs e)
{
    int dir;
    int no;
    List <string> filedir = new List <string>(Directory.GetDirectories(path));
    for (dir = 0, no = 0; (dir < filedir.Count && no <= 26); dir++, no++)
    {
        string[] filePaths = Directory.GetFiles(filedir[dir].ToString());
        List <Bitmap> y = new List <Bitmap>();
        foreach (var iI in filePaths)
        {
            Bitmap Image = new Bitmap(iI);
            y.Add(Image);
        }
        foreach (Bitmap img in y)
        {
            pictureBox1.Image = img;
            srcImg = img;
            dstImg = img;
            Bitmap skin = new Bitmap(pictureBox1.Image);
            var rect = new Rectangle(0, 0, skin.Width, skin.Height);
            var data = skin.LockBits(rect, ImageLockMode.ReadWrite, skin.PixelFormat);
            var depth = Bitmap.GetPixelFormatSize(data.PixelFormat) / 8; // bytes per pixel
            var buffer = new byte[data.Width * data.Height * depth];
            // Copy pixels to a buffer and skin-threshold the four quadrants
            // in parallel.
            Marshal.Copy(data.Scan0, buffer, 0, buffer.Length);
            System.Threading.Tasks.Parallel.Invoke(
                () => { // upper-left
                    Process(buffer, 0, 0, data.Width / 2, data.Height / 2, data.Width, depth);
                },
                () => { // upper-right
                    Process(buffer, data.Width / 2, 0, data.Width, data.Height / 2, data.Width, depth);
                },
                () => { // lower-left
                    Process(buffer, 0, data.Height / 2, data.Width / 2, data.Height, data.Width, depth);
                },
                () => { // lower-right
                    Process(buffer, data.Width / 2, data.Height / 2, data.Width, data.Height, data.Width, depth);
                }
            );
            // Copy the buffer back to the image.
            Marshal.Copy(buffer, 0, data.Scan0, buffer.Length);
            skin.UnlockBits(data);
            pictureBox2.Image = skin;
            Bitmap src = new Bitmap(pictureBox1.Image);
            Bitmap res = new Bitmap(pictureBox2.Image);
            src = resize(src, new Size(200, 200));
            res = resize(res, new Size(200, 200));
            pictureBox1.Image = src;
            pictureBox2.Image = res;
            GrayscaleBT709 grayoject = new GrayscaleBT709();
            pictureBox2.Image = grayoject.Apply((Bitmap)pictureBox2.Image);
            Dilatation filter = new Dilatation();
            // apply the filter
            pictureBox2.Image = filter.Apply((Bitmap)pictureBox2.Image);
            ExtractBiggestBlob filter1 = new ExtractBiggestBlob();
            // BUG FIX: this line previously called `filter.Apply` (the
            // Dilatation again) instead of `filter1.Apply`, so the biggest
            // blob was never extracted and filter1.BlobPosition was read
            // without the filter ever running.
            pictureBox2.Image = filter1.Apply((Bitmap)pictureBox2.Image);
            blob = filter1.BlobPosition;
            Bitmap src1 = new Bitmap(pictureBox1.Image);
            Bitmap res1 = new Bitmap(pictureBox2.Image);
            Bitmap newBmp = new Bitmap(src1.Width, res1.Height, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
            // Composite the original colours back over the blob mask;
            // near-black mask pixels stay black.
            for (int i = 0; i < res1.Width; i++)
            {
                for (int j = 0; j < res1.Height; j++)
                {
                    System.Drawing.Color srcColor = src1.GetPixel(i + blob.X, j + blob.Y);
                    System.Drawing.Color dstColor = res1.GetPixel(i, j);
                    if (!(dstColor.R >= 0 && dstColor.R <= 10 && dstColor.G >= 0 && dstColor.G <= 10 && dstColor.B >= 0 && dstColor.B <= 10))
                    {
                        newBmp.SetPixel(i, j, srcColor);
                    }
                    else
                    {
                        newBmp.SetPixel(i, j, Color.Black);
                    }
                }
            }
            res1 = newBmp;
            pictureBox2.Image = newBmp;
            // Edge-histogram features: grayscale -> Canny -> 6x6 grid of
            // 40x40 tiles.
            List <double> edgeCount = new List <double>();
            List <double> ratio = new List <double>();
            Bitmap Destimg = new Bitmap(pictureBox2.Image);
            GrayscaleBT709 go = new GrayscaleBT709();
            pictureBox2.Image = go.Apply((Bitmap)pictureBox2.Image);
            Destimg = go.Apply(Destimg);
            CannyEdgeDetector filter2 = new CannyEdgeDetector(0, 0, 1.4);
            pictureBox2.Image = filter2.Apply((Bitmap)pictureBox2.Image);
            Destimg = filter2.Apply(Destimg);
            var imgarray = new System.Drawing.Image[36];
            for (int i = 0; i < 6; i++)
            {
                for (int j = 0; j < 6; j++)
                {
                    var index = i * 6 + j;
                    imgarray[index] = new Bitmap(40, 40);
                    using (var graphics = Graphics.FromImage(imgarray[index]))
                    {
                        graphics.DrawImage(Destimg, new Rectangle(0, 0, 40, 40), new Rectangle(i * 40, j * 40, 40, 40), GraphicsUnit.Pixel);
                    }
                }
            }
            // Count non-black (edge) pixels per tile.
            for (int n = 0; n < 36; n++)
            {
                int counter = 0;
                Bitmap bufferImage = new Bitmap(imgarray[n]);
                for (int i = 0; i < 40; i++)
                {
                    for (int j = 0; j < 40; j++)
                    {
                        System.Drawing.Color hoefColor = bufferImage.GetPixel(i, j);
                        if (!(hoefColor.R == 0 && hoefColor.G == 0 && hoefColor.B == 0))
                        {
                            counter++;
                        }
                    }
                }
                edgeCount.Add(counter);
            }
            // Normalise; the (float) cast is kept so training-file values
            // are unchanged.
            double total = edgeCount.Sum();
            foreach (double x in edgeCount)
            {
                var a = (float)x / total;
                ratio.Add(a);
            }
            // Append "<label> <index>:<value> ..." in libsvm format.
            // `using` fixes the handle leak on the exception path.
            using (FileStream fs = new FileStream(@"D:\AI.txt", FileMode.Append, FileAccess.Write))
            using (StreamWriter sw = new StreamWriter(fs))
            {
                sw.Write((no) + " ");
                for (int i = 0; i < ratio.Count; ++i)
                {
                    sw.Write(i + ":" + ratio[i].ToString() + " ");
                }
                sw.WriteLine();
            }
            Problem train = Problem.Read(@"D:\AI.txt");
            Problem test = Problem.Read(@"D:\test.txt");
            Parameter parameters = new Parameter();
            parameters.C = 32;
            parameters.Gamma = 8;
            Model model = Training.Train(train, parameters);
            Prediction.Predict(test, @"D:\result.txt", model, false);
        }
    }
}
// Captures the current camera frame, skin-masks it in four parallel quadrants
// (multithread1), cleans it up (grayscale, dilate, erode, biggest blob),
// copies the matching region of the original frame over the blob
// (multithread2), Canny-edge-detects the result, builds an 8x8 normalised
// edge histogram, classifies it with an SVM and shows the predicted letter.
private void button2_Click(object sender, EventArgs e)
{
    if (FinalFrame.IsRunning == true)
    {
        pictureBox2.Image = (Bitmap)pictureBox1.Image.Clone();
    }
    Bitmap InputImage = (Bitmap)pictureBox2.Image;
    Rectangle Tile = new Rectangle(0, 0, InputImage.Width, InputImage.Height);
    BitmapData bitmapdata = InputImage.LockBits(Tile, ImageLockMode.ReadWrite, InputImage.PixelFormat);
    int formatsize = Bitmap.GetPixelFormatSize(bitmapdata.PixelFormat) / 8; // bytes per pixel
    var tempreg = new byte[bitmapdata.Width * bitmapdata.Height * formatsize];
    Marshal.Copy(bitmapdata.Scan0, tempreg, 0, tempreg.Length);
    // Skin-threshold the four image quadrants concurrently.
    System.Threading.Tasks.Parallel.Invoke(
        () => { multithread1(tempreg, 0, 0, bitmapdata.Width / 2, bitmapdata.Height / 2, bitmapdata.Width, formatsize); },
        () => { multithread1(tempreg, 0, bitmapdata.Height / 2, bitmapdata.Width / 2, bitmapdata.Height, bitmapdata.Width, formatsize); },
        () => { multithread1(tempreg, bitmapdata.Width / 2, 0, bitmapdata.Width, bitmapdata.Height / 2, bitmapdata.Width, formatsize); },
        () => { multithread1(tempreg, bitmapdata.Width / 2, bitmapdata.Height / 2, bitmapdata.Width, bitmapdata.Height, bitmapdata.Width, formatsize); }
    );
    Marshal.Copy(tempreg, 0, bitmapdata.Scan0, tempreg.Length);
    InputImage.UnlockBits(bitmapdata);
    // Morphological clean-up of the skin mask.
    Grayscale grayfilter = new Grayscale(0.2125, 0.7154, 0.0721); // BT709 weights
    Dilatation dilatefilter = new Dilatation();
    Erosion erodefilter = new Erosion();
    InputImage = grayfilter.Apply((Bitmap)InputImage);
    InputImage = dilatefilter.Apply((Bitmap)InputImage);
    InputImage = erodefilter.Apply((Bitmap)InputImage);
    // Keep only the biggest blob (assumed to be the hand) and remember its
    // offset within the original frame.
    ExtractBiggestBlob blob = new ExtractBiggestBlob();
    InputImage = blob.Apply(InputImage);
    int cordx = blob.BlobPosition.X;
    int cordy = blob.BlobPosition.Y;
    Bitmap source = new Bitmap(pictureBox1.Image);
    Bitmap destination = new Bitmap(InputImage);
    var sourcerectangle = new Rectangle(0, 0, source.Width, source.Height);
    var destinationrectangle = new Rectangle(0, 0, destination.Width, destination.Height);
    var sourcedata = source.LockBits(sourcerectangle, ImageLockMode.ReadWrite, source.PixelFormat);
    var destinationdata = destination.LockBits(destinationrectangle, ImageLockMode.ReadWrite, destination.PixelFormat);
    var sourcedepth = Bitmap.GetPixelFormatSize(sourcedata.PixelFormat) / 8;
    var destinationdepth = Bitmap.GetPixelFormatSize(destinationdata.PixelFormat) / 8;
    var source1 = new byte[sourcedata.Width * sourcedata.Height * sourcedepth];
    var destination1 = new byte[destinationdata.Width * destinationdata.Height * destinationdepth];
    Marshal.Copy(sourcedata.Scan0, source1, 0, source1.Length);
    Marshal.Copy(destinationdata.Scan0, destination1, 0, destination1.Length);
    // Copy the original pixels at the blob's location onto the blob image,
    // again one quadrant per task. Source coordinates are offset by
    // (cordx, cordy); destination coordinates start at 0.
    System.Threading.Tasks.Parallel.Invoke(
        () => { multithread2(source1, destination1, cordx, 0, cordy, 0, cordx + (destinationdata.Width / 2), destinationdata.Width / 2, cordy + (destinationdata.Height / 2), destinationdata.Height / 2, sourcedata.Width, destinationdata.Width, sourcedepth, destinationdepth); },
        () => { multithread2(source1, destination1, cordx + (destinationdata.Width / 2), destinationdata.Width / 2, cordy, 0, cordx + (destinationdata.Width), destinationdata.Width, cordy + (destinationdata.Height / 2), destinationdata.Height / 2, sourcedata.Width, destinationdata.Width, sourcedepth, destinationdepth); },
        () => { multithread2(source1, destination1, cordx, 0, cordy + (destinationdata.Height / 2), destinationdata.Height / 2, cordx + (destinationdata.Width / 2), destinationdata.Width / 2, cordy + (destinationdata.Height), destinationdata.Height, sourcedata.Width, destinationdata.Width, sourcedepth, destinationdepth); },
        () => { multithread2(source1, destination1, cordx + (destinationdata.Width / 2), destinationdata.Width / 2, cordy + (destinationdata.Height / 2), destinationdata.Height / 2, cordx + (destinationdata.Width), destinationdata.Width, cordy + (destinationdata.Height), destinationdata.Height, sourcedata.Width, destinationdata.Width, sourcedepth, destinationdepth); }
    );
    Marshal.Copy(source1, 0, sourcedata.Scan0, source1.Length);
    Marshal.Copy(destination1, 0, destinationdata.Scan0, destination1.Length);
    source.UnlockBits(sourcedata);
    destination.UnlockBits(destinationdata);
    InputImage = destination;
    InputImage = grayfilter.Apply((Bitmap)InputImage);
    // Edge outline of the hand region.
    CannyEdgeDetector edgesoutline = new CannyEdgeDetector();
    InputImage = edgesoutline.Apply(InputImage);
    pictureBox2.Image = InputImage;
    Bitmap blocks = new Bitmap(InputImage);
    int[] numofedges = new int[100]; // 1-based; cells 1..64 are used
    double[] normalized = new double[400];
    String alphabet = null;
    int total = 0;
    int sq = 1;
    // Count edge pixels in each cell of an 8x8 grid over the edge image.
    for (int p = 1; p <= 8; p++)
    {
        for (int q = 1; q <= 8; q++)
        {
            for (int x = (p - 1) * blocks.Width / 8; x < (p * blocks.Width / 8); x++)
            {
                for (int y = (q - 1) * blocks.Height / 8; y < (q * blocks.Height / 8); y++)
                {
                    Color colorPixel = blocks.GetPixel(x, y);
                    int r = colorPixel.R;
                    int g = colorPixel.G;
                    int b = colorPixel.B;
                    // NOTE(review): non-short-circuit `&` — same result as
                    // `&&` for these bool operands.
                    if (r != 0 & g != 0 & b != 0)
                    {
                        numofedges[sq]++;
                    }
                }
            }
            sq++;
        }
    }
    for (sq = 1; sq <= 64; sq++)
    {
        total = total + numofedges[sq];
    }
    // Build the libsvm-format feature string "<index>:<normalised count>".
    for (sq = 1; sq <= 64; sq++)
    {
        normalized[sq] = (double)numofedges[sq] / total;
        alphabet = alphabet + " " + sq.ToString() + ":" + normalized[sq].ToString();
    }
    File.WriteAllText(@"datasets\testalpha.txt", label.ToString() + alphabet + Environment.NewLine);
    // Train the SVM on the stored set and predict this sample's class.
    Problem train = Problem.Read(@"datasets\trainedset.txt");
    Problem test = Problem.Read(@"datasets\testalpha.txt");
    Parameter parameter = new Parameter();
    parameter.C = 32;
    parameter.Gamma = 8;
    Model model = Training.Train(train, parameter);
    Prediction.Predict(test, @"datasets\result.txt", model, false);
    int value = Convert.ToInt32(File.ReadAllText(@"datasets\result.txt"));
    String res = null;
    res = res + (char)(value + 65); // class 0 -> 'A'
    label1.Text = res;
}