/// <summary>
/// Compute the red pixel mask for the given image.
/// A red pixel is a pixel where: hue < 20 OR hue > 160, AND saturation > 10
/// </summary>
/// <param name="image">The color image to find the red mask from</param>
/// <param name="mask">The red pixel mask</param>
private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
{
    bool useUMat;
    using (InputOutputArray ia = mask.GetInputOutputArray())
        useUMat = ia.IsUMat;

    using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
    using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
    {
        CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
        CvInvoke.ExtractChannel(hsv, mask, 0);
        CvInvoke.ExtractChannel(hsv, s, 1);

        // the mask for hue less than 20 or larger than 160
        using (ScalarArray lower = new ScalarArray(20))
        using (ScalarArray upper = new ScalarArray(160))
            CvInvoke.InRange(mask, lower, upper, mask);
        CvInvoke.BitwiseNot(mask, mask);

        // s is the mask for saturation of at least 10, mainly used to filter out white pixels
        CvInvoke.Threshold(s, s, 10, 255, ThresholdType.Binary);
        CvInvoke.BitwiseAnd(mask, s, mask, null);
    }
}
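// A minimal usage sketch for GetRedPixelMask, assuming Emgu.CV, Emgu.CV.CvEnum and
// Emgu.CV.Structure are in scope; the file name "stop.jpg" and the demo method are
// hypothetical, not part of the original code.
private static void RedMaskDemo()
{
    using (Mat image = CvInvoke.Imread("stop.jpg", ImreadModes.Color))
    using (Mat mask = new Mat())
    using (Mat redOnly = new Mat())
    {
        GetRedPixelMask(image, mask);
        // Keep only the red pixels of the original image.
        CvInvoke.BitwiseAnd(image, image, redOnly, mask);
        CvInvoke.Imshow("red pixels", redOnly);
        CvInvoke.WaitKey(0);
    }
}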
private void BubbleDetectBtn_Click(object sender, EventArgs e)
{
    // Applying operations on the transformed image
    transformedImage = transformedImage.Resize(400, 400, Emgu.CV.CvEnum.Inter.Linear);
    Image<Bgr, byte> transCopy = transformedImage.Copy();
    Emgu.CV.Util.VectorOfVectorOfPoint qtnVect = new Emgu.CV.Util.VectorOfVectorOfPoint();
    Image<Gray, byte> qtnGray = transCopy.Convert<Gray, byte>();
    CvInvoke.GaussianBlur(qtnGray, qtnGray, new Size(5, 5), 0);
    CvInvoke.AdaptiveThreshold(qtnGray, qtnGray, 255, Emgu.CV.CvEnum.AdaptiveThresholdType.GaussianC, Emgu.CV.CvEnum.ThresholdType.Binary, 55, 9);
    CvInvoke.BitwiseNot(qtnGray, qtnGray);
    CvInvoke.FindContours(qtnGray, qtnVect, null, Emgu.CV.CvEnum.RetrType.External, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

    // CIRCLE METHOD
    Emgu.CV.Util.VectorOfPoint qtnApprox = new Emgu.CV.Util.VectorOfPoint();
    Dictionary<int, double> qtnDict = new Dictionary<int, double>();
    if (qtnVect.Size > 0)
    {
        // keep only contours with a meaningful area
        for (int i = 0; i < qtnVect.Size; i++)
        {
            double area = CvInvoke.ContourArea(qtnVect[i]);
            if (area > 70)
            {
                qtnDict.Add(i, area);
            }
        }

        var items = qtnDict.OrderByDescending(v => v.Value); //.Take(1);
        foreach (var it in items)
        {
            int key = it.Key;
            double peri = CvInvoke.ArcLength(qtnVect[key], true);
            CvInvoke.ApproxPolyDP(qtnVect[key], qtnApprox, 0.02 * peri, true);
            // a polygon with more than 6 vertices is treated as a circle (bubble)
            if (qtnApprox.Size > 6)
            {
                CircleF circle = CvInvoke.MinEnclosingCircle(qtnVect[key]);
                Point centre = new Point((int)circle.Center.X, (int)circle.Center.Y);
                // LineType.EightConnected replaces LineType.Filled, which is not a valid line type for an outline
                CvInvoke.Circle(transformedImage, centre, (int)circle.Radius, new MCvScalar(0, 255, 0), 2, Emgu.CV.CvEnum.LineType.EightConnected, 0);
                //break;
            }
        }
        MessageBox.Show("Bubbles Detected");
        bubbleImage.Image = transformedImage;
    }
}
public static Bitmap BackProject(Bitmap bmp, int[] HueRange, int[] SaturationRange)
{
    Emgu.CV.Image<Bgr, Byte> Mask = new Image<Bgr, Byte>(bmp);   // wrap the bitmap in an Emgu image
    Mat Copy = new Mat();                                        // result Mat
    bool useUMat;                                                // flag for the Mat/UMat check
    using (InputOutputArray ia = Copy.GetInputOutputArray())     // determine the array type
        useUMat = ia.IsUMat;

    using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())   // hue workspace
    using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())     // saturation workspace
    {
        CvInvoke.CvtColor(Mask, hsv, ColorConversion.Bgr2Hsv);   // convert the image to HSV
        CvInvoke.ExtractChannel(hsv, Copy, 0);                   // extract the hue channel
        CvInvoke.ExtractChannel(hsv, s, 1);                      // extract the saturation channel

        // build the mask for hue outside [HueRange[0], HueRange[1]]
        using (ScalarArray lower = new ScalarArray(HueRange[0])) // hue min
        using (ScalarArray upper = new ScalarArray(HueRange[1])) // hue max
            CvInvoke.InRange(Copy, lower, upper, Copy);          // 255 where hue is inside the range
        CvInvoke.BitwiseNot(Copy, Copy);                         // invert: select hue values outside the range

        // s is the mask for saturation of at least SaturationRange[0]; mainly filters out white pixels
        CvInvoke.Threshold(s, s, SaturationRange[0], SaturationRange[1], ThresholdType.Binary);
        CvInvoke.BitwiseAnd(Copy, s, Copy, null);                // keep pixels matching both hue and saturation
    }
    return Copy.Bitmap;
}
public static Image<Gray, byte> getPTXImage(String cFileName)
{
    Image<Bgr, byte> fromImage = new Image<Bgr, byte>(cFileName);
    Rectangle rect = new Rectangle();
    rect.X = StringEx.getInt(Config.GetAppSettings(AppConfig.RECT_LEFT));
    rect.Y = StringEx.getInt(Config.GetAppSettings(AppConfig.RECT_TOP));
    rect.Width = StringEx.getInt(Config.GetAppSettings(AppConfig.RECT_WIDTH));
    rect.Height = StringEx.getInt(Config.GetAppSettings(AppConfig.RECT_HEIGHT));

    int iBINARY_MIN = StringEx.getInt(Config.GetAppSettings(AppConfig.BINARY_MIN));
    int iBINARY_MAX = StringEx.getInt(Config.GetAppSettings(AppConfig.BINARY_MAX));

    // Crop
    Image<Bgr, byte> vRectImage = fromImage.GetSubRect(rect);
    // Convert to grayscale
    Image<Gray, byte> GrayImage = vRectImage.Convert<Gray, byte>();
    // Invert black and white
    Image<Gray, byte> ResultGrayImage = new Image<Gray, byte>(vRectImage.Width, vRectImage.Height);
    CvInvoke.BitwiseNot(GrayImage, ResultGrayImage);
    GrayImage = ResultGrayImage;
    // Binarize
    ResultGrayImage = new Image<Gray, byte>(vRectImage.Width, vRectImage.Height);
    CvInvoke.Threshold(GrayImage, ResultGrayImage, iBINARY_MIN, iBINARY_MAX, Emgu.CV.CvEnum.ThresholdType.Binary);
    return ResultGrayImage;
}
/// <summary>
/// Return a mask for where skin is detected, using thresholds from calibration
/// </summary>
/// <param name="input">Input image</param>
/// <returns>The skin mask</returns>
public Mat GetSkinMask(Mat input)
{
    Mat skinMask = Mat.Zeros(input.Height, input.Width, DepthType.Cv8U, 3);
    if (!this.Calibrated)
    {
        return skinMask;
    }

    Mat hsvInput = new Mat();
    CvInvoke.CvtColor(input, hsvInput, ColorConversion.Rgb2Hsv);
    CvInvoke.InRange(
        hsvInput,
        new ScalarArray(new MCvScalar(hLowThreshold, sLowThreshold, vLowThreshold)),
        new ScalarArray(new MCvScalar(hHighThreshold, sHighThreshold, vHighThreshold)),
        skinMask);

    performOpening(skinMask, ElementShape.Ellipse, new Size(1, 1));
    CvInvoke.Dilate(skinMask, skinMask, new Mat(), new Point(0, 0), 6, BorderType.Default, new MCvScalar());
    CvInvoke.BitwiseNot(skinMask, skinMask);

    return skinMask;
}
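// The performOpening helper above is not shown. A minimal sketch of what it might
// look like — a morphological opening with the given element shape and size; the
// signature is assumed from the call site, not taken from the original code:
private void performOpening(Mat binaryImage, ElementShape shape, Size size)
{
    // Opening = erosion followed by dilation; removes small speckles from the mask.
    using (Mat element = CvInvoke.GetStructuringElement(shape, size, new Point(-1, -1)))
        CvInvoke.MorphologyEx(binaryImage, binaryImage, MorphOp.Open, element,
            new Point(-1, -1), 1, BorderType.Default, new MCvScalar());
}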
public static void preprocess(Mat imgOriginal, ref Mat imgGrayscale, ref Mat imgThresh)
{
    imgGrayscale = extractValue(imgOriginal);   // extract only the value channel from the original image to get imgGrayscale

    Mat imgMaxContrastGrayscale = imgGrayscale;
    //Mat imgMaxContrastGrayscale = maximizeContrast(imgGrayscale);   // maximize contrast with top hat and black hat

    Mat imgBlurred = new Mat();
    CvInvoke.GaussianBlur(imgMaxContrastGrayscale, imgBlurred, new Size(GAUSSIAN_BLUR_FILTER_SIZE, GAUSSIAN_BLUR_FILTER_SIZE), 0.6);   // gaussian blur

    // adaptive threshold to get imgThresh (note: this result is overwritten by the Otsu threshold below)
    CvInvoke.AdaptiveThreshold(imgBlurred, imgThresh, 255.0, AdaptiveThresholdType.GaussianC, ThresholdType.BinaryInv, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT);

    MCvScalar tempVal = CvInvoke.Mean(imgBlurred);
    double average = tempVal.V0;

    CvInvoke.Threshold(imgBlurred, imgThresh, 0, 255.0, Emgu.CV.CvEnum.ThresholdType.Otsu);
    CvInvoke.Erode(imgThresh, imgThresh, null, Point.Empty, 1, BorderType.Default, new MCvScalar(0));
    CvInvoke.Dilate(imgThresh, imgThresh, null, Point.Empty, 1, BorderType.Default, new MCvScalar(0));
    CvInvoke.BitwiseNot(imgThresh, imgThresh);
}
public static Mat PreprocessImageForTesseract(Mat img)
{
    // downscale to 18 % of the original size
    int scalePercent = 18;
    int newWidth = img.Width * scalePercent / 100;
    int newHeight = img.Height * scalePercent / 100;
    CvInvoke.Resize(img, img, new System.Drawing.Size(newWidth, newHeight), interpolation: Inter.Area);
    img = ImageProcessor.ApplyBlur(img, 0, 3);

    Mat output = new Mat(img.Size, DepthType.Cv8U, 3);
    CvInvoke.CvtColor(img, img, ColorConversion.Bgr2Gray);
    //CvInvoke.EqualizeHist(img, img);
    CvInvoke.BitwiseNot(img, img);
    //img = ImageProcessor.CannyEdgeDetection(img, 20, 20);
    //img = ImageProcessor.ApplyErosion(img, 3);
    //CvInvoke.GaussianBlur(img, img, new System.Drawing.Size(3, 3), 0);
    CvInvoke.AdaptiveThreshold(img, img, 255, AdaptiveThresholdType.GaussianC, ThresholdType.Binary, 11, 2);
    CvInvoke.Threshold(img, output, 0, 255, ThresholdType.Otsu);   //double ret =
    //output = ImageProcessor.ApplyErosion(output, 3);
    //CvInvoke.Threshold(output, output, ret, 255, ThresholdType.Binary);

    var kernel = CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Rectangle, new System.Drawing.Size(2, 2), new System.Drawing.Point(-1, -1));
    CvInvoke.Dilate(output, output, kernel, new System.Drawing.Point(-1, -1), 2, Emgu.CV.CvEnum.BorderType.Constant, default(MCvScalar));
    //output = ImageProcessor.ApplyDilation(output, 7);
    return output;
}
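// A minimal sketch of feeding the preprocessed Mat to Emgu's Tesseract wrapper,
// mirroring the Init/Recognize/GetText calls used elsewhere in this collection
// (Emgu CV 3.x-era API); the tessdata path, language and file name are assumptions.
Tesseract ocr = new Tesseract();
ocr.Init(@"C:\Emgu\tessdata", "eng", OcrEngineMode.Default);
Mat prepared = PreprocessImageForTesseract(CvInvoke.Imread("label.png", ImreadModes.Color));
ocr.Recognize(prepared);
string text = ocr.GetText();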
private void Start()
{
    invert = new Mat();
    string path = Application.streamingAssetsPath + "/test.jpg";
    inputImage = CvInvoke.Imread(path, Emgu.CV.CvEnum.ImreadModes.AnyColor);
    CvInvoke.BitwiseNot(inputImage, invert);
    t_img = inputImage.ToImage<Bgr, byte>();
    data = t_img.Data;
    tex = new Texture2D(invert.Width, invert.Height);
    img.rectTransform.sizeDelta = new Vector2(invert.Width, invert.Height);

    int l1 = data.GetLength(0);   // rows (height)
    int l2 = data.GetLength(1);   // columns (width)
    for (int y = l1 - 1; y >= 0; y--)
    {
        for (int x = 0; x < l2; x++)
        {
            // Image<Bgr, byte>.Data is indexed [row, column, channel] with B = 0, G = 1, R = 2
            float r = data[y, x, 2] / 255.0f;
            float g = data[y, x, 1] / 255.0f;
            float b = data[y, x, 0] / 255.0f;
            Color col = new Color(r, g, b);
            float noiseSample = Mathf.PerlinNoise(x * noiseGranularity, y * noiseGranularity) * noiseScale;
            col = col - new Color(noiseSample, noiseSample, noiseSample);
            col.a = 1.0f;
            // mirror both axes; (l2 - 1 - x, l1 - 1 - y) stays inside the texture bounds,
            // unlike the original Math.Abs(x - l2), which could index one past the edge
            tex.SetPixel(l2 - 1 - x, l1 - 1 - y, col);
        }
    }
    tex.Apply();
    theSprite = Sprite.Create(tex, new Rect(0f, 0f, (float)invert.Width, (float)invert.Height), Vector2.zero);
    img.sprite = theSprite;
}
/// <summary>
/// Compute the porosity of an image sequence whose file names follow a specific pattern.
/// By default, white is the pore phase and black is the solid phase; in that case the
/// images do not need to be inverted.
/// example:
/// ComputeParameters.ComputePorosity(@".\result\", "{0:D4}.bmp", 10, 21, ref porosity);
/// </summary>
/// <param name="folder">Folder name</param>
/// <param name="pattern">File-name format</param>
/// <param name="startIndex">First index</param>
/// <param name="endIndex">Last index</param>
/// <param name="porosity">Porosity</param>
/// <param name="needReverse">Whether the images need to be inverted</param>
/// <returns></returns>
public static bool ComputePorosity(string folder, string pattern, int startIndex, int endIndex, ref double porosity, bool needReverse = false)
{
    bool res = true;
    string fn = "";
    porosity = 0.0;
    Matrix<double> pMat = new Matrix<double>(endIndex - startIndex + 1, 1);
    Mat img;
    for (int i = startIndex; i <= endIndex; i++)
    {
        fn = string.Format(folder + pattern, i);
        img = CvInvoke.Imread(fn, Emgu.CV.CvEnum.LoadImageType.Grayscale);
        if (img.IsEmpty)
        {
            res = false;
            break;
        }
        if (needReverse)
        {
            CvInvoke.BitwiseNot(img, img);
        }
        // porosity of one slice = fraction of non-zero (pore) pixels
        pMat[i - startIndex, 0] = CvInvoke.CountNonZero(img) * 1.0 / (img.Cols * img.Rows);
        porosity += pMat[i - startIndex, 0];
    }
    if (res)
    {
        porosity /= (endIndex - startIndex + 1);   // average over the sequence (mirrors the folder-based overload)
        DataReadWriteHelper.RecordInfo("porosity.txt", PackingSystemSetting.ResultDir, pMat, false);
    }
    return res;
}
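// A minimal usage sketch, taking the path, pattern and index range from the example
// in the doc comment above; it assumes files 0010.bmp … 0021.bmp exist under .\result\.
double porosity = 0.0;
if (ComputeParameters.ComputePorosity(@".\result\", "{0:D4}.bmp", 10, 21, ref porosity))
    Console.WriteLine($"mean porosity = {porosity:F4}");   // mean pore fraction over the 12 slices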
public void GridDetection()
{
    // clone the original (grayscale) image
    Mat image = originalImage.Mat.Clone();

    // blur the image
    CvInvoke.GaussianBlur(image, image, new Size(11, 11), 0);

    // threshold the image
    CvInvoke.AdaptiveThreshold(image, image, 255, AdaptiveThresholdType.MeanC, ThresholdType.Binary, 5, 2);
    CvInvoke.BitwiseNot(image, image);

    // dilate with a cross-shaped 3x3 kernel to close gaps in the grid lines
    Mat kernel = new Mat(new Size(3, 3), DepthType.Cv8U, 1);
    Marshal.Copy(new byte[] { 0, 1, 0, 1, 1, 1, 0, 1, 0 }, 0, kernel.DataPointer, 9);
    CvInvoke.Dilate(image, image, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));

    FindOuterGridByFloorFill(image);

    CvInvoke.Erode(image, image, kernel, new Point(-1, -1), 1, BorderType.Default, new MCvScalar(255));
    ImageShowCase.ShowImage(image, "biggest blob");

    VectorOfPointF lines = new VectorOfPointF();
    CvInvoke.HoughLines(image, lines, 1, Math.PI / 180, 200);

    // merging lines
    PointF[] linesArray = lines.ToArray();
    //MergeLines(linesArray, image);
    lines = RemoveUnusedLine(linesArray);

    Mat harrisResponse = new Mat(image.Size, DepthType.Cv8U, 1);
    CvInvoke.CornerHarris(image, harrisResponse, 5);

    DrawLines(lines.ToArray(), image);
    ImageShowCase.ShowImage(image, "corners");
}
private void ProcessImage(List<string> lines)
{
    Mat imageOriginal = CvInvoke.Imread(ImageRecievedName, LoadImageType.AnyColor);
    var imageWithHitsBgr = CreateHitImage(imageOriginal.Size, lines);

    // create a mask with white circles wherever hits exist, black everywhere else
    var mask = new Mat();
    CvInvoke.Threshold(imageWithHitsBgr, mask, 1, 255, ThresholdType.Binary);
    var inverseMask = new Mat();
    CvInvoke.BitwiseNot(mask, inverseMask);

    // map the gray levels to a color map
    CvInvoke.ApplyColorMap(imageWithHitsBgr, imageWithHitsBgr, ColorMapType.Jet);

    // from the mapped image remove everything except the hits
    var imageWithHitsWithoutBackground = new Mat();
    CvInvoke.BitwiseAnd(imageWithHitsBgr, imageWithHitsBgr, imageWithHitsWithoutBackground, mask);

    // from the original image remove only the parts where hits happened
    var imageOriginalWithoutHits = new Mat();
    CvInvoke.BitwiseAnd(imageOriginal, imageOriginal, imageOriginalWithoutHits, inverseMask);

    // the result combines the original image without hits and the color-mapped hits
    var result = new Mat();
    CvInvoke.Add(imageOriginalWithoutHits, imageWithHitsWithoutBackground, result);
    result.Save(ImageProcessedName);
}
private PointF GetLocation()
{
    // compute the bounding rectangle once instead of four times
    Rectangle bounds = CvInvoke.BoundingRectangle(contour);
    int x = bounds.Right - bounds.Width / 2;
    int y = bounds.Bottom - bounds.Height / 2;
    PointF p = new PointF(x, y);

    Mat usefulMat = new Mat();
    if (noteType > 2)
    {
        CvInvoke.Erode(blobMat, usefulMat, CvInvoke.GetStructuringElement(ElementShape.Cross, new Size(27, 27), new Point(13, 13)), new Point(1, 1), 1, BorderType.Default, new MCvScalar(1));
        CvInvoke.Dilate(usefulMat, usefulMat, CvInvoke.GetStructuringElement(ElementShape.Ellipse, new Size(27, 27), new Point(13, 13)), new Point(1, 1), 1, BorderType.Default, new MCvScalar(1));
        CvInvoke.BitwiseNot(usefulMat, usefulMat);
    }
    else
    {
        usefulMat = blobMat;
    }

    VectorOfKeyPoint keyPoints = new VectorOfKeyPoint(detector.Detect(usefulMat));
    for (int i = 0; i < keyPoints.Size; i++)
    {
        // take the first key point whose Y coordinate lies within `width` of the contour centre
        if (keyPoints[i].Point.Y - width < p.Y && keyPoints[i].Point.Y + width > p.Y)
        {
            p.X = keyPoints[i].Point.X;
            p.Y = keyPoints[i].Point.Y;
            break;
        }
    }
    return p;
}
/// <summary>
/// Finds the red pixels in an image
/// </summary>
/// <param name="image">The image to process</param>
/// <param name="mask">The pixel mask</param>
private static void GetRedPixelMask(IInputArray image, IInputOutputArray mask)
{
    bool useUMat;
    using (InputOutputArray ia = mask.GetInputOutputArray())
    {
        useUMat = ia.IsUMat;
    }

    using (IImage hsv = useUMat ? (IImage) new UMat() : (IImage) new Mat())
    using (IImage s = useUMat ? (IImage) new UMat() : (IImage) new Mat())
    {
        CvInvoke.CvtColor(image, hsv, ColorConversion.Bgr2Hsv);
        CvInvoke.ExtractChannel(hsv, mask, 0);
        CvInvoke.ExtractChannel(hsv, s, 1);

        // mask for hue between 20 and 160, inverted below to select red hues
        using (ScalarArray lower = new ScalarArray(20))
        using (ScalarArray upper = new ScalarArray(160))
        {
            CvInvoke.InRange(mask, lower, upper, mask);
        }
        CvInvoke.BitwiseNot(mask, mask);

        // mask for saturation of at least 15
        CvInvoke.Threshold(s, s, 15, 255, ThresholdType.Binary);
        CvInvoke.BitwiseAnd(mask, s, mask, null);
    }
}
private void Recognize_Click(object sender, EventArgs e)
{
    if (ofd.FileName != "")
    {
        InImage = new Image<Gray, byte>(ofd.FileName);
        // threshold, then invert into a separate image
        // (the original aliased all four references to the same object)
        ThresImage = InImage.ThresholdBinary(new Gray(200), new Gray(255));
        OutImage = ThresImage;
        BitNotImage = ThresImage.CopyBlank();
        CvInvoke.BitwiseNot(ThresImage, BitNotImage);
        if (checkBox1.Checked)
        {
            CvInvoke.MorphologyEx(BitNotImage, OutImage, Emgu.CV.CvEnum.MorphOp.Dilate,
                CvInvoke.GetStructuringElement(Emgu.CV.CvEnum.ElementShape.Ellipse, new Size(3, 3), new Point(-1, -1)),
                new Point(-1, -1), 1, Emgu.CV.CvEnum.BorderType.Default, new MCvScalar());
        }

        Tesseract tess = new Tesseract();
        tess.Init(@"C:\Emgu\tessdata", "eng", OcrEngineMode.Default);
        tess.SetVariable("tessedit_char_blacklist", "\\/'`‘");
        tess.Recognize(OutImage);
        captchaBox.Image = OutImage.ToBitmap();
        captchaBox.Refresh();
        result.Text = "Result: " + tess.GetText();
    }
}
/// <summary>
/// Compute the porosity of the image sequence formed by all .bmp files in a folder.
/// By default, white is the pore phase and black is the solid phase; in that case the
/// images do not need to be inverted.
/// example:
/// ComputeParameters.ComputePorosity(dlg.SelectedPath, ref porosity, true)
/// </summary>
/// <param name="folder">Folder name</param>
/// <param name="porosity">Porosity</param>
/// <param name="needReverse">Whether the images need to be inverted</param>
/// <returns></returns>
public static bool ComputePorosity(string folder, ref double porosity, bool needReverse = false)
{
    bool res = true;
    porosity = 0.0;
    if (Directory.Exists(folder))
    {
        var files = Directory.GetFiles(folder, "*.bmp");
        Matrix<double> pMat = new Matrix<double>(files.Count(), 1);
        Mat img;
        int index = 0;
        foreach (var file in files)
        {
            img = CvInvoke.Imread(file, Emgu.CV.CvEnum.LoadImageType.Grayscale);
            if (needReverse)
            {
                CvInvoke.BitwiseNot(img, img);
            }
            pMat[index, 0] = CvInvoke.CountNonZero(img) * 1.0 / (img.Cols * img.Rows);
            porosity += pMat[index, 0];
            index++;
        }
        porosity /= files.Count();
        DataReadWriteHelper.RecordInfo("porosity.txt", PackingSystemSetting.ResultDir, pMat, false);
    }
    else
    {
        res = false;
    }
    return res;
}
public static List<UMat> GetCharacters(UMat plate)
{
    bool resultThreshWork = false;
    double thresholdValue = 100;

    ///// this block is used when you want to hard-split a specific bitmap into separate letters
    //var path = Path.GetFullPath("C:\\Users\\jurek993\\Desktop\\images.bmp");
    //BitmapImage image = new Bitmap(
    //    new Bitmap(path)).ToBitmapImage();
    //Image<Bgr, Byte> imageCV = new Image<Bgr, byte>(image.ToBitmap());
    //Mat ItsWrong_ChangeItToPlate = imageCV.Mat;
    /////

    UMat plateThresh = new UMat();
    List<UMat> characters = new List<UMat>();

    // lower the threshold in steps of 10 until enough black pixels appear
    do
    {
        CvInvoke.Threshold(plate, plateThresh, thresholdValue, 255, ThresholdType.BinaryInv);
        resultThreshWork = CheckHowManyBlackColor(plateThresh.Bitmap, 20);
        thresholdValue -= 10;
    } while (!resultThreshWork && thresholdValue > 0);

    Size plateSize = plate.Size;
    List<Rectangle> rectangles = FindCounturedCharacters(plateThresh, plateSize);

    // fall back to Canny edges when too few character boxes were found
    if (rectangles.Count <= 5)
    {
        using (UMat plateCanny = new UMat())
        {
            CvInvoke.Canny(plate, plateCanny, 100, 50);
            rectangles = FindCounturedCharacters(plateCanny, plateSize);
            if (rectangles.Count < 5)
            {
                rectangles = new List<Rectangle>();
            }
        }
    }

    var orderedRectangles = rectangles.OrderBy(x => x.X).ToList();
    foreach (var rect in orderedRectangles)
    {
        var cloneTresh = plateThresh.Clone();
        var character = new UMat(cloneTresh, rect);
        CvInvoke.BitwiseNot(character, character);
        var result = CheckHowManyBlackColor(character.Bitmap, 10);
        if (result)
        {
            characters.Add(character);
        }
        //CvInvoke.Erode(character, character, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
        //CvInvoke.Dilate(character, character, null, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
        character.Bitmap.Save("C:\\Users\\jurek993\\Desktop\\test\\" + rect.Size.ToString() + rect.Location.ToString() + ".bmp", ImageFormat.Bmp);
    }
    return characters;
}
public Image<Gray, Byte> FillMask(Image<Gray, Byte> input)
{
    // Fill all parts of the image that can be reached from the edges
    Image<Gray, Byte> reachableBackground = input.Clone();

    for (int x = 0; x < input.Width; x++)
    {
        MCvScalar fillValue = new MCvScalar(255);
        Rectangle boundingBox = new Rectangle();
        MCvScalar minDiff = new MCvScalar(0);
        MCvScalar maxDiff = new MCvScalar(255);

        // Top pixel
        Point startFromTop = new Point(x, 0);
        if (reachableBackground.Data[0, x, 0] == 0)
        {
            CvInvoke.FloodFill(reachableBackground, null, startFromTop, fillValue, out boundingBox, minDiff, maxDiff);
        }

        // Bottom pixel
        Point startFromBottom = new Point(x, input.Height - 1);
        if (reachableBackground.Data[input.Height - 1, x, 0] == 0)
        {
            CvInvoke.FloodFill(reachableBackground, null, startFromBottom, fillValue, out boundingBox, minDiff, maxDiff);
        }
    }

    for (int y = 0; y < input.Height; y++)
    {
        MCvScalar fillValue = new MCvScalar(255);
        Rectangle boundingBox = new Rectangle();
        MCvScalar minDiff = new MCvScalar(0);
        MCvScalar maxDiff = new MCvScalar(255);

        // Left pixel
        Point startFromLeft = new Point(0, y);
        if (reachableBackground.Data[y, 0, 0] == 0)
        {
            CvInvoke.FloodFill(reachableBackground, null, startFromLeft, fillValue, out boundingBox, minDiff, maxDiff);
        }

        // Right pixel
        Point startFromRight = new Point(input.Width - 1, y);
        if (reachableBackground.Data[y, input.Width - 1, 0] == 0)
        {
            CvInvoke.FloodFill(reachableBackground, null, startFromRight, fillValue, out boundingBox, minDiff, maxDiff);
        }
    }

    // Grab the unreachable holes in the original image
    Image<Gray, Byte> holesToFill = reachableBackground.Clone();
    CvInvoke.BitwiseNot(reachableBackground, holesToFill);

    Image<Gray, Byte> filledImg = input.Clone();
    CvInvoke.BitwiseOr(input, holesToFill, filledImg);
    return filledImg;
}
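// A minimal usage sketch for FillMask, assuming a binary Image<Gray, byte> whose
// foreground is 255; the file name "blob.png" is hypothetical.
Image<Gray, byte> blob = new Image<Gray, byte>("blob.png");
Image<Gray, byte> binary = blob.ThresholdBinary(new Gray(128), new Gray(255));
Image<Gray, byte> solid = FillMask(binary);   // interior holes are now filled white
CvInvoke.Imshow("filled", solid);
CvInvoke.WaitKey(0);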
private async void CaptureAndProcessButtonClick(object sender, RoutedEventArgs e)
{
    using (Mat img = await Mat.FromMediaCapture(_mediaCapture))
    {
        CvInvoke.BitwiseNot(img, img);
        ImageView.Source = img.ToWritableBitmap();
    }
}
public void InvertMask()
{
    if (!HaveInputMask)
    {
        return;
    }
    CvInvoke.BitwiseNot(Mask, Mask);
}
private Matrix<byte> GetSmoothedInvertedImage(Matrix<byte> grayImage)
{
    Matrix<byte> newImage = new Matrix<byte>(grayImage.Size);
    CvInvoke.GaussianBlur(grayImage, newImage, new Size(_weightBlurSize, _weightBlurSize), 0, 0);
    CvInvoke.BitwiseNot(newImage, newImage);
    return newImage;
}
public void ThreadMain()
{
    //MessageBox.Show("Hi from the thread!");
    VideoWriter writer = new VideoWriter("video.mp4", 60, new Size(1280, 720), true);
    int frame = 0;
    Capture cap = new Emgu.CV.Capture(@"C:\Users\Peter Husman\Downloads\Wildlife.wmv");
    Mat minions = new Capture(@"C:\Users\Peter Husman\Downloads\maxresdefault.jpg").QueryFrame();
    Mat data = new Mat();
    Mat chroma = new Mat();
    Mat threshold = new Mat();
    Mat bNot = new Mat();
    Mat minionsMask = new Mat();
    Mat vidMask = new Mat();
    var filter = new BackgroundSubtractorMOG();

    while (true)
    {
        try
        {
            cap.Grab();
            bool grabbed = cap.Retrieve(data);

            // select the green-screen pixels of the foreground image
            CvInvoke.InRange(minions, new ScalarArray(new Emgu.CV.Structure.MCvScalar(0, 206, 0)), new ScalarArray(new Emgu.CV.Structure.MCvScalar(129, 255, 164)), threshold);
            threshold.CopyTo(bNot);
            CvInvoke.BitwiseNot(bNot, bNot);

            // keep the non-green parts of the foreground and the green-screen region of the video
            Mask(minions, bNot, minionsMask);
            Mask(data, threshold, vidMask);
            CvInvoke.BitwiseOr(minionsMask, vidMask, chroma);

            //CvInvoke.CvtColor(data, hsv, ColorConversion.Bgr2Hsv);
            //filter.Apply(data, hsv);
            //ChromaKey(data, minions, chroma, min, max);
            //CvInvoke.Imwrite($"{fileLocation}{frame.ToString()}.jpg", data);
            //writer.Write(chroma);
            CvInvoke.Imshow("Window", chroma);
            CvInvoke.WaitKey(1);
            frame++;
        }
        catch (Exception ex)
        {
            // swallow the error and keep processing the next frame
        }
    }
}
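// The Mask helper called above is not shown; from the call sites it appears to keep
// only the pixels of src where the mask is non-zero. A minimal sketch under that
// assumption, using the same BitwiseAnd-with-mask pattern seen elsewhere in this
// collection:
private static void Mask(Mat src, Mat mask, Mat dst)
{
    // dst = src where mask != 0, black elsewhere
    CvInvoke.BitwiseAnd(src, src, dst, mask);
}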
public static Rectangle findGridInImage(Image<Bgr, byte> image)
{
    Image<Gray, byte> dest = new Image<Gray, byte>(image.Width, image.Height);
    dest = image.InRange(new Bgr(Color.FromArgb(250, 247, 238)), new Bgr(Color.FromArgb(251, 248, 239)));
    Image<Gray, byte> dest2 = new Image<Gray, byte>(image.Width, image.Height);
    CvInvoke.BitwiseNot(dest, dest2, null);

    VectorOfVectorOfPoint result = new VectorOfVectorOfPoint();
    CvInvoke.FindContours(dest2, result, null, Emgu.CV.CvEnum.RetrType.List, Emgu.CV.CvEnum.ChainApproxMethod.ChainApproxSimple);

    List<Tuple<Rectangle, int>> possibleGrids = new List<Tuple<Rectangle, int>>();
    for (int i = 0; i < result.Size; i++)
    {
        VectorOfPoint vop = result[i];
        double contourArea = CvInvoke.ContourArea(vop, false);
        Rectangle r = CvInvoke.BoundingRectangle(vop);
        int boundingArea = r.Width * r.Height;

        // the bounding area will always be larger than the contour area
        double areaRatio = boundingArea / contourArea;
        //if (areaRatio > 1.5)
        //    continue;

        // reject rectangles that are far from square
        if (r.Width > r.Height)
        {
            if ((float)r.Width / r.Height > 1.2)
            {
                continue;
            }
        }
        else
        {
            if ((float)r.Height / r.Width > 1.2)
            {
                continue;
            }
        }
        possibleGrids.Add(new Tuple<Rectangle, int>(r, boundingArea));
    }

    possibleGrids = possibleGrids.OrderBy(x => x.Item2).ToList();
    Rectangle grid = possibleGrids.First().Item1;

    Bgr lower = new Bgr(Color.FromArgb(200, 200, 200));
    Bgr upper = new Bgr(Color.FromArgb(255, 255, 255));
    shrinkBoxAroundObject(image, ref grid, lower, upper);
    return grid;
}
private void loadImage(string ImageURL)
{
    OriginalImage = CvInvoke.Imread(ImageURL, Emgu.CV.CvEnum.ImreadModes.Grayscale);
    CvInvoke.Resize(OriginalImage, OriginalImage, new Size(2159, 3060));
    originalThresholded = new Mat();
    CvInvoke.Threshold(OriginalImage, originalThresholded, 0, 255, Emgu.CV.CvEnum.ThresholdType.Otsu);
    thresholded = originalThresholded.Clone();
    CvInvoke.BitwiseNot(thresholded, thresholded);
}
private void filter()
{
    if (!imageSelected)
    {
        return;
    }

    int r1, g1, b1, r2, g2, b2;
    getValuesFromControls(out r1, out g1, out b1, out r2, out g2, out b2);

    if (grid.RowCount == 0)
    {
        CvInvoke.InRange(matIn, new ScalarArray(new MCvScalar(b1, g1, r1)), new ScalarArray(new MCvScalar(b2, g2, r2)), matOut);
    }
    else
    {
        // prepare the first range
        Mat temp = new Mat();
        getValuesFromGrid(out r1, out g1, out b1, out r2, out g2, out b2, 0);
        CvInvoke.InRange(matIn, new ScalarArray(new MCvScalar(b1, g1, r1)), new ScalarArray(new MCvScalar(b2, g2, r2)), matOut);

        // apply the other ranges
        for (int i = 1; i < grid.RowCount; i++)
        {
            String type = grid["colType", i].Value.ToString();
            getValuesFromGrid(out r1, out g1, out b1, out r2, out g2, out b2, i);
            CvInvoke.InRange(matIn, new ScalarArray(new MCvScalar(b1, g1, r1)), new ScalarArray(new MCvScalar(b2, g2, r2)), temp);

            // apply the operator: Union or Subtract
            if (type.Equals("Union"))
            {
                CvInvoke.BitwiseOr(temp, matOut, matOut);
            }
            else
            {
                // Subtract: with matOut as the mask, pixels currently set are overwritten
                // with ~temp, so pixels matched by this range are removed from the result
                CvInvoke.BitwiseNot(temp, matOut, matOut);
            }
        }
    }
    img2.Image = matOut.ToImage<Bgr, Byte>();
}
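// The masked BitwiseNot above is a compact way of writing matOut &= ~temp. A more
// explicit sketch of the same "Subtract" step, assuming both mats are single-channel
// 8-bit masks:
Mat notTemp = new Mat();
CvInvoke.BitwiseNot(temp, notTemp);            // invert the new range mask
CvInvoke.BitwiseAnd(matOut, notTemp, matOut);  // drop its pixels from the accumulated mask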
public static Mat ColorToTransparent(Mat image, System.Windows.Media.Color transparentColor)
{
    // mask of all pixels that exactly match the transparent color
    Mat transparentMask = new Mat();
    IInputArray transparency = new ScalarArray(new MCvScalar(transparentColor.B, transparentColor.G, transparentColor.R, transparentColor.A));
    CvInvoke.InRange(image, transparency, transparency, transparentMask);
    CvInvoke.BitwiseNot(transparentMask, transparentMask);   // keep everything except that color

    // copy the image through the mask into a separate result
    // (copying into the mask itself, as the original did, reads and writes the same buffer)
    Mat result = Mat.Zeros(image.Rows, image.Cols, image.Depth, image.NumberOfChannels);
    image.CopyTo(result, transparentMask);
    return result;
}
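// A minimal usage sketch, assuming a WPF context where System.Windows.Media.Colors is
// available and the image carries the channel count the scalar expects; "logo.png" is
// hypothetical.
Mat logo = CvInvoke.Imread("logo.png", ImreadModes.Unchanged);   // keep the alpha channel if present
Mat cleaned = ColorToTransparent(logo, System.Windows.Media.Colors.White);
CvInvoke.Imshow("background removed", cleaned);
CvInvoke.WaitKey(0);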
// Update is called once per frame
void Update()
{
    if (webcamTexture != null && webcamTexture.didUpdateThisFrame)
    {
        if (data == null || (data.Length != webcamTexture.width * webcamTexture.height))
        {
            data = new Color32[webcamTexture.width * webcamTexture.height];
        }
        webcamTexture.GetPixels32(data);

        if (bytes == null || bytes.Length != data.Length * 3)
        {
            bytes = new byte[data.Length * 3];
        }

        // pin the managed buffers so the Mats can wrap them without copying
        GCHandle handle = GCHandle.Alloc(data, GCHandleType.Pinned);
        GCHandle resultHandle = GCHandle.Alloc(bytes, GCHandleType.Pinned);
        using (Mat bgra = new Mat(new Size(webcamTexture.width, webcamTexture.height), DepthType.Cv8U, 4, handle.AddrOfPinnedObject(), webcamTexture.width * 4))
        using (Mat bgr = new Mat(webcamTexture.height, webcamTexture.width, DepthType.Cv8U, 3, resultHandle.AddrOfPinnedObject(), webcamTexture.width * 3))
        {
            CvInvoke.CvtColor(bgra, bgr, ColorConversion.Bgra2Bgr);

            #region do some image processing here
            CvInvoke.BitwiseNot(bgr, bgr);
            #endregion

            if (flip != FlipType.None)
            {
                CvInvoke.Flip(bgr, bgr, flip);
            }
        }
        handle.Free();
        resultHandle.Free();

        if (resultTexture == null || resultTexture.width != webcamTexture.width || resultTexture.height != webcamTexture.height)
        {
            resultTexture = new Texture2D(webcamTexture.width, webcamTexture.height, TextureFormat.RGB24, false);
        }
        resultTexture.LoadRawTextureData(bytes);
        resultTexture.Apply();

        if (!_textureResized)
        {
            //this.GetComponent<GUITexture>().pixelInset = new Rect(-webcamTexture.width / 2, -webcamTexture.height / 2, webcamTexture.width, webcamTexture.height);
            ResizeTexture(resultTexture);
            _textureResized = true;
        }

        transform.rotation = baseRotation * Quaternion.AngleAxis(webcamTexture.videoRotationAngle, Vector3.up);
        //this.GetComponent<GUITexture>().texture = resultTexture;
        RenderTexture(resultTexture);
        //count++;
    }
}
public static Image<Bgr, float> Bgr2Hsv(
    Image<Gray, float> b,
    Image<Gray, float> g,
    Image<Gray, float> r)
{
    var v = r.Max(g).Max(b);
    var h = new Image<Gray, float>(v.Size);
    var s = v - r.Min(g).Min(b);

    Mat z = new Mat();
    var zeroAux = new Image<Gray, float>(s.Size);
    zeroAux.SetZero();

    //z = ~s;
    CvInvoke.Compare(s, zeroAux, z, Emgu.CV.CvEnum.CmpType.Equal);
    //s(z) = 1;
    s.Mat.SetTo(new MCvScalar(1), z);

    //k = (r == v);
    var k = new Mat();
    CvInvoke.Compare(r, v, k, Emgu.CV.CvEnum.CmpType.Equal);
    //h(k) = (g(k) - b(k)) ./ s(k);
    (g - b).Mul(1.0 / s).Mat.CopyTo(h, k);

    //k = (g == v);
    CvInvoke.Compare(g, v, k, Emgu.CV.CvEnum.CmpType.Equal);
    //h(k) = 2 + (b(k) - r(k)) ./ s(k);
    (2 + (b - r).Mul(1.0 / s)).Mat.CopyTo(h, k);

    //k = (b == v);
    CvInvoke.Compare(b, v, k, Emgu.CV.CvEnum.CmpType.Equal);
    //h(k) = 4 + (r(k) - g(k)) ./ s(k);
    (4 + (r - g).Mul(1.0 / s)).Mat.CopyTo(h, k);

    //h = h / 6;
    h._Mul(1.0 / 6.0);

    //k = (h < 0);  (LessThan replaces the original Equal, which did not match this comment)
    CvInvoke.Compare(h, zeroAux, k, Emgu.CV.CvEnum.CmpType.LessThan);
    //h(k) = h(k) + 1;
    (h + 1).Mat.CopyTo(h, k);
    //h(z) = 0;
    h.Mat.SetTo(new MCvScalar(0), z);

    //tmp = s ./ v;
    var tmp = s.Mul(1.0 / v);
    //tmp(z) = 0;
    tmp.Mat.SetTo(new MCvScalar(0), z);

    //k = (v ~= 0);
    CvInvoke.Compare(v, zeroAux, k, Emgu.CV.CvEnum.CmpType.NotEqual);
    //s(k) = tmp(k);
    tmp.Mat.CopyTo(s, k);
    //s(~v) = 0;
    CvInvoke.BitwiseNot(k, k);
    s.Mat.SetTo(new MCvScalar(0), k);

    return new Image<Bgr, float>(new Image<Gray, float>[] { v, s, h });
}
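// For byte images the conversion is available built in; a minimal sketch using
// CvInvoke.CvtColor instead of the hand-rolled MATLAB-style port above. Note that
// OpenCV scales H to 0–179 for 8-bit images, not 0–1; "input.png" is hypothetical.
Mat bgr = CvInvoke.Imread("input.png", ImreadModes.Color);
Mat hsv = new Mat();
CvInvoke.CvtColor(bgr, hsv, ColorConversion.Bgr2Hsv);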
void Esquisse()
{
    UMat NB = new UMat();
    CvInvoke.CvtColor(sourceImage, NB, ColorConversion.Bgr2Gray);
    CvInvoke.PyrDown(NB, NB);
    CvInvoke.PyrUp(NB, NB);
    CvInvoke.EqualizeHist(NB, NB);
    CvInvoke.Canny(NB, NB, 50, 150);
    CvInvoke.BitwiseNot(NB, processedImage);
}
//static List<Contour> getLines1(Image<Gray, byte> image, Size lineSize)
//{
//    List<Contour> lines = new List<Contour>();
//    Mat se = CvInvoke.GetStructuringElement(ElementShape.Rectangle, lineSize, new Point(-1, -1));
//    Image<Gray, byte> image2 = new Image<Gray, byte>(image.Size);
//    CvInvoke.MorphologyEx(image, image2, MorphOp.Open, se, new Point(-1, -1), 2, BorderType.Default, new MCvScalar());
//    VectorOfVectorOfPoint cs = new VectorOfVectorOfPoint();
//    Mat h = new Mat();
//    CvInvoke.FindContours(image2, cs, h, RetrType.Tree, ChainApproxMethod.ChainApproxSimple);
//    if (cs.Size < 1)
//        return lines;
//    Array hierarchy = h.GetData();
//    List<Contour> contours = new List<Contour>();
//    for (int i = 0; i < cs.Size; i++)
//        contours.Add(new Contour(hierarchy, i, cs[i]));
//    if (contours.Where(a => a.ParentId < 0).Count() < 2)   //the only parent is the whole page frame
//        contours.RemoveAll(a => a.ParentId < 0);
//    else
//        contours.RemoveAll(a => a.ParentId >= 0);
//    for (int i = 0; i < contours.Count; i++)
//        lines.Add(contours[i]);
//    return lines;
//}

static List<LineSegment2D> getLines(Image<Gray, byte> image, int minLineSize)   //!!! needs tuning
{
    CvInvoke.BitwiseNot(image, image);                        // to negative
    CvInvoke.GaussianBlur(image, image, new Size(9, 9), 0);   // remove small spots
    CvInvoke.Threshold(image, image, 125, 255, ThresholdType.Otsu | ThresholdType.Binary);
    Mat se = CvInvoke.GetStructuringElement(ElementShape.Rectangle, new Size(30, 5), new Point(-1, -1));
    CvInvoke.Dilate(image, image, se, new Point(-1, -1), 1, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
    CvInvoke.Canny(image, image, 100, 30, 3);
    return CvInvoke.HoughLinesP(image, 1, 2 * Math.PI / 180, 10, minLineSize, 10).ToList();
}
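// A minimal usage sketch for getLines; the scan file name and the 100 px minimum
// line length are assumptions:
Image<Gray, byte> page = new Image<Gray, byte>("scan.png");
foreach (LineSegment2D line in getLines(page, 100))
    CvInvoke.Line(page, line.P1, line.P2, new MCvScalar(128), 2);
CvInvoke.Imshow("detected lines", page);
CvInvoke.WaitKey(0);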
private void drawContours(VectorOfVectorOfPoint contours)
{
    // start from a white canvas the size of the ROI (zeros, then inverted)
    Mat Canvas = Mat.Zeros(ROIFrame.Rows, ROIFrame.Cols, Emgu.CV.CvEnum.DepthType.Cv8U, 1);
    CvInvoke.BitwiseNot(Canvas, Canvas);

    CvInvoke.DrawContours(Canvas, contours, -1, new Emgu.CV.Structure.MCvScalar(0, 0, 0), 1);
    CvInvoke.Flip(Canvas, Canvas, Emgu.CV.CvEnum.FlipType.None);
    CvInvoke.Rectangle(Canvas, eraser, new Emgu.CV.Structure.MCvScalar(255, 0, 0));
    imageView.Image = Canvas;

    CvInvoke.Flip(ROIFrame, Canvas, Emgu.CV.CvEnum.FlipType.None);
    originalView.Image = Canvas;
}