private void saveImageToolStripMenuItem_Click(object sender, EventArgs e)
{
    if (saveImageToFileDialog.ShowDialog() != DialogResult.OK)
        return;

    try
    {
        // Wrap the displayed image as a Mat and save it to the chosen file.
        Action save = delegate ()
        {
            using (InputArray ia = DisplayedImage.GetInputArray())
            using (Mat mat = ia.GetMat())
            {
                mat.Save(saveImageToFileDialog.FileName);
            }
        };

        // Marshal onto the UI thread if this handler is raised from another thread.
        if (this.InvokeRequired)
            this.Invoke(save);
        else
            save();
    }
    catch (Exception excpt)
    {
        MessageBox.Show(excpt.Message);
    }
}
public override bool run()
{
    // Wrap the raw affine matrix as a 64-bit floating point input array.
    InputArray m = InputArray.Create<double>(matrix, MatType.CV_64FC1);

    double d1, d2, d3, d4, d5, d6;
    evaluateMatrix(m.GetMat(), out d1, out d2, out d3, out d4, out d5, out d6);

    // Apply the affine transform with high-quality Lanczos interpolation and constant border handling.
    dst = src.WarpAffine(m, src.Size(), InterpolationFlags.Lanczos4, BorderTypes.Constant);
    return true;
}
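// A minimal usage sketch for run() above. Assumptions (not from the original snippet): the
// surrounding class exposes the `matrix`, `src` and `dst` fields that run() reads and writes,
// `matrix` is a double[,], and the file names below are hypothetical placeholders.
public void RunTranslationExample()
{
    // A plain 2x3 translation matrix, the shape WarpAffine expects.
    matrix = new double[,]
    {
        { 1, 0, 50 },   // shift 50 pixels to the right
        { 0, 1, 20 }    // shift 20 pixels down
    };
    src = Cv2.ImRead("input.png");        // hypothetical input file
    if (run())
    {
        Cv2.ImWrite("warped.png", dst);   // hypothetical output file
    }
}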
// ROI: Region of Interest
private bool myDetector(InputArray image, out System.Drawing.Rectangle[] ROIs)
{
    Mat gray;

    // Convert a multi-channel BGR image to grayscale; clone single-channel input as-is.
    if (image.GetChannels() > 1)
    {
        gray = new Mat();
        CvInvoke.CvtColor(image.GetMat(), gray, ColorConversion.Bgr2Gray);
    }
    else
    {
        gray = image.GetMat().Clone();
    }

    // Improve contrast before running the cascade classifier.
    CvInvoke.EqualizeHist(gray, gray);
    ROIs = faceCsc.DetectMultiScale(gray, 1.1, 3, Size.Empty);
    return true;
}
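// A minimal usage sketch for myDetector() above. Assumptions (not from the original snippet):
// `faceCsc` is an Emgu.CV.CascadeClassifier field, and the cascade and image file names below
// are hypothetical placeholders.
public void DetectFacesExample()
{
    faceCsc = new CascadeClassifier("haarcascade_frontalface_default.xml");
    using (Mat frame = CvInvoke.Imread("people.jpg", ImreadModes.Color))
    using (InputArray iaFrame = frame.GetInputArray())
    {
        if (myDetector(iaFrame, out System.Drawing.Rectangle[] faces))
        {
            // Draw a green box around each detected region.
            foreach (System.Drawing.Rectangle face in faces)
                CvInvoke.Rectangle(frame, face, new MCvScalar(0, 255, 0), 2);
        }
    }
}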
public override void SetImage(IInputArray image)
{
    if (image == null)
    {
        // Clear the image view when there is nothing to render.
        Xamarin.Forms.Device.BeginInvokeOnMainThread(() => { _imageView.SetImageBitmap(null); });
        return;
    }

    // Pick the next bitmap slot from the round-robin render buffer.
    int bufferIdx = _renderBufferIdx;
    Bitmap buffer;
    _renderBufferIdx = (_renderBufferIdx + 1) % _renderBuffer.Length;

    using (InputArray iaImage = image.GetInputArray())
    using (Mat mat = iaImage.GetMat())
    {
        if (_renderBuffer[bufferIdx] == null)
        {
            // First use of this slot: allocate a new bitmap.
            buffer = mat.ToBitmap();
            _renderBuffer[bufferIdx] = buffer;
        }
        else
        {
            var size = iaImage.GetSize();
            buffer = _renderBuffer[bufferIdx];
            if (buffer.Width != size.Width || buffer.Height != size.Height)
            {
                // The image size changed: replace the cached bitmap so we never display a disposed one.
                buffer.Dispose();
                buffer = mat.ToBitmap();
                _renderBuffer[bufferIdx] = buffer;
            }
            else
            {
                // Reuse the cached bitmap to avoid reallocation.
                mat.ToBitmap(buffer);
            }
        }
    }

    Xamarin.Forms.Device.BeginInvokeOnMainThread(() => { _imageView.SetImageBitmap(buffer); });
}
/// <summary>
/// Convert an IInputArray to a WPF BitmapSource. The result can be assigned to the Source property of a WPF Image control.
/// </summary>
/// <param name="image">The Emgu CV image</param>
/// <returns>The equivalent BitmapSource</returns>
public static BitmapSource ToBitmapSource(IInputArray image)
{
    using (InputArray ia = image.GetInputArray())
    using (Mat m = ia.GetMat())
    using (System.Drawing.Bitmap source = m.Bitmap)
    {
        IntPtr ptr = source.GetHbitmap(); //obtain the HBitmap

        BitmapSource bs = System.Windows.Interop.Imaging.CreateBitmapSourceFromHBitmap(
            ptr,
            IntPtr.Zero,
            Int32Rect.Empty,
            System.Windows.Media.Imaging.BitmapSizeOptions.FromEmptyOptions());

        DeleteObject(ptr); //release the HBitmap
        return bs;
    }
}
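// ToBitmapSource() above depends on a DeleteObject interop call to release the HBITMAP handle.
// A minimal sketch of the supporting declaration it assumes, placed in the same class
// (requires `using System.Runtime.InteropServices;`):
[DllImport("gdi32")]
private static extern bool DeleteObject(IntPtr hObject);

// Hypothetical usage from WPF code-behind: assign the result to an Image control's Source.
// imageDisplay.Source = ToBitmapSource(capturedMat);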
public void SetImage(IInputArray image)
{
    InvokeOnMainThread(delegate
    {
        using (InputArray iaImage = image.GetInputArray())
        using (Mat mat = iaImage.GetMat())
        using (UIImage i = mat.ToUIImage())
        {
            _imageView.Frame = new CGRect(CGPoint.Empty, i.Size);
            _imageView.Image = i;
            _imageView.SetNeedsDisplay();
            ReloadData();
        }
    });
}
private static void DrawMask(IInputOutputArray image, Mat mask, Rectangle rect, MCvScalar color)
{
    using (Mat maskLarge = new Mat())
    using (Mat maskLargeInv = new Mat())
    using (InputArray iaImage = image.GetInputArray())
    using (Mat matImage = iaImage.GetMat())
    using (Mat subRegion = new Mat(matImage, rect))
    using (Mat largeColor = new Mat(subRegion.Size, Emgu.CV.CvEnum.DepthType.Cv8U, 3))
    {
        CvInvoke.Resize(mask, maskLarge, rect.Size);

        //give the mask at least 30% transparency
        using (ScalarArray sa = new ScalarArray(0.7))
            CvInvoke.Min(sa, maskLarge, maskLarge);

        //Create the inverse mask for the original image
        using (ScalarArray sa = new ScalarArray(1.0))
            CvInvoke.Subtract(sa, maskLarge, maskLargeInv);

        //The mask color
        largeColor.SetTo(color);

        if (subRegion.NumberOfChannels == 4)
        {
            using (Mat bgrSubRegion = new Mat())
            {
                CvInvoke.CvtColor(subRegion, bgrSubRegion, ColorConversion.Bgra2Bgr);
                CvInvoke.BlendLinear(largeColor, bgrSubRegion, maskLarge, maskLargeInv, bgrSubRegion);
                CvInvoke.CvtColor(bgrSubRegion, subRegion, ColorConversion.Bgr2Bgra);
            }
        }
        else
        {
            CvInvoke.BlendLinear(largeColor, subRegion, maskLarge, maskLargeInv, subRegion);
        }
    }
}
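// A minimal usage sketch for DrawMask() above. Assumptions (not from the original snippet):
// `probabilityMask` is a single-channel floating-point mask in the [0, 1] range (e.g. from an
// instance-segmentation model) and `detectionRect` is the image region it belongs to.
// DrawMask(frame, probabilityMask, detectionRect, new MCvScalar(0, 0, 255));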
/// <summary>
/// Set the mouse position over the image.
/// It also sets the color intensity of the pixel on the image where the mouse is located.
/// </summary>
/// <param name="location">The location of the mouse on the image</param>
public void SetMousePositionOnImage(Point location)
{
    IInputArray img = _imageBox.DisplayedImage;
    using (InputArray iaImage = img.GetInputArray())
    {
        // Clamp the location to the image boundaries.
        Size size = iaImage.GetSize();
        location.X = Math.Max(Math.Min(location.X, size.Width - 1), 0);
        location.Y = Math.Max(Math.Min(location.Y, size.Height - 1), 0);

        mousePositionTextbox.Text = location.ToString();

        if (_imageType == typeof(CvArray<>))
        {
            using (Mat mat = iaImage.GetMat())
            {
                RenderIntensityForMat(mat, location);
            }
        }
        else if (_imageType == typeof(Mat))
        {
            Mat mat = img as Mat;
            RenderIntensityForMat(mat, location);
        }
        else if (_imageType == typeof(UMat))
        {
            UMat umat = img as UMat;
            using (Mat mat = umat.GetMat(AccessType.Read))
            {
                RenderIntensityForMat(mat, location);
            }
        }
        else
        {
            colorIntensityTextbox.Text = String.Empty;
        }
    }
}
public string ProcessAndRender(IInputArray imageIn, IInputOutputArray imageOut)
{
    Stopwatch watch = Stopwatch.StartNew();

    #region Pre-processing
    //Convert the image to grayscale and filter out the noise
    CvInvoke.CvtColor(imageIn, _gray, ColorConversion.Bgr2Gray);
    //Remove noise
    CvInvoke.GaussianBlur(_gray, _gray, new Size(3, 3), 1);
    double cannyThreshold = 180.0;
    double cannyThresholdLinking = 120.0;
    CvInvoke.Canny(_gray, _cannyEdges, cannyThreshold, cannyThresholdLinking);
    #endregion

    #region Circle detection
    double circleAccumulatorThreshold = 120;
    CircleF[] circles = CvInvoke.HoughCircles(_gray, HoughModes.Gradient, 2.0, 20.0,
        cannyThreshold, circleAccumulatorThreshold, 5);
    #endregion

    #region Line detection
    LineSegment2D[] lines = CvInvoke.HoughLinesP(
        _cannyEdges,
        1,              //Distance resolution in pixel-related units
        Math.PI / 45.0, //Angle resolution measured in radians
        20,             //threshold
        30,             //min line width
        10);            //gap between lines
    #endregion

    #region Find triangles and rectangles
    List<Triangle2DF> triangleList = new List<Triangle2DF>();
    List<RotatedRect> boxList = new List<RotatedRect>(); //a box is a rotated rectangle
    using (VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint())
    {
        CvInvoke.FindContours(_cannyEdges, contours, null, RetrType.List, ChainApproxMethod.ChainApproxSimple);
        int count = contours.Size;
        for (int i = 0; i < count; i++)
        {
            using (VectorOfPoint contour = contours[i])
            using (VectorOfPoint approxContour = new VectorOfPoint())
            {
                CvInvoke.ApproxPolyDP(contour, approxContour, CvInvoke.ArcLength(contour, true) * 0.05, true);
                if (CvInvoke.ContourArea(approxContour, false) > 250) //only consider contours with area greater than 250
                {
                    if (approxContour.Size == 3) //The contour has 3 vertices, it is a triangle
                    {
                        Point[] pts = approxContour.ToArray();
                        triangleList.Add(new Triangle2DF(pts[0], pts[1], pts[2]));
                    }
                    else if (approxContour.Size == 4) //The contour has 4 vertices, it might be a rectangle
                    {
                        #region determine if all the angles in the contour are within [80, 100] degree
                        bool isRectangle = true;
                        Point[] pts = approxContour.ToArray();
                        LineSegment2D[] edges = PointCollection.PolyLine(pts, true);
                        for (int j = 0; j < edges.Length; j++)
                        {
                            double angle = Math.Abs(
                                edges[(j + 1) % edges.Length].GetExteriorAngleDegree(edges[j]));
                            if (angle < 80 || angle > 100)
                            {
                                isRectangle = false;
                                break;
                            }
                        }
                        #endregion

                        if (isRectangle)
                        {
                            boxList.Add(CvInvoke.MinAreaRect(approxContour));
                        }
                    }
                }
            }
        }
    }
    #endregion

    watch.Stop();

    using (Mat triangleRectangleImage = new Mat(_gray.Size, DepthType.Cv8U, 3)) //image to draw triangles and rectangles on
    using (Mat circleImage = new Mat(_gray.Size, DepthType.Cv8U, 3))            //image to draw circles on
    using (Mat lineImage = new Mat(_gray.Size, DepthType.Cv8U, 3))              //image to draw lines on
    {
        #region draw triangles and rectangles
        triangleRectangleImage.SetTo(new MCvScalar(0));
        foreach (Triangle2DF triangle in triangleList)
        {
            CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(triangle.GetVertices(), Point.Round), true,
                new Bgr(Color.DarkBlue).MCvScalar, 2);
        }
        foreach (RotatedRect box in boxList)
        {
            CvInvoke.Polylines(triangleRectangleImage, Array.ConvertAll(box.GetVertices(), Point.Round), true,
                new Bgr(Color.DarkOrange).MCvScalar, 2);
        }

        //Drawing a light gray frame around the image
        CvInvoke.Rectangle(triangleRectangleImage,
            new Rectangle(Point.Empty, new Size(triangleRectangleImage.Width - 1, triangleRectangleImage.Height - 1)),
            new MCvScalar(120, 120, 120));
        //Draw the labels
        CvInvoke.PutText(triangleRectangleImage, "Triangles and Rectangles", new Point(20, 20),
            FontFace.HersheyDuplex, 0.5, new MCvScalar(120, 120, 120));
        #endregion

        #region draw circles
        circleImage.SetTo(new MCvScalar(0));
        foreach (CircleF circle in circles)
        {
            CvInvoke.Circle(circleImage, Point.Round(circle.Center), (int)circle.Radius,
                new Bgr(Color.Brown).MCvScalar, 2);
        }

        //Drawing a light gray frame around the image
        CvInvoke.Rectangle(circleImage,
            new Rectangle(Point.Empty, new Size(circleImage.Width - 1, circleImage.Height - 1)),
            new MCvScalar(120, 120, 120));
        //Draw the labels
        CvInvoke.PutText(circleImage, "Circles", new Point(20, 20),
            FontFace.HersheyDuplex, 0.5, new MCvScalar(120, 120, 120));
        #endregion

        #region draw lines
        lineImage.SetTo(new MCvScalar(0));
        foreach (LineSegment2D line in lines)
        {
            CvInvoke.Line(lineImage, line.P1, line.P2, new Bgr(Color.Green).MCvScalar, 2);
        }

        //Drawing a light gray frame around the image
        CvInvoke.Rectangle(lineImage,
            new Rectangle(Point.Empty, new Size(lineImage.Width - 1, lineImage.Height - 1)),
            new MCvScalar(120, 120, 120));
        //Draw the labels
        CvInvoke.PutText(lineImage, "Lines", new Point(20, 20),
            FontFace.HersheyDuplex, 0.5, new MCvScalar(120, 120, 120));
        #endregion

        //Stack the original image and the three annotated images vertically into the output.
        using (InputArray iaImageIn = imageIn.GetInputArray())
        using (Mat imageInMat = iaImageIn.GetMat())
            CvInvoke.VConcat(new Mat[] { imageInMat, triangleRectangleImage, circleImage, lineImage }, imageOut);
    }

    return String.Format("Detected in {0} milliseconds.", watch.ElapsedMilliseconds);
}
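// A minimal usage sketch for ProcessAndRender() above. Assumptions (not from the original
// snippet): the `_gray` and `_cannyEdges` fields are pre-allocated Mat instances, and the
// file names below are hypothetical placeholders.
public void DetectShapesExample()
{
    using (Mat input = CvInvoke.Imread("shapes.png", ImreadModes.Color))
    using (Mat annotated = new Mat())
    {
        string message = ProcessAndRender(input, annotated);
        CvInvoke.Imwrite("shapes_annotated.png", annotated);
        Console.WriteLine(message);
    }
}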
/// <summary>
/// Generate histograms for the image. One histogram is generated for each color channel.
/// You will need to call the Refresh function to do the painting afterward.
/// </summary>
/// <param name="image">The image to generate histograms from</param>
/// <param name="numberOfBins">The number of bins for each histogram</param>
public void GenerateHistograms(IInputArray image, int numberOfBins)
{
    using (InputArray iaImage = image.GetInputArray())
    {
        int channelCount = iaImage.GetChannels();
        Mat[] channels = new Mat[channelCount];
        Type imageType;
        if ((imageType = Toolbox.GetBaseType(image.GetType(), "Image`2")) != null
            || (imageType = Toolbox.GetBaseType(image.GetType(), "Mat")) != null
            || (imageType = Toolbox.GetBaseType(image.GetType(), "UMat")) != null)
        {
            for (int i = 0; i < channelCount; i++)
            {
                Mat channel = new Mat();
                CvInvoke.ExtractChannel(image, channel, i);
                channels[i] = channel;
            }
        }
        else if ((imageType = Toolbox.GetBaseType(image.GetType(), "CudaImage`2")) != null)
        {
            using (Mat img = imageType.GetMethod("ToMat").Invoke(image, null) as Mat)
            {
                for (int i = 0; i < channelCount; i++)
                {
                    Mat channel = new Mat();
                    CvInvoke.ExtractChannel(img, channel, i);
                    channels[i] = channel;
                }
            }
        }
        else
        {
            throw new ArgumentException(String.Format("The input image type of {0} is not supported",
                image.GetType().ToString()));
        }

        Type[] genericArguments = imageType.GetGenericArguments();
        String[] channelNames;
        Color[] colors;
        Type typeOfDepth;
        if (genericArguments.Length > 0)
        {
            IColor typeOfColor = Activator.CreateInstance(genericArguments[0]) as IColor;
            channelNames = Reflection.ReflectColorType.GetNamesOfChannels(typeOfColor);
            colors = Reflection.ReflectColorType.GetDisplayColorOfChannels(typeOfColor);
            typeOfDepth = imageType.GetGenericArguments()[1];
        }
        else
        {
            channelNames = new String[channelCount];
            colors = new Color[channelCount];
            for (int i = 0; i < channelCount; i++)
            {
                channelNames[i] = String.Format("Channel {0}", i);
                colors[i] = Color.Red;
            }

            if (image is Mat)
            {
                typeOfDepth = CvInvoke.GetDepthType(((Mat)image).Depth);
            }
            else if (image is UMat)
            {
                typeOfDepth = CvInvoke.GetDepthType(((UMat)image).Depth);
            }
            else
            {
                throw new ArgumentException(String.Format(
                    "Unable to get the type of depth from image of type {0}", image.GetType().ToString()));
            }
        }

        float minVal, maxVal;

        #region Get the maximum and minimum color intensity values
        if (typeOfDepth == typeof(Byte))
        {
            minVal = 0.0f;
            maxVal = 256.0f;
        }
        else
        {
            #region obtain the maximum and minimum color value
            double[] minValues, maxValues;
            Point[] minLocations, maxLocations;
            using (InputArray ia = image.GetInputArray())
            using (Mat m = ia.GetMat())
            {
                m.MinMax(out minValues, out maxValues, out minLocations, out maxLocations);
                double min = minValues[0], max = maxValues[0];
                for (int i = 1; i < minValues.Length; i++)
                {
                    if (minValues[i] < min)
                    {
                        min = minValues[i];
                    }
                    if (maxValues[i] > max)
                    {
                        max = maxValues[i];
                    }
                }
                minVal = (float)min;
                maxVal = (float)max;
            }
            #endregion
        }
        #endregion

        Mat[] histograms = new Mat[channels.Length];
        for (int i = 0; i < channels.Length; i++)
        {
            //using (DenseHistogram hist = new DenseHistogram(numberOfBins, new RangeF(minVal, maxVal)))
            using (Mat hist = new Mat())
            using (Util.VectorOfMat vm = new Util.VectorOfMat())
            {
                vm.Push(channels[i]);
                float[] ranges = new float[] { minVal, maxVal };
                CvInvoke.CalcHist(vm, new int[] { 0 }, null, hist, new int[] { numberOfBins }, ranges, false);
                //hist.Calculate(new IImage[1] { channels[i] }, true, null);
                histograms[i] = GenerateHistogram(channelNames[i], colors[i], hist, numberOfBins, ranges);
            }
        }

        if (histograms.Length == 1)
        {
            this.Image = histograms[0];
        }
        else
        {
            int maxWidth = 0;
            int totalHeight = 0;
            for (int i = 0; i < histograms.Length; i++)
            {
                maxWidth = Math.Max(maxWidth, histograms[i].Width);
                totalHeight += histograms[i].Height;
            }

            Mat concated = new Mat(new Size(maxWidth, totalHeight), histograms[0].Depth, histograms[0].NumberOfChannels);
            int currentY = 0;
            for (int i = 0; i < histograms.Length; i++)
            {
                using (Mat roi = new Mat(concated, new Rectangle(new Point(0, currentY), histograms[i].Size)))
                {
                    histograms[i].CopyTo(roi);
                }
                currentY += histograms[i].Height;
                histograms[i].Dispose();
            }

            this.Image = concated;
        }
    }
}
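// A minimal usage sketch for GenerateHistograms() above (the control and image variable
// names are hypothetical). Per the summary above, Refresh() must be called afterward to
// repaint the control.
// histogramBox.GenerateHistograms(capturedImage, 256);
// histogramBox.Refresh();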
/// <summary>
/// Detect vehicles from the given image
/// </summary>
/// <param name="image">The image</param>
/// <returns>The detected vehicles.</returns>
public Vehicle[] Detect(IInputArray image)
{
    float vehicleConfidenceThreshold = 0.5f;
    float licensePlateConfidenceThreshold = 0.5f;

    double scale = 1.0;
    MCvScalar meanVal = new MCvScalar();

    List<Vehicle> vehicles = new List<Vehicle>();
    List<LicensePlate> plates = new List<LicensePlate>();

    using (InputArray iaImage = image.GetInputArray())
    using (Mat iaImageMat = iaImage.GetMat())
        foreach (DetectedObject vehicleOrPlate in _vehicleLicensePlateDetectionModel.Detect(image, 0.0f, 0.0f))
        {
            Rectangle region = vehicleOrPlate.Region;

            if (vehicleOrPlate.ClassId == 1 && vehicleOrPlate.Confident > vehicleConfidenceThreshold)
            {
                //this is a vehicle
                Vehicle v = new Vehicle();
                v.Region = region;

                #region find out the type and color of the vehicle
                using (Mat vehicle = new Mat(iaImageMat, region))
                using (VectorOfMat vm = new VectorOfMat(2))
                {
                    _vehicleAttrRecognizerModel.Predict(vehicle, vm);
                    //_vehicleAttrRecognizer.Forward(vm, new string[] { "color", "type" });
                    using (Mat vehicleColorMat = vm[0])
                    using (Mat vehicleTypeMat = vm[1])
                    {
                        float[] vehicleColorData = vehicleColorMat.GetData(false) as float[];
                        float maxProbColor = vehicleColorData.Max();
                        int maxIdxColor = Array.IndexOf(vehicleColorData, maxProbColor);
                        v.Color = _colorName[maxIdxColor];

                        float[] vehicleTypeData = vehicleTypeMat.GetData(false) as float[];
                        float maxProbType = vehicleTypeData.Max();
                        int maxIdxType = Array.IndexOf(vehicleTypeData, maxProbType);
                        v.Type = _vehicleType[maxIdxType];
                    }
                }
                #endregion

                vehicles.Add(v);
            }
            else if (vehicleOrPlate.ClassId == 2 && vehicleOrPlate.Confident > licensePlateConfidenceThreshold)
            {
                //this is a license plate
                LicensePlate p = new LicensePlate();
                p.Region = region;

                #region OCR on license plate
                using (Mat plate = new Mat(iaImageMat, region))
                {
                    using (Mat inputBlob = DnnInvoke.BlobFromImage(
                        plate,
                        scale,
                        new Size(94, 24),
                        meanVal,
                        false,
                        false,
                        DepthType.Cv32F))
                    {
                        _ocr.SetInput(inputBlob, "data");
                        using (Mat output = _ocr.Forward("decode"))
                        {
                            float[] plateValue = output.GetData(false) as float[];
                            StringBuilder licensePlateStringBuilder = new StringBuilder();
                            foreach (int j in plateValue)
                            {
                                if (j >= 0)
                                {
                                    licensePlateStringBuilder.Append(_plateText[j]);
                                }
                            }

                            p.Text = licensePlateStringBuilder.ToString();
                        }
                    }
                }
                #endregion

                plates.Add(p);
            }
        }

    //Associate each license plate with the vehicle whose region contains it.
    foreach (LicensePlate p in plates)
    {
        foreach (Vehicle v in vehicles)
        {
            if (v.ContainsPlate(p))
            {
                v.LicensePlate = p;
                break;
            }
        }
    }

    return vehicles.ToArray();
}
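// A minimal usage sketch for Detect() above. Assumptions (not from the original snippet):
// the image file names and drawing style are hypothetical; Vehicle exposes the Region,
// Type, Color and LicensePlate members used in the method body.
public void DetectVehiclesExample()
{
    using (Mat frame = CvInvoke.Imread("street.jpg", ImreadModes.Color))
    {
        Vehicle[] vehicles = Detect(frame);
        foreach (Vehicle v in vehicles)
        {
            // Draw the vehicle region and annotate it with the recognized color and type.
            CvInvoke.Rectangle(frame, v.Region, new MCvScalar(0, 255, 0), 2);
            string label = String.Format("{0} {1}", v.Color, v.Type);
            CvInvoke.PutText(frame, label, v.Region.Location, FontFace.HersheyDuplex, 0.8, new MCvScalar(0, 255, 0));
        }
        CvInvoke.Imwrite("street_annotated.jpg", frame);
    }
}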