/*
/// <summary>
/// Convert this UMat to Mat
/// </summary>
/// <param name="access">Access type</param>
/// <returns>The Mat</returns>
public Mat ToMat(CvEnum.AccessType access)
{
   return new Mat(UMatInvoke.cvUMatGetMat(Ptr, access), true);
}*/

/// <summary>
/// Split current UMat into an array of gray scale images where each element
/// in the array represents a single color channel of the original image
/// </summary>
/// <returns>
/// An array of gray scale images where each element
/// in the array represents a single color channel of the original image
/// </returns>
public UMat[] Split()
{
   UMat[] mats = new UMat[NumberOfChannels];
   for (int i = 0; i < mats.Length; i++)
   {
      //each destination plane is a single-channel image of the same size and depth
      mats[i] = new UMat(Rows, Cols, Depth, 1);
   }
   using (VectorOfUMat vm = new VectorOfUMat(mats))
   {
      CvInvoke.Split(this, vm);
   }
   return mats;
}
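// A minimal usage sketch (not part of the library): assumes "bgrFrame" is an existing
// 3-channel 8-bit UMat obtained elsewhere, and that Emgu.CV and Emgu.CV.CvEnum are imported.
UMat[] channels = bgrFrame.Split();
try
{
   // channels[0], channels[1], channels[2] hold the B, G and R planes as gray images
   using (UMat edges = new UMat())
   {
      CvInvoke.Canny(channels[1], edges, 100, 200); // e.g. run Canny on the green plane
   }
}
finally
{
   foreach (UMat c in channels)
      c.Dispose(); // each channel wraps native memory and must be disposed
}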
/*
private static CGImage RgbaByteMatToCGImage(Mat bgraByte)
{
   using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
   using (CGBitmapContext context = new CGBitmapContext(
      bgraByte.DataPointer,
      bgraByte.Width, bgraByte.Height,
      8,
      bgraByte.Width * 4,
      cspace,
      CGImageAlphaInfo.PremultipliedLast))
      return context.ToImage();
}*/

/// <summary>
/// Converts the UMat to CGImage
/// </summary>
/// <param name="umat">The UMat to convert</param>
/// <returns>The CGImage.</returns>
public static CGImage ToCGImage(this UMat umat)
{
   int nchannels = umat.NumberOfChannels;
   DepthType d = umat.Depth;
   if (nchannels == 4 && d == DepthType.Cv8U)
   {
      //bgra
      using (Mat tmp = new Mat())
      {
         CvInvoke.CvtColor(umat, tmp, ColorConversion.Bgra2Rgba);
         return RgbaByteMatToCGImage(tmp);
      }
   }
   else if (nchannels == 3 && d == DepthType.Cv8U)
   {
      //bgr
      using (Mat tmp = new Mat())
      {
         CvInvoke.CvtColor(umat, tmp, ColorConversion.Bgr2Rgba);
         return RgbaByteMatToCGImage(tmp);
      }
   }
   else if (nchannels == 1 && d == DepthType.Cv8U)
   {
      //gray
      using (Mat tmp = new Mat())
      {
         CvInvoke.CvtColor(umat, tmp, ColorConversion.Gray2Rgba);
         return RgbaByteMatToCGImage(tmp);
      }
   }
   else
   {
      throw new Exception(String.Format(
         "Converting from UMat of {0} channels {1} to CGImage is not supported. Please convert the UMat to a 3 channel Bgr image of Byte before calling this function.",
         nchannels, d));
   }
}
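// Hedged usage sketch: ToCGImage only accepts 8-bit gray/Bgr/Bgra input, so a UMat of
// another depth (here a hypothetical single-channel 32-bit float "depthMap" holding values
// in [0,1]) needs an explicit 8-bit conversion first. Assumes Emgu.CV and CoreGraphics are imported.
using (UMat depthMap8U = new UMat())
{
   depthMap.ConvertTo(depthMap8U, DepthType.Cv8U, 255.0); // scale [0,1] floats into [0,255] bytes
   using (CGImage cg = depthMap8U.ToCGImage())
   {
      // hand the CGImage to CoreGraphics / UIKit / AppKit as needed
   }
}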
/// <summary>
/// Create a debugger proxy that wraps the UMat
/// </summary>
/// <param name="v">The UMat to be wrapped</param>
public DebuggerProxy(UMat v)
{
   _v = v;
}
/// <summary>
/// Get the UMat header for the specific roi of the parent
/// </summary>
/// <param name="parent">The parent UMat</param>
/// <param name="roi">The region of interest</param>
public UMat(UMat parent, Rectangle roi)
   : this(UMatInvoke.cvUMatCreateFromROI(parent.Ptr, ref roi), true)
{
}
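// Hedged usage sketch: the ROI constructor returns a header that shares memory with its
// parent, so writing into the ROI modifies the parent. "frame" is a hypothetical existing
// Bgr UMat; assumes System.Drawing and Emgu.CV.Structure are imported.
Rectangle topLeftQuarter = new Rectangle(0, 0, frame.Cols / 2, frame.Rows / 2);
using (UMat roi = new UMat(frame, topLeftQuarter))
{
   roi.SetTo(new MCvScalar(0, 0, 255)); // paints the parent's top-left quarter red (Bgr order)
}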
/// <summary>
/// Convert this UMat to an Image
/// </summary>
/// <typeparam name="TColor">The type of Color</typeparam>
/// <typeparam name="TDepth">The type of Depth</typeparam>
/// <returns>The image</returns>
public Image<TColor, TDepth> ToImage<TColor, TDepth>()
   where TColor : struct, IColor
   where TDepth : new()
{
   TColor c = new TColor();

   int numberOfChannels = NumberOfChannels;
   if (typeof(TDepth) == CvInvoke.GetDepthType(this.Depth) && c.Dimension == numberOfChannels)
   {
      //same color, same depth
      Image<TColor, TDepth> img = new Image<TColor, TDepth>(Size);
      CopyTo(img);
      return img;
   }
   else if (typeof(TDepth) != CvInvoke.GetDepthType(this.Depth) && c.Dimension == numberOfChannels)
   {
      //different depth, same color
      Image<TColor, TDepth> result = new Image<TColor, TDepth>(Size);
      if (numberOfChannels == 1)
      {
         using (Image<Gray, TDepth> tmp = this.ToImage<Gray, TDepth>())
            result.ConvertFrom(tmp);
      }
      else if (numberOfChannels == 3)
      {
         using (Image<Bgr, TDepth> tmp = this.ToImage<Bgr, TDepth>())
            result.ConvertFrom(tmp);
      }
      else if (numberOfChannels == 4)
      {
         using (Image<Bgra, TDepth> tmp = this.ToImage<Bgra, TDepth>())
            result.ConvertFrom(tmp);
      }
      else
      {
         throw new Exception("Unsupported conversion");
      }
      return result;
   }
   else if (typeof(TDepth) == CvInvoke.GetDepthType(this.Depth) && c.Dimension != numberOfChannels)
   {
      //same depth, different color
      Image<TColor, TDepth> result = new Image<TColor, TDepth>(Size);

      CvEnum.DepthType depth = Depth;
      if (depth == CvEnum.DepthType.Cv8U)
      {
         using (Image<TColor, Byte> tmp = this.ToImage<TColor, Byte>())
            result.ConvertFrom(tmp);
      }
      else if (depth == CvEnum.DepthType.Cv8S)
      {
         using (Image<TColor, SByte> tmp = this.ToImage<TColor, SByte>())
            result.ConvertFrom(tmp);
      }
      else if (depth == CvEnum.DepthType.Cv16U)
      {
         using (Image<TColor, UInt16> tmp = this.ToImage<TColor, UInt16>())
            result.ConvertFrom(tmp);
      }
      else if (depth == CvEnum.DepthType.Cv16S)
      {
         using (Image<TColor, Int16> tmp = this.ToImage<TColor, Int16>())
            result.ConvertFrom(tmp);
      }
      else if (depth == CvEnum.DepthType.Cv32S)
      {
         using (Image<TColor, Int32> tmp = this.ToImage<TColor, Int32>())
            result.ConvertFrom(tmp);
      }
      else if (depth == CvEnum.DepthType.Cv32F)
      {
         using (Image<TColor, float> tmp = this.ToImage<TColor, float>())
            result.ConvertFrom(tmp);
      }
      else if (depth == CvEnum.DepthType.Cv64F)
      {
         using (Image<TColor, double> tmp = this.ToImage<TColor, double>())
            result.ConvertFrom(tmp);
      }
      else
      {
         throw new Exception("Unsupported conversion");
      }
      return result;
   }
   else
   {
      //different color, different depth
      using (UMat tmp = new UMat())
      {
         ConvertTo(tmp, CvInvoke.GetDepthType(typeof(TDepth)));
         return tmp.ToImage<TColor, TDepth>();
      }
   }
}
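// Hedged usage sketch: convert a UMat into the generic Image<,> type for pixel-level access.
// "umatFrame" is a hypothetical existing 8-bit Bgr UMat; assumes Emgu.CV and Emgu.CV.Structure are imported.
using (Image<Bgr, byte> img = umatFrame.ToImage<Bgr, byte>())
{
   // the Image<,> copy lives in CPU memory and exposes its pixels through the Data array
   byte blueAtTopLeft = img.Data[0, 0, 0];
}
// a color/depth conversion is performed automatically when the requested type differs, e.g.:
// using (Image<Gray, float> gray = umatFrame.ToImage<Gray, float>()) { ... }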
/// <summary>
/// Converts to UIImage.
/// </summary>
/// <returns>The UIImage.</returns>
public static UIImage ToUIImage(this UMat umat)
{
   using (CGImage tmp = umat.ToCGImage())
   {
      return UIImage.FromImage(tmp);
   }
}
/// <summary>
/// Converts to NSImage.
/// </summary>
/// <returns>The NSImage.</returns>
public static NSImage ToNSImage(this UMat umat)
{
   using (CGImage cgImage = umat.ToCGImage())
   {
      return new NSImage(cgImage, new CGSize(cgImage.Width, cgImage.Height));
   }
}
/// <summary>
/// Similar to the cv::VideoCapture &gt;&gt; cv::UMat function
/// </summary>
/// <param name="umat">The UMat to be written to. If no more frame is available, the resulting UMat will be empty.</param>
/// <returns>True if a frame was read; false if no more frame is available.</returns>
public bool Read(UMat umat)
{
   CvInvoke.cveVideoCaptureReadToUMat(_ptr, umat);
   return !umat.IsEmpty;
}
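// Hedged usage sketch: grab frames from the default camera into a UMat (which lets OpenCV
// use OpenCL when available) and stop when Read reports no more frames. Assumes Emgu.CV,
// Emgu.CV.CvEnum and System.Drawing are imported.
using (VideoCapture capture = new VideoCapture(0))
using (UMat frame = new UMat())
{
   while (capture.Read(frame))
   {
      CvInvoke.GaussianBlur(frame, frame, new Size(5, 5), 1.5); // process the (possibly GPU-backed) UMat
      // ... display or save the frame here
   }
}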
/// <summary>
/// Create a new frame event
/// </summary>
/// <param name="transfer">The UMat that holds the new frame</param>
public NewFrameEvent(Emgu.CV.UMat transfer)
{
   this.transfer = transfer;
}