/*
 * public static Mat ToMat(this WriteableBitmap writeableBitmap)
 * {
 *     Mat m = new Mat();
 *     writeableBitmap.ToArray(m);
 *     return m;
 * }
 */

/// <summary>
/// Convert the input array to a WriteableBitmap
/// </summary>
/// <param name="array">The input array; 8-bit images with 1, 3 or 4 channels are supported</param>
/// <returns>The WriteableBitmap</returns>
public static WriteableBitmap ToWritableBitmap(this IInputArray array)
{
    using (InputArray ia = array.GetInputArray())
    {
        Size size = ia.GetSize();
        WriteableBitmap bmp = new WriteableBitmap(size.Width, size.Height);
        byte[] buffer = new byte[bmp.PixelWidth * bmp.PixelHeight * 4];
        GCHandle handle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
        using (Mat resultImage = new Mat(
            new Size(bmp.PixelWidth, bmp.PixelHeight),
            DepthType.Cv8U,
            4,
            handle.AddrOfPinnedObject(),
            bmp.PixelWidth * 4))
        {
            int channels = ia.GetChannels();
            switch (channels)
            {
                case 1:
                    CvInvoke.CvtColor(array, resultImage, ColorConversion.Gray2Bgra);
                    break;
                case 3:
                    CvInvoke.CvtColor(array, resultImage, ColorConversion.Bgr2Bgra);
                    break;
                case 4:
                    using (Mat m = ia.GetMat())
                        m.CopyTo(resultImage);
                    break;
                default:
                    throw new NotImplementedException(String.Format(
                        "Conversion from {0} channel IInputArray to WritableBitmap is not supported",
                        channels));
            }
        }
        handle.Free();
        using (Stream resultStream = bmp.PixelBuffer.AsStream())
        {
            resultStream.Write(buffer, 0, buffer.Length);
        }
        return bmp;
    }
}
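// Usage sketch (illustrative only, not part of the original file): displays a Mat in a UWP XAML
// Image control. The "imageControl" parameter and the way the Mat is produced are assumptions;
// any 8-bit IInputArray with 1, 3 or 4 channels should work.
public static void ShowInImageControl(Windows.UI.Xaml.Controls.Image imageControl, Mat frame)
{
    // ToWritableBitmap allocates a new WriteableBitmap of the same size as the input
    WriteableBitmap bmp = frame.ToWritableBitmap();
    imageControl.Source = bmp;
}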
/// <summary> /// Apply converter and compute result for each channel of the image, for single channel image, apply converter directly, for multiple channel image, make a copy of each channel to a temperary image and apply the convertor /// </summary> /// <param name="image">The source image</param> /// <param name="action">The converter such that accept the IntPtr of a single channel IplImage, and image channel index which returning result of type R</param> /// <returns>An array which contains result for each channel</returns> public static void ForEachDuplicateChannel(this IInputArray image, Action <IInputArray, int> action) { using (InputArray ia = image.GetInputArray()) { int channels = ia.GetChannels(); if (channels == 1) { action(image, 0); } else { using (Mat tmp = new Mat()) for (int i = 0; i < channels; i++) { CvInvoke.ExtractChannel(image, tmp, i); action(tmp, i); } } } }
/// <summary> /// Apply converter and compute result for each channel of the image, for single channel image, apply converter directly, for multiple channel image, make a copy of each channel to a temperary image and apply the convertor /// </summary> /// <typeparam name="TReturn">The return type</typeparam> /// <param name="image">The source image</param> /// <param name="conv">The converter such that accept the IntPtr of a single channel IplImage, and image channel index which returning result of type R</param> /// <returns>An array which contains result for each channel</returns> public static TReturn[] ForEachDuplicateChannel <TReturn>(this IInputArray image, Func <IInputArray, int, TReturn> conv) { using (InputArray ia = image.GetInputArray()) { int channels = ia.GetChannels(); TReturn[] res = new TReturn[channels]; if (channels == 1) { res[0] = conv(image, 0); } else { using (Mat tmp = new Mat()) for (int i = 0; i < channels; i++) { CvInvoke.ExtractChannel(image, tmp, i); res[i] = conv(tmp, i); } } return(res); } }
/// <summary>
/// Convert an input array to a Texture2D
/// </summary>
/// <param name="image">The input image. If it has 3 channels, we assume it is Bgr; if 4 channels, we assume it is Bgra</param>
/// <param name="flipType">The type of flip to apply to the image before it is loaded into the texture</param>
/// <param name="buffer">Optional buffer for the texture conversion, should be big enough to hold the image data. e.g. width*height*pixel_size</param>
/// <returns>The Texture2D</returns>
public static Texture2D InputArrayToTexture2D(IInputArray image, Emgu.CV.CvEnum.FlipType flipType = FlipType.Vertical, byte[] buffer = null)
{
    using (InputArray iaImage = image.GetInputArray())
    {
        Size size = iaImage.GetSize();

        if (iaImage.GetChannels() == 3 && iaImage.GetDepth() == DepthType.Cv8U
            && SystemInfo.SupportsTextureFormat(TextureFormat.RGB24))
        {
            // assuming the 3 channel image is in BGR color
            Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGB24, false);

            byte[] data;
            int bufferLength = size.Width * size.Height * 3;
            if (buffer != null)
            {
                if (buffer.Length < bufferLength)
                {
                    throw new ArgumentException(String.Format(
                        "Buffer size ({0}) is not big enough for the RGB24 texture, width * height * 3 = {1} is required.",
                        buffer.Length, bufferLength));
                }
                data = buffer;
            }
            else
            {
                data = new byte[bufferLength];
            }

            GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
            using (Image<Rgb, byte> rgb = new Image<Rgb, byte>(size.Width, size.Height, size.Width * 3, dataHandle.AddrOfPinnedObject()))
            {
                rgb.ConvertFrom(image);
                if (flipType != FlipType.None)
                {
                    CvInvoke.Flip(rgb, rgb, flipType);
                }
            }
            dataHandle.Free();

            texture.LoadRawTextureData(data);
            texture.Apply();
            return texture;
        }
        else if (SystemInfo.SupportsTextureFormat(TextureFormat.RGBA32))
        {
            Texture2D texture = new Texture2D(size.Width, size.Height, TextureFormat.RGBA32, false);

            byte[] data;
            int bufferLength = size.Width * size.Height * 4;
            if (buffer != null)
            {
                if (buffer.Length < bufferLength)
                {
                    throw new ArgumentException(String.Format(
                        "Buffer size ({0}) is not big enough for the RGBA32 texture, width * height * 4 = {1} is required.",
                        buffer.Length, bufferLength));
                }
                data = buffer;
            }
            else
            {
                data = new byte[bufferLength];
            }

            GCHandle dataHandle = GCHandle.Alloc(data, GCHandleType.Pinned);
            using (Image<Rgba, byte> rgba = new Image<Rgba, byte>(size.Width, size.Height, size.Width * 4, dataHandle.AddrOfPinnedObject()))
            {
                rgba.ConvertFrom(image);
                if (flipType != FlipType.None)
                {
                    CvInvoke.Flip(rgba, rgba, flipType);
                }
            }
            dataHandle.Free();

            texture.LoadRawTextureData(data);
            texture.Apply();
            return texture;
        }
        else
        {
            throw new Exception("TextureFormat of RGBA32 is not supported on this device");
        }
    }
}
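// Usage sketch (illustrative only, not part of the original file): loads an image file with OpenCV
// and shows it on a Unity RawImage UI element. The file path parameter and the RawImage reference
// are assumptions made for the example; any 8-bit Bgr/Bgra IInputArray can be passed instead.
public static void LoadIntoRawImage(string filePath, UnityEngine.UI.RawImage target)
{
    using (Mat m = CvInvoke.Imread(filePath))
    {
        // flipped vertically because Unity texture rows run bottom-up
        target.texture = InputArrayToTexture2D(m, FlipType.Vertical);
    }
}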
/// <summary>
/// Convert the image to an Android Bitmap
/// </summary>
/// <param name="image">The image to convert to Bitmap</param>
/// <param name="bitmap">The bitmap, must be of the same size as the image and have a bitmap config of either Argb8888 or Rgb565</param>
public static void ToBitmap(this IInputArray image, Android.Graphics.Bitmap bitmap)
{
    using (InputArray iaImage = image.GetInputArray())
    {
        System.Drawing.Size size = iaImage.GetSize();
        if (!(size.Width == bitmap.Width && size.Height == bitmap.Height))
        {
            throw new Exception("Bitmap size doesn't match the Mat size");
        }

        Android.Graphics.Bitmap.Config config = bitmap.GetConfig();
        if (config == Android.Graphics.Bitmap.Config.Argb8888)
        {
            int channels = iaImage.GetChannels();
            using (Mat m = new Mat(new Size(bitmap.Width, bitmap.Height), DepthType.Cv8U, 4, bitmap.LockPixels(), bitmap.RowBytes))
            {
                if (channels == 1)
                {
                    CvInvoke.CvtColor(image, m, ColorConversion.Gray2Rgba);
                }
                else if (channels == 3)
                {
                    CvInvoke.CvtColor(image, m, ColorConversion.Bgr2Rgba);
                }
                else if (channels == 4)
                {
                    CvInvoke.CvtColor(image, m, ColorConversion.Bgra2Rgba);
                }
                else
                {
                    throw new NotImplementedException(
                        String.Format("InputArray of {0} channels is not supported.", channels));
                }
                bitmap.UnlockPixels();
            }
        }
        else if (config == Android.Graphics.Bitmap.Config.Rgb565)
        {
            int channels = iaImage.GetChannels();
            using (Mat m = new Mat(new Size(bitmap.Width, bitmap.Height), DepthType.Cv8U, 2, bitmap.LockPixels(), bitmap.RowBytes))
            {
                if (channels == 1)
                {
                    CvInvoke.CvtColor(image, m, ColorConversion.Gray2Bgr565);
                }
                else if (channels == 3)
                {
                    CvInvoke.CvtColor(image, m, ColorConversion.Bgr2Bgr565);
                }
                else if (channels == 4)
                {
                    CvInvoke.CvtColor(image, m, ColorConversion.Bgra2Bgr565);
                }
                else
                {
                    throw new NotImplementedException(
                        String.Format("InputArray of {0} channels is not supported.", channels));
                }
                bitmap.UnlockPixels();
            }
        }
        else
        {
            throw new NotImplementedException("Only Bitmap config of Argb8888 or Rgb565 is supported.");
        }
    }
}
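// Usage sketch (illustrative only, not part of the original file): allocates an Argb8888 Android
// Bitmap of the same size as the Mat and copies the image into it, e.g. before handing it to an
// ImageView. The Mat is assumed to be an 8-bit image with 1, 3 or 4 channels.
public static Android.Graphics.Bitmap ToArgb8888Bitmap(Mat image)
{
    Android.Graphics.Bitmap bmp = Android.Graphics.Bitmap.CreateBitmap(
        image.Width, image.Height, Android.Graphics.Bitmap.Config.Argb8888);
    image.ToBitmap(bmp);
    return bmp;
}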