/// <summary>
/// Resizes the encoded image to width <paramref name="size"/> (height scaled to
/// preserve aspect ratio) and returns the result re-encoded as JPEG bytes.
/// </summary>
/// <param name="imageData">Encoded source image bytes.</param>
/// <param name="size">Target width in pixels.</param>
/// <returns>The resized image encoded as JPEG.</returns>
public byte[] ResizeImageIOS(byte[] imageData, float size)
{
    UIImage originalImage = ImageFromByteArray(imageData);
    System.Diagnostics.Debug.Write("originalImage.Size.Height"+ originalImage.Size.Height + ", " + originalImage.Size.Width);
    // Preserve the source orientation so the resized image displays the same way.
    UIImageOrientation orientation = originalImage.Orientation;

    float width = size;
    float height = ((float)originalImage.Size.Height / (float)originalImage.Size.Width) * size;
    System.Diagnostics.Debug.Write("new size" + width + ", " + height);

    // 32-bit ARGB premultiplied context (8 bits/component, 4 bytes/pixel).
    // FIX: the device color space was previously created inline and never
    // disposed, leaking a native CGColorSpace on every call.
    using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(IntPtr.Zero, (int)width, (int)height, 8, (int)(4 * width), colorSpace, CGImageAlphaInfo.PremultipliedFirst))
    {
        RectangleF imageRect = new RectangleF(0, 0, width, height);
        // draw the image
        context.DrawImage(imageRect, originalImage.CGImage);
        UIKit.UIImage resizedImage = UIKit.UIImage.FromImage(context.ToImage(), 0, orientation);
        // save the image as a jpeg
        return resizedImage.AsJPEG().ToArray();
    }
}
/// <summary>
/// Builds a Bitmap from an NSImage by rendering it into an RGBA byte buffer
/// and copying every pixel into the managed color array via SetPixel.
/// </summary>
public Bitmap(NSImage sourceImage)
{
    // Dimensions come from the backing CGImage (pixels, not points).
    Width = (int)sourceImage.CGImage.Width;
    Height = (int)sourceImage.CGImage.Height;
    _colors = new Color[Width * Height];

    var bytesPerPixel = 4;
    var bytesPerRow = bytesPerPixel * Width;
    var bitsPerComponent = 8;
    var rawData = new byte[Height * bytesPerRow];

    using (var colorSpace = CGColorSpace.CreateDeviceRGB())
    using (var context = new CGBitmapContext(rawData, Width, Height, bitsPerComponent, bytesPerRow, colorSpace, CGBitmapFlags.ByteOrder32Big | CGBitmapFlags.PremultipliedLast))
    {
        // Decode the image directly into rawData as big-endian RGBA.
        context.DrawImage(new CGRect(0, 0, Width, Height), sourceImage.CGImage);

        for (int row = 0; row < Height; row++)
        {
            int offset = bytesPerRow * row;
            for (int col = 0; col < Width; col++, offset += bytesPerPixel)
            {
                // Buffer layout is R,G,B,A; Color.FromArgb takes (a, r, g, b).
                SetPixel(col, row, Color.FromArgb(
                    rawData[offset + 3],
                    rawData[offset + 0],
                    rawData[offset + 1],
                    rawData[offset + 2]));
            }
        }
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="iOSSurfaceSource"/> class.
/// Decodes the stream into an unmanaged premultiplied-RGBA buffer, then
/// converts it back to straight alpha.
/// </summary>
/// <param name="stream">The <see cref="Stream"/> that contains the surface data.</param>
public iOSSurfaceSource(Stream stream)
{
    Contract.Require(stream, nameof(stream));
    using (var data = NSData.FromStream(stream))
    {
        using (var img = UIImage.LoadFromData(data))
        {
            // NOTE(review): width/height come from UIImage.Size (points) while
            // stride comes from the CGImage (pixels) — assumes scale == 1; confirm.
            this.width = (Int32)img.Size.Width;
            this.height = (Int32)img.Size.Height;
            this.stride = (Int32)img.CGImage.BytesPerRow;
            // Unmanaged backing store for the decoded pixels; presumably freed by
            // this type's Dispose — not visible here, verify.
            this.bmpData = Marshal.AllocHGlobal(stride * height);
            using (var colorSpace = CGColorSpace.CreateDeviceRGB())
            {
                using (var bmp = new CGBitmapContext(bmpData, width, height, 8, stride, colorSpace, CGImageAlphaInfo.PremultipliedLast))
                {
                    // Clear first: AllocHGlobal memory is uninitialized.
                    bmp.ClearRect(new CGRect(0, 0, width, height));
                    bmp.DrawImage(new CGRect(0, 0, width, height), img.CGImage);
                }
            }
        }
    }
    // CoreGraphics only renders premultiplied alpha; undo that so the surface
    // holds straight-alpha pixel values.
    ReversePremultiplication();
}
/// <summary>
/// Decodes the image stream into Mipmaps[0] as 32-bit RGBA (optionally flipped
/// vertically) and reports success or failure through the loader callback.
/// Failures are recorded via Loader.AddLoadableException rather than rethrown.
/// </summary>
protected override void init(Stream stream, bool flip, Loader.LoadedCallbackMethod loadedCallback)
{
    try
    {
        using (var imageData = NSData.FromStream(stream))
        using (var image = UIImage.LoadFromData(imageData))
        {
            int width = (int)image.Size.Width;
            int height = (int)image.Size.Height;
            Mipmaps = new Mipmap[1];
            Size = new Size2(width, height);

            var data = new byte[width * height * 4];
            // FIX: the device color space was previously created inline in the
            // context constructor and never disposed (native leak per load).
            using (var colorSpace = CGColorSpace.CreateDeviceRGB())
            using (CGContext imageContext = new CGBitmapContext(data, width, height, 8, width*4, colorSpace, CGImageAlphaInfo.PremultipliedLast))
            {
                imageContext.DrawImage(new RectangleF(0, 0, width, height), image.CGImage);
                Mipmaps[0] = new Mipmap(data, width, height, 1, 4);
                if (flip) Mipmaps[0].FlipVertical();
            }
        }
    }
    catch (Exception e)
    {
        // Loader error convention: flag, record, notify callback with false.
        FailedToLoad = true;
        Loader.AddLoadableException(e);
        if (loadedCallback != null) loadedCallback(this, false);
        return;
    }
    Loaded = true;
    if (loadedCallback != null) loadedCallback(this, true);
}
/// <summary>
/// Creates an image from a pixel array. Pixels are read as 4-byte BGRA from
/// the Color structs and written as premultiplied ARGB into the context.
/// </summary>
/// <param name="colors">Pixel data; length must be a multiple of <paramref name="width"/>.</param>
/// <param name="width">Row width in pixels.</param>
/// <param name="scale">Logical scale factor of the resulting image.</param>
public IImage CreateImage(Color[] colors, int width, double scale = 1.0)
{
    var pixelWidth = width;
    var pixelHeight = colors.Length / width;
    var bitmapInfo = CGImageAlphaInfo.PremultipliedFirst;
    var bitsPerComp = 8;
    var bytesPerRow = width * 4;
    // FIX: the color space and bitmap context were previously never disposed.
    using (var colorSpace = CGColorSpace.CreateDeviceRGB ())
    using (var bitmap = new CGBitmapContext (IntPtr.Zero, pixelWidth, pixelHeight, bitsPerComp, bytesPerRow, colorSpace, bitmapInfo))
    {
        var data = bitmap.Data;
        unsafe {
            fixed (Color *c = colors) {
                for (var y = 0; y < pixelHeight; y++) {
                    var s = (byte*)c + 4*pixelWidth*y;
                    var d = (byte*)data + bytesPerRow*y;
                    for (var x = 0; x < pixelWidth; x++) {
                        var b = *s++;
                        var g = *s++;
                        var r = *s++;
                        var a = *s++;
                        *d++ = a;
                        // FIX: premultiply with / 255, not >> 8. Shifting divides
                        // by 256, so fully opaque pixels darkened slightly
                        // (255*255 >> 8 == 254); the correct scale is 255.
                        *d++ = (byte)((r * a) / 255);
                        *d++ = (byte)((g * a) / 255);
                        *d++ = (byte)((b * a) / 255);
                    }
                }
            }
        }
        // ToImage snapshots the context, so disposing the context afterwards is safe.
        var image = bitmap.ToImage ();
        return new CGImageImage (image, scale);
    }
}
// Builds a full-screen image view and fills it with an offscreen rendering of
// the flag, the coordinate-space reference and a centered label.
public override void ViewDidLoad () {
    base.ViewDidLoad ();
    // set the background color of the view to white
    View.BackgroundColor = UIColor.White;
    // instantiate a new image view that takes up the whole screen and add it to
    // the view hierarchy; the negative y offset slides it under the nav bar
    RectangleF imageViewFrame = new RectangleF (0, -NavigationController.NavigationBar.Frame.Height, View.Frame.Width, View.Frame.Height);
    imageView = new UIImageView (imageViewFrame);
    View.AddSubview (imageView);
    // create our offscreen bitmap context
    // size
    SizeF bitmapSize = new SizeF (imageView.Frame.Size);
    // NOTE(review): the CGColorSpace created inline here is never disposed.
    using (CGBitmapContext context = new CGBitmapContext (IntPtr.Zero, (int)bitmapSize.Width, (int)bitmapSize.Height, 8, (int)(4 * bitmapSize.Width), CGColorSpace.CreateDeviceRGB (), CGImageAlphaInfo.PremultipliedFirst)) {
        // draw our coordinates for reference
        DrawCoordinateSpace (context);
        // draw our flag
        DrawFlag (context);
        // add a label
        DrawCenteredTextAtPoint (context, 384, 700, "Stars and Stripes", 60);
        // output the drawing to the view
        imageView.Image = UIImage.FromImage (context.ToImage ());
    }
}
/// <summary>
/// Reads the color of a single pixel by drawing the image into a 1x1 RGBA
/// bitmap context offset so the requested point lands on that one pixel.
/// </summary>
/// <param name="point">Pixel coordinate (top-left origin) to sample.</param>
/// <param name="image">Image to sample from.</param>
/// <returns>The sampled color (premultiplied values, as rendered by CG).</returns>
private UIColor GetPixelColor(CGPoint point, UIImage image)
{
    var rawData = new byte[4];
    UIColor resultColor = null;
    // FIX: the previous GCHandle.Alloc(rawData) used the default (non-pinned)
    // handle type and its address was never used — it pinned nothing and served
    // no purpose. The byte[] overload of CGBitmapContext pins the array itself,
    // so the handle (and its try/finally) is removed.
    using (var colorSpace = CGColorSpace.CreateDeviceRGB())
    {
        using (var context = new CGBitmapContext(rawData, 1, 1, 8, 4, colorSpace, CGImageAlphaInfo.PremultipliedLast))
        {
            // Shift the draw rect so (point.X, point.Y) maps onto the context's
            // single pixel (CG's origin is bottom-left, hence the Y flip).
            context.DrawImage(new CGRect(-point.X, point.Y - image.Size.Height, image.Size.Width, image.Size.Height), image.CGImage);
            float red = (rawData[0]) / 255.0f;
            float green = (rawData[1]) / 255.0f;
            float blue = (rawData[2]) / 255.0f;
            float alpha = (rawData[3]) / 255.0f;
            resultColor = UIColor.FromRGBA(red, green, blue, alpha);
        }
    }
    return resultColor;
}
/// <summary>
/// Creates grayscaled image from existing image, preserving its alpha channel.
/// </summary>
/// <param name="oldImage">Image to convert.</param>
/// <returns>Returns grayscaled image.</returns>
public static UIImage GrayscaleImage( UIImage oldImage )
{
    var imageRect = new RectangleF(PointF.Empty, (SizeF) oldImage.Size);
    CGImage grayImage;
    // Create gray image. bytesPerRow == 0 lets CoreGraphics pick the stride.
    using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceGray())
    {
        using (var context = new CGBitmapContext(IntPtr.Zero, (int) imageRect.Width, (int) imageRect.Height, 8, 0, colorSpace, CGImageAlphaInfo.None))
        {
            context.DrawImage(imageRect, oldImage.CGImage);
            grayImage = context.ToImage();
        }
    }
    // Create mask for transparent areas: a null color space with
    // CGBitmapFlags.Only makes an alpha-only context, so drawing the original
    // captures just its alpha channel.
    using (var context = new CGBitmapContext(IntPtr.Zero, (int) imageRect.Width, (int) imageRect.Height, 8, 0, CGColorSpace.Null, CGBitmapFlags.Only))
    {
        context.DrawImage(imageRect, oldImage.CGImage);
        CGImage alphaMask = context.ToImage();
        // Re-apply the original transparency to the grayscale image.
        var newImage = new UIImage(grayImage.WithMask(alphaMask));
        grayImage.Dispose();
        alphaMask.Dispose();
        return newImage;
    }
}
// Re-renders the scene (a transformed red square plus the coordinate-space
// reference) into an offscreen bitmap and displays it in imageView.
protected void DrawScreen () {
    // create our offscreen bitmap context
    // size
    CGSize bitmapSize = new CGSize (imageView.Frame.Size);
    // NOTE(review): the CGColorSpace created inline here is never disposed.
    using (CGBitmapContext context = new CGBitmapContext (IntPtr.Zero, (int)bitmapSize.Width, (int)bitmapSize.Height, 8, (int)(4 * bitmapSize.Width), CGColorSpace.CreateDeviceRGB (), CGImageAlphaInfo.PremultipliedFirst)) {
        // save the state of the context while we change the CTM
        context.SaveState ();
        // draw our circle: apply translate/rotate/scale, then fill a 20x20
        // square centered on the transformed origin
        context.SetFillColor (1, 0, 0, 1);
        context.TranslateCTM (currentLocation.X, currentLocation.Y);
        context.RotateCTM (currentRotation);
        context.ScaleCTM (currentScale, currentScale);
        context.FillRect (new CGRect (-10, -10, 20, 20));
        // restore our transformations
        context.RestoreState ();
        // draw our coordinates for reference
        DrawCoordinateSpace (context);
        // output the drawing to the view
        imageView.Image = UIImage.FromImage (context.ToImage ());
    }
}
// Loads "texture1.png" from the bundle, decodes it into an RGBA byte buffer
// flipped for OpenGL's bottom-left origin, and uploads it as a 2D texture.
// FIX: texData, image and the bitmap context/color space were previously never
// (or only partially) disposed, leaking native memory on every call and on
// early return / exception.
void LoadBitmapData (int texId)
{
    using (NSData texData = NSData.FromFile (NSBundle.MainBundle.PathForResource ("texture1", "png")))
    using (UIImage image = UIImage.LoadFromData (texData)) {
        if (image == null)
            return;

        int width = image.CGImage.Width;
        int height = image.CGImage.Height;
        byte[] imageData = new byte[height * width * 4];

        using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB ())
        using (CGContext context = new CGBitmapContext (imageData, width, height, 8, 4 * width, colorSpace, CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big)) {
            // CoreGraphics' origin is top-left; flip vertically for OpenGL.
            context.TranslateCTM (0, height);
            context.ScaleCTM (1, -1);
            context.ClearRect (new RectangleF (0, 0, width, height));
            context.DrawImage (new RectangleF (0, 0, width, height), image.CGImage);
        }

        GL.TexImage2D (TextureTarget.Texture2D, 0, PixelInternalFormat.Rgba, width, height, 0, PixelFormat.Rgba, PixelType.UnsignedByte, imageData);
    }
}
/// <summary>
/// Scales the encoded image to fit within width x height (preserving aspect
/// ratio by the dominant dimension) and returns it re-encoded as JPEG.
/// </summary>
/// <param name="imageData">Encoded source image bytes.</param>
/// <param name="width">Maximum target width in pixels.</param>
/// <param name="height">Maximum target height in pixels.</param>
public byte[] ResizeImage(byte[] imageData, float width, float height)
{
    UIImage originalImage = ImageFromByteArray (imageData);
    float oldWidth = (float)originalImage.Size.Width;
    float oldHeight = (float)originalImage.Size.Height;

    // Scale by whichever dimension is larger so the result fits the box.
    float scaleFactor = 0f;
    if (oldWidth > oldHeight) {
        scaleFactor = width / oldWidth;
    } else {
        scaleFactor = height / oldHeight;
    }
    float newHeight = oldHeight * scaleFactor;
    float newWidth = oldWidth * scaleFactor;

    // 32-bit ARGB context; bytesPerRow == 0 lets CoreGraphics pick the stride.
    // FIX: the device color space was previously created inline and never
    // disposed (native leak per call).
    using (var colorSpace = CGColorSpace.CreateDeviceRGB ())
    using (CGBitmapContext context = new CGBitmapContext (null, (int)newWidth, (int)newHeight, 8, 0, colorSpace, CGImageAlphaInfo.PremultipliedFirst)) {
        RectangleF imageRect = new RectangleF (0, 0, newWidth, newHeight);
        // draw the image
        context.DrawImage (imageRect, originalImage.CGImage);
        UIKit.UIImage resizedImage = UIKit.UIImage.FromImage (context.ToImage ());
        // save the image as a jpeg
        return resizedImage.AsJPEG ().ToArray ();
    }
}
// Renders the Skia view into an unmanaged pixel buffer at native pixel density,
// then blits that buffer into the current UIKit graphics context.
public override void Draw(CGRect rect) {
    base.Draw (rect);
    // Render at the screen's native scale so the output is crisp on retina.
    var screenScale = UIScreen.MainScreen.Scale;
    var width = (int)(Bounds.Width * screenScale);
    var height = (int)(Bounds.Height * screenScale);
    // Shared pixel buffer: Skia writes into it, CoreGraphics reads from it.
    IntPtr buff = System.Runtime.InteropServices.Marshal.AllocCoTaskMem (width * height * 4);
    try {
        using (var surface = SKSurface.Create (width, height, SKColorType.N_32, SKAlphaType.Premul, buff, width * 4)) {
            var skcanvas = surface.Canvas;
            // Scale so skiaView can draw in logical (point) coordinates.
            skcanvas.Scale ((float)screenScale, (float)screenScale);
            using (new SKAutoCanvasRestore (skcanvas, true)) {
                skiaView.SendDraw (skcanvas);
            }
        }
        // Wrap the same buffer in a CG context and snapshot it as a CGImage.
        using (var colorSpace = CGColorSpace.CreateDeviceRGB ())
        using (var bContext = new CGBitmapContext (buff, width, height, 8, width * 4, colorSpace, (CGImageAlphaInfo)bitmapInfo))
        using (var image = bContext.ToImage ())
        using (var context = UIGraphics.GetCurrentContext ()) {
            // flip the image for CGContext.DrawImage
            context.TranslateCTM (0, Frame.Height);
            context.ScaleCTM (1, -1);
            context.DrawImage (Bounds, image);
        }
    } finally {
        // Always release the unmanaged buffer, even if drawing throws.
        if (buff != IntPtr.Zero)
            System.Runtime.InteropServices.Marshal.FreeCoTaskMem (buff);
    }
}
// Loads an image file into a DemoImage (tightly packed RGBA8 byte buffer) for
// OpenGL upload, optionally flipping it vertically to match GL's bottom-left
// origin. Returns null if the file has no decodable CGImage.
public static unsafe DemoImage LoadImage(string filePathName, bool flipVertical)
{
    var imageClass = UIImage.FromFile(filePathName);
    var cgImage = imageClass.CGImage;
    if(cgImage == null)
    {
        return null;
    }
    var image = new DemoImage();
    image.Width = cgImage.Width;
    image.Height = cgImage.Height;
    image.RowByteSize = image.Width * 4; // 4 bytes/pixel, no row padding
    image.Data = new byte[cgImage.Height * image.RowByteSize];
    image.Format = PixelInternalFormat.Rgba;
    image.Type = PixelType.UnsignedByte;
    // Pin the managed buffer and let CoreGraphics decode straight into it.
    // NOTE(review): cgImage.ColorSpace could be null for mask-like images — confirm inputs.
    fixed (byte *ptr = &image.Data [0]){
        using(var context = new CGBitmapContext((IntPtr) ptr, image.Width, image.Height, 8, image.RowByteSize, cgImage.ColorSpace, CGImageAlphaInfo.NoneSkipLast))
        {
            // Copy blend mode: the destination starts as garbage, so skip blending.
            context.SetBlendMode(CGBlendMode.Copy);
            if(flipVertical)
            {
                context.TranslateCTM(0.0f, (float)image.Height);
                context.ScaleCTM(1.0f, -1.0f);
            }
            context.DrawImage(new RectangleF(0f, 0f, image.Width, image.Height), cgImage);
        }
    }
    return image;
}
// Demonstrates constructing an offscreen 32-bit ARGB bitmap context; nothing
// is drawn — the using block is a placeholder for drawing code.
public override void ViewDidLoad ()
{
    base.ViewDidLoad ();

    // no data: IntPtr.Zero asks CoreGraphics to allocate the pixels itself
    IntPtr data = IntPtr.Zero;

    // size (fixed for the demo; could be View.Frame.Size)
    SizeF bitmapSize = new SizeF (200, 300);

    // 32bit aRGB = 8 bits per component * 4 components per pixel
    int bitsPerComponent = 8;
    int bytesPerRow = (int)(4 * bitmapSize.Width);

    // plain device RGB color space, premultiplied alpha first (aRGB)
    CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB ();
    CGImageAlphaInfo alphaType = CGImageAlphaInfo.PremultipliedFirst;

    using (CGBitmapContext context = new CGBitmapContext (data,
        (int)bitmapSize.Width, (int)bitmapSize.Height,
        bitsPerComponent, bytesPerRow, colorSpace, alphaType)) {
        // draw whatever here.
    }
}
/// <summary>
/// Decodes the image into a BGRA32 buffer and forwards the pixel data to the
/// byte-array luminance calculation overload.
/// </summary>
/// <param name="d">Source image.</param>
private void CalculateLuminance(UIImage d)
{
    var imageRef = d.CGImage;
    var width = imageRef.Width;
    var height = imageRef.Height;

    var rawData = Marshal.AllocHGlobal(height * width * 4);
    try
    {
        // Premultiplied-first + 32-bit little endian = BGRA byte order,
        // matching BitmapFormat.BGRA32 below.
        var flags = CGBitmapFlags.PremultipliedFirst | CGBitmapFlags.ByteOrder32Little;
        // FIX: the color space and bitmap context were previously never
        // disposed, leaking native resources on every frame analyzed.
        using (var colorSpace = CGColorSpace.CreateDeviceRGB())
        using (var context = new CGBitmapContext(rawData, width, height, 8, 4 * width, colorSpace, (CGImageAlphaInfo)flags))
        {
            context.DrawImage(new RectangleF(0.0f, 0.0f, (float)width, (float)height), imageRef);
        }
        var pixelData = new byte[height * width * 4];
        Marshal.Copy(rawData, pixelData, 0, pixelData.Length);
        CalculateLuminance(pixelData, BitmapFormat.BGRA32);
    }
    finally
    {
        Marshal.FreeHGlobal(rawData);
    }
}
/// <summary>
/// Gets a single image frame from sample buffer.
/// </summary>
/// <returns>The image from sample buffer.</returns>
/// <param name="sampleBuffer">Sample buffer.</param>
private UIImage GetImageFromSampleBuffer (CMSampleBuffer sampleBuffer)
{
    // Get a pixel buffer from the sample buffer
    using (var pixelBuffer = sampleBuffer.GetImageBuffer () as CVPixelBuffer) {
        // Lock the base address so CoreGraphics can read the raw pixels.
        pixelBuffer.Lock ((CVPixelBufferLock)0);
        try {
            // BGRA, 32-bit little endian, premultiplied alpha.
            var flags = CGBitmapFlags.PremultipliedFirst | CGBitmapFlags.ByteOrder32Little;
            using (var cs = CGColorSpace.CreateDeviceRGB ())
            using (var context = new CGBitmapContext (pixelBuffer.BaseAddress,
                       pixelBuffer.Width, pixelBuffer.Height, 8,
                       pixelBuffer.BytesPerRow, cs, (CGImageAlphaInfo)flags))
            using (var cgImage = context.ToImage ()) {
                return UIImage.FromImage (cgImage);
            }
        } finally {
            // FIX: Unlock was previously only reached on the success path; if
            // anything between Lock and Unlock threw, the buffer stayed locked.
            pixelBuffer.Unlock ((CVPixelBufferLock)0);
        }
    }
}
/// <summary>
/// Loads a bundled image and re-renders it through a bitmap context;
/// JPEGs are returned untouched. Returns null if the bundle has no such image.
/// </summary>
static UIImage ProcessImageNamed (string imageName)
{
    var image = UIImage.FromBundle (imageName);
    if (image == null)
        return null;

    if (imageName.Contains (".jpg"))
        return image;

    UIImage resultingImage;
    using (var colorSpace = CGColorSpace.CreateDeviceRGB ()) {
        // Create a bitmap context of the same size as the image.
        var imageWidth = (int)image.Size.Width;
        var imageHeight = (int)image.Size.Height;
        // FIX: bytesPerRow was previously imageHeight * 4, which is wrong for
        // any non-square image (corrupt rows or a rejected context). A row is
        // imageWidth pixels * 4 bytes per pixel.
        using (var bitmapContext = new CGBitmapContext (null, imageWidth, imageHeight, 8, imageWidth * 4, colorSpace, CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Little)) {
            // Draw the image into the graphics context.
            if (image.CGImage == null)
                throw new Exception ("Unable to get a CGImage from a UIImage.");
            bitmapContext.DrawImage (new CGRect (CGPoint.Empty, image.Size), image.CGImage);
            using (var newImageRef = bitmapContext.ToImage ()) {
                resultingImage = new UIImage (newImageRef);
            }
        }
    }
    return resultingImage;
}
// Decodes the image at 'path' into the 'data' RGBA byte buffer and sets the
// 'width'/'height' fields, flipped vertically for OpenGL texture upload.
// (Method name typo is kept — callers depend on it.)
// FIX: the NSImage, CGImage and CGBitmapContext were previously never
// disposed, leaking native resources on every call.
void GetImagaDataFromPath (string path)
{
    using (var src = new NSImage (path)) {
        var rect = CGRect.Empty;
        using (CGImage image = src.AsCGImage (ref rect, null, null)) {
            width = (int)image.Width;
            height = (int)image.Height;
            data = new byte[width * height * 4];
            CGImageAlphaInfo ai = CGImageAlphaInfo.PremultipliedLast;
            using (var context = new CGBitmapContext (data, width, height, 8, 4 * width, image.ColorSpace, ai)) {
                // Core Graphics referential is upside-down compared to OpenGL referential
                // Flip the Core Graphics context here
                // An alternative is to use flipped OpenGL texture coordinates when drawing textures
                context.TranslateCTM (0, height);
                context.ScaleCTM (1, -1);
                // Set the blend mode to copy before drawing since the previous contents of memory aren't used.
                // This avoids unnecessary blending.
                context.SetBlendMode (CGBlendMode.Copy);
                context.DrawImage (new CGRect (0, 0, width, height), image);
            }
        }
    }
}
// Builds a full-screen image view and fills it with an offscreen rendering
// that centers the 512px app icon.
public override void ViewDidLoad () {
    base.ViewDidLoad ();
    // set the background color of the view to white
    View.BackgroundColor = UIColor.White;
    // instantiate a new image view that takes up the whole screen and add it to
    // the view hierarchy; the negative y offset slides it under the nav bar
    RectangleF imageViewFrame = new RectangleF (0, -NavigationController.NavigationBar.Frame.Height, View.Frame.Width, View.Frame.Height);
    imageView = new UIImageView (imageViewFrame);
    View.AddSubview (imageView);
    // create our offscreen bitmap context
    // size
    SizeF bitmapSize = new SizeF (View.Frame.Size);
    // NOTE(review): the CGColorSpace created inline here is never disposed.
    using (CGBitmapContext context = new CGBitmapContext (IntPtr.Zero
        , (int)bitmapSize.Width, (int)bitmapSize.Height, 8
        , (int)(4 * bitmapSize.Width), CGColorSpace.CreateDeviceRGB ()
        , CGImageAlphaInfo.PremultipliedFirst)) {
        // declare vars
        UIImage apressImage = UIImage.FromFile ("Images/Icons/512_icon.png");
        // center the icon; view sizes are points while CGImage sizes are pixels
        PointF imageOrigin = new PointF ((imageView.Frame.Width / 2) - (apressImage.CGImage.Width / 2), (imageView.Frame.Height / 2) - (apressImage.CGImage.Height / 2));
        RectangleF imageRect = new RectangleF (imageOrigin.X, imageOrigin.Y, apressImage.CGImage.Width, apressImage.CGImage.Height);
        // draw the image
        context.DrawImage (imageRect, apressImage.CGImage);
        // output the drawing to the view
        imageView.Image = UIImage.FromImage (context.ToImage ());
    }
}
// Draws our animation path on the background image, just to show it
protected void DrawPathAsBackground ()
{
    // offscreen 32-bit ARGB context the size of the view
    var bitmapSize = new SizeF (View.Frame.Size);
    using (var context = new CGBitmapContext (
               IntPtr.Zero,
               (int)bitmapSize.Width, (int)bitmapSize.Height,
               8, (int)(4 * bitmapSize.Width),
               CGColorSpace.CreateDeviceRGB (),
               CGImageAlphaInfo.PremultipliedFirst)) {

        // Bitmap space has a bottom-left origin; flip the y axis and shift up
        // so the path draws in view (top-left origin) coordinates.
        var flip = CGAffineTransform.MakeIdentity ();
        flip.Scale (1f, -1f);
        flip.Translate (0, View.Frame.Height);
        context.ConcatCTM (flip);

        // stroke the animation path in light gray
        context.AddPath (animationPath);
        context.SetStrokeColor (UIColor.LightGray.CGColor);
        context.SetLineWidth (3f);
        context.StrokePath ();

        // hand the rendered bitmap to the background image view
        backgroundImage.Image = UIImage.FromImage (context.ToImage ());
    }
}
// Returns a new image consisting of the original with a faded reflection of
// height (image.Height * reflectionFraction) appended below it.
public static UIImage AddImageReflection(UIImage image, float reflectionFraction)
{
    int reflectionHeight = (int) (image.Size.Height * reflectionFraction);

    // Create a gradient bitmap that will mask the reflection to produce the
    // 'fade'. CGImage.WithMask stretches the mask as required, so a 1-pixel-wide
    // gradient is enough. A mask must be in the gray colorspace.
    var colorSpace = CGColorSpace.CreateDeviceGray ();

    // Create the 1 x reflectionHeight bitmap context for the gradient.
    var gradientBitmapContext = new CGBitmapContext (IntPtr.Zero, 1, reflectionHeight, 8, 0, colorSpace, CGImageAlphaInfo.None);

    // define the start and end grayscale values (with the alpha, even though
    // our bitmap context doesn't support alpha the gradient requires it)
    float [] colors = { 0, 1, 1, 1 };

    // Create the CGGradient and then release the gray color space
    var grayScaleGradient = new CGGradient (colorSpace, colors, null);
    colorSpace.Dispose ();

    // create the start and end points for the gradient vector (straight down)
    var gradientStartPoint = new PointF (0, reflectionHeight);
    var gradientEndPoint = PointF.Empty;

    // draw the gradient into the gray bitmap context
    gradientBitmapContext.DrawLinearGradient (grayScaleGradient, gradientStartPoint, gradientEndPoint, CGGradientDrawingOptions.DrawsAfterEndLocation);
    grayScaleGradient.Dispose ();

    // Add a black fill with 50% opacity so the reflection is darker overall.
    gradientBitmapContext.SetGrayFillColor (0, 0.5f);
    gradientBitmapContext.FillRect (new RectangleF (0, 0, 1, reflectionHeight));

    // convert the context into a CGImage and release the context
    var gradientImageMask = gradientBitmapContext.ToImage ();
    gradientBitmapContext.Dispose ();

    // create an image by masking the bitmap of the main content with the
    // gradient, then release the gradient bitmap
    var reflectionImage = image.CGImage.WithMask (gradientImageMask);
    gradientImageMask.Dispose ();

    // Compose the final image: original on top, masked reflection below it.
    var size = new SizeF (image.Size.Width, image.Size.Height + reflectionHeight);
    UIGraphics.BeginImageContext (size);
    image.Draw (PointF.Empty);
    var context = UIGraphics.GetCurrentContext ();
    context.DrawImage (new RectangleF (0, image.Size.Height, image.Size.Width, reflectionHeight), reflectionImage);
    var result = UIGraphics.GetImageFromCurrentImageContext ();
    UIGraphics.EndImageContext ();
    reflectionImage.Dispose ();

    return result;
}
// Renders the source into an 8-bit grayscale (no alpha) bitmap context and
// wraps the result back into a UIImage.
UIImage ConvertToGrayScale(UIImage image)
{
    var bounds = new RectangleF (0, 0, (float)image.Size.Width, (float)image.Size.Height);

    // bytesPerRow == 0 lets CoreGraphics choose the stride.
    using (var graySpace = CGColorSpace.CreateDeviceGray ())
    using (var ctx = new CGBitmapContext (IntPtr.Zero, (int) bounds.Width, (int) bounds.Height, 8, 0, graySpace, CGImageAlphaInfo.None))
    {
        ctx.DrawImage (bounds, image.CGImage);
        using (var grayRef = ctx.ToImage ())
        {
            return new UIImage (grayRef);
        }
    }
}
// Draws the image into a gray, alpha-less bitmap context; the snapshot of that
// context is the grayscale version of the input.
public static UIImage ConvertToGrayScale (UIImage image)
{
    var frame = new CGRect (CGPoint.Empty, image.Size);
    int pixelWidth = (int) frame.Width;
    int pixelHeight = (int) frame.Height;

    using (var colorSpace = CGColorSpace.CreateDeviceGray ())
    using (var context = new CGBitmapContext (IntPtr.Zero, pixelWidth, pixelHeight, 8, 0, colorSpace, CGImageAlphaInfo.None)) {
        context.DrawImage (frame, image.CGImage);
        using (var grayImage = context.ToImage ())
            return new UIImage (grayImage);
    }
}
/// <summary>
/// Releases the bitmap context and the unmanaged buffer backing it.
/// Safe to call more than once.
/// </summary>
public void Dispose()
{
    if (null != _context)
    {
        _context.Dispose();
        _context = null;
        if (_buffer != IntPtr.Zero)
        {
            Marshal.FreeHGlobal(_buffer);
            // FIX: zero the pointer after freeing so a stale reference (or a
            // future code path that frees on a different condition) cannot
            // double-free the buffer.
            _buffer = IntPtr.Zero;
        }
    }
}
// Returns a dimx x dimy placeholder image filled with 50%-alpha red.
static UIImage MakeEmpty ()
{
    using (var cs = CGColorSpace.CreateDeviceRGB ()){
        using (var bit = new CGBitmapContext (IntPtr.Zero, dimx, dimy, 8, 0, cs, CGImageAlphaInfo.PremultipliedFirst)){
            // FIX: this previously called SetStrokeColor, which FillRect
            // ignores (FillRect uses the fill color), so the chosen color was
            // dead and the rect was filled with the context's default color.
            bit.SetFillColor (1, 0, 0, 0.5f);
            bit.FillRect (new RectangleF (0, 0, dimx, dimy));
            return UIImage.FromImage (bit.ToImage ());
        }
    }
}
/// <summary>
/// Creates an offscreen 32-bit bitmap canvas whose pixel dimensions are the
/// logical <paramref name="size"/> scaled by <paramref name="scale"/>.
/// </summary>
/// <param name="size">Logical canvas size.</param>
/// <param name="scale">Logical-to-pixel scale factor.</param>
/// <param name="transparency">True for a premultiplied alpha channel; false to
/// keep 4 bytes/pixel but ignore the alpha byte.</param>
public IImageCanvas CreateImageCanvas(Size size, double scale = 1.0, bool transparency = true)
{
    var pixelWidth = (int)Math.Ceiling (size.Width * scale);
    var pixelHeight = (int)Math.Ceiling (size.Height * scale);
    var bitmapInfo = transparency ? CGImageAlphaInfo.PremultipliedFirst : CGImageAlphaInfo.NoneSkipFirst;
    var bitsPerComp = 8;
    // FIX: this was a ternary with two identical branches; the stride is
    // 4 bytes per pixel whether or not the canvas is transparent.
    var bytesPerRow = 4 * pixelWidth;
    var colorSpace = CGColorSpace.CreateDeviceRGB ();
    var bitmap = new CGBitmapContext (IntPtr.Zero, pixelWidth, pixelHeight, bitsPerComp, bytesPerRow, colorSpace, bitmapInfo);
    return new CGBitmapContextCanvas (bitmap, scale);
}
// Wraps the Mat's pixel buffer in a CGBitmapContext and snapshots it as a CGImage.
// Assumes the Mat is 8-bit, 4-channel, with tightly packed rows (width * 4 bytes).
// NOTE(review): the method name says Rgba but the parameter is named bgraByte —
// the actual channel order depends on how the Mat was filled; confirm at call sites.
private static CGImage RgbaByteMatToCGImage(Mat bgraByte)
{
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        bgraByte.DataPointer,
        bgraByte.Width,
        bgraByte.Height,
        8,                      // bits per component
        bgraByte.Width*4,       // bytes per row
        cspace,
        CGImageAlphaInfo.PremultipliedLast))
        return context.ToImage();
}
/// <summary>
/// Allocates the offscreen premultiplied-ARGB rendering context for the given
/// size, with the row stride aligned to a 16-pixel boundary.
/// </summary>
private void PrepareImage (Size size)
{
    int bitsPerComponent = 8;
    int width = size.Width;
    // FIX: this was "(width + 15)", which pads every row by a fixed 15 pixels.
    // The "+15" idiom only makes sense with the mask that rounds the width up
    // to the next multiple of 16 for row alignment.
    int paddedWidth = (width + 15) & ~15;
    int bytesPerPixel = 4;
    int bytesPerRow = paddedWidth * bytesPerPixel;
    using (var colorSpace = CGColorSpace.CreateDeviceRGB ())
    {
        // null data pointer: CoreGraphics allocates and owns the pixel buffer.
        renderContext = new CGBitmapContext (null, size.Width, size.Height, bitsPerComponent, bytesPerRow, colorSpace, CGImageAlphaInfo.PremultipliedFirst);
    }
}
/// <summary>
/// Ends the image drawing session: restores whichever graphics context was
/// active before BeginImageContext and drops all static state so the held
/// native objects can be collected.
/// </summary>
public static void EndImageContext()
{
    // Reinstate the previously active context, if any.
    if (PreviousContext != null)
        NSGraphicsContext.CurrentContext = PreviousContext;

    // Clear the session state.
    Context = null;
    PreviousContext = null;
    ColorSpace = null;
    ImageSize = CGSize.Empty;
}
/// <summary>
/// Re-renders the source image into a bitmap context backed by the target
/// color space, letting CoreGraphics perform the conversion.
/// Note: uses CGImageAlphaInfo.None, so alpha is discarded.
/// </summary>
public static UIImage ToColorSpace(UIImage source, CGColorSpace colorSpace)
{
    CGRect bounds = new CGRect(0, 0, source.Size.Width, source.Size.Height);
    int pixelWidth = (int)bounds.Width;
    int pixelHeight = (int)bounds.Height;

    // bytesPerRow == 0: CoreGraphics picks a suitable stride.
    using (var converter = new CGBitmapContext(IntPtr.Zero, pixelWidth, pixelHeight, 8, 0, colorSpace, CGImageAlphaInfo.None))
    {
        converter.DrawImage(bounds, source.CGImage);
        using (var convertedRef = converter.ToImage())
            return new UIImage(convertedRef);
    }
}
// Reads the texture's pixel data back into `data`. For Color requests the
// texture is re-decoded from the bundle via UIImage/CoreGraphics, resized to
// power-of-two (capped at 1024), converted to the surface format, and then
// unpacked pixel by pixel.
public void GetData <T>(T[] data)
{
    // TODO Causese AV on Device, but not simulator
    // NOTE(review): `data` is used here BEFORE the null check below — the
    // ArgumentException is unreachable for a null argument; confirm intent.
    GetData<T>(0, null, data, 0, Width * Height);
    if (data == null)
    {
        throw new ArgumentException("data cannot be null");
    }
    int sz = 0;
    byte[] pixel = new byte[4];
    int pos;
    IntPtr pixelOffset;
    // Get the Color values
    if ((typeof(T) == typeof(Color)))
    {
        // Load up texture into memory
        UIImage uiImage = UIImage.FromBundle(this.Name);
        if (uiImage == null)
        {
            throw new ContentLoadException("Error loading file via UIImage: " + Name);
        }
        CGImage image = uiImage.CGImage;
        if (image == null)
        {
            throw new ContentLoadException("Error with CGIamge: " + Name);
        }
        int width, height, i;
        CGContext context = null;
        IntPtr imageData;
        CGColorSpace colorSpace;
        IntPtr tempData;
        bool hasAlpha;
        CGImageAlphaInfo info;
        CGAffineTransform transform;
        Size imageSize;
        SurfaceFormat pixelFormat;
        bool sizeToFit = false;
        info = image.AlphaInfo;
        hasAlpha = ((info == CGImageAlphaInfo.PremultipliedLast) || (info == CGImageAlphaInfo.PremultipliedFirst) || (info == CGImageAlphaInfo.Last) || (info == CGImageAlphaInfo.First) ? true : false);
        // Choose the surface format: RGBA/RGB when a color space exists,
        // alpha-only when the image has no color space (pure mask).
        if (image.ColorSpace != null)
        {
            if (hasAlpha)
            {
                pixelFormat = SurfaceFormat.Rgba32;
            }
            else
            {
                pixelFormat = SurfaceFormat.Rgb32;
            }
        }
        else
        {
            pixelFormat = SurfaceFormat.Alpha8;
        }
        imageSize = new Size(image.Width, image.Height);
        transform = CGAffineTransform.MakeIdentity();
        // Round width up to the next power of two (GL ES 1.x requirement).
        width = imageSize.Width;
        if ((width != 1) && ((width & (width - 1)) != 0))
        {
            i = 1;
            while ((sizeToFit ? 2 * i : i) < width)
            {
                i *= 2;
            }
            width = i;
        }
        // Same power-of-two rounding for the height.
        height = imageSize.Height;
        if ((height != 1) && ((height & (height - 1)) != 0))
        {
            i = 1;
            while ((sizeToFit ? 2 * i : i) < height)
            {
                i *= 2;
            }
            height = i;
        }
        // TODO: kMaxTextureSize = 1024 — halve until within the max size.
        while ((width > 1024) || (height > 1024))
        {
            width /= 2;
            height /= 2;
            transform = CGAffineTransform.MakeScale(0.5f, 0.5f);
            imageSize.Width /= 2;
            imageSize.Height /= 2;
        }
        // Allocate an unmanaged buffer and a matching bitmap context for the
        // chosen format. Disposing the color space right away is fine — the
        // context retains it.
        switch (pixelFormat)
        {
            case SurfaceFormat.Rgba32:
                colorSpace = CGColorSpace.CreateDeviceRGB();
                imageData = Marshal.AllocHGlobal(height * width * 4);
                context = new CGBitmapContext(imageData, width, height, 8, 4 * width, colorSpace, CGImageAlphaInfo.PremultipliedLast);
                colorSpace.Dispose();
                break;
            case SurfaceFormat.Rgb32:
                colorSpace = CGColorSpace.CreateDeviceRGB();
                imageData = Marshal.AllocHGlobal(height * width * 4);
                context = new CGBitmapContext(imageData, width, height, 8, 4 * width, colorSpace, CGImageAlphaInfo.NoneSkipLast);
                colorSpace.Dispose();
                break;
            case SurfaceFormat.Alpha8:
                imageData = Marshal.AllocHGlobal(height * width);
                context = new CGBitmapContext(imageData, width, height, 8, width, null, CGImageAlphaInfo.Only);
                break;
            default:
                throw new NotSupportedException("Invalid pixel format");
        }
        context.ClearRect(new RectangleF(0, 0, width, height));
        // Anchor the (possibly smaller) image at the top of the POT canvas.
        context.TranslateCTM(0, height - imageSize.Height);
        if (!transform.IsIdentity)
        {
            context.ConcatCTM(transform);
        }
        context.DrawImage(new RectangleF(0, 0, image.Width, image.Height), image);
        // Convert 32-bit RGBA down to packed 16-bit RGB565 for Rgb32 surfaces.
        if (pixelFormat == SurfaceFormat.Rgb32)
        {
            tempData = Marshal.AllocHGlobal(height * width * 2);
            int d32;
            short d16;
            int inPixel32Count = 0, outPixel16Count = 0;
            for (i = 0; i < width * height; ++i, inPixel32Count += sizeof(int))
            {
                d32 = Marshal.ReadInt32(imageData, inPixel32Count);
                // 5 bits red, 6 bits green, 5 bits blue.
                short R = (short)((((d32 >> 0) & 0xFF) >> 3) << 11);
                short G = (short)((((d32 >> 8) & 0xFF) >> 2) << 5);
                short B = (short)((((d32 >> 16) & 0xFF) >> 3) << 0);
                d16 = (short)(R | G | B);
                Marshal.WriteInt16(tempData, outPixel16Count, d16);
                outPixel16Count += sizeof(short);
            }
            Marshal.FreeHGlobal(imageData);
            imageData = tempData;
        }
        // Loop through and extract the data into the caller's array.
        // NOTE(review): the 2-byte and 1-byte formats below still Marshal.Copy
        // 4 bytes per pixel and read channels as if 8-bit — this over-reads at
        // the end of the buffer and does not actually decode 565/4444/5551;
        // confirm whether these paths are ever hit.
        for (int y = 0; y < imageSize.Height; y++)
        {
            for (int x = 0; x < imageSize.Width; x++)
            {
                var result = new Color(0, 0, 0, 0);
                switch (pixelFormat)
                {
                    case SurfaceFormat.Rgba32: //kTexture2DPixelFormat_RGBA8888
                    case SurfaceFormat.Dxt3:
                        sz = 4;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = pixel[3];
                        break;
                    case SurfaceFormat.Bgra4444: //kTexture2DPixelFormat_RGBA4444
                        sz = 2;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = pixel[3];
                        break;
                    case SurfaceFormat.Bgra5551: //kTexture2DPixelFormat_RGB5A1
                        sz = 2;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = pixel[3];
                        break;
                    case SurfaceFormat.Rgb32: // kTexture2DPixelFormat_RGB565
                        sz = 2;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = 255;
                        break;
                    case SurfaceFormat.Alpha8: // kTexture2DPixelFormat_A8
                        sz = 1;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.A = pixel[0];
                        break;
                    default:
                        throw new NotSupportedException("Texture format");
                }
                data[((y * imageSize.Width) + x)] = (T)(object)result;
            }
        }
        context.Dispose();
        Marshal.FreeHGlobal(imageData);
    }
}
/// <summary>
/// Read a NSImage, convert the data and save it to the native pointer.
/// </summary>
/// <typeparam name="T">The type of the data to convert the image pixel values to. e.g. "float" or "byte"</typeparam>
/// <param name="image">The input image</param>
/// <param name="dest">The native pointer where the image pixels values will be saved to.</param>
/// <param name="inputHeight">The height of the image, must match the height requirement for the tensor</param>
/// <param name="inputWidth">The width of the image, must match the width requirement for the tensor</param>
/// <param name="inputMean">The mean value, it will be subtracted from the input image pixel values</param>
/// <param name="scale">The scale, after mean is subtracted, the scale will be used to multiply the pixel values</param>
/// <param name="flipUpSideDown">If true, the image needs to be flipped up side down</param>
/// <param name="swapBR">If true, will flip the Blue channel with the Red. e.g. If false, the tensor's color channel order will be RGB. If true, the tensor's color channel order will be BGR</param>
/// <returns>The number of bytes written.</returns>
public static int ReadImageToTensor <T>(
    NSImage image,
    IntPtr dest,
    int inputHeight = -1,
    int inputWidth = -1,
    float inputMean = 0.0f,
    float scale = 1.0f,
    bool flipUpSideDown = false,
    bool swapBR = false)
    where T : struct
{
    // Default the tensor dimensions to the image's own size.
    if (inputHeight <= 0)
    {
        inputHeight = (int)image.Size.Height;
    }
    if (inputWidth <= 0)
    {
        inputWidth = (int)image.Size.Width;
    }

    int[] intValues = new int[inputWidth * inputHeight];
    // Pin the managed buffer so CoreGraphics can render directly into it.
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    try
    {
        // Render (and implicitly resize) the image into the RGBA int buffer.
        using (CGImage cgimage = image.CGImage)
        using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
        using (CGBitmapContext context = new CGBitmapContext(
            handle.AddrOfPinnedObject(),
            inputWidth,
            inputHeight,
            8,
            inputWidth * 4,
            cspace,
            CGImageAlphaInfo.PremultipliedLast
        ))
        {
            context.DrawImage(new CGRect(new CGPoint(), new CGSize(inputWidth, inputHeight)), cgimage);
        }

        // Convert the 32-bit pixels to the destination element type.
        if (typeof(T) == typeof(float))
        {
            return Emgu.TF.Util.Toolbox.Pixel32ToPixelFloat(
                handle.AddrOfPinnedObject(),
                inputWidth,
                inputHeight,
                inputMean,
                scale,
                flipUpSideDown,
                swapBR,
                dest);
        }
        else if (typeof(T) == typeof(byte))
        {
            return Emgu.TF.Util.Toolbox.Pixel32ToPixelByte(
                handle.AddrOfPinnedObject(),
                inputWidth,
                inputHeight,
                inputMean,
                scale,
                flipUpSideDown,
                swapBR,
                dest);
        }
        else
        {
            throw new NotImplementedException(String.Format("Destination data type {0} is not supported.", typeof(T).ToString()));
        }
    }
    finally
    {
        // FIX: Free() was previously only reached on the success path; an
        // exception (including the unsupported-type throw above) leaked the
        // pinned handle and kept the buffer pinned forever.
        handle.Free();
    }
}
/// <summary>
/// Encode a raw pixel buffer as JPEG bytes using the current platform's imaging API.
/// </summary>
/// <param name="rawPixel">The raw pixel data; its layout must match the channel count required by the active platform branch.</param>
/// <param name="width">The image width in pixels.</param>
/// <param name="height">The image height in pixels.</param>
/// <param name="channels">Channels per pixel: Android and OSX require 4, iOS requires 3.</param>
/// <returns>The JPEG-encoded bytes.</returns>
/// <exception cref="NotImplementedException">Thrown when the channel count is not supported on the current platform.</exception>
public static byte[] PixelToJpeg(byte[] rawPixel, int width, int height, int channels)
{
#if __ANDROID__
    if (channels != 4)
    {
        throw new NotImplementedException("Only 4 channel pixel input is supported.");
    }
    using (Bitmap bitmap = Bitmap.CreateBitmap(width, height, Bitmap.Config.Argb8888))
    using (MemoryStream ms = new MemoryStream())
    {
        // Copy the raw pixels straight into the bitmap's locked backing store.
        IntPtr ptr = bitmap.LockPixels();
        //GCHandle handle = GCHandle.Alloc(colors, GCHandleType.Pinned);
        Marshal.Copy(rawPixel, 0, ptr, rawPixel.Length);
        bitmap.UnlockPixels();
        // Compress at quality level 90 into the memory stream.
        bitmap.Compress(Bitmap.CompressFormat.Jpeg, 90, ms);
        return (ms.ToArray());
    }
#elif __IOS__
    if (channels != 3)
    {
        throw new NotImplementedException("Only 3 channel pixel input is supported.");
    }
    System.Drawing.Size sz = new System.Drawing.Size(width, height);
    // Pin the managed buffer so CoreGraphics can read it in place.
    GCHandle handle = GCHandle.Alloc(rawPixel, GCHandleType.Pinned);
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        sz.Width,
        sz.Height,
        8,             // bits per component
        sz.Width * 3,  // bytes per row: 3 bytes per pixel
        cspace,
        CGImageAlphaInfo.PremultipliedLast))
    using (CGImage cgImage = context.ToImage())
    using (UIImage newImg = new UIImage(cgImage))
    {
        // NOTE(review): the handle is freed while cgImage/newImg are still alive —
        // assumed safe because ToImage() snapshots the buffer; confirm.
        handle.Free();
        var jpegData = newImg.AsJPEG();
        // Copy the NSData contents into a managed array before the using blocks dispose it.
        byte[] raw = new byte[jpegData.Length];
        System.Runtime.InteropServices.Marshal.Copy(jpegData.Bytes, raw, 0, (int)jpegData.Length);
        return (raw);
    }
#elif __UNIFIED__ //OSX
    if (channels != 4)
    {
        throw new NotImplementedException("Only 4 channel pixel input is supported.");
    }
    System.Drawing.Size sz = new System.Drawing.Size(width, height);
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        rawPixel,
        sz.Width,
        sz.Height,
        8,             // bits per component
        sz.Width * 4,  // bytes per row: 4 bytes per pixel
        cspace,
        CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big))
    using (CGImage cgImage = context.ToImage())
    using (NSBitmapImageRep newImg = new NSBitmapImageRep(cgImage))
    {
        // Ask AppKit for the JPEG representation, then copy into a managed array.
        var jpegData = newImg.RepresentationUsingTypeProperties(NSBitmapImageFileType.Jpeg);
        byte[] raw = new byte[jpegData.Length];
        System.Runtime.InteropServices.Marshal.Copy(jpegData.Bytes, raw, 0, (int)jpegData.Length);
        return (raw);
    }
#else
    throw new NotImplementedException("Not Implemented");
#endif
}
/// <summary>
/// AVCapture delegate callback: copies each camera frame's pixels into a managed buffer,
/// rebuilds an image from them via a CGBitmapContext, and hands the result to
/// drawFacesCallback. Frames are dropped while a previous frame is still being processed
/// (gated by isProcessingBuffer) or while face detection is disabled.
/// </summary>
public override void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
{
    Console.WriteLine("Got Sample Fromn Buffer");
    // Gate: drop this frame unless detection is enabled and no other frame is in flight.
    lock (FaceDetectionViewController.lockerobj)
    {
        if (!FaceDetectionViewController.processingFaceDetection || isProcessingBuffer)
        {
            sampleBuffer.Dispose();
            return;
        }
        isProcessingBuffer = true;
    }
    try
    {
        CIImage ciImage = null;
        CGRect cleanAperture = default(CGRect);
        using (sampleBuffer)
        {
            //CVPixelBuffer renderedOutputPixelBuffer = null;
            byte[] managedArray;
            int width;
            int height;
            int bytesPerRow;
            using (var pixelBuffer = sampleBuffer.GetImageBuffer() as CVPixelBuffer)
            {
                // Lock the pixel buffer, copy its bytes out to managed memory, then unlock.
                pixelBuffer.Lock(CVPixelBufferLock.None);
                CVPixelFormatType ft = pixelBuffer.PixelFormatType;
                IntPtr baseAddress = pixelBuffer.BaseAddress;
                bytesPerRow = (int)pixelBuffer.BytesPerRow;
                width = (int)pixelBuffer.Width;
                height = (int)pixelBuffer.Height;
                //managedArray = new byte[width * height];
                // Sized by rows * stride (bytesPerRow may exceed width * bytesPerPixel).
                managedArray = new byte[pixelBuffer.Height * pixelBuffer.BytesPerRow];
                Marshal.Copy(baseAddress, managedArray, 0, managedArray.Length);
                pixelBuffer.Unlock(CVPixelBufferLock.None);
            }
            // NOTE(review): sampleBuffer is disposed here, again by the enclosing using,
            // and again in the finally block — presumably Dispose is idempotent; confirm.
            sampleBuffer.Dispose();
            //int bytesPerPixel = 4;
            //int bytesPerRow = bytesPerPixel * width;
            int bitsPerComponent = 8;
            //CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB();
            //CGContext context = new CGBitmapContext(managedArray, width, height,
            //bitsPerComponent, bytesPerRow, colorSpace,
            //CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big);
            // BGRA-style layout: premultiplied first, 32-bit little endian.
            var flags = CGBitmapFlags.PremultipliedFirst | CGBitmapFlags.ByteOrder32Little;
            // Create a CGImage on the RGB colorspace from the configured parameter above
            using (var cs = CGColorSpace.CreateDeviceRGB())
            {
                using (var context = new CGBitmapContext(managedArray, width, height, bitsPerComponent, bytesPerRow, cs, (CGImageAlphaInfo)flags))
                {
                    // Snapshot the rebuilt frame; disposed later by the using (ciImage) below.
                    ciImage = context.ToImage();
                    //using (CGImage cgImage = context.ToImage()) {
                    //    //pixelBuffer.Unlock(CVPixelBufferLock.None);
                    //    //return UIImage.FromImage(cgImage);
                    //}
                    // NOTE(review): redundant explicit Dispose inside the using block.
                    context.Dispose();
                }
            }
            //var a = new CMSampleBuffer.;
            //using () {
            //}
            //UIImage image = GetImageFromSampleBuffer(sampleBuffer);
            //if (!FaceMainController.isFaceRegistered || isProcessing)
            //{
            //    // Console.WriteLine("OutputDelegate - Exit (isProcessing: " + DateTime.Now);
            //    sampleBuffer.Dispose();
            //    Console.WriteLine("processing..");
            //    return;
            //}
            //Console.WriteLine("IsProcessing: ");
            //isProcessing = true;
            // Normalize the connection before handing the frame downstream.
            connection.VideoOrientation = AVCaptureVideoOrientation.Portrait;
            connection.VideoScaleAndCropFactor = 1.0f;
            //var bufferCopy = sampleBuffer.c
            //UIImage image = GetImageFromSampleBuffer(sampleBuffer);
            //ciImage = CIImage.FromCGImage(image.CGImage);
            //cleanAperture = sampleBuffer.GetVideoFormatDescription().GetCleanAperture(false);
        }
        /*For Face Detection using iOS APIs*/
        //DispatchQueue.MainQueue.DispatchAsync(() =>
        using (ciImage)
        {
            if (ciImage != null)
            {
                // Hand the frame to the UI layer for face overlay drawing.
                drawFacesCallback(UIImage.FromImage(ciImage), cleanAperture);
            }
        }
        // Re-open the gate for the next frame.
        isProcessingBuffer = false;
        //Console.WriteLine(ciImage);
        //Task.Run(async () => {
        //    try {
        //        //if (ViewController.IsFaceDetected)
        //        //{
        //        Console.WriteLine("face detected: ");
        //        imageAnalyzer = new ImageAnalyzer(() => Task.FromResult<Stream>(image.ResizeImageWithAspectRatio(300, 400).AsPNG().AsStream()));
        //        await ProcessCameraCapture(imageAnalyzer);
        //        //}
        //    }
        //    finally {
        //        imageAnalyzer = null;
        //        isProcessing = false;
        //        Console.WriteLine("OUT ");
        //    }
        //});
    }
    catch (Exception ex)
    {
        Console.Write(ex);
    }
    finally
    {
        sampleBuffer.Dispose();
    }
}
/// <summary>
/// Read an UIImage, convert the data and save it to the native pointer
/// </summary>
/// <typeparam name="T">The type of the data to convert the image pixel values to. e.g. "float" or "byte"</typeparam>
/// <param name="imageOriginal">The input image</param>
/// <param name="dest">The native pointer where the image pixels values will be saved to.</param>
/// <param name="inputHeight">The height of the image, must match the height requirement for the tensor</param>
/// <param name="inputWidth">The width of the image, must match the width requirement for the tensor</param>
/// <param name="inputMean">The mean value, it will be subtracted from the input image pixel values</param>
/// <param name="scale">The scale, after mean is subtracted, the scale will be used to multiply the pixel values</param>
/// <param name="flipUpSideDown">If true, the image needs to be flipped up side down</param>
/// <param name="swapBR">If true, will flip the Blue channel with the Red. e.g. If false, the tensor's color channel order will be RGB. If true, the tensor's color channel order will be BGR</param>
public static void ReadImageToTensor<T>(
    UIImage imageOriginal,
    IntPtr dest,
    int inputHeight = -1,
    int inputWidth = -1,
    float inputMean = 0.0f,
    float scale = 1.0f,
    bool flipUpSideDown = false,
    bool swapBR = false) where T : struct
{
    if (flipUpSideDown)
    {
        throw new NotImplementedException("Flip Up Side Down is Not implemented");
    }

    // Scale only when an explicit tensor size was requested; otherwise use the image as-is.
    UIImage image;
    if (inputHeight > 0 || inputWidth > 0)
    {
        image = imageOriginal.Scale(new CGSize(inputWidth, inputHeight));
    }
    else
    {
        image = imageOriginal;
    }

    try
    {
        // 32-bit render target (one int per pixel) and the 3-channel float output buffer.
        int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
        float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];

        System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(
            intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
        try
        {
            using (CGImage cgimage = image.CGImage)
            using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
            using (CGBitmapContext context = new CGBitmapContext(
                handle.AddrOfPinnedObject(),
                (nint)image.Size.Width,
                (nint)image.Size.Height,
                8,                             // bits per component
                (nint)image.Size.Width * 4,    // bytes per row (4 bytes per pixel)
                cspace,
                CGImageAlphaInfo.PremultipliedLast))
            {
                context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
            }
        }
        finally
        {
            // Always unpin the buffer, even if CoreGraphics throws
            // (the original leaked the pinned handle on that path).
            handle.Free();
        }

        // Unpack each packed pixel into mean-subtracted, scaled float channels.
        if (swapBR)
        {
            for (int i = 0; i < intValues.Length; ++i)
            {
                int val = intValues[i];
                floatValues[i * 3 + 0] = ((val & 0xFF) - inputMean) * scale;
                floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
                floatValues[i * 3 + 2] = (((val >> 16) & 0xFF) - inputMean) * scale;
            }
        }
        else
        {
            for (int i = 0; i < intValues.Length; ++i)
            {
                int val = intValues[i];
                floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
                floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
                floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
            }
        }

        if (typeof(T) == typeof(float))
        {
            Marshal.Copy(floatValues, 0, dest, floatValues.Length);
        }
        else if (typeof(T) == typeof(byte))
        {
            // Copy float to bytes (truncating cast, matching the original behavior).
            byte[] byteValues = new byte[floatValues.Length];
            for (int i = 0; i < floatValues.Length; i++)
            {
                byteValues[i] = (byte)floatValues[i];
            }
            Marshal.Copy(byteValues, 0, dest, byteValues.Length);
        }
        else
        {
            throw new NotImplementedException(String.Format("Destination data type {0} is not supported.", typeof(T).ToString()));
        }
    }
    finally
    {
        // Dispose the scaled copy, but never the caller's original image.
        if (image != imageOriginal)
        {
            image.Dispose();
        }
    }
}
public static XIR.Image RemoteRepresentation(this CGBitmapContext context) { return(RemoteRepresentation(context.ToImage())); }
/// <summary>
/// Renders the device onto the canvas: a cached background bitmap (regenerated when the
/// canvas size or device layout changes) plus one droplet per occupied place, with the
/// droplet's rendering selected by the place's current animation state.
/// </summary>
public /*interface DevicePainter*/ void Draw(KDeviceHandler.KDevice device, int canvasX, int canvasY, int canvasWidth, int canvasHeight)
{
    // Fit the device into the canvas and derive stroke widths from the pad radius.
    (float margin, float padRadius, float deviceWidth, float deviceHeight) = device.FittedDimensions(canvasWidth, canvasHeight);
    float strokeWidth = padRadius / 10.0f;
    float accentStrokeWidth = 1.5f * strokeWidth;
    float textStrokeWidth = accentStrokeWidth / 2.0f;
    // Center the device within the canvas.
    float deviceX = canvasX + (canvasWidth - deviceWidth) / 2.0f;
    float deviceY = canvasY + (canvasHeight - deviceHeight) / 2.0f;
    // don't lock device here, it will dedlock
    // Regenerate the cached background when the device layout or canvas size changed.
    if (device.sizeChanged || cachedBackground == null || cachedBackground.Width != canvasWidth || cachedBackground.Height != canvasHeight)
    {
        cachedBackground = CG.Bitmap(canvasWidth, canvasHeight);
        DrawDevice(device, cachedBackground, canvasX, canvasY, canvasWidth, canvasHeight, deviceX, deviceY, deviceWidth, deviceHeight, padRadius, margin, device.pinchPan);
        device.sizeChanged = false;
    }
    if (!device.sizeChanged)
    {
        // copy cachedBackground to canvas DOH!
        canvas.AsBitmapContext().DrawImage(new CGRect(0, 0, cachedBackground.Width, cachedBackground.Height), cachedBackground.ToImage()); // do not apply pinchPan: background bitmap is alread scaled by it
    }
    if (device.displayPinchOrigin)
    {
        // same as: GraphSharp.GraphLayout.CanvasDrawCircle(canvas, pinchOrigin, 20, false, SKColors.LightGray);
        // same as: using (var paint = FillPaint(SKColors.LightGray)) { painter.DrawCircle(pinchOrigin, 20, paint); }
        //using (var paint = new SKPaint()) {
        //    paint.TextSize = 10; paint.IsAntialias = true; paint.Color = SKColors.LightGray; paint.IsStroke = false;
        //    canvas.DrawCircle(device.pinchOrigin.X, device.pinchOrigin.Y, 20, paint);
        //}
    }
    using (var dropletFillPaint = new SKPaint { Style = SKPaintStyle.Fill, Color = device.dropletColor, IsAntialias = true })
    using (var dropletBiggieFillPaint = new SKPaint { Style = SKPaintStyle.Fill, Color = device.dropletBiggieColor, IsAntialias = true })
    {
        KDeviceHandler.Place[,] places = device.places;
        KDeviceHandler.Placement placement = device.placement;
        for (int row = 0; row < places.GetLength(0); row++)
        {
            for (int col = 0; col < places.GetLength(1); col++)
            {
                KDeviceHandler.Place place = places[row, col];
                if (place != null && placement.IsOccupied(place))
                {
                    SampleValue sample = placement.SampleOf(place);
                    // Droplet radius scales with the square root of volume.
                    float volumeRadius = padRadius * (float)Math.Sqrt((sample.Volume()) * 1000000.0); // normal radius = 1μL
                    float diameter = 2 * padRadius;
                    SKPaint fillPaint = dropletFillPaint;
                    bool biggie = false;
                    // Clamp oversized droplets and switch to the "biggie" paint.
                    if (volumeRadius > 2 * padRadius)
                    {
                        biggie = true;
                        volumeRadius = 2 * padRadius;
                        fillPaint = dropletBiggieFillPaint;
                    }
                    // Centers of this pad and its four neighbors (used by pull/split animations).
                    SKPoint here = new SKPoint(deviceX + margin + padRadius + col * diameter, deviceY + margin + padRadius + row * diameter);
                    SKPoint rht = new SKPoint(deviceX + margin + padRadius + (col + 1) * diameter, deviceY + margin + padRadius + row * diameter);
                    SKPoint lft = new SKPoint(deviceX + margin + padRadius + (col - 1) * diameter, deviceY + margin + padRadius + row * diameter);
                    SKPoint bot = new SKPoint(deviceX + margin + padRadius + col * diameter, deviceY + margin + padRadius + (row + 1) * diameter);
                    SKPoint top = new SKPoint(deviceX + margin + padRadius + col * diameter, deviceY + margin + padRadius + (row - 1) * diameter);
                    string label = sample.symbol.Raw(); // sample.FormatSymbol(placement.StyleOf(sample, style))
                    // Dispatch on the place's animation state. Pull* stretches the droplet
                    // toward a neighbor; Split* shows it dividing; Size* shrinks it in place.
                    if (place.IsAnimation(KDeviceHandler.Animation.None))
                    {
                        DrawDroplet(canvas, label, biggie, here, padRadius, volumeRadius, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.SizeHalf))
                    {
                        DrawDroplet(canvas, label, biggie, here, padRadius, volumeRadius / 2, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.SizeQuarter))
                    {
                        DrawDroplet(canvas, label, biggie, here, padRadius, volumeRadius / 4, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.PullRht))
                    {
                        DrawDropletPulledHor(canvas, label, biggie, here, rht, KDeviceHandler.Direction.Rht, padRadius, volumeRadius * 5 / 6, volumeRadius * 5 / 12, volumeRadius * 1 / 3, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.SplitRht))
                    {
                        DrawDropletPulledHor(canvas, label, biggie, here, rht, KDeviceHandler.Direction.Rht, padRadius, volumeRadius * 2 / 3, volumeRadius * 1 / 3, volumeRadius * 2 / 3, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.PullLft))
                    {
                        DrawDropletPulledHor(canvas, label, biggie, lft, here, KDeviceHandler.Direction.Lft, padRadius, volumeRadius * 1 / 3, volumeRadius * 5 / 12, volumeRadius * 5 / 6, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.SplitLft))
                    {
                        DrawDropletPulledHor(canvas, label, biggie, lft, here, KDeviceHandler.Direction.Lft, padRadius, volumeRadius * 2 / 3, volumeRadius * 1 / 3, volumeRadius * 2 / 3, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.PullBot))
                    {
                        DrawDropletPulledVer(canvas, label, biggie, here, bot, KDeviceHandler.Direction.Bot, padRadius, volumeRadius * 5 / 6, volumeRadius * 5 / 12, volumeRadius * 1 / 3, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.SplitBot))
                    {
                        DrawDropletPulledVer(canvas, label, biggie, here, bot, KDeviceHandler.Direction.Bot, padRadius, volumeRadius * 2 / 3, volumeRadius * 1 / 3, volumeRadius * 2 / 3, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.PullTop))
                    {
                        DrawDropletPulledVer(canvas, label, biggie, top, here, KDeviceHandler.Direction.Top, padRadius, volumeRadius * 1 / 3, volumeRadius * 5 / 12, volumeRadius * 5 / 6, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                    if (place.IsAnimation(KDeviceHandler.Animation.SplitTop))
                    {
                        DrawDropletPulledVer(canvas, label, biggie, top, here, KDeviceHandler.Direction.Top, padRadius, volumeRadius * 2 / 3, volumeRadius * 1 / 3, volumeRadius * 2 / 3, textStrokeWidth, fillPaint, strokeWidth, accentStrokeWidth, device.pinchPan);
                    }
                }
            }
        }
    }
    canvas.Flush();
}
/// <summary>
/// Returns a copy of <paramref name="originalImage"/> re-rendered so its pixel data is in
/// the Up orientation (the EXIF orientation is baked into the pixels). Returns the input
/// unchanged when it is already Up.
/// </summary>
/// <param name="originalImage">The image whose orientation should be normalized.</param>
/// <returns>An upright image; the original instance when no work was needed.</returns>
public static UIImage FixOrientation(UIImage originalImage)
{
    // Already upright: nothing to do.
    if (originalImage.Orientation == UIImageOrientation.Up)
    {
        return originalImage;
    }

    // First pass: rotation that undoes the stored orientation.
    CGAffineTransform transform = CGAffineTransform.MakeIdentity();
    switch (originalImage.Orientation)
    {
        case UIImageOrientation.Down:
        case UIImageOrientation.DownMirrored:
            transform.Rotate((float)Math.PI);
            transform.Translate(originalImage.Size.Width, originalImage.Size.Height);
            break;
        case UIImageOrientation.Left:
        case UIImageOrientation.LeftMirrored:
            transform.Rotate((float)Math.PI / 2);
            transform.Translate(originalImage.Size.Width, 0);
            break;
        case UIImageOrientation.Right:
        case UIImageOrientation.RightMirrored:
            transform.Rotate(-(float)Math.PI / 2);
            transform.Translate(0, originalImage.Size.Height);
            break;
        case UIImageOrientation.Up:
        case UIImageOrientation.UpMirrored:
            break;
    }

    // Second pass: horizontal flip for the mirrored variants.
    switch (originalImage.Orientation)
    {
        case UIImageOrientation.UpMirrored:
        case UIImageOrientation.DownMirrored:
            transform.Translate(originalImage.Size.Width, 0);
            transform.Scale(-1, 1);
            break;
        case UIImageOrientation.LeftMirrored:
        case UIImageOrientation.RightMirrored:
            transform.Translate(originalImage.Size.Height, 0);
            transform.Scale(-1, 1);
            break;
        case UIImageOrientation.Up:
        case UIImageOrientation.Down:
        case UIImageOrientation.Left:
        case UIImageOrientation.Right:
            break;
    }

    // Render into a fresh bitmap context. The using blocks guarantee disposal even when
    // CoreGraphics throws (the original only disposed on the success path).
    using (var ctx = new CGBitmapContext(IntPtr.Zero,
                                         (nint)originalImage.Size.Width,
                                         (nint)originalImage.Size.Height,
                                         originalImage.CGImage.BitsPerComponent,
                                         originalImage.CGImage.BytesPerRow,
                                         originalImage.CGImage.ColorSpace,
                                         originalImage.CGImage.BitmapInfo))
    {
        ctx.ConcatCTM(transform);
        switch (originalImage.Orientation)
        {
            case UIImageOrientation.Left:
            case UIImageOrientation.LeftMirrored:
            case UIImageOrientation.Right:
            case UIImageOrientation.RightMirrored:
                // 90-degree rotations swap the drawing rect's width and height.
                ctx.DrawImage(new CGRect(0, 0, originalImage.Size.Height, originalImage.Size.Width), originalImage.CGImage);
                break;
            default:
                ctx.DrawImage(new CGRect(0, 0, originalImage.Size.Width, originalImage.Size.Height), originalImage.CGImage);
                break;
        }

        using (var cgImage = ctx.ToImage())
        {
            return UIImage.FromImage(cgImage);
        }
    }
}
/// <summary>
/// Renders a mosaic version of <paramref name="resizedImg"/>: the image is divided into
/// tileSize x tileSize squares and each square is filled with the palette color closest
/// (by squared RGB distance) to the square's average color.
/// </summary>
/// <param name="tileSize">Edge length, in pixels, of each mosaic tile.</param>
/// <param name="colorPalette">Candidate colors; the nearest one is chosen per tile.</param>
/// <param name="resizedImg">Source image whose dimensions determine the output size.</param>
/// <param name="bitmap">Pixel accessor for the source image (provides GetPixel).</param>
/// <returns>A new CGImage containing the mosaic; the caller owns and must dispose it.</returns>
static CGImage applyMosaic(int tileSize, List<Color> colorPalette, UIImage resizedImg, Bitmap bitmap)
{
    int width = tileSize;   // tile width
    int height = tileSize;  // tile height

    // Round the output size down to a whole number of tiles.
    int outWidth = (int)(resizedImg.Size.Width - (resizedImg.Size.Width % width));
    int outHeight = (int)(resizedImg.Size.Height - (resizedImg.Size.Height % height));

    CGImage mosaicImage;
    // -- Initialize buffer. The using blocks fix two leaks in the original:
    // the color space was never disposed and the context leaked on exceptions.
    using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(System.IntPtr.Zero, // data
                                                         outWidth,           // width
                                                         outHeight,          // height
                                                         8,                  // bitsPerComponent
                                                         outWidth * 4,       // bytesPerRow based on pixel width
                                                         colorSpace,         // colorSpace
                                                         CGImageAlphaInfo.NoneSkipFirst)) // bitmapInfo
    {
        for (int yb = 0; yb < outHeight / height; yb++)
        {
            for (int xb = 0; xb < outWidth / width; xb++)
            {
                // -- Average the source colors over the corresponding mosaic square.
                int r_avg = 0;
                int g_avg = 0;
                int b_avg = 0;
                for (int y = yb * height; y < (yb * height) + height; y++)
                {
                    for (int x = xb * width; x < (xb * width) + width; x++)
                    {
                        Color c = bitmap.GetPixel(x, y); // Retrieve color values of the source image
                        r_avg += c.R;
                        g_avg += c.G;
                        b_avg += c.B;
                    }
                }
                // Make average of R, G and B over the tile area.
                r_avg = r_avg / (width * height);
                g_avg = g_avg / (width * height);
                b_avg = b_avg / (width * height);

                // Find the nearest color in the palette (squared RGB distance;
                // the original's Math.Abs around a sum of squares was redundant).
                Color mosaicColor = new Color();
                double minDistance = int.MaxValue;
                foreach (Color c in colorPalette)
                {
                    double distance = Math.Pow(r_avg - c.R, 2) + Math.Pow(g_avg - c.G, 2) + Math.Pow(b_avg - c.B, 2);
                    if (distance < minDistance)
                    {
                        mosaicColor = c;
                        minDistance = distance;
                    }
                }

                // Fill the tile exactly once. The original re-filled the same rectangle
                // width*height times from inside a redundant double loop (O(tile^2) waste).
                context.SetFillColor(new CGColor(mosaicColor.R / 255f, mosaicColor.G / 255f, mosaicColor.B / 255f));
                context.FillRect(new RectangleF(xb * width, yb * height, width, height));
            }
        }

        // -- Snapshot the buffer; the CGImage is independent of the context's lifetime.
        mosaicImage = context.ToImage();
    }
    return mosaicImage;
}
public override object ConvertToBitmap(ImageDescription idesc, double scaleFactor, ImageFormat format) { double width = idesc.Size.Width; double height = idesc.Size.Height; int pixelWidth = (int)(width * scaleFactor); int pixelHeight = (int)(height * scaleFactor); if (idesc.Backend is CustomImage) { var flags = CGBitmapFlags.ByteOrderDefault; int bytesPerRow; switch (format) { case ImageFormat.ARGB32: bytesPerRow = pixelWidth * 4; flags |= CGBitmapFlags.PremultipliedFirst; break; case ImageFormat.RGB24: bytesPerRow = pixelWidth * 3; flags |= CGBitmapFlags.None; break; default: throw new NotImplementedException("ImageFormat: " + format.ToString()); } var bmp = new CGBitmapContext(IntPtr.Zero, pixelWidth, pixelHeight, 8, bytesPerRow, Util.DeviceRGBColorSpace, flags); bmp.TranslateCTM(0, pixelHeight); bmp.ScaleCTM((float)scaleFactor, (float)-scaleFactor); var ctx = new CGContextBackend { Context = bmp, Size = new CGSize((nfloat)width, (nfloat)height), InverseViewTransform = bmp.GetCTM().Invert(), ScaleFactor = scaleFactor }; var ci = (CustomImage)idesc.Backend; ci.DrawInContext(ctx, idesc); using (var img = new NSImage(((CGBitmapContext)bmp).ToImage(), new CGSize(pixelWidth, pixelHeight))) using (var imageData = img.AsTiff()) { var imageRep = (NSBitmapImageRep)NSBitmapImageRep.ImageRepFromData(imageData); var im = new NSImage(); im.AddRepresentation(imageRep); im.Size = new CGSize((nfloat)width, (nfloat)height); bmp.Dispose(); return(im); } } else { NSImage img = (NSImage)idesc.Backend; NSBitmapImageRep bitmap = img.Representations().OfType <NSBitmapImageRep> ().FirstOrDefault(); if (bitmap == null) { using (var imageData = img.AsTiff()) { var imageRep = (NSBitmapImageRep)NSBitmapImageRep.ImageRepFromData(imageData); var im = new NSImage(); im.AddRepresentation(imageRep); im.Size = new CGSize((nfloat)width, (nfloat)height); return(im); } } return(idesc.Backend); } }
/// <summary>
/// Center-crops <paramref name="source"/> to the given aspect ratio and applies a corner
/// transformation (cut or rounded) to each corner independently, returning a new NSImage.
/// </summary>
/// <param name="source">The source image.</param>
/// <param name="topLeftCornerSize">Top-left corner size as a percentage of the mean of the output dimensions.</param>
/// <param name="topRightCornerSize">Top-right corner size, same units.</param>
/// <param name="bottomLeftCornerSize">Bottom-left corner size, same units.</param>
/// <param name="bottomRightCornerSize">Bottom-right corner size, same units.</param>
/// <param name="cornersTransformType">Flags selecting cut/rounded per corner.</param>
/// <param name="cropWidthRatio">Target aspect ratio width component.</param>
/// <param name="cropHeightRatio">Target aspect ratio height component.</param>
/// <returns>The transformed image.</returns>
public static NSImage ToTransformedCorners(NSImage source, double topLeftCornerSize, double topRightCornerSize, double bottomLeftCornerSize, double bottomRightCornerSize,
    CornerTransformType cornersTransformType, double cropWidthRatio, double cropHeightRatio)
{
    double sourceWidth = source.CGImage.Width;
    double sourceHeight = source.CGImage.Height;

    // Shrink one dimension so the output matches the requested aspect ratio.
    double desiredWidth = sourceWidth;
    double desiredHeight = sourceHeight;
    double desiredRatio = cropWidthRatio / cropHeightRatio;
    double currentRatio = sourceWidth / sourceHeight;
    if (currentRatio > desiredRatio)
    {
        desiredWidth = (cropWidthRatio * sourceHeight / cropHeightRatio);
    }
    else if (currentRatio < desiredRatio)
    {
        desiredHeight = (cropHeightRatio * sourceWidth / cropWidthRatio);
    }

    // Corner sizes are percentages of the mean output dimension.
    topLeftCornerSize = topLeftCornerSize * (desiredWidth + desiredHeight) / 2 / 100;
    topRightCornerSize = topRightCornerSize * (desiredWidth + desiredHeight) / 2 / 100;
    bottomLeftCornerSize = bottomLeftCornerSize * (desiredWidth + desiredHeight) / 2 / 100;
    bottomRightCornerSize = bottomRightCornerSize * (desiredWidth + desiredHeight) / 2 / 100;

    // Center-crop offsets.
    float cropX = (float)((sourceWidth - desiredWidth) / 2);
    float cropY = (float)((sourceHeight - desiredHeight) / 2);

    const int bytesPerPixel = 4;
    int width = (int)desiredWidth;
    int height = (int)desiredHeight;
    var bytes = new byte[width * height * bytesPerPixel];
    int bytesPerRow = bytesPerPixel * width;
    const int bitsPerComponent = 8;

    // The color space is now disposed via using (the original leaked it).
    using (var colorSpace = CGColorSpace.CreateDeviceRGB())
    using (var context = new CGBitmapContext(bytes, width, height, bitsPerComponent, bytesPerRow,
        colorSpace, CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big))
    {
        // Build the clip path: each corner is either cut (straight chamfer),
        // rounded (quad curve), or square (plain corner point).
        context.BeginPath();
        using (var path = new NSBezierPath())
        {
            // TopLeft
            if (cornersTransformType.HasFlag(CornerTransformType.TopLeftCut))
            {
                path.MoveTo(new CGPoint(0, topLeftCornerSize));
                path.LineTo(new CGPoint(topLeftCornerSize, 0));
            }
            else if (cornersTransformType.HasFlag(CornerTransformType.TopLeftRounded))
            {
                path.MoveTo(new CGPoint(0, topLeftCornerSize));
                path.QuadCurveToPoint(new CGPoint(topLeftCornerSize, 0), new CGPoint(0, 0));
            }
            else
            {
                path.MoveTo(new CGPoint(0, 0));
            }

            // TopRight
            if (cornersTransformType.HasFlag(CornerTransformType.TopRightCut))
            {
                path.LineTo(new CGPoint(desiredWidth - topRightCornerSize, 0));
                path.LineTo(new CGPoint(desiredWidth, topRightCornerSize));
            }
            else if (cornersTransformType.HasFlag(CornerTransformType.TopRightRounded))
            {
                path.LineTo(new CGPoint(desiredWidth - topRightCornerSize, 0));
                path.QuadCurveToPoint(new CGPoint(desiredWidth, topRightCornerSize), new CGPoint(desiredWidth, 0));
            }
            else
            {
                path.LineTo(new CGPoint(desiredWidth, 0));
            }

            // BottomRight
            if (cornersTransformType.HasFlag(CornerTransformType.BottomRightCut))
            {
                path.LineTo(new CGPoint(desiredWidth, desiredHeight - bottomRightCornerSize));
                path.LineTo(new CGPoint(desiredWidth - bottomRightCornerSize, desiredHeight));
            }
            else if (cornersTransformType.HasFlag(CornerTransformType.BottomRightRounded))
            {
                path.LineTo(new CGPoint(desiredWidth, desiredHeight - bottomRightCornerSize));
                path.QuadCurveToPoint(new CGPoint(desiredWidth - bottomRightCornerSize, desiredHeight), new CGPoint(desiredWidth, desiredHeight));
            }
            else
            {
                path.LineTo(new CGPoint(desiredWidth, desiredHeight));
            }

            // BottomLeft
            if (cornersTransformType.HasFlag(CornerTransformType.BottomLeftCut))
            {
                path.LineTo(new CGPoint(bottomLeftCornerSize, desiredHeight));
                path.LineTo(new CGPoint(0, desiredHeight - bottomLeftCornerSize));
            }
            else if (cornersTransformType.HasFlag(CornerTransformType.BottomLeftRounded))
            {
                path.LineTo(new CGPoint(bottomLeftCornerSize, desiredHeight));
                path.QuadCurveToPoint(new CGPoint(0, desiredHeight - bottomLeftCornerSize), new CGPoint(0, desiredHeight));
            }
            else
            {
                path.LineTo(new CGPoint(0, desiredHeight));
            }

            path.ClosePath();
            context.AddPath(path.ToCGPath());
            context.Clip();
        }

        // Draw offset by the crop so only the centered region lands in the context.
        var drawRect = new CGRect(-cropX, -cropY, sourceWidth, sourceHeight);
        context.DrawImage(drawRect, source.CGImage);

        using (var output = context.ToImage())
        {
            return (new NSImage(output, CGSize.Empty));
        }
    }
}
/// <summary>
/// Bakes the image's EXIF orientation into its pixels (re-rendering unless it is already
/// Up) and encodes it as JPEG, stepping the quality down in 0.05 increments if encoding
/// fails at the requested quality. Returns the JPEG bytes as a rewound MemoryStream.
/// </summary>
/// <param name="image">The image to normalize and encode.</param>
/// <param name="compressionQuality">Initial JPEG quality as a percentage (0-100).</param>
/// <returns>A MemoryStream positioned at 0 containing the JPEG data.</returns>
/// <exception cref="NullReferenceException">Thrown when the image cannot be encoded at any quality.</exception>
public static Stream RotateImage(UIImage image, int compressionQuality)
{
    UIImage imageToReturn = null;
    if (image.Orientation == UIImageOrientation.Up)
    {
        // Already upright: encode as-is.
        imageToReturn = image;
    }
    else
    {
        // First pass: rotation that undoes the stored orientation.
        var transform = CGAffineTransform.MakeIdentity();
        switch (image.Orientation)
        {
            case UIImageOrientation.Down:
            case UIImageOrientation.DownMirrored:
                transform.Rotate((float)Math.PI);
                transform.Translate(image.Size.Width, image.Size.Height);
                break;
            case UIImageOrientation.Left:
            case UIImageOrientation.LeftMirrored:
                transform.Rotate((float)Math.PI / 2);
                transform.Translate(image.Size.Width, 0);
                break;
            case UIImageOrientation.Right:
            case UIImageOrientation.RightMirrored:
                transform.Rotate(-(float)Math.PI / 2);
                transform.Translate(0, image.Size.Height);
                break;
            case UIImageOrientation.Up:
            case UIImageOrientation.UpMirrored:
                break;
        }
        // Second pass: horizontal flip for the mirrored variants.
        switch (image.Orientation)
        {
            case UIImageOrientation.UpMirrored:
            case UIImageOrientation.DownMirrored:
                transform.Translate(image.Size.Width, 0);
                transform.Scale(-1, 1);
                break;
            case UIImageOrientation.LeftMirrored:
            case UIImageOrientation.RightMirrored:
                transform.Translate(image.Size.Height, 0);
                transform.Scale(-1, 1);
                break;
            case UIImageOrientation.Up:
            case UIImageOrientation.Down:
            case UIImageOrientation.Left:
            case UIImageOrientation.Right:
                break;
        }
        // Re-render through a bitmap context with the corrective transform applied.
        using (var context = new CGBitmapContext(IntPtr.Zero,
                                                 (int)image.Size.Width,
                                                 (int)image.Size.Height,
                                                 image.CGImage.BitsPerComponent,
                                                 image.CGImage.BytesPerRow,
                                                 image.CGImage.ColorSpace,
                                                 image.CGImage.BitmapInfo))
        {
            context.ConcatCTM(transform);
            switch (image.Orientation)
            {
                case UIImageOrientation.Left:
                case UIImageOrientation.LeftMirrored:
                case UIImageOrientation.Right:
                case UIImageOrientation.RightMirrored:
                    // 90-degree rotations swap the drawing rect's width and height.
                    context.DrawImage(new RectangleF(PointF.Empty, new SizeF((float)image.Size.Height, (float)image.Size.Width)), image.CGImage);
                    break;
                default:
                    context.DrawImage(new RectangleF(PointF.Empty, new SizeF((float)image.Size.Width, (float)image.Size.Height)), image.CGImage);
                    break;
            }
            using (var imageRef = context.ToImage())
            {
                imageToReturn = new UIImage(imageRef, 1, UIImageOrientation.Up);
            }
        }
    }
    var finalQuality = compressionQuality / 100f;
    var imageData = imageToReturn.AsJPEG(finalQuality);
    //continue to move down quality , rare instances
    while (imageData == null && finalQuality > 0)
    {
        finalQuality -= 0.05f;
        imageData = imageToReturn.AsJPEG(finalQuality);
    }
    if (imageData == null)
    {
        throw new NullReferenceException("Unable to convert image to jpeg, please ensure file exists or lower quality level");
    }
    // Copy the NSData into a managed stream and rewind it for the caller.
    var stream = new MemoryStream();
    imageData.AsStream().CopyTo(stream);
    stream.Position = 0;
    imageData.Dispose();
    return (stream);
}
/// <summary>
/// Rasterizes a single character into a 32-bit pixel array, with an outline effect:
/// the glyph is first stamped in the border color at every offset in a 5x5 grid
/// (except the center), then stamped once in the fill color at the center (2,2).
/// The rendered pixels are copied out through a CGBitmapContext into cdata.
/// </summary>
/// <param name="c">The character to render.</param>
/// <param name="cdata">Receives one packed 32-bit pixel per point (RGBA byte order per the PremultipliedLast context).</param>
/// <param name="fill">The glyph fill color.</param>
/// <param name="border">The outline color.</param>
/// <returns>The size in pixels of the rendered cell (string size plus a 4px outline margin).</returns>
public Size GetCharDataDC(char c, out UInt32[] cdata, Color fill, Color border)
{
    NSString str = new NSString(c.ToString());
    var size = str.StringSize(font);
    // Pad by 4px so the offset stamps (outline) are not clipped.
    size.Width += 4;
    size.Height += 4;
    // Render at 1.0 scale into an offscreen image context.
    UIGraphics.BeginImageContextWithOptions(size, false, 1.0f);
    UIGraphics.GetCurrentContext().SetFillColor((float)border.R / 255.0f, (float)border.G / 255.0f,
                                                (float)border.B / 255.0f, (float)border.A / 255.0f);
    // Stamp the glyph in the border color at each surrounding offset of the 5x5 grid.
    str.DrawString(new System.Drawing.PointF(0, 0), font);
    str.DrawString(new System.Drawing.PointF(0, 1), font);
    str.DrawString(new System.Drawing.PointF(0, 2), font);
    str.DrawString(new System.Drawing.PointF(0, 3), font);
    str.DrawString(new System.Drawing.PointF(0, 4), font);
    str.DrawString(new System.Drawing.PointF(1, 0), font);
    str.DrawString(new System.Drawing.PointF(1, 1), font);
    str.DrawString(new System.Drawing.PointF(1, 2), font);
    str.DrawString(new System.Drawing.PointF(1, 3), font);
    str.DrawString(new System.Drawing.PointF(1, 4), font);
    str.DrawString(new System.Drawing.PointF(3, 0), font);
    str.DrawString(new System.Drawing.PointF(3, 1), font);
    str.DrawString(new System.Drawing.PointF(3, 2), font);
    str.DrawString(new System.Drawing.PointF(3, 3), font);
    str.DrawString(new System.Drawing.PointF(3, 4), font);
    str.DrawString(new System.Drawing.PointF(4, 0), font);
    str.DrawString(new System.Drawing.PointF(4, 1), font);
    str.DrawString(new System.Drawing.PointF(4, 2), font);
    str.DrawString(new System.Drawing.PointF(4, 3), font);
    str.DrawString(new System.Drawing.PointF(4, 4), font);
    str.DrawString(new System.Drawing.PointF(2, 0), font);
    str.DrawString(new System.Drawing.PointF(2, 1), font);
    //str.DrawString(new System.Drawing.PointF(2,2),font);
    str.DrawString(new System.Drawing.PointF(2, 3), font);
    str.DrawString(new System.Drawing.PointF(2, 4), font);
    // Center stamp in the fill color produces the glyph body over the outline.
    UIGraphics.GetCurrentContext().SetFillColor((float)fill.R / 255.0f, (float)fill.G / 255.0f,
                                                (float)fill.B / 255.0f, (float)fill.A / 255.0f);
    str.DrawString(new System.Drawing.PointF(2, 2), font);
    UIImage img = UIGraphics.GetImageFromCurrentImageContext();
    UIGraphics.EndImageContext();
    //
    int width = (int)size.Width;
    int height = (int)size.Height;
    cdata = new UInt32[width * height];
    var gdata = new byte[width * height * 4];
    // Re-draw the rendered image into a byte-backed bitmap context to read its pixels.
    // NOTE(review): gbmp/colorSpace are disposed manually (not exception-safe) and
    // img is never disposed — consider using blocks.
    var colorSpace = CGColorSpace.CreateDeviceRGB();
    var gbmp = new CGBitmapContext(gdata, width, height, 8, width * 4, colorSpace, CGBitmapFlags.PremultipliedLast);
    //gbmp.ClearRect(new RectangleF(0,0,width,height));
    gbmp.DrawImage(new System.Drawing.RectangleF(0, 0, width, height), img.CGImage);
    gbmp.Dispose();
    colorSpace.Dispose();
    // Reinterpret the byte buffer as packed 32-bit pixels and copy into the output array.
    unsafe
    {
        fixed(byte *srcb = gdata)
        fixed(UInt32 * dest = cdata)
        {
            UInt32 *src = (UInt32 *)srcb;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    dest[y * width + x] = src[y * width + x];
                }
            }
        }
    }
    return (new Size(width, height));
}
/// <summary>
/// Lays out and draws a string into a CGBitmapContext using CoreText, honoring the
/// class-level horizontal/vertical alignment fields. Lines are broken to fit the layout
/// rectangle's width; drawing stops when text runs past the rectangle's bottom.
/// </summary>
/// <param name="bitmapContext">The destination bitmap context.</param>
/// <param name="s">The text to draw; empty or null draws nothing.</param>
/// <param name="font">The CoreText font; must not be null.</param>
/// <param name="brush">The text color, applied via the attributed string.</param>
/// <param name="layoutRectangle">The rectangle the text is laid out within.</param>
/// <exception cref="ArgumentNullException">Thrown when font is null.</exception>
internal static void NativeDrawString(CGBitmapContext bitmapContext, string s, CTFont font, CCColor4B brush, RectangleF layoutRectangle)
{
    if (font == null)
    {
        throw new ArgumentNullException("font");
    }
    if (s == null || s.Length == 0)
    {
        return;
    }

    // Undo the context's current transform so text coordinates are in raw bitmap space.
    bitmapContext.ConcatCTM(bitmapContext.GetCTM().Invert());

    // This is not needed here since the color is set in the attributed string.
    //bitmapContext.SetFillColor(brush.R/255f, brush.G/255f, brush.B/255f, brush.A/255f);

    // I think we only Fill the text with no Stroke surrounding
    //bitmapContext.SetTextDrawingMode(CGTextDrawingMode.Fill);

    var attributedString = buildAttributedString(s, font, brush);

    // Work out the geometry
    RectangleF insetBounds = layoutRectangle;
    PointF textPosition = new PointF(insetBounds.X, insetBounds.Y);
    float boundsWidth = insetBounds.Width;

    // Calculate the lines
    int start = 0;
    int length = attributedString.Length;
    var typesetter = new CTTypesetter(attributedString);

    float baselineOffset = 0;

    // First we need to calculate the offset for Vertical Alignment if we
    // are using anything but Top
    if (vertical != CCVerticalTextAlignment.Top)
    {
        // Dry-run pass: accumulate the total typeset height of all lines.
        while (start < length)
        {
            int count = typesetter.SuggestLineBreak(start, boundsWidth);
            var line = typesetter.GetLine(new NSRange(start, count));

            // Create and initialize some values from the bounds.
            float ascent;
            float descent;
            float leading;
            line.GetTypographicBounds(out ascent, out descent, out leading);
            baselineOffset += (float)Math.Ceiling(ascent + descent + leading + 1); // +1 matches best to CTFramesetter's behavior
            line.Dispose();
            start += count;
        }
    }

    // Second pass: actually draw each line.
    start = 0;
    while (start < length && textPosition.Y < insetBounds.Bottom)
    {
        // Now we ask the typesetter to break off a line for us.
        // This also will take into account line feeds embedded in the text.
        // Example: "This is text \n with a line feed embedded inside it"
        int count = typesetter.SuggestLineBreak(start, boundsWidth);
        var line = typesetter.GetLine(new NSRange(start, count));

        // Create and initialize some values from the bounds.
        float ascent;
        float descent;
        float leading;
        line.GetTypographicBounds(out ascent, out descent, out leading);

        // Calculate the string format if need be
        var penFlushness = 0.0f;
        if (horizontal == CCTextAlignment.Right)
        {
            penFlushness = (float)line.GetPenOffsetForFlush(1.0f, boundsWidth);
        }
        else if (horizontal == CCTextAlignment.Center)
        {
            penFlushness = (float)line.GetPenOffsetForFlush(0.5f, boundsWidth);
        }

        // initialize our Text Matrix or we could get trash in here
        var textMatrix = CGAffineTransform.MakeIdentity();

        // Position the baseline according to the vertical alignment
        // (Y is flipped: the context origin is at the bottom).
        if (vertical == CCVerticalTextAlignment.Top)
        {
            textMatrix.Translate(penFlushness, insetBounds.Height - textPosition.Y - (float)Math.Floor(ascent - 1));
        }
        if (vertical == CCVerticalTextAlignment.Center)
        {
            textMatrix.Translate(penFlushness, ((insetBounds.Height / 2) + (baselineOffset / 2)) - textPosition.Y - (float)Math.Floor(ascent - 1));
        }
        if (vertical == CCVerticalTextAlignment.Bottom)
        {
            textMatrix.Translate(penFlushness, baselineOffset - textPosition.Y - (float)Math.Floor(ascent - 1));
        }

        // Set our matrix
        bitmapContext.TextMatrix = textMatrix;

        // and draw the line
        line.Draw(bitmapContext);

        // Move the index beyond the line break.
        start += count;
        textPosition.Y += (float)Math.Ceiling(ascent + descent + leading + 1); // +1 matches best to CTFramesetter's behavior
        line.Dispose();
    }
}
/// <summary>
/// Reads an image file into a new float Tensor of shape [1, height, width, 3],
/// optionally resizing to inputWidth x inputHeight and applying the
/// normalization (pixel - inputMean) * scale per channel (R,G,B order on the
/// bitmap paths).
/// </summary>
/// <param name="fileName">Path of the image file to load.</param>
/// <param name="inputHeight">Target height; &lt;= 0 keeps the original height.</param>
/// <param name="inputWidth">Target width; &lt;= 0 keeps the original width.</param>
/// <param name="inputMean">Mean subtracted from each channel value.</param>
/// <param name="scale">Scale applied after mean subtraction.</param>
/// <param name="status">Optional TF status object (only used by the desktop graph path).</param>
/// <returns>A newly allocated Tensor owned by the caller.</returns>
public static Tensor ReadTensorFromImageFile(String fileName, int inputHeight = -1, int inputWidth = -1, float inputMean = 0.0f, float scale = 1.0f, Status status = null)
{
#if __ANDROID__
    // Android path: decode with BitmapFactory, optionally rescale, then
    // unpack ARGB ints into normalized R,G,B floats.
    Android.Graphics.Bitmap bmp = BitmapFactory.DecodeFile(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        Bitmap resized = Bitmap.CreateScaledBitmap(bmp, inputWidth, inputHeight, false);
        bmp.Dispose();
        bmp = resized;
    }
    int[] intValues = new int[bmp.Width * bmp.Height];
    float[] floatValues = new float[bmp.Width * bmp.Height * 3];
    bmp.GetPixels(intValues, 0, bmp.Width, 0, 0, bmp.Width, bmp.Height);
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        // Bits 16-23 = R, 8-15 = G, 0-7 = B of the packed ARGB pixel.
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    Tensor t = new Tensor(DataType.Float, new int[] { 1, bmp.Height, bmp.Width, 3 });
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, t.DataPointer, floatValues.Length);
    return (t);
#elif __IOS__
    // iOS path: render the (optionally rescaled) UIImage straight into a
    // pinned int[] via a CGBitmapContext, then normalize per channel.
    UIImage image = new UIImage(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        UIImage resized = image.Scale(new CGSize(inputWidth, inputHeight));
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    // Pin the managed array so CoreGraphics can draw directly into it.
    // NOTE(review): handle.Free() is not in a finally block, so a CG
    // exception would leak the pinned handle — consider try/finally.
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    Tensor t = new Tensor(DataType.Float, new int[] { 1, (int)image.Size.Height, (int)image.Size.Width, 3 });
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, t.DataPointer, floatValues.Length);
    return (t);
#else
    if (Emgu.TF.Util.Platform.OperationSystem == OS.Windows)
    {
        // Windows: delegate decode + normalization to the native helper,
        // writing directly into a preallocated tensor.
        Tensor t = new Tensor(DataType.Float, new int[] { 1, (int)inputHeight, (int)inputWidth, 3 });
        NativeImageIO.ReadImageFileToTensor<float>(fileName, t.DataPointer, inputHeight, inputWidth, inputMean, scale);
        return (t);
    }
    else
    {
        //Mac OS or Linux: build a small TF graph that decodes, resizes and
        //normalizes the image, then run it once.
        using (StatusChecker checker = new StatusChecker(status))
        {
            // NOTE(review): graph/session and the intermediate tensors are
            // not disposed here — verify whether the bindings require it.
            var graph = new Graph();
            Operation input = graph.Placeholder(DataType.String);
            Operation jpegDecoder = graph.DecodeJpeg(input, 3); //dimension 3
            Operation floatCaster = graph.Cast(jpegDecoder, DstT: DataType.Float); //cast to float
            Tensor axis = new Tensor(0);
            Operation axisOp = graph.Const(axis, axis.Type, opName: "axis");
            Operation dimsExpander = graph.ExpandDims(floatCaster, axisOp); //turn it to dimension [1,3]
            Operation resized;
            bool resizeRequired = (inputHeight > 0) && (inputWidth > 0);
            if (resizeRequired)
            {
                Tensor size = new Tensor(new int[] { inputHeight, inputWidth }); // new size;
                Operation sizeOp = graph.Const(size, size.Type, opName: "size");
                resized = graph.ResizeBilinear(dimsExpander, sizeOp); //resize image
            }
            else
            {
                resized = dimsExpander;
            }
            Tensor mean = new Tensor(inputMean);
            Operation meanOp = graph.Const(mean, mean.Type, opName: "mean");
            Operation substracted = graph.Sub(resized, meanOp);
            Tensor scaleTensor = new Tensor(scale);
            Operation scaleOp = graph.Const(scaleTensor, scaleTensor.Type, opName: "scale");
            Operation scaled = graph.Mul(substracted, scaleOp);
            Session session = new Session(graph);
            Tensor imageTensor = Tensor.FromString(File.ReadAllBytes(fileName), status);
            Tensor[] imageResults = session.Run(new Output[] { input }, new Tensor[] { imageTensor }, new Output[] { scaled });
            return (imageResults[0]);
        }
    }
#endif
}
/// <summary>
/// Generic variant of ReadTensorFromImageFile: loads an image into a Tensor
/// of shape [1, height, width, 3] with (pixel - inputMean) * scale applied,
/// optionally flipping vertically and/or swapping the B and R channels.
/// </summary>
/// <typeparam name="T">Requested element type; only float uses the TF-graph decode path on desktop.</typeparam>
/// <param name="fileName">Path of the image file to load.</param>
/// <param name="inputHeight">Target height; &lt;= 0 keeps the original height.</param>
/// <param name="inputWidth">Target width; &lt;= 0 keeps the original width.</param>
/// <param name="inputMean">Mean subtracted from each channel value.</param>
/// <param name="scale">Scale applied after mean subtraction.</param>
/// <param name="flipUpSideDown">If true, flip the image vertically (desktop graph path).</param>
/// <param name="swapBR">If true, swap the blue and red channels (desktop graph path).</param>
/// <returns>A newly allocated Tensor owned by the caller.</returns>
public static Tensor ReadTensorFromImageFile<T>(
    String fileName,
    int inputHeight = -1,
    int inputWidth = -1,
    float inputMean = 0.0f,
    float scale = 1.0f,
    bool flipUpSideDown = false,
    bool swapBR = false) where T : struct
{
#if __ANDROID__
    return (NativeReadTensorFromImageFile<T>(fileName, inputHeight, inputWidth, inputMean, scale, flipUpSideDown, swapBR));
#elif __IOS__
    // iOS path: draw the (optionally rescaled) UIImage into a pinned int[]
    // and normalize per channel.
    // NOTE(review): this branch ignores flipUpSideDown and swapBR — confirm
    // whether that is intentional for iOS.
    UIImage image = new UIImage(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        UIImage resized = image.Scale(new CGSize(inputWidth, inputHeight));
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    // Pin the managed buffer so CoreGraphics can render into it directly.
    // NOTE(review): handle.Free() is not in a finally — a CG exception
    // would leak the pinned handle.
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        // Bits 16-23 = R, 8-15 = G, 0-7 = B of the packed pixel.
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    Tensor t = new Tensor(DataType.Float, new int[] { 1, (int)image.Size.Height, (int)image.Size.Width, 3 });
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, t.DataPointer, floatValues.Length);
    return (t);
#else
    FileInfo fi = new FileInfo(fileName);
    String extension = fi.Extension.ToLower();

    //Use tensorflow to decode the following image formats
    if ((typeof(T) == typeof(float)) &&
        (extension.Equals(".jpeg") || extension.Equals(".jpg") || extension.Equals(".png") || extension.Equals(".gif")))
    {
        // Build a one-shot TF graph: decode -> float -> [1,H,W,3] ->
        // optional resize -> normalize -> optional channel swap/flip.
        using (Graph graph = new Graph())
        {
            Operation input = graph.Placeholder(DataType.String);
            //output dimension [height, width, 3] where 3 is the number of channels
            //DecodeJpeg can decode JPEG, PNG and GIF
            Operation jpegDecoder = graph.DecodeJpeg(input, 3);
            Operation floatCaster = graph.Cast(jpegDecoder, DstT: DataType.Float); //cast to float
            Tensor zeroConst = new Tensor(0);
            Operation zeroConstOp = graph.Const(zeroConst, zeroConst.Type, opName: "zeroConstOp");
            Operation dimsExpander = graph.ExpandDims(floatCaster, zeroConstOp); //turn it to dimension [1, height, width, 3]
            Operation resized;
            bool resizeRequired = (inputHeight > 0) && (inputWidth > 0);
            if (resizeRequired)
            {
                Tensor size = new Tensor(new int[] { inputHeight, inputWidth }); // new size;
                Operation sizeOp = graph.Const(size, size.Type, opName: "size");
                resized = graph.ResizeBilinear(dimsExpander, sizeOp); //resize image
            }
            else
            {
                resized = dimsExpander;
            }
            Tensor mean = new Tensor(inputMean);
            Operation meanOp = graph.Const(mean, mean.Type, opName: "mean");
            Operation subtracted = graph.Sub(resized, meanOp);
            Tensor scaleTensor = new Tensor(scale);
            Operation scaleOp = graph.Const(scaleTensor, scaleTensor.Type, opName: "scale");
            Operation scaled = graph.Mul(subtracted, scaleOp);
            // ReverseV2 on axis 3 (channels) swaps B and R.
            Operation swapedBR;
            if (swapBR)
            {
                Tensor threeConst = new Tensor(new int[] { 3 });
                Operation threeConstOp = graph.Const(threeConst, threeConst.Type, "threeConstOp");
                swapedBR = graph.ReverseV2(scaled, threeConstOp, "swapBR");
            }
            else
            {
                swapedBR = scaled;
            }
            // ReverseV2 on axis 1 (rows) flips the image vertically.
            Operation flipped;
            if (flipUpSideDown)
            {
                Tensor oneConst = new Tensor(new int[] { 1 });
                Operation oneConstOp = graph.Const(oneConst, oneConst.Type, "oneConstOp");
                flipped = graph.ReverseV2(swapedBR, oneConstOp, "flipUpSideDownOp");
            }
            else
            {
                flipped = swapedBR;
            }
            using (Session session = new Session(graph))
            {
                Tensor imageTensor = Tensor.FromString(File.ReadAllBytes(fileName));
                Tensor[] imageResults = session.Run(new Output[] { input }, new Tensor[] { imageTensor }, new Output[] { flipped });
                return (imageResults[0]);
            }
        }
    }
    else
    {
        // Fall back to the native decoder for every other type/format.
        return (NativeReadTensorFromImageFile<T>(fileName, inputHeight, inputWidth, inputMean, scale, flipUpSideDown, swapBR));
    }
#endif
}
/// <summary>
/// Renders a sample stroked line demonstrating the given <see cref="CGLineCap"/>
/// style, plus a thin white reference line and endpoint markers, and returns
/// the rendered bitmap as a remote representation image.
/// </summary>
/// <param name="obj">The line-cap style to demonstrate.</param>
/// <returns>The rendered demonstration image.</returns>
public static XIR.Image RemoteRepresentation(this CGLineCap obj)
{
    var aPath = new CGPath();
    var lineWidth = 10;
    var sampleWidth = 50;

    aPath.MoveToPoint(new CGPoint(lineWidth, lineWidth));
    aPath.AddLineToPoint(new CGPoint(lineWidth + sampleWidth, lineWidth));

    // let's make sure we leave a little room for the line width drawing as well by adding the lineWidth as well
    var width = (int)aPath.PathBoundingBox.Right + lineWidth;
    var height = (int)aPath.PathBoundingBox.Bottom + lineWidth;
    var bytesPerRow = width * 4;

    // BUGFIX: the color space was previously created inline and never
    // disposed, leaking a native CGColorSpace on every call. Wrap it in a
    // using block like the other bitmap-context helpers in this file.
    using (var colorSpace = CGColorSpace.CreateDeviceRGB())
    using (var context = new CGBitmapContext(
               IntPtr.Zero, width, height, 8, bytesPerRow,
               colorSpace, CGImageAlphaInfo.PremultipliedFirst))
    {
        // First, stroke the sample line with the requested cap style.
        context.SaveState();
        context.SetStrokeColor(new CGColor(0, 0, 0));
        context.SetLineWidth(lineWidth);
        context.AddPath(aPath);
        switch ((CGLineCap)obj)
        {
        case CGLineCap.Square:
            context.SetLineCap(CGLineCap.Square);
            break;
        case CGLineCap.Butt:
            context.SetLineCap(CGLineCap.Butt);
            break;
        case CGLineCap.Round:
            context.SetLineCap(CGLineCap.Round);
            break;
        }
        context.DrawPath(CGPathDrawingMode.Stroke);
        context.RestoreState();

        // Second, we draw the inset line to demonstrate the bounds
        aPath = new CGPath();
        aPath.MoveToPoint(new CGPoint(lineWidth, lineWidth));
        aPath.AddLineToPoint(new CGPoint(lineWidth + sampleWidth, lineWidth));
        context.SetLineCap(CGLineCap.Butt);
        context.SetStrokeColor(NSColor.White.CGColor);
        context.SetLineWidth(1);
        context.SaveState();
        context.AddPath(aPath);
        context.DrawPath(CGPathDrawingMode.Stroke);
        context.RestoreState();

        // Third, we draw the inset line endings which are two circles
        var circleWidth = 2;
        aPath = new CGPath();
        aPath.AddEllipseInRect(new CGRect(lineWidth - (int)(circleWidth / 2), lineWidth - (int)(circleWidth / 2), circleWidth, circleWidth));
        aPath.AddEllipseInRect(new CGRect(lineWidth + sampleWidth - (int)(circleWidth / 2), lineWidth - (int)(circleWidth / 2), circleWidth, circleWidth));
        context.SetLineWidth(circleWidth);
        context.SetStrokeColor(NSColor.White.CGColor);
        context.AddPath(aPath);
        context.DrawPath(CGPathDrawingMode.Stroke);
        return (RemoteRepresentation(context));
    }
}
private static UIImage ScaleImage(UIImage image, int maxSize) { UIImage res = image; CGImage imageRef = image.CGImage; CGImageAlphaInfo alphaInfo = imageRef.AlphaInfo; CGColorSpace colorSpaceInfo = CGColorSpace.CreateDeviceRGB(); if (alphaInfo == CGImageAlphaInfo.None) { alphaInfo = CGImageAlphaInfo.NoneSkipLast; } int width = imageRef.Width; int height = imageRef.Height; if (maxSize > 0 && maxSize < Math.Max(width, height)) { try { if (height >= width) { width = (int)Math.Floor(width * (maxSize / (double)height)); height = maxSize; } else { height = (int)Math.Floor(height * (maxSize / (double)width)); width = maxSize; } int bytesPerRow = (int)image.Size.Width * 4; var buffer = new byte[(int)(bytesPerRow * image.Size.Height)]; CGBitmapContext bitmap; if (image.Orientation == UIImageOrientation.Up || image.Orientation == UIImageOrientation.Down) { bitmap = new CGBitmapContext(buffer, width, height, imageRef.BitsPerComponent, imageRef.BytesPerRow, colorSpaceInfo, alphaInfo); } else { bitmap = new CGBitmapContext(buffer, height, width, imageRef.BitsPerComponent, imageRef.BytesPerRow, colorSpaceInfo, alphaInfo); } switch (image.Orientation) { case UIImageOrientation.Left: bitmap.RotateCTM((float)Math.PI / 2); bitmap.TranslateCTM(0, -height); break; case UIImageOrientation.Right: bitmap.RotateCTM(-((float)Math.PI / 2)); bitmap.TranslateCTM(-width, 0); break; case UIImageOrientation.Up: break; case UIImageOrientation.Down: bitmap.TranslateCTM(width, height); bitmap.RotateCTM(-(float)Math.PI); break; } bitmap.DrawImage(new RectangleF(0, 0, width, height), imageRef); res = UIImage.FromImage(bitmap.ToImage()); } finally { image.Dispose(); } } return(res); }
internal static void ToArray(this CGImage cgImage, IOutputArray mat, ImreadModes modes = ImreadModes.AnyColor) { Size sz = new Size((int)cgImage.Width, (int)cgImage.Height); using (Mat m = new Mat(sz, DepthType.Cv8U, 4)) { RectangleF rect = new RectangleF(PointF.Empty, new SizeF(cgImage.Width, cgImage.Height)); using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB()) using (CGBitmapContext context = new CGBitmapContext( m.DataPointer, sz.Width, sz.Height, 8, sz.Width * 4, cspace, CGImageAlphaInfo.PremultipliedLast)) context.DrawImage(rect, cgImage); if (modes == ImreadModes.Unchanged) { m.CopyTo(mat); } if (modes == ImreadModes.Grayscale) { CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Gray); } else if (modes == ImreadModes.AnyColor) { CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Bgra); } else if (modes == ImreadModes.Color) { CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Bgr); } else if (modes == ImreadModes.ReducedColor2) { using (Mat tmp = new Mat()) { CvInvoke.PyrDown(m, tmp); CvInvoke.CvtColor(tmp, mat, ColorConversion.Rgba2Bgr); } } else if (modes == ImreadModes.ReducedGrayscale2) { using (Mat tmp = new Mat()) { CvInvoke.PyrDown(m, tmp); CvInvoke.CvtColor(tmp, mat, ColorConversion.Rgba2Gray); } } else if (modes == ImreadModes.ReducedColor4 || modes == ImreadModes.ReducedColor8 || modes == ImreadModes.ReducedGrayscale4 || modes == ImreadModes.ReducedGrayscale8 || modes == ImreadModes.LoadGdal) { throw new NotImplementedException(String.Format("Conversion from PNG using mode {0} is not supported", modes)); } else { throw new Exception(String.Format("ImreadModes of {0} is not implemented.", modes.ToString())); //CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Bgr); } } }
/// <summary> /// Draws our coordinate grid /// </summary> protected void DrawCoordinateSpace(CGBitmapContext context) { //---- declare vars int remainder; int textHeight = 20; #region -= vertical ticks =- //---- create our vertical tick lines using (CGLayer verticalTickLayer = CGLayer.Create(context, new SizeF(20, 3))) { //---- draw a single tick verticalTickLayer.Context.FillRect(new RectangleF(0, 1, 20, 2)); //---- draw a vertical tick every 20 pixels float yPos = 20; int numberOfVerticalTicks = ((context.Height / 20) - 1); for (int i = 0; i < numberOfVerticalTicks; i++) { //---- draw the layer context.DrawLayer(verticalTickLayer, new PointF(0, yPos)); //---- starting at 40, draw the coordinate text nearly to the top if (yPos > 40 && i < (numberOfVerticalTicks - 2)) { //---- draw it every 80 points Math.DivRem((int)yPos, (int)80, out remainder); if (remainder == 0) { this.DrawTextAtPoint(context, 30, (yPos - (textHeight / 2)), yPos.ToString(), textHeight); } } //---- increment the position of the next tick yPos += 20; } } #endregion #region -= horizontal ticks =- //---- create our horizontal tick lines using (CGLayer horizontalTickLayer = CGLayer.Create(context, new SizeF(3, 20))) { horizontalTickLayer.Context.FillRect(new RectangleF(1, 0, 2, 20)); //---- draw a horizontal tick every 20 pixels float xPos = 20; int numberOfHorizontalTicks = ((context.Width / 20) - 1); for (int i = 0; i < numberOfHorizontalTicks; i++) { context.DrawLayer(horizontalTickLayer, new PointF(xPos, 0)); //---- starting at 100, draw the coordinate text nearly to the top if (xPos > 100 && i < (numberOfHorizontalTicks - 1)) { //---- draw it every 80 points Math.DivRem((int)xPos, (int)80, out remainder); if (remainder == 0) { this.DrawCenteredTextAtPoint(context, xPos, 30, xPos.ToString(), textHeight); } } //---- increment the position of the next tick xPos += 20; } } #endregion //---- draw our "origin" text DrawTextAtPoint(context, 20, (20 + (textHeight / 2)), "Origin (0,0)", textHeight); }
// Converts a CGImage into a raw pixel buffer suitable for texture upload and
// hands it to InitWithData. Dimensions are rounded up to powers of two (unless
// already a power of two) and halved until both sides are <= 1024; the image
// is drawn at its original size into the (possibly larger) buffer.
// Pixel format: RGBA8 when the image has a color space, A8 otherwise.
private void InitWithCGImage(CGImage image, All filter)
{
    int width, height, i;
    CGContext context = null;
    IntPtr data;
    CGColorSpace colorSpace;
    // NOTE(review): tempData and hasAlpha are computed/declared but unused in
    // the active code path (only referenced by the commented-out RGB565 block).
    IntPtr tempData;
    bool hasAlpha;
    CGImageAlphaInfo info;
    CGAffineTransform transform;
    Size imageSize;
    SurfaceFormat pixelFormat;
    // sizeToFit stays false: power-of-two rounding always rounds UP to the
    // next power of two rather than the nearest.
    bool sizeToFit = false;

    if (image == null)
    {
        throw new ArgumentException(" uimage is invalid! ");
    }

    info = image.AlphaInfo;
    hasAlpha = ((info == CGImageAlphaInfo.PremultipliedLast) || (info == CGImageAlphaInfo.PremultipliedFirst) || (info == CGImageAlphaInfo.Last) || (info == CGImageAlphaInfo.First) ? true : false);

    // Any color space -> full-color texture; none -> alpha-only texture.
    if (image.ColorSpace != null)
    {
        pixelFormat = SurfaceFormat.Color;
    }
    else
    {
        pixelFormat = SurfaceFormat.Alpha8;
    }

    imageSize = new Size(image.Width, image.Height);
    transform = CGAffineTransform.MakeIdentity();

    // Round width up to the next power of two (the `width & (width - 1)`
    // test is the standard is-power-of-two check).
    width = imageSize.Width;
    if ((width != 1) && ((width & (width - 1)) != 0))
    {
        i = 1;
        while ((sizeToFit ? 2 * i : i) < width)
        {
            i *= 2;
        }
        width = i;
    }
    // Same rounding for height.
    height = imageSize.Height;
    if ((height != 1) && ((height & (height - 1)) != 0))
    {
        i = 1;
        while ((sizeToFit ? 2 * i : i) < height)
        {
            i *= 2;
        }
        height = i;
    }
    // TODO: kMaxTextureSize = 1024
    // Clamp to the max texture size, scaling the drawn image down with it.
    // NOTE(review): the scale transform is overwritten (not accumulated) on
    // each pass, so more than one halving would record only a 0.5 scale —
    // verify against inputs larger than 2048.
    while ((width > 1024) || (height > 1024))
    {
        width /= 2;
        height /= 2;
        transform = CGAffineTransform.MakeScale(0.5f, 0.5f);
        imageSize.Width /= 2;
        imageSize.Height /= 2;
    }

    // Allocate the destination buffer and wrap it in a bitmap context.
    switch (pixelFormat)
    {
    case SurfaceFormat.Color:
        colorSpace = CGColorSpace.CreateDeviceRGB();
        data = Marshal.AllocHGlobal(height * width * 4);
        context = new CGBitmapContext(data, width, height, 8, 4 * width, colorSpace, CGImageAlphaInfo.PremultipliedLast);
        // The context retains the color space, so releasing it here is safe.
        colorSpace.Dispose();
        break;
    case SurfaceFormat.Alpha8:
        data = Marshal.AllocHGlobal(height * width);
        context = new CGBitmapContext(data, width, height, 8, width, null, CGImageAlphaInfo.Only);
        break;
    default:
        throw new NotSupportedException("Invalid pixel format");
    }

    context.ClearRect(new RectangleF(0, 0, width, height));
    // Push the image to the top of the (bottom-up) buffer when the buffer is
    // taller than the image.
    context.TranslateCTM(0, height - imageSize.Height);

    if (!transform.IsIdentity)
    {
        context.ConcatCTM(transform);
    }

    context.DrawImage(new RectangleF(0, 0, image.Width, image.Height), image);

    //Convert "RRRRRRRRRGGGGGGGGBBBBBBBBAAAAAAAA" to "RRRRRGGGGGGBBBBB"
    /*
     * if(pixelFormat == SurfaceFormat.Rgb32) {
     * tempData = Marshal.AllocHGlobal(height * width * 2);
     *
     * int d32;
     * short d16;
     * int inPixel32Count=0,outPixel16Count=0;
     * for(i = 0; i < width * height; ++i, inPixel32Count+=sizeof(int))
     * {
     * d32 = Marshal.ReadInt32(data,inPixel32Count);
     * short R = (short)((((d32 >> 0) & 0xFF) >> 3) << 11);
     * short G = (short)((((d32 >> 8) & 0xFF) >> 2) << 5);
     * short B = (short)((((d32 >> 16) & 0xFF) >> 3) << 0);
     * d16 = (short) (R | G | B);
     * Marshal.WriteInt16(tempData,outPixel16Count,d16);
     * outPixel16Count += sizeof(short);
     * }
     * Marshal.FreeHGlobal(data);
     * data = tempData;
     * }
     */

    // InitWithData copies/uploads the pixels, so the native buffer and the
    // context can be released immediately afterwards.
    InitWithData(data, pixelFormat, width, height, imageSize, filter);
    context.Dispose();
    Marshal.FreeHGlobal(data);
}
//Create a Method to set orientation of image...
// Bakes the UIImage's EXIF orientation into the actual pixels by redrawing it
// through an affine transform, then returns the result encoded as JPEG bytes.
// Images already in the Up orientation are encoded as-is.
private byte[] RotateImage(UIImage image)
{
    UIImage imageToReturn = null;
    if (image.Orientation == UIImageOrientation.Up)
    {
        // Already upright — no redraw needed.
        imageToReturn = image;
    }
    else
    {
        // Build the rotation part of the transform.
        // NOTE(review): Rotate is applied before Translate here; these calls
        // are order-sensitive, so confirm output for each orientation on a
        // real device before changing anything.
        CGAffineTransform transform = CGAffineTransform.MakeIdentity();
        switch (image.Orientation)
        {
        case UIImageOrientation.Down:
        case UIImageOrientation.DownMirrored:
            transform.Rotate((float)Math.PI);
            transform.Translate(image.Size.Width, image.Size.Height);
            break;
        case UIImageOrientation.Left:
        case UIImageOrientation.LeftMirrored:
            transform.Rotate((float)Math.PI / 2);
            transform.Translate(image.Size.Width, 0);
            break;
        case UIImageOrientation.Right:
        case UIImageOrientation.RightMirrored:
            transform.Rotate(-(float)Math.PI / 2);
            transform.Translate(0, image.Size.Height);
            break;
        case UIImageOrientation.Up:
        case UIImageOrientation.UpMirrored:
            break;
        }
        // Add a horizontal flip for the mirrored variants.
        switch (image.Orientation)
        {
        case UIImageOrientation.UpMirrored:
        case UIImageOrientation.DownMirrored:
            transform.Translate(image.Size.Width, 0);
            transform.Scale(-1, 1);
            break;
        case UIImageOrientation.LeftMirrored:
        case UIImageOrientation.RightMirrored:
            transform.Translate(image.Size.Height, 0);
            transform.Scale(-1, 1);
            break;
        case UIImageOrientation.Up:
        case UIImageOrientation.Down:
        case UIImageOrientation.Left:
        case UIImageOrientation.Right:
            break;
        }
        //now draw image
        using (var context = new CGBitmapContext(IntPtr.Zero,
                                                 (int)image.Size.Width,
                                                 (int)image.Size.Height,
                                                 image.CGImage.BitsPerComponent,
                                                 image.CGImage.BytesPerRow,
                                                 image.CGImage.ColorSpace,
                                                 image.CGImage.BitmapInfo))
        {
            context.ConcatCTM(transform);
            switch (image.Orientation)
            {
            case UIImageOrientation.Left:
            case UIImageOrientation.LeftMirrored:
            case UIImageOrientation.Right:
            case UIImageOrientation.RightMirrored:
                // Grr...
                // 90-degree rotations swap width and height in the draw rect.
                context.DrawImage(new RectangleF(PointF.Empty, new SizeF((float)image.Size.Height, (float)image.Size.Width)), image.CGImage);
                break;
            default:
                context.DrawImage(new RectangleF(PointF.Empty, new SizeF((float)image.Size.Width, (float)image.Size.Height)), image.CGImage);
                break;
            }
            using (var imageRef = context.ToImage())
            {
                imageToReturn = new UIImage(imageRef);
            }
        }
    }
    // Encode the (now upright) image as JPEG and copy it into a managed array.
    using (NSData imageData = imageToReturn.AsJPEG())
    {
        Byte[] byteArray = new Byte[imageData.Length];
        System.Runtime.InteropServices.Marshal.Copy(imageData.Bytes, byteArray, 0, Convert.ToInt32(imageData.Length));
        return (byteArray);
    }
}
/// <summary>
/// Reads an image file, optionally resizes it to inputWidth x inputHeight,
/// normalizes each channel as (pixel - inputMean) * scale, and writes the
/// resulting float values into the pre-allocated buffer at <paramref name="dest"/>.
/// The caller must ensure dest has room for width * height * 3 floats.
/// </summary>
/// <param name="fileName">Path of the image file to load.</param>
/// <param name="dest">Destination buffer for the normalized float pixels.</param>
/// <param name="inputHeight">Target height; &lt;= 0 keeps the original height.</param>
/// <param name="inputWidth">Target width; &lt;= 0 keeps the original width.</param>
/// <param name="inputMean">Mean subtracted from each channel value.</param>
/// <param name="scale">Scale applied after mean subtraction.</param>
public static void ReadImageFileToTensor(String fileName, IntPtr dest, int inputHeight = -1, int inputWidth = -1, float inputMean = 0.0f, float scale = 1.0f)
{
#if __ANDROID__
    // Android: decode, optionally rescale, unpack ARGB ints to R,G,B floats.
    Android.Graphics.Bitmap bmp = BitmapFactory.DecodeFile(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        Bitmap resized = Bitmap.CreateScaledBitmap(bmp, inputWidth, inputHeight, false);
        bmp.Dispose();
        bmp = resized;
    }
    int[] intValues = new int[bmp.Width * bmp.Height];
    float[] floatValues = new float[bmp.Width * bmp.Height * 3];
    bmp.GetPixels(intValues, 0, bmp.Width, 0, 0, bmp.Width, bmp.Height);
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        // Bits 16-23 = R, 8-15 = G, 0-7 = B of the packed pixel.
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
#elif __IOS__
    // iOS: draw the (optionally rescaled) UIImage into a pinned int[] via a
    // CGBitmapContext, then normalize.
    UIImage image = new UIImage(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        UIImage resized = image.Scale(new CGSize(inputWidth, inputHeight));
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    // NOTE(review): handle.Free() is not in a finally block — a CG exception
    // would leak the pinned handle.
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
#elif __UNIFIED__
    // macOS (Xamarin.Mac): resize via LockFocus/DrawInRect, then the same
    // pinned-buffer extraction as the iOS path.
    NSImage image = new NSImage(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        NSImage resized = new NSImage(new CGSize(inputWidth, inputHeight));
        resized.LockFocus();
        image.DrawInRect(new CGRect(0, 0, inputWidth, inputHeight), CGRect.Empty, NSCompositingOperation.SourceOver, 1.0f);
        resized.UnlockFocus();
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
#else
    if (Emgu.TF.Util.Platform.OperationSystem == OS.Windows)
    {
        //Do something for Windows
        System.Drawing.Bitmap bmp = new Bitmap(fileName);
        if (inputHeight > 0 || inputWidth > 0)
        {
            //resize bmp
            System.Drawing.Bitmap newBmp = new Bitmap(bmp, inputWidth, inputHeight);
            bmp.Dispose();
            bmp = newBmp;
            //bmp.Save("tmp.png");
        }
        // Copy the 24bpp pixel bytes out, then normalize every byte
        // channel-by-channel (BGR order as laid out by GDI+).
        byte[] byteValues = new byte[bmp.Width * bmp.Height * 3];
        System.Drawing.Imaging.BitmapData bd = new System.Drawing.Imaging.BitmapData();
        bmp.LockBits(
            new Rectangle(0, 0, bmp.Width, bmp.Height),
            System.Drawing.Imaging.ImageLockMode.ReadOnly,
            System.Drawing.Imaging.PixelFormat.Format24bppRgb, bd);
        System.Runtime.InteropServices.Marshal.Copy(bd.Scan0, byteValues, 0, byteValues.Length);
        bmp.UnlockBits(bd);
        float[] floatValues = new float[bmp.Width * bmp.Height * 3];
        for (int i = 0; i < byteValues.Length; ++i)
        {
            floatValues[i] = ((float)byteValues[i] - inputMean) * scale;
        }
        System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
    }
    else
    {
        throw new Exception("Not implemented");
    }
#endif
}
/// <summary>
/// Read a pTexture from an arbritrary file.
/// Optionally uses a raw-bitmap disk cache (BITMAP_CACHING) to skip PNG
/// decoding on subsequent loads, then rescales the reported texture size to
/// the game's sprite-sheet resolution. Returns null when the file is missing,
/// decoding fails, or any exception occurs (the catch intentionally swallows).
/// NOTE(review): the <paramref name="mipmap"/> parameter is not referenced in
/// this body — confirm whether it is consumed by FromUIImage/FromStream or dead.
/// </summary>
public static pTexture FromFile(string filename, bool mipmap)
{
    //load base texture first...
    if (!NativeAssetManager.Instance.FileExists(filename))
    {
        return (null);
    }
    pTexture tex = null;
    try
    {
#if BITMAP_CACHING
        // Cache files live alongside the config path: raw RGBA pixels in
        // *.raw and "WxH" dimensions in *.info.
        string bitmapFilename = GameBase.Instance.PathConfig + Path.GetFileName(filename.Replace(".png", ".raw"));
        string infoFilename = GameBase.Instance.PathConfig + Path.GetFileName(filename.Replace(".png", ".info"));
        if (!NativeAssetManager.Instance.FileExists(bitmapFilename))
        {
            // Cache miss: decode the source image, write the raw cache, and
            // build the texture from the decoded pixels.
#if iOS
            using (UIImage image = UIImage.FromFile(filename))
            {
                if (image == null)
                {
                    return (null);
                }
                int width = (int)image.Size.Width;
                int height = (int)image.Size.Height;
                byte[] buffer = new byte[width * height * 4];
                // Pin the buffer and let CoreGraphics render RGBA into it.
                fixed (byte* p = buffer)
                {
                    IntPtr data = (IntPtr)p;
                    using (CGBitmapContext textureContext = new CGBitmapContext(data, width, height, 8, width * 4, image.CGImage.ColorSpace, CGImageAlphaInfo.PremultipliedLast))
                    {
                        textureContext.DrawImage(new RectangleF(0, 0, width, height), image.CGImage);
                    }
                    File.WriteAllBytes(bitmapFilename, buffer);
                    tex = FromRawBytes(data, width, height);
                }
            }
#else
            using (Stream stream = NativeAssetManager.Instance.GetFileStream(filename))
            using (Bitmap b = (Bitmap)Image.FromStream(stream, false, false))
            {
                BitmapData data = b.LockBits(new Rectangle(0, 0, b.Width, b.Height), ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
                byte[] bitmap = new byte[b.Width * b.Height * 4];
                Marshal.Copy(data.Scan0, bitmap, 0, bitmap.Length);
                File.WriteAllBytes(bitmapFilename, bitmap);
                tex = FromRawBytes(data.Scan0, b.Width, b.Height);
                b.UnlockBits(data);
            }
#endif
            if (tex != null)
            {
                // Persist the dimensions so the cached raw bytes can be
                // reinterpreted next time.
                string info = tex.Width + "x" + tex.Height;
                File.WriteAllText(infoFilename, info);
            }
        }
        else
        {
            // Cache hit: read raw bytes and the "WxH" info file back.
            byte[] buffer = File.ReadAllBytes(bitmapFilename);
            string info = File.ReadAllText(infoFilename);
            string[] split = info.Split('x');
            int width = Int32.Parse(split[0]);
            int height = Int32.Parse(split[1]);
            fixed (byte* p = buffer)
            {
                IntPtr location = (IntPtr)p;
                tex = FromRawBytes(location, width, height);
            }
        }
#else
#if iOS
        using (UIImage image = UIImage.FromFile(filename))
            tex = FromUIImage(image, filename);
#else
        using (Stream stream = NativeAssetManager.Instance.GetFileStream(filename))
            tex = FromStream(stream, filename);
#endif
#endif
        if (tex == null)
        {
            return (null);
        }
        //This makes sure we are always at the correct sprite resolution.
        //F*****g hack, or f*****g hax?
        tex.assetName = filename;
        tex.Width = (int)(tex.Width * 960f / GameBase.SpriteSheetResolution);
        tex.Height = (int)(tex.Height * 960f / GameBase.SpriteSheetResolution);
        tex.TextureGl.TextureWidth = tex.Width;
        tex.TextureGl.TextureHeight = tex.Height;
        return (tex);
    }
    catch
    {
        // ignored — any failure is reported to the caller as a null texture.
    }
    return (null);
}
// WHAT IS THE DEFAULT FONT FOR ANDROID?
#endif

// Partial class implementation of draw() in c# to allow use of unsafe code..
// Renders `source` into this BitmapData's pixel buffer (mData):
// - TextField sources are rasterized with the platform text API
//   (CoreGraphics on iOS/Mac, Canvas/Paint on Android);
// - BitmapData sources are copied pixel-by-pixel through the inverse of
//   `matrix` (nearest-neighbor, no smoothing/blend/colorTransform/clipRect yet);
// - anything else only logs a not-implemented warning.
public unsafe void draw(IBitmapDrawable source, flash.geom.Matrix matrix = null, ColorTransform colorTransform = null, string blendMode = null, Rectangle clipRect = null, Boolean smoothing = false)
{
#if PLATFORM_MONOMAC || PLATFORM_MONOTOUCH
    if (source is flash.text.TextField)
    {
        flash.text.TextField tf = source as flash.text.TextField;
        flash.text.TextFormat format = tf.defaultTextFormat;
        // $$TODO figure out how to get rid of this extra data copy
        var sizeToDraw = (width * height) << 2;
        if (sizeToDraw == 0)
        {
            // Zero-sized target — nothing to rasterize into.
            return;
        }
        string fontName = format.font;
        // format.size is a boxed double or int; fall back to 10pt otherwise.
        float fontSize = (format.size is double) ? (float)(double)format.size : ((format.size is int) ? (float)(int)format.size : 10);
        // Check if the font is installed? (cached in sHasFont per font name)
        bool hasFont = true;
        if (!sHasFont.TryGetValue(fontName, out hasFont))
        {
#if PLATFORM_MONOTOUCH
            UIFont font = UIFont.FromName(fontName, 10);
            sHasFont[fontName] = hasFont = font != null;
            if (font != null)
            {
                font.Dispose();
            }
#elif PLATFORM_MONOMAC
            NSFont font = NSFont.FromFontName(fontName, 10);
            sHasFont[fontName] = hasFont = font != null;
            if (font != null)
            {
                font.Dispose();
            }
#else
            sHasFont[fontName] = false;
#endif
        }
        if (!hasFont)
        {
            fontName = DEFAULT_FONT;
        }
        // Pin mData and wrap it in a CGBitmapContext so CoreGraphics draws
        // the text directly into this bitmap's pixels.
        fixed (uint* data = mData)
        {
            using (CGBitmapContext context = new CGBitmapContext(new IntPtr(data), width, height, 8, 4 * width, CGColorSpace.CreateDeviceRGB(), CGImageAlphaInfo.PremultipliedLast))
            {
                // Unpack the 0xRRGGBB text color and the TextField's alpha.
                uint tfColor = format.color != null ? (uint)(format.color) : 0;
                float r = (float)((tfColor >> 16) & 0xFF) / 255.0f;
                float g = (float)((tfColor >> 8) & 0xFF) / 255.0f;
                float b = (float)((tfColor >> 0) & 0xFF) / 255.0f;
                float a = (float)(tf.alpha);
                CGColor color = new CGColor(r, g, b, a);
                context.SetFillColor(color);
                context.SetStrokeColor(color);
                context.SelectFont(fontName, fontSize, CGTextEncoding.MacRoman);
                context.SetAllowsAntialiasing(((tf.antiAliasType as string) == flash.text.AntiAliasType.ADVANCED));
                double x = matrix.tx;
                double y = matrix.ty;
                // invert y because the CG origin is bottom,left
                y = height - tf.textHeight - y;
                // align text
                switch (format.align)
                {
                case TextFormatAlign.LEFT:
                    // no adjustment required
                    break;
                case TextFormatAlign.CENTER:
                    // center x
                    x += width / 2;
                    x -= tf.textWidth / 2;
                    break;
                case TextFormatAlign.RIGHT:
                    // right justify x
                    x += width;
                    x -= tf.textWidth;
                    break;
                default:
                    throw new System.NotImplementedException();
                }
                // draw text
                context.ShowTextAtPoint((float)x, (float)y, tf.text);
            }
        }
    }
    else
#elif PLATFORM_MONODROID
    if (source is flash.text.TextField)
    {
        flash.text.TextField tf = source as flash.text.TextField;
        flash.text.TextFormat format = tf.defaultTextFormat;
        // $$TODO figure out how to get rid of this extra data copy
        var data = new byte[width * height * 4];
        System.Buffer.BlockCopy(mData, 0, data, 0, data.Length);
        // Rasterize through an Android Bitmap + Canvas, then copy the pixels
        // back into mData.
        Android.Graphics.Bitmap.Config config = Android.Graphics.Bitmap.Config.Argb8888;
        Android.Graphics.Bitmap bitmap = Android.Graphics.Bitmap.CreateBitmap(width, height, config);
        Canvas canvas = new Canvas(bitmap);
        var x = matrix.tx;
        var y = matrix.ty;
        // invert y because the CG origin is bottom,left
        // y = height - tf.textHeight - y;
        // align text
        switch (format.align)
        {
        case TextFormatAlign.LEFT:
            // no adjustment required
            break;
        case TextFormatAlign.CENTER:
            // center x
            x += width / 2;
            x -= tf.textWidth / 2;
            break;
        case TextFormatAlign.RIGHT:
            // right justify x
            x += width;
            x -= tf.textWidth;
            break;
        default:
            throw new System.NotImplementedException();
        }
        // NOTE(review): this branch always draws black text — the
        // format.color used on the iOS/Mac path is ignored here.
        Paint paint = new Paint(PaintFlags.AntiAlias);
        paint.Color = Color.Black;
        paint.TextSize = (float)format.size;
        paint.SetTypeface(Typeface.Create(format.font, TypefaceStyle.Normal));
        paint.TextAlign = Paint.Align.Center;
        canvas.DrawText(tf.text, (float)x, (float)y, paint);
        // Copy the rasterized pixels back into this object's buffer.
        mData = new uint[bitmap.Width * bitmap.Height];
        var buffer = new int[bitmap.Width * bitmap.Height];
        bitmap.GetPixels(buffer, 0, width, 0, 0, width, height);
        for (int i = 0; i < buffer.Length; i++)
        {
            mData[i] = (uint)buffer[i];
        }
    }
    else
#endif
    if (source is flash.display.BitmapData)
    {
        //naive implementation ,
        //to be implemented:
        // -smoothing / antialiasing,
        // -blend mode
        // -colorTransform
        // -cliprect
        BitmapData sourceBitmap = source as BitmapData;
        // Iterate destination pixels and sample the source through the
        // inverted matrix (reverse mapping, nearest-neighbor).
        flash.geom.Matrix matInverse = (matrix != null) ? matrix.clone() : new flash.geom.Matrix();
        matInverse.invert();
        for (int y = 0; y < mHeight; y++)
        {
            for (int x = 0; x < mWidth; x++)
            {
                int x2 = (int)(x * matInverse.a + y * matInverse.c + matInverse.tx);
                int y2 = (int)(x * matInverse.b + y * matInverse.d + matInverse.ty);
                // Out-of-bounds samples leave the destination pixel untouched.
                if (x2 >= 0 && y2 >= 0 && x2 < sourceBitmap.width && y2 < sourceBitmap.height)
                {
                    mData[x + y * mWidth] = sourceBitmap.mData[x2 + y2 * sourceBitmap.mWidth];
                }
            }
        }
    }
    else
    {
        _root.trace_fn.trace("NotImplementedWarning: BitmapData.draw()");
    }
}
/// <summary>
/// Copies texture pixel data into <paramref name="data"/>. Only
/// <see cref="Color"/> element types are supported; the pixels are obtained
/// by re-decoding the source image from the application bundle, since the
/// GPU copy is not directly readable on this platform.
/// </summary>
/// <param name="level">Mip level to read (currently ignored).</param>
/// <param name="rect">Region to read, or null for the whole texture.</param>
/// <param name="data">Destination array; must not be null.</param>
/// <param name="startIndex">Offset into <paramref name="data"/>.</param>
/// <param name="elementCount">Number of pixels requested.</param>
/// <exception cref="ArgumentException">Bad <paramref name="data"/> argument.</exception>
/// <exception cref="NotImplementedException">T is not <see cref="Color"/>.</exception>
public void GetData <T>(int level, Rectangle?rect, T[] data, int startIndex, int elementCount)
{
    if (data == null)
    {
        throw new ArgumentException("data cannot be null");
    }
    if (data.Length < startIndex + elementCount)
    {
        throw new ArgumentException("The data passed has a length of " + data.Length + " but " + elementCount + " pixels have been requested.");
    }

    // Default to the full texture when no rectangle is supplied.
    Rectangle r = rect ?? new Rectangle(0, 0, Width, Height);

    // Only Color readback is implemented.
    if (typeof(T) != typeof(Color))
    {
        throw new NotImplementedException();
    }

    // Reload the bitmap from the bundle to get at the pixel data.
    UIImage uiImage = UIImage.FromBundle(this.Name);
    if (uiImage == null)
    {
        throw new ContentLoadException("Error loading file via UIImage: " + Name);
    }
    CGImage image = uiImage.CGImage;
    if (image == null)
    {
        // Fix: error message previously misspelled "CGIamge".
        throw new ContentLoadException("Error with CGImage: " + Name);
    }

    // Images with a color space are treated as RGBA; the rest as alpha-only.
    SurfaceFormat pixelFormat = (image.ColorSpace != null) ? SurfaceFormat.Color : SurfaceFormat.Alpha8;

    Size imageSize = new Size(image.Width, image.Height);
    CGAffineTransform transform = CGAffineTransform.MakeIdentity();
    bool sizeToFit = false;

    // Round non-power-of-two dimensions up to the next power of two.
    int width = imageSize.Width;
    if ((width != 1) && ((width & (width - 1)) != 0))
    {
        int i = 1;
        while ((sizeToFit ? 2 * i : i) < width)
        {
            i *= 2;
        }
        width = i;
    }
    int height = imageSize.Height;
    if ((height != 1) && ((height & (height - 1)) != 0))
    {
        int i = 1;
        while ((sizeToFit ? 2 * i : i) < height)
        {
            i *= 2;
        }
        height = i;
    }

    // Clamp to the maximum texture size, halving (and scaling the draw) as needed.
    // TODO: kMaxTextureSize = 1024
    while ((width > 1024) || (height > 1024))
    {
        width /= 2;
        height /= 2;
        transform = CGAffineTransform.MakeScale(0.5f, 0.5f);
        imageSize.Width /= 2;
        imageSize.Height /= 2;
    }

    CGContext context = null;
    IntPtr imageData = IntPtr.Zero;
    try
    {
        switch (pixelFormat)
        {
            case SurfaceFormat.Color:
                using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB())
                {
                    imageData = Marshal.AllocHGlobal(height * width * 4);
                    context = new CGBitmapContext(imageData, width, height, 8, 4 * width, colorSpace, CGImageAlphaInfo.PremultipliedLast);
                }
                break;

            case SurfaceFormat.Alpha8:
                imageData = Marshal.AllocHGlobal(height * width);
                context = new CGBitmapContext(imageData, width, height, 8, width, null, CGImageAlphaInfo.Only);
                break;

            default:
                throw new NotSupportedException("Invalid pixel format");
        }

        context.ClearRect(new RectangleF(0, 0, width, height));
        // Anchor the image at the top of the (possibly enlarged) canvas.
        context.TranslateCTM(0, height - imageSize.Height);
        if (!transform.IsIdentity)
        {
            context.ConcatCTM(transform);
        }
        context.DrawImage(new RectangleF(0, 0, image.Width, image.Height), image);

        byte[] pixel = new byte[4];
        int count = 0;
        for (int y = r.Top; y < r.Bottom; y++)
        {
            for (int x = r.Left; x < r.Right; x++)
            {
                var result = new Color(0, 0, 0, 0);
                int sz;
                int pos;
                IntPtr pixelOffset;
                switch (this.Format)
                {
                    case SurfaceFormat.Color /*kTexture2DPixelFormat_RGBA8888*/:
                    case SurfaceFormat.Dxt3:
                        sz = 4;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = pixel[3];
                        break;

                    // NOTE(review): the 16-bit cases below copy four raw bytes and
                    // assign them to RGBA without unpacking the packed 4444/5551
                    // layout — looks wrong, but preserved as-is pending confirmation.
                    case SurfaceFormat.Bgra4444 /*kTexture2DPixelFormat_RGBA4444*/:
                        sz = 2;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = pixel[3];
                        break;

                    case SurfaceFormat.Bgra5551 /*kTexture2DPixelFormat_RGB5A1*/:
                        sz = 2;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        Marshal.Copy(pixelOffset, pixel, 0, 4);
                        result.R = pixel[0];
                        result.G = pixel[1];
                        result.B = pixel[2];
                        result.A = pixel[3];
                        break;

                    case SurfaceFormat.Alpha8 /*kTexture2DPixelFormat_A8*/:
                        sz = 1;
                        pos = ((y * imageSize.Width) + x) * sz;
                        pixelOffset = new IntPtr(imageData.ToInt64() + pos);
                        // Fix: copy a single byte; copying 4 could read past the
                        // end of the native buffer on the final pixel.
                        Marshal.Copy(pixelOffset, pixel, 0, 1);
                        result.A = pixel[0];
                        break;

                    default:
                        throw new NotSupportedException("Texture format");
                }

                // NOTE(review): this ignores startIndex and indexes by absolute
                // pixel position — TODO confirm against callers before changing.
                data[((y * imageSize.Width) + x)] = (T)(object)result;
                count++;
                if (count >= elementCount)
                {
                    return; // resources are released by the finally block
                }
            }
        }
    }
    finally
    {
        // Fix: the original leaked the CG context and the AllocHGlobal buffer
        // whenever the elementCount early-return path was taken.
        if (context != null)
        {
            context.Dispose();
        }
        if (imageData != IntPtr.Zero)
        {
            Marshal.FreeHGlobal(imageData);
        }
    }
}
/// <summary>
/// Builds the internal backing bitmap for this object from
/// <paramref name="image"/>, normalizing it to 32-bit premultiplied RGBA
/// with a top-left origin, and publishes it as <c>NativeCGImage</c>.
/// </summary>
/// <param name="image">Source image; must not be null.</param>
/// <exception cref="ArgumentException">Thrown when <paramref name="image"/> is null.</exception>
private void InitWithCGImage(CGImage image)
{
    if (image == null)
    {
        throw new ArgumentException(" image is invalid! ");
    }

    imageSize.Width = image.Width;
    imageSize.Height = image.Height;
    int width = image.Width;
    int height = image.Height;

    // Regardless of the source's color space or alpha info we represent all
    // images internally as premultiplied 32-bit RGBA. Other context formats
    // drew incorrectly and tore, and creating a Graphics to draw on them was
    // problematic, so everything is normalized here. (The original code had
    // three branches — alpha / no-alpha / no-colorspace — that all assigned
    // these same values; they are collapsed into one.)
    CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB();
    const int bitsPerComponent = 8;
    const int bitsPerPixel = 32;
    CGBitmapFlags bitmapInfo = CGBitmapFlags.PremultipliedLast;

    int bytesPerRow = width * bitsPerPixel / bitsPerComponent;
    int size = bytesPerRow * height;
    bitmapBlock = Marshal.AllocHGlobal(size);

    // Fix: the context height was previously passed as image.Width, which
    // corrupted rendering (and sized the context wrongly) for non-square images.
    CGBitmapContext bitmap = new CGBitmapContext(bitmapBlock, width, height, bitsPerComponent, bytesPerRow, colorSpace, bitmapInfo);
    bitmap.ClearRect(new RectangleF(0, 0, width, height));

    // We need to flip the Y axis to go from the right-handed CG coordinate
    // system (bottom-left origin) to our left-handed one (top-left origin).
    var transform = new CGAffineTransform(1, 0, 0, -1, 0, image.Height);
    bitmap.ConcatCTM(transform);
    bitmap.DrawImage(new RectangleF(0, 0, image.Width, image.Height), image);

    // The provider takes ownership of bitmapBlock (final 'true' argument).
    var provider = new CGDataProvider(bitmapBlock, size, true);
    NativeCGImage = new CGImage(width, height, bitsPerComponent, bitsPerPixel, bytesPerRow, colorSpace, bitmapInfo, provider, null, false, CGColorRenderingIntent.Default);

    colorSpace.Dispose();
    bitmap.Dispose();
}
/// <summary>
/// Creates a composited image from a list of source images.
/// </summary>
/// <param name="paths">The paths of the images, in order of lowest z-index to highest, to composite together.</param>
/// <param name="saveLocation">Where to save the composited image.</param>
/// <param name="overwrite"><c>true</c> to overwrite an existing file; otherwise <c>false</c>.</param>
public void CreateCompositeImage(List <string> paths, string saveLocation, bool overwrite)
{
    if (!overwrite && iApp.File.Exists(saveLocation))
    {
        return;
    }

    // Ignore null entries and files that do not exist.
    paths = paths.Where(path => path != null && iApp.File.Exists(path)).ToList();
    if (paths.Count == 0)
    {
        return;
    }

    try
    {
        var metric = System.DateTime.UtcNow;
        iApp.File.EnsureDirectoryExists(saveLocation);
        using (new NSAutoreleasePool())
        {
            var images = new List <CGImage>(paths.Count);
            try
            {
                // Decode each source; PNG and JPEG are the supported formats.
                images.AddRange(paths.Select <string, CGImage>(path =>
                    path.EndsWith(".png", StringComparison.InvariantCultureIgnoreCase) ?
                        CGImage.FromPNG(CGDataProvider.FromFile(path), null, false, CGColorRenderingIntent.Default) :
                        CGImage.FromJPEG(CGDataProvider.FromFile(path), null, false, CGColorRenderingIntent.Default)));

                // The first image defines the canvas; every layer is drawn
                // stretched to these bounds.
                nint width = images[0].Width;
                nint height = images[0].Height;
                var bounds = new RectangleF(0, 0, width, height);

                // Fix: the bitmap context and its color space were previously
                // never disposed, leaking native memory on every composite.
                using (var colorSpace = CGColorSpace.CreateDeviceRGB())
                using (var g = new CGBitmapContext(
                           System.IntPtr.Zero,
                           width,
                           height,
                           images[0].BitsPerComponent,
                           width * 4,
                           colorSpace,
                           CGImageAlphaInfo.PremultipliedLast))
                {
                    // Draw bottom-most first so later images overlay earlier ones.
                    foreach (var cgImage in images)
                    {
                        g.DrawImage(bounds, cgImage);
                    }

                    lock (padlock)
                    {
                        // UIImage.AsPNG() should be safe to run on a background thread, but MT 6.2.6.6 says otherwise.
                        // Xamarin confirmed that this was unintentional and that MT 6.2.7 will remove the UI check.
                        UIApplication.CheckForIllegalCrossThreadCalls = false;
                        NSError err = null;
                        UIImage.FromImage(g.ToImage()).AsPNG().Save(saveLocation, true, out err);
                        UIApplication.CheckForIllegalCrossThreadCalls = true;
                    }
                }
            }
            finally
            {
                // Fix: release the decoded source images (previously leaked).
                foreach (var cgImage in images)
                {
                    cgImage.Dispose();
                }
            }
        }
        iApp.Log.Metric("ImageEngine icon creation", System.DateTime.UtcNow.Subtract(metric).TotalMilliseconds);
    }
    catch (Exception e)
    {
        iApp.Log.Error("An error occurred while compositing the image", e);
    }
}
/// <summary>
/// Creates a <see cref="Texture2D"/> by decoding an image from the given
/// stream with the decoder native to the current platform (UIKit, AppKit,
/// Android graphics, WIC/SharpDX, or System.Drawing).
/// </summary>
/// <param name="graphicsDevice">Device the texture is created on.</param>
/// <param name="stream">Stream containing the encoded image data.</param>
/// <returns>The decoded texture.</returns>
public static Texture2D FromStream(GraphicsDevice graphicsDevice, Stream stream)
{
    //todo: partial classes would be cleaner
#if IOS || MONOMAC
#if IOS
    using (var uiImage = UIImage.LoadFromData(NSData.FromStream(stream)))
#elif MONOMAC
    using (var nsImage = NSImage.FromStream (stream))
#endif
    {
#if IOS
        var cgImage = uiImage.CGImage;
#elif MONOMAC
        var rectangle = RectangleF.Empty;
        var cgImage = nsImage.AsCGImage (ref rectangle, null, null);
#endif
        var width = cgImage.Width;
        var height = cgImage.Height;

        // Render the CGImage into a 32-bit premultiplied RGBA buffer.
        var data = new byte[width * height * 4];
        var colorSpace = CGColorSpace.CreateDeviceRGB();
        var bitmapContext = new CGBitmapContext(data, width, height, 8, width * 4, colorSpace, CGBitmapFlags.PremultipliedLast);
        bitmapContext.DrawImage(new RectangleF(0, 0, width, height), cgImage);
        bitmapContext.Dispose();
        colorSpace.Dispose();

        Texture2D texture = null;
        // Texture creation must run on the UI (GL) thread.
        Threading.BlockOnUIThread(() =>
        {
            texture = new Texture2D(graphicsDevice, width, height, false, SurfaceFormat.Color);
            texture.SetData(data);
        });
        return texture;
    }
#elif ANDROID
    using (Bitmap image = BitmapFactory.DecodeStream(stream, null, new BitmapFactory.Options
    {
        InScaled = false,
        InDither = false,
        InJustDecodeBounds = false,
        InPurgeable = true,
        InInputShareable = true,
    }))
    {
        var width = image.Width;
        var height = image.Height;

        int[] pixels = new int[width * height];
        // NOTE(review): width/height were just taken from the bitmap, so this
        // branch can never run — presumably a leftover from power-of-two
        // padding logic; confirm before removing.
        if ((width != image.Width) || (height != image.Height))
        {
            using (Bitmap imagePadded = Bitmap.CreateBitmap(width, height, Bitmap.Config.Argb8888))
            {
                Canvas canvas = new Canvas(imagePadded);
                canvas.DrawARGB(0, 0, 0, 0);
                canvas.DrawBitmap(image, 0, 0, null);
                imagePadded.GetPixels(pixels, 0, width, 0, 0, width, height);
                imagePadded.Recycle();
            }
        }
        else
        {
            image.GetPixels(pixels, 0, width, 0, 0, width, height);
        }
        image.Recycle();

        // Convert from ARGB to ABGR
        for (int i = 0; i < width * height; ++i)
        {
            uint pixel = (uint)pixels[i];
            pixels[i] = (int)((pixel & 0xFF00FF00) | ((pixel & 0x00FF0000) >> 16) | ((pixel & 0x000000FF) << 16));
        }

        Texture2D texture = null;
        // Texture creation must run on the UI (GL) thread.
        Threading.BlockOnUIThread(() =>
        {
            texture = new Texture2D(graphicsDevice, width, height, false, SurfaceFormat.Color);
            texture.SetData<int>(pixels);
        });
        return texture;
    }
#elif WINDOWS_PHONE
    throw new NotImplementedException();
#elif WINDOWS_STOREAPP || DIRECTX
    // For reference this implementation was ultimately found through this post:
    // http://stackoverflow.com/questions/9602102/loading-textures-with-sharpdx-in-metro
    Texture2D toReturn = null;
    SharpDX.WIC.BitmapDecoder decoder;
    using(var bitmap = LoadBitmap(stream, out decoder))
    using (decoder)
    {
        SharpDX.Direct3D11.Texture2D sharpDxTexture = CreateTex2DFromBitmap(bitmap, graphicsDevice);
        toReturn = new Texture2D(graphicsDevice, bitmap.Size.Width, bitmap.Size.Height);
        toReturn._texture = sharpDxTexture;
    }
    return toReturn;
#elif PSM
    return new Texture2D(graphicsDevice, stream);
#else
    using (Bitmap image = (Bitmap)Bitmap.FromStream(stream))
    {
        // Fix up the Image to match the expected format
        image.RGBToBGR();

        var data = new byte[image.Width * image.Height * 4];
        BitmapData bitmapData = image.LockBits(new System.Drawing.Rectangle(0, 0, image.Width, image.Height), ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
        // Only a tightly-packed stride is handled here.
        if (bitmapData.Stride != image.Width * 4)
            throw new NotImplementedException();
        Marshal.Copy(bitmapData.Scan0, data, 0, data.Length);
        image.UnlockBits(bitmapData);

        Texture2D texture = null;
        texture = new Texture2D(graphicsDevice, image.Width, image.Height);
        texture.SetData(data);
        return texture;
    }
#endif
}
//Metodo para reajustar el tamaño de las imagenes que se muestran en la tabla. public static UIImage ScaleImage(UIImage image, int maxSize) { UIImage res; using (CGImage imageRef = image.CGImage) { CGImageAlphaInfo alphaInfo = imageRef.AlphaInfo; CGColorSpace colorSpaceInfo = CGColorSpace.CreateDeviceRGB(); if (alphaInfo == CGImageAlphaInfo.None) { alphaInfo = CGImageAlphaInfo.NoneSkipLast; } nint width, height; width = imageRef.Width; height = imageRef.Height; if (height >= width) { width = (int)Math.Floor((double)width * ((double)maxSize / (double)height)); height = maxSize; } else { height = (int)Math.Floor((double)height * ((double)maxSize / (double)width)); width = maxSize; } CGBitmapContext bitmap; if (image.Orientation == UIImageOrientation.Up || image.Orientation == UIImageOrientation.Down) { bitmap = new CGBitmapContext(IntPtr.Zero, width, height, imageRef.BitsPerComponent, imageRef.BytesPerRow, colorSpaceInfo, alphaInfo); } else { bitmap = new CGBitmapContext(IntPtr.Zero, height, width, imageRef.BitsPerComponent, imageRef.BytesPerRow, colorSpaceInfo, alphaInfo); } switch (image.Orientation) { case UIImageOrientation.Left: bitmap.RotateCTM((float)Math.PI / 2); bitmap.TranslateCTM(0, -height); break; case UIImageOrientation.Right: bitmap.RotateCTM(-((float)Math.PI / 2)); bitmap.TranslateCTM(-width, 0); break; case UIImageOrientation.Up: break; case UIImageOrientation.Down: bitmap.TranslateCTM(width, height); bitmap.RotateCTM(-(float)Math.PI); break; } bitmap.DrawImage(new CGRect(0, 0, width, height), imageRef); res = UIImage.FromImage(bitmap.ToImage()); bitmap = null; } return(res); }