/// <summary>
/// Initializes the transformation from a 5x5 RGBAW color matrix.
/// </summary>
/// <param name="rgbawMatrix">A 5x5 jagged matrix describing the RGBAW color transform.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="rgbawMatrix"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when the matrix (or any of its rows) is not 5x5.</exception>
public ColorSpaceTransformation(float[][] rgbawMatrix)
{
    // Guard against null before touching Length (previously threw NullReferenceException).
    if (rgbawMatrix == null)
    {
        throw new ArgumentNullException(nameof(rgbawMatrix));
    }

    // The matrix must be exactly 5 rows of 5 values each; a null row is also rejected
    // explicitly instead of surfacing as a NullReferenceException inside Any().
    if (rgbawMatrix.Length != 5 || rgbawMatrix.Any(v => v == null || v.Length != 5))
    {
        throw new ArgumentException("Wrong size of RGBAW color matrix", nameof(rgbawMatrix));
    }

    _colorSpace = null;
    _colorMatrix = new CIColorMatrix();
    RGBAWMatrix = rgbawMatrix;
}
/// <summary>
/// Ends the off-screen image context: restores whichever NSGraphicsContext was
/// current before the image context was begun, then clears all cached state so
/// the bitmap context and color space can be reclaimed.
/// </summary>
public static void EndImageContext()
{
    // Re-activate the context that was saved by BeginImageContextWithOptions, if any.
    var restored = PreviousContext;
    if (restored != null)
    {
        NSGraphicsContext.CurrentContext = restored;
    }

    // Drop every cached reference; order is irrelevant, all assignments are independent.
    ImageSize = CGSize.Empty;
    ColorSpace = null;
    PreviousContext = null;
    Context = null;
}
/// <summary>
/// Redraws <paramref name="source"/> into a bitmap backed by the given color space
/// and returns the result as a new UIImage.
/// </summary>
/// <param name="source">The image to convert.</param>
/// <param name="colorSpace">The target color space for the new bitmap.</param>
/// <returns>A new UIImage rendered in <paramref name="colorSpace"/>.</returns>
public static UIImage ToColorSpace(UIImage source, CGColorSpace colorSpace)
{
    var drawRect = new CGRect(0, 0, source.Size.Width, source.Size.Height);
    var pixelWidth = (int)drawRect.Width;
    var pixelHeight = (int)drawRect.Height;

    // IntPtr.Zero lets CoreGraphics allocate the backing buffer; bytesPerRow of 0
    // lets it compute the stride itself.
    using (var bitmap = new CGBitmapContext(IntPtr.Zero, pixelWidth, pixelHeight, 8, 0, colorSpace, CGImageAlphaInfo.None))
    {
        bitmap.DrawImage(drawRect, source.CGImage);
        using (var converted = bitmap.ToImage())
        {
            // UIImage retains the CGImage, so disposing our handle afterwards is safe.
            return new UIImage(converted);
        }
    }
}
/// <summary>
/// Begins an off-screen RGBA bitmap context of the given size and makes it the
/// current NSGraphicsContext, saving the previous one for EndImageContext to restore.
/// </summary>
// NOTE(review): the 'opaque' and 'scale' parameters are accepted but never used in the
// body — confirm whether callers expect them to influence the bitmap (e.g. Retina scale).
public static void BeginImageContextWithOptions(CGSize size, bool opaque, nfloat scale)
{
    // Create new image context
    ColorSpace = CGColorSpace.CreateDeviceRGB ();
    Context = new CGBitmapContext (null, (int)size.Width, (int)size.Height, 8, 0, ColorSpace, CGImageAlphaInfo.PremultipliedLast);

    // Flip context vertically so drawing uses a top-left origin instead of
    // CoreGraphics' default bottom-left origin.
    var flipVertical = new CGAffineTransform(1,0,0,-1,0,size.Height);
    Context.ConcatCTM (flipVertical);

    // Save previous context and activate the new one.
    ImageSize = size;
    PreviousContext = NSGraphicsContext.CurrentContext;
    NSGraphicsContext.CurrentContext = NSGraphicsContext.FromCGContext (Context, true);
}
/// <summary>
/// Sets up one colored and one uncolored 16x16 tile pattern for later drawing.
/// The colored pattern ends up referenced only through <c>coloredPatternColor</c>;
/// the uncolored pattern is kept as both <c>uncoloredPattern</c> and
/// <c>uncoloredPatternColorSpace</c> (a pattern space parented on DeviceRGB).
/// </summary>
public PatternDrawingView() : base()
{
    // First we need to create a CGPattern that specifies the qualities of our pattern.
    using (var coloredPattern = new CGPattern (
        new RectangleF(0, 0, 16, 16), // the pattern coordinate space, drawing is clipped to this rectangle
        CGAffineTransform.MakeIdentity (), // a transform on the pattern coordinate space used before it is drawn.
        16, 16, // the spacing (horizontal, vertical) of the pattern - how far to move after drawing each cell
        CGPatternTiling.NoDistortion,
        true, // this is a colored pattern, which means that you only specify an alpha value when drawing it
        DrawColored)){

        // To draw a pattern, you need a pattern colorspace.
        // Since this is an colored pattern, the parent colorspace is NULL, indicating that it only has an alpha value.
        using (var coloredPatternColorSpace = CGColorSpace.CreatePattern (null)){
            float alpha = 1;
            // Since this pattern is colored, we'll create a CGColor for it to make drawing it easier and more efficient.
            // From here on, the colored pattern is referenced entirely via the associated CGColor rather than the
            // originally created CGPatternRef.
            // NOTE(review): both usings dispose their handles while coloredPatternColor still
            // refers to the pattern — presumably CGColor retains the native pattern/colorspace,
            // but confirm against the binding before relying on it.
            coloredPatternColor = new CGColor (coloredPatternColorSpace, coloredPattern, new float [] { alpha });
        }
    }

    // Uncolored Pattern setup
    // As above, we create a CGPattern that specifies the qualities of our pattern
    uncoloredPattern = new CGPattern (
        new RectangleF(0, 0, 16, 16), // coordinate space
        CGAffineTransform.MakeIdentity (), // transform
        16, 16, // spacing
        CGPatternTiling.NoDistortion,
        false, // this is an uncolored pattern, thus to draw it we need to specify both color and alpha
        DrawUncolored); // callbacks for this pattern

    // With an uncolored pattern we still need to create a pattern colorspace, but now we need a parent colorspace
    // We'll use the DeviceRGB colorspace here. We'll need this colorspace along with the CGPatternRef to draw this pattern later.
    using (var deviceRGB = CGColorSpace.CreateDeviceRGB()){
        uncoloredPatternColorSpace = CGColorSpace.CreatePattern(deviceRGB);
    }
}
/// <summary>
/// Creates a composited image from a list of source images
/// </summary>
/// <param name="paths">The paths of the images, in order of lowest z-index to highest, to composite together</param>
/// <param name="saveLocation">Where to save the composited image</param>
/// <param name="overwrite"><c>true</c> to overwrite an existing file; otherwise <c>false</c>.</param>
public void CreateCompositeImage(List<string> paths, string saveLocation, bool overwrite)
{
    if (!overwrite && iApp.File.Exists(saveLocation))
    {
        return;
    }

    // Keep only paths that actually exist; bail if nothing is left to composite.
    paths = paths.Where(path => path != null && iApp.File.Exists(path)).ToList();
    if (paths.Count == 0)
    {
        return;
    }

    try
    {
        var metric = System.DateTime.UtcNow;
        iApp.File.EnsureDirectoryExists(saveLocation);
        using (new NSAutoreleasePool())
        {
            var images = new List<CGImage>(paths.Count);
            try
            {
                images.AddRange(paths.Select<string, CGImage>(path =>
                    path.EndsWith(".png", StringComparison.InvariantCultureIgnoreCase)
                        ? CGImage.FromPNG(CGDataProvider.FromFile(path), null, false, CGColorRenderingIntent.Default)
                        : CGImage.FromJPEG(CGDataProvider.FromFile(path), null, false, CGColorRenderingIntent.Default)));

                // The first image defines the canvas size; later images are stretched to it.
                nint width = images[0].Width;
                nint height = images[0].Height;
                var bounds = new RectangleF(0, 0, width, height);

                // Fix: the bitmap context and color space were previously never disposed,
                // leaking native memory on every call.
                using (var colorSpace = CGColorSpace.CreateDeviceRGB())
                using (var g = new CGBitmapContext(
                    System.IntPtr.Zero,
                    width, height,
                    images[0].BitsPerComponent,
                    images[0].Width * 4,
                    colorSpace,
                    CGImageAlphaInfo.PremultipliedLast))
                {
                    // Draw in list order: lowest z-index first, highest last.
                    foreach (var cgImage in images)
                    {
                        g.DrawImage(bounds, cgImage);
                    }

                    lock (padlock)
                    {
                        // UIImage.AsPNG() should be safe to run on a background thread, but MT 6.2.6.6 says otherwise.
                        // Xamarin confirmed that this was unintentional and that MT 6.2.7 will remove the UI check.
                        UIApplication.CheckForIllegalCrossThreadCalls = false;
                        NSError err = null;
                        UIImage.FromImage(g.ToImage()).AsPNG().Save(saveLocation, true, out err);
                        UIApplication.CheckForIllegalCrossThreadCalls = true;
                    }
                }
            }
            finally
            {
                // Fix: release the decoded source images deterministically instead of
                // waiting for the GC/finalizers to reclaim the native bitmaps.
                foreach (var img in images)
                {
                    img.Dispose();
                }
            }
        }
        iApp.Log.Metric("ImageEngine icon creation", System.DateTime.UtcNow.Subtract(metric).TotalMilliseconds);
    }
    catch (Exception e)
    {
        iApp.Log.Error("An error occurred while compositing the image", e);
    }
}
/// <summary>
/// Creates a CIImage from an image provider, converting the pixel format enum to
/// its native integer value and forwarding the options dictionary when present.
/// </summary>
public static CIImage FromProvider(ICIImageProvider provider, nuint width, nuint height, CIFormat pixelFormat, CGColorSpace colorSpace, CIImageProviderOptions options)
{
    var nativeFormat = CIImage.CIFormatToInt(pixelFormat);
    var optionsDictionary = options == null ? null : options.Dictionary;
    return FromProvider(provider, width, height, nativeFormat, colorSpace, optionsDictionary);
}
/// <summary>
/// Redraws the current set of routes as a stroked polyline into an off-screen
/// bitmap sized to the route view, then assigns the result as the view's image.
/// </summary>
public void UpdateRouteView()
{
    using (var canvas = new CGBitmapContext(
        null,
        (int)_RouteView.Frame.Width,
        (int)_RouteView.Frame.Height,
        8,
        (int)(4 * _RouteView.Frame.Width),
        CGColorSpace.CreateDeviceRGB(),
        CGImageAlphaInfo.PremultipliedLast))
    {
        canvas.SetStrokeColor(LineColor.CGColor);
        canvas.SetFillColor(0.0f, 0.0f, 1.0f, 1.0f);
        canvas.SetLineWidth(3.0f);

        for (int index = 0; index < _Routes.Count(); index++)
        {
            var waypoint = _MapView.ConvertCoordinate(_Routes[index].Coordinate, _RouteView);
            // The bitmap's origin is bottom-left, so flip the y coordinate into view space.
            var flippedY = _RouteView.Frame.Height - waypoint.Y;
            if (index == 0)
            {
                canvas.MoveTo(waypoint.X, flippedY);
            }
            else
            {
                canvas.AddLineToPoint(waypoint.X, flippedY);
            }
        }

        canvas.StrokePath();
        _RouteView.Image = UIImage.FromImage(canvas.ToImage());
    }
}
/// <summary>
/// Reads an image file into a float tensor of shape [1, height, width, 3] (RGB),
/// optionally resizing it and applying (pixel - inputMean) * scale per channel.
/// Platform-specific decoding: Android Bitmap, iOS UIImage/CoreGraphics, or a
/// TensorFlow graph / native reader elsewhere.
/// </summary>
/// <param name="fileName">Path of the image file to read.</param>
/// <param name="inputHeight">Target height, or -1 to keep the source height.</param>
/// <param name="inputWidth">Target width, or -1 to keep the source width.</param>
/// <param name="inputMean">Value subtracted from each channel before scaling.</param>
/// <param name="scale">Multiplier applied after mean subtraction.</param>
/// <param name="status">Optional status object used by the TensorFlow-graph path.</param>
/// <returns>A newly allocated float Tensor holding the normalized pixels.</returns>
public static Tensor ReadTensorFromImageFile(String fileName, int inputHeight = -1, int inputWidth = -1, float inputMean = 0.0f, float scale = 1.0f, Status status = null)
{
#if __ANDROID__
    Android.Graphics.Bitmap bmp = BitmapFactory.DecodeFile(fileName);
    // NOTE(review): this resizes when EITHER dimension is positive, but the TF-graph
    // path below requires BOTH (&&). A single positive dimension passes -1 to
    // CreateScaledBitmap here — confirm the intended condition.
    if (inputHeight > 0 || inputWidth > 0)
    {
        Bitmap resized = Bitmap.CreateScaledBitmap(bmp, inputWidth, inputHeight, false);
        bmp.Dispose();
        bmp = resized;
    }
    int[] intValues = new int[bmp.Width * bmp.Height];
    float[] floatValues = new float[bmp.Width * bmp.Height * 3];
    bmp.GetPixels(intValues, 0, bmp.Width, 0, 0, bmp.Width, bmp.Height);
    // Unpack each packed ARGB int into normalized R, G, B floats (alpha is dropped).
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    Tensor t = new Tensor(DataType.Float, new int[] { 1, bmp.Height, bmp.Width, 3 });
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, t.DataPointer, floatValues.Length);
    return(t);
#elif __IOS__
    UIImage image = new UIImage(fileName);
    // NOTE(review): same || vs && question as the Android branch above.
    if (inputHeight > 0 || inputWidth > 0)
    {
        UIImage resized = image.Scale(new CGSize(inputWidth, inputHeight));
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    // Pin intValues so CoreGraphics can render directly into the managed array.
    // NOTE(review): the handle is not freed if DrawImage throws — consider try/finally.
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    // Unpack each rendered 32-bit pixel into normalized R, G, B floats.
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    Tensor t = new Tensor(DataType.Float, new int[] { 1, (int)image.Size.Height, (int)image.Size.Width, 3 });
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, t.DataPointer, floatValues.Length);
    return(t);
#else
    if (Emgu.TF.Util.Platform.OperationSystem == OS.Windows)
    {
        // Windows: delegate decoding/normalization to the native reader.
        Tensor t = new Tensor(DataType.Float, new int[] { 1, (int)inputHeight, (int)inputWidth, 3 });
        NativeImageIO.ReadImageFileToTensor <float>(fileName, t.DataPointer, inputHeight, inputWidth, inputMean, scale);
        return(t);
    }
    else
    {
        //Mac OS or Linux
        // Build a small TF graph that decodes, resizes and normalizes the image,
        // then run it once on the raw file bytes.
        using (StatusChecker checker = new StatusChecker(status))
        {
            // NOTE(review): graph and session are not disposed here — confirm whether
            // Graph/Session ownership passes to the returned tensor or this leaks.
            var graph = new Graph();
            Operation input = graph.Placeholder(DataType.String);
            Operation jpegDecoder = graph.DecodeJpeg(input, 3); //dimension 3
            Operation floatCaster = graph.Cast(jpegDecoder, DstT: DataType.Float); //cast to float
            Tensor axis = new Tensor(0);
            Operation axisOp = graph.Const(axis, axis.Type, opName: "axis");
            Operation dimsExpander = graph.ExpandDims(floatCaster, axisOp); //turn it to dimension [1,3]
            Operation resized;
            bool resizeRequired = (inputHeight > 0) && (inputWidth > 0);
            if (resizeRequired)
            {
                Tensor size = new Tensor(new int[] { inputHeight, inputWidth }); // new size;
                Operation sizeOp = graph.Const(size, size.Type, opName: "size");
                resized = graph.ResizeBilinear(dimsExpander, sizeOp); //resize image
            }
            else
            {
                resized = dimsExpander;
            }
            Tensor mean = new Tensor(inputMean);
            Operation meanOp = graph.Const(mean, mean.Type, opName: "mean");
            Operation substracted = graph.Sub(resized, meanOp);
            Tensor scaleTensor = new Tensor(scale);
            Operation scaleOp = graph.Const(scaleTensor, scaleTensor.Type, opName: "scale");
            Operation scaled = graph.Mul(substracted, scaleOp);
            Session session = new Session(graph);
            Tensor imageTensor = Tensor.FromString(File.ReadAllBytes(fileName), status);
            Tensor[] imageResults = session.Run(new Output[] { input }, new Tensor[] { imageTensor }, new Output[] { scaled });
            return(imageResults[0]);
        }
    }
#endif
}
// Metodo para redimensionar las imagenes de la lista.
/// <summary>
/// Scales an image so its longest side equals <paramref name="maxSize"/>,
/// preserving aspect ratio and compensating for the source's orientation.
/// </summary>
/// <param name="image">The image to scale.</param>
/// <param name="maxSize">Target length, in pixels, of the longest side.</param>
/// <returns>A new UIImage with the scaled content.</returns>
public static UIImage ScaleImage(UIImage image, int maxSize)
{
    UIImage res;
    using (CGImage imageRef = image.CGImage)
    using (CGColorSpace colorSpaceInfo = CGColorSpace.CreateDeviceRGB()) // Fix: previously never disposed
    {
        CGImageAlphaInfo alphaInfo = imageRef.AlphaInfo;
        // AlphaInfo.None is swapped for NoneSkipLast before creating the context.
        if (alphaInfo == CGImageAlphaInfo.None)
        {
            alphaInfo = CGImageAlphaInfo.NoneSkipLast;
        }

        nint width = imageRef.Width;
        nint height = imageRef.Height;
        // Scale the longest side down/up to maxSize, keeping the aspect ratio.
        if (height >= width)
        {
            width = (int)Math.Floor((double)width * ((double)maxSize / (double)height));
            height = maxSize;
        }
        else
        {
            height = (int)Math.Floor((double)height * ((double)maxSize / (double)width));
            width = maxSize;
        }

        // For sideways orientations the bitmap is allocated rotated (height x width).
        // Fix: pass 0 for bytesPerRow so CoreGraphics computes the stride for the NEW
        // dimensions — the old code reused imageRef.BytesPerRow (the stride of the
        // unscaled source), which is wrong whenever maxSize exceeds the source size.
        // Fix: the context is now disposed instead of being leaked (old code set it to null).
        bool sideways = image.Orientation != UIImageOrientation.Up && image.Orientation != UIImageOrientation.Down;
        using (CGBitmapContext bitmap = sideways
            ? new CGBitmapContext(IntPtr.Zero, height, width, imageRef.BitsPerComponent, 0, colorSpaceInfo, alphaInfo)
            : new CGBitmapContext(IntPtr.Zero, width, height, imageRef.BitsPerComponent, 0, colorSpaceInfo, alphaInfo))
        {
            // Rotate/translate the context so the source draws upright.
            switch (image.Orientation)
            {
                case UIImageOrientation.Left:
                    bitmap.RotateCTM((float)Math.PI / 2);
                    bitmap.TranslateCTM(0, -height);
                    break;
                case UIImageOrientation.Right:
                    bitmap.RotateCTM(-((float)Math.PI / 2));
                    bitmap.TranslateCTM(-width, 0);
                    break;
                case UIImageOrientation.Up:
                    break;
                case UIImageOrientation.Down:
                    bitmap.TranslateCTM(width, height);
                    bitmap.RotateCTM(-(float)Math.PI);
                    break;
            }

            bitmap.DrawImage(new CGRect(0, 0, width, height), imageRef);
            res = UIImage.FromImage(bitmap.ToImage());
        }
    }
    return res;
}
/// <summary>
/// Builds a full-screen image view and fills it with a rotated polka-dot pattern
/// rendered into an off-screen bitmap context.
/// </summary>
public override void ViewDidLoad()
{
    base.ViewDidLoad();
    // set the background color of the view to white
    View.BackgroundColor = UIColor.White;
    // instantiate a new image view that takes up the whole screen and add it to
    // the view hierarchy (offset upward by the navigation bar height)
    RectangleF imageViewFrame = new RectangleF(0, -NavigationController.NavigationBar.Frame.Height, View.Frame.Width, View.Frame.Height);
    imageView = new UIImageView(imageViewFrame);
    View.AddSubview(imageView);
    // create our offscreen bitmap context
    // size
    SizeF bitmapSize = new SizeF(View.Frame.Size);
    using (CGBitmapContext context = new CGBitmapContext(IntPtr.Zero, (int)bitmapSize.Width, (int)bitmapSize.Height, 8, (int)(4 * bitmapSize.Width), CGColorSpace.CreateDeviceRGB(), CGImageAlphaInfo.PremultipliedFirst))
    {
        // declare vars
        RectangleF patternRect = new RectangleF(0, 0, 16, 16);
        // set the color space of our fill to be the pattern colorspace
        // NOTE(review): the pattern colorspace, the DeviceRGB colorspace above, and the
        // CGPattern below are never disposed — consider using/Dispose for each.
        context.SetFillColorSpace(CGColorSpace.CreatePattern(null));
        // create a new pattern (16x16 cell, rotated by 0.3 radians)
        CGPattern pattern = new CGPattern(patternRect, CGAffineTransform.MakeRotation(.3f), 16, 16, CGPatternTiling.NoDistortion, true, DrawPolkaDotPattern);
        // set our fill as our pattern, color doesn't matter because the pattern handles it
        context.SetFillPattern(pattern, new float[] { 1 });
        // fill the entire view with that pattern
        context.FillRect(imageView.Frame);
        // output the drawing to the view
        imageView.Image = UIImage.FromImage(context.ToImage());
    }
}
// https://developer.apple.com/library/mac/#documentation/graphicsimaging/conceptual/drawingwithquartz2d/dq_patterns/dq_patterns.html#//apple_ref/doc/uid/TP30001066-CH206-TPXREF101
/// <summary>
/// Installs this hatch brush into the graphics context: picks the pattern-cell
/// drawing callback and transform for the current hatch style, then sets the
/// resulting CGPattern as both the fill and stroke pattern.
/// </summary>
internal override void Setup(Graphics graphics, bool fill)
{
    // if this is the same as the last that was set then return
    if (graphics.LastBrush == this)
    {
        return;
    }

    // obtain our width and height so we can set the pattern rectangle
    float hatch_width = getHatchWidth(hatchStyle);
    float hatch_height = getHatchHeight(hatchStyle);

    //choose the pattern to be filled based on the currentPattern selected
    var patternSpace = CGColorSpace.CreatePattern(null);
    graphics.context.SetFillColorSpace(patternSpace);
    graphics.context.SetStrokeColorSpace(patternSpace);
    patternSpace.Dispose();

    // Pattern default work variables
    var patternRect = new CGRect(HALF_PIXEL_X, HALF_PIXEL_Y, hatch_width + HALF_PIXEL_X, hatch_height + HALF_PIXEL_Y);
    var patternTransform = CGAffineTransform.MakeIdentity();

    // Since all the patterns were developed with MonoMac on Mac OS the coordinate system is
    // defaulted to the lower left corner being 0,0 which means for MonoTouch and any view
    // that is flipped we need to flip it again. Yep should have thought about it to begin with
    // will look into changing it later if need be.
#if MONOMAC
    if (graphics.isFlipped)
    {
        patternTransform = new CGAffineTransform(1, 0, 0, -1, 0, hatch_height);
    }
#endif
#if MONOTOUCH
    if (!graphics.isFlipped)
    {
        patternTransform = new CGAffineTransform(1, 0, 0, -1, 0, hatch_height);
    }
#endif

    // DrawPattern callback which will be set depending on hatch style.
    // Several styles reuse one callback and differ only by the transform applied
    // (rotation for vertical variants, x-axis mirror for "downward" diagonals).
    CGPattern.DrawPattern drawPattern;

    switch (hatchStyle)
    {
    case HatchStyle.Horizontal:
    case HatchStyle.LightHorizontal:
    case HatchStyle.NarrowHorizontal:
    case HatchStyle.DarkHorizontal:
        drawPattern = HatchHorizontal;
        break;
    case HatchStyle.Vertical:
    case HatchStyle.LightVertical:
    case HatchStyle.NarrowVertical:
    case HatchStyle.DarkVertical:
        // Vertical = horizontal hatch rotated 90 degrees.
        patternTransform = CGAffineTransform.MakeRotation(90 * (float)Math.PI / 180);
        drawPattern = HatchHorizontal;
        break;
    case HatchStyle.ForwardDiagonal:
    case HatchStyle.LightDownwardDiagonal:
    case HatchStyle.DarkDownwardDiagonal:
    case HatchStyle.WideDownwardDiagonal:
        // We will flip the x-axis here
        patternTransform = CGAffineTransform.MakeScale(-1, 1);
        drawPattern = HatchUpwardDiagonal;
        break;
    case HatchStyle.BackwardDiagonal:
    case HatchStyle.LightUpwardDiagonal:
    case HatchStyle.DarkUpwardDiagonal:
    case HatchStyle.WideUpwardDiagonal:
        drawPattern = HatchUpwardDiagonal;
        break;
    case HatchStyle.LargeGrid:
    case HatchStyle.SmallGrid:
    case HatchStyle.DottedGrid:
        drawPattern = HatchGrid;
        break;
    case HatchStyle.DiagonalCross:
        drawPattern = HatchDiagonalCross;
        break;
    case HatchStyle.Percent05:
    case HatchStyle.Percent10:
    case HatchStyle.Percent20:
    case HatchStyle.Percent25:
    case HatchStyle.Percent30:
    case HatchStyle.Percent40:
    case HatchStyle.Percent50:
    case HatchStyle.Percent60:
    case HatchStyle.Percent70:
    case HatchStyle.Percent75:
    case HatchStyle.Percent80:
    case HatchStyle.Percent90:
        drawPattern = HatchPercentage;
        break;
    case HatchStyle.Sphere:
        drawPattern = HatchSphere;
        break;
    case HatchStyle.DashedDownwardDiagonal:
        patternTransform = CGAffineTransform.MakeScale(-1, 1);
        drawPattern = HatchDashedDiagonal;
        break;
    case HatchStyle.DashedUpwardDiagonal:
        drawPattern = HatchDashedDiagonal;
        break;
    case HatchStyle.DashedHorizontal:
        drawPattern = HatchDashedHorizontal;
        break;
    case HatchStyle.DashedVertical:
        patternTransform = CGAffineTransform.MakeRotation(-90 * (float)Math.PI / 180);
        drawPattern = HatchDashedHorizontal;
        break;
    case HatchStyle.LargeConfetti:
    case HatchStyle.SmallConfetti:
        drawPattern = HatchConfetti;
        break;
    case HatchStyle.ZigZag:
        drawPattern = HatchZigZag;
        break;
    case HatchStyle.Wave:
        drawPattern = HatchWave;
        break;
    case HatchStyle.HorizontalBrick:
        drawPattern = HatchHorizontalBrick;
        break;
    case HatchStyle.DiagonalBrick:
        drawPattern = HatchDiagonalBrick;
        break;
//  case HatchStyle.Weave:
//      drawPattern = HatchWeave;
//      break;
    case HatchStyle.Trellis:
        drawPattern = HatchTrellis;
        break;
    case HatchStyle.LargeCheckerBoard:
    case HatchStyle.SmallCheckerBoard:
        drawPattern = HatchCheckered;
        break;
    case HatchStyle.OutlinedDiamond:
        drawPattern = HatchOutlinedDiamond;
        break;
    case HatchStyle.SolidDiamond:
        drawPattern = HatchSolidDiamond;
        break;
    case HatchStyle.DottedDiamond:
        drawPattern = HatchDottedDiamond;
        break;
    case HatchStyle.Divot:
        drawPattern = HatchDivot;
        break;
    case HatchStyle.Shingle:
        drawPattern = HatchShingle;
        break;
    case HatchStyle.Plaid:
        drawPattern = HatchPlaid;
        break;
    default:
        drawPattern = DrawPolkaDotPattern;
        break;
    }

    //set the pattern as the Current Context’s fill pattern
    var pattern = new CGPattern(patternRect,
        patternTransform,
        hatch_width, hatch_height,
        CGPatternTiling.NoDistortion,
        true,
        drawPattern);
    //we dont need to set any color, as the pattern cell itself has chosen its own color
    graphics.context.SetFillPattern(pattern, new nfloat[] { 1 });
    graphics.context.SetStrokePattern(pattern, new nfloat[] { 1 });

    graphics.LastBrush = this;
    // I am setting this to be used for Text coloring in DrawString
    graphics.lastBrushColor = foreColor;
}
/// <summary>
/// Reads an image file, optionally resizes it, normalizes each RGB channel as
/// (pixel - inputMean) * scale, and copies the resulting floats into the
/// pre-allocated buffer at <paramref name="dest"/> (layout: H x W x 3, row-major).
/// Platform-specific decoding: Android Bitmap, iOS UIImage, macOS NSImage, or
/// System.Drawing on Windows.
/// </summary>
/// <param name="fileName">Path of the image file to read.</param>
/// <param name="dest">Destination buffer; must hold height*width*3 floats.</param>
/// <param name="inputHeight">Target height, or -1 to keep the source height.</param>
/// <param name="inputWidth">Target width, or -1 to keep the source width.</param>
/// <param name="inputMean">Value subtracted from each channel before scaling.</param>
/// <param name="scale">Multiplier applied after mean subtraction.</param>
public static void ReadImageFileToTensor(String fileName, IntPtr dest, int inputHeight = -1, int inputWidth = -1, float inputMean = 0.0f, float scale = 1.0f)
{
#if __ANDROID__
    Android.Graphics.Bitmap bmp = BitmapFactory.DecodeFile(fileName);
    // NOTE(review): resizes when EITHER dimension is positive; a single positive
    // dimension passes -1 to CreateScaledBitmap — confirm '&&' was not intended.
    if (inputHeight > 0 || inputWidth > 0)
    {
        Bitmap resized = Bitmap.CreateScaledBitmap(bmp, inputWidth, inputHeight, false);
        bmp.Dispose();
        bmp = resized;
    }
    int[] intValues = new int[bmp.Width * bmp.Height];
    float[] floatValues = new float[bmp.Width * bmp.Height * 3];
    bmp.GetPixels(intValues, 0, bmp.Width, 0, 0, bmp.Width, bmp.Height);
    // Unpack each packed ARGB int into normalized R, G, B floats (alpha is dropped).
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
#elif __IOS__
    UIImage image = new UIImage(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        UIImage resized = image.Scale(new CGSize(inputWidth, inputHeight));
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    // Pin intValues so CoreGraphics can render directly into the managed array.
    // NOTE(review): the handle is not freed if DrawImage throws — consider try/finally.
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
#elif __UNIFIED__
    // macOS: render the NSImage through the same pinned-buffer CoreGraphics path.
    NSImage image = new NSImage(fileName);
    if (inputHeight > 0 || inputWidth > 0)
    {
        NSImage resized = new NSImage(new CGSize(inputWidth, inputHeight));
        resized.LockFocus();
        image.DrawInRect(new CGRect(0, 0, inputWidth, inputHeight), CGRect.Empty, NSCompositingOperation.SourceOver, 1.0f);
        resized.UnlockFocus();
        image.Dispose();
        image = resized;
    }
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
#else
    if (Emgu.TF.Util.Platform.OperationSystem == OS.Windows)
    {
        //Do something for Windows
        System.Drawing.Bitmap bmp = new Bitmap(fileName);
        if (inputHeight > 0 || inputWidth > 0)
        {
            //resize bmp
            System.Drawing.Bitmap newBmp = new Bitmap(bmp, inputWidth, inputHeight);
            bmp.Dispose();
            bmp = newBmp;
            //bmp.Save("tmp.png");
        }
        byte[] byteValues = new byte[bmp.Width * bmp.Height * 3];
        // Lock the 24bpp BGR pixels and copy them out before normalizing.
        System.Drawing.Imaging.BitmapData bd = new System.Drawing.Imaging.BitmapData();
        bmp.LockBits(
            new Rectangle(0, 0, bmp.Width, bmp.Height),
            System.Drawing.Imaging.ImageLockMode.ReadOnly,
            System.Drawing.Imaging.PixelFormat.Format24bppRgb, bd);
        System.Runtime.InteropServices.Marshal.Copy(bd.Scan0, byteValues, 0, byteValues.Length);
        bmp.UnlockBits(bd);
        float[] floatValues = new float[bmp.Width * bmp.Height * 3];
        for (int i = 0; i < byteValues.Length; ++i)
        {
            floatValues[i] = ((float)byteValues[i] - inputMean) * scale;
        }
        System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, dest, floatValues.Length);
    }
    else
    {
        throw new Exception("Not implemented");
    }
#endif
}
/// <summary>
/// Encodes a raw pixel buffer as a JPEG using the platform's native imaging APIs.
/// Android and macOS expect 4 channels; iOS expects 3 — any other channel count throws.
/// </summary>
/// <param name="rawPixel">Raw pixel bytes, tightly packed row-major.</param>
/// <param name="width">Image width in pixels.</param>
/// <param name="height">Image height in pixels.</param>
/// <param name="channels">Bytes per pixel in <paramref name="rawPixel"/>.</param>
/// <returns>The JPEG-encoded bytes.</returns>
public static byte[] PixelToJpeg(byte[] rawPixel, int width, int height, int channels)
{
#if __ANDROID__
    if (channels != 4)
    {
        throw new NotImplementedException("Only 4 channel pixel input is supported.");
    }
    using (Bitmap bitmap = Bitmap.CreateBitmap(width, height, Bitmap.Config.Argb8888))
    using (MemoryStream ms = new MemoryStream())
    {
        // Copy the raw bytes straight into the bitmap's pixel storage.
        IntPtr ptr = bitmap.LockPixels();
        //GCHandle handle = GCHandle.Alloc(colors, GCHandleType.Pinned);
        Marshal.Copy(rawPixel, 0, ptr, rawPixel.Length);
        bitmap.UnlockPixels();
        bitmap.Compress(Bitmap.CompressFormat.Jpeg, 90, ms);
        return(ms.ToArray());
    }
#elif __IOS__
    if (channels != 3)
    {
        throw new NotImplementedException("Only 3 channel pixel input is supported.");
    }
    System.Drawing.Size sz = new System.Drawing.Size(width, height);
    // Pin the caller's buffer so CoreGraphics can wrap it without copying.
    // NOTE(review): the handle is not freed if context/image creation throws — consider try/finally.
    // NOTE(review): a 3-byte-per-pixel stride combined with PremultipliedLast is an unusual
    // CGBitmapContext configuration (premultiplied alpha normally implies 4 channels) —
    // verify this succeeds on-device rather than returning a failed context.
    GCHandle handle = GCHandle.Alloc(rawPixel, GCHandleType.Pinned);
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        sz.Width, sz.Height,
        8,
        sz.Width * 3,
        cspace,
        CGImageAlphaInfo.PremultipliedLast))
    using (CGImage cgImage = context.ToImage())
    using (UIImage newImg = new UIImage(cgImage))
    {
        handle.Free();
        var jpegData = newImg.AsJPEG();
        byte[] raw = new byte[jpegData.Length];
        System.Runtime.InteropServices.Marshal.Copy(jpegData.Bytes, raw, 0, (int)jpegData.Length);
        return(raw);
    }
#elif __UNIFIED__
    //OSX
    if (channels != 4)
    {
        throw new NotImplementedException("Only 4 channel pixel input is supported.");
    }
    System.Drawing.Size sz = new System.Drawing.Size(width, height);
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        rawPixel,
        sz.Width, sz.Height,
        8,
        sz.Width * 4,
        cspace,
        CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big))
    using (CGImage cgImage = context.ToImage())
    using (NSBitmapImageRep newImg = new NSBitmapImageRep(cgImage))
    {
        var jpegData = newImg.RepresentationUsingTypeProperties(NSBitmapImageFileType.Jpeg);
        byte[] raw = new byte[jpegData.Length];
        System.Runtime.InteropServices.Marshal.Copy(jpegData.Bytes, raw, 0, (int)jpegData.Length);
        return(raw);
    }
#else
    throw new NotImplementedException("Not Implemented");
#endif
}
/// <summary>
/// Wraps the image's pixel data in a CGImage with bitmap flags matching its
/// format (swapping BGR byte order in a copied buffer where CGImage cannot do
/// it), and returns its remote representation. Returns null for unsupported formats.
/// </summary>
static Image NormalizeImage(Image image)
{
    const int bitsPerComponent = 8;
    int components;
    uint flags = 0;
    byte [] buffer;
    // First pass: map the source format to a channel count and CGBitmap flags.
    switch (image.Format)
    {
    case ImageFormat.Rgb24:
    case ImageFormat.Bgr24:
        components = 3;
        break;
    case ImageFormat.Bgra32:
        components = 4;
        flags = (uint)CGBitmapFlags.ByteOrder32Little | (uint)CGImageAlphaInfo.First;
        break;
    case ImageFormat.Rgba32:
        components = 4;
        flags = (uint)CGImageAlphaInfo.Last;
        break;
    default:
        return(null);
    }
    // Second pass: pick the pixel buffer, copying and swapping channels when needed.
    switch (image.Format)
    {
    case ImageFormat.Bgr24:
        // FIXME: ugh, CGImage really does not want 24bpp in BGR order...
        // No CGBitmapFlags can convince it to do the channel swap unless
        // the buffer is 32bpp.
        buffer = new byte [image.Data.Length];
        Array.Copy(image.Data, buffer, buffer.Length);
        // Swap byte 0 and byte 2 of every pixel (BGR -> RGB) in the copy.
        for (int i = 0; i < buffer.Length; i += components)
        {
            var b = buffer [i];
            buffer [i] = buffer [i + 2];
            buffer [i + 2] = b;
        }
        break;
    default:
        // Other formats are handled by the flags alone; use the data in place.
        buffer = image.Data;
        break;
    }
    using (NativeExceptionHandler.Trap())
        return(new CGImage(
            image.Width, image.Height,
            bitsPerComponent,
            bitsPerComponent * components,
            image.Width * components,
            CGColorSpace.CreateDeviceRGB(),
            (CGBitmapFlags)flags,
            new CGDataProvider(buffer),
            null,
            false,
            CGColorRenderingIntent.AbsoluteColorimetric).RemoteRepresentation());
}
/// <summary>
/// Decodes an image stream into a Texture2D using the platform's native image
/// loader (UIImage/NSImage, Android Bitmap, WIC/SharpDX, PSM, or System.Drawing),
/// converting the pixels to the engine's expected RGBA layout.
/// </summary>
public static Texture2D FromStream(GraphicsDevice graphicsDevice, Stream stream)
{
    //todo: partial classes would be cleaner
#if IOS || MONOMAC
#if IOS
    using (var uiImage = UIImage.LoadFromData(NSData.FromStream(stream)))
#elif MONOMAC
    using (var nsImage = NSImage.FromStream (stream))
#endif
    {
#if IOS
        var cgImage = uiImage.CGImage;
#elif MONOMAC
        var rectangle = RectangleF.Empty;
        var cgImage = nsImage.AsCGImage (ref rectangle, null, null);
#endif
        var width = cgImage.Width;
        var height = cgImage.Height;
        // Render into a managed RGBA buffer so SetData can consume it directly.
        var data = new byte[width * height * 4];
        var colorSpace = CGColorSpace.CreateDeviceRGB();
        var bitmapContext = new CGBitmapContext(data, width, height, 8, width * 4, colorSpace, CGBitmapFlags.PremultipliedLast);
        bitmapContext.DrawImage(new RectangleF(0, 0, width, height), cgImage);
        bitmapContext.Dispose();
        colorSpace.Dispose();
        Texture2D texture = null;
        // Texture creation must happen on the UI/GL thread.
        Threading.BlockOnUIThread(() =>
        {
            texture = new Texture2D(graphicsDevice, width, height, false, SurfaceFormat.Color);
            texture.SetData(data);
        });
        return texture;
    }
#elif ANDROID
    using (Bitmap image = BitmapFactory.DecodeStream(stream, null, new BitmapFactory.Options
    {
        InScaled = false,
        InDither = false,
        InJustDecodeBounds = false,
        InPurgeable = true,
        InInputShareable = true,
    }))
    {
        var width = image.Width;
        var height = image.Height;
        int[] pixels = new int[width * height];
        // NOTE(review): width/height are assigned from 'image' immediately above, so this
        // condition is always false and the re-padding branch is dead code — it looks like
        // a leftover from power-of-two padding; confirm before removing.
        if ((width != image.Width) || (height != image.Height))
        {
            using (Bitmap imagePadded = Bitmap.CreateBitmap(width, height, Bitmap.Config.Argb8888))
            {
                Canvas canvas = new Canvas(imagePadded);
                canvas.DrawARGB(0, 0, 0, 0);
                canvas.DrawBitmap(image, 0, 0, null);
                imagePadded.GetPixels(pixels, 0, width, 0, 0, width, height);
                imagePadded.Recycle();
            }
        }
        else
        {
            image.GetPixels(pixels, 0, width, 0, 0, width, height);
        }
        image.Recycle();
        // Convert from ARGB to ABGR
        for (int i = 0; i < width * height; ++i)
        {
            uint pixel = (uint)pixels[i];
            pixels[i] = (int)((pixel & 0xFF00FF00) | ((pixel & 0x00FF0000) >> 16) | ((pixel & 0x000000FF) << 16));
        }
        Texture2D texture = null;
        Threading.BlockOnUIThread(() =>
        {
            texture = new Texture2D(graphicsDevice, width, height, false, SurfaceFormat.Color);
            texture.SetData<int>(pixels);
        });
        return texture;
    }
#elif WINDOWS_PHONE
    throw new NotImplementedException();
#elif WINDOWS_STOREAPP || DIRECTX
    // For reference this implementation was ultimately found through this post:
    // http://stackoverflow.com/questions/9602102/loading-textures-with-sharpdx-in-metro
    Texture2D toReturn = null;
    SharpDX.WIC.BitmapDecoder decoder;
    using(var bitmap = LoadBitmap(stream, out decoder))
    using (decoder)
    {
        SharpDX.Direct3D11.Texture2D sharpDxTexture = CreateTex2DFromBitmap(bitmap, graphicsDevice);
        toReturn = new Texture2D(graphicsDevice, bitmap.Size.Width, bitmap.Size.Height);
        toReturn._texture = sharpDxTexture;
    }
    return toReturn;
#elif PSM
    return new Texture2D(graphicsDevice, stream);
#else
    using (Bitmap image = (Bitmap)Bitmap.FromStream(stream))
    {
        // Fix up the Image to match the expected format
        image.RGBToBGR();
        var data = new byte[image.Width * image.Height * 4];
        BitmapData bitmapData = image.LockBits(new System.Drawing.Rectangle(0, 0, image.Width, image.Height),
            ImageLockMode.ReadOnly, System.Drawing.Imaging.PixelFormat.Format32bppArgb);
        // Only tightly packed rows are supported here.
        if (bitmapData.Stride != image.Width * 4)
            throw new NotImplementedException();
        Marshal.Copy(bitmapData.Scan0, data, 0, data.Length);
        image.UnlockBits(bitmapData);
        Texture2D texture = null;
        texture = new Texture2D(graphicsDevice, image.Width, image.Height);
        texture.SetData(data);
        return texture;
    }
#endif
}
/// <summary>
/// Reads an image file and converts it into a 4-D float Tensor of shape
/// [1, height, width, 3], optionally resized, mean-subtracted and scaled.
/// Platform-specific decoders are selected by preprocessor symbols; the
/// desktop path builds a small TensorFlow graph to do the decode/resize.
/// </summary>
/// <typeparam name="T">Destination element type (e.g. float).</typeparam>
/// <param name="fileName">Path of the image file to read.</param>
/// <param name="inputHeight">Target height; non-positive means keep original.</param>
/// <param name="inputWidth">Target width; non-positive means keep original.</param>
/// <param name="inputMean">Mean subtracted from each pixel component.</param>
/// <param name="scale">Multiplier applied after mean subtraction.</param>
/// <param name="flipUpSideDown">If true, flip the image vertically.</param>
/// <param name="swapBR">If true, swap the B and R channels.</param>
/// <param name="status">Optional status object for TF error reporting.</param>
/// <returns>The resulting tensor.</returns>
public static Tensor ReadTensorFromImageFile <T>(
    String fileName,
    int inputHeight = -1,
    int inputWidth = -1,
    float inputMean = 0.0f,
    float scale = 1.0f,
    bool flipUpSideDown = false,
    bool swapBR = false,
    Status status = null) where T : struct
{
#if __ANDROID__
    return(NativeReadTensorFromImageFile <T>(fileName, inputHeight, inputWidth, inputMean, scale, flipUpSideDown, swapBR));
#elif __IOS__
    UIImage image = new UIImage(fileName);
    // NOTE(review): if only one of inputHeight/inputWidth is positive, the other
    // is still -1 here and Scale() receives a negative dimension — verify callers
    // always pass both.
    if (inputHeight > 0 || inputWidth > 0)
    {
        UIImage resized = image.Scale(new CGSize(inputWidth, inputHeight));
        image.Dispose();
        image = resized;
    }
    // NOTE(review): `image` is never disposed after this point, and unlike the
    // other paths this branch never applies flipUpSideDown or swapBR — confirm
    // whether that is intentional.
    int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
    float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    // Rasterize the image as 32-bit RGBA into the pinned int[] buffer.
    using (CGImage cgimage = image.CGImage)
    using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        handle.AddrOfPinnedObject(),
        (nint)image.Size.Width,
        (nint)image.Size.Height,
        8,
        (nint)image.Size.Width * 4,
        cspace,
        CGImageAlphaInfo.PremultipliedLast
    ))
    {
        context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
    }
    handle.Free();

    // Unpack R, G, B bytes from each pixel and normalize: (v - mean) * scale.
    for (int i = 0; i < intValues.Length; ++i)
    {
        int val = intValues[i];
        floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
        floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
    }

    Tensor t = new Tensor(DataType.Float, new int[] { 1, (int)image.Size.Height, (int)image.Size.Width, 3 });
    System.Runtime.InteropServices.Marshal.Copy(floatValues, 0, t.DataPointer, floatValues.Length);
    return(t);
#else
    FileInfo fi = new FileInfo(fileName);
    String extension = fi.Extension.ToLower();

    //Use tensorflow to decode the following image formats
    if ((typeof(T) == typeof(float)) &&
        (extension.Equals(".jpeg") || extension.Equals(".jpg") || extension.Equals(".png") || extension.Equals(".gif")))
    {
        //using (StatusChecker checker = new StatusChecker(status))
        using (Graph graph = new Graph())
        {
            Operation input = graph.Placeholder(DataType.String);

            //output dimension [height, width, 3] where 3 is the number of channels
            //DecodeJpeg can decode JPEG, PNG and GIF
            Operation jpegDecoder = graph.DecodeJpeg(input, 3);

            Operation floatCaster = graph.Cast(jpegDecoder, DstT: DataType.Float); //cast to float

            Tensor zeroConst = new Tensor(0);
            Operation zeroConstOp = graph.Const(zeroConst, zeroConst.Type, opName: "zeroConstOp");
            Operation dimsExpander = graph.ExpandDims(floatCaster, zeroConstOp); //turn it to dimension [1, height, width, 3]

            Operation resized;
            bool resizeRequired = (inputHeight > 0) && (inputWidth > 0);
            if (resizeRequired)
            {
                Tensor size = new Tensor(new int[] { inputHeight, inputWidth }); // new size;
                Operation sizeOp = graph.Const(size, size.Type, opName: "size");
                resized = graph.ResizeBilinear(dimsExpander, sizeOp); //resize image
            }
            else
            {
                resized = dimsExpander;
            }

            Tensor mean = new Tensor(inputMean);
            Operation meanOp = graph.Const(mean, mean.Type, opName: "mean");
            Operation substracted = graph.Sub(resized, meanOp);

            Tensor scaleTensor = new Tensor(scale);
            Operation scaleOp = graph.Const(scaleTensor, scaleTensor.Type, opName: "scale");
            Operation scaled = graph.Mul(substracted, scaleOp);

            // Reverse along axis 3 (the channel axis) to swap B and R.
            Operation swapedBR;
            if (swapBR)
            {
                Tensor threeConst = new Tensor(new int[] { 3 });
                Operation threeConstOp = graph.Const(threeConst, threeConst.Type, "threeConstOp");
                swapedBR = graph.ReverseV2(scaled, threeConstOp, "swapBR");
            }
            else
            {
                swapedBR = scaled;
            }

            // Reverse along axis 1 (the height axis) to flip vertically.
            Operation flipped;
            if (flipUpSideDown)
            {
                Tensor oneConst = new Tensor(new int[] { 1 });
                Operation oneConstOp = graph.Const(oneConst, oneConst.Type, "oneConstOp");
                flipped = graph.ReverseV2(swapedBR, oneConstOp, "flipUpSideDownOp");
            }
            else
            {
                flipped = swapedBR;
            }

            using (Session session = new Session(graph))
            {
                Tensor imageTensor = Tensor.FromString(File.ReadAllBytes(fileName), status);
                Tensor[] imageResults = session.Run(
                    new Output[] { input },
                    new Tensor[] { imageTensor },
                    new Output[] { flipped });
                return(imageResults[0]);
            }
        }
    }
    else
    {
        return(NativeReadTensorFromImageFile <T>(fileName, inputHeight, inputWidth, inputMean, scale, flipUpSideDown, swapBR));
    }
#endif
}
/// <summary>
/// Rasterizes <paramref name="image"/> into a temporary 32-bit RGBA bitmap
/// context and hands that context to LoadTextureInternal to populate
/// <paramref name="t"/>. On any failure the texture is flagged as failed
/// instead of propagating the exception.
/// </summary>
/// <param name="t">Texture to populate; its Failed flag is set on error.</param>
/// <param name="image">Source image to rasterize.</param>
private void LoadTextureFromImage(Texture t, CGImage image)
{
    IntPtr pixelData = IntPtr.Zero;
    CGColorSpace colorSpace = null;
    CGBitmapContext bitmap = null;
    try
    {
        // 4 bytes per pixel: RGBA, 8 bits per component.
        pixelData = Marshal.AllocHGlobal((int)image.Width * (int)image.Height * 4);
        // Keep the color space in a local so it can be disposed in the finally
        // block — creating it inline leaked a native CGColorSpace per call.
        colorSpace = CGColorSpace.CreateDeviceRGB();
        bitmap = new CGBitmapContext(pixelData, image.Width, image.Height, 8, 4 * image.Width, colorSpace, CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big);
        bitmap.ClearRect(new CGRect(0, 0, image.Width, image.Height));
        bitmap.DrawImage(new CGRect(0, 0, image.Width, image.Height), image);
        LoadTextureInternal(t, bitmap);
    }
    catch (Exception)
    {
        // Best effort: record the failure rather than crash the caller.
        t.Failed = true;
        return;
    }
    finally
    {
        if (bitmap != null)
        {
            bitmap.Dispose();
        }
        if (colorSpace != null)
        {
            colorSpace.Dispose();
        }
        if (pixelData != IntPtr.Zero)
        {
            Marshal.FreeHGlobal(pixelData);
        }
    }
}
/// <summary>
/// Creates a CIImage from a CGImage, tagging it with the given color space
/// via the kCIImageColorSpace option dictionary.
/// </summary>
/// <param name="image">The source CGImage.</param>
/// <param name="colorSpace">Color space to associate with the image; required.</param>
public static CIImage FromCGImage(CGImage image, CGColorSpace colorSpace)
{
    if (colorSpace == null)
        throw new ArgumentNullException ("colorSpace");

    // Build a one-entry options dictionary { CIImageColorSpace: colorSpace }
    // from raw handles, then delegate to the dictionary-based overload.
    using (var optionValues = NSArray.FromIntPtrs (new IntPtr [] { colorSpace.Handle }))
    using (var optionKeys = NSArray.FromIntPtrs (new IntPtr [] { CIImageColorSpace.Handle }))
    using (var options = NSDictionary.FromObjectsAndKeysInternal (optionValues, optionKeys))
    {
        return FromCGImage (image, options);
    }
}
/// <summary>
/// Read a NSImage, convert the data and save it to the native pointer
/// </summary>
/// <typeparam name="T">The type of the data to convert the image pixel values to. e.g. "float" or "byte"</typeparam>
/// <param name="image">The input image</param>
/// <param name="dest">The native pointer where the image pixels values will be saved to.</param>
/// <param name="inputHeight">The height of the image, must match the height requirement for the tensor</param>
/// <param name="inputWidth">The width of the image, must match the width requirement for the tensor</param>
/// <param name="inputMean">The mean value, it will be subtracted from the input image pixel values</param>
/// <param name="scale">The scale, after mean is subtracted, the scale will be used to multiply the pixel values</param>
/// <param name="flipUpSideDown">If true, the image needs to be flipped up side down</param>
/// <param name="swapBR">If true, will flip the Blue channel with the Red. e.g. If false, the tensor's color channel order will be RGB.
/// If true, the tensor's color channel order will be BGR </param>
/// <returns>The number of bytes written.</returns>
public static int ReadImageToTensor <T>(
    NSImage image,
    IntPtr dest,
    int inputHeight = -1,
    int inputWidth = -1,
    float inputMean = 0.0f,
    float scale = 1.0f,
    bool flipUpSideDown = false,
    bool swapBR = false) where T : struct
{
    // Non-positive dimensions mean "use the image's own size".
    if (inputHeight <= 0)
    {
        inputHeight = (int)image.Size.Height;
    }
    if (inputWidth <= 0)
    {
        inputWidth = (int)image.Size.Width;
    }

    int[] intValues = new int[inputWidth * inputHeight];
    System.Runtime.InteropServices.GCHandle handle = System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
    // try/finally guarantees the pinned handle is released even when the
    // unsupported-type branch throws; previously that path leaked the pin.
    try
    {
        // Rasterize the image as 32-bit RGBA directly into the pinned int[].
        using (CGImage cgimage = image.CGImage)
        using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
        using (CGBitmapContext context = new CGBitmapContext(
            handle.AddrOfPinnedObject(),
            inputWidth,
            inputHeight,
            8,
            inputWidth * 4,
            cspace,
            CGImageAlphaInfo.PremultipliedLast
        ))
        {
            context.DrawImage(new CGRect(new CGPoint(), new CGSize(inputWidth, inputHeight)), cgimage);
        }

        // Convert the packed 32-bit pixels to the requested element type,
        // applying mean/scale/flip/swap, and write them to `dest`.
        if (typeof(T) == typeof(float))
        {
            return Emgu.TF.Util.Toolbox.Pixel32ToPixelFloat(
                handle.AddrOfPinnedObject(),
                inputWidth,
                inputHeight,
                inputMean,
                scale,
                flipUpSideDown,
                swapBR,
                dest);
        }
        else if (typeof(T) == typeof(byte))
        {
            return Emgu.TF.Util.Toolbox.Pixel32ToPixelByte(
                handle.AddrOfPinnedObject(),
                inputWidth,
                inputHeight,
                inputMean,
                scale,
                flipUpSideDown,
                swapBR,
                dest);
        }
        else
        {
            throw new NotImplementedException(String.Format("Destination data type {0} is not supported.", typeof(T).ToString()));
        }
    }
    finally
    {
        // Always unpin the managed buffer.
        handle.Free();
    }
}
/// <summary>
/// Renders the level meter: either a continuous bar built from the color
/// thresholds (NumLights == 0) or a row of discrete "lights". The context is
/// transformed so drawing always happens in a vertical, bottom-up coordinate
/// space regardless of the meter's orientation.
/// </summary>
public override void Draw(CGRect rectB)
{
    CGColorSpace cs = null;
    CGContext ctx = null;
    CGRect bds;

    // NOTE(review): disposing the context returned by
    // UIGraphics.GetCurrentContext() here is unusual — UIKit owns that
    // context; confirm this is intentional.
    using (ctx = UIGraphics.GetCurrentContext())
    {
        using (cs = CGColorSpace.CreateDeviceRGB())
        {
            if (Vertical)
            {
                // Flip vertically so level 0 is at the bottom.
                ctx.TranslateCTM(0, Bounds.Height);
                ctx.ScaleCTM(1, -1);
                bds = Bounds;
            }
            else
            {
                // Rotate so the horizontal meter can be drawn with the same
                // vertical math; width/height swap accordingly.
                ctx.TranslateCTM(0, Bounds.Height);
                ctx.RotateCTM(-(float)Math.PI / 2);
                bds = new CGRect(0, 0, Bounds.Height, Bounds.Width);
            }

            ctx.SetFillColorSpace(cs);
            ctx.SetStrokeColorSpace(cs);

            if (NumLights == 0)
            {
                // Continuous bar: stack one filled segment per threshold up to Level.
                float currentTop = 0;

                if (BgColor != null)
                {
                    BgColor.SetColor();
                    ctx.FillRect(bds);
                }

                foreach (var thisTresh in ColorThresholds)
                {
                    var val = Math.Min(thisTresh.MaxValue, Level);

                    var rect = new CGRect(0, bds.Height * currentTop, bds.Width, bds.Height * (val - currentTop));

                    thisTresh.Color.SetColor();
                    ctx.FillRect(rect);

                    if (Level < thisTresh.MaxValue)
                    {
                        break;
                    }
                    currentTop = val;
                }

                if (BorderColor != null)
                {
                    BorderColor.SetColor();
                    bds.Inflate(-0.5f, -0.5f);
                    ctx.StrokeRect(bds);
                }
            }
            else
            {
                // Discrete lights: NumLights cells, each lit fully, partially
                // (VariableLightIntensity) or not at all.
                float lightMinVal = 0;
                float insetAmount, lightVSpace;
                int peakLight = -1;

                lightVSpace = (float)bds.Height / (float)NumLights;
                // Shrink the per-light inset as lights get small.
                if (lightVSpace < 4)
                {
                    insetAmount = 0;
                }
                else if (lightVSpace < 8)
                {
                    insetAmount = 0.5f;
                }
                else
                {
                    insetAmount = 1;
                }

                if (PeakLevel > 0)
                {
                    peakLight = (int)(PeakLevel * NumLights);
                    if (peakLight >= NumLights)
                    {
                        peakLight = NumLights - 1;
                    }
                }

                for (int light_i = 0; light_i < NumLights; light_i++)
                {
                    float lightMaxVal = (light_i + 1) / (float)NumLights;
                    float lightIntensity;
                    CGRect lightRect;
                    UIColor lightColor;

                    if (light_i == peakLight)
                    {
                        // The peak-hold light is always fully lit.
                        lightIntensity = 1;
                    }
                    else
                    {
                        lightIntensity = (Level - lightMinVal) / (lightMaxVal - lightMinVal);
                        lightIntensity = Clamp(0, lightIntensity, 1);
                        if (!VariableLightIntensity && lightIntensity > 0)
                        {
                            lightIntensity = 1;
                        }
                    }

                    // Pick the color of the highest threshold at or below this light.
                    lightColor = ColorThresholds [0].Color;
                    int color_i = 0;
                    for (; color_i < ColorThresholds.Length - 1; color_i++)
                    {
                        var thisTresh = ColorThresholds [color_i];
                        var nextTresh = ColorThresholds [color_i + 1];
                        if (thisTresh.MaxValue <= lightMaxVal)
                        {
                            //Console.WriteLine ("PICKED COLOR at {0}", color_i);
                            lightColor = nextTresh.Color;
                        }
                    }

                    lightRect = new CGRect(0, bds.Height * light_i / (float)NumLights, bds.Width, bds.Height * (1f / NumLights));
                    // NOTE(review): CGRect.Inset appears to return a new rect
                    // rather than mutate in place, so this result may be
                    // discarded (here and in the border stroke below) — verify
                    // against the CGRect API in use.
                    lightRect.Inset(insetAmount, insetAmount);

                    if (BgColor != null)
                    {
                        BgColor.SetColor();
                        ctx.FillRect(lightRect);
                    }

                    //Console.WriteLine ("Got: {0} {1}", lightColor, UIColor.Red);
                    //lightColor = UIColor.Red;
                    if (lightIntensity == 1)
                    {
                        lightColor.SetColor();
                        //Console.WriteLine ("Setting color to {0}", lightColor);
                        ctx.FillRect(lightRect);
                    }
                    else if (lightIntensity > 0)
                    {
                        // Partially lit: same color at reduced alpha.
                        using (var clr = new CGColor(lightColor.CGColor, lightIntensity))
                        {
                            ctx.SetFillColor(clr);
                            ctx.FillRect(lightRect);
                        }
                    }

                    if (BorderColor != null)
                    {
                        BorderColor.SetColor();
                        lightRect.Inset(0.5f, 0.5f);
                        ctx.StrokeRect(lightRect);
                    }

                    lightMinVal = lightMaxVal;
                }
            }
        }
    }
}
/// <summary>
/// Rasterizes <paramref name="cgImage"/> into an RGBA Mat and converts it to
/// the color layout implied by <paramref name="modes"/>, writing the result
/// into <paramref name="mat"/>.
/// </summary>
/// <param name="cgImage">Source image.</param>
/// <param name="mat">Output array receiving the converted pixels.</param>
/// <param name="modes">Imread mode controlling the output color conversion.</param>
internal static void ToArray(this CGImage cgImage, IOutputArray mat, ImreadModes modes = ImreadModes.AnyColor)
{
    Size sz = new Size((int)cgImage.Width, (int)cgImage.Height);
    using (Mat m = new Mat(sz, DepthType.Cv8U, 4))
    {
        RectangleF rect = new RectangleF(PointF.Empty, new SizeF(cgImage.Width, cgImage.Height));
        // Draw the image as 32-bit RGBA directly into the Mat's buffer.
        using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
        using (CGBitmapContext context = new CGBitmapContext(
            m.DataPointer,
            sz.Width, sz.Height,
            8,
            sz.Width * 4,
            cspace,
            CGImageAlphaInfo.PremultipliedLast))
            context.DrawImage(rect, cgImage);

        // Single if/else-if chain: previously Unchanged copied the Mat and
        // then fell through to the final `else`, throwing spuriously.
        if (modes == ImreadModes.Unchanged)
        {
            m.CopyTo(mat);
        }
        else if (modes == ImreadModes.Grayscale)
        {
            CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Gray);
        }
        else if (modes == ImreadModes.AnyColor)
        {
            CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Bgra);
        }
        else if (modes == ImreadModes.Color)
        {
            CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Bgr);
        }
        else if (modes == ImreadModes.ReducedColor2)
        {
            // Halve resolution, then convert.
            using (Mat tmp = new Mat())
            {
                CvInvoke.PyrDown(m, tmp);
                CvInvoke.CvtColor(tmp, mat, ColorConversion.Rgba2Bgr);
            }
        }
        else if (modes == ImreadModes.ReducedGrayscale2)
        {
            using (Mat tmp = new Mat())
            {
                CvInvoke.PyrDown(m, tmp);
                CvInvoke.CvtColor(tmp, mat, ColorConversion.Rgba2Gray);
            }
        }
        else if (modes == ImreadModes.ReducedColor4
            || modes == ImreadModes.ReducedColor8
            || modes == ImreadModes.ReducedGrayscale4
            || modes == ImreadModes.ReducedGrayscale8
            || modes == ImreadModes.LoadGdal)
        {
            throw new NotImplementedException(String.Format("Conversion from PNG using mode {0} is not supported", modes));
        }
        else
        {
            throw new Exception(String.Format("ImreadModes of {0} is not implemented.", modes.ToString()));
            //CvInvoke.CvtColor(m, mat, ColorConversion.Rgba2Bgr);
        }
    }
}
/// <summary>
/// Redraws the scene into an offscreen bitmap context (a circle placed by the
/// current location/rotation/scale, plus the reference coordinate axes) and
/// pushes the result to the image view.
/// </summary>
protected void DrawScreen()
{
    // create our offscreen bitmap context, sized to the image view
    CGSize bitmapSize = new CGSize(imageView.Frame.Size);
    // The color space is now wrapped in a using — it was previously created
    // inline and its native object leaked on every redraw.
    using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB())
    using (CGBitmapContext context = new CGBitmapContext(
        IntPtr.Zero,
        (int)bitmapSize.Width, (int)bitmapSize.Height,
        8,
        (int)(4 * bitmapSize.Width),
        colorSpace,
        CGImageAlphaInfo.PremultipliedFirst))
    {
        // save the state of the context while we change the CTM
        context.SaveState();

        // draw our circle
        context.SetFillColor(1, 0, 0, 1);
        context.TranslateCTM(currentLocation.X, currentLocation.Y);
        context.RotateCTM(currentRotation);
        context.ScaleCTM(currentScale, currentScale);
        context.FillRect(new CGRect(-10, -10, 20, 20));

        // restore our transformations
        context.RestoreState();

        // draw our coordinates for reference
        DrawCoordinateSpace(context);

        // output the drawing to the view; dispose the intermediate CGImage
        // (it too was previously leaked).
        using (var rendered = context.ToImage())
        {
            imageView.Image = UIImage.FromImage(rendered);
        }
    }
}
/// <summary>
/// Renders a sample stroke demonstrating the given CGLineCap style, plus a
/// white reference line and endpoint markers, and returns it as a remote
/// representation image.
/// </summary>
/// <param name="obj">The line-cap style to visualize.</param>
public static XIR.Image RemoteRepresentation(this CGLineCap obj)
{
    var lineWidth = 10;
    var sampleWidth = 50;

    var aPath = new CGPath();
    aPath.MoveToPoint(new CGPoint(lineWidth, lineWidth));
    aPath.AddLineToPoint(new CGPoint(lineWidth + sampleWidth, lineWidth));

    // let's make sure we leave a little room for the line width drawing as well by adding the lineWidth as well
    var width = (int)aPath.PathBoundingBox.Right + lineWidth;
    var height = (int)aPath.PathBoundingBox.Bottom + lineWidth;
    var bytesPerRow = width * 4;

    // The color space is disposed via using now — it was previously created
    // inline and leaked.
    using (var colorSpace = CGColorSpace.CreateDeviceRGB())
    using (var context = new CGBitmapContext(
        IntPtr.Zero,
        width, height,
        8,
        bytesPerRow,
        colorSpace,
        CGImageAlphaInfo.PremultipliedFirst))
    {
        // First, the sample stroke with the requested cap style.
        context.SaveState();
        context.SetStrokeColor(new CGColor(0, 0, 0));
        context.SetLineWidth(lineWidth);
        context.AddPath(aPath);
        switch ((CGLineCap)obj)
        {
        case CGLineCap.Square:
            context.SetLineCap(CGLineCap.Square);
            break;
        case CGLineCap.Butt:
            context.SetLineCap(CGLineCap.Butt);
            break;
        case CGLineCap.Round:
            context.SetLineCap(CGLineCap.Round);
            break;
        }
        context.DrawPath(CGPathDrawingMode.Stroke);
        context.RestoreState();

        // Second, we draw the inset line to demonstrate the bounds.
        // Dispose the previous path before replacing it (was leaked before).
        aPath.Dispose();
        aPath = new CGPath();
        aPath.MoveToPoint(new CGPoint(lineWidth, lineWidth));
        aPath.AddLineToPoint(new CGPoint(lineWidth + sampleWidth, lineWidth));

        context.SetLineCap(CGLineCap.Butt);
        context.SetStrokeColor(NSColor.White.CGColor);
        context.SetLineWidth(1);
        context.SaveState();
        context.AddPath(aPath);
        context.DrawPath(CGPathDrawingMode.Stroke);
        context.RestoreState();

        // Third, we draw the inset line endings which are two circles
        var circleWidth = 2;
        aPath.Dispose();
        aPath = new CGPath();
        aPath.AddEllipseInRect(new CGRect(lineWidth - (int)(circleWidth / 2), lineWidth - (int)(circleWidth / 2), circleWidth, circleWidth));
        aPath.AddEllipseInRect(new CGRect(lineWidth + sampleWidth - (int)(circleWidth / 2), lineWidth - (int)(circleWidth / 2), circleWidth, circleWidth));

        context.SetLineWidth(circleWidth);
        context.SetStrokeColor(NSColor.White.CGColor);
        context.AddPath(aPath);
        context.DrawPath(CGPathDrawingMode.Stroke);
        aPath.Dispose();

        return(RemoteRepresentation(context));
    }
}
/// <summary>
/// Installs this texture brush as the current fill pattern on the graphics
/// context: computes the pattern cell size from the wrap mode, builds the
/// pattern transform (accounting for context flipping), and sets the pattern
/// on the context. Skips all work if this brush is already current and
/// unchanged.
/// </summary>
internal override void Setup(Graphics graphics, bool fill)
{
    // NOTE(review): the `fill` parameter is never read in this method —
    // confirm whether it is required by the base-class contract only.

    // if this is the same as the last that was set then return and no changes have been made
    // then return.
    if (graphics.LastBrush == this && !changed)
    {
        return;
    }

    // obtain our width and height so we can set the pattern rectangle
    float textureWidth = textureImage.Width;
    float textureHeight = textureImage.Height;

    // Flip modes need a doubled cell so the mirrored copy fits.
    // NOTE(review): TileFlipY doubles the width here rather than the height —
    // verify this is intentional and not a copy/paste slip.
    if (wrapMode == WrapMode.TileFlipX || wrapMode == WrapMode.TileFlipY)
    {
        textureWidth *= 2;
    }

    if (wrapMode == WrapMode.TileFlipXY)
    {
        textureWidth *= 2;
        textureHeight *= 2;
    }

    // this is here for testing only
    var textureOffset = new PointF(0, -0);

    //choose the pattern to be filled based on the currentPattern selected
    var patternSpace = CGColorSpace.CreatePattern(null);
    graphics.context.SetFillColorSpace(patternSpace);
    patternSpace.Dispose();

    // Pattern default work variables
    var patternRect = new RectangleF(HALF_PIXEL_X, HALF_PIXEL_Y, textureWidth + HALF_PIXEL_X, textureHeight + HALF_PIXEL_Y);
    var patternTransform = CGAffineTransform.MakeIdentity();

    // We need to take into account the orientation of the graphics object
#if MONOMAC
    if (!graphics.isFlipped)
    {
        patternTransform = new CGAffineTransform(1, 0, 0, -1, textureOffset.X, textureHeight + textureOffset.Y);
    }
#endif
#if MONOTOUCH
    if (graphics.isFlipped)
    {
        patternTransform = new CGAffineTransform(1, 0, 0, -1, textureOffset.X, textureHeight + textureOffset.Y);
    }
#endif

    // Compose with the brush's own texture transform.
    patternTransform = CGAffineTransform.Multiply(patternTransform, textureTransform.transform);

    // DrawPattern callback which will be set depending on hatch style
    CGPattern.DrawPattern drawPattern;
    drawPattern = DrawTexture;

    //set the pattern as the Current Context’s fill pattern
    var pattern = new CGPattern(patternRect,
                                patternTransform,
                                textureWidth, textureHeight,
                                CGPatternTiling.NoDistortion,
                                true,
                                drawPattern);
    //we dont need to set any color, as the pattern cell itself has chosen its own color
    graphics.context.SetFillPattern(pattern, new float[] { 1 });

    changed = false;

    graphics.LastBrush = this;
    // I am setting this to be used for Text coloring in DrawString
    //graphics.lastBrushColor = foreColor;
}
// Builds the shared background gradient once for all instances: two RGBA
// stops (light blue to dark blue) in device RGB; locations are null so the
// stops are distributed evenly.
static MessageSummaryView()
{
    float[] stopComponents = {
        /* first */  .52f, .69f, .96f, 1,
        /* second */ .12f, .31f, .67f, 1
    };
    using (var deviceRgb = CGColorSpace.CreateDeviceRGB())
    {
        gradient = new CGGradient(deviceRgb, stopComponents, null); //new float [] { 0, 1 });
    }
}
/// <summary>
/// AVCapture callback: copies the camera frame's pixel data out of the
/// sample buffer, re-wraps it as an image via a bitmap context, and passes
/// it to drawFacesCallback for face-detection rendering. Frames arriving
/// while a previous one is still being processed are dropped.
/// </summary>
public override void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
{
    Console.WriteLine("Got Sample Fromn Buffer");
    // Drop the frame unless detection is enabled and no frame is in flight.
    lock (FaceDetectionViewController.lockerobj)
    {
        if (!FaceDetectionViewController.processingFaceDetection || isProcessingBuffer)
        {
            sampleBuffer.Dispose();
            return;
        }
        isProcessingBuffer = true;
    }
    try
    {
        CIImage ciImage = null;
        CGRect cleanAperture = default(CGRect);
        // NOTE(review): sampleBuffer is disposed by this using block, again
        // explicitly inside it, and once more in the finally below — the
        // redundant Dispose calls should be consolidated (CMSampleBuffer
        // dispose is presumably idempotent, but verify).
        using (sampleBuffer)
        {
            //CVPixelBuffer renderedOutputPixelBuffer = null;
            byte[] managedArray;
            int width;
            int height;
            int bytesPerRow;
            // Copy the raw frame bytes into a managed array while the pixel
            // buffer's base address is locked.
            using (var pixelBuffer = sampleBuffer.GetImageBuffer() as CVPixelBuffer)
            {
                pixelBuffer.Lock(CVPixelBufferLock.None);
                CVPixelFormatType ft = pixelBuffer.PixelFormatType;
                IntPtr baseAddress = pixelBuffer.BaseAddress;
                bytesPerRow = (int)pixelBuffer.BytesPerRow;
                width = (int)pixelBuffer.Width;
                height = (int)pixelBuffer.Height;
                //managedArray = new byte[width * height];
                managedArray = new byte[pixelBuffer.Height * pixelBuffer.BytesPerRow];
                Marshal.Copy(baseAddress, managedArray, 0, managedArray.Length);
                pixelBuffer.Unlock(CVPixelBufferLock.None);
            }
            sampleBuffer.Dispose();
            //int bytesPerPixel = 4;
            //int bytesPerRow = bytesPerPixel * width;
            int bitsPerComponent = 8;
            //CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB();
            //CGContext context = new CGBitmapContext(managedArray, width, height,
            //bitsPerComponent, bytesPerRow, colorSpace,
            //CGBitmapFlags.PremultipliedLast | CGBitmapFlags.ByteOrder32Big);
            // BGRA little-endian layout, as delivered by the camera.
            var flags = CGBitmapFlags.PremultipliedFirst | CGBitmapFlags.ByteOrder32Little;
            // Create a CGImage on the RGB colorspace from the configured parameter above
            using (var cs = CGColorSpace.CreateDeviceRGB())
            {
                using (var context = new CGBitmapContext(managedArray, width, height, bitsPerComponent, bytesPerRow, cs, (CGImageAlphaInfo)flags))
                {
                    // NOTE(review): ToImage() yields a CGImage while ciImage is
                    // declared CIImage — confirm what conversion makes this
                    // assignment valid in this project. Also context.Dispose()
                    // below is redundant with the enclosing using.
                    ciImage = context.ToImage();
                    //using (CGImage cgImage = context.ToImage()) {
                    //	//pixelBuffer.Unlock(CVPixelBufferLock.None);
                    //	//return UIImage.FromImage(cgImage);
                    //}
                    context.Dispose();
                }
            }
            //var a = new CMSampleBuffer.;
            //using () {
            //}
            //UIImage image = GetImageFromSampleBuffer(sampleBuffer);
            //if (!FaceMainController.isFaceRegistered || isProcessing)
            //{
            //	//	Console.WriteLine("OutputDelegate - Exit (isProcessing: " + DateTime.Now);
            //	sampleBuffer.Dispose();
            //	Console.WriteLine("processing..");
            //	return;
            //}
            //Console.WriteLine("IsProcessing: ");
            //isProcessing = true;
            connection.VideoOrientation = AVCaptureVideoOrientation.Portrait;
            connection.VideoScaleAndCropFactor = 1.0f;
            //var bufferCopy = sampleBuffer.c
            //UIImage image = GetImageFromSampleBuffer(sampleBuffer);
            //ciImage = CIImage.FromCGImage(image.CGImage);
            //cleanAperture = sampleBuffer.GetVideoFormatDescription().GetCleanAperture(false);
        }
        /*For Face Detection using iOS APIs*/
        //DispatchQueue.MainQueue.DispatchAsync(() =>
        using (ciImage)
        {
            if (ciImage != null)
            {
                // NOTE(review): cleanAperture is never assigned (the assignment
                // is commented out), so the callback always receives the
                // default rect — confirm this is intended.
                drawFacesCallback(UIImage.FromImage(ciImage), cleanAperture);
            }
        }
        // NOTE(review): if an exception is thrown above, this reset is skipped
        // and isProcessingBuffer stays true forever, permanently dropping
        // frames — consider moving it into the finally block.
        isProcessingBuffer = false;
        //Console.WriteLine(ciImage);
        //Task.Run(async () => {
        //	try {
        //		//if (ViewController.IsFaceDetected)
        //		//{
        //		Console.WriteLine("face detected: ");
        //		imageAnalyzer = new ImageAnalyzer(() => Task.FromResult<Stream>(image.ResizeImageWithAspectRatio(300, 400).AsPNG().AsStream()));
        //		await ProcessCameraCapture(imageAnalyzer);
        //		//}
        //	}
        //	finally {
        //		imageAnalyzer = null;
        //		isProcessing = false;
        //		Console.WriteLine("OUT ");
        //	}
        //});
    }
    catch (Exception ex)
    {
        Console.Write(ex);
    }
    finally
    {
        sampleBuffer.Dispose();
    }
}
/// <summary>
/// Creates a CIImage from raw bitmap bytes, translating the strongly-typed
/// CIFormat to the integer constant expected by the native overload.
/// </summary>
/// <param name="bitmapData">Raw pixel bytes.</param>
/// <param name="bytesPerRow">Stride of one row in bytes.</param>
/// <param name="size">Pixel dimensions of the image.</param>
/// <param name="pixelFormat">Pixel format of the data.</param>
/// <param name="colorSpace">Color space to tag the image with.</param>
public static CIImage FromData(NSData bitmapData, nint bytesPerRow, CGSize size, CIFormat pixelFormat, CGColorSpace colorSpace)
{
    var nativeFormat = CIImage.CIFormatToInt(pixelFormat);
    return FromData(bitmapData, bytesPerRow, size, nativeFormat, colorSpace);
}
/// <summary>
/// Test helper: produces a live INativeObject instance for the given type.
/// Uses the parameterless constructor when one exists; otherwise falls back
/// to a hand-written per-type factory keyed by the type's simple name.
/// Throws InvalidOperationException when no instance can be produced.
/// </summary>
protected INativeObject GetINativeInstance(Type t)
{
    // Prefer the type's own parameterless constructor when available.
    var ctor = t.GetConstructor(Type.EmptyTypes);
    if ((ctor != null) && !ctor.IsAbstract)
    {
        return(ctor.Invoke(null) as INativeObject);
    }

    if (!NativeObjectInterfaceType.IsAssignableFrom(t))
    {
        throw new ArgumentException("t");
    }

    // Per-type factory. Each case builds a minimal but valid instance.
    switch (t.Name)
    {
    // --- CoreFoundation ---
    case "CFAllocator":
        return(CFAllocator.SystemDefault);
    case "CFArray":
        return(Runtime.GetINativeObject <CFArray> (new NSArray().Handle, false));
    case "CFBundle":
        var bundles = CFBundle.GetAll();
        if (bundles.Length > 0)
        {
            return(bundles [0]);
        }
        else
        {
            throw new InvalidOperationException(string.Format("Could not create the new instance for type {0}.", t.Name));
        }
    case "CFNotificationCenter":
        return(CFNotificationCenter.Darwin);
    case "CFReadStream":
    case "CFStream":
        CFReadStream readStream;
        CFWriteStream writeStream;
        CFStream.CreatePairWithSocketToHost("www.google.com", 80, out readStream, out writeStream);
        return(readStream);
    case "CFWriteStream":
        CFStream.CreatePairWithSocketToHost("www.google.com", 80, out readStream, out writeStream);
        return(writeStream);
    case "CFUrl":
        return(CFUrl.FromFile("/etc"));
    case "CFPropertyList":
        return(CFPropertyList.FromData(NSData.FromString("<string>data</string>")).PropertyList);
    case "DispatchData":
        return(DispatchData.FromByteBuffer(new byte [] { 1, 2, 3, 4 }));
    case "AudioFile":
        var path = Path.GetFullPath("1.caf");
        var af = AudioFile.Open(CFUrl.FromFile(path), AudioFilePermission.Read, AudioFileType.CAF);
        return(af);
    case "CFHTTPMessage":
        return(CFHTTPMessage.CreateEmpty(false));
    case "CFMutableString":
        return(new CFMutableString("xamarin"));
    // --- CoreGraphics ---
    case "CGBitmapContext":
        byte[] data = new byte [400];
        using (CGColorSpace space = CGColorSpace.CreateDeviceRGB())
        {
            return(new CGBitmapContext(data, 10, 10, 8, 40, space, CGBitmapFlags.PremultipliedLast));
        }
    case "CGContextPDF":
        var filename = Environment.GetFolderPath(Environment.SpecialFolder.CommonDocuments) + "/t.pdf";
        using (var url = new NSUrl(filename))
            return(new CGContextPDF(url));
    case "CGColorConversionInfo":
        var cci = new GColorConversionInfoTriple()
        {
            Space = CGColorSpace.CreateGenericRgb(),
            Intent = CGColorRenderingIntent.Default,
            Transform = CGColorConversionInfoTransformType.ApplySpace
        };
        return(new CGColorConversionInfo((NSDictionary)null, cci, cci, cci));
    case "CGDataConsumer":
        using (NSMutableData destData = new NSMutableData())
        {
            return(new CGDataConsumer(destData));
        }
    case "CGDataProvider":
#if __MACCATALYST__
        filename = Path.Combine("Contents", "Resources", "xamarin1.png");
#else
        filename = "xamarin1.png";
#endif
        return(new CGDataProvider(filename));
    case "CGFont":
        return(CGFont.CreateWithFontName("Courier New"));
    case "CGPattern":
        return(new CGPattern(
            new RectangleF(0, 0, 16, 16),
            CGAffineTransform.MakeIdentity(),
            16, 16,
            CGPatternTiling.NoDistortion,
            true,
            (cgc) => {}));
    case "CMBufferQueue":
        return(CMBufferQueue.CreateUnsorted(2));
    // --- CoreText ---
    case "CTFont":
        CTFontDescriptorAttributes fda = new CTFontDescriptorAttributes()
        {
            FamilyName = "Courier",
            StyleName = "Bold",
            Size = 16.0f
        };
        using (var fd = new CTFontDescriptor(fda))
            return(new CTFont(fd, 10));
    case "CTFontCollection":
        return(new CTFontCollection(new CTFontCollectionOptions()));
    case "CTFontDescriptor":
        fda = new CTFontDescriptorAttributes();
        return(new CTFontDescriptor(fda));
    case "CTTextTab":
        return(new CTTextTab(CTTextAlignment.Left, 2));
    case "CTTypesetter":
        return(new CTTypesetter(new NSAttributedString("Hello, world", new CTStringAttributes()
        {
            ForegroundColorFromContext = true,
            Font = new CTFont("ArialMT", 24)
        })));
    case "CTFrame":
        var framesetter = new CTFramesetter(new NSAttributedString("Hello, world", new CTStringAttributes()
        {
            ForegroundColorFromContext = true,
            Font = new CTFont("ArialMT", 24)
        }));
        var bPath = UIBezierPath.FromRect(new RectangleF(0, 0, 3, 3));
        return(framesetter.GetFrame(new NSRange(0, 0), bPath.CGPath, null));
    case "CTFramesetter":
        return(new CTFramesetter(new NSAttributedString("Hello, world", new CTStringAttributes()
        {
            ForegroundColorFromContext = true,
            Font = new CTFont("ArialMT", 24)
        })));
    case "CTGlyphInfo":
        return(new CTGlyphInfo("copyright", new CTFont("ArialMY", 24), "Foo"));
    case "CTLine":
        return(new CTLine(new NSAttributedString("Hello, world", new CTStringAttributes()
        {
            ForegroundColorFromContext = true,
            Font = new CTFont("ArialMT", 24)
        })));
    // --- ImageIO ---
    case "CGImageDestination":
        var storage = new NSMutableData();
        return(CGImageDestination.Create(new CGDataConsumer(storage), "public.png", 1));
    case "CGImageMetadataTag":
        using (NSString name = new NSString("tagName"))
        using (var value = new NSString("value"))
            return(new CGImageMetadataTag(CGImageMetadataTagNamespaces.Exif, CGImageMetadataTagPrefixes.Exif, name, CGImageMetadataType.Default, value));
    case "CGImageSource":
#if __MACCATALYST__
        filename = Path.Combine("Contents", "Resources", "xamarin1.png");
#else
        filename = "xamarin1.png";
#endif
        return(CGImageSource.FromUrl(NSUrl.FromFilename(filename)));
    // --- Security ---
    case "SecPolicy":
        return(SecPolicy.CreateSslPolicy(false, null));
    case "SecIdentity":
        using (var options = NSDictionary.FromObjectAndKey(new NSString("farscape"), SecImportExport.Passphrase))
        {
            NSDictionary[] array;
            var result = SecImportExport.ImportPkcs12(farscape_pfx, options, out array);
            if (result != SecStatusCode.Success)
            {
                throw new InvalidOperationException(string.Format("Could not create the new instance for type {0} due to {1}.", t.Name, result));
            }
            return(Runtime.GetINativeObject <SecIdentity> (array [0].LowlevelObjectForKey(SecImportExport.Identity.Handle), false));
        }
    case "SecTrust":
        X509Certificate x = new X509Certificate(mail_google_com);
        using (var policy = SecPolicy.CreateSslPolicy(true, "mail.google.com"))
            return(new SecTrust(x, policy));
    case "SslContext":
        return(new SslContext(SslProtocolSide.Client, SslConnectionType.Stream));
    case "UIFontFeature":
        return(new UIFontFeature(CTFontFeatureNumberSpacing.Selector.ProportionalNumbers));
    case "NetworkReachability":
        return(new NetworkReachability(IPAddress.Loopback, null));
    // --- VideoToolbox ---
    case "VTCompressionSession":
    case "VTSession":
        return(VTCompressionSession.Create(1024, 768, CMVideoCodecType.H264, (sourceFrame, status, flags, buffer) => { }, null, (CVPixelBufferAttributes)null));
    case "VTFrameSilo":
        return(VTFrameSilo.Create());
    case "VTMultiPassStorage":
        return(VTMultiPassStorage.Create());
    // --- Dispatch / misc CoreFoundation ---
    case "CFString":
        return(new CFString("test"));
    case "DispatchBlock":
        return(new DispatchBlock(() => { }));
    case "DispatchQueue":
        return(new DispatchQueue("com.example.subsystem.taskXYZ"));
    case "DispatchGroup":
        return(DispatchGroup.Create());
    case "CGColorSpace":
        return(CGColorSpace.CreateDeviceCmyk());
    case "CGGradient":
        CGColor[] cArray = { UIColor.Black.CGColor, UIColor.Clear.CGColor, UIColor.Blue.CGColor };
        return(new CGGradient(null, cArray));
    case "CGImage":
#if __MACCATALYST__
        filename = Path.Combine("Contents", "Resources", "xamarin1.png");
#else
        filename = "xamarin1.png";
#endif
        using (var dp = new CGDataProvider(filename))
            return(CGImage.FromPNG(dp, null, false, CGColorRenderingIntent.Default));
    case "CGColor":
        return(UIColor.Black.CGColor);
    case "CMClock":
        return(CMClock.HostTimeClock);
    case "CMTimebase":
        return(new CMTimebase(CMClock.HostTimeClock));
    case "CVPixelBufferPool":
        return(new CVPixelBufferPool(
            new CVPixelBufferPoolSettings(),
            new CVPixelBufferAttributes(CVPixelFormatType.CV24RGB, 100, 50)
        ));
    case "SecCertificate":
        using (var cdata = NSData.FromArray(mail_google_com))
            return(new SecCertificate(cdata));
    case "SecCertificate2":
        using (var cdata = NSData.FromArray(mail_google_com))
            return(new SecCertificate2(new SecCertificate(cdata)));
    case "SecTrust2":
        X509Certificate x2 = new X509Certificate(mail_google_com);
        using (var policy = SecPolicy.CreateSslPolicy(true, "mail.google.com"))
            return(new SecTrust2(new SecTrust(x2, policy)));
    case "SecIdentity2":
        using (var options = NSDictionary.FromObjectAndKey(new NSString("farscape"), SecImportExport.Passphrase))
        {
            NSDictionary[] array;
            var result = SecImportExport.ImportPkcs12(farscape_pfx, options, out array);
            if (result != SecStatusCode.Success)
            {
                throw new InvalidOperationException(string.Format("Could not create the new instance for type {0} due to {1}.", t.Name, result));
            }
            return(new SecIdentity2(Runtime.GetINativeObject <SecIdentity> (array [0].LowlevelObjectForKey(SecImportExport.Identity.Handle), false)));
        }
    case "SecKey":
        SecKey private_key;
        SecKey public_key;
        using (var record = new SecRecord(SecKind.Key))
        {
            record.KeyType = SecKeyType.RSA;
            record.KeySizeInBits = 512; // it's not a performance test :)
            SecKey.GenerateKeyPair(record.ToDictionary(), out public_key, out private_key);
            return(private_key);
        }
    case "SecAccessControl":
        return(new SecAccessControl(SecAccessible.WhenPasscodeSetThisDeviceOnly));
#if __MACCATALYST__
    case "Authorization":
        return(Security.Authorization.Create(AuthorizationFlags.Defaults));
#endif
    default:
        throw new InvalidOperationException(string.Format("Could not create the new instance for type {0}.", t.Name));
    }
}
/// <summary>
/// Creates a provider-backed CIImage, forwarding to the constructor that
/// takes the raw integer pixel-format constant and the options dictionary.
/// </summary>
public CIImage(ICIImageProvider provider, nuint width, nuint height, CIFormat pixelFormat, CGColorSpace colorSpace, CIImageProviderOptions options)
    : this(provider, width, height, CIImage.CIFormatToInt(pixelFormat), colorSpace, options?.Dictionary)
{
}
private void InitWithCGImage(CGImage image, All filter)
{
	// Loads a CGImage into a GL-compatible texture buffer:
	//  * chooses Color (RGBA, 4 bytes/pixel) when the image has a color space,
	//    otherwise Alpha8 (1 byte/pixel, alpha-only mask),
	//  * rounds the backing store up to a power-of-two size,
	//  * clamps to the maximum texture size (1024), scaling the draw to match,
	//  * renders the image into raw memory and hands it to InitWithData.
	int width, height, i;
	CGContext context = null;
	IntPtr data;
	CGColorSpace colorSpace;
	CGAffineTransform transform;
	Size imageSize;
	SurfaceFormat pixelFormat;
	bool sizeToFit = false;

	if (image == null)
	{
		throw new ArgumentException(" uimage is invalid! ");
	}

	// Images with a color space carry color data; the rest are alpha masks.
	if (image.ColorSpace != null)
	{
		pixelFormat = SurfaceFormat.Color;
	}
	else
	{
		pixelFormat = SurfaceFormat.Alpha8;
	}

	imageSize = new Size(image.Width, image.Height);
	transform = CGAffineTransform.MakeIdentity();

	// Round width up to the next power of two (required by GL ES 1.x textures).
	width = imageSize.Width;
	if ((width != 1) && ((width & (width - 1)) != 0))
	{
		i = 1;
		while ((sizeToFit ? 2 * i : i) < width) { i *= 2; }
		width = i;
	}

	// Same for height.
	height = imageSize.Height;
	if ((height != 1) && ((height & (height - 1)) != 0))
	{
		i = 1;
		while ((sizeToFit ? 2 * i : i) < height) { i *= 2; }
		height = i;
	}

	// TODO: kMaxTextureSize = 1024
	while ((width > 1024) || (height > 1024))
	{
		width /= 2;
		height /= 2;
		// FIX: accumulate the scale on each pass. The previous code assigned
		// MakeScale(0.5, 0.5) every iteration, so an image that had to be
		// halved more than once was drawn with the wrong (too large) scale.
		transform = CGAffineTransform.Multiply(transform, CGAffineTransform.MakeScale(0.5f, 0.5f));
		imageSize.Width /= 2;
		imageSize.Height /= 2;
	}

	switch (pixelFormat)
	{
	case SurfaceFormat.Color:
		colorSpace = CGColorSpace.CreateDeviceRGB();
		data = Marshal.AllocHGlobal(height * width * 4);
		context = new CGBitmapContext(data, width, height, 8, 4 * width, colorSpace, CGImageAlphaInfo.PremultipliedLast);
		// The context retains what it needs; the color space can go immediately.
		colorSpace.Dispose();
		break;
	case SurfaceFormat.Alpha8:
		data = Marshal.AllocHGlobal(height * width);
		context = new CGBitmapContext(data, width, height, 8, width, null, CGImageAlphaInfo.Only);
		break;
	default:
		throw new NotSupportedException("Invalid pixel format");
	}

	try
	{
		context.ClearRect(new RectangleF(0, 0, width, height));
		// Anchor the image at the top of the (possibly larger) POT canvas.
		context.TranslateCTM(0, height - imageSize.Height);

		if (!transform.IsIdentity)
		{
			context.ConcatCTM(transform);
		}

		context.DrawImage(new RectangleF(0, 0, image.Width, image.Height), image);

		InitWithData(data, pixelFormat, width, height, imageSize, filter);
	}
	finally
	{
		// FIX: release the context and the unmanaged buffer even if
		// InitWithData throws. (The original freed the buffer right after the
		// call, so the callee does not keep the pointer.)
		context.Dispose();
		Marshal.FreeHGlobal(data);
	}
}
public override void Draw(CGRect rect)
{
	// Draws the background ruling for the slider group: nine evenly spaced
	// horizontal lines running from the first slider's left edge to the last
	// slider's right edge. The PaintCode-generated gradient background and
	// the "+range / 0 / -range" labels are currently disabled (commented out).
	base.Draw(rect);

	//// General Declarations
	// FIX: CGColorSpace.CreateDeviceRGB() used to be created here; it was not
	// used by any live code and was never disposed - a native resource leak on
	// every draw. Re-create (and dispose) it if the gradient code is revived.
	var context = UIGraphics.GetCurrentContext();

	// //// Color Declarations
	// UIColor gradient2Color = UIColor.FromRGBA(0.906f, 0.910f, 0.910f, 1.000f);
	// UIColor gradient2Color2 = UIColor.FromRGBA(0.588f, 0.600f, 0.616f, 1.000f);
	//
	// //// Gradient Declarations
	// var gradient2Colors = new CGColor [] {gradient2Color.CGColor, gradient2Color2.CGColor};
	// var gradient2Locations = new float [] {0, 1};
	// var gradient2 = new CGGradient(colorSpace, gradient2Colors, gradient2Locations);

	//// Abstracted Attributes (only used by the disabled label drawing below)
	var textContent = "+ " + range;
	var text2Content = "0";
	var text3Content = "- " + range;

	//// Rectangle Drawing
	// var rectanglePath = UIBezierPath.FromRect(rect);
	// context.SaveState();
	// rectanglePath.AddClip();
	// context.DrawLinearGradient(gradient2, new CGPoint(rect.Height, 0), new CGPoint(rect.Height, rect.Height), 0);
	// context.RestoreState();

	// Nothing to draw until at least one slider has been laid out.
	if (Sliders.Count == 0)
	{
		return;
	}

	var sliderFrame = Sliders[0].Frame;
	var thumbH = 0; //Sliders[0].CurrentThumbImage.Size.Height / 2;
	// Eight equal segments -> nine ruling lines.
	var h = (sliderFrame.Height - (thumbH * 2)) / 8;
	var offset = sliderFrame.Y;
	var x = sliderFrame.X;
	var width = Sliders.Last().Frame.Right;

	for (int i = 0; i < 9; i++)
	{
		// NOTE(review): alpha is 0, so the stroke is invisible; presumably the
		// ruling is intentionally hidden for now - confirm before changing.
		UIColor.Black.ColorWithAlpha(0f).SetStroke();
		var currH = (i * h) + thumbH;

		//if (i == 0)
		//{
		//	//// Text Drawing
		//	var textRect = new CGRect(0, currH + offset - 7.5f, 37, 13);
		//	textColor.SetFill();
		//	new Foundation.NSString(textContent).DrawString(textRect, UIFont.FromName(Style.Fonts.AvenirLight, 10), UILineBreakMode.WordWrap, UITextAlignment.Right);
		//	//UIColor.Black.ColorWithAlpha(.5f).SetStroke ();
		//}
		//else if (i == 4)
		{
			//// Text Drawing
			//var textRect = new CGRect(0, currH + offset - 7.5f, 37, 13);
			//textColor.SetFill();
			//new Foundation.NSString(text2Content).DrawString(textRect, UIFont.FromName(Style.Fonts.AvenirLight, 10), UILineBreakMode.WordWrap, UITextAlignment.Right);
			//textColor.ColorWithAlpha(.5f).SetStroke();
			// Style.Colors.LightGray.Value.ColorWithAlpha (.1f).SetStroke ();
		}
		//else if (i == 8)
		//{
		//	//// Text Drawing
		//	var textRect = new CGRect(0, currH + offset - 7.5f, 37, 13);
		//	textColor.SetFill();
		//	new Foundation.NSString(text3Content).DrawString(textRect, UIFont.FromName(Style.Fonts.AvenirLight, 10), UILineBreakMode.WordWrap, UITextAlignment.Right);
		//	//UIColor.Black.ColorWithAlpha(.5f).SetStroke ();
		//}

		context.MoveTo(x, currH + offset);
		context.AddLineToPoint(width, currH + offset);
		context.StrokePath();
	}
}
/// <summary>
/// Creates a transformation that converts images into the supplied target
/// color space (rather than through an RGBAW color matrix).
/// </summary>
public ColorSpaceTransformation(CGColorSpace colorSpace)
{
	// The color-space path and the matrix path are mutually exclusive;
	// exactly one of the two backing fields is ever set.
	_colorMatrix = null;
	_colorSpace = colorSpace;
}
void InitPaint()
{
	// (Re)creates the RGBA backing buffer and bitmap context whenever the
	// image view has been resized; a no-op while the size is unchanged.
	_width = (int)Math.Round(_imageView.Frame.Width);
	_height = (int)Math.Round(_imageView.Frame.Height);

	bool sizeUnchanged = _width == _oldWidth && _height == _oldHeight;
	if (sizeUnchanged)
	{
		return;
	}

	_oldWidth = _width;
	_oldHeight = _height;

	const int bytesPerPixel = 4;   // RGBA, one byte per channel
	const int bitsPerComponent = 8;
	int stride = bytesPerPixel * Width;

	_rawData = new byte[stride * Height];

	// The device RGB color space is created lazily and reused across resizes.
	_colorSpace = _colorSpace ?? CGColorSpace.CreateDeviceRGB();

	// Throw away any context that was built for the previous size.
	_context?.Dispose();
	_context = new CGBitmapContext(_rawData, Width, Height, bitsPerComponent, stride, _colorSpace, CGBitmapFlags.ByteOrder32Big | CGBitmapFlags.PremultipliedLast);
}
// Draws our animation path into an offscreen bitmap and installs the result
// as the background image, just to make the path visible.
protected void DrawPathAsBackground()
{
	// create our offscreen bitmap context, sized to match the view
	SizeF bitmapSize = new SizeF(this.View.Frame.Size);

	// FIX: the device-RGB color space used to be created inline in the
	// CGBitmapContext call and was never released; holding it in a using
	// block disposes it together with the context.
	using (CGColorSpace colorSpace = CGColorSpace.CreateDeviceRGB())
	using (CGBitmapContext context = new CGBitmapContext(IntPtr.Zero
		, (int)bitmapSize.Width, (int)bitmapSize.Height, 8
		, (int)(4 * bitmapSize.Width), colorSpace
		, CGImageAlphaInfo.PremultipliedFirst))
	{
		// convert to View space: invert the y axis and move it up, since
		// CoreGraphics and UIKit disagree on which way y grows
		CGAffineTransform affineTransform = CGAffineTransform.MakeIdentity();
		affineTransform.Scale(1, -1);
		affineTransform.Translate(0, this.View.Frame.Height);
		context.ConcatCTM(affineTransform);

		// actually draw the path
		context.AddPath(animationPath);
		context.SetStrokeColorWithColor(UIColor.LightGray.CGColor);
		context.SetLineWidth(3);
		context.StrokePath();

		// set what we've drawn as the backgound image
		backgroundImage.Image = UIImage.FromImage(context.ToImage());
	}
}
/// <summary>
/// Read a UIImage, convert the pixel data and write it to the native pointer.
/// </summary>
/// <typeparam name="T">The type to convert the image pixel values to, e.g. "float" or "byte".</typeparam>
/// <param name="imageOriginal">The input image.</param>
/// <param name="dest">The native pointer where the converted pixel values will be written.</param>
/// <param name="inputHeight">The height the image is resized to; must match the tensor's height requirement. Non-positive means no resize.</param>
/// <param name="inputWidth">The width the image is resized to; must match the tensor's width requirement. Non-positive means no resize.</param>
/// <param name="inputMean">The mean value; it is subtracted from each pixel channel value.</param>
/// <param name="scale">The scale; after the mean is subtracted, each channel value is multiplied by it.</param>
/// <param name="flipUpSideDown">If true, the image needs to be flipped upside down (not implemented).</param>
/// <param name="swapBR">If true, the Blue and Red channels are swapped: false produces RGB channel order in the tensor, true produces BGR.</param>
public static void ReadImageToTensor<T>(
	UIImage imageOriginal,
	IntPtr dest,
	int inputHeight = -1,
	int inputWidth = -1,
	float inputMean = 0.0f,
	float scale = 1.0f,
	bool flipUpSideDown = false,
	bool swapBR = false) where T : struct
{
	if (flipUpSideDown)
	{
		throw new NotImplementedException("Flip Up Side Down is Not implemented");
	}

	UIImage image;
	if (inputHeight > 0 || inputWidth > 0)
	{
		// NOTE(review): if only one of inputWidth/inputHeight is positive the
		// other is still -1 here, yielding a negative scale target - confirm
		// callers always pass both or neither.
		image = imageOriginal.Scale(new CGSize(inputWidth, inputHeight));
	}
	else
	{
		image = imageOriginal;
	}

	try
	{
		int[] intValues = new int[(int)(image.Size.Width * image.Size.Height)];
		float[] floatValues = new float[(int)(image.Size.Width * image.Size.Height * 3)];

		// Pin the managed pixel array so CoreGraphics can render straight into it.
		System.Runtime.InteropServices.GCHandle handle =
			System.Runtime.InteropServices.GCHandle.Alloc(intValues, System.Runtime.InteropServices.GCHandleType.Pinned);
		try
		{
			using (CGImage cgimage = image.CGImage)
			using (CGColorSpace cspace = CGColorSpace.CreateDeviceRGB())
			using (CGBitmapContext context = new CGBitmapContext(
				handle.AddrOfPinnedObject(),
				(nint)image.Size.Width,
				(nint)image.Size.Height,
				8,
				(nint)image.Size.Width * 4,
				cspace,
				CGImageAlphaInfo.PremultipliedLast))
			{
				context.DrawImage(new CGRect(new CGPoint(), image.Size), cgimage);
			}
		}
		finally
		{
			// FIX: release the pin even if rendering throws; previously an
			// exception here leaked the pinned GCHandle (and kept the array
			// pinned for the lifetime of the process).
			handle.Free();
		}

		// Unpack three channels per 32-bit pixel, subtract the mean and scale.
		if (swapBR)
		{
			for (int i = 0; i < intValues.Length; ++i)
			{
				int val = intValues[i];
				floatValues[i * 3 + 0] = ((val & 0xFF) - inputMean) * scale;
				floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
				floatValues[i * 3 + 2] = (((val >> 16) & 0xFF) - inputMean) * scale;
			}
		}
		else
		{
			for (int i = 0; i < intValues.Length; ++i)
			{
				int val = intValues[i];
				floatValues[i * 3 + 0] = (((val >> 16) & 0xFF) - inputMean) * scale;
				floatValues[i * 3 + 1] = (((val >> 8) & 0xFF) - inputMean) * scale;
				floatValues[i * 3 + 2] = ((val & 0xFF) - inputMean) * scale;
			}
		}

		if (typeof(T) == typeof(float))
		{
			Marshal.Copy(floatValues, 0, dest, floatValues.Length);
		}
		else if (typeof(T) == typeof(byte))
		{
			// copy float to bytes (values are truncated by the cast, not rounded)
			byte[] byteValues = new byte[floatValues.Length];
			for (int i = 0; i < floatValues.Length; i++)
			{
				byteValues[i] = (byte)floatValues[i];
			}
			Marshal.Copy(byteValues, 0, dest, byteValues.Length);
		}
		else
		{
			throw new NotImplementedException(String.Format("Destination data type {0} is not supported.", typeof(T).ToString()));
		}
	}
	finally
	{
		// Dispose the intermediate image only when the resize created one.
		if (image != imageOriginal)
		{
			image.Dispose();
		}
	}
}
public override void Draw(CGRect rect)
{
	// Renders the divider between the split panes. The thin style defers to
	// the base implementation; the pane-splitter style draws a light-to-dark
	// gradient, a 1px border on both long edges, and a grip thumb.
	if (SplitViewController.DividerStyle == DividerStyle.Thin)
	{
		base.Draw(rect);
	}
	else if (SplitViewController.DividerStyle == DividerStyle.PaneSplitter)
	{
		// Draw gradient background.
		var bounds = Bounds;
		var locations = new nfloat[] { 0, 1 };
		var components = new nfloat[] {
			// light
			0.988f, 0.988f, 0.988f, 1.0f,
			// dark
			0.875f, 0.875f, 0.875f, 1.0f
		};

		// FIX: the color space and gradient were disposed with explicit
		// Dispose() calls, which leaked both if DrawLinearGradient threw;
		// using-blocks make the cleanup exception-safe.
		using (var rgb = CGColorSpace.CreateDeviceRGB())
		using (var gradient = new CGGradient(rgb, components, locations))
		{
			var context = UIGraphics.GetCurrentContext();
			CGPoint start;
			CGPoint end;
			if (SplitViewController.IsVertical)
			{
				// Light left to dark right.
				start = new CGPoint(bounds.GetMinX(), bounds.GetMidY());
				end = new CGPoint(bounds.GetMaxX(), bounds.GetMidY());
			}
			else
			{
				// Light top to dark bottom.
				start = new CGPoint(bounds.GetMidX(), bounds.GetMinY());
				end = new CGPoint(bounds.GetMidX(), bounds.GetMaxY());
			}
			context.DrawLinearGradient(gradient, start, end, CGGradientDrawingOptions.DrawsAfterEndLocation);
		}

		// Draw borders.
		var borderThickness = 1.0f;
		UIColor.FromWhiteAlpha(0.7f, 1.0f).SetFill();
		UIColor.FromWhiteAlpha(0.7f, 1.0f).SetStroke();
		var borderRect = bounds;
		if (SplitViewController.IsVertical)
		{
			borderRect.Width = borderThickness;
			UIGraphics.RectFill(borderRect);
			borderRect.X = bounds.GetMaxX() - borderThickness;
			UIGraphics.RectFill(borderRect);
		}
		else
		{
			borderRect.Height = borderThickness;
			UIGraphics.RectFill(borderRect);
			borderRect.Y = bounds.GetMaxY() - borderThickness;
			UIGraphics.RectFill(borderRect);
		}

		// Draw grip.
		DrawGripThumbInRect(bounds);
	}
}
public static UIImage MakeCalendarBadge(string smallText, string bigText)
{
	// Renders a 42x42 calendar-style badge: a grey-gradient rounded square
	// with bigText (e.g. the day number) on top and smallText in a red band
	// along the bottom. Drawing code originally generated with PaintCode.
	UIGraphics.BeginImageContext(new SizeF(42, 42));

	// ------------- START PAINTCODE ----------------

	//// Abstracted Graphic Attributes
	var textContent = bigText;
	var text2Content = smallText;

	//// General Declarations
	// FIX: the color space and gradient were never disposed; using-blocks
	// release the native resources deterministically.
	using (var colorSpace = CGColorSpace.CreateDeviceRGB())
	{
		var context = UIGraphics.GetCurrentContext();

		//// Color Declarations
		UIColor dateRed = UIColor.FromRGBA(0.83f, 0.11f, 0.06f, 1.00f);

		//// Gradient Declarations
		var greyGradientColors = new CGColor [] {
			UIColor.White.CGColor,
			UIColor.FromRGBA(0.57f, 0.57f, 0.57f, 1.00f).CGColor,
			UIColor.Black.CGColor
		};
		var greyGradientLocations = new float [] { 0.65f, 0.75f, 0.75f };
		using (var greyGradient = new CGGradient(colorSpace, greyGradientColors, greyGradientLocations))
		{
			//// Shadow Declarations
			var dropShadow = UIColor.DarkGray.CGColor;
			var dropShadowOffset = new SizeF(2, 2);
			var dropShadowBlurRadius = 1;

			//// Rounded Rectangle Drawing
			var roundedRectanglePath = UIBezierPath.FromRoundedRect(new RectangleF(1.5f, 1.5f, 38, 38), 4);
			context.SaveState();
			context.SetShadowWithColor(dropShadowOffset, dropShadowBlurRadius, dropShadow);
			context.BeginTransparencyLayer(null);
			roundedRectanglePath.AddClip();
			context.DrawLinearGradient(greyGradient, new PointF(20.5f, 1.5f), new PointF(20.5f, 39.5f), 0);
			context.EndTransparencyLayer();
			context.RestoreState();

			UIColor.Black.SetStroke();
			roundedRectanglePath.LineWidth = 1;
			roundedRectanglePath.Stroke();
		}

		//// Rounded Rectangle 2 Drawing (red band along the bottom)
		var roundedRectangle2Path = UIBezierPath.FromRoundedRect(new RectangleF(2, 28, 37, 11), UIRectCorner.BottomLeft | UIRectCorner.BottomRight, new SizeF(4, 4));
		dateRed.SetFill();
		roundedRectangle2Path.Fill();

		//// Text Drawing
		var textRect = new RectangleF(2, 0, 37, 28);
		UIColor.Black.SetFill();
		new NSString(textContent).DrawString(textRect, UIFont.FromName("Helvetica-Bold", 24), UILineBreakMode.WordWrap, UITextAlignment.Center);

		//// Text 2 Drawing
		var text2Rect = new RectangleF(2, 27, 37, 15);
		UIColor.White.SetFill();
		new NSString(text2Content).DrawString(text2Rect, UIFont.FromName("HelveticaNeue-Bold", 9), UILineBreakMode.WordWrap, UITextAlignment.Center);
	}

	// ------------- END PAINTCODE ----------------

	var converted = UIGraphics.GetImageFromCurrentImageContext();
	UIGraphics.EndImageContext();
	return(converted);
}
/// <summary>
/// Uses a three-dimensional color table to transform the source image pixels
/// and maps the result to a specified color space (the Core Image
/// <c>CIColorCubeWithColorSpace</c> filter).
/// </summary>
/// <remarks>
/// Behaves like <c>CIColorCube</c>, except the cube lookup is performed in the
/// supplied color space rather than the default generic linear RGB space
/// (<c>kCGColorSpaceGenericRGBLinear</c>); with a null color space the effect
/// is identical to <c>CIColorCube</c>. Member of CICategoryBuiltIn,
/// CICategoryStillImage, CICategoryNonSquarePixels, CICategoryInterlaced,
/// CICategoryVideo and CICategoryColorEffect. Available in OS X v10.9 and
/// later and in iOS 7.0 and later.
/// </remarks>
/// <param name='cubeDimension'>Edge length of the color cube; minimum 2, maximum 128, default 2.</param>
/// <param name='cubeData'>Raw bytes of the three-dimensional lookup table.</param>
/// <param name='colorSpace'>The color space the mapped result is expressed in.</param>
/// <returns>This object itself, for chaining filters</returns>
public ImageFilter ColorCubeWithColorSpace(float cubeDimension, byte[] cubeData, CGColorSpace colorSpace)
{
	// The raw byte table must be wrapped in an NSData before Core Image accepts it.
	var parameters = new Dictionary<string, object>
	{
		["inputCubeDimension"] = cubeDimension,
		["inputCubeData"] = NSData.FromByteArray(cubeData),
		["inputColorSpace"] = colorSpace,
	};
	return Filter("CIColorCubeWithColorSpace", parameters);
}