Exemplo n.º 1
0
        /// <summary>
        /// Snapshots <paramref name="view"/>'s hierarchy, applies a Gaussian blur,
        /// then crops and shifts the result so the blur's soft edges are hidden.
        /// </summary>
        /// <param name="view">The view whose visible hierarchy is rendered and blurred.</param>
        /// <returns>A blurred <see cref="UIImage"/> aligned with the view's bounds.</returns>
        public static UIImage CreateBlurImageFromView(UIView view)
        {
            const float blurRadius = 2f;

            var size = view.Bounds.Size;

            // Render the view hierarchy into an offscreen image context.
            UIGraphics.BeginImageContext(size);
            try
            {
                view.DrawViewHierarchy(view.Bounds, false);
                var viewImage = UIGraphics.GetImageFromCurrentImageContext();

                // Blur the snapshot.
                var gaussianBlurFilter = new CIGaussianBlur
                {
                    Image  = CIImage.FromCGImage(viewImage.CGImage),
                    Radius = blurRadius
                };
                var resultImage = gaussianBlurFilter.OutputImage;

                // The blur bleeds past the original bounds: crop the soft border away,
                // then translate the image back so it lines up with the view again.
                var croppedImage = resultImage.ImageByCroppingToRect(
                    new CGRect(blurRadius, blurRadius, size.Width - 2 * blurRadius, size.Height - 2 * blurRadius));

                var transformFilter = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeTranslation(-blurRadius, blurRadius),
                    Image     = croppedImage
                };

                return new UIImage(transformFilter.OutputImage);
            }
            finally
            {
                // BUG FIX: the original never ended the image context, leaking it on every call.
                UIGraphics.EndImageContext();
            }
        }
Exemplo n.º 2
0
        /// <summary>
        /// Draws <paramref name="drawAction"/>'s content into an offscreen image and re-renders it
        /// tinted with <paramref name="color"/> via a Core Image desaturate + color-matrix pipeline.
        /// </summary>
        /// <param name="control">The view whose frame sizes the drawing and whose flippedness/backing scale are honored.</param>
        /// <param name="color">The tint color fed into the color matrix.</param>
        /// <param name="drawAction">Callback that performs the drawing while image focus is locked.</param>
        public static void Colourize(NSView control, Color color, Action drawAction)
        {
            var size = control.Frame.Size;

            // A zero-sized NSImage cannot be drawn into; bail out early.
            if (size.Width <= 0 || size.Height <= 0)
            {
                return;
            }
            var image = new NSImage(size);

            // Capture the caller's drawing into the image, matching the control's coordinate flip.
            image.LockFocusFlipped(control.IsFlipped);
            drawAction();
            image.UnlockFocus();

            var ciImage = CIImage.FromCGImage(image.CGImage);

            // Convert logical points to backing (pixel) size; fall back to the legacy
            // base-coordinate conversion on OS versions without ConvertSizeToBacking.
            SD.SizeF realSize;
            if (control.RespondsToSelector(selConvertSizeToBacking))
            {
                realSize = control.ConvertSizeToBacking(size);
            }
            else
            {
                realSize = control.ConvertSizeToBase(size);
            }

            // Flipped views are captured upside-down for Core Image; flip vertically first.
            if (control.IsFlipped)
            {
                var affineTransform = new NSAffineTransform();
                affineTransform.Translate(0, realSize.Height);
                affineTransform.Scale(1, -1);
                var filter1 = new CIAffineTransform();
                filter1.Image = ciImage;
                filter1.SetValueForKey(affineTransform, CIInputTransform);
                ciImage = filter1.ValueForKey(CIOutputImage) as CIImage;
            }

            // Desaturate to grayscale so the color matrix below fully controls the hue.
            var filter2 = new CIColorControls();

            filter2.SetDefaults();
            filter2.Image      = ciImage;
            filter2.Saturation = 0.0f;
            ciImage            = filter2.ValueForKey(CIOutputImage) as CIImage;

            // Map grayscale intensity into the requested tint.
            // NOTE(review): RVector samples the green channel and GVector the red channel —
            // presumably intentional swizzling for this pipeline; confirm if tints look wrong.
            var filter3 = new CIColorMatrix();

            filter3.SetDefaults();
            filter3.Image   = ciImage;
            filter3.RVector = new CIVector(0, color.R, 0);
            filter3.GVector = new CIVector(color.G, 0, 0);
            filter3.BVector = new CIVector(0, 0, color.B);
            ciImage         = filter3.ValueForKey(CIOutputImage) as CIImage;

            // Composite the tinted pixels back at logical size into the current context.
            ciImage.Draw(new SD.RectangleF(SD.PointF.Empty, size), new SD.RectangleF(SD.PointF.Empty, realSize), NSCompositingOperation.SourceOver, 1);
        }
Exemplo n.º 3
0
        /// <summary>
        /// Applies a shear (skew) affine transform to the sample image.
        /// </summary>
        /// <returns>The skewed <see cref="CIImage"/>.</returns>
        public CIImage AffineTransform()
        {
            // Unit scale on both axes with a 0.5 cross-axis skew component.
            var skew = new CGAffineTransform(1F, .5F, .5F, 1F, 0F, 0F);

            var filter = new CIAffineTransform();
            filter.Image     = flower;
            filter.Transform = skew;

            return filter.OutputImage;
        }
        /// <summary>
        /// Rotates, crops, and edge-detects an incoming camera frame, rendering the result
        /// into a reusable 1080x1152 BGRA pixel buffer suitable for CoreML input.
        /// </summary>
        /// <param name="sampleBuffer">A camera sample buffer; must contain an image buffer.</param>
        /// <returns>The shared <c>resultBuffer</c> containing the processed frame.</returns>
        /// <exception cref="ArgumentException">Thrown when the sample buffer has no image buffer.</exception>
        /// <exception cref="Exception">Thrown when the output pixel buffer cannot be allocated.</exception>
        public CVPixelBuffer CroppedSampleBuffer(CMSampleBuffer sampleBuffer)
        {
            var imageBuffer = sampleBuffer.GetImageBuffer();

            if (imageBuffer == null)
            {
                throw new ArgumentException("Cannot convert to CVImageBuffer");
            }

            // Only doing these calculations once for efficiency.
            // If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
            if (!alreadySet)
            {
                alreadySet = true;

                var imageSize = imageBuffer.EncodedSize;

                /*
                 * Incoming image size is set in VideoCapture.BeginSession as AVCaptureSession.Preset1920x1080;
                 * Which, buffer-wise, is always captured landscape-style, but info.plist specifies that this
                 * app runs only in portrait. Therefore, the buffer is always sideways, i.e., `imageSize == [Width: 1920, Height: 1080]`
                 *
                 * Since our UI blurs out the top and bottom of the image, what we're interested in is the middle
                 * 3/5 of the long side, and the entirety of the 1080 (short side), rotated 90 degrees anti-clockwise.
                 *
                 * To get good alignment, this also requires some manual tweaking (LayoutMargins?), which probably changes
                 * between hardware
                 */

                // Swap width/height: the portrait-oriented size after the 90-degree rotation below.
                var rotatedSize = new CGSize(imageSize.Height, imageSize.Width);

                var shorterSide = rotatedSize.Width < rotatedSize.Height ? rotatedSize.Width : rotatedSize.Height;

                // 90-degree anti-clockwise rotation expressed as a raw affine matrix,
                // translated by the short side so the result stays in positive coordinates.
                rotateTransform = new CIAffineTransform
                {
                    Transform = new CGAffineTransform(0, -1, 1, 0, 0, shorterSide)
                };

                cropTransform = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeTranslation(0, (int)(1920.0 / 5) + 60)                      // Translate down past the cropped area + manual tweak
                };

                edgeDetector = new CIEdges();
            }

            // Convert to CIImage because it is easier to manipulate
            var ciImage = CIImage.FromImageBuffer(imageBuffer);

            // Chain: rotate -> translate (crop offset) -> edge detect.
            rotateTransform.Image = ciImage;
            cropTransform.Image   = rotateTransform.OutputImage;
            edgeDetector.Image    = cropTransform.OutputImage;

            var cropped = edgeDetector.OutputImage;


            // Note that the above pipeline could be easily appended with other image manipulations.
            // For example, to change the image contrast, detect edges, etc. It would be most efficient to handle all of
            // the image manipulation in a single Core Image pipeline because it can be hardware optimized.

            // Only need to create this buffer one time and then we can reuse it for every frame
            if (resultBuffer == null || resultBuffer.Handle == IntPtr.Zero)
            {
                var    targetSize = new CGSize(1080, 1152);              //1080, 3/5 * 1920
                byte[] data       = new byte[(int)targetSize.Height * 4 * (int)targetSize.Width];

                resultBuffer = CVPixelBuffer.Create((nint)targetSize.Width, (nint)targetSize.Height, CVPixelFormatType.CV32BGRA, data, 4 * (nint)targetSize.Width, null);

                if (resultBuffer == null)
                {
                    throw new Exception("Can't allocate pixel buffer.");
                }
            }

            context.Render(cropped, resultBuffer);

            //  For debugging
            //var image = ImageBufferToUIImage(resultBuffer);
            //Console.WriteLine("Image size: " + image.Size); // set breakpoint to see image being provided to CoreML

            return(resultBuffer);
        }
Exemplo n.º 5
0
        /// <summary>
        /// Rotates and scales an incoming camera frame down to <paramref name="targetSize"/>,
        /// rendering it into a reusable BGRA pixel buffer for model input.
        /// </summary>
        /// <param name="sampleBuffer">A camera sample buffer; must contain an image buffer.</param>
        /// <param name="targetSize">Output buffer dimensions; must not exceed the rotated capture size.</param>
        /// <returns>The shared <c>resultBuffer</c> containing the processed frame.</returns>
        /// <exception cref="ArgumentException">Thrown when the sample buffer has no image buffer.</exception>
        /// <exception cref="NotSupportedException">Thrown when the captured frame is smaller than the target.</exception>
        /// <exception cref="Exception">Thrown when the output pixel buffer cannot be allocated.</exception>
        public CVPixelBuffer CroppedSampleBuffer(CMSampleBuffer sampleBuffer, CGSize targetSize)
        {
            var imageBuffer = sampleBuffer.GetImageBuffer();

            if (imageBuffer == null)
            {
                throw new ArgumentException("Cannot convert to CVImageBuffer");
            }

            // Only doing these calculations once for efficiency.
            // If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
            if (!alreadySet)
            {
                alreadySet = true;
                var imageSize   = imageBuffer.EncodedSize;
                // Swap width/height: the portrait-oriented size after the 90-degree rotation below.
                var rotatedSize = new CGSize(imageSize.Height, imageSize.Width);

                if (targetSize.Width > rotatedSize.Width || targetSize.Height > rotatedSize.Height)
                {
                    throw new NotSupportedException("Captured image is smaller than image size for model.");
                }

                var shorterSide = rotatedSize.Width < rotatedSize.Height ? rotatedSize.Width : rotatedSize.Height;

                // 90-degree anti-clockwise rotation as a raw affine matrix, translated
                // by the short side to keep the result in positive coordinates.
                rotateTransform = new CIAffineTransform
                {
                    Transform = new CGAffineTransform(0, -1, 1, 0, 0, shorterSide)
                                //Transform = CGAffineTransform.MakeIdentity()
                };

                // Uniform scale so the shorter side exactly matches the target width.
                var scale = targetSize.Width / shorterSide;
                scaleTransform = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeScale(scale, scale),
                };

                // NOTE(review): xDiff/yDiff are only used by the commented-out centering
                // translation below; the crop is currently a no-op identity transform.
                var xDiff = rotatedSize.Width * scale - targetSize.Width;
                var yDiff = rotatedSize.Height * scale - targetSize.Height;

                cropTransform = new CIAffineTransform
                {
                    //Transform = CGAffineTransform.MakeTranslation(xDiff / 2.0f, yDiff / 2.0f),
                    Transform = CGAffineTransform.MakeIdentity()
                };
            }

            // Convert to CIImage because it is easier to manipulate
            var ciImage = CIImage.FromImageBuffer(imageBuffer);

            // Chain: rotate -> scale -> (identity) crop.
            rotateTransform.Image = ciImage;
            scaleTransform.Image  = rotateTransform.OutputImage;
            cropTransform.Image   = scaleTransform.OutputImage;
            var cropped = cropTransform.OutputImage;


            // Note that the above pipeline could be easily appended with other image manipulations.
            // For example, to change the image contrast. It would be most efficient to handle all of
            // the image manipulation in a single Core Image pipeline because it can be hardware optimized.

            // Only need to create this buffer one time and then we can reuse it for every frame
            if (resultBuffer == null || resultBuffer.Handle == IntPtr.Zero)
            {
                byte[] data = new byte[(int)targetSize.Height * 4 * (int)targetSize.Width];

                resultBuffer = CVPixelBuffer.Create((nint)targetSize.Width, (nint)targetSize.Height, CVPixelFormatType.CV32BGRA, data, 4 * (nint)targetSize.Width, null);                 // HACK

                if (resultBuffer == null)
                {
                    throw new Exception("Can't allocate pixel buffer.");
                }
            }

            context.Render(cropped, resultBuffer);

            //  For debugging
            //var image = ImageBufferToUIImage(resultBuffer);
            //Console.WriteLine("Image size: " + image.Size); // set breakpoint to see image being provided to CoreML

            return(resultBuffer);
        }
Exemplo n.º 6
0
		/// <summary>
		/// Skews the sample image with a shear affine transform.
		/// </summary>
		/// <returns>The skewed <see cref="CIImage"/>.</returns>
		public CIImage AffineTransform ()
		{
			// Build the filter in one step: unit scale plus a 0.5 cross-axis skew.
			var filter = new CIAffineTransform () {
				Image = flower,
				Transform = new CGAffineTransform (1F, .5F, .5F, 1F, 0F, 0F)
			};

			return filter.OutputImage;
		}