Example #1
        public void CreateWithBytes()
        {
            nint     width       = 1280;
            nint     height      = 720;
            nint     bytesPerRow = width * 4;
            CVReturn status;

            var data = new byte [height * bytesPerRow];

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32RGBA, data, bytesPerRow, null, out status)) {
                Assert.AreEqual(CVReturn.InvalidPixelFormat, status, "CV32RGBA");
                Assert.IsNull(buf, "CV32RGBA - null");
            }

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32BGRA, data, bytesPerRow, null, out status)) {
                Assert.AreEqual(CVReturn.Success, status, "CV32BGRA");
                Assert.IsNotNull(buf, "CV32BGRA - not null");
            }

            var dict = new CVPixelBufferAttributes();

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32BGRA, data, bytesPerRow, dict)) {
                Assert.IsNotNull(buf);
            }

            Assert.Throws <ArgumentNullException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32BGRA, null, bytesPerRow, null), "null data");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32BGRA, data, bytesPerRow + 1, null), "bytesPerRow+1");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height + 1, CVPixelFormatType.CV32BGRA, data, bytesPerRow, null), "height+1");
        }
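
A minimal follow-on sketch (assuming the same NUnit and CoreVideo usings as the test above; the InspectBuffer name is illustrative, not part of the original suite): once a buffer has been created successfully, its pixel memory must be locked before it is read and unlocked afterwards.

        public void InspectBuffer()
        {
            nint width = 1280, height = 720, bytesPerRow = width * 4;
            var  data  = new byte [height * bytesPerRow];

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32BGRA, data, bytesPerRow, null)) {
                Assert.AreEqual(width, buf.Width, "width");
                Assert.AreEqual(height, buf.Height, "height");

                buf.Lock(CVPixelBufferLock.ReadOnly);        // required before touching pixel memory
                Assert.AreNotEqual(IntPtr.Zero, buf.BaseAddress, "base address");
                buf.Unlock(CVPixelBufferLock.ReadOnly);
            }
        }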
Example #2
        public void CreateWithPlanarBytes()
        {
            nint width  = 1280;
            nint height = 720;

            nint[]   planeWidths      = new nint[] { width, width / 2 };
            nint[]   planeHeights     = new nint[] { height, height / 2 };
            nint[]   planeBytesPerRow = new nint[] { width, width };
            CVReturn status;

            var data = new byte[][] {
                new byte [planeHeights [0] * planeBytesPerRow [0]],
                new byte [planeHeights [1] * planeBytesPerRow [1]],
            };

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV32RGBA, data, planeWidths, planeHeights, planeBytesPerRow, null, out status)) {
                Assert.IsNull(buf);
                Assert.AreEqual(CVReturn.InvalidPixelFormat, status, "invalid status");
            }

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, planeHeights, planeBytesPerRow, null)) {
                Assert.IsNotNull(buf);
            }

            var dict = new CVPixelBufferAttributes();

            using (var buf = CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, planeHeights, planeBytesPerRow, dict)) {
                Assert.IsNotNull(buf);
            }

            Assert.Throws <ArgumentNullException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, null, planeWidths, planeHeights, planeBytesPerRow, null), "null data");
            Assert.Throws <ArgumentNullException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, null, planeHeights, planeBytesPerRow, null), "null widths");
            Assert.Throws <ArgumentNullException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, null, planeBytesPerRow, null), "null heights");
            Assert.Throws <ArgumentNullException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, planeHeights, null, null), "null bytesPerRow");

            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, new nint[] { width }, planeHeights, planeBytesPerRow, null), "invalid widths a");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, new nint[] { width, width, width }, planeHeights, planeBytesPerRow, null), "invalid widths b");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, new nint[] { height }, planeBytesPerRow, null), "invalid heights a");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, new nint[] { height, height, height }, planeBytesPerRow, null), "invalid heights b");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, planeHeights, new nint [] { width }, null), "invalid bytesPerRow a");
            Assert.Throws <ArgumentOutOfRangeException> (() => CVPixelBuffer.Create(width, height, CVPixelFormatType.CV420YpCbCr8BiPlanarVideoRange, data, planeWidths, planeHeights, new nint [] { width, width, width }, null), "invalid bytesPerRow b");
        }
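
The plane geometry above follows from the 4:2:0 bi-planar layout: the Y plane stores one byte per pixel at full resolution, while the CbCr plane is subsampled to half the width and half the height but packs two bytes (Cb + Cr) per sample, so both planes need `width` bytes per row. A small sketch of that arithmetic (the helper name is illustrative):

        static nint[] BiPlanar420BytesPerRow(nint width)
        {
            nint yBytesPerRow    = width * 1;          // 1 byte per luma sample, full width
            nint cbcrBytesPerRow = (width / 2) * 2;    // half the samples, 2 bytes each

            return new nint[] { yBytesPerRow, cbcrBytesPerRow };    // both equal `width`
        }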
Example #3
        // The caller must explicitly dispose the returned CVPixelBuffer after using it
        public static CVPixelBuffer ToCVPixelBuffer(FrameEntry frame)
        {
            var bytesPerPixel = 4;
            var bytesPerRow   = frame.Width * bytesPerPixel;
            var attr          = new CVPixelBufferAttributes()
            {
                PixelFormatType              = CVPixelFormatType.CV32BGRA,
                AllocateWithIOSurface        = true,
                MetalCompatibility           = true,
                OpenGLCompatibility          = true,
                CGImageCompatibility         = true,
                OpenGLESCompatibility        = true,
                CGBitmapContextCompatibility = true,
                BytesPerRowAlignment         = bytesPerRow,
                Height = frame.Height,
                Width  = frame.Width
            };
            var pixelBuffer = CVPixelBuffer.Create(frame.Width, frame.Height, CVPixelFormatType.CV32BGRA, frame.Frame, bytesPerRow, attr);

            return pixelBuffer;
        }
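
A hedged usage sketch for the helper above (the ProcessFrame name and its consumer are illustrative; FrameEntry comes from the surrounding project): because ToCVPixelBuffer transfers ownership, the caller wraps the result in `using` so the buffer is disposed deterministically.

        public static void ProcessFrame(FrameEntry frame)
        {
            using (var pixelBuffer = ToCVPixelBuffer(frame))
            {
                // hand pixelBuffer to Vision, CoreML, Metal, etc. here
            }
        }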
        public CVPixelBuffer CroppedSampleBuffer(CMSampleBuffer sampleBuffer)
        {
            var imageBuffer = sampleBuffer.GetImageBuffer();

            if (imageBuffer == null)
            {
                throw new ArgumentException("Cannot convert to CVImageBuffer");
            }

            // Only doing these calculations once for efficiency.
            // If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
            if (!alreadySet)
            {
                alreadySet = true;

                var imageSize = imageBuffer.EncodedSize;

                /*
                 * The incoming image size is set in VideoCapture.BeginSession as AVCaptureSession.Preset1920x1080,
                 * which, buffer-wise, is always captured landscape-style; however, Info.plist specifies that this
                 * app runs only in portrait. The buffer is therefore always sideways, i.e., `imageSize == [Width: 1920, Height: 1080]`.
                 *
                 * Since our UI blurs out the top and bottom of the image, what we're interested in is the middle
                 * 3/5 of the long side and the entirety of the 1080 (short) side, rotated 90 degrees anti-clockwise.
                 *
                 * Getting good alignment also requires some manual tweaking (LayoutMargins?), which probably varies
                 * between hardware.
                 */

                var rotatedSize = new CGSize(imageSize.Height, imageSize.Width);

                var shorterSide = rotatedSize.Width < rotatedSize.Height ? rotatedSize.Width : rotatedSize.Height;

                rotateTransform = new CIAffineTransform
                {
                    Transform = new CGAffineTransform(0, -1, 1, 0, 0, shorterSide)   // maps (x, y) -> (y, shorterSide - x): the 90-degree rotation described above
                };

                cropTransform = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeTranslation(0, (int)(1920.0 / 5) + 60)                      // Translate down past the cropped area + manual tweak
                };

                edgeDetector = new CIEdges();
            }

            // Convert to CIImage because it is easier to manipulate
            var ciImage = CIImage.FromImageBuffer(imageBuffer);

            rotateTransform.Image = ciImage;
            cropTransform.Image   = rotateTransform.OutputImage;
            edgeDetector.Image    = cropTransform.OutputImage;

            var cropped = edgeDetector.OutputImage;


            // Note that other image manipulations could easily be appended to the pipeline above,
            // for example changing the image contrast, detecting edges, etc. It is most efficient to handle all
            // of the image manipulation in a single Core Image pipeline, because it can be hardware optimized.

            // This buffer only needs to be created once; after that it is reused for every frame
            if (resultBuffer == null || resultBuffer.Handle == IntPtr.Zero)
            {
                var    targetSize = new CGSize(1080, 1152);              // 1080 (short side) x 1152 (3/5 * 1920)
                byte[] data       = new byte[(int)targetSize.Height * 4 * (int)targetSize.Width];

                resultBuffer = CVPixelBuffer.Create((nint)targetSize.Width, (nint)targetSize.Height, CVPixelFormatType.CV32BGRA, data, 4 * (nint)targetSize.Width, null);

                if (resultBuffer == null)
                {
                    throw new Exception("Can't allocate pixel buffer.");
                }
            }

            context.Render(cropped, resultBuffer);

            //  For debugging
            //var image = ImageBufferToUIImage(resultBuffer);
            //Console.WriteLine("Image size: " + image.Size); // set breakpoint to see image being provided to CoreML

            return resultBuffer;
        }
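
CroppedSampleBuffer relies on state declared elsewhere in its class. A plausible sketch of those declarations, inferred from the usage above rather than taken from the original listing:

        bool              alreadySet;
        CIAffineTransform rotateTransform;
        CIAffineTransform cropTransform;
        CIEdges           edgeDetector;
        CVPixelBuffer     resultBuffer;
        readonly CIContext context = CIContext.FromOptions(null);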
Example #5
        public CVPixelBuffer CroppedSampleBuffer(CMSampleBuffer sampleBuffer, CGSize targetSize)
        {
            var imageBuffer = sampleBuffer.GetImageBuffer();

            if (imageBuffer == null)
            {
                throw new ArgumentException("Cannot convert to CVImageBuffer");
            }

            // Only doing these calculations once for efficiency.
            // If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
            if (!alreadySet)
            {
                alreadySet = true;
                var imageSize   = imageBuffer.EncodedSize;
                var rotatedSize = new CGSize(imageSize.Height, imageSize.Width);

                if (targetSize.Width > rotatedSize.Width || targetSize.Height > rotatedSize.Height)
                {
                    throw new NotSupportedException("Captured image is smaller than image size for model.");
                }

                var shorterSide = rotatedSize.Width < rotatedSize.Height ? rotatedSize.Width : rotatedSize.Height;

                rotateTransform = new CIAffineTransform
                {
                    Transform = new CGAffineTransform(0, -1, 1, 0, 0, shorterSide)   // maps (x, y) -> (y, shorterSide - x): rotate 90 degrees anti-clockwise
                                //Transform = CGAffineTransform.MakeIdentity()
                };

                var scale = targetSize.Width / shorterSide;
                scaleTransform = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeScale(scale, scale),
                };

                // xDiff and yDiff are only needed if the centering translation below is re-enabled.
                var xDiff = rotatedSize.Width * scale - targetSize.Width;
                var yDiff = rotatedSize.Height * scale - targetSize.Height;

                cropTransform = new CIAffineTransform
                {
                    //Transform = CGAffineTransform.MakeTranslation(xDiff / 2.0f, yDiff / 2.0f),
                    Transform = CGAffineTransform.MakeIdentity()
                };
            }

            // Convert to CIImage because it is easier to manipulate
            var ciImage = CIImage.FromImageBuffer(imageBuffer);

            rotateTransform.Image = ciImage;
            scaleTransform.Image  = rotateTransform.OutputImage;
            cropTransform.Image   = scaleTransform.OutputImage;
            var cropped = cropTransform.OutputImage;


            // Note that other image manipulations could easily be appended to the pipeline above,
            // for example changing the image contrast. It is most efficient to handle all
            // of the image manipulation in a single Core Image pipeline, because it can be hardware optimized.

            // This buffer only needs to be created once; after that it is reused for every frame
            if (resultBuffer == null || resultBuffer.Handle == IntPtr.Zero)
            {
                byte[] data = new byte[(int)targetSize.Height * 4 * (int)targetSize.Width];

                resultBuffer = CVPixelBuffer.Create((nint)targetSize.Width, (nint)targetSize.Height, CVPixelFormatType.CV32BGRA, data, 4 * (nint)targetSize.Width, null);                 // HACK

                if (resultBuffer == null)
                {
                    throw new Exception("Can't allocate pixel buffer.");
                }
            }

            context.Render(cropped, resultBuffer);

            //  For debugging
            //var image = ImageBufferToUIImage(resultBuffer);
            //Console.WriteLine("Image size: " + image.Size); // set breakpoint to see image being provided to CoreML

            return resultBuffer;
        }
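
A hedged call-site sketch, assuming this class also serves as the AVCaptureVideoDataOutputSampleBufferDelegate and that the model expects 299x299 input; the size and the HandlePrediction hand-off are illustrative, not part of the listing above.

        public override void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
        {
            var cropped = CroppedSampleBuffer(sampleBuffer, new CGSize(299, 299));

            // `cropped` is the reused resultBuffer, so it must not be disposed here.
            HandlePrediction(cropped);    // hypothetical CoreML hand-off

            // The incoming sample buffer, however, should be released promptly to keep frames flowing.
            sampleBuffer.Dispose();
        }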