Code Example #1
        void ImagePicker_FinishedPickingMedia(object sender, UIImagePickerMediaPickedEventArgs e)
        {
            if (e.Info[UIImagePickerController.MediaType].ToString() == "public.image")
            {
                UIImage originalImage = e.Info[UIImagePickerController.OriginalImage] as UIImage;
                if (originalImage != null)
                {
                    // Scale to the 300x300 input size the model expects.
                    var           scaledImage = originalImage.Scale(new CGSize(300, 300));
                    var           classifier  = new ImageClassifier();
                    var           coreImage   = new CIImage(scaledImage);
                    CVPixelBuffer buffer      = new CVPixelBuffer(300, 300, CVPixelFormatType.CV32ARGB);

                    // Render the CIImage into the pixel buffer; the throwaway image
                    // context only exists to supply a CGContext for the CIContext.
                    UIGraphics.BeginImageContext(new CGSize(300, 300));
                    CIContext context = CIContext.FromContext(UIGraphics.GetCurrentContext(), null);
                    context.Render(coreImage, buffer);
                    UIGraphics.EndImageContext();

                    var output = classifier.GetPrediction(buffer, out NSError error);

                    imgSelected.Image = scaledImage;
                    lblResult.Text = error == null
                        ? $"This looks like: {output.ClassLabel}"
                        : $"Prediction failed: {error.LocalizedDescription}";
                }
            }

            imagePicker.DismissViewController(true, null);
        }
Code Example #2
 public void AdjustPixelBuffer(CVPixelBuffer inputBuffer, CVPixelBuffer outputBuffer)
 {
     // Wrap the incoming buffer, run it through the filter, and render the
     // result straight into the output buffer. Dispose both CIImages
     // promptly: this runs once per video frame.
     using (CIImage img = CIImage.FromImageBuffer(inputBuffer)) {
         ciFilter.Image = img;
         using (CIImage outImg = ciFilter.OutputImage)
             ciContext.Render(outImg, outputBuffer);
     }
 }
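
Both this example and the next assume two long-lived fields, a filter and a CIContext, created once rather than per frame. A plausible setup, with CISepiaTone standing in for whichever filter is actually used:

 CISepiaTone ciFilter;   // any typed CIFilter subclass with an Image property works here
 CIContext ciContext;

 void SetUpCoreImage()   // hypothetical one-time initializer
 {
     ciFilter  = new CISepiaTone { Intensity = 0.9f };
     ciContext = CIContext.FromOptions(null);
 }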
Code Example #3
        public void AdjustPixelBuffer(CVPixelBuffer inputBuffer, CVPixelBuffer outputBuffer)
        {
            // Same operation as the previous example, but the input image is bound
            // via key-value coding rather than the typed Image property. Wrapping
            // the images in using blocks avoids leaking one CIImage per frame.
            using (CIImage img = CIImage.FromImageBuffer(inputBuffer))
            {
                ciFilter.SetValueForKey(img, CIFilterInputKey.Image);

                using (CIImage outImg = ciFilter.OutputImage)
                {
                    ciContext.Render(outImg, outputBuffer);
                }
            }
        }
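
These two versions do the same per-frame work. The first binds the input through the strongly typed Image property exposed by the generated CIFilter subclasses; this one goes through key-value coding with CIFilterInputKey.Image, which works against any CIFilter variable regardless of its static type. Either way, the filter and context should be long-lived fields, as sketched above, rather than allocated per frame.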
Code Example #4
        unsafe void UpdateBackground(ARFrame frame)
        {
            using (var img = frame.CapturedImage)
            using (var ciImage = CIImage.FromImageBuffer(img))
            using (var rotatedImage = ciImage.CreateByApplyingOrientation(ImageIO.CGImagePropertyOrientation.Right))
            using (var coreImageContext = new CIContext(null))
            {
                var size = rotatedImage.Extent.Size;

                using (var rotatedBuff = new CVPixelBuffer((nint)size.Width,
                                                           (nint)size.Height, img.PixelFormatType))
                {
                    rotatedBuff.Lock(CVPixelBufferLock.None);

                    try
                    {
                        coreImageContext.Render(rotatedImage, rotatedBuff);

                        // The captured image is bi-planar YCbCr: plane 0 holds the
                        // luminance (Y) samples, plane 1 the interleaved chrominance (CbCr).
                        var yPtr  = rotatedBuff.BaseAddress;
                        var uvPtr = rotatedBuff.GetBaseAddress(1);

                        if (yPtr == IntPtr.Zero || uvPtr == IntPtr.Zero)
                        {
                            return;
                        }

                        int wY  = (int)rotatedBuff.Width;
                        int hY  = (int)rotatedBuff.Height;
                        int wUv = (int)rotatedBuff.GetWidthOfPlane(1);
                        int hUv = (int)rotatedBuff.GetHeightOfPlane(1);

                        cameraYtexture.SetData(0, 0, 0, wY, hY, (void *)yPtr);
                        cameraUVtexture.SetData(0, 0, 0, wUv, hUv, (void *)uvPtr);
                    }
                    finally
                    {
                        // Unlock on every exit path, including the early return above.
                        rotatedBuff.Unlock(CVPixelBufferLock.None);
                    }
                }
            }
        }
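
The cameraYtexture and cameraUVtexture fields are assumed to be pre-created dynamic textures; the SetData calls match UrhoSharp's Texture2D API. A minimal sketch of how the method might be driven, assuming the containing class also serves as the ARKit session delegate:

        // Hypothetical wiring; ARSessionDelegate delivers one frame per camera tick.
        public override void DidUpdateFrame(ARSession session, ARFrame frame)
        {
            UpdateBackground(frame);
            frame.Dispose(); // ARKit recycles frames from a small pool; release promptly
        }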
Code Example #5
        public CVPixelBuffer CroppedSampleBuffer(CMSampleBuffer sampleBuffer)
        {
            var imageBuffer = sampleBuffer.GetImageBuffer();

            if (imageBuffer == null)
            {
                throw new ArgumentException("Cannot convert to CVImageBuffer");
            }

            // Only doing these calculations once for efficiency.
            // If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
            if (!alreadySet)
            {
                alreadySet = true;

                var imageSize = imageBuffer.EncodedSize;

                /*
                 * The incoming image size is set in VideoCapture.BeginSession as AVCaptureSession.Preset1920x1080.
                 * Buffer-wise, that is always captured landscape-style, but Info.plist specifies that this app
                 * runs only in portrait, so the buffer is always sideways, i.e., imageSize == [Width: 1920, Height: 1080].
                 *
                 * Since our UI blurs out the top and bottom of the image, what we're interested in is the middle
                 * 3/5 of the long side and the entirety of the 1080 (short) side, rotated 90 degrees anti-clockwise.
                 *
                 * Getting good alignment also requires some manual tweaking (LayoutMargins?), which probably
                 * varies between hardware.
                 */

                var rotatedSize = new CGSize(imageSize.Height, imageSize.Width);

                var shorterSide = rotatedSize.Width < rotatedSize.Height ? rotatedSize.Width : rotatedSize.Height;

                rotateTransform = new CIAffineTransform
                {
                    Transform = new CGAffineTransform(0, -1, 1, 0, 0, shorterSide)
                };

                cropTransform = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeTranslation(0, (int)(1920.0 / 5) + 60)                      // Translate down past the cropped area + manual tweak
                };

                edgeDetector = new CIEdges();
            }

            // Convert to CIImage because it is easier to manipulate
            var ciImage = CIImage.FromImageBuffer(imageBuffer);

            rotateTransform.Image = ciImage;
            cropTransform.Image   = rotateTransform.OutputImage;
            edgeDetector.Image    = cropTransform.OutputImage;

            var cropped = edgeDetector.OutputImage;

            // Note that the above pipeline could be easily appended with other image manipulations.
            // For example, to change the image contrast, detect edges, etc. It would be most efficient to handle all of
            // the image manipulation in a single Core Image pipeline because it can be hardware optimized.

            // Only need to create this buffer one time and then we can reuse it for every frame
            if (resultBuffer == null || resultBuffer.Handle == IntPtr.Zero)
            {
                var    targetSize = new CGSize(1080, 1152);              // 1080 wide; 1152 = 3/5 * 1920 high
                byte[] data       = new byte[(int)targetSize.Height * 4 * (int)targetSize.Width];

                resultBuffer = CVPixelBuffer.Create((nint)targetSize.Width, (nint)targetSize.Height, CVPixelFormatType.CV32BGRA, data, 4 * (nint)targetSize.Width, null);

                if (resultBuffer == null)
                {
                    throw new Exception("Can't allocate pixel buffer.");
                }
            }

            context.Render(cropped, resultBuffer);

            //  For debugging
            //var image = ImageBufferToUIImage(resultBuffer);
            //Console.WriteLine("Image size: " + image.Size); // set breakpoint to see image being provided to CoreML

            return resultBuffer;
        }
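
The method relies on state declared outside it so that the Core Image pipeline is built once and reused for every frame. The field types can be inferred from the usage above; how context is created is not shown, so FromOptions(null) below is just one plausible choice:

        bool              alreadySet;
        CIAffineTransform rotateTransform;
        CIAffineTransform cropTransform;
        CIEdges           edgeDetector;
        CVPixelBuffer     resultBuffer;

        // Assumption: a single shared context; creating one per frame would be wasteful.
        readonly CIContext context = CIContext.FromOptions(null);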
Code Example #6
        public CVPixelBuffer CroppedSampleBuffer(CMSampleBuffer sampleBuffer, CGSize targetSize)
        {
            var imageBuffer = sampleBuffer.GetImageBuffer();

            if (imageBuffer == null)
            {
                throw new ArgumentException("Cannot convert to CVImageBuffer");
            }

            // Only doing these calculations once for efficiency.
            // If the incoming images could change orientation or size during a session, this would need to be reset when that happens.
            if (!alreadySet)
            {
                alreadySet = true;
                var imageSize   = imageBuffer.EncodedSize;
                var rotatedSize = new CGSize(imageSize.Height, imageSize.Width);

                if (targetSize.Width > rotatedSize.Width || targetSize.Height > rotatedSize.Height)
                {
                    throw new NotSupportedException("Captured image is smaller than image size for model.");
                }

                var shorterSide = rotatedSize.Width < rotatedSize.Height ? rotatedSize.Width : rotatedSize.Height;

                rotateTransform = new CIAffineTransform
                {
                    Transform = new CGAffineTransform(0, -1, 1, 0, 0, shorterSide)
                };

                var scale = targetSize.Width / shorterSide;
                scaleTransform = new CIAffineTransform
                {
                    Transform = CGAffineTransform.MakeScale(scale, scale),
                };

                // After scaling, the image overshoots the target by xDiff/yDiff.
                // The commented translation would center the crop; with the identity
                // transform the crop is taken from the buffer origin instead.
                var xDiff = rotatedSize.Width * scale - targetSize.Width;
                var yDiff = rotatedSize.Height * scale - targetSize.Height;

                cropTransform = new CIAffineTransform
                {
                    //Transform = CGAffineTransform.MakeTranslation(xDiff / 2.0f, yDiff / 2.0f),
                    Transform = CGAffineTransform.MakeIdentity()
                };
            }

            // Convert to CIImage because it is easier to manipulate
            var ciImage = CIImage.FromImageBuffer(imageBuffer);

            rotateTransform.Image = ciImage;
            scaleTransform.Image  = rotateTransform.OutputImage;
            cropTransform.Image   = scaleTransform.OutputImage;
            var cropped = cropTransform.OutputImage;

            // Note that the above pipeline could be easily appended with other image manipulations.
            // For example, to change the image contrast. It would be most efficient to handle all of
            // the image manipulation in a single Core Image pipeline because it can be hardware optimized.

            // Only need to create this buffer one time and then we can reuse it for every frame
            if (resultBuffer == null || resultBuffer.Handle == IntPtr.Zero)
            {
                byte[] data = new byte[(int)targetSize.Height * 4 * (int)targetSize.Width];

                // HACK: backing the buffer with a managed byte[] keeps its memory alive so it can be reused every frame
                resultBuffer = CVPixelBuffer.Create((nint)targetSize.Width, (nint)targetSize.Height, CVPixelFormatType.CV32BGRA, data, 4 * (nint)targetSize.Width, null);

                if (resultBuffer == null)
                {
                    throw new Exception("Can't allocate pixel buffer.");
                }
            }

            context.Render(cropped, resultBuffer);

            //  For debugging
            //var image = ImageBufferToUIImage(resultBuffer);
            //Console.WriteLine("Image size: " + image.Size); // set breakpoint to see image being provided to CoreML

            return resultBuffer;
        }
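
A sketch of where this variant would typically be called, assuming it lives on an AVCaptureVideoDataOutputSampleBufferDelegate; the 227x227 size is illustrative and should match the model's declared input:

        public override void DidOutputSampleBuffer(AVCaptureOutput captureOutput,
                                                   CMSampleBuffer sampleBuffer,
                                                   AVCaptureConnection connection)
        {
            try
            {
                var pixelBuffer = CroppedSampleBuffer(sampleBuffer, new CGSize(227, 227));
                // ... hand pixelBuffer to the CoreML model / Vision request here ...
            }
            finally
            {
                // Capture buffers come from a small pool; dispose them or the camera stalls.
                sampleBuffer.Dispose();
            }
        }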