Example no. 1
        private void DrawFaces(CIImage image, CGRect cleanAperture)
        {
            if (image == null)
            {
                return;
            }

            var features = faceDetector.FeaturesInImage(image);

            if (features.Length > 0)
            {
                IsFaceDetected = true;
            }

            DrawFaces(features, cleanAperture, UIDeviceOrientation.Portrait);
        }
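
The `faceDetector` field used by `DrawFaces` is created elsewhere in the sample. A minimal sketch of how it might be set up with `CIDetector.CreateFaceDetector` (the method name below is illustrative, not part of the original code):

        void CreateFaceDetector()
        {
            var faceOptions = new CIDetectorOptions {
                Accuracy = FaceDetectorAccuracy.High
            };
            // Passing a null context lets Core Image create its own context for detection.
            faceDetector = CIDetector.CreateFaceDetector(null, faceOptions);
        }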
        /// <summary>
        /// Called by the VideoFrameSamplerDelegate whenever a new image has been captured. Performs the rectangle detection.
        /// </summary>
        /// <param name="sender">Sender.</param>
        /// <param name="e">Event arguments</param>
        void HandleImageCaptured(object sender, ImageCaptureEventArgs e)
        {
            // Detect the rectangles in the captured image.
            // Important: cast CGImage to CIImage explicitly. There is an implicit cast operator from CGImage to CIImage, but if we
            // passed the CGImage in to FeaturesInImage(), many (implicit) CIImage instances would be created because this
            // method is called very often. The garbage collector cannot keep up with that and we run out of memory.
            // By casting manually and using() the CIImage, it will be disposed immediately, freeing up memory.
            using (CIImage inputCIImage = (CIImage)e.Image)
            {
                // Let the detector do its work on the image.
                var rectangles = detector.FeaturesInImage(inputCIImage);

                // Find the biggest rectangle. Note: in my tests more than one rectangle was never detected, but it's better to be prepared.
                nfloat             maxWidth    = 0f;
                nfloat             maxHeight   = 0f;
                CIRectangleFeature biggestRect = rectangles.Length > 0 ? (CIRectangleFeature)rectangles [0] : null;

                Console.WriteLine("Found " + rectangles.Length + " rectangles.");

                foreach (CIRectangleFeature rect in rectangles)
                {
                    Console.WriteLine("Found rect: " + rect);
                    nfloat minX = (nfloat)Math.Min(rect.TopLeft.X, rect.BottomLeft.X);
                    // Core Image uses a bottom-left origin, so the "top" corners carry the larger Y values.
                    nfloat minY = (nfloat)Math.Min(rect.BottomLeft.Y, rect.BottomRight.Y);
                    nfloat maxX = (nfloat)Math.Max(rect.TopRight.X, rect.BottomRight.X);
                    nfloat maxY = (nfloat)Math.Max(rect.TopLeft.Y, rect.TopRight.Y);

                    if (maxX - minX > maxWidth && maxY - minY > maxHeight)
                    {
                        maxWidth  = maxX - minX;
                        maxHeight = maxY - minY;

                        biggestRect = rect;
                    }
                }

                if (biggestRect == null)
                {
                    this.InvokeOnMainThread(() => {
                        this.imageViewOverlay.Image     = null;
                        this.imageViewPerspective.Image = null;
                    });
                    return;
                }

                Console.WriteLine("Highlighting: top left = " + biggestRect.TopLeft + "; top right = " + biggestRect.TopRight + "; bottom left = " + biggestRect.BottomLeft + "; bottom right = " + biggestRect.BottomRight);

                // We are not on the main thread here.
                this.InvokeOnMainThread(() => {
                    // Adjust the overlay image to the corners of the detected rectangle with CIPerspectiveTransformWithExtent.
                    using (var dict = new NSMutableDictionary())
                    {
                        dict.Add(key: new NSString("inputExtent"), value: new CIVector(inputCIImage.Extent));
                        dict.Add(key: new NSString("inputTopLeft"), value: new CIVector(biggestRect.TopLeft));
                        dict.Add(key: new NSString("inputTopRight"), value: new CIVector(biggestRect.TopRight));
                        dict.Add(key: new NSString("inputBottomLeft"), value: new CIVector(biggestRect.BottomLeft));
                        dict.Add(key: new NSString("inputBottomRight"), value: new CIVector(biggestRect.BottomRight));

                        // Create a semi-transparent CIImage which will show the detected rectangle.
                        using (var overlayCIImage = new CIImage(color: CIColor.FromRgba(red: 1.0f, green: 0f, blue: 0f, alpha: 0.5f))
                                                    // Size it to the source image.
                                                    .ImageByCroppingToRect(inputCIImage.Extent)
                                                    // Apply perspective distortion to the overlay rectangle to map it to the current camera picture.
                                                    .CreateByFiltering("CIPerspectiveTransformWithExtent", dict)
                                                    // Place overlay on the image.
                                                    .CreateByCompositingOverImage(inputCIImage))
                        {
                            // Must convert the CIImage into a CGImage and from there into a UIImage.
                            // Could go directly from CIImage to UIImage but when assigning the result to a UIImageView, the ContentMode of
                            // the image view will be ignored and no proper aspect scaling will take place.
                            using (var ctx = CIContext.FromOptions(null))
                                using (CGImage convertedCGImage = ctx.CreateCGImage(overlayCIImage, overlayCIImage.Extent))
                                    // This crashes with Xamarin.iOS
                                    //using(UIImage convertedUIImage = UIImage.FromImage(convertedCGImage, 1f, UIApplication.SharedApplication.StatusBarOrientation == UIInterfaceOrientation.LandscapeLeft ? UIImageOrientation.DownMirrored : UIImageOrientation.UpMirrored))
                                    // This works.
                                    using (UIImage convertedUIImage = UIImage.FromImage(convertedCGImage))
                                    {
                                        // Show converted image in UI.
                                        this.imageViewOverlay.Image = convertedUIImage;
                                    }
                        }
                    }

                    // Apply a perspective correction with CIPerspectiveCorrection to the detected rectangle and display in another UIImageView.
                    using (var dict = new NSMutableDictionary())
                    {
                        dict.Add(key: new NSString("inputTopLeft"), value: new CIVector(biggestRect.TopLeft));
                        dict.Add(key: new NSString("inputTopRight"), value: new CIVector(biggestRect.TopRight));
                        dict.Add(key: new NSString("inputBottomLeft"), value: new CIVector(biggestRect.BottomLeft));
                        dict.Add(key: new NSString("inputBottomRight"), value: new CIVector(biggestRect.BottomRight));

                        // Use again CIImage -> CGImage -> UIImage to prevent scaling issues (see above).
                        using (var perspectiveCorrectedImage = inputCIImage.CreateByFiltering("CIPerspectiveCorrection", dict))
                            using (var ctx = CIContext.FromOptions(null))
                                using (CGImage convertedCGImage = ctx.CreateCGImage(perspectiveCorrectedImage, perspectiveCorrectedImage.Extent))
                                    using (UIImage convertedUIImage = UIImage.FromImage(convertedCGImage))
                                    {
                                        this.imageViewPerspective.Image = convertedUIImage;
                                    }
                    }
                });
            }

            Console.WriteLine("---------------------");
        }
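
The `detector` field and the wiring of `HandleImageCaptured` are not shown above. A minimal setup sketch, reusing the `CIDetectorOptions` from Example no. 4 below; the `videoFrameSampler` field and its `ImageCaptured` event are assumptions about the sample's frame-sampler class:

        void SetupRectangleDetection()
        {
            var options = new CIDetectorOptions {
                Accuracy    = FaceDetectorAccuracy.High,
                AspectRatio = 1.41f   // favour roughly A4-shaped rectangles
            };
            detector = CIDetector.CreateRectangleDetector(context: null, detectorOptions: options);

            // Assumed wiring: the frame sampler delegate raises an event for every captured frame.
            videoFrameSampler.ImageCaptured += HandleImageCaptured;
        }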
Example no. 3
/*
 *              public void DetectInPixels32Async(Action<Face[]> callback, Color32[] data, int width, int height, CGImageOrientation imageOrientation = CGImageOrientation.Default) {
 *                      if (_opQueue == null)
 *                              _opQueue = new NSOperationQueue();
 *
 *                      var cgimage = CGImage.FromPixels32(data, width, height, preprocessImageScale);
 *                      data = null;
 *
 *                      _opQueue.AddOperation(delegate() {
 *                              var ciimage = new CIImage(cgimage);
 *                              cgimage = null;
 *                              Face[] faces = DetectInImage(ciimage, imageOrientation);
 *                              ciimage = null;
 *
 *                              CoreXT.RunOnMainThread(delegate() {
 *                                      callback(faces);
 *                                      faces = null;
 *                              });
 *                      });
 *              }
 */

        /// <summary>
        /// Detects faces in the given image.
        /// </summary>
        /// <returns>The detected faces.</returns>
        /// <param name="ciimage">The CIImage to analyze.</param>
        /// <param name="imageOrientation">Image orientation.</param>
        public Face[] DetectInImage(CIImage ciimage, CGImageOrientation imageOrientation = CGImageOrientation.Default)
        {
            var rect        = ciimage.Extent();
            int imageHeight = (int)rect.height;
            int imageWidth  = (int)rect.width;

            _finalScale = projectedScale / preprocessImageScale;

            // options
            _imageOpts[CIDetector.ImageOrientation] = (int)imageOrientation;
            _imageOpts[CIDetector.Smile]            = detectSmiles;
            _imageOpts[CIDetector.EyeBlink]         = detectBlinks;

            // detect
            var features = _detector.FeaturesInImage(ciimage, _imageOpts);

            // go through features and transform coords
            var faces = new Face[features.Length];

            for (int i = 0; i < features.Length; i++)
            {
                var feature = features[i] as CIFaceFeature;
                var face    = new Face();

                face.bounds = _FixRect(feature.bounds, imageHeight, imageWidth, imageOrientation);

                if (feature.hasMouthPosition)
                {
                    face.hasMouthPosition = true;
                    face.mouthPosition    = _FixPoint(feature.mouthPosition, imageHeight, imageWidth, imageOrientation);
                }

                if (feature.hasLeftEyePosition)
                {
                    face.hasLeftEyePosition = true;
                    face.leftEyePosition    = _FixPoint(feature.leftEyePosition, imageHeight, imageWidth, imageOrientation);
                }

                if (feature.hasRightEyePosition)
                {
                    face.hasRightEyePosition = true;
                    face.rightEyePosition    = _FixPoint(feature.rightEyePosition, imageHeight, imageWidth, imageOrientation);
                }

                if (feature.RespondsToSelector("trackingID"))
                {
                    if (feature.hasTrackingID)
                    {
                        face.hasTrackingID = true;
                        face.trackingID    = feature.trackingID;
                    }

                    if (feature.hasTrackingFrameCount)
                    {
                        face.hasTrackingFrameCount = true;
                        face.trackingFrameCount    = feature.trackingFrameCount;
                    }
                }

                if (feature.RespondsToSelector("faceAngle"))
                {
                    if (feature.hasFaceAngle)
                    {
                        face.hasFaceAngle = true;
                        face.faceAngle    = feature.faceAngle;
                    }

                    face.hasSmile       = feature.hasSmile;
                    face.leftEyeClosed  = feature.leftEyeClosed;
                    face.rightEyeClosed = feature.rightEyeClosed;
                }

                faces[i] = face;
            }

            return(faces);
        }
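
A possible synchronous call site, modelled on the commented-out `DetectInPixels32Async` variant above (the method name and the pixel-buffer parameters are illustrative):

        void DetectFromPixels(Color32[] pixels, int width, int height)
        {
            // Build a CIImage from raw pixel data, as the async variant does, then detect synchronously.
            var cgimage = CGImage.FromPixels32(pixels, width, height, preprocessImageScale);
            var ciimage = new CIImage(cgimage);

            Face[] faces = DetectInImage(ciimage, CGImageOrientation.Default);
            foreach (var face in faces)
            {
                Console.WriteLine("Face bounds: " + face.bounds);
                if (face.hasMouthPosition)
                    Console.WriteLine("Mouth at: " + face.mouthPosition);
            }
        }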
Example no. 4
        /// <summary>
        /// Detects faces in the given image.
        /// </summary>
        /// <returns>The detected faces.</returns>
        /// <param name="ciimage">The CIImage to analyze.</param>
        /// <param name="imageOrientation">Image orientation.</param>
        public Face[] DetectInImage(CIImage ciimage, CGImageOrientation imageOrientation = CGImageOrientation.Default)
        {
            var rect        = ciimage.Extent();
            int imageHeight = (int)(rect.height * preprocessImageScale);
            int imageWidth  = (int)(rect.width * preprocessImageScale);

            _finalScale = projectedScale / preprocessImageScale;

            // create CIImage from bitmapdata
//			var ciimage:CIImage = CIImage.fromBitmapData(image, preprocessImageScale); //TODO
//			var ciimage = new CIImage(CGImage.FromTexture2D(image));

            // orientation settings
            _imageOpts[CIDetector.ImageOrientation] = (int)imageOrientation;

            // detect
            var features = _detector.FeaturesInImage(ciimage, _imageOpts);

            // go through features and transform coords
            var faces = new Face[features.Length];

            for (int i = 0; i < features.Length; i++)
            {
                var feature = features[i] as CIFaceFeature;
                var face    = new Face();

                face.bounds = _FixRect(feature.bounds, imageHeight, imageWidth, imageOrientation);

                if (feature.hasMouthPosition)
                {
                    face.hasMouthPosition = true;
                    face.mouthPosition    = _FixPoint(feature.mouthPosition, imageHeight, imageWidth, imageOrientation);
                }

                if (feature.hasLeftEyePosition)
                {
                    face.hasLeftEyePosition = true;
                    face.leftEyePosition    = _FixPoint(feature.leftEyePosition, imageHeight, imageWidth, imageOrientation);
                }

                if (feature.hasRightEyePosition)
                {
                    face.hasRightEyePosition = true;
                    face.rightEyePosition    = _FixPoint(feature.rightEyePosition, imageHeight, imageWidth, imageOrientation);
                }

                if (feature.RespondsToSelector("trackingID"))
                {
                    if (feature.hasTrackingID)
                    {
                        face.hasTrackingID = true;
                        face.trackingID    = feature.trackingID;
                    }

                    if (feature.hasTrackingFrameCount)
                    {
                        face.hasTrackingFrameCount = true;
                        face.trackingFrameCount    = feature.trackingFrameCount;
                    }
                }

                faces[i] = face;
            }

            return(faces);
        }
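
Both `DetectInImage` variants delegate coordinate conversion to `_FixRect` and `_FixPoint`, which are not included here. Core Image reports feature coordinates with a bottom-left origin, so a plausible sketch flips the Y axis and applies `_finalScale`; this is an assumption about what the helpers do (Unity's `Rect`/`Vector2` types assumed, orientation handling omitted):

        Rect _FixRect(Rect r, int imageHeight, int imageWidth, CGImageOrientation imageOrientation)
        {
            // Hypothetical: convert from a bottom-left to a top-left origin and scale to the projected size.
            float y = imageHeight - r.y - r.height;
            return new Rect(r.x * _finalScale, y * _finalScale, r.width * _finalScale, r.height * _finalScale);
        }

        Vector2 _FixPoint(Vector2 p, int imageHeight, int imageWidth, CGImageOrientation imageOrientation)
        {
            // Hypothetical: same Y-flip and scaling for a single point.
            return new Vector2(p.x * _finalScale, (imageHeight - p.y) * _finalScale);
        }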
        public void UseDetector()
        {
            var options = new CIDetectorOptions {
                Accuracy    = FaceDetectorAccuracy.High,
                AspectRatio = 1.41f
            };

            detector = CIDetector.CreateRectangleDetector(context: null, detectorOptions: options);

            using (CIImage ciImage = new CIImage(_parent.ImageView.Image))
            {
                InvokeOnMainThread(() =>
                {
                    using (var dict = new NSMutableDictionary())
                    {
                        var orient     = GetExifOrientation(_parent.ImageView.Image);
                        var rectangles = detector.FeaturesInImage(ciImage, orient);

                        if (rectangles.Length > 0)
                        {
                            _currRect = (CIRectangleFeature)rectangles[0];

                            _markers[0].Location = ConvertImageToScreenCoords(_currRect.TopLeft);
                            _markers[1].Location = ConvertImageToScreenCoords(_currRect.TopRight);
                            _markers[2].Location = ConvertImageToScreenCoords(_currRect.BottomRight);
                            _markers[3].Location = ConvertImageToScreenCoords(_currRect.BottomLeft);
                        }
                    }
                });
            }
        }
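
`GetExifOrientation` and `ConvertImageToScreenCoords` are helpers outside this snippet. A minimal sketch of the orientation helper, assuming it maps `UIImageOrientation` to the EXIF-style `CIImageOrientation` value that `FeaturesInImage` accepts (the standard UIKit-to-EXIF correspondence):

        CIImageOrientation GetExifOrientation(UIImage image)
        {
            switch (image.Orientation)
            {
                case UIImageOrientation.Up:            return CIImageOrientation.TopLeft;
                case UIImageOrientation.UpMirrored:    return CIImageOrientation.TopRight;
                case UIImageOrientation.Down:          return CIImageOrientation.BottomRight;
                case UIImageOrientation.DownMirrored:  return CIImageOrientation.BottomLeft;
                case UIImageOrientation.LeftMirrored:  return CIImageOrientation.LeftTop;
                case UIImageOrientation.Right:         return CIImageOrientation.RightTop;
                case UIImageOrientation.RightMirrored: return CIImageOrientation.RightBottom;
                case UIImageOrientation.Left:          return CIImageOrientation.LeftBottom;
                default:                               return CIImageOrientation.TopLeft;
            }
        }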