public void EmptyOptions()
        {
            CIDetectorOptions options = new CIDetectorOptions();

            using (var dtor = CIDetector.CreateFaceDetector(null, options)) {
                Assert.That(dtor.Description, Is.StringContaining("CIFaceCoreDetector").Or.StringContaining("FaceDetector"), "Description");
            }
        }
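The test above only checks that a detector can be created from an empty CIDetectorOptions. In practice you would normally set at least the accuracy before creating the detector. A minimal usage sketch, assuming an existing CIImage named image (that variable is hypothetical, not part of the test):

        var opts = new CIDetectorOptions {
            Accuracy = FaceDetectorAccuracy.High   // favour precision over speed
        };

        using (var detector = CIDetector.CreateFaceDetector(null, opts)) {
            // FeaturesInImage returns CIFeature[]; for a face detector the elements are CIFaceFeature.
            foreach (CIFaceFeature face in detector.FeaturesInImage(image))
                Console.WriteLine("Face found at {0}", face.Bounds);
        }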
Example #2
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            this.Title   = "IISS";
            faceDetector = CIDetector.CreateFaceDetector(CIContext.FromOptions(null), false);
            borderImage  = UIImage.FromFile("square.png");

            UIDevice.CurrentDevice.BeginGeneratingDeviceOrientationNotifications();

            // Perform any additional setup after loading the view, typically from a nib.
        }
Example #3
        /// <summary>
        /// Creates an instance of FaceDetector. Once created, the two arguments cannot be changed for this instance.
        /// If high accuracy is used, the preprocessImageScale property is set to 1.0; otherwise it defaults to 0.125.
        /// </summary>
        /// <param name="useHighAccuracy">Whether to use high accuracy for detection.</param>
        /// <param name="trackFaces">Whether to track faces in live video or successive image detections; available ONLY in iOS 6.0 and later.</param>
        public FaceDetector(bool useHighAccuracy = false, bool trackFaces = false)
        {
            // setup face detector
            var context = CIContext.Context((Dictionary<object, object>)null);
            var opts = new Dictionary<object, object>();
            opts[CIDetector.Accuracy] = useHighAccuracy ? CIDetector.AccuracyHigh : CIDetector.AccuracyLow;
            opts[CIDetector.Tracking] = trackFaces ? 1 : 0;

            _detector = CIDetector.DetectorOfType(CIDetector.TypeFace, context, opts);

            if (useHighAccuracy)
                PreprocessImageScale = 1.0f;
        }
        public void NullContext()
        {
            using (var dtor = CIDetector.CreateFaceDetector(null, true)) {
            }

            using (var dtor = CIDetector.CreateFaceDetector(null, false)) {
            }

            using (var dtor = CIDetector.CreateFaceDetector(null, false, 2.0f)) {
            }

            using (var dtor = CIDetector.CreateFaceDetector(null, null, null, null)) {
            }
        }
Example #5
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            this.Title = "Intelligent Kiosk";

            SetupSource();

            var options = new CIContextOptions();

            faceDetector = CIDetector.CreateFaceDetector(null, true);
            borderImage  = UIImage.FromFile("square.png");

            UIDevice.CurrentDevice.BeginGeneratingDeviceOrientationNotifications();
        }
Example #6
        /// <summary>
        /// Creates an instance of FaceDetector. Once created, the two arguments cannot be changed for this instance.
        /// If high accuracy is used, the preprocessImageScale property is set to 1.0; otherwise it defaults to 0.125.
        /// </summary>
        /// <param name="useHighAccuracy">Whether to use high accuracy for detection.</param>
        /// <param name="trackFaces">Whether to track faces in live video or successive image detections; available ONLY in iOS 6.0 and later.</param>
        public FaceDetector(bool useHighAccuracy = false, bool trackFaces = false)
        {
            // setup face detector
            var context = CIContext.Context((Dictionary <object, object>)null);
            var opts    = new Dictionary <object, object>();

            opts[CIDetector.Accuracy] = useHighAccuracy ? CIDetector.AccuracyHigh : CIDetector.AccuracyLow;
            opts[CIDetector.Tracking] = trackFaces ? 1 : 0;

            _detector = CIDetector.DetectorOfType(CIDetector.TypeFace, context, opts);

            if (useHighAccuracy)
            {
                preprocessImageScale = 1.0f;
            }
        }
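The constructor above only builds the underlying CIDetector; no detection call is shown. A rough sketch of how this wrapper might be used, where the method name DetectFaces is an assumption and the FeaturesInImage call mirrors the other examples on this page; only _detector and CIFaceFeature come from the original code:

        // Hypothetical helper: run the configured detector over a CIImage and return the typed face features.
        public CIFaceFeature[] DetectFaces(CIImage image)
        {
            var features = _detector.FeaturesInImage(image);
            return Array.ConvertAll(features, f => (CIFaceFeature)f);
        }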
Example #7
        public void NullContext()
        {
            using (var dtor = CIDetector.CreateFaceDetector(null, true)) {
            }

            using (var dtor = CIDetector.CreateFaceDetector(null, false)) {
            }

            if (TestRuntime.CheckSystemAndSDKVersion(6, 0))
            {
                using (var dtor = CIDetector.CreateFaceDetector(null, false, 2.0f)) {
                }
            }

            using (var dtor = CIDetector.CreateFaceDetector(null, null, null, null)) {
            }
        }
Example #8
 public PersonObserver(CameraService cameraService)
 {
     _cameraService = cameraService;
     detector       = CIDetector.CreateFaceDetector(null, true);
     client         = new FaceAccessClient();
     speechService  = new SpeechService();
     timer          = new Task(async() =>
     {
         while (true)
         {
             await Task.Delay(500);
             var result = await CheckCurrentFrame();
             if (result)
             {
                 await Task.Delay(2000);
             }
         }
     });
 }
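CheckCurrentFrame is referenced above but not shown. A rough sketch of what such a method could look like; GetCurrentFrame() on CameraService is purely an assumption, and only the detector field and the CIImage/CIDetector calls mirror the other examples on this page:

     // Hypothetical implementation: grab the latest camera frame and report whether a face is visible.
     Task<bool> CheckCurrentFrame()
     {
         UIImage frame = _cameraService.GetCurrentFrame();   // assumed API on CameraService
         if (frame == null)
             return Task.FromResult(false);

         using (var ciImage = new CIImage(frame))
         {
             var faces = detector.FeaturesInImage(ciImage);
             return Task.FromResult(faces.Length > 0);
         }
     }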
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            this.View.BackgroundColor = UIColor.White;

            NSError error;

            // Setup detector options.
            var options = new CIDetectorOptions {
                Accuracy = FaceDetectorAccuracy.High,
                // You can give a hint here about the rectangles to detect; 1.41 (≈ √2) is the aspect ratio of an A4 sheet of paper, for instance.
                AspectRatio = 1.41f,
            };

            // Create a rectangle detector. Note that you can also create a QR detector or a face detector.
            // Most of this code will also work with other detectors (like streaming to a preview layer and grabbing images).
            this.detector = CIDetector.CreateRectangleDetector(context: null, detectorOptions: options);

            // Create the session. The AVCaptureSession is the managing instance of the whole video handling.
            var captureSession = new AVCaptureSession()
            {
                // Defines what quality we want to use for the images we grab. Photo gives highest resolutions.
                SessionPreset = AVCaptureSession.PresetPhoto
            };

            // Find a suitable AVCaptureDevice for video input.
            var device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);

            if (device == null)
            {
                // This will not work on the iOS Simulator - there is no camera. :-)
                throw new InvalidProgramException("Failed to get AVCaptureDevice for video input!");
            }

            // Create a device input with the device and add it to the session.
            var videoInput = AVCaptureDeviceInput.FromDevice(device, out error);

            if (videoInput == null)
            {
                throw new InvalidProgramException("Failed to get AVCaptureDeviceInput from AVCaptureDevice!");
            }

            // Let session read from the input, this is our source.
            captureSession.AddInput(videoInput);

            // Create output for the video stream. This is the destination.
            var videoOutput = new AVCaptureVideoDataOutput()
            {
                AlwaysDiscardsLateVideoFrames = true
            };

            // Define the video format we want to use. Note that Xamarin exposes the CompressedVideoSetting and UncompressedVideoSetting
            // properties on AVCaptureVideoDataOutput in the Unified API, but I could not get these to work. The VideoSettings property is deprecated,
            // so I use WeakVideoSettings instead, which takes an NSDictionary as input.
            this.videoSettingsDict = new NSMutableDictionary();
            this.videoSettingsDict.Add(CVPixelBuffer.PixelFormatTypeKey, NSNumber.FromUInt32((uint)CVPixelFormatType.CV32BGRA));
            videoOutput.WeakVideoSettings = this.videoSettingsDict;

            // Create a delegate to report back to us when an image has been captured.
            // We want to grab the camera stream and feed it through an AVCaptureVideoDataOutputSampleBufferDelegate,
            // which notifies us whenever a new image is available. An implementation of that delegate is VideoFrameSamplerDelegate in this project.
            this.sampleBufferDelegate = new VideoFrameSamplerDelegate();

            // Processing happens via Grand Central Dispatch (GCD), so we need to provide a queue.
            // This is pretty much like a system managed thread (see: http://zeroheroblog.com/ios/concurrency-in-ios-grand-central-dispatch-gcd-dispatch-queues).
            this.sessionQueue = new DispatchQueue("AVSessionQueue");

            // Assign the queue and the delegate to the output. Now all output will go through the delegate.
            videoOutput.SetSampleBufferDelegate(this.sampleBufferDelegate, this.sessionQueue);

            // Add output to session.
            captureSession.AddOutput(videoOutput);

            // We also want to visualize the input stream. The raw stream can be fed into an AVCaptureVideoPreviewLayer, which is a subclass of CALayer.
            // A CALayer can be added to a UIView. We add that layer to the controller's main view.
            var layer = this.View.Layer;

            this.videoLayer       = AVCaptureVideoPreviewLayer.FromSession(captureSession);
            this.videoLayer.Frame = layer.Bounds;
            layer.AddSublayer(this.videoLayer);

            // All setup! Start capturing!
            captureSession.StartRunning();

            // This is just for information and allows you to get valid values for the detection framerate.
            Console.WriteLine("Available capture framerates:");
            var rateRanges = device.ActiveFormat.VideoSupportedFrameRateRanges;

            foreach (var r in rateRanges)
            {
                Console.WriteLine(r.MinFrameRate + "; " + r.MaxFrameRate + "; " + r.MinFrameDuration + "; " + r.MaxFrameDuration);
            }

            // Configure framerate. Kind of weird way of doing it but the only one that works.
            device.LockForConfiguration(out error);
            // CMTime(value, timescale) represents value/timescale seconds, so 1/DETECTION_FPS is the duration of a single frame at DETECTION_FPS frames per second.
            device.ActiveVideoMinFrameDuration = new CMTime(1, DETECTION_FPS);
            device.ActiveVideoMaxFrameDuration = new CMTime(1, DETECTION_FPS);
            device.UnlockForConfiguration();

            // Put a small image view at the top left that shows the live image with the detected rectangle(s).
            this.imageViewOverlay = new UIImageView
            {
                ContentMode     = UIViewContentMode.ScaleAspectFit,
                BackgroundColor = UIColor.Gray
            };
            this.imageViewOverlay.Layer.BorderColor = UIColor.Red.CGColor;
            this.imageViewOverlay.Layer.BorderWidth = 3f;
            this.Add(this.imageViewOverlay);

            // Put another image view top right that shows the image with perspective correction.
            this.imageViewPerspective = new UIImageView
            {
                ContentMode     = UIViewContentMode.ScaleAspectFit,
                BackgroundColor = UIColor.Gray
            };
            this.imageViewPerspective.Layer.BorderColor = UIColor.Red.CGColor;
            this.imageViewPerspective.Layer.BorderWidth = 3f;
            this.Add(this.imageViewPerspective);

            // Add some labels for information.
            this.mainWindowLbl = new UILabel
            {
                Text          = "Live stream from camera. Point camera to a rectangular object.",
                TextAlignment = UITextAlignment.Center
            };
            this.Add(this.mainWindowLbl);

            this.detectionWindowLbl = new UILabel
            {
                Text          = "Detected rectangle overlay",
                TextAlignment = UITextAlignment.Center
            };
            this.Add(this.detectionWindowLbl);

            this.perspectiveWindowLbl = new UILabel
            {
                Text          = "Perspective corrected",
                TextAlignment = UITextAlignment.Center
            };
            this.Add(this.perspectiveWindowLbl);
        }
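ViewDidLoad only wires up the capture pipeline; the actual detection happens in VideoFrameSamplerDelegate, which is not shown here. A sketch of how a captured frame could be pushed through the rectangle detector, assuming the delegate exposes an ImageCaptured event that delivers each frame as a CIImage (the event name and signature are assumptions; detector and CIRectangleFeature come from the code above):

            // Hypothetical wiring, e.g. at the end of ViewDidLoad:
            this.sampleBufferDelegate.ImageCaptured += (sender, ciImage) =>
            {
                var features = this.detector.FeaturesInImage(ciImage);
                if (features.Length > 0)
                {
                    var rect = (CIRectangleFeature)features[0];
                    Console.WriteLine("Rectangle: TL={0} TR={1} BR={2} BL={3}",
                        rect.TopLeft, rect.TopRight, rect.BottomRight, rect.BottomLeft);
                }
            };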
Example #10
        public void UseDetector()
        {
            var options = new CIDetectorOptions {
                Accuracy = FaceDetectorAccuracy.High,
                AspectRatio = 1.41f
            };

            detector = CIDetector.CreateRectangleDetector (context: null, detectorOptions: options);

            using (CIImage ciImage = new CIImage (_parent.ImageView.Image))
            {
                InvokeOnMainThread (() =>
                {
                    using (var dict = new NSMutableDictionary ())
                    {
                        var orient = GetExifOrientation (_parent.ImageView.Image);
                        var rectangles = detector.FeaturesInImage (ciImage, orient);
                        if (rectangles.Length > 0)
                        {
                            _currRect = (CIRectangleFeature)rectangles [0];

                            _markers [0].Location = ConvertImageToScreenCoords (_currRect.TopLeft);
                            _markers [1].Location = ConvertImageToScreenCoords (_currRect.TopRight);
                            _markers [2].Location = ConvertImageToScreenCoords (_currRect.BottomRight);
                            _markers [3].Location = ConvertImageToScreenCoords (_currRect.BottomLeft);
                        }
                    }
                });
            }
        }
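The markers above only visualize the detected corners. To produce the kind of "perspective corrected" output referenced in the previous example, the four corners of the CIRectangleFeature can be fed into Core Image's CIPerspectiveCorrection filter. A minimal sketch using the generic key/value CIFilter API, where ciImage and _currRect are the image and feature from UseDetector above:

            using (var filter = CIFilter.FromName ("CIPerspectiveCorrection"))
            {
                filter.SetValueForKey (ciImage, new NSString ("inputImage"));
                filter.SetValueForKey (new CIVector (_currRect.TopLeft.X, _currRect.TopLeft.Y), new NSString ("inputTopLeft"));
                filter.SetValueForKey (new CIVector (_currRect.TopRight.X, _currRect.TopRight.Y), new NSString ("inputTopRight"));
                filter.SetValueForKey (new CIVector (_currRect.BottomRight.X, _currRect.BottomRight.Y), new NSString ("inputBottomRight"));
                filter.SetValueForKey (new CIVector (_currRect.BottomLeft.X, _currRect.BottomLeft.Y), new NSString ("inputBottomLeft"));

                // The cropped, deskewed result; render it with a CIContext to display or save it.
                CIImage corrected = filter.OutputImage;
            }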