public override void ViewDidLoad ()
		{
			base.ViewDidLoad ();

			this.View.BackgroundColor = UIColor.White;

			NSError error;

			// Set up the detector options.
			var options = new CIDetectorOptions {
				Accuracy = FaceDetectorAccuracy.High,
				// A hint about the aspect ratio of the rectangles to detect; 1.41 roughly matches an A4 sheet of paper, for instance.
				AspectRatio = 1.41f
			};

			// Create a rectangle detector. Note that you could also create a QR code detector or a face detector.
			// Most of this code will also work with other detectors (like streaming to a preview layer and grabbing images).
			this.detector = CIDetector.CreateRectangleDetector (context: null, detectorOptions: options);

			// Create the session. The AVCaptureSession is the managing instance of the whole video handling.
			var captureSession = new AVCaptureSession ()
			{ 
				// Defines what quality we want to use for the images we grab. Photo gives the highest resolution.
				SessionPreset = AVCaptureSession.PresetPhoto
			};

			// Find a suitable AVCaptureDevice for video input.
			var device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
			if (device == null)
			{
				// This will not work on the iOS Simulator - there is no camera. :-)
				throw new InvalidProgramException ("Failed to get AVCaptureDevice for video input!");
			}

			// Create a device input with the device and add it to the session.
			var videoInput = AVCaptureDeviceInput.FromDevice (device, out error);
			if (videoInput == null)
			{
				throw new InvalidProgramException ("Failed to get AVCaptureDeviceInput from AVCaptureDevice!");
			}

			// Let the session read from the input; this is our source.
			captureSession.AddInput (videoInput);

			// Create output for the video stream. This is the destination.
			var videoOutput = new AVCaptureVideoDataOutput () {
				AlwaysDiscardsLateVideoFrames = true
			};

			// Define the video format we want to use. Note that Xamarin exposes the CompressedVideoSetting and UncompressedVideoSetting
			// properties on AVCaptureVideoDataOutput in the Unified API, but I could not get these to work. The VideoSettings property is deprecated,
			// so I use WeakVideoSettings instead, which takes an NSDictionary as input.
			this.videoSettingsDict = new NSMutableDictionary ();
			this.videoSettingsDict.Add (CVPixelBuffer.PixelFormatTypeKey, NSNumber.FromUInt32((uint)CVPixelFormatType.CV32BGRA));
			videoOutput.WeakVideoSettings = this.videoSettingsDict;

			// Create a delegate to report back to us when an image has been captured.
			// We want to grab the camera stream and feed it through an AVCaptureVideoDataOutputSampleBufferDelegate,
			// which allows us to get notified when a new image is available. An implementation of that delegate is VideoFrameSamplerDelegate
			// in this project; a rough sketch of what such a delegate might look like follows after this method.
			this.sampleBufferDelegate = new VideoFrameSamplerDelegate ();

			// Processing happens via Grand Central Dispatch (GCD), so we need to provide a queue.
			// This is pretty much like a system-managed thread (see: http://zeroheroblog.com/ios/concurrency-in-ios-grand-central-dispatch-gcd-dispatch-queues).
			this.sessionQueue = new DispatchQueue ("AVSessionQueue");

			// Assign the queue and the delegate to the output. Now all output will go through the delegate.
			videoOutput.SetSampleBufferDelegate(this.sampleBufferDelegate, this.sessionQueue);

			// Add output to session.
			captureSession.AddOutput(videoOutput);

			// We also want to visualize the input stream. The raw stream can be fed into an AVCaptureVideoPreviewLayer, which is a subclass of CALayer.
			// A CALayer can be added to a UIView. We add that layer to the controller's main view.
			var layer = this.View.Layer;
			this.videoLayer = AVCaptureVideoPreviewLayer.FromSession (captureSession);
			this.videoLayer.Frame = layer.Bounds;
			layer.AddSublayer (this.videoLayer);

			// All setup! Start capturing!
			captureSession.StartRunning ();

			// This is just for information and allows you to get valid values for the detection framerate. 
			Console.WriteLine ("Available capture framerates:");
			var rateRanges = device.ActiveFormat.VideoSupportedFrameRateRanges;
			foreach (var r in rateRanges)
			{
				Console.WriteLine (r.MinFrameRate + "; " + r.MaxFrameRate + "; " + r.MinFrameDuration + "; " + r.MaxFrameDuration);
			}

			// Configure the framerate. It is a somewhat roundabout way of doing it, but the only one that works.
			device.LockForConfiguration (out error);
			// CMTime(value, timescale) represents value/timescale seconds, so CMTime(1, DETECTION_FPS) is the duration of one frame at DETECTION_FPS frames per second.
			device.ActiveVideoMinFrameDuration = new CMTime(1, DETECTION_FPS);
			device.ActiveVideoMaxFrameDuration = new CMTime(1, DETECTION_FPS);
			device.UnlockForConfiguration ();

			// Put a small image view at the top left that shows the live image with the detected rectangle(s).
			this.imageViewOverlay = new UIImageView
			{ 
				ContentMode = UIViewContentMode.ScaleAspectFit,
				BackgroundColor = UIColor.Gray
			};
			this.imageViewOverlay.Layer.BorderColor = UIColor.Red.CGColor;
			this.imageViewOverlay.Layer.BorderWidth = 3f;
			this.Add (this.imageViewOverlay);

			// Put another image view top right that shows the image with perspective correction.
			this.imageViewPerspective = new UIImageView
			{ 
				ContentMode = UIViewContentMode.ScaleAspectFit,
				BackgroundColor = UIColor.Gray
			};
			this.imageViewPerspective.Layer.BorderColor = UIColor.Red.CGColor;
			this.imageViewPerspective.Layer.BorderWidth = 3f;
			this.Add (this.imageViewPerspective);

			// Add some labels for information.
			this.mainWindowLbl = new UILabel
			{
				Text = "Live stream from camera. Point camera to a rectangular object.",
				TextAlignment = UITextAlignment.Center
			};
			this.Add (this.mainWindowLbl);

			this.detectionWindowLbl = new UILabel
			{
				Text = "Detected rectangle overlay",
				TextAlignment = UITextAlignment.Center
			};
			this.Add (this.detectionWindowLbl);

			this.perspectiveWindowLbl = new UILabel
			{
				Text = "Perspective corrected",
				TextAlignment = UITextAlignment.Center
			};
			this.Add (this.perspectiveWindowLbl);
		}
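
The listing above references a VideoFrameSamplerDelegate that is defined elsewhere in the project and not shown here. As a rough, hypothetical sketch (the project's actual implementation may differ), a sample-buffer delegate that runs each frame through the rectangle detector and applies the built-in CIPerspectiveCorrection filter could look roughly like this; the constructor wiring and the RectangleDetected event are assumptions made for illustration only.

using System;
using AVFoundation;
using CoreFoundation;
using CoreImage;
using CoreMedia;
using CoreVideo;
using Foundation;
using UIKit;

// Hypothetical sketch only - not the project's actual VideoFrameSamplerDelegate.
public class VideoFrameSamplerDelegate : AVCaptureVideoDataOutputSampleBufferDelegate
{
	readonly CIDetector detector;
	readonly CIContext ciContext = CIContext.FromOptions (null);

	// Assumed constructor: the controller hands over the CIDetector it created in ViewDidLoad.
	public VideoFrameSamplerDelegate (CIDetector detector)
	{
		this.detector = detector;
	}

	// Assumed event used to hand the processed image back to the UI.
	public event Action<UIImage> RectangleDetected;

	public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
	{
		try
		{
			using (var pixelBuffer = sampleBuffer.GetImageBuffer () as CVPixelBuffer)
			using (var image = new CIImage (pixelBuffer))
			{
				// Run the rectangle detector on the current frame and take the first hit, if any.
				var features = this.detector.FeaturesInImage (image);
				var rectangle = features.Length > 0 ? features [0] as CIRectangleFeature : null;
				if (rectangle == null)
				{
					return;
				}

				// Perspective-correct the detected area with the standard CIPerspectiveCorrection filter.
				var correction = CIFilter.FromName ("CIPerspectiveCorrection");
				correction.SetValueForKey (image, new NSString ("inputImage"));
				correction.SetValueForKey (new CIVector (rectangle.TopLeft.X, rectangle.TopLeft.Y), new NSString ("inputTopLeft"));
				correction.SetValueForKey (new CIVector (rectangle.TopRight.X, rectangle.TopRight.Y), new NSString ("inputTopRight"));
				correction.SetValueForKey (new CIVector (rectangle.BottomLeft.X, rectangle.BottomLeft.Y), new NSString ("inputBottomLeft"));
				correction.SetValueForKey (new CIVector (rectangle.BottomRight.X, rectangle.BottomRight.Y), new NSString ("inputBottomRight"));

				using (var corrected = correction.OutputImage)
				using (var cgImage = this.ciContext.CreateCGImage (corrected, corrected.Extent))
				{
					var uiImage = UIImage.FromImage (cgImage);
					// The delegate runs on the session queue; UI updates must go back to the main queue.
					DispatchQueue.MainQueue.DispatchAsync (() => this.RectangleDetected?.Invoke (uiImage));
				}
			}
		}
		finally
		{
			// Sample buffers must be released promptly or the capture pipeline will stall.
			sampleBuffer.Dispose ();
		}
	}
}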
Example #3
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            this.View.BackgroundColor = UIColor.White;

            NSError error;


            // Create the session. The AVCaptureSession is the managing instance of the whole video handling.
            var captureSession = new AVCaptureSession()
            {
                // Defines what quality we want to use for the images we grab. Photo gives the highest resolution.
                SessionPreset = AVCaptureSession.PresetPhoto
            };

            // Find a suitable AVCaptureDevice for video input.
            var device = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);

            if (device == null)
            {
                // This will not work on the iOS Simulator - there is no camera. :-)
                throw new InvalidProgramException("Failed to get AVCaptureDevice for video input!");
            }

            // Create a device input with the device and add it to the session.
            var videoInput = AVCaptureDeviceInput.FromDevice(device, out error);

            if (videoInput == null)
            {
                throw new InvalidProgramException("Failed to get AVCaptureDeviceInput from AVCaptureDevice!");
            }

            // Let the session read from the input; this is our source.
            captureSession.AddInput(videoInput);

            // Create output for the video stream. This is the destination.
            var videoOutput = new AVCaptureVideoDataOutput()
            {
                AlwaysDiscardsLateVideoFrames = true
            };

            // Define the video format we want to use. Note that Xamarin exposes the CompressedVideoSetting and UncompressedVideoSetting
            // properties on AVCaptureVideoDataOutput in the Unified API, but I could not get these to work. The VideoSettings property is deprecated,
            // so I use WeakVideoSettings instead, which takes an NSDictionary as input.
            this.videoSettingsDict = new NSMutableDictionary();
            this.videoSettingsDict.Add(CVPixelBuffer.PixelFormatTypeKey, NSNumber.FromUInt32((uint)CVPixelFormatType.CV32BGRA));
            videoOutput.WeakVideoSettings = this.videoSettingsDict;

            // Create a delegate to report back to us when an image has been captured.
            // We want to grab the camera stream and feed it through an AVCaptureVideoDataOutputSampleBufferDelegate,
            // which allows us to get notified when a new image is available. An implementation of that delegate is VideoFrameSamplerDelegate in this project.
            this.sampleBufferDelegate = new VideoFrameSamplerDelegate();

            // Processing happens via Grand Central Dispatch (GCD), so we need to provide a queue.
            // This is pretty much like a system-managed thread (see: http://zeroheroblog.com/ios/concurrency-in-ios-grand-central-dispatch-gcd-dispatch-queues).
            this.sessionQueue = new DispatchQueue("AVSessionQueue");

            // Assign the queue and the delegate to the output. Now all output will go through the delegate.
            videoOutput.SetSampleBufferDelegateQueue(this.sampleBufferDelegate, this.sessionQueue);

            // Add output to session.
            captureSession.AddOutput(videoOutput);

            // We also want to visualize the input stream. The raw stream can be fed into an AVCaptureVideoPreviewLayer, which is a subclass of CALayer.
            // A CALayer can be added to a UIView. We add that layer to the controller's main view.
            var layer = this.View.Layer;

            this.videoLayer       = AVCaptureVideoPreviewLayer.FromSession(captureSession);
            this.videoLayer.Frame = layer.Bounds;
            layer.AddSublayer(this.videoLayer);

            // All setup! Start capturing!
            captureSession.StartRunning();

            // Configure the framerate. It is a somewhat roundabout way of doing it, but the only one that works.
            device.LockForConfiguration(out error);
            // CMTime(value, timescale) represents value/timescale seconds, so CMTime(1, DETECTION_FPS) is the duration of one frame at DETECTION_FPS frames per second.
            device.ActiveVideoMinFrameDuration = new CMTime(1, DETECTION_FPS);
            device.ActiveVideoMaxFrameDuration = new CMTime(1, DETECTION_FPS);
            device.UnlockForConfiguration();
        }
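
Both listings apply a hard-coded DETECTION_FPS without checking the ranges reported by VideoSupportedFrameRateRanges. As a small, hypothetical addition (not part of the original samples), the requested rate could be clamped to what the active format actually supports before locking the configuration. ConfigureFrameRate is an assumed helper name, and the method is meant to live in the same controller class as the listings above.

        // Hypothetical helper (assumed name): clamp the requested framerate to the device's
        // supported range before applying it, so we never set an unsupported frame duration.
        static void ConfigureFrameRate (AVCaptureDevice device, int requestedFps)
        {
            double maxSupported = 0;
            foreach (var range in device.ActiveFormat.VideoSupportedFrameRateRanges)
            {
                maxSupported = Math.Max (maxSupported, range.MaxFrameRate);
            }

            // Fall back to the requested value if the device reports no ranges at all.
            var fps = maxSupported > 0 ? (int)Math.Min (requestedFps, maxSupported) : requestedFps;

            NSError error;
            if (device.LockForConfiguration (out error))
            {
                // CMTime(1, fps) is the duration of a single frame at fps frames per second.
                device.ActiveVideoMinFrameDuration = new CMTime (1, fps);
                device.ActiveVideoMaxFrameDuration = new CMTime (1, fps);
                device.UnlockForConfiguration ();
            }
        }

The LockForConfiguration/UnlockForConfiguration block at the end of each listing could then be replaced by a single ConfigureFrameRate(device, DETECTION_FPS) call.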