bool SetupCaptureSession ()
		{
			// configure the capture session for low resolution, change this if your code
			// can cope with more data or volume
			session = new AVCaptureSession () {
				SessionPreset = AVCaptureSession.PresetMedium
			};

			// create a device input and attach it to the session
			var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
			if (captureDevice == null){
				// No capture device (e.g. when running in the simulator)
				return false;
			}
			var input = AVCaptureDeviceInput.FromDevice (captureDevice);
			if (input == null){
				// No input device
				return false;
			}
			session.AddInput (input);

			// create a VideoDataOutput and add it to the session
			var output = new AVCaptureVideoDataOutput () {
				VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA)
			};

			// configure the output
			queue = new DispatchQueue ("myQueue");
			qrScanner = new QrScanner (this);
			output.SetSampleBufferDelegateAndQueue (qrScanner, queue);
			session.AddOutput (output);

			previewLayer = new AVCaptureVideoPreviewLayer (session);
			previewLayer.Orientation = AVCaptureVideoOrientation.Portrait;
			previewLayer.VideoGravity = "AVLayerVideoGravityResizeAspectFill";

			session.StartRunning ();
			return true;
		}
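The QrScanner passed to SetSampleBufferDelegateAndQueue is defined elsewhere in the project. As a rough sketch, it would be a subclass of AVCaptureVideoDataOutputSampleBufferDelegate along these lines; the constructor argument and the decoding step are placeholders for whatever QR library the app actually uses:

public class QrScanner : AVCaptureVideoDataOutputSampleBufferDelegate {
	readonly object controller;

	public QrScanner (object controller)
	{
		this.controller = controller;
	}

	public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
	{
		try {
			// Hand the frame to the QR decoding library here and notify the controller on a hit.
		} finally {
			// Dispose each buffer promptly so the capture pipeline does not stall.
			sampleBuffer.Dispose ();
		}
	}
}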
Example #2
		bool SetupCaptureSession ()
		{
			// configure the capture session for low resolution, change this if your code
			// can cope with more data or volume
			session = new AVCaptureSession () {
				SessionPreset = AVCaptureSession.PresetMedium
			};
			
			// create a device input and attach it to the session
			var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
			var input = AVCaptureDeviceInput.FromDevice (captureDevice);
			if (input == null){
				Console.WriteLine ("No input device");
				return false;
			}
			session.AddInput (input);
			
			// create a VideoDataOutput and add it to the session
			var output = new AVCaptureVideoDataOutput () {
				VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA),
				
				// Optionally cap the frame rate; this sample caps it at 15 frames per second
				MinFrameDuration = new CMTime (1, 15)
			};
			
			// configure the output
			queue = new MonoTouch.CoreFoundation.DispatchQueue ("myQueue");
			outputRecorder = new OutputRecorder ();
			output.SetSampleBufferDelegateAndQueue (outputRecorder, queue);
			session.AddOutput (output);
			
			session.StartRunning ();
			return true;
		}
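The OutputRecorder used above is likewise defined elsewhere. A minimal sketch of such a delegate, assuming it only needs the BGRA pixel buffer of each frame (what the real class does with it depends on the app):

public class OutputRecorder : AVCaptureVideoDataOutputSampleBufferDelegate {
	public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
	{
		try {
			using (var pixelBuffer = sampleBuffer.GetImageBuffer () as CVPixelBuffer){
				// Work on the 32BGRA frame here; this sketch only reports its size.
				Console.WriteLine ("Captured a {0}x{1} frame", pixelBuffer.Width, pixelBuffer.Height);
			}
		} finally {
			// Release the buffer as soon as possible to keep frames flowing.
			sampleBuffer.Dispose ();
		}
	}
}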
Example #3
        private bool addImageSamplerOutput(out string errorMessage, int minimumSampleIntervalInMilliSeconds)
        {
            errorMessage = "";

            // create a VideoDataOutput and add it to the capture session
            frameGrabberOutput = new AVCaptureVideoDataOutput();
            frameGrabberOutput.VideoSettings = new AVVideoSettings(CVPixelFormatType.CV32BGRA);

            // set up the output queue and delegate
            queue             = new MonoTouch.CoreFoundation.DispatchQueue("captureQueue");
            videoFrameSampler = new VideoFrameSamplerDelegate();
            frameGrabberOutput.SetSampleBufferDelegateAndQueue(videoFrameSampler, queue);

            // subscribe to frame capture events
            videoFrameSampler.CaptureError  += new EventHandler <CaptureErrorEventArgs>(handleImageCaptureError);
            videoFrameSampler.ImageCaptured += new EventHandler <ImageCaptureEventArgs>(handleImageCaptured);

            // add the output to the session
            session.AddOutput(frameGrabberOutput);

            // set minimum time interval between image samples (if possible).
            try
            {
                AVCaptureConnection connection = (AVCaptureConnection)frameGrabberOutput.Connections[0];
                connection.VideoMinFrameDuration = new CMTime(minimumSampleIntervalInMilliSeconds, 1000);
            }
            catch
            {
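                // The output may not have a connection yet, or the connection may not
                // support a minimum frame duration; in that case keep the default rate.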
            }

            return(true);
        }
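A sketch of how a caller might use this helper; everything outside the addImageSamplerOutput call (the session setup and the 200 ms interval) is an assumption added for illustration:

            // Hypothetical caller: create the session and camera input, then attach the
            // sampler output so a frame is delivered roughly every 200 ms.
            session = new AVCaptureSession { SessionPreset = AVCaptureSession.PresetMedium };
            var camera = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
            if (camera != null)
                session.AddInput (AVCaptureDeviceInput.FromDevice (camera));

            string errorMessage;
            if (addImageSamplerOutput (out errorMessage, 200))
                session.StartRunning ();
            else
                Console.WriteLine ("Could not add the image sampler output: " + errorMessage);

Note that the method only ever assigns an empty string to errorMessage; a fuller version would set it in the catch block or when the output cannot be added.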
Example #4
        void CreateOutput()
        {
            output = new AVCaptureVideoDataOutput();
            output.VideoSettings = new AVVideoSettings(CVPixelFormatType.CV32BGRA);

            queue = new DispatchQueue("VideoCameraQueue");
            output.SetSampleBufferDelegateAndQueue(new VideoCameraDelegate {
                Camera = this
            }, queue);

            session.AddOutput(output);
        }
        void SetupAVCapture(NSString sessionPreset)
        {
            if ((videoTextureCache = CVOpenGLESTextureCache.FromEAGLContext(context)) == null)
            {
                Console.WriteLine("Could not create the CoreVideo TextureCache");
                return;
            }
            session = new AVCaptureSession();
            session.BeginConfiguration();

            // Preset size
            session.SessionPreset = sessionPreset;

            // Input device
            var videoDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);

            if (videoDevice == null)
            {
                Console.WriteLine("No video device");
                return;
            }
            NSError err;
            var     input = new AVCaptureDeviceInput(videoDevice, out err);

            if (err != null)
            {
                Console.WriteLine("Error creating video capture device");
                return;
            }
            session.AddInput(input);

            // Create the output device
            var dataOutput = new AVCaptureVideoDataOutput()
            {
                AlwaysDiscardsLateVideoFrames = true,

                // YUV 420; the "BiPlanar" format splits the Y and UV planes into two separate blocks of
                // memory, so during frame decoding plane index 0 yields Y and index 1 yields UV
                VideoSettings = new AVVideoSettings(CVPixelFormatType.CV420YpCbCr8BiPlanarFullRange)
            };

            dataOutputDelegate = new DataOutputDelegate(this);

            //
            // This dispatches the video frames into the main thread, because the OpenGL
            // code is accessing the data synchronously.
            //
            dataOutput.SetSampleBufferDelegateAndQueue(dataOutputDelegate, DispatchQueue.MainQueue);
            session.AddOutput(dataOutput);
            session.CommitConfiguration();
            session.StartRunning();
        }
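None of the listings here show the matching teardown. A minimal sketch using the same field names as above; releasing the OpenGL objects is left to wherever the rest of the GL state is torn down:

        void TeardownAVCapture ()
        {
            if (session != null)
            {
                session.StopRunning ();
                session.Dispose ();
                session = null;
            }
            // The CVOpenGLESTextureCache and EAGL context belong to the OpenGL side
            // and would be released together with the rest of that state.
        }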
Example #6
        bool SetupCaptureSession()
        {
            // configure the capture session for low resolution, change this if your code
            // can cope with more data or volume
            session = new AVCaptureSession()
            {
                SessionPreset = AVCaptureSession.PresetMedium
            };

            // create a device input and attach it to the session
            var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);

            if (captureDevice == null)
            {
                // No input device
                return(false);
            }
            var input = AVCaptureDeviceInput.FromDevice(captureDevice);

            if (input == null)
            {
                // No input device
                return(false);
            }
            session.AddInput(input);

            // create a VideoDataOutput and add it to the session
            var output = new AVCaptureVideoDataOutput()
            {
                VideoSettings = new AVVideoSettings(CVPixelFormatType.CV32BGRA)
            };

            // configure the output
            queue     = new DispatchQueue("myQueue");
            qrScanner = new QrScanner(this);
            output.SetSampleBufferDelegateAndQueue(qrScanner, queue);
            session.AddOutput(output);

            previewLayer              = new AVCaptureVideoPreviewLayer(session);
            previewLayer.Orientation  = AVCaptureVideoOrientation.Portrait;
            previewLayer.VideoGravity = "AVLayerVideoGravityResizeAspectFill";

            session.StartRunning();
            return(true);
        }
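Unlike the first example, this variant also checks that a capture device exists before building the input, which matters when running on the simulator. In both versions the preview layer is never attached to anything; the hosting view controller would typically do something like the following (View is assumed to be the controller's view):

            // Hypothetical: after SetupCaptureSession() returns true, show the camera preview.
            previewLayer.Frame = View.Bounds;
            View.Layer.AddSublayer (previewLayer);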
Example #7
        bool SetupCaptureSession()
        {
            // configure the capture session for low resolution, change this if your code
            // can cope with more data or volume
            session = new AVCaptureSession()
            {
                SessionPreset = AVCaptureSession.PresetMedium
            };

            // create a device input and attach it to the session
            var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
            var input         = AVCaptureDeviceInput.FromDevice(captureDevice);

            if (input == null)
            {
                Console.WriteLine("No input device");
                return(false);
            }
            session.AddInput(input);

            // create a VideoDataOutput and add it to the session
            var output = new AVCaptureVideoDataOutput()
            {
                VideoSettings = new AVVideoSettings(CVPixelFormatType.CV32BGRA),

                // Optionally cap the frame rate; this sample caps it at 15 frames per second
                MinFrameDuration = new CMTime(1, 15)
            };

            // configure the output
            queue          = new MonoTouch.CoreFoundation.DispatchQueue("myQueue");
            outputRecorder = new OutputRecorder();
            output.SetSampleBufferDelegateAndQueue(outputRecorder, queue);
            session.AddOutput(output);

            session.StartRunning();
            return(true);
        }
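MinFrameDuration on AVCaptureVideoDataOutput has since been deprecated by Apple; the same cap can also be applied through the output's connection once the output has been added to the session, which is the approach Example #3 takes. A sketch of that alternative, again limiting capture to 15 frames per second:

            // Cap the frame rate through the connection instead of the deprecated
            // MinFrameDuration property; the output must already be attached to the
            // session so that it has a connection (compare Example #3).
            try {
                var connection = (AVCaptureConnection) output.Connections [0];
                connection.VideoMinFrameDuration = new CMTime (1, 15);
            } catch {
                // No connection yet, or the minimum frame duration is not supported.
            }

On iOS 7 and later Apple moved this setting again, to AVCaptureDevice.ActiveVideoMinFrameDuration, which is set while the device is locked for configuration.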
Example #8
        bool SetupCaptureSession()
        {
            Console.WriteLine("SetupCaptureSession");
            // Overview: RosyWriter uses separate GCD queues for audio and video capture.  If a single GCD queue
            // is used to deliver both audio and video buffers, and our video processing consistently takes
            // too long, the delivery queue can back up, resulting in audio being dropped.
            //
            // When recording, RosyWriter creates a third GCD queue for calls to AVAssetWriter.  This ensures
            // that AVAssetWriter is not called to start or finish writing from multiple threads simultaneously.
            //
            // RosyWriter uses AVCaptureSession's default preset, AVCaptureSessionPresetHigh.

            // Create Capture session
            captureSession = new AVCaptureSession();
            captureSession.BeginConfiguration();

            // Create audio connection
            NSError error;
            var     audioDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Audio);          //AudioDevice ();

            if (audioDevice == null)
            {
                return(false);                // e.g. simulator
            }
            AVCaptureDeviceInput audioIn = new AVCaptureDeviceInput(audioDevice, out error);

            if (captureSession.CanAddInput(audioIn))
            {
                captureSession.AddInput(audioIn);
            }

            AVCaptureAudioDataOutput audioOut = new AVCaptureAudioDataOutput();

            // Add the Delegate to capture each sample that comes through
            audioOut.SetSampleBufferDelegatequeue(audioDataOutputDelegate, movieWritingQueue);

            if (captureSession.CanAddOutput(audioOut))
            {
                captureSession.AddOutput(audioOut);
            }

            // Create Video Session
            var videoDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);              //VideoDeviceWithPosition (AVCaptureDevicePosition.Back);
            AVCaptureDeviceInput videoIn = new AVCaptureDeviceInput(videoDevice, out error);

            if (captureSession.CanAddInput(videoIn))
            {
                captureSession.AddInput(videoIn);
            }

            // RosyWriter prefers to discard late video frames early in the capture pipeline, since its
            // processing can take longer than real-time on some platforms (such as iPhone 3GS).
            // Clients whose image processing is faster than real-time should consider setting AVCaptureVideoDataOutput's
            // alwaysDiscardsLateVideoFrames property to NO.
            AVCaptureVideoDataOutput videoOut = new AVCaptureVideoDataOutput()
            {
                AlwaysDiscardsLateVideoFrames = true,
                VideoSettings = new AVVideoSettings(CVPixelFormatType.CV32BGRA)
            };

            // Create a DispatchQueue for the Video Processing
            DispatchQueue videoCaptureQueue = new DispatchQueue("Video Capture Queue");

            videoOut.SetSampleBufferDelegateAndQueue(videoDataOutputDelegate, videoCaptureQueue);

            if (captureSession.CanAddOutput(videoOut))
            {
                captureSession.AddOutput(videoOut);
            }

            // Set the Video connection from the Video Output object
            videoConnection  = videoOut.ConnectionFromMediaType(AVMediaType.Video);
            videoOrientation = videoConnection.VideoOrientation;

            captureSession.CommitConfiguration();

            return(true);
        }
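This method only configures the session; RosyWriter starts it elsewhere with captureSession.StartRunning (). It also relies on several members created outside this method. A sketch of what those declarations might look like; the initialization shown is an assumption, only the names come from the code above:

        // Assumed supporting members for the RosyWriter-style setup above; the real
        // sample declares and initializes these elsewhere.
        AVCaptureSession captureSession;
        AVCaptureConnection videoConnection;
        AVCaptureVideoOrientation videoOrientation;

        // One serial queue shared by audio delivery and the AVAssetWriter calls, so
        // writing is never started or finished from two threads at once.
        DispatchQueue movieWritingQueue = new DispatchQueue ("Movie Writing Queue");

        // Delegates (subclasses of the MonoTouch delegate types) that receive the
        // audio and video sample buffers.
        AVCaptureAudioDataOutputSampleBufferDelegate audioDataOutputDelegate;
        AVCaptureVideoDataOutputSampleBufferDelegate videoDataOutputDelegate;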
Example #9
		void CreateOutput ()
		{
			output = new AVCaptureVideoDataOutput ();
			output.VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA);

			queue = new DispatchQueue ("VideoCameraQueue");
			output.SetSampleBufferDelegateAndQueue (new VideoCameraDelegate { Camera = this }, queue);

			session.AddOutput (output);
		}
        bool SetupCaptureSession()
        {
            session = new AVCaptureSession () {
                SessionPreset = AVCaptureSession.PresetMedium
            };

            AVCaptureDevice[] capDevices = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video);

            AVCaptureDeviceInput input = null;
            if (capDevices.Length != 0) input = AVCaptureDeviceInput.FromDevice (capDevices[0]);
            if (input == null){
                new UIAlertView("Error", "Camera not available", null, "OK", null).Show();
                Console.WriteLine ("Camera not available");
                return false;
            }
            session.AddInput (input);

            var output = new AVCaptureVideoDataOutput () {
                VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA),
                MinFrameDuration = new CMTime (1, 30)  //second parameter is frames per second
            };

            queue = new MonoTouch.CoreFoundation.DispatchQueue ("myQueue");
            outputRecorder = new OutputRecorder ();
            output.SetSampleBufferDelegateAndQueue (outputRecorder, queue);
            session.AddOutput (output);

            session.StartRunning ();
            return true;
        }