/******************************* MAIN FUNCTIONS *******************************/ public override void ViewDidLoad () { base.ViewDidLoad (); var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); var input = AVCaptureDeviceInput.FromDevice (captureDevice); CaptureSession = new AVCaptureSession(); CaptureSession.AddInput (input as AVCaptureInput); var captureMetadataOutput = new AVCaptureMetadataOutput(); metadataDelegate = new MetadataObjectsDelegate(); metadataDelegate.outer = this; captureMetadataOutput.SetDelegate(metadataDelegate, DispatchQueue.MainQueue); CaptureSession.AddOutput(captureMetadataOutput); captureMetadataOutput.MetadataObjectTypes = AVMetadataObjectType.QRCode; VideoPreviewLayer = new AVCaptureVideoPreviewLayer (CaptureSession); VideoPreviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill; VideoPreviewLayer.Frame = View.Layer.Bounds; View.Layer.AddSublayer (VideoPreviewLayer); View.BringSubviewToFront (messageLabel); QRCodeFrameView = new UIView (); QRCodeFrameView.Layer.BorderColor = UIColor.Green.CGColor; QRCodeFrameView.Layer.BorderWidth = 2; View.AddSubview (QRCodeFrameView); View.BringSubviewToFront (QRCodeFrameView); CaptureSession.StartRunning(); cancelButton.Clicked += (sender, e) => { this.DismissViewController (true, null); }; }
private AVCaptureMovieFileOutput _getMovieOutput() { var shouldReinitializeMovieOutput = movieOutput == null; if (!shouldReinitializeMovieOutput) { var connection = movieOutput.ConnectionFromMediaType(AVMediaType.Video); if (connection != null) { shouldReinitializeMovieOutput = shouldReinitializeMovieOutput || !connection.Active; } } if (shouldReinitializeMovieOutput) { movieOutput = new AVCaptureMovieFileOutput(); movieOutput.MovieFragmentInterval = CMTime.Invalid; captureSession.BeginConfiguration(); captureSession.AddOutput(movieOutput); captureSession.CommitConfiguration(); } return(movieOutput); }
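The helper above only guarantees that a movie file output is attached to the session; a recording still has to be started explicitly. A minimal sketch of how that might look, assuming the _getMovieOutput() helper and movieOutput field above plus a recording delegate of your own (the class and path below are illustrative, not from the original sample):

// Sketch only: RecordingDelegate and the temp-file path are hypothetical.
public class RecordingDelegate : AVCaptureFileOutputRecordingDelegate
{
    public override void FinishedRecording(AVCaptureFileOutput captureOutput,
        NSUrl outputFileUrl, NSObject[] connections, NSError error)
    {
        Console.WriteLine(error == null
            ? "Recording saved to " + outputFileUrl.Path
            : "Recording failed: " + error.LocalizedDescription);
    }
}

void StartRecording()
{
    var output = _getMovieOutput();
    var path = Path.Combine(Path.GetTempPath(), "clip.mov");   // hypothetical target path
    output.StartRecordingToOutputFile(NSUrl.FromFilename(path), new RecordingDelegate());
}

void StopRecording()
{
    movieOutput?.StopRecording();
}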
#pragma warning restore CS4014

private void SetupLiveCameraStream()
{
    captureSession = new AVCaptureSession();

    videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = CameraFeedView.Frame,
        VideoGravity = AVLayerVideoGravity.ResizeAspectFill
    };
    CameraFeedView.Layer.AddSublayer(videoPreviewLayer);

    AVCaptureDevice captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);

    // Batch the configuration changes so the CommitConfiguration calls below are balanced.
    captureSession.BeginConfiguration();
    captureSession.AddInput(captureDeviceInput);

    if (isMovie)
    {
        // Add audio
        var audioDevice = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Audio);
        var audioDeviceInput = AVCaptureDeviceInput.FromDevice(audioDevice, out NSError audioErr);
        if (audioErr != null)
        {
            Console.WriteLine("Couldn't create audio device input: " + audioErr.LocalizedDescription);
        }

        if (audioDeviceInput != null && captureSession.CanAddInput(audioDeviceInput))
        {
            captureSession.AddInput(audioDeviceInput);
        }
        else
        {
            Console.WriteLine("Couldn't add audio input to session");
        }

        movieOutput = new AVCaptureMovieFileOutput();
        captureSession.AddOutput(movieOutput);
        captureSession.SessionPreset = AVCaptureSession.Preset1280x720;

        var connection = movieOutput.ConnectionFromMediaType(AVMediaType.Video);
        if (connection != null && connection.SupportsVideoStabilization)
        {
            connection.PreferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.Auto;
        }

        captureSession.CommitConfiguration();
    }
    else
    {
        stillImageOutput = new AVCapturePhotoOutput
        {
            IsHighResolutionCaptureEnabled = true,
            IsLivePhotoCaptureEnabled = false
        };
        captureSession.AddOutput(stillImageOutput);
        captureSession.CommitConfiguration();
    }

    ShutterButton.Hidden = false;
    captureSession.StartRunning();
}
void SetupPhotoCapture() { captureSession.SessionPreset = AVCaptureSession.PresetPhoto; // Add photo output. photoOutput = new AVCapturePhotoOutput(); photoOutput.IsHighResolutionCaptureEnabled = true; if (captureSession.CanAddOutput(photoOutput)) { captureSession.AddOutput(photoOutput); } }
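SetupPhotoCapture only attaches the AVCapturePhotoOutput; a still is captured on demand afterwards. A rough sketch, assuming the photoOutput field above and iOS 11+ — the handler class below is hypothetical and not part of the original sample:

// Sketch only: PhotoCaptureHandler is illustrative; real handlers usually persist the photo data.
class PhotoCaptureHandler : AVCapturePhotoCaptureDelegate
{
    public override void DidFinishProcessingPhoto(AVCapturePhotoOutput output,
        AVCapturePhoto photo, NSError error)
    {
        if (error != null)
        {
            Console.WriteLine("Photo capture failed: " + error.LocalizedDescription);
            return;
        }
        Console.WriteLine("Photo captured");
    }
}

void TakePhoto()
{
    var settings = AVCapturePhotoSettings.Create();
    photoOutput.CapturePhoto(settings, new PhotoCaptureHandler());
}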
/// <summary> /// Start camera preview /// </summary> public override void StartCamera() { if (Session == null) { Session = new AVCaptureSession(); Device = Configuration.ShowBackCameraFirst ? AVCaptureDevice.Devices.FirstOrDefault(d => d.Position == AVCaptureDevicePosition.Back) : AVCaptureDevice.Devices.FirstOrDefault(d => d.Position == AVCaptureDevicePosition.Front); if (Device == null) { NoCameraAvailable(); Console.WriteLine("Could not find capture device, does your device have a camera?"); return; } try { NSError error; VideoInput = new AVCaptureDeviceInput(Device, out error); Session.AddInput(VideoInput); _videoOutput = new AVCaptureMovieFileOutput { MinFreeDiskSpaceLimit = 1024 * 1024 }; if (Session.CanAddOutput(_videoOutput)) { Session.AddOutput(_videoOutput); } if (Configuration.RecordAudio) { var audioDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Audio); _audioInput = new AVCaptureDeviceInput(audioDevice, out error); if (Session.CanAddInput(_audioInput)) { Session.AddInput(_audioInput); } } if (Configuration.DetectFaces) { SetupFaceDetection(); } SetupVideoPreviewLayer(); Session.StartRunning(); } catch { /* ignore */ } FlashConfiguration(true); } base.StartCamera(); }
private void SetupLiveCameraStream() { captureSession = new AVCaptureSession(); var videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession) { Frame = liveCameraStream.Bounds }; liveCameraStream.Layer.AddSublayer(videoPreviewLayer); var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); ConfigureCameraForDevice(captureDevice); captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice); stillImageOutput = new AVCaptureStillImageOutput { OutputSettings = new NSDictionary() }; captureSession.AddOutput(stillImageOutput); captureSession.AddInput(captureDeviceInput); captureSession.StartRunning(); }
public void SetupLiveCameraStream() { captureSession = new AVCaptureSession(); //var viewLayer = liveCameraStream.Layer; var videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession) { Frame = liveCameraStream.Bounds }; liveCameraStream.Layer.AddSublayer(videoPreviewLayer); var captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video); // HACK: Dunno why this is returning null???? if (captureDevice is null) { return; } ConfigureCameraForDevice(captureDevice); captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice); //var dictionary = new NSMutableDictionary //{ // [AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG) //}; stillImageOutput = new AVCaptureStillImageOutput() { OutputSettings = new NSDictionary() }; captureSession.AddOutput(stillImageOutput); captureSession.AddInput(captureDeviceInput); captureSession.StartRunning(); }
public void SetupLiveCameraStream()
{
    CaptureSession = new AVCaptureSession();

    videoPreviewLayer = new AVCaptureVideoPreviewLayer(CaptureSession)
    {
        Frame = this.Frame
    };
    this.Layer.AddSublayer(videoPreviewLayer);

    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);
    CaptureSession.AddInput(captureDeviceInput);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    stillImageOutput = new AVCaptureStillImageOutput()
    {
        OutputSettings = dictionary
    };
    CaptureSession.AddOutput(stillImageOutput);

    CaptureSession.StartRunning();
}
private void SetupLiveCameraStream()
{
    captureSession = new AVCaptureSession();

    videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = liveCameraStream.Bounds,
        Orientation = GetCameraForOrientation()
    };
    liveCameraStream.Layer.AddSublayer(videoPreviewLayer);

    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    stillImageOutput = new AVCaptureStillImageOutput()
    {
        OutputSettings = dictionary,
    };

    captureSession.AddOutput(stillImageOutput);
    captureSession.AddInput(captureDeviceInput);
    stillImageOutput.ConnectionFromMediaType(AVMediaType.Video).VideoOrientation = GetCameraForOrientation();
    captureSession.StartRunning();
}
/// <summary>
/// Initialization
/// </summary>
private void Initialize()
{
    previewLayer = new AVCaptureVideoPreviewLayer(CaptureSession)
    {
        Frame = Bounds,
        VideoGravity = AVLayerVideoGravity.ResizeAspectFill,
    };

    AVCaptureDevice[] videoDevices = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video);
    AVCaptureDevicePosition cameraPosition = (cameraOptions == CameraOptions.Front)
        ? AVCaptureDevicePosition.Front
        : AVCaptureDevicePosition.Back;
    device = videoDevices.FirstOrDefault(d => d.Position == cameraPosition);

    // Bail out if the capture device could not be created.
    if (device == null)
    {
        Console.WriteLine("CameraStartup Error");
        return;
    }

    var input = new AVCaptureDeviceInput(device, out NSError error);
    if (error != null)
    {
        Console.WriteLine("CameraStartup Error: " + error.LocalizedDescription);
        return;
    }
    CaptureSession.AddInput(input);

    // Attach the camera preview layer.
    Layer.AddSublayer(previewLayer);

    // Wait briefly after configuring the video output.
    Thread.Sleep(300);

    // Start capturing.
    CaptureSession.StartRunning();
    IsPreviewing = true;

    // Configure the photo capture output.
    PhotoOutput = new AVCapturePhotoOutput();
    CaptureSession.AddOutput(PhotoOutput);
}
void Initialize() { CaptureSession = new AVCaptureSession(); previewLayer = new AVCaptureVideoPreviewLayer(CaptureSession) { Frame = Bounds, VideoGravity = AVLayerVideoGravity.ResizeAspectFill }; var videoDevices = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video); var cameraPosition = (cameraOptions == CameraOptions.Front) ? AVCaptureDevicePosition.Front : AVCaptureDevicePosition.Back; var device = videoDevices.FirstOrDefault(d => d.Position == cameraPosition); if (device == null) { return; } NSError error; try { var input = new AVCaptureDeviceInput(device, out error); CaptureSession.AddInput(input); CaptureSession.AddOutput(new AVCaptureMovieFileOutput()); Layer.AddSublayer(previewLayer); CaptureSession.StartRunning(); IsPreviewing = true; } catch (Exception exception) { } }
private void SetupLiveCameraStream() { captureSession = new AVCaptureSession(); captureSession.SessionPreset = AVCaptureSession.PresetMedium; videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession) { Frame = new CGRect(0f, 0f, View.Bounds.Width, View.Bounds.Height), Orientation = GetCameraForOrientation() }; liveCameraStream.Layer.AddSublayer(videoPreviewLayer); var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); ConfigureCameraForDevice(captureDevice); captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice); aVCaptureMovieFileOutput = new AVCaptureMovieFileOutput(); var audioDevice = AVCaptureDevice.GetDefaultDevice(AVMediaType.Audio); var audioDeviceInput = AVCaptureDeviceInput.FromDevice(audioDevice); captureSession.AddOutput(aVCaptureMovieFileOutput); captureSession.AddInput(captureDeviceInput); captureSession.AddInput(audioDeviceInput); aVCaptureMovieFileOutput.ConnectionFromMediaType(AVMediaType.Video).VideoOrientation = GetCameraForOrientation(); captureSession.StartRunning(); }
public void RecordVideoToPath(UIViewController ViewController, string VideoPath)
{
    // Set up the capture device.
    AVCaptureDevice videoRecordingDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    NSError error;
    AVCaptureDeviceInput videoInput = new AVCaptureDeviceInput(videoRecordingDevice, out error);

    // Create and configure a capture session.
    AVCaptureSession captureSession = new AVCaptureSession();
    captureSession.SessionPreset = AVCaptureSession.Preset1280x720;
    captureSession.AddInput(videoInput);

    // Create the capture device output.
    AVCaptureVideoDataOutput videoOutput = new AVCaptureVideoDataOutput();
    captureSession.AddOutput(videoOutput);
    videoOutput.VideoSettings.PixelFormat = CVPixelFormatType.CV32BGRA;
    videoOutput.MinFrameDuration = new CMTime(1, 30);

    // Create the delegate that handles captured frames, then hand it to the output
    // together with a dispatch queue; the delegate must exist before it is assigned.
    captureVideoDelegate = new CaptureVideoDelegate(ViewController);
    videoOutput.SetSampleBufferDelegateQueue(captureVideoDelegate, new DispatchQueue("videoCaptureQueue"));

    // Start the capture session.
    captureSession.StartRunning();
}
private void PrepareCaptureSession() { try { session = new AVCaptureSession { SessionPreset = config.FrameQualityPreset, }; captureDevice = config.Device; deviceInput = AVCaptureDeviceInput.FromDevice(captureDevice); deviceOutput = new AVCaptureVideoDataOutput(); deviceOutput.WeakVideoSettings = new CVPixelBufferAttributes { PixelFormatType = config.PixelFormat }.Dictionary; deviceOutput.SetSampleBufferDelegateQueue(this, queue); session.AddInput(deviceInput); session.AddOutput(deviceOutput); } catch (Exception ex) { System.Console.WriteLine(ex.Message); } }
void SetupLiveCameraStream()
{
    _captureSession = new AVCaptureSession();

    _videoPreviewLayer = new AVCaptureVideoPreviewLayer(_captureSession)
    {
        Frame = _liveCameraStream.Bounds
    };
    _liveCameraStream.AddObserver("bounds", NSKeyValueObservingOptions.New, ObservedBoundsChange);
    _liveCameraStream.Layer.AddSublayer(_videoPreviewLayer);

    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    _captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    _stillImageOutput = new AVCaptureStillImageOutput
    {
        OutputSettings = dictionary
    };

    _captureSession.AddOutput(_stillImageOutput);
    _captureSession.AddInput(_captureDeviceInput);
    _captureSession.StartRunning();
}
bool SetupCaptureSession () { // configure the capture session for low resolution, change this if your code // can cope with more data or volume session = new AVCaptureSession () { SessionPreset = AVCaptureSession.PresetMedium }; // create a device input and attach it to the session var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); var input = AVCaptureDeviceInput.FromDevice (captureDevice); if (input == null){ Console.WriteLine ("No input device"); return false; } session.AddInput (input); // create a VideoDataOutput and add it to the sesion var output = new AVCaptureVideoDataOutput () { VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA), // If you want to cap the frame rate at a given speed, in this sample: 15 frames per second MinFrameDuration = new CMTime (1, 15) }; // configure the output queue = new MonoTouch.CoreFoundation.DispatchQueue ("myQueue"); outputRecorder = new OutputRecorder (); output.SetSampleBufferDelegateAndQueue (outputRecorder, queue); session.AddOutput (output); session.StartRunning (); return true; }
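Several of these samples hand frames to an OutputRecorder without showing it. As a rough sketch only (the real OutputRecorder classes behind these samples may do more), a frame handler subclasses AVCaptureVideoDataOutputSampleBufferDelegate and must dispose each sample buffer promptly so the capture pipeline's buffer pool is not exhausted:

// Sketch only: minimal frame handler, not the original OutputRecorder implementation.
public class OutputRecorder : AVCaptureVideoDataOutputSampleBufferDelegate
{
    public override void DidOutputSampleBuffer(AVCaptureOutput captureOutput,
        CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
    {
        try
        {
            using (var pixelBuffer = sampleBuffer.GetImageBuffer() as CVPixelBuffer)
            {
                // Inspect or convert the frame here.
                Console.WriteLine("Got frame: {0}x{1}", pixelBuffer.Width, pixelBuffer.Height);
            }
        }
        finally
        {
            // Buffers are pooled; releasing them late stalls the capture pipeline.
            sampleBuffer.Dispose();
        }
    }
}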
bool SetupCaptureSession () { // configure the capture session for low resolution, change this if your code // can cope with more data or volume session = new AVCaptureSession () { SessionPreset = AVCaptureSession.PresetMedium }; // create a device input and attach it to the session var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); var input = AVCaptureDeviceInput.FromDevice (captureDevice); if (input == null){ // No input device return false; } session.AddInput (input); // create a VideoDataOutput and add it to the sesion var output = new AVCaptureVideoDataOutput () { VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA) }; // configure the output queue = new DispatchQueue ("myQueue"); qrScanner = new QrScanner (this); output.SetSampleBufferDelegateAndQueue (qrScanner, queue); session.AddOutput (output); previewLayer = new AVCaptureVideoPreviewLayer (session); previewLayer.Orientation = AVCaptureVideoOrientation.Portrait; previewLayer.VideoGravity = "AVLayerVideoGravityResizeAspectFill"; session.StartRunning (); return true; }
public void SetupLiveCameraStream()
{
    captureSession = new AVCaptureSession();
    videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = liveCameraStream.Bounds
    };
    liveCameraStream.Layer.AddSublayer(videoPreviewLayer);

    var captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);
    device = captureDevice;

    // Cache the exposure limits of the active format.
    maxExposure = device.ActiveFormat.MaxISO;
    minExposure = device.ActiveFormat.MinISO;
    maxDuration = device.ActiveFormat.MaxExposureDuration.Seconds;
    minDuration = device.ActiveFormat.MinExposureDuration.Seconds;

    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    stillImageOutput = new AVCaptureStillImageOutput()
    {
        OutputSettings = dictionary
    };

    captureSession.AddOutput(stillImageOutput);
    captureSession.AddInput(captureDeviceInput);
    captureSession.StartRunning();
}
void ShowCameraPreview()
{
    var captureSession = new AVCaptureSession();

    videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = this.View.Frame
    };
    liveCameraStream.Layer.AddSublayer(videoPreviewLayer);

    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);
    captureSession.AddInput(captureDeviceInput);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    stillImageOutput = new AVCaptureStillImageOutput()
    {
        OutputSettings = dictionary
    };
    captureSession.AddOutput(stillImageOutput);
    captureSession.StartRunning();

    captureButton.Hidden = false;
    cancelButton.Hidden = false;
}
void Initialize()
{
    CaptureSession = new AVCaptureSession();
    previewLayer = new AVCaptureVideoPreviewLayer(CaptureSession)
    {
        Frame = Bounds,
        VideoGravity = AVLayerVideoGravity.ResizeAspectFill
    };

    var videoDevices = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video);
    var cameraPosition = (cameraOptions == CameraOptions.Front)
        ? AVCaptureDevicePosition.Front
        : AVCaptureDevicePosition.Back;
    var device = videoDevices.FirstOrDefault(d => d.Position == cameraPosition);

    if (device == null)
    {
        return;
    }

    NSError error;
    var input = new AVCaptureDeviceInput(device, out error);
    CaptureSession.AddInput(input);
    Layer.AddSublayer(previewLayer);

    // Configure the still image output for JPEG before the session starts running.
    outputSession = new AVCaptureStillImageOutput();
    var dict = new NSMutableDictionary();
    dict[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    outputSession.OutputSettings = dict;
    CaptureSession.AddOutput(outputSession);

    CaptureSession.StartRunning();
}
private void TryStart() { if (contentLayer != null) { session = new AVCaptureSession(); var camera = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); var input = AVCaptureDeviceInput.FromDevice(camera); session.AddInput(input); // create a VideoDataOutput and add it to the sesion var settings = new CVPixelBufferAttributes { PixelFormatType = CVPixelFormatType.CV32BGRA }; using (var output = new AVCaptureVideoDataOutput { WeakVideoSettings = settings.Dictionary }) { queue = new DispatchQueue("s4mQueue"); outputRecorder = new OutputRecorder(); output.SetSampleBufferDelegate(outputRecorder, queue); session.AddOutput(output); } this.contentLayer.Session = session; session.StartRunning(); } }
public async void SetupLiveCameraStream()
{
    captureSession = new AVCaptureSession();

    var videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = liveCameraStream.Bounds
    };
    liveCameraStream.Layer.AddSublayer(videoPreviewLayer);

    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    stillImageOutput = new AVCaptureStillImageOutput()
    {
        OutputSettings = dictionary
    };

    captureSession.AddOutput(stillImageOutput);
    captureSession.AddInput(captureDeviceInput);
    captureSession.StartRunning();

    await SkinSelfie.Pages.CameraPage.ShowTip();
}
private void SetupCamera() { try { if (_captureSession == null) { _captureSession = new AVCaptureSession { SessionPreset = AVCaptureSession.PresetPhoto }; } SetPreviewSizing(); SetPreviewOrientation(); if (_photoOutput == null) { _device = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video); TurnOffFlashAndSetContinuousAutoMode(_device); _photoOutput = new AVCapturePhotoOutput { IsHighResolutionCaptureEnabled = true }; _captureSession.AddOutput(_photoOutput); _captureSession.AddInput(AVCaptureDeviceInput.FromDevice(_device)); } } catch (Exception e) { _cameraModule.ErrorMessage = e.ToString(); } }
bool SetupCaptureSession() { // configure the capture session for low resolution, change this if your code // can cope with more data or volume session = new AVCaptureSession() { SessionPreset = AVCaptureSession.PresetMedium }; // create a device input and attach it to the session var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); if (captureDevice == null) { Console.WriteLine("No captureDevice - this won't work on the simulator, try a physical device"); return(false); } //Configure for 15 FPS. Note use of LockForConigfuration()/UnlockForConfiguration() NSError error = null; captureDevice.LockForConfiguration(out error); if (error != null) { Console.WriteLine(error); captureDevice.UnlockForConfiguration(); return(false); } if (UIDevice.CurrentDevice.CheckSystemVersion(7, 0)) { captureDevice.ActiveVideoMinFrameDuration = new CMTime(1, 15); } captureDevice.UnlockForConfiguration(); var input = AVCaptureDeviceInput.FromDevice(captureDevice); if (input == null) { Console.WriteLine("No input - this won't work on the simulator, try a physical device"); return(false); } session.AddInput(input); // create a VideoDataOutput and add it to the sesion var output = new AVCaptureVideoDataOutput() { WeakVideoSettings = new CVPixelBufferAttributes() { PixelFormatType = CVPixelFormatType.CV32BGRA }.Dictionary, }; // configure the output queue = new CoreFoundation.DispatchQueue("myQueue"); outputRecorder = new OutputRecorder(); output.SetSampleBufferDelegate(outputRecorder, queue); session.AddOutput(output); session.StartRunning(); return(true); }
private void InitDevice() { captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video); AVCaptureDeviceInput input; try { input = new AVCaptureDeviceInput(captureDevice, out NSError err); if (err == null) { captureSession = new AVCaptureSession(); captureSession.AddInput(input); previewLayer = new AVCaptureVideoPreviewLayer(captureSession) { VideoGravity = AVLayerVideoGravity.ResizeAspectFill, Frame = previewView.Layer.Bounds }; previewView.Layer.AddSublayer(previewLayer); captureOutput = new AVCapturePhotoOutput { IsHighResolutionCaptureEnabled = true }; captureSession.AddOutput(captureOutput); captureSession.StartRunning(); } } catch (Exception ex) { allowAndBack(); } }
private void InitSession() { try { //init capture session _AVSession = new AVCaptureSession(); //check permissions var authorizationStatus = AVCaptureDevice.GetAuthorizationStatus(AVMediaType.Video); if (authorizationStatus != AVAuthorizationStatus.Authorized) { return; } //check capture camera var cameras = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video); var camera = cameras.FirstOrDefault(d => d.Position == AVCaptureDevicePosition.Back); if (camera == null) { return; } //add input to capture session _AVDeviceImput = new AVCaptureDeviceInput(camera, out NSError _); if (_AVSession.CanAddInput(_AVDeviceImput)) { _AVSession.AddInput(_AVDeviceImput); } else { return; } //add output to camera session _MetadataObjectsQueue = new DispatchQueue("metadata objects queue"); _AVMetadataOutput = new AVCaptureMetadataOutput(); if (_AVSession.CanAddOutput(_AVMetadataOutput)) { _AVSession.AddOutput(_AVMetadataOutput); } else { return; } _AVMetadataOutput.SetDelegate(this, _MetadataObjectsQueue); //init the video preview layer and add it to the current view _AVVideoPeviewLayer = new AVCaptureVideoPreviewLayer(_AVSession); _AVVideoPeviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill; _AVVideoPeviewLayer.Frame = Bounds; this.Layer.AddSublayer(_AVVideoPeviewLayer); //start capture session StartSession(true); } catch (Exception ex) { Console.WriteLine("IOS_SCAN | init error", ex); } }
public void Start() { captureSession = new AVCaptureSession(); previewLayer = new AVCaptureVideoPreviewLayer(captureSession) { VideoGravity = AVLayerVideoGravity.ResizeAspectFill, }; try { var captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video); var input = AVCaptureDeviceInput.FromDevice(captureDevice); var output = new AVCaptureMetadataOutput(); var queue = new DispatchQueue("qrQueue"); captureSession.AddInput(input); captureSession.AddOutput(output); output.SetDelegate(this, queue); output.MetadataObjectTypes = AVMetadataObjectType.QRCode; Layer.AddSublayer(previewLayer); captureSession.StartRunning(); } catch (Exception e) { Console.WriteLine(e); } }
private void InitialiseCaptureSession()
{
    try
    {
        _captureSession.SessionPreset = AVCaptureSession.Preset1920x1080;
        var captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video);
        NSError error;
        var input = new AVCaptureDeviceInput(captureDevice, out error);
        if (error != null)
        {
            Console.WriteLine($"Error {error}");
        }

        if (_captureSession.CanAddInput(input))
        {
            _captureSession.AddInput(input);
        }

        var videoOutput = new AVCaptureVideoDataOutput();
        videoOutput.SetSampleBufferDelegateQueue(this, new DispatchQueue("sample buffer delegate"));
        if (_captureSession.CanAddOutput(videoOutput))
        {
            _captureSession.AddOutput(videoOutput);
        }

        _captureSession.StartRunning();
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
    }
}
public void SetupLiveCameraStream()
{
    captureSession = new AVCaptureSession();

    var viewLayer = liveCameraStream.Layer;
    Console.WriteLine(viewLayer.Frame.Width);

    var videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = liveCameraStream.Bounds
    };
    liveCameraStream.Layer.AddSublayer(videoPreviewLayer);
    Console.WriteLine(liveCameraStream.Layer.Frame.Width);

    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary();
    dictionary[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    stillImageOutput = new AVCaptureStillImageOutput()
    {
        OutputSettings = dictionary
    };

    captureSession.AddOutput(stillImageOutput);
    captureSession.AddInput(captureDeviceInput);
    captureSession.StartRunning();
    ViewWillLayoutSubviews();
}
void Initialize() { CaptureSession = new AVCaptureSession(); previewLayer = new AVCaptureVideoPreviewLayer(CaptureSession); previewLayer.Frame = Bounds; previewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill; previewLayer.Orientation = AVCaptureVideoOrientation.Portrait; var videoDevices = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video); var cameraPosition = (CameraOption == CameraOptions.Front) ? AVCaptureDevicePosition.Front : AVCaptureDevicePosition.Back; var device = videoDevices.FirstOrDefault(d => d.Position == cameraPosition); if (device == null) { return; } ConfigureCameraForDevice(device); captureDeviceInput = AVCaptureDeviceInput.FromDevice(device); CaptureSession.AddInput(captureDeviceInput); Layer.AddSublayer(previewLayer); output = new AVCaptureStillImageOutput { OutputSettings = new NSDictionary(AVVideo.CodecKey, AVVideo.CodecJPEG) }; CaptureSession.AddOutput(output); }
public void SetupLiveCameraStream()
{
    captureSession = new AVCaptureSession();

    // PresetPhoto for the camera image feed.
    captureSession.SessionPreset = AVCaptureSession.PresetPhoto;

    videoPreviewLayer = new AVCaptureVideoPreviewLayer(captureSession)
    {
        Frame = this.View.Frame,
    };
    liveCameraStream.Layer.AddSublayer(videoPreviewLayer);

    var captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);
    ConfigureCameraForDevice(captureDevice);
    captureDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);
    captureSession.AddInput(captureDeviceInput);

    // Request JPEG stills.
    var dictionary = new NSMutableDictionary
    {
        [AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG)
    };
    stillImageOutput = new AVCaptureStillImageOutput
    {
        OutputSettings = dictionary
    };
    captureSession.AddOutput(stillImageOutput);
    captureSession.StartRunning();
}
private bool InitScanner(BarcodeScanner.BarcodeFormat barcodeType)
{
    device = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);
    if (device == null)
    {
        return false;
    }

    input = AVCaptureDeviceInput.FromDevice(device);
    if (input == null)
    {
        return false;
    }

    // Enable continuous autofocus when the device supports it.
    if (input.Device.IsFocusModeSupported(AVCaptureFocusMode.ContinuousAutoFocus))
    {
        input.Device.LockForConfiguration(out NSError err);
        input.Device.FocusMode = AVCaptureFocusMode.ContinuousAutoFocus;
        input.Device.UnlockForConfiguration();
    }

    output = new AVCaptureMetadataOutput();
    output.SetDelegate(this, DispatchQueue.MainQueue);

    session = new AVCaptureSession();
    session.AddInput(input);
    session.AddOutput(output);
    output.MetadataObjectTypes = GetBarcodeFormat(barcodeType);

    captureVideoPreviewLayer = AVCaptureVideoPreviewLayer.FromSession(session);
    captureVideoPreviewLayer.Frame = CGRect.Empty;
    captureVideoPreviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
    captureVideoPreviewLayer.Connection.VideoOrientation = GetDeviceOrientation();
    return true;
}
private bool initScanner() { device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); if (device == null) { this.Debug("AVCaptureDevice is null"); return(false); } input = AVCaptureDeviceInput.FromDevice(device); if (input == null) { this.Debug("AVCaptureDeviceInput is null"); return(false); } output = new AVCaptureMetadataOutput(); output.SetDelegate(this, DispatchQueue.MainQueue); session = new AVCaptureSession(); session.AddInput(input); session.AddOutput(output); output.MetadataObjectTypes = configuration.Barcodes.ConvertToIOS(); captureVideoPreviewLayer = AVCaptureVideoPreviewLayer.FromSession(session); captureVideoPreviewLayer.Frame = CGRect.Empty; captureVideoPreviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill; captureVideoPreviewLayer.Connection.VideoOrientation = getDeviceOrientation(); return(true); }
public SessionSetupResult ConfigureSession(AVCaptureSession session) { var inputDeviceConfigureResult = _videoDeviceInputManager.ConfigureVideoDeviceInput(session); if (inputDeviceConfigureResult != SessionSetupResult.Success) { return(inputDeviceConfigureResult); } // Add movie file output. Console.WriteLine("capture session: configuring - adding movie file input"); var movieFileOutput = new AVCaptureMovieFileOutput(); if (session.CanAddOutput(movieFileOutput)) { session.AddOutput(movieFileOutput); _videoFileOutput = movieFileOutput; DispatchQueue.MainQueue.DispatchAsync(() => { _videoRecordingDelegate?.DidBecomeReadyForVideoRecording(this); }); } else { Console.WriteLine("capture session: could not add video output to the session"); return(SessionSetupResult.ConfigurationFailed); } _audioCaptureSession = new AudioCaptureSession(); _audioCaptureSession.ConfigureSession(session); return(SessionSetupResult.Success); }
bool SetupCaptureSession () { // configure the capture session for low resolution, change this if your code // can cope with more data or volume session = new AVCaptureSession { SessionPreset = AVCaptureSession.PresetMedium }; // create a device input and attach it to the session var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); if (captureDevice == null) { Console.WriteLine ("No captureDevice - this won't work on the simulator, try a physical device"); return false; } //Configure for 15 FPS. Note use of LockForConigfuration()/UnlockForConfiguration() NSError error = null; captureDevice.LockForConfiguration (out error); if (error != null) { Console.WriteLine (error); captureDevice.UnlockForConfiguration (); return false; } if (UIDevice.CurrentDevice.CheckSystemVersion (7, 0)) captureDevice.ActiveVideoMinFrameDuration = new CMTime (1, 15); captureDevice.UnlockForConfiguration (); var input = AVCaptureDeviceInput.FromDevice (captureDevice); if (input == null) { Console.WriteLine ("No input - this won't work on the simulator, try a physical device"); return false; } session.AddInput (input); // create a VideoDataOutput and add it to the sesion var settings = new CVPixelBufferAttributes { PixelFormatType = CVPixelFormatType.CV32BGRA }; using (var output = new AVCaptureVideoDataOutput { WeakVideoSettings = settings.Dictionary }) { queue = new DispatchQueue ("myQueue"); outputRecorder = new OutputRecorder (); output.SetSampleBufferDelegate (outputRecorder, queue); session.AddOutput (output); } session.StartRunning (); return true; }
public override void ViewDidLoad () { base.ViewDidLoad (); session = new AVCaptureSession (); var camera = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); var input = AVCaptureDeviceInput.FromDevice(camera); session.AddInput(input); output = new AVCaptureMetadataOutput(); var metadataDelegate = new MetadataOutputDelegate(); output.SetDelegate(metadataDelegate, DispatchQueue.MainQueue); session.AddOutput(output); output.MetadataObjectTypes = new NSString[] { AVMetadataObject.TypeQRCode, AVMetadataObject.TypeEAN13Code }; var previewLayer = new AVCaptureVideoPreviewLayer(session); //var view = new ContentView(UIColor.LightGray, previewLayer, metadataDelegate); previewLayer.MasksToBounds = true; previewLayer.VideoGravity = AVCaptureVideoPreviewLayer.GravityResizeAspectFill; previewLayer.Frame = UIScreen.MainScreen.Bounds; this.View.Layer.AddSublayer(previewLayer); metadataDelegate.MetadataFound += (s, e) => { session.StopRunning(); new UIAlertView("Scanned!",e.StringValue, null ,"OK",null).Show(); }; session.StartRunning(); }
private void SetupCamera() { CaptureSession = null; CaptureSession = new AVCaptureSession(); CaptureSession.SessionPreset = AVCaptureSession.PresetPhoto; currentDevice = null; inputDevice1 = null; inputDevice2 = null; foreach (AVCaptureDevice device in AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video)) { if (device.Position == AVCaptureDevicePosition.Front) { inputDevice1 = device; } else if (device.Position == AVCaptureDevicePosition.Back) { inputDevice2 = device; } } NSError error; if (inputDevice1.HasFlash) { inputDevice1.LockForConfiguration(out error); inputDevice1.FlashMode = AVCaptureFlashMode.Off; FlashButton.TitleLabel.Text = "Flash Off"; } if (inputDevice2.HasFlash) { inputDevice2.LockForConfiguration(out error); inputDevice2.FlashMode = AVCaptureFlashMode.Off; FlashButton.TitleLabel.Text = "Flash Off"; } frontCamera = AVCaptureDeviceInput.FromDevice(inputDevice1, out error); rearCamera = AVCaptureDeviceInput.FromDevice(inputDevice2, out error); currentDevice = inputDevice2; if (CaptureSession.CanAddInput(rearCamera)) { CaptureSession.AddInput(rearCamera); } AVCaptureVideoPreviewLayer previewLayer = new AVCaptureVideoPreviewLayer(CaptureSession); previewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill; previewLayer.Frame = View.Frame; View.Layer.InsertSublayer(previewLayer, 0); StillImageOutput = new AVCaptureStillImageOutput(); StillImageOutput.OutputSettings = new NSDictionary(AVVideo.CodecKey, AVVideo.CodecJPEG); CaptureSession.AddOutput(StillImageOutput); CaptureSession.StartRunning(); }
private void StartCameraWithCompletionHandler(Action<bool, NSError> completion) { captureSession = new AVCaptureSession (); captureSession.BeginConfiguration (); captureDevice = CameraDeviceForPosition (AVCaptureDevicePosition.Back); if (captureDevice == null) { string message = "Error message back camera - not found"; string title = "Error"; ShowErrorMessage (message, title); return; } NSError error; AVCaptureDeviceInput deviceInput = AVCaptureDeviceInput.FromDevice (captureDevice, out error); if (deviceInput == null) { Console.WriteLine ("This error should be handled appropriately in your app -- obtain device input: {0}", error); string message = "Error message back camera - can't open."; string title = "Error"; ShowErrorMessage (message, title); return; } captureSession.AddInput (deviceInput); stillImageOutput = new AVCaptureStillImageOutput (); //Or instead of JPEG, we can use one of the following pixel formats: BGRA, 420f output stillImageOutput.OutputSettings = new NSDictionary (AVVideo.CodecKey, AVVideo.CodecJPEG); captureSession.AddOutput (stillImageOutput); cameraPreviewView.ConfigureCaptureSession (captureSession, stillImageOutput); captureSession.SessionPreset = AVCaptureSession.PresetPhoto; captureDeviceFormat = captureDevice.ActiveFormat; captureSession.CommitConfiguration (); captureSession.StartRunning (); maxBracketCount = stillImageOutput.MaxBracketedCaptureStillImageCount; PrepareBracketsWithCompletionHandler (completion); }
public override void ViewDidLoad () { base.ViewDidLoad (); this.View.BackgroundColor = UIColor.White; NSError error; // Setup detector options. var options = new CIDetectorOptions { Accuracy = FaceDetectorAccuracy.High, // Can give a hint here about the rects to detect. 1.4 would be for A4 sheets of paper for instance. AspectRatio = 1.41f, }; // Create a rectangle detector. Note that you can also create QR detector or a face detector. // Most of this code will also work with other detectors (like streaming to a preview layer and grabbing images). this.detector = CIDetector.CreateRectangleDetector (context: null, detectorOptions: options); // Create the session. The AVCaptureSession is the managing instance of the whole video handling. var captureSession = new AVCaptureSession () { // Defines what quality we want to use for the images we grab. Photo gives highest resolutions. SessionPreset = AVCaptureSession.PresetPhoto }; // Find a suitable AVCaptureDevice for video input. var device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); if (device == null) { // This will not work on the iOS Simulator - there is no camera. :-) throw new InvalidProgramException ("Failed to get AVCaptureDevice for video input!"); } // Create a device input with the device and add it to the session. var videoInput = AVCaptureDeviceInput.FromDevice (device, out error); if (videoInput == null) { throw new InvalidProgramException ("Failed to get AVCaptureDeviceInput from AVCaptureDevice!"); } // Let session read from the input, this is our source. captureSession.AddInput (videoInput); // Create output for the video stream. This is the destination. var videoOutput = new AVCaptureVideoDataOutput () { AlwaysDiscardsLateVideoFrames = true }; // Define the video format we want to use. Note that Xamarin exposes the CompressedVideoSetting and UncompressedVideoSetting // properties on AVCaptureVideoDataOutput un Unified API, but I could not get these to work. The VideoSettings property is deprecated, // so I use the WeakVideoSettings instead which takes an NSDictionary as input. this.videoSettingsDict = new NSMutableDictionary (); this.videoSettingsDict.Add (CVPixelBuffer.PixelFormatTypeKey, NSNumber.FromUInt32((uint)CVPixelFormatType.CV32BGRA)); videoOutput.WeakVideoSettings = this.videoSettingsDict; // Create a delegate to report back to us when an image has been captured. // We want to grab the camera stream and feed it through a AVCaptureVideoDataOutputSampleBufferDelegate // which allows us to get notified if a new image is availeble. An implementation of that delegate is VideoFrameSampleDelegate in this project. this.sampleBufferDelegate = new VideoFrameSamplerDelegate (); // Processing happens via Grand Central Dispatch (GCD), so we need to provide a queue. // This is pretty much like a system managed thread (see: http://zeroheroblog.com/ios/concurrency-in-ios-grand-central-dispatch-gcd-dispatch-queues). this.sessionQueue = new DispatchQueue ("AVSessionQueue"); // Assign the queue and the delegate to the output. Now all output will go through the delegate. videoOutput.SetSampleBufferDelegate(this.sampleBufferDelegate, this.sessionQueue); // Add output to session. captureSession.AddOutput(videoOutput); // We also want to visualize the input stream. The raw stream can be fed into an AVCaptureVideoPreviewLayer, which is a subclass of CALayer. // A CALayer can be added to a UIView. We add that layer to the controller's main view. 
var layer = this.View.Layer; this.videoLayer = AVCaptureVideoPreviewLayer.FromSession (captureSession); this.videoLayer.Frame = layer.Bounds; layer.AddSublayer (this.videoLayer); // All setup! Start capturing! captureSession.StartRunning (); // This is just for information and allows you to get valid values for the detection framerate. Console.WriteLine ("Available capture framerates:"); var rateRanges = device.ActiveFormat.VideoSupportedFrameRateRanges; foreach (var r in rateRanges) { Console.WriteLine (r.MinFrameRate + "; " + r.MaxFrameRate + "; " + r.MinFrameDuration + "; " + r.MaxFrameDuration); } // Configure framerate. Kind of weird way of doing it but the only one that works. device.LockForConfiguration (out error); // CMTime constructor means: 1 = one second, DETECTION_FPS = how many samples per unit, which is 1 second in this case. device.ActiveVideoMinFrameDuration = new CMTime(1, DETECTION_FPS); device.ActiveVideoMaxFrameDuration = new CMTime(1, DETECTION_FPS); device.UnlockForConfiguration (); // Put a small image view at the top left that shows the live image with the detected rectangle(s). this.imageViewOverlay = new UIImageView { ContentMode = UIViewContentMode.ScaleAspectFit, BackgroundColor = UIColor.Gray }; this.imageViewOverlay.Layer.BorderColor = UIColor.Red.CGColor; this.imageViewOverlay.Layer.BorderWidth = 3f; this.Add (this.imageViewOverlay); // Put another image view top right that shows the image with perspective correction. this.imageViewPerspective = new UIImageView { ContentMode = UIViewContentMode.ScaleAspectFit, BackgroundColor = UIColor.Gray }; this.imageViewPerspective.Layer.BorderColor = UIColor.Red.CGColor; this.imageViewPerspective.Layer.BorderWidth = 3f; this.Add (this.imageViewPerspective); // Add some lables for information. this.mainWindowLbl = new UILabel { Text = "Live stream from camera. Point camera to a rectangular object.", TextAlignment = UITextAlignment.Center }; this.Add (this.mainWindowLbl); this.detectionWindowLbl = new UILabel { Text = "Detected rectangle overlay", TextAlignment = UITextAlignment.Center }; this.Add (this.detectionWindowLbl); this.perspectiveWindowLbl = new UILabel { Text = "Perspective corrected", TextAlignment = UITextAlignment.Center }; this.Add (this.perspectiveWindowLbl); }
void SetupAVCapture (NSString sessionPreset) { if ((videoTextureCache = CVOpenGLESTextureCache.FromEAGLContext (context)) == null){ Console.WriteLine ("Could not create the CoreVideo TextureCache"); return; } session = new AVCaptureSession (); session.BeginConfiguration (); // Preset size session.SessionPreset = sessionPreset; // Input device var videoDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); if (videoDevice == null){ Console.WriteLine ("No video device"); return; } NSError err; var input = new AVCaptureDeviceInput (videoDevice, out err); if (err != null){ Console.WriteLine ("Error creating video capture device"); return; } session.AddInput (input); // Create the output device var dataOutput = new AVCaptureVideoDataOutput () { AlwaysDiscardsLateVideoFrames = true, // YUV 420, use "BiPlanar" to split the Y and UV planes in two separate blocks of // memory, then we can index 0 to get the Y and 1 for the UV planes in the frame decoding VideoSettings = new AVVideoSettings (CVPixelFormatType.CV420YpCbCr8BiPlanarFullRange) }; dataOutputDelegate = new DataOutputDelegate (this); // // This dispatches the video frames into the main thread, because the OpenGL // code is accessing the data synchronously. // dataOutput.SetSampleBufferDelegateAndQueue (dataOutputDelegate, DispatchQueue.MainQueue); session.AddOutput (dataOutput); session.CommitConfiguration (); session.StartRunning (); }
public override void FinishedLaunching(UIApplication application)
{
    // Create a new capture session
    Session = new AVCaptureSession();
    Session.SessionPreset = AVCaptureSession.PresetMedium;

    // Create a device input
    CaptureDevice = GetFrontCamera();
    if (CaptureDevice == null)
    {
        // Video capture not supported, abort
        Console.WriteLine("Video recording not supported on this device");
        CameraAvailable = false;
        return;
    }

    // Prepare device for configuration
    CaptureDevice.LockForConfiguration(out Error);
    if (Error != null)
    {
        // There has been an issue, abort
        Console.WriteLine("Error: {0}", Error.LocalizedDescription);
        CaptureDevice.UnlockForConfiguration();
        return;
    }

    // Configure stream for 15 frames per second (fps)
    CaptureDevice.ActiveVideoMinFrameDuration = new CMTime(1, 15);

    // Unlock configuration
    CaptureDevice.UnlockForConfiguration();

    // Get input from capture device
    Input = AVCaptureDeviceInput.FromDevice(CaptureDevice);
    if (Input == null)
    {
        // Error, report and abort
        Console.WriteLine("Unable to gain input from capture device.");
        CameraAvailable = false;
        return;
    }

    // Attach input to session
    Session.AddInput(Input);

    // Create a new output
    var output = new AVCaptureVideoDataOutput();
    var settings = new AVVideoSettingsUncompressed();
    settings.PixelFormatType = CVPixelFormatType.CV32BGRA;
    output.WeakVideoSettings = settings.Dictionary;

    // Configure and attach the output to the session
    Queue = new DispatchQueue("ManCamQueue");
    Recorder = new OutputRecorder();
    output.SetSampleBufferDelegate(Recorder, Queue);
    Session.AddOutput(output);

    // Configure and attach a still image output for bracketed capture, requesting JPEG
    StillImageOutput = new AVCaptureStillImageOutput();
    var dict = new NSMutableDictionary();
    dict[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
    StillImageOutput.OutputSettings = dict;
    Session.AddOutput(StillImageOutput);

    // Let tabs know that a camera is available
    CameraAvailable = true;
}
bool SetupCaptureSession () { // configure the capture session for low resolution, change this if your code // can cope with more data or volume session = new AVCaptureSession () { SessionPreset = AVCaptureSession.PresetMedium }; AVCaptureDevice captureDevice = null; var videoDevices = AVCaptureDevice.DevicesWithMediaType (AVMediaType.Video); foreach (AVCaptureDevice Device in videoDevices) { if (Device.Position == AVCaptureDevicePosition.Front) { captureDevice = Device; break; } } // create a device input and attach it to the session if(captureDevice==null){ captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); } if (captureDevice == null){ return false; } //Configure for 15 FPS. Note use of LockForConigfuration()/UnlockForConfiguration() NSError error = null; captureDevice.LockForConfiguration(out error); if(error != null) { captureDevice.UnlockForConfiguration(); return false; } if(UIDevice.CurrentDevice.CheckSystemVersion(7,0)) captureDevice.ActiveVideoMinFrameDuration = new CMTime (1,15); captureDevice.UnlockForConfiguration(); var input = AVCaptureDeviceInput.FromDevice (captureDevice); if (input == null){ return false; } session.AddInput (input); // create a VideoDataOutput and add it to the sesion var output = new AVCaptureVideoDataOutput () { VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA), }; // configure the output queue = new MonoTouch.CoreFoundation.DispatchQueue ("myQueue"); outputRecorder = new OutputRecorder (_state); output.SetSampleBufferDelegate (outputRecorder, queue); session.AddOutput (output); session.StartRunning (); return true; }
void setupCaptureSession () { if (CaptureSession != null) return; CaptureSession = new AVCaptureSession (); NSNotificationCenter.DefaultCenter.AddObserver (null, captureSessionNotification, CaptureSession); applicationWillEnterForegroundNotificationObserver = NSNotificationCenter.DefaultCenter.AddObserver (UIApplication.WillEnterForegroundNotification.ToString (), UIApplication.SharedApplication, NSOperationQueue.CurrentQueue, delegate(NSNotification notification) { applicationWillEnterForeground (); }); videoDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); NSError error; videoInput = new AVCaptureDeviceInput (videoDevice, out error); if (CaptureSession.CanAddInput (videoInput)) CaptureSession.AddInput (videoInput); metadataOutput = new AVCaptureMetadataOutput (); var metadataQueue = new DispatchQueue ("com.AVCam.metadata"); metadataObjectsDelegate = new MetadataObjectsDelegate { DidOutputMetadataObjectsAction = DidOutputMetadataObjects }; metadataOutput.SetDelegate (metadataObjectsDelegate, metadataQueue); if (CaptureSession.CanAddOutput (metadataOutput)) CaptureSession.AddOutput (metadataOutput); }
//Protected protected AVCaptureSession MaybeInitializeSession() { //Create the capture session var session = new AVCaptureSession() { SessionPreset = AVCaptureSession.PresetMedium }; //Setup the video capture var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video); if (captureDevice == null) { Failure.Alert("No captureDevice - this won't work on the simulator, try a physical device"); return null; } var input = AVCaptureDeviceInput.FromDevice(captureDevice); if (input == null) { Failure.Alert("No input - this won't work on the simulator, try a physical device"); return null; } session.AddInput(input); // create a VideoDataOutput and add it to the sesion var output = new AVCaptureVideoDataOutput() { VideoSettings = new AVVideoSettings(CVPixelFormatType.CV32BGRA), }; // configure the output var queue = new MonoTouch.CoreFoundation.DispatchQueue("myQueue"); output.SetSampleBufferDelegate(this, queue); session.AddOutput(output); return session; }
bool SetupCaptureSession () { // configure the capture session for low resolution, change this if your code // can cope with more data or volume session = new AVCaptureSession () { SessionPreset = AVCaptureSession.Preset640x480 }; // create a device input and attach it to the session var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video); if (captureDevice == null){ Console.WriteLine ("No captureDevice - this won't work on the simulator, try a physical device"); return false; } var input = AVCaptureDeviceInput.FromDevice (captureDevice); if (input == null){ Console.WriteLine ("No input - this won't work on the simulator, try a physical device"); return false; } else session.AddInput (input); previewLayer = new AVCaptureVideoPreviewLayer(session); //Framerate set here (15 fps) if (previewLayer.RespondsToSelector(new Selector("connection"))) previewLayer.Connection.VideoMinFrameDuration = new CMTime(1, 10); previewLayer.LayerVideoGravity = AVLayerVideoGravity.ResizeAspectFill; previewLayer.Frame = this.Frame; previewLayer.Position = new PointF(this.Layer.Bounds.Width / 2, (this.Layer.Bounds.Height / 2)); layerView = new UIView(this.Frame); layerView.AutoresizingMask = UIViewAutoresizing.FlexibleWidth | UIViewAutoresizing.FlexibleHeight; layerView.Layer.AddSublayer(previewLayer); this.AddSubview(layerView); ResizePreview(UIApplication.SharedApplication.StatusBarOrientation); if (overlayView != null) { this.AddSubview (overlayView); this.BringSubviewToFront (overlayView); //overlayView.LayoutSubviews (); } session.StartRunning (); Console.WriteLine ("RUNNING!!!"); // create a VideoDataOutput and add it to the sesion output = new AVCaptureVideoDataOutput () { //videoSettings VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA), }; // configure the output queue = new MonoTouch.CoreFoundation.DispatchQueue("ZxingScannerView"); // (Guid.NewGuid().ToString()); var barcodeReader = new BarcodeReader(null, (img) => { var src = new RGBLuminanceSource(img); //, bmp.Width, bmp.Height); //Don't try and rotate properly if we're autorotating anyway if (options.AutoRotate.HasValue && options.AutoRotate.Value) return src; switch (UIDevice.CurrentDevice.Orientation) { case UIDeviceOrientation.Portrait: return src.rotateCounterClockwise().rotateCounterClockwise().rotateCounterClockwise(); case UIDeviceOrientation.PortraitUpsideDown: return src.rotateCounterClockwise().rotateCounterClockwise().rotateCounterClockwise(); case UIDeviceOrientation.LandscapeLeft: return src; case UIDeviceOrientation.LandscapeRight: return src; } return src; }, null, null); //(p, w, h, f) => new RGBLuminanceSource(p, w, h, RGBLuminanceSource.BitmapFormat.Unknown)); if (this.options.TryHarder.HasValue) { Console.WriteLine("TRY_HARDER: " + this.options.TryHarder.Value); barcodeReader.Options.TryHarder = this.options.TryHarder.Value; } if (this.options.PureBarcode.HasValue) barcodeReader.Options.PureBarcode = this.options.PureBarcode.Value; if (this.options.AutoRotate.HasValue) { Console.WriteLine("AUTO_ROTATE: " + this.options.AutoRotate.Value); barcodeReader.AutoRotate = this.options.AutoRotate.Value; } if (!string.IsNullOrEmpty (this.options.CharacterSet)) barcodeReader.Options.CharacterSet = this.options.CharacterSet; if (this.options.TryInverted.HasValue) barcodeReader.TryInverted = this.options.TryInverted.Value; if (this.options.PossibleFormats != null && this.options.PossibleFormats.Count > 0) { barcodeReader.Options.PossibleFormats = new List<BarcodeFormat>(); foreach (var pf in 
this.options.PossibleFormats) barcodeReader.Options.PossibleFormats.Add(pf); } outputRecorder = new OutputRecorder (this.options, img => { try { var started = DateTime.Now; var rs = barcodeReader.Decode(img); var total = DateTime.Now - started; Console.WriteLine("Decode Time: " + total.TotalMilliseconds + " ms"); if (rs != null) resultCallback(rs); } catch (Exception ex) { Console.WriteLine("DECODE FAILED: " + ex); } }); output.AlwaysDiscardsLateVideoFrames = true; output.SetSampleBufferDelegate (outputRecorder, queue); Console.WriteLine("SetupCamera Finished"); session.AddOutput (output); //session.StartRunning (); if (captureDevice.IsFocusModeSupported(AVCaptureFocusMode.ModeContinuousAutoFocus)) { NSError err = null; if (captureDevice.LockForConfiguration(out err)) { captureDevice.FocusMode = AVCaptureFocusMode.ModeContinuousAutoFocus; if (captureDevice.FocusPointOfInterestSupported) captureDevice.FocusPointOfInterest = new PointF(0.5f, 0.5f); captureDevice.UnlockForConfiguration(); } else Console.WriteLine("Failed to Lock for Config: " + err.Description); } return true; }
bool SetupCaptureSession () { //Console.WriteLine ("SetupCaptureSession"); // Overview: RosyWriter uses separate GCD queues for audio and video capture. If a single GCD queue // is used to deliver both audio and video buffers, and our video processing consistently takes // too long, the delivery queue can back up, resulting in audio being dropped. // // When recording, RosyWriter creates a third GCD queue for calls to AVAssetWriter. This ensures // that AVAssetWriter is not called to start or finish writing from multiple threads simultaneously. // // RosyWriter uses AVCaptureSession's default preset, AVCaptureSessionPresetHigh. // Create Capture session captureSession = new AVCaptureSession (); captureSession.BeginConfiguration (); // Create audio connection NSError error; var audioDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Audio); if (audioDevice == null) return false; // e.g. simulator var audioIn = new AVCaptureDeviceInput (audioDevice, out error); if (captureSession.CanAddInput (audioIn)) captureSession.AddInput (audioIn); var audioOut = new AVCaptureAudioDataOutput (); var audioCaptureQueue = new DispatchQueue ("Audio Capture Queue"); // Add the Delegate to capture each sample that comes through audioOut.SetSampleBufferDelegateQueue (this, audioCaptureQueue); if (captureSession.CanAddOutput (audioOut)) captureSession.AddOutput (audioOut); audioConnection = audioOut.ConnectionFromMediaType (AVMediaType.Audio); // Create Video Session var videoDevice = VideoDeviceWithPosition (AVCaptureDevicePosition.Back); var videoIn = new AVCaptureDeviceInput (videoDevice, out error); if (captureSession.CanAddInput (videoIn)) captureSession.AddInput (videoIn); // RosyWriter prefers to discard late video frames early in the capture pipeline, since its // processing can take longer than real-time on some platforms (such as iPhone 3GS). // Clients whose image processing is faster than real-time should consider setting AVCaptureVideoDataOutput's // alwaysDiscardsLateVideoFrames property to NO. var videoOut = new AVCaptureVideoDataOutput { AlwaysDiscardsLateVideoFrames = true, VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA) }; // Create a DispatchQueue for the Video Processing var videoCaptureQueue = new DispatchQueue ("Video Capture Queue"); videoOut.SetSampleBufferDelegateQueue (this, videoCaptureQueue); if (captureSession.CanAddOutput (videoOut)) captureSession.AddOutput (videoOut); // Set the Video connection from the Video Output object videoConnection = videoOut.ConnectionFromMediaType (AVMediaType.Video); videoOrientation = videoConnection.VideoOrientation; captureSession.CommitConfiguration (); return true; }
bool SetupCaptureSession()
{
    session = new AVCaptureSession();

    AVCaptureDevice device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
    if (device == null) {
        Console.WriteLine("No video camera (in simulator?)");
        return false; // simulator?
    }

    NSError error = null;
    AVCaptureDeviceInput input = AVCaptureDeviceInput.FromDevice(device, out error);
    if (input == null)
        Console.WriteLine("Error: " + error);
    else
        session.AddInput(input);

    AVCaptureMetadataOutput output = new AVCaptureMetadataOutput();
    var dg = new CaptureDelegate(this);
    output.SetDelegate(dg, MonoTouch.CoreFoundation.DispatchQueue.MainQueue);
    session.AddOutput(output);

    // This could be any list of supported barcode types
    output.MetadataObjectTypes = new NSString[] { AVMetadataObject.TypeQRCode, AVMetadataObject.TypeAztecCode };
    // OR you could just accept "all" with the following line;
    // output.MetadataObjectTypes = output.AvailableMetadataObjectTypes; // empty
    // DEBUG: use this if you're curious about the available types
    // foreach (var t in output.AvailableMetadataObjectTypes)
    //     Console.WriteLine(t);

    AVCaptureVideoPreviewLayer previewLayer = new AVCaptureVideoPreviewLayer(session);
    //previewLayer.Frame = new RectangleF(0,0, View.Frame.Size.Width, View.Frame.Size.Height);
    previewLayer.Frame = new RectangleF(0, 0, 320, 290);
    previewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill.ToString();
    View.Layer.AddSublayer (previewLayer);

    session.StartRunning();
    Console.WriteLine("StartRunning");
    return true;
}
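// The CaptureDelegate passed to SetDelegate above is not shown. A minimal sketch, assuming it
// subclasses AVCaptureMetadataOutputObjectsDelegate and reports decoded string values back to its
// controller; the QrScannerViewController type and its OnCodeScanned method are hypothetical names
// standing in for whatever controller created the delegate.
class CaptureDelegate : AVCaptureMetadataOutputObjectsDelegate
{
    readonly QrScannerViewController parent;   // hypothetical owning controller

    public CaptureDelegate (QrScannerViewController parent)
    {
        this.parent = parent;
    }

    public override void DidOutputMetadataObjects (AVCaptureMetadataOutput captureOutput,
        AVMetadataObject[] metadataObjects, AVCaptureConnection connection)
    {
        // Each recognized barcode arrives as a machine-readable code object.
        foreach (var metadata in metadataObjects) {
            var readable = metadata as AVMetadataMachineReadableCodeObject;
            if (readable != null)
                parent.OnCodeScanned (readable.StringValue);   // hypothetical callback
        }
    }
}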
bool SetupCaptureSession()
{
    session = new AVCaptureSession () {
        SessionPreset = AVCaptureSession.PresetMedium
    };

    AVCaptureDevice[] capDevices = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video);
    AVCaptureDeviceInput input = null;
    if (capDevices.Length != 0)
        input = AVCaptureDeviceInput.FromDevice (capDevices[0]);

    if (input == null) {
        new UIAlertView("Error", "Camera not available", null, "OK", null).Show();
        Console.WriteLine ("Camera not available");
        return false;
    }
    session.AddInput (input);

    var output = new AVCaptureVideoDataOutput () {
        VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA),
        MinFrameDuration = new CMTime (1, 30) //second parameter is frames per second
    };

    queue = new MonoTouch.CoreFoundation.DispatchQueue ("myQueue");
    outputRecorder = new OutputRecorder ();
    output.SetSampleBufferDelegateAndQueue (outputRecorder, queue);
    session.AddOutput (output);

    session.StartRunning ();
    return true;
}
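// Note on MinFrameDuration above: CMTime (value, timescale) represents value/timescale seconds,
// so new CMTime (1, 30) means "at least 1/30 s per frame", i.e. at most ~30 fps. The
// "second parameter is frames per second" reading only holds while the first parameter is 1.
// A small sketch of capping capture at 15 fps instead:
var throttledOutput = new AVCaptureVideoDataOutput () {
    VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA),
    MinFrameDuration = new CMTime (1, 15)   // one frame per 1/15 s
};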
public async override void ViewDidLoad ()
{
    base.ViewDidLoad ();

    // Disable UI. The UI is enabled if and only if the session starts running.
    CameraButton.Enabled = false;
    RecordButton.Enabled = false;
    StillButton.Enabled = false;

    // Create the AVCaptureSession.
    Session = new AVCaptureSession ();

    // Setup the preview view.
    PreviewView.Session = Session;

    // Communicate with the session and other session objects on this queue.
    SessionQueue = new DispatchQueue ("session queue");
    SetupResult = AVCamSetupResult.Success;

    // Check video authorization status. Video access is required and audio access is optional.
    // If audio access is denied, audio is not recorded during movie recording.
    switch (AVCaptureDevice.GetAuthorizationStatus (AVMediaType.Video)) {
        // The user has previously granted access to the camera.
        case AVAuthorizationStatus.Authorized:
            break;

        // The user has not yet been presented with the option to grant video access.
        // We suspend the session queue to delay session setup until the access request has completed to avoid
        // asking the user for audio access if video access is denied.
        // Note that audio access will be implicitly requested when we create an AVCaptureDeviceInput for audio during session setup.
        case AVAuthorizationStatus.NotDetermined:
            SessionQueue.Suspend ();
            var granted = await AVCaptureDevice.RequestAccessForMediaTypeAsync (AVMediaType.Video);
            if (!granted)
                SetupResult = AVCamSetupResult.CameraNotAuthorized;
            SessionQueue.Resume ();
            break;

        // The user has previously denied access.
        default:
            SetupResult = AVCamSetupResult.CameraNotAuthorized;
            break;
    }

    // Setup the capture session.
    // In general it is not safe to mutate an AVCaptureSession or any of its inputs, outputs, or connections from multiple threads at the same time.
    // Why not do all of this on the main queue?
    // Because AVCaptureSession.StartRunning is a blocking call which can take a long time. We dispatch session setup to the sessionQueue
    // so that the main queue isn't blocked, which keeps the UI responsive.
    SessionQueue.DispatchAsync (() => {
        if (SetupResult != AVCamSetupResult.Success)
            return;

        backgroundRecordingID = -1;
        NSError error;

        AVCaptureDevice videoDevice = CreateDevice (AVMediaType.Video, AVCaptureDevicePosition.Back);
        AVCaptureDeviceInput videoDeviceInput = AVCaptureDeviceInput.FromDevice (videoDevice, out error);
        if (videoDeviceInput == null)
            Console.WriteLine ("Could not create video device input: {0}", error);

        Session.BeginConfiguration ();
        if (Session.CanAddInput (videoDeviceInput)) {
            Session.AddInput (VideoDeviceInput = videoDeviceInput);
            DispatchQueue.MainQueue.DispatchAsync (() => {
                // Why are we dispatching this to the main queue?
                // Because AVCaptureVideoPreviewLayer is the backing layer for PreviewView and UIView
                // can only be manipulated on the main thread.
                // Note: As an exception to the above rule, it is not necessary to serialize video orientation changes
                // on the AVCaptureVideoPreviewLayer’s connection with other session manipulation.
                // Use the status bar orientation as the initial video orientation. Subsequent orientation changes are handled by
                // ViewWillTransitionToSize method.
                UIInterfaceOrientation statusBarOrientation = UIApplication.SharedApplication.StatusBarOrientation;
                AVCaptureVideoOrientation initialVideoOrientation = AVCaptureVideoOrientation.Portrait;
                if (statusBarOrientation != UIInterfaceOrientation.Unknown)
                    initialVideoOrientation = (AVCaptureVideoOrientation)(long)statusBarOrientation;

                var previewLayer = (AVCaptureVideoPreviewLayer)PreviewView.Layer;
                previewLayer.Connection.VideoOrientation = initialVideoOrientation;
            });
        } else {
            Console.WriteLine ("Could not add video device input to the session");
            SetupResult = AVCamSetupResult.SessionConfigurationFailed;
        }

        AVCaptureDevice audioDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Audio);
        AVCaptureDeviceInput audioDeviceInput = AVCaptureDeviceInput.FromDevice (audioDevice, out error);
        if (audioDeviceInput == null)
            Console.WriteLine ("Could not create audio device input: {0}", error);
        if (Session.CanAddInput (audioDeviceInput))
            Session.AddInput (audioDeviceInput);
        else
            Console.WriteLine ("Could not add audio device input to the session");

        var movieFileOutput = new AVCaptureMovieFileOutput ();
        if (Session.CanAddOutput (movieFileOutput)) {
            Session.AddOutput (MovieFileOutput = movieFileOutput);
            AVCaptureConnection connection = movieFileOutput.ConnectionFromMediaType (AVMediaType.Video);
            if (connection.SupportsVideoStabilization)
                connection.PreferredVideoStabilizationMode = AVCaptureVideoStabilizationMode.Auto;
        } else {
            Console.WriteLine ("Could not add movie file output to the session");
            SetupResult = AVCamSetupResult.SessionConfigurationFailed;
        }

        var stillImageOutput = new AVCaptureStillImageOutput ();
        if (Session.CanAddOutput (stillImageOutput)) {
            stillImageOutput.CompressedVideoSetting = new AVVideoSettingsCompressed {
                Codec = AVVideoCodec.JPEG
            };
            Session.AddOutput (StillImageOutput = stillImageOutput);
        } else {
            Console.WriteLine ("Could not add still image output to the session");
            SetupResult = AVCamSetupResult.SessionConfigurationFailed;
        }

        Session.CommitConfiguration ();
    });
}
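// CreateDevice, called at the top of the SessionQueue block above, is not shown here. A plausible
// sketch of such a helper in the spirit of Apple's AVCam sample: prefer the device at the requested
// position and fall back to the first available one (the exact fallback behaviour is an assumption).
// Assumes "using System.Linq;".
static AVCaptureDevice CreateDevice (string mediaType, AVCaptureDevicePosition position)
{
    var devices = AVCaptureDevice.DevicesWithMediaType (mediaType);
    var preferred = devices.FirstOrDefault (d => d.Position == position);
    return preferred ?? devices.FirstOrDefault ();
}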
public override void ViewDidLoad()
{
    base.ViewDidLoad ();

    weAreRecording = false;
    lblError.Hidden = true;
    btnStartRecording.SetTitle("Start Recording", UIControlState.Normal);

    //Set up session
    session = new AVCaptureSession ();

    //Set up inputs and add them to the session
    //this will only work if using a physical device!
    Console.WriteLine ("getting device inputs");
    try {
        //add video capture device
        device = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
        input = AVCaptureDeviceInput.FromDevice (device);
        session.AddInput (input);

        //add audio capture device
        audioDevice = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Audio);
        audioInput = AVCaptureDeviceInput.FromDevice(audioDevice);
        session.AddInput(audioInput);
    } catch(Exception ex) {
        //show the label error. This will always show when running in simulator instead of physical device.
        lblError.Hidden = false;
        Console.WriteLine ("Failed to get device inputs: " + ex);
        return;
    }

    //Set up preview layer (shows what the input device sees)
    Console.WriteLine ("setting up preview layer");
    previewlayer = new AVCaptureVideoPreviewLayer (session);
    previewlayer.Frame = this.View.Bounds;

    //this code makes UI controls sit on top of the preview layer! Allows you to just place the controls in interface builder
    UIView cameraView = new UIView ();
    cameraView.Layer.AddSublayer (previewlayer);
    this.View.AddSubview (cameraView);
    this.View.SendSubviewToBack (cameraView);

    Console.WriteLine ("Configuring output");
    output = new AVCaptureMovieFileOutput ();

    long totalSeconds = 10000;
    Int32 preferredTimeScale = 30;
    CMTime maxDuration = new CMTime (totalSeconds, preferredTimeScale);
    output.MinFreeDiskSpaceLimit = 1024 * 1024;
    output.MaxRecordedDuration = maxDuration;

    if (session.CanAddOutput (output)) {
        session.AddOutput (output);
    }

    session.SessionPreset = AVCaptureSession.PresetMedium;

    Console.WriteLine ("About to start running session");
    session.StartRunning ();

    //toggle recording button was pushed.
    btnStartRecording.TouchUpInside += startStopPushed;
    //Console.ReadLine ();
}
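// The startStopPushed handler wired up above is not shown. A minimal sketch, assuming the recording
// goes to a temporary .mov file and that a separate recording delegate class (here called
// RecordingDelegate, hypothetical) handles completion.
// Assumed usings: System, System.IO, Foundation, AVFoundation, UIKit.
void startStopPushed (object sender, EventArgs e)
{
    if (!weAreRecording) {
        var outputPath = Path.Combine (Path.GetTempPath (), "capture.mov");
        if (File.Exists (outputPath))
            File.Delete (outputPath);

        output.StartRecordingToOutputFile (NSUrl.FromFilename (outputPath), new RecordingDelegate ());
        btnStartRecording.SetTitle ("Stop Recording", UIControlState.Normal);
        weAreRecording = true;
    } else {
        output.StopRecording ();
        btnStartRecording.SetTitle ("Start Recording", UIControlState.Normal);
        weAreRecording = false;
    }
}

// Hypothetical delegate invoked when the movie file has finished writing.
class RecordingDelegate : AVCaptureFileOutputRecordingDelegate
{
    public override void FinishedRecording (AVCaptureFileOutput captureOutput, NSUrl outputFileUrl,
        NSObject[] connections, NSError error)
    {
        Console.WriteLine (error == null
            ? "Recording saved to: " + outputFileUrl.Path
            : "Recording failed: " + error.LocalizedDescription);
    }
}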
bool SetupCaptureSession ()
{
    // configure the capture session for low resolution, change this if your code
    // can cope with more data or volume
    session = new AVCaptureSession () {
        SessionPreset = AVCaptureSession.PresetMedium
    };

    // create a device input and attach it to the session
    var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
    if (captureDevice == null) {
        Image<Bgr, Byte> img = new Image<Bgr, byte> (512, 512, new Bgr (255, 255, 255));
        CvInvoke.PutText (
            img,
            "Capture device not found.",
            new Point (10, 200),
            FontFace.HersheyComplex,
            1,
            new MCvScalar (),
            2);
        ImageView.Image = img.ToUIImage();
        return false;
    }

    var input = AVCaptureDeviceInput.FromDevice (captureDevice);
    if (input == null) {
        Console.WriteLine ("No input device");
        return false;
    }
    session.AddInput (input);

    // create a VideoDataOutput and add it to the session
    AVVideoSettingsUncompressed settingUncomp = new AVVideoSettingsUncompressed();
    settingUncomp.PixelFormatType = CVPixelFormatType.CV32BGRA;
    var output = new AVCaptureVideoDataOutput () {
        UncompressedVideoSetting = settingUncomp,
        // If you want to cap the frame rate at a given speed, in this sample: 15 frames per second
        //MinFrameDuration = new CMTime (1, 15)
    };

    // configure the output
    queue = new DispatchQueue ("myQueue");
    outputRecorder = new OutputRecorder (ImageView);
    output.SetSampleBufferDelegateQueue(outputRecorder, queue);
    session.AddOutput (output);

    session.StartRunning ();
    return true;
}
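// None of these examples show teardown. A minimal sketch of stopping and releasing the session when
// the view goes away; placing this in ViewDidDisappear is an assumption, and the field name matches
// the example above.
public override void ViewDidDisappear (bool animated)
{
    base.ViewDidDisappear (animated);

    if (session == null)
        return;

    if (session.Running)
        session.StopRunning ();

    // Detach outputs and inputs before disposing the session.
    foreach (var o in session.Outputs)
        session.RemoveOutput (o);
    foreach (var i in session.Inputs)
        session.RemoveInput (i);

    session.Dispose ();
    session = null;
}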