Example #1
        private bool InitScanner(BarcodeScanner.BarcodeFormat barcodeType)
        {
            device = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);
            if (device == null)
            {
                return false;
            }

            input = AVCaptureDeviceInput.FromDevice(device);
            if (input == null)
            {
                return false;
            }

            // Enable continuous autofocus when the device supports it.
            if (input.Device.IsFocusModeSupported(AVCaptureFocusMode.ContinuousAutoFocus))
            {
                if (input.Device.LockForConfiguration(out NSError err))
                {
                    input.Device.FocusMode = AVCaptureFocusMode.ContinuousAutoFocus;
                    input.Device.UnlockForConfiguration();
                }
            }

            output = new AVCaptureMetadataOutput();
            output.SetDelegate(this, DispatchQueue.MainQueue);

            session = new AVCaptureSession();
            session.AddInput(input);
            session.AddOutput(output);
            output.MetadataObjectTypes = GetBarcodeFormat(barcodeType);

            captureVideoPreviewLayer              = AVCaptureVideoPreviewLayer.FromSession(session);
            captureVideoPreviewLayer.Frame        = CGRect.Empty;
            captureVideoPreviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
            captureVideoPreviewLayer.Connection.VideoOrientation = GetDeviceOrientation();
            return true;
        }
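The GetBarcodeFormat helper called above is not part of the snippet. A minimal sketch of such a mapping, assuming a hypothetical BarcodeScanner.BarcodeFormat enum with QrCode, Ean13 and Code128 members; AVMetadataObjectType is a flags enum, so several formats can be combined:

        // Hypothetical mapping from the wrapper's enum to AVFoundation's metadata types.
        private AVMetadataObjectType GetBarcodeFormat(BarcodeScanner.BarcodeFormat barcodeType)
        {
            switch (barcodeType)
            {
                case BarcodeScanner.BarcodeFormat.QrCode:   // hypothetical member
                    return AVMetadataObjectType.QRCode;
                case BarcodeScanner.BarcodeFormat.Ean13:    // hypothetical member
                    return AVMetadataObjectType.EAN13Code;
                case BarcodeScanner.BarcodeFormat.Code128:  // hypothetical member
                    return AVMetadataObjectType.Code128Code;
                default:
                    // Scan the common formats when no specific one is requested.
                    return AVMetadataObjectType.QRCode
                         | AVMetadataObjectType.EAN13Code
                         | AVMetadataObjectType.Code128Code;
            }
        }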
Example #2
        private bool initScanner()
        {
            device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
            if (device == null)
            {
                this.Debug("AVCaptureDevice is null");

                return false;
            }

            input = AVCaptureDeviceInput.FromDevice(device);

            if (input == null)
            {
                this.Debug("AVCaptureDeviceInput is null");

                return false;
            }

            output = new AVCaptureMetadataOutput();
            output.SetDelegate(this, DispatchQueue.MainQueue);

            session = new AVCaptureSession();
            session.AddInput(input);
            session.AddOutput(output);
            output.MetadataObjectTypes = configuration.Barcodes.ConvertToIOS();

            captureVideoPreviewLayer              = AVCaptureVideoPreviewLayer.FromSession(session);
            captureVideoPreviewLayer.Frame        = CGRect.Empty;
            captureVideoPreviewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
            captureVideoPreviewLayer.Connection.VideoOrientation = getDeviceOrientation();

            return true;
        }
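Both variants also depend on a device-orientation helper (GetDeviceOrientation / getDeviceOrientation) that is not shown. A plausible sketch that maps the current interface orientation to a capture orientation, using the classic StatusBarOrientation API:

        private AVCaptureVideoOrientation getDeviceOrientation()
        {
            // Map the UI orientation onto the capture connection's orientation.
            switch (UIApplication.SharedApplication.StatusBarOrientation)
            {
                case UIInterfaceOrientation.LandscapeLeft:
                    return AVCaptureVideoOrientation.LandscapeLeft;
                case UIInterfaceOrientation.LandscapeRight:
                    return AVCaptureVideoOrientation.LandscapeRight;
                case UIInterfaceOrientation.PortraitUpsideDown:
                    return AVCaptureVideoOrientation.PortraitUpsideDown;
                default:
                    return AVCaptureVideoOrientation.Portrait;
            }
        }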
Example #3
        void SetupCaptureDevice()
        {
            captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video);
            if (captureDevice == null)
            {
                Console.WriteLine("Error: no video devices available");
                return;
            }

            videoDeviceInput = AVCaptureDeviceInput.FromDevice(captureDevice);
            if (videoDeviceInput == null)
            {
                Console.WriteLine("Error: could not create AVCaptureDeviceInput");
                return;
            }

            if (captureSession.CanAddInput(videoDeviceInput))
            {
                captureSession.AddInput(videoDeviceInput);
            }

            previewLayer = AVCaptureVideoPreviewLayer.FromSession(captureSession);
            previewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspect;
            previewLayer.Connection.VideoOrientation = AVCaptureVideoOrientation.Portrait;
        }
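SetupCaptureDevice builds the preview layer but never attaches it to a view or starts the session. A minimal usage sketch with a hypothetical StartPreview helper, assuming previewLayer and captureSession are the fields used above:

        void StartPreview(UIView previewView)
        {
            // Size the preview to the host view and attach it.
            previewLayer.Frame = previewView.Layer.Bounds;
            previewView.Layer.AddSublayer(previewLayer);

            // Begin streaming frames from the camera.
            captureSession.StartRunning();
        }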
Example #4
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();
            var device = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video);

            if (device is null)
            {
                this.ShowAlert("无法访问相机", null);
                return;
            }

            var input = AVCaptureDeviceInput.FromDevice(device);

            if (input is null)
            {
                this.ShowAlert("无法访问相机", null);
                return;
            }

            session.AddInput(input);
            try
            {
                var output = new AVCaptureMetadataOutput();
                output.SetDelegate(this, DispatchQueue.MainQueue);
                session.AddOutput(output);

                output.MetadataObjectTypes = AVMetadataObjectType.QRCode;
            }
            catch
            {
                // Adding the output or delegate can throw if the session is misconfigured.
                return;
            }

            preview = AVCaptureVideoPreviewLayer.FromSession(session);
            if (preview is null)
            {
                this.ShowAlert("无法显示扫描预览", null);
                return;
            }
            preview.VideoGravity = AVLayerVideoGravity.Resize;
            preview.Frame        = View.Layer.Bounds;
            View.Layer.AddSublayer(preview);

            session.StartRunning();

            codeFrame = new UIView();
            codeFrame.Layer.BorderColor = UIColor.Green.CGColor;
            codeFrame.Layer.BorderWidth = 2;
            View.AddSubview(codeFrame);
            View.BringSubviewToFront(codeFrame);
        }
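The green codeFrame view only moves once metadata callbacks arrive. A minimal sketch of the delegate method, assuming the controller implements IAVCaptureMetadataOutputObjectsDelegate and that preview and codeFrame are the fields assigned above:

        [Export("captureOutput:didOutputMetadataObjects:fromConnection:")]
        public void DidOutputMetadataObjects(AVCaptureMetadataOutput captureOutput, AVMetadataObject[] metadataObjects, AVCaptureConnection connection)
        {
            if (metadataObjects.Length == 0)
            {
                codeFrame.Frame = CGRect.Empty;
                return;
            }

            // Convert the detected code's coordinates into the preview layer's
            // coordinate space and move the green frame over it.
            var transformed = preview.GetTransformedMetadataObject(metadataObjects[0]);
            if (transformed != null)
            {
                codeFrame.Frame = transformed.Bounds;
            }
        }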
Example #5
        void InitializeCameraLayer()
        {
            this.captureSession = new AVCaptureSession()
            {
                SessionPreset = AVCaptureSession.PresetMedium                 // TODO investigate that
            };
            var captureDevice = AVCaptureDevice.DevicesWithMediaType(AVMediaType.Video).Where(dev => dev.Position == AVCaptureDevicePosition.Front).FirstOrDefault();

            if (captureDevice == null)
            {
                Console.WriteLine("No captureDevice - this won't work on the simulator, try a physical device");
                return;
            }
            var input = AVCaptureDeviceInput.FromDevice(captureDevice);

            if (input == null)
            {
                Console.WriteLine("No input - this won't work on the simulator, try a physical device");
                return;
            }
            this.captureSession.AddInput(input);

            // set up the still image output to produce JPEGs
            output = new AVCaptureStillImageOutput();
            var dict = new NSMutableDictionary();

            dict[AVVideo.CodecKey] = new NSNumber((int)AVVideoCodec.JPEG);
            output.OutputSettings  = dict;
            captureSession.AddOutput(output);

            this.previewLayer = AVCaptureVideoPreviewLayer.FromSession(this.captureSession);
            this.previewLayer.LayerVideoGravity = AVLayerVideoGravity.ResizeAspectFill;
            this.previewLayer.Frame             = this.View.Frame;
            this.captureSession.StartRunning();

            this.cameraInitialized = true;
        }
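The still image output configured above is never exercised in the snippet. A minimal capture sketch with a hypothetical CaptureJpegAsync helper, to be called after InitializeCameraLayer has run:

        async Task<NSData> CaptureJpegAsync()
        {
            // Grab one frame from the still image output and encode it as JPEG.
            var connection = output.ConnectionFromMediaType(AVMediaType.Video);
            var sampleBuffer = await output.CaptureStillImageTaskAsync(connection);
            return AVCaptureStillImageOutput.JpegStillToNSData(sampleBuffer);
        }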
Example #6
        void setupCaptureSession()
        {
            //Create a device for capturing barcodes
            var captureDevice = AVCaptureDevice.GetDefaultDevice(AVMediaTypes.Video);
            if (captureDevice == null)
            {
                return;
            }

            //Configure the device for autofocus and similar niceties
            ConfigureCameraForDevice(captureDevice);
            //Create an input node from that device
            var captureInput = AVCaptureDeviceInput.FromDevice(captureDevice);

            //Add the input to the session
            session.AddInput(captureInput);

            //Create a preview layer for the view
            var previewLayer = AVCaptureVideoPreviewLayer.FromSession(session);

            previewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
            previewLayer.Frame        = vie_Preview_cam.Frame;

            //Add the preview layer to the View for the camera uiview
            vie_Preview_cam.Layer.AddSublayer(previewLayer);

            //Assign who's going to handle the metadataoutput
            var metadataoutput = new AVCaptureMetadataOutput();

            //Set delegate
            metadataoutput.SetDelegate(this, CoreFoundation.DispatchQueue.MainQueue);
            //Add the metadataoutput to session
            session.AddOutput(metadataoutput);

            //Assign which types of codes will be read
            metadataoutput.MetadataObjectTypes = AVMetadataObjectType.QRCode;

            //Start the Session
            session.StartRunning();
        }
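ConfigureCameraForDevice is referenced but not included in the example. A plausible implementation in the spirit of the autofocus block from Example #1; the original project's body may differ:

        void ConfigureCameraForDevice(AVCaptureDevice device)
        {
            // Prefer continuous autofocus; fall back to continuous auto-exposure.
            if (device.IsFocusModeSupported(AVCaptureFocusMode.ContinuousAutoFocus))
            {
                if (device.LockForConfiguration(out NSError error))
                {
                    device.FocusMode = AVCaptureFocusMode.ContinuousAutoFocus;
                    device.UnlockForConfiguration();
                }
            }
            else if (device.IsExposureModeSupported(AVCaptureExposureMode.ContinuousAutoExposure))
            {
                if (device.LockForConfiguration(out NSError error))
                {
                    device.ExposureMode = AVCaptureExposureMode.ContinuousAutoExposure;
                    device.UnlockForConfiguration();
                }
            }
        }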
Example #7
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            this.View.BackgroundColor = UIColor.White;

            NSError error;

            // Setup detector options.
            var options = new CIDetectorOptions {
                Accuracy = FaceDetectorAccuracy.High,
                // Can give a hint here about the rects to detect. 1.4 would be for A4 sheets of paper for instance.
                AspectRatio = 1.41f,
            };

            // Create a rectangle detector. Note that you can also create QR detector or a face detector.
            // Most of this code will also work with other detectors (like streaming to a preview layer and grabbing images).
            this.detector = CIDetector.CreateRectangleDetector(context: null, detectorOptions: options);

            // Create the session. The AVCaptureSession is the managing instance of the whole video handling.
            var captureSession = new AVCaptureSession()
            {
                // Defines what quality we want to use for the images we grab. Photo gives highest resolutions.
                SessionPreset = AVCaptureSession.PresetPhoto
            };

            // Find a suitable AVCaptureDevice for video input.
            var device = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);

            if (device == null)
            {
                // This will not work on the iOS Simulator - there is no camera. :-)
                throw new InvalidProgramException("Failed to get AVCaptureDevice for video input!");
            }

            // Create a device input with the device and add it to the session.
            var videoInput = AVCaptureDeviceInput.FromDevice(device, out error);

            if (videoInput == null)
            {
                throw new InvalidProgramException("Failed to get AVCaptureDeviceInput from AVCaptureDevice!");
            }

            // Let session read from the input, this is our source.
            captureSession.AddInput(videoInput);

            // Create output for the video stream. This is the destination.
            var videoOutput = new AVCaptureVideoDataOutput()
            {
                AlwaysDiscardsLateVideoFrames = true
            };

            // Define the video format we want to use. Note that Xamarin exposes the CompressedVideoSetting and UncompressedVideoSetting
            // properties on AVCaptureVideoDataOutput in the Unified API, but I could not get these to work. The VideoSettings property is deprecated,
            // so I use WeakVideoSettings instead, which takes an NSDictionary as input.
            this.videoSettingsDict = new NSMutableDictionary();
            this.videoSettingsDict.Add(CVPixelBuffer.PixelFormatTypeKey, NSNumber.FromUInt32((uint)CVPixelFormatType.CV32BGRA));
            videoOutput.WeakVideoSettings = this.videoSettingsDict;

            // Create a delegate to report back to us when an image has been captured.
            // We want to grab the camera stream and feed it through an AVCaptureVideoDataOutputSampleBufferDelegate,
            // which lets us get notified when a new image is available. An implementation of that delegate is VideoFrameSamplerDelegate in this project.
            this.sampleBufferDelegate = new VideoFrameSamplerDelegate();

            // Processing happens via Grand Central Dispatch (GCD), so we need to provide a queue.
            // This is pretty much like a system managed thread (see: http://zeroheroblog.com/ios/concurrency-in-ios-grand-central-dispatch-gcd-dispatch-queues).
            this.sessionQueue = new DispatchQueue("AVSessionQueue");

            // Assign the queue and the delegate to the output. Now all output will go through the delegate.
            videoOutput.SetSampleBufferDelegate(this.sampleBufferDelegate, this.sessionQueue);

            // Add output to session.
            captureSession.AddOutput(videoOutput);

            // We also want to visualize the input stream. The raw stream can be fed into an AVCaptureVideoPreviewLayer, which is a subclass of CALayer.
            // A CALayer can be added to a UIView. We add that layer to the controller's main view.
            var layer = this.View.Layer;

            this.videoLayer       = AVCaptureVideoPreviewLayer.FromSession(captureSession);
            this.videoLayer.Frame = layer.Bounds;
            layer.AddSublayer(this.videoLayer);

            // All setup! Start capturing!
            captureSession.StartRunning();

            // This is just for information and allows you to get valid values for the detection framerate.
            Console.WriteLine("Available capture framerates:");
            var rateRanges = device.ActiveFormat.VideoSupportedFrameRateRanges;

            foreach (var r in rateRanges)
            {
                Console.WriteLine(r.MinFrameRate + "; " + r.MaxFrameRate + "; " + r.MinFrameDuration + "; " + r.MaxFrameDuration);
            }

            // Configure framerate. Kind of weird way of doing it but the only one that works.
            device.LockForConfiguration(out error);
            if (error == null)
            {
                // CMTime constructor means: 1 = one second, DETECTION_FPS = how many samples per unit, which is 1 second in this case.
                device.ActiveVideoMinFrameDuration = new CMTime(1, DETECTION_FPS);
                device.ActiveVideoMaxFrameDuration = new CMTime(1, DETECTION_FPS);
                device.UnlockForConfiguration();
            }

            // Put a small image view at the top left that shows the live image with the detected rectangle(s).
            this.imageViewOverlay = new UIImageView
            {
                ContentMode     = UIViewContentMode.ScaleAspectFit,
                BackgroundColor = UIColor.Gray
            };
            this.imageViewOverlay.Layer.BorderColor = UIColor.Red.CGColor;
            this.imageViewOverlay.Layer.BorderWidth = 3f;
            this.Add(this.imageViewOverlay);

            // Put another image view top right that shows the image with perspective correction.
            this.imageViewPerspective = new UIImageView
            {
                ContentMode     = UIViewContentMode.ScaleAspectFit,
                BackgroundColor = UIColor.Gray
            };
            this.imageViewPerspective.Layer.BorderColor = UIColor.Red.CGColor;
            this.imageViewPerspective.Layer.BorderWidth = 3f;
            this.Add(this.imageViewPerspective);

            // Add some labels for information.
            this.mainWindowLbl = new UILabel
            {
                Text          = "Live stream from camera. Point camera to a rectangular object.",
                TextAlignment = UITextAlignment.Center
            };
            this.Add(this.mainWindowLbl);

            this.detectionWindowLbl = new UILabel
            {
                Text          = "Detected rectangle overlay",
                TextAlignment = UITextAlignment.Center
            };
            this.Add(this.detectionWindowLbl);

            this.perspectiveWindowLbl = new UILabel
            {
                Text          = "Perspective corrected",
                TextAlignment = UITextAlignment.Center
            };
            this.Add(this.perspectiveWindowLbl);
        }
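The VideoFrameSamplerDelegate referenced above is not shown. A minimal sketch of such a delegate; the project's real implementation presumably also feeds each frame to the rectangle detector:

        class VideoFrameSamplerDelegate : AVCaptureVideoDataOutputSampleBufferDelegate
        {
            public override void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
            {
                try
                {
                    // Wrap the camera frame in a CIImage for further processing.
                    using (var pixelBuffer = sampleBuffer.GetImageBuffer() as CVPixelBuffer)
                    {
                        var image = CIImage.FromImageBuffer(pixelBuffer);
                        // ... run the detector on image here ...
                    }
                }
                finally
                {
                    // Sample buffers must be released quickly or the capture pipeline stalls.
                    sampleBuffer.Dispose();
                }
            }
        }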
Example #8
        /// <summary>
        /// Initializes the scanner.
        /// </summary>
        /// <param name="barcodeFormat">Barcode format(s) to recognize</param>
        /// <param name="sessionPreset">Capture resolution</param>
        /// <returns>true if the scanner was initialized successfully; otherwise false</returns>
        private bool InitScanner(AVMetadataObjectType barcodeFormat, NSString sessionPreset)
        {
            // Check camera access permission
            if (!IsCameraAuthorized)
            {
                this.Write("Camera access is not authorized.");
                return false;
            }

            // Use the back camera as the capture device
            Device = AVCaptureDevice
                     .DevicesWithMediaType(AVMediaType.Video)
                     .FirstOrDefault(e => e.Position == AVCaptureDevicePosition.Back);
            if (Device == null)
            {
                this.Write("No back camera available.");
                return false;
            }

            // Configure the input
            Input = AVCaptureDeviceInput.FromDevice(Device);
            if (Input == null)
            {
                this.Write("AVCaptureDeviceInput is null.");
                return false;
            }

            // Configure the output
            CaptureDelegate = new CaptureDelegate((metadataObjects) =>
            {
                if (BarcodeDetected == null)
                {
                    return;
                }

                foreach (var metadata in metadataObjects)
                {
                    var data = ((AVMetadataMachineReadableCodeObject)metadata).StringValue;
                    BarcodeDetected?.Invoke(new BarcodeData(metadata.Type.ToTmonFormat(), data));
                }
            });
            Output = new AVCaptureMetadataOutput();
            Output.SetDelegate(CaptureDelegate, DispatchQueue.MainQueue);

            // Configure the session
            Session = new AVCaptureSession()
            {
                SessionPreset = sessionPreset,
            };
            Session.AddInput(Input);
            Session.AddOutput(Output);

            // Set the barcode format(s) to detect (important: this must be set after the output is added to the session)
            Output.MetadataObjectTypes = barcodeFormat;

            // Configure the previewer
            Previewer              = AVCaptureVideoPreviewLayer.FromSession(Session);
            Previewer.Frame        = CGRect.Empty;
            Previewer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
            Previewer.Connection.VideoOrientation = DeviceOrientation;

            return true;
        }
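The IsCameraAuthorized property checked at the top is part of the surrounding class and not shown. A plausible implementation on top of the standard AVFoundation authorization API:

        private bool IsCameraAuthorized
        {
            get
            {
                // Authorized is the only state in which capture will produce frames.
                var status = AVCaptureDevice.GetAuthorizationStatus(AVMediaType.Video);
                return status == AVAuthorizationStatus.Authorized;
            }
        }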
Example #9
        protected virtual void SetupAVCapture()
        {
            AVCaptureDeviceInput deviceInput;

            // Select a video device, make an input
            var videoDevice = AVCaptureDeviceDiscoverySession.Create(
                new AVCaptureDeviceType[] { AVCaptureDeviceType.BuiltInWideAngleCamera },
                AVMediaType.Video,
                AVCaptureDevicePosition.Back
                ).Devices.FirstOrDefault();

            if (videoDevice == null)
            {
                Console.WriteLine("Could not find a back-facing wide-angle camera");
                return;
            }

            deviceInput = new AVCaptureDeviceInput(videoDevice, out NSError error);
            if (error != null)
            {
                Console.WriteLine($"Could not create video device input: {error.LocalizedDescription}");
                return;
            }

            session.BeginConfiguration();
            session.SessionPreset = AVCaptureSession.Preset640x480; // Model image size is smaller

            // Add a video input
            if (!session.CanAddInput(deviceInput))
            {
                Console.WriteLine("Could not add video device input to the session");
                session.CommitConfiguration();
                return;
            }
            session.AddInput(deviceInput);

            if (session.CanAddOutput(videoDataOutput))
            {
                // Add a video data output
                session.AddOutput(videoDataOutput);
                videoDataOutput.AlwaysDiscardsLateVideoFrames = true;
                videoDataOutput.WeakVideoSettings             = new NSDictionary(CVPixelBuffer.PixelFormatTypeKey, CVPixelFormatType.CV420YpCbCr8BiPlanarFullRange);
                videoDataOutput.SetSampleBufferDelegateQueue(this, videoDataOutputQueue);
            }
            else
            {
                Console.WriteLine("Could not add video data output to the session");
                session.CommitConfiguration();
                return;
            }

            var captureConnection = videoDataOutput.ConnectionFromMediaType(AVMediaType.Video);

            // Always process the frames
            captureConnection.Enabled = true;
            videoDevice.LockForConfiguration(out NSError error2);
            if (error2 == null)
            {
                var formatDescription        = videoDevice.ActiveFormat.FormatDescription as CMVideoFormatDescription;
                CMVideoDimensions dimensions = formatDescription.Dimensions;
                bufferSize.Width  = dimensions.Width;
                bufferSize.Height = dimensions.Height;
                videoDevice.UnlockForConfiguration();
            }
            else
            {
                Console.WriteLine($"{error2.LocalizedDescription}");
            }
            session.CommitConfiguration();
            previewLayer = AVCaptureVideoPreviewLayer.FromSession(session);
            previewLayer.VideoGravity = AVLayerVideoGravity.ResizeAspectFill;
            rootLayer          = previewView.Layer;
            previewLayer.Frame = rootLayer.Bounds;
            rootLayer.AddSublayer(previewLayer);
        }
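Because the class registers itself via SetSampleBufferDelegateQueue, it must also implement the sample buffer callback. A minimal sketch; the actual per-frame work (for example, running a vision model) would go where the comment sits:

        [Export("captureOutput:didOutputSampleBuffer:fromConnection:")]
        public void DidOutputSampleBuffer(AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
        {
            try
            {
                // ... process the frame here ...
            }
            finally
            {
                // Always release the buffer, or the capture pipeline will stall.
                sampleBuffer.Dispose();
            }
        }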
Example #10
        public override void ViewDidLoad()
        {
            base.ViewDidLoad();

            this.View.BackgroundColor = UIColor.White;

            NSError error;


            // Create the session. The AVCaptureSession is the managing instance of the whole video handling.
            var captureSession = new AVCaptureSession()
            {
                // Defines what quality we want to use for the images we grab. Photo gives highest resolutions.
                SessionPreset = AVCaptureSession.PresetPhoto
            };

            // Find a suitable AVCaptureDevice for video input.
            var device = AVCaptureDevice.GetDefaultDevice(AVMediaType.Video);

            if (device == null)
            {
                // This will not work on the iOS Simulator - there is no camera. :-)
                throw new InvalidProgramException("Failed to get AVCaptureDevice for video input!");
            }

            // Create a device input with the device and add it to the session.
            var videoInput = AVCaptureDeviceInput.FromDevice(device, out error);

            if (videoInput == null)
            {
                throw new InvalidProgramException("Failed to get AVCaptureDeviceInput from AVCaptureDevice!");
            }

            // Let session read from the input, this is our source.
            captureSession.AddInput(videoInput);

            // Create output for the video stream. This is the destination.
            var videoOutput = new AVCaptureVideoDataOutput()
            {
                AlwaysDiscardsLateVideoFrames = true
            };

            // Define the video format we want to use. Note that Xamarin exposes the CompressedVideoSetting and UncompressedVideoSetting
            // properties on AVCaptureVideoDataOutput in the Unified API, but I could not get these to work. The VideoSettings property is deprecated,
            // so I use WeakVideoSettings instead, which takes an NSDictionary as input.
            this.videoSettingsDict = new NSMutableDictionary();
            this.videoSettingsDict.Add(CVPixelBuffer.PixelFormatTypeKey, NSNumber.FromUInt32((uint)CVPixelFormatType.CV32BGRA));
            videoOutput.WeakVideoSettings = this.videoSettingsDict;

            // Create a delegate to report back to us when an image has been captured.
            // We want to grab the camera stream and feed it through an AVCaptureVideoDataOutputSampleBufferDelegate,
            // which lets us get notified when a new image is available. An implementation of that delegate is VideoFrameSamplerDelegate in this project.
            this.sampleBufferDelegate = new VideoFrameSamplerDelegate();

            // Processing happens via Grand Central Dispatch (GCD), so we need to provide a queue.
            // This is pretty much like a system managed thread (see: http://zeroheroblog.com/ios/concurrency-in-ios-grand-central-dispatch-gcd-dispatch-queues).
            this.sessionQueue = new DispatchQueue("AVSessionQueue");

            // Assign the queue and the delegate to the output. Now all output will go through the delegate.
            videoOutput.SetSampleBufferDelegateQueue(this.sampleBufferDelegate, this.sessionQueue);

            // Add output to session.
            captureSession.AddOutput(videoOutput);

            // We also want to visualize the input stream. The raw stream can be fed into an AVCaptureVideoPreviewLayer, which is a subclass of CALayer.
            // A CALayer can be added to a UIView. We add that layer to the controller's main view.
            var layer = this.View.Layer;

            this.videoLayer       = AVCaptureVideoPreviewLayer.FromSession(captureSession);
            this.videoLayer.Frame = layer.Bounds;
            layer.AddSublayer(this.videoLayer);

            // All setup! Start capturing!
            captureSession.StartRunning();

            // Configure framerate. Kind of weird way of doing it but the only one that works.
            device.LockForConfiguration(out error);
            if (error == null)
            {
                // CMTime constructor means: 1 = one second, DETECTION_FPS = how many samples per unit, which is 1 second in this case.
                device.ActiveVideoMinFrameDuration = new CMTime(1, DETECTION_FPS);
                device.ActiveVideoMaxFrameDuration = new CMTime(1, DETECTION_FPS);
                device.UnlockForConfiguration();
            }
        }
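The session started in ViewDidLoad is never stopped in this example. A sketch of the matching teardown, assuming captureSession is promoted from a local variable to a field:

        public override void ViewWillDisappear(bool animated)
        {
            base.ViewWillDisappear(animated);

            // Stop the camera when the view goes away to free the hardware.
            if (captureSession.Running)
            {
                captureSession.StopRunning();
            }
        }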