Example #1
		private async Task<FrameWithFaces> CaptureFrameAsync()
		{
			var previewProperties = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
			var width = (int)previewProperties.Width;
			var height = (int)previewProperties.Height;
			var videoFrame = new VideoFrame(BitmapPixelFormat.Rgba8, width, height);
			if (tempWb == null)
				tempWb = new WriteableBitmap(width, height);

			using (var currentFrame = await mediaCapture.GetPreviewFrameAsync(videoFrame))
			{
				SoftwareBitmap bitmap = currentFrame.SoftwareBitmap;
				var detector = await Windows.Media.FaceAnalysis.FaceDetector.CreateAsync();
				var supportedBitmapPixelFormats = Windows.Media.FaceAnalysis.FaceDetector.GetSupportedBitmapPixelFormats();
				var convertedBitmap = SoftwareBitmap.Convert(bitmap, supportedBitmapPixelFormats.First());
				var detectedFaces = await detector.DetectFacesAsync(convertedBitmap);

				byte[] bytes;
				bitmap.CopyToBuffer(tempWb.PixelBuffer);
				using (Stream stream = tempWb.PixelBuffer.AsStream())
				using (MemoryStream memoryStream = new MemoryStream())
				{
					stream.CopyTo(memoryStream);
					bytes = memoryStream.ToArray();
					return new FrameWithFaces
						{
							FrameData = bytes,
							FrameWidth = width,
							FrameHeight = height,
							Faces = detectedFaces.Select(f => 
								new Rect {X = f.FaceBox.X, Y = f.FaceBox.Y, Width = f.FaceBox.Width, Height = f.FaceBox.Height}).ToArray()
						};
				}
			}
		}
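The FrameWithFaces type returned above is not part of the snippet; a minimal sketch of a matching DTO, assuming plain settable properties (the real project may define it differently):

		// Hypothetical DTO matching the fields populated in CaptureFrameAsync.
		public class FrameWithFaces
		{
			public byte[] FrameData { get; set; }   // raw pixel bytes copied from the WriteableBitmap buffer
			public int FrameWidth { get; set; }
			public int FrameHeight { get; set; }
			public Rect[] Faces { get; set; }       // face bounding boxes (Windows.Foundation.Rect)
		}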
        public override VideoFrame getFrame()
        {
            TeRK.Image image = proxy.getFrame(0);

            VideoFrame frame = new VideoFrame();
            frame.ImageData = image.data;
            frame.Width = image.width;
            frame.Height = image.height;

            return frame;
        }
Example #3
        private FaceMatrix(FaceTracker faceTracker, MediaCapture mediaCapture, int rowsCount, int columnsCount)
        {
            _faceTracker = faceTracker;
            _mediaCapture = mediaCapture;
            
            // Get the preview stream properties; we need the width/height to size the face-detection frame.
            var videoProperties = _mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            _previewFrame = new VideoFrame(BitmapPixelFormat.Nv12, (int)videoProperties.Width, (int)videoProperties.Height);

            _rowsCount = rowsCount;
            _columnsCount = columnsCount;
        }
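Because the constructor above is private and a FaceTracker can only be created asynchronously, the class is presumably built through an async factory; a hedged sketch of what that could look like (the real creation code is not shown):

        // Hypothetical async factory for FaceMatrix.
        public static async Task<FaceMatrix> CreateAsync(MediaCapture mediaCapture, int rowsCount, int columnsCount)
        {
            // FaceTracker.CreateAsync is the only way to obtain a FaceTracker instance.
            FaceTracker faceTracker = await FaceTracker.CreateAsync();
            return new FaceMatrix(faceTracker, mediaCapture, rowsCount, columnsCount);
        }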
Example #4
        private async Task<byte[]> EncodeToBytes(VideoFrame frame)
        {
            byte[] array;

            using (var stream = new InMemoryRandomAccessStream())
            {
                var encoder = await CreateBitmapEncoder(stream);
                encoder.SetSoftwareBitmap(frame.SoftwareBitmap);
                await encoder.FlushAsync();
                array = new byte[stream.Size];
                stream.Seek(0); // the encoder leaves the stream positioned at the end, so rewind before reading
                await stream.ReadAsync(array.AsBuffer(), (uint) stream.Size, InputStreamOptions.None);
            }

            return array;
        }
        private async Task<VideoFrame> GetVideoFrame()
        {
            var frameProperties = configuration.CurrentVideoSize;

            var videoFrame = new VideoFrame(BitmapPixelFormat.Rgba16, (int) frameProperties.Width,
                (int) frameProperties.Height);

            try
            {
                return await controller.GetFrameAsync(videoFrame);
            }

            catch (Exception)
            {
                return videoFrame;
            }
        }
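A hedged usage sketch tying the two helpers above together; CreateBitmapEncoder is assumed to wrap BitmapEncoder.CreateAsync with a concrete encoder id such as JPEG:

        // Hypothetical caller: grab a frame from the controller and encode it to bytes.
        private async Task<byte[]> CaptureEncodedFrameAsync()
        {
            VideoFrame frame = await GetVideoFrame();
            return await EncodeToBytes(frame);
        }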
Example #6
			public virtual void run()
			{
				bool dropFrame = outerInstance.view.Width == 0 || outerInstance.view.Height == 0;

				// Only capture the view if the dimensions have been established
				if (!dropFrame)
				{
					// Draw view into bitmap backed canvas
					int measuredWidth = View.MeasureSpec.makeMeasureSpec(outerInstance.view.Width, View.MeasureSpec.EXACTLY);
					int measuredHeight = View.MeasureSpec.makeMeasureSpec(outerInstance.view.Height, View.MeasureSpec.EXACTLY);
					outerInstance.view.measure(measuredWidth, measuredHeight);
					outerInstance.view.layout(0, 0, outerInstance.view.MeasuredWidth, outerInstance.view.MeasuredHeight);

					Bitmap viewBitmap = Bitmap.createBitmap(outerInstance.view.Width, outerInstance.view.Height, Bitmap.Config.ARGB_8888);
					Canvas viewCanvas = new Canvas(viewBitmap);
					outerInstance.view.draw(viewCanvas);

					// Extract the frame from the bitmap
					int bytes = viewBitmap.ByteCount;
					ByteBuffer buffer = ByteBuffer.allocate(bytes);
					viewBitmap.copyPixelsToBuffer(buffer);
					sbyte[] array = buffer.array();
//JAVA TO C# CONVERTER WARNING: The original Java variable was marked 'final':
//ORIGINAL LINE: final long captureTimeNs = java.util.concurrent.TimeUnit.MILLISECONDS.toNanos(android.os.SystemClock.elapsedRealtime());
					long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());

					// Create video frame
					VideoDimensions dimensions = new VideoDimensions(outerInstance.view.Width, outerInstance.view.Height);
					VideoFrame videoFrame = new VideoFrame(array, dimensions, 0, captureTimeNs);

					// Notify the listener
					if (outerInstance.started.get())
					{
						outerInstance.videoCapturerListener.onFrameCaptured(videoFrame);
					}
				}

				// Schedule the next capture
				if (outerInstance.started.get())
				{
					outerInstance.handler.postDelayed(this, VIEW_CAPTURER_FRAMERATE_MS);
				}
			}
Example #7
 public double ComputeEmotionIntensity(VideoFrame frame, Emotion emotion)
 {
     if (frame.emotion == emotion)
         return frame.emotionIntensity;
     else
         return 0;
 }
 public async Task <LiveCameraResult> VisionAnalysisFunction(VideoFrame frame)
 {
     return(await SubmitVisionAnalysisFunction(frame));
 }
Example #9
 public NewFrameEventArgs(VideoFrame frame)
 {
     Frame = frame;
 }
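Only the constructor appears in the snippet; a plausible full definition of the event-args type, assuming the Frame member is a read-only property:

 public class NewFrameEventArgs : EventArgs
 {
     public NewFrameEventArgs(VideoFrame frame)
     {
         Frame = frame;
     }

     public VideoFrame Frame { get; }
 }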
        /// <summary>
        /// Gets the current preview frame as a SoftwareBitmap, displays its properties in a TextBlock, and can optionally display the image
        /// in the UI and/or save it to disk as a jpg
        /// </summary>
        /// <returns></returns>
        private async Task GetPreviewFrameAsSoftwareBitmapAsync()
        {
            // Get information about the preview
            var previewProperties = _mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

            // Create the video frame to request a SoftwareBitmap preview frame
            var videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)previewProperties.Width, (int)previewProperties.Height);

            // Capture the preview frame
            using (var currentFrame = await _mediaCapture.GetPreviewFrameAsync(videoFrame))
            {
                // Collect the resulting frame
                SoftwareBitmap previewFrame = currentFrame.SoftwareBitmap;

                // Show the frame information
                FrameInfoTextBlock.Text = String.Format("{0}x{1} {2}", previewFrame.PixelWidth, previewFrame.PixelHeight, previewFrame.BitmapPixelFormat);

                // Add a simple green filter effect to the SoftwareBitmap
                if (GreenEffectCheckBox.IsChecked == true)
                {
                    ApplyGreenFilter(previewFrame);
                }

                // Show the frame (as is, no rotation is being applied)
                if (ShowFrameCheckBox.IsChecked == true)
                {
                    // Create a SoftwareBitmapSource to display the SoftwareBitmap to the user
                    var sbSource = new SoftwareBitmapSource();
                    await sbSource.SetBitmapAsync(previewFrame);

                    // Display it in the Image control
                    PreviewFrameImage.Source = sbSource;
                }

                // Save the frame (as is, no rotation is being applied)
                if (SaveFrameCheckBox.IsChecked == true)
                {
                    await SaveSoftwareBitmapAsync(previewFrame);
                }
            }
        }
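The ApplyGreenFilter helper referenced above is not included in the snippet; a simple sketch that zeroes the red and blue channels of the Bgra8 frame (the original sample edits the pixel buffer in place, this version goes through a managed byte array):

        // Hypothetical implementation of ApplyGreenFilter for a Bgra8 SoftwareBitmap.
        private void ApplyGreenFilter(SoftwareBitmap bitmap)
        {
            var pixels = new byte[4 * bitmap.PixelWidth * bitmap.PixelHeight];
            bitmap.CopyToBuffer(pixels.AsBuffer());

            for (int i = 0; i < pixels.Length; i += 4)
            {
                pixels[i] = 0;      // blue
                pixels[i + 2] = 0;  // red
                // pixels[i + 1] (green) and pixels[i + 3] (alpha) are left untouched
            }

            bitmap.CopyFromBuffer(pixels.AsBuffer());
        }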
Example #11
        private async void InitializeCameraAsync()
        {
            var allVideoDevices = await DeviceInformation.FindAllAsync(DeviceClass.VideoCapture);
            // The camera can never be obtained here
            DeviceInformation cameraDevice = allVideoDevices.FirstOrDefault();
            var mediaInitSettings = new MediaCaptureInitializationSettings { VideoDeviceId = cameraDevice.Id };
            MyMediaCapture = new MediaCapture();

            try
            {
                await MyMediaCapture.InitializeAsync(mediaInitSettings);
            }
            catch (UnauthorizedAccessException)
            {
                // Access to the camera was denied in the privacy settings; MyMediaCapture stays uninitialized.
            }

            PreviewControl.Height = 180;
            PreviewControl.Width = 240;
            //PreviewControl.Source = MyMediaCapture;

            await MyMediaCapture.StartPreviewAsync();
            videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, 240, 180, 0);
            buffer = new Windows.Storage.Streams.Buffer((uint)(240 * 180 * 4)); // Bgra8 uses 4 bytes per pixel
        }
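A hedged sketch of how the pre-allocated videoFrame field above might be used once the preview is running:

        // Hypothetical follow-up: fill the reusable videoFrame with the current preview image.
        private async Task<SoftwareBitmap> GrabFrameAsync()
        {
            await MyMediaCapture.GetPreviewFrameAsync(videoFrame);

            // Copy so the returned bitmap is independent of the reused videoFrame field.
            return SoftwareBitmap.Copy(videoFrame.SoftwareBitmap);
        }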
Example #12
		public override void  videoFrame(VideoFrame tag)
		{
			encodeTagHeader(tag.code, 4 + tag.videoData.Length, false);
			int idref = dict.getId(tag.stream);
			writer.writeUI16(idref);
			writer.writeUI16(tag.frameNum);
			writer.write(tag.videoData);
		}
        /// <summary>
        /// Gets the current orientation of the UI in relation to the device and applies a corrective rotation to the preview
        /// </summary>
        private async Task SetPreviewRotationAsync(IMediaEncodingProperties props)
        {
            // Nothing to do if the capture device has not been initialized.
            if (mediaCapture == null)
                return;

            // Calculate which way and how far to rotate the preview.
            int rotationDegrees;
            VideoRotation sourceRotation;
            CalculatePreviewRotation(out sourceRotation, out rotationDegrees);

            // Set preview rotation in the preview source.
            mediaCapture.SetPreviewRotation(sourceRotation);

            // Add rotation metadata to the preview stream to make sure the aspect ratio / dimensions match when rendering and getting preview frames
            //var props = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview);
            props.Properties.Add(RotationKey, rotationDegrees);
            await mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoPreview, props);
            
            var currentPreviewResolution = GetPreviewResolution(props);
            // Setup a frame to use as the input settings
            videoFrame = new VideoFrame(Windows.Graphics.Imaging.BitmapPixelFormat.Bgra8, (int)currentPreviewResolution.Width, (int)currentPreviewResolution.Height);
        }
        public MainWindow()
        {
            InitializeComponent();

            try
            {
                Action <VideoFrame> updateDepth;
                Action <VideoFrame> updateColor;

                pipeline  = new Pipeline();
                colorizer = new Colorizer();

                var cfg = new Config();
                cfg.EnableStream(Stream.Depth, 640, 480, Format.Z16, 30);
                cfg.EnableStream(Stream.Color, 640, 480, Format.Rgb8, 30);

                var profile = pipeline.Start(cfg);

                SetupWindow(profile, out updateDepth, out updateColor);

                // Setup the SW device and sensors
                var software_dev  = new SoftwareDevice();
                var depth_sensor  = software_dev.AddSensor("Depth");
                var depth_profile = depth_sensor.AddVideoStream(new VideoStream
                {
                    type       = Stream.Depth,
                    index      = 0,
                    uid        = 100,
                    width      = 640,
                    height     = 480,
                    fps        = 30,
                    bpp        = 2,
                    fmt        = Format.Z16,
                    intrinsics = (profile.GetStream(Stream.Depth) as VideoStreamProfile).GetIntrinsics()
                });
                var color_sensor  = software_dev.AddSensor("Color");
                var color_profile = color_sensor.AddVideoStream(new VideoStream
                {
                    type       = Stream.Color,
                    index      = 0,
                    uid        = 101,
                    width      = 640,
                    height     = 480,
                    fps        = 30,
                    bpp        = 3,
                    fmt        = Format.Rgb8,
                    intrinsics = (profile.GetStream(Stream.Color) as VideoStreamProfile).GetIntrinsics()
                });

                // Note about the Syncer: If actual FPS is significantly different from reported FPS in AddVideoStream
                // this can confuse the syncer and prevent it from producing synchronized pairs
                software_dev.SetMatcher(Matchers.Default);

                var sync = new Syncer();

                depth_sensor.Open(depth_profile);
                color_sensor.Open(color_profile);

                // Push the SW device frames to the syncer
                depth_sensor.Start(f => { sync.SubmitFrame(f); });
                color_sensor.Start(f => { sync.SubmitFrame(f); });

                var token = tokenSource.Token;

                var t = Task.Factory.StartNew(() =>
                {
                    while (!token.IsCancellationRequested)
                    {
                        // We use the frames that are captured from live camera as the input data for the SW device
                        using (var frames = pipeline.WaitForFrames())
                        {
                            var depthFrame = frames.DepthFrame.DisposeWith(frames);
                            var colorFrame = frames.ColorFrame.DisposeWith(frames);

                            var depthBytes = new byte[depthFrame.Stride * depthFrame.Height];
                            depthFrame.CopyTo(depthBytes);
                            depth_sensor.AddVideoFrame(depthBytes, depthFrame.Stride, depthFrame.BitsPerPixel / 8, depthFrame.Timestamp,
                                                       depthFrame.TimestampDomain, (int)depthFrame.Number, depth_profile);

                            var colorBytes = new byte[colorFrame.Stride * colorFrame.Height];
                            colorFrame.CopyTo(colorBytes);
                            color_sensor.AddVideoFrame(colorBytes, colorFrame.Stride, colorFrame.BitsPerPixel / 8, colorFrame.Timestamp,
                                                       colorFrame.TimestampDomain, (int)colorFrame.Number, color_profile);
                        }

                        // Display the frames that come from the SW device after synchronization
                        using (var new_frames = sync.WaitForFrames())
                        {
                            if (new_frames.Count == 2)
                            {
                                var depthFrame = new_frames.DepthFrame.DisposeWith(new_frames);
                                var colorFrame = new_frames.ColorFrame.DisposeWith(new_frames);

                                VideoFrame colorizedDepth = colorizer.Process(depthFrame).DisposeWith(new_frames) as VideoFrame;

                                // Render the frames.
                                Dispatcher.Invoke(DispatcherPriority.Render, updateDepth, colorizedDepth);
                                Dispatcher.Invoke(DispatcherPriority.Render, updateColor, colorFrame);
                            }
                        }
                    }
                }, token);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
                Application.Current.Shutdown();
            }
        }
        /// <summary>
        /// This is the event handler for the 'Extract' button.
        /// Captures an image from the camera, recognizes text, and displays it.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private async void ExtractButton_Tapped(object sender, Windows.UI.Xaml.Input.TappedRoutedEventArgs e)
        {
            //Get information about the preview.
            var previewProperties = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            int videoFrameWidth   = (int)previewProperties.Width;
            int videoFrameHeight  = (int)previewProperties.Height;

            // In portrait modes, the width and height must be swapped for the VideoFrame to have the correct aspect ratio and avoid letterboxing / black bars.
            if (!externalCamera && (displayInformation.CurrentOrientation == DisplayOrientations.Portrait || displayInformation.CurrentOrientation == DisplayOrientations.PortraitFlipped))
            {
                videoFrameWidth  = (int)previewProperties.Height;
                videoFrameHeight = (int)previewProperties.Width;
            }

            // Create the video frame to request a SoftwareBitmap preview frame.
            var videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, videoFrameWidth, videoFrameHeight);

            // Capture the preview frame.
            using (var currentFrame = await mediaCapture.GetPreviewFrameAsync(videoFrame))
            {
                // Collect the resulting frame.
                SoftwareBitmap bitmap = currentFrame.SoftwareBitmap;

                OcrEngine ocrEngine = OcrEngine.TryCreateFromLanguage(ocrLanguage);

                if (ocrEngine == null)
                {
                    rootPage.NotifyUser(ocrLanguage.DisplayName + " is not supported.", NotifyType.ErrorMessage);

                    return;
                }

                var imgSource = new WriteableBitmap(bitmap.PixelWidth, bitmap.PixelHeight);
                bitmap.CopyToBuffer(imgSource.PixelBuffer);
                PreviewImage.Source = imgSource;

                var ocrResult = await ocrEngine.RecognizeAsync(bitmap);

                // Used for text overlay.
                // Prepare scale transform for words since image is not displayed in original format.
                var scaleTrasform = new ScaleTransform
                {
                    CenterX = 0,
                    CenterY = 0,
                    ScaleX  = PreviewControl.ActualWidth / bitmap.PixelWidth,
                    ScaleY  = PreviewControl.ActualHeight / bitmap.PixelHeight
                };

                if (ocrResult.TextAngle != null)
                {
                    // If text is detected at an angle, in this sample scenario we want to
                    // overlay the word boxes over the original image, so we rotate the overlay boxes.
                    TextOverlay.RenderTransform = new RotateTransform
                    {
                        Angle   = (double)ocrResult.TextAngle,
                        CenterX = PreviewImage.ActualWidth / 2,
                        CenterY = PreviewImage.ActualHeight / 2
                    };
                }

                // Iterate over recognized lines of text.
                foreach (var line in ocrResult.Lines)
                {
                    // Iterate over words in line.
                    foreach (var word in line.Words)
                    {
                        // Define the TextBlock.
                        var wordTextBlock = new TextBlock()
                        {
                            Text  = word.Text,
                            Style = (Style)this.Resources["ExtractedWordTextStyle"]
                        };

                        WordOverlay wordBoxOverlay = new WordOverlay(word);

                        // Keep references to word boxes.
                        wordBoxes.Add(wordBoxOverlay);

                        // Define position, background, etc.
                        var overlay = new Border()
                        {
                            Child = wordTextBlock,
                            Style = (Style)this.Resources["HighlightedWordBoxHorizontalLine"]
                        };

                        // Bind word boxes to UI.
                        overlay.SetBinding(Border.MarginProperty, wordBoxOverlay.CreateWordPositionBinding());
                        overlay.SetBinding(Border.WidthProperty, wordBoxOverlay.CreateWordWidthBinding());
                        overlay.SetBinding(Border.HeightProperty, wordBoxOverlay.CreateWordHeightBinding());

                        // Put the filled textblock in the results grid.
                        TextOverlay.Children.Add(overlay);
                    }
                }

                rootPage.NotifyUser("Image processed using " + ocrEngine.RecognizerLanguage.DisplayName + " language.", NotifyType.StatusMessage);
            }

            UpdateWordBoxTransform();

            PreviewControl.Visibility = Visibility.Collapsed;
            Image.Visibility          = Visibility.Visible;
            ExtractButton.Visibility  = Visibility.Collapsed;
            CameraButton.Visibility   = Visibility.Visible;
        }
 private void CameraHelper_FrameArrived(object sender, FrameEventArgs e)
 {
     _currentVideoFrame = e.VideoFrame;
 }
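A hedged sketch of the wiring for the handler above, assuming the Windows Community Toolkit CameraHelper (method and variable names are illustrative):

 // Hypothetical initialization that hooks up CameraHelper_FrameArrived.
 private async Task InitCameraHelperAsync()
 {
     var cameraHelper = new CameraHelper();
     CameraHelperResult result = await cameraHelper.InitializeAndStartCaptureAsync();
     if (result == CameraHelperResult.Success)
     {
         cameraHelper.FrameArrived += CameraHelper_FrameArrived;
     }
 }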
        public async Task StartPreviewAsync(QR_Code_Scanner.Business.ComboboxItem comboboxItem)
        {
            FrameSourceInformation frameSourceInformation = new FrameSourceInformation();

            try
            {
                mediaCapture = new MediaCapture();


                var settings = new MediaCaptureInitializationSettings()
                {
                    StreamingCaptureMode = StreamingCaptureMode.Video
                };
                if (comboboxItem != null)
                {
                    settings.VideoDeviceId = comboboxItem.ID;
                    frameSourceInformation = comboboxItem.MediaFrameSourceInformation;
                }
                else
                {
                    if (availableColorCameras == null)
                    {
                        var frameSourceInformations = await GetFrameSourceInformationAsync();

                        frameSourceInformation = frameSourceInformations.First();
                        availableColorCameras  = await GetFrameSourceGroupsAsync(frameSourceInformation);
                    }
                    settings.VideoDeviceId = availableColorCameras.First().Id;
                }

                qrAnalyzerCancellationTokenSource = new CancellationTokenSource();
                try
                {
                    await mediaCapture.InitializeAsync(settings);
                }
                catch (Exception ex)
                {
                    MessageManager.ShowMessageToUserAsync("Tried to initialize a color camera but failed to do so.");
                }
                List <VideoEncodingProperties> availableResolutions = null;
                try
                {
                    availableResolutions = mediaCapture.VideoDeviceController.GetAvailableMediaStreamProperties(MediaStreamType.VideoPreview).Where(properties => properties is VideoEncodingProperties).Select(properties => (VideoEncodingProperties)properties).ToList();
                }
                catch (Exception ex)
                {
                    MessageManager.ShowMessageToUserAsync("No resolutions could be detected, trying default mode.");
                }

                VideoEncodingProperties bestVideoResolution = this.findBestResolution(availableResolutions);

                if (bestVideoResolution != null)
                {
                    await mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoPreview, bestVideoResolution);
                }

                displayRequest.RequestActive();
            }
            catch (UnauthorizedAccessException)
            {
                // This will be thrown if the user denied access to the camera in privacy settings
                MessageManager.ShowMessageToUserAsync("The app was denied access to the camera");
                return;
            }

            try
            {
                this.ScanForQRcodes         = true;
                previewWindowElement.Source = mediaCapture;
                await mediaCapture.StartPreviewAsync();

                isPreviewing = true;
                var imgProp = new ImageEncodingProperties
                {
                    Subtype = "BMP",
                    Width   = (uint)imgCaptureWidth,
                    Height  = (uint)imgCaptureHeight
                };
                var bcReader          = new BarcodeReader();
                var qrCaptureInterval = 200;

                var torch = mediaCapture.VideoDeviceController.TorchControl;
                var exposureCompensationControl = mediaCapture.VideoDeviceController.ExposureCompensationControl;

                if (torch.Supported)
                {
                    torch.Enabled = false;
                }
                //if (exposureCompensationControl.Supported) {
                //    var maxSupported = exposureCompensationControl.Max;
                //    var minSupported = exposureCompensationControl.Min;
                //    var middleExposure = (maxSupported + minSupported) / 2;
                //    var quarterExposure = (middleExposure + minSupported) / 2;
                //    await exposureCompensationControl.SetValueAsync(quarterExposure);
                //}

                // Get information about the preview
                var previewProperties = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

                while (qrAnalyzerCancellationTokenSource != null && !qrAnalyzerCancellationTokenSource.IsCancellationRequested)
                {
                    // Try to capture and decode a QR code here
                    if (ScanForQRcodes)
                    {
                        VideoFrame videoFrameFormatPlaceholder = new VideoFrame(BitmapPixelFormat.Bgra8, (int)previewProperties.Width, (int)previewProperties.Height);
                        await mediaCapture.GetPreviewFrameAsync(videoFrameFormatPlaceholder);
                        await findQRinImageAsync(bcReader, videoFrameFormatPlaceholder);

                        videoFrameFormatPlaceholder.Dispose();
                        videoFrameFormatPlaceholder = null;
                    }

                    //await Task.Delay(qrCaptureInterval, qrAnalyzerCancellationTokenSource.Token);
                    var   delayTask        = Task.Delay(qrCaptureInterval, qrAnalyzerCancellationTokenSource.Token);
                    var   continuationTask = delayTask.ContinueWith(task => { });
                    await continuationTask;
                }
            }
            catch (System.IO.FileLoadException)
            {
                mediaCapture.CaptureDeviceExclusiveControlStatusChanged += mediaCapture_CaptureDeviceExclusiveControlStatusChanged;
            }
            catch (System.ObjectDisposedException)
            {
                Debug.WriteLine("object was disposed");
            }
            catch (Exception)
            {
                Debug.WriteLine("another exception occurred.");
            }
        }
 public async Task<VideoFrame> GetFrameAsync(VideoFrame videoFrame)
 {
     return await Source.GetPreviewFrameAsync(videoFrame);
 }
 public BitmapSource Visualize(VideoFrame videoFrame, LiveCameraResult currentLiveCameraResult)
 {
     return(VisualizeResult(videoFrame, currentLiveCameraResult));
 }
Example #20
        async void CapturePhotoFromCameraAsync()
        {
            // Process one frame at a time and discard any that arrive in the meantime
            if (await _semRender.WaitAsync(0) == true)
            {
                try
                {
                    VideoFrame videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)_width, (int)_height);
                    await m_mediaCapture.GetPreviewFrameAsync(videoFrame);

                    var bytes = await SaveSoftwareBitmapToBufferAsync(videoFrame.SoftwareBitmap);
                    ScanImageAsync(bytes);
                }
                finally
                {
                    _semRender.Release();
                }
            }
        }
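The SaveSoftwareBitmapToBufferAsync helper used above is not shown; a sketch under the assumption that it JPEG-encodes the bitmap and returns the raw bytes:

        // Hypothetical implementation of SaveSoftwareBitmapToBufferAsync.
        private static async Task<byte[]> SaveSoftwareBitmapToBufferAsync(SoftwareBitmap bitmap)
        {
            using (var stream = new InMemoryRandomAccessStream())
            {
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                encoder.SetSoftwareBitmap(bitmap);
                await encoder.FlushAsync();

                var bytes = new byte[stream.Size];
                stream.Seek(0); // rewind before reading the encoded data back
                await stream.ReadAsync(bytes.AsBuffer(), (uint)stream.Size, InputStreamOptions.None);
                return bytes;
            }
        }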
Example #21
    public async Task <SqueezeNetResult> EvaluateVideoFrameAsync(VideoFrame inputFrame, int topResultsCount = 3)
    {
        // Sometimes on HoloLens RS4 the Direct3D surface returned is null, so simply skip those frames
        if (inputFrame == null || (inputFrame.Direct3DSurface == null && inputFrame.SoftwareBitmap == null))
        {
            return(new SqueezeNetResult
            {
                TopResultsFormatted = "No input frame",
                DominantResultLabel = "No input frame",
                DominantResultProbability = 0,
                ElapsedMilliseconds = 0
            });
        }

        // Bind the input
        var binding     = new LearningModelBinding(_session);
        var imageTensor = ImageFeatureValue.CreateFromVideoFrame(inputFrame);

        binding.Bind("data_0", imageTensor);

        // Process the frame and get the results
        var stopwatch = Stopwatch.StartNew();
        var results   = await _session.EvaluateAsync(binding, stopwatch.ElapsedMilliseconds.ToString());

        stopwatch.Stop();
        var resultTensor        = results.Outputs[_outputDescription.Name] as TensorFloat;
        var resultProbabilities = resultTensor.GetAsVectorView();

        // Find and sort the result of the evaluation in the bound output (the top classes detected with the max confidence)
        var topProbabilities           = new float[topResultsCount];
        var topProbabilityLabelIndexes = new int[topResultsCount];

        for (int i = 0; i < resultProbabilities.Count; i++)
        {
            for (int j = 0; j < topResultsCount; j++)
            {
                if (resultProbabilities[i] > topProbabilities[j])
                {
                    topProbabilityLabelIndexes[j] = i;
                    topProbabilities[j]           = resultProbabilities[i];
                    break;
                }
            }
        }

        // Format the result strings and return results
        string message = string.Empty;

        for (int i = 0; i < topResultsCount; i++)
        {
            message += $"\n{topProbabilities[i] * 100,4:f0}% : { _labels[topProbabilityLabelIndexes[i]]} ";
        }
        var mainLabel = _labels[topProbabilityLabelIndexes[0]].Split(',')[0];

        return(new SqueezeNetResult
        {
            TopResultsFormatted = message,
            DominantResultLabel = mainLabel,
            DominantResultProbability = topProbabilities[0],
            ElapsedMilliseconds = stopwatch.ElapsedMilliseconds
        });
    }
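A hedged usage sketch for the method above: load an image from the app package into a VideoFrame and evaluate it (the file path and the evaluator instance are assumptions):

    // Hypothetical caller of EvaluateVideoFrameAsync.
    private async Task ClassifySampleImageAsync()
    {
        StorageFile file = await StorageFile.GetFileFromApplicationUriAsync(new Uri("ms-appx:///Assets/test.jpg"));
        using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.Read))
        {
            BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream);
            SoftwareBitmap bitmap = await decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
            using (VideoFrame frame = VideoFrame.CreateWithSoftwareBitmap(bitmap))
            {
                SqueezeNetResult result = await evaluator.EvaluateVideoFrameAsync(frame, topResultsCount: 3);
                Debug.WriteLine(result.TopResultsFormatted);
            }
        }
    }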
Example #22
 /// <summary>
 /// Evaluates the input video frame with the ML model and populates the result
 /// </summary>
 protected virtual async Task EvaluateAsync(MLModelResult result, VideoFrame inputFrame)
 {
     throw new NotImplementedException();
 }
Example #23
        public void display(VideoFrame videoFrame, Color backColor, RenderMode mode)
        {
            lock (renderLock)
            {
                if (device == null)
                {
                    return;
                }

                SharpDX.Result deviceStatus = device.TestCooperativeLevel();

                if (deviceStatus.Success == true)
                {
                    try
                    {
                        SharpDX.ColorBGRA backColorDX = new SharpDX.ColorBGRA(backColor.R,
                                                                              backColor.G, backColor.B, backColor.A);

                        device.Clear(D3D.ClearFlags.Target, backColorDX, 1.0f, 0);

                        if (mode == RenderMode.CLEAR_SCREEN)
                        {
                            device.Present();
                            return;
                        }

                        device.BeginScene();

                        SharpDX.Rectangle videoSourceRect = new SharpDX.Rectangle();

                        if (mode == RenderMode.NORMAL)
                        {
                            videoSourceRect = new SharpDX.Rectangle(0, 0, videoFrame.Width, videoFrame.Height);

                            SharpDX.DataRectangle stream = offscreen.LockRectangle(LockFlags.None);

                            videoFrame.copyFrameDataToSurface(stream.DataPointer, stream.Pitch);

                            offscreen.UnlockRectangle();
                        }
                        else if (mode == RenderMode.PAUSED)
                        {
                            videoSourceRect = new SharpDX.Rectangle(0, 0, offscreen.Description.Width, offscreen.Description.Height);
                        }

                        videoDestRect = getVideoDestRect(backBuffer);

                        device.StretchRectangle(offscreen, videoSourceRect,
                                                backBuffer, videoDestRect, D3D.TextureFilter.Linear);

                        drawText();

                        device.EndScene();
                        device.Present();

                        RenderMode = mode;
                    }
                    catch (SharpDX.SharpDXException)
                    {
                        //log.Info("lost direct3d device", e);
                        deviceStatus = device.TestCooperativeLevel();
                    }
                }

                if (deviceStatus.Code == D3D.ResultCode.DeviceLost.Result)
                {
                    //Can't Reset yet, wait for a bit
                }
                else if (deviceStatus.Code == D3D.ResultCode.DeviceNotReset.Result)
                {
                    resetDevice();
                }
            }
        }
Example #24
		public override void  videoFrame(VideoFrame tag)
		{
			open(tag);
			out_Renamed.Write(" streamId='" + idRef(tag.stream) + "'");
			out_Renamed.Write(" frame='" + tag.frameNum + "'");
			close();
		}
Example #25
 public override void  videoFrame(VideoFrame tag)
 {
     tags.Add(tag);
 }
        /// <summary>
        /// This method is invoked by a ThreadPoolTimer to execute the FaceTracker and Visualization logic at approximately 15 frames per second.
        /// </summary>
        /// <remarks>
        /// Keep in mind this method is called from a Timer and not synchronized with the camera stream. Also, the processing time of FaceTracker
        /// will vary depending on the size of each frame and the number of faces being tracked. That is, a large image with several tracked faces may
        /// take longer to process.
        /// </remarks>
        /// <param name="timer">Timer object invoking this call</param>
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (this.currentState != ScenarioState.Streaming)
            {
                return;
            }

            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }

            try
            {
                IList<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector");
                    }

                    // Create our visualization using the frame dimensions and face results but run it on the UI thread.
                    var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                    var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    {
                        this.SetupVisualization(previewFrameSize, faces);
                    });
                }
            }
            catch (Exception ex)
            {
                var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                });
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }

        }
Example #27
        private void _video_VideoFrameEvent(int width, int height, byte[] data, int timeStamp)
        {
            VideoFrame vf = new VideoFrame(width, height, timeStamp, data);

            onVideoFrameParsed(new VideoFrameEventArgs(vf));
        }
Example #28
		public override void  videoFrame(VideoFrame tag)
		{
			frame.controlTags.Add(tag);
		}
        /// <summary>
        /// Runs the skill against a binding object, executing the skill logic on the associated input features and populating the output ones
        /// This skill proceeds in 2 steps:
        /// 1) Run FaceDetector against the image and populate the face bound feature in the binding object
        /// 2) If a face was detected, proceeds with sentiment analysis of that portion of the image using Windows ML, then updates the score
        /// of each possible sentiment returned as a result
        /// </summary>
        /// <param name="binding"></param>
        /// <returns></returns>
        public IAsyncAction EvaluateAsync(ISkillBinding binding)
        {
            FaceSentimentAnalyzerBinding bindingObj = binding as FaceSentimentAnalyzerBinding;

            if (bindingObj == null)
            {
                throw new ArgumentException("Invalid ISkillBinding parameter: This skill handles evaluation of FaceSentimentAnalyzerBinding instances only");
            }

            return(AsyncInfo.Run(async(token) =>
            {
                // Retrieve input frame from the binding object
                VideoFrame inputFrame = (binding[FaceSentimentAnalyzerConst.SKILL_INPUTNAME_IMAGE].FeatureValue as SkillFeatureImageValue).VideoFrame;
                SoftwareBitmap softwareBitmapInput = inputFrame.SoftwareBitmap;

                // Retrieve a SoftwareBitmap to run face detection
                if (softwareBitmapInput == null)
                {
                    if (inputFrame.Direct3DSurface == null)
                    {
                        throw (new ArgumentNullException("An invalid input frame has been bound"));
                    }
                    softwareBitmapInput = await SoftwareBitmap.CreateCopyFromSurfaceAsync(inputFrame.Direct3DSurface);
                }

                // Run face detection and retrieve face detection result
                var faceDetectionResult = await m_faceDetector.DetectFacesAsync(softwareBitmapInput);

                // Retrieve face rectangle feature from the binding object
                var faceRectangleFeature = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACERECTANGLE];

                // Retrieve face sentiment scores feature from the binding object
                var faceSentimentScores = binding[FaceSentimentAnalyzerConst.SKILL_OUTPUTNAME_FACESENTIMENTSCORES];

                // If a face is found, update face rectangle feature
                if (faceDetectionResult.Count > 0)
                {
                    // Retrieve the face bound and enlarge it by a factor of 1.5x while also ensuring clamping to frame dimensions
                    BitmapBounds faceBound = faceDetectionResult[0].FaceBox;
                    var additionalOffset = faceBound.Width / 2;
                    faceBound.X = Math.Max(0, faceBound.X - additionalOffset);
                    faceBound.Y = Math.Max(0, faceBound.Y - additionalOffset);
                    faceBound.Width = (uint)Math.Min(faceBound.Width + 2 * additionalOffset, softwareBitmapInput.PixelWidth - faceBound.X);
                    faceBound.Height = (uint)Math.Min(faceBound.Height + 2 * additionalOffset, softwareBitmapInput.PixelHeight - faceBound.Y);

                    // Set the face rectangle SkillFeatureValue in the skill binding object
                    // note that values are in normalized coordinates between [0, 1] for ease of use
                    await faceRectangleFeature.SetFeatureValueAsync(
                        new List <float>()
                    {
                        (float)faceBound.X / softwareBitmapInput.PixelWidth,                      // left
                        (float)faceBound.Y / softwareBitmapInput.PixelHeight,                     // top
                        (float)(faceBound.X + faceBound.Width) / softwareBitmapInput.PixelWidth,  // right
                        (float)(faceBound.Y + faceBound.Height) / softwareBitmapInput.PixelHeight // bottom
                    });

                    // Bind the WinML input frame with the adequate face bounds specified as metadata
                    bindingObj.m_winmlBinding.Bind(
                        FaceSentimentAnalyzerConst.WINML_MODEL_INPUTNAME, // WinML feature name
                        inputFrame,                                       // VideoFrame
                        new PropertySet()                                 // VideoFrame bounds
                    {
                        { "BitmapBounds",
                          PropertyValue.CreateUInt32Array(new uint[] { faceBound.X, faceBound.Y, faceBound.Width, faceBound.Height }) }
                    });

                    // Run WinML evaluation
                    var winMLEvaluationResult = await m_winmlSession.EvaluateAsync(bindingObj.m_winmlBinding, "");
                    var winMLModelResult = (winMLEvaluationResult.Outputs[FaceSentimentAnalyzerConst.WINML_MODEL_OUTPUTNAME] as TensorFloat).GetAsVectorView();
                    var predictionScores = SoftMax(winMLModelResult);

                    // Set the SkillFeatureValue in the skill binding object related to the face sentiment scores for each possible SentimentType
                    // note that we SoftMax the output of WinML to give a score normalized between [0, 1] for ease of use
                    await faceSentimentScores.SetFeatureValueAsync(predictionScores);
                }
                else // if no face found, reset output SkillFeatureValues with 0s
                {
                    await faceRectangleFeature.SetFeatureValueAsync(FaceSentimentAnalyzerConst.ZeroFaceRectangleCoordinates);
                    await faceSentimentScores.SetFeatureValueAsync(FaceSentimentAnalyzerConst.ZeroFaceSentimentScores);
                }
            }));
        }
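The SoftMax helper called above is not part of the snippet; a straightforward sketch, assuming it maps the raw WinML scores to probabilities that sum to 1:

        // Hypothetical SoftMax over the WinML output vector (subtract the max for numerical stability).
        private static List<float> SoftMax(IReadOnlyList<float> inputs)
        {
            float max = inputs.Max();
            List<float> exps = inputs.Select(v => (float)Math.Exp(v - max)).ToList();
            float sum = exps.Sum();
            return exps.Select(v => v / sum).ToList();
        }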
Example #30
        public void display(VideoFrame videoFrame, Color backColor, RenderMode mode)
        {
            
            lock (renderLock)
            {
                if (device == null) return;          

                SharpDX.Result deviceStatus = device.TestCooperativeLevel();
          
                if (deviceStatus.Success == true)
                {
                    try
                    {
                        SharpDX.ColorBGRA backColorDX = new SharpDX.ColorBGRA(backColor.R,
                            backColor.G, backColor.B, backColor.A);

                        device.Clear(D3D.ClearFlags.Target, backColorDX, 1.0f, 0);

                        if (mode == RenderMode.CLEAR_SCREEN)
                        {
                            device.Present();
                            return;
                        }           

                        device.BeginScene();
                 
                        SharpDX.Rectangle videoSourceRect = new SharpDX.Rectangle();

                        if (mode == RenderMode.NORMAL)
                        {

                            videoSourceRect = new SharpDX.Rectangle(0, 0, videoFrame.Width, videoFrame.Height);

                            SharpDX.DataRectangle stream = offscreen.LockRectangle(LockFlags.None);

                            videoFrame.copyFrameDataToSurface(stream.DataPointer, stream.Pitch);

                            offscreen.UnlockRectangle();

                        }
                        else if (mode == RenderMode.PAUSED)
                        {
                            videoSourceRect = new SharpDX.Rectangle(0, 0, offscreen.Description.Width, offscreen.Description.Height);
                        }
                                          
                        videoDestRect = getVideoDestRect(backBuffer);

                        device.StretchRectangle(offscreen, videoSourceRect,
                            backBuffer, videoDestRect, D3D.TextureFilter.Linear);

                        drawText();

                        device.EndScene();
                        device.Present();

                        RenderMode = mode;

                    }
                    catch (SharpDX.SharpDXException)
                    {

                        //log.Info("lost direct3d device", e);
                        deviceStatus = device.TestCooperativeLevel();
                    }
                }

                if (deviceStatus.Code == D3D.ResultCode.DeviceLost.Result)
                {

                    //Can't Reset yet, wait for a bit

                }
                else if (deviceStatus.Code == D3D.ResultCode.DeviceNotReset.Result)
                {
                    
                    resetDevice();
                }
            }
        }
 /// <summary>
 /// Sets the input image to be processed by the skill
 /// </summary>
 /// <param name="frame"></param>
 /// <returns></returns>
 public IAsyncAction SetInputImageAsync(VideoFrame frame)
 {
     return(m_bindingHelper.SetInputImageAsync(frame));
 }
Example #32
 private void OnVideoPacketDecoded(VideoFrame frame)
 {
     _frame = frame;
 }
        /// <summary>
        /// Triggered when UIButtonFilePick is clicked
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private async void UIButtonFilePick_Click(object sender, RoutedEventArgs e)
        {
            // Disable subsequent trigger of this event callback
            UIButtonFilePick.IsEnabled     = false;
            UICameraToggle.IsEnabled       = false;
            UIButtonSwitchOutput.IsEnabled = false;

            // Stop Camera preview
            UICameraPreview.Stop();
            if (UICameraPreview.CameraHelper != null)
            {
                await UICameraPreview.CameraHelper.CleanUpAsync();
            }
            UICameraPreview.Visibility = Visibility.Collapsed;

            try
            {
                m_currentFrameSourceToggled = FrameSourceToggledType.ImageFile;

                var frame = await LoadVideoFrameFromFilePickedAsync();

                // Instantiate skill only if object is null or selected execution device has changed
                if ((m_paramsSkillObj.m_selectedDeviceId != UISkillExecutionDevices.SelectedIndex) || (m_paramsSkillObj.m_skill == null))
                {
                    //Release previous instance
                    if (m_paramsSkillObj.m_skill != null)
                    {
                        m_paramsSkillObj.m_skill = null;
                    }

                    // Update selected device
                    m_paramsSkillObj.m_selectedDeviceId = UISkillExecutionDevices.SelectedIndex;

                    // Initialize skill with the selected supported device
                    m_paramsSkillObj.m_skill = await m_paramsSkillObj.m_skillDescriptor.CreateSkillAsync(m_paramsSkillObj.m_availableExecutionDevices[m_paramsSkillObj.m_selectedDeviceId]) as SuperResolutionSkill;

                    // Instantiate a binding object that will hold the skill's input and output resource
                    m_paramsSkillObj.m_binding = await m_paramsSkillObj.m_skill.CreateSkillBindingAsync() as SuperResolutionBinding;
                }

                if (frame != null)
                {
                    // Update input image and run the skill against it
                    await m_paramsSkillObj.m_binding.SetInputImageAsync(frame);

                    // Evaluate skill
                    await m_paramsSkillObj.m_skill.EvaluateAsync(m_paramsSkillObj.m_binding);

                    VideoFrame superResOutput = null;

                    await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, async() =>
                    {
                        // Retrieve result and display
                        superResOutput = await m_paramsSkillObj.m_binding.GetOutputImageAsync();
                        if (superResOutput.SoftwareBitmap == null)
                        {
                            SoftwareBitmap softwareBitmapOut = await SoftwareBitmap.CreateCopyFromSurfaceAsync(superResOutput.Direct3DSurface);
                            softwareBitmapOut = SoftwareBitmap.Convert(softwareBitmapOut, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                            await m_bitmapSource.SetBitmapAsync(softwareBitmapOut);
                            UIOutputViewer.Source = m_bitmapSource;
                        }
                        else
                        {
                            await m_bitmapSource.SetBitmapAsync(superResOutput.SoftwareBitmap);
                            UIOutputViewer.Source = m_bitmapSource;
                        }
                    });
                }
            }
            catch (Exception ex)
            {
                await(new MessageDialog(ex.Message)).ShowAsync();
            }

            // Enable subsequent trigger of this event callback
            UIButtonFilePick.IsEnabled = true;
            UICameraToggle.IsEnabled   = true;
        }
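The LoadVideoFrameFromFilePickedAsync helper used above is not shown; a sketch assuming it opens a FileOpenPicker and wraps the picked image in a VideoFrame:

        // Hypothetical implementation of LoadVideoFrameFromFilePickedAsync.
        private async Task<VideoFrame> LoadVideoFrameFromFilePickedAsync()
        {
            var picker = new FileOpenPicker { SuggestedStartLocation = PickerLocationId.PicturesLibrary };
            picker.FileTypeFilter.Add(".jpg");
            picker.FileTypeFilter.Add(".png");

            StorageFile file = await picker.PickSingleFileAsync();
            if (file == null)
            {
                return null; // the user cancelled the picker
            }

            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.Read))
            {
                BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream);
                SoftwareBitmap bitmap = await decoder.GetSoftwareBitmapAsync(BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
                return VideoFrame.CreateWithSoftwareBitmap(bitmap);
            }
        }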
Example #34
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            // If state is not Streaming, return.
            if (_state != StreamingState.Streaming)
            {
                return;
            }

            // If a previous frame is still being processed, return.
            if (!_semaphoreSlim.Wait(0))
            {
                return;
            }

            const BitmapPixelFormat PixelFormat = BitmapPixelFormat.Nv12;

            try
            {
                using (VideoFrame currentFrame = new VideoFrame(PixelFormat, (int)_videoProperties.Width, (int)_videoProperties.Height))
                {
                    // Get the current preview frame from _mediaCapture and copy it into currentFrame.
                    await _mediaCapture.GetPreviewFrameAsync(currentFrame);

                    // Detect faces with _faceTracker.
                    IList <DetectedFace> builtinFaces = await _faceTracker.ProcessNextFrameAsync(currentFrame);

                    SoftwareBitmap tempBitmap = SoftwareBitmap.Convert(currentFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8);

                    if (builtinFaces.Count != 0)
                    {
                        var frameSize = new Size(currentFrame.SoftwareBitmap.PixelWidth, currentFrame.SoftwareBitmap.PixelHeight);
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        {
                            ShowFromBuiltIn(frameSize, builtinFaces);
                        });

                        // Get picture from videoframe.
                        IRandomAccessStream stream  = new InMemoryRandomAccessStream();
                        BitmapEncoder       encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                        encoder.SetSoftwareBitmap(tempBitmap);
                        await encoder.FlushAsync();

                        CustomFaceModel customFaces = await _faceApiHelper.GetIdentifySingleResultAsync(stream.AsStream());


                        if (customFaces != null)
                        {
                            await _dataHelper.ChangeAttendStatusAsync(customFaces.Name, true);

                            await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                      ShowLoginSuccess(customFaces));
                        }
                        //await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                        //    ShowFromFaceApi(frameSize, customFaces));
                    }
                    else
                    {
                        await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                                  PaintingCanvas.Children.Clear());
                    }
                }
            }
            catch (Microsoft.ProjectOxford.Face.FaceAPIException faceEx)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowAlertHelper.ShowDialog(faceEx.ErrorMessage, faceEx.ErrorCode));
            }
            catch (Exception ex)
            {
                await Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                          ShowAlertHelper.ShowDialog(ex.Message));
            }
            finally
            {
                _semaphoreSlim.Release();
            }
        }
Example #35
 public ColorImage(VideoFrame frame)
 {
     this.frame = frame;
     Info       = frame.ToImageInfo(frame.Width, frame.Height);
 }
        // ** Methods ** //

        // This is run for every video frame passed in the media pipeline (MediaPlayer, MediaCapture, etc.)
        public void ProcessFrame(ProcessVideoFrameContext context)
        {
            evaluatableVideoFrame = VideoFrame.CreateWithDirect3D11Surface(context.InputFrame.Direct3DSurface);

            // ********** Draw Bounding Boxes with Win2D ********** //

            // Use Direct3DSurface if using GPU memory
            if (context.InputFrame.Direct3DSurface != null)
            {
                if (modelBindingComplete && options.PreferredDeviceKind != LearningModelDeviceKindPreview.LearningDeviceGpu)
                {
                    options.PreferredDeviceKind = LearningModelDeviceKindPreview.LearningDeviceGpu;
                }

                using (var inputBitmap = CanvasBitmap.CreateFromDirect3D11Surface(canvasDevice, context.InputFrame.Direct3DSurface))
                    using (var renderTarget = CanvasRenderTarget.CreateFromDirect3D11Surface(canvasDevice, context.OutputFrame.Direct3DSurface))
                        using (var ds = renderTarget.CreateDrawingSession())
                        {
                            ds.DrawImage(inputBitmap);

                            foreach (var box in filteredBoxes)
                            {
                                var x = (uint)Math.Max(box.X, 0);
                                var y = (uint)Math.Max(box.Y, 0);
                                var w = (uint)Math.Min(renderTarget.Bounds.Width - x, box.Width);
                                var h = (uint)Math.Min(renderTarget.Bounds.Height - y, box.Height);

                                // Draw the Text 10px above the top of the bounding box
                                ds.DrawText(box.Label, x, y - 10, Colors.Yellow);
                                ds.DrawRectangle(new Rect(x, y, w, h), new CanvasSolidColorBrush(canvasDevice, Colors.Yellow), 2f);
                            }
                        }

                return;
            }

            // Use SoftwareBitmap if using CPU memory
            if (context.InputFrame.SoftwareBitmap != null)
            {
                if (modelBindingComplete && options.PreferredDeviceKind != LearningModelDeviceKindPreview.LearningDeviceCpu)
                {
                    options.PreferredDeviceKind = LearningModelDeviceKindPreview.LearningDeviceCpu;
                }

                // InputFrame's pixels
                byte[] inputFrameBytes = new byte[4 * context.InputFrame.SoftwareBitmap.PixelWidth * context.InputFrame.SoftwareBitmap.PixelHeight];
                context.InputFrame.SoftwareBitmap.CopyToBuffer(inputFrameBytes.AsBuffer());

                using (var inputBitmap = CanvasBitmap.CreateFromBytes(canvasDevice, inputFrameBytes, context.InputFrame.SoftwareBitmap.PixelWidth, context.InputFrame.SoftwareBitmap.PixelHeight, context.InputFrame.SoftwareBitmap.BitmapPixelFormat.ToDirectXPixelFormat()))
                    using (var renderTarget = new CanvasRenderTarget(canvasDevice, context.OutputFrame.SoftwareBitmap.PixelWidth, context.OutputFrame.SoftwareBitmap.PixelHeight, (float)context.OutputFrame.SoftwareBitmap.DpiX, context.OutputFrame.SoftwareBitmap.BitmapPixelFormat.ToDirectXPixelFormat(), CanvasAlphaMode.Premultiplied))
                        using (var ds = renderTarget.CreateDrawingSession())
                        {
                            ds.DrawImage(inputBitmap);

                            foreach (var box in filteredBoxes)
                            {
                                var x = (uint)Math.Max(box.X, 0);
                                var y = (uint)Math.Max(box.Y, 0);
                                var w = (uint)Math.Min(context.OutputFrame.SoftwareBitmap.PixelWidth - x, box.Width);
                                var h = (uint)Math.Min(context.OutputFrame.SoftwareBitmap.PixelHeight - y, box.Height);

                                // Draw the Text 10px above the top of the bounding box
                                ds.DrawText(box.Label, x, y - 10, Colors.Yellow);
                                ds.DrawRectangle(new Rect(x, y, w, h), new CanvasSolidColorBrush(canvasDevice, Colors.Yellow), 2f);
                            }
                        }
            }
        }
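// For context: a frame-processing implementation like ProcessFrame above only runs once the effect has
// been added to a media pipeline. The sketch below shows one common way to do that with MediaCapture;
// "ObjectDetectionEffect" is a hypothetical name for the effect class above, not part of the sample.
// Assumes Windows.Media.Capture and Windows.Media.Effects.
        private async Task AddDetectionEffectToPreviewAsync(MediaCapture mediaCapture)
        {
            // The VideoEffectDefinition takes the activatable class id of the IBasicVideoEffect implementation.
            var effectDefinition = new VideoEffectDefinition(typeof(ObjectDetectionEffect).FullName);

            // Attach the effect to the preview stream; ProcessFrame will then be invoked for every preview frame.
            await mediaCapture.AddVideoEffectAsync(effectDefinition, MediaStreamType.VideoPreview);
        }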
Ejemplo n.º 37
0
        private async void ProcessCurrentVideoFrame(ThreadPoolTimer timer)
        {
            if (this.currentState != ScenarioState.Streaming)
            {
                return;
            }

            if (!frameProcessingSemaphore.Wait(0))
            {
                return;
            }


            try
            {
                const BitmapPixelFormat InputPixelFormat1 = BitmapPixelFormat.Nv12;


                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat1, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    var valor = await this.mediaCapture.GetPreviewFrameAsync(previewFrame);


                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        var previewFrameSize = new Windows.Foundation.Size(previewFrame.SoftwareBitmap.PixelWidth, previewFrame.SoftwareBitmap.PixelHeight);
                        var ignored          = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.High, () =>
                        {
                            var caraNuevaValor = "";
                            this.SetupVisualization(previewFrameSize, faces);


                            //this.imagenCompletar.Source = bitmpatSRC;
                            //bitmpatSRC.SetBitmapAsync(previewFrameBMO);
                        });
                        faces = await this.faceTracker.ProcessNextFrameAsync(previewFrame);

                        if (faces.Count != 0 && IdentidadEncontrada == "")
                        {
                            string nombre   = "";
                            int    contador = 0;
                            foreach (var caraEncontrad in faces)
                            {
                                var cara = caraEncontrad.FaceBox.ToString();

                                nombre = await ObtenerIdentidad();

                                contador           += 1;
                                IdentidadEncontrada = nombre;
                                identidades.Identidad(new GenericEventArgs <string>(IdentidadEncontrada));
                            }
                        }
                    }
                    else
                    {
                        throw new System.NotSupportedException("PixelFormat '" + InputPixelFormat1.ToString() + "' is not supported by FaceDetector");
                    }
                }
            }
            catch (Exception ex)
            {
                var ignored = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                {
                    //this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                });
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
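// The ProcessCurrentVideoFrame handler above assumes a FaceTracker, cached preview properties and a
// periodic timer created elsewhere. Below is a minimal setup sketch under those assumptions; the member
// names (faceTracker, videoProperties, frameProcessingTimer) are placeholders, not part of the sample.
// Assumes Windows.Media.FaceAnalysis, Windows.Media.MediaProperties and Windows.System.Threading.
        private async Task StartFaceTrackingAsync()
        {
            // FaceTracker must be created asynchronously before the first frame is processed.
            this.faceTracker = await FaceTracker.CreateAsync();

            // Cache the preview stream size so each tick can allocate a matching NV12 VideoFrame.
            this.videoProperties = this.mediaCapture.VideoDeviceController
                .GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

            // Fire roughly 15 times per second; the semaphore inside the handler drops frames that arrive too fast.
            this.frameProcessingTimer = ThreadPoolTimer.CreatePeriodicTimer(
                this.ProcessCurrentVideoFrame, TimeSpan.FromMilliseconds(66));
        }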
 private void CameraPreviewControl_FrameArrived(object sender, FrameEventArgs e)
 {
     _currentVideoFrame = e.VideoFrame;
 }
Ejemplo n.º 39
0
        public async Task <string> ObtenerIdentidad()
        {
            byte[] arrayImage;
            var    PersonName = "";

            identidades.TieneIdentidad += ObtenerLaIdentidad;

            try
            {
                const BitmapPixelFormat InputPixelFormat1 = BitmapPixelFormat.Bgra8;

                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat1, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    var valor = await this.mediaCapture.GetPreviewFrameAsync(previewFrame);


                    SoftwareBitmap softwareBitmapPreviewFrame = valor.SoftwareBitmap;

                    Size  sizeCrop      = new Size(softwareBitmapPreviewFrame.PixelWidth, softwareBitmapPreviewFrame.PixelHeight);
                    Point point         = new Point(0, 0);
                    Rect  rect          = new Rect(0, 0, softwareBitmapPreviewFrame.PixelWidth, softwareBitmapPreviewFrame.PixelHeight);
                    var   arrayByteData = await EncodedBytes(softwareBitmapPreviewFrame, BitmapEncoder.BmpEncoderId);

                    SoftwareBitmap softwareBitmapCropped = await CreateFromBitmap(softwareBitmapPreviewFrame, (uint)softwareBitmapPreviewFrame.PixelWidth, (uint)softwareBitmapPreviewFrame.PixelHeight, arrayByteData);

                    SoftwareBitmap displayableImage = SoftwareBitmap.Convert(softwareBitmapCropped, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);
                    arrayImage = await EncodedBytes(displayableImage, BitmapEncoder.BmpEncoderId);

                    var nuevoStreamFace = new MemoryStream(arrayImage);

                    //var ignored1 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                    //{
                    //    softwareBitmapSource.SetBitmapAsync(displayableImage);

                    //    imagenCamaraWeb.Source = softwareBitmapSource;

                    //});

                    string subscriptionKey      = "c568304102b84df291d2556d34c8d623";
                    string subscriptionEndpoint = "https://eastus2.api.cognitive.microsoft.com/face/v1.0";
                    var    faceServiceClient    = new FaceServiceClient(subscriptionKey, subscriptionEndpoint);

                    try
                    {
                        // using (var fsStream = File.OpenRead(sampleFile))
                        // {
                        IEnumerable <FaceAttributeType> faceAttributes =
                            new FaceAttributeType[] { FaceAttributeType.Gender, FaceAttributeType.Age, FaceAttributeType.Smile, FaceAttributeType.Emotion, FaceAttributeType.Glasses, FaceAttributeType.Hair };


                        var faces = await faceServiceClient.DetectAsync(nuevoStreamFace, true, false, faceAttributes);

                        string edad = "";
                        var    resultadoIdentifiacion = await faceServiceClient.IdentifyAsync(faces.Select(ff => ff.FaceId).ToArray(), largePersonGroupId : this.GroupId);

                        for (int idx = 0; idx < faces.Length; idx++)
                        {
                            // Update identification result for rendering
                            edad = faces[idx].FaceAttributes.Age.ToString();


                            var res = resultadoIdentifiacion[idx];

                            if (res.Candidates.Length > 0)
                            {
                                var nombrePersona = await faceServiceClient.GetPersonInLargePersonGroupAsync(GroupId, res.Candidates[0].PersonId);

                                PersonName = nombrePersona.Name.ToString();
                                //var estadoAnimo =
                                var ignored2 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                {
                                    txtResult.Text = nombrePersona.Name.ToString();
                                });
                            }
                            else
                            {
                                // Update the UI on the dispatcher thread, matching the branch above.
                                var ignored3 = this.Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () =>
                                {
                                    txtResult.Text = "Unknown";
                                });
                            }
                        }
                        //}
                    }
                    catch (Exception ex)
                    {
                        var error = ex.Message.ToString();
                    }
                }
            }
            catch (Exception ex)
            {
                var mensaje = ex.Message.ToString();
            }
            return(PersonName);
        }
Ejemplo n.º 40
0
 internal FaceDetectionEffectFrame(VideoFrame frame, IList <DetectedFace> detectedFaces)
 {
     Source = frame;
     DetectedFacesSource = detectedFaces.ToList();
 }
Ejemplo n.º 41
0
 bool VideoFrameObserver.OnCaptureVideoFrame(VideoFrame frame)
 {
     return(OnCaptureVideoFrame == null ? true : OnCaptureVideoFrame(frame));
 }
Ejemplo n.º 42
0
 public async Task<string> Encode(VideoFrame frame)
 {
     var array = await EncodeToBytes(frame);
     var encodedImage = Convert.ToBase64String(array);
     return encodedImage;
 }
Ejemplo n.º 43
0
 bool VideoFrameObserver.OnRenderVideoFrameEx(string channel, ulong uid, VideoFrame frame)
 {
     return(OnRenderVideoFrameEx == null ? true : OnRenderVideoFrameEx(channel, uid, frame));
 }
        /// <summary>
        /// This is event handler for 'Extract' button.
        /// Captures image from camera ,recognizes text and displays it.
        /// </summary>
        /// <param name="sender"></param>
        /// <param name="e"></param>
        private async void ExtractButton_Tapped(object sender, Windows.UI.Xaml.Input.TappedRoutedEventArgs e)
        {
            //Get information about the preview.
            var previewProperties = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            int videoFrameWidth = (int)previewProperties.Width;
            int videoFrameHeight = (int)previewProperties.Height;

            // In portrait modes, the width and height must be swapped for the VideoFrame to have the correct aspect ratio and avoid letterboxing / black bars.
            if (!externalCamera && (displayInformation.CurrentOrientation == DisplayOrientations.Portrait || displayInformation.CurrentOrientation == DisplayOrientations.PortraitFlipped))
            {
                videoFrameWidth = (int)previewProperties.Height;
                videoFrameHeight = (int)previewProperties.Width;
            }

            // Create the video frame to request a SoftwareBitmap preview frame.
            var videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, videoFrameWidth, videoFrameHeight);

            // Capture the preview frame.
            using (var currentFrame = await mediaCapture.GetPreviewFrameAsync(videoFrame))
            {
                // Collect the resulting frame.
                SoftwareBitmap bitmap = currentFrame.SoftwareBitmap;

                OcrEngine ocrEngine = OcrEngine.TryCreateFromLanguage(ocrLanguage);

                if (ocrEngine == null)
                {
                    rootPage.NotifyUser(ocrLanguage.DisplayName + " is not supported.", NotifyType.ErrorMessage);

                    return;
                }

                var imgSource = new WriteableBitmap(bitmap.PixelWidth, bitmap.PixelHeight);
                bitmap.CopyToBuffer(imgSource.PixelBuffer);
                PreviewImage.Source = imgSource;

                var ocrResult = await ocrEngine.RecognizeAsync(bitmap);

                // Used for text overlay.
                // Prepare scale transform for words since image is not displayed in original format.
                var scaleTrasform = new ScaleTransform
                {
                    CenterX = 0,
                    CenterY = 0,
                    ScaleX = PreviewControl.ActualWidth / bitmap.PixelWidth,
                    ScaleY = PreviewControl.ActualHeight / bitmap.PixelHeight
                };

                if (ocrResult.TextAngle != null)
                {
                    // If text is detected under some angle in this sample scenario we want to
                    // overlay word boxes over original image, so we rotate overlay boxes.
                    TextOverlay.RenderTransform = new RotateTransform
                    {
                        Angle = (double)ocrResult.TextAngle,
                        CenterX = PreviewImage.ActualWidth / 2,
                        CenterY = PreviewImage.ActualHeight / 2
                    };
                }

                // Iterate over recognized lines of text.
                foreach (var line in ocrResult.Lines)
                {
                    // Iterate over words in line.
                    foreach (var word in line.Words)
                    {
                        // Define the TextBlock.
                        var wordTextBlock = new TextBlock()
                        {
                            Text = word.Text,
                            Style = (Style)this.Resources["ExtractedWordTextStyle"]
                        };

                        WordOverlay wordBoxOverlay = new WordOverlay(word);

                        // Keep references to word boxes.
                        wordBoxes.Add(wordBoxOverlay);

                        // Define position, background, etc.
                        var overlay = new Border()
                        {
                            Child = wordTextBlock,
                            Style = (Style)this.Resources["HighlightedWordBoxHorizontalLine"]
                        };

                        // Bind word boxes to UI.
                        overlay.SetBinding(Border.MarginProperty, wordBoxOverlay.CreateWordPositionBinding());
                        overlay.SetBinding(Border.WidthProperty, wordBoxOverlay.CreateWordWidthBinding());
                        overlay.SetBinding(Border.HeightProperty, wordBoxOverlay.CreateWordHeightBinding());

                        // Put the filled textblock in the results grid.
                        TextOverlay.Children.Add(overlay);
                    }
                }

                rootPage.NotifyUser("Image processed using " + ocrEngine.RecognizerLanguage.DisplayName + " language.", NotifyType.StatusMessage);
            }

            UpdateWordBoxTransform();

            PreviewControl.Visibility = Visibility.Collapsed;
            Image.Visibility = Visibility.Visible;
            ExtractButton.Visibility = Visibility.Collapsed;
            CameraButton.Visibility = Visibility.Visible;
        }
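// The handler above depends on an ocrLanguage field and a wordBoxes collection initialized elsewhere in
// the page. A minimal sketch of that initialization follows, assuming English OCR; the field names match
// their usage above but the language choice is an assumption.
// Assumes Windows.Globalization and Windows.Media.Ocr.
        private Language ocrLanguage = new Language("en-US");
        private readonly List<WordOverlay> wordBoxes = new List<WordOverlay>();

        private bool IsOcrLanguageInstalled()
        {
            // OcrEngine exposes the recognizer languages available on the device, so the page can warn
            // the user before ExtractButton_Tapped fails to create an engine.
            return OcrEngine.AvailableRecognizerLanguages.Any(l => l.LanguageTag == ocrLanguage.LanguageTag);
        }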
Ejemplo n.º 45
0
        private async void Timer_Tick(object sender, object e)
        {
            var previewProperties = mc.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            var videoFrame        = new VideoFrame(BitmapPixelFormat.Bgra8, (int)previewProperties.Width, (int)previewProperties.Height);
            var frame             = await mc.GetPreviewFrameAsync(videoFrame);

            var bitmap = new WriteableBitmap(frame.SoftwareBitmap.PixelWidth, frame.SoftwareBitmap.PixelHeight);

            frame.SoftwareBitmap.CopyToBuffer(bitmap.PixelBuffer);
            var resized = bitmap.Resize(frame.SoftwareBitmap.PixelWidth / 8, frame.SoftwareBitmap.PixelHeight / 8, WriteableBitmapExtensions.Interpolation.Bilinear);

            var result_sol = "";
            var result_sag = "";
            var result_üst = "";
            var result_alt = "";
            var beyaz      = 250;
            var sol        = 0;
            var sag        = 0;
            var ust        = 0;
            var alt        = 0;
            var hataPayi   = 20;
            var yatayOrt   = resized.PixelWidth / 2;
            var dikeyOrt   = resized.PixelHeight / 2;

            for (int x = 0; x < resized.PixelWidth; x += 1)
            {
                for (int y = 0; y < resized.PixelHeight; y += 1)
                {
                    var color = resized.GetPixel(x, y);

                    byte c = (byte)((color.R + color.B + color.G) / 3);

                    if (c >= beyaz)
                    {
                        if (x < yatayOrt)
                        {
                            sol++;
                        }
                        else
                        {
                            sag++;
                        }

                        if (y < dikeyOrt)
                        {
                            ust++;
                        }
                        else
                        {
                            alt++;
                        }
                    }
                }
            }

            if (sol > sag + hataPayi)
            {
                result_sol = "sol";
                if (sol != sag)
                {
                    pulse_x = pulse_x + 0.2; if (pulse_x >= 4)
                    {
                        pulse_x = 4;
                    }
                    _servomotor_x.SetPulse(pulse_x);
                }
            }
            if (sag > sol + hataPayi)
            {
                result_sag = "sağ";
                if (sag != sol)
                {
                    pulse_x = pulse_x - 0.2; if (pulse_x <= 0.01)
                    {
                        pulse_x = 0.01;
                    }
                    _servomotor_x.SetPulse(pulse_x);
                }
            }
            if (ust > alt + hataPayi)
            {
                result_üst = "üst";
                if (ust != alt)
                {
                    pulse_y = pulse_y + 0.2; if (pulse_y >= 4)
                    {
                        pulse_y = 4;
                    }
                    _servomotor_y.SetPulse(pulse_y);
                }
            }
            if (alt > ust + hataPayi)
            {
                result_alt = "alt";
                if (alt != ust)
                {
                    pulse_y = pulse_y - 0.2; if (pulse_y <= 0.01)
                    {
                        pulse_y = 0.01;
                    }
                    _servomotor_y.SetPulse(pulse_y);
                }
            }
            lblResult_sag.Text = result_sag + sag;
            lblResult_sol.Text = result_sol + sol;
            lblResult_üst.Text = result_üst + ust;
            lblResult_alt.Text = result_alt + alt;
        }
        /// <summary>
        /// Captures a single frame from the running webcam stream and executes the FaceDetector on the image. If successful calls SetupVisualization to display the results.
        /// </summary>
        /// <returns>Async Task object returning true if the capture was successful and false if an exception occurred.</returns>
        private async Task<bool> TakeSnapshotAndFindFaces()
        {
            bool successful = true;

            try
            {
                if (this.currentState != ScenarioState.Streaming)
                {
                    return false;
                }

                WriteableBitmap displaySource = null;
                IList<DetectedFace> faces = null;

                // Create a VideoFrame object specifying the pixel format we want our capture image to be (NV12 bitmap in this case).
                // GetPreviewFrame will convert the native webcam frame into this format.
                const BitmapPixelFormat InputPixelFormat = BitmapPixelFormat.Nv12;
                using (VideoFrame previewFrame = new VideoFrame(InputPixelFormat, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(previewFrame);

                    // The returned VideoFrame should be in the supported NV12 format but we need to verify this.
                    if (FaceDetector.IsBitmapPixelFormatSupported(previewFrame.SoftwareBitmap.BitmapPixelFormat))
                    {
                        faces = await this.faceDetector.DetectFacesAsync(previewFrame.SoftwareBitmap);
                    }
                    else
                    {
                        this.rootPage.NotifyUser("PixelFormat '" + InputPixelFormat.ToString() + "' is not supported by FaceDetector", NotifyType.ErrorMessage);
                    }

                    // Create a WriteableBitmap for our visualization display; copy the original bitmap pixels to wb's buffer.
                    // Note that WriteableBitmap doesn't support NV12 and we have to convert it to 32-bit BGRA.
                    using (SoftwareBitmap convertedSource = SoftwareBitmap.Convert(previewFrame.SoftwareBitmap, BitmapPixelFormat.Bgra8))
                    {
                        displaySource = new WriteableBitmap(convertedSource.PixelWidth, convertedSource.PixelHeight);
                        convertedSource.CopyToBuffer(displaySource.PixelBuffer);
                    }

                    // Create our display using the available image and face results.
                    this.SetupVisualization(displaySource, faces);
                }
            }
            catch (Exception ex)
            {
                this.rootPage.NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
                successful = false;
            }

            return successful;
        }
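// SetupVisualization is referenced above but not shown in this excerpt. The sketch below is an assumed
// implementation that displays the captured bitmap and overlays one rectangle per detected face; the
// XAML element names (DisplayImage, VisualizationCanvas) are hypothetical.
        private void SetupVisualization(WriteableBitmap displaySource, IList<DetectedFace> foundFaces)
        {
            this.DisplayImage.Source = displaySource;
            this.VisualizationCanvas.Children.Clear();

            if (displaySource == null || foundFaces == null)
            {
                return;
            }

            // Scale face boxes from bitmap pixel space into the canvas layout space.
            double widthScale = this.VisualizationCanvas.ActualWidth / displaySource.PixelWidth;
            double heightScale = this.VisualizationCanvas.ActualHeight / displaySource.PixelHeight;

            foreach (DetectedFace face in foundFaces)
            {
                var box = new Windows.UI.Xaml.Shapes.Rectangle
                {
                    Width = face.FaceBox.Width * widthScale,
                    Height = face.FaceBox.Height * heightScale,
                    Stroke = new SolidColorBrush(Colors.Yellow),
                    StrokeThickness = 2
                };
                Canvas.SetLeft(box, face.FaceBox.X * widthScale);
                Canvas.SetTop(box, face.FaceBox.Y * heightScale);
                this.VisualizationCanvas.Children.Add(box);
            }
        }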
Ejemplo n.º 47
0
        async private void beginCapture()
        {
            ZXing.Mobile.MobileBarcodeScanningOptions ScanningOptions = MobileBarcodeScanningOptions.Default;
            var zxing = ScanningOptions.BuildBarcodeReader();
            SoftwareBitmapLuminanceSource luminanceSource = null;

            // Capture the preview frame and analyze it; repeat until there is a result or the scan is cancelled
            while (result == null && !hasBeenCancelled)
            {
                var videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)capturePreview.ActualWidth, (int)capturePreview.ActualHeight);
                using (var frame = await mediaCapture.GetPreviewFrameAsync(videoFrame))
                {
                    try
                    {
                        // Create our luminance source
                        luminanceSource = new SoftwareBitmapLuminanceSource(frame.SoftwareBitmap);
                    }
                    catch (Exception ex)
                    {
                        System.Diagnostics.Debug.WriteLine("GetPreviewFrame Failed: {0}", ex);
                    }

                    try
                    {
                        zxing.Options.TryHarder = true;
                        // Try decoding the image
                        if (luminanceSource != null)
                            result = zxing.Decode(luminanceSource);
                    }
                    catch (Exception ex)
                    {

                    }
                }

                if (result != null)
                {
                    if(App.APP_SETTINGS.UPCFormatsEnabled.Where(f => f == result.BarcodeFormat).Count() > 0 || App.APP_SETTINGS.UPCFormatsEnabled.Where(f => f == ZXing.BarcodeFormat.All_1D).Count() > 0)
                        alert.Play();                            
                }
            }
        }
Ejemplo n.º 48
0
        async private void Timer_Tick(object sender, object e)
        {
            if(timer_tick_complete_flag == 1)
            {
                return;
            }
            timer_tick_complete_flag = 1;

            /*  stream client */
            try
            {
                if(streamSocketClient.flag_client_start == 0)
                {
                    if (streamSocketSrv.receive_client_ip == 1)
                    {
                        await streamSocketClient.start(streamSocketSrv.stringtemp, "22343");
                    }
                }
                else
                {

                    if (MyMediaCapture.CameraStreamState == CameraStreamState.Streaming)
                    {
                        previewFrame = await MyMediaCapture.GetPreviewFrameAsync(videoFrame);
                        previewFrame.SoftwareBitmap.CopyToBuffer(buffer);
                        await streamSocketClient.sendBuffer(buffer);
                    }

                }
            }
            catch (Exception )
            {

            }

            if(streamSocketSrv.receive_byte_flag == 1)
            {
                if (streamSocketSrv.readByte.Length == 5)
                {
                    if ((streamSocketSrv.readByte[0] == 0xff) && (streamSocketSrv.readByte[4] == 0xff))
                    {
                            if (streamSocketSrv.readByte[1] == 0x00)
                            {
#if STEP3
                                if (streamSocketSrv.readByte[2] == 0x00)
                                {
                                    stop();
#if STEP4
                                    await TakePhoto();
#endif
                                }
                                else if (streamSocketSrv.readByte[2] == 0x01)
                                {
                                    go();
                                    await Task.Delay(200);
                                    stop();
                                 }
                                else if (streamSocketSrv.readByte[2] == 0x02)
                                {
                                    back();
                                    await Task.Delay(200);
                                    stop();
                                }
                                else if (streamSocketSrv.readByte[2] == 0x03)
                                {
                                    left();
                                    await Task.Delay(200);
                                    stop();
                                }
                                else if (streamSocketSrv.readByte[2] == 0x04)
                                {
                                    right();
                                    await Task.Delay(200);
                                    stop();
                                }
#endif
                            }
                    }
                }
                streamSocketSrv.receive_byte_flag = 0;
            }
            timer_tick_complete_flag = 0;
        }
Ejemplo n.º 49
0
        /// <summary>
        /// Start uploading the stream
        /// </summary>
        /// <param name="timer"></param>
        public async void UploadStream(ThreadPoolTimer timer)
        {
            if (this.currentState != ScenarioState.Streaming)
            {
                return;
            }

            // If a lock is being held it means we're still waiting for processing work on the previous frame to complete.
            // In this situation, don't wait on the semaphore but exit immediately.
            if (!this.frameProcessingSemaphore.Wait(0))
            {
                return;
            }
            try
            {
                using (VideoFrame frame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)this.videoProperties.Width, (int)this.videoProperties.Height))
                {
                    await this.mediaCapture.GetPreviewFrameAsync(frame);

                    SoftwareBitmap bitmap = frame.SoftwareBitmap;
                    EncodeStream(bitmap);
                }
            }
            catch (Exception)
            {
                //Handle the unexpected result error
                //throw;
            }
            finally
            {
                frameProcessingSemaphore.Release();
            }
        }
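// EncodeStream is called above but not shown in this excerpt. A hedged sketch of what such a helper
// might do: encode the Bgra8 preview bitmap to JPEG bytes and hand them to an upload routine.
// UploadBytesAsync is a placeholder for whatever transport the sample actually uses.
// Assumes Windows.Graphics.Imaging, Windows.Storage.Streams and System.Runtime.InteropServices.WindowsRuntime.
        private async void EncodeStream(SoftwareBitmap bitmap)
        {
            using (var stream = new InMemoryRandomAccessStream())
            {
                // Encode the frame as JPEG into an in-memory stream.
                BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);
                encoder.SetSoftwareBitmap(bitmap);
                await encoder.FlushAsync();

                // Copy the encoded image into a managed byte array ready for upload.
                stream.Seek(0);
                var bytes = new byte[stream.Size];
                await stream.ReadAsync(bytes.AsBuffer(), (uint)stream.Size, InputStreamOptions.None);

                await UploadBytesAsync(bytes);
            }
        }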
Ejemplo n.º 50
0
        /// <summary>
        /// Gets the current preview frame as a SoftwareBitmap, displays its properties in a TextBlock, and can optionally display the image
        /// in the UI and/or save it to disk as a jpg
        /// </summary>
        /// <returns>Task representing the async event status</returns>
        private async Task GetPreviewFrameAsSoftwareBitmapAsync()
        {
            // Get information about the preview
            VideoEncodingProperties previewProperties = this._mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

            // Create the video frame to request a SoftwareBitmap preview frame
            VideoFrame videoFrame = new VideoFrame(BitmapPixelFormat.Bgra8, (int)previewProperties.Width, (int)previewProperties.Height);

            // Capture the preview frame
            using (VideoFrame currentFrame = await this._mediaCapture.GetPreviewFrameAsync(videoFrame))
            {
                // Collect the resulting frame
                SoftwareBitmap previewFrame = currentFrame.SoftwareBitmap;

                // Show the frame information
                Debug.WriteLine("{0}x{1} {2}", previewFrame.PixelWidth, previewFrame.PixelHeight, previewFrame.BitmapPixelFormat);

                tempWriteableBitmap = new WriteableBitmap(previewFrame.PixelWidth, previewFrame.PixelHeight);
                previewFrame.CopyToBuffer(tempWriteableBitmap.PixelBuffer);

                // Crop to a square, based on the smallest side
                int minEdge = Math.Min(tempWriteableBitmap.PixelWidth, tempWriteableBitmap.PixelHeight);

                tempWriteableBitmap = tempWriteableBitmap
                    .Crop(0, 0, minEdge, minEdge)
                    .Resize(App.LedMatrix.PixelWidth, App.LedMatrix.PixelHeight, WriteableBitmapExtensions.Interpolation.Bilinear);

                WriteableBitmap previewFrameImageSource =
                    tempWriteableBitmap.Rotate(90).Resize(
                        (int)this.postViewbox.Height,
                        (int)this.postViewbox.Width,
                        WriteableBitmapExtensions.Interpolation.NearestNeighbor);

                this.previewFrameImage.Source = previewFrameImageSource;
            }
        }
        //
        // Save a modified VideoFrame using an existing image file path with an appended suffix
        //
        static async Task <string> SaveModifiedVideoFrameToFileAsync(string imageFilePath, VideoFrame frame)
        {
            try
            {
                StorageFile file = await StorageFile.GetFileFromPathAsync(imageFilePath);

                StorageFolder folder = await file.GetParentAsync();

                imageFilePath = file.Name.Replace(file.FileType, "_mod.jpg");
                StorageFile modifiedFile = await folder.CreateFileAsync(imageFilePath, CreationCollisionOption.GenerateUniqueName);

                imageFilePath = modifiedFile.Path;
                // Create the encoder from the stream
                using (IRandomAccessStream stream = await modifiedFile.OpenAsync(FileAccessMode.ReadWrite))
                {
                    BitmapEncoder encoder = await BitmapEncoder.CreateAsync(BitmapEncoder.JpegEncoderId, stream);

                    SoftwareBitmap softwareBitmap = frame.SoftwareBitmap;
                    encoder.SetSoftwareBitmap(softwareBitmap);
                    await encoder.FlushAsync();
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Could not save modified VideoFrame from file: {imageFilePath}");
                throw;
            }
            return(imageFilePath);
        }
Ejemplo n.º 52
0
    void UpdateVideoRenderer()
    {
        if (!useDedicatedServer || !Network.isServer)
        {
            VideoFrame videoFrame = new VideoFrame();
            //Initialize Videoplane, BackgroundCamera etc.
            if (!videoInitialized)
            {
                if (videoInitialized = videoSubscriber.getPixels(videoFrame))
                {
                    width = videoFrame.getWidth();
                    height = videoFrame.getHeight();

                    videoObject = new GameObject("VideoObject");
                    videoMesh = videoObject.AddComponent<MeshFilter>();
                    float horScale = 1.0f;// width;
                    float verScale = 1.0f;//height;
                    Vector3[] newVertices = { new Vector3(0, 0, 0), new Vector3(0, verScale, 0), new Vector3(horScale, verScale, 0), new Vector3(horScale, 0, 0) };
                    Vector2[] newUV = { new Vector2(1, 1), new Vector2(1, 0), new Vector2(0, 0), new Vector2(0, 1) };
                    int[] newTriangles = { 0, 1, 2, 2, 3, 0 };

                    videoMesh.mesh.vertices = newVertices;
                    videoMesh.mesh.uv = newUV;
                    videoMesh.mesh.triangles = newTriangles;
                    videoMesh.mesh.RecalculateNormals();

                    videoMeshRenderer = videoObject.AddComponent<MeshRenderer>();
                    videoObject.layer = 10;
                    videoMeshRenderer.enabled = true;
                    imagePlaneMaterial = new Material(Shader.Find("Diffuse"));
                    videoMeshRenderer.material = imagePlaneMaterial;

                    tex = new Texture2D(width/imageScaleFactor, height/imageScaleFactor, TextureFormat.RGB24, false);//new Texture2D(width, height);
                    this.imagePlaneMaterial.SetTexture("_MainTex", tex);

                    //flip y-Axis
                    Vector3 _positionCamera=positionCamera;
                    _positionCamera.y = -_positionCamera.y;

                    createForeGroundCamera(_positionCamera, size);
                    createBackGroundCamera(_positionCamera, size);
                    createVideoPlane(_positionCamera, size);
                }
            }
            //Update the texture every time there is a new video-frame
            if (videoInitialized)
            {
                uint currentCounter = videoSubscriber.getUpdateCounter();
                if (m_updateCounter != currentCounter)
                {
                    m_updateCounter = currentCounter;
                    videoSubscriber.getPixels(videoFrame);
                    int texId=tex.GetNativeTextureID();
                    if (! videoFrame.set(texId))//Works only in OpenGL
                    {
                        Color[] val = new Color[width/imageScaleFactor * height/imageScaleFactor];

                        float[] color = new float[3];
                        for (int y = 0; y < height; y++)
                        {
                            for (int x = 0; x < width; x++)
                            {
                                if ((y < height) && (x < width))
                                {
                                    videoFrame.getPixel(x, y, out color[0], out color[1], out color[2]);
                                }
                                else
                                {
                                    color[0] = 0.0f;
                                    color[1] = 0.0f;
                                    color[2] = 0.0f;
                                }
                                val[(y/imageScaleFactor * width/imageScaleFactor) + x/imageScaleFactor] = new Color(color[0], color[1], color[2]);
                                //val[y * width + x] = new Color(color[0], color[1], color[2]);
                            }
                        }

                        tex.SetPixels(val);
                        tex.Apply();
                    }
                }
            }
        }
    }
        /// <summary>
        /// Entry point of program
        /// </summary>
        /// <param name="args"></param>
        /// <returns></returns>
        static void Main(string[] args)
        {
            Task.Run(async() =>
            {
                string fileName;
                ImageRectifierInterpolationKind imageRectifierInterpolationKind = ImageRectifierInterpolationKind.Bilinear; // default value if none specified as argument
                ImageCleaningKind imageCleaningPreset    = ImageCleaningKind.WhiteboardOrDocument;                          // default value if none specified as argument
                bool skipQuadDetectionImageRectification = false;

                Console.WriteLine("Image Scanning .NetCore 3.0 Console App - This app executes a common productivity scenario using an input image and saving the result image to file:\n" +
                                  "1. finds the predominant quadrangle\n" +
                                  "2. uses this quadrangle to rectify and crop the image\n" +
                                  "3. cleans the rectified image\n\n");

                try
                {
                    // Parse arguments
                    if (args.Length < 1)
                    {
                        Console.WriteLine($"Allowed command arguments: <file path to .jpg or .png>" +
                                          " <optional image rectifier interpolation to apply to the rectified image:\n" +
                                          $"\t1. {ImageRectifierInterpolationKind.Bilinear}\n" +
                                          $"\t2. {ImageRectifierInterpolationKind.Bicubic}>\n" +
                                          "<optional image cleaning preset to apply to the rectified image:\n" +
                                          $"\t1. {ImageCleaningKind.WhiteboardOrDocument}\n" +
                                          $"\t2. {ImageCleaningKind.Whiteboard}\n" +
                                          $"\t3. {ImageCleaningKind.Document}\n" +
                                          $"\t4. {ImageCleaningKind.Picture}> ");
                        Console.WriteLine("i.e.: \n> ImageScanningSample_NetCore3.exe c:\\test\\test.jpg 1 1\n\n");
                        return;
                    }

                    // Load image from specified file path
                    fileName       = args[0];
                    var videoFrame = await LoadVideoFrameFromImageFileAsync(fileName);

                    // Parse optional image interpolation preset argument
                    int selection = 0;
                    if (args.Length < 2)
                    {
                        while (selection < 1 || selection > 3)
                        {
                            Console.WriteLine($"Select the image rectifier interpolation to apply to the rectified image:\n" +
                                              $"\t1. {ImageRectifierInterpolationKind.Bilinear}\n" +
                                              $"\t2. {ImageRectifierInterpolationKind.Bicubic}\n" +
                                              $"\t3. Skip Quad Detection and Image Rectification\n");
                            selection = int.Parse(Console.ReadLine());
                        }
                    }
                    else
                    {
                        selection = int.Parse(args[1]);
                        if (selection < 1 || selection > 3)
                        {
                            Console.WriteLine($"Invalid image rectifier interpolation specified, defaulting to {ImageRectifierInterpolationKind.Bilinear.ToString()}");
                            selection = (int)ImageRectifierInterpolationKind.Bilinear + 1;
                        }
                    }
                    skipQuadDetectionImageRectification = (selection == 3);
                    if (!skipQuadDetectionImageRectification)
                    {
                        imageRectifierInterpolationKind = (ImageRectifierInterpolationKind)(selection - 1);
                    }

                    // Parse optional image cleaning preset argument
                    selection = 0;
                    if (args.Length < 3)
                    {
                        while (selection < 1 || selection > 4)
                        {
                            Console.WriteLine($"Select the image cleaning preset to apply to the rectified image:\n" +
                                              $"\t1. {ImageCleaningKind.WhiteboardOrDocument}\n" +
                                              $"\t2. { ImageCleaningKind.Whiteboard}\n" +
                                              $"\t3. { ImageCleaningKind.Document}\n" +
                                              $"\t4. { ImageCleaningKind.Picture}");
                            selection = int.Parse(Console.ReadLine());
                        }
                    }
                    else
                    {
                        selection = int.Parse(args[2]);
                        if (selection < 1 || selection > 4)
                        {
                            Console.WriteLine($"Invalid image cleaning preset specified, defaulting to {ImageCleaningKind.WhiteboardOrDocument.ToString()}");
                            selection = (int)ImageCleaningKind.WhiteboardOrDocument + 1;
                        }
                    }
                    imageCleaningPreset = (ImageCleaningKind)(selection - 1);

                    // Create the skill descriptors
                    QuadDetectorDescriptor quadDetectorSkillDescriptor     = null;
                    ImageRectifierDescriptor imageRectifierSkillDescriptor = null;
                    ImageCleanerDescriptor imageCleanerSkillDescriptor     = new ImageCleanerDescriptor();
                    if (!skipQuadDetectionImageRectification)
                    {
                        quadDetectorSkillDescriptor   = new QuadDetectorDescriptor();
                        imageRectifierSkillDescriptor = new ImageRectifierDescriptor();
                    }


                    // Create instance of the skills
                    QuadDetectorSkill quadDetectorSkill     = null;
                    ImageRectifierSkill imageRectifierSkill = null;
                    ImageCleanerSkill imageCleanerSkill     = await imageCleanerSkillDescriptor.CreateSkillAsync() as ImageCleanerSkill;
                    if (!skipQuadDetectionImageRectification)
                    {
                        quadDetectorSkill   = await quadDetectorSkillDescriptor.CreateSkillAsync() as QuadDetectorSkill;
                        imageRectifierSkill = await imageRectifierSkillDescriptor.CreateSkillAsync() as ImageRectifierSkill;
                    }
                    var skillDevice = imageCleanerSkill.Device;
                    Console.WriteLine("Running Skill on : " + skillDevice.ExecutionDeviceKind.ToString() + ": " + skillDevice.Name);
                    Console.WriteLine($"Image file: {fileName}");

                    VideoFrame imageCleanerInputImage = videoFrame;
                    if (!skipQuadDetectionImageRectification)
                    {
                        // ### 1. Quad detection ###
                        // Create instance of QuadDetectorBinding and set features
                        var quadDetectorBinding = await quadDetectorSkill.CreateSkillBindingAsync() as QuadDetectorBinding;
                        await quadDetectorBinding.SetInputImageAsync(videoFrame);

                        // Run QuadDetectorSkill
                        await quadDetectorSkill.EvaluateAsync(quadDetectorBinding);

                        // ### 2. Image rectification ###
                        // Create instance of ImageRectifierBinding and set input features
                        var imageRectifierBinding = await imageRectifierSkill.CreateSkillBindingAsync() as ImageRectifierBinding;
                        await imageRectifierBinding.SetInputImageAsync(videoFrame);
                        await imageRectifierBinding.SetInputQuadAsync(quadDetectorBinding.DetectedQuads);
                        imageRectifierBinding.SetInterpolationKind(imageRectifierInterpolationKind);

                        // Run ImageRectifierSkill
                        await imageRectifierSkill.EvaluateAsync(imageRectifierBinding);

                        // Use the image rectification result as input image to the image cleaner skill
                        imageCleanerInputImage = imageRectifierBinding.OutputImage;
                    }
                    // ### 3. Image cleaner ###
                    // Create instance of QuadDetectorBinding and set features
                    var imageCleanerBinding = await imageCleanerSkill.CreateSkillBindingAsync() as ImageCleanerBinding;
                    await imageCleanerBinding.SetImageCleaningKindAsync(imageCleaningPreset);
                    await imageCleanerBinding.SetInputImageAsync(imageCleanerInputImage);

                    // Run ImageCleanerSkill
                    await imageCleanerSkill.EvaluateAsync(imageCleanerBinding);

                    // Retrieve result and save it to file
                    var results = imageCleanerBinding.OutputImage;

                    string outputFilePath = await SaveModifiedVideoFrameToFileAsync(fileName, results);
                    Console.WriteLine($"Written output image to {outputFilePath}");
                }
                catch (Exception e)
                {
                    Console.WriteLine($"Error: {e.TargetSite.ToString()}\n{e.Source.ToString()}\n{e.StackTrace.ToString()}\n{e.Message.ToString()}");
                    Console.WriteLine("To get more insight on the parameter format, call the executable without any parameters");
                    Environment.Exit(e.HResult);
                }
            }).Wait();
        }
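// Main above calls LoadVideoFrameFromImageFileAsync, which is not included in this excerpt. Below is a
// hedged sketch of such a helper: decode the image file, normalize it to premultiplied Bgra8, and wrap
// it in a VideoFrame so it can be bound to the vision skills.
// Assumes Windows.Storage, Windows.Storage.Streams, Windows.Graphics.Imaging and Windows.Media.
        static async Task<VideoFrame> LoadVideoFrameFromImageFileAsync(string imageFilePath)
        {
            StorageFile file = await StorageFile.GetFileFromPathAsync(imageFilePath);

            using (IRandomAccessStream stream = await file.OpenAsync(FileAccessMode.Read))
            {
                // Decode the image and convert it to the pixel format the skills expect.
                BitmapDecoder decoder = await BitmapDecoder.CreateAsync(stream);
                SoftwareBitmap bitmap = await decoder.GetSoftwareBitmapAsync();
                bitmap = SoftwareBitmap.Convert(bitmap, BitmapPixelFormat.Bgra8, BitmapAlphaMode.Premultiplied);

                return VideoFrame.CreateWithSoftwareBitmap(bitmap);
            }
        }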
Ejemplo n.º 54
0
        unsafe static void Main(string[] args)
        {
            const double MinimumFrequency = 10;
            const double MaximumFrequency = 20000;
            const double MaxDB            = 65;
            const int    fftSize          = 4192 * 6;

            var reader      = new MediaReader(@"D:\MyDocuments\Music\虾米音乐\Shiggy Jr.-oyasumi.mp3");
            var decoder     = reader.Decoders.OfType <AudioDecoder>().First();
            var videoFormat = new VideoFormat(1280, 720, AVPixelFormat.Rgb0, 4);
            var writer      = new MediaWriter(@"D:\fft.mp4")
                              .AddEncoder(new VideoEncoder(AVCodecID.H264, videoFormat, new VideoEncoderParameters {
                FrameRate = new Fraction(30), GopSize = 10
            }))
                              .AddEncoder(new AudioEncoder(AVCodecID.Mp3, decoder.InFormat))
                              //.AddVideo(videoFormat, new VideoEncoderParameters { FrameRate = new Fraction(30), GopSize = 10 })
                              //.AddAudio(decoder.InFormat)
                              .Initialize();

            int sampleRate = decoder.InFormat.SampleRate;
            int channels   = decoder.InFormat.Channels;
            var resampler  = new AudioResampler(decoder.InFormat, new AudioFormat(sampleRate, channels, 64));
            var inFrame    = new AudioFrame();
            var outFrame   = new AudioFrame();
            var image      = new VideoFrame(videoFormat);

            var viewHeight  = videoFormat.Height / 2;
            var observer    = new StreamObserver <double>(fftSize * 2, fftSize / 6, 2);
            var fft         = DoubleFFT.Create(fftSize);
            var inFFT       = fft.AllocInput();
            var outFFT      = fft.AllocOutput();
            var cutLength   = FFTTools.CutFrequencyLength(fftSize, MinimumFrequency, MaximumFrequency, sampleRate, fft.FFTComplexCount);
            var cutFFT      = Marshal.AllocHGlobal(cutLength * sizeof(double));
            var outFFT2     = Marshal.AllocHGlobal(fft.FFTComplexCount * sizeof(double));
            var outFFTFinal = Marshal.AllocHGlobal(viewHeight * sizeof(double));
            var window      = new BlackmanHarrisWindow(fftSize);
            var log         = new Spectrum3DLog();

            void FFT()
            {
                window.Apply((double *)inFFT, (double *)inFFT);
                fft.Execute(inFFT, outFFT);
                FFTTools.Abs(fftSize, (double *)outFFT, (double *)outFFT2);
                FFTTools.CutFrequency(fftSize, (double *)outFFT2, fft.FFTComplexCount, MinimumFrequency, MaximumFrequency, sampleRate, (double *)cutFFT, cutLength);
                FFTTools.Logarithm((double *)cutFFT, cutLength, MinimumFrequency, MaximumFrequency, (double *)outFFTFinal, viewHeight, log);
                FFTTools.ToDB((double *)outFFTFinal, viewHeight, MaxDB);
                FFTTools.Scale((double *)outFFTFinal, viewHeight, 1 / MaxDB);
            }

            void LeftShiftImage()
            {
                int w = image.Format.Width - 1;
                int h = image.Format.Height;

                for (int y = 0; y < h; y++)
                {
                    var p = (uint *)(image.Data[0] + image.Format.Strides[0] * y);
                    for (int x = 0; x < w; x++)
                    {
                        p[x] = p[x + 1];
                    }
                }
            }

            observer.Completed += data => {
                LeftShiftImage();

                int w      = image.Format.Width - 1;
                int h      = image.Format.Height;
                var p      = (byte *)((uint *)image.Data[0] + w);
                var stride = image.Format.Strides[0];

                for (int i = 0; i < fftSize; i++)
                {
                    ((double *)inFFT)[i] = ((double *)data)[2 * i];
                }
                FFT();
                for (int y = 0; y < viewHeight; y++, p += stride)
                {
                    var val = ((double *)outFFTFinal)[viewHeight - y - 1] * 256;
                    if (val < 0)
                    {
                        val = 0;
                    }
                    else if (val > 255)
                    {
                        val = 255;
                    }
                    p[0] = p[1] = p[2] = (byte)val;
                }

                for (int i = 0; i < fftSize; i++)
                {
                    ((double *)inFFT)[i] = ((double *)data)[2 * i + 1];
                }
                FFT();
                for (int y = 0; y < viewHeight; y++, p += stride)
                {
                    var val = ((double *)outFFTFinal)[viewHeight - y - 1] * 256;
                    if (val < 0)
                    {
                        val = 0;
                    }
                    else if (val > 255)
                    {
                        val = 255;
                    }
                    p[0] = p[1] = p[2] = (byte)val;
                }
            };

            bool end = false, run = true;

            while (run)
            {
                writer.Write(encoder => {
                    switch (encoder)
                    {
                    case AudioEncoder audioEncoder:
                        Console.Write($"\r{audioEncoder.InputTimestamp}");
                        if (reader.NextFrame(inFrame, decoder.StreamIndex))
                        {
                            resampler.Resample(inFrame, outFrame);
                            observer.Write(outFrame.Data[0], outFrame.SampleCount * channels);
                            return(inFrame);
                        }
                        else
                        {
                            resampler.ResampleFinal(outFrame);
                            observer.Write(outFrame.Data[0], outFrame.SampleCount * channels);
                            end = true;
                            Console.WriteLine($"\r{audioEncoder.InputTimestamp}");
                            return(null);
                        }

                    case VideoEncoder videoEncoder:
                        if (end)
                        {
                            run = false;
                        }
                        return(image);

                    default:
                        throw new NotImplementedException();
                    }
                });
            }
            writer.Dispose();
        }
Ejemplo n.º 55
0
 public double ComputeValenceIntensity(VideoFrame frame)
 {
     int weight = 0;
     if (frame.valence == Valence.NEGATIVE)
         weight = -1;
     else if (frame.valence == Valence.POSITIVE)
         weight = 1;
     else if (frame.valence == Valence.NEUTRAL)
         weight = 0;
     return frame.valenceIntensity * (double)weight;
 }
        public async Task StartScanningAsync(Action<ZXing.Result> scanCallback, MobileBarcodeScanningOptions options = null)
        {
            if (stopping)
                return;

            isAnalyzing = true;
            ScanCallback = scanCallback;
            ScanningOptions = options ?? MobileBarcodeScanningOptions.Default;

            topText.Text = TopText ?? string.Empty;
            bottomText.Text = BottomText ?? string.Empty;

            if (UseCustomOverlay)
            {
                gridCustomOverlay.Children.Clear();
                if (CustomOverlay != null)
                    gridCustomOverlay.Children.Add(CustomOverlay);

                gridCustomOverlay.Visibility = Visibility.Visible;
                gridDefaultOverlay.Visibility = Visibility.Collapsed;
            }
            else
            {
                gridCustomOverlay.Visibility = Visibility.Collapsed;
                gridDefaultOverlay.Visibility = Visibility.Visible;
            }

            // Find which device to use
            var preferredCamera = await this.GetFilteredCameraOrDefaultAsync(ScanningOptions);            
            mediaCapture = new MediaCapture();
            
            // Initialize the capture with the settings above
            await mediaCapture.InitializeAsync(new MediaCaptureInitializationSettings()
            {
                StreamingCaptureMode = StreamingCaptureMode.Video,
                VideoDeviceId = preferredCamera.Id
            });

            // Set the capture element's source to show it in the UI
            captureElement.Source = mediaCapture;

            // Start the preview
            await mediaCapture.StartPreviewAsync();
            
            // Get all the available resolutions for preview
            var availableProperties = mediaCapture.VideoDeviceController.GetAvailableMediaStreamProperties(MediaStreamType.VideoPreview);
            var availableResolutions = new List<CameraResolution>();
            foreach (var ap in availableProperties)
            {
                var vp = (VideoEncodingProperties)ap;
                System.Diagnostics.Debug.WriteLine("Camera Preview Resolution: {0}x{1}", vp.Width, vp.Height);
                availableResolutions.Add(new CameraResolution { Width = (int)vp.Width, Height = (int)vp.Height });                
            }
            CameraResolution previewResolution = null;
            if (ScanningOptions.CameraResolutionSelector != null)
                previewResolution = ScanningOptions.CameraResolutionSelector(availableResolutions);

            // If the user did not specify a resolution, let's try and find a suitable one
            if (previewResolution == null)
            {
                // Loop through all supported sizes
                foreach (var sps in availableResolutions)
                {
                    // Find one that's >= 640x360 but <= 1000x1000
                    // This will likely pick the *smallest* size in that range, which should be fine
                    if (sps.Width >= 640 && sps.Width <= 1000 && sps.Height >= 360 && sps.Height <= 1000)
                    {
                        previewResolution = new CameraResolution
                        {
                            Width = sps.Width,
                            Height = sps.Height
                        };
                        break;
                    }
                }
            }

            // Still nothing suitable? Fall back to the last resolution the device reported.
            if (previewResolution == null)
                previewResolution = availableResolutions.LastOrDefault();

            System.Diagnostics.Debug.WriteLine("Using Preview Resolution: {0}x{1}", previewResolution.Width, previewResolution.Height);

            // Find the matching property based on the selection, again
            var chosenProp = availableProperties.FirstOrDefault(ap => ((VideoEncodingProperties)ap).Width == previewResolution.Width && ((VideoEncodingProperties)ap).Height == previewResolution.Height);
            
            // Set the selected resolution
            await mediaCapture.VideoDeviceController.SetMediaStreamPropertiesAsync(MediaStreamType.VideoPreview, chosenProp);

            await SetPreviewRotationAsync();

            captureElement.Stretch = Stretch.UniformToFill;

            // Get our preview properties
            var previewProperties = mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;
            
            // Set up a destination frame to copy the preview into for analysis
            var destFrame = new VideoFrame(Windows.Graphics.Imaging.BitmapPixelFormat.Bgra8, (int)previewProperties.Width, (int)previewProperties.Height);

            var zxing = ScanningOptions.BuildBarcodeReader();

            timerPreview = new Timer(async (state) => {
                if (stopping)
                    return;                               
                if (mediaCapture == null || mediaCapture.CameraStreamState != Windows.Media.Devices.CameraStreamState.Streaming)
                    return;
                if (processing)
                    return;
                if (!isAnalyzing)
                    return;

                processing = true;

                SoftwareBitmapLuminanceSource luminanceSource = null;

                try
                {

                    // Get preview 
                    var frame = await mediaCapture.GetPreviewFrameAsync(destFrame);

                    // Create our luminance source
                    luminanceSource = new SoftwareBitmapLuminanceSource(frame.SoftwareBitmap);

                }
                catch (Exception ex)
                {
                    System.Diagnostics.Debug.WriteLine("GetPreviewFrame Failed: {0}", ex);
                }

                ZXing.Result result = null;

                try
                {
                    // Try decoding the image
                    if (luminanceSource != null)
                        result = zxing.Decode(luminanceSource);
                }
                catch (Exception)
                {
                    // Decoding failed on this frame; ignore it and try again on the next tick.
                }

                // Check if a result was found
                if (result != null && !string.IsNullOrEmpty (result.Text))
                {
                    if (!ContinuousScanning)
                        await StopScanningAsync();
                    LastScanResult = result;
                    ScanCallback(result);                    
                }

                processing = false;
                         
            }, null, TimeSpan.FromMilliseconds(200), TimeSpan.FromMilliseconds(200));           
        }
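As a usage note for the scanner above: a caller can supply a CameraResolutionSelector so the 640x360-to-1000x1000 fallback search never has to run. The sketch below is only an assumed call site; `scannerControl` stands in for whatever page or control exposes StartScanningAsync, and the selection policy shown is just one plausible choice.

        // Hypothetical call site (requires System.Linq). scannerControl is an assumed
        // name for the control that implements StartScanningAsync above.
        var options = new MobileBarcodeScanningOptions
        {
            // Prefer the smallest resolution at or above 1280x720; returning null
            // falls back to the scanner's built-in selection logic.
            CameraResolutionSelector = resolutions => resolutions
                .Where(r => r.Width >= 1280 && r.Height >= 720)
                .OrderBy(r => r.Width * r.Height)
                .FirstOrDefault()
        };

        await scannerControl.StartScanningAsync(result =>
            System.Diagnostics.Debug.WriteLine("Scanned barcode: " + result.Text),
            options);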
Example No. 57
0
        // Convenience overload: deserialize a timestamped frame using the standard
        // VideoFrame parser.
        public static void Read(Stream stream, out long timestamp, out IVideoFrame frame) =>
            Read(stream, s0 => VideoFrame.Read(s0), out timestamp, out frame);
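This overload simply fixes the frame parser to VideoFrame.Read, which suggests a more general Read that accepts a parser delegate for other IVideoFrame implementations. A hedged call-site sketch follows; FrameSerializer and the file name are invented for illustration and are not taken from the example.

        // Hypothetical usage (requires System and System.IO): replay timestamped
        // frames from a recorded stream. FrameSerializer stands in for whatever
        // static class hosts the Read method shown above.
        using (var stream = File.OpenRead("frames.bin"))
        {
            while (stream.Position < stream.Length)
            {
                FrameSerializer.Read(stream, out long timestamp, out IVideoFrame frame);
                Console.WriteLine($"frame at t={timestamp}: {frame}");
            }
        }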