/// <summary>
/// Starts the camera preview: keeps the display awake, wires the preview
/// element (mirroring it for front-facing cameras), starts the capture
/// preview, and then polls a frame every 0.5 s, raising
/// <c>NewFrameCaptured</c> with the frame's pixel bytes.
/// </summary>
private async Task StartPreviewAsync()
{
    // Prevent the device from sleeping while the preview is running.
    _displayRequest.RequestActive();

    // Setup preview source in UI and mirror if required.
    VideoCapture.Source = _mediaCapture;
    VideoCapture.FlowDirection = _mirroringPreview ? FlowDirection.RightToLeft : FlowDirection.LeftToRight;

    this.VideoProperties = this._mediaCapture.VideoDeviceController.GetMediaStreamProperties(MediaStreamType.VideoPreview) as VideoEncodingProperties;

    // Start preview, then align its rotation with the device orientation.
    await _mediaCapture.StartPreviewAsync();
    _isPreviewing = true;
    await SetPreviewRotationAsync();

    // Poll a preview frame twice per second. The flag below prevents
    // overlapping ticks: GetPreviewFrameAsync can take longer than the
    // timer interval, and MediaCapture does not support concurrent
    // frame requests.
    var frameGrabInFlight = false;

    _timer = new DispatcherTimer { Interval = TimeSpan.FromSeconds(0.5) };
    _timer.Tick += async (sender, o) =>
    {
        // Snapshot the capture object: it can be nulled by cleanup
        // between the check and the use.
        var capture = _mediaCapture;
        if (capture == null || frameGrabInFlight)
        {
            return;
        }

        frameGrabInFlight = true;
        try
        {
            const BitmapPixelFormat inputPixelFormat = BitmapPixelFormat.Bgra8;

            // 'using' guarantees the frame is disposed even if the grab
            // or the pixel-byte conversion throws (the original leaked
            // the frame on exception).
            using (var previewFrame = new Windows.Media.VideoFrame(inputPixelFormat, CameraResolutionWidth, CameraResolutionHeight))
            {
                await capture.GetPreviewFrameAsync(previewFrame);

                var imageBytes = await GetPixelBytesFromSoftwareBitmapAsync(previewFrame.SoftwareBitmap);

                // NOTE(review): DateTime.Now is local time — consumers may
                // expect UTC; confirm before switching to UtcNow.
                NewFrameCaptured?.Invoke(this, new NewFrameEventArgs(new Models.VideoFrame()
                {
                    ImageBytes = imageBytes,
                    Timestamp = DateTime.Now
                }));
            }
        }
        finally
        {
            frameGrabInFlight = false;
        }
    };
    _timer.Start();
}
/// <summary>
/// Loads an ONNX model and runs a single evaluation against the supplied
/// video frame, logging each stage to the console. The evaluation result
/// is discarded; this method only exercises the inference pipeline.
/// </summary>
/// <param name="frame">Frame to bind as the model's input tensor.</param>
/// <param name="modelPath">Path to the ONNX model file. Defaults to the
/// original hard-coded "squeezenet.onnx".</param>
/// <param name="inputName">Name of the model's input feature. Defaults to
/// the original hard-coded "data_0".</param>
static void PerformInference(
    Windows.Media.VideoFrame frame,
    string modelPath = "squeezenet.onnx",
    string inputName = "data_0")
{
    Console.WriteLine($"Load {modelPath}.");
    using (var model = Microsoft.AI.MachineLearning.LearningModel.LoadFromFilePath(modelPath))
    {
        Console.WriteLine("Create LearningModelSession.");
        using (var session = new Microsoft.AI.MachineLearning.LearningModelSession(model))
        {
            Console.WriteLine("Create LearningModelBinding.");
            var binding = new Microsoft.AI.MachineLearning.LearningModelBinding(session);

            Console.WriteLine($"Bind {inputName}.");
            binding.Bind(inputName, frame);

            Console.WriteLine("Evaluate.");
            // Correlation id is unused here, hence the empty string.
            var results = session.Evaluate(binding, "");
        }
        Console.WriteLine("Success!\n");
    }
}