private async Task<Face[]> imageProcessor()
{
    // Capture the current webcam frame to a temporary JPEG file.
    string imageFileUriPath = System.IO.Path.Combine(
        Directory.GetCurrentDirectory(), Guid.NewGuid().ToString() + ".jpg");
    FileStream fileStream = TakeSnapshotUsingTemporaryFile(imageFileUriPath);

    Face[] faces;
    try
    {
        // Run face detection on the captured frame.
        ImageAnalyzer analyzer = new ImageAnalyzer();
        faces = await analyzer.AnalyzeImageUsingHelper(fileStream);
    }
    catch (Exception)
    {
        // Detection failed: clear any previously drawn face markers
        // and clean up the temporary snapshot before bailing out.
        Ids.Children.Clear();
        fileStream.Close();
        File.Delete(imageFileUriPath);
        return null;
    }

    // Face coordinates are relative to the captured image, so compute the
    // ratios needed to map them onto the video control at the current DPI.
    double videoWidth = CameraVideoDeviceControl.ActualWidth * WebcamDevice.thisDpiWidthFactor;
    double videoHeight = CameraVideoDeviceControl.ActualHeight * WebcamDevice.thisDpiHeightFactor;
    double imageWidth = OutputImage.ActualWidth;
    double imageHeight = OutputImage.ActualHeight;
    double widthRatio = videoWidth / imageWidth;
    double heightRatio = videoHeight / imageHeight;

    // Drawing has to happen on the UI thread.
    this.Dispatcher.Invoke((Action)(() => { UpdateUIWithFaces(faces, widthRatio, heightRatio); }));

    // Clean up the temporary snapshot.
    fileStream.Close();
    File.Delete(imageFileUriPath);
    return faces;
}
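The UpdateUIWithFaces helper is not shown in this snippet. If it isn't defined elsewhere in the project, a minimal sketch of what it might do is shown below, assuming the Face type is the Cognitive Services contract type (Microsoft.ProjectOxford.Face.Contract) with a FaceRectangle exposing Left/Top/Width/Height, and that Ids is a Canvas overlaying CameraVideoDeviceControl; both of those are assumptions, not details taken from the article.

// Hypothetical sketch of UpdateUIWithFaces: draws one rectangle per detected
// face, scaled from image coordinates to video-control coordinates using the
// ratios computed in imageProcessor().
// Assumes: Face is Microsoft.ProjectOxford.Face.Contract.Face and Ids is a
// System.Windows.Controls.Canvas placed over the video control.
private void UpdateUIWithFaces(Face[] faces, double widthRatio, double heightRatio)
{
    Ids.Children.Clear();
    if (faces == null)
    {
        return;
    }

    foreach (Face face in faces)
    {
        var marker = new System.Windows.Shapes.Rectangle
        {
            Width = face.FaceRectangle.Width * widthRatio,
            Height = face.FaceRectangle.Height * heightRatio,
            Stroke = System.Windows.Media.Brushes.Yellow,
            StrokeThickness = 2
        };

        System.Windows.Controls.Canvas.SetLeft(marker, face.FaceRectangle.Left * widthRatio);
        System.Windows.Controls.Canvas.SetTop(marker, face.FaceRectangle.Top * heightRatio);
        Ids.Children.Add(marker);
    }
}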
/// <summary>
/// Train the model to detect particular faces.
/// </summary>
private async void TrainFaceDetector(object sender, RoutedEventArgs e)
{
    StatusText.Text = "Training...";
    ImageAnalyzer analyzer = new ImageAnalyzer();
    await analyzer.TrainFaceDetector();
}
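The internals of ImageAnalyzer.TrainFaceDetector are not shown here. As a rough sketch only, person-group training with the classic Microsoft.ProjectOxford.Face client might look like the code below; the subscription key and person group id are placeholders (assumptions), not values from the article, and the polling loop is just one common way to wait for training to finish.

// Hypothetical sketch: start person-group training on the Face API and poll
// until the service reports it is no longer running.
// Requires: using Microsoft.ProjectOxford.Face; using Microsoft.ProjectOxford.Face.Contract;
public async Task TrainFaceDetector()
{
    // Placeholder key and group id; substitute your own values.
    var client = new FaceServiceClient("YOUR_FACE_API_SUBSCRIPTION_KEY");
    const string personGroupId = "my-person-group";

    await client.TrainPersonGroupAsync(personGroupId);

    // Training runs asynchronously on the service side, so poll its status.
    TrainingStatus trainingStatus;
    do
    {
        await Task.Delay(1000);
        trainingStatus = await client.GetPersonGroupTrainingStatusAsync(personGroupId);
    } while (trainingStatus.Status == Status.Running);
}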