/// <summary>
/// Pick an image for detection, run face detection on it, and put the detection
/// results into <c>RightResultCollection</c> for later verification.
/// </summary>
/// <param name="sender">Event sender</param>
/// <param name="e">Event argument</param>
private async void RightImagePicker_Click(object sender, RoutedEventArgs e)
{
    // Show image picker, restricted to common image file types (jpg/png/bmp/gif)
    Microsoft.Win32.OpenFileDialog dlg = new Microsoft.Win32.OpenFileDialog();
    dlg.DefaultExt = ".jpg";
    dlg.Filter = "Image files(*.jpg, *.png, *.bmp, *.gif) | *.jpg; *.png; *.bmp; *.gif";
    var result = dlg.ShowDialog();
    if (result.HasValue && result.Value)
    {
        // Reset any previous verification outcome before loading a new image
        FaceVerifyResult = string.Empty;

        // User already picked one image; load it honoring its EXIF orientation
        var pickedImagePath = dlg.FileName;
        var renderingImage = UIHelper.LoadImageAppliedOrientation(pickedImagePath);
        var imageInfo = UIHelper.GetImageInfoForRendering(renderingImage);
        RightImageDisplay.Source = renderingImage;

        // Clear last time detection results; verification needs faces on BOTH sides
        RightResultCollection.Clear();
        FaceVerifyButton.IsEnabled = (LeftResultCollection.Count != 0 && RightResultCollection.Count != 0);
        MainWindow.Log("Request: Detecting in {0}", pickedImagePath);

        // Call detection REST API, detect faces inside the image
        using (var fileStream = File.OpenRead(pickedImagePath))
        {
            try
            {
                var faceServiceClient = FaceServiceClientHelper.GetInstance(this);
                var faces = await faceServiceClient.Face.DetectWithStreamAsync(fileStream, recognitionModel: recognitionModel);

                // Handle REST API calling error
                if (faces == null)
                {
                    return;
                }

                MainWindow.Log("Response: Success. Detected {0} face(s) in {1}", faces.Count, pickedImagePath);

                // Convert detection results into UI binding objects for rendering
                foreach (var face in UIHelper.CalculateFaceRectangleForRendering(faces, MaxImageSize, imageInfo))
                {
                    // Detected faces are hosted in the result container, used in the verification later
                    RightResultCollection.Add(face);
                }

                // Re-evaluate now that the right side may contain detected faces
                FaceVerifyButton.IsEnabled = (LeftResultCollection.Count != 0 && RightResultCollection.Count != 0);
            }
            catch (APIErrorException ex)
            {
                MainWindow.Log("Response: {0}. {1}", ex.Body.Error.Code, ex.Body.Error.Message);
                return;
            }
        }
    }
    // NOTE(review): removed explicit GC.Collect() and an unused Stopwatch —
    // forcing a collection in production code is an anti-pattern and the
    // stopwatch result was never read.
}
/// <summary>
/// Pick an image for detection, run face detection on it, and put the detection
/// results into <c>RightResultCollection</c> for later verification.
/// </summary>
/// <param name="sender">Event sender</param>
/// <param name="e">Event argument</param>
private async void RightImagePicker_Click(object sender, RoutedEventArgs e)
{
    // Show image picker, show jpg type files only
    Microsoft.Win32.OpenFileDialog dlg = new Microsoft.Win32.OpenFileDialog();
    dlg.DefaultExt = ".jpg";
    dlg.Filter = "Image files(*.jpg) | *.jpg";
    var result = dlg.ShowDialog();
    if (result.HasValue && result.Value)
    {
        // Reset any previous verification outcome before loading a new image
        VerifyResult = string.Empty;

        // User already picked one image
        var pickedImagePath = dlg.FileName;
        var imageInfo = UIHelper.GetImageInfoForRendering(pickedImagePath);
        RightImageDisplay.Source = new BitmapImage(new Uri(pickedImagePath));

        // Clear last time detection results
        RightResultCollection.Clear();
        MainWindow.Log("Request: Detecting in {0}", pickedImagePath);

        // Call detection REST API, detect faces inside the image
        using (var fileStream = File.OpenRead(pickedImagePath))
        {
            try
            {
                // Guard the cast: GetWindow can return a window that is not MainWindow,
                // in which case the original code would throw NullReferenceException.
                MainWindow mainWindow = Window.GetWindow(this) as MainWindow;
                if (mainWindow == null)
                {
                    return;
                }

                string subscriptionKey = mainWindow._scenariosControl.SubscriptionKey;
                var faceServiceClient = new FaceServiceClient(subscriptionKey);
                var faces = await faceServiceClient.DetectAsync(fileStream);

                // Handle REST API calling error
                if (faces == null)
                {
                    return;
                }

                MainWindow.Log("Response: Success. Detected {0} face(s) in {1}", faces.Length, pickedImagePath);

                // Convert detection results into UI binding objects for rendering
                foreach (var face in UIHelper.CalculateFaceRectangleForRendering(faces, MaxImageSize, imageInfo))
                {
                    // Detected faces are hosted in the result container, used in the verification later
                    RightResultCollection.Add(face);
                }
            }
            catch (FaceAPIException ex)
            {
                MainWindow.Log("Response: {0}. {1}", ex.ErrorCode, ex.ErrorMessage);
                return;
            }
        }
    }
    // NOTE(review): removed an unused Stopwatch — its result was never read.
}
/// <summary>
/// Loads the owner's reference image, displays it, detects the faces in it and
/// stores the detection results in <c>RightResultCollection</c>.
/// </summary>
/// <param name="mainWindowInstance">Main window used for logging and API access</param>
private async Task SetModelImage(MainWindow mainWindowInstance)
{
    // Resolve the configured owner image and load it honoring its EXIF orientation
    var ownerImagePath = System.IO.Path.GetFullPath(Constants.OwnerImagePath);
    var ownerRenderingImage = FaceRecognitionHelper.LoadImageAppliedOrientation(ownerImagePath);
    var ownerImageInfo = FaceRecognitionHelper.GetImageInfoForRendering(ownerRenderingImage);
    RightImageDisplay.Source = ownerRenderingImage;
    mainWindowInstance.Log(string.Format("Request: Detecting in {0}", ownerImagePath));

    // Clear last time detection results, then host the freshly detected faces
    RightResultCollection.Clear();
    var detectedFaces = await MicrosoftApiHelper.DetectFaces(ownerImagePath, mainWindowInstance, ownerImageInfo);
    foreach (var face in detectedFaces)
    {
        RightResultCollection.Add(face);
    }
    // NOTE(review): removed an unused Stopwatch — its result was never read.
}
/// <summary>
/// Pick an image for detection, run face detection on it, and put the detection
/// results into <c>RightResultCollection</c> for later verification.
/// </summary>
/// <param name="sender">Event sender</param>
/// <param name="e">Event argument</param>
private async void RightImagePicker_Click(object sender, RoutedEventArgs e)
{
    // Show image picker, show jpg type files only
    Microsoft.Win32.OpenFileDialog dlg = new Microsoft.Win32.OpenFileDialog
    {
        DefaultExt = ".jpg",
        Filter = "Image files(*.jpg) | *.jpg"
    };
    var result = dlg.ShowDialog();
    // Guard clause: bail out unless the user actually confirmed a selection
    if (!result.HasValue || !result.Value)
    {
        return;
    }

    // Reset any previous verification outcome before loading a new image
    VerifyResult = string.Empty;

    // User already picked one image
    var pickedImagePath = dlg.FileName;
    var imageInfo = UIHelper.GetImageInfoForRendering(pickedImagePath);
    RightImageDisplay.Source = new BitmapImage(new Uri(pickedImagePath));

    // Clear last time detection results
    RightResultCollection.Clear();
    Output = Output.AppendLine($"发送请求: 检测图片 {pickedImagePath} 中");

    // Call detection REST API, detect faces inside the image
    using (var fileStream = File.OpenRead(pickedImagePath))
    {
        try
        {
            MainWindow mainWindow = Window.GetWindow(this) as MainWindow;
            if (mainWindow == null)
            {
                return;
            }

            string subscriptionKey = mainWindow.SubscriptionKey;
            var faceServiceClient = new FaceServiceClient(subscriptionKey);
            var faces = await faceServiceClient.DetectAsync(fileStream);

            // Handle REST API calling error
            if (faces == null)
            {
                return;
            }

            Output = Output.AppendLine($"反馈:检测成功. 共发现 {faces.Length} 张脸 在图片 {pickedImagePath}");

            // Convert detection results into UI binding objects for rendering
            foreach (var face in UIHelper.CalculateFaceRectangleForRendering(faces, MaxImageSize, imageInfo))
            {
                // Detected faces are hosted in the result container, used in the verification later
                RightResultCollection.Add(face);
            }
        }
        catch (ClientException ex)
        {
            Output = Output.AppendLine($"反馈: 出错啦 {ex.Error.Code}. {ex.Error.Message}");
            return;
        }
    }
    // NOTE(review): removed an unused Stopwatch — its result was never read.
}
/// <summary>
/// Checks whether authentication succeeded by detecting the face in the latest
/// webcam capture and the owner's reference image, then verifying that the two
/// faces belong to the same person.
/// </summary>
/// <param name="mainWindowInstance">Main window used for logging and navigation</param>
/// <returns><c>true</c> when the two faces are verified as the same person; otherwise <c>false</c></returns>
private async Task<bool> VerifyAuth(MainWindow mainWindowInstance)
{
    // Detect the face in the most recent webcam capture (left side)
    var webcamImagePath = System.IO.Path.GetFullPath(ImageNameHelper.GetLatestWebcamImage());
    var webcamRenderingImage = FaceRecognitionHelper.LoadImageAppliedOrientation(webcamImagePath);
    var webcamImageInfo = FaceRecognitionHelper.GetImageInfoForRendering(webcamRenderingImage);
    mainWindowInstance.Log(string.Format("Request: Detecting in {0}", webcamImagePath));

    LeftResultCollection.Clear();
    var detectedFaces = await MicrosoftApiHelper.DetectFaces(webcamImagePath, mainWindowInstance, webcamImageInfo);
    foreach (var face in detectedFaces)
    {
        LeftResultCollection.Add(face);
    }

    // Detect the face in the owner's reference image (right side)
    var ownerImagePath = System.IO.Path.GetFullPath(Constants.OwnerImagePath);
    var ownerRenderingImage = FaceRecognitionHelper.LoadImageAppliedOrientation(ownerImagePath);
    var ownerImageInfo = FaceRecognitionHelper.GetImageInfoForRendering(ownerRenderingImage);
    mainWindowInstance.Log(string.Format("Request: Detecting in {0}", ownerImagePath));

    // Clear last time detection results
    RightResultCollection.Clear();
    detectedFaces = await MicrosoftApiHelper.DetectFaces(ownerImagePath, mainWindowInstance, ownerImageInfo);
    foreach (var face in detectedFaces)
    {
        RightResultCollection.Add(face);
    }

    // Verification requires exactly one detectable face on each side
    if (LeftResultCollection.Count == 1 && RightResultCollection.Count == 1)
    {
        var faceId1 = LeftResultCollection[0].FaceId;
        var faceId2 = RightResultCollection[0].FaceId;
        mainWindowInstance.Log(string.Format("Request: Verifying face {0} and {1}", faceId1, faceId2));

        // Call verify REST API with the two face ids
        try
        {
            var faceServiceClient = new FaceServiceClient(Constants.SubscriptionKey, Constants.ApiEndpoint);
            var res = await faceServiceClient.VerifyAsync(Guid.Parse(faceId1), Guid.Parse(faceId2));
            mainWindowInstance.Log(string.Format(
                "Response: Confidence = {0:0.00}, Face {1} and {2} {3} to the same person",
                res.Confidence, faceId1, faceId2, res.IsIdentical ? "belong" : "not belong"));
            return res.IsIdentical;
        }
        catch (FaceAPIException ex)
        {
            mainWindowInstance.Log(string.Format("Response: {0}. {1}", ex.ErrorCode, ex.ErrorMessage));
            return false;
        }
    }
    else
    {
        MessageBox.Show("Verification accepts two faces as input, please pick images with only one detectable face in it.", "Warning", MessageBoxButton.OK);
        mainWindowInstance.NavigateTo(typeof(WebcamPage));
        return false;
    }
    // NOTE(review): removed the unreachable GC.Collect() (both branches above
    // return, so it was dead code and a GC anti-pattern besides), two unused
    // Stopwatch locals, and commented-out code.
}