/// <summary>
/// Builds a <see cref="FaceViewModel"/> from an analyze request/response pair.
/// If the response carries any error text (API-request or other), the model is
/// marked as not analyzed, the response is dropped, and the error details are
/// copied onto the model; otherwise the response is attached as a successful result.
/// </summary>
/// <param name="request">The original analyze request to echo back to the view.</param>
/// <param name="response">The analyze response, possibly containing error details.</param>
/// <returns>A view model representing either a successful or a failed analysis.</returns>
public static FaceViewModel Analyzed(FaceAnalyzeRequest request, FaceAnalyzeResponse response)
{
    bool hasError = !string.IsNullOrWhiteSpace(response.ApiRequestErrorMessage)
        || !string.IsNullOrWhiteSpace(response.ApiRequestErrorContent)
        || !string.IsNullOrWhiteSpace(response.OtherErrorMessage)
        || !string.IsNullOrWhiteSpace(response.OtherErrorContent);

    if (hasError)
    {
        return new FaceViewModel
        {
            IsAnalyzed = false,
            FaceAnalyzeRequest = request,
            // Deliberately null: the view shows error fields instead of results.
            FaceAnalyzeResponse = null,
            ApiRequestErrorMessage = response.ApiRequestErrorMessage,
            ApiRequestErrorContent = response.ApiRequestErrorContent,
            OtherErrorMessage = response.OtherErrorMessage,
            OtherErrorContent = response.OtherErrorContent
        };
    }

    return new FaceViewModel
    {
        IsAnalyzed = true,
        FaceAnalyzeRequest = request,
        FaceAnalyzeResponse = response
    };
}
/// <summary>
/// Handles a form-posted face-analyze request: validates the required fields,
/// and when the request is well formed, forwards it to an
/// <see cref="ImageFaceAnalyzer"/> and renders the analysis result.
/// </summary>
/// <param name="request">Form-posted analyze parameters (subscription key, endpoint, image source, identification options).</param>
/// <returns>The view populated with either validation errors or the analyze result.</returns>
public async Task<ActionResult<FaceViewModel>> Face([FromForm] FaceAnalyzeRequest request)
{
    // Collect all validation problems so the user sees every issue at once.
    string errorContent = string.Empty;

    if (string.IsNullOrWhiteSpace(request.FaceSubscriptionKey))
    {
        errorContent += "Missing or invalid Face Subscription Key (see 'Azure Settings' tab)" + Environment.NewLine;
    }

    if (string.IsNullOrWhiteSpace(request.FaceEndpoint))
    {
        errorContent += "Missing or invalid Face Endpoint (see 'Azure Settings' tab)" + Environment.NewLine;
    }

    // An image must arrive either as a URL or as an uploaded file of an allowed content type.
    bool noUsableImage = string.IsNullOrWhiteSpace(request.ImageUrl)
        && (request.File == null || !_allowedFileContentType.Contains(request.File.ContentType));
    if (noUsableImage)
    {
        errorContent += "Missing or invalid ImageUrl / no file provided" + Environment.NewLine;
    }

    // The group id is only required when identification is requested.
    if (request.EnableIdentification && string.IsNullOrWhiteSpace(request.IdentificationGroupId))
    {
        errorContent += "Missing or invalid Identification Group Id" + Environment.NewLine;
    }

    if (!string.IsNullOrWhiteSpace(errorContent))
    {
        var errorResponse = new FaceAnalyzeResponse
        {
            OtherErrorMessage = "Request not processed due to the following error(s):",
            OtherErrorContent = errorContent
        };
        return View(FaceViewModel.Analyzed(request, errorResponse));
    }

    Track("Vision_Face");

    var analyzer = new ImageFaceAnalyzer(request.FaceSubscriptionKey, request.FaceEndpoint, _httpClientFactory);
    var analyzeResult = await analyzer.AnalyzeAsync(
        request.ImageUrl,
        request.File,
        request.DetectionModel,
        request.EnableIdentification,
        request.RecognitionModel,
        request.IdentificationGroupType,
        request.IdentificationGroupId);

    return View(FaceViewModel.Analyzed(request, analyzeResult));
}
/// <summary>
/// Get face landmarks and attributes by passing its face_token which you can get from Detect API.
/// Face Analyze API allows you to process up to 5 face_token at a time.
/// </summary>
/// <param name="request">Request for analyze</param>
/// <returns>Response from face analyze</returns>
public async Task<FaceAnalyzeResponse> FaceAnalyzeAsync(FaceAnalyzeRequest request)
{
    // Endpoint shape: {base}/facepp/{version}/face/analyze
    var endpoint = $"{_baseUrl}/facepp/{Version}/face/analyze";
    return await FaceApiRequest<FaceAnalyzeRequest, FaceAnalyzeResponse>(request, endpoint);
}