/// <summary>
/// Runs all three image-analysis services (Google Vision, MS Face Detect, MS Computer Vision)
/// against one photo, builds the canonical combined analysis, and persists all four JSON
/// payloads to table storage.
/// </summary>
/// <param name="log">Function trace writer.</param>
/// <param name="photoToAnalyze">Photo descriptor; Url and Blog are read here.</param>
/// <param name="imageAnalysisTableAdapter">Already-initialized table adapter used for the inserts.</param>
/// <returns>null on success; a short error string when any of the service responses is unusable.</returns>
public static async Task<string> AnalyzePhoto(TraceWriter log, PhotoToAnalyze photoToAnalyze, ImageAnalysisTableAdapter imageAnalysisTableAdapter)
{
    using (HttpClient httpClient = new HttpClient())
    {
        Stopwatch stopwatch = Stopwatch.StartNew();

        // Sequential on purpose: the MS helpers share httpClient and (per the pattern in this
        // file) mutate DefaultRequestHeaders, so the calls must not overlap.
        VisionApiResponse visionApiResponse = await GetGoogleVisionApi(log, photoToAnalyze, httpClient);
        List<Face> msFaces = await GetMsFaces(log, photoToAnalyze, httpClient);
        Analysis msAnalysis = await GetMsAnalysis(log, photoToAnalyze, httpClient);

        // BUGFIX: the original checked visionApiResponse?.Responses.Count, which throws
        // NullReferenceException when the response object exists but Responses is null.
        // The extra ?. makes a null Responses fall through to the failure path instead.
        if (visionApiResponse?.Responses?.Count == 1 && msAnalysis != null)
        {
            ImageAnalysis canonicalImageAnalysis = new ImageAnalysis(visionApiResponse.Responses[0], msAnalysis, msFaces);
            ImageAnalysisEntity imageAnalysisEntity = new ImageAnalysisEntity
            {
                // for canonical truncate decimal precision to 4 decimal places, for others keep original precision
                CanonicalJson = JsonConvert.SerializeObject(canonicalImageAnalysis, JsonUtils.AnalysisSerializerSettings),
                GoogleVisionApiJson = JsonConvert.SerializeObject(visionApiResponse.Responses[0], JsonUtils.JsonSerializerSettings),
                MsCognitiveFaceDetectJson = JsonConvert.SerializeObject(msFaces, JsonUtils.JsonSerializerSettings),
                MsAnalysisJson = JsonConvert.SerializeObject(msAnalysis, JsonUtils.JsonSerializerSettings)
            };

            // NOTE(review): the 30000/45000-char thresholds presumably guard a table-storage
            // property size limit — confirm against the adapter's storage backend.
            if (imageAnalysisEntity.GoogleVisionApiJson.Length > 30000)
            {
                // TextAnnotations (OCR) is by far the largest part of the Google payload;
                // drop it and re-serialize to get under the size limit.
                log.Warning($"Google vision API response JSON is {imageAnalysisEntity.GoogleVisionApiJson.Length} chars, removing TextAnnotations");
                visionApiResponse.Responses[0].TextAnnotations = null;
                imageAnalysisEntity.GoogleVisionApiJson = JsonConvert.SerializeObject(visionApiResponse.Responses[0], JsonUtils.JsonSerializerSettings);
            }

            if (imageAnalysisEntity.GoogleVisionApiJson.Length > 45000)
            {
                log.Warning($"GoogleVisionApiJson still is {imageAnalysisEntity.GoogleVisionApiJson.Length} chars after removing TextAnnotations");
            }

            if (imageAnalysisEntity.CanonicalJson.Length > 45000)
            {
                log.Warning($"CanonicalJson is {imageAnalysisEntity.CanonicalJson.Length} chars");
            }

            if (imageAnalysisEntity.MsCognitiveFaceDetectJson.Length > 45000)
            {
                log.Warning($"MsCognitiveFaceDetectJson is {imageAnalysisEntity.MsCognitiveFaceDetectJson.Length} chars");
            }

            if (imageAnalysisEntity.MsAnalysisJson.Length > 45000)
            {
                log.Warning($"MsAnalysisJson is {imageAnalysisEntity.MsAnalysisJson.Length} chars");
            }

            imageAnalysisTableAdapter.InsertImageAnalysis(imageAnalysisEntity, photoToAnalyze.Url);
            imageAnalysisTableAdapter.InsertBlogImageAnalysis(SanityHelper.SanitizeSourceBlog(photoToAnalyze.Blog), photoToAnalyze.Url);
            log.Info($"All analyses for {photoToAnalyze.Url} saved in {stopwatch.ElapsedMilliseconds}ms");
            return null;
        }

        log.Warning("Failed to get all responses");
        return "Failed to get all responses";
    }
}
public static HttpResponseMessage Run([HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = "getfrequencies")] HttpRequestMessage req, TraceWriter log) { Startup.Init(); ImageAnalysisTableAdapter imageAnalysisTableAdapter = new ImageAnalysisTableAdapter(); imageAnalysisTableAdapter.Init(); TokenAllocationTableAdapter tokenAllocationTableAdapter = new TokenAllocationTableAdapter(); tokenAllocationTableAdapter.Init(); List <ImageAnalysisEntity> analyses = imageAnalysisTableAdapter.GetAllCanonical(); Dictionary <string, int> digramFrequencies = new Dictionary <string, int>(); Dictionary <string, int> labelFrequencies = new Dictionary <string, int>(); int processedCount = 0; foreach (ImageAnalysisEntity entity in analyses) { ImageAnalysis canonicalAnalysis = JsonConvert.DeserializeObject <ImageAnalysis>(entity.CanonicalJson); UpdateCounts(StringTokenizer.GetDigrams(canonicalAnalysis.TokenizedText), digramFrequencies); UpdateCounts(canonicalAnalysis.Labels.Keys.Distinct(), labelFrequencies); processedCount++; if (processedCount % 100 == 0) { log.Info($"Processed frequencies for {processedCount} image analyses"); } } log.Info($"Inserting {digramFrequencies.Count} digrams"); tokenAllocationTableAdapter.InsertFrequencies(TokenAllocationTableAdapter.PartitionDigram, digramFrequencies); log.Info($"Inserting {labelFrequencies.Count} labels"); tokenAllocationTableAdapter.InsertFrequencies(TokenAllocationTableAdapter.PartitionLabel, labelFrequencies); return(req.CreateResponse(HttpStatusCode.OK, $"Processed {analyses.Count} analyses")); }
public static HttpResponseMessage Run([HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = "redo-canonical")] HttpRequestMessage req, TraceWriter log) { Startup.Init(); ImageAnalysisTableAdapter imageAnalysisTableAdapter = new ImageAnalysisTableAdapter(); imageAnalysisTableAdapter.Init(); List <ImageAnalysisEntity> analyses = imageAnalysisTableAdapter.GetAll(); foreach (ImageAnalysisEntity entity in analyses) { Response visionResponse = JsonConvert.DeserializeObject <Response>(entity.GoogleVisionApiJson); Analysis msAnalysis = JsonConvert.DeserializeObject <Analysis>(entity.MsAnalysisJson); List <Face> msFaces = JsonConvert.DeserializeObject <List <Face> >(entity.MsCognitiveFaceDetectJson); ImageAnalysis canonicalImageAnalysis = new ImageAnalysis(visionResponse, msAnalysis, msFaces); entity.CanonicalJson = JsonConvert.SerializeObject(canonicalImageAnalysis, JsonUtils.AnalysisSerializerSettings); imageAnalysisTableAdapter.UpdateImageAnalysis(entity); } return(req.CreateResponse(HttpStatusCode.OK, $"Updated {analyses.Count} analyses")); }
/// <summary>
/// HTTP endpoint "processimage": a debug/smoke-test function that runs one hard-coded
/// image through all three analysis services inline (Google Vision, MS Face Detect,
/// MS Computer Vision) and builds the canonical analysis without persisting anything.
/// </summary>
/// <param name="req">Incoming HTTP request (unused).</param>
/// <param name="log">Function trace writer.</param>
public static async Task Run([HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = "processimage")] HttpRequestMessage req, TraceWriter log)
{
    Startup.Init();
    ImageAnalysisTableAdapter imageAnalysisTableAdapter = new ImageAnalysisTableAdapter();
    imageAnalysisTableAdapter.Init();

    const string imageUrl = "https://tumblrpics.blob.core.windows.net/orig-66/c48440825ac6eab1af6c4de39bbc59d6_ph8zkhbeVA1tf8706_1280.jpg";

    using (HttpClient httpClient = new HttpClient())
    {
        Stopwatch stopwatch = Stopwatch.StartNew();

        // --- Google Vision API ---
        string apiKey = ConfigurationManager.AppSettings["GoogleApiKey"];
        string url = "https://vision.googleapis.com/v1/images:annotate?key=" + apiKey;
        VisionApiRequest request = VisionApiRequest.CreateFromImageUris(imageUrl);
        string requestJson = JsonConvert.SerializeObject(request, JsonUtils.GoogleSerializerSettings);
        StringContent stringContent = new StringContent(requestJson, Encoding.UTF8, "application/json");
        HttpResponseMessage response = await httpClient.PostAsync(url, stringContent);
        HttpContent responseContent = response.Content;
        string googleVisionResponseString = await responseContent.ReadAsStringAsync();
        VisionApiResponse visionApiResponse = JsonConvert.DeserializeObject<VisionApiResponse>(googleVisionResponseString, JsonUtils.GoogleSerializerSettings);

        // --- MS Cognitive Face Detect ---
        string faceApiKey = ConfigurationManager.AppSettings["FaceApiKey"];
        httpClient.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", faceApiKey);
        stringContent = new StringContent($"{{\"url\":\"{imageUrl}\"}}", Encoding.UTF8, "application/json");
        response = await httpClient.PostAsync(
            "https://northeurope.api.cognitive.microsoft.com/face/v1.0/detect?returnFaceId=true&returnFaceLandmarks=false&returnFaceAttributes=" +
            "age,gender,headPose,smile,facialHair,glasses,emotion,hair,makeup,occlusion,accessories,blur,exposure,noise", stringContent);
        responseContent = response.Content;
        string msDetectResponseString = await responseContent.ReadAsStringAsync();
        List<Face> msFaces = JsonConvert.DeserializeObject<List<Face>>(msDetectResponseString);

        // --- MS Computer Vision Analyze ---
        // The subscription-key header must be swapped, not duplicated, before the second MS call.
        string visionApiKey = ConfigurationManager.AppSettings["ComputerVisionApiKey"];
        httpClient.DefaultRequestHeaders.Remove("Ocp-Apim-Subscription-Key");
        httpClient.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", visionApiKey);
        stringContent = new StringContent($"{{\"url\":\"{imageUrl}\"}}", Encoding.UTF8, "application/json");
        response = await httpClient.PostAsync(
            "https://northeurope.api.cognitive.microsoft.com/vision/v2.0/analyze?visualFeatures=Description,ImageType,Adult,Categories,Tags,Objects,Color&language=en", stringContent);
        responseContent = response.Content;
        string msAnalyzeResponseString = await responseContent.ReadAsStringAsync();
        Analysis msAnalysis = JsonConvert.DeserializeObject<Analysis>(msAnalyzeResponseString);

        // BUGFIX: the original dereferenced visionApiResponse.Responses and msFaces.Count
        // unconditionally; either deserialization can yield null (error payloads / empty
        // bodies), which would throw NullReferenceException here. Null-conditionals make
        // a bad payload fall through to the failure log instead.
        if (visionApiResponse?.Responses?.Count == 1 && msFaces?.Count > 0 && msAnalysis != null)
        {
            // Result is intentionally discarded: this endpoint only exercises the
            // ImageAnalysis canonicalization path end-to-end; nothing is persisted.
            ImageAnalysis canonicalImageAnalysis = new ImageAnalysis(visionApiResponse.Responses[0], msAnalysis, msFaces);
            // BUGFIX: stopwatch was started but never read in the original.
            log.Info($"Processed {imageUrl} in {stopwatch.ElapsedMilliseconds}ms");
        }
        else
        {
            log.Warning($"Incomplete analysis responses for {imageUrl}");
        }
    }
}