/// <summary>
/// Pushes the latest analysis results to the captions and tags controls,
/// marshalling to the UI thread when called from a background thread.
/// </summary>
private void UpdateWithAnalysis(ImageAnalysis analysis, ImagePrediction analysisCV)
{
    try
    {
        // Single definition of the update so the direct and dispatcher
        // paths cannot drift apart (the original duplicated this code).
        void ApplyUpdate()
        {
            captionsControl.UpdateEvent(new CognitiveEvent()
            {
                ImageAnalysis = analysis,
                ImageAnalysisCV = analysisCV,
                ImageHeight = imageHeight,
                ImageWidth = imageWidth
            });
            tagsControl.UpdateEvent(new CognitiveEvent()
            {
                ImageAnalysis = analysis,
                ImageAnalysisCV = analysisCV
            });
        }

        if (Dispatcher.HasThreadAccess)
        {
            ApplyUpdate();
        }
        else
        {
            // Fire-and-forget, matching the original behavior of never
            // awaiting the dispatcher operation; the discard makes that intent explicit.
            _ = Dispatcher.RunAsync(Windows.UI.Core.CoreDispatcherPriority.Normal, () => ApplyUpdate());
        }
    }
    catch (Exception)
    {
        // Deliberately swallowed: a failed UI refresh must not crash the pipeline.
        // NOTE(review): consider logging here so failures are not silent.
    }
}
/// <summary>
/// Analyzes an image by URL with the full feature set and prints every
/// detected object with its confidence and bounding-box edges
/// (left, right, top, bottom derived from X/Y/W/H).
/// </summary>
public static async Task AnalyzeImageUrl(ComputerVisionClient client, string imageUrl)
{
    Console.WriteLine("----------------------------------------------------------");
    Console.WriteLine("ANALYZE IMAGE - URL");
    Console.WriteLine();

    // Request the complete set of visual features from the service.
    var features = new List<VisualFeatureTypes?>
    {
        VisualFeatureTypes.Categories, VisualFeatureTypes.Description,
        VisualFeatureTypes.Faces, VisualFeatureTypes.ImageType,
        VisualFeatureTypes.Tags, VisualFeatureTypes.Adult,
        VisualFeatureTypes.Color, VisualFeatureTypes.Brands,
        VisualFeatureTypes.Objects
    };

    ImageAnalysis analysis = await client.AnalyzeImageAsync(imageUrl, features);

    Console.WriteLine("Objects:");
    foreach (var detected in analysis.Objects)
    {
        var box = detected.Rectangle;
        Console.WriteLine($"{detected.ObjectProperty} with confidence {detected.Confidence} at location {box.X}, " +
                          $"{box.X + box.W}, {box.Y}, {box.Y + box.H}");
    }
    Console.WriteLine();
}
/// <summary>
/// Logs the captions and tags from an image analysis result. Skips logging
/// entirely unless at least one caption AND at least one tag are present.
/// </summary>
private static void DisplayResults(ImageAnalysis result, TraceWriter log, string filename)
{
    // Guard: require both captions and tags before logging anything.
    if (result.Description?.Captions == null || result.Description.Captions.Count == 0 ||
        result.Tags == null || result.Tags.Count == 0)
    {
        return;
    }

    log.Info("---------------------------------------------------------");
    log.Info("Image Uploaded - Filename is " + filename);
    log.Info("---------------------------------------------------------");

    // The guard above already proved captions exist; the original re-checked
    // the same conditions here redundantly.
    log.Info("Image Captions:");
    foreach (var caption in result.Description.Captions)
    {
        log.Info(caption.Text + " (" + caption.Confidence + ")");
    }

    foreach (var tag in result.Tags)
    {
        log.Info(tag.Name + " (" + tag.Confidence + ")");
    }
}
/// <summary>
/// Blob-triggered function: analyzes an uploaded picture and routes it to
/// "picsrejected" when flagged as adult/gory/racy content, otherwise to
/// "festivalpics".
/// </summary>
public async Task Run([BlobTrigger("picsin/{name}", Connection = "BlobStorageConnection")] byte[] myBlob, string name, ILogger log, Binder binder)
{
    log.LogInformation($"C# Blob trigger AnalyzationFunction Processed blob\n Name:{name} \n Size: {myBlob.Length} Bytes");

    ImageAnalysis analysis = await VisionClient.AnalyzeImageInStreamAsync(new MemoryStream(myBlob), Features);

    Attribute[] attributes;
    if (analysis.Adult.IsAdultContent || analysis.Adult.IsGoryContent || analysis.Adult.IsRacyContent)
    {
        log.LogInformation($"Image {name} was detected as adult content");
        log.LogInformation($"Adult content = {analysis.Adult.IsAdultContent}\n" +
                           $"Gory content = {analysis.Adult.IsGoryContent}\n" +
                           $"Racy content = {analysis.Adult.IsRacyContent}");
        attributes = new Attribute[]
        {
            new BlobAttribute($"picsrejected/{name}", FileAccess.Write),
            new StorageAccountAttribute("BlobStorageConnection")
        };
    }
    else
    {
        log.LogInformation($"Image {name} is clean");
        attributes = new Attribute[]
        {
            new BlobAttribute($"festivalpics/{name}", FileAccess.Write),
            new StorageAccountAttribute("BlobStorageConnection")
        };
    }

    using Stream fileOutputStream = await binder.BindAsync<Stream>(attributes);
    // BUG FIX: the original used the synchronous Write inside an async
    // function, blocking a thread-pool thread; WriteAsync avoids that.
    await fileOutputStream.WriteAsync(myBlob);
}
/// <summary>
/// Maps an Azure ImageAnalysis result onto the app's ComputerVisionInsight
/// entity: detected objects, brands and tags, each with its confidence.
/// </summary>
private ComputerVisionInsight ConvertImageAnalysisToInsight(ImageAnalysis imageAnalysis)
{
    var insight = new ComputerVisionInsight();
    ImageDescriptionDetails imageDescription = imageAnalysis.Description;

    foreach (DetectedObject detected in imageAnalysis.Objects)
    {
        insight.DetectedObjects.Add(new Entity.Item { Name = detected.ObjectProperty, Confidence = detected.Confidence });
    }

    foreach (DetectedBrand detectedBrand in imageAnalysis.Brands)
    {
        insight.Brands.Add(new Entity.Item { Name = detectedBrand.Name, Confidence = detectedBrand.Confidence });
    }

    foreach (ImageTag imageTag in imageAnalysis.Tags)
    {
        insight.Tags.Add(new Entity.Item { Name = imageTag.Name, Confidence = imageTag.Confidence });
    }

    return insight;
}
/// <summary>
/// Fills the target property with the names of any celebrities detected in
/// the image categories. Does nothing when there are no categories, no
/// category details, or no celebrities.
/// </summary>
public override void Update(PropertyAccess propertyAccess, ImageAnalysis imageAnalyzerResult, OcrResult ocrResult, TranslationService translationService)
{
    if (imageAnalyzerResult?.Categories == null)
    {
        return;
    }

    // Collect celebrity names from every category that carries detail info.
    var celebrityNames = new List<string>();
    foreach (var category in imageAnalyzerResult.Categories)
    {
        var celebs = category.Detail?.Celebrities;
        if (celebs == null)
        {
            continue;
        }
        foreach (var celebrity in celebs)
        {
            celebrityNames.Add(celebrity.Name);
        }
    }

    if (celebrityNames.Count == 0)
    {
        return;
    }

    if (IsStringProperty(propertyAccess.Property))
    {
        propertyAccess.SetValue(string.Join(", ", celebrityNames));
    }
    else if (IsStringListProperty(propertyAccess.Property))
    {
        propertyAccess.SetValue(celebrityNames.ToList());
    }
}
/// <summary>
/// Runs Computer Vision analysis on an image stream and converts the result
/// to the app's MediaAI model. Service errors are logged together with the
/// raw response body before being rethrown.
/// </summary>
public async Task <MediaAI> AnalyseImageAsync(
    Stream imageStream, CancellationToken cancellationToken)
{
    ComputerVisionClient computerVision = _computerVisionClientFunc();
    try
    {
        ImageAnalysis analysis = await computerVision.AnalyzeImageInStreamAsync(
            imageStream, Features, cancellationToken: cancellationToken);
        return ToMediaAI(analysis);
    }
    catch (ComputerVisionErrorException ex)
    {
        // The service usually explains the failure in the response content.
        Log.Error(ex, "Error in analyse image. Message: {Message}", ex.Response.Content);
        throw;
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Error in analyse image");
        throw;
    }
}
/// <summary>
/// Analyzes an image according to the intent guessed from the utterance and
/// produces both a textual log and speech text localized to the utterance's
/// language ("it" gets Italian; everything else falls back to English).
/// </summary>
public AnalysisResult Analyze(string imageUrl, string utterance)
{
    var rosetta = new Rosetta(RossyConfig.RosettaConfig);
    var intent = rosetta.GuessIntent(utterance);
    var analyzer = GetAnalyzer(intent);
    List<VisualFeatureTypes> features = analyzer.SetupAnalysisFeatures();

    // BUG FIX: ComputerVisionClient is IDisposable and was leaked before.
    using (var client = new ComputerVisionClient(new ApiKeyServiceClientCredentials(RossyConfig.GeordiConfig.SubscriptionKey)) { Endpoint = RossyConfig.GeordiConfig.Endpoint })
    {
        // Sync-over-async is forced by the synchronous signature;
        // GetAwaiter().GetResult() surfaces the original exception instead of
        // the AggregateException that .Result throws.
        ImageAnalysis imageAnalysis = client.AnalyzeImageAsync(imageUrl, features).GetAwaiter().GetResult();

        string log = analyzer.ProduceLog(imageAnalysis);

        var language = rosetta.GuessLanguage(utterance);
        string speechText;
        switch (language)
        {
        case "it":
            speechText = analyzer.ProduceSpeechTextItalian(imageAnalysis);
            break;

        case "en":
        default:
            speechText = analyzer.ProduceSpeechTextEnglish(imageAnalysis);
            break;
        }
        return new AnalysisResult(speechText, log);
    }
}
/// <summary>
/// Analyzes the given image URL with the full Computer Vision feature set.
/// </summary>
/// SECURITY(review): the subscription key and endpoint are hard-coded in
/// source. Move them to configuration / Key Vault and rotate the exposed key.
public async Task <ImageAnalysis> Analizer(string urlAnalize)
{
    string subscriptionKey = "eeaf71b7af4b424f8784608593261774";
    string endpoint = "https://computervisionsrg.cognitiveservices.azure.com/";
    // (The original also declared an unused second key and a redundant alias
    // for urlAnalize; both removed.)

    ComputerVisionClient client = new ComputerVisionClient(new ApiKeyServiceClientCredentials(subscriptionKey))
    {
        Endpoint = endpoint
    };

    // Request every available visual feature.
    List<VisualFeatureTypes?> features = new List<VisualFeatureTypes?>()
    {
        VisualFeatureTypes.Categories, VisualFeatureTypes.Description,
        VisualFeatureTypes.Faces, VisualFeatureTypes.ImageType,
        VisualFeatureTypes.Tags, VisualFeatureTypes.Adult,
        VisualFeatureTypes.Color, VisualFeatureTypes.Brands,
        VisualFeatureTypes.Objects
    };

    ImageAnalysis results = await client.AnalyzeImageAsync(urlAnalize, visualFeatures: features);
    return results;
}
/// <summary>
/// Prints the image URI followed by the complete analysis result serialized
/// as indented JSON.
/// </summary>
public static void DisplayResults(ImageAnalysis analysis, string imageUri)
{
    Console.WriteLine(imageUri);
    var serialized = JsonConvert.SerializeObject(analysis, Formatting.Indented);
    Console.WriteLine(serialized + "\n");
}
/// <summary>
/// Analyzes an image URL for tags only and prints each tag with its
/// confidence score.
/// </summary>
public static async Task AnalyzeImageUrl(ComputerVisionClient client, string imageUrl)
{
    Console.WriteLine("----------------------------------------------------------");
    Console.WriteLine("ANALYZE IMAGE - URL");
    Console.WriteLine();

    // Only tags are needed for this sample.
    var features = new List<VisualFeatureTypes?> { VisualFeatureTypes.Tags };

    Console.WriteLine($"Analyzing the image {Path.GetFileName(imageUrl)}...");
    Console.WriteLine();

    // Analyze the URL image
    ImageAnalysis analysis = await client.AnalyzeImageAsync(imageUrl, visualFeatures: features);

    // Image tags and their confidence score
    Console.WriteLine("Tags:");
    foreach (var imageTag in analysis.Tags)
    {
        Console.WriteLine($"{imageTag.Name} {imageTag.Confidence}");
    }
    Console.WriteLine();
}
/// <summary>
/// Prints celebrity and landmark results found in the category details of an
/// image analysis.
/// </summary>
private static void DisplayDomainSpecificResults(ImageAnalysis analysis)
{
    // Celebrities (with face locations).
    Console.WriteLine("Celebrities:");
    foreach (var category in analysis.Categories)
    {
        var celebrities = category.Detail?.Celebrities;
        if (celebrities == null)
        {
            continue;
        }
        foreach (var celeb in celebrities)
        {
            // NOTE(review): the location prints Height before Width; kept as-is
            // to preserve the existing output format.
            Console.WriteLine("{0} with confidence {1} at location {2},{3},{4},{5}",
                celeb.Name, celeb.Confidence,
                celeb.FaceRectangle.Left, celeb.FaceRectangle.Top,
                celeb.FaceRectangle.Height, celeb.FaceRectangle.Width);
        }
    }

    // Landmarks (name and confidence only).
    Console.WriteLine("Landmarks:");
    foreach (var category in analysis.Categories)
    {
        var landmarks = category.Detail?.Landmarks;
        if (landmarks == null)
        {
            continue;
        }
        foreach (var landmark in landmarks)
        {
            Console.WriteLine("{0} with confidence {1}", landmark.Name, landmark.Confidence);
        }
    }
    Console.WriteLine("\n");
}
/// <summary>
/// Builds an Italian spoken description of the people (faces) in the image
/// and converts it to SSML via Modem.BuildSsmlAsync.
/// </summary>
public string ProduceSpeechTextItalian(ImageAnalysis imageAnalysis)
{
    var resultBuilder = new StringBuilder();
    if (imageAnalysis.Faces.Count == 0)
    {
        resultBuilder.Append("Non vedo persone");
    }
    else if (imageAnalysis.Faces.Count == 1)
    {
        var face = imageAnalysis.Faces.First();
        resultBuilder.Append(face.Gender == Gender.Female ? "C'è una donna" : "C'è un uomo");
        resultBuilder.Append($" che sembra avere { face.Age} anni");
    }
    else
    {
        resultBuilder.Append($"Ci sono {imageAnalysis.Faces.Count} persone. Più precisamente: ");
        foreach (var face in imageAnalysis.Faces)
        {
            resultBuilder.Append(face.Gender == Gender.Female ? "una donna" : "un uomo");
            resultBuilder.Append($" che sembra avere {face.Age} anni, ");
        }
        resultBuilder.Append("."); //a little hack
    }

    // BUG FIX: sync-over-async is forced by the synchronous signature, but
    // GetAwaiter().GetResult() rethrows the real exception instead of the
    // AggregateException that .Result produces.
    var ssml = Modem.BuildSsmlAsync(resultBuilder.ToString(), "it").GetAwaiter().GetResult();
    return ssml;
}
// Maps each detected object to a BirdAnalysisAnimal when its object hierarchy
// (built by GetObjectHierarchy) contains the "animal" key.
private static IEnumerable <BirdAnalysisAnimal> GetAnimals(ImageAnalysis analyzedImage)
{
    return(analyzedImage.Objects.ToDictionary(x => x, GetObjectHierarchy)
        // Keep only objects whose hierarchy contains the generic animal node.
        .Where(x => x.Value.ContainsKey(AnimalObjectKey))
        .Select(x => {
            // First hierarchy entry — presumably the most specific label;
            // TODO(review): confirm GetObjectHierarchy's ordering guarantees this.
            var first = x.Value.FirstOrDefault();
            // Three or more hierarchy levels are treated as having a named
            // animal group — TODO(review): confirm this threshold's meaning.
            var hasAnimalGroup = x.Value.Count >= 3;
            // Always true here given the Where() above, but kept for the
            // confidence lookup below.
            var isAnimal = x.Value.ContainsKey(AnimalObjectKey);
            var isBird = x.Value.ContainsKey(BirdObjectKey);
            return new BirdAnalysisAnimal {
                AnimalGroup = hasAnimalGroup ? Capitalize(first.Key) : string.Empty,
                AnimalGroupConfidence = hasAnimalGroup ? first.Value : (double?)null,
                IsAnimal = isAnimal,
                IsAnimalConfidence = isAnimal ? x.Value[AnimalObjectKey] : (double?)null,
                IsBird = isBird,
                IsBirdConfidence = isBird ? x.Value[BirdObjectKey] : (double?)null,
                // Bounding box copied straight from the detected object.
                Rectangle = new Models.BoundingRect {
                    x = x.Key.Rectangle.X,
                    y = x.Key.Rectangle.Y,
                    width = x.Key.Rectangle.W,
                    height = x.Key.Rectangle.H
                }
            };
        }));
}
/// <summary>
/// Downloads the image, warns when an animal is detected (either directly or
/// via an object's parent hierarchy), outlines each animal with a red
/// rectangle, and shows the result in pictureBox1.
/// </summary>
private void DisplayResults(ImageAnalysis analysis, string imageUri)
{
    Image img;
    // BUG FIX: GDI+ requires the stream passed to Image.FromStream to stay
    // open for the lifetime of the Image, but the original closed it right
    // away. Cloning into a Bitmap detaches the pixels from the stream.
    // WebClient is also IDisposable and was previously leaked.
    using (var wc = new WebClient())
    using (var ms = new MemoryStream(wc.DownloadData(imageUri)))
    using (var streamImage = Image.FromStream(ms))
    {
        img = new Bitmap(streamImage);
    }

    if (analysis.Objects.Any(o => o.ObjectProperty == "animal"))
    {
        MessageBox.Show("Увага тварина!");
        using (Graphics g = Graphics.FromImage(img))
        using (var pen = new Pen(Color.Red))   // Pen was previously leaked
        {
            BoundingRect animalRectangle = analysis.Objects.First(o => o.ObjectProperty == "animal").Rectangle;
            g.DrawRectangle(pen, new Rectangle(animalRectangle.X, animalRectangle.Y, animalRectangle.W, animalRectangle.H));
        }
    }
    else if (analysis.Objects.Any(o => o.Parent != null))
    {
        List<DetectedObject> childObjects = analysis.Objects.Where(o => o.Parent != null).ToList();
        List<ObjectHierarchy> parentObjects = childObjects.Select(o => o.Parent).ToList();

        if (checkedAnimal(parentObjects))
        {
            MessageBox.Show("Увага тварина!");
            using (Graphics g = Graphics.FromImage(img))
            using (var pen = new Pen(Color.Red))
            {
                // Outline every child object whose parent chain is an animal.
                foreach (var child in childObjects)
                {
                    if (checkedAnimal(new List<ObjectHierarchy>() { child.Parent }))
                    {
                        BoundingRect animalRectangle = child.Rectangle;
                        g.DrawRectangle(pen, new Rectangle(animalRectangle.X, animalRectangle.Y, animalRectangle.W, animalRectangle.H));
                    }
                }
            }
        }
        else
        {
            MessageBox.Show("Усе безпечно");
        }
    }

    pictureBox1.Image = img;
}
/// <summary>
/// Builds a text log of the analysis: image captions plus one line per
/// detected face with its bounding box printed as left, right, top, bottom.
/// </summary>
public string ProduceLog(ImageAnalysis imageAnalysis)
{
    var logBuilder = new StringBuilder();
    logBuilder.Append("----------------------------------------------------------\n");
    logBuilder.Append("ANALYZE IMAGE - PEOPLE\n");

    // Summarizes the image content.
    logBuilder.Append("Summary:\n");
    foreach (var caption in imageAnalysis.Description.Captions)
    {
        logBuilder.Append($"{caption.Text} with confidence {caption.Confidence}\n");
    }

    // Faces. BUG FIX: the original printed Left twice and added Width/Height
    // to Top, producing a meaningless box; this prints left, right, top, bottom.
    logBuilder.Append("Faces:\n");
    foreach (var face in imageAnalysis.Faces)
    {
        logBuilder.Append($"A {face.Gender} of age {face.Age} at location {face.FaceRectangle.Left}, " +
                          $"{face.FaceRectangle.Left + face.FaceRectangle.Width}, {face.FaceRectangle.Top}, " +
                          $"{face.FaceRectangle.Top + face.FaceRectangle.Height}\n");
    }

    logBuilder.Append("----------------------------------------------------------\n");
    return logBuilder.ToString();
}
/// <summary>
/// Builds an English SSML document (en-US-GuyNeural voice) describing the
/// people detected in the image.
/// </summary>
public string ProduceSpeechTextEnglish(ImageAnalysis imageAnalysis)
{
    var ssml = new StringBuilder();
    ssml.Append("<speak version=\"1.0\" xmlns=\"https://www.w3.org/2001/10/synthesis\" xml:lang=\"en-US\">");
    ssml.Append("<voice name=\"en-US-GuyNeural\">");

    var faces = imageAnalysis.Faces;
    if (faces.Count == 0)
    {
        ssml.Append("There are no people around");
    }
    else if (faces.Count == 1)
    {
        var onlyFace = faces.First();
        ssml.Append($"There is one {onlyFace.Gender} person of age {onlyFace.Age}.");
    }
    else
    {
        ssml.Append($"There are {faces.Count} people around. More in detail: ");
        foreach (var face in faces)
        {
            ssml.Append($"a {face.Gender} of age {face.Age}, ");
        }
        ssml.Append("."); // trailing period closes the comma-separated list
    }

    ssml.Append("</voice>");
    ssml.Append("</speak>");
    return ssml.ToString();
}
/// <summary>
/// Builds an Italian SSML document (it-IT-ElsaNeural voice) describing the
/// people detected in the image.
/// </summary>
public string ProduceSpeechTextItalian(ImageAnalysis imageAnalysis)
{
    var ssml = new StringBuilder();
    ssml.Append("<speak version=\"1.0\" xmlns=\"https://www.w3.org/2001/10/synthesis\" xml:lang=\"it-IT\">");
    ssml.Append("<voice name=\"it-IT-ElsaNeural\">");

    var faces = imageAnalysis.Faces;
    if (faces.Count == 0)
    {
        ssml.Append("Non vedo persone");
    }
    else if (faces.Count == 1)
    {
        var onlyFace = faces.First();
        ssml.Append(onlyFace.Gender == Gender.Female ? "C'è una donna" : "C'è un uomo");
        ssml.Append($" che sembra avere { onlyFace.Age} anni");
    }
    else
    {
        ssml.Append($"Ci sono {faces.Count} persone. Più precisamente: ");
        foreach (var face in faces)
        {
            ssml.Append(face.Gender == Gender.Female ? "una donna" : "un uomo");
            ssml.Append($" che sembra avere {face.Age} anni, ");
        }
        ssml.Append("."); // trailing period closes the comma-separated list
    }

    ssml.Append("</voice>");
    ssml.Append("</speak>");
    return ssml.ToString();
}
/// <summary>
/// Analyzes an image URL for its description and tags and copies them into a
/// new ImageDetails instance.
/// </summary>
public static async Task<ImageDetails> AnalyzeImageUrl(ComputerVisionClient client, string imageUrl)
{
    var imageDetails = new ImageDetails { ImageUrl = imageUrl };

    // Only description and tags are requested from the service.
    var features = new List<VisualFeatureTypes>
    {
        VisualFeatureTypes.Description,
        VisualFeatureTypes.Tags
    };

    ImageAnalysis analysis = await client.AnalyzeImageAsync(imageUrl, features);

    foreach (var caption in analysis.Description.Captions)
    {
        imageDetails.Description.Captions.Add(new Caption
        {
            Text = caption.Text,
            Confidence = caption.Confidence
        });
    }

    foreach (var tag in analysis.Tags)
    {
        imageDetails.Description.Tags.Add(tag.Name);
    }

    return imageDetails;
}
/// <summary>
/// Accepts an uploaded image, analyzes it with both Azure and AWS, and
/// returns the winning text candidate from each provider.
/// </summary>
public async Task <OcrResultDTO> Post()
{
    StringBuilder sb = new StringBuilder();
    OcrResultDTO ocrResultDTO = new OcrResultDTO();
    try
    {
        if (Request.Form.Files.Count > 0)
        {
            // Use the last uploaded file, matching the original behavior.
            var file = Request.Form.Files[Request.Form.Files.Count - 1];
            if (file.Length > 0)
            {
                byte[] imageFileBytes;
                // BUG FIX: the MemoryStream was never disposed (Flush() on a
                // MemoryStream is a no-op and released nothing).
                using (var memoryStream = new MemoryStream())
                {
                    file.CopyTo(memoryStream);
                    imageFileBytes = memoryStream.ToArray();
                }

                string JSONResult = await ReadTextFromStreamAzure(imageFileBytes);
                string JSONResult2 = await ReadTextFromStreamAWS(imageFileBytes);

                ImageDescription imgDescAzure = JsonConvert.DeserializeObject <ImageDescription>(JSONResult);
                var AWSList = JsonConvert.DeserializeObject <List <string> >(JSONResult2);
                ImageAnalysis imageAnalysis = JsonConvert.DeserializeObject <ImageAnalysis>(JSONResult);
                TagResult TagResult = JsonConvert.DeserializeObject <TagResult>(JSONResult);
                //OcrResult ocrResult = JsonConvert.DeserializeObject<OcrResult>(JSONResult);

                var AzureList = new List <string>();
                foreach (var item in imageAnalysis.Description.Tags)
                {
                    AzureList.Add(item);
                }

                var selectedRTRCAzure = GetWinner(AzureList);
                var selectedRTRCAWS = GetWinner(AWSList);

                sb.Append("*********************Azure*********************** ");
                sb.Append("\n");
                sb.Append(selectedRTRCAzure);
                sb.Append("\n");
                sb.Append("**********************AWS************************ ");
                sb.Append("\n");
                sb.Append(selectedRTRCAWS);
                sb.Append("\n");
                ocrResultDTO.DetectedText = sb.ToString();
            }
        }
        return ocrResultDTO;
    }
    catch
    {
        // Best-effort endpoint: report a generic failure instead of a 500.
        // NOTE(review): consider logging the exception before swallowing it.
        ocrResultDTO.DetectedText = "Error occurred. Try again";
        ocrResultDTO.Language = "unk";
        return ocrResultDTO;
    }
}
/// <summary>
/// Analyzes an image URL and returns an HTML-formatted summary of its
/// colors, image type, adult-content flag and categories.
/// </summary>
public static async Task <string> GetImageDetailsAsync(string key, AzureRegions region, string imageUrl)
{
    var details = new StringBuilder();

    IComputerVisionAPI client = new ComputerVisionAPI(new ApiKeyServiceClientCredentials(key));
    client.AzureRegion = region;

    // Only the features actually reported below are requested.
    var requiredFeatures = new List<VisualFeatureTypes>
    {
        VisualFeatureTypes.Adult,
        VisualFeatureTypes.Categories,
        VisualFeatureTypes.Color,
        VisualFeatureTypes.ImageType,
    };

    ImageAnalysis imageAnalysis = await client.AnalyzeImageAsync(imageUrl, requiredFeatures);

    details.AppendLine($"Dominant Background Color: {imageAnalysis.Color.DominantColorBackground} <br />");
    details.AppendLine($"Dominant Forground Color: {imageAnalysis.Color.DominantColorForeground} <br />");
    details.AppendLine($"Is Black & White: {imageAnalysis.Color.IsBWImg} <br />");

    string isLineDraw = imageAnalysis.ImageType.LineDrawingType == 1 ? "Yes" : "No";
    details.AppendLine($"Is Line drawing: {isLineDraw} <br />");
    details.AppendLine($"Is Adult content: {imageAnalysis.Adult.IsAdultContent} <br />");

    var categoryNames = new List<string>();
    foreach (var category in imageAnalysis.Categories)
    {
        categoryNames.Add(category.Name);
    }
    details.AppendLine($"Categories: {string.Join(", ", categoryNames)}");

    return details.ToString();
}
// Recorded-session test: analyzing house.jpg must return the expected top
// tag, metadata format, adult scores, top category and dominant colors.
public void AnalyzeImageTest()
{
    using (MockContext context = MockContext.Start(this.GetType().FullName))
    {
        HttpMockServer.Initialize(this.GetType().FullName, "AnalyzeImageTest");

        using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
        {
            var requestedFeatures = new List <VisualFeatureTypes>()
            {
                VisualFeatureTypes.Adult,
                VisualFeatureTypes.Categories,
                VisualFeatureTypes.Color,
                VisualFeatureTypes.Faces,
                VisualFeatureTypes.ImageType,
                VisualFeatureTypes.Tags
            };

            ImageAnalysis result = client.AnalyzeImageAsync(GetTestImageUrl("house.jpg"), requestedFeatures).Result;

            Assert.Equal("grass", result.Tags[0].Name);
            Assert.True(result.Tags[0].Confidence > 0.9);
            Assert.Equal("Jpeg", result.Metadata.Format);
            Assert.False(result.Adult.IsAdultContent);
            Assert.False(result.Adult.IsRacyContent);
            Assert.True(result.Adult.AdultScore < 0.1);
            Assert.True(result.Adult.RacyScore < 0.1);
            Assert.Equal("building_", result.Categories[0].Name);
            Assert.True(result.Categories[0].Score > 0.5);
            Assert.Equal("Green", result.Color.DominantColorBackground);
            Assert.Equal("Green", result.Color.DominantColorForeground);
        }
    }
}
/// <summary>
/// Converts an ImageAnalysis into the app's MediaAI model, scaling the
/// service's [0,1] confidences to percentages.
/// </summary>
private MediaAI ToMediaAI(ImageAnalysis analysis)
{
    return new MediaAI
    {
        Tags = analysis.Tags.Select(t => new MediaAITag
        {
            Name = t.Name,
            Confidence = t.Confidence * 100, // service reports 0..1; model stores percent
            Source = Source
        }),
        Objects = analysis.Objects.Select(o => new MediaAIObject
        {
            Name = o.ObjectProperty,
            Confidence = o.Confidence * 100,
            Source = Source,
            Box = MapImageBox(o.Rectangle, analysis.Metadata)
        }),
        Colors = MapColors(analysis.Color),
        Caption = MapCaption(analysis.Description),
        SourceInfo = GetSourceInfo(analysis)
    };
}
// Recorded-session test: brand detection on MicrosoftRealMadrid.jpg must find
// the Microsoft brand with a bounding box that lies inside the image bounds.
public void AnalyzeBrandsTest()
{
    using (MockContext context = MockContext.Start(this.GetType().FullName))
    {
        HttpMockServer.Initialize(this.GetType().FullName, "AnalyzeBrandsTest");

        using (IComputerVisionClient client = GetComputerVisionClient(HttpMockServer.CreateInstance()))
        using (FileStream stream = new FileStream(GetTestImagePath("MicrosoftRealMadrid.jpg"), FileMode.Open))
        {
            var requestedFeatures = new List <VisualFeatureTypes>() { VisualFeatureTypes.Brands };
            ImageAnalysis result = client.AnalyzeImageInStreamAsync(stream, requestedFeatures).Result;

            var brand = result.Brands[0];
            Assert.Equal("Microsoft", brand.Name);
            Assert.True(brand.Confidence > 0.5);
            Assert.True(brand.Rectangle.X >= 0);
            Assert.True(brand.Rectangle.W >= 0);
            Assert.True(brand.Rectangle.X + brand.Rectangle.W <= result.Metadata.Width);
            Assert.True(brand.Rectangle.Y >= 0);
            Assert.True(brand.Rectangle.H >= 0);
            Assert.True(brand.Rectangle.Y + brand.Rectangle.H <= result.Metadata.Height);
        }
    }
}
/// <summary>
/// Analyze the image from an http based image url.
/// </summary>
/// <param name="imageUrl">Http(s) url of a publicly accessible image.</param>
/// <returns>The full analysis result, including celebrity/landmark details.</returns>
/// <exception cref="ArgumentException">Thrown when the url is null or not http/https.</exception>
public async Task <ImageAnalysis> AnalyzeImageFromUrlAsync(string imageUrl)
{
    // BUG FIX: the original ordinal, case-sensitive check rejected valid urls
    // like "HTTP://..." and threw NullReferenceException on null input; it
    // also threw the bare Exception type. ArgumentException is still caught
    // by existing catch (Exception) handlers.
    if (imageUrl == null || !imageUrl.StartsWith("http", StringComparison.OrdinalIgnoreCase))
    {
        throw new ArgumentException("Image must be a publicly accessible url. Provide an image url of http or https type", nameof(imageUrl));
    }

    List <VisualFeatureTypes> features = new List <VisualFeatureTypes>()
    {
        VisualFeatureTypes.Categories, VisualFeatureTypes.Brands,
        VisualFeatureTypes.Description, VisualFeatureTypes.Faces,
        VisualFeatureTypes.Tags, VisualFeatureTypes.Objects,
        VisualFeatureTypes.Adult
    };

    // Also request the domain-specific celebrity and landmark models.
    List <Details> details = new List <Details>() { Details.Celebrities, Details.Landmarks };

    ImageAnalysis result = await visionClient.AnalyzeImageAsync(imageUrl, features, details);
    return result;
}
/// <summary>
/// Builds an English spoken description of the people (faces) in the image
/// and converts it to SSML via Modem.BuildSsmlAsync.
/// </summary>
public string ProduceSpeechTextEnglish(ImageAnalysis imageAnalysis)
{
    var resultBuilder = new StringBuilder();
    if (imageAnalysis.Faces.Count == 0)
    {
        resultBuilder.Append("There are no people around");
    }
    else if (imageAnalysis.Faces.Count == 1)
    {
        var face = imageAnalysis.Faces.First();
        resultBuilder.Append($"There is one {face.Gender} person of age {face.Age}.");
    }
    else
    {
        resultBuilder.Append($"There are {imageAnalysis.Faces.Count} people around. More in detail: ");
        foreach (var face in imageAnalysis.Faces)
        {
            resultBuilder.Append($"a {face.Gender} of age {face.Age}, ");
        }
        resultBuilder.Append("."); //a little hack
    }

    // BUG FIX: sync-over-async is forced by the synchronous signature, but
    // GetAwaiter().GetResult() rethrows the real exception instead of the
    // AggregateException that .Result produces.
    var ssml = Modem.BuildSsmlAsync(resultBuilder.ToString(), "en").GetAwaiter().GetResult();
    return ssml;
}
/// <summary>
/// Decides whether the image contains a bird by combining detected-object
/// results, tag matches and — as a last resort — caption words. The value is
/// the best available confidence, or null when only the caption matched.
/// </summary>
private static KeyValuePair <bool, double?> GetIsBird(ImageAnalysis analyzedImage, List <BirdAnalysisAnimal> animals)
{
    // Evidence from detected objects. Max over an empty nullable sequence
    // yields null rather than throwing.
    bool objectSaysBird = animals.Any(a => a.IsBird);
    double? objectConfidence = animals.Where(a => a.IsBird).Max(a => a.IsBirdConfidence);

    // Evidence from tags (either the tag name or its hint may match).
    var birdTag = analyzedImage.Tags.FirstOrDefault(t => TextEqualsBird(t.Name) || TextEqualsBird(t.Hint));
    bool tagSaysBird = birdTag != null;
    double? tagConfidence = birdTag?.Confidence;

    if (objectSaysBird || tagSaysBird)
    {
        double bestConfidence = Math.Max(objectConfidence.GetValueOrDefault(), tagConfidence.GetValueOrDefault());
        return new KeyValuePair <bool, double?>(true, bestConfidence);
    }

    // Fallback: scan the caption words for a bird mention (no confidence available).
    var captionWords = analyzedImage.Description.Captions.SelectMany(c => c.Text.Split(' ')).ToList();
    bool captionSaysBird = captionWords.Any(TextEqualsBird);
    return new KeyValuePair <bool, double?>(captionSaysBird, null);
}
/// <summary>
/// Accepts an image url (in the request body's filename field), runs Azure
/// image analysis on it and returns the winning tag text. The AWS branch is
/// currently disabled; its header is still emitted to keep the output format.
/// </summary>
public async Task <OcrResultDTO> Post([FromBody] myImg image)
{
    var sb = new StringBuilder();
    var result = new OcrResultDTO();
    try
    {
        if (!string.IsNullOrEmpty(image.filename))
        {
            string azureJson = await ReadTextFromStreamAzureUrl(image.filename);

            // The same JSON payload is deserialized into several shapes; only
            // imageAnalysis is consumed below.
            ImageDescription imgDescAzure = JsonConvert.DeserializeObject <ImageDescription>(azureJson);
            ImageAnalysis imageAnalysis = JsonConvert.DeserializeObject <ImageAnalysis>(azureJson);
            TagResult TagResult = JsonConvert.DeserializeObject <TagResult>(azureJson);

            var azureTags = new List <string>();
            if (imageAnalysis.Description != null)
            {
                foreach (var tag in imageAnalysis.Description.Tags)
                {
                    azureTags.Add(tag);
                }
                var azureWinner = GetWinner(azureTags);

                sb.Append("*********************Azure*********************** ");
                sb.Append("\n");
                sb.Append(azureWinner);
                sb.Append("\n");
                sb.Append("**********************AWS************************ ");
                sb.Append("\n");
                sb.Append("\n");
                result.DetectedText = sb.ToString();
            }
            else
            {
                result.DetectedText = "Cannot process this image";
            }
        }
        return result;
    }
    catch
    {
        // Best-effort endpoint: report a generic failure instead of a 500.
        result.DetectedText = "Error occurred. Try again";
        result.Language = "unk";
        return result;
    }
}
/// <summary>
/// Prints the clip-art and line-drawing scores from the image-type analysis.
/// See the API documentation for the meaning of each score value.
/// </summary>
private static void DisplayImageTypeResults(ImageAnalysis analysis)
{
    Console.WriteLine("Image Type:");
    Console.WriteLine($"Clip Art Type: {analysis.ImageType.ClipArtType}");
    Console.WriteLine($"Line Drawing Type: {analysis.ImageType.LineDrawingType}");
    Console.WriteLine("\n");
}
/// <summary>
/// Prints the adult and racy classification flags with their confidence scores.
/// </summary>
private static void DisplayAdultResults(ImageAnalysis analysis)
{
    Console.WriteLine("Adult:");
    Console.WriteLine($"Is adult content: {analysis.Adult.IsAdultContent} with confidence {analysis.Adult.AdultScore}");
    Console.WriteLine($"Has racy content: {analysis.Adult.IsRacyContent} with confidence {analysis.Adult.RacyScore} ");
    Console.WriteLine("\n");
}