/// <summary>
/// Uploads the image to Project Oxford and generates tags.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <returns>The analysis result containing the generated tags.</returns>
private async Task<AnalysisResult> UploadAndGetTagsForImage(string imageFilePath)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload the image and generate tags for it
        Log("Calling VisionServiceClient.GetTagsAsync()...");
        return await client.GetTagsAsync(stream);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Sends a url to Project Oxford and generates a thumbnail.
/// </summary>
/// <param name="imageUrl">The url of the image to generate a thumbnail for.</param>
/// <param name="width">Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="height">Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="smartCropping">Boolean flag for enabling smart cropping.</param>
/// <returns>The thumbnail image bytes.</returns>
private static async Task<byte[]> ThumbnailUrl(string imageUrl, int width, int height, bool smartCropping)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(ComKey);
    Log("VisionServiceClient is created");

    // Generate a thumbnail for the given url
    Log("Calling VisionServiceClient.GetThumbnailAsync()...");
    return await client.GetThumbnailAsync(imageUrl, width, height, smartCropping);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Uploads the image to Project Oxford and performs analysis.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <returns>The analysis result covering every supported visual feature.</returns>
private async Task<AnalysisResult> UploadAndAnalyzeImage(string imageFilePath)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Analyze the image for all visual features
        Log("Calling VisionServiceClient.AnalyzeImageAsync()...");
        VisualFeature[] features =
        {
            VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
            VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
            VisualFeature.Tags
        };
        return await client.AnalyzeImageAsync(stream, features);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Sends a url to Project Oxford and performs OCR.
/// </summary>
/// <param name="imageUrl">The url to perform recognition on.</param>
/// <param name="language">The language code to recognize for.</param>
/// <returns>The OCR results for the image at the given url.</returns>
private static async Task<OcrResults> RecognizeUrl(string imageUrl, string language)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(ComKey);
    Log("VisionServiceClient is created");

    // Perform OCR on the given url
    Log("Calling VisionServiceClient.RecognizeTextAsync()...");
    return await client.RecognizeTextAsync(imageUrl, language);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Submits an image file for handwriting recognition and writes each recognized
/// word to the console.
/// </summary>
/// <param name="filePath">Path of the image file to analyze.</param>
/// <returns>A task representing the asynchronous operation.</returns>
/// <remarks>
/// Changed from <c>async void</c> to <c>async Task</c> so exceptions are
/// observable by callers. Handwriting recognition is a long-running server-side
/// operation, so the result is polled until available instead of read once.
/// </remarks>
private static async Task AnalyzeImage(string filePath)
{
    var client = new VisionServiceClient(SubscriptionKey);

    string url;
    using (Stream imageFileStream = File.OpenRead(filePath))
    {
        // Start the server-side recognition operation; the response carries
        // the URL used to poll for the result.
        var resultHw = await client.CreateHandwritingRecognitionOperationAsync(imageFileStream);
        url = resultHw.Url;
    }

    var operation = new HandwritingRecognitionOperation { Url = url };

    // Poll (bounded number of attempts) until the service has produced a result;
    // the original read the result exactly once, which can observe a still-running
    // operation with a null RecognitionResult.
    var result = await client.GetHandwritingRecognitionOperationResultAsync(operation);
    for (int attempt = 0; result.RecognitionResult == null && attempt < 10; attempt++)
    {
        await Task.Delay(1000);
        result = await client.GetHandwritingRecognitionOperationResultAsync(operation);
    }

    if (result.RecognitionResult == null)
    {
        Console.WriteLine("Handwriting recognition did not complete in time.");
        return;
    }

    foreach (var line in result.RecognitionResult.Lines)
    {
        foreach (var word in line.Words)
        {
            Console.WriteLine(word.Text);
        }
    }
}
/// <summary>
/// If the incoming activity carries an image attachment, streams it to the
/// analysis web app and replies to the conversation with the generated text.
/// </summary>
/// <param name="activity">The incoming bot framework activity.</param>
/// <returns>A task representing the asynchronous reply.</returns>
public async Task PictureAnalyseAsync(Activity activity)
{
    // The key is forwarded to the analysis web app; the original also built an
    // unused VisionServiceClient and StringConstructor here (removed).
    string visionApiKey = ConfigurationManager.AppSettings["VisionApiKey"];

    StringBuilder reply = new StringBuilder();

    // If the user uploaded an image, read it, and send it to the Vision API
    if (activity.Attachments.Any() && activity.Attachments.First().ContentType.Contains("image"))
    {
        // stores image url (parsed from attachment or message)
        string uploadedImageUrl = activity.Attachments.First().ContentUrl;

        using (Stream imageFileStream = GetStreamFromUrl(uploadedImageUrl))
        {
            StringConstructorSDK client = new StringConstructorSDK()
            {
                WebAppUrl = $"{ ConfigurationManager.AppSettings["WebAppUrl"] }"
            };
            reply.Append(await client.PictureAnalyseAsync(visionApiKey, imageFileStream));
        }
    }

    ConnectorClient connector = new ConnectorClient(new Uri(activity.ServiceUrl));
    await connector.Conversations.ReplyToActivityAsync(activity.CreateReply(reply.ToString()));
}
/// <summary>
/// Gets the caption of the image from an image stream.
/// <remarks>
/// This method calls <see cref="IVisionServiceClient.AnalyzeImageAsync(Stream, string[])"/> and
/// returns the first caption from the returned <see cref="AnalysisResult.Description"/>.
/// </remarks>
/// </summary>
/// <param name="stream">The stream to an image.</param>
/// <returns>Description if caption found, null otherwise.</returns>
public async Task<string> GetCaptionAsync(Stream stream)
{
    var visionClient = new VisionServiceClient(ApiKey);
    var analysis = await visionClient.AnalyzeImageAsync(stream, VisualFeatures);
    return ProcessAnalysisResult(analysis);
}
/// <summary>
/// Uploads the image to Project Oxford and performs OCR.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="language">The language code to recognize for.</param>
/// <returns>The OCR results for the uploaded image.</returns>
private async Task<OcrResults> UploadAndRecognizeImage(string imageFilePath, string language)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload an image and perform OCR
        Log("Calling VisionServiceClient.RecognizeTextAsync()...");
        return await client.RecognizeTextAsync(stream, language);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Uploads the image to Project Oxford and performs analysis for all visual features.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <returns>The full analysis result.</returns>
private static async Task<AnalysisResult> UploadAndAnalyzeImage(string imageFilePath)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Computer Vision API Service client
    var client = new VisionServiceClient(ComKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Analyze the image for all visual features
        Log("Calling VisionServiceClient.AnalyzeImageAsync()...");
        VisualFeature[] features =
        {
            VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
            VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
            VisualFeature.Tags
        };
        return await client.AnalyzeImageAsync(stream, features);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Uploads the image to Project Oxford and performs OCR.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="language">The language code to recognize for.</param>
/// <returns>The OCR results for the uploaded image.</returns>
private static async Task<OcrResults> UploadAndRecognizeImage(string imageFilePath, string language)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(ComKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload an image and perform OCR
        Log("Calling VisionServiceClient.RecognizeTextAsync()...");
        return await client.RecognizeTextAsync(stream, language);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Uploads the image to Project Oxford and generates a thumbnail.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="width">Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="height">Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="smartCropping">Boolean flag for enabling smart cropping.</param>
/// <returns>The thumbnail image bytes.</returns>
private async Task<byte[]> UploadAndThumbnailImage(string imageFilePath, int width, int height, bool smartCropping)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload an image and generate a thumbnail
        Log("Calling VisionServiceClient.GetThumbnailAsync()...");
        byte[] thumbnail = await client.GetThumbnailAsync(stream, width, height, smartCropping);
        return thumbnail;
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Sends a url to Project Oxford and generates tags for it.
/// </summary>
/// <param name="imageUrl">The url of the image to generate tags for.</param>
/// <returns>The analysis result containing the generated tags.</returns>
private async Task<AnalysisResult> GenerateTagsForUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Generate tags for the given url
    Log("Calling VisionServiceClient.GetTagsAsync()...");
    return await client.GetTagsAsync(imageUrl);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Downloads the uploaded "coffee residue" image, runs face analysis on it,
/// and updates the page output with a playful verdict for any detected face.
/// </summary>
/// <returns>A task representing the asynchronous work.</returns>
private async Task RunTest()
{
    output.Text = "We hebben je residu kunnen scannen, zie hieronder jouw persoonlijke toekomst beeld:";
    try
    {
        // NOTE(review): the subscription key is hard-coded; move it to configuration.
        var client = new VisionServiceClient("bae2f176b113415f81bdca26eccab2e9");
        var url = "http://koffiedikkijker.azurewebsites.net" + image.ImageUrl;

        var fileReq = (HttpWebRequest)WebRequest.Create(url);
        // Dispose the response and its stream so the connection is released;
        // the original leaked both.
        using (var fileResp = (HttpWebResponse)fileReq.GetResponse())
        using (var stream = fileResp.GetResponseStream())
        {
            var result = await client.AnalyzeImageAsync(stream);
            foreach (var face in result.Faces)
            {
                var age = face.Age;
                var gender = face.Gender == "Male" ? "man" : "vrouw";
                output.Text = string.Format("Helaas, deze {0} jarige {1} kan (nog) niet worden gekwalificeerd als een leeg kopje koffie.", age, gender);
                output.Visible = true;
                containerContent.Visible = false;
            }
        }
    }
    catch (Exception)
    {
        // Deliberate best-effort: the initial message stays in place on failure.
        // TODO(review): log the exception instead of discarding it silently.
    }
}
/// <summary>
/// Handles the Start button: validates that a camera is selected, trims the
/// stored API keys, builds the Face/Emotion/Vision clients, and starts
/// processing frames from the chosen camera.
/// </summary>
/// <param name="sender">The event source.</param>
/// <param name="e">The routed event data.</param>
private async void StartButton_Click(object sender, RoutedEventArgs e)
{
    if (!CameraList.HasItems)
    {
        MessageArea.Text = "No cameras found; cannot start processing";
        return;
    }

    // Clean leading/trailing spaces in API keys.
    var settings = Properties.Settings.Default;
    settings.FaceAPIKey = settings.FaceAPIKey.Trim();
    settings.EmotionAPIKey = settings.EmotionAPIKey.Trim();
    settings.VisionAPIKey = settings.VisionAPIKey.Trim();

    // Create API clients.
    _faceClient = new FaceServiceClient(settings.FaceAPIKey, settings.FaceAPIHost);
    _emotionClient = new EmotionServiceClient(settings.EmotionAPIKey, settings.EmotionAPIHost);
    _visionClient = new VisionServiceClient(settings.VisionAPIKey, settings.VisionAPIHost);

    // How often to analyze.
    _grabber.TriggerAnalysisOnInterval(settings.AnalysisInterval);

    // Reset message.
    MessageArea.Text = "";

    // Record start time, for auto-stop.
    _startTime = DateTime.Now;

    await _grabber.StartProcessingCameraAsync(CameraList.SelectedIndex);
}
/// <summary>
/// Snaps a photo from the device camera, sends it to the Computer Vision
/// Describe endpoint, and checks the returned tags against the current
/// scavenger-hunt item's tags. Updates UI state, plays feedback audio, and
/// records PlayFab telemetry for found / not-found outcomes.
/// </summary>
public void StartSnapPhoto()
{
    // Lock the UI while the capture + analysis round-trip is in flight.
    textmesh.text = "Verifying...";
    cameraButton.interactable = false;
    LoadingCircle.Show();

    // SnapPhoto hands the captured texture to this async callback.
    StartCoroutine(controller.SnapPhoto(async tex =>
    {
        try
        {
            audioSource.PlayOneShot(clipCamera);

            // encode the image from the camera as a PNG to send to the Computer Vision API
            byte[] pngBuff = tex.EncodeToPNG();
            MemoryStream ms = new MemoryStream(pngBuff);

            // call the vision service and get the image analysis
            VisionServiceClient client = new VisionServiceClient(Globals.VisionKey, Globals.VisionEndpoint);
            AnalysisResult result = await client.DescribeAsync(ms);

            // send the tag list to the debug log
            string tags = result.Description.Tags.Aggregate((x, y) => $"{x}, {y}");
            Debug.Log(tags);

            // Any overlap between the item's expected tags and the tags the
            // service returned counts as a match.
            foreach (string itemTag in Globals.CurrentItem.Tags)
            {
                if (result.Description.Tags.Contains(itemTag.ToLower()))
                {
                    audioSource.PlayOneShot(clipFound);
                    textmesh.text = "You found it!";
                    PlayFabEvents.WriteEvent(PlayFabEventType.ItemFound);

                    // if the image matches, call the ItemFound function to record it
                    string s = JsonConvert.SerializeObject(Globals.CurrentItem);
                    await Globals.HttpClient.PostAsync("ItemFound", new StringContent(s, Encoding.UTF8, "application/json"));

                    LoadingCircle.Dismiss();
                    // Matching ends the flow: jump back to the item list scene.
                    SceneManager.LoadScene("ItemList");
                    return;
                }
            }

            // No tag matched: give feedback and re-enable the camera for another try.
            audioSource.PlayOneShot(clipNotFound);
            textmesh.text = "Not a match, please try again.";
            PlayFabEvents.WriteEvent(PlayFabEventType.ItemNotFound);
            controller.StartStream();
            cameraButton.interactable = true;
            LoadingCircle.Dismiss();
        }
        catch (Exception e)
        {
            // Surface any capture/network/service failure to the player.
            LoadingCircle.Dismiss();
            Debug.Log(e);
            DialogBox.Show(e.Message);
        }
    }));
}
/// <summary>
/// Gets a caption for the image referenced by a bot activity: from the image
/// attachment stream (emulator) or, failing that, from the url in the message
/// text (Facebook Messenger).
/// </summary>
/// <param name="message">The incoming activity.</param>
/// <param name="key">Vision API subscription key.</param>
/// <param name="url">Vision API endpoint root.</param>
/// <returns>The first caption text, or null when the service returned none.</returns>
/// <remarks>
/// NOTE(review): this blocks on async calls via GetAwaiter().GetResult();
/// consider making the method async Task&lt;string&gt; in a follow-up.
/// </remarks>
internal static string GetCaption(Activity message, string key, string url)
{
    var visionApiClient = new VisionServiceClient(key, url);
    var image = message.Attachments?.FirstOrDefault(x => x.ContentType.Contains("image"));

    if (image != null)
    {
        // emulator
        using (var stream = GetImageStream(image))
        {
            var result = visionApiClient
                .AnalyzeImageAsync(stream, new string[] { VisualFeature.Description.ToString() })
                .GetAwaiter()
                .GetResult();
            // ?. avoids the NullReferenceException the original threw when no
            // caption was returned.
            return result.Description.Captions.FirstOrDefault()?.Text;
        }
    }

    // Facebook messenger: the message text carries the image url.
    var messengerResult = visionApiClient
        .AnalyzeImageAsync(message.Text, new string[] { VisualFeature.Description.ToString() })
        .GetAwaiter()
        .GetResult();
    return messengerResult.Description.Captions.FirstOrDefault()?.Text;
}
/// <summary>
/// Uploads the image to Project Oxford and generates a thumbnail.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="width">Requested thumbnail width (1-1024).</param>
/// <param name="height">Requested thumbnail height (1-1024).</param>
/// <param name="smartCropping">Whether to enable smart cropping.</param>
/// <returns>The thumbnail image bytes.</returns>
private static async Task<byte[]> UploadAndThumbnailImage(string imageFilePath, int width, int height, bool smartCropping)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(ComKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload an image and generate a thumbnail
        Log("Calling VisionServiceClient.GetThumbnailAsync()...");
        byte[] thumbnail = await client.GetThumbnailAsync(stream, width, height, smartCropping);
        return thumbnail;
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Analyzes an image stream for tags, categories, and a description; logs the
/// tags and categories to the console and shows the description tags in the UI.
/// </summary>
/// <param name="imageStream">The image stream to analyze.</param>
async Task analyseImage(Stream imageStream)
{
    try
    {
        // NOTE(review): subscription key is hard-coded in sample code.
        var visionClient = new VisionServiceClient("c19d4b8bb6c242ea99a8a998195a24f0");
        VisualFeature[] features = { VisualFeature.Tags, VisualFeature.Categories, VisualFeature.Description };

        var analysisResult = await visionClient.AnalyzeImageAsync(imageStream, features.ToList(), null);

        // Dump each tag with its confidence score.
        Console.Out.WriteLine("Tags:\n");
        foreach (Tag tag in analysisResult.Tags.ToArray())
        {
            Console.Out.WriteLine(tag.Name);
            Console.Out.WriteLine(tag.Confidence);
        }

        // Dump each category with its score.
        Console.Out.WriteLine("Cats:\n");
        foreach (Category category in analysisResult.Categories.ToArray())
        {
            Console.Out.WriteLine(category.Name);
            Console.Out.WriteLine(category.Score);
        }

        // Show the description tags, one per line, in the label.
        AnalysisLabel.Text = string.Empty;
        analysisResult.Description.Tags.ToList().ForEach(tag => AnalysisLabel.Text = AnalysisLabel.Text + tag + "\n");
    }
    catch (Microsoft.ProjectOxford.Vision.ClientException ex)
    {
        // Surface service-side errors directly in the UI label.
        AnalysisLabel.Text = ex.Error.Message;
    }
}
/// <summary>
/// Gets a list of available domain models.
/// </summary>
/// <returns>The model listing returned by the service.</returns>
private async Task<ModelResult> GetAvailableDomainModels()
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey, "https://westcentralus.api.cognitive.microsoft.com/vision/v1.0");
    Log("VisionServiceClient is created");

    // List the domain models supported by the service
    Log("Calling VisionServiceClient.ListModelsAsync()...");
    return await client.ListModelsAsync();

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Initializes the controller's Vision API client from the key and endpoint
/// configured in appSettings.
/// </summary>
public ComputerVisionBaseController()
{
    string apiKey = ConfigurationManager.AppSettings["CognitiveServicesVisionApiKey"];
    string apiRoot = ConfigurationManager.AppSettings["CognitiveServicesVisionApiUrl"];

    VisionServiceClient = new VisionServiceClient(apiKey, apiRoot);
}
/// <summary>
/// Analyzes an image stream for tags using the Vision API.
/// </summary>
/// <param name="imageStream">The image stream to analyze.</param>
/// <returns>The analysis result containing the detected tags.</returns>
private async Task<AnalysisResult> GetImageDescription(Stream imageStream)
{
    // NOTE(review): subscription key and endpoint are hard-coded sample values.
    var client = new VisionServiceClient("4d673d0f18bc44d3b519400de1a7e76d", "https://westcentralus.api.cognitive.microsoft.com/vision/v1.0");
    VisualFeature[] features = { VisualFeature.Tags };
    var analysis = await client.AnalyzeImageAsync(imageStream, features.ToList(), null);
    return analysis;
}
/// <summary>
/// Analyzes an image stream for tags and faces, returning the tag attributes
/// and the face rectangles found.
/// </summary>
/// <param name="stream">The image stream; disposed by this method.</param>
/// <returns>A <see cref="VisionResult"/> with attributes and rectangles.</returns>
public async Task<VisionResult> Analyze(Stream stream)
{
    var client = new VisionServiceClient(Constants.VisionApiKey, Constants.VisionApiEndpoint);
    var attributes = new List<VisionAttribute>();
    var rectangles = new List<Rectangle>();

    using (stream)
    {
        var features = new VisualFeature[] { VisualFeature.Tags, VisualFeature.Faces };
        var visionsResult = await client.AnalyzeImageAsync(stream, features, null);

        // Guard the result AND its Tags array: the original checked only the
        // result for null, so a null Tags array still threw on Tags.Length.
        if (visionsResult?.Tags != null && visionsResult.Tags.Length > 0)
        {
            if (visionsResult.Faces != null)
            {
                foreach (var face in visionsResult.Faces)
                {
                    rectangles.Add(face.FaceRectangle.ToRectangle());
                }
            }

            foreach (var tag in visionsResult.Tags)
            {
                attributes.Add(new VisionAttribute(tag.Name, tag.Hint, tag.Confidence));
            }
        }
    }

    return new VisionResult { Attributes = attributes, Rectangles = rectangles };
}
/// <summary>
/// Runs the requested Vision operation ("analyze", "describe", or "tag")
/// against a local image file.
/// </summary>
/// <param name="fname">Path to the image file.</param>
/// <param name="method">One of "analyze", "describe", or "tag".</param>
/// <returns>The analysis result, or null when the file does not exist or the
/// method name is unrecognized.</returns>
public static async Task<AnalysisResult> SmartImageProcessing(string fname, string method)
{
    AnalysisResult analyzed = null;
    var client = new VisionServiceClient(API_key, API_location);
    IEnumerable<VisualFeature> visualFeatures = GetVisualFeatures();

    if (File.Exists(fname))
    {
        using (Stream stream = File.OpenRead(fname))
        {
            // Dispatch to the requested Vision endpoint.
            switch (method)
            {
                case "analyze":
                    analyzed = await client.AnalyzeImageAsync(stream, visualFeatures);
                    break;
                case "describe":
                    analyzed = await client.DescribeAsync(stream);
                    break;
                case "tag":
                    analyzed = await client.GetTagsAsync(stream);
                    break;
            }
        }
    }

    return analyzed;
}
/// <summary>
/// Identifies text found in a list of photos. Text found is added as a property back into each photo.
/// </summary>
/// <param name="Photos">Provided list of photos.</param>
/// <returns>A task representing the asynchronous OCR work.</returns>
public async Task identifyTextInPhoto(List<Photo> Photos)
{
    // One client serves all photos; the original re-created it every iteration.
    VisionServiceClient client = new VisionServiceClient(SubscriptionKey);

    foreach (Photo photo in Photos)
    {
        // Dispose the stream once OCR has consumed it (previously leaked).
        using (Stream stream = new MemoryStream(photo.Image))
        {
            OcrResults result = await client.RecognizeTextAsync(stream, Language, DetectOrientation);
            photo.LanguageDetectedInPhoto = result.Language;

            foreach (Region region in result.Regions)
            {
                foreach (Line line in region.Lines)
                {
                    // Join the words of each OCR line with single spaces.
                    string[] words = new string[line.Words.Length];
                    for (int i = 0; i < line.Words.Length; i++)
                    {
                        words[i] = line.Words[i].Text;
                    }
                    photo.TextInPhoto.Add(string.Join(" ", words));
                }
            }
        }
    }
    // The original wrapped everything in try { } catch (Exception e) { throw; },
    // which is a no-op rethrow with an unused variable; removed.
}
/// <summary>
/// Uploads the image to Project Oxford and performs analysis against a given domain.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="domainModel">The domain model to analyze against.</param>
/// <returns>The in-domain analysis result.</returns>
private async Task<AnalysisInDomainResult> UploadAndAnalyzeInDomainImage(string imageFilePath, Model domainModel)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey, SubscriptionEndpoint);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Analyze the image for the given domain
        Log("Calling VisionServiceClient.AnalyzeImageInDomainAsync()...");
        return await client.AnalyzeImageInDomainAsync(stream, domainModel);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Runs OCR on a local image file and returns the recognized text as a single
/// space-separated string.
/// </summary>
/// <param name="fileName">Path to the image file to process.</param>
/// <returns>All recognized words joined by spaces (trailing space included).</returns>
public async Task<string> GetText(string fileName)
{
    const string key = "84cc0008b9614936ad5a7edfdec501d3";

    //create the OCR client
    VisionServiceClient client = new VisionServiceClient(key);

    //keep track of our program input from OCR image processing
    StringBuilder program = new StringBuilder();

    //pull our main program from a local image file.
    using (var stream = File.OpenRead(fileName))
    {
        //send the image for processing
        var ocrResult = await client.RecognizeTextAsync(stream);

        //now enumerate over results to build up program.
        foreach (var region in ocrResult.Regions)
        foreach (var line in region.Lines)
        foreach (var word in line.Words)
        {
            //print every word/symbol followed by a space to our string builder.
            program.Append(word.Text + " ");
        }
    }

    return program.ToString();
}
/// <summary>
/// Runs OCR on an image url, retrying with exponential backoff when the
/// Computer Vision service throttles the call (HTTP 429).
/// </summary>
/// <param name="imageUrl">Url of the image to OCR.</param>
/// <param name="VisionServiceApiKey">Vision API subscription key.</param>
/// <param name="log">Function trace writer for progress/warnings.</param>
/// <returns>The OCR results once a call succeeds.</returns>
private static async Task<OcrResults> DetectText(string imageUrl, string VisionServiceApiKey, TraceWriter log)
{
    VisionServiceClient visionServiceClient = new VisionServiceClient(VisionServiceApiKey);

    // Retry budget and initial backoff come from configuration.
    int retriesLeft = int.Parse(CloudConfigurationManager.GetSetting("CognitiveServicesRetryCount"));
    int delay = int.Parse(CloudConfigurationManager.GetSetting("CognitiveServicesInitialRetryDelayms"));

    OcrResults response = null;
    while (true)
    {
        try
        {
            response = await visionServiceClient.RecognizeTextAsync(imageUrl);
            break;
        }
        catch (ClientException exception) when (exception.HttpStatus == (HttpStatusCode)429 && retriesLeft > 0)
        {
            // Throttled: wait, halve the budget, double the delay, and retry.
            log.Info($"Computer Vision OCR call has been throttled. {retriesLeft} retries left.");
            if (retriesLeft == 1)
            {
                log.Warning($"Computer Vision OCR call still throttled after {CloudConfigurationManager.GetSetting("CognitiveServicesRetryCount")} attempts, giving up.");
            }

            await Task.Delay(delay);
            retriesLeft--;
            delay *= 2;
            continue;
        }
    }

    return response;
}
/// <summary>
/// Sends a url to Project Oxford and performs analysis against a given domain.
/// </summary>
/// <param name="imageUrl">The url of the image to analyze.</param>
/// <param name="domainModel">The domain model to analyze against.</param>
/// <returns>The in-domain analysis result.</returns>
private async Task<AnalysisInDomainResult> AnalyzeInDomainUrl(string imageUrl, Model domainModel)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey, SubscriptionEndpoint);
    Log("VisionServiceClient is created");

    // Analyze the url against the given domain
    Log("Calling VisionServiceClient.AnalyzeImageInDomainAsync()...");
    return await client.AnalyzeImageInDomainAsync(imageUrl, domainModel);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Runs OCR on an image stream and returns each recognized line as an
/// attribute, plus the bounding rectangles of regions and lines.
/// </summary>
/// <param name="stream">The image stream; disposed by this method.</param>
/// <returns>A <see cref="VisionResult"/> with line phrases and rectangles.</returns>
public async Task<VisionResult> Analyze(Stream stream)
{
    var client = new VisionServiceClient(Constants.VisionApiKey, Constants.VisionApiEndpoint);
    var attributes = new List<VisionAttribute>();
    var rectangles = new List<Rectangle>();

    using (stream)
    {
        var ocrResult = await client.RecognizeTextAsync(stream);

        foreach (var region in ocrResult.Regions)
        {
            rectangles.Add(region.Rectangle.ToRectangle());

            foreach (var line in region.Lines)
            {
                // One attribute per OCR line: its words joined by spaces.
                var phrase = string.Join(" ", line.Words.Select(w => w.Text));
                attributes.Add(new VisionAttribute(phrase));
                rectangles.Add(line.Rectangle?.ToRectangle());
            }
        }
    }

    return new VisionResult { Attributes = attributes, Rectangles = rectangles };
}
/// <summary>
/// Sends a url to Project Oxford and performs description.
/// </summary>
/// <param name="imageUrl">The url of the image to describe.</param>
/// <returns>The analysis result containing up to three captions.</returns>
private async Task<AnalysisResult> DescribeUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Describe the url and ask for three captions
    Log("Calling VisionServiceClient.DescribeAsync()...");
    return await client.DescribeAsync(imageUrl, 3);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// POST: api/Messages
/// Receive a message from a user and reply to it.
/// </summary>
/// <param name="message">The incoming message.</param>
/// <returns>The reply message, describing the latest captured image.</returns>
public async Task<Message> Post([FromBody] Message message)
{
    if (message.Type != "Message")
    {
        return HandleSystemMessage(message);
    }

    var latestImage = GetLatestImageUrl();
    if (string.IsNullOrEmpty(latestImage))
    {
        return message.CreateReplyMessage("Not really sure to be completely honest...");
    }

    // Ask the Vision service to describe the latest image and join the captions.
    VisionServiceClient client = new VisionServiceClient(VISION_CLIENTID);
    var r = await client.DescribeAsync(latestImage);
    var msg = string.Join(", ", r.Description.Captions.Select(c => c.Text));

    // return our reply to the user, attaching the image we described
    var reply = message.CreateReplyMessage($"Thanks for asking, I think I see the following: {msg}.");
    reply.Attachments = new List<Attachment>
    {
        new Attachment() { ContentUrl = latestImage, ContentType = "image/jpg" }
    };
    return reply;
}
/// <summary>
/// Uploads the image to Project Oxford and performs description.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <returns>The analysis result containing up to three captions.</returns>
private async Task<AnalysisResult> UploadAndDescribeImage(string imageFilePath)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload the image and request three descriptions
        Log("Calling VisionServiceClient.DescribeAsync()...");
        return await client.DescribeAsync(stream, 3);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Analyzes an image stream for tags, categories, and a description.
/// </summary>
/// <param name="imageStream">The image stream to analyze.</param>
/// <returns>The analysis result.</returns>
public async Task<AnalysisResult> GetImageDescription(Stream imageStream)
{
    // NOTE(review): the subscription key is empty here; it must be supplied
    // before this call can succeed.
    var client = new VisionServiceClient("", "https://eastus.api.cognitive.microsoft.com/vision/v1.0");
    VisualFeature[] features = { VisualFeature.Tags, VisualFeature.Categories, VisualFeature.Description };
    var analysis = await client.AnalyzeImageAsync(imageStream, features.ToList(), null);
    return analysis;
}
/// <summary>
/// Uploads the image to Project Oxford and generates a thumbnail.
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="width">Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="height">Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="smartCropping">Boolean flag for enabling smart cropping.</param>
/// <returns>The thumbnail image bytes.</returns>
private async Task<byte[]> UploadAndThumbnailImage(string imageFilePath, int width, int height, bool smartCropping)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey, "https://westcentralus.api.cognitive.microsoft.com/vision/v1.0");
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload an image and generate a thumbnail
        Log("Calling VisionServiceClient.GetThumbnailAsync()...");
        return await client.GetThumbnailAsync(stream, width, height, smartCropping);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Handles GET by rendering the empty form, and POST by analyzing the uploaded
/// image and embedding it back into the page as a base64 data URI.
/// </summary>
/// <returns>The Index view, with the populated model on POST.</returns>
public async Task<ActionResult> Index()
{
    if (Request.HttpMethod == "GET")
    {
        return View("Index");
    }

    var model = new DescribeImageModel();
    var features = new[]
    {
        VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
        VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
        VisualFeature.Tags
    };

    // First pass over the upload: run the full Vision analysis.
    await RunOperationOnImage(async stream =>
    {
        model.Result = await VisionServiceClient.AnalyzeImageAsync(stream, features);
    });

    // Second pass: capture the raw bytes so the page can redisplay the image.
    await RunOperationOnImage(async stream =>
    {
        // FIX: the original used a single Stream.Read() call, which is not
        // guaranteed to fill the buffer (a partial read silently corrupts the
        // base64 payload). CopyToAsync drains the stream completely.
        using (var buffer = new MemoryStream())
        {
            await stream.CopyToAsync(buffer);
            var base64 = Convert.ToBase64String(buffer.ToArray());
            // NOTE(review): the data URI assumes PNG; the upload may be another
            // format — confirm against the upload handler.
            model.ImageDump = String.Format("data:image/png;base64,{0}", base64);
        }
    });

    return View(model);
}
/// <summary>
/// Get a list of available domain models
/// </summary>
/// <returns>The result listing the domain-specific models the service supports.</returns>
private async Task<ModelResult> GetAvailableDomainModels()
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey, SubscriptionEndpoint);
    Log("VisionServiceClient is created");

    // Ask the service which domain models are available
    Log("Calling VisionServiceClient.ListModelsAsync()...");
    return await client.ListModelsAsync();

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
// POST api/values
/// <summary>
/// Describes the image at the given URL and returns the top caption text.
/// </summary>
/// <param name="imgUrl">URL of the image to describe.</param>
/// <returns>The first caption's text, or null when the call failed or no caption was produced.</returns>
public async Task<string> Get(string imgUrl)
{
    VisionServiceClient visionclient = new VisionServiceClient("CognitiveServiceVisionKey");
    AnalysisResult result = null;
    try
    {
        result = await visionclient.DescribeAsync(imgUrl);
    }
    catch (Exception ex)
    {
        Debug.WriteLine(ex.Message);
    }

    // FIX: the original dereferenced result.Description.Captions[0] unconditionally,
    // throwing NullReferenceException whenever the call above failed (result stays
    // null) or the service returned no captions.
    var captions = result?.Description?.Captions;
    return (captions != null && captions.Length > 0) ? captions[0].Text : null;
}
/// <summary>
/// Runs OCR on the current item's image URL, concatenates the words of the first
/// text region, and adds the result to TextCollection.
/// </summary>
public async Task GetText()
{
    VisionServiceClient visionServiceClient = new VisionServiceClient("key");
    var result = await visionServiceClient.RecognizeTextAsync(this.item.Url);

    // FIX: the original indexed Regions[0] unconditionally and threw when the
    // service found no text in the image. Bail out quietly in that case.
    if (result?.Regions == null || result.Regions.Length == 0)
    {
        return;
    }

    var sb = new StringBuilder();
    // NOTE(review): only the first region is read, matching the original behavior;
    // images with multiple text regions lose the rest.
    foreach (var word in result.Regions[0].Lines.SelectMany(lines => lines.Words))
    {
        sb.Append(word.Text).Append(' ');
    }

    var item = new ImageView { Nombre = sb.ToString(), Url = this.item.Url };
    TextCollection.Add(item);
}
/// <summary>
/// Get a list of available domain models
/// </summary>
/// <returns>The result listing the domain-specific models the service supports.</returns>
private async Task<ModelResult> GetAvailableDomainModels()
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Ask the service which domain models are available
    Log("Calling VisionServiceClient.ListModelsAsync()...");
    return await client.ListModelsAsync();

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Method responsible for getting categories (tags) of an image.
/// </summary>
/// <param name="fileURL">URL of the image file to tag.</param>
/// <returns>Array of tag names produced by the Vision API.</returns>
public async static Task<string[]> GetCategories(string fileURL)
{
    // FIX: the original wrapped this in try/catch whose only action was "throw ex;",
    // which resets the stack trace (CA2200) and adds nothing. Letting exceptions
    // propagate unchanged is equivalent and preserves diagnostics.
    string subscriptionKey = ConfigurationManager.AppSettings["CognitiveServiceSubscriptionKey"];
    var visionServiceClient = new VisionServiceClient(subscriptionKey);
    var info = await visionServiceClient.GetTagsAsync(fileURL);

    List<string> tags = new List<string>();
    foreach (var tag in info.Tags)
    {
        tags.Add(tag.Name);
    }
    return tags.ToArray();
}
/// <summary>
/// Runs OCR over a local image file and returns the recognized text.
/// </summary>
/// <param name="selectedFile">Path of the image file to process.</param>
/// <param name="detectOrientation">Whether the service should auto-detect text orientation.</param>
/// <param name="languageCode">OCR language; defaults to automatic detection.</param>
/// <returns>The OCR result, or null when the file is missing or the call failed
/// (the failure reason is recorded in ErrorMesssage).</returns>
public async Task<OcrResults> OcrRecognizeText(string selectedFile, bool detectOrientation = true, string languageCode = LanguageCodes.AutoDetect)
{
    IVisionServiceClient visionClient = new VisionServiceClient(_subscriptionKeyVision);
    OcrResults ocrResult = null;

    try
    {
        if (!File.Exists(selectedFile))
        {
            ErrorMesssage = "Invalid image path or Url";
        }
        else
        {
            using (var stream = File.OpenRead(selectedFile))
            {
                ocrResult = await visionClient.RecognizeTextAsync(stream, languageCode, detectOrientation);
            }
        }
    }
    catch (ClientException e)
    {
        // Service-side failure: prefer the API's own error message when present.
        ErrorMesssage = e.Error != null ? e.Error.Message : e.Message;
    }
    catch (Exception exception)
    {
        ErrorMesssage = exception.ToString();
    }

    return ocrResult;
}
/// <summary>
/// Analyzes the image at the given URL using the Microsoft ProjectOxford Vision API.
/// </summary>
/// <param name="imageUri">URL of the image to analyze.</param>
/// <param name="subscriptionKey">Vision API subscription key.</param>
/// <returns>An AnalysisResult object holding the analysis outcome.</returns>
public async static Task<AnalysisResult> AnalyzeImageAsync(Uri imageUri, string subscriptionKey)
{
    var client = new VisionServiceClient(subscriptionKey);
    // ConfigureAwait(false): library-style helper, no need to resume on the caller's context.
    return await client.AnalyzeImageAsync(imageUri.AbsoluteUri).ConfigureAwait(false);
}
// Captures one camera frame, optionally shows it as a floating textured quad in the
// 3D scene, then sends it to the Vision API — OCR when readText is true, otherwise a
// one-sentence description. Returns the resulting text, or an error message on failure.
async Task<string> CaptureAndAnalyze(bool readText = false)
{
    var imgFormat = ImageEncodingProperties.CreateJpeg();

    //NOTE: this is how you can save a frame to the CameraRoll folder:
    //var file = await KnownFolders.CameraRoll.CreateFileAsync($"MCS_Photo{DateTime.Now:HH-mm-ss}.jpg", CreationCollisionOption.GenerateUniqueName);
    //await mediaCapture.CapturePhotoToStorageFileAsync(imgFormat, file);
    //var stream = await file.OpenStreamForReadAsync();

    // Capture a frame and put it to MemoryStream
    var memoryStream = new MemoryStream();
    using (var ras = new InMemoryRandomAccessStream())
    {
        await mediaCapture.CapturePhotoToStreamAsync(imgFormat, ras);
        ras.Seek(0);
        using (var stream = ras.AsStreamForRead())
            stream.CopyTo(memoryStream);
    }

    // Keep a byte[] copy for the preview texture, then rewind the stream so the
    // Vision API calls below read it from the start.
    var imageBytes = memoryStream.ToArray();
    memoryStream.Position = 0;

    if (withPreview)
    {
        // Build the preview on the main (render) thread: a quad textured with the
        // captured frame, placed 2 units in front of the camera and facing it,
        // scaled to preserve the frame's aspect ratio, with a bounce-in animation.
        InvokeOnMain(() =>
        {
            var image = new Image();
            image.Load(new Urho.MemoryBuffer(imageBytes));

            Node child = Scene.CreateChild();
            child.Position = LeftCamera.Node.WorldPosition + LeftCamera.Node.WorldDirection * 2f;
            child.LookAt(LeftCamera.Node.WorldPosition, Vector3.Up, TransformSpace.World);
            // Height/width ratio keeps the captured frame's proportions on the quad.
            child.Scale = new Vector3(1f, image.Height / (float)image.Width, 0.1f) / 10;

            var texture = new Texture2D();
            texture.SetData(image, true);

            var material = new Material();
            material.SetTechnique(0, CoreAssets.Techniques.Diff, 0, 0);
            material.SetTexture(TextureUnit.Diffuse, texture);

            var box = child.CreateComponent<Box>();
            box.SetMaterial(material);

            child.RunActions(new EaseBounceOut(new ScaleBy(1f, 5)));
        });
    }

    try
    {
        var client = new VisionServiceClient(VisionApiKey);
        if (readText)
        {
            // OCR path: flatten regions -> lines -> words into one spoken sentence.
            var ocrResult = await client.RecognizeTextAsync(memoryStream, detectOrientation: false);
            var words = ocrResult.Regions.SelectMany(region => region.Lines).SelectMany(line => line.Words).Select(word => word.Text);
            return "it says: " + string.Join(" ", words);
        }
        else
        {
            // Description path; client.AnalyzeImageAsync could be used instead for richer results.
            var result = await 
client.DescribeAsync(memoryStream);
            return result?.Description?.Captions?.FirstOrDefault()?.Text;
        }
    }
    catch (ClientException exc)
    {
        // Service-side error: surface the API's own message when available.
        return exc?.Error?.Message ?? "Failed";
    }
    catch (Exception exc)
    {
        // Any other failure (capture, network, ...) — generic fallback message.
        return "Failed";
    }
}
/// <summary>
/// Sends a url to Project Oxford and performs OCR
/// </summary>
/// <param name="imageUrl">The url to perform recognition on</param>
/// <param name="language">The language code to recognize for</param>
/// <returns>The OCR result for the image at the url.</returns>
private async Task<OcrResults> RecognizeUrl(string imageUrl, string language)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Run OCR against the given url
    Log("Calling VisionServiceClient.RecognizeTextAsync()...");
    return await client.RecognizeTextAsync(imageUrl, language);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Analyzes the supplied image stream for all visual features.
/// </summary>
/// <param name="imageStream">Readable stream with the image data; disposed by this method.</param>
/// <returns>The analysis result produced by the Vision API.</returns>
private async Task<AnalysisResult> UploadAndAnalyzeImage(Stream imageStream)
{
    var visionServiceClient = new VisionServiceClient(Constants.CuomputerVisionApiKey);

    // FIX: removed the unused (and misspelled) "assembley" local — the reflection
    // call served no purpose in this method.

    // NOTE(review): this takes ownership of the caller's stream and disposes it
    // when the call completes; callers must not reuse the stream afterwards.
    using (Stream imageFileStream = imageStream)
    {
        VisualFeature[] visualFeatures = new VisualFeature[]
        {
            VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
            VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
            VisualFeature.Tags
        };
        AnalysisResult analysisResult = await visionServiceClient.AnalyzeImageAsync(imageFileStream, visualFeatures);
        return analysisResult;
    }
}
/// <summary>
/// Sends a url to Project Oxford and generates a thumbnail
/// </summary>
/// <param name="imageUrl">The url of the image to generate a thumbnail for</param>
/// <param name="width">Width of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="height">Height of the thumbnail. It must be between 1 and 1024. Recommended minimum of 50.</param>
/// <param name="smartCropping">Boolean flag for enabling smart cropping.</param>
/// <returns>The raw bytes of the generated thumbnail.</returns>
private async Task<byte[]> ThumbnailUrl(string imageUrl, int width, int height, bool smartCropping)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Ask the service for a thumbnail of the image at the url
    Log("Calling VisionServiceClient.GetThumbnailAsync()...");
    return await client.GetThumbnailAsync(imageUrl, width, height, smartCropping);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Sends a url to Project Oxford and performs analysis against a given domain
/// </summary>
/// <param name="imageUrl">The url of the image to analyze</param>
/// <param name="domainModel">The domain model to analyze against</param>
/// <returns>The domain-specific analysis result.</returns>
private async Task<AnalysisInDomainResult> AnalyzeInDomainUrl(string imageUrl, Model domainModel)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Analyze the url against the given domain model
    Log("Calling VisionServiceClient.AnalyzeImageInDomainAsync()...");
    return await client.AnalyzeImageInDomainAsync(imageUrl, domainModel);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Uploads the image to Project Oxford and performs analysis against a given domain
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <param name="domainModel">The domain model to analyze against</param>
/// <returns>The domain-specific analysis result.</returns>
private async Task<AnalysisInDomainResult> UploadAndAnalyzeInDomainImage(string imageFilePath, Model domainModel)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream stream = File.OpenRead(imageFilePath))
    {
        // Upload the image and analyze it against the given domain model
        Log("Calling VisionServiceClient.AnalyzeImageInDomainAsync()...");
        return await client.AnalyzeImageInDomainAsync(stream, domainModel);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Analyzes an image given either a local file path or a URL.
/// </summary>
/// <param name="selectedFile">Local path or well-formed URL of the image.</param>
/// <returns>The analysis result, or null when the input is invalid or the call
/// failed (the failure reason is recorded in ErrorMesssage).</returns>
public async Task<AnalysisResult> AnalyzeImage(string selectedFile)
{
    IVisionServiceClient visionClient = new VisionServiceClient(_subscriptionKeyVision);
    AnalysisResult analysisResult = null;
    ErrorMesssage = string.Empty;

    try
    {
        if (File.Exists(selectedFile))
        {
            // Local file: stream its contents up to the service.
            using (var stream = File.OpenRead(selectedFile))
            {
                analysisResult = await visionClient.AnalyzeImageAsync(stream);
            }
        }
        else if (Uri.IsWellFormedUriString(selectedFile, UriKind.RelativeOrAbsolute))
        {
            // Remote image: let the service fetch the URL itself.
            analysisResult = await visionClient.AnalyzeImageAsync(selectedFile);
        }
        else
        {
            ErrorMesssage = "Invalid image path or Url";
        }
    }
    catch (ClientException e)
    {
        // Service-side failure: prefer the API's own error message when present.
        ErrorMesssage = e.Error != null ? e.Error.Message : e.Message;
    }
    catch (Exception exception)
    {
        ErrorMesssage = exception.ToString();
    }

    return analysisResult;
}
/// <summary>
/// Sends a url to Project Oxford and generates tags for it
/// </summary>
/// <param name="imageUrl">The url of the image to generate tags for</param>
/// <returns>The analysis result containing the generated tags.</returns>
private async Task<AnalysisResult> GenerateTagsForUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Generate tags for the image at the given url
    Log("Calling VisionServiceClient.GetTagsAsync()...");
    return await client.GetTagsAsync(imageUrl);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Sends a url to Project Oxford and performs description
/// </summary>
/// <param name="imageUrl">The url of the image to describe</param>
/// <returns>The analysis result holding up to three candidate captions.</returns>
private async Task<AnalysisResult> DescribeUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Describe the url, requesting three candidate captions
    Log("Calling VisionServiceClient.DescribeAsync()...");
    return await client.DescribeAsync(imageUrl, 3);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Sends a url to Project Oxford and performs analysis
/// </summary>
/// <param name="imageUrl">The url of the image to analyze</param>
/// <returns>The analysis result covering every visual feature.</returns>
private async Task<AnalysisResult> AnalyzeUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create Project Oxford Vision API Service client
    var client = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Analyze the url, requesting every visual feature the service offers
    Log("Calling VisionServiceClient.AnalyzeImageAsync()...");
    var visualFeatures = new VisualFeature[]
    {
        VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
        VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
        VisualFeature.Tags
    };
    return await client.AnalyzeImageAsync(imageUrl, visualFeatures);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Analyzes the supplied image stream for all visual features.
/// </summary>
/// <param name="subscriptionKey">Vision API subscription key.</param>
/// <param name="imageStream">Readable stream with the image data; disposed by this method.</param>
/// <returns>The analysis result, or null when the call fails.</returns>
private async Task<AnalysisResult> UploadAndAnalyzeImage(string subscriptionKey, Stream imageStream)
{
    try
    {
        var client = new VisionServiceClient(subscriptionKey);

        // Takes ownership of the caller's stream and disposes it on completion.
        using (var stream = imageStream)
        {
            var features = new[]
            {
                VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
                VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
                VisualFeature.Tags
            };
            return await client.AnalyzeImageAsync(stream, features);
        }
    }
    catch (Exception ex)
    {
        // Best-effort: log the failure and return null rather than throwing.
        Debug.WriteLine($"{ex}");
        return null;
    }
}