/// <summary>
/// Sends a url to Project Oxford and generates tags for it
/// </summary>
/// <param name="imageUrl">The url of the image to generate tags for</param>
/// <returns>The analysis result containing the generated tags.</returns>
private async Task<AnalysisResult> GenerateTagsForUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create the Project Oxford Vision API service client.
    VisionServiceClient VisionServiceClient = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Ask the service to tag the image at the given url.
    Log("Calling VisionServiceClient.GetTagsAsync()...");
    return await VisionServiceClient.GetTagsAsync(imageUrl);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
// Core image processing method -> Internally, this method will be able to invoke several
// CV API methods, such as analyze and describe. This is why we specify a second parameter
// named method.
public static async Task<AnalysisResult> SmartImageProcess(string fname, string method)
{
    AnalysisResult result = null;
    VisionServiceClient client = new VisionServiceClient(API_key, API_location);

    // Resolve the visual features once; they are handed to the API calls that need them.
    IEnumerable<VisualFeature> features = GetVisualFeatures();

    // Only touch the file system when the input image actually exists;
    // otherwise the method falls through and returns null.
    if (File.Exists(fname))
    {
        using (Stream imageStream = File.OpenRead(fname))
        {
            if (method == "analyze")
            {
                result = await client.AnalyzeImageAsync(imageStream, features);
            }
            else if (method == "describe")
            {
                result = await client.DescribeAsync(imageStream);
            }
            else if (method == "tag")
            {
                result = await client.GetTagsAsync(imageStream);
            }
            // An unrecognized method name leaves the result null, as before.
        }
    }

    return result;
}
// Dispatches the image at fname to one of several CV API endpoints, selected by the
// method argument ("analyze", "describe" or "tag").
public static async Task<AnalysisResult> SmartImageProcessing(string fname, string method)
{
    VisionServiceClient client = new VisionServiceClient(API_key, API_location);
    IEnumerable<VisualFeature> visualFeatures = GetVisualFeatures();

    // A missing input file yields a null result rather than an exception.
    if (!File.Exists(fname))
    {
        return null;
    }

    using (Stream stream = File.OpenRead(fname))
    {
        switch (method)
        {
            case "analyze":
                return await client.AnalyzeImageAsync(stream, visualFeatures);
            case "describe":
                return await client.DescribeAsync(stream);
            case "tag":
                return await client.GetTagsAsync(stream);
            default:
                // Unrecognized method names produce no analysis.
                return null;
        }
    }
}
/// <summary>
/// Uploads the image to Project Oxford and generates tags
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <returns>The analysis result holding the generated tags.</returns>
private async Task<AnalysisResult> UploadAndGetTagsForImage(string imageFilePath)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Build the Vision API client from the configured subscription key.
    VisionServiceClient VisionServiceClient = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    using (Stream imageFileStream = File.OpenRead(imageFilePath))
    {
        // Stream the local file up to the service and request tags.
        Log("Calling VisionServiceClient.GetTagsAsync()...");
        return await VisionServiceClient.GetTagsAsync(imageFileStream);
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Uploads the image to Project Oxford and generates tags
/// </summary>
/// <param name="imageFilePath">The image file path.</param>
/// <returns>The tag analysis for the uploaded image.</returns>
private async Task<AnalysisResult> UploadAndGetTagsForImage(string imageFilePath)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Instantiate the Vision API service client.
    VisionServiceClient VisionServiceClient = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Open the local file, upload it, and collect the generated tags.
    using (Stream imageFileStream = File.OpenRead(imageFilePath))
    {
        Log("Calling VisionServiceClient.GetTagsAsync()...");
        AnalysisResult tagAnalysis = await VisionServiceClient.GetTagsAsync(imageFileStream);
        return tagAnalysis;
    }

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}
/// <summary>
/// Requests tags for a sample image url and renders them in the Tags view.
/// </summary>
/// <returns>The Tags view with the tag analysis result as its model.</returns>
public async Task<ActionResult> Tags()
{
    var result = await visionClient.GetTagsAsync(
        "https://oxfordportal.blob.core.windows.net/vision/Analysis/11-1.jpg"
        );
    // Bug fix: the original awaited the API call and then discarded the result,
    // so the view rendered without any tag data. Pass it as the view model.
    return View(result);
}
/// <summary> Function which submits a frame to the Computer Vision API for tagging. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
///     and containing the tags returned by the API. </returns>
private async Task<LiveCameraResult> TaggingAnalysisFunction(VideoFrame frame)
{
    // Serialize the frame as JPEG before uploading.
    var imageBytes = frame.Image.ToMemoryStream(".jpg", s_jpegParams);

    // Ask the Vision API for tags.
    var analysis = await _visionClient.GetTagsAsync(imageBytes);

    // Track API usage.
    Properties.Settings.Default.VisionAPICallCount++;

    // Wrap the tags with a timestamp, record the result, and persist it.
    var cameraResult = new LiveCameraResult
    {
        Tags = analysis.Tags,
        TimeStamp = DateTime.Now
    };
    TotalAPIResults.Add(cameraResult);
    ApiResult = cameraResult;
    analysisLog.SaveData(cameraResult);
    return cameraResult;
}
/// <summary> Function which submits a frame to the Computer Vision API for tagging. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
///     and containing the tags returned by the API. </returns>
private async Task<LiveCameraResult> TaggingAnalysisFunction(VideoFrame frame)
{
    // JPEG-encode the frame for upload.
    var encodedFrame = frame.Image.ToMemoryStream(".jpg", s_jpegParams);

    // Submit the image and record the API call against the usage counter.
    var analysis = await _visionClient.GetTagsAsync(encodedFrame);
    Properties.Settings.Default.VisionAPICallCount++;

    // Package the returned tags for the live camera pipeline.
    var cameraResult = new LiveCameraResult
    {
        Tags = analysis.Tags
    };
    return cameraResult;
}
/// <summary>
/// Submits the image stream produced by <paramref name="task"/> to the Computer
/// Vision API and returns the detected tags.
/// </summary>
/// <param name="task">A task that yields the image stream to analyze.</param>
/// <returns>The list of tags the API detected in the image.</returns>
private async Task<List<Tag>> TaggingAnalysisFunction(Task<Stream> task)
{
    // Bug fix: await the stream instead of reading task.Result — .Result blocks
    // the calling thread and can deadlock when a synchronization context is present.
    Stream imageStream = await task;

    // Submit image to API.
    var analysis = await _visionClient.GetTagsAsync(imageStream);

    // Copy the detected tags into a concrete list for the caller.
    List<Tag> tags = new List<Tag>(analysis.Tags);

    // Output.
    return tags;
}
/// <summary>
/// Uploads a local image file to the Vision API and runs a full analysis over it.
/// </summary>
/// <param name="imageFilePath">Path of the image file to analyze.</param>
/// <returns>The analysis result for the uploaded image.</returns>
static private async Task<AnalysisResult> UploadAndAnalyzeImage(string imageFilePath)
{
    VisionServiceClient VisionServiceClient = new VisionServiceClient(key, link);
    Console.WriteLine("VisionServiceClient is created");

    using (Stream imageFileStream = File.OpenRead(imageFilePath))
    {
        Console.WriteLine("Calling VisionServiceClient.AnalyzeImageAsync()...");
        VisualFeature[] visualFeatures = new VisualFeature[]
        {
            VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
            VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
            VisualFeature.Tags
        };
        // Bug fix: the method name, the log line and the visualFeatures array all
        // indicate a full analysis, but the original called GetTagsAsync and
        // silently ignored the feature list. Call AnalyzeImageAsync instead.
        AnalysisResult analysisResult = await VisionServiceClient.AnalyzeImageAsync(imageFileStream, visualFeatures);
        return analysisResult;
    }
}
/// <summary>
/// Uploads a local image file to the Vision API (west-central-US endpoint) and runs
/// a full analysis over it.
/// </summary>
/// <param name="imageFilePath">Path of the image file to analyze.</param>
/// <returns>The analysis result for the uploaded image.</returns>
private static async Task<AnalysisResult> UploadAndAnalyzeImage(string imageFilePath)
{
    VisionServiceClient VisionServiceClient = new VisionServiceClient(SubscriptionKey, "https://westcentralus.api.cognitive.microsoft.com/vision/v1.0");
    Console.WriteLine("VisionServiceClient is created");

    using (Stream imageFileStream = File.OpenRead(imageFilePath))
    {
        Console.WriteLine("Calling VisionServiceClient.AnalyzeImageAsync()...");
        VisualFeature[] visualFeatures = new VisualFeature[]
        {
            VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color,
            VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType,
            VisualFeature.Tags
        };
        // Bug fix: the original logged AnalyzeImageAsync and built the feature
        // list but then called GetTagsAsync, ignoring the features. Use
        // AnalyzeImageAsync so the requested visual features are honored.
        AnalysisResult analysisResult = await VisionServiceClient.AnalyzeImageAsync(imageFileStream, visualFeatures);
        return analysisResult;
    }
}
/// <summary>
/// Pulls the current frame from the supplied provider, submits it to the Computer
/// Vision API for tagging, and returns the tags together with the frame.
/// </summary>
/// <param name="videoFrame">Callback that yields the frame to analyze.</param>
/// <returns>A result pairing the detected tags with the analyzed frame.</returns>
private async Task<Result> TaggingAnalysisFunction(Func<VideoFrame> videoFrame)
{
    // Grab the current frame and encode it as JPEG.
    VideoFrame currentFrame = videoFrame();
    var encodedImage = currentFrame.Image.ToMemoryStream(".jpg", g_jpegParams);

    // Submit image to API.
    var analysis = await _visionClient.GetTagsAsync(encodedImage);

    // Package the tags together with the frame they came from.
    var taggedResult = new Result()
    {
        Tags = analysis.Tags,
        Frame = currentFrame
    };
    return taggedResult;
}
/// <summary>
/// Builds a Scene description for the given image by tagging it with the
/// Vision API and deriving the toothbrushing flag from the tags.
/// </summary>
/// <param name="bitmap">Raw image bytes to analyze.</param>
/// <returns>The populated scene.</returns>
public async Task<Scene> CreateSceneAsync(byte[] bitmap)
{
    var client = new VisionServiceClient(OxfordComputerKey);
    var scene = new Scene();

    // Wrap the bytes in a stream, tag the image, and interpret the result.
    using (var stream = new MemoryStream(bitmap))
    {
        var vision = await client.GetTagsAsync(stream);
        scene.IsToothbrushing = IsToothBrushing(vision);
        //scene.Faces = vision.Faces;
    }

    return scene;
}
/// <summary>
/// Click handler: tags the bundled garden sample picture and shows the raw
/// JSON result in the results text box.
/// </summary>
/// <param name="sender">Event source (unused).</param>
/// <param name="e">Event data (unused).</param>
private async void GardenButton_Click(object sender, RoutedEventArgs e)
{
    DisableAllButtons();
    try
    {
        Cursor = Cursors.AppStarting;
        // NOTE(review): the subscription key is empty here — presumably filled in
        // by the developer; confirm before shipping.
        var client = new VisionServiceClient("");
        using (var file = new FileStream("Pictures\\garden.jpg", FileMode.Open))
        {
            var result = await client.GetTagsAsync(file);
            txtResults.Text = JsonConvert.SerializeObject(result, Formatting.Indented);
        }
    }
    finally
    {
        // Bug fix: restore the cursor even when the API call throws; the original
        // only reset it on the success path, leaving a busy cursor after failures.
        Cursor = Cursors.Arrow;
        EnableAllButtons();
    }
}
/// <summary>
/// Generates tags for the image at the given uri and logs the outcome.
/// </summary>
/// <param name="IMGUri">Uri of the image to tag.</param>
protected async Task InitializeGenerateTags(Uri IMGUri)
{
    Log("DESCRIBING IMAGE...");
    string AbsoluteURI = IMGUri.AbsoluteUri;

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Create the Project Oxford Vision API service client.
    VisionServiceClient VisionServiceClient = new VisionServiceClient(Constants.VisionKey);
    Log("VisionServiceClient is created");

    // Request tags for the image url.
    Log("Calling VisionServiceClient.GetTagsAsync()...");
    AnalysisResult Result = await VisionServiceClient.GetTagsAsync(AbsoluteURI);

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------

    Status = "TAG GENERATION DONE.";

    // Log analysis result in the log window
    Log("");
    Log("Get Tags Result:");
    LogAnalysisResult(Result);
}
/// <summary>
/// Method responsible for getting categories of image
/// </summary>
/// <param name="fileURL">file URL</param>
/// <returns>Categories array</returns>
public async static Task<string[]> GetCategories(string fileURL)
{
    try
    {
        string subscriptionKey = ConfigurationManager.AppSettings["CognitiveServiceSubscriptionKey"];
        var visionServiceClient = new VisionServiceClient(subscriptionKey);

        // Despite the method name, the tag endpoint is queried and each tag
        // name is surfaced as a "category".
        var info = await visionServiceClient.GetTagsAsync(fileURL);

        List<string> tags = new List<string>();
        foreach (var tag in info.Tags)
        {
            tags.Add(tag.Name);
        }
        return tags.ToArray();
    }
    catch (System.Exception)
    {
        // Bug fix: rethrow with `throw;` to preserve the original stack trace;
        // the previous `throw ex;` reset it.
        throw;
    }
}
/// <summary>
/// Method responsible for getting categories of image
/// </summary>
/// <param name="fileURL">file URL</param>
/// <returns>Categories array</returns>
public async static Task<string[]> GetCategories(string fileURL)
{
    try
    {
        string subscriptionKey = ConfigurationManager.AppSettings["CognitiveServiceSubscriptionKey"];
        var visionServiceClient = new VisionServiceClient(subscriptionKey);

        // The tag endpoint is used; tag names are reported as categories.
        var info = await visionServiceClient.GetTagsAsync(fileURL);
        return info.Tags.Select(tag => tag.Name).ToArray();
    }
    catch (System.Exception)
    {
        // Bug fix: `throw;` preserves the original stack trace, whereas the
        // previous `throw ex;` discarded it.
        throw;
    }
}
/// <summary> Function which submits a frame to the Computer Vision API for tagging. </summary>
/// <param name="frame"> The video frame to submit. </param>
/// <returns> A <see cref="Task{LiveCameraResult}"/> representing the asynchronous API call,
///     and containing the tags returned by the API. </returns>
private async Task<LiveCameraResult> TaggingAnalysisFunction(VideoFrame frame)
{
    // Encode the frame as JPEG for upload.
    var jpg = frame.Image.ToMemoryStream(".jpg", s_jpegParams);

    var liveResult = new LiveCameraResult();
    try
    {
        // Submit the image and copy the returned tags onto the result.
        var tagsResult = await _visionClient.GetTagsAsync(jpg);
        liveResult.Tags = tagsResult.Tags;
    }
    catch (Exception e)
    {
        // Surface API failures to the user; the result stays empty in that case.
        MessageBox.Show(e.ToString());
    }
    return liveResult;
}
/// <summary>
/// Submits the image stream produced by <paramref name="task"/> to the Computer
/// Vision API and returns the detected tags; failures are logged and yield an
/// empty list (best-effort behavior preserved).
/// </summary>
/// <param name="task">A task that yields the image stream to analyze.</param>
/// <returns>The detected tags, or an empty list when the API call fails.</returns>
private async Task<List<Tag>> TaggingAnalysisFunction(Task<Stream> task)
{
    List<Tag> tags = new List<Tag>();
    try
    {
        // Bug fix: await the stream rather than touching task.Result, which
        // blocks the current thread and can deadlock under a synchronization context.
        Stream imageStream = await task;

        // Submit image to API.
        var analysis = await _visionClient.GetTagsAsync(imageStream);
        foreach (Tag tag in analysis.Tags)
        {
            tags.Add(tag);
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.ToString()); //todo: log properly
        //throw;
    }
    // Output.
    return tags;
}
/// <summary>
/// Gets an array of tags seen in the give image.
/// </summary>
/// <param name="memoryStream">The memory stream of the image to analyse.</param>
/// <returns>A list of Tag objects describing the image in the memory stream.</returns>
public async Task<Tag[]> GetTags(MemoryStream memoryStream)
{
    // Delegate to the Vision API and surface only the tag array.
    AnalysisResult analysis = await visionServiceClient.GetTagsAsync(memoryStream);
    return analysis.Tags;
}
/// <summary>
/// Runs full image analysis, OCR and tag identification on the image at
/// <paramref name="url"/>, and maps the Vision API responses onto the domain
/// <see cref="ImageAnalysisResult"/>. Failures are logged and a partially
/// populated (possibly empty) result is returned.
/// </summary>
/// <param name="url">Url of the image to analyze.</param>
/// <returns>The aggregated analysis result.</returns>
public async Task<ImageAnalysisResult> AnalyzeImageAsync(string url)
{
    var analysisResult = new ImageAnalysisResult();
    try
    {
        // USING Microsoft provided VisionClientLibrary seems not working in NET Core as-is, a fix is required for ExpandoObject
        // see: https://github.com/Microsoft/Cognitive-Vision-DotNetCore/pull/1/commits/9c4647edb400aecd4def330537d5bcd74f126111
        Console.WriteLine("\t\t\tContentAnalyzer.AnalyzeImageAsync(): initializing VisionAPI client");
        var visionApiClient = new VisionServiceClient(m_VisionAPISubscriptionKey, "https://westeurope.api.cognitive.microsoft.com/vision/v1.0");

        var visualFeatures = new List<VisualFeature> { VisualFeature.Adult, VisualFeature.Categories, VisualFeature.Color, VisualFeature.Description, VisualFeature.Faces, VisualFeature.ImageType /*, VisualFeature.Tags */ };
        var details = new List<string> { "Celebrities", "Landmarks" };

        Console.WriteLine("\t\t\tContentAnalyzer.AnalyzeImageAsync(): started image analysis");
        var visionApiResult = await visionApiClient.AnalyzeImageAsync(url, visualFeatures, details).ConfigureAwait(false);

        Console.WriteLine("\t\t\tContentAnalyzer.AnalyzeImageAsync(): executing OCR");
        var ocrResult = await visionApiClient.RecognizeTextAsync(url).ConfigureAwait(false);

        Console.WriteLine("\t\t\tContentAnalyzer.AnalyzeImageAsync(): performing tag identification");
        var tagsResult = await visionApiClient.GetTagsAsync(url).ConfigureAwait(false);

        Console.WriteLine("\t\t\tContentAnalyzer.AnalyzeImageAsync(): analysis completed");

        // Mapping VisionAPI Client entity to domain entity
        analysisResult.AdultContent = new ImageAnalysisAdultContentResult
        {
            AdultScore = visionApiResult.Adult.AdultScore,
            IsAdultContent = visionApiResult.Adult.IsAdultContent,
            IsRacyContent = visionApiResult.Adult.IsRacyContent,
            RacyScore = visionApiResult.Adult.RacyScore
        };
        analysisResult.Colors = new ImageAnalysisColorResult
        {
            AccentColor = visionApiResult.Color.AccentColor,
            DominantColorBackground = visionApiResult.Color.DominantColorBackground,
            DominantColorForeground = visionApiResult.Color.DominantColorForeground,
            IsBWImg = visionApiResult.Color.IsBWImg
        };
        analysisResult.Categories = visionApiResult.Categories.Select(c => new ImageAnalysisCategoryResult { Text = c.Name, Score = c.Score }).OrderByDescending(c => c.Score).ToList();
        analysisResult.Descriptions = visionApiResult.Description.Captions.Select(c => new ImageAnalysisDescriptionResult { Text = c.Text, Score = c.Confidence }).OrderByDescending(c => c.Score).ToList();

        // Merge detected tags from image analysis and image tags
        analysisResult.Tags = tagsResult.Tags.Select(t => new ImageAnalysisTagResult { Text = t.Name, Score = t.Confidence, Hint = t.Hint }).ToList();
        foreach (var t in visionApiResult.Description.Tags)
        {
            analysisResult.Tags.Add(new ImageAnalysisTagResult { Text = t, Score = 0.0, Hint = string.Empty });
        }

        // Bug fix: the original compared against "Male" but lowercase "female",
        // so female faces always mapped to Gender.Unknown. Compare
        // case-insensitively for both genders.
        analysisResult.Faces = visionApiResult.Faces.Select(f => new ImageAnalysisFaceResult
        {
            Age = f.Age,
            Gender = string.Equals(f.Gender, "Male", StringComparison.OrdinalIgnoreCase) ? Gender.Male
                   : string.Equals(f.Gender, "Female", StringComparison.OrdinalIgnoreCase) ? Gender.Female
                   : Gender.Unknown
        }).ToList();

        analysisResult.Text = ocrResult.Regions.Select(r => new ImageAnalysisTextResult()
        {
            Language = ocrResult.Language,
            Orientation = ocrResult.Orientation,
            TextAngle = ocrResult.TextAngle.GetValueOrDefault(),
            Text = string.Join(" ", r.Lines.Select(l => string.Join(" ", l.Words.Select(w => w.Text))))
        }).ToList();

        // Extend analysis by estimating reading time for each transcribed text
        foreach (var text in analysisResult.Text)
        {
            text.WordCount = TextTokenizer.GetWordCount(text.Text);
            text.ReadingTimeInMinutes = ReadingTimeEstimator.GetEstimatedReadingTime(text.WordCount, text.Language);
            analysisResult.WatchingTimeInMinutes += text.ReadingTimeInMinutes;
        }

        // Add an additional default time for estimating how long it will take to the user to watch the picture
        analysisResult.WatchingTimeInMinutes += DefaultImageWatchingTime;
    }
    catch (Exception ex)
    {
        Console.WriteLine($"\t\t\tContentAnalyzer.AnalyzeImageAsync(): an error occured while analyzing image - {ex.Message}");
    }
    return analysisResult;
}
/// <summary>
/// Uploads the picked media file's stream and returns the tag analysis for it.
/// </summary>
/// <param name="file">The media file whose stream is submitted for tagging.</param>
/// <returns>The analysis result containing the detected tags.</returns>
private async Task<AnalysisResult> UploadAndGetTagsForImage(MediaFile file)
{
    var imageStream = file.GetStream();
    var tagAnalysis = await visionServiceClient.GetTagsAsync(imageStream);
    return tagAnalysis;
}
/// <summary>
/// Sends a url to Project Oxford and generates tags for it
/// </summary>
/// <param name="imageUrl">The url of the image to generate tags for</param>
/// <returns>The tag analysis returned by the service.</returns>
private async Task<AnalysisResult> GenerateTagsForUrl(string imageUrl)
{
    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE STARTS HERE
    // -----------------------------------------------------------------------

    // Build the Vision API client.
    VisionServiceClient VisionServiceClient = new VisionServiceClient(SubscriptionKey);
    Log("VisionServiceClient is created");

    // Request tags for the image at the supplied url.
    Log("Calling VisionServiceClient.GetTagsAsync()...");
    AnalysisResult tagResult = await VisionServiceClient.GetTagsAsync(imageUrl);
    return tagResult;

    // -----------------------------------------------------------------------
    // KEY SAMPLE CODE ENDS HERE
    // -----------------------------------------------------------------------
}