/// <summary>
/// Entry point: detects text in the image given as the first command-line
/// argument and prints each annotation with its bounding polygon.
/// </summary>
private static void Main(string[] args)
{
    FoodlandsTextDetection sample = new FoodlandsTextDetection();
    if (args.Length == 0)
    {
        Console.WriteLine(usage);
        return;
    }
    string imagePath = args[0];

    // Create a new Cloud Vision client authorized via Application
    // Default Credentials.
    VisionService vision = sample.CreateAuthorizedClient();

    // Use the client to get text annotations for the given image.
    IList<AnnotateImageResponse> result = sample.DetectText(vision, imagePath);

    // FIX: guard against a null/empty response list before indexing result[0];
    // the original would throw here if the API returned no responses.
    if (result == null || result.Count == 0)
    {
        Console.WriteLine("No response received from the Vision API.");
    }
    else if (result[0].TextAnnotations != null)
    {
        // Loop through and output text annotations for the image.
        foreach (var response in result)
        {
            Console.WriteLine("Text found in image: " + imagePath);
            Console.WriteLine();
            foreach (var text in response.TextAnnotations)
            {
                Console.WriteLine(text.Description);
                Console.Write("(Bounding Polygon: ");
                var index = 0;
                foreach (var point in text.BoundingPoly.Vertices)
                {
                    if (index > 0)
                    {
                        Console.Write(", ");
                    }
                    Console.Write("[" + point.X + "," + point.Y + "]");
                    index++;
                }
                Console.Write(")");
                Console.WriteLine(Environment.NewLine);
            }
        }
    }
    else if (result[0].Error == null)
    {
        Console.WriteLine("No text found.");
    }
    else
    {
        Console.WriteLine("Not a valid image.");
    }
    Console.WriteLine("Press any key...");
    Console.ReadKey();
}
// Get labels from image in memory.
// Returns the Vision API responses for a LABEL_DETECTION request, or null
// when the request fails (the failure is logged via Trace).
public static IList<AnnotateImageResponse> GetLabels(byte[] imageArray)
{
    try
    {
        VisionService vision = CreateAuthorizedClient();

        // Convert image to Base64 encoded for JSON ASCII text based request.
        string imageContent = Convert.ToBase64String(imageArray);

        // Post label detection request to the Vision API.
        var responses = vision.Images.Annotate(
            new BatchAnnotateImagesRequest()
        {
            Requests = new[]
            {
                new AnnotateImageRequest()
                {
                    Features = new[] { new Feature() { Type = "LABEL_DETECTION" } },
                    Image = new Image() { Content = imageContent }
                }
            }
        }).Execute();
        return responses.Responses;
    }
    catch (Exception ex)
    {
        // FIX: log the full exception (type + message + stack trace); the
        // original logged only ex.StackTrace, dropping the message entirely.
        Trace.TraceError(ex.ToString());
    }
    return null;
}
// Handles the Analyze button: describes the captured photo via the Vision
// service and shows the first caption. A photo must exist first.
private async void AnalyzeButton_Clicked(object sender, EventArgs e)
{
    // Guard: nothing to analyze until a photo has been taken.
    if (foto == null)
    {
        await DisplayAlert("Error", "Debes tomar la fotografía", "OK");
        return;
    }

    try
    {
        Loading(true);
        // Phase 2 - Vision: describe the captured picture.
        var descripcion = await VisionService.DescribePicture(foto);
        DescriptionLabel.Text = descripcion.Description.Captions.First().Text;
    }
    catch (Exception ex)
    {
        await DisplayAlert("Error", "Excepción: " + ex.Message, "OK");
    }
    finally
    {
        Loading(false);
    }
}
/// <summary>
/// Detect text within an image using the Cloud Vision API.
/// </summary>
/// <param name="vision">an authorized Cloud Vision client.</param>
/// <param name="imagePath">the path where the image is stored.</param>
/// <returns>a list of text detected by the Vision API for the image.
/// </returns>
public IList<AnnotateImageResponse> DetectText(VisionService vision, string imagePath)
{
    Console.WriteLine("Detecting Text...");

    // Read the file and Base64-encode it for the JSON request payload.
    var encodedImage = Convert.ToBase64String(System.IO.File.ReadAllBytes(imagePath));

    // Build a single TEXT_DETECTION request for this image.
    var request = new AnnotateImageRequest()
    {
        Features = new[] { new Feature() { Type = "TEXT_DETECTION" } },
        Image = new Image() { Content = encodedImage }
    };
    var batch = new BatchAnnotateImagesRequest() { Requests = new[] { request } };

    // Post the text-detection request and return the per-image responses.
    return vision.Images.Annotate(batch).Execute().Responses;
}
/// <summary>
/// Entry point: detects text in the bundled Sample1.jpg and prints the
/// first annotation's full text.
/// </summary>
private static void Main(string[] args)
{
    Program sample = new Program();
    string imagePath = @"Sample1.jpg";

    // Create a new Cloud Vision client authorized via Application
    // Default Credentials.
    VisionService vision = sample.CreateAuthorizedClient();

    // Use the client to get text annotations for the given image.
    IList<AnnotateImageResponse> result = sample.DetectText(vision, imagePath);

    // FIX: guard against a null/empty response list before indexing result[0];
    // the original would throw here if the API returned no responses.
    if (result == null || result.Count == 0)
    {
        Console.WriteLine("No response received from the Vision API.");
    }
    else if (result[0].TextAnnotations != null)
    {
        Console.WriteLine(result[0].TextAnnotations[0].Description);
    }
    else if (result[0].Error == null)
    {
        Console.WriteLine("No text found.");
    }
    else
    {
        Console.WriteLine("Not a valid image.");
    }
    Console.WriteLine("Press any key...");
    Console.ReadKey();
}
// Runs handwriting OCR on the incoming image and shows the recognized text.
async Task OCRHandwriting(Stream incomingImage)
{
    var service = new VisionService();
    descriptionLabel.Text = await service.OCRHandwriting(incomingImage);
}
/// <summary>
/// Detect labels for an image using the Cloud Vision API.
/// </summary>
/// <param name="vision">an authorized Cloud Vision client.</param>
/// <param name="base64Image">Base64 encoded for JSON ASCII text based request</param>
/// <returns>a list of labels detected by the Vision API for the image.
/// </returns>
public static IList<AnnotateImageResponse> DetectLabels(VisionService vision, string base64Image)
{
    // Build a single LABEL_DETECTION request for the pre-encoded image.
    var labelRequest = new AnnotateImageRequest()
    {
        Features = new[] { new Feature { Type = "LABEL_DETECTION" } },
        Image = new Image { Content = base64Image }
    };
    var batch = new BatchAnnotateImagesRequest { Requests = new[] { labelRequest } };

    // Post the label-detection request and return the per-image responses.
    return vision.Images.Annotate(batch).Execute().Responses;
}
// Wires up the injected vision service and configures JSON serialization
// to use camelCase property names.
public Day20(VisionService visionService)
{
    _visionService = visionService;

    var resolver = new CamelCasePropertyNamesContractResolver();
    _jsonSerializerSettings = new JsonSerializerSettings() { ContractResolver = resolver };
}
// Builds the Vision API client used by this instance.
//
// SECURITY(review): the API key below is hard-coded in source. It should be
// treated as compromised — rotate it and load it from configuration or a
// secret store instead; anyone with read access to this file can bill
// against the key. TODO confirm and rotate.
private void Configure()
{
    _visionService = new VisionService(new BaseClientService.Initializer
    {
        ApiKey = "AIzaSyDbuBnG-8f41OVET1BXXoHjhZRTlQFFnvU",
        // GZip disabled — presumably to simplify inspecting raw HTTP traffic; verify.
        GZipEnabled = false
    });
}
// Sends one batch OCR request covering all of the given files and returns
// the raw batch response from the Vision API.
public BatchAnnotateImagesResponse SendGoogleOcrRequest(List<StorageFile> files)
{
    VisionService service = this.CreateService();
    return service.Images.Annotate(this.CreateRequest(files)).Execute();
}
// Stores the injected services this factory uses to build game views.
public GameViewFactory(VisionService visionService, PathingService pathingService)
{
    VisionService = visionService;
    PathingService = pathingService;
}
// Smoke test: runs text detection against a known local image file.
public void TestDetectText()
{
    TestInitialize();

    VisionService visionService = service.CreateAuthorizedClient();
    var imageBytes = System.IO.File.ReadAllBytes(@"C:\TravelersGuide\Images\EidMubarak.jpg");

    service.DetectText(visionService, imageBytes);
}
// Runs TEXT_DETECTION on the image at <paramref name="path"/> and returns
// the detected text collapsed onto a single line (newlines become spaces).
// Returns "" when no text is found.
private async Task<string> AnnotateAsync(string path)
{
    // Create the service.
    var service = new VisionService(new BaseClientService.Initializer { ApiKey = "" });

    // Build a single TEXT_DETECTION request around the Base64-encoded image.
    var imgReq = new AnnotateImageRequest
    {
        Image = new Google.Apis.Vision.v1.Data.Image
        {
            Content = Convert.ToBase64String(File.ReadAllBytes(path))
        },
        Features = new List<Feature> { new Feature() { Type = "TEXT_DETECTION" } }
    };
    var batch = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest> { imgReq }
    };

    // Execute and bail out early when there is no usable response.
    var result = await service.Images.Annotate(batch).ExecuteAsync();
    if (!(result?.Responses?.Count > 0) || result.Responses[0].TextAnnotations == null)
    {
        return "";
    }

    // The first annotation carries the full detected text; flatten it.
    var desc = result.Responses[0].TextAnnotations[0].Description;
    var words = desc.Split(new[] { "\r\n", "\r", "\n" }, StringSplitOptions.None);
    return String.Join(" ", words);
}
// Creates a Vision client from the service-account JSON supplied in
// connectionString, scoped to the Cloud Platform.
public VisionProvider(string connectionString)
{
    var credential = GoogleCredential
        .FromJson(connectionString)
        .CreateScoped(VisionService.Scope.CloudPlatform);

    var initializer = new BaseClientService.Initializer()
    {
        HttpClientInitializer = credential,
        ApplicationName = "GV Service Account",
    };
    this.visionService = new VisionService(initializer);
}
// OCRs the incoming photo in the given language, translates the recognized
// text into English, and displays the result.
async Task Translate(string language, Stream incomingImage)
{
    var vision = new VisionService();
    var recognizedText = await vision.OCRPhoto(incomingImage, language);

    var translator = new TranslateService();
    descriptionLabel.Text = await translator.TranslateText(recognizedText, language, LanguageCodes.English);
}
/// Creates the service.
private static VisionService CreateService(string applicationName, IConfigurableHttpClientInitializer credentials)
{
    var initializer = new BaseClientService.Initializer()
    {
        ApplicationName = applicationName,
        HttpClientInitializer = credentials
    };
    return new VisionService(initializer);
}
// Builds a gzip-enabled Vision client authorized with the given credential.
private VisionService CreateService(GoogleCredential credential)
{
    var initializer = new BaseClientService.Initializer()
    {
        HttpClientInitializer = credential,
        ApplicationName = ApplicationName,
        GZipEnabled = true,
    };
    return new VisionService(initializer);
}
// Integration test: the photo service should return at least one photo
// from the real table/blob backing stores.
public void Get_All_Photos_Count()
{
    var photoService = new PhotoService(
        new TableRepository(),
        new BlobRepository(),
        new VisionService());

    var photos = photoService.GetAll();

    Assert.IsTrue(photos.Count > 0);
}
// Initializes the base state, then builds a Vision client authorized with
// the service-account key file whose path is given in accessKey.
public override void initialize(string accessKey, string baseLang)
{
    base.initialize(accessKey, baseLang);

    var credential = Google.Apis.Auth.OAuth2.GoogleCredential
        .FromFile(accessKey)
        .CreateScoped(new[] { VisionService.Scope.CloudPlatform });

    visionService = new VisionService(new BaseClientService.Initializer
    {
        HttpClientInitializer = credential,
        GZipEnabled = false
    });
}
// Runs TEXT_DETECTION on the raw image bytes and returns the full detected
// text through <paramref name="FullText"/>.
// Returns 0 on success, -1 when no text was found or the API call failed.
private int DetectTextWord(VisionService vision, byte[] getImage, ref string FullText)
{
    int result = 1;
    Console.WriteLine("Detecting image to texts...");

    // Convert image to Base64 encoded for JSON ASCII text based request.
    string imageContent = Convert.ToBase64String(getImage);
    try
    {
        // Post text detection request to the Vision API.
        var responses = vision.Images.Annotate(
            new BatchAnnotateImagesRequest()
        {
            Requests = new[]
            {
                new AnnotateImageRequest()
                {
                    Features = new[] { new Feature() { Type = "TEXT_DETECTION" } },
                    Image = new Image() { Content = imageContent }
                }
            }
        }).Execute();

        // FIX: also require a non-empty TextAnnotations list. The original only
        // checked responses.Responses != null and then indexed TextAnnotations[0],
        // so a text-free image threw a NullReferenceException that the catch
        // below misreported as an API-access failure.
        if (responses.Responses != null &&
            responses.Responses.Count > 0 &&
            responses.Responses[0].TextAnnotations != null &&
            responses.Responses[0].TextAnnotations.Count > 0)
        {
            FullText = responses.Responses[0].TextAnnotations[0].Description;
            Console.WriteLine("SUCCESS:Cloud Vision API Access.");
            result = 0;
        }
        else
        {
            FullText = "";
            Console.WriteLine("ERROR : No text found.");
            result = -1;
        }
    }
    catch (Exception)
    {
        FullText = "";
        Console.WriteLine("ERROR : Not Access Cloud Vision API.");
        result = -1;
    }
    return result;
}
// Builds the Vision client used by the robot, authorized with the
// instance-level Google credential.
private VisionService CreateService()
{
    var initializer = new BaseClientService.Initializer()
    {
        HttpClientInitializer = googleCredential,
        ApplicationName = "G1ANT-Robot",
        GZipEnabled = true,
    };
    return new VisionService(initializer);
}
// Timer-triggered entry point: loads settings from app configuration, wires
// up the translator/twitter/vision/analysis services by hand, then runs one
// synchronous analysis pass. Any failure is logged, never rethrown.
public static void Run(TimerInfo myTimer, TraceWriter log)
{
    try
    {
        // Settings pulled from application configuration.
        var translatorConfig = new TranslatorSetting
        {
            APIKey = ConfigurationManager.AppSettings["TranslatorAPIKey"],
        };
        var twitterConfig = new TwitterSetting
        {
            ConsumerKey = ConfigurationManager.AppSettings["TwitterConsumerKey"],
            ConsumerSecret = ConfigurationManager.AppSettings["TwitterConsumerSecret"],
            AccessToken = ConfigurationManager.AppSettings["TwitterAccessToken"],
            AccessTokenSecret = ConfigurationManager.AppSettings["TwitterAccessTokenSecret"],
        };
        var visionConfig = new VisionSetting
        {
            APIKey = ConfigurationManager.AppSettings["VisionAPIAPIKey"],
            Endpoint = ConfigurationManager.AppSettings["VisionAPIEndpoint"],
        };
        var cosmosConfig = new CosmosDbSetting
        {
            EndpointUri = ConfigurationManager.AppSettings["CosmosDbEndpointUri"],
            PrimaryKey = ConfigurationManager.AppSettings["CosmosDbPrimaryKey"],
        };
        var analyzeConfig = new AnalyzeSetting
        {
            Keyword = ConfigurationManager.AppSettings["AnalyzeKeyword"],
        };

        // Service graph, wired manually (no DI container in this function).
        var logger = new TraceWriterLogger(log);
        var translator = new TranslatorService(Options.Create(translatorConfig), logger);
        var twitter = new TwitterService(Options.Create(twitterConfig), logger);
        var vision = new VisionService(Options.Create(visionConfig), translator, logger);
        var imageRepository = new CategorizedImageRepository(Options.Create(cosmosConfig), logger);
        var analyzer = new AnalyzeService(Options.Create(analyzeConfig), twitter, vision, imageRepository, logger);

        // Initialize, then run a single analysis pass to completion.
        twitter.InitializeAsync().Wait();
        analyzer.AnalyzeAsync().Wait();
    }
    catch (Exception ex)
    {
        log.Error($"Error {ex.ToString()}");
    }
}
// Verifies DetectLabels returns a "cat" label for a known cat image.
public void TestDetectLabelsForValidResponse()
{
    var sample = new GoogleCloudSamples.LabelDetectionSample();
    VisionService vision = sample.CreateAuthorizedClient();

    var result = sample.DetectLabels(vision, @"..\..\..\data\cat.jpg");

    // Confirm that DetectLabels returns expected result for test image.
    var label = result[0].LabelAnnotations[0];
    Assert.IsNotNull(label.Description);
    Assert.IsTrue(label.Description.Contains("cat"));
}
// Builds an OCR Vision client from a freshly created credential.
private VisionService CreateService()
{
    GoogleCredential credential = this.CreateCredential();

    var initializer = new BaseClientService.Initializer()
    {
        HttpClientInitializer = credential,
        ApplicationName = "OCR",
        GZipEnabled = true,
    };
    return new VisionService(initializer);
}
// [END detect_labels]
/// <summary>
/// Runs label detection on the image at <paramref name="inUrl"/> and folds
/// each label (description, score, mid) into a ModerationResult.
/// Never throws: failures are logged and the (possibly empty) result is
/// still returned.
/// </summary>
internal async Task <ModerationResult> doLabels(string inUrl)
{
    string url = "";    // NOTE(review): unused local — candidate for removal.
    ModerationResult res = new ModerationResult("");
    try
    {
        // Create a new Cloud Vision client authorized via Application
        // Default Credentials
        VisionService vision = service;
        if (vision == null)
        {
            // Logged but not short-circuited: DetectLabels below is expected
            // to fail and land in the catch block when vision is null.
            MainParams.nlog.Trace("!!!!!!! GOOGLE VISION NULL");
        }
        // Use the client to get label annotations for the given image
        // [START parse_response]
        IList <AnnotateImageResponse> result = await DetectLabels(vision, inUrl);
        // Check if label annotations were found
        if (result != null)
        {
            MainParams.nlog.Trace("Labels for image: " + inUrl);
            // Loop through and output label annotations for the image
            foreach (var response in result)
            {
                foreach (var label in response.LabelAnnotations)
                {
                    // A missing score is treated as 0.
                    double _score = label.Score == null ? 0 : Convert.ToDouble(label.Score.Value);
                    res.AddClassScore(label.Description.Trim(), _score, label.Mid);
                    //MainParams.nlog.Trace(label.Description + " (score:" + _score + ")");
                }
            }
            res.json = res.ToStringNoNewline();
            // Truncated to 499 chars — presumably a storage column limit; verify.
            if (res.json.Length > 499)
            {
                res.json = res.json.Substring(0, 499);
            }
            MainParams.nlog.Trace(res.ToStringNoNewline());
        }
        else
        {
            MainParams.nlog.Trace("No labels found.");
        }
    }
    catch (Exception e)
    {
        // Swallow-and-log by design: moderation must not break the caller.
        MainParams.nlog.Debug("***NewLogs; GOOGLE doLabels!!! ;EX=;" + e.Message + ";Source=" + e.Source + ";stack=" + e.StackTrace + ";e.inner=" + e.InnerException);
        MainParams.nlog.Debug(e);
    }
    return(res);
    // [END parse_response]
}
// Chooses default-credentials vs key-file authorization depending on whether
// a service-account key path was supplied.
public GoogleVisionService(string serviceAccountKeyPath, int maxResults)
{
    _visionService = string.IsNullOrEmpty(serviceAccountKeyPath)
        ? GoogleAuthenticator.CreateDefaultAuthorizedClient()
        : GoogleAuthenticator.CreateAuthorizedClient(serviceAccountKeyPath);
    _maxResults = maxResults;
}
// Confirms text isn't found for an image that contains no text.
public void TestDetectTextForImageContainingNoText()
{
    // FIX: removed a stray empty statement (";;") after the initializer.
    var sample = new GoogleCloudSamples.TextDetectionSample();
    VisionService vision = sample.CreateAuthorizedClient();

    var result = sample.DetectText(vision, @"..\..\..\test\data\no-text.jpg");

    // Only assert when the API call itself succeeded; with an API error the
    // absence of annotations would be meaningless.
    if (result[0].Error == null)
    {
        Assert.IsNull(result[0].TextAnnotations);
    }
}
// Verifies DetectText finds the word "Succulents" in a known test image.
public void TestDetectTextForValidResponse()
{
    var sample = new GoogleCloudSamples.TextDetectionSample();
    VisionService vision = sample.CreateAuthorizedClient();

    var result = sample.DetectText(vision, @"..\..\..\test\data\succulents.jpg");

    // Confirm that DetectText returns expected result for test image.
    var text = result[0].TextAnnotations[0];
    Assert.IsNotNull(text.Description);
    Assert.IsTrue(text.Description.Contains("Succulents"));
}
// Confirms an invalid (non-image) file does not produce label annotations.
public void TestDetectLabelsForInvalidImage()
{
    var sample = new GoogleCloudSamples.LabelDetectionSample();
    VisionService vision = sample.CreateAuthorizedClient();

    var result = sample.DetectLabels(vision, @"..\..\..\data\bad.txt");
    var response = result[0];

    // FIX: the original indexed LabelAnnotations[0] unconditionally, so the
    // expected case (no labels for a bad image → null/empty LabelAnnotations)
    // crashed with a NullReferenceException instead of passing. Guard first,
    // and fail only when a non-empty label description actually came back.
    if (response.LabelAnnotations != null &&
        response.LabelAnnotations.Count > 0 &&
        !String.IsNullOrEmpty(response.LabelAnnotations[0].Description))
    {
        Assert.Fail();
    }
}
// Extracts the full text from the raw image bytes via the Cloud Vision API.
// Returns DetectTextWord's status code (0 = success, non-zero = failure).
public int getTextAndRoi(byte[] getImage, ref string FullText)
{
    GCPVisonAPI sample = new GCPVisonAPI();

    // Create a client authorized via Application Default Credentials.
    VisionService vision = sample.CreateAuthorizedClient();

    // Ask the API for the full text of the image and pass it back out.
    string detectedText = "";
    int status = sample.DetectTextWord(vision, getImage, ref detectedText);
    FullText = detectedText;
    return status;
}