public void BatchAnnotateImages()
{
    // Arrange: strict mock so any un-setup gRPC call fails the test.
    moq::Mock<ImageAnnotator.ImageAnnotatorClient> grpcMock =
        new moq::Mock<ImageAnnotator.ImageAnnotatorClient>(moq::MockBehavior.Strict);
    grpcMock.Setup(x => x.CreateOperationsClient())
        .Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests = { new AnnotateImageRequest(), },
    };
    BatchAnnotateImagesResponse expectedResponse = new BatchAnnotateImagesResponse
    {
        Responses = { new AnnotateImageResponse(), },
    };
    grpcMock
        .Setup(x => x.BatchAnnotateImages(request, moq::It.IsAny<grpccore::CallOptions>()))
        .Returns(expectedResponse);
    ImageAnnotatorClient client = new ImageAnnotatorClientImpl(grpcMock.Object, null);

    // Act: use the convenience overload that takes the bare request sequence;
    // the wrapper must rebuild an equivalent request object internally.
    BatchAnnotateImagesResponse actual = client.BatchAnnotateImages(request.Requests);

    // Assert: the canned response is forwarded unchanged and all setups were hit.
    xunit::Assert.Same(expectedResponse, actual);
    grpcMock.VerifyAll();
}
static void Main(string[] args)
{
    // Detection settings for the Vision request.
    string language = "en";
    string type = "DOCUMENT_TEXT_DETECTION";
    byte[] file = System.IO.File.ReadAllBytes("ATM.png");

    // Build the batch request with a single DOCUMENT_TEXT_DETECTION entry.
    var annotateRequest = new AnnotateImageRequest()
    {
        Features = new List<Feature>() { new Feature() { Type = type, MaxResults = 1 }, },
        ImageContext = new ImageContext() { LanguageHints = new List<string>() { language } },
        Image = new Image() { Content = Convert.ToBase64String(file) }
    };
    var batchRequest = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest> { annotateRequest }
    };

    // Serialize the request (result is currently unused beyond this point).
    var temp = JsonConvert.SerializeObject(batchRequest);
}
/// <summary>
/// Run image detection and annotation for a batch of images.
/// </summary>
/// <param name="request">
/// The request object containing all of the parameters for the API call.
/// </param>
/// <param name="callSettings">
/// If not null, applies overrides to this RPC call.
/// </param>
/// <returns>
/// A Task containing the RPC response.
/// </returns>
public override Task<BatchAnnotateImagesResponse> BatchAnnotateImagesAsync(BatchAnnotateImagesRequest request, CallSettings callSettings = null)
{
    // Give partial-method hooks a chance to tweak request/settings first.
    Modify_BatchAnnotateImagesRequest(ref request, ref callSettings);
    return _callBatchAnnotateImages.Async(request, callSettings);
}
/// <summary>
/// Run image detection and annotation for a batch of images.
/// </summary>
/// <param name="request">
/// The request object containing all of the parameters for the API call.
/// </param>
/// <param name="callSettings">
/// If not null, applies overrides to this RPC call.
/// </param>
/// <returns>
/// The RPC response.
/// </returns>
public override BatchAnnotateImagesResponse BatchAnnotateImages(BatchAnnotateImagesRequest request, gaxgrpc::CallSettings callSettings = null)
{
    // Give partial-method hooks a chance to tweak request/settings first.
    Modify_BatchAnnotateImagesRequest(ref request, ref callSettings);
    return _callBatchAnnotateImages.Sync(request, callSettings);
}
/// <summary>
/// Builds a single batch TEXT_DETECTION request covering every file in
/// <paramref name="files"/>.
/// </summary>
/// <param name="files">Storage files whose contents will be annotated.</param>
/// <returns>A populated <see cref="BatchAnnotateImagesRequest"/> with one entry per file.</returns>
private BatchAnnotateImagesRequest CreateRequest(List<StorageFile> files)
{
    BatchAnnotateImagesRequest batchRequest = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>()
    };
    foreach (StorageFile file in files)
    {
        // Fix: GetAwaiter().GetResult() instead of .Result so a conversion
        // failure surfaces the original exception rather than an
        // AggregateException. NOTE(review): this still blocks on async work to
        // keep the synchronous signature — consider making this method async.
        byte[] bytes = this.ConvertFileToByteArray(file).GetAwaiter().GetResult();
        batchRequest.Requests.Add(new AnnotateImageRequest()
        {
            Features = new List<Feature>()
            {
                new Feature() { Type = "TEXT_DETECTION", MaxResults = 1 },
            },
            ImageContext = new ImageContext() { LanguageHints = Constants.languages },
            Image = new Image() { Content = Convert.ToBase64String(bytes) }
        });
    }
    return batchRequest;
}
public async stt::Task BatchAnnotateImagesRequestObjectAsync()
{
    // Arrange: strict mock so any un-setup gRPC call fails the test.
    moq::Mock<ImageAnnotator.ImageAnnotatorClient> grpcMock =
        new moq::Mock<ImageAnnotator.ImageAnnotatorClient>(moq::MockBehavior.Strict);
    grpcMock.Setup(x => x.CreateOperationsClient())
        .Returns(new moq::Mock<lro::Operations.OperationsClient>().Object);
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests = { new AnnotateImageRequest(), },
        Parent = "parent7858e4d0",
    };
    BatchAnnotateImagesResponse expectedResponse = new BatchAnnotateImagesResponse
    {
        Responses = { new AnnotateImageResponse(), },
    };
    grpcMock
        .Setup(x => x.BatchAnnotateImagesAsync(request, moq::It.IsAny<grpccore::CallOptions>()))
        .Returns(new grpccore::AsyncUnaryCall<BatchAnnotateImagesResponse>(
            stt::Task.FromResult(expectedResponse), null, null, null, null));
    ImageAnnotatorClient client = new ImageAnnotatorClientImpl(grpcMock.Object, null);

    // Act + Assert: exercise both async overloads (CallSettings and CancellationToken).
    BatchAnnotateImagesResponse viaCallSettings = await client.BatchAnnotateImagesAsync(
        request, gaxgrpc::CallSettings.FromCancellationToken(st::CancellationToken.None));
    xunit::Assert.Same(expectedResponse, viaCallSettings);
    BatchAnnotateImagesResponse viaCancellationToken = await client.BatchAnnotateImagesAsync(
        request, st::CancellationToken.None);
    xunit::Assert.Same(expectedResponse, viaCancellationToken);
    grpcMock.VerifyAll();
}
public static void IniciarServicio()
{
    // Set up the Vision service with its credential.
    VisionSettings vs = new VisionSettings();
    var credential = vs.CreateCredential();
    var service = vs.CreateService(credential);

    // Build one Spanish-hinted TEXT_DETECTION request per image file.
    BatchAnnotateImagesRequest batchRequest = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>()
    };
    var infoImagenes = ObtenerInfoArchivos();
    foreach (var imagen in infoImagenes)
    {
        byte[] file = File.ReadAllBytes(imagen.FullName.ToString());
        batchRequest.Requests.Add(new AnnotateImageRequest()
        {
            Features = new List<Feature>() { new Feature() { Type = "TEXT_DETECTION", MaxResults = 1 }, },
            ImageContext = new ImageContext() { LanguageHints = new List<string>() { "es" } },
            Image = new Image() { Content = Convert.ToBase64String(file) }
        });
    }

    // Execute the batch and append each recognized text to D:\<image name>.txt,
    // echoing it to the console as well.
    var annotate = service.Images.Annotate(batchRequest);
    BatchAnnotateImagesResponse batchAnnotateImagesResponse = annotate.Execute();
    var cantidadRespuestasImagenes = batchAnnotateImagesResponse.Responses.Count();
    for (int i = 0; i < cantidadRespuestasImagenes; i++)
    {
        AnnotateImageResponse annotateImageResponse = batchAnnotateImagesResponse.Responses[i];
        if (annotateImageResponse.TextAnnotations != null)
        {
            // Responses come back in request order, so index i maps back to its file.
            var texto = annotateImageResponse.TextAnnotations[0].Description;
            using (var tw = new StreamWriter(@"D:\" + infoImagenes[i].Name.ToString() + ".txt", true))
            {
                tw.WriteLine(texto);
            }
            Console.WriteLine(texto);
        }
    }
}
static void Main(string[] args)
{
    var service = CreateService(CreateCredential());
    var file = File.ReadAllBytes(@"C:\Users\Loui\Downloads\IMG_20171207_212819.jpg");

    // Single TEXT_DETECTION request for one local image.
    BatchAnnotateImagesRequest batchRequest = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>
        {
            new AnnotateImageRequest()
            {
                Features = new List<Feature>() { new Feature() { Type = "TEXT_DETECTION" }, },
                ImageContext = new ImageContext() { },
                Image = new Image() { Content = Convert.ToBase64String(file) }
            }
        }
    };

    var annotate = service.Images.Annotate(batchRequest);
    BatchAnnotateImagesResponse batchAnnotateImagesResponse = annotate.Execute();
    if (!batchAnnotateImagesResponse.Responses.Any())
    {
        return;
    }
    foreach (var res in batchAnnotateImagesResponse.Responses)
    {
        // Per-response error reporting; errors and results are mutually exclusive.
        if (res.Error != null)
        {
            if (res.Error.Message != null)
            {
                Console.WriteLine(res.Error.Message);
            }
            continue;
        }
        if (res.TextAnnotations == null || !res.TextAnnotations.Any())
        {
            continue;
        }
        foreach (var text in res.TextAnnotations)
        {
            Console.WriteLine("Description:" + text.Description + " - Etag:" + text.ETag + " - Locale:" + text.Locale + " - Score:" + text.Score);
        }
    }
}
/// <summary>
/// Runs TEXT_DETECTION over the image at <paramref name="path"/> and returns
/// the recognized text as a single space-separated keyword string.
/// </summary>
private async Task<string> AnnotateAsync(string path)
{
    // Vision service with an (empty) API key placeholder.
    var service = new VisionService(new BaseClientService.Initializer { ApiKey = "" });
    var bytes = File.ReadAllBytes(path);

    // One TEXT_DETECTION request wrapped in a batch of one.
    var request = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>
        {
            new AnnotateImageRequest
            {
                Image = new Google.Apis.Vision.v1.Data.Image { Content = Convert.ToBase64String(bytes) },
                Features = new List<Feature> { new Feature() { Type = "TEXT_DETECTION" } }
            }
        }
    };

    var result = await service.Images.Annotate(request).ExecuteAsync();

    // Collapse the multi-line OCR description into one space-separated string.
    string keywords = "";
    if (result?.Responses?.Count > 0 && result.Responses[0].TextAnnotations != null)
    {
        var desc = result.Responses[0].TextAnnotations[0].Description;
        var words = desc.Split(new[] { "\r\n", "\r", "\n" }, StringSplitOptions.None);
        keywords = String.Join(" ", words);
    }
    return keywords;
}
/// <summary>
/// Reads the image at <paramref name="imgPath"/> and sends it to the Google
/// Vision API with the given detection type and language hint.
/// </summary>
/// <param name="imgPath">Path of the image file to annotate.</param>
/// <param name="language">Language hint placed in the image context.</param>
/// <param name="type">Vision feature type, e.g. TEXT_DETECTION.</param>
/// <param name="jsonPath">Path of the credential JSON used to build the service.</param>
/// <returns>
/// The first annotation response; an empty response when the API reported an
/// error (recorded in <c>Error</c>) or returned nothing.
/// </returns>
public AnnotateImageResponse AnalyseImage(string imgPath, string language, string type, string jsonPath)
{
    OAuthService oAuth = new OAuthService();
    var credential = oAuth.CreateCredential(jsonPath);
    var service = oAuth.CreateService(credential);
    service.HttpClient.Timeout = new TimeSpan(1, 1, 1);

    byte[] imageBytes = File.ReadAllBytes(imgPath);
    var batchRequest = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>
        {
            new AnnotateImageRequest()
            {
                Features = new List<Feature>() { new Feature() { Type = type }, },
                ImageContext = new ImageContext() { LanguageHints = new List<string>() { language } },
                Image = new Image() { Content = Convert.ToBase64String(imageBytes) }
            }
        }
    };

    BatchAnnotateImagesResponse batchResponse = service.Images.Annotate(batchRequest).Execute();
    if (batchResponse.Responses.Any())
    {
        AnnotateImageResponse first = batchResponse.Responses[0];
        if (first.Error == null)
        {
            return first;
        }
        if (first.Error.Message != null)
        {
            // Record the API error for the caller; fall through to an empty response.
            Error = first.Error.Message;
        }
    }
    return new AnnotateImageResponse();
}
/// <summary>
/// Sends the bitmap to the Google Vision API and stores the recognized text
/// (newlines replaced by ';') in <c>TextResult</c>, or the API error in <c>Error</c>.
/// </summary>
/// <param name="imagem">Bitmap to annotate.</param>
/// <param name="language">Language hint placed in the image context.</param>
/// <param name="type">Vision feature type, e.g. TEXT_DETECTION.</param>
/// <returns>Always an empty string; results are exposed via <c>TextResult</c>/<c>Error</c>.</returns>
public async Task<string> GetText(System.Drawing.Bitmap imagem, string language, string type)
{
    TextResult = JsonResult = "";
    var credential = CreateCredential();
    var service = CreateService(credential);
    service.HttpClient.Timeout = new TimeSpan(1, 1, 1);
    byte[] file = ImageToByte(imagem);
    BatchAnnotateImagesRequest batchRequest = new BatchAnnotateImagesRequest();
    batchRequest.Requests = new List<AnnotateImageRequest>();
    batchRequest.Requests.Add(new AnnotateImageRequest()
    {
        Features = new List<Feature>() { new Feature() { Type = type, MaxResults = 1 }, },
        ImageContext = new ImageContext() { LanguageHints = new List<string>() { language } },
        Image = new Image() { Content = Convert.ToBase64String(file) }
    });
    var annotate = service.Images.Annotate(batchRequest);
    // NOTE(review): Execute() blocks despite the async signature — consider ExecuteAsync().
    BatchAnnotateImagesResponse batchAnnotateImagesResponse = annotate.Execute();
    if (batchAnnotateImagesResponse.Responses.Any())
    {
        AnnotateImageResponse annotateImageResponse = batchAnnotateImagesResponse.Responses[0];
        if (annotateImageResponse.Error != null)
        {
            if (annotateImageResponse.Error.Message != null)
            {
                Error = annotateImageResponse.Error.Message;
            }
        }
        // Fix: guard against a null/empty TextAnnotations list (e.g. an image
        // with no recognizable text), which previously threw a
        // NullReferenceException on the [0] access.
        else if (annotateImageResponse.TextAnnotations != null && annotateImageResponse.TextAnnotations.Count > 0)
        {
            TextResult = annotateImageResponse.TextAnnotations[0].Description.Replace("\n", ";");
        }
    }
    return "";
}
/// <summary>
/// Wraps a single annotation request for <paramref name="stream"/> in a
/// batch request of one.
/// </summary>
private static BatchAnnotateImagesRequest CreateRequest(Stream stream)
{
    var annotation = CreateAnnotationImageRequest(stream);
    return new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest> { annotation }
    };
}
/// <summary>
/// Run image detection and annotation for a batch of images.
/// </summary>
/// <param name="requests">
/// Individual image annotation requests for this batch.
/// </param>
/// <param name="callSettings">
/// If not null, applies overrides to this RPC call.
/// </param>
/// <returns>
/// The RPC response.
/// </returns>
public override BatchAnnotateImagesResponse BatchAnnotateImages(IEnumerable<AnnotateImageRequest> requests, CallSettings callSettings = null)
{
    // Wrap the bare request sequence in the RPC request object; the repeated
    // field's Add overload accepts a whole IEnumerable.
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest();
    request.Requests.Add(requests);
    Modify_BatchAnnotateImagesRequest(ref request, ref callSettings);
    return _callBatchAnnotateImages.Sync(request, callSettings);
}
/// <summary>Snippet for BatchAnnotateImages: synchronous call taking a request object.</summary>
public void BatchAnnotateImages_RequestObject()
{
    // Snippet: BatchAnnotateImages(BatchAnnotateImagesRequest,CallSettings)
    // Create client
    ImageAnnotatorClient imageAnnotatorClient = ImageAnnotatorClient.Create();
    // Initialize request argument(s)
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests = { },
    };
    // Make the request
    BatchAnnotateImagesResponse response = imageAnnotatorClient.BatchAnnotateImages(request);
    // End snippet
}
/// <summary>
/// Requests up to 10 LABEL_DETECTION annotations for the given base64-encoded
/// image and returns the label descriptions.
/// </summary>
/// <param name="base64EncodedImage">Base64-encoded image content.</param>
/// <returns>Label descriptions; empty when the API returned no labels.</returns>
public async Task<List<string>> Run(string base64EncodedImage)
{
    var service = new VisionService(new BaseClientService.Initializer
    {
        ApplicationName = "CortanaWhatsThat",
        //FIXME Private Information: hard-coded API key — move to secure configuration.
        //==================================================
        ApiKey = "AIzaSyBLJacVUI5sxuir2RHwkDNE2fveMT1-T0U",
        //==================================================
    });
    var resource = new ImagesResource(service);
    var googleImage = new Image() { Content = base64EncodedImage };
    var googleFeature = new Feature() { Type = "LABEL_DETECTION", MaxResults = 10 };
    var googleAnnotateRequest = new AnnotateImageRequest()
    {
        Features = new List<Feature>() { googleFeature },
        Image = googleImage
    };
    var googleBatchAnnotateRequest = new BatchAnnotateImagesRequest()
    {
        Requests = new List<AnnotateImageRequest>() { googleAnnotateRequest }
    };
    var request = resource.Annotate(googleBatchAnnotateRequest);
    var response = await request.ExecuteAsync();

    // Fix: guard against missing responses / null LabelAnnotations so an
    // unrecognized image yields an empty list instead of a NullReferenceException.
    List<string> listToReturn = new List<string>();
    if (response?.Responses != null && response.Responses.Count > 0 &&
        response.Responses[0].LabelAnnotations != null)
    {
        foreach (var label in response.Responses[0].LabelAnnotations)
        {
            listToReturn.Add(label.Description);
        }
    }
    return listToReturn;
}
/// <summary>
/// Sample: populating a protobuf repeated field with an object initializer
/// combined with a collection initializer.
/// </summary>
public void ProtoRepeatedField1()
{
    // Sample: ProtoRepeatedField1
    // In normal code you'd populate these individual requests with more
    // information.
    AnnotateImageRequest request1 = new AnnotateImageRequest();
    AnnotateImageRequest request2 = new AnnotateImageRequest();
    // Create the batch request using an object initializer
    BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest
    {
        // Populate the repeated field with a collection initializer
        Requests = { request1, request2 }
    };
    // End sample
}
/// <summary>Snippet for BatchAnnotateImagesAsync: asynchronous call taking a request object.</summary>
public async Task BatchAnnotateImagesAsync_RequestObject()
{
    // Snippet: BatchAnnotateImagesAsync(BatchAnnotateImagesRequest,CallSettings)
    // Create client
    ImageAnnotatorClient imageAnnotatorClient = await ImageAnnotatorClient.CreateAsync();
    // Initialize request argument(s)
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests = { },
    };
    // Make the request
    BatchAnnotateImagesResponse response = await imageAnnotatorClient.BatchAnnotateImagesAsync(request);
    // End snippet
}
/// <summary>
/// Sends one annotation request built from <paramref name="content"/> and the
/// requested features, returning the first response or null when none came back.
/// </summary>
public AnnotateImageResponse Annotate(VisionService service, HttpContent content, string[] features)
{
    var batch = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>
        {
            CreateAnnotationImageRequest(content, features)
        }
    };
    var result = service.Images.Annotate(batch).Execute();
    if (result?.Responses?.Count > 0)
    {
        return result.Responses[0];
    }
    return null;
}
/// <summary>
/// Sample: populating a protobuf repeated field imperatively by calling Add
/// on the (always non-null) repeated-field property.
/// </summary>
public void ProtoRepeatedField2()
{
    // Sample: ProtoRepeatedField2
    // In normal code you'd populate these individual requests with more
    // information.
    AnnotateImageRequest request1 = new AnnotateImageRequest();
    AnnotateImageRequest request2 = new AnnotateImageRequest();
    // Populate the batch without using an object initializer, just by calling
    // Add on the repeated field
    BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest();
    batch.Requests.Add(request1);
    batch.Requests.Add(request2);
    // End sample
}
public void BatchAnnotateImages2()
{
    // Arrange: strict mock returning a canned response for an empty batch.
    Mock<ImageAnnotator.ImageAnnotatorClient> grpcMock =
        new Mock<ImageAnnotator.ImageAnnotatorClient>(MockBehavior.Strict);
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests = { },
    };
    BatchAnnotateImagesResponse expectedResponse = new BatchAnnotateImagesResponse();
    grpcMock.Setup(x => x.BatchAnnotateImages(request, It.IsAny<CallOptions>()))
        .Returns(expectedResponse);
    ImageAnnotatorClient client = new ImageAnnotatorClientImpl(grpcMock.Object, null);

    // Act: the request-object overload must pass through to the gRPC client.
    BatchAnnotateImagesResponse actual = client.BatchAnnotateImages(request);

    // Assert
    Assert.Same(expectedResponse, actual);
    grpcMock.VerifyAll();
}
public async Task BatchAnnotateImagesAsync2()
{
    // Arrange: strict mock returning a canned async response for an empty batch.
    Mock<ImageAnnotator.ImageAnnotatorClient> grpcMock =
        new Mock<ImageAnnotator.ImageAnnotatorClient>(MockBehavior.Strict);
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests = { },
    };
    BatchAnnotateImagesResponse expectedResponse = new BatchAnnotateImagesResponse();
    grpcMock.Setup(x => x.BatchAnnotateImagesAsync(request, It.IsAny<CallOptions>()))
        .Returns(new Grpc.Core.AsyncUnaryCall<BatchAnnotateImagesResponse>(
            Task.FromResult(expectedResponse), null, null, null, null));
    ImageAnnotatorClient client = new ImageAnnotatorClientImpl(grpcMock.Object, null);

    // Act: the async request-object overload must pass through to the gRPC client.
    BatchAnnotateImagesResponse actual = await client.BatchAnnotateImagesAsync(request);

    // Assert
    Assert.Same(expectedResponse, actual);
    grpcMock.VerifyAll();
}
/// <summary>
/// Runs TEXT_DETECTION OCR over <paramref name="image"/> using the Google Vision API.
/// </summary>
/// <param name="image">Bitmap whose text should be recognized.</param>
/// <param name="languages">Language hints for the OCR engine.</param>
/// <param name="timeout">HTTP timeout in milliseconds.</param>
/// <returns>The recognized text, trimmed; empty string when nothing was detected.</returns>
public string RecognizeText(Bitmap image, List<string> languages, int timeout)
{
    BatchAnnotateImagesRequest batchRequest = new BatchAnnotateImagesRequest();
    batchRequest.Requests = new List<AnnotateImageRequest>();
    batchRequest.Requests.Add(new AnnotateImageRequest()
    {
        Features = new List<Feature>() { new Feature() { Type = "TEXT_DETECTION", MaxResults = 1 }, },
        ImageContext = new ImageContext() { LanguageHints = languages },
        Image = new GoogleOCR.Image() { Content = Convert.ToBase64String(image.ImageToBytes()) },
    });
    string output = string.Empty;
    using (var visioService = this.CreateService())
    {
        visioService.HttpClient.Timeout = new TimeSpan(0, 0, 0, 0, timeout);
        var annotate = visioService.Images.Annotate(batchRequest);
        BatchAnnotateImagesResponse batchAnnotateImagesResponse = annotate.Execute();
        var annotations = batchAnnotateImagesResponse.Responses;
        if (annotations.Count > 0 &&
            annotations[0].TextAnnotations != null &&
            annotations[0].TextAnnotations.Count > 0 &&
            annotations[0].TextAnnotations[0].Description != null)
        {
            // Fix: Trim() replaces the redundant TrimEnd().TrimStart() chain —
            // identical whitespace semantics, one fewer intermediate string.
            output = annotations[0].TextAnnotations[0].Description.Trim();
        }
    }
    return output;
}
/// <summary>
/// Annotates <paramref name="file"/> with the requested Vision features.
/// </summary>
/// <param name="service">Authenticated Vision service.</param>
/// <param name="file">Image file to annotate.</param>
/// <param name="features">Vision feature types to request.</param>
/// <returns>The first annotation response, or null when the API returned none.</returns>
public static async Task<AnnotateImageResponse> AnnotateAsync(
    this VisionService service, FileInfo file, params string[] features)
{
    var request = new BatchAnnotateImagesRequest();
    request.Requests = new List<AnnotateImageRequest>();
    request.Requests.Add(CreateAnnotationImageRequest(file.FullName, features));
    var result = await service.Images.Annotate(request).ExecuteAsync();
    // Fix: null-conditional access guards against a null result/Responses list
    // instead of throwing a NullReferenceException (matches the HttpContent
    // overload's `result?.Responses?.Count > 0` pattern).
    if (result?.Responses?.Count > 0)
    {
        return result.Responses[0];
    }
    return null;
}
/// <summary>
/// Sample: populating a protobuf repeated field from an existing collection
/// via the Add overload that accepts an IEnumerable.
/// </summary>
public void ProtoRepeatedField3()
{
    // Sample: ProtoRepeatedField3
    // In normal code you'd populate these individual requests with more
    // information.
    List<AnnotateImageRequest> requests = new List<AnnotateImageRequest>
    {
        new AnnotateImageRequest(),
        new AnnotateImageRequest()
    };
    // Create the batch request using an object initializer
    BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest
    {
        // Populate the repeated field using the Add overload that accepts
        // an IEnumerable<T>
        Requests = { requests }
    };
    // End sample
}
/// <summary>
/// Runs FACE_DETECTION over the image at <paramref name="path"/>.
/// </summary>
/// <param name="path">Path of the image file.</param>
/// <param name="maxResults">Maximum number of face annotations to return.</param>
/// <returns>The detected face annotations.</returns>
/// <exception cref="Exception">Thrown when the API returns no face annotations.</exception>
public IList<FaceAnnotation> detectFaces(string path, int maxResults)
{
    byte[] data = File.ReadAllBytes(path);
    AnnotateImageRequest request = new AnnotateImageRequest();
    Google.Apis.Vision.v1.Data.Image img = new Google.Apis.Vision.v1.Data.Image();
    img.Content = Convert.ToBase64String(data);
    request.Image = img;
    Feature feature = new Feature();
    feature.Type = "FACE_DETECTION";
    feature.MaxResults = maxResults;
    request.Features = new List<Feature>() { feature };
    BatchAnnotateImagesRequest batchAnnotate = new BatchAnnotateImagesRequest();
    batchAnnotate.Requests = new List<AnnotateImageRequest>() { request };
    ImagesResource.AnnotateRequest annotate = _vision.Images.Annotate(batchAnnotate);
    BatchAnnotateImagesResponse batchResponse = annotate.Execute();
    AnnotateImageResponse response = batchResponse.Responses[0];
    if (response.FaceAnnotations == null)
    {
        // Fix: Error can itself be null (e.g. a valid image containing no faces),
        // which previously crashed with a NullReferenceException while building
        // the exception. Fall back to a generic message instead.
        throw new Exception(response.Error != null
            ? response.Error.Message
            : "Unknown error getting image annotations");
    }
    return response.FaceAnnotations;
}
/// <summary>Snippet for BatchAnnotateImagesAsync: asynchronous call taking a populated request object.</summary>
public async Task BatchAnnotateImagesRequestObjectAsync()
{
    // Snippet: BatchAnnotateImagesAsync(BatchAnnotateImagesRequest, CallSettings)
    // Additional: BatchAnnotateImagesAsync(BatchAnnotateImagesRequest, CancellationToken)
    // Create client
    ImageAnnotatorClient imageAnnotatorClient = await ImageAnnotatorClient.CreateAsync();
    // Initialize request argument(s)
    BatchAnnotateImagesRequest request = new BatchAnnotateImagesRequest
    {
        Requests =
        {
            new AnnotateImageRequest(),
        },
        Parent = "",
    };
    // Make the request
    BatchAnnotateImagesResponse response = await imageAnnotatorClient.BatchAnnotateImagesAsync(request);
    // End snippet
}
/// <summary>
/// Run image detection and annotation for a batch of images.
/// Documentation https://developers.google.com/vision/v1/reference/images/annotate
/// </summary>
/// <param name="service">Authenticated Vision service.</param>
/// <param name="body">A valid Vision v1 body.</param>
/// <returns>The <see cref="BatchAnnotateImagesResponse"/> from the service.</returns>
/// <exception cref="ArgumentNullException">When <paramref name="service"/> or <paramref name="body"/> is null.</exception>
/// <exception cref="Exception">Wraps any failure raised while executing the request.</exception>
public static BatchAnnotateImagesResponse Annotate(VisionService service, BatchAnnotateImagesRequest body)
{
    // Fix: validate outside the try block so argument errors surface as
    // ArgumentNullException instead of being wrapped in the generic Exception
    // below; nameof keeps the parameter names refactor-safe.
    if (service == null)
    {
        throw new ArgumentNullException(nameof(service));
    }
    if (body == null)
    {
        throw new ArgumentNullException(nameof(body));
    }
    try
    {
        // Make the request.
        return service.Images.Annotate(body).Execute();
    }
    catch (Exception ex)
    {
        throw new Exception("Request Images.Annotate failed.", ex);
    }
}
/// <summary>
/// Annotates the base64-encoded <paramref name="image"/> with the configured
/// <c>RequestFeatures</c>.
/// </summary>
/// <param name="image">Base64-encoded image content.</param>
/// <returns>The first annotated result, or null when the API returned no responses.</returns>
public async Task<AnnotatedImage> AnnotateImage(string image)
{
    var request = new AnnotateImageRequest
    {
        Image = new Image(),
    };
    request.Image.Content = image;
    request.Features = RequestFeatures;
    var batchRequest = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest>(),
    };
    batchRequest.Requests.Add(request);
    BatchAnnotateImagesResponse response;
    try
    {
        response = await this.visionService.Images.Annotate(batchRequest).ExecuteAsync();
    }
    catch (GoogleApiException)
    {
        // Fix: dropped the unused `var e = ex;` local and the exception binding.
        // When real handling is added, inspect HttpStatusCode, Error.Code and
        // Error.Message; for now the exception propagates unchanged.
        throw;
    }
    return response.Responses.Count > 0 ? new AnnotatedImage(response.Responses[0]) : null;
}
/// <summary>
/// Runs FACE_DETECTION over the raw image bytes and returns the detected face
/// annotations; throws an IOException carrying the API error (or a generic
/// message) when none come back.
/// </summary>
IEnumerable<FaceAnnotation> detectFaces(IEnumerable<byte> inputImage, int maxResults)
{
    var request = new AnnotateImageRequest
    {
        Image = new Google.Apis.Vision.v1.Data.Image
        {
            Content = Convert.ToBase64String(inputImage.ToArray())
        },
        Features = new List<Feature>
        {
            new Feature() { MaxResults = maxResults, Type = "FACE_DETECTION" }
        }
    };
    var batch = new BatchAnnotateImagesRequest
    {
        Requests = new List<AnnotateImageRequest> { request }
    };
    var annotate = _visionService.Images.Annotate(batch);
    BatchAnnotateImagesResponse batchResponse = annotate.Execute();
    AnnotateImageResponse response = batchResponse.Responses[0];
    if (response.FaceAnnotations == null)
    {
        throw new IOException(
            response.Error != null ? response.Error.Message : "Unknown error getting image annotations");
    }
    return response.FaceAnnotations;
}
// Partial methods called on each request.
// Allows per-RPC-call modification to the request and CallSettings objects,
// before the underlying RPC is performed.
// Provide an implementation for this partial method in a hand-written file to
// mutate the outgoing BatchAnnotateImages request or its CallSettings; if no
// implementation exists, the compiler removes the declaration and all call
// sites entirely (standard C# partial-method behavior).
partial void Modify_BatchAnnotateImagesRequest(ref BatchAnnotateImagesRequest request, ref gaxgrpc::CallSettings settings);