Example No. 1
        public AnnotateImageRequest GetAnnotateImageRequest()
        {
            var request = new AnnotateImageRequest();

            if (ImageLocationType == CVClientCommon.LocationType.Local)
            {
                request.Image = Image.FromFile(ImageLocation);
            }
            else if (ImageLocationType == CVClientCommon.LocationType.Uri)
            {
                request.Image = Image.FromUri(ImageLocation);
            }

            foreach (var feature in DetectionFeatureTypes)
            {
                var newFeature = new Feature()
                {
                    Type = feature
                };

                if (DetectionMaxResults.TryGetValue(feature, out var maxResults) && maxResults > 0)
                {
                    newFeature.MaxResults = maxResults;
                }

                request.Features.Add(newFeature);
            }

            return(request);
        }
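For reference, a minimal sketch (not part of the original example) of how a request built by GetAnnotateImageRequest() might be sent, assuming the Google.Cloud.Vision.V1 client; the method name below is illustrative.

        // Sketch only: sends a single-request batch built by GetAnnotateImageRequest().
        public void SendAnnotateImageRequest(AnnotateImageRequest request)
        {
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();

            // Responses are positional: one AnnotateImageResponse per input request.
            BatchAnnotateImagesResponse response = client.BatchAnnotateImages(new[] { request });
            Console.WriteLine(response.Responses[0]);
        }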
Example No. 2
        public void ThrowOnAnyError()
        {
            // Snippet: ThrowOnAnyError
            Image image = new Image(); // No content or source!
            // Just a single request in this example, but usually BatchAnnotateImages would be
            // used with multiple requests.
            var request = new AnnotateImageRequest
            {
                Image    = image,
                Features = { new Feature {
                                 Type = Feature.Types.Type.SafeSearchDetection
                             } }
            };
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();

            try
            {
                BatchAnnotateImagesResponse response = client.BatchAnnotateImages(new[] { request });
                // ThrowOnAnyError will throw if any individual response in response.Responses
                // contains an error. Other responses may still have useful results.
                // Errors can be detected manually by checking the Error property in each
                // individual response.
                response.ThrowOnAnyError();
            }
            catch (AggregateException e)
            {
                // Because a batch can have multiple errors, the exception thrown is AggregateException.
                // Each inner exception is an AnnotateImageException
                foreach (AnnotateImageException innerException in e.InnerExceptions)
                {
                    Console.WriteLine(innerException.Response.Error);
                }
            }
            // End snippet
        }
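As the comments above note, errors can also be checked without exceptions; the sketch below (not part of the original snippet) shows that manual check.

        // Sketch only: inspect each response's Error instead of calling ThrowOnAnyError.
        public void CheckErrorsManually(BatchAnnotateImagesResponse response)
        {
            foreach (AnnotateImageResponse single in response.Responses)
            {
                if (single.Error != null)
                {
                    // This individual request failed; others may still contain useful results.
                    Console.WriteLine(single.Error);
                }
                else
                {
                    Console.WriteLine(single.SafeSearchAnnotation);
                }
            }
        }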
Example No. 3
        public AnnotateImageRequest CreateAnnotationImageRequest(HttpContent content, string[] featureTypes)
        {
            //if (!File.Exists(path))
            //{
            //    throw new FileNotFoundException("Not found.", path);
            //}

            var request = new AnnotateImageRequest();

            request.Image = new Image();

            var bytes = content.ReadAsByteArrayAsync().Result;

            request.Image.Content = Convert.ToBase64String(bytes);

            request.Features = new List <Feature>();

            foreach (var featureType in featureTypes)
            {
                request.Features.Add(new Feature()
                {
                    Type = featureType
                });
            }

            return(request);
        }
        public async Task Annotate()
        {
            ImageAnnotatorClient client = new FakeImageAnnotator();
            var request = new AnnotateImageRequest
            {
                Image    = s_allAnnotationsImage,
                Features =
                {
                    new Feature {
                        Type = FeatureType.FaceDetection
                    },
                    new Feature {
                        Type = FeatureType.LogoDetection, MaxResults = 1
                    }
                }
            };
            var expectedResponse = new AnnotateImageResponse
            {
                FaceAnnotations = { s_allAnnotationsResponse.FaceAnnotations },
                LogoAnnotations = { s_allAnnotationsResponse.LogoAnnotations.Take(1) }
            };

            Assert.Equal(expectedResponse, client.Annotate(request));
            Assert.Equal(expectedResponse, await client.AnnotateAsync(request));
        }
        public void AnnotateImagesTest_OneValidAndOneInvalidImageURL()
        {
            ImageFeatures feature = new ImageFeatures(ImageType.LANDMARK_DETECTION, MAX_RESULTS, VALID_MODEL);

            imageFeaturesList.Add(feature);

            AnnotateImageRequest     imageRequest1    = GenerateImageRequest(IMAGE_URL, imageFeaturesList);
            AnnotateImageRequest     imageRequest2    = GenerateImageRequest(LANDMARK_ANNOTATION_URL_1, imageFeaturesList);
            AnnotateImageRequestList imageRequestList =
                new AnnotateImageRequestList(new List <AnnotateImageRequest>()
            {
                imageRequest1, imageRequest2
            });

            Task <Tuple <AnnotateImageResponseList, ResponseStatus> > responses = imageIntelligence.AnnotateImages(imageRequestList);

            responses.Wait();

            Assert.IsNotNull(responses.Result.Item1);

            AnnotateImageResponseList responseList = responses.Result.Item1;

            Assert.AreEqual(responseList.Responses.Count, 2);

            Assert.IsNotNull(responseList.Responses[0].Error);
            Assert.IsNotNull(responseList.Responses[0].Error.Message);
            Assert.IsNotEmpty(responseList.Responses[0].Error.Message);


            Assert.IsNotNull(responseList.Responses[1].LandmarkAnnotations);
            Assert.GreaterOrEqual(responseList.Responses[1].LandmarkAnnotations.Count, 1);

            imageRequestList.Requests.Clear();
        }
        /// <summary>
        /// Annotates a single image asynchronously.
        /// </summary>
        /// <remarks>
        /// <para>This simply delegates to <see cref="BatchAnnotateImagesAsync(System.Collections.Generic.IEnumerable{AnnotateImageRequest}, CallSettings)"/>
        /// by creating a batch with a single request, and returns the single response.</para>
        /// <para>If <see cref="AnnotateImageException"/> is thrown, the original response can still be retrieved using
        /// <see cref="AnnotateImageException.Response"/>.</para>
        /// </remarks>
        /// <param name="request">The annotation request to process. Must not be null.</param>
        /// <param name="settings">Call settings to apply to the RPC, if any.</param>
        /// <exception cref="AnnotateImageException">The RPC returns a response, but the response contains an error.</exception>
        /// <returns>A task representing the asynchronous operation. The task result will be the annotation response.</returns>
        public virtual async Task <AnnotateImageResponse> AnnotateAsync(AnnotateImageRequest request, CallSettings settings = null)
        {
            GaxPreconditions.CheckNotNull(request, nameof(request));
            var batchResponse = await BatchAnnotateImagesAsync(new[] { request }, settings).ConfigureAwait(false);

            return(batchResponse.Responses[0].ThrowOnError());
        }
        /// <summary>
        /// Annotates a single image.
        /// </summary>
        /// <remarks>
        /// <para>This simply delegates to <see cref="BatchAnnotateImages(System.Collections.Generic.IEnumerable{AnnotateImageRequest}, CallSettings)"/>
        /// by creating a batch with a single request, and returns the single response.</para>
        /// <para>If <see cref="AnnotateImageException"/> is thrown, the original response can still be retrieved using
        /// <see cref="AnnotateImageException.Response"/>.
        /// </para>
        /// </remarks>
        /// <param name="request">The annotation request to process. Must not be null.</param>
        /// <param name="settings">Call settings to apply to the RPC, if any.</param>
        /// <exception cref="AnnotateImageException">The RPC returns a response, but the response contains an error.</exception>
        /// <returns>The annotation response.</returns>
        public virtual AnnotateImageResponse Annotate(AnnotateImageRequest request, CallSettings settings = null)
        {
            GaxPreconditions.CheckNotNull(request, nameof(request));
            var batchResponse = BatchAnnotateImages(new[] { request }, settings);

            return(batchResponse.Responses[0].ThrowOnError());
        }
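A hedged usage sketch for the Annotate wrapper above; the image URL is a placeholder and the error handling follows the documented AnnotateImageException behaviour.

        // Sketch only: annotate a single image and handle a per-image error.
        public void AnnotateSingleImage()
        {
            ImageAnnotatorClient client = ImageAnnotatorClient.Create();
            var request = new AnnotateImageRequest
            {
                Image    = Image.FromUri("https://example.com/picture.jpg"), // placeholder URL
                Features = { new Feature {
                                 Type = Feature.Types.Type.LabelDetection
                             } }
            };

            try
            {
                AnnotateImageResponse response = client.Annotate(request);
                foreach (var label in response.LabelAnnotations)
                {
                    Console.WriteLine($"{label.Description}: {label.Score}");
                }
            }
            catch (AnnotateImageException e)
            {
                // The original response is still available on the exception.
                Console.WriteLine(e.Response.Error);
            }
        }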
Example No. 8
        public async Task <Vertex> SkipAdImageLocationAsync(string imageUri)
        {
            var image = await Image.FromFileAsync(imageUri);

            var req = new AnnotateImageRequest
            {
                Image    = image,
                Features = { new Feature {
                                 Type = Feature.Types.Type.DocumentTextDetection
                             } }
            };

            var resp = await _apiClient.AnnotateAsync(req);

            for (var i = 0; i < resp.TextAnnotations.Count - 1; ++i)
            {
                var annCurr = resp.TextAnnotations[i];
                var annNext = resp.TextAnnotations[i + 1];

                if (IsSkipAdSequence(annCurr.Description, annNext.Description))
                {
                    return(annCurr.BoundingPoly.Vertices.First());
                }
            }
            return(null);
        }
        /// <summary>Snippet for AsyncBatchAnnotateImagesAsync</summary>
        public async Task AsyncBatchAnnotateImagesAsync()
        {
            // Snippet: AsyncBatchAnnotateImagesAsync(IEnumerable<AnnotateImageRequest>, OutputConfig, CallSettings)
            // Additional: AsyncBatchAnnotateImagesAsync(IEnumerable<AnnotateImageRequest>, OutputConfig, CancellationToken)
            // Create client
            ImageAnnotatorClient imageAnnotatorClient = await ImageAnnotatorClient.CreateAsync();

            // Initialize request argument(s)
            IEnumerable <AnnotateImageRequest> requests = new AnnotateImageRequest[]
            {
                new AnnotateImageRequest(),
            };
            OutputConfig outputConfig = new OutputConfig();
            // Make the request
            Operation <AsyncBatchAnnotateImagesResponse, OperationMetadata> response = await imageAnnotatorClient.AsyncBatchAnnotateImagesAsync(requests, outputConfig);

            // Poll until the returned long-running operation is complete
            Operation <AsyncBatchAnnotateImagesResponse, OperationMetadata> completedResponse = await response.PollUntilCompletedAsync();

            // Retrieve the operation result
            AsyncBatchAnnotateImagesResponse result = completedResponse.Result;

            // Or get the name of the operation
            string operationName = response.Name;
            // This name can be stored, then the long-running operation retrieved later by name
            Operation <AsyncBatchAnnotateImagesResponse, OperationMetadata> retrievedResponse = await imageAnnotatorClient.PollOnceAsyncBatchAnnotateImagesAsync(operationName);

            // Check if the retrieved long-running operation has completed
            if (retrievedResponse.IsCompleted)
            {
                // If it has completed, then access the result
                AsyncBatchAnnotateImagesResponse retrievedResult = retrievedResponse.Result;
            }
            // End snippet
        }
Example No. 10
    // Takes image data and returns the label detection results from Google Cloud Vision.
    public static responseBody RequestVisionAPI(string base64String)
    {
        // Reference: https://qiita.com/jyuko/items/e6115a5dfc959f52591d

        string apiKey = "AIzaSyALq6mj-H1c2HKrXubWzhsPtUCxni_Z5_I";
        string url    = "https://vision.googleapis.com/v1/images:annotate?key=" + apiKey;

        // Build the request payload

        // 1. Create the requestBody
        var requests = new requestBody();

        requests.requests = new List <AnnotateImageRequest>();

        // 2. Create requestBody > request
        var request = new AnnotateImageRequest();

        request.image         = new Image();
        request.image.content = base64String;

        // 3. Create requestBody > request > feature
        request.features = new List <Feature>();
        var feature = new Feature();

        feature.type       = FeatureType.LABEL_DETECTION.ToString();
        feature.maxResults = 10;
        request.features.Add(feature);

        requests.requests.Add(request);

        // Convert to JSON
        string jsonRequestBody = JsonUtility.ToJson(requests);

        return(JsonUtility.FromJson <responseBody>(Communication(url, jsonRequestBody)));
    }
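RequestVisionAPI relies on [Serializable] DTO classes whose public field names match the Vision REST JSON; they are not shown in the listing. A minimal sketch of the request-side DTOs, inferred from the usage above (the response-side responseBody class is analogous):

    // Sketch only: request-side DTOs inferred from RequestVisionAPI above.
    // Field names must match the Vision REST JSON keys for JsonUtility to serialize them.
    [Serializable]
    public class requestBody
    {
        public List <AnnotateImageRequest> requests;
    }

    [Serializable]
    public class AnnotateImageRequest
    {
        public Image image;
        public List <Feature> features;
    }

    [Serializable]
    public class Image
    {
        public string content; // base64-encoded image bytes
    }

    [Serializable]
    public class Feature
    {
        public string type;       // e.g. "LABEL_DETECTION"
        public int maxResults;
    }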
        public void AnnotateImagesTest_InvalidAPIKey()
        {
            ImageFeatures feature = new ImageFeatures(ImageType.LABEL_DETECTION, MAX_RESULTS, VALID_MODEL);

            imageFeaturesList.Add(feature);

            AnnotateImageRequest     imageRequest     = GenerateImageRequest(VALID_IMAGE_URL, imageFeaturesList);
            AnnotateImageRequestList imageRequestList = new AnnotateImageRequestList(new List <AnnotateImageRequest>()
            {
                imageRequest
            });

            imageIntelligence.UpdateKey(INVALID_SETUP);

            Task <Tuple <AnnotateImageResponseList, ResponseStatus> > responses = imageIntelligence.AnnotateImages(imageRequestList);

            responses.Wait();

            imageIntelligence.UpdateKey(VALID_SETUP);

            Assert.IsNull(responses.Result.Item1);
            Assert.AreSame(responses.Result.Item2, ImageAnnotationStatus.INVALID_API_KEY);

            imageRequestList.Requests.Clear();
        }
Example No. 12
        private static AnnotateImageRequest CreateAnnotationImageRequest(
            string path,
            string[] featureTypes)
        {
            if (!File.Exists(path))
            {
                throw new FileNotFoundException("Not found.", path);
            }

            var request = new AnnotateImageRequest();

            request.Image = new Image();

            var bytes = File.ReadAllBytes(path);

            request.Image.Content = Convert.ToBase64String(bytes);

            request.Features = new List <Feature>();

            foreach (var featureType in featureTypes)
            {
                request.Features.Add(new Feature()
                {
                    Type = featureType
                });
            }

            return(request);
        }
        public void AnnotateImagesTest_ValidRequestWithTextDetection()
        {
            ImageFeatures feature = new ImageFeatures(ImageType.TEXT_DETECTION, MAX_RESULTS, VALID_MODEL);

            imageFeaturesList.Add(feature);

            AnnotateImageRequest     imageRequest     = GenerateImageRequest(TEXT_DETECTION_URL_1, imageFeaturesList);
            AnnotateImageRequestList imageRequestList = new AnnotateImageRequestList(new List <AnnotateImageRequest>()
            {
                imageRequest
            });

            Task <Tuple <AnnotateImageResponseList, ResponseStatus> > response = imageIntelligence.AnnotateImages(imageRequestList);

            response.Wait();

            AnnotateImageResponseList responseList = response.Result.Item1;

            Assert.IsNotNull(responseList);
            Assert.AreSame(response.Result.Item2, ImageAnnotationStatus.OK);

            Assert.GreaterOrEqual(responseList.Responses.Count, 1);

            for (int i = 0; i < responseList.Responses.Count; i++)
            {
                Assert.IsNotNull(responseList.Responses[i].TextAnnotations);
                Assert.GreaterOrEqual(responseList.Responses[i].TextAnnotations.Count, 1);

                Assert.IsNotNull(responseList.Responses[i].FullTextAnnotations);
                Assert.IsNotNull(responseList.Responses[i].FullTextAnnotations.Text);
                Assert.GreaterOrEqual(responseList.Responses[i].FullTextAnnotations.Pages.Count, 1);
            }

            imageRequestList.Requests.Clear();
        }
        public void AnnotateImagesTest_ValidRequestWithImageProperties()
        {
            ImageFeatures feature = new ImageFeatures(ImageType.IMAGE_PROPERTIES, MAX_RESULTS, VALID_MODEL);

            imageFeaturesList.Add(feature);

            AnnotateImageRequest     imageRequest     = GenerateImageRequest(VALID_IMAGE_URL, imageFeaturesList);
            AnnotateImageRequestList imageRequestList = new AnnotateImageRequestList(new List <AnnotateImageRequest>()
            {
                imageRequest
            });

            Task <Tuple <AnnotateImageResponseList, ResponseStatus> > response = imageIntelligence.AnnotateImages(imageRequestList);

            response.Wait();

            AnnotateImageResponseList responseList = response.Result.Item1;

            Assert.IsNotNull(responseList);
            Assert.AreSame(response.Result.Item2, ImageAnnotationStatus.OK);

            Assert.GreaterOrEqual(responseList.Responses.Count, 1);

            for (int i = 0; i < responseList.Responses.Count; i++)
            {
                Assert.IsNotNull(responseList.Responses[i].ImagePropertiesAnnotation);
                Assert.IsNotNull(responseList.Responses[i].ImagePropertiesAnnotation.DominantColors);
                Assert.GreaterOrEqual(responseList.Responses[i].ImagePropertiesAnnotation.DominantColors.Colors.Count, 1);
            }

            imageRequestList.Requests.Clear();
        }
Example No. 15
        public AnnotateImageResponse AnalyzeImage(Image image)
        {
            var client = new ImageAnnotatorClientBuilder();

            client.CredentialsPath = "HackGT.json";
            var visClient = client.Build();

            AnnotateImageRequest request = new AnnotateImageRequest
            {
                Image    = image,
                Features =
                {
                    new Feature {
                        Type = Feature.Types.Type.LabelDetection
                    },
                    new Feature {
                        Type = Feature.Types.Type.LogoDetection
                    },
                    new Feature {
                        Type = Feature.Types.Type.ImageProperties
                    }
                }
            };

            return(visClient.Annotate(request));
        }
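A short usage sketch for AnalyzeImage above, assuming it is called from the same class; the file name is a placeholder.

        // Sketch only: run detection on a local file and print the labels.
        public void PrintLabels()
        {
            var image = Image.FromFile("product-photo.jpg"); // placeholder path
            AnnotateImageResponse response = AnalyzeImage(image);

            foreach (var label in response.LabelAnnotations)
            {
                Console.WriteLine($"{label.Description} ({label.Score:P0})");
            }
        }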
Example No. 16
        private async Task <string> AnnotateAsync(string path)
        {
            // Create the service
            var service = new VisionService(new BaseClientService.Initializer
            {
                ApiKey = ""
            });

            var bytes = File.ReadAllBytes(path);

            // Create the image request
            var imgReq = new AnnotateImageRequest
            {
                Image = new Google.Apis.Vision.v1.Data.Image
                {
                    Content = Convert.ToBase64String(bytes)
                },

                Features = new List <Feature>
                {
                    new Feature()
                    {
                        Type = "TEXT_DETECTION"
                    }
                }
            };

            // Create the request
            var request = new BatchAnnotateImagesRequest
            {
                Requests = new List <AnnotateImageRequest>
                {
                    imgReq
                }
            };

            // Get the response
            var result = await service.Images.Annotate(request).ExecuteAsync();

            // Extract the keywords
            string keywords = "";

            if (result?.Responses?.Count > 0 && result.Responses[0].TextAnnotations != null)
            {
                var desc = result.Responses[0].TextAnnotations[0].Description;

                string[] words = desc.Split(
                    new[] { "\r\n", "\r", "\n" },
                    StringSplitOptions.None
                    );

                keywords = String.Join(" ", words);
            }

            return(keywords);
        }
        public AnnotateImageRequest GenerateImageRequest(String imageURL,
                                                         List <ImageFeatures> imageFeaturesList, ImageContext context = null)
        {
            ImageSource imageSource = new ImageSource(imageURL);
            Image       image       = new Image(source: imageSource);

            AnnotateImageRequest request = new AnnotateImageRequest(image, imageFeaturesList, context);

            return(request);
        }
Example No. 18
 public static void AddAllFeatures(this AnnotateImageRequest req)
 {
     foreach (var item in Enum.GetValues(typeof(Feature.Types.Type)).Cast <Feature.Types.Type>())
     {
         req.Features.Add(new Feature()
         {
             Type = item
         });
     }
 }
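One caveat with AddAllFeatures: Feature.Types.Type also contains the TypeUnspecified value, which is not a real detection feature. A hedged variant that skips it:

 // Sketch only: same as AddAllFeatures, but skipping the TypeUnspecified enum value.
 public static void AddAllConcreteFeatures(this AnnotateImageRequest req)
 {
     foreach (var item in Enum.GetValues(typeof(Feature.Types.Type))
                              .Cast <Feature.Types.Type>()
                              .Where(t => t != Feature.Types.Type.TypeUnspecified))
     {
         req.Features.Add(new Feature()
         {
             Type = item
         });
     }
 }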
Example No. 19
    public IEnumerator Capture()
    {
        while (true)
        {
            //texture2D.Apply(false); // Not required, because we do not need to upload it to the GPU.


            byte[] jpg    = img;
            string base64 = System.Convert.ToBase64String(jpg);
#if UNITY_WEBGL
            Application.ExternalCall("post", this.gameObject.name, "OnSuccessFromBrowser", "OnErrorFromBrowser", this.url + this.apiKey, base64, this.featureType.ToString(), this.maxResults);
#else
            AnnotateImageRequests requests = new AnnotateImageRequests();
            requests.requests = new List <AnnotateImageRequest>();

            AnnotateImageRequest request = new AnnotateImageRequest();
            request.image         = new Image();
            request.image.content = base64;
            request.features      = new List <Feature>();

            Feature feature = new Feature();
            feature.type       = this.featureType.ToString();
            feature.maxResults = this.maxResults;

            request.features.Add(feature);

            requests.requests.Add(request);

            string jsonData = JsonUtility.ToJson(requests, false);
            if (jsonData != string.Empty)
            {
                string url      = this.url + this.apiKey;
                byte[] postData = System.Text.Encoding.Default.GetBytes(jsonData);
                using (WWW www = new WWW(url, postData, headers))
                {
                    yield return(www);

                    if (string.IsNullOrEmpty(www.error))
                    {
                        //Debug.Log(www.text.Replace("\n", "").Replace(" ", ""));
                        AnnotateImageResponses responses = JsonUtility.FromJson <AnnotateImageResponses>(www.text);
                        // SendMessage, BroadcastMessage or something like that.
                        Debug.Log("got results");
                        TextOcr(responses);
                    }
                    else
                    {
                        Debug.Log("Error: " + www.error);
                    }
                }
            }
#endif
        }
    }
Example No. 20
 private static void AddSingularFeature <T>(
     AnnotateImageRequest request,
     AnnotateImageResponse fullResponse,
     AnnotateImageResponse actualResponse,
     FeatureType featureType,
     Func <AnnotateImageResponse, T> extractor,
     Action <AnnotateImageResponse, T> assigner)
 {
     if (request.Features.Any(f => f.Type == featureType))
     {
         assigner(actualResponse, extractor(fullResponse));
     }
 }
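A hypothetical call site for AddSingularFeature, copying the safe-search result only when the request asked for that feature; the surrounding test-helper context is assumed.

 // Sketch only: example invocation of AddSingularFeature for the safe-search field.
 private static void CopySafeSearchIfRequested(
     AnnotateImageRequest request,
     AnnotateImageResponse fullResponse,
     AnnotateImageResponse actualResponse)
 {
     AddSingularFeature(
         request,
         fullResponse,
         actualResponse,
         FeatureType.SafeSearchDetection,
         response => response.SafeSearchAnnotation,
         (response, value) => response.SafeSearchAnnotation = value);
 }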
Example No. 21
    /// <summary>
    /// Takes image data and returns the label detection results from Google Cloud Vision.
    /// </summary>
    /// <param name="base64String">Base64-encoded image data.</param>
    /// <returns>The detected label annotations.</returns>
    public static EntityAnnotation[] RequestVisionAPI(string base64String)
    {
        // Reference: https://qiita.com/jyuko/items/e6115a5dfc959f52591d

        string apiKey = "AIzaSyAQe-ZtCVwWx0xIco4b9U3dpbk83MoLv_c";
        string url    = "https://vision.googleapis.com/v1/images:annotate?key=" + apiKey;

        // Build the request payload

        // 1. Create the requestBody
        var requests = new requestBody();

        requests.requests = new List <AnnotateImageRequest>();

        // 2. Create requestBody > request
        var request = new AnnotateImageRequest();

        request.image         = new Image();
        request.image.content = base64String;

        // 3. Create requestBody > request > feature
        request.features = new List <Feature>();
        var feature = new Feature();

        feature.type       = FeatureType.LABEL_DETECTION.ToString();
        feature.maxResults = 10;
        request.features.Add(feature);

        requests.requests.Add(request);

        // Convert to JSON
        string jsonRequestBody = JsonUtility.ToJson(requests);

        // Post the JSON with an "application/json" Content-Type header
        var webRequest = new UnityWebRequest(url, "POST");

        byte[] postData = Encoding.UTF8.GetBytes(jsonRequestBody);
        webRequest.uploadHandler   = new UploadHandlerRaw(postData);
        webRequest.downloadHandler = new DownloadHandlerBuffer();
        webRequest.SetRequestHeader("Content-Type", "application/json");

        webRequest.SendWebRequest();
        // Block until the response is received
        while (!webRequest.isDone)
        {
        }

        var buff = JsonUtility.FromJson <responseBody>(webRequest.downloadHandler.text);

        return(CheckFunction.removeAnnotation(buff.responses[0].labelAnnotations.ToArray()));
    }
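The busy-wait loop above blocks the main thread until the web request finishes. A non-blocking sketch of the same call as a Unity coroutine (not part of the original example; the API key is a placeholder):

    // Sketch only: the same Vision API call issued from a coroutine, yielding instead of
    // spinning on webRequest.isDone. The parsed response is delivered via a callback.
    public static IEnumerator RequestVisionAPICoroutine(string base64String, Action <responseBody> onCompleted)
    {
        string apiKey = "YOUR_API_KEY"; // placeholder
        string url    = "https://vision.googleapis.com/v1/images:annotate?key=" + apiKey;

        var requests = new requestBody();
        requests.requests = new List <AnnotateImageRequest>();

        var request = new AnnotateImageRequest();
        request.image         = new Image();
        request.image.content = base64String;
        request.features      = new List <Feature>();
        request.features.Add(new Feature { type = FeatureType.LABEL_DETECTION.ToString(), maxResults = 10 });
        requests.requests.Add(request);

        string jsonRequestBody = JsonUtility.ToJson(requests);

        using (var webRequest = new UnityWebRequest(url, "POST"))
        {
            webRequest.uploadHandler   = new UploadHandlerRaw(Encoding.UTF8.GetBytes(jsonRequestBody));
            webRequest.downloadHandler = new DownloadHandlerBuffer();
            webRequest.SetRequestHeader("Content-Type", "application/json");

            // Yield until the request completes instead of blocking the main thread.
            yield return webRequest.SendWebRequest();

            onCompleted(JsonUtility.FromJson <responseBody>(webRequest.downloadHandler.text));
        }
    }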
        public async Task <List <string> > Run(string base64EncodedImage)
        {
            var service = new VisionService(new BaseClientService.Initializer
            {
                ApplicationName = "CortanaWhatsThat",
                //FIXME Private Information
                //==================================================
                ApiKey = "AIzaSyBLJacVUI5sxuir2RHwkDNE2fveMT1-T0U",
                //==================================================
            });

            var resource = new ImagesResource(service);

            var googleImage = new Image()
            {
                Content = base64EncodedImage
            };
            var googleFeature = new Feature()
            {
                Type = "LABEL_DETECTION", MaxResults = 10
            };
            var googleAnnotateRequest = new AnnotateImageRequest()
            {
                Features = new List <Feature>()
                {
                    googleFeature
                },
                Image = googleImage
                        //Image =  new Image { Content = "" }
            };
            var googleBatchAnnotateRequest = new BatchAnnotateImagesRequest()
            {
                Requests = new List <AnnotateImageRequest>()
                {
                    googleAnnotateRequest
                }
            };

            var request  = resource.Annotate(googleBatchAnnotateRequest);
            var response = await request.ExecuteAsync();


            List <string> listToReturn = new List <string>();

            response.Responses[0].LabelAnnotations.ToList()
            .ForEach(x => {
                listToReturn.Add(x.Description);
            });
            return(listToReturn);
        }
 /// <summary>Snippet for BatchAnnotateImages</summary>
 public void BatchAnnotateImages()
 {
     // Snippet: BatchAnnotateImages(IEnumerable<AnnotateImageRequest>, CallSettings)
     // Create client
     ImageAnnotatorClient imageAnnotatorClient = ImageAnnotatorClient.Create();
     // Initialize request argument(s)
     IEnumerable <AnnotateImageRequest> requests = new AnnotateImageRequest[]
     {
         new AnnotateImageRequest(),
     };
     // Make the request
     BatchAnnotateImagesResponse response = imageAnnotatorClient.BatchAnnotateImages(requests);
     // End snippet
 }
        public void AnnotateImage_BadImage()
        {
            var client  = ImageAnnotatorClient.Create();
            var request = new AnnotateImageRequest
            {
                Image    = s_badImage,
                Features = { new Feature {
                                 Type = Feature.Types.Type.FaceDetection
                             } }
            };
            var exception = Assert.Throws <AnnotateImageException>(() => client.Annotate(request));

            Assert.Equal((int)Code.InvalidArgument, exception.Response.Error.Code);
        }
    public IEnumerator SendTexture(Texture2D _texture)
    {
        string jsonString = "";

        // Create a Request Json
        GCVAPISample.Image image    = new GCVAPISample.Image(_texture);
        List <Feature>     features = new List <Feature>();

        foreach (FeatureType _featureType in Enum.GetValues(typeof(FeatureType)))
        {
            Feature feature = new Feature(_featureType, 3);
            features.Add(feature);
        }
        ImageContext                imageContext = new ImageContext();
        AnnotateImageRequest        _request     = new AnnotateImageRequest(image, features, imageContext);
        GoogleCloudVisionApiRequest gcv          = new GoogleCloudVisionApiRequest();

        gcv.Add(_request);

        jsonString = JsonUtility.ToJson(gcv, true);
        Debug.Log(jsonString);

        // Create a header
        Dictionary <string, string> header = new Dictionary <string, string> ();

        header.Add("Content-Type", "application/json; charset=UTF-8");

        byte[] postBytes = Encoding.Default.GetBytes(jsonString);

        WWW www = new WWW(API_ENDPOINT + API_KEY, postBytes, header);

        yield return(www);

        if (!string.IsNullOrEmpty(www.error))
        {
            Debug.LogError(www.error);
            Debug.Log(www.text);
        }
        else
        {
            Debug.Log(www.text);
            inputField.text = www.text;
            GoogleCloudVisionApiResponse googleCloudVisionApiResponse =
                (GoogleCloudVisionApiResponse)JsonUtility.FromJson(
                    www.text,
                    typeof(GoogleCloudVisionApiResponse));
            Debug.Log(googleCloudVisionApiResponse);
        }
    }
        /// <summary>
        /// Performs "safe search" processing on a single image.
        /// </summary>
        /// <remarks>
        /// If <see cref="AnnotateImageException"/> is thrown, the original response can still be retrieved using
        /// <see cref="AnnotateImageException.Response"/>.
        /// </remarks>
        /// <param name="image">The image to process. Must not be null.</param>
        /// <param name="context">Additional contextual information, if any.</param>
        /// <param name="callSettings">Call settings to apply to the RPC, if any.</param>
        /// <exception cref="AnnotateImageException">The RPC returns a response, but the response contains an error.</exception>
        /// <returns>The safe search categorization for the image.</returns>
        public SafeSearchAnnotation DetectSafeSearch(Image image, ImageContext context = null, CallSettings callSettings = null)
        {
            GaxPreconditions.CheckNotNull(image, nameof(image));
            var request = new AnnotateImageRequest
            {
                Image        = image,
                ImageContext = context,
                Features     = { new Feature {
                                     Type = Feature.Types.Type.SafeSearchDetection
                                 } }
            };
            var response = Annotate(request, callSettings);

            return(response.SafeSearchAnnotation);
        }
        /// <summary>
        /// Performs image property processing on a single image.
        /// </summary>
        /// <remarks>
        /// If <see cref="AnnotateImageException"/> is thrown, the original response can still be retrieved using
        /// <see cref="AnnotateImageException.Response"/>.
        /// </remarks>
        /// <param name="image">The image to process. Must not be null.</param>
        /// <param name="context">Additional contextual information, if any.</param>
        /// <param name="callSettings">Call settings to apply to the RPC, if any.</param>
        /// <exception cref="AnnotateImageException">The RPC returns a response, but the response contains an error.</exception>
        /// <returns>The detected properties for the image.</returns>
        public ImageProperties DetectImageProperties(Image image, ImageContext context = null, CallSettings callSettings = null)
        {
            GaxPreconditions.CheckNotNull(image, nameof(image));
            var request = new AnnotateImageRequest
            {
                Image        = image,
                ImageContext = context,
                Features     = { new Feature {
                                     Type = Feature.Types.Type.ImageProperties
                                 } }
            };
            var response = Annotate(request, callSettings);

            return(response.ImagePropertiesAnnotation);
        }
        /// <summary>
        /// Performs image property processing on a single image asynchronously.
        /// </summary>
        /// <remarks>
        /// If <see cref="AnnotateImageException"/> is thrown, the original response can still be retrieved using
        /// <see cref="AnnotateImageException.Response"/>.
        /// </remarks>
        /// <param name="image">The image to process. Must not be null.</param>
        /// <param name="context">Additional contextual information, if any.</param>
        /// <param name="callSettings">Call settings to apply to the RPC, if any.</param>
        /// <exception cref="AnnotateImageException">The RPC returns a response, but the response contains an error.</exception>
        /// <returns>A task representing the asynchronous operation. The task result will be the detected properties for the image.</returns>
        public async Task <ImageProperties> DetectImagePropertiesAsync(Image image, ImageContext context = null, CallSettings callSettings = null)
        {
            GaxPreconditions.CheckNotNull(image, nameof(image));
            var request = new AnnotateImageRequest
            {
                Image        = image,
                ImageContext = context,
                Features     = { new Feature {
                                     Type = Feature.Types.Type.ImageProperties
                                 } }
            };
            var response = await AnnotateAsync(request, callSettings).ConfigureAwait(false);

            return(response.ImagePropertiesAnnotation);
        }
        public void ProtoRepeatedField1()
        {
            // Sample: ProtoRepeatedField1
            // In normal code you'd populate these individual requests with more
            // information.
            AnnotateImageRequest request1 = new AnnotateImageRequest();
            AnnotateImageRequest request2 = new AnnotateImageRequest();

            // Create the batch request using an object initializer
            BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest
            {
                // Populate the repeated field with a collection initializer
                Requests = { request1, request2 }
            };
            // End sample
        }
        public void ProtoRepeatedField2()
        {
            // Sample: ProtoRepeatedField2
            // In normal code you'd populate these individual requests with more
            // information.
            AnnotateImageRequest request1 = new AnnotateImageRequest();
            AnnotateImageRequest request2 = new AnnotateImageRequest();

            // Populate the batch without using an object initializer, just by calling
            // Add on the repeated field
            BatchAnnotateImagesRequest batch = new BatchAnnotateImagesRequest();

            batch.Requests.Add(request1);
            batch.Requests.Add(request2);
            // End sample
        }
        /// <summary>
        /// Builds a Vision API request for a single image with per-feature result limits.
        /// </summary>
        /// <param name="imageContent">Base64-encoded image.</param>
        public APIRequestJSON(string imageContent, uint faceDetection = 10, uint landmarkDetection = 10, uint logoDetection = 10, uint labelDetection = 10, uint textDetection = 10, uint safeSearchDetection = 10, uint imageProperties = 0)
        {
            requests = new AnnotateImageRequest[1];
            requests[0] = new AnnotateImageRequest();
            requests[0].image = new Image(imageContent);
            // Ideally, features with a max result count of 0 should be omitted from the request.
            requests[0].features = new Feature[7];
            requests[0].features[0] = new Feature("FACE_DETECTION",faceDetection);
            requests[0].features[1] = new Feature("LANDMARK_DETECTION",landmarkDetection);
            requests[0].features[2] = new Feature("LOGO_DETECTION",logoDetection);
            requests[0].features[3] = new Feature("LABEL_DETECTION",labelDetection);
            requests[0].features[4] = new Feature("TEXT_DETECTION",textDetection);
            requests[0].features[5] = new Feature("SAFE_SEARCH_DETECTION",safeSearchDetection);
            requests[0].features[6] = new Feature("IMAGE_PROPERTIES",imageProperties); // The response type does not support this yet.

            List<string> lng = new List<string>();
            lng.Add("ja");
            requests[0].imageContext = new ImageContext();
            requests[0].imageContext.languageHints = lng.ToArray();
        }
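A brief usage sketch for APIRequestJSON with Unity's JsonUtility; the file path is a placeholder and the DTO classes (Image, Feature, ImageContext) are assumed to be [Serializable].

        // Sketch only: build the request body and serialize it for the images:annotate endpoint.
        public static string BuildRequestJson()
        {
            byte[] bytes = File.ReadAllBytes("photo.jpg"); // placeholder path
            string base64Image = Convert.ToBase64String(bytes);

            // Request text and label detection with a limit of 5; other features keep their defaults.
            var body = new APIRequestJSON(base64Image, textDetection: 5, labelDetection: 5);

            return JsonUtility.ToJson(body);
        }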