private static void MapWordsResponse(ClassifyResponse wordsResponse, VisualRecognitionViewModel toModel)
        {
            // Merges the recognize_text (words) response into the view model,
            // matching images by name. Nothing to do when the response or its
            // image list is missing (guards the NRE the old foreach could hit).
            if (wordsResponse?.Images == null)
                return;

            foreach (var image in wordsResponse.Images)
            {
                if (image == null)
                    continue;

                // Map once; used by both the add and the merge branches below.
                var words = image.Words?.Select(ImageWordsMapper.Map).ToArray();

                var currentImage = toModel.Images.FirstOrDefault(m => m.ImageName == image.ImageName);
                if (currentImage == null)
                {
                    // First time we see this image: create a new view model entry.
                    toModel.Images.Add(new ImageViewModel()
                    {
                        ImageName = image.ImageName,
                        Words = words,
                        ResolvedUrl = image.ResolvedUrl,
                        SourceUrl = image.SourceUrl,
                        ImageText = image.ImageText
                    });
                }
                else
                {
                    // Merge into the existing entry. URLs already set by an earlier
                    // response win; words and image text come from this response.
                    currentImage.Words = words;
                    currentImage.ResolvedUrl = currentImage.ResolvedUrl ?? image.ResolvedUrl;
                    currentImage.SourceUrl = currentImage.SourceUrl ?? image.SourceUrl;
                    currentImage.ImageText = image.ImageText;
                }
            }
        }
        /// <summary>
        /// Classifies an image based on a given set of classifiers, or all classifiers if no classifiers are specified.
        /// </summary>
        /// <param name="url">The URL of an image (.jpg, or .png). Redirects are followed, so you can use shortened
        ///                   URLs. The resolved URL is returned in the response. Maximum image size is 2 MB.</param>
        /// <param name="acceptLanguage">(Optional) Specifies the language of the output. You can specify en for English,
        ///                              es for Spanish, ar for Arabic, or ja for Japanese. Classifiers for which no 
        ///                              translation is available are omitted.  Default value is English.</param>
        /// <param name="threshold">(Optional) A floating value that specifies the minimum score a class must have to be
        ///                         displayed in the response. Setting the threshold to 0.0 will return all values, 
        ///                         regardless of their classification score.</param>
        /// <param name="owners">(Optional) A Collection with the value(s) ClassifierOwner.IBM and/or ClassifierOwner.Me
        ///                      to specify which classifiers to run.</param>
        /// <param name="classifierIds">(Optional) Array of classifier Ids to use when classifying the image</param>
        /// <returns>A collection of images and their corresponding classifier scores; on a non-success HTTP status the
        ///          response body is surfaced via the Error property.</returns>
        public async Task<ClassifyResponse> ClassifyAsync(string url,
            AcceptLanguage acceptLanguage = AcceptLanguage.EN,
            double? threshold = null,
            ICollection<ClassifierOwner> owners = null,
            params string[] classifierIds)
        {
            ClassifyResponse model = new ClassifyResponse();

            // Create an HttpClient to make the request using VrClient()
            using (var client = VrClient())
            {
                try
                {
                    var requestString = "api/v3/classify";

                    // add API key
                    requestString += "?api_key=" + _vrCreds.ApiKey;
                    // add API version
                    requestString += "&version=" + VersionReleaseDate;
                    // add URL — escaped, since raw URLs contain reserved characters
                    // (&, ?, #) that would otherwise corrupt the query string
                    requestString += "&url=" + Uri.EscapeDataString(url);

                    // convert the classifierIds array to a comma-separated list and add it to the request
                    if (classifierIds?.Any() == true && classifierIds[0] != null)
                    {
                        requestString += "&classifier_ids=" + string.Join(",", classifierIds);
                    }

                    // convert the owners array to a comma-separated list and add it to the request
                    // (the service expects "IBM" uppercase but "me" lowercase)
                    if (owners?.Any() == true)
                    {
                        requestString += "&owners=" + string.Join(",", owners.Select(m => m == ClassifierOwner.IBM ? m.ToString() : m.ToString().ToLowerInvariant()).ToList());
                    }

                    // if threshold was not omitted, add it to the request.
                    // Invariant culture guarantees a '.' decimal separator regardless
                    // of the host locale (a comma would break the query parameter).
                    if (threshold.HasValue)
                    {
                        requestString += "&threshold=" + threshold.Value.ToString("F2", System.Globalization.CultureInfo.InvariantCulture);
                    }

                    // set accepted languages in headers
                    client.DefaultRequestHeaders.AcceptLanguage.Clear();
                    client.DefaultRequestHeaders.AcceptLanguage.Add(new StringWithQualityHeaderValue(acceptLanguage.ToString().ToLowerInvariant()));

                    // send a GET request to the Watson service
                    var response = await client.GetAsync(requestString);

                    // if the request succeeded, read the json result as a Response object
                    if (response.IsSuccessStatusCode)
                    {
                        model = await response.Content.ReadAsAsync<ClassifyResponse>();
                    }
                    else
                    {
                        // surface the failure to the caller (consistent with the
                        // file-upload ClassifyAsync overload) instead of returning
                        // a silently empty model
                        var responseMessage = await response.Content.ReadAsStringAsync();
                        model.Error = new ErrorResponse()
                        {
                            Description = responseMessage
                        };
                    }
                }
                catch (Exception ex)
                {
                    // best-effort: log and fall through to return the (empty) model
                    Console.WriteLine(ex.StackTrace);
                }
            }
            return model;
        }
        internal static VisualRecognitionViewModel Map(ClassifyResponse classifyResponse, ClassifyResponse facesResponse, ClassifyResponse wordsResponse)
        {
            // Aggregate the three Watson responses (classify, recognize_text,
            // detect_faces) into a single view model, merging by image name.
            var model = new VisualRecognitionViewModel
            {
                Images = new List<ImageViewModel>()
            };

            MapClassifyResponse(classifyResponse, model);
            MapWordsResponse(wordsResponse, model);
            MapFacesResponse(facesResponse, model);

            return model;
        }
        /// <summary>
        /// Detects faces in an uploaded image file, optionally accompanied by a URL
        /// sent in the multipart form body.
        /// </summary>
        /// <param name="imageFileName">Name of the image file being uploaded</param>
        /// <param name="imageFileContents">Raw bytes of the image file</param>
        /// <param name="url">(Optional) image URL forwarded in the form parameters</param>
        /// <returns>The parsed face-detection response; empty on failure</returns>
        public async Task<ClassifyResponse> RecognizeFacesAsync(string imageFileName, byte[] imageFileContents, string url = null)
        {
            var result = new ClassifyResponse();

            // HttpClient configured for the Visual Recognition service
            using (var client = VrClient())
            {
                try
                {
                    // detect_faces endpoint with API key and version as query parameters
                    var endpoint = "api/v3/detect_faces"
                        + "?api_key=" + _vrCreds.ApiKey
                        + "&version=" + VersionReleaseDate;

                    // the optional url travels inside the multipart form, not the query string
                    var parameters = new ClassifyParameters { Url = url };
                    var form = CreateFileUploadRequest(
                        GetHttpContentFromParameters(parameters),
                        GetHttpContentFromImage(imageFileName, imageFileContents));

                    // POST the form data to the Visual Recognition service
                    var response = await client.PostAsync(endpoint, form);

                    // on success, deserialize the JSON body into a ClassifyResponse
                    if (response.IsSuccessStatusCode)
                    {
                        result = await response.Content.ReadAsAsync<ClassifyResponse>();
                    }
                }
                catch (Exception ex)
                {
                    // best-effort: log and return the empty model
                    Console.WriteLine(ex.StackTrace);
                }
            }

            return result;
        }
        /// <summary>
        /// Detects faces in the image at the given URL.
        /// </summary>
        /// <param name="url">The URL of an image (.jpg, or .png) to analyze</param>
        /// <returns>The parsed face-detection response; empty on failure</returns>
        public async Task<ClassifyResponse> RecognizeFacesAsync(string url)
        {
            ClassifyResponse model = new ClassifyResponse();

            // Create an HttpClient to make the request using VrClient()
            using (var client = VrClient())
            {
                try
                {
                    var requestString = "api/v3/detect_faces";

                    // add API key
                    requestString += "?api_key=" + _vrCreds.ApiKey;
                    // add API version
                    requestString += "&version=" + VersionReleaseDate;
                    // add url — escaped, since raw URLs contain reserved characters
                    // (&, ?, #) that would otherwise corrupt the query string
                    requestString += "&url=" + Uri.EscapeDataString(url);

                    // send a GET request to the Visual Recognition service
                    var response = await client.GetAsync(requestString);

                    // if the request succeeded, read the json result as a Response object
                    if (response.IsSuccessStatusCode)
                    {
                        model = await response.Content.ReadAsAsync<ClassifyResponse>();
                    }
                }
                catch (Exception ex)
                {
                    // best-effort: log and fall through to return the (empty) model
                    Console.WriteLine(ex.StackTrace);
                }
            }

            return model;
        }
        /// <summary>
        /// Runs scene-text recognition (recognize_text) on an uploaded image file.
        /// </summary>
        /// <param name="imageFileName">Name of the image file being uploaded</param>
        /// <param name="imageFileContents">Raw bytes of the image file</param>
        /// <returns>The parsed recognize_text response; empty on failure</returns>
        public async Task<ClassifyResponse> GetImageSceneTextAsync(string imageFileName, byte[] imageFileContents)
        {
            var result = new ClassifyResponse();

            // HttpClient configured for the Visual Recognition service
            using (var client = VrClient())
            {
                try
                {
                    // recognize_text endpoint with API key and version as query parameters
                    var endpoint = "api/v3/recognize_text"
                        + "?api_key=" + _vrCreds.ApiKey
                        + "&version=" + VersionReleaseDate;

                    // the image travels as multipart form data
                    var form = CreateFileUploadRequest(
                        GetHttpContentFromImage(imageFileName, imageFileContents));

                    // POST the form data to the Visual Recognition service
                    var response = await client.PostAsync(endpoint, form);

                    // on success, deserialize the JSON body into a ClassifyResponse
                    if (response.IsSuccessStatusCode)
                    {
                        result = await response.Content.ReadAsAsync<ClassifyResponse>();
                    }
                }
                catch (Exception ex)
                {
                    // best-effort: log and return the empty model
                    Console.WriteLine(ex.StackTrace);
                }
            }

            return result;
        }
        /// <summary>
        /// Classifies an image based on a given set of classifiers, or all classifiers if no classifiers are specified.
        /// </summary>
        /// <param name="filename">The name of the image file to be classified</param>
        /// <param name="fileContents">A byte-array containing the contents of the image file to be classified</param>
        /// <param name="acceptLanguage">(Optional) Specifies the language of the output. You can specify en for English,
        ///                              es for Spanish, ar for Arabic, or ja for Japanese. Classifiers for which no 
        ///                              translation is available are omitted.  Default value is English.</param>
        /// <param name="threshold">(Optional) A floating value that specifies the minimum score a class must have to be
        ///                         displayed in the response. Setting the threshold to 0.0 will return all values, 
        ///                         regardless of their classification score.</param>
        /// <param name="owners">(Optional) A Collection with the value(s) ClassifierOwner.IBM and/or ClassifierOwner.Me
        ///                      to specify which classifiers to run.</param>
        /// <param name="classifierIds">(Optional) Array of classifier Ids to use when classifying the image</param>
        /// <returns>A collection of images and their corresponding classifier scores; on a non-success HTTP status the
        ///          response body is surfaced via the Error property.</returns>
        public async Task<ClassifyResponse> ClassifyAsync(string filename,
            byte[] fileContents,
            AcceptLanguage acceptLanguage = AcceptLanguage.EN,
            double? threshold = null,
            ICollection<ClassifierOwner> owners = null,
            params string[] classifierIds)
        {
            ClassifyResponse model = new ClassifyResponse();

            // Create an HttpClient to make the request using VrClient()
            using (var client = VrClient())
            {
                try
                {
                    var parameters = new ClassifyParameters();

                    // if classifierIds was not omitted, add it to the form parameters
                    // (null-conditional form kept consistent with the URL overload)
                    if (classifierIds?.Any() == true && classifierIds[0] != null)
                    {
                        parameters.ClassifierIds = classifierIds;
                    }

                    // if owners was not omitted, add it to the form parameters
                    if (owners?.Any() == true)
                    {
                        parameters.Owners = owners;
                    }

                    // if threshold was not omitted, add it to the request.
                    // Invariant culture guarantees a '.' decimal separator regardless
                    // of the host locale (a comma would be rejected by the service).
                    if (threshold.HasValue)
                    {
                        parameters.Threshold = threshold.Value.ToString("F2", System.Globalization.CultureInfo.InvariantCulture);
                    }

                    // set accepted languages in headers
                    client.DefaultRequestHeaders.AcceptLanguage.Clear();
                    client.DefaultRequestHeaders.AcceptLanguage.Add(new StringWithQualityHeaderValue(acceptLanguage.ToString().ToLowerInvariant()));

                    // create request object: the image plus serialized parameters as multipart form data
                    HttpContent imageContent = GetHttpContentFromImage(filename, fileContents);
                    MultipartFormDataContent request = CreateFileUploadRequest(GetHttpContentFromParameters(parameters), imageContent);

                    // send a POST request to the Watson service with the form data from request
                    var response = await client.PostAsync("api/v3/classify?api_key=" + _vrCreds.ApiKey + "&version=" + VersionReleaseDate, request);

                    // if the request succeeded, read the json result as a Response object
                    if (response.IsSuccessStatusCode)
                    {
                        var jsonData = await response.Content.ReadAsStringAsync();
                        model = JsonConvert.DeserializeObject<ClassifyResponse>(jsonData);
                    }
                    else
                    {
                        // surface the failure to the caller via the Error property
                        var responseMessage = await response.Content.ReadAsStringAsync();
                        model.Error = new ErrorResponse()
                        {
                            Description = responseMessage
                        };
                    }
                }
                catch (Exception ex)
                {
                    // best-effort: log and fall through to return the (empty) model
                    Console.WriteLine(ex.StackTrace);
                }
            }
            return model;
        }