/// <summary>
        /// Uploads the image to Cognitive Services and performs description.
        /// </summary>
        /// <param name="imageFilePath">The image file path.</param>
        /// <returns>Awaitable image description.</returns>
        /// <exception cref="InvalidOperationException">
        /// Thrown when no recognition language is selected.
        /// </exception>
        private async Task <ImageDescription> UploadAndDescribeImageAsync(string imageFilePath)
        {
            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE STARTS HERE
            // -----------------------------------------------------------------------

            //
            // Create Cognitive Services Vision API Service client.
            //
            using (var client = new ComputerVisionClient(Credentials)
            {
                Endpoint = Endpoint
            })
            {
                Log("ComputerVisionClient is created");

                using (Stream imageFileStream = File.OpenRead(imageFilePath))
                {
                    //
                    // Upload the image and request three descriptions.
                    //
                    Log("Calling ComputerVisionClient.DescribeImageInStreamAsync()...");

                    // The original dereferenced the "as" cast unconditionally, which
                    // throws NullReferenceException when nothing (or an unexpected
                    // item type) is selected. Fail with a descriptive exception instead.
                    var selectedLanguage = _language.SelectedItem as RecognizeLanguage;
                    if (selectedLanguage == null)
                    {
                        throw new InvalidOperationException("No recognition language is selected.");
                    }

                    ImageDescription analysisResult = await client.DescribeImageInStreamAsync(imageFileStream, 3, selectedLanguage.ShortCode);

                    return(analysisResult);
                }
            }

            // -----------------------------------------------------------------------
            // KEY SAMPLE CODE ENDS HERE
            // -----------------------------------------------------------------------
        }
        /// <summary>
        /// Analyze the image and return a description
        /// </summary>
        /// <param name="stream">Readable stream containing the image bytes.</param>
        /// <returns>
        /// The top caption text; "I have no idea." when the service returns no
        /// captions; a generic failure message when the call throws; null when
        /// the Computer Vision service is not available.
        /// </returns>
        public async Task <string> AnalyzeImage(Stream stream)
        {
            if (!_availableServices.Contains(AzureServiceType.ComputerVision))
            {
                return(null);
            }

            // Await the semaphore instead of blocking: the original synchronous
            // Wait() inside an async method risks deadlocks and thread-pool
            // starvation.
            await _computerVisionSemaphore.WaitAsync();
            try
            {
                ImageDescription imageDescription = await _computerVisionClient.DescribeImageInStreamAsync(stream);

                if (!imageDescription.Captions.Any())
                {
                    return("I have no idea.");
                }
                else
                {
                    return(imageDescription.Captions.First().Text);
                }
            }
            catch (Exception ex)
            {
                string message = "Failed processing image.";
                _logger.Log(message, ex);
                return(message);
            }
            finally
            {
                // Always release, even when the service call throws.
                _computerVisionSemaphore.Release();
            }
        }
        /// <summary>
        /// Describes the supplied image bytes and returns the captions and tags
        /// as a formatted string ("caption1 - caption2\r\nTags: tag1 - tag2").
        /// </summary>
        /// <param name="image">Raw image bytes to analyze.</param>
        /// <returns>Formatted captions and tags.</returns>
        public async Task <string> Analyze(byte[] image)
        {
            // Dispose the stream once the service call completes (it was leaked
            // in the original).
            using (var imageStream = new MemoryStream(image))
            {
                var description = await _client.DescribeImageInStreamAsync(imageStream, 1, "en");

                var captions = String.Join(" - ", description.Captions.Select(c => c.Text));
                var tags     = String.Join(" - ", description.Tags);

                return($"{captions}\r\nTags: {tags}");
            }
        }
// ---- Example #4 ----
        /// <summary>
        /// Describes a local image in Spanish and writes the top caption, its
        /// confidence score and the detected tags to the console.
        /// </summary>
        /// <param name="cliente">Authenticated Computer Vision client.</param>
        /// <param name="url">Path to the local image file (passed to FileStream).</param>
        private async static Task DescribeImage(ComputerVisionClient cliente, String url)
        {
            // Returns Task instead of the original "async void": void async methods
            // cannot be awaited and their exceptions are unobservable. The stream
            // is now disposed when done (it was leaked in the original).
            using (var imgStream = new FileStream(url, FileMode.Open, FileAccess.Read, FileShare.Read))
            {
                var result = await cliente.DescribeImageInStreamAsync(imgStream, language : "es");

                var captions = result.Captions;
                var tags     = result.Tags;

                Console.WriteLine("-.-.-.Descripción general de la imagen-.-.-.");
                Console.WriteLine($"Descripción de imagen: {captions[0].Text}");
                Console.WriteLine($"Score: {captions[0].Confidence}");
                Console.WriteLine($"-.-.-.Etiquetas.--.-.-.-.");
                tags.ToList().ForEach(t => Console.WriteLine($"tag: {t}"));
            }
        }
// ---- Example #5 ----
        /// <summary>
        /// Describes the supplied image bytes and returns the first caption's
        /// text, or null when the call fails or no caption is produced.
        /// </summary>
        /// <param name="imgBytes">Raw image bytes to analyze.</param>
        /// <returns>Top caption text, or null.</returns>
        public async Task <string> DescribeImage(byte[] imgBytes)
        {
            try
            {
                // Dispose the stream once the call completes (it was leaked
                // in the original).
                using (var memStream = new MemoryStream(imgBytes))
                {
                    ImageDescription analysisResult = await client.DescribeImageInStreamAsync(memStream);

                    // FirstOrDefault avoids the original Captions[0], which threw
                    // on an empty caption list and logged a spurious "Azure" error;
                    // an empty result now simply yields null.
                    return(analysisResult.Captions.FirstOrDefault()?.Text);
                }
            }
            catch (Exception ex)
            {
                _logger.Error("Error getting info from Azure", ex);
                return(null);
            }
        }
        // Describe a local image file: open it, request a description from the
        // Computer Vision service, and print the path plus the results.
        private static async Task DescribeImageFromStreamAsync(ComputerVisionClient computerVision, string imagePath)
        {
            // Guard clause: nothing to do when the file cannot be found.
            if (!File.Exists(imagePath))
            {
                Console.WriteLine("\nUnable to open or read local image path:\n{0} \n", imagePath);
                return;
            }

            // The stream is disposed as soon as the service call completes.
            using (Stream localImage = File.OpenRead(imagePath))
            {
                var describeResult = await computerVision.DescribeImageInStreamAsync(localImage);
                Console.WriteLine(imagePath);
                DisplayDescriptions(describeResult);
            }
        }
// ---- Example #7 ----
        /// <summary>
        /// HTTP-triggered function: describes the image posted in the request
        /// body and returns the highest-confidence caption. Returns 400 for GET
        /// requests or when the vision call fails.
        /// </summary>
        /// <param name="req">Incoming HTTP request; body is assumed to be image bytes.</param>
        /// <param name="log">Function logger.</param>
        public static async Task <IActionResult> Run(
            [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req,
            ILogger log)
        {
            log.LogInformation("Function invoked");

            // We only support POST requests
            if (req.Method == "GET")
            {
                return(new BadRequestResult());
            }

            // grab the key and URI from the portal config
            var visionKey      = Environment.GetEnvironmentVariable("VisionKey");
            var visionEndpoint = Environment.GetEnvironmentVariable("VisionEndpoint");

            // create a client for the image submitted; dispose it when the
            // request completes (it was leaked in the original).
            using (var vsc = new ComputerVisionClient(new ApiKeyServiceClientCredentials(visionKey))
            {
                Endpoint = visionEndpoint
            })
            {
                ImageDescription result = null;

                // We read the content as a byte array and assume it's an image
                if (req.Method == "POST")
                {
                    try
                    {
                        result = await vsc.DescribeImageInStreamAsync(req.Body);
                    }
                    catch (Exception ex)
                    {
                        // Don't swallow silently (the original had an empty catch):
                        // record why the call failed before returning 400 below.
                        log.LogError(ex, "DescribeImageInStreamAsync failed");
                    }
                }

                // if we didn't get a result from the service, return a 400
                if (result == null)
                {
                    return(new BadRequestResult());
                }

                var bestResult = result.Captions.OrderByDescending(c => c.Confidence).FirstOrDefault()?.Text;

                return(new OkObjectResult(bestResult
                                          ?? "I'm at a loss for words... I can't describe this image!"));
            }
        }
// ---- Example #8 ----
        /// <summary>
        /// Blob-triggered function: describes the uploaded photo with the
        /// Computer Vision service and returns table metadata containing the
        /// top caption (null when the service produced none).
        /// </summary>
        /// <param name="imageStream">Stream over the uploaded blob.</param>
        /// <param name="name">Blob name, used as RowKey/BlobName.</param>
        /// <param name="log">Function logger.</param>
        public static async Task <CloudPhotoMetadata> PhotoBlobTrigger(
            [BlobTrigger("photos/{name}", Connection = "StorageConnectionString")] Stream imageStream,
            string name,
            ILogger log)
        {
            var descriptions = await ComputerVisionClient.DescribeImageInStreamAsync(imageStream);

            var description = descriptions.Captions.FirstOrDefault()?.Text;

            // Fixed log-message typo: "Processing blog" -> "Processing blob".
            log.LogInformation($"Processing blob {name}");
            return(new CloudPhotoMetadata
            {
                PartitionKey = "Photometadata",
                RowKey = name,
                BlobName = name,
                Description = description
            });
        }
        /// <summary>
        /// HTTP-triggered function: describes the image posted in the request
        /// body and returns a formatted analysis result. Returns 400 for GET
        /// requests or when the vision call fails.
        /// </summary>
        /// <param name="req">Incoming HTTP request; body is assumed to be image bytes.</param>
        /// <param name="log">Function trace writer.</param>
        public async static Task <IActionResult> Run([HttpTrigger(AuthorizationLevel.Anonymous, "get", "post", Route = null)] HttpRequest req, TraceWriter log)
        {
            log.Info("Function invoked");

            // We only support POST requests
            if (req.Method == "GET")
            {
                return(new BadRequestResult());
            }

            // SECURITY: the subscription key and endpoint are hard-coded in source.
            // Move them to app settings (Environment.GetEnvironmentVariable, as the
            // sibling Run overload does) and rotate the exposed key.
            var visionKey      = "dfa6c21fa1204079ac21ab269c03ca49";
            var visionEndpoint = "https://westcentralus.api.cognitive.microsoft.com/";

            // create a client for the image submitted; dispose it when the
            // request completes (it was leaked in the original).
            using (var vsc = new ComputerVisionClient(new ApiKeyServiceClientCredentials(visionKey))
            {
                Endpoint = visionEndpoint
            })
            {
                ImageDescription result = null;

                // We read the content as a byte array and assume it's an image
                if (req.Method == "POST")
                {
                    try
                    {
                        result = await vsc.DescribeImageInStreamAsync(req.Body);
                    }
                    catch (Exception ex)
                    {
                        // Don't swallow silently (the original caught and ignored):
                        // record why the call failed before returning 400 below.
                        log.Error("DescribeImageInStreamAsync failed", ex);
                    }
                }

                // if we didn't get a result from the service, return a 400
                if (result == null)
                {
                    return(new BadRequestResult());
                }

                var formatedResult = LogDescriptionResults(result);
                var finalResult    = LogAnalysisResult(formatedResult);

                return(new OkObjectResult(finalResult
                                          ?? "I'm at a loss for words... I can't describe this image!"));
            }
        }
// ---- Example #10 ----
        /// <summary>
        /// Describes a local image file. Note: despite the parameter name, this
        /// is a local file path, not a URL — it is passed to File.OpenRead.
        /// </summary>
        /// <param name="imageUrl">Path to the local image file.</param>
        /// <returns>The image description from the vision service.</returns>
        private async Task <ImageDescription> DescribeImageAsync(string imageUrl)
        {
            // Dispose the file stream after the call completes (it was leaked
            // in the original).
            using (Stream stream = File.OpenRead(imageUrl))
            {
                return(await visionClient.DescribeImageInStreamAsync(stream));
            }
        }
// ---- Example #11 ----
    /// <summary>
    /// Snaps a photo from the camera, sends it to the Computer Vision API, and
    /// checks whether any of the current item's tags appear in the returned
    /// tags. Updates the UI (status text, camera button, loading circle) and
    /// plays audio cues along the way; on a match it records the find via
    /// PlayFab and an HTTP call, then loads the "ItemList" scene.
    /// </summary>
    public void StartSnapPhoto()
    {
        textmesh.text             = "Verifying...";
        cameraButton.interactable = false;

        LoadingCircle.Show();

        // SnapPhoto hands the captured texture to this async callback.
        StartCoroutine(controller.SnapPhoto(async tex =>
        {
            try
            {
                audioSource.PlayOneShot(clipCamera);

                // encode the image from the camera as a PNG to send to the Computer Vision API
                byte[] pngBuff  = tex.EncodeToPNG();
                MemoryStream ms = new MemoryStream(pngBuff);

                // call the vision service and get the image analysis
                ComputerVisionClient client = new ComputerVisionClient(new ApiKeyServiceClientCredentials(Globals.VisionKey), new DelegatingHandler[] { });
                client.Endpoint             = Globals.VisionEndpoint;
                ImageDescription result     = await client.DescribeImageInStreamAsync(ms);

                // send the tag list to the debug log
                // NOTE(review): Aggregate throws InvalidOperationException when the
                // service returns no tags; that case falls into the catch below.
                string tags = result.Tags.Aggregate((x, y) => $"{x}, {y}");
                Debug.Log(tags);

                // Compare the item's expected tags against what the service saw.
                foreach (string itemTag in Globals.CurrentItem.Tags)
                {
                    // NOTE(review): itemTag is lower-cased but the service tags are
                    // compared case-sensitively — assumes the API returns lowercase
                    // tags; confirm against the service response.
                    if (result.Tags.Contains(itemTag.ToLower()))
                    {
                        audioSource.PlayOneShot(clipFound);
                        textmesh.text = "You found it!";

                        PlayFabEvents.WriteEvent(PlayFabEventType.ItemFound);

                        // if the image matches, call the ItemFound function to record it
                        string s = JsonConvert.SerializeObject(Globals.CurrentItem);
                        await Globals.HttpClient.PostAsync("ItemFound", new StringContent(s, Encoding.UTF8, "application/json"));
                        LoadingCircle.Dismiss();
                        SceneManager.LoadScene("ItemList");
                        return;
                    }
                }

                // No tag matched: restart the camera stream and let the player retry.
                audioSource.PlayOneShot(clipNotFound);
                textmesh.text = "Not a match, please try again.";

                PlayFabEvents.WriteEvent(PlayFabEventType.ItemNotFound);

                controller.StartStream();
                cameraButton.interactable = true;
                LoadingCircle.Dismiss();
            }
            catch (Exception e)
            {
                // Any failure (encode, vision call, HTTP post) surfaces as a dialog;
                // the camera button is NOT re-enabled on this path.
                LoadingCircle.Dismiss();
                Debug.Log(e);
                DialogBox.Show(e.Message);
            }
        }));
    }