// Single-shot language detection from an audio file (async)
        public static async Task LanguageDetectionWithFileAsync()
        {
            // <languageDetectionInAccuracyWithFile>
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Single-shot detection with the "Accuracy" priority.
            // See the language identification documentation for the other detection modes.
            config.SetProperty(PropertyId.SpeechServiceConnection_SingleLanguageIdPriority, "Accuracy");

            // Defines the candidate languages to detect among. Replace with your own candidate languages.
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "de-DE" });

            // Creates a source language recognizer using a file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"LanguageDetection_enUS.wav"))
            {
                using (var recognizer = new SourceLanguageRecognizer(config, autoDetectSourceLanguageConfig, audioInput))
                {
                    // Starts language detection on the audio file.
                    Console.WriteLine("Detecting language...");

                    // Starts language detection, and returns after a single utterance is recognized.
                    // The task returns the recognition text as result.
                    // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                    // shot detection like command or query.
                    // For long-running multi-utterance detection, use StartContinuousRecognitionAsync() instead.
                    var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                    // Checks result.
                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        var lidResult = AutoDetectSourceLanguageResult.FromResult(result);
                        Console.WriteLine($"DETECTED: Language={lidResult.Language}");
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);
                        Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }
                    }
                }
            }
            // </languageDetectionInAccuracyWithFile>
        }
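
        // NOTE: In the in-process Functions model this method also needs a [FunctionName]
        // attribute to be discovered by the runtime, e.g. [FunctionName("AudioFileProcessor")]
        // (the name here is illustrative).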
        public static async Task RunAsync([EventGridTrigger] EventGridEvent eventGridEvent, ILogger log)
        {
            // Extract the content type and URL of the blob that triggered the function.
            var jsondata = JsonConvert.SerializeObject(eventGridEvent.Data);
            var tmp      = new { contentType = "", url = "" };
            var data     = JsonConvert.DeserializeAnonymousType(jsondata, tmp);

            // Check whether the trigger fired for a WAV file.
            if (data.contentType == "audio/wav")
            {
                var    audioUrl = data.url;
                string blobName = audioUrl.Split('/').Last();

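                // Read service configuration from the Function App's application settings
                // (local.settings.json when running locally).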
                string contosoStorageConnectionString = System.Environment.GetEnvironmentVariable("ContosoStorageConnectionString", EnvironmentVariableTarget.Process);
                string speechRegion          = System.Environment.GetEnvironmentVariable("SpeechRegion", EnvironmentVariableTarget.Process);
                string speechKey             = System.Environment.GetEnvironmentVariable("SpeechKey", EnvironmentVariableTarget.Process);
                string translatorKey         = System.Environment.GetEnvironmentVariable("TranslatorKey", EnvironmentVariableTarget.Process);
                string translatorEndpoint    = System.Environment.GetEnvironmentVariable("TranslatorEndpoint", EnvironmentVariableTarget.Process);
                string translatorLocation    = System.Environment.GetEnvironmentVariable("TranslatorLocation", EnvironmentVariableTarget.Process);
                string cosmosEndpointUrl     = System.Environment.GetEnvironmentVariable("CosmosDBEndpointUrl", EnvironmentVariableTarget.Process);
                string cosmosPrimaryKey      = System.Environment.GetEnvironmentVariable("CosmosDBPrimaryKey", EnvironmentVariableTarget.Process);
                string textAnalyticsKey      = System.Environment.GetEnvironmentVariable("TextAnalyticsKey", EnvironmentVariableTarget.Process);
                string textAnalyticsEndpoint = System.Environment.GetEnvironmentVariable("TextAnalyticsEndpoint", EnvironmentVariableTarget.Process);

                // Download audio file to a local temp directory
                var tempPath = System.IO.Path.GetTempFileName();
                BlobContainerClient container = new BlobContainerClient(contosoStorageConnectionString, "audiorecordings");
                BlobClient          blob      = container.GetBlobClient(blobName);
                await blob.DownloadToAsync(tempPath);

                var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
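                // "Latency" prioritizes a fast result; the single-shot sample above uses "Accuracy" instead.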
                speechConfig.SetProperty(PropertyId.SpeechServiceConnection_SingleLanguageIdPriority, "Latency");

                // Audio Language Identification
                // Considering only two languages: English and Spanish
                // Languages supported for language detection : https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support
                var    autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "es-MX" });
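                // Default to English when no language can be detected from the audio.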
                string languageDetected = "en-US";
                using (var audioInput = AudioConfig.FromWavFileInput(tempPath))
                {
                    using (var recognizer = new SourceLanguageRecognizer(speechConfig, autoDetectSourceLanguageConfig, audioInput))
                    {
                        var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                        if (result.Reason == ResultReason.RecognizedSpeech)
                        {
                            var lidResult = AutoDetectSourceLanguageResult.FromResult(result);
                            languageDetected = lidResult.Language;
                        }
                    }
                }
                speechConfig.SpeechRecognitionLanguage = languageDetected;

                // Audio Transcription
                StringBuilder sb = new StringBuilder();
                using (var audioConfig = AudioConfig.FromWavFileInput(tempPath))
                using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                {
                    var stopRecognition = new TaskCompletionSource<int>();
                    recognizer.SessionStopped += (s, e) =>
                    {
                        stopRecognition.TrySetResult(0);
                    };
                    recognizer.Canceled += (s, e) =>
                    {
                        stopRecognition.TrySetResult(0);
                    };
                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            sb.Append(e.Result.Text);
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            log.LogInformation("NOMATCH: Speech could not be recognized.");
                        }
                    };
                    await recognizer.StartContinuousRecognitionAsync();

                    // Await the session-stopped/canceled signal instead of blocking a thread pool thread.
                    await stopRecognition.Task;
                    await recognizer.StopContinuousRecognitionAsync();
                }
                string transcribedText = sb.ToString();

                // If the transcription is not in English (i.e., Spanish here), translate it to English.
                if (!languageDetected.Contains("en"))
                {
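                    // Translator v3 expects the request body to be a JSON array of { "Text": ... } objects.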
                    string   route           = "/translate?api-version=3.0&to=en";
                    string   textToTranslate = sb.ToString();
                    object[] body            = new object[] { new { Text = textToTranslate } };
                    var      requestBody     = JsonConvert.SerializeObject(body);

                    using (var client = new HttpClient())
                    using (var request = new HttpRequestMessage())
                    {
                        request.Method     = HttpMethod.Post;
                        request.RequestUri = new Uri(translatorEndpoint + route);
                        request.Content    = new StringContent(requestBody, Encoding.UTF8, "application/json");
                        request.Headers.Add("Ocp-Apim-Subscription-Key", translatorKey);
                        request.Headers.Add("Ocp-Apim-Subscription-Region", translatorLocation);

                        HttpResponseMessage response = await client.SendAsync(request).ConfigureAwait(false);
                        var responseBody = await response.Content.ReadAsStringAsync();

                        List<Model.TranslatorService.Root> translatedDocuments = JsonConvert.DeserializeObject<List<Model.TranslatorService.Root>>(responseBody);
                        transcribedText = translatedDocuments.FirstOrDefault().Translations.FirstOrDefault().Text;
                    }
                }

                // Azure Text Analytics for Healthcare: extract medical entities from the transcription.
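                // A minimal sketch using the Azure.AI.TextAnalytics client library; it assumes the
                // Azure.AI.TextAnalytics package is referenced (TextAnalyticsClient, AzureKeyCredential)
                // and the variable names are illustrative.
                var textAnalyticsClient = new TextAnalyticsClient(new Uri(textAnalyticsEndpoint), new AzureKeyCredential(textAnalyticsKey));
                AnalyzeHealthcareEntitiesOperation healthOperation =
                    await textAnalyticsClient.StartAnalyzeHealthcareEntitiesAsync(new List<string> { transcribedText });
                await healthOperation.WaitForCompletionAsync();

                // Take the first (and only) document result; it may be null if the service returns no documents.
                AnalyzeHealthcareEntitiesResult healthcareResult = null;
                await foreach (AnalyzeHealthcareEntitiesResultCollection documentsInPage in healthOperation.Value)
                {
                    healthcareResult = documentsInPage.FirstOrDefault();
                }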


                // Insert the transcription document into Cosmos DB.
                var cosmosClient    = new CosmosClient(cosmosEndpointUrl, cosmosPrimaryKey);
                var cosmosDatabase  = (await cosmosClient.CreateDatabaseIfNotExistsAsync("Contoso")).Database;
                var cosmosContainer = (await cosmosDatabase.CreateContainerIfNotExistsAsync("Transcriptions", "/id")).Container;
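                // Partitioning by "/id" yields one logical partition per document, which is fine for a demo.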

                Model.Transcription newTranscription = new Model.Transcription();
                newTranscription.Id           = Guid.NewGuid().ToString();
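                // Assumes the blob name begins with the document date in yyyyMMdd format.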
                newTranscription.DocumentDate = new DateTime(int.Parse(blobName.Substring(0, 4)),
                                                             int.Parse(blobName.Substring(4, 2)), int.Parse(blobName.Substring(6, 2)));
                newTranscription.FileName        = blobName;
                newTranscription.TranscribedText = transcribedText;
                // Guard against a null healthcare result (e.g., if the service returned no documents).
                if (healthcareResult != null)
                {
                    foreach (var item in healthcareResult.Entities)
                    {
                        newTranscription.HealthcareEntities.Add(new Model.HealthcareEntity()
                        {
                            Category = item.Category, Text = item.Text
                        });
                    }
                }

                try
                {
                    ItemResponse <Model.Transcription> cosmosResponse = await
                                                                        cosmosContainer.CreateItemAsync(newTranscription, new PartitionKey(newTranscription.Id));
                }
                catch (CosmosException ex) when (ex.StatusCode == System.Net.HttpStatusCode.Conflict)
                {
                    // Conflicting documents are silently ignored for demo purposes.
                }

                System.IO.File.Delete(tempPath);
                log.LogInformation(eventGridEvent.Data.ToString());
            }
        }