        private async void btnRecord_Click(object sender, EventArgs e)
        {
            btnRecord.BackColor = Color.LightGreen;
            // other fun language codes:
            //fr-FR
            //ja-JP
            //hi-IN
            //de-DE
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "fr-FR", "hi-IN" });

            // Replace with your own subscription key and service region.
            using (var recognizer = new SpeechRecognizer(SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion"), autoDetectSourceLanguageConfig))
            {
                var speechRecognitionResult = await recognizer.RecognizeOnceAsync();

                if (speechRecognitionResult.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(speechRecognitionResult);
                    MessageBox.Show($"Error: Reason={cancellation.Reason}, Details={cancellation.ErrorDetails}");
                    this.Close();
                    return;
                }
                var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(speechRecognitionResult);
                var detectedLanguage = autoDetectSourceLanguageResult.Language;
                btnRecord.BackColor = default(Color);
                // detectedLanguage passed on to the OptionsMenu form
                formOptionsMenu = new OptionsMenu(detectedLanguage);
                //pop up Options Menu
                formOptionsMenu.Show();
            }
        }
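        // The OptionsMenu form field used above; this declaration is assumed, since the
        // containing form class is not shown in the snippet.
        private OptionsMenu formOptionsMenu;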
        public async Task <string> DetectLanguage(byte[] audioBytes, string fileExtension, string locale1, string locale2)
        {
            var wavBytes = ConvertToWaveBytes(audioBytes, fileExtension);

            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { locale1, locale2 });

            var config          = SpeechConfig.FromSubscription(SubscriptionKey, SubscriptionRegion);
            var stopRecognition = new TaskCompletionSource <int>();
            var detected        = new List <string>();

            using var pushStream = AudioInputStream.CreatePushStream();
            using (var audioInput = AudioConfig.FromStreamInput(pushStream))
            {
                using var recognizer = new SpeechRecognizer(
                          config,
                          autoDetectSourceLanguageConfig,
                          audioInput);
                pushStream.Write(wavBytes);
                pushStream.Close();

                recognizer.Recognized += (s, e) =>
                {
                    var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(e.Result);
                    var detectedLanguage = autoDetectSourceLanguageResult.Language;
                    detected.Add(detectedLanguage);
                    if (detected.Count > UtteranceCount)
                    {
                        stopRecognition.TrySetResult(0);
                    }
                };

                recognizer.SessionStopped += (s, e) =>
                {
                    stopRecognition.TrySetResult(0);
                };

                await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                // Fire-and-forget watchdog that stops the wait if recognition takes too long.
                _ = Task.Run(() => SetTimeOutForRecognition(stopRecognition));

                // Use Task.WaitAny to keep the task rooted.
                Task.WaitAny(new[] { stopRecognition.Task });

                await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
            }

            if (detected.Count == 0)
            {
                throw new TimeoutException("Did not get any language identification results back in time.");
            }

            var detectedByCount = detected.GroupBy(i => i);
            var mostFreq        = detectedByCount.OrderByDescending(g => g.Count()).First().Key;

            if (string.IsNullOrEmpty(mostFreq) || (!mostFreq.Equals(locale1, StringComparison.OrdinalIgnoreCase) && !mostFreq.Equals(locale2, StringComparison.OrdinalIgnoreCase)))
            {
                return(locale1);
            }

            return(mostFreq);
        }
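        // Minimal sketches of the members DetectLanguage relies on; the names come from
        // the snippet above, but the values and bodies here are assumptions.
        private const string SubscriptionKey    = "YourSubscriptionKey";
        private const string SubscriptionRegion = "YourServiceRegion";
        private const int    UtteranceCount     = 3;

        // Completes the stop signal if no result arrives within a fixed window.
        private static async Task SetTimeOutForRecognition(TaskCompletionSource <int> stopRecognition)
        {
            await Task.Delay(TimeSpan.FromSeconds(30)).ConfigureAwait(false);
            stopRecognition.TrySetResult(0);
        }

        // ConvertToWaveBytes is assumed to transcode the input audio to 16 kHz, 16-bit,
        // mono PCM WAV, the format the push stream expects; its implementation (e.g.,
        // via NAudio) is not part of the original snippet.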
        private string LanguageFromSpeechRecognitionResult(SpeechRecognitionResult result)
        {
            if (null != this.userConfig.languageIDLanguages)
            {
                var languageIDResult = AutoDetectSourceLanguageResult.FromResult(result);
                return($"[{languageIDResult.Language}]");
            }
            else
            {
                return("");
            }
        }
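        // A minimal sketch of the user config consulted above; only the member this
        // helper reads is shown, and its shape is an assumption.
        private class UserConfig
        {
            public string[] languageIDLanguages;   // null when language ID is disabled
        }
        private UserConfig userConfig;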
        // Single-shot language detection with an audio file.
        public static async Task LanguageDetectionWithFileAsync()
        {
            // <languageDetectionInAccuracyWithFile>
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Single-shot detection in Accuracy mode.
            // Please refer to the language ID documentation for the different modes.
            config.SetProperty(PropertyId.SpeechServiceConnection_SingleLanguageIdPriority, "Accuracy");

            // Define the candidate languages for detection; the candidates here are an example.
            // See https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support for the supported languages.
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "de-DE" });

            // Creates a speech recognizer using file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"LanguageDetection_enUS.wav"))
            {
                using (var recognizer = new SourceLanguageRecognizer(config, autoDetectSourceLanguageConfig, audioInput))
                {
                    // Starts recognizing.
                    Console.WriteLine("Say something...");

                    // Starts language detection, and returns after a single utterance is recognized.
                    // The task returns the recognition text as result.
                    // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                    // shot detection like command or query.
                    // For long-running multi-utterance detection, use StartContinuousRecognitionAsync() instead.
                    var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                    // Checks result.
                    if (result.Reason == ResultReason.RecognizedSpeech)
                    {
                        var lidResult = AutoDetectSourceLanguageResult.FromResult(result);
                        Console.WriteLine($"DETECTED: Language={lidResult.Language}");
                    }
                    else if (result.Reason == ResultReason.NoMatch)
                    {
                        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                    }
                    else if (result.Reason == ResultReason.Canceled)
                    {
                        var cancellation = CancellationDetails.FromResult(result);
                        Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                        if (cancellation.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }
                    }
                }
            }
            // </languageDetectionInAccuracyWithFile>
        }
        // Speech recognition from microphone.
        public static async Task RecognitionWithMicrophoneAsync()
        {
            // <recognitionWithMicrophone>
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Single-shot detection in Latency mode.
            // Please refer to the language ID documentation for the different modes.
            config.SetProperty(PropertyId.SpeechServiceConnection_SingleLanguageIdPriority, "Latency");

            // Define the candidate languages for detection; the candidates here are an example.
            // See https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support for the supported languages.
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "de-DE" });

            // Creates a speech recognizer using microphone as audio input.
            using (var recognizer = new SpeechRecognizer(config, autoDetectSourceLanguageConfig))
            {
                // Starts recognizing.
                Console.WriteLine("Say something...");

                // Starts speech recognition, and returns after a single utterance is recognized. The end of a
                // single utterance is determined by listening for silence at the end or until a maximum of 15
                // seconds of audio is processed.  The task returns the recognition text as result.
                // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
                // shot recognition like command or query.
                // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
                var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                // Checks result.
                if (result.Reason == ResultReason.RecognizedSpeech)
                {
                    var lidResult = AutoDetectSourceLanguageResult.FromResult(result);
                    Console.WriteLine($"RECOGNIZED: Text={result.Text} with language={lidResult.Language}");
                }
                else if (result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
                else if (result.Reason == ResultReason.Canceled)
                {
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");

                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                }
            }
            // </recognitionWithMicrophone>
        }
        public async Task RecognizeSpeechAsync()
        {
            text     = "Error";
            language = "Error";

            var config =
                SpeechConfig.FromSubscription("54e5c11f4ba84a95a282d180905efeb1", "westus");

            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "de-DE", "pl-PL" });

            using var recognizer = new SpeechRecognizer(config, autoDetectSourceLanguageConfig);

            var result = await recognizer.RecognizeOnceAsync();

            var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(result);
            var detectedLanguage = autoDetectSourceLanguageResult.Language;

            language = detectedLanguage;

            switch (result.Reason)
            {
            case ResultReason.RecognizedSpeech:
                text = result.Text;
                break;

            case ResultReason.NoMatch:
                text = $"NOMATCH: Rozpoznanie nie udało się.";
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);

                if (cancellation.Reason == CancellationReason.Error)
                {
                    Debug.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Debug.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Debug.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                text = $"CANCELED: Reason={cancellation.Reason}";
                break;
            }
        }
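        // The fields RecognizeSpeechAsync writes to; these declarations are assumed,
        // since the containing class is not shown in the snippet.
        private string text;
        private string language;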
        public static async Task RecognizeLng()
        {
            SpeechConfig speechConfig = SpeechConfig.FromEndpoint(new System.Uri(ConfigurationManager.AppSettings.Get("SpeechEndpoint")), ConfigurationManager.AppSettings.Get("TTSKey"));
            // Use the default microphone: speech recognition needs an audio input, not speaker output.
            AudioConfig  audioConfig  = AudioConfig.FromDefaultMicrophoneInput();
            AutoDetectSourceLanguageConfig autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig
                                                                            .FromLanguages(new string[] { "en-US", "ru-RU" });

            using (var recognizer = new SpeechRecognizer(
                       speechConfig,
                       autoDetectSourceLanguageConfig,
                       audioConfig))
            {
                Console.WriteLine("Say something...");
                var speechRecognitionResult = await recognizer.RecognizeOnceAsync();

                var autoDetectSourceLanguageResult =
                    AutoDetectSourceLanguageResult.FromResult(speechRecognitionResult);
                var detectedLng = autoDetectSourceLanguageResult.Language;
                Console.WriteLine("I recognized " + speechRecognitionResult.Text + " in " + detectedLng);
            }
        }
        /// <summary>
        /// Called when Azure's Recognizing or Recognized events have been invoked.
        /// Passes the event on to <see cref="Recognized"/>
        /// </summary>
        /// <param name="e"></param>
        /// <param name="final"></param>
        protected void OnSpeechEvent(SpeechRecognitionEventArgs e, bool final)
        {
            var language = AutoDetectSourceLanguageResult.FromResult(e.Result);

            string strEvent = final ? "Recognized" : "Recognizing";

            Logger.LogTrace($"{strEvent} ({language.Language}): {e.Result.Text}");

            if (string.IsNullOrWhiteSpace(e.Result.Text))
            {
                //this happens occasionally
                return;
            }

            var recognizedEvent = ServiceProvider.GetService(typeof(SpeechRecognizedEvent)) as SpeechRecognizedEvent;

            recognizedEvent.Preliminary = !final;
            recognizedEvent.ResultID    = e.Result.OffsetInTicks.ToString();
            recognizedEvent.Text        = e.Result.Text;
            recognizedEvent.Language    = language.Language;

            Recognized?.Invoke(this, recognizedEvent);
        }
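        // A minimal sketch of the event payload populated above; the real type is
        // resolved through the service provider, so this shape is an assumption.
        public class SpeechRecognizedEvent
        {
            public bool   Preliminary { get; set; }   // true for interim (Recognizing) results
            public string ResultID    { get; set; }
            public string Text        { get; set; }
            public string Language    { get; set; }
        }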
        static async Task <string?> recognize_continuous(SpeechRecognizer speech_recognizer, USER_CONFIG user_config)
        {
            var recognition_end = new TaskCompletionSource <string?>();
            int sequence_number = 0;

            speech_recognizer.Recognized += (object? sender, SpeechRecognitionEventArgs e) =>
            {
                if (ResultReason.RecognizedSpeech == e.Result.Reason && e.Result.Text.Length > 0)
                {
                    string? language = null;
                    if (true == user_config.language_id_enabled)
                    {
                        var language_id_result = AutoDetectSourceLanguageResult.FromResult(e.Result);
                        language = language_id_result.Language;
                    }

                    sequence_number++;
                    var    start_time = new DateTime(e.Result.OffsetInTicks);
                    var    end_time   = start_time.Add(e.Result.Duration);
                    string caption    = caption_time_to_caption(user_config.srt_enabled, sequence_number, start_time, end_time, e.Result.Text, language);
                    write_to_console_or_file(caption, user_config);
                }
                else if (ResultReason.NoMatch == e.Result.Reason)
                {
                    write_to_console(String.Format("NOMATCH: Speech could not be recognized.{0}", Environment.NewLine), user_config);
                }
            };

            speech_recognizer.Canceled += (object? sender, SpeechRecognitionCanceledEventArgs e) =>
            {
                if (CancellationReason.EndOfStream == e.Reason)
                {
                    write_to_console(String.Format("End of stream reached.{0}", Environment.NewLine), user_config);
                    recognition_end.TrySetResult(null);      // Notify to stop recognition.
                }
                else if (CancellationReason.CancelledByUser == e.Reason)
                {
                    write_to_console(String.Format("User canceled request.{0}", Environment.NewLine), user_config);
                    recognition_end.TrySetResult(null);      // Notify to stop recognition.
                }
                else if (CancellationReason.Error == e.Reason)
                {
                    var error = String.Format("Encountered error.{0}Error code: {1}{0}Error details: {2}{0}", Environment.NewLine, (int)e.ErrorCode, e.ErrorDetails);
                    recognition_end.TrySetResult(error);      // Notify to stop recognition.
                }
                else
                {
                    var error = String.Format("Request was cancelled for an unrecognized reason: {0}.{1}", (int)e.Reason, Environment.NewLine);
                    recognition_end.TrySetResult(error);      // Notify to stop recognition.
                }
            };

            speech_recognizer.SessionStopped += (object? sender, SessionEventArgs e) =>
            {
                write_to_console(String.Format("Session stopped.{0}", Environment.NewLine), user_config);
                recognition_end.TrySetResult(null);      // Notify to stop recognition.
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await speech_recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Waits for recognition end.
            Task.WaitAll(new[] { recognition_end.Task });

            // Stops recognition.
            await speech_recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

            return(recognition_end.Task.Result);
        }
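        // Minimal sketches of the output helpers used above, assuming USER_CONFIG exposes
        // a console-suppression flag and an optional output file path; the originals
        // belong to the larger captioning sample and are not shown here.
        static void write_to_console(string text, USER_CONFIG user_config)
        {
            if (!user_config.suppress_console_output)
            {
                Console.Write(text);
            }
        }

        static void write_to_console_or_file(string text, USER_CONFIG user_config)
        {
            write_to_console(text, user_config);
            if (user_config.output_file is string file)
            {
                System.IO.File.AppendAllText(file, text);
            }
        }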
        // Speech recognition with auto detection for source language and custom model
        public static async Task RecognitionWithAutoDetectSourceLanguageAndCustomModelAsync()
        {
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            var sourceLanguageConfigs = new SourceLanguageConfig[]
            {
                // The endpoint id is optional; if not specified, the service will use the default model for en-US.
                // Replace the language with your source language candidate. Please see https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support for all supported languages.
                SourceLanguageConfig.FromLanguage("en-US"),

                // Replace the id with the Custom Speech (CRIS) endpoint id of your customized model. If the speech is in fr-FR, the service will use the corresponding customized model for speech recognition
                SourceLanguageConfig.FromLanguage("fr-FR", "YourEndpointId"),
            };

            // Creates an instance of AutoDetectSourceLanguageConfig with the 2 source language configurations
            // Currently this feature only supports 2 different language candidates
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromSourceLanguageConfigs(sourceLanguageConfigs);

            var stopRecognition = new TaskCompletionSource <int>();

            // Creates a speech recognizer using the auto detect source language config, and the file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"whatstheweatherlike.wav"))
            {
                using (var recognizer = new SpeechRecognizer(config, autoDetectSourceLanguageConfig, audioInput))
                {
                    recognizer.Recognizing += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizingSpeech)
                        {
                            Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                            // Retrieve the detected language
                            var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(e.Result);
                            Console.WriteLine($"DETECTED: Language={autoDetectSourceLanguageResult.Language}");
                        }
                    };

                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                            // Retrieve the detected language
                            var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(e.Result);
                            Console.WriteLine($"DETECTED: Language={autoDetectSourceLanguageResult.Language}");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }

                        stopRecognition.TrySetResult(0);
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine("\n    Session started event.");
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine("\n    Session stopped event.");
                        Console.WriteLine("\nStop recognition.");
                        stopRecognition.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
        }
        public static async Task RunAsync([EventGridTrigger] EventGridEvent eventGridEvent, ILogger log)
        {
            //Extracting content type and url of the blob triggering the function
            var jsondata = JsonConvert.SerializeObject(eventGridEvent.Data);
            var tmp      = new { contentType = "", url = "" };
            var data     = JsonConvert.DeserializeAnonymousType(jsondata, tmp);

            //Checking if the trigger was initiated for a WAV file.
            if (data.contentType == "audio/wav")
            {
                var    audioUrl = data.url;
                string blobName = audioUrl.Split('/').Last();

                string contosoStorageConnectionString = System.Environment.GetEnvironmentVariable("ContosoStorageConnectionString", EnvironmentVariableTarget.Process);
                string speechRegion          = System.Environment.GetEnvironmentVariable("SpeechRegion", EnvironmentVariableTarget.Process);
                string speechKey             = System.Environment.GetEnvironmentVariable("SpeechKey", EnvironmentVariableTarget.Process);
                string translatorKey         = System.Environment.GetEnvironmentVariable("TranslatorKey", EnvironmentVariableTarget.Process);
                string translatorEndpoint    = System.Environment.GetEnvironmentVariable("TranslatorEndpoint", EnvironmentVariableTarget.Process);
                string translatorLocation    = System.Environment.GetEnvironmentVariable("TranslatorLocation", EnvironmentVariableTarget.Process);
                string cosmosEndpointUrl     = System.Environment.GetEnvironmentVariable("CosmosDBEndpointUrl", EnvironmentVariableTarget.Process);
                string cosmosPrimaryKey      = System.Environment.GetEnvironmentVariable("CosmosDBPrimaryKey", EnvironmentVariableTarget.Process);
                string textAnalyticsKey      = System.Environment.GetEnvironmentVariable("TextAnalyticsKey", EnvironmentVariableTarget.Process);
                string textAnalyticsEndpoint = System.Environment.GetEnvironmentVariable("TextAnalyticsEndpoint", EnvironmentVariableTarget.Process);

                // Download audio file to a local temp directory
                var tempPath = System.IO.Path.GetTempFileName();
                BlobContainerClient container = new BlobContainerClient(contosoStorageConnectionString, "audiorecordings");
                BlobClient          blob      = container.GetBlobClient(blobName);
                await blob.DownloadToAsync(tempPath);

                var speechConfig = SpeechConfig.FromSubscription(speechKey, speechRegion);
                speechConfig.SetProperty(PropertyId.SpeechServiceConnection_SingleLanguageIdPriority, "Latency");

                // Audio Language Identification
                // Considering only two languages: English and Spanish
                // Languages supported for language detection : https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support
                var    autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "es-MX" });
                string languageDetected = "en-US";
                using (var audioInput = AudioConfig.FromWavFileInput(tempPath))
                {
                    using (var recognizer = new SourceLanguageRecognizer(speechConfig, autoDetectSourceLanguageConfig, audioInput))
                    {
                        var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

                        if (result.Reason == ResultReason.RecognizedSpeech)
                        {
                            var lidResult = AutoDetectSourceLanguageResult.FromResult(result);
                            languageDetected = lidResult.Language;
                        }
                    }
                }
                speechConfig.SpeechRecognitionLanguage = languageDetected;

                // Audio Transcription
                StringBuilder sb = new StringBuilder();
                // Classic using blocks, so the recognizer and audio config are disposed
                // before the temp file is deleted at the end of this function.
                using (var audioConfig = AudioConfig.FromWavFileInput(tempPath))
                using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
                {
                        var stopRecognition = new TaskCompletionSource <int>();
                        recognizer.SessionStopped += (s, e) =>
                        {
                            stopRecognition.TrySetResult(0);
                        };
                        recognizer.Canceled += (s, e) =>
                        {
                            stopRecognition.TrySetResult(0);
                        };
                        recognizer.Recognized += (s, e) =>
                        {
                            if (e.Result.Reason == ResultReason.RecognizedSpeech)
                            {
                                sb.Append(e.Result.Text);
                            }
                            else if (e.Result.Reason == ResultReason.NoMatch)
                            {
                                log.LogInformation($"NOMATCH: Speech could not be recognized.");
                            }
                        };
                        await recognizer.StartContinuousRecognitionAsync();

                        Task.WaitAny(new[] { stopRecognition.Task });
                }
                string transcribedText = sb.ToString();

                // If transcription is in Spanish we will translate it to English
                if (!languageDetected.Contains("en"))
                {
                    string   route           = $"/translate?api-version=3.0&to=en";
                    string   textToTranslate = sb.ToString();
                    object[] body            = new object[] { new { Text = textToTranslate } };
                    var      requestBody     = JsonConvert.SerializeObject(body);

                    using (var client = new HttpClient())
                        using (var request = new HttpRequestMessage())
                        {
                            request.Method     = HttpMethod.Post;
                            request.RequestUri = new Uri(translatorEndpoint + route);
                            request.Content    = new StringContent(requestBody, Encoding.UTF8, "application/json");
                            request.Headers.Add("Ocp-Apim-Subscription-Key", translatorKey);
                            request.Headers.Add("Ocp-Apim-Subscription-Region", translatorLocation);

                            HttpResponseMessage response = await client.SendAsync(request).ConfigureAwait(false);

                            var responseBody = await response.Content.ReadAsStringAsync();

                            List <Model.TranslatorService.Root> translatedDocuments = JsonConvert.DeserializeObject <List <Model.TranslatorService.Root> >(responseBody);
                            transcribedText = translatedDocuments.FirstOrDefault().Translations.FirstOrDefault().Text;
                        }
                }

                // Azure Text Analytics for Health: the original sample leaves this step
                // as a TODO; a hedged sketch follows.
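                // Sketch (assumed, not from the original sample): analyze the transcription
                // with the Text Analytics for Health client from Azure.AI.TextAnalytics
                // (requires "using Azure;" and "using Azure.AI.TextAnalytics;").
                var textAnalyticsClient = new TextAnalyticsClient(new Uri(textAnalyticsEndpoint), new AzureKeyCredential(textAnalyticsKey));
                var healthcareOperation = await textAnalyticsClient.StartAnalyzeHealthcareEntitiesAsync(new[] { transcribedText });
                await healthcareOperation.WaitForCompletionAsync();

                // One document was submitted, so take the first (and only) result.
                AnalyzeHealthcareEntitiesResult healthcareResult = null;
                await foreach (var healthcarePage in healthcareOperation.Value)
                {
                    healthcareResult = healthcarePage.FirstOrDefault();
                    break;
                }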


                //Insert documents into CosmosDB
                var cosmosClient    = new CosmosClient(cosmosEndpointUrl, cosmosPrimaryKey);
                var cosmosDatabase  = (await cosmosClient.CreateDatabaseIfNotExistsAsync("Contoso")).Database;
                var cosmosContainer = (await cosmosDatabase.CreateContainerIfNotExistsAsync("Transcriptions", "/id")).Container;

                Model.Transcription newTranscription = new Model.Transcription();
                newTranscription.Id           = Guid.NewGuid().ToString();
                newTranscription.DocumentDate = new DateTime(int.Parse(blobName.Substring(0, 4)),
                                                             int.Parse(blobName.Substring(4, 2)), int.Parse(blobName.Substring(6, 2)));
                newTranscription.FileName        = blobName;
                newTranscription.TranscribedText = transcribedText;
                if (healthcareResult != null && !healthcareResult.HasError)
                {
                    foreach (var item in healthcareResult.Entities)
                    {
                        newTranscription.HealthcareEntities.Add(new Model.HealthcareEntity()
                        {
                            Category = item.Category.ToString(), Text = item.Text
                        });
                    }
                }

                try
                {
                    ItemResponse <Model.Transcription> cosmosResponse = await
                                                                        cosmosContainer.CreateItemAsync(newTranscription, new PartitionKey(newTranscription.Id));
                }
                catch (CosmosException ex) when(ex.StatusCode == System.Net.HttpStatusCode.Conflict)
                {
                    //Conflicting documents are silently ignored for demo purposes.
                }

                System.IO.File.Delete(tempPath);
                log.LogInformation(eventGridEvent.Data.ToString());
            }
        }
        /// <summary>
        /// Speech recognition with automatic source language detection, using the universal v2 endpoint.
        /// Multi-lingual continuous recognition is only supported by the universal v2 endpoint.
        /// </summary>
        public static async Task MultiLingualRecognitionWithUniversalV2Endpoint()
        {
            // Official v2 endpoint
            // Replace the region with your service region
            var v2EndpointInString = String.Format("wss://{0}.stt.speech.microsoft.com/speech/universal/v2", "YourServiceRegion");
            var v2EndpointUrl      = new Uri(v2EndpointInString);

            // Creates an instance of a speech config with specified subscription key.
            // Replace the subscription key with your subscription key
            var config = SpeechConfig.FromEndpoint(v2EndpointUrl, "YourSubscriptionKey");

            // Continuous detection in Latency mode.
            // Please refer to the language ID documentation for the different modes.
            config.SetProperty(PropertyId.SpeechServiceConnection_ContinuousLanguageIdPriority, "Latency");

            // Creates an instance of AutoDetectSourceLanguageConfig with the 2 source language candidates
            // Currently this feature only supports 2 different language candidates
            // Replace the languages with the language candidates for your speech. Please see https://docs.microsoft.com/azure/cognitive-services/speech-service/language-support for all supported languages.
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "zh-CN" });

            var stopRecognition = new TaskCompletionSource <int>();

            // Creates a speech recognizer using the auto detect source language config, and the file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"en-us_zh-cn.wav"))
            {
                using (var recognizer = new SpeechRecognizer(config, autoDetectSourceLanguageConfig, audioInput))
                {
                    // Subscribes to events.
                    recognizer.Recognizing += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizingSpeech)
                        {
                            Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
                            // Retrieve the detected language
                            var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(e.Result);
                            Console.WriteLine($"DETECTED: Language={autoDetectSourceLanguageResult.Language}");
                        }
                    };

                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                            // Retrieve the detected language
                            var autoDetectSourceLanguageResult = AutoDetectSourceLanguageResult.FromResult(e.Result);
                            Console.WriteLine($"DETECTED: Language={autoDetectSourceLanguageResult.Language}");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }

                        stopRecognition.TrySetResult(0);
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine("\n    Session started event.");
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine("\n    Session stopped event.");
                        Console.WriteLine("\nStop recognition.");
                        stopRecognition.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
        }
        // Continuous language detection
        public static async Task ContinuousLanguageDetectionWithFileAsync()
        {
            // <languageDetectionContinuousWithFile>
            // Creates an instance of a speech config with specified subscription key and service region.
            // Replace with your own subscription key and service region (e.g., "westus").
            var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

            // Continuous detection in Latency mode.
            // Please refer to the language ID documentation for the different modes.
            config.SetProperty(PropertyId.SpeechServiceConnection_ContinuousLanguageIdPriority, "Latency");

            var stopRecognition = new TaskCompletionSource <int>();

            // Define the candidate languages for detection; en-US and zh-CN match the sample audio file.
            var autoDetectSourceLanguageConfig = AutoDetectSourceLanguageConfig.FromLanguages(new string[] { "en-US", "zh-CN" });

            // Creates a speech recognizer using file as audio input.
            // Replace with your own audio file name.
            using (var audioInput = AudioConfig.FromWavFileInput(@"en-us_zh-cn.wav"))
            {
                using (var recognizer = new SourceLanguageRecognizer(config, autoDetectSourceLanguageConfig, audioInput))
                {
                    recognizer.Recognized += (s, e) =>
                    {
                        if (e.Result.Reason == ResultReason.RecognizedSpeech)
                        {
                            var lidResult = AutoDetectSourceLanguageResult.FromResult(e.Result);
                            Console.WriteLine($"DETECTED: Language={lidResult.Language}");
                        }
                        else if (e.Result.Reason == ResultReason.NoMatch)
                        {
                            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                        }
                    };

                    recognizer.Canceled += (s, e) =>
                    {
                        Console.WriteLine($"CANCELED: Reason={e.Reason}");

                        if (e.Reason == CancellationReason.Error)
                        {
                            Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                            Console.WriteLine($"CANCELED: Did you update the subscription info?");
                        }

                        stopRecognition.TrySetResult(0);
                    };

                    recognizer.SessionStarted += (s, e) =>
                    {
                        Console.WriteLine("\n    Session started event.");
                    };

                    recognizer.SessionStopped += (s, e) =>
                    {
                        Console.WriteLine("\n    Session stopped event.");
                        Console.WriteLine("\nStop recognition.");
                        stopRecognition.TrySetResult(0);
                    };

                    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
                    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

                    // Waits for completion.
                    // Use Task.WaitAny to keep the task rooted.
                    Task.WaitAny(new[] { stopRecognition.Task });

                    // Stops recognition.
                    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
                }
            }
            // </languageDetectionContinuousWithFile>
        }