/// <summary>
/// Downloads a WAV file from <paramref name="fileURL"/> and runs continuous speech
/// recognition over it, returning the recognized text.
/// </summary>
/// <param name="fileURL">URL of the WAV audio file to transcribe.</param>
/// <returns>
/// The recognized transcript (utterances joined with a space), or a string starting
/// with "NOMATCH:" when nothing was recognized or the audio could not be processed.
/// </returns>
public async Task<string> RecognizeAudioAsync(string fileURL)
{
    var result = new StringBuilder();
    var config = SpeechConfig.FromSubscription(_subscriptionKey, _region);

    // RunContinuationsAsynchronously prevents TrySetResult (invoked from the SDK's
    // event-callback thread) from running our continuation inline on that thread.
    var stopRecognition = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

    using (var audioInput = await AudioUtils.DownloadWavFileAsync(fileURL))
    {
        using (var recognizer = new SpeechRecognizer(config, audioInput))
        {
            // Accumulate every recognized utterance; continuous recognition fires
            // Recognized once per phrase, and overwriting would keep only the last one.
            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    if (result.Length > 0)
                    {
                        result.Append(' ');
                    }

                    result.Append(e.Result.Text);
                }
            };

            recognizer.Canceled += (s, e) =>
            {
                if (e.Reason == CancellationReason.Error)
                {
                    // Replace any partial transcript with the error marker so the
                    // caller sees the failure rather than a truncated result.
                    result.Clear();
                    result.Append($"NOMATCH: Audio file error ({e.ErrorDetails})");
                }

                stopRecognition.TrySetResult(0);
            };

            recognizer.SessionStopped += (s, e) =>
            {
                stopRecognition.TrySetResult(0);
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Await completion instead of Task.WaitAny: blocking inside an async
            // method ties up a thread-pool thread and risks deadlock.
            await stopRecognition.Task.ConfigureAwait(false);

            // Stops recognition.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }

    return result.Length == 0 ? "NOMATCH: Speech could not be recognized." : result.ToString();
}
/// <summary>
/// Downloads a WAV file from <paramref name="audioUrl"/>, recognizes speech in
/// <paramref name="sourceLanguage"/>, and translates it to <paramref name="targetLanguage"/>.
/// </summary>
/// <param name="audioUrl">URL of the WAV audio file to process.</param>
/// <param name="sourceLanguage">Language of the spoken audio (e.g. "en-US").</param>
/// <param name="targetLanguage">Language to translate the recognition into.</param>
/// <returns>
/// The last translated utterance, or <c>null</c> when nothing was translated.
/// </returns>
public async Task<TranscriptUtterance> SpeechToTranslatedTextAsync(string audioUrl, string sourceLanguage, string targetLanguage)
{
    Transcripts.Clear();
    TranscriptUtterance utterance = null;

    var config = SpeechTranslationConfig.FromSubscription(_subscriptionKey, _region);
    config.SpeechRecognitionLanguage = sourceLanguage;
    config.AddTargetLanguage(targetLanguage);

    // RunContinuationsAsynchronously prevents TrySetResult (invoked from the SDK's
    // event-callback thread) from running our continuation inline on that thread.
    var stopTranslation = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

    using (var audioInput = await AudioUtils.DownloadWavFileAsync(audioUrl))
    {
        using (var recognizer = new TranslationRecognizer(config, audioInput))
        {
            // Each TranslatedSpeech event replaces the previous utterance; the method
            // intentionally returns only the final one.
            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.TranslatedSpeech)
                {
                    utterance = new TranscriptUtterance
                    {
                        Recognition = e.Result.Text,
                        Translation = e.Result.Translations.FirstOrDefault().Value,
                    };
                }
                else if (e.Result.Reason == ResultReason.NoMatch)
                {
                    Trace.TraceError($"NOMATCH: Speech could not be translated.");
                }
            };

            recognizer.Canceled += (s, e) =>
            {
                if (e.Reason == CancellationReason.Error)
                {
                    Trace.TraceError($"Failed to decode incoming text message: {e.ErrorDetails}");
                }

                stopTranslation.TrySetResult(0);
            };

            recognizer.SessionStopped += (s, e) =>
            {
                Trace.TraceInformation("Session stopped event.");
                stopTranslation.TrySetResult(0);
            };

            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Await completion instead of Task.WaitAny: blocking inside an async
            // method ties up a thread-pool thread and risks deadlock.
            await stopTranslation.Task.ConfigureAwait(false);

            // Stops translation.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

            return utterance;
        }
    }
}