private async void SpeechRecogntionFromStream()
{
    string key = string.Empty;
    string region = string.Empty;
    string deployID = string.Empty;
    string language = string.Empty;
    bool isStandard = true;

    if (this.localSetting.SpeechServerType.Equals("Standard Speech Server"))
    {
        key = this.localSetting.StandardSubscriptionKey;
        region = this.localSetting.StandardRegion;
        language = this.localSetting.StandardRecognitionLanguage;
    }
    else if (this.localSetting.SpeechServerType.Equals("Custom Speech Server"))
    {
        isStandard = false;
        key = this.localSetting.CustomSubscriptionKey;
        region = this.localSetting.CustomRegion;
        language = this.localSetting.CustomRecognitionLanguage;
        deployID = this.localSetting.CustomDeploymentID;
    }

    if (string.IsNullOrEmpty(key) || string.IsNullOrEmpty(region))
    {
        NotifyUser("Subscription key or region is missing!", NotifyType.ErrorMessage);
        return;
    }
    else if (string.IsNullOrEmpty(deployID) && isStandard == false)
    {
        NotifyUser("Deployment ID is missing!", NotifyType.ErrorMessage);
        return;
    }
    else if (string.IsNullOrEmpty(this.localSetting.TranslateSubscriptionKey))
    {
        NotifyUser("Translator key is missing!", NotifyType.ErrorMessage);
        return;
    }
    else
    {
        NotifyUser(string.Empty, NotifyType.StatusMessage);
        ShowResult(TBSRResult, string.Empty);
        ShowResult(TBTranslation, string.Empty);
    }

    stopRecognitionTaskCompletionSource = new TaskCompletionSource<int>();
    AudioInputStream audioStream = null;
    BinaryReader reader = null;
    Stream stream = null;

    var picker = new Windows.Storage.Pickers.FileOpenPicker();
    picker.FileTypeFilter.Add(".wav");
    StorageFile file = await picker.PickSingleFileAsync();
    if (file == null)
    {
        NotifyUser("No wav file was selected.", NotifyType.ErrorMessage);
        return;
    }

    try
    {
        stream = (await file.OpenReadAsync()).AsStreamForRead();
        reader = new BinaryReader(stream);

        // Creates an audio stream from the wav file.
        audioStream = Helper.OpenWaveFile(reader);

        // Creates an instance of a speech factory with the specified subscription key and service region (e.g., "westus").
        var factory = SpeechFactory.FromSubscription(key, region);

        // Creates a speech recognizer using the file as audio input. The default language is "en-us".
        using (var recognizer = factory.CreateSpeechRecognizerWithStream(audioStream, language))
        {
            // Replace with the CRIS deployment id of your customized model.
            recognizer.DeploymentId = deployID;

            // Subscribes to events.
            recognizer.IntermediateResultReceived += (s, ee) =>
            {
                NotifyUser(ee.Result.Text, NotifyType.StatusMessage);
            };

            recognizer.FinalResultReceived += (s, ee) =>
            {
                string str;
                if (ee.Result.RecognitionStatus == RecognitionStatus.Recognized)
                {
                    str = ee.Result.Text;
                    ShowResult(TBSRResult, str);
                    TranslateAsync(str);
                }
                else
                {
                    str = $"Final result: Status: {ee.Result.RecognitionStatus.ToString()}, FailureReason: {ee.Result.RecognitionFailureReason}.";
                    NotifyUser(str, NotifyType.StatusMessage);
                }
            };

            recognizer.RecognitionErrorRaised += (s, ee) =>
            {
                NotifyUser($"An error occurred. Status: {ee.Status.ToString()}, FailureReason: {ee.FailureReason}", NotifyType.StatusMessage);
            };

            recognizer.OnSessionEvent += (s, ee) =>
            {
                NotifyUser($"Session event. Event: {ee.EventType.ToString()}.", NotifyType.StatusMessage);
                // Stops recognition when a session stop event is detected.
                if (ee.EventType == SessionEventType.SessionStoppedEvent)
                {
                    NotifyUser($"Stop recognition.", NotifyType.StatusMessage);
                    stopRecognitionTaskCompletionSource.TrySetResult(0);
                }
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Waits for completion.
            await stopRecognitionTaskCompletionSource.Task.ConfigureAwait(false);

            // Stops recognition.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }
    catch (System.FormatException ex)
    {
        NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
    }
    finally
    {
        if (reader != null)
        {
            reader.Dispose();
        }
        if (audioStream != null)
        {
            audioStream.Dispose();
        }
        if (stream != null)
        {
            stream.Dispose();
        }
    }
}
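
// NOTE: The method above calls TranslateAsync(text), which is defined elsewhere in the sample and not
// shown in this file. The sketch below is a minimal, illustrative version only, assuming the Translator
// Text REST API v3 is called with the key from localSetting.TranslateSubscriptionKey and a hypothetical
// target-language setting (localSetting.TranslateTargetLanguage). The real helper's endpoint, settings,
// and response handling may differ.
private async void TranslateAsync(string text)
{
    try
    {
        using (var client = new System.Net.Http.HttpClient())
        {
            string to = this.localSetting.TranslateTargetLanguage;  // assumed setting, e.g. "de"
            string uri = "https://api.cognitive.microsofttranslator.com/translate?api-version=3.0&to=" + Uri.EscapeDataString(to);

            // Request body: [ { "Text": "<text to translate>" } ]
            var body = new Windows.Data.Json.JsonArray();
            var item = new Windows.Data.Json.JsonObject();
            item["Text"] = Windows.Data.Json.JsonValue.CreateStringValue(text);
            body.Add(item);

            var request = new System.Net.Http.HttpRequestMessage(System.Net.Http.HttpMethod.Post, uri);
            request.Headers.Add("Ocp-Apim-Subscription-Key", this.localSetting.TranslateSubscriptionKey);
            // Some Translator resources also require an "Ocp-Apim-Subscription-Region" header.
            request.Content = new System.Net.Http.StringContent(body.Stringify(), System.Text.Encoding.UTF8, "application/json");

            var response = await client.SendAsync(request);
            response.EnsureSuccessStatusCode();

            // Response shape: [ { "translations": [ { "text": "...", "to": "..." } ] } ]
            var json = Windows.Data.Json.JsonArray.Parse(await response.Content.ReadAsStringAsync());
            string translation = json.GetObjectAt(0).GetNamedArray("translations").GetObjectAt(0).GetNamedString("text");
            ShowResult(TBTranslation, translation);
        }
    }
    catch (Exception ex)
    {
        NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
    }
}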
private async void SpeechRecogntionFromStream_ButtonClicked(object sender, RoutedEventArgs e)
{
    stopRecognitionTaskCompletionSource = new TaskCompletionSource<int>();
    AudioInputStream audioStream = null;
    BinaryReader reader = null;
    Stream stream = null;

    if (!AreKeysValid())
    {
        NotifyUser("Subscription key is missing!", NotifyType.ErrorMessage);
        return;
    }
    else
    {
        NotifyUser(" ", NotifyType.StatusMessage);
    }

    var picker = new Windows.Storage.Pickers.FileOpenPicker();
    picker.FileTypeFilter.Add(".wav");
    StorageFile file = await picker.PickSingleFileAsync();
    if (file == null)
    {
        // The picker returns null when the user cancels, so don't dereference the file here.
        NotifyUser("No wav file was selected.", NotifyType.ErrorMessage);
        return;
    }

    try
    {
        stream = (await file.OpenReadAsync()).AsStreamForRead();
        reader = new BinaryReader(stream);

        // Creates an audio stream from the wav file.
        audioStream = MicrosoftSpeechSDKSamples.Helper.OpenWaveFile(reader);

        // Creates an instance of a speech factory with the specified subscription key and service region (e.g., "westus").
        var factory = SpeechFactory.FromSubscription(this.SubscriptionKey, this.Region);

        // Creates a speech recognizer using the file as audio input. The default language is "en-us".
        using (var recognizer = factory.CreateSpeechRecognizerWithStream(audioStream, this.RecognitionLanguage))
        {
            // Subscribes to events.
            recognizer.IntermediateResultReceived += (s, ee) =>
            {
                NotifyUser(ee.Result.Text, NotifyType.StatusMessage);
            };

            recognizer.FinalResultReceived += (s, ee) =>
            {
                string str;
                if (ee.Result.RecognitionStatus == RecognitionStatus.Recognized)
                {
                    str = $"Final result: Status: {ee.Result.RecognitionStatus.ToString()}, Text: {ee.Result.Text}.";
                }
                else
                {
                    str = $"Final result: Status: {ee.Result.RecognitionStatus.ToString()}, FailureReason: {ee.Result.RecognitionFailureReason}.";
                }
                NotifyUser(str, NotifyType.StatusMessage);
            };

            recognizer.RecognitionErrorRaised += (s, ee) =>
            {
                NotifyUser($"An error occurred. Status: {ee.Status.ToString()}, FailureReason: {ee.FailureReason}", NotifyType.StatusMessage);
            };

            recognizer.OnSessionEvent += (s, ee) =>
            {
                NotifyUser($"Session event. Event: {ee.EventType.ToString()}.", NotifyType.StatusMessage);
                // Stops recognition when a session stop event is detected.
                if (ee.EventType == SessionEventType.SessionStoppedEvent)
                {
                    NotifyUser($"Stop recognition.", NotifyType.StatusMessage);
                    stopRecognitionTaskCompletionSource.TrySetResult(0);
                }
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Waits for completion.
            await stopRecognitionTaskCompletionSource.Task.ConfigureAwait(false);

            // Stops recognition.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }
    catch (System.FormatException ex)
    {
        NotifyUser(ex.ToString(), NotifyType.ErrorMessage);
    }
    finally
    {
        if (reader != null)
        {
            reader.Dispose();
        }
        if (audioStream != null)
        {
            audioStream.Dispose();
        }
        if (stream != null)
        {
            stream.Dispose();
        }
    }
}
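
// NOTE: AreKeysValid() is called above but defined elsewhere in the sample. A minimal sketch is given
// below, assuming it simply validates the SubscriptionKey, Region, and RecognitionLanguage properties
// that the button handler above relies on; the real implementation may check additional settings.
private bool AreKeysValid()
{
    // Treat the configuration as invalid if any required value is missing.
    return !string.IsNullOrEmpty(this.SubscriptionKey)
        && !string.IsNullOrEmpty(this.Region)
        && !string.IsNullOrEmpty(this.RecognitionLanguage);
}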