// Maps the recognizer's reported audio level onto the volume meter's opacity.
void SR_AudioLevelChanged(SpeechRecognizer sender, SpeechRecognitionAudioLevelChangedEventArgs args)
{
    double level = args.AudioLevel;
    // Positive levels scale directly; non-positive levels are mapped via their
    // absolute distance from 50 (same arithmetic as a plain if/else would do).
    VolumeMeter.Opacity = level > 0
        ? level / 50
        : Math.Abs((level - 50) / 100);
}
/// <summary>
/// Runs a recognition loop until _pleaseFinish is set, passing every result
/// that was not outright rejected to ProcessResult. Always clears _isRunning
/// when the loop exits, even on an exception.
/// </summary>
private async Task StartRecognizeAsync()
{
    try
    {
        var speechRecognizer = new SpeechRecognizer();
        // Constrain recognition to the phrases in _words under the "answer" rule.
        speechRecognizer.Grammars.AddGrammarFromList( "answer", _words);
        while (!_pleaseFinish)
        {
            var result = await speechRecognizer.RecognizeAsync();
            if (result.TextConfidence != SpeechRecognitionConfidence.Rejected)
            {
                ProcessResult(result);
            }
            else
            {
                Debug.WriteLine("No text!");
            }
        }
    }
    finally
    {
        // Signal loop termination to whoever checks _isRunning.
        _isRunning = false;
    }
}
/// <summary>
/// One-time setup of the shared recognizer, synthesizer and recognizer UI.
/// Selects the first installed female German (de-DE) voice and records the
/// speech-package state in isolated storage. Idempotent: later calls no-op.
/// </summary>
/// <exception cref="Exception">
/// Rethrows whatever failed during initialization (after recording failure).
/// </exception>
public static void Initialize()
{
    try
    {
        if (Speech.initialized)
        {
            return;
        }
        Speech.recognizer = new SpeechRecognizer();
        Speech.synthesizer = new SpeechSynthesizer();
        Speech.recognizerUI = new SpeechRecognizerUI();
        // First installed female de-DE voice; First() throws (caught below)
        // when no such voice exists.
        IEnumerable<VoiceInformation> deVoices =
            from voice in InstalledVoices.All
            where voice.Gender == VoiceGender.Female && voice.Language == "de-DE"
            select voice;
        Speech.synthesizer.SetVoice(deVoices.First());
        Speech.initialized = true;
        IsolatedStorageSettingsHelper.SetSpeechPackageState(true);
    }
    catch (Exception)
    {
        IsolatedStorageSettingsHelper.SetSpeechPackageState(false);
        // BUG FIX: previously threw a bare "new Exception()", discarding the
        // root cause and its stack trace. Rethrow the original instead.
        throw;
    }
}
/// <summary>
/// Lazily creates the continuous recognizer with a single "Start Listening"
/// list constraint, then starts a continuous recognition session.
/// NOTE(review): async void — exceptions are only logged, never observable
/// by callers.
/// </summary>
private async void InitSpeechRecognition()
{
    try
    {
        if (speechRecognizerContinuous == null)
        {
            speechRecognizerContinuous = new SpeechRecognizer();
            speechRecognizerContinuous.Constraints.Add(
                new SpeechRecognitionListConstraint(
                    new List<String>() { "Start Listening" }, "start"));
            SpeechRecognitionCompilationResult contCompilationResult =
                await speechRecognizerContinuous.CompileConstraintsAsync();
            if (contCompilationResult.Status != SpeechRecognitionResultStatus.Success)
            {
                // Compilation failure is surfaced via the catch below.
                throw new Exception();
            }
            speechRecognizerContinuous.ContinuousRecognitionSession.ResultGenerated +=
                ContinuousRecognitionSession_ResultGenerated;
        }
        await speechRecognizerContinuous.ContinuousRecognitionSession.StartAsync();
    }
    catch (Exception ex)
    {
        System.Diagnostics.Debug.WriteLine(ex.Message);
    }
}
/// <summary>
/// Initialize Speech Recognizer and compile constraints.
/// </summary>
/// <param name="recognizerLanguage">Language to use for the speech recognizer</param>
/// <returns>Awaitable task.</returns>
/// <exception cref="Exception">
/// Thrown when microphone access is denied/unavailable or when grammar
/// compilation fails.
/// </exception>
private async Task InitializeRecognizer(Language recognizerLanguage)
{
    MicrophoneAccessStatus status = await AudioCapturePermissions.RequestMicrophoneAccessAsync();
    if (status != MicrophoneAccessStatus.Allowed)
    {
        // Tell the user why access failed (no capture device vs. permission
        // denied), then abort initialization. Prompts are user-facing Chinese
        // strings and must stay as-is.
        string prompt = status == MicrophoneAccessStatus.NoCaptureDevices
            ? "没有检测到音频捕获设备,请检查设备后重试"
            : "您没有允许本应用访问麦克风,请在 设置 -> 隐私 -> 麦克风 中设置";
        var messageDialog = new MessageDialog(prompt);
        await messageDialog.ShowAsync();
        throw new Exception($"Request microphone access failed. Status: {status}");
    }
    // Release any previously created recognizer before building a new one.
    Dispose();
    // Create an instance of SpeechRecognizer.
    _speechRecognizer = new SpeechRecognizer(recognizerLanguage);
    // Add a web search topic constraint to the recognizer.
    var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch");
    _speechRecognizer.Constraints.Add(webSearchGrammar);
    // RecognizeWithUIAsync allows developers to customize the prompts.
    _speechRecognizer.UIOptions.AudiblePrompt = "请说出您想搜索的东西";
    _speechRecognizer.UIOptions.ExampleText = "例如:“你好,美女”";
    // Compile the constraint.
    SpeechRecognitionCompilationResult compilationResult = await _speechRecognizer.CompileConstraintsAsync();
    // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
    if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
        throw new Exception($"Unable to compile grammar. Status: {compilationResult.Status}");
}
/// <summary>
/// Creates a list-constrained recognizer, shows the recognition UI, and
/// acknowledges the "go home" family of commands with a special dialog.
/// NOTE(review): the constraint tag "yesOrNo" and the example text
/// "'Yes', 'No'" do not match the actual responses list — looks like a
/// copy-paste leftover; confirm the intended prompts.
/// </summary>
private async void SpeechButton_Click(object sender, RoutedEventArgs e)
{
    // Create an instance of SpeechRecognizer.
    this.speechRecognizer = new Windows.Media.SpeechRecognition.SpeechRecognizer();
    // You could create this array dynamically.
    string[] responses = { "Start", "Stop", "Go left", "Go right", "Go home", "Go to home", "Go to base" };
    // Add a list constraint to the recognizer.
    var listConstraint = new Windows.Media.SpeechRecognition.SpeechRecognitionListConstraint(responses, "yesOrNo");
    speechRecognizer.UIOptions.ExampleText = @"Ex. 'Yes', 'No'";
    speechRecognizer.Constraints.Add(listConstraint);
    // Compile the constraint.
    await speechRecognizer.CompileConstraintsAsync();
    // Start recognition.
    Windows.Media.SpeechRecognition.SpeechRecognitionResult speechRecognitionResult = await this.speechRecognizer.RecognizeWithUIAsync();
    var messageDialog = new Windows.UI.Popups.MessageDialog(speechRecognitionResult.Text, "Command received");
    // Do something with the recognition result.
    if (speechRecognitionResult.Text.Equals("Go home") || speechRecognitionResult.Text.Equals("Go to home") || speechRecognitionResult.Text.Equals("Go to base"))
    {
        // Replace the default echo dialog with the "heading home" response.
        messageDialog = new Windows.UI.Popups.MessageDialog("Okay, heading home now..", "Text spoken");
    }
    await messageDialog.ShowAsync();
}
/// <summary>
/// Creates the shared speech recognizer, enables it, and wires up the
/// recognition callback.
/// </summary>
private void InitializeSR()
{
    spRecognizer = new SpeechRecognizer();
    spRecognizer.Enabled = true;
    // Method-group subscription; identical to wrapping in an explicit
    // EventHandler<SpeechRecognizedEventArgs> delegate.
    spRecognizer.SpeechRecognized += spRecognizer_SpeechRecognized;
}
/// <summary>
/// Runs a one-shot recognition constrained to four color words and shows the
/// recognized text (or an error) in a message box. User-facing strings are
/// Chinese and must stay as-is.
/// </summary>
private async void Button_Click_2(object sender, RoutedEventArgs e)
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    // Limit recognition to these four color words under the "color" rule.
    speechRecognizer.Grammars.AddGrammarFromList("color", new List<string> { "红色", "白色", "蓝色", "绿色" });
    try
    {
        var result = await speechRecognizer.RecognizeAsync();
        if (result.TextConfidence == SpeechRecognitionConfidence.Rejected)
        {
            MessageBox.Show("语音识别不到");
        }
        else
        {
            MessageBox.Show(result.Text);
        }
    }
    catch (Exception err)
    {
        // Most common failure here is the speech privacy policy not being
        // accepted; surface the message and HResult to the user.
        MessageBox.Show("请检查是否接收语音隐私协议" + err.Message + err.HResult);
    }
}
/// <summary>
/// Constructor initializes necessary variables and reads in saved constraints from text file.
/// Also wires the continuous-recognition callback, starts listening, and puts
/// a tray icon up that restores the window on double-click.
/// NOTE(review): the busy-wait on waitOn.Status blocks the UI thread in a hot
/// spin until loadOnStart completes; StartAsync's result (ff) is also
/// fire-and-forget. Both deserve an async redesign.
/// </summary>
public MainWindow()
{
    InitializeComponent();
    string fileName = @"Stored_Constraints.txt";
    Debug.WriteLine(DateTime.Now.ToString());
    // Constraints file lives next to the current working directory.
    filePath = System.IO.Path.Combine(Directory.GetCurrentDirectory(), fileName);
    dateTimesForConstraints = new Dictionary<string, string>();
    backgroundListener = new SpeechRecognizer();
    constraints = new List<string>();
    BLResultGenerated = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(blResultGenerated);
    backgroundListener.ContinuousRecognitionSession.ResultGenerated += BLResultGenerated;
    constraints = readInConstraintsFromFile();
    // Keep an independent snapshot of what is currently persisted.
    currentlyStoredConstraints = constraints.ToList();
    updateConstraintsWindow(constraints);
    this.Closing += OnAppClosing;
    var waitOn = loadOnStart();
    // NOTE(review): hot spin-wait on an IAsyncAction — burns CPU and blocks
    // this thread until loading finishes.
    while (waitOn.Status != AsyncStatus.Completed) { }
    // Fire-and-forget: recognition session start is never awaited or checked.
    var ff = backgroundListener.ContinuousRecognitionSession.StartAsync();
    notifyIcon = new NotifyIcon();
    notifyIcon.Icon = new System.Drawing.Icon("trayImage.ico");
    notifyIcon.Visible = true;
    // Restore the window when the tray icon is double-clicked.
    notifyIcon.DoubleClick += delegate (object sender, EventArgs args)
    {
        this.Show();
        this.WindowState = WindowState.Normal;
    };
}
/// <summary>
/// Sets up the main window: an empty color list, a fresh speech recognizer,
/// and deferred initialization in MainWindow_Loaded.
/// </summary>
public MainWindow()
{
    InitializeComponent();
    speechRecognizer = new SpeechRecognizer();
    ColorsList = new List<string>();
    Loaded += MainWindow_Loaded;
}
/// <summary>
/// Builds the Zen view, binds it to the current cloud, listens for new
/// messages to auto-scroll, and prepares a speech recognizer.
/// </summary>
public ZenMode()
{
    InitializeComponent();
    cloud = Connection.CurrentCloud;
    DataContext = cloud;
    cloud.Controller.Messages.CollectionChanged += ScrollDown;
    recognizer = new SpeechRecognizer();
    doneWithZen = false;
}
/// <summary>
/// Voice-driven log-entry loop: waits for the user to say "Captains Log"
/// (or "Computer Captains Log"), then opens a dictation UI and stores the
/// dictated text via AddLogEntry. Repeats until cancellationSource fires.
/// NOTE(review): async void — exceptions are unobservable; the command
/// constraint is re-added/re-compiled on every loop iteration, so the
/// Constraints collection accumulates duplicates — confirm intended.
/// </summary>
private async void StartVoiceRecognition()
{
    await SpeakText( "Say Captains Log at any time to create a log entry." );
    speechRecognizerCaptainsLogCommand = new SpeechRecognizer();
    while ( !cancellationSource.IsCancellationRequested )
    {
        // Listen for user to say "Captains Log"
        ISpeechRecognitionConstraint commandConstraint =
            new SpeechRecognitionListConstraint( new[] { "Captains Log", "Computer Captains Log" } );
        speechRecognizerCaptainsLogCommand.Constraints.Add( commandConstraint );
        await speechRecognizerCaptainsLogCommand.CompileConstraintsAsync();
        SpeechRecognitionResult commandResult = await speechRecognizerCaptainsLogCommand.RecognizeAsync();
        if ( commandResult.Status != SpeechRecognitionResultStatus.Success
            || commandResult.Confidence == SpeechRecognitionConfidence.Rejected
            || cancellationSource.IsCancellationRequested )
        {
            // Not the wake phrase (or shutting down) — listen again.
            continue;
        }
        // Recognized user saying "Captains Log"
        // Listen for the user's dictation entry
        var captainsLogDictationRecognizer = new SpeechRecognizer();
        ISpeechRecognitionConstraint dictationConstraint =
            new SpeechRecognitionTopicConstraint( SpeechRecognitionScenario.Dictation, "LogEntry", "LogEntryDictation" );
        captainsLogDictationRecognizer.Constraints.Add( dictationConstraint );
        await captainsLogDictationRecognizer.CompileConstraintsAsync();
        captainsLogDictationRecognizer.UIOptions.ExampleText = "Boldly going where no man or woman has gone before.";
        captainsLogDictationRecognizer.UIOptions.AudiblePrompt = "Go ahead";
        captainsLogDictationRecognizer.UIOptions.IsReadBackEnabled = true;
        captainsLogDictationRecognizer.UIOptions.ShowConfirmation = true;
        SpeechRecognitionResult dictationResult = await captainsLogDictationRecognizer.RecognizeWithUIAsync();
        if ( dictationResult.Status != SpeechRecognitionResultStatus.Success
            || dictationResult.Confidence == SpeechRecognitionConfidence.Rejected
            || string.IsNullOrWhiteSpace( dictationResult.Text )
            || cancellationSource.IsCancellationRequested )
        {
            // Dictation failed or was abandoned — dispose this one-shot
            // recognizer and go back to listening for the wake phrase.
            captainsLogDictationRecognizer.Dispose();
            continue;
        }
        // Recognized user's dictation entry
        AddLogEntry( dictationResult.Text );
        captainsLogDictationRecognizer.Dispose();
    }
    speechRecognizerCaptainsLogCommand.Dispose();
}
/// <summary>
/// Private constructor (singleton pattern): builds a web-search topic
/// recognizer whose continuous-session results are routed to RecognitionFound.
/// NOTE(review): CompileConstraintsAsync().AsTask().Wait() blocks the
/// constructing thread and is deadlock-prone if this runs on a UI thread.
/// </summary>
private SpeechRecognitionService()
{
    _recognizer = new SpeechRecognizer();
    _recognizer.Constraints.Add(new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "webSearch"));
    _recognizer.CompileConstraintsAsync().AsTask().Wait();
    _recognizer.ContinuousRecognitionSession.ResultGenerated += RecognitionFound;
}
/// <summary>
/// Initializes a new instance of <see cref="SpeechInterpreter"/>
/// </summary>
/// <param name="container">The IoC container</param>
public SpeechInterpreter(Container container)
{
    m_Container = container;
    // Uses the system default language; pass e.g. new Language("en-US")
    // to the recognizer constructor to pin a specific one.
    m_Recognizer = new SpeechRecognizer();
    m_Recognizer.StateChanged += RecognizerStateChanged;
    m_Recognizer.ContinuousRecognitionSession.ResultGenerated += RecognizerResultGenerated;
}
/// <summary>
/// Page constructor: creates a recognizer with a web-search topic constraint.
/// NOTE(review): CompileConstraintsAsync() is fire-and-forget — the returned
/// operation is never awaited or checked, and the recognizer itself is a
/// local that is never stored, so nothing can use it afterwards. Confirm
/// whether this is placeholder code.
/// </summary>
public MainPage()
{
    this.InitializeComponent();
    var recognizer = new SpeechRecognizer();
    var topicconstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.WebSearch, "Development");
    recognizer.Constraints.Add(topicconstraint);
    var result = recognizer.CompileConstraintsAsync();
}
/// <summary>
/// Starts a continuous recognition session that listens for the single
/// phrase "note finished" and routes hits to Con_Result.
/// NOTE(review): async void; the compilation result (comResult) is never
/// checked, so a failed compile would still attempt StartAsync.
/// </summary>
private async void listenIn()
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>() { "note finished" }));
    SpeechRecognitionCompilationResult comResult = await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += Con_Result;
    await speechRecognizer.ContinuousRecognitionSession.StartAsync();
}
/// <summary>
/// Logs each intermediate hypothesis produced while recognition is running.
/// </summary>
private void SpeechRecognizerHypothesisGenerated(SpeechRecognizer sender, SpeechRecognitionHypothesisGeneratedEventArgs args)
{
    string hypothesisText = args.Hypothesis.Text;
    Debug.WriteLine(hypothesisText);
}
// Must be called before using static methods.
/// <summary>
/// Idempotent one-time setup: builds the shared recognizer and synthesizer
/// via their factory helpers and marks the class initialized.
/// </summary>
public static void Initialize()
{
    if (Speech.initialized)
    {
        return;
    }
    Recognizer = GetSpeechRecognizer();
    Synthesizer = GetSpeechSynthesizer();
    Speech.initialized = true;
}
/// <summary>
/// Builds a recognizer whose grammar accepts increasingly long "ha…" laughs
/// and routes recognized speech to r_SpeechRecognized.
/// </summary>
private void button1_Click(object sender, EventArgs e)
{
    var laughs = new Choices();
    laughs.Add(new String[] {"haha","hahaha","hahahaha","hahahahaha","hahahahahaha","hahahahahahaha","hahahahahahahaha","hahahahahahahahahah"});

    var builder = new GrammarBuilder();
    builder.Append(laughs);

    var recognizer = new SpeechRecognizer();
    recognizer.LoadGrammar(new Grammar(builder));
    recognizer.SpeechRecognized += r_SpeechRecognized;
}
/// <summary>
/// Starts a continuous recognition session for surgical-count phrases
/// ("sponge in/out", "instrument in/out", "needle in/out", "going to close")
/// and routes hits to Con_Result.
/// NOTE(review): async void; the compilation result (comResult) is never
/// checked, so a failed compile would still attempt StartAsync.
/// </summary>
private async void listenIn()
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    speechRecognizer.Constraints.Add(new SpeechRecognitionListConstraint(new List<String>()
    {
        "sponge in", "sponge out", "instrument in", "needle in","needle out", "instrument out", "going to close"
    }));
    SpeechRecognitionCompilationResult comResult = await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += Con_Result;
    await speechRecognizer.ContinuousRecognitionSession.StartAsync();
}
/// <summary>
/// Handles intermediate recognition results: mirrors the text to the UI and
/// reacts to embedded "cancel"/"stop" voice commands.
/// </summary>
void speechRec_RecognizerResultReceived(SpeechRecognizer sender, SpeechRecognitionResultReceivedEventArgs args)
{
    if (args.Text == null)
        return;
    IntermediateResultsTextBlock.Text = "IntermediateResults: " + args.Text;
    // Case-insensitive substring match without allocating lowered copies.
    // (Previously ToLower() was used for comparison — culture-sensitive and
    // allocating; OrdinalIgnoreCase is the correct non-linguistic check.)
    if (args.Text.IndexOf("cancel", StringComparison.OrdinalIgnoreCase) >= 0)
        speechRec.RequestCancelOperation();
    else if (args.Text.IndexOf("stop", StringComparison.OrdinalIgnoreCase) >= 0)
        speechRec.StopListeningAndProcessAudio();
}
/// <summary>
/// Configures a recognizer limited to allowedPhrases and subscribes the
/// recognition handler.
/// </summary>
public SpeechInput()
{
    var phraseChoices = new Choices();
    phraseChoices.Add(this.allowedPhrases);

    var grammarBuilder = new GrammarBuilder();
    grammarBuilder.Append(phraseChoices);

    var recognizer = new SpeechRecognizer();
    recognizer.LoadGrammar(new Grammar(grammarBuilder));
    recognizer.SpeechRecognized += this.Sre_SpeechRecognized;
}
/// <summary>
/// Waits for the recognizer to reach Idle, then disposes and clears it.
/// NOTE(review): async void Dispose — callers cannot await completion and
/// exceptions are unobservable; if the recognizer never reaches Idle this
/// polls forever. Consider IAsyncDisposable or stopping the session first.
/// </summary>
public async void Dispose()
{
    if (_SpeechRecognizer != null)
    {
        // Poll every 10 ms until any in-flight recognition finishes.
        while (_SpeechRecognizer.State != SpeechRecognizerState.Idle)
        {
            await Task.Delay(10);
        }
        _SpeechRecognizer.Dispose();
        _SpeechRecognizer = null;
    }
}
/// <summary>
/// Builds a zh-CN recognizer with tight silence timeouts and a minimal UI
/// (no read-back, no confirmation screen).
/// </summary>
private SpeechRecognizer GetNewSpeechRecognizer()
{
    var recognizer = new SpeechRecognizer(new Language("zh-CN"));

    // Give up after 5 s of initial silence; end the utterance after 150 ms of
    // trailing silence; disable the babble timeout entirely.
    recognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(5.0);
    recognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(0.15);
    recognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(0.0);

    recognizer.UIOptions.IsReadBackEnabled = false;
    recognizer.UIOptions.ShowConfirmation = false;
    recognizer.UIOptions.ExampleText = @"请说一些东西";

    return recognizer;
}
/// <summary>
/// When activating the scenario, ensure we have permission from the user to access their microphone, and
/// provide an appropriate path for the user to enable access to the microphone if they haven't
/// given explicit permission for it.
/// Rebuilds the dictation-topic recognizer from scratch on every call and
/// enables/disables the UI button according to permission and compile status.
/// </summary>
/// <param name="e">The navigation event details</param>
private async Task InitSpeech()
{
    // Save the UI thread dispatcher to allow speech status messages to be shown on the UI.
    dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (permissionGained)
    {
        // Enable the recognition buttons.
        button.IsEnabled = true;
        if (speechRecognizer != null)
        {
            // cleanup prior to re-initializing this scenario.
            //speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;
            this.speechRecognizer.Dispose();
            this.speechRecognizer = null;
        }
        // Create an instance of SpeechRecognizer.
        speechRecognizer = new SpeechRecognizer();
        // Provide feedback to the user about the state of the recognizer.
        //speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
        // Compile the dictation topic constraint, which optimizes for dictated speech.
        var dictationConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
        speechRecognizer.Constraints.Add(dictationConstraint);
        SpeechRecognitionCompilationResult compilationResult = await speechRecognizer.CompileConstraintsAsync();
        speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        // Check to make sure that the constraints were in a proper format and the recognizer was able to compile it.
        if (compilationResult.Status != SpeechRecognitionResultStatus.Success)
        {
            // Disable the recognition buttons.
            button.IsEnabled = false;
            // Let the user know that the grammar didn't compile properly.
            //resultTextBlock.Visibility = Visibility.Visible;
            //resultTextBlock.Text = "Unable to compile grammar.";
        }
    }
    else
    {
        // "Permission to access capture resources was not given by the user; please set the application setting in Settings->Privacy->Microphone.";
        button.IsEnabled = false;
    }
    await Task.Yield();
}
/// <summary>
/// Marshals recognizer state changes onto the UI thread, displays the state,
/// and splits the recognized text once the sound has ended.
/// </summary>
private async void Recognizer_StateChanged (SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
{
    await _dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
    {
        SpeechRecognizerState currentState = args.State;
        _state.Text = currentState.ToString();
        if (currentState == SpeechRecognizerState.SoundEnded)
        {
            SplitText(_recognizedText.Text);
        }
    });
}
/// <summary>
/// When the recognizer pauses, simulates Win+S on the UI thread (presumably
/// to open the system search/speech UI — TODO confirm intent).
/// NOTE(review): async void event handler — exceptions are unobservable.
/// </summary>
private async void onStateChanged(SpeechRecognizer rec, SpeechRecognizerStateChangedEventArgs args)
{
    System.Diagnostics.Debug.WriteLine("in onStateChanged");
    await Dispatcher.InvokeAsync(() =>
    {
        if (args.State == SpeechRecognizerState.Paused)
        {
            System.Diagnostics.Debug.WriteLine("state was paused");
            // Inject Win+S keystroke.
            InputSimulator.SimulateModifiedKeyStroke(VirtualKeyCode.LWIN, VirtualKeyCode.VK_S);
        }
    });
}
/// <summary>
/// Wires a recognizer for continuous recognition: compiles (empty)
/// constraints, subscribes result/state handlers, and defers session start
/// to HandleCompilationCompleted.
/// NOTE(review): the local constraints list is built but never added to the
/// recognizer (the Add call is commented out) — confirm whether dictation
///-only recognition is intended.
/// </summary>
public MainWindow()
{
    InitializeComponent();
    recognizer = new SpeechRecognizer();
    List<String> constraints = new List<string>();
    //recognizer.Constraints.Add(new SpeechRecognitionListConstraint(constraints));
    IAsyncOperation<SpeechRecognitionCompilationResult> op = recognizer.CompileConstraintsAsync();
    resultGenerated = new TypedEventHandler<SpeechContinuousRecognitionSession, SpeechContinuousRecognitionResultGeneratedEventArgs>(UpdateTextBox);
    recognizer.ContinuousRecognitionSession.ResultGenerated += resultGenerated;
    OnStateChanged = new TypedEventHandler<SpeechRecognizer, SpeechRecognizerStateChangedEventArgs>(onStateChanged);
    recognizer.StateChanged += OnStateChanged;
    // Session start happens in the completion callback, after compilation.
    op.Completed += HandleCompilationCompleted;
}
/// <summary>
/// Builds a recognizer limited to the "test"/"example" commands, stores the
/// recognition handler, and immediately starts recognition.
/// </summary>
public VoiceCommandRecognizer()
{
    recognizer = new SpeechRecognizer();
    Choices commands = new Choices();
    commands.Add(new string[] { "test", "example" });
    GrammarBuilder gb = new GrammarBuilder();
    gb.Append(commands);
    Grammar g = new Grammar(gb);
    recognizer.LoadGrammar(g);
    handler = new EventHandler<SpeechRecognizedEventArgs>(recognitionHandler);
    // Doing it this way for now, but this absolutely must be CHANGED
    // (translated from the original Italian comment).
    StartRecognition();
}
/// <summary>
/// Initializes the dictation recognizer for the given language and stores it
/// in _SpeechRecognizer, toggling the Listen button around the setup.
/// NOTE(review): when microphone permission is denied, a dialog is shown but
/// execution FALLS THROUGH and still creates the recognizer — an early
/// return after MessageDialog looks intended; confirm.
/// </summary>
private async void Init(Windows.Globalization.Language language)
{
    ListenButton.IsEnabled = false;
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!permissionGained)
    {
        MessageDialog("Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.");
    }
    var recognizer = new SpeechRecognizer(language);
    var topicConstraint = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "Development");
    recognizer.Constraints.Add(topicConstraint);
    // NOTE(review): compilationResult.Status is never checked.
    var compilationResult = await recognizer.CompileConstraintsAsync();
    _SpeechRecognizer = recognizer;
    ListenButton.IsEnabled = true;
}
// Speech recognition using the microphone.
/// <summary>
/// One-shot recognition from the default microphone against the Azure Speech
/// service, printing the recognized text or cancellation details to the
/// console.
/// </summary>
public static async Task RecognitionWithMicrophoneAsync()
{
    // Build the speech configuration; requires an Azure Cognitive Services
    // subscription key and service region.
    var config = SpeechConfig.FromSubscription(YourSubscriptionKey, YourServiceRegion);
    // Default recognition language: US English.
    config.SpeechRecognitionLanguage = "en-us";
    // Create the recognizer with the machine's default microphone as input.
    using (var recognizer = new SpeechRecognizer(config, AudioConfig.FromDefaultMicrophoneInput()))
    {
        Console.WriteLine("Say something...");
        // Starts recognition; returns when the first utterance has been
        // recognized. End of utterance is detected via trailing silence, or
        // after roughly 15 seconds of audio.
        var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);
        // Print the recognition outcome.
        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"RECOGNIZED: {result.Text}");
                break;
            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;
            case ResultReason.Canceled:
            default:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
        }
    }
}
/// <summary>
/// Wires all recognizer events, runs continuous recognition until the
/// completion source is signalled (by the canceled / session-stopped
/// handlers), then stops recognition and unsubscribes everything.
/// </summary>
/// <param name="recognizer">Recognizer to drive; caller owns its lifetime.</param>
/// <param name="source">Signalled by handlers to end the session.</param>
private async Task RunRecognizer(SpeechRecognizer recognizer, TaskCompletionSource <int> source)
{
    // Subscribe to events.
    EventHandler <SpeechRecognitionEventArgs> recognizingHandler = (sender, e) => RecognizingEventHandler(e);
    recognizer.Recognizing += recognizingHandler;
    EventHandler <SpeechRecognitionEventArgs> recognizedHandler = (sender, e) => RecognizedEventHandler(e);
    EventHandler <SpeechRecognitionCanceledEventArgs> canceledHandler = (sender, e) => CanceledEventHandler(e, source);
    EventHandler <SessionEventArgs> sessionStartedHandler = (sender, e) => SessionStartedEventHandler(e);
    EventHandler <SessionEventArgs> sessionStoppedHandler = (sender, e) => SessionStoppedEventHandler(e, source);
    EventHandler <RecognitionEventArgs> speechStartDetectedHandler = (sender, e) => SpeechDetectedEventHandler(e, "start");
    EventHandler <RecognitionEventArgs> speechEndDetectedHandler = (sender, e) => SpeechDetectedEventHandler(e, "end");
    recognizer.Recognized += recognizedHandler;
    recognizer.Canceled += canceledHandler;
    recognizer.SessionStarted += sessionStartedHandler;
    recognizer.SessionStopped += sessionStoppedHandler;
    // BUG FIX: these two previously used "-=" in the subscribe phase, so the
    // speech start/end-detected handlers were never attached.
    recognizer.SpeechStartDetected += speechStartDetectedHandler;
    recognizer.SpeechEndDetected += speechEndDetectedHandler;

    // Start, wait for completion signal, stop.
    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
    await source.Task.ConfigureAwait(false);
    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

    // Unsubscribe from events.
    recognizer.Recognizing -= recognizingHandler;
    recognizer.Recognized -= recognizedHandler;
    recognizer.Canceled -= canceledHandler;
    recognizer.SessionStarted -= sessionStartedHandler;
    recognizer.SessionStopped -= sessionStoppedHandler;
    recognizer.SpeechStartDetected -= speechStartDetectedHandler;
    recognizer.SpeechEndDetected -= speechEndDetectedHandler;
}
/// <summary>
/// Mirrors the command recognizer's state to the view-model, and restarts the
/// continuous session when it goes Idle while the app is still expected to be
/// listening (WaitingForCommand / WaitingForHeyHappa).
/// NOTE(review): async void event handler; a failure during re-init would be
/// unobservable.
/// </summary>
private async void CmdRecognizerStateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
{
    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, () =>
    {
        this.vm.CmdRecognizerState = args.State.ToString();
    });
    if (args.State == SpeechRecognizerState.Idle
        && (this.AppState == HappaState.WaitingForCommand || this.AppState == HappaState.WaitingForHeyHappa))
    {
        // Recognizer dropped to Idle unexpectedly — rebuild and restart it,
        // then restore the app state we were in.
        var prevState = this.AppState;
        this.UpdateAppState(HappaState.Initializing);
        await this.InitializeCmdRecognizer();
        await this.cmdRecognizer.ContinuousRecognitionSession.StartAsync();
        this.UpdateAppState(prevState);
    }
}
// Speech recognition from microphone.
/// <summary>
/// One-shot Azure Speech recognition from the default microphone, printing
/// the recognized text, no-match, or cancellation details to the console.
/// (The snippet markers below are referenced by external documentation.)
/// </summary>
public static async Task RecognitionWithMicrophoneAsync()
{
    // <recognitionWithMicrophone>
    // Creates an instance of a speech config with specified subscription key and service region.
    // Replace with your own subscription key and service region (e.g., "westus").
    // The default language is "en-us".
    var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

    // Creates a speech recognizer using microphone as audio input.
    using (var recognizer = new SpeechRecognizer(config))
    {
        // Starts recognizing.
        Console.WriteLine("Say something...");

        // Performs recognition. RecognizeOnceAsync() returns when the first utterance has been recognized,
        // so it is suitable only for single shot recognition like command or query. For long-running
        // recognition, use StartContinuousRecognitionAsync() instead.
        var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

        // Checks result.
        if (result.Reason == ResultReason.RecognizedSpeech)
        {
            Console.WriteLine($"RECOGNIZED: Text={result.Text}");
        }
        else if (result.Reason == ResultReason.NoMatch)
        {
            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
        }
        else if (result.Reason == ResultReason.Canceled)
        {
            var cancellation = CancellationDetails.FromResult(result);
            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
            if (cancellation.Reason == CancellationReason.Error)
            {
                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                Console.WriteLine($"CANCELED: Did you update the subscription info?");
            }
        }
    }
    // </recognitionWithMicrophone>
}
/// <summary>
/// Unity lifecycle hook: registers this instance as the singleton, spawns
/// the PocketSphinx prefab as a child, hooks its events, and shows loading
/// text while the recognition engine initializes.
/// </summary>
void Awake()
{
    main = this;
    UnityEngine.Assertions.Assert.IsNotNull(_pocketSphinxPrefab, "No PocketSphinx prefab assigned.");
    var obj = Instantiate(_pocketSphinxPrefab, this.transform) as GameObject;
    _pocketSphinx = obj.GetComponent <UnityPocketSphinx.PocketSphinx>();
    if (_pocketSphinx == null)
    {
        // Prefab was assigned but lacks the PocketSphinx component.
        Debug.LogError("[SpeechRecognizerDemo] No PocketSphinx component found. Did you assign the right prefab???");
    }
    SubscribeToPocketSphinxEvents();
    _infoText.text = "Please wait for Speech Recognition engine to load.";
    _SpeechResult.text = "Loading human dictionary...";
}
/// <summary>
/// Console entry point: runs continuous speech recognition listening for the
/// wake word; on hearing it, hands off to the LUIS intent recognizer
/// (awaitCommand) and speaks the parsed response, then resumes listening.
/// SECURITY NOTE(review): subscription keys and the LUIS app id are
/// hard-coded in source — move them to configuration/secret storage and
/// rotate the exposed keys.
/// </summary>
async static Task Main(string[] args)
{
    const string WAKE_WORD = "hey computer";
    var speechConfig = SpeechConfig.FromSubscription("e073d2855d604ddda74ba6518ab2e6b3", "westeurope");
    var Intentconfig = SpeechConfig.FromSubscription("9051c66d5ba949ac84e32b01c37eb9b4", "westus");
    var audioConfig = AudioConfig.FromDefaultMicrophoneInput();
    var model = LanguageUnderstandingModel.FromAppId("7f7a9344-69b6-4582-a01d-19ffa3c9bed8");
    var continuousRecognizer = new SpeechRecognizer(speechConfig, audioConfig);
    var intentRecognizer = new IntentRecognizer(Intentconfig, audioConfig);
    intentRecognizer.AddAllIntents(model);
    var synthesizer = new SpeechSynthesizer(speechConfig);
    // Guard so overlapping Recognized events don't start a second command
    // flow while one is in progress.
    bool _waitingForCommand = false;
    continuousRecognizer.Recognized += async(s, e) =>
    {
        if (!_waitingForCommand)
        {
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                if (e.Result.Text.Contains(WAKE_WORD, StringComparison.CurrentCultureIgnoreCase))
                {
                    Console.WriteLine($"RECOGNIZED: {WAKE_WORD}");
                    _waitingForCommand = true;
                    // Capture one command via the intent recognizer, then act on it.
                    await ParseCommand(synthesizer, await awaitCommand(intentRecognizer, synthesizer));
                    _waitingForCommand = false;
                    Console.WriteLine("Listening for wake word.");
                }
            }
        }
    };
    await continuousRecognizer.StartContinuousRecognitionAsync();
    Console.Write("Press any key!");
    Console.Read();
}
/// <summary>
/// Configures the PocketSphinx recognizer with the bundled en-US acoustic
/// model and dictionary, subscribes its events, and registers the
/// keyword-activation search.
/// NOTE(review): declared async Task but contains no await — compiles with a
/// warning and completes synchronously; consider dropping async or making
/// setup truly asynchronous.
/// </summary>
private async Task SetupRecognizer()
{
    Config config = Decoder.DefaultConfig();
    _recognizer = new SpeechRecognizerSetup(config)
        .SetAcousticModel(new File(assetsDir, "en-us-ptm"))
        .SetDictionary(new File(assetsDir, "cmudict-en-us.dict"))
        //.setKeywordThreshold(float.Parse("1e-1"))
        //.SetRawLogDir(assetsDir) // To disable logging of raw audio comment out this call (takes a lot of space on the device)
        .GetRecognizer();
    _recognizer.Result += Recognizer_Result;
    _recognizer.InSpeechChange += Recognizer_InSpeechChange;
    _recognizer.Timeout += Recognizer_Timeout;
    _recognizer.Stopped += _recognizer_Stopped;
    // Create keyword-activation search.
    _recognizer.AddKeyphraseSearch(KWS_SEARCH, KEYPHRASE);
}
/// <summary>
/// Initializes SIML, requests microphone permission, and starts a continuous
/// dictation session routed to the ResultGenerated/Completed handlers.
/// NOTE(review): when permission is denied a dialog is shown but execution
/// FALLS THROUGH and still starts the recognizer — an early return looks
/// intended; also async void, and the compile result is never checked.
/// </summary>
public async void initializeSpeechRec()
{
    TIASiml.initiateSIML();
    bool permissionGained = await AudioCapturePermissions.RequestMicrophonePermission();
    if (!permissionGained)
    {
        MessageDialog("Permission to access capture resources was not given by the user, reset the application setting in Settings->Privacy->Microphone.");
    }
    this.dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;
    this.speechRecognizer = new SpeechRecognizer();
    speechRecognizer.StateChanged += SpeechRecognizer_StateChanged;
    SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
    await speechRecognizer.ContinuousRecognitionSession.StartAsync();
}
/// <summary>
/// Creates a recognizer with the default client config, cancels recognition
/// (nothing was started, so the provider is expected to reject this), and
/// always closes the recognizer.
/// </summary>
public void CancelNoRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        speechRecognizer.CancelRecognition();
    }
    finally
    {
        // BUG FIX: the old "catch (Exception ex) { throw ex; }" only reset
        // the stack trace; exceptions now propagate unchanged while the
        // finally block still guarantees the recognizer is closed.
        speechRecognizer.Close();
    }
}
/// <summary>
/// Creates a one-shot UI-driven recognizer, runs it, and reports success or
/// failure through <paramref name="progress"/>.
/// </summary>
/// <param name="progress">Receives the final success/failure report; may be null.</param>
/// <param name="exampleText">Shown (prefixed "Ex. ") in the recognition UI.</param>
/// <param name="readBackEnabled">Whether the UI reads the result back.</param>
/// <param name="showConfirmation">Whether the UI shows a confirmation screen.</param>
public async void StartListening(IProgress <APIProgressReport> progress, string exampleText, bool readBackEnabled = false, bool showConfirmation = false)
{
    m_progress = progress;
    SpeechRecognitionResult speechRecognitionResult = null;

    // Create the Recognizer
    VoiceRecognizer = new SpeechRecognizer();
    VoiceRecognizer.StateChanged += speechRecognizer_StateChanged;
    VoiceRecognizer.HypothesisGenerated += VoiceRecognizer_HypothesisGenerated;
    VoiceRecognizer.RecognitionQualityDegrading += speechRecognizer_RecognitionQualityDegrading;

    // Set special commands
    VoiceRecognizer.UIOptions.ExampleText = "Ex. " + exampleText;
    VoiceRecognizer.UIOptions.IsReadBackEnabled = readBackEnabled;
    VoiceRecognizer.UIOptions.ShowConfirmation = showConfirmation;

    // Set Timeouts
    VoiceRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(6.0);
    VoiceRecognizer.Timeouts.BabbleTimeout = TimeSpan.FromSeconds(4.0);
    VoiceRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(1.2);

    try
    {
        SpeechRecognitionCompilationResult compilationResult = await VoiceRecognizer.CompileConstraintsAsync();
        speechRecognitionResult = await VoiceRecognizer.RecognizeWithUIAsync(); // RecognizeAsync();
    }
    catch (Exception e)
    {
        ExceptionHelpers.PrintOutException(e, "Speech Recognition Exception");
    }

    VoiceRecognizer = null;

    // BUG FIX: the failure report was previously nested inside the
    // "result != null" branch as "else if (progress != null)", making it
    // unreachable. Report success when a result exists, failure otherwise.
    if (progress != null)
    {
        if (speechRecognitionResult != null)
        {
            progress.Report(new APIProgressReport(100.0, "Successfully Received Voice Recognition", APIResponse.Successful, speechRecognitionResult));
        }
        else
        {
            progress.Report(new APIProgressReport(100.0, "Failed to Retrieve Voice Recognition", APIResponse.Failed));
        }
    }
}
/// <summary>
/// Event handler used to display what's being heard in the main screen's text box
/// </summary>
/// <param name="recognizer"></param>
/// <param name="args"></param>
private static void Recognizer_HypothesisGenerated(SpeechRecognizer recognizer, SpeechRecognitionHypothesisGeneratedEventArgs args)
{
    // Only mirror hypotheses once the activator phrase has been heard, either
    // in this hypothesis or in the accumulated spoken text.
    if (StringUtils.Contains(args.Hypothesis.Text, activatorString) || StringUtils.AreEqual(SpokenText, activatorString))
    {
        Utils.RunOnMainThread(() =>
        {
            if (commandBox != null)
            {
                string tempText = SpokenText + " " + args.Hypothesis.Text;
                // Strip anything heard before the activator phrase, then make
                // sure the displayed text still starts with it.
                EnsureSpokenTextDoesNotContainStuffBeforeActivatorString(ref tempText);
                if (!StringUtils.StartsWith(tempText, activatorString))
                {
                    tempText = activatorString + " " + tempText.Trim();
                }
                commandBox.Text = tempText;
            }
        });
    }
}
/// <summary>
/// Stops the recognition on the speech recognizer or translator as applicable.
/// Important: Unhook all events & clean-up resources.
/// NOTE(review): async void — callers cannot await the stop; if that matters,
/// change to async Task.
/// </summary>
public async void StopRecognition()
{
    if (recognizer != null)
    {
        await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        // Unhook every handler attached at creation time before disposing.
        recognizer.Recognizing -= RecognizingHandler;
        recognizer.Recognized -= RecognizedHandler;
        recognizer.SpeechStartDetected -= SpeechStartDetectedHandler;
        recognizer.SpeechEndDetected -= SpeechEndDetectedHandler;
        recognizer.Canceled -= CanceledHandler;
        recognizer.SessionStarted -= SessionStartedHandler;
        recognizer.SessionStopped -= SessionStoppedHandler;
        recognizer.Dispose();
        recognizer = null;
        finalString = "Speech Recognizer is now stopped.";
        UnityEngine.Debug.LogFormat("Speech Recognizer is now stopped.");
    }
}
/// <summary>
/// Relays recognizer state transitions to the CapturingStarted /
/// CapturingEnded events.
/// </summary>
private void Recognizer_StateChanged(SpeechRecognizer sender, SpeechRecognizerStateChangedEventArgs args)
{
    Debug.WriteLine($"[Speech to Text]: recognizer state changed to: {args.State.ToString()}");

    if (args.State == SpeechRecognizerState.Capturing)
    {
        this.CapturingStarted?.Invoke(this, null);
    }
    else if (args.State == SpeechRecognizerState.Processing || args.State == SpeechRecognizerState.Idle)
    {
        this.CapturingEnded?.Invoke(this, null);
    }
}
/// <summary>
/// Lazily creates the Azure speech recognizer for fromLanguage and hooks up
/// all session/recognition callbacks. No-op when already created.
/// </summary>
void CreateSpeechRecognizer()
{
    if (recognizer != null)
    {
        return;
    }
    SpeechConfig config = SpeechConfig.FromSubscription(lunarcomController.SpeechServiceAPIKey, lunarcomController.SpeechServiceRegion);
    config.SpeechRecognitionLanguage = fromLanguage;
    recognizer = new SpeechRecognizer(config);
    // NOTE: the old "if (recognizer != null)" guard here was always true —
    // the constructor either returns an instance or throws.
    recognizer.Recognizing += RecognizingHandler;
    recognizer.Recognized += RecognizedHandler;
    recognizer.SpeechStartDetected += SpeechStartDetected;
    recognizer.SpeechEndDetected += SpeechEndDetectedHandler;
    recognizer.Canceled += CancelHandler;
    recognizer.SessionStarted += SessionStartedHandler;
    recognizer.SessionStopped += SessionStoppedHandler;
}
/// <summary>
/// Lists installed synthesizer voices, announces readiness, then runs continuous
/// speech recognition until the user presses Enter.
/// </summary>
/// <remarks>
/// NOTE(review): the subscription key is hard-coded in source; move it to a
/// configuration/secret store.
/// </remarks>
public static async Task RecognizeSpeechAsync()
{
    // Creates an instance of a speech config with specified subscription key and service region.
    var config = SpeechConfig.FromSubscription("6cc36fa4db2a413989da529a8800975f", "southeastasia");

    synth = new SpeechSynthesizer();
    foreach (var v in synth.GetInstalledVoices().Select(v => v.VoiceInfo))
    {
        Console.WriteLine("Name:{0}, Gender:{1}, Age:{2}", v.Description, v.Gender, v.Age);
    }

    // Select a female teen voice if one is installed (the original comment said
    // "male senior", which contradicted the code below).
    synth.SelectVoiceByHints(VoiceGender.Female, VoiceAge.Teen);
    // Route synthesized audio to the default output device.
    synth.SetOutputToDefaultAudioDevice();

    // Creates a speech recognizer.
    using (var recognizer = new SpeechRecognizer(config))
    {
        Console.WriteLine("Say something...");
        recognizer.Recognized += Recognizer_Recognized;
        Console.WriteLine("press any key to stop...");
        synth.Speak("Sarah is ready to serve");

        // Long-running, multi-utterance recognition; results arrive via the
        // Recognized handler until recognition is stopped.
        await recognizer.StartContinuousRecognitionAsync();
        Console.ReadLine();

        // BUG FIX: continuous recognition must be stopped with
        // StopContinuousRecognitionAsync; StopKeywordRecognitionAsync only applies
        // to keyword-triggered recognition and leaves this session running.
        await recognizer.StopContinuousRecognitionAsync();

        // Dispose the synthesizer only after recognition has fully stopped.
        synth.Dispose();
    }
}
/// <summary>
/// Initializes face tracking (tracker model + Haar cascade), starts the webcam helper,
/// and starts on-device speech recognition when the platform supports it.
/// </summary>
public void Run()
{
    //initialize FaceTracker
    faceTracker = new FaceTracker(tracker_model_json_filepath);
    //initialize FaceTrackerParams
    faceTrackerParams = new FaceTrackerParams();
    cascade = new CascadeClassifier();
    cascade.load(haarcascade_frontalface_alt_xml_filepath);
    //            if (cascade.empty())
    //            {
    //                Debug.LogError("cascade file is not loaded.Please copy from “FaceTrackerExample/StreamingAssets/” to “Assets/StreamingAssets/” folder. ");
    //            }
#if UNITY_ANDROID && !UNITY_EDITOR
    // Avoids the front camera low light issue that occurs in only some Android devices (e.g. Google Pixel, Pixel2).
    webCamTextureToMatHelper.avoidAndroidFrontCameraLowLightIssue = true;
#endif
    webCamTextureToMatHelper.Initialize();

    if (SpeechRecognizer.ExistsOnDevice())
    {
        resultText.text = "I am running run";
        // Locate the scene's listener component and subscribe all recognition callbacks.
        SpeechRecognizerListener listener = GameObject.FindObjectOfType <SpeechRecognizerListener>();
        listener.onAuthorizationStatusFetched.AddListener(OnAuthorizationStatusFetched);
        listener.onAvailabilityChanged.AddListener(OnAvailabilityChange);
        listener.onErrorDuringRecording.AddListener(OnError);
        listener.onErrorOnStartRecording.AddListener(OnError);
        listener.onFinalResults.AddListener(OnFinalResult);
        listener.onPartialResults.AddListener(OnPartialResult);
        listener.onEndOfSpeech.AddListener(OnEndOfSpeech);
        //startRecordingButton.enabled = false;
        // Request microphone/speech permission, then begin recording immediately.
        // NOTE(review): recording starts without waiting for the access request to
        // resolve — confirm this ordering is intended.
        SpeechRecognizer.RequestAccess();
        SpeechRecognizer.StartRecording(true);
        resultText.text = "Say something :-)";
    }
    else
    {
        resultText.text = "Sorry, but this device doesn't support speech recognition";
        Debug.Log("Next Command is crossfade from run function");
        //GameObject.FindGameObjectWithTag("twohand)").GetComponent<Animator>().CrossFade("V", -1);
        //startRecordingButton.enabled = false;
    }
}
/// <summary>
/// Captures a spoken message via the system speech-recognition UI. If the speech
/// privacy policy has not been accepted, offers to open the relevant settings page.
/// </summary>
/// <param name="sender">The clicked control (unused).</param>
/// <param name="e">Routed event data (unused).</param>
private async void SpokenMessage_Click(object sender, RoutedEventArgs e)
{
    SpeechRecognizer speechRecognizer = new SpeechRecognizer();
    await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.UIOptions.AudiblePrompt = "Say the message you want to send ...";
    string spokenMessage = "";
    // Start recognition.
    try
    {
        SpeechRecognitionResult speechRecognitionResult = await speechRecognizer.RecognizeWithUIAsync();
        spokenMessage = speechRecognitionResult.Text;
    }
    catch (System.Runtime.InteropServices.COMException exc) when (exc.HResult == unchecked((int)0x80045509)) //privacyPolicyHResult
    //The speech privacy policy was not accepted prior to attempting a speech recognition.
    {
        ContentDialog Dialog = new ContentDialog()
        {
            Title = "The speech privacy policy was not accepted",
            Content = "You need to turn on a button called 'Get to know me'...",
            PrimaryButtonText = "Nevermind",
            SecondaryButtonText = "Show me the setting"
        };
        if (await Dialog.ShowAsync() == ContentDialogResult.Secondary)
        {
            string uriToLaunch = "ms-settings:privacy-speechtyping";
            Uri uri = new Uri(uriToLaunch);
            bool success = await Windows.System.Launcher.LaunchUriAsync(uri);
            if (!success)
            {
                // BUG FIX: the original constructed this dialog but never called
                // ShowAsync() and left the method unterminated (missing braces);
                // show the failure dialog and close all scopes.
                await new ContentDialog
                {
                    Title = "Oops! Something went wrong...",
                    Content = "The settings app could not be opened.",
                    PrimaryButtonText = "Nevermind!"
                }.ShowAsync();
            }
        }
    }
}
/// <summary>
/// MVC action: performs one-shot speech recognition, appends the recognized text to
/// the running transcript, and surfaces the outcome via ViewBag.message.
/// </summary>
/// <returns>The "Index" view in all cases, including failures.</returns>
/// <remarks>
/// NOTE(review): the subscription key is hard-coded; move it to configuration/secrets.
/// </remarks>
public async Task <ActionResult> RecognizeSpeechAsync()
{
    try
    {
        config = SpeechConfig.FromSubscription("ae9492aae8044a4c888a45a45e957d83", "westus");
        using (var recognizer = new SpeechRecognizer(config))
        {
            var result = await recognizer.RecognizeOnceAsync();
            if (result.Reason == ResultReason.RecognizedSpeech)
            {
                translatedWords = translatedWords + result.Text;
                ViewBag.message = result.Text;
            }
            else if (result.Reason == ResultReason.NoMatch)
            {
                ViewBag.message = "Not recognized";
            }
            else if (result.Reason == ResultReason.Canceled)
            {
                var cancellation = CancellationDetails.FromResult(result);
                if (cancellation.Reason == CancellationReason.Error)
                {
                    // BUG FIX: result.Text is empty for canceled results, so the
                    // original showed nothing; report the actual error details.
                    ViewBag.message = cancellation.ErrorDetails;
                }
            }
        }
    }
    catch (Exception ex)
    {
        // Best-effort: remember the stack trace for diagnostics; the page still renders.
        x = ex.StackTrace;
    }
    return(View("Index"));
}
/// <summary>
/// Starts recognition, waits until a final result is signalled, then tears down.
/// (Single-shot recognition via RecognizeOnceAsync; the continuous variants are
/// kept commented out for reference.)
/// </summary>
/// <param name="recognizer">The speech recognizer to drive.</param>
/// <param name="source">Completion source signalled by the canceled / session-stopped handlers.</param>
private async Task RunRecognizer(SpeechRecognizer recognizer, TaskCompletionSource <int> source)
{
    // Create the event handlers.
    EventHandler <SpeechRecognitionEventArgs> recognizingHandler = (sender, e) => RecognizingEventHandler(e);
    // Attach the partial-result handler.
    recognizer.Recognizing += recognizingHandler;

    EventHandler <SpeechRecognitionEventArgs> recognizedHandler = (sender, e) => RecognizedEventHandler(e);
    EventHandler <SpeechRecognitionCanceledEventArgs> canceledHandler = (sender, e) => CanceledEventHandler(e, source);
    EventHandler <SessionEventArgs> sessionStartedHandler = (sender, e) => SessionStartedEventHandler(e);
    EventHandler <SessionEventArgs> sessionStoppedHandler = (sender, e) => SessionStoppedEventHandler(e, source);
    EventHandler <RecognitionEventArgs> speechStartDetectedHandler = (sender, e) => SpeechDetectedEventHandler(e, "start");
    EventHandler <RecognitionEventArgs> speechEndDetectedHandler = (sender, e) => SpeechDetectedEventHandler(e, "end");

    recognizer.Recognized += recognizedHandler;
    recognizer.Canceled += canceledHandler;
    recognizer.SessionStarted += sessionStartedHandler;
    recognizer.SessionStopped += sessionStoppedHandler;
    // BUG FIX: these two lines used "-=" (unsubscribe) during setup, so the
    // speech start/end handlers were never invoked. Subscribe with "+=".
    recognizer.SpeechStartDetected += speechStartDetectedHandler;
    recognizer.SpeechEndDetected += speechEndDetectedHandler;

    // Start, wait for completion, then stop (single-shot recognition).
    //await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
    await recognizer.RecognizeOnceAsync();
    await source.Task.ConfigureAwait(false);
    //await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
    this.EnableButtons();

    // Unhook everything so the recognizer can be reused without duplicate handlers.
    recognizer.Recognizing -= recognizingHandler;
    recognizer.Recognized -= recognizedHandler;
    recognizer.Canceled -= canceledHandler;
    recognizer.SessionStarted -= sessionStartedHandler;
    recognizer.SessionStopped -= sessionStoppedHandler;
    recognizer.SpeechStartDetected -= speechStartDetectedHandler;
    recognizer.SpeechEndDetected -= speechEndDetectedHandler;
}
/// <summary>
/// (Re)creates the continuous-dictation speech recognizer for the given language,
/// compiles its constraints, hooks session events, and requests microphone access.
/// </summary>
/// <param name="recognizerLanguage">Language the recognizer should listen for.</param>
/// <param name="speechLanguage">Unused here; presumably kept for caller compatibility — TODO confirm.</param>
private async Task InitializeRecognizer(Language recognizerLanguage, Language speechLanguage = null)
{
    try
    {
        // Tear down any previous recognizer first so handlers are not subscribed twice.
        if (speechRecognizer != null)
        {
            speechRecognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
            speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
            speechRecognizer.HypothesisGenerated -= SpeechRecognizer_HypothesisGenerated;
            speechRecognizer.Dispose();
            speechRecognizer = null;
        }
        speechRecognizer = new SpeechRecognizer(recognizerLanguage);
        SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
        // On compile failure the error is shown in the UI; note the method still
        // wires up the handlers below (original behavior preserved).
        if (result.Status != SpeechRecognitionResultStatus.Success)
        {
            checkError.Visibility = Visibility.Visible;
            errorCheck.Visibility = Visibility.Visible;
            errorCheck.Text = "Recognition Failed!";
        }
        speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;
        speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
        speechRecognizer.HypothesisGenerated += SpeechRecognizer_HypothesisGenerated;
        isListening = false;
        dispatcher = this.Dispatcher;
        // Ask for microphone permission up front and tell the user if it fails.
        bool permissionGained = await AudioCapturePermissions.RequestMicrophoneCapture();
        if (!permissionGained)
        {
            this.dictationTextBox.Text = "Requesting Microphone Capture Fails; Make sure Microphone is plugged in";
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
    }
}
/// <summary>
/// Method to convert speech to text using the default microphone (single utterance).
/// </summary>
/// <returns>A task that completes when the one-shot recognition finishes.</returns>
public static async Task SpeechToTextAsyncwithMicrophone()
{
    // Build the speech config from the subscription key and region configured elsewhere.
    var config = SpeechConfig.FromSubscription(SubscriptionKey, ServiceRegion);

    // RecognizeOnceAsync returns after a single utterance (terminated by trailing
    // silence or at most 15 seconds of audio), so it suits one-shot commands or
    // queries; use StartContinuousRecognitionAsync for long-running recognition.
    using (var recognizer = new SpeechRecognizer(config))
    {
        Console.WriteLine("Say something...");
        var result = await recognizer.RecognizeOnceAsync();

        // Report the outcome on the console.
        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"We recognized: {result.Text}");
                break;

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
        }
    }
}
/// <summary>
/// Upon leaving, clean up the speech recognizer. Ensure we aren't still listening,
/// and disable the event handlers to prevent leaks.
/// </summary>
/// <param name="e">Unused navigation parameters.</param>
protected async override void OnNavigatedFrom(NavigationEventArgs e)
{
    if (speechRecognizer == null)
    {
        return;
    }

    // Cancel any in-flight continuous session before tearing down.
    if (isListening)
    {
        await speechRecognizer.ContinuousRecognitionSession.CancelAsync();
        isListening = false;
    }

    // Detach all handlers, then release the recognizer.
    speechRecognizer.ContinuousRecognitionSession.Completed -= ContinuousRecognitionSession_Completed;
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated -= ContinuousRecognitionSession_ResultGenerated;
    speechRecognizer.StateChanged -= SpeechRecognizer_StateChanged;

    speechRecognizer.Dispose();
    speechRecognizer = null;
}
/// <summary>
/// Records one utterance in the language selected in the UI and shows the result
/// (or a cancellation/error report) via UpdateUI.
/// </summary>
/// <param name="sender">The record button (unused).</param>
/// <param name="e">Event data (unused).</param>
private async void OnRecordButtonClicked(object sender, EventArgs e)
{
    try
    {
        var config = SpeechConfig.FromSubscription(speech_key, "westus");
        // Recognition and synthesis both follow the language picked in the UI.
        config.SpeechRecognitionLanguage = config.SpeechSynthesisLanguage = viewModel.LangCodeDictionary[(string)SourceLanguage.SelectedItem];

        using (var recognizer = new SpeechRecognizer(config))
        {
            var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

            // Accumulate the report, then push it to the UI in one call.
            StringBuilder output = new StringBuilder();
            switch (result.Reason)
            {
                case ResultReason.RecognizedSpeech:
                    output.AppendLine(result.Text);
                    break;

                case ResultReason.NoMatch:
                    output.AppendLine($"NOMATCH: Speech could not be recognized.");
                    break;

                case ResultReason.Canceled:
                    var cancellation = CancellationDetails.FromResult(result);
                    output.AppendLine($"CANCELED: Reason={cancellation.Reason}");
                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        output.AppendLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        output.AppendLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        output.AppendLine($"CANCELED: Did you update the subscription info?");
                    }
                    break;
            }
            UpdateUI(output.ToString());
        }
    }
    catch (Exception ex)
    {
        UpdateUI("Exception: " + ex);
    }
}
/// <summary>
/// Performs one-shot speech recognition from the default microphone and appends the
/// recognized text to txt_STTText; other outcomes are logged to the console.
/// </summary>
/// <remarks>
/// NOTE(review): the subscription key is hard-coded in source; move it to
/// configuration/secrets. Also, ".Result" blocks the calling thread — if this runs
/// on a UI thread it can deadlock; consider an async variant if call sites allow.
/// </remarks>
public void RecognizeSpeech()
{
    // Creates an instance of a speech config with specified subscription key and service region.
    // Replace with your own subscription key and service region (e.g., "westus").
    var config = SpeechConfig.FromSubscription("c78539a1f1754b37a9d72875a3d19c06", "southeastasia");

    // Creates a speech recognizer.
    using (var recognizer = new SpeechRecognizer(config))
    {
        Console.WriteLine("Say something...");
        // Starts speech recognition, and returns after a single utterance is recognized. The end of a
        // single utterance is determined by listening for silence at the end or until a maximum of 15
        // seconds of audio is processed. The task returns the recognition text as result.
        // Note: Since RecognizeOnceAsync() returns only a single utterance, it is suitable only for single
        // shot recognition like command or query.
        // For long-running multi-utterance recognition, use StartContinuousRecognitionAsync() instead.
        var result = recognizer.RecognizeOnceAsync().Result;

        // Checks result.
        if (result.Reason == ResultReason.RecognizedSpeech)
        {
            txt_STTText.Text += result.Text + Environment.NewLine;
        }
        else if (result.Reason == ResultReason.NoMatch)
        {
            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
        }
        else if (result.Reason == ResultReason.Canceled)
        {
            var cancellation = CancellationDetails.FromResult(result);
            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
            if (cancellation.Reason == CancellationReason.Error)
            {
                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                Console.WriteLine($"CANCELED: Did you update the subscription info?");
            }
        }
    }
}
/// <summary>
/// Runs continuous speech recognition over a WAV file and waits until the session
/// stops (end of file or cancellation) before tearing the recognizer down.
/// </summary>
/// <param name="file">WAV file to recognize; only its path is used.</param>
public async Task SpeechRecognitionFromFileAsync(StorageFile file)
{
    SpeechConfig config = GetRecognizerConfig();
    if (config == null)
    {
        // No usable configuration — nothing to do.
        return;
    }

    ResetState();
    // Completed by the SessionStopped handler below; awaited to block until done.
    stopRecognitionTaskCompletionSource = new TaskCompletionSource <int>();

    using (var audioInput = AudioConfig.FromWavFileInput(file.Path))
    {
        using (var recognizer = new SpeechRecognizer(config, audioInput))
        {
            recognizer.Recognizing += OnRecognizing;
            recognizer.Recognized += OnRecognized;
            recognizer.Canceled += OnCanceled;
            recognizer.SessionStarted += (s, e) =>
            {
                // Fresh cancellation source for each session.
                recognizeCancellationTokenSource = new CancellationTokenSource();
            };
            recognizer.SessionStopped += (s, e) =>
            {
                // Cancel any dependent work, then release the waiter below.
                if (recognizeCancellationTokenSource != null && recognizeCancellationTokenSource.Token.CanBeCanceled)
                {
                    recognizeCancellationTokenSource.Cancel();
                }
                stopRecognitionTaskCompletionSource.TrySetResult(0);
            };

            // Starts continuous recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // Waits for completion.
            await stopRecognitionTaskCompletionSource.Task.ConfigureAwait(false);

            // Stops recognition.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }
}
/// <summary>
/// Recognizes wav files but only 15 secs at max.
/// </summary>
/// <param name="stream">Audio content to recognize; copied to disk first.</param>
/// <param name="fileName">File name used for the copy under the "audios" directory.</param>
/// <returns>The recognized text, or null when nothing could be recognized.</returns>
public async Task <string> RecognizeShortAudio(Stream stream, string fileName)
{
    // Ensure the working directory for audio copies exists.
    DirectoryInfo info = new DirectoryInfo("audios");
    if (!info.Exists)
    {
        info.Create();
    }

    // Persist the incoming stream so the SDK can read it as a WAV file.
    string path = Path.Combine("audios", fileName);
    using (FileStream outputFileStream = new FileStream(path, FileMode.Create))
    {
        stream.CopyTo(outputFileStream);
    }

    // FIX: AudioConfig and SpeechRecognizer are IDisposable; the original leaked
    // both on every call. Wrap them in using blocks.
    using (var audioConfig = AudioConfig.FromWavFileInput(path))
    using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
    {
        var result = await recognizer.RecognizeOnceAsync();
        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"RECOGNIZED: Text={result.Text}");
                return(result.Text);

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                }
                break;
        }
        return(null);
    }
}
/// <summary>
/// Recognizes the first utterance of a Portuguese (pt-PT) WAV file and stores the
/// outcome in result_text; exceptions are logged and swallowed.
/// </summary>
/// <param name="path">Path to the WAV file to recognize.</param>
/// <remarks>
/// NOTE(review): the subscription key is hard-coded; move it to configuration/secrets.
/// </remarks>
public async Task RecognizeSpeechAsync(string path)
{
    try
    {
        var config = SpeechConfig.FromSubscription("523e137d4e544865b41b7b418dd39ac0", "uksouth");
        config.SpeechRecognitionLanguage = "pt-PT";

        using (var audioInput = AudioConfig.FromWavFileInput(path))
        using (var recognizer = new SpeechRecognizer(config, audioInput))
        {
            Console.WriteLine("Recognizing first result...");
            var result = await recognizer.RecognizeOnceAsync();
            switch (result.Reason)
            {
                case ResultReason.RecognizedSpeech:
                    this.result_text = result.Text;
                    break;

                case ResultReason.NoMatch:
                    this.result_text = "NOMATCH: Speech could not be recognized.";
                    break;

                case ResultReason.Canceled:
                    var cancellation = CancellationDetails.FromResult(result);
                    Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                    // BUG FIX: the string below was missing the '$' interpolation
                    // prefix, so users saw the literal text "{cancellation.Reason}".
                    this.result_text = $"CANCELED: Reason={cancellation.Reason}";
                    if (cancellation.Reason == CancellationReason.Error)
                    {
                        Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                        Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                        Console.WriteLine($"CANCELED: Did you update the subscription info?");
                    }
                    break;
            }
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.ToString());
    }
}
/// <summary>
/// Performs one-shot speech recognition from the default microphone and reports the
/// outcome on the console (prompts/messages partly in Romanian).
/// </summary>
public static async Task RecognizeSpeechAsync()
{
    // Creates an instance of a speech config with specified subscription key
    // and service region. Replace with your own values.
    var config = SpeechConfig.FromSubscription("a500a698e9f247eda1b27c379ba3298a", "uksouth");

    // RecognizeOnceAsync returns after a single utterance (terminated by trailing
    // silence or at most 15 seconds of audio), suitable for one-shot commands or
    // queries; use StartContinuousRecognitionAsync for long-running recognition.
    using (var recognizer = new SpeechRecognizer(config))
    {
        Console.WriteLine("Te rog sa spui ceva in engleza in microfon...");
        var result = await recognizer.RecognizeOnceAsync();

        // Report the outcome.
        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"We recognized: {result.Text}");
                break;

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Nu pot sa recunosc ce spui.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
        }
    }
}
/// <summary>
/// Performs one-shot speech recognition from the default microphone and returns the
/// raw recognition result, logging the outcome on the console.
/// </summary>
/// <returns>The recognition result, regardless of its Reason.</returns>
public async Task <SpeechRecognitionResult> Record()
{
    // Creates an instance of a speech config with specified subscription key
    // and service region. Replace with your own values.
    var config = SpeechConfig.FromSubscription("2515f320-76bd-4798-adb3-dade8f1db94e", "northeurope");
    //var config = SpeechConfig.FromEndpoint(new Uri("https://northeurope.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1"), "2515f320-76bd-4798-adb3-dade8f1db94e");

    // RecognizeOnceAsync returns when the first utterance has been recognized, so
    // it suits one-shot commands or queries; for long-running recognition use
    // StartContinuousRecognitionAsync instead.
    using (var recognizer = new SpeechRecognizer(config))
    {
        Console.WriteLine("Say something...");
        SpeechRecognitionResult result = await recognizer.RecognizeOnceAsync();

        // Log the outcome before handing the result back to the caller.
        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"We recognized: {result.Text}");
                break;

            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;

            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
        }
        return(result);
    }
}
/// <summary>
/// Lazily creates and compiles the speech recognizer for the configured language.
/// Thread-safe via double-checked locking: the recognizer is built outside the lock
/// and only published if no other thread won the race.
/// </summary>
/// <exception cref="AIServiceException">
/// Thrown when the configured language is not supported or not installed on the device.
/// </exception>
public override async Task InitializeAsync()
{
    if (speechRecognizer == null)
    {
        try
        {
            var recognizer = new SpeechRecognizer(ConvertAILangToSystem(config.Language));
            // INFO: Dictation is default Constraint
            //var webSearchGrammar = new SpeechRecognitionTopicConstraint(SpeechRecognitionScenario.Dictation, "dictation");
            //recognizer.Constraints.Add(webSearchGrammar);
            await recognizer.CompileConstraintsAsync();

            // Publish under the lock; discard our instance if another thread already set it.
            lock (speechRecognizerLock)
            {
                if (speechRecognizer == null)
                {
                    speechRecognizer = recognizer;
                }
            }
        }
        catch (Exception e)
        {
            // Translate the platform's "language not supported" HRESULT into a
            // domain-specific exception; rethrow everything else unchanged.
            if ((uint)e.HResult == HRESULT_LANG_NOT_SUPPORTED)
            {
                throw new AIServiceException(string.Format("Specified language {0} not supported or not installed on device", config.Language.code), e);
            }
            throw;
        }
    }
}