/// <summary>
/// Closing the recognizer while a recognition is in progress must discard
/// pending results: WaitRecognitionResult is expected to return an empty list.
/// </summary>
public void CloseWhileRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    // Buffer source so the whole audio is written to the server at once.
    var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\ContinuosMode\joao_mineiro_marciano_intro_8k.wav"));
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        speechRecognizer.Recognize(audioSource, lModelLst);
        // Give the recognition time to start before closing mid-stream.
        Thread.Sleep(2000);
        speechRecognizer.Close();
        var result = speechRecognizer.WaitRecognitionResult();
        // No results should survive an explicit Close.
        Assert.IsTrue(result.Count == 0);
    }
    finally
    {
        // Harmless if Close already ran in the happy path above.
        speechRecognizer.Close();
    }
}
/// <summary>
/// Verifies the session survives a long idle period (65 s sleep) and still
/// delivers at least one RECOGNIZED result afterwards.
/// </summary>
public void SessionTimeout()
{
    var clientConfig = CreateClientConfigDefault(CreateConfigDefault());
    // Buffer source so the whole audio is written to the server at once.
    var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        speechRecognizer.Recognize(audioSource, lModelLst);
        // Sleep past the server-side session timeout window before collecting results.
        Thread.Sleep(65000);
        var result = speechRecognizer.WaitRecognitionResult();
        var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();
        Assert.IsNotNull(resultRecognized);
    }
    finally
    {
        speechRecognizer.Close();
    }
}
/// <summary>
/// Closing a recognizer that never started a recognition must succeed silently.
/// </summary>
public void CloseWithoutRecognize()
{
    var config = CreateClientWithCredentials(
        CreateConfigDefault(),
        TestsReferences.DefaultASRURL,
        TestsReferences.User,
        TestsReferences.Password);

    using (var recognizer = SpeechRecognizer.Create(config))
    {
        recognizer.Close();
    }
}
/// <summary>
/// Closing a recognizer without ever calling Recognize must not throw.
/// </summary>
public void CloseWithoutRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    // NOTE(review): this audio is loaded but never used — looks like copy-paste
    // from a sibling test; kept so on-disk fixture availability is still exercised.
    var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        speechRecognizer.Close();
    }
    finally
    {
        // Guarantee release even if the first Close throws.
        speechRecognizer.Close();
    }
}
/// <summary>
/// WaitRecognitionResult(timeToWait) must give up within roughly the requested
/// window: on "Response timeout" the elapsed time is asserted to be positive
/// and no more than timeToWait plus a 500 ms processing allowance.
/// </summary>
public void MaxWaitSettings()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    // Just initializing variables with the same value.
    DateTime initWait = DateTime.Now;
    DateTime endWait = new DateTime(initWait.Ticks);
    double stampInMilliseconds;
    const int timeToWait = 1000;
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // NOTE(review): removed a duplicate AddFromUri(FreeLanguageModel) call
        // here — the model was already added right after the list was created.
        speechRecognizer.Recognize(audioSource, lModelLst);
        initWait = DateTime.Now;
        speechRecognizer.WaitRecognitionResult(timeToWait);
    }
    catch (Exception ex)
    {
        if (ex.Message.Equals("Response timeout"))
        {
            endWait = DateTime.Now;
        }
        else
        {
            // Preserve the original stack trace (throw, not throw ex).
            throw;
        }
    }
    finally
    {
        speechRecognizer.Close();
    }
    stampInMilliseconds = (endWait - initWait).TotalMilliseconds;
    // Asserts the stamp was correctly calculated and is lower than timeToWait
    // plus a 500 ms allowance for natural processing delay.
    Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 500));
}
/// <summary>
/// WaitRecognitionResult(timeToWait) must give up within roughly the requested
/// window: on "Recognition timeout" the elapsed time is asserted to be positive
/// and no more than timeToWait plus a 200 ms processing allowance.
/// </summary>
public void MaxWaitSettings()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
    // Just initializing variables with the same value.
    DateTime initWait = DateTime.Now;
    DateTime endWait = new DateTime(initWait.Ticks);
    double stampInMilliseconds;
    const int timeToWait = 1000;
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // Language model with a grammar matching the pizza audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");
        speechRecognizer.Recognize(audioSource, lModelLst);
        initWait = DateTime.Now;
        speechRecognizer.WaitRecognitionResult(timeToWait);
    }
    catch (Exception ex)
    {
        if (ex.Message.Equals("Recognition timeout"))
        {
            endWait = DateTime.Now;
        }
        else
        {
            // Preserve the original stack trace (throw, not throw ex).
            throw;
        }
    }
    finally
    {
        speechRecognizer.Close();
    }
    // BUG FIX: the original compared TotalSeconds against a millisecond budget
    // (timeToWait + 200), which made the assert vacuously true. Compare in
    // milliseconds, as timeToWait is expressed in milliseconds.
    stampInMilliseconds = (endWait - initWait).TotalMilliseconds;
    // Asserts the stamp was correctly calculated and is lower than timeToWait
    // plus a 200 ms allowance for natural processing delay.
    Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 200));
}
/// <summary>
/// Cancelling when no recognition is in progress must not throw.
/// </summary>
public void CancelNoRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        speechRecognizer.CancelRecognition();
    }
    finally
    {
        speechRecognizer.Close();
    }
}
/// <summary>
/// Closing the recognizer while a recognition is in progress must discard
/// pending results: WaitRecognitionResult is expected to return an empty list.
/// </summary>
public void CloseWhileRecognize()
{
    var config = CreateClientWithCredentials(
        CreateConfigDefault(),
        TestsReferences.DefaultASRURL,
        TestsReferences.User,
        TestsReferences.Password);
    var models = new LanguageModelList();
    var audio = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioCpf));
    List<RecognitionResult> results = null;

    using (var recognizer = SpeechRecognizer.Create(config))
    {
        models.AddFromUri(TestsReferences.FreeLanguageModel);
        recognizer.Recognize(audio, models);
        // Let the recognition start before closing mid-stream.
        Thread.Sleep(1000);
        recognizer.Close();
        results = recognizer.WaitRecognitionResult();
    }

    Assert.AreEqual(0, results.Count);
}
/// <summary>
/// Waiting for results when no recognition was ever started must return
/// an empty result list rather than blocking or throwing.
/// </summary>
public void WaitNoRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        var result = speechRecognizer.WaitRecognitionResult();
        Assert.IsTrue(result.Count == 0);
    }
    finally
    {
        speechRecognizer.Close();
    }
}
/// <summary>
/// Runs four back-to-back recognitions on the same recognizer instance and
/// asserts each one yields a RECOGNIZED result. Sleeps between iterations
/// (3 s, and 5.5 s after the last) to pace the session.
/// </summary>
/// <param name="config">Client configuration used to create the recognizer.</param>
private void ExecuteMultiplesRecognitions(ClientConfig config)
{
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(config);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        for (int i = 0; i < 4; i++)
        {
            // A fresh audio source is required per recognition.
            var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
            speechRecognizer.Recognize(audioSource, lModelLst);
            var result = speechRecognizer.WaitRecognitionResult();
            var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();
            Assert.IsNotNull(resultRecognized);
            // NOTE(review): removed dead counter `k` — it was incremented but never read.
            if (i < 3)
            {
                Thread.Sleep(3000);
            }
            else
            {
                Thread.Sleep(5500);
            }
        }
    }
    finally
    {
        speechRecognizer.Close();
    }
}
/// <summary>
/// Subscribes to every recognizer event (listening, partial result, final
/// result, speech start/stop), runs one recognition, and asserts all events
/// fired (tracked via the shared <c>Events</c> field, reset on exit).
/// </summary>
public async Task MultipleListeners()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
    Events = new EventsPassed();
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // Language model with a grammar matching the pizza audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");
        speechRecognizer.OnListening += SpeechRecognizer_OnListening;
        speechRecognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
        speechRecognizer.OnRecognitionResult += SpeechRecognizer_OnRecognitionResult;
        speechRecognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
        speechRecognizer.OnSpeechStop += SpeechRecognizer_OnSpeechStop;
        speechRecognizer.Recognize(audioSource, lModelLst);
        Task<bool> checkEventsPassed = this.CheckIfEventsHasPassed();
        bool result = await checkEventsPassed;
        Assert.IsTrue(result);
    }
    catch (Exception)
    {
        // Clear shared state before propagating; rethrow with `throw` so the
        // original stack trace is preserved.
        Events = null;
        throw;
    }
    finally
    {
        speechRecognizer.Close();
    }
    Events = null;
}