/// <summary>
/// Recognizes the pizza audio against the built-in general SLM and asserts
/// that a RECOGNIZED result carries an alternative with confidence above 90.
/// </summary>
public void BasicSLM()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

    // Language model list pointing at the built-in general SLM.
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri("builtin:slm/general");

    // Fix: the original 'catch (Exception ex) { throw ex; }' only destroyed the
    // stack trace; letting exceptions propagate is equivalent and safer.
    var results = this.ExecuteRecognition(clientConfig, lModelLst, audioSource);

    // Fix: FirstOrDefault() could return null and the original chain then
    // dereferenced .Alternatives unconditionally (NullReferenceException
    // instead of a clean assert failure when nothing was recognized).
    var score = results?
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?
        .Alternatives?
        .FirstOrDefault(a => a.Confidence >= 90)?
        .Confidence;

    // Assert.IsTrue replaces the awkward AreEqual(cond, true) form.
    // NOTE(review): the filter accepts Confidence >= 90 but the assert requires
    // > 90, so an exact 90 still fails - confirm the intended threshold.
    Assert.IsTrue(score != null && score > 90);
}
/// <summary>
/// Recognizes the pizza audio against the remote pizza grammar and asserts
/// that a RECOGNIZED result carries an alternative with confidence above 90.
/// </summary>
public void BasicGrammar()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

    // Language model list with the grammar matching the pizza audio.
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");

    // Fix: the original 'catch (Exception ex) { throw ex; }' only destroyed the
    // stack trace; letting exceptions propagate is equivalent and safer.
    var results = this.ExecuteRecognition(clientConfig, lModelLst, audioSource);

    // Fix: FirstOrDefault() could return null and the original chain then
    // dereferenced .Alternatives unconditionally.
    var score = results?
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?
        .Alternatives?
        .FirstOrDefault(a => a.Confidence >= 90)?
        .Confidence;

    // NOTE(review): filter uses >= 90 while the assert requires > 90 - confirm
    // the intended threshold.
    Assert.IsTrue(score != null && score > 90);
}
/// <summary>
/// Runs <paramref name="recogs"/> sequential recognitions of the CPF audio on
/// a single recognizer session, asserting each one is RECOGNIZED.
/// </summary>
/// <param name="config">Client configuration for the recognizer.</param>
/// <param name="recogs">Number of recognitions to perform.</param>
/// <param name="useEndSleep">Whether to pause after the final recognition.</param>
private void ExecuteMultiplesRecognitions(ClientConfig config, int recogs, bool useEndSleep = true)
{
    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(config))
    {
        for (int i = 0; i < recogs; i++)
        {
            var audioSource = new FileAudioSource(TestsReferences.AudioCpf, AudioType.DETECT);
            var lModelLst = new LanguageModelList();
            lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);

            speechRecognizer.Recognize(audioSource, lModelLst);
            var result = speechRecognizer.WaitRecognitionResult();
            Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, result[0].ResultCode);

            if (i < recogs - 1)
            {
                // Short pause between intermediate recognitions.
                Thread.Sleep(3000);
            }
            else if (useEndSleep)
            {
                // Fix: the useEndSleep flag was previously ignored - the final
                // pause now honors the parameter as the signature advertises.
                Thread.Sleep(6000);
            }
        }
    }
}
/// <summary>
/// After a successful recognition, a second WaitRecognitionResult() call must
/// return immediately (under 5 ms) with an empty result list.
/// </summary>
public void WaitRecognitionResultDuplicate()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    // Both stamps start from the same tick value; they are overwritten below.
    DateTime waitStart = DateTime.Now;
    DateTime waitEnd = new DateTime(waitStart.Ticks);
    double elapsedMs;

    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        recognizer.Recognize(audioSource, lModelLst);

        var initialResult = recognizer.WaitRecognitionResult();
        Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, initialResult[0].ResultCode);

        // Time the duplicate wait - it must not block.
        waitStart = DateTime.Now;
        var secondResult = recognizer.WaitRecognitionResult();
        waitEnd = DateTime.Now;
        elapsedMs = (waitEnd - waitStart).TotalMilliseconds;

        Assert.AreEqual(0, secondResult.Count);
        Assert.IsTrue(0 < elapsedMs && elapsedMs < 5);
    }
}
/// <summary>
/// Idles past the 60 s session timeout and verifies a recognition still
/// succeeds afterwards, returning the expected pizza transcription.
/// </summary>
public void RecogAfterSessionTimeout()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    var lModelLst = new LanguageModelList();
    List<RecognitionResult> results = null;

    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
    {
        // Wait long enough for the server-side session to expire.
        Thread.Sleep(65000);
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        speechRecognizer.Recognize(audioSource, lModelLst);
        results = speechRecognizer.WaitRecognitionResult();
    }

    // Fix: the original chain used '?.' on results but then dereferenced
    // FirstOrDefault().Alternatives unconditionally - a failed recognition
    // produced a NullReferenceException instead of an assert failure.
    var score = results?
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?
        .Alternatives?
        .FirstOrDefault(a => a.Confidence >= 90)?
        .Confidence;
    Assert.IsNotNull(score);

    var textFromFirstAlternative = results[0].Alternatives[0].Text.ToString();
    Assert.AreEqual(TestsReferences.TextPizzaVeg, textFromFirstAlternative);
}
/// <summary>
/// With a 50 ms no-input timeout enabled, recognizing a silent audio file must
/// yield a NO_INPUT_TIMEOUT result.
/// </summary>
public void NoInputTimeOut()
{
    var recogConfig = this.CreateConfigDefault();
    recogConfig.NoInputTimeoutMilliseconds = 50;
    recogConfig.NoInputTimeoutEnabled = true;
    var clientConfig = this.CreateClientConfigDefault(recogConfig);
    var audioSource = new FileAudioSource(@"8k\Silencio\silence-8k.wav");

    // General SLM; any model works since the audio is pure silence.
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri("builtin:slm/general");

    // Fix: removed 'catch (Exception ex) { throw ex; }', which only reset the
    // stack trace; exceptions now propagate unchanged.
    var results = ExecuteRecognition(clientConfig, lModelLst, audioSource);

    // Fix: corrected the misspelled local 'reultNoInputTimeout'.
    var resultNoInputTimeout = results
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.NO_INPUT_TIMEOUT);
    Assert.IsNotNull(resultNoInputTimeout);
}
/// <summary>
/// Recognizes a RAW (headerless) audio file and checks both the confidence
/// score and the exact transcription.
/// </summary>
public void RawFileRecognizer()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    // Fix: the free language model was added twice (here and again inside the
    // try block); a single registration is sufficient.
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.Nasceu, AudioType.RAW);
    List<RecognitionResult> results = null;

    try
    {
        results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        // Convert infrastructure failures into a test-infrastructure error.
        throw new InternalTestFailureException(ex.Message);
    }

    var score = results?
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?
        .Alternatives?
        .FirstOrDefault(a => a.Confidence >= 90)?
        .Confidence;
    Assert.IsNotNull(score);

    TestContext.WriteLine($"{results[0].Alternatives[0].Text}");
    var textFromFirstAlternative = results[0].Alternatives[0].Text;
    Assert.AreEqual(TestsReferences.TextNasceu, textFromFirstAlternative);
}
/// <summary>
/// Recognizes the pizza audio with both the free language model and the pizza
/// grammar, checking score, transcription and first interpretation.
/// </summary>
public void BasicGrammar()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    List<RecognitionResult> results = null;

    try
    {
        // Both models are registered on purpose: SLM plus pizza grammar.
        lModelLst.AddFromUri(TestsReferences.GramPizzaHttp);
        results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        throw new InternalTestFailureException(ex.Message);
    }

    // Fix: FirstOrDefault() could return null and the original chain then
    // dereferenced .Alternatives unconditionally.
    var score = results?
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?
        .Alternatives?
        .FirstOrDefault(a => a.Confidence >= 90)?
        .Confidence;
    Assert.IsNotNull(score);

    var textFromFirstAlternative = results[0].Alternatives[0].Text.ToString();
    Assert.AreEqual(TestsReferences.TextPizzaVeg, textFromFirstAlternative);

    var firstInterpFromFirstAlt = results[0].Alternatives[0].Interpretations[0].InterpretationJson.ToString();
    Assert.AreEqual(TestsReferences.InterpPizzaVeg, firstInterpFromFirstAlt);
}
/// <summary>
/// Starting a second Recognize() while the first is in flight must raise a
/// RecognitionException; the first recognition must still complete normally.
/// </summary>
public void DuplicateRecognize()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    var lModelLst = new LanguageModelList();
    List<RecognitionResult> results = null;

    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
    {
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        speechRecognizer.Recognize(audioSource, lModelLst);
        Thread.Sleep(500);

        // Fix: the original only asserted inside the catch block, so the test
        // passed silently when no exception was thrown at all. Capture the
        // exception and assert unconditionally (null fails IsInstanceOfType).
        Exception observed = null;
        try
        {
            speechRecognizer.Recognize(audioSource, lModelLst);
        }
        catch (Exception ex)
        {
            observed = ex;
        }
        Assert.IsInstanceOfType(observed, typeof(RecognitionException));

        results = speechRecognizer.WaitRecognitionResult();
    }

    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[0].ResultCode);
}
/// <summary>
/// With a 200 ms no-input timeout enabled, recognizing silent audio must
/// produce a NO_INPUT_TIMEOUT result.
/// </summary>
public void NoInputTimeOut()
{
    var recogConfig = new RecognitionConfig
    {
        NoInputTimeoutMilliseconds = 200,
        NoInputTimeoutEnabled = true
    };
    var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    // Fix: the free language model was registered twice (here and again inside
    // the try block); one registration is enough.
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioSilence, AudioType.DETECT);
    List<RecognitionResult> results = null;

    try
    {
        results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        throw new InternalTestFailureException(ex.Message);
    }

    Assert.IsTrue(results != null && results.Count > 0);
    Assert.AreEqual(RecognitionResultCode.NO_INPUT_TIMEOUT, results[0].ResultCode);
}
/// <summary>
/// Verifies that every subscribed recognizer event (listening, partial result,
/// final result, speech start and speech stop) fires during a recognition.
/// </summary>
public async Task MultipleListeners()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        lModelLst.AddFromUri(TestsReferences.GramPizzaHttp);
        Events = new EventsPassed();

        // Hook up every listener before the recognition starts.
        recognizer.OnListening += SpeechRecognizer_OnListening;
        recognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
        recognizer.OnRecognitionResult += SpeechRecognizer_OnRecognitionResult;
        recognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
        recognizer.OnSpeechStop += SpeechRecognizer_OnSpeechStop;

        recognizer.Recognize(audioSource, lModelLst);

        bool allEventsFired = await CheckIfEventsHasPassed();
        Assert.IsTrue(allEventsFired);
    }

    Events = null;
}
// Continuous-mode recognition: the same audio is recognized twice, first via
// FileAudioSource and then via BufferAudioSource, and each pass must return
// one RECOGNIZED segment per expected transcript plus one trailing non-speech
// result. NOTE(review): the first pass expects the trailing result to be
// NO_SPEECH while the second expects NO_INPUT_TIMEOUT - presumably the buffer
// source keeps the stream open so the endpointer times out instead of seeing
// end-of-audio; confirm this asymmetry is intended.
public void BasicContinuousModeOn() { var recogConfig = new RecognitionConfig { ContinuousMode = true, }; var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password); var lModelLst = new LanguageModelList(); List <RecognitionResult> results = null; int i = 0; List <string> segmentsText = new List <string>(new string[] { TestsReferences.TextContinuousModeSeg1, TestsReferences.TextContinuousModeSeg2, TestsReferences.TextContinuousModeSeg3 }); using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig)) { var audioSource = new FileAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode), AudioType.DETECT); lModelLst.AddFromUri(TestsReferences.FreeLanguageModel); speechRecognizer.Recognize(audioSource, lModelLst); results = speechRecognizer.WaitRecognitionResult(); Assert.IsTrue(segmentsText.Count() + 1 == results.Count()); for (i = 0; i < segmentsText.Count(); i++) { Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode); var textFromFirstAlternative = results[i].Alternatives[0].Text.ToString(); Assert.AreEqual(segmentsText[i], textFromFirstAlternative); } Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_SPEECH, results[i].ResultCode); } using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig)) { var audioSource = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode)); lModelLst.AddFromUri(TestsReferences.FreeLanguageModel); speechRecognizer.Recognize(audioSource, lModelLst); results = speechRecognizer.WaitRecognitionResult(); Assert.IsTrue(segmentsText.Count() + 1 == results.Count()); for (i = 0; i < segmentsText.Count(); i++) { Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode); var textFromFirstAlternative = results[i].Alternatives[0].Text; Assert.AreEqual(segmentsText[i], textFromFirstAlternative); } 
// Trailing result of the second (buffer-source) pass: NO_INPUT_TIMEOUT.
// NOTE(review): lModelLst accumulates a second AddFromUri here because the
// list is shared between both passes - verify duplicate models are harmless.
Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_INPUT_TIMEOUT, results[i].ResultCode); } }
// Manual smoke-test entry point: sets up file-based tracing, creates a
// continuous-mode recognizer with console event handlers, starts a
// recognition and blocks on Console.Read() so events can arrive.
// NOTE(review): the ServerUrl literal below appears to have been mangled by
// credential redaction ("ws://*****:*****@" runs straight into an audio file
// path) - as written this statement cannot compile; the original presumably
// assigned ServerUrl, UserAgent/credentials and then constructed a
// FileAudioSource from the WAV path. Restore from version control before use.
// NOTE(review): the hard-coded D:\ trace path and C:\ audio path make this
// runnable only on a specifically prepared Windows machine.
public static void Main(string[] args) { Trace.Close(); TextWriterTraceListener tr1 = new TextWriterTraceListener(System.IO.File.CreateText(string.Format("D:\\Trace_{0}.trace", DateTime.Now.ToString("dd-MM-yyyy_HH-mm")))); Trace.Listeners.Add(tr1); Trace.AutoFlush = true; var objRecognitionConfig = new RecognitionConfig() { MaxSentences = 2, ContinuousMode = true }; var objClientConfig = new ClientConfig() { ServerUrl = "ws://*****:*****@"C:\AudioTestesASR\8K\ContinuosMode\joao_mineiro_marciano_intro_8k.wav"); try { SpeechRecognizer obj = SpeechRecognizer.Create(objClientConfig); obj.OnSpeechStart += Obj_OnSpeechStart; obj.OnSpeechStop += Obj_OnSpeechStop; obj.OnListening += Obj_OnListening; obj.OnPartialRecognitionResult += Obj_OnPartialRecognitionResult; obj.OnRecognitionResult += Obj_OnRecognitionResult; obj.OnError += Obj_OnError1; LanguageModelList lModelLst = new LanguageModelList(); lModelLst.AddFromUri("builtin:slm/general"); obj.Recognize(objAudioSource, lModelLst); //var results = obj.WaitRecognitionResult(); Console.Read(); } catch (Exception ex) { Console.WriteLine(ex.Message); Console.Read(); } }
/// <summary>
/// WaitRecognitionResult(timeout) must give up close to the requested timeout,
/// signalled by a "Response timeout" exception.
/// </summary>
public void MaxWaitSettings()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    // Fix: the model was registered twice (here and again inside the try).
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    // Both stamps start from the same tick value; they are overwritten below.
    DateTime initWait = DateTime.Now;
    DateTime endWait = new DateTime(initWait.Ticks);
    double stampInMilliseconds;
    const int timeToWait = 1000;

    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        speechRecognizer.Recognize(audioSource, lModelLst);
        initWait = DateTime.Now;
        speechRecognizer.WaitRecognitionResult(timeToWait);
    }
    catch (Exception ex)
    {
        if (ex.Message.Equals("Response timeout"))
        {
            endWait = DateTime.Now;
        }
        else
        {
            // Fix: 'throw ex;' reset the stack trace; 'throw;' preserves it.
            throw;
        }
    }
    finally
    {
        speechRecognizer.Close();
    }

    stampInMilliseconds = (endWait - initWait).TotalMilliseconds;
    // Asserts the stamp was captured and is below timeToWait plus a 500 ms
    // slack for natural processing delay.
    Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 500));
}
/// <summary>
/// WaitRecognitionResult(timeout) must give up close to the requested timeout,
/// signalled by a "Recognition timeout" exception.
/// </summary>
public void MaxWaitSettings()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

    // Both stamps start from the same tick value; they are overwritten below.
    DateTime initWait = DateTime.Now;
    DateTime endWait = new DateTime(initWait.Ticks);
    double stampInMilliseconds;
    const int timeToWait = 1000;

    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // Language model with the grammar for the pizza audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");
        speechRecognizer.Recognize(audioSource, lModelLst);
        initWait = DateTime.Now;
        speechRecognizer.WaitRecognitionResult(timeToWait);
    }
    catch (Exception ex)
    {
        if (ex.Message.Equals("Recognition timeout"))
        {
            endWait = DateTime.Now;
        }
        else
        {
            // Fix: 'throw ex;' reset the stack trace; 'throw;' preserves it.
            throw;
        }
    }
    finally
    {
        speechRecognizer.Close();
    }

    // Fix: the original computed TotalSeconds but compared it against a
    // millisecond bound (timeToWait + 200 == "1200 seconds"), making the
    // upper-bound check vacuous. Measure in milliseconds so both sides of the
    // comparison share the same unit (200 ms slack for processing delay).
    stampInMilliseconds = (endWait - initWait).TotalMilliseconds;
    Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 200));
}
// Sets a maximum wait on the client and expects the recognition of the CPF
// audio to abort with a RecognitionException.
// NOTE(review): the comment says "2 seconds" but the code assigns 2000 to a
// property named MaxWaitSeconds - either the value should be 2 or the
// property is actually millisecond-based despite its name; confirm against
// the ClientConfig documentation.
// NOTE(review): if ExecuteRecognition completes without throwing, no
// assertion runs and the test passes silently - verify whether a timeout is
// actually expected here and consider failing on the no-exception path.
public void RecognizeMaxWaitSeconds() { var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password); var lModelLst = new LanguageModelList(); var audioSource = new FileAudioSource(TestsReferences.AudioCpf, AudioType.DETECT); //Set 2 seconds to max wait time clientConfig.MaxWaitSeconds = 2000; try { lModelLst.AddFromUri(TestsReferences.FreeLanguageModel); ExecuteRecognition(clientConfig, lModelLst, audioSource); } catch (Exception ex) { Assert.IsInstanceOfType(ex, typeof(RecognitionException)); } }
/// <summary>
/// A recognition with valid credentials must return at least one result.
/// </summary>
public void CredentialValid()
{
    // NOTE(review): credentials are hard-coded in source; move them into test
    // configuration (the sibling tests use TestsReferences.User/Password).
    var clientConfig = this.CreateClientWithCredentials(this.CreateConfigDefault(), "wss://speech.cpqd.com.br/asr/ws/estevan/recognize/8k", "estevan", "Thect195");
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

    // Language model with the grammar for the pizza audio.
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");

    // Fix: removed 'catch (Exception ex) { throw ex; }', which only destroyed
    // the stack trace; exceptions now propagate unchanged.
    var results = this.ExecuteRecognition(clientConfig, lModelLst, audioSource);
    Assert.IsTrue(results?.Count > 0);
}
/// <summary>
/// Recognizing with null credentials must fail with a NullReferenceException;
/// completing without any exception fails the test.
/// </summary>
public void CredentialNull()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, null, null);
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    Exception observed = null;
    try
    {
        lModelLst.AddFromUri(TestsReferences.GramPizzaHttp);
        ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        observed = ex;
    }

    if (observed != null)
    {
        Assert.IsInstanceOfType(observed, typeof(NullReferenceException));
        return;
    }

    // No exception at all means the null credentials were accepted.
    throw new AssertFailedException();
}
/// <summary>
/// Recognizing with invalid credentials must fail with an IOException;
/// completing without any exception fails the test.
/// </summary>
public void CredentialInvalid()
{
    var clientConfig = this.CreateClientWithCredentials(this.CreateConfigDefault(), "wss://speech.cpqd.com.br/asr/ws/estevan/recognize/8k", "invalid", "invalid");
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

    Exception observed = null;
    try
    {
        // Language model with the grammar for the pizza audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");
        this.ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        observed = ex;
    }

    if (observed != null)
    {
        Assert.IsInstanceOfType(observed, typeof(IOException));
        return;
    }

    // No exception at all means the bogus credentials were accepted.
    throw new AssertFailedException();
}
/// <summary>
/// A malformed server URL must surface as a UriFormatException.
/// </summary>
public void UrlInvalid()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault(), "ws:invalid_uri");
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
    try
    {
        // Language model with the grammar for the pizza audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");
        // Fix: dropped the unused 'results' local - only the exception matters.
        this.ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        Assert.AreEqual(ex.GetType(), typeof(UriFormatException));
        return;
    }
    throw new AssertFailedException("A invalid server Url doesn't throw an UriFormatException!");
}
/// <summary>
/// Recognizing a silent audio file must yield a NO_SPEECH result.
/// </summary>
public void NoSpeech()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\Silencio\silence-8k.wav");

    // General SLM; the model choice is irrelevant for silent input.
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri("builtin:slm/general");

    // Fix: removed 'catch (Exception ex) { throw ex; }', which only reset the
    // stack trace; exceptions now propagate unchanged.
    var results = ExecuteRecognition(clientConfig, lModelLst, audioSource);

    Assert.IsTrue(results != null && results.Count > 0);
    // Fix: MSTest AreEqual takes (expected, actual); the original had them
    // swapped, which produced confusing failure messages.
    Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_SPEECH, results[0].ResultCode);
}
/// <summary>
/// Exercises a fully populated RecognitionConfig against the CPF audio.
/// </summary>
public void RecogConfig()
{
    var recogConfig = new RecognitionConfig
    {
        ConfidenceThreshold = 90,
        ContinuousMode = false,
        EndpointerAutoLevelLen = 350,
        EndpointerLevelMode = 0,
        EndpointerLevelThreshold = 4,
        HeadMarginMilliseconds = 250,
        MaxSentences = 3,
        NoInputTimeoutEnabled = false,
        NoInputTimeoutMilliseconds = 2000,
        RecognitionTimeoutEnabled = false,
        RecognitionTimeoutMilliseconds = 65000,
        StartInputTimers = false,
        TailMarginMilliseconds = 450,
        WaitEndMilliseconds = 900
    };
    // Fix: recogConfig was built but never used - the client was previously
    // created with CreateConfigDefault(), so none of the settings above were
    // actually exercised by the test.
    var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    var audioSource = new FileAudioSource(TestsReferences.AudioCpf, AudioType.DETECT);
    List<RecognitionResult> results = null;
    try
    {
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        throw new InternalTestFailureException(ex.Message);
    }
    // NOTE(review): the leading '$' inside the interpolated string prints a
    // literal '$' before the confidence value - confirm whether intended.
    TestContext.WriteLine($"${results[0].Alternatives[0].Confidence}");
    // Assert.AreEqual(results[0].Alternatives[0].Confidence >= recogConfig.ConfidenceThreshold ?
    // CPqDASR.RecognitionResultCode.RECOGNIZED : RecognitionResultCode.NO_MATCH, results[0].ResultCode);
}
/// <summary>
/// Runs four sequential recognitions of the pizza audio on one recognizer
/// session, asserting each yields a RECOGNIZED result.
/// </summary>
/// <param name="config">Client configuration for the recognizer.</param>
private void ExecuteMultiplesRecognitions(ClientConfig config)
{
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(config);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        for (int i = 0; i < 4; i++)
        {
            var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
            speechRecognizer.Recognize(audioSource, lModelLst);
            var result = speechRecognizer.WaitRecognitionResult();
            var resultRecognized = result.FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED);
            Assert.IsNotNull(resultRecognized);
            // Fix: dropped the write-only counter 'k' and the
            // 'catch (Exception ex) { throw ex; }' that reset the stack trace;
            // the finally block still closes the recognizer on any path.
            Thread.Sleep(i < 3 ? 3000 : 5500);
        }
    }
    finally
    {
        speechRecognizer.Close();
    }
}
/// <summary>
/// Verifies that every subscribed recognizer event fires during a grammar
/// recognition of the pizza audio.
/// </summary>
public async Task MultipleListeners()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
    Events = new EventsPassed();
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // Language model with the grammar for the pizza audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");

        speechRecognizer.OnListening += SpeechRecognizer_OnListening;
        speechRecognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
        speechRecognizer.OnRecognitionResult += SpeechRecognizer_OnRecognitionResult;
        speechRecognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
        speechRecognizer.OnSpeechStop += SpeechRecognizer_OnSpeechStop;

        speechRecognizer.Recognize(audioSource, lModelLst);

        Task<bool> checkEventsPassed = this.CheckIfEventsHasPassed();
        bool result = await checkEventsPassed;
        Assert.IsTrue(result);
    }
    finally
    {
        // Fix: cleanup was split between a catch block (which also rethrew
        // with 'throw ex;', resetting the stack trace), a finally and trailing
        // code; a single finally closes the recognizer and clears Events on
        // every path while preserving original exceptions.
        speechRecognizer.Close();
        Events = null;
    }
}
/// <summary>
/// A malformed server URL must surface as a UriFormatException.
/// </summary>
public void UrlInvalid()
{
    var clientConfig = CreateClientConfigDefault(CreateConfigDefault(), "ws:invalid_uri");
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    try
    {
        lModelLst.AddFromUri(TestsReferences.GramPizzaHttp);
        // Fix: dropped the unused 'results' local - only the exception matters.
        ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        Assert.AreEqual(ex.GetType(), typeof(UriFormatException));
        return;
    }
    throw new AssertFailedException("A invalid server Url doesn't throw an UriFormatException!");
}
/// <summary>
/// A recognition performed with valid credentials must produce at least one
/// result; infrastructure failures are reported as test-infrastructure errors.
/// </summary>
public void CredentialValid()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    List<RecognitionResult> recognitionResults = null;
    try
    {
        lModelLst.AddFromUri(TestsReferences.GramPizzaHttp);
        recognitionResults = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        throw new InternalTestFailureException(ex.Message);
    }

    Assert.IsNotNull(recognitionResults);
    Assert.IsTrue(recognitionResults.Count > 0);
}
/// <summary>
/// Recognizing a silent audio file must yield a NO_MATCH result.
/// </summary>
public void NoSpeech()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    // Fix: the free language model was registered twice (here and again inside
    // the try block); one registration is enough.
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioSilence, AudioType.DETECT);
    List<RecognitionResult> results = null;

    try
    {
        results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        throw new InternalTestFailureException(ex.Message);
    }

    Assert.IsTrue(results != null && results.Count > 0);
    Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_MATCH, results[0].ResultCode);
}
/// <summary>
/// With Textify enabled, recognizing the CPF audio must return the textified
/// transcription (digits formatted) with confidence above the threshold.
/// </summary>
public void Textify()
{
    var recogConfig = new RecognitionConfig
    {
        ConfidenceThreshold = 90,
        Textify = true
    };
    var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    // Fix: the free language model was registered twice (here and again inside
    // the try block); one registration is enough.
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioCpf, AudioType.DETECT);
    List<RecognitionResult> results = null;

    try
    {
        results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
    }
    catch (Exception ex)
    {
        throw new InternalTestFailureException(ex.Message);
    }

    // Fix: FirstOrDefault() could return null and the original chain then
    // dereferenced .Alternatives unconditionally.
    var score = results?
        .FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?
        .Alternatives?
        .FirstOrDefault(a => a.Confidence >= 90)?
        .Confidence;
    Assert.IsNotNull(score);

    var textFromFirstAlternative = results[0].Alternatives[0].Text;
    Assert.AreEqual(TestsReferences.TextCPFTextify, textFromFirstAlternative);
}