Example #1
        public void DuplicateRecognize()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var audioSource  = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
            var lModelLst    = new LanguageModelList();
            List <RecognitionResult> results = null;

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(500);
                try
                {
                    speechRecognizer.Recognize(audioSource, lModelLst);
                }
                catch (Exception ex)
                {
                    Assert.IsInstanceOfType(ex, typeof(RecognitionException));
                }
                results = speechRecognizer.WaitRecognitionResult();
            }

            Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[0].ResultCode);
        }
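A second Recognize issued while a previous recognition on the same SpeechRecognizer is still in progress is expected to throw RecognitionException, which is what the try/catch above asserts. A minimal sketch of the supported sequence, using only calls that already appear in these examples (wait for the first result before reusing the session):

            speechRecognizer.Recognize(audioSource, lModelLst);
            var firstResults = speechRecognizer.WaitRecognitionResult();

            // Only after the first result has been returned is it safe to start another
            // recognition on the same SpeechRecognizer instance.
            speechRecognizer.Recognize(audioSource, lModelLst);
            var secondResults = speechRecognizer.WaitRecognitionResult();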
Example #2
        private void ExecuteMultiplesRecognitions(ClientConfig config, int recogs, bool useEndSleep = true)
        {
            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(config))
            {
                for (int i = 0; i < recogs; i++)
                {
                    var audioSource = new FileAudioSource(TestsReferences.AudioCpf, AudioType.DETECT);
                    var lModelLst   = new LanguageModelList();
                    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                    speechRecognizer.Recognize(audioSource, lModelLst);
                    var result = speechRecognizer.WaitRecognitionResult();

                    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, result[0].ResultCode);

                    if (i < recogs - 1)
                    {
                        Thread.Sleep(3000);
                    }
                    else
                    {
                        Thread.Sleep(6000);
                    }
                }
            }
        }
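ExecuteMultiplesRecognitions is a private helper, so a test method has to drive it. A minimal usage sketch, assuming the same credential helpers used by the other tests in this listing (the test name itself is hypothetical):

        public void ThreeSequentialRecognitions()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);

            // Runs three back-to-back recognitions on a single SpeechRecognizer session.
            ExecuteMultiplesRecognitions(clientConfig, 3);
        }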
Example #3
        public void RecogAfterSessionTimeout()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var audioSource  = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
            var lModelLst    = new LanguageModelList();
            List <RecognitionResult> results = null;

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                Thread.Sleep(65000);

                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);
                results = speechRecognizer.WaitRecognitionResult();
            }

            var score = results?.
                        Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).
                        FirstOrDefault()?.Alternatives.
                        Where(a => a.Confidence >= 90).FirstOrDefault()?.Confidence;

            Assert.IsNotNull(score);

            var textFromFirstAlternative = results[0].Alternatives[0].Text.ToString();

            Assert.AreEqual(TestsReferences.TextPizzaVeg, textFromFirstAlternative);
        }
Example #4
        public async Task MultipleListeners()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();

            lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
            var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                lModelLst.AddFromUri(TestsReferences.GramPizzaHttp);

                Events = new EventsPassed();
                speechRecognizer.OnListening += SpeechRecognizer_OnListening;
                speechRecognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
                speechRecognizer.OnRecognitionResult        += SpeechRecognizer_OnRecognitionResult;
                speechRecognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
                speechRecognizer.OnSpeechStop  += SpeechRecognizer_OnSpeechStop;

                speechRecognizer.Recognize(audioSource, lModelLst);

                Task <bool> checkEventsPassed = CheckIfEventsHasPassed();

                bool result = await checkEventsPassed;

                Assert.IsTrue(result);
            }
            Events = null;
        }
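The test above relies on an EventsPassed holder and a CheckIfEventsHasPassed helper that the listing does not show. A hedged sketch of what they might look like, assuming each SpeechRecognizer_On... handler simply sets the corresponding flag when it fires (the field names and the ten-second polling window are assumptions):

        public class EventsPassed
        {
            public bool Listening;
            public bool PartialResult;
            public bool FinalResult;
            public bool SpeechStart;
            public bool SpeechStop;

            public bool All() => Listening && PartialResult && FinalResult && SpeechStart && SpeechStop;
        }

        private async Task<bool> CheckIfEventsHasPassed()
        {
            // Poll for up to roughly ten seconds, waiting for every handler to have fired.
            for (int i = 0; i < 100; i++)
            {
                if (Events != null && Events.All())
                {
                    return true;
                }
                await Task.Delay(100);
            }
            return false;
        }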
Example #5
        public void WaitRecognitionResultDuplicate()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            var audioSource  = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

            // Just initializing both variables with the same value
            DateTime initWait = DateTime.Now;
            DateTime endWait  = new DateTime(initWait.Ticks);
            double   stampInMilliseconds;

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                var firstResult = speechRecognizer.WaitRecognitionResult();
                Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, firstResult[0].ResultCode);

                initWait = DateTime.Now;
                var duplicatedResult = speechRecognizer.WaitRecognitionResult();
                endWait = DateTime.Now;

                stampInMilliseconds = (endWait - initWait).TotalMilliseconds;

                Assert.AreEqual(0, duplicatedResult.Count);
                Assert.IsTrue(0 < stampInMilliseconds && stampInMilliseconds < 5);
            }
        }
        public void SessionTimeout()
        {
            var clientConfig = CreateClientConfigDefault(CreateConfigDefault());
            // Send the whole buffer to be written
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));

            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");

                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(65000);

                var result           = speechRecognizer.WaitRecognitionResult();
                var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();

                Assert.IsNotNull(resultRecognized);
            }
            catch (Exception)
            {
                // Rethrow without losing the original stack trace.
                throw;
            }
            finally
            {
                speechRecognizer.Close();
            }
        }
        public void CancelWhileRecognize()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
            // Send the whole buffer to be written
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\ContinuosMode\joao_mineiro_marciano_intro_8k.wav"));

            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");

                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(2000);

                speechRecognizer.CancelRecognition();

                var result = speechRecognizer.WaitRecognitionResult();
                Assert.IsTrue(result.Count == 0);
            }
            catch (Exception)
            {
                // Rethrow without losing the original stack trace.
                throw;
            }
            finally
            {
                speechRecognizer.Close();
            }
        }
Example #8
        public async Task <string> Solve(string url)
        {
            var file = await DownloadFile(url);

            var wavFile = ConvertToWav(file);

            var speechRecognizer = new SpeechRecognizer();

            return await speechRecognizer.Recognize(wavFile);
        }
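This example uses a different SpeechRecognizer type (constructed with new and exposing an awaitable Recognize(string) that returns the transcript) rather than the CPqD recognizer from the other snippets, and it depends on two helpers the listing omits. A hedged sketch of those helpers; the names and return types are taken from the call sites above, while the HttpClient download and the ffmpeg-based conversion are assumptions (requires System.IO, System.Net.Http, System.Diagnostics, System.Threading.Tasks, and ffmpeg on the PATH):

        private static readonly HttpClient http = new HttpClient();

        private async Task<string> DownloadFile(string url)
        {
            // Download the remote audio to a temporary file and return its path.
            var path  = Path.GetTempFileName();
            var bytes = await http.GetByteArrayAsync(url);
            File.WriteAllBytes(path, bytes);
            return path;
        }

        private string ConvertToWav(string inputPath)
        {
            // Convert the downloaded file to WAV by shelling out to ffmpeg.
            var wavPath = Path.ChangeExtension(inputPath, ".wav");
            var psi     = new ProcessStartInfo("ffmpeg", $"-y -i \"{inputPath}\" \"{wavPath}\"") { UseShellExecute = false };

            using (var proc = Process.Start(psi))
            {
                proc.WaitForExit();
            }
            return wavPath;
        }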
Example #9
        private List <RecognitionResult> ExecuteRecognition(ClientConfig clientConfig, LanguageModelList languageModelList, IAudioSource audioSource)
        {
            List <RecognitionResult> results = null;

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                speechRecognizer.Recognize(audioSource, languageModelList);
                results = speechRecognizer.WaitRecognitionResult();
            }

            return results;
        }
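A minimal sketch of how ExecuteRecognition might be called from a test, mirroring the assertions used elsewhere in this listing (the test name is hypothetical; all other identifiers appear in the examples above):

        public void SimpleRecognition()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            var audioSource  = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

            lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);

            var results = ExecuteRecognition(clientConfig, lModelLst, audioSource);

            Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[0].ResultCode);
        }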
Example #10
        public void BasicContinuousModeOn()
        {
            var recogConfig = new RecognitionConfig
            {
                ContinuousMode = true,
            };
            var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            List <RecognitionResult> results = null;
            int           i            = 0;
            List <string> segmentsText = new List <string>(new string[] {
                TestsReferences.TextContinuousModeSeg1,
                TestsReferences.TextContinuousModeSeg2,
                TestsReferences.TextContinuousModeSeg3
            });

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                var audioSource = new FileAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode), AudioType.DETECT);
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                results = speechRecognizer.WaitRecognitionResult();
                Assert.IsTrue(segmentsText.Count() + 1 == results.Count());

                for (i = 0; i < segmentsText.Count(); i++)
                {
                    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode);
                    var textFromFirstAlternative = results[i].Alternatives[0].Text.ToString();
                    Assert.AreEqual(segmentsText[i], textFromFirstAlternative);
                }
                Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_SPEECH, results[i].ResultCode);
            }

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                var audioSource = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode));

                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                results = speechRecognizer.WaitRecognitionResult();
                Assert.IsTrue(segmentsText.Count() + 1 == results.Count());

                for (i = 0; i < segmentsText.Count(); i++)
                {
                    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode);
                    var textFromFirstAlternative = results[i].Alternatives[0].Text;
                    Assert.AreEqual(segmentsText[i], textFromFirstAlternative);
                }
                Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_INPUT_TIMEOUT, results[i].ResultCode);
            }
        }
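In continuous mode each speech segment comes back as its own RecognitionResult, with a final NO_SPEECH or NO_INPUT_TIMEOUT entry closing the session. A small sketch (not part of the original test) of how the recognized segments could be stitched into one transcript, using only members shown above:

            var transcript = string.Join(" ",
                results.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)
                       .Select(r => r.Alternatives[0].Text));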
Example #11
        public static void Main(string[] args)
        {
            Trace.Close();
            TextWriterTraceListener tr1 = new TextWriterTraceListener(System.IO.File.CreateText(string.Format("D:\\Trace_{0}.trace", DateTime.Now.ToString("dd-MM-yyyy_HH-mm"))));

            Trace.Listeners.Add(tr1);
            Trace.AutoFlush = true;

            var objRecognitionConfig = new RecognitionConfig()
            {
                MaxSentences   = 2,
                ContinuousMode = true
            };

            var objClientConfig = new ClientConfig()
            {
                // The server URL and credentials are redacted in the original listing; the rest of
                // this initializer (where objRecognitionConfig was presumably assigned) was lost
                // when the example was extracted.
                ServerUrl = "ws://*****:*****"
            };

            var objAudioSource = new FileAudioSource(@"C:\AudioTestesASR\8K\ContinuosMode\joao_mineiro_marciano_intro_8k.wav");

            try
            {
                SpeechRecognizer obj = SpeechRecognizer.Create(objClientConfig);

                obj.OnSpeechStart += Obj_OnSpeechStart;
                obj.OnSpeechStop  += Obj_OnSpeechStop;
                obj.OnListening   += Obj_OnListening;
                obj.OnPartialRecognitionResult += Obj_OnPartialRecognitionResult;
                obj.OnRecognitionResult        += Obj_OnRecognitionResult;
                obj.OnError += Obj_OnError1;

                LanguageModelList lModelLst = new LanguageModelList();

                lModelLst.AddFromUri("builtin:slm/general");

                obj.Recognize(objAudioSource, lModelLst);
                //var results = obj.WaitRecognitionResult();
                Console.Read();
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
                Console.Read();
            }
        }
Example #12
        public void MaxWaitSettings()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();

            lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
            var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

            // Just initializing both variables with the same value
            DateTime initWait = DateTime.Now;
            DateTime endWait  = new DateTime(initWait.Ticks);

            double    stampInMilliseconds;
            const int timeToWait = 1000;

            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);

                speechRecognizer.Recognize(audioSource, lModelLst);
                initWait = DateTime.Now;
                speechRecognizer.WaitRecognitionResult(timeToWait);
            }
            catch (Exception ex)
            {
                if (ex.Message.Equals("Response timeout"))
                {
                    endWait = DateTime.Now;
                }
                else
                {
                    throw;
                }
            }
            finally
            {
                speechRecognizer.Close();
            }

            stampInMilliseconds = (endWait - initWait).TotalMilliseconds;

            // Assert that the elapsed time was measured and does not exceed timeToWait
            // plus a 500 ms allowance for the natural processing delay.
            Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 500));
        }
        public void MaxWaitSettings()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
            var audioSource  = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

            // Just initializing both variables with the same value
            DateTime initWait = DateTime.Now;
            DateTime endWait  = new DateTime(initWait.Ticks);

            double stampInMilliseconds;

            const int        timeToWait       = 1000;
            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                // Create a language model list with the grammar for the pizza audio:
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");

                speechRecognizer.Recognize(audioSource, lModelLst);

                initWait = DateTime.Now;
                speechRecognizer.WaitRecognitionResult(timeToWait);
            }
            catch (Exception ex)
            {
                if (ex.Message.Equals("Recognition timeout"))
                {
                    endWait = DateTime.Now;
                }
                else
                {
                    throw;
                }
            }
            finally
            {
                speechRecognizer.Close();
            }

            stampInMilliseconds = (endWait - initWait).TotalMilliseconds;

            // Assert that the elapsed time was measured and does not exceed timeToWait
            // plus a 200 ms allowance for the natural processing delay.
            Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 200));
        }
Example #14
        public void CancelWhileRecognize()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            var audioSource  = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioCpf));
            List <RecognitionResult> results = null;

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(1000);

                speechRecognizer.CancelRecognition();
                results = speechRecognizer.WaitRecognitionResult();
            }

            Assert.AreEqual(0, results.Count);
        }
        private void ExecuteMultiplesRecognitions(ClientConfig config)
        {
            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(config);
            int k = 0;

            try
            {
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");


                for (int i = 0; i < 4; i++)
                {
                    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
                    speechRecognizer.Recognize(audioSource, lModelLst);

                    var result           = speechRecognizer.WaitRecognitionResult();
                    var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();

                    Assert.IsNotNull(resultRecognized);
                    k++;
                    if (i < 3)
                    {
                        Thread.Sleep(3000);
                    }
                    else
                    {
                        Thread.Sleep(5500);
                    }
                }
            }
            catch (Exception)
            {
                // Rethrow without losing the original stack trace.
                throw;
            }
            finally
            {
                speechRecognizer.Close();
            }
        }
        public async Task MultipleListeners()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
            var audioSource  = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

            Events = new EventsPassed();
            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                // Create a language model list with the grammar for the pizza audio:
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");

                speechRecognizer.OnListening += SpeechRecognizer_OnListening;
                speechRecognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
                speechRecognizer.OnRecognitionResult        += SpeechRecognizer_OnRecognitionResult;
                speechRecognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
                speechRecognizer.OnSpeechStop  += SpeechRecognizer_OnSpeechStop;

                speechRecognizer.Recognize(audioSource, lModelLst);

                Task <bool> checkEventsPassed = this.CheckIfEventsHasPassed();

                bool result = await checkEventsPassed;

                Assert.IsTrue(result);
            }
            catch (Exception)
            {
                Events = null;
                // Rethrow without losing the original stack trace.
                throw;
            }
            finally
            {
                speechRecognizer.Close();
            }

            Events = null;
        }