        public void SessionTimeout()
        {
            var clientConfig = CreateClientConfigDefault(CreateConfigDefault());
            //Send the whole audio buffer to be written at once
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));

            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");

                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(65000);

                var result           = speechRecognizer.WaitRecognitionResult();
                var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();

                Assert.IsNotNull(resultRecognized);
            }
            catch (Exception)
            {
                //Rethrow without resetting the original stack trace
                throw;
            }
            finally
            {
                speechRecognizer.Close();
            }
        }
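        //The configuration helpers called by these tests (CreateConfigDefault,
        //CreateClientConfigDefault, CreateClientWithCredentials) are not part of this
        //listing. A minimal sketch follows; the ClientConfig object-initializer shape and
        //the property names ServerUrl, UserName, Password and RecogConfig are assumptions,
        //not the SDK's confirmed API.
        private RecognitionConfig CreateConfigDefault()
        {
            //Default recognition parameters shared by the tests
            return new RecognitionConfig();
        }

        private ClientConfig CreateClientConfigDefault(RecognitionConfig recogConfig)
        {
            //Assumed to point the client at the default test server with the default credentials
            return CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
        }

        private ClientConfig CreateClientWithCredentials(RecognitionConfig recogConfig, string serverUrl, string user, string password)
        {
            //Hypothetical property names; adjust to the actual CPqDASR ClientConfig API
            return new ClientConfig
            {
                ServerUrl   = serverUrl,
                UserName    = user,
                Password    = password,
                RecogConfig = recogConfig
            };
        }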
        public void RecognizeBufferAudioSource()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            var audioSource  = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioPizzaVeg));
            List <RecognitionResult> results = null;

            try
            {
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                results = ExecuteRecognition(clientConfig, lModelLst, audioSource);
            }
            catch (Exception ex)
            {
                throw new InternalTestFailureException(ex.Message);
            }

            var score = results?.
                        FirstOrDefault(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)?.
                        Alternatives.
                        FirstOrDefault(a => a.Confidence >= 90)?.
                        Confidence;

            Assert.IsNotNull(score);

            var textFromFirstAlternative = results[0].Alternatives[0].Text.ToString();

            Assert.AreEqual(TestsReferences.TextPizzaVeg, textFromFirstAlternative);
        }
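        //ExecuteRecognition is used by several tests in this listing but is not shown here.
        //A minimal sketch, assuming it simply wraps the create/recognize/wait/close cycle
        //demonstrated by the other tests; the IAudioSource parameter type is an assumption.
        private List<RecognitionResult> ExecuteRecognition(ClientConfig clientConfig, LanguageModelList lModelLst, IAudioSource audioSource)
        {
            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                //Start the recognition and block until the final results are available
                speechRecognizer.Recognize(audioSource, lModelLst);
                return speechRecognizer.WaitRecognitionResult();
            }
            finally
            {
                speechRecognizer.Close();
            }
        }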
        public void CancelWhileRecognize()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
            //Send the whole audio buffer to be written at once
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\ContinuosMode\joao_mineiro_marciano_intro_8k.wav"));

            SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);

            try
            {
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");

                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(2000);

                speechRecognizer.CancelRecognition();

                var result = speechRecognizer.WaitRecognitionResult();
                Assert.AreEqual(0, result.Count);
            }
            catch (Exception)
            {
                //Rethrow without resetting the original stack trace
                throw;
            }
            finally
            {
                speechRecognizer.Close();
            }
        }
        public void BasicContinuousModeOn()
        {
            var recogConfig = new RecognitionConfig
            {
                ContinuousMode = true,
            };
            var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            List <RecognitionResult> results = null;
            int           i            = 0;
            List <string> segmentsText = new List <string>(new string[] {
                TestsReferences.TextContinuousModeSeg1,
                TestsReferences.TextContinuousModeSeg2,
                TestsReferences.TextContinuousModeSeg3
            });

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                var audioSource = new FileAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode), AudioType.DETECT);
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                results = speechRecognizer.WaitRecognitionResult();
                Assert.AreEqual(segmentsText.Count + 1, results.Count);

                for (i = 0; i < segmentsText.Count(); i++)
                {
                    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode);
                    var textFromFirstAlternative = results[i].Alternatives[0].Text.ToString();
                    Assert.AreEqual(segmentsText[i], textFromFirstAlternative);
                }
                Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_SPEECH, results[i].ResultCode);
            }

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                var audioSource = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode));

                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                results = speechRecognizer.WaitRecognitionResult();
                Assert.AreEqual(segmentsText.Count + 1, results.Count);

                for (i = 0; i < segmentsText.Count(); i++)
                {
                    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode);
                    var textFromFirstAlternative = results[i].Alternatives[0].Text;
                    Assert.AreEqual(segmentsText[i], textFromFirstAlternative);
                }
                Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_INPUT_TIMEOUT, results[i].ResultCode);
            }
        }
        public void CloseWithoutRecognize()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
            //Send the whole audio buffer to be written at once
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));

            try
            {
                SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
                speechRecognizer.Close();
            }
            catch (Exception)
            {
                //Rethrow without resetting the original stack trace
                throw;
            }
        }
        public void CancelWhileRecognize()
        {
            var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
            var lModelLst    = new LanguageModelList();
            var audioSource  = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioCpf));
            List <RecognitionResult> results = null;

            using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
            {
                lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
                speechRecognizer.Recognize(audioSource, lModelLst);

                Thread.Sleep(1000);

                speechRecognizer.CancelRecognition();
                results = speechRecognizer.WaitRecognitionResult();
            }

            Assert.AreEqual(0, results.Count);
        }
        public void RecognizeMaxWaitSeconds()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());

            //Set the max wait time to 2 seconds so the recognition times out
            clientConfig.MaxWaitSeconds = 2000;

            //Send the whole audio buffer to be written at once
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\ContinuosMode\joao_mineiro_marciano_intro_8k.wav"));

            try
            {
                //Create a language model list using the built-in general SLM:
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");

                ExecuteRecognition(clientConfig, lModelLst, audioSource);
            }
            catch (Exception ex)
            {
                Assert.IsInstanceOfType(ex, typeof(RecognitionException));
            }
        }
        public void RecognizeBufferAudioSource()
        {
            var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
            //Send the whole audio buffer to be written at once
            var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));

            try
            {
                //Create a language model list with the built-in general SLM for the pizza audio:
                var lModelLst = new LanguageModelList();
                lModelLst.AddFromUri("builtin:slm/general");

                var results = ExecuteRecognition(clientConfig, lModelLst, audioSource);

                var resultRecognized = results.
                                       Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).
                                       FirstOrDefault();
                Assert.IsNotNull(resultRecognized);
            }
            catch (Exception)
            {
                //Rethrow without resetting the original stack trace
                throw;
            }
        }
        public void TestReadWrightWait()
        {
            //Creates the source queue of chunks that the producer will write and the consumer will read
            Queue <byte[]> originSource = ReadFileInChunks(TestsReferences.AudioPizzaVeg, out int originalLength);

            //BufferAudioSource object that represents our circular buffer source
            BufferAudioSource bufferAudioSource = new BufferAudioSource();

            bool isToContinue = true;

            //Producer thread
            Thread producerThread = new Thread(() =>
            {
                while (originSource.Count > 0)
                {
                    bufferAudioSource.Write(originSource.Dequeue(), AudioType.DETECT);
                    Thread.Sleep(100);
                }

                isToContinue = false;
                bufferAudioSource.Finish();
            });

            //Counter used to check that the Read method waits for the producer
            int iterations = 0;

            //Flag that indicates the pending Read call was released (returned an empty buffer) after the producer finished
            bool isReadReleased = false;

            //Consumer thread
            Thread consumerThread = new Thread(() =>
            {
                //Consuming the buffer of audio source
                while (true)
                {
                    if (isToContinue)
                    {
                        byte[] localbytes = bufferAudioSource.Read();

                        if (localbytes != null && localbytes.Length > 0)
                        {
                            iterations++;
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            });

            //Start the producer thread
            producerThread.Start();

            //Start the consumer thread
            consumerThread.Start();

            while (iterations < originalLength)
            {
                Thread.Sleep(1000);
            }

            Thread.Sleep(1000);

            byte[] bytes = bufferAudioSource.Read();

            if (bytes?.Length == 0)
            {
                isReadReleased = true;
            }

            Assert.IsTrue(iterations == originalLength && originSource.Count == 0 && isReadReleased);
        }
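        //ReadFileInChunks is referenced above but not included in this listing.
        //A minimal sketch, assuming "originalLength" means the number of chunks produced,
        //which is what the consumer loop above counts against.
        private Queue<byte[]> ReadFileInChunks(string path, out int originalLength, int chunkSize = 4096)
        {
            var chunks = new Queue<byte[]>();
            byte[] audio = File.ReadAllBytes(path);

            //Split the audio into fixed-size chunks; the last chunk may be shorter
            for (int offset = 0; offset < audio.Length; offset += chunkSize)
            {
                int size = Math.Min(chunkSize, audio.Length - offset);
                byte[] chunk = new byte[size];
                Array.Copy(audio, offset, chunk, 0, size);
                chunks.Enqueue(chunk);
            }

            originalLength = chunks.Count;
            return chunks;
        }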