// Kinect enabled apps should customize which Kinect services it initializes here.
// Configures color + skeleton streams on the sensor manager, enables the sensor,
// and (when no other app owns the sensor) starts speech recognition with AEC UI.
private void InitializeKinectServices(KinectSensorManager kinectSensorManager, KinectSensor sensor)
{
    // Application should enable all streams first.
    kinectSensorManager.ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
    kinectSensorManager.ColorStreamEnabled = true;

    sensor.SkeletonFrameReady += this.SkeletonsReady;
    // Skeleton smoothing parameters; applied before the skeleton stream is enabled.
    kinectSensorManager.TransformSmoothParameters = new TransformSmoothParameters
    {
        Smoothing = 0.5f,
        Correction = 0.5f,
        Prediction = 0.5f,
        JitterRadius = 0.05f,
        MaxDeviationRadius = 0.04f
    };
    kinectSensorManager.SkeletonStreamEnabled = true;
    kinectSensorManager.KinectSensorEnabled = true;

    // Only start speech services if no other application grabbed the sensor.
    if (!kinectSensorManager.KinectSensorAppConflict)
    {
        // Start speech recognizer after KinectSensor started successfully.
        // Create() can return null (e.g. speech prerequisites missing), hence the guard.
        this.mySpeechRecognizer = SpeechRecognizer.Create();
        if (null != this.mySpeechRecognizer)
        {
            this.mySpeechRecognizer.SaidSomething += this.RecognizerSaidSomething;
            this.mySpeechRecognizer.Start(sensor.AudioSource);
        }

        // Expose the acoustic-echo-cancellation toggle and sync its state.
        enableAec.Visibility = Visibility.Visible;
        this.UpdateEchoCancellation(this.enableAec);
    }
}
// Kinect enabled apps should customize which Kinect services it initializes here.
// Enables color and skeleton streams, starts the sensor, then starts speech
// recognition against the sensor's audio source.
// Returns the started sensor, or null when another app already owns it.
private KinectSensor InitializeKinectServices(KinectSensor sensor)
{
    // Application should enable all streams first.
    sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

    sensor.SkeletonFrameReady += this.SkeletonsReady;
    sensor.SkeletonStream.Enable(new TransformSmoothParameters()
    {
        Smoothing = 0.5f,
        Correction = 0.5f,
        Prediction = 0.5f,
        JitterRadius = 0.05f,
        MaxDeviationRadius = 0.04f
    });

    try
    {
        sensor.Start();
    }
    catch (IOException)
    {
        // Another application already has the sensor open.
        SensorChooser.AppConflictOccurred();
        return (null);
    }

    // Start speech recognizer after KinectSensor.Start() is called.
    // BUG FIX: Create() returns null if speech prereqs are missing or instantiation
    // fails (as its own comment stated), but the original dereferenced the result
    // unconditionally, causing a NullReferenceException. Guard it like the sibling
    // overload does.
    this.mySpeechRecognizer = SpeechRecognizer.Create();
    if (null != this.mySpeechRecognizer)
    {
        this.mySpeechRecognizer.SaidSomething += this.RecognizerSaidSomething;
        this.mySpeechRecognizer.Start(sensor.AudioSource);
    }

    enableAec.Visibility = Visibility.Visible;
    this.UpdateEchoCancellation(this.enableAec);

    return (sensor);
}
public void WaitRecognitionResultDuplicate()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var languageModels = new LanguageModelList();
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    // Both timestamps start out equal; they are re-taken around the second wait.
    DateTime waitStartedAt = DateTime.Now;
    DateTime waitEndedAt = new DateTime(waitStartedAt.Ticks);
    double elapsedMilliseconds;

    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        languageModels.AddFromUri(TestsReferences.FreeLanguageModel);
        recognizer.Recognize(audioSource, languageModels);

        var firstResult = recognizer.WaitRecognitionResult();
        Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, firstResult[0].ResultCode);

        // A second wait on the same recognition must come back empty and immediately.
        waitStartedAt = DateTime.Now;
        var duplicatedResult = recognizer.WaitRecognitionResult();
        waitEndedAt = DateTime.Now;
        elapsedMilliseconds = (waitEndedAt - waitStartedAt).TotalMilliseconds;

        Assert.AreEqual(0, duplicatedResult.Count);
        Assert.IsTrue(0 < elapsedMilliseconds && elapsedMilliseconds < 5);
    }
}
public async Task MultipleListeners()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var languageModels = new LanguageModelList();
    languageModels.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        languageModels.AddFromUri(TestsReferences.GramPizzaHttp);
        Events = new EventsPassed();

        // Subscribe every listener before kicking off the recognition.
        recognizer.OnListening += SpeechRecognizer_OnListening;
        recognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
        recognizer.OnRecognitionResult += SpeechRecognizer_OnRecognitionResult;
        recognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
        recognizer.OnSpeechStop += SpeechRecognizer_OnSpeechStop;

        recognizer.Recognize(audioSource, languageModels);

        // The test passes only when every subscribed event has fired.
        Task<bool> checkEventsPassed = CheckIfEventsHasPassed();
        bool allEventsFired = await checkEventsPassed;
        Assert.IsTrue(allEventsFired);
    }

    Events = null;
}
// Verifies a recognition still succeeds after the session has been idle past
// the server-side timeout.
public void RecogAfterSessionTimeout()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    var lModelLst = new LanguageModelList();
    List <RecognitionResult> results = null;

    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
    {
        // Let the idle session exceed the server timeout before recognizing.
        Thread.Sleep(65000);
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        speechRecognizer.Recognize(audioSource, lModelLst);
        results = speechRecognizer.WaitRecognitionResult();
    }

    // BUG FIX: FirstOrDefault() can return null when nothing was RECOGNIZED; the
    // original dereferenced .Alternatives unconditionally, turning a failed
    // recognition into a NullReferenceException instead of a clean assert failure.
    var score = results?.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED)
        .FirstOrDefault()?.Alternatives
        .Where(a => a.Confidence >= 90).FirstOrDefault()?.Confidence;

    Assert.IsNotNull(score);

    var textFromFirstAlternative = results[0].Alternatives[0].Text.ToString();
    Assert.AreEqual(TestsReferences.TextPizzaVeg, textFromFirstAlternative);
}
// Wires the main view-model: grabs the Kinect device and main window, starts
// speech recognition (if available), and initializes tracking/graphing state.
public MainModel()
{
    mKinect = KinectDevice.Instance;
    mWindow = (MainWindow)System.Windows.Application.Current.MainWindow;
    //mVoiceControl = VoiceControl.Instance();

    speechRecognizer = SpeechRecognizer.Create(); //returns null if problem with speech prereqs or instantiation.
    if (speechRecognizer != null)
    {
        speechRecognizer.Start(new KinectAudioSource()); //KinectSDK TODO: expose Runtime.AudioSource to return correct audiosource.
        speechRecognizer.SaidSomething += new EventHandler <SpeechRecognizer.SaidSomethingEventArgs>(recognizer_SaidSomething);
    }

    mSkeleton = mKinect.mSkeleton;
    mGraphPlot = mWindow.theGraphPlot.vm;

    // Start in tracking mode with the corresponding toolbar button pre-checked.
    mCurrentModeState = ModeState.TrackingMode;
    mWindow.IndicatorPan.TrackingButton.IsChecked = true;
    UpdateMode((int)ModeState.TrackingMode);

    // Default tracked joint.
    mSelectedJoint = "Head";
    ChangeCurrentJoint(mSelectedJoint);

    // Graphing helpers; all editing modes start disabled.
    mLineGraphing = new LineGraphing();
    mParabolaGraphing = new ParabolaGraphing();
    mSineGraphing = new SineGraphing();
    mAllowLineEdit = false;
    mAllowParabolaEdit = false;
    mAllowSineEdit = false;
    mRandom = new Random();
}
// Verifies that a result can still be collected after idling past the server
// session timeout while the audio was already fully submitted.
public void SessionTimeout()
{
    var clientConfig = CreateClientConfigDefault(CreateConfigDefault());

    // Send the whole buffer up front so it can be written while we idle.
    var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\pizza\pizza_veg_audio_8k.wav"));
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        speechRecognizer.Recognize(audioSource, lModelLst);

        // Idle beyond the session timeout before collecting the result.
        Thread.Sleep(65000);
        var result = speechRecognizer.WaitRecognitionResult();
        var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();
        Assert.IsNotNull(resultRecognized);
    }
    // BUG FIX: the original "catch (Exception ex) { throw ex; }" reset the stack
    // trace; dropping the pointless catch preserves the original exception intact.
    finally
    {
        speechRecognizer.Close();
    }
}
// Verifies that calling Recognize() while another recognition is in progress
// throws RecognitionException, and that the first recognition still completes.
public void DuplicateRecognize()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);
    var lModelLst = new LanguageModelList();
    List <RecognitionResult> results = null;

    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
    {
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        speechRecognizer.Recognize(audioSource, lModelLst);
        Thread.Sleep(500);

        // BUG FIX: the original caught Exception and only then asserted its type,
        // so the test silently passed when the duplicate call threw nothing.
        // Fail explicitly when no exception is raised; catch only the expected type.
        try
        {
            speechRecognizer.Recognize(audioSource, lModelLst);
            Assert.Fail("Second Recognize() on a busy session should have thrown RecognitionException.");
        }
        catch (RecognitionException)
        {
            // Expected: a recognition is already in progress.
        }

        results = speechRecognizer.WaitRecognitionResult();
    }

    Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[0].ResultCode);
}
// Runs 'recogs' back-to-back recognitions on one recognizer session, asserting
// each round trip is RECOGNIZED, with a pause between rounds.
private void ExecuteMultiplesRecognitions(ClientConfig config, int recogs, bool useEndSleep = true)
{
    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(config))
    {
        for (int attempt = 0; attempt < recogs; attempt++)
        {
            // Fresh audio source and model list for every round trip.
            var audio = new FileAudioSource(TestsReferences.AudioCpf, AudioType.DETECT);
            var models = new LanguageModelList();
            models.AddFromUri(TestsReferences.FreeLanguageModel);

            recognizer.Recognize(audio, models);
            var result = recognizer.WaitRecognitionResult();
            Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, result[0].ResultCode);

            // Short pause between rounds; a longer one after the last round.
            bool isLastRound = attempt == recogs - 1;
            Thread.Sleep(isLastRound ? 6000 : 3000);
        }
    }
}
// Verifies that cancelling mid-recognition leaves no results to collect.
public void CancelWhileRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());

    // Send the whole buffer up front so audio streams while we cancel.
    var audioSource = new BufferAudioSource(File.ReadAllBytes(@"8k\ContinuosMode\joao_mineiro_marciano_intro_8k.wav"));
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        speechRecognizer.Recognize(audioSource, lModelLst);

        // Let the recognition get under way, then abort it.
        Thread.Sleep(2000);
        speechRecognizer.CancelRecognition();
        var result = speechRecognizer.WaitRecognitionResult();
        Assert.IsTrue(result.Count == 0);
    }
    // BUG FIX: removed "catch (Exception ex) { throw ex; }", which reset the
    // stack trace; with no catch the original exception propagates intact.
    finally
    {
        speechRecognizer.Close();
    }
}
// Cancelling when no recognition is in flight must complete without throwing.
public void CancelNoRecognize()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);

    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        recognizer.CancelRecognition();
    }
}
// Continuous-mode recognition: the server segments the audio and returns one
// result per detected speech segment, plus one trailing non-speech result.
// The same audio is exercised twice: via FileAudioSource and BufferAudioSource.
public void BasicContinuousModeOn()
{
    var recogConfig = new RecognitionConfig
    {
        ContinuousMode = true,
    };
    var clientConfig = CreateClientWithCredentials(recogConfig, TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    List <RecognitionResult> results = null;
    int i = 0;
    // Expected transcription of each speech segment in the continuous-mode audio.
    List <string> segmentsText = new List <string>(new string[] { TestsReferences.TextContinuousModeSeg1, TestsReferences.TextContinuousModeSeg2, TestsReferences.TextContinuousModeSeg3 });

    // First pass: stream the audio through a FileAudioSource.
    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
    {
        var audioSource = new FileAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode), AudioType.DETECT);
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        speechRecognizer.Recognize(audioSource, lModelLst);
        results = speechRecognizer.WaitRecognitionResult();

        // One result per segment plus the trailing non-speech result.
        Assert.IsTrue(segmentsText.Count() + 1 == results.Count());
        for (i = 0; i < segmentsText.Count(); i++)
        {
            Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode);
            var textFromFirstAlternative = results[i].Alternatives[0].Text.ToString();
            Assert.AreEqual(segmentsText[i], textFromFirstAlternative);
        }
        Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_SPEECH, results[i].ResultCode);
    }

    // Second pass: same audio via BufferAudioSource.
    // NOTE(review): lModelLst already holds FreeLanguageModel from the first pass,
    // so this AddFromUri leaves it in the list twice — confirm that is intended.
    using (SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig))
    {
        var audioSource = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioContinuosMode));
        lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
        speechRecognizer.Recognize(audioSource, lModelLst);
        results = speechRecognizer.WaitRecognitionResult();

        Assert.IsTrue(segmentsText.Count() + 1 == results.Count());
        for (i = 0; i < segmentsText.Count(); i++)
        {
            Assert.AreEqual(CPqDASR.RecognitionResultCode.RECOGNIZED, results[i].ResultCode);
            var textFromFirstAlternative = results[i].Alternatives[0].Text;
            Assert.AreEqual(segmentsText[i], textFromFirstAlternative);
        }
        // NOTE(review): the first pass expects NO_SPEECH for the trailing result while
        // this pass expects NO_INPUT_TIMEOUT — presumably because a BufferAudioSource
        // is never explicitly finished; confirm this asymmetry is intentional.
        Assert.AreEqual(CPqDASR.RecognitionResultCode.NO_INPUT_TIMEOUT, results[i].ResultCode);
    }
}
// Runs a single recognition round trip and returns whatever the server produced.
private List <RecognitionResult> ExecuteRecognition(ClientConfig clientConfig, LanguageModelList languageModelList, IAudioSource audioSource)
{
    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        recognizer.Recognize(audioSource, languageModelList);
        return (recognizer.WaitRecognitionResult());
    }
}
// Waiting with no recognition in flight must return an empty result set.
public void WaitNoRecognize()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);

    List <RecognitionResult> results;
    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        results = recognizer.WaitRecognitionResult();
    }

    Assert.AreEqual(0, results.Count);
}
// Console driver: routes trace output to a timestamped file, runs a continuous-mode
// recognition against a remote ASR server, and logs events via the Obj_* handlers.
public static void Main(string[] args)
{
    // Route trace output to a timestamped file on D:\ for this run.
    Trace.Close();
    TextWriterTraceListener tr1 = new TextWriterTraceListener(System.IO.File.CreateText(string.Format("D:\\Trace_{0}.trace", DateTime.Now.ToString("dd-MM-yyyy_HH-mm"))));
    Trace.Listeners.Add(tr1);
    Trace.AutoFlush = true;

    var objRecognitionConfig = new RecognitionConfig() { MaxSentences = 2, ContinuousMode = true };

    // NOTE(review): the next statement appears corrupted/redacted — the masked server
    // URL ("ws://*****:*****@") runs directly into a wav-file path, so the ClientConfig
    // initializer and the audio-source construction were likely collapsed together by
    // credential masking. Restore the original ServerUrl and the objAudioSource
    // construction before compiling/using this file.
    var objClientConfig = new ClientConfig() { ServerUrl = "ws://*****:*****@"C:\AudioTestesASR\8K\ContinuosMode\joao_mineiro_marciano_intro_8k.wav");

    try
    {
        SpeechRecognizer obj = SpeechRecognizer.Create(objClientConfig);

        // Wire every recognizer event to its logging handler before recognizing.
        obj.OnSpeechStart += Obj_OnSpeechStart;
        obj.OnSpeechStop += Obj_OnSpeechStop;
        obj.OnListening += Obj_OnListening;
        obj.OnPartialRecognitionResult += Obj_OnPartialRecognitionResult;
        obj.OnRecognitionResult += Obj_OnRecognitionResult;
        obj.OnError += Obj_OnError1;

        LanguageModelList lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        obj.Recognize(objAudioSource, lModelLst);
        //var results = obj.WaitRecognitionResult();

        // Keep the process alive so the event handlers can report.
        Console.Read();
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
        Console.Read();
    }
}
// Closing a recognizer that never started a recognition must not throw.
public void CloseWithoutRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());

    // BUG FIX: removed "catch (Exception ex) { throw ex; }" (it reset the stack
    // trace and added nothing) and the unused BufferAudioSource local, which read
    // a wav file that was never handed to the recognizer.
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    speechRecognizer.Close();
}
// Verifies WaitRecognitionResult(timeout) actually gives up near the requested
// timeout instead of blocking until the recognition finishes.
public void MaxWaitSettings()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var lModelLst = new LanguageModelList();
    lModelLst.AddFromUri(TestsReferences.FreeLanguageModel);
    var audioSource = new FileAudioSource(TestsReferences.AudioPizzaVeg, AudioType.DETECT);

    // Both timestamps start equal; endWait is only updated when the timeout fires.
    DateTime initWait = DateTime.Now;
    DateTime endWait = new DateTime(initWait.Ticks);
    double stampInMilliseconds;
    const int timeToWait = 1000;

    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // BUG FIX: removed a second AddFromUri(FreeLanguageModel) here — the model
        // was already added above, so the list carried it twice.
        speechRecognizer.Recognize(audioSource, lModelLst);
        initWait = DateTime.Now;
        speechRecognizer.WaitRecognitionResult(timeToWait);
    }
    catch (Exception ex)
    {
        if (ex.Message.Equals("Response timeout"))
        {
            endWait = DateTime.Now;
        }
        else
        {
            // BUG FIX: "throw ex;" reset the stack trace; "throw;" preserves it.
            throw;
        }
    }
    finally
    {
        speechRecognizer.Close();
    }

    stampInMilliseconds = (endWait - initWait).TotalMilliseconds;

    // Asserts the stamp was correctly calculated and stays near timeToWait,
    // allowing 500 ms of slack for natural processing delay.
    Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 500));
}
// Verifies WaitRecognitionResult(timeout) gives up near the requested timeout.
public void MaxWaitSettings()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");

    // Both timestamps start equal; endWait is only updated when the timeout fires.
    DateTime initWait = DateTime.Now;
    DateTime endWait = new DateTime(initWait.Ticks);
    double stampInMilliseconds;
    const int timeToWait = 1000;

    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // Language model with the pizza grammar matching the audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");
        speechRecognizer.Recognize(audioSource, lModelLst);
        initWait = DateTime.Now;
        speechRecognizer.WaitRecognitionResult(timeToWait);
    }
    catch (Exception ex)
    {
        if (ex.Message.Equals("Recognition timeout"))
        {
            endWait = DateTime.Now;
        }
        else
        {
            // BUG FIX: "throw ex;" reset the stack trace; "throw;" preserves it.
            throw;
        }
    }
    finally
    {
        speechRecognizer.Close();
    }

    // BUG FIX: the original compared TotalSeconds against (timeToWait + 200),
    // mixing seconds with a millisecond budget — the assertion was trivially true.
    // Measure in milliseconds so the bound is meaningful.
    stampInMilliseconds = (endWait - initWait).TotalMilliseconds;

    // Asserts the stamp was correctly calculated and stays near timeToWait,
    // allowing 200 ms of slack for natural processing delay.
    Assert.IsTrue(stampInMilliseconds > 0 && stampInMilliseconds <= (timeToWait + 200));
}
// Kinect enabled apps should customize which Kinect services it initializes here.
// Configures color + skeleton streams, enables the sensor, wires gesture listeners
// to commands/swipe throttling, and starts speech recognition when the sensor is free.
private void InitializeKinectServices(KinectSensorManager kinectSensorManager, KinectSensor sensor)
{
    // Application should enable all streams first.
    kinectSensorManager.ColorFormat = ColorImageFormat.RgbResolution640x480Fps30;
    kinectSensorManager.ColorStreamEnabled = true;

    sensor.SkeletonFrameReady += this.SkeletonsReady;
    // Skeleton smoothing parameters; applied before the skeleton stream is enabled.
    kinectSensorManager.TransformSmoothParameters = new TransformSmoothParameters
    {
        Smoothing = 0.5f,
        Correction = 0.5f,
        Prediction = 0.5f,
        JitterRadius = 0.05f,
        MaxDeviationRadius = 0.04f
    };
    kinectSensorManager.SkeletonStreamEnabled = true;
    kinectSensorManager.KinectSensorEnabled = true;

    // Each mapped gesture fires its associated command when detected.
    foreach (var keyValuePair in gestureMap)
    {
        var gesture = keyValuePair.Key;
        var cmd = keyValuePair.Value;
        gesture.AddListener((s, e) => SendCommand(cmd));
    }

    // Swipe gestures go through a throttle rather than firing commands directly.
    leftSwipeRight.AddListener((s, e) => swipeThrottle.Push(Swipe.lr));
    rightSwipeLeft.AddListener((s, e) => swipeThrottle.Push(Swipe.rl));
    rightSwipeRight.AddListener((s, e) => swipeThrottle.Push(Swipe.rr));
    leftSwipeLeft.AddListener((s, e) => swipeThrottle.Push(Swipe.ll));

    // Only start speech services if no other application grabbed the sensor.
    if (!kinectSensorManager.KinectSensorAppConflict)
    {
        // Start speech recognizer after KinectSensor started successfully.
        // Create() can return null (e.g. speech prerequisites missing), hence the guard.
        this.mySpeechRecognizer = SpeechRecognizer.Create();
        if (null != this.mySpeechRecognizer)
        {
            this.mySpeechRecognizer.SaidSomething += this.RecognizerSaidSomething;
            this.mySpeechRecognizer.Start(sensor.AudioSource);
        }

        // Expose the acoustic-echo-cancellation toggle and sync its state.
        enableAec.Visibility = Visibility.Visible;
        this.UpdateEchoCancellation(this.enableAec);
    }
}
// Handles sensor hot-swap: stops the old sensor, enables streams on the new one,
// starts it, and attaches speech recognition to its audio source.
void kinectSensorChooser1_KinectSensorChanged(object sender, DependencyPropertyChangedEventArgs e)
{
    KinectSensor old = (KinectSensor)e.OldValue;
    StopKinect(old);

    KinectSensor sensor = (KinectSensor)e.NewValue;
    if (sensor == null)
    {
        return;
    }

    var parameters = new TransformSmoothParameters
    {
        Smoothing = 0.3f,
        Correction = 0.0f,
        Prediction = 0.0f,
        JitterRadius = 1.0f,
        MaxDeviationRadius = 0.5f
    };
    // BUG FIX: the original called SkeletonStream.Enable(parameters) and then
    // Enable() again with no arguments, discarding the smoothing parameters.
    sensor.SkeletonStream.Enable(parameters);

    sensor.AllFramesReady += new EventHandler <AllFramesReadyEventArgs>(sensor_AllFramesReady);
    sensor.DepthStream.Enable(DepthImageFormat.Resolution640x480Fps30);
    sensor.ColorStream.Enable(ColorImageFormat.RgbResolution640x480Fps30);

    try
    {
        sensor.Start();
    }
    catch (System.IO.IOException)
    {
        // BUG FIX: on an app conflict the sensor never started, so bail out
        // instead of starting speech recognition against a dead sensor.
        kinectSensorChooser1.AppConflictOccurred();
        return;
    }

    // BUG FIX: Create() returns null when speech prerequisites are missing;
    // guard before subscribing and starting (as the sibling initializers do).
    this.mySpeechRecognizer = SpeechRecognizer.Create();
    if (this.mySpeechRecognizer != null)
    {
        this.mySpeechRecognizer.SaidSomething += this.RecognizerSaidSomething;
        this.mySpeechRecognizer.Start(sensor.AudioSource);
    }
}
// Cancelling when no recognition is in flight must complete without throwing.
public void CancelNoRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        speechRecognizer.CancelRecognition();
    }
    // BUG FIX: removed "catch (Exception ex) { throw ex; }", which reset the
    // stack trace; with no catch the original exception propagates intact.
    finally
    {
        speechRecognizer.Close();
    }
}
// Waiting with no recognition in flight must return an empty result set.
public void WaitNoRecognize()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        var result = speechRecognizer.WaitRecognitionResult();
        Assert.IsTrue(result.Count == 0);
    }
    // BUG FIX: removed "catch (Exception ex) { throw ex; }", which reset the
    // stack trace; with no catch the original exception propagates intact.
    finally
    {
        speechRecognizer.Close();
    }
}
// Cancelling a recognition mid-flight must leave no results to collect.
public void CancelWhileRecognize()
{
    var clientConfig = CreateClientWithCredentials(CreateConfigDefault(), TestsReferences.DefaultASRURL, TestsReferences.User, TestsReferences.Password);
    var languageModels = new LanguageModelList();
    var audioSource = new BufferAudioSource(File.ReadAllBytes(TestsReferences.AudioCpf));
    List <RecognitionResult> results = null;

    using (SpeechRecognizer recognizer = SpeechRecognizer.Create(clientConfig))
    {
        languageModels.AddFromUri(TestsReferences.FreeLanguageModel);
        recognizer.Recognize(audioSource, languageModels);

        // Let the recognition get under way, then abort it.
        Thread.Sleep(1000);
        recognizer.CancelRecognition();
        results = recognizer.WaitRecognitionResult();
    }

    Assert.AreEqual(0, results.Count);
}
// Verifies that every recognizer event fires during a full recognition.
public async Task MultipleListeners()
{
    var clientConfig = this.CreateClientConfigDefault(this.CreateConfigDefault());
    var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
    Events = new EventsPassed();
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(clientConfig);
    try
    {
        // Language model with the pizza grammar matching the audio.
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("http://vmh102.cpqd.com.br:8280/asr_dist/repository/grammars/dynamic-gram/pizza.gram");

        // Subscribe every listener before kicking off the recognition.
        speechRecognizer.OnListening += SpeechRecognizer_OnListening;
        speechRecognizer.OnPartialRecognitionResult += SpeechRecognizer_OnPartialRecognitionResult;
        speechRecognizer.OnRecognitionResult += SpeechRecognizer_OnRecognitionResult;
        speechRecognizer.OnSpeechStart += SpeechRecognizer_OnSpeechStart;
        speechRecognizer.OnSpeechStop += SpeechRecognizer_OnSpeechStop;

        speechRecognizer.Recognize(audioSource, lModelLst);

        Task <bool> checkEventsPassed = this.CheckIfEventsHasPassed();
        bool result = await checkEventsPassed;
        Assert.IsTrue(result);
    }
    // BUG FIX: the original "catch (Exception ex) { Events = null; throw ex; }"
    // reset the stack trace. Clearing Events in finally covers both the success
    // and failure paths, so the catch is unnecessary.
    finally
    {
        speechRecognizer.Close();
        Events = null;
    }
}
// Runs four back-to-back recognitions on one recognizer session, asserting each
// round trip produced a RECOGNIZED result, with a pause between rounds.
private void ExecuteMultiplesRecognitions(ClientConfig config)
{
    SpeechRecognizer speechRecognizer = SpeechRecognizer.Create(config);
    try
    {
        var lModelLst = new LanguageModelList();
        lModelLst.AddFromUri("builtin:slm/general");
        for (int i = 0; i < 4; i++)
        {
            // Fresh audio source per round trip; the model list is reused.
            var audioSource = new FileAudioSource(@"8k\pizza\pizza_veg_audio_8k.wav");
            speechRecognizer.Recognize(audioSource, lModelLst);
            var result = speechRecognizer.WaitRecognitionResult();
            var resultRecognized = result.Where(r => r.ResultCode == CPqDASR.RecognitionResultCode.RECOGNIZED).FirstOrDefault();
            Assert.IsNotNull(resultRecognized);

            // BUG FIX: removed the dead counter "k" (incremented, never read).
            // Short pause between rounds; a longer one after the last round.
            Thread.Sleep(i < 3 ? 3000 : 5500);
        }
    }
    // BUG FIX: removed "catch (Exception ex) { throw ex; }", which reset the
    // stack trace; with no catch the original exception propagates intact.
    finally
    {
        speechRecognizer.Close();
    }
}