/// <summary>
/// Starts a speech-to-text (dictation) session.
/// Not reentrant: if a speech operation is already in progress the call returns immediately.
/// </summary>
/// <param name="u">User on whose behalf speech is captured (not referenced in this body; kept for interface compatibility).</param>
/// <param name="speechToTextDel">Callback invoked with recognized text.</param>
/// <param name="speechStateDel">Callback invoked on speech-state transitions.</param>
/// <param name="networkDel">Delegate handed to Cancel if SpeechKit initialization fails.</param>
public static void Start(User u, SpeechToTextCallbackDelegate speechToTextDel, SpeechStateCallbackDelegate speechStateDel, Delegate networkDel)
{
    // trace the speech request
    TraceHelper.AddMessage("Starting Speech");

    // Start is not reentrant - make sure the caller didn't violate the contract.
    // NOTE(review): this check-then-proceed guard is not atomic; presumably all
    // callers are on the UI thread — confirm before relying on it for thread safety.
    if (speechOperationInProgress)
    {
        return;
    }

    // store the delegates passed in so async speech events can reach the caller
    speechToTextDelegate = speechToTextDel;
    speechStateDelegate = speechStateDel;

    // initialize the connection; on failure, cancel and notify via networkDel
    if (!InitializeSpeechKit())
    {
        Cancel(networkDel);
        return;
    }

    // start a new thread that starts the dictation
    DictationStart(SKRecognizerType.SKDictationRecognizerType);
}
/// <summary>
/// Stops the current speech operation by delegating to the helper that
/// matches the configured speech provider. Unrecognized providers are
/// silently ignored.
/// </summary>
/// <param name="del">Callback invoked with the final speech-to-text result.</param>
public static void Stop(SpeechToTextCallbackDelegate del)
{
    // evaluate the provider once, mirroring the single read a switch performs
    var provider = SpeechProvider;

    if (provider == SpeechProviders.NativeSpeech)
    {
        NativeSpeechHelper.Stop(del);
    }
    else if (provider == SpeechProviders.NuanceSpeech)
    {
        NuanceSpeechHelper.Stop(del);
    }
}
/// <summary>
/// Starts a speech-to-text (dictation) session.
/// Not reentrant: if a speech operation is already in progress the call returns immediately.
/// </summary>
/// <param name="u">User on whose behalf speech is captured (not referenced in this body; kept for interface compatibility).</param>
/// <param name="speechToTextDel">Callback invoked with recognized text.</param>
/// <param name="speechStateDel">Callback invoked on speech-state transitions.</param>
/// <param name="networkDel">Delegate handed to Cancel if SpeechKit initialization fails.</param>
public static void Start(User u, SpeechToTextCallbackDelegate speechToTextDel, SpeechStateCallbackDelegate speechStateDel, Delegate networkDel)
{
    // trace the speech request
    TraceHelper.AddMessage("Starting Speech");

    // Start is not reentrant - make sure the caller didn't violate the contract.
    // NOTE(review): this check-then-proceed guard is not atomic; presumably all
    // callers are on the UI thread — confirm before relying on it for thread safety.
    if (speechOperationInProgress)
    {
        return;
    }

    // store the delegates passed in so async speech events can reach the caller
    speechToTextDelegate = speechToTextDel;
    speechStateDelegate = speechStateDel;

    // initialize the connection; on failure, cancel and notify via networkDel
    if (!InitializeSpeechKit())
    {
        Cancel(networkDel);
        return;
    }

    // start a new thread that starts the dictation
    DictationStart(SKRecognizerType.SKDictationRecognizerType);
}
/// <summary>
/// Stops microphone capture, packages the final buffered speech frame, and sends it
/// to the service via NetworkHelper.EndSpeech. When <c>encode</c> is set, the final
/// frame is compressed on a thread-pool thread and its send is serialized behind the
/// previous frame's mutex; otherwise the raw frame is sent synchronously.
/// </summary>
/// <param name="del">Callback invoked with the speech-to-text result of the completed utterance.</param>
public static void Stop(SpeechToTextCallbackDelegate del)
{
    // get the last chunk of speech still sitting in the mic's buffer
    int len = mic.GetData(speechBuffer);

    // stop listening
    mic.Stop();

    // remove the mic eventhandler so no further buffers arrive after Stop
    mic.BufferReady -= MicBufferReady;
    initializedBufferReadyEvent = false;

    // trace the operation
    TraceHelper.AddMessage(String.Format("Final Frame: {0} bytes of speech", len));

    // create a properly sized copy of the last buffer (speechBuffer is reused, so
    // the chunk must be detached before any async work references it)
    byte[] speechChunk = new byte[len];
    Array.Copy(speechBuffer, speechChunk, len);

    // add the last speech buffer to the list
    speechBufferList.Add(speechChunk);

    // if the encode flag is set, encode the chunk before sending it
    if (encode)
    {
        // do this on a background thread because it is CPU-intensive
        ThreadPool.QueueUserWorkItem(delegate
        {
            // create a new mutex object for this frame; earlier frames' senders
            // signal their mutex when done, forming an ordered send chain
            AutoResetEvent bufferMutex = new AutoResetEvent(false);
            bufferMutexList.Add(bufferMutex);

            // encode the frame
            TraceHelper.AddMessage(String.Format("Final Frame: About to encode speech"));
            byte[] encodedBuf = EncodeSpeech(speechChunk, speechChunk.Length);
            TraceHelper.AddMessage(String.Format("Final Frame: Encoded down to {0} bytes", encodedBuf.Length));

            // wait until the previous frame has been sent.
            // NOTE(review): frameIndex is read from Count AFTER the Add above; if another
            // frame's work item adds to bufferMutexList concurrently, this index may not
            // refer to this frame's mutex — looks benign only if Stop's frame is always
            // the last one queued. Confirm against MicBufferReady's queuing behavior.
            int frameIndex = bufferMutexList.Count - 1;
            if (frameIndex > 0) bufferMutexList[frameIndex - 1].WaitOne();

            // send the last frame and retrieve the response
            TraceHelper.AddMessage(String.Format("Sending Final Frame: {0} bytes", encodedBuf.Length));
            NetworkHelper.EndSpeech(encodedBuf, encodedBuf.Length, del, new NetworkDelegate(NetworkCallback));

            // repeat the sentence back to the user
            PlaybackSpeech();
        });
    }
    else
    {
        // send the operation immediately (no encoding, so no background thread needed)
        TraceHelper.AddMessage(String.Format("Sending Final Frame: {0} bytes", speechChunk.Length));
        NetworkHelper.EndSpeech(speechChunk, speechChunk.Length, del, new NetworkDelegate(NetworkCallback));

        // play back the speech immediately
        PlaybackSpeech();
    }
}