private void UnionMethodsAsync(string vawAudio)
{
    string PointURL = "https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/75a1f980-aa8e-42e5-927c-eef62286b24c?subscription-key=3441bb92f501414e8bcb7013517a20f1&verbose=true&timezoneOffset=0&q=";

    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntentUsingEndpointUrl(
        this.DefaultLocale,
        this.SubscriptionKey,
        PointURL);
    this.dataClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition and intent results.
    this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
    this.dataClient.OnIntent += this.OnIntentHandler;

    using (FileStream fileStream = new FileStream(vawAudio, FileMode.Open, FileAccess.Read))
    {
        int bytesRead = 0;
        byte[] buffer = new byte[1024];

        try
        {
            do
            {
                // Read the next chunk of the wave file and stream it to the service.
                bytesRead = fileStream.Read(buffer, 0, buffer.Length);
                this.dataClient.SendAudio(buffer, bytesRead);
            } while (bytesRead > 0);
        }
        finally
        {
            // Signal end of audio; final results arrive via OnResponseReceived.
            this.dataClient.EndAudio();
        }
    }
}
/// <summary>
/// Creates an instance of the speech recognition client type.
/// The instance can recognize speech from files and from audio devices.
/// The audio data is split into small segments, and this instance sends
/// the segments to the service one after another.
/// </summary>
/// <param name="mode"><see cref="SpeechRecognitionMode"/> specifying the speech recognition mode.</param>
private void CreateDataRecoClient(SpeechRecognitionMode mode)
{
    if (this.dataClient != null)
    {
        this.dataClient.Dispose();
        this.dataClient = null;
    }

    // Create a DataRecognitionClient instance via the factory's CreateDataClient method.
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        mode,             // The speech recognition mode.
        "en-US",          // The language is hardcoded to English because both of our demo files contain English speech.
        SUBSCRIPTIONKEY); // The key of the Bing Speech API service instance.

    // Event handlers for speech recognition results.
    if (mode == SpeechRecognitionMode.ShortPhrase)
    {
        // Add a handler for recognition results in ShortPhrase mode.
        this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    }
    else
    {
        // Add a handler for recognition results in LongDictation mode.
        // The service fires this handler multiple times, based on the
        // pauses it detects between sentences.
        this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    }

    // While the service is recognizing the audio, this handler fires multiple
    // times, whenever the service produces a partial prediction of the content.
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;

    // Fires when the service detects an error.
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}
public void Start()
{
    var location = System.Reflection.Assembly.GetEntryAssembly().Location;
    var directory = Path.GetDirectoryName(location);
    var filename = "Recordings/linear.wav";
    var path = string.Format(@"{0}/{1}", directory, filename);
    var format = WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, 8000, 1, 16000, 2, 16);

    client = SpeechRecognitionServiceFactory.CreateDataClient(SpeechRecognitionMode.LongDictation, "en-US", key1);
    client.OnConversationError += Client_OnConversationError;
    client.OnIntent += Client_OnIntent;
    client.OnMicrophoneStatus += Client_OnMicrophoneStatus;
    client.OnPartialResponseReceived += Client_OnPartialResponseReceived;
    client.OnResponseReceived += Client_OnResponseReceived;

    client.AudioStart();

    // Describe the raw audio layout before sending any data: 8 kHz, 16-bit, mono PCM.
    client.SendAudioFormat(new SpeechAudioFormat
    {
        AverageBytesPerSecond = 16000,
        BitsPerSample = 16,
        BlockAlign = 2,
        ChannelCount = 1,
        EncodingFormat = AudioCompressionType.PCM,
        SamplesPerSecond = 8000
    });

    IsReady = true;
}
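// Start above declares the audio format but never streams the file it builds a
// path to. Below is a minimal sketch of the follow-up step, assuming linear.wav
// holds headerless PCM matching the declared format; the SendFile name is an
// assumption for illustration, not part of the original sample.
public void SendFile(string path)
{
    using (var fileStream = File.OpenRead(path))
    {
        var buffer = new byte[1024];
        int bytesRead;
        while ((bytesRead = fileStream.Read(buffer, 0, buffer.Length)) > 0)
        {
            client.SendAudio(buffer, bytesRead);
        }

        // Final results arrive through Client_OnResponseReceived.
        client.EndAudio();
    }
}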
private void SendAudio(DataRecognitionClient dataClient, string fileName)
{
    using (FileStream fileStream = new FileStream(fileName, FileMode.Open, FileAccess.Read))
    {
        // Note: for wave files we can send data from the file straight to the server.
        // If your audio is not a wave file and you instead have raw data (for
        // example, audio coming over Bluetooth), then before sending any audio
        // data you must first send a SpeechAudioFormat descriptor that describes
        // the layout and format of your raw audio, via DataRecognitionClient's
        // SendAudioFormat() method.
        int bytesRead = 0;
        byte[] buffer = new byte[10000];

        try
        {
            do
            {
                // Read more audio data into the byte buffer.
                bytesRead = fileStream.Read(buffer, 0, buffer.Length);

                // Send the audio data to the service.
                dataClient.SendAudio(buffer, bytesRead);
            } while (bytesRead > 0);
        }
        finally
        {
            // We are done sending audio. Final recognition results arrive in the OnResponseReceived event.
            dataClient.EndAudio();
        }
    }
}
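// The comment above mentions sending a SpeechAudioFormat descriptor before raw
// audio. Below is a minimal sketch of what that could look like for raw 16 kHz,
// 16-bit, mono PCM; the sample rate and the SendRawAudio name are assumptions
// for illustration, not part of the original sample.
private void SendRawAudio(DataRecognitionClient dataClient, byte[] rawPcm)
{
    // Describe the raw buffer before the first SendAudio call.
    dataClient.SendAudioFormat(new SpeechAudioFormat
    {
        EncodingFormat = AudioCompressionType.PCM,
        SamplesPerSecond = 16000, // assumed capture rate
        BitsPerSample = 16,
        ChannelCount = 1,
        BlockAlign = 2,                // channels * bytes per sample
        AverageBytesPerSecond = 32000  // SamplesPerSecond * BlockAlign
    });

    try
    {
        // Raw data carries no header, so it can be sent as-is.
        dataClient.SendAudio(rawPcm, rawPcm.Length);
    }
    finally
    {
        dataClient.EndAudio();
    }
}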
/// <summary>
/// SpeechToText constructor. Creates the recording objects and calls the Initialize function.
/// </summary>
/// <param name="bingApiKey">Bing Speech API key</param>
public SpeechToText(string bingApiKey)
{
    _dataRecClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntent(
        _language, bingApiKey, "LUIS_APP_ID", "LUIS_API_KEY");
    _micRecClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        _speechMode, _language, bingApiKey);

    Initialize();
}
/// <summary>
/// SpeechToText constructor. Creates the recording objects and calls the Initialize function.
/// </summary>
/// <param name="bingApiKey">Bing Speech API key</param>
public SpeechToText(string bingApiKey)
{
    _dataRecClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntentUsingEndpointUrl(
        _language, bingApiKey, "LUIS_ENDPOINT");
    _micRecClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        _speechMode, _language, bingApiKey);

    Initialize();
}
/// <summary>
/// Creates a data client without LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClient()
{
    string url = null;
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        url = "https://d5a89bbf25d54ab2a6cbcff90aece700.api.cris.ai/ws/cris/speech/recognize";
    }
    else if (this.Mode == SpeechRecognitionMode.LongDictation)
    {
        url = "https://a5936cdca4384273a0428efc972cf356.api.cris.ai/ws/cris/speech/recognize/continuous";
    }

    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey,
        this.SubscriptionKey,
        url);
    this.dataClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition results
    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    }
    else
    {
        this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    }

    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}
protected virtual void Dispose(bool disposing)
{
    if (disposing)
    {
        if (_micRecClient != null)
        {
            _micRecClient.EndMicAndRecognition();

            _micRecClient.OnMicrophoneStatus -= OnMicrophoneStatus;
            _micRecClient.OnPartialResponseReceived -= OnPartialResponseReceived;
            _micRecClient.OnResponseReceived -= OnResponseReceived;
            _micRecClient.OnConversationError -= OnConversationErrorReceived;

            _micRecClient.Dispose();
            _micRecClient = null;
        }

        if (_dataRecClient != null)
        {
            _dataRecClient.OnIntent -= OnIntentReceived;
            _dataRecClient.OnPartialResponseReceived -= OnPartialResponseReceived;
            _dataRecClient.OnConversationError -= OnConversationErrorReceived;
            _dataRecClient.OnResponseReceived -= OnResponseReceived;

            _dataRecClient.Dispose();
            _dataRecClient = null;
        }
    }
}
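// A Dispose(bool) like the one above is normally paired with a public Dispose()
// that triggers it. A minimal sketch of that companion method, assuming the
// class implements IDisposable (not shown in the original sample):
public void Dispose()
{
    Dispose(true);

    // The cleanup already ran, so a finalizer (if any) need not run again.
    GC.SuppressFinalize(this);
}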
public void Stop()
{
    _client?.EndAudio();
    _client?.Dispose();
    _client = null;
    _started = false;
}
public void CreateDataRecoClient()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.ShortPhrase,
        this.DefaultLocale,
        this.SubscriptionKey);

    this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
}
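// Several snippets here wire up OnDataShortPhraseResponseReceivedHandler
// without showing its body. A minimal sketch of such a handler, assuming the
// recognized text should simply be printed; the body is an assumption, not
// part of the original samples.
private void OnDataShortPhraseResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    if (e.PhraseResponse.RecognitionStatus == RecognitionStatus.RecognitionSuccess)
    {
        // Results are ordered by confidence; take the top alternative.
        Console.WriteLine(e.PhraseResponse.Results[0].DisplayText);
    }
    else
    {
        Console.WriteLine("Recognition failed: {0}", e.PhraseResponse.RecognitionStatus);
    }
}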
/// <summary>
/// Creates the data reco client.
/// </summary>
public void CreateDataRecoClient()
{
    this.SubscriptionKey = ConfigurationManager.AppSettings["MicrosoftSpeechApiKey"].ToString();

    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.ShortPhrase,
        this.DefaultLocale, // for example: 'en-us'
        this.SubscriptionKey);

    this.dataClient.OnResponseReceived += this.OnResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationError;
}
private void CreateDataRecoClient()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);
    this.dataClient.AuthenticationUri = "";

    this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}
public void CreateDataRecoClient()
{
    dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.ShortPhrase,
        DefaultLocale,
        PrimaryKey,
        SecondaryKey,
        CrisUri);
    dataClient.AuthenticationUri = AuthenticationUri;

    dataClient.OnResponseReceived += OnDataShortPhraseResponseReceivedHandler;
}
void RunSpeechToTextFromStream(Stream stream)
{
    stream.Seek(0, SeekOrigin.Begin);

    DataRecognitionClient dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntent(
        "en-US",
        "7bb47d20e78846c3aa0340cc2e148a85",
        "LofnLuisBot", // "yourLuisAppID",
        ""             // "yourLuisSubsrciptionID"
        );
    dataClient.AuthenticationUri = ""; // this.AuthenticationUri;

    // Event handlers for speech recognition results
    dataClient.OnResponseReceived += DataClient_OnResponseReceived;
    dataClient.OnPartialResponseReceived += DataClient_OnPartialResponseReceived;
    dataClient.OnConversationError += (sender, args) => { };

    // Event handler for intent result
    dataClient.OnIntent += (sender, args) => { };

    // Note: for wave files we can send data from the stream straight to the server.
    // If your audio is not in wave format and you instead have raw data (for
    // example, audio coming over Bluetooth), then before sending any audio data
    // you must first send a SpeechAudioFormat descriptor that describes the
    // layout and format of your raw audio, via DataRecognitionClient's
    // SendAudioFormat() method.
    int bytesRead = 0;
    byte[] buffer = new byte[1024];

    try
    {
        do
        {
            // Read more audio data into the byte buffer.
            bytesRead = stream.Read(buffer, 0, buffer.Length);

            // Send the audio data to the service.
            dataClient.SendAudio(buffer, bytesRead);
        } while (bytesRead > 0);
    }
    finally
    {
        // We are done sending audio. Final recognition results arrive in the OnResponseReceived event.
        dataClient.EndAudio();
    }
}
/// <summary>
/// Creates a data client without LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClient()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);

    // Event handlers for speech recognition results
    this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
}
/// <summary>
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
void DoDataRecognition(DataRecognitionClient dataClient)
{
    // Choose between a two-minute recitation of the Wikipedia page for Batman
    // or a short utterance (both paths currently point at the same file).
    string filename = (m_recoMode == SpeechRecognitionMode.LongDictation) ? "C:\\dev\\audio\\v1.wav" : "C:\\dev\\audio\\v1.wav";
    if (m_filename != null)
    {
        filename = m_filename;
    }

    int waitSeconds = (m_recoMode == SpeechRecognitionMode.LongDictation) ? 200 : 15;

    using (FileStream fileStream = new FileStream(filename, FileMode.Open, FileAccess.Read))
    {
        // Note: for wave files we can send data from the file straight to the server.
        // If your audio is not a wave file and you instead have raw data (for
        // example, audio coming over Bluetooth), then before sending any audio
        // data you must first send a SpeechAudioFormat descriptor that describes
        // the layout and format of your raw audio, via DataRecognitionClient's
        // SendAudioFormat() method.
        int bytesRead = 0;
        byte[] buffer = new byte[1024];

        try
        {
            do
            {
                // Read more audio data into the byte buffer.
                bytesRead = fileStream.Read(buffer, 0, buffer.Length);

                // Send the audio data to the service.
                dataClient.SendAudio(buffer, bytesRead);
            } while (bytesRead > 0);
        }
        finally
        {
            // We are done sending audio. Final recognition results arrive in the OnResponseReceived event.
            dataClient.EndAudio();
        }

        // Sleep until the final result arrives in the OnResponseReceived event, or waitSeconds, whichever is smaller.
        bool isReceivedResponse = dataClient.WaitForFinalResponse(waitSeconds * 1000);
        if (!isReceivedResponse)
        {
            Console.WriteLine("{0}: Timed out waiting for conversation response after {1} ms", DateTime.UtcNow, waitSeconds * 1000);
        }
    }
}
internal void Recognice(string connectionId, byte[] stream)
{
    _connectionId = connectionId;

    _dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.LongDictation,
        "es-ES",
        "ebccef423cec4f098c35a774091582d4");
    _dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;

    // Send the audio data to the service, then signal end of audio.
    this._dataClient.SendAudio(stream, stream.Length);
    this._dataClient.EndAudio();
}
private void CreateDataRecoClientWithIntent()
{
    string PointURL = "https://westus.api.cognitive.microsoft.com/luis/v2.0/apps/75a1f980-aa8e-42e5-927c-eef62286b24c?subscription-key=3441bb92f501414e8bcb7013517a20f1&verbose=true&timezoneOffset=0&q=";

    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntentUsingEndpointUrl(
        this.DefaultLocale,
        this.SubscriptionKey,
        PointURL);
    this.dataClient.AuthenticationUri = this.AuthenticationUri;

    this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
    this.dataClient.OnIntent += this.OnIntentHandler;
}
public void DoSR(string subscriptionKey, SendMessageAction OnResult, Action OnClose, string locale, string endpoint = null)
{
    this.SendMessage = OnResult;
    this.OnClose = OnClose;

    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.ShortPhrase, locale, subscriptionKey, "", endpoint);

    // Event handlers for speech recognition results
    this.dataClient.OnResponseReceived += this.OnResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;

    WriteLine("client created");

    timer = new Timer(OnTimerCallback, this, CutoffSeconds * 1000, Timeout.Infinite);
}
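// DoSR above arms a one-shot timer that cuts recognition off after
// CutoffSeconds. A minimal sketch of what OnTimerCallback might look like;
// the body and the SpeechSession owner type are assumptions, not part of the
// original sample.
private static void OnTimerCallback(object state)
{
    // 'state' is the instance passed when the timer was created.
    var session = (SpeechSession)state; // hypothetical owner type

    // Stop sending audio; the final result is then delivered through
    // OnResponseReceivedHandler, which can invoke OnClose.
    session.dataClient.EndAudio();
}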
public void Start()
{
    _client?.Dispose();
    _client = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.LongDictation,
        _speechSettings.SpeechLanguage,
        _speechSettings.AzureSpeechPrimaryKey /*, _speechSettings.AzureSpeechSecondaryKey, _speechSettings.AzureSpeechAuthUrl*/);
    _started = true;

    _client.SendAudioFormat(SpeechAudioFormat.create16BitPCMFormat(_speechSettings.SampleRateValue));
    _client.OnResponseReceived += ClientOnResponseReceived;
}
/// <summary>
/// Creates a data client with LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClientWithIntent()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntent(
        this.DefaultLocale,
        this.SubscriptionKey,
        this.LuisAppId,
        this.LuisSubscriptionID);

    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;

    // Event handler for intent result
    this.dataClient.OnIntent += this.OnIntentHandler;
}
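// The OnIntent handler wired up above receives the LUIS result as a JSON
// payload. A minimal sketch of such a handler, assuming the payload should
// simply be logged; the body is an assumption, not part of the original sample.
private void OnIntentHandler(object sender, SpeechIntentEventArgs e)
{
    // e.Payload carries the raw LUIS JSON (intents, entities, scores).
    Console.WriteLine("Intent payload: {0}", e.Payload);
}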
private void btnStop_Click(object sender, RoutedEventArgs e)
{
    btnStart.IsEnabled = true;
    btnStop.IsEnabled = false;

    if (!file)
    {
        waveIn.StopRecording();
        waveIn.Dispose();
        waveIn = null;
        recorder.Stop();

        // Close the audio file if logging
        if (audioSent != null)
        {
            audioSent.Flush();
            audioSent.Dispose();
            audioSent = null;
        }
        logAudioFileName = null;

        if (name == "英文" || name == "西班牙语") // "English" or "Spanish"
        {
            this.micClient.EndMicAndRecognition();
            this.micClient.Dispose();
            this.micClient = null;
            stopbutton = true;
        }
    }
    else if (file)
    {
        if (name == "英文" || name == "西班牙语") // "English" or "Spanish"
        {
            this.dataClient.Dispose();
            this.dataClient = null;
        }
    }

    CheckBox_RecordAudio.IsEnabled = true;
    CheckBox_RecordAudio.IsChecked = false;
    CheckBox_Transcript.IsEnabled = true;
    CheckBox_Transcript.IsChecked = false;
    CheckBox_Library.IsEnabled = true;
}
/// <summary>
/// Creates a data client with LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClientWithIntent()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntentUsingEndpointUrl(
        this.DefaultLocale,
        this.SubscriptionKey,
        this.LuisEndpointUrl);
    this.dataClient.AuthenticationUri = this.AuthenticationUri;

    // Event handlers for speech recognition results
    this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;

    // Event handler for intent result
    this.dataClient.OnIntent += this.OnIntentHandler;
}
private string Recognise(DataRecognitionClient dataClient, string fileName)
{
    int i = 0;
    recognitionResult = null;

    // Retry up to 10 times, waiting at most 5 minutes per attempt for the
    // response handler to store a result and release the semaphore.
    while (recognitionResult == null && i < 10)
    {
        SendAudio(dataClient, fileName);
        sem.WaitOne(300000);
        i++;
    }

    string temp = recognitionResult;
    recognitionResult = null;
    return temp;
}
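// Recognise above blocks on a semaphore that some response handler must
// release. A minimal sketch of the matching handler, assuming the first
// alternative is good enough; the body is an assumption, not part of the
// original sample.
private void OnDataDictationResponseReceivedHandler(object sender, SpeechResponseEventArgs e)
{
    if (e.PhraseResponse.Results.Length > 0)
    {
        recognitionResult = e.PhraseResponse.Results[0].DisplayText;
    }

    // Wake up the thread waiting in Recognise.
    sem.Release();
}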
/// <summary>
/// Creates a data client with LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClientWithIntent()
{
    _dataClient = SpeechRecognitionServiceFactory.CreateDataClientWithIntent(
        DefaultLocale,
        SubscriptionKey,
        LuisAppId,
        LuisSubscriptionId);
    _dataClient.AuthenticationUri = AuthenticationUri;

    // Event handlers for speech recognition results
    _dataClient.OnResponseReceived += OnDataShortPhraseResponseReceivedHandler;
    //dataClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    _dataClient.OnConversationError += OnConversationErrorHandler;

    // Event handler for intent result
    _dataClient.OnIntent += OnIntentHandler;
}
private void CreateDataRecoClient()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        this.Mode,
        this.DefaultLocale,
        this.SubscriptionKey);

    if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    {
        this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    }
    else
    {
        this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    }

    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}
private void RadioButton_Click(object sender, RoutedEventArgs e)
{
    // Reset everything
    if (_micClient != null)
    {
        _micClient.EndMicAndRecognition();
        _micClient.Dispose();
    }

    if (_dataClient != null)
    {
        _dataClient.Dispose();
    }

    _micClient = null;
    _dataClient = null;

    _logText.Text = "";
    _startButton.IsEnabled = true;
    _radioGroup.IsEnabled = true;
}
/// <summary>
/// Handles the Click event of the RadioButton control.
/// </summary>
/// <param name="sender">The source of the event.</param>
/// <param name="e">The <see cref="RoutedEventArgs"/> instance containing the event data.</param>
private void RadioButton_Click(object sender, RoutedEventArgs e)
{
    // Reset everything
    if (this.micClient != null)
    {
        this.micClient.EndMicAndRecognition();
        this.micClient.Dispose();
        this.micClient = null;
    }

    if (this.dataClient != null)
    {
        this.dataClient.Dispose();
        this.dataClient = null;
    }

    this._logText.Text = string.Empty;
    this._startButton.IsEnabled = true;
    this._radioGroup.IsEnabled = true;
}
public Recogniser()
{
    sem = new Semaphore(0, 1);

    defaultDataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        mode,
        DefaultLocale,
        defaultPrimarySubscriptionKey,
        defaultSecondarySubscriptionKey);
    defaultDataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    defaultDataClient.OnConversationError += OnConversationErrorHandler;

    customDataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        mode,
        DefaultLocale,
        customPrimarySubscriptionKey,
        customSecondarySubscriptionKey,
        customModelUrl);
    customDataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
}
/// <summary>
/// Creates a data client without LUIS intent support.
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech Recognition Service.
/// No modification is done to the buffers, so the user can apply their
/// own Silence Detection if desired.
/// </summary>
private void CreateDataRecoClient()
{
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.LongDictation,
        this.DefaultLocale,
        this.SubscriptionKey);
    this.dataClient.AuthenticationUri = "";

    // Event handlers for speech recognition results
    //if (this.Mode == SpeechRecognitionMode.ShortPhrase)
    //{
    //    this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    //}
    //else
    //{
    this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    //}

    //this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    //this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}
static IDisposable ReactiveSample(DataRecognitionClient client)
{
    var disposable = new CompositeDisposable();

    var sentenceSubscriptions = client.GetResponseObservable()
        .Select((observable, count) => new { observable, count })
        .Subscribe(
            x => disposable.Add(x.observable.Subscribe(
                phrases =>
                {
                    Console.CursorLeft = 0;
                    var firstPhrase = phrases.First();
                    var prefix = x.count == 0 ? "Title" : "Sentence " + x.count;
                    Console.Write("{0}: {1}", prefix, firstPhrase.DisplayText ?? firstPhrase.LexicalForm);
                },
                ex => Console.Error.WriteLine(ex),
                () => Console.WriteLine())));

    disposable.Add(sentenceSubscriptions);
    return disposable;
}
static IDisposable EventBasedSample(DataRecognitionClient client)
{
    var count = 0;

    EventHandler<SpeechErrorEventArgs> errorHandler = (sender, args) =>
    {
        Console.Error.WriteLine("Failed with code '{0}' and text '{1}'.", args.SpeechErrorCode, args.SpeechErrorText);
    };

    EventHandler<PartialSpeechResponseEventArgs> partialHandler = (sender, args) =>
    {
        Console.CursorLeft = 0;
        var prefix = (count == 0) ? "Title" : "Sentence " + count;
        Console.Write("{0}: {1}", prefix, args.PartialResult);
    };

    EventHandler<SpeechResponseEventArgs> responseHandler = (sender, args) =>
    {
        if (args.PhraseResponse.RecognitionStatus == RecognitionStatus.RecognitionSuccess)
        {
            var result = args.PhraseResponse.Results.First().DisplayText;
            Console.CursorLeft = 0;
            var prefix = (count == 0) ? "Title" : "Sentence " + count;
            Console.WriteLine("{0}: {1}", prefix, result);
            count++;
        }
    };

    client.OnConversationError += errorHandler;
    client.OnPartialResponseReceived += partialHandler;
    client.OnResponseReceived += responseHandler;

    return Disposable.Create(() =>
    {
        client.OnConversationError -= errorHandler;
        client.OnPartialResponseReceived -= partialHandler;
        client.OnResponseReceived -= responseHandler;
    });
}
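// A usage sketch tying the two samples together: create a client, subscribe
// with either helper, stream a wave file, and wait for the final result. The
// file path and key are placeholders, not part of the original samples.
static void Main()
{
    var client = SpeechRecognitionServiceFactory.CreateDataClient(
        SpeechRecognitionMode.LongDictation, "en-US", "YOUR_SUBSCRIPTION_KEY");

    using (EventBasedSample(client)) // or ReactiveSample(client)
    using (var fileStream = File.OpenRead("audio.wav"))
    {
        var buffer = new byte[1024];
        int bytesRead;
        while ((bytesRead = fileStream.Read(buffer, 0, buffer.Length)) > 0)
        {
            client.SendAudio(buffer, bytesRead);
        }
        client.EndAudio();

        // Block until the final response or a 60-second timeout.
        client.WaitForFinalResponse(60 * 1000);
    }

    client.Dispose();
}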