/// <summary>
/// Creates a data-based speech recognition client instance.
/// The client recognizes speech coming from files or audio devices; the audio
/// is split into small segments which are sent to the service one after another.
/// </summary>
/// <param name="mode"><see cref="SpeechRecognitionMode"/> specifying the recognition mode.</param>
private void CreateDataRecoClient(SpeechRecognitionMode mode)
{
    // Tear down any previously created client before building a new one.
    if (this.dataClient != null)
    {
        this.dataClient.Dispose();
        this.dataClient = null;
    }

    // Build the DataRecognitionClient via the factory. The language is
    // hard-coded to English because both demo files contain English speech.
    this.dataClient = SpeechRecognitionServiceFactory.CreateDataClient(
        mode,
        "en-US",
        SUBSCRIPTIONKEY); // Key of the Bing Speech API service instance.

    // Event handlers for speech recognition results.
    if (mode == SpeechRecognitionMode.ShortPhrase)
    {
        // Final-result handler for ShortPhrase mode.
        this.dataClient.OnResponseReceived += this.OnDataShortPhraseResponseReceivedHandler;
    }
    else
    {
        // Final-result handler for LongDictation mode; the service raises it
        // multiple times, once per detected pause between sentences.
        this.dataClient.OnResponseReceived += this.OnDataDictationResponseReceivedHandler;
    }

    // Raised repeatedly while recognition is in progress, whenever the service
    // produces an intermediate hypothesis for the audio heard so far.
    this.dataClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;

    // Raised when the service reports an error.
    this.dataClient.OnConversationError += this.OnConversationErrorHandler;
}
/// <summary>
/// Initializes a new instance of the <see cref="SpeechRecognitionClient"/> class.
/// </summary>
/// <param name="recognitionMode">The recognition mode to use.</param>
/// <param name="language">The recognition language.</param>
/// <param name="subscriptionKey">The speech service API key.</param>
public SpeechRecognitionClient(SpeechRecognitionMode recognitionMode, string language, string subscriptionKey)
{
    this.recognitionMode = recognitionMode;
    this.language = language;
    this.subscriptionKey = subscriptionKey;
    this.authentication = new Authentication(subscriptionKey);
    this.webSocket = new ClientWebSocket();
}
/// <summary>
/// Initializes a new instance of the speech-to-text service using the
/// data recognition client library. (Marked by the original author as
/// "yet to implement" — presumably further wiring is planned.)
/// </summary>
/// <param name="mode">The recognition mode to use (short phrase or long dictation).</param>
/// <param name="language">The recognition language.</param>
/// <param name="subscriptionKey">The speech service API key.</param>
public SpeechToTextService(SpeechRecognitionMode mode, string language, string subscriptionKey)
{
    serviceClient = SpeechRecognitionServiceFactory.CreateDataClient(mode, language, subscriptionKey);

    // Event handlers for speech recognition results.
    serviceClient.OnResponseReceived += this.OnResponseReceivedHandler;
    serviceClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    serviceClient.OnConversationError += this.OnConversationErrorHandler;
}
/// <summary>
/// Creates a microphone recognition client, wires up the result callbacks,
/// and starts microphone capture and recognition.
/// </summary>
/// <param name="mode">The recognition mode to use.</param>
/// <param name="language">The recognition language.</param>
/// <param name="subscriptionKey">The speech service API key.</param>
public static void ConvertSpeechToText(SpeechRecognitionMode mode, string language, string subscriptionKey)
{
    _microRecogClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(mode, language, subscriptionKey);

    // Subscribe to recognition callbacks before opening the microphone.
    _microRecogClient.OnConversationError += OnConversationError;
    _microRecogClient.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    _microRecogClient.OnResponseReceived += OnResponseReceivedHandler;

    _microRecogClient.StartMicAndRecognition();
}
/// <summary>
/// Explicit <see cref="ISpeechEngine"/> implementation: switches the engine to
/// the requested recognition mode, ending the session when pausing or starting
/// continuous recognition otherwise. No-op when the mode is unchanged.
/// </summary>
/// <param name="mode">The recognition mode to switch to.</param>
async Task ISpeechEngine.SetRecognitionModeAsync(SpeechRecognitionMode mode)
{
    if (mode == RecognitionMode)
    {
        return;
    }

    RecognitionMode = mode;

    if (mode == SpeechRecognitionMode.Paused)
    {
        await _.EndRecognitionSessionAsync();
    }
    else
    {
        await _.StartContinuousRecognitionAsync();
    }
}
/// <summary>
/// Switches to the requested recognition mode: ends the recognition session
/// when pausing, otherwise starts continuous recognition. No-op when the
/// mode is unchanged.
/// </summary>
/// <param name="mode">The recognition mode to switch to.</param>
public async Task SetRecognitionModeAsync(SpeechRecognitionMode mode)
{
    if (mode == RecognitionMode)
    {
        return;
    }

    RecognitionMode = mode;

    if (mode == SpeechRecognitionMode.Paused)
    {
        await EndRecognitionSessionAsync();
    }
    else
    {
        await StartContinuousRecognitionAsync();
    }
}
/// <summary>
/// Assigns the kind of speech that the <see cref="SpeechManager"/> listens
/// for: commands or dictation.
/// </summary>
/// <param name="mode">The recognition mode.</param>
/// <returns>A task that completes when the mode change has been applied.</returns>
public async Task SetRecognitionMode(SpeechRecognitionMode mode)
{
    if (mode == RecognitionMode)
    {
        return;
    }

    RecognitionMode = mode;

    // Pausing ends the current session; any other mode (re)starts
    // continuous recognition.
    await (mode == SpeechRecognitionMode.Paused
        ? EndRecognitionSession()
        : StartContinuousRecognition());
}
/// <summary>
/// Initializes a new instance of the <see cref="SpeechRecognitionClient"/> class.
/// </summary>
/// <param name="recognitionMode">The recognition mode to use.</param>
/// <param name="language">The recognition language.</param>
/// <param name="subscriptionKey">The speech service API key.</param>
/// <param name="region">The speech service region associated to the subscription.</param>
public SpeechRecognitionClient(SpeechRecognitionMode recognitionMode, string language, string subscriptionKey, string region = null)
{
    this.recognitionMode = recognitionMode;
    this.language = language;
    this.subscriptionKey = subscriptionKey;
    this.webSocket = new ClientWebSocket();

    // Strip spaces so the region can be used as a region endpoint URL segment.
    this.region = region?.Replace(" ", string.Empty);

    if (string.IsNullOrWhiteSpace(this.region))
    {
        // No region supplied: fall back to the default authentication endpoint.
        this.authentication = new Authentication(subscriptionKey);
    }
    else
    {
        // Region supplied: authenticate against the regional Azure endpoint.
        this.authentication = new AzureAuthentication(subscriptionKey, this.region);
    }
}
/// <summary>
/// Creates and stores a microphone recognition client for the given language
/// and mode, wiring up status, partial-result, and error callbacks.
/// </summary>
/// <param name="recoLanguage">The recognition language.</param>
/// <param name="recoMode">The recognition mode.</param>
/// <returns>The configured <see cref="MicrophoneRecognitionClient"/>.</returns>
MicrophoneRecognitionClient CreateMicrophoneRecoClient(string recoLanguage, SpeechRecognitionMode recoMode)
{
    this.micClient = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        recoMode,
        recoLanguage,
        _speechAPIAccountKey);

    // Event handlers for speech recognition results.
    this.micClient.OnMicrophoneStatus += this.OnMicrophoneStatus;
    this.micClient.OnPartialResponseReceived += this.OnPartialResponseReceivedHandler;
    this.micClient.OnConversationError += this.OnConversationErrorHandler;

    // NOTE(review): a final-result handler is only attached for ShortPhrase
    // mode; confirm that LongDictation final results are intentionally
    // unhandled here.
    if (recoMode == SpeechRecognitionMode.ShortPhrase)
    {
        this.micClient.OnResponseReceived += this.OnMicShortPhraseResponseReceivedHandler;
    }

    return this.micClient;
}
/// <summary>
/// Creates a microphone recognition client and wires up the status,
/// partial-result, final-result, and error callbacks.
/// </summary>
/// <param name="recoMode">The recognition mode.</param>
/// <param name="language">The recognition language.</param>
/// <param name="subscriptionKey">The speech service API key.</param>
/// <returns>The configured <see cref="MicrophoneRecognitionClient"/>.</returns>
MicrophoneRecognitionClient CreateMicrophoneRecoClient(SpeechRecognitionMode recoMode, string language, string subscriptionKey)
{
    var client = SpeechRecognitionServiceFactory.CreateMicrophoneClient(
        recoMode,
        language,
        subscriptionKey);

    // Event handlers for speech recognition results.
    client.OnMicrophoneStatus += OnMicrophoneStatus;
    client.OnConversationError += OnConversationErrorHandler;
    client.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    client.OnResponseReceived += OnMicShortPhraseResponseReceivedHandler;

    return client;
}
/// <summary>
/// Selects the recognition mode, lazily creates the microphone client,
/// starts microphone recognition, and creates the data recognition client.
/// </summary>
/// <param name="isShortParse">
/// true to use ShortPhrase mode; false to use LongDictation mode.
/// </param>
public void Start(bool isShortParse = true)
{
    // Map the flag onto the corresponding recognition mode.
    this.Mode = isShortParse
        ? SpeechRecognitionMode.ShortPhrase
        : SpeechRecognitionMode.LongDictation;

    // Create the microphone client only on first use.
    if (this.micClient == null)
    {
        this.CreateMicrophoneRecoClient();
    }

    this.micClient.StartMicAndRecognition();
    CreateDataRecoClient();
}
/// <summary>
/// Speech recognition with data (for example from a file or audio source).
/// The data is broken up into buffers and each buffer is sent to the Speech
/// Recognition Service. No modification is done to the buffers, so the user
/// can apply their own Silence Detection if desired.
/// </summary>
/// <param name="recoMode">The recognition mode (short phrase or long dictation).</param>
/// <param name="recoLanguage">The recognition language.</param>
/// <returns>The configured <see cref="DataRecognitionClient"/>.</returns>
DataRecognitionClient CreateDataRecoClient(SpeechRecognitionMode recoMode, string recoLanguage)
{
    var client = SpeechRecognitionServiceFactory.CreateDataClient(
        recoMode,
        recoLanguage,
        SubscriptionKey);

    // Attach the final-result handler matching the chosen mode.
    if (recoMode == SpeechRecognitionMode.ShortPhrase)
    {
        client.OnResponseReceived += OnDataShortPhraseResponseReceivedHandler;
    }
    else
    {
        client.OnResponseReceived += OnDataDictationResponseReceivedHandler;
    }

    // Intermediate hypotheses and error reporting.
    client.OnPartialResponseReceived += OnPartialResponseReceivedHandler;
    client.OnConversationError += OnConversationErrorHandler;

    return client;
}
/// <summary>
/// Assigns the kind of speech that the <see cref="SpeechManager"/> listens
/// for: commands or dictation.
/// </summary>
/// <param name="mode">The recognition mode.</param>
/// <returns>
/// true if the mode change succeeded; false if continuous recognition
/// failed to start.
/// </returns>
public async Task <bool> SetRecognitionMode(SpeechRecognitionMode mode)
{
    if (mode == RecognitionMode)
    {
        return true;
    }

    bool success = true;
    RecognitionMode = mode;

    if (mode == SpeechRecognitionMode.Paused)
    {
        await EndRecognitionSession();
    }
    else if (!await StartContinuousRecognition())
    {
        success = false;
        // NOTE(review): this assigns only the local parameter, which is never
        // read again afterwards — confirm whether RecognitionMode was meant
        // to be reset to Default instead.
        mode = SpeechRecognitionMode.Default;
    }

    OnModeChanged(new EventArgs());
    return success;
}
/// <summary>
/// Writes a banner line announcing that speech recognition has started,
/// including the audio source, recognition mode, and language.
/// </summary>
/// <param name="recoSource">Description of the audio source (for example, microphone or file).</param>
/// <param name="recoLanguage">The recognition language.</param>
/// <param name="recoMode">The recognition mode.</param>
private void LogRecognitionStart(string recoSource, string recoLanguage, SpeechRecognitionMode recoMode)
{
    // String interpolation instead of a chain of '+' concatenations;
    // recoMode is formatted via its ToString(), same as before.
    WriteLine($"\n--- Start speech recognition using {recoSource} with {recoMode} mode in {recoLanguage} language ----\n\n");
}