/// <summary>
/// Performs one-shot speech recognition on a WAV file and returns the transcription.
/// </summary>
/// <param name="filePath">Path to the input WAV file.</param>
/// <returns>The recognized text, or an empty string when nothing was recognized.</returns>
/// <exception cref="OperationCanceledException">Thrown when the service cancels the request due to an error.</exception>
public async Task<string> Recognize(string filePath)
{
    using var inputAudio = AudioConfig.FromWavFileInput(filePath);
    using var speechRecognizer = new SpeechRecognizer(_config, inputAudio);

    var result = await speechRecognizer.RecognizeOnceAsync();

    if (result.Reason == ResultReason.RecognizedSpeech)
    {
        return result.Text;
    }

    if (result.Reason == ResultReason.Canceled)
    {
        var cancellation = CancellationDetails.FromResult(result);
        if (cancellation.Reason == CancellationReason.Error)
        {
            var errorMessage = $"CANCELED: Reason={cancellation.Reason}\n";
            errorMessage += $"CANCELED: ErrorCode={cancellation.ErrorCode}\n";
            errorMessage += $"CANCELED: ErrorDetails={cancellation.ErrorDetails}\n";
            errorMessage += $"CANCELED: Did you update the subscription info?";
            throw new OperationCanceledException(errorMessage);
        }
    }

    // Any other reason (e.g. NoMatch) yields an empty transcription, as before.
    return "";
}
/// <summary>
/// Creates a recognizer with the baseline model and selected language:
/// builds a config from the subscription key and selected region;
/// if the input source is an audio file, creates the recognizer with the audio file,
/// otherwise with the default microphone; then waits on RunRecognizer.
/// </summary>
private async Task CreateBaseReco()
{
    // TODO: support users specifying a different region.
    // FIX: removed leftover "LOLO" debug Console.WriteLine calls from the production path.
    var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
    config.SpeechRecognitionLanguage = this.RecognitionLanguage;

    SpeechRecognizer basicRecognizer;
    if (this.UseMicrophone)
    {
        using (basicRecognizer = new SpeechRecognizer(config))
        {
            await this.RunRecognizer(basicRecognizer, RecoType.Base, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
    else
    {
        using (var audioInput = AudioConfig.FromWavFileInput(wavFileName))
        {
            using (basicRecognizer = new SpeechRecognizer(config, audioInput))
            {
                await this.RunRecognizer(basicRecognizer, RecoType.Base, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
            }
        }
    }
}
/// <summary>
/// Creates a recognizer with the baseline model and the selected language:
/// builds a config from the subscription key and region, recognizes either
/// from the WAV file or from the default microphone, and waits on RunRecognizer.
/// </summary>
private async Task CreateBaseReco()
{
    // TODO: support users specifying a different region.
    var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
    config.SpeechRecognitionLanguage = this.RecognitionLanguage;

    if (!this.UseMicrophone)
    {
        // File input: wrap both the audio input and the recognizer in usings.
        using (var audioInput = AudioConfig.FromWavFileInput(wavFileName))
        using (var fileRecognizer = new SpeechRecognizer(config, audioInput))
        {
            await this.RunRecognizer(fileRecognizer, RecoType.Base, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
    else
    {
        // Default microphone input.
        using (var micRecognizer = new SpeechRecognizer(config))
        {
            await this.RunRecognizer(micRecognizer, RecoType.Base, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
}
/// <summary>
/// Runs one-shot Spanish speech recognition against a local WAV file and prints the outcome.
/// </summary>
public static async Task RecognizeSpeechAsync()
{
    // SECURITY NOTE(review): subscription key is hard-coded in source; move it to
    // configuration or an environment variable and rotate the exposed key.
    var config = SpeechConfig.FromSubscription("3e3aea608df74736855bf7bf92596e43", "eastus2");
    config.SpeechRecognitionLanguage = "es-ES";

    // FIX: AudioConfig is IDisposable — it was created without a using and leaked.
    using (var audioConfig = AudioConfig.FromWavFileInput("24375.wav"))
    using (var recognizer = new SpeechRecognizer(config, audioConfig))
    {
        Console.WriteLine("Say...");
        var result = await recognizer.RecognizeOnceAsync();

        if (result.Reason == ResultReason.RecognizedSpeech)
        {
            Console.WriteLine($"Text recognized {result.Text}");
        }
        else if (result.Reason == ResultReason.NoMatch)
        {
            Console.WriteLine("No recognized");
        }
        else if (result.Reason == ResultReason.Canceled)
        {
            var cancellationDetails = CancellationDetails.FromResult(result);
            Console.WriteLine($"Speech recognition canceled: {cancellationDetails.Reason}");
            if (cancellationDetails.Reason == CancellationReason.Error)
            {
                Console.WriteLine($"ErrorCode {cancellationDetails.ErrorCode}");
                Console.WriteLine($"ErrorDetails {cancellationDetails.ErrorDetails}");
            }
        }
    }
}
/// <summary>
/// Creates a recognizer that uses the custom model endpoint and selected language:
/// builds a config from the subscription key and region, sets the custom endpoint id,
/// recognizes either from the WAV file or from the default microphone, and waits on RunRecognizer.
/// </summary>
private async Task CreateCustomReco()
{
    // TODO: support users specifying a different region.
    var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
    config.SpeechRecognitionLanguage = this.RecognitionLanguage;
    config.EndpointId = this.CustomModelEndpointId;

    if (!this.UseMicrophone)
    {
        // File input path.
        using (var audioInput = AudioConfig.FromWavFileInput(wavFileName))
        using (var fileRecognizer = new SpeechRecognizer(config, audioInput))
        {
            await this.RunRecognizer(fileRecognizer, RecoType.Custom, stopCustomRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
    else
    {
        // Default microphone path.
        using (var micRecognizer = new SpeechRecognizer(config))
        {
            await this.RunRecognizer(micRecognizer, RecoType.Custom, stopCustomRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
}
/// <summary>
/// Runs continuous intent recognition over the recorded audio file and returns the recognized text.
/// Returns an empty string when no audio file has been recorded or nothing was recognized.
/// </summary>
/// <returns>The last recognized text segment, or an empty string.</returns>
public static async Task<string> ProcessAudioAsync()
{
    string key = "<KEY>";
    string region = "<REGION>";
    SpeechConfig configRecognizer = SpeechConfig.FromSubscription(key, region);
    string processedAudio = "";

    bool isRecorded = CheckAudioFile();
    if (!isRecorded)
    {
        //log
        return processedAudio;
    }

    using (AudioConfig audioInput = AudioConfig.FromWavFileInput(_audioFile))
    using (IntentRecognizer recognizer = new IntentRecognizer(configRecognizer, audioInput))
    {
        // FIX: run continuations asynchronously so TrySetResult from the SDK callback
        // thread does not execute the awaiting continuation inline.
        TaskCompletionSource<int> stopRecognition =
            new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

        recognizer.Recognized += (s, e) =>
        {
            // Each recognized segment overwrites the previous one; only the last is returned.
            if (e.Result.Reason == ResultReason.RecognizedSpeech)
            {
                processedAudio = e.Result.Text;
            }
        };
        recognizer.Canceled += (s, e) =>
        {
            if (e.Reason == CancellationReason.Error)
            {
                //log
            }
            stopRecognition.TrySetResult(0);
        };
        recognizer.SessionStarted += (s, e) =>
        {
            //log
        };
        recognizer.SessionStopped += (s, e) =>
        {
            //log
            stopRecognition.TrySetResult(0);
        };

        await recognizer.StartContinuousRecognitionAsync();
        // FIX: await the completion source instead of blocking the thread with Task.WaitAny.
        await stopRecognition.Task;
        await recognizer.StopContinuousRecognitionAsync();
    }

    //log
    return processedAudio;
}
/// <summary>
/// One-shot speech recognition from a WAV file; prints the result or the cancellation details.
/// </summary>
public static async Task RecognizeSpeechAsync()
{
    var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

    using (var audioConfig = AudioConfig.FromWavFileInput(@"YourFilePath"))
    using (var recognizer = new SpeechRecognizer(config, audioConfig))
    {
        var result = await recognizer.RecognizeOnceAsync();

        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"We recognized: {result.Text}");
                break;
            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;
            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
        }
    }
}
/// <summary>
/// Performs one-shot Spanish speech recognition on the given WAV file.
/// </summary>
/// <param name="path">Path to the input WAV file.</param>
/// <returns>A human-readable message describing the recognition outcome.</returns>
public async Task<string> RecognizeSpeechFromFileAsync(string path)
{
    // SECURITY NOTE(review): subscription key is hard-coded in source; move it to
    // configuration or an environment variable and rotate the exposed key.
    var config = SpeechConfig.FromSubscription("3e3aea608df74736855bf7bf92596e43", "eastus2");
    config.SpeechRecognitionLanguage = "es-ES";

    // FIX: AudioConfig is IDisposable — it was created without a using and leaked.
    using (var audioConfig = AudioConfig.FromWavFileInput(path))
    using (var recognizer = new SpeechRecognizer(config, audioConfig))
    {
        var result = await recognizer.RecognizeOnceAsync();

        if (result.Reason == ResultReason.RecognizedSpeech)
        {
            return $"Text recognized: {result.Text}";
        }
        else if (result.Reason == ResultReason.NoMatch)
        {
            return $"No speech recognized";
        }
        else if (result.Reason == ResultReason.Canceled)
        {
            var cancellationDetails = CancellationDetails.FromResult(result);
            if (cancellationDetails.Reason == CancellationReason.Error)
            {
                return $"ErrorCode {cancellationDetails.ErrorCode}\n" +
                       $"ErrorDetails {cancellationDetails.ErrorDetails}";
            }
            return $"Speech recognition canceled: {cancellationDetails.Reason}";
        }

        return "Unknown error";
    }
}
/// <summary>
/// One-shot speech recognition from a local WAV file; prints the result or cancellation details.
/// </summary>
public static async Task RecognizeSpeechAsync()
{
    // NOTE(review): subscription key and absolute file path are hard-coded; consider configuration.
    var config = SpeechConfig.FromSubscription("5dbb936323894a3abead86291b52d1b4", "centralus");

    using (var audioInput = AudioConfig.FromWavFileInput(@"C:\Users\sergi\revature\00_csharp\Translator\sample.wav"))
    using (var recognizer = new SpeechRecognizer(config, audioInput))
    {
        Console.WriteLine("Recognizing first result...");
        var result = await recognizer.RecognizeOnceAsync();

        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                Console.WriteLine($"We recognized: {result.Text}");
                break;
            case ResultReason.NoMatch:
                Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                break;
            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
                if (cancellation.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                break;
        }
    }
}
/// <summary>
/// Continuously recognizes speech from a WAV file and collects every result.
/// Shows a message box when the session ended due to a transcription error.
/// </summary>
/// <param name="filename">Path to the input WAV file.</param>
/// <param name="config">Configured speech settings (key/region/language).</param>
/// <returns>All results delivered by the recognizer, in arrival order.</returns>
private async Task<List<SpeechRecognitionResult>> SR(string filename, SpeechConfig config)
{
    // FIX: run continuations asynchronously to avoid inline execution on the SDK callback thread.
    var stopRecognition = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var results = new List<SpeechRecognitionResult>();

    using (var audioInput = AudioConfig.FromWavFileInput(filename))
    using (var recognizer = new SpeechRecognizer(config, audioInput))
    {
        recognizer.Recognized += (s, e) => results.Add(e.Result);
        recognizer.Canceled += (s, e) =>
        {
            results.Add(e.Result);
            stopRecognition.TrySetResult(false);
        };
        recognizer.SessionStopped += (s, e) => stopRecognition.TrySetResult(false);

        await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);
        await stopRecognition.Task;
        await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);

        // FIX: the original read CancellationDetails from results[0], which (a) throws on an
        // empty list and (b) inspects the first result even when it was ordinary recognized
        // speech. A canceled session appends its result last, so check that one — and only
        // when it actually carries a Canceled reason.
        if (results.Count > 0 && results[results.Count - 1].Reason == ResultReason.Canceled)
        {
            var cancellation = CancellationDetails.FromResult(results[results.Count - 1]);
            if (cancellation.Reason == CancellationReason.Error)
            {
                System.Windows.Forms.MessageBox.Show("Transcription error: " + cancellation.ErrorDetails);
            }
        }

        return results;
    }
}
/// <summary>
/// One-shot speech recognition from a local WAV file; prints the result or cancellation details.
/// </summary>
public static async Task RecognizeSpeechAsync()
{
    // NOTE(review): subscription key is hard-coded; consider configuration.
    var config = SpeechConfig.FromSubscription("04d78025d6c14834ba9888b8d307843c", "eastus");

    using (var audioInput = AudioConfig.FromWavFileInput("./hawking01.wav"))
    using (var recognizer = new SpeechRecognizer(config, audioInput))
    {
        Console.WriteLine("Recognizing first result...");
        var result = await recognizer.RecognizeOnceAsync();

        if (result.Reason == ResultReason.RecognizedSpeech)
        {
            Console.WriteLine($"We recognized: {result.Text}");
        }
        else if (result.Reason == ResultReason.NoMatch)
        {
            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
        }
        else if (result.Reason == ResultReason.Canceled)
        {
            var cancellation = CancellationDetails.FromResult(result);
            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
            if (cancellation.Reason == CancellationReason.Error)
            {
                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                Console.WriteLine($"CANCELED: Did you update the subscription info?");
            }
        }
    }
}
/// <summary>
/// Downloads/converts the given audio URL to WAV, recognizes it once, and returns the text.
/// The temporary WAV file is always deleted, even when recognition throws.
/// </summary>
/// <param name="audioUrl">URL of the source audio to convert and recognize.</param>
/// <returns>The recognized text, a fallback message on NoMatch, or an empty string otherwise.</returns>
public async Task<string> RecognizeSpeechAsync(string audioUrl)
{
    var audioName = await CloudConvert.ConvertAudioToWavAsync(audioUrl);
    var config = SpeechConfig.FromSubscription(Settings.SubscriptionKey, Settings.SubscriptionRegion);
    var textConverted = "";

    try
    {
        using (var audioInput = AudioConfig.FromWavFileInput(audioName))
        using (var recognizer = new Microsoft.CognitiveServices.Speech.SpeechRecognizer(config, audioInput))
        {
            var result = await recognizer.RecognizeOnceAsync();
            switch (result.Reason)
            {
                case ResultReason.NoMatch:
                    textConverted = "Sorry, I couldn't understand what you said.";
                    break;
                case ResultReason.RecognizedSpeech:
                    textConverted = result.Text;
                    break;
                default:
                    // NOTE(review): Canceled (and any other reason) is silently swallowed here,
                    // matching the original behavior — confirm whether it should be surfaced.
                    break;
            }
        }
    }
    finally
    {
        // FIX: delete the temporary WAV even when recognition throws (it previously leaked).
        File.Delete(audioName);
    }

    return textConverted;
}
/// <summary>
/// Runs speech + LUIS intent recognition over a WAV file and returns the parsed LUIS result,
/// or null when the service reports NoMatch.
/// </summary>
/// <param name="speechFile">Path to the input WAV file.</param>
/// <exception cref="InvalidOperationException">Thrown for any unexpected result reason.</exception>
private async Task<LuisResult> RecognizeSpeechWithIntentRecognizerAsync(string speechFile)
{
    var speechConfig = SpeechConfig.FromEndpoint(this.LuisConfiguration.SpeechEndpoint, this.LuisConfiguration.EndpointKey);

    using (var audioInput = AudioConfig.FromWavFileInput(speechFile))
    using (var recognizer = new IntentRecognizer(speechConfig, audioInput))
    {
        // Register only the "None" intent with the recognizer.
        var model = LanguageUnderstandingModel.FromAppId(this.LuisConfiguration.AppId);
        recognizer.AddIntent(model, "None", "None");

        var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

        // RecognizeOnceAsync reports RecognizedSpeech (rather than RecognizedIntent) when the
        // matched intent was not registered via AddIntent — and we intentionally do not register
        // every possible intent — so both reasons are treated as success.
        if (result.Reason == ResultReason.RecognizedSpeech || result.Reason == ResultReason.RecognizedIntent)
        {
            var content = result.Properties.GetProperty(PropertyId.LanguageUnderstandingServiceResponse_JsonResult);
            return JsonConvert.DeserializeObject<LuisResult>(content);
        }

        if (result.Reason == ResultReason.NoMatch)
        {
            Logger.LogWarning("Received 'NoMatch' result from Cognitive Services.");
            return null;
        }

        throw new InvalidOperationException($"Failed to get speech recognition result. Reason = '{result.Reason}'");
    }
}
/// <summary>
/// Converts recorded speech to text and triggers execution of the resulting command.
/// Plays a spoken error message when recognition fails or returns empty text.
/// </summary>
private async Task AnalizeCommandHandwritten()
{
    string filePath = AudioRecording.RecorderPath;
    if (String.IsNullOrEmpty(filePath))
    {
        // No recording available — nothing to analyze.
        return;
    }

    using (var audioInput = AudioConfig.FromWavFileInput(filePath))
    using (var recognizer = new SpeechRecognizer(SpeechAnalyzer.SpeechConfiguration, audioInput))
    {
        var result = await recognizer.RecognizeOnceAsync();

        // Treat both a non-speech result and an empty transcription as failure.
        if (result.Reason != ResultReason.RecognizedSpeech || String.IsNullOrEmpty(result.Text))
        {
            // Spoken feedback: "Failed to recognize speech" (runtime string kept in Russian).
            await SpeechSyntezer.VoiceResult("Не удалось распознать речь");
            return;
        }

        string processedText = SpeechAnalyzer.PreprocessingCommands(result.Text);
        await DoCommandsActionOnHandwritten(processedText);
    }
}
/// <summary>
/// Builds an audio input from the given storage file and enrolls it into a voice profile.
/// </summary>
/// <param name="audioFile">The WAV file to enroll.</param>
/// <param name="config">Configured speech settings.</param>
/// <param name="voiceProfileType">The kind of voice profile being enrolled.</param>
/// <returns>The enrollment result produced by EnrollProfileAsync.</returns>
public async Task<string> CreateRecognitionModelFromFileAsync(StorageFile audioFile, SpeechConfig config, VoiceProfileType voiceProfileType)
{
    using var audioInput = AudioConfig.FromWavFileInput(audioFile.Path);
    return await EnrollProfileAsync(config, audioInput, voiceProfileType);
}
/// <summary>
/// One-shot speech recognition from a WAV file; prints the outcome and pauses for a key press.
/// </summary>
static async Task RecognizeSpeechAsync()
{
    var config = SpeechConfig.FromSubscription("your subcription key", "region(eastus, westus etc)");
    using var audioInput = AudioConfig.FromWavFileInput(@"Your directory path\sample.wav");
    using var recognizer = new SpeechRecognizer(config, audioInput);

    Console.WriteLine("Recognizing first result");
    var result = await recognizer.RecognizeOnceAsync();

    switch (result.Reason)
    {
        case ResultReason.RecognizedSpeech:
            Console.WriteLine($"We've recognized: {result.Text}");
            Console.ReadLine();
            break;
        case ResultReason.NoMatch:
            Console.WriteLine("NOMATCH: Speech could not be recognized");
            Console.ReadLine();
            break;
        case ResultReason.Canceled:
            var cancellation = CancellationDetails.FromResult(result);
            Console.WriteLine($"CANCELED: Reason = {cancellation.Reason}");
            Console.ReadLine();
            if (cancellation.Reason == CancellationReason.Error)
            {
                Console.WriteLine($"CANCELED: ErrorCode = {cancellation.ErrorCode}");
                // FIX: the ErrorDetails line previously printed cancellation.Reason instead
                // of cancellation.ErrorDetails, hiding the actual error message.
                Console.WriteLine($"CANCELED: ErrorDetails = {cancellation.ErrorDetails}");
                Console.WriteLine($"CANCELED: Did you update your subscription info?");
                Console.ReadLine();
            }
            break;
    }
}
/// <summary>
/// One-shot speech recognition from a WAV file; prints the result or cancellation details.
/// </summary>
static async Task RecognizeSpeechAsync()
{
    var config = SpeechConfig.FromSubscription("YourSubscriptionKey", "YourServiceRegion");

    using (var audioInput = AudioConfig.FromWavFileInput("whatstheweatherlike.wav"))
    using (var recognizer = new SpeechRecognizer(config, audioInput))
    {
        Console.WriteLine("Recognizing first result...");
        var result = await recognizer.RecognizeOnceAsync();

        if (result.Reason == ResultReason.RecognizedSpeech)
        {
            Console.WriteLine($"We recognized: {result.Text}");
        }
        else if (result.Reason == ResultReason.NoMatch)
        {
            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
        }
        else if (result.Reason == ResultReason.Canceled)
        {
            var cancellation = CancellationDetails.FromResult(result);
            Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
            if (cancellation.Reason == CancellationReason.Error)
            {
                Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
                Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
                Console.WriteLine($"CANCELED: Did you update the subscription info?");
            }
        }
    }
}
/// <summary>
/// Performs one-shot Brazilian-Portuguese speech recognition on a WAV file and
/// wraps the outcome in a Result, including localized failure messages.
/// </summary>
/// <param name="filePath">Path to the input WAV file.</param>
public async Task<Result<string>> Recognize(string filePath)
{
    // Speech-to-Text credentials created in the Azure portal.
    var config = SpeechConfig.FromSubscription("YourSpeechToTextKey", "YourRegion");
    config.SpeechRecognitionLanguage = "pt-br";

    using (var audioInput = AudioConfig.FromWavFileInput(filePath))
    using (var recognizer = new SpeechRecognizer(config, audioInput))
    {
        var result = await recognizer.RecognizeOnceAsync().ConfigureAwait(false);

        switch (result.Reason)
        {
            case ResultReason.RecognizedSpeech:
                return new Result<string>(result.Text);
            case ResultReason.NoMatch:
                return new Result<string>(result.Text, false, "Falha no reconhecimento do áudio!");
            case ResultReason.Canceled:
                var cancellation = CancellationDetails.FromResult(result);
                if (cancellation.Reason == CancellationReason.Error)
                {
                    return new Result<string>(result.Text, false, $"Motivo: {cancellation.Reason}. Detalhes: {cancellation.ErrorDetails}");
                }
                return new Result<string>(result.Text, false, $"Motivo: {cancellation.Reason}.");
        }
    }

    return new Result<string>(null, false, "Erro desconhecido!");
}
/// <summary>
/// Creates a baseline recognizer for the fixed region/language pair and runs it,
/// reading either from the configured WAV file or from the default microphone.
/// </summary>
private async Task CreateBaseReco()
{
    this.Region = "eastasia";
    this.RecognitionLanguage = "en-US";

    var config = SpeechConfig.FromSubscription(subscriptionKey, this.Region);
    config.SpeechRecognitionLanguage = this.RecognitionLanguage;

    if (!IsFromFile)
    {
        // Default microphone input.
        using (var micRecognizer = new SpeechRecognizer(config))
        {
            await this.RunRecognizer(micRecognizer, RecoType.Base, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
        return;
    }

    // File input path.
    using (var audioInput = AudioConfig.FromWavFileInput(wavFileName))
    using (var fileRecognizer = new SpeechRecognizer(config, audioInput))
    {
        await this.RunRecognizer(fileRecognizer, RecoType.Base, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
    }
}
/// <summary>
/// Recognize speech from a WAV audio file using Azure Cognitive Services.
/// </summary>
/// <param name="wavFile">The path to the WAV file</param>
/// <returns>The recognized text; an empty string for any unhandled result reason.</returns>
/// <exception cref="InvalidOperationException">Thrown on NoMatch or cancellation.</exception>
public async Task<string> RecognizeSpeechAsync(string wavFile)
{
    var config = SpeechConfig.FromEndpoint(new Uri(SecretData.AzureSpeechEndpoint), SecretData.AzureSpeechToken);
    using var audioInput = AudioConfig.FromWavFileInput(wavFile);
    using var recognizer = new SpeechRecognizer(config, audioInput);

    _logger.LogInfo($"Recognizing: { wavFile }");
    var result = await recognizer.RecognizeOnceAsync();

    if (result.Reason == ResultReason.RecognizedSpeech)
    {
        return result.Text;
    }
    else if (result.Reason == ResultReason.NoMatch)
    {
        // FIX: throw a specific exception type instead of the base Exception
        // (still caught by existing handlers catching Exception).
        throw new InvalidOperationException($"NOMATCH: Speech could not be recognized.");
    }
    else if (result.Reason == ResultReason.Canceled)
    {
        var cancellation = CancellationDetails.FromResult(result);
        if (cancellation.Reason == CancellationReason.Error)
        {
            _logger.LogError($"CANCELED: ErrorCode={cancellation.ErrorCode}");
            _logger.LogError($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
            _logger.LogError($"CANCELED: Did you update the subscription info?");
        }
        throw new InvalidOperationException($"CANCELED: Reason={cancellation.Reason}");
    }

    return "";
}
/// <summary>
/// One-shot speech recognition of the given WAV file using the API key from the
/// AZURE_COG_SVCS_API_KEY environment variable; prints the outcome.
/// </summary>
/// <param name="inputFile">Path to the input WAV file.</param>
static async Task RecognizeSpeechAsync(string inputFile)
{
    var apiKey = Environment.GetEnvironmentVariable("AZURE_COG_SVCS_API_KEY");
    var config = SpeechConfig.FromSubscription(apiKey, "westus2");

    using var audioInput = AudioConfig.FromWavFileInput(inputFile);
    using var recognizer = new SpeechRecognizer(config, audioInput);

    Console.WriteLine("Recognizing first result...");
    var result = await recognizer.RecognizeOnceAsync();

    if (result.Reason == ResultReason.RecognizedSpeech)
    {
        Console.WriteLine($"We recognized: {result.Text}");
    }
    else if (result.Reason == ResultReason.NoMatch)
    {
        Console.WriteLine($"NOMATCH: Speech could not be recognized.");
    }
    else if (result.Reason == ResultReason.Canceled)
    {
        var cancellation = CancellationDetails.FromResult(result);
        Console.WriteLine($"CANCELED: Reason={cancellation.Reason}");
        if (cancellation.Reason == CancellationReason.Error)
        {
            Console.WriteLine($"CANCELED: ErrorCode={cancellation.ErrorCode}");
            Console.WriteLine($"CANCELED: ErrorDetails={cancellation.ErrorDetails}");
            Console.WriteLine($"CANCELED: Did you update the subscription info?");
        }
    }
}
/// <summary>
/// Reads audio from a file (only tested with WAV files) and runs recognition on it.
/// </summary>
/// <param name="filePath">Path to the audio file.</param>
public static async Task FormFile(string filePath)
{
    // Build the audio input from the file, then hand it to the recognizer helper.
    using var audioConfig = AudioConfig.FromWavFileInput(filePath);
    await SpeechRecognizer(audioConfig);
}
/// <summary>
/// Enrolls a speaker's voice profile from an audio file, repeating the enrollment call
/// until the service reports the profile is no longer in the "enrolling" state.
/// </summary>
/// <param name="client">Client used to talk to the speaker recognition service.</param>
/// <param name="profile">The voice profile being enrolled.</param>
/// <param name="audioFileName">Path to the enrollment audio file.</param>
public static async Task EnrollSpeakerAsync(VoiceProfileClient client, VoiceProfile profile, string audioFileName)
{
    // Create audio input for enrollment from audio files. Replace with your own audio files.
    using (var audioInput = AudioConfig.FromWavFileInput(audioFileName))
    {
        ResultReason reason;
        do
        {
            var result = await client.EnrollProfileAsync(profile, audioInput);

            if (result.Reason == ResultReason.EnrollingVoiceProfile)
            {
                Console.WriteLine($"Enrolling profile id {profile.Id}.");
            }
            else if (result.Reason == ResultReason.EnrolledVoiceProfile)
            {
                Console.WriteLine($"Enrolled profile id {profile.Id}.");
            }
            else if (result.Reason == ResultReason.Canceled)
            {
                var cancellation = VoiceProfileEnrollmentCancellationDetails.FromResult(result);
                Console.WriteLine($"CANCELED {profile.Id}: ErrorCode={cancellation.ErrorCode}");
                Console.WriteLine($"CANCELED {profile.Id}: ErrorDetails={cancellation.ErrorDetails}");
            }

            Console.WriteLine($"Summation of pure speech across all enrollments in seconds is {result.EnrollmentsSpeechLength.TotalSeconds}.");
            Console.WriteLine($"The remaining enrollments speech length in seconds is {result.RemainingEnrollmentsSpeechLength?.TotalSeconds}.");

            reason = result.Reason;
        } while (reason == ResultReason.EnrollingVoiceProfile);
    }
}
/// <summary>
/// Initializes the recognizer with the speech model and language:
/// 1. builds a speech config from the subscription key and region;
/// 2. recognizes either from the default microphone or from a WAV file;
/// 3. awaits the asynchronous recognition run.
/// </summary>
private async Task CreateBaseReco()
{
    // Build the config from the region and subscription key.
    var config = SpeechConfig.FromSubscription(this.SubscriptionKey, this.Region);
    config.SpeechRecognitionLanguage = this.RecognitionLanguage;

    if (!this.UseMicrophone)
    {
        // Recognize from the configured WAV file.
        using (var audioInput = AudioConfig.FromWavFileInput(wavFileName))
        using (var fileRecognizer = new SpeechRecognizer(config, audioInput))
        {
            await this.RunRecognizer(fileRecognizer, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
    else
    {
        // Recognize from the default microphone.
        using (var micRecognizer = new SpeechRecognizer(config))
        {
            await this.RunRecognizer(micRecognizer, stopBaseRecognitionTaskCompletionSource).ConfigureAwait(false);
        }
    }
}
/// <summary>
/// Performs text-dependent speaker verification: creates a voice profile, enrolls it with
/// three training WAV files, verifies the speaker once enrollment completes, and always
/// deletes the profile afterwards.
/// </summary>
public static async Task SpeakerVerificationAsync()
{
    // Replace with your own subscription key and service region (e.g., "westus").
    string subscriptionKey = "YourSubscriptionKey";
    string region = "YourServiceRegion";

    // Creates an instance of a speech config with the specified subscription key and region.
    var config = SpeechConfig.FromSubscription(subscriptionKey, region);

    // Client used to enroll the voice profile, and a text-dependent profile in a supported locale.
    using (var client = new VoiceProfileClient(config))
    using (var profile = await client.CreateProfileAsync(VoiceProfileType.TextDependentVerification, "en-us"))
    {
        try
        {
            Console.WriteLine($"Created a profile {profile.Id} for text dependent verification.");

            string[] trainingFiles = new string[]
            {
                @"MyVoiceIsMyPassportVerifyMe01.wav",
                @"MyVoiceIsMyPassportVerifyMe02.wav",
                @"MyVoiceIsMyPassportVerifyMe03.wav"
            };

            // Feed each training file to the enrollment service.
            foreach (var trainingFile in trainingFiles)
            {
                // Create audio input for enrollment from an audio file. Replace with your own audio files.
                using (var audioInput = AudioConfig.FromWavFileInput(trainingFile))
                {
                    var result = await client.EnrollProfileAsync(profile, audioInput);

                    switch (result.Reason)
                    {
                        case ResultReason.EnrollingVoiceProfile:
                            Console.WriteLine($"Enrolling profile id {profile.Id}.");
                            break;
                        case ResultReason.EnrolledVoiceProfile:
                            Console.WriteLine($"Enrolled profile id {profile.Id}.");
                            // Enrollment complete — run verification against the profile.
                            await VerifySpeakerAsync(config, profile);
                            break;
                        case ResultReason.Canceled:
                            var cancellation = VoiceProfileEnrollmentCancellationDetails.FromResult(result);
                            Console.WriteLine($"CANCELED {profile.Id}: ErrorCode={cancellation.ErrorCode}");
                            Console.WriteLine($"CANCELED {profile.Id}: ErrorDetails={cancellation.ErrorDetails}");
                            break;
                    }

                    Console.WriteLine($"Number of enrollment audios accepted for {profile.Id} is {result.EnrollmentsCount}.");
                    Console.WriteLine($"Number of enrollment audios needed to complete { profile.Id} is {result.RemainingEnrollmentsCount}.");
                }
            }
        }
        finally
        {
            // Always clean up the profile, even when enrollment or verification throws.
            await client.DeleteProfileAsync(profile);
        }
    }
}
/// <summary>
/// Continuously recognizes speech from a WAV file, printing events and writing recognized
/// text to the destination file, until the session stops or is canceled.
/// </summary>
/// <param name="fullFileName">Path to the input WAV file.</param>
public async Task RecognizeSpeechAsync(string fullFileName)
{
    // FIX: run continuations asynchronously so TrySetResult from the SDK callback thread
    // does not execute the awaiting continuation inline.
    var stopRecognition = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

    using (var audioInput = AudioConfig.FromWavFileInput(fullFileName))
    {
        using (var recognizer = new SpeechRecognizer(GetConfig(), audioInput))
        {
            // Subscribes to events.
            recognizer.Recognizing += (s, e) =>
            {
                Console.WriteLine($"RECOGNIZING: Text={e.Result.Text}");
            };
            recognizer.Recognized += (s, e) =>
            {
                if (e.Result.Reason == ResultReason.RecognizedSpeech)
                {
                    Console.WriteLine($"RECOGNIZED: Text={e.Result.Text}");
                    // NOTE(review): WriteAllText overwrites the destination on every recognized
                    // segment, so only the last segment survives — confirm this is intended
                    // (File.AppendAllText would accumulate the full transcript).
                    File.WriteAllText(_DestinationFullFileName, e.Result.Text);
                }
                else if (e.Result.Reason == ResultReason.NoMatch)
                {
                    Console.WriteLine($"NOMATCH: Speech could not be recognized.");
                }
            };
            recognizer.Canceled += (s, e) =>
            {
                Console.WriteLine($"CANCELED: Reason={e.Reason}");
                if (e.Reason == CancellationReason.Error)
                {
                    Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
                    Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
                    Console.WriteLine($"CANCELED: Did you update the subscription info?");
                }
                stopRecognition.TrySetResult(0);
            };
            recognizer.SessionStarted += (s, e) =>
            {
                Console.WriteLine("\n Session started event.");
            };
            recognizer.SessionStopped += (s, e) =>
            {
                Console.WriteLine("\n Session stopped event.");
                Console.WriteLine("\nStop recognition.");
                stopRecognition.TrySetResult(0);
            };

            // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
            await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

            // FIX: await the completion source instead of blocking the thread with Task.WaitAny.
            await stopRecognition.Task.ConfigureAwait(false);

            // Stops recognition.
            await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
        }
    }
}
/// <summary>
/// Transcribes the given WAV file using continuous recognition via RecognizeAll.
/// </summary>
/// <param name="fn">Path to the input WAV file.</param>
public static async Task AudioToTextContinuousAsync(string fn)
{
    var config = SpeechConfig.FromSubscription(cKey, cRegion);
    using (var audioInput = AudioConfig.FromWavFileInput(fn))
    {
        using (var recognizer = new SpeechRecognizer(config, audioInput))
        {
            await RecognizeAll(recognizer);
        }
    }
}
/// <summary>
/// Recognizes speech once from the given WAV file and returns the recognized text.
/// </summary>
/// <param name="speechConfig">Configured speech settings.</param>
/// <param name="file">Path to the input WAV file.</param>
async static Task<string> FromFile(SpeechConfig speechConfig, string file)
{
    using (var audioConfig = AudioConfig.FromWavFileInput(file))
    using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
    {
        var recognitionResult = await recognizer.RecognizeOnceAsync();
        return recognitionResult.Text;
    }
}
/// <summary>
/// Recognizes speech once from the demo WAV file and prints the recognized text.
/// </summary>
/// <param name="speechConfig">Configured speech settings.</param>
async static Task FromFile(SpeechConfig speechConfig)
{
    using (var audioConfig = AudioConfig.FromWavFileInput(DEMO_FILE))
    using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
    {
        var recognitionResult = await recognizer.RecognizeOnceAsync();
        Console.WriteLine($"RECOGNIZED: Text={recognitionResult.Text}");
    }
}
/// <summary>
/// Recognizes speech once from a local WAV file and prints the recognized text.
/// </summary>
/// <param name="speechConfig">Configured speech settings.</param>
async static Task FromFile(SpeechConfig speechConfig)
{
    // NOTE(review): hard-coded absolute path to a developer machine; move to configuration.
    using (var audioConfig = AudioConfig.FromWavFileInput(@"C:\Users\juanm\GitHub\Youngermaster\Speech-Recognition-tutorials\Azure\AzureSpeechToTextTest\AzureSpeechToTextTest\ILikePizza.wav"))
    using (var recognizer = new SpeechRecognizer(speechConfig, audioConfig))
    {
        var recognitionResult = await recognizer.RecognizeOnceAsync();
        Console.WriteLine($"RECOGNIZED: Text={recognitionResult.Text}");
    }
}