/// <summary>
/// Processes an audio message dequeued from the queue: downloads the audio blob,
/// transcribes it via the speech service, runs sentiment and taxonomy analysis on
/// the transcript, and persists a completed <c>Audio</c> record.
/// </summary>
/// <param name="message">JSON-serialized <c>ProcessAudioMessage</c> payload.</param>
/// <returns>The file name of the processed audio entry.</returns>
/// <exception cref="ArgumentException">Thrown when <paramref name="message"/> does not deserialize to a message.</exception>
public async Task<string> ProcessAudioAsync(string message)
{
    var msg = JsonConvert.DeserializeObject<ProcessAudioMessage>(message);
    if (msg is null)
    {
        // Fail fast with a meaningful exception instead of a later NullReferenceException.
        throw new ArgumentException("Message could not be deserialized to a ProcessAudioMessage.", nameof(message));
    }

    // Download the audio blob from storage.
    var blob = await _blobStorageService.GetFileAsync(Container.Audio, msg.FileName);

    // Extract text from the audio file. The blob stream is disposed once
    // recognition finishes (the original code leaked it).
    var capture = new TextCapture();
    using (var blobStream = await blob.OpenReadAsync())
    {
        await _speechService.ContinuousRecognitionWithFileAsync(blobStream, capture);
    }

    // Auth token required by the text-analytics calls.
    var token = await _tokenService.GetAuthTokenAsync();

    // Overall sentiment and related emotional taxonomies for the transcript.
    var sentiment = await _textAnalyticsService.GetSentiment(capture.Text, token);
    var taxonomy = await _textAnalyticsService.GetTaxonomy(capture.Text, token);

    // string.Join never emits a trailing separator, so no TrimEnd is needed.
    string taxonomyStr = string.Join(", ", taxonomy.Data.Categories.Select(x => x.Label));

    // Create the audio entry with the details gathered above.
    var audio = new Audio
    {
        FileName = msg.FileName,
        FileExtension = Path.GetExtension(msg.FileName),
        Issue = msg.Issue,
        // NOTE(review): local server time is persisted here; DateTime.UtcNow is
        // usually preferable for stored timestamps — confirm consumers before changing.
        Created = DateTime.Now,
        Sentiment = sentiment.Data.Sentiment.Overall,
        Taxonomy = taxonomyStr,
        Status = AuditStatus.Done,
        Transcript = capture.Text
    };

    await _dbContext.Audios.AddAsync(audio);
    await _dbContext.SaveChangesAsync();

    return audio.FileName;
}
/// <summary>
/// Runs Azure Speech continuous recognition over a WAV <paramref name="stream"/>,
/// appending every recognized utterance to <paramref name="textCapture"/>.
/// Completes when the recognition session stops or is canceled.
/// </summary>
/// <param name="stream">WAV-format audio stream to transcribe.</param>
/// <param name="textCapture">Accumulator that receives the recognized text.</param>
/// <returns>A task that completes when recognition has stopped.</returns>
public async Task ContinuousRecognitionWithFileAsync(Stream stream, TextCapture textCapture)
{
    // <recognitionContinuousWithFile>
    // Speech config from environment-supplied subscription key and service region
    // (e.g. "westus") — no credentials in source.
    var subkey = Environment.GetEnvironmentVariable("AzureSpeechServiceKey");
    var region = Environment.GetEnvironmentVariable("AzureSpeechServiceRegion");
    var config = SpeechConfig.FromSubscription(subkey, region);

    // RunContinuationsAsynchronously keeps the awaiting continuation off the
    // SDK's event-callback thread (the original TCS ran it inline).
    var stopRecognition = new TaskCompletionSource<int>(TaskCreationOptions.RunContinuationsAsynchronously);

    // Creates a speech recognizer using the stream as audio input.
    using var audioInput = Helper.OpenWavFile(stream);
    using var recognizer = new SpeechRecognizer(config, audioInput);

    // Accumulate final results; log when a segment could not be recognized.
    recognizer.Recognized += (s, e) =>
    {
        if (e.Result.Reason == ResultReason.RecognizedSpeech)
        {
            textCapture.Text += e.Result.Text;
        }
        else if (e.Result.Reason == ResultReason.NoMatch)
        {
            Console.WriteLine($"NOMATCH: Speech could not be recognized.");
        }
    };

    // Cancellation (error or end of stream) also terminates the wait below.
    recognizer.Canceled += (s, e) =>
    {
        Console.WriteLine($"CANCELED: Reason={e.Reason}");
        if (e.Reason == CancellationReason.Error)
        {
            Console.WriteLine($"CANCELED: ErrorCode={e.ErrorCode}");
            Console.WriteLine($"CANCELED: ErrorDetails={e.ErrorDetails}");
            Console.WriteLine($"CANCELED: Did you update the subscription info?");
        }
        stopRecognition.TrySetResult(0);
    };

    recognizer.SessionStarted += (s, e) =>
    {
        Console.WriteLine("\n Session started event.");
    };

    recognizer.SessionStopped += (s, e) =>
    {
        Console.WriteLine("\n Session stopped event.");
        Console.WriteLine("\nStop recognition.");
        stopRecognition.TrySetResult(0);
    };

    // Starts continuous recognition. Uses StopContinuousRecognitionAsync() to stop recognition.
    await recognizer.StartContinuousRecognitionAsync().ConfigureAwait(false);

    // Await the completion signal. The original used Task.WaitAny, which blocks
    // a thread-pool thread synchronously inside an async method (starvation /
    // deadlock risk); awaiting the task keeps the method fully asynchronous.
    await stopRecognition.Task.ConfigureAwait(false);

    // Stops recognition.
    await recognizer.StopContinuousRecognitionAsync().ConfigureAwait(false);
    // </recognitionContinuousWithFile>
}