/// <summary>
/// Runs the full cognitive-services pipeline over the last recording:
/// speaker identification, per-speaker transcription, whole-audio transcription,
/// then key-phrase extraction and sentiment analysis. Output file names are
/// derived from the recorder's WAV file count and written back onto
/// <paramref name="meetingMinutesUiPathArguments"/> for downstream consumers.
/// </summary>
/// <param name="meetingMinutesUiPathArguments">Receives the relative paths of the generated output files.</param>
/// <exception cref="Exception">Any pipeline failure is logged to the console and rethrown.</exception>
private static async Task CallCognitiveServices(MeetingMinutesUiPathArguments meetingMinutesUiPathArguments)
{
    try
    {
        // Capture the count from the writer that produced the recording, then
        // swap in a fresh writer for the next session.
        var wavFileCount = _audioWriter.WavFileCount;
        _audioWriter = new AudioWriter(OutputFolder);

        meetingMinutesUiPathArguments.KeyPhrasesFilePath = $"{wavFileCount}.keyphrases.txt";
        meetingMinutesUiPathArguments.MinutesFilePath = $"{wavFileCount}.minutes.txt";
        meetingMinutesUiPathArguments.SentimentFilePath = $"{wavFileCount}.sentiment.txt";
        meetingMinutesUiPathArguments.TranscribedFilePath = $"{wavFileCount}.transcribed.txt";

        var fullTranscribeSpeechPath = Combine(OutputFolder, meetingMinutesUiPathArguments.TranscribedFilePath);
        var meetingMinutesFilePath = Combine(OutputFolder, meetingMinutesUiPathArguments.MinutesFilePath);
        var keyExtractionFilePath = Combine(OutputFolder, meetingMinutesUiPathArguments.KeyPhrasesFilePath);
        var sentimentAnalysisFilePath = Combine(OutputFolder, meetingMinutesUiPathArguments.SentimentFilePath);

        // NOTE(review): region is hard-coded; consider promoting "eastus" to Settings
        // alongside the subscription key.
        var config = SpeechConfig.FromSubscription(Settings.SpeechServiceSubscriptionKey, "eastus");
        config.SpeechRecognitionLanguage = "en-US";

        WriteLine("===== Initializing Speech Identifier =====");
        var speakerIdentifierHttpRequests = new List<Task>();
        // NOTE(review): _audioWriter.OutputFilePath is read AFTER _audioWriter was
        // replaced above — presumably the new writer resolves the same path from
        // OutputFolder; verify against AudioWriter.
        var speechIdentifier = new SpeechIdentifier.SpeechIdentifier(_audioWriter.OutputFilePath, speakerIdentifierHttpRequests);
        await speechIdentifier.IdentifySpeakers();
        // BUGFIX: was Task.WaitAll(...), which synchronously blocks a thread inside
        // an async method (deadlock/thread-starvation risk). Await instead.
        await Task.WhenAll(speakerIdentifierHttpRequests);
        WriteLine("===== Done Speaker Identification =====");
        WriteLine();

        WriteLine("===== Transcribing Identified Speakers =====");
        // A true result means the per-speaker pass already produced the minutes
        // (or aborted); skip the remaining stages.
        if (await TranscribeIdentifiedSpeakers(meetingMinutesFilePath, speechIdentifier, _audioWriter.OutputFilePath, config))
        {
            return;
        }

        WriteLine("===== Done Transcribing Identified Speakers =====");
        WriteLine();

        WriteLine("===== Transcribing entire audio =====");
        using (var fullTranscribeSpeechWriter = new StreamWriter(fullTranscribeSpeechPath))
        {
            var transcriber = new Transcriber(_audioWriter.OutputFilePath, fullTranscribeSpeechWriter);
            await transcriber.TranscribeSpeechFromWavFileInput(config);
        }

        WriteLine("===== Done Transcribing entire audio =====");
        WriteLine();

        WriteLine("===== Initializing Key Extraction and Sentiment Analysis =====");
        var textAnalytics = new TextAnalytics.TextAnalytics(meetingMinutesFilePath, fullTranscribeSpeechPath);
        textAnalytics.KeyExtraction(keyExtractionFilePath);
        textAnalytics.SentimentAnalysis(sentimentAnalysisFilePath);
        WriteLine("===== Done Key Extraction and Sentiment Analysis =====");
        WriteLine();
    }
    catch (Exception e)
    {
        WriteLine(e);
        // Pause so the error stays visible on the console before rethrowing.
        ReadLine();
        throw;
    }
}
private static async Task WebSocketClientOnOnMessage(MessageEventArgs e) { try { if (e.Data == null) { return; } var activitySet = JsonConvert.DeserializeObject <ActivitySet>(e.Data); if (activitySet == null) { return; } foreach (var activity in activitySet.Activities) { if (activity.Type != ActivityTypes.Message || string.IsNullOrEmpty(activity.Text)) { continue; } if (activity.Text.Contains("Record_Start")) { if (_audioWriter != null) { WriteLine( "Tried to record again but can only have one recording at a time. Stop the other recorder first."); continue; } _audioWriter = new AudioWriter(OutputFolder); _audioWriter.StartRecording(); } else if (activity.Text.Contains("Record_Stop")) { var messages = activity.Text.Split(','); var jobId = messages[1]; _audioWriter?.StopRecording(); var uiPathDownloadArguments = new MeetingMinutesUiPathArguments { JobId = jobId, ServiceUrl = Settings.BotServiceUrl, BotAppId = Settings.BotAppId, BotAppPassword = Settings.BotAppPassword, EmailToSend = Settings.EmailToSend, EmailBody = Settings.EmailBody, EmailSubject = $"Meeting minutes for {DateTime.Today.ToShortDateString()}", }; await CallCognitiveServices(uiPathDownloadArguments); await UploadToAzureBlob(uiPathDownloadArguments); await _uiPathHttpClient.SendUiPathJob(uiPathDownloadArguments, Settings.UiPathMeetingMinutesJobKey); _audioWriter = null; } } } catch (Exception exception) { WriteLine(exception); throw; } }