Exemplo n.º 1
0
        /// <summary>
        /// Walks the speaker-identification results in order and transcribes each
        /// contiguous run of segments attributed to the same speaker into the
        /// meeting-minutes file, labelled with that speaker's name.
        /// </summary>
        /// <param name="meetingMinutesFilePath">Destination file for the per-speaker transcript.</param>
        /// <param name="speechIdentifier">Source of the ordered <c>RecognitionResults</c> to segment by speaker.</param>
        /// <param name="wavFilePath">Path of the WAV file the segment indices refer to.</param>
        /// <param name="config">Speech service configuration passed through to each <c>Transcriber</c>.</param>
        /// <returns>
        /// <c>true</c> when no speaker was identified at all (signals the caller to
        /// skip the rest of the pipeline); <c>false</c> after transcription completes.
        /// </returns>
        private static async Task <bool> TranscribeIdentifiedSpeakers(string meetingMinutesFilePath,
                                                                      SpeechIdentifier.SpeechIdentifier speechIdentifier,
                                                                      string wavFilePath, SpeechConfig config)
        {
            using (var meetingMinutesWriter = new StreamWriter(meetingMinutesFilePath))
            {
                // Materialize once so indexing below is stable and cheap.
                var recognitionResults = speechIdentifier.RecognitionResults.ToList();
                var startIndex         = 0;
                // The speaker that "owns" the segment currently being accumulated.
                var currentResult      = recognitionResults.FirstOrDefault(x => x.Succeeded);
                if (currentResult == null)
                {
                    // No successful identification anywhere: nothing to transcribe.
                    WriteLine("No recognized speaker identified. Skipping");
                    return(true);
                }

                for (var index = 0; index < recognitionResults.Count; index++)
                {
                    var result = recognitionResults[index];
                    // Ignore failed results and results with no identified profile.
                    if (!result.Succeeded || result.Value.IdentifiedProfileId == default)
                    {
                        continue;
                    }
                    // Still the same speaker: keep extending the current segment —
                    // unless this is the last result, which must force a flush.
                    // NOTE(review): if the LAST result is unsuccessful, the flush above
                    // is skipped and the trailing segment is never transcribed — confirm
                    // whether that is intended.
                    if (index != recognitionResults.Count - 1 &&
                        currentResult.Value.IdentifiedProfileId == result.Value.IdentifiedProfileId)
                    {
                        continue;
                    }
                    WriteLine("Transcribing from {0} to {1}", startIndex, index);

                    var transcriber = new Transcriber(wavFilePath, meetingMinutesWriter);
                    // Resolve the name from currentResult BEFORE advancing it: the
                    // segment [startIndex, index) belongs to the previous speaker.
                    var person      = GetSpeakerName(currentResult);
                    currentResult = result;
                    await transcriber.TranscribeSpeechFromAudioStream(config, person, startIndex, index);

                    startIndex = index;
                }
            }

            return(false);
        }
Exemplo n.º 2
0
        /// <summary>
        /// Runs the full cognitive-services pipeline over the recording that just
        /// finished: speaker identification, per-speaker transcription, full-audio
        /// transcription, then key-phrase extraction and sentiment analysis. All
        /// output files are named after the recording's sequence number.
        /// </summary>
        /// <param name="meetingMinutesUiPathArguments">
        /// Receives the relative names of the output files produced by this run.
        /// </param>
        private static async Task CallCognitiveServices(MeetingMinutesUiPathArguments meetingMinutesUiPathArguments)
        {
            try
            {
                // Capture the finished recording's counter before swapping in a fresh
                // writer for the next recording; every output file is keyed by it.
                var wavFileCount = _audioWriter.WavFileCount;
                _audioWriter = new AudioWriter(OutputFolder);
                meetingMinutesUiPathArguments.KeyPhrasesFilePath  = $"{wavFileCount}.keyphrases.txt";
                meetingMinutesUiPathArguments.MinutesFilePath     = $"{wavFileCount}.minutes.txt";
                meetingMinutesUiPathArguments.SentimentFilePath   = $"{wavFileCount}.sentiment.txt";
                meetingMinutesUiPathArguments.TranscribedFilePath = $"{wavFileCount}.transcribed.txt";
                var fullTranscribeSpeechPath  = Combine(OutputFolder, meetingMinutesUiPathArguments.TranscribedFilePath);
                var meetingMinutesFilePath    = Combine(OutputFolder, meetingMinutesUiPathArguments.MinutesFilePath);
                var keyExtractionFilePath     = Combine(OutputFolder, meetingMinutesUiPathArguments.KeyPhrasesFilePath);
                var sentimentAnalysisFilePath = Combine(OutputFolder, meetingMinutesUiPathArguments.SentimentFilePath);

                // NOTE(review): service region "eastus" is hard-coded; consider moving
                // it next to the subscription key in Settings.
                var config = SpeechConfig.FromSubscription(Settings.SpeechServiceSubscriptionKey, "eastus");
                config.SpeechRecognitionLanguage = "en-US";

                WriteLine("===== Initializing Speech Identifier =====");
                var speakerIdentifierHttpRequests = new List <Task>();
                // NOTE(review): OutputFilePath is read from the NEW AudioWriter created
                // above — confirm it still points at the completed recording's file.
                SpeechIdentifier.SpeechIdentifier speechIdentifier =
                    new SpeechIdentifier.SpeechIdentifier(_audioWriter.OutputFilePath, speakerIdentifierHttpRequests);
                await speechIdentifier.IdentifySpeakers();

                // FIX: Task.WaitAll blocked the thread inside an async method
                // (sync-over-async; deadlock/thread-starvation risk). Await the
                // pending HTTP requests asynchronously instead.
                await Task.WhenAll(speakerIdentifierHttpRequests);
                WriteLine("===== Done Speaker Identification =====");
                WriteLine();
                WriteLine("===== Transcribing Identified Speakers =====");
                // A true result means no speaker was identified: abort the pipeline.
                if (await TranscribeIdentifiedSpeakers(meetingMinutesFilePath, speechIdentifier,
                                                       _audioWriter.OutputFilePath,
                                                       config)
                    )
                {
                    return;
                }
                WriteLine("===== Done Transcribing Identified Speakers =====");
                WriteLine();
                WriteLine("===== Transcribing entire audio =====");
                using (var fullTranscribeSpeechWriter = new StreamWriter(fullTranscribeSpeechPath))
                {
                    var transcriber = new Transcriber(_audioWriter.OutputFilePath, fullTranscribeSpeechWriter);
                    await transcriber.TranscribeSpeechFromWavFileInput(config);
                }

                WriteLine("===== Done Transcribing entire audio =====");
                WriteLine();
                WriteLine("===== Initializing Key Extraction and Sentiment Analysis =====");
                var textAnalytics =
                    new TextAnalytics.TextAnalytics(meetingMinutesFilePath, fullTranscribeSpeechPath);

                textAnalytics.KeyExtraction(keyExtractionFilePath);
                textAnalytics.SentimentAnalysis(sentimentAnalysisFilePath);
                WriteLine("===== Done Key Extraction and Sentiment Analysis =====");
                WriteLine();
            }
            catch (Exception e)
            {
                // Surface the failure, pause so the console stays readable, then
                // rethrow preserving the original stack trace.
                WriteLine(e);
                ReadLine();
                throw;
            }
        }