        private List <TextAnalyticsRequestsChunk> CreateUtteranceLevelRequests(SpeechTranscript speechTranscript, int documentRequestLimit)
        {
            var textAnalyticChunks = new List <TextAnalyticsRequestsChunk>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(textAnalyticChunks);
            }

            var textAnalyticsDocumentList = new List <TextAnalyticsRequest>();

            foreach (var recognizedPhrase in speechTranscript.RecognizedPhrases)
            {
                var id         = $"{recognizedPhrase.Channel}_{recognizedPhrase.Offset}";
                var firstNBest = recognizedPhrase.NBest.FirstOrDefault();

                // Guard against phrases without an NBest hypothesis, since FirstOrDefault() can return null.
                if (firstNBest == null)
                {
                    continue;
                }

                var text = firstNBest.Display ?? string.Empty;
                text = text.Substring(0, Math.Min(text.Length, TextAnalyticsRequestCharacterLimit));
                var textAnalyticsDocument = new TextAnalyticsRequest(Locale, id, text);
                textAnalyticsDocumentList.Add(textAnalyticsDocument);
            }

            for (int i = 0; i < textAnalyticsDocumentList.Count; i += documentRequestLimit)
            {
                textAnalyticChunks.Add(new TextAnalyticsRequestsChunk(textAnalyticsDocumentList.GetRange(i, Math.Min(documentRequestLimit, textAnalyticsDocumentList.Count - i))));
            }
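            // Hedged illustration (not part of the original source): with a documentRequestLimit of 10 and
            // 25 documents, the loop above produces chunks of 10, 10 and 5, since each GetRange call takes
            // Math.Min(documentRequestLimit, remaining) items.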

            Log.LogInformation($"Received {textAnalyticChunks.Count} text analytics chunks from {textAnalyticsDocumentList.Count} documents.");
            return(textAnalyticChunks);
        }
        private List <TextAnalyticsRequestsChunk> CreateRequestChunks(SpeechTranscript speechTranscript, int documentRequestLimit)
        {
            var textAnalyticsDocumentList = new List <TextAnalyticsRequest>();
            var textAnalyticChunks        = new List <TextAnalyticsRequestsChunk>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(textAnalyticChunks);
            }

            foreach (var recognizedPhrase in speechTranscript.RecognizedPhrases)
            {
                var id         = $"{recognizedPhrase.Channel}_{recognizedPhrase.Offset}";
                var firstNBest = recognizedPhrase.NBest.FirstOrDefault();

                // Guard against phrases without an NBest hypothesis, since FirstOrDefault() can return null.
                if (firstNBest == null)
                {
                    continue;
                }

                var textAnalyticsDocument = new TextAnalyticsRequest(Locale, id, firstNBest.Display);
                textAnalyticsDocumentList.Add(textAnalyticsDocument);
            }

            Log.LogInformation($"Total text analytics documents: {textAnalyticsDocumentList.Count}");

            for (int i = 0; i < textAnalyticsDocumentList.Count; i += documentRequestLimit)
            {
                textAnalyticChunks.Add(new TextAnalyticsRequestsChunk(textAnalyticsDocumentList.GetRange(i, Math.Min(documentRequestLimit, textAnalyticsDocumentList.Count - i))));
            }

            Log.LogInformation($"Total chunks: {textAnalyticChunks.Count}");
            return(textAnalyticChunks);
        }
        private List <TextAnalyticsRequestsChunk> CreateAudioLevelRequests(SpeechTranscript speechTranscript, int documentRequestLimit)
        {
            var textAnalyticChunks = new List <TextAnalyticsRequestsChunk>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(textAnalyticChunks);
            }

            var textAnalyticsDocumentList = new List <TextAnalyticsRequest>();

            foreach (var combinedRecognizedPhrase in speechTranscript.CombinedRecognizedPhrases)
            {
                var channel      = combinedRecognizedPhrase.Channel;
                var requestCount = 0;

                var displayForm = combinedRecognizedPhrase.Display;
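
                // Split the channel-level display text into slices of at most TextAnalyticsRequestCharacterLimit
                // characters; each slice becomes its own document with id "{channel}_{requestCount}".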

                for (int i = 0; i < displayForm.Length; i += TextAnalyticsRequestCharacterLimit)
                {
                    var displayChunk          = displayForm.Substring(i, Math.Min(TextAnalyticsRequestCharacterLimit, displayForm.Length - i));
                    var textAnalyticsDocument = new TextAnalyticsRequest(Locale, $"{channel}_{requestCount}", displayChunk);
                    textAnalyticsDocumentList.Add(textAnalyticsDocument);
                    requestCount += 1;
                }
            }

            for (int i = 0; i < textAnalyticsDocumentList.Count; i += documentRequestLimit)
            {
                textAnalyticChunks.Add(new TextAnalyticsRequestsChunk(textAnalyticsDocumentList.GetRange(i, Math.Min(documentRequestLimit, textAnalyticsDocumentList.Count - i))));
            }

            Log.LogInformation($"Received {textAnalyticChunks.Count} text analytics chunks from {textAnalyticsDocumentList.Count} documents.");
            return(textAnalyticChunks);
        }
        /// <summary>
        /// Submits text analytics requests depending on the sentimentAnalysisSetting. Each utterance is submitted to text analytics as its own document.
        /// (This means, for instance, that every utterance receives a separate sentiment score.)
        /// </summary>
        /// <param name="speechTranscript">The speech transcript object.</param>
        /// <param name="sentimentAnalysisSetting">The sentiment analysis setting.</param>
        /// <returns>The job ids and errors, if any were found.</returns>
        public async Task <(IEnumerable <string> jobIds, IEnumerable <string> errors)> SubmitUtteranceLevelRequests(
            SpeechTranscript speechTranscript,
            SentimentAnalysisSetting sentimentAnalysisSetting)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));

            if (sentimentAnalysisSetting != SentimentAnalysisSetting.UtteranceLevel)
            {
                return(new List <string>(), new List <string>());
            }

            var documents = speechTranscript.RecognizedPhrases.Where(r => r.NBest.FirstOrDefault() != null && !string.IsNullOrEmpty(r.NBest.First().Display)).Select(r => new TextDocumentInput($"{r.Channel}_{r.Offset}", r.NBest.First().Display)
            {
                Language = Locale
            });

            var actions = new TextAnalyticsActions
            {
                DisplayName             = "IngestionClient",
                AnalyzeSentimentActions = new List <AnalyzeSentimentAction>()
                {
                    new AnalyzeSentimentAction()
                }
            };

            return(await SubmitDocumentsAsync(documents, actions).ConfigureAwait(false));
        }
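        // Hedged usage sketch (not part of the original source; method name is illustrative only):
        // the submit call above is typically paired with AddUtteranceLevelEntitiesAsync(jobIds, transcript)
        // further below to fold the utterance-level sentiment results back into the transcript.
        private async Task <IEnumerable <string> > ExampleUtteranceLevelSentimentFlowAsync(SpeechTranscript speechTranscript)
        {
            var (jobIds, submitErrors) = await SubmitUtteranceLevelRequests(speechTranscript, SentimentAnalysisSetting.UtteranceLevel).ConfigureAwait(false);
            var addErrors = await AddUtteranceLevelEntitiesAsync(jobIds, speechTranscript).ConfigureAwait(false);

            return submitErrors.Concat(addErrors).ToList();
        }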
        public async Task <IEnumerable <string> > RedactEntitiesAsync(SpeechTranscript speechTranscript)
        {
            Log.LogInformation($"Starting entity masking.");

            if (speechTranscript == null)
            {
                throw new ArgumentNullException(nameof(speechTranscript));
            }

            var textAnalyticsChunks = CreateRequestChunks(speechTranscript, EntityRecognitionRequestLimit);
            var responses           = new List <HttpResponseMessage>();

            foreach (var chunk in textAnalyticsChunks)
            {
                var chunkString = JsonConvert.SerializeObject(chunk);
                var response    = await MakeRequestAsync(chunkString, EntityRecognitionSuffix).ConfigureAwait(false);

                responses.Add(response);
            }

            Log.LogInformation($"Total responses: {responses.Count}");
            var entityRedactionErrors = await RedactEntitiesInSpeechTranscriptAsync(responses, speechTranscript).ConfigureAwait(false);

            return(entityRedactionErrors);
        }
        /// <summary>
        /// Gets the conversational analysis (PII) results and adds them to the speech transcript.
        /// </summary>
        /// <param name="conversationJobIds">The conversation analysis job Ids.</param>
        /// <param name="speechTranscript">The speech transcript object.</param>
        /// <returns>The errors, if any.</returns>
        public async Task <IEnumerable <string> > AddConversationalEntitiesAsync(
            IEnumerable <string> conversationJobIds,
            SpeechTranscript speechTranscript)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));
            var errors = new List <string>();

            var isConversationalPiiEnabled = IsConversationalPiiEnabled();

            if (!isConversationalPiiEnabled)
            {
                return(new List <string>());
            }

            if (conversationJobIds == null || !conversationJobIds.Any())
            {
                return(errors);
            }

            var conversationsPiiResults = await GetConversationsOperationsResult(conversationJobIds).ConfigureAwait(false);

            if (conversationsPiiResults.errors.Any())
            {
                errors.AddRange(conversationsPiiResults.errors);
            }

            speechTranscript.ConversationAnalyticsResults = new ConversationAnalyticsResults
            {
                AnalyzeConversationPiiResults = conversationsPiiResults.piiResults,
            };

            return(errors);
        }
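        // Hedged usage sketch (not part of the original source; method name is illustrative only):
        // conversational PII is a two-step flow, first submitting the analyzeConversations jobs and
        // then folding the results back into the transcript once they complete.
        private async Task <IEnumerable <string> > ExampleConversationalPiiFlowAsync(SpeechTranscript speechTranscript)
        {
            var (jobIds, submitErrors) = await SubmitAnalyzeConversationsRequestAsync(speechTranscript).ConfigureAwait(false);
            var addErrors = await AddConversationalEntitiesAsync(jobIds, speechTranscript).ConfigureAwait(false);

            return submitErrors.Concat(addErrors).ToList();
        }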
        public int LogTask(SpeechTranscript transcript)
        {
            try
            {
                _SpeechTranscript.Add(transcript);
            }
            catch (Exception)
            {
                return(0);
            }
            return(1);
        }
        /// <summary>
        /// Submits text analytics requests depending on the sentimentAnalysisSetting and piiRedactionSetting. The whole transcript is submitted as a single document per channel.
        /// (This means, for instance, that a single sentiment score is generated per channel.)
        /// </summary>
        /// <param name="speechTranscript">The speech transcript object.</param>
        /// <param name="sentimentAnalysisSetting">The sentiment analysis setting.</param>
        /// <param name="piiRedactionSetting">The PII redaction setting.</param>
        /// <returns>The job ids and errors, if any were found.</returns>
        public async Task <(IEnumerable <string> jobIds, IEnumerable <string> errors)> SubmitAudioLevelRequests(
            SpeechTranscript speechTranscript,
            SentimentAnalysisSetting sentimentAnalysisSetting,
            PiiRedactionSetting piiRedactionSetting)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));

            if (sentimentAnalysisSetting != SentimentAnalysisSetting.AudioLevel && piiRedactionSetting != PiiRedactionSetting.UtteranceAndAudioLevel)
            {
                return(new List <string>(), new List <string>());
            }

            var documents = speechTranscript.CombinedRecognizedPhrases.Where(r => !string.IsNullOrEmpty(r.Display)).Select(r => new TextDocumentInput($"{r.Channel}", r.Display)
            {
                Language = Locale
            });

            var actions = new TextAnalyticsActions
            {
                DisplayName = "IngestionClient"
            };

            if (sentimentAnalysisSetting == SentimentAnalysisSetting.AudioLevel)
            {
                actions.AnalyzeSentimentActions = new List <AnalyzeSentimentAction>()
                {
                    new AnalyzeSentimentAction()
                };
            }

            if (piiRedactionSetting == PiiRedactionSetting.UtteranceAndAudioLevel)
            {
                var action = new RecognizePiiEntitiesAction();

                if (!string.IsNullOrEmpty(FetchTranscriptionEnvironmentVariables.PiiCategories))
                {
                    var piiEntityCategories = FetchTranscriptionEnvironmentVariables.PiiCategories.Split(",").Select(c => new PiiEntityCategory(c));

                    foreach (var category in piiEntityCategories)
                    {
                        action.CategoriesFilter.Add(category);
                    }
                }

                actions.RecognizePiiEntitiesActions = new List <RecognizePiiEntitiesAction>()
                {
                    action
                };
            }

            return(await SubmitDocumentsAsync(documents, actions).ConfigureAwait(false));
        }
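        // Hedged usage sketch (not part of the original source; method name is illustrative only):
        // the audio-level submit call above pairs with AddAudioLevelEntitiesAsync(jobIds, transcript)
        // further below, which applies the sentiment scores and PII redaction per channel.
        private async Task <IEnumerable <string> > ExampleAudioLevelFlowAsync(SpeechTranscript speechTranscript)
        {
            var (jobIds, submitErrors) = await SubmitAudioLevelRequests(speechTranscript, SentimentAnalysisSetting.AudioLevel, PiiRedactionSetting.UtteranceAndAudioLevel).ConfigureAwait(false);
            var addErrors = await AddAudioLevelEntitiesAsync(jobIds, speechTranscript).ConfigureAwait(false);

            return submitErrors.Concat(addErrors).ToList();
        }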
        public async Task <IEnumerable <string> > AddUtteranceLevelEntitiesAsync(
            SpeechTranscript speechTranscript,
            SentimentAnalysisSetting sentimentAnalysisSetting)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));

            var errors = new List <string>();

            if (sentimentAnalysisSetting != SentimentAnalysisSetting.UtteranceLevel)
            {
                return(errors);
            }

            var documents = speechTranscript.RecognizedPhrases.Where(r => r.NBest.FirstOrDefault() != null && !string.IsNullOrEmpty(r.NBest.First().Display)).Select(r => new TextDocumentInput($"{r.Channel}_{r.Offset}", r.NBest.First().Display)
            {
                Language = Locale
            });

            var actions = new TextAnalyticsActions
            {
                DisplayName             = "IngestionClient",
                AnalyzeSentimentActions = new List <AnalyzeSentimentAction>()
                {
                    new AnalyzeSentimentAction()
                }
            };

            var (sentimentResults, piiResults, requestErrors) = await this.GetDocumentResultsAsync(documents, actions).ConfigureAwait(false);

            errors.AddRange(requestErrors);

            foreach (var recognizedPhrase in speechTranscript.RecognizedPhrases)
            {
                var index      = $"{recognizedPhrase.Channel}_{recognizedPhrase.Offset}";
                var firstNBest = recognizedPhrase.NBest.FirstOrDefault();

                var sentimentResult = sentimentResults.Where(s => s.Id == index).FirstOrDefault();

                if (firstNBest != null)
                {
                    firstNBest.Sentiment = new Sentiment()
                    {
                        Negative = sentimentResult?.DocumentSentiment.ConfidenceScores.Negative ?? 0.0,
                        Positive = sentimentResult?.DocumentSentiment.ConfidenceScores.Positive ?? 0.0,
                        Neutral  = sentimentResult?.DocumentSentiment.ConfidenceScores.Neutral ?? 0.0,
                    };
                }
            }

            return(errors);
        }
        public async Task <IEnumerable <string> > AddSentimentToTranscriptAsync(SpeechTranscript speechTranscript, SentimentAnalysisSetting sentimentSetting)
        {
            if (speechTranscript == null)
            {
                throw new ArgumentNullException(nameof(speechTranscript));
            }

            var sentimentErrors = new List <string>();

            try
            {
                var textAnalyticsChunks = new List <TextAnalyticsRequestsChunk>();

                if (sentimentSetting == SentimentAnalysisSetting.UtteranceLevel)
                {
                    textAnalyticsChunks = CreateUtteranceLevelRequests(speechTranscript, SentimentRequestLimit);
                }
                else if (sentimentSetting == SentimentAnalysisSetting.AudioLevel)
                {
                    textAnalyticsChunks = CreateAudioLevelRequests(speechTranscript, SentimentRequestLimit);
                }

                var responses = new List <HttpResponseMessage>();
                foreach (var chunk in textAnalyticsChunks)
                {
                    var chunkString = JsonConvert.SerializeObject(chunk);
                    var response    = await MakeRequestAsync(chunkString, SentimentSuffix).ConfigureAwait(false);

                    responses.Add(response);
                }

                Log.LogInformation($"Total responses: {responses.Count}");
                sentimentErrors = await AddSentimentToSpeechTranscriptAsync(responses, speechTranscript, sentimentSetting).ConfigureAwait(false);

                return(sentimentErrors);
            }
            catch (Exception e)
            {
                var sentimentError = $"Sentiment Analysis failed with exception: {e.Message}";
                Log.LogError(sentimentError);
                sentimentErrors.Add(sentimentError);
                return(sentimentErrors);
            }
        }
        private string CreateTranscriptJson()
        {
            List <RecognizedPhrase> recognizedPhrases = new List <RecognizedPhrase>();
            int    totalduration = 0;
            string totaldisplay  = string.Empty;
            string totallexical  = string.Empty;
            string totalitn      = string.Empty;
            string totalmasked   = string.Empty;

            // var log = (rt == RecoType.Base) ? this.baseModelLogText : this.customModelLogText;
            // source.TrySetResult(0);
            foreach (var utt in FinalResultsCumulative)
            {
                // Append a trailing space as a separator; the original PadRight(1, ' ') pads to a total width of 1 and is a no-op for non-empty strings.
                totaldisplay = totaldisplay + utt.DisplayText + " ";

                if (utt.NBest != null && utt.NBest.Count > 0)
                {
                    totallexical = totallexical + utt.NBest[0].Lexical + " ";
                    totalitn     = totalitn + utt.NBest[0].ITN + " ";
                    totalmasked  = totalmasked + utt.NBest[0].MaskedITN + " ";
                }

                totalduration = totalduration + utt.Duration;

                var durationTicks = new TimeSpan(0, 0, 0, 0, utt.Duration).Ticks;
                var offsetTicks   = new TimeSpan(0, 0, 0, 0, utt.Offset).Ticks;
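                // Note (not part of the original source): the offsets and durations above are treated as milliseconds
                // (TimeSpan(0, 0, 0, 0, milliseconds)) and converted to ticks for the RecognizedPhrase fields.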
                RecognizedPhrase recognizedPhrase = new RecognizedPhrase(utt.RecognitionStatus, this.channel, 0, utt.Offset.ToString(CultureInfo.InvariantCulture.NumberFormat), utt.Duration.ToString(CultureInfo.InvariantCulture.NumberFormat), offsetTicks, durationTicks, utt.NBest);
                recognizedPhrases.Add(recognizedPhrase);
            }

            var totalDurationTicks            = new TimeSpan(0, 0, 0, 0, totalduration).Ticks;
            CombinedRecognizedPhrase combined = new CombinedRecognizedPhrase(this.channel, totallexical, totalitn, totalmasked, totaldisplay, null);
            string           timestamp        = DateTime.UtcNow.ToString(CultureInfo.InvariantCulture.DateTimeFormat);
            SpeechTranscript transcript       = new SpeechTranscript(this.fileSource, timestamp, totalDurationTicks, totalduration.ToString(CultureInfo.InvariantCulture.NumberFormat), new List <CombinedRecognizedPhrase> {
                combined
            }, recognizedPhrases);

            if (log != null)
            {
                log.LogInformation($"Speech transcript JSON created at : {0} UTC", timestamp);
            }

            return(JsonConvert.SerializeObject(transcript));
        }
        /// <summary>
        /// Gets the (utterance-level) results from text analytics and adds them to the speech transcript.
        /// </summary>
        /// <param name="jobIds">The text analytics job ids.</param>
        /// <param name="speechTranscript">The speech transcript object.</param>
        /// <returns>The errors, if any.</returns>
        public async Task <IEnumerable <string> > AddUtteranceLevelEntitiesAsync(
            IEnumerable <string> jobIds,
            SpeechTranscript speechTranscript)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));
            var errors = new List <string>();

            if (jobIds == null || !jobIds.Any())
            {
                return(errors);
            }

            var (sentimentResults, piiResults, requestErrors) = await this.GetOperationsResultsAsync(jobIds).ConfigureAwait(false);

            errors.AddRange(requestErrors);

            foreach (var recognizedPhrase in speechTranscript.RecognizedPhrases)
            {
                var index      = $"{recognizedPhrase.Channel}_{recognizedPhrase.Offset}";
                var firstNBest = recognizedPhrase.NBest.FirstOrDefault();

                if (sentimentResults != null)
                {
                    // utterance level requests can only contain sentiment results at the moment
                    var sentimentResult = sentimentResults.Where(s => s.Id == index).FirstOrDefault();

                    if (firstNBest != null)
                    {
                        firstNBest.Sentiment = new Sentiment()
                        {
                            Negative = sentimentResult?.DocumentSentiment.ConfidenceScores.Negative ?? 0.0,
                            Positive = sentimentResult?.DocumentSentiment.ConfidenceScores.Positive ?? 0.0,
                            Neutral  = sentimentResult?.DocumentSentiment.ConfidenceScores.Neutral ?? 0.0,
                        };
                    }
                }
            }

            return(errors);
        }
        public async Task <IEnumerable <string> > RedactEntitiesAsync(SpeechTranscript speechTranscript, EntityRedactionSetting entityRedactionSetting)
        {
            if (speechTranscript == null)
            {
                throw new ArgumentNullException(nameof(speechTranscript));
            }

            var entityRedactionErrors = new List <string>();

            try
            {
                var textAnalyticsChunks = new List <TextAnalyticsRequestsChunk>();
                if (entityRedactionSetting == EntityRedactionSetting.UtteranceLevel)
                {
                    textAnalyticsChunks = CreateUtteranceLevelRequests(speechTranscript, EntityRecognitionRequestLimit);
                }

                var responses = new List <HttpResponseMessage>();
                foreach (var chunk in textAnalyticsChunks)
                {
                    var chunkString = JsonConvert.SerializeObject(chunk);
                    var response    = await MakeRequestAsync(chunkString, EntityRecognitionSuffix).ConfigureAwait(false);

                    responses.Add(response);
                }

                Log.LogInformation($"Total responses: {responses.Count}");
                entityRedactionErrors = await RedactEntitiesInSpeechTranscriptAsync(responses, speechTranscript, entityRedactionSetting).ConfigureAwait(false);

                return(entityRedactionErrors);
            }
            catch (Exception e)
            {
                var entityRedactionError = $"Entity Redaction failed with exception: {e.Message}";
                Log.LogError(entityRedactionError);
                entityRedactionErrors.Add(entityRedactionError);
                return(entityRedactionErrors);
            }
        }
        public async Task <IEnumerable <string> > AddSentimentToTranscriptAsync(SpeechTranscript speechTranscript)
        {
            if (speechTranscript == null)
            {
                throw new ArgumentNullException(nameof(speechTranscript));
            }

            var textAnalyticsChunks = CreateRequestChunks(speechTranscript, SentimentRequestLimit);

            var responses = new List <HttpResponseMessage>();

            foreach (var chunk in textAnalyticsChunks)
            {
                var chunkString = JsonConvert.SerializeObject(chunk);
                var response    = await MakeRequestAsync(chunkString, SentimentSuffix).ConfigureAwait(false);

                responses.Add(response);
            }

            Log.LogInformation($"Total responses: {responses.Count}");
            var sentimentErrors = await AddSentimentToSpeechTranscriptAsync(responses, speechTranscript).ConfigureAwait(false);

            return(sentimentErrors);
        }
        Task <int> ITranscriptRepository.LogTask(SpeechTranscript status)
        {
            throw new NotImplementedException();
        }
        private async Task <List <string> > AddSentimentToSpeechTranscriptAsync(List <HttpResponseMessage> responses, SpeechTranscript speechTranscript, SentimentAnalysisSetting sentimentSetting)
        {
            var sentimentErrors = new List <string>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(sentimentErrors);
            }

            var textAnalyticsDocuments = new List <TextAnalyticsDocument>();

            foreach (var message in responses)
            {
                message.EnsureSuccessStatusCode();

                var responseBody = await message.Content.ReadAsStringAsync().ConfigureAwait(false);

                var textAnalyticsResponse = JsonConvert.DeserializeObject <TextAnalyticsResponse>(responseBody);

                foreach (var error in textAnalyticsResponse.Errors)
                {
                    var errorMessage = $"Sentiment extraction failed with error: {error.Error.InnerError.Message}";
                    Log.LogError(errorMessage);
                    sentimentErrors.Add(errorMessage);
                }

                textAnalyticsDocuments.AddRange(textAnalyticsResponse.Documents);
            }

            if (sentimentSetting == SentimentAnalysisSetting.UtteranceLevel)
            {
                foreach (var document in textAnalyticsDocuments)
                {
                    // Matching sentiment and transcription JSON by using the timestamp
                    var target = speechTranscript.RecognizedPhrases.Where(e => $"{e.Channel}_{e.Offset}".Equals(document.Id, StringComparison.Ordinal)).FirstOrDefault();

                    // Skip documents that do not match any recognized phrase to avoid a NullReferenceException.
                    if (target == null)
                    {
                        continue;
                    }

                    var nBest = target.NBest.FirstOrDefault();
                    if (nBest != null)
                    {
                        if (nBest.Sentiment == null)
                        {
                            nBest.Sentiment = new Sentiment();
                        }

                        nBest.Sentiment.Negative = document.ConfidenceScores.Negative;
                        nBest.Sentiment.Positive = document.ConfidenceScores.Positive;
                        nBest.Sentiment.Neutral  = document.ConfidenceScores.Neutral;
                    }
                }
            }
            else if (sentimentSetting == SentimentAnalysisSetting.AudioLevel)
            {
                foreach (var combinedRecognizedPhrase in speechTranscript.CombinedRecognizedPhrases)
                {
                    var documents = textAnalyticsDocuments.Where(document => document.Id.StartsWith($"{combinedRecognizedPhrase.Channel}_", StringComparison.OrdinalIgnoreCase));

                    if (!documents.Any())
                    {
                        continue;
                    }

                    if (combinedRecognizedPhrase.Sentiment == null)
                    {
                        combinedRecognizedPhrase.Sentiment = new Sentiment();
                    }

                    combinedRecognizedPhrase.Sentiment.Negative = documents.Average(d => d.ConfidenceScores.Negative);
                    combinedRecognizedPhrase.Sentiment.Positive = documents.Average(d => d.ConfidenceScores.Positive);
                    combinedRecognizedPhrase.Sentiment.Neutral  = documents.Average(d => d.ConfidenceScores.Neutral);
                }
            }

            return(sentimentErrors);
        }
        private async Task <List <string> > RedactEntitiesInSpeechTranscriptAsync(List <HttpResponseMessage> responses, SpeechTranscript speechTranscript, EntityRedactionSetting entityRedactionSetting)
        {
            var entityRedactionErrors = new List <string>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(entityRedactionErrors);
            }

            var displayFull = new List <string>();
            var fullTranscriptionPerChannelDict = new Dictionary <int, StringBuilder>();

            foreach (var message in responses)
            {
                message.EnsureSuccessStatusCode();

                var responseBody = await message.Content.ReadAsStringAsync().ConfigureAwait(false);

                var textAnalyticsResponse = JsonConvert.DeserializeObject <TextAnalyticsResponse>(responseBody);

                foreach (var error in textAnalyticsResponse.Errors)
                {
                    var errorMessage = $"Entity redaction failed with error: {error.Error.InnerError.Message}";
                    Log.LogError(errorMessage);
                    entityRedactionErrors.Add(errorMessage);
                }

                foreach (var document in textAnalyticsResponse.Documents)
                {
                    if (entityRedactionSetting == EntityRedactionSetting.UtteranceLevel)
                    {
                        var phrase = speechTranscript.RecognizedPhrases.Where(e => $"{e.Channel}_{e.Offset}".Equals(document.Id, StringComparison.Ordinal)).FirstOrDefault();

                        // Remove all text but the display form (skip documents that do not match any recognized phrase)
                        if (phrase == null || phrase.NBest == null || !phrase.NBest.Any())
                        {
                            continue;
                        }

                        var nBest = phrase.NBest.FirstOrDefault();
                        nBest.ITN       = string.Empty;
                        nBest.MaskedITN = string.Empty;
                        nBest.Lexical   = string.Empty;

                        // Remove word level timestamps if they exist
                        nBest.Words = null;

                        nBest.Display = RedactEntitiesInText(nBest.Display, document.Entities);
                        phrase.NBest  = new[] { nBest };

                        if (fullTranscriptionPerChannelDict.ContainsKey(phrase.Channel))
                        {
                            fullTranscriptionPerChannelDict[phrase.Channel].Append(" " + nBest.Display);
                        }
                        else
                        {
                            fullTranscriptionPerChannelDict.Add(phrase.Channel, new StringBuilder(nBest.Display));
                        }
                    }
                }
            }

            if (entityRedactionSetting == EntityRedactionSetting.UtteranceLevel)
            {
                foreach (var combinedRecognizedPhrase in speechTranscript.CombinedRecognizedPhrases)
                {
                    var channel = combinedRecognizedPhrase.Channel;

                    // Remove full transcription for channel:
                    combinedRecognizedPhrase.MaskedITN = string.Empty;
                    combinedRecognizedPhrase.ITN       = string.Empty;
                    combinedRecognizedPhrase.Lexical   = string.Empty;
                    combinedRecognizedPhrase.Display   = string.Empty;

                    if (fullTranscriptionPerChannelDict.ContainsKey(channel))
                    {
                        combinedRecognizedPhrase.Display = fullTranscriptionPerChannelDict[channel].ToString();
                    }
                }
            }

            return(entityRedactionErrors);
        }
        /// <summary>
        /// Gets the (audio-level) results from text analytics and adds them to the speech transcript.
        /// </summary>
        /// <param name="jobIds">The text analytics job ids.</param>
        /// <param name="speechTranscript">The speech transcript object.</param>
        /// <returns>The errors, if any.</returns>
        public async Task <IEnumerable <string> > AddAudioLevelEntitiesAsync(
            IEnumerable <string> jobIds,
            SpeechTranscript speechTranscript)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));
            var errors = new List <string>();

            if (jobIds == null || !jobIds.Any())
            {
                return(errors);
            }

            var (sentimentResults, piiResults, requestErrors) = await this.GetOperationsResultsAsync(jobIds).ConfigureAwait(false);

            errors.AddRange(requestErrors);

            foreach (var combinedRecognizedPhrase in speechTranscript.CombinedRecognizedPhrases)
            {
                var channel         = combinedRecognizedPhrase.Channel;
                var sentimentResult = sentimentResults.Where(document => document.Id.Equals($"{channel}", StringComparison.OrdinalIgnoreCase)).SingleOrDefault();

                if (sentimentResult != null)
                {
                    combinedRecognizedPhrase.Sentiment = new Sentiment()
                    {
                        Negative = sentimentResult.DocumentSentiment.ConfidenceScores.Negative,
                        Positive = sentimentResult.DocumentSentiment.ConfidenceScores.Positive,
                        Neutral  = sentimentResult.DocumentSentiment.ConfidenceScores.Neutral,
                    };
                }

                if (!AnalyzeConversationsProvider.IsConversationalPiiEnabled())
                {
                    var piiResult = piiResults.Where(document => document.Id.Equals($"{channel}", StringComparison.OrdinalIgnoreCase)).SingleOrDefault();
                    if (piiResult != null)
                    {
                        var redactedText = piiResult.Entities.RedactedText;

                        combinedRecognizedPhrase.Display   = redactedText;
                        combinedRecognizedPhrase.ITN       = string.Empty;
                        combinedRecognizedPhrase.MaskedITN = string.Empty;
                        combinedRecognizedPhrase.Lexical   = string.Empty;

                        var phrases = speechTranscript.RecognizedPhrases.Where(phrase => phrase.Channel == channel);

                        var startIndex = 0;
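                        // Note (not part of the original source): this in-place mapping assumes the redacted channel-level
                        // text preserves each phrase's display length and joins phrases with single spaces; otherwise the
                        // Substring offsets below would drift.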
                        foreach (var phrase in phrases)
                        {
                            var firstNBest = phrase.NBest.FirstOrDefault();

                            if (firstNBest != null && !string.IsNullOrEmpty(firstNBest.Display))
                            {
                                firstNBest.Display   = redactedText.Substring(startIndex, firstNBest.Display.Length);
                                firstNBest.ITN       = string.Empty;
                                firstNBest.MaskedITN = string.Empty;
                                firstNBest.Lexical   = string.Empty;

                                startIndex += firstNBest.Display.Length + 1;
                            }
                        }
                    }
                }
            }

            return(errors);
        }
        private async Task <IEnumerable <string> > AddSentimentToSpeechTranscriptAsync(List <HttpResponseMessage> responses, SpeechTranscript speechTranscript)
        {
            var sentimentErrors = new List <string>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(sentimentErrors);
            }

            foreach (var message in responses)
            {
                message.EnsureSuccessStatusCode();

                var responseBody = await message.Content.ReadAsStringAsync().ConfigureAwait(false);

                var textAnalyticsResponse = JsonConvert.DeserializeObject <TextAnalyticsResponse>(responseBody);

                foreach (var error in textAnalyticsResponse.Errors)
                {
                    var errorMessage = $"Sentiment extraction failed with error: {error.Error.InnerError.Message}";
                    Log.LogError(errorMessage);
                    sentimentErrors.Add(errorMessage);
                }

                // Matching sentiment and transcription JSON by using the timestamp
                foreach (var document in textAnalyticsResponse.Documents)
                {
                    var targetSegment = speechTranscript.RecognizedPhrases.Where(e => $"{e.Channel}_{e.Offset}".Equals(document.Id, StringComparison.Ordinal)).FirstOrDefault();

                    // Skip documents that do not match any recognized phrase to avoid a NullReferenceException.
                    if (targetSegment == null)
                    {
                        continue;
                    }

                    var nBest = targetSegment.NBest.FirstOrDefault();
                    if (nBest != null)
                    {
                        if (nBest.Sentiment == null)
                        {
                            nBest.Sentiment = new Sentiment();
                        }

                        nBest.Sentiment.Negative = document.ConfidenceScores.Negative;
                        nBest.Sentiment.Positive = document.ConfidenceScores.Positive;
                        nBest.Sentiment.Neutral  = document.ConfidenceScores.Neutral;
                    }
                }
            }

            return(sentimentErrors);
        }
        private async Task <IEnumerable <string> > RedactEntitiesInSpeechTranscriptAsync(List <HttpResponseMessage> responses, SpeechTranscript speechTranscript)
        {
            var entityRedactionErrors = new List <string>();

            if (!speechTranscript.RecognizedPhrases.Any())
            {
                return(entityRedactionErrors);
            }

            var displayFull = new List <string>();
            var fullTranscriptionPerChannelDict = new Dictionary <int, StringBuilder>();

            foreach (var message in responses)
            {
                message.EnsureSuccessStatusCode();

                var responseBody = await message.Content.ReadAsStringAsync().ConfigureAwait(false);

                var textAnalyticsResponse = JsonConvert.DeserializeObject <TextAnalyticsResponse>(responseBody);

                foreach (var error in textAnalyticsResponse.Errors)
                {
                    var errorMessage = $"Entity redaction failed with error: {error.Error.InnerError.Message}";
                    Log.LogError(errorMessage);
                    entityRedactionErrors.Add(errorMessage);
                }

                // Matching entities and transcription JSON by using the timestamp
                foreach (var document in textAnalyticsResponse.Documents)
                {
                    var newSegment = speechTranscript.RecognizedPhrases.Where(e => $"{e.Channel}_{e.Offset}".Equals(document.Id, StringComparison.Ordinal)).FirstOrDefault();

                    // Remove all text but the display form (skip documents that do not match any recognized phrase)
                    if (newSegment == null || newSegment.NBest == null || !newSegment.NBest.Any())
                    {
                        continue;
                    }

                    var nBest = newSegment.NBest.FirstOrDefault();

                    nBest.ITN       = string.Empty;
                    nBest.MaskedITN = string.Empty;
                    nBest.Lexical   = string.Empty;

                    // Remove word level timestamps if they exist
                    nBest.Words = null;

                    var maskableEntities = document.Entities.Where(e => IsMaskableEntityType(e)).ToList();
                    var entities         = RemoveOverlappingEntities(maskableEntities);

                    // Order descending to make insertions that do not impact the offset of other entities
                    entities = entities.OrderByDescending(o => o.Offset).ToList();

                    foreach (var entity in entities)
                    {
                        RedactEntityInRecognizedPhrase(newSegment, entity);
                    }

                    // Create full transcription per channel
                    if (fullTranscriptionPerChannelDict.ContainsKey(newSegment.Channel))
                    {
                        fullTranscriptionPerChannelDict[newSegment.Channel].Append(" " + nBest.Display);
                    }
                    else
                    {
                        fullTranscriptionPerChannelDict.Add(newSegment.Channel, new StringBuilder(nBest.Display));
                    }
                }
            }

            foreach (var combinedRecognizedPhrase in speechTranscript.CombinedRecognizedPhrases)
            {
                var channel = combinedRecognizedPhrase.Channel;

                // Remove full transcription for channel:
                combinedRecognizedPhrase.MaskedITN = string.Empty;
                combinedRecognizedPhrase.ITN       = string.Empty;
                combinedRecognizedPhrase.Lexical   = string.Empty;
                combinedRecognizedPhrase.Display   = string.Empty;

                if (fullTranscriptionPerChannelDict.ContainsKey(channel))
                {
                    combinedRecognizedPhrase.Display = fullTranscriptionPerChannelDict[channel].ToString();
                }
            }

            return(entityRedactionErrors);
        }
        /// <summary>
        /// Submits an asynchronous analyzeConversations request.
        /// </summary>
        /// <param name="speechTranscript">Instance of the speech transcript.</param>
        /// <returns>The job IDs and errors, if any.</returns>
        public async Task <(IEnumerable <string> jobIds, IEnumerable <string> errors)> SubmitAnalyzeConversationsRequestAsync(SpeechTranscript speechTranscript)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));

            var data      = new List <AnalyzeConversationsRequest>();
            var count     = -1;
            var jobCount  = 0;
            var turnCount = 0;

            foreach (var recognizedPhrase in speechTranscript.RecognizedPhrases)
            {
                var topResult = recognizedPhrase.NBest.FirstOrDefault();

                // Guard against phrases without an NBest hypothesis; First() would throw on an empty list.
                if (topResult == null)
                {
                    continue;
                }

                var textCount = topResult.Lexical.Length;

                if (count == -1 || (count + textCount) > FetchTranscriptionEnvironmentVariables.ConversationPiiMaxChunkSize)
                {
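                    // Note (not part of the original source): a new analyzeConversations job is started whenever adding
                    // this utterance would push the accumulated lexical length past ConversationPiiMaxChunkSize
                    // (count == -1 forces creation of the first job).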
                    count = 0;
                    jobCount++;
                    data.Add(new AnalyzeConversationsRequest
                    {
                        DisplayName   = "IngestionClient",
                        AnalysisInput = new AnalysisInput(new[]
                        {
                            new Conversation
                            {
                                Id                = $"{jobCount}",
                                Language          = Locale,
                                Modality          = Modality.transcript,
                                ConversationItems = new List <ConversationItem>()
                            }
                        }),
                        Tasks = new[]
                        {
                            new AnalyzeConversationsTask
                            {
                                TaskName   = "Conversation PII task",
                                Kind       = AnalyzeConversationsTaskKind.ConversationalPIITask,
                                Parameters = new Dictionary <string, object>
                                {
                                    {
                                        "piiCategories", FetchTranscriptionEnvironmentVariables.ConversationPiiCategories.ToList()
                                    },
                                    {
                                        "redactionSource", FetchTranscriptionEnvironmentVariables.ConversationPiiInferenceSource ?? DefaultInferenceSource
                                    },
                                    {
                                        "includeAudioRedaction", FetchTranscriptionEnvironmentVariables.ConversationPiiSetting == Connector.Enums.ConversationPiiSetting.IncludeAudioRedaction
                                    }
                                }
                            }
                        }
                    });
                }

                data.Last().AnalysisInput.Conversations[0].ConversationItems.Add(new ConversationItem
                {
                    Text          = topResult.Display,
                    Lexical       = topResult.Lexical,
                    Itn           = topResult.ITN,
                    MaskedItn     = topResult.MaskedITN,
                    Id            = $"{turnCount}__{recognizedPhrase.Offset}__{recognizedPhrase.Channel}",
                    ParticipantId = $"{recognizedPhrase.Channel}",
                    AudioTimings  = topResult.Words
                                    ?.Select(word => new WordLevelAudioTiming
                    {
                        Word     = word.Word,
                        Duration = (long)word.DurationInTicks,
                        Offset   = (long)word.OffsetInTicks
                    })
                });
                count += textCount;
                turnCount++;
            }

            Log.LogInformation($"Submitting {jobCount} jobs to Conversations...");

            return(await SubmitConversationsAsync(data).ConfigureAwait(false));
        }
        public async Task <IEnumerable <string> > AddAudioLevelEntitiesAsync(
            SpeechTranscript speechTranscript,
            SentimentAnalysisSetting sentimentAnalysisSetting,
            PiiRedactionSetting piiRedactionSetting)
        {
            speechTranscript = speechTranscript ?? throw new ArgumentNullException(nameof(speechTranscript));

            var errors = new List <string>();

            if (sentimentAnalysisSetting != SentimentAnalysisSetting.AudioLevel && piiRedactionSetting != PiiRedactionSetting.UtteranceAndAudioLevel)
            {
                return(errors);
            }

            // Remove other nBests if pii is redacted
            if (piiRedactionSetting != PiiRedactionSetting.None)
            {
                speechTranscript.RecognizedPhrases.ToList().ForEach(phrase =>
                {
                    if (phrase.NBest != null && phrase.NBest.Any())
                    {
                        var firstNBest = phrase.NBest.First();
                        phrase.NBest   = new[] { firstNBest };
                    }
                });
            }

            var documents = speechTranscript.CombinedRecognizedPhrases.Where(r => !string.IsNullOrEmpty(r.Display)).Select(r => new TextDocumentInput($"{r.Channel}", r.Display)
            {
                Language = Locale
            });

            var actions = new TextAnalyticsActions
            {
                DisplayName = "IngestionClient"
            };

            if (sentimentAnalysisSetting == SentimentAnalysisSetting.AudioLevel)
            {
                actions.AnalyzeSentimentActions = new List <AnalyzeSentimentAction>()
                {
                    new AnalyzeSentimentAction()
                };
            }

            if (piiRedactionSetting == PiiRedactionSetting.UtteranceAndAudioLevel)
            {
                var action = new RecognizePiiEntitiesAction();

                if (!string.IsNullOrEmpty(FetchTranscriptionEnvironmentVariables.PiiCategories))
                {
                    var piiEntityCategories = FetchTranscriptionEnvironmentVariables.PiiCategories.Split(",").Select(c => new PiiEntityCategory(c));

                    foreach (var category in piiEntityCategories)
                    {
                        action.CategoriesFilter.Add(category);
                    }
                }

                actions.RecognizePiiEntitiesActions = new List <RecognizePiiEntitiesAction>()
                {
                    action
                };
            }

            var (sentimentResults, piiResults, requestErrors) = await this.GetDocumentResultsAsync(documents, actions).ConfigureAwait(false);

            errors.AddRange(requestErrors);

            foreach (var combinedRecognizedPhrase in speechTranscript.CombinedRecognizedPhrases)
            {
                var channel         = combinedRecognizedPhrase.Channel;
                var sentimentResult = sentimentResults.Where(document => document.Id.Equals($"{channel}", StringComparison.OrdinalIgnoreCase)).SingleOrDefault();

                if (sentimentResult != null)
                {
                    combinedRecognizedPhrase.Sentiment = new Sentiment()
                    {
                        Negative = sentimentResult.DocumentSentiment.ConfidenceScores.Negative,
                        Positive = sentimentResult.DocumentSentiment.ConfidenceScores.Positive,
                        Neutral  = sentimentResult.DocumentSentiment.ConfidenceScores.Neutral,
                    };
                }

                var piiResult = piiResults.Where(document => document.Id.Equals($"{channel}", StringComparison.OrdinalIgnoreCase)).SingleOrDefault();
                if (piiResult != null)
                {
                    var redactedText = piiResult.Entities.RedactedText;

                    combinedRecognizedPhrase.Display   = redactedText;
                    combinedRecognizedPhrase.ITN       = string.Empty;
                    combinedRecognizedPhrase.MaskedITN = string.Empty;
                    combinedRecognizedPhrase.Lexical   = string.Empty;

                    var phrases = speechTranscript.RecognizedPhrases.Where(phrase => phrase.Channel == channel);

                    var startIndex = 0;
                    foreach (var phrase in phrases)
                    {
                        var firstNBest = phrase.NBest.FirstOrDefault();

                        if (firstNBest != null && !string.IsNullOrEmpty(firstNBest.Display))
                        {
                            firstNBest.Display   = redactedText.Substring(startIndex, firstNBest.Display.Length);
                            firstNBest.ITN       = string.Empty;
                            firstNBest.MaskedITN = string.Empty;
                            firstNBest.Lexical   = string.Empty;

                            startIndex += firstNBest.Display.Length + 1;
                        }
                    }
                }
            }

            return(errors);
        }