/// <summary>
/// Processes ActivityReceived events.
/// Along with various info that can be found in an event, it also contains
/// a "HasAudio" flag that can be used to signal audio is present/ready for processing.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data; when <c>HasAudio</c> is true, <c>e.Audio</c> carries a TTS stream.</param>
private void DialogServiceConnector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    Debug.Log($"Activity received:\r\n {e.Activity} ");

    if (e.HasAudio)
    {
        // No placeholders in the message, so a plain literal is used (avoids a needless interpolation).
        Debug.Log("Audio received");
        var audio = e.Audio;

        // For sample purposes, just save the entire file and playback. (can improved with streaming)
        var buffer = new byte[800];
        uint bytesRead = 0;
        while ((bytesRead = audio.Read(buffer)) > 0)
        {
            byteStream.Write(buffer, 0, (int)bytesRead);
        }

        // Write a wav header on the final stream now that we know the full length
        if (byteStream.Length > 0)
        {
            // Mono, 16-bit, 16 kHz PCM — matches the Speech SDK's TTS output format.
            WaveAudioData.WriteWavHeader(finalStream, false, 1, 16, 16000, (int)byteStream.Length);
            byteStream.WriteTo(finalStream);
            audioData = new WaveAudioData(finalStream.ToArray());

            // Reset both scratch streams for the next activity's audio.
            ClearStream(byteStream);
            ClearStream(finalStream);
        }
    }
}
/// <summary>
/// Handles a bot activity: accumulates the elapsed response time, records the reply
/// in the shared reply list, and — when TTS audio is attached — downloads it to a
/// WAV file and stores its duration on the recorded reply.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data containing the activity JSON and optional TTS audio.</param>
private void SpeechBotConnector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    var json = e.Activity;
    var activity = JsonConvert.DeserializeObject<Activity>(json);

    // Fold the time measured since the last restart into the running total.
    this.stopWatch.Stop();
    this.elapsedTime += (int)this.stopWatch.ElapsedMilliseconds;

    int replyIndex;
    lock (this.BotReplyList)
    {
        this.BotReplyList.Add(new BotReply(activity, this.elapsedTime, false));
        replyIndex = this.BotReplyList.Count - 1;
    }

    if (e.HasAudio)
    {
        // Track the in-flight download count around the (blocking) WAV write.
        this.ttsStreamDownloadCount++;
        this.indexActivityWithAudio++;
        int audioDurationMs = this.WriteAudioToWAVfile(e.Audio, this.baseFileName, this.dialogID, this.turnID, this.indexActivityWithAudio - 1);
        this.ttsStreamDownloadCount--;

        lock (this.BotReplyList)
        {
            this.BotReplyList[replyIndex].TTSAudioDuration = audioDurationMs;
        }
    }

    // Begin timing the interval until the next activity arrives.
    this.stopWatch.Restart();
}
/// <summary>
/// Handles a bot activity: streams any attached TTS audio into the playback queue
/// (playback can start while the download is still in progress), renders adaptive-card
/// attachments, and records the activity and its message text.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data containing the activity JSON and optional TTS audio.</param>
private void Connector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    var json = e.Activity;
    var activity = JsonConvert.DeserializeObject<Activity>(json);

    if (e.HasAudio)
    {
        var audio = e.Audio;
        var stream = new ProducerConsumerStream();

        // Copy the TTS stream in the background; the producer/consumer stream lets
        // the player consume audio before the full download finishes.
        Task.Run(() =>
        {
            var buffer = new byte[800];
            uint bytesRead = 0;
            while ((bytesRead = audio.Read(buffer)) > 0)
            {
                stream.Write(buffer, 0, (int)bytesRead);
            }
        });

        var channelData = activity.GetChannelData<SpeechChannelData>();
        var id = channelData?.ConversationalAiData?.RequestInfo?.InteractionId;
        if (!string.IsNullOrEmpty(id))
        {
            System.Diagnostics.Debug.WriteLine($"Expecting TTS stream {id}");
        }

        // Mono, 16-bit, 16 kHz PCM — the Speech SDK's TTS output format.
        var wavStream = new RawSourceWaveStream(stream, new WaveFormat(16000, 16, 1));
        playbackStreams.Enqueue(new WavQueueEntry(id, false, stream, wavStream));

        if (player.PlaybackState != PlaybackState.Playing)
        {
            Task.Run(() => PlayFromAudioQueue());
        }
    }

    var cardsToBeRendered = new List<AdaptiveCard>();
    if (activity.Attachments?.Any() is true)
    {
        cardsToBeRendered = activity.Attachments
            .Where(x => x.ContentType == AdaptiveCard.ContentType)
            .Select(x =>
            {
                // Guard the parse so one malformed card attachment cannot throw
                // out of this event handler (matches the sibling WPF handler).
                try
                {
                    var parseResult = AdaptiveCard.FromJson(x.Content.ToString());
                    return parseResult.Card;
                }
                catch (Exception ex)
                {
                    System.Diagnostics.Debug.WriteLine($"Failed to parse adaptive card: {ex.Message}");
                    return null;
                }
            })
            .Where(x => x != null)
            .ToList();
    }

    activities.Add(new ActivityRecord(json, activity, Sender.Bot));
    messages.Add(new MessageRecord(activity.Text, Sender.Bot));
}
/// <summary>
/// Handles a bot activity: fully buffers any spoken TTS audio and queues it for
/// playback, then displays custom event activities or message activities in the
/// conversation, flagging when the bot expects further user input.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data containing the activity JSON and optional TTS audio.</param>
private async void Connector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    var json = e.Activity;
    var activity = JsonConvert.DeserializeObject<Activity>(json);

    if (e.HasAudio && activity.Speak != null)
    {
        var audio = e.Audio;
        using var stream = new MemoryStream();

        // Drain the TTS stream completely on a worker thread before queuing it.
        await Task.Run(() =>
        {
            var chunk = new byte[800];
            uint read = 0;
            while ((read = audio.Read(chunk)) > 0)
            {
                stream.Write(chunk, 0, (int)read);
            }
        });

        // Mono, 16-bit, 16 kHz PCM — the Speech SDK's TTS output format.
        var audioBytes = stream.ToArray();
        var wavStream = new RawSourceWaveStream(audioBytes, 0, audioBytes.Length, new WaveFormat(16000, 16, 1));
        playbackStreams.Enqueue(wavStream);

        if (player.PlaybackState != PlaybackState.Playing)
        {
            _ = Task.Run(() => PlayFromAudioQueue());
        }
    }

    // Checks whether it is a custom activity sent to client.
    if (activity.Type == "event" && activity.Value != null)
    {
        AddMessage(new MessageDisplay($"{activity.Name}{Environment.NewLine}{activity.Value}", Sender.Bot));
    }
    else if (activity.Type == ActivityTypes.Message)
    {
        AddMessage(new MessageDisplay(activity.Text, Sender.Bot));

        if (activity.InputHint == InputHints.ExpectingInput)
        {
            // The activity expects a further user input.
            waitingForUserInput = true;
        }
    }
}
/// <summary>
/// Records a bot reply together with the elapsed time since the turn started
/// (adjusted for real-time audio input so UPL is measured from end of speech),
/// then downloads any attached TTS audio to a WAV file and stores its duration
/// on the recorded reply.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data containing the activity JSON and optional TTS audio.</param>
private void SpeechBotConnector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    var json = e.Activity;
    var activity = JsonConvert.DeserializeObject<Activity>(json);

    // TODO: When there is TTS audio, get the elapsed time only after first TTS buffer was received
    int elapsedTime = (int)this.stopWatch.ElapsedMilliseconds;
    if (this.appsettings.RealTimeAudio)
    {
        // For WAV file input, the timer starts on SessionStart event. If we consume the audio from the input stream at real-time, then by subtracting
        // the speech duration here, its as if we started the timer at the point that speech stopped. This is what we want to accurately measure UPL.
        // For any other input (text or Activity), the speechDuration value is not relevant and should be zero at this point.
        elapsedTime -= this.speechDuration;
    }

    Trace.TraceInformation($"[{DateTime.Now.ToString("h:mm:ss tt", CultureInfo.CurrentCulture)}] Activity received, elapsedTime = {elapsedTime}, speechDuration = {this.speechDuration}");

    int replyIndex;
    lock (this.BotReplyList)
    {
        this.BotReplyList.Add(new BotReply(activity, elapsedTime, false));
        replyIndex = this.BotReplyList.Count - 1;
    }

    if (e.HasAudio)
    {
        // Track the in-flight download count around the (blocking) WAV write.
        this.ttsStreamDownloadCount++;
        this.indexActivityWithAudio++;
        int ttsDuration = this.WriteAudioToWAVfile(e.Audio, this.baseFileName, this.dialogID, this.turnID, this.indexActivityWithAudio - 1);
        this.ttsStreamDownloadCount--;

        lock (this.BotReplyList)
        {
            this.BotReplyList[replyIndex].TTSAudioDuration = ttsDuration;
        }
    }
}
/// <summary>
/// Displays incoming bot activities in the UI and, when TTS audio is attached,
/// hands the stream to the audio graph and starts the frame input node.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data containing the activity JSON and optional TTS audio.</param>
private async void DialogServiceConnector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    var json = e.Activity;
    var activity = JsonConvert.DeserializeObject<Activity>(json);

    if (e.HasAudio)
    {
        UpdateUI(() => this.Messages.Add(new MessageDisplay("Audio received", Sender.Other)));

        // Begin playback: the frame input node pulls from audioStream.
        audioStream = e.Audio;
        frameInputNode.Start();
    }

    await UpdateActivity(json);

    UpdateUI(() => this.Activities.Add(new ActivityDisplay(json, activity, Sender.Bot)));
}
/// <summary>
/// Handles a bot activity on the Windows client: streams any attached TTS audio into
/// the playback queue, renders adaptive-card attachments, and updates the conversation
/// history on the UI thread, scrolling the newest message into view.
/// </summary>
/// <param name="sender">The connector that raised the event.</param>
/// <param name="e">Event data containing the activity JSON and optional TTS audio.</param>
private void Connector_ActivityReceived(object sender, ActivityReceivedEventArgs e)
{
    var json = e.Activity;
    var activity = JsonConvert.DeserializeObject<Activity>(json);

    if (e.HasAudio && activity.Speak != null)
    {
        var audio = e.Audio;
        var stream = new ProducerConsumerStream();

        // Copy the TTS stream in the background. Do NOT block on this task:
        // ProducerConsumerStream supports concurrent write/read, so playback can
        // begin while the download is still in progress. (The previous .Wait()
        // froze the calling thread until the entire TTS stream was received.)
        Task.Run(() =>
        {
            var buffer = new byte[800];
            uint bytesRead = 0;
            while ((bytesRead = audio.Read(buffer)) > 0)
            {
                stream.Write(buffer, 0, (int)bytesRead);
            }
        });

        var channelData = activity.GetChannelData<SpeechChannelData>();
        var id = channelData?.ConversationalAiData?.RequestInfo?.InteractionId;
        if (!string.IsNullOrEmpty(id))
        {
            System.Diagnostics.Debug.WriteLine($"Expecting TTS stream {id}");
        }

        // Mono, 16-bit, 16 kHz PCM — the Speech SDK's TTS output format.
        var wavStream = new RawSourceWaveStream(stream, new WaveFormat(16000, 16, 1));
        this.playbackStreams.Enqueue(new WavQueueEntry(id, false, stream, wavStream));

        if (this.player.PlaybackState != PlaybackState.Playing)
        {
            Task.Run(() => this.PlayFromAudioQueue());
        }
    }

    var cardsToBeRendered = new List<AdaptiveCard>();
    if (activity.Attachments?.Any() is true)
    {
        cardsToBeRendered = activity.Attachments
            .Where(x => x.ContentType == AdaptiveCard.ContentType)
            .Select(x =>
            {
                // Guard the parse so one malformed card attachment cannot throw
                // out of this event handler; failed parses are surfaced and skipped.
                try
                {
                    var parseResult = AdaptiveCard.FromJson(x.Content.ToString());
                    return parseResult.Card;
                }
#pragma warning disable CA1031 // Do not catch general exception types
                catch (Exception ex)
                {
                    this.ShowException(ex);
                    return null;
                }
#pragma warning restore CA1031 // Do not catch general exception types
            })
            .Where(x => x != null)
            .ToList();
    }

    this.RunOnUiThread(() =>
    {
        this.Activities.Add(new ActivityDisplay(json, activity, Sender.Bot));

        if (activity.Type == ActivityTypes.Message || cardsToBeRendered?.Any() == true)
        {
            var renderedCards = cardsToBeRendered.Select(x =>
            {
                var rendered = this.renderer.RenderCard(x);
                rendered.OnAction += this.RenderedCard_OnAction;
                rendered.OnMediaClicked += this.RenderedCard_OnMediaClicked;
                return rendered?.FrameworkElement;
            });

            this.Messages.Add(new MessageDisplay(activity.Text, Sender.Bot, renderedCards));

            // Keep the newest message visible.
            this.ConversationView.ConversationHistory.ScrollIntoView(
                this.ConversationView.ConversationHistory.Items[this.ConversationView.ConversationHistory.Items.Count - 1]);
        }
    });
}