Example #1
        private static Task ProcessTextAssistResponse(AssistResponse response, CommandContext ctx, ref List<byte> audioOut)
        {
            ctx.LogInfo(
                $"GoogleAssistant: Received response: Event Type: {response.EventType.ToString()}, Debug Info: {response.DebugInfo}, Size: {response.CalculateSize()}");

            var tasks = new List<Task>();

            if (!string.IsNullOrWhiteSpace(response.DialogStateOut?.SupplementalDisplayText))
            {
                ctx.LogInfo($"GoogleAssistant: Received supplemental text.");

                tasks.Add(Task.Run(() => ctx.RespondAsync(response.DialogStateOut?.SupplementalDisplayText)));
            }

            if (response.ScreenOut != null)
            {
                ctx.LogInfo($"GoogleAssistant: Received screen data.");

                tasks.Add(Task.Run(() => ctx.RespondWithHtmlAsImage(response.ScreenOut.Data.ToStringUtf8())));
            }

            if (response.AudioOut?.AudioData != null)
            {
                ctx.LogInfo($"GoogleAssistant: Received audio data.");

                audioOut.AddRange(response.AudioOut.AudioData.ToByteArray());
            }

            return Task.WhenAll(tasks);
        }
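
A minimal sketch of how ProcessTextAssistResponse could be driven, assuming the generated Google.Assistant.Embedded.V1Alpha2 gRPC client (EmbeddedAssistant.EmbeddedAssistantClient); the channel, the ctx parameter and the query string are placeholders, not part of the original example:

        // Requires: Google.Assistant.Embedded.V1Alpha2, Grpc.Core, Google.Protobuf
        private static async Task RunTextQueryAsync(Channel channel, CommandContext ctx, string query)
        {
            var client = new EmbeddedAssistant.EmbeddedAssistantClient(channel);

            using (var call = client.Assist())
            {
                // A text query needs only a single config message on the request stream.
                await call.RequestStream.WriteAsync(new AssistRequest
                {
                    Config = new AssistConfig
                    {
                        TextQuery = query,
                        AudioOutConfig = new AudioOutConfig
                        {
                            Encoding = AudioOutConfig.Types.Encoding.Linear16,
                            SampleRateHertz = 16000,
                            VolumePercentage = 100
                        },
                        DialogStateIn = new DialogStateIn { LanguageCode = "en-US" },
                        DeviceConfig = new DeviceConfig
                        {
                            DeviceId = "example-device",      // placeholder
                            DeviceModelId = "example-model"   // placeholder
                        }
                    }
                });
                await call.RequestStream.CompleteAsync();

                // Feed every streamed AssistResponse through the handler above.
                var audioOut = new List<byte>();
                while (await call.ResponseStream.MoveNext())
                {
                    var handled = ProcessTextAssistResponse(call.ResponseStream.Current, ctx, ref audioOut);
                    await handled;
                }
            }
        }
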
Example #2
            public void InvokeDemo()
            {
                var r = new AssistResponse()
                {
                    testresponse = "test"
                };

                _del?.Invoke(r);
            }
Example #3
        private async Task WaitForResponse()
        {
            var response = await responseStream.MoveNext();

            if (response)
            {
                AssistResponse currentResponse = responseStream.Current;
                OnNext(currentResponse);
                await WaitForResponse();
            }
        }
Example #4
        private static Task ProcessVoiceAssistResponse(AssistResponse response, CommandContext ctx, ref List<byte> audioOut)
        {
            var tasks = new List<Task>();

            try
            {
                if (response.EventType == AssistResponse.Types.EventType.EndOfUtterance)
                {
                    ctx.LogInfo($"GoogleAssistant: Utterance detected: Event Type: {response.EventType.ToString()}, Supplemental Text: {response.DialogStateOut?.SupplementalDisplayText}, Transcript: {response.SpeechResults?.FirstOrDefault()?.Transcript} , Debug Info: {response.DebugInfo?.ToString()}");

                    tasks.Add(ctx.Client.SendMessageAsync(ctx.Channel, $"{ctx.User.Username}, utterance detected: {response.SpeechResults?.FirstOrDefault()?.Transcript}, Screen Out: {response.ScreenOut?.Data}"));
                }
                else
                {
                    ctx.LogInfo($"GoogleAssistant: Received response: Event Type: {response.EventType.ToString()}, Microphone Mode: {response.DialogStateOut?.MicrophoneMode}, Debug Info: {response.DebugInfo}");

                    if (!string.IsNullOrWhiteSpace(response.DialogStateOut?.SupplementalDisplayText))
                    {
                        ctx.LogInfo($"GoogleAssistant: Received supplemental text.");

                        tasks.Add(Task.Run(() => ctx.RespondAsync(response.DialogStateOut?.SupplementalDisplayText)));
                    }

                    if (response.ScreenOut != null)
                    {
                        ctx.LogInfo($"GoogleAssistant: Received screen data.");

                        tasks.Add(Task.Run(() => ctx.RespondWithHtmlAsImage(response.ScreenOut.Data.ToStringUtf8())));
                    }

                    if (response.AudioOut?.AudioData != null)
                    {
                        ctx.LogInfo($"GoogleAssistant: Received audio data.");

                        audioOut.AddRange(response.AudioOut.AudioData.ToByteArray());
                    }
                }
            }
            catch (RpcException ex)
            {
                ctx.LogError($"GoogleAssistant: Exception: {ex.StatusCode}, Detail: {ex.Status.Detail}, Message: {ex.Message}.");
            }

            return Task.WhenAll(tasks);
        }
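
The voice path also needs a sending side. A hedged sketch, assuming the same generated v1alpha2 client, a pre-captured LINEAR16/16 kHz audio source exposed as a Stream, and placeholder device identifiers; it writes all the audio first and then drains the responses, which suits short, already-recorded utterances rather than live microphone streaming:

        // Requires: Google.Assistant.Embedded.V1Alpha2, Grpc.Core, Google.Protobuf
        private static async Task StreamVoiceQueryAsync(EmbeddedAssistant.EmbeddedAssistantClient client, Stream capturedAudio, CommandContext ctx)
        {
            using (var call = client.Assist())
            {
                // First request: configuration only.
                await call.RequestStream.WriteAsync(new AssistRequest
                {
                    Config = new AssistConfig
                    {
                        AudioInConfig = new AudioInConfig
                        {
                            Encoding = AudioInConfig.Types.Encoding.Linear16,
                            SampleRateHertz = 16000
                        },
                        AudioOutConfig = new AudioOutConfig
                        {
                            Encoding = AudioOutConfig.Types.Encoding.Linear16,
                            SampleRateHertz = 16000,
                            VolumePercentage = 100
                        },
                        DialogStateIn = new DialogStateIn { LanguageCode = "en-US" },
                        DeviceConfig = new DeviceConfig
                        {
                            DeviceId = "example-device",      // placeholder
                            DeviceModelId = "example-model"   // placeholder
                        }
                    }
                });

                // Subsequent requests: raw audio chunks.
                var buffer = new byte[3200];
                int read;
                while ((read = await capturedAudio.ReadAsync(buffer, 0, buffer.Length)) > 0)
                {
                    await call.RequestStream.WriteAsync(new AssistRequest
                    {
                        AudioIn = ByteString.CopyFrom(buffer, 0, read)
                    });
                }
                await call.RequestStream.CompleteAsync();

                // Hand each response to the handler above.
                var audioOut = new List<byte>();
                while (await call.ResponseStream.MoveNext())
                {
                    var handled = ProcessVoiceAssistResponse(call.ResponseStream.Current, ctx, ref audioOut);
                    await handled;
                }
            }
        }
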
Example #5
        private string ResponseToOutput(AssistResponse currentResponse)
        {
            if (currentResponse.AudioOut != null)
            {
                return($"Response - AudioOut {currentResponse.AudioOut.AudioData.Length}");
            }
            //if (currentResponse..Error != null)
            //    return $"Response - Error:{currentResponse.Error}";
            if (currentResponse.DialogStateOut != null)
            {
                return($"Response - Result:{currentResponse.DialogStateOut}");
            }
            if (currentResponse.EventType != AssistResponse.Types.EventType.Unspecified)
            {
                return($"Response - EventType:{currentResponse.EventType}");
            }

            return("Response Empty?");
        }
Example #6
        private void OnNext(AssistResponse value)
        {
            if (value.AudioOut != null)
            {
                using (MemoryStream stream = new MemoryStream())
                {
                    using (BinaryWriter writer = new BinaryWriter(stream))
                    {
                        writer.Write(value.AudioOut.AudioData.ToByteArray());
                    }
                    currentAudioResponse.AddRange(stream.ToArray());
                }
            }

            if (value.DialogStateOut != null)
            {
                currentConversationState = value.DialogStateOut.ConversationState;

                if (!string.IsNullOrEmpty(value.DialogStateOut.SupplementalDisplayText))
                {
                    currentTextResponse = value.DialogStateOut.SupplementalDisplayText;
                }
            }
        }
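
The ConversationState captured in OnNext is what lets the next request continue the same dialog. A small sketch (field names from the v1alpha2 proto; the surrounding request plumbing is assumed) of building the follow-up config:

        // Sketch: build the config for the next turn using the state saved in OnNext,
        // so the Assistant treats it as a follow-up in the same conversation.
        private AssistConfig BuildFollowUpConfig()
        {
            return new AssistConfig
            {
                AudioInConfig = new AudioInConfig
                {
                    Encoding = AudioInConfig.Types.Encoding.Linear16,
                    SampleRateHertz = 16000
                },
                AudioOutConfig = new AudioOutConfig
                {
                    Encoding = AudioOutConfig.Types.Encoding.Linear16,
                    SampleRateHertz = 16000,
                    VolumePercentage = 100
                },
                DialogStateIn = new DialogStateIn
                {
                    LanguageCode = "en-US",
                    // Echo back the previous DialogStateOut.ConversationState; protobuf
                    // bytes fields reject null, hence the ByteString.Empty fallback.
                    ConversationState = currentConversationState ?? ByteString.Empty
                },
                DeviceConfig = new DeviceConfig
                {
                    DeviceId = "example-device",      // placeholder
                    DeviceModelId = "example-model"   // placeholder
                }
            };
        }
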
Example #7
        private async Task WaitForResponse()
        {
            var response = await _responseStream.MoveNext();

            if (response)
            {
                // multiple response elements are received per response, each can contain one of the Result, AudioOut or EventType fields
                AssistResponse currentResponse = _responseStream.Current;

                // Debug output the whole response, useful for... debugging.
                OnDebug?.Invoke(ResponseToOutput(currentResponse));

                // EndOfUtterance, Assistant has recognised something so stop sending audio
                if (currentResponse.EventType == AssistResponse.Types.EventType.EndOfUtterance)
                {
                    ResetSendingAudio(false);
                }

                if (currentResponse.AudioOut != null)
                {
                    _audioOut.AddBytesToPlay(currentResponse.AudioOut.AudioData.ToByteArray());
                }

                if (currentResponse.DialogStateOut != null)
                {
                    // if the assistant has recognised something, flag this so the failure notification isn't played
                    if (!String.IsNullOrEmpty(currentResponse.DialogStateOut.SupplementalDisplayText))
                    {
                        _assistantResponseReceived = true;
                    }

                    switch (currentResponse.DialogStateOut.MicrophoneMode)
                    {
                    // this is the end of the current conversation
                    case DialogStateOut.Types.MicrophoneMode.CloseMicrophone:
                        StopRecording();

                        // play failure notification if nothing recognised.
                        if (!_assistantResponseReceived)
                        {
                            _audioOut.PlayNegativeNotification();
                            OnAssistantStateChanged?.Invoke(AssistantState.Inactive);
                        }
                        break;

                    case DialogStateOut.Types.MicrophoneMode.DialogFollowOn:
                        // stop recording as the follow on is in a whole new conversation, so may as well restart the same flow
                        StopRecording();
                        _followOn = true;
                        break;
                    }
                }

                await WaitForResponse();
            }
            else
            {
                OnDebug?.Invoke("Response End");
                // if we've received any audio... play it.
                _audioOut.Play();
            }
        }
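
The tail call above only re-enters WaitForResponse after the current response has been fully handled, so it behaves like a loop. An equivalent iterative sketch against the same _responseStream (the per-response handling is collapsed into a hypothetical HandleResponse helper) avoids building up a chain of async state machines on long responses:

        private async Task WaitForResponsesLoopAsync()
        {
            while (await _responseStream.MoveNext())
            {
                // Same per-response handling as the body of WaitForResponse above.
                HandleResponse(_responseStream.Current);
            }

            OnDebug?.Invoke("Response End");
            // if we've received any audio... play it.
            _audioOut.Play();
        }
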
Example #8
        private void Connection_DataReceived(object sender, Samsung.Sap.DataReceivedEventArgs e)
        {
            AssistResponse ar = AssistResponse.Parser.ParseFrom(e.Data);

            if (ar.SpeechResults != null)
            {
                if (ar.SpeechResults.Any() && ar.SpeechResults.First().Stability > 0.01)
                {
                    label.Text = ar.SpeechResults.First().Transcript;

                    if (ar.SpeechResults.First().Stability == 1)
                    {
                        audioRecorder.StopRecording();
                        updateLabel(ar.SpeechResults.First().Transcript);

                        if (File.Exists(filePath))
                        {
                            File.Delete(filePath);
                        }
                        fs = File.Create(filePath);
                    }
                }
            }

            if (ar.DialogStateOut != null && ar.DialogStateOut.SupplementalDisplayText != "")
            {
                updateLabel(ar.DialogStateOut.SupplementalDisplayText);
            }

            if (ar.DialogStateOut != null && ar.DialogStateOut.VolumePercentage != 0)
            {
                int newVolumeLevel = Convert.ToInt32(15 * ar.DialogStateOut.VolumePercentage / 100);
                AudioManager.VolumeController.Level[AudioVolumeType.Media] = newVolumeLevel;
                actionButton.IsEnable = true;
            }

            if (ar.ScreenOut != null)
            {
                updateLabel(ar.ScreenOut.Data.ToStringUtf8());
            }

            if (ar.AudioOut != null && ar.AudioOut.AudioData.Length != 0)
            {
                try
                {
                    fs.Write(ar.AudioOut.AudioData.ToByteArray(), 0, ar.AudioOut.AudioData.Length);
                    if (fs.Length != 0)
                    {
                        fs.Flush();
                    }

                    if (!isPlaying && fs.Length >= 1600)
                    {
                        isPlaying = true;
                        audioPlayer.Play(fs.Name);
                        actionButton.IsEnable        = true;
                        actionButton.BackgroundColor = Color.Red;
                        actionButton.Text            = "Stop";
                    }
                }
                catch (System.ObjectDisposedException)
                {
                    Tizen.Log.Debug("AUDIO RESPONSE", "Tried to write to closed FileStream, Knownbug");
                    return;
                }
            }
        }
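
The handler above only shows the receiving side of the accessory link. The matching send side would serialise an AssistRequest to raw bytes for the channel; a hedged sketch in which SendToCompanion is a placeholder, since the actual Samsung.Sap send call is not part of the example:

        // Requires: Google.Assistant.Embedded.V1Alpha2, Google.Protobuf
        private void SendAudioChunk(byte[] pcmChunk)
        {
            var request = new AssistRequest
            {
                // Raw LINEAR16 audio captured from the watch microphone.
                AudioIn = ByteString.CopyFrom(pcmChunk)
            };

            // Protobuf messages flatten to a byte[] that can travel over the accessory
            // connection, mirroring AssistResponse.Parser.ParseFrom(e.Data) on receive.
            byte[] payload = request.ToByteArray();
            SendToCompanion(payload);   // placeholder transport call
        }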