private void StopRecording()
{
    if (_waveIn != null)
    {
        OnDebug?.Invoke("Stop Recording");

        _waveIn.StopRecording();
        _waveIn.Dispose();
        _waveIn = null;

        OnAssistantStateChanged?.Invoke(AssistantState.Processing);
        OnDebug?.Invoke("Send Request Complete");

        // No more audio will be sent; complete the gRPC request stream so the Assistant
        // knows the request has finished. (Fire-and-forget: the task isn't awaited because
        // this method is synchronous.)
        _requestStreamAvailable = false;
        _requestStream.CompleteAsync();
    }
}
private void OnAudioPlaybackStateChanged(bool started)
{
    if (started)
    {
        OnAssistantStateChanged?.Invoke(AssistantState.Speaking);
    }
    else
    {
        // stopped
        if (_followOn)
        {
            NewConversation();
        }
        else
        {
            OnAssistantStateChanged?.Invoke(AssistantState.Inactive);
        }
    }
}
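// How the started flag above gets raised isn't shown in this section; presumably the audio-output
// helper fires an event when playback begins and ends. A hypothetical wiring sketch (the event
// name on _audioOut is an assumption, not the class's confirmed API):
//
//     _audioOut.OnAudioPlaybackStateChanged += OnAudioPlaybackStateChanged;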
public async void NewConversation()
{
    try
    {
        OnAssistantStateChanged?.Invoke(AssistantState.Listening);

        _followOn = false;
        _assistantResponseReceived = false;

        AsyncDuplexStreamingCall<ConverseRequest, ConverseResponse> converse = _assistant.Converse();
        _requestStream = converse.RequestStream;
        _responseStream = converse.ResponseStream;

        logger.Debug("New Conversation - New Config Request");
        OnDebug?.Invoke("New Conversation - New Config Request");

        // Once this opening config request is issued, if it's not followed by audio an error of
        // 'code: 14, message: Service Unavailable.' comes back; really not helpful, Google!
        await _requestStream.WriteAsync(CreateNewRequest());

        _requestStreamAvailable = true;
        ResetSendingAudio(true);

        // Note: recreate the WaveIn each time, otherwise the recording just stops on follow-ups.
        _waveIn = new WaveIn { WaveFormat = new WaveFormat(Const.SampleRateHz, 1) };
        _waveIn.DataAvailable += ProcessInAudio;
        _waveIn.StartRecording();

        await WaitForResponse();
    }
    catch (Exception ex)
    {
        logger.Error(ex.Message);
        Console.WriteLine(ex.Message);
        OnDebug?.Invoke($"Error {ex.Message}");
        StopRecording();
    }
}
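// CreateNewRequest() isn't shown in this section. A minimal sketch of what the opening,
// config-only request could look like against the v1alpha1 Converse API, assuming LINEAR16
// audio at Const.SampleRateHz on both input and output (the volume value is illustrative):
private ConverseRequest CreateNewRequest()
{
    return new ConverseRequest
    {
        Config = new ConverseConfig
        {
            AudioInConfig = new AudioInConfig
            {
                Encoding = AudioInConfig.Types.Encoding.Linear16,
                SampleRateHertz = Const.SampleRateHz
            },
            AudioOutConfig = new AudioOutConfig
            {
                Encoding = AudioOutConfig.Types.Encoding.Linear16,
                SampleRateHertz = Const.SampleRateHz,
                VolumePercentage = 75
            }
        }
    };
}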
private async Task WaitForResponse()
{
    var response = await _responseStream.MoveNext();
    if (response)
    {
        // Multiple response elements are received per response; each can contain one of the
        // Result, AudioOut or EventType fields.
        ConverseResponse currentResponse = _responseStream.Current;

        // Debug output the whole response, useful for... debugging.
        OnDebug?.Invoke(ResponseToOutput(currentResponse));

        // EndOfUtterance: the Assistant has recognised something, so stop sending audio.
        if (currentResponse.EventType == ConverseResponse.Types.EventType.EndOfUtterance)
        {
            ResetSendingAudio(false);
        }

        if (currentResponse.AudioOut != null)
        {
            _audioOut.AddBytesToPlay(currentResponse.AudioOut.AudioData.ToByteArray());
        }

        if (currentResponse.Result != null)
        {
            // If the Assistant has recognised something, flag it so the failure notification isn't played.
            if (!String.IsNullOrEmpty(currentResponse.Result.SpokenRequestText))
            {
                _assistantResponseReceived = true;
            }

            switch (currentResponse.Result.MicrophoneMode)
            {
                // This is the end of the current conversation.
                case ConverseResult.Types.MicrophoneMode.CloseMicrophone:
                    StopRecording();

                    // Play the failure notification if nothing was recognised.
                    if (!_assistantResponseReceived)
                    {
                        _audioOut.PlayNegativeNotification();
                        OnAssistantStateChanged?.Invoke(AssistantState.Inactive);
                    }
                    break;
                case ConverseResult.Types.MicrophoneMode.DialogFollowOn:
                    // Stop recording; the follow-on is a whole new conversation, so we may as well restart the same flow.
                    StopRecording();
                    _followOn = true;
                    break;
            }
        }

        await WaitForResponse();
    }
    else
    {
        OnDebug?.Invoke("Response End");

        // If we've received any audio... play it.
        _audioOut.Play();
    }
}
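// ProcessInAudio and ResetSendingAudio aren't shown in this section either. A minimal sketch of
// how they might look, assuming a private bool _sendingAudio field gates whether captured buffers
// are forwarded (the field name is an assumption; ByteString comes from Google.Protobuf). Note
// that only one WriteAsync may be pending at a time on a gRPC request stream, so production code
// would need to serialise these writes rather than firing them straight from the event handler.
private void ResetSendingAudio(bool sending)
{
    _sendingAudio = sending;
}

private async void ProcessInAudio(object sender, WaveInEventArgs e)
{
    // Forward the captured microphone buffer to the Assistant while a conversation is open
    // and we haven't been told to stop sending audio (EndOfUtterance).
    if (_requestStreamAvailable && _sendingAudio)
    {
        await _requestStream.WriteAsync(new ConverseRequest
        {
            AudioIn = ByteString.CopyFrom(e.Buffer, 0, e.BytesRecorded)
        });
    }
}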