private void Start()
{
    // Linear16 is the encoding used for the .wav files this program works with,
    // and it needs a sample rate of 16000 Hz (16 kHz).
    outputAudioConfig = new OutputAudioConfig()
    {
        AudioEncoding = OutputAudioEncoding.Linear16,
        SampleRateHertz = 16000,
        SynthesizeSpeechConfig = new SynthesizeSpeechConfig()
        {
            Voice = new VoiceSelectionParams()
            {
                SsmlGender = SsmlVoiceGender.Female
            }
        }
    };

    // Defines what kind of audio bytes we are sending.
    inputAudioConfig = new InputAudioConfig()
    {
        AudioEncoding = AudioEncoding.Linear16,
        LanguageCode = "en-US",
        SampleRateHertz = 16000
    };

    // Wraps the audio config so the query input is treated as audio bytes.
    query = new QueryInput()
    {
        AudioConfig = inputAudioConfig
    };

    session = string.Format("projects/{0}/agent/sessions/{1}", agent, sessionId);
}
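The fields configured in Start() are not used there directly; below is a minimal sketch, assuming they later feed a detect-intent call through the Google.Cloud.Dialogflow.V2 SessionsClient. The SendAudio method name, the rawAudioBytes parameter, and the Debug.Log call are illustrative assumptions; only session, query, and outputAudioConfig come from the code above.

// A minimal sketch, not part of the original class.
// Assumes: using Google.Cloud.Dialogflow.V2; using Google.Protobuf; using System.Threading.Tasks;
private async Task SendAudio(byte[] rawAudioBytes)
{
    var client = SessionsClient.Create();

    var request = new DetectIntentRequest()
    {
        Session = session,                     // "projects/{agent}/agent/sessions/{sessionId}"
        QueryInput = query,                    // wraps inputAudioConfig, so Dialogflow expects audio
        OutputAudioConfig = outputAudioConfig, // request a synthesized spoken reply
        InputAudio = ByteString.CopyFrom(rawAudioBytes)
    };

    DetectIntentResponse response = await client.DetectIntentAsync(request);
    Debug.Log(response.QueryResult.FulfillmentText); // Unity logging; replace as needed
}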
// [START dialogflow_detect_intent_texttospeech_response]
public static int DetectIntentTexttospeechResponseFromTexts(string projectId,
                                                            string sessionId,
                                                            string[] texts,
                                                            string languageCode = "en-US")
{
    var client = SessionsClient.Create();

    // Ask Dialogflow to return a synthesized spoken version of the fulfillment text.
    var outputAudioConfig = new OutputAudioConfig
    {
        AudioEncoding = OutputAudioEncoding.Linear16,
        SynthesizeSpeechConfig = new SynthesizeSpeechConfig
        {
            SpeakingRate = 1,
            Pitch = 1,
            VolumeGainDb = 1
        },
        SampleRateHertz = 16000,
    };

    foreach (var text in texts)
    {
        var response = client.DetectIntent(new DetectIntentRequest
        {
            SessionAsSessionName = SessionName.FromProjectSession(projectId, sessionId),
            OutputAudioConfig = outputAudioConfig,
            QueryInput = new QueryInput()
            {
                Text = new TextInput()
                {
                    Text = text,
                    LanguageCode = languageCode
                }
            }
        });

        var queryResult = response.QueryResult;

        Console.WriteLine($"Query text: {queryResult.QueryText}");
        if (queryResult.Intent != null)
        {
            Console.WriteLine($"Intent detected: {queryResult.Intent.DisplayName}");
        }
        Console.WriteLine($"Intent confidence: {queryResult.IntentDetectionConfidence}");
        Console.WriteLine($"Fulfillment text: {queryResult.FulfillmentText}");
        Console.WriteLine();

        // Save the synthesized audio that accompanies the response.
        if (response.OutputAudio.Length > 0)
        {
            using (var output = File.Create("output.wav"))
            {
                response.OutputAudio.WriteTo(output);
            }
            Console.WriteLine("Audio content written to file \"output.wav\"");
        }
    }
    return 0;
}
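A minimal usage sketch for the sample above. The Main wrapper, project ID, and input phrases are placeholders and not part of the original sample; a fresh GUID keeps each run in its own Dialogflow session.

// Assumes: using System;
public static void Main(string[] args)
{
    string projectId = "my-gcp-project-id";        // placeholder, not a real project
    string sessionId = Guid.NewGuid().ToString();  // one conversation per run

    DetectIntentTexttospeechResponseFromTexts(
        projectId,
        sessionId,
        new[] { "hello", "book a room for tomorrow" });

    // Note: the loop above overwrites output.wav on every iteration, so only the
    // synthesized audio for the last phrase remains on disk when this returns.
}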
private async Task<string> DetectIntent(string inputAudio)
{
    InputAudioConfig audioConfig = ConfigureAudioInput();
    OutputAudioConfig outputConfig = ConfigureAudioOutput();

    QueryInput queryInput = new QueryInput()
    {
        AudioConfig = audioConfig,
    };

    DetectIntentContent detectIntentContent = new DetectIntentContent()
    {
        queryInput = queryInput,
        inputAudio = inputAudio,
        outputAudioConfig = outputConfig
    };

    // Serialize the request body with camelCase property names, as the REST API expects,
    // and strip the protobuf-only "inputCase" property, which the API does not accept.
    JsonSerializerSettings settings = new JsonSerializerSettings();
    settings.ContractResolver = new Newtonsoft.Json.Serialization.CamelCasePropertyNamesContractResolver();
    string content = JsonConvert.SerializeObject(detectIntentContent, settings);
    content = content.Replace(",\"inputCase\":1", "");

    // Call the Dialogflow v2 REST detectIntent endpoint with an OAuth bearer token.
    Uri requestUri = new Uri("https://dialogflow.googleapis.com/v2/projects/mistyapi-pxxkne/agent/sessions/123456:detectIntent");
    HttpRequestMessage request = new HttpRequestMessage(HttpMethod.Post, requestUri);
    request.Content = new StringContent(content, Encoding.UTF8);
    request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", _credential.AuthToken);

    HttpResponseMessage result = await _client.SendAsync(request);
    string stringResult = await result.Content.ReadAsStringAsync();

    // Notify subscribers with the raw JSON response.
    OnQueryResultReceived?.Invoke(this, stringResult);
    return stringResult;
}
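Because the method only forwards the raw JSON through OnQueryResultReceived, a subscriber still has to parse it. Below is a minimal sketch of one way to do that with Newtonsoft.Json; the handler name and the "response.wav" path are assumptions, while queryResult.fulfillmentText and outputAudio are the field names used by the Dialogflow v2 REST detectIntent response.

// A minimal sketch of a subscriber for OnQueryResultReceived, not part of the original class.
// Assumes: using System; using System.IO; using Newtonsoft.Json.Linq;
private void HandleQueryResult(object sender, string json)
{
    JObject response = JObject.Parse(json);

    // Text reply for the matched intent, if the agent defined one.
    string fulfillmentText = (string)response.SelectToken("queryResult.fulfillmentText");
    Console.WriteLine($"Fulfillment text: {fulfillmentText}");

    // outputAudio is base64-encoded because an outputAudioConfig was sent with the request.
    string outputAudio = (string)response.SelectToken("outputAudio");
    if (!string.IsNullOrEmpty(outputAudio))
    {
        File.WriteAllBytes("response.wav", Convert.FromBase64String(outputAudio));
    }
}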