/// <summary>
/// Smoke test: performs one synchronous SynthesizeSpeech call and prints the
/// raw response, then a success marker. Returns 0 on success.
/// </summary>
public static int Main(string[] args)
{
    // Build the client and a minimal request: fixed text, US English, MP3 output.
    TextToSpeechClient client = TextToSpeechClient.Create();
    var input = new SynthesisInput { Text = "test" };
    var voice = new VoiceSelectionParams { LanguageCode = "en-US" };
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };

    // Round-trip a single synthesis call and report the outcome.
    SynthesizeSpeechResponse response = client.SynthesizeSpeech(input, voice, audioConfig);
    Console.WriteLine(response);
    Console.WriteLine("Smoke test passed OK");
    return 0;
}
/// <summary>
/// Synthesizes <paramref name="text"/> to MP3 and returns the audio as an
/// open, rewound stream the caller owns and must dispose.
/// </summary>
/// <param name="text">Plain text to synthesize.</param>
/// <param name="language">BCP-47 language code; defaults to "en-US".</param>
/// <returns>A readable <see cref="Stream"/> positioned at the start of the MP3 data.</returns>
public Stream Test(string text, string language = "en-US")
{
    var client = TextToSpeechClient.Create();
    // The input to be synthesized, can be provided as text or SSML.
    var input = new SynthesisInput { Text = text };
    // Build the voice request.
    var voiceSelection = new VoiceSelectionParams { LanguageCode = language, SsmlGender = SsmlVoiceGender.Female };
    // Specify the type of audio file.
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    // Perform the text-to-speech request.
    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

    // BUG FIX: the original wrapped the MemoryStream in a using block, disposing it
    // before returning — the caller received a closed stream. It was also never
    // rewound, so even an undisposed stream would read zero bytes. Return an open
    // stream positioned at the start; ownership transfers to the caller.
    var output = new MemoryStream();
    response.AudioContent.WriteTo(output);
    output.Position = 0;
    return output;
}
/// <summary>
/// Verifies that the flattened SynthesizeSpeechAsync overload forwards its three
/// arguments as a single request to the gRPC client and returns its response.
/// </summary>
public async Task SynthesizeSpeechAsync()
{
    var mockGrpcClient = new Mock <TextToSpeech.TextToSpeechClient>(MockBehavior.Strict);

    // The request the client implementation is expected to assemble.
    var expectedRequest = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput(),
        Voice = new VoiceSelectionParams(),
        AudioConfig = new AudioConfig(),
    };
    var expectedResponse = new SynthesizeSpeechResponse
    {
        AudioContent = ByteString.CopyFromUtf8("16"),
    };

    // Strict mock: only the exact expected request may reach the gRPC layer.
    mockGrpcClient
        .Setup(x => x.SynthesizeSpeechAsync(expectedRequest, It.IsAny <CallOptions>()))
        .Returns(new Grpc.Core.AsyncUnaryCall <SynthesizeSpeechResponse>(Task.FromResult(expectedResponse), null, null, null, null));

    TextToSpeechClient client = new TextToSpeechClientImpl(mockGrpcClient.Object, null);

    // Exercise the flattened overload.
    var input = new SynthesisInput();
    var voice = new VoiceSelectionParams();
    var audioConfig = new AudioConfig();
    SynthesizeSpeechResponse response = await client.SynthesizeSpeechAsync(input, voice, audioConfig);

    Assert.Same(expectedResponse, response);
    mockGrpcClient.VerifyAll();
}
/// <summary>
/// Save-button handler: synthesizes the entered Japanese text with the selected
/// voice/rate/pitch, writes the MP3 under the "mp3" folder, and refreshes the UI.
/// </summary>
private void Button_Click_Save(object sender, RoutedEventArgs e)
{
    // Snapshot the current UI state.
    var text = SpeechText.Text;
    var useWaveNet = UseWaveNet.IsChecked ?? false;
    var name = useWaveNet ? "ja-JP-Wavenet-A" : "ja-JP-Standard-A";
    var speed = SpeedSlider.Value;
    var pitch = PitchSlider.Value;

    var input = new SynthesisInput { Text = text };
    var voiceSection = new VoiceSelectionParams
    {
        Name = name,
        LanguageCode = "ja-JP",
        SsmlGender = SsmlVoiceGender.Female,
    };
    var audioConfig = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Mp3,
        SpeakingRate = speed,
        Pitch = pitch,
    };

    var response = _Client.SynthesizeSpeech(input, voiceSection, audioConfig);

    // Persist the synthesized audio as mp3\<fileName>.
    var fileName = GetFileName();
    using (var output = File.Create($"mp3\\{fileName}"))
    {
        response.AudioContent.WriteTo(output);
    }

    // Refresh the display with the newly saved file.
    UpdateDisplayValues(fileName);
}
/// <summary>
/// Synthesizes <paramref name="text"/> with the configured language's voice and
/// returns the raw LINEAR16 audio bytes.
/// </summary>
private byte[] TextToSpeech(string text)
{
    // Assemble the full request up front instead of passing three loose arguments.
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = text },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = _language.SpeechCode,
            Name = _language.SpeechName,
        },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Linear16 },
    };

    return _speech.SynthesizeSpeech(request).AudioContent.ToByteArray();
}
/// <summary>
/// Synthesizes a canned "busy" message addressed to the name given as the first
/// command-line argument and writes it to output3.mp3.
/// </summary>
static void Main(string[] args)
{
    // BUG FIX: the original indexed args[0] unconditionally and threw
    // IndexOutOfRangeException when run with no arguments.
    if (args.Length == 0)
    {
        Console.Error.WriteLine("Usage: provide a name as the first argument.");
        return;
    }

    var client = TextToSpeechClient.Create();
    // The input to be synthesized, can be provided as text or SSML.
    var input = new SynthesisInput { Text = "Whoa whoa there, " + args[0] + "! Killer here is having a fit, so come back later" };
    // Build the voice request.
    var voiceSelection = new VoiceSelectionParams { LanguageCode = "en-US", SsmlGender = SsmlVoiceGender.Neutral };
    // Specify the type of audio file.
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    // Perform the text-to-speech request.
    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
    // Write the response to the output file.
    using (var output = File.Create("output3.mp3"))
    {
        response.AudioContent.WriteTo(output);
    }
    Console.WriteLine("Audio content written to file \"output3.mp3\"");
}
/// <summary>
/// Documentation sample: one synchronous SynthesizeSpeech call writing MP3 audio
/// to "sample.mp3". The "// Sample:" / "// End sample" markers delimit the
/// extracted snippet — do not remove them.
/// </summary>
public void SimpleExample()
{
    // Sample: SynthesizeSpeech
    // Additional: SynthesizeSpeech(SynthesisInput,VoiceSelectionParams,AudioConfig,CallSettings)
    TextToSpeechClient client = TextToSpeechClient.Create();
    // The input can be provided as text or SSML.
    SynthesisInput input = new SynthesisInput { Text = "This is a demonstration of the Google Cloud Text-to-Speech API" };
    // You can specify a particular voice, or ask the server to pick based
    // on specified criteria.
    VoiceSelectionParams voiceSelection = new VoiceSelectionParams { LanguageCode = "en-US", SsmlGender = SsmlVoiceGender.Female };
    // The audio configuration determines the output format and speaking rate.
    AudioConfig audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    SynthesizeSpeechResponse response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
    using (Stream output = File.Create("sample.mp3"))
    {
        // response.AudioContent is a ByteString. This can easily be converted into
        // a byte array or written to a stream.
        response.AudioContent.WriteTo(output);
    }
    // End sample
}
/// <summary>
/// Synthesizes <paramref name="text"/> using the voice described by
/// <paramref name="lang"/> and returns the MP3 bytes.
/// </summary>
private static byte[] TextToSpeech(string text, Language lang)
{
    // Build the complete request object in one expression.
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = text },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = lang.Speech,
            Name = lang.Name,
        },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 },
    };

    return _tts.SynthesizeSpeech(request).AudioContent.ToByteArray();
}
/// <summary>
/// Synthesizes <paramref name="message"/> with the Spanish standard voice and
/// returns the LINEAR16 audio as a MemoryStream, or null if the API call fails.
/// </summary>
private Stream GetAudioStream(string message)
{
    var input = new SynthesisInput { Text = message };
    var voiceSelection = new VoiceSelectionParams
    {
        LanguageCode = "es-ES",
        Name = "es-ES-Standard-A",
    };
    var audioConfig = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Linear16,
        SpeakingRate = Configuration.BasicConfiguration.MessageSpeed,
    };

    try
    {
        var response = Client.SynthesizeSpeech(input, voiceSelection, audioConfig);
        return new MemoryStream(response.AudioContent.ToByteArray());
    }
    catch (Exception ex)
    {
        // Log the failure and signal it to the caller with a null stream.
        TwitchBot.Instance.LogMessage(LogSeverity.Error, ex.Message, ex.StackTrace);
        return null;
    }
}
/// <summary>
/// Synthesizes speech synchronously: receive results after all text input
/// has been processed.
/// </summary>
/// <param name="input">
/// Required. The Synthesizer requires either plain text or SSML as input.
/// </param>
/// <param name="voice">
/// Required. The desired voice of the synthesized audio.
/// </param>
/// <param name="audioConfig">
/// Required. The configuration of the synthesized audio.
/// </param>
/// <param name="callSettings">If not null, applies overrides to this RPC call.</param>
/// <returns>A Task containing the RPC response.</returns>
// NOTE: generated-style flattening overload — it null-checks the three required
// fields, packs them into a SynthesizeSpeechRequest, and delegates to the
// request-object overload. Do not hand-edit beyond comments.
public virtual stt::Task <SynthesizeSpeechResponse> SynthesizeSpeechAsync(SynthesisInput input, VoiceSelectionParams voice, AudioConfig audioConfig, gaxgrpc::CallSettings callSettings = null) =>
    SynthesizeSpeechAsync(new SynthesizeSpeechRequest
    {
        Input = gax::GaxPreconditions.CheckNotNull(input, nameof(input)),
        Voice = gax::GaxPreconditions.CheckNotNull(voice, nameof(voice)),
        AudioConfig = gax::GaxPreconditions.CheckNotNull(audioConfig, nameof(audioConfig)),
    }, callSettings);
/// <summary>
/// Synthesizes <paramref name="content"/> (US English, female voice, LINEAR16)
/// and writes the audio to <paramref name="file"/>.
/// </summary>
/// <param name="content">Plain text to synthesize.</param>
/// <param name="file">Destination file path; overwritten if it exists.</param>
public void GenerateSpeech(string content, string file)
{
    var client = TextToSpeechClient.Create();
    var input = new SynthesisInput { Text = content };
    // Build the voice request.
    var voiceSelection = new VoiceSelectionParams { LanguageCode = "en-US", SsmlGender = SsmlVoiceGender.Female };
    // Specify the type of audio file.
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Linear16 };
    // Perform the text-to-speech request.
    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
    // BUG FIX: AudioContent is a protobuf ByteString, not a byte[] — the original
    // Stream.Write(response.AudioContent, 0, response.AudioContent.Length) call
    // does not compile. ByteString.WriteTo streams the bytes directly.
    using (var output = File.Create(file))
    {
        response.AudioContent.WriteTo(output);
    }
}
/// <summary>
/// Wraps the given SSML fragment in a &lt;speak&gt; envelope (with the configured
/// preamble), synthesizes it using the GoogleConfig settings, and writes the
/// audio to <paramref name="fileName"/> inside OutputDirectory.
/// </summary>
private async Task Synth(string fileName, string ssml)
{
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Ssml = $"<speak>{Prepend}{ssml}</speak>" },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = GoogleConfig.LanguageCode,
            SsmlGender = GoogleConfig.SsmlVoiceGender,
        },
        AudioConfig = new AudioConfig
        {
            AudioEncoding = GoogleConfig.AudioEncoding,
            SpeakingRate = GoogleConfig.SpeakingRate,
            VolumeGainDb = GoogleConfig.VolumeGainDb,
        },
    };

    var response = await Client.SynthesizeSpeechAsync(request).ConfigureAwait(false);

    // Asynchronously dispose the file once the audio has been flushed.
    await using var output = File.Create(Path.Combine(OutputDirectory, fileName));
    response.AudioContent.WriteTo(output);
}
/// <summary>
/// Returns (as JSON) a local URL for the MP3 pronunciation of the kana string
/// posted in the "kanastring" form field, synthesizing and caching the file on
/// first request.
/// </summary>
public ActionResult KanaSounds()
{
    string webRootPath = _env.WebRootPath;
    string kanaText = Request.Form["kanastring"];
    string kanasound = webRootPath + "/assets/sounds/kana/" + kanaText + ".mp3";
    string kanasoundhreg = "http://localhost:5001" + "/assets/sounds/kana/" + kanaText + ".mp3";

    // FIX: the original called the (billed) Text-to-Speech API on every request and
    // discarded the response when the MP3 was already cached. Synthesize only on a
    // cache miss.
    // NOTE(review): kanaText comes straight from the form and is embedded in a file
    // path — consider sanitizing it to prevent path traversal.
    if (System.IO.File.Exists(kanasound) == false)
    {
        var client = TextToSpeechClient.Create();
        var input = new SynthesisInput { Text = kanaText };
        var voiceSelection = new VoiceSelectionParams { LanguageCode = "ja-JP", SsmlGender = SsmlVoiceGender.Female, Name = "ja-JP-Wavenet-A" };
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
        using var output = System.IO.File.Create(kanasound);
        response.AudioContent.WriteTo(output);
    }
    return new JsonResult(kanasoundhreg);
}
/// <summary>
/// Discord TTS command: synthesizes <paramref name="message"/> (lower-cased,
/// Polish voice) to the configured MP3 path, then plays it in the invoking
/// user's voice channel.
/// </summary>
public async Task Tts(string message)
{
    var client = TextToSpeechClient.Create();
    var input = new SynthesisInput { Text = message.ToLower() };
    var voiceSelection = new VoiceSelectionParams
    {
        LanguageCode = "pl-PL",
        Name = "pl-PL-Standard-C",
        SsmlGender = SsmlVoiceGender.Female
    };
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3, SpeakingRate = 0.85 };
    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
    using (var output = File.Create(EnvVars.RoboMajkelTtsAudioFileLocation))
    {
        response.AudioContent.WriteTo(output);
    }

    // BUG FIX: the original called channel.ConnectAsync() unconditionally; when the
    // invoking user is not in a voice channel (or is not a guild user) the
    // null-conditional yields null and the call threw NullReferenceException.
    IVoiceChannel channel = (Context.User as IGuildUser)?.VoiceChannel;
    if (channel == null)
    {
        return;
    }
    var audioClient = await channel.ConnectAsync();
    await SendAsync(audioClient, EnvVars.RoboMajkelTtsAudioFileLocation);
}
// Answers a question ("cevapla" = "answer"): looks the answer up in the database,
// synthesizes it to Turkish speech (male voice, MP3), writes it to a fixed file,
// and plays it. Mutates UI state (stop button visibility) and the fields
// yazi/cevaplayamadim/durdurma along the way.
public void cevapla(string soru)
{
    string text = "";
    mediaPlayer.Close();
    if (String.IsNullOrEmpty(soru))
    {
        // No question captured: respond with a playful fallback line.
        text = "yoksa bana küstün mü ?";
    }
    else
    {
        try
        {
            // NOTE(review): the slice length comes from the field `yazi`, not from
            // `soru` itself — presumably the two are kept in sync by the caller;
            // confirm, as a short `soru` would throw here (caught below).
            soru = soru.Substring(0, yazi.Length - 6);
            Console.WriteLine("soru :" + soru);
            text = db.sor(soru.ToLower());
            cevaplayamadim = false;
            durdurma = false;
            durdurmaBtn.Visibility = Visibility.Hidden;
        }
        catch (Exception e)
        {
            // Lookup (or the substring above) failed: apologize and show the stop button.
            text = "Anlamadım. Lütfen tekrar söyle.";
            cevaplayamadim = true;
            durdurmaBtn.Visibility = Visibility.Visible;
        }
        finally
        {
            // Always close the database connection.
            db.bagKapat();
        }
    }
    yazi = text;
    VoiceSelectionParams voice = new VoiceSelectionParams { LanguageCode = "tr-TR", SsmlGender = SsmlVoiceGender.Male };
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    SynthesisInput input = new SynthesisInput { Text = text };
    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest { Input = input, Voice = voice, AudioConfig = config });
    // Overwrite the fixed output file, then play it.
    using (Stream output = File.Create("C:\\Users\\corx\\source\\repos\\Selami\\Selami\\ses\\sample.mp3"))
    {
        response.AudioContent.WriteTo(output);
    }
    mediaPlayer.Open(new Uri("C:\\Users\\corx\\source\\repos\\Selami\\Selami\\ses\\sample.mp3"));
    mediaPlayer.Play();
}
/// <summary>
/// Synthesizes <paramref name="text"/> with a Japanese female voice as LINEAR16
/// (WAV) audio and plays it through System.Media.SoundPlayer.
/// </summary>
public void TextSpeech(string text)
{
    TextToSpeechClient client = TextToSpeechClient.Create();
    SynthesisInput input = new SynthesisInput { Text = text };
    VoiceSelectionParams voice = new VoiceSelectionParams { LanguageCode = "ja-JP", SsmlGender = SsmlVoiceGender.Female };
    AudioConfig config = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Linear16,
    };
    var response = client.SynthesizeSpeech(input, voice, config);

    // BUG FIX: MemoryStream has no constructor taking a protobuf ByteString — the
    // original `new MemoryStream(response.AudioContent, true)` does not compile.
    // Convert to byte[] first.
    using (var memoryStream = new MemoryStream(response.AudioContent.ToByteArray()))
    {
        var player = new System.Media.SoundPlayer(memoryStream);
        Console.Write("Play");
        // BUG FIX: Play() returns immediately, so the using block disposed the
        // stream while playback was still reading it. PlaySync blocks until the
        // clip finishes, keeping the stream alive for the whole playback.
        player.PlaySync();
    }
}
/// <summary>
/// Asynchronously synthesizes <paramref name="text"/> (US English, neutral
/// gender, MP3) and writes the audio to <paramref name="path"/>.
/// </summary>
public async Task GenerateSpeechFile(string text, string path)
{
    var client = TextToSpeechClient.Create();

    // Compose the full request object rather than separate argument variables.
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = text },
        Voice = new VoiceSelectionParams { LanguageCode = "en-US", SsmlGender = SsmlVoiceGender.Neutral },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 },
    };

    var response = await client.SynthesizeSpeechAsync(request);

    // Persist the MP3 bytes at the requested path (overwrites any existing file).
    using (Stream output = File.Create(path))
    {
        response.AudioContent.WriteTo(output);
    }
}
// Button handler: synthesizes a fixed Korean order-confirmation notice to MP3,
// plays it via the winmm MCI interop (mciSendString), and schedules deletion of
// the temp file once playback finishes.
private void TTS_Button_Click(object sender, RoutedEventArgs e)
{
    // Instantiate a client
    TextToSpeechClient client = TextToSpeechClient.Create();
    // Set the text input to be synthesized. (Hard-coded Korean purchase-confirmation
    // notice; the literals below are runtime data — do not edit.)
    SynthesisInput input = new SynthesisInput
    {
        Text = "N Pay구매하신 상품의구매확정처리부탁드립니다.상품을 받으신 후 만족하셨다면 구매확정을 부탁드립니다." + "아래 기한까지 구매확정을 하지 않으실 경우,이후 자동으로 구매가 확정될 예정입니다." + "만일,구매확정기한 내 정상적으로 상품을 수령하지 못하신 경우에는 판매자문의 또는 구매확정 연장을 해주세요." + "고객명 이 * 연님주문번호 2019100971174081주문일자 2019.10.09 23:13발송일자 2019.10.10자동구매확정일 2019.10.19" + "결제정보총 주문금액 12,100원할인금액 0원환불정산액 / 포인트 0원 / 2,394원결제수단 신용카드" + "최종결제금액 9,706원배송정보수령인 이*연연락처 010 - 5234 - ****배송지 14305경기도 광명시 금당로 11(하안동, 하안6단지고층주공아파트)" + "603동****배송메모발송상품상품이미지애플 인증 고속충전 정품 1.2m 2m 아이패드 아이폰 케이블" + "옵션 : 옵션선택: mfi인증 메탈릭1.2m_다크그레이주문금액 9,600원수량 1"
    };
    // Build the voice request, select the language code ("ko-KR"),
    // and the SSML voice gender ("neutral").
    VoiceSelectionParams voice = new VoiceSelectionParams { LanguageCode = "ko-KR", SsmlGender = SsmlVoiceGender.Neutral };
    // Select the type of audio file you want returned.
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    // Perform the Text-to-Speech request, passing the text input
    // with the selected voice parameters and audio file type
    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest { Input = input, Voice = voice, AudioConfig = config });
    // Write the binary AudioContent of the response to an MP3 file.
    using (Stream output = File.Create("C:\\Users\\이제연\\Desktop\\sample.mp3"))
    {
        response.AudioContent.WriteTo(output);
    }
    // Open the MP3 via MCI, query its length in milliseconds, then start playback.
    mciSendString("open \"" + "C:\\Users\\이제연\\Desktop\\sample.mp3" + "\" type mpegvideo alias MediaFile", null, 0, IntPtr.Zero);
    StringBuilder returnData = new StringBuilder(128);
    mciSendString("status MediaFile length", returnData, returnData.Capacity, IntPtr.Zero);
    int nMilliSecond = Convert.ToInt32(returnData.ToString());
    mciSendString("play MediaFile", null, 0, IntPtr.Zero);
    // Background thread waits out the playback duration and then deletes the file.
    Thread thread = new Thread(() => _deleteMp3File("C:\\Users\\이제연\\Desktop\\sample.mp3", nMilliSecond));
    thread.Start();
}
/// <summary>
/// Synthesizes speech synchronously: receive results after all text input
/// has been processed.
/// </summary>
/// <param name="input">
/// Required. The Synthesizer requires either plain text or SSML as input.
/// </param>
/// <param name="voice">
/// Required. The desired voice of the synthesized audio.
/// </param>
/// <param name="audioConfig">
/// Required. The configuration of the synthesized audio.
/// </param>
/// <param name="cancellationToken">
/// A <see cref="st::CancellationToken"/> to use for this RPC.
/// </param>
/// <returns>
/// A Task containing the RPC response.
/// </returns>
// NOTE: generated-style convenience overload — converts the CancellationToken
// into CallSettings and delegates to the CallSettings-based overload. Do not
// hand-edit beyond comments.
public virtual stt::Task <SynthesizeSpeechResponse> SynthesizeSpeechAsync(
    SynthesisInput input,
    VoiceSelectionParams voice,
    AudioConfig audioConfig,
    st::CancellationToken cancellationToken) =>
    SynthesizeSpeechAsync(
        input,
        voice,
        audioConfig,
        gaxgrpc::CallSettings.FromCancellationToken(cancellationToken));
/// <summary>
/// Unity startup: points GOOGLE_APPLICATION_CREDENTIALS at the bundled key file
/// (aborting if it is missing), then synthesizes "Hello, World!" to an MP3 at
/// saveFile as a quick-start check.
/// </summary>
void Start()
{
    #region Environment Variable
    // Abort early unless the service-account key file is present.
    if (!File.Exists(credentialsPath))
    {
        Debug.LogError("failure" + credentialsPath);
        return;
    }
    Debug.Log("success: " + credentialsPath);
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credentialsPath);
    #endregion

    #region QuickStart
    // Synthesize a fixed greeting: US English, neutral gender, MP3 output.
    TextToSpeechClient client = TextToSpeechClient.Create();
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = "Hello, World!" },
        Voice = new VoiceSelectionParams { LanguageCode = "en-US", SsmlGender = SsmlVoiceGender.Neutral },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 },
    };
    var response = client.SynthesizeSpeech(request);

    // Write the binary AudioContent of the response to the configured MP3 path.
    using (Stream output = File.Create(saveFile))
    {
        response.AudioContent.WriteTo(output);
        Debug.Log($"Audio content written to file " + saveFile);
    }
    #endregion
}
/// <summary>
/// Interactive console flow: prompts for a message and a file name, synthesizes
/// the message (US English WaveNet-C female voice, LINEAR16), and saves it as
/// "&lt;fileName&gt;.wav".
/// </summary>
public void convertTextToSpeech()
{
    Console.Clear();

    // Prompt for the message text and the target file name.
    Console.Write("Enter the message to convert to audio: ");
    string messageText = Console.ReadLine();
    Console.Write("Enter the name of the file: ");
    string fileName = Console.ReadLine();

    // Build the full synthesis request in one object.
    TextToSpeechClient client = TextToSpeechClient.Create();
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = messageText },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            Name = "en-US-Wavenet-C",
            SsmlGender = SsmlVoiceGender.Female,
        },
        AudioConfig = new AudioConfig
        {
            SpeakingRate = 1.0,
            AudioEncoding = AudioEncoding.Linear16,
        },
    };

    var response = client.SynthesizeSpeech(request);

    // Persist the LINEAR16 audio as a WAV file and confirm to the user.
    using (Stream output = File.Create(fileName + ".wav"))
    {
        response.AudioContent.WriteTo(output);
        Console.WriteLine($"\r\nAudio content written to " + fileName + ".wav");
    }
}
/// <summary>
/// Fills the SSML template with the text and the speaker's voice settings,
/// synthesizes it to a temp MP3, plays it through the speaker pipeline, and
/// finally forwards the text to the translator.
/// </summary>
private static async Task SpeakGoogle(ISpeaker speaker, string textToSpeech, string user)
{
    // Escape embedded quotes so they survive the template substitution.
    textToSpeech = textToSpeech.Replace("\"", "\"\"");

    TextToSpeechClient client = TextToSpeechClient.Create();

    // Substitute the placeholders in the SSML template.
    string ssml = File.ReadAllText("Speakers/SSML.xml")
        .Replace("{text}", textToSpeech)
        .Replace("{voice}", speaker.Voice)
        .Replace("{posmsg}", speaker.Diction)
        .Replace("{alert}", speaker.Alert);

    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Ssml = ssml },
        Voice = new VoiceSelectionParams { LanguageCode = speaker.Accent.ToString() },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 },
    };

    var response = await client.SynthesizeSpeechAsync(request);

    // Dump the MP3 to a uniquely named temp file.
    var cFile = System.IO.Path.GetTempPath() + Guid.NewGuid() + ".mp3";
    using (Stream output = File.Create(cFile))
    {
        response.AudioContent.WriteTo(output);
    }

    // Hand the file to the playback pipeline, then translate the spoken text.
    Sounds.RandomTrollSound();
    SpeakerCore.PreSpeech(user);
    SpeakerCore.ExecuteMP3File(cFile);
    await AutomaticTranslator.Translate(textToSpeech);
}
/// <summary>Snippet for SynthesizeSpeech</summary>
// Documentation snippet: the "// Snippet:" / "// End snippet" markers delimit
// the extracted sample — keep them intact.
public void SynthesizeSpeech()
{
    // Snippet: SynthesizeSpeech(SynthesisInput, VoiceSelectionParams, AudioConfig, CallSettings)
    // Create client
    TextToSpeechClient textToSpeechClient = TextToSpeechClient.Create();
    // Initialize request argument(s)
    SynthesisInput input = new SynthesisInput();
    VoiceSelectionParams voice = new VoiceSelectionParams();
    AudioConfig audioConfig = new AudioConfig();
    // Make the request
    SynthesizeSpeechResponse response = textToSpeechClient.SynthesizeSpeech(input, voice, audioConfig);
    // End snippet
}
/// <summary>Snippet for SynthesizeSpeechAsync</summary>
// Documentation snippet: the "// Snippet:" / "// End snippet" markers delimit
// the extracted sample — keep them intact.
public async Task SynthesizeSpeechAsync()
{
    // Snippet: SynthesizeSpeechAsync(SynthesisInput, VoiceSelectionParams, AudioConfig, CallSettings)
    // Additional: SynthesizeSpeechAsync(SynthesisInput, VoiceSelectionParams, AudioConfig, CancellationToken)
    // Create client
    TextToSpeechClient textToSpeechClient = await TextToSpeechClient.CreateAsync();
    // Initialize request argument(s)
    SynthesisInput input = new SynthesisInput();
    VoiceSelectionParams voice = new VoiceSelectionParams();
    AudioConfig audioConfig = new AudioConfig();
    // Make the request
    SynthesizeSpeechResponse response = await textToSpeechClient.SynthesizeSpeechAsync(input, voice, audioConfig);
    // End snippet
}
/// <summary>
/// Synthesizes the text in <paramref name="model"/> (MP3, model-specified
/// language and gender) and returns the raw response wrapped in a ResultModel;
/// failures are recorded in notFoundLanguageCodes and returned as Success=false.
/// </summary>
public ResultModel <SynthesizeSpeechResponse> DownloadWord(SpeechModel model)
{
    // BUG FIX: the original used File.Exists on a directory path, which always
    // returns false for directories — the check never worked. Use Directory.Exists
    // so the create call only runs when the folder is actually missing.
    if (!Directory.Exists("wwwroot/assets/speeches/"))
    {
        Directory.CreateDirectory("wwwroot/assets/speeches/");
    }
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", "./speech-key.json");
    try
    {
        var client = TextToSpeechClient.Create();
        // The input to be synthesized, can be provided as text or SSML.
        var input = new SynthesisInput { Text = model.Text };
        // Build the voice request.
        var voiceSelection = new VoiceSelectionParams { LanguageCode = model.LanguageCode, SsmlGender = model.Gender };
        // Specify the type of audio file.
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        // Perform the text-to-speech request.
        return new ResultModel <SynthesizeSpeechResponse>
        {
            Data = client.SynthesizeSpeech(input, voiceSelection, audioConfig),
            Success = true
        };
    }
    catch (Exception ex)
    {
        // Record the failing language code so it can be reported later.
        notFoundLanguageCodes.Add(new NotFoundLanguageCode { Code = model.LanguageCode, ErrorMessage = ex.Message });
        return new ResultModel <SynthesizeSpeechResponse> { Success = false, ErrorMessage = ex.Message };
    }
}
/// <summary>
/// Synthesizes <paramref name="chat"/> in the given culture (female voice,
/// LINEAR16) and atomically replaces the shared media file under the lock.
/// </summary>
protected override void DoSpeech(string culture, string chat)
{
    // Perform the synthesis outside the lock; only the file swap is guarded.
    var response = _client.SynthesizeSpeech(
        new SynthesisInput { Text = chat },
        new VoiceSelectionParams { LanguageCode = culture, SsmlGender = SsmlVoiceGender.Female },
        new AudioConfig { AudioEncoding = AudioEncoding.Linear16 });

    lock (_mediaFileLock)
    {
        // Replace any previous media file with the fresh audio.
        if (File.Exists(_mediaFilePath))
        {
            File.Delete(_mediaFilePath);
        }
        using (var output = File.Create(_mediaFilePath))
        {
            response.AudioContent.WriteTo(output);
        }
    }
}
/// <summary>
/// Returns (as JSON) a local URL for the MP3 pronunciation of the vocabulary
/// string posted in the "vocabularystring" form field, synthesizing and caching
/// the file on first request.
/// </summary>
public ActionResult VocabularySounds()
{
    string webRootPath = _env.WebRootPath;
    string vocabularyText = Request.Form["vocabularystring"];
    string vocabularySound = webRootPath + "/assets/sounds/vocabulary/" + vocabularyText + ".mp3";
    string vocabularySoundHref = "http://localhost:5001" + "/assets/sounds/vocabulary/" + vocabularyText + ".mp3";

    // FIX: the original called the (billed) Text-to-Speech API on every request and
    // discarded the response when the MP3 was already cached. Synthesize only on a
    // cache miss.
    // NOTE(review): vocabularyText comes straight from the form and is embedded in
    // a file path — consider sanitizing it to prevent path traversal.
    if (System.IO.File.Exists(vocabularySound) == false)
    {
        var client = TextToSpeechClient.Create();
        var input = new SynthesisInput { Text = vocabularyText };
        // Build the voice request.
        var voiceSelection = new VoiceSelectionParams { LanguageCode = "ja-JP", SsmlGender = SsmlVoiceGender.Female, Name = "ja-JP-Wavenet-A" };
        // Specify the type of audio file.
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        // Perform the text-to-speech request and cache the result.
        var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
        using var output = System.IO.File.Create(vocabularySound);
        response.AudioContent.WriteTo(output);
    }
    return new JsonResult(vocabularySoundHref);
}
/// <summary>
/// Synthesizes <paramref name="text"/> (US English WaveNet-F, MP3) to a temp
/// file and returns that file's path.
/// </summary>
/// <param name="text">Plain text to synthesize.</param>
/// <returns>Path of the temp file containing the MP3 audio.</returns>
public static string TextToSpeach(string text)
{
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", @"C:\TextToSpeach-b2d8743c4197.json");
    // Instantiate a client
    TextToSpeechClient client = TextToSpeechClient.Create();
    // Set the text input to be synthesized.
    SynthesisInput input = new SynthesisInput { Text = text };
    // Build the voice request.
    VoiceSelectionParams voice = new VoiceSelectionParams
    {
        LanguageCode = "en-US",
        SsmlGender = SsmlVoiceGender.Neutral,
        Name = "en-US-Wavenet-F",
    };
    // Select the type of audio file you want returned.
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    // Perform the Text-to-Speech request.
    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest { Input = input, Voice = voice, AudioConfig = config });

    string url = "";
    // Write the binary AudioContent of the response to a temp MP3 file.
    string path = System.IO.Path.GetTempFileName();
    using (FileStream output = File.OpenWrite(path))
    {
        response.AudioContent.WriteTo(output);
        url = output.Name;
        // BUG FIX: the original message claimed the audio was written to
        // 'sample.mp3', but the output is a temp file — report the real path.
        Console.WriteLine($"Audio content written to file '{path}'");
    }
    return url;
}
/// <summary>
/// Synthesizes <paramref name="text"/> with the requested voice/gender over the
/// given gRPC channel, caches the MP3 bytes in textToSpeechFiles under a fresh
/// GUID-based name, and returns that name as the lookup key.
/// </summary>
public string text_to_mp3(string text, Grpc.Core.Channel channel, string LanguageCode, string Gender, string Voice)
{
    TextToSpeechClient client = TextToSpeechClient.Create(channel);
    var input = new SynthesisInput { Text = text };
    // Map the string gender onto the API enum; anything but "Female" is Male,
    // preserving the original behavior.
    SsmlVoiceGender gender;
    if (Gender == "Female")
    {
        gender = SsmlVoiceGender.Female;
    }
    else
    {
        gender = SsmlVoiceGender.Male;
    }
    var voiceSelection = new VoiceSelectionParams { LanguageCode = LanguageCode, SsmlGender = gender, Name = Voice };
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

    string filename = Guid.NewGuid().ToString() + ".mp3";
    MemoryStream newTextToSpeech = new MemoryStream();
    response.AudioContent.WriteTo(newTextToSpeech);
    // BUG FIX: WriteTo leaves the stream positioned at its end, so consumers
    // reading textToSpeechFiles[filename] saw zero bytes. Rewind before caching.
    newTextToSpeech.Position = 0;

    //Add it to the dictionary
    textToSpeechFiles[filename] = newTextToSpeech;
    //and return the filename as the key
    return filename;
}
/// <summary>
/// Synthesizes <paramref name="text"/> with the voice chosen in Settings (MP3 at
/// the configured rate), writes it to a unique temp file, and enqueues that file
/// for playback. No-op when the client has not been created.
/// </summary>
private void Speak(string text)
{
    if (client == null)
    {
        return;
    }
    // Set the text input to be synthesized.
    SynthesisInput input = new SynthesisInput { Text = text };
    // Select the type of audio file you want returned.
    AudioConfig config = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Mp3,
        // Pitch = -5,
        SpeakingRate = Rate
    };
    // Perform the Text-to-Speech request; the voice name is looked up from the
    // settings-driven voiceTypes table (WaveNet row 0, standard row 1).
    SynthesizeSpeechResponse response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
    {
        Input = input,
        Voice = new VoiceSelectionParams
        {
            LanguageCode = languages[Settings.Default.languageIndex],
            Name = voiceTypes[Settings.Default.useWavenetVoices ? 0 : 1, Settings.Default.languageIndex, Settings.Default.ttsVoice]
        },
        AudioConfig = config
    });
    // FIX: the original round-tripped the audio through a Base64 string
    // (ToBase64 + Convert.FromBase64String) for no benefit; write the bytes
    // directly — identical file contents, no intermediate allocations.
    string filePath = Path.Combine(Path.GetTempPath(), "ttsoutput64_" + DateTime.Now.ToFileTime() + ".mp3");
    File.WriteAllBytes(filePath, response.AudioContent.ToByteArray());
    // Hand the file to the playback queue.
    ttsQueue.Enqueue(filePath);
}