public void cevapla(string soru)
{
    // Answers the recognized question `soru`: looks the answer up in the database,
    // synthesizes it as Turkish speech via Google Cloud TTS, and plays the MP3.
    string text = "";
    mediaPlayer.Close(); // stop any previous playback before answering
    if (String.IsNullOrEmpty(soru))
    {
        // Nothing was recognized; reply with a teasing fallback ("did you get mad at me?").
        text = "yoksa bana küstün mü ?";
    }
    else
    {
        try
        {
            // Trim the trailing characters from the recognized question.
            // NOTE(review): this uses yazi.Length, not soru.Length — looks like a bug
            // unless `yazi` is guaranteed to mirror `soru` at this point; confirm.
            soru = soru.Substring(0, yazi.Length - 6);
            Console.WriteLine("soru :" + soru);
            // Lowercased lookup against the question database.
            text = db.sor(soru.ToLower());
            cevaplayamadim = false;
            durdurma = false;
            durdurmaBtn.Visibility = Visibility.Hidden;
        }
        catch (Exception e)
        {
            // Lookup (or substring) failed: apologize ("I didn't understand, please repeat")
            // and show the stop button so the user can interrupt.
            text = "Anlamadım. Lütfen tekrar söyle.";
            cevaplayamadim = true;
            durdurmaBtn.Visibility = Visibility.Visible;
        }
        finally
        {
            db.bagKapat(); // always close the DB connection
        }
    }
    yazi = text; // remember the last spoken text
    // Fixed Turkish male voice, MP3 output.
    VoiceSelectionParams voice = new VoiceSelectionParams { LanguageCode = "tr-TR", SsmlGender = SsmlVoiceGender.Male };
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    SynthesisInput input = new SynthesisInput { Text = text };
    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest { Input = input, Voice = voice, AudioConfig = config });
    // NOTE(review): hard-coded absolute user path; consider a relative or temp location.
    using (Stream output = File.Create("C:\\Users\\corx\\source\\repos\\Selami\\Selami\\ses\\sample.mp3"))
    {
        response.AudioContent.WriteTo(output);
    }
    mediaPlayer.Open(new Uri("C:\\Users\\corx\\source\\repos\\Selami\\Selami\\ses\\sample.mp3"));
    mediaPlayer.Play();
}
public async Task Tts(string message)
{
    // Synthesizes `message` (lower-cased) with a fixed Polish female voice,
    // writes the MP3 to the configured location, and streams it into the
    // invoking user's current voice channel.
    var client = TextToSpeechClient.Create();
    var input = new SynthesisInput { Text = message.ToLower() };
    var voiceSelection = new VoiceSelectionParams
    {
        LanguageCode = "pl-PL",
        Name = "pl-PL-Standard-C",
        SsmlGender = SsmlVoiceGender.Female
    };
    var audioConfig = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Mp3,
        SpeakingRate = 0.85 // slightly slower than default for clarity
    };

    // Await the async API instead of blocking the thread with the sync overload.
    var response = await client.SynthesizeSpeechAsync(input, voiceSelection, audioConfig);
    using (var output = File.Create(EnvVars.RoboMajkelTtsAudioFileLocation))
    {
        response.AudioContent.WriteTo(output);
    }

    // Guard against the user not being in a voice channel: the original
    // dereferenced a possibly-null channel and threw NullReferenceException.
    IVoiceChannel channel = (Context.User as IGuildUser)?.VoiceChannel;
    if (channel == null)
    {
        return;
    }
    var audioClient = await channel.ConnectAsync();
    await SendAsync(audioClient, EnvVars.RoboMajkelTtsAudioFileLocation);
}
public ActionResult KanaSounds()
{
    // Returns a URL to an MP3 pronunciation of the posted kana string,
    // synthesizing and caching the file on first request.
    string webRootPath = _env.WebRootPath;
    string kanaText = Request.Form["kanastring"];
    string kanasound = webRootPath + "/assets/sounds/kana/" + kanaText + ".mp3";
    string kanasoundhreg = "http://localhost:5001" + "/assets/sounds/kana/" + kanaText + ".mp3";

    // Only hit the (billed) TTS API when the file is not already cached on disk;
    // the original synthesized speech on every request and discarded the result
    // whenever the file existed.
    if (System.IO.File.Exists(kanasound) == false)
    {
        var client = TextToSpeechClient.Create();
        var input = new SynthesisInput { Text = kanaText };
        var voiceSelection = new VoiceSelectionParams
        {
            LanguageCode = "ja-JP",
            SsmlGender = SsmlVoiceGender.Female,
            Name = "ja-JP-Wavenet-A"
        };
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
        using var output = System.IO.File.Create(kanasound);
        response.AudioContent.WriteTo(output);
    }
    return new JsonResult(kanasoundhreg);
}
public void TextSpeech(string text)
{
    // Synthesizes `text` with a Japanese female voice as LINEAR16 (WAV-compatible)
    // audio and plays it through the default sound device.
    TextToSpeechClient client = TextToSpeechClient.Create();
    SynthesisInput input = new SynthesisInput { Text = text };
    VoiceSelectionParams voice = new VoiceSelectionParams
    {
        LanguageCode = "ja-JP",
        SsmlGender = SsmlVoiceGender.Female
    };
    AudioConfig config = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Linear16,
    };
    var response = client.SynthesizeSpeech(input, voice, config);

    // Copy the protobuf ByteString out explicitly; MemoryStream has no ctor
    // taking a ByteString.
    using (var memoryStream = new MemoryStream(response.AudioContent.ToByteArray()))
    {
        var player = new System.Media.SoundPlayer(memoryStream);
        Console.Write("Play");
        // PlaySync keeps the stream alive for the duration of playback;
        // the async Play() raced with the stream being disposed at the end
        // of this using block.
        player.PlaySync();
    }
}
private Stream GetAudioStream(string message)
{
    // Synthesizes `message` with the fixed Spanish (es-ES-Standard-A) voice and
    // returns the raw LINEAR16 audio as an in-memory stream, or null on failure.
    var ttsInput = new SynthesisInput { Text = message };
    var ttsVoice = new VoiceSelectionParams
    {
        LanguageCode = "es-ES",
        Name = "es-ES-Standard-A"
    };
    var ttsAudio = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Linear16,
        SpeakingRate = Configuration.BasicConfiguration.MessageSpeed
    };

    try
    {
        var synthesized = Client.SynthesizeSpeech(ttsInput, ttsVoice, ttsAudio);
        return new MemoryStream(synthesized.AudioContent.ToByteArray());
    }
    catch (Exception ex)
    {
        // Best effort: log the failure and signal it with null rather than propagate.
        TwitchBot.Instance.LogMessage(LogSeverity.Error, ex.Message, ex.StackTrace);
        return null;
    }
}
public async Task GenerateSpeechFile(string text, string path)
{
    // Renders `text` to an MP3 file at `path` using the default en-US neutral voice.
    TextToSpeechClient client = TextToSpeechClient.Create();
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = text },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            SsmlGender = SsmlVoiceGender.Neutral
        },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 }
    };

    var response = await client.SynthesizeSpeechAsync(request);
    using (var output = File.Create(path))
    {
        response.AudioContent.WriteTo(output);
    }
}
public async Task SynthesizeSpeechAsync()
{
    // Verifies that TextToSpeechClientImpl forwards SynthesizeSpeechAsync to the
    // underlying gRPC client and returns the gRPC response unchanged.
    var grpcClientMock = new Mock<TextToSpeech.TextToSpeechClient>(MockBehavior.Strict);
    var expectedRequest = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput(),
        Voice = new VoiceSelectionParams(),
        AudioConfig = new AudioConfig(),
    };
    var expectedResponse = new SynthesizeSpeechResponse
    {
        AudioContent = ByteString.CopyFromUtf8("16"),
    };
    grpcClientMock
        .Setup(x => x.SynthesizeSpeechAsync(expectedRequest, It.IsAny<CallOptions>()))
        .Returns(new Grpc.Core.AsyncUnaryCall<SynthesizeSpeechResponse>(
            Task.FromResult(expectedResponse), null, null, null, null));
    TextToSpeechClient client = new TextToSpeechClientImpl(grpcClientMock.Object, null);

    // Call through the convenience overload; protobuf value equality makes the
    // freshly constructed arguments match `expectedRequest` in the mock setup.
    SynthesizeSpeechResponse response = await client.SynthesizeSpeechAsync(
        new SynthesisInput(), new VoiceSelectionParams(), new AudioConfig());

    Assert.Same(expectedResponse, response);
    grpcClientMock.VerifyAll();
}
private void Button_Click_Save(object sender, RoutedEventArgs e)
{
    // Read the synthesis parameters from the UI controls.
    string text = SpeechText.Text;
    bool useWaveNet = UseWaveNet.IsChecked ?? false;
    string voiceName = useWaveNet ? "ja-JP-Wavenet-A" : "ja-JP-Standard-A";
    double speakingRate = SpeedSlider.Value;
    double pitch = PitchSlider.Value;

    // Synthesize with the selected Japanese voice and the slider-driven
    // rate/pitch settings.
    var response = _Client.SynthesizeSpeech(
        new SynthesisInput { Text = text },
        new VoiceSelectionParams
        {
            Name = voiceName,
            LanguageCode = "ja-JP",
            SsmlGender = SsmlVoiceGender.Female,
        },
        new AudioConfig
        {
            AudioEncoding = AudioEncoding.Mp3,
            SpeakingRate = speakingRate,
            Pitch = pitch,
        });

    string fileName = GetFileName();
    using (var output = File.Create($"mp3\\{fileName}"))
    {
        response.AudioContent.WriteTo(output);
    }

    // Refresh the UI with the newly saved file.
    UpdateDisplayValues(fileName);
}
static void Main(string[] args)
{
    var client = TextToSpeechClient.Create();

    // Message to synthesize; args[0] is interpolated as the addressee's name.
    var input = new SynthesisInput
    {
        Text = "Whoa whoa there, " + args[0] + "! Killer here is having a fit, so come back later"
    };
    // Default US-English neutral voice, MP3 output.
    var voiceSelection = new VoiceSelectionParams
    {
        LanguageCode = "en-US",
        SsmlGender = SsmlVoiceGender.Neutral
    };
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };

    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

    // Persist the audio next to the executable.
    using (var output = File.Create("output3.mp3"))
    {
        response.AudioContent.WriteTo(output);
    }
    Console.WriteLine("Audio content written to file \"output3.mp3\"");
}
private void TTS_Button_Click(object sender, RoutedEventArgs e)
{
    // Synthesizes a hard-coded Korean order-confirmation notice as Korean speech,
    // saves it as an MP3 on the desktop, plays it via the Win32 MCI interface,
    // and schedules deletion of the file once playback is expected to finish.
    // Instantiate a client
    TextToSpeechClient client = TextToSpeechClient.Create();
    // Set the text input to be synthesized (Korean shopping-order notification text).
    SynthesisInput input = new SynthesisInput
    {
        Text = "N Pay구매하신 상품의구매확정처리부탁드립니다.상품을 받으신 후 만족하셨다면 구매확정을 부탁드립니다." + "아래 기한까지 구매확정을 하지 않으실 경우,이후 자동으로 구매가 확정될 예정입니다." + "만일,구매확정기한 내 정상적으로 상품을 수령하지 못하신 경우에는 판매자문의 또는 구매확정 연장을 해주세요." + "고객명 이 * 연님주문번호 2019100971174081주문일자 2019.10.09 23:13발송일자 2019.10.10자동구매확정일 2019.10.19" + "결제정보총 주문금액 12,100원할인금액 0원환불정산액 / 포인트 0원 / 2,394원결제수단 신용카드" + "최종결제금액 9,706원배송정보수령인 이*연연락처 010 - 5234 - ****배송지 14305경기도 광명시 금당로 11(하안동, 하안6단지고층주공아파트)" + "603동****배송메모발송상품상품이미지애플 인증 고속충전 정품 1.2m 2m 아이패드 아이폰 케이블" + "옵션 : 옵션선택: mfi인증 메탈릭1.2m_다크그레이주문금액 9,600원수량 1"
    };
    // Build the voice request: Korean language, neutral SSML gender.
    VoiceSelectionParams voice = new VoiceSelectionParams { LanguageCode = "ko-KR", SsmlGender = SsmlVoiceGender.Neutral };
    // Select the type of audio file you want returned.
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
    // Perform the Text-to-Speech request, passing the text input
    // with the selected voice parameters and audio file type.
    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest { Input = input, Voice = voice, AudioConfig = config });
    // Write the binary AudioContent of the response to an MP3 file.
    // NOTE(review): hard-coded absolute user path; consider Path.GetTempFileName().
    using (Stream output = File.Create("C:\\Users\\이제연\\Desktop\\sample.mp3"))
    {
        response.AudioContent.WriteTo(output);
    }
    // Open the file with MCI under the alias "MediaFile" for playback.
    mciSendString("open \"" + "C:\\Users\\이제연\\Desktop\\sample.mp3" + "\" type mpegvideo alias MediaFile", null, 0, IntPtr.Zero);
    // Query the track length (milliseconds) so we know when playback will end.
    StringBuilder returnData = new StringBuilder(128);
    mciSendString("status MediaFile length", returnData, returnData.Capacity, IntPtr.Zero);
    int nMilliSecond = Convert.ToInt32(returnData.ToString());
    // Start asynchronous playback.
    mciSendString("play MediaFile", null, 0, IntPtr.Zero);
    // Delete the MP3 on a background thread after the playback duration elapses.
    Thread thread = new Thread(() => _deleteMp3File("C:\\Users\\이제연\\Desktop\\sample.mp3", nMilliSecond));
    thread.Start();
}
/// <summary>
/// Synthesizes speech synchronously: receive results after all text input
/// has been processed.
/// </summary>
/// <param name="input">
/// Required. The Synthesizer requires either plain text or SSML as input.
/// </param>
/// <param name="voice">
/// Required. The desired voice of the synthesized audio.
/// </param>
/// <param name="audioConfig">
/// Required. The configuration of the synthesized audio.
/// </param>
/// <param name="cancellationToken">
/// A <see cref="st::CancellationToken"/> to use for this RPC.
/// </param>
/// <returns>
/// A Task containing the RPC response.
/// </returns>
public virtual stt::Task<SynthesizeSpeechResponse> SynthesizeSpeechAsync(SynthesisInput input, VoiceSelectionParams voice, AudioConfig audioConfig, st::CancellationToken cancellationToken)
{
    // Delegate to the CallSettings-based overload, adapting the token.
    return SynthesizeSpeechAsync(input, voice, audioConfig, gaxgrpc::CallSettings.FromCancellationToken(cancellationToken));
}
void Start()
{
    // --- Credentials ---------------------------------------------------------
    // Fail fast if the service-account key file is missing.
    if (!File.Exists(credentialsPath))
    {
        Debug.LogError("failure" + credentialsPath);
        return;
    }
    Debug.Log("success: " + credentialsPath);
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credentialsPath);

    // --- Quickstart ----------------------------------------------------------
    // Synthesize a fixed phrase with the default en-US neutral voice and
    // write the MP3 to `saveFile`.
    TextToSpeechClient client = TextToSpeechClient.Create();
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = "Hello, World!" },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            SsmlGender = SsmlVoiceGender.Neutral
        },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 }
    };
    var response = client.SynthesizeSpeech(request);

    using (Stream output = File.Create(saveFile))
    {
        response.AudioContent.WriteTo(output);
        Debug.Log($"Audio content written to file " + saveFile);
    }
}
public void convertTextToSpeech()
{
    // Interactively converts a user-supplied message to a WAV file using the
    // en-US-Wavenet-C female voice.
    Console.Clear();

    // Prompt the user for the message and the output file name.
    Console.Write("Enter the message to convert to audio: ");
    string messageText = Console.ReadLine();
    Console.Write("Enter the name of the file: ");
    string fileName = Console.ReadLine();

    TextToSpeechClient client = TextToSpeechClient.Create();
    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = messageText },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            Name = "en-US-Wavenet-C",
            SsmlGender = SsmlVoiceGender.Female
        },
        AudioConfig = new AudioConfig
        {
            SpeakingRate = 1.0,
            AudioEncoding = AudioEncoding.Linear16
        }
    });

    // LINEAR16 output is WAV-compatible; save it with a .wav extension.
    using (Stream output = File.Create(fileName + ".wav"))
    {
        response.AudioContent.WriteTo(output);
        Console.WriteLine($"\r\nAudio content written to " + fileName + ".wav");
    }
}
/// <summary>
/// Synthesizes speech synchronously: receive results after all text input
/// has been processed.
/// </summary>
/// <param name="input">
/// Required. The Synthesizer requires either plain text or SSML as input.
/// </param>
/// <param name="voice">
/// Required. The desired voice of the synthesized audio.
/// </param>
/// <param name="audioConfig">
/// Required. The configuration of the synthesized audio.
/// </param>
/// <param name="callSettings">
/// If not null, applies overrides to this RPC call.
/// </param>
/// <returns>
/// The RPC response.
/// </returns>
public virtual SynthesizeSpeechResponse SynthesizeSpeech(SynthesisInput input, VoiceSelectionParams voice, AudioConfig audioConfig, gaxgrpc::CallSettings callSettings = null)
{
    // Validate the required arguments, then delegate to the request-based overload.
    var request = new SynthesizeSpeechRequest
    {
        Input = gax::GaxPreconditions.CheckNotNull(input, nameof(input)),
        Voice = gax::GaxPreconditions.CheckNotNull(voice, nameof(voice)),
        AudioConfig = gax::GaxPreconditions.CheckNotNull(audioConfig, nameof(audioConfig)),
    };
    return SynthesizeSpeech(request, callSettings);
}
private static async Task SpeakGoogle(ISpeaker speaker, string textToSpeech, string user)
{
    // Escape embedded quotes for the SSML template.
    textToSpeech = textToSpeech.Replace("\"", "\"\"");

    TextToSpeechClient client = TextToSpeechClient.Create();

    // Build the SSML body by substituting the speaker's settings into the
    // on-disk template.
    var ssmlInput = new SynthesisInput
    {
        Ssml = File.ReadAllText("Speakers/SSML.xml").Replace("{text}", textToSpeech).Replace("{voice}", speaker.Voice).Replace("{posmsg}", speaker.Diction).Replace("{alert}", speaker.Alert),
    };
    var voiceParams = new VoiceSelectionParams
    {
        LanguageCode = speaker.Accent.ToString(),
    };
    var mp3Config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };

    var response = await client.SynthesizeSpeechAsync(new SynthesizeSpeechRequest
    {
        Input = ssmlInput,
        Voice = voiceParams,
        AudioConfig = mp3Config
    });

    // Write the audio to a uniquely named temp MP3.
    var cFile = System.IO.Path.GetTempPath() + Guid.NewGuid() + ".mp3";
    using (Stream output = File.Create(cFile))
        response.AudioContent.WriteTo(output);

    // Announce, speak, then fire the translation pipeline.
    Sounds.RandomTrollSound();
    SpeakerCore.PreSpeech(user);
    SpeakerCore.ExecuteMP3File(cFile);
    await AutomaticTranslator.Translate(textToSpeech);
}
/// <summary>Snippet for SynthesizeSpeech</summary>
public void SynthesizeSpeech()
{
    // Snippet: SynthesizeSpeech(SynthesisInput, VoiceSelectionParams, AudioConfig, CallSettings)
    // Create client
    var textToSpeechClient = TextToSpeechClient.Create();
    // Initialize request argument(s)
    var input = new SynthesisInput();
    var voice = new VoiceSelectionParams();
    var audioConfig = new AudioConfig();
    // Make the request
    var response = textToSpeechClient.SynthesizeSpeech(input, voice, audioConfig);
    // End snippet
}
private static void ConvertTextToMp3(SettingModel config, List<string> files)
{
    // Converts each text file in `files` to an MP3 in OutputDirectory using the
    // voice/audio settings from `config`; existing MP3s are skipped.
    var client = TextToSpeechClient.Create();
    var voice = new VoiceSelectionParams
    {
        LanguageCode = config.Voice.LanguageCode,
        SsmlGender = SsmlVoiceGender.Neutral,
        Name = config.Voice.Name
    };
    var audioConfig = new AudioConfig
    {
        AudioEncoding = config.AudioConfig.AudioEncoding.AsAudioEncoding(),
        EffectsProfileId = { config.AudioConfig.EffectsProfileId },
        Pitch = config.AudioConfig.Pitch,
        SpeakingRate = config.AudioConfig.SpeakingRate
    };

    foreach (var file in files)
    {
        Console.WriteLine($"檔案 { Path.GetFileName(file) } 處理中");
        var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
        {
            Input = new SynthesisInput { Text = File.ReadAllText(file) },
            Voice = voice,
            AudioConfig = audioConfig
        });

        var mp3Name = $"{ Path.GetFileNameWithoutExtension(file) }.mp3";
        var outPath = Path.Combine(OutputDirectory, mp3Name);
        if (File.Exists(outPath))
        {
            // Warn (in yellow) that the target already exists and skip it.
            Console.ForegroundColor = ConsoleColor.Yellow;
            Console.WriteLine($"語音檔 { outPath } 已存在,略過本次轉換. { Environment.NewLine }");
            Console.ResetColor();
        }
        else
        {
            using (Stream output = File.Create(outPath))
            {
                response.AudioContent.WriteTo(output);
                Console.WriteLine($"語音檔 { outPath } 轉換完畢. { Environment.NewLine }");
            }
        }
    }
}
/// <summary>Snippet for SynthesizeSpeechAsync</summary>
public async Task SynthesizeSpeechAsync()
{
    // Snippet: SynthesizeSpeechAsync(SynthesisInput, VoiceSelectionParams, AudioConfig, CallSettings)
    // Additional: SynthesizeSpeechAsync(SynthesisInput, VoiceSelectionParams, AudioConfig, CancellationToken)
    // Create client
    var textToSpeechClient = await TextToSpeechClient.CreateAsync();
    // Initialize request argument(s)
    var input = new SynthesisInput();
    var voice = new VoiceSelectionParams();
    var audioConfig = new AudioConfig();
    // Make the request
    var response = await textToSpeechClient.SynthesizeSpeechAsync(input, voice, audioConfig);
    // End snippet
}
protected override void DoSpeech(string culture, string chat)
{
    // Synthesizes `chat` in the requested culture with a female voice as
    // LINEAR16 audio and replaces the shared media file with the result.
    var response = _client.SynthesizeSpeech(
        new SynthesisInput { Text = chat },
        new VoiceSelectionParams { LanguageCode = culture, SsmlGender = SsmlVoiceGender.Female },
        new AudioConfig { AudioEncoding = AudioEncoding.Linear16 });

    // The lock serializes access to the media file across callers.
    lock (_mediaFileLock)
    {
        if (File.Exists(_mediaFilePath))
        {
            File.Delete(_mediaFilePath);
        }
        using (var output = File.Create(_mediaFilePath))
        {
            response.AudioContent.WriteTo(output);
        }
    }
}
public ResultModel<SynthesizeSpeechResponse> DownloadWord(SpeechModel model)
{
    // Synthesizes `model.Text` in the requested language/gender as MP3 and
    // returns the raw API response wrapped in a ResultModel; on failure the
    // offending language code is recorded and a failed result is returned.

    // Ensure the output directory exists. The original used File.Exists on a
    // directory path, which always returns false for directories;
    // Directory.CreateDirectory is idempotent, so call it unconditionally.
    Directory.CreateDirectory("wwwroot/assets/speeches/");
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", "./speech-key.json");
    try
    {
        var client = TextToSpeechClient.Create();
        var input = new SynthesisInput { Text = model.Text };
        var voiceSelection = new VoiceSelectionParams
        {
            LanguageCode = model.LanguageCode,
            SsmlGender = model.Gender
        };
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        return new ResultModel<SynthesizeSpeechResponse>
        {
            Data = client.SynthesizeSpeech(input, voiceSelection, audioConfig),
            Success = true
        };
    }
    catch (Exception ex)
    {
        // Record the failing language code for later inspection, then report failure.
        notFoundLanguageCodes.Add(new NotFoundLanguageCode { Code = model.LanguageCode, ErrorMessage = ex.Message });
        return new ResultModel<SynthesizeSpeechResponse> { Success = false, ErrorMessage = ex.Message };
    }
}
public ActionResult VocabularySounds()
{
    // Returns a URL to an MP3 pronunciation of the posted vocabulary string,
    // synthesizing and caching the file on first request.
    string webRootPath = _env.WebRootPath;
    string vocabularyText = Request.Form["vocabularystring"];
    string vocabularySound = webRootPath + "/assets/sounds/vocabulary/" + vocabularyText + ".mp3";
    string vocabularySoundHref = "http://localhost:5001" + "/assets/sounds/vocabulary/" + vocabularyText + ".mp3";

    // Only hit the (billed) TTS API when the file is not already cached on disk;
    // the original synthesized speech on every request and discarded the result
    // whenever the file existed.
    if (System.IO.File.Exists(vocabularySound) == false)
    {
        var client = TextToSpeechClient.Create();
        var input = new SynthesisInput { Text = vocabularyText };
        var voiceSelection = new VoiceSelectionParams
        {
            LanguageCode = "ja-JP",
            SsmlGender = SsmlVoiceGender.Female,
            Name = "ja-JP-Wavenet-A"
        };
        var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };
        var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);
        using var output = System.IO.File.Create(vocabularySound);
        response.AudioContent.WriteTo(output);
    }
    return new JsonResult(vocabularySoundHref);
}
// Converts `text` to speech and returns the path of the generated temp MP3 file.
public static string TextToSpeach(string text)
{
    Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", @"C:\TextToSpeach-b2d8743c4197.json");
    // Instantiate a client
    TextToSpeechClient client = TextToSpeechClient.Create();
    // Set the text input to be synthesized.
    SynthesisInput input = new SynthesisInput { Text = text };
    // en-US Wavenet female voice.
    VoiceSelectionParams voice = new VoiceSelectionParams
    {
        LanguageCode = "en-US",
        SsmlGender = SsmlVoiceGender.Neutral,
        Name = "en-US-Wavenet-F",
    };
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };

    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
    {
        Input = input,
        Voice = voice,
        AudioConfig = config
    });

    // GetTempFileName creates the file; File.Create truncates it before writing
    // (OpenWrite would leave stale trailing bytes if the file had content).
    string path = System.IO.Path.GetTempFileName();
    using (FileStream output = File.Create(path))
    {
        response.AudioContent.WriteTo(output);
        // Log the actual output path (the old message claimed 'sample.mp3').
        Console.WriteLine($"Audio content written to file '{path}'");
    }
    return path;
}
public string text_to_mp3(string text, Grpc.Core.Channel channel, string LanguageCode, string Gender, string Voice)
{
    // Synthesizes `text` as MP3 over the supplied gRPC channel, caches the audio
    // in the in-memory file dictionary, and returns the generated key/filename.
    TextToSpeechClient client = TextToSpeechClient.Create(channel);
    var input = new SynthesisInput { Text = text };

    // Default to a male voice unless "Female" is explicitly requested.
    SsmlVoiceGender gender = Gender == "Female" ? SsmlVoiceGender.Female : SsmlVoiceGender.Male;
    var voiceSelection = new VoiceSelectionParams
    {
        LanguageCode = LanguageCode,
        SsmlGender = gender,
        Name = Voice
    };
    var audioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };

    var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

    string filename = Guid.NewGuid().ToString() + ".mp3";
    MemoryStream newTextToSpeech = new MemoryStream();
    response.AudioContent.WriteTo(newTextToSpeech);
    // Rewind so consumers reading the cached stream start at the audio data
    // rather than at end-of-stream (WriteTo leaves the position at the end).
    newTextToSpeech.Position = 0;

    // Add it to the dictionary and return the filename as the key.
    textToSpeechFiles[filename] = newTextToSpeech;
    return filename;
}
private void Start()
{
    // Hook up the send button: take the input field's text, clear the field,
    // and issue a synthesis request.
    button.onClick.AddListener(() =>
    {
        var text = inputField.text;
        if (string.IsNullOrEmpty(text))
        {
            return;
        }
        inputField.text = "";
        CreateRequest(text);
        Debug.Log($"Send Request: {text}");
    });

    // Load the service-account credentials from Resources and build an
    // authenticated gRPC channel to the TTS endpoint.
    var credentialStr = Resources.Load<TextAsset>(credential).text;
    var googleCredential = GoogleCredential.FromJson(credentialStr);
    _credentials = googleCredential.CreateScoped(GcpUrl).ToChannelCredentials();
    var channel = new Channel(ChannelTarget, _credentials);
    _client = new TextToSpeechClientImpl(new TextToSpeech.TextToSpeechClient(channel), new TextToSpeechSettings());

    // Audio options: 44.1 kHz LINEAR16 PCM.
    _audioConfig = new AudioConfig()
    {
        AudioEncoding = AudioEncoding.Linear16,
        SampleRateHertz = 44100
    };

    // Voice parameters; available voices are listed at
    // https://cloud.google.com/text-to-speech/docs/voices
    _voiceSelectionParams = new VoiceSelectionParams()
    {
        SsmlGender = SsmlVoiceGender.Female,
        Name = "ja-JP-Wavenet-B",
        LanguageCode = "ja-JP"
    };

    // Capture the main-thread context for later marshalling.
    _context = SynchronizationContext.Current;
}
internal string Convert(string v)
{
    // Synthesizes `v` with the default en-US neutral voice, saves the MP3 as
    // "sample.mp3" in the working directory, and returns that filename.
    var client = TextToSpeechClient.Create();
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = v },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            SsmlGender = SsmlVoiceGender.Neutral
        },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 }
    };

    var response = client.SynthesizeSpeech(request);

    using (Stream output = File.Create("sample.mp3"))
    {
        response.AudioContent.WriteTo(output);
        Console.WriteLine($"Audio content written to file 'sample.mp3'");
    }
    return "sample.mp3";
}
// Synthesizes `text` with the en-AU Wavenet-B voice and writes the MP3 to the
// fixed OneDrive voice-over directory under `filename`.
static void synth(string text, string filename)
{
    var client = TextToSpeechClient.Create();
    var input = new SynthesisInput { Text = text };
    var voice = new VoiceSelectionParams
    {
        LanguageCode = "en-AU",
        Name = "en-AU-Wavenet-B"
    };
    var config = new AudioConfig
    {
        AudioEncoding = AudioEncoding.Mp3,
        Pitch = 1,
        SpeakingRate = 0.95
    };

    var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
    {
        Input = input,
        Voice = voice,
        AudioConfig = config
    });

    string outputPath = $"{Environment.GetFolderPath(Environment.SpecialFolder.UserProfile)}\\OneDrive\\DaVinci Design\\VO\\GeneratedGCloud\\For video\\" + filename;
    using (Stream output = File.Create(outputPath))
    {
        response.AudioContent.WriteTo(output);
        // Report the actual file written (the old message printed a literal "(unknown)").
        Console.WriteLine($"Audio content written to file '{filename}'");
    }
}
private void Request(string text)
{
    // Synthesizes `text` with the instance-configured language/gender and
    // writes the MP3 to "sample.mp3" in the working directory.
    var client = TextToSpeechClient.Create();
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput
        {
            Text = text,
        },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = languageCode,
            SsmlGender = ssmiGender,
        },
        AudioConfig = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 }
    };

    var response = client.SynthesizeSpeech(request);

    using (Stream output = File.Create("sample.mp3"))
    {
        response.AudioContent.WriteTo(output);
        Debug.WriteLine($"Audio content written to file 'sample.mp3'");
    }
}
/// <summary>
/// Calls the Google Cloud Text-to-Speech API and writes an MP3 audio file
/// named "{id}.mp3" for the given text.
/// NOTE: requires application-default credentials on the machine, see
/// https://cloud.google.com/text-to-speech/docs/quickstart-protocol
/// </summary>
/// <param name="id">Identifier used as the output file name.</param>
/// <param name="text">Text to synthesize.</param>
/// <returns>A task that completes when the file has been written.</returns>
public async Task GetSpeechFile(int id, string text)
{
    // Instantiate a client
    TextToSpeechClient client = TextToSpeechClient.Create();
    SynthesisInput input = new SynthesisInput { Text = text };
    // en-US Wavenet-D voice, MP3 output.
    VoiceSelectionParams voice = new VoiceSelectionParams
    {
        LanguageCode = "en-US",
        SsmlGender = SsmlVoiceGender.Neutral,
        Name = "en-US-Wavenet-D"
    };
    AudioConfig config = new AudioConfig { AudioEncoding = AudioEncoding.Mp3 };

    // Await the async API: the original async method contained no await at all
    // (compiler warning CS1998) and blocked on the synchronous call.
    var response = await client.SynthesizeSpeechAsync(new SynthesizeSpeechRequest
    {
        Input = input,
        Voice = voice,
        AudioConfig = config
    });

    // Write the binary AudioContent of the response to an MP3 file.
    using (Stream output = File.Create(@"C:\Users\dylro\source\repos\QuestSpeak\QuestSpeak\audio\" + id + ".mp3"))
    {
        response.AudioContent.WriteTo(output);
        Console.WriteLine(id);
    }
}
public static Stream CreаteStreаmAudiо(string text)
{
    // Synthesizes `text` with the en-US female voice as LINEAR16 audio and
    // returns it in a MemoryStream rewound to the start.
    var synthesisInput = new SynthesisInput { Text = text };
    var voiceParams = new VoiceSelectionParams
    {
        LanguageCode = "en-US",
        SsmlGender = SsmlVoiceGender.Female
    };
    var pcmConfig = new AudioConfig { AudioEncoding = AudioEncoding.Linear16 };

    var response = client.SynthesizeSpeech(synthesisInput, voiceParams, pcmConfig);

    var audioStream = new MemoryStream();
    response.AudioContent.WriteTo(audioStream);
    audioStream.Position = 0; // rewind so callers read from the beginning
    return audioStream;
}
private void CreateAudioFromSpeech(int amountOfJumps, string fileName)
{
    // Synthesizes the jump-count announcement (per the injected voice
    // configuration) to an MP3 under the app's Sounds folder and registers a
    // media player for it in the `sounds` dictionary.
    var request = new SynthesizeSpeechRequest
    {
        Input = new SynthesisInput { Text = voiceConfiguration.GetTextToSpeak(amountOfJumps) },
        Voice = new VoiceSelectionParams
        {
            LanguageCode = voiceConfiguration.SpeechLanguage(),
            SsmlGender = MapToGoogleGender(voiceConfiguration.VoiceGender()),
            Name = voiceConfiguration.VoiceName()
        },
        AudioConfig = new AudioConfig
        {
            AudioEncoding = AudioEncoding.Mp3,
            SpeakingRate = 1.25 // slightly faster than default
        }
    };

    var response = client.SynthesizeSpeech(request);

    using (Stream output = File.Create(AppDomain.CurrentDomain.BaseDirectory + @"\Sounds\" + fileName))
    {
        response.AudioContent.WriteTo(output);
        sounds.Add(fileName, CreateMediaPlayerFromAudio(fileName));
    }
}