Example #1
        public void cevapla(string soru)
        {
            string text = "";

            mediaPlayer.Close();
            if (String.IsNullOrEmpty(soru))
            {
                text = "yoksa bana küstün mü ?";
            }
            else
            {
                try
                {
                    soru = soru.Substring(0, yazi.Length - 6);
                    Console.WriteLine("soru :" + soru);
                    text = db.sor(soru.ToLower());

                    cevaplayamadim         = false;
                    durdurma               = false;
                    durdurmaBtn.Visibility = Visibility.Hidden;
                }
                catch (Exception e)
                {
                    text                   = "Anlamadım. Lütfen tekrar söyle.";
                    cevaplayamadim         = true;
                    durdurmaBtn.Visibility = Visibility.Visible;
                }
                finally
                {
                    db.bagKapat();
                }
            }
            yazi = text;
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "tr-TR",
                SsmlGender   = SsmlVoiceGender.Male
            };
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };
            SynthesisInput input = new SynthesisInput
            {
                Text = text
            };
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });

            using (Stream output = File.Create("C:\\Users\\corx\\source\\repos\\Selami\\Selami\\ses\\sample.mp3"))
            {
                response.AudioContent.WriteTo(output);
            }
            mediaPlayer.Open(new Uri("C:\\Users\\corx\\source\\repos\\Selami\\Selami\\ses\\sample.mp3"));
            mediaPlayer.Play();
        }
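The hard-coded user path above ties the example to one machine. As a rough sketch (reusing the response and mediaPlayer from the method above, and assuming a temp-directory location is acceptable), the file could instead be written under the system temp folder, much like the temp-file example further down this list:

        // Sketch: write the synthesized audio to the temp directory instead of a hard-coded user path.
        string tempPath = Path.Combine(Path.GetTempPath(), "sample.mp3");
        using (Stream output = File.Create(tempPath))
        {
            response.AudioContent.WriteTo(output);
        }
        mediaPlayer.Open(new Uri(tempPath));
        mediaPlayer.Play();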
Example #2
        private void Button_Click_Save(object sender, RoutedEventArgs e)
        {
            var text  = SpeechText.Text;
            var name  = (UseWaveNet.IsChecked ?? false) ? "ja-JP-Wavenet-A" : "ja-JP-Standard-A";
            var speed = SpeedSlider.Value;
            var pitch = PitchSlider.Value;

            var input = new SynthesisInput {
                Text = text
            };
            var voiceSection = new VoiceSelectionParams
            {
                Name         = name,
                LanguageCode = "ja-JP",
                SsmlGender   = SsmlVoiceGender.Female,
            };
            var audioConfig = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3,
                SpeakingRate  = speed,
                Pitch         = pitch,
            };

            var response = _Client.SynthesizeSpeech(input, voiceSection, audioConfig);
            var fileName = GetFileName();

            using (var output = File.Create($"mp3\\{fileName}"))
            {
                response.AudioContent.WriteTo(output);
            }

            // Update the display
            UpdateDisplayValues(fileName);
        }
        public void SimpleExample()
        {
            // Sample: SynthesizeSpeech
            // Additional: SynthesizeSpeech(SynthesisInput,VoiceSelectionParams,AudioConfig,CallSettings)
            TextToSpeechClient client = TextToSpeechClient.Create();
            // The input can be provided as text or SSML.
            SynthesisInput input = new SynthesisInput
            {
                Text = "This is a demonstration of the Google Cloud Text-to-Speech API"
            };
            // You can specify a particular voice, or ask the server to pick based
            // on specified criteria.
            VoiceSelectionParams voiceSelection = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
                SsmlGender   = SsmlVoiceGender.Female
            };
            // The audio configuration determines the output format and speaking rate.
            AudioConfig audioConfig = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };
            SynthesizeSpeechResponse response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

            using (Stream output = File.Create("sample.mp3"))
            {
                // response.AudioContent is a ByteString. This can easily be converted into
                // a byte array or written to a stream.
                response.AudioContent.WriteTo(output);
            }
            // End sample
        }
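The comment in SimpleExample notes that the input can be provided as text or SSML. A minimal sketch of the same request driven by SSML follows; the markup string and the <break> tag are illustrative assumptions, not part of the original sample:

        // Sketch: the same synthesis request, but with SSML input instead of plain text.
        TextToSpeechClient client = TextToSpeechClient.Create();
        SynthesisInput ssmlInput = new SynthesisInput
        {
            Ssml = "<speak>This is a demonstration of the <break time=\"300ms\"/> Google Cloud Text-to-Speech API</speak>"
        };
        VoiceSelectionParams voiceSelection = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            SsmlGender   = SsmlVoiceGender.Female
        };
        AudioConfig audioConfig = new AudioConfig
        {
            AudioEncoding = AudioEncoding.Mp3
        };
        SynthesizeSpeechResponse response = client.SynthesizeSpeech(ssmlInput, voiceSelection, audioConfig);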
        // [START tts_synthesize_text]
        /// <summary>
        /// Creates an audio file from the text input.
        /// </summary>
        /// <param name="text">Text to synthesize into audio</param>
        /// <remarks>
        /// Generates a file named 'output.mp3' in project folder.
        /// </remarks>
        public static void SynthesizeText(string text)
        {
            TextToSpeechClient client = TextToSpeechClient.Create();
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input = new SynthesisInput
                {
                    Text = text
                },
                // Note: voices can also be specified by name
                Voice = new VoiceSelectionParams
                {
                    LanguageCode = "en-US",
                    SsmlGender   = SsmlVoiceGender.Female
                },
                AudioConfig = new AudioConfig
                {
                    AudioEncoding = AudioEncoding.Mp3
                }
            });

            using (Stream output = File.Create("output.mp3"))
            {
                response.AudioContent.WriteTo(output);
            }
        }
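The "voices can also be specified by name" note in SynthesizeText refers to the Name property on VoiceSelectionParams. A short sketch follows; "en-US-Wavenet-F" is an illustrative choice, and any name reported by the client's ListVoices call for that language should work:

        // Sketch: selecting a specific voice by name rather than only by language and gender.
        VoiceSelectionParams namedVoice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            Name         = "en-US-Wavenet-F"
        };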
Example #5
        public static int Main(string[] args)
        {
            // Create client
            TextToSpeechClient client = TextToSpeechClient.Create();

            // Initialize request argument(s)
            SynthesisInput input = new SynthesisInput
            {
                Text = "test",
            };
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
            };
            AudioConfig audioConfig = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3,
            };

            // Call API method
            SynthesizeSpeechResponse response = client.SynthesizeSpeech(input, voice, audioConfig);

            // Show the result
            Console.WriteLine(response);

            // Success
            Console.WriteLine("Smoke test passed OK");
            return(0);
        }
Example #6
        public void TextSpeech(string text)
        {
            TextToSpeechClient client = TextToSpeechClient.Create();

            SynthesisInput input = new SynthesisInput
            {
                Text = text
            };

            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "ja-JP",
                SsmlGender   = SsmlVoiceGender.Female
            };

            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Linear16,
            };

            var response = client.SynthesizeSpeech(
                input,
                voice,
                config
                );

            // LINEAR16 responses include a WAV header, so SoundPlayer can read the stream directly.
            using (var memoryStream = new MemoryStream(response.AudioContent.ToByteArray()))
            {
                var player = new System.Media.SoundPlayer(memoryStream);
                player.Load();   // load the audio while the stream is still open
                Console.Write("Play");
                player.Play();
            }
        }
Example #7
        private byte[] TextToSpeech(string text)
        {
            var input = new SynthesisInput
            {
                Text = text
            };

            var voice = new VoiceSelectionParams
            {
                LanguageCode = _language.SpeechCode,
                Name         = _language.SpeechName
            };

            var config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Linear16
            };

            return(_speech.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input = input,
                Voice = voice,
                AudioConfig = config
            }).AudioContent.ToByteArray());
        }
Example #8
        /// <summary>
        /// Using environment variables for credentials, connects to the Google API to synthesize text to speech
        /// </summary>
        /// <param name="text">Text to synthesize</param>
        private static void synthesizeVoice(string text)
        {
            TextToSpeechClient client = TextToSpeechClient.Create();
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input = new SynthesisInput
                {
                    Text = text
                },
                // Note: voices can also be specified by name
                Voice = new VoiceSelectionParams
                {
                    LanguageCode = "en-US",
                    Name         = "en-US-Wavenet-F"
                },
                AudioConfig = new AudioConfig
                {
                    AudioEncoding   = AudioEncoding.Mp3,
                    SampleRateHertz = 32000,
                    SpeakingRate    = 1.0
                }
            });

            Debugger.Write("Successfully downloaded voice audio from google api");

            using (Stream output = File.Create($"stored_responses/{text.Replace(' ','_')}.mp3"))
            {
                response.AudioContent.WriteTo(output);
            }
        }
Example #9
        private static byte[] TextToSpeech(string text, Language lang)
        {
            var input = new SynthesisInput
            {
                Text = text
            };

            var voice = new VoiceSelectionParams
            {
                LanguageCode = lang.Speech,
                Name         = lang.Name
            };

            var config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            return(_tts.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input = input,
                Voice = voice,
                AudioConfig = config
            }).AudioContent.ToByteArray());
        }
Example #10
        public static string Speak(string txt, QuestToSpeech.Voice voice, string filePath)
        {
            try {
                TextToSpeechClient client = TextToSpeechClient.Create();

                SynthesizeSpeechResponse res = client.SynthesizeSpeech(new SynthesizeSpeechRequest {
                    Input = new SynthesisInput {
                        Text = txt
                    },
                    Voice = new VoiceSelectionParams {
                        Name         = voice.Name,
                        LanguageCode = voice.LangCode,
                        SsmlGender   = voice.Gender == QuestToSpeech.Gender.Female ? SsmlVoiceGender.Female : SsmlVoiceGender.Male
                    },
                    AudioConfig = new AudioConfig {
                        AudioEncoding = AudioEncoding.Linear16
                    }
                });

                using (FileStream output = File.Create(filePath)) {
                    res.AudioContent.WriteTo(output);
                }
            } catch (Exception ex) {
                return(string.Format("GoogleTTS Exception: {0}", ex.InnerException == null ? ex.Message : ex.InnerException.ToString()));
            }

            return(null);
        }
Example #11
        private void TTS_Button_Click(object sender, RoutedEventArgs e)
        {
            // Instantiate a client
            TextToSpeechClient client = TextToSpeechClient.Create();

            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = "N Pay구매하신 상품의구매확정처리부탁드립니다.상품을 받으신 후 만족하셨다면 구매확정을 부탁드립니다."
                       + "아래 기한까지 구매확정을 하지 않으실 경우,이후 자동으로 구매가 확정될 예정입니다."
                       + "만일,구매확정기한 내 정상적으로 상품을 수령하지 못하신 경우에는 판매자문의 또는 구매확정 연장을 해주세요."
                       + "고객명 이 * 연님주문번호    2019100971174081주문일자    2019.10.09 23:13발송일자    2019.10.10자동구매확정일 2019.10.19"
                       + "결제정보총 주문금액  12,100원할인금액    0원환불정산액 / 포인트   0원 / 2,394원결제수단    신용카드"
                       + "최종결제금액  9,706원배송정보수령인 이*연연락처 010 - 5234 - ****배송지 14305경기도 광명시 금당로 11(하안동, 하안6단지고층주공아파트)"
                       + "603동****배송메모발송상품상품이미지애플 인증 고속충전 정품 1.2m 2m 아이패드 아이폰 케이블"
                       + "옵션 : 옵션선택: mfi인증 메탈릭1.2m_다크그레이주문금액    9,600원수량  1"
            };

            // Build the voice request, select the language code ("ko-KR"),
            // and the SSML voice gender ("neutral").
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "ko-KR",
                SsmlGender   = SsmlVoiceGender.Neutral
            };

            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });

            // Write the binary AudioContent of the response to an MP3 file.
            using (Stream output = File.Create("C:\\Users\\이제연\\Desktop\\sample.mp3"))
            {
                response.AudioContent.WriteTo(output);
            }
            mciSendString("open \"" + "C:\\Users\\이제연\\Desktop\\sample.mp3" + "\" type mpegvideo alias MediaFile", null, 0, IntPtr.Zero);

            StringBuilder returnData = new StringBuilder(128);

            mciSendString("status MediaFile length", returnData, returnData.Capacity, IntPtr.Zero);
            int nMilliSecond = Convert.ToInt32(returnData.ToString());

            mciSendString("play MediaFile", null, 0, IntPtr.Zero);

            Thread thread = new Thread(() => _deleteMp3File("C:\\Users\\이제연\\Desktop\\sample.mp3", nMilliSecond));

            thread.Start();
        }
    void Start()
    {
        #region Environment Variable
        if (!File.Exists(credentialsPath))
        {
            Debug.LogError("failure: " + credentialsPath);
            return;
        }
        else
        {
            Debug.Log("success: " + credentialsPath);
        }
        Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", credentialsPath);
        #endregion

        #region QuickStart
        // Instantiate a client
        TextToSpeechClient client = TextToSpeechClient.Create();

        // Set the text input to be synthesized.
        SynthesisInput input = new SynthesisInput
        {
            Text = "Hello, World!"
        };

        // Build the voice request, select the language code ("en-US"),
        // and the SSML voice gender ("neutral").
        VoiceSelectionParams voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            SsmlGender   = SsmlVoiceGender.Neutral
        };

        // Select the type of audio file you want returned.
        AudioConfig config = new AudioConfig
        {
            AudioEncoding = AudioEncoding.Mp3
        };

        // Perform the Text-to-Speech request, passing the text input
        // with the selected voice parameters and audio file type
        var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
        {
            Input       = input,
            Voice       = voice,
            AudioConfig = config
        });

        // Write the binary AudioContent of the response to an MP3 file.
        using (Stream output = File.Create(saveFile))
        {
            response.AudioContent.WriteTo(output);
            Debug.Log("Audio content written to file " + saveFile);
        }
        #endregion
    }
Example #13
File: Program.cs Project: jebron/TTSClient
        public void convertTextToSpeech()
        {
            // Declare variables for the message to be converted and the filename to be saved
            string messageText;
            string fileName;

            Console.Clear();

            // Gather information from the user
            Console.Write("Enter the message to convert to audio: ");
            messageText = Console.ReadLine();
            Console.Write("Enter the name of the file: ");
            fileName = Console.ReadLine();

            // Create a new TextToSpeechClient called client
            TextToSpeechClient client = TextToSpeechClient.Create();

            // Assign the user input message to Text (Text is used in the API as the message)
            SynthesisInput input = new SynthesisInput
            {
                Text = messageText
            };

            // Build the voice request and set the parameters that differ from default settings
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
                Name         = "en-US-Wavenet-C",
                SsmlGender   = SsmlVoiceGender.Female
            };

            // Select the speaking rate and type of audio file you want returned
            AudioConfig config = new AudioConfig
            {
                SpeakingRate  = 1.0,
                AudioEncoding = AudioEncoding.Linear16
            };

            // Perform the Text-to-Speech request, passing the text input (SynthesisInput)
            // with the selected voice parameters (VoiceSelectionParams) and audio file type (AudioConfig)
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });

            // Write the binary AudioContent of the response to a WAV file.
            using (Stream output = File.Create(fileName + ".wav"))
            {
                response.AudioContent.WriteTo(output);
                Console.WriteLine("\r\nAudio content written to " + fileName + ".wav");
            }
        }
 /// <summary>Snippet for SynthesizeSpeech</summary>
 public void SynthesizeSpeech()
 {
     // Snippet: SynthesizeSpeech(SynthesisInput, VoiceSelectionParams, AudioConfig, CallSettings)
     // Create client
     TextToSpeechClient textToSpeechClient = TextToSpeechClient.Create();
     // Initialize request argument(s)
     SynthesisInput       input       = new SynthesisInput();
     VoiceSelectionParams voice       = new VoiceSelectionParams();
     AudioConfig          audioConfig = new AudioConfig();
     // Make the request
     SynthesizeSpeechResponse response = textToSpeechClient.SynthesizeSpeech(input, voice, audioConfig);
     // End snippet
 }
        public bool generateTTSSoundFile(string outputFileName, string ssml, string usedLanguageCode, string voiceName)
        {
            try
            {
                var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
                {
                    Input = new SynthesisInput
                    {
                        Ssml = ssml
                    },
                    // Note: voices can also be specified by name
                    Voice = new VoiceSelectionParams
                    {
                        LanguageCode = usedLanguageCode,
                        Name         = voiceName
                    },
                    AudioConfig = new AudioConfig
                    {
                        AudioEncoding = AudioEncoding.Mp3
                    }
                });

                using (Stream output = File.Create(outputFileName))
                {
                    response.AudioContent.WriteTo(output);
                }
                return(true);
            }
            catch (Grpc.Core.RpcException ex)
            {
                if (ex.StatusCode == Grpc.Core.StatusCode.ResourceExhausted)
                {
                    MessageBoxResult result = MessageBox.Show("Google Cloud rejected the request because too many requests were sent at once. Try generating fewer files at a time, or wait a moment and try again. Retry?", "Retry", MessageBoxButton.YesNo);
                    if (result == MessageBoxResult.Yes)
                    {
                        return generateTTSSoundFile(outputFileName, ssml, usedLanguageCode, voiceName);
                    }
                }
                else
                {
                    MessageBox.Show("An error occurred: " + ex.Message);
                }
            }
            catch (Exception ex)
            {
                MessageBox.Show("An error occurred: " + ex.Message);
            }
            return(false);
        }
Example #16
        private void button1_Click(object sender, EventArgs e)
        {
            string inpTemp = input.Text;

            string inpStr = xuly(inpTemp);

            long inp;
            bool success = Int64.TryParse(inpStr, out inp);

            // Bail out if the input could not be parsed as a number.
            if (!success)
            {
                return;
            }

            if (viBtn.Checked == true)
            {
                String payload = ChuyenSoSangChuoi(inp);


                TextToSpeechClient client = TextToSpeechClient.Create();

                SynthesizeSpeechResponse response = client.SynthesizeSpeech(
                    new SynthesisInput()
                {
                    Text = payload
                },
                    new VoiceSelectionParams()
                {
                    LanguageCode = "vi-VN",
                    Name         = "vi-VN-Standard-A"
                },
                    new AudioConfig()
                {
                    AudioEncoding = AudioEncoding.Linear16
                }
                    );

                string speechFile = Path.Combine(Directory.GetCurrentDirectory(), "sample.wav");

                File.WriteAllBytes(speechFile, response.AudioContent.ToByteArray());


                System.Media.SoundPlayer player = new System.Media.SoundPlayer(speechFile);
                player.Play();
            }
            else
            {
                var synthesizer = new SpeechSynthesizer();
                synthesizer.SetOutputToDefaultAudioDevice();
                synthesizer.Speak(NumberToWords(inp));
            }
        }
 /// <summary>Snippet for SynthesizeSpeech</summary>
 public void SynthesizeSpeech_RequestObject()
 {
     // Snippet: SynthesizeSpeech(SynthesizeSpeechRequest, CallSettings)
     // Create client
     TextToSpeechClient textToSpeechClient = TextToSpeechClient.Create();
     // Initialize request argument(s)
     SynthesizeSpeechRequest request = new SynthesizeSpeechRequest
     {
         Input       = new SynthesisInput(),
         Voice       = new VoiceSelectionParams(),
         AudioConfig = new AudioConfig(),
     };
     // Make the request
     SynthesizeSpeechResponse response = textToSpeechClient.SynthesizeSpeech(request);
     // End snippet
 }
Example #18
        protected override void DoSpeech(string culture, string chat)
        {
            // The input to be synthesized, can be provided as text or SSML.
            var input = new SynthesisInput
            {
                Text = chat
            };

            // Build the voice request.
            var voiceSelection = new VoiceSelectionParams
            {
                LanguageCode = culture,
                SsmlGender   = SsmlVoiceGender.Female
            };

            // Specify the type of audio file.
            var audioConfig = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Linear16
            };

            // Perform the text-to-speech request.
            var response = _client.SynthesizeSpeech(input, voiceSelection, audioConfig);

            lock (_mediaFileLock)
            {
                if (File.Exists(_mediaFilePath))
                {
                    File.Delete(_mediaFilePath);
                }

                // Write the response to the output file.
                using (var output = File.Create(_mediaFilePath))
                {
                    response.AudioContent.WriteTo(output);
                    //output.Close();
                }

                //WavPlayer player = new WavPlayer(_mediaFilePath);
                //player.Play();

                ////Console.WriteLine("Audio content written to file \"output.mp3\"");

                //player.Dispose();
                //player = null;
            }
        }
    // Takes text and returns the path of a temporary audio file.
    public static string TextToSpeach(string text)
    {
        Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", @"C:\TextToSpeach-b2d8743c4197.json");
        // Instantiate a client
        TextToSpeechClient client = TextToSpeechClient.Create();
        // Set the text input to be synthesized.
        SynthesisInput input = new SynthesisInput
        {
            Text = text
        };

        // Build the voice request, select the language code ("en-US"),
        // and the SSML voice gender ("neutral").
        VoiceSelectionParams voice = new VoiceSelectionParams
        {
            LanguageCode = "en-US",
            SsmlGender = SsmlVoiceGender.Neutral,
            Name = "en-US-Wavenet-F",
        };

        // Select the type of audio file you want returned.
        AudioConfig config = new AudioConfig
        {
            AudioEncoding = AudioEncoding.Mp3
        };

        // Perform the Text-to-Speech request, passing the text input
        // with the selected voice parameters and audio file type
        var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
        {
            Input = input,
            Voice = voice,
            AudioConfig = config
        });
        string url = "";
        // Write the binary AudioContent of the response to an MP3 file.
        string path = System.IO.Path.GetTempFileName();
      
        using (FileStream output = File.OpenWrite(path))
        {
            response.AudioContent.WriteTo(output);
            url = output.Name;
            Console.WriteLine("Audio content written to file: " + path);
        }
        return url;
    }
        public string text_to_mp3(string text, Grpc.Core.Channel channel, string LanguageCode, string Gender, string Voice)
        {
            TextToSpeechClient client = TextToSpeechClient.Create(channel);

            var input = new SynthesisInput
            {
                Text = text
            };

            SsmlVoiceGender gender;

            if (Gender == "Female")
            {
                gender = SsmlVoiceGender.Female;
            }
            else
            {
                gender = SsmlVoiceGender.Male;
            }

            var voiceSelection = new VoiceSelectionParams
            {
                LanguageCode = LanguageCode,
                SsmlGender   = gender,
                Name         = Voice
            };
            var audioConfig = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

            string filename = Guid.NewGuid().ToString() + ".mp3";

            MemoryStream newTextToSpeech = new MemoryStream();

            response.AudioContent.WriteTo(newTextToSpeech);

            //Add it to the dictionary
            textToSpeechFiles[filename] = newTextToSpeech;

            //and return the filename as the key
            return(filename);
        }
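Because text_to_mp3 keeps the audio only in the textToSpeechFiles dictionary, a caller would look the MemoryStream up by the returned key. A minimal sketch under that assumption (the rewind matters because WriteTo leaves the stream position at the end):

        // Sketch: reading a generated MP3 back out of the in-memory dictionary by its key.
        if (textToSpeechFiles.TryGetValue(filename, out MemoryStream stored))
        {
            stored.Position = 0;                      // rewind before reading the stream back
            using (var file = File.Create(filename))  // e.g. persist it to disk
            {
                stored.CopyTo(file);
            }
        }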
Example #21
        private void Speak(string text)
        {
            if (client == null)
            {
                return;
            }

            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = text
            };


            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3,
                // Pitch = -5,
                SpeakingRate = Rate
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            SynthesizeSpeechResponse response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input = input,
                Voice = new VoiceSelectionParams
                {
                    LanguageCode = languages[Settings.Default.languageIndex],
                    Name         = voiceTypes[Settings.Default.useWavenetVoices ? 0 : 1, Settings.Default.languageIndex, Settings.Default.ttsVoice]
                },
                AudioConfig = config
            });

            // Write the AudioContent of the response to an MP3 file.
            string filePath = Path.Combine(Path.GetTempPath(), "ttsoutput64_" + DateTime.Now.ToFileTime() + ".mp3");

            File.WriteAllBytes(filePath, response.AudioContent.ToByteArray());

            // play the file
            ttsQueue.Enqueue(filePath);
        }
        internal string Convert(string v)
        {
            // Instantiate a client
            TextToSpeechClient client = TextToSpeechClient.Create();

            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = v
            };

            // Build the voice request, select the language code ("en-US"),
            // and the SSML voice gender ("neutral").
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
                SsmlGender   = SsmlVoiceGender.Neutral
            };

            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });


            // Write the binary AudioContent of the response to an MP3 file.
            using (Stream output = File.Create("sample.mp3"))
            {
                response.AudioContent.WriteTo(output);
                Console.WriteLine("Audio content written to file 'sample.mp3'");
            }

            return("sample.mp3");
        }
Example #23
        /// <summary>
        ///  Calls out to the Google Cloud Text-to-Speech API and creates an audio file from text.
        ///  NOTE: this only works if you have set up a key on your machine; I believe the docs are here:
        ///  https://cloud.google.com/text-to-speech/docs/quickstart-protocol
        /// </summary>
        /// <param name="id">Identifier used to name the output MP3 file</param>
        /// <param name="text">Text to synthesize into audio</param>
        /// <returns></returns>
        public async Task GetSpeechFile(int id, string text)
        {
            // Instantiate a client
            TextToSpeechClient client = TextToSpeechClient.Create();

            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = text
            };

            // Build the voice request, select the language code ("en-US"),
            // and the SSML voice gender ("neutral").
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
                SsmlGender   = SsmlVoiceGender.Neutral,
                Name         = "en-US-Wavenet-D"
            };

            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });

            // Write the binary AudioContent of the response to an MP3 file.
            using (Stream output = File.Create(@"C:\Users\dylro\source\repos\QuestSpeak\QuestSpeak\audio\" + id + ".mp3"))
            {
                response.AudioContent.WriteTo(output);
                Console.WriteLine(id);
            }
        }
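The doc comment above notes that a service-account key must be configured. As other examples in this list do, the key can be supplied through the GOOGLE_APPLICATION_CREDENTIALS environment variable before the client is created; the key path below is an illustrative assumption:

        // Sketch: point the client library at a service-account key file, then create the client.
        Environment.SetEnvironmentVariable("GOOGLE_APPLICATION_CREDENTIALS", @"C:\keys\text-to-speech.json");
        TextToSpeechClient client = TextToSpeechClient.Create();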
Example #24
        private void CreateAudioFromSpeech(int amountOfJumps, string fileName)
        {
            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = voiceConfiguration.GetTextToSpeak(amountOfJumps)
            };

            // Build the voice request using the configured speech language,
            // voice gender, and voice name.
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = voiceConfiguration.SpeechLanguage(),
                SsmlGender   = MapToGoogleGender(voiceConfiguration.VoiceGender()),
                Name         = voiceConfiguration.VoiceName()
            };

            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3,
                SpeakingRate  = 1.25
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });


            // Write the binary AudioContent of the response to an MP3 file.
            using (Stream output = File.Create(AppDomain.CurrentDomain.BaseDirectory + @"\Sounds\" + fileName))
            {
                response.AudioContent.WriteTo(output);
                sounds.Add(fileName, CreateMediaPlayerFromAudio(fileName));
            }
        }
        public static Stream CreateStreamAudio(string text)
        {
            SynthesisInput input = new SynthesisInput
            {
                Text = text
            };

            VoiceSelectionParams voiceSelection = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
                SsmlGender   = SsmlVoiceGender.Female
            };

            AudioConfig audioConfig = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Linear16
            };

            var response = client.SynthesizeSpeech(input, voiceSelection, audioConfig);

            //SynthesizeSpeechRequest request = new SynthesizeSpeechRequest();
            //request.Input = new SynthesisInput();
            //request.Input.Text = text;

            //request.AudioConfig = new AudioConfig();
            //request.AudioConfig.AudioEncoding = AudioEncoding.Linear16;
            //request.AudioConfig.SampleRateHertz = 44100;


            //request.Voice = new VoiceSelectionParams();
            //request.Voice.LanguageCode = "yue-Hant-HK";
            //request.Voice.SsmlGender = SsmlVoiceGender.Female;
            //SynthesizeSpeechResponse response = client.SynthesizeSpeech(request);

            Stream stream = new MemoryStream();

            response.AudioContent.WriteTo(stream);
            stream.Position = 0;

            return(stream);
        }
Example #26
        private byte[] GetGoogleSpeech(string speechText, string languageCode)
        {
            string             path       = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), @"SpellingApp-7fc0cf8b5885.json");
            var                credential = GoogleCredential.FromFile(path);
            var                channel    = new Grpc.Core.Channel(TextToSpeechClient.DefaultEndpoint.ToString(), credential.ToChannelCredentials());
            TextToSpeechClient client     = TextToSpeechClient.Create(channel);

            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = speechText
            };

            // Build the voice request, using the requested language code
            // and the SSML voice gender ("neutral").
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = languageCode,
                SsmlGender   = SsmlVoiceGender.Neutral
            };

            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });

            // Return the binary AudioContent of the response as a byte array.

            return(response.AudioContent.ToByteArray());
        }
Example #27
        public static void CreateSpeechFile(string inputText, string filePath)
        {
            TextToSpeechClient client = TextToSpeechClient.Create();

            // Set the text input to be synthesized.
            SynthesisInput input = new SynthesisInput
            {
                Text = inputText
            };

            // Build the voice request, select the language code ("en-US"),
            // and the SSML voice gender ("neutral").
            VoiceSelectionParams voice = new VoiceSelectionParams
            {
                LanguageCode = "en-US",
                SsmlGender   = SsmlVoiceGender.Neutral
            };

            // Select the type of audio file you want returned.
            AudioConfig config = new AudioConfig
            {
                AudioEncoding = AudioEncoding.Mp3
            };

            // Perform the Text-to-Speech request, passing the text input
            // with the selected voice parameters and audio file type
            var response = client.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input       = input,
                Voice       = voice,
                AudioConfig = config
            });

            // Write the binary AudioContent of the response to an MP3 file.
            using (Stream output = File.Create(filePath))
            {
                response.AudioContent.WriteTo(output);
            }
        }
Example #28
        public async Task Speak(string text, string fileName = "")
        {
            var input = new SynthesisInput()
            {
                Text = text
            };
            var request = new SynthesizeSpeechRequest()
            {
                Input       = input,
                Voice       = Voice,
                AudioConfig = Config
            };

            // make the request to Google
            var response = Client.SynthesizeSpeech(request);

            // limits entry to single thread
            await Semaphore.WaitAsync().ConfigureAwait(false);

            try
            {
                if (!string.IsNullOrWhiteSpace(fileName))
                {
                    await PlayWindowsMediaFile(fileName).ConfigureAwait(false);
                }

                using (var output = File.Create(SpeechAudioFileName))
                {
                    response.AudioContent.WriteTo(output);
                }

                await PlayAudioFile(SpeechAudioFileName).ConfigureAwait(false);
            }
            finally
            {
                Semaphore.Release();
            }
        }
Example #29
        public static void TextToSpeech(string text, string languageCode, SsmlVoiceGender gender, string serviceAcc)
        {
            GoogleCredential credentials = GoogleCredential.FromFile(serviceAcc);

            TextToSpeechClient client = TextToSpeechClient.Create(credentials);

            SynthesizeSpeechResponse response = client.SynthesizeSpeech(
                new SynthesisInput()
            {
                Text = text
            },
                new VoiceSelectionParams()
            {
                LanguageCode = languageCode,
                SsmlGender   = gender
            },
                new AudioConfig()
            {
                AudioEncoding = AudioEncoding.Linear16
            }
                );

            string speechFile = Path.Combine(Directory.GetCurrentDirectory(), "sample.wav");

            File.WriteAllBytes(speechFile, response.AudioContent.ToByteArray());
            System.Media.SoundPlayer player = new System.Media.SoundPlayer();

            player.SoundLocation = speechFile;
            player.PlaySync();
            try
            {
                File.Delete(speechFile);
            }
            catch
            {
                Console.WriteLine("Cannot delete the file");
            }
        }
Example #30
        /// <summary>
        /// Handles pipeline packages of text. Translates text to audio and sends audio bytes
        /// down the pipeline
        /// </summary>
        /// <param name="text">Text to turn into audio</param>
        /// <param name="e">Pipeline Envelope</param>
        protected override void Receive(string text, Envelope e)
        {
            var response = gClient.SynthesizeSpeech(new SynthesizeSpeechRequest
            {
                Input = new SynthesisInput
                {
                    Text = text
                },
                Voice = new VoiceSelectionParams
                {
                    LanguageCode = this.TextLanguageCode,
                    SsmlGender   = SsmlVoiceGender.Female
                },
                AudioConfig = new AudioConfig
                {
                    AudioEncoding = this.googleAudioFormat
                }
            });

            AudioBuffer textAudio = new AudioBuffer(response.AudioContent.ToByteArray(), this.format);

            this.Out.Post(textAudio, e.OriginatingTime);
        }