/// <summary>
/// Speaks the given speech: fragments that have a bookmark are emitted one at a
/// time with their bookmark; once a fragment without a bookmark is reached, the
/// remaining text is flushed as a single markup chunk.
/// </summary>
/// <param name="speech">Text fragments plus optional per-fragment bookmark names.</param>
public override void Speak(SpeechClient.Speech speech)
{
    try
    {
        PromptBuilder p = new PromptBuilder();
        p.Culture = tts.Voice.Culture;
        p.StartVoice(p.Culture);
        p.StartSentence();
        p.StartStyle(new PromptStyle(PromptEmphasis.None));
        for (int i = 0; i < speech.Text.Length; i++)
        {
            if (speech.Bookmarks == null || speech.Bookmarks.Length < i + 1 || speech.Bookmarks[i] == "")
            {
                // No bookmark for this fragment: emit the rest in one chunk.
                // string.Join replaces the original O(n^2) += loop but keeps the
                // same space-separated, trailing-space layout.
                p.AppendSsmlMarkup(string.Join(" ", speech.Text, i, speech.Text.Length - i) + " ");
                break;
            }
            else
            {
                p.AppendSsmlMarkup(speech.Text[i]);
                p.AppendBookmark(speech.Bookmarks[i]);
            }
        }
        p.EndStyle();
        p.EndSentence();
        p.EndVoice();
        currentSpeech = speech;
        // Only track prompts that carry an id so bookmark events can be routed back.
        if (speech.Id != "")
            ids.Add(tts.SpeakAsync(p), speech.Id);
        else
            tts.SpeakAsync(p);
    }
    catch (Exception e)
    {
        Console.WriteLine("WindowsTTS Failed: " + e.Message);
    }
}
/// <summary>
/// Speaks one prompt containing two voice sections: an English sentence followed
/// by its Russian translation.
/// </summary>
private static void SpeakMultiLanguageSentence()
{
    synthesizer.SetOutputToDefaultAudioDevice();

    var prompt = new PromptBuilder();

    prompt.StartVoice(new CultureInfo("en-US"));
    prompt.AppendText("All we need to do is to keep talking.");
    prompt.EndVoice();

    prompt.StartVoice(new CultureInfo("ru-RU"));
    prompt.AppendText("Всё, что нам нужно сделать, это продолжать говорить");
    prompt.EndVoice();

    synthesizer.Speak(prompt);
}
/// <summary>
/// Left click: reads label1's text aloud with the Zira desktop voice;
/// any other mouse button hides the form.
/// </summary>
private void label1_MouseClick(object sender, MouseEventArgs e)
{
    if (e.Button != MouseButtons.Left)
    {
        this.Hide();
        return;
    }

    SpeechSynthesizer reader = new SpeechSynthesizer();
    var builder = new PromptBuilder();
    builder.StartVoice("Microsoft Zira Desktop");
    builder.AppendText(label1.Text);
    builder.EndVoice();
    // Dispose the synthesizer once async speech finishes (the original leaked it).
    reader.SpeakCompleted += (s, args) => reader.Dispose();
    reader.SpeakAsync(new Prompt(builder));
}
#region ISpeaker Members

/// <summary>
/// Converts text to an audio file: synthesizes a WAV stream in memory with a
/// neutral adult voice, then converts it to MP3.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <returns>MP3 file as a byte array, or null if synthesis/conversion failed.</returns>
public byte[] ConvertTextToAudio(string text)
{
    var pbuilder = new PromptBuilder();
    var pStyle = new PromptStyle
    {
        Emphasis = PromptEmphasis.None,
        Rate = PromptRate.Medium,
        Volume = PromptVolume.ExtraLoud
    };
    pbuilder.StartStyle(pStyle);
    pbuilder.StartParagraph();
    pbuilder.StartVoice(VoiceGender.Neutral, VoiceAge.Adult, 2);
    pbuilder.StartSentence();
    pbuilder.AppendText(text);
    pbuilder.EndSentence();
    pbuilder.EndVoice();
    pbuilder.EndParagraph();
    pbuilder.EndStyle();

    try
    {
        using (var memoryStream = new MemoryStream())
        using (var speech = new SpeechSynthesizer()) // original leaked the synthesizer
        {
            speech.SetOutputToWaveStream(memoryStream);
            speech.Speak(pbuilder);
            // Rewind so the converter reads the WAV from the start.
            memoryStream.Seek(0, SeekOrigin.Begin);
            return _mp3Converter.ConvertWavToMp3(memoryStream);
        }
    }
    catch (Exception e)
    {
        LoggerWrapper.LogTo(LoggerName.Errors).ErrorException(
            "Speaker.ConvertTextToAudio возникло исключение {0}", e);
    }
    return null;
}
/// <summary>
/// Builds a prompt from the tagged contents: plain text is appended as-is and
/// each recognized tag applies its command to the builder.
/// </summary>
public PromptBuilder ToPromptBuilder(String defaultVoice)
{
    PromptBuilder builder = new PromptBuilder(CultureUtil.GetOriginalCulture());
    builder.StartVoice(defaultVoice);

    int cursor = 0;
    foreach (ITag tag in GetTagsInText())
    {
        // Text between the previous tag (or start) and this tag is spoken literally.
        builder.AppendText(_contents.Substring(cursor, tag.Start - cursor));
        // Advance past the tag only when it applied successfully; a failed tag's
        // raw text stays in the stream for the next AppendText.
        if (tag.Apply(builder))
        {
            cursor = tag.End + 1;
        }
    }

    builder.AppendText(_contents.Substring(cursor).Trim());
    builder.EndVoice();
    return builder;
}
/// <summary>
/// Full-screen black form that announces an imminent shutdown and then schedules
/// it via a hidden cmd process (4 s timeout lets the speech finish first).
/// </summary>
public Form2()
{
    Cursor.Hide();
    InitializeComponent();
    BackColor = Color.Black;
    FormBorderStyle = FormBorderStyle.None;
    WindowState = FormWindowState.Maximized;
    TopMost = true;

    var synth = new SpeechSynthesizer();
    var builder = new PromptBuilder();
    builder.StartVoice("Microsoft Zira Desktop");
    builder.AppendText(System.Environment.MachineName + ", I am shutting down the system in 1, 2, 3.");
    builder.EndVoice();
    synth.SpeakAsync(new Prompt(builder));

    // Terminating system. Disposing the Process component does not kill the
    // spawned cmd; it only releases the handle the original leaked.
    var startInfo = new System.Diagnostics.ProcessStartInfo
    {
        WindowStyle = System.Diagnostics.ProcessWindowStyle.Hidden,
        FileName = "cmd.exe",
        Arguments = "/C timeout 4 && shutdown -s -f -t 0"
    };
    using (var process = new System.Diagnostics.Process { StartInfo = startInfo })
    {
        process.Start();
    }
}
/// <summary>
/// Converts the tagged text into a PromptBuilder voiced with the given default voice.
/// </summary>
public PromptBuilder ToPromptBuilder(String defaultVoice)
{
    var builder = new PromptBuilder(CultureUtil.GetOriginalCulture());
    builder.StartVoice(defaultVoice);
    var tags = GetTagsInText();
    int position = 0;
    foreach (ITag tag in tags)
    {
        String plainText = _contents.Substring(position, tag.Start - position);
        builder.AppendText(plainText);
        bool applied = tag.Apply(builder);
        if (applied)
        {
            // Skip the tag's own text only when its command succeeded.
            position = tag.End + 1;
        }
    }
    String trailing = _contents.Substring(position).Trim();
    builder.AppendText(trailing);
    builder.EndVoice();
    return builder;
}
/// <summary>
/// Reads an appointment aloud via JARVIS. Expected layout:
/// [0] index, [1] subject, [2] date "d/m/...", [3] time "h:m...", [4] location.
/// </summary>
public void sayAppointment(string[] whatToSay)
{
    // Swap day/month so the SSML date_md hint reads it correctly.
    string[] date = whatToSay[2].Split('/');
    string spokenDate = date[1] + "/" + date[0];

    // Keep only hours and minutes from the time string.
    string[] time = whatToSay[3].Split(':');
    string spokenTime = time[0] + ":" + time[1];

    var prompt = new PromptBuilder();
    prompt.StartVoice("IVONA 2 Brian");
    prompt.AppendText("Appointment " + whatToSay[0]);
    prompt.AppendText(", subject is: " + whatToSay[1]);
    prompt.AppendSsmlMarkup(", on <say-as interpret-as=\"date_md\">" + spokenDate + "</say-as>");
    prompt.AppendSsmlMarkup(" <say-as interpret-as=\"time\">" + spokenTime + "</say-as>");
    prompt.AppendText(", at " + whatToSay[4]);
    prompt.EndVoice();
    JARVIS.Speak(prompt);
    prompt.ClearContent();
}
/// <summary>
/// Speaks the text in the provided <see cref="SpeechString"/> using the enclosed data.
/// </summary>
/// <returns>Nothing.</returns>
private async Task Speak(SpeechString speech)
{
    // Stream and synthesizer live only for the duration of this call.
    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.Rate = speech.Rate;
        synth.Volume = speech.Volume;

        // TODO: refine the speech builder and actually use the style.
        PromptBuilder prompt = new PromptBuilder();
        PromptStyle promptStyle = new PromptStyle();
        prompt.StartVoice(speech.Voice.VoiceInfo);
        prompt.StartSentence();
        prompt.AppendText(speech.Text);
        prompt.EndSentence();
        prompt.EndVoice();

        // Render the speech straight into the memory stream, then block until
        // playback on both devices has finished.
        synth.Speak(prompt);
        await AudioManager.Instance.Play(stream, speech.PrimaryDevice.DeviceNumber, speech.SecondaryDevice.DeviceNumber);
    }
}
/// <summary>
/// Synthesizes the text to 8 kHz 16-bit stereo speech, encodes it as MP3 and
/// returns it as a base64 "data:audio/mp3" URI string.
/// </summary>
/// <param name="textInput">Text to synthesize (spoken in en-US, reduced emphasis, slow).</param>
public static string GetBase64Audio(string textInput)
{
    var speechAudioFormatConfig = new SpeechAudioFormatInfo(samplesPerSecond: 8000,
        bitsPerSample: AudioBitsPerSample.Sixteen, channel: AudioChannel.Stereo);
    var waveFormat = new WaveFormat(speechAudioFormatConfig.SamplesPerSecond,
        speechAudioFormatConfig.BitsPerSample, speechAudioFormatConfig.ChannelCount);

    var prompt = new PromptBuilder { Culture = CultureInfo.CreateSpecificCulture("en-US") };
    prompt.StartVoice(prompt.Culture);
    prompt.StartSentence();
    prompt.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Reduced, Rate = PromptRate.Slow });
    prompt.AppendText(textInput);
    prompt.EndStyle();
    prompt.EndSentence();
    prompt.EndVoice();

    string audioOutputAsString = null;
    using (var mp3Stream = new MemoryStream())            // leaked in the original
    using (var synthWaveMemoryStream = new MemoryStream())
    {
        // Speech synthesis runs on a worker thread; the pool thread signals
        // completion even if Speak throws.
        var resetEvent = new ManualResetEvent(false);
        ThreadPool.QueueUserWorkItem(arg =>
        {
            try
            {
                using (var siteSpeechSynth = new SpeechSynthesizer()) // leaked in the original
                {
                    siteSpeechSynth.SetOutputToAudioStream(synthWaveMemoryStream, speechAudioFormatConfig);
                    siteSpeechSynth.Speak(prompt);
                }
            }
            finally
            {
                resetEvent.Set();
            }
        });
        resetEvent.WaitOne(); // WaitAll on a single handle was equivalent but misleading

        var bitRate = speechAudioFormatConfig.AverageBytesPerSecond * 8;
        synthWaveMemoryStream.Position = 0;
        using (var mp3FileWriter = new LameMP3FileWriter(outStream: mp3Stream, format: waveFormat, bitRate: bitRate))
        {
            synthWaveMemoryStream.CopyTo(mp3FileWriter);
        }
        byte[] audioOutputBytes = mp3Stream.ToArray();
        audioOutputAsString = $"data:audio/mp3;base64,{Convert.ToBase64String(audioOutputBytes)}";
    }
    return audioOutputAsString;
}
/// <summary>
/// Synthesizes text to an in-memory PCM stream (padded with 2 s of leading and
/// trailing silence so the camera can start/stop cleanly) and plays it through
/// the camera's talk target, blocking until playback ends.
/// </summary>
private static void SynthToCam(string text, CameraWindow cw)
{
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);
    using (var synthesizer = new SpeechSynthesizer())
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            var silence = new byte[1 * 22050];
            // .Length instead of LINQ .Count() on a plain array (matches the
            // other copy of this method in the file).
            waveStream.Write(silence, 0, silence.Length);

            var pbuilder = new PromptBuilder();
            var pStyle = new PromptStyle
            {
                Emphasis = PromptEmphasis.Strong,
                Rate = PromptRate.Slow,
                Volume = PromptVolume.ExtraLoud
            };
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText(text);
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();

            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();

            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);

            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();
            // Block until the stream finishes playing.
            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }
            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
            // No explicit waveStream.Close(): the using block disposes it.
        }
    }
}
/// <summary>
/// Synthesizes <paramref name="text"/> (female child voice, strong emphasis,
/// extra loud) into a temporary "&lt;target&gt;.WAV" file, converts it to MP3 at
/// <paramref name="trgWavFile"/>, then deletes the temp WAV.
/// </summary>
/// <returns>Always true; synthesis errors are logged, not rethrown.</returns>
public static bool CreateWavConvertToMp3File(string trgWavFile, string text)
{
    var ps = new PromptStyle { Emphasis = PromptEmphasis.Strong, Volume = PromptVolume.ExtraLoud };
    var pb = new PromptBuilder();
    pb.StartStyle(ps);
    pb.StartParagraph();
    pb.StartVoice(VoiceGender.Female, VoiceAge.Child);
    pb.StartSentence();
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.AppendText(text, PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.EndSentence();
    pb.EndVoice();
    pb.EndParagraph();
    pb.EndStyle();

    var tempWav = trgWavFile + ".WAV";
    using (var speaker = new SpeechSynthesizer()) // original leaked the synthesizer
    using (var fileStream = new FileStream(tempWav, FileMode.Create))
    {
        try
        {
            speaker.Volume = 100;
            speaker.SetOutputToWaveStream(fileStream);
            speaker.Speak(pb);
        }
        catch (Exception ex)
        {
            ex.Log();
        }
        finally
        {
            // Detach the synthesizer from the stream before the stream closes.
            speaker.SetOutputToDefaultAudioDevice();
            fileStream.Close();
        }
    }
    NAudioHelper.ConvertWavToMp3(tempWav, trgWavFile);
    try
    {
        File.Delete(tempWav);
    }
    catch (Exception ex)
    {
        ex.Log();
    }
    return true;
}
/// <summary>
/// Demo: speaks four gender/age voice samples in sequence (female child, female
/// senior, male senior, male child), cancelling any speech already queued.
/// </summary>
void promptSample()
{
    var ps = new PromptStyle { Emphasis = PromptEmphasis.Strong, Volume = PromptVolume.ExtraLoud };
    var pb = new PromptBuilder();
    pb.StartStyle(ps);
    pb.StartParagraph();

    // (gender, age, spoken label) for each sample voice, in playback order.
    // Replaces four copy-pasted sections with one data-driven loop.
    var samples = new[]
    {
        new { Gender = VoiceGender.Female, Age = VoiceAge.Child,  Label = "Female Child" },
        new { Gender = VoiceGender.Female, Age = VoiceAge.Senior, Label = "Female Senior" },
        new { Gender = VoiceGender.Male,   Age = VoiceAge.Senior, Label = "Male Senior" },
        new { Gender = VoiceGender.Male,   Age = VoiceAge.Child,  Label = "Male Child" },
    };
    foreach (var sample in samples)
    {
        pb.StartVoice(sample.Gender, sample.Age);
        pb.StartSentence();
        pb.AppendText(sample.Label, PromptRate.Medium);
        pb.AppendBreak(TimeSpan.FromSeconds(.3));
        pb.EndSentence();
        pb.EndVoice();
    }

    pb.EndParagraph();
    pb.EndStyle();
    synth.SpeakAsyncCancelAll();
    synth.SpeakAsync(pb);
}
/// <summary>
/// Reads the weather aloud via JARVIS. Expected layout:
/// [0] conditions, [1] temperature, [2] wind speed, [3] high, [4] low.
/// </summary>
public void sayWeather(string[] conditions)
{
    var prompt = new PromptBuilder();
    prompt.StartVoice("IVONA 2 Brian");
    string report =
        "The weather in hale sowen is " + conditions[0] +
        " at " + conditions[1] +
        " degrees. There is a wind speed of " + conditions[2] +
        " miles per hour with highs of " + conditions[3] +
        " and lows of " + conditions[4];
    prompt.AppendText(report);
    prompt.EndVoice();
    JARVIS.Speak(prompt);
}
/// <summary>
/// Speaks any text through the shared reader using the program's default voice,
/// medium rate and no particular emphasis — no per-call configuration needed.
/// </summary>
public void say_default(string input)
{
    // Reset the shared style to a neutral delivery.
    style_.Rate = PromptRate.Medium;
    style_.Emphasis = PromptEmphasis.NotSet;

    builder.ClearContent();
    builder.StartVoice(Program.voicename);
    builder.StartSentence();
    builder.StartStyle(style_);
    builder.AppendText(input);
    builder.EndStyle();
    builder.EndSentence();
    builder.EndVoice();

    // Lastly let the ball roll and see if we can hear some chatter.
    reader.Speak(builder);
}
/// <summary>
/// Extracts the element's text and wraps it in a prompt voiced with the
/// configured speech culture.
/// </summary>
PromptBuilder ExtractTextToPrompt(TextElement te)
{
    var extracted = ExtractText(te);
    var prompt = new PromptBuilder();
    prompt.StartVoice(new CultureInfo(_speechCulture));
    prompt.AppendText(extracted.ToString());
    prompt.EndVoice();
    return prompt;
}
/// <summary>Speaks the message slowly using the configured voice.</summary>
public void speakMsg(string Message)
{
    var prompt = new PromptBuilder();
    prompt.StartVoice(voice);
    prompt.AppendText(Message, PromptRate.Slow);
    prompt.EndVoice();
    speak.Speak(prompt);
    prompt.ClearContent();
}
/// <summary>
/// Wraps the given text in a prompt voiced with the culture identified by
/// <paramref name="cultureInfo"/> (a culture name such as "en-US").
/// </summary>
private static PromptBuilder GetPromptBuilderForTTS(string tts, string cultureInfo)
{
    var prompt = new PromptBuilder();
    var culture = new CultureInfo(cultureInfo);
    prompt.StartVoice(culture);
    prompt.AppendText(tts);
    prompt.EndVoice();
    return prompt;
}
/// <summary>
/// Synthesizes text to an in-memory PCM stream (with leading and trailing
/// silence so the camera can initialise and wind down), then plays it through
/// the camera's talk target, blocking until playback finishes.
/// </summary>
private static void SynthToCam(string text, CameraWindow cw)
{
    // 11025 Hz, 16-bit mono PCM — must match the DirectStream RecordingFormat below.
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);
    using (var synthesizer = new SpeechSynthesizer())
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            var silence = new byte[1 * 22050];
            waveStream.Write(silence, 0, silence.Length);

            // Male adult voice, strong emphasis, slow and extra loud.
            var pbuilder = new PromptBuilder();
            var pStyle = new PromptStyle
            {
                Emphasis = PromptEmphasis.Strong,
                Rate = PromptRate.Slow,
                Volume = PromptVolume.ExtraLoud
            };
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText(text);
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();

            // Render the speech into the stream, then detach the synthesizer.
            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();

            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);

            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();
            // Poll until the whole stream (silence + speech + silence) has played.
            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }
            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
        }
    }
}
/// <summary>
/// Previews the configured TTS demo message with the voice for the configured region.
/// </summary>
private void BtnPreviewTTS_Click(object sender, EventArgs e)
{
    string previewText = TTSDemoBuilder().Item1;
    var prompt = new PromptBuilder();
    prompt.StartVoice(new CultureInfo(csgo_tts_main.region));
    prompt.AppendText(previewText);
    prompt.EndVoice();
    csgo_tts_main.synthesizer.SpeakAsync(prompt);
    prompt.ClearContent();
}
/// <summary>
/// Gets a spoken (audio) rendition of the current captcha.
/// </summary>
/// <param name="key">Cache key of the captcha</param>
/// <returns>WAV audio stream positioned at the beginning</returns>
public virtual MemoryStream GetAudioStream(string key)
{
    // Check whether audio captcha is supported in this environment.
    if (!SupportCaptchaAudio)
    {
        throw new NotSupportedException("TTS is unsupported on this environment");
    }
#if NETCORE
    throw new NotSupportedException("TTS is unsupported on .Net Core");
#else
    // Synthesize the speech into memory on a dedicated thread, otherwise
    // "the current thread does not support asynchronous operations" is raised.
    // http://stackoverflow.com/questions/10783127/how-to-implement-custom-audio-capcha-in-asp-net
    var captcha = GetWithoutRemove(key) ?? "";
    var stream = new MemoryStream();
    var cultureInfo = Thread.CurrentThread.CurrentCulture;
    var thread = new Thread(() =>
    {
        try
        {
            // Propagate the caller's culture so the voice matches the language.
            Thread.CurrentThread.CurrentCulture = cultureInfo;
            Thread.CurrentThread.CurrentUICulture = cultureInfo;
            // Build the prompt. Prompts are cached for a while to slow down
            // repeated-attempt attacks.
            var prompt = CaptchaAudioPromptCache.GetOrCreate(captcha, () =>
            {
                var builder = new PromptBuilder();
                var promptRates = new[] { PromptRate.Slow, PromptRate.Medium, PromptRate.Fast };
                var voiceGenders = new[] { VoiceGender.Male, VoiceGender.Female, VoiceGender.Neutral };
                var voiceAges = new[] { VoiceAge.Adult, VoiceAge.Child, VoiceAge.Senior, VoiceAge.Teen };
                // Speak each character with a random voice, rate and pause so the
                // audio is harder to recognize automatically.
                foreach (var c in captcha)
                {
                    builder.StartVoice(
                        RandomUtils.RandomSelection(voiceGenders),
                        RandomUtils.RandomSelection(voiceAges));
                    builder.AppendText(c.ToString(), RandomUtils.RandomSelection(promptRates));
                    builder.AppendBreak(TimeSpan.FromMilliseconds(RandomUtils.RandomInt(50, 450)));
                    builder.EndVoice();
                }
                return builder;
            }, CaptchaAudioPromptCacheTime);
            // Write the synthesized speech into the stream; dispose the
            // synthesizer when done (leaked in the original).
            using (var synthesizer = new SpeechSynthesizer())
            {
                synthesizer.SetOutputToWaveStream(stream);
                synthesizer.Speak(prompt);
            }
        }
        catch (Exception e)
        {
            var logManager = Application.Ioc.Resolve <LogManager>();
            logManager.LogError(e.ToString());
        }
    });
    thread.Start();
    thread.Join();
    stream.Seek(0, SeekOrigin.Begin);
    return stream;
#endif
}
/// <summary>Announces the current time via JARVIS using an SSML time hint.</summary>
public void currentTime()
{
    var now = DateTime.Now.ToShortTimeString();
    var prompt = new PromptBuilder();
    prompt.StartVoice("IVONA 2 Brian");
    prompt.AppendText("The time is: ");
    prompt.AppendSsmlMarkup(" <say-as interpret-as=\"time\">" + now + "</say-as>");
    prompt.EndVoice();
    JARVIS.Speak(prompt);
}
/// <summary>Announces today's date via JARVIS using an SSML date hint.</summary>
public void currentDate()
{
    var today = DateTime.Now.ToShortDateString();
    var prompt = new PromptBuilder();
    prompt.StartVoice("IVONA 2 Brian");
    prompt.AppendText("The date is: ");
    prompt.AppendSsmlMarkup(" <say-as interpret-as=\"date_md\">" + today + "</say-as>");
    prompt.EndVoice();
    JARVIS.Speak(prompt);
}
/// <summary>
/// Reads text aloud: renders it to a WAV file cached by (TTS settings, text,
/// config) and then plays that file.
/// </summary>
/// <param name="text">Text to read aloud</param>
public void Speak(
    string text,
    PlayDevices playDevice = PlayDevices.Both,
    bool isSync = false,
    float? volume = null)
{
    if (string.IsNullOrWhiteSpace(text))
    {
        return;
    }

    // Hash the current conditions to build the cache WAV file name.
    var wave = this.GetCacheFileName(
        Settings.Default.TTS,
        text,
        this.Config.ToString());

    // NOTE(review): lock (this) is an anti-pattern (outside code can lock the
    // same object); a private lock field would be safer — confirm before changing.
    lock (this)
    {
        if (!File.Exists(wave))
        {
            using (var fs = new FileStream(wave, FileMode.Create))
            using (var synth = new SpeechSynthesizer())
            {
                // Select the configured voice; bail out if it is unavailable.
                var voice = this.GetSynthesizer(this.Config.VoiceID);
                if (voice == null)
                {
                    return;
                }

                synth.SelectVoice(voice.VoiceInfo.Name);

                synth.Rate = this.Config.Rate;
                synth.Volume = this.Config.Volume;

                // Build the prompt; pitch is applied via SSML prosody markup.
                var pb = new PromptBuilder(voice.VoiceInfo.Culture);
                pb.StartVoice(voice.VoiceInfo);
                pb.AppendSsmlMarkup(
                    $"<prosody pitch=\"{this.Config.Pitch.ToXML()}\">{text}</prosody>");
                pb.EndVoice();

                synth.SetOutputToWaveStream(fs);
                synth.Speak(pb);
            }
        }
    }

    // Play the cached file.
    SoundPlayerWrapper.Play(wave, playDevice, isSync, volume);
}
/// <summary>
/// Speaks the text synchronously with a female teen voice.
/// </summary>
/// <param name="text">Text to speak.</param>
public static void Speak(string text)
{
    PromptBuilder builder = new PromptBuilder();
    builder.StartVoice(VoiceGender.Female, VoiceAge.Teen);
    builder.AppendText(text);
    builder.EndVoice();
    // 'using' guarantees disposal even if Speak throws (the original's manual
    // Dispose was skipped on an exception).
    using (SpeechSynthesizer synthesizer = new SpeechSynthesizer())
    {
        synthesizer.Speak(builder);
    }
}
/// <summary>
/// Asynchronously speaks the stored message for the given culture using a voice
/// matching that culture.
/// </summary>
private void DoPrompt(string culture)
{
    var synthesizer = new SpeechSynthesizer();
    synthesizer.SetOutputToDefaultAudioDevice();

    var prompt = new PromptBuilder();
    prompt.StartVoice(new CultureInfo(culture));
    prompt.AppendText(_messagesByCulture[culture]);
    prompt.EndVoice();

    synthesizer.SpeakAsync(prompt);
}
/// <summary>
/// Speaks today's date with a male en-US voice, using an SSML date_md hint.
/// </summary>
public void currentDate()
{
    var dateText = DateTime.Now.ToShortDateString();
    var enUs = CultureInfo.GetCultureInfo("en-US");

    var prompt = new PromptBuilder();
    prompt.Culture = enUs;
    prompt.StartVoice(VoiceGender.Male, VoiceAge.NotSet, 0);
    prompt.AppendText("The date is: ");
    prompt.AppendSsmlMarkup("<say-as interpret-as=\"date_md\">" + dateText + "</say-as>");
    prompt.EndVoice();

    Speaker.SelectVoiceByHints(VoiceGender.Male, VoiceAge.NotSet, 0, enUs);
    Speaker.Speak(prompt);
}
/// <summary>Greets the user with a fixed welcome message in the IVONA Brian voice.</summary>
public void welcome()
{
    var prompt = new PromptBuilder();
    prompt.StartVoice("IVONA 2 Brian");
    prompt.AppendText("Welcome back.");
    prompt.EndVoice();

    using (SpeechSynthesizer sayWelcome = new SpeechSynthesizer())
    {
        sayWelcome.Speak(prompt);
    }
}
/// <summary>
/// Discord chat handler: relays messages from one specific channel to the beam
/// chat, and (when reading is enabled) speaks messages from the reading channel
/// into the voice connection as 48 kHz 16-bit stereo audio.
/// </summary>
private async void Discord_ChatMessage(object sender, MessageEventArgs e)
{
    // Relay messages from the hard-coded channel to beam, skipping our own.
    if (this.beam.IsConnected && e.User.Id != this.discord.CurrentUser.Id && e.Channel.Id == 187760090206437376)
    {
        var msg = $"[discord] {e.Message.User.Name}: {e.Message.Text}";
        await this.beam.SendMessage(msg);
    }
    if (this.readingMessages && e.Channel == this.readingChannel)
    {
        using (var synthesizer = new SpeechSynthesizer())
        using (var mem = new MemoryStream())
        {
            // Format must match what this.audio.Send expects downstream.
            var info = new SpeechAudioFormatInfo(48000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo);
            synthesizer.SetOutputToAudioStream(mem, info);
            // "<user> says" is spoken with reduced emphasis, then the message.
            PromptBuilder builder = new PromptBuilder();
            builder.Culture = CultureInfo.CreateSpecificCulture("en-US");
            builder.StartVoice(builder.Culture);
            builder.StartSentence();
            builder.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Reduced });
            builder.AppendText(e.Message.User.Name);
            builder.AppendText(" says ");
            builder.EndStyle();
            builder.AppendText(e.Message.Text);
            builder.EndSentence();
            builder.EndVoice();
            synthesizer.Speak(builder);
            mem.Seek(0, SeekOrigin.Begin);
            // Stream the rendered audio in fixed 96000-byte blocks; the last
            // partial block is zero-padded so every Send gets a full buffer.
            int count, block = 96000;
            var buffer = new byte[block];
            while ((count = mem.Read(buffer, 0, block)) > 0)
            {
                if (count < block)
                {
                    for (int i = count; i < block; i++)
                    {
                        buffer[i] = 0;
                    }
                }
                this.audio.Send(buffer, 0, block);
            }
        }
    }
}
/// <summary>
/// Speaks the text synchronously with a female adult voice at rate 1.
/// </summary>
/// <param name="text">Text to speak.</param>
private void Speak(string text)
{
    var builder = new PromptBuilder();
    builder.StartVoice(VoiceGender.Female, VoiceAge.Adult);
    builder.AppendText(text);
    builder.EndVoice();
    // 'using' guarantees disposal even if Speak throws (the original's manual
    // Dispose was skipped on an exception).
    using (var synthesizer = new SpeechSynthesizer())
    {
        synthesizer.Rate = 1;
        synthesizer.Speak(builder);
    }
}
/// <summary>Says a fixed goodbye message in the IVONA Brian voice.</summary>
public void goodbye()
{
    var prompt = new PromptBuilder();
    prompt.StartVoice("IVONA 2 Brian");
    prompt.AppendText("Goodbye, for now.");
    prompt.EndVoice();

    using (SpeechSynthesizer sayGoodbye = new SpeechSynthesizer())
    {
        sayGoodbye.Speak(prompt);
    }
}
/// <summary>
/// Wishes the given inmate a good day aloud with the Sabina desktop voice
/// (slowed down, full volume), asynchronously.
/// </summary>
private void Voz(string interno)
{
    var synth = new SpeechSynthesizer
    {
        Rate = -2,
        Volume = 100
    };

    var prompt = new PromptBuilder();
    prompt.StartVoice("Microsoft Sabina Desktop");
    prompt.AppendText("Que tenga un buen día interno " + interno);
    prompt.EndVoice();

    synth.SpeakAsync(new Prompt(prompt));
}
/// <summary>
/// Demo window: spells out text, announces the time, then has "Microsoft Sam"
/// spell "queue" at normal and extra-fast rates.
/// </summary>
public Window1()
{
    InitializeComponent();

    SpeechSynthesizer synthesizer = new SpeechSynthesizer();
    PromptBuilder prompt = new PromptBuilder();

    prompt.AppendTextWithHint("WPF", SayAs.SpellOut);
    prompt.AppendText("sounds better than WPF.");

    // Pause for 2 seconds
    prompt.AppendBreak(new TimeSpan(0, 0, 2));
    prompt.AppendText("The time is");
    prompt.AppendTextWithHint(DateTime.Now.ToString("hh:mm"), SayAs.Time);

    // Pause for 2 seconds
    prompt.AppendBreak(new TimeSpan(0, 0, 2));
    prompt.AppendText("Hey Sam, can you spell queue?");
    prompt.StartVoice("Microsoft Sam");
    prompt.AppendTextWithHint("queue", SayAs.SpellOut);
    prompt.EndVoice();

    prompt.AppendText("Do it faster!");
    prompt.StartVoice("Microsoft Sam");
    prompt.StartStyle(new PromptStyle(PromptRate.ExtraFast));
    prompt.AppendTextWithHint("queue", SayAs.SpellOut);
    prompt.EndStyle();
    prompt.EndVoice();

    // Speak all the content in the PromptBuilder
    synthesizer.SpeakAsync(prompt);
}
/// <summary>
/// Speaks the text asynchronously with the "Luca" voice at x-high pitch.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <param name="volume">0-100; any other value falls back to 100.</param>
/// <param name="rate">Reading speed, -10..10; any other value falls back to 0.</param>
public void Say(string text, int volume, int rate)
{
    // Out-of-range values fall back to defaults rather than being clamped,
    // matching the original behavior.
    sp.Volume = (volume >= 0 && volume <= 100) ? volume : 100;
    sp.Rate = (rate >= -10 && rate <= 10) ? rate : 0;

    PromptBuilder builder = new PromptBuilder();
    builder.StartVoice("Luca");
    builder.StartSentence();
    builder.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Strong, Rate = PromptRate.Medium });
    // Raise the pitch via SSML prosody markup.
    string high = "<prosody pitch=\"x-high\"> " + text + " </prosody >";
    builder.AppendSsmlMarkup(high);
    builder.EndStyle();
    builder.EndSentence();
    builder.EndVoice();

    // Asynchronous
    sp.SpeakAsync(builder);
}
/// <summary>
/// Closes the current voice section and switches to the voice named by this
/// tag's argument.
/// </summary>
/// <returns>False (no switch) when the argument matches no installed voice.</returns>
public override bool Apply(PromptBuilder builder)
{
    builder.EndVoice();
    // Voice names are identifiers, not display text: lower-case invariantly so
    // the lookup is not affected by the current culture (e.g. Turkish dotless i).
    String voiceArgument = ParseTagArgument().ToLowerInvariant();
    String voiceName = FindFullVoiceName(voiceArgument);
    if (voiceName == null)
    {
        return false;
    }
    builder.StartVoice(voiceName);
    return true;
}
/// <summary>
/// Handler for MSG_QUERY: speaks the requested phrases to a target speaker, or
/// reports failure when that speaker is not up.
/// </summary>
/// <param name="message">The message received</param>
protected async void MsgQuery(dynamic message)
{
    var data = message["data"];
    MycroftSpeaker speaker = speakers[data["targetSpeaker"]];
    if (speaker.Status != "up")
    {
        // Target speaker is unavailable; report the failure back to the sender.
        await QueryFail(message["id"], "Target speaker is " + speaker.Status);
    }
    else
    {
        var text = data["text"];
        // One en-GB female voice section holding every phrase, each followed by
        // its requested delay (seconds -> 100 ns ticks via the 1e7 factor).
        PromptBuilder prompt = new PromptBuilder(new System.Globalization.CultureInfo("en-GB"));
        prompt.StartVoice(VoiceGender.Female, VoiceAge.Adult, 0);
        foreach (var phrase in text)
        {
            prompt.AppendText(phrase["phrase"]);
            prompt.AppendBreak(new TimeSpan((int)(phrase["delay"] * 10000000)));
        }
        prompt.EndVoice();
        // Best-effort trailing audio clip; a missing file is silently ignored.
        try
        {
            prompt.AppendAudio("lutz.wav");
        }
        catch { }
        // Stream the TTS on a background thread, then ask the audioOutput app
        // to connect to this speaker's port.
        Thread t = new Thread(Listen);
        t.Start(new { speaker = speaker, prompt = prompt });
        await Query("audioOutput", "stream_tts", new { ip = ipAddress, port = speaker.Port }, new string[] { speaker.InstanceId });
    }
}
/// <summary>
/// Closes the current voice section and reverts the prompt to the default voice.
/// </summary>
/// <returns>Always true: switching to the default voice cannot fail.</returns>
public override bool Apply(PromptBuilder builder)
{
    builder.EndVoice();
    builder.StartVoice(TextToSpeech.DefaultVoiceName);
    return true;
}
/// <summary>
/// Renders a single word to an in-memory WAV stream with the current voice and
/// prompt rate, and returns it as a float sample buffer.
/// </summary>
float[] GetFloatBufferFromWord(string sWord)
{
    // Detach any previous output before redirecting to the new stream.
    _speechSynthesizer.SetOutputToNull();
    using (MemoryStream waveStream = new MemoryStream()) // original never disposed the stream
    {
        _speechSynthesizer.SetOutputToWaveStream(waveStream);
        var pb = new PromptBuilder();
        pb.StartVoice(_currentSpeechVoice._voiceName);
        pb.AppendText(sWord, _speechSynthData._promptRate);
        pb.EndVoice();
        _speechSynthesizer.Speak(pb);
        return GetFloatBuffer(waveStream);
    }
}