/// <summary>
/// Updates the action label on its UI thread and speaks the text aloud.
/// </summary>
/// <param name="text">Text to display; an exclamation mark is appended before speaking.</param>
public void ShowAction(String text)
{
    this.action.Dispatcher.BeginInvoke((Action)(() =>
    {
        this.action.Text = text;
        // Build and speak the prompt on the dispatcher callback.
        PromptBuilder prompt = new PromptBuilder();
        prompt.AppendText(text + "!");
        synthesizer.Speak(prompt);
    }));
}
/// <summary>Smoke test: speaks a fixed welcome message synchronously.</summary>
public static void Test()
{
    var builder = new PromptBuilder();
    builder.AppendText("Welcome, everyone");
    var synthesizer = new SpeechSynthesizer();
    synthesizer.Speak(builder);
}
// Starts asynchronous speech synthesis of the given text using the French (fr-fr) culture.
public static void SpeechAsynchrone(String texte)
{
    var synthesizer = new SpeechSynthesizer();
    var prompt = new PromptBuilder(new System.Globalization.CultureInfo("fr-fr"));
    prompt.AppendText(texte);
    // Fire-and-forget: returns immediately while speech plays in the background.
    synthesizer.SpeakAsync(prompt);
}
/// <summary>
/// Converts the tagged text in _contents into a PromptBuilder, applying each
/// recognized tag in order and wrapping the whole output in the given voice.
/// </summary>
/// <param name="defaultVoice">Voice name for the outer StartVoice/EndVoice pair.</param>
/// <returns>A PromptBuilder containing the text with all successfully applied tags.</returns>
public PromptBuilder ToPromptBuilder(String defaultVoice)
{
    PromptBuilder builder = new PromptBuilder(CultureUtil.GetOriginalCulture());
    builder.StartVoice(defaultVoice);
    var tags = GetTagsInText();
    int startIndex = 0; // position in _contents of the first character not yet emitted
    foreach (ITag tag in tags)
    {
        // Emit the plain text between the previous position and this tag.
        String textBeforeCurrentTag = _contents.Substring(startIndex, tag.Start - startIndex);
        builder.AppendText(textBeforeCurrentTag);
        bool isCommandSuccessful = tag.Apply(builder);
        if (isCommandSuccessful)
        {
            // Skip past the tag's source text only when it was applied; a failed
            // tag stays before startIndex and is re-emitted as plain text later.
            startIndex = tag.End + 1;
        }
    }
    String remaining = _contents.Substring(startIndex).Trim();
    builder.AppendText(remaining);
    builder.EndVoice();
    return builder;
}
/// <summary>
/// Applies a speech-rate tag: maps the tag argument to a PromptRate and
/// starts a matching style on the builder.
/// </summary>
/// <returns>true when the argument named a known rate; false otherwise.</returns>
public override bool Apply(PromptBuilder builder)
{
    String speed = ParseTagArgument().ToLowerInvariant();
    PromptRate rate;
    if (speed == "fast") rate = PromptRate.Fast;
    else if (speed == "medium") rate = PromptRate.Medium;
    else if (speed == "slow") rate = PromptRate.Slow;
    else if (speed == "extra fast") rate = PromptRate.ExtraFast;
    else if (speed == "extra slow") rate = PromptRate.ExtraSlow;
    else return false; // unrecognized rate name
    builder.StartStyle(new PromptStyle(rate));
    return true;
}
/// <summary>Exercises the audio player by speaking a fixed build-breaker message.</summary>
public void SpeechSynthesisTest()
{
    var prompt = new PromptBuilder();
    prompt.AppendText("The build breaker is, dave.");
    var synthesizer = new CradiatorSpeechSynthesizer(new SpeechSynthesizer());
    var player = new AudioPlayer(synthesizer, new ConfigSettings(), new VoiceSelector(null), new AppLocation());
    player.Say(prompt);
}
/// <summary>Configures the speaker (full volume, rate 1) and speaks a fixed greeting.</summary>
public void initializeSpeech()
{
    this.speaker.SetOutputToDefaultAudioDevice();
    this.speaker.Volume = 100;
    this.speaker.Rate = 1;
    var greeting = new PromptBuilder();
    greeting.AppendText("Jen, You are a re-de-culous Girl! And I love you");
    speaker.Speak(greeting);
}
/// <summary>
/// Speaks the prompt asynchronously on the audio device and shows a dialog
/// that lets the user cancel the in-progress speech.
/// </summary>
public static void Speak(PromptBuilder p)
{
    var audioSynthesizer = CreateSynthesizerOutputToAudio();
    Prompt activePrompt = audioSynthesizer.SpeakAsync(p);
    ShowSpeechCancelDialog(new SynthesisState(audioSynthesizer, activePrompt));
}
/// <summary>
/// Speaks the given text synchronously, suffixed with " is to be said as ".
/// </summary>
/// <param name="text">Text to speak.</param>
public void SpeakNow(string text)
{
    var pBuilder = new PromptBuilder();
    // (ClearContent on a freshly constructed builder was redundant; removed.)
    pBuilder.AppendText(text + " is to be said as ");
    // pBuilder.AppendText(tvm.Translations.Last().Target);
    // Dispose the synthesizer once the synchronous Speak completes;
    // the original leaked it.
    using (var speaker = new SpeechSynthesizer())
    {
        speaker.Speak(pBuilder);
    }
}
/// <summary>
/// Synthesizes the text to an in-memory 11.025 kHz mono PCM stream, padded
/// with leading/trailing silence, and streams it to the camera's talk target.
/// Blocks until playback finishes.
/// </summary>
/// <param name="text">Text to speak to the camera.</param>
/// <param name="cw">Camera window whose Camobject receives the audio.</param>
private static void SynthToCam(string text, CameraWindow cw)
{
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);
    using (var synthesizer = new SpeechSynthesizer())
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            var silence = new byte[1 * 22050];
            waveStream.Write(silence, 0, silence.Length);
            // Slow, extra-loud, strongly emphasized adult male voice.
            var pbuilder = new PromptBuilder();
            var pStyle = new PromptStyle
            {
                Emphasis = PromptEmphasis.Strong,
                Rate = PromptRate.Slow,
                Volume = PromptVolume.ExtraLoud
            };
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText(text);
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();
            // Render the speech into the memory stream, then detach the output.
            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();
            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);
            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();
            // Poll until the audio stream has been fully consumed.
            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }
            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
        }
    }
}
/// <summary>
/// Speaks a notification: emphasized title, a pause, then the description.
/// </summary>
/// <param name="notification">Notification whose Title and Description are spoken.</param>
/// <param name="displayName">Unused by this handler.</param>
protected override void HandleNotification(Notification notification, string displayName)
{
    // (Removed an unused StringBuilder local from the original.)
    PromptBuilder pb = new PromptBuilder();
    pb.AppendText(notification.Title, PromptEmphasis.Strong);
    pb.AppendBreak();
    pb.AppendText(notification.Description);
    ss.Speak(pb);
}
/// <summary>
/// Speaks a short phrase followed by four piano-note audio clips.
/// </summary>
public void SpeakWithPromptBuilder()
{
    var prompt = new PromptBuilder();
    prompt.AppendText("This is something of a test");
    // Clips are appended (and therefore played) in this order.
    var clips = new[]
    {
        @"E:\OneDrive\Music\mycomp\MusicalIntervals01\ExtractedPianoNotes\F#5.wav",
        @"E:\OneDrive\Music\mycomp\MusicalIntervals01\ExtractedPianoNotes\E5.wav",
        @"E:\OneDrive\Music\mycomp\MusicalIntervals01\ExtractedPianoNotes\PerfectFourth2.wav",
        @"E:\OneDrive\Music\mycomp\MusicalIntervals01\ExtractedPianoNotes\PerfectFourth3.wav"
    };
    foreach (var clip in clips)
    {
        prompt.AppendAudio(clip);
    }
    _speechSynthesizer.Speak(prompt);
}
/// <summary>
/// Builds a spoken status prompt: a leading one-second pause, then one parsed
/// sentence per project, each preceded by another one-second break.
/// Returns an empty prompt when no sentence template is supplied.
/// </summary>
PromptBuilder MakeSpeech(IEnumerable<ProjectStatus> projects, string rawSentence)
{
    var prompt = new PromptBuilder();
    if (string.IsNullOrEmpty(rawSentence))
    {
        return prompt; // nothing to say
    }
    prompt.AppendBreak(OneSecond);
    foreach (var project in projects)
    {
        prompt.AppendBreak(OneSecond);
        prompt.AppendText(_speechTextParser.Parse(rawSentence, project));
    }
    return prompt;
}
/// <summary>Logs and speaks the given text synchronously on the default audio device.</summary>
public static void say(String tts)
{
    Console.WriteLine("[TTS] Say: {0}", tts);
    using (SpeechSynthesizer synthesizer = new SpeechSynthesizer())
    {
        synthesizer.SetOutputToDefaultAudioDevice();
        var prompt = new PromptBuilder();
        prompt.AppendText(tts);
        synthesizer.Speak(prompt);
    }
}
/// <summary>
/// Applies a pause tag: parses the tag argument as a duration in seconds and
/// appends a matching break to the builder.
/// </summary>
/// <returns>true when the argument parsed as a number; false otherwise.</returns>
public override bool Apply(PromptBuilder builder)
{
    String argument = ParseTagArgument();
    double duration;
    if (!Double.TryParse(argument, out duration))
    {
        return false;
    }
    // Use a 64-bit tick count: the original cast to int, which overflowed for
    // durations longer than int.MaxValue / TicksPerSecond (about 214 seconds).
    long pauseInTicks = (long) Math.Round(duration * TicksPerSecond);
    builder.AppendBreak(new TimeSpan(pauseInTicks));
    return true;
}
/// <summary>
/// Applies a pronunciation tag: appends the tagged word with the phonetic
/// spelling supplied in the tag argument.
/// </summary>
/// <returns>false when the pronunciation string is rejected; true otherwise.</returns>
public override bool Apply(PromptBuilder builder)
{
    var phoneticSpelling = ParseTagArgument();
    var word = ParseWordToPronounce();
    try
    {
        builder.AppendTextWithPronunciation(word, phoneticSpelling);
        return true;
    }
    catch (FormatException)
    {
        // The pronunciation string was not in a valid phoneme format.
        return false;
    }
}
/// <summary>
/// Speaks the text asynchronously with the "Luca" voice at extra-high pitch,
/// strong emphasis and medium rate, after clamping volume and rate.
/// </summary>
/// <param name="text">Text to speak (inserted into an SSML prosody fragment).</param>
/// <param name="volume">0-100; out-of-range values fall back to 100.</param>
/// <param name="rate">-10..10; out-of-range values fall back to 0.</param>
public void Say(string text, int volume, int rate)
{
    //foreach (InstalledVoice voice in sp.GetInstalledVoices())
    //{
    //    VoiceInfo info = voice.VoiceInfo;
    //    Console.WriteLine(" Name: " + info.Name);
    //    Console.WriteLine(" Culture: " + info.Culture);
    //    Console.WriteLine(" Age: " + info.Age);
    //    Console.WriteLine(" Gender: " + info.Gender);
    //    Console.WriteLine(" Description: " + info.Description);
    //    Console.WriteLine(" ID: " + info.Id);
    //}
    // Clamp volume to [0,100]; anything outside means full volume.
    if (volume >= 0 && volume <= 100)
        sp.Volume = volume;
    else
        sp.Volume = 100;
    // reading speed (translated from Italian: "rappresenta la velocità di lettura")
    if (rate >= -10 && rate <= 10)
        sp.Rate = rate;
    else
        sp.Rate = 0;
    //CultureInfo culture = CultureInfo.CreateSpecificCulture("it-IT");
    //spSynth.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Teen, 0, culture);
    //spSynth.SelectVoice("ScanSoft Silvia_Dri40_16kHz");
    //spSynth.SelectVoice("Microsoft Elsa Desktop");
    //spSynth.SelectVoice("Paola");
    //spSynth.SelectVoice("Luca");
    //spSynth.SelectVoice("Roberto");
    PromptBuilder builder = new PromptBuilder();
    builder.StartVoice("Luca");
    builder.StartSentence();
    builder.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Strong, Rate = PromptRate.Medium });
    // Wrap the text in an extra-high pitch prosody element.
    string high = "<prosody pitch=\"x-high\"> " + text + " </prosody >";
    builder.AppendSsmlMarkup(high);
    builder.EndStyle();
    builder.EndSentence();
    builder.EndVoice();
    // Asynchronous
    sp.SpeakAsync(builder);
}
/// <summary>
/// Entry point: reads a CSV with "File Name" and "Text" columns and renders
/// each row's text to an 8 kHz mono WAV file in an "output" subdirectory
/// next to the CSV.
/// </summary>
/// <param name="args">args[0] must be the path to the CSV file.</param>
static void Main(string[] args)
{
    if (args.Length != 1)
    {
        Console.WriteLine("Usage: CreateWavFiles.exe file.csv");
        return;
    }
    var dir = Path.GetDirectoryName(args[0]);
    CsvFile csv = new CsvFile(args[0]);
    dir = Path.Combine(dir, "output");
    // Initialize a new instance of the SpeechSynthesizer.
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        Console.WriteLine("using voice :" + synth.Voice.Name);
        foreach (var item in synth.GetInstalledVoices())
        {
            //..
            Console.WriteLine(item.VoiceInfo.Name);
        }
        for (int i = 0; i < csv.Rows.Count; i++)
        {
            // Set a value for the speaking rate. Slower to Faster (-10 to 10)
            synth.Rate = -3;
            // Configure the audio output: one WAV file per CSV row.
            string outputWavFileName = Path.Combine(dir, csv.Rows[i]["File Name"].ToString());
            Console.WriteLine(outputWavFileName);
            synth.SetOutputToWaveFile(outputWavFileName, new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
            // Create a SoundPlayer instance to play output audio file.
            //System.Media.SoundPlayer m_SoundPlayer = new System.Media.SoundPlayer(outputWavFileName);
            // Build a prompt.
            PromptBuilder builder = new PromptBuilder();
            builder.AppendText(csv.Rows[i]["Text"].ToString());
            // Speak the prompt (writes into the wave file).
            synth.Speak(builder);
            //m_SoundPlayer.Play();
        }
    }
    Console.WriteLine();
    Console.WriteLine("Press any key to exit...");
    Console.ReadKey();
}
/// <summary>
/// Applies a voice-change tag: ends the current voice and starts the voice
/// named by the tag argument.
/// </summary>
/// <returns>false when no installed voice matches the argument; true otherwise.</returns>
public override bool Apply(PromptBuilder builder)
{
    String voiceArgument = ParseTagArgument().ToLower();
    String voiceName = FindFullVoiceName(voiceArgument);
    if (voiceName == null)
    {
        // Unknown voice: bail out BEFORE touching the builder. The original
        // called EndVoice() first, leaving the builder's voice scope
        // unbalanced whenever the lookup failed.
        return false;
    }
    builder.EndVoice();
    builder.StartVoice(voiceName);
    return true;
}
/// <summary>
/// Renders the prompt to a WAV file at the given path. Does nothing when the
/// path is null, empty, or whitespace — SetOutputToWaveFile would otherwise
/// create an empty WAV file even if nothing were spoken into it.
/// </summary>
public static void SaveAsWav(PromptBuilder p, String directory)
{
    if (String.IsNullOrWhiteSpace(directory))
    {
        return;
    }
    using (var synthesizer = new SpeechSynthesizer())
    {
        synthesizer.SetOutputToWaveFile(directory);
        synthesizer.Speak(p);
    }
}
/// <summary>
/// Speaks a notification, preferring caller-supplied SSML for title and text
/// (custom attributes "Notification-Title-SSML" / "Notification-Text-SSML")
/// over plain text, and falling back to plain text if speaking fails.
/// </summary>
protected override void HandleNotification(Notification notification, string displayName)
{
    /* string xml = @"<p> <s>You have 4 new messages.</s> <s>The first is from Stephanie Williams and arrived at <break/> 3:45pm. </s> <s> The subject is <prosody rate=""-20%"">ski trip</prosody> </s> </p>";
     * */
    PromptBuilder pb = new PromptBuilder();
    // handle title: raw SSML when provided, else emphasized plain text
    if (notification.CustomTextAttributes != null && notification.CustomTextAttributes.ContainsKey("Notification-Title-SSML"))
        pb.AppendSsmlMarkup(notification.CustomTextAttributes["Notification-Title-SSML"]);
    else
        pb.AppendText(notification.Title, PromptEmphasis.Strong);
    pb.AppendBreak();
    // handle text
    if (notification.CustomTextAttributes != null && notification.CustomTextAttributes.ContainsKey("Notification-Text-SSML"))
        pb.AppendSsmlMarkup(notification.CustomTextAttributes["Notification-Text-SSML"]);
    else
        pb.AppendText(notification.Description);
    try
    {
        ss.Speak(pb);
    }
    catch (Exception ex)
    {
        Growl.CoreLibrary.DebugInfo.WriteLine("Unable to speak input: " + ex.Message);
        // fall back to plain text (if the plain text is what failed the first time, it wont work this time either but wont hurt anything)
        pb.ClearContent();
        pb.AppendText(notification.Title, PromptEmphasis.Strong);
        pb.AppendBreak();
        pb.AppendText(notification.Description);
        ss.Speak(pb);
    }
}
/// <summary>
/// Starts continuous recognition of the search keywords loaded from the
/// keyword file, wiring recognized phrases to SearchResult, then blocks
/// until a key press.
/// </summary>
public static void Search()
{
    // (Removed an unused PromptBuilder local from the original.)
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
    // Grammar: one choice per line of the keyword file.
    Choices sList = new Choices();
    sList.Add(File.ReadAllLines(searchKeywords));
    GrammarBuilder gbuild = new GrammarBuilder();
    gbuild.Append(sList);
    gbuild.Culture = new System.Globalization.CultureInfo("en-US");
    Grammar gr = new Grammar(gbuild);
    recognizer.RequestRecognizerUpdate();
    recognizer.LoadGrammarAsync(gr);
    recognizer.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(SearchResult);
    recognizer.SetInputToDefaultAudioDevice();
    recognizer.RecognizeAsync(RecognizeMode.Multiple);
    // Keep recognition alive until the user presses a key.
    Console.ReadKey();
}
/// <summary>
/// Splits the utterance into sentences and renders each one to
/// C:\temp\audioN.wav at a slightly slowed rate, cancelling queued speech first.
/// </summary>
/// <param name="utts">Utterance text; split on '!', '?' and '.'.</param>
public void speak(string utts)
{
    //force to terminate all queued prompts
    synthesizer.SpeakAsyncCancelAll();
    string[] separator = { "!", "?", "." };
    string[] sentences = utts.Split(separator, StringSplitOptions.RemoveEmptyEntries);
    int index = 0;
    foreach (string sentence in sentences)
    {
        // One numbered output file per sentence.
        string path = "C:\\temp\\audio" + index + ".wav";
        synthesizer.SetOutputToWaveFile(path);
        synthesizer.Rate = -1;
        synthesizer.Speak(sentence);
        index++;
    }
    // (Removed an unused PromptBuilder local and the pointless '@' on the
    //  path variable from the original.)
    // NOTE(review): output is left directed at the last wave file; callers
    // that expect audible speech afterwards must reset the output device.
    //Debug.WriteLine("Phonetics: " + phoneme);
}
/// <summary>
/// Speaks the phrase on a background thread using the last installed female
/// voice (falling back to any installed voice), raising start/done talking
/// events around the speech.
/// </summary>
/// <exception cref="InvalidOperationException">No voices are installed.</exception>
private async Task DoSay(string actualPhrase)
{
    var bitchName = _settingAgent.GetSetting("BitchName", Constants.DefaultBitchName);
    var newGuid = Guid.NewGuid();
    _eventHub.InvokeStartTalkingEvent(newGuid, bitchName, actualPhrase);
    // Task.Run instead of the original new Task(...) + Start() + await:
    // same thread-pool scheduling, less ceremony, no unstarted-task pitfalls.
    await Task.Run(() =>
    {
        var builder = new PromptBuilder();
        builder.StartSentence();
        builder.AppendText(actualPhrase);
        builder.EndSentence();
        using (var synthesizer = new SpeechSynthesizer())
        {
            var voices = synthesizer.GetInstalledVoices();
            // Prefer the last female voice; otherwise take whatever is installed.
            var voice = voices.LastOrDefault(x => x.VoiceInfo.Gender == VoiceGender.Female)
                        ?? voices.FirstOrDefault();
            if (voice == null)
            {
                throw new InvalidOperationException("Cannot find any installed voices.");
            }
            //synthesizer.SelectVoice("Microsoft David Desktop");
            //synthesizer.SelectVoice("Microsoft Hazel Desktop");
            //synthesizer.SelectVoice("Microsoft Zira Desktop");
            synthesizer.SelectVoice(voice.VoiceInfo.Name);
            synthesizer.Speak(builder);
        }
    });
    _eventHub.InvokeDoneTalkingEvent(newGuid);
}
/// <summary>
/// Click handler for the phrase sound buttons: clicking the currently paused
/// button resumes speech; otherwise the label text matching the clicked
/// button is spoken on a background thread while its pause icon is shown.
/// </summary>
private void soundPhrase1_Click(object sender, EventArgs e)
{
    PictureBox[] PauseSound = { soundPhrase1Pause, soundPhrase2Pause, soundPhrase3Pause, soundPhrase4Pause, soundPhrase5Pause, soundPhrase6Pause, soundPhrase7Pause };
    Label[] labels = { picBoxLabel1, coursLabel1, picBoxLabel2, coursLabel2, picBoxLabel3, coursLabel4, coursLabel5 };
    // Hide every pause icon before showing the one for the clicked button.
    foreach (PictureBox box in PauseSound)
    {
        box.Visible = false;
    }
    // Find the pause icon whose name is "<clicked button name>Pause".
    int i = 0;
    PictureBox pbox = (PictureBox)sender;
    while (PauseSound[i].Name != pbox.Name + "Pause")
    {
        i++;
    }
    childThread = new Thread(new ThreadStart(() =>
    {
        if ((pbox.Name == pausedName))
        {
            // Second click on the paused button: resume speech.
            sSynth.Resume();
            pausedName = "";
        }
        else
        {
            // Pause whatever is currently speaking (ignore if nothing is).
            try
            {
                sSynth.Pause();
            }
            catch (Exception)
            {
            }
            sSynth = new SpeechSynthesizer();
            PromptBuilder pBuilder = new PromptBuilder();
            SpeechRecognitionEngine sRecognize = new SpeechRecognitionEngine(new CultureInfo("fr-FR"));
            pBuilder.ClearContent();
            pBuilder.AppendText(labels[i].Text);
            sSynth.Speak(pBuilder);
            sSynth.Dispose();
            pausedName = "";
            // Back on the UI thread: hide the pause icon once speech is done.
            this.Invoke(new MethodInvoker(() => PauseSound[i].Hide()));
        }
    }));
    PauseSound[i].Show();
    PauseSound[i].Click += pause_click;
    childThread.IsBackground = true;
    childThread.Start();
}
/// <summary>
/// Queues asynchronous speech for the text. Blank input is rendered as a
/// one-second pause instead. Honors the mute setting; non-blank input is
/// pushed onto the spoken-words stack and raises the Spoke event.
/// </summary>
public void SpeakAsync(String text)
{
    if (String.IsNullOrWhiteSpace(text))
    {
        // Nothing to say: emit one second of silence (unless muted) and stop.
        var pause = new PromptBuilder();
        pause.AppendBreak(TimeSpan.FromSeconds(1));
        if (!Settings.Muted)
            _voice.SpeakAsync(pause);
        return;
    }
    _spokenWords.Push(text);
    if (!Settings.Muted)
        _voice.SpeakAsync(text);
    if (Spoke != null)
        Spoke(text);
}
/// <summary>
/// Demo handler: speaks "How are you", pauses two seconds, then repeats it
/// with reduced emphasis on "How" and an extra-slow, strongly emphasized "are".
/// </summary>
private void cmdPromptTest_Click(object sender, RoutedEventArgs e)
{
    var prompt = new PromptBuilder();
    prompt.AppendText("How are you");
    prompt.AppendBreak(TimeSpan.FromSeconds(2));
    prompt.AppendText("How ", PromptEmphasis.Reduced);
    var slowStrong = new PromptStyle
    {
        Rate = PromptRate.ExtraSlow,
        Emphasis = PromptEmphasis.Strong
    };
    prompt.StartStyle(slowStrong);
    prompt.AppendText("are ");
    prompt.EndStyle();
    prompt.AppendText("you?");
    new SpeechSynthesizer().Speak(prompt);
}
/// <summary>
/// Speaks asynchronously
/// </summary>
/// <param name="message">Message to speak</param>
/// <param name="sayAs">Type of pronunciation</param>
public void SpeakAsync(string message, SayAs sayAs = SayAs.Text)
{
    if (_enabled && (_voices.Count > 0))
    {
        StopSpeak();
        PromptBuilder builder = null;
        try
        {
            builder = new PromptBuilder(CultureInfo.CreateSpecificCulture(_culture));
        }
        catch (Exception)
        {
            // Unknown/invalid culture name: fall back to the default culture.
            // (The original declared an unused exception variable; removed.)
            builder = new PromptBuilder();
        }
        builder.AppendTextWithHint(message, sayAs);
        _speechSynth.SpeakAsync(builder);
    }
}
/// <summary>
/// Click handler: clicking the currently paused button resumes speech;
/// otherwise the stored phrase s is spoken on a background thread and the
/// pause icon is hidden when speech completes.
/// </summary>
private void soundPhrase1_Click(object sender, EventArgs e)
{
    //stopped = false;
    soundPhrase1Pause.Visible = false;
    int i = 0;
    PictureBox pbox = (PictureBox)sender;
    //childref = new ThreadStart(voiceSpeak);
    childThread = new Thread(new ThreadStart(() =>
    {
        pBoxName = pbox.Name;
        if ((pbox.Name == pausedName))
        {
            // Second click on the paused button: resume.
            sSynth.Resume();
            pausedName = "";
        }
        else
        {
            // Pause any current speech (ignore if nothing is speaking).
            try
            {
                sSynth.Pause();
            }
            catch (Exception)
            {
            }
            sSynth = new SpeechSynthesizer();
            PromptBuilder pBuilder = new PromptBuilder();
            SpeechRecognitionEngine sRecognize = new SpeechRecognitionEngine(new CultureInfo("fr-FR"));
            //int i = 0;
            // PictureBox[] soundPhrases = { soundPhrase1, soundPhrase2, soundPhrase3, soundPhrase4 };
            // while (soundPhrases[i].Name != pBoxName) { i++; }
            //pbox = soundPhrases[i];
            pBuilder.ClearContent();
            pBuilder.AppendText(s);
            sSynth.Speak(pBuilder);
            sSynth.Dispose();
            pausedName = "";
            // Hide the pause icon on the UI thread once speech completes.
            this.Invoke(new MethodInvoker(() => soundPhrase1Pause.Hide()));
            //pausePicBox.Hide();
        }
    }));
    soundPhrase1Pause.Show();
    childThread.Start();
}
/// <summary>
/// Exercises SpeechSynthesizer properties, all speech events, and the
/// Ready/Paused state transitions while speaking SSML into a memory stream.
/// </summary>
public void SpeechSynthesizerEventsAndProperties()
{
    using (var synth = new SpeechSynthesizer())
    {
        using var ms = new MemoryStream();
        synth.SetOutputToNull();
        synth.SetOutputToAudioStream(ms, new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo));
        synth.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Adult);
        Assert.True(synth.Volume > 0);
        Assert.NotNull(synth.Voice);
        Assert.NotEmpty(synth.GetInstalledVoices());
        Assert.Null(synth.GetCurrentlySpokenPrompt());
        var builder = new PromptBuilder();
        builder.AppendText("synthesizer");
        // Count every event fired during the synchronous SpeakSsml call;
        // SpeakCompleted asserts the expected total (34 before its own ++).
        int events = 0;
        synth.BookmarkReached += (object o, BookmarkReachedEventArgs e) => events++;
        synth.PhonemeReached += (object o, PhonemeReachedEventArgs e) => events++;
        synth.SpeakProgress += (object o, SpeakProgressEventArgs e) => events++;
        synth.SpeakStarted += (object o, SpeakStartedEventArgs e) => events++;
        synth.VisemeReached += (object o, VisemeReachedEventArgs e) => events++;
        synth.VoiceChange += (object o, VoiceChangeEventArgs e) => events++;
        synth.StateChanged += (object o, System.Speech.Synthesis.StateChangedEventArgs e) => events++;
        synth.SpeakCompleted += (object o, SpeakCompletedEventArgs e) => { events++; Assert.Equal(34, events++); };
        Assert.Equal(SynthesizerState.Ready, synth.State);
        synth.SpeakSsml(builder.ToXml());
        Assert.Equal(SynthesizerState.Ready, synth.State);
        // Pause/Resume round-trip returns the synthesizer to Ready.
        synth.Pause();
        Assert.Equal(SynthesizerState.Paused, synth.State);
        synth.Resume();
        Assert.Equal(SynthesizerState.Ready, synth.State);
    }
}
/// <summary>
/// Derives the recognizer's phoneme string for a word by synthesizing it to
/// an in-memory WAV and running recognition over that audio with a
/// one-word grammar.
/// </summary>
/// <param name="MyWord">Word to obtain a pronunciation for.</param>
/// <returns>The recognizer's phonemes, or an empty string when recognition fails.</returns>
public static string GetPronunciationFromText(string MyWord)
{
    //this is a trick to figure out phonemes used by synthesis engine
    //txt to wav
    using (MemoryStream audioStream = new MemoryStream())
    {
        using (SpeechSynthesizer synth = new SpeechSynthesizer())
        {
            synth.SetOutputToWaveStream(audioStream);
            PromptBuilder pb = new PromptBuilder();
            //pb.AppendBreak(PromptBreak.ExtraSmall); //'e' wont be recognized if this is large, or non-existent?
            //synth.Speak(pb);
            synth.Speak(MyWord);
            //synth.Speak(pb);
            synth.SetOutputToNull();
            // Rewind so the recognizer reads from the start of the audio.
            audioStream.Position = 0;
            //now wav to txt (for reco phonemes)
            recoPhonemes = String.Empty;
            GrammarBuilder gb = new GrammarBuilder(MyWord);
            Grammar g = new Grammar(gb); //TODO the hard letters to recognize are 'g' and 'e'
            SpeechRecognitionEngine reco = new SpeechRecognitionEngine();
            reco.SpeechHypothesized += new EventHandler<SpeechHypothesizedEventArgs>(reco_SpeechHypothesized);
            reco.SpeechRecognitionRejected += new EventHandler<SpeechRecognitionRejectedEventArgs>(reco_SpeechRecognitionRejected);
            reco.UnloadAllGrammars(); //only use the one word grammar
            reco.LoadGrammar(g);
            reco.SetInputToWaveStream(audioStream);
            RecognitionResult rr = reco.Recognize();
            reco.SetInputToNull();
            if (rr != null)
            {
                recoPhonemes = StringFromWordArray(rr.Words, WordType.Pronunciation);
            }
            //txtRecoPho.Text = recoPhonemes;
            return (recoPhonemes);
        }
    }
}
/// <summary>
/// Renders the text to an in-memory WAV stream using the configured bot
/// language (default fr-FR) and hands the audio to the add-on pipeline.
/// Errors are logged, never thrown.
/// </summary>
/// <param name="tts">Text to synthesize.</param>
/// <param name="sync">Passed through to AfterHandleVoice.</param>
public void Speak(String tts, bool sync)
{
    Log("Speaking: " + tts);
    try
    {
        PromptBuilder builder = new PromptBuilder();
        builder.Culture = new CultureInfo(ConfigManager.GetInstance().Find("bot.language", "fr-FR"));
        builder.AppendText(tts);
        using (var ms = new MemoryStream())
        {
            // Serialize access to the shared synthesizer while its output
            // is redirected to this stream.
            lock (synthesizer)
            {
                synthesizer.SetOutputToWaveStream(ms);
                synthesizer.Speak(builder);
            }
            // Rewind for the consumer; bail out when nothing was rendered.
            ms.Position = 0;
            if (ms.Length <= 0)
            {
                return;
            }
            AddOnManager.GetInstance().AfterHandleVoice(tts, sync, ms);
        }
    }
    catch (Exception ex)
    {
        Error(ex);
    }
}
/// <summary>
/// Exercises PromptBuilder: plain text, a pronunciation hint, raw
/// prompt-engine SSML, and round-tripping the generated XML through AppendSsml.
/// </summary>
public static void PromptBuilder()
{
    string ns = "\"http://schemas.microsoft.com/Speech/2003/03/PromptEngine\"";
    string openTag = "<peml:prompt_output xmlns:peml=" + ns + ">";
    string closeTag = "</peml:prompt_output>";
    var builder = new PromptBuilder();
    builder.AppendText("test");
    builder.AppendTextWithPronunciation("foo", "bar");
    // Inject a raw prompt-engine output element wrapping "hello".
    builder.AppendSsmlMarkup(openTag);
    builder.AppendSsmlMarkup("hello");
    builder.AppendSsmlMarkup(closeTag);
    Assert.Contains("hello", builder.ToXml());
    Assert.Equal(CultureInfo.CurrentCulture, builder.Culture);
    Assert.False(builder.IsEmpty);
    // Round-trip: the generated SSML must be accepted by AppendSsml.
    string ssml = builder.ToXml();
    builder.AppendSsml(XmlTextReader.Create(new StringReader(ssml)));
}
/// <summary>
/// Demo handler: speaks "How are you", waits two seconds, then repeats it
/// with reduced emphasis on "How" and an extra-slow, strongly emphasized "are".
/// </summary>
private void OnPrompt(object sender, RoutedEventArgs e)
{
    var builder = new PromptBuilder();
    builder.AppendText("How are you");
    builder.AppendBreak(TimeSpan.FromSeconds(2));
    builder.AppendText("How ", PromptEmphasis.Reduced);
    var emphatic = new PromptStyle();
    emphatic.Rate = PromptRate.ExtraSlow;
    emphatic.Emphasis = PromptEmphasis.Strong;
    builder.StartStyle(emphatic);
    builder.AppendText("are ");
    builder.EndStyle();
    builder.AppendText("you?");
    var speech = new SpeechSynthesizer();
    speech.Speak(builder);
}
/// <summary>
/// Speaks the text asynchronously. When the main reader is paused, announces
/// how to resume instead. Text containing the "asteriklinebreakasterik"
/// marker is split on it (and on CRLF) with breaks inserted between parts.
/// </summary>
public static void Speak(string FinalTextString)
{
    if (reader.State == SynthesizerState.Paused)
    {
        // Reader is paused: use the secondary reader to tell the user how to resume.
        tempReader.SpeakAsyncCancelAll();
        tempReader.SpeakAsync("TTS is in Pause State. Please Press Caps lock E to continue with TTS");
        Console.WriteLine("End of Speach ");
    }
    else
    {
        PromptBuilder pb2 = new PromptBuilder(new System.Globalization.CultureInfo("en-US"));
        // string str2 = "<say-as interpret-as=\"characters\">" + dataReceived + "</say-as>";
        //pb2.AppendSsmlMarkup(FinalTextString);
        if (FinalTextString.Contains("asteriklinebreakasterik")) //split string at lineBreak and append break
        {
            string[] lineBreak = new string[] { "asteriklinebreakasterik" };
            string[] result = FinalTextString.Split(lineBreak, StringSplitOptions.RemoveEmptyEntries);
            foreach (string v in result)
            {
                // Within each marked section, pause briefly at CRLF boundaries.
                string[] pauses = new string[] { "\r\n" };
                string[] pausesResult = v.Split(pauses, StringSplitOptions.RemoveEmptyEntries);
                foreach (var r in pausesResult)
                {
                    pb2.AppendText(r);
                    pb2.AppendBreak(PromptBreak.Small);
                }
                // Longer pause between marked sections.
                pb2.AppendBreak(TimeSpan.FromSeconds(2));
            }
            reader.SpeakAsync(pb2);
        }
        else
        {
            reader.SpeakAsync(FinalTextString);
        }
        Console.WriteLine("End of Speach ");
    }
}
/// <summary>
/// Builds the full spoken workout script (intro, per-exercise set and rest
/// cues with half-time callouts, outro) and saves it as an MP3 using the
/// selected voice; shows a message box with the outcome.
/// </summary>
private void SaveAsAudioButton_Click(object sender, EventArgs e)
{
    if (ExerciseListBoxPopulated())
    {
        var speechPrompt = new PromptBuilder();
        speechPrompt.AppendText("Generated by Audio Workout Creator. Please visit Streets of Smashville Dot Com to download program.");
        AppendBreakToPrompt(speechPrompt, 0, 3);
        speechPrompt.AppendText($"Welcome to Workout Assistant. Today you will be performing {ExerciseListBox.Items.Count} exercises.");
        AppendBreakToPrompt(speechPrompt, 0, 3);
        foreach (var exercise in _workout)
        {
            // Setup cue, then the timed set with a half-way callout.
            speechPrompt.AppendText($"Please get in position for exercise, {exercise.Name} for {exercise.Reps} reps at {exercise.Weight} pounds.");
            AppendBreakToPrompt(speechPrompt, 0, 15);
            speechPrompt.AppendText($"Time for {exercise.Name} for {exercise.Reps} reps at {exercise.Weight} pounds. ");
            speechPrompt.AppendText($"You will have {exercise.SetTime} seconds to complete the set.");
            AppendBreakToPrompt(speechPrompt, 0, 5);
            speechPrompt.AppendText($"Begin {exercise.Name}");
            AppendBreakToPrompt(speechPrompt, 0, (exercise.SetTime / 2));
            speechPrompt.AppendText("Half set time.");
            AppendBreakToPrompt(speechPrompt, 0, (exercise.SetTime / 2));
            // Rest period, also split in half by a callout.
            speechPrompt.AppendText($"Good set! Time to rest. You will have {exercise.RestTime} seconds to rest.");
            AppendBreakToPrompt(speechPrompt, 0, (exercise.RestTime / 2));
            speechPrompt.AppendText("Half rest time.");
            AppendBreakToPrompt(speechPrompt, 0, (exercise.RestTime / 2));
            speechPrompt.AppendText("Rest time is over.");
        }
        speechPrompt.AppendText("Awesome workout! You are a step closer to achieving your goals! See you next time!");
        SpeechAction.ConvertSpeechSynthPromptToMp3File(speechPrompt, _savePath, VoiceSelectListBox.SelectedItem.ToString());
        MessageBox.Show($"Workout saved to {_savePath}.");
    }
    else
    {
        MessageBox.Show("There is no populated data in your workout. Saving failed.");
    }
}
/// <summary>
/// Starts continuous recognition of a small command vocabulary and launches
/// the matching application when a command is heard. Setup errors are shown
/// in a message box.
/// </summary>
private void Go()
{
    // (Removed unused SpeechSynthesizer and PromptBuilder locals from the original.)
    var src = new SpeechRecognitionEngine();
    Choices list = new Choices(new[] { "hello", "browser", "visual studio" });
    Grammar gm = new Grammar(new GrammarBuilder(list));
    try
    {
        src.RequestRecognizerUpdate();
        src.LoadGrammar(gm);
        src.SpeechRecognized += (s, e) =>
        {
            var txt = e.Result.Text;
            switch (txt)
            {
                case "hello":
                    Process.Start("Notepad", "");
                    break;
                case "browser":
                    Process.Start(@"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe", "--incognito");
                    break;
                case "visual studio":
                    Process.Start("devenv.exe", "");
                    break;
                //// more
            }
        };
        src.SetInputToDefaultAudioDevice();
        src.RecognizeAsync(RecognizeMode.Multiple);
    }
    catch (Exception ex)
    {
        MessageBox.Show(ex.Message);
    }
}
/// <summary>
/// Synthesize more elaborated speeches using prompts.
/// </summary>
/// <param name="sentences">Sentence text paired with the tone to speak it in.</param>
// <Improvement> Incomplete implementation
// Automatically determine proper output generation so Core doesn't need to bother because anyway Voice facility isn't part of Core
// Utilize PromptRate, PromptVolume, PromptEmphasis, Voice, and Pause(break) when appropriate, according to sentence structure, tone, phrase type (Content type, SayAa()) and other information
public void BuildSpeech(List<Tuple<string, SpeechTone>> sentences)
{
    var builder = new PromptBuilder();
    builder.StartVoice(VoiceGender.Female, VoiceAge.Adult);
    foreach (var sentence in sentences)
    {
        builder.StartSentence();
        if (sentence.Item2 == SpeechTone.Soft)
        {
            // Soft is currently the only tone with distinct styling.
            builder.StartStyle(new PromptStyle(PromptVolume.ExtraSoft));
            builder.AppendText(sentence.Item1);
            builder.EndStyle();
        }
        else if (sentence.Item2 == SpeechTone.Normal
              || sentence.Item2 == SpeechTone.Joyous
              || sentence.Item2 == SpeechTone.Naughty)
        {
            // Joyous/Naughty are not differentiated yet (see Improvement note);
            // any other tone produces an empty sentence, as before.
            builder.AppendText(sentence.Item1);
        }
        builder.EndSentence();
    }
    builder.EndVoice();
    SpeechSynthesizer.Speak(builder);
}
/// <summary>
/// Renders the content to "{name}.wav" on the desktop (32 kHz mono PCM)
/// and then plays the resulting file.
/// </summary>
static void SaveTextToSpeech(string name, SpeechSynthesizer synth, string content)
{
    string desktop = Environment.GetFolderPath(Environment.SpecialFolder.Desktop);
    string savePath = $@"{desktop}\{name}.wav";
    // Configure the audio output.
    synth.SetOutputToWaveFile(savePath, new SpeechAudioFormatInfo(32000, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
    // Build a prompt.
    PromptBuilder builder = new PromptBuilder();
    builder.AppendText(content);
    // Speak the prompt (written into the wave file).
    synth.Speak(builder);
    // Release the wave file before playing it. The original created the
    // SoundPlayer while the synthesizer still held the file open, which can
    // make playback fail or read an incomplete file.
    synth.SetOutputToNull();
    // Create a SoundPlayer instance to play output audio file.
    System.Media.SoundPlayer m_SoundPlayer = new System.Media.SoundPlayer(savePath);
    m_SoundPlayer.Play();
}
/// <summary>
/// Speaks the chat text on the default audio device using the given culture,
/// with the TTS voice and combined output volume both set to 20.
/// </summary>
private static void Speech(string culture, string chat)
{
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToDefaultAudioDevice();
        // Set the volume of the TTS voice, and the combined output volume.
        synth.TtsVolume = 20;
        synth.Volume = 20;
        // Build a prompt in the requested culture.
        var prompt = new PromptBuilder(new System.Globalization.CultureInfo(culture));
        //builder.AppendAudio("C:\\Test\\WelcomeToContosoRadio.wav");
        prompt.AppendText(chat);
        synth.Speak(prompt);
    }
}
/// <summary>
/// Speech-command dispatcher: greets, appends a name to the rich text box,
/// or reads the selected text aloud, depending on the recognized phrase.
/// </summary>
void recEngine_SpeachSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    switch (e.Result.Text)
    {
        case "say hello":
            //MessageBox.Show("Hello Denis. How are you?"); break;
            // NOTE(review): this prompt is built but never spoken — SpeakAsync
            // below receives a plain string instead of promtBuilder. Confirm
            // which was intended before cleaning up.
            PromptBuilder promtBuilder = new PromptBuilder();
            promtBuilder.StartSentence();
            promtBuilder.AppendText("Hello Denis");
            promtBuilder.EndSentence();
            promtBuilder.AppendBreak(PromptBreak.ExtraSmall);
            promtBuilder.AppendText("How are you?");
            syncSpeechSynthesizer.SpeakAsync("Hello Denis. How are you?");
            break;
        case "print my name":
            // Append the name on a new line of the text box.
            richTextBox1.Text += "\nDenis";
            break;
        case "speak selected text":
            syncSpeechSynthesizer.SpeakAsync(richTextBox1.SelectedText);
            break;
    }
}
/// <summary>
/// Sets up dictation recognition: default audio input, dictation grammar, and
/// a handler that appends words with confidence above 0.2 to the text box.
/// </summary>
private void Initialize()
{
    rnd = new Random();
    //sSynth = new SpeechSynthesizer();
    pBuilder = new PromptBuilder();
    sSpeech = new SpeechRecognitionEngine();
    sSpeech.SetInputToDefaultAudioDevice();
    sSpeech.LoadGrammar(new DictationGrammar());
    ts = new TimeSpan(0, 0, 5);
    sSpeech.SpeechRecognized += (s, args) =>
    {
        foreach (RecognizedWordUnit word in args.Result.Words)
        {
            // Skip low-confidence recognitions.
            if (word.Confidence > 0.2f)
            {
                freeTextBox.Text += word.Text + " ";
            }
        }
        //freeTextBox.Text += Environment.NewLine;
    };
}
/// <summary>
/// Per-tick hook: flushes any pending text-to-speech buffer (splitting on
/// ';' into chunks separated by small breaks), then runs the speech script
/// when the current command is 'S'.
/// </summary>
public override void DoBeforeNextExecute()
{
    CheckScriptTimer();
    if (bufferTosay != "")
    {
        if (bufferTosay.Contains(";"))
        {
            // Multi-part message: speak each ';'-separated chunk followed by a small break.
            PromptBuilder pb = new PromptBuilder();
            foreach (var s in bufferTosay.Split(';'))
            {
                //pb.StartParagraph();
                //pb.StartSentence();
                pb.AppendText(s);
                pb.AppendBreak(PromptBreak.Small);
                //pb.EndSentence();
                //pb.EndParagraph();
            }
            Say(pb);
        }
        else
        {
            Say(bufferTosay);
        }
        // Clear the buffer so the message is spoken only once.
        bufferTosay = "";
        return;
    }
    // 'S' command: lazily create the speech script wrapper and run it.
    if (cmd == 'S')
    {
        if (SF == null)
        {
            SF = new ScriptSpeech(this);
        }
        SF.Speech();
    }
}
/// <summary>
/// Starts continuous recognition of the email-navigation commands
/// ("go to email/subject/message") on the default microphone.
/// </summary>
private void SpeechToTextalgorithm()
{
    // richTextBox1.Focus();
    // (Removed an unused PromptBuilder local from the original.)
    SpeechRecognitionEngine sregEngine = new SpeechRecognitionEngine();
    Choices slist = new Choices();
    slist.Add(new string[] {
        "go to email",
        "go to subject",
        "go to message"
        //"Read email",
        //"Read subject",
        //"Read message",
        //"sent"
        // "next",
    });
    Grammar gr = new Grammar(new GrammarBuilder(slist));
    try
    {
        sregEngine.RequestRecognizerUpdate();
        sregEngine.LoadGrammar(gr);
        sregEngine.SpeechRecognized += sregEngine_SpeechRecognized;
        sregEngine.SetInputToDefaultAudioDevice();
        sregEngine.RecognizeAsync(RecognizeMode.Multiple);
        // Removed the original trailing synchronous Recognize() call: invoking
        // it while RecognizeAsync is active throws InvalidOperationException,
        // which the catch below silently swallowed.
    }
    catch
    {
        // Best-effort: recognition simply stays off when setup fails.
        return;
    }
}
/// <summary>
/// Builds SSML from the phoneme list and speaks it on the default audio
/// device, either asynchronously or blocking until done.
/// </summary>
/// <param name="phonemes">Phoneme strings converted to SSML by GetSsml.</param>
/// <param name="async">true to return immediately while speech plays.</param>
public void SpeakStart(List<string> phonemes, bool async = true)
{
    // The original wrapped this body in try { ... } catch (Exception) { throw; },
    // which is a no-op: exceptions propagate identically without it.
    ssmlBuilder = new PromptBuilder();
    ssmlBuilder.AppendSsmlMarkup(GetSsml(phonemes));
    synthesizer.SetOutputToDefaultAudioDevice();
    if (async)
    {
        synthesizer.SpeakAsync(ssmlBuilder);
    }
    else
    {
        synthesizer.Speak(ssmlBuilder);
    }
}
/// <summary>
/// Initializes the main window and plays a spoken welcome message.
/// </summary>
public MainWindow()
{
    InitializeComponent();
    //Add speech Anna ->20181021
    var welcome = new PromptBuilder();
    welcome.AppendText("Welcome to enter Automation World!");
    new SpeechSynthesizer().Speak(welcome);
}
/// <summary>
/// Starts continuous recognition of the commands listed in the command file,
/// routing recognized phrases to Recognizer_recognized, and loops on key
/// presses forever to keep recognition alive.
/// </summary>
public static void Destiny()
{
    // (Removed an unused PromptBuilder local from the original.)
    SpeechRecognitionEngine recognizer = new SpeechRecognitionEngine(new System.Globalization.CultureInfo("en-US"));
    // Grammar: one choice per line of the command file.
    Choices sList = new Choices();
    sList.Add(File.ReadAllLines(commandUrl));
    GrammarBuilder gbuild = new GrammarBuilder();
    gbuild.Append(sList);
    gbuild.Culture = new System.Globalization.CultureInfo("en-US");
    Grammar gr = new Grammar(gbuild);
    recognizer.RequestRecognizerUpdate();
    recognizer.LoadGrammarAsync(gr);
    recognizer.SpeechRecognized += Recognizer_recognized;
    recognizer.SetInputToDefaultAudioDevice();
    recognizer.RecognizeAsync(RecognizeMode.Multiple);
    // Block forever so the async recognition keeps running.
    while (true)
    {
        Console.ReadKey();
    }
}
/// <summary>
/// Synthesizes the text to an in-memory WAV, converts it to MP3 at the given
/// bitrate, and leaves the result at <paramref name="path"/> (replacing any
/// existing file).
/// </summary>
public static void CreateAudio(string path, string text, CultureInfo cultureInfo, int bitrate)
{
    using (SpeechSynthesizer reader = new SpeechSynthesizer())
    {
        reader.Volume = 100;
        reader.Rate = 0; //medium
        // try/finally rather than 'using' because the stream is passed by ref
        // to the converter; the original leaked the MemoryStream entirely.
        MemoryStream ms = new MemoryStream();
        try
        {
            reader.SetOutputToWaveStream(ms);
            PromptBuilder builder = new PromptBuilder(cultureInfo);
            builder.AppendText(text);
            reader.Speak(builder);
            ConvertWavStreamToMp3File(ref ms, path, bitrate);
        }
        finally
        {
            // Detach the synthesizer from the stream before disposing it.
            reader.SetOutputToNull();
            ms.Dispose();
        }
        // The converter writes "<path>.mp3"; replace any existing file at path.
        if (File.Exists(path))
        {
            File.Delete(path);
        }
        File.Move(path + ".mp3", path);
    }
}
/// <summary>
/// Speaks <paramref name="str"/> with the voice configured for the locale.
/// </summary>
/// <param name="str">Text to speak (filled into the prompt by FillPromptBuilder).</param>
/// <param name="locale">IETF language tag; must be Constants.Ru or Constants.En.</param>
/// <exception cref="ArgumentException">Thrown for an unsupported locale.</exception>
public void SayStr(string str, string locale)
{
    var prompt = new PromptBuilder();
    prompt.Culture = CultureInfo.GetCultureInfoByIetfLanguageTag(locale);
    FillPromptBuilder(str, prompt);

    if (locale == Constants.Ru)
    {
        _speaker.SelectVoice(Constants.RuVoice);
    }
    else if (locale == Constants.En)
    {
        _speaker.SelectVoice(Constants.EnVoice);
    }
    else
    {
        // ArgumentException replaces the bare Exception thrown before; it is a
        // subclass, so existing `catch (Exception)` callers still match.
        // (Message kept verbatim: "unknown language identifier".)
        throw new ArgumentException("неизвестный идентификатор языка", nameof(locale));
    }

    _speaker.Speak(prompt);
}
/// <summary>
/// Starts continuous recognition of the login-screen voice commands
/// ("login", "signin", "register", "user", "password", "forgot"), routing
/// results to sRecognize_speech.
/// </summary>
public void SCommands()
{
    SpeechSynthesizer sSynth = new SpeechSynthesizer();
    PromptBuilder pBuilder = new PromptBuilder();
    SpeechRecognitionEngine sRecognize = new SpeechRecognitionEngine();

    Choices sList = new Choices();
    sList.Add(new string[] { "login", "signin", "register", "user", "password", "forgot" });
    Grammar gr = new Grammar(new GrammarBuilder(sList));

    try
    {
        sRecognize.RequestRecognizerUpdate();
        sRecognize.LoadGrammar(gr);
        sRecognize.SpeechRecognized += sRecognize_speech;
        sRecognize.SetInputToDefaultAudioDevice();
        sRecognize.RecognizeAsync(RecognizeMode.Multiple);
        // BUG FIX: the original also called sRecognize.Recognize() here, which
        // throws InvalidOperationException once async recognition is running.
    }
    catch (Exception ex)
    {
        // Was an empty catch that silently swallowed failures; surface them.
        Console.WriteLine("Speech recognition setup failed: " + ex.Message);
    }
}
/// <summary>
/// Renders the given text as speech into .\mywave.wav (8 kHz, 16-bit, mono). BETA.
/// </summary>
/// <param name="sText">Text to synthesize. FIX: the original ignored this and spoke a hard-coded sample.</param>
/// <param name="VoiceName">Currently unused; the voice is selected by gender/age hints. TODO: honor it.</param>
/// <param name="StartSound">Currently unused (intended START sound per the original German summary). TODO.</param>
/// <param name="StopSound">Currently unused (intended STOP sound per the original German summary). TODO.</param>
/// <param name="iVolume">Currently unused; style volume is fixed at ExtraLoud. TODO: map to PromptVolume.</param>
/// <param name="iRate">Currently unused; style rate is fixed at Fast. TODO: map to PromptRate.</param>
public void SaveSay1(String sText, String VoiceName, String StartSound, String StopSound, int iVolume, int iRate)
{
    var synthFormat = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, AudioChannel.Mono);

    var pbuilder = new PromptBuilder();
    var pStyle = new PromptStyle();
    pStyle.Emphasis = PromptEmphasis.None;
    pStyle.Rate = PromptRate.Fast;
    pStyle.Volume = PromptVolume.ExtraLoud;

    pbuilder.StartStyle(pStyle);
    pbuilder.StartParagraph();
    pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Teen, 2);
    pbuilder.StartSentence();
    // FIX: speak the caller's text instead of the hard-coded sample string.
    pbuilder.AppendText(sText);
    pbuilder.EndSentence();
    pbuilder.EndVoice();
    pbuilder.EndParagraph();
    pbuilder.EndStyle();

    // FIX: dispose the synthesizer and both streams (all leaked before); the
    // unused SaveFileDialog local and its commented-out dialog code are removed.
    using (var synthesizer = new SpeechSynthesizer())
    using (var waveStream = new MemoryStream())
    using (var waveFileStream = new FileStream(@".\\mywave.wav", FileMode.OpenOrCreate))
    {
        synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
        synthesizer.Speak(pbuilder);
        synthesizer.SetOutputToNull();
        waveStream.WriteTo(waveFileStream);
    }
}
/// <summary>
/// Starts a prompt style whose speaking rate matches the tag argument
/// ("fast", "medium", "slow", "extra fast", "extra slow", case-insensitive).
/// </summary>
/// <param name="builder">Prompt builder the rate style is pushed onto.</param>
/// <returns>True when the argument named a known rate; false otherwise (no style started).</returns>
public override bool Apply(PromptBuilder builder)
{
    String speed = ParseTagArgument().ToLowerInvariant();
    PromptRate rate;
    switch (speed)
    {
        case "fast": rate = PromptRate.Fast; break;
        case "medium": rate = PromptRate.Medium; break;
        case "slow": rate = PromptRate.Slow; break;
        case "extra fast": rate = PromptRate.ExtraFast; break;
        case "extra slow": rate = PromptRate.ExtraSlow; break;
        default:
            // Consistent with the sibling implementation of this method:
            // plain `return false;` instead of the call-like `return(false)`.
            return false;
    }
    PromptStyle style = new PromptStyle(rate);
    builder.StartStyle(style);
    return true;
}
// Flag controlling the playback loop running on the worker thread.
bool LoopState;

/// <summary>
/// (Re)starts looping speech playback of the textbox contents on a background
/// task, styled extra-fast/strong/extra-loud, followed by the fixed alert text.
/// NOTE(review): the worker reads `synth` while Button_Click_Stop may dispose
/// it from the UI thread — a stop request likely ends the loop via an exception
/// or the cleared LoopState flag; confirm against Button_Click_Stop.
/// </summary>
private void Button_Click(object sender, RoutedEventArgs e)
{
    if (synth != null)
    {
        // If the previous resources are not released first, multiple voices
        // end up playing at the same time.
        Button_Click_Stop(null, null);
    }
    // After Dispose(), `synth` is still non-null but its resources are gone,
    // so a fresh instance is required on every click.
    synth = new SpeechSynthesizer();
    LoopState = true;
    // Worker threads cannot touch UI-thread controls directly, so copy
    // SpeechTextTB.Text into a plain local first.
    string text = SpeechTextTB.Text;
    Task.Run(() =>
    {
        synth.SetOutputToDefaultAudioDevice();
        synth.SelectVoiceByHints(VoiceGender.Male); // has no effect (per original author)
        PromptBuilder prompt = new PromptBuilder();
        PromptStyle style = new PromptStyle()
        {
            Rate = PromptRate.ExtraFast,
            Emphasis = PromptEmphasis.Strong,
            Volume = PromptVolume.ExtraLoud,
        };
        // Apply the style only to the user-entered text.
        prompt.StartStyle(style);
        prompt.AppendText(text);
        prompt.EndStyle();
        prompt.AppendText("来电话了");
        // Play the prompt in a loop until LoopState is cleared.
        while (LoopState)
        {
            synth.Speak(prompt);
        }
    });
}
/// <summary>
/// Demo entry point: enumerates every installed voice, prints its metadata,
/// and speaks a short sample (with strong emphasis on the second sentence)
/// in that voice. Waits for a key press before exiting.
/// </summary>
static void Main(string[] args)
{
    var synthesizer = new SpeechSynthesizer();
    synthesizer.SetOutputToDefaultAudioDevice();

    foreach (var voice in synthesizer.GetInstalledVoices())
    {
        var info = voice.VoiceInfo;
        Console.WriteLine($"Id: {info.Id} | Name: {info.Name} | Age: {info.Age} | Gender: {info.Gender} | Culture: {info.Culture}");

        synthesizer.SelectVoice(info.Name);

        var sample = new PromptBuilder();
        sample.StartVoice(info.Name);
        sample.AppendText($"This is the voice {info.Name}");
        sample.StartStyle(new PromptStyle(PromptEmphasis.Strong));
        sample.AppendText("It's time for bed.");
        sample.EndStyle();
        sample.EndVoice();

        synthesizer.Speak(sample);
    }

    Console.ReadKey();
}
/// <summary>
/// Speaks <paramref name="message"/> either through a plain prompt or — when
/// UseSSML is set — through an SSML document tagged with the configured
/// language (English is the fallback). Honors AsyncMode for both paths.
/// </summary>
/// <param name="message">Text to speak.</param>
/// <param name="speed">Synthesizer rate; default 0.</param>
public void Speak(string message, int speed = 0)
{
    try
    {
        // Build a prompt (used for the non-SSML path).
        PromptBuilder builder = new PromptBuilder();
        builder.AppendText(message);

        ss.Rate = speed;

        if (UseSSML)
        {
            // Map the configured language to an SSML xml:lang tag.
            string langTag;
            switch (lang)
            {
                case Language.Finish: langTag = "fi-FI"; break;
                case Language.Norwegian: langTag = "nb-NO"; break;
                case Language.Russian: langTag = "ru-RU"; break;
                case Language.Swedish: langTag = "sv-SE"; break;
                case Language.English:
                default: langTag = "en-GB"; break;
            }

            // FIX: XML-escape the message; a raw '&', '<' or '>' previously
            // produced malformed SSML and made SpeakSsml throw.
            string escaped = System.Security.SecurityElement.Escape(message);
            string ssmlString =
                "<speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\"" +
                " xml:lang=\"" + langTag + "\">" +
                "<s>" + escaped + "</s></speak>";

            if (AsyncMode) ss.SpeakSsmlAsync(ssmlString);
            else ss.SpeakSsml(ssmlString);
        }
        else
        {
            if (AsyncMode) ss.SpeakAsync(builder);
            else ss.Speak(builder);
        }
    }
    catch (Exception e)
    {
        Console.WriteLine("An error occured: '{0}'", e);
    }
}
/// <summary>
/// Builds a demo prompt ("This is a date", a break, then "31-12-2007" read as
/// a day-month-year date).
/// NOTE(review): the <paramref name="text"/> parameter is never used, and
/// nothing is spoken — the final call is commented out. Presumably
/// demo/scaffolding code; confirm intent before wiring it up.
/// </summary>
public void TTS(string text)
{
    PromptBuilder pb = new PromptBuilder();
    pb.AppendText("This is a date");
    pb.AppendBreak();
    // SayAs.DayMonthYear hints the synthesizer to read this as a date.
    pb.AppendTextWithHint("31-12-2007", SayAs.DayMonthYear);
    //PromptBuilder(pb);
}
/// <summary>
/// Type initializer: creates the shared synthesizer, prompt builder and
/// recognition engine used throughout the class.
/// </summary>
static SpeechSynthRecognition()
{
    recognition = new SpeechRecognitionEngine();
    promptBuilder = new PromptBuilder();
    synth = new SpeechSynthesizer();
}
/// <summary>
/// Synthesizes <paramref name="tts"/> into an in-memory WAV stream and plays
/// it through the speech output session. Calls made while a previous
/// utterance is still playing are dropped.
/// </summary>
/// <param name="tts">Text to speak; null is silently ignored.</param>
public void Speak(String tts)
{
    if (tts == null) { return; }
    // Best-effort re-entrancy guard. NOTE(review): the check and the set are
    // not atomic, so concurrent callers could both pass — confirm callers are
    // single-threaded.
    if (speaking) { return; }
    speaking = true;
    WSRConfig.GetInstance().logInfo("TTS", "[" + device + "] " + "Say: " + tts);
    try
    {
        // Build and speak a prompt.
        PromptBuilder builder = new PromptBuilder();
        builder.AppendText(tts);

        // Render synchronously into a memory buffer, then hand the WAV data
        // to the playback session (only when something was produced).
        using (var ms = new MemoryStream())
        {
            synthesizer.SetOutputToWaveStream(ms);
            synthesizer.Speak(builder); // Synchronous
            ms.Position = 0;
            if (ms.Length > 0)
            {
                RunSession(WaveOutSpeech, new WaveFileReader(ms), "TTS");
            }
        }
    }
    catch (Exception ex)
    {
        WSRConfig.GetInstance().logError("TTS", ex);
    }
    // Always clear the guard, even after a synthesis/playback failure.
    WSRConfig.GetInstance().logInfo("PLAYER", "[" + device + "]" + "speaking false");
    speaking = false;
}
/// <summary>
/// Prepares the synthesizer to render the given voice into
/// <paramref name="outputfile"/> as WAV and opens the main prompt style that
/// subsequent text will be spoken in.
/// </summary>
private void StartSpeech(AssignedVoice vb, string outputfile)
{
    var winVoiceWrapper = (WinAvailableVoice)vb.root;
    var voiceInfo = winVoiceWrapper.winVoice.VoiceInfo;

    // Prefer the first audio format the voice advertises; if it reports none,
    // fall back to 16 kHz / 16-bit / mono.
    format = voiceInfo.SupportedAudioFormats.FirstOrDefault()
             ?? new SpeechAudioFormatInfo(
                    16000, // Samples per second
                    AudioBitsPerSample.Sixteen,
                    AudioChannel.Mono);

    // Synthesize the message into a WAV file first.
    mstream = new FileStream(outputfile, FileMode.Create, FileAccess.Write);
    syn.SetOutputToWaveStream(mstream);

    pb = new PromptBuilder();
    mainStyle = new PromptStyle();
    // mainStyle.Volume = promptVol;
    syn.SelectVoice(voiceInfo.Name);
    pb.StartStyle(mainStyle);
}