/// <summary>
/// Builds an SSML prompt from the speech fragments (with optional bookmarks
/// between them) and speaks it asynchronously on the shared synthesizer.
/// </summary>
/// <param name="speech">Fragments to speak plus a parallel array of bookmark names.</param>
public override void Speak(SpeechClient.Speech speech)
{
    try
    {
        PromptBuilder p = new PromptBuilder();
        p.Culture = tts.Voice.Culture;
        p.StartVoice(p.Culture);
        p.StartSentence();
        p.StartStyle(new PromptStyle(PromptEmphasis.None));
        for (int i = 0; i < speech.Text.Length; i++)
        {
            // Once we run out of bookmarks (or hit an empty one), the rest of
            // the fragments are appended in one go. StringBuilder replaces the
            // original O(n^2) string concatenation in this inner loop.
            if (speech.Bookmarks == null || speech.Bookmarks.Length < i + 1 || speech.Bookmarks[i] == "")
            {
                var rest = new System.Text.StringBuilder();
                for (; i < speech.Text.Length; i++)
                {
                    rest.Append(speech.Text[i]).Append(' ');
                }
                p.AppendSsmlMarkup(rest.ToString());
                break;
            }
            else
            {
                p.AppendSsmlMarkup(speech.Text[i]);
                p.AppendBookmark(speech.Bookmarks[i]);
            }
        }
        p.EndStyle();
        p.EndSentence();
        p.EndVoice();
        currentSpeech = speech;
        // Remember the async prompt so later events can be mapped back to the
        // caller-supplied id; anonymous speech is just fired and forgotten.
        if (speech.Id != "")
            ids.Add(tts.SpeakAsync(p), speech.Id);
        else
            tts.SpeakAsync(p);
    }
    catch (Exception e)
    {
        // Best-effort: TTS failures are logged, never propagated to callers.
        Console.WriteLine("WindowsTTS Failed: " + e.Message);
    }
}
/// <summary>
/// Handles a recognized Spanish voice command: greets the user, prints his
/// name into the text box, or reads the selected text aloud.
/// </summary>
void RecEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string command = e.Result.Text;
    if (command == "hola")
    {
        var greeting = new PromptBuilder();
        greeting.StartSentence();
        greeting.AppendText("Hola David.");
        greeting.EndSentence();
        // Short 50 ms pause between the two sentences.
        greeting.AppendBreak(new TimeSpan(0, 0, 0, 0, 50));
        greeting.StartSentence();
        greeting.AppendText("Como estas?", PromptEmphasis.Strong);
        greeting.EndSentence();
        synthesizer.SpeakAsync(greeting);
    }
    else if (command == "mostrar mi nombre")
    {
        richTextBox1.Text += "\nDavid";
    }
    else if (command == "leeme el texto")
    {
        synthesizer.SpeakAsync(richTextBox1.SelectedText);
    }
}
/// <summary>
/// Handles a recognized voice command: greets the user, prints his name into
/// the text box, or reads the selected text aloud.
/// </summary>
private void RecEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string command = e.Result.Text;
    if (command == "say hello")
    {
        var greeting = new PromptBuilder();
        greeting.StartSentence();
        greeting.AppendText("Hello sir");
        greeting.EndSentence();
        // Very short pause before the follow-up question.
        greeting.AppendBreak(PromptBreak.ExtraSmall);
        greeting.StartSentence();
        greeting.AppendText("How you doing?", PromptEmphasis.Strong);
        greeting.EndSentence();
        synthesizer.SpeakAsync(greeting);
    }
    else if (command == "print my name")
    {
        richTextBox.Text += "\nName is = Subhojit";
    }
    else if (command == "Speak selected text")
    {
        synthesizer.SpeakAsync(richTextBox.SelectedText);
    }
}
/// <summary>
/// Handles recognized voice commands: greets Kola, says his name, answers a
/// casual question, or reads the whole text box aloud.
/// </summary>
void RecEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string command = e.Result.Text;
    if (command == "hello there")
    {
        var greeting = new PromptBuilder();
        greeting.StartSentence();
        greeting.AppendText("Hello Kola");
        greeting.EndSentence();
        greeting.AppendBreak(PromptBreak.Small);
        greeting.StartSentence();
        greeting.AppendText("Hey Whats Up?", PromptEmphasis.Strong);
        greeting.EndSentence();
        syn.SpeakAsync(greeting);
        richTextBox1.Text += "Hey Whats Up";
    }
    else if (command == "say my name")
    {
        syn.SpeakAsync("Your name is Kola");
        richTextBox1.Text += "\nYour name is kola";
    }
    else if (command == "Whats good?")
    {
        syn.SpeakAsync("Not alot man, just here chilling!");
        // NOTE(review): the displayed text ends in "1" while the spoken text
        // ends in "!" — looks like a typo; confirm before changing.
        richTextBox1.Text += "\nNot alot man, just here chilling1";
    }
    else if (command == "say text")
    {
        syn.SpeakAsync(richTextBox1.Text);
    }
}
/// <summary>
/// Handles recognized Spanish commands: answers the "ivan" phrase with a
/// two-sentence reply, prints the user's name, or reads the selected text.
/// </summary>
void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string heard = e.Result.Text;
    if (heard == "ivan")
    {
        var reply = new PromptBuilder();
        reply.StartSentence();
        reply.AppendText("Poquemon tipo Morro, Este poquemon habita en el Cetis 16.", PromptEmphasis.Reduced);
        reply.EndSentence();
        // 50 ms pause between the two sentences.
        reply.AppendBreak(new TimeSpan(0, 0, 0, 0, 50));
        reply.StartSentence();
        reply.AppendText("pa' que quieres saber eso, jaja saludos", PromptEmphasis.Strong);
        reply.EndSentence();
        synthesizer.SpeakAsync(reply);
    }
    else if (heard == "imprime mi nombre")
    {
        richTextBox1.Text += "\nMisael";
    }
    else if (heard == "habla el texto")
    {
        synthesizer.SpeakAsync(richTextBox1.SelectedText);
    }
}
/// <summary>
/// Handles recognized commands: greets (selecting a teen male voice first),
/// prints the user's name, or reads the selected text aloud.
/// </summary>
private void RecognitionEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string command = e.Result.Text;
    if (command == "say hello")
    {
        var greeting = new PromptBuilder();
        greeting.StartSentence();
        greeting.AppendText("Hello LuckyCat!");
        greeting.EndSentence();
        greeting.StartSentence();
        greeting.AppendText("How are you?", PromptEmphasis.Moderate);
        greeting.EndSentence();
        // Pick a voice before queuing the prompt.
        synthesizer.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Teen);
        synthesizer.SpeakAsync(greeting);
    }
    else if (command == "print my name")
    {
        richTextBox.Text += "\nLuckyCat1101";
    }
    else if (command == "speak selected text")
    {
        synthesizer.SpeakAsync(richTextBox.SelectedText);
    }
}
/// <summary>
/// Demonstrates PromptBuilder features: sentences, a timed break, and
/// volume/rate styles, then speaks the prompt synchronously.
/// </summary>
static void PromptBuilding()
{
    PromptBuilder builder = new PromptBuilder();
    builder.StartSentence();
    builder.AppendText("This is a prompt building example.");
    builder.EndSentence();
    builder.StartSentence();
    builder.AppendText("Now, there will be a break of 2 seconds.");
    builder.EndSentence();
    builder.AppendBreak(new TimeSpan(0, 0, 2));
    builder.StartStyle(new PromptStyle(PromptVolume.ExtraSoft));
    builder.AppendText("This text is spoken extra soft.");
    builder.EndStyle();
    builder.StartStyle(new PromptStyle(PromptRate.Fast));
    builder.AppendText("This text is spoken fast.");
    builder.EndStyle();
    // using guarantees disposal even if Speak throws; the original's manual
    // Dispose() was skipped on exception.
    using (SpeechSynthesizer synthesizer = new SpeechSynthesizer())
    {
        synthesizer.Speak(builder);
    }
}
/// <summary>
/// Two-stage voice control: "Jarvis" arms the command grammar and acknowledges;
/// the armed commands open Notepad/Explorer, play a song, or speak a greeting.
/// </summary>
void recEngine_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    if (e.Result.Text == "Jarvis")
    {
        // Load the follow-up command grammar and confirm activation.
        var commands = new Choices();
        commands.Add(new String[] { "notepad", "mycomputer", "music", "Speak" });
        gBuilder = new GrammarBuilder();
        gBuilder.Append(commands);
        grammer = new Grammar(gBuilder);
        recEngine.LoadGrammarAsync(grammer);
        synthesizer.SpeakAsync("Yes Sir");
    }

    switch (e.Result.Text)
    {
        case "notepad":
            Process.Start("notepad");
            break;

        case "mycomputer":
            Process.Start("explorer");
            break;

        case "music":
            // NOTE(review): verbatim string with doubled backslashes means the
            // path literally contains "\\" separators — confirm WMP resolves it.
            var player = new WMPLib.WindowsMediaPlayer();
            player.URL = @"C:\\Users\\Yogesh\\Downloads\\Music\\Main Koi Aisa Geet Gaoon(MyMp3Song).mp3";
            player.controls.play();
            break;

        case "Speak":
            var greeting = new PromptBuilder();
            greeting.StartSentence();
            greeting.AppendText("Hello Yogesh");
            greeting.EndSentence();
            greeting.AppendBreak(PromptBreak.None);
            greeting.StartSentence();
            greeting.AppendText("How are You", PromptEmphasis.Strong);
            greeting.EndSentence();
            synthesizer.SpeakAsync(greeting);
            break;
    }
}
#region ISpeaker Members

/// <summary>
/// Converts text to audio: synthesizes a WAV waveform into memory and converts
/// it to MP3 via the injected converter.
/// </summary>
/// <param name="text">Text to synthesize.</param>
/// <returns>MP3 file contents as a byte array, or null if synthesis/conversion fails.</returns>
public byte[] ConvertTextToAudio(string text)
{
    var pbuilder = new PromptBuilder();
    var pStyle = new PromptStyle
    {
        Emphasis = PromptEmphasis.None,
        Rate = PromptRate.Medium,
        Volume = PromptVolume.ExtraLoud
    };
    pbuilder.StartStyle(pStyle);
    pbuilder.StartParagraph();
    pbuilder.StartVoice(VoiceGender.Neutral, VoiceAge.Adult, 2);
    pbuilder.StartSentence();
    pbuilder.AppendText(text);
    pbuilder.EndSentence();
    pbuilder.EndVoice();
    pbuilder.EndParagraph();
    pbuilder.EndStyle();

    try
    {
        using (var memoryStream = new MemoryStream())
        // The original leaked the synthesizer; dispose it deterministically.
        using (var speech = new SpeechSynthesizer())
        {
            speech.SetOutputToWaveStream(memoryStream);
            speech.Speak(pbuilder);
            // Rewind so the converter reads the WAV data from the start.
            memoryStream.Seek(0, SeekOrigin.Begin);
            return _mp3Converter.ConvertWavToMp3(memoryStream);
        }
    }
    catch (Exception e)
    {
        LoggerWrapper.LogTo(LoggerName.Errors).ErrorException(
            "Speaker.ConvertTextToAudio возникло исключение {0}", e);
    }
    return null;
}
/// <summary>
/// Fills the shared prompt builder with one sentence for the current tale page:
/// each pictogram contributes either its recorded audio or its styled text,
/// always preceded by a bookmark carrying the pictogram index.
/// </summary>
public void speakBuilder()
{
    builder.StartParagraph();
    builder.StartSentence();
    foreach (Pictogram picto in taleManager.CurrentPage.Pictograms)
    {
        if (picto == null)
        {
            continue;
        }
        // Bookmark first so playback events can be mapped back to the pictogram.
        builder.AppendBookmark(picto.Index.ToString());
        if (picto.Sound != "")
        {
            builder.AppendAudio(picto.Sound);
        }
        else
        {
            builder.StartStyle(styleText);
            builder.AppendText(picto.TextToRead);
            builder.EndStyle();
        }
    }
    builder.EndSentence();
    builder.EndParagraph();
}
/// <summary>
/// Speaks the given command text asynchronously as a single sentence.
/// </summary>
/// <param name="command">Text to speak.</param>
public void speak(string command)
{
    // PromptBuilder accumulates content across calls; without clearing the
    // shared builder, every call re-speaks all previously queued sentences.
    promptBuilder.ClearContent();
    promptBuilder.StartSentence();
    promptBuilder.AppendText(command);
    promptBuilder.EndSentence();
    speechSynthesizer.SpeakAsync(promptBuilder);
}
/// <summary>
/// Appends a message to the on-screen log, scrolls it into view, and reads
/// it aloud at a fast rate.
/// </summary>
/// <param name="msg">Message to log and speak.</param>
public void UpdateLog(string msg)
{
    var prompt = new PromptBuilder();
    prompt.StartSentence();
    prompt.StartStyle(new PromptStyle(PromptRate.Fast));
    prompt.AppendText(msg);
    prompt.EndStyle();
    prompt.EndSentence();
    _speechSynth.SpeakAsync(prompt);

    rtbLog.Text += "\n" + msg;
    // Keep the newest entry visible.
    rtbLog.SelectionStart = rtbLog.Text.Length;
    rtbLog.ScrollToCaret();
}
/// <summary>
/// Speaks a localized welcome prompt and then listens for free dictation
/// until the user presses Enter.
/// </summary>
static void Main(string[] args)
{
    // Culture comes from app config (IETF language tag, e.g. "en-US").
    var cultureTag = ConfigurationManager.AppSettings["culture"];
    var culture = CultureInfo.GetCultureInfoByIetfLanguageTag(cultureTag);

    // using blocks dispose both engines even when setup throws; the original's
    // manual Dispose() calls were skipped on any earlier exception.
    using (var synth = new SpeechSynthesizer())
    using (var recog = new SpeechRecognitionEngine(culture))
    {
        synth.SetOutputToDefaultAudioDevice();
        var prompt = new PromptBuilder(culture);
        prompt.StartSentence(culture);
        prompt.AppendTextWithHint(SpeakResources.Welcome, SayAs.Text);
        prompt.EndSentence();
        synth.SpeakAsync(prompt);

        recog.LoadGrammar(new DictationGrammar());
        recog.SetInputToDefaultAudioDevice();
        recog.SpeechRecognized += Recog_SpeechRecognized;
        recog.RecognizeAsync(RecognizeMode.Multiple);
        Console.WriteLine("Hello!");
        Console.ReadLine();
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Searches Wikipedia for a term and reads the top results aloud. </summary>
///
/// <param name="text">          The search term. </param>
/// <param name="maxResults">    The maximum results to request. </param>
/// <param name="resultsToRead"> How many result snippets to read aloud. </param>
/// <param name="language">      The Wikipedia language edition to query. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
private void SearchWikipedia(string text, int maxResults, int resultsToRead, Language language)
{
    wikipedia.Limit = maxResults;
    wikipedia.Language = language;
    voice.SpeakAsync("I just received a search request for the term " + text);
    QueryResult results = wikipedia.Search(text);
    if (results.Search.Count == 0)
    {
        voice.SpeakAsync("I'm sorry, I could not find anything for " + text);
    }
    else
    {
        // Never try to read more results than we actually have.
        if (results.Search.Count < resultsToRead)
        {
            resultsToRead = results.Search.Count;
        }
        voice.SpeakAsync("I found " + results.Search.Count + " results for " + text +
                         ". According to Wikipedia, here are the top " + resultsToRead + " results I found");
        PromptBuilder builder = new PromptBuilder();
        builder.StartSentence();
        for (int x = 0; x < resultsToRead; x++)
        {
            string snippet = results.Search[x].Snippet;
            // Strip everything up to and including the last "</span>" tag
            // (7 chars). The original called Substring(LastIndexOf(...) + 7)
            // unconditionally, which silently chopped the first 6 characters
            // whenever no tag was present (LastIndexOf returns -1).
            int tagEnd = snippet.LastIndexOf("</span>");
            builder.AppendText(tagEnd >= 0 ? snippet.Substring(tagEnd + 7) : snippet);
        }
        builder.EndSentence();
        voice.SpeakAsync(builder);
    }
}
//Allows this object class to easily say anything without providing specifics.
public void say_default(string input)
{
    // Default delivery: medium rate, no special emphasis.
    style_.Rate = PromptRate.Medium;
    style_.Emphasis = PromptEmphasis.NotSet;

    // Clear any previous content so only the new sentence is spoken.
    builder.ClearContent();
    builder.StartSentence();
    builder.StartStyle(style_);
    builder.AppendText(input);
    builder.EndStyle();
    builder.EndSentence();

    // Speak synchronously.
    reader.Speak(builder);
}
/// <summary>
/// Speaks the text in the provided <see cref="SpeechString"/> using the enclosed data.
/// </summary>
/// <returns>Nothing.</returns>
private async Task Speak(SpeechString speech)
{
    // Stream and synthesizer are scoped to this call and disposed afterwards.
    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.Rate = speech.Rate;
        synth.Volume = speech.Volume;

        // TODO: refine the speech builder and actually use the style.
        var prompt = new PromptBuilder();
        var style = new PromptStyle();
        prompt.StartVoice(speech.Voice.VoiceInfo);
        prompt.StartSentence();
        prompt.AppendText(speech.Text);
        prompt.EndSentence();
        prompt.EndVoice();

        // Render the prompt directly into the memory stream...
        synth.Speak(prompt);

        // ...then block while playback runs on both output devices.
        await AudioManager.Instance.Play(stream, speech.PrimaryDevice.DeviceNumber, speech.SecondaryDevice.DeviceNumber);
    }
}
/// <summary>
/// Greets Eugene: selects the third installed voice, raises the rate, and
/// queues a two-sentence greeting with a short pause in between.
/// </summary>
private static void SayHello()
{
    Console.WriteLine("Hello!");
    synthesizer.SelectVoice(voices[2].VoiceInfo.Name);
    synthesizer.Rate = 2;

    promptBuilder.StartSentence();
    promptBuilder.AppendText("Hello, Eugene!");
    promptBuilder.EndSentence();
    // 10 ms pause between the sentences.
    promptBuilder.AppendBreak(TimeSpan.FromMilliseconds(10));
    promptBuilder.StartSentence();
    promptBuilder.AppendText("How are you today?", PromptEmphasis.Strong);
    promptBuilder.EndSentence();

    synthesizer.SpeakAsync(promptBuilder);
}
/// <summary>
/// Synthesizes <paramref name="textInput"/> to speech (8 kHz 16-bit stereo),
/// encodes the waveform as MP3, and returns it as a base64 data URI.
/// </summary>
/// <param name="textInput">Text to synthesize.</param>
/// <returns>A "data:audio/mp3;base64,..." string.</returns>
public static string GetBase64Audio(string textInput)
{
    var speechAudioFormatConfig = new SpeechAudioFormatInfo(samplesPerSecond: 8000, bitsPerSample: AudioBitsPerSample.Sixteen, channel: AudioChannel.Stereo);
    var waveFormat = new WaveFormat(speechAudioFormatConfig.SamplesPerSecond, speechAudioFormatConfig.BitsPerSample, speechAudioFormatConfig.ChannelCount);
    var prompt = new PromptBuilder { Culture = CultureInfo.CreateSpecificCulture("en-US") };
    prompt.StartVoice(prompt.Culture);
    prompt.StartSentence();
    prompt.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Reduced, Rate = PromptRate.Slow });
    prompt.AppendText(textInput);
    prompt.EndStyle();
    prompt.EndSentence();
    prompt.EndVoice();

    // Both streams are disposed deterministically (the original leaked
    // mp3Stream and the SpeechSynthesizer).
    using (var mp3Stream = new MemoryStream())
    using (var synthWaveMemoryStream = new MemoryStream())
    {
        var resetEvent = new ManualResetEvent(false);
        // Run synthesis on a worker thread and block until it signals,
        // mirroring the original design.
        ThreadPool.QueueUserWorkItem(arg =>
        {
            try
            {
                using (var siteSpeechSynth = new SpeechSynthesizer())
                {
                    siteSpeechSynth.SetOutputToAudioStream(synthWaveMemoryStream, speechAudioFormatConfig);
                    siteSpeechSynth.Speak(prompt);
                }
            }
            finally
            {
                resetEvent.Set();
            }
        });
        // WaitOne: simpler than WaitAll with a single handle.
        resetEvent.WaitOne();

        var bitRate = speechAudioFormatConfig.AverageBytesPerSecond * 8;
        synthWaveMemoryStream.Position = 0;
        using (var mp3FileWriter = new LameMP3FileWriter(outStream: mp3Stream, format: waveFormat, bitRate: bitRate))
        {
            synthWaveMemoryStream.CopyTo(mp3FileWriter);
        }
        // ToArray after the writer is disposed so all encoded bytes are flushed.
        return $"data:audio/mp3;base64,{Convert.ToBase64String(mp3Stream.ToArray())}";
    }
}
/// <summary>
/// Synthesizes <paramref name="text"/> into a temporary ".WAV" sibling of the
/// target path, converts it to MP3 at <paramref name="trgWavFile"/>, and
/// deletes the temporary file.
/// </summary>
/// <param name="trgWavFile">Target MP3 path (a "&lt;path&gt;.WAV" temp file is created beside it).</param>
/// <param name="text">Text to synthesize.</param>
/// <returns>Always true; failures are logged, not thrown.</returns>
public static bool CreateWavConvertToMp3File(string trgWavFile, string text)
{
    var ps = new PromptStyle { Emphasis = PromptEmphasis.Strong, Volume = PromptVolume.ExtraLoud };
    var pb = new PromptBuilder();
    pb.StartStyle(ps);
    pb.StartParagraph();
    pb.StartVoice(VoiceGender.Female, VoiceAge.Child);
    pb.StartSentence();
    // Short leading/trailing pauses around the spoken text.
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.AppendText(text, PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.EndSentence();
    pb.EndVoice();
    pb.EndParagraph();
    pb.EndStyle();

    var tempWav = trgWavFile + ".WAV";
    using (var speaker = new SpeechSynthesizer())
    using (var fileStream = new FileStream(tempWav, FileMode.Create))
    {
        try
        {
            speaker.Volume = 100;
            speaker.SetOutputToWaveStream(fileStream);
            speaker.Speak(pb);
            // NOTE(review): the original contained a disabled ("nogo")
            // WaveGenerator sine-wave block here that also left the try/catch
            // braces unbalanced; it has been removed.
        }
        catch (Exception ex)
        {
            ex.Log();
        }
        finally
        {
            speaker.SetOutputToDefaultAudioDevice();
            fileStream.Close();
        }
    }
    NAudioHelper.ConvertWavToMp3(tempWav, trgWavFile);
    try { File.Delete(tempWav); } catch (Exception ex) { ex.Log(); }
    return true;
}
/// <summary>
/// Synthesizes <paramref name="text"/> to PCM in memory (padded with a second
/// of silence before and after so the camera can start/stop cleanly) and
/// streams it to the camera's talk target.
/// </summary>
/// <param name="text">Text to speak to the camera.</param>
/// <param name="cw">Camera window whose talk target receives the audio.</param>
private static void SynthToCam(string text, CameraWindow cw)
{
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);
    using (var synthesizer = new SpeechSynthesizer())
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            var silence = new byte[1 * 22050];
            // Length instead of LINQ Count(): same value, no enumerator
            // overhead (and consistent with the sibling SynthToCam overload).
            waveStream.Write(silence, 0, silence.Length);

            var pbuilder = new PromptBuilder();
            var pStyle = new PromptStyle
            {
                Emphasis = PromptEmphasis.Strong,
                Rate = PromptRate.Slow,
                Volume = PromptVolume.ExtraLoud
            };
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText(text);
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();

            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();

            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);

            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();
            // Block until the audio has been fully streamed to the camera.
            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }
            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
            waveStream.Close();
        }
    }
}
/// <summary>
/// Speaks a short sample in four voice gender/age combinations, cancelling
/// any speech already queued on the synthesizer.
/// </summary>
void promptSample()
{
    var style = new PromptStyle { Emphasis = PromptEmphasis.Strong, Volume = PromptVolume.ExtraLoud };
    var pb = new PromptBuilder();
    pb.StartStyle(style);
    pb.StartParagraph();

    // Each sample: one sentence of label text followed by a 0.3 s pause,
    // rendered in the requested voice.
    Action<VoiceGender, VoiceAge, string> addSample = (gender, age, label) =>
    {
        pb.StartVoice(gender, age);
        pb.StartSentence();
        pb.AppendText(label, PromptRate.Medium);
        pb.AppendBreak(TimeSpan.FromSeconds(.3));
        pb.EndSentence();
        pb.EndVoice();
    };

    addSample(VoiceGender.Female, VoiceAge.Child, "Female Child");
    addSample(VoiceGender.Female, VoiceAge.Senior, "Female Senior");
    addSample(VoiceGender.Male, VoiceAge.Senior, "Male Senior");
    addSample(VoiceGender.Male, VoiceAge.Child, "Male Child");

    pb.EndParagraph();
    pb.EndStyle();

    synth.SpeakAsyncCancelAll();
    synth.SpeakAsync(pb);
}
/// <summary>
/// Speaks <paramref name="text"/> asynchronously as one sentence in the
/// configured culture.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <param name="mode">How the text should be interpreted (default: plain text).</param>
private void Speak(string text, SayAs mode = SayAs.Text)
{
    var sentence = new PromptBuilder(culture);
    sentence.StartSentence(culture);
    sentence.AppendTextWithHint(text, mode);
    sentence.EndSentence();
    synthesizer.SpeakAsync(sentence);
}
/// <summary>
/// Synthesizes text to PCM audio in memory and streams it to the camera's
/// talk target, padding with silence before and after playback.
/// </summary>
/// <param name="text">Text to speak to the camera.</param>
/// <param name="cw">Camera window whose talk target receives the audio.</param>
private static void SynthToCam(string text, CameraWindow cw)
{
    // 11.025 kHz, 16-bit, mono PCM output format for the synthesizer.
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);
    using (var synthesizer = new SpeechSynthesizer())
    {
        using (var waveStream = new MemoryStream())
        {
            //write some silence to the stream to allow camera to initialise properly
            // 22050 bytes = 1 second of 11025 Hz 16-bit mono audio.
            var silence = new byte[1 * 22050];
            waveStream.Write(silence, 0, silence.Length);
            var pbuilder = new PromptBuilder();
            var pStyle = new PromptStyle
            {
                Emphasis = PromptEmphasis.Strong,
                Rate = PromptRate.Slow,
                Volume = PromptVolume.ExtraLoud
            };
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            // Male adult voice; variant 2 if several voices match the hints.
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText(text);
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();
            // Render the prompt into the in-memory stream, then detach output.
            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();
            //write some silence to the stream to allow camera to end properly
            waveStream.Write(silence, 0, silence.Length);
            waveStream.Seek(0, SeekOrigin.Begin);
            var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
            var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);
            ds.Start();
            talkTarget.Start();
            // Block until the stream finishes playing out to the camera.
            while (ds.IsRunning)
            {
                Thread.Sleep(100);
            }
            ds.Stop();
            talkTarget.Stop();
            talkTarget = null;
            ds = null;
        }
    }
}
/// <summary>
/// Responds to a recognized phrase by asking the user what to remind.
/// </summary>
private void _recognizeSpeechAndWriteToConsole_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    PromptBuilder builder1 = new PromptBuilder();
    builder1.StartSentence();
    builder1.AppendText("What do you want me to remind?");
    builder1.EndSentence();
    // using disposes the synthesizer even if Speak throws; the original's
    // manual Dispose() was skipped on exception.
    using (SpeechSynthesizer synthesizer = new SpeechSynthesizer())
    {
        synthesizer.Speak(builder1);
    }
}
/// <summary>
/// Discord chat handler: mirrors messages from one hard-coded channel to Beam,
/// and, when reading is enabled, synthesizes messages from the reading channel
/// into the voice connection.
/// </summary>
private async void Discord_ChatMessage(object sender, MessageEventArgs e)
{
    // Relay messages (except our own) from the hard-coded channel id to Beam chat.
    if (this.beam.IsConnected && e.User.Id != this.discord.CurrentUser.Id && e.Channel.Id == 187760090206437376)
    {
        var msg = $"[discord] {e.Message.User.Name}: {e.Message.Text}";
        await this.beam.SendMessage(msg);
    }
    if (this.readingMessages && e.Channel == this.readingChannel)
    {
        using (var synthesizer = new SpeechSynthesizer())
        using (var mem = new MemoryStream())
        {
            // 48 kHz 16-bit stereo — presumably chosen to match the voice
            // connection's expected format; TODO confirm against this.audio.
            var info = new SpeechAudioFormatInfo(48000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo);
            synthesizer.SetOutputToAudioStream(mem, info);
            PromptBuilder builder = new PromptBuilder();
            builder.Culture = CultureInfo.CreateSpecificCulture("en-US");
            builder.StartVoice(builder.Culture);
            builder.StartSentence();
            // Author attribution ("<name> says") with reduced emphasis...
            builder.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Reduced });
            builder.AppendText(e.Message.User.Name);
            builder.AppendText(" says ");
            builder.EndStyle();
            // ...followed by the message body itself.
            builder.AppendText(e.Message.Text);
            builder.EndSentence();
            builder.EndVoice();
            synthesizer.Speak(builder);
            mem.Seek(0, SeekOrigin.Begin);
            // Send the rendered audio in fixed 96000-byte blocks, zero-padding
            // the final partial block so every send is full-sized.
            int count, block = 96000;
            var buffer = new byte[block];
            while ((count = mem.Read(buffer, 0, block)) > 0)
            {
                if (count < block)
                {
                    for (int i = count; i < block; i++)
                    {
                        buffer[i] = 0;
                    }
                }
                this.audio.Send(buffer, 0, block);
            }
        }
    }
}
/// <summary>
/// Plays a short French test sentence to verify that speech synthesis works.
/// </summary>
private void speechButton_Click(object sender, EventArgs e)
{
    // Rebuild the shared prompt with a single test sentence.
    consignes = new PromptBuilder();
    consignes.StartSentence();
    consignes.AppendText("Ceci est un essai du système de synthèse vocale.");
    consignes.EndSentence();
    synth.SpeakAsync(consignes);
}
/// <summary>
/// Speaks the supplied text on a freshly created synthesizer.
/// </summary>
/// <param name="textToSay">Text typed by the user.</param>
private static async Task SpeakAsync(string textToSay)
{
    // Say the text they typed
    PromptBuilder builder = new PromptBuilder();
    builder.StartSentence();
    builder.AppendText(textToSay);
    builder.EndSentence();
    // using guarantees disposal even if Speak throws (the original's manual
    // Dispose() was skipped on exception).
    // NOTE(review): Speak() blocks; despite the name, this method completes
    // synchronously — consider SpeakAsync + a completion event if true
    // asynchrony is wanted.
    using (SpeechSynthesizer synthesizer = new SpeechSynthesizer())
    {
        synthesizer.Speak(builder);
    }
    await Task.CompletedTask; // silences CS1998 without changing behavior
}
/// <summary>
/// Window Loaded handler: builds a welcome prompt (speaking it is currently
/// disabled) and then starts the main speech-recognition loop.
/// </summary>
private void ControlsBasicsWindow_Loaded(object sender, RoutedEventArgs e)
{
    var welcome = new PromptBuilder();
    welcome.StartSentence();
    welcome.AppendText("Welcome to Basic Human Artificial Intelligence");
    welcome.EndSentence();

    // The synthesizer is created and released without speaking: the Speak
    // call is deliberately disabled.
    SpeechSynthesizer synthesizer = new SpeechSynthesizer();
    //synthesizer.Speak(welcome);
    synthesizer.Dispose();

    RecognizeSpeechAndWriteToConsoleMain1();
}
/// <summary>
/// Reads <paramref name="text"/> aloud asynchronously with the "Luca" voice,
/// strong emphasis and an extra-high prosody pitch.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <param name="volume">0-100; out-of-range values fall back to 100.</param>
/// <param name="rate">Reading speed, -10..10; out-of-range falls back to 0.</param>
public void Say(string text, int volume, int rate)
{
    // Clamp-or-default the requested volume and rate.
    sp.Volume = (volume >= 0 && volume <= 100) ? volume : 100;
    sp.Rate = (rate >= -10 && rate <= 10) ? rate : 0;

    var builder = new PromptBuilder();
    builder.StartVoice("Luca");
    builder.StartSentence();
    builder.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Strong, Rate = PromptRate.Medium });
    // Raw SSML: PromptStyle has no pitch control, so request it directly.
    builder.AppendSsmlMarkup("<prosody pitch=\"x-high\"> " + text + " </prosody >");
    builder.EndStyle();
    builder.EndSentence();
    builder.EndVoice();

    // Asynchronous
    sp.SpeakAsync(builder);
}
/// <summary>
/// Final build callback: logs per-target durations, overall status and timing,
/// and (when the session is interactive and speech is not disabled) announces
/// the build result aloud.
/// </summary>
/// <param name="session">The task session whose results are reported.</param>
public static void OnBuildFinished(ITaskSession session)
{
    session.ResetDepth();
    LogTargetDurations(session);
    session.WriteInfo(String.Empty);

    if (session.HasFailed)
    {
        session.WriteError("BUILD FAILED");
    }
    else
    {
        session.WriteInfo("BUILD SUCCESSFUL");
    }

    TimeSpan buildDuration = session.BuildStopwatch.Elapsed;
    session.WriteInfo("Build finish time: {0:g}", DateTime.Now);
    session.WriteInfo(
        "Build duration: {0:D2}:{1:D2}:{2:D2} ({3:d} seconds)",
        buildDuration.Hours,
        buildDuration.Minutes,
        buildDuration.Seconds,
        (int)buildDuration.TotalSeconds);

    bool speechDisabled = session.Properties.Get(BuildProps.SpeechDisabled, false);
    if (session.IsInteractive && !speechDisabled)
    {
        using (SpeechSynthesizer speech = new SpeechSynthesizer())
        {
            // Reuse a single culture instance instead of constructing
            // "en-US" twice as the original did.
            CultureInfo enUs = new CultureInfo("en-US");
            PromptBuilder builder = new PromptBuilder(enUs);
            builder.StartStyle(new PromptStyle(PromptRate.Slow));
            builder.StartStyle(new PromptStyle(PromptVolume.Loud));
            builder.StartSentence(enUs);
            builder.AppendText("Build " + (session.HasFailed ? "failed." : "successful!"));
            builder.EndSentence();
            builder.EndStyle();
            builder.EndStyle();
            speech.Speak(builder);
        }
    }
    //Beeper.Beep(session.HasFailed ? MessageBeepType.Error : MessageBeepType.Ok);
}
/// <summary>
/// Builds a prompt that speaks <paramref name="text"/> in the synthesizer's
/// current voice, followed by an optional pause of Interval seconds.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <returns>The assembled prompt.</returns>
private PromptBuilder BuildPrompt(string text)
{
    PromptBuilder prompt = new PromptBuilder();
    // Bug fix: the original closed the voice section immediately
    // (StartVoice directly followed by EndVoice), so the sentence was
    // rendered outside the selected voice. The sentence now sits inside it.
    prompt.StartVoice(_synthesizer.Voice);
    prompt.StartSentence();
    prompt.AppendText(text);
    prompt.EndSentence();
    prompt.EndVoice();
    if (Interval > 0)
    {
        prompt.AppendBreak(TimeSpan.FromSeconds(Interval));
    }
    return prompt;
}
// Fires on any recognized phrase; hearing "OK" arms the second-stage handler,
// beeps an acknowledgement, and sets the music flag.
private void _recognizeSpeechAndWriteToConsole_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    if (e.Result.Text == "OK")
    {
        // NOTE(review): this subscribes the second-stage handler every time
        // "OK" is recognized, so repeated activations make it fire multiple
        // times per event — consider unsubscribing first.
        _recognizer.SpeechRecognized += _recognizeSpeechAndWriteToConsole_SpeechRecognized1;
        //MessageBox.Show(e.Result.Text);
        PromptBuilder builder1 = new PromptBuilder();
        builder1.StartSentence();
        // Audible acknowledgement: 3 kHz beep for 500 ms (spoken reply is disabled).
        Console.Beep(3000, 500);
        //builder1.AppendText("YES SIR .... ");
        builder1.EndSentence();
        music_flag = 1;
        // NOTE(review): the synthesizer is created and disposed without ever
        // speaking (the Speak call is commented out), so builder1 is unused.
        SpeechSynthesizer synthesizer = new SpeechSynthesizer();
        //synthesizer.Speak(builder1);
        synthesizer.Dispose();
    }
}
//-------------------------------------------------------------------------------------------------------------------
/// <summary>
/// Splits <paramref name="speech"/> into paragraphs (by newline) and sentences
/// (by '.') and appends them to the prompt, inserting the configured pauses
/// after each sentence, each paragraph, and the whole phrase.
/// </summary>
/// <param name="promptBuilder">Prompt to append to.</param>
/// <param name="speech">Raw text to split and append.</param>
private void ProcessPhrase(PromptBuilder promptBuilder, string speech)
{
    var newline = new string[] { Environment.NewLine };
    foreach (string paragraph in speech.Split(newline, StringSplitOptions.RemoveEmptyEntries))
    {
        promptBuilder.StartParagraph();
        foreach (string sentence in paragraph.Split(new string[] { "." }, StringSplitOptions.RemoveEmptyEntries))
        {
            promptBuilder.StartSentence();
            promptBuilder.AppendText(sentence);
            promptBuilder.EndSentence();
            promptBuilder.AppendBreak(new TimeSpan(0, 0, 0, 0, SentencePause));
        }
        promptBuilder.EndParagraph();
        promptBuilder.AppendBreak(new TimeSpan(0, 0, 0, 0, ParagraphPause));
    }
    promptBuilder.AppendBreak(new TimeSpan(0, 0, 0, 0, SpeechPause));
}
/// <summary>
/// Handles recognized commands: greets Denis, prints his name into the text
/// box, or reads the selected text aloud.
/// </summary>
void recEngine_SpeachSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    switch (e.Result.Text)
    {
        case "say hello":
            // Bug fix: the original contained a stray break before this code
            // and built the prompt without ever passing it to the synthesizer
            // (a plain string was queued instead). The prompt is now used.
            PromptBuilder promtBuilder = new PromptBuilder();
            promtBuilder.StartSentence();
            promtBuilder.AppendText("Hello Denis");
            promtBuilder.EndSentence();
            promtBuilder.AppendBreak(PromptBreak.ExtraSmall);
            promtBuilder.AppendText("How are you?");
            syncSpeechSynthesizer.SpeakAsync(promtBuilder);
            break;
        case "print my name":
            richTextBox1.Text += "\nDenis";
            break;
        case "speak selected text":
            syncSpeechSynthesizer.SpeakAsync(richTextBox1.SelectedText);
            break;
    }
}