public void speakBuilder()
{
    builder.StartParagraph();
    builder.StartSentence();

    foreach (Pictogram picto in taleManager.CurrentPage.Pictograms)
    {
        if (picto == null)
        {
            continue;
        }

        // Always drop a bookmark so callers can track which pictogram is being spoken.
        builder.AppendBookmark(picto.Index.ToString());

        if (!string.IsNullOrEmpty(picto.Sound))
        {
            // The pictogram has a recorded sound: play the audio file instead of synthesizing text.
            builder.AppendAudio(picto.Sound);
        }
        else
        {
            // No recording available: synthesize the pictogram's text with the text style.
            builder.StartStyle(styleText);
            builder.AppendText(picto.TextToRead);
            builder.EndStyle();
        }
    }

    builder.EndSentence();
    builder.EndParagraph();
}
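// Usage sketch (assumption, not from the original project): the bookmarks appended above can drive
// UI highlighting while the prompt is spoken. Assumes `using System.Speech.Synthesis;`; the `synth`
// field and the HighlightPictogram helper are hypothetical names used only for illustration.
private readonly SpeechSynthesizer synth = new SpeechSynthesizer();

public void SpeakCurrentPage()
{
    speakBuilder();                        // fill `builder` (method above)
    synth.BookmarkReached += (s, e) =>
    {
        // The bookmark name is the pictogram index appended in speakBuilder().
        int index = int.Parse(e.Bookmark);
        HighlightPictogram(index);         // hypothetical UI callback
    };
    synth.SpeakAsync(builder);
}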
/// <summary>
/// Transforms a text string into a TTS wave stream using the selected language.
/// </summary>
/// <param name="said">Text to send to the TTS system.</param>
/// <param name="culture">Only 'en-US' and 'fr-FR' values are supported for now.</param>
/// <returns>A memory stream containing the synthesized WAV audio, rewound to the beginning.</returns>
private static Stream TtsToStream(string said, string culture = "en-US")
{
    if (_synth == null)
    {
        throw new InvalidOperationException("No speech synthesizer has been initialized.");
    }

    if (culture != "en-US" && culture != "fr-FR")
    {
        throw new InvalidOperationException("Only 'en-US' and 'fr-FR' cultures are supported right now.");
    }

    var stream = new MemoryStream();
    _synth.SetOutputToWaveStream(stream);

    var builder = new PromptBuilder(new System.Globalization.CultureInfo(culture));
    builder.StartParagraph();
    builder.AppendText(said);
    builder.EndParagraph();

    try
    {
        _synth.SelectVoice(GetVoice(culture));
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"Could not select a voice with culture {culture}.", ex);
    }

    // Speak the prompt that was just built (not the raw string), then rewind the stream for the caller.
    _synth.Speak(builder);
    stream.Seek(0, SeekOrigin.Begin);
    return stream;
}
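// Usage sketch (assumption, not part of the original source): writes the returned stream to a WAV
// file. Assumes `_synth` has already been created (e.g. `_synth = new SpeechSynthesizer();`) and
// that GetVoice(culture) resolves an installed voice name; the output path is hypothetical.
using (Stream tts = TtsToStream("Bonjour tout le monde", "fr-FR"))
using (var file = File.Create("speech.wav"))
{
    tts.CopyTo(file);   // the stream is already rewound by TtsToStream
}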
#region ISpeaker Members

/// <summary>
/// Converts text to WAV audio and re-encodes it to MP3.
/// </summary>
/// <param name="text">Text to synthesize.</param>
/// <returns>The MP3 audio as a byte array, or null if synthesis failed.</returns>
public byte[] ConvertTextToAudio(string text)
{
    var pbuilder = new PromptBuilder();
    var pStyle = new PromptStyle
    {
        Emphasis = PromptEmphasis.None,
        Rate = PromptRate.Medium,
        Volume = PromptVolume.ExtraLoud
    };

    pbuilder.StartStyle(pStyle);
    pbuilder.StartParagraph();
    pbuilder.StartVoice(VoiceGender.Neutral, VoiceAge.Adult, 2);
    pbuilder.StartSentence();
    pbuilder.AppendText(text);
    pbuilder.EndSentence();
    pbuilder.EndVoice();
    pbuilder.EndParagraph();
    pbuilder.EndStyle();

    try
    {
        using (var memoryStream = new MemoryStream())
        using (var speech = new SpeechSynthesizer())
        {
            speech.SetOutputToWaveStream(memoryStream);
            speech.Speak(pbuilder);

            // Rewind before handing the WAV data to the MP3 converter.
            memoryStream.Seek(0, SeekOrigin.Begin);
            return _mp3Converter.ConvertWavToMp3(memoryStream);
        }
    }
    catch (Exception e)
    {
        LoggerWrapper.LogTo(LoggerName.Errors).ErrorException(
            "Speaker.ConvertTextToAudio threw an exception {0}", e);
    }

    return null;
}
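// Usage sketch (assumption, not part of the original source): persists the MP3 bytes to disk.
// Assumes `speaker` is an instance of the enclosing Speaker class constructed with a working
// `_mp3Converter`; the output path is hypothetical.
byte[] mp3 = speaker.ConvertTextToAudio("Hello, world");
if (mp3 != null)
{
    File.WriteAllBytes("hello.mp3", mp3);
}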
/// <summary>
/// Synthesizes <paramref name="text"/> to a temporary WAV file, converts it to MP3 at
/// <paramref name="trgWavFile"/> (despite the parameter name, this is the MP3 target), then deletes the temp file.
/// </summary>
public static bool CreateWavConvertToMp3File(string trgWavFile, string text)
{
    var ps = new PromptStyle { Emphasis = PromptEmphasis.Strong, Volume = PromptVolume.ExtraLoud };
    //ps.Rate = PromptRate.Fast;

    var pb = new PromptBuilder();
    pb.StartStyle(ps);
    pb.StartParagraph();
    pb.StartVoice(VoiceGender.Female, VoiceAge.Child);
    pb.StartSentence();
    pb.AppendBreak(TimeSpan.FromSeconds(.3)); // removed on Sep 26, 2011; restored Jun 2013
    pb.AppendText(text, PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3)); // removed on Sep 26, 2011; restored Jun 2013
    pb.EndSentence();
    pb.EndVoice();
    pb.EndParagraph();
    pb.EndStyle();

    var tempWav = trgWavFile + ".WAV";

    using (var speaker = new SpeechSynthesizer())
    using (var fileStream = new FileStream(tempWav, FileMode.Create))
    {
        try
        {
            speaker.Volume = 100;
            speaker.SetOutputToWaveStream(fileStream);
            speaker.Speak(pb);
        }
        catch (Exception ex)
        {
            ex.Log();
        }
        finally
        {
            speaker.SetOutputToDefaultAudioDevice();
            fileStream.Close();
        }
    }

    NAudioHelper.ConvertWavToMp3(tempWav, trgWavFile);

    try
    {
        File.Delete(tempWav);
    }
    catch (Exception ex)
    {
        ex.Log();
    }

    return true;
}
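// Usage sketch (assumption, not part of the original source): despite the parameter name, the first
// argument is the final MP3 path; a "<path>.WAV" temp file is created next to it and removed afterwards.
CreateWavConvertToMp3File(@"C:\tts\greeting.mp3", "Welcome! The system is now online.");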
private static void SynthToCam(string text, CameraWindow cw)
{
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(
        System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);

    using (var synthesizer = new SpeechSynthesizer())
    using (var waveStream = new MemoryStream())
    {
        // Write some silence to the stream to allow the camera to initialise properly.
        var silence = new byte[1 * 22050];
        waveStream.Write(silence, 0, silence.Length);

        var pbuilder = new PromptBuilder();
        var pStyle = new PromptStyle
        {
            Emphasis = PromptEmphasis.Strong,
            Rate = PromptRate.Slow,
            Volume = PromptVolume.ExtraLoud
        };

        pbuilder.StartStyle(pStyle);
        pbuilder.StartParagraph();
        pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
        pbuilder.StartSentence();
        pbuilder.AppendText(text);
        pbuilder.EndSentence();
        pbuilder.EndVoice();
        pbuilder.EndParagraph();
        pbuilder.EndStyle();

        synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
        synthesizer.Speak(pbuilder);
        synthesizer.SetOutputToNull();

        // Write some silence to the stream to allow the camera to end properly.
        waveStream.Write(silence, 0, silence.Length);
        waveStream.Seek(0, SeekOrigin.Begin);

        var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
        var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds);

        ds.Start();
        talkTarget.Start();
        while (ds.IsRunning)
        {
            Thread.Sleep(100);
        }
        ds.Stop();
        talkTarget.Stop();
    }
}
void promptSample()
{
    var ps = new PromptStyle { Emphasis = PromptEmphasis.Strong, Volume = PromptVolume.ExtraLoud };

    var pb = new PromptBuilder();
    pb.StartStyle(ps);
    pb.StartParagraph();

    pb.StartVoice(VoiceGender.Female, VoiceAge.Child);
    pb.StartSentence();
    pb.AppendText("Female Child", PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.EndSentence();
    pb.EndVoice();

    pb.StartVoice(VoiceGender.Female, VoiceAge.Senior);
    pb.StartSentence();
    pb.AppendText("Female Senior", PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.EndSentence();
    pb.EndVoice();

    pb.StartVoice(VoiceGender.Male, VoiceAge.Senior);
    pb.StartSentence();
    pb.AppendText("Male Senior", PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.EndSentence();
    pb.EndVoice();

    pb.StartVoice(VoiceGender.Male, VoiceAge.Child);
    pb.StartSentence();
    pb.AppendText("Male Child", PromptRate.Medium);
    pb.AppendBreak(TimeSpan.FromSeconds(.3));
    pb.EndSentence();
    pb.EndVoice();

    pb.EndParagraph();
    pb.EndStyle();

    synth.SpeakAsyncCancelAll();
    synth.SpeakAsync(pb);
}
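// Usage sketch (assumption): `synth` is expected to be a class-level SpeechSynthesizer so the
// asynchronous prompt keeps playing after promptSample() returns; the field and the Window_Closed
// handler below are illustrative, not from the original source.
private readonly SpeechSynthesizer synth = new SpeechSynthesizer();

private void Window_Closed(object sender, EventArgs e)
{
    synth.SpeakAsyncCancelAll();   // stop any queued prompts before shutting down
    synth.Dispose();
}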
//-------------------------------------------------------------------------------------------------------------------
private void ProcessPhrase(PromptBuilder promptBuilder, string speech)
{
    string[] paragraphs = speech.Split(new string[] { Environment.NewLine }, StringSplitOptions.RemoveEmptyEntries);
    foreach (var paragraph in paragraphs)
    {
        promptBuilder.StartParagraph();

        string[] sentences = paragraph.Split(new string[] { "." }, StringSplitOptions.RemoveEmptyEntries);
        foreach (var sentence in sentences)
        {
            promptBuilder.StartSentence();
            promptBuilder.AppendText(sentence);
            promptBuilder.EndSentence();
            promptBuilder.AppendBreak(new TimeSpan(0, 0, 0, 0, SentencePause));
        }

        promptBuilder.EndParagraph();
        promptBuilder.AppendBreak(new TimeSpan(0, 0, 0, 0, ParagraphPause));
    }

    promptBuilder.AppendBreak(new TimeSpan(0, 0, 0, 0, SpeechPause));
}
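// Usage sketch (assumption, not part of the original source): SentencePause, ParagraphPause and
// SpeechPause are millisecond values assumed to be defined on the enclosing class; the values and
// the SpeakPhrase wrapper below are illustrative only.
private const int SentencePause = 250;
private const int ParagraphPause = 600;
private const int SpeechPause = 1000;

private void SpeakPhrase(string speech)
{
    var promptBuilder = new PromptBuilder();
    ProcessPhrase(promptBuilder, speech);   // method above: one sentence per '.', one paragraph per line

    using (var synthesizer = new SpeechSynthesizer())
    {
        synthesizer.SetOutputToDefaultAudioDevice();
        synthesizer.Speak(promptBuilder);
    }
}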
/// <summary>
/// Saves the voice with START and STOP as a WAV sound. BETA
/// </summary>
public void SaveSay1(String sText, String VoiceName, String StartSound, String StopSound, int iVolume, int iRate)
{
    // NOTE: the parameters are not used yet (BETA); the method currently speaks a fixed sample text.
    var synthFormat = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
    var synthesizer = new SpeechSynthesizer();
    var waveStream = new MemoryStream();
    var waveFileStream = new FileStream(@".\mywave.wav", FileMode.OpenOrCreate);
    var pbuilder = new PromptBuilder();
    var pStyle = new PromptStyle();
    var aSaveFileDialog1 = new SaveFileDialog();
    //---
    pStyle.Emphasis = PromptEmphasis.None;
    pStyle.Rate = PromptRate.Fast;
    pStyle.Volume = PromptVolume.ExtraLoud;

    pbuilder.StartStyle(pStyle);
    pbuilder.StartParagraph();
    pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Teen, 2);
    pbuilder.StartSentence();
    pbuilder.AppendText("This is some text.");
    pbuilder.EndSentence();
    pbuilder.EndVoice();
    pbuilder.EndParagraph();
    pbuilder.EndStyle();

    synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
    synthesizer.Speak(pbuilder);
    synthesizer.SetOutputToNull();

    waveStream.WriteTo(waveFileStream);
    waveFileStream.Close();

    /*
    aSaveFileDialog1.Filter = "wave files (*.wav)|*.wav";
    aSaveFileDialog1.DefaultExt = "*.wav";
    aSaveFileDialog1.Title = "Stimme als Wave speichern";
    aSaveFileDialog1.FileName = "";
    aSaveFileDialog1.ShowDialog();
    */
}
private static void SynthToCam(string text, CameraWindow cw)
{
    var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(
        System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22100, 2, null);

    using (var synthesizer = new SpeechSynthesizer())
    using (var waveStream = new MemoryStream())
    {
        // Write some silence to the stream to allow the camera to initialise properly.
        var silence = new byte[1 * 22050];
        waveStream.Write(silence, 0, silence.Length);

        var pbuilder = new PromptBuilder();
        var pStyle = new PromptStyle
        {
            Emphasis = PromptEmphasis.Strong,
            Rate = PromptRate.Slow,
            Volume = PromptVolume.ExtraLoud
        };

        pbuilder.StartStyle(pStyle);
        pbuilder.StartParagraph();
        pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
        pbuilder.StartSentence();
        pbuilder.AppendText(text);
        pbuilder.EndSentence();
        pbuilder.EndVoice();
        pbuilder.EndParagraph();
        pbuilder.EndStyle();

        synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
        synthesizer.Speak(pbuilder);
        synthesizer.SetOutputToNull();

        // Write some silence to the stream to allow the camera to end properly.
        waveStream.Write(silence, 0, silence.Length);
        waveStream.Seek(0, SeekOrigin.Begin);

        ITalkTarget talkTarget;
        var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };

        switch (cw.Camobject.settings.audiomodel)
        {
            case "Foscam":
                ds.Interval = 40;
                ds.PacketSize = 882; // 40ms packet at 22050 bytes per second
                talkTarget = new TalkFoscam(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport,
                                            cw.Camobject.settings.audiousername, cw.Camobject.settings.audiopassword, ds);
                break;
            case "NetworkKinect":
                ds.Interval = 40;
                ds.PacketSize = 882;
                talkTarget = new TalkNetworkKinect(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport, ds);
                break;
            case "iSpyServer":
                ds.Interval = 40;
                ds.PacketSize = 882;
                talkTarget = new TalkiSpyServer(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport, ds);
                break;
            case "Axis":
                talkTarget = new TalkAxis(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport,
                                          cw.Camobject.settings.audiousername, cw.Camobject.settings.audiopassword, ds);
                break;
            default:
                // Local playback
                talkTarget = new TalkLocal(ds);
                break;
        }

        ds.Start();
        talkTarget.Start();
        while (ds.IsRunning)
        {
            Thread.Sleep(100);
        }
        ds.Stop();
        talkTarget.Stop();
    }
}