/// <summary>
/// See interface docs.
/// </summary>
/// <param name="speechText">Text to render; must not be null.</param>
/// <returns>WAV bytes of the spoken text, or an empty array when speech is unsupported.</returns>
public byte[] SpeechToWavBytes(string speechText)
{
    if(speechText == null) throw new ArgumentNullException(nameof(speechText));

    byte[] result = new byte[] {};
    if(IsSupported) {
        using(MemoryStream memoryStream = new MemoryStream()) {
            // Note: the old "Rate = -3" initializer was dead code — the rate is
            // always overwritten from configuration before anything is spoken.
            using(SpeechSynthesizer speechSynthesizer = new SpeechSynthesizer()) {
                var configuration = Factory.Singleton.Resolve<IConfigurationStorage>().Singleton.Load();
                speechSynthesizer.Rate = configuration.AudioSettings.VoiceRate;

                // Remember the engine default so we can fall back when the
                // configured voice is missing or cannot be selected.
                string defaultVoice = speechSynthesizer.Voice.Name;
                try {
                    speechSynthesizer.SelectVoice(configuration.AudioSettings.VoiceName);
                } catch(Exception ex) {
                    Debug.WriteLine(String.Format("Audio.SpeechToWavBytes caught exception {0}", ex.ToString()));
                    speechSynthesizer.SelectVoice(defaultVoice);
                }

                speechSynthesizer.SetOutputToWaveStream(memoryStream);
                speechSynthesizer.Speak(speechText);
            }
            memoryStream.Flush();
            result = memoryStream.ToArray();
        }
    }
    return result;
}
public Speaker(Language lang = Language.English)
{
    this.lang = lang;
    AsyncMode = false;  // Default to synchron speech
    UseSSML = false;    // Default to non-SSML speech
    try
    {
        // Create the synthesizer and route it to the default audio device.
        ss = new SpeechSynthesizer();
        ss.SetOutputToDefaultAudioDevice();

        // Pick the Microsoft server voice for the requested language
        // (plain-text mode only; SSML carries its own voice selection).
        if (!UseSSML)
        {
            string voiceName;
            switch (lang)
            {
                case Language.Finish:
                    voiceName = "Microsoft Server Speech Text to Speech Voice (fi-FI, Heidi)";
                    break;
                case Language.Norwegian:
                    voiceName = "Microsoft Server Speech Text to Speech Voice (nb-NO, Hulda)";
                    break;
                case Language.Russian:
                    voiceName = "Microsoft Server Speech Text to Speech Voice (ru-RU, Elena)";
                    break;
                case Language.Swedish:
                    voiceName = "Microsoft Server Speech Text to Speech Voice (sv-SE, Hedvig)";
                    break;
                case Language.English:
                default:
                    voiceName = "Microsoft Server Speech Text to Speech Voice (en-GB, Hazel)";
                    break;
            }
            ss.SelectVoice(voiceName);
        }
    }
    catch (Exception e)
    {
        Console.WriteLine("An error occured: '{0}'", e);
    }
}
/// <summary>
/// Initiate the SpeechSynthesizer.
/// Creates the shared static synthesizer, routes it to the default audio
/// device and selects the statically configured voice name.
/// </summary>
/// <returns>The initialised synthesizer (also stored in the static field).</returns>
public static SpeechSynthesizer initVoice()
{
    synth = new SpeechSynthesizer();
    synth.SetOutputToDefaultAudioDevice();
    // NOTE(review): throws if `voiceName` is not installed — confirm callers handle that.
    synth.SelectVoice(voiceName);
    return synth;
}
// args: [0] wav file name, [1] output directory, [2] text to speak,
// [3] rate, [4] voice name. Callers use '~' as a placeholder for spaces.
static void Main(string[] args)
{
    string voiceFileName = args[0];
    string voiceFileNamemp3 = voiceFileName.Replace(".wav", ".mp3");
    string voiceFilePath = args[1];
    string toBeVoiced = args[2];
    int rate = int.Parse(args[3]);  // the original had a stray empty statement here
    string voice = args[4];

    voiceFileName = voiceFileName.Replace("~", " ");
    voiceFilePath = voiceFilePath.Replace("~", " ");
    toBeVoiced = toBeVoiced.Replace("~", " ");
    voice = voice.Replace("~", " ");

    try
    {
        // Dispose the synthesizer (closing the wav file) before converting;
        // the original leaked it whenever Speak threw.
        using (var reader = new SpeechSynthesizer())
        {
            reader.Rate = rate;
            reader.SelectVoice(voice);
            reader.SetOutputToWaveFile(voiceFilePath + voiceFileName,
                new SpeechAudioFormatInfo(16025, AudioBitsPerSample.Sixteen, AudioChannel.Mono));
            reader.Speak(toBeVoiced);
        }
        WaveToMP3(voiceFilePath + voiceFileName, voiceFilePath + voiceFileNamemp3);
    }
    catch (Exception er)
    {
        // The original silently discarded the exception; at least report it.
        Console.Error.WriteLine("TTS generation failed: " + er.Message);
    }
}
public void load_listen(VI_Profile profile, VI_Settings settings, ListView statusContainer)
{
    this.profile = profile;
    this.settings = settings;
    this.statusContainer = statusContainer;

    // Voice output is driven by the profile's synthesizer.
    vi_syn = profile.synth;
    vi_syn.SelectVoice(settings.voice_info);

    vi_sre = new SpeechRecognitionEngine(settings.recognizer_info);

    // Collect every trigger phrase; recognition is pointless without at least one.
    var glossory = new List<string>();
    foreach (VI_Phrase trigger in profile.Profile_Triggers)
    {
        glossory.Add(trigger.value);
    }
    if (glossory.Count == 0)
    {
        MessageBox.Show("You need to add at least one Trigger");
        return;
    }

    // Build the grammar from the collected phrases.
    var phrases_grammar = new GrammarBuilder();
    phrases_grammar.Append(new Choices(glossory.ToArray()));
    vi_sre.LoadGrammar(new Grammar(phrases_grammar));

    // Hook recognition events and start listening continuously.
    vi_sre.SpeechRecognized += phraseRecognized;
    vi_sre.SpeechRecognitionRejected += _recognizer_SpeechRecognitionRejected;
    vi_sre.SetInputToDefaultAudioDevice();
    vi_sre.RecognizeAsync(RecognizeMode.Multiple);
}
public static bool ConfigureVoice(SpeechSynthesizer synth, string name, int speed, int volume, bool defaultOutput, int customOutput)
{
    try
    {
        synth.SetOutputToDefaultAudioDevice();
        if (!defaultOutput)
        {
            HackCustomDevice(synth, customOutput); // route to the user-chosen device
        }
    }
    catch
    {
        // Best effort: continue with whatever output routing succeeded.
    }

    // An empty name falls back to the remembered initial voice.
    string voiceName = string.IsNullOrEmpty(name) ? initialName : name;
    try
    {
        if (!string.IsNullOrEmpty(voiceName))
        {
            synth.SelectVoice(voiceName);
        }
    }
    catch
    {
        return false; // requested voice is not installed
    }

    synth.Rate = speed;
    synth.Volume = volume;
    return true;
}
private SpeakService()
{
    // Singleton constructor: build the synthesizer with the configured
    // voice and enable repetition by default.
    _synthesizer = new SpeechSynthesizer();
    _synthesizer.SelectVoice(VoiceName);
    Repeat = true;
}
public void VoiceCanBeHeard()
{
    // Audible smoke test: speaks a sample sentence and an enum name.
    // Speak is synchronous, so the synthesizer can be disposed afterwards;
    // the original leaked it.
    using (var synth = new SpeechSynthesizer())
    {
        synth.SelectVoice("Microsoft Hazel Desktop");
        synth.Speak("This is an example of what to do");
        synth.Speak(NumberUtilities.Direction.Ascending.ToString());
    }
}
// Speaks the given text synchronously with the French "Virginie" voice.
public static void SpeechSynchrone(String text)
{
    // Dispose the synthesizer when done; the original instance was leaked.
    using (SpeechSynthesizer s = new SpeechSynthesizer())
    {
        String voix = "ScanSoft Virginie_Dri40_16kHz";
        s.SelectVoice(voix);
        s.Speak(text);
    }
}
private void TestButton_Click(object sender, EventArgs e)
{
    // Plays a short sample so the user can audition the configured voice
    // and rate. NOTE(review): not disposed here because SpeakAsync is still
    // running when this handler returns.
    var synth = new SpeechSynthesizer();
    synth.SelectVoice(_settings.CurrentTTSVoice.VoiceInfo.Name);
    synth.Rate = _settings.TTS_Rate;

    string sample = "This is " + _settings.CurrentTTSVoice.VoiceInfo.Description
        + " Speaking at a rate of " + _settings.TTS_Rate;
    synth.SpeakAsync(sample);
}
public void Run()
{
    // Speak Text synchronously with the configured voice and rate.
    using (var synthesizer = new SpeechSynthesizer())
    {
        synthesizer.SelectVoice(VoiceName);
        synthesizer.Rate = Rate;
        synthesizer.Speak(Text);
    }
}
public VoiceCommandExecutor()
{
    // Hazel was chosen after auditioning Zira and Heera.
    _speech = new SpeechSynthesizer();
    _speech.SelectVoice("Microsoft Hazel Desktop");
}
public Speech()
{
    // Recognition listens on the default microphone; synthesis speaks with
    // "Microsoft Anna" on the default output device.
    sr = new SpeechRecognitionEngine();
    sr.SetInputToDefaultAudioDevice();

    ss = new SpeechSynthesizer();
    ss.SelectVoice("Microsoft Anna");
    ss.SetOutputToDefaultAudioDevice();
}
public void Play()
{
    // Start asynchronous playback of `texto` with the Raquel voice; the
    // completion handler fires when the utterance ends.
    speaker = new SpeechSynthesizer();
    speaker.SelectVoice("ScanSoft Raquel_Full_22kHz");
    speaker.SpeakCompleted += speaker_SpeakCompleted;
    speaker.SpeakAsync(texto);
    speaker.Resume();
}
public AIModule()
{
    // COM handle to the local iTunes instance.
    iTunes = new iTunesLib.iTunesApp();

    // Voice output: "VW Paul", slightly fast, full volume.
    bot = new SpeechSynthesizer();
    bot.SelectVoice("VW Paul");
    bot.Rate = 1;
    bot.Volume = 100;
}
/// <summary>
/// Speaks sText aloud (or into SoundByte.wav when bToFile is set) with
/// "Microsoft Anna", then returns a textual description of the default voice.
/// </summary>
/// <param name="sText">Text to speak.</param>
/// <param name="bToFile">When true, write the audio to SoundByte.wav instead of the speakers.</param>
/// <returns>A multi-line description of the default voice.</returns>
public string Speak(string sText, bool bToFile)
{
    // `using` replaces the original's manual Dispose()/null, which leaked the
    // synthesizer whenever an exception was thrown.
    using (SpeechSynthesizer oSpeaker = new SpeechSynthesizer())
    {
        oSpeaker.Rate = 1;
        oSpeaker.SelectVoice("Microsoft Anna");
        oSpeaker.Volume = 100;
        if (bToFile)
        {
            oSpeaker.SetOutputToWaveFile("SoundByte.wav");
        }
        oSpeaker.Speak(sText);
    }

    string msg;
    // A second synthesizer is used purely to report on the default voice.
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        // Get information about supported audio formats.
        string AudioFormats = "";
        foreach (SpeechAudioFormatInfo fmt in synth.Voice.SupportedAudioFormats)
        {
            AudioFormats += String.Format("{0}\n", fmt.EncodingFormat.ToString());
        }

        // Write information about the voice into the returned message.
        msg = "Name: " + synth.Voice.Name + "\n";
        msg += "Culture: " + synth.Voice.Culture + "\n";
        msg += " Age: " + synth.Voice.Age + "\n";
        msg += " Gender: " + synth.Voice.Gender + "\n";
        msg += " Description: " + synth.Voice.Description + "\n";
        msg += " ID: " + synth.Voice.Id + "\n";
        if (synth.Voice.SupportedAudioFormats.Count != 0)
        {
            msg += " Audio formats: " + AudioFormats + "\n";
        }
        else
        {
            msg += " No supported audio formats found" + "\n";
        }

        // Get additional information about the voice.
        foreach (string key in synth.Voice.AdditionalInfo.Keys)
        {
            msg += String.Format(" {0}: {1}\n", key, synth.Voice.AdditionalInfo[key]);
        }
        // NOTE(review): this trailing header is appended AFTER the entries it
        // seems meant to introduce — preserved as-is to keep output identical,
        // but it looks like a bug.
        msg += " Additional Info - ";
    }
    return msg;
}
/// <summary>
/// Speaks the given text asynchronously with the third installed voice.
/// </summary>
/// <param name="textToSpeech">Text to speak.</param>
public void Synthes(string textToSpeech)
{
    SpeechSynthesizer ss = new SpeechSynthesizer();
    var voiceList = ss.GetInstalledVoices();
    // Voices [0] and [1] are the standard English synthesizers; [2] is
    // "Nikolay". Fall back to the engine default instead of throwing
    // IndexOutOfRangeException when fewer than three voices are installed.
    if (voiceList.Count > 2)
    {
        ss.SelectVoice(voiceList[2].VoiceInfo.Name);
    }
    ss.Volume = 100; // 0..100
    ss.Rate = 0;     // -10..10
    ss.SpeakAsync(textToSpeech);
}
public Tts()
{
    speaking = false;
    goodbye = false;

    // Portuguese (pt-PT) "Helia" voice on the default audio device; the
    // completion handler tracks when an utterance ends.
    tts = new SpeechSynthesizer();
    tts.SelectVoice("Microsoft Server Speech Text to Speech Voice (pt-PT, Helia)");
    tts.SetOutputToDefaultAudioDevice();
    tts.SpeakCompleted += tts_SpeakCompleted;
}
public static void Narrator(string speakandspell)
{
    // Select the voice BEFORE speaking — the original called SelectVoice
    // after Speak returned, so "Microsoft David Desktop" never actually
    // applied to the utterance. Also dispose the synthesizer (it was leaked).
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SelectVoice("Microsoft David Desktop");
        synth.Rate = 0;
        synth.Speak(speakandspell);
    }
    Console.WriteLine(" " + " Narrator: " + speakandspell + " \n\n");
}
public CoachSpeech()
{
    // en-US "Helen" voice on the default device, with start/complete
    // callbacks so the coach can track speech state.
    synth = new SpeechSynthesizer();
    synth.SelectVoice("Microsoft Server Speech Text to Speech Voice (en-US, Helen)");
    synth.SetOutputToDefaultAudioDevice();
    synth.SpeakStarted += SpeakStarted;
    synth.SpeakCompleted += SpeakCompleted;
}
public Todaynews()
{
    InitializeComponent();
    try
    {
        speechRecognitionEngine = createSpeechEngine("en-US");

        // hook to events
        //speechRecognitionEngine.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(engine_AudioLevelUpdated);
        speechRecognitionEngine.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(engine_SpeechRecognized);

        // load dictionary
        loadGrammarAndCommands();

        // use the system's default microphone
        speechRecognitionEngine.SetInputToDefaultAudioDevice();

        // start listening
        speechRecognitionEngine.RecognizeAsync(RecognizeMode.Multiple);

        Marvel.SpeakCompleted += new EventHandler<SpeakCompletedEventArgs>(Marvel_SpeakCompleted);
        // Cancel any speech already in progress before (re)configuring voices.
        if (Marvel.State == SynthesizerState.Speaking)
        {
            Marvel.SpeakAsyncCancelAll();
        }

        // NOTE(review): this loop re-selects a voice once per installed
        // voice while filling the combo box; the SelectVoice calls probably
        // belong outside the loop — confirm intent before changing.
        foreach (InstalledVoice voice in Marvel.GetInstalledVoices())
        {
            cbVoice.Items.Add(voice.VoiceInfo.Name.ToString());
            if (cbVoice.Text != "Microsoft Zira Desktop")
            {
                cbVoice.SelectedItem = "Microsoft Zira Desktop";
                Marvel.SelectVoice("Microsoft Zira Desktop");
            }
            else
            {
                cbVoice.SelectedItem = "Microsoft David Desktop";
                Marvel.SelectVoice("Microsoft David Desktop");
            }
        }
        //stopbtn.Enabled = true;
    }
    catch (Exception)
    {
        // Swallow initialisation failures; speaking an empty string is a no-op.
        Marvel.SpeakAsync("");
    }
}
// private static WeatherReport _weatherreport = null;
public MainWindow()
{
    InitializeComponent();
    myspeech = createSpeechEngine("en-US");

    // Select the Brian voice once, guarded by voice availability. (The
    // original called SelectVoice("IVONA 2 Brian OEM") inside a foreach over
    // every installed voice, repeating the identical selection N times.)
    if (vision.GetInstalledVoices().Count > 0)
    {
        vision.SelectVoice("IVONA 2 Brian OEM");
    }

    // Wire up recognition events and the default microphone.
    myspeech.AudioLevelUpdated += new EventHandler<AudioLevelUpdatedEventArgs>(myspeech_AudioLevelUpdated);
    myspeech.SpeechRecognized += new EventHandler<SpeechRecognizedEventArgs>(myspeech_SpeechRecognized);
    myspeech.SetInputToDefaultAudioDevice();
    LoadCommands();

    /*
     * System.Media.SoundPlayer player = new System.Media.SoundPlayer(@"C:\Users\shaje\Documents\Visual Studio 2017\vision_by_Shajedul_Islam\The vision\WELCOME BACK.wav");
     * player.Play();
     * // vision.SpeakAsync("i am ready for you command sir");
     */

    // State flags for the various voice-command modes.
    completed = true;
    completed2 = true;
    scrl1 = true;
    scrl2 = true;
    stl = true;
    tabchange = true;
    startmenu = true;
    oppkeys = true;

    myspeech.RecognizeAsync(RecognizeMode.Multiple);
    Directory.CreateDirectory(@"C:\Users\AnY\\Documents\\Vision Custom Commands");

    // Properties.Settings.Default.WCMD = @"C:\Users\Shajedul\\Documents\\vision Custom Commands\\Weather Commands.txt";
    // Properties.Settings.Default.WCN = @"C:\Users\Shajedul\\Documents\\vision Custom Commands\\Weather City.txt";
    //weatherchpath = Properties.Settings.Default.WCMD;
    // weathercitypath = Properties.Settings.Default.WCN;
    // ArrayWeatherCommands = File.ReadAllLines(myvision.weatherchpath); //This loads all written commands in our Custom Commands text documents into arrays so they can be loaded into our grammars
    // ArrayWeatherCity = File.ReadAllLines(myvision.weathercitypath);
}
// Add TTS Sound to database
public static void AddTts(string name, string speech, string voice)
{
    using (var db = new Context())
    {
        // Reject duplicate sound names.
        if (db.Sounds.Any(o => o.Name.Equals(name)))
        {
            App.ErrorMessage($"Sound named {name} already exists.");
            return;
        }

        // Set path
        var path = Path.Combine(App.SoundDirectory, name + ".wav");

        // Render the speech to a WAV file. The synthesizer is disposed on all
        // paths now (the original leaked it on failure, which also kept the
        // WAV file open so the cleanup File.Delete could fail).
        using (var synthesizer = new SpeechSynthesizer { Volume = 100, Rate = -2 })
        {
            try
            {
                synthesizer.SelectVoice(voice);
                synthesizer.SetOutputToWaveFile(path);
                synthesizer.Speak(speech);
            }
            catch (Exception e)
            {
                App.ErrorMessage("Error creating TTS file from input.", e);
                // Release the output file before trying to delete it.
                synthesizer.SetOutputToNull();
                if (File.Exists(path))
                {
                    File.Delete(path);
                }
                return;
            }
        }

        // Record the new sound in the database.
        var newSound = new Sound { Name = name, FilePath = path };
        db.Sounds.Add(newSound);
        db.SaveChanges();
    }
    App.Log($"Sound added named '{name}' from TTS speech '{speech}' with voice '{voice}'");
}
private void ProcessRequest(HttpListenerContext ctx)
{
    var req = ctx.Request;
    var res = ctx.Response;
    Console.WriteLine($"[{DateTime.Now:yyyy/MM/dd HH:mm:ss.ff}] {req.HttpMethod} {req.Url.AbsolutePath}");

    // Health check: any GET returns "ok".
    if (req.HttpMethod.Equals("GET"))
    {
        res.StatusCode = 200;
        res.ContentType = "text/plain";
        var b = Encoding.UTF8.GetBytes("ok");
        res.OutputStream.Write(b, 0, b.Length);
        res.Close();
        return;
    }

    // POST /say: synthesize the requested phrase and stream it back as WAV.
    if (req.HttpMethod.Equals("POST") && req.Url.AbsolutePath.Equals("/say") && req.Headers["Range"] == null)
    {
        var reader = new JsonTextReader(new StreamReader(req.InputStream));
        var config = new JsonSerializer().Deserialize<ReqBody>(reader);
        if (config == null)
        {
            res.StatusCode = 400;
            res.Close();
            return;
        }

        // Unknown voices fall back to the first configured one.
        if (!_voiceNames.ContainsKey(config.Voice))
        {
            config.Voice = _voiceNames.Keys.First();
        }

        byte[] bytes;
        // `using` disposes the buffer stream (the original never did), and
        // ToArray works regardless of position, so the Seek was unnecessary.
        using (var tempStream = new MemoryStream())
        {
            _engine.Volume = config.Volume;
            _engine.Rate = config.Rate;
            _engine.SelectVoice(_voiceNames[config.Voice]);
            _engine.SetOutputToWaveStream(tempStream);
            _engine.Speak(config.Phrase);
            bytes = tempStream.ToArray();
        }

        res.StatusCode = 200;
        res.ContentType = "audio/wav";
        res.ContentLength64 = bytes.Length;
        res.OutputStream.Write(bytes, 0, bytes.Length);
        res.OutputStream.Close();
        return;
    }

    // Anything else: no content.
    res.StatusCode = 204;
    res.Close();
}
public static void Speak(string Speech)
{
    // The using block disposes the synthesizer; the original also called
    // Dispose() explicitly inside it, which was a redundant double dispose.
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SelectVoice("Microsoft Zira Desktop");
        synth.Speak(Speech);
    }
}
private void B_Test_Click(object sender, EventArgs e)
{
    // Audition the currently selected voice at full volume, slightly slow.
    using (var synthesizer = new SpeechSynthesizer())
    {
        synthesizer.Volume = 100;
        synthesizer.Rate = -2;
        synthesizer.SelectVoice(CBox_VoiceSynthesizer.Text);
        synthesizer.Speak(TB_ExampleText.Text);
    }
}
public override void Say(string name, string text)
{
    // With multiple voices installed, pick one deterministically per speaker
    // name so the same name always maps to the same voice.
    if (voices.Count > 1)
    {
        int index = Math.Abs(name.GetHashCode() % voices.Count);
        speechSynthesizer.SelectVoice(voices[index].VoiceInfo.Name);
    }
    speechSynthesizer.SpeakAsync(Sanitize(text));
}
private void OptionToolStripMenuItem_Click(object sender, EventArgs e)
{
    // Show the options dialog, then re-apply the (possibly changed) TTS settings.
    using (var dialog = new OptionDialog())
    {
        dialog.ShowDialog(this);
    }

    var settings = Settings.Default;
    SpeechSynthesizer.SelectVoice(settings.TTSVoice);
    SpeechSynthesizer.Rate = settings.TTSRate;
    SpeechSynthesizer.Volume = settings.TTSVolume;
}
public static void HelpMiamoto(object speakandspell)
{
    // The original selected a voice by gender/age hints and then immediately
    // overrode it with an explicit SelectVoice, so the hints call was dead
    // code and has been removed. The synthesizer is now also disposed.
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SelectVoice("Ekho English");
        synth.Rate = 0;
        synth.Speak((string)speakandspell);
    }
}
public speechText()
{
    // Default device, IVONA Russian voice, full volume, normal rate.
    // (Previously tried voices: "Microsoft Irina Desktop",
    // "ScanSoft Katerina_Full_22kHz", and selection by gender/age hints.)
    synthesizer.SetOutputToDefaultAudioDevice();
    synthesizer.SelectVoice("IVONA 2 Tatyana");
    synthesizer.Volume = 100; // 0..100
    synthesizer.Rate = 0;     // -10..10
}
public NovelChapterReaderForm(string title, string link, int sourcesite)
{
    InitializeComponent();

    _title = title;
    _link = link;
    _sourcesite = sourcesite;

    // Read chapters aloud with Zira, reporting progress and completion.
    speech.SelectVoice("Microsoft Zira Desktop");
    speech.SpeakProgress += SpeechProgress;
    speech.SpeakCompleted += SpeechCompleted;
}
public static void InitiateSynth()
{
    // Use the first installed voice, report it, and speak the greeting.
    // (Local renamed: the original shadowed the InstalledVoice type name.)
    var firstVoice = speechSynthesizer.GetInstalledVoices().First();
    speechSynthesizer.SelectVoice(firstVoice.VoiceInfo.Name);
    Console.Write(firstVoice.VoiceInfo.Name);
    speechSynthesizer.Rate = 1;
    speechSynthesizer.SpeakAsync(Processor.Greeting());
}
/// <summary>
/// Selects a voice for the paragraph / selection / all speech synthesizer.
/// </summary>
/// <param name="newVoice">The voice to use for this speech synthesizer.</param>
/// <seealso cref="https://msdn.microsoft.com/en-us/library/system.speech.synthesis.speechsynthesizer.selectvoice%28v=vs.110%29.aspx"/>
public void ParaSelectVoice(String newVoice)
{
    speechSynthPara.SelectVoice(newVoice);

    // A separate per-word voice (e.g. a whispering male voice echoing each
    // typed word under a female paragraph voice) was originally planned, but
    // proved less useful than expected — so the word synthesizer is kept in
    // lock-step with the paragraph / speak-all / speak-selection voice.
    speechSynthWord.SelectVoice(newVoice);
}
public AddQuestion()
{
    InitializeComponent();

    // en-US recogniser for dictating questions.
    myCulture = new System.Globalization.CultureInfo("en-US");
    recEngine = new SpeechRecognitionEngine(myCulture);

    //connectionString = ConfigurationManager.ConnectionStrings["Quiz.Properties.Settings.QuizConnectionString"].ConnectionString;
    // NOTE(review): hard-coded LocalDB connection string with an absolute
    // path — consider restoring the config-based lookup above for portability.
    connectionString = "Data Source=(LocalDB)\\MSSQLLocalDB;AttachDbFilename=C:\\Projects\\Visual Studio\\Quiz\\Quiz\\Quiz.mdf;Integrated Security=True";

    prepareSpeachRecognition();
    synthesizer.SelectVoice("Microsoft Zira Desktop");
}
/// <summary>
/// Select voice by name which must installed on the system. See installed voice by calling
/// GetInstalledVoices(). If the chosen voice is not available, voice will be set as default
/// </summary>
/// <example>Vocalizer Expressive Tian-tian Harpo 22kHz, IVONA 2 Ivy OEM</example>
/// <param name="voiceName"></param>
public static void SelectVoiceByName(string voiceName)
{
    try
    {
        synthesizer.SelectVoice(voiceName);
    }
    catch
    {
        // Deliberately swallowed: an unknown/uninstalled voice simply leaves
        // the previously selected (default) voice active.
    }
}
/// <summary>
/// Reads the given text aloud asynchronously with a fresh synthesizer.
/// </summary>
/// <param name="Rate">Speech rate.</param>
/// <param name="SpeakText">The text to read.</param>
/// <param name="VoiceName">Name of the installed voice to use.</param>
/// <param name="Volume">Speech volume (default 100).</param>
public void ReadVoice(int Rate, string SpeakText, string VoiceName, int Volume = 100)
{
    ss = new SpeechSynthesizer();
    ss.Rate = Rate;     // speech rate
    ss.Volume = Volume; // speech volume
    ss.SelectVoice(VoiceName);
    // Subscribe to SpeakCompleted BEFORE starting the utterance — the
    // original subscribed after SpeakAsync and could miss completion of
    // very short phrases.
    ss.SpeakCompleted += ss_SpeakCompleted;
    ss.SpeakAsyncCancelAll();
    ss.SpeakAsync(SpeakText);
}
public static void HelpOtto(object speakandspell)
{
    // The original selected a voice by gender/age hints and then immediately
    // replaced it with "Microsoft Irina Desktop", making the hints call dead
    // code; it has been removed. The synthesizer is now also disposed.
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SelectVoice("Microsoft Irina Desktop");
        synth.Rate = 2;
        synth.Speak((string)speakandspell);
    }
}
public static void Speak(SettingsViewModel vmSettings, string text)
{
    // Only speak when the configured voice is actually installed.
    string voiceName = vmSettings.SelectedVoice.VOICENAME;
    if (!App.voices.Any(o => o.VoiceInfo.Name == voiceName))
    {
        return;
    }

    // Interrupt anything still being spoken before starting the new text.
    synth.SpeakAsyncCancelAll();
    synth.SelectVoice(voiceName);
    synth.SpeakAsync(text);
}
public static void SetVoice(string voice)
{
    try
    {
        sp.SelectVoice(voice);
    }
    catch (Exception problem)
    {
        // Surface voice-selection failures to the user.
        System.Windows.Forms.MessageBox.Show("Erro em Speaker: " + problem.Message);
    }
}
public TimeSpeaker()
{
    // Pick a random installed voice at full volume and normal rate.
    synth = new SpeechSynthesizer();
    // Enumerate the installed voices once (the original called
    // GetInstalledVoices() twice).
    var voices = synth.GetInstalledVoices();
    int random = new Random().Next(voices.Count);
    synth.SelectVoice(voices[random].VoiceInfo.Name);
    synth.Volume = 100;
    synth.Rate = 0;
}
public void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string response = "I Don't know, but a tin can.";
    // Speak is synchronous, so the synthesizer can be disposed right after;
    // the original leaked it on every recognition event.
    using (var ss = new SpeechSynthesizer())
    {
        ss.SetOutputToDefaultAudioDevice();
        ss.SelectVoice("Microsoft Server Speech Text to Speech Voice (en-US, ZiraPro)");
        ss.Speak(response);
    }
}
void SetSynthOptions(SpeechSynthesizer synth, string unfilteredLineToSay)
{
    var textHandler = classConnector.textHandler;

    // Voice is derived from the raw line; when none is given, GetVoice
    // yields the default synth voice.
    synth.SelectVoice(textHandler.GetVoice(unfilteredLineToSay));

    // Speaking rate likewise comes from the line (0 is the default speed).
    synth.Rate = textHandler.GetRate(unfilteredLineToSay);
}
public static byte[] speakText(Command command)
{
    // Deserialize the speech request carried in the command payload.
    SpeechRequest request = (SpeechRequest)Util.Serialization.deserialize(command.data);
    SpeechSynthesizer synth = new SpeechSynthesizer();
    synth.SelectVoice(request.name);
    synth.Volume = request.volume;
    // NOTE(review): SpeakAsync returns immediately and the synthesizer is
    // never disposed — confirm the utterance reliably completes before the
    // instance is collected.
    synth.SpeakAsync(request.text);
    // The command protocol expects a (possibly empty) byte[] response.
    return(new byte[] { });
}
private void ActionReproduce(object sender, EventArgs e)
{
    // Speak the input text with the Maria voice. Speak is synchronous, so
    // the synthesizer can safely be disposed afterwards (it was leaked).
    using (var synthesizer = new SpeechSynthesizer())
    {
        synthesizer.SetOutputToDefaultAudioDevice();
        synthesizer.Volume = 100;
        synthesizer.Rate = -2;
        synthesizer.SelectVoice("Microsoft Maria Desktop");
        synthesizer.Speak(txtInput.Text);
    }
}
public void SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
{
    string response = "If I need to be browbeaten over the fact that I don't subscribe to spot-if-eye, I'll just disable ad blocker.";
    // Speak is synchronous, so the synthesizer can be disposed right after;
    // the original leaked it on every recognition event.
    using (var ss = new SpeechSynthesizer())
    {
        ss.SetOutputToDefaultAudioDevice();
        ss.SelectVoice("Microsoft Server Speech Text to Speech Voice (en-US, ZiraPro)");
        ss.Speak(response);
    }
}
/*
 * Guessing mode
 * Sounds from .mp3
 *
 * */
public MainWindow()
{
    InitializeComponent();

    // Fast Zira voice at full volume; SpeakCompleted drives the guessing flow.
    synthesizer = new SpeechSynthesizer { Volume = 100, Rate = 2 };
    synthesizer.SelectVoice("Microsoft Zira Desktop");
    synthesizer.SpeakCompleted += Synthesizer_SpeakCompleted;

    // Each sub-directory of imageDirectory becomes one category of tiles.
    var imageDirectories = Directory.GetDirectories(imageDirectory);
    foreach (var directory in imageDirectories)
    {
        var folderName = Path.GetFileName(directory);
        var files = Directory.GetFiles(directory);
        var tempTileDataList = new List<TileData>();
        foreach (var file in files)
        {
            // One tile per image file; the file name (sans extension) is the label.
            var tileItem = new TileData
            {
                DisplayText = Path.GetFileNameWithoutExtension(file),
                ModeCategory = folderName,
                BitmapImage = new BitmapImage(new Uri(file))
            };
            tempTileDataList.Add(tileItem);
        }
        // Categories need at least 9 tiles to fill the guessing grid.
        if (tempTileDataList.Count < 9)
        {
            continue;
        }
        tileDataList.AddRange(tempTileDataList);

        // One checkable, mutually-exclusive menu entry per category.
        var menuItem = new MenuItem { Header = folderName, IsCheckable = true };
        menuItem.Click += SetNewMode_Click;
        MenuItemExtensions.SetGroupName(menuItem, "CategoryGroup");
        MenuItemCategory.Items.Add(menuItem);
    }

    // Default to the first category, if any qualified.
    if (MenuItemCategory.Items.Count > 0)
    {
        ((MenuItem)MenuItemCategory.Items[0]).IsChecked = true;
    }
    GridGuess.Visibility = Visibility.Collapsed;
}
private void StartSpeak(object info)
{
    // Marshal back onto the UI thread if needed.
    if (this.InvokeRequired)
    {
        OneArgumentDel del = new OneArgumentDel(this.StartSpeak);
        this.Invoke(del, info);
        return;
    }

    string speechText = info as string;
    Program.OutputMessage("StartSpeek : {0}", speechText);
    if (string.IsNullOrEmpty(speechText))
    {
        Program.OutputMessage("StartSpeak: No text to speak");
        return;
    }

    //cmdPause.Enabled = false;
    //cmdPause.Text = "Pause";
    //cmdStop.Enabled = false;

    // Tear down any previous synthesizer before starting a new utterance.
    if (_speachSyn != null)
    {
        try
        {
            _speachSyn.Dispose();
        }
        catch (Exception e)
        {
            Program.OutputMessage("Exception thrown : {0}\r\n{1}", e.Message, e.StackTrace);
        }
    }

    _speechState = SynthesizerState.Ready;
    _speachSyn = new SpeechSynthesizer();
    _speachSyn.StateChanged += _speachSyn_StateChanged;

    // Fall back to sensible defaults when the controls don't parse. The
    // original ignored TryParse's return value, which silently turned any
    // unparseable volume into 0 (muted) instead of the intended default 100.
    int volume;
    if (!int.TryParse(txtVolume.Text, out volume))
    {
        volume = 100;
    }
    int speed;
    if (!int.TryParse(cbSpeed.SelectedItem as string, out speed))
    {
        speed = 0;
    }

    string voice = cbVoice.SelectedItem as string;
    if (string.IsNullOrEmpty(voice))
    {
        voice = cbVoice.Items[0].ToString();
    }

    _speachSyn.SelectVoice(voice);
    _speachSyn.Volume = volume;
    _speachSyn.Rate = speed;
    _speachSyn.SetOutputToDefaultAudioDevice();
    _speachSyn.SpeakAsync(speechText);
}
/// <summary>
/// Builds and fully wires a SpeechSynthesizer: voice selection, output
/// routing (default device or a pipeline audio stream), and all event handlers.
/// </summary>
/// <returns>The configured synthesizer.</returns>
private SpeechSynthesizer CreateSpeechSynthesizer()
{
    var synthesizer = new SpeechSynthesizer();

    // An empty/omitted configured voice keeps the engine default.
    if (!string.IsNullOrEmpty(this.Configuration.Voice))
    {
        synthesizer.SelectVoice(this.Configuration.Voice);
    }

    if (this.Configuration.UseDefaultAudioPlaybackDevice)
    {
        // If specified, don't create an output stream and just set up the
        // synthesizer to play sound directly to the default audio device.
        synthesizer.SetOutputToDefaultAudioDevice();
    }
    else
    {
        // Create the format info from the configuration input format
        SpeechAudioFormatInfo formatInfo = new SpeechAudioFormatInfo(
            (EncodingFormat)this.Configuration.OutputFormat.FormatTag,
            (int)this.Configuration.OutputFormat.SamplesPerSec,
            this.Configuration.OutputFormat.BitsPerSample,
            this.Configuration.OutputFormat.Channels,
            (int)this.Configuration.OutputFormat.AvgBytesPerSec,
            this.Configuration.OutputFormat.BlockAlign,
            (this.Configuration.OutputFormat is WaveFormatEx) ? ((WaveFormatEx)this.Configuration.OutputFormat).ExtraInfo : null);

        // Configure synthesizer to write to the output stream
        synthesizer.SetOutputToAudioStream(
            new IOStream(
                (buffer, offset, count) =>
                {
                    // Copy only when the engine hands us a partially filled buffer.
                    byte[] audioData = buffer;
                    if (buffer.Length != count)
                    {
                        audioData = new byte[count];
                        Array.Copy(buffer, offset, audioData, 0, count);
                    }

                    // Forward each audio chunk downstream, stamped with the pipeline time.
                    this.Out.Post(new AudioBuffer(audioData, this.Configuration.OutputFormat), this.pipeline.GetCurrentTime());
                }),
            formatInfo);
    }

    // Register all handlers
    synthesizer.BookmarkReached += this.OnBookmarkReached;
    synthesizer.PhonemeReached += this.OnPhonemeReached;
    synthesizer.SpeakCompleted += this.OnSpeakCompleted;
    synthesizer.SpeakProgress += this.OnSpeakProgress;
    synthesizer.SpeakStarted += this.OnSpeakStarted;
    synthesizer.StateChanged += this.OnStateChanged;
    synthesizer.VisemeReached += this.OnVisemeReached;

    return(synthesizer);
}
public SpeechScope(VoiceInfo voiceInfo, params string[] phrases)
{
    _phrases = phrases;
    _synth = new SpeechSynthesizer();

    // Use the supplied voice when given; otherwise pick a random installed
    // voice matching the current culture.
    if (voiceInfo != null)
    {
        _synth.SelectVoice(voiceInfo.Name);
    }
    else
    {
        var fallback = _synth.GetInstalledVoices()
            .Where(v => v.VoiceInfo.Culture.Equals(CultureInfo.CurrentCulture))
            .RandomElement();
        _synth.SelectVoice(fallback.VoiceInfo.Name);
    }
}
public HebrewSpeechSynthesizer(string voiceName = null)
{
    _speechSynthesizer = new SpeechSynthesizer();
    //_speechSynthesizer.AddLexicon(new Uri("", UriKind.Relative), "application/pls+xml"); //This doesn't work!

    // Track utterance lifetime.
    _speechSynthesizer.SpeakStarted += _speechSynthesizer_SpeakStarted;
    _speechSynthesizer.SpeakCompleted += _speechSynthesizer_SpeakCompleted;

    // Null means "keep the engine's default voice".
    if (voiceName != null)
    {
        _speechSynthesizer.SelectVoice(voiceName);
    }
}
public static void SayHello()
{
    const string strUserName = "******";

    // The voice name comes from app settings; greet the user on the default device.
    using (var synth = new SpeechSynthesizer())
    {
        synth.SelectVoice(ConfigurationManager.AppSettings["Voice"]);
        synth.SetOutputToDefaultAudioDevice();
        synth.Speak("Welcome to Portal ," + strUserName);
    }
}
public static void Speak(string text, bool male)
{
    Task.Factory.StartNew(() =>
    {
        try
        {
#if WINDOWS
            if (TrySpeech)
            {
                SpeechSynthesizer speech = new SpeechSynthesizer();
                // Prefer a voice matching the requested gender; otherwise
                // fall back to the first enabled voice.
                VoiceInfo vi = null;
                foreach (InstalledVoice v in speech.GetInstalledVoices())
                {
                    if (!v.Enabled)
                    {
                        continue;
                    }
                    if (vi == null)
                    {
                        vi = v.VoiceInfo;
                    }
                    else if ((male && v.VoiceInfo.Gender == VoiceGender.Male) ||
                             (!male && v.VoiceInfo.Gender == VoiceGender.Female))
                    {
                        vi = v.VoiceInfo;
                        break;
                    }
                }
                if (vi == null)
                {
                    TrySpeech = false;
                }
                else
                {
                    speech.SelectVoice(vi.Name);
                    speech.Speak(text);
                }
            }
#else
            // No Windows speech on this build; force the espeak path below.
            // (The original reset TrySpeech unconditionally AFTER the #endif,
            // which made Windows builds speak AND then spawn espeak too.)
            TrySpeech = false;
#endif
        }
        catch (Exception ex)
        {
            Utilities.CheckException(ex);
            TrySpeech = false;
        }
        if (!TrySpeech)
        {
            // Fallback: shell out to espeak with a gender-appropriate pitch.
            String addme = male ? " -p 40" : " -p 95";
            Process p = Process.Start("espeak", "\"" + text.Replace("\"", " quote ") + "\"" + addme);
            Console.WriteLine(p.MainModule.FileName);
        }
    });
}
static string runThis()
{
    // Read a line from the console and speak it back with the Zira voice.
    string result = Console.ReadLine();
    Thread.Sleep(1000);
    // Console.ReadLine returns null at end-of-input, and Speak(null) throws
    // ArgumentNullException — skip speech when there is nothing to say.
    if (!string.IsNullOrEmpty(result))
    {
        using (SpeechSynthesizer speech = new SpeechSynthesizer())
        {
            speech.SelectVoice("Microsoft Zira Desktop");
            speech.Speak(result);
        }
    }
    return "0";
}
/// <summary>
/// Speaks the text, optionally into a wave file, with a culture-matched
/// voice that an explicit voice name can override.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <param name="culture">Culture name used to pick a default voice.</param>
/// <param name="filename">Optional wave-file output path; empty = speakers.</param>
/// <param name="voice">Optional explicit voice name overriding the culture pick.</param>
public static void VocalSynthesis(string text, string culture, string filename, string voice)
{
    // Dispose the synthesizer so a wave-file output is flushed and closed;
    // the original leaked it, which can leave the .wav locked or truncated.
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SelectVoiceByHints(VoiceGender.Neutral, VoiceAge.NotSet, 1, new CultureInfo(culture));
        if (!string.IsNullOrEmpty(filename))
            synth.SetOutputToWaveFile(filename);
        if (!string.IsNullOrEmpty(voice))
            synth.SelectVoice(voice);
        synth.Speak(text);
    }
}
// args: [0] language code (ja/en/ko/zh), [1] UTF-8 text file, [2] output wav path.
public static void Main(string[] args)
{
    string lang = args[0].ToLower();
    string voiceName;
    switch (lang)
    {
        case "ja": voiceName = "Microsoft Server Speech Text to Speech Voice (ja-JP, Haruka)"; break;
        case "en": voiceName = "Microsoft Server Speech Text to Speech Voice (en-US, ZiraPro)"; break;
        case "ko": voiceName = "Microsoft Server Speech Text to Speech Voice (ko-KR, Heami)"; break;
        case "zh": voiceName = "Microsoft Server Speech Text to Speech Voice (zh-CN, HuiHui)"; break;
        default:
            // Unknown language: exit with an error code, as before.
            Environment.Exit(1);
            return; // unreachable; satisfies definite assignment
    }

    string textfile = args[1];
    string text = System.IO.File.ReadAllText(textfile, System.Text.Encoding.UTF8);
    string wavefile = args[2];

    // `using` ensures the synthesizer (and hence the wave file) is closed
    // even when Speak throws; the original only disposed on success.
    using (SpeechSynthesizer s = new SpeechSynthesizer())
    {
        s.SelectVoice(voiceName);
        s.Volume = 100;
        s.SetOutputToWaveFile(wavefile, new SpeechAudioFormatInfo(48000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo));
        s.Speak(text);
    }
}
public void initTTS()
{
    try
    {
        // en-US "Helen" voice, default device, full volume.
        tts = new SpeechSynthesizer();
        tts.SelectVoice("Microsoft Server Speech Text to Speech Voice (en-US, Helen)");
        tts.SetOutputToDefaultAudioDevice();
        tts.Volume = 100;
    }
    catch (Exception e)
    {
        // Surface initialisation failures in the UI label.
        label1.Text = "init TTS Error : " + e.ToString();
    }
}
/// <summary>
/// Creates the synthesizer and tries to select the requested voice,
/// falling back to the engine default when it is not installed.
/// </summary>
/// <param name="voiceName">Name of the voice to select; must not be null.</param>
public void SetUp(string voiceName)
{
    // nameof keeps the exception argument in sync with renames (the original
    // used the string literal "voiceName").
    if (voiceName == null)
        throw new ArgumentNullException(nameof(voiceName));

    synthesizer = new SpeechSynthesizer();
    try
    {
        synthesizer.SelectVoice(voiceName);
    }
    catch (Exception)
    {
        // Keep the engine's default voice when the requested one is missing.
        logger.Error("{0} not installed or deactivated\n\tFallback to default\n", voiceName);
    }
}
internal static void Main(string[] args)
{
    try
    {
        synth = new SpeechSynthesizer();
        string textToSpeak = "";
        // A `return` inside an option handler only exits the lambda, so the
        // original kept going (and spoke text) after -l or -h. Record the
        // request in a flag instead and exit after parsing.
        bool exitAfterParse = false;
        options = new OptionSet
        {
            { "v|voice=", "Use the specified {voice} (surround the name in quotes if it contains spaces).", x => { synth.SelectVoice(x); } },
            { "r|rate=", "Speak at the specified {rate} (0-20).", x => { SetRate(x); } },
            { "f|input-file=", "Speak the contents of {file.txt}.", x => { textToSpeak = File.ReadAllText(x); } },
            { "o|output-file=", "Save the audio to {file.wav}.", x => { synth.SetOutputToWaveFile(x); } },
            { "l|list-voices", "List available voices.", x => { ListVoices(); exitAfterParse = true; } },
            { "h|help", "Print this help message and exit.", x => { PrintHelpMessage(); exitAfterParse = true; } }
        };
        List<string> extra = options.Parse(args);
        if (exitAfterParse)
        {
            return;
        }

        // Fall back to positional arguments, then stdin.
        if (textToSpeak == null || textToSpeak.Trim().Length == 0)
        {
            if (extra.Count > 0)
            {
                textToSpeak = string.Join(" ", extra.ToArray());
            }
            else
            {
                textToSpeak = Console.In.ReadToEnd();
            }
        }
        if (textToSpeak == null || textToSpeak.Trim().Length == 0)
        {
            Console.WriteLine("Error: could not find text to speak.");
            Console.WriteLine();
            PrintHelpMessage();
            return;
        }
        synth.Speak(textToSpeak);
    }
    catch (Exception x)
    {
        Console.WriteLine("Error: {0} - {1}", x.GetType().Name, x.Message);
        Console.WriteLine();
        PrintHelpMessage();
    }
}
public SpeechSynthesizerInfo()
{
    // Voice name comes from the idiFrameworkSection of the app config.
    _config = (IDIFrameworkSection)ConfigurationManager.GetSection("idiFrameworkSection");
    SpeechSynthesizer = new SpeechSynthesizer();
    try
    {
        SpeechSynthesizer.SelectVoice(_config.SpeechSynthesizerElement.Name);
        SpeechSynthesizer.SetOutputToDefaultAudioDevice();
    }
    catch (Exception ex)
    {
        // Wrap selection failures in the domain exception callers expect.
        throw new IDISynthetizerVoiceException("There seems to be a problem selecting the voice, " +
            "are you sure the voice exists?", ex);
    }
}
private void btnSpeak_Click(object sender, EventArgs e)
{
    // Speak the text box contents asynchronously, using the user-selected
    // voice when one has been chosen; keep a handle so playback can be
    // controlled elsewhere.
    var synth = new SpeechSynthesizer();
    if (synthVoice != null)
    {
        synth.SelectVoice(synthVoice);
    }
    synth.SpeakAsync(textBox1.Text);
    runSynth = synth;
}