/// <summary>
/// Speak back text
/// </summary>
/// <param name="text">Text to speak</param>
/// <param name="crossLocale">Locale of voice</param>
/// <param name="pitch">Pitch of voice</param>
/// <param name="speakRate">Speak Rate of voice (All) (0.0 - 2.0f)</param>
/// <param name="volume">Volume of voice (0.0-1.0)</param>
/// <param name="cancelToken">Cancelation token to stop speak</param>
/// <exception cref="ArgumentNullException">Thrown if text is null</exception>
/// <exception cref="ArgumentException">Thrown if text length is greater than maximum allowed</exception>
public async Task Speak(string text, CrossLocale? crossLocale = null, float? pitch = null, float? speakRate = null, float? volume = null, CancellationToken cancelToken = default(CancellationToken))
{
    if (text == null)
    {
        throw new ArgumentNullException(nameof(text), "Text can not be null");
    }

    // Completed by the FinishedSpeaking event; cancelled via the token registration below.
    var tcs = new TaskCompletionSource<object>();
    var handler = new EventHandler((sender, args) => tcs.TrySetResult(null));

    // Track whether this call actually acquired the semaphore so the finally
    // block never releases one it does not own. The previous
    // `CurrentCount == 0` guard was racy: a WaitAsync cancelled while ANOTHER
    // caller held the semaphore would still see CurrentCount == 0 and release
    // it out from under that caller, breaking mutual exclusion.
    var acquired = false;
    try
    {
        // Serialize speech requests; throws OperationCanceledException if
        // cancelled before acquisition (in which case `acquired` stays false).
        await semaphore.WaitAsync(cancelToken);
        acquired = true;

        // On cancellation: stop the in-progress utterance and fault the await below.
        void OnCancel()
        {
            speechSynthesizer.StopSpeaking();
            tcs.TrySetCanceled();
        }

        using (cancelToken.Register(OnCancel))
        {
            if (volume.HasValue)
            {
                speechSynthesizer.Volume = NormalizeVolume(volume);
            }

            if (speakRate.HasValue)
            {
                speechSynthesizer.Rate = speakRate.Value;
            }

            // NOTE(review): `pitch` is accepted but never applied in this block —
            // confirm whether the underlying synthesizer supports it.
            if (crossLocale.HasValue)
            {
                speechSynthesizer.Voice = crossLocale.Value.Language;
            }

            sdelegate.FinishedSpeaking += handler;
            speechSynthesizer.StartSpeakingString(text);
            await tcs.Task;
        }
    }
    finally
    {
        // Unsubscribing a handler that was never attached is a harmless no-op,
        // so this is safe even when WaitAsync was cancelled.
        sdelegate.FinishedSpeaking -= handler;
        if (acquired)
        {
            semaphore.Release();
        }
    }
}
/// <summary>
/// Speak back text
/// </summary>
/// <param name="text">Text to speak</param>
/// <param name="crossLocale">Locale of voice</param>
/// <param name="pitch">Pitch of voice</param>
/// <param name="speakRate">Speak Rate of voice (All) (0.0 - 2.0f)</param>
/// <param name="volume">Volume of voice (iOS/WP) (0.0-1.0)</param>
/// <param name="cancelToken">Cancelation token to stop speak</param>
/// <exception cref="ArgumentNullException">Thrown if text is null</exception>
/// <exception cref="ArgumentException">Thrown if text length is greater than maximum allowed</exception>
public async Task Speak(string text, CrossLocale? crossLocale, float? pitch, float? speakRate, float? volume, CancellationToken? cancelToken)
{
    if (text == null)
    {
        throw new ArgumentNullException(nameof(text), "Text can not be null");
    }

    // Completed by the FinishedSpeaking event; cancelled via the token registration below.
    var tcs = new TaskCompletionSource<object>();
    var handler = new EventHandler((sender, args) => tcs.TrySetResult(null));

    // Track whether the semaphore was actually acquired. The previous version
    // called semaphore.Release() unconditionally in finally, so a WaitAsync
    // cancelled BEFORE acquisition would still release — corrupting the count
    // or throwing SemaphoreFullException.
    var acquired = false;
    try
    {
        var ct = cancelToken ?? CancellationToken.None;
        // Serialize speech requests; throws OperationCanceledException if
        // cancelled before acquisition (in which case `acquired` stays false).
        await semaphore.WaitAsync(ct);
        acquired = true;

        using (ct.Register(() =>
        {
            speechSynthesizer.StopSpeaking();
            tcs.TrySetCanceled();
        }))
        {
            // NOTE(review): volume is passed through even when null — presumably
            // NormalizeVolume supplies a default for null; confirm.
            speechSynthesizer.Volume = NormalizeVolume(volume);

            if (speakRate != null)
            {
                speechSynthesizer.Rate = speakRate.Value;
            }

            // NOTE(review): `pitch` is accepted but never applied in this block.
            if (crossLocale != null)
            {
                speechSynthesizer.Voice = crossLocale.Value.Language;
            }

            sdelegate.FinishedSpeaking += handler;
            speechSynthesizer.StartSpeakingString(text);
            await tcs.Task;
        }
    }
    finally
    {
        // Detach first (no-op if never attached), then release only what we own.
        sdelegate.FinishedSpeaking -= handler;
        if (acquired)
        {
            semaphore.Release();
        }
    }
}
/// <summary>
/// Stops the currently speaking synthesizer, if any.
/// </summary>
public static void StopSpeaking() => speech.StopSpeaking();
/// <summary>
/// App startup: configures logging, enumerates audio output devices, builds the
/// status-bar menu, wires speech/sound delegates, starts the queue and speech
/// timers, and launches the embedded HTTP server.
/// </summary>
/// <param name="notification">Launch notification supplied by AppKit.</param>
public override void FinishedLaunching(NSObject notification)
{
    // Configure logger from log4net.config sitting next to the entry assembly.
    string path = Path.Combine(Path.GetDirectoryName(System.Reflection.Assembly.GetEntryAssembly().Location), "log4net.config");
    XmlConfigurator.ConfigureAndWatch(new FileInfo(path));
    logger.Info("Ventriliquest 1.0 Starting up...");

    // Get list of available audio out devices
    xamspeech ham = new xamspeech();
    OutputDevices = ham.GetDevices();

    // Setup UI: status-bar item with an "Output Device" submenu.
    statusMenu = new NSMenu();
    statusItem = NSStatusBar.SystemStatusBar.CreateStatusItem(30);
    var outputItem = new NSMenuItem("Output Device", (a, b) => { });
    var deviceList = new NSMenu();
    outputItem.Submenu = deviceList;
    // Default device unless the saved config matches an enumerated device below.
    OutputDeviceUID = "Built-in Output";
    foreach (var entry in OutputDevices)
    {
        // One menu item per device; clicking it becomes the exclusive selection
        // (radio-button behavior implemented by clearing every item's state first).
        var test = new NSMenuItem(entry.Key.ToString(), (a, b) =>
        {
            foreach (NSMenuItem item in deviceList.ItemArray())
            {
                item.State = NSCellStateValue.Off;
            }
            NSMenuItem theItem = (NSMenuItem)a;
            theItem.State = NSCellStateValue.On;
            config.OutputDevice = theItem.Title;
            // Map the human-readable title back to the device UID for playback.
            foreach (var e in OutputDevices)
            {
                if (e.Key.ToString().Equals(theItem.Title))
                {
                    OutputDeviceUID = e.Value.ToString();
                }
            }
        });
        // Restore the previously configured device selection.
        if (entry.Key.ToString().Equals(config.OutputDevice))
        {
            test.State = NSCellStateValue.On;
            OutputDeviceUID = entry.Value.ToString();
        }
        deviceList.AddItem(test);
    }

    // Toggle item: checked state mirrors config.LocalOnly (click inverts both).
    var daItem = new NSMenuItem("Local Connections Only", (a, b) =>
    {
        NSMenuItem theItem = (NSMenuItem)a;
        if (theItem.State == NSCellStateValue.On)
        {
            config.LocalOnly = false;
            theItem.State = NSCellStateValue.Off;
        }
        else
        {
            config.LocalOnly = true;
            theItem.State = NSCellStateValue.On;
        }
    });
    if (config.LocalOnly)
    {
        daItem.State = NSCellStateValue.On;
    }
    var quitItem = new NSMenuItem("Quit", (a, b) => Shutdown());
    // Opens the local voice-configuration page served by the HTTP server below.
    var voiceconfigItem = new NSMenuItem("Voice Configuration", (a, b) => Process.Start("http://127.0.0.1:7888/config"));
    statusMenu.AddItem(new NSMenuItem("Version: 1.1"));
    statusMenu.AddItem(outputItem);
    statusMenu.AddItem(daItem);
    statusMenu.AddItem(voiceconfigItem);
    statusMenu.AddItem(quitItem);
    statusItem.Menu = statusMenu;
    statusItem.Image = NSImage.ImageNamed("tts-1.png");
    statusItem.AlternateImage = NSImage.ImageNamed("tts-2.png");
    statusItem.HighlightMode = true;

    // Delegate completion callbacks signal the wait handles used by speechtimer.
    speechdelegate.DidComplete += delegate
    {
        synthesis.Set();
    };
    sounddelegate.DidComplete += delegate
    {
        playback.Set();
        IsSounding = false;
        IsSpeaking = false;
        sound.Dispose();
    };
    speech.Delegate = speechdelegate;

    // queuetimer: drains incoming requests into SpeechQueue and pushes a status
    // event to the GUI every tick.
    queuetimer = new System.Timers.Timer(250);
    queuetimer.Elapsed += (object sender, ElapsedEventArgs e) =>
    {
        TTSRequest r;
        if (Queue.TryDequeue(out r))
        {
            if (r.Interrupt)
            {
                // stop current TTS (speech/sound APIs are touched on the main thread)
                NSApplication.SharedApplication.InvokeOnMainThread(delegate
                {
                    if (IsSpeaking)
                    {
                        speech.StopSpeaking();
                    }
                    if (IsSounding)
                    {
                        sound.Stop();
                    }
                });
                // clear queue
                SpeechQueue.Clear();
            }
            if (!r.Reset)
            {
                SpeechQueue.Enqueue(r);
            }
            RequestCount++;
        }
        // Broadcast current queue/speaking status to the GUI.
        var eventdata = new Hashtable();
        eventdata.Add("ProcessedRequests", RequestCount);
        eventdata.Add("QueuedRequests", SpeechQueue.Count);
        eventdata.Add("IsSpeaking", IsSpeaking);
        InstrumentationEvent ev = new InstrumentationEvent();
        ev.EventName = "status";
        ev.Data = eventdata;
        NotifyGui(ev.EventMessage());
    };

    // when this timer fires, it will pull off of the speech queue and speak it
    // the 1000ms delay also adds a little pause between tts requests.
    // NOTE(review): comment says 1000ms but the interval is 250ms — confirm which is intended.
    speechtimer = new System.Timers.Timer(250);
    speechtimer.Elapsed += (object sender, ElapsedEventArgs e) =>
    {
        if (IsSpeaking.Equals(false))
        {
            if (SpeechQueue.Count > 0)
            {
                TTSRequest r = SpeechQueue.Dequeue();
                IsSpeaking = true;
                // Disable the timer while this request synthesizes and plays,
                // then re-enable it at the end of the tick.
                speechtimer.Enabled = false;
                var oink = Path.Combine(audiopath, "temp.aiff");
                // Synthesize to a temp AIFF file on the main thread, then block
                // this timer thread until the synthesis delegate signals done.
                NSApplication.SharedApplication.InvokeOnMainThread(delegate
                {
                    ConfigureSpeechEngine(r);
                    speech.StartSpeakingStringtoURL(r.Text, new NSUrl(oink, false));
                });
                synthesis.WaitOne();
                // Play the synthesized file through the selected output device.
                NSApplication.SharedApplication.InvokeOnMainThread(delegate
                {
                    IsSounding = true;
                    sound = new NSSound(Path.Combine(audiopath, "temp.aiff"), false);
                    sound.Delegate = sounddelegate;
                    //if(OutputDeviceUID != "Default") {
                    sound.PlaybackDeviceID = OutputDeviceUID;
                    //}
                    sound.Play();
                });
                playback.WaitOne();
                IsSounding = false;
                speechtimer.Enabled = true;
            }
        }
    };

    queuetimer.Enabled = true;
    queuetimer.Start();
    speechtimer.Enabled = true;
    speechtimer.Start();
    InitHTTPServer();
}
// Stop-button action: halts the speech synthesizer at the next word boundary.
// NOTE(review): the enum member casing `hWord` is unusual for this binding —
// verify it is the intended NSSpeechBoundary value (vs. `Word`).
partial void btnStopHandler(Foundation.NSObject sender)
{
    mSpeechSynth.StopSpeaking(NSSpeechBoundary.hWord);
}