// Wires up the speech pipeline: creates the synthesizer, loads NLP intent/context
// configuration from two bundled JSON files, populates the screens picker from the
// configured contexts, and arms a 3-second idle timer that stops speech recognition.
// NOTE(review): the three AVSpeechSynthesizer event subscriptions are empty lambdas —
// presumably placeholders; confirm they are still needed.
// NOTE(review): recordButton starts disabled — presumably enabled later once speech
// authorization succeeds; verify against the rest of this controller.
public override void ViewDidLoad() { base.ViewDidLoad(); ss = new AVSpeechSynthesizer(); ss.DidFinishSpeechUtterance += (object sender, AVSpeechSynthesizerUteranceEventArgs e) => { }; ss.DidStartSpeechUtterance += (object sender, AVSpeechSynthesizerUteranceEventArgs e) => { }; ss.WillSpeakRangeOfSpeechString += (object sender, AVSpeechSynthesizerWillSpeakEventArgs e) => { }; nlp = new NaturalLanguageProcessor.NaturalLanguageProcessor(); nlp.SetConfiguration(File.ReadAllText("RPRSpeechIntents.json"), File.ReadAllText("RPRScreenContexts.json")); recordButton.Enabled = false; this.pickerView.Model = new ScreensModel(this, nlp.ContextConfigurations.Select(cc => cc.Name).ToList()); speechIdleTimer = new System.Timers.Timer(3 * 1000); speechIdleTimer.Elapsed += (sender, e) => { this.stopSpeechRecognition(); speechIdleTimer.Stop(); }; }
/// <summary>
/// Speaks <paramref name="speechUtterance"/> and completes when the utterance
/// finishes; cancelling <paramref name="cancelToken"/> stops playback and
/// cancels the returned task.
/// </summary>
/// <param name="speechUtterance">The utterance to speak.</param>
/// <param name="cancelToken">Stops speech at the next word boundary when signalled.</param>
async Task SpeakUtterance(AVSpeechUtterance speechUtterance, CancellationToken cancelToken)
{
    AVSpeechSynthesizer speechSynthesizer = new AVSpeechSynthesizer();
    try
    {
        // FIX: RunContinuationsAsynchronously keeps the awaiting continuation from
        // running inline on the AVSpeechSynthesizer native callback thread when the
        // TCS is completed, which could otherwise deadlock or stall the callback.
        currentSpeak = new TaskCompletionSource<object>(TaskCreationOptions.RunContinuationsAsynchronously);
        speechSynthesizer.DidFinishSpeechUtterance += OnFinishedSpeechUtterance;
        speechSynthesizer.SpeakUtterance(speechUtterance);

        // Register AFTER starting so a pre-cancelled token still stops the utterance.
        using (cancelToken.Register(TryCancel))
        {
            await currentSpeak.Task;
        }
    }
    finally
    {
        // Always unhook and release the native synthesizer, even on cancellation.
        speechSynthesizer.DidFinishSpeechUtterance -= OnFinishedSpeechUtterance;
        speechSynthesizer.Dispose();
    }

    void OnFinishedSpeechUtterance(object sender, AVSpeechSynthesizerUteranceEventArgs args)
        => currentSpeak?.TrySetResult(null);

    void TryCancel()
    {
        // Stop at a word boundary for a less abrupt cutoff, then fault the await.
        speechSynthesizer?.StopSpeaking(AVSpeechBoundary.Word);
        currentSpeak?.TrySetCanceled();
    }
}
// Creates the shared synthesizer and hooks the finish/cancel callbacks
// that drive this service's completion handling.
public AppleTextToSpeech()
{
    synth = new AVSpeechSynthesizer();
    this.synth.DidFinishSpeechUtterance += Synth_DidFinishSpeechUtterance;
    this.synth.DidCancelSpeechUtterance += Synth_DidCancelSpeechUtterance;
}
// Builds the demo UI in code: a text view with the phrase to speak, an accent
// segmented control (US/UK/AUS), rate and pitch sliders (0-100, default 75),
// and a Speak button. The button handler maps the selected segment to a BCP-47
// locale, scales Rate by MaximumSpeechRate and pitch to a 0-2 multiplier, then
// speaks the utterance on a freshly created AVSpeechSynthesizer.
// NOTE(review): a new synthesizer is created per tap and held only by a local —
// presumably iOS keeps it alive while speaking; confirm it is not collected
// mid-utterance.
public override void ViewDidAppear(bool animated) { base.ViewDidAppear (animated); this.View.BackgroundColor = UIColor.LightGray; txtSpeak = new UITextView (new RectangleF (20, 50, this.View.Bounds.Width - 40, 100)); txtSpeak.Text = "Xamarin rocks!"; this.View.AddSubview (txtSpeak); segAccent = new UISegmentedControl (new string[] {"US", "UK", "AUS" }); segAccent.Frame = new RectangleF(20,160,this.View.Bounds.Width - 40, 50); segAccent.SelectedSegment = 0; this.View.AddSubview (segAccent); lblRate = new UILabel (new RectangleF (20, 230, 200, 20)); lblRate.Text = "Rate"; this.View.AddSubview (lblRate); sldRate = new UISlider(new RectangleF(20,250,this.View.Bounds.Width - 40, 50)); sldRate.MinValue = 0; sldRate.MaxValue = 100; sldRate.Value = 75; this.View.AddSubview (sldRate); lblPitch = new UILabel (new RectangleF (20, 305, 200, 20)); lblPitch.Text = "Pitch"; this.View.AddSubview (lblPitch); sldPitch = new UISlider(new RectangleF(20,325,this.View.Bounds.Width - 40, 50)); sldPitch.MinValue = 0; sldPitch.MaxValue = 100; sldPitch.Value = 75; this.View.AddSubview (sldPitch); btnSpeak = new UIButton (UIButtonType.RoundedRect); btnSpeak.Frame = new RectangleF (100, 375, this.View.Bounds.Width - 200, 30); btnSpeak.SetTitle ("Speak", UIControlState.Normal); btnSpeak.TouchDown += (object sender, EventArgs e) => { var speechSynthesizer = new AVSpeechSynthesizer (); var speechUtterance = new AVSpeechUtterance (txtSpeak.Text); string lang = "en-US"; if (segAccent.SelectedSegment == 1) lang = "en-GB"; if (segAccent.SelectedSegment == 2) lang = "en-AU"; speechUtterance.Voice = AVSpeechSynthesisVoice.FromLanguage (lang); speechUtterance.Rate = AVSpeechUtterance.MaximumSpeechRate * (sldRate.Value / 100); speechUtterance.PitchMultiplier = 2.0f * (sldPitch.Value / 100); speechSynthesizer.SpeakUtterance (speechUtterance); }; this.View.AddSubview (btnSpeak); }
/// <summary>
/// Speaks <paramref name="text"/> at a quarter of the maximum rate and returns
/// a Task that completes when this specific utterance finishes.
/// </summary>
public static Task Speak(string text)
{
    var speechSynthesizer = new AVSpeechSynthesizer();
    // FIX: without RunContinuationsAsynchronously the caller's continuation ran
    // inline on the AVSpeechSynthesizer callback thread when TrySetResult fired.
    var tcs = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var speechUtterance = new AVSpeechUtterance(text)
    {
        // Voice, Volume and PitchMultiplier are left at platform defaults.
        Rate = AVSpeechUtterance.MaximumSpeechRate / 4,
    };
    EventHandler<AVSpeechSynthesizerUteranceEventArgs> del = null;
    del = (object sender, AVSpeechSynthesizerUteranceEventArgs e) =>
    {
        // Only complete for our utterance, then unsubscribe to avoid leaking
        // the handler if the synthesizer is reused.
        if (e.Utterance == speechUtterance)
        {
            tcs.TrySetResult(true);
            speechSynthesizer.DidFinishSpeechUtterance -= del;
        }
    };
    speechSynthesizer.DidFinishSpeechUtterance += del;
    speechSynthesizer.SpeakUtterance(speechUtterance);
    return tcs.Task;
}
// only works on devices, not the iOS Simulator?
// Reads the task's name and notes aloud with the en-AU voice; AVSpeechSynthesizer
// requires iOS 7, so older systems just log and return.
public void Speak()
{
    context.Fetch(); // re-populates with updated values
    var text = taskDialog.Name + ". " + taskDialog.Notes;
    if (!UIDevice.CurrentDevice.CheckSystemVersion(7, 0))
    {
        Console.WriteLine("Speech requires iOS 7");
        return;
    }
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-AU");
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> with the system default voice, rate, volume and pitch.
public void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text);
    var synthesizer = new AVSpeechSynthesizer();
    synthesizer.SpeakUtterance(utterance);
}
/// <summary>
/// Speaks <paramref name="speechUtterance"/>, completing when it finishes or
/// when <paramref name="cancelToken"/> stops playback early.
/// </summary>
internal static async Task SpeakUtterance(AVSpeechUtterance speechUtterance, CancellationToken cancelToken)
{
    // FIX: RunContinuationsAsynchronously keeps the awaiting continuation from
    // running inline on the AVSpeechSynthesizer native callback thread.
    var tcsUtterance = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var speechSynthesizer = new AVSpeechSynthesizer();
    try
    {
        speechSynthesizer.DidFinishSpeechUtterance += OnFinishedSpeechUtterance;
        speechSynthesizer.SpeakUtterance(speechUtterance);
        using (cancelToken.Register(TryCancel))
        {
            await tcsUtterance.Task;
        }
    }
    finally
    {
        // Always unhook so the handler does not keep the synthesizer alive.
        speechSynthesizer.DidFinishSpeechUtterance -= OnFinishedSpeechUtterance;
    }

    void TryCancel()
    {
        // By design, cancellation completes the task normally (not cancelled):
        // callers treat a stopped utterance as "done".
        speechSynthesizer?.StopSpeaking(AVSpeechBoundary.Word);
        tcsUtterance?.TrySetResult(true);
    }

    void OnFinishedSpeechUtterance(object sender, AVSpeechSynthesizerUteranceEventArgs args)
    {
        // Guard against finish events for other utterances on a shared synthesizer.
        if (speechUtterance == args.Utterance)
        {
            tcsUtterance?.TrySetResult(true);
        }
    }
}
// Creates a synthesizer, then wires ButtonDesc to an async handler that downloads
// an image, runs happiness detection on its JPEG stream, shows a message for the
// score, and reads that message aloud with the es-MX voice. Exceptions from the
// detection call are surfaced in the description label.
// NOTE(review): async void is acceptable here only because this is a UI event
// handler; exceptions escaping the catch would be unobservable.
public override void ViewDidLoad() { base.ViewDidLoad(); #region Using Siri's Voice //First we're gonna create the AvFundation in which we use it for reading the speech using the Siri's voice AVSpeechSynthesizer Siri = new AVSpeechSynthesizer(); #endregion ButtonDesc.TouchUpInside += Happiness; #region Hapiness Method Async //This is used for getting the Image which we're gonna use async void Happiness(object sender, EventArgs e) { var Path = await DownloadImageHappiness(); ImgDetection.Image = UIImage.FromFile(Path); var StreamImage = ImgDetection.Image.AsJPEG(.5f).AsStream(); { try { float Percent = await HappinessLevel(StreamImage); TextDescription.Text = GettingMessage(Percent); var Voice = new AVSpeechUtterance(TextDescription.Text); Voice.Voice = AVSpeechSynthesisVoice.FromLanguage("es-MX"); Siri.SpeakUtterance(Voice); } catch (Exception ex) { TextDescription.Text = ex.Message; } } } #endregion }
// Speaks the Text property with default settings; null/empty text is a no-op.
public void Play()
{
    if (String.IsNullOrEmpty(Text))
        return;
    var synthesizer = new AVSpeechSynthesizer();
    synthesizer.SpeakUtterance(new AVSpeechUtterance(Text));
}
/// <summary>
/// Dispose of TTS; safe to call when the synthesizer was never created.
/// </summary>
public void Dispose()
{
    speechSynthesizer?.Dispose();
    speechSynthesizer = null;
}
/// <summary>
/// Releases the utterance and synthesizer.
/// FIX: guarded so a second Dispose (fields already nulled) no longer throws
/// NullReferenceException.
/// </summary>
public void Dispose()
{
    if (synthesizer != null)
    {
        // Unhook before disposing so the handler does not outlive the native peer.
        synthesizer.DidFinishSpeechUtterance -= OnFinishedSpeechUtterance;
        synthesizer.Dispose();
        synthesizer = null;
    }
    utterance?.Dispose();
    utterance = null;
}
// Speaks <toSpeak> at one third of the maximum speech rate.
public static void Speak(string toSpeak)
{
    var utterance = new AVSpeechUtterance(toSpeak);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 3;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> with the en-US voice at the default rate and half volume.
public void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text)
    {
        Rate = AVSpeechUtterance.DefaultSpeechRate,
        Voice = AVSpeechSynthesisVoice.FromLanguage("en-US"),
        Volume = .5f,
        PitchMultiplier = 1.0f
    };
    var synthesizer = new AVSpeechSynthesizer();
    synthesizer.SpeakUtterance(utterance);
}
// Speaks <toSpeak> at a third of the maximum rate with the default voice.
public static void Speak(string toSpeak)
{
    var synthesizer = new AVSpeechSynthesizer();
    synthesizer.SpeakUtterance(new AVSpeechUtterance(toSpeak)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 3
    });
}
// Speaks <text> with the en-US voice at a quarter of the maximum rate.
public void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Tears down the service: unhooks the finish handler, disposes the synthesizer,
// and cancels every pending utterance task so awaiting callers observe
// cancellation rather than hanging.
// NOTE(review): `utterances.ClearAsync` is a project-defined helper — presumably
// it drains the pending map atomically; confirm its semantics before reordering.
public override void Dispose() { if (synth != null) { synth.DidFinishSpeechUtterance -= Synth_DidFinishSpeechUtterance; } synth?.Dispose(); synth = null; utterances.ClearAsync(a => a.Value?.TrySetCanceled()); }
// Converts <metin> to speech using the Turkish (tr-TR) voice at default rate.
public void SeseDonustur(string metin)
{
    var utterance = new AVSpeechUtterance(metin)
    {
        Rate = AVSpeechUtterance.DefaultSpeechRate,
        Voice = AVSpeechSynthesisVoice.FromLanguage("tr-TR"),
        Volume = .5f,
        PitchMultiplier = 1.0f
    };
    var sentezleyici = new AVSpeechSynthesizer();
    sentezleyici.SpeakUtterance(utterance);
}
// Speaks <Fala> with the default system voice at a quarter of maximum rate.
public void Falar(string Fala)
{
    var utterance = new AVSpeechUtterance(Fala);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> using the en-US voice at the default rate and half volume.
public void Speak(string text)
{
    var synth = new AVSpeechSynthesizer();
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.DefaultSpeechRate;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = .5f;
    utterance.PitchMultiplier = 1.0f;
    synth.SpeakUtterance(utterance);
}
// Configures an AVAudioSession for background playback (required for speech
// synthesis while backgrounded), announces background/foreground transitions,
// and sets up a CLLocationManager. On iOS 8+ it waits for the authorization
// callback before starting location services; earlier versions start immediately.
// NOTE(review): SetCategory/SetActive return values are ignored — presumably
// acceptable for a sample; production code should check them.
public override void ViewDidLoad() { base.ViewDidLoad(); // We must set up an AVAudioSession and tell iOS we want to playback audio. Only then can speech synthesis be used in the background. var session = AVAudioSession.SharedInstance(); session.SetCategory(AVAudioSessionCategory.Playback); session.SetActive(true); this.speechSynthesizer = new AVSpeechSynthesizer(); UIApplication.Notifications.ObserveDidEnterBackground((sender, args) => { this.updateUi = false; this.Say("Entering background mode. I will keep you updated!"); }); UIApplication.Notifications.ObserveWillEnterForeground((sender, args) => { this.updateUi = true; this.Say("Entering foreground. Updating UI!"); }); // Create an instance of the location manager. this.locMan = new CLLocationManager { ActivityType = CLActivityType.Other, DesiredAccuracy = 1, DistanceFilter = 1 }; // Apple restricted location services usage in iOS8. We must explicitly prompt the user for permission. // See also: http://motzcod.es/post/97662738237/scanning-for-ibeacons-in-ios-8 if (UIDevice.CurrentDevice.CheckSystemVersion(8, 0)) { this.locMan.AuthorizationChanged += (object sender, CLAuthorizationChangedEventArgs e) => { Console.WriteLine("MapKit authorization changed to: " + e.Status); if (e.Status == CLAuthorizationStatus.AuthorizedAlways || e.Status == CLAuthorizationStatus.AuthorizedWhenInUse) { this.InitLocationServices(); } }; // This app will start locations services while in foreground and keeps using them when backgrounded. // Therefore we do not need RequestAlwaysAuthorization(). this.locMan.RequestWhenInUseAuthorization(); } else { this.InitLocationServices(); } }
// Speaks <text> with the Spanish (es-ES) voice at a quarter of maximum rate.
public void speak(string text)
{
    var utterance = new AVSpeechUtterance(text)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 4,
        Voice = AVSpeechSynthesisVoice.FromLanguage("es-ES"),
        Volume = 0.5F,
        PitchMultiplier = 1.0F
    };
    var synthesizer = new AVSpeechSynthesizer();
    synthesizer.SpeakUtterance(utterance);
}
// Speaks <text> (en-US, third of max rate) using the instance's
// volume and pitch fields.
public void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 3;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = volume;
    utterance.PitchMultiplier = pitch;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
/// <summary>
/// Reads the given string aloud with the Japanese (ja-JP) voice.
/// </summary>
/// <param name="text">String to read aloud</param>
public void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 4,
        Voice = AVSpeechSynthesisVoice.FromLanguage("ja-JP"),
        Volume = 0.5f,
        PitchMultiplier = 1.0f
    };
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> quietly (volume 0.3) at half the maximum rate,
// using the voice returned by SelectVoice().
public void LowSpeak(string text)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 2;
    utterance.Voice = SelectVoice();
    utterance.Volume = 0.3f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <speakText> with the en-US voice at half of the maximum rate.
public void Speak(string speakText)
{
    var synthesizer = new AVSpeechSynthesizer();
    synthesizer.SpeakUtterance(new AVSpeechUtterance(speakText)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 2,
        Voice = AVSpeechSynthesisVoice.FromLanguage("en-US"),
        Volume = 0.5f,
        PitchMultiplier = 1.0f
    });
}
// Speaks <text> at full volume using the voice for <language>
// (a BCP-47 code such as "en-US").
public void Speak(string text, string language)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 2;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage(language);
    utterance.Volume = 1f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Creates the synthesizer and routes both finish AND cancel through the same
// handler (a cancelled utterance is treated like a finished one). Also stops
// playback whenever the app resigns active.
// NOTE(review): the NSNotificationCenter observer token is discarded, so the
// observer is never removed — presumably this object lives for the app's
// lifetime; confirm, otherwise this leaks.
private void Configure() { speechSynthesizer = new AVSpeechSynthesizer(); speechSynthesizer.DidFinishSpeechUtterance += SpeechSynthesizer_DidFinishSpeechUtterance; speechSynthesizer.DidCancelSpeechUtterance += SpeechSynthesizer_DidFinishSpeechUtterance; NSNotificationCenter.DefaultCenter.AddObserver(UIApplication.WillResignActiveNotification, (obj) => { StopPlaying(); }); }
// Same setup as the other ViewDidLoad variant in this SOURCE: activates a
// Playback AVAudioSession so speech keeps working in the background, announces
// background/foreground transitions via Say(), and configures CLLocationManager
// with iOS 8 when-in-use authorization before starting location services.
// NOTE(review): SetCategory/SetActive return values are ignored — check them in
// production code.
public override void ViewDidLoad () { base.ViewDidLoad (); // We must set up an AVAudioSession and tell iOS we want to playback audio. Only then can speech synthesis be used in the background. var session = AVAudioSession.SharedInstance (); session.SetCategory (AVAudioSessionCategory.Playback); session.SetActive (true); this.speechSynthesizer = new AVSpeechSynthesizer (); UIApplication.Notifications.ObserveDidEnterBackground ((sender, args) => { this.updateUi = false; this.Say ("Entering background mode. I will keep you updated!"); }); UIApplication.Notifications.ObserveWillEnterForeground ((sender, args) => { this.updateUi = true; this.Say ("Entering foreground. Updating UI!"); }); // Create an instance of the location manager. this.locMan = new CLLocationManager { ActivityType = CLActivityType.Other, DesiredAccuracy = 1, DistanceFilter = 1 }; // Apple restricted location services usage in iOS8. We must explicitly prompt the user for permission. // See also: http://motzcod.es/post/97662738237/scanning-for-ibeacons-in-ios-8 if (UIDevice.CurrentDevice.CheckSystemVersion (8, 0)) { this.locMan.AuthorizationChanged += (object sender, CLAuthorizationChangedEventArgs e) => { Console.WriteLine("MapKit authorization changed to: " + e.Status); if(e.Status == CLAuthorizationStatus.AuthorizedAlways || e.Status == CLAuthorizationStatus.AuthorizedWhenInUse) { this.InitLocationServices (); } }; // This app will start locations services while in foreground and keeps using them when backgrounded. // Therefore we do not need RequestAlwaysAuthorization(). this.locMan.RequestWhenInUseAuthorization (); } else { this.InitLocationServices (); } }
// Speaks <textSpeak> (en-US, full volume) at MaximumSpeechRate / <speechRate>.
private void AISpeech(string textSpeak, float speechRate)
{
    var utterance = new AVSpeechUtterance(textSpeak)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / speechRate,
        Voice = AVSpeechSynthesisVoice.FromLanguage("en-US"),
        Volume = 1.0f,
        PitchMultiplier = 1.0f
    };
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
//AI speech
// Speaks <textToSpeech> with the voice for <accent>, at
// MaximumSpeechRate / <speechRate>, using the given volume and pitch.
public void AIEnglish(string textToSpeech, string accent, float speechRate, float volume, float pitchMultiplier)
{
    var utterance = new AVSpeechUtterance(textToSpeech);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / speechRate;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage(accent);
    utterance.Volume = volume;
    utterance.PitchMultiplier = pitchMultiplier;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> with the en-US voice at the platform default rate.
public override void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.DefaultSpeechRate;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <testedSpeech> (en-US, full volume) at half of the maximum rate.
private void SpeechText(string testedSpeech)
{
    var utterance = new AVSpeechUtterance(testedSpeech);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 2.0f;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = 1.0f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> at a quarter of the maximum rate. NOTE: <lang> is currently
// unused — the voice assignment is commented out, so the default voice applies.
public void Speak(string text, SpeechLanguages lang)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    //utterance.Voice = AVSpeechSynthesisVoice.FromLanguage(lang.ToLocale().LocaleIdentifier);
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
/// <summary>
/// Speak example from:
/// http://blog.xamarin.com/make-your-ios-7-app-speak/
/// Whitespace-only or empty text is a no-op.
/// </summary>
public void Speak(string text)
{
    if (string.IsNullOrWhiteSpace(text))
        return;
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = volume;
    utterance.PitchMultiplier = pitch;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
//french voice that will be used to read the subtitle (DetailTextLabel) property of the french phrases, stored currently in the table view's memory cache
public void frenchPhraseBookAI(string textToSpeak)
{
    var utterance = new AVSpeechUtterance(textToSpeak);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 2.2f;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("fr");
    utterance.Volume = 1.0f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
//english voice that notifies the end user of background executions
public void englishAIBackground(string textToSpeak)
{
    var utterance = new AVSpeechUtterance(textToSpeak);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 2.2f;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-US");
    utterance.Volume = 1.0f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks the tapped button's title (en-US, quarter rate, half volume).
partial void btnSpeak_TouchUpInside(UIButton sender)
{
    var button = (UIButton)sender;
    var utterance = new AVSpeechUtterance(button.TitleLabel.Text)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 4,
        Voice = AVSpeechSynthesisVoice.FromLanguage("en-US"),
        Volume = 0.5f,
        PitchMultiplier = 1.0f
    };
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
// Speaks <text> with the Brazilian Portuguese (pt-BR) voice at quarter rate.
public void TextToTalk(string text)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("pt-BR");
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
/// <summary>
/// Speak example from:
/// http://blog.xamarin.com/make-your-ios-7-app-speak/
/// Uses the en-AU voice with the instance's volume and pitch fields.
/// </summary>
void Speak(string text)
{
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-AU");
    utterance.Volume = volume;
    utterance.PitchMultiplier = pitch;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
/// <summary>
/// Speaks <paramref name="text"/> using the voice for <paramref name="language"/>,
/// falling back to DEFAULT_LOCALE when that language has no installed voice.
/// </summary>
/// <param name="text">The text.</param>
/// <param name="language">The language (BCP-47 code).</param>
public void Speak(string text, string language = DEFAULT_LOCALE)
{
    var speechSynthesizer = new AVSpeechSynthesizer();
    var voice = AVSpeechSynthesisVoice.FromLanguage(language)
        ?? AVSpeechSynthesisVoice.FromLanguage(DEFAULT_LOCALE);
    var speechUtterance = new AVSpeechUtterance(text)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 4,
        // BUG FIX: the original computed the fallback `voice` but then assigned
        // FromLanguage(language) directly, discarding the fallback entirely.
        Voice = voice,
        Volume = 0.5f,
        PitchMultiplier = 1.0f
    };
    speechSynthesizer.SpeakUtterance(speechUtterance);
}
// Speaks <speechString> and returns an observable that emits once when the
// utterance finishes. A published (connectable) finish stream lets the dispose
// subscription and the returned sequence share one event subscription; Connect()
// is called only after SpeakUtterance so no event is missed. When the token is
// cancellable, cancellation stops speech immediately and a parallel DidCancel
// subscription performs the same cleanup.
// NOTE(review): the CancellationTokenRegistration returned by Register is
// discarded — presumably acceptable because the synthesizer is per-call; confirm
// long-lived tokens do not accumulate registrations.
public IObservable<Unit> SpeakAsync(string speechString, CancellationToken cancellationToken = default(CancellationToken)) { speechString.AssertNotNull(nameof(speechString)); var utterance = new AVSpeechUtterance(speechString) { Voice = voice, Rate = 0.55f }; var synthesizer = new AVSpeechSynthesizer(); var finishedUtterance = Observable .FromEventPattern<AVSpeechSynthesizerUteranceEventArgs>(x => synthesizer.DidFinishSpeechUtterance += x, x => synthesizer.DidFinishSpeechUtterance -= x) .Select(_ => Unit.Default) .Publish(); finishedUtterance .Subscribe( _ => { utterance.Dispose(); synthesizer.Dispose(); }); if (cancellationToken.CanBeCanceled) { cancellationToken.Register(() => synthesizer.StopSpeaking(AVSpeechBoundary.Immediate)); Observable .FromEventPattern<AVSpeechSynthesizerUteranceEventArgs>(x => synthesizer.DidCancelSpeechUtterance += x, x => synthesizer.DidCancelSpeechUtterance -= x) .Select(_ => Unit.Default) .Subscribe( _ => { utterance.Dispose(); synthesizer.Dispose(); }); } synthesizer.SpeakUtterance(utterance); finishedUtterance.Connect(); return finishedUtterance .FirstAsync() .RunAsync(cancellationToken); }
// Non-cancellable variant of the Rx speak helper: emits once when the utterance
// finishes, disposing the utterance and synthesizer from the shared (published)
// finish stream. Connect() is deliberately called after SpeakUtterance.
// NOTE(review): the commented-out cancellation branch mirrors SpeakAsync —
// presumably kept for reference; consider deleting dead code once confirmed.
public IObservable<Unit> Speak(string speechString) { Ensure.ArgumentNotNull(speechString, nameof(speechString)); var utterance = new AVSpeechUtterance(speechString) { Voice = voice, Rate = 0.55f }; var synthesizer = new AVSpeechSynthesizer(); var finishedUtterance = Observable .FromEventPattern<AVSpeechSynthesizerUteranceEventArgs>(x => synthesizer.DidFinishSpeechUtterance += x, x => synthesizer.DidFinishSpeechUtterance -= x) .Select(_ => Unit.Default) .Publish(); finishedUtterance .Subscribe( _ => { utterance.Dispose(); synthesizer.Dispose(); }); //if (cancellationToken.CanBeCanceled) //{ // cancellationToken.Register(() => synthesizer.StopSpeaking(AVSpeechBoundary.Immediate)); // Observable // .FromEventPattern<AVSpeechSynthesizerUteranceEventArgs>(x => synthesizer.DidCancelSpeechUtterance += x, x => synthesizer.DidCancelSpeechUtterance -= x) // .Select(_ => Unit.Default) // .Subscribe( // _ => // { // utterance.Dispose(); // synthesizer.Dispose(); // }); //} synthesizer.SpeakUtterance(utterance); finishedUtterance.Connect(); return finishedUtterance .FirstAsync(); }
// only works on devices, not the iOS Simulator?
// Reads the task's name and notes aloud with the en-AU voice; requires iOS 7+.
public void Speak()
{
    context.Fetch(); // re-populates with updated values
    var text = taskDialog.Name + ". " + taskDialog.Notes;
    if (!UIDevice.CurrentDevice.CheckSystemVersion(7, 0))
    {
        Console.WriteLine("Speak requires iOS 7");
        return;
    }
    var utterance = new AVSpeechUtterance(text);
    utterance.Rate = AVSpeechUtterance.MaximumSpeechRate / 4;
    utterance.Voice = AVSpeechSynthesisVoice.FromLanguage("en-AU");
    utterance.Volume = 0.5f;
    utterance.PitchMultiplier = 1.0f;
    new AVSpeechSynthesizer().SpeakUtterance(utterance);
}
/// <summary>
/// Dispose of TTS; a no-op when the synthesizer was never initialized.
/// </summary>
public void Dispose()
{
    if (speechSynthesizer == null)
        return;
    speechSynthesizer.Dispose();
    speechSynthesizer = null;
}
// Creates the synthesizer instance used for all speech output.
public TextToSpeech()
{
    this._speaker = new AVSpeechSynthesizer();
}
/// <summary> /// Speak the specified text. /// </summary> /// <param name="text">Text.</param> public static void Speak(string text) { if (_syn == null) { _syn = new AVSpeechSynthesizer(); _syn.Delegate = SpeechSynthesizerDelegate.instance; } var utterance = new AVSpeechUtterance(text); if (_settings != null) { utterance.pitchMultiplier = _settings.pitchMultiplier; utterance.postUtteranceDelay = _settings.postUtteranceDelay; utterance.preUtteranceDelay = _settings.preUtteranceDelay; utterance.rate = _settings.rate; utterance.voice = _settings.voice; utterance.volume = _settings.volume; } _syn.SpeakUtterance(utterance); }
// Speaks <text> with the system default voice settings.
public void SpeakText(string text)
{
    new AVSpeechSynthesizer().SpeakUtterance(new AVSpeechUtterance(text));
}
/// <summary>
/// Initialize TTS by creating the synthesizer this service speaks through.
/// </summary>
public void Init()
{
    this.speechSynthesizer = new AVSpeechSynthesizer();
}
// Speaks <text> at a quarter of the maximum rate and returns a Task that
// completes when this specific utterance finishes; the handler unsubscribes
// itself at that point.
public static Task Speak(string text)
{
    var synthesizer = new AVSpeechSynthesizer();
    var completion = new TaskCompletionSource<bool>();
    var utterance = new AVSpeechUtterance(text)
    {
        Rate = AVSpeechUtterance.MaximumSpeechRate / 4,
        //Voice = AVSpeechSynthesisVoice.FromLanguage ("en-AU"),
        //Volume = 0.5f,
        //PitchMultiplier = 1.5f
    };
    EventHandler<AVSpeechSynthesizerUteranceEventArgs> onFinished = null;
    onFinished = (sender, e) =>
    {
        if (e.Utterance == utterance)
        {
            completion.TrySetResult(true);
            synthesizer.DidFinishSpeechUtterance -= onFinished;
        }
    };
    synthesizer.DidFinishSpeechUtterance += onFinished;
    synthesizer.SpeakUtterance(utterance);
    return completion.Task;
}