/// <summary>
/// Reads SSML from "{fileName}.xml" next to the executable, synthesizes it to
/// speech, and writes the audio to "{fileName}.wav" in the same directory.
/// </summary>
/// <param name="fileName">Base name (no extension) of the SSML input and WAV output.</param>
public static async Task SSML2AudioFile(string fileName)
{
    // Resolve the directory the executable was launched from.
    var path = new FileInfo(Environment.GetCommandLineArgs()[0]).DirectoryName;
    // Path.Combine instead of concatenating with "/" so the separator is
    // always correct for the platform.
    var ssmlText = File.ReadAllText(Path.Combine(path, $"{fileName}.xml"));

    // The object for controlling the speech synthesis engine (voice).
    // SpeechSynthesizer is IDisposable; release it when done.
    using (var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
    // Generate the audio stream from the SSML text.
    using (SpeechSynthesisStream stream = await synth.SynthesizeSsmlToStreamAsync(ssmlText))
    {
        // Get a StorageFolder for the executable's directory on behalf of
        // the first available user (desktop-bridge style access).
        var user = (await Windows.System.User.FindAllAsync())[0];
        StorageFolder localFolder = await StorageFolder.GetFolderFromPathForUserAsync(user, path);

        // Copy the synthesized audio into the .wav file, replacing any existing one.
        using (var reader = new DataReader(stream))
        {
            await reader.LoadAsync((uint)stream.Size);
            var sampleFile = await localFolder.CreateFileAsync($"{fileName}.wav", CreationCollisionOption.ReplaceExisting);
            await FileIO.WriteBufferAsync(sampleFile, reader.ReadBuffer((uint)stream.Size));
        }
    }
}
/// <summary>
/// Event handler: runs speech recognition with the built-in UI, records the
/// recognized expression in the open notebook, evaluates it, and speaks the answer.
/// </summary>
private async void initSpeeach(object sender, Windows.UI.Xaml.RoutedEventArgs e)
{
    // Prepare the recognizer and configure the built-in recognition UI.
    await recoWithUI.CompileConstraintsAsync();
    recoWithUI.UIOptions.AudiblePrompt = "What do you want to calculate?";
    recoWithUI.UIOptions.ExampleText = "salary equals 12 times 15";
    var result = await recoWithUI.RecognizeWithUIAsync();

    // Guard against both null and empty text (the original compared only
    // against "") so a dismissed/failed recognition cannot fall through.
    if (!string.IsNullOrEmpty(result?.Text))
    {
        // Record the spoken expression as a new notebook line.
        App.Model.OpenNotebook.Lines.Add(new Line
        {
            LineNumber = App.Model.OpenNotebook.Lines.Count + 1,
            Expression = result.Text
        });

        // Evaluate the expression and read the answer back to the user.
        double ans = App.Model.OpenNotebook.Solver.EvaluateNested(result.Text);
        using (var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
        {
            // Generate the audio stream from plain text.
            SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync("The answer is " + ans.ToString());
            // Send the stream to the media object.
            mediaElement.SetSource(stream, stream.ContentType);
            mediaElement.Play();
        }
    }
}
// Reads the given book aloud with the speech synthesizer while showing its text.
private async void playBook(Book book)
{
    // Load the book's text from the Assets/Books folder inside the install location.
    var installRoot = Windows.ApplicationModel.Package.Current.InstalledLocation;
    StorageFolder booksFolder = await installRoot.GetFolderAsync(@"Assets\Books");
    var bookFile = await booksFolder.GetFileAsync(book.name + book.fileType);
    string text = await Windows.Storage.FileIO.ReadTextAsync(bookFile);

    // Synthesize the whole text and queue the audio on the media element.
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(text);
    mediaElement.SetSource(speechStream, speechStream.ContentType);

    // Show the text on screen alongside the audio.
    configureCommandBar();
    BookText.Visibility = Visibility.Visible;
    BookText.Text = text;
    BookTextScrollViewer.VerticalScrollBarVisibility = ScrollBarVisibility.Visible;
    BookTextScrollViewer.Visibility = Visibility.Visible;

    // Clear the collection to hide the list item so the text block becomes visible.
    books.Clear();
    mediaElement.Play();
}
// Converts the entered text to speech, wires the audio to the player element,
// keeps a rewound managed copy of the stream, and reports success or failure.
private async void ConvertButton_Click(object sender, RoutedEventArgs e)
{
    try
    {
        using (var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
        {
            // Use the user's selected voice when set, otherwise the system default.
            synthesizer.Voice = SelectedVoice ?? Windows.Media.SpeechSynthesis.SpeechSynthesizer.DefaultVoice;

            var synStream = await synthesizer.SynthesizeTextToStreamAsync(TextInput);
            mPlayerElement.Source = MediaSource.CreateFromStream(synStream, synStream.ContentType);

            // Keep a managed view of the audio, rewound for later consumers.
            stream = synStream.AsStream();
            stream.Position = 0;

            var dlg = new MessageDialog("Conversion succeeded.", Package.Current.DisplayName);
            var cmd = await dlg.ShowAsync();
        }
    }
    catch (Exception exception)
    {
        var dlg = new MessageDialog(exception.Message, Package.Current.DisplayName);
        var cmd = await dlg.ShowAsync();
    }
}
/// <summary>
/// Reads the given expression out loud, preferring an installed English voice.
/// </summary>
/// <param name="expression">Text to synthesize and play.</param>
public async Task readOutLoud(string expression)
{
    // The media object for controlling and playing audio.
    MediaElement mediaElement = new MediaElement();

    // The object for controlling the speech synthesis engine (voice).
    // Only voices installed on the system can be used
    // (Region & Country -> Speech -> More Voices).
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();

    // Prefer an English voice when one is installed; otherwise keep the
    // system default (e.g. Hungarian default: Microsoft Szabolcs).
    // Bug fix: voice language tags are of the form "en-US", so the original
    // case-sensitive Contains("EN") never matched — compare case-insensitively.
    // The throwaway second synthesizer the original created just to pick the
    // voice has also been removed.
    var englishVoice = SpeechSynthesizer.AllVoices.FirstOrDefault(
        v => v.Language.IndexOf("en", StringComparison.OrdinalIgnoreCase) >= 0);
    if (englishVoice != null)
    {
        synth.Voice = englishVoice;
    }

    // Generate the audio stream from plain text.
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(expression);

    // Send the stream to the media object.
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}
// Synthesizes a fixed greeting and hands the audio stream to the media engine.
private async void Button_Click(object sender, RoutedEventArgs e)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync("Hello World");
    mediaEngine.PlayStream(speechStream);
}
// Speaks "Hello World" by streaming synthesized audio into the media engine.
private async void Button_Click(object sender, RoutedEventArgs e)
{
    var tts = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream audio = await tts.SynthesizeTextToStreamAsync("Hello World");
    mediaEngine.PlayStream(audio);
}
/// <summary>
/// Speaks the given phrase aloud using the default system voice.
/// </summary>
/// <param name="frase">The phrase to synthesize and play.</param>
private async void MensajeVoz(string frase)
{
    var player = new MediaElement();
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(frase);
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
// Caches the media element used for playback and creates the speech
// synthesis engine (voice) used by subsequent calls.
public static void InitialiseSpeechSynthesis(MediaElement MediaElement)
{
    mediaElement = MediaElement;
    synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
}
// Synthesizes 'text' and plays it through a locally created MediaElement.
public async void Speak(string text)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    var speechStream = await synthesizer.SynthesizeTextToStreamAsync(text);
    var player = new Windows.UI.Xaml.Controls.MediaElement();
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
/// <summary>
/// Synthesizes 'text' and plays it on the supplied media player.
/// </summary>
/// <param name="text">Text to speak.</param>
/// <param name="mediaPlayer">Player that receives the synthesized audio.</param>
public static async Task Read(string text, MediaPlayer mediaPlayer)
{
    // Dispose the synthesizer (it is IDisposable), but NOT the stream:
    // the original wrapped the stream in a using block, which disposed it
    // right after Play() returned while MediaSource was still reading from
    // it asynchronously, risking cut-off playback.
    using (var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
    {
        var stream = await synth.SynthesizeTextToStreamAsync(text);
        mediaPlayer.Source = MediaSource.CreateFromStream(stream, stream.ContentType);
        mediaPlayer.Play();
    }
}
// Speaks the supplied text through the page's media element.
private async void speech(string arg)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(arg);
    this.media.SetSource(speechStream, speechStream.ContentType);
    this.media.Play();
}
// Synthesizes 'arg' with the default voice and plays it on this.media.
private async void speech(string arg)
{
    MediaElement player = this.media;
    var tts = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream audio = await tts.SynthesizeTextToStreamAsync(arg);
    player.SetSource(audio, audio.ContentType);
    player.Play();
}
/// <summary>
/// Speaks the supplied text through a locally created MediaElement.
/// </summary>
/// <param name="text">Text to synthesize and play.</param>
public async void Speak(string text)
{
    // The media object for controlling and playing audio.
    MediaElement mediaElement = new MediaElement();
    // The object for controlling the speech synthesis engine (voice).
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    // Bug fix: the original synthesized and played the literal "Hello World",
    // then synthesized 'text' and threw that stream away. Play 'text' instead.
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(text);
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}
// Stops any current playback, then reads the 'tekst' field with the default voice.
private async void Button_Click_F(object sender, RoutedEventArgs e)
{
    mediaElement.Stop();
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    Windows.Media.SpeechSynthesis.SpeechSynthesisStream speechStream =
        await synthesizer.SynthesizeTextToStreamAsync(tekst);
    mediaElement.SetSource(speechStream, speechStream.ContentType);
    mediaElement.Play();
}
// Synthesizes 'text' and plays it on a locally created MediaElement.
public async void speak(string text)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    var speechStream = await synthesizer.SynthesizeTextToStreamAsync(text);
    var player = new MediaElement();
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
/// <summary>
/// Speaks the supplied text aloud.
/// </summary>
/// <param name="text">Text to synthesize and play.</param>
public async void Speak(string text)
{
    // The media object for controlling and playing audio.
    MediaElement mediaElement = new MediaElement();
    // The object for controlling the speech synthesis engine (voice).
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    // Bug fix: the original played the hard-coded "Hello World" and then
    // synthesized 'text' into a stream it never used. Synthesize and play
    // the caller's text.
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(text);
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}
/// <summary>
/// Synthesizes 'input' to speech, wires the audio to the player element, and
/// keeps a rewound managed copy of the stream. Errors are shown in a dialog.
/// </summary>
/// <param name="input">Text to convert to speech.</param>
private async Task SynthesizeAudioAsync(string input)
{
    // NOTE(review): the commented-out experiments previously kept here embedded
    // a cloud speech subscription key. Commented-out secrets are still secrets,
    // so the dead code was deleted — and that key should be rotated.
    try
    {
        using (var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
        {
            // Use the user's selected voice when set, otherwise the system default.
            synthesizer.Voice = SelectedVoice ?? Windows.Media.SpeechSynthesis.SpeechSynthesizer.DefaultVoice;
            var synStream = await synthesizer.SynthesizeTextToStreamAsync(input);
            mPlayerElement.Source = MediaSource.CreateFromStream(synStream, synStream.ContentType);
            // Keep a rewound managed view of the audio for later consumers.
            stream = synStream.AsStream();
            stream.Position = 0;
        }
    }
    catch (Exception exception)
    {
        var dlg = new MessageDialog(exception.Message, Package.Current.DisplayName);
        var cmd = await dlg.ShowAsync();
    }
}
// Announces the (currently hard-coded) best friend through a local MediaElement.
public async void BestFriend()
{
    MediaElement player = new MediaElement();
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream =
        await synthesizer.SynthesizeTextToStreamAsync("Your best friend is Jesse Wheeler.");
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
// Speaks 'text' through the page's media element using the default voice.
private async void IniciaFala (string text)
{
    MediaElement player = this.media;
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(text);
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
/// <summary>
/// Reads 'mytext' aloud via SSML, preferring a female voice.
/// </summary>
/// <param name="mytext">Plain text to speak.</param>
private async void readText(string mytext)
{
    MediaElement mediaplayer = new MediaElement();
    using (var speech = new Windows.Media.SpeechSynthesis.SpeechSynthesizer())
    {
        // FirstOrDefault instead of First: First throws InvalidOperationException
        // on systems with no female voice installed; fall back to the default voice.
        speech.Voice = SpeechSynthesizer.AllVoices
            .FirstOrDefault(v => v.Gender == VoiceGender.Female) ?? SpeechSynthesizer.DefaultVoice;
        // Escape the text so characters like '<' and '&' cannot break the SSML.
        // 'en-GB' replaces the original 'en-UK': UK is not a valid BCP-47 region.
        string ssml = @"<speak version='1.0' " +
            "xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='en-GB'>" +
            System.Security.SecurityElement.Escape(mytext) + "</speak>";
        SpeechSynthesisStream stream = await speech.SynthesizeSsmlToStreamAsync(ssml);
        mediaplayer.SetSource(stream, stream.ContentType);
        mediaplayer.Play();
    }
}
// Creates the speaker and builds the display-name -> voice lookup, while
// populating the voice selector list box with every installed voice.
private void Window_Initialized(object sender, EventArgs e)
{
    speaker = new SpeechSynthesizer();
    voices = new Dictionary <string, VoiceInformation>();

    foreach (var voice in SpeechSynthesizer.AllVoices)
    {
        this.voiceSelecter.Items.Add(voice.DisplayName);
        voices.Add(voice.DisplayName, voice);
    }
}
// Accumulates total play time, speaks a rest reminder, and shows a balloon tip.
// Errors are logged rather than surfaced.
private async void PlaySpeech()
{
    try
    {
        _totalPlayTime = _totalPlayTime.Add(Setting.App.AlarmInterval);

        // Fall back to the default template when the user has not configured
        // one. This guard existed in the earlier (commented-out) version and
        // was lost in the rewrite: string.Format(null, ...) throws
        // ArgumentNullException, which silently killed the reminder.
        string setting = Setting.Speech.Message;
        if (string.IsNullOrEmpty(setting))
            setting = SpeechSetting.DefaultMessage;

        // Render the accumulated time as "X小时 Y分" or "Y分钟".
        string hourMsg;
        if (_totalPlayTime.Hours > 0)
        {
            hourMsg = $"{_totalPlayTime.Hours}小时 {_totalPlayTime.Minutes}分";
        }
        else
        {
            hourMsg = $"{_totalPlayTime.Minutes}分钟";
        }
        string msg = string.Format(setting, hourMsg);

        // Synthesize the reminder and play it through SoundPlayer.
        using var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
        using Windows.Media.SpeechSynthesis.SpeechSynthesisStream synthStream = await synthesizer.SynthesizeTextToStreamAsync(msg);
        using Stream stream = synthStream.AsStreamForRead();
        using System.Media.SoundPlayer player = new System.Media.SoundPlayer();
        player.Stream = stream;
        player.Play();

        // Also surface the reminder as a tray balloon tip.
        var icon = IoC.Get <TaskbarIcon>();
        icon.ShowBalloonTip("休息提示", msg, BalloonIcon.Info);
    }
    catch (Exception ex)
    {
        logger.Error($"PlaySpeech Ex:{ex.Message}");
    }
}
// Speaks the name of the product at index 'prodId' through the page's media element.
private async void Falar(int prodId)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream =
        await synthesizer.SynthesizeTextToStreamAsync(this.produtos[prodId].Nome);
    this.media.SetSource(speechStream, speechStream.ContentType);
    this.media.Play();
}
// Prompts the user for a command, both audibly and in the text box, and
// marks the prompt state via the 'msg' flag.
public async void Start3()
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync("Command me");
    textBox.Text = "Command me";
    mediaElement.SetSource(speechStream, speechStream.ContentType);
    // Flag that the "Command me" prompt is the message being spoken.
    msg = 1;
    mediaElement.Play();
}
// Speaks a fixed greeting through a locally created media element.
public async void Button_Click(object sender, Windows.UI.Xaml.RoutedEventArgs e)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync("Hello World");
    MediaElement player = new MediaElement();
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
/// <summary>
/// Synthesizes 'text' with the default voice and plays it on a throwaway MediaElement.
/// </summary>
/// <param name="text">Text to speak.</param>
public static async Task TextToSpeech(string text)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(text);
    MediaElement player = new MediaElement();
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
// Stops current playback and re-reads 'tekst' with a male voice, falling
// back to the default voice when no male voice is installed.
private async void Button_Click_M(object sender, RoutedEventArgs e)
{
    mediaElement.Stop();
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    VoiceInformation maleVoice =
        SpeechSynthesizer.AllVoices.FirstOrDefault(v => v.Gender == VoiceGender.Male)
        ?? SpeechSynthesizer.DefaultVoice;
    synthesizer.Voice = maleVoice;
    Windows.Media.SpeechSynthesis.SpeechSynthesisStream speechStream =
        await synthesizer.SynthesizeTextToStreamAsync(tekst);
    mediaElement.SetSource(speechStream, speechStream.ContentType);
    mediaElement.Play();
}
// Reads the contents of the text box aloud on a locally created media element.
private async void SpeakButton_Click(object sender, RoutedEventArgs e)
{
    SpeechSynthesizer synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(txtText.Text);
    MediaElement player = new MediaElement();
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
// Speaks 'msg' using the default voice on a locally created media element.
private async void PlayAudio(string msg)
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(msg);
    MediaElement player = new MediaElement();
    player.SetSource(speechStream, speechStream.ContentType);
    player.Play();
}
/// <summary>
/// Counts how many people live in the named location and speaks the result.
/// </summary>
/// <param name="itemToCount">Location name matching a Locations enum member.</param>
public async void HowMany(string itemToCount)
{
    // Map the spoken name onto the Locations enum instead of the original
    // seven identical switch cases. Unknown names keep the original
    // behavior: count stays 0.
    int count = 0;
    if (Enum.TryParse(itemToCount, out Locations location))
    {
        count = Peoples.Count(x => x.Locale == location);
    }

    // Speak the result.
    MediaElement media = new MediaElement();
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(count + " people live in " + itemToCount);
    media.SetSource(stream, stream.ContentType);
    media.Play();
}
// Audibly prompts the user to say a contact name and flags the prompt state.
public async void Start()
{
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync("Please Say contact Name");
    mediaElement.SetSource(speechStream, speechStream.ContentType);
    mediaElement.Play();
    // Flag that the contact-name prompt is the active message.
    msg = 0;
}
// Dequeues and speaks the next queued phrase, provided sound is enabled,
// nothing is currently playing, and the queue is non-empty.
// Returns true when playback was started.
public async Task <bool> PlayNext()
{
    // Respect the sound-on setting.
    if (!Settings.GetBool(DeviceSettingKeys.SoundOnKey))
    {
        return(false);
    }

    // Only start a new utterance when idle and work is queued.
    if (this.IsCurrentlyPlaying ||
        mediaPlayer.PlaybackSession.PlaybackState == MediaPlaybackState.Playing ||
        speechlist.Count == 0)
    {
        return(false);
    }

    this.IsCurrentlyPlaying = true;
    var phrase = speechlist.Dequeue();
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(phrase);
    mediaPlayer.Source = MediaSource.CreateFromStream(speechStream, speechStream.ContentType);
    mediaPlayer.Play();
    return(true);
}
// Speaks 'phrase' when sound is enabled and nothing is playing, then blocks
// until the MediaEnded handler signals completion via the semaphore.
public async Task SayAsync(string phrase)
{
    // Respect the sound-on setting and never interrupt active playback.
    if (!Settings.GetBool(DeviceSettingKeys.SoundOnKey))
    {
        return;
    }
    if (mediaPlayer.PlaybackSession.PlaybackState == MediaPlaybackState.Playing)
    {
        return;
    }

    Debug.WriteLine("Audio playback started");
    this.IsCurrentlyPlaying = true;
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream speechStream = await synthesizer.SynthesizeTextToStreamAsync(phrase);
    mediaPlayer.Source = MediaSource.CreateFromStream(speechStream, speechStream.ContentType);
    mediaPlayer.Play();

    // Wait until the MediaEnded event is raised before returning, so speech
    // recognition is not re-enabled while audio is still playing. The
    // semaphore is signaled in the mediaElement_MediaEnded handler.
    await semaphoreSlim.WaitAsync();
}
/// <summary>
/// Counts people of the given sex ("Male" or "Female") and speaks the result.
/// Any other argument speaks a count of 0, as before.
/// </summary>
/// <param name="itemToCount">"Male" or "Female".</param>
public async void CountTheAmount(string itemToCount)
{
    // Collapse the duplicated Male/Female branches: both counted people
    // whose Sex equals itemToCount.
    int count = 0;
    if (itemToCount == "Male" || itemToCount == "Female")
    {
        count = Peoples.Count(x => x.Sex == itemToCount);
    }

    // Speak the result.
    MediaElement media = new MediaElement();
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
    SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync("There are " + count + " " + itemToCount);
    media.SetSource(stream, stream.ContentType);
    media.Play();
}
/// <summary>
/// Speaks 'message' on the UI thread's sound output element and blocks the
/// calling thread until playback completes.
/// NOTE(review): WaitOne() will deadlock if this is invoked on the UI thread
/// itself — confirm callers always run off the UI thread. Also note that if
/// soundOutputElement is null the event is never signaled and the caller
/// blocks forever; verify whether that can happen in practice.
/// </summary>
public void SpeakString(string message)
{
    AutoResetEvent speakCompletedEvent = new AutoResetEvent(false);
    manager.InvokeOnUIThread(
        async() =>
        {
            if (soundOutputElement != null)
            {
                SpeechSynthesizer synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
                // Generate the audio stream from plain text.
                SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(message);
                // Send the stream to the media object.
                soundOutputElement.SetSource(stream, stream.ContentType);
                await PlayCompleteSoundAsync();
                // Unblock the waiting caller once playback has finished.
                speakCompletedEvent.Set();
            }
        }
    );
    // Block until the UI-thread continuation signals completion.
    speakCompletedEvent.WaitOne();
}
// Hides the meaning flyout and speaks the currently selected text.
private async static void SpeakItem_Click(object sender, RoutedEventArgs e)
{
    try
    {
        meaningFlyout.Hide();
        // The media object for controlling and playing audio.
        MediaPlayerElement mediaElement = new MediaPlayerElement();
        // The object for controlling the speech synthesis engine (voice).
        var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
        // Generate the audio stream from plain text.
        SpeechSynthesisStream stream = await synth.SynthesizeTextToStreamAsync(selectedText);
        // Send the stream to the media object.
        mediaElement.Source = MediaSource.CreateFromStream(stream, stream.ContentType);
        mediaElement.MediaPlayer.Play();
    }
    catch (Exception ex)
    {
        // Speaking is best-effort, but the original empty catch hid every
        // failure — at least record the exception for debugging.
        System.Diagnostics.Debug.WriteLine($"SpeakItem_Click failed: {ex}");
    }
}
// Speaks a playful message about the detected age/gender/smile combination.
// 'age' is a numeric string (parsed with Int16.Parse — throws on non-numeric
// input); 'gender' is expected to be "male" or anything else (treated as female).
// NOTE(review): relies on the isSmiling field set elsewhere — confirm it is
// updated before this runs.
private async void readAge(string age, string gender)
{
    // The media object for controlling and playing audio.
    mediaElement = new MediaElement();
    // The object for controlling the speech synthesis engine (voice).
    var synth = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();

    // Pick the form of address and the age-bracket phrase.
    string adjetivo, faixaEtaria;
    if (gender == "male")
    {
        adjetivo = "sir";
    }
    else
    {
        adjetivo = "miss";
    }
    if (Int16.Parse(age) < 25)
    {
        faixaEtaria = "a young person";
    }
    else if (Int16.Parse(age) > 50)
    {
        faixaEtaria = "a growth " + gender;
    }
    else
    {
        // Ages 25-50 get no age-bracket phrase.
        faixaEtaria = "";
    }

    // Generate the audio stream from plain text, choosing the message by
    // smile/age combination. Note: a smiling person aged exactly 25 or a
    // non-smiling person aged exactly 25 falls through to the later branches.
    SpeechSynthesisStream stream;
    if (isSmiling && Int16.Parse(age) < 25)
    {
        stream = await synth.SynthesizeTextToStreamAsync("Hello " + adjetivo + "! Today you're looking " + faixaEtaria + " with " + age + " years old. Now I understand your smile.");
    }
    else if (!isSmiling && Int16.Parse(age) > 25)
    {
        stream = await synth.SynthesizeTextToStreamAsync("Hello " + adjetivo + "! Before I tell you your age, let me tell to you to try to smile to the photo next time. Maybe you can look younger. Today you're looking " + faixaEtaria + " with " + age + " years old.");
    }
    else if (!isSmiling)
    {
        stream = await synth.SynthesizeTextToStreamAsync("Hello " + adjetivo + "! Really? No smiles? OK. Today you're looking " + faixaEtaria + " with " + age + " years old.");
    }
    else
    {
        stream = await synth.SynthesizeTextToStreamAsync("Hello " + adjetivo + "! Today you're looking " + faixaEtaria + " with " + age + " years old. Before I forget: beautiful smile!");
    }

    // Send the stream to the media object.
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}
// Greets the recognized person by name; "Hara" gets a personalized message
// derived from the detected 'age' field.
private async void ReadVoiceName(string name)
{
    // The media object for controlling and playing audio.
    mediaElement = new MediaElement();
    // The object for controlling the speech synthesis engine (voice).
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();

    // Generate the audio stream from plain text.
    SpeechSynthesisStream speechStream;
    if (name == "Hara")
    {
        speechStream = await synthesizer.SynthesizeTextToStreamAsync("Hello " + name + "! You have 18 years old plus " + (Int16.Parse(age) - 18).ToString() + " years of experience. But, let me check something for you.");
    }
    else
    {
        speechStream = await synthesizer.SynthesizeTextToStreamAsync("Hello " + name + "! Let me check some products for you.");
    }

    // Send the stream to the media object.
    mediaElement.SetSource(speechStream, speechStream.ContentType);
    mediaElement.Play();
}
// Speaks the message associated with the given error condition.
private async void ReadVoice(Error name)
{
    // The media object for controlling and playing audio.
    mediaElement = new MediaElement();
    // The object for controlling the speech synthesis engine (voice).
    var synthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();

    // Pick the spoken message first, then synthesize once.
    string message;
    switch (name)
    {
    case Error.Not_Recognized:
        message = "Oops! Someone was do not recognized. Please, show me someone that I met before!";
        break;

    case Error.No_Face:
        message = "I can't find a face. Do you really show me someone? Please, try again.";
        break;

    case Error.Not_Found:
        message = "I can't find another product for you.";
        break;

    case Error.Expensive:
        message = "You need to order your boss to raise your paycheck. Let me check another product for you, for now.";
        break;

    default:
        // 'name' is the Error enum here, so this speaks its enum member name.
        message = "Hello " + name + "! Let me check some products for you.";
        break;
    }

    SpeechSynthesisStream stream = await synthesizer.SynthesizeTextToStreamAsync(message);

    // Send the stream to the media object.
    mediaElement.SetSource(stream, stream.ContentType);
    mediaElement.Play();
}