// Speaks the supplied text with the currently selected voice, pitch and rate by
// building an SSML document and playing the synthesized stream through the media element.
// NOTE: async void — exceptions after the first await are unobservable; acceptable
// only because this is a fire-and-forget UI action.
private async void ReadSsmlText(string mytext)
{
    // The object for controlling the speech synthesis engine (voice), taken from the voice list box.
    VoiceInformation currentVoice = (VoiceInformation)lstVoices.SelectedItem;

    // FIX: escape XML-special characters ('&', '<', '>', quotes) in the user text so it
    // cannot break the SSML document or inject markup into it.
    string escapedText = System.Security.SecurityElement.Escape(mytext);

    string Ssml =
        "<speak version='1.0' " +
        "xmlns='http://www.w3.org/2001/10/synthesis' xml:lang='" + currentVoice.Language + "'>" +
        "<voice name='" + currentVoice.DisplayName + "'>" +
        "<prosody pitch='" + lstPitch.SelectedItem.ToString() + "' rate='" + sldRate.Value.ToString() + "'>" +
        escapedText +
        "</prosody>" +
        "</voice></speak>";

    // Generate the audio stream from the SSML document.
    SpeechSynthesisStream stream = await speech.SynthesizeSsmlToStreamAsync(Ssml);

    // Send the stream to the media object and start playback.
    mediaplayer.SetSource(stream, stream.ContentType);
    mediaplayer.Play();
}
/// <summary>
/// Populates the voice-chooser combo box with every installed voice, ordered by
/// language, and pre-selects the entry matching the synthesizer's current voice.
/// </summary>
private void InitializeListboxVoiceChooser()
{
    VoiceInformation activeVoice = synthesizer.Voice;

    foreach (VoiceInformation installedVoice in SpeechSynthesizer.AllVoices.OrderBy(v => v.Language))
    {
        var entry = new ComboBoxItem
        {
            Name = installedVoice.DisplayName,
            Tag = installedVoice,
            Content = installedVoice.DisplayName + " (Language: " + installedVoice.Language + ")",
        };
        listboxVoiceChooser.Items.Add(entry);

        // Mark the synthesizer's current voice as the selected item.
        if (activeVoice.Id == installedVoice.Id)
        {
            entry.IsSelected = true;
            listboxVoiceChooser.SelectedItem = entry;
        }
    }
}
/// <summary>
/// Accepts a MediaElement that should be placed on whichever page the user is on
/// when text is read by SpeechHelper, and initializes the SpeechSynthesizer.
/// </summary>
/// <param name="media">Media element used for audio playback.</param>
/// <param name="preferredVoiceName">
/// Substring of the display name of the preferred installed voice. Defaults to
/// "Pavel", the value that was previously hard-coded, so existing callers are
/// unaffected. If no installed voice matches, the synthesizer keeps its default voice.
/// </param>
public SpeechHelper(MediaElement media, string preferredVoiceName = "Pavel")
{
    mediaElement = media;
    synthesizer = new SpeechSynthesizer();
    mediaElement.MediaEnded += MediaElement_MediaEnded;
    mediaElement.MediaFailed += MediaElement_MediaFailed;

    // Pick the first installed voice whose display name contains the preferred name.
    foreach (VoiceInformation voice in SpeechSynthesizer.AllVoices)
    {
        if (voice.DisplayName.Contains(preferredVoiceName))
        {
            synthesizer.Voice = voice;
            break;
        }
    }
}
/// <summary>
/// Speaks <paramref name="text"/> through <paramref name="media"/> with the given
/// voice, completing only once playback has finished (MediaEnded).
/// </summary>
public static async Task Speak(this MediaElement media, string text, VoiceInformation voiceInformation)
{
    // In some cases DoRecognition ends prematurely, e.g. when the user allows access
    // to the microphone but there is no microphone available, so if the media is
    // still playing just return.
    if (media.CurrentState == MediaElementState.Playing)
    {
        return;
    }

    SpeechSynthesisStream synthesisStream = await SynthesizeTextToSpeechAsync(text, voiceInformation);
    if (synthesisStream == null)
    {
        return;
    }

    // FIX: run continuations asynchronously so completing the TCS from inside the
    // MediaEnded handler cannot synchronously re-enter the awaiting code.
    var taskCompleted = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

    void endOfPlayHandler(object s, RoutedEventArgs e)
    {
        synthesisStream.Dispose();
        taskCompleted.TrySetResult(true);
    }

    media.MediaEnded += endOfPlayHandler;
    try
    {
        media.AutoPlay = true;
        media.SetSource(synthesisStream, synthesisStream.ContentType);
        await taskCompleted.Task;
    }
    catch (System.Exception)
    {
        // Deliberate best-effort: playback failures were swallowed before and still are.
    }
    finally
    {
        // FIX: previously the handler was only unsubscribed on the success path,
        // leaking the subscription (and the stream) whenever SetSource or the
        // await threw. Always unsubscribe.
        media.MediaEnded -= endOfPlayHandler;
    }
}
/// <summary>
/// Page initialization: wires media and recognition events, compiles the speech
/// recognition constraints, picks the first installed TTS voice and starts the
/// conversation. async void is required because OnNavigatedTo is a framework override.
/// </summary>
protected override async void OnNavigatedTo(NavigationEventArgs e)
{
    base.OnNavigatedTo(e);
    MediaElementCtrl.MediaEnded += MediaElementCtrl_MediaEnded;
    this.dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    this.speechRecognizer = new SpeechRecognizer();
    SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
    speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;

    #region TTS
    try
    {
        // First() throws if no voices are installed; the catch below reports it.
        _voice = Windows.Media.SpeechSynthesis.SpeechSynthesizer.AllVoices.First();
        _speechSynthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
        _speechSynthesizer.Voice = _voice;
    }
    catch (Exception exception)
    {
        var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
        // FIX: GetResults() throws InvalidOperationException unless the
        // IAsyncOperation has already completed; await the dialog instead.
        await messageDialog.ShowAsync();
    }
    #endregion

    StartConversation();
}
/// <summary>
/// Reads out the tapped Pueblo's description in the current language
/// ("fr" => French text, otherwise Spanish) using a female voice for that language.
/// async void is acceptable here: this is a top-level UI event handler.
/// </summary>
private async void list_ItemClick(object sender, TappedRoutedEventArgs e)
{
    try
    {
        var saySomething = (Pueblo)list.SelectedItem;

        SpeechSynthesizer speechSynthesizer = new SpeechSynthesizer();

        // FIX: FirstOrDefault returns null when no installed voice matches the
        // language/gender filter, and assigning null to Voice throws. Fall back
        // to the system default voice instead.
        VoiceInformation voiceInformation =
            SpeechSynthesizer.AllVoices.FirstOrDefault(v => v.Gender == VoiceGender.Female && v.Language.Contains(lenguaje))
            ?? SpeechSynthesizer.DefaultVoice;
        speechSynthesizer.Voice = voiceInformation;

        string palabra = lenguaje == "fr"
            ? saySomething.DescripcionFrench
            : saySomething.DescripcionEspanish;

        if (!string.IsNullOrEmpty(palabra))
        {
            var speechStream = await speechSynthesizer.SynthesizeTextToStreamAsync(palabra);
            mediaElement.AutoPlay = true;
            mediaElement.SetSource(speechStream, speechStream.ContentType);
            mediaElement.Play();
        }
    }
    catch (Exception ex)
    {
        System.Diagnostics.Debug.WriteLine(ex.Message);
    }
}
/// <summary>
/// Fire-and-forget text-to-speech helper: synthesizes the given text on the main
/// view's dispatcher and plays it through the shared media element. Prefers a
/// female voice, falling back to the system default voice.
/// </summary>
public static async void Speak(string textToSpeech)
{
    // Nothing to say — bail out early.
    if (string.IsNullOrEmpty(textToSpeech))
    {
        return;
    }

    var dispatcher = Windows.ApplicationModel.Core.CoreApplication.MainView.CoreWindow.Dispatcher;
    await dispatcher.RunAsync(CoreDispatcherPriority.Normal, async () =>
    {
        // First installed female voice if any, otherwise the default voice.
        VoiceInformation voiceInfo =
            SpeechSynthesizer.AllVoices.FirstOrDefault(v => v.Gender == VoiceGender.Female)
            ?? SpeechSynthesizer.DefaultVoice;
        speechSynthesizer.Voice = voiceInfo;

        var speechSynthesisStream = await speechSynthesizer.SynthesizeTextToStreamAsync(textToSpeech);
        mediaElement.SetSource(speechSynthesisStream, speechSynthesisStream.ContentType);
        mediaElement.Play();
    });
}
/// <summary>
/// Page initialization: wires media and recognition events, compiles the speech
/// recognition constraints, picks the first installed TTS voice and starts the
/// conversation. async void is required because OnNavigatedTo is a framework override.
/// </summary>
protected override async void OnNavigatedTo(NavigationEventArgs e)
{
    base.OnNavigatedTo(e);
    MediaElementCtrl.MediaEnded += MediaElementCtrl_MediaEnded;
    this.dispatcher = CoreWindow.GetForCurrentThread().Dispatcher;

    this.speechRecognizer = new SpeechRecognizer();
    SpeechRecognitionCompilationResult result = await speechRecognizer.CompileConstraintsAsync();
    speechRecognizer.ContinuousRecognitionSession.ResultGenerated += ContinuousRecognitionSession_ResultGenerated;
    speechRecognizer.ContinuousRecognitionSession.Completed += ContinuousRecognitionSession_Completed;

    #region TTS
    try
    {
        // First() throws if no voices are installed; the catch below reports it.
        _voice = Windows.Media.SpeechSynthesis.SpeechSynthesizer.AllVoices.First();
        _speechSynthesizer = new Windows.Media.SpeechSynthesis.SpeechSynthesizer();
        _speechSynthesizer.Voice = _voice;
    }
    catch (Exception exception)
    {
        var messageDialog = new Windows.UI.Popups.MessageDialog(exception.Message, "Exception");
        // FIX: GetResults() throws InvalidOperationException unless the
        // IAsyncOperation has already completed; await the dialog instead.
        await messageDialog.ShowAsync();
    }
    #endregion

    StartConversation();
}
/// <summary>
/// This creates items out of the system installed voices. The voices are then
/// displayed in a listbox, letting the user change the synthesizer voice in your
/// app based on their preference.
/// </summary>
private void InitializeListboxVoiceChooser()
{
    // The voice the synthesizer is currently configured with.
    VoiceInformation activeVoice = synthesizer.Voice;

    foreach (VoiceInformation installedVoice in SpeechSynthesizer.AllVoices)
    {
        var entry = new ComboBoxItem
        {
            Name = installedVoice.DisplayName,
            Tag = installedVoice,
            Content = installedVoice.DisplayName,
        };
        listboxVoiceChooser.Items.Add(entry);

        // Pre-select the entry matching the synthesizer's current voice.
        if (activeVoice.Id == installedVoice.Id)
        {
            entry.IsSelected = true;
            listboxVoiceChooser.SelectedItem = entry;
        }
    }
}
/// <summary>
/// Selects the combo-box entry whose tagged voice matches <paramref name="voiceId"/>
/// (case-insensitive), suppressing the voice-changed event for this programmatic
/// selection. Does nothing when the id is not present in the list.
/// </summary>
public void ChangeVoice(string voiceId)
{
    foreach (var item in listboxVoiceChooser.Items)
    {
        // Skip anything that is not a ComboBoxItem tagged with a VoiceInformation.
        if (!(item is ComboBoxItem comboItem) || !(comboItem.Tag is VoiceInformation voiceInfo))
        {
            continue;
        }

        if (voiceInfo.Id.Equals(voiceId, StringComparison.OrdinalIgnoreCase))
        {
            // NOTE(review): assumes the SelectionChanged handler resets this flag — confirm.
            isSuppressVoiceChangedEvent = true;
            listboxVoiceChooser.SelectedItem = item;
            return;
        }
    }
}
/// <summary>
/// Invoked when the form is initialized; fills the voice-chooser list box by
/// enumerating every installed voice and pre-selecting the current one.
/// </summary>
private void ListboxVoiceChooser_Initialize()
{
    // The voice the synthesizer is currently configured with.
    VoiceInformation selectedVoice = this.synthesizer.Voice;

    foreach (VoiceInformation installedVoice in Windows.Media.SpeechSynthesis.SpeechSynthesizer.AllVoices)
    {
        var entry = new ComboBoxItem
        {
            Name = installedVoice.DisplayName,
            Tag = installedVoice,
            Content = installedVoice.DisplayName,
        };
        this.listboxVoiceChooser.Items.Add(entry);

        // If this is the current voice, make it the selected entry.
        if (selectedVoice.Id == installedVoice.Id)
        {
            entry.IsSelected = true;
            this.listboxVoiceChooser.SelectedItem = entry;
        }
    }
}
/// <summary>
/// Exports the entered text to a sound file.
/// </summary>
/// <param name="input">The text that's going to be converted into a sound file.</param>
/// <param name="voice">The voice to use; when null the default system voice is kept.</param>
/// <returns>True when the file was written successfully, otherwise false.</returns>
public static async Task<bool> ExportTextToSpeechFile(string input, VoiceInformation voice = null)
{
    bool success = false;

    // Synthesize the spoken text.
    var synth = new SpeechSynthesizer();
    if (voice != null)
    {
        synth.Voice = voice;
    }
    SpeechSynthesisStream synthStream = await synth.SynthesizeTextToStreamAsync(input);

    // Determine the device family to decide which export formats to offer.
    // NOTE(review): the qualifier values are compared case-sensitively against
    // "Desktop"/"Xbox" — confirm the platform reports them with this exact casing.
    var qualifiers = Windows.ApplicationModel.Resources.Core.ResourceContext.GetForCurrentView().QualifierValues;
    bool isDesktop = qualifiers.ContainsKey("DeviceFamily") && qualifiers["DeviceFamily"] == "Desktop";
    bool isXbox = qualifiers.ContainsKey("DeviceFamily") && qualifiers["DeviceFamily"] == "Xbox";

    // Configure the save picker. Compressed formats are only offered where the
    // codecs exist (desktop: mp3/wma, Xbox: wma); wav works everywhere.
    var exportPicker = new FileSavePicker();
    exportPicker.SuggestedStartLocation = PickerLocationId.Desktop;
    if (isDesktop)
    {
        exportPicker.FileTypeChoices.Add(".MP3", new List<string>() { ".mp3" });
        exportPicker.FileTypeChoices.Add(".WMA", new List<string>() { ".wma" });
    }
    else if (isXbox)
    {
        exportPicker.FileTypeChoices.Add(".WMA", new List<string>() { ".wma" });
    }
    exportPicker.FileTypeChoices.Add(".WAV", new List<string>() { ".wav" });
    exportPicker.SuggestedFileName = ResourceExtensions.GetLocalized("SpokenText");

    // Let the user choose the target file; null means the picker was cancelled.
    var fileTarget = await exportPicker.PickSaveFileAsync();
    if (fileTarget != null)
    {
        if (fileTarget.FileType == ".wma" || fileTarget.FileType == ".mp3" || fileTarget.FileType == ".m4a")
        {
            // Compressed formats need a transcode pass.
            success = await SaveAndEncodeFile(fileTarget, synthStream, synth.Voice);
        }
        else if (fileTarget.FileType == ".wav")
        {
            // The synthesizer output already is wav, so copy the bytes straight through.
            try
            {
                using (var reader = new DataReader(synthStream))
                {
                    await reader.LoadAsync((uint)synthStream.Size);
                    IBuffer buffer = reader.ReadBuffer((uint)synthStream.Size);
                    await FileIO.WriteBufferAsync(fileTarget, buffer);
                }
                success = true;
            }
            catch (Exception ex)
            {
                Debug.WriteLine("Couldn't export to wav");
                Debug.WriteLine(ex);
            }
        }
    }
    return success;
}
/// <summary>
/// Transcodes the synthesized wav stream into the compressed format implied by
/// the target file's extension (.wma/.mp3/.m4a) and tags the result with
/// title/artist metadata.
/// </summary>
/// <param name="fileTarget">File to write to.</param>
/// <param name="synthStream">The SpeechSynthesisStream with the actual sound.</param>
/// <param name="voice">The VoiceInformation for setting the correct voice in the artist tag.</param>
/// <returns>True on successful transcode, otherwise false.</returns>
private static async Task<bool> SaveAndEncodeFile(StorageFile fileTarget, SpeechSynthesisStream synthStream, VoiceInformation voice)
{
    bool success = false;
    MediaTranscoder transcoder = new MediaTranscoder();
    CancellationTokenSource cts = new CancellationTokenSource();
    Debug.WriteLine(fileTarget.FileType + " selected");

    // Pick the encoding profile matching the target extension.
    AudioEncodingQuality quality = AudioEncodingQuality.High;
    MediaEncodingProfile profile;
    switch (fileTarget.FileType)
    {
        case ".wma": profile = MediaEncodingProfile.CreateWma(quality); break;
        case ".mp3": profile = MediaEncodingProfile.CreateMp3(quality); break;
        case ".m4a": profile = MediaEncodingProfile.CreateM4a(quality); break;
        default:
            Debug.WriteLine("Can't select a media encoding profile");
            return success;
    }

    // Write a temporary wav into app temp storage to act as the transcode source.
    ApplicationData appData = ApplicationData.Current;
    StorageFile source = await appData.TemporaryFolder.CreateFileAsync("temporary.wav", CreationCollisionOption.ReplaceExisting);
    try
    {
        using (var reader = new DataReader(synthStream))
        {
            await reader.LoadAsync((uint)synthStream.Size);
            IBuffer buffer = reader.ReadBuffer((uint)synthStream.Size);
            await FileIO.WriteBufferAsync(source, buffer);
        }
    }
    catch (Exception ex)
    {
        Debug.WriteLine("Couldn't prepare wav for transcoding");
        Debug.WriteLine(ex);
        // FIX: previously execution fell through and tried to transcode an
        // empty/partial temp file; bail out instead.
        return success;
    }

    // Transcode temp wav -> target format.
    var preparedTranscoderResult = await transcoder.PrepareFileTranscodeAsync(source, fileTarget, profile);
    if (preparedTranscoderResult.CanTranscode)
    {
        await preparedTranscoderResult.TranscodeAsync().AsTask(cts.Token);

        // Tag the file so media players show a sensible title/artist.
        MusicProperties fileProperties = await fileTarget.Properties.GetMusicPropertiesAsync();
        fileProperties.Title = fileTarget.DisplayName;
        fileProperties.Artist = "Talkinator " + ResourceExtensions.GetLocalized("VoicedBy") + " " + voice.DisplayName;
        await fileProperties.SavePropertiesAsync();

        success = true;
        Debug.WriteLine(fileTarget.FileType + " export completed");
    }
    else
    {
        Debug.WriteLine(preparedTranscoderResult.FailureReason);
    }
    return success;
}
/// <summary>
/// Builds an SSML document from <paramref name="text"/> — turning '/' into short
/// pauses and spelling out runs of capital letters — and synthesizes it with the
/// given voice. Returns null when no voice is supplied.
/// </summary>
public static async Task<SpeechSynthesisStream> SynthesizeTextToSpeechAsync(string text, VoiceInformation voiceInformation)
{
    SpeechSynthesisStream stream = null;
    using (SpeechSynthesizer synthesizer = new SpeechSynthesizer())
    {
        ResourceContext speechContext = ResourceContext.GetForCurrentView();
        speechContext.Languages = Helpers.SupportedLanguages.ToArray();

        if (voiceInformation == null)
        {
            return null;
        }

        text = text.ReplaceUpperCaseWords(voiceInformation.Language);
        // FIX: escape XML-special characters ('&', '<', '>', quotes) BEFORE any SSML
        // markup is inserted, so user text cannot yield an invalid SSML document.
        // NOTE(review): assumes ReplaceUpperCaseWords emits plain text, not markup — confirm.
        text = System.Security.SecurityElement.Escape(text);
        text = text.Replace("/", "<break time=\"100ms\"/>");
        // Spell out all-caps words (likely acronyms) with a short pause after each.
        text = System.Text.RegularExpressions.Regex.Replace(text, "([A-Z]{2,})", "<say-as interpret-as=\"spell-out\">$1</say-as> <break time=\"50ms\"/>");
        text = $"<speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"{voiceInformation.Language}\">{text}</speak>";

        synthesizer.Voice = voiceInformation;
        stream = await synthesizer.SynthesizeSsmlToStreamAsync(text);
    }
    return stream;
}
/// <summary>
/// Synthesizes <paramref name="input"/> and lets the user export it to a sound
/// file via a save picker, then shows a success or failure message.
/// (FIX: large blocks of dead commented-out code removed from the message section.)
/// </summary>
/// <param name="input">The text to convert into a sound file.</param>
/// <param name="voice">The voice to use; when null the default system voice is kept.</param>
public async Task ExportSpeechToFile(string input, VoiceInformation voice = null)
{
    bool success = false;

    // Synthesize the spoken text.
    var synth = new SpeechSynthesizer();
    if (voice != null)
    {
        synth.Voice = voice;
    }
    SpeechSynthesisStream synthStream = await synth.SynthesizeTextToStreamAsync(input);

    // The device family decides which compressed formats are offered.
    var device = Windows.ApplicationModel.Resources.Core.ResourceContext.GetForCurrentView().QualifierValues;
    bool isDesktop = device.ContainsKey("DeviceFamily") && device["DeviceFamily"] == "Desktop";
    bool isXbox = device.ContainsKey("DeviceFamily") && device["DeviceFamily"] == "Xbox";

    // Build the save picker; wav is always available, mp3/wma depend on codecs.
    var exportPicker = new FileSavePicker();
    exportPicker.SuggestedStartLocation = PickerLocationId.Desktop;
    if (isDesktop)
    {
        exportPicker.FileTypeChoices.Add(".MP3", new List<string>() { ".mp3" });
        exportPicker.FileTypeChoices.Add(".WMA", new List<string>() { ".wma" });
    }
    else if (isXbox)
    {
        exportPicker.FileTypeChoices.Add(".WMA", new List<string>() { ".wma" });
    }
    exportPicker.FileTypeChoices.Add(".WAV", new List<string>() { ".wav" });
    exportPicker.SuggestedFileName = "Chitchat";

    // Let the user choose the target file; null means the picker was cancelled.
    var fileTarget = await exportPicker.PickSaveFileAsync();
    if (fileTarget != null)
    {
        if (fileTarget.FileType == ".wma" || fileTarget.FileType == ".mp3" || fileTarget.FileType == ".m4a")
        {
            // Compressed formats need a transcode pass.
            success = await this.ExportToMusicFormat(fileTarget, synthStream);
        }
        else if (fileTarget.FileType == ".wav")
        {
            // The synthesizer output already is wav, so copy the bytes straight through.
            try
            {
                using (var reader = new DataReader(synthStream))
                {
                    await reader.LoadAsync((uint)synthStream.Size);
                    IBuffer buffer = reader.ReadBuffer((uint)synthStream.Size);
                    await FileIO.WriteBufferAsync(fileTarget, buffer);
                }
                success = true;
            }
            catch (Exception ex)
            {
                Debug.WriteLine("Couldn't export to wav");
                Debug.WriteLine(ex);
            }
        }
    }

    // Show a toast to tell the user whether the export succeeded or failed.
    // NOTE(review): cancelling the picker also lands in the failure branch —
    // confirm that showing a "failed" message on cancel is intended.
    if (!success)
    {
        MessageHelper.ShowExportFailedMessage();
    }
    else
    {
        MessageHelper.ShowExportSuccessfulMessage(fileTarget.Path);
    }
}
// Constructor: stores the supplied platform voice descriptor on the model.
public VoiceModel(VoiceInformation voice) => Voice = voice;
/// <summary>
/// Executes a function that generates a speech stream and then converts and plays it in Unity.
/// </summary>
/// <param name="text">
/// A raw text version of what's being spoken for use in debug messages when speech isn't supported.
/// </param>
/// <param name="speakFunc">
/// The actual function that will be executed to generate speech.
/// </param>
private void PlaySpeech(string text, Func<IAsyncOperation<SpeechSynthesisStream>> speakFunc)
{
    // Make sure there's something to speak
    if (speakFunc == null) throw new ArgumentNullException(nameof(speakFunc));

    if (synthesizer == null)
    {
        Debug.LogErrorFormat("Speech not initialized. \"{0}\"", text);
        return;
    }

    // Need await, so most of this runs as a Task on a worker thread, which also
    // frees Unity's main thread to keep rendering.
    // FIX: the try/catch now lives INSIDE the async lambda. Previously it only
    // wrapped the Task.Run call itself, so any exception thrown inside the task
    // was never observed and the error was silently lost.
    Task.Run(async () =>
    {
        try
        {
            // Change voice?
            if (voice != TextToSpeechVoice.Default)
            {
                // Get name
                var voiceName = Enum.GetName(typeof(TextToSpeechVoice), voice);

                // See if it's never been found or is changing
                if ((voiceInfo == null) || (!voiceInfo.DisplayName.Contains(voiceName)))
                {
                    // Search for voice info
                    voiceInfo = SpeechSynthesizer.AllVoices.Where(v => v.DisplayName.Contains(voiceName)).FirstOrDefault();

                    // If found, select
                    if (voiceInfo != null)
                    {
                        synthesizer.Voice = voiceInfo;
                    }
                    else
                    {
                        Debug.LogErrorFormat("TTS voice {0} could not be found.", voiceName);
                    }
                }
            }

            // Speak and get stream
            var speechStream = await speakFunc();

            // Copy the stream into a managed byte buffer.
            var size = speechStream.Size;
            byte[] buffer = new byte[(int)size];
            using (var inputStream = speechStream.GetInputStreamAt(0))
            {
                // Close the original speech stream to free up memory
                speechStream.Dispose();
                using (var dataReader = new DataReader(inputStream))
                {
                    await dataReader.LoadAsync((uint)size);
                    dataReader.ReadBytes(buffer);
                }
            }

            // Convert raw WAV data into Unity audio data
            int sampleCount = 0;
            int frequency = 0;
            var unityData = ToUnityAudio(buffer, out sampleCount, out frequency);

            // The remainder must be done back on Unity's main thread
            UnityEngine.WSA.Application.InvokeOnAppThread(() =>
            {
                // Convert to an audio clip and play it.
                var clip = ToClip("Speech", unityData, sampleCount, frequency);
                audioSource.clip = clip;
                audioSource.Play();
            }, false);
        }
        catch (Exception ex)
        {
            Debug.LogErrorFormat("Speech generation problem: \"{0}\"", ex.Message);
        }
    });
}
/// <summary>
/// Constructor is private to facilitate the Singleton object pattern.
///
/// For each setting, the value is retrieved from the roaming store; when it
/// cannot be found, the documented default is applied instead.
/// (FIX: the ten copy-pasted "read, null-check, cast" blocks are factored into a
/// single local helper with identical semantics.)
/// </summary>
private Settings()
{
    _roamingSettings = ApplicationData.Current.RoamingSettings;

    // Reads a stored value, preserving the original semantics exactly: a missing
    // key yields the fallback, while a present-but-wrong-typed value still throws
    // InvalidCastException on the cast (as the original per-setting code did).
    T Read<T>(string key, T fallback)
    {
        object stored = _roamingSettings.Values[key];
        return stored == null ? fallback : (T)stored;
    }

    _filterPhrases = Read(_filterPhrasesKey, true);      // filtering phrases: default on
    _showShortcuts = Read(_showShortcutsKey, true);      // keyboard shortcuts: default on
    _useCategories = Read(_useCategoriesKey, true);      // categories: default on
    _speakWords = Read(_speakWordsKey, false);           // speak words: default off
    _speakSentences = Read(_speakSentencesKey, false);   // speak sentences: default off
    _speakPhrases = Read(_speakPhrasesKey, false);       // speak phrases on selection: default off
    _showNavigation = Read(_showNavigationKey, false);   // navigation buttons: default off
    _showSorting = Read(_showSortingKey, true);          // sorting buttons: default on
    _fontSize = Read(_fontSizeKey, 12);                  // font size: default 12
    _voiceVolume = Read(_volumeKey, 50);                 // volume: default 50%

    // Voice: look up the stored id among the installed voices, defaulting to the
    // first installed voice when the id is missing or no longer installed.
    bool found = false;
    object voiceId = _roamingSettings.Values[_voiceKey];
    if (voiceId != null)
    {
        string voiceIdString = (string)voiceId;
        Debug.WriteLine("Settings.cs: Voice id loaded from roaming settings is: " + voiceIdString);
        foreach (VoiceInformation voice in SpeechSynthesizer.AllVoices)
        {
            if (voice.Id.Equals(voiceIdString))
            {
                _voice = voice;
                found = true;
            }
        }
    }
    if (!found)
    {
        _voice = SpeechSynthesizer.AllVoices.FirstOrDefault();
        Debug.WriteLine("Settings.cs: Voice id loaded from default is: " + _voice.Id);
    }

    // Icons and labels: default is showing both icons and labels.
    _showIconsAndLabels = Read(_showIconsAndLabelsKey, _iconsAndLabels);
}
/// <summary>
/// Requests microphone access and runs a single speech-recognition pass in the
/// language of <paramref name="voiceInformation"/>. A confident result triggers
/// either back-navigation (for the go-back constraint) or <paramref name="doNext"/>;
/// anything else re-prompts via SpeakAndListen.
/// </summary>
async Task RecordSpeechFromMicrophoneAsync(VoiceInformation voiceInformation, Func<SpeechRecognitionResult, Task> doNext)
{
    if (!await AudioCapturePermissions.RequestMicrophonePermission())
    {
        return;
    }
    if (voiceInformation == null)
    {
        return;
    }

    if (!await DoRecognition())
    {
        // In some cases DoRecognition ends prematurely, e.g. when the user allows
        // access to the microphone but there is no microphone available, so do not
        // stop media — just prompt and listen again.
        await SpeakAndListen();
    }

    async Task<bool> DoRecognition()
    {
        using (SpeechRecognizer speechRecognizer = new SpeechRecognizer(new Windows.Globalization.Language(voiceInformation.Language)))
        {
            // Install the shared constraints and silence timeouts.
            foreach (var constraint in SpeechRecognitionConstraints.ToList())
            {
                speechRecognizer.Constraints.Add(constraint);
            }
            speechRecognizer.Timeouts.InitialSilenceTimeout = TimeSpan.FromSeconds(SpeechRecognitionConstants.InitialSilenceTimeout);
            speechRecognizer.Timeouts.EndSilenceTimeout = TimeSpan.FromSeconds(SpeechRecognitionConstants.EndSilenceTimeout);
            await speechRecognizer.CompileConstraintsAsync();

            SpeechRecognitionResult result = await speechRecognizer.RecognizeAsync();

            // Accept only successful recognitions with at least Low confidence.
            bool confident =
                result.Status == SpeechRecognitionResultStatus.Success
                && (result.Confidence == SpeechRecognitionConfidence.High
                    || result.Confidence == SpeechRecognitionConfidence.Medium
                    || result.Confidence == SpeechRecognitionConfidence.Low);
            if (!confident)
            {
                return false;
            }

            if (result.Constraint.Tag == SpeechRecognitionConstants.GOBACKTAG)
            {
                // The go-back command only succeeds when back-navigation is possible.
                if (!UiNotificationService.CanGoBack)
                {
                    return false;
                }
                await GoBack();
                return true;
            }

            // Options constraint succeeded.
            await doNext(result);
            return true;
        }
    }
}
/// <summary>
/// Handles the navigation parameter: reloads the dashboard list and then applies
/// the requested action (language switch, add, delete-last, speak-total, or
/// filter by state). FIX: the view-model initialization that every branch
/// duplicated verbatim is factored into one local function, and the unused
/// <c>saySomething</c> cast in the "TotalActual" branch is removed.
/// async void is required because OnNavigatedTo is a framework override.
/// </summary>
protected override async void OnNavigatedTo(NavigationEventArgs e)
{
    var parameters = e.Parameter.ToString();

    // Every branch starts from a freshly-loaded dashboard.
    void LoadDashboard()
    {
        DashboardViewModel = new DashboardViewModel();
        this.DataContext = DashboardViewModel;
        foreach (var item in DashboardViewModel.ListaPueblosMagicosSinMostrar)
        {
            DashboardViewModel.ListaPueblosMagicos.Add(item);
        }
    }

    if (string.IsNullOrEmpty(parameters))
    {
        LoadDashboard();
    }
    else if (parameters.Equals("Frances"))
    {
        LoadDashboard();
        lenguaje = "fr";
        btnNativo.IsChecked = false;
        btnFrench.IsChecked = true;
    }
    else if (parameters.Equals("Nativo"))
    {
        LoadDashboard();
        lenguaje = "MX";
        btnNativo.IsChecked = true;
        btnFrench.IsChecked = false;
    }
    else if (parameters.Equals("Agregar"))
    {
        LoadDashboard();
        Frame.Navigate(typeof(NewPuebloPage), "NewPueblo");
    }
    else if (parameters.Equals("EliminarUltimo"))
    {
        LoadDashboard();
        await EliminarPuebloMagico(DashboardViewModel.ListaPueblosMagicos.LastOrDefault());
    }
    else if (parameters.Equals("TotalActual"))
    {
        LoadDashboard();
        try
        {
            int totalPueblos = DashboardViewModel.ListaPueblosMagicos.Count;

            SpeechSynthesizer speechSynthesizer = new SpeechSynthesizer();
            // FIX: fall back to the default voice instead of assigning a possibly
            // null FirstOrDefault result to Voice (which throws).
            VoiceInformation voiceInformation =
                SpeechSynthesizer.AllVoices.FirstOrDefault(v => v.Gender == VoiceGender.Female && v.Language.Contains(lenguaje))
                ?? SpeechSynthesizer.DefaultVoice;
            speechSynthesizer.Voice = voiceInformation;

            string palabra = lenguaje == "fr"
                ? "Le chiffre total de Pueblos Magicos agregados es: " + totalPueblos
                : "El numero total de Pueblos Magicos agregados es: " + totalPueblos;

            if (!string.IsNullOrEmpty(palabra))
            {
                var speechStream = await speechSynthesizer.SynthesizeTextToStreamAsync(palabra);
                mediaElement.AutoPlay = true;
                mediaElement.SetSource(speechStream, speechStream.ContentType);
                mediaElement.Play();
            }
        }
        catch (Exception ex)
        {
            System.Diagnostics.Debug.WriteLine(ex.Message);
        }
    }
    else
    {
        // Any other parameter is treated as a state name to filter by.
        DashboardViewModel = new DashboardViewModel();
        this.DataContext = DashboardViewModel;
        var listoDisplay = DashboardViewModel.ListaPueblosMagicosSinMostrar.Where(x => x.Estado == parameters).ToList();
        foreach (var item in listoDisplay)
        {
            DashboardViewModel.ListaPueblosMagicos.Add(item);
        }
    }
}
/// <summary>
/// Copies the identifying fields (display name, id, language) out of the
/// platform voice descriptor into this model.
/// </summary>
public VoiceModel(VoiceInformation vi)
{
    Id = vi.Id;
    DisplayName = vi.DisplayName;
    Language = vi.Language;
}
/// <summary>
/// Executes a function that generates a speech stream and then converts and plays it in Unity.
/// </summary>
/// <param name="text">
/// A raw text version of what's being spoken for use in debug messages when speech isn't supported.
/// </param>
/// <param name="speakFunc">
/// The actual function that will be executed to generate speech.
/// </param>
private void PlaySpeech(string text, Func<IAsyncOperation<SpeechSynthesisStream>> speakFunc)
{
    // Make sure there's something to speak
    if (speakFunc == null)
    {
        throw new ArgumentNullException(nameof(speakFunc));
    }

    if (synthesizer == null)
    {
        Debug.LogErrorFormat("Speech not initialized. \"{0}\"", text);
        return;
    }

    speechTextInQueue = true;

    // Need await, so most of this runs as a Task on a worker thread, which also
    // frees Unity's main thread to keep rendering.
    // FIX: the try/catch now lives INSIDE the async lambda. Previously it only
    // wrapped the Task.Run call, so exceptions thrown inside the task were never
    // observed — and speechTextInQueue was left stuck at true.
    Task.Run(async () =>
    {
        try
        {
            // Change voice?
            if (voice != TextToSpeechVoice.Default)
            {
                // Get name
                var voiceName = Enum.GetName(typeof(TextToSpeechVoice), voice);

                // See if it's never been found or is changing
                if ((voiceInfo == null) || (!voiceInfo.DisplayName.Contains(voiceName)))
                {
                    // Search for voice info
                    voiceInfo = SpeechSynthesizer.AllVoices.Where(v => v.DisplayName.Contains(voiceName)).FirstOrDefault();

                    // If found, select
                    if (voiceInfo != null)
                    {
                        synthesizer.Voice = voiceInfo;
                    }
                    else
                    {
                        Debug.LogErrorFormat("TTS voice {0} could not be found.", voiceName);
                    }
                }
            }

            // Speak and get stream
            var speechStream = await speakFunc();

            // Copy the stream into a managed byte buffer.
            var size = speechStream.Size;
            byte[] buffer = new byte[(int)size];
            using (var inputStream = speechStream.GetInputStreamAt(0))
            {
                // Close the original speech stream to free up memory
                speechStream.Dispose();
                using (var dataReader = new DataReader(inputStream))
                {
                    await dataReader.LoadAsync((uint)size);
                    dataReader.ReadBytes(buffer);
                }
            }

            // Convert raw WAV data into Unity audio data
            int sampleCount = 0;
            int frequency = 0;
            var unityData = ToUnityAudio(buffer, out sampleCount, out frequency);

            // The remainder must be done back on Unity's main thread
            UnityEngine.WSA.Application.InvokeOnAppThread(() =>
            {
                // Convert to an audio clip, play it, and mark the queue as drained.
                var clip = ToClip("Speech", unityData, sampleCount, frequency);
                audioSource.clip = clip;
                audioSource.Play();
                speechTextInQueue = false;
            }, false);
        }
        catch (Exception ex)
        {
            speechTextInQueue = false;
            Debug.LogErrorFormat("Speech generation problem: \"{0}\"", ex.Message);
        }
    });
}
// Constructor: stores the platform voice descriptor backing this view model.
public VoiceViewModel(VoiceInformation voiceInformation) => _voiceInformation = voiceInformation;