public static void PromptBuilder()
{
    // Prompt-engine namespace, pre-quoted so it can be concatenated straight into raw markup.
    string promptEngineNs = "\"http://schemas.microsoft.com/Speech/2003/03/PromptEngine\"";
    string promptOutputOpen = "<peml:prompt_output xmlns:peml=" + promptEngineNs + ">";
    string promptOutputClose = "</peml:prompt_output>";

    // Build a prompt mixing plain text, a pronunciation hint, and raw SSML markup.
    PromptBuilder prompt = new PromptBuilder();
    prompt.AppendText("test");
    prompt.AppendTextWithPronunciation("foo", "bar");
    prompt.AppendSsmlMarkup(promptOutputOpen);
    prompt.AppendSsmlMarkup("hello");
    prompt.AppendSsmlMarkup(promptOutputClose);

    // The raw markup must survive into the generated XML, and basic properties must hold.
    Assert.Contains("hello", prompt.ToXml());
    Assert.Equal(CultureInfo.CurrentCulture, prompt.Culture);
    Assert.False(prompt.IsEmpty);

    // Round-trip: the XML the builder emits must itself be accepted by AppendSsml.
    string ssml = prompt.ToXml();
    prompt.AppendSsml(XmlTextReader.Create(new StringReader(ssml)));
}
public void SpeechSynthesizerEventsAndProperties()
{
    using (var synthesizer = new SpeechSynthesizer())
    {
        using var outputStream = new MemoryStream();

        // Exercise the output plumbing and voice selection before speaking.
        synthesizer.SetOutputToNull();
        synthesizer.SetOutputToAudioStream(outputStream, new SpeechAudioFormatInfo(16000, AudioBitsPerSample.Sixteen, AudioChannel.Stereo));
        synthesizer.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Adult);

        Assert.True(synthesizer.Volume > 0);
        Assert.NotNull(synthesizer.Voice);
        Assert.NotEmpty(synthesizer.GetInstalledVoices());
        Assert.Null(synthesizer.GetCurrentlySpokenPrompt());

        var prompt = new PromptBuilder();
        prompt.AppendText("synthesizer");

        // Count every synthesis event; the SpeakCompleted handler pins the expected total.
        int eventCount = 0;
        synthesizer.BookmarkReached += (o, e) => eventCount++;
        synthesizer.PhonemeReached += (o, e) => eventCount++;
        synthesizer.SpeakProgress += (o, e) => eventCount++;
        synthesizer.SpeakStarted += (o, e) => eventCount++;
        synthesizer.VisemeReached += (o, e) => eventCount++;
        synthesizer.VoiceChange += (o, e) => eventCount++;
        synthesizer.StateChanged += (o, e) => eventCount++;
        synthesizer.SpeakCompleted += (o, e) =>
        {
            eventCount++;
            // 34 events are expected to have fired by completion for this prompt.
            Assert.Equal(34, eventCount++);
        };

        // Speaking synchronously should leave the synthesizer Ready on both sides.
        Assert.Equal(SynthesizerState.Ready, synthesizer.State);
        synthesizer.SpeakSsml(prompt.ToXml());
        Assert.Equal(SynthesizerState.Ready, synthesizer.State);

        // Pause/Resume must round-trip the state machine.
        synthesizer.Pause();
        Assert.Equal(SynthesizerState.Paused, synthesizer.State);
        synthesizer.Resume();
        Assert.Equal(SynthesizerState.Ready, synthesizer.State);
    }
}
/// <summary>
/// One simulation step: collects phoneme neurons that fired into the pending
/// phrase, wires unconnected fired neurons into the knowledge store, fires the
/// next queued phoneme, and speaks the phrase once phoneme input goes quiet.
/// </summary>
public override void Fire()
{
    Init(); //be sure to leave this here
    if (synth == null) { return; }

    // "paused" stays true only if no single-character (phoneme) neuron fired this step.
    bool paused = true;
    // NOTE(review): loop starts at 1, presumably skipping a reserved neuron at index 0 — confirm.
    for (int i = 1; i < na.NeuronCount; i++)
    {
        Neuron n = na.GetNeuronAt(i);
        if (n.Fired())
        {
            // Single-character labels are phonemes; accumulate them into the phrase.
            if (n.Label.Length == 1)
            {
                phraseToSpeak += n.Label;
                paused = false;
            }
            if (n.Synapses.Count == 0)
            {
                //if a neuron fired and it has no connection, connect it to the knowledge store
                //connection to KB
                ModuleUKSN nmKB = (ModuleUKSN)FindModuleByType(typeof(ModuleUKSN));
                if (nmKB != null)
                {
                    // Knowledge-store entries for phonemes are named "pn" + phoneme character.
                    string label = "pn" + n.Label;
                    List<Thing> phonemes = nmKB.Labeled("Phoneme").Children;
                    Thing pn = nmKB.Labeled(label, phonemes);
                    if (pn == null) //this should always be null
                    {
                        pn = nmKB.AddThing(label, new Thing[] { nmKB.Labeled("Phoneme") }, pn);
                    }
                    // Link this neuron to the store's neuron for the phoneme and fire it.
                    Neuron n1 = nmKB.GetNeuron(pn);
                    if (n1 != null)
                    {
                        n.AddSynapse(n1.Id, 1);
                        n1.SetValue(1);
                    }
                }
            }
        }
    }

    // Fire the neuron whose label matches the next queued phoneme, one per step.
    if (phonemesToFire != "")
    {
        char c = phonemesToFire[0];
        bool fired = false;
        for (int i = 0; i < na.NeuronCount; i++)
        {
            Neuron n = na.GetNeuronAt(i);
            if (n.Label == c.ToString())
            {
                n.SetValue(1);
                fired = true;
                break;
            }
        }
        if (!fired)
        {
            // No neuron carries this phoneme's label; deliberate no-op (debug hook).
            Utils.Noop();
        }
        phonemesToFire = phonemesToFire.Substring(1);
    }

    // Phoneme input has paused and a phrase is pending: display and speak it.
    if (paused && phraseToSpeak != "")
    {
        if (dlg != null)
        {
            ((ModuleSpeakPhonemesDlg)dlg).SetLabel(phraseToSpeak);
        }
        if (na.GetNeuronAt("Enable").Fired())
        {
            ModuleSpeechIn msi = (ModuleSpeechIn)FindModuleByType(typeof(ModuleSpeechIn));
            if (msi != null)
            {
                msi.PauseRecognition(); //if there is a recognizer active
            }
            //synth.SpeakAsync(phraseToSpeak + ".");
            //phraseToSpeak = "";
            // Typed-in text uses the David voice at normal rate; phoneme-built
            // phrases use Zira spoken extra slowly.
            PromptBuilder pb1 = new PromptBuilder();
            if (typedIn)
            {
                pb1.StartVoice("Microsoft David Desktop");
                pb1.StartStyle(new PromptStyle(PromptRate.Medium));
            }
            else
            {
                pb1.StartVoice("Microsoft Zira Desktop");
                pb1.StartStyle(new PromptStyle(PromptRate.ExtraSlow));
            }
            pb1.AppendTextWithPronunciation("not used", phraseToSpeak);
            pb1.EndStyle();
            pb1.EndVoice();
            string x = pb1.ToXml();
            Debug.WriteLine(debugString(phraseToSpeak));
            //synth.Speak(pb1);
            synth.SpeakAsync(pb1);
        }
        //string heard = GetPronunciationFromText("", phraseToSpeak); //it would be nice to hear what was said but it doesn't work with this engine
        phraseToSpeak = "";
        typedIn = false;
    }
}
/// <summary>
/// One simulation step: handles Cancel/Validate control neurons, collects fired
/// phoneme neurons into the pending phrase, wires unconnected or newly created
/// phoneme neurons into the UKS knowledge store (both directions), fires the
/// next queued phoneme, and speaks the phrase once phoneme input goes quiet.
/// </summary>
public override void Fire()
{
    Init(); //be sure to leave this here
    if (synth == null) { return; }

    if (GetNeuronValue("Cancel") == 1)
    {
        synth.SpeakAsyncCancelAll();
    }

    if (GetNeuronValue("Validate") == 1)
    {
        if (!validating)
        {
            // Entering validation mode: reset all counters.
            hitWords.Clear();
            missWords.Clear();
            missPhrase.Clear();
            hit = 0;
            miss = 0;
        }
        validating = true;
    }
    else
    {
        if (validating)
        {
            // Leaving validation mode: report phoneme- and word-level accuracy.
            if (hit + miss == 0)
            {
                Debug.WriteLine("No Validation Data");
            }
            else
            {
                Debug.WriteLine("Validation: " + hit + " / " + miss + " = " + 100 * hit / (hit + miss));
                // FIX: guard the word-level ratio separately — hit + miss can be
                // non-zero while both word lists are still empty, which previously
                // threw DivideByZeroException here.
                if (hitWords.Count + missWords.Count > 0)
                {
                    Debug.WriteLine("Validation: " + hitWords.Count + " / " + missWords.Count + " = " + 100 * hitWords.Count / (hitWords.Count + missWords.Count));
                }
            }
        }
        validating = false;
    }

    // "paused" stays true only if no single-character (phoneme) neuron fired this step.
    bool paused = true;
    // NOTE(review): loop starts at 3, presumably skipping control neurons at the
    // front of the array (Cancel/Validate/Enable) — confirm.
    for (int i = 3; i < na.NeuronCount; i++)
    {
        Neuron n = na.GetNeuronAt(i);
        if (n.Fired())
        {
            if (n.Label.Length == 1)
            {
                phraseToSpeak += n.Label;
                paused = false;
            }
            if (n.Synapses.Count == 0)
            {
                //connect it to the knowledge store
                //connection to KB
                if (FindModuleByName("AudibleUKS") is ModuleUKS2 UKS)
                {
                    // Knowledge-store entries for phonemes are named "pn" + phoneme character.
                    string label = "pn" + n.Label;
                    List<Thing> phonemes = UKS.Labeled("Phoneme").Children;
                    Thing pn = UKS.Labeled(label, phonemes);
                    if (pn == null) //this should always be null
                    {
                        pn = UKS.AddThing(label, new Thing[] { UKS.Labeled("Phoneme") }, pn);
                    }
                    Neuron n1 = UKS.GetNeuron(pn);
                    Neuron n2 = UKS.GetNeuron(pn, false);
                    // FIX: n2 was dereferenced while only n1 was null-checked,
                    // risking a NullReferenceException; guard both.
                    if (n1 != null && n2 != null)
                    {
                        n.AddSynapse(n1.Id, 1);
                        n1.SetValue(1);
                        n2.AddSynapse(n.Id, 1);
                    }
                }
            }
        }
    }

    // Fire the neuron matching the next queued phoneme, creating it on demand.
    if (phonemesToFire != "")
    {
        char c = phonemesToFire[0];
        bool fired = false;
        if (c != ' ')
        {
            for (int i = 0; i < na.NeuronCount; i++)
            {
                Neuron n = na.GetNeuronAt(i);
                if (n.Label == c.ToString())
                {
                    n.SetValue(1);
                    fired = true;
                    break;
                }
            }
            if (!fired)
            {
                // No neuron carries this label yet: create one and wire it into
                // the knowledge store the same way as above.
                Neuron n = AddLabel(c.ToString());
                //connect it to the knowledge store
                //connection to KB
                if (FindModuleByName("AudibleUKS") is ModuleUKS2 UKS)
                {
                    string label = "pn" + n.Label;
                    List<Thing> phonemes = UKS.Labeled("Phoneme").Children;
                    Thing pn = UKS.Labeled(label, phonemes);
                    if (pn == null) //this should always be null
                    {
                        pn = UKS.AddThing(label, new Thing[] { UKS.Labeled("Phoneme") }, pn);
                    }
                    Neuron n1 = UKS.GetNeuron(pn);
                    Neuron n2 = UKS.GetNeuron(pn, false);
                    // FIX: same missing n2 null check as in the loop above.
                    if (n1 != null && n2 != null)
                    {
                        n.AddSynapse(n1.Id, 1);
                        n2.AddSynapse(n.Id, 1);
                        n.SetValue(1);
                    }
                }
            }
        }
        phonemesToFire = phonemesToFire.Substring(1);
    }

    // Phoneme input has paused and a phrase is pending: display and speak it.
    if (paused && phraseToSpeak != "")
    {
        if (dlg != null)
        {
            ((ModuleSpeakPhonemes2Dlg)dlg).SetLabel(phraseToSpeak);
        }
        if (na.GetNeuronAt("Enable").Fired())
        {
            ModuleSpeechIn msi = (ModuleSpeechIn)FindModuleByType(typeof(ModuleSpeechIn));
            if (msi != null)
            {
                msi.PauseRecognition(); //if there is a recognizer active
            }
            //synth.SpeakAsync(phraseToSpeak + ".");
            //phraseToSpeak = "";
            // Typed-in text uses the David voice at normal rate; phoneme-built
            // phrases use Zira spoken slowly.
            PromptBuilder pb1 = new PromptBuilder();
            if (typedIn)
            {
                pb1.StartVoice("Microsoft David Desktop");
                pb1.StartStyle(new PromptStyle(PromptRate.Medium));
            }
            else
            {
                pb1.StartVoice("Microsoft Zira Desktop");
                pb1.StartStyle(new PromptStyle(PromptRate.Slow));
            }
            pb1.AppendTextWithPronunciation("not used", phraseToSpeak.Trim());
            pb1.EndStyle();
            pb1.EndVoice();
            string x = pb1.ToXml();
            Debug.WriteLine(debugString(phraseToSpeak));
            //synth.Speak(pb1);
            synth.SpeakAsync(pb1);
        }
        //string heard = GetPronunciationFromText("", phraseToSpeak); //it would be nice to hear what was said but it doesn't work with this engine
        phraseToSpeak = "";
        typedIn = false;
    }
}
/// <summary>
/// Start speaking the supplied text, or cancel the in-progress speech
/// </summary>
public void SpeakSsml(IEnumerable<string> ssmls, Action onComplete, int? volume = null, int? rate = null, string voice = null)
{
    Log.Info("SpeakSsml called");
    try
    {
        // Nothing to say — bail out before touching the synthesiser.
        if (!ssmls.Any())
        {
            return;
        }

        // Explicit arguments win; otherwise fall back to the persisted settings.
        speechSynthesiser.Rate = rate ?? Settings.Default.SpeechRate;
        speechSynthesiser.Volume = volume ?? Settings.Default.SpeechVolume;

        // One-shot completion handler: detaches itself, then runs the callback.
        speakCompleted = (sender, args) =>
        {
            if (speakCompleted != null)
            {
                speechSynthesiser.SpeakCompleted -= speakCompleted;
            }
            speakCompleted = null;
            if (onComplete != null)
            {
                onComplete();
            }
        };
        speechSynthesiser.SpeakCompleted += speakCompleted;

        string voiceToUse = voice ?? Settings.Default.SpeechVoice;
        bool wrapInVoiceElement = !string.IsNullOrWhiteSpace(voiceToUse);

        // Assemble the prompt, wrapping the fragments in a <voice> element when
        // a specific voice has been requested or configured.
        var promptBuilder = new PromptBuilder();
        if (wrapInVoiceElement)
        {
            promptBuilder.AppendSsmlMarkup(string.Format("<voice name=\"{0}\">", voiceToUse));
        }
        foreach (var ssml in ssmls)
        {
            promptBuilder.AppendSsmlMarkup(ssml);
        }
        if (wrapInVoiceElement)
        {
            promptBuilder.AppendSsmlMarkup("</voice>");
        }

        Log.InfoFormat("Speaking prompt '{0}' with volume '{1}', rate '{2}' and voice '{3}'", promptBuilder.ToXml(), volume, rate, voice);
        speechSynthesiser.SpeakAsync(promptBuilder);
    }
    catch (Exception exception)
    {
        PublishError(this, exception);
    }
}