Example #1
        void ExcelRead()
        {
            synthesizer.Volume = 80;
            // Voice selection reads the list box, so marshal the call onto the UI thread.
            Invoke(new Action(() =>
            {
                synthesizer.SelectVoice(listBox1.SelectedItem.ToString());
            }));

            // Walk the rows from 1 upward; the loop ends only via the blank-cell branch below.
            for (int i = 1; ; i++)
            {
                Excel.Range B = ex.get_Range("B" + Convert.ToString(i), Missing.Value);
                Excel.Range C = ex.get_Range("C" + Convert.ToString(i), Missing.Value);
                if (B.Text != "")
                {
                    WAVLocationDialog.FileName = B.Text + ".wav";
                    synthesizer.SetOutputToWaveFile(WAVLocationDialog.FileName, new SpeechAudioFormatInfo(11025, AudioBitsPerSample.Eight, AudioChannel.Mono));
                    synthesizer.Speak(C.Text);
                }
                else
                {
                    // Blank cell reached: close the workbooks and end the process.
                    ex.Workbooks.Close();
                    Environment.Exit(0);
                }
            }
        }
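Example #1 ends by calling Environment.Exit while the synthesizer may still hold the last wave file open, and the Excel COM instance is never quit. A minimal cleanup sketch, assuming `ex` is the Excel.Application and `synthesizer` is the shared SpeechSynthesizer used above (the helper itself is hypothetical, not part of the original code):

        void CleanUp(Excel.Application ex, SpeechSynthesizer synthesizer)
        {
            synthesizer.SetOutputToNull();   // detach from the wave file so its header is finalized and the handle released
            ex.Workbooks.Close();            // close any open workbooks
            ex.Quit();                       // shut down the Excel instance
            System.Runtime.InteropServices.Marshal.ReleaseComObject(ex); // release the COM wrapper so EXCEL.EXE can exit
        }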
Example #2
 public static async System.Threading.Tasks.Task<Byte[]> TextToSpeechServiceAsync(string text, System.Collections.Generic.Dictionary<string, string> apiArgs)
 {
     Log.WriteLine("text:\"" + text + "\"");
     //System.Globalization.CultureInfo ci = new System.Globalization.CultureInfo(Options.options.locale.language);
     using (System.Speech.Synthesis.SpeechSynthesizer synth = new System.Speech.Synthesis.SpeechSynthesizer())
     {
         // Explicitly specify audio settings. All services are ok with 16000/16/1. It's ok to cast options to enums as their values are identical.
         int sampleRate = int.Parse(apiArgs["sampleRate"]);
         System.Speech.AudioFormat.SpeechAudioFormatInfo si = new System.Speech.AudioFormat.SpeechAudioFormatInfo(sampleRate, (System.Speech.AudioFormat.AudioBitsPerSample)WoundifyShared.Options.options.audio.bitDepth, (System.Speech.AudioFormat.AudioChannel)WoundifyShared.Options.options.audio.channels);
         // TODO: use memory based file instead
         synth.SetOutputToWaveFile(Options.options.tempFolderPath + Options.options.audio.speechSynthesisFileName, si);
         synth.SelectVoiceByHints((System.Speech.Synthesis.VoiceGender)Options.commandservices["TextToSpeech"].voiceGender, (System.Speech.Synthesis.VoiceAge)Options.commandservices["TextToSpeech"].voiceAge);
         synth.Speak(text);
     }
     // Assumes ReadBytesFromFileAsync resolves the same file written above (which was placed under tempFolderPath).
     return(await Helpers.ReadBytesFromFileAsync(Options.options.audio.speechSynthesisFileName));
 }
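The TODO in Example #2 asks for a memory based output instead of a temp file. A minimal sketch of that variant using only System.Speech (the helper name and the use of the engine's default render format are choices made here, not part of the original project):

 public static byte[] TextToSpeechInMemory(string text)
 {
     using (var synth = new System.Speech.Synthesis.SpeechSynthesizer())
     using (var stream = new System.IO.MemoryStream())
     {
         // Render the RIFF/WAV data straight into the MemoryStream instead of a temp file.
         // SetOutputToWaveStream uses the engine's default format; SetOutputToAudioStream
         // would accept a SpeechAudioFormatInfo but writes headerless PCM.
         synth.SetOutputToWaveStream(stream);
         synth.Speak(text);
         synth.SetOutputToNull(); // detach so the wave header is finalized before reading
         return stream.ToArray();
     }
 }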
Example #3
        private static string generateSpeech(string userid, string condition, string pathResponseFile, string path)
        {
            // *********  generate wav file voicing the response *****************
            // Using Microsoft voices

            // instantiate a new speech synthesizer;
            // it needs its own dedicated thread...

            string response      = readResponse(pathResponseFile);
            string audioFilePath = "";

            SQLLog.InsertLog(DateTime.Now, "Beginning Thread", "Beginning Thread", "ResponseGeneration.moveSpeak", 1);
            Thread t = new Thread(() =>
            {
                try
                {
                    System.Speech.Synthesis.SpeechSynthesizer synth = new System.Speech.Synthesis.SpeechSynthesizer();
                    string gender = SQLConditionGenderInfo.GetGender(userid);
                    bool makecopy = false;

                    if (synth != null)
                    {
                        if (gender.Contains("female"))
                        {
                            synth.SelectVoice("Microsoft Zira Desktop");
                        }
                        else
                        {
                            synth.SelectVoice("Microsoft David Desktop");
                        }

                        if (condition == "nonsocial" || condition == "social")
                        {
                            synth.SetOutputToWaveFile(path + "data\\agentAudio\\transformed.wav");
                            audioFilePath = "transformed.wav";
                            makecopy      = true;
                        }
                        else
                        {
                            synth.SetOutputToWaveFile(path + "data\\agentAudio\\response.wav");
                            audioFilePath = "response.wav";
                        }

                        synth.Speak(response);

                        if (makecopy)
                        {
                            // Capture the timestamp once so the saved file name and the returned path match.
                            DateTime now = DateTime.Now;
                            string formatFileName = string.Format("data\\agentAudio\\transformed_{0:yyyy-MM-dd_hh-mm-ss-tt}.wav", now);
                            audioFilePath         = string.Format("transformed_{0:yyyy-MM-dd_hh-mm-ss-tt}.wav", now);
                            synth.SetOutputToWaveFile(path + formatFileName);
                            synth.Speak(response);
                        }

                        synth.SetOutputToNull();
                    }
                }
                catch (Exception e)
                {
                    // Include the exception text so the failure is diagnosable from the log.
                    SQLLog.InsertLog(DateTime.Now, "Something went wrong in generating speech", e.Message, "ResponseGeneration.generateSpeech", 1);
                }
            });

            t.Start();
            t.Join();

            Thread.Sleep(1000);

            return(audioFilePath);
        }
Example #4
        private bool SynthesizeAudio(string outputFile, bool isSsml, string text)
        {
            if (this.synthesizerType == "Windows")
            {
                using var synth = new System.Speech.Synthesis.SpeechSynthesizer();

                try {
                    // Make the newer OneCore voices visible to System.Speech; if this
                    // fails, silently fall back to the default desktop voices.
                    synth.InjectOneCoreVoices();
                }
                catch {
                }

                synth.Rate   = rate;
                synth.Volume = volume;

                synth.SetOutputToWaveFile(outputFile);

                finished = false;
                failed   = false;

                synth.SpeakCompleted += OnSpeakCompleted;

                if (isSsml)
                {
                    synth.SpeakSsmlAsync(text);
                }
                else
                {
                    synth.SpeakAsync(text);
                }

                // Poll until OnSpeakCompleted flips the finished flag.
                while (!finished)
                {
                    Thread.Sleep(100);
                }

                return(true);
            }
            else
            {
                RenewToken();

                using var audioConfig = AudioConfig.FromWavFileOutput(outputFile);
                using var synthesizer = new Microsoft.CognitiveServices.Speech.SpeechSynthesizer(config, audioConfig);

                finished = false;
                failed   = false;

                synthesizer.SynthesisCanceled  += OnSynthesisCanceled;
                synthesizer.SynthesisCompleted += OnSynthesisCompleted;

                if (isSsml)
                {
                    synthesizer.SpeakSsmlAsync(text);
                }
                else
                {
                    synthesizer.SpeakTextAsync(text);
                }

                while (!finished)
                {
                    Thread.Sleep(100);
                }

                return(!failed);
            }
        }
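Example #4 polls `finished` and `failed`, which are set by event handlers that are not shown. A sketch of what those handlers could look like, assuming the field and method names referenced above (the bodies are guesses consistent with the two APIs, not the author's code):

        private volatile bool finished;
        private volatile bool failed;

        // System.Speech path: SpeakCompleted fires when the asynchronous prompt ends.
        private void OnSpeakCompleted(object sender, System.Speech.Synthesis.SpeakCompletedEventArgs e)
        {
            failed   = e.Cancelled || e.Error != null;
            finished = true;
        }

        // Cognitive Services path: completion and cancellation arrive as separate events.
        private void OnSynthesisCompleted(object sender, Microsoft.CognitiveServices.Speech.SpeechSynthesisEventArgs e)
        {
            finished = true;
        }

        private void OnSynthesisCanceled(object sender, Microsoft.CognitiveServices.Speech.SpeechSynthesisEventArgs e)
        {
            failed   = true;
            finished = true;
        }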