public override void Speak(SpeechClient.Speech speech)
        {
            try
            {
                PromptBuilder p = new PromptBuilder();
                p.Culture = tts.Voice.Culture;
                p.StartVoice(p.Culture);
                p.StartSentence();

                p.StartStyle(new PromptStyle(PromptEmphasis.None));
                // Interleave each text fragment with its bookmark; once a fragment
                // has no matching bookmark, append the rest of the text in one go.
                for (int i = 0; i < speech.Text.Length; i++)
                {
                    if (speech.Bookmarks == null || speech.Bookmarks.Length <= i || speech.Bookmarks[i] == "")
                    {
                        string s = "";
                        for (; i < speech.Text.Length; i++) s += speech.Text[i] + " ";
                        p.AppendSsmlMarkup(s);
                        break;
                    }
                    p.AppendSsmlMarkup(speech.Text[i]);
                    p.AppendBookmark(speech.Bookmarks[i]);
                }
                p.EndStyle();
                p.EndSentence();
                p.EndVoice();
                currentSpeech = speech;
                if (speech.Id != "") ids.Add(tts.SpeakAsync(p), speech.Id);
                else tts.SpeakAsync(p);
                
            }
            catch (Exception e)
            {
                Console.WriteLine("WindowsTTS Failed: " + e.Message);
            }
        }
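
The bookmarks appended above only matter if something listens for them. A minimal companion sketch, assuming tts is the SpeechSynthesizer and ids maps each Prompt returned by SpeakAsync to a speech id (which is what the ids.Add call above implies):

        // Subscribe once (e.g. in the constructor): report each bookmark as playback reaches it.
        tts.BookmarkReached += (sender, e) =>
        {
            string id;
            if (ids.TryGetValue(e.Prompt, out id))
                Console.WriteLine("Speech " + id + " reached bookmark " + e.Bookmark);
        };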
Example #2
        private static void SynthToCam(string text, CameraWindow cw)
        {
            // PCM, 11025 Hz, 16-bit mono: 11025 samples/s * 2 bytes = 22050 average bytes per second, block align 2.
            var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22050, 2, null);
            using (var synthesizer = new SpeechSynthesizer())
            {
                using (var waveStream = new MemoryStream())
                {

                    //write one second of silence (22050 bytes at 11025 Hz, 16-bit mono) so the camera can initialise properly
                    var silence = new byte[22050];
                    waveStream.Write(silence, 0, silence.Length);

                    var pbuilder = new PromptBuilder();
                    var pStyle = new PromptStyle
                    {
                        Emphasis = PromptEmphasis.Strong,
                        Rate = PromptRate.Slow,
                        Volume = PromptVolume.ExtraLoud
                    };

                    pbuilder.StartStyle(pStyle);
                    pbuilder.StartParagraph();
                    pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
                    pbuilder.StartSentence();
                    pbuilder.AppendText(text);
                    pbuilder.EndSentence();
                    pbuilder.EndVoice();
                    pbuilder.EndParagraph();
                    pbuilder.EndStyle();

                    synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
                    synthesizer.Speak(pbuilder);
                    synthesizer.SetOutputToNull();

                    //write some silence to the stream to allow camera to end properly
                    waveStream.Write(silence, 0, silence.Length);
                    waveStream.Seek(0, SeekOrigin.Begin);

                    var ds = new DirectStream(waveStream) { RecordingFormat = new WaveFormat(11025, 16, 1) };
                    var talkTarget = TalkHelper.GetTalkTarget(cw.Camobject, ds); 
                    ds.Start();
                    talkTarget.Start();
                    while (ds.IsRunning)
                    {
                        Thread.Sleep(100);
                    }
                    ds.Stop();
                    talkTarget.Stop();
                    talkTarget = null;
                    ds = null;
                }
            }


        }
Example #3
File: Speech.cs Project: matti16/SOTI
        public void Say(string text, int volume, int rate)
        {
            //foreach (InstalledVoice voice in sp.GetInstalledVoices())
            //{
            //    VoiceInfo info = voice.VoiceInfo;

            //    Console.WriteLine(" Name:          " + info.Name);
            //    Console.WriteLine(" Culture:       " + info.Culture);
            //    Console.WriteLine(" Age:           " + info.Age);
            //    Console.WriteLine(" Gender:        " + info.Gender);
            //    Console.WriteLine(" Description:   " + info.Description);
            //    Console.WriteLine(" ID:            " + info.Id);
            //}

            if (volume >= 0 && volume <= 100)
                sp.Volume = volume;
            else
                sp.Volume = 100;

            // rate controls the reading speed
            if (rate >= -10 && rate <= 10)
                sp.Rate = rate;
            else
                sp.Rate = 0;

            //CultureInfo culture = CultureInfo.CreateSpecificCulture("it-IT");
            //spSynth.SelectVoiceByHints(VoiceGender.Male, VoiceAge.Teen, 0, culture);
            //spSynth.SelectVoice("ScanSoft Silvia_Dri40_16kHz");
            //spSynth.SelectVoice("Microsoft Elsa Desktop");
            //spSynth.SelectVoice("Paola");
            //spSynth.SelectVoice("Luca");
            //spSynth.SelectVoice("Roberto");

            PromptBuilder builder = new PromptBuilder();

            builder.StartVoice("Luca");
            builder.StartSentence();

            builder.StartStyle(new PromptStyle() { Emphasis = PromptEmphasis.Strong, Rate = PromptRate.Medium });
            string high = "<prosody pitch=\"x-high\"> " + text + " </prosody >";
            builder.AppendSsmlMarkup(high);
            builder.EndStyle();
            builder.EndSentence();
            builder.EndVoice();

            // Asynchronous
            sp.SpeakAsync(builder);
        }
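
StartVoice("Luca") hard-codes a voice that may not be installed on the target machine, in which case speaking the prompt fails. A minimal guard sketch, assuming sp is the SpeechSynthesizer used above (requires a using System.Linq directive):

            // Fall back to the synthesizer's current voice when "Luca" is not installed.
            bool hasLuca = sp.GetInstalledVoices().Any(v => v.Enabled && v.VoiceInfo.Name == "Luca");
            builder.StartVoice(hasLuca ? "Luca" : sp.Voice.Name);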
Example #4
        private async Task DoSay(string actualPhrase)
        {
            var bitchName = _settingAgent.GetSetting("BitchName", Constants.DefaultBitchName);
            var newGuid   = Guid.NewGuid();

            _eventHub.InvokeStartTalkingEvent(newGuid, bitchName, actualPhrase);

            var task = Task.Run(() =>
            {
                var builder = new PromptBuilder();
                builder.StartSentence();
                builder.AppendText(actualPhrase);
                builder.EndSentence();

                using (var synthesizer = new SpeechSynthesizer())
                {
                    var voices = synthesizer.GetInstalledVoices();
                    var voice  = voices.LastOrDefault(x => x.VoiceInfo.Gender == VoiceGender.Female);
                    if (voice == null)
                    {
                        voice = voices.FirstOrDefault();
                    }
                    if (voice == null)
                    {
                        throw new InvalidOperationException("Cannot find any installed voices.");
                    }

                    //synthesizer.SelectVoice("Microsoft David Desktop");
                    //synthesizer.SelectVoice("Microsoft Hazel Desktop");
                    //synthesizer.SelectVoice("Microsoft Zira Desktop");

                    synthesizer.SelectVoice(voice.VoiceInfo.Name);
                    synthesizer.Speak(builder);
                }
            });

            await task;

            _eventHub.InvokeDoneTalkingEvent(newGuid);
        }
Example #5
        void recEngine_SpeachSpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            switch (e.Result.Text)
            {
                case "say hello":
                    //MessageBox.Show("Hello Denis. How are you?"); break;

                    PromptBuilder promtBuilder = new PromptBuilder();
                    promtBuilder.StartSentence();
                    promtBuilder.AppendText("Hello Denis");
                    promtBuilder.EndSentence();

                    promtBuilder.AppendBreak(PromptBreak.ExtraSmall);
                    promtBuilder.AppendText("How are you?");

                    // Speak the prompt built above, including the pause.
                    syncSpeechSynthesizer.SpeakAsync(promtBuilder); break;
                case "print my name":
                    richTextBox1.Text += "\nDenis"; break;
                case "speak selected text":
                    syncSpeechSynthesizer.SpeakAsync(richTextBox1.SelectedText); break;
            }
        }
Example #6
        /// <summary>
        /// Synthesize more elaborated speeches using prompts
        /// </summary>
        /// <param name="sentences"></param>
        // <Improvement> Incomplete implementation
        // Automatically determine proper output generation so Core doesn't need to bother because anyway Voice facility isn't part of Core
        // Utilize PromptRate, PromptVolume, PromptEmphasis, Voice, and Pause(break) when appropriate, according to sentence structure, tone, phrase type (Content type, SayAa()) and other information
        public void BuildSpeech(List <Tuple <string, SpeechTone> > sentences)
        {
            PromptBuilder builder = new PromptBuilder();

            builder.StartVoice(VoiceGender.Female, VoiceAge.Adult);

            foreach (Tuple <string, SpeechTone> sentence in sentences)
            {
                builder.StartSentence();
                switch (sentence.Item2)
                {
                case SpeechTone.Normal:
                case SpeechTone.Joyous:
                case SpeechTone.Naughty:
                    // Joyous and Naughty currently read like Normal; see the <Improvement> note above.
                    builder.AppendText(sentence.Item1);
                    break;

                case SpeechTone.Soft:
                    builder.StartStyle(new PromptStyle(PromptVolume.ExtraSoft));
                    builder.AppendText(sentence.Item1);
                    builder.EndStyle();
                    break;

                default:
                    break;
                }
                builder.EndSentence();
            }

            builder.EndVoice();

            SpeechSynthesizer.Speak(builder);
        }
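
Per the <Improvement> note, the Joyous and Naughty branches are placeholders. A minimal sketch of how they might map onto PromptStyle; the tone-to-style pairings here are assumptions, not part of the original:

                case SpeechTone.Joyous:
                    // Assumed mapping: faster and louder reads as more upbeat.
                    builder.StartStyle(new PromptStyle { Rate = PromptRate.Fast, Volume = PromptVolume.Loud });
                    builder.AppendText(sentence.Item1);
                    builder.EndStyle();
                    break;

                case SpeechTone.Naughty:
                    // Assumed mapping: slower, softer delivery.
                    builder.StartStyle(new PromptStyle { Rate = PromptRate.Slow, Volume = PromptVolume.Soft });
                    builder.AppendText(sentence.Item1);
                    builder.EndStyle();
                    break;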
Example #7
        /// <summary>
        /// Saves the voice, with START and STOP sounds, as a WAVE file. BETA
        /// </summary>
        public void SaveSay1(String sText, String VoiceName, String StartSound, String StopSound, int iVolume, int iRate)
        {
            var synthFormat      = new SpeechAudioFormatInfo(8000, AudioBitsPerSample.Sixteen, AudioChannel.Mono);
            var synthesizer      = new SpeechSynthesizer();
            var waveStream       = new MemoryStream();
            var waveFileStream   = new FileStream(@".\mywave.wav", FileMode.OpenOrCreate);
            var pbuilder         = new PromptBuilder();
            var pStyle           = new PromptStyle();
            var aSaveFileDialog1 = new SaveFileDialog();

            //---
            pStyle.Emphasis = PromptEmphasis.None;
            pStyle.Rate     = PromptRate.Fast;
            pStyle.Volume   = PromptVolume.ExtraLoud;
            pbuilder.StartStyle(pStyle);
            pbuilder.StartParagraph();
            pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Teen, 2);
            pbuilder.StartSentence();
            pbuilder.AppendText("This is some text.");
            pbuilder.EndSentence();
            pbuilder.EndVoice();
            pbuilder.EndParagraph();
            pbuilder.EndStyle();
            synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();
            waveStream.WriteTo(waveFileStream);
            waveFileStream.Close();

            /*
             * aSaveFileDialog1.Filter = "wave files (*.wav)|*.wav";
             * aSaveFileDialog1.DefaultExt = "*.wav";
             * aSaveFileDialog1.Title = "Stimme als Wave speichern";
             * aSaveFileDialog1.FileName = "";
             * aSaveFileDialog1.ShowDialog();
             */
        }
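
One caveat in SaveSay1: SetOutputToAudioStream writes raw PCM with no RIFF header, so the bytes saved to mywave.wav are not a playable .wav file. A minimal alternative sketch that lets the synthesizer write the header itself:

            // SetOutputToWaveFile produces a complete RIFF/WAVE file in the requested format.
            synthesizer.SetOutputToWaveFile(@".\mywave.wav", synthFormat);
            synthesizer.Speak(pbuilder);
            synthesizer.SetOutputToNull();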
        ////////////////////////////////////////////////////////////////////////////////////////////////////
        /// <summary>   Process the speech request message described by msg. </summary>
        ///
        /// <param name="msg">  The message. </param>
        ///
        /// <returns>   True if it succeeds, false if it fails. </returns>
        ////////////////////////////////////////////////////////////////////////////////////////////////////

        bool ProcessSpeechRequestMessage(SpeechRequestMessage msg)
        {
            WriteLineInColor("Received Speech Bot Request", ConsoleColor.Red);
            WriteLineInColor("Text to speak: " + msg.text, ConsoleColor.Yellow);

            voice?.SelectVoiceByHints(msg.maleSpeaker == 1 ? VoiceGender.Male : VoiceGender.Female);

            Ensure.Argument(msg.volume).GreaterThanOrEqualTo(0);
            Ensure.Argument(msg.volume).LessThanOrEqualTo(100);
            Ensure.Argument(msg.rate).GreaterThanOrEqualTo(-10);
            Ensure.Argument(msg.rate).LessThanOrEqualTo(10);

            voice.Volume = msg.volume;
            voice.Rate   = msg.rate;

            PromptBuilder builder = new PromptBuilder();

            builder.StartSentence();
            builder.AppendText(msg.text);
            builder.EndSentence();
            voice.SpeakAsync(builder);
            return(true);
        }
Example #10
        /// <summary>
        /// Plays _Text through the voice synthesizer
        /// </summary>
        public static void SpeakText(string _Text, VoiceInfo _Voice = null)
        {
            // Speech synthesizer is ready
            if (IsReady)
            {
                PromptBuilder builder = new PromptBuilder();

                // Get default voice if not specified
                if (_Voice == null)
                {
                    _Voice = SynthesizerVoice;
                }

                // Speech
                builder.StartVoice(_Voice);
                builder.StartSentence();
                builder.AppendText(_Text);
                builder.EndSentence();
                builder.EndVoice();

                // Speak
                SpeechSynthesizer.SpeakAsync(builder);
            }
        }
Example #11
        public HttpResponseMessage GetSpeechFromText([FromBody] JObject requestBody)
        {
            string textInput = (string)requestBody["TextInput"];

            if (string.IsNullOrEmpty(textInput))
            {
                return(this.Request.CreateResponse(HttpStatusCode.BadRequest, new { Success = false, Errors = new string[] { "Parameter 'TextInput' is required" } }));
            }

            var speechAudioFormatConfig = new SpeechAudioFormatInfo(samplesPerSecond: 8000, bitsPerSample: AudioBitsPerSample.Sixteen, channel: AudioChannel.Stereo);
            var waveFormat = new WaveFormat(speechAudioFormatConfig.SamplesPerSecond, speechAudioFormatConfig.BitsPerSample, speechAudioFormatConfig.ChannelCount);
            var prompt     = new PromptBuilder
            {
                Culture = CultureInfo.CreateSpecificCulture("en-US")
            };

            prompt.StartVoice(prompt.Culture);
            prompt.StartSentence();
            prompt.StartStyle(new PromptStyle()
            {
                Emphasis = PromptEmphasis.Reduced,
                Rate     = PromptRate.Slow
            });
            prompt.AppendText(textInput);
            prompt.EndStyle();
            prompt.EndSentence();
            prompt.EndVoice();

            var mp3Stream = new MemoryStream();

            byte[] audioOutputBytes;
            string audioOutputAsString = null;

            using (var synthWaveMemoryStream = new MemoryStream())
            {
                var resetEvent = new ManualResetEvent(false);
                ThreadPool.QueueUserWorkItem(arg =>
                {
                    try
                    {
                        using (var siteSpeechSynth = new SpeechSynthesizer())
                        {
                            siteSpeechSynth.SetOutputToAudioStream(synthWaveMemoryStream, speechAudioFormatConfig);
                            siteSpeechSynth.Speak(prompt);
                        }
                    }
                    finally
                    {
                        resetEvent.Set();
                    }
                });
                WaitHandle.WaitAll(new WaitHandle[] { resetEvent });
                var bitRate = (speechAudioFormatConfig.AverageBytesPerSecond * 8);

                synthWaveMemoryStream.Position = 0;

                using (var mp3FileWriter = new LameMP3FileWriter(outStream: mp3Stream, format: waveFormat, bitRate: bitRate))
                {
                    synthWaveMemoryStream.CopyTo(mp3FileWriter);
                }

                audioOutputBytes    = mp3Stream.ToArray();
                audioOutputAsString = Convert.ToBase64String(audioOutputBytes);
            }

            return(this.Request.CreateResponse(HttpStatusCode.OK, new { Success = true, Data = audioOutputAsString }));
        }
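
Both GetSpeechFromText above and TextToMp3 below run the synthesizer on a ThreadPool worker and block on a ManualResetEvent instead of calling Speak on the request thread; as the inline comments in TextToMp3 explain, that detour is what keeps SpeechSynthesizer from hanging inside an ASP.NET application pool.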
        public FileResult TextToMp3(string text)
        {
            //Primary memory stream for storing mp3 audio
            var mp3Stream = new MemoryStream();
            //Speech format
            var speechAudioFormatConfig = new SpeechAudioFormatInfo
                                              (samplesPerSecond: 8000, bitsPerSample: AudioBitsPerSample.Sixteen,
                                              channel: AudioChannel.Stereo);
            //Naudio's wave format used for mp3 conversion.
            //Mirror configuration of speech config.
            var waveFormat = new WaveFormat(speechAudioFormatConfig.SamplesPerSecond,
                                            speechAudioFormatConfig.BitsPerSample, speechAudioFormatConfig.ChannelCount);

            try
            {
                //Build a voice prompt to have the voice talk slower
                //and with an emphasis on words
                var prompt = new PromptBuilder
                {
                    Culture = CultureInfo.CreateSpecificCulture("en-US")
                };
                prompt.StartVoice(prompt.Culture);
                prompt.StartSentence();
                prompt.StartStyle(new PromptStyle()
                {
                    Emphasis = PromptEmphasis.Reduced, Rate = PromptRate.Slow
                });
                prompt.AppendText(text);
                prompt.EndStyle();
                prompt.EndSentence();
                prompt.EndVoice();

                //Wav stream output of converted text to speech
                using (var synthWavMs = new MemoryStream())
                {
                    //Spin off a new thread that's safe for an ASP.NET application pool.
                    var resetEvent = new ManualResetEvent(false);
                    ThreadPool.QueueUserWorkItem(arg =>
                    {
                        try
                        {
                            //initialize a voice with standard settings (disposed when done)
                            using (var siteSpeechSynth = new SpeechSynthesizer())
                            {
                                //Set memory stream and audio format to speech synthesizer
                                siteSpeechSynth.SetOutputToAudioStream(synthWavMs, speechAudioFormatConfig);
                                //speak the prompt built above
                                siteSpeechSynth.Speak(prompt);
                            }
                        }
                        catch (Exception ex)
                        {
                            //This is here to diagnostic any issues with the conversion process.
                            //It can be removed after testing.
                            Response.AddHeader
                                ("EXCEPTION", ex.GetBaseException().ToString());
                        }
                        finally
                        {
                            resetEvent.Set();//end of thread
                        }
                    });
                    //Wait until thread catches up with us
                    WaitHandle.WaitAll(new WaitHandle[] { resetEvent });
                    //Estimated bitrate
                    var bitRate = (speechAudioFormatConfig.AverageBytesPerSecond * 8);
                    //Set at starting position
                    synthWavMs.Position = 0;
                    //Be sure to have a bin folder with lame dll files in there.
                    //They also need to be loaded on application start up via Global.asax file
                    using (var mp3FileWriter = new LameMP3FileWriter
                                                   (outStream: mp3Stream, format: waveFormat, bitRate: bitRate))
                        synthWavMs.CopyTo(mp3FileWriter);
                }
            }
            catch (Exception ex)
            {
                Response.AddHeader("EXCEPTION", ex.GetBaseException().ToString());
            }
            finally
            {
                //Set no cache on this file
                Response.Cache.SetExpires(DateTime.UtcNow.AddMinutes(-1));
                Response.Cache.SetCacheability(HttpCacheability.NoCache);
                Response.Cache.SetNoStore();
                //required for chrome and safari
                Response.AppendHeader("Accept-Ranges", "bytes");
                //Write the byte length of mp3 to the client
                Response.AddHeader("Content-Length",
                                   mp3Stream.Length.ToString(CultureInfo.InvariantCulture));
            }
            //return the mp3 stream contents as a byte array for a file download
            return(File(mp3Stream.ToArray(), "audio/mp3"));
        }
        private void _recognizeSpeechAndWriteToConsole_SpeechRecognized_Main1(object sender, SpeechRecognizedEventArgs e)
        {
            string itemContent = string.Format(
                CultureInfo.CurrentCulture,
                "Item Content: {0}\n\n{0}\n\n{0}\n\n{0}\n\n{0}\n\n{0}\n\n{0}",
                "Curabitur class aliquam vestibulum nam curae maecenas sed integer cras phasellus suspendisse quisque donec dis praesent accumsan bibendum pellentesque condimentum adipiscing etiam consequat vivamus dictumst aliquam duis convallis scelerisque est parturient ullamcorper aliquet fusce suspendisse nunc hac eleifend amet blandit facilisi condimentum commodo scelerisque faucibus aenean ullamcorper ante mauris dignissim consectetuer nullam lorem vestibulum habitant conubia elementum pellentesque morbi facilisis arcu sollicitudin diam cubilia aptent vestibulum auctor eget dapibus pellentesque inceptos leo egestas interdum nulla consectetuer suspendisse adipiscing pellentesque proin lobortis sollicitudin augue elit mus congue fermentum parturient fringilla euismod feugiat");

            //MessageBox.Show("DATA IS:" + e.Result.Text);
            PromptBuilder builder1 = new PromptBuilder();

            QNS.Text = e.Result.Text;
            if (e.Result.Text == "MUSIC PLAYER" && main_flag == 1)
            {
                SampleDataItem sampleDataItem = new SampleDataItem(
                    "Group-1-Item-5",
                    "MUSIC PLAYER",
                    string.Empty,
                    null,
                    "MUSIC PLAYER",
                    itemContent,
                    null,
                    typeof(Window1));
                main_flag = 0;
                if (sampleDataItem != null && sampleDataItem.NavigationPage != null)
                {
                    backButton.Visibility    = System.Windows.Visibility.Visible;
                    navigationRegion.Content = Activator.CreateInstance(sampleDataItem.NavigationPage);
                }
            }
            else if (e.Result.Text == "TASK" && main_flag == 0)
            {
                SampleDataItem sampleDataItem = new SampleDataItem(
                    "Group-1-Item-2",
                    "REMINDER",
                    string.Empty,
                    null,
                    "CheckBox and RadioButton controls",
                    itemContent,
                    null,
                    typeof(Page1));
                main_flag = 0;
                if (sampleDataItem != null && sampleDataItem.NavigationPage != null)
                {
                    backButton.Visibility    = System.Windows.Visibility.Visible;
                    navigationRegion.Content = Activator.CreateInstance(sampleDataItem.NavigationPage);
                }
            }
            else if (e.Result.Text == "HELLO")
            {
                builder1.StartSentence();
                builder1.AppendText("Hello sir ...");
                builder1.EndSentence();
                SpeechSynthesizer synthesizer = new SpeechSynthesizer();
                synthesizer.Speak(builder1);
                synthesizer.Dispose();
            }
            else if (e.Result.Text == "HOME AUTOMATION" && main_flag == 0)
            {
                SampleDataItem sampleDataItem = new SampleDataItem(
                    "Group-1-Item-1",
                    "HOME AUTOMATION",
                    string.Empty,
                    null,
                    "Several types of buttons with custom styles",
                    itemContent,
                    null,
                    typeof(ButtonSample));
                main_flag = 0;
                if (sampleDataItem != null && sampleDataItem.NavigationPage != null)
                {
                    backButton.Visibility    = System.Windows.Visibility.Visible;
                    navigationRegion.Content = Activator.CreateInstance(sampleDataItem.NavigationPage);
                }
            }

            else if (e.Result.Text == "OPEN YOUTUBE" && main_flag == 0)
            {
                main_flag = 0;
                Process.Start("chrome.exe", "http:\\www.YouTube.com");
            }
        }
Example #14
        private static void SynthToCam(string text, CameraWindow cw)
        {
            // PCM, 11025 Hz, 16-bit mono: 11025 samples/s * 2 bytes = 22050 average bytes per second, block align 2.
            var synthFormat = new System.Speech.AudioFormat.SpeechAudioFormatInfo(System.Speech.AudioFormat.EncodingFormat.Pcm, 11025, 16, 1, 22050, 2, null);

            using (var synthesizer = new SpeechSynthesizer())
            {
                using (var waveStream = new MemoryStream())
                {
                    //write one second of silence (22050 bytes at 11025 Hz, 16-bit mono) so the camera can initialise properly
                    var silence = new byte[22050];
                    waveStream.Write(silence, 0, silence.Length);

                    var pbuilder = new PromptBuilder();
                    var pStyle   = new PromptStyle
                    {
                        Emphasis = PromptEmphasis.Strong,
                        Rate     = PromptRate.Slow,
                        Volume   = PromptVolume.ExtraLoud
                    };

                    pbuilder.StartStyle(pStyle);
                    pbuilder.StartParagraph();
                    pbuilder.StartVoice(VoiceGender.Male, VoiceAge.Adult, 2);
                    pbuilder.StartSentence();
                    pbuilder.AppendText(text);
                    pbuilder.EndSentence();
                    pbuilder.EndVoice();
                    pbuilder.EndParagraph();
                    pbuilder.EndStyle();

                    synthesizer.SetOutputToAudioStream(waveStream, synthFormat);
                    synthesizer.Speak(pbuilder);
                    synthesizer.SetOutputToNull();

                    //write some silence to the stream to allow camera to end properly
                    waveStream.Write(silence, 0, silence.Length);

                    waveStream.Seek(0, SeekOrigin.Begin);

                    ITalkTarget talkTarget = null;

                    var ds = new DirectStream(waveStream)
                    {
                        RecordingFormat = new WaveFormat(11025, 16, 1)
                    };
                    switch (cw.Camobject.settings.audiomodel)
                    {
                    case "Foscam":
                        ds.Interval   = 40;
                        ds.PacketSize = 882;     // (40ms packet at 22050 bytes per second)
                        talkTarget    = new TalkFoscam(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport,
                                                       cw.Camobject.settings.audiousername,
                                                       cw.Camobject.settings.audiopassword, ds);
                        break;

                    case "NetworkKinect":
                        ds.Interval   = 40;
                        ds.PacketSize = 882;
                        talkTarget    = new TalkNetworkKinect(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport, ds);
                        break;

                    case "iSpyServer":
                        ds.Interval   = 40;
                        ds.PacketSize = 882;
                        talkTarget    = new TalkiSpyServer(cw.Camobject.settings.audioip,
                                                           cw.Camobject.settings.audioport,
                                                           ds);
                        break;

                    case "Axis":
                        talkTarget = new TalkAxis(cw.Camobject.settings.audioip, cw.Camobject.settings.audioport,
                                                  cw.Camobject.settings.audiousername,
                                                  cw.Camobject.settings.audiopassword, ds);
                        break;

                    default:
                        //local playback
                        talkTarget = new TalkLocal(ds);

                        break;
                    }
                    ds.Start();
                    talkTarget.Start();
                    while (ds.IsRunning)
                    {
                        Thread.Sleep(100);
                    }
                    ds.Stop();
                    if (talkTarget != null)
                    {
                        talkTarget.Stop();
                    }
                    talkTarget = null;
                    ds         = null;

                    waveStream.Close();
                }
            }
        }