private static void Main(string[] args)
{
    OpenFileDialog openFileDialog = new OpenFileDialog()
    {
        Title = "Select a file to play",
        Filter = CodecFactory.SupportedFilesFilterEn
    };
    if (openFileDialog.ShowDialog() != DialogResult.OK)
        return;

    using (var soundOut = new WasapiOut() { Latency = 50 })
    {
        using (var fadeInOut = CodecFactory.Instance.GetCodec(openFileDialog.FileName)
            .ToSampleSource()
            .AppendSource(x => new FadeInOut(x)))
        {
            var linearFadeStrategy = new LinearFadeStrategy();
            var eventHandle = new AutoResetEvent(false);
            linearFadeStrategy.FadingFinished += (s, e) => eventHandle.Set();
            fadeInOut.FadeStrategy = linearFadeStrategy;

            soundOut.Initialize(fadeInOut.ToWaveSource());
            soundOut.Play();

            while (true)
            {
                Console.Write("Enter the target volume: ");
                float to;
                if (!Single.TryParse(Console.ReadLine(), out to) || to > 1 || to < 0)
                {
                    Console.WriteLine("Invalid value.");
                    continue;
                }

                //fade from a volume of 0.3 to the entered volume over a duration of 5000 ms
                linearFadeStrategy.StartFading(0.3f, to, 5000);

                do
                {
                    ClearCurrentConsoleLine();
                    Console.WriteLine(linearFadeStrategy.CurrentVolume);
                    Console.CursorTop--;
                } while (!eventHandle.WaitOne(50));

                ClearCurrentConsoleLine();
                Console.WriteLine(linearFadeStrategy.CurrentVolume);
            }
        }
    }
}
static void Main()
{
    OpenFileDialog openFileDialog = new OpenFileDialog { Filter = CodecFactory.SupportedFilesFilterEn };
    if (openFileDialog.ShowDialog() != DialogResult.OK)
        return;

    using (var source = CodecFactory.Instance.GetCodec(openFileDialog.FileName)
        .ToSampleSource()
        .AppendSource(x => new BiQuadFilterSource(x)))
    {
        using (var soundOut = new WasapiOut())
        {
            soundOut.Initialize(source.ToWaveSource());
            soundOut.Play();

            Console.WriteLine("Playing without any filter.");
            Console.ReadKey();

            source.Filter = new HighpassFilter(source.WaveFormat.SampleRate, 4000);
            Console.WriteLine("HighpassFilter @4kHz");
            Console.ReadKey();

            source.Filter = new LowpassFilter(source.WaveFormat.SampleRate, 1000);
            Console.WriteLine("LowpassFilter @1kHz");
            Console.ReadKey();

            source.Filter = new PeakFilter(source.WaveFormat.SampleRate, 2000, 15, 10);
            Console.WriteLine("PeakFilter @2kHz; bandWidth = 15; gain = 10dB");
            Console.ReadKey();
        }
    }
}
public void TestPhonemes()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);

    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"UTF-8\"?><speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>This is your <phoneme alphabet=\"ipa\" ph=\"leɪkɒn\">Lakon</phoneme>.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ˈdɛltə\">delta</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"bliːiː\">Bleae</phoneme> <phoneme alphabet=\"ipa\" ph=\"θuːə\">Thua</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the Amnemoi system.</s></speak>");
        //synth.Speak("You are travelling to the Barnard's Star system.");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new WaveFileReader(stream);
        var soundOut = new WasapiOut();
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Initialize(source);
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        source.Dispose();
    }
}
ISoundOut CreateSoundOut(ref IWaveSource source)
{
    ISoundOut soundOut;
    if (WasapiOut.IsSupportedOnCurrentPlatform)
        soundOut = new WasapiOut(true, AudioClientShareMode.Shared, 50);
    else
    {
        soundOut = new DirectSoundOut() { Latency = 100 };
        if (source.WaveFormat.BitsPerSample > 16)
            source = source.ToSampleSource().ToWaveSource(16);
    }
    return soundOut;
}
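A minimal usage sketch for the helper above: because source is passed by ref, the helper can quietly swap in a 16-bit version of the stream when it has to fall back to DirectSoundOut. The file path is purely illustrative.

//Hypothetical caller for CreateSoundOut; the path is an assumption for illustration.
IWaveSource source = CodecFactory.Instance.GetCodec(@"C:\music\track.mp3");
ISoundOut soundOut = CreateSoundOut(ref source); //may replace source with a 16-bit wave source
soundOut.Initialize(source);
soundOut.Play();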
public void CanPlayMonoToStereoSourceTest()
{
    var source = new StereoToMonoSource(CodecFactory.Instance.GetCodec(testfile));
    Assert.AreEqual(1, source.WaveFormat.Channels);

    var monoSource = new MonoToStereoSource(source);
    Assert.AreEqual(2, monoSource.WaveFormat.Channels);

    ISoundOut soundOut;
    if (WasapiOut.IsSupportedOnCurrentPlatform)
        soundOut = new WasapiOut();
    else
        soundOut = new DirectSoundOut();

    soundOut.Initialize(monoSource.ToWaveSource(16));
    soundOut.Play();
    Thread.Sleep((int)Math.Min(source.GetMilliseconds(source.Length), 60000));
    soundOut.Dispose();
}
public void SoundInToSoundOutTest_Wasapi()
{
    for (int i = 0; i < 10; i++)
    {
        var waveIn = new WasapiCapture();
        waveIn.Initialize();
        waveIn.Start();

        var waveInToSource = new SoundInSource(waveIn) { FillWithZeros = true };

        var soundOut = new WasapiOut();
        soundOut.Initialize(waveInToSource);
        soundOut.Play();

        Thread.Sleep(2000);
        Assert.AreEqual(PlaybackState.Playing, soundOut.PlaybackState);

        soundOut.Dispose();
        waveIn.Dispose();
    }
}
static void Main(string[] args)
{
    var openFileDialog = new OpenFileDialog();
    openFileDialog.Filter = CodecFactory.SupportedFilesFilterEn;
    if (openFileDialog.ShowDialog() == DialogResult.OK)
    {
        using (var source = CodecFactory.Instance.GetCodec(openFileDialog.FileName))
        {
            Debug.Assert(source.CanSeek, "Source does not support seeking.");

            using (var soundOut = new WasapiOut())
            {
                soundOut.Initialize(source);
                soundOut.Play();

                Console.WriteLine("Press any key to skip half the track.");
                Console.ReadKey();
                source.Position = source.Length / 2;

                while (true)
                {
                    IAudioSource s = source;
                    var str = String.Format(@"New position: {0:mm\:ss\.f}/{1:mm\:ss\.f}",
                        TimeConverterFactory.Instance.GetTimeConverterForSource(s)
                            .ToTimeSpan(s.WaveFormat, s.Position),
                        TimeConverterFactory.Instance.GetTimeConverterForSource(s)
                            .ToTimeSpan(s.WaveFormat, s.Length));
                    str += String.Concat(Enumerable.Repeat(" ", Console.BufferWidth - 1 - str.Length));
                    Console.Write(str);
                    Console.SetCursorPosition(0, Console.CursorTop);

                    Thread.Sleep(100);
                }
            }
        }
    }
}
public void CanPlayStereoToMonoSource()
{
    //in order to fix workitem 3
    var source = GlobalTestConfig.TestMp3().ToStereo();
    Assert.AreEqual(2, source.WaveFormat.Channels);

    var monoSource = new StereoToMonoSource(source.ToSampleSource());
    Assert.AreEqual(1, monoSource.WaveFormat.Channels);

    ISoundOut soundOut;
    if (WasapiOut.IsSupportedOnCurrentPlatform)
        soundOut = new WasapiOut();
    else
        soundOut = new DirectSoundOut();

    soundOut.Initialize(monoSource.ToWaveSource(16));
    soundOut.Play();
    Thread.Sleep((int)Math.Min(source.GetMilliseconds(source.Length), 20000));
    soundOut.Dispose();
}
/// <summary>
/// Plays the WAVE file data contained in the given MemoryStream using the CSCore library.
/// </summary>
/// <param name="memoryStream">The MemoryStream containing the WAVE file data.</param>
public void PlayWithCSCore(MemoryStream memoryStream)
{
    using (IWaveSource soundSource = new WaveFileReader(memoryStream))
    {
        //SoundOut implementation which plays the sound
        //WaveOut works. DirectSoundOut works. WasapiOut works.
        using (ISoundOut soundOut = new CSCore.SoundOut.WasapiOut())
        {
            //Tell the SoundOut which sound it has to play
            soundOut.Initialize(soundSource);

            //Play the sound
            soundOut.Play();

            while (soundOut.PlaybackState == CSCore.SoundOut.PlaybackState.Playing && soundNumPlaying >= 0)
            {
                Thread.Sleep(10);
            }

            //Stop the playback
            soundOut.Stop();
        }
    }
}
public void TestExtendedSource()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);

    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.Speak("Test.");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new ExtendedDurationWaveSource(new WaveFileReader(stream), 2000)
            .AppendSource(x => new DmoWavesReverbEffect(x) { ReverbMix = -10 });

        var soundOut = new WasapiOut();
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Initialize(source);
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        source.Dispose();
    }
}
public void Speak(string script, string voice, int echoDelay, int distortionLevel, int chorusLevel, int reverbLevel, int compressLevel, bool radio)
{
    if (script == null) { return; }

    try
    {
        using (SpeechSynthesizer synth = new SpeechSynthesizer())
        using (MemoryStream stream = new MemoryStream())
        {
            if (String.IsNullOrWhiteSpace(voice))
            {
                voice = configuration.StandardVoice;
            }
            if (voice != null)
            {
                try
                {
                    synth.SelectVoice(voice);
                }
                catch { }
            }

            synth.Rate = configuration.Rate;
            synth.SetOutputToWaveStream(stream);

            string speech = SpeechFromScript(script);
            if (speech.Contains("<phoneme"))
            {
                speech = "<?xml version=\"1.0\" encoding=\"UTF-8\"?><speak version=\"1.0\" xmlns=\"http://www.w3.org/2001/10/synthesis\" xml:lang=\"" + locale + "\"><s>" + speech + "</s></speak>";
                synth.SpeakSsml(speech);
            }
            else
            {
                synth.Speak(speech);
            }
            stream.Seek(0, SeekOrigin.Begin);

            using (System.IO.StreamWriter file = new System.IO.StreamWriter(Environment.GetEnvironmentVariable("AppData") + @"\EDDI\speech.log", true))
            {
                file.WriteLine("" + System.Threading.Thread.CurrentThread.ManagedThreadId + ": Turned script " + script + " in to speech " + speech);
            }

            IWaveSource source = new WaveFileReader(stream);

            // We need to extend the duration of the wave source if we have any effects going on
            if (chorusLevel != 0 || reverbLevel != 0 || echoDelay != 0)
            {
                // Add a base of 500ms plus 10ms per effect level over 50
                source = source.AppendSource(x => new ExtendedDurationWaveSource(x, 500 + Math.Max(0, (configuration.EffectsLevel - 50) * 10)));
            }

            // Add various effects...

            // We always have chorus
            if (chorusLevel != 0)
            {
                source = source.AppendSource(x => new DmoChorusEffect(x) { Depth = chorusLevel, WetDryMix = Math.Min(100, (int)(180 * ((decimal)configuration.EffectsLevel) / ((decimal)100))), Delay = 16, Frequency = 2, Feedback = 25 });
            }

            // We only have reverb and echo if we're not transmitting or receiving
            if (!radio)
            {
                if (reverbLevel != 0)
                {
                    // We tone down the reverb level with the distortion level, as the combination is nasty
                    source = source.AppendSource(x => new DmoWavesReverbEffect(x) { ReverbTime = (int)(1 + 999 * ((decimal)configuration.EffectsLevel) / ((decimal)100)), ReverbMix = Math.Max(-96, -96 + (96 * reverbLevel / 100) - distortionLevel) });
                }

                if (echoDelay != 0)
                {
                    // We tone down the echo level with the distortion level, as the combination is nasty
                    source = source.AppendSource(x => new DmoEchoEffect(x) { LeftDelay = echoDelay, RightDelay = echoDelay, WetDryMix = Math.Max(5, (int)(10 * ((decimal)configuration.EffectsLevel) / ((decimal)100)) - distortionLevel), Feedback = Math.Max(0, 10 - distortionLevel / 2) });
                }
            }

            if (configuration.EffectsLevel > 0 && distortionLevel > 0)
            {
                source = source.AppendSource(x => new DmoDistortionEffect(x) { Edge = distortionLevel, Gain = -6 - (distortionLevel / 2), PostEQBandwidth = 4000, PostEQCenterFrequency = 4000 });
            }

            if (radio)
            {
                source = source.AppendSource(x => new DmoDistortionEffect(x) { Edge = 7, Gain = -4 - distortionLevel / 2, PostEQBandwidth = 2000, PostEQCenterFrequency = 6000 });
                source = source.AppendSource(x => new DmoCompressorEffect(x) { Attack = 1, Ratio = 3, Threshold = -10 });
            }

            EventWaitHandle waitHandle = new EventWaitHandle(false, EventResetMode.AutoReset);

            var soundOut = new WasapiOut();
            soundOut.Initialize(source);
            soundOut.Stopped += (s, e) => waitHandle.Set();

            activeSpeeches.Add(soundOut);
            soundOut.Play();

            // Add a timeout, in case it doesn't come back
            waitHandle.WaitOne(source.GetTime(source.Length));

            // It's possible that this has been disposed of, so ensure that it's still there before we try to finish it
            lock (activeSpeeches)
            {
                if (activeSpeeches.Contains(soundOut))
                {
                    activeSpeeches.Remove(soundOut);
                    soundOut.Stop();
                    soundOut.Dispose();
                }
            }

            source.Dispose();
        }
    }
    catch (Exception ex)
    {
        using (System.IO.StreamWriter file = new System.IO.StreamWriter(Environment.GetEnvironmentVariable("AppData") + @"\EDDI\speech.log", true))
        {
            file.WriteLine("" + System.Threading.Thread.CurrentThread.ManagedThreadId + ": Caught exception " + ex);
        }
    }
}
public void TestDropOff()
{
    SpeechSynthesizer synth = new SpeechSynthesizer();
    using (MemoryStream stream = new MemoryStream())
    {
        synth.SetOutputToWaveStream(stream);
        synth.Speak("Testing drop-off.");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new WaveFileReader(stream);
        EventWaitHandle waitHandle = new EventWaitHandle(false, EventResetMode.AutoReset);

        var soundOut = new WasapiOut();
        soundOut.Initialize(source);
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        source.Dispose();
    }

    SpeechService.Instance.Speak("Testing drop-off.", null, 50, 1, 30, 40, 0, true);
}
public void TestSpeech()
{
    SpeechSynthesizer synth = new SpeechSynthesizer();
    using (MemoryStream stream = new MemoryStream())
    {
        synth.SetOutputToWaveStream(stream);
        synth.Speak("This is a test.");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new WaveFileReader(stream);
        EventWaitHandle waitHandle = new EventWaitHandle(false, EventResetMode.AutoReset);

        var soundOut = new WasapiOut();
        DmoEchoEffect echoSource = new DmoEchoEffect(source);
        soundOut.Initialize(echoSource);
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        source.Dispose();
    }
}
/// <summary>
/// Initializes a new WasapiSoundProvider class.
/// </summary>
/// <param name="soundInitializer">The SoundInitializer.</param>
internal WasapiSoundProvider(ISoundInitializer soundInitializer)
{
    _wasapiOut = new WasapiOut(false, AudioClientShareMode.Shared, 100);
    SoundInitializer = soundInitializer;
    _wasapiOut.Stopped += DirectSoundOutStopped;
}
public void TestFlatten()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);

    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        synth.Speak("This is a test for flattening");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new WaveFileReader(stream);

        Equalizer equalizer = Equalizer.Create10BandEqualizer(source);
        equalizer.SampleFilters[0].SetGain(-9.6f);
        equalizer.SampleFilters[1].SetGain(-9.6f);
        equalizer.SampleFilters[2].SetGain(-9.6f);
        equalizer.SampleFilters[3].SetGain(-3.9f);
        equalizer.SampleFilters[4].SetGain(2.4f);
        equalizer.SampleFilters[5].SetGain(11.1f);
        equalizer.SampleFilters[6].SetGain(15.9f);
        equalizer.SampleFilters[7].SetGain(15.9f);
        equalizer.SampleFilters[8].SetGain(15.9f);
        equalizer.SampleFilters[9].SetGain(16.7f);

        var soundOut = new WasapiOut();
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Initialize(equalizer.ToWaveSource());
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        equalizer.Dispose();
        source.Dispose();
    }
}
public void TestRandomVoice()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);

    List<InstalledVoice> availableVoices = new List<InstalledVoice>();
    foreach (InstalledVoice voice in new SpeechSynthesizer().GetInstalledVoices())
    {
        if (voice.Enabled == true
            && voice.VoiceInfo.Culture.TwoLetterISOLanguageName == "en"
            && (voice.VoiceInfo.Name.StartsWith("IVONA")
                || voice.VoiceInfo.Name.StartsWith("CereVoice")
                || voice.VoiceInfo.Name == "Microsoft Anna"))
        {
            availableVoices.Add(voice);
        }
    }

    foreach (InstalledVoice availableVoice in availableVoices)
    {
        Console.WriteLine(availableVoice.VoiceInfo.Name);
    }

    for (int i = 0; i < 10; i++)
    {
        using (MemoryStream stream = new MemoryStream())
        using (SpeechSynthesizer synth = new SpeechSynthesizer())
        {
            string selectedVoice = availableVoices.OrderBy(x => Guid.NewGuid()).FirstOrDefault().VoiceInfo.Name;
            Console.WriteLine("Selected voice is " + selectedVoice);
            synth.SelectVoice(selectedVoice);
            synth.SetOutputToWaveStream(stream);
            //synth.Speak("Anaconda golf foxtrot lima one niner six eight requesting docking.");
            synth.Speak("Anaconda.");
            stream.Seek(0, SeekOrigin.Begin);

            IWaveSource source = new WaveFileReader(stream);
            var soundOut = new WasapiOut();
            soundOut.Stopped += (s, e) => waitHandle.Set();
            soundOut.Initialize(source);
            soundOut.Play();
            waitHandle.WaitOne();
            soundOut.Dispose();
            source.Dispose();
        }
    }
}
public void TestDistortion()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);

    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        foreach (InstalledVoice voice in synth.GetInstalledVoices())
        {
            Console.WriteLine(voice.VoiceInfo.Name);
        }

        synth.SetOutputToWaveStream(stream);
        synth.Speak("Anaconda golf foxtrot lima one niner six eight requesting docking.");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new WaveFileReader(stream);
        DmoDistortionEffect distortedSource = new DmoDistortionEffect(source);
        distortedSource.Edge = 10;
        distortedSource.PreLowpassCutoff = 4800;

        var soundOut = new WasapiOut();
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Initialize(distortedSource);
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        distortedSource.Dispose();
        source.Dispose();
    }
}
static void Main(string[] args)
{
    //Register the new codec.
    CodecFactory.Instance.Register("ogg-vorbis",
        new CodecFactoryEntry(s => new NVorbisSource(s).ToWaveSource(), ".ogg"));

    OpenFileDialog openFileDialog = new OpenFileDialog();
    openFileDialog.Filter = "Vorbis file (*.ogg)|*.ogg";
    if (openFileDialog.ShowDialog() == DialogResult.OK)
    {
        using (var source = CodecFactory.Instance.GetCodec(openFileDialog.FileName))
        {
            using (WasapiOut soundOut = new WasapiOut())
            {
                soundOut.Initialize(source);
                soundOut.Play();
                Console.ReadKey();
                soundOut.Stop();
            }
        }
    }
}
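The NVorbisSource type registered above is not part of CSCore itself. A minimal sketch of such an ISampleSource wrapper, assuming the NVorbis package (VorbisReader) is referenced, is enough to make the snippet compile; treat it as an illustration rather than the original project's implementation.

//Requires: using System; using System.IO; using CSCore; using NVorbis;
public sealed class NVorbisSource : ISampleSource
{
    private readonly Stream _stream;
    private readonly VorbisReader _vorbisReader;
    private readonly WaveFormat _waveFormat;
    private bool _disposed;

    public NVorbisSource(Stream stream)
    {
        if (stream == null)
            throw new ArgumentNullException("stream");
        if (!stream.CanRead)
            throw new ArgumentException("Stream is not readable.", "stream");
        _stream = stream;
        _vorbisReader = new VorbisReader(stream, false);
        //NVorbis decodes to 32-bit IEEE-float samples.
        _waveFormat = new WaveFormat(_vorbisReader.SampleRate, 32, _vorbisReader.Channels, AudioEncoding.IeeeFloat);
    }

    public bool CanSeek { get { return _stream.CanSeek; } }

    public WaveFormat WaveFormat { get { return _waveFormat; } }

    public long Length
    {
        get { return CanSeek ? (long)(_vorbisReader.TotalTime.TotalSeconds * _waveFormat.SampleRate * _waveFormat.Channels) : 0; }
    }

    public long Position
    {
        get { return CanSeek ? (long)(_vorbisReader.DecodedTime.TotalSeconds * _waveFormat.SampleRate * _waveFormat.Channels) : 0; }
        set
        {
            if (!CanSeek)
                throw new InvalidOperationException("NVorbisSource is not seekable.");
            //Convert the sample offset back into a decode time.
            _vorbisReader.DecodedTime = TimeSpan.FromSeconds((double)value / _waveFormat.SampleRate / _waveFormat.Channels);
        }
    }

    public int Read(float[] buffer, int offset, int count)
    {
        return _vorbisReader.ReadSamples(buffer, offset, count);
    }

    public void Dispose()
    {
        if (!_disposed)
            _vorbisReader.Dispose();
        _disposed = true;
    }
}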
/// <summary>
/// Disposes the SoundProvider.
/// </summary>
/// <param name="disposing">The State.</param>
protected virtual void Dispose(bool disposing)
{
    if (!_disposed)
    {
        if (_wasapiOut != null)
        {
            Stop();
            _wasapiOut.Dispose();
            _wasapiOut = null;
        }
    }
    _disposed = true;
}
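For completeness, the standard dispose pattern that such a protected overload usually sits behind looks roughly like this. This is a sketch only; the class name is taken from the WasapiSoundProvider constructor shown earlier, and the original class may already define these members elsewhere.

/// <summary>
/// Disposes the SoundProvider.
/// </summary>
public void Dispose()
{
    Dispose(true);
    GC.SuppressFinalize(this);
}

/// <summary>
/// Finalizer, in case Dispose is never called explicitly.
/// </summary>
~WasapiSoundProvider()
{
    Dispose(false);
}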
public void TestPhonemes()
{
    EventWaitHandle waitHandle = new AutoResetEvent(false);

    using (MemoryStream stream = new MemoryStream())
    using (SpeechSynthesizer synth = new SpeechSynthesizer())
    {
        synth.SetOutputToWaveStream(stream);
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"viˈga\">Vega</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ækɜˈnɑ\">Achenar</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ˈsɪɡni\">Cygni</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ˈsɪɡnəs\">Cygnus</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ʃɪnˈrɑːrtə\">Shinrarta</phoneme> <phoneme alphabet=\"ipa\" ph=\"ˈdezɦrə\">Dezhra</phoneme> system.</s></speak>");
        //synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the <phoneme alphabet=\"ipa\" ph=\"ˈnjuːənɛts\">Reorte</phoneme> system.</s></speak>");
        synth.SpeakSsml("<?xml version=\"1.0\" encoding=\"ISO-8859-1\"?><speak version = \"1.0\" xmlns = \"http://www.w3.org/2001/10/synthesis\" xml:lang=\"en-GB\"><s>You are travelling to the Eravate system.</s></speak>");
        stream.Seek(0, SeekOrigin.Begin);

        IWaveSource source = new WaveFileReader(stream);
        var soundOut = new WasapiOut();
        soundOut.Stopped += (s, e) => waitHandle.Set();
        soundOut.Initialize(source);
        soundOut.Play();
        waitHandle.WaitOne();
        soundOut.Dispose();
        source.Dispose();
    }
}
public void Play(string Url)
{
    this.uri = Url;
    StopPlayBack();
    try
    {
        soundSource = CodecFactory.Instance.GetCodec(new Uri(this.uri));
        soundOut = new WasapiOut();
        soundOut.Initialize(soundSource);
        soundOut.Play();
        soundOut.Volume = lastVolume / 100f;
    }
    catch (Exception)
    {
        //Rethrow without resetting the stack trace ("throw ex" would discard it).
        throw;
    }
}
public WasapiEventHandler(WasapiOut wasapiOut) { _wasapiOut = wasapiOut; }
private ISoundOut GetSoundOut()
{
    if (WasapiOut.IsSupportedOnCurrentPlatform)
    {
        WasapiOut wasapiOut = new WasapiOut();
        wasapiOut.Device = getListOutputsSelected();
        return wasapiOut;
    }
    else
        return null;
}
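getListOutputsSelected is application code that is not shown here. A hedged sketch of how such a helper could pick a render device with CSCore's MMDeviceEnumerator follows; the "first active endpoint" choice and the method name are purely illustrative, not the original project's selection logic.

//Hypothetical helper: returns the first active render endpoint, or null if none is found.
//Requires: using System.Linq; using CSCore.CoreAudioAPI;
private MMDevice getListOutputsSelected()
{
    using (var enumerator = new MMDeviceEnumerator())
    {
        //DeviceState.Active filters out disabled and unplugged endpoints.
        return enumerator.EnumAudioEndpoints(DataFlow.Render, DeviceState.Active).FirstOrDefault();
    }
}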