private void OpenSource(IWaveSource source)
{
    try
    {
        Stop(); // if something is already playing, stop that playback first
        _audioSource = source;
        _gainSource = new GainSource(source) { Gain = (float)this.Gain };
        source = SetupVisualization(_gainSource.ToWaveSource(16));

        if (WasapiOut.IsSupportedOnCurrentPlatform) // Windows Vista or newer
        {
            _soundOut = new WasapiOut(false, CSCore.CoreAudioAPI.AudioClientShareMode.Shared, 100);
        }
        else // pre-Vista fallback
        {
            _soundOut = new DirectSoundOut() { Latency = 100 };
        }

        _soundOut.Initialize(source);
        _soundOut.Stopped += OnPlaybackStopped;
        _soundOut.Play();
    }
    catch (CSCore.CoreAudioAPI.CoreAudioAPIException ex)
    {
        MessageBox.Show("Unknown CoreAudioAPI error: 0x" + ex.ErrorCode.ToString("x"), "Error",
            MessageBoxButton.OK, MessageBoxImage.Error);
        Stop();
    }
    catch (Exception ex)
    {
        MessageBox.Show("Unknown error: " + ex.Message, "Error",
            MessageBoxButton.OK, MessageBoxImage.Error);
        Stop();
    }
}
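// OnPlaybackStopped is subscribed above but its body is not shown. The sketch below is only an
// assumption of what a minimal handler could look like; the original project's implementation may
// differ. It assumes CSCore's ISoundOut.Stopped event, whose PlaybackStoppedEventArgs exposes
// HasError and Exception for the failure (if any) that ended playback.
// requires: using CSCore.SoundOut; (for PlaybackStoppedEventArgs)
private void OnPlaybackStopped(object sender, PlaybackStoppedEventArgs e)
{
    // a clean stop raises this event as well, so only report when playback ended with an error
    if (e.HasError)
    {
        MessageBox.Show("Playback stopped unexpectedly: " + e.Exception.Message, "Error",
            MessageBoxButton.OK, MessageBoxImage.Error);
    }
}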
private void Stop()
{
    Running = false;

    if (_soundIn != null)
    {
        _soundIn.Stop();
        _soundIn.Dispose();
        _soundIn = null;
    }

    if (_source != null)
    {
        _source.Dispose();
        _source = null;
    }

    _disableTimer.Stop();
    _volumeTimer.Stop();
    _mayStop = false;
}
private void Start()
{
    if (_starting)
    {
        return;
    }

    Logger.Debug("Starting audio capture for device: {0}", Device?.FriendlyName ?? "default");
    _starting = true;

    try
    {
        Stop();

        if (Type == MmDeviceType.Input)
        {
            _soundIn = Device != null ? new WasapiCapture { Device = Device } : new WasapiCapture();
        }
        else
        {
            _soundIn = Device != null ? new WasapiLoopbackCapture { Device = Device } : new WasapiLoopbackCapture();
        }
        _soundIn.Initialize();

        var soundInSource = new SoundInSource(_soundIn);
        _source = soundInSource.ToSampleSource().AppendSource(x => new GainSource(x), out _volume);

        // create a spectrum provider which supplies FFT data based on the captured samples
        _spectrumProvider = new BasicSpectrumProvider(_source.WaveFormat.Channels, _source.WaveFormat.SampleRate, FftSize);

        // the SingleBlockNotificationStream is used to intercept the samples as they are read
        var notificationSource = new SingleBlockNotificationStream(_source);
        // pass the intercepted samples as input data to the spectrum provider (which will calculate an FFT based on them)
        notificationSource.SingleBlockRead += (s, a) => _spectrumProvider.Add(a.Left, a.Right);

        var waveSource = notificationSource.ToWaveSource(16);

        // we need to read from our source, otherwise SingleBlockRead is never raised and the spectrum provider is not populated
        var buffer = new byte[waveSource.WaveFormat.BytesPerSecond / 2];
        soundInSource.DataAvailable += (s, aEvent) =>
        {
            while (waveSource.Read(buffer, 0, buffer.Length) > 0)
            {
            }
        };

        _lineSpectrum = null;
        _singleSpectrum = new SingleSpectrum(FftSize, _spectrumProvider);
        _mayStop = false;

        _disableTimer.Start();
        _volumeTimer.Start();

        _soundIn.Start();
        Running = true;
    }
    catch (Exception e)
    {
        Logger.Warn(e, "Failed to start WASAPI audio capture");
    }

    _starting = false;
}
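// GainSource is used in both snippets above but its definition is not shown. The sketch below is
// an assumption of what a minimal version could look like: an ISampleSource decorator that
// multiplies every sample by a linear gain factor. The real class presumably also has an
// IWaveSource-based constructor or overload, since OpenSource builds it from an IWaveSource.
// requires: using System; using CSCore;
public class GainSource : ISampleSource
{
    private readonly ISampleSource _source;

    public GainSource(ISampleSource source)
    {
        _source = source ?? throw new ArgumentNullException(nameof(source));
    }

    // linear gain factor; 1.0f leaves the signal unchanged
    public float Gain { get; set; } = 1.0f;

    public bool CanSeek => _source.CanSeek;
    public WaveFormat WaveFormat => _source.WaveFormat;
    public long Position
    {
        get => _source.Position;
        set => _source.Position = value;
    }
    public long Length => _source.Length;

    public int Read(float[] buffer, int offset, int count)
    {
        // read from the wrapped source, then scale the returned samples in place
        int read = _source.Read(buffer, offset, count);
        for (int i = offset; i < offset + read; i++)
        {
            buffer[i] *= Gain;
        }
        return read;
    }

    public void Dispose() => _source.Dispose();
}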