// async void is acceptable here only because this is a top-level UI event handler.
private async void Button_Click(object sender, RoutedEventArgs e)
{
    var selectedFile = await SelectPlaybackFile();
    if (selectedFile == null)
    {
        // FIX: bail out when file selection is cancelled; previously a null file
        // was assigned to the provider and playback was restarted anyway.
        return;
    }

    // Stop any current playback BEFORE swapping the file, so the provider never
    // plays/stops with a half-switched state (original assigned the file first).
    if (_audioProvider.IsPlaying)
    {
        _audioProvider.Stop();
    }
    _audioProvider.CurrentPlayingFile = selectedFile;
    await _audioProvider.Play();

    // LineSpectrum renders FFT data; the audio provider acts as its spectrum source.
    _lineSpectrum = new LineSpectrum(FftSize)
    {
        SpectrumProvider = _audioProvider,
        UseAverage = true,
        BarCount = 100,
        BarSpacing = 2,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt,
        MinimumFrequency = 20,
        MaximumFrequency = 20000
    };
}
// Wires a sample source into an FFT pipeline and a line-spectrum renderer.
private void SetupSampleSource(ISampleSource aSampleSource)
{
    const FftSize fftSize = FftSize.Fft4096;

    // The provider performs the FFT over the samples pushed into it below.
    var provider = new BasicSpectrumProvider(
        aSampleSource.WaveFormat.Channels,
        aSampleSource.WaveFormat.SampleRate,
        fftSize);

    // The line spectrum renders that FFT data as bars; it pulls from the provider.
    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = provider,
        UseAverage = true,
        BarCount = 100,
        BarSpacing = 2,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };

    // Intercept every sample that flows through and feed it to the FFT provider.
    var interceptor = new SingleBlockNotificationStream(aSampleSource);
    interceptor.SingleBlockRead += (sender, args) => provider.Add(args.Left, args.Right);

    _waveSource = interceptor.ToWaveSource(16);
}
// Starts a WASAPI loopback capture and feeds its samples into the spectrum provider.
public void InitialiseAudioProgram()
{
    _soundIn = new WasapiLoopbackCapture();
    _soundIn.Initialize();

    var soundInSource = new SoundInSource(_soundIn);
    ISampleSource source = soundInSource.ToSampleSource();

    // BUG FIX: the spectrum provider must match the actual capture format; the previous
    // hard-coded (2 channels, 48000 Hz) silently produced wrong FFT data on any device
    // with a different channel count or sample rate.
    var spectrumProvider = new SpectrumProvider(source.WaveFormat.Channels, source.WaveFormat.SampleRate, FftSize.Fft4096);
    _spectrum = new LineSpectrum(spectrumProvider, _barCount);

    // Intercept played samples and push them into the FFT provider.
    var notificationSource = new SingleBlockNotificationStream(source);
    notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
    _source = notificationSource.ToWaveSource(16);

    // We must read from the source, otherwise SingleBlockRead is never raised.
    byte[] buffer = new byte[_source.WaveFormat.BytesPerSecond / 2];
    soundInSource.DataAvailable += (src, evt) =>
    {
        while (_source.Read(buffer, 0, buffer.Length) > 0)
        {
            // Intentionally empty: reading is what drives the notification stream.
        }
    };

    _soundIn.Start();

    // Only slot 0 gets the audio sequence; the rest stay empty until assigned elsewhere.
    for (int i = 0; i < MatrixCount; i++)
    {
        _Programs[i] = i == 0 ? AudioSequence().GetEnumerator() : null;
    }
}
// Captures system loopback audio, writes it to a WAV file, and feeds two
// spectrum providers (line spectrum + spectrogram) from the intercepted samples.
public Spectrograph()
{
    InitializeComponent();

    _soundIn = new WasapiLoopbackCapture();
    _soundIn.Initialize();

    var soundInSource = new SoundInSource(_soundIn);
    var singleBlockNotificationStream = new SingleBlockNotificationStream(soundInSource);
    _source = singleBlockNotificationStream.ToWaveSource();

    if (!Directory.Exists(_loopbackDir))
    {
        Directory.CreateDirectory(_loopbackDir);
    }
    // FIX: build the path with Path.Combine rather than string concatenation —
    // robust against trailing separators in _loopbackDir.
    _writer = new WaveWriter(Path.Combine(_loopbackDir, "loopback.wav"), _source.WaveFormat);

    // Reading from _source drives the notification stream; every chunk read is
    // also persisted to the WAV file.
    byte[] buffer = new byte[_source.WaveFormat.BytesPerSecond / 2];
    soundInSource.DataAvailable += (s, e) =>
    {
        int read;
        while ((read = _source.Read(buffer, 0, buffer.Length)) > 0)
        {
            _writer.Write(buffer, 0, read);
        }
    };

    _lineSpectrumProvider = new BasicSpectrumProvider(_source.WaveFormat.Channels, _source.WaveFormat.SampleRate, fftSize);
    _spectrogramProvider = new BasicSpectrumProvider(_source.WaveFormat.Channels, _source.WaveFormat.SampleRate, fftSize);
    singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;

    _soundIn.Start();

    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = _lineSpectrumProvider,
        UseAverage = true,
        BarCount = 22,
        BarSpacing = 1,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };
    _oscilloscope = new Oscilloscope();
    _spectrogram = new Spectrogram(fftSize)
    {
        SpectrumProvider = _spectrogramProvider,
        UseAverage = true,
        // One bar per FFT bin gives the spectrogram full frequency resolution.
        BarCount = (int)fftSize,
        BarSpacing = 0,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };
    _keyboardVisualizer = new KeyboardVisualizer();

    UpdateTimer.Start();
}
// Unity lifecycle hook: sets up a WASAPI capture (microphone or speaker loopback)
// and an FFT pipeline that populates lineSpectrum with spectrum data.
void Awake()
{
    barData = new float[numBars];

    // This uses the WASAPI API to get sound data from the selected endpoint.
    switch (audioType)
    {
        case AudioSourceType.Microphone:
            capture = new WasapiCapture();
            break;
        case AudioSourceType.Speakers:
            capture = new WasapiLoopbackCapture();
            break;
        default:
            // FIX: an unhandled enum value previously left 'capture' null and
            // crashed with a NullReferenceException at capture.Initialize().
            throw new System.InvalidOperationException("Unhandled AudioSourceType: " + audioType);
    }
    capture.Initialize();

    // Wrap the capture as a wave source.
    IWaveSource source = new SoundInSource(capture);

    // From https://github.com/filoe/cscore/blob/master/Samples/WinformsVisualization/Form1.cs
    // Fft4096 is the typical size; raise it for higher frequency detail.
    fftSize = FftSize.Fft4096;
    fftBuffer = new float[(int)fftSize];

    // These classes produce the actual spectrum data; most LineSpectrum
    // properties are tuned from the editor, so the values here are defaults.
    spectrumProvider = new BasicSpectrumProvider(capture.WaveFormat.Channels, capture.WaveFormat.SampleRate, fftSize);
    lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        UseAverage = isAverage,
        BarCount = numBars,
        BarSpacing = 2,
        IsXLogScale = false,
        ScalingStrategy = ScalingStrategy.Linear
    };

    // Notifies us whenever a sample block is available for the spectrum.
    var notificationSource = new SingleBlockNotificationStream(source.ToSampleSource());
    notificationSource.SingleBlockRead += NotificationSource_SingleBlockRead;

    // Reading from finalSource (in Capture_DataAvailable) is what makes data
    // actually flow through the notification stream.
    finalSource = notificationSource.ToWaveSource();
    capture.DataAvailable += Capture_DataAvailable;
    capture.Start();
}
// Unity lifecycle hook: starts a WASAPI loopback capture and wires it into a
// line-spectrum FFT pipeline. Samples are delivered via NotificationSource_SingleBlockRead,
// and Capture_DataAvailable drains finalSource to keep data flowing.
void Start()
{
    loopbackCapture = new WasapiLoopbackCapture();
    loopbackCapture.Initialize();

    soundInSource = new SoundInSource(loopbackCapture);

    fftBuffer = new float[(int)CFftSize];
    basicSpectrumProvider = new BasicSpectrumProvider(
        soundInSource.WaveFormat.Channels,
        soundInSource.WaveFormat.SampleRate,
        CFftSize);

    lineSpectrum = new LineSpectrum(CFftSize)
    {
        SpectrumProvider = basicSpectrumProvider,
        BarCount = numBars,
        UseAverage = true,
        IsXLogScale = false,
        ScalingStrategy = ScalingStrategy.Linear
    };

    // Intercept sample blocks so the spectrum provider gets fed.
    var notificationSource = new SingleBlockNotificationStream(soundInSource.ToSampleSource());
    notificationSource.SingleBlockRead += NotificationSource_SingleBlockRead;
    finalSource = notificationSource.ToWaveSource();

    loopbackCapture.DataAvailable += Capture_DataAvailable;
    loopbackCapture.Start();
    // CLEANUP: removed a large block of commented-out alternative wiring
    // (a DataAvailable read-loop pushing spectrum data to receiveAudio) that
    // duplicated the active implementation above.
}
/// <summary>
/// Opens the default render device via WASAPI loopback and wires its output
/// into an FFT provider backing <c>iLineSpectrum</c>.
/// </summary>
private void StartAudioVisualization()
{
    // Loopback capture opens the default render device by itself, so no explicit
    // MMDeviceEnumerator.DefaultAudioEndpoint(DataFlow.Render, ...) call is needed.
    iSoundIn = new WasapiLoopbackCapture();
    iSoundIn.Initialize();

    var input = new SoundInSource(iSoundIn);
    ISampleSource samples = input.ToSampleSource();

    const FftSize fftSize = FftSize.Fft2048;

    // FFT provider fed from the intercepted sample blocks below.
    var fftProvider = new BasicSpectrumProvider(samples.WaveFormat.Channels, samples.WaveFormat.SampleRate, fftSize);

    // The rendering-related properties are overridden downstream ("hacked"),
    // so UseAverage / IsXLogScale / ScalingStrategy values here do not matter.
    iLineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = fftProvider,
        UseAverage = false,
        BarCount = 16,
        BarSpacing = 1,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Decibel
    };

    // Intercept the played samples and push them into the FFT provider.
    var interceptor = new SingleBlockNotificationStream(samples);
    interceptor.SingleBlockRead += (sender, args) => fftProvider.Add(args.Left, args.Right);
    iWaveSource = interceptor.ToWaveSource(16);

    // Draining the wave source is required — otherwise SingleBlockRead never
    // fires and the spectrum provider stays empty.
    var readBuffer = new byte[iWaveSource.WaveFormat.BytesPerSecond / 2];
    input.DataAvailable += (sender, args) =>
    {
        while (iWaveSource.Read(readBuffer, 0, readBuffer.Length) > 0)
        {
            // Intentionally empty: reading drives the pipeline.
        }
    };

    // Begin recording.
    iSoundIn.Start();
}
// Opens an audio file chosen by the user, plays it (downmixed to mono), and
// wires its samples into the line-spectrum / voiceprint visualizations.
private void openToolStripMenuItem_Click(object sender, EventArgs e)
{
    // FIX: OpenFileDialog is IDisposable — dispose it deterministically.
    using (var openFileDialog = new OpenFileDialog
    {
        Filter = CodecFactory.SupportedFilesFilterEn,
        Title = "Select a file..."
    })
    {
        if (openFileDialog.ShowDialog() != DialogResult.OK)
        {
            return;
        }

        Stop();

        const FftSize fftSize = FftSize.Fft4096;

        IWaveSource source = CodecFactory.Instance.GetCodec(openFileDialog.FileName);
        // FFT provider fed from the intercepted sample blocks below.
        var spectrumProvider = new BasicSpectrumProvider(source.WaveFormat.Channels, source.WaveFormat.SampleRate, fftSize);

        _lineSpectrum = new LineSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            BarCount = 50,
            BarSpacing = 2,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };
        _voicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            PointCount = 200,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };

        // Intercept the played samples so the spectrum provider gets fed.
        var notificationSource = new SingleBlockNotificationStream(source.ToSampleSource());
        notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
        _source = notificationSource.ToWaveSource(16);

        _soundOut = new WasapiOut();
        _soundOut.Initialize(_source.ToMono());
        _soundOut.Play();

        timer1.Start();

        propertyGridTop.SelectedObject = _lineSpectrum;
        propertyGridBottom.SelectedObject = _voicePrint3DSpectrum;
    }
}
// Builds a small-FFT (128-point) spectrum pipeline from the given sample source.
private void SetupSampleSource(ISampleSource aSampleSource)
{
    const FftSize fftSize = FftSize.Fft128;

    // FFT provider fed by the interceptor below.
    var fftProvider = new BasicSpectrumProvider(
        aSampleSource.WaveFormat.Channels,
        aSampleSource.WaveFormat.SampleRate,
        fftSize);

    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = fftProvider,
        UseAverage = true,
        BarCount = 50,
        BarSpacing = 2,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Linear
    };

    // Every sample block that flows through is handed to the FFT provider.
    var interceptor = new SingleBlockNotificationStream(aSampleSource);
    interceptor.SingleBlockRead += (sender, args) => fftProvider.Add(args.Left, args.Right);

    _source = interceptor.ToWaveSource(16);
}
// Refreshes the form's channel list and spectrum from the currently selected file.
private void FillForm()
{
    // Without a selected file there is nothing to edit or display.
    if (selectedFile == null)
    {
        editToolStripMenuItem.Enabled = false;
        return;
    }

    editToolStripMenuItem.Enabled = true;

    // Rebuild the channel combo box: an "all channels" entry followed by one
    // entry per channel in the file.
    channelCB.Items.Clear();
    channelCB.Items.Add("Channel [All]");
    foreach (var channel in selectedFile.Channels)
    {
        channelCB.Items.Add(channel.Name);
    }
    channelCB.SelectedIndex = selectedFile.SelectedChannelIndex;

    // Reuse the line spectrum owned by the selected channel's audio player.
    lineSpectrum = selectedFile.GetCurrentChannel().audioPlayer._lineSpectrum;
}
/// <summary>
/// Stops the loopback capture and releases the wave source and capture device.
/// Safe to call when visualization was never started (all fields null).
/// </summary>
private void StopAudioVisualization()
{
    // Stop capturing first so no callbacks run while we tear down.
    iSoundIn?.Stop();

    // Dispose the wave source before the capture device it wraps.
    if (iWaveSource != null)
    {
        iWaveSource.Dispose();
        iWaveSource = null;
    }

    if (iSoundIn != null)
    {
        iSoundIn.Dispose();
        iSoundIn = null;
    }

    iLineSpectrum = null;
}
// Starts the capture and streams spectrum snapshots to the receiveAudio callback.
internal void StartListen()
{
    capture.Initialize();

    soundInSource = new SoundInSource(capture);
    basicSpectrumProvider = new BasicSpectrumProvider(
        soundInSource.WaveFormat.Channels,
        soundInSource.WaveFormat.SampleRate,
        C_FftSize);

    lineSpectrum = new LineSpectrum(C_FftSize, minFrequency, maxFrequency)
    {
        SpectrumProvider = basicSpectrumProvider,
        BarCount = spectrumSize,
        UseAverage = true,
        IsXLogScale = true,
        ScalingStrategy = EScalingStrategy.Sqrt
    };

    capture.Start();

    ISampleSource samples = soundInSource.ToSampleSource();
    singleBlockNotificationStream = new SingleBlockNotificationStream(samples);
    realtimeSource = singleBlockNotificationStream.ToWaveSource();

    // Draining realtimeSource drives the notification stream; each drained chunk
    // yields one spectrum snapshot for the subscriber.
    byte[] readBuffer = new byte[realtimeSource.WaveFormat.BytesPerSecond / 128];
    soundInSource.DataAvailable += (sender, args) =>
    {
        while (realtimeSource.Read(readBuffer, 0, readBuffer.Length) > 0)
        {
            var spectrumData = lineSpectrum.GetSpectrumData(C_MaxAudioValue);
            if (spectrumData != null)
            {
                receiveAudio?.Invoke(spectrumData);
            }
        }
    };

    singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;
}
// Per-frame layer update: keeps the audio capture bound to the configured device
// and refreshes the cached line spectrum and its rendered line values.
public void Update(LayerModel layerModel, ModuleDataModel dataModel, bool isPreview = false)
{
    layerModel.ApplyProperties(true);
    var newProperties = (AudioPropertiesModel)layerModel.Properties;
    if (_properties == null)
    {
        _properties = newProperties;
    }

    SubscribeToAudioChange();

    // (Re)acquire the capture when there is none yet or the configured device changed.
    if (_audioCapture == null || newProperties.Device != _properties.Device || newProperties.DeviceType != _properties.DeviceType)
    {
        var device = GetMmDevice();
        if (device != null)
        {
            _audioCapture = _audioCaptureManager.GetAudioCapture(device, newProperties.DeviceType);
        }
    }

    _properties = newProperties;
    if (_audioCapture == null)
    {
        return;
    }

    _audioCapture.Pulse();

    // Vertical directions spread bars across the width; horizontal across the height.
    var direction = ((AudioPropertiesModel)layerModel.Properties).Direction;
    int currentLines;
    double currentHeight;
    if (direction == Direction.BottomToTop || direction == Direction.TopToBottom)
    {
        currentLines = (int)layerModel.Width;
        currentHeight = layerModel.Height;
    }
    else
    {
        currentLines = (int)layerModel.Height;
        currentHeight = layerModel.Width;
    }

    // Get a new line spectrum if the lines changed, it is null, or the layer
    // hasn't rendered for a few frames.
    // FIX: use UtcNow for elapsed-time measurement — DateTime.Now jumps on
    // DST/clock changes.
    if (_lines != currentLines || _lineSpectrum == null || DateTime.UtcNow - _lastRender > TimeSpan.FromMilliseconds(100))
    {
        _lines = currentLines;
        _lineSpectrum = _audioCapture.GetLineSpectrum(_lines, ScalingStrategy.Decibel);
    }

    // FIX: read values from the cached _lineSpectrum (as the sibling Update
    // implementation does) instead of requesting a second spectrum every frame,
    // which made the cache pointless and doubled the per-frame work.
    var newLineValues = _lineSpectrum?.GetLineValues(currentHeight);
    if (newLineValues != null)
    {
        _lineValues = newLineValues;
        _lastRender = DateTime.UtcNow;
    }
}
// Per-frame layer update: keeps the audio capture bound to the configured device
// and refreshes the cached line spectrum and its rendered line values.
public void Update(LayerModel layerModel, ModuleDataModel dataModel, bool isPreview = false)
{
    layerModel.ApplyProperties(true);
    var incoming = (AudioPropertiesModel)layerModel.Properties;
    if (_properties == null)
    {
        _properties = incoming;
    }

    SubscribeToAudioChange();

    // (Re)acquire the capture when there is none yet or the configured device changed.
    if (_audioCapture == null
        || incoming.Device != _properties.Device
        || incoming.DeviceType != _properties.DeviceType)
    {
        var mmDevice = GetMmDevice();
        if (mmDevice != null)
        {
            _audioCapture = _audioCaptureManager.GetAudioCapture(mmDevice, incoming.DeviceType);
        }
    }

    _properties = incoming;
    if (_audioCapture == null)
    {
        return;
    }

    _audioCapture.Pulse();

    // Vertical directions spread bars across the width; horizontal across the height.
    var orientation = ((AudioPropertiesModel)layerModel.Properties).Direction;
    int lineCount;
    double extent;
    if (orientation == Direction.BottomToTop || orientation == Direction.TopToBottom)
    {
        lineCount = (int)layerModel.Width;
        extent = layerModel.Height;
    }
    else
    {
        lineCount = (int)layerModel.Height;
        extent = layerModel.Width;
    }

    // Rebuild the cached line spectrum when the bar count changed or none exists yet.
    if (_lines != lineCount || _lineSpectrum == null)
    {
        _lines = lineCount;
        _lineSpectrum = _audioCapture.GetLineSpectrum(_lines, ScalingStrategy.Decibel);
        if (_lineSpectrum == null)
        {
            return;
        }
    }

    var freshValues = _lineSpectrum?.GetLineValues(extent);
    if (freshValues != null)
    {
        _lineValues = freshValues;
    }
}
// Starts capturing audio (loopback or default microphone), optionally runs the
// samples through the configured biquad filter chain, and streams spectrum
// snapshots to the _receiveAudio callback as data is drained.
public void StartListen()
{
    // Select the capture endpoint based on the configured capture type.
    switch (_captureType)
    {
        case WasapiCaptureType.Loopback:
            _wasapiCapture = new WasapiLoopbackCapture();
            break;
        case WasapiCaptureType.Microphone:
            // Resolve the default communications-role capture device explicitly.
            MMDevice defaultMicrophone;
            using (var deviceEnumerator = new MMDeviceEnumerator())
            {
                defaultMicrophone = deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Communications);
            }
            _wasapiCapture = new WasapiCapture();
            _wasapiCapture.Device = defaultMicrophone;
            break;
        default:
            throw new InvalidOperationException("Unhandled WasapiCaptureType");
    }
    _wasapiCapture.Initialize();

    _soundInSource = new SoundInSource(_wasapiCapture);

    // FFT provider fed from the notification stream subscribed at the bottom.
    _basicSpectrumProvider = new BasicSpectrumProvider(_soundInSource.WaveFormat.Channels, _soundInSource.WaveFormat.SampleRate, CFftSize);

    _lineSpectrum = new LineSpectrum(CFftSize, _minFrequency, _maxFrequency)
    {
        SpectrumProvider = _basicSpectrumProvider,
        BarCount = _spectrumSize,
        UseAverage = true,
        IsXLogScale = true,
        ScalingStrategy = _scalingStrategy
    };

    _wasapiCapture.Start();

    var sampleSource = _soundInSource.ToSampleSource();

    // Chain each configured filter onto the sample source. AppendSource wraps the
    // current source in a BiQuadFilterSource, so the cast below always succeeds
    // for the just-appended wrapper.
    if (_filters != null && _filters.Length > 0)
    {
        foreach (var filter in _filters)
        {
            sampleSource = sampleSource.AppendSource(x => new BiQuadFilterSource(x));
            var biQuadSource = (BiQuadFilterSource)sampleSource;
            switch (filter.Type)
            {
                case WasapiAudioFilterType.LowPass:
                    biQuadSource.Filter = new LowpassFilter(_soundInSource.WaveFormat.SampleRate, filter.Frequency);
                    break;
                case WasapiAudioFilterType.HighPass:
                    biQuadSource.Filter = new HighpassFilter(_soundInSource.WaveFormat.SampleRate, filter.Frequency);
                    break;
                case WasapiAudioFilterType.BandPass:
                    biQuadSource.Filter = new BandpassFilter(_soundInSource.WaveFormat.SampleRate, filter.Frequency);
                    break;
                // NOTE(review): unknown filter types are silently skipped — confirm intended.
            }
        }
    }

    _singleBlockNotificationStream = new SingleBlockNotificationStream(sampleSource);
    _realtimeSource = _singleBlockNotificationStream.ToWaveSource();

    // Draining _realtimeSource drives the notification stream; each drained chunk
    // yields one spectrum snapshot for the subscriber.
    var buffer = new byte[_realtimeSource.WaveFormat.BytesPerSecond / 2];
    _soundInSource.DataAvailable += (s, ea) =>
    {
        while (_realtimeSource.Read(buffer, 0, buffer.Length) > 0)
        {
            float[] spectrumData = _lineSpectrum.GetSpectrumData(MaxAudioValue);
            if (spectrumData != null)
            {
                _receiveAudio?.Invoke(spectrumData);
            }
        }
    };

    _singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;
}
// Opens an audio file chosen by the user, routes it through a pitch shifter,
// plays it, and wires its samples into the spectrum visualizations.
private void openToolStripMenuItem_Click(object sender, EventArgs e)
{
    // FIX: OpenFileDialog is IDisposable — dispose it deterministically.
    using (var openFileDialog = new OpenFileDialog
    {
        Filter = CodecFactory.SupportedFilesFilterEn,
        Title = "Select a file..."
    })
    {
        if (openFileDialog.ShowDialog() != DialogResult.OK)
        {
            return;
        }

        Stop();

        const FftSize fftSize = FftSize.Fft4096;

        // Open the selected file and append a pitch shifter to the chain.
        ISampleSource source = CodecFactory.Instance.GetCodec(openFileDialog.FileName)
            .ToSampleSource()
            .AppendSource(x => new PitchShifter(x), out _pitchShifter);

        // FFT provider fed from the intercepted sample blocks below.
        var spectrumProvider = new BasicSpectrumProvider(source.WaveFormat.Channels, source.WaveFormat.SampleRate, fftSize);

        _lineSpectrum = new LineSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            BarCount = 50,
            BarSpacing = 2,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };
        _voicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            PointCount = 200,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };

        // Intercept the played samples so the spectrum provider gets fed.
        var notificationSource = new SingleBlockNotificationStream(source);
        notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
        _source = notificationSource.ToWaveSource(16);

        // Play the audio.
        _soundOut = new WasapiOut();
        _soundOut.Initialize(_source);
        _soundOut.Play();

        timer1.Start();

        propertyGridTop.SelectedObject = _lineSpectrum;
        propertyGridBottom.SelectedObject = _voicePrint3DSpectrum;
    }
}
// Builds one shared FFT provider and the full family of spectrum renderers
// (generic, digital bass/mid/treble bands, voiceprints) fed from the same source.
private void SetupSampleSource(ISampleSource aSampleSource)
{
    const FftSize fftSize = FftSize.Fft4096;

    // Single FFT provider shared by every renderer below; fed by the interceptor.
    spectrumProvider = new BasicSpectrumProvider(
        aSampleSource.WaveFormat.Channels,
        aSampleSource.WaveFormat.SampleRate,
        fftSize);

    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Sqrt,
        UseAverage = true,
        IsXLogScale = true,
        BarCount = 50,
        BarSpacing = 2
    };

    // Single-point voiceprint restricted to the 20–100 Hz band.
    _voicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Linear,
        UseAverage = true,
        IsXLogScale = false,
        PointCount = 1,
        MinimumFrequency = 20,
        MaximumFrequency = 100
    };

    _DigitallineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Sqrt,
        UseAverage = true,
        IsXLogScale = true,
        BarCount = 50,
        BarSpacing = 2
    };

    // Three single-point band monitors: bass (20–250 Hz, red), mids
    // (250–3000 Hz, green) and treble (3–16 kHz, blue).
    _DigitalBassPrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        Colors = new[] { Color.Black, Color.Red },
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Linear,
        UseAverage = false,
        IsXLogScale = true,
        PointCount = 1,
        MinimumFrequency = 20,
        MaximumFrequency = 250
    };
    _DigitalMedioPrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        Colors = new[] { Color.Black, Color.Green },
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Linear,
        UseAverage = false,
        IsXLogScale = true,
        PointCount = 1,
        MinimumFrequency = 250,
        MaximumFrequency = 3000
    };
    _DigitaltreblePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        Colors = new[] { Color.Black, Color.Blue },
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Linear,
        UseAverage = false,
        IsXLogScale = true,
        PointCount = 1,
        MinimumFrequency = 3000,
        MaximumFrequency = 16000
    };

    _GenericlineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Sqrt,
        UseAverage = true,
        IsXLogScale = true,
        BarCount = 50,
        BarSpacing = 2
    };
    _GenericvoicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        ScalingStrategy = ScalingStrategy.Sqrt,
        UseAverage = true,
        IsXLogScale = true,
        PointCount = 200,
        MinimumFrequency = 20,
        MaximumFrequency = 20000
    };

    // Intercept every sample block that is played and feed it to the FFT provider.
    var interceptor = new SingleBlockNotificationStream(aSampleSource);
    interceptor.SingleBlockRead += (sender, args) => spectrumProvider.Add(args.Left, args.Right);

    _source = interceptor.ToWaveSource(16);
}