private void SetupSampleSource(ISampleSource aSampleSource)
{
    // FFT resolution used by every spectrum renderer below.
    const FftSize fftSize = FftSize.Fft4096;

    // Provider that accumulates raw samples and exposes FFT frames on demand.
    var fftProvider = new BasicSpectrumProvider(
        aSampleSource.WaveFormat.Channels,
        aSampleSource.WaveFormat.SampleRate,
        fftSize);

    // Bar renderer fed by the provider's FFT data.
    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = fftProvider,
        UseAverage = true,
        BarCount = 50,
        BarSpacing = 2,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };

    // Tap the sample stream: each block that is played is also pushed into
    // the FFT provider, while playback itself is unaffected.
    var tap = new SingleBlockNotificationStream(aSampleSource);
    tap.SingleBlockRead += (sender, args) => fftProvider.Add(args.Left, args.Right);

    _waveSource = tap.ToWaveSource(16);
}
public Spectrograph()
{
    InitializeComponent();

    // Capture whatever the default render device is currently playing.
    _soundIn = new WasapiLoopbackCapture();
    _soundIn.Initialize();

    var soundInSource = new SoundInSource(_soundIn);
    var singleBlockNotificationStream = new SingleBlockNotificationStream(soundInSource);
    _source = singleBlockNotificationStream.ToWaveSource();

    if (!Directory.Exists(_loopbackDir))
    {
        Directory.CreateDirectory(_loopbackDir);
    }

    // Path.Combine instead of string concatenation, so the separator is
    // correct whether or not _loopbackDir ends with a slash.
    _writer = new WaveWriter(Path.Combine(_loopbackDir, "loopback.wav"), _source.WaveFormat);

    // Drain the chain whenever the capture has data: reading is what pulls
    // samples through the notification stream (firing SingleBlockRead) and
    // into the wave writer.
    byte[] buffer = new byte[_source.WaveFormat.BytesPerSecond / 2];
    soundInSource.DataAvailable += (s, e) =>
    {
        int read;
        while ((read = _source.Read(buffer, 0, buffer.Length)) > 0)
        {
            _writer.Write(buffer, 0, read);
        }
    };

    // Separate providers so the line spectrum and the spectrogram can be
    // fed independently from the intercepted samples.
    _lineSpectrumProvider = new BasicSpectrumProvider(_source.WaveFormat.Channels, _source.WaveFormat.SampleRate, fftSize);
    _spectrogramProvider = new BasicSpectrumProvider(_source.WaveFormat.Channels, _source.WaveFormat.SampleRate, fftSize);

    singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;

    _soundIn.Start();

    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = _lineSpectrumProvider,
        UseAverage = true,
        BarCount = 22,
        BarSpacing = 1,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };
    _oscilloscope = new Oscilloscope();
    _spectrogram = new Spectrogram(fftSize)
    {
        SpectrumProvider = _spectrogramProvider,
        UseAverage = true,
        BarCount = (int)fftSize,
        BarSpacing = 0,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };
    _keyboardVisualizer = new KeyboardVisualizer();

    UpdateTimer.Start();
}
// Start is called before the first frame update
void Start()
{
    // Capture the audio the OS is currently rendering (loopback of the
    // default render device).
    loopbackCapture = new WasapiLoopbackCapture();
    loopbackCapture.Initialize();

    soundInSource = new SoundInSource(loopbackCapture);

    fftBuffer = new float[(int)CFftSize];
    basicSpectrumProvider = new BasicSpectrumProvider(
        soundInSource.WaveFormat.Channels,
        soundInSource.WaveFormat.SampleRate,
        CFftSize);

    lineSpectrum = new LineSpectrum(CFftSize)
    {
        SpectrumProvider = basicSpectrumProvider,
        BarCount = numBars,
        UseAverage = true,
        IsXLogScale = false,
        ScalingStrategy = ScalingStrategy.Linear
    };

    // Intercept each sample block so it can be forwarded to the spectrum
    // provider. NOTE: finalSource must actually be read (done in
    // Capture_DataAvailable) or SingleBlockRead never fires.
    var notificationSource = new SingleBlockNotificationStream(soundInSource.ToSampleSource());
    notificationSource.SingleBlockRead += NotificationSource_SingleBlockRead;
    finalSource = notificationSource.ToWaveSource();

    loopbackCapture.DataAvailable += Capture_DataAvailable;
    loopbackCapture.Start();
}
void Awake()
{
    barData = new float[numBars];

    // Pick the WASAPI capture endpoint: default microphone, or a loopback
    // of whatever the speakers are playing.
    switch (audioType)
    {
        case AudioSourceType.Microphone:
            capture = new WasapiCapture();
            break;
        case AudioSourceType.Speakers:
            capture = new WasapiLoopbackCapture();
            break;
        default:
            // Fail fast instead of leaving 'capture' null and throwing a
            // NullReferenceException on capture.Initialize() below.
            throw new InvalidOperationException($"Unhandled AudioSourceType: {audioType}");
    }
    capture.Initialize();

    // Get our capture as a source.
    IWaveSource source = new SoundInSource(capture);

    // From https://github.com/filoe/cscore/blob/master/Samples/WinformsVisualization/Form1.cs
    // This is the typical size; raise it for higher frequency detail.
    fftSize = FftSize.Fft4096;

    // Actual fft data buffer.
    fftBuffer = new float[(int)fftSize];

    // These classes produce the spectrum data. Most LineSpectrum settings
    // are overridden in the editor, so the values here are only defaults.
    spectrumProvider = new BasicSpectrumProvider(capture.WaveFormat.Channels, capture.WaveFormat.SampleRate, fftSize);
    lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        UseAverage = isAverage,
        BarCount = numBars,
        BarSpacing = 2,
        IsXLogScale = false,
        ScalingStrategy = ScalingStrategy.Linear
    };

    // Tells us when data is available to send to our spectrum.
    var notificationSource = new SingleBlockNotificationStream(source.ToSampleSource());
    notificationSource.SingleBlockRead += NotificationSource_SingleBlockRead;

    // We must read from finalSource so data actually flows through the
    // chain (done in Capture_DataAvailable).
    finalSource = notificationSource.ToWaveSource();
    capture.DataAvailable += Capture_DataAvailable;
    capture.Start();
}
/// <summary>
/// Starts capturing the default render device (loopback) and wires the
/// captured samples into a spectrum provider for visualization.
/// </summary>
private void StartAudioVisualization()
{
    // Open the default device. Loopback capture targets the default render
    // device by itself, so no explicit Device assignment is needed.
    iSoundIn = new WasapiLoopbackCapture();
    iSoundIn.Initialize();

    SoundInSource soundInSource = new SoundInSource(iSoundIn);
    ISampleSource source = soundInSource.ToSampleSource();

    const FftSize fftSize = FftSize.Fft2048;

    // Provider that computes FFT data from the intercepted samples.
    BasicSpectrumProvider spectrumProvider = new BasicSpectrumProvider(
        source.WaveFormat.Channels, source.WaveFormat.SampleRate, fftSize);

    iLineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        UseAverage = false,                        // Does not matter since we hacked it
        BarCount = 16,
        BarSpacing = 1,
        IsXLogScale = true,                        // Does not matter since we hacked it
        ScalingStrategy = ScalingStrategy.Decibel  // Does not matter since we hacked it
    };

    // The SingleBlockNotificationStream intercepts the played samples and
    // passes them to the spectrum provider, which runs the FFT on them.
    var notificationSource = new SingleBlockNotificationStream(source);
    notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
    iWaveSource = notificationSource.ToWaveSource(16);

    // We must drain iWaveSource, otherwise SingleBlockRead never fires and
    // the spectrum provider is never populated. The bytes read are
    // discarded; only the side effect of reading matters.
    byte[] buffer = new byte[iWaveSource.WaveFormat.BytesPerSecond / 2];
    soundInSource.DataAvailable += (s, aEvent) =>
    {
        while (iWaveSource.Read(buffer, 0, buffer.Length) > 0)
        {
            // Intentionally empty: reading pulls data through the chain.
        }
    };

    // Start recording.
    iSoundIn.Start();
}
private void openToolStripMenuItem_Click(object sender, EventArgs e)
{
    // OpenFileDialog is IDisposable; dispose it deterministically instead
    // of leaking the handle until finalization.
    using (var openFileDialog = new OpenFileDialog()
    {
        Filter = CodecFactory.SupportedFilesFilterEn,
        Title = "Select a file..."
    })
    {
        if (openFileDialog.ShowDialog() != DialogResult.OK)
        {
            return;
        }

        Stop();

        const FftSize fftSize = FftSize.Fft4096;

        // Decode the selected file.
        IWaveSource source = CodecFactory.Instance.GetCodec(openFileDialog.FileName);

        // Provider that computes FFT data from the intercepted samples.
        var spectrumProvider = new BasicSpectrumProvider(source.WaveFormat.Channels,
            source.WaveFormat.SampleRate, fftSize);

        // Renderers fed by the provider.
        _lineSpectrum = new LineSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            BarCount = 50,
            BarSpacing = 2,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };
        _voicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            PointCount = 200,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };

        // Intercept the played samples and feed them into the provider.
        var notificationSource = new SingleBlockNotificationStream(source.ToSampleSource());
        notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
        _source = notificationSource.ToWaveSource(16);

        // Play the audio (downmixed to mono, as before).
        _soundOut = new WasapiOut();
        _soundOut.Initialize(_source.ToMono());
        _soundOut.Play();

        timer1.Start();

        propertyGridTop.SelectedObject = _lineSpectrum;
        propertyGridBottom.SelectedObject = _voicePrint3DSpectrum;
    }
}
private void SetupSampleSource(ISampleSource aSampleSource)
{
    // Small FFT: coarse frequency resolution, but cheap to compute.
    const FftSize fftSize = FftSize.Fft128;

    var fftProvider = new BasicSpectrumProvider(
        aSampleSource.WaveFormat.Channels,
        aSampleSource.WaveFormat.SampleRate,
        fftSize);

    _lineSpectrum = new LineSpectrum(fftSize)
    {
        SpectrumProvider = fftProvider,
        UseAverage = true,
        BarCount = 50,
        BarSpacing = 2,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Linear
    };

    // Tap the stream: every block read during playback is also pushed into
    // the FFT provider.
    var tap = new SingleBlockNotificationStream(aSampleSource);
    tap.SingleBlockRead += (sender, args) => fftProvider.Add(args.Left, args.Right);
    _source = tap.ToWaveSource(16);
}
/// <summary>
/// Wires a sample source into the spectrum analyzer: creates the FFT
/// provider, configures the analyzer, and exposes the tapped stream via
/// m_Source.
/// </summary>
public static void SetupSampleSource(ISampleSource aSampleSource)
{
    FFTSize = FftSize.Fft2048;

    var format = aSampleSource.WaveFormat;
    SpectrumProvider = new BasicSpectrumProvider(format.Channels, format.SampleRate, FFTSize);

    m_SpectrumAnalyzer = new SpectrumAnalyzer(FFTSize)
    {
        SpectrumProvider = SpectrumProvider,
        UseAverage = true,
        BarCount = NumberOfAnalysisBars,
        UseLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };

    // Every sample block that flows through is also handed to the provider,
    // which performs the FFT used by the analyzer.
    var tapped = new SingleBlockNotificationStream(aSampleSource);
    tapped.SingleBlockRead += (s, a) => SpectrumProvider.Add(a.Left, a.Right);
    m_Source = tapped.ToWaveSource(16);
}
internal void StartListen()
{
    capture.Initialize();
    soundInSource = new SoundInSource(capture);

    basicSpectrumProvider = new BasicSpectrumProvider(
        soundInSource.WaveFormat.Channels,
        soundInSource.WaveFormat.SampleRate,
        C_FftSize);

    lineSpectrum = new LineSpectrum(C_FftSize, minFrequency, maxFrequency)
    {
        SpectrumProvider = basicSpectrumProvider,
        BarCount = spectrumSize,
        UseAverage = true,
        IsXLogScale = true,
        ScalingStrategy = EScalingStrategy.Sqrt
    };

    capture.Start();

    ISampleSource sampleSource = soundInSource.ToSampleSource();
    singleBlockNotificationStream = new SingleBlockNotificationStream(sampleSource);
    realtimeSource = singleBlockNotificationStream.ToWaveSource();

    // Small drain buffer (~1/128 s of audio) keeps latency low; reading is
    // what pulls samples through the notification stream. Each pass
    // publishes fresh spectrum data to the subscriber, if any.
    byte[] readBuffer = new byte[realtimeSource.WaveFormat.BytesPerSecond / 128];
    soundInSource.DataAvailable += (s, ea) =>
    {
        while (realtimeSource.Read(readBuffer, 0, readBuffer.Length) > 0)
        {
            var spectrumData = lineSpectrum.GetSpectrumData(C_MaxAudioValue);
            if (spectrumData != null)
            {
                receiveAudio?.Invoke(spectrumData);
            }
        }
    };

    singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;
}
public void StartListen()
{
    // Choose the WASAPI endpoint to capture from.
    switch (_captureType)
    {
        case WasapiCaptureType.Loopback:
            _wasapiCapture = new WasapiLoopbackCapture();
            break;
        case WasapiCaptureType.Microphone:
            MMDevice defaultMicrophone;
            using (var deviceEnumerator = new MMDeviceEnumerator())
            {
                defaultMicrophone = deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Communications);
            }
            _wasapiCapture = new WasapiCapture();
            _wasapiCapture.Device = defaultMicrophone;
            break;
        default:
            throw new InvalidOperationException("Unhandled WasapiCaptureType");
    }

    _wasapiCapture.Initialize();
    _soundInSource = new SoundInSource(_wasapiCapture);

    _basicSpectrumProvider = new BasicSpectrumProvider(
        _soundInSource.WaveFormat.Channels,
        _soundInSource.WaveFormat.SampleRate,
        CFftSize);

    _lineSpectrum = new LineSpectrum(CFftSize, _minFrequency, _maxFrequency)
    {
        SpectrumProvider = _basicSpectrumProvider,
        BarCount = _spectrumSize,
        UseAverage = true,
        IsXLogScale = true,
        ScalingStrategy = _scalingStrategy
    };

    _wasapiCapture.Start();

    var sampleSource = _soundInSource.ToSampleSource();

    // Optionally chain biquad filters ahead of the notification stream;
    // each append wraps the previous source.
    if (_filters != null && _filters.Length > 0)
    {
        foreach (var filter in _filters)
        {
            sampleSource = sampleSource.AppendSource(x => new BiQuadFilterSource(x));
            var biQuadSource = (BiQuadFilterSource)sampleSource;
            var sampleRate = _soundInSource.WaveFormat.SampleRate;
            switch (filter.Type)
            {
                case WasapiAudioFilterType.LowPass:
                    biQuadSource.Filter = new LowpassFilter(sampleRate, filter.Frequency);
                    break;
                case WasapiAudioFilterType.HighPass:
                    biQuadSource.Filter = new HighpassFilter(sampleRate, filter.Frequency);
                    break;
                case WasapiAudioFilterType.BandPass:
                    biQuadSource.Filter = new BandpassFilter(sampleRate, filter.Frequency);
                    break;
            }
        }
    }

    _singleBlockNotificationStream = new SingleBlockNotificationStream(sampleSource);
    _realtimeSource = _singleBlockNotificationStream.ToWaveSource();

    // Draining _realtimeSource pulls samples through the chain; each pass
    // publishes fresh spectrum data to the subscriber, if any.
    var buffer = new byte[_realtimeSource.WaveFormat.BytesPerSecond / 2];
    _soundInSource.DataAvailable += (s, ea) =>
    {
        while (_realtimeSource.Read(buffer, 0, buffer.Length) > 0)
        {
            float[] spectrumData = _lineSpectrum.GetSpectrumData(MaxAudioValue);
            if (spectrumData != null)
            {
                _receiveAudio?.Invoke(spectrumData);
            }
        }
    };

    _singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;
}
private void SetupSampleSource(ISampleSource aSampleSource)
{
    const FftSize fftSize = FftSize.Fft4096;

    // Provider that computes FFT data from the intercepted samples.
    spectrumProvider = new BasicSpectrumProvider(aSampleSource.WaveFormat.Channels, aSampleSource.WaveFormat.SampleRate, fftSize);

    // The analog, digital and generic views all use an identically
    // configured bar renderer; build them through one helper instead of
    // three copy-pasted initializers.
    _lineSpectrum = CreateDefaultLineSpectrum(fftSize);
    _DigitallineSpectrum = CreateDefaultLineSpectrum(fftSize);
    _GenericlineSpectrum = CreateDefaultLineSpectrum(fftSize);

    // Single-point print over 20-100 Hz with default colors.
    _voicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        UseAverage = true,
        PointCount = 1,
        IsXLogScale = false,
        ScalingStrategy = ScalingStrategy.Linear,
        MaximumFrequency = 100,
        MinimumFrequency = 20
    };

    // Band-limited prints: bass, mids, treble — same configuration except
    // for the accent color and frequency window.
    _DigitalBassPrint3DSpectrum = CreateBandSpectrum(fftSize, Color.Red, 20, 250);
    _DigitalMedioPrint3DSpectrum = CreateBandSpectrum(fftSize, Color.Green, 250, 3000);
    _DigitaltreblePrint3DSpectrum = CreateBandSpectrum(fftSize, Color.Blue, 3000, 16000);

    // Full-range 3D voiceprint.
    _GenericvoicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        UseAverage = true,
        PointCount = 200,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt,
        MaximumFrequency = 20000,
        MinimumFrequency = 20
    };

    // The SingleBlockNotificationStream intercepts the played samples and
    // feeds them to the spectrum provider, which runs the FFT on them.
    var notificationSource = new SingleBlockNotificationStream(aSampleSource);
    notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
    _source = notificationSource.ToWaveSource(16);
}

// Builds the bar renderer configuration shared by all line-spectrum views.
private LineSpectrum CreateDefaultLineSpectrum(FftSize fftSize)
{
    return new LineSpectrum(fftSize)
    {
        SpectrumProvider = spectrumProvider,
        UseAverage = true,
        BarCount = 50,
        BarSpacing = 2,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Sqrt
    };
}

// Builds a single-point voiceprint restricted to [minFrequency, maxFrequency],
// colored black-to-accent.
private VoicePrint3DSpectrum CreateBandSpectrum(FftSize fftSize, Color accent, int minFrequency, int maxFrequency)
{
    return new VoicePrint3DSpectrum(fftSize)
    {
        Colors = new Color[2] { Color.Black, accent },
        SpectrumProvider = spectrumProvider,
        UseAverage = false,
        PointCount = 1,
        IsXLogScale = true,
        ScalingStrategy = ScalingStrategy.Linear,
        MaximumFrequency = maxFrequency,
        MinimumFrequency = minFrequency
    };
}
public async Task Play()
{
    // Acts as a toggle: calling Play while already playing pauses instead.
    if (IsPlaying)
    {
        Pause();
        return;
    }

    // Lazily create the graph on first playback.
    if (_audioGraph == null)
    {
        var settings = new AudioGraphSettings(AudioRenderCategory.Media)
        {
            PrimaryRenderDevice = SelectedDevice
        };
        var createResult = await AudioGraph.CreateAsync(settings);
        if (createResult.Status != AudioGraphCreationStatus.Success)
        {
            return;
        }
        _audioGraph = createResult.Graph;
        _audioGraph.UnrecoverableErrorOccurred += OnAudioGraphError;
    }

    // Output node that renders to the selected device.
    if (_deviceOutputNode == null)
    {
        var deviceResult = await _audioGraph.CreateDeviceOutputNodeAsync();
        if (deviceResult.Status != AudioDeviceNodeCreationStatus.Success)
        {
            return;
        }
        _deviceOutputNode = deviceResult.DeviceOutputNode;
    }

    // Frame output node lets us inspect rendered frames each quantum.
    if (_frameOutputNode == null)
    {
        _frameOutputNode = _audioGraph.CreateFrameOutputNode();
        _audioGraph.QuantumProcessed += GraphOnQuantumProcessed;
    }

    // Input node for the current file, connected to both outputs.
    if (_fileInputNode == null)
    {
        if (CurrentPlayingFile == null)
        {
            return;
        }
        var fileResult = await _audioGraph.CreateFileInputNodeAsync(CurrentPlayingFile);
        if (fileResult.Status != AudioFileNodeCreationStatus.Success)
        {
            return;
        }
        _fileInputNode = fileResult.FileInputNode;
        _fileInputNode.AddOutgoingConnection(_deviceOutputNode);
        _fileInputNode.AddOutgoingConnection(_frameOutputNode);
        Duration = _fileInputNode.Duration;
        _fileInputNode.PlaybackSpeedFactor = PlaybackSpeed / 100.0;
        _fileInputNode.OutgoingGain = Volume / 100.0;
        _fileInputNode.FileCompleted += FileInputNodeOnFileCompleted;
    }

    Debug.WriteLine($" CompletedQuantumCount: {_audioGraph.CompletedQuantumCount}");
    Debug.WriteLine($"SamplesPerQuantum: {_audioGraph.SamplesPerQuantum}");
    Debug.WriteLine($"LatencyInSamples: {_audioGraph.LatencyInSamples}");

    var channelCount = (int)_audioGraph.EncodingProperties.ChannelCount;
    _spectrumProvider = new BasicSpectrumProvider(
        channelCount,
        (int)_audioGraph.EncodingProperties.SampleRate,
        FftSize.Fft4096);

    _audioGraph.Start();
    IsPlaying = true;
}
private void openToolStripMenuItem_Click(object sender, EventArgs e)
{
    // OpenFileDialog is IDisposable; dispose it deterministically instead
    // of leaking the handle until finalization.
    using (var openFileDialog = new OpenFileDialog()
    {
        Filter = CodecFactory.SupportedFilesFilterEn,
        Title = "Select a file..."
    })
    {
        if (openFileDialog.ShowDialog() != DialogResult.OK)
        {
            return;
        }

        Stop();

        const FftSize fftSize = FftSize.Fft4096;

        // Open the selected file and insert a pitch shifter into the chain.
        ISampleSource source = CodecFactory.Instance.GetCodec(openFileDialog.FileName)
            .ToSampleSource()
            .AppendSource(x => new PitchShifter(x), out _pitchShifter);

        // Provider that computes FFT data from the intercepted samples.
        var spectrumProvider = new BasicSpectrumProvider(source.WaveFormat.Channels,
            source.WaveFormat.SampleRate, fftSize);

        // Renderers fed by the provider.
        _lineSpectrum = new LineSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            BarCount = 50,
            BarSpacing = 2,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };
        _voicePrint3DSpectrum = new VoicePrint3DSpectrum(fftSize)
        {
            SpectrumProvider = spectrumProvider,
            UseAverage = true,
            PointCount = 200,
            IsXLogScale = true,
            ScalingStrategy = ScalingStrategy.Sqrt
        };

        // Intercept the played samples and feed them into the provider.
        var notificationSource = new SingleBlockNotificationStream(source);
        notificationSource.SingleBlockRead += (s, a) => spectrumProvider.Add(a.Left, a.Right);
        _source = notificationSource.ToWaveSource(16);

        // Play the audio.
        _soundOut = new WasapiOut();
        _soundOut.Initialize(_source);
        _soundOut.Play();

        timer1.Start();

        propertyGridTop.SelectedObject = _lineSpectrum;
        propertyGridBottom.SelectedObject = _voicePrint3DSpectrum;
    }
}