/// <summary>
/// Record sound made in Mic and save it to a wave file
/// </summary>
/// <param name="wavefile">name of the wave file with extension</param>
public void CaptureMicToWave(string wavefile)
{
    int i = 0;
    string extension = ".wav";

    foreach (var device in WaveIn.Devices)
    {
        _waveIn = new WaveInEvent(new WaveFormat(44100, 16, device.Channels));
        _waveIn.Device = i++;

        _waveIn.Initialize();
        _waveIn.Start();

        var waveInToSource = new SoundInSource(_waveIn);

        _source = waveInToSource;
        var notifyStream = new SingleBlockNotificationStream(_source);

        _source = notifyStream.ToWaveSource(16);
        _writerBuffer = new byte[_source.WaveFormat.BytesPerSecond];

        // each device gets its own numbered file: "name1.wav", "name2.wav", ...
        wavefile = string.Format("{0}{1}{2}", wavefile.Remove(wavefile.LastIndexOf(extension) - (i > 1 ? 1 : 0)), i, extension);
        _writer = new WaveWriter(wavefile, _source.WaveFormat);

        waveInToSource.DataAvailable += (s, e) =>
        {
            int read = 0;
            while ((read = _source.Read(_writerBuffer, 0, _writerBuffer.Length)) > 0)
            {
                _writer.Write(_writerBuffer, 0, read);
            }
        };
    }
}
public static void WriteToFile(string filename, IWaveSource source, bool deleteIfExists, int maxlength = -1)
{
    if (deleteIfExists && File.Exists(filename))
        File.Delete(filename);

    int read = 0;
    int written = 0;
    byte[] buffer = new byte[source.WaveFormat.BytesPerSecond];
    using (var writer = new WaveWriter(filename, source.WaveFormat))
    {
        // copy the source into the wave file until it is drained or maxlength bytes were written
        while ((read = source.Read(buffer, 0, buffer.Length)) > 0)
        {
            writer.Write(buffer, 0, read);
            written += read;
            if (maxlength != -1 && written > maxlength)
                break;
        }
    }
}
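// A minimal usage sketch for WriteToFile above. The file name "input.mp3" is only an example;
// CodecFactory comes from CSCore.Codecs and decodes the file to an IWaveSource.
using (IWaveSource source = CodecFactory.Instance.GetCodec("input.mp3"))
{
    // copy roughly the first ten seconds of decoded audio into a wave file
    WriteToFile("first10s.wav", source, deleteIfExists: true,
        maxlength: source.WaveFormat.BytesPerSecond * 10);
}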
private void StartCapture(string fileName)
{
    if (SelectedDevice == null)
        return;

    if (CaptureMode == CaptureMode.Capture)
        _soundIn = new WasapiCapture();
    else
        _soundIn = new WasapiLoopbackCapture();

    _soundIn.Device = SelectedDevice;
    _soundIn.Initialize();

    var soundInSource = new SoundInSource(_soundIn);
    var singleBlockNotificationStream = new SingleBlockNotificationStream(soundInSource.ToSampleSource());
    _finalSource = singleBlockNotificationStream.ToWaveSource();
    _writer = new WaveWriter(fileName, _finalSource.WaveFormat);

    byte[] buffer = new byte[_finalSource.WaveFormat.BytesPerSecond / 2];
    soundInSource.DataAvailable += (s, e) =>
    {
        int read;
        while ((read = _finalSource.Read(buffer, 0, buffer.Length)) > 0)
            _writer.Write(buffer, 0, read);
    };

    singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStreamOnSingleBlockRead;

    _soundIn.Start();
}
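// A possible StopCapture counterpart for the snippet above. This is only a sketch and assumes
// the _soundIn, _finalSource and _writer fields created in StartCapture.
private void StopCapture()
{
    if (_soundIn != null)
    {
        _soundIn.Stop();       // stop delivering new data before tearing the chain down
        _soundIn.Dispose();
        _soundIn = null;
    }

    _finalSource?.Dispose();

    if (_writer is IDisposable disposableWriter)
        disposableWriter.Dispose();   // flushes and finalizes the wave file header
}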
public static void EncodeWholeSource(MediaFoundationEncoder encoder, IWaveSource source)
{
    byte[] buffer = new byte[source.WaveFormat.BytesPerSecond * 4];
    int read = 0;
    while ((read = source.Read(buffer, 0, buffer.Length)) > 0)
    {
        Debug.WriteLine(String.Format("{0:#00.00}%", (double)source.Position / (double)source.Length * 100));
        encoder.Write(buffer, 0, read);
    }
}
private void CacheSource(IWaveSource source)
{
    _cache = new MemoryStream { Position = 0 };
    int read = 0;
    int count = (int)Math.Min(source.WaveFormat.BytesPerSecond * 5, source.Length);
    byte[] buffer = new byte[count];

    // remember the current position so it can be restored after caching
    long position = 0;
    if (source.CanSeek)
        position = source.Position;

    while ((read = source.Read(buffer, 0, count)) > 0)
    {
        _cache.Write(buffer, 0, read);
    }

    if (source.CanSeek)
    {
        source.Position = position;
        _cache.Position = source.Position;
    }
    else
    {
        _cache.Position = 0;
    }
}
/// <summary>
/// Encodes the whole <paramref name="source" /> with the specified <paramref name="encoder" />. The encoding process
/// stops as soon as the <see cref="IReadableAudioSource{T}.Read" /> method of the specified <paramref name="source" />
/// returns 0.
/// </summary>
/// <param name="encoder">The encoder which should be used to encode the audio data.</param>
/// <param name="source">The <see cref="IWaveSource" /> which provides the raw audio data to encode.</param>
public static void EncodeWholeSource(MediaFoundationEncoder encoder, IWaveSource source)
{
    if (encoder == null)
        throw new ArgumentNullException("encoder");
    if (source == null)
        throw new ArgumentNullException("source");

    var buffer = new byte[source.WaveFormat.BytesPerSecond * 4];
    int read;
    while ((read = source.Read(buffer, 0, buffer.Length)) > 0)
    {
        Debug.WriteLine(String.Format("{0:#00.00}%", source.Position / (double)source.Length * 100));
        encoder.Write(buffer, 0, read);
    }
}
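// A short usage sketch for EncodeWholeSource. It assumes CSCore's MediaFoundationEncoder.CreateMP3Encoder
// factory is available (Media Foundation must provide an MP3 encoder on the machine); "input.wav" and
// "output.mp3" are placeholder file names.
using (IWaveSource source = CodecFactory.Instance.GetCodec("input.wav"))
using (MediaFoundationEncoder encoder = MediaFoundationEncoder.CreateMP3Encoder(source.WaveFormat, "output.mp3"))
{
    EncodeWholeSource(encoder, source);
}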
public int Read(byte[] buffer, int offset, int count)
{
    // forward the caller's offset as well; hard-coding 0 would ignore the requested write position
    return _audioSource.Read(buffer, offset, count);
}
public override void Start()
{
    if (_started)
    {
        Stop();
    }

    DataFlow dataFlow = (DataFlow)_speechSettings.SelectedDataFlowId;
    var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

    if (devices.Count - 1 < _speechSettings.InputDeviceIndex)
    {
        throw new Exception($"Device index {_speechSettings.InputDeviceIndex} is not available");
    }

    if (dataFlow == DataFlow.Render)
    {
        var wasapiFormat = _waveFormatAdapter.WaveFormatFromCurrentSettings();
        _soundIn = new WasapiLoopbackCapture(100, wasapiFormat);
    }
    else
    {
        _soundIn = new WasapiCapture();
    }

    _soundIn.Device = devices[_speechSettings.InputDeviceIndex];
    _soundIn.Initialize();

    var wasapiCaptureSource = new SoundInSource(_soundIn) { FillWithZeros = false };
    _waveSource = wasapiCaptureSource
        .ChangeSampleRate(_speechSettings.SampleRateValue) // sample rate
        .ToSampleSource()
        .ToWaveSource(_speechSettings.BitsPerSampleValue); // bits per sample

    if (_speechSettings.ChannelValue == 1)
    {
        _waveSource = _waveSource.ToMono();
    }
    else
    {
        _waveSource = _waveSource.ToStereo();
    }

    wasapiCaptureSource.DataAvailable += (s, e) =>
    {
        //read data from the converted _waveSource
        //important: don't use e.Data here
        //e.Data contains the raw data provided by the soundInSource which won't have your target format
        byte[] buffer = new byte[_waveSource.WaveFormat.BytesPerSecond / 2];
        int read;

        //keep reading as long as we still get some data
        //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
        while ((read = _waveSource.Read(buffer, 0, buffer.Length)) > 0)
        {
            SendData(buffer, read);
        }
    };

    _soundIn.Start();

    _started = true;
}
public void Start(TimeSpan time)
{
    int sampleRate = 48000;
    int bitsPerSample = 24;

    MMDeviceCollection devices;
    while (!(devices = MMDeviceEnumerator.EnumerateDevices(DataFlow.Capture, DeviceState.Active)).Any())
    {
        Thread.Sleep(2000);
    }

    var device = devices.FirstOrDefault();

    //TODO: We have a memory leak here (soundIn should be cleared from time to time). Needs to be fixed!
    //create a new soundIn instance
    using (WasapiCapture soundIn = new WasapiCapture())
    {
        soundIn.Device = device;

        //initialize the soundIn instance
        soundIn.Initialize();

        //create a SoundSource around the soundIn instance
        SoundInSource soundInSource = new SoundInSource(soundIn) { FillWithZeros = false };

        //create a source that converts the data provided by the soundInSource to any other format
        IWaveSource convertedSource = soundInSource
            .ChangeSampleRate(sampleRate) // sample rate
            .ToSampleSource()
            .ToWaveSource(bitsPerSample); //bits per sample

        using (var stream = new MemoryStream())
        {
            var readBufferLength = convertedSource.WaveFormat.BytesPerSecond / 2;

            //channels...
            using (convertedSource = convertedSource.ToStereo())
            {
                //create a new wavefile
                using (WaveWriter waveWriter = new WaveWriter(stream, convertedSource.WaveFormat))
                {
                    //register an event handler for the DataAvailable event of the soundInSource
                    soundInSource.DataAvailable += (s, e) =>
                    {
                        //read data from the convertedSource
                        byte[] buffer = new byte[readBufferLength];
                        int read;

                        //keep reading as long as we still get some data
                        while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                        {
                            var decibelsCalibrated = (int)Math.Round(GetSoundLevel(buffer, _calibrateAdd, _calibratescale, _calibrateRange));
                            if (decibelsCalibrated < 0)
                            {
                                decibelsCalibrated = 0;
                            }

                            OnNoiseData?.Invoke(null, new NoiseInfoEventArgs() { Decibels = decibelsCalibrated });

                            //write the read data to a file
                            waveWriter.Write(buffer, 0, read);
                        }
                    };

                    soundIn.Stopped += (e, args) =>
                    {
                        OnStopped?.Invoke(null, null);
                        lock (_stopLocker)
                            Monitor.PulseAll(_stopLocker);
                    };

                    var tm = new Timer(state => soundIn?.Stop(), null, time, time);

                    //start recording
                    soundIn.Start();
                    OnStarted?.Invoke(null, null);

                    Monitor.Enter(_stopLocker);
                    {
                        Monitor.PulseAll(_stopLocker);
                        Monitor.Wait(_stopLocker);
                    }

                    //stop recording
                    soundIn.Stop();
                }
            }
        }
    }
}
public void StartListen()
{
    switch (_captureType)
    {
        case WasapiCaptureType.Loopback:
            _wasapiCapture = new WasapiLoopbackCapture();
            break;
        case WasapiCaptureType.Microphone:
            MMDevice defaultMicrophone;
            using (var deviceEnumerator = new MMDeviceEnumerator())
            {
                defaultMicrophone = deviceEnumerator.GetDefaultAudioEndpoint(DataFlow.Capture, Role.Communications);
            }
            _wasapiCapture = new WasapiCapture();
            _wasapiCapture.Device = defaultMicrophone;
            break;
        default:
            throw new InvalidOperationException("Unhandled WasapiCaptureType");
    }

    _wasapiCapture.Initialize();

    _soundInSource = new SoundInSource(_wasapiCapture);

    _basicSpectrumProvider = new BasicSpectrumProvider(_soundInSource.WaveFormat.Channels, _soundInSource.WaveFormat.SampleRate, CFftSize);

    _lineSpectrum = new LineSpectrum(CFftSize, _minFrequency, _maxFrequency)
    {
        SpectrumProvider = _basicSpectrumProvider,
        BarCount = _spectrumSize,
        UseAverage = true,
        IsXLogScale = true,
        ScalingStrategy = _scalingStrategy
    };

    _wasapiCapture.Start();

    var sampleSource = _soundInSource.ToSampleSource();

    if (_filters != null && _filters.Length > 0)
    {
        foreach (var filter in _filters)
        {
            sampleSource = sampleSource.AppendSource(x => new BiQuadFilterSource(x));
            var biQuadSource = (BiQuadFilterSource)sampleSource;
            switch (filter.Type)
            {
                case WasapiAudioFilterType.LowPass:
                    biQuadSource.Filter = new LowpassFilter(_soundInSource.WaveFormat.SampleRate, filter.Frequency);
                    break;
                case WasapiAudioFilterType.HighPass:
                    biQuadSource.Filter = new HighpassFilter(_soundInSource.WaveFormat.SampleRate, filter.Frequency);
                    break;
                case WasapiAudioFilterType.BandPass:
                    biQuadSource.Filter = new BandpassFilter(_soundInSource.WaveFormat.SampleRate, filter.Frequency);
                    break;
            }
        }
    }

    _singleBlockNotificationStream = new SingleBlockNotificationStream(sampleSource);
    _realtimeSource = _singleBlockNotificationStream.ToWaveSource();

    var buffer = new byte[_realtimeSource.WaveFormat.BytesPerSecond / 2];

    _soundInSource.DataAvailable += (s, ea) =>
    {
        UnityEngine.Debug.Log(buffer.Length);
        while (_realtimeSource.Read(buffer, 0, buffer.Length) > 0)
        {
            updateCnt++;
            if (updateCnt > 353535)
            {
                updateCnt = 0;
            }

            float[] spectrumData = _lineSpectrum.GetSpectrumData(MaxAudioValue);

            if (spectrumData != null)
            {
                _receiveAudio?.Invoke(spectrumData);
            }
        }
    };

    _singleBlockNotificationStream.SingleBlockRead += SingleBlockNotificationStream_SingleBlockRead;
}
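// A possible StopListen counterpart for the snippet above. This is only a sketch; it assumes the
// _wasapiCapture, _soundInSource, _realtimeSource and _singleBlockNotificationStream fields and the
// SingleBlockNotificationStream_SingleBlockRead handler shown in StartListen.
public void StopListen()
{
    if (_singleBlockNotificationStream != null)
        _singleBlockNotificationStream.SingleBlockRead -= SingleBlockNotificationStream_SingleBlockRead;

    if (_wasapiCapture != null)
    {
        _wasapiCapture.Stop();
        _wasapiCapture.Dispose();
        _wasapiCapture = null;
    }

    _realtimeSource?.Dispose();
    _soundInSource?.Dispose();
}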
public override int Read(byte[] buffer, int offset, int count)
{
    int readbase = BaseSource.Read(buffer, offset, count);   // want count, stored at offset

    if (readbase > 0)   // mix the mix source into the data; ensure the mix source never runs out
    {
        byte[] buffer2 = new byte[readbase];
        int storepos = 0;
        int left = readbase;
        while (left > 0)
        {
            //System.Diagnostics.Debug.WriteLine("Read mix at " + mix.Position + " for " + left);
            int readmix = mix.Read(buffer2, storepos, left);   // and read in the readmix..
            //System.Diagnostics.Debug.WriteLine(".. read " + readmix);
            left -= readmix;
            if (left > 0)   // if we have any left, we need to loop the mix source
            {
                mix.Position = 0;
                storepos += readmix;
            }
        }

        if (BaseSource.WaveFormat.BytesPerSample == 2)   // FOR NOW, presuming it's PCM; cope with a few different formats
        {
            for (int i = 0; i < readbase; i += 2)
            {
                short v1 = BitConverter.ToInt16(buffer, i + offset);
                short v2 = BitConverter.ToInt16(buffer2, i);
                v1 += v2;
                var bytes = BitConverter.GetBytes(v1);
                buffer[i + offset] = bytes[0];
                buffer[i + offset + 1] = bytes[1];
            }
        }
        else if (BaseSource.WaveFormat.BytesPerSample == 4)
        {
            for (int i = 0; i < readbase; i += 4)
            {
                long v1 = BitConverter.ToInt32(buffer, i + offset);
                long v2 = BitConverter.ToInt32(buffer2, i);
                v1 += v2;
                var bytes = BitConverter.GetBytes(v1);
                buffer[i + offset] = bytes[0];
                buffer[i + offset + 1] = bytes[1];
                buffer[i + offset + 2] = bytes[2];
                buffer[i + offset + 3] = bytes[3];
            }
        }
        else
        {
            for (int i = 0; i < readbase; i += 1)
            {
                buffer[i + offset] += buffer2[i];
            }
        }
    }

    return readbase;
}
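// Note: the plain "v1 += v2" above wraps around on overflow when both signals are loud. A common
// refinement (a sketch, not part of the original class) is to sum into a wider type and clamp
// before writing the 16-bit result back:
private static void MixInt16Clamped(byte[] target, int targetOffset, byte[] mixBuffer, int byteCount)
{
    for (int i = 0; i < byteCount; i += 2)
    {
        // widen to int so the sum cannot overflow, then clamp to the valid Int16 range
        int sum = BitConverter.ToInt16(target, i + targetOffset) + BitConverter.ToInt16(mixBuffer, i);
        short clamped = (short)Math.Max(short.MinValue, Math.Min(short.MaxValue, sum));
        byte[] bytes = BitConverter.GetBytes(clamped);
        target[i + targetOffset] = bytes[0];
        target[i + targetOffset + 1] = bytes[1];
    }
}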
// ReSharper disable once UnusedParameter.Local
static void Main(string[] args)
{
    CaptureMode captureMode;
    if (Boolean.Parse(ConfigurationManager.AppSettings["defaultToLoopback"]))
    {
        captureMode = CaptureMode.LoopbackCapture;
    }
    else
    {
        Console.WriteLine("Select capturing mode:");
        Console.WriteLine("- 1: Capture");
        Console.WriteLine("- 2: LoopbackCapture");
        captureMode = (CaptureMode)ReadInteger(1, 2);
    }

    DataFlow dataFlow = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

    var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);
    if (!devices.Any())
    {
        Console.WriteLine("No devices found.");
        return;
    }

    MMDevice device;
    if (devices.Count == 1)
    {
        device = devices[0];
    }
    else
    {
        Console.WriteLine("Select device:");
        for (int i = 0; i < devices.Count; i++)
        {
            Console.WriteLine("- {0:#00}: {1}", i, devices[i].FriendlyName);
        }

        int selectedDeviceIndex = ReadInteger(Enumerable.Range(0, devices.Count).ToArray());
        device = devices[selectedDeviceIndex];
    }

    int sampleRate = Int32.Parse(ConfigurationManager.AppSettings["sampleRate"]);
    int bitsPerSample = Int32.Parse(ConfigurationManager.AppSettings["bitsPerSample"]);
    int channels = 1;

    //create a new soundIn instance
    using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
        ? new WasapiCapture()
        : new WasapiLoopbackCapture())
    {
        //optional: set some properties
        soundIn.Device = device;
        //...

        //initialize the soundIn instance
        soundIn.Initialize();

        //create a SoundSource around the soundIn instance
        //this SoundSource will provide data captured by the soundIn instance
        SoundInSource soundInSource = new SoundInSource(soundIn) { FillWithZeros = false };

        //create a source that converts the data provided by the
        //soundInSource to any other format
        //in this case the "Fluent"-extension methods are being used
        IWaveSource convertedSource = soundInSource
            .ChangeSampleRate(sampleRate) // sample rate
            .ToSampleSource()
            .ToWaveSource(bitsPerSample); //bits per sample

        //channels...
        using (convertedSource = channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
        {
            //create a new wavefile
            var fileName = "out-" + DateTime.UtcNow.ToString("yyyy-MM-ddTHH-mm-ss") + ".wav";
            using (WaveWriter waveWriter = new WaveWriter(fileName, convertedSource.WaveFormat))
            {
                //register an event handler for the DataAvailable event of
                //the soundInSource
                //Important: use the DataAvailable of the SoundInSource
                //If you use the DataAvailable event of the ISoundIn itself,
                //the data recorded by that event might not be available at the
                //soundInSource yet
                soundInSource.DataAvailable += (s, e) =>
                {
                    //read data from the convertedSource
                    //important: don't use e.Data here
                    //e.Data contains the raw data provided by the
                    //soundInSource which won't have your target format
                    byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                    int read;

                    //keep reading as long as we still get some data
                    //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                    while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                    {
                        //write the read data to a file
                        // ReSharper disable once AccessToDisposedClosure
                        waveWriter.Write(buffer, 0, read);
                    }
                };

                //we've set everything we need -> start capturing data
                soundIn.Start();

                Console.WriteLine("Capturing started ... press any key to stop.");
                Console.ReadKey();

                soundIn.Stop();
            }
        }
    }
}
private void Capture_DataAvailable(object sender, DataAvailableEventArgs e)
{
    // pull the processed data from finalSource directly back into the event's buffer,
    // overwriting the raw captured bytes with the converted data
    finalSource.Read(e.Data, e.Offset, e.ByteCount);
}
public async Task AudioStreamCommand(
    [Summary("Voice Channel name")] IVoiceChannel channel = null,
    [Summary("Number of audio channels, 1 for mono, 2 for stereo (Default)")] int nAudioChannels = 2,
    [Summary("Sample rate in hertz, 48000 (Default)")] int sampleRate = 48000,
    [Summary("Number of bits per sample, 16 (Default)")] int bitsPerSample = 16)
{
    var connection = await channel.ConnectAsync();
    var dstream = connection.CreatePCMStream(AudioApplication.Mixed);

    using (WasapiCapture soundIn = new WasapiLoopbackCapture())
    {
        //initialize the soundIn instance
        soundIn.Initialize();

        //create a SoundSource around the soundIn instance
        //this SoundSource will provide data captured by the soundIn instance
        SoundInSource soundInSource = new SoundInSource(soundIn) { FillWithZeros = false };

        //create a source that converts the data provided by the
        //soundInSource to any other format
        //in this case the "Fluent"-extension methods are being used
        IWaveSource convertedSource = soundInSource
            .ChangeSampleRate(sampleRate) // sample rate
            .ToSampleSource()
            .ToWaveSource(bitsPerSample); //bits per sample

        //int channels = 2;
        //channels...
        using (convertedSource = nAudioChannels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
        {
            //register an event handler for the DataAvailable event of
            //the soundInSource
            //Important: use the DataAvailable of the SoundInSource
            //If you use the DataAvailable event of the ISoundIn itself,
            //the data recorded by that event might not be available at the
            //soundInSource yet
            soundInSource.DataAvailable += (s, e) =>
            {
                //read data from the convertedSource
                //important: don't use e.Data here
                //e.Data contains the raw data provided by the
                //soundInSource which won't have your target format
                byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                int read;

                //keep reading as long as we still get some data
                //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                {
                    //write the read data to the Discord PCM stream
                    // ReSharper disable once AccessToDisposedClosure
                    dstream.Write(buffer, 0, read);
                }
            };

            //we've set everything we need -> start capturing data
            soundIn.Start();

            Console.WriteLine("Capturing started ... press any key to stop.");
            Console.ReadKey();

            soundIn.Stop();
        }
    }
}