Example #1
0
        /// <summary>
        /// Sets up WASAPI capture on the configured device, builds a 48 kHz mono
        /// conversion chain (optionally wrapped in a per-block notification stream),
        /// and starts capturing.
        /// </summary>
        public virtual void Initialize()
        {
            _wasapiCapture = new WasapiCapture
            {
                Device = _captureDevice
            };
            _wasapiCapture.Initialize();

            var soundInSource = new SoundInSource(_wasapiCapture);

            // Shared conversion chain: resample to 48 kHz, collapse to mono.
            var sampleSource = soundInSource.ChangeSampleRate(48000).ToMono().ToSampleSource();

            if (_triggerSingleBlockRead)
            {
                // Wrap the chain so every block read raises SingleBlockRead.
                var notificationStream = new SingleBlockNotificationStream(sampleSource);
                notificationStream.SingleBlockRead += NotificationStreamOnSingleBlockRead;
                _captureSource = notificationStream.ToWaveSource(16);
            }
            else
            {
                _captureSource = sampleSource.ToWaveSource(16);
            }

            soundInSource.DataAvailable += SoundInSourceOnDataAvailable;
            _wasapiCapture.Start();
        }
        /// <summary>
        /// Configures loopback capture on the given device, converts it to
        /// 44.1 kHz / 16-bit stereo, starts recording, and publishes the resulting
        /// NAudio wave format.
        /// </summary>
        /// <param name="recordingDevice">the device to record from; bails out when null</param>
        public void StartRecordingSetDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                MessageBox.Show(Properties.Strings.MessageBox_NoRecordingDevices);
                Console.WriteLine("No devices found.");
                return;
            }

            soundIn = new CSCore.SoundIn.WasapiLoopbackCapture { Device = recordingDevice };
            soundIn.Initialize();

            // FillWithZeros = false so reads return only real captured data.
            soundInSource = new SoundInSource(soundIn) { FillWithZeros = false };

            // Conversion chain: resample, re-quantize to 16 bit, force stereo.
            convertedSource = soundInSource
                              .ChangeSampleRate(44100)
                              .ToSampleSource()
                              .ToWaveSource(16)
                              .ToStereo();

            soundInSource.DataAvailable += OnDataAvailable;
            soundIn.Start();

            var format = convertedSource.WaveFormat;

            // Mirror the CSCore format as an NAudio WaveFormat for downstream consumers.
            waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(
                WaveFormatEncoding.Pcm,
                format.SampleRate,
                format.Channels,
                format.BytesPerSecond,
                format.BlockAlign,
                format.BitsPerSample);
        }
Example #3
0
        /// <summary>
        /// Captures the default loopback ("what you hear") audio for the given duration
        /// and returns it as 44.1 kHz / 16-bit mono WAV data in a memory stream.
        /// </summary>
        /// <param name="ms">capture duration in milliseconds</param>
        /// <returns>a stream containing the recorded WAV data</returns>
        public async Task<MemoryStream> GetLoopbackAudio(int ms)
        {
            var stream = new MemoryStream();

            using (WasapiCapture virtualaudiodev = new WasapiLoopbackCapture())
            {
                virtualaudiodev.Initialize();

                // FillWithZeros = false so the drain loop below terminates when no data is buffered.
                var soundInSource = new SoundInSource(virtualaudiodev)
                {
                    FillWithZeros = false
                };
                var convertedSource = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
                using (convertedSource = convertedSource.ToMono())
                {
                    using (var waveWriter = new WaveWriter(stream, convertedSource.WaveFormat))
                    {
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            // Half a second of audio per read buffer.
                            var buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int read;
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                waveWriter.Write(buffer, 0, read);
                            }
                        };
                        virtualaudiodev.Start();

                        // BUG FIX: this async method previously blocked its thread with
                        // Thread.Sleep(ms) and contained no await at all. Task.Delay keeps
                        // the wait asynchronous while capture continues via DataAvailable.
                        await Task.Delay(ms);

                        virtualaudiodev.Stop();
                    }
                }
            }

            return stream;
        }
        /// <summary>
        /// Records loopback audio for the given number of milliseconds into a
        /// 48 kHz / 24-bit stereo WAV file.
        /// </summary>
        /// <param name="output_file">path of the WAV file to create</param>
        /// <param name="time">recording duration in milliseconds</param>
        /// <returns>always 0</returns>
        public static int Capture(string output_file, int time)
        {
            const int targetSampleRate = 48000;
            const int targetBitDepth   = 24;

            //create a new soundIn instance
            using (WasapiCapture loopback = new WasapiLoopbackCapture())
            {
                //initialize the soundIn instance
                loopback.Initialize();

                //FillWithZeros = false: the drain loop below stops once the buffer is empty
                SoundInSource captureSource = new SoundInSource(loopback)
                {
                    FillWithZeros = false
                };

                //conversion chain: resample, then re-quantize to the target bit depth
                IWaveSource converted = captureSource
                                        .ChangeSampleRate(targetSampleRate)
                                        .ToSampleSource()
                                        .ToWaveSource(targetBitDepth);

                //force stereo and write everything into the target wave file
                using (converted = converted.ToStereo())
                using (WaveWriter writer = new WaveWriter(output_file, converted.WaveFormat))
                {
                    //pull converted data into the file whenever the device reports new data
                    captureSource.DataAvailable += (sender, args) =>
                    {
                        byte[] chunk = new byte[converted.WaveFormat.BytesPerSecond / 2];
                        int bytesRead;
                        while ((bytesRead = converted.Read(chunk, 0, chunk.Length)) > 0)
                        {
                            writer.Write(chunk, 0, bytesRead);
                        }
                    };

                    //record for the requested duration, then stop
                    loopback.Start();
                    Thread.Sleep(time);
                    loopback.Stop();
                }
            }
            return 0;
        }
        /// <summary>
        /// Start recording on the device in the parameter: initializes a WASAPI
        /// loopback capture, converts it to 44.1 kHz / 16-bit stereo, allocates the
        /// capture/send buffers and starts the background event thread.
        /// </summary>
        /// <param name="recordingDevice">the device to start recording</param>
        /// <returns>true if the recording is started, or false</returns>
        public bool StartRecordingSetDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                logger.Log(Properties.Strings.MessageBox_NoRecordingDevices);
                return(false);
            }

            try
            {
                soundIn = new CSCore.SoundIn.WasapiLoopbackCapture
                {
                    Device = recordingDevice
                };

                soundIn.Initialize();
                // FillWithZeros = false so reads return only real captured data.
                soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };
                // Conversion chain: 44.1 kHz, 16-bit PCM, forced to stereo.
                convertedSource              = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
                convertedSource              = convertedSource.ToStereo();
                soundInSource.DataAvailable += OnDataAvailable;
                soundIn.Stopped             += OnRecordingStopped;
                soundIn.Start();

                // Mirror the CSCore format as an NAudio WaveFormat for consumers.
                var format = convertedSource.WaveFormat;
                waveFormat     = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                isRecording    = true;
                // Two half-second swap buffers for the capture -> send pipeline.
                bufferCaptured = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };
                bufferSend = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };

                // Background pump thread; the weak reference keeps this recorder collectable.
                eventThread = new Thread(EventThread)
                {
                    Name         = "Loopback Event Thread",
                    IsBackground = true
                };
                eventThread.Start(new WeakReference <LoopbackRecorder>(this));

                return(true);
            }
            catch (Exception ex)
            {
                // Initialization can fail (e.g. device in exclusive use); log and report failure.
                logger.Log(ex, "Error initializing the recording device:");
            }

            return(false);
        }
Example #6
0
        /// <summary>
        /// Applies a new sample rate and bit depth, rebuilding the conversion chain
        /// while capture is briefly stopped.
        /// </summary>
        /// <param name="sampleRate">new target sample rate</param>
        /// <param name="bitsPerSecond">new target bits per sample</param>
        public void ChangeQuality(int sampleRate, int bitsPerSecond)
        {
            SampleRate   = sampleRate;
            BitPerSecond = bitsPerSecond;

            // Stop capture while the conversion chain is swapped out, then resume.
            _soundIn.Stop();
            _targetSoundSource = _defaultSoundSource.ChangeSampleRate(SampleRate)
                                                    .ToSampleSource()
                                                    .ToStereo()
                                                    .ToWaveSource(BitPerSecond);
            _soundIn.Start();
        }
Example #7
0
        /// <summary>
        /// Records from the first active capture device for the given duration into
        /// <paramref name="fileName"/>, converting to the requested format
        /// (sample rate, bit depth, mono/stereo).
        /// </summary>
        /// <param name="fileName">path of the WAV file to create</param>
        /// <param name="time">recording duration</param>
        /// <param name="format">target wave format (rate, bits, channels)</param>
        /// <exception cref="InvalidOperationException">no active audio device was found</exception>
        public static void RecordTo(string fileName, TimeSpan time, WaveFormat format)
        {
            CaptureMode captureMode = CaptureMode.Capture;
            DataFlow    dataFlow    = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);
            var device  = devices.FirstOrDefault();

            // BUG FIX: FirstOrDefault returns null when no active device exists, which
            // previously surfaced as a NullReferenceException inside soundIn.Initialize().
            // Fail fast with a clear message instead.
            if (device == null)
            {
                throw new InvalidOperationException("No active audio device found for data flow " + dataFlow + ".");
            }

            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                soundIn.Device = device;
                soundIn.Initialize();

                // FillWithZeros = false so the drain loop in the handler terminates.
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(format.SampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(format.BitsPerSample); //bits per sample
                using (convertedSource = format.Channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    using (WaveWriter waveWriter = new WaveWriter(fileName, convertedSource.WaveFormat))
                    {
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            // Drain the converted source into the file (half a second per buffer).
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        soundIn.Start();

                        Console.WriteLine("Started recording");
                        Thread.Sleep(time);

                        soundIn.Stop();
                        Console.WriteLine("Finished recording");
                    }
                }
            }
        }
        /// <summary>
        /// Initializes loopback capture on the given render endpoint, normalizes the
        /// stream to 44.1 kHz stereo, attaches a per-block notification stream and
        /// starts capturing. Captured data is consumed (discarded after notification)
        /// by the DataAvailable drain loop.
        /// </summary>
        /// <param name="captureDevice">the render endpoint to capture from</param>
        /// <exception cref="NullReferenceException">no channel configuration could be initialized</exception>
        public void Initialize(MMDevice captureDevice)
        {
            //BLARG 01.14.2020: Don't need the default when we're given an Audio Enpoint
            //MMDevice captureDevice = MMDeviceEnumerator.DefaultAudioEndpoint(DataFlow.Render, Role.Console);
            WaveFormat deviceFormat = captureDevice.DeviceFormat;

            _audioEndpointVolume = AudioEndpointVolume.FromDevice(captureDevice);

            //DarthAffe 07.02.2018: This is a really stupid workaround to (hopefully) finally fix the surround driver issues
            // Probes channel counts 1..12; each successful attempt overwrites _capture,
            // so the highest channel count that constructs successfully wins.
            // NOTE(review): failures are deliberately swallowed — the null check below
            // is the only error signal.
            for (int i = 1; i < 13; i++)
            {
                try { _capture = new WasapiLoopbackCapture(100, new WaveFormat(deviceFormat.SampleRate, deviceFormat.BitsPerSample, i)); } catch { /* We're just trying ... */ }
            }

            if (_capture == null)
            {
                throw new NullReferenceException("Failed to initialize WasapiLoopbackCapture");
            }

            //BLARG: Actually setting the Device
            _capture.Device = captureDevice;
            _capture.Initialize();

            // FillWithZeros = false so the drain loop below stops when no data remains.
            _soundInSource = new SoundInSource(_capture)
            {
                FillWithZeros = false
            };
            // Skip the resampler when the device already runs at 44.1 kHz.
            _source = _soundInSource.WaveFormat.SampleRate == 44100
                          ? _soundInSource.ToStereo()
                          : _soundInSource.ChangeSampleRate(44100).ToStereo();

            // Every block read raises StreamOnSingleBlockRead (e.g. for visualization).
            _stream = new SingleBlockNotificationStream(_source.ToSampleSource());
            _stream.SingleBlockRead += StreamOnSingleBlockRead;

            _source = _stream.ToWaveSource();

            // Drain loop: reading drives the notification stream; the bytes themselves
            // are discarded. Buffer holds half a second of audio.
            byte[] buffer = new byte[_source.WaveFormat.BytesPerSecond / 2];
            _soundInSource.DataAvailable += (s, aEvent) =>
            {
                while ((_source.Read(buffer, 0, buffer.Length)) > 0)
                {
                    ;
                }
            };

            _capture.Start();
        }
        /// <summary>
        /// Stops any running capture, then starts loopback recording on the given device,
        /// converting to 44.1 kHz / 16-bit stereo and spinning up the event pump thread.
        /// </summary>
        /// <param name="recordingDevice">the device to record from; bails out when null</param>
        public void StartRecordingDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                Console.WriteLine("No devices found.");
                return;
            }

            StopRecording();

            soundIn = new CSCore.SoundIn.WasapiLoopbackCapture()
            {
                Device = recordingDevice
            };
            soundIn.Initialize();

            // FillWithZeros = false so reads return only captured data.
            soundInSource = new SoundInSource(soundIn)
            {
                FillWithZeros = false
            };

            // Conversion chain: resample, re-quantize to 16 bit, force stereo.
            convertedSource = soundInSource
                              .ChangeSampleRate(44100)
                              .ToSampleSource()
                              .ToWaveSource(16)
                              .ToStereo();
            soundInSource.DataAvailable += OnDataAvailable;
            soundIn.Start();

            waveFormat = convertedSource.WaveFormat;

            // Two half-second swap buffers for the capture pipeline.
            buffer0 = new BufferBlock()
            {
                Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
            };
            buffer1 = new BufferBlock()
            {
                Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
            };

            enabled = true;

            // Background pump thread; weak reference keeps the recorder collectable.
            eventThread = new Thread(EventThread)
            {
                Name         = "Loopback Event Thread",
                IsBackground = true
            };
            eventThread.Start(new WeakReference<LoopbackRecorder>(this));
        }
Example #10
0
        /// <summary>
        /// Creates a new instance of the recorder. Captured data will be forwarded via the supplied data handler, if any.
        /// </summary>
        /// <param name="sampleRate">the target sample rate</param>
        /// <param name="bitsPerSample">the target bits per sample</param>
        /// <param name="handler">captured data handler (optional)</param>
        /// <see cref="DataHandler"/>
        public WasapiRecorder(int sampleRate, int bitsPerSample, DataHandler handler = null)
        {
            _capture = new WasapiLoopbackCapture();
            _capture.Initialize();

            // Volume control is bound to whichever device the loopback capture selected.
            _volume = AudioEndpointVolume.FromDevice(_capture.Device);

            // FillWithZeros = false: consumers read only real captured data.
            _soundInSource = new SoundInSource(_capture)
            {
                FillWithZeros = false
            };

            // Resample and re-quantize to the requested format, then force stereo.
            _convertedSource = _soundInSource.ChangeSampleRate(sampleRate)
                                             .ToSampleSource()
                                             .ToWaveSource(bitsPerSample)
                                             .ToStereo();

            WithDataHandler(handler);
        }
Example #11
0
        /// <summary>
        /// Wraps a sound-in source, converting it to the broadcast sample rate,
        /// bit depth and channel layout, and queueing captured chunks.
        /// </summary>
        /// <param name="soundIn">the capture source to wrap</param>
        /// <param name="settings">broadcast conversion settings</param>
        public SoundInSourceWrapper(SoundInSource soundIn, BroadcastSettings settings)
        {
            Settings = settings;
            _soundIn = soundIn;

            var converted = soundIn.ChangeSampleRate(Settings.SampleRate)
                                   .ToSampleSource()
                                   .ToWaveSource(Settings.BitDepth);

            // Collapse to the configured channel layout.
            _convertedSource = settings.Channel == AudioChannel.Mono
                ? converted.ToMono()
                : converted.ToStereo();

            _audioChunks = new ConcurrentQueue <byte[]>();

            _soundIn.DataAvailable += SoundInDataAvailable;
        }
Example #12
0
        /// <summary>
        /// Initializes capture for the selected device (loopback for render endpoints),
        /// producing a converted 16-bit mono source at the configured sample rate.
        /// Does nothing when no device is supplied.
        /// </summary>
        /// <param name="selectedDevice">the endpoint to capture from, or null</param>
        private void InitializeAudioCapture(MMDevice selectedDevice)
        {
            if (selectedDevice == null)
            {
                return;
            }

            // Capture endpoints record directly; render endpoints need loopback capture.
            mAudioCapture = selectedDevice.DataFlow == DataFlow.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture();
            mAudioCapture.Device = selectedDevice;
            mAudioCapture.Initialize();
            mAudioCapture.DataAvailable += Capture_DataAvailable;

            // FillWithZeros = false so reads stop when no captured data remains.
            mSoundInSource = new SoundInSource(mAudioCapture)
            {
                FillWithZeros = false
            };

            // Convert to the required format: target sample rate, 16 bits per sample, mono.
            mConvertedSource = mSoundInSource.ChangeSampleRate(SampleRate)
                                             .ToSampleSource()
                                             .ToWaveSource(16)
                                             .ToMono();
        }
Example #13
0
        /// <summary>
        /// Initializes loopback capture on the default render endpoint, normalizes the
        /// stream to 44.1 kHz stereo behind a per-block notification stream, and starts
        /// capturing. Data consumption happens in OnSoundDataAvailable.
        /// </summary>
        /// <exception cref="NullReferenceException">no channel configuration could be initialized</exception>
        public void Initialize()
        {
            MMDevice   captureDevice = MMDeviceEnumerator.DefaultAudioEndpoint(DataFlow.Render, Role.Console);
            WaveFormat deviceFormat  = captureDevice.DeviceFormat;

            _audioEndpointVolume = AudioEndpointVolume.FromDevice(captureDevice);

            //DarthAffe 07.02.2018: This is a really stupid workaround to (hopefully) finally fix the surround driver issues
            // Probes channel counts 1..12; each successful construction overwrites _capture,
            // so the highest channel count that works wins. Failures are deliberately
            // swallowed — the null check below is the only error signal.
            for (int i = 1; i < 13; i++)
            {
                try
                {
                    _capture = new WasapiLoopbackCapture(100, new WaveFormat(deviceFormat.SampleRate, deviceFormat.BitsPerSample, i));
                }
                catch
                { }
            }

            if (_capture == null)
            {
                throw new NullReferenceException("Failed to initialize WasapiLoopbackCapture");
            }

            _capture.Initialize();
            // FillWithZeros = false so downstream reads stop when no data remains.
            _soundInSource = new SoundInSource(_capture)
            {
                FillWithZeros = false
            };

            // Skip the resampler when the device already runs at 44.1 kHz; either way the
            // notification stream raises an event per sample block read.
            _stream = _soundInSource.WaveFormat.SampleRate == 44100
                ? new SingleBlockNotificationStream(_soundInSource.ToStereo().ToSampleSource())
                : new SingleBlockNotificationStream(_soundInSource.ChangeSampleRate(44100).ToStereo().ToSampleSource());

            _soundInSource.DataAvailable += OnSoundDataAvailable;

            _capture.Start();
        }
Example #14
0
        /// <summary>
        /// Starts capture on the configured device unless it is already recording,
        /// writing converted audio to "out.wav" via the DataAvailable handler.
        /// </summary>
        public void Start()
        {
            // Nothing to do without a device, or when capture is already running.
            if (_device == null || (_soundIn != null && _soundIn.RecordingState == RecordingState.Recording))
            {
                return;
            }

            // Loopback capture records the render endpoint; plain capture records an input.
            _soundIn = _captureMode == CaptureMode.Capture ? new WasapiCapture() : new WasapiLoopbackCapture();
            _soundIn.Device = _device;
            _soundIn.Initialize();

            // FillWithZeros = false so downstream reads stop when the buffer is empty.
            SoundInSource soundInSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            // Conversion chain: resample, then re-quantize to the target bit depth.
            _convertedSource = soundInSource.ChangeSampleRate(sampleRate)
                                            .ToSampleSource()
                                            .ToWaveSource(bitsPerSample);

            // Channel layout, then the file sink the handler writes into.
            _convertedSource = channels == 1 ? _convertedSource.ToMono() : _convertedSource.ToStereo();
            _waveWriter      = new WaveWriter("out.wav", _convertedSource.WaveFormat);

            soundInSource.DataAvailable += OnDatAvailable;
            _soundIn.Start();
        }
        /// <summary>
        /// Start recording on the device in the parameter. Chooses loopback or plain
        /// capture by endpoint data flow, builds the conversion chain for the stream
        /// format selected in the UI, allocates the capture/send buffers and starts
        /// the background event thread.
        /// </summary>
        /// <param name="recordingDevice">the device to start recording</param>
        /// <returns>true if the recording is started, or false</returns>
        public bool StartRecordingSetDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                logger.Log(Properties.Strings.MessageBox_NoRecordingDevices);
                return(false);
            }

            try
            {
                // Render endpoints need loopback capture; capture endpoints record directly.
                if (recordingDevice.DataFlow == DataFlow.Render)
                {
                    soundIn = new CSCore.SoundIn.WasapiLoopbackCapture
                    {
                        Device = recordingDevice
                    };
                }
                else
                {
                    soundIn = new CSCore.SoundIn.WasapiCapture
                    {
                        Device = recordingDevice
                    };
                }


                soundIn.Initialize();
                // FillWithZeros = false so reads return only real captured data.
                soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                // Build the conversion chain per the stream format chosen in the UI.
                var selectedFormat = mainForm.GetSelectedStreamFormat();
                var convertMultiChannelToStereo = mainForm.GetConvertMultiChannelToStereo();
                CSCore.WaveFormat format;
                switch (selectedFormat)
                {
                // Wav: resample to 44.1 kHz / 16-bit; NOTE(review): format is captured
                // BEFORE the optional ToStereo, unlike the other cases — verify intended.
                case Classes.SupportedStreamFormat.Wav:
                    convertedSource = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
                    format          = convertedSource.WaveFormat;
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                // MP3 targets: keep the device sample rate, 16-bit, always stereo.
                case Classes.SupportedStreamFormat.Mp3_320:
                case Classes.SupportedStreamFormat.Mp3_128:
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(16);
                    convertedSource = convertedSource.ToStereo();
                    format          = convertedSource.WaveFormat;
                    waveFormat      = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                // Raw PCM at the device sample rate, 16-bit.
                case Classes.SupportedStreamFormat.Wav_16bit:
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(16);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                // Raw PCM at the device sample rate, 24-bit.
                case Classes.SupportedStreamFormat.Wav_24bit:
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(24);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                // 32-bit uses IEEE float encoding rather than PCM.
                case Classes.SupportedStreamFormat.Wav_32bit:
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(32);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.IeeeFloat, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                default:
                    break;
                }

                logger.Log($"Stream format set to {waveFormat.Encoding} {waveFormat.SampleRate} {waveFormat.BitsPerSample} bit");
                soundInSource.DataAvailable += OnDataAvailable;
                soundIn.Stopped             += OnRecordingStopped;
                soundIn.Start();

                isRecording    = true;
                // Two half-second swap buffers for the capture -> send pipeline.
                bufferCaptured = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };
                bufferSend = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };

                // Background pump thread; weak reference keeps the recorder collectable.
                eventThread = new Thread(EventThread)
                {
                    Name         = "Loopback Event Thread",
                    IsBackground = true
                };
                eventThread.Start(new WeakReference <LoopbackRecorder>(this));

                return(true);
            }
            catch (Exception ex)
            {
                // Initialization can fail (e.g. device in exclusive use); log and report failure.
                logger.Log(ex, "Error initializing the recording device:");
            }

            return(false);
        }
Example #16
0
        /// <summary>
        /// Captures loopback audio from the render device whose friendly name starts
        /// with "small" and writes it to "dump.wav" as 16 kHz / 16-bit mono
        /// (Google speech format) until a key is pressed.
        /// </summary>
        /// <param name="args">unused; kept for signature compatibility</param>
        /// <exception cref="InvalidOperationException">no matching active device was found</exception>
        static void writeSpeakersToWav(string[] args)
        {
            const int GOOGLE_RATE            = 16000;
            const int GOOGLE_BITS_PER_SAMPLE = 16;
            const int GOOGLE_CHANNELS        = 1;
            // Removed unused EARPHONES constant (it was never referenced).

            CaptureMode captureMode = CaptureMode.LoopbackCapture;

            DataFlow dataFlow = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            foreach (var d in devices)
            {
                Console.WriteLine("- {0:#00}: {1}", d, d.FriendlyName);
            }

            // BUG FIX: Enumerable.First threw an opaque "Sequence contains no matching
            // element" when no device matched; throw the same exception type with an
            // actionable message instead.
            var headphones = devices.FirstOrDefault(x => x.FriendlyName.StartsWith("small"));
            if (headphones == null)
            {
                throw new InvalidOperationException("No active device whose friendly name starts with \"small\" was found.");
            }

            //using (WasapiCapture capture = new WasapiLoopbackCapture())
            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                //if nessesary, you can choose a device here
                //to do so, simply set the device property of the capture to any MMDevice
                //to choose a device, take a look at the sample here: http://cscore.codeplex.com/

                soundIn.Device = headphones;

                Console.WriteLine("Waiting, press any key to start");
                Console.ReadKey();
                //initialize the selected device for recording
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the
                //soundInSource to any other format
                //in this case the "Fluent"-extension methods are being used
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(GOOGLE_RATE)         // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(GOOGLE_BITS_PER_SAMPLE); //bits per sample

                var channels = GOOGLE_CHANNELS;

                //channels...
                using (convertedSource = channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                    //create a wavewriter to write the data to
                    using (WaveWriter w = new WaveWriter("dump.wav", convertedSource.WaveFormat))
                    {
                        //setup an eventhandler to receive the recorded data
                        //register an event handler for the DataAvailable event of
                        //the soundInSource
                        //Important: use the DataAvailable of the SoundInSource
                        //If you use the DataAvailable event of the ISoundIn itself
                        //the data recorded by that event might won't be available at the
                        //soundInSource yet
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read data from the converedSource
                            //important: don't use the e.Data here
                            //the e.Data contains the raw data provided by the
                            //soundInSource which won't have your target format
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                // ReSharper disable once AccessToDisposedClosure
                                w.Write(buffer, 0, read);
                            }
                        };


                        //start recording
                        soundIn.Start();

                        Console.WriteLine("Started, press any key to stop");
                        Console.ReadKey();

                        //stop recording
                        soundIn.Stop();
                    }
            }
        }
        static int Main(string[] args)
        {
            // Entry point: records loopback audio (what the system is playing)
            // to a WAV file.
            //   LoopbackCapture.exe <output/wav> <time/milliseconds>
            // With no arguments the output defaults to "record.wav"; a time of
            // 0 (missing or unparsable) means "record until a key is pressed".
            // Returns 1 when usage help was requested, 0 otherwise.
            int    time;
            string output_file;

            switch (args.Length)
            {
            case 1:
                if (args[0] == "-h")
                {
                    System.Console.WriteLine("Usage:");
                    System.Console.WriteLine("    LoopbackCapture.exe <output/wav> <time/milliseconds>");
                    return(1);
                }
                output_file = args[0];
                time        = 0;
                break;

            case 2:
                output_file = args[0];
                // Int32.TryParse instead of try/catch: a malformed duration is an
                // expected input failure, not exceptional control flow.
                if (!Int32.TryParse(args[1], out time))
                {
                    time = 0;
                }
                break;

            default:
                time        = 0;
                output_file = "record.wav";
                break;
            }

            int sampleRate    = 48000;
            int bitsPerSample = 24;

            //create a new soundIn instance (loopback capture records the render endpoint)
            using (WasapiCapture soundIn = new WasapiLoopbackCapture())
            {
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //FillWithZeros must stay false so the read loop below terminates
                //when the device has no more buffered data
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the soundInSource to any other format
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                //channels...
                using (convertedSource = convertedSource.ToStereo())
                {
                    //create a new wavefile
                    using (WaveWriter waveWriter = new WaveWriter(output_file, convertedSource.WaveFormat))
                    {
                        //register an event handler for the DataAvailable event of the
                        //soundInSource (not the ISoundIn itself, so the converted data
                        //is guaranteed to be readable when the handler runs)
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read from convertedSource, not e.Data: e.Data is in the raw capture format
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                // ReSharper disable once AccessToDisposedClosure
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        //start recording
                        soundIn.Start();

                        //delay and keep recording
                        if (time != 0)
                        {
                            Thread.Sleep(time);
                        }
                        else
                        {
                            Console.ReadKey();
                        }

                        //stop recording
                        soundIn.Stop();
                    }
                }
            }
            return(0);
        }
Example #18
0
        public void Record(string deviceName, string audioFilePath = @"C:\Temp\output.wav")
        {
            // Records ~15 seconds of loopback audio from the render device whose
            // FriendlyName equals <deviceName> into <audioFilePath> (mono, using
            // the SampleRate / BitsPerSample fields), then raises
            // AudioFileCaptured with the path of the finished file.
            _timer = new Stopwatch();
            _timer.Start();

            // choose the capture mode (fixed to loopback in this method)
            CaptureMode captureMode = CaptureMode.LoopbackCapture;
            DataFlow    dataFlow    = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            //select the device:
            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            if (!devices.Any())
            {
                Console.WriteLine("### No devices found.");
                return;
            }

            Console.WriteLine($"### Using device {deviceName}");
            // BUGFIX: First() throws InvalidOperationException when no device
            // matches the requested name; fail gracefully like the
            // "no devices" path above instead of crashing.
            var device = devices.FirstOrDefault(d => d.FriendlyName.Equals(deviceName));
            if (device == null)
            {
                Console.WriteLine($"### Device {deviceName} not found.");
                return;
            }

            //start recording
            //create a new soundIn instance
            _soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture();

            //optional: set some properties
            _soundIn.Device = device;

            //initialize the soundIn instance
            _soundIn.Initialize();

            //create a SoundSource around the the soundIn instance
            //this SoundSource will provide data, captured by the soundIn instance
            SoundInSource soundInSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            //convert the captured data to the target format
            //(the "Fluent"-extension methods are being used)
            _convertedSource = soundInSource
                               .ChangeSampleRate(SampleRate) // sample rate
                               .ToSampleSource()
                               .ToWaveSource(BitsPerSample); //bits per sample

            //channels...
            _convertedSource = _convertedSource.ToMono();

            //create a new wavefile
            _waveWriter = new WaveWriter(audioFilePath, _convertedSource.WaveFormat);

            //register an event handler for the DataAvailable event of the
            //soundInSource. Important: use the SoundInSource event, not the
            //ISoundIn one — data recorded by the latter might not be available
            //at the soundInSource yet.
            soundInSource.DataAvailable += (s, e) =>
            {
                //read from the converted source, not e.Data: e.Data is the raw
                //capture format, not the target format
                byte[] buffer = new byte[_convertedSource.WaveFormat.BytesPerSecond / 2];
                int    read;

                //keep reading as long as we still get some data
                //(requires soundInSource.FillWithZeros == false, set above)
                while ((read = _convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                {
                    //write the read data to a file
                    // ReSharper disable once AccessToDisposedClosure
                    _waveWriter.Write(buffer, 0, read);
                }
            };

            //we've set everything we need -> start capturing data
            _soundIn.Start();
            Console.WriteLine($"### RECORDING {audioFilePath}");

            // record for at most 15 seconds, polling every 500 ms
            while (_timer.ElapsedMilliseconds / 1000 < 15 && _timer.IsRunning)
            {
                Thread.Sleep(500);
            }

            Console.WriteLine("### STOP RECORDING");
            _soundIn.Stop();
            _timer.Stop();

            _waveWriter.Dispose();
            _convertedSource.Dispose();
            _soundIn.Dispose();

            AudioFileCaptured?.Invoke(this, new AudioRecorderEventArgs()
            {
                AudioFilePath = audioFilePath
            });
        }
Example #19
0
        public void Start(TimeSpan time)
        {
            // Captures microphone audio for up to <time>, raising OnNoiseData
            // with a calibrated decibel level for each buffer read, and
            // OnStarted/OnStopped around the session. Blocks until the capture
            // stops (either via the timer below or an external Stop).
            int sampleRate    = 48000;
            int bitsPerSample = 24;

            MMDeviceCollection devices;

            // poll until at least one active capture device is present
            while (!(devices = MMDeviceEnumerator.EnumerateDevices(DataFlow.Capture, DeviceState.Active)).Any())
            {
                Thread.Sleep(2000);
            }
            var device = devices.FirstOrDefault();

            //TODO:We have a memory leak here (soundIn should be cleared from time to time). needs to be fixed!
            //create a new soundIn instance
            using (WasapiCapture soundIn = new WasapiCapture())
            {
                soundIn.Device = device;
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the soundInSource to any other format
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                using (var stream = new MemoryStream())
                {
                    var readBufferLength = convertedSource.WaveFormat.BytesPerSecond / 2;
                    //channels...
                    using (convertedSource = convertedSource.ToStereo())
                    {
                        //create a new wavefile (in memory)
                        using (WaveWriter waveWriter = new WaveWriter(stream, convertedSource.WaveFormat))
                        {
                            //register an event handler for the DataAvailable event of the soundInSource
                            soundInSource.DataAvailable += (s, e) =>
                            {
                                //read from convertedSource, not e.Data (raw capture format)
                                byte[] buffer = new byte[readBufferLength];
                                int    read;

                                //keep reading as long as we still get some data
                                while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                                {
                                    // convert the buffer to a calibrated dB level, clamped at 0
                                    var decibelsCalibrated = (int)Math.Round(GetSoundLevel(buffer, _calibrateAdd, _calibratescale, _calibrateRange));
                                    if (decibelsCalibrated < 0)
                                    {
                                        decibelsCalibrated = 0;
                                    }
                                    OnNoiseData?.Invoke(null, new NoiseInfoEventArgs()
                                    {
                                        Decibels = decibelsCalibrated
                                    });
                                    //write the read data to a file
                                    waveWriter.Write(buffer, 0, read);
                                }
                            };
                            soundIn.Stopped += (e, args) =>
                            {
                                OnStopped?.Invoke(null, null);
                                // wake the Monitor.Wait below when the capture stops
                                lock (_stopLocker)
                                    Monitor.PulseAll(_stopLocker);
                            };

                            // BUGFIX: dispose the timer so its callback cannot fire
                            // soundIn.Stop() after soundIn has been disposed.
                            using (var tm = new Timer(state => soundIn?.Stop(), null, time, time))
                            {
                                //start recording
                                soundIn.Start();
                                OnStarted?.Invoke(null, null);

                                // BUGFIX: the original called Monitor.Enter with no
                                // matching Monitor.Exit, leaving _stopLocker held
                                // forever after the first call. A lock block
                                // guarantees release.
                                lock (_stopLocker)
                                {
                                    Monitor.PulseAll(_stopLocker);
                                    Monitor.Wait(_stopLocker);
                                }
                                //stop recording
                                soundIn.Stop();
                            }
                        }
                    }
                }
            }
        }
Example #20
0
        public void Record(string filename)
        {
            // Starts capturing audio from InputDevice into <filename>, and wires
            // up a spectrum provider plus a SingleBlockNotificationStream so the
            // recorded samples can drive an FFT/visualization.
            // No-ops when the filename is blank, no input device is selected,
            // or a recording is already in progress.
            if (string.IsNullOrWhiteSpace(filename))
            {
                return;
            }

            // reset position bookkeeping and the recorded-sample cache
            cachedPosition = TimeSpan.Zero;
            position       = TimeSpan.Zero;
            sampleLength   = 0;
            recordedData   = new List <float>();

            if (InputDevice == null)
            {
                return;
            }

            if (recordingState == RecordingState.Recording)
            {
                return;
            }

            recordingState = RecordingState.Recording;

            // plain capture for input endpoints, loopback otherwise
            if (inputDevice.Type == DeviceType.Capture)
            {
                _capture = new WasapiCapture();
            }
            else
            {
                _capture = new WasapiLoopbackCapture();
            }

            _capture.Device = inputDevice.ActualDevice;
            _capture.Initialize();

            // FillWithZeros = false: reads return 0 when no captured data is buffered
            _soundInSource = new SoundInSource(_capture)
            {
                FillWithZeros = false
            };
            _soundInSource.DataAvailable += _soundInSource_DataAvailable;

            // convert the captured data to the configured sample rate / bit depth, mono
            _waveSource = _soundInSource
                          .ChangeSampleRate(SampleRate)
                          .ToSampleSource()
                          .ToWaveSource(BitResolution)
                          .ToMono();

            spectrumProvider = new BasicSpectrumProvider(_waveSource.WaveFormat.Channels,
                                                         _waveSource.WaveFormat.SampleRate,
                                                         CSCore.DSP.FftSize.Fft4096);

            // NOTE(review): the writer is created with the BitResolution format
            // captured here, but _waveSource is rewrapped to 16-bit just below --
            // confirm this mismatch is intentional when BitResolution != 16.
            _waveWriter = new WaveWriter(filename, _waveSource.WaveFormat);

            //the SingleBlockNotificationStream is used to intercept the played samples
            _notificationSource = new SingleBlockNotificationStream(_waveSource.ToSampleSource());
            //pass the intercepted samples as input data to the spectrumprovider (which will calculate a fft based on them)
            _notificationSource.SingleBlockRead += _notificationSource_SingleBlockRead;
            _waveSource = _notificationSource.ToWaveSource(16);

            RaiseSourceEvent(SourceEventType.Loaded);
            _capture.Start();
            RaiseSourcePropertyChangedEvent(SourceProperty.RecordingState, _capture.RecordingState);
        }
        private void StartCapture(string fileName)
        {
            // Records mono 16 kHz / 16-bit audio from the first active render
            // device (loopback) into <fileName> until stopped elsewhere, and
            // starts objStopWatch so the capture duration can be measured.

            //Capture Mode
            // NOTE(review): the cast of 1 presumably selects loopback capture
            // (consistent with the dataFlow ternary below) — confirm against the
            // CaptureMode enum's declared values.
            CaptureMode = (CaptureMode)1;
            DataFlow dataFlow = CaptureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            //Getting the audio devices
            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            if (!devices.Any())
            {
                MessageBox.Show("No devices found.");
                return;
            }

            // always use the first enumerated device
            int selectedDeviceIndex = 0;

            SelectedDevice = devices[selectedDeviceIndex];

            if (SelectedDevice == null)
            {
                return;
            }

            if (CaptureMode == CaptureMode.Capture)
            {
                _soundIn = new WasapiCapture();
            }
            else
            {
                _soundIn = new WasapiLoopbackCapture();
            }

            _soundIn.Device = SelectedDevice;

            //Sample rate of audio
            int sampleRate = 16000;
            //bits per sample
            int bitsPerSample = 16;
            // (removed the unused "channels" local — mono is enforced via ToMono below)

            //initialize the soundIn instance
            _soundIn.Initialize();

            //create a SoundSource around the the soundIn instance
            //this SoundSource will provide data, captured by the soundIn instance
            var soundInSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            //create a source, that converts the data provided by the
            //soundInSource to the target format
            //in this case the "Fluent"-extension methods are being used
            IWaveSource convertedSource = soundInSource
                                          .ChangeSampleRate(sampleRate) // sample rate
                                          .ToSampleSource()
                                          .ToWaveSource(bitsPerSample); //bits per sample

            //single channel -> mono audio
            convertedSource = convertedSource.ToMono();

            AudioToText audioToText = new AudioToText();

            audioToText.SetFolderPermission(_folderPath);

            //create a new wavefile
            waveWriter = new WaveWriter(fileName, convertedSource.WaveFormat);

            //register an event handler for the DataAvailable event of the
            //soundInSource. Important: use the SoundInSource event, not the
            //ISoundIn one — data recorded by the latter might not be available
            //at the soundInSource yet.
            soundInSource.DataAvailable += (s, e) =>
            {
                //read from convertedSource, not e.Data: e.Data is the raw
                //capture format, not the target format
                byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                int    read;

                //keep reading as long as we still get some data
                //(requires soundInSource.FillWithZeros == false, set above)
                while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                {
                    //write the read data to a file
                    // ReSharper disable once AccessToDisposedClosure
                    waveWriter.Write(buffer, 0, read);
                }
            };

            //we've set everything we need -> start capturing data
            objStopWatch.Start();
            _soundIn.Start();
        }
Example #22
0
        // ReSharper disable once UnusedParameter.Local
        static void Main(string[] args)
        {
            // Interactive console recorder: prompts for capture mode, device,
            // sample rate, bit depth and channel count; records to "out.wav"
            // until a key is pressed, then opens the file with the shell.

            //choose the capture mode
            Console.WriteLine("Select capturing mode:");
            Console.WriteLine("- 1: Capture");
            Console.WriteLine("- 2: LoopbackCapture");

            CaptureMode captureMode = (CaptureMode)ReadInteger(1, 2);
            DataFlow    dataFlow    = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            //---

            //select the device:
            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            if (!devices.Any())
            {
                Console.WriteLine("No devices found.");
                return;
            }

            Console.WriteLine("Select device:");
            for (int i = 0; i < devices.Count; i++)
            {
                Console.WriteLine("- {0:#00}: {1}", i, devices[i].FriendlyName);
            }
            int selectedDeviceIndex = ReadInteger(Enumerable.Range(0, devices.Count).ToArray());
            var device = devices[selectedDeviceIndex];

            //--- choose format
            Console.WriteLine("Enter sample rate:");
            int sampleRate;

            do
            {
                sampleRate = ReadInteger();
                // BUGFIX: the lower bound was 100 (i.e. 100 Hz) while the prompt
                // promises 1 kHz; align the validation with the message.
                if (sampleRate >= 1000 && sampleRate <= 200000)
                {
                    break;
                }
                Console.WriteLine("Must be between 1kHz and 200kHz.");
            } while (true);

            Console.WriteLine("Choose bits per sample (8, 16, 24 or 32):");
            int bitsPerSample = ReadInteger(8, 16, 24, 32);

            //note: this sample does not support multi channel formats like surround 5.1,...
            //if this is required, the DmoChannelResampler class can be used
            Console.WriteLine("Choose number of channels (1, 2):");
            int channels = ReadInteger(1, 2);

            //---

            //start recording

            //create a new soundIn instance
            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                //optional: set some properties
                soundIn.Device = device;
                //...

                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                //FillWithZeros must stay false so the read loop below terminates
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the
                //soundInSource to the chosen format
                //in this case the "Fluent"-extension methods are being used
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                //channels...
                using (convertedSource = channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    //create a new wavefile
                    using (WaveWriter waveWriter = new WaveWriter("out.wav", convertedSource.WaveFormat))
                    {
                        //register an event handler for the DataAvailable event of the
                        //soundInSource. Important: use the SoundInSource event, not the
                        //ISoundIn one — data recorded by the latter might not be
                        //available at the soundInSource yet.
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read from convertedSource, not e.Data: e.Data is the raw
                            //capture format, not the target format
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            //(requires soundInSource.FillWithZeros == false, set above)
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                // ReSharper disable once AccessToDisposedClosure
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        //we've set everything we need -> start capturing data
                        soundIn.Start();

                        Console.WriteLine("Capturing started ... press any key to stop.");
                        Console.ReadKey();

                        soundIn.Stop();
                    }
                }
            }

            // open the recorded file with the default associated application
            Process.Start("out.wav");
        }
        private WasapiCapture StartListeningOnLoopback()
        {
            // Prepares a capture instance that converts incoming audio to the
            // format Google Speech expects (16 kHz / 16-bit / mono) and pushes
            // each converted chunk into _microphoneBuffer as a ByteString.
            // The capture is configured but NOT started here; the caller owns
            // the returned instance and is responsible for Start/Stop/dispose.
            const int GOOGLE_RATE            = 16000;
            const int GOOGLE_BITS_PER_SAMPLE = 16;
            const int GOOGLE_CHANNELS        = 1;

            CaptureMode captureMode = _captureMode;

            DataFlow dataFlow = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            Console.WriteLine("Please select devlice:");
            for (int i = 0; i < devices.Count; i++)
            {
                Console.WriteLine(i + ") " + devices[i].FriendlyName);
            }
            // NOTE(review): throws FormatException on non-numeric input and
            // IndexOutOfRangeException on an out-of-range index — consider a
            // TryParse/validation loop if this must be hardened.
            var deviceIndex = int.Parse(Console.ReadLine());

            var headphones = devices[deviceIndex];

            //using (WasapiCapture capture = new WasapiLoopbackCapture())
            _soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture();

            _soundIn.Device = headphones;

            //initialize the selected device for recording
            _soundIn.Initialize();
            //create a SoundSource around the the soundIn instance
            //this SoundSource will provide data, captured by the soundIn instance
            _soundInSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            //create a source, that converts the data provided by the
            //soundInSource to the Google target format
            //in this case the "Fluent"-extension methods are being used
            _convertedSource = _soundInSource
                               .ChangeSampleRate(GOOGLE_RATE)         // sample rate
                               .ToSampleSource()
                               .ToWaveSource(GOOGLE_BITS_PER_SAMPLE); //bits per sample

            var channels = GOOGLE_CHANNELS;

            //channels...
            var src = channels == 1 ? _convertedSource.ToMono() : _convertedSource.ToStereo();

            _soundInSource.DataAvailable += (sender, args) =>
            {
                //read from src, not args.Data: args.Data is the raw capture
                //format, not the converted target format
                // BUGFIX: the buffer was sized from _convertedSource, but the
                // reads below come from src (the channel-converted source);
                // size it from the format actually being read.
                byte[] buffer = new byte[src.WaveFormat.BytesPerSecond / 2];
                int    read;

                //keep reading as long as we still get some data
                //(requires _soundInSource.FillWithZeros == false, set above)
                while ((read = src.Read(buffer, 0, buffer.Length)) > 0)
                {
                    // ReSharper disable once AccessToDisposedClosure
                    Debug.WriteLine($"Read {read} bytes");
                    // hand the chunk to the streaming-recognition buffer
                    _microphoneBuffer.Add(ByteString.CopyFrom(buffer, 0, read));
                }
            };

            return(_soundIn);
        }
Example #24
0
        // ReSharper disable once UnusedParameter.Local
        static void Main(string[] args)
        {
            // Records mono audio to a timestamped WAV file until a key is
            // pressed. The "defaultToLoopback" app setting can skip the
            // interactive mode prompt; sample rate and bit depth come from
            // configuration as well.
            CaptureMode mode;

            if (bool.Parse(ConfigurationManager.AppSettings["defaultToLoopback"]))
            {
                mode = CaptureMode.LoopbackCapture;
            }
            else
            {
                Console.WriteLine("Select capturing mode:");
                Console.WriteLine("- 1: Capture");
                Console.WriteLine("- 2: LoopbackCapture");

                mode = (CaptureMode)ReadInteger(1, 2);
            }

            // loopback records a render endpoint; plain capture records an input endpoint
            DataFlow flow = mode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var endpoints = MMDeviceEnumerator.EnumerateDevices(flow, DeviceState.Active);

            if (endpoints.Count == 0)
            {
                Console.WriteLine("No devices found.");
                return;
            }

            MMDevice chosenDevice;

            if (endpoints.Count == 1)
            {
                // only one candidate — no need to prompt
                chosenDevice = endpoints[0];
            }
            else
            {
                Console.WriteLine("Select device:");
                for (int index = 0; index < endpoints.Count; index++)
                {
                    Console.WriteLine("- {0:#00}: {1}", index, endpoints[index].FriendlyName);
                }
                chosenDevice = endpoints[ReadInteger(Enumerable.Range(0, endpoints.Count).ToArray())];
            }

            // target format from configuration; output is always mono here
            int targetSampleRate = int.Parse(ConfigurationManager.AppSettings["sampleRate"]);
            int targetBits       = int.Parse(ConfigurationManager.AppSettings["bitsPerSample"]);
            int targetChannels   = 1;

            using (WasapiCapture soundIn = mode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                soundIn.Device = chosenDevice;

                soundIn.Initialize();

                // the SoundInSource exposes the captured data as an IWaveSource;
                // FillWithZeros = false lets the read loop below terminate once
                // the device has no more buffered data
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                // convert the captured data to the configured sample rate / bit
                // depth using the "Fluent" extension methods
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(targetSampleRate)
                                              .ToSampleSource()
                                              .ToWaveSource(targetBits);

                using (convertedSource = targetChannels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    // output file named after the current UTC timestamp
                    var fileName = "out-" + DateTime.UtcNow.ToString("yyyy-MM-ddTHH-mm-ss") + ".wav";
                    using (WaveWriter waveWriter = new WaveWriter(fileName, convertedSource.WaveFormat))
                    {
                        // subscribe on the SoundInSource, not the ISoundIn — only
                        // the former guarantees the converted data is readable
                        // when the handler runs
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            // read through convertedSource (never e.Data) so the
                            // bytes are already in the target format
                            byte[] chunk = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int bytesRead;

                            while ((bytesRead = convertedSource.Read(chunk, 0, chunk.Length)) > 0)
                            {
                                // ReSharper disable once AccessToDisposedClosure
                                waveWriter.Write(chunk, 0, bytesRead);
                            }
                        };

                        soundIn.Start();

                        Console.WriteLine("Capturing started ... press any key to stop.");
                        Console.ReadKey();

                        soundIn.Stop();
                    }
                }
            }
        }
Example #25
0
        /// <summary>
        /// Joins the given voice channel and streams the system loopback audio
        /// (WASAPI loopback capture) to it as PCM, converted to the requested
        /// sample rate, bit depth and channel count.
        /// </summary>
        /// <param name="channel">Voice channel to join. Must not be null.</param>
        /// <param name="nAudioChannels">1 for mono, 2 for stereo (default).</param>
        /// <param name="sampleRate">Target sample rate in hertz (default 48000).</param>
        /// <param name="bitsPerSample">Target bits per sample (default 16).</param>
        /// <exception cref="ArgumentNullException">Thrown when <paramref name="channel"/> is null.</exception>
        public async Task AudioStreamCommand([
                                                 Summary("Voice Channel name")]
                                             IVoiceChannel channel = null,
                                             [Summary("Number of audio channels, 1 for mono, 2 for stereo (Default)")]
                                             int nAudioChannels = 2,
                                             [Summary("Sample rate in hertz, 48000 (Default)")]
                                             int sampleRate = 48000,
                                             [Summary("Number of bits per sample, 16 (Default)")]
                                             int bitsPerSample = 16)
        {
            // Fix: the parameter defaults to null, but the original code dereferenced
            // it unconditionally and crashed with a NullReferenceException.
            if (channel == null)
            {
                throw new ArgumentNullException(nameof(channel));
            }

            var connection = await channel.ConnectAsync();

            // Fix: the PCM stream is IDisposable and was previously leaked.
            using (var dstream = connection.CreatePCMStream(AudioApplication.Mixed))
            using (WasapiCapture soundIn = new WasapiLoopbackCapture())
            {
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    //required for the manual read loop below: without this the
                    //source would pad with silence and the loop would never end
                    FillWithZeros = false
                };

                //create a source that converts the data provided by the
                //soundInSource to the requested target format
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                using (convertedSource = nAudioChannels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    //register an event handler for the DataAvailable event of
                    //the soundInSource
                    //Important: use the DataAvailable of the SoundInSource
                    //If you use the DataAvailable event of the ISoundIn itself
                    //the data recorded by that event might not be available at the
                    //soundInSource yet
                    soundInSource.DataAvailable += (s, e) =>
                    {
                        //read data from the convertedSource
                        //important: don't use the e.Data here
                        //the e.Data contains the raw data provided by the
                        //soundInSource which won't have your target format
                        byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                        int    read;

                        //keep reading as long as we still get some data
                        while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                        {
                            //forward the converted data to the voice connection
                            // ReSharper disable once AccessToDisposedClosure
                            dstream.Write(buffer, 0, read);
                        }
                    };

                    //we've set everything we need -> start capturing data
                    soundIn.Start();

                    Console.WriteLine("Capturing started ... press any key to stop.");
                    Console.ReadKey();
                    soundIn.Stop();
                }
            }
        }
Example #26
0
        /// <summary>
        /// Begins audio capture using the configured device and format settings.
        /// Selects loopback (render) or microphone (capture) mode based on
        /// <c>_speechSettings.SelectedDataFlowId</c>, converts the captured audio to
        /// the configured sample rate / bit depth / channel count, and forwards the
        /// converted bytes via <c>SendData</c>. Restarts cleanly if already started.
        /// </summary>
        /// <exception cref="ArgumentOutOfRangeException">
        /// Thrown when the configured input device index does not refer to an active device.
        /// </exception>
        public override void Start()
        {
            if (_started)
            {
                Stop();
            }

            DataFlow dataFlow = (DataFlow)_speechSettings.SelectedDataFlowId;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            // Fix: use a specific exception type with a corrected message
            // ("avalibe" -> "available") and also reject negative indices, which
            // the original bounds check let through to the indexer.
            if (_speechSettings.InputDeviceIndex < 0 ||
                _speechSettings.InputDeviceIndex >= devices.Count)
            {
                throw new ArgumentOutOfRangeException(
                    nameof(_speechSettings.InputDeviceIndex),
                    $"Device index {_speechSettings.InputDeviceIndex} is not available");
            }

            if (dataFlow == DataFlow.Render)
            {
                // Loopback capture of the render (output) device, using the
                // wave format derived from the current settings.
                var wasapiFormat = _waveFormatAdapter.WaveFormatFromCurrentSettings();
                _soundIn = new WasapiLoopbackCapture(100, wasapiFormat);
            }
            else
            {
                // Regular capture (microphone / line-in).
                _soundIn = new WasapiCapture();
            }

            _soundIn.Device = devices[_speechSettings.InputDeviceIndex];

            _soundIn.Initialize();

            var wasapiCaptureSource = new SoundInSource(_soundIn)
            {
                // Required for the manual read loop below; with zero-padding the
                // loop would never observe the end of the available data.
                FillWithZeros = false
            };

            _waveSource = wasapiCaptureSource
                          .ChangeSampleRate(_speechSettings.SampleRateValue) // sample rate
                          .ToSampleSource()
                          .ToWaveSource(_speechSettings.BitsPerSampleValue); //bits per sample

            _waveSource = _speechSettings.ChannelValue == 1
                ? _waveSource.ToMono()
                : _waveSource.ToStereo();

            wasapiCaptureSource.DataAvailable += (s, e) =>
            {
                //read data from the converted source
                //important: don't use the e.Data here
                //the e.Data contains the raw data provided by the
                //soundInSource which won't have your target format
                byte[] buffer = new byte[_waveSource.WaveFormat.BytesPerSecond / 2];
                int    read;

                //keep reading as long as we still get some data
                while ((read = _waveSource.Read(buffer, 0, buffer.Length)) > 0)
                {
                    SendData(buffer, read);
                }
            };

            _soundIn.Start();

            _started = true;
        }