Example #1
0
        /// <summary>
        /// Mixes <paramref name="mix"/> with <paramref name="front"/>.
        /// The front stream is adapted to the channel count, sample rate and
        /// bit depth of <paramref name="mix"/> before mixing.
        /// </summary>
        /// <param name="front">Primary stream; its format is converted.</param>
        /// <param name="mix">Stream mixed in; its format wins.</param>
        /// <returns>New AudioData wrapping the mixed stream, or null if either input is null.</returns>
        public AudioData Mix(AudioData front, AudioData mix)     // This mixes mix with Front.  Format changed to MIX.
        {
            if (front == null || mix == null)
            {
                return(null);
            }

            IWaveSource frontws = (IWaveSource)front.data;
            IWaveSource mixws   = (IWaveSource)mix.data;

            // Adapt front's channel count to mix's.
            // NOTE(review): ToStereo/ToMono only cover 1- and 2-channel layouts;
            // sources with more channels are not normalised here — confirm inputs.
            if (frontws.WaveFormat.Channels < mixws.WaveFormat.Channels)      // need to adapt to previous format
            {
                frontws = frontws.ToStereo();
            }
            else if (frontws.WaveFormat.Channels > mixws.WaveFormat.Channels)
            {
                frontws = frontws.ToMono();
            }

            // Match sample rate and bit depth to mix's format.
            if (mixws.WaveFormat.SampleRate != frontws.WaveFormat.SampleRate || mixws.WaveFormat.BitsPerSample != frontws.WaveFormat.BitsPerSample)
            {
                frontws = ChangeSampleDepth(frontws, mixws.WaveFormat.SampleRate, mixws.WaveFormat.BitsPerSample);
            }

            // Removed the Debug.Assert that followed 'new': 'new' can never
            // return null in C#, so the assert was dead code.
            return(new AudioData(new MixWaveSource(frontws, mixws)));
        }
Example #2
0
        /// <summary>
        /// Appends <paramref name="end"/> after <paramref name="front"/>.
        /// The front stream is adapted to the channel count, sample rate and
        /// bit depth of <paramref name="end"/> before concatenation.
        /// </summary>
        /// <param name="front">Stream played first; its format is converted.</param>
        /// <param name="end">Stream appended; its format wins.</param>
        /// <returns>New AudioData wrapping the concatenated stream, or null if either input is null.</returns>
        public AudioData Append(AudioData front, AudioData end)      // this adds END to Front.   Format is changed to END
        {
            if (front == null || end == null)
            {
                return(null);
            }

            IWaveSource frontws = (IWaveSource)front.data;
            IWaveSource endws   = (IWaveSource)end.data;   // renamed from 'mixws': this is the appended source

            // Adapt front's channel count to end's.
            // NOTE(review): ToStereo/ToMono only cover 1- and 2-channel layouts;
            // sources with more channels are not normalised here — confirm inputs.
            if (frontws.WaveFormat.Channels < endws.WaveFormat.Channels)      // need to adapt to previous format
            {
                frontws = frontws.ToStereo();
            }
            else if (frontws.WaveFormat.Channels > endws.WaveFormat.Channels)
            {
                frontws = frontws.ToMono();
            }

            // Match sample rate and bit depth to end's format.
            if (endws.WaveFormat.SampleRate != frontws.WaveFormat.SampleRate || endws.WaveFormat.BitsPerSample != frontws.WaveFormat.BitsPerSample)
            {
                frontws = ChangeSampleDepth(frontws, endws.WaveFormat.SampleRate, endws.WaveFormat.BitsPerSample);
            }

            // Removed the Debug.Assert that followed 'new': 'new' can never
            // return null in C#, so the assert was dead code.
            return(new AudioData(new AppendWaveSource(frontws, endws)));
        }
        /// <summary>
        /// Starts loopback-recording the given device, converting captured audio
        /// to 44.1 kHz / 16-bit / stereo, and stores an NAudio-compatible
        /// description of that format in <c>waveFormat</c>.
        /// Shows a message box and returns when <paramref name="recordingDevice"/> is null.
        /// </summary>
        /// <param name="recordingDevice">Render device to capture via WASAPI loopback.</param>
        public void StartRecordingSetDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                MessageBox.Show(Properties.Strings.MessageBox_NoRecordingDevices);
                Console.WriteLine("No devices found.");
                return;
            }

            soundIn = new CSCore.SoundIn.WasapiLoopbackCapture
            {
                Device = recordingDevice
            };

            // Order matters: Initialize before wrapping, subscribe before Start.
            soundIn.Initialize();
            soundInSource = new SoundInSource(soundIn)
            {
                FillWithZeros = false   // read loop elsewhere relies on Read returning 0 when drained
            };
            convertedSource              = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
            convertedSource              = convertedSource.ToStereo();
            soundInSource.DataAvailable += OnDataAvailable;
            soundIn.Start();

            var format = convertedSource.WaveFormat;

            // Mirror the CSCore format as an NAudio WaveFormat (PCM) for consumers of this field.
            waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
        }
        /// <summary>
        /// Records system audio (WASAPI loopback) for <paramref name="time"/>
        /// milliseconds at 48 kHz / 24-bit stereo and writes it to a WAV file.
        /// </summary>
        /// <param name="output_file">Path of the WAV file to create.</param>
        /// <param name="time">Recording duration in milliseconds; 0 records (nearly) nothing.</param>
        /// <returns>Always 0.</returns>
        public static int Capture(string output_file, int time)
        {
            int sampleRate    = 48000;
            int bitsPerSample = 24;


            //create a new soundIn instance
            using (WasapiCapture soundIn = new WasapiLoopbackCapture())
            {
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false   // so the read loop below terminates when the buffer is drained
                };

                //create a source, that converts the data provided by the soundInSource to any other format
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                //channels...
                using (convertedSource = convertedSource.ToStereo())
                {
                    //create a new wavefile
                    using (WaveWriter waveWriter = new WaveWriter(output_file, convertedSource.WaveFormat))
                    {
                        //register an event handler for the DataAvailable event of the soundInSource
                        // (the handler captures convertedSource/waveWriter; it must only run while
                        // both 'using' blocks are still alive, i.e. until soundIn.Stop below)
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read data from the converedSource
                            // buffer sized to half a second of audio
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        //start recording
                        soundIn.Start();

                        //delay and keep recording
                        Thread.Sleep(time);

                        //stop recording
                        soundIn.Stop();
                    }
                }
            }
            return(0);
        }
        /// <summary>
        /// Starts loopback-recording the given device at 44.1 kHz / 16-bit stereo,
        /// allocates the capture/send double buffers, and launches the background
        /// event thread that pumps them.
        /// </summary>
        /// <param name="recordingDevice">the device to start recording</param>
        /// <returns>true if the recording is started, or false</returns>
        public bool StartRecordingSetDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                logger.Log(Properties.Strings.MessageBox_NoRecordingDevices);
                return(false);
            }

            try
            {
                soundIn = new CSCore.SoundIn.WasapiLoopbackCapture
                {
                    Device = recordingDevice
                };

                // Order matters: Initialize before wrapping, subscribe before Start.
                soundIn.Initialize();
                soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false   // consumers rely on Read returning 0 when drained
                };
                convertedSource              = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
                convertedSource              = convertedSource.ToStereo();
                soundInSource.DataAvailable += OnDataAvailable;
                soundIn.Stopped             += OnRecordingStopped;
                soundIn.Start();

                var format = convertedSource.WaveFormat;
                // Mirror the CSCore format as an NAudio WaveFormat (PCM) for consumers of this field.
                waveFormat     = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                isRecording    = true;
                // Double buffering: each buffer holds half a second of audio.
                bufferCaptured = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };
                bufferSend = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };

                // Background thread gets a weak reference so it cannot keep this
                // recorder alive after it is otherwise collectable.
                eventThread = new Thread(EventThread)
                {
                    Name         = "Loopback Event Thread",
                    IsBackground = true
                };
                eventThread.Start(new WeakReference <LoopbackRecorder>(this));

                return(true);
            }
            catch (Exception ex)
            {
                logger.Log(ex, "Error initializing the recording device:");
            }

            return(false);
        }
        /// <summary>
        /// Stops any recording in progress, then starts loopback-recording the
        /// given device at 44.1 kHz / 16-bit stereo and launches the background
        /// event thread that pumps the double buffers.
        /// </summary>
        /// <param name="recordingDevice">Device to record; no-op when null.</param>
        public void StartRecordingDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                Console.WriteLine("No devices found.");
                return;
            }

            StopRecording();

            soundIn = new CSCore.SoundIn.WasapiLoopbackCapture()
            {
                Device = recordingDevice
            };

            // Order matters: Initialize before wrapping, subscribe before Start.
            soundIn.Initialize();
            soundInSource = new SoundInSource(soundIn)
            {
                FillWithZeros = false   // consumers rely on Read returning 0 when drained
            };
            convertedSource              = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
            convertedSource              = convertedSource.ToStereo();
            soundInSource.DataAvailable += OnDataAvailable;
            soundIn.Start();

            waveFormat = convertedSource.WaveFormat;

            // Double buffering: each buffer holds half a second of audio.
            buffer0 = new BufferBlock()
            {
                Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
            };
            buffer1 = new BufferBlock()
            {
                Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
            };

            enabled = true;

            // Consistency: configure the thread with an object initializer, matching
            // the sibling StartRecordingSetDevice implementation in this file.
            eventThread = new Thread(EventThread)
            {
                Name         = "Loopback Event Thread",
                IsBackground = true
            };
            eventThread.Start(new WeakReference <LoopbackRecorder>(this));
        }
Example #7
0
        /// <summary>
        /// Wraps a SoundInSource, converting its output to the sample rate,
        /// bit depth and channel layout given by the broadcast settings, and
        /// queues incoming audio chunks for later consumption.
        /// </summary>
        /// <param name="soundIn">Capture source to wrap.</param>
        /// <param name="settings">Target broadcast format.</param>
        public SoundInSourceWrapper(SoundInSource soundIn, BroadcastSettings settings)
        {
            Settings = settings;
            _soundIn = soundIn;
            _audioChunks = new ConcurrentQueue <byte[]>();

            // Convert to the configured sample rate and bit depth first...
            var converted = soundIn
                .ChangeSampleRate(Settings.SampleRate)
                .ToSampleSource()
                .ToWaveSource(Settings.BitDepth);

            // ...then to the configured channel layout.
            _convertedSource = settings.Channel == AudioChannel.Mono
                ? converted.ToMono()
                : converted.ToStereo();

            // Subscribe last so no data arrives before the pipeline is ready.
            _soundIn.DataAvailable += SoundInDataAvailable;
        }
Example #8
0
        /// <summary>
        /// Begins capturing from the configured device and writes the converted
        /// audio to "out.wav". Does nothing when no device is selected or a
        /// recording is already running.
        /// </summary>
        public void Start()
        {
            bool alreadyRecording = _soundIn != null && _soundIn.RecordingState == RecordingState.Recording;
            if (_device == null || alreadyRecording)
            {
                return;
            }

            // Pick the capture implementation matching the current mode.
            _soundIn = _captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture();
            _soundIn.Device = _device;

            _soundIn.Initialize();

            // SoundInSource exposes the data captured by _soundIn as an IWaveSource.
            var soundInSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            // Convert to the requested sample rate / bit depth...
            var converted = soundInSource
                .ChangeSampleRate(sampleRate)
                .ToSampleSource()
                .ToWaveSource(bitsPerSample);

            // ...and to the requested channel layout.
            if (channels == 1)
            {
                converted = converted.ToMono();
            }
            else
            {
                converted = converted.ToStereo();
            }
            _convertedSource = converted;
            _waveWriter      = new WaveWriter("out.wav", _convertedSource.WaveFormat);

            soundInSource.DataAvailable += OnDatAvailable;
            _soundIn.Start();
        }
Example #9
0
        /// <summary>
        /// Streams loopback-captured system audio into a Discord voice channel
        /// until a key is pressed on the console.
        /// </summary>
        /// <param name="channel">Voice channel to stream into; must not be null.</param>
        /// <param name="nAudioChannels">1 for mono, 2 for stereo (default).</param>
        /// <param name="sampleRate">Sample rate in hertz (default 48000).</param>
        /// <param name="bitsPerSample">Bits per sample (default 16).</param>
        /// <exception cref="ArgumentNullException">When no channel is supplied.</exception>
        public async Task AudioStreamCommand([
                                                 Summary("Voice Channel name")]
                                             IVoiceChannel channel = null,
                                             [Summary("Number of audio channels, 1 for mono, 2 for stereo (Default)")]
                                             int nAudioChannels = 2,
                                             [Summary("Sample rate in hertz, 48000 (Default)")]
                                             int sampleRate = 48000,
                                             [Summary("Number of bits per sample, 16 (Default)")]
                                             int bitsPerSample = 16)
        {
            // Fix: the parameter defaults to null, which previously crashed with a
            // NullReferenceException on ConnectAsync. Fail with a clear error instead.
            if (channel == null)
            {
                throw new ArgumentNullException(nameof(channel));
            }

            var connection = await channel.ConnectAsync();

            var dstream = connection.CreatePCMStream(AudioApplication.Mixed);

            using (WasapiCapture soundIn = new WasapiLoopbackCapture())
            {
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the
                //soundInSource to any other format
                //in this case the "Fluent"-extension methods are being used
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample
                //channels...
                using (convertedSource = nAudioChannels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    //register an event handler for the DataAvailable event of
                    //the soundInSource
                    //Important: use the DataAvailable of the SoundInSource
                    //If you use the DataAvailable event of the ISoundIn itself
                    //the data recorded by that event might won't be available at the
                    //soundInSource yet
                    soundInSource.DataAvailable += (s, e) =>
                    {
                        //read data from the converedSource
                        //important: don't use the e.Data here
                        //the e.Data contains the raw data provided by the
                        //soundInSource which won't have your target format
                        byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                        int    read;

                        //keep reading as long as we still get some data
                        //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                        while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                        {
                            //write the read data to the Discord PCM stream
                            // ReSharper disable once AccessToDisposedClosure
                            dstream.Write(buffer, 0, read);
                        }
                    };

                    //we've set everything we need -> start capturing data
                    soundIn.Start();

                    Console.WriteLine("Capturing started ... press any key to stop.");
                    Console.ReadKey();
                    soundIn.Stop();
                }
            }
        }
Example #10
0
        /// <summary>
        /// Captures the output of the render device whose friendly name starts
        /// with "small" at the Google Speech format (16 kHz / 16-bit / mono)
        /// and writes it to "dump.wav" until a key is pressed.
        /// </summary>
        static void writeSpeakersToWav(string[] args)
        {
            const int GOOGLE_RATE            = 16000;
            const int GOOGLE_BITS_PER_SAMPLE = 16;
            const int GOOGLE_CHANNELS        = 1;
            // (removed unused EARPHONES constant)

            CaptureMode captureMode = CaptureMode.LoopbackCapture;

            DataFlow dataFlow = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            // Fix: the original passed the device object itself to the numeric
            // "{0:#00}" slot; list devices by index like the other menus do.
            for (int i = 0; i < devices.Count; i++)
            {
                Console.WriteLine("- {0:#00}: {1}", i, devices[i].FriendlyName);
            }
            var headphones = devices.First(x => x.FriendlyName.StartsWith("small"));

            //using (WasapiCapture capture = new WasapiLoopbackCapture())
            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                //if nessesary, you can choose a device here
                //to do so, simply set the device property of the capture to any MMDevice
                //to choose a device, take a look at the sample here: http://cscore.codeplex.com/

                soundIn.Device = headphones;

                Console.WriteLine("Waiting, press any key to start");
                Console.ReadKey();
                //initialize the selected device for recording
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the
                //soundInSource to any other format
                //in this case the "Fluent"-extension methods are being used
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(GOOGLE_RATE)         // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(GOOGLE_BITS_PER_SAMPLE); //bits per sample

                var channels = GOOGLE_CHANNELS;

                //channels...
                using (convertedSource = channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                    //create a wavewriter to write the data to
                    using (WaveWriter w = new WaveWriter("dump.wav", convertedSource.WaveFormat))
                    {
                        //setup an eventhandler to receive the recorded data
                        //register an event handler for the DataAvailable event of
                        //the soundInSource
                        //Important: use the DataAvailable of the SoundInSource
                        //If you use the DataAvailable event of the ISoundIn itself
                        //the data recorded by that event might won't be available at the
                        //soundInSource yet
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read data from the converedSource
                            //important: don't use the e.Data here
                            //the e.Data contains the raw data provided by the
                            //soundInSource which won't have your target format
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                // ReSharper disable once AccessToDisposedClosure
                                w.Write(buffer, 0, read);
                            }
                        };


                        //start recording
                        soundIn.Start();

                        Console.WriteLine("Started, press any key to stop");
                        Console.ReadKey();

                        //stop recording
                        soundIn.Stop();
                    }
            }
        }
        /// <summary>
        /// Entry point: parses an optional output file and duration (ms) from
        /// the command line, then records system audio (WASAPI loopback) at
        /// 48 kHz / 24-bit stereo to that file. With no duration (or 0) the
        /// recording runs until a key is pressed.
        /// </summary>
        /// <param name="args">[output.wav] [milliseconds]</param>
        /// <returns>1 when help was requested, otherwise 0.</returns>
        static int Main(string[] args)
        {
            int    time;
            string output_file;

            switch (args.Length)
            {
            case 1:
                if (args[0] == "-h")
                {
                    System.Console.WriteLine("Usage:");
                    System.Console.WriteLine("    LoopbackCapture.exe <output/wav> <time/milliseconds>");
                    return(1);
                }
                output_file = args[0];
                time        = 0;
                break;

            case 2:
                output_file = args[0];
                // Idiom fix: TryParse instead of catching the parse exception.
                if (!Int32.TryParse(args[1], out time))
                {
                    time = 0;
                }
                break;

            default:
                time        = 0;
                output_file = "record.wav";
                break;
            }

            int sampleRate    = 48000;
            int bitsPerSample = 24;

            //create a new soundIn instance
            using (WasapiCapture soundIn = new WasapiLoopbackCapture())
            {
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the soundInSource to any other format

                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                //channels...
                using (convertedSource = convertedSource.ToStereo())
                {
                    //create a new wavefile
                    using (WaveWriter waveWriter = new WaveWriter(output_file, convertedSource.WaveFormat))
                    {
                        //register an event handler for the DataAvailable event of the soundInSource
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read data from the converedSource
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        //start recording
                        soundIn.Start();

                        //delay and keep recording
                        if (time != 0)
                        {
                            Thread.Sleep(time);
                        }
                        else
                        {
                            Console.ReadKey();
                        }

                        //stop recording
                        soundIn.Stop();
                    }
                }
            }
            return(0);
        }
Example #12
0
        /// <summary>
        /// Records from the first active capture device at 48 kHz / 24-bit
        /// stereo into memory, raising OnNoiseData with a calibrated decibel
        /// level for each chunk. A timer stops the capture after
        /// <paramref name="time"/>; the method blocks until the Stopped event pulses.
        /// </summary>
        /// <param name="time">Recording duration (also the timer period).</param>
        public void Start(TimeSpan time)
        {
            int sampleRate    = 48000;
            int bitsPerSample = 24;

            MMDeviceCollection devices;

            // Poll until at least one active capture device appears.
            while (!(devices = MMDeviceEnumerator.EnumerateDevices(DataFlow.Capture, DeviceState.Active)).Any())
            {
                Thread.Sleep(2000);
            }
            var device = devices.FirstOrDefault();

            //TODO:We have a memory leak here (soundIn should be cleared from time to time). needs to be fixed!
            //create a new soundIn instance
            using (WasapiCapture soundIn = new WasapiCapture())
            {
                soundIn.Device = device;
                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the soundInSource to any other format

                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                using (var stream = new MemoryStream())
                {
                    var readBufferLength = convertedSource.WaveFormat.BytesPerSecond / 2;
                    //channels...
                    using (convertedSource = convertedSource.ToStereo())
                    {
                        //create a new wavefile
                        using (WaveWriter waveWriter = new WaveWriter(stream, convertedSource.WaveFormat))
                        {
                            //register an event handler for the DataAvailable event of the soundInSource
                            soundInSource.DataAvailable += (s, e) =>
                            {
                                //read data from the converedSource
                                byte[] buffer = new byte[readBufferLength];
                                int    read;

                                //keep reading as long as we still get some data
                                while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                                {
                                    // Clamp the calibrated level at 0 dB before publishing it.
                                    var decibelsCalibrated = (int)Math.Round(GetSoundLevel(buffer, _calibrateAdd, _calibratescale, _calibrateRange));
                                    if (decibelsCalibrated < 0)
                                    {
                                        decibelsCalibrated = 0;
                                    }
                                    OnNoiseData?.Invoke(null, new NoiseInfoEventArgs()
                                    {
                                        Decibels = decibelsCalibrated
                                    });
                                    //write the read data to a file
                                    waveWriter.Write(buffer, 0, read);
                                }
                            };
                            soundIn.Stopped += (e, args) =>
                            {
                                OnStopped?.Invoke(null, null);
                                lock (_stopLocker)
                                    Monitor.PulseAll(_stopLocker);
                            };

                            // Fix: dispose the stop timer (it was previously leaked).
                            using (var stopTimer = new Timer(state => soundIn?.Stop(), null, time, time))
                            {
                                //start recording
                                soundIn.Start();
                                OnStarted?.Invoke(null, null);

                                // Fix: the original used Monitor.Enter with no matching
                                // Exit, leaking the lock after Wait returned. A lock
                                // statement guarantees release.
                                lock (_stopLocker)
                                {
                                    Monitor.PulseAll(_stopLocker);
                                    Monitor.Wait(_stopLocker);   // released while waiting; reacquired on pulse
                                }
                                //stop recording
                                soundIn.Stop();
                            }
                        }
                    }
                }
            }
        }
Example #13
0
        /// <summary>
        /// Starts capturing from the device selected in the speech settings
        /// (loopback for Render, plain capture otherwise), converting the audio
        /// to the configured sample rate / bit depth / channel count and
        /// forwarding each chunk via SendData.
        /// </summary>
        /// <exception cref="Exception">When the configured device index does not exist.</exception>
        public override void Start()
        {
            if (_started)
            {
                Stop();
            }
            DataFlow dataFlow = (DataFlow)_speechSettings.SelectedDataFlowId;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);


            if (devices.Count - 1 < _speechSettings.InputDeviceIndex)
            {
                // Fix: corrected the typo/grammar in the error message ("is not avalibe").
                throw new Exception($"Device index {_speechSettings.InputDeviceIndex} is not available");
            }

            if (dataFlow == DataFlow.Render)
            {
                // Loopback capture needs an explicit wave format and latency (100 ms).
                var wasapiFormat = _waveFormatAdapter.WaveFormatFromCurrentSettings();
                _soundIn = new WasapiLoopbackCapture(100, wasapiFormat);
            }
            else
            {
                _soundIn = new WasapiCapture();
            }

            _soundIn.Device = devices[_speechSettings.InputDeviceIndex];

            _soundIn.Initialize();

            var wasapiCaptureSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            _waveSource = wasapiCaptureSource
                          .ChangeSampleRate(_speechSettings.SampleRateValue) // sample rate
                          .ToSampleSource()
                          .ToWaveSource(_speechSettings.BitsPerSampleValue); //bits per sample;

            if (_speechSettings.ChannelValue == 1)
            {
                _waveSource = _waveSource.ToMono();
            }
            else
            {
                _waveSource = _waveSource.ToStereo();
            }


            wasapiCaptureSource.DataAvailable += (s, e) =>
            {
                //read data from the converedSource
                //important: don't use the e.Data here
                //the e.Data contains the raw data provided by the
                //soundInSource which won't have your target format
                byte[] buffer = new byte[_waveSource.WaveFormat.BytesPerSecond / 2];
                int    read;

                //keep reading as long as we still get some data
                //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                while ((read = _waveSource.Read(buffer, 0, buffer.Length)) > 0)
                {
                    SendData(buffer, read);
                }
            };

            _soundIn.Start();

            _started = true;
        }
Example #14
0
        /// <summary>
        /// Records from the first active capture device for the given duration
        /// and writes the audio to <paramref name="fileName"/> using the sample
        /// rate, bit depth and channel count of <paramref name="format"/>.
        /// </summary>
        /// <param name="fileName">Path of the WAV file to create.</param>
        /// <param name="time">How long to record.</param>
        /// <param name="format">Target wave format.</param>
        public static void RecordTo(string fileName, TimeSpan time, WaveFormat format)
        {
            CaptureMode captureMode = CaptureMode.Capture;
            DataFlow    dataFlow    = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            // Use the first active device for the selected data flow.
            var device = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active).FirstOrDefault();

            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                soundIn.Device = device;
                soundIn.Initialize();

                var soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                // Convert the captured data to the requested format.
                IWaveSource converted = soundInSource
                    .ChangeSampleRate(format.SampleRate)
                    .ToSampleSource()
                    .ToWaveSource(format.BitsPerSample);

                using (converted = format.Channels == 1 ? converted.ToMono() : converted.ToStereo())
                using (WaveWriter writer = new WaveWriter(fileName, converted.WaveFormat))
                {
                    // Drain the converted source each time new data arrives.
                    soundInSource.DataAvailable += (sender, args) =>
                    {
                        byte[] chunk = new byte[converted.WaveFormat.BytesPerSecond / 2];
                        int bytesRead;
                        while ((bytesRead = converted.Read(chunk, 0, chunk.Length)) > 0)
                        {
                            writer.Write(chunk, 0, bytesRead);
                        }
                    };

                    soundIn.Start();

                    Console.WriteLine("Started recording");
                    Thread.Sleep(time);

                    soundIn.Stop();
                    Console.WriteLine("Finished recording");
                }
            }
        }
Example #15
0
        // ReSharper disable once UnusedParameter.Local
        /// <summary>
        /// Interactive console recorder: prompts for capture mode, device, sample rate,
        /// bit depth and channel count, records to "out.wav" until a key is pressed,
        /// then opens the recorded file.
        /// </summary>
        static void Main(string[] args)
        {
            //choose the capture mode
            Console.WriteLine("Select capturing mode:");
            Console.WriteLine("- 1: Capture");
            Console.WriteLine("- 2: LoopbackCapture");

            CaptureMode captureMode = (CaptureMode)ReadInteger(1, 2);
            DataFlow    dataFlow    = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            //---

            //select the device:
            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            if (!devices.Any())
            {
                Console.WriteLine("No devices found.");
                return;
            }

            Console.WriteLine("Select device:");
            for (int i = 0; i < devices.Count; i++)
            {
                Console.WriteLine("- {0:#00}: {1}", i, devices[i].FriendlyName);
            }
            int selectedDeviceIndex = ReadInteger(Enumerable.Range(0, devices.Count).ToArray());
            var device = devices[selectedDeviceIndex];

            //--- choose format
            Console.WriteLine("Enter sample rate:");
            int sampleRate;

            do
            {
                sampleRate = ReadInteger();
                if (sampleRate >= 100 && sampleRate <= 200000)
                {
                    break;
                }
                //bug fix: the accepted range is 100 Hz .. 200 kHz, but the old
                //message wrongly claimed "between 1kHz and 200kHz"
                Console.WriteLine("Must be between 100 Hz and 200 kHz.");
            } while (true);

            Console.WriteLine("Choose bits per sample (8, 16, 24 or 32):");
            int bitsPerSample = ReadInteger(8, 16, 24, 32);

            //note: this sample does not support multi channel formats like surround 5.1,...
            //if this is required, the DmoChannelResampler class can be used
            Console.WriteLine("Choose number of channels (1, 2):");
            int channels = ReadInteger(1, 2);

            //---

            //start recording

            //create a new soundIn instance
            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                //optional: set some properties
                soundIn.Device = device;
                //...

                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the
                //soundInSource to any other format
                //in this case the "Fluent"-extension methods are being used
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                //channels...
                using (convertedSource = channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    //create a new wavefile
                    using (WaveWriter waveWriter = new WaveWriter("out.wav", convertedSource.WaveFormat))
                    {
                        //register an event handler for the DataAvailable event of
                        //the soundInSource
                        //Important: use the DataAvailable of the SoundInSource
                        //If you use the DataAvailable event of the ISoundIn itself
                        //the data recorded by that event might won't be available at the
                        //soundInSource yet
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read data from the converedSource
                            //important: don't use the e.Data here
                            //the e.Data contains the raw data provided by the
                            //soundInSource which won't have your target format
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                // ReSharper disable once AccessToDisposedClosure
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        //we've set everything we need -> start capturing data
                        soundIn.Start();

                        Console.WriteLine("Capturing started ... press any key to stop.");
                        Console.ReadKey();

                        soundIn.Stop();
                    }
                }
            }

            Process.Start("out.wav");
        }
        /// <summary>
        /// Start recording on the device in the parameter.
        /// </summary>
        /// <param name="recordingDevice">the device to start recording; a Render device
        /// is captured via loopback, any other via normal WASAPI capture</param>
        /// <returns>true if the recording is started, or false</returns>
        public bool StartRecordingSetDevice(MMDevice recordingDevice)
        {
            if (recordingDevice == null)
            {
                logger.Log(Properties.Strings.MessageBox_NoRecordingDevices);
                return(false);
            }

            try
            {
                //loopback capture for render (playback) endpoints, plain capture otherwise
                if (recordingDevice.DataFlow == DataFlow.Render)
                {
                    soundIn = new CSCore.SoundIn.WasapiLoopbackCapture
                    {
                        Device = recordingDevice
                    };
                }
                else
                {
                    soundIn = new CSCore.SoundIn.WasapiCapture
                    {
                        Device = recordingDevice
                    };
                }


                soundIn.Initialize();
                //FillWithZeros = false so read loops stop when the device runs dry
                soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                var selectedFormat = mainForm.GetSelectedStreamFormat();
                var convertMultiChannelToStereo = mainForm.GetConvertMultiChannelToStereo();
                CSCore.WaveFormat format;
                switch (selectedFormat)
                {
                case Classes.SupportedStreamFormat.Wav:
                    convertedSource = soundInSource.ChangeSampleRate(44100).ToSampleSource().ToWaveSource(16);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    //bug fix: capture the format AFTER the optional stereo conversion so
                    //the advertised channel count matches the converted data (all other
                    //cases already did this)
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                case Classes.SupportedStreamFormat.Mp3_320:
                case Classes.SupportedStreamFormat.Mp3_128:
                    //MP3 streaming is always stereo 16 bit
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(16);
                    convertedSource = convertedSource.ToStereo();
                    format          = convertedSource.WaveFormat;
                    waveFormat      = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                case Classes.SupportedStreamFormat.Wav_16bit:
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(16);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                case Classes.SupportedStreamFormat.Wav_24bit:
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(24);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.Pcm, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                case Classes.SupportedStreamFormat.Wav_32bit:
                    //32 bit is written as IEEE float rather than PCM
                    convertedSource = soundInSource.ToSampleSource().ToWaveSource(32);
                    if (convertMultiChannelToStereo)
                    {
                        convertedSource = convertedSource.ToStereo();
                    }
                    format     = convertedSource.WaveFormat;
                    waveFormat = NAudio.Wave.WaveFormat.CreateCustomFormat(WaveFormatEncoding.IeeeFloat, format.SampleRate, format.Channels, format.BytesPerSecond, format.BlockAlign, format.BitsPerSample);
                    break;

                default:
                    //bug fix: previously fell through with waveFormat left null, which
                    //crashed with a NullReferenceException on the log line below
                    logger.Log($"Unsupported stream format: {selectedFormat}");
                    return(false);
                }

                logger.Log($"Stream format set to {waveFormat.Encoding} {waveFormat.SampleRate} {waveFormat.BitsPerSample} bit");
                soundInSource.DataAvailable += OnDataAvailable;
                soundIn.Stopped             += OnRecordingStopped;
                soundIn.Start();

                isRecording    = true;
                //half a second of audio per buffer block
                bufferCaptured = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };
                bufferSend = new BufferBlock()
                {
                    Data = new byte[convertedSource.WaveFormat.BytesPerSecond / 2]
                };

                eventThread = new Thread(EventThread)
                {
                    Name         = "Loopback Event Thread",
                    IsBackground = true
                };
                //WeakReference so the background thread does not keep the recorder alive
                eventThread.Start(new WeakReference <LoopbackRecorder>(this));

                return(true);
            }
            catch (Exception ex)
            {
                logger.Log(ex, "Error initializing the recording device:");
            }

            return(false);
        }
Example #17
0
        // ReSharper disable once UnusedParameter.Local
        /// <summary>
        /// Console recorder configured from App.config: "defaultToLoopback" can force
        /// loopback mode, "sampleRate" and "bitsPerSample" set the target format.
        /// Records mono audio to a timestamped .wav file until a key is pressed.
        /// Note: Boolean.Parse / Int32.Parse throw FormatException if a setting is
        /// missing or malformed — presumably intentional fail-fast; confirm with config.
        /// </summary>
        static void Main(string[] args)
        {
            CaptureMode captureMode;

            if (Boolean.Parse(ConfigurationManager.AppSettings["defaultToLoopback"]))
            {
                captureMode = CaptureMode.LoopbackCapture;
            }
            else
            {
                Console.WriteLine("Select capturing mode:");
                Console.WriteLine("- 1: Capture");
                Console.WriteLine("- 2: LoopbackCapture");

                captureMode = (CaptureMode)ReadInteger(1, 2);
            }
            DataFlow dataFlow = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            if (!devices.Any())
            {
                Console.WriteLine("No devices found.");
                return;
            }
            MMDevice device;

            //skip the prompt when there is only one device to choose from
            if (devices.Count == 1)
            {
                device = devices[0];
            }
            else
            {
                Console.WriteLine("Select device:");
                for (int i = 0; i < devices.Count; i++)
                {
                    Console.WriteLine("- {0:#00}: {1}", i, devices[i].FriendlyName);
                }
                int selectedDeviceIndex = ReadInteger(Enumerable.Range(0, devices.Count).ToArray());
                device = devices[selectedDeviceIndex];
            }

            int sampleRate    = Int32.Parse(ConfigurationManager.AppSettings["sampleRate"]);
            int bitsPerSample = Int32.Parse(ConfigurationManager.AppSettings["bitsPerSample"]);
            //channel count is fixed: recording is always converted to mono below
            int channels      = 1;

            //create a new soundIn instance
            using (WasapiCapture soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture())
            {
                //optional: set some properties
                soundIn.Device = device;
                //...

                //initialize the soundIn instance
                soundIn.Initialize();

                //create a SoundSource around the the soundIn instance
                //this SoundSource will provide data, captured by the soundIn instance
                SoundInSource soundInSource = new SoundInSource(soundIn)
                {
                    FillWithZeros = false
                };

                //create a source, that converts the data provided by the
                //soundInSource to any other format
                //in this case the "Fluent"-extension methods are being used
                IWaveSource convertedSource = soundInSource
                                              .ChangeSampleRate(sampleRate) // sample rate
                                              .ToSampleSource()
                                              .ToWaveSource(bitsPerSample); //bits per sample

                //channels...
                using (convertedSource = channels == 1 ? convertedSource.ToMono() : convertedSource.ToStereo())
                {
                    //create a new wavefile named after the current UTC time
                    var fileName = "out-" + DateTime.UtcNow.ToString("yyyy-MM-ddTHH-mm-ss") + ".wav";
                    using (WaveWriter waveWriter = new WaveWriter(fileName, convertedSource.WaveFormat))
                    {
                        //register an event handler for the DataAvailable event of
                        //the soundInSource
                        //Important: use the DataAvailable of the SoundInSource
                        //If you use the DataAvailable event of the ISoundIn itself
                        //the data recorded by that event might won't be available at the
                        //soundInSource yet
                        soundInSource.DataAvailable += (s, e) =>
                        {
                            //read data from the converedSource
                            //important: don't use the e.Data here
                            //the e.Data contains the raw data provided by the
                            //soundInSource which won't have your target format
                            byte[] buffer = new byte[convertedSource.WaveFormat.BytesPerSecond / 2];
                            int    read;

                            //keep reading as long as we still get some data
                            //if you're using such a loop, make sure that soundInSource.FillWithZeros is set to false
                            while ((read = convertedSource.Read(buffer, 0, buffer.Length)) > 0)
                            {
                                //write the read data to a file
                                // ReSharper disable once AccessToDisposedClosure
                                waveWriter.Write(buffer, 0, read);
                            }
                        };

                        //we've set everything we need -> start capturing data
                        soundIn.Start();

                        Console.WriteLine("Capturing started ... press any key to stop.");
                        Console.ReadKey();

                        //Stop must happen before the usings dispose the writer, otherwise
                        //the DataAvailable handler could write to a disposed WaveWriter
                        soundIn.Stop();
                    }
                }
            }
        }
        /// <summary>
        /// Initializes WASAPI capture on a user-selected device and streams the captured
        /// audio — converted to 16 kHz / 16 bit / mono, the format the Google speech API
        /// constants suggest — into _microphoneBuffer.
        /// </summary>
        /// <returns>The initialized capture instance; the caller is responsible for
        /// starting, stopping and disposing it.</returns>
        private WasapiCapture StartListeningOnLoopback()
        {
            const int GOOGLE_RATE            = 16000;
            const int GOOGLE_BITS_PER_SAMPLE = 16;
            const int GOOGLE_CHANNELS        = 1;

            CaptureMode captureMode = _captureMode;

            DataFlow dataFlow = captureMode == CaptureMode.Capture ? DataFlow.Capture : DataFlow.Render;

            var devices = MMDeviceEnumerator.EnumerateDevices(dataFlow, DeviceState.Active);

            //bug fix: prompt previously read "Please select devlice:"
            Console.WriteLine("Please select device:");
            for (int i = 0; i < devices.Count; i++)
            {
                Console.WriteLine(i + ") " + devices[i].FriendlyName);
            }

            //bug fix: int.Parse threw on non-numeric input and an out-of-range index
            //crashed on the indexer below; re-prompt until the selection is valid
            int deviceIndex;
            while (!int.TryParse(Console.ReadLine(), out deviceIndex) || deviceIndex < 0 || deviceIndex >= devices.Count)
            {
                Console.WriteLine("Invalid selection, try again:");
            }

            var headphones = devices[deviceIndex];

            _soundIn = captureMode == CaptureMode.Capture
                ? new WasapiCapture()
                : new WasapiLoopbackCapture();

            _soundIn.Device = headphones;

            //initialize the selected device for recording
            _soundIn.Initialize();

            //FillWithZeros must be false so the read loop in the handler terminates
            _soundInSource = new SoundInSource(_soundIn)
            {
                FillWithZeros = false
            };

            //convert the captured data to the target sample rate and bit depth
            _convertedSource = _soundInSource
                               .ChangeSampleRate(GOOGLE_RATE)         // sample rate
                               .ToSampleSource()
                               .ToWaveSource(GOOGLE_BITS_PER_SAMPLE); //bits per sample

            var channels = GOOGLE_CHANNELS;

            //channels...
            var src = channels == 1 ? _convertedSource.ToMono() : _convertedSource.ToStereo();

            _soundInSource.DataAvailable += (sender, args) =>
            {
                //read from src, not args.Data (args.Data is still in the raw device format)
                //bug fix: size the buffer from src's format — reads come from src, whose
                //mono format differs from _convertedSource's
                byte[] buffer = new byte[src.WaveFormat.BytesPerSecond / 2];
                int    read;

                //keep reading as long as we still get some data
                while ((read = src.Read(buffer, 0, buffer.Length)) > 0)
                {
                    Debug.WriteLine($"Read {read} bytes");
                    _microphoneBuffer.Add(ByteString.CopyFrom(buffer, 0, read));
                }
            };

            return(_soundIn);
        }