Example #1
        /// <summary>
        /// Gets a list of available audio capture devices.
        /// </summary>
        /// <returns>
        /// An array of available capture device names.
        /// </returns>
        public static string[] GetAvailableCaptureDevices()
        {
            // Get the collection of available capture devices
            IMMDeviceCollection deviceCollection = DeviceUtil.GetAvailableDevices(EDataFlow.Capture);

            int      deviceCount = deviceCollection.GetCount();
            string[] devices     = new string[deviceCount];

            // Iterate over the collection to get the device names
            for (int i = 0; i < deviceCount; i++)
            {
                IMMDevice device = deviceCollection.Item(i);

                // Get the friendly name of the device
                devices[i] = DeviceUtil.GetDeviceFriendlyName(device);

                // Done with the device so release it
                Marshal.ReleaseComObject(device);
            }

            // Release the collection when done
            Marshal.ReleaseComObject(deviceCollection);

            return devices;
        }
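
A minimal usage sketch for the enumerator above (the containing class name, AudioDevices, is an assumption for illustration; GetAvailableCaptureDevices itself is the method shown):

        // List every capture device the enumerator reports
        foreach (string name in AudioDevices.GetAvailableCaptureDevices())
        {
            Console.WriteLine(name);
        }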
Example #2
        /// <summary>
        /// Initializes the audio capture device.
        /// </summary>
        /// <param name="deviceDescription">
        /// The friendly name description of the device to capture from. This is usually
        /// something like "Microphone Array (USB Audio)". To capture from
        /// the default device, pass in NULL or an empty string.
        /// </param>
        public void Initialize(string deviceDescription)
        {
            // Activate native audio COM objects on a thread-pool thread to ensure that they are in an MTA
            Task.Run(() =>
            {
                if (string.IsNullOrEmpty(deviceDescription))
                {
                    // Use the default console device
                    this.audioDevice = DeviceUtil.GetDefaultDevice(EDataFlow.Capture, ERole.Console);
                }
                else
                {
                    this.audioDevice = DeviceUtil.GetDeviceByName(EDataFlow.Capture, deviceDescription);
                }

                if (this.audioDevice != null)
                {
                    // Try to get the volume control
                    object obj  = this.audioDevice.Activate(new Guid(Guids.IAudioEndpointVolumeIIDString), ClsCtx.ALL, IntPtr.Zero);
                    this.volume = (IAudioEndpointVolume)obj;

                    // Now create an IAudioEndpointVolumeCallback object that wraps the callback and register it with the endpoint.
                    this.volumeCallback = new AudioEndpointVolumeCallback(this.AudioVolumeCallback);
                    this.volume.RegisterControlChangeNotify(this.volumeCallback);
                }
            }).Wait();
        }
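
Because the activation runs inside Task.Run(...).Wait(), any exception thrown in the lambda reaches the caller wrapped in an AggregateException. A sketch of how a caller might unwrap it (the capturer variable and its type are assumptions, not part of the example):

        try
        {
            capturer.Initialize("Microphone Array (USB Audio)");
        }
        catch (AggregateException ex)
        {
            // Task.Wait() wraps failures from the lambda; the original error is the inner exception
            Console.WriteLine(ex.InnerException?.Message);
        }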
Example #3
        /// <summary>
        /// Initializes the audio capture device.
        /// </summary>
        /// <param name="deviceDescription">
        /// The friendly name description of the device to capture from. This is usually
        /// something like "Microphone Array (USB Audio)". To capture from
        /// the default device, pass in NULL or an empty string.
        /// </param>
        public void Initialize(string deviceDescription)
        {
            Exception taskException = null;

            // Activate native audio COM objects on a thread-pool thread to ensure that they are in an MTA
            Task.Run(() =>
            {
                try
                {
                    if (string.IsNullOrEmpty(deviceDescription))
                    {
                        // Use the default console device
                        this.audioDevice = DeviceUtil.GetDefaultDevice(EDataFlow.Capture, ERole.Console);
                    }
                    else
                    {
                        this.audioDevice = DeviceUtil.GetDeviceByName(EDataFlow.Capture, deviceDescription);
                    }

                    if (this.audioDevice != null)
                    {
                        // Try to get the volume control
                        object obj  = this.audioDevice.Activate(new Guid(Guids.IAudioEndpointVolumeIIDString), ClsCtx.ALL, IntPtr.Zero);
                        this.volume = (IAudioEndpointVolume)obj;

                        // Now create an IAudioEndpointVolumeCallback object that wraps the callback and register it with the endpoint.
                        this.volumeCallback = new AudioEndpointVolumeCallback(this.AudioVolumeCallback);
                        this.volume.RegisterControlChangeNotify(this.volumeCallback);
                    }
                }
                catch (Exception e)
                {
                    taskException = e;
                }
            }).Wait();

            // Do error checking on the calling thread
            if (taskException != null)
            {
                // rethrow exception
                throw taskException;
            }
            else if (this.audioDevice == null)
            {
                throw new IOException(string.IsNullOrEmpty(deviceDescription) ?
                                      "No default audio capture device found." :
                                      $"Audio capture device {deviceDescription} not found.");
            }
        }
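
One caveat: rethrowing the captured exception with throw taskException; resets its stack trace to the rethrow site. If the trace from the task thread matters for diagnostics, ExceptionDispatchInfo (in System.Runtime.ExceptionServices) is the standard alternative; a sketch of the one-line substitution:

        // Rethrow while preserving the stack trace captured on the task thread
        System.Runtime.ExceptionServices.ExceptionDispatchInfo.Capture(taskException).Throw();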
Example #4
        /// <summary>
        /// Initialize the resampler.
        /// </summary>
        /// <param name="targetLatencyInMs">
        /// The target maximum number of milliseconds of acceptable lag between
        /// input and resampled output audio samples.
        /// </param>
        /// <param name="inFormat">
        /// The input format of the audio to be resampled.
        /// </param>
        /// <param name="outFormat">
        /// The output format of the resampled audio.
        /// </param>
        /// <param name="callback">
        /// Callback delegate which will receive the resampled data.
        /// </param>
        public void Initialize(int targetLatencyInMs, WaveFormat inFormat, WaveFormat outFormat, AudioDataAvailableCallback callback)
        {
            // Buffer sizes are calculated from the target latency.
            this.bufferLengthInMs    = targetLatencyInMs;
            this.inputBytesPerSecond = (int)inFormat.AvgBytesPerSec;
            this.inputBufferSize     = (int)(this.bufferLengthInMs * inFormat.AvgBytesPerSec / 1000);
            this.outputBufferSize    = (int)(this.bufferLengthInMs * outFormat.AvgBytesPerSec / 1000);

            // Activate native Media Foundation COM objects on a thread-pool thread to ensure that they are in an MTA
            Task.Run(() =>
            {
                DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
                DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

                // Create resampler object
                this.resampler = DeviceUtil.CreateResampler(inFormat, outFormat);
            }).Wait();

            // Set the callback function
            this.dataAvailableCallback = callback;
        }
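
The buffer sizes all follow one formula: bytes = targetLatencyInMs * AvgBytesPerSec / 1000. A worked example with illustrative values, assuming 16 kHz, 16-bit, mono input:

        // 20 ms of 16 kHz 16-bit mono audio: 20 * 32000 / 1000 = 640 bytes
        int targetLatencyInMs = 20;
        int avgBytesPerSec    = 16000 * 2 * 1;                               // samples/sec * bytes/sample * channels
        int inputBufferSize   = targetLatencyInMs * avgBytesPerSec / 1000;   // 640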
Example #5
        /// <summary>
        /// Initialize the capturer.
        /// </summary>
        /// <param name="engineLatency">
        /// Number of milliseconds of acceptable lag between live sound being produced and recording operation.
        /// </param>
        /// <param name="gain">
        /// The gain to be applied to the audio after capture.
        /// </param>
        /// <param name="outFormat">
        /// The format of the audio to be captured. If this is NULL, the default audio format of the
        /// capture device will be used.
        /// </param>
        /// <param name="callback">
        /// Callback function delegate which will handle the captured data.
        /// </param>
        /// <param name="speech">
        /// If true, sets the audio category to speech to optimize audio pipeline for speech recognition.
        /// </param>
        public void Initialize(int engineLatency, float gain, WaveFormat outFormat, AudioDataAvailableCallback callback, bool speech)
        {
            // Create our shutdown event - we want a manual reset event that starts in the not-signaled state.
            this.shutdownEvent = new ManualResetEvent(false);

            // Now activate an IAudioClient object on our preferred endpoint and retrieve the mix format for that endpoint.
            object obj = this.endpoint.Activate(ref audioClientIID, ClsCtx.INPROC_SERVER, IntPtr.Zero);

            this.audioClient = (IAudioClient)obj;

            // The following block enables advanced mic array APO pipeline on Windows 10 RS2 builds >= 15004.
            // This must be called before the call to GetMixFormat() in LoadFormat().
            if (speech)
            {
                IAudioClient2 audioClient2 = this.audioClient as IAudioClient2;
                if (audioClient2 != null)
                {
                    AudioClientProperties properties = new AudioClientProperties
                    {
                        Size     = Marshal.SizeOf<AudioClientProperties>(),
                        Category = AudioStreamCategory.Speech
                    };

                    int hr = audioClient2.SetClientProperties(ref properties);
                    if (hr != 0)
                    {
                        Console.WriteLine("Failed to set audio stream category to AudioCategory_Speech: {0}", hr);
                    }
                }
                else
                {
                    Console.WriteLine("Unable to get IAudioClient2 interface");
                }
            }

            // Load the MixFormat. This may differ depending on the shared mode used.
            this.LoadFormat();

            // Remember our configured latency
            this.engineLatencyInMs = engineLatency;

            // Set the gain
            this.gain = gain;

            // Determine whether or not we need a resampler
            this.resampler = null;

            if (outFormat != null)
            {
                // Check if the desired format is supported
                IntPtr closestMatchPtr;
                IntPtr outFormatPtr = WaveFormat.MarshalToPtr(outFormat);
                int    hr           = this.audioClient.IsFormatSupported(AudioClientShareMode.Shared, outFormatPtr, out closestMatchPtr);

                // Free outFormatPtr to prevent leaking memory
                Marshal.FreeHGlobal(outFormatPtr);

                if (hr == 0)
                {
                    // Replace the mix format with outFormat. Since it is supported, we will initialize
                    // the audio capture client with that format and capture without resampling.
                    this.mixFormat    = outFormat;
                    this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;
                }
                else
                {
                    // In all other cases, we need to resample to outFormat
                    if ((hr == 1) && (closestMatchPtr != IntPtr.Zero))
                    {
                        // Use closest match suggested by IsFormatSupported() and resample
                        this.mixFormat    = WaveFormat.MarshalFromPtr(closestMatchPtr);
                        this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;

                        // Free closestMatchPtr to prevent leaking memory
                        Marshal.FreeCoTaskMem(closestMatchPtr);
                    }

                    this.inputBufferSize  = (int)(this.engineLatencyInMs * this.mixFormat.AvgBytesPerSec / 1000);
                    this.outputBufferSize = (int)(this.engineLatencyInMs * outFormat.AvgBytesPerSec / 1000);

                    DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
                    DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

                    // Create resampler object
                    this.resampler = DeviceUtil.CreateResampler(this.mixFormat, outFormat);
                }
            }

            this.InitializeAudioEngine();

            // Set the callback function
            this.dataAvailableCallback = callback;
        }
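
For reference, the hr values tested above follow the WASAPI convention: IsFormatSupported returns S_OK (0) when the requested format is usable as-is, and S_FALSE (1) plus a closest-match suggestion when it is not. A usage sketch (the capturer variable and OnAudioData callback are assumptions; passing null for outFormat skips the format check and captures in the device's own mix format):

        // Hypothetical caller: 20 ms latency, unity gain, device mix format (no resampling),
        // speech category enabled for the mic-array APO pipeline
        capturer.Initialize(20, 1.0f, null, OnAudioData, true);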
Example #6
        /// <summary>
        /// Initialize the renderer.
        /// </summary>
        /// <param name="engineLatency">
        /// Number of milliseconds of acceptable lag between playback of samples and live sound being produced.
        /// </param>
        /// <param name="gain">
        /// The gain to be applied to the audio before rendering.
        /// </param>
        /// <param name="inFormat">
        /// The format of the input audio samples to be rendered. If this is NULL, the current default audio
        /// format of the renderer device will be assumed.
        /// </param>
        /// <param name="callback">
        /// Callback function delegate which will supply the data to be rendered.
        /// </param>
        public void Initialize(int engineLatency, float gain, WaveFormat inFormat, AudioDataRequestedCallback callback)
        {
            // Create our shutdown event - we want a manual reset event that starts in the not-signaled state.
            this.shutdownEvent = new ManualResetEvent(false);

            // Now activate an IAudioClient object on our preferred endpoint and retrieve the mix format for that endpoint.
            object obj = this.endpoint.Activate(ref audioClientIID, ClsCtx.INPROC_SERVER, IntPtr.Zero);

            this.audioClient = (IAudioClient)obj;

            // Load the MixFormat. This may differ depending on the shared mode used.
            this.LoadFormat();

            // Remember our configured latency
            this.engineLatencyInMs = engineLatency;

            // Set the gain
            this.gain = gain;

            // If no input format was specified, fall back to the endpoint's mix format
            if (inFormat == null)
            {
                inFormat = this.mixFormat;
            }

            // Check if the desired format is supported
            IntPtr closestMatchPtr;
            IntPtr inFormatPtr = WaveFormat.MarshalToPtr(inFormat);
            int    hr          = this.audioClient.IsFormatSupported(AudioClientShareMode.Shared, inFormatPtr, out closestMatchPtr);

            // Free inFormatPtr to prevent leaking memory
            Marshal.FreeHGlobal(inFormatPtr);

            if (hr == 0)
            {
                // Replace the mix format with inFormat. Since it is supported, we will initialize
                // the audio render client with that format and render without resampling.
                this.mixFormat    = inFormat;
                this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;
            }
            else
            {
                // In all other cases, we need to resample from inFormat to the mix format
                if ((hr == 1) && (closestMatchPtr != IntPtr.Zero))
                {
                    // Use closest match suggested by IsFormatSupported() and resample
                    this.mixFormat    = WaveFormat.MarshalFromPtr(closestMatchPtr);
                    this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;

                    // Free closestMatchPtr to prevent leaking memory
                    Marshal.FreeCoTaskMem(closestMatchPtr);
                }
            }

            this.inputBufferSize  = (int)(this.engineLatencyInMs * inFormat.AvgBytesPerSec / 1000);
            this.outputBufferSize = (int)(this.engineLatencyInMs * this.mixFormat.AvgBytesPerSec / 1000);

            DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
            DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

            // Create resampler object
            this.resampler = DeviceUtil.CreateResampler(inFormat, this.mixFormat);

            this.InitializeAudioEngine();

            // Set the callback function
            this.dataRequestedCallback = callback;
        }
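
Note the resampling direction relative to Example #5: the capturer resamples from the device mix format to the caller's outFormat, whereas the renderer resamples from the caller's inFormat to the device mix format. A usage sketch (the renderer variable, inputFormat, and OnAudioRequested are assumptions made for illustration):

        // Hypothetical caller: 20 ms latency, unity gain, render caller-supplied audio,
        // pulling data on demand through the callback
        renderer.Initialize(20, 1.0f, inputFormat, OnAudioRequested);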