Example #1
        /// <summary>
        /// Starts rendering audio data.
        /// </summary>
        /// <param name="maxBufferSeconds">
        /// The maximum duration, in seconds, of audio that can be buffered for playback.
        /// </param>
        /// <param name="targetLatencyInMs">
        /// The target maximum number of milliseconds of acceptable lag between
        /// playback of samples and live sound being produced.
        /// </param>
        /// <param name="gain">
        /// The gain to be applied prior to rendering the audio.
        /// </param>
        /// <param name="inFormat">
        /// The input audio format.
        /// </param>
        public void StartRendering(double maxBufferSeconds, int targetLatencyInMs, float gain, WaveFormat inFormat)
        {
            if (this.wasapiRenderer != null)
            {
                this.StopRendering();
            }

            // Create a circular buffer to hold audio awaiting playback.
            this.audioBufferStream = new CircularBufferStream((long)Math.Ceiling(maxBufferSeconds * inFormat.AvgBytesPerSec), false);

            this.wasapiRenderer = new WasapiRenderer(this.audioDevice);

            // Create a callback delegate and marshal it to a function pointer. Keep a
            // reference to the delegate as a class field to prevent it from being GC'd.
            this.callbackDelegate = new AudioDataRequestedCallback(this.AudioDataRequestedCallback);

            // Initialize the renderer with the desired parameters
            this.wasapiRenderer.Initialize(targetLatencyInMs, gain, inFormat, this.callbackDelegate);

            // Tell WASAPI to start rendering
            this.wasapiRenderer.Start();
        }
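
For context, here is a minimal sketch of how a caller might drive this method. Only StartRendering and StopRendering come from the example above; the owning class name (AudioRenderer) and the WaveFormat factory call are assumptions for illustration.

        // Hypothetical caller; the class name and format factory are assumed, not from the source.
        var renderer = new AudioRenderer();
        WaveFormat format = WaveFormat.Create16kHz1Channel16BitPcm(); // assumed factory method

        // Buffer at most 5 seconds of audio, target 20 ms of playback latency, unity gain.
        renderer.StartRendering(maxBufferSeconds: 5.0, targetLatencyInMs: 20, gain: 1.0f, inFormat: format);

        // ... audio plays while data is pushed into the circular buffer ...

        renderer.StopRendering();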
Example #2
        /// <summary>
        /// Initializes the renderer.
        /// </summary>
        /// <param name="engineLatency">
        /// Number of milliseconds of acceptable lag between playback of samples and live sound being produced.
        /// </param>
        /// <param name="gain">
        /// The gain to be applied to the audio before rendering.
        /// </param>
        /// <param name="inFormat">
        /// The format of the input audio samples to be rendered. If this is null, the current default audio
        /// format of the renderer device will be assumed.
        /// </param>
        /// <param name="callback">
        /// Callback function delegate which will supply the data to be rendered.
        /// </param>
        public void Initialize(int engineLatency, float gain, WaveFormat inFormat, AudioDataRequestedCallback callback)
        {
            // Create our shutdown event - we want a manual reset event that starts in the not-signaled state.
            this.shutdownEvent = new ManualResetEvent(false);

            // Now activate an IAudioClient object on our preferred endpoint and retrieve the mix format for that endpoint.
            object obj = this.endpoint.Activate(ref audioClientIID, ClsCtx.INPROC_SERVER, IntPtr.Zero);

            this.audioClient = (IAudioClient)obj;

            // Load the MixFormat. This may differ depending on the shared mode used.
            this.LoadFormat();

            // Remember our configured latency
            this.engineLatencyInMs = engineLatency;

            // Set the gain
            this.gain = gain;

            // Check if the desired format is supported
            IntPtr closestMatchPtr;
            IntPtr inFormatPtr = WaveFormat.MarshalToPtr(inFormat);
            int    hr          = this.audioClient.IsFormatSupported(AudioClientShareMode.Shared, inFormatPtr, out closestMatchPtr);

            // Free inFormatPtr to prevent leaking memory
            Marshal.FreeHGlobal(inFormatPtr);

            if (hr == 0)
            {
                // S_OK: inFormat is supported as-is, so initialize the audio render
                // client with that format and render without resampling.
                this.mixFormat    = inFormat;
                this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;
            }
            else
            {
                // In all other cases, we need to resample to the mix format
                if ((hr == 1) && (closestMatchPtr != IntPtr.Zero))
                {
                    // S_FALSE: use the closest match suggested by IsFormatSupported() and resample
                    this.mixFormat    = WaveFormat.MarshalFromPtr(closestMatchPtr);
                    this.mixFrameSize = (this.mixFormat.BitsPerSample / 8) * this.mixFormat.Channels;

                    // Free closestMatchPtr to prevent leaking memory
                    Marshal.FreeCoTaskMem(closestMatchPtr);
                }
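
                // If IsFormatSupported failed or returned no closest match, this.mixFormat
                // retains the endpoint mix format loaded by LoadFormat(); the resampler
                // created below will convert inFormat to it.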
            }

            this.inputBufferSize  = (int)(this.engineLatencyInMs * inFormat.AvgBytesPerSec / 1000);
            this.outputBufferSize = (int)(this.engineLatencyInMs * this.mixFormat.AvgBytesPerSec / 1000);

            DeviceUtil.CreateResamplerBuffer(this.inputBufferSize, out this.inputSample, out this.inputBuffer);
            DeviceUtil.CreateResamplerBuffer(this.outputBufferSize, out this.outputSample, out this.outputBuffer);

            // Create resampler object
            this.resampler = DeviceUtil.CreateResampler(inFormat, this.mixFormat);

            this.InitializeAudioEngine();

            // Set the callback function
            this.dataRequestedCallback = callback;
        }
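
The buffer sizing near the end of Initialize converts the configured latency from milliseconds into a byte count: bytes = engineLatencyInMs * AvgBytesPerSec / 1000. A quick worked example with assumed values (48 kHz, 16-bit stereo PCM):

        // Worked example of the latency-to-bytes sizing above (values assumed for illustration).
        int engineLatencyInMs = 20;
        int avgBytesPerSec    = 48000 * 2 * 2;                             // 2 channels * 2 bytes/sample = 192,000 bytes/sec
        int bufferSize        = engineLatencyInMs * avgBytesPerSec / 1000; // 3,840 bytes = 960 four-byte frames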