Example #1
        /// <summary>
        /// Initializes the audio renderer.
        /// Call the Play method to start reading samples.
        /// </summary>
        private void Initialize()
        {
            Destroy();

            // Check the wave format
            if (WaveFormat.BitsPerSample != 16 || WaveFormat.Channels != 2)
            {
                throw new NotSupportedException("Wave Format has to be 16-bit and 2-channel.");
            }

            // Release the audio device always upon exiting
            if (Application.Current is Application app)
            {
                app.Dispatcher?.BeginInvoke(new Action(() => { app.Exit += OnApplicationExit; }));
            }

            // Enumerate devices. The default device is the first one so we check
            // that we have more than 1 device (other than the default stub)
            var hasAudioDevices = MediaElement.RendererOptions.UseLegacyAudioOut ?
                                  LegacyAudioPlayer.EnumerateDevices().Count > 1 :
                                  DirectSoundPlayer.EnumerateDevices().Count > 1;

            // Check if we have an audio output device.
            if (hasAudioDevices == false)
            {
                HasFiredAudioDeviceStopped = true;
                this.LogWarning(Aspects.AudioRenderer,
                                "No audio device found for output.");

                return;
            }

            // Initialize the SoundTouch Audio Processor (if available)
            AudioProcessor = (SoundTouch.IsAvailable == false) ? null : new SoundTouch();
            if (AudioProcessor != null)
            {
                AudioProcessor.SetChannels(Convert.ToUInt32(WaveFormat.Channels));
                AudioProcessor.SetSampleRate(Convert.ToUInt32(WaveFormat.SampleRate));
            }

            // Initialize the Audio Device
            AudioDevice = MediaElement.RendererOptions.UseLegacyAudioOut ?
                          new LegacyAudioPlayer(this, MediaElement.RendererOptions.LegacyAudioDevice?.DeviceId ?? -1) as IWavePlayer :
                          new DirectSoundPlayer(this, MediaElement.RendererOptions.DirectSoundDevice?.DeviceId ?? DirectSoundPlayer.DefaultPlaybackDeviceId);

            // Create the Audio Buffer
            SampleBlockSize = Constants.AudioBytesPerSample * Constants.AudioChannelCount;
            var bufferLength = WaveFormat.ConvertMillisToByteSize(2000); // 2-second buffer

            AudioBuffer = new CircularBuffer(bufferLength);
            AudioDevice.Start();
        }
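
The 2-second figure above is converted to bytes by WaveFormat.ConvertMillisToByteSize, which is not shown in these examples. For the 16-bit, 2-channel format this renderer enforces, the conversion is plain PCM arithmetic; a minimal sketch, assuming a 48 kHz sample rate (the rate is an assumption, not stated in the snippet):

        // Hypothetical helper mirroring what ConvertMillisToByteSize likely computes.
        // Assumes 16-bit samples, 2 channels and 48 kHz; the real method may differ.
        internal static class AudioBufferMath
        {
            public static int MillisToByteSize(int millis, int sampleRate = 48000, int channels = 2, int bytesPerSample = 2)
            {
                var bytesPerSecond = sampleRate * channels * bytesPerSample;  // 192,000 bytes/s
                var byteCount = (long)millis * bytesPerSecond / 1000;         // 2,000 ms -> 384,000 bytes

                // Align down to a whole sample block so a read never splits a frame.
                var blockAlign = channels * bytesPerSample;                   // 4 bytes per frame
                return (int)(byteCount - (byteCount % blockAlign));
            }
        }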
Example #2
        /// <summary>
        /// Initializes the audio renderer.
        /// Call the Play method to start reading samples.
        /// </summary>
        private void Initialize()
        {
            Destroy();

            // Release the audio device always upon exiting
            if (Application.Current != null)
            {
                GuiContext.Current.EnqueueInvoke(DispatcherPriority.Render, () =>
                                                 Application.Current.Exit += OnApplicationExit);
            }

            // Enumerate devices. The default device is the first one so we check
            // that we have more than 1 device (other than the default stub)
            var hasAudioDevices = MediaElement.RendererOptions.UseLegacyAudioOut ?
                                  LegacyAudioPlayer.EnumerateDevices().Count > 1 :
                                  DirectSoundPlayer.EnumerateDevices().Count > 1;

            // Check if we have an audio output device.
            if (hasAudioDevices == false)
            {
                WaitForReadyEvent.Complete();
                HasFiredAudioDeviceStopped = true;
                this.LogWarning(Aspects.AudioRenderer,
                                "No audio device found for output.");

                return;
            }

            // Initialize the SoundTouch Audio Processor (if available)
            AudioProcessor = (SoundTouch.IsAvailable == false) ? null : new SoundTouch();
            if (AudioProcessor != null)
            {
                AudioProcessor.SetChannels(Convert.ToUInt32(WaveFormat.Channels));
                AudioProcessor.SetSampleRate(Convert.ToUInt32(WaveFormat.SampleRate));
            }

            // Initialize the Audio Device
            AudioDevice = MediaElement.RendererOptions.UseLegacyAudioOut ?
                          new LegacyAudioPlayer(this, MediaElement.RendererOptions.LegacyAudioDevice?.DeviceId ?? -1) as IWavePlayer :
                          new DirectSoundPlayer(this, MediaElement.RendererOptions.DirectSoundDevice?.DeviceId ?? DirectSoundPlayer.DefaultPlaybackDeviceId);

            // Create the Audio Buffer
            SampleBlockSize = Constants.Audio.BytesPerSample * Constants.Audio.ChannelCount;
            var bufferLength = WaveFormat.ConvertMillisToByteSize(2000); // 2-second buffer

            AudioBuffer = new CircularBuffer(bufferLength);
            AudioDevice.Start();
        }
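
Both versions above subscribe OnApplicationExit so the device is always released when the application shuts down. The handler itself is not part of these snippets; a minimal sketch of what it might look like, assuming Destroy() is the same cleanup routine called at the top of Initialize():

        // Hypothetical handler; the actual implementation is not shown in these examples.
        private void OnApplicationExit(object sender, System.Windows.ExitEventArgs e)
        {
            // Release the audio device and buffers before the process tears down;
            // leaving a device open during shutdown can hang some audio drivers.
            Destroy();
        }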
Example #3
        /// <summary>
        /// Initializes the audio renderer.
        /// Call the Play method to start reading samples.
        /// </summary>
        private void Initialize()
        {
            Destroy();

            // Enumerate devices. The default device is the first one so we check
            // that we have more than 1 device (other than the default stub)
            var hasAudioDevices = MediaElement.RendererOptions.UseLegacyAudioOut ?
                                  LegacyAudioPlayer.EnumerateDevices().Count > 1 :
                                  DirectSoundPlayer.EnumerateDevices().Count > 1;

            // Check if we have an audio output device.
            if (hasAudioDevices == false)
            {
                WaitForReadyEvent = null;
                MediaCore.Log(MediaLogMessageType.Warning,
                              $"AUDIO OUT: No audio device found for output.");

                return;
            }

            // Initialize the SoundTouch Audio Processor (if available)
            AudioProcessor = (SoundTouch.IsAvailable == false) ? null : new SoundTouch
            {
                Channels   = Convert.ToUInt32(WaveFormat.Channels),
                SampleRate = Convert.ToUInt32(WaveFormat.SampleRate)
            };

            // Initialize the Audio Device
            AudioDevice = MediaElement.RendererOptions.UseLegacyAudioOut ?
                          new LegacyAudioPlayer(this, MediaElement.RendererOptions.LegacyAudioDevice?.DeviceId ?? -1) as IWavePlayer :
                          new DirectSoundPlayer(this, MediaElement.RendererOptions.DirectSoundDevice?.DeviceId ?? DirectSoundPlayer.DefaultPlaybackDeviceId);

            // Create the Audio Buffer
            SampleBlockSize = Constants.Audio.BytesPerSample * Constants.Audio.ChannelCount;
            var bufferLength = WaveFormat.ConvertMillisToByteSize(2000); // 2-second buffer

            AudioBuffer = new CircularBuffer(bufferLength);
            AudioDevice.Start();
        }
Example #4
        /// <summary>
        /// Initializes the audio renderer.
        /// Call the Play method to start reading samples.
        /// </summary>
        private void Initialize()
        {
            Destroy();

            if (SoundTouch.IsAvailable)
            {
                AudioProcessor = new SoundTouch
                {
                    Channels   = Convert.ToUInt32(WaveFormat.Channels),
                    SampleRate = Convert.ToUInt32(WaveFormat.SampleRate)
                };
            }

            AudioDevice = MediaElement.RendererOptions.UseLegacyAudioOut ?
                          new LegacyAudioPlayer(this, MediaElement.RendererOptions.LegacyAudioDevice?.DeviceId ?? -1) as IWavePlayer :
                          new DirectSoundPlayer(this, MediaElement.RendererOptions.DirectSoundDevice?.DeviceId ?? DirectSoundPlayer.DefaultPlaybackDeviceId);

            SampleBlockSize = Constants.Audio.BytesPerSample * Constants.Audio.ChannelCount;
            var bufferLength = WaveFormat.ConvertMillisToByteSize(AudioDevice.DesiredLatency) * MediaCore.Blocks[MediaType.Audio].Capacity / 2;

            AudioBuffer = new CircularBuffer(bufferLength);
            AudioDevice.Start();
        }
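
Unlike the fixed 2-second buffer in Examples #1 through #3, this version sizes the buffer from the device latency and the audio block cache. A worked instance of the formula, with assumed values (neither appears in the snippet):

        // Assumed values: DesiredLatency = 40 ms, Blocks[Audio].Capacity = 48,
        // and 16-bit stereo at 48 kHz (192 bytes per millisecond).
        var desiredLatencyMs = 40;
        var audioBlockCapacity = 48;
        var bytesPerMillisecond = 192;
        var bufferLength = (desiredLatencyMs * bytesPerMillisecond) * audioBlockCapacity / 2;
        // 7,680 * 48 / 2 = 184,320 bytes, i.e. roughly 960 ms of audio.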
Example #5
        private bool Synchronize(byte[] targetBuffer, int targetBufferOffset, int requestedBytes, double speedRatio)
        {
            #region Documentation

            /*
             * Wikipedia says:
             * For television applications, audio should lead video by no more than 15 milliseconds and audio should
             * lag video by no more than 45 milliseconds. For film, acceptable lip sync is considered to be no more
             * than 22 milliseconds in either direction.
             *
             * The Media and Acoustics Perception Lab says:
             * The results of the experiment determined that the average audio leading threshold for a/v sync
             * detection was 185.19 ms, with a standard deviation of 42.32 ms
             *
             * The ATSC says:
             * At first glance it seems loose: +90 ms to -185 ms as a Window of Acceptability
             * - Undetectable from -100 ms to +25 ms
             * - Detectable at -125 ms & +45 ms
             * - Becomes unacceptable at -185 ms & +90 ms
             *
             * NOTE: (- Sound delayed, + Sound advanced)
             */

            #endregion

            #region Private State

            var audioLatencyMs    = Latency.TotalMilliseconds;
            var isBeyondThreshold = false;
            var readableCount     = AudioBuffer.ReadableCount;
            var rewindableCount   = AudioBuffer.RewindableCount;

            #endregion

            #region Sync Give-up Conditions

            if (MediaElement.RendererOptions.AudioDisableSync)
            {
                return true;
            }

            // Determine if we should continue to perform syncs.
            // Some live, non-seekable streams will send out-of-sync audio packets
            // and after trying too many times we simply give up.
            // The Sync conditions are reset in the Update method.
            if (MediaCore.State.IsSeekable == false && PlaySyncGaveUp.Value == false)
            {
                // 1. Determine if a sync is required
                if (audioLatencyMs > SyncThresholdLagging ||
                    audioLatencyMs < SyncThresholdLeading ||
                    Math.Abs(audioLatencyMs) > SyncThresholdPerfect)
                {
                    PlaySyncCount++;
                }

                // 2. Compute the variables to determine give-up conditions
                var playbackElapsedSeconds = PlaySyncStartTime.HasValue == false ?
                                             0 : DateTime.UtcNow.Subtract(PlaySyncStartTime.Value).TotalSeconds;
                var syncsPerSecond = PlaySyncCount / playbackElapsedSeconds;

                // 3. Determine if we need to give up
                if (playbackElapsedSeconds >= 3 && syncsPerSecond >= 3)
                {
                    this.LogWarning(Aspects.AudioRenderer,
                                    $"SYNC AUDIO: GIVE UP | SECS: {playbackElapsedSeconds:0.00}; " +
                                    $"SYN: {PlaySyncCount}; RATE: {syncsPerSecond:0.00} SYN/s; LAT: {audioLatencyMs} ms.");
                    PlaySyncGaveUp.Value = true;
                }
            }

            // Detect if we have given up
            if (PlaySyncGaveUp.Value)
            {
                return true;
            }

            #endregion

            #region Large Latency Handling

            if (audioLatencyMs > SyncThresholdLagging)
            {
                isBeyondThreshold = true;

                // a positive audio latency means we are rendering audio behind (after) the clock (skip some samples)
                // and therefore we need to advance the buffer before we read from it.
                if (Math.Abs(speedRatio - 1.0) <= double.Epsilon)
                {
                    this.LogWarning(Aspects.AudioRenderer,
                                    $"SYNC AUDIO: LATENCY: {audioLatencyMs} ms. | SKIP (samples being rendered too late)");
                }

                // skip some samples from the buffer.
                var audioLatencyBytes = WaveFormat.ConvertMillisToByteSize(Convert.ToInt32(Math.Ceiling(audioLatencyMs)));
                AudioBuffer.Skip(Math.Min(audioLatencyBytes, readableCount));
            }
            else if (audioLatencyMs < SyncThresholdLeading)
            {
                isBeyondThreshold = true;

                // Compute the latency in bytes
                var audioLatencyBytes = WaveFormat.ConvertMillisToByteSize(Convert.ToInt32(Math.Ceiling(Math.Abs(audioLatencyMs))));

                // audioLatencyBytes = requestedBytes; // uncomment this line to enable rewinding.
                if (audioLatencyBytes > requestedBytes && audioLatencyBytes < rewindableCount)
                {
                    // This means the audio pointer is a little too far ahead of the clock
                    // and we need to rewind it by the computed latency in bytes.
                    AudioBuffer.Rewind(Math.Min(audioLatencyBytes, rewindableCount));
                }
                else
                {
                    // a negative audio latency means we are rendering audio ahead (before) the clock
                    // and therefore we need to render some silence until the clock catches up
                    if (Math.Abs(speedRatio - 1.0) <= double.Epsilon)
                    {
                        this.LogWarning(Aspects.AudioRenderer,
                                        $"SYNC AUDIO: LATENCY: {audioLatencyMs} ms. | WAIT (samples being rendered too early)");
                    }

                    // render silence for the wait time and return
                    Array.Clear(targetBuffer, targetBufferOffset, requestedBytes);
                    return false;
                }
            }

            #endregion

            #region Small Latency Handling

            // Check if minor adjustment is necessary
            if (MediaCore.State.HasVideo == false || Math.Abs(speedRatio - 1.0) > double.Epsilon ||
                isBeyondThreshold || Math.Abs(audioLatencyMs) <= SyncThresholdPerfect)
            {
                return true;
            }

            // Perform minor adjustments until the delay is less than 10ms in either direction
            var stepDurationMillis = Convert.ToInt32(Math.Min(SyncThresholdMaxStep, Math.Abs(audioLatencyMs)));
            var stepDurationBytes  = WaveFormat.ConvertMillisToByteSize(stepDurationMillis);

            if (audioLatencyMs > SyncThresholdPerfect)
            {
                AudioBuffer.Skip(Math.Min(stepDurationBytes, readableCount));
            }
            else if (audioLatencyMs < -SyncThresholdPerfect)
            {
                AudioBuffer.Rewind(Math.Min(stepDurationBytes, rewindableCount));
            }

            #endregion

            return true;
        }
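
The region structure above spreads the skip/rewind policy across several branches. A condensed, hypothetical restatement of the decision tree may help; the threshold names mirror the fields used above, but the default values are assumptions:

        // Hypothetical condensation of the sync policy above. Assumed defaults:
        // lagging/leading thresholds of +/-100 ms and a +/-10 ms "perfect" window.
        private enum SyncAction { None, Skip, RewindOrSilence, SmallSkip, SmallRewind }

        private static SyncAction ClassifyLatency(double latencyMs,
            double lagging = 100, double leading = -100, double perfect = 10)
        {
            if (latencyMs > lagging) return SyncAction.Skip;             // rendering late: drop samples
            if (latencyMs < leading) return SyncAction.RewindOrSilence;  // rendering early: rewind or wait
            if (latencyMs > perfect) return SyncAction.SmallSkip;        // minor lag: step forward
            if (latencyMs < -perfect) return SyncAction.SmallRewind;     // minor lead: step back
            return SyncAction.None;                                      // within the perfect window
        }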
Example #6
        private bool Synchronize(byte[] targetBuffer, int targetBufferOffset, int requestedBytes, double speedRatio)
        {
            #region Documentation

            /*
             * Wikipedia says:
             * For television applications, audio should lead video by no more than 15 milliseconds and audio should
             * lag video by no more than 45 milliseconds. For film, acceptable lip sync is considered to be no more
             * than 22 milliseconds in either direction.
             *
             * The Media and Acoustics Perception Lab says:
             * The results of the experiment determined that the average audio leading threshold for a/v sync
             * detection was 185.19 ms, with a standard deviation of 42.32 ms
             *
             * The ATSC says:
             * At first glance it seems loose: +90 ms to -185 ms as a Window of Acceptability
             * - Undetectable from -100 ms to +25 ms
             * - Detectable at -125 ms & +45 ms
             * - Becomes unacceptable at -185 ms & +90 ms
             *
             * NOTE: (- Sound delayed, + Sound advanced)
             */

            #endregion

            var hardwareLatencyMs   = WaveFormat.ConvertByteSizeToDuration(requestedBytes).TotalMilliseconds;
            var bufferLatencyMs     = BufferLatency.TotalMilliseconds; // we want the buffer latency to be the negative of the device latency
            var minAcceptableLeadMs = -1.5 * hardwareLatencyMs;        // less than this and we need to rewind samples
            var maxAcceptableLagMs  = -0.5 * hardwareLatencyMs;        // more than this and we need to skip samples
            var isLoggingEnabled    = Math.Abs(speedRatio - 1.0) <= double.Epsilon;
            var operationName       = string.Empty;

            try
            {
                RealTimeLatency = default;

                // we don't want to perform AV sync if the latency is huge
                // or if we have simply disabled it
                if (MediaElement.RendererOptions.AudioDisableSync)
                {
                    return true;
                }

                // The ideal target latency is the negative of the audio device's desired latency.
                // this is approximately -40ms (i.e. have the buffer position 40ms ahead (negative lag) of the playback clock
                // so that samples are rendered right on time.)
                if (bufferLatencyMs >= minAcceptableLeadMs && bufferLatencyMs <= maxAcceptableLagMs)
                {
                    return true;
                }

                if (bufferLatencyMs > maxAcceptableLagMs)
                {
                    // this is the case where the buffer latency is too positive (i.e. buffer is lagging by too much)
                    // the goal is to skip some samples to make the buffer latency approximately that of the hardware latency
                    // so that the buffer leads by the hardware lag and we get sync-perfect results.
                    var audioLatencyBytes = WaveFormat.ConvertMillisToByteSize(bufferLatencyMs + hardwareLatencyMs);

                    if (AudioBuffer.ReadableCount > audioLatencyBytes)
                    {
                        operationName = "SKIP OK ";
                        AudioBuffer.Skip(audioLatencyBytes);
                        return true;
                    }

                    // render silence and return
                    operationName = "SKIP ERR";
                    Array.Clear(targetBuffer, targetBufferOffset, requestedBytes);
                    return false;
                }
                else if (bufferLatencyMs < minAcceptableLeadMs)
                {
                    // this is the case where the buffer latency is too negative (i.e. buffer is leading by too much)
                    // the goal is to rewind some samples to make the buffer latency approximately that of the hardware latency
                    // so that the buffer leads by the hardware lag and we get sync-perfect results.
                    var audioLatencyBytes = WaveFormat.ConvertMillisToByteSize(Math.Abs(bufferLatencyMs) + hardwareLatencyMs);

                    if (AudioBuffer.RewindableCount > audioLatencyBytes)
                    {
                        operationName = "RWND OK ";
                        AudioBuffer.Rewind(audioLatencyBytes);
                        return true;
                    }

                    // render silence and return
                    operationName = "RWND ERR";
                    Array.Clear(targetBuffer, targetBufferOffset, requestedBytes);
                    return false;
                }
            }
            finally
            {
                RealTimeLatency = BufferLatency + TimeSpan.FromMilliseconds(hardwareLatencyMs);
                if (isLoggingEnabled && !string.IsNullOrWhiteSpace(operationName))
                {
                    this.LogWarning(Aspects.AudioRenderer,
                                    $"SYNC AUDIO: {operationName} | Initial: {bufferLatencyMs:0} ms. Current: {BufferLatency.TotalMilliseconds:0} ms. Device: {hardwareLatencyMs:0} ms.");
                }
            }

            return true;
        }
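
In this version the acceptance window derives entirely from the hardware callback size rather than fixed thresholds. With an assumed 40 ms device callback, the numbers work out as follows:

        // Worked example, assuming requestedBytes corresponds to a 40 ms callback:
        var hardwareLatencyMs = 40.0;
        var minAcceptableLeadMs = -1.5 * hardwareLatencyMs;  // -60 ms: leading more than this rewinds
        var maxAcceptableLagMs = -0.5 * hardwareLatencyMs;   // -20 ms: lagging past this skips
        // The buffer position is kept 20 to 60 ms ahead of the playback clock, so the
        // samples written during this callback play back roughly one callback later, on time.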