Example #1
        /// <summary>
        /// Transmits the given audio frame into an automatically allocated buffer. Returns whether the operation succeeded, along with the buffer on success.
        /// </summary>
        /// <param name="context">The <see cref="DecodeContext"/> containing decoding information.</param>
        /// <param name="frame">The <see cref="AVFrame"/> containing decoded audio data.</param>
        /// <param name="buffer">An auto allocated buffer. If the function succeeds, it contains audio data of required format. The data is NOT planar.</param>
        /// <returns><see langword="true"/> if all operation succeeds, otherwise <see langword="false"/>.</returns>
        /// <seealso cref="RequiredChannels"/>
        /// <seealso cref="RequiredSampleFormat"/>
        /// <seealso cref="RequiredSampleRate"/>
        internal static bool TransmitAudioFrame([NotNull] DecodeContext context, [NotNull] AVFrame *frame, [CanBeNull] out byte[] buffer)
        {
            buffer = null;

            var audioContext = context.AudioContext;

            if (audioContext == null)
            {
                return false;
            }

            if (frame->nb_samples == 0 || frame->channels == 0)
            {
                return true;
            }

            const int            dstChannels     = RequiredChannels;
            const AVSampleFormat dstSampleFormat = RequiredSampleFormat;
            const int            dstSampleRate   = RequiredSampleRate;

            var resampleContext = audioContext.GetSuitableResampleContext(dstSampleFormat, dstChannels, dstSampleRate);
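            // Assumption (not shown in this excerpt): GetSuitableResampleContext presumably
            // returns a cached, initialized SwrContext for this source/destination format
            // combination, so consecutive frames do not reallocate the resampler every time.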

            // First, roughly estimate the number of samples in the output data.
            var roughDstSampleCount = (int)ffmpeg.av_rescale_rnd(frame->nb_samples, dstSampleRate, audioContext.SampleRate, AVRounding.AV_ROUND_UP);
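            // For reference: av_rescale_rnd(a, b, c, rnd) computes a * b / c with 64-bit
            // intermediates and the given rounding mode, so the multiplication above cannot
            // overflow for realistic sample counts and sample rates.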

            if (roughDstSampleCount < 0)
            {
                throw new FFmpegException("Failed to calculate simple rescaled sample count.");
            }

            var dstSampleCount = roughDstSampleCount;

            // About dstData and being continuous:
            // We only care about 16-bit stereo audio, so the audio output always has 1 plane (not planar).
            // For more complicated situations: http://blog.csdn.net/dancing_night/article/details/45642107
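            // For reference, packed (interleaved) 16-bit stereo lays out samples as
            //     data[0] = L0 R0 L1 R1 L2 R2 ...
            // whereas the planar variant (e.g. AV_SAMPLE_FMT_S16P) would use
            //     data[0] = L0 L1 L2 ...    data[1] = R0 R1 R2 ...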
            byte **   dstData     = null;
            var       dstLineSize = 0;
            const int planeCount  = 1;

            try
            {
                // Allocate channel array and sample buffers.
                Verify(ffmpeg.av_samples_alloc_array_and_samples(&dstData, &dstLineSize, dstChannels, dstSampleCount, dstSampleFormat, 0));

                Debug.Assert(dstData != null);

                // Then consider the possible resample delay and calculate the correct number of samples.
                // TODO: Isn't this redundant? Couldn't we have used this value in the first place?
                dstSampleCount = (int)ffmpeg.av_rescale_rnd(ffmpeg.swr_get_delay(resampleContext, audioContext.SampleRate) + frame->nb_samples, dstSampleRate, audioContext.SampleRate, AVRounding.AV_ROUND_UP);

                if (dstSampleCount <= 0)
                {
                    throw new FFmpegException("Failed to calculate rescaled sample count (with possible delays).");
                }

                // If there is a resampler delay, we have to reallocate the buffers. (Yeah, actually just one buffer, since the output is packed.)
                if (dstSampleCount > roughDstSampleCount)
                {
                    for (var i = 0; i < planeCount; ++i)
                    {
                        ffmpeg.av_freep(&dstData[i]);
                    }

                    Verify(ffmpeg.av_samples_alloc(dstData, &dstLineSize, dstChannels, dstSampleCount, dstSampleFormat, 1));
                }

                var ptrs = frame->data.ToArray();
                int convertRet;

                // Next, resample.
                fixed (byte **data = ptrs)
                {
                    convertRet = ffmpeg.swr_convert(resampleContext, dstData, dstSampleCount, data, frame->nb_samples);

                    Verify(convertRet);
                }

                // Get resampled data size...
                var resampledDataSize = ffmpeg.av_samples_get_buffer_size(&dstLineSize, dstChannels, convertRet, dstSampleFormat, 1);

                // av_samples_get_buffer_size returns a negative error code on failure,
                // which would make the managed allocation below throw.
                Verify(resampledDataSize);

                // ... allocate the managed buffer...
                buffer = new byte[resampledDataSize];

                // ... and write to it.
                // TODO: sometimes dstData[0] is null?
                if (dstData[0] != null)
                {
                    Marshal.Copy((IntPtr)dstData[0], buffer, 0, resampledDataSize);
                }
            }
            finally
            {
                // Finally, clean up the native buffers.
                if (dstData != null)
                {
                    for (var i = 0; i < planeCount; ++i)
                    {
                        if (dstData[i] != null)
                        {
                            ffmpeg.av_freep(&dstData[i]);
                        }
                    }

                    ffmpeg.av_freep(&dstData);
                }
            }

            return true;
        }
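
A minimal usage sketch (hypothetical): decodeContext, frame, and the MonoGame DynamicSoundEffectInstance named soundInstance below are assumed to come from the surrounding decoding loop; they are not part of this file.

            byte[] audioBuffer;

            // Resample one decoded frame and queue the packed 16-bit stereo bytes for playback.
            // soundInstance is assumed to be created with RequiredSampleRate and stereo channels.
            if (TransmitAudioFrame(decodeContext, frame, out audioBuffer) && audioBuffer != null)
            {
                soundInstance.SubmitBuffer(audioBuffer);
            }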
Example #2
        /// <summary>
        /// Transmits data from the current video frame of a <see cref="DecodeContext"/> to a <see cref="Texture2D"/>, using an automatically allocated data buffer.
        /// The texture's surface format must be <see cref="SurfaceFormat.Color"/>.
        /// </summary>
        /// <param name="context">The <see cref="DecodeContext"/> </param>
        /// <param name="texture">The <see cref="Texture2D"/> to transmit to.</param>
        /// <returns><see langword="true"/> if all successful, otherwise <see langword="false"/>.</returns>
        /// <seealso cref="RequiredPixelFormat"/>
        internal static bool TransmitVideoFrame([NotNull] DecodeContext context, [NotNull] Texture2D texture)
        {
            var buffer = new uint[texture.Width * texture.Height];

            return TransmitVideoFrame(context, texture, buffer);
        }
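
The three-argument overload that this method delegates to is not shown in this excerpt. Purely as a hedged sketch of how it might look, assuming a sws_scale-based conversion to RequiredPixelFormat, and with member names (VideoContext, CurrentFrame, GetSuitableScaleContext) invented by analogy with the audio path; the actual implementation and binding signatures may differ:

        // HYPOTHETICAL sketch, not the real implementation.
        internal static bool TransmitVideoFrame([NotNull] DecodeContext context, [NotNull] Texture2D texture, [NotNull] uint[] buffer)
        {
            var videoContext = context.VideoContext;    // assumed counterpart of AudioContext

            if (videoContext == null)
            {
                return false;
            }

            var frame = videoContext.CurrentFrame;      // hypothetical accessor returning AVFrame*

            if (frame == null || frame->data[0] == null)
            {
                return false;
            }

            // Ask for a (presumably cached) scale context, mirroring GetSuitableResampleContext.
            var scaleContext = videoContext.GetSuitableScaleContext(RequiredPixelFormat, texture.Width, texture.Height);

            var srcPtrs    = frame->data.ToArray();
            var srcStrides = frame->linesize.ToArray();
            var dstStride  = texture.Width * sizeof(uint);

            fixed (byte **srcData = srcPtrs)
            fixed (int *srcLineSize = srcStrides)
            fixed (uint *p = buffer)
            {
                var dstData = (byte*)p;

                // Convert the frame into the caller-provided buffer. SurfaceFormat.Color
                // is 32 bits per pixel, so the destination is a single packed plane.
                Verify(ffmpeg.sws_scale(scaleContext, srcData, srcLineSize, 0, frame->height, &dstData, &dstStride));
            }

            texture.SetData(buffer);

            return true;
        }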