Example #1
        /// <summary>
        /// Converts decoded, raw frame data in the frame source into a usable frame. <br />
        /// The process includes performing picture, sample, or text conversions
        /// so that the decoded source frame data is easily usable in multimedia applications.
        /// </summary>
        /// <param name="input">The source frame to use as an input.</param>
        /// <param name="output">The target frame that will be updated with the source frame. If null is passed the frame will be instantiated.</param>
        /// <param name="siblings">The sibling blocks that may help guess some additional parameters for the input frame.</param>
        /// <returns>
        /// The updated output frame.
        /// </returns>
        /// <exception cref="System.ArgumentNullException">input or output is null or of an incompatible media type.</exception>
        public override MediaBlock MaterializeFrame(MediaFrame input, ref MediaBlock output, List<MediaBlock> siblings)
        {
            if (output == null)
            {
                output = new SubtitleBlock();
            }
            var source = input as SubtitleFrame;
            var target = output as SubtitleBlock;

            if (source == null || target == null)
            {
                throw new ArgumentNullException(nameof(input), $"{nameof(input)} and {nameof(output)} are either null or not of a compatible media type '{MediaType}'");
            }

            // Set the target data
            target.EndTime     = source.EndTime;
            target.StartTime   = source.StartTime;
            target.Duration    = source.Duration;
            target.StreamIndex = input.StreamIndex;

            target.OriginalText.Clear();
            if (source.Text.Count > 0)
            {
                target.OriginalText.AddRange(source.Text);
            }
            target.OriginalTextType = source.TextType;

            target.Text.Clear();
            foreach (var text in source.Text)
            {
                if (string.IsNullOrWhiteSpace(text))
                {
                    continue;
                }

                // Strip ASS or SRT markup depending on the subtitle text type
                var strippedText = source.TextType == AVSubtitleType.SUBTITLE_ASS
                    ? text.StripAssFormat()
                    : text.StripSrtFormat();

                if (!string.IsNullOrWhiteSpace(strippedText))
                {
                    target.Text.Add(strippedText);
                }
            }

            return target;
        }
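
A quick caller-side sketch of the ref-output contract above (the component, frame, and sibling variables here are illustrative assumptions, not part of the example): passing null makes MaterializeFrame instantiate the block, and the same reference can be recycled on subsequent calls.

        // Hypothetical usage; 'subtitleComponent' exposes the override above.
        MediaBlock block = null;
        subtitleComponent.MaterializeFrame(firstFrame, ref block, siblings); // instantiates a SubtitleBlock
        subtitleComponent.MaterializeFrame(nextFrame, ref block, siblings);  // reuses the same block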
Example #2
        /// <summary>
        /// Adds a block to the playback blocks by converting the given frame.
        /// If there are no more blocks in the pool, the oldest block is returned to the pool
        /// and reused for the new block. The source frame is automatically disposed.
        /// </summary>
        /// <param name="source">The source.</param>
        /// <param name="container">The container.</param>
        /// <returns>The filled block.</returns>
        public MediaBlock Add(MediaFrame source, MediaContainer container)
        {
            lock (SyncRoot)
            {
                // Check if we already have a block at the given time
                if (IsInRange(source.StartTime))
                {
                    var repeatedBlock = PlaybackBlocks.FirstOrDefault(f => f.StartTime.Ticks == source.StartTime.Ticks);
                    if (repeatedBlock != null)
                    {
                        PlaybackBlocks.Remove(repeatedBlock);
                        PoolBlocks.Enqueue(repeatedBlock);
                    }
                    }
                }

                // if there are no available blocks, make room!
                if (PoolBlocks.Count <= 0)
                {
                    var firstBlock = PlaybackBlocks[0];
                    PlaybackBlocks.RemoveAt(0);
                    PoolBlocks.Enqueue(firstBlock);
                }

                // Get a block reference from the pool and convert it!
                var targetBlock = PoolBlocks.Dequeue();
                container.Convert(source, ref targetBlock, PlaybackBlocks, true);

                // Discard a frame with incorrect timing
                if (targetBlock.IsStartTimeGuessed && IsMonotonic && PlaybackBlocks.Count > 1 &&
                    targetBlock.Duration != PlaybackBlocks.Last().Duration)
                {
                    // Return the converted block to the pool
                    PoolBlocks.Enqueue(targetBlock);
                    return null;
                }

                // Add the converted block to the playback list and sort it.
                PlaybackBlocks.Add(targetBlock);
                PlaybackBlocks.Sort();

                return targetBlock;
            }
        }
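
A sketch of how a caller might drive Add (hypothetical names; assumes decoded frames arrive from a decoder loop elsewhere). Note that a null return means the block was discarded for inconsistent timing, not that an error occurred.

        // 'blocks' is a buffer exposing the Add method above; 'container' is the owning MediaContainer.
        foreach (var frame in decodedFrames)
        {
            var block = blocks.Add(frame, container);
            if (block == null)
                continue; // the start time was guessed and broke the monotonic timeline
        }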
Example #3
        /// <summary>
        /// Converts decoded, raw frame data in the frame source into a usable frame. <br />
        /// The process includes performing picture, sample, or text conversions
        /// so that the decoded source frame data is easily usable in multimedia applications.
        /// </summary>
        /// <param name="input">The source frame to use as an input.</param>
        /// <param name="output">The target frame that will be updated with the source frame. If null is passed the frame will be instantiated.</param>
        /// <param name="siblings">The siblings to help guess additional frame parameters.</param>
        /// <returns>
        /// The updated output frame.
        /// </returns>
        /// <exception cref="System.ArgumentNullException">input or output is null or of an incompatible media type.</exception>
        public override MediaBlock MaterializeFrame(MediaFrame input, ref MediaBlock output, List<MediaBlock> siblings)
        {
            if (output == null)
            {
                output = new VideoBlock();
            }
            var source = input as VideoFrame;
            var target = output as VideoBlock;

            if (source == null || target == null)
            {
                throw new ArgumentNullException(nameof(input), $"{nameof(input)} and {nameof(output)} are either null or not of a compatible media type '{MediaType}'");
            }

            // Retrieve a suitable scaler or create it on the fly
            Scaler = ffmpeg.sws_getCachedContext(
                Scaler,
                source.Pointer->width,
                source.Pointer->height,
                NormalizePixelFormat(source.Pointer),
                source.Pointer->width,
                source.Pointer->height,
                OutputPixelFormat,
                ScalerFlags,
                null,
                null,
                null);
            RC.Current.Add(Scaler, $"311: {nameof(VideoComponent)}.{nameof(MaterializeFrame)}()");

            // Perform scaling and save the data to our unmanaged buffer pointer
            var targetBufferStride = ffmpeg.av_image_get_linesize(OutputPixelFormat, source.Pointer->width, 0);
            var targetStride       = new int[] { targetBufferStride };
            var targetLength       = ffmpeg.av_image_get_buffer_size(OutputPixelFormat, source.Pointer->width, source.Pointer->height, 1);

            // Ensure proper allocation of the buffer
            // If there is a size mismatch between the wanted buffer length and the existing one,
            // then let's reallocate the buffer and set the new size (dispose of the existing one if any)
            if (target.PictureBufferLength != targetLength)
            {
                if (target.PictureBuffer != IntPtr.Zero)
                {
                    Marshal.FreeHGlobal(target.PictureBuffer);
                }

                target.PictureBufferLength = targetLength;
                target.PictureBuffer       = Marshal.AllocHGlobal(target.PictureBufferLength);
            }

            var targetScan = default(byte_ptrArray8);

            targetScan[0] = (byte *)target.PictureBuffer;

            // The scaling is done here
            var outputHeight = ffmpeg.sws_scale(Scaler, source.Pointer->data, source.Pointer->linesize, 0, source.Pointer->height, targetScan, targetStride);

            // Flag the block if we have to
            target.IsStartTimeGuessed = source.HasValidStartTime == false;

            // Try to fix the start time, duration and End time if we don't have valid data
            if (source.HasValidStartTime == false && siblings != null && siblings.Count > 0)
            {
                // Get timing information from the last sibling
                var lastSibling = siblings[siblings.Count - 1];

                // We set the target properties
                target.StartTime = lastSibling.EndTime;
                target.Duration  = source.Duration.Ticks > 0 ? source.Duration : lastSibling.Duration;
                target.EndTime   = TimeSpan.FromTicks(target.StartTime.Ticks + target.Duration.Ticks);
            }
            else
            {
                // We set the target properties directly from the source
                target.StartTime = source.StartTime;
                target.Duration  = source.Duration;
                target.EndTime   = source.EndTime;
            }

            target.StreamIndex          = input.StreamIndex;
            target.SmtpeTimecode        = source.SmtpeTimecode;
            target.DisplayPictureNumber = source.DisplayPictureNumber;
            target.CodedPictureNumber   = source.CodedPictureNumber;
            target.BufferStride         = targetStride[0];

            target.PixelHeight = source.Pointer->height;
            target.PixelWidth  = source.Pointer->width;

            var aspectRatio = source.Pointer->sample_aspect_ratio;

            if (aspectRatio.num == 0 || aspectRatio.den == 0)
            {
                target.AspectWidth  = 1;
                target.AspectHeight = 1;
            }
            else
            {
                target.AspectWidth  = aspectRatio.num;
                target.AspectHeight = aspectRatio.den;
            }

            return target;
        }
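
To make the stride and buffer-length math concrete: for a 1920x1080 frame converted to a packed 4-bytes-per-pixel format such as BGRA (an assumed output format here; OutputPixelFormat is defined elsewhere in the component), the two FFmpeg helpers return the following.

        // Packed BGRA, align = 1: linesize = width * 4, buffer = linesize * height.
        var stride = ffmpeg.av_image_get_linesize(AVPixelFormat.AV_PIX_FMT_BGRA, 1920, 0);          // 7680
        var length = ffmpeg.av_image_get_buffer_size(AVPixelFormat.AV_PIX_FMT_BGRA, 1920, 1080, 1); // 8294400 (7680 * 1080)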
Example #4
        /// <summary>
        /// Receives 0 or more frames from the next available packet in the queue.
        /// This dequeues the first available packet, sends it to the decoder,
        /// and passes the decoded frames (if any) to their corresponding
        /// ProcessFrame method.
        /// </summary>
        /// <returns>The list of frames</returns>
        private List<MediaFrame> DecodeNextPacketInternal()
        {
            var result = new List<MediaFrame>();

            // Ensure there is at least one packet in the queue
            if (PacketBufferCount <= 0)
            {
                return result;
            }

            // Setup some initial state variables
            var packet = Packets.Dequeue();

            // Packets are always sent to the decoder. We dequeue them and keep a
            // reference to them in the SentPackets queue
            SentPackets.Push(packet);

            var receiveFrameResult = 0;

            if (MediaType == MediaType.Audio || MediaType == MediaType.Video)
            {
                // If it's audio or video, we use the new API and the decoded frames are stored in AVFrame
                // Let us send the packet to the codec for decoding a frame of uncompressed data later
                var sendPacketResult = ffmpeg.avcodec_send_packet(CodecContext, IsEmptyPacket(packet) ? null : packet);

                // Check whether we can get 1 or more frames from the packet we just sent to the decoder.
                // Audio packets typically contain 1 or more audio frames;
                // a single video frame may require several packets to decode
                MediaFrame managedFrame = null;
                while (receiveFrameResult == 0)
                {
                    // Allocate a frame in unmanaged memory and
                    // Try to receive the decompressed frame data
                    var outputFrame = ffmpeg.av_frame_alloc();
                    RC.Current.Add(outputFrame, $"327: {nameof(MediaComponent)}[{MediaType}].{nameof(DecodeNextPacketInternal)}()");
                    receiveFrameResult = ffmpeg.avcodec_receive_frame(CodecContext, outputFrame);

                    try
                    {
                        managedFrame = null;
                        if (receiveFrameResult == 0)
                        {
                            // Send the frame to processing
                            managedFrame = CreateFrameSource(ref outputFrame);
                            if (managedFrame != null)
                            {
                                result.Add(managedFrame);
                            }
                        }

                        if (managedFrame == null)
                        {
                            RC.Current.Remove(outputFrame);
                            ffmpeg.av_frame_free(&outputFrame);
                        }
                    }
                    catch
                    {
                        // Release the frame as the decoded data could not be processed
                        RC.Current.Remove(outputFrame);
                        ffmpeg.av_frame_free(&outputFrame);
                        throw;
                    }
                }
            }
            else if (MediaType == MediaType.Subtitle)
            {
                // For subtitles we use the old API; the new send_packet/receive_frame API is not yet available
                var gotFrame    = 0;
                var outputFrame = SubtitleFrame.AllocateSubtitle();
                receiveFrameResult = ffmpeg.avcodec_decode_subtitle2(CodecContext, outputFrame, &gotFrame, packet);

                // Check if there was an error decoding the packet.
                // If there was, deallocate the frame and clear the sent packets
                if (receiveFrameResult < 0)
                {
                    SubtitleFrame.DeallocateSubtitle(outputFrame);
                    SentPackets.Clear();
                    Container.Logger?.Log(MediaLogMessageType.Error, $"{MediaType}: Error decoding. Error Code: {receiveFrameResult}");
                }
                else
                {
                    // Process the first frame if we got it from the packet
                    // Note that there could be more frames (subtitles) in the packet
                    if (gotFrame != 0)
                    {
                        try
                        {
                            // Send the frame to processing
                            var managedFrame = CreateFrameSource(outputFrame);
                            if (managedFrame == null)
                            {
                                throw new MediaContainerException($"{MediaType} Component does not implement {nameof(CreateFrameSource)}");
                            }
                            result.Add(managedFrame);
                        }
                        catch
                        {
                            // Processing failed; release the subtitle frame.
                            SubtitleFrame.DeallocateSubtitle(outputFrame);
                            throw;
                        }
                    }

                    // Let's check if we have more decoded frames from the same single packet
                    // Packets may contain more than 1 frame and the decoder is drained
                    // by passing an empty packet (data = null, size = 0)
                    while (gotFrame != 0 && receiveFrameResult > 0)
                    {
                        outputFrame = SubtitleFrame.AllocateSubtitle();
                        var emptyPacket = ffmpeg.av_packet_alloc();
                        RC.Current.Add(emptyPacket, $"406: {nameof(MediaComponent)}[{MediaType}].{nameof(DecodeNextPacketInternal)}()");

                        // Receive the frames in a loop
                        try
                        {
                            receiveFrameResult = ffmpeg.avcodec_decode_subtitle2(CodecContext, outputFrame, &gotFrame, emptyPacket);
                            if (gotFrame != 0 && receiveFrameResult > 0)
                            {
                                // Send the subtitle to processing
                                var managedFrame = CreateFrameSource(outputFrame);
                                if (managedFrame == null)
                                {
                                    throw new MediaContainerException($"{MediaType} Component does not implement {nameof(CreateFrameSource)}");
                                }
                                result.Add(managedFrame);
                            }
                        }
                        catch
                        {
                            // Processing failed; release the subtitle frame from memory
                            SubtitleFrame.DeallocateSubtitle(outputFrame);
                            throw;
                        }
                        finally
                        {
                            // free the empty packet
                            RC.Current.Remove(emptyPacket);
                            ffmpeg.av_packet_free(&emptyPacket);
                        }
                    }
                }
            }

            // Release the sent packets if 1 or more frames were received in the packet
            if (result.Count >= 1 || (Container.IsAtEndOfStream && IsEmptyPacket(packet) && PacketBufferCount == 0))
            {
                // We clear the sent packet queue (releasing packet from unmanaged memory also)
                // because we got at least 1 frame from the packet.
                SentPackets.Clear();
            }

            return result;
        }
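
The audio/video branch above is an instance of FFmpeg's standard send_packet/receive_frame pattern. Stripped of the reference-counting and framework plumbing, the core loop looks roughly like this (a sketch assuming an unsafe context and an initialized AVCodecContext* named codecContext):

        // One packet in, zero or more frames out. EAGAIN means the decoder needs
        // more input; AVERROR_EOF means it has been fully drained.
        ffmpeg.avcodec_send_packet(codecContext, packet);
        while (true)
        {
            var frame = ffmpeg.av_frame_alloc();
            var ret = ffmpeg.avcodec_receive_frame(codecContext, frame);
            if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN) || ret == ffmpeg.AVERROR_EOF || ret < 0)
            {
                ffmpeg.av_frame_free(&frame);
                break;
            }

            // ... wrap 'frame' in a managed MediaFrame here; free it when done ...
        }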
Example #5
 /// <summary>
 /// Converts decoded, raw frame data in the frame source into a usable frame. <br />
 /// The process includes performing picture, sample, or text conversions
 /// so that the decoded source frame data is easily usable in multimedia applications.
 /// </summary>
 /// <param name="input">The source frame to use as an input.</param>
 /// <param name="output">The target frame that will be updated with the source frame. If null is passed the frame will be instantiated.</param>
 /// <param name="siblings">The sibling blocks that may help guess some additional parameters for the input frame.</param>
 /// <returns>
 /// The updated output frame.
 /// </returns>
 public abstract MediaBlock MaterializeFrame(MediaFrame input, ref MediaBlock output, List<MediaBlock> siblings);
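
Each concrete component (Examples 1, 3, and 6) implements this contract with the same skeleton, sketched below with hypothetical CustomFrame/CustomBlock placeholder types.

 public override MediaBlock MaterializeFrame(MediaFrame input, ref MediaBlock output, List<MediaBlock> siblings)
 {
     // Instantiate the block on first use, then validate the media types
     if (output == null)
     {
         output = new CustomBlock();
     }

     var source = input as CustomFrame;
     var target = output as CustomBlock;
     if (source == null || target == null)
     {
         throw new ArgumentNullException(nameof(input), "null or incompatible media type");
     }

     // ... convert the raw frame data and copy timing properties into 'target' ...
     return target;
 }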
Example #6
        /// <summary>
        /// Converts decoded, raw frame data in the frame source into a usable frame. <br />
        /// The process includes performing picture, sample, or text conversions
        /// so that the decoded source frame data is easily usable in multimedia applications.
        /// </summary>
        /// <param name="input">The source frame to use as an input.</param>
        /// <param name="output">The target frame that will be updated with the source frame. If null is passed the frame will be instantiated.</param>
        /// <param name="siblings">The sibling blocks that may help guess some additional parameters for the input frame.</param>
        /// <returns>
        /// The updated output frame.
        /// </returns>
        /// <exception cref="System.ArgumentNullException">input or output is null or of an incompatible media type.</exception>
        public override MediaBlock MaterializeFrame(MediaFrame input, ref MediaBlock output, List<MediaBlock> siblings)
        {
            if (output == null)
            {
                output = new AudioBlock();
            }
            var source = input as AudioFrame;
            var target = output as AudioBlock;

            if (source == null || target == null)
            {
                throw new ArgumentNullException(nameof(input), $"{nameof(input)} and {nameof(output)} are either null or not of a compatible media type '{MediaType}'");
            }

            // Create the source and target audio specs. We might need to resample
            // from the source spec to the target spec
            var sourceSpec = AudioParams.CreateSource(source.Pointer);
            var targetSpec = AudioParams.CreateTarget(source.Pointer);

            // Initialize or update the audio scaler if required
            if (Scaler == null || LastSourceSpec == null || AudioParams.AreCompatible(LastSourceSpec, sourceSpec) == false)
            {
                Scaler = ffmpeg.swr_alloc_set_opts(
                    Scaler,
                    targetSpec.ChannelLayout,
                    targetSpec.Format,
                    targetSpec.SampleRate,
                    sourceSpec.ChannelLayout,
                    sourceSpec.Format,
                    sourceSpec.SampleRate,
                    0,
                    null);

                RC.Current.Add(Scaler, $"109: {nameof(AudioComponent)}.{nameof(MaterializeFrame)}()");
                ffmpeg.swr_init(Scaler);
                LastSourceSpec = sourceSpec;
            }

            // Allocate the unmanaged output buffer
            if (target.AudioBufferLength != targetSpec.BufferLength)
            {
                if (target.AudioBuffer != IntPtr.Zero)
                {
                    Marshal.FreeHGlobal(target.AudioBuffer);
                }

                target.AudioBufferLength = targetSpec.BufferLength;
                target.AudioBuffer       = Marshal.AllocHGlobal(targetSpec.BufferLength);
            }

            var outputBufferPtr = (byte *)target.AudioBuffer;

            // Execute the conversion (audio scaling). It will return the number of samples that were output
            var outputSamplesPerChannel =
                ffmpeg.swr_convert(
                    Scaler,
                    &outputBufferPtr,
                    targetSpec.SamplesPerChannel,
                    source.Pointer->extended_data,
                    source.Pointer->nb_samples);

            // Compute the buffer length
            var outputBufferLength =
                ffmpeg.av_samples_get_buffer_size(null, targetSpec.ChannelCount, outputSamplesPerChannel, targetSpec.Format, 1);

            // Flag the block if we have to
            target.IsStartTimeGuessed = source.HasValidStartTime == false;

            // Try to fix the start time, duration and End time if we don't have valid data
            if (source.HasValidStartTime == false && siblings != null && siblings.Count > 0)
            {
                // Get timing information from the last sibling
                var lastSibling = siblings[siblings.Count - 1];

                // We set the target properties
                target.StartTime = lastSibling.EndTime;
                target.Duration  = source.Duration.Ticks > 0 ? source.Duration : lastSibling.Duration;
                target.EndTime   = TimeSpan.FromTicks(target.StartTime.Ticks + target.Duration.Ticks);
            }
            else
            {
                // We set the target properties directly from the source
                target.StartTime = source.StartTime;
                target.Duration  = source.Duration;
                target.EndTime   = source.EndTime;
            }

            target.BufferLength = outputBufferLength;
            target.ChannelCount = targetSpec.ChannelCount;

            target.SampleRate        = targetSpec.SampleRate;
            target.SamplesPerChannel = outputSamplesPerChannel;
            target.StreamIndex       = input.StreamIndex;

            return target;
        }
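
As a concrete instance of the output buffer computation above (assumed values, not taken from the example): for 16-bit stereo output, av_samples_get_buffer_size with align = 1 reduces to channels x samples x bytes-per-sample.

        // 2 channels * 1024 samples * 2 bytes (S16) = 4096 bytes
        var byteCount = ffmpeg.av_samples_get_buffer_size(null, 2, 1024, AVSampleFormat.AV_SAMPLE_FMT_S16, 1);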