        /// <summary>Creates a thumbnail</summary>
        /// <param name="info">Media information to use for header text</param>
        /// <param name="pFormatContext">The format context to use</param>
        /// <param name="vidStream">The video stream to use</param>
        /// <param name="options">Options setting the appearence of the sheet</param>
        /// <param name="onProgress">Is called after each taken frame and passes (int currentIndex, int positions.Count) - if true is returned, the loop is canceled</param>
        private unsafe static Bitmap ExtractVideoThumbnailSheet(FFmpegMediaInfo info, AVFormatContext* pFormatContext, AVStream* vidStream, VideoThumbSheetOptions options, Func<int, int, bool> onProgress)
        {
            #region Structure diagram
            /* m := Margin, p := Padding, tw := ThumbWidth, th := ThumbHeight, hh := HeaderHeight, ### := Thumbnail image
             
                 m ###tw### p ###tw### p ###tw### p ###tw### p ###tw### m
                +--------------------------------------------------------+ 
                |                                                        | m     }  Margin
                | +----------------------------------------------------+ | 
                | |Filename: xxxxxxxxxxxxxxxxxxxxxx.avi                | | xx    \
                | |Length: 0:00:00    Resolution: 640x480 @25FPS       | | hh     } Header height
                | |Streams: Video[div5/xvid], Audio[mp3a/MP3/eng]      | | xx    /
                | +----------------------------------------------------+ | 
                |                                                        | 2*p   }  2 * Padding
                | +----------------------------------------------------+ |
                | |########| |########| |########| |########| |########| | ##    \
                | |########| |########| |########| |########| |########| | th     } Thumbnail height
                | |########| |########| |########| |########| |########| | ##    /
                | +----------------------------------------------------+ |
                |                                                        | p     }  1* Padding
                | +----------------------------------------------------+ |
                | |########| |########| |########| |########| |########| |
                ...                         ...                        ...
                ...                         ...                        ...
                | |########| |########| |########| |########| |########| |
                + +----------------------------------------------------+ + 
                |                                                        | m     }  Margin
                +--------------------------------------------------------+ 
             */
            #endregion

            #region Header and image preparations
            // Get video info from codec
            int width = vidStream->codec->width;
            int height = vidStream->codec->height;
            AVRational framerate = vidStream->codec->framerate;
            long duration = pFormatContext->duration;

            // Create header text and calculate its height (using current video stream)
            string header = String.Format("Filename: {0}\r\nLength: {1}    Resolution: {2}x{3} @{4}FPS\r\nStreams: {5}",
                Path.GetFileName(info.Filename),
                ToFormattedString(info.Duration),
                width, height, ToDescString(framerate),
                String.Join(", ", info.Streams.Select(s => s.ToString()).ToArray())
            );
            int headerHeight = Convert.ToInt32(MeassureString(header, options.HeaderFont).Height);

            // Calculate image sizes and create image
            int thumbHeight = height * options.ThumbWidth / width;
            int imgWidth = 2 * options.Margin + options.ThumbColumns * (options.ThumbWidth + options.Padding) - options.Padding;
            int imgHeight = 2 * options.Margin + headerHeight + options.ThumbRows * (thumbHeight + options.Padding) + options.Padding;
            int thumbTop = options.Margin + headerHeight + 2 * options.Padding;
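            // Worked example of the sizing formulas above, using hypothetical option values (not the
            // actual defaults): ThumbWidth=192, ThumbColumns=5, ThumbRows=10, Margin=10, Padding=5,
            // a 640x480 source and a 48 px header:
            //   thumbHeight = 480 * 192 / 640                 = 144
            //   imgWidth    = 2*10 + 5 * (192 + 5) - 5        = 1000
            //   imgHeight   = 2*10 + 48 + 10 * (144 + 5) + 5  = 1563
            //   thumbTop    = 10 + 48 + 2*5                   = 68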
            Bitmap bmp = new Bitmap(imgWidth, imgHeight);

            int count = options.ThumbColumns * options.ThumbRows;
            List<long> positions = CreateRegularTimePositionList(duration, count);
            #endregion

            using (Graphics g = Graphics.FromImage(bmp))
            using (Brush headerBrush = new SolidBrush(options.HeaderColor))
            using (Pen borderPen = new Pen(options.ThumbBorderColor, 1f))
            using (Brush indexShadowBrush = new SolidBrush(options.IndexShadowColor))
            using (Brush indexBrush = new SolidBrush(options.IndexColor))
            {
                // Set background color
                g.Clear(options.BackgroundColor);

                // Draw header text
                float marginF = Convert.ToSingle(options.Margin);
                g.DrawString(header, options.HeaderFont, headerBrush, new PointF(marginF, marginF));

                // Loop through images as they are extracted
                int c, r, x, y;
                string tsText;
                SizeF tsSize;
                float ix, iy;
                ExtractFrames(pFormatContext, vidStream, positions, options.ForceExactTimePosition, (i, ts, img) =>
                {
                    // Calculate positions and sizes
                    c = i % options.ThumbColumns; // column
                    r = i / options.ThumbColumns; // row
                    x = options.Margin + c * (options.ThumbWidth + options.Padding); // thumb left
                    y = thumbTop + r * (thumbHeight + options.Padding); // thumb top
                    tsText = String.Format("{0:00}:{1:00}:{2:00}", ts.Hours, ts.Minutes, ts.Seconds); // timestamp text
                    tsSize = g.MeasureString(tsText, options.IndexFont); // timestamp text size
                    ix = Convert.ToSingle(x + options.ThumbWidth) - tsSize.Width - 3f; // timestamp text left
                    iy = Convert.ToSingle(y + thumbHeight) - tsSize.Height - 3f; // timestamp text top

                    // Insert thumbnail image resized
                    g.DrawImage(img, x, y, options.ThumbWidth, thumbHeight);

                    // Overdraw edges with border
                    if (options.DrawThumbnailBorder)
                        g.DrawRectangle(borderPen, x, y, options.ThumbWidth, thumbHeight);

                    // Draw timestamp shadow
                    g.DrawString(tsText, options.IndexFont, indexShadowBrush, ix - 1f, iy - 1f);
                    g.DrawString(tsText, options.IndexFont, indexShadowBrush, ix + 1f, iy + 1f);
                    // Draw timestamp
                    g.DrawString(tsText, options.IndexFont, indexBrush, ix, iy);

                    // Dispose the thumbnail image because it's not needed anymore
                    img.Dispose();

                    // Publish progress
                    if (onProgress != null)
                        return onProgress(i, count);
                    else
                        return false;
                });
            }

            return bmp;
        }
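        // Hypothetical usage sketch, not part of the original code: it assumes an FFmpegMediaInfo that
        // belongs to the already opened AVFormatContext*/video AVStream* pair passed in here, and a
        // VideoThumbSheetOptions instance exposing the properties used above. It only illustrates the
        // onProgress contract (return true to cancel) and that the caller owns the returned Bitmap.
        private unsafe static void SaveThumbnailSheetExample(FFmpegMediaInfo info, AVFormatContext* pFormatContext, AVStream* vidStream, VideoThumbSheetOptions options, string outputPath)
        {
            using (Bitmap sheet = ExtractVideoThumbnailSheet(info, pFormatContext, vidStream, options,
                (current, total) =>
                {
                    Console.WriteLine("Thumbnail {0}/{1}", current + 1, total);
                    return false; // returning true would cancel the extraction loop
                }))
            {
                sheet.Save(outputPath, System.Drawing.Imaging.ImageFormat.Png);
            }
        }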
        /// <summary>Extracts frame images</summary>
        /// <param name="pFormatContext">The format context to use</param>
        /// <param name="vidStream">The video stream to use</param>
        /// <param name="startTime">The start time</param>
        /// <param name="endTime">The end time</param>
        /// <param name="onProgress">Is called after every taken frame passing the list index, frame timestamp and frame image - if true is returned, the loop is canceled</param>
        private unsafe static void ExtractFrames(AVFormatContext* pFormatContext, AVStream* vidStream, TimeSpan startTime, TimeSpan endTime, Func<int, TimeSpan, Bitmap, bool> onProgress)
        {
            #region Preparations
            AVCodecContext codecContext = *(vidStream->codec);
            int width = codecContext.width;
            int height = codecContext.height;
            long duration = pFormatContext->duration;
            AVPixelFormat sourcePixFmt = codecContext.pix_fmt;
            AVCodecID codecId = codecContext.codec_id;
            var convertToPixFmt = AVPixelFormat.AV_PIX_FMT_BGR24;
            SwsContext* pConvertContext = ffmpeg.sws_getContext(width, height, sourcePixFmt, width, height, convertToPixFmt, ffmpeg.SWS_FAST_BILINEAR, null, null, null);
            if (pConvertContext == null)
                throw new Exception("Could not initialize the conversion context");
            AVCodecContext* pCodecContext = &codecContext;

            var pConvertedFrame = (AVPicture*)ffmpeg.av_frame_alloc();
            int convertedFrameBufferSize = ffmpeg.avpicture_get_size(convertToPixFmt, width, height);
            var pConvertedFrameBuffer = (sbyte*)ffmpeg.av_malloc((uint)convertedFrameBufferSize);
            ffmpeg.avpicture_fill(pConvertedFrame, pConvertedFrameBuffer, convertToPixFmt, width, height);

            AVCodec* pCodec = ffmpeg.avcodec_find_decoder(codecId);
            if (pCodec == null)
                throw new Exception("Unsupported codec");

            if ((pCodec->capabilities & ffmpeg.CODEC_CAP_TRUNCATED) == ffmpeg.CODEC_CAP_TRUNCATED)
                pCodecContext->flags |= ffmpeg.CODEC_FLAG_TRUNCATED;

            if (ffmpeg.avcodec_open2(pCodecContext, pCodec, null) < 0)
                throw new Exception("Could not open codec");

            AVFrame* pDecodedFrame = ffmpeg.av_frame_alloc();

            var packet = new AVPacket();
            AVPacket* pPacket = &packet;
            ffmpeg.av_init_packet(pPacket);

            AVCodecContext cont = *vidStream->codec;
            #endregion

            // Seek to key frames only - otherwise the first frames are corrupted until a key frame is decoded
            pFormatContext->seek2any = 0;

            var currTS = startTime.Ticks / TIME_FACTOR;

            if (currTS > 0)
                ffmpeg.av_seek_frame(pFormatContext, -1, currTS, ffmpeg.AVSEEK_FLAG_BACKWARD);

            TimeSpan pos;
            double timeBase = ToDouble(vidStream->time_base); // factor converting DTS/PTS timestamps to seconds
            Bitmap img;
            int f = 0;
            do
            {
                // Decode the next image - ATTENTION: request an image copy (createCopy), otherwise the underlying buffer is reused and freed later!
                img = ExtractNextImage(pFormatContext, pCodecContext, pPacket, vidStream, pConvertContext, pDecodedFrame, pConvertedFrame, width, height, true, timeBase, out pos);

                if (img == null) break;

                if (onProgress != null)
                {
                    if (onProgress(++f, pos, img)) break;
                }

                if (pos >= endTime) break;

            } while (true); // end while

            #region Free allocated memory
            ffmpeg.av_free(pConvertedFrame);
            ffmpeg.av_free(pConvertedFrameBuffer);
            ffmpeg.sws_freeContext(pConvertContext);
            ffmpeg.av_free(pDecodedFrame);
            ffmpeg.avcodec_close(pCodecContext);
            #endregion
        }
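        // Hedged sketch of the time unit conversions relied on above; the actual TIME_FACTOR, ToDouble
        // and ToTimeSpan helpers are defined elsewhere in this class and may differ in detail.
        // av_seek_frame with stream_index -1 expects AV_TIME_BASE units (microseconds), i.e. .NET ticks
        // (100 ns each) divided by 10, while a frame timestamp multiplied by the stream time base
        // (num/den) gives seconds.
        private static long TicksToAvTimeBase(long ticks)
        {
            return ticks / 10L; // 100 ns ticks -> microseconds (AV_TIME_BASE units)
        }
        private static double TimeBaseToSeconds(AVRational timeBase)
        {
            return timeBase.num / (double)timeBase.den; // seconds per timestamp unit
        }
        private static TimeSpan TimestampToTimeSpan(long pts, double timeBaseSeconds)
        {
            return TimeSpan.FromSeconds(pts * timeBaseSeconds);
        }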
        /// <summary>Extracts frame images</summary>
        /// <param name="pFormatContext">The format context to use</param>
        /// <param name="vidStream">The video stream to use</param>
        /// <param name="positions">A list of timestamps in 10ths of Ticks</param>
        /// <param name="anyPosition">If false, the last key frame is used - can result in inaccurate time positions but is much faster</param>
        /// <param name="onProgress">Is called after every taken frame passing the list index, frame timestamp and frame image - if true is returned, the loop is canceled</param>
        private unsafe static void ExtractFrames(AVFormatContext* pFormatContext, AVStream* vidStream, List<long> positions, bool anyPosition, Func<int, TimeSpan, Bitmap, bool> onProgress)
        {
            #region Preparations
            AVCodecContext codecContext = *(vidStream->codec);
            int width = codecContext.width;
            int height = codecContext.height;
            long duration = pFormatContext->duration;
            AVPixelFormat sourcePixFmt = codecContext.pix_fmt;
            AVCodecID codecId = codecContext.codec_id;
            var convertToPixFmt = AVPixelFormat.AV_PIX_FMT_BGR24;
            SwsContext* pConvertContext = ffmpeg.sws_getContext(width, height, sourcePixFmt, width, height, convertToPixFmt, ffmpeg.SWS_FAST_BILINEAR, null, null, null);
            if (pConvertContext == null)
                throw new Exception("Could not initialize the conversion context");
            AVCodecContext* pCodecContext = &codecContext;

            var pConvertedFrame = (AVPicture*)ffmpeg.av_frame_alloc();
            int convertedFrameBufferSize = ffmpeg.avpicture_get_size(convertToPixFmt, width, height);
            var pConvertedFrameBuffer = (sbyte*)ffmpeg.av_malloc((uint)convertedFrameBufferSize);
            ffmpeg.avpicture_fill(pConvertedFrame, pConvertedFrameBuffer, convertToPixFmt, width, height);

            AVCodec* pCodec = ffmpeg.avcodec_find_decoder(codecId);
            if (pCodec == null)
                throw new Exception("Unsupported codec");

            if ((pCodec->capabilities & ffmpeg.CODEC_CAP_TRUNCATED) == ffmpeg.CODEC_CAP_TRUNCATED)
                pCodecContext->flags |= ffmpeg.CODEC_FLAG_TRUNCATED;

            if (ffmpeg.avcodec_open2(pCodecContext, pCodec, null) < 0)
                throw new Exception("Could not open codec");

            AVFrame* pDecodedFrame = ffmpeg.av_frame_alloc();

            var packet = new AVPacket();
            AVPacket* pPacket = &packet;
            ffmpeg.av_init_packet(pPacket);

            AVCodecContext cont = *vidStream->codec;
            #endregion

            // Seek to key frames only - otherwise the first frames are corrupted until a key frame is decoded
            pFormatContext->seek2any = 0;

            TimeSpan pos;
            long currTS;
            double timeBase = ToDouble(vidStream->time_base); // factor converting DTS/PTS timestamps to seconds
            Bitmap img;
            for (int f = 0; f < positions.Count; f++)
            {
                currTS = positions[f];

                // Seek to last keyframe before next position
                vidStream->skip_to_keyframe = 1;
                ffmpeg.av_seek_frame(pFormatContext, -1, currTS, ffmpeg.AVSEEK_FLAG_BACKWARD);

                // Decode the next image - ATTENTION: request an image copy (createCopy), otherwise the underlying buffer is reused and freed later!
                img = ExtractNextImage(pFormatContext, pCodecContext, pPacket, vidStream, pConvertContext, pDecodedFrame, pConvertedFrame, width, height, true, timeBase, out pos);

                // If an exact position is requested (anyPosition), step through the following images until the image position is within one time base unit or starts moving away again
                if (anyPosition)
                {
                    vidStream->skip_to_keyframe = 0;
                    TimeSpan currTsSpan = ToTimeSpan(currTS);
                    double lastDiff = double.MaxValue;
                    double currDiff;
                    while ((currDiff = Math.Abs((currTsSpan - pos).TotalSeconds)) > timeBase && currDiff < lastDiff)
                    {
                        lastDiff = currDiff;
                        img.Dispose();
                        img = ExtractNextImage(pFormatContext, pCodecContext, pPacket, vidStream, pConvertContext, pDecodedFrame, pConvertedFrame, width, height, true, timeBase, out pos);
                    }
                    vidStream->skip_to_keyframe = 1;
                    GC.Collect();
                }

                if (img == null) continue;

                if (onProgress != null)
                {
                    if (onProgress(f, pos, img)) break;
                }
            } // end for

            #region Free allocated memory
            ffmpeg.av_free(pConvertedFrame);
            ffmpeg.av_free(pConvertedFrameBuffer);
            ffmpeg.sws_freeContext(pConvertContext);
            ffmpeg.av_free(pDecodedFrame);
            ffmpeg.avcodec_close(pCodecContext);
            #endregion
        }
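        // Minimal sketch of an evenly spaced position list in AV_TIME_BASE units, similar in spirit to
        // the CreateRegularTimePositionList helper used above (whose actual implementation is not shown
        // here). It leaves a gap before the first and after the last position to avoid intro/outro frames.
        private static List<long> CreateEvenlySpacedPositionsSketch(long duration, int count)
        {
            var positions = new List<long>(count);
            long step = duration / (count + 1);
            for (int i = 1; i <= count; i++)
                positions.Add(i * step);
            return positions;
        }
        /// <summary>Reads and decodes packets until one complete video frame is available, converts it to BGR24 and returns it as a Bitmap (a copy if createCopy is true); returns null when no more packets can be read</summary>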
        private unsafe static Bitmap ExtractNextImage(AVFormatContext* pFormatContext, AVCodecContext* pCodecContext, AVPacket* pPacket, AVStream* vidStream, SwsContext* pConvertContext, AVFrame* pDecodedFrame, AVPicture* pConvertedFrame, int width, int height, bool createCopy, double timeBase, out TimeSpan pos)
        {
            pos = new TimeSpan();
            Bitmap result = null;

            int gotPicture = 0;

            while (gotPicture != 1)
            {
                if (ffmpeg.av_read_frame(pFormatContext, pPacket) < 0)
                {
                    result = null;
                    break;
                }

                if (pPacket->stream_index != vidStream->index)
                    continue;

                gotPicture = 0;
                int size = ffmpeg.avcodec_decode_video2(pCodecContext, pDecodedFrame, &gotPicture, pPacket);
                if (size < 0)
                    throw new Exception("Error while decoding frame!");

                if (gotPicture == 1)
                {
                    // Get current position from frame
                    pos = ToTimeSpan(ffmpeg.av_frame_get_best_effort_timestamp(pDecodedFrame), timeBase);

                    // Extract image
                    sbyte** src = &pDecodedFrame->data0;
                    sbyte** dst = &pConvertedFrame->data0;
                    ffmpeg.sws_scale(pConvertContext, src, pDecodedFrame->linesize, 0, height, dst, pConvertedFrame->linesize);
                    var imageBufferPtr = new IntPtr(pConvertedFrame->data0);
                    int linesize = pConvertedFrame->linesize[0];
                    Bitmap img = new Bitmap(width, height, linesize, PixelFormat.Format24bppRgb, imageBufferPtr);

                    result = createCopy ? new Bitmap(img) : img;
                }

            }

            return result;
        }
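        /// <summary>Instance variant of ExtractNextImage: decodes the next frame from this.AVFormatContext, converts it to BGR24 and returns it as a Bitmap</summary>
        /// <param name="delay">Optional minimum distance in milliseconds to <paramref name="prev"/>; if the decoded frame is closer, null is returned</param>
        /// <param name="end">Set to true when no more packets can be read</param>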
        private unsafe Bitmap ExtractNextImage2(AVCodecContext* pCodecContext, AVPacket* pPacket, AVStream* vidStream, SwsContext* pConvertContext, AVFrame* pDecodedFrame, AVPicture* pConvertedFrame, int width, int height, bool createCopy, double timeBase, int? delay, TimeSpan prev, out TimeSpan pos, out bool end)
        {
            pos = new TimeSpan();
            end = false;
            Bitmap result = null;

            int gotPicture = 0;

            while (gotPicture != 1)
            {
                if (ffmpeg.av_read_frame(this.AVFormatContext, pPacket) < 0)
                {
                    end = true;
                    result = null;
                    break;
                }

                if (pPacket->stream_index != vidStream->index)
                    continue;

                gotPicture = 0;
                int size = ffmpeg.avcodec_decode_video2(pCodecContext, pDecodedFrame, &gotPicture, pPacket);
                if (size < 0)
                    throw new Exception("Error while decoding frame!");

                if (gotPicture == 1)
                {
                    // Get current position from frame
                    pos = ToTimeSpan(ffmpeg.av_frame_get_best_effort_timestamp(pDecodedFrame), timeBase);

                    if (delay.HasValue && prev != TimeSpan.Zero && (pos - prev).TotalMilliseconds < delay)
                    {
                        return null;
                    }

                    // Extract image
                    sbyte** src = &pDecodedFrame->data0;
                    sbyte** dst = &pConvertedFrame->data0;
                    int src_height = pCodecContext->height;
                    ffmpeg.sws_scale(pConvertContext, src, pDecodedFrame->linesize, 0, src_height, dst, pConvertedFrame->linesize);
                    var imageBufferPtr = new IntPtr(pConvertedFrame->data0);
                    int linesize = pConvertedFrame->linesize[0];
                    result = new Bitmap(width, height, linesize, PixelFormat.Format24bppRgb, imageBufferPtr);
                }

            }

            return result;
        }