Code example #1
        private static unsafe void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                                          Stream output)
        {
            int resultCode = ffmpeg.avcodec_send_frame(enc_ctx, frame);

            if (resultCode < 0)
            {
                throw new FfmpegException($"Error sending a frame for encoding. Error code: {resultCode}.");
            }

            while (resultCode >= 0)
            {
                resultCode = ffmpeg.avcodec_receive_packet(enc_ctx, pkt);
                if (resultCode == ffmpeg.AVERROR(ffmpeg.EAGAIN) || resultCode == ffmpeg.AVERROR_EOF)
                {
                    return;
                }
                else if (resultCode < 0)
                {
                    throw new FfmpegException($"Error during encoding. Error code: {resultCode}.");
                }

                _log.DebugFormat("Encoded frame {0} (size={1}).", pkt->pts, pkt->size);
                var data = new byte[pkt->size];
                Marshal.Copy((IntPtr)pkt->data, data, 0, pkt->size);

                output.Write(data, 0, data.Length);

                ffmpeg.av_packet_unref(pkt);
            }
        }
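encode() returns as soon as the encoder reports EAGAIN or EOF, so packets the encoder is still buffering only come out once it is drained. A minimal flush sketch under that assumption (FlushEncoder is a hypothetical helper; sending a null frame is FFmpeg's documented end-of-stream signal):

        // Hypothetical end-of-stream flush: a null frame switches the encoder into
        // draining mode, after which encode() keeps collecting buffered packets
        // until avcodec_receive_packet() reports AVERROR_EOF.
        private static unsafe void FlushEncoder(AVCodecContext *enc_ctx, AVPacket *pkt, Stream output)
        {
            encode(enc_ctx, null, pkt, output);
        }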
Code example #2
        //@"D:\cshapdemo\ConsoleApp1\会不会.mp3"
        public void open(String url)
        {
            //var codecId = AVCodecID.AV_CODEC_ID_MP3;
            //_pCodecContext = ffmpeg.avcodec_alloc_context3(_pCodec);
            //        _pCodec = ffmpeg.avcodec_find_encoder(codecId);
            //_pCodecContext = ffmpeg.avcodec_alloc_context3(_pCodec);
            //_pCodecContext->time_base = new AVRational { num = 1, den = fps };
            //ret = ffmpeg.avcodec_open2(_pCodecContext, _pCodec, null);

            int ret = 0;

            _pFormatContext = ffmpeg.avformat_alloc_context();
            var pFormatContext = _pFormatContext;

            ret = ffmpeg.avformat_open_input(&pFormatContext, url, null, null);
            ret = ffmpeg.avformat_find_stream_info(pFormatContext, null);
            for (int i = 0; i < pFormatContext->nb_streams; i++)
            {
                if (pFormatContext->streams[i]->codecpar->codec_type == AVMediaType.AVMEDIA_TYPE_AUDIO)
                {
                    _streamIndex = i;
                    break;
                }
            }
            _pCodecContext = pFormatContext->streams[_streamIndex]->codec; // deprecated stream->codec field; see the codecpar-based sketch below
            AVCodec *codec = ffmpeg.avcodec_find_decoder(_pCodecContext->codec_id);

            ret = ffmpeg.avcodec_open2(_pCodecContext, codec, null); // open the decoder
            ffmpeg.av_dump_format(pFormatContext, _streamIndex, url, 0);
        }
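Reading streams[i]->codec relies on a field FFmpeg deprecated in 3.1 and later removed. Newer code allocates its own context and copies the stream's codec parameters into it, as examples #15 and #17 below do. A hedged sketch of that replacement, reusing the variable names from this example with minimal error handling:

            // Replacement for the deprecated streams[i]->codec field.
            AVStream *stream = pFormatContext->streams[_streamIndex];
            AVCodec *decoder = ffmpeg.avcodec_find_decoder(stream->codecpar->codec_id);

            AVCodecContext *codecContext = ffmpeg.avcodec_alloc_context3(decoder);
            ret = ffmpeg.avcodec_parameters_to_context(codecContext, stream->codecpar);
            if (ret < 0)
            {
                throw new ApplicationException($"avcodec_parameters_to_context failed: {ret}.");
            }

            ret = ffmpeg.avcodec_open2(codecContext, decoder, null);
            if (ret < 0)
            {
                throw new ApplicationException($"avcodec_open2 failed: {ret}.");
            }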
Code example #3
        public VideoEncoder(AVCodecID codecID, int frameWidth, int frameHeight, int framesPerSecond)
        {
            _frameWidth  = frameWidth;
            _frameHeight = frameHeight;

            _videoCodec = ffmpeg.avcodec_find_encoder(codecID);
            if (_videoCodec == null)
            {
                throw new ApplicationException($"Codec encoder could not be found for {codecID}.");
            }

            _videoCodecContext = ffmpeg.avcodec_alloc_context3(_videoCodec);
            if (_videoCodecContext == null)
            {
                throw new ApplicationException("Failed to allocated codec context.");
            }

            _videoCodecContext->width         = frameWidth;
            _videoCodecContext->height        = frameHeight;
            _videoCodecContext->time_base.den = framesPerSecond;
            _videoCodecContext->time_base.num = 1;
            _videoCodecContext->pix_fmt       = AVPixelFormat.AV_PIX_FMT_YUV420P;

            ffmpeg.avcodec_open2(_videoCodecContext, _videoCodec, null).ThrowExceptionIfError();
        }
Code example #4
    public bool AvFrameToImageByteArray(AVFrame frame, out byte[] pngData)
    {
        AVCodec *       outCodec    = ffmpeg.avcodec_find_encoder(AVCodecID.AV_CODEC_ID_PNG);
        AVCodecContext *outCodecCtx = ffmpeg.avcodec_alloc_context3(outCodec);

        outCodecCtx->width         = _pCodecContext->width;
        outCodecCtx->height        = _pCodecContext->height;
        outCodecCtx->pix_fmt       = AVPixelFormat.AV_PIX_FMT_RGB24;
        outCodecCtx->codec_type    = AVMediaType.AVMEDIA_TYPE_VIDEO;
        outCodecCtx->time_base.num = _pCodecContext->time_base.num;
        outCodecCtx->time_base.den = _pCodecContext->time_base.den;

        if (ffmpeg.avcodec_open2(outCodecCtx, outCodec, null) < 0)
        {
            ffmpeg.avcodec_free_context(&outCodecCtx);
            pngData = new byte[] { };
            return(false);
        }

        AVPacket outPacket = new AVPacket();

        ffmpeg.av_init_packet(&outPacket);
        outPacket.size = 0;
        outPacket.data = null;

        ffmpeg.avcodec_send_frame(outCodecCtx, &frame);
        if (ffmpeg.avcodec_receive_packet(outCodecCtx, &outPacket) < 0)
        {
            // No packet produced (e.g. EAGAIN); fail instead of copying from an empty packet.
            ffmpeg.avcodec_free_context(&outCodecCtx);
            pngData = new byte[] { };
            return(false);
        }

        pngData = new byte[outPacket.size];
        Marshal.Copy((IntPtr)outPacket.data, pngData, 0, outPacket.size);

        ffmpeg.av_packet_unref(&outPacket);
        ffmpeg.avcodec_free_context(&outCodecCtx);
        return(true);
    }
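One caveat: the PNG context above is opened with AV_PIX_FMT_RGB24, so the incoming frame must already be RGB24. If the decoder produces, say, YUV420P, convert first. A hedged sws_scale sketch (allocation checks omitted, same FFmpeg.AutoGen binding assumed):

    // Hypothetical pre-conversion for AvFrameToImageByteArray: produce an RGB24
    // copy of a decoded frame before handing it to the PNG encoder.
    SwsContext *sws = ffmpeg.sws_getContext(
        frame.width, frame.height, (AVPixelFormat)frame.format,
        frame.width, frame.height, AVPixelFormat.AV_PIX_FMT_RGB24,
        (int)ffmpeg.SWS_BILINEAR, null, null, null);

    AVFrame *rgbFrame = ffmpeg.av_frame_alloc();
    rgbFrame->width  = frame.width;
    rgbFrame->height = frame.height;
    rgbFrame->format = (int)AVPixelFormat.AV_PIX_FMT_RGB24;
    ffmpeg.av_frame_get_buffer(rgbFrame, 0);

    ffmpeg.sws_scale(sws, frame.data, frame.linesize, 0, frame.height,
                     rgbFrame->data, rgbFrame->linesize);
    ffmpeg.sws_freeContext(sws);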
Code example #5
File: MediaEncoder.cs Project: geedrius/EmguFFmpeg
 /// <summary>
 /// Create and init video encode
 /// </summary>
 /// <param name="videoCodec"></param>
 /// <param name="flags"><see cref="MediaFormat.Flags"/></param>
 /// <param name="width">width pixel, must be greater than 0</param>
 /// <param name="height">height pixel, must be greater than 0</param>
 /// <param name="fps">fps, must be greater than 0</param>
 /// <param name="bitRate">default is auto bit rate, must be greater than or equal to 0</param>
 /// <param name="format">default is first supported pixel format</param>
 /// <returns></returns>
 public static MediaEncoder CreateVideoEncode(AVCodecID videoCodec, int flags, int width, int height, int fps, long bitRate = 0, AVPixelFormat format = AVPixelFormat.AV_PIX_FMT_NONE)
 {
     return(CreateEncode(videoCodec, flags, _ =>
     {
         AVCodecContext *pCodecContext = _;
         if (width <= 0 || height <= 0 || fps <= 0 || bitRate < 0)
         {
             throw new FFmpegException(FFmpegException.NonNegative);
         }
         if (_.SupportedPixelFmts.Count() <= 0)
         {
             throw new FFmpegException(FFmpegException.NotSupportCodecId);
         }
         if (format == AVPixelFormat.AV_PIX_FMT_NONE)
         {
             format = _.SupportedPixelFmts[0];
         }
         else if (_.SupportedPixelFmts.Where(__ => __ == format).Count() <= 0)
         {
             throw new FFmpegException(FFmpegException.NotSupportFormat);
         }
         pCodecContext->width = width;
         pCodecContext->height = height;
         pCodecContext->time_base = new AVRational {
             num = 1, den = fps
         };
         pCodecContext->pix_fmt = format;
         pCodecContext->bit_rate = bitRate;
     }));
 }
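A possible call site for this factory (argument values are illustrative only):

 // Hypothetical usage: 1280x720 at 30 fps with H.264, auto bit rate, and the
 // codec's first supported pixel format.
 MediaEncoder encoder = MediaEncoder.CreateVideoEncode(AVCodecID.AV_CODEC_ID_H264, 0, 1280, 720, 30);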
Code example #6
        public bool WriteVideoFrame(Bitmap bitmap)
        {
            int             ret;
            AVCodecContext *c = videoStream.enc;
            AVFrame *       frame;
            int             got_packet = 0;
            AVPacket        pkt        = new AVPacket();

            frame = MakeVideoFrame(bitmap);

            av_init_packet(&pkt);

            ret = avcodec_encode_video2(c, &pkt, frame, &got_packet); // legacy one-shot API, deprecated in favor of avcodec_send_frame()/avcodec_receive_packet()
            if (ret < 0)
            {
                throw new FFmpegException("Error encoding video frame", ret);
            }

            if (got_packet != 0)
            {
                ret = WriteFrame(&c->time_base, videoStream.st, &pkt);
            }
            else
            {
                ret = 0;
            }

            if (ret < 0)
            {
                throw new FFmpegException("Error writing video frame", ret);
            }

            return(frame == null && got_packet == 0);
        }
Code example #7
    public static AVPixelFormat GetFormat(AVCodecContext *context, AVPixelFormat *px_fmts)
    {
        while (*px_fmts != AVPixelFormat.AV_PIX_FMT_NONE)
        {
            if (*px_fmts == AVPixelFormat.AV_PIX_FMT_QSV)
            {
                AVHWFramesContext * fr_ctx;
                AVQSVFramesContext *qsv_fr_ctx;


                context->hw_frames_ctx = ffmpeg.av_hwframe_ctx_alloc(context->hw_device_ctx);


                fr_ctx     = (AVHWFramesContext *)context->hw_frames_ctx->data;
                qsv_fr_ctx = (AVQSVFramesContext *)fr_ctx->hwctx;

                int initialPoolSize = 32;

                fr_ctx->format            = AVPixelFormat.AV_PIX_FMT_QSV;
                fr_ctx->sw_format         = context->sw_pix_fmt;
                fr_ctx->width             = context->coded_width.FFALIGN(initialPoolSize);
                fr_ctx->height            = context->coded_height.FFALIGN(initialPoolSize);
                fr_ctx->initial_pool_size = initialPoolSize;
                qsv_fr_ctx->frame_type    = (int)MFX_MEMTYPE.MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;

                ffmpeg.av_hwframe_ctx_init(context->hw_frames_ctx).ThrowExceptionIfError();

                return(AVPixelFormat.AV_PIX_FMT_QSV);
            }
            px_fmts++;
        }

        return(AVPixelFormat.AV_PIX_FMT_NONE);
    }
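For this callback to fire, it must be installed on the decoder context before avcodec_open2(). A hedged wiring sketch: the AVCodecContext_get_format delegate and *_func struct names follow FFmpeg.AutoGen's usual generation pattern but may differ between binding versions, and the delegate is kept in a static field so it is not garbage-collected while native code holds the pointer.

    // Hypothetical wiring for the GetFormat callback above.
    private static readonly AVCodecContext_get_format GetFormatCallback = GetFormat;

    public static void InstallQsvGetFormat(AVCodecContext *context)
    {
        context->get_format = new AVCodecContext_get_format_func
        {
            Pointer = Marshal.GetFunctionPointerForDelegate(GetFormatCallback)
        };
    }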
Code example #8
File: ffAS.cs Project: HAN-IBS/ffmpeg4tc
        public static int Encode(AVCodecContext *avctx, AVPacket *avpkt, AVFrame *frame, ref int got_packet_ptr)
        {
            int ret = 0;

            got_packet_ptr = 0;
            if ((ret = ffmpeg.avcodec_send_frame(avctx, frame)) == 0)
            {
                //0 on success, otherwise negative error code
                return(EncodeNext(avctx, avpkt, frame, ref got_packet_ptr));
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
            {
                die("input is not accepted in the current state - user must read output with avcodec_receive_packet() (once all output is read, the packet should be resent, and the call will not fail with EAGAIN)");
            }
            else if (ret == ffmpeg.AVERROR_EOF)
            {
                die("AVERROR_EOF: the decoder has been flushed, and no new packets can be sent to it (also returned if more than 1 flush packet is sent");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EINVAL))
            {
                die("AVERROR(ffmpeg.EINVAL) codec not opened, refcounted_frames not set, it is a decoder, or requires flush");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.ENOMEM))
            {
                die("AVERROR(ENOMEM) failed to add packet to internal queue, or similar other errors: legitimate decoding errors");
            }
            else
            {
                die("unknown");
            }
            return(ret);//ffmpeg.avcodec_encode_audio2(audioCodecContext, &outPacket, audioFrameConverted, &frameFinished)
        }
Code example #9
File: ffAS.cs Project: HAN-IBS/ffmpeg4tc
        public static int DecodeNext(AVCodecContext *avctx, AVFrame *frame, ref int got_frame_ptr, AVPacket *avpkt)
        {
            int ret = 0;

            got_frame_ptr = 0;
            if ((ret = ffmpeg.avcodec_receive_frame(avctx, frame)) == 0)
            {
                //0 on success, otherwise negative error code
                got_frame_ptr = 1;
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
            {
                //AVERROR(EAGAIN): output is not available in this state - user must try to send new input,
                //so fall through to Decode() to feed the next packet
                ret = Decode(avctx, frame, ref got_frame_ptr, avpkt);
            }
            else if (ret == ffmpeg.AVERROR_EOF)
            {
                die("AVERROR_EOF: the encoder has been flushed, and no new frames can be sent to it");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EINVAL))
            {
                die("AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a decoder, or requires flush");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.ENOMEM))
            {
                die("Failed to add packet to internal queue, or similar other errors: legitimate decoding errors");
            }
            else
            {
                die("unknown");
            }
            return(ret);
        }
Code example #10
 private void ConfigureEncoderSpecificSettings(AVCodecContext *codecCtx)
 {
     if (codecCtx->codec_id == AVCodecID.AV_CODEC_ID_MSMPEG4V3)
     {
         codecCtx->max_b_frames = 0;
     }
 }
Code example #11
File: ffAS.cs Project: HAN-IBS/ffmpeg4tc
        public static int EncodeNext(AVCodecContext *avctx, AVPacket *avpkt, AVFrame *frame, ref int got_packet_ptr)
        {
            int ret = 0;

            got_packet_ptr = 0;
            if ((ret = ffmpeg.avcodec_receive_packet(avctx, avpkt)) == 0)
            {
                got_packet_ptr = 1;
                //0 on success, otherwise negative error code
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
            {
                //output is not available in the current state - user must try to send input
                return(Encode(avctx, avpkt, frame, ref got_packet_ptr));
            }
            else if (ret == ffmpeg.AVERROR_EOF)
            {
                die("AVERROR_EOF: the encoder has been fully flushed, and there will be no more output packets");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EINVAL))
            {
                die("AVERROR(EINVAL) codec not opened, or it is an encoder other errors: legitimate decoding errors");
            }
            else
            {
                die("unknown");
            }
            return(ret);//ffmpeg.avcodec_encode_audio2(audioCodecContext, &outPacket, audioFrameConverted, &frameFinished)
        }
Code example #12
 internal override void SetCodecContextParams(AVCodecContext *codecContext)
 {
     codecContext->sample_rate    = sampleRate;
     codecContext->sample_fmt     = avSourceSampleFormat;
     codecContext->channels       = channels;
     codecContext->channel_layout = (ulong)avChannelLayout;
 }
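FFmpeg 5.1 deprecated the channels/channel_layout pair in favor of AVChannelLayout. With bindings for those versions, the same setup would look roughly like this (a sketch, assuming the ch_layout field and av_channel_layout_default are exposed):

 // Hypothetical FFmpeg 5.1+ variant using the AVChannelLayout API.
 internal override void SetCodecContextParams(AVCodecContext *codecContext)
 {
     codecContext->sample_rate = sampleRate;
     codecContext->sample_fmt  = avSourceSampleFormat;
     ffmpeg.av_channel_layout_default(&codecContext->ch_layout, channels);
 }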
Code example #13
        public FFmpegContext(AVCodecID codecId)
        {
            _codec = ffmpeg.avcodec_find_decoder(codecId);
            if (_codec == null)
            {
                Logger.Error?.PrintMsg(LogClass.FFmpeg, $"Codec wasn't found. Make sure you have the {codecId} codec present in your FFmpeg installation.");

                return;
            }

            _context = ffmpeg.avcodec_alloc_context3(_codec);
            if (_context == null)
            {
                Logger.Error?.PrintMsg(LogClass.FFmpeg, "Codec context couldn't be allocated.");

                return;
            }

            if (ffmpeg.avcodec_open2(_context, _codec, null) != 0)
            {
                Logger.Error?.PrintMsg(LogClass.FFmpeg, "Codec couldn't be opened.");

                return;
            }

            _packet = ffmpeg.av_packet_alloc();
            if (_packet == null)
            {
                Logger.Error?.PrintMsg(LogClass.FFmpeg, "Packet couldn't be allocated.");

                return;
            }

            _decodeFrame = Marshal.GetDelegateForFunctionPointer<AVCodec_decode>(_codec->decode.Pointer);
        }
Code example #14
        public H264VideoStreamEncoder(Stream stream, int fps, Size frameSize)
        {
            _stream    = stream;
            _frameSize = frameSize;

            var codecId = AVCodecID.AV_CODEC_ID_H264;

            _pCodec = ffmpeg.avcodec_find_encoder(codecId);
            if (_pCodec == null)
            {
                throw new InvalidOperationException("Codec not found.");
            }

            _pCodecContext            = ffmpeg.avcodec_alloc_context3(_pCodec);
            _pCodecContext->width     = frameSize.Width;
            _pCodecContext->height    = frameSize.Height;
            _pCodecContext->time_base = new AVRational {
                num = 1, den = fps
            };
            _pCodecContext->pix_fmt = AVPixelFormat.AV_PIX_FMT_YUV420P;
            ffmpeg.av_opt_set(_pCodecContext->priv_data, "preset", "veryslow", 0);

            ffmpeg.avcodec_open2(_pCodecContext, _pCodec, null).ThrowExceptionIfError();

            _linesizeY = frameSize.Width;
            _linesizeU = frameSize.Width / 2;
            _linesizeV = frameSize.Width / 2;

            _ySize = _linesizeY * frameSize.Height;
            _uSize = _linesizeU * frameSize.Height / 2;
        }
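Construction might look like this (file name and dimensions are illustrative; the class's frame-writing method is not shown above, so only setup is sketched):

            // Hypothetical setup: raw H.264 written straight to a file.
            using (var outputStream = File.OpenWrite("output.h264"))
            {
                var encoder = new H264VideoStreamEncoder(outputStream, 30, new Size(1920, 1080));
                // ... feed YUV420P frames through the encoder's frame method ...
            }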
Code example #15
File: StreamDecoder.cs Project: EYHN/Anything
    public StreamDecoder(
        AVFormatContext *formatContext,
        int streamIndex,
        AVCodec *codec,
        AVHWDeviceType hwDeviceType = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        : base(null)
    {
        _streamIndex   = streamIndex;
        _formatContext = formatContext;
        _stream        = formatContext->streams[streamIndex];

        _codecContext = ffmpeg.avcodec_alloc_context3(codec);
        if (hwDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        {
            ffmpeg.av_hwdevice_ctx_create(&_codecContext->hw_device_ctx, hwDeviceType, null, null, 0)
            .ThrowExceptionIfError();
        }

        ffmpeg.avcodec_parameters_to_context(_codecContext, _stream->codecpar)
        .ThrowExceptionIfError();
        ffmpeg.avcodec_open2(_codecContext, codec, null).ThrowExceptionIfError();
        _codecContext->pkt_timebase = _stream->time_base;

        _codec = codec;

        _packet        = ffmpeg.av_packet_alloc();
        _frame         = ffmpeg.av_frame_alloc();
        _receivedFrame = ffmpeg.av_frame_alloc();
    }
Code example #16
        /// <summary>
        /// Add a stream by copying parameters via <see cref="ffmpeg.avcodec_parameters_copy(AVCodecParameters*, AVCodecParameters*)"/>.
        /// </summary>
        /// <param name="stream"></param>
        /// <param name="flags"></param>
        /// <returns></returns>
        public MediaStream AddStream(MediaStream stream, int flags = 0)
        {
            AVStream *pstream = ffmpeg.avformat_new_stream(pFormatContext, null);

            pstream->id = (int)(pFormatContext->nb_streams - 1);
            ffmpeg.avcodec_parameters_copy(pstream->codecpar, stream.Stream.codecpar);
            pstream->codecpar->codec_tag = 0;
            MediaCodec mediaCodec = null;

            if (stream.Codec != null)
            {
                mediaCodec = MediaEncoder.CreateEncode(stream.Codec.AVCodecContext.codec_id, flags, _ =>
                {
                    AVCodecContext *pContext       = _;
                    AVCodecParameters *pParameters = ffmpeg.avcodec_parameters_alloc();
                    ffmpeg.avcodec_parameters_from_context(pParameters, stream.Codec).ThrowIfError();
                    ffmpeg.avcodec_parameters_to_context(pContext, pParameters);
                    ffmpeg.avcodec_parameters_free(&pParameters);
                    pContext->time_base = stream.Stream.r_frame_rate.ToInvert();
                });
            }
            streams.Add(new MediaStream(pstream)
            {
                TimeBase = stream.Stream.r_frame_rate.ToInvert(), Codec = mediaCodec
            });
            return(streams.Last());
        }
Code example #17
        public VideoStreamDecoder(string url, AVHWDeviceType HWDeviceType = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        {
            _pFormatContext = ffmpeg.avformat_alloc_context();
            _receivedFrame  = ffmpeg.av_frame_alloc();
            var pFormatContext = _pFormatContext;

            ffmpeg.avformat_open_input(&pFormatContext, url, null, null).ThrowExceptionIfError();
            ffmpeg.avformat_find_stream_info(_pFormatContext, null).ThrowExceptionIfError();
            AVCodec *codec = null;

            _streamIndex = ffmpeg
                           .av_find_best_stream(_pFormatContext, AVMediaType.AVMEDIA_TYPE_VIDEO, -1, -1, &codec, 0)
                           .ThrowExceptionIfError();
            _pCodecContext = ffmpeg.avcodec_alloc_context3(codec);
            if (HWDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
            {
                ffmpeg.av_hwdevice_ctx_create(&_pCodecContext->hw_device_ctx, HWDeviceType, null, null, 0)
                .ThrowExceptionIfError();
            }
            ffmpeg.avcodec_parameters_to_context(_pCodecContext, _pFormatContext->streams[_streamIndex]->codecpar)
            .ThrowExceptionIfError();
            ffmpeg.avcodec_open2(_pCodecContext, codec, null).ThrowExceptionIfError();

            CodecName   = ffmpeg.avcodec_get_name(codec->id);
            FrameSize   = new Size(_pCodecContext->width, _pCodecContext->height);
            PixelFormat = _pCodecContext->pix_fmt;

            _pPacket = ffmpeg.av_packet_alloc();
            _pFrame  = ffmpeg.av_frame_alloc();
        }
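This constructor pairs naturally with a pull-style decode loop. The sketch below mirrors the TryDecodeNextFrame method of the FFmpeg.AutoGen example project this code resembles (hedged: field names reuse the constructor's, and the hardware branch assumes the _receivedFrame allocated above):

        public bool TryDecodeNextFrame(out AVFrame frame)
        {
            ffmpeg.av_frame_unref(_pFrame);
            ffmpeg.av_frame_unref(_receivedFrame);
            int error;

            do
            {
                // Pull packets until one belongs to the selected video stream.
                do
                {
                    ffmpeg.av_packet_unref(_pPacket);
                    error = ffmpeg.av_read_frame(_pFormatContext, _pPacket);
                    if (error == ffmpeg.AVERROR_EOF)
                    {
                        frame = *_pFrame;
                        return false;
                    }
                    error.ThrowExceptionIfError();
                } while (_pPacket->stream_index != _streamIndex);

                ffmpeg.avcodec_send_packet(_pCodecContext, _pPacket).ThrowExceptionIfError();
                error = ffmpeg.avcodec_receive_frame(_pCodecContext, _pFrame);
            } while (error == ffmpeg.AVERROR(ffmpeg.EAGAIN));

            error.ThrowExceptionIfError();

            if (_pCodecContext->hw_device_ctx != null)
            {
                // Download the hardware frame into _receivedFrame for CPU access.
                ffmpeg.av_hwframe_transfer_data(_receivedFrame, _pFrame, 0).ThrowExceptionIfError();
                frame = *_receivedFrame;
            }
            else
            {
                frame = *_pFrame;
            }

            return true;
        }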
Code example #18
        public unsafe int DecodeNext(AVCodecContext *avctx, AVFrame *frame, ref int got_frame_ptr, AVPacket *avpkt)
        {
            int ret = 0;

            got_frame_ptr = 0;
            if ((ret = ffmpeg.avcodec_receive_frame(avctx, frame)) == 0)
            {
                //0 on success, otherwise negative error code
                got_frame_ptr = 1;
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
            {
                //AVERROR(EAGAIN): input is not accepted in the current state - user must read output with avcodec_receive_packet()
                //(once all output is read, the packet should be resent, and the call will not fail with EAGAIN)
                ret = Decode(avctx, frame, ref got_frame_ptr, avpkt);
            }
            else if (ret == ffmpeg.AVERROR_EOF)
            {
                throw new FormatException("FFMPEG: Unexpected end of stream.");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EINVAL))
            {
                throw new FormatException("FFMPEG: Invalid data.");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.ENOMEM))
            {
                throw new FormatException("FFMPEG: Out of memory.");
            }
            else
            {
                throw new FormatException($"FFMPEG: Unknown return code {ret}.");
            }
            return(ret);
        }
Code example #19
        /// <summary>
        /// Downloads the frame from the hardware into a software frame if possible.
        /// The input hardware frame gets freed and the return value will point to the new software frame.
        /// </summary>
        /// <param name="codecContext">The codec context.</param>
        /// <param name="input">The input frame coming from the decoder (may or may not be hardware).</param>
        /// <param name="isHardwareFrame">if set to <c>true</c> [comes from hardware] otherwise, hardware decoding was not performed.</param>
        /// <returns>
        /// The frame downloaded from the device into RAM.
        /// </returns>
        /// <exception cref="Exception">Failed to transfer data to output frame.</exception>
        public AVFrame *ExchangeFrame(AVCodecContext *codecContext, AVFrame *input, out bool isHardwareFrame)
        {
            isHardwareFrame = false;

            if (codecContext->hw_device_ctx == null)
            {
                return(input);
            }

            isHardwareFrame = true;

            if (input->format != (int)PixelFormat)
            {
                return(input);
            }

            var output = MediaFrame.CreateAVFrame();

            var result = ffmpeg.av_hwframe_transfer_data(output, input, 0);

            ffmpeg.av_frame_copy_props(output, input);
            if (result < 0)
            {
                MediaFrame.ReleaseAVFrame(output);
                throw new MediaContainerException("Failed to transfer data to output frame");
            }

            MediaFrame.ReleaseAVFrame(input);

            return(output);
        }
Code example #20
        /// <summary>
        /// Downloads the frame from the hardware into a software frame if possible.
        /// The input hardware frame gets freed and the return value will point to the new software frame
        /// </summary>
        /// <param name="codecContext">The codec context.</param>
        /// <param name="input">The input frame coming from the decoder (may or may not be hardware).</param>
        /// <param name="comesFromHardware">if set to <c>true</c> [comes from hardware] otherwise, hardware decoding was not perfomred.</param>
        /// <returns>
        /// The frame downloaded from the device into RAM
        /// </returns>
        /// <exception cref="Exception">Failed to transfer data to output frame</exception>
        public AVFrame *ExchangeFrame(AVCodecContext *codecContext, AVFrame *input, out bool comesFromHardware)
        {
            comesFromHardware = false;

            if (codecContext->hw_device_ctx == null)
            {
                return(input);
            }

            comesFromHardware = true;

            if (input->format != (int)PixelFormat)
            {
                return(input);
            }

            var output = ffmpeg.av_frame_alloc();

            var result = ffmpeg.av_hwframe_transfer_data(output, input, 0);

            ffmpeg.av_frame_copy_props(output, input);
            if (result < 0)
            {
                ffmpeg.av_frame_free(&output);
                throw new Exception("Failed to transfer data to output frame");
            }

            RC.Current.Remove((IntPtr)input); // unregister first: av_frame_free() nulls the local pointer
            ffmpeg.av_frame_free(&input);
            RC.Current.Add(output, $"86: {nameof(HardwareAccelerator)}[{PixelFormat}].{nameof(ExchangeFrame)}()");

            return(output);
        }
Code example #21
 /// <summary>
 /// Sets the codec context parameters.
 /// </summary>
 internal override void SetCodecContextParams(AVCodecContext *codecContext)
 {
     codecContext->width     = videoWidth;
     codecContext->height    = videoHeight;
     codecContext->time_base = avTimebase;
     codecContext->pix_fmt   = avSourcePixelFormat;
 }
Code example #22
        public FFmpegAudioStream(IAudioData audioData, AVFormatContext *formatContext, AVStream *stream) : base(audioData)
        {
            lock (FFmpegDecoder.SyncObject)
            {
                _formatContext = formatContext;
                _stream        = stream;

                var codec = avcodec_find_decoder(_stream->codecpar->codec_id);

                _codecContext = avcodec_alloc_context3(codec);

                if (avcodec_parameters_to_context(_codecContext, _stream->codecpar) != 0)
                {
                    throw new Exception(); // TODO
                }
                AVDictionary *dict = null; // must be initialized before av_dict_set_int takes its address
                av_dict_set_int(&dict, "refcounted_frames", 1, 0);

                if (avcodec_open2(_codecContext, codec, &dict) != 0)
                {
                    throw new Exception();
                }
                av_dict_free(&dict); // release any options the codec did not consume

                _avFrame = av_frame_alloc();
            }
        }
Code example #23
/**
 * Initialize one input frame for writing to the output file.
 * The frame will be exactly frame_size samples large.
 * @param[out] frame                Frame to be initialized
 * @param      output_codec_context Codec context of the output file
 * @param      frame_size           Size of the frame
 * @return Error code (0 if successful)
 */
    int init_output_frame(AVFrame **frame,
                          AVCodecContext *output_codec_context,
                          int frame_size)
    {
        int error;

        /* Create a new frame to store the audio samples. */
        if ((*frame = av_frame_alloc()) == null)
        {
            Console.WriteLine("error: Could not allocate output frame");
            return(AVERROR_EXIT);
        }

        /* Set the frame's parameters, especially its size and format.
         * av_frame_get_buffer needs this to allocate memory for the
         * audio samples of the frame.
         * Default channel layouts based on the number of channels
         * are assumed for simplicity. */
        (*frame)->nb_samples     = frame_size;
        (*frame)->channel_layout = output_codec_context->channel_layout;
        (*frame)->format         = (int)output_codec_context->sample_fmt;
        (*frame)->sample_rate    = output_codec_context->sample_rate;

        /* Allocate the samples of the created frame. This call will make
         * sure that the audio frame can hold as many samples as specified. */
        if ((error = av_frame_get_buffer(*frame, 0)) < 0)
        {
            Console.WriteLine($"error: Could not allocate output frame samples (error '{LibAVErrorToString(error)}')");
            av_frame_free(frame);
            return(error);
        }
        return(0);
    }
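In FFmpeg's transcode_aac example, which this helper is ported from, it is invoked once per encoder chunk using the encoder's fixed frame_size. A hedged sketch of such a call site:

    // Hypothetical call site: allocate one output frame sized to the encoder's
    // fixed frame_size, as FFmpeg's transcode_aac example does.
    AVFrame *outputFrame = null;
    if (init_output_frame(&outputFrame, output_codec_context, output_codec_context->frame_size) != 0)
    {
        return(AVERROR_EXIT);
    }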
Code example #24
        internal StreamContext(AVStream *stream, FFMPEGDecoder source)
        {
            _stream = stream;
            _source = source;
            var origCtx = _stream->codec;

            //find the corresponding codec
            _codec = ffmpeg.avcodec_find_decoder(origCtx->codec_id);
            if (_codec == null)
            {
                throw new NotSupportedException("This " + ffmpeg.av_get_media_type_string(origCtx->codec_type) +
                                                " codec is not supported by the current ffmpeg binaries!");
            }

            //copy the context from ffmpeg (required because we don't own the other one)
            _codecCtx = ffmpeg.avcodec_alloc_context3(_codec);
            if (ffmpeg.avcodec_parameters_to_context(_codecCtx, _stream->codecpar) != 0)
            {
                throw new Exception("Couldn't copy stream parameters!");
            }

            if (ffmpeg.avcodec_open2(_codecCtx, _codec, null) != 0)
            {
                throw new Exception("Couldn't copy the codec!");
            }


            _decoded = ffmpeg.av_frame_alloc();
        }
Code example #25
        public unsafe int Decode(AVCodecContext *avctx, AVFrame *frame, ref int got_frame_ptr, AVPacket *avpkt)
        {
            int ret = 0;

            got_frame_ptr = 0;
            if ((ret = ffmpeg.avcodec_send_packet(avctx, avpkt)) == 0)
            {
                //0 on success, otherwise negative error code
                return(DecodeNext(avctx, frame, ref got_frame_ptr, avpkt));
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EAGAIN))
            {
                throw new FormatException("input is not accepted in the current state - user must read output with avcodec_receive_frame()(once all output is read, the packet should be resent, and the call will not fail with EAGAIN");
            }
            else if (ret == ffmpeg.AVERROR_EOF)
            {
                throw new FormatException("AVERROR_EOF: the decoder has been flushed, and no new packets can be sent to it (also returned if more than 1 flush packet is sent");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.EINVAL))
            {
                throw new FormatException("codec not opened, it is an encoder, or requires flush");
            }
            else if (ret == ffmpeg.AVERROR(ffmpeg.ENOMEM))
            {
                throw new FormatException("Failed to add packet to internal queue, or similar other errors: legitimate decoding errors");
            }
            else
            {
                throw new FormatException($"FFMPEG: Unknown return code {ret}.");
            }
        }
Code example #26
/**
 * Initialize a temporary storage for the specified number of audio samples.
 * The conversion requires temporary storage due to the different format.
 * The number of audio samples to be allocated is specified in frame_size.
 * @param[out] converted_input_samples Array of converted samples. The
 *                                     dimensions are reference, channel
 *                                     (for multi-channel audio), sample.
 * @param      output_codec_context    Codec context of the output file
 * @param      frame_size              Number of samples to be converted in
 *                                     each round
 * @return Error code (0 if successful)
 */
    int init_converted_samples(byte ***converted_input_samples,
                               AVCodecContext *output_codec_context,
                               int frame_size)
    {
        int error;

        /* Allocate as many pointers as there are audio channels.
         * Each pointer will later point to the audio samples of the corresponding
         * channels (although it may be NULL for interleaved formats).
         */
        if ((*converted_input_samples = (byte **)Marshal.AllocHGlobal(output_codec_context->channels * sizeof(IntPtr))) == null)
        {
            Console.WriteLine($"error: Could not allocate converted input sample pointers");
            return(AVERROR(ENOMEM));
        }

        /* Allocate memory for the samples of all channels in one consecutive
         * block for convenience. */
        if ((error = av_samples_alloc(*converted_input_samples, null,
                                      output_codec_context->channels,
                                      frame_size,
                                      output_codec_context->sample_fmt, 0)) < 0)
        {
            Console.WriteLine($"error: Could not allocate converted input samples (error '{LibAVErrorToString(error)}')");
            av_freep(&(*converted_input_samples)[0]);
            Marshal.FreeHGlobal((IntPtr)(*converted_input_samples));
            return(error);
        }

        return(0);
    }
Code example #27
        public void SelectStream(AVMediaType type)
        {
            AVCodec *avCodec = null;

            _streamIndex    = ffmpeg.av_find_best_stream(_avFormatContext, type, -1, -1, &avCodec, 0).ThrowOnError();
            _avCodecContext = ffmpeg.avcodec_alloc_context3(avCodec);
            var stream = _avFormatContext->streams[_streamIndex];

            if (HardwareDevice != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
            {
                ffmpeg.av_hwdevice_ctx_create(&_avCodecContext->hw_device_ctx, HardwareDevice, null, null, 0).ThrowOnError();
            }

            ffmpeg.avcodec_parameters_to_context(_avCodecContext, stream->codecpar).ThrowOnError();
            ffmpeg.avcodec_open2(_avCodecContext, avCodec, null).ThrowOnError();

            CodecId        = avCodec->id;
            CodecName      = ffmpeg.avcodec_get_name(CodecId);
            FrameSize      = new Size(_avCodecContext->width, _avCodecContext->height);
            AudioFrameSize = _avCodecContext->frame_size;
            PixelFormat    = HardwareDevice == AVHWDeviceType.AV_HWDEVICE_TYPE_NONE ? _avCodecContext->pix_fmt : GetHWPixelFormat(HardwareDevice);
            BitRate        = _avCodecContext->bit_rate;
            FrameRate      = _avCodecContext->framerate;
            TimeBase       = stream->time_base;
        }
Code example #28
        public FFmpegContext()
        {
            _codec   = ffmpeg.avcodec_find_decoder(AVCodecID.AV_CODEC_ID_H264);
            _context = ffmpeg.avcodec_alloc_context3(_codec);

            ffmpeg.avcodec_open2(_context, _codec, null);
        }
Code example #29
        //public AVFormatContext* PFormatContext
        //{
        //    get
        //    {
        //        return _pFormatContext;
        //    }
        //}

        public VideoStreamDecoder(string url, AVHWDeviceType HWDeviceType = AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
        {
            _pFormatContext = ffmpeg.avformat_alloc_context();
            _receivedFrame  = ffmpeg.av_frame_alloc();
            var pFormatContext = _pFormatContext;

            ffmpeg.avformat_open_input(&pFormatContext, url, null, null).ThrowExceptionIfError();
            ffmpeg.avformat_find_stream_info(_pFormatContext, null).ThrowExceptionIfError();
            AVCodec *codec = null;

            // AV_CODEC_FLAG_LOW_DELAY is a codec-context flag; av_find_best_stream's last
            // argument takes unrelated flags, which is why passing it there was ignored.
            _streamIndex = ffmpeg.av_find_best_stream(_pFormatContext, AVMediaType.AVMEDIA_TYPE_VIDEO,
                                                      -1, -1, &codec, 0).ThrowExceptionIfError();
            _pCodecContext = ffmpeg.avcodec_alloc_context3(codec);
            _pCodecContext->flags |= ffmpeg.AV_CODEC_FLAG_LOW_DELAY;

            if (HWDeviceType != AVHWDeviceType.AV_HWDEVICE_TYPE_NONE)
            {
                ffmpeg.av_hwdevice_ctx_create(&_pCodecContext->hw_device_ctx, HWDeviceType, null, null, 0).ThrowExceptionIfError();
            }
            ffmpeg.avcodec_parameters_to_context(_pCodecContext, _pFormatContext->streams[_streamIndex]->codecpar).ThrowExceptionIfError();
            ffmpeg.avcodec_open2(_pCodecContext, codec, null).ThrowExceptionIfError();

            CodecName            = ffmpeg.avcodec_get_name(codec->id);
            FrameSize            = new Size(_pCodecContext->width, _pCodecContext->height);
            PixelFormat          = _pCodecContext->pix_fmt;
            DurationMilliseconds = _pFormatContext->duration / 1000; // AVFormatContext.duration is in AV_TIME_BASE (microsecond) units
            Framerate            = _pCodecContext->framerate.num > 0 && _pCodecContext->framerate.den > 0
                                   ? _pCodecContext->framerate.num / _pCodecContext->framerate.den
                                   : 25; // fall back to 25 fps when the stream does not report a frame rate

            _pPacket = ffmpeg.av_packet_alloc();
            _pFrame  = ffmpeg.av_frame_alloc();
        }
Code example #30
        public VideoDecoder()
        {
            AVCodec *codec = FFmpegInvoke.avcodec_find_decoder(CodecId);

            if (codec == null)
            {
                throw new Exception("Codec not found");
            }

            codec_context = FFmpegInvoke.avcodec_alloc_context3(codec);
            if (codec_context == null)
            {
                throw new Exception("Could not allocate video codec context");
            }

            if (FFmpegInvoke.avcodec_open2(codec_context, codec, null) < 0)
            {
                throw new Exception("Could not open codec");
            }

            avFrame = FFmpegInvoke.avcodec_alloc_frame();
            if (avFrame == null)
            {
                throw new Exception("Could not allocate video frame");
            }
        }
Code example #31
File: VideoDecoder.cs Project: Rutoka/AR.Drone
        public VideoDecoder()
        {
            AVCodec* pCodec = FFmpegInvoke.avcodec_find_decoder(CodecId);
            if (pCodec == null)
                throw new VideoDecoderException("Unsupported codec.");

            _pDecodingContext = FFmpegInvoke.avcodec_alloc_context3(pCodec);

            if (FFmpegInvoke.avcodec_open2(_pDecodingContext, pCodec, null) < 0)
                throw new VideoDecoderException("Could not open codec.");
        }
Code example #32
        public VideoDecoder()
        {
            AVCodec* codec = FFmpegInvoke.avcodec_find_decoder(CodecId);
            if (codec == null) throw new Exception("Codec not found");

            codec_context = FFmpegInvoke.avcodec_alloc_context3(codec);
            if (codec_context == null) throw new Exception("Could not allocate video codec context");

            if (FFmpegInvoke.avcodec_open2(codec_context, codec, null) < 0) throw new Exception("Could not open codec");

            avFrame = FFmpegInvoke.avcodec_alloc_frame();
            if (avFrame == null) throw new Exception("Could not allocate video frame");
        }
Code example #33
        public VideoEncoder(int width, int height, int fps)
        {
            _converter = new VideoConverter(CODEC_PIXEL_FORMAT);

            AVCodec* codec = FFmpegInvoke.avcodec_find_encoder(CODEC_ID);
            if (codec == null) throw new Exception("Codec not found");

            _codec_context = FFmpegInvoke.avcodec_alloc_context3(codec);
            if (_codec_context == null) throw new Exception("Could not allocate video codec context");

            _codec_context->bit_rate = 50000;
            _codec_context->width = width;
            _codec_context->height = height;
            _codec_context->time_base = new AVRational() { num = 1, den = fps };
            _codec_context->gop_size = 10; // emit one intra frame every ten frames
            _codec_context->max_b_frames = 1;
            _codec_context->pix_fmt = CODEC_PIXEL_FORMAT;
            FFmpegInvoke.av_opt_set(_codec_context->priv_data, "preset", "fast", 0);
            if (FFmpegInvoke.avcodec_open2(_codec_context, codec, null) < 0) throw new Exception("Could not open codec");

            _avFrameYUV = FFmpegInvoke.avcodec_alloc_frame();
            if (_avFrameYUV == null) throw new Exception("Could not allocate video frame");
            _avFrameYUV->format = (int)CODEC_PIXEL_FORMAT;
            _avFrameYUV->width = width;
            _avFrameYUV->height = height;

            var ret1 = FFmpegInvoke.av_image_alloc(&_avFrameYUV->data_0, _avFrameYUV->linesize, width, height, CODEC_PIXEL_FORMAT, 32);
            if (ret1 < 0) throw new Exception("Could not allocate raw picture buffer");

            _avFrameBGR = FFmpegInvoke.avcodec_alloc_frame();
            if (_avFrameBGR == null) throw new Exception("Could not allocate video frame");
            _avFrameBGR->format = (int)INPUT_PIXEL_FORMAT;
            _avFrameBGR->width = width;
            _avFrameBGR->height = height;

            var ret2 = FFmpegInvoke.av_image_alloc(&_avFrameBGR->data_0, _avFrameBGR->linesize, width, height, INPUT_PIXEL_FORMAT, 32);
            if (ret2 < 0) throw new Exception("Could not allocate raw picture buffer");
        }
Code example #34
        private void InitializeVideo()
        {
            // Extract pixel format and codec id
            var inputCodecContext = *(InputVideoStream->codec);
            var inputPixelFormat = inputCodecContext.pix_fmt;
            var inputCodecId = inputCodecContext.codec_id;

            // Populate basic properties
            VideoCodec = inputCodecContext.codec_id.ToString(); // Utils.GetAnsiString(new IntPtr(inputCodecContext.codec_name));
            VideoBitrate = (int)inputCodecContext.bit_rate;
            VideoFrameWidth = inputCodecContext.width;
            VideoFrameHeight = inputCodecContext.height;

            VideoFrameRate = Convert.ToDecimal(Convert.ToDouble(inputCodecContext.framerate.num) / Convert.ToDouble(inputCodecContext.framerate.den));
            VideoFrameLength = VideoFrameRate > 0M ? 1M / VideoFrameRate : 0M;

            // Get an input decoder for the input codec
            AVCodec* inputDecoder = ffmpeg.avcodec_find_decoder(inputCodecId);
            if (inputDecoder == null)
                throw new Exception("Unsupported video codec");

            // Create a Software Scaling context -- this allows us to do fast colorspace conversion
            VideoResampler = ffmpeg.sws_getContext(
                VideoFrameWidth, VideoFrameHeight, inputPixelFormat,
                VideoFrameWidth, VideoFrameHeight, Constants.VideoOutputPixelFormat,
                (int)ffmpeg.SWS_BILINEAR, null, null, null);

            if (VideoResampler == null)
                throw new Exception("Could not initialize the output conversion context");

            //Create an output codec context. -- We copy the data from the input context and we
            //then proceed to adjust some output parameters.
            // Before it said: var outputCodecContext = &inputCodecContext;
            VideoCodecContext = ffmpeg.avcodec_alloc_context3(inputDecoder);
            if (ffmpeg.avcodec_copy_context(VideoCodecContext, &inputCodecContext) != Constants.SuccessCode)
                throw new Exception("Could not create video output codec context from input");

            if ((inputDecoder->capabilities & (int)ffmpeg.AV_CODEC_CAP_TRUNCATED) == (int)ffmpeg.AV_CODEC_CAP_TRUNCATED)
                VideoCodecContext->flags |= (int)ffmpeg.AV_CODEC_FLAG_TRUNCATED;

            if (ffmpeg.avcodec_open2(VideoCodecContext, inputDecoder, null) < Constants.SuccessCode)
                throw new Exception("Could not open codec");

            // All output frames will have the same length and will be held by the same structure; the Decoder frame holder.
            DecodedPictureHolder = ffmpeg.av_frame_alloc();
            OutputPictureBufferLength = ffmpeg.avpicture_get_size(Constants.VideoOutputPixelFormat, VideoFrameWidth, VideoFrameHeight);
        }
Code example #35
        /// <summary>
        /// Initializes the audio.
        /// </summary>
        /// <exception cref="System.Exception">
        /// Unsupported audio codec
        /// or
        /// Could not create audio output codec context from input
        /// or
        /// Could not open codec
        /// </exception>
        /// <exception cref="System.InvalidOperationException">Could not load media file</exception>
        private void InitializeAudio()
        {
            // Extract wave sample format and codec id
            var inputCodecContext = *(InputAudioStream->codec);
            var inputCodecId = inputCodecContext.codec_id;

            // Get an input decoder for the input codec
            AVCodec* inputDecoder = ffmpeg.avcodec_find_decoder(inputCodecId);
            if (inputDecoder == null)
                throw new Exception("Unsupported audio codec");

            //Create an output codec context. -- We copy the data from the input context and we
            //then proceed to adjust some output parameters.
            // Before it said: var outputCodecContext = &inputCodecContext;
            AudioCodecContext = ffmpeg.avcodec_alloc_context3(inputDecoder);
            if (ffmpeg.avcodec_copy_context(AudioCodecContext, &inputCodecContext) != Constants.SuccessCode)
                throw new Exception("Could not create audio output codec context from input");

            if ((inputDecoder->capabilities & (int)ffmpeg.CODEC_CAP_TRUNCATED) == (int)ffmpeg.CODEC_CAP_TRUNCATED)
                AudioCodecContext->flags |= ffmpeg.AV_CODEC_FLAG_TRUNCATED;

            if (ffmpeg.avcodec_open2(AudioCodecContext, inputDecoder, null) < Constants.SuccessCode)
                throw new Exception("Could not open codec");

            // setup basic properties
            AudioBytesPerSample = ffmpeg.av_get_bytes_per_sample(AudioCodecContext->sample_fmt);
            AudioCodec = inputCodecContext.codec_id.ToString();
            AudioChannels = inputCodecContext.channels;
            AudioBitrate = (int)inputCodecContext.bit_rate;
            AudioOutputBitsPerSample = ffmpeg.av_get_bytes_per_sample(Constants.AudioOutputSampleFormat) * 8;
            AudioSampleRate = inputCodecContext.sample_rate;
            AudioOutputSampleRate = AudioSampleRate > 44100 ? 44100 : AudioSampleRate; // We set a max of 44.1 kHz to save CPU. Anything more is too much (for most people).

            // Reference: http://www.ffmpeg.org/doxygen/2.0/group__lswr.html
            // Used Example: https://github.com/FFmpeg/FFmpeg/blob/7206b94fb893c63b187bcdfe26422b4e026a3ea0/doc/examples/resampling_audio.c
            AudioResampler = ffmpeg.swr_alloc();
            ffmpeg.av_opt_set_int(AudioResampler, "in_channel_layout", (long)AudioCodecContext->channel_layout, 0);
            ffmpeg.av_opt_set_int(AudioResampler, "out_channel_layout", (long)(ffmpeg.AV_CH_FRONT_LEFT | ffmpeg.AV_CH_FRONT_RIGHT), 0);
            ffmpeg.av_opt_set_int(AudioResampler, "in_sample_rate", AudioSampleRate, 0);
            ffmpeg.av_opt_set_int(AudioResampler, "out_sample_rate", AudioOutputSampleRate, 0);
            ffmpeg.av_opt_set_sample_fmt(AudioResampler, "in_sample_fmt", AudioCodecContext->sample_fmt, 0);
            ffmpeg.av_opt_set_sample_fmt(AudioResampler, "out_sample_fmt", Constants.AudioOutputSampleFormat, 0);
            ffmpeg.swr_init(AudioResampler);

            // All output frames will have the same length and will be held by the same structure; the Decoder frame holder.
            DecodedWaveHolder = ffmpeg.av_frame_alloc();

            // Ensure proper audio properties
            if (AudioOutputBitsPerSample <= 0 || AudioSampleRate <= 0)
                throw new InvalidOperationException("Could not load media file");
        }
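With the resampler configured, each decoded audio frame is converted roughly as follows (a hedged sketch: stereo output is assumed to match the layout chosen above, and buffer bookkeeping is simplified):

            // Hypothetical per-frame conversion with the AudioResampler above.
            var maxOutSamples = (int)ffmpeg.av_rescale_rnd(
                DecodedWaveHolder->nb_samples, AudioOutputSampleRate, AudioSampleRate, AVRounding.AV_ROUND_UP);

            byte* outBuffer = null;
            int outLinesize;
            ffmpeg.av_samples_alloc(&outBuffer, &outLinesize, 2, maxOutSamples, Constants.AudioOutputSampleFormat, 0);

            var convertedSamples = ffmpeg.swr_convert(
                AudioResampler,
                &outBuffer, maxOutSamples,
                DecodedWaveHolder->extended_data, DecodedWaveHolder->nb_samples);

            // ... copy convertedSamples worth of interleaved data out of outBuffer ...
            ffmpeg.av_freep(&outBuffer);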
Code example #36
        /// <summary>
        /// Releases all managed and unmanaged resources
        /// </summary>
        public void Dispose()
        {
            if (IsCancellationPending)
                return;

            this.IsCancellationPending = true;

            this.VideoRenderTimer.Stop();

            if (this.AudioRenderer != null)
            {
                if (this.AudioRenderer.HasInitialized)
                    this.AudioRenderer.Stop();

                this.AudioRenderer.Dispose();
                this.AudioRenderer = null;
            }

            if (MediaFrameExtractorThread != null)
            {
                MediaFrameExtractorThread.Join();
                MediaFrameExtractorThread = null;
            }

            if (MediaFramesExtractedDone != null)
            {
                try
                {
                    MediaFramesExtractedDone.Dispose();
                    MediaFramesExtractedDone = null;
                }
                catch { /* ignore dispose-time errors */ }
            }

            if (PrimaryFramesCache != null)
            {
                PrimaryFramesCache.Clear();
                PrimaryFramesCache = null;
            }

            if (SecondaryFramesCache != null)
            {
                SecondaryFramesCache.Clear();
                SecondaryFramesCache = null;
            }

            if (VideoCodecContext != null)
            {
                fixed (AVCodecContext** videoCodecContextRef = &VideoCodecContext)
                {
                    ffmpeg.avcodec_close(VideoCodecContext);
                    ffmpeg.avcodec_free_context(videoCodecContextRef);
                    VideoCodecContext = null;
                }
            }

            if (AudioCodecContext != null)
            {
                fixed (AVCodecContext** audioCodecContextRef = &AudioCodecContext)
                {
                    ffmpeg.avcodec_close(AudioCodecContext);
                    ffmpeg.avcodec_free_context(audioCodecContextRef);
                    AudioCodecContext = null;
                }
            }

            if (VideoResampler != null)
            {
                ffmpeg.sws_freeContext(VideoResampler);
                VideoResampler = null;
            }

            if (AudioResampler != null)
            {
                fixed (SwrContext** audioResamplerRef = &AudioResampler)
                {
                    ffmpeg.swr_close(AudioResampler);
                    ffmpeg.swr_free(audioResamplerRef);
                    AudioResampler = null;
                }
            }

            if (InputFormatContext != null)
            {
                fixed (AVFormatContext** inputFormatContextRef = &InputFormatContext)
                {
                    // avformat_close_input() frees the context; a separate
                    // avformat_free_context() call afterwards would be redundant.
                    ffmpeg.avformat_close_input(inputFormatContextRef);
                    InputFormatContext = null;
                }
            }

            if (DecodedPictureHolder != null)
            {
                fixed (AVFrame** decodedPictureRef = &DecodedPictureHolder)
                {
                    // av_frame_free() also releases the frame's buffers, which av_free() would leak.
                    ffmpeg.av_frame_free(decodedPictureRef);
                }
            }

            if (DecodedWaveHolder != null)
            {
                fixed (AVFrame** decodedWaveRef = &DecodedWaveHolder)
                {
                    ffmpeg.av_frame_free(decodedWaveRef);
                }
            }

        }