Example #1
        /// <summary>
        /// Frees the underlying parameters via <see cref="avcodec_parameters_free(AVCodecParameters**)"/> and resets the stored native pointer.
        /// </summary>
        public void Free()
        {
            AVCodecParameters *ptr = this;

            avcodec_parameters_free(&ptr);
            _nativePointer = (IntPtr)ptr;
        }
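For reference, here is a minimal sketch of the raw allocate/free pairing that Free() wraps, assuming the FFmpeg.AutoGen-style ffmpeg static class and the unsafe context used throughout these examples; the method name is illustrative.

        public static unsafe void AllocAndFreeParameters()
        {
            // avcodec_parameters_alloc returns a zeroed AVCodecParameters, or null on allocation failure.
            AVCodecParameters *par = ffmpeg.avcodec_parameters_alloc();
            if (par == null)
            {
                throw new OutOfMemoryException("avcodec_parameters_alloc failed");
            }

            // ... fill or copy fields here ...

            // avcodec_parameters_free takes a double pointer and sets it to null after freeing.
            ffmpeg.avcodec_parameters_free(&par);
        }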
Example #2
        /// <summary>
        /// Adds a stream by copying its codec parameters, see <see cref="ffmpeg.avcodec_parameters_copy(AVCodecParameters*, AVCodecParameters*)"/>.
        /// </summary>
        /// <param name="stream">The source stream whose parameters are copied.</param>
        /// <param name="flags">Flags forwarded to the encoder factory.</param>
        /// <returns>The newly added stream.</returns>
        public MediaStream AddStream(MediaStream stream, int flags = 0)
        {
            AVStream *pstream = ffmpeg.avformat_new_stream(pFormatContext, null);

            pstream->id = (int)(pFormatContext->nb_streams - 1);
            ffmpeg.avcodec_parameters_copy(pstream->codecpar, stream.Stream.codecpar);
            pstream->codecpar->codec_tag = 0;
            MediaCodec mediaCodec = null;

            if (stream.Codec != null)
            {
                mediaCodec = MediaEncoder.CreateEncode(stream.Codec.AVCodecContext.codec_id, flags, codecContext =>
                {
                    // Round-trip the source codec settings through an AVCodecParameters
                    // so the new encoder context matches the copied stream.
                    AVCodecContext *pContext       = codecContext;
                    AVCodecParameters *pParameters = ffmpeg.avcodec_parameters_alloc();
                    ffmpeg.avcodec_parameters_from_context(pParameters, stream.Codec).ThrowIfError();
                    ffmpeg.avcodec_parameters_to_context(pContext, pParameters);
                    ffmpeg.avcodec_parameters_free(&pParameters);
                    pContext->time_base = stream.Stream.r_frame_rate.ToInvert();
                });
            }
            streams.Add(new MediaStream(pstream)
            {
                TimeBase = stream.Stream.r_frame_rate.ToInvert(), Codec = mediaCodec
            });
            return streams.Last();
        }
Example #3
        public override Size GetTextureSize(Stream stream)
        {
            stream.Seek(0, SeekOrigin.Begin);
            using var context = new FormatContext(stream);
            int streamId = ffmpeg.av_find_best_stream(
                context.Inner,
                AVMediaType.AVMEDIA_TYPE_VIDEO,
                -1, -1, null, 0
                );
            if (streamId < 0)
            {
                throw new InvalidOperationException("No video stream found in the input.");
            }

            AVCodecParameters *codecpar = context.Inner->streams[streamId]->codecpar;

            return new Size((uint)codecpar->width, (uint)codecpar->height);
        }
Example #4
 private static extern int avcodec_parameters_to_context(AVCodecContext *codec, AVCodecParameters *par);
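A hedged usage sketch for this binding: copy the demuxer's codecpar into a freshly allocated decoder context before opening it. The helper name and parameters are illustrative; the ffmpeg calls follow the FFmpeg.AutoGen-style binding used in the other examples.

        public static unsafe AVCodecContext *OpenDecoder(AVFormatContext *fmtCtx, int streamIndex, AVCodec *codec)
        {
            AVCodecContext *ctx = ffmpeg.avcodec_alloc_context3(codec);

            // Fills width/height, sample_rate, extradata, etc. from the stream's parameters; returns < 0 on error.
            if (ffmpeg.avcodec_parameters_to_context(ctx, fmtCtx->streams[streamIndex]->codecpar) < 0)
            {
                ffmpeg.avcodec_free_context(&ctx);
                return null;
            }

            if (ffmpeg.avcodec_open2(ctx, codec, null) < 0)
            {
                ffmpeg.avcodec_free_context(&ctx);
                return null;
            }

            return ctx;
        }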
Example #5
        public static void Main(string[] argv)
        {
            //ffmpeg.av_register_all();

            if (argv.Length != 2)
            {
                Console.Error.WriteLine("usage: <in> <out>");
                return;
            }

            // Re-usable contexts and stream handles
            AVCodecContext * fileCodecContext, audioCodecContext;
            AVFormatContext *formatContext, outContext;
            AVStream *       in_audioStream, out_audioStream;
            SwrContext *     swrContext;
            int streamId;

            // input file
            string file = argv[0];
            int    res  = ffmpeg.avformat_open_input(&formatContext, file, null, null);

            if (res != 0)
            {
                die("avformat_open_input");
            }

            res = ffmpeg.avformat_find_stream_info(formatContext, null);
            if (res < 0)
            {
                die("avformat_find_stream_info");
            }

            AVCodec *codec;

            res = ffmpeg.av_find_best_stream(formatContext, AVMediaType.AVMEDIA_TYPE_AUDIO, -1, -1, &codec, 0);
            if (res < 0)
            {
                return; // die("av_find_best_stream");
            }

            streamId         = res;
            fileCodecContext = ffmpeg.avcodec_alloc_context3(codec);
            ffmpeg.avcodec_parameters_to_context(fileCodecContext, formatContext->streams[streamId]->codecpar);
            res = ffmpeg.avcodec_open2(fileCodecContext, codec, null);
            if (res < 0)
            {
                die("avcodec_open2");
            }

            in_audioStream = formatContext->streams[streamId];

            // output file
            //string outfile = Path.Combine(Path.GetTempPath(), $"{Path.GetFileNameWithoutExtension(argv[0])}.pcm");
            //AVOutputFormat* fmt = fmt = ffmpeg.av_guess_format("s16le", null, null);
            string          outfile = argv[1];
            AVOutputFormat *fmt     = ffmpeg.av_guess_format(null, outfile, null);

            if (fmt == null)
            {
                die("av_guess_format");
            }

            outContext          = ffmpeg.avformat_alloc_context();
            outContext->oformat = fmt;
            out_audioStream     = add_audio_stream(outContext, fmt->audio_codec, in_audioStream->codec->sample_rate);
            open_audio(outContext, out_audioStream);
            out_audioStream->time_base = in_audioStream->time_base;
            res = ffmpeg.avio_open2(&outContext->pb, outfile, ffmpeg.AVIO_FLAG_WRITE, null, null);
            if (res < 0)
            {
                die("url_fopen");
            }

            ffmpeg.avformat_write_header(outContext, null);
            AVCodec *ocodec;

            res = ffmpeg.av_find_best_stream(outContext, AVMediaType.AVMEDIA_TYPE_AUDIO, -1, -1, &ocodec, 0);
            audioCodecContext = ffmpeg.avcodec_alloc_context3(ocodec);
            ffmpeg.avcodec_parameters_to_context(audioCodecContext, out_audioStream->codecpar);
            res = ffmpeg.avcodec_open2(audioCodecContext, ocodec, null);
            if (res < 0)
            {
                die("avcodec_open2");
            }
            // resampling
            swrContext = ffmpeg.swr_alloc();
            ffmpeg.av_opt_set_channel_layout(swrContext, "in_channel_layout", (long)fileCodecContext->channel_layout, 0);
            ffmpeg.av_opt_set_channel_layout(swrContext, "out_channel_layout", (long)audioCodecContext->channel_layout, 0);
            ffmpeg.av_opt_set_int(swrContext, "in_sample_rate", fileCodecContext->sample_rate, 0);
            ffmpeg.av_opt_set_int(swrContext, "out_sample_rate", audioCodecContext->sample_rate, 0);
            ffmpeg.av_opt_set_sample_fmt(swrContext, "in_sample_fmt", fileCodecContext->sample_fmt, 0);
            ffmpeg.av_opt_set_sample_fmt(swrContext, "out_sample_fmt", audioCodecContext->sample_fmt, 0);
            res = ffmpeg.swr_init(swrContext);
            if (res < 0)
            {
                die("swr_init");
            }

            AVFrame *audioFrameDecoded = ffmpeg.av_frame_alloc();

            if (audioFrameDecoded == null)
            {
                die("Could not allocate audio frame");
            }

            audioFrameDecoded->format         = (int)fileCodecContext->sample_fmt;
            audioFrameDecoded->channel_layout = fileCodecContext->channel_layout;
            audioFrameDecoded->channels       = fileCodecContext->channels;
            audioFrameDecoded->sample_rate    = fileCodecContext->sample_rate;

            AVFrame *audioFrameConverted = ffmpeg.av_frame_alloc();

            if (audioFrameConverted == null)
            {
                die("Could not allocate audio frame");
            }

            audioFrameConverted->nb_samples     = audioCodecContext->frame_size;
            audioFrameConverted->format         = (int)audioCodecContext->sample_fmt;
            audioFrameConverted->channel_layout = audioCodecContext->channel_layout;
            audioFrameConverted->channels       = audioCodecContext->channels;
            audioFrameConverted->sample_rate    = audioCodecContext->sample_rate;
            if (audioFrameConverted->nb_samples <= 0)
            {
                audioFrameConverted->nb_samples = 32;
            }

            AVPacket inPacket;

            ffmpeg.av_init_packet(&inPacket);
            inPacket.data = null;
            inPacket.size = 0;

            int frameFinished = 0;


            for (; ;)
            {
                if (ffmpeg.av_read_frame(formatContext, &inPacket) < 0)
                {
                    break;
                }

                if (inPacket.stream_index == streamId)
                {
                    int len = Decode(fileCodecContext, audioFrameDecoded, ref frameFinished, &inPacket);
                    if (len == ffmpeg.AVERROR_EOF)
                    {
                        break;
                    }

                    if (frameFinished != 0)
                    {
                        // Convert

                        byte *convertedData = null;

                        if (ffmpeg.av_samples_alloc(&convertedData,
                                                    null,
                                                    audioCodecContext->channels,
                                                    audioFrameConverted->nb_samples,
                                                    audioCodecContext->sample_fmt, 0) < 0)
                        {
                            die("Could not allocate samples");
                        }

                        int outSamples = 0;
                        fixed(byte **tmp = (byte *[])audioFrameDecoded->data)
                        {
                            outSamples = ffmpeg.swr_convert(swrContext, null, 0,
                                                            //&convertedData,
                                                            //audioFrameConverted->nb_samples,
                                                            tmp,
                                                            audioFrameDecoded->nb_samples);
                        }

                        if (outSamples < 0)
                        {
                            die("Could not convert");
                        }

                        for (; ;)
                        {
                            outSamples = ffmpeg.swr_get_out_samples(swrContext, 0);
                            if (outSamples < audioCodecContext->frame_size * audioCodecContext->channels ||
                                (audioCodecContext->frame_size == 0 &&
                                 outSamples < audioFrameConverted->nb_samples * audioCodecContext->channels))
                            {
                                break; // see comments, thanks to @dajuric for fixing this
                            }

                            outSamples = ffmpeg.swr_convert(swrContext,
                                                            &convertedData,
                                                            audioFrameConverted->nb_samples, null, 0);

                            int buffer_size = ffmpeg.av_samples_get_buffer_size(null,
                                                                                audioCodecContext->channels,
                                                                                audioFrameConverted->nb_samples,
                                                                                audioCodecContext->sample_fmt,
                                                                                0);
                            if (buffer_size < 0)
                            {
                                die("Invalid buffer size");
                            }

                            if (ffmpeg.avcodec_fill_audio_frame(audioFrameConverted,
                                                                audioCodecContext->channels,
                                                                audioCodecContext->sample_fmt,
                                                                convertedData,
                                                                buffer_size,
                                                                0) < 0)
                            {
                                die("Could not fill frame");
                            }

                            AVPacket outPacket;
                            ffmpeg.av_init_packet(&outPacket);
                            outPacket.data = null;
                            outPacket.size = 0;
                            if (Encode(audioCodecContext, &outPacket, audioFrameConverted, ref frameFinished) < 0)
                            {
                                die("Error encoding audio frame");
                            }


                            //outPacket.flags |= ffmpeg.AV_PKT_FLAG_KEY;
                            outPacket.stream_index = out_audioStream->index;
                            //outPacket.data = audio_outbuf;
                            outPacket.dts = audioFrameDecoded->pkt_dts;
                            outPacket.pts = audioFrameDecoded->pkt_pts;
                            ffmpeg.av_packet_rescale_ts(&outPacket, in_audioStream->time_base, out_audioStream->time_base);

                            if (frameFinished != 0)
                            {
                                if (ffmpeg.av_interleaved_write_frame(outContext, &outPacket) != 0)
                                {
                                    die("Error while writing audio frame");
                                }

                                ffmpeg.av_packet_unref(&outPacket);
                            }
                        }
                    }
                }
            }
            EncodeFlush(audioCodecContext);
            DecodeFlush(fileCodecContext, &inPacket);

            ffmpeg.swr_close(swrContext);
            ffmpeg.swr_free(&swrContext);
            ffmpeg.av_frame_free(&audioFrameConverted);
            ffmpeg.av_frame_free(&audioFrameDecoded);
            ffmpeg.av_packet_unref(&inPacket);
            ffmpeg.av_write_trailer(outContext);
            ffmpeg.avio_close(outContext->pb);
            ffmpeg.avcodec_close(fileCodecContext);
            ffmpeg.avcodec_free_context(&fileCodecContext);
            ffmpeg.avformat_close_input(&formatContext);
            return;
        }
Example #6
        public void changecontainerset()
        {
            AVFormatContext *input_format_context  = null;
            AVFormatContext *output_format_context = null;

            FFmpegBinariesHelper.RegisterFFmpegBinaries();
            ffmpeg.avdevice_register_all();
            input_format_context = ffmpeg.avformat_alloc_context();

            AVInputFormat *iformat = ffmpeg.av_find_input_format("dshow");
            string         device  = "video=USB3. 0 capture:audio=디지털 오디오 인터페이스(5- USB3. 0 capture)";


            var a = ffmpeg.avformat_open_input(&input_format_context, device, iformat, null); // a negative return means the device could not be opened (no such device)
            var b = ffmpeg.avformat_find_stream_info(input_format_context, null);             // a negative return means no stream info could be found

            var fileName = @"C:\Users\admin\Desktop\changeContainer.avi";

            ffmpeg.avformat_alloc_output_context2(&output_format_context, null, null, fileName);
            var number_of_streams = input_format_context->nb_streams;

            var streams_list = new int[number_of_streams];
            int stream_index = 0;


            for (int i = 0; i < input_format_context->nb_streams; i++)
            {
                AVStream *         out_stream;
                AVStream *         in_stream   = input_format_context->streams[i];
                AVCodecParameters *in_codecpar = in_stream->codecpar;
                Console.WriteLine(in_codecpar->codec_id);

                if (in_codecpar->codec_type != AVMediaType.AVMEDIA_TYPE_VIDEO &&
                    in_codecpar->codec_type != AVMediaType.AVMEDIA_TYPE_AUDIO)
                {
                    streams_list[i] = -1;
                    continue;
                }
                streams_list[i] = stream_index++;

                out_stream = ffmpeg.avformat_new_stream(output_format_context, null);

                var ret = ffmpeg.avcodec_parameters_copy(out_stream->codecpar, in_codecpar);
            }

            if (ffmpeg.avio_open(&output_format_context->pb, fileName, ffmpeg.AVIO_FLAG_WRITE) < 0)
            {
                Console.WriteLine("Failed to open output file! \n");
            }

            ffmpeg.avformat_write_header(output_format_context, null);

            output_format_context->streams[0]->time_base = new AVRational {
                num = 1, den = 30
            };
            output_format_context->streams[0]->codec->time_base = new AVRational {
                num = 1, den = 30
            };
            output_format_context->streams[0]->codec->framerate = new AVRational {
                num = 30, den = 1
            };
            int index = 1;

            while (index < 1000)
            {
                AVStream *in_stream;
                AVStream *out_stream;
                AVPacket  packet;
                var       ret = ffmpeg.av_read_frame(input_format_context, &packet);
                if (ret < 0)
                {
                    break;
                }
                in_stream = input_format_context->streams[packet.stream_index];

                if (packet.stream_index == 0)
                {
                    in_stream->codec->time_base = new AVRational {
                        num = 1, den = 30
                    };
                    in_stream->codec->framerate = new AVRational {
                        num = 30, den = 1
                    };
                    in_stream->r_frame_rate = new AVRational {
                        num = 30, den = 1
                    };
                    output_format_context->streams[0]->r_frame_rate = new AVRational {
                        num = 30, den = 1
                    };
                }
                if (packet.stream_index >= number_of_streams || streams_list[packet.stream_index] < 0)
                {
                    ffmpeg.av_packet_unref(&packet);
                    continue;
                }
                packet.stream_index = streams_list[packet.stream_index];

                out_stream = output_format_context->streams[packet.stream_index];

                Console.WriteLine(output_format_context->streams[0]->time_base.num + "/ " + output_format_context->streams[0]->time_base.den);
                if (packet.stream_index == 0)
                {
                    packet.pts = index;
                    packet.dts = index;
                }

                packet.pts = ffmpeg.av_rescale_q_rnd(packet.pts, output_format_context->streams[packet.stream_index]->codec->time_base, output_format_context->streams[packet.stream_index]->time_base, AVRounding.AV_ROUND_INF | AVRounding.AV_ROUND_PASS_MINMAX);
                packet.dts = ffmpeg.av_rescale_q_rnd(packet.dts, output_format_context->streams[packet.stream_index]->codec->time_base, output_format_context->streams[packet.stream_index]->time_base, AVRounding.AV_ROUND_INF | AVRounding.AV_ROUND_PASS_MINMAX);


                Console.WriteLine(output_format_context->streams[packet.stream_index]->codec->time_base.den);
                ///* copy packet */

                Console.WriteLine($"Packet {packet.pts} / {packet.dts} ");

                index++;


                var ret1 = ffmpeg.av_interleaved_write_frame(output_format_context, &packet);
                if (ret1 < 0)
                {
                    Console.WriteLine("write error");
                }
                //av_packet_unref(&packet);
            }
            ffmpeg.av_write_trailer(output_format_context);

            // Release the output and input contexts.
            ffmpeg.avio_closep(&output_format_context->pb);
            ffmpeg.avformat_free_context(output_format_context);
            ffmpeg.avformat_close_input(&input_format_context);
        }
Example #7
 public static extern int avcodec_parameters_from_context(AVCodecParameters *par, AVCodecContext *codec);
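The usual direction for this call is encoder context to output stream, right before avformat_write_header. A minimal sketch under that assumption; the helper name is illustrative and the FFmpeg.AutoGen-style binding is assumed.

        public static unsafe void ExportEncoderParameters(AVStream *outStream, AVCodecContext *encCtx)
        {
            // Copies codec_id, dimensions/sample_rate, extradata, etc. into outStream->codecpar; returns < 0 on error.
            int ret = ffmpeg.avcodec_parameters_from_context(outStream->codecpar, encCtx);
            if (ret < 0)
            {
                throw new InvalidOperationException($"avcodec_parameters_from_context failed ({ret})");
            }

            // The muxer may still adjust the stream time base during avformat_write_header.
            outStream->time_base = encCtx->time_base;
        }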
Example #8
 public static extern int avcodec_parameters_copy(AVCodecParameters *dst, AVCodecParameters *src);
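Examples 2 and 6 above use this call for stream copy; a condensed, hedged sketch of that pattern follows (the helper name is illustrative).

        public static unsafe AVStream *AddStreamCopy(AVFormatContext *outCtx, AVStream *inStream)
        {
            AVStream *outStream = ffmpeg.avformat_new_stream(outCtx, null);
            if (outStream == null)
            {
                return null;
            }

            // Deep-copies the parameters, including extradata; returns < 0 on error.
            if (ffmpeg.avcodec_parameters_copy(outStream->codecpar, inStream->codecpar) < 0)
            {
                return null;
            }

            // Let the output container choose its own codec tag.
            outStream->codecpar->codec_tag = 0;
            return outStream;
        }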
Example #9
 public static extern int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes);
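A hedged usage sketch: estimate an audio packet's duration in samples from the stream's codec parameters (the helper name is illustrative).

        public static unsafe int PacketDurationInSamples(AVStream *audioStream, AVPacket *pkt)
        {
            // Returns the frame duration in samples, or 0 when it cannot be determined.
            return ffmpeg.av_get_audio_frame_duration2(audioStream->codecpar, pkt->size);
        }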
Example #10
 public static CodecParameters FromNative(AVCodecParameters *ptr, bool isOwner) => new CodecParameters(ptr, isOwner);
Example #11
 protected CodecParameters(AVCodecParameters *ptr, bool isOwner) : base(NativeUtils.NotNull((IntPtr)ptr), isOwner)
 {
 }
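A hedged usage sketch for the wrapper shown in Examples 10 and 11; only FromNative and the protected constructor come from the snippets above, everything else here is illustrative.

        // Wrap a stream's codecpar without taking ownership (the demuxer still owns the memory).
        public static unsafe CodecParameters WrapStreamParameters(AVStream *stream)
        {
            return CodecParameters.FromNative(stream->codecpar, isOwner: false);
        }

        // A derived wrapper can forward an owned pointer through the protected constructor.
        public unsafe class OwnedCodecParameters : CodecParameters
        {
            public OwnedCodecParameters(AVCodecParameters *ptr) : base(ptr, true)
            {
            }
        }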