Code example #1
        /// <summary>
        /// Transcode an audio file.
        /// </summary>
        /// <param name="input">input audio file</param>
        /// <param name="output">output audio file</param>
        /// <param name="outChannels">output audio file channels</param>
        /// <param name="outSampleRate">output audio file sample rate</param>
        public AudioTranscode(string input, string output, int outChannels = 2, int outSampleRate = 44100)
        {
            using (MediaWriter writer = new MediaWriter(output))
                using (MediaReader reader = new MediaReader(input))
                {
                    int audioIndex = reader.First(_ => _.Codec.Type == AVMediaType.AVMEDIA_TYPE_AUDIO).Index;

                    writer.AddStream(MediaEncoder.CreateAudioEncode(writer.Format, outChannels, outSampleRate));
                    writer.Initialize();

                    AudioFrame      dst       = AudioFrame.CreateFrameByCodec(writer[0].Codec);
                    SampleConverter converter = new SampleConverter(dst);
                    long            pts       = 0;
                    foreach (var packet in reader.ReadPacket())
                    {
                        foreach (var srcframe in reader[audioIndex].ReadFrame(packet))
                        {
                            foreach (var dstframe in converter.Convert(srcframe))
                            {
                                pts         += dstframe.AVFrame.nb_samples;
                                dstframe.Pts = pts; // audio pts is the cumulative sample count and must be monotonically increasing.
                                foreach (var outpacket in writer[0].WriteFrame(dstframe))
                                {
                                    writer.WritePacket(outpacket);
                                }
                            }
                        }
                    }
                    writer.FlushMuxer();
                }
        }
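
A minimal usage sketch (the file names below are placeholders; it relies only on the constructor shown above):

        // Transcode a source file to stereo 44.1 kHz audio; all of the work
        // runs inside the AudioTranscode constructor above.
        new AudioTranscode("input.mp3", "output.aac", outChannels: 2, outSampleRate: 44100);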
Code example #2
        /// <summary>
        /// Decode video to image files.
        /// filter graph:
        /// ┌──────┐     ┌──────┐     ┌─────┐     ┌──────────┐     ┌──────┐
        /// │input0│---->│buffer│---->│scale│---->│buffersink│---->│output│
        /// └──────┘     └──────┘     └─────┘     └──────────┘     └──────┘
        /// </summary>
        /// <param name="inputFile">input video file</param>
        /// <param name="outDirectory">folder for output image files</param>
        /// <param name="scaleOptions">scale options <see cref="http://ffmpeg.org/ffmpeg-filters.html#scale-1"/></param>
        public DecodeVideoWithCustomCodecScaledToMat(string inputFile, string outDirectory, string scaleOptions = "512:288")
        {
            using (MediaReader reader = new MediaReader(inputFile, null, null))
            {
                var videoIndex = reader.First(_ => _.Codec.AVCodecContext.codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO).Index;

                unsafe
                {
                    // Replace the default video decoder with the NVIDIA CUVID decoder.
                    // !!! IMPORTANT NOTE: This sample won't work unless you have downloaded a full ffmpeg build (GPL license, as it is more complete) and you have NVIDIA hardware (CUDA) !!!
                    reader[videoIndex].Codec = MediaDecode.CreateDecode("h264_cuvid", _ => ffmpeg.avcodec_parameters_to_context(_, reader[videoIndex].Stream.codecpar));
                }

                int        height              = reader[videoIndex].Codec.AVCodecContext.height;
                int        width               = reader[videoIndex].Codec.AVCodecContext.width;
                int        format              = (int)reader[videoIndex].Codec.AVCodecContext.pix_fmt;
                AVRational time_base           = reader[videoIndex].TimeBase;
                AVRational sample_aspect_ratio = reader[videoIndex].Codec.AVCodecContext.sample_aspect_ratio;

                /* The packet is decoded on CUDA and the frames are then scaled.
                 * From here we can either:
                 * - drop hwdownload and format to leave the frame in CUDA memory, and forward the pointer to any other function or write the frame to an output video, or
                 * - convert it to a Mat; conversion speed depends on the size of the scaled frame.
                 */
                MediaFilterGraph filterGraph = new MediaFilterGraph();
                filterGraph.AddVideoSrcFilter(new MediaFilter(MediaFilter.VideoSources.Buffer), width, height, (AVPixelFormat)format, time_base, sample_aspect_ratio)
                .LinkTo(0, filterGraph.AddFilter(new MediaFilter("scale"), scaleOptions))
                .LinkTo(0, filterGraph.AddVideoSinkFilter(new MediaFilter(MediaFilter.VideoSinks.Buffersink)));
                filterGraph.Initialize();

                var sw = Stopwatch.StartNew();
                foreach (var packet in reader.ReadPacket())
                {
                    foreach (var frame in reader[videoIndex].ReadFrame(packet))
                    {
                        filterGraph.Inputs.First().WriteFrame(frame);
                        foreach (var filterFrame in filterGraph.Outputs.First().ReadFrame())
                        {
                            using (var image = filterFrame.ToMat())
                            {
                                image.Save(Path.Combine(Directory.CreateDirectory(outDirectory).FullName, $"{DateTime.Now.Ticks}.jpg"));
                            }
                        }
                    }
                }
                Console.WriteLine($"Converting to MAT [ processed in {sw.Elapsed.TotalMilliseconds:0} ms ]");
            }
        }
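
A minimal usage sketch (the paths below are placeholders; it assumes the full ffmpeg build and NVIDIA/CUDA hardware noted in the example):

        // Decode input.mp4 with h264_cuvid, scale each frame to 512x288 and
        // save the frames as JPEG files into the "frames" directory.
        new DecodeVideoWithCustomCodecScaledToMat("input.mp4", "frames", scaleOptions: "512:288");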