/// <summary>
/// Send a frame to the encoder. Wraps ffmpeg.avcodec_send_frame.
/// </summary>
/// <param name="frame">the frame to submit for encoding</param>
/// <exception cref="FFmpegException">thrown when the codec context is not initialized</exception>
/// <returns>0 on success, a negative AVERROR code on failure</returns>
public int SendFrame([In] MediaFrame frame)
{
    unsafe
    {
        // Guard against an uninitialized codec context so callers get a
        // meaningful FFmpegException instead of a native crash — consistent
        // with the guarded SendFrame/ReceiveFrame overloads in this project.
        if (pCodecContext == null)
        {
            throw new FFmpegException(FFmpegException.NotInitCodecContext);
        }
        return ffmpeg.avcodec_send_frame(pCodecContext, frame);
    }
}
/// <summary>
/// Push a frame into the buffer source filter.
/// Wraps ffmpeg.av_buffersrc_add_frame_flags.
/// </summary>
/// <param name="frame">the frame to feed into the filter graph</param>
/// <param name="flags">buffer source flags; by default a reference to the frame is kept</param>
/// <returns>0 on success, a negative AVERROR code on failure</returns>
public int AddFrame(MediaFrame frame, BufferSrcFlags flags = BufferSrcFlags.KeepRef)
{
    unsafe
    {
        int ret = ffmpeg.av_buffersrc_add_frame_flags(pFilterContext, frame, (int)flags);
        return ret;
    }
}
/// <summary>
/// Pull a filtered frame from the buffer sink filter.
/// Wraps ffmpeg.av_buffersink_get_frame.
/// </summary>
/// <param name="frame">destination frame that receives the filtered data</param>
/// <returns>0 on success, a negative AVERROR code (e.g. EAGAIN/EOF) on failure</returns>
public int GetFrame(MediaFrame frame)
{
    unsafe
    {
        int ret = ffmpeg.av_buffersink_get_frame(pFilterContext, frame);
        return ret;
    }
}
/// <summary>
/// Receive a decoded frame from the decoder.
/// Wraps ffmpeg.avcodec_receive_frame.
/// </summary>
/// <param name="frame">destination frame that receives the decoded data</param>
/// <exception cref="FFmpegException">thrown when the codec context is not initialized</exception>
/// <returns>0 on success, a negative AVERROR code on failure</returns>
public int ReceiveFrame([Out] MediaFrame frame)
{
    if (pCodecContext != null)
    {
        return ffmpeg.avcodec_receive_frame(pCodecContext, frame);
    }
    throw new FFmpegException(FFmpegException.NotInitCodecContext);
}
/// <summary>
/// Send a frame to the codec.
/// <see cref="ffmpeg.avcodec_send_frame(AVCodecContext*, AVFrame*)"/>
/// </summary>
/// <param name="frame">the frame to submit</param>
/// <exception cref="FFmpegException">thrown when the codec context is not initialized</exception>
/// <returns>0 on success, a negative AVERROR code on failure</returns>
public int SendFrame([In] MediaFrame frame)
{
    if (pCodecContext != null)
    {
        return ffmpeg.avcodec_send_frame(pCodecContext, frame);
    }
    throw new FFmpegException(FFmpegException.NotInitCodecContext);
}
/// <summary>
/// Convert <paramref name="srcFrame"/> through the resampler.
/// <para>
/// Audio input and output may run at different sample rates, so converted
/// samples are buffered in a fifo and emitted as zero or more frames of
/// DstNbSamples each — hence the <see cref="IEnumerable{T}"/> return.
/// </para>
/// </summary>
/// <param name="srcFrame">source audio frame</param>
/// <returns>zero or more converted audio frames</returns>
public override IEnumerable<AudioFrame> Convert(MediaFrame srcFrame)
{
    SwrCheckInit(srcFrame);
    FifoPush(srcFrame);
    while (DstNbSamples <= AudioFifo.Size)
    {
        yield return FifoPop();
    }
}
/// <summary>
/// Convert an input audio frame to one output frame.
/// </summary>
/// <param name="srcFrame">input audio frame</param>
/// <param name="outSamples">number of samples actually output</param>
/// <param name="cacheSamples">number of samples still held in the internal fifo</param>
/// <returns>the converted audio frame</returns>
public AudioFrame ConvertFrame(MediaFrame srcFrame, out int outSamples, out int cacheSamples)
{
    SwrCheckInit(srcFrame);
    int totalSamples = FifoPush(srcFrame);
    AudioFrame converted = FifoPop();
    cacheSamples = AudioFifo.Size;
    // samples emitted this call = total pushed minus what stayed cached
    outSamples = totalSamples - cacheSamples;
    return converted;
}
/// <summary>
/// Pre-process a frame: strip A53 closed-caption side data from video
/// frames so Closed Captions will not be duplicated in the output.
/// </summary>
/// <param name="frame">frame to clean up; null is ignored</param>
private void RemoveSideData(MediaFrame frame)
{
    if (frame == null)
    {
        return;
    }
    // Only video streams carry A53 closed-caption side data.
    if (AVCodecContext.codec_type == AVMediaType.AVMEDIA_TYPE_VIDEO)
    {
        ffmpeg.av_frame_remove_side_data(frame, AVFrameSideDataType.AV_FRAME_DATA_A53_CC);
    }
}
/// <summary>
/// Copy the first data plane of a frame into a 4-channel 8-bit Mat.
/// NOTE(review): assumes plane 0 holds packed BGRA pixels (the caller is
/// expected to have converted the frame beforehand) — confirm at call site.
/// </summary>
/// <param name="frame">source frame</param>
/// <returns>a height x width Cv8U 4-channel Mat containing the pixel data</returns>
private static Mat BgraToMat(MediaFrame frame)
{
    int height = frame.AVFrame.height;
    int width = frame.AVFrame.width;
    Mat mat = new Mat(height, width, DepthType.Cv8U, 4);
    int dstStride = mat.Step;
    unsafe
    {
        int srcStride = frame.AVFrame.linesize[0];
        // Copy the narrower of the two strides so neither buffer is overrun.
        var copyWidth = Math.Min(dstStride, srcStride);
        ffmpeg.av_image_copy_plane((byte *)mat.DataPointer, dstStride, (byte *)frame.Data[0], srcStride, copyWidth, height);
    }
    return mat;
}
/// <summary>
/// Write a frame by <see cref="Codec"/>.
/// <para><see cref="MediaEncode.EncodeFrame(MediaFrame)"/></para>
/// <para><see cref="FixPacket(MediaPacket)"/></para>
/// </summary>
/// <param name="frame">the frame to encode</param>
/// <exception cref="FFmpegException">thrown when no usable encoder is attached</exception>
/// <returns>the encoded packets after <see cref="FixPacket(MediaPacket)"/> is applied</returns>
public IEnumerable<MediaPacket> WriteFrame(MediaFrame frame)
{
    // Pattern match instead of an unchecked "as" cast: if Codec is missing
    // or is not an encoder this throws a meaningful FFmpegException rather
    // than a NullReferenceException on EncodeFrame.
    if (!HasEncoder || !(Codec is MediaEncode encoder))
    {
        throw new FFmpegException(ffmpeg.AVERROR_ENCODER_NOT_FOUND);
    }
    foreach (var packet in encoder.EncodeFrame(frame))
    {
        FixPacket(packet);
        yield return packet;
    }
}
/// <summary>
/// Convert to Mat
/// <para>
/// video frame: converted to AV_PIX_FMT_BGRA and returned as new Mat(frame.Height, frame.Width, DepthType.Cv8U, 4)
/// </para>
/// <para>
/// audio frame:
/// <list type="bullet">
/// <item>if the sample format is planar, returns new Mat(frame.AVFrame.nb_samples, frame.AVFrame.channels, depthType, 1);</item>
/// <item>if the sample format is packed (interleaved), returns new Mat(frame.AVFrame.nb_samples, 1, depthType, frame.AVFrame.channels);</item>
/// </list>
/// <para><see cref="AVSampleFormat"/> to <see cref="DepthType"/> mapping table</para>
/// <list type="table">
/// <item>
/// <term><see cref="AVSampleFormat.AV_SAMPLE_FMT_U8"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_U8P"/></term>
/// <description><see cref="DepthType.Cv8U"/></description>
/// </item>
/// <item>
/// <term><see cref="AVSampleFormat.AV_SAMPLE_FMT_S16"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S16P"/></term>
/// <description><see cref="DepthType.Cv16S"/></description>
/// </item>
/// <item>
/// <term><see cref="AVSampleFormat.AV_SAMPLE_FMT_S32"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S32P"/></term>
/// <description><see cref="DepthType.Cv32S"/></description>
/// </item>
/// <item>
/// <term><see cref="AVSampleFormat.AV_SAMPLE_FMT_FLT"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_FLTP"/></term>
/// <description><see cref="DepthType.Cv32F"/></description>
/// </item>
/// <item>
/// <term><see cref="AVSampleFormat.AV_SAMPLE_FMT_DBL"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_DBLP"/></term>
/// <description><see cref="DepthType.Cv64F"/></description>
/// </item>
/// <item>
/// <term><see cref="AVSampleFormat.AV_SAMPLE_FMT_S64"/>/<see cref="AVSampleFormat.AV_SAMPLE_FMT_S64P"/></term>
/// <description><see cref="DepthType.Cv64F"/></description>
/// </item>
/// <item>NOTE: Emgu.CV does not support S64, so Cv64F is used instead; read the result as raw bytes and reinterpret them as int64, otherwise you will read <see cref="double.NaN"/></item>
/// </list>
/// </para>
/// </summary>
/// <param name="frame"></param>
/// <returns></returns>
/// <summary>
/// Convert a media frame to an Emgu.CV Mat: video frames go through
/// VideoFrameToMat, audio frames through AudioFrameToMat.
/// </summary>
/// <param name="frame">the frame to convert</param>
/// <exception cref="FFmpegException">thrown when the frame is neither a video nor an audio frame</exception>
/// <returns>a Mat holding the frame data</returns>
public static Mat ToMat(this MediaFrame frame)
{
    // Pattern matching instead of unchecked "as" casts: a frame whose flag
    // does not match its actual subtype now throws InvalidFrame instead of
    // passing null into the converter and failing with a NullReferenceException.
    if (frame.IsVideoFrame && frame is VideoFrame videoFrame)
    {
        return VideoFrameToMat(videoFrame);
    }
    if (frame.IsAudioFrame && frame is AudioFrame audioFrame)
    {
        return AudioFrameToMat(audioFrame);
    }
    throw new FFmpegException(FFmpegException.InvalidFrame);
}
/// <summary>
/// Scale/convert a source video frame into the preconfigured destination
/// frame. The sws context is created lazily from the first source frame's
/// dimensions and pixel format.
/// </summary>
/// <param name="srcFrame">source video frame</param>
/// <returns>the shared destination frame holding the converted image</returns>
public VideoFrame ConvertFrame(MediaFrame srcFrame)
{
    AVFrame *pSrc = srcFrame;
    AVFrame *pDst = dstFrame;
    if (pSwsContext == null && !isDisposing)
    {
        // Lazily allocate the scaler once, keyed to the first frame's geometry.
        pSwsContext = ffmpeg.sws_getContext(
            pSrc->width, pSrc->height, (AVPixelFormat)pSrc->format,
            DstWidth, DstHeight, DstFormat, SwsFlag, null, null, null);
    }
    ffmpeg.sws_scale(pSwsContext, pSrc->data, pSrc->linesize, 0, pSrc->height, pDst->data, pDst->linesize).ThrowIfError();
    return dstFrame as VideoFrame;
}
/// <summary>
/// Encode one frame and yield the resulting packets.
/// TODO: add SubtitleFrame support.
/// </summary>
/// <param name="frame">the frame to encode</param>
/// <returns>zero or more encoded packets (a single reused packet instance)</returns>
public virtual IEnumerable<MediaPacket> EncodeFrame(MediaFrame frame)
{
    SendFrame(frame).ThrowIfError();
    // Side data is stripped after sending so Closed Captions are not duplicated.
    RemoveSideData(frame);
    using (MediaPacket packet = new MediaPacket())
    {
        while (true)
        {
            int result = ReceivePacket(packet);
            // EAGAIN: encoder needs more input; EOF: encoder fully drained.
            if (result == ffmpeg.AVERROR(ffmpeg.EAGAIN) || result == ffmpeg.AVERROR_EOF)
            {
                yield break;
            }
            result.ThrowIfError();
            yield return packet;
        }
    }
}
/// <summary>
/// Lazily create and initialize the swresample context from the first
/// source frame's sample rate, format and channel layout; no-op once
/// initialized or while disposing.
/// </summary>
/// <param name="srcFrame">frame describing the input audio parameters</param>
private void SwrCheckInit(MediaFrame srcFrame)
{
    if (pSwrContext == null && !isDisposing)
    {
        AVFrame *src = srcFrame;
        // NOTE(review): dst is declared but not used in this method.
        AVFrame *dst = dstFrame;
        ulong srcChannelLayout = src->channel_layout;
        if (srcChannelLayout == 0)
        {
            // Frame carries no channel layout; derive a default from the channel count.
            srcChannelLayout = FFmpegHelper.GetChannelLayout(src->channels);
        }
        // When DstSampleRate is 0 the output keeps the input sample rate.
        pSwrContext = ffmpeg.swr_alloc_set_opts(null, (long)DstChannelLayout, DstFormat, DstSampleRate == 0 ? src->sample_rate : DstSampleRate, (long)srcChannelLayout, (AVSampleFormat)src->format, src->sample_rate, 0, null);
        ffmpeg.swr_init(pSwrContext).ThrowIfError();
    }
}
/// <summary>
/// Resample <paramref name="srcFrame"/> and push the converted samples into
/// <see cref="AudioFifo"/>, also draining samples buffered inside the swr context.
/// </summary>
/// <param name="srcFrame">input frame; if it converts to a null AVFrame the loop never runs</param>
/// <returns>the number of samples now held in the fifo</returns>
private int FifoPush(MediaFrame srcFrame)
{
    AVFrame *src = srcFrame;
    AVFrame *dst = dstFrame;
    // First iteration converts the source samples; later iterations pass null
    // input to drain what swr buffered internally. The loop stops once a
    // conversion yields fewer than DstNbSamples (nothing left to drain).
    for (int i = 0, ret = DstNbSamples; ret == DstNbSamples && src != null; i++)
    {
        if (i == 0 && src != null)
        {
            ret = ffmpeg.swr_convert(pSwrContext, dst->extended_data, dst->nb_samples, src->extended_data, src->nb_samples).ThrowIfError();
        }
        else
        {
            // Drain pass: no new input, flush swr's internal buffer.
            ret = ffmpeg.swr_convert(pSwrContext, dst->extended_data, dst->nb_samples, null, 0).ThrowIfError();
        }
        AudioFifo.Add((void **)dst->extended_data, ret);
    }
    return AudioFifo.Size;
}
/// <summary>
/// Decode a packet and yield the resulting frames.
/// TODO: add SubtitleFrame support.
/// <para>
/// <see cref="SendPacket(MediaPacket)"/> and <see cref="ReceiveFrame(MediaFrame)"/>
/// </para>
/// </summary>
/// <param name="packet">the packet to decode</param>
/// <returns>zero or more decoded frames (a single reused frame instance)</returns>
public virtual IEnumerable<MediaFrame> DecodePacket(MediaPacket packet)
{
    if (SendPacket(packet) < 0)
    {
        yield break;
    }
    // Allocate a frame matching the codec type; subtitle (and other) codec
    // types are not supported yet and throw during enumeration.
    MediaFrame frame;
    switch (Type)
    {
        case AVMediaType.AVMEDIA_TYPE_VIDEO:
            frame = new VideoFrame();
            break;
        case AVMediaType.AVMEDIA_TYPE_AUDIO:
            frame = new AudioFrame();
            break;
        default:
            throw new FFmpegException(FFmpegException.NotSupportFrame);
    }
    using (frame)
    {
        while (ReceiveFrame(frame) >= 0)
        {
            yield return frame;
        }
    }
}
/// <summary>
/// Convert a frame; the base implementation is not implemented and always throws.
/// Overridden by concrete converters.
/// </summary>
/// <param name="frame">frame to convert</param>
/// <exception cref="FFmpegException">always thrown by this base implementation</exception>
public virtual IEnumerable<MediaFrame> Convert(MediaFrame frame) => throw new FFmpegException(FFmpegException.NotImplemented);
/// <summary>
/// Convert <paramref name="srcframe"/>.
/// <para>
/// Video conversion always yields exactly one frame; IEnumerable is used
/// only to stay consistent with the <see cref="SampleConverter"/> interface.
/// </para>
/// </summary>
/// <param name="srcframe">source frame</param>
/// <returns>a single converted video frame</returns>
public override IEnumerable<VideoFrame> Convert(MediaFrame srcframe)
{
    yield return ConvertFrame(srcframe);
}
/// <summary>
/// Send a frame into the filter graph, throwing when the underlying
/// buffer-source call reports an error.
/// </summary>
/// <param name="frame">the frame to write</param>
/// <param name="flags">buffer source flags; by default a reference to the frame is kept</param>
public void WriteFrame(MediaFrame frame, BufferSrcFlags flags = BufferSrcFlags.KeepRef)
{
    int ret = AddFrame(frame, flags);
    ret.ThrowExceptionIfError();
}