private FFmpegFrame ensureFramePixelFormat(FFmpegFrame frame, AVPixelFormat targetPixelFormat)
{
    if (frame.PixelFormat == targetPixelFormat)
        return frame;

    int width = frame.Pointer->width;
    int height = frame.Pointer->height;

    swsContext = ffmpeg.sws_getCachedContext(
        swsContext,
        width, height, frame.PixelFormat,
        width, height, targetPixelFormat,
        1, null, null, null); // 1 = SWS_FAST_BILINEAR

    if (!scalerFrames.TryDequeue(out var scalerFrame))
        scalerFrame = new FFmpegFrame(ffmpeg, returnScalerFrame);

    // (re)initialize the scaler frame if needed.
    if (scalerFrame.PixelFormat != targetPixelFormat || scalerFrame.Pointer->width != width || scalerFrame.Pointer->height != height)
    {
        ffmpeg.av_frame_unref(scalerFrame.Pointer);

        // Note: this field determines the scaler's output pix format.
        scalerFrame.PixelFormat = targetPixelFormat;
        scalerFrame.Pointer->width = width;
        scalerFrame.Pointer->height = height;

        int getBufferResult = ffmpeg.av_frame_get_buffer(scalerFrame.Pointer, 0);

        if (getBufferResult < 0)
        {
            Logger.Log($"Failed to allocate SWS frame buffer: {getErrorMessage(getBufferResult)}");

            scalerFrame.Dispose();
            frame.Return();
            return null;
        }
    }

    int scalerResult = ffmpeg.sws_scale(
        swsContext,
        frame.Pointer->data, frame.Pointer->linesize, 0, height,
        scalerFrame.Pointer->data, scalerFrame.Pointer->linesize);

    // return the original frame regardless of the scaler result.
    frame.Return();

    if (scalerResult < 0)
    {
        Logger.Log($"Failed to scale frame: {getErrorMessage(scalerResult)}");

        scalerFrame.Dispose();
        return null;
    }

    return scalerFrame;
}
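// The pooling side of the scaler frames isn't shown in this section. Below is a minimal sketch of the
// return callback passed to FFmpegFrame above, assuming `scalerFrames` is a ConcurrentQueue<FFmpegFrame>
// and that FFmpegFrame.Return() invokes the callback it was constructed with:
private void returnScalerFrame(FFmpegFrame frame)
{
    // Keep the frame (and its buffers) around for reuse by the next ensureFramePixelFormat call,
    // so repeated conversions at the same size/format avoid reallocating buffers.
    scalerFrames.Enqueue(frame);
}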
private void decodeNextFrame(AVPacket* packet)
{
    int readFrameResult = ffmpeg.av_read_frame(formatContext, packet);

    if (readFrameResult >= 0)
    {
        State = DecoderState.Running;

        if (packet->stream_index == stream->index)
        {
            int sendPacketResult = ffmpeg.avcodec_send_packet(stream->codec, packet);

            if (sendPacketResult == 0)
            {
                AVFrame* frame = ffmpeg.av_frame_alloc();
                AVFrame* outFrame = null;

                var result = ffmpeg.avcodec_receive_frame(stream->codec, frame);

                if (result == 0)
                {
                    var frameTime = (frame->best_effort_timestamp - stream->start_time) * timeBaseInSeconds * 1000;

                    if (!skipOutputUntilTime.HasValue || skipOutputUntilTime.Value < frameTime)
                    {
                        skipOutputUntilTime = null;

                        if (convert)
                        {
                            outFrame = ffmpeg.av_frame_alloc();
                            outFrame->format = (int)AVPixelFormat.AV_PIX_FMT_YUV420P;
                            outFrame->width = stream->codec->width;
                            outFrame->height = stream->codec->height;

                            var ret = ffmpeg.av_frame_get_buffer(outFrame, 32);

                            if (ret < 0)
                                throw new InvalidOperationException($"Error allocating video frame: {getErrorMessage(ret)}");

                            ffmpeg.sws_scale(convCtx, frame->data, frame->linesize, 0, stream->codec->height, outFrame->data, outFrame->linesize);
                        }
                        else
                            outFrame = frame;

                        if (!availableTextures.TryDequeue(out var tex))
                            tex = new Texture(new VideoTexture(codecParams.width, codecParams.height));

                        var upload = new VideoTextureUpload(outFrame, ffmpeg.av_frame_free);

                        tex.SetData(upload);
                        decodedFrames.Enqueue(new DecodedFrame { Time = frameTime, Texture = tex });
                    }

                    lastDecodedFrameTime = (float)frameTime;
                }

                // Two cases require freeing the input frame here: outFrame is null (the frame was skipped or
                // avcodec_receive_frame failed), or outFrame is a converted copy distinct from the input frame.
                // In both cases the input frame won't be freed by the texture upload.
                if (outFrame != frame)
                    ffmpeg.av_frame_free(&frame);
            }
            else
                Logger.Log($"Error {sendPacketResult} sending packet in VideoDecoder");
        }

        ffmpeg.av_packet_unref(packet);
    }
    else if (readFrameResult == AGffmpeg.AVERROR_EOF)
    {
        if (Looping)
        {
            Seek(0);
        }
        else
        {
            // This marks the video stream as no longer relevant (until a future potential Seek operation).
            State = DecoderState.EndOfStream;
        }
    }
    else
    {
        State = DecoderState.Ready;
        Thread.Sleep(1);
    }
}
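// `getErrorMessage` is used throughout but not defined in this section. A minimal sketch, assuming the
// `ffmpeg` wrapper exposes av_strerror with its FFmpeg.AutoGen shape
// (int av_strerror(int errnum, byte* errbuf, ulong errbuf_size)) and System.Runtime.InteropServices is imported:
private string getErrorMessage(int errorCode)
{
    const ulong buffer_size = 256;
    byte* buffer = stackalloc byte[(int)buffer_size];

    // av_strerror writes a null-terminated description of the error code into the buffer.
    if (ffmpeg.av_strerror(errorCode, buffer, buffer_size) < 0)
        return $"{errorCode} (unknown error)";

    return Marshal.PtrToStringAnsi((IntPtr)buffer) ?? errorCode.ToString();
}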
private void decodingLoop(CancellationToken cancellationToken)
{
    var packet = ffmpeg.av_packet_alloc();

    const int max_pending_frames = 3;

    try
    {
        while (true)
        {
            if (cancellationToken.IsCancellationRequested)
                return;

            if (decodedFrames.Count < max_pending_frames)
            {
                // The read/decode step is identical to decodeNextFrame above, so delegate to it
                // instead of duplicating the logic inline.
                decodeNextFrame(packet);
            }
            else
            {
                // wait until existing buffers are consumed.
                State = DecoderState.Ready;
                Thread.Sleep(1);
            }

            while (!decoderCommands.IsEmpty)
            {
                if (cancellationToken.IsCancellationRequested)
                    return;

                if (decoderCommands.TryDequeue(out var cmd))
                    cmd();
            }
        }
    }
    catch (Exception e)
    {
        Logger.Log($"VideoDecoder faulted: {e}");
        State = DecoderState.Faulted;
    }
    finally
    {
        ffmpeg.av_packet_free(&packet);

        if (State != DecoderState.Faulted)
            State = DecoderState.Stopped;
    }
}
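// A sketch of how decodingLoop might be driven, for context. The `decodingTask` and `decodingCancellation`
// members are illustrative assumptions, not part of this section. TaskCreationOptions.LongRunning requests
// a dedicated thread, which suits a loop that blocks with Thread.Sleep.
private Task decodingTask;
private CancellationTokenSource decodingCancellation;

public void StartDecoding()
{
    decodingCancellation = new CancellationTokenSource();

    decodingTask = Task.Factory.StartNew(
        () => decodingLoop(decodingCancellation.Token),
        decodingCancellation.Token,
        TaskCreationOptions.LongRunning,
        TaskScheduler.Default);
}

public void StopDecoding()
{
    decodingCancellation.Cancel();
    decodingTask.Wait();

    decodingTask = null;
    decodingCancellation.Dispose();
}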