/// <summary>
/// Receives 0 or more frames from the next available packet in the Queue.
/// This sends the first available packet to dequeue to the decoder
/// and uses the decoded frames (if any) to their corresponding
/// ProcessFrame method.
/// </summary>
/// <returns>The list of managed frames decoded from the dequeued packet; empty when no packet was queued or no frame was produced.</returns>
private List<MediaFrame> DecodeNextPacketInternal()
{
    var result = new List<MediaFrame>();

    // Ensure there is at least one packet in the queue
    if (PacketBufferCount <= 0) { return (result); }

    // Setup some initial state variables
    var packet = Packets.Dequeue();

    // The packets are always sent. We dequeue them and keep a reference to them
    // in the SentPackets queue
    SentPackets.Push(packet);

    var receiveFrameResult = 0;

    if (MediaType == MediaType.Audio || MediaType == MediaType.Video)
    {
        // If it's audio or video, we use the new API and the decoded frames are stored in AVFrame
        // Let us send the packet to the codec for decoding a frame of uncompressed data later.
        // An empty packet is mapped to null, which per the send/receive API puts the decoder
        // into draining (flush) mode.
        // NOTE(review): sendPacketResult is never inspected; send errors (e.g. AVERROR(EAGAIN),
        // AVERROR_EOF) are silently ignored here — confirm this is intentional.
        var sendPacketResult = ffmpeg.avcodec_send_packet(CodecContext, IsEmptyPacket(packet) ? null : packet);

        // Let's check and see if we can get 1 or more frames from the packet we just sent to the decoder.
        // Audio packets will typically contain 1 or more audioframes
        // Video packets might require several packets to decode 1 frame
        MediaFrame managedFrame = null;

        // Keep receiving until the decoder reports anything other than success
        // (AVERROR(EAGAIN) = needs more input, AVERROR_EOF = fully drained, or a real error).
        while (receiveFrameResult == 0)
        {
            // Allocate a frame in unmanaged memory and
            // Try to receive the decompressed frame data
            var outputFrame = ffmpeg.av_frame_alloc();
            RC.Current.Add(outputFrame, $"327: {nameof(MediaComponent)}[{MediaType}].{nameof(DecodeNextPacketInternal)}()");
            receiveFrameResult = ffmpeg.avcodec_receive_frame(CodecContext, outputFrame);

            try
            {
                managedFrame = null;
                if (receiveFrameResult == 0)
                {
                    // Send the frame to processing. On success the managed frame takes
                    // ownership of outputFrame (presumably freed on its disposal — confirm
                    // in CreateFrameSource implementations).
                    managedFrame = CreateFrameSource(outputFrame);
                    if (managedFrame != null)
                    {
                        result.Add(managedFrame);
                    }
                }

                // No managed frame took ownership (receive failed or CreateFrameSource
                // returned null): release the unmanaged frame ourselves.
                if (managedFrame == null)
                {
                    RC.Current.Remove(outputFrame);
                    ffmpeg.av_frame_free(&outputFrame);
                }
            }
            catch
            {
                // Release the frame as the decoded data could not be processed
                RC.Current.Remove(outputFrame);
                ffmpeg.av_frame_free(&outputFrame);
                throw;
            }
        }
    }
    else if (MediaType == MediaType.Subtitle)
    {
        // For subtitles we use the old API (new API send_packet/receive_frame) is not yet available
        var gotFrame = 0;
        var outputFrame = new AVSubtitle(); // We create the struct in managed memory as there is no API to create a subtitle.
        receiveFrameResult = ffmpeg.avcodec_decode_subtitle2(CodecContext, &outputFrame, &gotFrame, packet);

        // Check if there is an error decoding the packet.
        // If there is, remove the packet clear the sent packets
        if (receiveFrameResult < 0)
        {
            ffmpeg.avsubtitle_free(&outputFrame);
            SentPackets.Clear();
            Container.Logger?.Log(MediaLogMessageType.Error, $"{MediaType}: Error decoding. Error Code: {receiveFrameResult}");
        }
        else
        {
            // Process the first frame if we got it from the packet
            // Note that there could be more frames (subtitles) in the packet
            if (gotFrame != 0)
            {
                try
                {
                    // Send the frame to processing. The managed frame is expected to take
                    // ownership of the AVSubtitle payload (freed on its disposal — confirm).
                    var managedFrame = CreateFrameSource(&outputFrame);
                    if (managedFrame == null)
                    {
                        throw new MediaContainerException($"{MediaType} Component does not implement {nameof(CreateFrameSource)}");
                    }

                    result.Add(managedFrame);
                }
                catch
                {
                    // Processing failed; nothing took ownership — release the subtitle payload.
                    ffmpeg.avsubtitle_free(&outputFrame);
                    throw;
                }
            }

            // Let's check if we have more decoded frames from the same single packet
            // Packets may contain more than 1 frame and the decoder is drained
            // by passing an empty packet (data = null, size = 0)
            // NOTE(review): avcodec_decode_subtitle2 returns the number of bytes consumed,
            // so with an empty packet the '> 0' condition likely ends this loop after one
            // pass at most — verify the drain actually iterates as intended.
            while (gotFrame != 0 && receiveFrameResult > 0)
            {
                outputFrame = new AVSubtitle();
                // av_packet_alloc yields data = null, size = 0: the empty drain packet.
                var emptyPacket = ffmpeg.av_packet_alloc();
                RC.Current.Add(emptyPacket, $"406: {nameof(MediaComponent)}[{MediaType}].{nameof(DecodeNextPacketInternal)}()");

                // Receive the frames in a loop
                try
                {
                    receiveFrameResult = ffmpeg.avcodec_decode_subtitle2(CodecContext, &outputFrame, &gotFrame, emptyPacket);
                    if (gotFrame != 0 && receiveFrameResult > 0)
                    {
                        // Send the subtitle to processing
                        var managedFrame = CreateFrameSource(&outputFrame);
                        if (managedFrame == null)
                        {
                            throw new MediaContainerException($"{MediaType} Component does not implement {nameof(CreateFrameSource)}");
                        }

                        result.Add(managedFrame);
                    }
                }
                catch
                {
                    // Processing failed; release the subtitle payload ourselves.
                    ffmpeg.avsubtitle_free(&outputFrame);
                    throw;
                }
                finally
                {
                    // free the empty packet
                    RC.Current.Remove(emptyPacket);
                    ffmpeg.av_packet_free(&emptyPacket);
                }
            }
        }
    }

    // Release the sent packets if 1 or more frames were received in the packet
    // (or we are fully drained at end of stream with nothing left to decode).
    if (result.Count >= 1 || (Container.IsAtEndOfStream && IsEmptyPacket(packet) && PacketBufferCount == 0))
    {
        // We clear the sent packet queue (releasing packet from unmanaged memory also)
        // because we got at least 1 frame from the packet.
        SentPackets.Clear();
    }

    return (result);
}
/// <summary>
/// Decoder thread loop. Parks on <c>decodeARE</c> until signaled, then pulls packets
/// from <c>packets</c>, feeds them to the codec (send_packet/receive_frame for
/// audio/video, avcodec_decode_subtitle2 for subtitles) and enqueues the resulting
/// managed frames into <c>frames</c>. Exits the inner loop on pause, exhaustion,
/// EOF or too many consecutive decode errors, then parks again.
/// </summary>
public void Decode()
{
    //int xf = 0;
    AVPacket* pkt;

    // Outer loop: one iteration per play session; parks on decodeARE between sessions.
    while (true)
    {
        if (status != Status.END) { status = Status.READY; }
        decodeARE.Reset();
        decodeARE.WaitOne();

        status = Status.PLAY;
        forcePause = false;
        bool shouldStop = false;
        int allowedErrors = decCtx.opt.demuxer.MaxErrors; // error budget for this session
        int ret = -1;
        Log("Started");

        // Wait for demuxer to come up
        if (demuxer.status == Status.READY)
        {
            demuxer.demuxARE.Set();
            while (!demuxer.isPlaying && demuxer.status != Status.END) { Thread.Sleep(1); }
        }

        // Inner loop: decode packets until pause/EOF/error.
        while (true)
        {
            // No Packets || Max Frames Brakes
            // Throttle: wait while the input queue is empty or the decoded-frame queue
            // for this media type is above its configured maximum.
            if (packets.Count == 0
                || (type == Type.Audio && frames.Count > decCtx.opt.audio.MaxDecodedFrames)
                || (type == Type.Video && frames.Count > decCtx.opt.video.MaxDecodedFrames)
                || (type == Type.Subs && frames.Count > decCtx.opt.subs.MaxDecodedFrames))
            {
                shouldStop = false;
                //isWaiting = true;
                do
                {
                    if (!decCtx.isPlaying || forcePause) // Proper Pause
                    {
                        Log("Pausing");
                        shouldStop = true;
                        break;
                    }
                    else if (packets.Count == 0 && demuxer.status == Status.END) // Drain
                    {
                        Log("Draining");
                        break;
                    }
                    //else if (packets.Count == 0 && (!demuxer.isPlaying || demuxer.isWaiting)) // No reason to run
                    else if (packets.Count == 0 && (!demuxer.isPlaying || ((!isEmbedded || type == Type.Video) && demuxer.isWaiting))) // No reason to run
                    {
                        Log("Exhausted " + isPlaying);
                        shouldStop = true;
                        break;
                    }

                    Thread.Sleep(10);
                } while (packets.Count == 0
                    || (type == Type.Audio && frames.Count > decCtx.opt.audio.MaxDecodedFrames)
                    || (type == Type.Video && frames.Count > decCtx.opt.video.MaxDecodedFrames)
                    || (type == Type.Subs && frames.Count > decCtx.opt.subs.MaxDecodedFrames));
                //isWaiting = false;

                if (shouldStop) { break; }
            }

            if (packets.Count == 0 && demuxer.status == Status.END)
            {
                if (type == Type.Video)
                {
                    // Check case pause while draining
                    // Null packet puts the codec into drain mode via avcodec_send_packet below.
                    Log("Draining...");
                    pkt = null;
                }
                else
                {
                    status = Status.END;
                    Log("EOF");
                    break;
                }
            }
            else
            {
                packets.TryDequeue(out IntPtr pktPtr);
                pkt = (AVPacket*)pktPtr;

                // Subtitles: decoded inline with the legacy API; this branch always
                // continues (or breaks) and never reaches the send/receive path below.
                if (type == Type.Subs)
                {
                    MediaFrame mFrame = new MediaFrame();
                    mFrame.pts = pkt->pts;
                    mFrame.timestamp = (long)((mFrame.pts * info.Timebase)) + opt.audio.LatencyTicks + opt.subs.DelayTicks;
                    //Log(Utils.TicksToTime((long)(mFrame.pts * demuxer.streams[st->index].timebase)) + " | pts -> " + mFrame.pts);
                    //xf++;
                    if (mFrame.pts == AV_NOPTS_VALUE)
                    {
                        av_packet_free(&pkt);
                        continue;
                    }

                    int gotFrame = 0;
                    AVSubtitle sub = new AVSubtitle();

                    // drain mode todo
                    // pkt->data set to NULL && pkt->size = 0 until it stops returning subtitles
                    ret = avcodec_decode_subtitle2(codecCtx, &sub, &gotFrame, pkt);
                    if (ret < 0)
                    {
                        // NOTE(review): pkt is not freed on this path — looks like a packet leak; confirm.
                        allowedErrors--;
                        Log($"[ERROR-2] {Utils.ErrorCodeToMsg(ret)} ({ret})");

                        if (allowedErrors == 0)
                        {
                            Log("[ERROR-0] Too many errors!");
                            break;
                        }

                        continue;
                    }

                    // NOTE(review): pkt is not freed on this path either — confirm intended.
                    if (gotFrame < 1 || sub.num_rects < 1) { continue; }

                    MediaFrame.ProcessSubsFrame(this, mFrame, &sub);

                    frames.Enqueue(mFrame);
                    avsubtitle_free(&sub);
                    av_packet_free(&pkt);
                    continue;
                }
            }

            // Audio/Video path: feed the packet (or null to drain) to the codec.
            lock (demuxer.decCtx.device) ret = avcodec_send_packet(codecCtx, pkt);

            if (ret != 0 && ret != AVERROR(EAGAIN))
            {
                if (ret == AVERROR_EOF)
                {
                    status = Status.END;
                    Log("EOF");
                    break;
                }
                else //if (ret == AVERROR_INVALIDDATA) // We also get Error number -16976906 occurred
                {
                    // NOTE(review): pkt is not freed on this error path — possible leak; confirm.
                    allowedErrors--;
                    Log($"[ERROR-2] {Utils.ErrorCodeToMsg(ret)} ({ret})");

                    if (allowedErrors == 0)
                    {
                        Log("[ERROR-0] Too many errors!");
                        break;
                    }

                    continue;
                }
            }

            // NOTE(review): on AVERROR(EAGAIN) the codec did not consume the packet, yet it
            // is freed here, dropping its data — verify this cannot occur / is acceptable.
            av_packet_free(&pkt);

            // Drain all frames the codec can produce for the input sent so far.
            while (true)
            {
                lock (demuxer.decCtx.device) ret = avcodec_receive_frame(codecCtx, frame);

                if (ret == 0)
                {
                    MediaFrame mFrame = new MediaFrame();
                    // NOTE(review): ternary looks inverted — it uses frame->pts when
                    // best_effort_timestamp == AV_NOPTS_VALUE is FALSE; confirm intent.
                    mFrame.pts = frame->best_effort_timestamp == AV_NOPTS_VALUE ? frame->pts : frame->best_effort_timestamp;
                    if (mFrame.pts == AV_NOPTS_VALUE)
                    {
                        av_frame_unref(frame);
                        continue;
                    }
                    //Log(Utils.TicksToTime((long)(mFrame.pts * demuxer.streams[st->index].Timebase)) + " | pts -> " + mFrame.pts);

                    if (type == Type.Video)
                    {
                        // HW acceleration silently fell back to software if no hw frames context.
                        if (hwAccelSuccess && frame->hw_frames_ctx == null) { hwAccelSuccess = false; }
                        mFrame.timestamp = ((long)(mFrame.pts * info.Timebase) - demuxer.streams[st->index].StartTime) + opt.audio.LatencyTicks;
                        if (MediaFrame.ProcessVideoFrame(this, mFrame, frame) != 0) { mFrame = null; }
                    }
                    else // Audio
                    {
                        mFrame.timestamp = ((long)(mFrame.pts * info.Timebase) - demuxer.streams[st->index].StartTime) + opt.audio.DelayTicks + (demuxer.streams[st->index].StartTime - demuxer.decCtx.vDecoder.info.StartTime);
                        if (MediaFrame.ProcessAudioFrame(this, mFrame, frame) < 0) { mFrame = null; }
                    }

                    // mFrame == null means processing rejected the frame; drop it silently.
                    if (mFrame != null)
                    {
                        frames.Enqueue(mFrame);
                        //xf++;
                    }

                    av_frame_unref(frame);
                    continue;
                }

                av_frame_unref(frame);
                break;
            }

            if (ret == AVERROR_EOF)
            {
                status = Status.END;
                Log("EOF");
                // When the sibling decoder is also stopped, mark the whole context ended.
                if (type == Type.Video && decCtx.aDecoder.status != Status.PLAY)
                {
                    Log("EOF All");
                    decCtx.status = Status.END;
                }
                else if (type == Type.Audio && decCtx.vDecoder.status != Status.PLAY)
                {
                    Log("EOF All");
                    decCtx.status = Status.END;
                }

                break;
            }

            if (ret != AVERROR(EAGAIN))
            {
                Log($"[ERROR-3] {Utils.ErrorCodeToMsg(ret)} ({ret})");
                break;
            }
        }

        Log($"Done {(allowedErrors == decCtx.opt.demuxer.MaxErrors ? "" : $"[Errors: {decCtx.opt.demuxer.MaxErrors - allowedErrors}]")}");
    }
}