/// <summary>
/// Writes the packet's text into the current row starting at the cursor column.
/// Characters are written to the Display state when scrolling, or to the
/// off-screen Buffer state when buffered.
/// </summary>
/// <param name="packet">The closed caption text packet to write.</param>
/// <returns>True when the on-screen (Display) state changed and a repaint is needed.</returns>
private bool ProcessTextPacket(ClosedCaptionPacket packet)
{
    // Text packets are only meaningful while in a captioning state.
    if (StateMode != ParserStateMode.Scrolling && StateMode != ParserStateMode.Buffered)
        return false;

    var isScrolling = StateMode == ParserStateMode.Scrolling;
    var writtenCount = 0;
    var column = CursorColumnIndex;

    // Copy characters until we run out of text or run off the right edge.
    while (column < ColumnCount && writtenCount < packet.Text.Length)
    {
        var targetCell = isScrolling
            ? State[CursorRowIndex][column].Display
            : State[CursorRowIndex][column].Buffer;

        targetCell.Character = packet.Text[writtenCount];
        targetCell.IsItalics = IsItalics;
        targetCell.IsUnderlined = IsUnderlined;

        writtenCount++;
        column++;
    }

    // Advance the cursor past the characters we just wrote.
    CursorColumnIndex += writtenCount;

    // Only direct-to-display (scrolling) writes require a repaint.
    return isScrolling;
}
/// <summary>
/// Initializes a new instance of the <see cref="VideoFrame" /> class.
/// </summary>
/// <param name="frame">The frame.</param>
/// <param name="component">The video component.</param>
internal VideoFrame(AVFrame *frame, VideoComponent component)
    : base(frame, component, MediaType.Video)
{
    // The stream's guessed frame rate drives duration estimation when the
    // packet carries no duration of its own.
    var timeBase = ffmpeg.av_guess_frame_rate(component.Container.InputContext, component.Stream, frame);
    var mainOffset = component.Container.Components.Main.StartTime;

    // repeat_pict signals extra display time in half-frame units (e.g. telecine).
    var repeatFactor = 1d + (0.5d * frame->repeat_pict);

    // Prefer the demuxer-reported packet duration; otherwise estimate from the
    // frame rate. Note the inverted rational: duration = repeatFactor / rate.
    Duration = frame->pkt_duration <= 0 ?
        repeatFactor.ToTimeSpan(new AVRational { num = timeBase.den, den = timeBase.num }) :
        frame->pkt_duration.ToTimeSpan(component.Stream->time_base);

    // for video frames, we always get the best effort timestamp as dts and pts might
    // contain different times.
    frame->pts = frame->best_effort_timestamp;
    HasValidStartTime = frame->pts != ffmpeg.AV_NOPTS_VALUE;

    // Start time is made relative to the main component's start offset;
    // frames with no timestamp are pinned to zero.
    StartTime = frame->pts == ffmpeg.AV_NOPTS_VALUE ?
        TimeSpan.FromTicks(0) :
        TimeSpan.FromTicks(frame->pts.ToTimeSpan(StreamTimeBase).Ticks - mainOffset.Ticks);

    EndTime = TimeSpan.FromTicks(StartTime.Ticks + Duration.Ticks);

    // Picture Type, Number and SMTPE TimeCode
    PictureType = frame->pict_type;

    // When the codec did not number the frame (0), derive the picture number
    // from the frame's timing instead.
    DisplayPictureNumber = frame->display_picture_number == 0 ?
        Extensions.ComputePictureNumber(StartTime, Duration, 1) :
        frame->display_picture_number;

    CodedPictureNumber = frame->coded_picture_number;
    SmtpeTimeCode = Extensions.ComputeSmtpeTimeCode(component.StartTime, Duration, timeBase, DisplayPictureNumber);
    IsHardwareFrame = component.IsUsingHardwareDecoding;
    HardwareAcceleratorName = component.HardwareAccelerator?.Name;

    // Process side data such as CC packets
    for (var i = 0; i < frame->nb_side_data; i++)
    {
        var sideData = frame->side_data[i];

        // Get the Closed-Caption packets
        if (sideData->type != AVFrameSideDataType.AV_FRAME_DATA_A53_CC)
        {
            continue;
        }

        // Parse 3 bytes at a time
        for (var p = 0; p < sideData->size; p += 3)
        {
            // Each packet's timestamp is offset by its byte position so packet
            // ordering within the frame remains stable.
            var packet = new ClosedCaptionPacket(TimeSpan.FromTicks(StartTime.Ticks + p), sideData->data, p);
            if (packet.PacketType == CaptionsPacketType.NullPad || packet.PacketType == CaptionsPacketType.Unrecognized)
            {
                continue;
            }

            // at this point, we have valid CC data
            ClosedCaptions.Add(packet);
        }
    }
}
/// <summary>
/// Renders a text packet by writing each of its characters at the current
/// cursor position, advancing the column by one per character.
/// </summary>
/// <param name="c">The closed caption text packet.</param>
private void RenderTextPacket(ClosedCaptionPacket c)
{
    for (var i = 0; i < c.Text.Length; i++)
    {
        SetCurrentChar(c.Text[i].ToString());
        ColumnIndex++;
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="VideoFrame" /> class.
/// </summary>
/// <param name="frame">The frame.</param>
/// <param name="component">The component.</param>
internal VideoFrame(AVFrame *frame, MediaComponent component)
    : base(frame, component)
{
    m_Pointer = (AVFrame *)InternalPointer;

    // repeat_pict signals extra display time in half-frame units (e.g. telecine).
    var repeatFactor = 1d + (0.5d * frame->repeat_pict);
    var timeBase = ffmpeg.av_guess_frame_rate(component.Container.InputContext, component.Stream, frame);

    // Duration is estimated from the frame rate; note the inverted rational:
    // duration = repeatFactor / rate.
    Duration = repeatFactor.ToTimeSpan(new AVRational { num = timeBase.den, den = timeBase.num });

    // for video frames, we always get the best effort timestamp as dts and pts might
    // contain different times.
    frame->pts = ffmpeg.av_frame_get_best_effort_timestamp(frame);
    HasValidStartTime = frame->pts != ffmpeg.AV_NOPTS_VALUE;

    // Start time is made relative to the container's media start offset;
    // frames with no timestamp are pinned to zero.
    StartTime = frame->pts == ffmpeg.AV_NOPTS_VALUE ?
        TimeSpan.FromTicks(0) :
        TimeSpan.FromTicks(frame->pts.ToTimeSpan(StreamTimeBase).Ticks - component.Container.MediaStartTimeOffset.Ticks);

    EndTime = TimeSpan.FromTicks(StartTime.Ticks + Duration.Ticks);

    // Picture Number and SMTPE TimeCode
    // When the codec did not number the frame (0), derive the picture number
    // from the frame's timing instead.
    DisplayPictureNumber = frame->display_picture_number == 0 ?
        Extensions.ComputePictureNumber(StartTime, Duration, 1) :
        frame->display_picture_number;

    CodedPictureNumber = frame->coded_picture_number;
    SmtpeTimecode = Extensions.ComputeSmtpeTimeCode(component.StartTimeOffset, Duration, timeBase, DisplayPictureNumber);

    // Process side data such as CC packets
    for (var i = 0; i < frame->nb_side_data; i++)
    {
        var sideData = frame->side_data[i];

        // Get the Closed-Caption packets
        if (sideData->type == AVFrameSideDataType.AV_FRAME_DATA_A53_CC)
        {
            // Parse 3 bytes at a time
            for (var p = 0; p < sideData->size; p += 3)
            {
                var packet = new ClosedCaptionPacket(StartTime, sideData->data[p + 0], sideData->data[p + 1], sideData->data[p + 2]);
                if (packet.PacketType == CCPacketType.NullPad || packet.PacketType == CCPacketType.Unrecognized)
                {
                    continue;
                }

                // at this point, we have valid CC data
                ClosedCaptions.Add(packet);
            }

            // NOTE(review): this continue is redundant — it is the last
            // statement of the loop iteration.
            continue;
        }
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="VideoFrame" /> class.
/// </summary>
/// <param name="frame">The frame.</param>
/// <param name="component">The component.</param>
internal VideoFrame(AVFrame *frame, MediaComponent component)
    : base(frame, component)
{
    m_Pointer = (AVFrame *)InternalPointer;

    // for video frames, we always get the best effort timestamp as dts and pts might
    // contain different times.
    frame->pts = ffmpeg.av_frame_get_best_effort_timestamp(frame);

    // NOTE(review): when the pts is missing, this falls back to the media
    // start offset itself (not zero) — confirm that is intentional, as other
    // versions of this constructor pin missing timestamps to zero.
    StartTime = frame->pts == FFmpegEx.AV_NOPTS ?
        TimeSpan.FromTicks(component.Container.MediaStartTimeOffset.Ticks) :
        TimeSpan.FromTicks(frame->pts.ToTimeSpan(StreamTimeBase).Ticks - component.Container.MediaStartTimeOffset.Ticks);

    // repeat_pict signals extra display time in half-frame units; duration is
    // estimated as repeatFactor / rate (rational inverted on purpose).
    var repeatFactor = 1d + (0.5d * frame->repeat_pict);
    var timeBase = ffmpeg.av_guess_frame_rate(component.Container.InputContext, component.Stream, frame);
    Duration = repeatFactor.ToTimeSpan(new AVRational { num = timeBase.den, den = timeBase.num });
    EndTime = TimeSpan.FromTicks(StartTime.Ticks + Duration.Ticks);

    // Get the Closed-Caption packets
    for (var i = 0; i < frame->nb_side_data; i++)
    {
        var sideData = frame->side_data[i];
        if (sideData->type != AVFrameSideDataType.AV_FRAME_DATA_A53_CC)
        {
            continue;
        }

        // Parse 3 bytes at a time
        for (var p = 0; p < sideData->size; p += 3)
        {
            var packet = new ClosedCaptionPacket(StartTime, sideData->data[p + 0], sideData->data[p + 1], sideData->data[p + 2]);
            if (packet.PacketType == CCPacketType.NullPad || packet.PacketType == CCPacketType.Unrecognized)
            {
                continue;
            }

            // at this point, we have valid CC data
            ClosedCaptions.Add(packet);
        }
    }
}
/// <summary>
/// Applies a Preamble Address Code: moves the cursor to the row and indent the
/// packet specifies, and captures its italics/underline attributes.
/// </summary>
/// <param name="packet">The preamble packet.</param>
private void ProcessPreamblePacket(ClosedCaptionPacket packet)
{
    // Preambles are only meaningful while in a captioning state.
    if (StateMode != ParserStateMode.Scrolling && StateMode != ParserStateMode.Buffered)
        return;

    // Preamble rows are 1-based; convert to 0-based and clamp to the grid.
    var baseRow = packet.PreambleRow - 1;
    if (baseRow < 0)
    {
        baseRow = 0;
    }

    if (baseRow >= RowCount)
    {
        baseRow = RowCount - 1;
    }

    ScrollBaseRowIndex = baseRow;
    CursorRowIndex = baseRow;
    CursorColumnIndex = packet.PreambleIndent;
    IsItalics = packet.IsItalics;
    IsUnderlined = packet.IsUnderlined;
}
/// <summary>
/// Initializes a new instance of the <see cref="VideoFrame" /> class.
/// Computes timing, picture numbers, the SMPTE timecode string, and extracts
/// A53 closed-caption side data.
/// </summary>
/// <param name="frame">The frame.</param>
/// <param name="component">The component.</param>
internal VideoFrame(AVFrame *frame, MediaComponent component)
    : base(frame, component)
{
    // Matches ffmpeg's AV_TIMECODE_STR_SIZE (16 characters plus null terminator).
    const int AV_TIMECODE_STR_SIZE = 16 + 1;

    m_Pointer = (AVFrame *)InternalPointer;

    // repeat_pict signals extra display time in half-frame units; duration is
    // estimated as repeatFactor / rate (rational inverted on purpose).
    var repeatFactor = 1d + (0.5d * frame->repeat_pict);
    var timeBase = ffmpeg.av_guess_frame_rate(component.Container.InputContext, component.Stream, frame);
    Duration = repeatFactor.ToTimeSpan(new AVRational { num = timeBase.den, den = timeBase.num });

    // for video frames, we always get the best effort timestamp as dts and pts might
    // contain different times.
    frame->pts = ffmpeg.av_frame_get_best_effort_timestamp(frame);
    HasValidStartTime = frame->pts != FFmpegEx.AV_NOPTS;
    StartTime = frame->pts == FFmpegEx.AV_NOPTS ?
        TimeSpan.FromTicks(component.Container.MediaStartTimeOffset.Ticks) :
        TimeSpan.FromTicks(frame->pts.ToTimeSpan(StreamTimeBase).Ticks - component.Container.MediaStartTimeOffset.Ticks);
    EndTime = TimeSpan.FromTicks(StartTime.Ticks + Duration.Ticks);

    // Fix: keep the codec-provided picture number when it is present.
    // Previously the non-zero branch discarded it and substituted 0.
    DisplayPictureNumber = frame->display_picture_number == 0 ?
        (int)Math.Round((double)StartTime.Ticks / Duration.Ticks, 0) :
        frame->display_picture_number;
    CodedPictureNumber = frame->coded_picture_number;

    // SMTPE timecode calculation
    var timeCodeInfo = (AVTimecode *)ffmpeg.av_malloc((ulong)Marshal.SizeOf(typeof(AVTimecode)));
    var startFrameNumber = (int)Math.Round((double)component.StartTimeOffset.Ticks / Duration.Ticks, 0);
    ffmpeg.av_timecode_init(timeCodeInfo, timeBase, 0, startFrameNumber, null);

    // NTSC (30000/1001) requires drop-frame adjustment of the frame number.
    var isNtsc = timeBase.num == 30000 && timeBase.den == 1001;
    var frameNumber = isNtsc ?
        ffmpeg.av_timecode_adjust_ntsc_framenum2(DisplayPictureNumber, (int)timeCodeInfo->fps) :
        DisplayPictureNumber;

    // Fix: pass the (possibly NTSC-adjusted) frame number to the SMPTE
    // conversion. Previously frameNumber was computed but never used and the
    // unadjusted DisplayPictureNumber was passed instead.
    var timeCode = ffmpeg.av_timecode_get_smpte_from_framenum(timeCodeInfo, frameNumber);
    var timeCodeBuffer = (byte *)ffmpeg.av_malloc(AV_TIMECODE_STR_SIZE);
    ffmpeg.av_timecode_make_smpte_tc_string(timeCodeBuffer, timeCode, 1);
    SmtpeTimecode = Marshal.PtrToStringAnsi(new IntPtr(timeCodeBuffer));

    // Release the natively-allocated scratch buffers.
    ffmpeg.av_free(timeCodeInfo);
    ffmpeg.av_free(timeCodeBuffer);

    // Process side data such as CC packets
    for (var i = 0; i < frame->nb_side_data; i++)
    {
        var sideData = frame->side_data[i];

        // Get the Closed-Caption packets
        if (sideData->type != AVFrameSideDataType.AV_FRAME_DATA_A53_CC)
        {
            continue;
        }

        // Parse 3 bytes at a time
        for (var p = 0; p < sideData->size; p += 3)
        {
            var packet = new ClosedCaptionPacket(StartTime, sideData->data[p + 0], sideData->data[p + 1], sideData->data[p + 2]);
            if (packet.PacketType == CCPacketType.NullPad || packet.PacketType == CCPacketType.Unrecognized)
            {
                continue;
            }

            // at this point, we have valid CC data
            ClosedCaptions.Add(packet);
        }
    }
}
/// <summary>
/// Processes a command packet: updates the parser state machine, the display
/// and buffer grids, and the cursor as required by the CEA-608 command.
/// </summary>
/// <param name="packet">The command packet.</param>
/// <returns>True when the visible (Display) state changed and a repaint is needed.</returns>
private bool ProcessCommandPacket(ClosedCaptionPacket packet)
{
    var needsRepaint = false;
    var command = packet.Command;

    // Set the scroll size if we have a rollup command
    switch (command)
    {
        case CaptionsCommand.RollUp2:
            ScrollSize = 2;
            break;
        case CaptionsCommand.RollUp3:
            ScrollSize = 3;
            break;
        case CaptionsCommand.RollUp4:
            ScrollSize = 4;
            break;
        default:
            break;
    }

    switch (command)
    {
        case CaptionsCommand.StartCaption:
            {
                // Paint-on captioning: write directly to the display.
                StateMode = ParserStateMode.Scrolling;
                ScrollBaseRowIndex = DefaultBaseRowIndex;
                CursorRowIndex = ScrollBaseRowIndex;
                CursorColumnIndex = default;
                IsItalics = default;
                IsUnderlined = default;
                break;
            }

        case CaptionsCommand.ResumeNonCaption:
        case CaptionsCommand.StartNonCaption:
            {
                // Non-caption (e.g. text mode) data is ignored by this parser.
                StateMode = ParserStateMode.None;
                break;
            }

        case CaptionsCommand.RollUp2:
        case CaptionsCommand.RollUp3:
        case CaptionsCommand.RollUp4:
            {
                // Update the state to scrolling
                StateMode = ParserStateMode.Scrolling;

                // Clear rows outside of the scrolling area
                for (var r = 0; r < RowCount; r++)
                {
                    if (r > ScrollBaseRowIndex - ScrollSize && r <= ScrollBaseRowIndex)
                    {
                        continue;
                    }

                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[r][c].Display.Clear();
                    }
                }

                IsItalics = default;
                IsUnderlined = default;
                needsRepaint = true;
                break;
            }

        case CaptionsCommand.Backspace:
            {
                if (StateMode == ParserStateMode.Buffered)
                {
                    State[CursorRowIndex][CursorColumnIndex].Buffer.Clear();
                    CursorColumnIndex--;
                }
                else if (StateMode == ParserStateMode.Scrolling)
                {
                    State[CursorRowIndex][CursorColumnIndex].Display.Clear();
                    CursorColumnIndex--;
                    needsRepaint = true;
                }

                // Fix: never let the cursor cross the left edge. A backspace at
                // column 0 previously left the cursor at -1, making the next
                // text packet index State[row][-1] and throw.
                if (CursorColumnIndex < 0)
                {
                    CursorColumnIndex = 0;
                }

                break;
            }

        case CaptionsCommand.NewLine:
            {
                if (StateMode == ParserStateMode.Scrolling)
                {
                    // Scroll: move the current row's cells up one row, then
                    // clear the current row and return to the base row.
                    var targetRowIndex = CursorRowIndex - 1;
                    if (targetRowIndex < 0)
                    {
                        targetRowIndex = 0;
                    }

                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[targetRowIndex][c].Display.CopyFrom(State[CursorRowIndex][c].Display);
                        State[CursorRowIndex][c].Display.Clear();
                    }

                    CursorRowIndex = ScrollBaseRowIndex;
                    CursorColumnIndex = default;
                    IsItalics = default;
                    IsUnderlined = default;
                    needsRepaint = true;
                }

                break;
            }

        case CaptionsCommand.Resume:
            {
                // Pop-on captioning: subsequent writes go to the back buffer.
                StateMode = ParserStateMode.Buffered;
                CursorRowIndex = default;
                CursorColumnIndex = default;
                IsItalics = default;
                IsUnderlined = default;
                break;
            }

        case CaptionsCommand.ClearLine:
            {
                // Clears the cursor's row in whichever grid is active.
                if (StateMode == ParserStateMode.Buffered)
                {
                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[CursorRowIndex][c].Buffer.Clear();
                    }
                }
                else if (StateMode == ParserStateMode.Scrolling)
                {
                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[CursorRowIndex][c].Display.Clear();
                    }

                    needsRepaint = true;
                }

                if (StateMode == ParserStateMode.Buffered || StateMode == ParserStateMode.Scrolling)
                {
                    CursorColumnIndex = default;
                    IsItalics = default;
                    IsUnderlined = default;
                }

                break;
            }

        case CaptionsCommand.ClearBuffer:
            {
                // Only the back buffer is affected; no repaint needed.
                for (var r = 0; r < RowCount; r++)
                {
                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[r][c].Buffer.Clear();
                    }
                }

                IsItalics = default;
                IsUnderlined = default;
                break;
            }

        case CaptionsCommand.ClearScreen:
            {
                for (var r = 0; r < RowCount; r++)
                {
                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[r][c].Display.Clear();
                    }
                }

                IsItalics = default;
                IsUnderlined = default;
                needsRepaint = true;
                break;
            }

        case CaptionsCommand.EndCaption:
            {
                // Flip the back buffer onto the display and reset the parser.
                StateMode = ParserStateMode.None;
                CursorRowIndex = default;
                CursorColumnIndex = default;
                IsItalics = default;
                IsUnderlined = default;
                needsRepaint = true;

                for (var r = 0; r < RowCount; r++)
                {
                    for (var c = 0; c < ColumnCount; c++)
                    {
                        State[r][c].DisplayBuffer();
                    }
                }

                break;
            }

        case CaptionsCommand.AlarmOff:
        case CaptionsCommand.AlarmOn:
        case CaptionsCommand.None:
        default:
            {
                break;
            }
    }

    return needsRepaint;
}
/// <summary>
/// Writes the packets and demuxes them into its independent channel buffers.
/// Closed caption packets from the current and subsequent video blocks are fed
/// into a linear, timestamp-ordered buffer and then demuxed into per-channel
/// buffers up to the end of the current block.
/// </summary>
/// <param name="currentBlock">The current block.</param>
/// <param name="mediaCore">The media core.</param>
public void Write(VideoBlock currentBlock, MediaEngine mediaCore)
{
    lock (SyncLock)
    {
        // Feed the available closed captions into the packet buffer
        // We pre-feed the video blocks to avoid any skipping of CC packets
        // as a result of skipping video frame render calls
        var block = currentBlock;
        while (block != null)
        {
            // Skip the block if we already wrote its CC packets
            // (WriteTag records the start time of the last block written).
            if (block.ClosedCaptions.Count > 0 && block.StartTime.Ticks > WriteTag.Ticks)
            {
                // Add the CC packets to the linear, ordered packet buffer
                foreach (var cc in block.ClosedCaptions)
                {
                    PacketBuffer[cc.Timestamp.Ticks] = cc;
                }

                // Update the Write Tag and move on to the next block
                WriteTag = block.StartTime;
            }

            block = mediaCore.Blocks[currentBlock.MediaType].Next(block) as VideoBlock;
        }

        // Now, we need to demux the packets from the linear packet buffer
        // into the corresponding independent channel packet buffers
        var maxPosition = currentBlock.EndTime.Ticks; // The maximum demuxer position
        var lastDemuxedKey = long.MinValue; // The demuxer position
        var linearBufferKeys = PacketBuffer.Keys.OrderBy(k => k).ToArray();

        foreach (var position in linearBufferKeys)
        {
            // Get a reference to the packet to demux
            var packet = PacketBuffer[position];

            // Stop demuxing packets beyond the current video block
            if (position > maxPosition)
            {
                break;
            }

            // Update the last processed position
            lastDemuxedKey = position;

            // Skip packets that don't have a valid field parity or that are null
            if (packet.FieldParity != 1 && packet.FieldParity != 2)
            {
                continue;
            }

            if (packet.PacketType == CaptionsPacketType.NullPad || packet.PacketType == CaptionsPacketType.Unrecognized)
            {
                continue;
            }

            // Update the last channel state if we have all available info (both parity and channel)
            // This is because some packets will arrive with Field data but not with channel data,
            // which means we just use the prior channel from the same field.
            if (packet.FieldChannel == 1 || packet.FieldChannel == 2)
            {
                if (packet.FieldParity == 1)
                {
                    Field1LastChannel = packet.FieldChannel;
                }
                else
                {
                    Field2LastChannel = packet.FieldChannel;
                }
            }

            // Compute the channel using the packet's field parity and the last available channel state
            var channel = ClosedCaptionPacket.ComputeChannel(
                packet.FieldParity, (packet.FieldParity == 1) ? Field1LastChannel : Field2LastChannel);

            // Demux the packet to the corresponding channel buffer so the channels are independent
            ChannelPacketBuffer[channel][position] = packet;
        }

        // Remove the demuxed packets from the general (linear) packet buffer
        foreach (var bufferKey in linearBufferKeys)
        {
            if (bufferKey > lastDemuxedKey)
            {
                break;
            }

            PacketBuffer.Remove(bufferKey);
        }

        // Trim all buffers to their max length
        TrimBuffers();
    }
}
/// <summary>
/// Applies a miscellaneous (control) command packet to the rendering state:
/// roll-ups, new line, backspace, and the various clear operations.
/// </summary>
/// <param name="c">The closed caption command packet.</param>
private void RenderMiscCommandPacket(ClosedCaptionPacket c)
{
    switch (c.MiscCommand)
    {
        case CCMiscCommandType.RollUp2:
            // TODO: B.5 Base Row Implementation
            ShiftTextUp(RowIndex - 2);
            RowIndex = 14;
            ColumnIndex = 0;
            break;

        case CCMiscCommandType.RollUp3:
            // TODO: B.5 Base Row Implementation
            ShiftTextUp(10);
            RowIndex = 14;
            ColumnIndex = 0;
            break;

        case CCMiscCommandType.RollUp4:
            // TODO: B.5 Base Row Implementation
            ShiftTextUp(9);
            RowIndex = 14;
            ColumnIndex = 0;
            break;

        case CCMiscCommandType.NewLine:
            // Scroll up, move to the start of the next row, and blank it.
            ColumnIndex = 0;
            ShiftTextUp(RowIndex - 1);
            RowIndex += 1;
            ClearLine(RowIndex);
            break;

        case CCMiscCommandType.Backspace:
            ColumnIndex -= 1;
            ClearCurrentChar();
            break;

        case CCMiscCommandType.ClearLine:
        case CCMiscCommandType.Resume:
            // Both commands blank the current row and return to column 0.
            ClearLine(RowIndex);
            ColumnIndex = 0;
            break;

        case CCMiscCommandType.ClearBuffer:
        case CCMiscCommandType.ClearScreen:
            ClearScreen();
            RowIndex = 0;
            ColumnIndex = 0;
            break;

        default:
            System.Diagnostics.Debug.WriteLine($"CC Packet not rendered: {c}");
            break;
    }
}