// QBoxCompare is not used
private int QBoxCompare(QBox x, QBox y)
{
  if (x == null)
  {
    if (y == null)
    {
      // If x is null and y is null, they're equal.
      return(0);
    }
    else
    {
      // If x is null and y is not null, y is greater.
      return(-1);
    }
  }
  else // If x is not null...
  {
    if (y == null)
    {
      // ...and y is null, x is greater.
      return(1);
    }
    else
    {
      // ...and y is not null, compare the time stamps of the two QBoxes.
      return(x.mSampleCTS.CompareTo(y.mSampleCTS));
    }
  }
}
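// Hypothetical usage sketch (QBoxCompare is currently unused): the method matches
// the Comparison<QBox> delegate, so a batch of qboxes could be ordered by
// composition time stamp, nulls first. SortByCts is illustrative only and is not
// part of the original source.
private void SortByCts(List<QBox> boxes)
{
  boxes.Sort(QBoxCompare); // ascending by mSampleCTS
}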
public override void PrepareSampleWriting(List<StreamDataBlockInfo> sampleLocations, ref ulong currMdatOffset)
{
  bool firstSlice = true;
  long oneSecTicks = TimeSpan.FromSeconds(1.0).Ticks;
  _qBoxes.Clear(); // discard boxes from previous batch
  foreach (StreamDataBlockInfo blockInfo in sampleLocations)
  {
    ulong timeStamp = ((ulong)blockInfo.SliceDuration * (ulong)TimeScale);
    timeStamp /= (ulong)oneSecTicks;
    string trackType = (Codec.CodecType == CodecTypes.Audio) ? "Audio" :
                       (Codec.CodecType == CodecTypes.Video) ? "Video" : "Unknown";
    ulong sampleFlags = ((blockInfo.SliceType == SliceType.IFrame) || (trackType == "Audio" && firstSlice))
                          ? QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT : 0;
    QBox qbox = new QBox(blockInfo.SliceSize, 0u, _prevTimeStamp, trackType, sampleFlags);
    _prevTimeStamp += timeStamp;
    _qBoxes.Add(qbox);
    if (blockInfo.SliceSize > 0)
    {
      // for audio, first two qboxes will be marked for sync because very first one is empty
      firstSlice = false;
    }
  }
  base.PrepareSampleWriting(sampleLocations, ref currMdatOffset); // to update duration
}
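// Worked example of the tick-to-timescale conversion above (illustrative numbers,
// not from the original source): with TimeScale = 90000 (a common video clock) and
// blockInfo.SliceDuration = 333667 hundred-nanosecond ticks (~33.37 ms),
// timeStamp = 333667 * 90000 / 10,000,000 = 3003 timescale units,
// i.e. one frame of 29.97 fps video.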
/// <summary>
/// Default constructor.
/// For writing to a QBox file.
/// </summary>
public QBoxTrackFormat()
{
  firstQB = null;
  audioMetaSample = null;
  videoMetaSample = null;
  DurationIn100NanoSecs = 0;
}
private void GetAudioPrivateCodecData()
{
  WaveFormatEx waveFormat = new WaveFormatEx();
  waveFormat.BitsPerSample = (short)this.SampleSize;
  waveFormat.AvgBytesPerSec = (this.SampleSize / 8) * this.SampleRate * this.ChannelCount;
  waveFormat.Channels = (short)this.ChannelCount;
  waveFormat.FormatTag = WaveFormatEx.FormatMpegHEAAC; // 0xFF; // WaveFormatEx.FormatPCM; // Raw_AAC
  waveFormat.SamplesPerSec = (int)this.SampleRate;
  waveFormat.BlockAlign = 1; // (short)(waveFormat.Channels * (waveFormat.BitsPerSample / 8));
  byte[] config = QBox.GetAudioSpecificConfig(this.audioMetaSample);
  waveFormat.ext = new byte[12 + config.Length];
  for (int i = 0; i < 12; i++)
  {
    waveFormat.ext[i] = 0;
  }
  //waveFormat.ext[0] = 3; // payload type
  waveFormat.Size = (short)waveFormat.ext.Length;
  for (int i = 12, j = 0; i < waveFormat.Size; i++, j++)
  {
    waveFormat.ext[i] = config[j];
  }
  waveFormat.ValidateWaveFormat();
  Codec.PrivateCodecData = waveFormat.ToHexString();
}
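// The 12 zero bytes written into waveFormat.ext above appear to mirror the
// HEAACWAVEINFO extension that follows a WAVEFORMATEX in the Windows HE-AAC
// wave format (wPayloadType, wAudioProfileLevelIndication, wStructType,
// wReserved1, dwReserved2 = 12 bytes in total), with the raw AudioSpecificConfig
// appended after them. This reading is an assumption based on the layout, not
// stated in the original source; zeroed fields mean "raw AAC, unspecified profile".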
public override void Write(BinaryWriter bw, int dataLen)
{
  base.Write(bw, dataLen);
  bw.Write((UInt32)QBox.BE32(version));
  bw.Write((UInt32)QBox.BE32(samplingFrequency));
  bw.Write((UInt32)QBox.BE32(channels));
}
public virtual int Read(BinaryReader br)
{
  int total = 0;
  boxSize = QBox.BE32(br.ReadUInt32());
  total += 4;
  boxType = QBox.BE32(br.ReadUInt32());
  total += 4;
  if (QMED_BOX_TYPE != boxType)
  {
    throw new Exception("Expecting a QMed, box type is incorrect");
  }
  boxFlags.value = QBox.BE32(br.ReadUInt32());
  total += 4;
  majorMediaType = QBox.BE32(br.ReadUInt32());
  total += 4;
  if (majorMediaType != expectedMediaType)
  {
    throw new Exception("Media type is not as expected");
  }
  minorMediaType = QBox.BE32(br.ReadUInt32());
  total += 4;
  if (boxFlags.version == 1)
  {
    hashSize = QBox.BE32(br.ReadUInt32());
    total += 4;
    for (int i = 0; i < (int)hashSize; i++)
    {
      hashPayload[i] = QBox.BE32(br.ReadUInt32());
      total += 4;
    }
  }
  return(total);
}
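// Read and Write here funnel every 32-bit field through QBox.BE32, which converts
// between the file's big-endian layout and host byte order. A minimal sketch of
// such a swap, assuming QBox.BE32 is a plain byte reversal (BE32Sketch is
// illustrative and not part of the original source):
private static uint BE32Sketch(uint x)
{
  return ((x & 0x000000FFu) << 24)
       | ((x & 0x0000FF00u) << 8)
       | ((x & 0x00FF0000u) >> 8)
       | ((x & 0xFF000000u) >> 24);
}
// Swapping twice restores the original value, which is why one helper can serve
// both the Read and Write paths.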
public override void Write(BinaryWriter bw, int dataLen)
{
  base.Write(bw, dataLen);
  bw.Write((UInt64)QBox.BE64(version));
  bw.Write((UInt64)QBox.BE64(width));
  bw.Write((UInt64)QBox.BE64(height));
  bw.Write((UInt64)QBox.BE64(frameTicks));
}
public override int Read(BinaryReader br)
{
  int total = base.Read(br);
  version = QBox.BE64(br.ReadUInt64());
  total += 8;
  accessUnits = (uint)QBox.BE32(br.ReadUInt32());
  total += 4;
  return(total);
}
public override int Read(BinaryReader br)
{
  int total = base.Read(br);
  version = QBox.BE32(br.ReadUInt32());
  total += 4;
  samplingFrequency = (uint)QBox.BE32(br.ReadUInt32());
  total += 4;
  channels = (uint)QBox.BE32(br.ReadUInt32());
  total += 4;
  return(total);
}
public override void Write(BinaryWriter bw, int dataLen)
{
  base.Write(bw, dataLen);
  bw.Write((UInt32)QBox.BE32(sampleSize));              //sampleSize = (uint)QBox.BE32(br.ReadUInt32());
  bw.Write((UInt32)QBox.BE32(audioSpecificConfigSize)); //audioSpecificConfigSize = (uint)QBox.BE32(br.ReadUInt32());
  for (int i = 0; i < (2 * audioSpecificConfigSize); i++)
  {
    bw.Write(audioSpecificConfig[i]); //audioSpecificConfig[i] = br.ReadByte();
  }
}
public override int Read(BinaryReader br)
{
  int total = base.Read(br);
  version = QBox.BE64(br.ReadUInt64());
  total += 8;
  width = QBox.BE64(br.ReadUInt64());
  total += 8;
  height = QBox.BE64(br.ReadUInt64());
  total += 8;
  frameTicks = QBox.BE64(br.ReadUInt64());
  total += 8;
  return(total);
}
public byte[] GetH264Nalu()
{
  Stream rawPayload = new MemoryStream(SliceBytes);
  BinaryReader br = new BinaryReader(rawPayload);

  // first, determine actual length of NALU (without trailing bytes)
  int totalSize = SliceBytes.Length;
  int strippedCount = 0;
  while (totalSize > 4)
  {
    ulong naluLen = QBox.BE32(br.ReadUInt32());
    if (naluLen > 0UL)
    {
      rawPayload.Position += (long)naluLen; // don't read yet, just advance
      // count only non-empty NALUs toward the output size (empty ones are dropped below)
      strippedCount += (int)naluLen + 4;
    }
    totalSize -= (int)naluLen + 4;
  }

  // use actual length to declare output array of bytes
  byte[] outBytes = new byte[strippedCount];

  // reset Position of memory stream
  rawPayload.Position = 0;

  // get rid of trailing bytes, if any
  // at the same time, convert to bit stream
  totalSize = SliceBytes.Length;
  int offset = 0;
  int naluCount = 0;
  while (totalSize > 4)
  {
    ulong naluLen = QBox.BE32(br.ReadUInt32());
    totalSize -= 4;
    if (naluLen > 0UL)
    {
      int readLen = (int)naluLen;
      outBytes[offset + 3] = (byte)1; // assume that outBytes[offset] to outBytes[offset + 2] are zero.
      offset += 4;
      rawPayload.Read(outBytes, offset, readLen);
      offset += readLen;
      totalSize -= readLen;
    }
    else
    {
      naluLen = 0; // debugging break point
    }
    naluCount++;
  } // end of while

  return(outBytes);
}
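// Illustrative before/after of the conversion done by GetH264Nalu (example bytes,
// not from the original source): each 4-byte big-endian length prefix becomes an
// Annex B start code, and empty NALUs are dropped along with any trailing bytes.
//   in:  00 00 00 19 | <25 NALU bytes> | 00 00 00 00
//   out: 00 00 00 01 | <25 NALU bytes>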
/// <summary>
/// QBoxTrackFormat
/// Constructor to use when writing out to a stream.
/// </summary>
/// <param name="trackInfo"></param>
public QBoxTrackFormat(IsochronousTrackInfo trackInfo) : this()
{
  _qBoxes = new List<QBox>();
  firstQB = new QBox(trackInfo);
  CodecTypes codecType = (trackInfo.HandlerType == "Audio") ? CodecTypes.Audio :
                         (trackInfo.HandlerType == "Video") ? CodecTypes.Video : CodecTypes.Unknown;
  Codec = new Codec(codecType);
  Codec.PrivateCodecData = trackInfo.CodecPrivateData;
  DurationIn100NanoSecs = trackInfo.DurationIn100NanoSecs;
}
/// <summary>
/// GetDurationFromLastQBox
/// This is necessary for a dynamically growing stream.
/// It needs to be threadsafe because we set and then reset the stream Position.
/// Remove mutex and call this from LazyRead to avoid deadlocks.
/// </summary>
/// <returns></returns>
public ulong GetDurationFromLastQBox()
{
  if (Stream.CanSeek == false)
  {
    return(0);
  }
  lock (base.Stream)
  {
    long currentPos = Stream.Position; // store where we are at currently...
    // try to find an audio qbox...
    BinaryReader br = new BinaryReader(base.Stream);
    br.BaseStream.Position = br.BaseStream.Length - 8;
    while (true)
    {
      if (QBox.SeekPrevQBox(br))
      {
        QBox box = new QBox();
        long boxPos = br.BaseStream.Position;
        box.Read(br);
        //if (box.mSampleStreamType == QBox.QBOX_SAMPLE_TYPE_AAC)
        if (box.mStreamDuration != 0)
        {
          br.BaseStream.Position = currentPos;
          ulong ans = MediaTimes[box.mSampleStreamId].TicksToTime(box.mStreamDuration, MediaTimeUtils.TimeUnitType.OneHundredNanoSeconds);
          return(ans);
        }
        else
        {
          if (br.BaseStream.Position - 8 <= 0)
          {
            // no more data to read...
            br.BaseStream.Position = currentPos;
            break; // bail out rather than rewinding past the start of the stream
          }
          br.BaseStream.Position = boxPos - 8; // rewind a bit...
        }
      }
      else
      {
        //DurationIn100NanoSecs = 0; // return value assigned to DurationIn100NanoSecs
        break; // can't find a previous qbox!!
      }
    }
    br.BaseStream.Position = currentPos;
  }
  return(0U);
}
public virtual void Write(BinaryWriter bw, int dataLen)
{
  // this method must be called AFTER boxSize has been adjusted by derived object
  bw.Write((Int32)QBox.BE32(boxSize));
  bw.Write((Int32)QBox.BE32(boxType));
  bw.Write((Int32)QBox.BE32(boxFlags.value));
  bw.Write((Int32)QBox.BE32(majorMediaType));
  bw.Write((Int32)QBox.BE32(minorMediaType));
  if (boxFlags.version == 1)
  {
    bw.Write((Int32)QBox.BE32(hashSize));
    for (int i = 0; i < (int)hashSize; i++)
    {
      bw.Write((Int32)QBox.BE32(hashPayload[i]));
    }
  }
}
/// <summary>
/// NearEnd
/// For every IFrame Qbox, check whether this is the last IFrame before the end of this run.
/// If it is, don't include this IFrame in the current run; it will be the first QBox in the NEXT run.
/// NOTE: This is not used anywhere.
/// </summary>
/// <param name="boxCount"></param>
/// <param name="inEndSampleTime"></param>
/// <param name="lastEnd"></param>
/// <param name="scaleFactor"></param>
/// <returns></returns>
private bool NearEnd(int boxCount, UInt64 inEndSampleTime, ulong lastEnd, float scaleFactor)
{
  if (inEndSampleTime < lastEnd)
  {
    return(true);
  }
  if (boxCount >= _qBoxes.Count) // it is not near the end, it's AT the end
  {
    return(false);
  }
  int index = boxCount + 1;
  ulong blockTime = lastEnd;
  QBox box = _qBoxes[index];
  while (((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) == 0)
  {
    string streamType = box.SampleStreamTypeString();
    if (streamType == "H264")
    {
      blockTime += (ulong)(scaleFactor * box.mSampleDuration);
    }
    else if (streamType == "AAC")
    {
      blockTime = (ulong)(scaleFactor * box.mSampleCTS);
    }
    else
    {
      throw new Exception(string.Format("Unsupported qbox stream type: {0}", streamType));
    }
    if (inEndSampleTime < blockTime)
    {
      return(true);
    }
    index++;
    if (index == _qBoxes.Count)
    {
      return(false);
    }
    box = _qBoxes[index];
  }
  return(false);
}
public override Slice GetSample(StreamDataBlockInfo SampleInfo)
{
  int delimiterLength = 0;
  Slice ans = new Slice();
  ans.Copy(SampleInfo);
  ans.SliceBytes = new byte[SampleInfo.SliceSize];
#if REMOVE_EXTRA_SPS
  NaluDelimiterBlockInfo blockInfo = SampleInfo as NaluDelimiterBlockInfo;
  if (blockInfo.AccessUnitDelimiter != null)
  {
    delimiterLength = blockInfo.AccessUnitDelimiter.Length + 4; // access unit delimiter length is always 2
    // assume that SliceBytes[0 to 2] are all zeroes, we only need to set LSB
    ans.SliceBytes[3] = (byte)(delimiterLength - 4);
    blockInfo.AccessUnitDelimiter.CopyTo(ans.SliceBytes, 4);
  }
#endif
  //ParentStream.Stream.Position = (long)SampleInfo.StreamOffset;
  // remove empty NALUs (length == 0)
  // also remove trailing bytes, if any, from each NALU
  Slice inSlice = SampleInfo as Slice;
  BinaryReader br = new BinaryReader(new MemoryStream(inSlice.SliceBytes));
  //BinaryReader br = new BinaryReader(ParentStream.Stream);
  int totalSize = SampleInfo.SliceSize - delimiterLength;
  int offset = delimiterLength;
  while (totalSize > 4)
  {
    ulong naluLen = QBox.BE32(br.ReadUInt32());
    if (naluLen > 0UL)
    {
      br.BaseStream.Position -= 4;
      int readLen = (int)naluLen + 4;
      br.Read(ans.SliceBytes, offset, readLen);
      offset += readLen;
      totalSize -= readLen;
    }
    else
    {
      naluLen = 0; // debugging break point
      totalSize -= 4; // account for the empty NALU's length prefix so the loop terminates
    }
  }
  return(ans);
}
public override int Read(BinaryReader br)
{
  int total = base.Read(br);
  sampleSize = (uint)QBox.BE32(br.ReadUInt32());
  total += 4;
  audioSpecificConfigSize = (uint)QBox.BE32(br.ReadUInt32());
  total += 4;
  audioSpecificConfig = new byte[audioSpecificConfigSize * 2];
  for (int i = 0; i < (2 * audioSpecificConfigSize); i++)
  {
    audioSpecificConfig[i] = br.ReadByte();
    total += 1;
  }
#if ADTS
  payloadSize -= (int)boxSize;
  payloadSize += 7;
  // get sample rate, channels and audio coding information from QMED header
  int objectType = audioSpecificConfig[0] >> 3;
  int samplingFrequencyIndex = ((audioSpecificConfig[0] & 0x07) << 1) | ((audioSpecificConfig[1] & 0x80) >> 7);
  int channelConfiguration = (audioSpecificConfig[1] & 0x78) >> 3;
  adtsHeader = new byte[7];
  adtsHeader[0] = 0xFF;
  adtsHeader[1] = 0xF0;
  adtsHeader[1] |= 0x1;
  adtsHeader[2] = (byte)(((objectType - 1) << 6) & 0xFF); // 0x40 for AAC-LC
  adtsHeader[2] |= (byte)(samplingFrequencyIndex << 2);
  adtsHeader[3] = (byte)(channelConfiguration << 6);
  adtsHeader[3] |= (byte)((payloadSize & 0x1800) >> 11);
  adtsHeader[4] = (byte)((payloadSize & 0x07f8) >> 3);
  adtsHeader[5] = (byte)((payloadSize & 0x0007) << 5);
  adtsHeader[5] |= 0x1F;
  adtsHeader[6] = 0xFC;
#endif
  return(total);
}
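// For reference: the 7-byte header assembled in the ADTS branch above follows the
// standard ADTS bit layout. Bytes 0-1 carry the 12-bit syncword 0xFFF plus the
// MPEG-4/layer bits and protection_absent = 1 (the 0x1); byte 2 packs the 2-bit
// profile (objectType - 1) and the 4-bit sampling_frequency_index; byte 3 holds
// the low two bits of channel_configuration (sufficient for configurations up to
// 3; the third bit belongs in byte 2 per spec) plus the top of the 13-bit frame
// length (header + payload, hence the += 7) that continues through bytes 4-5;
// the 0x1F / 0xFC tail is the all-ones 11-bit buffer-fullness value that marks a
// variable-bit-rate stream.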
/// <summary>
/// PrepareSampleReading
/// In MP4, reading of box headers is separate from reading of the H264 and audio bits. This is because the bits are stored
/// in a different place in the file (or may in fact be in a separate file). In a QBox file, however, both headers and bits
/// are stored in the qbox. It makes no sense to separate the two. Therefore, in this implementation of PrepareSampleReading,
/// we actually read the bits together with the headers. The routine WriteSamples doesn't do much.
///
/// There are two signatures for this method: one that accepts qbox indices (this one), and another that accepts ulong start
/// and end times.
///
/// We don't keep the qboxes. QBoxes already processed are disposed of as a last step. If we run out of qboxes, we read in
/// more.
/// </summary>
/// <param name="inStartSampleIndex">int index to first qbox to be processed</param>
/// <param name="inEndSampleIndex">int index to last qbox to be processed</param>
/// <param name="dummy">not used</param>
/// <returns></returns>
public override List<StreamDataBlockInfo> PrepareSampleReading(int inStartSampleIndex, int inEndSampleIndex, ref ulong dummy)
{
  List<StreamDataBlockInfo> retList = new List<StreamDataBlockInfo>();

  if (_qBoxes.Count == 0)
  {
    return(retList);
  }

  float scaleFactor = TimeSpan.FromSeconds(1.0).Ticks / this.TimeScale;
  bool foundFirstSlice = false;
  int boxCount = 0;

  // we traverse the _qBoxes list from the beginning;
  // can't use foreach because _qBoxes can change;
  // box.mIndex is NOT the same as index i.
  // we use a for loop only because we are adding qboxes to _qBoxes as part of the loop
  for (int i = 0; i < _qBoxes.Count; i++)
  {
    QBox box = _qBoxes[i];
    boxCount++;

    // reject qboxes with sample size zero (no data)
    if (box.mSampleSize == 0)
    {
      continue;
    }

    // we shouldn't be searching for the first box of interest, because it should always be the first one
    // it should always be the first one because we threw away all boxes already processed
    if (((ulong)inStartSampleIndex > (box.mFrameCounter - 1)) ||
        ((!foundFirstSlice) && (((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) == 0)))
    {
      continue; // skip
    }
    else if ((ulong)inStartSampleIndex == (box.mFrameCounter - 1))
    {
      foundFirstSlice = true;
    }
    else if (!foundFirstSlice)
    {
      _qBoxes.Clear();
      base.GetNextBatch(0, inStartSampleIndex);
      //throw new Exception("First IFrame not found");
      i = -1; // this gets incremented to zero
      boxCount = 0; // start all over
      continue;
    }

    StreamDataBlockInfo datBlock = new Slice();

    switch (box.SampleStreamTypeString())
    {
      case "AAC":
        datBlock = new ADTSDataBlockInfo();
        datBlock.SliceType = SliceType.AAC;
        break;
      case "Q711":
      case "PCM":
        datBlock.SliceType = SliceType.Unknown; // FIXME: add sample type for PCM
        break;
      case "MP2A":
        datBlock.SliceType = SliceType.MP4A;
        break;
      case "Q722": // ADPCM
      case "Q726":
      case "Q728":
        datBlock.SliceType = SliceType.Unknown; // FIXME: add sample type for ADPCM
        break;
      case "H264":
      case "H264_SLICE":
        datBlock = new NaluDelimiterBlockInfo();
        if (((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) == 0)
        {
          datBlock.SliceType = SliceType.DFrame;
        }
        else
        {
          datBlock.SliceType = SliceType.IFrame;
        }
        if ((box.mSample != null) && (box.mSample.v != null))
        {
          NaluDelimiterBlockInfo blockInfo = datBlock as NaluDelimiterBlockInfo;
          blockInfo.AccessUnitDelimiter = box.mSample.v.aud;
        }
        break;
      case "JPEG":
        datBlock.SliceType = SliceType.JPEG;
        break;
      case "MPEG2_ELEMENTARY":
        datBlock.SliceType = SliceType.Unknown; // FIXME: add sample type for MPEG2
        break;
      case "VIN_STATS_GLOBAL":
      case "VIN_STATS_MB":
      case "USER_METADATA":
      case "DEBUG":
      default:
        System.Diagnostics.Debug.WriteLine("Unknown QBox: {0}", box.SampleStreamTypeString());
        break;
    }

    datBlock.CTS = (ulong)((box.mSampleCTS - (box.mStreamDuration - box.mSampleDuration)) * scaleFactor);
    datBlock.SliceDuration = (uint)(scaleFactor * box.mSampleDuration);
    if (box.mFrameCounter == 0 && box.mStreamDuration == 0)
    {
      datBlock.TimeStampNew = 0;
    }
    else if (box.mStreamDuration == 0)
    {
      datBlock.TimeStampNew = null;
    }
    else
    {
      datBlock.TimeStampNew = (ulong)(scaleFactor * (box.mStreamDuration - box.mSampleDuration));
    }
    datBlock.SliceSize = box.mSampleSize;
    datBlock.index = (int)box.mFrameCounter - 1; // boxCount;

    // NOTE! For qbox, StreamOffset has a different meaning than in MP4.
    // Here, StreamOffset is the offset to the qbox itself; whereas in
    // MP4, StreamOffset is the offset to the H264 payload.
    // In GenericMediaTrack.GetSample, StreamOffset is used as in MP4, but
    // this method is overridden by another in QBoxVideoTrack that does not use StreamOffset.
    // For flashback to work for both MP4 and qbox files, the caching mechanism
    // is different in MP4 than in qbox.
    datBlock.StreamOffset = (ulong)box.mHeaderPosition; // needed for flashback to work

    // set payload
    Slice slice = datBlock as Slice;
    slice.SliceBytes = box.mSample.mPayload;

#if ADTS
    if (box.mSampleStreamType == QBox.QBOX_SAMPLE_TYPE_AAC)
    {
      QMed.QMedAAC qmedaac = (QMed.QMedAAC)box.mSample.qmed;
#if PES
      datBlock.PESandADTSHeaders = new byte[qmedaac.pesHeader.Length + qmedaac.adtsHeader.Length];
      qmedaac.pesHeader.CopyTo(datBlock.PESandADTSHeaders, 0);
      qmedaac.adtsHeader.CopyTo(datBlock.PESandADTSHeaders, qmedaac.pesHeader.Length);
#else
      datBlock.PESandADTSHeaders = new byte[qmedaac.adtsHeader.Length];
      qmedaac.adtsHeader.CopyTo(datBlock.PESandADTSHeaders, 0);
#endif
      datBlock.SampleSize += datBlock.PESandADTSHeaders.Length;
    }
#endif

    if (datBlock.SliceDuration == 0)
    {
      datBlock.SliceDuration = (uint)(scaleFactor * box.mSampleDuration); // any non-zero duration is better
    }
    if ((((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) != 0) &&
        ((box.mFrameCounter - 1) >= (ulong)inEndSampleIndex))
    {
      boxCount--;
      break; // don't put last IFrame box in return list
    }
    retList.Add(datBlock);

    if (box == _qBoxes.Last())
    {
      base.GetNextBatch(GenericMediaStream.MAX_BOXES_TO_READ, 0); // callee should set end FIXME: is box.mCurrentPosition being set?
    }
  } // end of for loop

  _qBoxes.RemoveRange(0, boxCount);

  return(retList);
}
/// <summary>
/// Constructor accepting a list of qboxes as input.
/// (For reading a QBox file.)
/// FIXME: we need to pick up the rest of the tracks (other than the first one)
/// </summary>
/// <param name="qboxes"></param>
public QBoxTrackFormat(List<QBox> qboxes, ushort trackID, MediaTimeUtils mediaTime) : this()
{
  _qBoxes = new List<QBox>();
  qboxes.ForEach(delegate(QBox q)
  {
    if (q.mSampleStreamId == trackID)
    {
      _qBoxes.Add(q);
    }
  });
  if (_qBoxes.Count == 0)
  {
    throw new Exception(string.Format("There is no track with ID = {0}", trackID));
  }
  _mediaTime = mediaTime;
  HasIFrameBoxes = _qBoxes.Any(box => (((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) != 0));
  firstQB = _qBoxes[0];
  if (firstQB.mSampleStreamType == QBox.QBOX_SAMPLE_TYPE_H264)
  {
    Codec = new Codec(CodecTypes.Video);
    firstQB = _qBoxes.First(q => ((q.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_CONFIGURATION_INFO) != 0u));
    if (firstQB.mSample.v != null)
    {
      this.videoMetaSample = firstQB.mSample;
      seqParamSetData = firstQB.mSample.v.sps;
      picParamSetData = firstQB.mSample.v.pps;
      Codec.PrivateCodecData = this.VideoCodecPrivateData;
    }
    else
    {
      Codec.PrivateCodecData = ToHexString(firstQB.mSample.privateCodecData);
    }
  }
  else if (firstQB.mSampleStreamType == QBox.QBOX_SAMPLE_TYPE_AAC)
  {
    Codec = new Codec(CodecTypes.Audio);
    firstQB = _qBoxes.First(q => ((q.mSample.a != null) && ((q.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_META_INFO) != 0u)) ||
                                 ((q.mSample.qmed != null) && ((q.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_QMED_PRESENT) != 0u)));
    this.audioMetaSample = firstQB.mSample;
    if (audioMetaSample.privateCodecData != null)
    {
      Codec.PrivateCodecData = ToHexString(audioMetaSample.privateCodecData);
    }
    else
    {
#if USE_WAVEFORMATEX
      GetAudioPrivateCodecDataFromWaveFormatEx();
#else
      GetAudioPrivateCodecDataAdHoc();
#endif
    }
  }
  else if (firstQB.mSampleStreamType == QBox.QBOX_SAMPLE_TYPE_JPEG)
  {
    Codec = new Codec(CodecTypes.Video);
    if (firstQB.mSample.privateCodecData != null)
    {
      Codec.PrivateCodecData = ToHexString(firstQB.mSample.privateCodecData);
    }
  }
  else
  {
    throw new Exception(string.Format("QBox sample type not implemented: {0}", firstQB.mSampleStreamType));
  }
}
/// <summary>
/// PrepareSampleReading
/// There are two signatures for this method: one that accepts qbox indices (see above), and another that accepts ulong start
/// and end times (this one).
///
/// If we run out of qboxes, we read in more.
/// </summary>
/// <param name="inStartSampleTime">QBoxes with time stamps equal to or more than this are included in the output list</param>
/// <param name="inEndSampleTime">QBoxes with time stamps equal to or less than this are included in the output list</param>
/// <param name="dummy">unused</param>
/// <returns></returns>
public override List<StreamDataBlockInfo> PrepareSampleReading(UInt64 inStartSampleTime, UInt64 inEndSampleTime, ref ulong dummy)
{
  if (_qBoxes.Count == 0)
  {
    return(new List<StreamDataBlockInfo>()); // empty list
  }

  long oneSecTicks = TimeSpan.FromSeconds(1.0).Ticks;
  float scaleFactor = oneSecTicks / this.TimeScale;
  ulong averageSliceDuration = 0UL;
  int boxCount = 0;
  ulong timeStamp = 0UL;
  int startIndex = 0;
  int endIndex = 0;
  bool startSet = false;

  // we traverse the _qBoxes list from the beginning (one of two traversals, because we call the other PrepareSampleReading after this)
  // the purpose of this traversal is just to determine the start and end indices.
  // FIXME: we should optimize the search for the first qbox (we can use binary search if we first convert all mSampleCTS to mean
  // the same thing -- a time stamp) CCT.
  for (int i = 0; i < _qBoxes.Count; i++)
  {
    QBox box = _qBoxes[i];
    boxCount++;

    // reject qboxes with sample size zero (no data)
    if (box.mSampleSize == 0)
    {
      boxCount--;
      continue;
    }

    timeStamp = (ulong)(scaleFactor * (box.mStreamDuration - box.mSampleDuration));
    averageSliceDuration += (ulong)(scaleFactor * box.mSampleDuration);

    if (!startSet)
    {
      long diff = ((long)inStartSampleTime - (long)timeStamp) >> 1; // divided by 2
      // the first qbox should be the start because we dispose of qboxes already processed
      if (((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) != 0)
      {
        startIndex = (int)box.mFrameCounter - 1;
        startSet = true;
      }
      if (!startSet)
      {
        throw new Exception("Track problem: first box in queue is not sync point");
      }
    }

    if ((((uint)box.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_SYNC_POINT) != 0) &&
        (inEndSampleTime <= timeStamp)) // NearEnd(boxCount, inEndSampleTime, timeStamp, scaleFactor))
    {
      endIndex = (int)box.mFrameCounter - 1; // do not put this sync box in List; it should instead be the first box in next fragment
      break;
    }

#if ADTS
    // NOTE: this conditional block references datBlock, which is not declared in this
    // method; it appears to have been carried over from the index-based overload and
    // would not compile with ADTS defined.
    if (box.mSampleStreamType == QBox.QBOX_SAMPLE_TYPE_AAC)
    {
      QMed.QMedAAC qmedaac = (QMed.QMedAAC)box.mSample.qmed;
#if PES
      datBlock.PESandADTSHeaders = new byte[qmedaac.pesHeader.Length + qmedaac.adtsHeader.Length];
      qmedaac.pesHeader.CopyTo(datBlock.PESandADTSHeaders, 0);
      qmedaac.adtsHeader.CopyTo(datBlock.PESandADTSHeaders, qmedaac.pesHeader.Length);
#else
      datBlock.PESandADTSHeaders = new byte[qmedaac.adtsHeader.Length];
      qmedaac.adtsHeader.CopyTo(datBlock.PESandADTSHeaders, 0);
#endif
      datBlock.SampleSize += datBlock.PESandADTSHeaders.Length;
    }
#endif

    if (box == _qBoxes.Last())
    {
      base.GetNextBatch(GenericMediaStream.MAX_BOXES_TO_READ, 0); // callee should set end FIXME: is box.mCurrentPosition being set?
    }
  } // end of for loop

  // we did not find the end, which means we ran out of qboxes to process
  if (endIndex == 0)
  {
    averageSliceDuration /= (uint)boxCount;
    int desiredBoxCount = (int)(((inEndSampleTime - inStartSampleTime) + averageSliceDuration) / averageSliceDuration);
    endIndex = startIndex + desiredBoxCount;
  }

  if (startIndex == endIndex)
  {
    throw new Exception("Traversing QBoxes did not yield any qbox.");
  }

  return(PrepareSampleReading(startIndex, endIndex, ref dummy));
}
//nbl; removed as we shouldn't 'fix' bframe time stamps
//private Dictionary<ushort, ulong> PrevTimeStamps = new Dictionary<ushort, ulong>();
//private Dictionary<ushort, int> PrevIndices = new Dictionary<ushort, int>();

public override void LazyRead(int requestedBoxCount)
{
  QBox qbox = null;
  int i = 0;
  int boxCount = 0;

  lock (_binaryReader.BaseStream)
  {
    // clear out all qbox lists
    // we expect the payload buffers to stay intact because these are now referenced in Slices
    _audioBoxes.Clear();
    _videoBoxes.Clear();

    while ((boxCount < requestedBoxCount) && (_binaryReader.BaseStream.Position < _binaryReader.BaseStream.Length))
    {
      try
      {
        qbox = new QBox();
        qbox.Read(_binaryReader);
        if (MediaTimes[qbox.mSampleStreamId] == null)
        {
          MediaTimes[qbox.mSampleStreamId] = new MediaTimeUtils();
        }
        MediaTimes[qbox.mSampleStreamId].SetClockRate(((qbox.mSampleFlags & QBox.QBOX_SAMPLE_FLAGS_120HZ_CLOCK) != 0U));
        //nbl; we can't fill in duration for bframes as this doesn't make sense... the CTTS info is presentation time used for mp4 stuff
        //qbox.FixTimeStamp(PrevTimeStamps, PrevIndices); // <---- Kludge! Some qboxes may have mStreamDuration reset, fix it here
        boxCount++;
      }
      // for the moment we catch two different exceptions, yet all we do is break our while loop
      catch (EndOfStreamException eos)
      {
        string msg = eos.Message;
        break;
      }
      catch (Exception)
      {
        throw; // rethrow without resetting the stack trace
      }

      switch (qbox.SampleStreamTypeString())
      {
        case "AAC":
        case "PCM":
        case "MP2A":
        case "Q711":
        case "Q722":
        case "Q726":
        case "Q728":
          _audioBoxes.Add(qbox);
          break;
        case "H264":
        case "H264_SLICE":
        case "JPEG":
        case "MPEG2_ELEMENTARY":
          if (!_videoTrackIDs.Contains(qbox.mSampleStreamId))
          {
            _videoTrackIDs.Add(qbox.mSampleStreamId);
          }
          _videoBoxes.Add(qbox);
          break;
        case "VIN_STATS_GLOBAL":
        case "VIN_STATS_MB":
        case "USER_METADATA":
        case "DEBUG":
        default:
          System.Diagnostics.Debug.WriteLine("Unknown QBox: {0}", qbox.SampleStreamTypeString());
          break;
      }
      i++;
    } // end of while
  }

  // define the tracks, if we haven't already
  // note that for qboxes, we really only care about formats (QBoxTrackFormat), and tracks are just generic.
  if (MediaTracks.Count == 0 && qbox != null)
  {
    if (_audioBoxes.Count > 0)
    {
      ushort audioTrackID = _audioBoxes[0].mSampleStreamId;
      QBoxTrackFormat audioTrackFormat = new QBoxTrackFormat(_audioBoxes, audioTrackID, MediaTimes[audioTrackID]);
      QBoxAudioTrack audioTrack = new QBoxAudioTrack(audioTrackFormat, this);
      //audioTrack.NextIndexToRead = tempIndices[audioTrackID];
      //GenericAudioTrack audioTrack = new GenericAudioTrack(audioTrackFormat, this);
      //this.Duration = audioTrack.TrackDuration;
      //this.TimeScale = (uint)audioTrack.SampleRate;
      base.AddTrack(audioTrack);
    }
    foreach (ushort trackID in _videoTrackIDs)
    {
      QBoxTrackFormat videoTrackFormat = new QBoxTrackFormat(_videoBoxes, trackID, MediaTimes[trackID]);
      QBoxVideoTrack videoTrack = new QBoxVideoTrack(videoTrackFormat, this);
      videoTrack.NextIndexToRead = (int)(qbox.mBoxContinuityCounter + 1);
      if (DurationIn100NanoSecs < videoTrack.TrackDurationIn100NanoSecs)
      {
        this.DurationIn100NanoSecs = videoTrack.TrackDurationIn100NanoSecs;
      }
      //this.TimeScale = videoTrack.TrackFormat.TimeScale;
      base.AddTrack(videoTrack);
    }
  }
  else if (_audioBoxes.Count > 0 && _videoBoxes.Count > 0)
  {
    // add qboxes to existing track formats
    foreach (GenericMediaTrack track in this.MediaTracks)
    {
      QBoxTrackFormat format = track.TrackFormat as QBoxTrackFormat;
      if (track is GenericAudioTrack)
      {
        format.AddMore(_audioBoxes);
      }
      else
      {
        format.AddMore(_videoBoxes);
      }
    }
  }

  if (currStreamLength < Stream.Length)
  {
    currStreamLength = Stream.Length;
    // if the duration we're getting from the last audio qbox is shorter than we already have, then don't bother
    ulong liveDuration = (ulong)GetDurationFromLastQBox(); // seek all the way forward and back, just to determine duration
    if (liveDuration > DurationIn100NanoSecs)
    {
      DurationIn100NanoSecs = liveDuration;
    }
    // might as well set audio and video durations
    foreach (IMediaTrack track in MediaTracks)
    {
      track.TrackDurationIn100NanoSecs = DurationIn100NanoSecs;
    }
  }
}