Example #1
        public ITag ReadTag()
        {
            lock (this.SyncRoot)
            {
                ITag tag = null;
                //empty-out the pre-streaming tags first
                if (_firstTags.Count > 0)
                {
                    //log.debug("Returning pre-tag");
                    // Return first tags before media data
                    tag = _firstTags.First.Value;
                    _firstTags.RemoveFirst();
                    return tag;
                }
                //log.debug("Read tag - sample {} prevFrameSize {} audio: {} video: {}", new Object[]{currentSample, prevFrameSize, audioCount, videoCount});

                //get the current frame
                Mp4Frame frame = _frames[_currentFrame];
#if LOGGING
                log.Debug(string.Format("Playback #{0} {1}", _currentFrame, frame));
#endif

                int sampleSize = frame.Size;

                int time = (int)Math.Round(frame.Time * 1000.0);
                //log.debug("Read tag - dst: {} base: {} time: {}", new Object[]{frameTs, baseTs, time});

                long samplePos = frame.Offset;
                //log.debug("Read tag - samplePos {}", samplePos);

                //determine frame type and packet body padding
                byte type = frame.Type;

                //create a byte buffer of the size of the sample plus the two-byte codec prefix
                byte[] data = new byte[sampleSize + 2];
                try
                {
                    Array.Copy(Mp4Reader.PREFIX_AUDIO_FRAME, data, Mp4Reader.PREFIX_AUDIO_FRAME.Length);
                    //do we need to add the mdat offset to the sample position?
                    _fs.Position = samplePos;
                    //Stream.Read can return fewer bytes than requested, so loop until the sample is complete
                    int offset = Mp4Reader.PREFIX_AUDIO_FRAME.Length;
                    int remaining = sampleSize;
                    while (remaining > 0)
                    {
                        int read = _fs.Read(data, offset, remaining);
                        if (read <= 0) break; //unexpected end of stream
                        offset += read;
                        remaining -= read;
                    }
                }
                catch (Exception ex)
                {
#if LOGGING
                    log.Error("Error on channel position / read", ex);
#endif
                }

                //create the tag
                tag = new Tag(type, time, data.Length, data, _prevFrameSize);
                //increment the frame number
                _currentFrame++;
                //set the frame / tag size
                _prevFrameSize = tag.BodySize;
                //log.debug("Tag: {}", tag);
                return tag;
            }
        }
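
For context, a caller would normally drain tags in a loop until the reader runs out of frames. The sketch below shows such a driver; the `Mp4Reader` constructor, `HasMoreTags()`, and `Timestamp` names are assumptions for illustration and are not confirmed by the code above (only `ReadTag()` and `ITag.BodySize` appear there).

    using System;
    using System.IO;

    //hypothetical driver loop -- HasMoreTags() and tag.Timestamp are assumed names
    using (FileStream fs = File.OpenRead("movie.mp4"))
    {
        Mp4Reader reader = new Mp4Reader(fs);
        reader.AnalyzeFrames();
        while (reader.HasMoreTags())
        {
            ITag tag = reader.ReadTag();
            Console.WriteLine("tag @ {0} ms, body {1} bytes", tag.Timestamp, tag.BodySize);
        }
    }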
Example #2
        /// <summary>
        /// Performs frame analysis and generates metadata for use in seeking. All frames are collected and then sorted together by time and offset.
        /// </summary>
        public void AnalyzeFrames()
        {
#if LOGGING
            log.Debug("Analyzing frames");
#endif
            // tag == sample
            int  sample = 1;
            long pos    = 0;

            //add the audio frames / samples / chunks
            for (int i = 0; i < _audioSamplesToChunks.Count; i++)
            {
                Mp4Atom.Record record     = _audioSamplesToChunks[i];
                int            firstChunk = record.FirstChunk;
                int            lastChunk  = _audioChunkOffsets.Count;
                if (i < _audioSamplesToChunks.Count - 1)
                {
                    Mp4Atom.Record nextRecord = _audioSamplesToChunks[i + 1];
                    lastChunk = nextRecord.FirstChunk - 1;
                }
                for (int chunk = firstChunk; chunk <= lastChunk; chunk++)
                {
                    int sampleCount = record.SamplesPerChunk;
                    pos = _audioChunkOffsets[chunk - 1];
                    while (sampleCount > 0)
                    {
                        //calculate ts (force floating-point division so sub-second times are not truncated)
                        double ts = (_audioSampleDuration * (sample - 1)) / (double)_audioTimeScale;
                        //sample size
                        int size = _audioSamples[sample - 1];
                        //create a frame
                        Mp4Frame frame = new Mp4Frame();
                        frame.Offset = pos;
                        frame.Size   = size;
                        frame.Time   = ts;
                        frame.Type   = IOConstants.TYPE_AUDIO;
                        _frames.Add(frame);
                        //log.debug("Sample #{} {}", sample, frame);
                        //inc and dec stuff
                        pos += size;
                        sampleCount--;
                        sample++;
                    }
                }
            }
            //sort the frames
            _frames.Sort();

#if LOGGING
            log.Debug(string.Format("Frames count: {0}", _frames.Count));
#endif
        }
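
The closing `_frames.Sort()` call only works because `Mp4Frame` is comparable. The class itself is not shown in these examples, so the following is a sketch of a plausible implementation, assuming the ordering described in the summary (time first, then offset); the property names match the usage above.

    using System;

    public class Mp4Frame : IComparable<Mp4Frame>
    {
        public bool IsKeyFrame { get; set; }
        public long Offset { get; set; }
        public int Size { get; set; }
        public double Time { get; set; }
        public byte Type { get; set; }

        //order by timestamp first; fall back to file offset so audio and
        //video samples with identical times interleave deterministically
        public int CompareTo(Mp4Frame other)
        {
            int byTime = Time.CompareTo(other.Time);
            return byTime != 0 ? byTime : Offset.CompareTo(other.Offset);
        }
    }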
Example #3
        /// <summary>
        /// Performs frame analysis and generates metadata for use in seeking. All frames are collected and then sorted together by time and offset.
        /// </summary>
        public void AnalyzeFrames()
        {
#if !SILVERLIGHT
            log.Debug("Analyzing frames");
#endif
            // Maps positions, samples, timestamps to one another
            _timePosMap = new Dictionary<int, long>();
            _samplePosMap = new Dictionary<int, long>();
            // tag == sample
            int sample = 1;
            long pos;
            for (int i = 0; i < _videoSamplesToChunks.Count; i++)
            {
                Mp4Atom.Record record = _videoSamplesToChunks[i];
                int firstChunk = record.FirstChunk;
                int lastChunk = _videoChunkOffsets.Count;
                if (i < _videoSamplesToChunks.Count - 1)
                {
                    Mp4Atom.Record nextRecord = _videoSamplesToChunks[i + 1];
                    lastChunk = nextRecord.FirstChunk - 1;
                }
                for (int chunk = firstChunk; chunk <= lastChunk; chunk++)
                {
                    int sampleCount = record.SamplesPerChunk;
                    pos = _videoChunkOffsets[chunk - 1];
                    while (sampleCount > 0)
                    {
                        //log.debug("Position: {}", pos);
                        _samplePosMap.Add(sample, pos);
                        //calculate ts (force floating-point division so sub-second times are not truncated)
                        double ts = (_videoSampleDuration * (sample - 1)) / (double)_videoTimeScale;
                        //check to see if the sample is a keyframe
                        bool keyframe = false;
                        //some files appear not to have sync samples
                        if (_syncSamples != null)
                        {
                            keyframe = _syncSamples.Contains(sample);
                            if (_seekPoints == null)
                            {
                                _seekPoints = new LinkedList<int>();
                            }
                            //only keyframes are usable as seek points
                            if (keyframe)
                            {
                                int keyframeTs = (int)Math.Round(ts * 1000.0);
                                _seekPoints.AddLast(keyframeTs);
                                _timePosMap.Add(keyframeTs, pos);
                            }
                        }
                        //size of the sample
                        int size = _videoSamples[sample - 1];
                        //create a frame
                        Mp4Frame frame = new Mp4Frame();
                        frame.IsKeyFrame = keyframe;
                        frame.Offset = pos;
                        frame.Size = size;
                        frame.Time = ts;
                        frame.Type = IOConstants.TYPE_VIDEO;
                        _frames.Add(frame);

                        //log.debug("Sample #{} {}", sample, frame);

                        //inc and dec stuff
                        pos += size;
                        sampleCount--;
                        sample++;
                    }
                }
            }

            //log.debug("Sample position map (video): {}", samplePosMap);

            //add the audio frames / samples / chunks		
            sample = 1;
            for (int i = 0; i < _audioSamplesToChunks.Count; i++)
            {
                Mp4Atom.Record record = _audioSamplesToChunks[i];
                int firstChunk = record.FirstChunk;
                int lastChunk = _audioChunkOffsets.Count;
                if (i < _audioSamplesToChunks.Count - 1)
                {
                    Mp4Atom.Record nextRecord = _audioSamplesToChunks[i + 1];
                    lastChunk = nextRecord.FirstChunk - 1;
                }
                for (int chunk = firstChunk; chunk <= lastChunk; chunk++)
                {
                    int sampleCount = record.SamplesPerChunk;
                    pos = _audioChunkOffsets[chunk - 1];
                    while (sampleCount > 0)
                    {
                        //calculate ts (force floating-point division so sub-second times are not truncated)
                        double ts = (_audioSampleDuration * (sample - 1)) / (double)_audioTimeScale;
                        //sample size
                        int size = _audioSamples[sample - 1];
                        //create a frame
                        Mp4Frame frame = new Mp4Frame();
                        frame.Offset = pos;
                        frame.Size = size;
                        frame.Time = ts;
                        frame.Type = IOConstants.TYPE_AUDIO;
                        _frames.Add(frame);

                        //log.debug("Sample #{} {}", sample, frame);

                        //inc and dec stuff
                        pos += size;
                        sampleCount--;
                        sample++;
                    }
                }
            }

            //sort the frames
            _frames.Sort();
#if !SILVERLIGHT
            log.Debug(string.Format("Frames count: {0}", _frames.Count));
            //log.debug("Frames: {}", frames);
#endif

            //release some memory, since we're done with the vectors
            _audioChunkOffsets.Clear();
            _audioChunkOffsets = null;
            _audioSamplesToChunks.Clear();
            _audioSamplesToChunks = null;

            _videoChunkOffsets.Clear();
            _videoChunkOffsets = null;
            _videoSamplesToChunks.Clear();
            _videoSamplesToChunks = null;

            //some files have no sync sample atom, so guard the cleanup
            if (_syncSamples != null)
            {
                _syncSamples.Clear();
                _syncSamples = null;
            }
        }
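
The `_seekPoints` list and `_timePosMap` dictionary built above exist to service seeks: given a requested time, find the last keyframe at or before it, then reposition the stream at that frame's offset. A minimal sketch of such a lookup follows, assuming it lives in the same class; this helper is not part of the shown code.

    //hypothetical helper, illustration only -- seek points are appended in
    //ascending timestamp order by AnalyzeFrames, so a linear scan suffices
    private long GetSeekPosition(int timeMs)
    {
        if (_seekPoints == null)
        {
            return -1; //no sync sample atom was present
        }
        int bestTs = -1;
        foreach (int ts in _seekPoints)
        {
            if (ts > timeMs)
            {
                break;
            }
            bestTs = ts;
        }
        return bestTs < 0 ? -1 : _timePosMap[bestTs];
    }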
Example #4
		/// <summary>
		/// Performs frame analysis and generates metadata for use in seeking. All frames are collected and then sorted together by time and offset.
		/// </summary>
		public void AnalyzeFrames() {
			log.Debug("Analyzing frames");
			// tag == sample
			int sample = 1;
			long pos = 0;

			//add the audio frames / samples / chunks
			for (int i = 0; i < _audioSamplesToChunks.Count; i++) {
				Mp4Atom.Record record = _audioSamplesToChunks[i];
				int firstChunk = record.FirstChunk;
				int lastChunk = _audioChunkOffsets.Count;
				if (i < _audioSamplesToChunks.Count - 1) {
					Mp4Atom.Record nextRecord = _audioSamplesToChunks[i + 1];
					lastChunk = nextRecord.FirstChunk - 1;
				}
				for (int chunk = firstChunk; chunk <= lastChunk; chunk++) {
					int sampleCount = record.SamplesPerChunk;
					pos = _audioChunkOffsets[chunk - 1];
					while (sampleCount > 0) {
						//calculate ts (force floating-point division so sub-second times are not truncated)
						double ts = (_audioSampleDuration * (sample - 1)) / (double)_audioTimeScale;
						//sample size
						int size = _audioSamples[sample - 1];
						//create a frame
						Mp4Frame frame = new Mp4Frame();
						frame.Offset = pos;
						frame.Size = size;
						frame.Time = ts;
						frame.Type = IOConstants.TYPE_AUDIO;
						_frames.Add(frame);
						//log.debug("Sample #{} {}", sample, frame);
						//inc and dec stuff
						pos += size;
						sampleCount--;
						sample++;
					}
				}
			}
			//sort the frames
			_frames.Sort();

			log.Debug(string.Format("Frames count: {0}", _frames.Count));
		}
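
All four examples compute a sample's timestamp the same way: `sampleDuration * (sampleIndex - 1) / timeScale`. The division must be floating-point, which is why the explicit casts above matter; with integral operands every sub-second timestamp would truncate to zero. A standalone illustration with made-up AAC-style numbers (1024 ticks per sample at a hypothetical 44100 Hz timescale):

    using System;

    long sampleDuration = 1024;   //hypothetical ticks per audio sample
    long timeScale = 44100;       //hypothetical track timescale (Hz)
    for (int sample = 1; sample <= 4; sample++)
    {
        double ts = (sampleDuration * (sample - 1)) / (double)timeScale;
        Console.WriteLine("sample {0}: {1:F4} s -> {2} ms", sample, ts, (int)Math.Round(ts * 1000.0));
    }
    // sample 1: 0.0000 s -> 0 ms
    // sample 2: 0.0232 s -> 23 ms
    // sample 3: 0.0464 s -> 46 ms
    // sample 4: 0.0697 s -> 70 ms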