Example #1
        /// <summary>
        /// Tag sequence
        /// MetaData, Video config, Audio config, remaining audio and video 
        /// 
        /// Packet prefixes:
        /// 17 00 00 00 00 = Video extra data (first video packet)
        /// 17 01 00 00 00 = Video keyframe
        /// 27 01 00 00 00 = Video interframe
        /// af 00 ...   06 = Audio extra data (first audio packet)
        /// af 01          = Audio frame
        /// 
        /// Audio extra data(s):
        /// af 00                = Prefix
        /// 11 90 4f 14          = AAC Main   = aottype 0
        /// 12 10                = AAC LC     = aottype 1
        /// 13 90 56 e5 a5 48 00 = HE-AAC SBR = aottype 2
        /// 06                   = Suffix
        /// 
        /// Still not absolutely certain about this order or the bytes - need to verify later
        /// </summary>
        private void CreatePreStreamingTags()
        {
#if !SILVERLIGHT
            log.Debug("Creating pre-streaming tags");
#endif
            ITag tag = null;
            //byte[] body = null;
            ByteBuffer body;

            if (_hasVideo)
            {
                //video tag #1
                body = ByteBuffer.Allocate(41);
                body.AutoExpand = true;
                body.Put(PREFIX_VIDEO_CONFIG_FRAME);
                if (_videoDecoderBytes != null)
                {
                    body.Put(_videoDecoderBytes);
                }
                tag = new Tag(IOConstants.TYPE_VIDEO, 0, (int)body.Length, body.ToArray(), 0);
                //add tag
                _firstTags.AddLast(tag);
            }

            if (_hasAudio)
            {
                //audio tag #1
                body = ByteBuffer.Allocate(7);
                body.AutoExpand = true;
                body.Put(new byte[] { (byte)0xaf, (byte)0 }); //prefix
                if (_audioDecoderBytes != null)
                {
                    body.Put(_audioDecoderBytes);
                }
                else
                {
                    //default to aac-lc when the esds doesn't contain descriptor bytes
                    //Array.Copy(AUDIO_CONFIG_FRAME_AAC_LC, 0, body, PREFIX_AUDIO_FRAME.Length, AUDIO_CONFIG_FRAME_AAC_LC.Length);
                    //body[PREFIX_AUDIO_FRAME.Length + AUDIO_CONFIG_FRAME_AAC_LC.Length] = 0x06; //suffix
                    body.Put(AUDIO_CONFIG_FRAME_AAC_LC);
                }
                body.Put((byte)0x06); //suffix
                //previous tag size is zero when there was no video tag
                tag = new Tag(IOConstants.TYPE_AUDIO, 0, (int)body.Length, body.ToArray(), tag != null ? tag.BodySize : 0);
                //add tag
                _firstTags.AddLast(tag);
            }            
        }
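
The prefix constants used above are not shown in the snippet. Reconstructed from the byte values documented in the summary comment, they would look roughly like this (names taken from their usage here; the actual declarations live elsewhere in the reader class):

    // Packet prefixes as documented in the summary comment above.
    public static readonly byte[] PREFIX_VIDEO_CONFIG_FRAME = { 0x17, 0x00, 0x00, 0x00, 0x00 }; // AVC sequence header
    public static readonly byte[] PREFIX_VIDEO_KEYFRAME     = { 0x17, 0x01, 0x00, 0x00, 0x00 }; // AVC NALU, keyframe
    public static readonly byte[] PREFIX_VIDEO_FRAME        = { 0x27, 0x01, 0x00, 0x00, 0x00 }; // AVC NALU, interframe
    public static readonly byte[] PREFIX_AUDIO_FRAME        = { 0xaf, 0x01 };                   // AAC raw frame
    public static readonly byte[] AUDIO_CONFIG_FRAME_AAC_LC = { 0x12, 0x10 };                   // AudioSpecificConfig, AAC LC
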
Example #2
        /// <summary>
        /// Packages media data for return to providers.
        /// </summary>
        /// <returns></returns>
        public ITag ReadTag()
        {
            lock (this.SyncRoot)
            {
                ITag tag = null;
                //empty-out the pre-streaming tags first
                if (_firstTags.Count > 0)
                {
                    //log.debug("Returning pre-tag");
                    // Return first tags before media data
                    tag = _firstTags.First.Value;
                    _firstTags.RemoveFirst();
                    return tag;
                }
                //log.debug("Read tag - sample {} prevFrameSize {} audio: {} video: {}", new Object[]{currentSample, prevFrameSize, audioCount, videoCount});

                //get the current frame
                Mp4Frame frame = _frames[_currentFrame];
#if !SILVERLIGHT
                log.Debug(string.Format("Playback #{0} {1}", _currentFrame, frame));
#endif
                int sampleSize = frame.Size;

                int time = (int)Math.Round(frame.Time * 1000.0);
                //log.debug("Read tag - dst: {} base: {} time: {}", new Object[]{frameTs, baseTs, time});

                long samplePos = frame.Offset;
                //log.debug("Read tag - samplePos {}", samplePos);

                //determine frame type and packet body padding
                byte type = frame.Type;
                //assume video type
                int pad = 5;
                if (type == IOConstants.TYPE_AUDIO)
                {
                    pad = 2;
                }

                //create a buffer for the sample plus its packet prefix
                byte[] data = new byte[sampleSize + pad];
                try
                {
                    //prefix is different for keyframes
                    if (type == IOConstants.TYPE_VIDEO)
                    {
                        if (frame.IsKeyFrame)
                        {
                            //log.debug("Writing keyframe prefix");
                            Array.Copy(PREFIX_VIDEO_KEYFRAME, data, PREFIX_VIDEO_KEYFRAME.Length);
                        }
                        else
                        {
                            //log.debug("Writing interframe prefix");
                            Array.Copy(PREFIX_VIDEO_FRAME, data, PREFIX_VIDEO_FRAME.Length);
                        }
                        _videoCount++;
                    }
                    else
                    {
                        //log.debug("Writing audio prefix");
                        Array.Copy(PREFIX_AUDIO_FRAME, data, PREFIX_AUDIO_FRAME.Length);
                        _audioCount++;
                    }
                    //do we need to add the mdat offset to the sample position?
                    _stream.Position = samplePos;
                    //Stream.Read may return fewer bytes than requested, so loop until the full sample is read
                    int bytesRead = 0;
                    while (bytesRead < sampleSize)
                    {
                        int n = _stream.Read(data, pad + bytesRead, sampleSize - bytesRead);
                        if (n <= 0) break;
                        bytesRead += n;
                    }
                }
                catch (Exception ex)
                {
#if !SILVERLIGHT
                    log.Error("Error on channel position / read", ex);
#endif
                }

                //create the tag
                tag = new Tag(type, time, data.Length, data, _prevFrameSize);
                //log.debug("Read tag - type: {} body size: {}", (type == TYPE_AUDIO ? "Audio" : "Video"), tag.getBodySize());

                //increment the frame number
                _currentFrame++;
                //set the frame / tag size
                _prevFrameSize = tag.BodySize;
                //log.debug("Tag: {}", tag);
                return tag;
            }
        }
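
A typical consumer simply drains the reader in a loop: the pre-streaming config tags come out first, then one tag per MP4 frame. The driver below is a hypothetical sketch; HasMoreTags() and the writer sink are assumptions, not part of the snippets shown here:

    // Hypothetical driver loop for the reader above.
    while (reader.HasMoreTags())
    {
        ITag tag = reader.ReadTag(); // config tags first, then audio/video frames
        writer.WriteTag(tag);        // e.g. an FLV writer consuming the stream
    }
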
Example #3
        /// <summary>
        /// Push message through pipe.
        /// Synchronize this method to avoid FLV corruption from abrupt disconnection.
        /// </summary>
        /// <param name="pipe">Pipe.</param>
        /// <param name="message">Message to push.</param>
        public void PushMessage(IPipe pipe, IMessage message)
        {
            lock (this.SyncRoot)
            {
                if (message is ResetMessage)
                {
                    _startTimestamp = -1;
                    _offset += _lastTimestamp;
                    return;
                }
                else if (message is StatusMessage)
                {
                    return;
                }
                if (!(message is RtmpMessage))
                    return;

                if (_writer == null)
                {
                    Init();
                }
                RtmpMessage rtmpMsg = (RtmpMessage)message;
                IRtmpEvent msg = rtmpMsg.body;
                if (_startTimestamp == -1)
                {
                    _startTimestamp = msg.Timestamp;
                }
                int timestamp = msg.Timestamp - _startTimestamp;
                if (timestamp < 0)
                {
                    log.Warn("Skipping message with negative timestamp.");
                    return;
                }
                _lastTimestamp = timestamp;

                ITag tag = new Tag();

                tag.DataType = (byte)msg.DataType;
                tag.Timestamp = timestamp + _offset;
                if (msg is IStreamData)
                {
                    ByteBuffer data = (msg as IStreamData).Data;
                    tag.Body = data.ToArray();
                }

                try
                {
                    _writer.WriteTag(tag);
                }
                catch (IOException ex)
                {
                    log.Error("Error writing tag", ex);
                }
            }
        }
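
The timestamp handling deserves a trace: each message is rebased against the first timestamp seen (_startTimestamp), and a ResetMessage folds the last relative timestamp into _offset so a restarted stream stays monotonic. A worked example of the arithmetic, with invented values:

    // Worked example of the rebasing above (illustrative values only).
    int startTimestamp = -1, offset = 0, lastTimestamp = 0;

    int Rebase(int msgTimestamp)
    {
        if (startTimestamp == -1) startTimestamp = msgTimestamp;
        lastTimestamp = msgTimestamp - startTimestamp;
        return lastTimestamp + offset; // the timestamp actually written to the tag
    }

    Rebase(5000);            // 0
    Rebase(5080);            // 80
    startTimestamp = -1;     // ResetMessage arrives:
    offset += lastTimestamp; //   offset becomes 80
    Rebase(9000);            // 80, the second segment continues where the first ended
    Rebase(9040);            // 120
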
Example #4
        /// <summary>
        /// Create tag for metadata event.
        /// 
        /// Info from http://www.kaourantin.net/2007/08/what-just-happened-to-video-on-web_20.html
        /// <para>
        /// duration - Obvious. But unlike for FLV files this field will always be present.
        /// videocodecid - For H.264 we report 'avc1'.
        /// audiocodecid - For AAC we report 'mp4a', for MP3 we report '.mp3'.
        /// avcprofile - 66, 77, 88, 100, 110, 122 or 144 which corresponds to the H.264 profiles.
        /// avclevel - A number between 10 and 51 corresponding to the H.264 level, e.g. 31 for level 3.1.
        /// aottype - Either 0, 1 or 2. This corresponds to AAC Main, AAC LC and SBR audio types.
        /// moovposition - The offset in bytes of the moov atom in a file.
        /// trackinfo - An array of objects containing various information about all the tracks in a file
        ///   ex.
        ///     trackinfo[0].length: 7081
        ///     trackinfo[0].timescale: 600
        ///     trackinfo[0].sampledescription.sampletype: avc1
        ///     trackinfo[0].language: und
        ///     trackinfo[1].length: 525312
        ///     trackinfo[1].timescale: 44100
        ///     trackinfo[1].sampledescription.sampletype: mp4a
        ///     trackinfo[1].language: und
        /// 
        /// chapters - Information about chapters in audiobooks.
        /// seekpoints - Times you can feed directly into NetStream.seek().
        /// videoframerate - The frame rate of the video if a monotone frame rate is used. Most videos will have a monotone frame rate.
        /// audiosamplerate - The original sampling rate of the audio track.
        /// audiochannels - The original number of channels of the audio track.
        /// tags - ID3-like tag information.
        /// </para>
        /// 
        /// <para>
        /// width: Display width in pixels.
        /// height: Display height in pixels.
        /// duration: Duration in seconds.
        /// avcprofile: AVC profile number such as 66, 77, 100 etc.
        /// avclevel: AVC IDC level number such as 10, 11, 20, 21 etc.
        /// aacaot: AAC audio object type; 0, 1 or 2 are supported.
        /// videoframerate: Frame rate of the video in this MP4.
        /// seekpoints: Array that lists the available keyframes in a file as time stamps in milliseconds. 
        ///     This is optional as the MP4 file might not contain this information. Generally speaking, 
        ///     most MP4 files will include this by default.
        /// videocodecid: Usually a string such as "avc1" or "VP6F."
        /// audiocodecid: Usually a string such as ".mp3" or "mp4a."
        /// progressivedownloadinfo: Object that provides information from the "pdin" atom. This is optional 
        ///     and many files will not have this field.
        /// trackinfo: Object that provides information on all the tracks in the MP4 file, including their sample description ID.
        /// tags: Array of key value pairs representing the information present in the "ilst" atom, which is 
        ///     the equivalent of ID3 tags for MP4 files. These tags are mostly used by iTunes. 
        /// </para>
        /// </summary>
        /// <returns>Metadata event tag.</returns>
        ITag CreateFileMeta()
        {
#if !SILVERLIGHT
            log.Debug("Creating onMetaData");
#endif
            // Create tag for onMetaData event
            ByteBuffer buf = ByteBuffer.Allocate(1024);
            buf.AutoExpand = true;
            AMFWriter output = new AMFWriter(buf);
            output.WriteString("onMetaData");

            Dictionary<string, object> props = new Dictionary<string, object>();
            // Duration property
            props.Add("duration", ((double)_duration / (double)_timeScale));
            props.Add("width", _width);
            props.Add("height", _height);

            // Video codec id
            props.Add("videocodecid", _videoCodecId);
            props.Add("avcprofile", _avcProfile);
            props.Add("avclevel", _avcLevel);
            props.Add("videoframerate", _fps);
            // Audio codec id - watch for mp3 instead of aac
            props.Add("audiocodecid", _audioCodecId);
            props.Add("aacaot", _audioCodecType);
            props.Add("audiosamplerate", _audioTimeScale);
            props.Add("audiochannels", _audioChannels);

            props.Add("moovposition", _moovOffset);
            //props.put("chapters", ""); //this is for f4b - books
            if (_seekPoints != null)
            {
                props.Add("seekpoints", _seekPoints);
            }
            //tags will only appear if there is an "ilst" atom in the file
            //props.put("tags", "");

            List<Dictionary<String, Object>> arr = new List<Dictionary<String, Object>>(2);
            if (_hasAudio)
            {
                Dictionary<String, Object> audioMap = new Dictionary<String, Object>(4);
                audioMap.Add("timescale", _audioTimeScale);
                audioMap.Add("language", "und");

                List<Dictionary<String, String>> desc = new List<Dictionary<String, String>>(1);
                audioMap.Add("sampledescription", desc);

                Dictionary<String, String> sampleMap = new Dictionary<String, String>(1);
                sampleMap.Add("sampletype", _audioCodecId);
                desc.Add(sampleMap);

                if (_audioSamples != null)
                {
                    audioMap.Add("length_property", _audioSampleDuration * _audioSamples.Count);
                    //release some memory, since we're done with the vectors
                    _audioSamples.Clear();
                    _audioSamples = null;
                }
                arr.Add(audioMap);
            }
            if (_hasVideo)
            {
                Dictionary<String, Object> videoMap = new Dictionary<String, Object>(3);
                videoMap.Add("timescale", _videoTimeScale);
                videoMap.Add("language", "und");

                List<Dictionary<String, String>> desc = new List<Dictionary<String, String>>(1);
                videoMap.Add("sampledescription", desc);

                Dictionary<String, String> sampleMap = new Dictionary<String, String>(1);
                sampleMap.Add("sampletype", _videoCodecId);
                desc.Add(sampleMap);

                if (_videoSamples != null)
                {
                    videoMap.Add("length_property", _videoSampleDuration * _videoSamples.Count);
                    //release some memory, since we're done with the vectors
                    _videoSamples.Clear();
                    _videoSamples = null;
                }
                arr.Add(videoMap);
            }
            props.Add("trackinfo", arr.ToArray());
            //set this based on existence of seekpoints
            props.Add("canSeekToEnd", (_seekPoints != null));

            output.WriteAssociativeArray(ObjectEncoding.AMF0, props);
            buf.Flip();

            //now that all the meta properties are done, update the duration
            _duration = (long)Math.Round(_duration * 1000d);

            ITag result = new Tag(IOConstants.TYPE_METADATA, 0, buf.Limit, buf.ToArray(), 0);
            return result;
        }
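
Decoded on the client, the trackinfo array built above matches the structure in the summary comment. Using the documented sample values, the two entries would carry roughly this shape (note that the code writes the key as "length_property" while the comment documents the client-side field as "length"):

    // Shape of the trackinfo entries, using the sample values from the comment.
    using System.Collections.Generic;

    var trackinfo = new[]
    {
        new Dictionary<string, object>
        {
            { "length_property", 7081 },  // sample duration * sample count
            { "timescale", 600 },
            { "language", "und" },
            { "sampledescription", new[] { new Dictionary<string, string> { { "sampletype", "avc1" } } } }
        },
        new Dictionary<string, object>
        {
            { "length_property", 525312 },
            { "timescale", 44100 },
            { "language", "und" },
            { "sampledescription", new[] { new Dictionary<string, string> { { "sampletype", "mp4a" } } } }
        }
    };
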
Example #5
 /// <summary>
 /// Write "onMetaData" tag to the file.
 /// </summary>
 /// <param name="duration">Duration to write in milliseconds.</param>
 /// <param name="videoCodecId">Id of the video codec used while recording.</param>
 /// <param name="audioCodecId">Id of the audio codec used while recording.</param>
 private void WriteMetadataTag(double duration, object videoCodecId, object audioCodecId)
 {
     _metaPosition = _writer.BaseStream.Position;
     MemoryStream ms = new MemoryStream();
     AMFWriter output = new AMFWriter(ms);
     output.WriteString("onMetaData");
     Dictionary<string, object> props = new Dictionary<string, object>();
     props.Add("duration", _duration);
     if (videoCodecId != null)
     {
         props.Add("videocodecid", videoCodecId);
     }
     if (audioCodecId != null)
     {
         props.Add("audiocodecid", audioCodecId);
     }
     props.Add("canSeekToEnd", true);
     output.WriteAssociativeArray(ObjectEncoding.AMF0, props);
     byte[] buffer = ms.ToArray();
     if (_fileMetaSize == 0)
     {
         _fileMetaSize = buffer.Length;
     }
     ITag onMetaData = new Tag(IOConstants.TYPE_METADATA, 0, buffer.Length, buffer, 0);
     WriteTag(onMetaData);
 }
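
_metaPosition and _fileMetaSize are captured so the writer can later seek back and rewrite the metadata once the real duration is known. A minimal sketch of that follow-up step, assuming the same fields plus hypothetical _videoCodecId/_audioCodecId fields holding the codec ids (the actual FluorineFx update method isn't shown in these examples):

    // Hypothetical follow-up: rewrite the onMetaData tag in place when recording
    // ends. This relies on the rewritten body having the same size as the
    // original (tracked in _fileMetaSize), so the file layout is unchanged.
    private void UpdateDuration(double duration)
    {
        long current = _writer.BaseStream.Position;  // remember where we were
        _writer.BaseStream.Position = _metaPosition; // seek back to the metadata tag
        WriteMetadataTag(duration, _videoCodecId, _audioCodecId);
        _writer.BaseStream.Position = current;       // resume appending tags
    }
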
Example #6
        /// <summary>
        /// Create tag for metadata event.
        /// </summary>
        /// <returns></returns>
        private ITag CreateFileMeta()
        {
            // Create tag for onMetaData event
            ByteBuffer buf = ByteBuffer.Allocate(1024);
            buf.AutoExpand = true;
            AMFWriter output = new AMFWriter(buf);

            // Duration property
            output.WriteString("onMetaData");
            Dictionary<string, object> props = new Dictionary<string,object>();
            props.Add("duration", _duration / 1000.0);
            if (_firstVideoTag != -1)
            {
                long old = GetCurrentPosition();
                SetCurrentPosition(_firstVideoTag);
                ReadTagHeader();
                byte frametype = _reader.ReadByte();
                // Video codec id
                props.Add("videocodecid", frametype & IOConstants.MASK_VIDEO_CODEC);
                SetCurrentPosition(old);
            }
            if (_firstAudioTag != -1)
            {
                long old = GetCurrentPosition();
                SetCurrentPosition(_firstAudioTag);
                ReadTagHeader();
                byte frametype = _reader.ReadByte();
                // Audio codec id
                props.Add("audiocodecid", (frametype & IOConstants.MASK_SOUND_FORMAT) >> 4);
                SetCurrentPosition(old);
            }
            props.Add("canSeekToEnd", true);
            output.WriteAssociativeArray(ObjectEncoding.AMF0, props);
            buf.Flip();

            ITag result = new Tag(IOConstants.TYPE_METADATA, 0, buf.Limit, buf.ToArray(), 0);
            return result;
        }
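
The masks work because of how FLV packs codec information into the first byte of each tag body: for video tags the upper four bits are the frame type and the lower four the codec id; for audio tags the upper four bits are the sound format. A sketch with the standard values (the IOConstants fields above are assumed to match the FLV spec):

    // FLV first-body-byte layout per the FLV spec.
    const byte MASK_VIDEO_CODEC  = 0x0f; // video: bits 0-3 codec id, bits 4-7 frame type
    const byte MASK_SOUND_FORMAT = 0xf0; // audio: bits 4-7 sound format

    byte videoByte = 0x17;                                   // keyframe (1) + AVC (7)
    int videoCodecId = videoByte & MASK_VIDEO_CODEC;         // 7 = AVC/H.264
    byte audioByte = 0xaf;                                   // AAC, 44 kHz, 16-bit, stereo
    int audioCodecId = (audioByte & MASK_SOUND_FORMAT) >> 4; // 10 = AAC
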
Example #7
        /// <summary>
        /// Create tag for metadata event.
        /// </summary>
        /// <returns></returns>
        ITag CreateFileMeta()
        {
            log.Debug("Creating onMetaData");
            // Create tag for onMetaData event
            ByteBuffer buf = ByteBuffer.Allocate(1024);
            buf.AutoExpand = true;
            AMFWriter output = new AMFWriter(buf);
            output.WriteString("onMetaData");

            Hashtable props = new Hashtable();
            // Duration property
            props.Add("duration", ((double)_duration / (double)_timeScale));
            // Audio codec id - watch for mp3 instead of aac
            props.Add("audiocodecid", _audioCodecId);
            props.Add("aacaot", _audioCodecType);
            props.Add("audiosamplerate", _audioTimeScale);
            props.Add("audiochannels", _audioChannels);

            props.Add("moovposition", _moovOffset);
            //tags will only appear if there is an "ilst" atom in the file
            //props.put("tags", "");

            props.Add("canSeekToEnd", false);
            output.WriteAssociativeArray(ObjectEncoding.AMF0, props);
            buf.Flip();

            //now that all the meta properties are done, update the duration
            _duration = (long)Math.Round(_duration * 1000d);

            ITag result = new Tag(IOConstants.TYPE_METADATA, 0, buf.Limit, buf.ToArray(), 0);
            return result;
        }
Example #8
 /// <summary>
 /// Tag sequence
 /// MetaData, Audio config, remaining audio
 /// 
 /// Packet prefixes:
 /// af 00 ...   06 = Audio extra data (first audio packet)
 /// af 01          = Audio frame
 /// 
 /// Audio extra data(s):
 /// af 00                = Prefix
 /// 11 90 4f 14          = AAC Main   = aottype 0
 /// 12 10                = AAC LC     = aottype 1
 /// 13 90 56 e5 a5 48 00 = HE-AAC SBR = aottype 2
 /// 06                   = Suffix
 /// 
 /// Still not absolutely certain about this order or the bytes - need to verify later
 /// </summary>
 private void CreatePreStreamingTags()
 {
     log.Debug("Creating pre-streaming tags");
     ByteBuffer body = ByteBuffer.Allocate(41);
     body.AutoExpand = true;
     body.Put(new byte[] { (byte)0xaf, (byte)0 }); //prefix
     if (_audioDecoderBytes != null)
     {
         body.Put(_audioDecoderBytes);
     }
     else
     {
         //default to aac-lc when the esds doesn't contain descriptor bytes
         body.Put(Mp4Reader.AUDIO_CONFIG_FRAME_AAC_LC);
     }
     body.Put((byte)0x06); //suffix
     ITag tag = new Tag(IOConstants.TYPE_AUDIO, 0, (int)body.Length, body.ToArray(), _prevFrameSize);
     //add tag
     _firstTags.AddLast(tag);
 }
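
The two config bytes can be decoded by hand: an AAC AudioSpecificConfig starts with a 5-bit audio object type, a 4-bit sampling-frequency index, and a 4-bit channel configuration. (The MPEG object type for AAC LC is 2; the "aottype 1" in the comment is Flash's zero-based numbering of Main/LC/SBR.) A small decoding sketch:

    // Decode the first two bytes of an AAC AudioSpecificConfig.
    // For { 0x12, 0x10 }: objectType=2 (AAC LC), freqIndex=4 (44100 Hz), channels=2 (stereo).
    static void DecodeAudioConfig(byte b0, byte b1)
    {
        int bits = (b0 << 8) | b1;
        int objectType = (bits >> 11) & 0x1f; // 5 bits
        int freqIndex  = (bits >> 7) & 0x0f;  // 4 bits
        int channels   = (bits >> 3) & 0x0f;  // 4 bits
        System.Console.WriteLine($"aot={objectType} freqIndex={freqIndex} channels={channels}");
    }

    DecodeAudioConfig(0x12, 0x10); // prints: aot=2 freqIndex=4 channels=2
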
Example #9
        public ITag ReadTag()
        {
            lock (this.SyncRoot)
            {
                ITag tag = null;
                //empty-out the pre-streaming tags first
                if (_firstTags.Count > 0)
                {
                    //log.debug("Returning pre-tag");
                    // Return first tags before media data
                    tag = _firstTags.First.Value;
                    _firstTags.RemoveFirst();
                    return tag;
                }
                //log.debug("Read tag - sample {} prevFrameSize {} audio: {} video: {}", new Object[]{currentSample, prevFrameSize, audioCount, videoCount});

                //get the current frame
                Mp4Frame frame = _frames[_currentFrame];
                log.Debug(string.Format("Playback #{0} {1}", _currentFrame, frame));

                int sampleSize = frame.Size;

                int time = (int)Math.Round(frame.Time * 1000.0);
                //log.debug("Read tag - dst: {} base: {} time: {}", new Object[]{frameTs, baseTs, time});

                long samplePos = frame.Offset;
                //log.debug("Read tag - samplePos {}", samplePos);

                //determine the frame type (this reader handles audio only)
                byte type = frame.Type;

                //create a buffer for the sample plus its audio packet prefix
                byte[] data = new byte[sampleSize + Mp4Reader.PREFIX_AUDIO_FRAME.Length];
                try
                {
                    Array.Copy(Mp4Reader.PREFIX_AUDIO_FRAME, data, Mp4Reader.PREFIX_AUDIO_FRAME.Length);
                    //do we need to add the mdat offset to the sample position?
                    _fs.Position = samplePos;
                    //Stream.Read may return fewer bytes than requested, so loop until the full sample is read
                    int bytesRead = 0;
                    while (bytesRead < sampleSize)
                    {
                        int n = _fs.Read(data, Mp4Reader.PREFIX_AUDIO_FRAME.Length + bytesRead, sampleSize - bytesRead);
                        if (n <= 0) break;
                        bytesRead += n;
                    }
                }
                catch (Exception ex)
                {
                    log.Error("Error on channel position / read", ex);
                }

                //create the tag
                tag = new Tag(type, time, data.Length, data, _prevFrameSize);
                //increment the frame number
                _currentFrame++;
                //set the frame / tag size
                _prevFrameSize = tag.BodySize;
                //log.debug("Tag: {}", tag);
                return tag;
            }
        }