/// <summary>
/// WriteSamples
/// Write several samples to the output file.
/// </summary>
/// <param name="slices"></param>
/// <param name="codecType"></param>
public override void WriteSamples(IEnumerable<Slice> slices, CodecTypes codecType)
{
    if (codecType == CodecTypes.Audio)
    {
        // ignore audio
    }
    else if (codecType == CodecTypes.Video)
    {
        IMediaTrack trak = MediaTracks.First(t => t.Codec.CodecType == CodecTypes.Video);
        foreach (Slice sample in slices)
        {
            // convert to bit-stream (Annex B) format: prefix NALUs with the 00 00 00 01 start code
            Stream mstrm = new MemoryStream(sample.SliceBytes);
            Stream ostr = H264.H264Utilities.H264Stream(firstSample, trak.Codec.PrivateCodecData,
                                                        mstrm, 0, (uint)sample.SliceBytes.Length);
            firstSample = false;
            mstrm.Close();
            ostr.Position = 0L;
            byte[] buf = new byte[ostr.Length];
            // Stream.Read is not guaranteed to return all requested bytes in one call, so loop until done.
            int offset = 0;
            int count;
            while (offset < buf.Length && (count = ostr.Read(buf, offset, buf.Length - offset)) > 0)
            {
                offset += count;
            }
            ostr.Close();
            sample.SliceBytes = buf;
            Stream.Write(sample.SliceBytes, 0, sample.SliceBytes.Length);
        }
    }
    else
    {
        throw new Exception("WriteSamples: unknown codec type");
    }
}
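// H264Utilities.H264Stream is external to this snippet. As a rough illustration of the conversion
// it performs (an assumption for illustration, not the actual implementation), turning an MP4-style
// length-prefixed sample into Annex B replaces each 4-byte big-endian NALU length with the
// 00 00 00 01 start code; SPS/PPS from the codec private data would additionally be emitted before
// the first sample:
private static byte[] ToAnnexB(byte[] lengthPrefixed)
{
    using (var output = new System.IO.MemoryStream())
    {
        int pos = 0;
        while (pos + 4 <= lengthPrefixed.Length)
        {
            // read the 4-byte big-endian NALU length
            int naluLen = (lengthPrefixed[pos] << 24) | (lengthPrefixed[pos + 1] << 16)
                        | (lengthPrefixed[pos + 2] << 8) | lengthPrefixed[pos + 3];
            pos += 4;
            output.Write(new byte[] { 0, 0, 0, 1 }, 0, 4); // Annex B start code
            output.Write(lengthPrefixed, pos, naluLen);    // NALU payload, unchanged
            pos += naluLen;
        }
        return output.ToArray();
    }
}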
private static int GetChannels(IMediaTrack track)
{
    int result;
    // if the attribute is missing or malformed, TryParse leaves result at 0
    int.TryParse(track.Attributes.GetEntryIgnoreCase(ChannelsAttribute), out result);
    return result;
}
public void TriggerLogicalBreak(IMediaTrack inTrack)
{
    if (MediaTrackLogicalBreak != null)
    {
        MediaTrackLogicalBreak(inTrack);
    }
}
/// <summary>
/// PrepareSampleWriting
/// If this is a destination track, this needs to be called to initialize the moov box structure.
/// Derived classes implement this method, but must still call this base method.
/// </summary>
/// <param name="sourceTrack"></param>
public virtual void PrepareSampleWriting(IMediaTrack sourceTrack, ref ulong currMdatOffset)
{
    if (sourceTrack.SampleStreamLocations.Count > 0)
    {
        this.TrackFormat.PrepareSampleWriting(sourceTrack.SampleStreamLocations, ref currMdatOffset);
    }
}
protected void SourceStream_MediaTrackAdded(IMediaTrack inUpdatedTrack)
{
    if (RecodeTrackAvailable != null)
    {
        RecodeTrackAvailable(inUpdatedTrack);
    }
    inUpdatedTrack.SampleAvailable += new SampleHandler(inUpdatedTrack_SampleAvailable);
}
public void AddTrack(IMediaTrack inTrack)
{
    Common.Logger.Instance.Info("[GenericMediaStream::AddTrack] added " + inTrack + ", " +
                                (inTrack != null ? inTrack.GetType().Name : string.Empty));
    MediaTracks.Add(inTrack);
    if (MediaTrackAdded != null)
    {
        MediaTrackAdded(inTrack);
    }
}
/// <summary>
/// PrepareSampleWriting
/// This overloaded method accepts slices and codec type as params.
/// </summary>
/// <param name="slicesInfo">The list of slices to be written, in StreamDataBlockInfo format</param>
/// <param name="codecType">A member of the CodecTypes enum</param>
public void PrepareSampleWriting(List<StreamDataBlockInfo> slicesInfo, CodecTypes codecType)
{
    IMediaTrack track = this[codecType, 0];
    if ((track != null) && (track.TrackFormat != null))
    {
        track.TrackFormat.PrepareSampleWriting(slicesInfo, ref _currMDatOffset);
    }
}
/// <summary>
/// PrepareSampleWriting
/// Note: this[CodecTypes.Audio, 0] is of the same type as sourceAudio, but they are different
/// instances (one is the destination, the other the source).
/// </summary>
/// <param name="sourceAudio"></param>
/// <param name="sourceVideo"></param>
public void PrepareSampleWriting(IMediaTrack sourceAudio, IMediaTrack sourceVideo)
{
    // NOTE: the sequence order of tracks is important because mdat offsets have to match.
    if (this[CodecTypes.Audio, 0] != null)
    {
        this[CodecTypes.Audio, 0].PrepareSampleWriting(sourceAudio, ref _currMDatOffset);
    }
    if (this[CodecTypes.Video, 0] != null)
    {
        this[CodecTypes.Video, 0].PrepareSampleWriting(sourceVideo, ref _currMDatOffset);
    }
}
/// <summary>
/// WriteSamples
/// Writing out a slice of both the audio and video tracks means that the tracks are going to be
/// interleaved in the final mdat.
/// NOTE: For fragments, the derived class ISMVStreamWriter takes care of fragments having a
/// separate mdat box for each fragment.
/// </summary>
/// <param name="sourceAudio"></param>
/// <param name="sourceVideo"></param>
public override void WriteSamples(IMediaTrack sourceAudio, IMediaTrack sourceVideo)
{
    // NOTE: the sequence order of tracks is important!
    this.WriteSamples(sourceAudio);
    this.WriteSamples(sourceVideo);
    // Use the current offset into the mdat to verify the file position AFTER writing all samples
    // in this batch; the 8 accounts for the mdat box header (4-byte size + 4-byte 'mdat' tag).
    if (tempMdat.Position != (long)base.CurrMDatOffset - 8)
    {
        throw new Exception("MPrStreamWriter: current file position does not match stbl data");
    }
}
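// A hedged sketch of the intended call sequence (the writer variable and its declared type are
// assumptions, not taken from this codebase): offsets are laid down first, then samples are
// written in the same audio-then-video order, so the interleaved mdat matches the stbl offsets
// computed during preparation.
public void WriteOneBatch(IMediaTrack sourceAudio, IMediaTrack sourceVideo)
{
    this.PrepareSampleWriting(sourceAudio, sourceVideo); // compute mdat offsets for this batch
    this.WriteSamples(sourceAudio, sourceVideo);         // must keep the same track order
}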
public void SelectTracks(string key, string value, long minBitrate, long maxBitrate)
{
    if (this.CurrentSegment != null)
    {
        IMediaStream videoStream = this.CurrentSegment.AvailableStreams.FirstOrDefault(x => x.Type == StreamType.Video);
        if (videoStream != null)
        {
            bool attributeAvailable = false;
            IList<IMediaTrack> tracks = new List<IMediaTrack>();
            if (key != null && value != null)
            {
                foreach (IMediaTrack trackInfo in videoStream.AvailableTracks)
                {
                    string keyValue;
                    trackInfo.CustomAttributes.TryGetValue(key, out keyValue);
                    if (!string.IsNullOrEmpty(keyValue) &&
                        keyValue.ToUpper(CultureInfo.InvariantCulture) == value.ToUpper(CultureInfo.InvariantCulture))
                    {
                        attributeAvailable = true;
                        if (trackInfo.Bitrate >= minBitrate && trackInfo.Bitrate <= maxBitrate)
                        {
                            tracks.Add(trackInfo);
                        }
                    }
                }
            }
            if (!attributeAvailable)
            {
                // no track carries the requested attribute: fall back to a pure bitrate filter
                tracks = videoStream.AvailableTracks.Where(x => x.Bitrate >= minBitrate && x.Bitrate <= maxBitrate).ToList();
            }
            if (tracks.Count > 0)
            {
                if (this.singleBitrate && tracks.Count > 1)
                {
                    // in single-bitrate mode, keep only the highest-bitrate match
                    long bitrate = tracks.Max(x => x.Bitrate);
                    IMediaTrack track = tracks.FirstOrDefault(x => x.Bitrate == bitrate);
                    tracks.Clear();
                    tracks.Add(track);
                }
                videoStream.SetSelectedTracks(tracks);
            }
        }
    }
}
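// Hypothetical usage (the selector instance and the attribute key/value pair are invented for
// illustration): prefer tracks tagged hardwareProfile=HD between 1 and 5 Mbps; if no track
// carries that attribute, the method falls back to the pure bitrate filter shown above.
selector.SelectTracks("hardwareProfile", "HD", 1000000, 5000000);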
public bool TryAddTrack(IMediaTrack track)
{
    if (this.Entity.Tracks.Contains(track))
    {
        this.Status.SetError(string.Format("Track '{0}' already exists in this group.", track.Title));
        return false;
    }
    else
    {
        this.Entity.Tracks.Add(track);
        this.Status.SetPositive(string.Format("Track '{0}' has been added to the group.", track.Title));
        return true;
    }
}
private void OnStreamSelected(IAdaptiveMediaPlugin mediaPlugin, IMediaStream stream)
{
    try
    {
        if ((stream.Type == StreamType.Binary || stream.Type == StreamType.Text) && stream.AvailableTracks.Any())
        {
            IMediaTrack track = stream.AvailableTracks.First();
            this.rceMediaPlugin.DownloadStreamData(track);
        }
    }
    catch (Exception)
    {
        // intentionally ignored
    }
}
private void OnDownloadStreamDataCompleted(IAdaptiveMediaPlugin mediaPlugin, IMediaTrack track, IStreamDownloadResult result)
{
    try
    {
        if (result != null && result.Stream != null)
        {
            var data = new byte[result.Stream.Length];
            // Read may return fewer bytes than requested, so loop until the buffer is full.
            int sum = 0;
            int count;
            do
            {
                count = result.Stream.Read(data, sum, data.Length - sum);
                sum += count;
            } while (count > 0 && sum < data.Length);
            this.ParseTimelineEvent(data);
        }
    }
    catch (Exception)
    {
        // intentionally ignored
    }
}
///<summary>Execute Edit item Command</summary>
void DoEditCmd(object prm = null)
{
    IMediaTrack mt = (prm as IMediaTrack) ?? this.CurrentTrack;
    if (mt != null)
    {
        if (mt.IsGroup)
        {
            TrackGroupVModel gvm = new TrackGroupVModel();
            gvm.Entity = mt as MediaTrackGroup;
            Views.PopupView vw = new Views.PopupView(gvm);
            vw.WindowStartupLocation = System.Windows.WindowStartupLocation.CenterScreen;
            vw.Topmost = true;
            vw.Show();
        }
    }
}
public virtual void Initialize(GenericMediaTrack cachedTrack)
{
    track = cachedTrack;
    format = track.TrackFormat;
    stream = track.ParentStream;
    if (stream.IsForReading)
    {
        track.BlockWithSlice += new NextBlock(track_BlockWithSlice);
        track.TrackFormat.FetchNextBatch += new LazyRead(GetMoreBoxes);
        if ((writeCache == 0) && (readCache == 0))
        {
            PrepareSampleInfo(0UL); // fill up the cache with first four blocks
        }
    }
    else
    {
        track.PrepareMediaHeaders += new SlicePutRequest(track_PrepareMediaHeaders);
    }
}
public override void Recode(ulong startTime100NanoSec, ulong endTime100NanoSec, ushort videoTrackID)
{
    var vidTracks = DestStream.MediaTracks.Where(t => t is GenericVideoTrack);
    int vidTrackCount = (vidTracks == null) ? 0 : vidTracks.Count();

    if (endTime100NanoSec == 0)
    {
        // special case when endTime == 0:
        // using duration here is OK as it is the total time of the source
        endTime100NanoSec = SourceStream.DurationIn100NanoSecs;
    }

    if (endTime100NanoSec - startTime100NanoSec < MaxIterateDuration)
    {
        throw new Exception("Desired time interval for output stream too short");
    }

    int outTracks = DestStream.MediaTracks.Count;
    RecodeSet[] trackEnumerators = new RecodeSet[outTracks];
    int k = 0;
    int n = 0;
    foreach (IMediaTrack track in SourceStream.MediaTracks)
    {
        if (((track.Codec.CodecType == CodecTypes.Audio) && (audioOrVideoOrBoth != TracksIncluded.Video)) ||
            ((track.Codec.CodecType == CodecTypes.Video) &&
             ((videoTrackID == 0) || (track.TrackID == videoTrackID)) &&
             (audioOrVideoOrBoth != TracksIncluded.Audio)))
        {
            RecodeSet recodeSet = new RecodeSet();
            recodeSet.sourceTrack = (IMediaTrackSliceEnumerator)track.GetEnumerator();
            recodeSet.sourceTrack.Reset();
            recodeSet.pendingChunkSlices = new List<Slice>();
            IMediaTrack destination = DestStream[recodeSet.sourceTrack.CodecType, 0];
            if ((track.Codec.CodecType != CodecTypes.Video) || (vidTrackCount == 1))
            {
                destination = DestStream[recodeSet.sourceTrack.CodecType, 0];
            }
            else if (vidTrackCount > 1)
            {
                destination = vidTracks.ElementAt(n);
                n++;
            }
            if (destination == null)
            {
                throw new Exception(string.Format("No {0} destination track. Try vo or so option.", recodeSet.sourceTrack.CodecType));
            }
            // Normally the destination TrackDurationIn100NanoSecs is set to the source duration;
            // here we reset it to zero because the output may be shorter than the source duration
            // (for example, if the start time is greater than zero).
            destination.TrackDurationIn100NanoSecs = 0UL;
            recodeSet.destination = destination;
            recodeSet.destinationTrack = (IMediaTrackSliceEnumerator)destination.GetEnumerator();
            recodeSet.destinationTrack.Reset();
            trackEnumerators[k++] = recodeSet;
        }
    }

    RaiseRecodeProgressUpdate(0.01f, true, null); // indicate we have completed a portion of the work

    // Need to call MoveNext() first for all source track enumerators.
    foreach (RecodeSet recodeSet in trackEnumerators)
    {
        while (recodeSet.sourceTrack.MoveNext())
        {
            if (recodeSet.sourceTrack.Current != null)
            {
                break;
            }
        }
    }

    IVideoTrack videoTrack = (IVideoTrack)SourceStream[CodecTypes.Video, 0];
    ulong prevSyncTime = 0UL;
    bool validSyncPointsFound = false;
    foreach (ulong syncTime in EnumerateSyncPoints(videoTrack))
    {
        // Cycle through all of the sync points in the video...
        Logger.Instance.Info("[GenericRecodeWRC::Recode] [merge] iterating at syncTime [" + syncTime + "].");
        if ((syncTime > endTime100NanoSec) && (prevSyncTime > endTime100NanoSec))
        {
            break; // if we are past the requested end time, stop doing work
        }

        // Each source and destination track has its own, independent counter (enumerator).
        // The slices are synced with respect to time, NOT with respect to index.
        // The outer for loop below iterates through each track being recoded;
        // the inner while loop iterates through each slice skipped.
        // .timeStart == time relative to source track at which recoding starts (should be first slice NOT skipped);
        // .indexStart == index of first slice NOT skipped.
        if (startTime100NanoSec > prevSyncTime)
        {
            // Skip a portion of slices.
            for (int i = 0; i < trackEnumerators.Length; i++)
            {
                if (trackEnumerators[i].sourceTrack.CurrentTimeStampNew.HasValue == false)
                {
                    continue; // b-frame, so we can't use it to compare
                }
                while (trackEnumerators[i].sourceTrack.CurrentTimeStampNew.Value < syncTime)
                {
                    Slice slice = trackEnumerators[i].sourceTrack.Current;
                    if (slice == null)
                    {
                        break;
                    }
                    if (slice.TimeStampNew.HasValue == false)
                    {
                        continue; // it's a b-frame, thus no time is available
                    }
                    trackEnumerators[i].timeStart = slice.TimeStampNew.Value; // guaranteed to have a value at this point
                    trackEnumerators[i].indexStart = slice.index;
                    // Find the next valid CurrentTimeStampNew value.
                    bool tmpEnd = false;
                    while (true)
                    {
                        if (!trackEnumerators[i].sourceTrack.MoveNext())
                        {
                            tmpEnd = true;
                            break; // ended
                        }
                        if (trackEnumerators[i].sourceTrack.CurrentTimeStampNew.HasValue == true)
                        {
                            break; // found it
                        }
                    }
                    if (tmpEnd == true)
                    {
                        break;
                    }
                }
            }
            prevSyncTime = syncTime;
            continue;
        }

        // If we never hit this condition, nothing was actually taken in to process,
        // which causes an exception down the road.
        validSyncPointsFound = true;

        // Each source and destination track has its own, independent counter (enumerator).
        // The slices are synced with respect to time, NOT with respect to index.
        // The outer foreach loop below iterates through each track being recoded;
        // the inner while loop iterates through each slice.
        // recodeSet.sourceTrack ==> source track enumerator
        // recodeSet.destinationTrack ==> destination track enumerator
        ulong timeStamp100NanoSec = ulong.MaxValue;
        foreach (RecodeSet recodeSet in trackEnumerators)
        {
            recodeSet.pendingChunkSlices.Clear();
            // Start writing the actual data.
            while (recodeSet.sourceTrack.CurrentTimeStampNew.HasValue == false ||
                   recodeSet.sourceTrack.CurrentTimeStampNew.Value <= syncTime)
            {
                Slice slice = recodeSet.sourceTrack.Current;
                if (slice == null)
                {
                    break;
                }
                //Logger.Instance.Info("[GenericRecodeWRC::Recode] dumping slice [" + slice.TimeStampNew + ", dur " + (int)slice.SliceDuration + "], track type [" + recodeSet.sourceTrack.CodecType + "].");
                // Prepare the slice; apply position and time compensation to rebase it to the start of the extract.
                slice.index -= recodeSet.indexStart;
                if (slice.TimeStampNew.HasValue)
                {
                    // TimeStamp == null if we are a b-frame, thus we are not here...
                    if (slice.TimeStampNew.Value < recodeSet.timeStart)
                    {
                        throw new Exception("GenericRecodeWRC.Recode: Offset time stamping error");
                    }
                    // adjust time-stamp and index (offset from time start)
                    slice.TimeStampNew -= recodeSet.timeStart;
                    if (timeStamp100NanoSec == ulong.MaxValue || slice.TimeStampNew.Value > timeStamp100NanoSec)
                    {
                        timeStamp100NanoSec = slice.TimeStampNew.Value; // take the value for the progress report
                    }
                }
                // Put the slice in the pending chunk buffer for overview and confirmation.
                recodeSet.pendingChunkSlices.Add(slice);
                // position to next output slice
                recodeSet.destinationTrack.MoveNext();
                // put slice in destination track
                recodeSet.destinationTrack.SetCurrent(slice);
                recodeSet.destination.TrackDurationIn100NanoSecs += (ulong)slice.SliceDuration;
                // move to next input slice, exit if done
                if (!recodeSet.sourceTrack.MoveNext())
                {
                    break;
                }
            }
        }

        // Report progress.
        if (timeStamp100NanoSec != ulong.MaxValue)
        {
            float progress = (float)(((double)timeStamp100NanoSec - (double)startTime100NanoSec) /
                                     ((double)endTime100NanoSec - (double)startTime100NanoSec));
            if (progress > 1)
            {
                Common.Logger.Instance.Error("[GenericRecodeWRC::Recode] Progress value [" + progress + "] mis-calculated, progress report skipped.");
            }
            else
            {
                RaiseRecodeProgressUpdate(progress, true, null);
            }
        }
        prevSyncTime = syncTime;
    }

    if (validSyncPointsFound == false)
    {
        // Nothing meaningful found to process, so end now.
        // Do not call DestStream.FinalizeStream(), as that would try to write and cause an exception.
        RaiseRecodeProgressUpdate(1, false, null);
        RaiseRecodeProgressUpdate(2, false, null);
        return;
    }

    RaiseRecodeProgressUpdate(1, true, null); // all the work is done, but there may be some finalizers left

    // Assemble all stbl or moof boxes.
    // Write out the mdat box in the case of MP4 output;
    // in the case of fragmented files (ISMV output), all moof and mdat boxes have already been
    // written out at this point, and we only need to write out the mfra box, if it is needed.
    DestStream.FinalizeStream();
    RaiseRecodeProgressUpdate(2, true, null); // everything is completed
}
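// Hypothetical driver for the method above (the recoder construction is an assumption; times are
// in 100-nanosecond units, matching the parameter names): extract the span from 10 s to 60 s.
var recoder = new GenericRecodeWRC(SourceStream, DestStream); // assumed constructor
recoder.Recode(startTime100NanoSec: 100000000UL,  // 10 s * 10,000,000 ticks/s
               endTime100NanoSec:   600000000UL,  // 60 s
               videoTrackID: 0);                  // 0 == accept any video track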
public override void WriteSamples(IMediaTrack sourceAudio, IMediaTrack sourceVideo)
{
    // ignore audio
    this.WriteSamples(sourceVideo);
}
public void Dispose()
{
    MediaTrack = null;
}
public IGenericMediaTrackEnumerator(IMediaTrack inTrack)
{
    MediaTrack = inTrack;
    slice = null;
}
/// <summary>
/// WriteSamples
/// Writing out a slice of both the audio and video tracks means that the tracks are going to be
/// interleaved in the final mdat.
/// </summary>
/// <param name="sourceAudio"></param>
/// <param name="sourceVideo"></param>
public virtual void WriteSamples(IMediaTrack sourceAudio, IMediaTrack sourceVideo)
{
    throw new NotImplementedException("Have to implement WriteSamples(GenericAudioTrack sourceAudio, GenericVideoTrack sourceVideo) in derived class");
}
private void MediaPlugin_DownloadStreamDataFailed(IAdaptiveMediaPlugin mediaPlugin, IMediaTrack track, IDataChunk dataChunk, Exception error)
{
    string message = string.Format(SilverlightMediaFrameworkResources.GenericErrorOccurredLogMessage,
                                   "MediaPlugin_DownloadStreamDataFailed", error.Message);
    SendLogEntry(KnownLogEntryTypes.DownloadStreamDataFailed, LogLevel.Error, message);
}
//private void MediaPlugin_VideoPlaybackTrackChanged(IAdaptiveMediaPlugin adaptiveMediaPlugin, IMediaTrack track)
//{
//    Dispatcher.BeginInvoke(() => OnVideoPlaybackTrackChanged(track.Bitrate));
//}

private void MediaPlugin_VideoDownloadTrackChanged(IAdaptiveMediaPlugin adaptiveMediaPlugin, IMediaTrack track)
{
    Dispatcher.BeginInvoke(() => OnVideoDownloadTrackChanged(track.Bitrate));
}
private void MediaPlugin_DownloadStreamDataCompleted(IAdaptiveMediaPlugin mediaPlugin, IMediaTrack track, IStreamDownloadResult result)
{
    //Dispatcher.BeginInvoke(() => OnDownloadStreamDataCompleted(mediaPlugin, track, result));
    OnDownloadStreamDataCompleted(mediaPlugin, track, result);
}
protected virtual void OnDownloadStreamDataCompleted(IAdaptiveMediaPlugin mediaPlugin, IMediaTrack track, IStreamDownloadResult result)
{
    try
    {
        if (DataReceived != null)
        {
            int length = (int)result.Stream.Length;
            var data = new byte[length];
            int count;
            int sum = 0;
            // Read may return fewer bytes than requested, so loop until the buffer is full.
            do
            {
                count = result.Stream.Read(data, sum, length - sum);
                sum += count;
            } while (count > 0 && sum < length);
            DataReceived(this, new DataReceivedInfo(data, result.DataChunk, track.ParentStream.Attributes));
            SendLogEntry(KnownLogEntryTypes.DataReceived);
        }
    }
    catch (Exception err)
    {
        string message = string.Format(SilverlightMediaFrameworkResources.GenericErrorOccurredLogMessage,
                                       "OnDownloadStreamDataCompleted", err.Message);
        SendLogEntry(KnownLogEntryTypes.DownloadStreamDataCompleted, LogLevel.Error, message);
    }
}
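// The do/while above is the standard pattern for draining a Stream whose Read may return fewer
// bytes than requested. A reusable helper (a sketch, not part of this codebase) would be:
private static byte[] ReadFully(System.IO.Stream stream, int length)
{
    var data = new byte[length];
    int sum = 0;
    int count;
    // keep reading until the buffer is full or the stream reports end-of-data
    while (sum < length && (count = stream.Read(data, sum, length - sum)) > 0)
    {
        sum += count;
    }
    return data;
}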
///// <summary>
///// Copy constructor
///// </summary>
///// <param name="vt"></param>
//public IGenericVideoTrack(IGenericVideoTrack vt)
//    : base((GenericMediaTrack)vt)
//{
//    this.Codec.CodecType = CodecTypes.Video;
//    this.FrameSize = vt.FrameSize;
//    this.PayloadType = vt.PayloadType;
//}

//public IGenericVideoTrack(RawVideoTrackInfo trakInfo) : this()
//{
//    this.FrameSize.Width = trakInfo.Width;
//    this.FrameSize.Height = trakInfo.Height;
//    this.PayloadType = trakInfo.PayloadType;
//    this.Codec.PrivateCodecData = trakInfo.CodecPrivateData;
//}

public override void PrepareSampleWriting(IMediaTrack sourceTrack, ref ulong currMdatOffset)
{
    base.PrepareSampleWriting(sourceTrack, ref currMdatOffset);
}
public QBoxTrackEnumerator(IMediaTrack track) : base(track)
{
}
public void CancelDownloadStreamData(IMediaTrack track)
{
#if !WINDOWS_PHONE
    var mediaTrack = track as MediaTrack;
    if (mediaTrack != null)
    {
        _chunkDownloadManager.RemoveRequests(mediaTrack);
    }
#endif
}
/// <summary>
/// Downloads all of the available data from the specified track.
/// </summary>
/// <param name="track">the track that contains the data to be downloaded.</param>
public void DownloadStreamData(IMediaTrack track)
{
#if !WINDOWS_PHONE
    var mediaTrack = track as MediaTrack;
    if (mediaTrack != null)
    {
        _chunkDownloadManager.AddRequests(
            mediaTrack.ParentStream.DataChunks.Select(chunk => new Tuple<MediaTrack, TimeSpan>(mediaTrack, chunk.Timestamp)));
    }
#endif
}
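// Hypothetical usage (the plugin field and the segment/stream lookups are assumptions invented
// for illustration): queue every chunk of the first text track, then cancel if it is no longer
// needed. CancelDownloadStreamData is the companion method shown earlier in this section.
IMediaTrack captionTrack = segment.AvailableStreams
    .First(s => s.Type == StreamType.Text)
    .AvailableTracks.First();
mediaPlugin.DownloadStreamData(captionTrack);
// ... later, when the data is no longer needed:
mediaPlugin.CancelDownloadStreamData(captionTrack);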
public void OnTrackDisabled(IConversation conversation, IParticipant participant, IMediaTrack mediaTrack)
{
    TrackDisabledHandler?.Invoke(conversation, participant, mediaTrack);
}
void AdaptiveMediaPlugin_VideoPlaybackTrackChanged(IAdaptiveMediaPlugin arg1, IMediaTrack videoPlaybackTrack)
{
    // When the SSME changes its playback bitrate, we need to re-set the maximum width and height
    // of the SSME explicitly.
    if (videoPlaybackTrack.Attributes.ContainsKey("MaxWidth"))
    {
        ssmeMaxWidth = Convert.ToDouble(videoPlaybackTrack.Attributes["MaxWidth"]);
    }
    if (videoPlaybackTrack.Attributes.ContainsKey("MaxHeight"))
    {
        ssmeMaxHeight = Convert.ToDouble(videoPlaybackTrack.Attributes["MaxHeight"]);
    }
    SetVideoProperties();
}
public void WriteSamples(IMediaTrack sourceTrack)
{
    WriteSamples(sourceTrack, sourceTrack.Codec.CodecType);
}
/// <summary>
/// WriteSamples
/// Writing out a slice of both the audio and video tracks means that the fragments are going to be
/// interleaved in the output file.
/// Don't call base.WriteSamples from here because at this point, both ftyp and moov boxes are already complete.
/// </summary>
/// <param name="sourceAudio"></param>
/// <param name="sourceVideo"></param>
public override void WriteSamples(IMediaTrack sourceAudio, IMediaTrack sourceVideo)
{
    // NOTE: the sequence order of tracks is important!
    this.WriteSamples(sourceAudio, CodecTypes.Audio);
    this.WriteSamples(sourceVideo, CodecTypes.Video);
}
public void WriteSamples(IMediaTrack sourceTrack)
{
    CodecTypes codecType = sourceTrack.Codec.CodecType;
    WriteSamples(sourceTrack, codecType);
}
/// <summary>
/// Downloads the chunk of data that is part of the specified track and has the specified timestamp id.
/// </summary>
/// <param name="track">the track that contains the data to be downloaded.</param>
/// <param name="chunk">the chunk to be downloaded.</param>
public void DownloadStreamData(IMediaTrack track, IDataChunk chunk)
{
#if !WINDOWS_PHONE
    var mediaTrack = track as MediaTrack;
    if (mediaTrack != null)
    {
        _chunkDownloadManager.AddRequest(mediaTrack, chunk.Timestamp);
    }
#endif
}