/// <summary>
/// Given a byte[] containing an audio frame, return the audio MediaType.
/// </summary>
/// <param name="frame"></param>
/// <param name="compressionData"></param>
/// <returns></returns>
private static MediaTypeWaveFormatEx AudioMediaTypeFromFrame(byte[] frame, out byte[] compressionData) {
    if (frame == null) {
        compressionData = null;
        return(null);
    }

    BufferChunk bc = new BufferChunk(frame);
    short headerSize = bc.NextInt16(); //first short tells us the header size
    BufferChunk header = bc.NextBufferChunk(headerSize);

    //The header contains a custom serialization of AM_SAMPLE2_PROPERTIES followed by
    //AM_MEDIA_TYPE and an optional format type.

    //AM_SAMPLE2_PROPERTIES
    BufferChunk AmSample2Properties = header.NextBufferChunk(48);

    //AM_MEDIA_TYPE
    MediaTypeWaveFormatEx amt = new MediaTypeWaveFormatEx();
    ReconstituteBaseMediaType((MediaType)amt, header);

    compressionData = null;
    if (amt.FormatType == FormatType.WaveFormatEx) {
        ReconstituteAudioFormat(amt, header, out compressionData);
    }
    return(amt);
}
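//Usage sketch (hypothetical, not part of the original source): given a raw audio frame obtained
//elsewhere from the archive, reconstruct its MediaType and dump the format for debugging.
//
//  byte[] compressionData;
//  MediaTypeWaveFormatEx amt = AudioMediaTypeFromFrame(frame, out compressionData);
//  if (amt != null) {
//      DebugPrintAudioFormat(amt);
//      Debug.WriteLine(" codec private data length=" + compressionData.Length.ToString());
//  }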
public AudioTypeMonitor(MediaTypeWaveFormatEx mt, ulong duration, Guid guid, String cname, String name, long starttime, int streamID) {
    this.mt = mt;
    this.duration = duration;
    this.streamID = streamID;
    streams = new Hashtable();
    streams.Add(guid, new StreamData(guid, cname, name, starttime, streamID));
}
public ProfileData(MediaTypeVideoInfo videoMediaType, byte[] videoCodecData, MediaTypeWaveFormatEx audioMediaType, byte[] audioCodecData) {
    init();
    this.videoMediaType = videoMediaType;
    this.videoCodecData = videoCodecData;
    this.audioMediaType = audioMediaType;
    this.audioCodecData = audioCodecData;
}
public static void DebugPrintAudioFormat(MediaTypeWaveFormatEx mt) {
    Debug.WriteLine(" AvgBytesPerSec=" + mt.WaveFormatEx.AvgBytesPerSec.ToString());
    Debug.WriteLine(" BitsPerSample=" + mt.WaveFormatEx.BitsPerSample.ToString());
    Debug.WriteLine(" BlockAlign=" + mt.WaveFormatEx.BlockAlign.ToString());
    Debug.WriteLine(" Channels=" + mt.WaveFormatEx.Channels.ToString());
    Debug.WriteLine(" FormatTag=" + mt.WaveFormatEx.FormatTag.ToString());
    Debug.WriteLine(" SamplesPerSec=" + mt.WaveFormatEx.SamplesPerSec.ToString());
    Debug.WriteLine(" Size=" + mt.WaveFormatEx.Size.ToString());
}
/// <summary>
/// Assume that all the StreamMgrs provided are configured with the correct time range.  Do not assume
/// contiguous data, or compatible media types.
/// </summary>
/// <param name="audioMgr"></param>
/// <param name="log"></param>
public AudioMixer(StreamMgr[] audioMgr, LogMgr log) {
    fileStreamPlayer = null;
    this.audioMgr = audioMgr;
    this.log = log;
    refTime = long.MinValue;
    inbufs = new ArrayList();

    if (audioMgr.Length == 0) {
        return;
    }

    /// Examine the uncompressed MT's for each audioMgr, and implement a voting system so that the
    /// media type that is dominant for this mixer is the one we use, and other incompatible MT's
    /// are ignored in the mix.  Log a warning at places where the MT changes.
    /// Remember that each audioMgr may itself have multiple FileStreamPlayers which have different uncompressed
    /// media types.
    /// Finally we need to make our uncompressed MT available to the caller for use in configuring the writer.
    /// Later on let's look at ways to convert any common uncompressed types so that they are compatible.

    AudioCompatibilityMgr audioCompatibilityMgr = new AudioCompatibilityMgr();
    foreach (StreamMgr astream in audioMgr) {
        astream.CheckUncompressedAudioTypes(audioCompatibilityMgr);
    }

    this.uncompressedMediaType = audioCompatibilityMgr.GetDominantType();
    String warning = audioCompatibilityMgr.GetWarningString();
    if (warning != "") {
        log.WriteLine(warning);
        log.ErrorLevel = 5;
    }
    incompatibleGuids = audioCompatibilityMgr.GetIncompatibleGuids();

    // Here we also want to collect a "native" (compressed) profile corresponding to one of the "compatible"
    // streams.  This is useful in case we need to recompress.  Note this profile can be created if we have
    // a stream ID.
    compatibleStreamID = audioCompatibilityMgr.GetCompatibleStreamID();

    this.bitsPerSample = this.uncompressedMediaType.WaveFormatEx.BitsPerSample;
    this.bytesPerSample = bitsPerSample / 8;
    this.ticksPerSample = ((uint)Constants.TicksPerSec) /
        (this.uncompressedMediaType.WaveFormatEx.SamplesPerSec * this.uncompressedMediaType.WaveFormatEx.Channels);
    limit = (long)((ulong)1 << (int)bitsPerSample) / 2 - 1; //clip level

    buffers = new SampleBuffer[audioMgr.Length];
    for (int i = 0; i < buffers.Length; i++) {
        buffers[i] = new SampleBuffer(audioMgr[i], ticksPerSample, incompatibleGuids,
            this.uncompressedMediaType.WaveFormatEx.Channels);
    }
}
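//Worked example (illustrative; assumes Constants.TicksPerSec is 10,000,000, i.e. 100ns units):
//for 16-bit stereo PCM at 16000 samples per second, the values derived above would be:
//
//  bytesPerSample = 16 / 8;                     // 2 bytes per sample
//  ticksPerSample = 10000000 / (16000 * 2);     // 312 (integer division across both channels)
//  limit = (long)((ulong)1 << 16) / 2 - 1;      // 32767, the 16-bit clip level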
private void init() {
    this.audioMediaType = null;
    this.audioCodecData = null;
    this.videoMediaType = null;
    this.videoCodecData = null;
    this.videoCodecGuid = Guid.Empty;
    this.height = 0;
    this.width = 0;
    this.bitrate = 0;
    this.bufferwindow = 0;
}
private static ProfileData AudioFrameToProfileData(byte[] aframe) {
    if (aframe == null) {
        return(null);
    }
    byte[] audioCompressionData;
    MediaTypeWaveFormatEx amt = AudioMediaTypeFromFrame(aframe, out audioCompressionData);
    return(new ProfileData(amt, audioCompressionData));
}
/// <summary>
/// Return a new ProfileData instance containing MediaTypes and codec private data as determined
/// by the audio and video frames given.  One frame, but not both, may be null.
/// </summary>
/// <param name="aframe"></param>
/// <param name="vframe"></param>
/// <returns></returns>
private static ProfileData FramesToProfileData(byte[] aframe, byte[] vframe) {
    if ((aframe == null) && (vframe == null)) {
        return(null);
    }
    byte[] audioCompressionData;
    byte[] videoCompressionData;
    MediaTypeWaveFormatEx amt = AudioMediaTypeFromFrame(aframe, out audioCompressionData);
    MediaTypeVideoInfo vmt = VideoMediaTypeFromFrame(vframe, out videoCompressionData);
    return(new ProfileData(vmt, videoCompressionData, amt, audioCompressionData));
}
/// <summary>
/// Find the type with the highest vote count (the greatest total duration).
/// </summary>
/// <returns></returns>
public MediaTypeWaveFormatEx GetDominantType() {
    ulong maxduration = 0;
    MediaTypeWaveFormatEx dominantType = null;
    foreach (AudioTypeMonitor mon in audioTypeMonitors) {
        if (mon.Duration > maxduration) {
            maxduration = mon.Duration;
            dominantType = mon.MT;
        }
    }
    return(dominantType);
}
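//Usage sketch (hypothetical, not part of the original source): a caller votes each stream's
//uncompressed type with Check, then asks for the winner.  The argument values are placeholders.
//
//  AudioCompatibilityMgr mgr = new AudioCompatibilityMgr();
//  mgr.Check(mt, duration, streamGuid, cname, name, starttime, streamID);  //repeat per stream
//  MediaTypeWaveFormatEx dominant = mgr.GetDominantType();
//  String warning = mgr.GetWarningString();  //non-empty if incompatible types were seen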
/// <summary>
/// Fill in the audio-specific parts of the MediaType from the data in the BufferChunk.
/// Also return the compression data which is the remaining bytes at the end of the byte[].
/// </summary>
/// <param name="mt"></param>
/// <param name="bc"></param>
/// <param name="compressionData"></param>
public static void ReconstituteAudioFormat(MediaTypeWaveFormatEx mt, BufferChunk bc, out byte[] compressionData) {
    mt.WaveFormatEx.FormatTag = (ushort)NextInt16(bc);
    mt.WaveFormatEx.Channels = (ushort)NextInt16(bc);
    mt.WaveFormatEx.SamplesPerSec = (uint)NextInt32(bc);
    mt.WaveFormatEx.AvgBytesPerSec = (uint)NextInt32(bc);
    mt.WaveFormatEx.BlockAlign = (ushort)NextInt16(bc);
    mt.WaveFormatEx.BitsPerSample = (ushort)NextInt16(bc);
    mt.WaveFormatEx.Size = (ushort)NextInt16(bc);

    compressionData = new byte[mt.WaveFormatEx.Size];
    for (int i = 0; i < mt.WaveFormatEx.Size; i++) {
        compressionData[i] = bc.NextByte();
    }
}
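//For reference: the fields are read in the same order and widths as the standard WAVEFORMATEX
//structure, 18 bytes followed by Size bytes of codec private data:
//
//  offset 0   ushort  FormatTag
//  offset 2   ushort  Channels
//  offset 4   uint    SamplesPerSec
//  offset 8   uint    AvgBytesPerSec
//  offset 12  ushort  BlockAlign
//  offset 14  ushort  BitsPerSample
//  offset 16  ushort  Size (cbSize: length of the codec private data that follows)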
/// <summary>
/// Return true if the given type is compatible with this monitor's type for PCM mixing.  Also return
/// true if either type is null.
/// </summary>
/// <param name="mt2"></param>
/// <returns></returns>
public bool Matches(MediaTypeWaveFormatEx mt2) {
    if ((mt == null) || (mt2 == null)) {
        return(true); //one of them is unassigned.
    }
    if ((mt.MajorType == mt2.MajorType) &&
        (mt.SubType == mt2.SubType) &&
        (mt.WaveFormatEx.SamplesPerSec == mt2.WaveFormatEx.SamplesPerSec) &&
        (mt.WaveFormatEx.BitsPerSample == mt2.WaveFormatEx.BitsPerSample)) {
        return(true);
    }
    return(false);
}
/// <summary>
/// Record the MediaType vote and stream details for this stream.
/// </summary>
/// <param name="mt"></param>
/// <param name="duration"></param>
/// <param name="guid"></param>
/// <param name="cname"></param>
/// <param name="name"></param>
/// <param name="starttime"></param>
/// <param name="streamID"></param>
public void Check(MediaTypeWaveFormatEx mt, ulong duration, Guid guid, String cname, String name, long starttime, int streamID) {
    bool match = false;
    foreach (AudioTypeMonitor mon in audioTypeMonitors) {
        if (mon.Matches(mt)) {
            mon.AddToDuration(duration, guid, cname, name, starttime, streamID);
            match = true;
            break;
        }
    }
    if (!match) {
        audioTypeMonitors.Add(new AudioTypeMonitor(mt, duration, guid, cname, name, starttime, streamID));
    }
}
/// <summary>
/// Compare important fields to make sure the Audio MediaTypes are "compatible".  This is used
/// with compressed audio to make sure we won't cause a Windows Media Writer object to
/// throw an exception when we feed it stream samples from multiple RTP audio streams.
/// By definition a null is compatible with any media type.
/// Note: We do something similar for uncompressed samples in AudioTypeMonitor to validate MediaTypes
/// prior to audio mixing.
/// </summary>
/// PRI2: It is not totally clear that we check the correct set of parameters to cover all cases.
/// <param name="mt1"></param>
/// <param name="mt2"></param>
/// <returns></returns>
public static bool CompareAudioMediaTypes(MediaTypeWaveFormatEx mt1, MediaTypeWaveFormatEx mt2) {
    if ((mt1 == null) || (mt2 == null)) {
        return(true);
    }
    if ((mt1.MajorType != mt2.MajorType) ||
        (mt1.SubType != mt2.SubType) ||
        (mt1.FormatType != mt2.FormatType)) {
        return(false);
    }
    if (mt1.WaveFormatEx.SamplesPerSec != mt2.WaveFormatEx.SamplesPerSec) {
        return(false);
    }
    return(true);
}
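//Usage sketch (hypothetical names): before appending samples from another RTP stream, confirm its
//compressed audio type matches the type the writer was configured with.
//
//  if (!CompareAudioMediaTypes(writerAudioType, newStreamAudioType)) {
//      //Skip or recompress the new stream rather than feeding the writer an incompatible type.
//  }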
public FileStreamPlayer(String filename, long start, long end, bool compressed, int streamID) {
    this.streamID = streamID;
    this.filename = filename;
    this.start = start;
    this.end = end;
    this.duration = (ulong)(end - start);
    outOfData = false;
    this.guid = Guid.NewGuid();

    //create IWMSyncReader and open the file.
    uint hr = WMFSDKFunctions.WMCreateSyncReader(null, 0, out reader);
    IntPtr fn = Marshal.StringToCoTaskMemUni(filename);
    reader.Open(fn);
    Marshal.FreeCoTaskMem(fn);

    //Verify that the file contains one stream.
    uint outputcnt;
    reader.GetOutputCount(out outputcnt);
    Debug.Assert(outputcnt == 1);

    //Extract the MediaType for the stream.
    uint cmt = 0;
    IntPtr ipmt;
    IWMOutputMediaProps outputProps;
    reader.GetOutputProps(0, out outputProps);
    outputProps.GetMediaType(IntPtr.Zero, ref cmt);
    ipmt = Marshal.AllocCoTaskMem((int)cmt);
    outputProps.GetMediaType(ipmt, ref cmt);
    byte[] bmt = new byte[cmt];
    Marshal.Copy(ipmt, bmt, 0, (int)cmt);
    BufferChunk bc = new BufferChunk(bmt);
    byte[] cd;

    GUID majorTypeGUID;
    outputProps.GetType(out majorTypeGUID);
    if (WMGuids.ToGuid(majorTypeGUID) == WMGuids.WMMEDIATYPE_Video) {
        vmt = new MediaTypeVideoInfo();
        ProfileUtility.ReconstituteBaseMediaType((MediaType)vmt, bc);
        ProfileUtility.ReconstituteVideoFormat(vmt, bc, out cd);
        //Note: This is a special case which we would like to generalize:  The default output format for the
        //12bpp video was found not to return any uncompressed samples.  Setting this particular case to RGB 24 fixed it.
        if ((!compressed) && (vmt.VideoInfo.BitmapInfo.BitCount == 12)) {
            SetVideoOutputProps();
        }
    }
    else if (WMGuids.ToGuid(majorTypeGUID) == WMGuids.WMMEDIATYPE_Audio) {
        amt = new MediaTypeWaveFormatEx();
        ProfileUtility.ReconstituteBaseMediaType((MediaType)amt, bc);
        ProfileUtility.ReconstituteAudioFormat(amt, bc, out cd);
    }

    //if compressed is set, retrieve stream samples
    if (compressed) {
        reader.SetReadStreamSamples(1, 1);
    }
}
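//Usage sketch (hypothetical path and values): open one single-stream .wmv segment for a time range.
//Start and end are in 100ns ticks; passing false for 'compressed' requests decoded output samples,
//while true makes the reader deliver the stream (compressed) samples.
//
//  FileStreamPlayer fsp = new FileStreamPlayer(@"C:\archive\segment0.wmv",
//      startTicks, endTicks, false, streamID);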
public ProfileData(MediaTypeWaveFormatEx audioMediaType, byte[] audioCodecData) {
    init();
    this.audioMediaType = audioMediaType;
    this.audioCodecData = audioCodecData;
}