/// <summary>
/// Resynchronizes the audio stream to the next valid audio frame.
/// </summary>
/// <remarks>
/// Loss of sync is typically caused by non-frame data in the stream (such as a
/// station intro file). We peek at the buffered data, locate the next valid frame
/// header, discard everything before it, and update the frame bookkeeping.
/// </remarks>
private void ResyncStream()
{
    byte[] audioData = new byte[ShoutcastStream.DefaultInitialBufferSize];
    int bytesRead;

    lock (this.syncRoot)
    {
        bytesRead = this.circularBuffer.Peek(audioData, 0, audioData.Length);
    }

    // If the buffer held fewer bytes than requested, trim the array so SyncStream
    // does not scan uninitialized (zero) bytes past the valid data.
    if (bytesRead < audioData.Length)
    {
        byte[] trimmed = new byte[bytesRead];
        Array.Copy(audioData, trimmed, bytesRead);
        audioData = trimmed;
    }

    AudioFrame audioFrame;
    int frameOffset = this.SyncStream(audioData, out audioFrame);

    // Throw away the bytes preceding the frame we synchronized on. The Get must run
    // under the same lock as the Peek above (the original code performed it outside
    // the lock), otherwise another thread could consume from the buffer in between.
    byte[] garbage = new byte[frameOffset];
    lock (this.syncRoot)
    {
        bytesRead = this.circularBuffer.Get(garbage, 0, frameOffset);
    }

    // Account for the discarded bytes so the metadata interval stays aligned.
    this.metadataCount += bytesRead;

    this.currentFrameSize = audioFrame.FrameSize;
    this.bytesLeftInFrame = this.currentFrameSize;
    this.nextFrame = audioFrame;
}
/// <summary>
/// Parses the initial audio stream byte buffer, sets up the media source and
/// stream attributes for the media pipeline, and seeds the circular buffer with
/// the audio data that follows the first valid frame.
/// </summary>
/// <param name="initialBuffer">Initial bytes from the audio stream.</param>
/// <exception cref="InvalidOperationException">
/// Thrown when the stream's content type is neither MP3 ("audio/mpeg") nor
/// AAC+ ("audio/aacp").
/// </exception>
private void ParseInitialBuffer(byte[] initialBuffer)
{
    // Data structures handed to the media pipeline via the MediaStreamSource.
    Dictionary<MediaSourceAttributesKeys, string> mediaSourceAttributes = new Dictionary<MediaSourceAttributesKeys, string>();
    Dictionary<MediaStreamAttributeKeys, string> mediaStreamAttributes = new Dictionary<MediaStreamAttributeKeys, string>();

    byte[] audioData = initialBuffer;
    int bytesRead = initialBuffer.Length;

    // Locate the first valid audio frame; everything before it is metadata/garbage.
    AudioFrame firstFrame;
    int frameOffset = this.SyncStream(audioData, out firstFrame);
    this.metadataCount = frameOffset;

    if (this.MediaInformation.ContentType == "audio/mpeg")
    {
        this.mpegLayer3WaveFormat = ShoutcastStream.CreateMp3WaveFormat(firstFrame);
    }
    else if (this.MediaInformation.ContentType == "audio/aacp")
    {
        this.mpegLayer3WaveFormat = ShoutcastStream.CreateAacPlusFormat(firstFrame);
    }
    else
    {
        throw new InvalidOperationException(string.Format(CultureInfo.InvariantCulture, "Invalid content type: {0}", this.MediaInformation.ContentType));
    }

    mediaStreamAttributes[MediaStreamAttributeKeys.CodecPrivateData] = this.mpegLayer3WaveFormat.ToHexString();
    this.audioStreamDescription = new MediaStreamDescription(MediaStreamType.Audio, mediaStreamAttributes);

    // Zero duration, since we are a potentially infinite radio stream.
    mediaSourceAttributes[MediaSourceAttributesKeys.Duration] = TimeSpan.Zero.Ticks.ToString(CultureInfo.InvariantCulture);

    // No seeking within the stream.
    mediaSourceAttributes[MediaSourceAttributesKeys.CanSeek] = "0";
    this.audioSourceAttributes = mediaSourceAttributes;

    this.currentFrameSize = firstFrame.FrameSize;

    // Track bytes left in the current frame so reads that are not frame-aligned work.
    this.bytesLeftInFrame = this.currentFrameSize;
    this.nextFrame = firstFrame;

    int bufferByteSize = this.CalculateCircularBufferSize(firstFrame.FrameSize);
    this.circularBuffer = new CircularBuffer<byte>(bufferByteSize, true);

    // Buffer everything from the first frame onward.
    this.circularBuffer.Put(audioData, frameOffset, bytesRead - frameOffset);
}
/// <summary>
/// Builds an <see cref="MpegLayer3WaveFormat"/> describing the supplied MP3 frame.
/// </summary>
/// <param name="audioFrame">Audio frame representing an MP3 frame.</param>
/// <returns>A wave format structure for the supplied audio frame.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="audioFrame"/> is null.</exception>
private static MpegLayer3WaveFormat CreateMp3WaveFormat(AudioFrame audioFrame)
{
    if (audioFrame == null)
    {
        throw new ArgumentNullException("audioFrame");
    }

    // Base WAVEFORMATEX portion. FormatTag 85 (0x0055) is WAVE_FORMAT_MPEGLAYER3;
    // Size 12 covers the extra MPEGLAYER3WAVEFORMAT fields that follow it.
    WaveFormatExtensible extensible = new WaveFormatExtensible();
    extensible.AverageBytesPerSecond = audioFrame.BitRate / 8;
    extensible.BitsPerSample = 0;
    extensible.BlockAlign = 1;
    extensible.Channels = (short)audioFrame.NumberOfChannels;
    extensible.FormatTag = 85;
    extensible.SamplesPerSec = audioFrame.SamplingRate;
    extensible.Size = 12;

    // MP3-specific extension fields.
    MpegLayer3WaveFormat mp3Format = new MpegLayer3WaveFormat(extensible)
    {
        Id = 1,
        BitratePaddingMode = 0,
        FramesPerBlock = 1,
        BlockSize = (short)audioFrame.FrameSize,
        CodecDelay = 0
    };

    return mp3Format;
}
/// <summary>
/// Builds an <see cref="HeAacWaveFormat"/> describing the supplied AAC+ frame.
/// </summary>
/// <param name="audioFrame">Audio frame representing an AAC+ frame.</param>
/// <returns>A wave format structure for the supplied audio frame.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="audioFrame"/> is null.</exception>
private static HeAacWaveFormat CreateAacPlusFormat(AudioFrame audioFrame)
{
    if (audioFrame == null)
    {
        throw new ArgumentNullException("audioFrame");
    }

    // Base WAVEFORMATEX portion; 0x1610 is WAVE_FORMAT_MPEG_HEAAC.
    WaveFormatExtensible extensible = new WaveFormatExtensible()
    {
        FormatTag = 0x1610,
        Channels = (short)audioFrame.NumberOfChannels,
        SamplesPerSec = audioFrame.SamplingRate,
        AverageBytesPerSecond = audioFrame.BitRate / 8,
        BlockAlign = 1,
        BitsPerSample = 0,
        Size = 12
    };

    // Extra 3 words in WAVEFORMATEX.
    HeAacWaveFormat aacFormat = new HeAacWaveFormat(extensible);
    aacFormat.PayloadType = 0x1; // Audio Data Transport Stream (ADTS). The stream contains an adts_sequence, as defined by MPEG-2.
    aacFormat.AudioProfileLevelIndication = 0xFE; // NOTE(review): presumably "unspecified" per HEAACWAVEINFO docs — confirm.
    aacFormat.StructType = 0;

    return aacFormat;
}
/// <summary>
/// Synchronizes the audio data on a frame header.
/// </summary>
/// <param name="audioData">Byte array representing a chunk of audio stream data.</param>
/// <param name="mpegFrame">Assigned the parsed frame located at the returned offset.</param>
/// <returns>Offset into <paramref name="audioData"/> of the next valid frame.</returns>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="audioData"/> is null.</exception>
/// <exception cref="ArgumentException">Thrown when <paramref name="audioData"/> is empty.</exception>
/// <exception cref="InvalidOperationException">
/// Thrown when the content type is unsupported, or when no sync point is found in the data.
/// </exception>
private int SyncStream(byte[] audioData, out AudioFrame mpegFrame)
{
    if (audioData == null)
    {
        throw new ArgumentNullException("audioData");
    }

    if (audioData.Length == 0)
    {
        throw new ArgumentException("audioData cannot have a Length of 0.");
    }

    // Select the frame format helpers for the stream's content type.
    int frameHeaderSize;
    byte[] syncBytes;
    Func<byte[], bool> isValidFrame;
    Func<byte[], AudioFrame> createFrame;

    if (this.MediaInformation.ContentType == "audio/mpeg")
    {
        frameHeaderSize = MpegFrame.FrameHeaderSize;
        syncBytes = MpegFrame.SyncBytes;
        isValidFrame = MpegFrame.IsValidFrame;
        createFrame = b => new MpegFrame(b);
    }
    else if (this.MediaInformation.ContentType == "audio/aacp")
    {
        frameHeaderSize = AacpFrame.FrameHeaderSize;
        syncBytes = AacpFrame.SyncBytes;
        isValidFrame = AacpFrame.IsValidFrame;
        createFrame = b => new AacpFrame(b);
    }
    else
    {
        throw new InvalidOperationException(string.Format(CultureInfo.InvariantCulture, "Invalid content type: {0}", this.MediaInformation.ContentType));
    }

    // Find the first candidate sync point.
    int result = BitTools.FindBitPattern(audioData, syncBytes, syncBytes);

    AudioFrame candidateFrame = null;
    byte[] frameHeader = new byte[frameHeaderSize];

    while (candidateFrame == null)
    {
        // result == -1 means no sync pattern remains in the data — likely the socket
        // returned no data. Also require that the full header fits inside the buffer,
        // which guards the Array.Copy below from reading out of range (the original
        // code could throw an ArgumentException here).
        if (result == -1 || result + frameHeaderSize > audioData.Length)
        {
            throw new InvalidOperationException("Sync bit pattern not found");
        }

        Array.Copy(audioData, result, frameHeader, 0, frameHeaderSize);

        if (isValidFrame(frameHeader))
        {
            candidateFrame = createFrame(frameHeader);

            // Reduce false positives: index forward by the frame size and inspect the
            // header that should immediately follow. If it is also a valid frame with
            // matching parameters (version, layer, sampling frequency, channels — per
            // the frame's Equals), we are in sync.
            int nextHeaderOffset = result + candidateFrame.FrameSize;
            if (nextHeaderOffset + frameHeaderSize <= audioData.Length)
            {
                byte[] nextFrameHeader = new byte[frameHeaderSize];
                Array.Copy(audioData, nextHeaderOffset, nextFrameHeader, 0, frameHeaderSize);

                bool confirmed = isValidFrame(nextFrameHeader)
                    && candidateFrame.Equals(createFrame(nextFrameHeader));

                if (!confirmed)
                {
                    candidateFrame = null;
                }
            }

            // If the follow-up header lies beyond the buffer we cannot verify it;
            // optimistically accept the candidate rather than reading out of range.
        }

        if (candidateFrame == null)
        {
            // Candidate rejected — resume searching just past this sync point.
            result = BitTools.FindBitPattern(audioData, syncBytes, syncBytes, result + 1);
        }
    }

    mpegFrame = candidateFrame;
    return result;
}
/// <summary>
/// Reads the next audio frame header from the buffer once the current frame has
/// been fully consumed, and primes the frame bookkeeping for it.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when the stream's content type is neither MP3 ("audio/mpeg") nor
/// AAC+ ("audio/aacp").
/// </exception>
private void SetupNextFrame()
{
    // Select the frame format helpers for the stream's content type.
    int frameHeaderSize;
    Func<byte[], bool> isValidFrame;
    Func<byte[], AudioFrame> createFrame;

    string contentType = this.MediaInformation.ContentType;
    if (contentType == "audio/mpeg")
    {
        frameHeaderSize = MpegFrame.FrameHeaderSize;
        isValidFrame = MpegFrame.IsValidFrame;
        createFrame = b => new MpegFrame(b);
    }
    else if (contentType == "audio/aacp")
    {
        frameHeaderSize = AacpFrame.FrameHeaderSize;
        isValidFrame = AacpFrame.IsValidFrame;
        createFrame = b => new AacpFrame(b);
    }
    else
    {
        throw new InvalidOperationException(string.Format(CultureInfo.InvariantCulture, "Invalid content type: {0}", contentType));
    }

    // Nothing to do until the current frame has been consumed entirely.
    if (this.bytesLeftInFrame != 0)
    {
        return;
    }

    byte[] frameHeader = new byte[frameHeaderSize];
    this.ReadOrPeekBuffer(frameHeader, frameHeaderSize, true);

    if (!isValidFrame(frameHeader))
    {
        // We are out of sync, probably due to an intro.
        this.ResyncStream();
        return;
    }

    AudioFrame nextAudioFrame = createFrame(frameHeader);
    this.currentFrameSize = nextAudioFrame.FrameSize;
    this.bytesLeftInFrame = this.currentFrameSize;
    this.nextFrame = nextAudioFrame;
}