/// <summary>
/// Construct an object to wrap the audio, passing in the first audio frame and
/// the ID3 length tag (if any) to help with bitrate/duration calculations.
/// </summary>
/// <param name="firstFrame">first parsed MPEG audio frame; must not be null</param>
/// <param name="id3DurationTag">length from ID3v2, if any</param>
/// <exception cref="InvalidAudioFrameException">thrown when no frame was supplied</exception>
public Audio(AudioFrame firstFrame, TimeSpan? id3DurationTag)
{
    if (firstFrame == null)
    {
        throw new InvalidAudioFrameException("MPEG Audio Frame not found");
    }

    _firstFrame = firstFrame;
    _id3DurationTag = id3DurationTag;
}
/// <summary>
/// Construct an object to wrap the audio, passing in the first audio frame and
/// the ID3 length tag (if any) to help with bitrate/duration calculations.
/// </summary>
/// <param name="firstFrame">first parsed MPEG audio frame; must not be null</param>
/// <param name="id3DurationTag">length from ID3v2, if any</param>
/// <exception cref="InvalidAudioFrameException">thrown when no frame was supplied</exception>
public Audio(AudioFrame firstFrame, TimeSpan? id3DurationTag)
{
    if (firstFrame == null)
    {
        throw new InvalidAudioFrameException("MPEG Audio Frame not found");
    }

    _firstFrame = firstFrame;
    _id3DurationTag = id3DurationTag;
}
/// <summary>
/// Seek and create a derived type of AudioFrame from the stream.
/// </summary>
/// <param name="stream">source stream, advanced by the length of the frame on read</param>
/// <param name="remainingBytes">number of bytes in the audio block, as reported by the caller</param>
/// <returns>wrapper for a derived type of AudioFrame, or null at end of file</returns>
public static AudioFrame CreateFrame(Stream stream, uint remainingBytes)
{
    // Find and parse the frame header, then rewind the stream back to the start.
    // If we reach the end of the file, return null; any other error throws.
    long firstFrameStart = stream.Position;
    AudioFrameHeader header = CreateHeader(stream, remainingBytes);
    if (header == null)
    {
        return null;
    }

    uint frameFullSize;
    if (header.IsFreeBitRate)
    {
        // Free-bitrate file: the header does not encode a frame length, so find
        // the start of the next frame and use the difference as the frame size.
        // NB. This won't be very efficient!
        uint firstFrameHdrSize = (uint)(stream.Position - firstFrameStart);
        frameFullSize = firstFrameHdrSize + GetNextFrameOffset(stream, remainingBytes - firstFrameHdrSize);
    }
    else
    {
        uint? frameLengthInBytes = header.FrameLengthInBytes;
        Trace.Assert(frameLengthInBytes != null);
        frameFullSize = frameLengthInBytes.Value;
    }

    // Rewind the stream to the start of the frame, so we can read it all in one chunk.
    stream.Position = firstFrameStart;
    AudioFrame firstTry = new AudioFrame(stream, header, frameFullSize, remainingBytes);
    return CreateSpecialisedHeaderFrame(firstTry);
}
/// <summary>
/// Seek and create a derived type of AudioFrame from the stream.
/// </summary>
/// <param name="stream">source stream, advanced by the length of the frame on read</param>
/// <param name="remainingBytes">number of bytes in the audio block, as reported by the caller</param>
/// <returns>wrapper for a derived type of AudioFrame, or null at end of file</returns>
public static AudioFrame CreateFrame(Stream stream, uint remainingBytes)
{
    // Find and parse the frame header, then rewind the stream back to the start.
    // If we reach the end of the file, return null; any other error throws.
    long firstFrameStart = stream.Position;
    AudioFrameHeader header = CreateHeader(stream, remainingBytes);
    if (header == null)
    {
        return null;
    }

    uint frameFullSize;
    if (header.IsFreeBitRate)
    {
        // Free-bitrate file: the header does not encode a frame length, so find
        // the start of the next frame and use the difference as the frame size.
        // NB. This won't be very efficient!
        uint firstFrameHdrSize = (uint)(stream.Position - firstFrameStart);
        frameFullSize = firstFrameHdrSize + GetNextFrameOffset(stream, remainingBytes - firstFrameHdrSize);
    }
    else
    {
        uint? frameLengthInBytes = header.FrameLengthInBytes;
        Trace.Assert(frameLengthInBytes != null);
        frameFullSize = frameLengthInBytes.Value;
    }

    // Rewind the stream to the start of the frame, so we can read it all in one chunk.
    stream.Position = firstFrameStart;
    AudioFrame firstTry = new AudioFrame(stream, header, frameFullSize, remainingBytes);
    return CreateSpecialisedHeaderFrame(firstTry);
}
/// <summary>
/// Wrap a raw frame in the most specific AudioFrame subtype for its header kind.
/// </summary>
/// <param name="firstTry">raw frame already parsed from the source</param>
/// <returns>a Xing or VBRI wrapper when applicable, otherwise the frame unchanged</returns>
private static AudioFrame CreateSpecialisedHeaderFrame(AudioFrame firstTry)
{
    // TODO(review): a further AudioFrameLameHeader specialisation for Xing frames
    // carrying a LAME tag was sketched here but never implemented.
    if (firstTry.IsXingHeader)
    {
        return new AudioFrameXingHeader(firstTry);
    }
    if (firstTry.IsVbriHeader)
    {
        return new AudioFrameVbriHeader(firstTry);
    }
    return firstTry;
}
/// <summary>
/// Count frames and bytes of the file to see who's telling porkies.
/// Resets the running stats, then walks the audio payload frame by frame;
/// a parse failure marks the file as inconsistent instead of propagating.
/// </summary>
public void ScanWholeFile()
{
    _audioStats._numFrames = 0;
    _audioStats._numBytes = 0;
    using (Stream stream = OpenAudioStream())
    {
        uint payloadStart = (uint)stream.Position;
        try
        {
            for (;;)
            {
                uint consumed = (uint)stream.Position - payloadStart;
                AudioFrame frame = AudioFrameFactory.CreateFrame(stream, NumPayloadBytes - consumed);
                if (frame == null)
                {
                    break;
                }
                ++_audioStats._numFrames;
                _audioStats._numBytes += frame.FrameLengthInBytes;
            }
        }
        catch (Exception e)
        {
            // Deliberate catch-all: a malformed frame means the declared sizes
            // lied; record the inconsistency and stop scanning rather than throw.
            _hasInconsistencies = true;
            Trace.WriteLine(e.Message);
        }
    }
}
/// <summary>
/// Construct a VbriHeader frame from a pre-existing raw frame; "downcast".
/// (XML doc previously said "XingHeader" — copy-paste error, corrected.)
/// </summary>
/// <param name="baseclass">raw frame whose state is shared by this wrapper</param>
public AudioFrameVbriHeader(AudioFrame baseclass)
    : base(baseclass)
{
}
/// <summary>
/// Construct a XingHeader frame from a pre-existing raw frame; "downcast".
/// </summary>
/// <param name="baseclass">raw frame whose state is shared by this wrapper</param>
public AudioFrameXingHeader(AudioFrame baseclass)
    : base(baseclass)
{
}
/// <summary>
/// Create a derived type of AudioFrame from a buffer, or throw.
/// </summary>
/// <param name="sourceBuffer">raw bytes containing one complete frame</param>
/// <returns>wrapper for a derived type of AudioFrame</returns>
public static AudioFrame CreateFrame(byte[] sourceBuffer)
{
    AudioFrame firstTry = new AudioFrame(sourceBuffer);
    return CreateSpecialisedHeaderFrame(firstTry);
}
/// <summary>
/// Copy-construct an AudioFrame for derived classes ("downcast" support).
/// The buffer, parsed header, and header bytes are shared with
/// <paramref name="other"/>, not copied.
/// </summary>
/// <param name="other">frame whose state this instance shares</param>
protected AudioFrame(AudioFrame other)
{
    _header = other._header;
    _headerBytes = other._headerBytes;
    _frameBuffer = other._frameBuffer;
}
/// <summary>
/// Create a derived type of AudioFrame from a buffer, or throw.
/// </summary>
/// <param name="sourceBuffer">raw bytes containing one complete frame</param>
/// <returns>wrapper for a derived type of AudioFrame</returns>
public static AudioFrame CreateFrame(byte[] sourceBuffer)
{
    return CreateSpecialisedHeaderFrame(new AudioFrame(sourceBuffer));
}
/// <summary>
/// Wrap a raw frame in the most specific AudioFrame subtype for its header kind.
/// </summary>
/// <param name="firstTry">raw frame already parsed from the source</param>
/// <returns>a Xing or VBRI wrapper when applicable, otherwise the frame unchanged</returns>
private static AudioFrame CreateSpecialisedHeaderFrame(AudioFrame firstTry)
{
    // TODO(review): a further AudioFrameLameHeader specialisation for Xing frames
    // carrying a LAME tag was sketched here but never implemented.
    if (firstTry.IsXingHeader)
    {
        return new AudioFrameXingHeader(firstTry);
    }
    if (firstTry.IsVbriHeader)
    {
        return new AudioFrameVbriHeader(firstTry);
    }
    return firstTry;
}