// Reads the per-subband bit allocation values for this frame into _allocation.
void ReadAllocation(IMpegFrame frame, int[] rateTable)
{
    // the selected rate table determines how many subbands are active
    var subBandCount = rateTable.Length;
    if (_jsbound > subBandCount)
    {
        _jsbound = subBandCount;
    }

    // start with every subband unallocated
    Array.Clear(_allocation[0], 0, SBLIMIT);
    Array.Clear(_allocation[1], 0, SBLIMIT);

    int sb = 0;

    // below the joint-stereo bound, each channel carries its own allocation
    while (sb < _jsbound)
    {
        var table = _allocLookupTable[rateTable[sb]];
        var bits = table[0];
        for (int ch = 0; ch < _channels; ch++)
        {
            _allocation[ch][sb] = table[frame.ReadBits(bits) + 1];
        }
        ++sb;
    }

    // at or above the bound, a single allocation value is shared by both channels
    while (sb < subBandCount)
    {
        var table = _allocLookupTable[rateTable[sb]];
        _allocation[0][sb] = _allocation[1][sb] = table[frame.ReadBits(table[0]) + 1];
        ++sb;
    }
}
// figure out which rate table to use... basically, high-rate full, high-rate limited,
// low-rate limited, low-rate minimal, and LSF.
static int[] SelectTable(IMpegFrame frame)
{
    // anything that isn't MPEG 1 uses the LSF table
    if (frame.Version != MpegVersion.Version1)
    {
        return _rateLookupTable[4];     // lsf, 30 subbands
    }

    var channelCount = frame.ChannelMode == MpegChannelMode.Mono ? 1 : 2;
    var bitRatePerChannel = frame.BitRate / channelCount / 1000;
    var is48kHz = frame.SampleRate == 48000;

    if ((bitRatePerChannel >= 56 && bitRatePerChannel <= 80) || (is48kHz && bitRatePerChannel >= 56))
    {
        return _rateLookupTable[0];     // high-rate, 27 subbands
    }
    if (!is48kHz && bitRatePerChannel >= 96)
    {
        return _rateLookupTable[1];     // high-rate, 30 subbands
    }
    if (frame.SampleRate != 32000 && bitRatePerChannel <= 48)
    {
        return _rateLookupTable[2];     // low-rate, 8 subbands
    }
    return _rateLookupTable[3];         // low-rate, 12 subbands
}
// Core decode: lazily creates and caches the decoder for the frame's layer, decodes
// into the shared channel buffers, then interleaves the result into dest.
// Returns the number of float elements written (0 if the layer is unrecognized).
int DecodeFrameImpl(IMpegFrame frame, Array dest, int destOffset)
{
    // Reset the frame's bit reader before decoding; without this a partially-read
    // frame would be decoded from a stale bit position.  (The other DecodeFrameImpl
    // in this codebase does the same.)
    frame.Reset();

    Decoder.LayerDecoderBase curDecoder = null;
    switch (frame.Layer)
    {
        case MpegLayer.LayerI:
            if (_layerIDecoder == null)
            {
                _layerIDecoder = new Decoder.LayerIDecoder();
            }
            curDecoder = _layerIDecoder;
            break;
        case MpegLayer.LayerII:
            if (_layerIIDecoder == null)
            {
                _layerIIDecoder = new Decoder.LayerIIDecoder();
            }
            curDecoder = _layerIIDecoder;
            break;
        case MpegLayer.LayerIII:
            if (_layerIIIDecoder == null)
            {
                _layerIIIDecoder = new Decoder.LayerIIIDecoder();
            }
            curDecoder = _layerIIIDecoder;
            break;
    }

    if (curDecoder != null)
    {
        curDecoder.SetEQ(_eqFactors);
        curDecoder.StereoMode = StereoMode;

        var cnt = curDecoder.DecodeFrame(frame, _ch0, _ch1);
        if (frame.ChannelMode == MpegChannelMode.Mono)
        {
            Buffer.BlockCopy(_ch0, 0, dest, destOffset * sizeof(float), cnt * sizeof(float));
        }
        else
        {
            // This is kinda annoying... if we're doing a downmix, we should technically only output a single channel
            // The problem is, our caller is probably expecting stereo output.  Grrrr....

            // We use Buffer.BlockCopy here because we don't know dest's type, but do know it's big enough to do the copy
            for (int i = 0; i < cnt; i++)
            {
                Buffer.BlockCopy(_ch0, i * sizeof(float), dest, destOffset * sizeof(float), sizeof(float));
                ++destOffset;
                Buffer.BlockCopy(_ch1, i * sizeof(float), dest, destOffset * sizeof(float), sizeof(float));
                ++destOffset;
            }
            cnt *= 2;   // interleaved output doubles the element count
        }
        return cnt;
    }
    return 0;
}
// Reads all quantized sample values for the frame into _samples, flattened into
// output order (SSLIMIT rows of SBLIMIT subbands per granule).
void ReadSamples(IMpegFrame frame)
{
    // load in all the data for this frame (1152 samples in this case)
    // NB: we flatten these into output order
    for (int ss = 0, idx = 0; ss < SSLIMIT; ss++, idx += SBLIMIT * (_granuleCount - 1))
    {
        for (int sb = 0; sb < SBLIMIT; sb++, idx++)
        {
            for (int ch = 0; ch < _channels; ch++)
            {
                // at or above the joint-stereo bound only channel 0 carries data;
                // channel 1 reuses it (see the final else below)
                if (ch == 0 || sb < _jsbound)
                {
                    var alloc = _allocation[ch][sb];
                    if (alloc != 0)
                    {
                        if (alloc < 0)
                        {
                            // grouping (Layer II only, so we don't have to play with the granule count)
                            // a single code word of -alloc bits holds three consecutive samples;
                            // "levels" is derived from the bit count (assumes the lookup table
                            // encodes grouped allocations as negative bit counts -- TODO confirm)
                            var val = frame.ReadBits(-alloc);
                            var levels = (1 << (-alloc / 2 + -alloc % 2 - 1)) + 1;
                            _samples[ch][idx] = val % levels;
                            val /= levels;
                            _samples[ch][idx + SBLIMIT] = val % levels;
                            _samples[ch][idx + SBLIMIT * 2] = val / levels;
                        }
                        else
                        {
                            // non-grouping: one independent code word per granule
                            for (int gr = 0; gr < _granuleCount; gr++)
                            {
                                _samples[ch][idx + SBLIMIT * gr] = frame.ReadBits(alloc);
                            }
                        }
                    }
                    else
                    {
                        // no energy...  zero out the samples
                        for (int gr = 0; gr < _granuleCount; gr++)
                        {
                            _samples[ch][idx + SBLIMIT * gr] = 0;
                        }
                    }
                }
                else
                {
                    // copy chan 0 to chan 1 (joint stereo shares the upper subbands)
                    for (int gr = 0; gr < _granuleCount; gr++)
                    {
                        _samples[1][idx + SBLIMIT * gr] = _samples[0][idx + SBLIMIT * gr];
                    }
                }
            }
        }
    }
}
// Computes how many main-data bytes ("slots") the frame holds after the header,
// CRC, and side info are accounted for.
static int GetSlots(IMpegFrame frame)
{
    // everything after the 4-byte header...
    var cnt = frame.FrameLength - 4;

    // ...minus the CRC when present...
    if (frame.HasCrc)
    {
        cnt -= 2;
    }

    // ...minus the side info, whose size depends on version and channel mode
    if (frame.Version == MpegVersion.Version1 && frame.ChannelMode != MpegChannelMode.Mono)
    {
        return cnt - 32;
    }
    if (frame.Version > MpegVersion.Version1 && frame.ChannelMode == MpegChannelMode.Mono)
    {
        return cnt - 9;
    }
    return cnt - 17;
}
// Validates arguments then decodes the frame's PCM into dest (float elements).
public int DecodeFrame(IMpegFrame frame, float[] dest, int destOffset)
{
    if (frame == null)
    {
        throw new ArgumentNullException("frame");
    }
    if (dest == null)
    {
        throw new ArgumentNullException("dest");
    }

    // the buffer must be able to hold the frame's full output
    var channels = frame.ChannelMode == MpegChannelMode.Mono ? 1 : 2;
    if (dest.Length - destOffset < channels * frame.SampleCount)
    {
        throw new ArgumentException("Buffer not large enough! Must be big enough to hold the frame's entire output. This is up to 2,304 elements.", "dest");
    }

    return DecodeFrameImpl(frame, dest, destOffset);
}
// Appends the frame's main-data bytes to the circular bit reservoir.
public void AddBits(IMpegFrame frame)
{
    for (var slots = GetSlots(frame); slots > 0; slots--)
    {
        var temp = frame.ReadBits(8);
        if (temp == -1)
        {
            throw new System.IO.InvalidDataException("Frame did not have enough bytes!");
        }

        _buf[++_end] = (byte)temp;
        if (_end == _buf.Length - 1)
        {
            _end = -1;  // wrap the write cursor
        }
    }

    // if the reader was fully drained, prime it for the next read
    if (_bitsLeft == 0)
    {
        _bitsLeft = 8;
    }
}
// Replaces each "has energy" marker (2) with the real 2-bit scalefactor
// selection read from the stream.  Bit order must stay subband-major,
// channel-minor, so the loops are not reorderable.
protected override void ReadScaleFactorSelection(IMpegFrame frame, int[][] scfsi, int channels)
{
    // we'll never have more than 30 active subbands
    const int maxSubbands = 30;
    for (int sb = 0; sb < maxSubbands; sb++)
    {
        for (int ch = 0; ch < channels; ch++)
        {
            if (scfsi[ch][sb] == 2)
            {
                scfsi[ch][sb] = frame.ReadBits(2);
            }
        }
    }
}
// Validates arguments then decodes the frame's PCM into dest as raw bytes
// (little/big endian per the local machine; 4 bytes per float sample).
public int DecodeFrame(IMpegFrame frame, byte[] dest, int destOffset)
{
    if (frame == null)
    {
        throw new ArgumentNullException("frame");
    }
    if (dest == null)
    {
        throw new ArgumentNullException("dest");
    }
    if (destOffset % 4 != 0)
    {
        throw new ArgumentException("Must be an even multiple of 4", "destOffset");
    }

    // convert the byte capacity to float elements and check it fits the frame
    var bufferAvailable = (dest.Length - destOffset) / 4;
    var channels = frame.ChannelMode == MpegChannelMode.Mono ? 1 : 2;
    if (bufferAvailable < channels * frame.SampleCount)
    {
        throw new ArgumentException("Buffer not large enough! Must be big enough to hold the frame's entire output. This is up to 9,216 bytes.", "dest");
    }

    // the impl works in float elements; scale the offset down and the count back up
    return DecodeFrameImpl(frame, dest, destOffset / 4) * 4;
}
// Reads the 6-bit scalefactors for each subband/channel, expanding the shared
// values indicated by the scalefactor selection info (_scfsi).
void ReadScaleFactors(IMpegFrame frame)
{
    for (int sb = 0; sb < SBLIMIT; sb++)
    {
        for (int ch = 0; ch < _channels; ch++)
        {
            var sf = _scalefac[ch];
            switch (_scfsi[ch][sb])
            {
                case 0:
                    // three distinct scalefactors, one per third
                    sf[0][sb] = frame.ReadBits(6);
                    sf[1][sb] = frame.ReadBits(6);
                    sf[2][sb] = frame.ReadBits(6);
                    break;

                case 1:
                    // two values: the first is shared by thirds 0 and 1
                    sf[0][sb] = sf[1][sb] = frame.ReadBits(6);
                    sf[2][sb] = frame.ReadBits(6);
                    break;

                case 2:
                    // one value shared by all three thirds
                    sf[0][sb] = sf[1][sb] = sf[2][sb] = frame.ReadBits(6);
                    break;

                case 3:
                    // two values: the second is shared by thirds 1 and 2
                    sf[0][sb] = frame.ReadBits(6);
                    sf[1][sb] = sf[2][sb] = frame.ReadBits(6);
                    break;

                default:
                    // no energy in this subband; force the scalefactors to 63
                    sf[0][sb] = 63;
                    sf[1][sb] = 63;
                    sf[2][sb] = 63;
                    break;
            }
        }
    }
}
/// <summary>
/// Decode the Mpeg frame into the provided buffer.
/// The result varies with the configured <see cref="StereoMode"/>:
/// <list type="bullet">
/// <item>
/// <description><see cref="NLayer.StereoMode.Both"/>: samples for both channels are interleaved (left first).</description>
/// </item>
/// <item>
/// <description><see cref="NLayer.StereoMode.LeftOnly"/> / <see cref="NLayer.StereoMode.RightOnly"/>: only the
/// selected channel's samples are written.</description>
/// </item>
/// <item>
/// <description><see cref="NLayer.StereoMode.DownmixToMono"/>: the two channels are down-mixed into one.</description>
/// </item>
/// </list>
/// </summary>
/// <param name="frame">The Mpeg frame to be decoded.</param>
/// <param name="dest">Destination buffer; decoded PCM (single-precision floats) is written here.</param>
/// <param name="destOffset">Writing offset into the destination buffer.</param>
/// <returns>The number of float elements written.</returns>
public int DecodeFrame(IMpegFrame frame, float[] dest, int destOffset)
{
    if (frame == null)
    {
        throw new ArgumentNullException("frame");
    }
    if (dest == null)
    {
        throw new ArgumentNullException("dest");
    }

    // make sure the caller's buffer can hold the frame's full output
    var required = (frame.ChannelMode == MpegChannelMode.Mono ? 1 : 2) * frame.SampleCount;
    if (dest.Length - destOffset < required)
    {
        throw new ArgumentException("Buffer not large enough! Must be big enough to hold the frame's entire output. This is up to 2,304 elements.", "dest");
    }

    return DecodeFrameImpl(frame, dest, destOffset);
}
// Appends the frame's main-data bytes to the circular bit reservoir and adjusts
// _start so that "overlap" bytes of older data remain readable ahead of this frame.
// Returns true when enough prior data is buffered to satisfy the overlap request.
public bool AddBits(IMpegFrame frame, int overlap)
{
    // remember where the write cursor was before this frame's bytes go in
    var originalEnd = _end;

    var slots = GetSlots(frame);
    while (--slots >= 0)
    {
        var temp = frame.ReadBits(8);
        if (temp == -1)
        {
            throw new System.IO.InvalidDataException("Frame did not have enough bytes!");
        }

        _buf[++_end] = (byte)temp;
        if (_end == _buf.Length - 1)
        {
            _end = -1;  // wrap the write cursor
        }
    }

    _bitsLeft = 8;

    if (originalEnd == -1)
    {
        // it's either the start of the stream or we've reset...  only return true if overlap says this frame is enough
        return (overlap == 0);
    }
    else
    {
        // it's not the start of the stream so calculate _start based on whether we have enough bytes left

        // if we have enough bytes, reset start to match overlap
        if ((originalEnd + 1 - _start + _buf.Length) % _buf.Length >= overlap)
        {
            _start = (originalEnd + 1 - overlap + _buf.Length) % _buf.Length;
            return (true);
        }
        // otherwise, just set start to match the start of the frame (we probably skipped a frame)
        else
        {
            // NOTE(review): unlike the branch above, this assignment is not wrapped
            // modulo _buf.Length and adds overlap instead of subtracting it --
            // verify this is the intended behavior for the skipped-frame case.
            _start = originalEnd + overlap;
            return (false);
        }
    }
}
// Returns the count of main-data bytes in the frame: total length less the
// 4-byte header, the optional 2-byte CRC, and the version/mode-dependent side info.
static int GetSlots(IMpegFrame frame)
{
    var cnt = frame.FrameLength - 4;    // drop the header

    if (frame.HasCrc)
    {
        cnt -= 2;                       // drop the CRC when present
    }

    // side info: 32 bytes for MPEG1 stereo, 9 for LSF mono, 17 otherwise
    if (frame.Version == MpegVersion.Version1 && frame.ChannelMode != MpegChannelMode.Mono)
    {
        return cnt - 32;
    }
    if (frame.Version > MpegVersion.Version1 && frame.ChannelMode == MpegChannelMode.Mono)
    {
        return cnt - 9;
    }
    return cnt - 17;
}
// this just reads the channel mode and set a few flags
void InitFrame(IMpegFrame frame)
{
    if (frame.ChannelMode == MpegChannelMode.Mono)
    {
        _channels = 1;
        _jsbound = SBLIMIT;
    }
    else
    {
        _channels = 2;
        // joint stereo shares the subbands at or above the bound; plain
        // stereo/dual-channel processes all subbands independently
        _jsbound = frame.ChannelMode == MpegChannelMode.JointStereo
            ? frame.ChannelModeExtension * 4 + 4
            : SBLIMIT;
    }
}
// Layer I/II decode pipeline: allocation, scalefactor selection, scalefactors,
// samples, then synthesis into the output buffers.
internal override int DecodeFrame(IMpegFrame frame, float[] ch0, float[] ch1)
{
    InitFrame(frame);

    ReadAllocation(frame, GetRateTable(frame));

    // Since Layer II has to know which subbands have energy, we use the "Layer I valid"
    // selection to mark that energy is present.  That way Layer I doesn't have to do
    // anything else.
    for (int i = 0; i < _scfsi[0].Length; i++)
    {
        _scfsi[0][i] = _allocation[0][i] != 0 ? 2 : -1;
        _scfsi[1][i] = _allocation[1][i] != 0 ? 2 : -1;
    }

    ReadScaleFactorSelection(frame, _scfsi, _channels);
    ReadScaleFactors(frame);
    ReadSamples(frame);

    return DecodeSamples(ch0, ch1);
}
/// <summary>
/// Decode the Mpeg frame into the provided buffer.  Behaves exactly like
/// <see cref="DecodeFrame(IMpegFrame, float[], int)"/> except the output is written
/// as a byte array that still represents single-precision floats (local endian).
/// </summary>
/// <param name="frame">The Mpeg frame to be decoded.</param>
/// <param name="dest">Destination buffer; decoded PCM is written here as raw float bytes.</param>
/// <param name="destOffset">Writing offset on the destination buffer; must be a multiple of 4.</param>
/// <returns>The number of bytes written.</returns>
public int DecodeFrame(IMpegFrame frame, byte[] dest, int destOffset)
{
    if (frame == null)
    {
        throw new ArgumentNullException("frame");
    }
    if (dest == null)
    {
        throw new ArgumentNullException("dest");
    }
    if (destOffset % 4 != 0)
    {
        throw new ArgumentException("Must be an even multiple of 4", "destOffset");
    }

    // capacity in float elements must cover the frame's full output
    var bufferAvailable = (dest.Length - destOffset) / 4;
    var required = (frame.ChannelMode == MpegChannelMode.Mono ? 1 : 2) * frame.SampleCount;
    if (bufferAvailable < required)
    {
        throw new ArgumentException("Buffer not large enough! Must be big enough to hold the frame's entire output. This is up to 9,216 bytes.", "dest");
    }

    // the impl counts float elements; convert offset down and the result back to bytes
    return DecodeFrameImpl(frame, dest, destOffset / 4) * 4;
}
// Reads the Layer III side info: main_data_begin, private bits, scfsi (MPEG 1 only),
// and the per-granule/per-channel coding parameters.  Bit layout follows the
// MPEG 1 (ISO 11172-3) and LSF (ISO 13818-3) side info definitions.
void ReadSideInfo(IMpegFrame frame)
{
    if (frame.Version == MpegVersion.Version1)
    {
        // main_data_begin      9
        _mainDataBegin = frame.ReadBits(9);

        // private_bits         3 or 5
        if (frame.ChannelMode == MpegChannelMode.Mono)
        {
            _privBits = frame.ReadBits(5);
            _channels = 1;
        }
        else
        {
            _privBits = frame.ReadBits(3);
            _channels = 2;
        }

        for (var ch = 0; ch < _channels; ch++)
        {
            // scfsi[ch][0...3]     1 x4
            _scfsi[ch][0] = frame.ReadBits(1);
            _scfsi[ch][1] = frame.ReadBits(1);
            _scfsi[ch][2] = frame.ReadBits(1);
            _scfsi[ch][3] = frame.ReadBits(1);
        }

        for (var gr = 0; gr < 2; gr++)
        {
            for (var ch = 0; ch < _channels; ch++)
            {
                // part2_3_length[gr][ch]       12
                _part23Length[gr][ch] = frame.ReadBits(12);
                // big_values[gr][ch]           9
                _bigValues[gr][ch] = frame.ReadBits(9);
                // global_gain[gr][ch]          8
                _globalGain[gr][ch] = GAIN_TAB[frame.ReadBits(8)];
                // scalefac_compress[gr][ch]    4
                _scalefacCompress[gr][ch] = frame.ReadBits(4);

                // blocksplit_flag[gr][ch]      1
                _blockSplitFlag[gr][ch] = frame.ReadBits(1) == 1;
                if (_blockSplitFlag[gr][ch])
                {
                    // block_type[gr][ch]       2
                    _blockType[gr][ch] = frame.ReadBits(2);
                    // switch_point[gr][ch]     1
                    _mixedBlockFlag[gr][ch] = frame.ReadBits(1) == 1;
                    // table_select[gr][ch][0..1]   5 x2
                    _tableSelect[gr][ch][0] = frame.ReadBits(5);
                    _tableSelect[gr][ch][1] = frame.ReadBits(5);
                    // the third region's table isn't transmitted for split blocks
                    _tableSelect[gr][ch][2] = 0;

                    // set the region information
                    if (_blockType[gr][ch] == 2 && !_mixedBlockFlag[gr][ch])
                    {
                        _regionAddress1[gr][ch] = 8;
                    }
                    else
                    {
                        _regionAddress1[gr][ch] = 7;
                    }
                    _regionAddress2[gr][ch] = 20 - _regionAddress1[gr][ch];

                    // subblock_gain[gr][ch][0..2]  3 x3
                    _subblockGain[gr][ch][0] = frame.ReadBits(3) * -2f;
                    _subblockGain[gr][ch][1] = frame.ReadBits(3) * -2f;
                    _subblockGain[gr][ch][2] = frame.ReadBits(3) * -2f;
                }
                else
                {
                    // table_select[0..2][gr][ch]   5 x3
                    _tableSelect[gr][ch][0] = frame.ReadBits(5);
                    _tableSelect[gr][ch][1] = frame.ReadBits(5);
                    _tableSelect[gr][ch][2] = frame.ReadBits(5);

                    // region_address1[gr][ch]      4
                    _regionAddress1[gr][ch] = frame.ReadBits(4);
                    // region_address2[gr][ch]      3
                    _regionAddress2[gr][ch] = frame.ReadBits(3);

                    // set the block type so it doesn't accidentally carry
                    _blockType[gr][ch] = 0;

                    // make subblock gain equal unity
                    _subblockGain[gr][ch][0] = 0;
                    _subblockGain[gr][ch][1] = 0;
                    _subblockGain[gr][ch][2] = 0;
                }

                // preflag[gr][ch]              1
                _preflag[gr][ch] = frame.ReadBits(1);
                // scalefac_scale[gr][ch]       1
                _scalefacScale[gr][ch] = .5f * (1f + frame.ReadBits(1));
                // count1table_select[gr][ch]   1
                _count1TableSelect[gr][ch] = frame.ReadBits(1);
            }
        }
    }
    else    // MPEG 2+
    {
        // main_data_begin      8
        _mainDataBegin = frame.ReadBits(8);

        // private_bits         1 or 2
        if (frame.ChannelMode == MpegChannelMode.Mono)
        {
            _privBits = frame.ReadBits(1);
            _channels = 1;
        }
        else
        {
            _privBits = frame.ReadBits(2);
            _channels = 2;
        }

        // FIX: LSF (MPEG 2 / 2.5) frames carry only ONE granule of side info.
        // GetSlots reserves 9 bytes mono / 17 bytes stereo for LSF side info,
        // which is exactly 8 (main_data_begin) + private bits + 63 bits per
        // channel for a single granule -- and DecodeFrame decodes one granule
        // for non-Version1 frames.  Reading two granules here would consume
        // main data as side info and desync the frame.
        for (var gr = 0; gr < 1; gr++)
        {
            for (var ch = 0; ch < _channels; ch++)
            {
                // part2_3_length[gr][ch]       12
                _part23Length[gr][ch] = frame.ReadBits(12);
                // big_values[gr][ch]           9
                _bigValues[gr][ch] = frame.ReadBits(9);
                // global_gain[gr][ch]          8
                _globalGain[gr][ch] = GAIN_TAB[frame.ReadBits(8)];
                // scalefac_compress[gr][ch]    9 (wider than MPEG 1's 4 bits)
                _scalefacCompress[gr][ch] = frame.ReadBits(9);

                // blocksplit_flag[gr][ch]      1
                _blockSplitFlag[gr][ch] = frame.ReadBits(1) == 1;
                if (_blockSplitFlag[gr][ch])
                {
                    // block_type[gr][ch]       2
                    _blockType[gr][ch] = frame.ReadBits(2);
                    // switch_point[gr][ch]     1
                    _mixedBlockFlag[gr][ch] = frame.ReadBits(1) == 1;
                    // table_select[gr][ch][0..1]   5 x2
                    _tableSelect[gr][ch][0] = frame.ReadBits(5);
                    _tableSelect[gr][ch][1] = frame.ReadBits(5);
                    // the third region's table isn't transmitted for split blocks
                    _tableSelect[gr][ch][2] = 0;

                    // set the region information
                    if (_blockType[gr][ch] == 2 && !_mixedBlockFlag[gr][ch])
                    {
                        _regionAddress1[gr][ch] = 8;
                    }
                    else
                    {
                        _regionAddress1[gr][ch] = 7;
                    }
                    _regionAddress2[gr][ch] = 20 - _regionAddress1[gr][ch];

                    // subblock_gain[gr][ch][0..2]  3 x3
                    _subblockGain[gr][ch][0] = frame.ReadBits(3) * -2f;
                    _subblockGain[gr][ch][1] = frame.ReadBits(3) * -2f;
                    _subblockGain[gr][ch][2] = frame.ReadBits(3) * -2f;
                }
                else
                {
                    // table_select[0..2][gr][ch]   5 x3
                    _tableSelect[gr][ch][0] = frame.ReadBits(5);
                    _tableSelect[gr][ch][1] = frame.ReadBits(5);
                    _tableSelect[gr][ch][2] = frame.ReadBits(5);

                    // region_address1[gr][ch]      4
                    _regionAddress1[gr][ch] = frame.ReadBits(4);
                    // region_address2[gr][ch]      3
                    _regionAddress2[gr][ch] = frame.ReadBits(3);

                    // set the block type so it doesn't accidentally carry
                    _blockType[gr][ch] = 0;

                    // make subblock gain equal unity
                    _subblockGain[gr][ch][0] = 0;
                    _subblockGain[gr][ch][1] = 0;
                    _subblockGain[gr][ch][2] = 0;
                }

                // no preflag bit in LSF side info

                // scalefac_scale[gr][ch]       1
                _scalefacScale[gr][ch] = .5f * (1f + frame.ReadBits(1));
                // count1table_select[gr][ch]   1
                _count1TableSelect[gr][ch] = frame.ReadBits(1);
            }
        }
    }
}
protected override void ReadScaleFactorSelection(IMpegFrame frame, int[][] scfsi, int channels)
{
    // Intentionally empty: the shared base logic already uses "2" as the
    // "has energy" marker, so Layer I has no extra selection bits to read.
}
// Layer I always uses the same fixed allocation table, regardless of the frame.
protected override int[] GetRateTable(IMpegFrame frame)
{
    return _rateTable;
}
/// <summary>
/// Decodes one frame into the provided per-channel sample buffers and returns
/// the number of samples written per channel.
/// </summary>
abstract internal int DecodeFrame(IMpegFrame frame, float[] ch0, float[] ch1);
// Builds the scalefactor-band lookup tables for the frame's sample rate.
// The result is cached via _cbLookupSR, so the work only happens when the rate changes.
void PrepTables(IMpegFrame frame)
{
    if (_cbLookupSR != frame.SampleRate)
    {
        // select the long- and short-block band boundary tables for this rate
        switch (frame.SampleRate)
        {
            case 44100:
                _sfBandIndexL = _sfBandIndexLTable[0];
                _sfBandIndexS = _sfBandIndexSTable[0];
                break;
            case 48000:
                _sfBandIndexL = _sfBandIndexLTable[1];
                _sfBandIndexS = _sfBandIndexSTable[1];
                break;
            case 32000:
                _sfBandIndexL = _sfBandIndexLTable[2];
                _sfBandIndexS = _sfBandIndexSTable[2];
                break;
            case 22050:
                _sfBandIndexL = _sfBandIndexLTable[3];
                _sfBandIndexS = _sfBandIndexSTable[3];
                break;
            case 24000:
                _sfBandIndexL = _sfBandIndexLTable[4];
                _sfBandIndexS = _sfBandIndexSTable[4];
                break;
            case 16000:
                _sfBandIndexL = _sfBandIndexLTable[5];
                _sfBandIndexS = _sfBandIndexSTable[5];
                break;
            case 11025:
                _sfBandIndexL = _sfBandIndexLTable[6];
                _sfBandIndexS = _sfBandIndexSTable[6];
                break;
            case 12000:
                _sfBandIndexL = _sfBandIndexLTable[7];
                _sfBandIndexS = _sfBandIndexSTable[7];
                break;
            case 8000:
                _sfBandIndexL = _sfBandIndexLTable[8];
                _sfBandIndexS = _sfBandIndexSTable[8];
                break;
        }

        // precalculate the critical bands per bucket
        // (maps each of the 576 frequency lines to its band index for long & short blocks)
        int cbL = 0, cbS = 0;
        int next_cbL = _sfBandIndexL[1], next_cbS = _sfBandIndexS[1] * 3;
        for (int i = 0; i < 576; i++)
        {
            if (i == next_cbL)
            {
                ++cbL;
                next_cbL = _sfBandIndexL[cbL + 1];
            }
            if (i == next_cbS)
            {
                ++cbS;
                next_cbS = _sfBandIndexS[cbS + 1] * 3;  // short bands repeat 3x per window
            }
            _cbLookupL[i] = (byte)cbL;
            _cbLookupS[i] = (byte)cbS;
        }

        // set up the short block windows
        // (records which of the 3 short windows each line belongs to)
        int idx = 0;
        for (cbS = 0; cbS < 12; cbS++)
        {
            var width = _sfBandIndexS[cbS + 1] - _sfBandIndexS[cbS];
            for (int i = 0; i < 3; i++)
            {
                for (int j = 0; j < width; j++, idx++)
                {
                    _cbwLookupS[idx] = (byte)i;
                }
            }
        }

        // remember which rate these tables were built for
        _cbLookupSR = frame.SampleRate;
    }
}
// Layer II picks its allocation table from the frame's bitrate and sample rate.
protected override int[] GetRateTable(IMpegFrame frame)
{
    return SelectTable(frame);
}
/// <summary>
/// Reads (or, for layers that have none, skips) the scalefactor selection info,
/// consuming bits from <paramref name="frame"/> as needed.
/// </summary>
abstract protected void ReadScaleFactorSelection(IMpegFrame frame, int[][] scfsi, int channels);
/// <summary>
/// Returns the bit-allocation (rate) table to use for this frame.
/// </summary>
abstract protected int[] GetRateTable(IMpegFrame frame);
// Layer III decode pipeline: side info, main data, scalefactors, huffman/dequant,
// stereo processing, then reorder/antialias/hybrid/polyphase synthesis.
// Returns the number of samples written per output buffer (0 if the bit reservoir
// can't satisfy this frame yet).
internal override int DecodeFrame(IMpegFrame frame, float[] ch0, float[] ch1)
{
    // load the frame information
    ReadSideInfo(frame);

    // load the frame's main data
    if (!LoadMainData(frame))
    {
        return 0;
    }

    // prep the reusable tables
    PrepTables(frame);

    // do our stereo mode setup
    var chanBufs = new float[2][];
    var startChannel = 0;
    var endChannel = _channels - 1;
    if (_channels == 1 || StereoMode == StereoMode.LeftOnly || StereoMode == StereoMode.DownmixToMono)
    {
        chanBufs[0] = ch0;
        endChannel = 0;
    }
    else if (StereoMode == StereoMode.RightOnly)
    {
        chanBufs[1] = ch0;  // this is correct... if there's only a single channel output, it goes in channel 0's buffer
        startChannel = 1;
    }
    else    // MpegStereoMode.Both
    {
        chanBufs[0] = ch0;
        chanBufs[1] = ch1;
    }

    // get the granule count (MPEG 1 has two granules per frame; LSF has one)
    int granules;
    if (frame.Version == MpegVersion.Version1)
    {
        granules = 2;
    }
    else
    {
        granules = 1;
    }

    // decode the audio data
    int offset = 0;
    for (var gr = 0; gr < granules; gr++)
    {
        for (var ch = 0; ch < _channels; ch++)
        {
            // read scale factors
            int sfbits;
            if (frame.Version == MpegVersion.Version1)
            {
                sfbits = ReadScalefactors(gr, ch);
            }
            else
            {
                sfbits = ReadLsfScalefactors(gr, ch, frame.ChannelModeExtension);
            }

            // huffman & dequant
            ReadSamples(sfbits, gr, ch);
        }

        // stereo processing
        Stereo(frame.ChannelMode, frame.ChannelModeExtension, gr, frame.Version != MpegVersion.Version1);

        for (int ch = startChannel; ch <= endChannel; ch++)
        {
            // pull some values so we don't have to index them again later
            var buf = _samples[ch];
            var blockType = _blockType[gr][ch];
            var blockSplit = _blockSplitFlag[gr][ch];
            var mixedBlock = _mixedBlockFlag[gr][ch];

            // do the short/long/mixed logic here so it's only done once per channel per granule
            if (blockSplit && blockType == 2)
            {
                if (mixedBlock)
                {
                    // reorder & antialias mixed blocks
                    Reorder(buf, true);
                    AntiAlias(buf, true);
                }
                else
                {
                    // reorder short blocks
                    Reorder(buf, false);
                }
            }
            else
            {
                // antialias long blocks
                AntiAlias(buf, false);
            }

            // hybrid processing
            _hybrid.Apply(buf, ch, blockType, blockSplit && mixedBlock);

            // frequency inversion
            FrequencyInversion(buf);

            // inverse polyphase
            InversePolyphase(buf, ch, offset, chanBufs[ch]);
        }

        // each granule produces SBLIMIT * SSLIMIT output samples per channel
        offset += SBLIMIT * SSLIMIT;
    }

    return offset;
}
// Core decode: resets the frame reader, selects (lazily creating) the layer
// decoder, decodes into the shared channel buffers, and interleaves into dest.
// Returns the number of float elements written (0 for an unrecognized layer).
int DecodeFrameImpl(IMpegFrame frame, Array dest, int destOffset)
{
    frame.Reset();

    // lazily create and cache the decoder matching this frame's layer
    Decoder.LayerDecoderBase curDecoder;
    switch (frame.Layer)
    {
        case MpegLayer.LayerI:
            curDecoder = _layerIDecoder ?? (_layerIDecoder = new Decoder.LayerIDecoder());
            break;
        case MpegLayer.LayerII:
            curDecoder = _layerIIDecoder ?? (_layerIIDecoder = new Decoder.LayerIIDecoder());
            break;
        case MpegLayer.LayerIII:
            curDecoder = _layerIIIDecoder ?? (_layerIIIDecoder = new Decoder.LayerIIIDecoder());
            break;
        default:
            curDecoder = null;
            break;
    }

    if (curDecoder == null)
    {
        return 0;
    }

    curDecoder.SetEQ(_eqFactors);
    curDecoder.StereoMode = StereoMode;

    var cnt = curDecoder.DecodeFrame(frame, _ch0, _ch1);
    if (frame.ChannelMode == MpegChannelMode.Mono)
    {
        Buffer.BlockCopy(_ch0, 0, dest, destOffset * sizeof(float), cnt * sizeof(float));
        return cnt;
    }

    // This is kinda annoying... if we're doing a downmix, we should technically
    // only output a single channel.  The problem is, our caller is probably
    // expecting stereo output.  Grrrr....
    // We use Buffer.BlockCopy here because we don't know dest's type, but we do
    // know it's big enough to do the copy.
    for (int i = 0; i < cnt; i++)
    {
        Buffer.BlockCopy(_ch0, i * sizeof(float), dest, destOffset * sizeof(float), sizeof(float));
        ++destOffset;
        Buffer.BlockCopy(_ch1, i * sizeof(float), dest, destOffset * sizeof(float), sizeof(float));
        ++destOffset;
    }
    return cnt * 2;
}
// The allocation table is fixed for this decoder; the frame is not consulted.
protected override int[] GetRateTable(IMpegFrame frame)
{
    return _rateTable;
}
// Positions the bit reservoir at this frame's main_data_begin pointer and buffers
// the frame's bytes.  Returns false when the reservoir doesn't yet hold enough
// data to decode this frame (caller should skip decoding).
bool LoadMainData(IMpegFrame frame)
{
    // figure out the "skip" count (do it in bits so we don't have to flush at the end of this frame)
    var skipBits = _bitRes.BitsAvailable - _mainDataBegin * 8;

    if (skipBits < 0)
    {
        // not enough buffered data for main_data_begin; stash this frame's bytes and bail
        _bitRes.AddBits(frame);
        return false;
    }

    // skip the unused data, then load this frame's audio data
    _bitRes.SkipBits(skipBits);
    _bitRes.AddBits(frame);
    return true;
}