// Decodes the residue data from the packet into one buffer per channel (blockSize / 2 samples each).
// Channels flagged true in doNotDecode do not need to be decoded.
internal abstract float[][] Decode(DataPacket packet, bool[] doNotDecode, int channels, int blockSize);
internal override float[][] Decode(DataPacket packet, bool[] doNotDecode, int channels, int blockSize)
{
    var residue = GetResidueBuffer(doNotDecode.Length);

    // this is pretty well stolen directly from libvorbis...  BSD license
    var end = _end < blockSize / 2 ? _end : blockSize / 2;
    var n = end - _begin;

    if (n > 0 && doNotDecode.Contains(false))
    {
        var partVals = n / _partitionSize;

        var partWords = (partVals + _classBook.Dimensions - 1) / _classBook.Dimensions;
        for (var j = 0; j < channels; j++)
        {
            Array.Clear(_partWordCache[j], 0, partWords);
        }

        for (var s = 0; s < _maxStages; s++)
        {
            for (int i = 0, l = 0; i < partVals; l++)
            {
                if (s == 0)
                {
                    // on the first pass, read and cache the partition classifications
                    // (one class word covers _classBook.Dimensions partitions per channel)
                    for (var j = 0; j < channels; j++)
                    {
                        var idx = _classBook.DecodeScalar(packet);
                        if (idx >= 0 && idx < _decodeMap.Length)
                        {
                            _partWordCache[j][l] = _decodeMap[idx];
                        }
                        else
                        {
                            // bad packet... exit now and try to use what we already have
                            i = partVals;
                            s = _maxStages;
                            break;
                        }
                    }
                }

                for (var k = 0; i < partVals && k < _classBook.Dimensions; k++, i++)
                {
                    var offset = _begin + i * _partitionSize;
                    for (var j = 0; j < channels; j++)
                    {
                        var idx = _partWordCache[j][l][k];
                        if ((_cascade[idx] & (1 << s)) != 0)
                        {
                            var book = _books[idx][s];
                            if (book != null)
                            {
                                if (WriteVectors(book, packet, residue, j, offset, _partitionSize))
                                {
                                    // bad packet... exit now and try to use what we already have
                                    i = partVals;
                                    s = _maxStages;
                                    break;
                                }
                            }
                        }
                    }
                }
            }
        }
    }

    return residue;
}
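// Illustrative sketch only (assumed helper, not part of the decoder): the test
// `(_cascade[idx] & (1 << s)) != 0` above gates each decode pass. A classification
// whose cascade value is 0b0101 contributes vectors in stages 0 and 2 but is
// skipped in stage 1, so its residue is accumulated over multiple passes.
private static bool StageIsActive(int cascade, int stage)
{
    return (cascade & (1 << stage)) != 0;
}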
// We can use the type 0 logic by saying we're doing a single channel buffer big enough
// to hold the samples for all channels.
// This works because WriteVectors(...) "knows" the correct channel count and processes
// the data accordingly.
internal override float[][] Decode(DataPacket packet, bool[] doNotDecode, int channels, int blockSize)
{
    _channels = channels;

    return base.Decode(packet, doNotDecode, 1, blockSize * channels);
}
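// Illustrative sketch only (assumed helper; the real deinterleave happens inside
// WriteVectors(...)): with the type 2 trick above, `channels` buffers of
// blockSize / 2 samples are decoded as one interleaved buffer, so the value at
// interleaved offset o belongs to channel o % channels, sample o / channels.
private static void DeinterleaveExample(float[] interleaved, float[][] perChannel, int channels)
{
    for (var o = 0; o < interleaved.Length; o++)
    {
        perChannel[o % channels][o / channels] = interleaved[o];
    }
}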
protected override void Init(DataPacket packet)
{
    // this is pretty well stolen directly from libvorbis...  BSD license
    _begin = (int)packet.ReadBits(24);
    _end = (int)packet.ReadBits(24);
    _partitionSize = (int)packet.ReadBits(24) + 1;
    _classifications = (int)packet.ReadBits(6) + 1;
    _classBook = _vorbis.Books[(int)packet.ReadBits(8)];

    // cascade bitmap: which decode stages apply to each classification
    _cascade = new int[_classifications];
    var acc = 0;
    for (var i = 0; i < _classifications; i++)
    {
        var low_bits = (int)packet.ReadBits(3);
        if (packet.ReadBit())
        {
            _cascade[i] = (int)packet.ReadBits(5) << 3 | low_bits;
        }
        else
        {
            _cascade[i] = low_bits;
        }
        acc += icount(_cascade[i]);
    }

    // one book number per set bit across all the cascades
    var bookNums = new int[acc];
    for (var i = 0; i < acc; i++)
    {
        bookNums[i] = (int)packet.ReadBits(8);
        if (_vorbis.Books[bookNums[i]].MapType == 0)
        {
            throw new InvalidDataException();
        }
    }

    // partvals = _classifications ^ dimensions; it must not exceed the class book's entry count
    var entries = _classBook.Entries;
    var dim = _classBook.Dimensions;
    var partvals = 1;
    while (dim > 0)
    {
        partvals *= _classifications;
        if (partvals > entries)
        {
            throw new InvalidDataException();
        }
        --dim;
    }

    // now the lookups
    dim = _classBook.Dimensions;

    _books = new VorbisCodebook[_classifications][];

    acc = 0;
    var maxstage = 0;
    int stages;
    for (var j = 0; j < _classifications; j++)
    {
        stages = Utils.ilog(_cascade[j]);
        _books[j] = new VorbisCodebook[stages];
        if (stages > 0)
        {
            maxstage = Math.Max(maxstage, stages);
            for (var k = 0; k < stages; k++)
            {
                if ((_cascade[j] & (1 << k)) > 0)
                {
                    _books[j][k] = _vorbis.Books[bookNums[acc++]];
                }
            }
        }
    }
    _maxStages = maxstage;

    // build the decode map: each class book entry expands to one classification per dimension
    _decodeMap = new int[partvals][];
    for (var j = 0; j < partvals; j++)
    {
        var val = j;
        var mult = partvals / _classifications;
        _decodeMap[j] = new int[_classBook.Dimensions];
        for (var k = 0; k < _classBook.Dimensions; k++)
        {
            var deco = val / mult;
            val -= deco * mult;
            mult /= _classifications;
            _decodeMap[j][k] = deco;
        }
    }

    _entryCache = new int[_partitionSize];

    _partWordCache = new int[_vorbis._channels][][];
    var maxPartWords = ((_end - _begin) / _partitionSize + _classBook.Dimensions - 1) / _classBook.Dimensions;
    for (var ch = 0; ch < _vorbis._channels; ch++)
    {
        _partWordCache[ch] = new int[maxPartWords][];
    }
}
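// Illustrative sketch only (assumed helper mirroring the _decodeMap loop above):
// _decodeMap[j] is simply j written as _classBook.Dimensions base-_classifications
// digits, most significant first. E.g. with classifications = 4 and dimensions = 3
// (partvals = 64), index 27 decomposes to { 1, 2, 3 } since 27 = 1*16 + 2*4 + 3.
private static int[] DecomposeClassWord(int index, int classifications, int dimensions, int partvals)
{
    var digits = new int[dimensions];
    var mult = partvals / classifications;
    for (var k = 0; k < dimensions; k++)
    {
        digits[k] = index / mult;
        index -= digits[k] * mult;
        mult /= classifications;
    }
    return digits;
}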