void CollectAudioData()
{
    var audio1 = new MemoryStream(); // left channel / mono
    var audio2 = new MemoryStream(); // right channel
    var adpcmIndex = 0;
    var compressed = false;

    for (var i = 0; i < Frames; i++)
    {
        stream.Seek(offsets[i], SeekOrigin.Begin);
        var end = (i < Frames - 1) ? offsets[i + 1] : stream.Length;

        while (stream.Position < end)
        {
            var type = stream.ReadASCII(4);
            if (type == "SN2J")
            {
                var jmp = int2.Swap(stream.ReadUInt32());
                stream.Seek(jmp, SeekOrigin.Current);
                type = stream.ReadASCII(4);
            }

            var length = int2.Swap(stream.ReadUInt32());

            switch (type)
            {
                case "SND0":
                case "SND2":
                    if (audioChannels == 0)
                        throw new NotSupportedException();
                    else if (audioChannels == 1)
                    {
                        var rawAudio = stream.ReadBytes((int)length);
                        audio1.WriteArray(rawAudio);
                    }
                    else
                    {
                        var rawAudio = stream.ReadBytes((int)length / 2);
                        audio1.WriteArray(rawAudio);
                        rawAudio = stream.ReadBytes((int)length / 2);
                        audio2.WriteArray(rawAudio);
                        if (length % 2 != 0)
                            stream.ReadBytes(2);
                    }

                    compressed = type == "SND2";
                    break;
                default:
                    if (length + stream.Position > stream.Length)
                        throw new NotSupportedException("Vqa uses unknown Subtype: {0}".F(type));
                    stream.ReadBytes((int)length);
                    break;
            }

            // Chunks are aligned on even bytes; advance by a byte if the next one is null
            if (stream.Peek() == 0)
                stream.ReadByte();
        }
    }

    if (audioChannels == 1)
        audioData = compressed ? ImaAdpcmReader.LoadImaAdpcmSound(audio1.ToArray(), ref adpcmIndex) : audio1.ToArray();
    else
    {
        byte[] leftData, rightData;
        if (!compressed)
        {
            leftData = audio1.ToArray();
            rightData = audio2.ToArray();
        }
        else
        {
            adpcmIndex = 0;
            leftData = ImaAdpcmReader.LoadImaAdpcmSound(audio1.ToArray(), ref adpcmIndex);
            adpcmIndex = 0;
            rightData = ImaAdpcmReader.LoadImaAdpcmSound(audio2.ToArray(), ref adpcmIndex);
        }

        // Interleave the decoded channels into 16-bit little-endian stereo frames (L, R)
        audioData = new byte[rightData.Length + leftData.Length];
        var rightIndex = 0;
        var leftIndex = 0;
        for (var i = 0; i < audioData.Length;)
        {
            audioData[i++] = leftData[leftIndex++];
            audioData[i++] = leftData[leftIndex++];
            audioData[i++] = rightData[rightIndex++];
            audioData[i++] = rightData[rightIndex++];
        }
    }

    hasAudio = audioData.Length > 0;
}
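// Illustrative sketch, not part of the original source: the stereo branch of
// CollectAudioData above interleaves two mono buffers of 16-bit little-endian
// PCM into L,R frame order. An equivalent standalone helper could look like
// this, assuming both channels hold the same number of samples:
static byte[] InterleaveStereo(byte[] left, byte[] right)
{
    var output = new byte[left.Length + right.Length];
    var o = 0;
    for (var s = 0; s < left.Length; s += 2)
    {
        output[o++] = left[s];      // left sample, low byte
        output[o++] = left[s + 1];  // left sample, high byte
        output[o++] = right[s];     // right sample, low byte
        output[o++] = right[s + 1]; // right sample, high byte
    }

    return output;
}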
protected override bool BufferData(Stream baseStream, Queue<byte> data)
{
    // Decode each block of IMA ADPCM data
    // Each block starts with an initial state per channel
    for (var c = 0; c < channels; c++)
    {
        predictor[c] = baseStream.ReadInt16();
        index[c] = baseStream.ReadUInt8();
        baseStream.ReadUInt8(); // Unknown/Reserved

        // Output first sample from input
        data.Enqueue((byte)predictor[c]);
        data.Enqueue((byte)(predictor[c] >> 8));
        outOffset += 2;

        if (outOffset >= outputSize)
            return true;
    }

    // Decode and output remaining data in this block
    var blockOffset = 0;
    while (blockOffset < blockDataSize)
    {
        for (var c = 0; c < channels; c++)
        {
            // Decode 4 bytes (to 16 bytes of output) per channel
            var chunk = baseStream.ReadBytes(4);
            var decoded = ImaAdpcmReader.LoadImaAdpcmSound(chunk, ref index[c], ref predictor[c]);

            // Interleave output, one sample per channel
            var interleaveChannelOffset = 2 * c;
            for (var i = 0; i < decoded.Length; i += 2)
            {
                var interleaveSampleOffset = interleaveChannelOffset + i;
                interleaveBuffer[interleaveSampleOffset] = decoded[i];
                interleaveBuffer[interleaveSampleOffset + 1] = decoded[i + 1];
                interleaveChannelOffset += 2 * (channels - 1);
            }

            blockOffset += 4;
        }

        var outputRemaining = outputSize - outOffset;
        var toCopy = Math.Min(outputRemaining, interleaveBuffer.Length);
        for (var i = 0; i < toCopy; i++)
            data.Enqueue(interleaveBuffer[i]);

        outOffset += 16 * channels;

        if (outOffset >= outputSize)
            return true;
    }

    return ++currentBlock >= numBlocks;
}
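// Illustrative sketch, an assumption rather than part of the original source:
// the blockDataSize, outputSize and numBlocks fields used by BufferData above
// are typically derived from the WAV "fmt " header. Each IMA ADPCM block
// starts with a 4-byte state header per channel (16-bit predictor, 8-bit step
// index, one reserved byte), followed by the packed 4-bit nibble payload.
static int SamplesPerBlock(int blockAlign, int channels)
{
    var headerBytes = 4 * channels;               // per-channel predictor/index state
    var dataBytes = blockAlign - headerBytes;     // the "blockDataSize" payload
    var nibbleSamples = dataBytes * 2 / channels; // two 4-bit samples per payload byte
    return nibbleSamples + 1;                     // plus the seed sample from the header
}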
public byte[] DecodeImaAdpcmData()
{
    var s = new MemoryStream(RawOutput);

    var numBlocks = DataSize / BlockAlign;
    var blockDataSize = BlockAlign - (Channels * 4);
    var outputSize = UncompressedSize * Channels * 2;

    var outOffset = 0;
    var output = new byte[outputSize];

    var predictor = new int[Channels];
    var index = new int[Channels];

    // Decode each block of IMA ADPCM data in RawOutput
    for (var block = 0; block < numBlocks; block++)
    {
        // Each block starts with an initial state per channel
        for (var c = 0; c < Channels; c++)
        {
            predictor[c] = s.ReadInt16();
            index[c] = s.ReadUInt8();
            s.ReadUInt8(); // Unknown/reserved

            // Output first sample from input
            output[outOffset++] = (byte)predictor[c];
            output[outOffset++] = (byte)(predictor[c] >> 8);

            if (outOffset >= outputSize)
                return output;
        }

        // Decode and output remaining data in this block
        var blockOffset = 0;
        while (blockOffset < blockDataSize)
        {
            for (var c = 0; c < Channels; c++)
            {
                // Decode 4 bytes (to 16 bytes of output) per channel
                var chunk = s.ReadBytes(4);
                var decoded = ImaAdpcmReader.LoadImaAdpcmSound(chunk, ref index[c], ref predictor[c]);

                // Interleave output, one sample per channel
                var outOffsetChannel = outOffset + (2 * c);
                for (var i = 0; i < decoded.Length; i += 2)
                {
                    var outOffsetSample = outOffsetChannel + i;
                    if (outOffsetSample >= outputSize)
                        return output;

                    output[outOffsetSample] = decoded[i];
                    output[outOffsetSample + 1] = decoded[i + 1];
                    outOffsetChannel += 2 * (Channels - 1);
                }

                blockOffset += 4;
            }

            outOffset += 16 * Channels;
        }
    }

    return output;
}
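// Illustrative sketch, assumed rather than taken from ImaAdpcmReader: the
// standard per-nibble IMA ADPCM step that LoadImaAdpcmSound is expected to
// apply. Each 4-bit code adds a fraction of the current step size to the
// running predictor, then adapts the step index for the next sample. Two
// nibbles are packed per input byte (low nibble first), which is why 4 input
// bytes expand to 8 samples (16 output bytes) per channel in the loops above.
// (Math.Clamp assumes .NET Core 2.0 / .NET Standard 2.1 or later.)
static readonly int[] IndexAdjust = { -1, -1, -1, -1, 2, 4, 6, 8 };
static readonly int[] StepTable =
{
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 19, 21, 23, 25, 28, 31,
    34, 37, 41, 45, 50, 55, 60, 66, 73, 80, 88, 97, 107, 118, 130, 143,
    157, 173, 190, 209, 230, 253, 279, 307, 337, 371, 408, 449, 494, 544, 598, 658,
    724, 796, 876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066, 2272, 2499, 2749, 3024,
    3327, 3660, 4026, 4428, 4871, 5358, 5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};

static short DecodeNibble(int nibble, ref int index, ref int predictor)
{
    var step = StepTable[index];

    // Reconstruct the difference: delta = (step * magnitude / 4) + step / 8
    var delta = step >> 3;
    if ((nibble & 1) != 0) delta += step >> 2;
    if ((nibble & 2) != 0) delta += step >> 1;
    if ((nibble & 4) != 0) delta += step;
    if ((nibble & 8) != 0) delta = -delta; // high bit is the sign

    // Accumulate and clamp to the signed 16-bit sample range
    predictor = Math.Clamp(predictor + delta, short.MinValue, short.MaxValue);

    // Adapt the step size; the sign bit does not affect the index adjustment
    index = Math.Clamp(index + IndexAdjust[nibble & 7], 0, StepTable.Length - 1);

    return (short)predictor;
}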