/// <summary>
/// Re-encodes the decoder's PCM output as IMA ADPCM and writes it to
/// <paramref name="output"/> as a complete RIFF/WAVE file.
/// </summary>
/// <param name="input">Decoder supplying 16-bit PCM samples (mono or stereo).</param>
/// <param name="output">Destination stream; receives header plus ADPCM payload.</param>
public static void Encode(IAudioDecoder input, Stream output)
{
    byte[] decodedSound = Decode(input);
    int channels = input.GetFormat() == AudioFormat.Stereo16 ? 2 : 1;
    int blockSize = channels * 1024;
    var ima4Encoder = new Ima4Encoder();
    var adpcmSound = ima4Encoder.Encode(decodedSound, channels, blockSize);

    // 16-bit samples: 2 bytes per sample per channel.
    long numFrames = decodedSound.Length / channels / 2;
    int frequency = input.GetFrequency();

    // Guard against division by zero for empty input.
    int averageBytesPerSecond = numFrames > 0
        ? (int)(adpcmSound.Length * (long)frequency / numFrames)
        : 0;

    // Standard IMA ADPCM samples-per-block: 1 predictor frame per channel in the
    // 4-byte-per-channel block header, then 2 samples per byte in the remainder.
    int framesPerBlock = (blockSize - 4 * channels) * 8 / (4 * channels) + 1;

    var bw = new BinaryWriter(output);

    // RIFF chunk. Header totals 48 bytes: 12 (RIFF/size/WAVE) + 28 ("fmt " header
    // + 20-byte body) + 8 ("data" header). RIFF size = file size - 8.
    bw.Write(Encoding.UTF8.GetBytes("RIFF"));
    bw.Write(adpcmSound.Length + 48 - 8); // was "+ 44 - 8": that assumes a 16-byte PCM fmt body, understating the size by 4
    bw.Write(Encoding.UTF8.GetBytes("WAVE"));

    // Fmt sub-chunk (WAVEFORMATEX layout for a compressed format).
    bw.Write(Encoding.UTF8.GetBytes("fmt "));
    bw.Write(20); // chunk size: 16 base fields + 2 (cbSize) + 2 (samples per block)
    bw.Write((ushort)WaveFormat.IMA_ADPCM); // format tag
    bw.Write((ushort)channels);
    bw.Write(frequency);
    bw.Write(averageBytesPerSecond); // average bytes per second
    bw.Write((ushort)blockSize); // block align
    bw.Write((ushort)4); // bits per sample
    bw.Write((ushort)2); // cbSize: 2 extra bytes follow — was "(ushort)channels", which is wrong for mono
    bw.Write((ushort)framesPerBlock); // samples per block
    // NOTE(review): the WAVE spec recommends a "fact" chunk (total sample count)
    // for compressed formats; most players cope without it, so it is not added here.

    // Data sub-chunk.
    bw.Write(Encoding.UTF8.GetBytes("data"));
    bw.Write(adpcmSound.Length);

    // Write down the data.
    output.Write(adpcmSound, 0, adpcmSound.Length);
}
/// <summary>
/// Decodes up to <c>BufferSize</c> bytes worth of blocks into <c>decodedData</c>
/// and uploads them to the given OpenAL buffer, rewinding the decoder to keep
/// filling when looping is enabled.
/// </summary>
/// <param name="buffer">OpenAL buffer handle to fill.</param>
/// <returns>True if any data was decoded and uploaded; false if the source is exhausted.</returns>
private bool FillBuffer(int buffer)
{
    int totalRead = 0;
    int needToRead = BufferSize / decoder.GetBlockSize();
    bool justRewound = false;
    while (true)
    {
        int actuallyRead = decoder.ReadBlocks(decodedData, totalRead, needToRead - totalRead);

        // Safety guard: if the decoder produces nothing even immediately after a
        // rewind, no progress is possible — bail out instead of spinning forever.
        // (Previously this could loop infinitely for an empty source with looping on.)
        if (actuallyRead == 0 && justRewound)
            break;

        totalRead += actuallyRead;
        if (totalRead == needToRead || !looping)
            break;

        decoder.Rewind();
        justRewound = true;
    }

    if (totalRead > 0)
    {
        ALFormat format = decoder.GetFormat() == AudioFormat.Stereo16
            ? ALFormat.Stereo16
            : ALFormat.Mono16;
        int dataSize = totalRead * decoder.GetBlockSize();
        AL.BufferData(buffer, format, decodedData, dataSize, decoder.GetFrequency());
        return true;
    }

    return false;
}