/// <summary>
/// Parses the Smacker file header from <paramref name="stream"/> and prepares the
/// decoder: creates the video track, one audio track per present sound stream,
/// reads the per-frame size/type tables, and loads the video Huffman trees.
/// </summary>
/// <param name="stream">Seekable stream positioned at the start of the Smacker file.
/// Ownership is taken (stored in _fileStream); any previously loaded stream is closed first.</param>
/// <returns>true if the stream carries a valid 'SMK2'/'SMK4' signature and was loaded;
/// false if the signature does not match (stream is left partially consumed).</returns>
/// <exception cref="InvalidOperationException">An audio track uses Smacker v2 (Bink RDFT/DCT)
/// compression, which this decoder does not handle.</exception>
public override bool LoadStream(Stream stream)
{
    // Drop any previously opened movie before adopting the new stream.
    Close();
    _fileStream = stream;
    _br = new BinaryReader(stream);

    // Read in the Smacker header. The signature tag is stored big-endian;
    // all remaining header fields are little-endian.
    _header = new Header();
    _header.signature = _br.ReadUInt32BigEndian();

    // Only 'SMK2' and 'SMK4' containers are supported.
    if (_header.signature != ScummHelper.MakeTag('S', 'M', 'K', '2') && _header.signature != ScummHelper.MakeTag('S', 'M', 'K', '4'))
    {
        return (false);
    }

    uint width = _br.ReadUInt32();
    uint height = _br.ReadUInt32();
    uint frameCount = _br.ReadUInt32();
    int frameDelay = _br.ReadInt32();

    // FrameRate field semantics (per the Smacker format):
    //   > 0 : frame delay in milliseconds            -> fps = 1000 / delay
    //   < 0 : negated delay in 1/100000 s units; the rate carries 2 digits
    //         after the comma, so 1497 is actually 14.97 fps
    //   == 0: default to 1000 fps (degenerate/unspecified case)
    Rational frameRate;
    if (frameDelay > 0)
    {
        frameRate = new Rational(1000, frameDelay);
    }
    else if (frameDelay < 0)
    {
        frameRate = new Rational(100000, -frameDelay);
    }
    else
    {
        frameRate = new Rational(1000);
    }

    // Flags are determined by which bit is set, which can be one of the following:
    // 0 - set to 1 if file contains a ring frame.
    // 1 - set to 1 if file is Y-interlaced
    // 2 - set to 1 if file is Y-doubled
    // If bits 1 or 2 are set, the frame should be scaled to twice its height
    // before it is displayed.
    _header.flags = _br.ReadUInt32();

    var videoTrack = CreateVideoTrack(width, height, frameCount, frameRate, _header.flags, _header.signature);
    AddTrack(videoTrack);

    // TODO: should we do any extra processing for Smacker files with ring frames?

    // TODO: should we do any extra processing for Y-doubled videos? Are they the
    // same as Y-interlaced videos?

    // Per-track audio buffer sizes (7 slots, unused slots included).
    uint i;
    for (i = 0; i < 7; ++i)
    {
        _header.audioSize[i] = _br.ReadUInt32();
    }

    // Byte sizes of the four video Huffman trees (packed) and their unpacked sizes.
    _header.treesSize = _br.ReadUInt32();
    _header.mMapSize = _br.ReadUInt32();
    _header.mClrSize = _br.ReadUInt32();
    _header.fullSize = _br.ReadUInt32();
    _header.typeSize = _br.ReadUInt32();

    for (i = 0; i < 7; ++i)
    {
        // AudioRate - Frequency and format information for each sound track, up to 7 audio tracks.
        // The 32 constituent bits have the following meaning:
        // * bit 31 - indicates Huffman + Dpcm compression
        // * bit 30 - indicates that audio data is present for this track
        // * bit 29 - 1 = 16-bit audio; 0 = 8-bit audio
        // * bit 28 - 1 = stereo audio; 0 = mono audio
        // * bit 27 - indicates Bink Rdft compression
        // * bit 26 - indicates Bink Dct compression
        // * bits 25-24 - unused
        // * bits 23-0 - audio sample rate
        uint audioInfo = _br.ReadUInt32();
        _header.audioInfo[i].hasAudio = (audioInfo & 0x40000000) != 0;
        _header.audioInfo[i].is16Bits = (audioInfo & 0x20000000) != 0;
        _header.audioInfo[i].isStereo = (audioInfo & 0x10000000) != 0;
        _header.audioInfo[i].sampleRate = audioInfo & 0xFFFFFF;

        // Compression precedence: RDFT (bit 27), then DCT (bit 26), then DPCM (bit 31).
        if ((audioInfo & 0x8000000) != 0)
        {
            _header.audioInfo[i].compression = AudioCompression.Rdft;
        }
        else if ((audioInfo & 0x4000000) != 0)
        {
            _header.audioInfo[i].compression = AudioCompression.Dct;
        }
        else if ((audioInfo & 0x80000000) != 0)
        {
            _header.audioInfo[i].compression = AudioCompression.Dpcm;
        }
        else
        {
            _header.audioInfo[i].compression = AudioCompression.None;
        }

        if (_header.audioInfo[i].hasAudio)
        {
            // Smacker v2 (Bink RDFT/DCT) audio is not implemented here.
            if (_header.audioInfo[i].compression == AudioCompression.Rdft || _header.audioInfo[i].compression == AudioCompression.Dct)
            {
                throw new InvalidOperationException("Unhandled Smacker v2 audio compression");
            }

            AddTrack(new SmackerAudioTrack(_mixer, _header.audioInfo[i], _soundType));
        }
    }

    // Reserved/unused header field.
    _header.dummy = _br.ReadUInt32();

    // Per-frame byte sizes (low 2 bits of each entry carry flags, masked off later).
    _frameSizes = new uint[frameCount];
    for (i = 0; i < frameCount; ++i)
    {
        _frameSizes[i] = _br.ReadUInt32();
    }

    // Per-frame type bytes: bit 0 = palette record present, bits 1..7 = audio
    // chunk present for tracks 0..6.
    _frameTypes = new byte[frameCount];
    for (i = 0; i < frameCount; ++i)
    {
        _frameTypes[i] = _br.ReadByte();
    }

    // The packed Huffman tree data is read into memory and decoded as an
    // LSB-first bitstream by the video track.
    var huffmanTrees = _br.ReadBytes((int)_header.treesSize);
    using (var ms = new MemoryStream(huffmanTrees))
    {
        var bs = BitStream.Create8Lsb(ms);
        videoTrack.ReadTrees(bs, (int)_header.mMapSize, (int)_header.mClrSize, (int)_header.fullSize, (int)_header.typeSize);
    }

    // Remember where frame data begins so playback can be restarted/rewound.
    _firstFrameStart = (uint)_fileStream.Position;

    return (true);
}
/// <summary>
/// Decompresses one chunk of Smacker Huffman+DPCM audio and queues the resulting
/// PCM data. The packed stream starts with header bits (data-present, stereo,
/// 16-bit), followed by one small Huffman tree per byte lane, the big-endian base
/// (first) samples, and then per-sample deltas decoded through those trees.
/// </summary>
/// <param name="buffer">Packed audio chunk data.</param>
/// <param name="bufferSize">Number of valid bytes in <paramref name="buffer"/>.</param>
/// <param name="unpackedSize">Expected size in bytes of the decompressed PCM output.</param>
public void QueueCompressedBuffer(byte[] buffer, int bufferSize, int unpackedSize)
{
    using (var ms = new MemoryStream(buffer, 0, bufferSize))
    {
        var audioBS = BitStream.Create8Lsb(ms);

        // A cleared data-present flag means an empty chunk: nothing to queue.
        bool dataPresent = audioBS.GetBit() != 0;
        if (!dataPresent)
        {
            return;
        }

        // The chunk's own format bits must agree with the header's track info.
        bool isStereo = audioBS.GetBit() != 0;
        Debug.Assert(isStereo == _audioInfo.isStereo);
        bool is16Bits = audioBS.GetBit() != 0;
        Debug.Assert(is16Bits == _audioInfo.is16Bits);

        // Bytes per sample frame: one Huffman tree is needed per byte lane
        // (channels x bytes-per-sample), at most 4.
        int numBytes = 1 * (isStereo ? 2 : 1) * (is16Bits ? 2 : 1);

        byte[] unpackedBuffer = new byte[unpackedSize];
        var curPointer = 0;
        var curPos = 0;

        SmallHuffmanTree[] audioTrees = new SmallHuffmanTree[4];
        for (int k = 0; k < numBytes; k++)
        {
            audioTrees[k] = new SmallHuffmanTree(audioBS);
        }

        // Base values, stored as big endian
        // (right channel first when stereo; SwapBytes converts to host order).
        int[] bases = new int[2];

        if (isStereo)
        {
            if (is16Bits)
            {
                bases[1] = ScummHelper.SwapBytes((ushort)audioBS.GetBits(16));
            }
            else
            {
                bases[1] = (int)audioBS.GetBits(8);
            }
        }

        if (is16Bits)
        {
            bases[0] = ScummHelper.SwapBytes((ushort)audioBS.GetBits(16));
        }
        else
        {
            bases[0] = (int)audioBS.GetBits(8);
        }

        // The bases are the first samples, too
        // (8-bit output is biased to unsigned via the ^ 0x80 flip).
        for (int i = 0; i < (isStereo ? 2 : 1); i++, curPointer += (is16Bits ? 2 : 1), curPos += (is16Bits ? 2 : 1))
        {
            if (is16Bits)
            {
                unpackedBuffer.WriteUInt16BigEndian(curPointer, (ushort)bases[i]);
            }
            else
            {
                unpackedBuffer[curPointer] = (byte)((bases[i] & 0xFF) ^ 0x80);
            }
        }

        // Next follow the deltas, which are added to the corresponding base values and
        // are stored as little endian
        // We store the unpacked bytes in big endian format
        while (curPos < unpackedSize)
        {
            // If the sample is stereo, the data is stored for the left and right channel, respectively
            // (the exact opposite to the base values)
            if (!is16Bits)
            {
                for (int k = 0; k < (isStereo ? 2 : 1); k++)
                {
                    // 8-bit path: one tree per channel, accumulator wrapped to a byte.
                    sbyte delta = (sbyte)((short)audioTrees[k].GetCode(audioBS));
                    bases[k] = (bases[k] + delta) & 0xFF;
                    unpackedBuffer[curPointer++] = (byte)(bases[k] ^ 0x80);
                    curPos++;
                }
            }
            else
            {
                for (int k = 0; k < (isStereo ? 2 : 1); k++)
                {
                    // 16-bit path: low byte then high byte come from adjacent trees;
                    // the combined value is sign-extended via the short cast.
                    byte lo = (byte)audioTrees[k * 2].GetCode(audioBS);
                    byte hi = (byte)audioTrees[k * 2 + 1].GetCode(audioBS);
                    bases[k] += (short)(lo | (hi << 8));
                    unpackedBuffer.WriteUInt16BigEndian(curPointer, (ushort)bases[k]);
                    curPointer += 2;
                    curPos += 2;
                }
            }
        }

        // Hand the decoded (big-endian) PCM to the playback queue.
        QueuePCM(unpackedBuffer, unpackedSize);
    }
}
/// <summary>
/// Reads and demuxes the next Smacker frame: applies an optional palette record,
/// dispatches up to 7 audio chunks to their tracks, then decodes the remaining
/// bytes of the frame as video data.
/// </summary>
/// <exception cref="InvalidOperationException">The bytes consumed for this frame
/// exceed the size recorded in the frame-size table (corrupt or mis-parsed file).</exception>
protected override void ReadNextPacket()
{
    var videoTrack = (SmackerVideoTrack)GetTrack(0);

    // Nothing to do once the video track has been fully decoded.
    if (videoTrack.EndOfTrack)
    {
        return;
    }

    videoTrack.IncreaseCurrentFrame();

    int i;
    uint chunkSize = 0;
    uint dataSizeUnpacked = 0;

    // Remember the frame's start offset so we can measure consumption and
    // seek past any trailing padding at the end.
    uint startPos = (uint)_fileStream.Position;

    // Check if we got a frame with palette data, and
    // call back the virtual setPalette function to set
    // the current palette
    if ((_frameTypes[videoTrack.CurrentFrame] & 1) != 0)
    {
        videoTrack.UnpackPalette(_fileStream);
    }

    // Load audio tracks: frame-type bits 1..7 flag a chunk for tracks 0..6.
    for (i = 0; i < 7; ++i)
    {
        if ((_frameTypes[videoTrack.CurrentFrame] & (2 << i)) == 0)
        {
            continue;
        }

        chunkSize = _br.ReadUInt32();
        chunkSize -= 4; // subtract the first 4 bytes (chunk size)

        if (_header.audioInfo[i].compression == AudioCompression.None)
        {
            dataSizeUnpacked = chunkSize;
        }
        else
        {
            // Compressed chunks carry their unpacked size before the payload.
            dataSizeUnpacked = _br.ReadUInt32();
            chunkSize -= 4; // subtract the next 4 bytes (unpacked data size)
        }

        HandleAudioTrack((byte)i, (int)chunkSize, (int)dataSizeUnpacked);
    }

    // The low 2 bits of the recorded frame size are flag bits, not length.
    uint frameSize = (uint)(_frameSizes[videoTrack.CurrentFrame] & ~3);
    //	uint32 remainder =  _frameSizes[videoTrack.getCurFrame()] & 3;

    // Palette/audio reads must not have overrun the recorded frame size.
    if (_fileStream.Position - startPos > frameSize)
    {
        throw new InvalidOperationException("Smacker actual frame size exceeds recorded frame size");
    }

    // Whatever remains of the frame is video data.
    uint frameDataSize = (uint)(frameSize - (_fileStream.Position - startPos));

    var frameData = new byte[frameDataSize + 1];
    // Padding to keep the BigHuffmanTrees from reading past the data end
    frameData[frameDataSize] = 0x00;

    _fileStream.Read(frameData, 0, (int)frameDataSize);

    using (var ms = new MemoryStream(frameData, 0, (int)(frameDataSize + 1)))
    {
        var bs = BitStream.Create8Lsb(ms);
        videoTrack.DecodeFrame(bs);
    }

    // Skip any unconsumed remainder so the stream is aligned to the next frame.
    _fileStream.Seek(startPos + frameSize, SeekOrigin.Begin);
}