private void ReadPre530Metadata(BundleReader reader, out Stream dataStream, out long metadataOffset)
{
	switch (Header.Type)
	{
		case BundleType.UnityRaw:
			{
				Metadata.Read(reader);
				dataStream = reader.BaseStream;
				metadataOffset = Header.HeaderSize;
			}
			break;

		case BundleType.UnityWeb:
			{
				// only the last chunk is decompressed; it contains the bundle metadata
				ChunkInfo chunkInfo = Header.ChunkInfos[Header.ChunkInfos.Count - 1];
				MemoryStream stream = new MemoryStream(new byte[chunkInfo.DecompressedSize]);
				SevenZipHelper.DecompressLZMASizeStream(reader.BaseStream, chunkInfo.CompressedSize, stream);
				using (BundleReader decompressReader = new BundleReader(stream, reader.EndianType, reader.Type, reader.Generation))
				{
					Metadata.Read(decompressReader);
				}
				dataStream = stream;
				metadataOffset = 0;
			}
			break;

		default:
			throw new NotSupportedException($"Bundle type {Header.Type} isn't supported for pre-530 generation");
	}
}
private SmartStream ReadPre530Metadata(BundleReader reader)
{
	switch (Header.Type)
	{
		case BundleType.UnityRaw:
			{
				Metadata.Read(reader);
				return m_stream.CreateReference();
			}

		case BundleType.UnityWeb:
			{
				// only the last chunk is decompressed; it contains the bundle metadata
				ChunkInfo chunkInfo = Header.ChunkInfos[Header.ChunkInfos.Count - 1];
				using (SmartStream stream = SmartStream.CreateMemory(new byte[chunkInfo.DecompressedSize]))
				{
					SevenZipHelper.DecompressLZMASizeStream(reader.BaseStream, chunkInfo.CompressedSize, stream);
					using (BundleReader decompressReader = new BundleReader(stream, reader.EndianType, reader.Generation))
					{
						Metadata.Read(decompressReader);
					}
					return stream.CreateReference();
				}
			}

		default:
			throw new NotSupportedException($"Bundle type {Header.Type} isn't supported for pre-530 generation");
	}
}
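// For reference, a minimal sketch of what an LZMA "size stream" helper like
// SevenZipHelper.DecompressLZMASizeStream presumably does, written against the public
// LZMA SDK (SevenZip.Compression.LZMA.Decoder). The header layout handling is an
// assumption based on the helper's signature (no decompressed-size parameter, so the
// size must be embedded in the stream), not this repo's actual implementation.
private static void DecompressLzmaSizeStreamSketch(Stream input, long compressedSize, Stream output)
{
	// assumed layout: 5 bytes of LZMA properties followed by an 8-byte little-endian decompressed size
	byte[] header = new byte[13];
	if (input.Read(header, 0, header.Length) != header.Length)
	{
		throw new EndOfStreamException("Unable to read LZMA header");
	}
	long decompressedSize = BitConverter.ToInt64(header, 5);
	byte[] properties = new byte[5];
	Array.Copy(header, properties, properties.Length);

	SevenZip.Compression.LZMA.Decoder decoder = new SevenZip.Compression.LZMA.Decoder();
	decoder.SetDecoderProperties(properties);
	// the remaining bytes of the chunk are the LZMA payload
	decoder.Code(input, output, compressedSize - header.Length, decompressedSize, null);
}
// Note that callers such as ReadRawWebMetadata rewind the output stream before reading
// from it, so this sketch deliberately leaves the output position at the end of the data.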
private void ReadRawWebMetadata(Stream stream, out Stream dataStream, out long metadataOffset)
{
	BundleRawWebHeader header = Header.RawWeb;
	int metadataSize = BundleRawWebHeader.HasUncompressedBlocksInfoSize(Header.Version)
		? header.UncompressedBlocksInfoSize
		: 0;
	switch (Header.Signature)
	{
		case BundleType.UnityRaw:
			{
				dataStream = stream;
				metadataOffset = stream.Position;
				ReadMetadata(dataStream, metadataSize);
			}
			break;

		case BundleType.UnityWeb:
			{
				// read only the last chunk
				BundleScene chunkInfo = header.Scenes[header.Scenes.Length - 1];
				dataStream = new MemoryStream(new byte[chunkInfo.DecompressedSize]);
				SevenZipHelper.DecompressLZMASizeStream(stream, chunkInfo.CompressedSize, dataStream);
				metadataOffset = 0;
				dataStream.Position = 0;
				ReadMetadata(dataStream, metadataSize);
			}
			break;

		default:
			throw new Exception($"Unsupported bundle signature '{Header.Signature}'");
	}
}
private void ReadFileStreamMetadata(Stream stream, long basePosition)
{
	BundleFileStreamHeader header = Header.FileStream;
	if (header.Flags.IsBlocksInfoAtTheEnd())
	{
		stream.Position = basePosition + (header.Size - header.CompressedBlocksInfoSize);
	}

	CompressionType metaCompression = header.Flags.GetCompression();
	switch (metaCompression)
	{
		case CompressionType.None:
			{
				ReadMetadata(stream, header.UncompressedBlocksInfoSize);
			}
			break;

		case CompressionType.Lzma:
			{
				using (MemoryStream uncompressedStream = new MemoryStream(new byte[header.UncompressedBlocksInfoSize]))
				{
					SevenZipHelper.DecompressLZMAStream(stream, header.CompressedBlocksInfoSize, uncompressedStream, header.UncompressedBlocksInfoSize);
					uncompressedStream.Position = 0;
					ReadMetadata(uncompressedStream, header.UncompressedBlocksInfoSize);
				}
			}
			break;

		case CompressionType.Lz4:
		case CompressionType.Lz4HC:
			{
				using (MemoryStream uncompressedStream = new MemoryStream(new byte[header.UncompressedBlocksInfoSize]))
				{
					using (Lz4DecodeStream decodeStream = new Lz4DecodeStream(stream, header.CompressedBlocksInfoSize))
					{
						decodeStream.ReadBuffer(uncompressedStream, header.UncompressedBlocksInfoSize);
					}
					uncompressedStream.Position = 0;
					ReadMetadata(uncompressedStream, header.UncompressedBlocksInfoSize);
				}
			}
			break;

		default:
			throw new NotSupportedException($"Bundle compression '{metaCompression}' isn't supported");
	}
}
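// The flag helpers used above (GetCompression, IsBlocksInfoAtTheEnd) map onto the
// published UnityFS archive flag layout: the low six bits select the compression
// algorithm, 0x40 marks "blocks and directory info combined", and 0x80 marks "blocks
// info at the end of the file". This is a sketch based on the public format
// description; the masks and names are assumptions, not this repo's BundleFlags code.
internal static class ArchiveFlagsSketch
{
	private const uint CompressionTypeMask = 0x3F;
	private const uint BlocksAndDirectoryInfoCombinedMask = 0x40;
	private const uint BlocksInfoAtTheEndMask = 0x80;

	public static uint GetCompression(uint flags) => flags & CompressionTypeMask;
	public static bool IsBlocksInfoAtTheEnd(uint flags) => (flags & BlocksInfoAtTheEndMask) != 0;
}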
private void Read530Metadata(BundleReader reader, long headerSize)
{
	if (Header.Flags.IsMetadataAtTheEnd())
	{
		reader.BaseStream.Position = Header.BundleSize - Header.MetadataCompressedSize;
	}

	BundleCompressType metaCompression = Header.Flags.GetCompression();
	switch (metaCompression)
	{
		case BundleCompressType.None:
			{
				Metadata.Read(reader);
				long expectedPosition = Header.Flags.IsMetadataAtTheEnd()
					? Header.BundleSize
					: headerSize + Header.MetadataDecompressedSize;
				if (reader.BaseStream.Position != expectedPosition)
				{
					throw new Exception($"Read to position {reader.BaseStream.Position} but expected {expectedPosition}");
				}
			}
			break;

		case BundleCompressType.LZMA:
			{
				using (MemoryStream stream = new MemoryStream(new byte[Header.MetadataDecompressedSize]))
				{
					SevenZipHelper.DecompressLZMASizeStream(reader.BaseStream, Header.MetadataCompressedSize, stream);
					using (BundleReader decompressReader = new BundleReader(stream, reader.EndianType, reader.Type, reader.Generation))
					{
						Metadata.Read(decompressReader);
					}
					if (stream.Position != Header.MetadataDecompressedSize)
					{
						throw new Exception($"Read {stream.Position} but expected {Header.MetadataDecompressedSize}");
					}
				}
			}
			break;

		case BundleCompressType.LZ4:
		case BundleCompressType.LZ4HZ:
			{
				using (MemoryStream stream = new MemoryStream(new byte[Header.MetadataDecompressedSize]))
				{
					using (Lz4DecodeStream decodeStream = new Lz4DecodeStream(reader.BaseStream, Header.MetadataCompressedSize))
					{
						decodeStream.ReadBuffer(stream, Header.MetadataDecompressedSize);
					}
					stream.Position = 0;
					using (BundleReader decompressReader = new BundleReader(stream, reader.EndianType, reader.Type, reader.Generation))
					{
						Metadata.Read(decompressReader);
					}
					if (stream.Position != Header.MetadataDecompressedSize)
					{
						throw new Exception($"Read {stream.Position} but expected {Header.MetadataDecompressedSize}");
					}
				}
			}
			break;

		default:
			throw new NotSupportedException($"Bundle compression '{metaCompression}' isn't supported");
	}
}
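// An equivalent of the Lz4DecodeStream usage above, sketched with the public
// K4os.Compression.LZ4 package instead of the repo's own stream wrapper. UnityFS stores
// each LZ4/LZ4HC block as a single raw LZ4 block (block format, not the frame format),
// so one LZ4Codec.Decode call suffices. The package choice and buffering strategy here
// are assumptions for illustration, not this repo's implementation.
private static void DecompressLz4BlockSketch(Stream input, int compressedSize, Stream output, int decompressedSize)
{
	byte[] compressed = new byte[compressedSize];
	int offset = 0;
	while (offset < compressedSize)
	{
		int read = input.Read(compressed, offset, compressedSize - offset);
		if (read == 0)
		{
			throw new EndOfStreamException("Unable to read LZ4 block");
		}
		offset += read;
	}

	byte[] decompressed = new byte[decompressedSize];
	int written = K4os.Compression.LZ4.LZ4Codec.Decode(compressed, 0, compressedSize, decompressed, 0, decompressedSize);
	if (written != decompressedSize)
	{
		throw new Exception($"Decoded {written} bytes but expected {decompressedSize}");
	}
	output.Write(decompressed, 0, decompressedSize);
}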
private void Read530Blocks(SmartStream dataStream, long headerSize)
{
	if (Header.Flags.IsMetadataAtTheEnd())
	{
		dataStream.Position = headerSize;
	}

	int cachedBlock = -1;
	long dataOffset = dataStream.Position;

	// If a MemoryStream has compressed blocks then we need to create an individual stream for each entry and copy its data into it
	bool createIndividualStreams = dataStream.StreamType == SmartStreamType.Memory;
	if (createIndividualStreams)
	{
		// find out if this bundle file has compressed blocks
		// (reset the flag first; otherwise the loop below could never clear it)
		createIndividualStreams = false;
		foreach (BlockInfo block in Metadata.BlockInfos)
		{
			if (block.Flags.GetCompression() != BundleCompressType.None)
			{
				createIndividualStreams = true;
				break;
			}
		}
	}

	using (SmartStream blockStream = SmartStream.CreateNull())
	{
		foreach (BundleFileEntry entry in Metadata.Entries.Values)
		{
			// find the block that contains the beginning of this entry
			long blockCompressedOffset = 0;
			long blockDecompressedOffset = 0;
			int blockIndex = 0;
			while (blockDecompressedOffset + Metadata.BlockInfos[blockIndex].DecompressedSize <= entry.Offset)
			{
				blockCompressedOffset += Metadata.BlockInfos[blockIndex].CompressedSize;
				blockDecompressedOffset += Metadata.BlockInfos[blockIndex].DecompressedSize;
				blockIndex++;
			}

			// if at least one block of this entry is compressed, or the flag above is set,
			// we should copy the data of the current entry to a separate stream
			bool needToCopy = createIndividualStreams;
			if (!needToCopy)
			{
				// check if this entry has compressed blocks
				long entrySize = 0;
				for (int bi = blockIndex; entrySize < entry.Size; bi++)
				{
					if (Metadata.BlockInfos[bi].Flags.GetCompression() != BundleCompressType.None)
					{
						// it does, so we need to create an individual stream and decompress the entry's data into it
						needToCopy = true;
						break;
					}
					entrySize += Metadata.BlockInfos[bi].DecompressedSize;
				}
			}

			long entryOffsetInsideBlock = entry.Offset - blockDecompressedOffset;
			if (needToCopy)
			{
				// at least one block is compressed, so we copy the data of the current entry to a separate stream
				using (SmartStream entryStream = CreateStream(entry.Size))
				{
					long left = entry.Size;
					dataStream.Position = dataOffset + blockCompressedOffset;

					// copy the data of all blocks used by the current entry into the new stream
					for (int bi = blockIndex; left > 0; bi++)
					{
						long blockOffset = 0;
						BlockInfo block = Metadata.BlockInfos[bi];
						if (cachedBlock == bi)
						{
							// some data of the previous entry is in the same block as this one,
							// so we don't need to unpack it again; instead we can use the cached stream
							dataStream.Position += block.CompressedSize;
						}
						else
						{
							BundleCompressType compressType = block.Flags.GetCompression();
							switch (compressType)
							{
								case BundleCompressType.None:
									blockOffset = dataOffset + blockCompressedOffset;
									blockStream.Assign(dataStream);
									break;

								case BundleCompressType.LZMA:
									blockStream.Move(CreateStream(block.DecompressedSize));
									SevenZipHelper.DecompressLZMAStream(dataStream, block.CompressedSize, blockStream, block.DecompressedSize);
									break;

								case BundleCompressType.LZ4:
								case BundleCompressType.LZ4HZ:
									blockStream.Move(CreateStream(block.DecompressedSize));
									using (Lz4DecodeStream lzStream = new Lz4DecodeStream(dataStream, block.CompressedSize))
									{
										long read = lzStream.Read(blockStream, block.DecompressedSize);
										if (read != block.DecompressedSize)
										{
											throw new Exception($"Read {read} but expected {block.DecompressedSize}");
										}
										if (lzStream.IsDataLeft)
										{
											throw new Exception("LZ4 stream still has some data");
										}
									}
									break;

								default:
									throw new NotImplementedException($"Bundle compression '{compressType}' isn't supported");
							}
							cachedBlock = bi;
						}

						// consider the following offsets:
						// 1) block - if it is a new stream then the offset is 0, otherwise it is the offset of this block in the bundle file
						// 2) entry - for the first block of the current entry it is the entry's offset relative to this block, otherwise 0
						long fragmentSize = block.DecompressedSize - entryOffsetInsideBlock;
						blockStream.Position = blockOffset + entryOffsetInsideBlock;
						entryOffsetInsideBlock = 0;

						long size = Math.Min(fragmentSize, left);
						blockStream.CopyStream(entryStream, size);

						blockCompressedOffset += block.CompressedSize;
						left -= size;
					}
					if (left < 0)
					{
						throw new Exception("Read more than expected");
					}

					FileEntryOffset feOffset = new FileEntryOffset(entryStream.CreateReference(), 0);
					m_entryStreams.Add(entry, feOffset);
				}
			}
			else
			{
				// no compressed blocks were found, so we can use the original bundle stream;
				// since FileEntry.Offset is an offset within the decompressed data, we translate it into a position in the bundle stream
				FileEntryOffset feOffset = new FileEntryOffset(dataStream.CreateReference(), dataOffset + blockCompressedOffset + entryOffsetInsideBlock);
				m_entryStreams.Add(entry, feOffset);
			}
		}
	}
}
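// The block-offset walk at the top of the entry loop in Read530Blocks, extracted into a
// standalone helper for clarity. The tuple-based block list is a simplified placeholder
// for Metadata.BlockInfos; the arithmetic mirrors the loop above: walk block by block,
// tracking both compressed and decompressed positions, until the block containing the
// entry's decompressed offset is reached.
private static (int BlockIndex, long CompressedOffset, long OffsetInsideBlock) LocateEntryBlockSketch(
	IReadOnlyList<(long CompressedSize, long DecompressedSize)> blocks, long entryOffset)
{
	long compressedOffset = 0;
	long decompressedOffset = 0;
	int index = 0;
	while (decompressedOffset + blocks[index].DecompressedSize <= entryOffset)
	{
		compressedOffset += blocks[index].CompressedSize;
		decompressedOffset += blocks[index].DecompressedSize;
		index++;
	}
	// the entry begins inside blocks[index], this many bytes past the block's decompressed start
	return (index, compressedOffset, entryOffset - decompressedOffset);
}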