Ejemplo n.º 1
0
        // Reads the bundle metadata for pre-530 generations and returns a reference to the
        // stream that the metadata's entry offsets refer to: the raw bundle stream for
        // UnityRaw, or a freshly decompressed chunk stream for UnityWeb.
        private SmartStream ReadPre530Metadata(BundleFileReader reader)
        {
            if (Header.Type == BundleType.UnityRaw)
            {
                // Metadata sits uncompressed directly in the bundle stream.
                Metadata.Read(reader);
                return m_stream.CreateReference();
            }

            if (Header.Type == BundleType.UnityWeb)
            {
                // Only the last chunk is decompressed here (original note: "read only last
                // chunk. wtf?") — NOTE(review): confirm earlier chunks really are irrelevant.
                ChunkInfo lastChunk = Header.ChunkInfos[Header.ChunkInfos.Count - 1];
                using (SmartStream decompressed = SmartStream.CreateMemory(new byte[lastChunk.DecompressedSize]))
                {
                    SevenZipHelper.DecompressLZMASizeStream(reader.BaseStream, lastChunk.CompressedSize, decompressed);
                    using (BundleFileReader metadataReader = new BundleFileReader(decompressed, reader.EndianType, reader.Generation))
                    {
                        Metadata.Read(metadataReader);
                    }
                    return decompressed.CreateReference();
                }
            }

            throw new NotSupportedException($"Bundle type {Header.Type} isn't supported before 530 generation");
        }
Ejemplo n.º 2
0
 // Decompresses a Brotli-encoded payload from the reader's underlying stream into an
 // in-memory SmartStream and returns a reference to it.
 private SmartStream ReadBrotli(EndianReader reader)
 {
     using (SmartStream decompressed = SmartStream.CreateMemory())
     {
         using (BrotliInputStream decoder = new BrotliInputStream(reader.BaseStream))
         {
             decoder.CopyTo(decompressed);
         }
         return decompressed.CreateReference();
     }
 }
Ejemplo n.º 3
0
 // Inflates a GZip-compressed payload from the reader's underlying stream into an
 // in-memory SmartStream and returns a reference to it.
 private SmartStream ReadGZip(EndianReader reader)
 {
     using (SmartStream decompressed = SmartStream.CreateMemory())
     {
         using (GZipStream decoder = new GZipStream(reader.BaseStream, CompressionMode.Decompress))
         {
             decoder.CopyTo(decompressed);
         }
         return decompressed.CreateReference();
     }
 }
Ejemplo n.º 4
0
        // Registers a stream reference for every metadata entry of a pre-530 bundle.
        // Entry offsets are relative to the data area, whose start depends on the bundle type.
        private void ReadPre530Blocks(SmartStream dataStream)
        {
            long dataAreaStart;
            if (Header.Type == BundleType.UnityRaw)
            {
                // Raw bundles keep the entry data directly after the header.
                dataAreaStart = Header.HeaderSize;
            }
            else if (Header.Type == BundleType.UnityWeb)
            {
                // Web bundle data was decompressed into a fresh stream, so offsets start at zero.
                dataAreaStart = 0;
            }
            else
            {
                throw new NotSupportedException($"Bundle type {Header.Type} isn't supported before 530 generation");
            }

            foreach (BundleFileEntry entry in Metadata.Entries.Values)
            {
                m_entryStreams.Add(entry, new FileEntryOffset(dataStream.CreateReference(), dataAreaStart + entry.Offset));
            }
        }
Ejemplo n.º 5
0
        // Builds a stream reference for every entry of a 530+ bundle.
        // Entries whose data spans a compressed block get copied into their own stream;
        // entries backed purely by uncompressed blocks reuse the bundle stream plus an
        // offset (no copy). A one-block cache (cachedBlock) avoids decompressing the same
        // block twice when consecutive entries share it.
        private void Read530Blocks(SmartStream dataStream)
        {
            if (Header.Flags.IsMetadataAtTheEnd())
            {
                // Metadata trails the data, so the data area starts right after the header.
                dataStream.Position = Header.HeaderSize;
            }

            int  cachedBlock = -1;
            long dataOffset  = dataStream.Position;

            // If MemoryStream has compressed block then we need to create individual streams for each entry and copy its data into it
            bool createIndividualStreams = dataStream.StreamType == SmartStreamType.Memory;

            if (createIndividualStreams)
            {
                // find out if this bundle file has compressed blocks
                // NOTE(review): this loop can only re-assert a value that is already true — it
                // never resets createIndividualStreams to false, so a memory-backed stream always
                // takes the copy path even when no block is compressed. Confirm whether that is
                // intended or whether the flag should start false here.
                foreach (BlockInfo block in Metadata.BlockInfos)
                {
                    if (block.Flags.GetCompression() != BundleCompressType.None)
                    {
                        createIndividualStreams = true;
                        break;
                    }
                }
            }

            using (SmartStream blockStream = SmartStream.CreateNull())
            {
                foreach (BundleFileEntry entry in Metadata.Entries.Values)
                {
                    // find out block offsets: walk blocks until the one containing entry.Offset
                    // (entry offsets are expressed in decompressed coordinates)
                    long blockCompressedOffset   = 0;
                    long blockDecompressedOffset = 0;
                    int  blockIndex = 0;
                    while (blockDecompressedOffset + Metadata.BlockInfos[blockIndex].DecompressedSize <= entry.Offset)
                    {
                        blockCompressedOffset   += Metadata.BlockInfos[blockIndex].CompressedSize;
                        blockDecompressedOffset += Metadata.BlockInfos[blockIndex].DecompressedSize;
                        blockIndex++;
                    }

                    // if at least one block of this entry is compressed or according to the rule above
                    // we should copy the data of current entry to a separate stream
                    bool needToCopy = createIndividualStreams;
                    if (!needToCopy)
                    {
                        // check if this entry has compressed blocks
                        long entrySize = 0;
                        for (int bi = blockIndex; entrySize < entry.Size; bi++)
                        {
                            if (Metadata.BlockInfos[bi].Flags.GetCompression() != BundleCompressType.None)
                            {
                                // it does. then we need to create individual stream and decompress its data into it
                                needToCopy = true;
                                break;
                            }
                            entrySize += Metadata.BlockInfos[bi].DecompressedSize;
                        }
                    }

                    long entryOffsetInsideBlock = entry.Offset - blockDecompressedOffset;
                    if (needToCopy)
                    {
                        // well, at least one block is compressed so we should copy data of current entry to a separate stream
                        using (SmartStream entryStream = CreateStream(entry.Size))
                        {
                            long left = entry.Size;
                            dataStream.Position = dataOffset + blockCompressedOffset;

                            // copy data of all blocks used by current entry to new stream
                            for (int bi = blockIndex; left > 0; bi++)
                            {
                                long      blockOffset = 0;
                                BlockInfo block       = Metadata.BlockInfos[bi];
                                if (cachedBlock == bi)
                                {
                                    // some data of previous entry is in the same block as this one
                                    // so we don't need to unpack it once again. Instead we can use cached stream
                                    // (just skip past the block's compressed bytes in the bundle stream)
                                    dataStream.Position += block.CompressedSize;
                                }
                                else
                                {
                                    BundleCompressType compressType = block.Flags.GetCompression();
                                    switch (compressType)
                                    {
                                    case BundleCompressType.None:
                                        // uncompressed block: read it straight from the bundle stream
                                        blockOffset = dataOffset + blockCompressedOffset;
                                        blockStream.Assign(dataStream);
                                        break;

                                    case BundleCompressType.LZMA:
                                        // decompress the whole block into a fresh stream (blockOffset stays 0)
                                        blockStream.Move(CreateStream(block.DecompressedSize));
                                        SevenZipHelper.DecompressLZMAStream(dataStream, block.CompressedSize, blockStream, block.DecompressedSize);
                                        break;

                                    case BundleCompressType.LZ4:
                                    case BundleCompressType.LZ4HZ:
                                        blockStream.Move(CreateStream(block.DecompressedSize));
                                        using (Lz4DecodeStream lzStream = new Lz4DecodeStream(dataStream, block.CompressedSize))
                                        {
                                            long read = lzStream.Read(blockStream, block.DecompressedSize);
                                            if (read != block.DecompressedSize || lzStream.IsDataLeft)
                                            {
                                                throw new Exception($"Read {read} but expected {block.DecompressedSize}");
                                            }
                                        }
                                        break;

                                    default:
                                        throw new NotImplementedException($"Bundle compression '{compressType}' isn't supported");
                                    }
                                    cachedBlock = bi;
                                }

                                // consider next offsets:
                                // 1) block - if it is new stream then offset is 0, otherwise offset of this block in the bundle file
                                // 2) entry - if this is first block for current entry then it is offset of this entry related to this block
                                //            otherwise 0
                                long fragmentSize = block.DecompressedSize - entryOffsetInsideBlock;
                                blockStream.Position   = blockOffset + entryOffsetInsideBlock;
                                entryOffsetInsideBlock = 0;

                                long size = Math.Min(fragmentSize, left);
                                blockStream.CopyStream(entryStream, size);

                                blockCompressedOffset += block.CompressedSize;
                                left -= size;
                            }
                            // size never exceeds left (Math.Min above), so this is a defensive check
                            if (left < 0)
                            {
                                throw new Exception($"Read more than expected");
                            }

                            FileEntryOffset feOffset = new FileEntryOffset(entryStream.CreateReference(), 0);
                            m_entryStreams.Add(entry, feOffset);
                        }
                    }
                    else
                    {
                        // no compressed blocks was found so we can use original bundle stream
                        // since FileEntry.Offset contains decompressedOffset we need to preliminarily subtract it
                        FileEntryOffset feOffset = new FileEntryOffset(dataStream.CreateReference(), dataOffset + blockCompressedOffset + entryOffsetInsideBlock);
                        m_entryStreams.Add(entry, feOffset);
                    }
                }
            }
        }
Ejemplo n.º 6
0
        // Extracts a single bundle entry into its own stream and returns a reference to it,
        // positioned at 0. Blocks spanned by the entry are read from m_stream, decompressing
        // LZMA/LZ4 blocks as needed. The most recently decompressed block is cached in
        // m_cachedBlockIndex / m_cachedBlockStream so a block shared by consecutive entries
        // is unpacked only once.
        // Throws ObjectDisposedException if the reader has been disposed.
        public SmartStream ReadEntry(BundleFileEntry entry)
        {
            if (m_isDisposed)
            {
                throw new ObjectDisposedException(nameof(BundleFileBlockReader));
            }

            // find out block offsets: walk blocks until the one containing entry.Offset
            // (entry offsets are expressed in decompressed coordinates)
            int  blockIndex;
            long blockCompressedOffset   = 0;
            long blockDecompressedOffset = 0;

            for (blockIndex = 0; blockDecompressedOffset + m_metadata.BlockInfos[blockIndex].DecompressedSize <= entry.Offset; blockIndex++)
            {
                blockCompressedOffset   += m_metadata.BlockInfos[blockIndex].CompressedSize;
                blockDecompressedOffset += m_metadata.BlockInfos[blockIndex].DecompressedSize;
            }
            long entryOffsetInsideBlock = entry.Offset - blockDecompressedOffset;

            using (SmartStream entryStream = CreateStream(entry.Size))
            {
                long left = entry.Size;
                m_stream.Position = m_dataOffset + blockCompressedOffset;

                // copy data of all blocks used by current entry to new stream
                while (left > 0)
                {
                    long      blockStreamOffset;
                    Stream    blockStream;
                    BlockInfo block = m_metadata.BlockInfos[blockIndex];
                    if (m_cachedBlockIndex == blockIndex)
                    {
                        // data of the previous entry is in the same block as this one
                        // so we don't need to unpack it once again. Instead we can use cached stream
                        // (just skip past the block's compressed bytes in the bundle stream)
                        blockStreamOffset  = 0;
                        blockStream        = m_cachedBlockStream;
                        m_stream.Position += block.CompressedSize;
                    }
                    else
                    {
                        BundleCompressType compressType = block.Flags.GetCompression();
                        if (compressType == BundleCompressType.None)
                        {
                            // uncompressed block: read it straight from the bundle stream
                            blockStreamOffset = m_dataOffset + blockCompressedOffset;
                            blockStream       = m_stream;
                        }
                        else
                        {
                            // compressed block: unpack it into the cache stream and remember its index
                            blockStreamOffset  = 0;
                            m_cachedBlockIndex = blockIndex;
                            m_cachedBlockStream.Move(CreateStream(block.DecompressedSize));
                            switch (compressType)
                            {
                            case BundleCompressType.LZMA:
                                SevenZipHelper.DecompressLZMAStream(m_stream, block.CompressedSize, m_cachedBlockStream, block.DecompressedSize);
                                break;

                            case BundleCompressType.LZ4:
                            case BundleCompressType.LZ4HZ:
                                using (Lz4DecodeStream lzStream = new Lz4DecodeStream(m_stream, block.CompressedSize))
                                {
                                    lzStream.ReadBuffer(m_cachedBlockStream, block.DecompressedSize);
                                }
                                break;

                            default:
                                throw new NotImplementedException($"Bundle compression '{compressType}' isn't supported");
                            }
                            blockStream = m_cachedBlockStream;
                        }
                    }

                    // consider next offsets:
                    // 1) block - if it is new stream then offset is 0, otherwise offset of this block in the bundle file
                    // 2) entry - if this is first block for current entry then it is offset of this entry related to this block
                    //            otherwise 0
                    long blockSize = block.DecompressedSize - entryOffsetInsideBlock;
                    blockStream.Position   = blockStreamOffset + entryOffsetInsideBlock;
                    entryOffsetInsideBlock = 0;

                    long size = Math.Min(blockSize, left);
                    blockStream.CopyStream(entryStream, size);
                    blockIndex++;

                    blockCompressedOffset += block.CompressedSize;
                    left -= size;
                }
                // size never exceeds left (Math.Min above), so this is a defensive check
                if (left < 0)
                {
                    throw new Exception($"Read more than expected");
                }
                entryStream.Position = 0;
                return(entryStream.CreateReference());
            }
        }