Example #1
0
        /// <summary>
        /// Deserializes the bundle metadata from <paramref name="reader"/>:
        /// an optional block-info table (for generations that carry one) followed by
        /// a count-prefixed list of file entries, keyed by entry name.
        /// </summary>
        /// <param name="reader">Reader positioned at the start of the metadata section.</param>
        public void Read(BundleReader reader)
        {
            if (HasBlockInfo(reader.Generation))
            {
                // skip 0x10 bytes of unknown data that precede the block table
                reader.BaseStream.Position += 0x10;
                BlockInfos = reader.ReadBundleArray<BlockInfo>();
            }

            int entryCount = reader.ReadInt32();
            var fileEntries = new Dictionary<string, BundleFileEntry>(entryCount);
            for (int index = 0; index < entryCount; index++)
            {
                BundleFileEntry fileEntry = reader.ReadBundle<BundleFileEntry>();
                fileEntries.Add(fileEntry.Name, fileEntry);
            }
            Entries = fileEntries;
        }
Example #2
0
 /// <summary>
 /// Copy constructor that rebinds an existing entry to a new absolute offset,
 /// reusing the source entry's stream, file path, name and size unchanged.
 /// </summary>
 /// <param name="copy">Entry to duplicate.</param>
 /// <param name="offset">New offset of the entry's data within its stream.</param>
 internal BundleFileEntry(BundleFileEntry copy, long offset) :
     this(copy.m_stream, copy.FilePath, copy.Name, offset, copy.Size)
 {
 }
Example #3
0
        /// <summary>
        /// Rebuilds the entry table for 5.3.0+ bundles whose payload is split into
        /// (possibly compressed) blocks described by <paramref name="blockInfos"/>.
        /// Entries backed only by uncompressed blocks are re-pointed at the original
        /// bundle stream; entries touching at least one compressed block are
        /// decompressed into a dedicated stream. Replaces <see cref="Metadata"/>
        /// with the rebuilt entry list.
        /// </summary>
        /// <param name="bundleStream">Bundle stream positioned at the start of the block data.</param>
        /// <param name="blockInfos">Per-block compressed/decompressed sizes and compression flags.</param>
        /// <exception cref="NotImplementedException">A block uses an unsupported compression type.</exception>
        private void Read530Blocks(SmartStream bundleStream, BlockInfo[] blockInfos)
        {
            int  cachedBlock = -1;                    // index of the block whose contents are currently in blockStream
            long dataOffset  = bundleStream.Position; // absolute start of the block data in the bundle file

            BundleFileEntry[] newEntries = new BundleFileEntry[Metadata.Entries.Count];
            using (SmartStream blockStream = SmartStream.CreateNull())
            {
                for (int ei = 0; ei < Metadata.Entries.Count; ei++)
                {
                    BundleFileEntry entry = Metadata.Entries[ei];

                    // find block corresponding to current entry
                    int  blockIndex         = 0;
                    long compressedOffset   = 0;
                    long decompressedOffset = 0;
                    while (true)
                    {
                        BlockInfo block = blockInfos[blockIndex];
                        if (decompressedOffset + block.DecompressedSize > entry.Offset)
                        {
                            break;
                        }
                        blockIndex++;
                        compressedOffset   += block.CompressedSize;
                        decompressedOffset += block.DecompressedSize;
                    }

                    // check does this entry use any compressed blocks
                    long entrySize    = 0;
                    bool isCompressed = false;
                    for (int bi = blockIndex; entrySize < entry.Size; bi++)
                    {
                        BlockInfo block = blockInfos[bi];
                        entrySize += block.DecompressedSize;
                        if (block.Flags.GetCompression() != BundleCompressType.None)
                        {
                            isCompressed = true;
                            break;
                        }
                    }

                    if (isCompressed)
                    {
                        // well, at least one block is compressed so we should copy data of current entry to separate stream
                        using (SmartStream entryStream = CreateStream(entry.Size))
                        {
                            long left        = entry.Size;
                            long entryOffset = entry.Offset - decompressedOffset;
                            bundleStream.Position = dataOffset + compressedOffset;

                            // copy data of all blocks used by current entry to created stream
                            for (int bi = blockIndex; left > 0; bi++)
                            {
                                long      blockOffset = 0;
                                BlockInfo block       = blockInfos[bi];
                                if (cachedBlock == bi)
                                {
                                    // some data of previous entry is in the same block as this one
                                    // so we don't need to unpack it once again but can use cached stream
                                    // NOTE(review): cachedBlock is also set for the None case below, where
                                    // blockStream aliases bundleStream and needed blockOffset = dataOffset +
                                    // compressedOffset; on a cache hit blockOffset stays 0 — verify this path
                                    // seeks to the intended position for uncompressed cached blocks.
                                    bundleStream.Position += block.CompressedSize;
                                }
                                else
                                {
                                    BundleCompressType compressType = block.Flags.GetCompression();
                                    switch (compressType)
                                    {
                                    case BundleCompressType.None:
                                        // read the raw block straight out of the bundle stream
                                        blockOffset = dataOffset + compressedOffset;
                                        blockStream.Assign(bundleStream);
                                        break;

                                    case BundleCompressType.LZMA:
                                        // inflate the whole block into a fresh stream
                                        blockStream.Move(CreateStream(block.DecompressedSize));
                                        SevenZipHelper.DecompressLZMAStream(bundleStream, block.CompressedSize, blockStream, block.DecompressedSize);
                                        break;

                                    case BundleCompressType.LZ4:
                                    case BundleCompressType.LZ4HZ:
                                        blockStream.Move(CreateStream(block.DecompressedSize));
                                        using (Lz4Stream lzStream = new Lz4Stream(bundleStream, block.CompressedSize))
                                        {
                                            long read = lzStream.Read(blockStream, block.DecompressedSize);
                                            if (read != block.DecompressedSize)
                                            {
                                                // report the value actually compared against (DecompressedSize)
                                                throw new Exception($"Read {read} but expected {block.DecompressedSize}");
                                            }
                                        }
                                        break;

                                    default:
                                        throw new NotImplementedException($"Bundle compression '{compressType}' isn't supported");
                                    }
                                    cachedBlock = bi;
                                }

                                // consider next offsets:
                                // 1) block - if it is new stream then offset is 0, otherwise offset of this block in bundle file
                                // 2) entry - if this is first block for current entry then it is offset of this entry related to this block
                                //			  otherwise 0
                                long fragmentSize = block.DecompressedSize - entryOffset;
                                blockStream.Position = blockOffset + entryOffset;
                                entryOffset          = 0;

                                long size = Math.Min(fragmentSize, left);
                                blockStream.CopyStream(entryStream, size);

                                compressedOffset += block.CompressedSize;
                                left             -= size;
                            }
                            if (left < 0)
                            {
                                throw new Exception($"Read more than expected");
                            }

                            // entry now lives at offset 0 of its own decompressed stream
                            newEntries[ei] = new BundleFileEntry(entryStream, entry.FilePath, entry.Name, 0, entry.Size);
                        }
                    }
                    else
                    {
                        // no compressed blocks was found so we can use original bundle stream
                        newEntries[ei] = new BundleFileEntry(entry, dataOffset + entry.Offset);
                    }
                }
            }
            Metadata.Dispose();
            Metadata = new BundleMetadata(m_filePath, newEntries);
        }
        /// <summary>
        /// Extracts a single entry's data from the block-compressed bundle into a new
        /// stream and returns a reference to it (positioned at 0). Decompressed blocks
        /// are cached in <c>m_cachedBlockStream</c> so consecutive entries sharing a
        /// block don't unpack it twice.
        /// </summary>
        /// <param name="entry">Entry to extract; its Offset is relative to the decompressed block data.</param>
        /// <returns>A stream reference containing exactly <c>entry.Size</c> bytes.</returns>
        /// <exception cref="ObjectDisposedException">The reader has been disposed.</exception>
        /// <exception cref="NotImplementedException">A block uses an unsupported compression type.</exception>
        public SmartStream ReadEntry(BundleFileEntry entry)
        {
            if (m_isDisposed)
            {
                throw new ObjectDisposedException(nameof(BundleFileBlockReader));
            }

            // find out block offsets
            int  blockIndex;
            long blockCompressedOffset   = 0; // running sum of compressed block sizes (offset in the bundle file)
            long blockDecompressedOffset = 0; // running sum of decompressed block sizes (offset entry.Offset is measured in)

            // advance to the first block that contains the entry's start
            for (blockIndex = 0; blockDecompressedOffset + m_metadata.BlockInfos[blockIndex].DecompressedSize <= entry.Offset; blockIndex++)
            {
                blockCompressedOffset   += m_metadata.BlockInfos[blockIndex].CompressedSize;
                blockDecompressedOffset += m_metadata.BlockInfos[blockIndex].DecompressedSize;
            }
            long entryOffsetInsideBlock = entry.Offset - blockDecompressedOffset;

            using (SmartStream entryStream = CreateStream(entry.Size))
            {
                long left = entry.Size; // bytes of the entry still to copy
                m_stream.Position = m_dataOffset + blockCompressedOffset;

                // copy data of all blocks used by current entry to new stream
                while (left > 0)
                {
                    long      blockStreamOffset;
                    Stream    blockStream;
                    BlockInfo block = m_metadata.BlockInfos[blockIndex];
                    if (m_cachedBlockIndex == blockIndex)
                    {
                        // data of the previous entry is in the same block as this one
                        // so we don't need to unpack it once again. Instead we can use cached stream
                        blockStreamOffset  = 0;
                        blockStream        = m_cachedBlockStream;
                        // still skip past the block's compressed bytes in the bundle stream
                        m_stream.Position += block.CompressedSize;
                    }
                    else
                    {
                        BundleCompressType compressType = block.Flags.GetCompression();
                        if (compressType == BundleCompressType.None)
                        {
                            // uncompressed: read straight from the bundle stream at the block's file offset
                            blockStreamOffset = m_dataOffset + blockCompressedOffset;
                            blockStream       = m_stream;
                        }
                        else
                        {
                            // compressed: inflate the whole block into the cache stream
                            blockStreamOffset  = 0;
                            m_cachedBlockIndex = blockIndex;
                            m_cachedBlockStream.Move(CreateStream(block.DecompressedSize));
                            switch (compressType)
                            {
                            case BundleCompressType.LZMA:
                                SevenZipHelper.DecompressLZMAStream(m_stream, block.CompressedSize, m_cachedBlockStream, block.DecompressedSize);
                                break;

                            case BundleCompressType.LZ4:
                            case BundleCompressType.LZ4HZ:
                                using (Lz4DecodeStream lzStream = new Lz4DecodeStream(m_stream, block.CompressedSize))
                                {
                                    lzStream.ReadBuffer(m_cachedBlockStream, block.DecompressedSize);
                                }
                                break;

                            default:
                                throw new NotImplementedException($"Bundle compression '{compressType}' isn't supported");
                            }
                            blockStream = m_cachedBlockStream;
                        }
                    }

                    // consider next offsets:
                    // 1) block - if it is new stream then offset is 0, otherwise offset of this block in the bundle file
                    // 2) entry - if this is first block for current entry then it is offset of this entry related to this block
                    //			  otherwise 0
                    long blockSize = block.DecompressedSize - entryOffsetInsideBlock;
                    blockStream.Position   = blockStreamOffset + entryOffsetInsideBlock;
                    entryOffsetInsideBlock = 0; // only the first block uses the intra-block offset

                    long size = Math.Min(blockSize, left);
                    blockStream.CopyStream(entryStream, size);
                    blockIndex++;

                    blockCompressedOffset += block.CompressedSize;
                    left -= size;
                }
                if (left < 0)
                {
                    throw new Exception($"Read more than expected");
                }
                entryStream.Position = 0;
                return(entryStream.CreateReference());
            }
        }