// Design note: UE builds a Map<FIoChunkId, FIoOffsetAndLength> to speed up loading
// chunks by id; measured, that map costs roughly 30 MB of memory. Since lookup by
// FIoChunkId is rare (essentially just the global reader), the map is only built
// when the GENERATE_CHUNK_ID_DICT define is set, and a linear scan is used otherwise.
/// <summary>Checks whether the given chunk id is present in this container's table of contents.</summary>
/// <param name="chunkId">Chunk id to look up.</param>
/// <returns>true when the chunk exists in this container.</returns>
public bool DoesChunkExist(FIoChunkId chunkId)
{
#if GENERATE_CHUNK_ID_DICT
    // O(1) dictionary lookup, paid for with extra resident memory.
    return Toc.ContainsKey(chunkId);
#else
    // O(n) allocation-free scan over the chunk id array.
    return Array.IndexOf(TocResource.ChunkIds, chunkId) != -1;
#endif
}
/// <summary>Reads the full payload of the chunk with the given id.</summary>
/// <param name="chunkId">Id of the chunk to read.</param>
/// <returns>The chunk's bytes.</returns>
/// <exception cref="KeyNotFoundException">Thrown when the chunk id is not present in this container.</exception>
public byte[] Read(FIoChunkId chunkId)
{
#if GENERATE_CHUNK_ID_DICT
    // TryGetValue instead of the indexer so the failure message names the chunk.
    if (!Toc.TryGetValue(chunkId, out var offsetLength))
        throw new KeyNotFoundException($"Couldn't find chunk {chunkId}");
#else
    // BUG FIX: Array.IndexOf returns -1 for a missing chunk; the previous code
    // indexed ChunkOffsetLengths with it directly, crashing with an
    // IndexOutOfRangeException instead of a meaningful lookup error.
    var chunkIndex = Array.IndexOf(TocResource.ChunkIds, chunkId);
    if (chunkIndex == -1)
        throw new KeyNotFoundException($"Couldn't find chunk {chunkId}");
    var offsetLength = TocResource.ChunkOffsetLengths[chunkIndex];
#endif
    return Read(chunkId, (long)offsetLength.Offset, (long)offsetLength.Length);
}
/// <summary>Reads the full payload of the chunk with the given id.</summary>
/// <param name="chunkId">Id of the chunk to read.</param>
/// <returns>The chunk's bytes.</returns>
/// <exception cref="KeyNotFoundException">Thrown when the chunk id cannot be resolved in this container.</exception>
public byte[] Read(FIoChunkId chunkId)
{
    // Guard clause: fail fast when the id doesn't resolve, then do the actual read.
    if (!TryResolve(chunkId, out var offsetLength))
        throw new KeyNotFoundException($"Couldn't find chunk {chunkId} in IoStore {Name}");

    return Read((long)offsetLength.Offset, (long)offsetLength.Length);
}
// Reads [offset, offset+length) of a chunk's data, decompressing it block by block.
// NOTE(review): this method is TRUNCATED in this view — the body below ends inside the
// per-block loop (after the concurrent-reader setup); the non-concurrent branch,
// decompression, and return are not visible here.
private byte[] Read(FIoChunkId chunkId, long offset, long length) {
    var compressionBlockSize = TocResource.Header.CompressionBlockSize;
    // Destination buffer for the requested byte range.
    var dst = new byte[length];
    // First and last compression blocks that overlap [offset, offset+length).
    var firstBlockIndex = (int)(offset / compressionBlockSize);
    var lastBlockIndex = (int)(((offset + dst.Length).Align((int)compressionBlockSize) - 1) / compressionBlockSize);
    // Where the requested range starts inside the first block.
    var offsetInBlock = offset % compressionBlockSize;
    var remainingSize = length;
    var dstOffset = 0;
    // Scratch buffers, grown lazily to the largest block seen so they can be reused
    // across loop iterations instead of reallocating per block.
    var compressedBuffer = Array.Empty<byte>();
    var uncompressedBuffer = Array.Empty<byte>();
    // Per-partition reader clones, populated on demand when IsConcurrent is set.
    var clonedReaders = new FArchive[ContainerStreams.Count];
    for (int blockIndex = firstBlockIndex; blockIndex <= lastBlockIndex; blockIndex++) {
        ref var compressionBlock = ref TocResource.CompressionBlocks[blockIndex];
        // Compressed data is stored AES-aligned on disk.
        var rawSize = compressionBlock.CompressedSize.Align(Aes.ALIGN);
        if (compressedBuffer.Length < rawSize) {
            compressedBuffer = new byte[rawSize];
        }
        var uncompressedSize = compressionBlock.UncompressedSize;
        if (uncompressedBuffer.Length < uncompressedSize) {
            uncompressedBuffer = new byte[uncompressedSize];
        }
        // A container can be split across multiple partition files of
        // Header.PartitionSize bytes each; map the absolute block offset to
        // (partition file, offset within that file).
        var partitionIndex = (int)((ulong)compressionBlock.Offset / TocResource.Header.PartitionSize);
        var partitionOffset = (long)((ulong)compressionBlock.Offset % TocResource.Header.PartitionSize);
        // NOTE(review): this static write into Pro_Swapper looks like an injected
        // hook, not part of the original reader — it leaks the last partition offset
        // as global mutable state and is overwritten every block; confirm it's intended.
        Pro_Swapper.Fortnite.FortniteExport.Offset = partitionOffset;
        FArchive reader;
        if (IsConcurrent) {
            // Each concurrent read gets its own clone of the partition stream so
            // seeks don't race with other readers; clones are created lazily.
            ref var clone = ref clonedReaders[partitionIndex];
            clone ??= (FArchive)ContainerStreams[partitionIndex].Clone();
            reader = clone;
        }
/// <summary>
/// Resolves a chunk id to its offset/length via the TOC's perfect hash tables,
/// falling back to an imperfect lookup for entries the perfect hash doesn't cover.
/// </summary>
/// <param name="chunkId">Chunk id to resolve.</param>
/// <param name="outOffsetLength">Receives the chunk's offset and length on success.</param>
/// <returns>true when the chunk id exists in this container.</returns>
public bool TryResolve(FIoChunkId chunkId, out FIoOffsetAndLength outOffsetLength)
{
    if (TocResource.ChunkPerfectHashSeeds != null)
    {
        var chunkCount = TocResource.Header.TocEntryCount;
        if (chunkCount == 0)
        {
            outOffsetLength = default;
            return false;
        }

        var seedCount = (uint)TocResource.ChunkPerfectHashSeeds.Length;
        if (seedCount == 0)
        {
            // Guards the modulo below against division by zero: an empty seed
            // table makes the perfect hash unusable, so use the fallback path.
            return TryResolveImperfect(chunkId, out outOffsetLength);
        }

        var seedIndex = (uint)(chunkId.HashWithSeed(0) % seedCount);
        var seed = TocResource.ChunkPerfectHashSeeds[seedIndex];
        if (seed == 0)
        {
            // A zero seed means no chunk hashes into this bucket.
            outOffsetLength = default;
            return false;
        }

        uint slot;
        if (seed < 0)
        {
            // Negative seeds directly encode the slot index as -(index + 1).
            var seedAsIndex = (uint)(-seed - 1);
            if (seedAsIndex < chunkCount)
            {
                slot = seedAsIndex;
            }
            else
            {
                // Entry without perfect hash
                return TryResolveImperfect(chunkId, out outOffsetLength);
            }
        }
        else
        {
            slot = (uint)(chunkId.HashWithSeed(seed) % chunkCount);
        }

        // BUG FIX: compare the chunk ids themselves rather than their 32-bit
        // GetHashCode() values — two distinct ids can collide on hash code,
        // which would have resolved to the wrong chunk's offset/length.
        if (TocResource.ChunkIds[slot].Equals(chunkId))
        {
            outOffsetLength = TocResource.ChunkOffsetLengths[slot];
            return true;
        }

        outOffsetLength = default;
        return false;
    }

    return TryResolveImperfect(chunkId, out outOffsetLength);
}
/// <summary>
/// Fallback resolution for chunk ids not covered by the perfect hash: uses the
/// prebuilt imperfect hash-map when one exists, otherwise scans the chunk id array.
/// </summary>
/// <param name="chunkId">Chunk id to resolve.</param>
/// <param name="outOffsetLength">Receives the chunk's offset and length on success.</param>
/// <returns>true when the chunk id exists in this container.</returns>
private bool TryResolveImperfect(FIoChunkId chunkId, out FIoOffsetAndLength outOffsetLength)
{
    // Prefer the dictionary when it was built — single O(1) lookup.
    if (TocImperfectHashMapFallback != null)
        return TocImperfectHashMapFallback.TryGetValue(chunkId, out outOffsetLength);

    // Linear scan; IndexOf yields -1 when the id is absent.
    var index = Array.IndexOf(TocResource.ChunkIds, chunkId);
    if (index >= 0)
    {
        outOffsetLength = TocResource.ChunkOffsetLengths[index];
        return true;
    }

    outOffsetLength = default;
    return false;
}
/// <summary>Returns the position of the chunk id within the TOC's chunk array, or -1 when absent.</summary>
/// <param name="chunkId">Chunk id to locate.</param>
public int ChunkIndex(FIoChunkId chunkId)
{
    return Array.IndexOf(TocResource.ChunkIds, chunkId);
}
// Design note: UE builds a Map<FIoChunkId, FIoOffsetAndLength> up front to speed up
// lookups by chunk id, which measured at roughly 30 MB of memory. Lookup by id is
// rare (essentially just the global reader), so existence checks route through
// TryResolve instead; the GENERATE_CHUNK_ID_DICT define exists for anyone who
// wants the dictionary anyway.
/// <summary>Checks whether the given chunk id exists in this container.</summary>
/// <param name="chunkId">Chunk id to look up.</param>
/// <returns>true when the chunk can be resolved.</returns>
public bool DoesChunkExist(FIoChunkId chunkId)
{
    return TryResolve(chunkId, out _);
}