// Dynamic-size entries are split into chunks that are spread around the file, so we need one read per chunk.
unsafe ReadHandle ScheduleDynamicSizeElementArrayReads(Entry entry, uint elementOffset, uint elementCount, long readSize, void* buffer)
{
    var block = m_Blocks[(int)entry.Header.BlockIndex];
    byte* dst = (byte*)buffer;
    var chunkReads = new NativeBlockList<ReadCommand>(64, 64);

    for (int i = 0; i < elementCount; ++i)
    {
        var e = new ElementRead();
        // Each element's start offset is stored in the entry's additional storage; its end is the start of
        // the next element, or the end-of-data offset kept in Header.HeaderMeta for the last element.
        e.start = entry.GetAdditionalStoragePtr()[i + elementOffset];
        e.end = i + elementOffset + 1 == entry.Count ? (long)entry.Header.HeaderMeta : entry.GetAdditionalStoragePtr()[i + elementOffset + 1];
        e.readDst = dst;
        var readOffset = ProcessDynamicSizeElement(ref chunkReads, ref block, e);
        dst += readOffset;
        readSize -= readOffset;
    }
    Checks.CheckEquals(0, readSize);

    //TODO: find a way to use the block array chunks directly for scheduling, probably add a read command buffer
    using (NativeArray<ReadCommand> readCommands = new NativeArray<ReadCommand>((int)chunkReads.Count, Allocator.Temp))
    {
        var cmdsPtr = (ReadCommand*)readCommands.GetUnsafePtr();
        for (int i = 0; i < readCommands.Length; ++i)
        {
            *(cmdsPtr + i) = chunkReads[i];
        }
        chunkReads.Dispose();
        return m_FileReader.Read(cmdsPtr, (uint)readCommands.Length, ReadMode.Async);
    }
}
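// Illustrative sketch (hypothetical values, not taken from a real capture): for a dynamic-size entry whose
// per-element offsets are [0, 12, 40] and whose Header.HeaderMeta is 55, reading all three elements yields
// the byte ranges [0,12), [12,40) and [40,55). Each range is handed to ProcessDynamicSizeElement, which
// turns it into one ReadCommand per chunk it touches, so the final command list can be longer than the
// element count.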
unsafe ReadHandle ScheduleConstSizeElementArrayRead(Entry entry, uint firstElement, long readSize, void* dst)
{
    var block = m_Blocks[(int)entry.Header.BlockIndex];
    // Fixed-size elements: the byte offset inside the block is the per-element size (EntriesMeta) times the element index.
    var blockOffset = entry.Header.EntriesMeta * firstElement;
    var chunkSize = block.Header.ChunkSize;
    var chunkIndex = (uint)(blockOffset / chunkSize);
    var chunk = block.GetOffsetsPtr() + chunkIndex;
    var chunkWithLocalOffset = *chunk + (uint)(blockOffset % chunkSize);
    byte* dstPtr = (byte*)dst;

    using (NativeBlockList<ReadCommand> chunkReads = new NativeBlockList<ReadCommand>(64, 64))
    {
        // First read: starts at the local offset inside the first chunk and is clamped to the bytes left in
        // that chunk, so the command never crosses a chunk boundary in the file.
        var firstChunkRemainder = (long)(chunkSize - (blockOffset % chunkSize));
        var readCmd = ReadCommandBufferUtils.GetCommand(dstPtr, Math.Min(readSize, firstChunkRemainder), chunkWithLocalOffset);
        chunkReads.Push(readCmd);
        dstPtr += firstChunkRemainder;
        readSize -= firstChunkRemainder;

        // Follow-up reads: one command per chunk until the requested size is covered.
        while (readSize > 0)
        {
            ++chunkIndex;
            var chunkReadSize = Math.Min(readSize, (long)chunkSize);
            chunk = block.GetOffsetsPtr() + chunkIndex;
            readCmd = ReadCommandBufferUtils.GetCommand(dstPtr, chunkReadSize, *chunk);
            dstPtr += chunkReadSize;
            readSize -= chunkReadSize;
            chunkReads.Push(readCmd);
        }

        //TODO: find a way to use the block array chunks directly for scheduling, probably add a read command buffer
        using (var tempCmds = new NativeArray<ReadCommand>((int)chunkReads.Count, Allocator.TempJob))
        {
            ReadCommand* cmdPtr = (ReadCommand*)tempCmds.GetUnsafePtr();
            for (int i = 0; i < tempCmds.Length; ++i)
            {
                *(cmdPtr + i) = chunkReads[i];
            }
            return m_FileReader.Read(cmdPtr, (uint)tempCmds.Length, ReadMode.Async);
        }
    }
}
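// Illustrative sketch (hypothetical values): with ChunkSize = 4096, an element size (EntriesMeta) of 8 and
// firstElement = 510, the block offset is 4080, so only 16 bytes remain in the first chunk. A read of
// 80 bytes (10 elements) therefore becomes two commands: 16 bytes at offsets[0] + 4080 and 64 bytes at
// offsets[1], written back to back into the destination buffer.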
// Returns the number of bytes scheduled to be written to the destination.
unsafe static long ProcessDynamicSizeElement(ref NativeBlockList<ReadCommand> chunkReads, ref Block block, ElementRead elementRead)
{
    long written = 0;
    var chunkSize = (long)block.Header.ChunkSize;
    var elementSize = elementRead.end - elementRead.start;
    var chunkIndex = elementRead.start / chunkSize;
    var chunkOffset = block.GetOffsetsPtr()[chunkIndex];
    var elementOffsetInChunk = elementRead.start % chunkSize;
    var remainingChunkSize = chunkSize - elementOffsetInChunk;

    // First read: starts at the element's offset inside its first chunk and is clamped to whatever is left of that chunk.
    var rSize = Math.Min(chunkSize, elementSize);
    if (remainingChunkSize != chunkSize)
    {
        chunkOffset += elementOffsetInChunk; // align the read to the element's start inside the chunk
        if (rSize > remainingChunkSize)
            rSize = remainingChunkSize;
    }
    chunkReads.Push(ReadCommandBufferUtils.GetCommand(elementRead.readDst, rSize, chunkOffset));
    elementRead.readDst += rSize;
    elementSize -= rSize;
    written += rSize;

    // If the element spans multiple chunks, emit one full-chunk (or final partial) read per chunk.
    while (elementSize > 0)
    {
        chunkIndex++;
        chunkOffset = block.GetOffsetsPtr()[chunkIndex];
        rSize = Math.Min(chunkSize, elementSize);
        chunkReads.Push(ReadCommandBufferUtils.GetCommand(elementRead.readDst, rSize, chunkOffset));
        elementRead.readDst += rSize;
        elementSize -= rSize;
        written += rSize;
    }
    return written;
}
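// Illustrative sketch (hypothetical values): with ChunkSize = 4096, an element with start = 4000 and
// end = 4200 (200 bytes) produces two commands: 96 bytes at offsets[0] + 4000 (the rest of the first chunk)
// and 104 bytes at offsets[1]; ProcessDynamicSizeElement then returns 200, the total number of bytes scheduled.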