/// <summary>
/// Reads <paramref name="chunkCount"/> chunk structures from the archive's chunk index,
/// starting at chunk index <paramref name="startOffset"/>.
/// </summary>
/// <param name="startOffset">Zero-based index of the first chunk to read (measured in chunks, not bytes).</param>
/// <param name="chunkCount">Number of chunks to read.</param>
/// <exception cref="ArgumentOutOfRangeException">The requested range lies outside the chunk index bounds.</exception>
/// <returns>The chunk structures, in index order.</returns>
private ArcStruct.Chunk[] GetChunks(uint startOffset, uint chunkCount)
{
    // Bounds check. startOffset is a chunk index (see the Seek below, which scales it
    // by ChunkSize), so it must be scaled here as well — the previous check compared
    // mixed units (chunks + bytes) against the byte size of the index and could let
    // out-of-range reads through. ulong arithmetic avoids uint overflow for large args.
    if (((ulong)startOffset + chunkCount) * (ulong)ArcStruct.ChunkSize > _header.ChunkIndexSize)
        throw new ArgumentOutOfRangeException(nameof(chunkCount),
            "The requested chunks are out of the chunk index bounds.");

    _stream.Seek(_chunkIndexPointer + (startOffset * ArcStruct.ChunkSize), SeekOrigin.Begin);

    var chunks = new ArcStruct.Chunk[chunkCount];
    for (uint i = 0; i < chunkCount; ++i)
    {
        chunks[i] = ArcStruct.Chunk.FromBytesLE(_reader.ReadBytes(ArcStruct.ChunkSize));
    }
    return chunks;
}
/// <summary>
/// Creates a new file entry in the archive using the provided path and data.
/// </summary>
/// <param name="path">The path of the entry in the archive.</param>
/// <param name="data">The data of the entry.</param>
/// <param name="storageMode">
/// If storageMode is not StorageMode.Plain, data will be compressed accordingly.
/// </param>
/// <exception cref="ArgumentNullException">path or data are <see langword="null"/>.</exception>
/// <exception cref="ArgumentException">
/// The specified path is invalid or the specified storage mode is not supported.
/// </exception>
/// <returns>The <see cref="ArcEntry"/> instance of the new entry.</returns>
/// <remarks>Locks the data object.</remarks>
public ArcEntry CreateEntry(string path, byte[] data, StorageMode storageMode)
{
    // Validate null arguments before taking any lock: lock(data) on a null
    // reference would throw Monitor.Enter's ArgumentNullException with the
    // wrong parameter name. Note ArgumentNullException's two-string ctor is
    // (paramName, message) — the opposite order of ArgumentException.
    if (path == null)
        throw new ArgumentNullException(nameof(path), "Path cannot be null.");
    if (data == null)
        throw new ArgumentNullException(nameof(data), "Data cannot be null.");

    // NOTE(review): locking on a caller-supplied array is fragile (any other
    // code locking the same array can contend or deadlock with us), but it is
    // the documented contract ("Locks the data object"), so it is kept.
    lock (data)
    lock (_lock)
    {
        ThrowIfDisposed();
        ThrowIfReadOnly();

        if (!PathUtils.EntryAbsolutePathRegex.IsMatch(path))
            throw new ArgumentException("The specified path is invalid.", nameof(path));
        if (!Enum.IsDefined(typeof(StorageMode), storageMode))
            throw new ArgumentException("The specified storage mode is not supported.", nameof(storageMode));

        // Compress the payload when requested; otherwise write the caller's data as-is.
        byte[] writeData = data;
        if (storageMode == StorageMode.Lz4Compressed)
        {
            writeData = new byte[Lz4.CompressBound(data.Length)];
            var cSize = Lz4.CompressDefault(data, writeData, data.Length, writeData.Length);
            Array.Resize(ref writeData, cSize);
        }

        // Append the payload at the current footer position; the footer and
        // index are rewritten afterwards by UpdateMeta().
        _stream.Seek(_header.FooterPointer, SeekOrigin.Begin);
        var chunk = new ArcStruct.Chunk()
        {
            DataPointer = (uint)_stream.Position,
            CompressedSize = (uint)writeData.Length,
            PlainSize = (uint)data.Length
        };
        _writer.Write(writeData);
        writeData = null; // release the (possibly large) compression buffer for GC

        var entry = new ArcEntry(
            this,
            new ArcStruct.Entry()
            {
                StorageMode = (uint)storageMode,
                DataPointer = chunk.DataPointer,
                CompressedSize = chunk.CompressedSize,
                PlainSize = chunk.PlainSize,
                Adler32 = Checksum(data),
                FileTime = DateTime.Now.ToFileTime()
                // The other fields will be set by UpdateMeta()
            },
            path,
            new ArcStruct.Chunk[] { chunk }
        );
        _entries.Add(entry);
        UpdateMeta((uint)_stream.Position);
        return entry;
    }
}