/// <summary>
/// Extracts an entry to a file under <paramref name="toPath"/>, optionally
/// verifying the stored Adler32 checksum before writing.
/// </summary>
/// <param name="entry">The entry to extract. Assumed to be owned by <c>_arc</c> — TODO confirm callers guarantee this.</param>
/// <param name="toPath">Destination directory; the entry's archive path is appended to it.</param>
/// <param name="checksum">If true, the data is verified against <c>entry.Adler32</c>.</param>
/// <exception cref="ArgumentNullException"><paramref name="toPath"/> is null.</exception>
/// <exception cref="InvalidDataException">The checksum was requested and does not match.</exception>
private void ExtractEntryTo(ArcEntry entry, string toPath, bool checksum)
{
    if (_arc == null)
        return;
    if (toPath == null)
        throw new ArgumentNullException(nameof(toPath));

    Directory.CreateDirectory(Path.Combine(toPath, Path.GetDirectoryName(entry.Path)));
    toPath = Path.Combine(toPath, entry.Path);

    byte[] data;
    // Only the archive read itself needs to be serialized. The original held
    // the lock through the checksum and the destination-file write as well,
    // blocking all other readers for the duration of disk I/O.
    lock (_arc)
    {
        data = _arc.ReadEntry(entry);
    }

    if (checksum && Arc.Checksum(data) != entry.Adler32)
        throw new InvalidDataException("Checksum failed: the data is probably corrupted.");

    File.WriteAllBytes(toPath, data);
}
/// <summary>
/// Verifies that <paramref name="entry"/> belongs to this archive instance.
/// </summary>
/// <param name="entry">The entry to check.</param>
/// <exception cref="ArgumentOutOfRangeException">The entry is not owned by this archive.</exception>
private void ThrowIfEntryNotOwned(ArcEntry entry)
{
    var isOwned = _entries.Contains(entry);
    if (isOwned)
        return;

    throw new ArgumentOutOfRangeException("entry", "This Arc instance doesn't own that entry.");
}
/// <summary>
/// Reads an entry in the archive and returns its data.
/// </summary>
/// <param name="entry">An entry owned by this archive.</param>
/// <returns>The data read; its length is exactly the entry's plain size.</returns>
/// <exception cref="ArgumentOutOfRangeException">The specified entry is not owned by this archive.</exception>
/// <exception cref="InvalidDataException">
/// The entry has been stored using an unsupported mode, or the archive ended
/// unexpectedly while reading a plain-stored chunk.
/// </exception>
/// <remarks>
/// Corruption is deliberately NOT detected here: <see cref="ArcEntry"/> exposes the
/// stored Adler32 and this class exposes Checksum(), so applications are free to
/// decide whether they want to accept corrupted data.
/// </remarks>
public byte[] ReadEntry(ArcEntry entry)
{
    lock (_lock)
    {
        ThrowIfDisposed();
        ThrowIfEntryNotOwned(entry);

        if (!Enum.IsDefined(typeof(StorageMode), entry.StorageMode))
            throw new InvalidDataException("The entry has been stored using an unsupported mode.");

        // Assemble the result directly into a right-sized array.
        // (The previous implementation returned MemoryStream.GetBuffer(), whose
        // length is the stream's CAPACITY, not its content size — callers could
        // receive a buffer longer than the entry's plain size.)
        var plainData = new byte[(int)entry.EntryStruct.PlainSize];
        var offset = 0;

        foreach (var chunk in entry.Chunks)
        {
            // Position the archive stream at this chunk's stored data.
            _stream.Seek(chunk.DataPointer, SeekOrigin.Begin);

            if (entry.StorageMode == StorageMode.Plain)
            {
                // BinaryReader.Read() may legally return fewer bytes than requested;
                // loop until the whole chunk is in, instead of silently truncating.
                var remaining = (int)chunk.PlainSize;
                while (remaining > 0)
                {
                    var read = _reader.Read(plainData, offset, remaining);
                    if (read <= 0)
                        throw new InvalidDataException("Unexpected end of archive while reading an entry.");
                    offset += read;
                    remaining -= read;
                }
            }
            else if (entry.StorageMode == StorageMode.Lz4Compressed)
            {
                // Decompress into a scratch buffer, then copy the chunk into place.
                var decompressedChunk = new byte[chunk.PlainSize];
                Lz4.DecompressSafe(
                    _reader.ReadBytes((int)chunk.CompressedSize),
                    decompressedChunk,
                    (int)chunk.CompressedSize,
                    (int)chunk.PlainSize
                );
                Buffer.BlockCopy(decompressedChunk, 0, plainData, offset, (int)chunk.PlainSize);
                offset += (int)chunk.PlainSize;
            }
        }

        return plainData;
    }
}
/// <summary>
/// Sets the path of an entry.
/// </summary>
/// <param name="entry">An entry owned by this archive.</param>
/// <param name="newPath">
/// A new path for the entry.
/// <para />
/// To be valid it must be:
/// - Absolute and canonical like "/my/stored/file.ext" (file extension is not required)
/// - Contain only non-extended ASCII characters, except for these: [control characters] : * ? " &lt; &gt; |
/// </param>
/// <exception cref="ArgumentNullException">The new path is null.</exception>
/// <exception cref="ArgumentOutOfRangeException">The specified entry is not owned by this archive.</exception>
/// <exception cref="ArgumentException">The new path is invalid.</exception>
public void MoveEntry(ArcEntry entry, string newPath)
{
    lock (_lock)
    {
        ThrowIfDisposed();
        ThrowIfReadOnly();
        ThrowIfEntryNotOwned(entry);

        // BUGFIX: the ArgumentNullException(paramName, message) arguments were
        // swapped, producing an exception whose ParamName was the message text.
        if (newPath == null)
            throw new ArgumentNullException(nameof(newPath), "New path cannot be null.");

        newPath = newPath.Trim();
        // BUGFIX: the original reported the non-existent parameter "newLocation".
        if (!PathUtils.EntryAbsolutePathRegex.IsMatch(newPath))
            throw new ArgumentException("The specified path is invalid.", nameof(newPath));

        // Entry paths are stored without the leading '/'.
        entry.EntryPath = newPath.TrimStart('/');

        UpdateMeta(_header.FooterPointer);
    }
}
/// <summary>
/// Removes an entry from the archive permanently.
/// </summary>
/// <param name="entry">An entry owned by this archive.</param>
/// <exception cref="ArgumentOutOfRangeException">The specified entry is not owned by this archive.</exception>
public void DeleteEntry(ArcEntry entry)
{
    lock (_lock)
    {
        ThrowIfDisposed();
        ThrowIfReadOnly();
        ThrowIfEntryNotOwned(entry);

        // Total number of bytes removed from the file; used below to shift the
        // data pointers of the surviving entries and the footer pointer.
        uint totalStripped = 0;

        if (entry.EntryStruct.CompressedSize > 0)
        {
            // Step 1: strip entry data
            // Compact the file by moving everything after this entry's data
            // down over it. Assumes the entry's chunks are stored contiguously
            // starting at DataPointer — TODO confirm the writer guarantees this.
            var frm = entry.EntryStruct.DataPointer + entry.EntryStruct.CompressedSize;
            var len = (int)_stream.Length - frm;
            MoveData(frm, (uint)len, entry.EntryStruct.DataPointer);
            totalStripped += entry.EntryStruct.CompressedSize;

            // Step 2: update all remaining entries
            if (totalStripped > 0)
            {
                foreach (var rEntry in _entries)
                {
                    if (ReferenceEquals(rEntry, entry)) continue;
                    if (rEntry.Chunks.Length > 0)
                    {
                        // Shift only the chunks that lived AFTER the deleted
                        // entry's data; chunks before it are unaffected.
                        for (uint i = 0; i < rEntry.Chunks.Length; ++i)
                        {
                            if (rEntry.Chunks[i].DataPointer > entry.EntryStruct.DataPointer)
                                rEntry.Chunks[i].DataPointer -= totalStripped;
                        }
                        // Keep the entry-level pointer in sync with its first chunk.
                        rEntry.EntryStruct.DataPointer = rEntry.Chunks[0].DataPointer;
                    }
                    // NOTE(review): this branch subtracts unconditionally, without
                    // the `> entry.EntryStruct.DataPointer` guard used above — for a
                    // chunkless entry located BEFORE the deleted one this would move
                    // its pointer backwards. Verify whether chunkless entries can
                    // carry a meaningful DataPointer.
                    else rEntry.EntryStruct.DataPointer -= totalStripped;
                }
            }
        }

        _entries.Remove(entry);
        entry.Dispose(this);

        // Step 3: write updated metadata to the archive
        // The footer moves down by exactly the number of bytes stripped.
        UpdateMeta(_header.FooterPointer - totalStripped);
    }
}
/// <summary>
/// Creates a new file entry in the archive using the provided path and data.
/// </summary>
/// <param name="path">The path of the entry in the archive.</param>
/// <param name="data">The data of the entry.</param>
/// <param name="storageMode">
/// If storageMode is not StorageMode.Plain, data will be compressed accordingly.
/// </param>
/// <exception cref="ArgumentNullException">path or data are null.</exception>
/// <exception cref="ArgumentException">
/// The specified path is invalid or the specified storage mode is not supported.
/// </exception>
/// <returns>The <see cref="ArcEntry"/> instance of the new entry.</returns>
/// <remarks>Locks the data object.</remarks>
public ArcEntry CreateEntry(string path, byte[] data, StorageMode storageMode)
{
    // BUGFIX: validate arguments BEFORE taking any lock. The original executed
    // `lock (data)` first, so a null `data` threw NullReferenceException instead
    // of the documented ArgumentNullException. Both ArgumentNullException calls
    // also had their (paramName, message) constructor arguments swapped.
    if (path == null)
        throw new ArgumentNullException(nameof(path), "Path cannot be null.");
    if (data == null)
        throw new ArgumentNullException(nameof(data), "Data cannot be null.");
    if (!PathUtils.EntryAbsolutePathRegex.IsMatch(path))
        throw new ArgumentException("The specified path is invalid.", nameof(path));
    if (!Enum.IsDefined(typeof(StorageMode), storageMode))
        throw new ArgumentException("The specified storage mode is not supported.", nameof(storageMode));

    // Lock the caller's buffer (per the documented contract) so it cannot be
    // mutated while we compress/checksum it, then the archive itself.
    lock (data)
    lock (_lock)
    {
        ThrowIfDisposed();
        ThrowIfReadOnly();

        // Compress the payload if requested; otherwise write it as-is.
        byte[] writeData = data;
        if (storageMode == StorageMode.Lz4Compressed)
        {
            writeData = new byte[Lz4.CompressBound(data.Length)];
            var cSize = Lz4.CompressDefault(data, writeData, data.Length, writeData.Length);
            Array.Resize(ref writeData, cSize);
        }

        // Append the (possibly compressed) data where the footer currently starts.
        _stream.Seek(_header.FooterPointer, SeekOrigin.Begin);

        var chunk = new ArcStruct.Chunk()
        {
            DataPointer = (uint)_stream.Position,
            CompressedSize = (uint)writeData.Length,
            PlainSize = (uint)data.Length
        };
        _writer.Write(writeData);
        writeData = null;

        var entry = new ArcEntry(
            this,
            new ArcStruct.Entry()
            {
                StorageMode = (uint)storageMode,
                DataPointer = chunk.DataPointer,
                CompressedSize = chunk.CompressedSize,
                PlainSize = chunk.PlainSize,
                // Checksum is always computed over the PLAIN data, not the
                // compressed form, so readers can verify after decompression.
                Adler32 = Checksum(data),
                FileTime = DateTime.Now.ToFileTime()
                // The other fields will be set by UpdateMeta()
            },
            path,
            new ArcStruct.Chunk[] { chunk }
        );
        _entries.Add(entry);

        UpdateMeta((uint)_stream.Position);

        return entry;
    }
}