/// <summary>
/// Test an archive for integrity/validity.
/// </summary>
/// <param name="testData">Perform a low-level CRC check of the entry data.</param>
/// <param name="strategy">The <see cref="TestStrategy"></see> to apply.</param>
/// <param name="resultHandler">The <see cref="ZipTestResultHandler"></see> handler to call during testing.</param>
/// <returns>true if all tests pass, false otherwise.</returns>
/// <exception cref="ObjectDisposedException">The object has already been closed.</exception>
public bool TestArchive(bool testData, TestStrategy strategy, ZipTestResultHandler resultHandler)
{
	if (isDisposed_) {
		throw new ObjectDisposedException("ZipFile");
	}

	TestStatus status = new TestStatus(this);

	if (resultHandler != null) {
		resultHandler(status, null);
	}

	HeaderTest test = testData ? (HeaderTest.Header | HeaderTest.Extract) : HeaderTest.Header;

	bool testing = true;

	try {
		int entryIndex = 0;

		while (testing && (entryIndex < Count)) {
			if (resultHandler != null) {
				status.SetEntry(this[entryIndex]);
				status.SetOperation(TestOperation.EntryHeader);
				resultHandler(status, null);
			}

			try {
				TestLocalHeader(this[entryIndex], test);
			}
			catch (ZipException ex) {
				status.AddError();

				if (resultHandler != null) {
					resultHandler(status, string.Format("Exception during test - '{0}'", ex.Message));
				}

				if (strategy == TestStrategy.FindFirstError) {
					testing = false;
				}
			}

			if (testing && testData && this[entryIndex].IsFile) {
				if (resultHandler != null) {
					status.SetOperation(TestOperation.EntryData);
					resultHandler(status, null);
				}

				// Recalculate the CRC from the decompressed entry data and compare it
				// with the value recorded in the central directory.
				Crc32 crc = new Crc32();

				using (Stream entryStream = this.GetInputStream(this[entryIndex])) {
					byte[] buffer = new byte[4096];
					long totalBytes = 0;
					int bytesRead;
					while ((bytesRead = entryStream.Read(buffer, 0, buffer.Length)) > 0) {
						crc.Update(buffer, 0, bytesRead);

						if (resultHandler != null) {
							totalBytes += bytesRead;
							status.SetBytesTested(totalBytes);
							resultHandler(status, null);
						}
					}
				}

				if (this[entryIndex].Crc != crc.Value) {
					status.AddError();

					if (resultHandler != null) {
						resultHandler(status, "CRC mismatch");
					}

					if (strategy == TestStrategy.FindFirstError) {
						testing = false;
					}
				}

				if ((this[entryIndex].Flags & (int)GeneralBitFlags.Descriptor) != 0) {
					// The entry has a trailing data descriptor; verify it against the entry values.
					ZipHelperStream helper = new ZipHelperStream(baseStream_);
					DescriptorData data = new DescriptorData();
					helper.ReadDataDescriptor(this[entryIndex].LocalHeaderRequiresZip64, data);

					if (this[entryIndex].Crc != data.Crc) {
						status.AddError();
					}

					if (this[entryIndex].CompressedSize != data.CompressedSize) {
						status.AddError();
					}

					if (this[entryIndex].Size != data.Size) {
						status.AddError();
					}
				}
			}

			if (resultHandler != null) {
				status.SetOperation(TestOperation.EntryComplete);
				resultHandler(status, null);
			}

			entryIndex += 1;
		}

		if (resultHandler != null) {
			status.SetOperation(TestOperation.MiscellaneousTests);
			resultHandler(status, null);
		}

		// TODO: the 'Corrina Johns' test where local headers are missing from
		// the central directory.  They are therefore invisible to many archivers.
	}
	catch (Exception ex) {
		status.AddError();

		if (resultHandler != null) {
			resultHandler(status, string.Format("Exception during test - '{0}'", ex.Message));
		}
	}

	if (resultHandler != null) {
		status.SetOperation(TestOperation.Complete);
		status.SetEntry(null);
		resultHandler(status, null);
	}

	return (status.ErrorCount == 0);
}
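// Usage sketch (not part of the original source): how TestArchive might be driven from client code.
// It assumes the public SharpZipLib types (ZipFile, TestStrategy, ZipTestResultHandler, TestStatus)
// behave as suggested by the method above; the archive path is hypothetical.
using System;
using ICSharpCode.SharpZipLib.Zip;

static class ArchiveTester
{
	public static bool TestWithProgress(string zipPath)
	{
		using (ZipFile zip = new ZipFile(zipPath)) {
			// The handler receives the running status plus an optional message (e.g. "CRC mismatch").
			ZipTestResultHandler handler = delegate(TestStatus status, string message) {
				if (message != null) {
					string name = (status.Entry != null) ? status.Entry.Name : "(archive)";
					Console.WriteLine("{0}: {1}", name, message);
				}
			};

			// true => verify entry data (CRC) as well as headers; stop at the first error.
			return zip.TestArchive(true, TestStrategy.FindFirstError, handler);
		}
	}
}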
// NOTE this returns the offset of the first byte after the signature.
long LocateBlockWithSignature(int signature, long endLocation, int minimumBlockSize, int maximumVariableData)
{
	using (ZipHelperStream les = new ZipHelperStream(baseStream_)) {
		return les.LocateBlockWithSignature(signature, endLocation, minimumBlockSize, maximumVariableData);
	}
}
void RunUpdates()
{
	long sizeEntries = 0;
	long endOfStream = 0;
	bool directUpdate = false;
	long destinationPosition = 0; // NOT SFX friendly

	ZipFile workFile;

	if (IsNewArchive) {
		workFile = this;
		workFile.baseStream_.Position = 0;
		directUpdate = true;
	}
	else if (archiveStorage_.UpdateMode == FileUpdateMode.Direct) {
		workFile = this;
		workFile.baseStream_.Position = 0;
		directUpdate = true;

		// Sort the updates by offset within copies/modifies, then adds.
		// This ensures that data required by copies will not be overwritten.
		updates_.Sort(new UpdateComparer());
	}
	else {
		workFile = ZipFile.Create(archiveStorage_.GetTemporaryOutput());
		workFile.UseZip64 = UseZip64;

		if (key != null) {
			workFile.key = (byte[])key.Clone();
		}
	}

	try {
		foreach (ZipUpdate update in updates_) {
			if (update != null) {
				switch (update.Command) {
					case UpdateCommand.Copy:
						if (directUpdate) {
							CopyEntryDirect(workFile, update, ref destinationPosition);
						}
						else {
							CopyEntry(workFile, update);
						}
						break;

					case UpdateCommand.Modify:
						// TODO: Direct modifying of an entry will take some legwork.
						ModifyEntry(workFile, update);
						break;

					case UpdateCommand.Add:
						if (!IsNewArchive && directUpdate) {
							workFile.baseStream_.Position = destinationPosition;
						}

						AddEntry(workFile, update);

						if (directUpdate) {
							destinationPosition = workFile.baseStream_.Position;
						}
						break;
				}
			}
		}

		if (!IsNewArchive && directUpdate) {
			workFile.baseStream_.Position = destinationPosition;
		}

		long centralDirOffset = workFile.baseStream_.Position;

		foreach (ZipUpdate update in updates_) {
			if (update != null) {
				sizeEntries += workFile.WriteCentralDirectoryHeader(update.OutEntry);
			}
		}

		byte[] theComment = (newComment_ != null) ? newComment_.RawComment : ZipConstants.ConvertToArray(comment_);
		using (ZipHelperStream zhs = new ZipHelperStream(workFile.baseStream_)) {
			zhs.WriteEndOfCentralDirectory(updateCount_, sizeEntries, centralDirOffset, theComment);
		}

		endOfStream = workFile.baseStream_.Position;

		// And now patch entries...
		foreach (ZipUpdate update in updates_) {
			if (update != null) {
				// If the size of the entry is zero leave the crc as 0 as well.
				// The calculated crc will be all bits on...
				if ((update.CrcPatchOffset > 0) && (update.OutEntry.CompressedSize > 0)) {
					workFile.baseStream_.Position = update.CrcPatchOffset;
					workFile.WriteLEInt((int)update.OutEntry.Crc);
				}

				if (update.SizePatchOffset > 0) {
					workFile.baseStream_.Position = update.SizePatchOffset;
					if (update.OutEntry.LocalHeaderRequiresZip64) {
						workFile.WriteLeLong(update.OutEntry.Size);
						workFile.WriteLeLong(update.OutEntry.CompressedSize);
					}
					else {
						workFile.WriteLEInt((int)update.OutEntry.CompressedSize);
						workFile.WriteLEInt((int)update.OutEntry.Size);
					}
				}
			}
		}
	}
	catch {
		workFile.Close();
		if (!directUpdate && (workFile.Name != null)) {
			File.Delete(workFile.Name);
		}
		throw;
	}

	if (directUpdate) {
		workFile.baseStream_.SetLength(endOfStream);
		workFile.baseStream_.Flush();
		isNewArchive_ = false;
		ReadEntries();
	}
	else {
		baseStream_.Close();
		Reopen(archiveStorage_.ConvertTemporaryToFinal());
	}
}
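// Usage sketch (not from the original source): creating a brand new archive exercises the
// IsNewArchive branch of RunUpdates above, so updates are written directly to the destination
// stream instead of a temporary file.  Assumes the public ZipFile.Create / BeginUpdate / Add /
// CommitUpdate API; the file names are hypothetical.
using ICSharpCode.SharpZipLib.Zip;

static class NewArchiveExample
{
	public static void Build()
	{
		using (ZipFile zip = ZipFile.Create("output.zip")) {
			zip.BeginUpdate();
			zip.Add("report.txt");   // queued as an UpdateCommand.Add
			zip.Add("notes.txt");
			zip.CommitUpdate();      // drives RunUpdates, then writes the central directory
		}
	}
}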
void UpdateCommentOnly()
{
	long baseLength = baseStream_.Length;

	ZipHelperStream updateFile = null;

	if (archiveStorage_.UpdateMode == FileUpdateMode.Safe) {
		Stream copyStream = archiveStorage_.MakeTemporaryCopy(baseStream_);
		updateFile = new ZipHelperStream(copyStream);
		updateFile.IsStreamOwner = true;

		baseStream_.Close();
		baseStream_ = null;
	}
	else {
		if (archiveStorage_.UpdateMode == FileUpdateMode.Direct) {
			// TODO: archiveStorage wasn't originally intended for this use.
			// Need to revisit this to tidy up handling, as archive storage currently doesn't
			// handle the original stream well.
			// The problem arises when using an existing zip archive with an in-memory archive storage:
			// the open stream won't support writing, but the memory storage should open the same file,
			// not an in-memory one.
			// Basically, the archive storage interface and contract need tidying up.
			baseStream_ = archiveStorage_.OpenForDirectUpdate(baseStream_);
			updateFile = new ZipHelperStream(baseStream_);
		}
		else {
			baseStream_.Close();
			baseStream_ = null;
			updateFile = new ZipHelperStream(Name);
		}
	}

	using (updateFile) {
		long locatedCentralDirOffset =
			updateFile.LocateBlockWithSignature(ZipConstants.EndOfCentralDirectorySignature,
				baseLength, ZipConstants.EndOfCentralRecordBaseSize, 0xffff);
		if (locatedCentralDirOffset < 0) {
			throw new ZipException("Cannot find central directory");
		}

		const int CentralHeaderCommentSizeOffset = 16;
		updateFile.Position += CentralHeaderCommentSizeOffset;

		byte[] rawComment = newComment_.RawComment;

		updateFile.WriteLEShort(rawComment.Length);
		updateFile.Write(rawComment, 0, rawComment.Length);
		updateFile.SetLength(updateFile.Position);
	}

	if (archiveStorage_.UpdateMode == FileUpdateMode.Safe) {
		Reopen(archiveStorage_.ConvertTemporaryToFinal());
	}
	else {
		ReadEntries();
	}
}
void AddEntry(ZipFile workFile, ZipUpdate update)
{
	Stream source = null;

	if (update.Entry.IsFile) {
		source = update.GetSource();

		if (source == null) {
			source = updateDataSource_.GetSource(update.Entry, update.Filename);
		}
	}

	if (source != null) {
		using (source) {
			long sourceStreamLength = source.Length;
			if (update.OutEntry.Size < 0) {
				update.OutEntry.Size = sourceStreamLength;
			}
			else {
				// Check for errant entries.
				if (update.OutEntry.Size != sourceStreamLength) {
					throw new ZipException("Entry size/stream size mismatch");
				}
			}

			workFile.WriteLocalEntryHeader(update);

			long dataStart = workFile.baseStream_.Position;

			using (Stream output = workFile.GetOutputStream(update.OutEntry)) {
				CopyBytes(update, output, source, sourceStreamLength, true);
			}

			long dataEnd = workFile.baseStream_.Position;
			update.OutEntry.CompressedSize = dataEnd - dataStart;

			if ((update.OutEntry.Flags & (int)GeneralBitFlags.Descriptor) == (int)GeneralBitFlags.Descriptor) {
				ZipHelperStream helper = new ZipHelperStream(workFile.baseStream_);
				helper.WriteDataDescriptor(update.OutEntry);
			}
		}
	}
	else {
		workFile.WriteLocalEntryHeader(update);
		update.OutEntry.CompressedSize = 0;
	}
}
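// Usage sketch (not from the original source): update.GetSource() above is ultimately fed by a
// data source supplied by the caller.  This shows one way an in-memory IStaticDataSource might
// look; it assumes the public IStaticDataSource interface and the ZipFile.Add(IStaticDataSource,
// string) overload, and the entry name is hypothetical.
using System.IO;
using System.Text;
using ICSharpCode.SharpZipLib.Zip;

class StringDataSource : IStaticDataSource
{
	readonly byte[] data_;

	public StringDataSource(string text)
	{
		data_ = Encoding.UTF8.GetBytes(text);
	}

	// Called when the entry data is needed; returns a fresh, readable stream over the bytes.
	public Stream GetSource()
	{
		return new MemoryStream(data_, false);
	}
}

// Example:
//   zip.BeginUpdate();
//   zip.Add(new StringDataSource("hello"), "notes/hello.txt");
//   zip.CommitUpdate();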
/// <summary>
/// Commit current updates, updating this archive.
/// </summary>
/// <seealso cref="BeginUpdate()"></seealso>
/// <seealso cref="AbortUpdate"></seealso>
/// <exception cref="ObjectDisposedException">ZipFile has been closed.</exception>
public void CommitUpdate()
{
	if (isDisposed_) {
		throw new ObjectDisposedException("ZipFile");
	}

	CheckUpdating();

	try {
		updateIndex_.Clear();
		updateIndex_ = null;

		if (contentsEdited_) {
			RunUpdates();
		}
		else if (commentEdited_) {
			UpdateCommentOnly();
		}
		else {
			// Create an empty archive if none existed originally.
			if (entries_.Length == 0) {
				byte[] theComment = (newComment_ != null) ? newComment_.RawComment : ZipConstants.ConvertToArray(comment_);
				using (ZipHelperStream zhs = new ZipHelperStream(baseStream_)) {
					zhs.WriteEndOfCentralDirectory(0, 0, 0, theComment);
				}
			}
		}
	}
	finally {
		PostUpdateCleanup();
	}
}
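// Usage sketch (not from the original source): updating an existing archive.  With the default
// (safe) update mode the changes are written to a temporary file and swapped in by CommitUpdate;
// if only the comment changes, CommitUpdate takes the UpdateCommentOnly path instead of
// RunUpdates.  Assumes the public BeginUpdate/SetComment/Delete/CommitUpdate API; the file and
// entry names are hypothetical.
using ICSharpCode.SharpZipLib.Zip;

static class ExistingArchiveExample
{
	public static void Update(string zipPath)
	{
		using (ZipFile zip = new ZipFile(zipPath)) {
			zip.BeginUpdate();
			zip.SetComment("Rebuilt " + System.DateTime.UtcNow.ToString("u")); // sets commentEdited_
			zip.Delete("obsolete.txt");                                        // sets contentsEdited_
			zip.CommitUpdate();
		}
	}
}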
/// <summary>
/// Set the data from the raw values provided.
/// </summary>
/// <param name="data">The raw data to extract values from.</param>
/// <param name="index">The index to start extracting values from.</param>
/// <param name="count">The number of bytes available.</param>
public void SetData(byte[] data, int index, int count)
{
	using (MemoryStream ms = new MemoryStream(data, index, count, false))
	using (ZipHelperStream helperStream = new ZipHelperStream(ms)) {
		helperStream.ReadLEInt(); // Reserved

		while (helperStream.Position < helperStream.Length) {
			int ntfsTag = helperStream.ReadLEShort();
			int ntfsLength = helperStream.ReadLEShort();
			if (ntfsTag == 1) {
				if (ntfsLength >= 24) {
					long lastModificationTicks = helperStream.ReadLELong();
					lastModificationTime_ = DateTime.FromFileTime(lastModificationTicks);

					long lastAccessTicks = helperStream.ReadLELong();
					lastAccessTime_ = DateTime.FromFileTime(lastAccessTicks);

					long createTimeTicks = helperStream.ReadLELong();
					createTime_ = DateTime.FromFileTime(createTimeTicks);
				}
				break;
			}
			else {
				// An unknown NTFS tag so simply skip it.
				helperStream.Seek(ntfsLength, SeekOrigin.Current);
			}
		}
	}
}
/// <summary>
/// Get the binary data representing this instance.
/// </summary>
/// <returns>The raw binary data representing this instance.</returns>
public byte[] GetData()
{
	using (MemoryStream ms = new MemoryStream())
	using (ZipHelperStream helperStream = new ZipHelperStream(ms)) {
		helperStream.IsStreamOwner = false;
		helperStream.WriteLEInt(0);       // Reserved
		helperStream.WriteLEShort(1);     // Tag
		helperStream.WriteLEShort(24);    // Length = 3 x 8.
		helperStream.WriteLELong(lastModificationTime_.ToFileTime());
		helperStream.WriteLELong(lastAccessTime_.ToFileTime());
		helperStream.WriteLELong(createTime_.ToFileTime());
		return ms.ToArray();
	}
}
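// Usage sketch (not from the original source): a GetData/SetData round trip for the NTFS extra-data
// field built above.  Assumes NTTaggedData is public and exposes LastModificationTime,
// LastAccessTime and CreateTime properties backed by the fields used in these methods.
using System;
using ICSharpCode.SharpZipLib.Zip;

static class NtfsExtraDataExample
{
	public static void RoundTrip()
	{
		NTTaggedData ntfs = new NTTaggedData();
		ntfs.LastModificationTime = DateTime.Now;

		// Layout written by GetData: 4 reserved bytes, tag 1, length 24,
		// then three 8-byte Windows FILETIME values.
		byte[] raw = ntfs.GetData();

		NTTaggedData parsed = new NTTaggedData();
		parsed.SetData(raw, 0, raw.Length);

		// FILETIME has 100ns resolution, so the round-tripped value matches to the tick.
		Console.WriteLine(parsed.LastModificationTime);
	}
}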
/// <summary>
/// Set the data from the raw values provided.
/// </summary>
/// <param name="data">The raw data to extract values from.</param>
/// <param name="index">The index to start extracting values from.</param>
/// <param name="count">The number of bytes available.</param>
public void SetData(byte[] data, int index, int count)
{
	using (MemoryStream ms = new MemoryStream(data, index, count, false))
	using (ZipHelperStream helperStream = new ZipHelperStream(ms)) {
		// bit 0   if set, modification time is present
		// bit 1   if set, access time is present
		// bit 2   if set, creation time is present
		flags_ = (Flags)helperStream.ReadByte();

		if (((flags_ & Flags.ModificationTime) != 0) && (count >= 5)) {
			int iTime = helperStream.ReadLEInt();

			modificationTime_ = (new System.DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime() +
				new TimeSpan(0, 0, 0, iTime, 0)).ToLocalTime();
		}

		if ((flags_ & Flags.AccessTime) != 0) {
			int iTime = helperStream.ReadLEInt();

			lastAccessTime_ = (new System.DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime() +
				new TimeSpan(0, 0, 0, iTime, 0)).ToLocalTime();
		}

		if ((flags_ & Flags.CreateTime) != 0) {
			int iTime = helperStream.ReadLEInt();

			createTime_ = (new System.DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime() +
				new TimeSpan(0, 0, 0, iTime, 0)).ToLocalTime();
		}
	}
}
/// <summary>
/// Get the binary data representing this instance.
/// </summary>
/// <returns>The raw binary data representing this instance.</returns>
public byte[] GetData()
{
	using (MemoryStream ms = new MemoryStream())
	using (ZipHelperStream helperStream = new ZipHelperStream(ms)) {
		helperStream.IsStreamOwner = false;
		helperStream.WriteByte((byte)flags_);     // Flags

		if ((flags_ & Flags.ModificationTime) != 0) {
			TimeSpan span = modificationTime_.ToUniversalTime() -
				new System.DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime();
			int seconds = (int)span.TotalSeconds;
			helperStream.WriteLEInt(seconds);
		}

		if ((flags_ & Flags.AccessTime) != 0) {
			TimeSpan span = lastAccessTime_.ToUniversalTime() -
				new System.DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime();
			int seconds = (int)span.TotalSeconds;
			helperStream.WriteLEInt(seconds);
		}

		if ((flags_ & Flags.CreateTime) != 0) {
			TimeSpan span = createTime_.ToUniversalTime() -
				new System.DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime();
			int seconds = (int)span.TotalSeconds;
			helperStream.WriteLEInt(seconds);
		}

		return ms.ToArray();
	}
}
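// Illustration (not from the original source): the two methods above encode times as signed 32-bit
// counts of seconds since the Unix epoch, converting to and from local time at the boundaries.
// This standalone helper mirrors that arithmetic, including the way the epoch value itself is
// constructed from an unspecified-kind DateTime (which ToUniversalTime treats as local time).
using System;

static class UnixTimeHelper
{
	static readonly DateTime Epoch = new DateTime(1970, 1, 1, 0, 0, 0).ToUniversalTime();

	// Matches GetData: local DateTime -> whole seconds since the epoch (fractions truncated).
	public static int ToUnixSeconds(DateTime localTime)
	{
		return (int)(localTime.ToUniversalTime() - Epoch).TotalSeconds;
	}

	// Matches SetData: seconds since the epoch -> local DateTime.
	public static DateTime FromUnixSeconds(int seconds)
	{
		return (Epoch + TimeSpan.FromSeconds(seconds)).ToLocalTime();
	}
}

// Example: values survive a round trip at one-second resolution (sub-second ticks are dropped),
// and the signed 32-bit field overflows in 2038.
//   int s = UnixTimeHelper.ToUnixSeconds(DateTime.Now);
//   DateTime back = UnixTimeHelper.FromUnixSeconds(s);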
/// <summary>
/// Finishes the stream.  This will write the central directory at the
/// end of the zip file and flush the stream.
/// </summary>
/// <remarks>
/// This is automatically called when the stream is closed.
/// </remarks>
/// <exception cref="System.IO.IOException">
/// An I/O error occurs.
/// </exception>
/// <exception cref="ZipException">
/// Comment exceeds the maximum length<br/>
/// Entry name exceeds the maximum length
/// </exception>
public override void Finish()
{
	if (entries == null) {
		return;
	}

	if (curEntry != null) {
		CloseEntry();
	}

	long numEntries = entries.Count;
	long sizeEntries = 0;

	foreach (var entry in entries) {
		WriteLeInt(ZipConstants.CentralHeaderSignature);
		WriteLeShort(ZipConstants.VersionMadeBy);
		WriteLeShort(entry.Version);
		WriteLeShort(entry.Flags);
		WriteLeShort((short)entry.CompressionMethod);
		WriteLeInt((int)entry.DosTime);
		WriteLeInt((int)entry.Crc);

		if (entry.IsZip64Forced() || (entry.CompressedSize >= uint.MaxValue)) {
			WriteLeInt(-1);
		}
		else {
			WriteLeInt((int)entry.CompressedSize);
		}

		if (entry.IsZip64Forced() || (entry.Size >= uint.MaxValue)) {
			WriteLeInt(-1);
		}
		else {
			WriteLeInt((int)entry.Size);
		}

		var name = ZipConstants.ConvertToArray(entry.Flags, entry.Name);

		if (name.Length > 0xffff) {
			throw new ZipException("Name too long.");
		}

		var ed = new ZipExtraData(entry.ExtraData);

		if (entry.CentralHeaderRequiresZip64) {
			ed.StartNewEntry();
			if (entry.IsZip64Forced() || (entry.Size >= 0xffffffff)) {
				ed.AddLeLong(entry.Size);
			}

			if (entry.IsZip64Forced() || (entry.CompressedSize >= 0xffffffff)) {
				ed.AddLeLong(entry.CompressedSize);
			}

			if (entry.Offset >= 0xffffffff) {
				ed.AddLeLong(entry.Offset);
			}

			ed.AddNewEntry(1);
		}
		else {
			ed.Delete(1);
		}

		var extra = ed.GetEntryData();

		var entryComment =
			(entry.Comment != null) ?
			ZipConstants.ConvertToArray(entry.Flags, entry.Comment) :
			new byte[0];

		if (entryComment.Length > 0xffff) {
			throw new ZipException("Comment too long.");
		}

		WriteLeShort(name.Length);
		WriteLeShort(extra.Length);
		WriteLeShort(entryComment.Length);
		WriteLeShort(0);    // disk number
		WriteLeShort(0);    // internal file attributes

		// external file attributes
		if (entry.ExternalFileAttributes != -1) {
			WriteLeInt(entry.ExternalFileAttributes);
		}
		else {
			if (entry.IsDirectory) {
				// mark entry as directory (from nikolam.AT.perfectinfo.com)
				WriteLeInt(16);
			}
			else {
				WriteLeInt(0);
			}
		}

		if (entry.Offset >= uint.MaxValue) {
			WriteLeInt(-1);
		}
		else {
			WriteLeInt((int)entry.Offset);
		}

		if (name.Length > 0) {
			baseOutputStream_.Write(name, 0, name.Length);
		}

		if (extra.Length > 0) {
			baseOutputStream_.Write(extra, 0, extra.Length);
		}

		if (entryComment.Length > 0) {
			baseOutputStream_.Write(entryComment, 0, entryComment.Length);
		}

		sizeEntries += ZipConstants.CentralHeaderBaseSize + name.Length + extra.Length + entryComment.Length;
	}

	using (var zhs = new ZipHelperStream(baseOutputStream_)) {
		zhs.WriteEndOfCentralDirectory(numEntries, sizeEntries, _offset, zipComment);
	}

	entries = null;
}
/// <summary>
/// Finishes the stream.  This will write the central directory at the
/// end of the zip file and flush the stream.
/// </summary>
/// <remarks>
/// This is automatically called when the stream is closed.
/// </remarks>
/// <exception cref="System.IO.IOException">
/// An I/O error occurs.
/// </exception>
/// <exception cref="ZipException">
/// Comment exceeds the maximum length<br/>
/// Entry name exceeds the maximum length
/// </exception>
public override void Finish()
{
	if (entries == null) {
		return;
	}

	if (curEntry != null) {
		CloseEntry();
	}

	long numEntries = entries.Count;
	long sizeEntries = 0;

	foreach (ZipEntry entry in entries) {
		WriteLeInt(ZipConstants.CentralHeaderSignature);
		WriteLeShort(ZipConstants.VersionMadeBy);
		WriteLeShort(entry.Version);
		WriteLeShort(entry.Flags);
		WriteLeShort((short)entry.CompressionMethodForHeader);
		WriteLeInt((int)entry.DosTime);
		WriteLeInt((int)entry.Crc);

		if (entry.IsZip64Forced() || (entry.CompressedSize >= uint.MaxValue)) {
			WriteLeInt(-1);
		}
		else {
			WriteLeInt((int)entry.CompressedSize);
		}

		if (entry.IsZip64Forced() || (entry.Size >= uint.MaxValue)) {
			WriteLeInt(-1);
		}
		else {
			WriteLeInt((int)entry.Size);
		}

		byte[] name = ZipConstants.ConvertToArray(entry.Flags, entry.Name);

		if (name.Length > 0xffff) {
			throw new ZipException("Name too long.");
		}

		ZipExtraData ed = new ZipExtraData(entry.ExtraData);

		if (entry.CentralHeaderRequiresZip64) {
			ed.StartNewEntry();
			if (entry.IsZip64Forced() || (entry.Size >= 0xffffffff)) {
				ed.AddLeLong(entry.Size);
			}

			if (entry.IsZip64Forced() || (entry.CompressedSize >= 0xffffffff)) {
				ed.AddLeLong(entry.CompressedSize);
			}

			if (entry.Offset >= 0xffffffff) {
				ed.AddLeLong(entry.Offset);
			}

			ed.AddNewEntry(1);
		}
		else {
			ed.Delete(1);
		}

#if !NET_1_1 && !NETCF_2_0
		if (entry.AESKeySize > 0) {
			AddExtraDataAES(entry, ed);
		}
#endif

		byte[] extra = ed.GetEntryData();

		byte[] entryComment =
			(entry.Comment != null) ?
			ZipConstants.ConvertToArray(entry.Flags, entry.Comment) :
			new byte[0];

		if (entryComment.Length > 0xffff) {
			throw new ZipException("Comment too long.");
		}

		WriteLeShort(name.Length);
		WriteLeShort(extra.Length);
		WriteLeShort(entryComment.Length);
		WriteLeShort(0);    // disk number
		WriteLeShort(0);    // internal file attributes

		// external file attributes
		if (entry.ExternalFileAttributes != -1) {
			WriteLeInt(entry.ExternalFileAttributes);
		}
		else {
			if (entry.IsDirectory) {
				// mark entry as directory (from nikolam.AT.perfectinfo.com)
				WriteLeInt(16);
			}
			else {
				WriteLeInt(0);
			}
		}

		if (entry.Offset >= uint.MaxValue) {
			WriteLeInt(-1);
		}
		else {
			WriteLeInt((int)entry.Offset);
		}

		if (name.Length > 0) {
			baseOutputStream_.Write(name, 0, name.Length);
		}

		if (extra.Length > 0) {
			baseOutputStream_.Write(extra, 0, extra.Length);
		}

		if (entryComment.Length > 0) {
			baseOutputStream_.Write(entryComment, 0, entryComment.Length);
		}

		sizeEntries += ZipConstants.CentralHeaderBaseSize + name.Length + extra.Length + entryComment.Length;
	}

	using (ZipHelperStream zhs = new ZipHelperStream(baseOutputStream_)) {
		zhs.WriteEndOfCentralDirectory(numEntries, sizeEntries, offset, zipComment);
	}

	entries = null;
}
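// Usage sketch (not from the original source): a minimal ZipOutputStream client.  Finish() above
// is what writes the central directory; closing/disposing the stream calls it automatically.
// Assumes the public ZipOutputStream/ZipEntry API; file and entry names are hypothetical.
using System.IO;
using System.Text;
using ICSharpCode.SharpZipLib.Zip;

static class OutputStreamExample
{
	public static void Write(string zipPath)
	{
		using (FileStream fs = File.Create(zipPath))
		using (ZipOutputStream zos = new ZipOutputStream(fs)) {
			zos.SetLevel(6); // deflate compression level 0-9

			ZipEntry entry = new ZipEntry("hello.txt");
			zos.PutNextEntry(entry);

			byte[] data = Encoding.UTF8.GetBytes("Hello, zip!");
			zos.Write(data, 0, data.Length);
			zos.CloseEntry();

			// Disposing the stream invokes Finish(), writing the central directory
			// and the end-of-central-directory record shown above.
		}
	}
}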