void UpdateCommentOnly()
{
    long baseLength = baseStream_.Length;

    HfsHelperStream updateFile = null;

    if (archiveStorage_.UpdateMode == FileUpdateMode.Safe) {
        Stream copyStream = archiveStorage_.MakeTemporaryCopy(baseStream_);
        updateFile = new HfsHelperStream(copyStream);
        updateFile.IsStreamOwner = true;

        baseStream_.Close();
        baseStream_ = null;
    }
    else {
        if (archiveStorage_.UpdateMode == FileUpdateMode.Direct) {
            // TODO: archiveStorage wasn't originally intended for this use.
            // Need to revisit this to tidy up handling, as archive storage currently doesn't
            // handle the original stream well.
            // The problem arises when using an existing Hfs archive with an in-memory archive storage.
            // The open stream won't support writing, but the memory storage should open the same file, not an in-memory one.
            // Basically, the archive storage interface and contract need tidying up.
            baseStream_ = archiveStorage_.OpenForDirectUpdate(baseStream_);
            updateFile = new HfsHelperStream(baseStream_);
        }
        else {
            baseStream_.Close();
            baseStream_ = null;
            updateFile = new HfsHelperStream(Name);
        }
    }

    using (updateFile) {
        long locatedCentralDirOffset = updateFile.LocateBlockWithSignature(
            HfsConstants.EndOfCentralDirectorySignature,
            baseLength, HfsConstants.EndOfCentralRecordBaseSize, 0xffff);
        if (locatedCentralDirOffset < 0) {
            throw new HfsException("Cannot find central directory");
        }

        const int CentralHeaderCommentSizeOffset = 16;
        updateFile.Position += CentralHeaderCommentSizeOffset;

        byte[] rawComment = newComment_.RawComment;

        updateFile.WriteLEShort(rawComment.Length);
        updateFile.Write(rawComment, 0, rawComment.Length);
        updateFile.SetLength(updateFile.Position);
    }

    if (archiveStorage_.UpdateMode == FileUpdateMode.Safe) {
        Reopen(archiveStorage_.ConvertTemporaryToFinal());
    }
    else {
        ReadEntries();
    }
}
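
// Sketch (assumption, not part of the original source): the 16-byte skip in UpdateCommentOnly
// assumes the HFS end-of-central-directory record mirrors the standard ZIP layout after its
// signature, which places the stream directly on the 2-byte comment length field written by
// WriteLEShort. The constants below are illustrative only and are not used by the code above.
static class EndRecordOffsetsSketch
{
    public const int DiskNumber = 0;            // 2 bytes
    public const int StartCentralDirDisk = 2;   // 2 bytes
    public const int EntriesOnThisDisk = 4;     // 2 bytes
    public const int TotalEntries = 6;          // 2 bytes
    public const int CentralDirSize = 8;        // 4 bytes
    public const int CentralDirOffset = 12;     // 4 bytes
    public const int CommentSize = 16;          // 2 bytes, followed by the comment bytes
}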
// NOTE this returns the offset of the first byte after the signature.
long LocateBlockWithSignature(int signature, long endLocation, int minimumBlockSize, int maximumVariableData)
{
    using (HfsHelperStream les = new HfsHelperStream(baseStream_)) {
        return les.LocateBlockWithSignature(signature, endLocation, minimumBlockSize, maximumVariableData);
    }
}
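
// Sketch (assumption, not part of the original source): the helper-stream search that the wrapper
// above delegates to is not shown here. Because the end record sits behind a variable-length
// comment, such a search typically walks backwards from the end of the stream. The standalone
// version below illustrates that technique; the name and exact behaviour are illustrative only.
static long FindSignatureBackwardsSketch(Stream stream, int signature, long endLocation,
                                         int minimumBlockSize, int maximumVariableData)
{
    // Latest position at which the signature could start.
    long pos = endLocation - minimumBlockSize;
    if (pos < 0) {
        return -1;
    }

    long giveUpMarker = Math.Max(pos - maximumVariableData, 0);

    // Walk backwards, reading a little-endian int at each candidate position.
    while (pos >= giveUpMarker) {
        stream.Seek(pos, SeekOrigin.Begin);
        int value = stream.ReadByte() | (stream.ReadByte() << 8) |
                    (stream.ReadByte() << 16) | (stream.ReadByte() << 24);
        if (value == signature) {
            // Matches the wrapper's documented contract: return the offset of the
            // first byte after the signature.
            return stream.Position;
        }
        pos--;
    }
    return -1;
}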
void RunUpdates()
{
    long sizeEntries = 0;
    long endOfStream = 0;
    bool directUpdate = false;
    long destinationPosition = 0; // NOT SFX friendly

    HfsFile workFile;

    if (IsNewArchive) {
        workFile = this;
        workFile.baseStream_.Position = 0;
        directUpdate = true;
    }
    else if (archiveStorage_.UpdateMode == FileUpdateMode.Direct) {
        workFile = this;
        workFile.baseStream_.Position = 0;
        directUpdate = true;

        // Sort the updates by offset within copies/modifies, then adds.
        // This ensures that data required by copies will not be overwritten.
        updates_.Sort(new UpdateComparer());
    }
    else {
        workFile = HfsFile.Create(archiveStorage_.GetTemporaryOutput());

        if (key != null) {
            workFile.key = (byte[])key.Clone();
        }

        if (obfuscationkey_ != 0) {
            workFile.obfuscationkey_ = obfuscationkey_;
            workFile.bytekey_ = bytekey_;
        }
    }

    try {
        foreach (HfsUpdate update in updates_) {
            if (update != null) {
                switch (update.Command) {
                    case UpdateCommand.Copy:
                        if (directUpdate) {
                            CopyEntryDirect(workFile, update, ref destinationPosition);
                        }
                        else {
                            CopyEntry(workFile, update);
                        }
                        break;

                    case UpdateCommand.Modify:
                        // TODO: Direct modifying of an entry will take some legwork.
                        ModifyEntry(workFile, update);
                        break;

                    case UpdateCommand.Add:
                        if (!IsNewArchive && directUpdate) {
                            workFile.baseStream_.Position = destinationPosition;
                        }

                        AddEntry(workFile, update);

                        if (directUpdate) {
                            destinationPosition = workFile.baseStream_.Position;
                        }
                        break;
                }
            }
        }

        if (!IsNewArchive && directUpdate) {
            workFile.baseStream_.Position = destinationPosition;
        }

        long centralDirOffset = workFile.baseStream_.Position;

        foreach (HfsUpdate update in updates_) {
            if (update != null) {
                sizeEntries += workFile.WriteCentralDirectoryHeader(update.OutEntry);
            }
        }

        byte[] theComment = (newComment_ != null) ? newComment_.RawComment : HfsConstants.ConvertToArray(comment_);

        uint obfuscationKey = workFile.obfuscationkey_;
        if (obfuscationKey > 0) {
            obfuscationKey = workFile.obfuscationkey_ = (UInt32)(centralDirOffset * sizeEntries);
            Array.Copy(BitConverter.GetBytes(obfuscationKey), workFile.bytekey_, 4);
        }

        using (HfsHelperStream zhs = new HfsHelperStream(workFile.baseStream_)) {
            zhs.WriteEndOfCentralDirectory(updateCount_, sizeEntries, centralDirOffset, theComment, obfuscationKey);
        }

        endOfStream = workFile.baseStream_.Position;

        // And now patch entries...
        foreach (HfsUpdate update in updates_) {
            if (update != null) {
                // If the size of the entry is zero leave the crc as 0 as well.
                // The calculated crc will be all bits on...
                if ((update.CrcPatchOffset > 0) && (update.OutEntry.CompressedSize > 0)) {
                    workFile.baseStream_.Position = update.CrcPatchOffset;
                    workFile.WriteLEInt((int)update.OutEntry.Crc);
                }

                if (update.SizePatchOffset > 0) {
                    workFile.baseStream_.Position = update.SizePatchOffset;
                    workFile.WriteLEInt((int)update.OutEntry.CompressedSize);
                    workFile.WriteLEInt((int)update.OutEntry.Size);
                }

                // HFS xor post-process
                if (obfuscationKey > 0 && update.Command == UpdateCommand.Add) {
                    long dataStart = update.OutEntry.DataOffset;
                    byte[] buffer = new byte[4096];
                    long bytesToCopy = update.OutEntry.CompressedSize;
                    int bytesRead;

                    do {
                        workFile.baseStream_.Position = dataStart;

                        int readSize = buffer.Length;
                        if (bytesToCopy < readSize) {
                            readSize = (int)bytesToCopy;
                        }

                        bytesRead = workFile.baseStream_.Read(buffer, 0, readSize);
                        if (bytesRead > 0) {
                            workFile.baseStream_.Position = dataStart;
                            HfsXorCipher.XorBlockWithKey(buffer, bytekey_, (int)dataStart);
                            workFile.baseStream_.Write(buffer, 0, bytesRead);
                        }

                        dataStart += bytesRead;
                        bytesToCopy -= bytesRead;
                    } while ((bytesRead > 0) && (bytesToCopy > 0));
                }
            }
        }
    }
    catch {
        workFile.Close();
        if (!directUpdate && (workFile.Name != null)) {
            File.Delete(workFile.Name);
        }
        throw;
    }

    if (directUpdate) {
        workFile.baseStream_.SetLength(endOfStream);
        workFile.baseStream_.Flush();
        isNewArchive_ = false;
        ReadEntries();
    }
    else {
        baseStream_.Close();
        Reopen(archiveStorage_.ConvertTemporaryToFinal());
    }
}
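
// Sketch (assumption, not part of the original source): RunUpdates above post-processes newly
// added entry data with HfsXorCipher.XorBlockWithKey(buffer, key, absoluteOffset). The cipher's
// real implementation is not shown in this section; a minimal position-keyed XOR matching that
// call shape might look like the following. The key-indexing scheme is illustrative only.
static class HfsXorCipherSketch
{
    // XOR each byte of the block with the 4-byte key, indexed by the byte's absolute position
    // in the stream so that the transform is independent of buffer boundaries.
    public static void XorBlockWithKey(byte[] block, byte[] key, int offset)
    {
        for (int i = 0; i < block.Length; i++) {
            block[i] ^= key[(offset + i) % key.Length];
        }
    }
}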
/// <summary>
/// Commit current updates, updating this archive.
/// </summary>
/// <seealso cref="BeginUpdate()"></seealso>
/// <seealso cref="AbortUpdate"></seealso>
/// <exception cref="ObjectDisposedException">HfsFile has been closed.</exception>
public void CommitUpdate()
{
    if (isDisposed_) {
        throw new ObjectDisposedException("HfsFile");
    }

    CheckUpdating();

    try {
        updateIndex_.Clear();
        updateIndex_ = null;

        if (contentsEdited_) {
            RunUpdates();
        }
        else if (commentEdited_) {
            UpdateCommentOnly();
        }
        else {
            // Create an empty archive if none existed originally.
            if (entries_.Length == 0) {
                byte[] theComment = (newComment_ != null) ? newComment_.RawComment : HfsConstants.ConvertToArray(comment_);
                using (HfsHelperStream zhs = new HfsHelperStream(baseStream_)) {
                    zhs.WriteEndOfCentralDirectory(0, 0, 0, theComment, 0);
                }
            }
        }
    }
    finally {
        PostUpdateCleanup();
    }
}
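
// Usage sketch (assumption, not part of the original source): the update API follows the usual
// begin/modify/commit pattern. Only BeginUpdate, AbortUpdate and CommitUpdate are taken from the
// documentation above; how entries are queued between those calls depends on the rest of the
// HfsFile API and is only hinted at in the comment below.
static void CommitUpdateExample(HfsFile archive)
{
    archive.BeginUpdate();
    try {
        // ... queue adds, modifications or deletions on the archive here ...
        archive.CommitUpdate();   // runs RunUpdates() or UpdateCommentOnly() as appropriate
    }
    catch {
        archive.AbortUpdate();    // discard the pending changes on failure
        throw;
    }
}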