/// <summary>
/// Writes all pending entries back to the underlying stream. When
/// cleanCommit is false the archive is updated in place, reusing old data
/// slots where the new data still fits; when true the archive is rebuilt
/// into a scratch stream and copied back, discarding any dead space.
/// </summary>
public void Commit(bool cleanCommit)
{
    if (this.Stream.CanWrite == false)
    {
        throw new NotSupportedException();
    }

    DatabasePackedFile dbpf = new DatabasePackedFile();
    dbpf.Version = new Version(2, 0);

    if (cleanCommit == false)
    {
        if (this.EndOfDataOffset == 0)
        {
            // New archive: write a placeholder header (the real data and
            // index sizes are patched in after the data is written).
            this.Stream.Seek(this.BaseOffset, SeekOrigin.Begin);
            dbpf.WriteHeader(this.Stream, 0, 0);
            this.EndOfDataOffset = this.Stream.Position - this.BaseOffset;
        }

        foreach (KeyValuePair<MadScience.Wrappers.ResourceKey, Entry> kvp in this._Entries)
        {
            DatabasePackedFile.Entry entry = new DatabasePackedFile.Entry();
            entry.Key = kvp.Key;

            if (kvp.Value is MemoryEntry)
            {
                MemoryEntry memory = (MemoryEntry)kvp.Value;

                byte[] compressed;
                bool success = MadScience.RefPack.Compression.Compress(memory.Data, out compressed);
                //bool success = memory.Data.RefPackCompress(out compressed);

                if (success == true)
                {
                    entry.DecompressedSize = (uint)memory.Data.Length;
                    entry.CompressedSize = (uint)compressed.Length | 0x80000000;
                    entry.CompressionFlags = -1;
                    entry.Flags = 1;
                    memory.Data = compressed;
                }
                else
                {
                    // Compression didn't help; store the data as-is.
                    entry.DecompressedSize = memory.DecompressedSize;
                    entry.CompressedSize = memory.CompressedSize | 0x80000000;
                    entry.CompressionFlags = 0;
                    entry.Flags = 1;
                }

                // Is this replacing old data?
                if (this.OriginalEntries.ContainsKey(kvp.Key) == true)
                {
                    StreamEntry stream = this.OriginalEntries[kvp.Key];

                    // Let's see if the new data can fit where the old data was
                    if (memory.Data.Length <= stream.CompressedSize)
                    {
                        entry.Offset = stream.Offset;
                        this.Stream.Seek(this.BaseOffset + stream.Offset, SeekOrigin.Begin);
                        this.Stream.Write(memory.Data, 0, memory.Data.Length);
                    }
                    else
                    {
                        entry.Offset = this.EndOfDataOffset;
                        this.Stream.Seek(this.BaseOffset + this.EndOfDataOffset, SeekOrigin.Begin);
                        this.Stream.Write(memory.Data, 0, memory.Data.Length);
                        this.EndOfDataOffset += memory.Data.Length;
                    }
                }
                else
                {
                    // New data: append at the end of the data area.
                    entry.Offset = this.EndOfDataOffset;
                    this.Stream.Seek(this.BaseOffset + this.EndOfDataOffset, SeekOrigin.Begin);
                    this.Stream.Write(memory.Data, 0, memory.Data.Length);
                    this.EndOfDataOffset += memory.Data.Length;
                }
            }
            else if (kvp.Value is StreamEntry)
            {
                // Untouched entry: its data is already in place, so only the
                // index information needs to be carried over.
                StreamEntry stream = (StreamEntry)kvp.Value;
                entry.CompressedSize = stream.CompressedSize | 0x80000000;
                entry.DecompressedSize = stream.DecompressedSize;
                entry.Offset = stream.Offset;
                entry.CompressionFlags = stream.CompressedFlags;
                entry.Flags = stream.Flags;
            }
            else
            {
                throw new InvalidOperationException();
            }

            dbpf.Entries.Add(entry);
        }

        // Write the index after the data, then rewrite the header with the
        // final data size and index size.
        this.Stream.Seek(this.BaseOffset + this.EndOfDataOffset, SeekOrigin.Begin);
        dbpf.WriteIndex(this.Stream);
        long indexSize = this.Stream.Position - (this.BaseOffset + this.EndOfDataOffset);

        this.Stream.Seek(this.BaseOffset, SeekOrigin.Begin);
        dbpf.WriteHeader(this.Stream, this.EndOfDataOffset, indexSize);
    }
    else
    {
        Stream clean;
        string tempFileName = null;

        // Packages of five MB or more are rebuilt through a temporary
        // file-backed stream; smaller packages are rebuilt in memory.
        if (this.Stream.Length >= 5 * 1024 * 1024)
        {
            tempFileName = Path.GetTempFileName();
            clean = File.Open(tempFileName, FileMode.Create, FileAccess.ReadWrite, FileShare.Read);
        }
        else
        {
            clean = new MemoryStream();
        }

        // Placeholder header, patched once the data and index are written.
        dbpf.WriteHeader(clean, 0, 0);
        this.EndOfDataOffset = clean.Position;

        foreach (KeyValuePair<MadScience.Wrappers.ResourceKey, Entry> kvp in this._Entries)
        {
            DatabasePackedFile.Entry entry = new DatabasePackedFile.Entry();
            entry.Key = kvp.Key;

            if (kvp.Value is MemoryEntry)
            {
                MemoryEntry memory = (MemoryEntry)kvp.Value;

                byte[] compressed;
                bool success = MadScience.RefPack.Compression.Compress(memory.Data, out compressed);
                //bool success = memory.Data.RefPackCompress(out compressed);

                if (success == true)
                {
                    entry.DecompressedSize = (uint)memory.Data.Length;
                    entry.CompressedSize = (uint)compressed.Length | 0x80000000;
                    entry.CompressionFlags = -1;
                    entry.Flags = 1;
                    entry.Offset = this.EndOfDataOffset;
                    memory.Data = compressed;
                }
                else
                {
                    entry.DecompressedSize = memory.DecompressedSize;
                    entry.CompressedSize = memory.CompressedSize | 0x80000000;
                    entry.CompressionFlags = 0;
                    entry.Flags = 1;
                    entry.Offset = this.EndOfDataOffset;
                }

                clean.Write(memory.Data, 0, memory.Data.Length);
                this.EndOfDataOffset += memory.Data.Length;
            }
            else if (kvp.Value is StreamEntry)
            {
                StreamEntry stream = (StreamEntry)kvp.Value;
                entry.CompressedSize = stream.CompressedSize | 0x80000000;
                entry.DecompressedSize = stream.DecompressedSize;
                entry.CompressionFlags = stream.CompressedFlags;
                entry.Flags = stream.Flags;
                entry.Offset = this.EndOfDataOffset;

                // Copy the entry's data from the old archive into the clean
                // stream, tolerating short reads from the source stream.
                this.Stream.Seek(this.BaseOffset + stream.Offset, SeekOrigin.Begin);
                byte[] data = new byte[4096];
                int left = (int)stream.CompressedSize;
                while (left > 0)
                {
                    int block = Math.Min(left, data.Length);
                    int read = this.Stream.Read(data, 0, block);
                    if (read <= 0)
                    {
                        throw new EndOfStreamException();
                    }
                    clean.Write(data, 0, read);
                    left -= read;
                }
                this.EndOfDataOffset += stream.CompressedSize;
            }
            else
            {
                throw new InvalidOperationException();
            }

            dbpf.Entries.Add(entry);
        }

        dbpf.WriteIndex(clean);
        long indexSize = clean.Position - this.EndOfDataOffset;

        clean.Seek(0, SeekOrigin.Begin);
        dbpf.WriteHeader(clean, this.EndOfDataOffset, indexSize);

        // Copy the rebuilt archive back over the real stream.
        {
            this.Stream.Seek(this.BaseOffset, SeekOrigin.Begin);
            clean.Seek(0, SeekOrigin.Begin);
            byte[] data = new byte[4096];
            long left = clean.Length;
            while (left > 0)
            {
                int block = (int)Math.Min(left, data.Length);
                int read = clean.Read(data, 0, block);
                if (read <= 0)
                {
                    throw new EndOfStreamException();
                }
                this.Stream.Write(data, 0, read);
                left -= read;
            }
        }

        // Truncate any leftover bytes from the old archive.
        this.Stream.SetLength(this.BaseOffset + this.EndOfDataOffset + indexSize);

        clean.Close();
        if (tempFileName != null)
        {
            try
            {
                File.Delete(tempFileName);
            }
            catch (Exception)
            {
            }
        }
    }

    // Everything is on disk now; replace the in-memory entries with
    // stream-backed entries that point at the data just committed.
    this._Entries.Clear();
    this.OriginalEntries.Clear();
    foreach (DatabasePackedFile.Entry entry in dbpf.Entries)
    {
        this._Entries.Add(entry.Key, new StreamEntry()
        {
            Compressed = entry.Compressed,
            Offset = entry.Offset,
            CompressedSize = entry.CompressedSize,
            DecompressedSize = entry.DecompressedSize,
            CompressedFlags = entry.CompressionFlags,
            Flags = entry.Flags,
        });
    }
}
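// Usage sketch (illustrative only): the enclosing package type and its
// constructor are not shown in this file, so "Package" below is an assumed
// name, not the real API.
//
//   using (FileStream stream = File.Open("example.package", FileMode.Open,
//       FileAccess.ReadWrite, FileShare.Read))
//   {
//       Package package = new Package(stream);  // hypothetical wrapper
//       // ... add or replace entries through the package's entry API ...
//       package.Commit(false);  // in-place: reuses slots that still fit,
//                               // appends the rest, may leave dead space
//       package.Commit(true);   // clean: rebuilds into a scratch stream and
//                               // copies it back, reclaiming dead space
//   }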