// Writes a new data entry (sourced from an external import file) at the current
// output position, records its offset and hash on the entry, and optionally pads
// the stream to the next 2048-byte boundary.
public void CreateDataEntry(PakBinaryWriter writer, PakEntry pakEntry, bool usePadding)
{
    // Record where this entry's data begins in the output stream.
    pakEntry.Offset = writer.BaseStream.Position;

    var fileBytes = File.ReadAllBytes(pakEntry.Import);
    pakEntry.Hash = HashBytes(fileBytes);

    // Serialize the entry record, then the raw payload.
    writer.Write(pakEntry, true);
    writer.Write(fileBytes);

    // Pad out to the next 2048-byte boundary when requested and the entry
    // is flagged as padded; a position already on the boundary gets nothing.
    if (usePadding && pakEntry.Padded)
    {
        var remainder = writer.BaseStream.Position % 2048;
        if (remainder != 0)
        {
            writer.Write(new byte[2048 - remainder]);
        }
    }
}
// Serializes the archive's directory tree and footer. The tree is first built
// in memory so it can be hashed and measured before being written out, then the
// archive footer follows with version-dependent zero padding.
public void SavePakFileStructure(PakBinaryWriter writer)
{
    // Everything written so far is entry data; record its total size.
    Archive.DataSize = writer.BaseStream.Position;

    byte[] treeBytes;
    using (var buffer = new MemoryStream())
    {
        using (var bufferWriter = new PakBinaryWriter(buffer))
        {
            bufferWriter.Write(Archive.Directory);
            foreach (var entry in Archive.Directory.Entries)
            {
                bufferWriter.Write(entry, false);
            }
        }
        // ToArray is valid even after the writer has closed the stream.
        treeBytes = buffer.ToArray();
    }

    writer.Write(treeBytes);
    Archive.Hash = HashBytes(treeBytes);
    Archive.FileTreeSize = treeBytes.Length;

    // NOTE(review): version >= 7 writes 17 zero bytes before the footer and
    // version == 8 adds 160 more after it — presumably reserved fields; the
    // exact meaning is not visible from this file.
    if (Archive.Version >= 7)
    {
        writer.Write(new byte[17]);
    }
    writer.Write(Archive);
    if (Archive.Version == 8)
    {
        writer.Write(new byte[160]);
    }
}
// Copies one entry's data into the output archive: either from an external
// import file (pakEntry.Import set) or by relocating the payload from the
// original archive via `reader`. Updates pakEntry.Offset (and, on pre-v7
// archives, chunk positions) to the new location, then optionally pads to a
// 2048-byte boundary. Throws InvalidOperationException for an unknown
// compression type.
public void SaveDataEntry(PakBinaryReader reader, PakBinaryWriter writer, PakEntry pakEntry, bool usePadding)
{
    // An offset of -1 marks an entry with no stored data; nothing to copy.
    if (pakEntry.Offset == -1)
    {
        return;
    }

    long originalOffset = pakEntry.Offset;
    pakEntry.Offset = writer.BaseStream.Position;

    // Pre-v7 archives keep absolute chunk positions, so every chunk is rebased
    // by the same delta the entry moved. The delta cancels out below where a
    // chunk's length is computed as ChunkEnd - ChunkOffset.
    if (Archive.Version < 7)
    {
        var offsetDifference = pakEntry.Offset - originalOffset;
        foreach (var chunk in pakEntry.Chunks)
        {
            chunk.ChunkEnd += offsetDifference;
            chunk.ChunkOffset += offsetDifference;
        }
    }

    writer.Write(pakEntry, true);

    if (pakEntry.Import != null)
    {
        // Payload is replaced from an external file. Open read-only with
        // shared read access: File.Open(path, FileMode.Open) alone requests
        // read/write and fails on read-only or already-open files. The reader
        // is also disposed now, instead of being left to the finalizer.
        using (var importFileStream = File.OpenRead(pakEntry.Import))
        using (var importReader = new PakBinaryReader(importFileStream))
        {
            writer.Write(importReader.ReadBytes((int)pakEntry.UncompressedSize));
        }
    }
    else
    {
        // Copy the payload from the original archive.
        if (reader.BaseStream.Position != originalOffset)
        {
            reader.BaseStream.Seek(originalOffset, SeekOrigin.Begin);
        }

        // Sanity check: the entry header stored at the data offset must match
        // the directory entry; otherwise bail out (best-effort, preserved from
        // the original code). NOTE(review): the rewritten header has already
        // been emitted at this point, so a mismatch leaves a header with no
        // payload — confirm callers tolerate this.
        var importFilePakEntry = reader.ReadFileLevelPakEntry();
        if (!importFilePakEntry.Equals(pakEntry))
        {
            return;
        }

        switch (pakEntry.CompressionType)
        {
            case 0:
                // Uncompressed: copy the raw payload in one read.
                writer.Write(reader.ReadBytes((int)pakEntry.UncompressedSize));
                break;
            case 1:
            case 4:
            case 16400:
                // Chunked compression: copy each compressed chunk verbatim.
                foreach (var chunk in pakEntry.Chunks)
                {
                    var compressedData = reader.ReadBytes((int)(chunk.ChunkEnd - chunk.ChunkOffset));
                    writer.Write(compressedData);
                }
                break;
            default:
                throw new InvalidOperationException(
                    $"Unsupported compression type {pakEntry.CompressionType}.");
        }
    }

    // Pad out to the next 2048-byte boundary when requested; a position
    // already on the boundary gets nothing.
    if (usePadding && pakEntry.Padded)
    {
        var toWrite = 2048 - writer.BaseStream.Position % 2048;
        if (toWrite != 2048)
        {
            writer.Write(new byte[toWrite]);
        }
    }
}