/// <summary>
/// Gets the bytes of one file by index from the archive.
/// </summary>
/// <param name="idx">Zero-based index into the archive's file table.</param>
/// <param name="mmf">Memory-mapped view of the archive file.</param>
/// <returns>
/// The file's bytes (decompressed where necessary), or null when
/// <paramref name="idx"/> is out of range.
/// </returns>
public byte[] GetFileData(int idx, MemoryMappedFile mmf)
{
    // Guard both ends of the range; the original only rejected indices past the
    // end, so a negative idx would throw from the list indexer instead of
    // returning null like the documented out-of-range path.
    if (idx < 0 || idx >= _table.FileInfo.Count)
    {
        return null;
    }

    var entry = _table.FileInfo[idx];
    var startindex = (int)entry.FirstDataSector;
    var nextindex = (int)entry.NextDataSector;

    using var ms = new MemoryStream();
    using var bw = new BinaryWriter(ms);

    // A file's payload spans the half-open sector range [FirstDataSector, NextDataSector).
    for (int j = startindex; j < nextindex; j++)
    {
        var offsetentry = this._table.Offsets[j];
        using var vs = mmf.CreateViewStream((long)offsetentry.Offset, (long)offsetentry.PhysicalSize, MemoryMappedFileAccess.Read);
        using var binaryReader = new BinaryReader(vs);

        if (offsetentry.PhysicalSize == offsetentry.VirtualSize)
        {
            // Equal physical/virtual size means the sector is stored uncompressed.
            var buffer = binaryReader.ReadBytes((int)offsetentry.PhysicalSize);
            bw.Write(buffer);
        }
        else
        {
            // Compressed sector layout: 4-byte magic "KARK", 4-byte decompressed
            // size, then the Oodle payload (PhysicalSize - 8 bytes).
            var oodleCompression = binaryReader.ReadBytes(4);
            if (!oodleCompression.SequenceEqual(new byte[] { 0x4b, 0x41, 0x52, 0x4b }))
            {
                throw new NotImplementedException();
            }

            var size = binaryReader.ReadUInt32();
            if (size != offsetentry.VirtualSize)
            {
                throw new NotImplementedException();
            }

            var buffer = binaryReader.ReadBytes((int)offsetentry.PhysicalSize - 8);
            byte[] unpacked = new byte[offsetentry.VirtualSize];
            long unpackedSize = OodleLZ.Decompress(buffer, unpacked);
            if (unpackedSize != offsetentry.VirtualSize)
            {
                throw new Exception($"Unpacked size doesn't match real size. {unpackedSize} vs {offsetentry.VirtualSize}");
            }

            bw.Write(unpacked);
        }
    }

    return ms.ToArray();
}
/// <summary>
/// Extracts the file identified by its hashed path into <paramref name="stream"/>.
/// </summary>
/// <param name="pathId">Hashed path of the file to extract.</param>
/// <param name="stream">Destination stream; left open after the write.</param>
public void ExtractFile(ulong pathId, Stream stream)
{
    using var handle = File.OpenRead(_archivePath);

    // Hashed path -> file entry -> block entries
    int fileIndex = GetFileEntryIndex(pathId);
    var fileEntry = FileEntries[fileIndex];
    int firstBlock = GetBlockEntryIndex(fileEntry.DecompressedOffset);
    int lastBlock = GetBlockEntryIndex(fileEntry.DecompressedOffset + fileEntry.DecompressedSize - 1);

    using var reader = new BinaryReader(handle, Encoding.UTF8, true);
    using var writer = new BinaryWriter(stream, new UTF8Encoding(false, true), true);

    // Keep a small cache sitting around to avoid excessive allocations
    Span<byte> decompressedData = new byte[ReaderBlockSizeThreshold * 2];

    ulong fileDataOffset = fileEntry.DecompressedOffset;
    // BUG FIX: this declaration was commented out even though fileDataLength is
    // consumed below (copy-size computation and the running remainder), which
    // does not compile. Restored as live code.
    ulong fileDataLength = fileEntry.DecompressedSize; // Remainder

    // Files can be split across multiple sequential blocks
    for (int blockIndex = firstBlock; blockIndex <= lastBlock; blockIndex++)
    {
        var block = BlockEntries[blockIndex];

        if (block.DecompressedSize > decompressedData.Length)
        {
            throw new Exception("Increase cache buffer size");
        }

        // Read from the bin, decrypt, and decompress
        reader.BaseStream.Position = (long)block.Offset;
        var data = reader.ReadBytesStrict(block.Size);

        if (Header.IsEncrypted)
        {
            block.XorDataBuffer(data);
        }

        // If the buffer is bigger than the decompressed size, OodleLZ v3 doesn't
        // decompress data correctly every time; with an exact-size buffer it
        // decompresses correctly but reports that it failed.
        OodleLZ.Decompress(data, decompressedData, block.DecompressedSize);

        // Copy data from the adjusted offset within the decompressed buffer. If the
        // file requires another block, truncate the copy and loop again.
        ulong copyOffset = fileDataOffset - block.DecompressedOffset;
        ulong copySize = Math.Min(fileDataLength, block.DecompressedSize - copyOffset);

        // Write the span slice directly (BinaryWriter has a ReadOnlySpan<byte>
        // overload) instead of allocating a throwaway array via ToArray().
        writer.Write(decompressedData.Slice((int)copyOffset, (int)copySize));

        fileDataOffset += copySize;
        fileDataLength -= copySize;
    }
}
/// <summary>
/// Extracts the file at the given archive path into <paramref name="destinationPath"/>.
/// </summary>
/// <param name="path">Archive-internal path of the file to extract.</param>
/// <param name="destinationPath">Destination path on disk.</param>
/// <param name="allowOverwrite">
/// When true an existing destination file is replaced; when false an existing
/// destination file causes an <see cref="IOException"/>.
/// </param>
public void ExtractFile(string path, string destinationPath, bool allowOverwrite = false)
{
    // Hashed path -> file entry -> block entries
    uint fileIndex = GetFileEntryIndex(path);
    var fileEntry = FileEntries[fileIndex];
    uint firstBlock = GetBlockEntryIndex(fileEntry.DecompressedOffset);
    uint lastBlock = GetBlockEntryIndex(fileEntry.DecompressedOffset + fileEntry.DecompressedSize - 1);

    // BUG FIX: the FileMode arms were inverted. FileMode.Create overwrites an
    // existing file while FileMode.CreateNew throws if one exists, so the old
    // code overwrote when allowOverwrite was false and threw when it was true.
    using (var reader = new BinaryReader(ArchiveFileHandle, Encoding.UTF8, true))
    using (var writer = new BinaryWriter(File.Open(destinationPath, allowOverwrite ? FileMode.Create : FileMode.CreateNew, FileAccess.Write)))
    {
        // Keep a small cache sitting around to avoid excessive allocations
        Span<byte> decompressedData = new byte[512 * 1024];

        ulong fileDataOffset = fileEntry.DecompressedOffset;
        // BUG FIX: this declaration was commented out even though fileDataLength
        // is consumed below; restored as live code.
        ulong fileDataLength = fileEntry.DecompressedSize; // Remainder

        // Files can be split across multiple sequential blocks
        for (uint blockIndex = firstBlock; blockIndex <= lastBlock; blockIndex++)
        {
            var block = BlockEntries[blockIndex];

            if (block.DecompressedSize > decompressedData.Length)
            {
                throw new Exception("Increase cache buffer size");
            }

            // Read & decompress from the bin
            reader.BaseStream.Position = (long)block.Offset;
            var data = reader.ReadBytes(block.Size);

            if (!OodleLZ.Decompress(data, decompressedData))
            {
                throw new InvalidDataException("OodleLZ block decompression failed");
            }

            // Copy data from the adjusted offset within the decompressed buffer. If the
            // file requires another block, truncate the copy and loop again.
            ulong copyOffset = fileDataOffset - block.DecompressedOffset;
            ulong copySize = Math.Min(fileDataLength, block.DecompressedSize - copyOffset);

            writer.Write(decompressedData.Slice((int)copyOffset, (int)copySize));

            fileDataOffset += copySize;
            fileDataLength -= copySize;
        }
    }
}
/// <summary>
/// Decompresses an Oodle-compressed ("KARK") file and writes the result to
/// "<c>{path}.kark</c>" alongside the input.
/// </summary>
/// <param name="path">Path of the compressed input file.</param>
/// <param name="decompress">When false the method does nothing (compression is not implemented here).</param>
/// <returns>0 when <paramref name="path"/> is null or empty; otherwise 1.</returns>
public static int OodleTask(string path, bool decompress)
{
    if (string.IsNullOrEmpty(path))
    {
        return 0;
    }

    if (decompress)
    {
        var file = File.ReadAllBytes(path);
        using var ms = new MemoryStream(file);
        using var br = new BinaryReader(ms);

        // Header: 4-byte magic "KARK" followed by the 32-bit decompressed size;
        // the remaining (file.Length - 8) bytes are the Oodle payload.
        var oodleCompression = br.ReadBytes(4);
        if (!oodleCompression.SequenceEqual(new byte[] { 0x4b, 0x41, 0x52, 0x4b }))
        {
            throw new NotImplementedException();
        }

        var size = br.ReadUInt32();
        var buffer = br.ReadBytes(file.Length - 8);

        byte[] unpacked = new byte[size];
        long unpackedSize = OodleLZ.Decompress(buffer, unpacked);
        // NOTE(review): unlike the archive readers, unpackedSize is not validated
        // against the header size here — confirm whether a mismatch should throw.

        // Write the decompressed bytes directly; the original copied them through
        // an extra MemoryStream/BinaryWriter pair that produced identical bytes.
        File.WriteAllBytes($"{path}.kark", unpacked);
    }

    return 1;
}
/// <summary>
/// Gets the bytes of one file by hash from the archive.
/// </summary>
/// <param name="hash">Hashed path identifying the file.</param>
/// <param name="mmf">Memory-mapped view of the archive file.</param>
/// <returns>
/// A tuple of the file's main buffer and any additional buffers from the
/// following sectors, or (null, null) when the hash is unknown.
/// </returns>
public (byte[], List<byte[]>) GetFileData(ulong hash, MemoryMappedFile mmf)
{
    // Single dictionary lookup instead of ContainsKey followed by the indexer.
    if (!Files.TryGetValue(hash, out var entry))
    {
        return (null, null);
    }

    var startindex = (int)entry.FirstDataSector;
    var nextindex = (int)entry.NextDataSector;

    // First sector holds the main file buffer; the remaining sectors in
    // [startindex + 1, nextindex) are returned as extra buffers.
    var file = ExtractFile(this._table.Offsets[startindex]);
    var buffers = new List<byte[]>();
    for (int j = startindex + 1; j < nextindex; j++)
    {
        var offsetentry = this._table.Offsets[j];
        var buffer = ExtractFile(offsetentry);
        buffers.Add(buffer);
    }

    return (file, buffers);

    // Local helper: read one sector, decompressing it if physical != virtual size.
    byte[] ExtractFile(OffsetEntry offsetentry)
    {
        using var ms = new MemoryStream();
        using var bw = new BinaryWriter(ms);
        using var vs = mmf.CreateViewStream((long)offsetentry.Offset, (long)offsetentry.PhysicalSize, MemoryMappedFileAccess.Read);
        using var binaryReader = new BinaryReader(vs);

        if (offsetentry.PhysicalSize == offsetentry.VirtualSize)
        {
            // Stored uncompressed: copy straight through.
            var buffer = binaryReader.ReadBytes((int)offsetentry.PhysicalSize);
            bw.Write(buffer);
        }
        else
        {
            // Compressed sector layout: 4-byte magic "KARK", 4-byte decompressed
            // size, then the Oodle payload (PhysicalSize - 8 bytes).
            var oodleCompression = binaryReader.ReadBytes(4);
            if (!oodleCompression.SequenceEqual(new byte[] { 0x4b, 0x41, 0x52, 0x4b }))
            {
                throw new NotImplementedException();
            }

            var size = binaryReader.ReadUInt32();
            if (size != offsetentry.VirtualSize)
            {
                throw new NotImplementedException();
            }

            var buffer = binaryReader.ReadBytes((int)offsetentry.PhysicalSize - 8);
            byte[] unpacked = new byte[offsetentry.VirtualSize];
            long unpackedSize = OodleLZ.Decompress(buffer, unpacked);
            if (unpackedSize != offsetentry.VirtualSize)
            {
                throw new Exception(
                    $"Unpacked size doesn't match real size. {unpackedSize} vs {offsetentry.VirtualSize}");
            }

            bw.Write(unpacked);
        }

        return ms.ToArray();
    }
}
/// <summary>
/// Extract a file contained in the archive.
/// </summary>
/// <param name="pathId">Source hashed Decima core path</param>
/// <param name="stream">Destination stream</param>
/// <exception cref="FileNotFoundException">No entry exists for <paramref name="pathId"/>.</exception>
/// <exception cref="EndOfStreamException">The archive ends before a block could be fully read.</exception>
public void ExtractFile(ulong pathId, Stream stream)
{
    using var archiveHandle = CreateReadStream(_fileHandle.Name);

    // Hashed path -> file entry -> block entries
    int fileIndex = GetFileEntryIndex(pathId);
    if (fileIndex == InvalidEntryIndex)
    {
        throw new FileNotFoundException($"Unable to extract file with path ID {pathId}");
    }

    var fileEntry = _fileEntries[fileIndex];
    int firstBlock = GetBlockEntryIndex(fileEntry.DecompressedOffset);
    int lastBlock = GetBlockEntryIndex(fileEntry.DecompressedOffset + fileEntry.DecompressedSize - 1);

    ulong fileDataOffset = fileEntry.DecompressedOffset;
    // BUG FIX: this declaration was commented out even though fileDataLength is
    // consumed below (copy-size computation and the running remainder); restored.
    ulong fileDataLength = fileEntry.DecompressedSize; // Remainder

    // Files can be split across multiple sequential blocks
    for (int blockIndex = firstBlock; blockIndex <= lastBlock; blockIndex++)
    {
        var block = _blockEntries[blockIndex];

        // Pooled scratch buffers; both are returned in the finally block below.
        var compressedData = ArrayPool<byte>.Shared.Rent((int)block.Size);
        var decompressedData = ArrayPool<byte>.Shared.Rent((int)block.DecompressedSize);

        try
        {
            // Read from the bin, decrypt, and decompress
            archiveHandle.Position = (long)block.Offset;
            if (archiveHandle.Read(compressedData, 0, (int)block.Size) != block.Size)
            {
                throw new EndOfStreamException("Short read of archive data");
            }

            if (Header.IsEncrypted)
            {
                block.XorDataBuffer(compressedData);
            }

            // If the buffer is bigger than the decompressed size, OodleLZ v3 doesn't
            // decompress data correctly every time. However, if the buffer is the
            // correct size it will decompress correctly but report that it failed.
            OodleLZ.Decompress(compressedData, decompressedData, block.DecompressedSize);

            // Copy data from the adjusted offset within the decompressed buffer. If the
            // file requires another block, truncate the copy and loop again.
            ulong copyOffset = fileDataOffset - block.DecompressedOffset;
            ulong copySize = Math.Min(fileDataLength, block.DecompressedSize - copyOffset);

            stream.Write(decompressedData, (int)copyOffset, (int)copySize);

            fileDataOffset += copySize;
            fileDataLength -= copySize;
        }
        finally
        {
            ArrayPool<byte>.Shared.Return(decompressedData);
            ArrayPool<byte>.Shared.Return(compressedData);
        }
    }
}