/// <summary>
/// Decompresses a dat file stream (includes headers and chunks)
/// </summary>
/// <param name="limit">Maximum number of bytes to read from the stream</param>
public byte[] Decompress(Stream stream, int limit)
{
    using (var fs = new IO.FileStream(stream, true, false, false))
    {
        return (DecompressStream(fs, true, limit));
    }
}
/// <summary>
/// Decompresses a raw file stream (doesn't include headers or chunks)
/// </summary>
/// <param name="uncompressedSize">Size of the uncompressed file, or -1 if unknown</param>
public byte[] DecompressRaw(Stream stream, int uncompressedSize)
{
    using (var fs = new IO.FileStream(stream, true, false, false))
    {
        fs.BlockSize = 0;

        return (DecompressStream(fs, false, uncompressedSize));
    }
}
/// <summary>
/// Decompresses dat file data (includes headers and chunks)
/// </summary>
/// <param name="limit">Maximum number of bytes to read from the data</param>
public byte[] Decompress(byte[] data, int limit)
{
    using (var stream = new MemoryStream(data, false))
    {
        using (var fs = new IO.FileStream(stream, true, false, false))
        {
            return (DecompressStream(fs, true, limit));
        }
    }
}
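// Illustrative usage sketch: feeding compressed dat content through the overloads above.
// The compressor instance, path, and limit here are hypothetical placeholders.
//
//   byte[] compressed = ...;                                          //bytes of a compressed dat entry
//   var output = compressor.Decompress(compressed, compressed.Length); //read at most the whole input
//
//   using (var input = System.IO.File.OpenRead(somePath))
//   {
//       //headerless, chunkless data with an unknown uncompressed size
//       var raw = compressor.DecompressRaw(input, -1);
//   }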
/// <summary>
/// Creates a bare Local.dat file
/// </summary>
/// <param name="build">If > 0, sets the gw2cache location</param>
/// <returns>Local.dat file in bytes</returns>
public static byte[] CreateNew(int build = 0)
{
    int blockSize = 512;

    using (var ms = new MemoryStream(blockSize * (build > 0 ? 4 : 2))) //4 blocks: header, entries, cache, ids (cache + ids only needed if build is set)
    {
        using (var fs = new Dat.Compression.IO.FileStream(ms, true, false, false))
        {
            using (var w = new BinaryWriter(fs))
            {
                DatFile.MftEntry e;
                var entries = new DatFile.MftEntry[build > 0 ? 16 : 15]; //first 15 entries are reserved
                long ofs = 0;

                entries[INDEX_HEADER] = new DatFile.MftEntry()
                {
                    flags = 3,
                    size = 40,
                };

                ofs += blockSize;

                if (build > 0)
                {
                    var indexCache = entries.Length - 1;

                    e = entries[indexCache] = new DatFile.MftEntry()
                    {
                        offset = ofs,
                        compression = 8,
                        flags = 3,
                        crc = 1214729159, //note the crc is always the same because the file ends with a crc, which cancels out when crc'd again
                    };

                    w.BaseStream.Position = ofs;
                    w.Write(CreateGw2Cache(new Archive(), build, true));

                    e.size = (int)(w.BaseStream.Position - ofs);
                    ofs += (e.size / blockSize + 1) * blockSize;

                    e = entries[INDEX_IDS] = new DatFile.MftEntry()
                    {
                        offset = ofs,
                        flags = 3,
                    };

                    w.BaseStream.Position = ofs;
                    fs.ComputeCRC = true;

                    w.Write(ID_CACHE);
                    w.Write(indexCache + 1);

                    e.size = (int)(w.BaseStream.Position - ofs);
                    e.crc = fs.CRC;

                    fs.ComputeCRC = false;
                    fs.ResetCRC();

                    ofs += blockSize;
                }

                e = entries[INDEX_ENTRIES] = new DatFile.MftEntry()
                {
                    offset = ofs,
                    flags = 3,
                };

                w.BaseStream.Position = e.offset;
                fs.ComputeCRC = true;

                w.Write(443835981U);
                w.Write(0L);
                w.Write(entries.Length + 1); //this header is included in the count
                w.Write(0L);

                for (var i = 0; i < INDEX_ENTRIES; i++)
                {
                    WriteEntry(w, entries[i]);
                }

                w.BaseStream.Position += 24; //INDEX_ENTRIES is written last

                for (var i = INDEX_ENTRIES + 1; i < entries.Length; i++)
                {
                    WriteEntry(w, entries[i]);
                }

                e.size = (int)(w.BaseStream.Position - e.offset);
                e.crc = fs.CRC;

                fs.ComputeCRC = false;
                fs.ResetCRC();

                ofs += blockSize;

                w.BaseStream.Position = e.offset + (INDEX_ENTRIES + 1) * 24;
                WriteEntry(w, entries[INDEX_ENTRIES]);

                w.BaseStream.Position = 0;

                w.Write(441336215U);
                w.Write(entries[INDEX_HEADER].size);
                w.Write(3401187329U);
                w.Write(blockSize);
                w.Write(2396038944U);
                w.Write(0);
                w.Write(entries[INDEX_ENTRIES].offset);
                w.Write(entries[INDEX_ENTRIES].size);
                w.Write(0);
            }
        }

        return (ms.ToArray());
    }
}
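// Illustrative sketch: CreateNew could be used to seed a fresh Local.dat on disk before the game
// first runs. The destination path and build value below are hypothetical placeholders.
//
//   var bytes = CreateNew();                            //bare file: header + entry table only
//   System.IO.File.WriteAllBytes(localDatPath, bytes);
//
//   //with a build number, the gw2cache entry and the id table are also written
//   var withCache = CreateNew(currentBuild);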
private void Update(Session s, Settings.IDatFile dat)
{
    using (var r = new BinaryReader(new BufferedStream(System.IO.File.Open(dat.Path, FileMode.Open, FileAccess.ReadWrite, FileShare.Read))))
    {
        DatFile.Mft mft;
        int mftEntries;

        if (r.BaseStream.Length > 0)
        {
            mft = DatFile.ReadMft(r);
            mftEntries = mft.entries.Length;
        }
        else
        {
            mft = null;
            mftEntries = 0;
        }

        var blockSize = s.BlockSize;
        var custom = new Dictionary<int, CustomEntry>();
        var entries = s.entries;
        var count = s.entriesCount;
        var remaining = s.snapshot.Count;
        var changed = false;
        var size = 0;

        for (var i = 0; i < mftEntries; i++)
        {
            var entry = mft.entries[i];

            if (entry.baseId > 0)
            {
                if (!changed)
                {
                    int fileId;

                    if (s.snapshot.TryGetValue(entry.baseId, out fileId))
                    {
                        --remaining;

                        if (fileId != entry.fileId)
                        {
                            changed = true;
                        }
                        else
                        {
                            if (entry.baseId == ID_CACHE)
                            {
                                var e = FindEntry(s.entries, s.entriesCount, entry.baseId);
                                if (e != null && e.size != entry.size)
                                {
                                    changed = true;
                                }
                            }
                        }

                        continue;
                    }
                }

                if (entry.baseId == entry.fileId && entry.baseId < 100 && entry.size > 0)
                {
                    if (changed && s.snapshot.ContainsKey(entry.baseId))
                    {
                        continue;
                    }

                    switch (entry.baseId)
                    {
                        case ID_LOCAL:

                            byte[] data;
                            int position;
                            var build = ReadBuild(s.compressor, r.BaseStream, entry, out data, out position);

                            if (s.build > build)
                            {
                                changed = true;

                                //updating the build
                                //note that this isn't required
                                var b = BitConverter.GetBytes(s.build);
                                Array.Copy(b, 0, data, position, 4);

                                var c = new CustomEntry()
                                {
                                    compress = true,
                                    data = data,
                                    entry = entry,
                                    index = i,
                                    length = data.Length,
                                };

                                custom[entry.baseId] = c;

                                entry.offset = long.MaxValue;
                                entry.size = data.Length;
                            }

                            break;
                    }

                    entries[count++] = entry;
                    size += (entry.size / blockSize + 1) * blockSize;
                }
            }
        }

        if (!changed && remaining == 0)
        {
            return;
        }

        size += ((s.entriesCount + count) * 40 / blockSize + 1) * blockSize; //add padding for entries and IDs (24 bytes per entry, 8-16 bytes per id)

        Array.Sort(entries, s.entriesCount, count - s.entriesCount, Comparer<DatFile.MftEntry>.Create(
            delegate(DatFile.MftEntry a, DatFile.MftEntry b)
            {
                return (a.offset.CompareTo(b.offset));
            }));

        var buffer = s.buffer2;
        if (buffer == null || buffer.Length < size)
        {
            s.buffer2 = buffer = new byte[size + 1024 * 1024];
        }

        var ofs = 0;
        var fofs = s.buffer1.Length;

        for (var i = s.entriesCount; i < count; i++)
        {
            var e = entries[i];

            if (e.size == 0)
            {
                e.offset = 0;
                continue;
            }

            if (e.offset != long.MaxValue)
            {
                r.BaseStream.Position = e.offset;
                ReadBytes(r, buffer, ofs, e.size);
            }
            else
            {
                CustomEntry c;
                if (custom.TryGetValue(e.baseId, out c))
                {
                    e.size = c.WriteTo(buffer, ofs, s.compressor);
                }
            }

            e.offset = ofs + fofs;
            ofs += (e.size / blockSize + 1) * blockSize;
        }

        using (var fs = new Dat.Compression.IO.FileStream(new MemoryStream(buffer), false, true, false))
        {
            using (var w = new BinaryWriter(fs))
            {
                w.BaseStream.Position = ofs;

                //write ids
                for (var i = 1; i <= count; i++)
                {
                    var entry = entries[i - 1];

                    if (entry.baseId > 0)
                    {
                        w.Write(entry.baseId);
                        w.Write(i);

                        if (entry.baseId != entry.fileId)
                        {
                            w.Write(entry.fileId);
                            w.Write(i);
                        }
                    }
                }

                var e = entries[INDEX_IDS];

                e.offset = ofs + fofs;
                e.size = (int)(w.BaseStream.Position - ofs);
                e.crc = fs.CRC;

                ofs += (e.size / blockSize + 1) * blockSize;

                e = entries[INDEX_ENTRIES];

                w.BaseStream.Position = ofs;
                fs.ResetCRC();

                //write entries header
                w.Write(443835981U);
                w.Write(0L);
                w.Write(count + 1);
                w.Write(0L);

                //write first set of entries
                for (var i = 0; i < INDEX_ENTRIES; i++)
                {
                    WriteEntry(w, entries[i]);
                }

                var p0 = w.BaseStream.Position;
                w.BaseStream.Position += 24; //entry is written last

                //write remaining entries
                for (var i = INDEX_ENTRIES + 1; i < count; i++)
                {
                    WriteEntry(w, entries[i]);
                }

                e.offset = ofs + fofs;
                e.size = (int)(w.BaseStream.Position - ofs);
                e.crc = fs.CRC;

                ofs += (e.size / blockSize + 1) * blockSize;

                s.MftOffset = e.offset;
                s.MftSize = e.size;

                w.BaseStream.Position = p0;
                WriteEntry(w, entries[INDEX_ENTRIES]);
            }
        }

        var stream = r.BaseStream;

        stream.Position = 0;
        stream.Write(s.buffer1, 0, s.buffer1.Length);
        stream.Write(buffer, 0, ofs);
        stream.SetLength(stream.Position);
    }
}
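// Sizing note: the offsets above advance by (size / blockSize + 1) * blockSize, so every entry is
// padded to a block boundary and always consumes at least one extra partial block, even when its
// size is an exact multiple of blockSize. A standalone equivalent of that rounding, assuming the
// 512-byte blocks used by CreateNew:
//
//   static int PadToBlock(int size, int blockSize = 512)
//   {
//       return (size / blockSize + 1) * blockSize; //40 -> 512, 512 -> 1024, 600 -> 1024
//   }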