// Rebuilding attempts. Almost works, but needs to handle embedded LS entries.
/// <summary>
/// Rebuilds the DT archive from scratch into a new file named "dt_rebuild": walks the
/// resource table, writes each pack's files (taken from <paramref name="patchFolder"/>)
/// with 0xCC file borders and 0xBB pack end padding, then appends the recompressed
/// resource table and repoints the LS entries at it.
/// </summary>
/// <param name="patchFolder">Root folder whose file tree mirrors the archive paths.</param>
private static unsafe void RebuildArchive(string patchFolder)
{
    // Extract and decompress the master resource table to a temp file so RFFile can parse it.
    LSEntryObject _resMain = lsFile.Entries[calc_crc("resource")];
    byte[] resource = GetFileDataDecompressed(_resMain.DTOffset + (uint)_resMain.PaddingLength, _resMain.Size, _resMain.DTIndex);
    File.WriteAllBytes("resource", resource);
    Console.WriteLine($"Rebuilding archive...");
    RFFile rfFile = new RFFile("resource");

    // pathParts/offsetParts track the current folder chain by depth while walking the
    // flat entry list; offsetParts holds the LS entry of each enclosing pack.
    var pathParts = new string[20];
    var offsetParts = new LSEntryObject[20];
    using (FileStream strm = File.Create("dt_rebuild"))
    {
        int lsSize = 0; // byte count of the pack currently being written
        foreach (ResourceEntryObject rsobj in rfFile.ResourceEntries.Where(rsobj => rsobj != null))
        {
            pathParts[rsobj.FolderDepth - 1] = rsobj.EntryString;
            // NOTE(review): clear length is Length - (depth + 1), so pathParts[19] is
            // never cleared — presumably depth never reaches 20; confirm.
            Array.Clear(pathParts, rsobj.FolderDepth, pathParts.Length - (rsobj.FolderDepth + 1));
            var path = string.Join("", pathParts);
            LSEntryObject fileEntry;
            if (rsobj.HasPack)
            {
                // If a previous pack is open, record its final size and write its
                // end padding (0xBB), aligned up to 0x10.
                if (offsetParts.Where(x => x != null).ToArray().Length > 0)
                {
                    LSEntryObject lsentry = offsetParts.Last(x => x != null);
                    int align = lsSize.RoundUp(0x10) - lsSize;
                    // NOTE(review): padding is skipped when DTOffset == Position + 0x80;
                    // presumably this detects an adjacent/embedded pack — confirm.
                    if (lsentry.DTOffset != strm.Position + 0x80)
                        for (int i = 0; i < 0x60 + align; i++)
                            strm.WriteByte(0xbb);
                    lsentry.Size = lsSize;
                }
                // Grab the new pack's LS entry via the CRC of the path built so far,
                // then write its 0x80-byte start padding (0xCC).
                lsSize = 0;
                var crcPath = $"data/{path.TrimEnd('/') + (rsobj.Compressed ? "/packed" : "")}";
                var crc = calc_crc(crcPath);
                // NOTE(review): TryGetValue result is ignored; a miss leaves fileEntry
                // null and the next line throws — confirm every pack path is in lsFile.
                lsFile.Entries.TryGetValue(crc, out fileEntry);
                fileEntry.DTOffset = (uint)strm.Position;
                for (int i = 0; i < 0x80; i++, lsSize++)
                    strm.WriteByte(0xcc);
            }
            else
                fileEntry = null;
            offsetParts[rsobj.FolderDepth - 1] = fileEntry;
            Array.Clear(offsetParts, rsobj.FolderDepth, offsetParts.Length - (rsobj.FolderDepth));
            // Folder entries end with "/"; only real files with a replacement on disk
            // are written. NOTE(review): entries with no file under patchFolder are never
            // written at all (original archive data is not carried over) — likely part of
            // why this is marked "almost works".
            if (!path.EndsWith("/"))
                if (File.Exists($"{patchFolder}/{path}"))
                {
                    LSEntryObject lsentry = offsetParts.Last(x => x != null);
                    Console.WriteLine($"{patchFolder}/{path}");
                    Logstream.WriteLine($"{patchFolder}/{path}");
                    byte[] raw = File.ReadAllBytes($"{patchFolder}/{path}");
                    byte[] compressed = Util.Compress(raw);
                    int align = compressed.Length.RoundUp(0x10) - compressed.Length;
                    long off = strm.Position - lsentry.DTOffset; // offset of this file within its pack
                    strm.Write(compressed, 0, compressed.Length);
                    // Write the 0xCC file border, padded up to 0x10 alignment.
                    for (int i = 0; i < 0x20 + align; i++)
                        strm.WriteByte(0xcc);
                    lsSize += compressed.Length + 0x20 + align;
                    rsobj.CmpSize = compressed.Length;
                    rsobj.DecSize = raw.Length;
                    rsobj.OffInPack = (uint)off;
                }
        }
        // Finalize the last pack: the loop above only closes a pack when the next one
        // starts, so the trailing pack's entry is patched up here.
        var entry = offsetParts.Last(x => x != null);
        entry.DTOffset = (uint)strm.Position - (uint)lsSize;
        entry.Size = lsSize;

        // Update resource and LS files: recompress the edited resource table.
        rfFile.UpdateEntries();
        byte[] dec = rfFile._workingSource.Slice((int)rfFile.Header.HeaderLen1, (int)(rfFile._workingSource.Length - rfFile.Header.HeaderLen1));
        byte[] cmp = Util.Compress(dec);
        rfFile.Header.CompressedLen = (uint)cmp.Length;
        rfFile.Header.DecompressedLen = (uint)dec.Length;
        byte[] header = rfFile.Header.ToArray();
        byte[] full = header.Concat(cmp).ToArray();

        // Append the rebuilt resource table to the new DT file and point every
        // language variant's LS entry at it.
        long rOff = strm.Position;
        strm.Write(full, 0, full.Length);
        rfFile._workingSource.Close();
        lsFile.Entries[calc_crc("resource")].Size = full.Length;
        lsFile.Entries[calc_crc("resource")].DTOffset = (uint)rOff;
        lsFile.Entries[calc_crc("resource(us_en)")].Size = full.Length;
        lsFile.Entries[calc_crc("resource(us_en)")].DTOffset = (uint)rOff;
        lsFile.Entries[calc_crc("resource(us_fr)")].Size = full.Length;
        lsFile.Entries[calc_crc("resource(us_fr)")].DTOffset = (uint)rOff;
        lsFile.Entries[calc_crc("resource(us_sp)")].Size = full.Length;
        lsFile.Entries[calc_crc("resource(us_sp)")].DTOffset = (uint)rOff;
        lsFile.UpdateEntries();
    }
    // Clean up temp extraction files.
    if (File.Exists("resource")) File.Delete("resource");
    if (File.Exists("resource.dec")) File.Delete("resource.dec");
}
// Rebuilding attempts. Almost works, but needs to handle embedded LS entries.
/// <summary>
/// Rebuilds the DT archive from scratch into a new file named "dt_rebuild": walks the
/// resource table, writes each pack's files (taken from <paramref name="patchFolder"/>)
/// with 0xCC file borders and 0xBB pack end padding, then appends the recompressed
/// resource table and repoints the LS entries at it. (Braced duplicate of the
/// brace-less RebuildArchive variant also present in this file.)
/// </summary>
/// <param name="patchFolder">Root folder whose file tree mirrors the archive paths.</param>
private static unsafe void RebuildArchive(string patchFolder)
{
    // Extract and decompress the master resource table to a temp file so RFFile can parse it.
    LSEntryObject _resMain = lsFile.Entries[calc_crc("resource")];
    byte[] resource = GetFileDataDecompressed(_resMain.DTOffset + (uint)_resMain.PaddingLength, _resMain.Size, _resMain.DTIndex);
    File.WriteAllBytes("resource", resource);
    Console.WriteLine($"Rebuilding archive...");
    RFFile rfFile = new RFFile("resource");

    // pathParts/offsetParts track the current folder chain by depth while walking the
    // flat entry list; offsetParts holds the LS entry of each enclosing pack.
    var pathParts = new string[20];
    var offsetParts = new LSEntryObject[20];
    using (FileStream strm = File.Create("dt_rebuild"))
    {
        int lsSize = 0; // byte count of the pack currently being written
        foreach (ResourceEntryObject rsobj in rfFile.ResourceEntries.Where(rsobj => rsobj != null))
        {
            pathParts[rsobj.FolderDepth - 1] = rsobj.EntryString;
            // NOTE(review): clear length is Length - (depth + 1), so pathParts[19] is
            // never cleared — presumably depth never reaches 20; confirm.
            Array.Clear(pathParts, rsobj.FolderDepth, pathParts.Length - (rsobj.FolderDepth + 1));
            var path = string.Join("", pathParts);
            LSEntryObject fileEntry;
            if (rsobj.HasPack)
            {
                // If a previous pack is open, record its final size and write its
                // end padding (0xBB), aligned up to 0x10.
                if (offsetParts.Where(x => x != null).ToArray().Length > 0)
                {
                    LSEntryObject lsentry = offsetParts.Last(x => x != null);
                    int align = lsSize.RoundUp(0x10) - lsSize;
                    // NOTE(review): padding is skipped when DTOffset == Position + 0x80;
                    // presumably this detects an adjacent/embedded pack — confirm.
                    if (lsentry.DTOffset != strm.Position + 0x80)
                    {
                        for (int i = 0; i < 0x60 + align; i++)
                        {
                            strm.WriteByte(0xbb);
                        }
                    }
                    lsentry.Size = lsSize;
                }
                // Grab the new pack's LS entry via the CRC of the path built so far,
                // then write its 0x80-byte start padding (0xCC).
                lsSize = 0;
                var crcPath = $"data/{path.TrimEnd('/') + (rsobj.Compressed ? "/packed" : "")}";
                var crc = calc_crc(crcPath);
                // NOTE(review): TryGetValue result is ignored; a miss leaves fileEntry
                // null and the next line throws — confirm every pack path is in lsFile.
                lsFile.Entries.TryGetValue(crc, out fileEntry);
                fileEntry.DTOffset = (uint)strm.Position;
                for (int i = 0; i < 0x80; i++, lsSize++)
                {
                    strm.WriteByte(0xcc);
                }
            }
            else
            {
                fileEntry = null;
            }
            offsetParts[rsobj.FolderDepth - 1] = fileEntry;
            Array.Clear(offsetParts, rsobj.FolderDepth, offsetParts.Length - (rsobj.FolderDepth));
            // Folder entries end with "/"; only real files with a replacement on disk
            // are written. NOTE(review): entries with no file under patchFolder are never
            // written at all (original archive data is not carried over) — likely part of
            // why this is marked "almost works".
            if (!path.EndsWith("/"))
            {
                if (File.Exists($"{patchFolder}/{path}"))
                {
                    LSEntryObject lsentry = offsetParts.Last(x => x != null);
                    Console.WriteLine($"{patchFolder}/{path}");
                    Logstream.WriteLine($"{patchFolder}/{path}");
                    byte[] raw = File.ReadAllBytes($"{patchFolder}/{path}");
                    byte[] compressed = Util.Compress(raw);
                    int align = compressed.Length.RoundUp(0x10) - compressed.Length;
                    long off = strm.Position - lsentry.DTOffset; // offset of this file within its pack
                    strm.Write(compressed, 0, compressed.Length);
                    // Write the 0xCC file border, padded up to 0x10 alignment.
                    for (int i = 0; i < 0x20 + align; i++)
                    {
                        strm.WriteByte(0xcc);
                    }
                    lsSize += compressed.Length + 0x20 + align;
                    rsobj.CmpSize = compressed.Length;
                    rsobj.DecSize = raw.Length;
                    rsobj.OffInPack = (uint)off;
                }
            }
        }
        // Finalize the last pack: the loop above only closes a pack when the next one
        // starts, so the trailing pack's entry is patched up here.
        var entry = offsetParts.Last(x => x != null);
        entry.DTOffset = (uint)strm.Position - (uint)lsSize;
        entry.Size = lsSize;

        // Update resource and LS files: recompress the edited resource table.
        rfFile.UpdateEntries();
        byte[] dec = rfFile._workingSource.Slice((int)rfFile.Header.HeaderLen1, (int)(rfFile._workingSource.Length - rfFile.Header.HeaderLen1));
        byte[] cmp = Util.Compress(dec);
        rfFile.Header.CompressedLen = (uint)cmp.Length;
        rfFile.Header.DecompressedLen = (uint)dec.Length;
        byte[] header = rfFile.Header.ToArray();
        byte[] full = header.Concat(cmp).ToArray();

        // Append the rebuilt resource table to the new DT file and point every
        // language variant's LS entry at it.
        long rOff = strm.Position;
        strm.Write(full, 0, full.Length);
        rfFile._workingSource.Close();
        lsFile.Entries[calc_crc("resource")].Size = full.Length;
        lsFile.Entries[calc_crc("resource")].DTOffset = (uint)rOff;
        lsFile.Entries[calc_crc("resource(us_en)")].Size = full.Length;
        lsFile.Entries[calc_crc("resource(us_en)")].DTOffset = (uint)rOff;
        lsFile.Entries[calc_crc("resource(us_fr)")].Size = full.Length;
        lsFile.Entries[calc_crc("resource(us_fr)")].DTOffset = (uint)rOff;
        lsFile.Entries[calc_crc("resource(us_sp)")].Size = full.Length;
        lsFile.Entries[calc_crc("resource(us_sp)")].DTOffset = (uint)rOff;
        lsFile.UpdateEntries();
    }
    // Clean up temp extraction files.
    if (File.Exists("resource"))
    {
        File.Delete("resource");
    }
    if (File.Exists("resource.dec"))
    {
        File.Delete("resource.dec");
    }
}
/// <summary>
/// Patches files from <paramref name="patchFolder"/> into the archive section named
/// <paramref name="resourceString"/> in place, then recompresses and rewrites the
/// resource table. Replacement files must compress to no more than the original
/// compressed size; larger replacements are skipped with a console message.
/// </summary>
/// <param name="resourceString">Name of the resource entry to patch (e.g. "resource").</param>
/// <param name="patchFolder">Root folder whose file tree mirrors the archive paths.</param>
private static unsafe void PatchArchive(string resourceString, string patchFolder)
{
    // Extract and decompress the resource table to a temp file so RFFile can parse it.
    LSEntryObject _resource = lsFile.Entries[calc_crc(resourceString)];
    byte[] resource = GetFileDataDecompressed(_resource.DTOffset + (uint)_resource.PaddingLength, _resource.Size, _resource.DTIndex);
    File.WriteAllBytes(resourceString, resource);
    Console.WriteLine($"Patching {resourceString}");
    RFFile rfFile = new RFFile(resourceString);

    // pathParts/offsetParts track the current folder chain by depth while walking the
    // flat entry list; offsetParts holds the LS entry of each enclosing pack.
    var pathParts = new string[20];
    var offsetParts = new LSEntryObject[20];
    foreach (ResourceEntryObject rsobj in rfFile.ResourceEntries)
    {
        if (rsobj == null) continue;
        pathParts[rsobj.FolderDepth - 1] = rsobj.EntryString;
        // NOTE(review): clear length is Length - (depth + 1), so pathParts[19] is
        // never cleared — presumably depth never reaches 20; confirm.
        Array.Clear(pathParts, rsobj.FolderDepth, pathParts.Length - (rsobj.FolderDepth + 1));
        var path = string.Join("", pathParts);
        LSEntryObject fileEntry;
        if (rsobj.HasPack)
        {
            // Packed containers hash as "data/<path>/packed"; plain ones as "data/<path>".
            var crcPath = $"data/{path.TrimEnd('/') + (rsobj.Compressed ? "/packed" : "")}";
            var crc = calc_crc(crcPath);
            lsFile.Entries.TryGetValue(crc, out fileEntry); // fileEntry stays null on a miss
        }
        else
            fileEntry = null;
        offsetParts[rsobj.FolderDepth - 1] = fileEntry;
        Array.Clear(offsetParts, rsobj.FolderDepth, offsetParts.Length - (rsobj.FolderDepth + 1));
        // Folder entries end with "/"; only real files with a replacement on disk are patched.
        if (!path.EndsWith("/"))
            if (File.Exists($"{patchFolder}/{path}"))
            {
                Console.WriteLine($"Patch found: {patchFolder}/{path}");
                Logstream.WriteLine($"Patch found: {patchFolder}/{path}");
                LSEntryObject lsentry = offsetParts.Last(x => x != null);
                byte[] raw = File.ReadAllBytes($"{patchFolder}/{path}");
                byte[] compressed = Util.Compress(raw);
                // FIX: was "> rsobj.CmpSize + 1", which accepted a replacement one byte
                // larger than the original span and wrote past its end.
                if (compressed.Length > rsobj.CmpSize)
                {
                    Console.WriteLine("Patching files larger than original not yet supported, skipping");
                    continue;
                }
                // FIX: capture the original compressed size BEFORE overwriting CmpSize.
                // Previously truncateBytes was computed from the freshly assigned value,
                // so it was always 0 and the 0xCC fill below never ran, leaving stale
                // tail bytes of the old compressed data in the pack.
                int previousCmpSize = (int)rsobj.CmpSize;
                rsobj.CmpSize = compressed.Length;
                rsobj.DecSize = raw.Length;
                // Map the pack's chunk of the DT file and write the new data in place.
                uint difference = 0;
                DataSource src = GetFileChunk(lsentry.DTOffset, lsentry.Size, lsentry.DTIndex, out difference);
                VoidPtr addr = src.Address + difference;
                addr += rsobj.OffInPack;
                for (int i = 0; i < compressed.Length; i++)
                    *(byte*)(addr + i) = compressed[i];
                // Write 0xCC over the unused tail of the original compressed span.
                addr += compressed.Length;
                int truncateBytes = previousCmpSize - compressed.Length;
                for (int i = 0; i < truncateBytes; i++)
                    *(byte*)(addr + i) = 0xCC;
                src.Close();
            }
    }
    // Update resource and LS files: recompress the edited resource table and record its size.
    rfFile.UpdateEntries();
    byte[] dec = rfFile._workingSource.Slice((int)rfFile.Header.HeaderLen1, (int)(rfFile._workingSource.Length - rfFile.Header.HeaderLen1));
    byte[] cmp = Util.Compress(dec);
    rfFile.Header.CompressedLen = (uint)cmp.Length;
    rfFile.Header.DecompressedLen = (uint)dec.Length;
    byte[] header = rfFile.Header.ToArray();
    byte[] full = header.Concat(cmp).ToArray();
    lsFile.Entries[calc_crc(resourceString)].Size = full.Length;
    lsFile.UpdateEntries();
    // Patch the resource data back into the DT file.
    // NOTE(review): nothing here guards against "full" exceeding the original chunk
    // (_resource.Size) — confirm upstream guarantees the recompressed table fits.
    uint diff;
    DataSource rSource = GetFileChunk(_resource.DTOffset, _resource.Size, _resource.DTIndex, out diff);
    VoidPtr curAddr = rSource.Address + diff;
    for (int i = 0; i < full.Length; i++)
    {
        *(byte*)(curAddr + i) = full[i];
    }
    rSource.Close();
    rfFile._workingSource.Close();
    // Clean up temp extraction files.
    if (File.Exists(resourceString)) File.Delete(resourceString);
    if (File.Exists(resourceString + ".dec")) File.Delete(resourceString + ".dec");
}
/// <summary>
/// Patches files from <paramref name="patchFolder"/> into the archive section named
/// <paramref name="resourceString"/> in place, then recompresses and rewrites the
/// resource table. Replacement files must compress to no more than the original
/// compressed size; larger replacements are skipped with a console message.
/// (Braced duplicate of the brace-less PatchArchive variant also present in this file.)
/// </summary>
/// <param name="resourceString">Name of the resource entry to patch (e.g. "resource").</param>
/// <param name="patchFolder">Root folder whose file tree mirrors the archive paths.</param>
private static unsafe void PatchArchive(string resourceString, string patchFolder)
{
    // Extract and decompress the resource table to a temp file so RFFile can parse it.
    LSEntryObject _resource = lsFile.Entries[calc_crc(resourceString)];
    byte[] resource = GetFileDataDecompressed(_resource.DTOffset + (uint)_resource.PaddingLength, _resource.Size, _resource.DTIndex);
    File.WriteAllBytes(resourceString, resource);
    Console.WriteLine($"Patching {resourceString}");
    RFFile rfFile = new RFFile(resourceString);

    // pathParts/offsetParts track the current folder chain by depth while walking the
    // flat entry list; offsetParts holds the LS entry of each enclosing pack.
    var pathParts = new string[20];
    var offsetParts = new LSEntryObject[20];
    foreach (ResourceEntryObject rsobj in rfFile.ResourceEntries)
    {
        if (rsobj == null)
        {
            continue;
        }
        pathParts[rsobj.FolderDepth - 1] = rsobj.EntryString;
        // NOTE(review): clear length is Length - (depth + 1), so pathParts[19] is
        // never cleared — presumably depth never reaches 20; confirm.
        Array.Clear(pathParts, rsobj.FolderDepth, pathParts.Length - (rsobj.FolderDepth + 1));
        var path = string.Join("", pathParts);
        LSEntryObject fileEntry;
        if (rsobj.HasPack)
        {
            // Packed containers hash as "data/<path>/packed"; plain ones as "data/<path>".
            var crcPath = $"data/{path.TrimEnd('/') + (rsobj.Compressed ? "/packed" : "")}";
            var crc = calc_crc(crcPath);
            lsFile.Entries.TryGetValue(crc, out fileEntry); // fileEntry stays null on a miss
        }
        else
        {
            fileEntry = null;
        }
        offsetParts[rsobj.FolderDepth - 1] = fileEntry;
        Array.Clear(offsetParts, rsobj.FolderDepth, offsetParts.Length - (rsobj.FolderDepth + 1));
        // Folder entries end with "/"; only real files with a replacement on disk are patched.
        if (!path.EndsWith("/"))
        {
            if (File.Exists($"{patchFolder}/{path}"))
            {
                Console.WriteLine($"Patch found: {patchFolder}/{path}");
                Logstream.WriteLine($"Patch found: {patchFolder}/{path}");
                LSEntryObject lsentry = offsetParts.Last(x => x != null);
                byte[] raw = File.ReadAllBytes($"{patchFolder}/{path}");
                byte[] compressed = Util.Compress(raw);
                // FIX: was "> rsobj.CmpSize + 1", which accepted a replacement one byte
                // larger than the original span and wrote past its end.
                if (compressed.Length > rsobj.CmpSize)
                {
                    Console.WriteLine("Patching files larger than original not yet supported, skipping");
                    continue;
                }
                // FIX: capture the original compressed size BEFORE overwriting CmpSize.
                // Previously truncateBytes was computed from the freshly assigned value,
                // so it was always 0 and the 0xCC fill below never ran, leaving stale
                // tail bytes of the old compressed data in the pack.
                int previousCmpSize = (int)rsobj.CmpSize;
                rsobj.CmpSize = compressed.Length;
                rsobj.DecSize = raw.Length;
                // Map the pack's chunk of the DT file and write the new data in place.
                uint difference = 0;
                DataSource src = GetFileChunk(lsentry.DTOffset, lsentry.Size, lsentry.DTIndex, out difference);
                VoidPtr addr = src.Address + difference;
                addr += rsobj.OffInPack;
                for (int i = 0; i < compressed.Length; i++)
                {
                    *(byte *)(addr + i) = compressed[i];
                }
                // Write 0xCC over the unused tail of the original compressed span.
                addr += compressed.Length;
                int truncateBytes = previousCmpSize - compressed.Length;
                for (int i = 0; i < truncateBytes; i++)
                {
                    *(byte *)(addr + i) = 0xCC;
                }
                src.Close();
            }
        }
    }
    // Update resource and LS files: recompress the edited resource table and record its size.
    rfFile.UpdateEntries();
    byte[] dec = rfFile._workingSource.Slice((int)rfFile.Header.HeaderLen1, (int)(rfFile._workingSource.Length - rfFile.Header.HeaderLen1));
    byte[] cmp = Util.Compress(dec);
    rfFile.Header.CompressedLen = (uint)cmp.Length;
    rfFile.Header.DecompressedLen = (uint)dec.Length;
    byte[] header = rfFile.Header.ToArray();
    byte[] full = header.Concat(cmp).ToArray();
    lsFile.Entries[calc_crc(resourceString)].Size = full.Length;
    lsFile.UpdateEntries();
    // Patch the resource data back into the DT file.
    // NOTE(review): nothing here guards against "full" exceeding the original chunk
    // (_resource.Size) — confirm upstream guarantees the recompressed table fits.
    uint diff;
    DataSource rSource = GetFileChunk(_resource.DTOffset, _resource.Size, _resource.DTIndex, out diff);
    VoidPtr curAddr = rSource.Address + diff;
    for (int i = 0; i < full.Length; i++)
    {
        *(byte *)(curAddr + i) = full[i];
    }
    rSource.Close();
    rfFile._workingSource.Close();
    // Clean up temp extraction files.
    if (File.Exists(resourceString))
    {
        File.Delete(resourceString);
    }
    if (File.Exists(resourceString + ".dec"))
    {
        File.Delete(resourceString + ".dec");
    }
}