public void HybridChunkWriter_ContainsFilesTest()
{
    // ContainsFiles should flip from false to true once a subfile is added.
    Mock<IArchiveContainer> containerMock = new Mock<IArchiveContainer>();
    HybridChunkWriter writer = new HybridChunkWriter(666, ArchiveChunkCompression.Uncompressed, containerMock.Object, 0, Core.Settings.DefaultEncodingConversions);

    Assert.IsFalse(writer.ContainsFiles);

    byte[] data = new byte[16];
    Subfile subfileMock = new Subfile(new MemorySource(data), "", "", ArchiveFileType.Raw);
    writer.AddFile(subfileMock);

    Assert.IsTrue(writer.ContainsFiles);
}
public void AppendMetadataTest()
{
    var ppx = TestCommon.GeneratePPX();

    ExtendedArchiveAppender appender = new ExtendedArchiveAppender(ppx);
    appender.Name = "test-appended";

    var file1 = new Subfile(new MemorySource(TestData), "test3-2", "t", ArchiveFileType.Raw);
    var file2 = new Subfile(new MemorySource(Encoding.UTF8.GetBytes("short data")), "test4-2", "t", ArchiveFileType.Raw);
    var file3 = appender.BaseArchive.RawFiles.Find(x => x.Name == "test00");

    appender.FilesToAdd.Add(file1);
    appender.FilesToAdd.Add(file2);
    appender.FilesToRemove.Add(file3);
    appender.Write();

    // Reload the archive from disk and verify the appended metadata and file set.
    ppx = new ExtendedArchive(ppx.Filename);

    Assert.AreEqual("test-appended", ppx.Title);
    // 32 original files + 2 added - 1 removed = 33.
    Assert.AreEqual(33, ppx.Files.Count);
    Assert.IsTrue(ppx.Files.Any(x => x.Name == file1.Name));
    Assert.IsTrue(ppx.Files.Any(x => x.Name == file2.Name));
    Assert.IsFalse(ppx.Files.Any(x => x.Name == file3.Name));

    AssertCRC(ppx);

    TestCommon.TeardownPPX(ppx);
}
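// AssertCRC is not defined in this excerpt. A minimal sketch of the kind of check it
// presumably performs, using the one CRC entry point visible here
// (PPeX.External.CRC32.CRC32.Compute over a Stream, as in HybridChunkWriter_CompressTest
// below): recompute a payload's CRC and compare it with a stored value. This helper
// and its storedCrc parameter are hypothetical, not part of the original suite.
private static void AssertCrcMatches(byte[] payload, uint storedCrc)
{
    using (var ms = new MemoryStream(payload))
    {
        Assert.AreEqual(storedCrc, PPeX.External.CRC32.CRC32.Compute(ms));
    }
}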
public void HybridChunkWriter_TryAddFileTest()
{
    Mock<IArchiveContainer> containerMock = new Mock<IArchiveContainer>();
    // Chunk size limit of 20 bytes: two identical 16-byte files should be deduplicated
    // and accepted, while a distinct 32-byte file should be rejected.
    HybridChunkWriter writer = new HybridChunkWriter(666, ArchiveChunkCompression.Uncompressed, containerMock.Object, 20, Core.Settings.DefaultEncodingConversions);

    byte[] data = new byte[16];
    Subfile subfileMock = new Subfile(new MemorySource(data), "1", "1");
    Subfile subfileMock2 = new Subfile(new MemorySource(data), "2", "1");

    Assert.IsTrue(writer.TryAddFile(subfileMock));
    Assert.IsTrue(writer.TryAddFile(subfileMock2), "Writer did not recognize a duplicate subfile entry.");

    byte[] data2 = new byte[32];
    Subfile subfileMock3 = new Subfile(new MemorySource(data2), "3", "2");
    Assert.IsFalse(writer.TryAddFile(subfileMock3), "Writer did not reject a subfile that went over the chunk size limit.");
}
public static ExtendedArchive GeneratePPX()
{
    // Generate random file data.
    Random random = new Random();
    TestData = new byte[filecount][];
    for (int i = 0; i < filecount; i++)
    {
        TestData[i] = new byte[512];
        random.NextBytes(TestData[i]);
    }

    string name = "test_ppx" + random.Next(0, 255).ToString();

    // Create the archive with a small chunk limit so multiple chunks are produced.
    var writer = new ExtendedArchiveWriter(name, true);
    writer.ChunkSizeLimit = 1024;

    // Add files plus duplicates (8 unique payloads x 4 copies each = 32 files).
    for (int i = 0; i < 8; i++)
    {
        for (int d = 0; d < 4; d++)
        {
            ISubfile file = new Subfile(new MemorySource(TestData[i]), $"test{i}{d}", "test", ArchiveFileType.Raw);
            writer.Files.Add(file);
        }
    }

    using (FileStream arc = new FileStream($"{name}.ppx", FileMode.Create))
        writer.Write(arc);

    return new ExtendedArchive($"{name}.ppx");
}
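// A minimal sketch (not part of the original suite) of how a test would typically
// consume GeneratePPX; TeardownPPX is assumed to delete the temporary .ppx file
// created above, so the try/finally keeps the working directory clean on failure.
public void GeneratePPX_UsageSketch()
{
    var ppx = TestCommon.GeneratePPX();
    try
    {
        // 8 unique payloads x 4 copies each = 32 file entries.
        Assert.AreEqual(32, ppx.Files.Count);
    }
    finally
    {
        TestCommon.TeardownPPX(ppx);
    }
}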
/// <summary>
/// Builds the patched byte content for subfile <paramref name="id"/>, or returns
/// null (and increments <paramref name="errors"/>) when the file cannot be patched.
/// </summary>
public abstract byte[] Data(uint id, shared.DatFile dat, SQLiteConnection con, Subfile subfile, PatchContentType contentType, ref int errors);
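// A minimal conforming override, sketched for illustration (the enclosing class is
// hypothetical; it would derive from the abstract base declaring Data above). It shows
// the contract the overrides below follow: increment errors and return null when
// patching is impossible; otherwise return the rebuilt bytes (here, trivially, the
// original subfile's bytes).
public override byte[] Data(uint id, shared.DatFile dat, SQLiteConnection con, Subfile subfile, PatchContentType contentType, ref int errors)
{
    if (!dat.SubfileInfo.ContainsKey(id) || !subfile.IsValid)
    {
        ++errors;
        return null;
    }

    // Pass the original content through unchanged.
    return subfile.Data;
}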
public override byte[] Data(uint id, shared.DatFile dat, SQLiteConnection con, Subfile subfile, PatchContentType contentType, ref int errors)
{
    if (!dat.SubfileInfo.ContainsKey(id))
    {
        ++errors;
        Logger.Write($"file #{id} does not exist in the game resources, skipping.");
        return null;
    }

    var blob = Database.GetBlob(con, id);
    if (blob == null)
    {
        ++errors;
        Logger.Write($"file #{id} could not be read from the patch, skipping.");
        return null;
    }

    using (var ms = new MemoryStream())
    {
        using (var sw = new BinaryWriter(ms, Encoding.Unicode))
        {
            if (!subfile.IsValid)
            {
                ++errors;
                return null;
            }

            switch (contentType)
            {
                case PatchContentType.Image:
                    // Copy the first 20 header bytes from the original, then the new payload length.
                    for (var i = 0; i < 20; ++i)
                    {
                        sw.Write(subfile.Data[i]);
                    }
                    sw.Write(blob.Length);
                    break;
                case PatchContentType.Sound:
                    // Copy the first 4 header bytes; the stored length excludes an 8-byte prefix.
                    for (var i = 0; i < 4; ++i)
                    {
                        sw.Write(subfile.Data[i]);
                    }
                    sw.Write(blob.Length - 8);
                    break;
                case PatchContentType.Texture:
                    // TODO: handle modified width/height.
                    // Copy the first 24 header bytes and skip the first 128 bytes of the patch blob.
                    for (var i = 0; i < 24; ++i)
                    {
                        sw.Write(subfile.Data[i]);
                    }
                    blob = blob.Skip(128).ToArray();
                    break;
            }

            sw.Write(blob);
        }

        return ms.ToArray();
    }
}
public void HybridChunkWriter_CompressTest()
{
    // Two mock compressors: only the one matching the chunk's compression setting should run.
    Mock<BaseCompressor> compressorMock = new Mock<BaseCompressor>();
    compressorMock.CallBase = true;
    compressorMock.Setup(x => x.WriteToStream(It.IsAny<Stream>(), It.IsAny<Stream>()))
        .Callback((Stream i, Stream o) => i.CopyTo(o));
    compressorMock.Setup(x => x.Compression).Returns(ArchiveChunkCompression.Uncompressed);

    Mock<BaseCompressor> compressorMock2 = new Mock<BaseCompressor>();
    compressorMock2.CallBase = true;
    compressorMock2.Setup(x => x.WriteToStream(It.IsAny<Stream>(), It.IsAny<Stream>()))
        .Callback((Stream i, Stream o) => i.CopyTo(o));
    compressorMock2.Setup(x => x.Compression).Returns(ArchiveChunkCompression.Zstandard);

    Mock<IArchiveContainer> containerMock = new Mock<IArchiveContainer>();
    HybridChunkWriter writer = new HybridChunkWriter(666, ArchiveChunkCompression.Uncompressed, containerMock.Object, 0, Core.Settings.DefaultEncodingConversions);

    byte[] data = new byte[16];
    Subfile subfileMock = new Subfile(new MemorySource(data), "1", "1");
    Subfile subfileMock2 = new Subfile(new MemorySource(data), "2", "1");
    writer.AddFile(subfileMock);
    writer.AddFile(subfileMock2);

    byte[] data2 = new byte[32];
    Subfile subfileMock3 = new Subfile(new MemorySource(data2), "3", "2");
    writer.AddFile(subfileMock3);

    writer.Compress(new List<ICompressor> { compressorMock.Object, compressorMock2.Object });

    compressorMock.Verify(x => x.WriteToStream(It.IsAny<Stream>(), It.IsAny<Stream>()), Times.AtLeastOnce);
    compressorMock2.Verify(x => x.WriteToStream(It.IsAny<Stream>(), It.IsAny<Stream>()), Times.Never);

    Assert.IsNotNull(writer.CompressedStream);
    // The duplicate 16-byte files are stored once: 16 + 32 bytes total.
    Assert.IsTrue(writer.CompressedStream.Length == 16 + 32);

    Assert.IsNotNull(writer.Receipt);
    Assert.AreEqual(ArchiveChunkCompression.Uncompressed, writer.Receipt.Compression);
    Assert.AreEqual((uint)(16 + 32), writer.Receipt.CompressedSize);
    Assert.AreEqual((uint)(16 + 32), writer.Receipt.UncompressedSize);
    Assert.AreEqual((ulong)0, writer.Receipt.FileOffset);
    Assert.AreEqual((uint)666, writer.Receipt.ID);
    Assert.AreEqual(PPeX.External.CRC32.CRC32.Compute(writer.CompressedStream), writer.Receipt.CRC);

    Assert.AreEqual(3, writer.Receipt.FileReceipts.Count);
    foreach (var receipt in writer.Receipt.FileReceipts)
    {
        Assert.IsNotNull(receipt.InternalName);
        Assert.AreNotEqual("", receipt.InternalName);
        Assert.AreNotEqual("", receipt.EmulatedName);
    }
}
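// The selection behavior the test above pins down, restated in isolation (an
// illustrative sketch, not the writer's actual source): from the supplied list,
// only the compressor whose Compression matches the chunk's setting is used.
// This assumes ICompressor exposes the Compression property mocked above.
static ICompressor PickCompressor(IEnumerable<ICompressor> compressors, ArchiveChunkCompression target)
{
    // The first compressor advertising the target algorithm wins.
    return compressors.First(c => c.Compression == target);
}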
public override byte[] Data(uint id, shared.DatFile dat, SQLiteConnection con, Subfile subfile, PatchContentType contentType, ref int errors)
{
    if (!dat.SubfileInfo.ContainsKey(id))
    {
        ++errors;
        Logger.Write($"file #{id} does not exist in the game resources, skipping.");
        return null;
    }

    var fragments = Database.GetContent(con, id);
    if (fragments.Count == 0)
    {
        ++errors;
        Logger.Write($"file #{id} is empty, skipping.");
        return null;
    }

    if (!subfile.IsValid)
    {
        ++errors;
        return null;
    }

    var orig = new TextSubfile(subfile.Data);
    if (!orig.IsValid)
    {
        ++errors;
        Logger.Write($"error reading file #{id}, skipping the whole file.");
        return null;
    }

    var skippedFragmentsCount = fragments.Count(fragment => !orig.Body.ContainsKey(fragment.Key));
    if (skippedFragmentsCount == fragments.Count)
    {
        ++errors;
        Logger.Write($"no fragment is suitable for rewriting file #{id}, skipping the whole file.");
        return null;
    }

    if (skippedFragmentsCount > 0)
    {
        Logger.Write($"{skippedFragmentsCount} unrecognized fragments in the patch for file #{id} will be skipped.");
    }

    using (var ms = new MemoryStream())
    {
        using (var sw = new BinaryWriter(ms, Encoding.Unicode))
        {
            //Logger.Write($"Kek #{orig.Head.Unknown}");
            sw.Write(id);
            sw.Write(orig.Head.Unknown);
            //sw.Write(0);
            sw.Write(orig.Head.Unknown2);
            ByteOrShort(sw, orig.Head.Fragments);

            var missingFragmentsCount = 0;
            foreach (var body in orig.Body)
            {
                // Detect possible errors; fall back to the original fragment on any of them.
                var useDefault = false;

                // The patch does not contain data for this fragment.
                if (!fragments.ContainsKey(body.Key))
                {
                    missingFragmentsCount++;
                    useDefault = true;
                    Logger.Write($"file #{id}, fragment #{body.Key}: missing from the patch, using the original.");
                }

                // The patch fragment's arguments contain an error.
                if (!useDefault && fragments[body.Key].IsArgsError)
                {
                    missingFragmentsCount++;
                    useDefault = true;
                    Logger.Write($"file #{id}, fragment #{body.Key}: error in the arguments line, using the original.");
                }

                // The patch fragment's argument order contains an error.
                if (!useDefault && fragments[body.Key].IsOrderError)
                {
                    missingFragmentsCount++;
                    useDefault = true;
                    Logger.Write($"file #{id}, fragment #{body.Key}: error in the order line, using the original.");
                }

                // The patch fragment's text contains an error.
                if (!useDefault && !fragments[body.Key].IsValid)
                {
                    missingFragmentsCount++;
                    useDefault = true;
                    Logger.Write($"file #{id}, fragment #{body.Key}: error in the content, using the original.");
                }

                var pieces = useDefault
                    ? body.Value.Pieces
                    : fragments[body.Key].Content.Split(new[] { "<--DO_NOT_TOUCH!-->" }, StringSplitOptions.None);

                // The patch argument order was intentionally emptied; force non-default order.
                if (!useDefault && fragments[body.Key].ArgsOrder.Length == 0 && fragments[body.Key].Args.Length > 0 && pieces.Length == 1)
                {
                    fragments[body.Key].IsDefaultOrder = false;
                }

                // Broken relation between the argument count and the placeholder slots.
                if (!useDefault &&
                    (!fragments[body.Key].IsDefaultOrder && fragments[body.Key].ArgsOrder.Length != pieces.Length - 1 ||
                     fragments[body.Key].IsDefaultOrder && fragments[body.Key].Args.Length != pieces.Length - 1))
                {
                    missingFragmentsCount++;
                    useDefault = true;
                    pieces = body.Value.Pieces;
                    Logger.Write($"file #{id}, fragment #{body.Key}: error in the order line, using the original.");
                }

                // Write the token.
                sw.Write(body.Key);

                // Write the text pieces.
                sw.Write(pieces.Length);
                foreach (var piece in pieces)
                {
                    var length = (short)piece.Length;
                    ByteOrShort(sw, length);
                    sw.Write(Encoding.Unicode.GetBytes(piece));
                }

                if (useDefault)
                {
                    var args = body.Value.Arguments;
                    sw.Write(args.Length);
                    foreach (var arg in args)
                    {
                        sw.Write(arg);
                    }
                }
                else
                {
                    if (fragments[body.Key].IsDefaultOrder)
                    {
                        // Use the patch's default arguments (the order line is ignored or empty).
                        var args = fragments[body.Key].Args;
                        sw.Write(args.Length);
                        foreach (var arg in args)
                        {
                            sw.Write(arg);
                        }
                    }
                    else
                    {
#if DEBUG
                        if (fragments[body.Key].IsExtraOrder)
                        {
                            Logger.Write($"fragment #{body.Key} of file #{id}: the argument order is longer than the original.");
                        }
#endif
                        // Use the patch's alternate argument order.
                        sw.Write(fragments[body.Key].ArgsOrder.Length);
                        foreach (var ord in fragments[body.Key].ArgsOrder)
                        {
                            sw.Write(fragments[body.Key].Args[ord - 1]);
                        }
                    }
                }

                // Use the original variables.
                var vars = body.Value.Variables;
                sw.Write((byte)vars.Length);
                foreach (var pack in vars)
                {
                    sw.Write(pack.Length);
                    foreach (var rice in pack)
                    {
                        var length = (short)rice.Length;
                        ByteOrShort(sw, length);
                        sw.Write(Encoding.Unicode.GetBytes(rice));
                    }
                }
            }

            if (missingFragmentsCount > 0)
            {
                Logger.Write($"{missingFragmentsCount} of {orig.Head.Fragments} fragments in file #{id} were left unchanged.");
            }
        }

        return ms.ToArray();
    }
}
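// ByteOrShort is not defined in this excerpt. A minimal sketch of what the calls
// above appear to assume: write the value as a single byte when it fits, otherwise
// as a full 16-bit value. The actual switching rule in the real codebase (for
// example, a format-version flag) may differ; this helper is hypothetical.
private static void ByteOrShort(BinaryWriter sw, short value)
{
    if (value >= 0 && value <= byte.MaxValue)
    {
        sw.Write((byte)value); // compact one-byte form
    }
    else
    {
        sw.Write(value); // full two-byte form
    }
}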