public void Write(UnityBinaryWriter writer) { writer.WriteStringToNull(""); writer.WriteBytes(Guid.ToByteArray()); writer.WriteInt(Type); writer.WriteStringToNull(PathName); }
public void Write(UnityBinaryWriter writer) {
    writer.WriteStringToNull(signature);
    writer.WriteIntBE(format);
    writer.WriteStringToNull(versionPlayer);
    writer.WriteStringToNull(versionEngine);
    writer.WriteLongBE(bundleSize);
}
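// Sketch: the *BE writers above emit big-endian values, as UnityFS bundle headers
// require. A hypothetical BCL-only equivalent (not used by this code; shown only
// to make the byte order explicit):
private static void WriteInt32BE(System.IO.Stream stream, int value) {
    System.Span<byte> tmp = stackalloc byte[4];
    System.Buffers.Binary.BinaryPrimitives.WriteInt32BigEndian(tmp, value);
    stream.Write(tmp);
}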
/// <summary>
/// Add a string to the table.
/// </summary>
/// <param name="str">String to add.</param>
/// <returns>Offset of the string.</returns>
public ushort AddString(string str) {
    // Return the existing offset if the string is already in the table
    if (offset_table.ContainsKey(str)) {
        return (ushort)offset_table[str];
    }
    // Otherwise append it to the writer and record where it starts
    offset_table[str] = _writer.Position;
    _writer.WriteStringToNull(str);
    return (ushort)offset_table[str];
}
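// Sketch: repeated strings collapse to a single offset, so each distinct string is
// written to the underlying writer exactly once (hypothetical harness around the
// method above, assuming it lives in the same class):
private void AddStringDeduplicationSketch() {
    ushort a = AddString("m_Name");
    ushort b = AddString("m_Name"); // hits the ContainsKey branch
    System.Diagnostics.Debug.Assert(a == b);
}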
public void Write(UnityBinaryWriter writer) {
    // Skip the header for now, since MetadataSize and DataOffset are still unknown
    int header_pos = writer.Position;
    writer.Position += Header.CalcSize();

    // Write Metadata
    writeMetadata(writer);
    // Write Objects
    byte[] body = writeObjects(writer);
    // Write Scripts
    writeScripts(writer);
    // Write Externals
    writeExternals(writer);
    // Write UserInformation
    writer.WriteStringToNull(UserInformation);
    Header.MetadataSize = writer.Position - Header.CalcSize();

    // Align the body: it starts at 0x1000 at the earliest, otherwise on a 16-byte boundary
    if (writer.Position < 0x1000) {
        writer.Position = 0x1000;
    } else {
        writer.Align(16);
    }
    Header.DataOffset = writer.Position;

    // Write body
    writer.WriteBytes(body);

    // Seek back and write the now-complete header
    Header.FileSize = writer.Position;
    writer.Position = header_pos;
    Header.Write(writer);
}
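// Sketch: the method above uses a reserve-then-back-patch pattern — skip the header,
// write the body, then seek back once the sizes are known. A self-contained version
// of the same pattern with plain System.IO (hypothetical helper, not part of this
// class; UnityBinaryWriter is this project's own seekable writer):
private static byte[] BackPatchSketch() {
    using var ms = new System.IO.MemoryStream();
    using var w = new System.IO.BinaryWriter(ms);
    long headerPos = ms.Position;
    w.Write(0);                          // placeholder for the body length
    w.Write(new byte[] { 1, 2, 3 });     // body
    long end = ms.Position;
    ms.Position = headerPos;
    w.Write((int)(end - headerPos - sizeof(int))); // back-patch the length
    ms.Position = end;
    return ms.ToArray();
}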
public void Write(UnityBinaryWriter writer) {
    writer.WriteStringToNull(UnityVersion);
    writer.WriteInt(TargetPlatform);
    writer.WriteByte((byte)(EnableTypeTree ? 1 : 0));
}
private void writeFiles(UnityBinaryWriter writer) {
    // Split the concatenated file data into blocks:
    //   1. Files are not aligned to block boundaries (simple concatenation)
    //   2. Maximum block size is BLOCK_SIZE

    // Calculate block count
    int totalbytes = Files.Sum(f => f.Data.Length);
    int blockcount = totalbytes / BLOCK_SIZE + (totalbytes % BLOCK_SIZE != 0 ? 1 : 0);

    // Build blockinfo
    BlockInfo[] blockinfos = new BlockInfo[blockcount];
    short blockflag = EnableCompression ? (short)2 : (short)0;
    for (int i = 0; i < blockcount; i++) {
        blockinfos[i].uncompressedSize = BLOCK_SIZE;
        blockinfos[i].compressedSize = BLOCK_SIZE;
        blockinfos[i].flag = blockflag;
    }
    // The last block may be shorter than BLOCK_SIZE
    if (totalbytes % BLOCK_SIZE != 0) {
        blockinfos[blockcount - 1].uncompressedSize = totalbytes % BLOCK_SIZE;
        blockinfos[blockcount - 1].compressedSize = totalbytes % BLOCK_SIZE;
    }

    // Sizes of the info sections (block list and file list)
    int blockinfosize = 0x10 + 4 + (4 + 4 + 2) * blockinfos.Length;
    int fileinfosize = 4 + Files.Sum(f => f.CalcInfoSize());
    int info_offset = writer.Position;

    // Write Blocks
    // If no compression is required, just copy all files
    if (!EnableCompression) {
        // Info header: compressed size, uncompressed size, flags (0x40 = combined block/dir info, uncompressed)
        writer.WriteIntBE(blockinfosize + fileinfosize);
        writer.WriteIntBE(blockinfosize + fileinfosize);
        writer.WriteIntBE(0x40);
        writer.Position += 0x10;
        // BlockInfo
        writer.WriteIntBE(blockcount);
        blockinfos.Write(writer);
        // FileInfo
        writer.WriteIntBE(Files.Length);
        int curoffset = 0;
        for (int i = 0; i < Files.Length; i++) {
            writer.WriteLongBE(curoffset);
            writer.WriteLongBE(Files[i].Data.LongLength);
            writer.WriteIntBE(4);
            writer.WriteStringToNull(Files[i].Name);
            curoffset += Files[i].Data.Length;
        }
        // Write Files
        for (int i = 0; i < Files.Length; i++) {
            writer.WriteBytes(Files[i].Data);
        }
    }
    // In compression mode, try to parallelize the compression
    else {
        // First of all, prepare a buffer for the compressed output
        byte[] compbuf = MemoryPool<AssetBundleFile>.GetBuffer(blockcount * BLOCK_SIZE);

        // Don't parallelize when the block count is small
        if (blockcount < 128) {
            byte[] boundarybuf = MiniMemoryPool<AssetBundleFile>.GetBuffer(BLOCK_SIZE);
            int remainlength = 0;
            int curblock = 0;
            for (int i = 0; i < Files.Length; i++) {
                // If the previous file left a partial block, fill it from this file and compress
                if (remainlength > 0) {
                    Buffer.BlockCopy(Files[i].Data, 0, boundarybuf, remainlength, BLOCK_SIZE - remainlength);
                    blockinfos[curblock].compressedSize =
                        TryLZ4Compress(boundarybuf, 0, compbuf, curblock * BLOCK_SIZE, BLOCK_SIZE);
                    if (blockinfos[curblock].compressedSize == BLOCK_SIZE) {
                        blockinfos[curblock].flag &= ~0x3F; // stored uncompressed
                    }
                    curblock++;
                }
                // Skip the bytes already consumed by the boundary block
                int blockstart = 0;
                if (remainlength > 0) {
                    blockstart = BLOCK_SIZE - remainlength;
                }
                // Compress full blocks
                int fullblockcount = (Files[i].Data.Length - blockstart) / BLOCK_SIZE;
                for (int j = 0; j < fullblockcount; j++, curblock++) {
                    blockinfos[curblock].compressedSize =
                        TryLZ4Compress(Files[i].Data, blockstart + j * BLOCK_SIZE, compbuf, curblock * BLOCK_SIZE, BLOCK_SIZE);
                    if (blockinfos[curblock].compressedSize == BLOCK_SIZE) {
                        blockinfos[curblock].flag &= ~0x3F; // stored uncompressed
                    }
                }
                // If the file has remaining data, buffer it for the next boundary block
                remainlength = (Files[i].Data.Length - blockstart) % BLOCK_SIZE;
                if (remainlength > 0) {
                    Buffer.BlockCopy(Files[i].Data, Files[i].Data.Length - remainlength, boundarybuf, 0, remainlength);
                }
            }
            // Process the last (partial) block
            if (remainlength > 0) {
                blockinfos[curblock].compressedSize =
                    TryLZ4Compress(boundarybuf, 0, compbuf, curblock * BLOCK_SIZE, remainlength);
                if (blockinfos[curblock].compressedSize == remainlength) {
                    blockinfos[curblock].flag &= ~0x3F; // stored uncompressed
                }
            }
        } else {
            // Build a CompressionInfo per block; boundary blocks are compressed here,
            // full blocks are deferred to the parallel loop below
            CompressionInfo[] compinfos = new CompressionInfo[blockcount];
            byte[] boundarybuf = MiniMemoryPool<AssetBundleFile>.GetBuffer(BLOCK_SIZE);
            int curblock = 0;
            int remainlength = 0;
            for (int i = 0; i < Files.Length; i++) {
                // If the previous file left a partial block, fill it from this file and compress
                if (remainlength > 0) {
                    Buffer.BlockCopy(Files[i].Data, 0, boundarybuf, remainlength, BLOCK_SIZE - remainlength);
                    blockinfos[curblock].compressedSize =
                        TryLZ4Compress(boundarybuf, 0, compbuf, curblock * BLOCK_SIZE, BLOCK_SIZE);
                    if (blockinfos[curblock].compressedSize == BLOCK_SIZE) {
                        blockinfos[curblock].flag &= ~0x3F; // stored uncompressed
                    }
                    curblock++;
                }
                int blockstart = 0;
                if (remainlength > 0) {
                    blockstart = BLOCK_SIZE - remainlength;
                }
                // Record full blocks for parallel compression
                int fullblockcount = (Files[i].Data.Length - blockstart) / BLOCK_SIZE;
                for (int j = 0; j < fullblockcount; j++, curblock++) {
                    compinfos[curblock].data = Files[i].Data;
                    compinfos[curblock].length = BLOCK_SIZE;
                    compinfos[curblock].offset = blockstart + j * BLOCK_SIZE;
                }
                // If the file has remaining data, buffer it for the next boundary block
                remainlength = (Files[i].Data.Length - blockstart) % BLOCK_SIZE;
                if (remainlength > 0) {
                    Buffer.BlockCopy(Files[i].Data, Files[i].Data.Length - remainlength, boundarybuf, 0, remainlength);
                }
            }
            // Process the last (partial) block
            if (remainlength > 0) {
                blockinfos[curblock].compressedSize =
                    LZ4.LZ4Codec.Encode(boundarybuf, 0, remainlength, compbuf, curblock * BLOCK_SIZE, remainlength);
                // If compression did not help, just copy the raw bytes
                if (blockinfos[curblock].compressedSize == 0) {
                    blockinfos[curblock].compressedSize = remainlength;
                    blockinfos[curblock].flag &= ~0x3F; // stored uncompressed
                    Buffer.BlockCopy(boundarybuf, 0, compbuf, curblock * BLOCK_SIZE, remainlength);
                }
            }
            // Compress the recorded full blocks in parallel
            Parallel.For(0, blockcount, i => {
                if (compinfos[i].data == null) {
                    return; // boundary block, already compressed above
                }
                blockinfos[i].compressedSize =
                    TryLZ4Compress(compinfos[i].data, compinfos[i].offset, compbuf, i * BLOCK_SIZE, compinfos[i].length);
                if (blockinfos[i].compressedSize == BLOCK_SIZE) {
                    blockinfos[i].flag &= ~0x3F; // stored uncompressed
                }
            });
        }

        // Write the block/file info into a temporary writer so it can be LZ4-compressed
        UnityBinaryWriter headerwriter = new UnityBinaryWriter();
        headerwriter.Position += 0x10;
        // BlockInfo
        headerwriter.WriteIntBE(blockcount);
        blockinfos.Write(headerwriter);
        // FileInfo
        headerwriter.WriteIntBE(Files.Length);
        int curoffset = 0;
        for (int i = 0; i < Files.Length; i++) {
            headerwriter.WriteLongBE(curoffset);
            headerwriter.WriteLongBE(Files[i].Data.LongLength);
            headerwriter.WriteIntBE(4);
            headerwriter.WriteStringToNull(Files[i].Name);
            curoffset += Files[i].Data.Length;
        }
        // Compress and write the info header, then back-patch its sizes
        writer.Position += 4 + 4 + 4;
        int header_compsize = writer.WriteLZ4Data(headerwriter.ToBytes());
        int final_pos = writer.Position;
        writer.Position = info_offset;
        writer.WriteIntBE(header_compsize);
        writer.WriteIntBE(blockinfosize + fileinfosize);
        writer.WriteIntBE(0x42); // 0x40 (combined block/dir info) | 2 (LZ4)
        writer.Position = final_pos;

        // Write Blocks
        for (int i = 0; i < blockcount; i++) {
            writer.WriteBytes(compbuf, i * BLOCK_SIZE, blockinfos[i].compressedSize);
        }
    }
}
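// Sketch: the ceiling arithmetic used for the block count above, checked with
// concrete numbers (hypothetical harness; Unity's LZ4 bundles commonly use a
// 0x20000-byte block size, but any positive size behaves the same way):
private static void BlockCountSketch() {
    const int blocksize = 0x20000;  // stand-in for BLOCK_SIZE
    int totalbytes = 300_000;
    int blockcount = totalbytes / blocksize + (totalbytes % blocksize != 0 ? 1 : 0); // == 3
    int lastsize = totalbytes % blocksize != 0 ? totalbytes % blocksize : blocksize; // == 37856
    System.Diagnostics.Debug.Assert(blockcount == 3 && lastsize == 37856);
}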