/// <summary>
/// Writes the sparse (offset-map) table: one entry per id in [0, sparseCount),
/// each entry being a 4-byte file offset followed by a 2-byte record size
/// (6 bytes total — see the empty-record skip below).
/// </summary>
/// <param name="writer">Destination writer, positioned at the start of the offset map.</param>
/// <param name="serializer">Row serializer holding the serialized records keyed by id.</param>
/// <param name="recordOffset">File offset at which the first record's data was written.</param>
/// <param name="sparseCount">Number of ids covered by the map (maxIndex - minIndex + 1).</param>
public void WriteOffsetRecords(BinaryWriter writer, IDBRowSerializer<T> serializer, uint recordOffset, int sparseCount)
{
    var sparseIdLookup = new Dictionary<int, uint>(sparseCount);
    for (int i = 0; i < sparseCount; i++)
    {
        if (serializer.Records.TryGetValue(i, out var record))
        {
            if (CopyData.TryGetValue(i, out int copyid))
            {
                // copy records use their parent's offset
                writer.Write(sparseIdLookup[copyid]);
                // BUGFIX: the size field is 2 bytes wide (entries are 6 bytes each,
                // matching the += 6 skip for unused ids); writing the raw int
                // emitted 4 bytes and corrupted every subsequent entry.
                writer.Write((ushort)record.TotalBytesWrittenOut);
            }
            else
            {
                writer.Write(sparseIdLookup[i] = recordOffset);
                // BUGFIX: same 2-byte size field as above.
                writer.Write((ushort)record.TotalBytesWrittenOut);
                recordOffset += (uint)record.TotalBytesWrittenOut;
            }
        }
        else
        {
            // unused ids are empty records: skip one 6-byte (4 + 2) entry
            writer.BaseStream.Position += 6;
        }
    }
}
/// <summary>
/// Serializes <paramref name="storage"/> to <paramref name="stream"/> in the WDC2 DB2 format:
/// header, one section header, field/column meta, pallet and common data, record data,
/// string table (or sparse offset map), index table, copy table and relationship data.
/// NOTE(review): entry order and field widths below define the binary layout — do not reorder.
/// </summary>
/// <param name="reader">Reader the original file was loaded with; supplies header values that are copied through unchanged.</param>
/// <param name="storage">Rows to write, keyed by record id.</param>
/// <param name="stream">Destination stream; the BinaryWriter takes ownership and disposes it.</param>
public WDC2Writer(WDC2Reader reader, IDictionary<int, T> storage, Stream stream) : base(reader)
{
    // always 2 empties
    StringTableSize++;

    // Serialize every row up front so record sizes and string offsets are known
    // before the header (which contains totals) is written.
    WDC2RowSerializer<T> serializer = new WDC2RowSerializer<T>(this);
    serializer.Serialize(storage);
    serializer.GetCopyRows();
    serializer.UpdateStringOffsets(storage);

    // Copy rows are emitted via the copy table, not as record data.
    RecordsCount = serializer.Records.Count - CopyData.Count;

    var(commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes();

    using (var writer = new BinaryWriter(stream))
    {
        int minIndex = storage.Keys.Min();
        int maxIndex = storage.Keys.Max();
        // Sparse files carry no copy table; otherwise 8 bytes (id + parent id) per copy row.
        int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8;

        // --- file header (values copied from the reader where the format requires them) ---
        writer.Write(reader.Signature);
        writer.Write(RecordsCount);
        writer.Write(FieldsCount);
        writer.Write(RecordSize);
        writer.Write(StringTableSize);
        writer.Write(reader.TableHash);
        writer.Write(reader.LayoutHash);
        writer.Write(minIndex);
        writer.Write(maxIndex);
        writer.Write(reader.Locale);
        writer.Write((ushort)Flags);
        writer.Write((ushort)IdFieldIndex);
        writer.Write(FieldsCount); // totalFieldCount
        writer.Write(reader.PackedDataOffset);
        writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount
        writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize (24 bytes per column)
        writer.Write(commonDataSize);
        writer.Write(palletDataSize);
        writer.Write(1); // sections count

        // Nothing else to emit for an empty table; header alone is the whole file.
        if (storage.Count == 0)
        {
            return;
        }

        // --- section header ---
        // Absolute offset of the record data: header + field meta + column meta
        // + this section header + pallet and common blocks.
        int fileOffset = HeaderSize + (Meta.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf<SectionHeader>() + palletDataSize + commonDataSize;
        writer.Write(0UL); // TactKeyLookup
        writer.Write(fileOffset); // FileOffset
        writer.Write(RecordsCount); // NumRecords
        writer.Write(StringTableSize);
        writer.Write(copyTableSize);
        writer.Write(0); // sparseTableOffset — back-patched below when the Sparse flag is set
        writer.Write(RecordsCount * 4); // indexTableSize
        writer.Write(referenceDataSize);

        // field meta
        writer.WriteArray(Meta);

        // column meta data
        writer.WriteArray(ColumnMeta);

        // pallet data — only pallet-compressed columns contribute entries
        for (int i = 0; i < ColumnMeta.Length; i++)
        {
            if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray)
            {
                foreach (var palletData in PalletData[i])
                {
                    writer.WriteArray(palletData);
                }
            }
        }

        // common data — (record id, value) pairs per common-compressed column
        for (int i = 0; i < ColumnMeta.Length; i++)
        {
            if (ColumnMeta[i].CompressionType == CompressionType.Common)
            {
                foreach (var commondata in CommonData[i])
                {
                    writer.Write(commondata.Key);
                    writer.Write(commondata.Value.GetValue<int>());
                }
            }
        }

        // record data — copy rows are skipped; they exist only in the copy table
        uint recordsOffset = (uint)writer.BaseStream.Position;
        foreach (var record in serializer.Records)
        {
            if (!CopyData.TryGetValue(record.Key, out int parent))
            {
                record.Value.CopyTo(writer.BaseStream);
            }
        }

        // string table — non-sparse only; starts with the empty string at offset 0
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            writer.WriteCString("");
            foreach (var str in StringTable)
            {
                writer.WriteCString(str.Key);
            }
        }

        // sparse data
        if (Flags.HasFlagExt(DB2Flags.Sparse))
        {
            // back-patch the sparseTableOffset field in the section header (offset 96),
            // then return to the end and emit the offset map
            long oldPos = writer.BaseStream.Position;
            writer.BaseStream.Position = 96;
            writer.Write((uint)oldPos);
            writer.BaseStream.Position = oldPos;
            WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1);
        }

        // index table — record ids, excluding copy rows
        if (Flags.HasFlagExt(DB2Flags.Index))
        {
            writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray());
        }

        // copy table — (new id, source id) pairs; omitted for sparse files
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            foreach (var copyRecord in CopyData)
            {
                writer.Write(copyRecord.Key);
                writer.Write(copyRecord.Value);
            }
        }

        // reference (relationship) data: count, min, max, then (value, record index) pairs
        if (ReferenceData.Count > 0)
        {
            writer.Write(ReferenceData.Count);
            writer.Write(ReferenceData.Min());
            writer.Write(ReferenceData.Max());
            for (int i = 0; i < ReferenceData.Count; i++)
            {
                writer.Write(ReferenceData[i]);
                writer.Write(i);
            }
        }
    }
}
private const uint WDB6FmtSig = 0x36424457; // WDB6

/// <summary>
/// Serializes <paramref name="storage"/> to <paramref name="stream"/> in the WDB6 DB2 format:
/// header, field meta, record data, string table (or sparse offset map), secondary-key data,
/// index table, copy table and common data block.
/// NOTE(review): entry order and field widths below define the binary layout — do not reorder.
/// </summary>
/// <param name="reader">Reader the original file was loaded with; supplies header values copied through unchanged.</param>
/// <param name="storage">Rows to write, keyed by record id.</param>
/// <param name="stream">Destination stream; the BinaryWriter takes ownership and disposes it.</param>
public WDB6Writer(WDB6Reader reader, IDictionary<int, T> storage, Stream stream) : base(reader)
{
    // always 2 empties
    StringTableSize++;

    // One common-data dictionary per non-inline field.
    // BUGFIX: the original used Array.ForEach(CommonData, x => x = new ...),
    // which assigns only to the lambda parameter and leaves every array slot
    // null — the serializer would then NRE on first use. Initialize directly.
    CommonData = new Dictionary<int, Value32>[Meta.Length - FieldsCount];
    for (int c = 0; c < CommonData.Length; c++)
    {
        CommonData[c] = new Dictionary<int, Value32>();
    }

    // Serialize every row up front so record sizes are known before the header is written.
    WDB6RowSerializer<T> serializer = new WDB6RowSerializer<T>(this);
    serializer.Serialize(storage);
    serializer.GetCopyRows();

    // Copy rows are emitted via the copy table, not as record data.
    RecordsCount = serializer.Records.Count - CopyData.Count;

    using (var writer = new BinaryWriter(stream))
    {
        int minIndex = storage.Keys.Min();
        int maxIndex = storage.Keys.Max();
        // Sparse files carry no copy table; otherwise 8 bytes (id + parent id) per copy row.
        int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8;

        // --- file header ---
        writer.Write(WDB6FmtSig);
        writer.Write(RecordsCount);
        writer.Write(FieldsCount);
        writer.Write(RecordSize);
        writer.Write(StringTableSize); // if flags & 0x01 != 0, offset to the offset_map
        writer.Write(reader.TableHash);
        writer.Write(reader.LayoutHash);
        writer.Write(minIndex);
        writer.Write(maxIndex);
        writer.Write(reader.Locale);
        writer.Write(copyTableSize);
        writer.Write((ushort)Flags);
        writer.Write((ushort)IdFieldIndex);
        writer.Write(Meta.Length); // totalFieldCount
        writer.Write(0); // commonDataSize — back-patched below (header offset 52)

        // Nothing else to emit for an empty table.
        if (storage.Count == 0)
        {
            return;
        }

        // field meta — inline fields only; common fields live in the common data block
        for (int i = 0; i < FieldsCount; i++)
        {
            writer.Write(Meta[i]);
        }

        // record data — copy rows are skipped; they exist only in the copy table
        uint recordsOffset = (uint)writer.BaseStream.Position;
        foreach (var record in serializer.Records)
        {
            if (!CopyData.TryGetValue(record.Key, out int parent))
            {
                record.Value.CopyTo(writer.BaseStream);
            }
        }

        // string table — non-sparse only; starts with the empty string at offset 0
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            writer.WriteCString("");
            foreach (var str in StringTable)
            {
                writer.WriteCString(str.Key);
            }
        }

        // sparse data
        if (Flags.HasFlagExt(DB2Flags.Sparse))
        {
            // back-patch StringTableSize (header offset 16) to the offset_map position,
            // then return to the end and emit the offset map
            long oldPos = writer.BaseStream.Position;
            writer.BaseStream.Position = 16;
            writer.Write((uint)oldPos);
            writer.BaseStream.Position = oldPos;
            WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1);
        }

        // secondary key
        if (Flags.HasFlagExt(DB2Flags.SecondaryKey))
        {
            WriteSecondaryKeyData(writer, storage, maxIndex - minIndex + 1);
        }

        // index table — record ids, excluding copy rows
        if (Flags.HasFlagExt(DB2Flags.Index))
        {
            writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray());
        }

        // copy table — (new id, source id) pairs; omitted for sparse files
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            foreach (var copyRecord in CopyData)
            {
                writer.Write(copyRecord.Key);
                writer.Write(copyRecord.Value);
            }
        }

        // common data
        // HACK this is bodged together
        // - it only writes common data columns and all values including common ones
        if (CommonData.Length > 0)
        {
            long startPos = writer.BaseStream.Position;
            writer.Write(Meta.Length - FieldsCount);
            for (int i = 0; i < CommonData.Length; i++)
            {
                writer.Write(CommonData[i].Count);
                writer.Write(reader.CommonDataTypes[i]); // type
                foreach (var record in CommonData[i])
                {
                    writer.Write(record.Key);
                    switch (reader.CommonDataIsAligned)
                    {
                        // ushort
                        case false when reader.CommonDataTypes[i] == 1:
                            writer.Write(record.Value.GetValue<ushort>());
                            break;
                        // byte
                        case false when reader.CommonDataTypes[i] == 2:
                            writer.Write(record.Value.GetValue<byte>());
                            break;
                        default:
                            writer.Write(record.Value.GetValue<uint>());
                            break;
                    }
                }
            }

            // BUGFIX: measure the block BEFORE seeking back to the header. The
            // original set Position = 52 and then computed (Position - startPos),
            // which wrote (uint)(52 - startPos) — a wrapped-negative garbage size —
            // instead of the common data block length.
            long endPos = writer.BaseStream.Position;
            writer.BaseStream.Position = 52; // header commonDataSize field
            writer.Write((uint)(endPos - startPos));
            writer.BaseStream.Position = writer.BaseStream.Length;
        }
    }
}
private const uint WDC3FmtSig = 0x33434457; // WDC3

/// <summary>
/// Serializes <paramref name="storage"/> to <paramref name="stream"/> in the WDC3 DB2 format:
/// header, section headers, field/column meta, pallet and common data, record data,
/// string table or sparse offset map, index/copy tables and relationship data, followed by
/// the raw bytes of any sections that were not parsed.
/// NOTE(review): entry order and field widths below define the binary layout — do not reorder.
/// </summary>
/// <param name="reader">Reader the original file was loaded with; supplies header values and untouched trailing sections.</param>
/// <param name="storage">Rows to write, keyed by record id.</param>
/// <param name="stream">Destination stream; the BinaryWriter takes ownership and disposes it.</param>
public WDC3Writer(WDC3Reader reader, IDictionary<int, T> storage, Stream stream) : base(reader)
{
    // always 2 empties
    //StringTableSize++; //no need since we calculated the value in BaseWriter::InternString

    // Serialize every row up front so record sizes and string offsets are known
    // before the header (which contains totals) is written.
    WDC3RowSerializer<T> serializer = new WDC3RowSerializer<T>(this);
    serializer.Serialize(storage);
    serializer.GetCopyRows();
    serializer.UpdateStringOffsets(storage);

    // Copy rows are emitted via the copy table, not as record data.
    RecordsCount = serializer.Records.Count - CopyData.Count;

    var(commonDataSize, palletDataSize, referenceDataSize) = GetDataSizes();

    using (var writer = new BinaryWriter(stream))
    {
        int minIndex = storage.Keys.Min();
        int maxIndex = storage.Keys.Max();

        // --- file header ---
        writer.Write(WDC3FmtSig);
        writer.Write(RecordsCount);
        writer.Write(FieldsCount);
        writer.Write(RecordSize);
        writer.Write(StringTableSize);
        writer.Write(reader.TableHash);
        writer.Write(reader.LayoutHash);
        writer.Write(minIndex);
        writer.Write(maxIndex);
        writer.Write(reader.Locale);
        writer.Write((ushort)Flags);
        writer.Write((ushort)IdFieldIndex);
        writer.Write(FieldsCount); // totalFieldCount
        writer.Write(reader.PackedDataOffset);
        writer.Write(ReferenceData.Count > 0 ? 1 : 0); // RelationshipColumnCount
        writer.Write(ColumnMeta.Length * 24); // ColumnMetaDataSize (24 bytes per column)
        writer.Write(commonDataSize);
        writer.Write(palletDataSize);
        writer.Write(reader.SectionsCount); // sections count

        // Nothing else to emit for an empty table; header alone is the whole file.
        if (storage.Count == 0)
        {
            return;
        }

        // --- section header for section 0 (the one being rewritten) ---
        // Absolute offset of the record data: header + field meta + column meta
        // + one section header + pallet and common blocks.
        int fileOffset = HeaderSize + (field_structure_data.Length * 4) + (ColumnMeta.Length * 24) + Unsafe.SizeOf<SectionHeaderWDC3>() + palletDataSize + commonDataSize;
        writer.Write(0UL); // TactKeyLookup
        writer.Write(fileOffset); // FileOffset
        writer.Write(RecordsCount); // NumRecords
        writer.Write(StringTableSize);
        writer.Write(0); // OffsetRecordsEndOffset — back-patched below (offset 92) when Sparse
        writer.Write(RecordsCount * 4); // IndexDataSize
        writer.Write(referenceDataSize); // ParentLookupDataSize
        writer.Write(Flags.HasFlagExt(DB2Flags.Sparse) ? RecordsCount : 0); // OffsetMapIDCount
        writer.Write(CopyData.Count); // CopyTableCount

        // rest of the section headers are copied through from the reader unchanged
        for (int i = 1; i < reader.SectionsCount; i++)
        {
            writer.Write(reader.SectionHeaders[i].TactKeyLookup); // TactKeyLookup
            writer.Write(reader.SectionHeaders[i].FileOffset); // FileOffset
            writer.Write(reader.SectionHeaders[i].NumRecords); // NumRecords
            writer.Write(reader.SectionHeaders[i].StringTableSize);
            writer.Write(reader.SectionHeaders[i].OffsetRecordsEndOffset); // OffsetRecordsEndOffset
            writer.Write(reader.SectionHeaders[i].IndexDataSize); // IndexDataSize
            writer.Write(reader.SectionHeaders[i].ParentLookupDataSize); // ParentLookupDataSize
            writer.Write(reader.SectionHeaders[i].OffsetMapIDCount); // OffsetMapIDCount
            writer.Write(reader.SectionHeaders[i].CopyTableCount); // CopyTableCount
        }

        // field meta
        writer.WriteArray(field_structure_data);

        // column meta data
        writer.WriteArray(ColumnMeta);

        // pallet data — only pallet-compressed columns contribute entries
        for (int i = 0; i < ColumnMeta.Length; i++)
        {
            if (ColumnMeta[i].CompressionType == CompressionType.Pallet || ColumnMeta[i].CompressionType == CompressionType.PalletArray)
            {
                foreach (var palletData in PalletData[i])
                {
                    writer.WriteArray(palletData);
                }
            }
        }

        // common data — (record id, value) pairs per common-compressed column
        for (int i = 0; i < ColumnMeta.Length; i++)
        {
            if (ColumnMeta[i].CompressionType == CompressionType.Common)
            {
                foreach (var commondata in CommonData[i])
                {
                    writer.Write(commondata.Key);
                    writer.Write(commondata.Value.GetValue<int>());
                }
            }
        }

        // record data — copy rows are skipped; offset-map entries (offset + 2-byte size)
        // are collected as each record lands so the sparse tables can be written later
        var m_sparseEntries = new Dictionary<int, offset_map_entry>(storage.Count);
        foreach (var record in serializer.Records)
        {
            if (!CopyData.TryGetValue(record.Key, out int parent))
            {
                m_sparseEntries.Add(record.Key, new offset_map_entry()
                {
                    Offset = (uint)writer.BaseStream.Position,
                    Size = (ushort)record.Value.TotalBytesWrittenOut
                });
                record.Value.CopyTo(writer.BaseStream);
            }
        }

        // string table — non-sparse only; starts with the empty string at offset 0
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            writer.WriteCString("");
            foreach (var str in StringTableStingAsKeyPosAsValue)
            {
                writer.WriteCString(str.Key);
            }
        }

        // back-patch the OffsetRecordsEndOffset field in section header 0 (offset 92)
        if (Flags.HasFlagExt(DB2Flags.Sparse))
        {
            long oldPos = writer.BaseStream.Position;
            writer.BaseStream.Position = 92;
            writer.Write((uint)oldPos);
            writer.BaseStream.Position = oldPos;
        }

        // index table — record ids, excluding copy rows
        if (Flags.HasFlagExt(DB2Flags.Index))
        {
            writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray());
        }

        // copy table — (new id, source id) pairs
        foreach (var copyRecord in CopyData)
        {
            writer.Write(copyRecord.Key);
            writer.Write(copyRecord.Value);
        }

        // sparse data — the offset map entries collected during record writing
        if (Flags.HasFlagExt(DB2Flags.Sparse))
        {
            writer.WriteArray(m_sparseEntries.Values.ToArray());
        }

        // reference (relationship) data: count, min, max, then (value, record index) pairs
        if (ReferenceData.Count > 0)
        {
            writer.Write(ReferenceData.Count);
            writer.Write(ReferenceData.Min());
            writer.Write(ReferenceData.Max());
            for (int i = 0; i < ReferenceData.Count; i++)
            {
                writer.Write(ReferenceData[i]);
                writer.Write(i);
            }
        }

        // sparse data ids — the record ids matching the offset map entries above
        if (Flags.HasFlagExt(DB2Flags.Sparse))
        {
            writer.WriteArray(m_sparseEntries.Keys.ToArray());
        }

        // rest data — raw bytes of sections that were never parsed, copied through verbatim
        writer.WriteArray(reader.NoParseRecordsData);
    }
}