/// <summary>
/// Loads a DB2/DBC file from <paramref name="stream"/>: reads the 4-byte magic,
/// picks the matching format reader, then materializes one <typeparamref name="T"/>
/// per row into this storage via the base Add method.
/// </summary>
/// <param name="stream">File contents; fully consumed and disposed by this constructor.</param>
/// <exception cref="NotSupportedException">
/// Thrown when the magic identifier is not a known DB2/DBC format
/// (still caught by any pre-existing <c>catch (Exception)</c> handlers).
/// </exception>
public Storage(Stream stream)
{
    DB2Reader reader;

    using (stream)
    using (var bin = new BinaryReader(stream))
    {
        // Peek the 4-char magic, then rewind so the chosen reader sees the whole file.
        var identifier = new string(bin.ReadChars(4));
        stream.Position = 0;

        switch (identifier)
        {
            case "WDC3": reader = new WDC3Reader(stream); break;
            case "WDC2":
            case "1SLC": reader = new WDC2Reader(stream); break;
            case "WDC1": reader = new WDC1Reader(stream); break;
            case "WDB6": reader = new WDB6Reader(stream); break;
            case "WDB5": reader = new WDB5Reader(stream); break;
            case "WDB4": reader = new WDB4Reader(stream); break;
            case "WDB3": reader = new WDB3Reader(stream); break;
            case "WDB2": reader = new WDB2Reader(stream); break;
            case "WDBC": reader = new WDBCReader(stream); break;
            default:
                // FIX: specific exception type instead of bare Exception.
                throw new NotSupportedException("DB type " + identifier + " is not supported!");
        }
    }

    // Cache reflection info once per field of T instead of per row.
    FieldInfo[] fields = typeof(T).GetFields();
    FieldCache<T>[] fieldCache = new FieldCache<T>[fields.Length];
    for (int i = 0; i < fields.Length; ++i)
    {
        // [Index] is only meaningful when the file actually carries an index table.
        bool indexMapAttribute = reader.Flags.HasFlagExt(DB2Flags.Index)
            && Attribute.IsDefined(fields[i], typeof(IndexAttribute));
        fieldCache[i] = new FieldCache<T>(fields[i], indexMapAttribute);
    }

    // FIX: lock on a private gate captured by the lambda rather than `this` —
    // locking a publicly reachable object invites deadlock if callers also
    // lock the storage instance.
    object gate = new object();

    // NOTE(review): MaxDegreeOfParallelism = 1 makes this effectively sequential;
    // the lock is kept so raising the degree later stays safe.
    Parallel.ForEach(reader.AsEnumerable(), new ParallelOptions() { MaxDegreeOfParallelism = 1 }, row =>
    {
        T entry = new T();
        row.Value.GetFields(fieldCache, entry);
        lock (gate)
            Add(row.Value.Id, entry);
    });
}
// Magic bytes "WDB4" as a little-endian uint32; first field of the header.
private const uint WDB4FmtSig = 0x34424457; // WDB4

/// <summary>
/// Serializes <paramref name="storage"/> to <paramref name="stream"/> in WDB4
/// format, reusing header metadata (table hash, build, locale, flags) from
/// <paramref name="reader"/>.
/// </summary>
/// <param name="reader">Source reader whose header fields are copied into the output.</param>
/// <param name="storage">Records to serialize, keyed by record id.</param>
/// <param name="stream">Destination; disposed when the BinaryWriter is disposed.</param>
public WDB4Writer(WDB4Reader reader, IDictionary<int, T> storage, Stream stream) : base(reader)
{
    // always 2 empties
    StringTableSize++;

    // Serialize all rows up front so RecordsCount and CopyData are known
    // before the header is written.
    WDB4RowSerializer<T> serializer = new WDB4RowSerializer<T>(this);
    serializer.Serialize(storage);
    serializer.GetCopyRows();

    // Rows that appear in the copy table are not emitted as full records.
    RecordsCount = serializer.Records.Count - CopyData.Count;

    using (var writer = new BinaryWriter(stream))
    {
        int minIndex = storage.Keys.Min();
        int maxIndex = storage.Keys.Max();
        // 8 bytes per copy-table entry; sparse files carry no copy table.
        int copyTableSize = Flags.HasFlagExt(DB2Flags.Sparse) ? 0 : CopyData.Count * 8;

        // ---- header: thirteen 4-byte fields, in fixed WDB4 order ----
        writer.Write(WDB4FmtSig);
        writer.Write(RecordsCount);
        writer.Write(FieldsCount);
        writer.Write(RecordSize);
        writer.Write(StringTableSize); // if flags & 0x01 != 0, offset to the offset_map
        writer.Write(reader.TableHash);
        writer.Write(reader.Build);
        writer.Write((uint)DateTimeOffset.UtcNow.ToUnixTimeSeconds());
        writer.Write(minIndex);
        writer.Write(maxIndex);
        writer.Write(reader.Locale);
        writer.Write(copyTableSize);
        writer.Write((uint)Flags);

        // Empty table: header only, nothing else to emit.
        if (storage.Count == 0)
        {
            return;
        }

        // record data
        uint recordsOffset = (uint)writer.BaseStream.Position;
        foreach (var record in serializer.Records)
        {
            // Skip rows that exist only as copy-table references.
            if (!CopyData.ContainsKey(record.Key))
            {
                record.Value.CopyTo(writer.BaseStream);
            }
        }

        // string table (non-sparse only); leading empty string so offset 0
        // reads back as "".
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            writer.WriteCString("");
            foreach (var str in StringTableStingAsKeyPosAsValue)
            {
                writer.WriteCString(str.Key);
            }
        }

        // sparse data
        if (Flags.HasFlagExt(DB2Flags.Sparse))
        {
            // change the StringTableSize to the offset_map position
            // (byte 16 of the header: four 4-byte fields precede it).
            long oldPos = writer.BaseStream.Position;
            writer.BaseStream.Position = 16;
            writer.Write((uint)oldPos);
            writer.BaseStream.Position = oldPos;

            WriteOffsetRecords(writer, serializer, recordsOffset, maxIndex - minIndex + 1);
        }

        // secondary key
        if (Flags.HasFlagExt(DB2Flags.SecondaryKey))
        {
            WriteSecondaryKeyData(writer, storage, maxIndex - minIndex + 1);
        }

        // index table: record ids, excluding ids that only exist via the copy table
        if (Flags.HasFlagExt(DB2Flags.Index))
        {
            writer.WriteArray(serializer.Records.Keys.Except(CopyData.Keys).ToArray());
        }

        // copy table: (key, value) id pairs, non-sparse only
        if (!Flags.HasFlagExt(DB2Flags.Sparse))
        {
            foreach (var copyRecord in CopyData)
            {
                writer.Write(copyRecord.Key);
                writer.Write(copyRecord.Value);
            }
        }
    }
}