private static uint GetHash(string str)
{
    uint hash;
    if (stringHashLookup.ContainsKey(str))
    {
        hash = stringHashLookup[str];
    }
    else
    {
        hash = Checksum.FFnv32(str);
        stringHashLookup.Add(str, hash);
    }
    return hash;
}
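// Checksum.FFnv32 is not defined in this section; it is assumed here to be a standard 32-bit
// FNV-1a hash of the string's bytes. A minimal sketch under that assumption (hypothetical helper,
// not the project's actual implementation):
private static uint Fnv1a32Sketch(string str)
{
    uint hash = 2166136261u;                                     // FNV-1a 32-bit offset basis
    foreach (byte b in System.Text.Encoding.UTF8.GetBytes(str))
    {
        hash ^= b;                                               // XOR in each byte...
        hash *= 16777619u;                                       // ...then multiply by the FNV prime
    }
    return hash;
}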
public override void Read(BinaryStream bs)
{
    Stopwatch sw = new Stopwatch();
    sw.Start();

    // read header
    HeaderInfo headerInfo = bs.Read.Type<HeaderInfo>();
    this.Patch = headerInfo.patchName;
    this.Timestamp = Util.Time.DateTimeFromUnixTimestampMicroseconds((long)headerInfo.timestamp);
    this.Flags = headerInfo.flags;
    this.fileVersion = headerInfo.version;
    //Console.WriteLine("read: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // deobfuscate
    byte[] data = bs.Read.ByteArray((int)headerInfo.payloadSize);
    MTXor(Checksum.FFnv32(headerInfo.patchName), ref data);
    //Console.WriteLine("dxor: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // cleanup memory, the original stream is not needed anymore
    bs.Dispose();
    bs = null;
    GC.Collect();

    // read compression header
    // uint   inflated size
    // uint   padding
    // ushort 0x78 0x01 zlib deflate low/no compression
    uint inflatedSize = UIntFromBufferLE(ref data);
    ushort ds = UShortFromBufferLE(ref data, 8);
    byte[] inflated = new byte[inflatedSize];
    Inflate(data, ref inflated, SharpCompress.Compressors.Deflate.CompressionLevel.BestSpeed, (int)inflatedSize, 10);
    BinaryStream ibs = new BinaryStream(new MemoryStream(inflated));
    data = null;
    //Console.WriteLine("infl: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // cleanup memory, the deobfuscated stream is not needed anymore
    GC.Collect();

    // read table header
    this.tableVersion = ibs.Read.UInt();
    ushort indexLength = ibs.Read.UShort();

    // read table info
    TableInfo[] tableInfos = new TableInfo[indexLength];
    for (ushort i = 0; i < indexLength; i++)
    {
        tableInfos[i] = ibs.Read.Type<TableInfo>();
    }

    // read field info
    FieldInfo[][] fieldInfos = new FieldInfo[indexLength][];
    for (int i = 0; i < indexLength; i++)
    {
        fieldInfos[i] = new FieldInfo[tableInfos[i].numFields];
        for (int x = 0; x < tableInfos[i].numFields; x++)
        {
            fieldInfos[i][x] = ibs.Read.Type<FieldInfo>();
        }
    }

    // read row info
    RowInfo[] rowInfos = new RowInfo[indexLength];
    for (ushort i = 0; i < indexLength; i++)
    {
        rowInfos[i] = ibs.Read.Type<RowInfo>();
    }

    // build tables
    Tables = new List<Table>(indexLength);
    for (ushort i = 0; i < indexLength; i++)
    {
        TableInfo tableInfo = tableInfos[i];
        FieldInfo[] fieldInfo = fieldInfos[i];
        RowInfo rowInfo = rowInfos[i];

        // setup table
        Table table = new Table();
        table.Id = tableInfo.id;

        // add fields
        table.Columns = new List<Column>(tableInfo.numFields);
        int currentWidth = 0;
        for (int x = 0; x < tableInfo.numFields; x++)
        {
            Column field = new Column();
            field.Id = fieldInfos[i][x].id;
            field.Type = (DBType)fieldInfos[i][x].type;
            // fix removed fields? (weird padding in some places)
            if (fieldInfo[x].start != currentWidth)
            {
                int padding = fieldInfo[x].start - currentWidth;
                field.Padding = padding;
                currentWidth += padding;
            }
            currentWidth += DBTypeLength((DBType)fieldInfo[x].type);
            table.Columns.Add(field);
        }

        // if any, add nullable fields
        if (tableInfo.nullableBitfields != 0)
        {
            int count = 0;
            for (int x = 0; x < tableInfo.numFields; x++)
            {
                if (fieldInfos[i][x].nullableIndex != 255)
                {
                    count++;
                }
            }
            Column[] nullableColumns = new Column[count];
            for (int x = 0; x < tableInfo.numFields; x++)
            {
                if (fieldInfos[i][x].nullableIndex != 255)
                {
                    nullableColumns[fieldInfos[i][x].nullableIndex] = table.Columns[x];
                }
            }
            table.NullableColumn = new List<Column>(nullableColumns);
        }
        else
        {
            table.NullableColumn = new List<Column>();
        }

        Tables.Add(table);
    }
    //Console.WriteLine("tabl: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // read rows
    ConcurrentQueue<int> tableRowsReadQueue = new ConcurrentQueue<int>();
    for (ushort i = 0; i < indexLength; i++)
    {
        tableRowsReadQueue.Enqueue(i);
    }

    Parallel.For(0, numThreads, new ParallelOptions { MaxDegreeOfParallelism = numThreads }, q =>
    {
        BinaryStream dbs = new BinaryStream(new MemoryStream(inflated));
        while (tableRowsReadQueue.Count != 0)
        {
            int i;
            if (!tableRowsReadQueue.TryDequeue(out i))
            {
                continue;
            }

            TableInfo tableInfo = tableInfos[i];
            FieldInfo[] fieldInfo = fieldInfos[i];
            RowInfo rowInfo = rowInfos[i];

            Tables[i].Rows = new List<Row>();
            for (int y = 0; y < rowInfo.rowCount; y++)
            {
                Row row = new Row(tableInfo.numFields);
                dbs.ByteOffset = rowInfo.rowOffset + (tableInfo.numBytes * y) + fieldInfo[0].start;
                for (int z = 0; z < tableInfo.numFields; z++)
                {
                    if (Tables[i].Columns[z].Padding != 0)
                    {
                        dbs.ByteOffset += Tables[i].Columns[z].Padding;
                    }
                    // just read the basic type now, unpack & decrypt later to reduce seeking
                    row.Fields.Add(ReadDBType(dbs, (DBType)fieldInfo[z].type));
                }

                // null out nulls again :P
                if (tableInfo.nullableBitfields > 0)
                {
                    byte[] nulls = dbs.Read.BitArray(tableInfo.nullableBitfields * 8);
                    for (int n = 0; n < Tables[i].NullableColumn.Count; n++)
                    {
                        if (nulls[n] == 1)
                        {
                            int index = Tables[i].Columns.IndexOf(Tables[i].NullableColumn[n]);
                            row[index] = null;
                        }
                    }
                }

                Tables[i].Rows.Add(row);
            }
        }
    });
    inflated = null;
    //Console.WriteLine("rows: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // seek to the very end of the tables/start of data
    RowInfo lri = rowInfos[rowInfos.Length - 1];
    TableInfo lti = tableInfos[tableInfos.Length - 1];
    ibs.ByteOffset = lri.rowOffset + (lri.rowCount * lti.numBytes);

    // copy the data to a new stream
    int dataLength = (int)(ibs.Length - ibs.ByteOffset);
    byte[] dataBlock = ibs.Read.ByteArray(dataLength);

    // cleanup
    ibs.Dispose();
    ibs = null;
    GC.Collect();

    // get unique data entry keys
    HashSet<uint> uniqueKeys = new HashSet<uint>();
    ConcurrentQueue<uint> uniqueQueue = new ConcurrentQueue<uint>();
    uniqueEntries = new Dictionary<uint, byte[]>();
    for (int i = 0; i < Tables.Count; i++)
    {
        for (int x = 0; x < Tables[i].Columns.Count; x++)
        {
            DBType type = Tables[i].Columns[x].Type;
            if (IsDataType(type))
            {
                for (int y = 0; y < Tables[i].Rows.Count; y++)
                {
                    uint? k = (uint?)Tables[i].Rows[y][x];
                    if (k != null)
                    {
                        if (!uniqueKeys.Contains((uint)k))
                        {
                            uniqueKeys.Add((uint)k);
                            uniqueQueue.Enqueue((uint)k);
                        }
                    }
                }
            }
        }
    }
    //Console.WriteLine("uniq: " + sw.ElapsedMilliseconds);
    sw.Restart();
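    // Note: many rows reference the same packed data entry, so the unique keys collected above are
    // unpacked/decrypted only once each into uniqueEntries and then fanned back out to the rows.
    // Each worker opens its own BinaryStream over the shared dataBlock buffer (the stream position
    // is per-worker state), and writes to the shared dictionary are serialized with a lock.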
    // unpack & decrypt unique data entries to cache
    Parallel.For(0, numThreads, new ParallelOptions { MaxDegreeOfParallelism = numThreads }, i =>
    {
        BinaryStream dbs = new BinaryStream(new MemoryStream(dataBlock));
        while (uniqueQueue.Count != 0)
        {
            uint key;
            if (!uniqueQueue.TryDequeue(out key))
            {
                continue;
            }
            byte[] d = GetDataEntry(dbs, key);
            lock (uniqueEntries)
            {
                uniqueEntries.Add(key, d);
            }
        }
        dbs.Dispose();
    });
    dataBlock = null;
    //Console.WriteLine("upac: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // copy data entries to the tables from cache
    for (int z = 0; z < Tables.Count; z++)
    {
        for (int x = 0; x < Tables[z].Columns.Count; x++)
        {
            DBType type = Tables[z].Columns[x].Type;
            if (IsDataType(type))
            {
                Parallel.For(0, Tables[z].Rows.Count, y =>
                {
                    uint? k = (uint?)Tables[z].Rows[y][x];
                    object obj = null;
                    if (k != null)
                    {
                        if (uniqueEntries.ContainsKey((uint)k))
                        {
                            byte[] d = uniqueEntries[(uint)k];
                            if (d != null)
                            {
                                obj = BytesToDBType(type, d);
                            }
                        }
                    }
                    Tables[z].Rows[y][x] = obj;
                });
            }
        }
    }
    //Console.WriteLine("assi: " + sw.ElapsedMilliseconds);
    sw.Restart();

    // cleanup :>
    uniqueKeys = null;
    uniqueQueue = null;
    //uniqueEntries = null; // don't clean these up, in case data entries need to be looked up post-load
    headerInfo = null;
    tableInfos = null;
    fieldInfos = null;
    rowInfos = null;
    GC.Collect();
}
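// UIntFromBufferLE and UShortFromBufferLE are used by Read but not shown in this section; they are
// assumed to be plain little-endian reads from a byte[] at an optional offset. Minimal sketches
// under that assumption ("Sketch" names used to avoid clashing with the project's real helpers):
private static uint UIntFromBufferLESketch(ref byte[] buffer, int offset = 0)
{
    return (uint)(buffer[offset]
                  | buffer[offset + 1] << 8
                  | buffer[offset + 2] << 16
                  | buffer[offset + 3] << 24);
}

private static ushort UShortFromBufferLESketch(ref byte[] buffer, int offset = 0)
{
    return (ushort)(buffer[offset] | buffer[offset + 1] << 8);
}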