// Read up to BlockSize system records from the JSON token enumerator, classify them into sectors,
// and write any entries that could not be resolved purely from the in-memory cache to the DB.
//   cache          - sector cache shared across blocks
//   enumerator     - positioned JSON token stream (object-per-system)
//   grididallowed  - null = accept all grids, else per-gridid allow flags
//   tablesareempty - true if target tables are known empty (skips lookups downstream)
//   tablepostfix   - appended to table names to redirect to an alternate table set
//   maxdate        - ref: updated to the latest record date seen (via cpmaxdate copy)
//   nextsectorid   - ref: next sector id to allocate (via cpnextsectorid copy)
//   jr_eof         - out: true when the token stream is exhausted (or Limit hit)
// Returns the number of records accepted from this block.
private static int ProcessBlock(SectorCache cache,
                                IEnumerator <JToken> enumerator,
                                bool[] grididallowed,       // null = all, else grid bool value
                                bool tablesareempty,
                                string tablepostfix,
                                ref DateTime maxdate,       // updated with latest date
                                ref int nextsectorid,
                                out bool jr_eof)
{
    int recordstostore = 0;
    DbCommand selectSectorCmd = null;
    // work on local copies of the ref parameters; they are only written back at the end,
    // after the deferred DB pass below has completed successfully
    DateTime cpmaxdate = maxdate;
    int cpnextsectorid = nextsectorid;
    const int BlockSize = 10000;        // max records accepted per call
    int Limit = int.MaxValue;           // debug throttle - decremented per object seen
    var entries = new List <TableWriteData>();  // records needing a DB sector lookup
    jr_eof = false;
    while (true)
    {
        if (!enumerator.MoveNext())     // get next token, if not, stop eof
        {
            jr_eof = true;
            break;
        }
        JToken t = enumerator.Current;
        if (t.IsObject)                 // if start of object..
        {
            EDSMFileEntry d = new EDSMFileEntry();
            // valid record = deserialized ok, non-negative id, has a name, and z was filled in
            if (d.Deserialize(enumerator) && d.id >= 0 && d.name.HasChars() && d.z != int.MinValue)
            {
                int gridid = GridId.Id128(d.x, d.z);
                if (grididallowed == null || (grididallowed.Length > gridid && grididallowed[gridid]))  // allows a null or small grid
                {
                    TableWriteData data = new TableWriteData() { edsm = d, classifier = new EliteNameClassifier(d.name), gridid = gridid };
                    // try to resolve the sector from cache alone; if that fails, defer the
                    // record to the DB pass below (which can query the Sectors table)
                    if (!TryCreateNewUpdate(cache, data, tablesareempty, ref cpmaxdate, ref cpnextsectorid, out Sector sector))
                    {
                        entries.Add(data);
                    }
                    recordstostore++;
                }
            }
            if (--Limit == 0)           // debug limit reached - treat as eof
            {
                jr_eof = true;
                break;
            }
            if (recordstostore >= BlockSize)
            {
                break;
            }
        }
    }
    // second pass: resolve the deferred entries with a sector-id lookup against the DB
    SystemsDatabase.Instance.ExecuteWithDatabase(action: db =>
    {
        try
        {
            var cn = db.Connection;
            // parameterized select: sector id by name+gridid
            selectSectorCmd = cn.CreateSelect("Sectors" + tablepostfix, "id", "name = @sname AND gridid = @gid", null,
                                              new string[] { "sname", "gid" }, new DbType[] { DbType.String, DbType.Int32 });
            foreach (var entry in entries)
            {
                CreateNewUpdate(cache, selectSectorCmd, entry, tablesareempty, ref cpmaxdate, ref cpnextsectorid);
            }
        }
        finally
        {
            if (selectSectorCmd != null)
            {
                selectSectorCmd.Dispose();
            }
        }
    });
    // publish the updated ref values only now that both passes are done
    maxdate = cpmaxdate;
    nextsectorid = cpnextsectorid;
    return (recordstostore);
}
// JsonTextReader overload of ProcessBlock: read up to BlockSize system records from the
// JSON reader, classify into sectors, and defer cache-misses to a DB lookup pass.
//   jr_eof is ref (not out) here: a previous call's eof state short-circuits the read loop.
// Any parse exception is logged and treated as eof - the block read so far is still processed.
// Returns the number of records accepted from this block.
private static int ProcessBlock(SectorCache cache,
                                JsonTextReader jr,
                                bool[] grididallowed,       // null = all, else grid bool value
                                bool tablesareempty,
                                string tablepostfix,
                                ref DateTime maxdate,       // updated with latest date
                                ref int nextsectorid,
                                ref bool jr_eof)
{
    int recordstostore = 0;
    DbCommand selectSectorCmd = null;
    // local copies of the ref parameters; written back only after the DB pass completes
    DateTime cpmaxdate = maxdate;
    int cpnextsectorid = nextsectorid;
    const int BlockSize = 10000;        // max records accepted per call
    int Limit = int.MaxValue;           // debug throttle - decremented per object seen
    var entries = new List <TableWriteData>();  // records needing a DB sector lookup
    while (jr_eof == false)
    {
        try
        {
            if (jr.Read())
            {
                if (jr.TokenType == JsonToken.StartObject)
                {
                    EDSMFileEntry d = new EDSMFileEntry();
                    // valid record = deserialized ok, non-negative id, has a name, z filled in
                    if (d.Deserialize(jr) && d.id >= 0 && d.name.HasChars() && d.z != int.MinValue)
                    {
                        int gridid = GridId.Id128(d.x, d.z);
                        if (grididallowed == null || (grididallowed.Length > gridid && grididallowed[gridid]))  // allows a null or small grid
                        {
                            TableWriteData data = new TableWriteData() { edsm = d, classifier = new EliteNameClassifier(d.name), gridid = gridid };
                            // cache-only attempt first; on miss, defer to the DB pass below
                            if (!TryCreateNewUpdate(cache, data, tablesareempty, ref cpmaxdate, ref cpnextsectorid, out Sector sector))
                            {
                                entries.Add(data);
                            }
                            recordstostore++;
                        }
                    }
                    if (--Limit == 0)   // debug limit reached - treat as eof
                    {
                        jr_eof = true;
                        break;
                    }
                    if (recordstostore >= BlockSize)
                    {
                        break;
                    }
                }
            }
            else
            {
                jr_eof = true;
                break;
            }
        }
        catch (Exception ex)
        {
            // deliberate: a malformed file stops further reads but still stores what we got
            System.Diagnostics.Debug.WriteLine("EDSM JSON file exception " + ex.ToString());
            jr_eof = true;              // stop read, but let it continue to finish this section
        }
    }
    // second pass: resolve deferred entries with a sector-id lookup against the DB
    SystemsDatabase.Instance.ExecuteWithDatabase(action: db =>
    {
        try
        {
            var cn = db.Connection;
            // parameterized select: sector id by name+gridid
            selectSectorCmd = cn.CreateSelect("Sectors" + tablepostfix, "id", "name = @sname AND gridid = @gid", null,
                                              new string[] { "sname", "gid" }, new DbType[] { DbType.String, DbType.Int32 });
            foreach (var entry in entries)
            {
                CreateNewUpdate(cache, selectSectorCmd, entry, tablesareempty, ref cpmaxdate, ref cpnextsectorid);
            }
        }
        finally
        {
            if (selectSectorCmd != null)
            {
                selectSectorCmd.Dispose();
            }
        }
    });
    // publish the updated ref values only now that both passes are done
    maxdate = cpmaxdate;
    nextsectorid = cpnextsectorid;
    return (recordstostore);
}
// take old system table and turn to new. tablesareempty=false is normal, only set to true if using this code for checking replace algorithm
// Walks the legacy EdsmSystems/SystemNames tables grid-by-grid, reclassifies each system into the
// new sector scheme, and stores the results via StoreNewEntries.
//   cancelRequested - polled per grid; true aborts with return value -1
//   reportProgress  - optional progress text callback
//   tablepostfix    - appended to new table names to redirect to an alternate table set
//   tablesareempty  - true to presume the new tables are empty (skips lookup queries)
//   maxgridid       - debug: stop before processing this grid id
// Returns total records written, or -1 if cancelled.
public static long UpgradeDB102to200(Func <bool> cancelRequested, Action <string> reportProgress, string tablepostfix, bool tablesareempty = false, int maxgridid = int.MaxValue)
{
    var cache = new SectorCache();
    int nextsectorid = GetNextSectorID();
    long updates = 0;
    long Limit = long.MaxValue;                 // debug throttle on total records processed
    DateTime maxdate = DateTime.MinValue;       // we don't pass this back due to using the same date
    reportProgress?.Invoke("Begin System DB upgrade");
    List <int> gridids = DB.GridId.AllId();
    BaseUtils.AppTicks.TickCountLap("UTotal");
    //int debug_z = 0;
    foreach (int gridid in gridids)     // using grid id to keep chunks a good size.. can't read and write so can't just read the whole.
    {
        if (cancelRequested())
        {
            updates = -1;
            break;
        }
        if (gridid == maxgridid)        // for debugging
        {
            break;
        }
        int recordstostore = 0;
        DbCommand selectSectorCmd = null;
        DbCommand selectPrev = null;
        SystemsDatabase.Instance.ExecuteWithDatabase(db =>
        {
            try
            {
                var cn = db.Connection;
                // parameterized select: sector id by name+gridid, used per-record by CreateNewUpdate
                selectSectorCmd = cn.CreateSelect("Sectors" + tablepostfix, "id", "name = @sname AND gridid = @gid", null,
                                                  new string[] { "sname", "gid" }, new DbType[] { DbType.String, DbType.Int32 });
                // legacy read: all systems in this grid, joined to their names
                selectPrev = cn.CreateSelect("EdsmSystems s", "s.EdsmId,s.x,s.y,s.z,n.Name,s.UpdateTimeStamp", "s.GridId = @gid", null,
                                             new string[] { "gid" }, new DbType[] { DbType.Int32 },
                                             joinlist: new string[] { "LEFT OUTER JOIN SystemNames n ON n.EdsmId=s.EdsmId" });
                selectPrev.Parameters[0].Value = gridid;
                using (DbDataReader reader = selectPrev.ExecuteReader())        // find name:gid
                {
                    BaseUtils.AppTicks.TickCountLap("U1");
                    while (reader.Read())
                    {
                        try
                        {
                            EDSMFileEntry d = new EDSMFileEntry();
                            d.id = (long)reader[0];
                            d.x = (int)(long)reader[1];
                            d.y = (int)(long)reader[2];
                            d.z = (int)(long)reader[3];
                            d.name = (string)reader[4];
                            // legacy timestamps are seconds since 2015-01-01 UTC
                            d.date = new DateTime(2015, 1, 1, 0, 0, 0, DateTimeKind.Utc) + TimeSpan.FromSeconds((long)reader["UpdateTimestamp"]);
                            int grididentry = GridId.Id128(d.x, d.z);   // because i don't trust the previous gridid - it should be the same as the outer loop, but lets recalc
                            //if (!tablesareempty)  d.z = debug_z++;  // for debug checking
                            CreateNewUpdate(cache, selectSectorCmd, d, grididentry, tablesareempty, ref maxdate, ref nextsectorid);     // not using gridid on purpose to double check it.
                            recordstostore++;
                        }
                        catch (Exception ex)
                        {
                            System.Diagnostics.Debug.WriteLine("Reading prev table" + ex);
                        }
                    }
                }
            }
            finally
            {
                // FIX: selectPrev was previously leaked - only selectSectorCmd was disposed
                selectPrev?.Dispose();
                selectSectorCmd?.Dispose();
            }
        }, 5000);
        //System.Diagnostics.Debug.WriteLine("Reader took " + BaseUtils.AppTicks.TickCountLap("U1") + " in " + gridid + " " + recordpos + " total " + recordstostore);
        if (recordstostore >= 0)        // NOTE(review): always true - StoreNewEntries flushes the cache even with 0 new records; confirm intent
        {
            updates += StoreNewEntries(cache, tablepostfix, null);
            reportProgress?.Invoke("System DB upgrade processed " + updates);
            Limit -= recordstostore;
            if (Limit <= 0)
            {
                break;
            }
            System.Threading.Thread.Sleep(20);      // just sleepy for a bit to let others use the db
        }
        var tres1 = BaseUtils.AppTicks.TickCountLapDelta("U1");
        var tres2 = BaseUtils.AppTicks.TickCountFromLastLap("UTotal");
        System.Diagnostics.Debug.WriteLine("Sector " + gridid + " took " + tres1.Item1 + " store " + recordstostore + " total " + updates + " " + ((float)tres1.Item2 / (float)recordstostore) + " cumulative " + tres2);
    }
    reportProgress?.Invoke("System DB complete, processed " + updates);
    PutNextSectorID(nextsectorid);      // and store back
    return (updates);
}
// set tempostfix to use another set of tables
// Streams an EDSM JSON dump through jr, classifying systems into sectors and storing them in
// blocks of BlockSize, yielding to readers between blocks.
//   grididallowed  - null = all, else per-gridid allow flags
//   maxdate        - ref: updated with the latest record date seen
//   tablepostfix   - set to add on text to table names to redirect to another table
//   tablesareempty - set to presume table is empty, so we don't have to do look up queries
//   debugoutputfile- DEBUG builds only: file to log stored entries to
// Returns total records stored, or -1 if cancelled.
public static long ParseEDSMJSON(JsonTextReader jr,
                                 bool[] grididallowed,      // null = all, else grid bool value
                                 ref DateTime maxdate,      // updated with latest date
                                 Func <bool> cancelRequested,
                                 Action <string> reportProgress,
                                 string tablepostfix,
                                 bool tablesareempty = false,
                                 string debugoutputfile = null)
{
    sectoridcache = new Dictionary <long, Sector>();
    sectornamecache = new Dictionary <string, Sector>();
    int nextsectorid = GetNextSectorID();
    SQLiteConnectionSystem cn = new SQLiteConnectionSystem(mode: SQLLiteExtensions.SQLExtConnection.AccessMode.Writer);
    StreamWriter sw = null;
#if DEBUG
    try
    {
        if (debugoutputfile != null)
        {
            sw = new StreamWriter(debugoutputfile);
        }
    }
    catch
    {
    }
#endif
    long updates = 0;
    const int BlockSize = 100000;       // records stored per DB transaction block
    int Limit = int.MaxValue;           // debug throttle - decremented per object seen
    bool jr_eof = false;
    DbCommand selectSectorCmd = null;
    // FIX: previously cn/selectSectorCmd/sw were only disposed on the straight-line exit;
    // an exception from cancelRequested/StoreNewEntries/reportProgress leaked them all.
    try
    {
        // parameterized select: sector id by name+gridid
        selectSectorCmd = cn.CreateSelect("Sectors" + tablepostfix, "id", "name = @sname AND gridid = @gid", null,
                                          new string[] { "sname", "gid" }, new DbType[] { DbType.String, DbType.Int32 });
        while (jr_eof == false)
        {
            if (cancelRequested())
            {
                updates = -1;
                break;
            }
            int recordstostore = 0;
            while (true)
            {
                try
                {
                    if (jr.Read())      // collect a decent amount
                    {
                        if (jr.TokenType == JsonToken.StartObject)
                        {
                            EDSMFileEntry d = new EDSMFileEntry();
                            // valid record = deserialized ok, non-negative id, has a name, z filled in
                            if (d.Deserialize(jr) && d.id >= 0 && d.name.HasChars() && d.z != int.MinValue)
                            {
                                // NOTE(review): uses GridId.Id here where the other paths use Id128 - confirm intentional
                                int gridid = GridId.Id(d.x, d.z);
                                if (grididallowed == null || (grididallowed.Length > gridid && grididallowed[gridid]))      // allows a null or small grid
                                {
                                    CreateNewUpdate(selectSectorCmd, d, gridid, tablesareempty, ref maxdate, ref nextsectorid);
                                    recordstostore++;
                                }
                            }
                            if (--Limit == 0)       // debug limit reached - treat as eof
                            {
                                jr_eof = true;
                                break;
                            }
                            if (recordstostore >= BlockSize)
                            {
                                break;
                            }
                        }
                    }
                    else
                    {
                        jr_eof = true;
                        break;
                    }
                }
                catch (Exception ex)
                {
                    // deliberate: a malformed file stops further reads but still stores this section
                    System.Diagnostics.Debug.WriteLine("EDSM JSON file exception " + ex.ToString());
                    jr_eof = true;      // stop read, but let it continue to finish this section
                }
            }
            System.Diagnostics.Debug.WriteLine("Process " + BaseUtils.AppTicks.TickCountLap("L1") + " " + updates);
            if (recordstostore > 0)
            {
                updates += StoreNewEntries(cn, tablepostfix, sw);
                reportProgress?.Invoke("EDSM Star database updated " + updates);
            }
            if (jr_eof)
            {
                break;
            }
            if (SQLiteConnectionSystem.IsReadWaiting)
            {
                System.Threading.Thread.Sleep(20);      // just sleepy for a bit to let others use the db
            }
        }
        System.Diagnostics.Debug.WriteLine("Process " + BaseUtils.AppTicks.TickCountLap("L1") + " " + updates);
        reportProgress?.Invoke("EDSM Star database updated " + updates);
    }
    finally
    {
        if (sw != null)
        {
            sw.Close();
        }
        selectSectorCmd?.Dispose();
        cn.Dispose();
    }
    PutNextSectorID(nextsectorid);      // and store back
    sectoridcache = null;
    sectornamecache = null;
    return (updates);
}