// Construct a Job for the given experiment ID and immediately download its data.
public Job(uint id, SQLInterface sql)
{
    _metaData.Id = id;
    Download(sql);
}
protected int GetBatch(SQLInterface sql)
{
    Stopwatch timer = Stopwatch.StartNew();
    List<int> ids = new List<int>();
    SqlDataReader r = null;
    uint limit = 1000;
    string order = "ORDER BY ID";

    try
    {
        r = sql.GetReader("SELECT TOP " + limit + " Data.ID as ID,s as Filename,Runtime,Returnvalue,ResultCode," +
                          "stdout,stderr,SAT,UNSAT,UNKNOWN,TargetSAT,TargetUNSAT,TargetUNKNOWN " +
                          "FROM Data,Strings " +
                          "WHERE ExperimentID=" + _metaData.Id.ToString() + " AND FilenameP=Strings.ID " + order + ";");
        if (!r.HasRows) { r.Close(); return 0; }
        // Global.Say("Query time: " + (DateTime.Now - before).TotalSeconds + " sec");

        while (r.Read())
        {
            string fn = (string)r["Filename"];
            fn = fn.Substring(fn.IndexOf('\\') + 1); // Remove the top-level `smtlib-latest' directory.
            _data.AddRow(fn,
                         SQLInterface.getint(ref r, "Returnvalue"),
                         SQLInterface.getdouble(ref r, "Runtime"),
                         SQLInterface.getuint(ref r, "ResultCode"),
                         SQLInterface.getuint(ref r, "SAT"),
                         SQLInterface.getuint(ref r, "UNSAT"),
                         SQLInterface.getuint(ref r, "UNKNOWN"),
                         SQLInterface.getuint(ref r, "TargetSAT"),
                         SQLInterface.getuint(ref r, "TargetUNSAT"),
                         SQLInterface.getuint(ref r, "TargetUNKNOWN"),
                         SQLInterface.getstring(ref r, "stdout"),
                         SQLInterface.getstring(ref r, "stderr"));
            ids.Add(Convert.ToInt32(r["ID"]));
        }
        r.Close();
    }
    catch (SqlException ex)
    {
        if (ex.Number != -2) throw; // -2 is a timeout, which is tolerated; rethrow anything else (preserving the stack trace).
    }

    if (r != null && !r.IsClosed) r.Close();

    // Delete the rows that were just downloaded, collapsing consecutive IDs into ranges to keep the query short.
    int i = 0;
    string query = "";
    while (i < ids.Count)
    {
        int range_start = ids[i];
        int range_end = range_start;
        while (i < ids.Count - 1 && ids[i + 1] == range_end + 1) { range_end++; i++; }
        if (query == "")
            query = "DELETE FROM Data WHERE (ID>=" + range_start + " AND ID<=" + range_end + ") ";
        else
            query += "OR (ID>=" + range_start + " AND ID<=" + range_end + ") ";
        i++;
    }
    if (query != "") sql.Query(query);
    // Global.Say("Delete time: " + (DateTime.Now - b).TotalSeconds + " sec");
    // Global.Say("Downloaded " + ids.Count + " entries for experiment " + eid + ".");

    timer.Stop();
    _downloadBatches++;
    _downloadTime += timer.Elapsed;
    return ids.Count;
}
public void Download(SQLInterface sql)
{
    // Global.Say("Downloading #" + _metaData.Id);

    // Read the experiment's metadata.
    Dictionary<string, Object> r = sql.Read("SELECT " +
        "ID,SubmissionTime,SharedDir,Binary,Parameters,Timeout,Memout," +
        "Cluster,ClusterJobId,Nodegroup,Locality,Longparams FROM Experiments " +
        "WHERE ID=" + _metaData.Id.ToString() + ";");
    if (SQLInterface.getuint(ref r, "ID") != _metaData.Id)
        throw new Exception("Job ID mismatch");

    _metaData.SubmissionTime = Convert.ToDateTime(r["SubmissionTime"], Global.culture);
    _metaData.BaseDirectory = r["SharedDir"].ToString();
    _metaData.BinaryId = Convert.ToUInt32(r["Binary"]);
    if (r["Parameters"].Equals(DBNull.Value))
        _metaData.Parameters = r["Longparams"].ToString();
    else
        _metaData.Parameters = r["Parameters"].ToString();
    _metaData.Timeout = Convert.ToUInt32(r["Timeout"]);
    _metaData.Memoryout = Convert.ToUInt32(r["Memout"]);
    _metaData.Cluster = r["Cluster"].ToString();
    _metaData.ClusterJobId = SQLInterface.getuint(ref r, "ClusterJobId");
    _metaData.Nodegroup = r["Nodegroup"].ToString();
    _metaData.Locality = r["Locality"].ToString();
    _metaData.isFinished = false;
    _metaData.Save();
    r.Clear();

    // Download result rows in batches until the Data table has no more rows for this experiment.
    _data = new CSVData(_dataFilename);
    bool have_new_data = false;
    while (GetBatch(sql) > 0)
        have_new_data = true;
    if (have_new_data)
        _summary.Rebuild(this);

    // Determine whether the corresponding cluster job has finished.
    string ids = _metaData.Id.ToString();
    bool clusterDone = false;
    if (_metaData.Cluster != "" && _metaData.ClusterJobId != 0)
    {
        try
        {
            r = sql.Read("SELECT COUNT(1) FROM JobQueue WHERE ExperimentID=" + ids);
            if ((int)r.First().Value != 0)
            {
                clusterDone = false; // There are still queued entries for this experiment.
            }
            else
            {
                Scheduler scheduler = new Scheduler();
                scheduler.Connect(_metaData.Cluster);
                ISchedulerJob job = scheduler.OpenJob((int)_metaData.ClusterJobId);
                JobState state = job.State;
                if (state == JobState.Finished || state == JobState.Finishing ||
                    state == JobState.Failed || state == JobState.Canceled ||
                    state == JobState.Canceling)
                    clusterDone = true;
            }
        }
        catch (SchedulerException ex)
        {
            if (ex.Code == ErrorCode.Operation_InvalidJobId)
                clusterDone = true; // The cluster no longer knows the job ID, so the job is long gone.
            else
                Global.Say("Job considered not finished because the scheduler threw: " + ex.Message);
        }
        catch
        {
            clusterDone = false;
        }
    }

    if (clusterDone)
    {
        // Delete the experiment only if the job on the cluster is done and there are no more jobs.
        Dictionary<string, object> q = sql.Read("(SELECT (SELECT COUNT(1) FROM Data WHERE ExperimentID=" + ids + ") + " +
                                                "(SELECT COUNT(1) FROM JobQueue WHERE ExperimentID=" + ids + "))");
        if (q.Count > 0 && (int)q.First().Value == 0)
        {
            // Cluster is done & database is done.
            sql.Query("DELETE FROM Experiments WHERE ID=" + ids);
            _metaData.isFinished = true;
            _metaData.Save();
        }
    }
}
static void Main(string[] args)
{
    try
    {
        SQLInterface sql = new SQLInterface(config.db);

        if (!Directory.Exists(config.datadir))
            Directory.CreateDirectory(config.datadir);

        // Download new data for all of this user's jobs.
        SortedSet<Job> myJobs = sql.FindJobs(config.datadir, config.username);
        foreach (Job j in myJobs)
        {
            j.Download(sql);
            // Global.Say(string.Format("Downloaded {0} batches for job #{1}. Average batch time: {2} sec.", j.BatchCount, j.MetaData.Id, j.AverageBatchTime));
        }

        Jobs jobs = new Jobs(config.datadir); // Includes unfinished jobs.

        if (myJobs.Count > 0)
        {
            // Point jobs at or after the newest downloaded job, and without a reference yet,
            // to the job immediately preceding them.
            uint lastId = 0;
            foreach (Job j in jobs)
            {
                if (j.MetaData.Id >= myJobs.Last().MetaData.Id && j.MetaData.Reference == 0)
                {
                    j.MetaData.Reference = lastId;
                    j.MetaData.Save();
                }
                lastId = j.MetaData.Id;
            }

            Records records = new Records(config.datadir);
            foreach (Job j in myJobs)
            {
                if (j.MetaData.isFinished)
                {
                    // Finished jobs: send a report if it is interesting and update the records.
                    Report r = new Report(j);
                    if (r.IsInteresting)
                        r.SendTo(config.developers);
                    records.Update(j);
                }
                else
                {
                    // Unfinished jobs: requeue them if the cluster scheduler canceled them.
                    try
                    {
                        string cluster = j.MetaData.Cluster;
                        uint cluster_jid = j.MetaData.ClusterJobId;
                        if (cluster != "" && cluster_jid != 0)
                        {
                            Scheduler scheduler = new Scheduler();
                            scheduler.Connect(cluster);
                            ISchedulerJob job = scheduler.OpenJob(Convert.ToInt32(cluster_jid));
                            if (job.State == JobState.Canceled && job.ErrorMessage.StartsWith("Canceled by the scheduler"))
                            {
                                Global.Say("Requeueing job #" + j.MetaData.Id + " after the scheduler canceled it (# requeues = " + job.RequeueCount + ").");
                                try { job.Requeue(); }
                                catch (Exception ex) { Console.WriteLine("requeue-exception: " + ex.Message); }
                            }
                        }
                    }
                    catch (SchedulerException) { /* Ignore. */ }
                }
            }
            records.Save();
        }

        Aggregate();
    }
    catch (Exception ex)
    {
        Global.Say("Caught exception: " + ex.Message);
    }
}
public override void Download(SQLInterface sql)
{
    throw new Exception("RecordJob does not support downloading from an SQL DB.");
}