public void Save()
{
    if (_modified)
    {
        // Rewrite the whole file: one CSV line per record in the form
        // "<escaped key>",<time>,<job id>.
        using (FileStream s = new FileStream(_filename, FileMode.Create))
        using (StreamWriter w = new StreamWriter(s))
            foreach (KeyValuePair<string, Record> kvp in _records)
            {
                w.WriteLine("\"" + CSVData.Escape(kvp.Key) + "\"," +
                            kvp.Value.Time.ToString() + "," +
                            kvp.Value.JobId.ToString());
            }

        SaveSummary();
        _modified = false;
    }
}
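// CSVData.Escape is not shown in this excerpt. A minimal, hypothetical
// stand-in, assuming it performs standard CSV quote doubling (an assumption,
// not the project's actual implementation):
internal static class CsvEscapeSketch
{
    // Double embedded quotes so a value survives inside the surrounding
    // quotes that Save() adds around each key.
    public static string Escape(string s)
    {
        return s.Replace("\"", "\"\"");
    }
}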
public void Dispose()
{
    RebuildSummary();
    _dataDir = null;
    _dataFilename = null;
    if (_data != null) _data.Dispose();
    _data = null;
    _metaData = null;
    _summary = null;
    if (_cache != null) _cache.Dispose();
    _cache = null;
}
public void Dispose()
{
    // Rebuild the summary lazily, only if something changed since it was last built.
    if (_needSummaryRebuild)
    {
        _summary.Rebuild(this);
        _needSummaryRebuild = false;
    }
    if (_data != null) _data.Dispose();
    _data = null;
    _metaData = null;
    _summary = null;
    if (_cache != null) _cache.Dispose();
    _cache = null;
}
public Job(string dataDir, uint id = 0, bool readOnly = false)
{
    if (!Directory.Exists(dataDir)) { throw new Exception("Data directory does not exist."); }
    if (id == 0) { throw new Exception("Job must have non-zero id."); }

    _readOnly = readOnly;
    _dataDir = dataDir;
    _metaData = new MetaData(dataDir, id);
    _summary = new Summary(dataDir, this);

    // If the summary is rebuilt, it will load all the data,
    // which we can forget for now.
    if (_data != null)
    {
        _data.Dispose();
        _data = null;
    }

    _cache = new JobCache(dataDir, this);
}
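// Hypothetical usage sketch (not from the original source): Job exposes a
// Dispose() above, so a caller would typically scope an instance with
// `using` so the lazy summary rebuild and the cache/data cleanup run when
// the block exits. This assumes Job implements IDisposable.
internal static class JobUsageSketch
{
    public static void Touch(string dataDir, uint experimentId)
    {
        using (Job job = new Job(dataDir, experimentId, readOnly: true))
        {
            // Inspect or update the job here; Dispose() runs on exit.
        }
    }
}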
public void Download(SQLInterface sql)
{
    // Global.Say("Downloading #" + _metaData.Id);

    // Pull the experiment's metadata row.
    Dictionary<string, Object> r = sql.Read(
        "SELECT " +
        "ID,SubmissionTime,SharedDir,Binary,Parameters,Timeout,Memout," +
        "Cluster,ClusterJobId,Nodegroup,Locality,Longparams FROM Experiments " +
        "WHERE ID=" + _metaData.Id.ToString() + ";");

    if (SQLInterface.getuint(ref r, "ID") != _metaData.Id)
    {
        throw new Exception("Job ID mismatch");
    }

    _metaData.SubmissionTime = Convert.ToDateTime(r["SubmissionTime"], Global.culture);
    _metaData.BaseDirectory = r["SharedDir"].ToString();
    _metaData.BinaryId = Convert.ToUInt32(r["Binary"]);
    // Short parameter strings live in Parameters; long ones spill over into Longparams.
    if (r["Parameters"].Equals(DBNull.Value))
    {
        _metaData.Parameters = r["Longparams"].ToString();
    }
    else
    {
        _metaData.Parameters = r["Parameters"].ToString();
    }
    _metaData.Timeout = Convert.ToUInt32(r["Timeout"]);
    _metaData.Memoryout = Convert.ToUInt32(r["Memout"]);
    _metaData.Cluster = r["Cluster"].ToString();
    _metaData.ClusterJobId = SQLInterface.getuint(ref r, "ClusterJobId");
    _metaData.Nodegroup = r["Nodegroup"].ToString();
    _metaData.Locality = r["Locality"].ToString();
    _metaData.isFinished = false;
    _metaData.Save();
    r.Clear();

    // Fetch result rows in batches and append them to the local CSV data;
    // rebuild the summary only if anything new arrived.
    _data = new CSVData(_dataFilename);
    bool have_new_data = false;
    while (GetBatch(sql) > 0)
    {
        have_new_data = true;
    }
    if (have_new_data)
    {
        _summary.Rebuild(this);
    }

    string ids = _metaData.Id.ToString();

    // Decide whether the cluster job has finished.
    bool clusterDone = false;
    if (_metaData.Cluster != "" && _metaData.ClusterJobId != 0)
    {
        try
        {
            r = sql.Read("SELECT COUNT(1) FROM JobQueue WHERE ExperimentID=" + ids);
            if ((int)r.First().Value != 0)
            {
                // There are still queued entries for this experiment.
                clusterDone = false;
            }
            else
            {
                Scheduler scheduler = new Scheduler();
                scheduler.Connect(_metaData.Cluster);
                ISchedulerJob job = scheduler.OpenJob((int)_metaData.ClusterJobId);
                JobState state = job.State;
                if (state == JobState.Finished || state == JobState.Finishing ||
                    state == JobState.Failed || state == JobState.Canceled ||
                    state == JobState.Canceling)
                {
                    clusterDone = true;
                }
            }
        }
        catch (SchedulerException ex)
        {
            if (ex.Code == ErrorCode.Operation_InvalidJobId)
            {
                // The cluster no longer knows the job; treat it as done.
                clusterDone = true;
            }
            else
            {
                Global.Say("Job considered not finished because the scheduler threw: " + ex.Message);
            }
        }
        catch
        {
            clusterDone = false;
        }
    }

    if (clusterDone)
    {
        // Delete the experiment only if the job on the cluster is done and there are no more jobs.
        Dictionary<string, object> q = sql.Read(
            "(SELECT (SELECT COUNT(1) FROM Data WHERE ExperimentID=" + ids + ") + " +
            "(SELECT COUNT(1) FROM JobQueue WHERE ExperimentID=" + ids + "))");
        if (q.Count > 0 && (int)q.First().Value == 0)
        {
            // Cluster is done & database is done.
            sql.Query("DELETE FROM Experiments WHERE ID=" + ids);
            _metaData.isFinished = true;
            _metaData.Save();
        }
    }
}
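// Hypothetical driver sketch (not from the original source): refresh one
// experiment and let Dispose() rebuild the summary and release the CSV data
// and cache. SQLInterface construction is omitted because its constructor is
// not part of this excerpt; an already-connected instance is passed in.
internal static class DownloadSketch
{
    public static void Sync(SQLInterface sql, string dataDir, uint experimentId)
    {
        using (Job job = new Job(dataDir, experimentId))
        {
            // Refreshes metadata, appends new result batches, and marks the
            // experiment finished once the cluster and database are drained.
            job.Download(sql);
        }
    }
}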