/// <summary>
/// Main web-server loop: starts the HTTP listener and dispatches each incoming
/// request to a worker task until <c>_shouldStop</c> is set. On any error the
/// listener is shut down and the failure is reported to the manager.
/// </summary>
public void ProcessRequests() {
    _logger.Info("Web Server started");
    try {
        _processRequestTasks = new List<Task>();
        _ws = new WebService(_cfg);
        _logger.Debug("Starting listener");
        _listener = StartListener();
        while (!_shouldStop) {
            var contextTask = _listener.GetContextAsync();
            // Poll for completion in 50 ms slices so a stop request is noticed
            // even while no client is connecting.
            bool stopRequested = false;
            while (!contextTask.IsCompleted) {
                if (_shouldStop) {
                    // FIX: this path previously returned immediately, leaking the
                    // listener (Stop()/Close() never ran) and skipping the
                    // "Web Server stopped" log line. Break out so the normal
                    // shutdown path below executes instead.
                    stopRequested = true;
                    break;
                }
                CollectFinishedTasks();
                contextTask.Wait(50);
            }
            if (stopRequested) {
                break;
            }
            // Dispatch new processing task
            var task = Task.Factory.StartNew(() => ProcessRequest(contextTask.Result));
            _processRequestTasks.Add(task);
            CollectFinishedTasks();
            _logger.Debug("Number of running tasks {0}", _processRequestTasks.Count);
        }
        _listener.Stop();
        _listener.Close();
    } catch (Exception e) {
        _logger.Error(e.Message);
        _logger.Debug(e.StackTrace);
        // Best-effort listener teardown before reporting the failure.
        if (_listener != null) {
            if (_listener.IsListening) {
                _listener.Stop();
            }
            _listener.Close();
        }
        _mgr.ReportFailure("WebServer");
    }
    _logger.Info("Web Server stopped");
}
// Analyzer worker loop: continuously drains the incoming data queue, forwarding
// each non-empty result to the Writer, until _shouldStop is set. Any unhandled
// error is logged and reported to the manager as an "Analyzer" failure.
public void ProcessQueue() {
    var processedCount = 0;
    try {
        _logger.Info("Analyzer started");
        while (!_shouldStop) {
            ProbeResultsDataMessage message;
            // Nothing queued: back off briefly instead of spinning.
            if (!_dataQueue.TryDequeue(out message)) {
                Thread.Sleep(50);
                continue;
            }
            // Skip empty results
            if (message.Data.NumberOfRows == 0) {
                continue;
            }
            Writer.Enqueue(message.Target, message.MetricGroup, message.Data);
            processedCount++;
            _logger.Debug("{0} messages processed. {1} in queue. Date of data being processed: {2}", processedCount, _dataQueue.Count, message.Data.probeDateTime);
        }
        _logger.Info("Analyzer stopped");
    } catch (Exception e) {
        _logger.Error(e.Message);
        _logger.Error(e.StackTrace);
        mgr.ReportFailure("Analyzer");
    }
}
// Archiver worker loop: continuously rolls up fast-changing metric data in the
// repository, driven by the configured archive watermarks/offsets/schedules.
// (Original header comment said "purges" — purging is done by the Purger; this
// worker logs and reports as "Archiver".)
public void Work() {
    List<int> keys;
    MetricGroup metricGroup;
    InstanceSchedule schedule;
    ArchiveWatermark archiveWatermark, archiveWatermarkNew;
    ArchiveOffset archiveOffset;
    _logger.Info("Archiver started");
    _reposConn = new SqlConnection(Configuration.GetReposConnectionString("Archiver"));
    while (!_shouldStop) {
        try {
            // this is to wait until the repository is ready to serve requests
            if (!Manager.IsRepositoryAccessible) {
                Thread.Sleep(250);
                continue;
            }
            if (_reposConn.State != ConnectionState.Open) {
                _reposConn.Open();
            }
            // go over TimeTable to get all target/metricGroup pairs.
            // Snapshot the keys so the underlying collection may change while we iterate.
            keys = new List<int>(Configuration.archiveWatermarks.Keys);
            foreach (int key in keys) {
                if (_shouldStop) {
                    break;
                }
                // Re-resolve each link of the watermark -> offset -> schedule chain;
                // skip entries that were deleted after the snapshot was taken.
                if (!Configuration.archiveWatermarks.TryGetValue(key, out archiveWatermark)) {
                    continue;
                }
                if (!Configuration.archiveOffsets.TryGetValue(archiveWatermark.ArchiveOffsetId, out archiveOffset)) {
                    continue;
                }
                if (!Configuration.timeTable.TryGetValue(archiveOffset.ScheduleId, out schedule)) {
                    continue;
                }
                metricGroup = Configuration.metricGroups[schedule._metricGroupId];
                // Do not archive static and slow changing metrics
                if (metricGroup.changeSpeed != ChangeSpeed.Fast) {
                    continue;
                }
                // Check whether there is enough time to archive data between watermark and offset
                // Watermark + OffsetInMinutes + IntervalInSeconds < Now
                if (archiveWatermark.ArchivedToDate
                        .AddMinutes(archiveOffset.OffsetInMinutes)
                        .AddSeconds(archiveOffset.IntervalInSeconds)
                        .CompareTo(DateTime.Now) >= 0) {
                    continue;
                }
                // Start from whichever is later: the next archive level's high-water
                // mark or this level's own ArchivedToDate.
                DateTime nextLevelArchiveToDateTime = Configuration.archiveWatermarks.GetNextLevelArchivedToDate(archiveWatermark.Id);
                DateTime archiveFromDateTime = nextLevelArchiveToDateTime > archiveWatermark.ArchivedToDate
                    ? nextLevelArchiveToDateTime
                    : archiveWatermark.ArchivedToDate;
                DateTime archiveToDateTime = DateTime.Now.AddMinutes(-1 * archiveOffset.OffsetInMinutes);
                _logger.Debug(
                    "TargetId: [{0}] MetricGroup: [{1}] ArchiveOffset: {2} Interval: {3} From: {4} To: {5}",
                    archiveWatermark.TargetId,
                    metricGroup.name,
                    archiveOffset.OffsetInMinutes,
                    archiveOffset.IntervalInSeconds,
                    archiveFromDateTime,
                    archiveToDateTime
                );
                // this is to wait until the repository is ready to serve requests
                // (break — not continue — so the whole pass restarts after the idle loop below)
                if (!Manager.IsRepositoryAccessible) {
                    Thread.Sleep(250);
                    break;
                }
                if (_reposConn.State != ConnectionState.Open) {
                    _reposConn.Open();
                }
                if (!Archive(archiveWatermark.TargetId, archiveOffset, metricGroup, archiveFromDateTime, archiveToDateTime)) {
                    _logger.Debug("TargetId: [{0}] MetricGroup: [{1}] Failed to archive data", archiveWatermark.TargetId, metricGroup.name);
                    continue;
                }
                _logger.Debug("TargetId: [{0}] MetricGroup: [{1}] Archived successfully", archiveWatermark.TargetId, metricGroup.name);
                // Update in-memory version of ArchiveWatermark (ArchivedToDate value)
                // NOTE(review): this pattern assumes ArchiveWatermark is a value type —
                // if it is a class, the assignment below aliases the same object, so
                // TryUpdate would compare the mutated instance against itself. Confirm.
                archiveWatermarkNew = archiveWatermark;
                archiveWatermarkNew.ArchivedToDate = archiveToDateTime;
                Configuration.archiveWatermarks.TryUpdate(key, archiveWatermarkNew, archiveWatermark);
            } // foreach
            // Idle between passes (100 x 250 ms), checking the stop flag each slice.
            for (int i = 0; i < 100; i++) {
                if (_shouldStop) {
                    break;
                }
                Thread.Sleep(250);
            }
        } catch (Exception e) {
            // A broken/closed connection means the repository went away: flag it
            // and keep retrying. Any other failure is fatal for this worker —
            // log, report, and exit.
            if (_reposConn != null) {
                switch (_reposConn.State) {
                    case ConnectionState.Broken:
                    case ConnectionState.Closed:
                        Manager.SetRepositoryAccessibility(false);
                        break;
                    default:
                        _logger.Error(e.Message);
                        _logger.Error(e.StackTrace);
                        _mgr.ReportFailure("Archiver");
                        return;
                }
            } else {
                _logger.Error(e.Message);
                _logger.Error(e.StackTrace);
                _mgr.ReportFailure("Archiver");
                return;
            }
        }
    } // end of while (!_shouldStop)
    _logger.Info("Archiver stopped");
} // end of Work method
// Scheduler worker loop: continuously launches probe delegates whose scheduled
// start time is due (or nearly due), then harvests delegates that have finished
// and re-schedules or removes their time-table entries.
public void Launcher() {
    string probeCode;
    int timeTableId;
    object probeClassReference;
    ProbeDelegate probeDelegate;
    List<ActiveDelegate> activeDelegates;
    List<Tuple<DateTime, int>> nextRuns;
    try {
        activeDelegates = new List<ActiveDelegate>();
        _logger.Info("Scheduler started");
        while (!this._shouldStop) {
            // start up Probes
            nextRuns = Configuration.timeTable.NextRuns;
            if (nextRuns.Count > 0) {
                // .ToList() snapshots the collection so RemoveNextRun below cannot
                // invalidate the enumeration.
                foreach (Tuple<DateTime, int> nextRun in nextRuns.ToList()) {
                    // start up a new Probe if there is less than TimeDelta milliseconds to the start or start time has passed already
                    // Use Ticks, otherwise new day values will get you into always false check (comparing late yesterday 23:59:59 with today)
                    // TimeDelta * 10000 converts milliseconds to ticks (1 tick = 100 ns).
                    if (nextRun.Item1.Ticks < DateTime.Now.Ticks + TimeDelta * 10000) {
                        timeTableId = nextRun.Item2;
                        // Check whether schedule is still there
                        if (!Configuration.timeTable.ContainsKey(timeTableId)) {
                            continue;
                        }
                        var timeTable = Configuration.timeTable[timeTableId];
                        var target = Configuration.targets[timeTable._targetId];
                        var metricGroup = Configuration.metricGroups[timeTable._metricGroupId];
                        probeCode = metricGroup.probeCode;
                        probeClassReference = Configuration.probes.GetClassReferenceByCode(probeCode);
                        _logger.Debug("Delegate for schedule {0} is being started", timeTableId);
                        // Bind the probe instance's "Probe" method via reflection and
                        // invoke it asynchronously through the delegate.
                        Delegate d = probeClassReference.GetType().GetMethod("Probe").CreateDelegate(typeof(ProbeDelegate), probeClassReference);
                        probeDelegate = (ProbeDelegate)d;
                        IAsyncResult result = probeDelegate.BeginInvoke(timeTableId, target, metricGroup, null, null);
                        activeDelegates.Add(new ActiveDelegate(timeTableId, probeDelegate, result));
                        Configuration.timeTable.RemoveNextRun(nextRun);
                        _logger.Debug("{0} delegates are running", activeDelegates.Count);
                    }
                }
            }
            // check for ended delegates
            // (snapshot again — activeDelegates is modified inside the loop)
            foreach (ActiveDelegate ad in activeDelegates.ToList()) {
                if (ad.result.IsCompleted) {
                    _logger.Debug("Delegate for schedule " + ad.timeTableId.ToString() + " finished execution");
                    try {
                        // EndInvoke must be called to observe any exception the
                        // probe threw; it is logged rather than propagated.
                        ad.probe.EndInvoke(ad.result);
                    } catch (Exception e) {
                        _logger.Error("EndInvoke failed with error: " + e.Message);
                    }
                    activeDelegates.Remove(ad);
                    if (Configuration.timeTable.ContainsKey(ad.timeTableId)) {
                        if (Configuration.targets.ContainsKey(Configuration.timeTable[ad.timeTableId]._targetId)) {
                            Configuration.timeTable.SetNextRun(ad.timeTableId);
                        } else {
                            // Remove all target schedules if target has been deleted
                            Configuration.timeTable.RemoveTargetSchedules(Configuration.timeTable[ad.timeTableId]._targetId);
                        }
                    } else {
                        // Remove
                        // NOTE(review): ContainsKey was just false for this id, so this
                        // Remove is presumably a defensive no-op — confirm intent.
                        Configuration.timeTable.Remove(ad.timeTableId);
                    }
                }
            }
            // Poll at half the scheduling granularity.
            Thread.Sleep(TimeDelta / 2);
        }
        _logger.Info("Scheduler stopped");
    } catch (Exception e) {
        _logger.Error(e.Message);
        _logger.Error(e.StackTrace);
        _mgr.ReportFailure("Scheduler");
    }
} // end of Launcher method
// Purger worker loop: continuously deletes expired rows from per-target metric
// tables in the repository, based on each schedule's retention setting and the
// configured purge interval.
public void Work() {
    int targetId;
    int timeTableId;
    string sqlStmt;
    MetricGroup metricGroup;
    InstanceSchedule schedule;
    List<Tuple<DateTime, int>> lastPurgeList;
    _logger.Info("Purger started");
    _reposConn = new SqlConnection(Configuration.GetReposConnectionString("Purger"));
    while (!_shouldStop) {
        try {
            // this is to wait until the repository is ready to serve requests
            if (!Manager.IsRepositoryAccessible) {
                Thread.Sleep(250);
                continue;
            }
            if (this._reposConn.State != ConnectionState.Open) {
                this._reposConn.Open();
            }
            lastPurgeList = Configuration.timeTable.LastPurge();
            foreach (Tuple<DateTime, int> lastPurge in lastPurgeList) {
                timeTableId = lastPurge.Item2;
                // Skip schedules that have been deleted
                if (!Configuration.timeTable.TryGetValue(timeTableId, out schedule)) {
                    continue;
                }
                // skip schedules that were purged recently
                if (DateTime.Compare(lastPurge.Item1.AddMinutes(Configuration.purgeInterval), DateTime.Now) > 0) {
                    continue;
                }
                targetId = schedule._targetId;
                metricGroup = Configuration.metricGroups[schedule._metricGroupId];
                // prepare SQL statement
                // NOTE(review): the statement is built by string concatenation. The
                // pieces come from internal config and SqlServerProbe formatting, not
                // user input, but a parameterized date would still be safer.
                switch (metricGroup.changeSpeed) {
                    case ChangeSpeed.Slow:
                        // Slow-changing tables keep validity ranges; purge by endDate.
                        sqlStmt = "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
                            + " WHERE endDate <= '"
                            + SqlServerProbe.DateTimeToString(DateTime.Now.AddHours(-1 * schedule._schedule.retention))
                            + "'";
                        break;
                    case ChangeSpeed.Fast:
                        // Fast-changing tables are time-series; purge by dt.
                        sqlStmt = "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
                            + " WHERE dt <= '"
                            + SqlServerProbe.DateTimeToString(DateTime.Now.AddHours(-1 * schedule._schedule.retention))
                            + "'";
                        break;
                    default:
                        throw new Exception("Unsupported change speed");
                }
                // execute SQL statement
                try {
                    if (!Manager.IsRepositoryAccessible) {
                        Thread.Sleep(250);
                        break;
                    }
                    if (_reposConn.State != ConnectionState.Open) {
                        _reposConn.Open();
                    }
                    using (SqlCommand cmd = _reposConn.CreateCommand()) {
                        cmd.CommandType = CommandType.Text;
                        cmd.CommandText = sqlStmt;
                        cmd.CommandTimeout = 300;
                        int rowCount = cmd.ExecuteNonQuery();
                        _logger.Debug("Rows deleted from table " + SqlServerProbe.DataTableName(targetId, metricGroup) + ": " + rowCount.ToString());
                    }
                    // update last purge time
                    Configuration.timeTable.SetLastPurge(lastPurge.Item2, DateTime.Now);
                } catch (SqlException e) {
                    // SQL Server error 208 = "Invalid object name": the data table
                    // simply has not been created yet — not an error for the purger.
                    if (e.Number == 208) {
                        _logger.Debug("Table " + SqlServerProbe.DataTableName(targetId, metricGroup) + " does not exist");
                    } else if (this._reposConn.State != ConnectionState.Open) {
                        // Connection dropped mid-statement: mark repository offline.
                        Manager.SetRepositoryAccessibility(false);
                    } else {
                        _logger.Error("Could not purge " + SqlServerProbe.DataTableName(targetId, metricGroup) + " due to error: " + e.Message);
                    }
                } catch (Exception e) {
                    if (this._reposConn.State == ConnectionState.Open) {
                        _logger.Error("Could not purge " + SqlServerProbe.DataTableName(targetId, metricGroup) + " due to error: " + e.Message);
                    } else {
                        Manager.SetRepositoryAccessibility(false);
                    }
                    continue;
                }
                Thread.Sleep(250); // we don't want to stress the repository too much
            } // foreach
            Thread.Sleep(250);
        } catch (Exception e) {
            // A broken/closed connection means the repository went away: flag it
            // and keep retrying. Any other failure is fatal for this worker.
            if (this._reposConn != null) {
                switch (this._reposConn.State) {
                    case System.Data.ConnectionState.Broken:
                    case System.Data.ConnectionState.Closed:
                        Manager.SetRepositoryAccessibility(false);
                        break;
                    default:
                        _logger.Error(e.Message);
                        _logger.Error(e.StackTrace);
                        _mgr.ReportFailure("Purger");
                        return;
                }
            } else {
                _logger.Error(e.Message);
                _logger.Error(e.StackTrace);
                _mgr.ReportFailure("Purger");
                return;
            }
        } // end of catch
    } // end of while (!_shouldStop)
    _logger.Info("Purger stopped");
}
/// <summary>
/// Runs continuously and processes messages in the queue: drains the data queue
/// and writes each message to the repository, dispatching on the metric group's
/// layout (multi-row vs. single-row, slow vs. fast change speed).
/// A broken/closed repository connection is flagged and retried; any other
/// failure is reported to the manager and ends the worker.
/// </summary>
public void ProcessQueue() {
    ProbeResultsDataMessage msg;
    _logger.Info("Writer started");
    this.reposConn = new SqlConnection(Configuration.GetReposConnectionString("Writer"));
    try {
        while (!_shouldStop) {
            try {
                // this is to wait until the repository is ready to serve requests
                while (!Manager.IsRepositoryAccessible) {
                    Thread.Sleep(250);
                    if (_shouldStop) {
                        break;
                    }
                }
                if (_shouldStop) {
                    break;
                }
                if (this.reposConn.State != System.Data.ConnectionState.Open) {
                    this.reposConn.Open();
                }
                while (_dataQueue.TryDequeue(out msg)) {
                    var data = msg.Data;
                    var target = msg.Target;
                    var metricGroup = msg.MetricGroup;
                    _logger.Debug("{0} in queue. Date of data being processed: {1}", _dataQueue.Count, data.probeDateTime);
                    if (metricGroup.isMultiRow) {
                        WriteMultipleRowsToRepository(target.id, metricGroup, msg.Data);
                    } else {
                        switch (metricGroup.changeSpeed) {
                            case ChangeSpeed.Slow:
                                WriteSlowSingleRowToRepository(target.id, metricGroup, msg.Data);
                                break;
                            case ChangeSpeed.Fast:
                                WriteFastSingleRowToRepository(target.id, metricGroup, msg.Data);
                                break;
                            default:
                                throw new Exception("Unexpected change speed attribute. Metric: " + metricGroup.name + ". Target: " + target.name + ".");
                        }
                    }
                }
                // Queue drained; idle before the next pass.
                Thread.Sleep(15000);
            } catch (Exception e) {
                // Broken/closed connection: mark the repository inaccessible and
                // keep looping. Anything else is fatal for this worker.
                if (this.reposConn != null) {
                    switch (this.reposConn.State) {
                        case System.Data.ConnectionState.Broken:
                        case System.Data.ConnectionState.Closed:
                            Manager.SetRepositoryAccessibility(false);
                            break;
                        default:
                            _logger.Error(e.Message);
                            _logger.Error(e.StackTrace);
                            _mgr.ReportFailure("Writer");
                            return; // finally below still disposes the connection
                    }
                } else {
                    _logger.Error(e.Message);
                    _logger.Error(e.StackTrace);
                    _mgr.ReportFailure("Writer");
                    return;
                }
            }
        } // end of while(!_shouldStop)
    } finally {
        // FIX: the fatal-error paths above previously returned without reaching
        // the disposal code, leaking the SqlConnection. The finally guarantees
        // cleanup on every exit path.
        if (this.reposConn != null) {
            ((IDisposable)this.reposConn).Dispose();
        }
    }
    _logger.Info("Writer stopped");
} // end of ProcessQueue function