// Runs continuously and archives (rolls up) fast-changing metric data in the repository.
// Exits when _shouldStop is set; reports failure to the manager on unexpected errors.
public void Work()
{
    List<int> keys;
    MetricGroup metricGroup;
    InstanceSchedule schedule;
    ArchiveWatermark archiveWatermark, archiveWatermarkNew;
    ArchiveOffset archiveOffset;

    _logger.Info("Archiver started");
    _reposConn = new SqlConnection(Configuration.GetReposConnectionString("Archiver"));

    while (!_shouldStop)
    {
        try
        {
            // this is to wait until the repository is ready to serve requests
            if (!Manager.IsRepositoryAccessible)
            {
                Thread.Sleep(250);
                continue;
            }

            if (_reposConn.State != ConnectionState.Open)
            {
                _reposConn.Open();
            }

            // Snapshot the watermark keys so the underlying collection may change
            // while we iterate without invalidating the enumeration
            keys = new List<int>(Configuration.archiveWatermarks.Keys);

            foreach (int key in keys)
            {
                if (_shouldStop)
                {
                    break;
                }

                // Entries may be removed concurrently; skip anything that is gone
                if (!Configuration.archiveWatermarks.TryGetValue(key, out archiveWatermark))
                {
                    continue;
                }

                if (!Configuration.archiveOffsets.TryGetValue(archiveWatermark.ArchiveOffsetId, out archiveOffset))
                {
                    continue;
                }

                if (!Configuration.timeTable.TryGetValue(archiveOffset.ScheduleId, out schedule))
                {
                    continue;
                }

                metricGroup = Configuration.metricGroups[schedule._metricGroupId];

                // Do not archive static and slow changing metrics
                if (metricGroup.changeSpeed != ChangeSpeed.Fast)
                {
                    continue;
                }

                // Check whether there is enough time to archive data between watermark and offset
                // Watermark + OffsetInMinutes + IntervalInSeconds < Now
                if (archiveWatermark.ArchivedToDate
                        .AddMinutes(archiveOffset.OffsetInMinutes)
                        .AddSeconds(archiveOffset.IntervalInSeconds)
                        .CompareTo(DateTime.Now) >= 0)
                {
                    continue;
                }

                // Never archive below the point the next archive level has already reached
                DateTime nextLevelArchiveToDateTime = Configuration.archiveWatermarks.GetNextLevelArchivedToDate(archiveWatermark.Id);
                DateTime archiveFromDateTime = nextLevelArchiveToDateTime > archiveWatermark.ArchivedToDate
                    ? nextLevelArchiveToDateTime
                    : archiveWatermark.ArchivedToDate;
                DateTime archiveToDateTime = DateTime.Now.AddMinutes(-1 * archiveOffset.OffsetInMinutes);

                _logger.Debug(
                    "TargetId: [{0}] MetricGroup: [{1}] ArchiveOffset: {2} Interval: {3} From: {4} To: {5}",
                    archiveWatermark.TargetId,
                    metricGroup.name,
                    archiveOffset.OffsetInMinutes,
                    archiveOffset.IntervalInSeconds,
                    archiveFromDateTime,
                    archiveToDateTime
                );

                // Re-check repository accessibility right before the (potentially long) archive call
                if (!Manager.IsRepositoryAccessible)
                {
                    Thread.Sleep(250);
                    break;
                }

                if (_reposConn.State != ConnectionState.Open)
                {
                    _reposConn.Open();
                }

                if (!Archive(archiveWatermark.TargetId, archiveOffset, metricGroup, archiveFromDateTime, archiveToDateTime))
                {
                    _logger.Debug("TargetId: [{0}] MetricGroup: [{1}] Failed to archive data", archiveWatermark.TargetId, metricGroup.name);
                    continue;
                }

                _logger.Debug("TargetId: [{0}] MetricGroup: [{1}] Archived successfully", archiveWatermark.TargetId, metricGroup.name);

                // Update in-memory version of ArchiveWatermark (ArchivedToDate value)
                // NOTE(review): if ArchiveWatermark is a reference type, archiveWatermarkNew
                // aliases archiveWatermark and TryUpdate compares the object to itself —
                // confirm ArchiveWatermark is a struct (value copy) as this code assumes.
                archiveWatermarkNew = archiveWatermark;
                archiveWatermarkNew.ArchivedToDate = archiveToDateTime;
                Configuration.archiveWatermarks.TryUpdate(key, archiveWatermarkNew, archiveWatermark);
            } // foreach

            // Sleep ~25s between passes, in 250ms slices so shutdown stays responsive
            for (int i = 0; i < 100; i++)
            {
                if (_shouldStop)
                {
                    break;
                }

                Thread.Sleep(250);
            }
        }
        catch (Exception e)
        {
            // A broken/closed connection means the repository went away: flag it and retry.
            // Anything else is unexpected: log it, report failure and stop this worker.
            if (_reposConn != null)
            {
                switch (_reposConn.State)
                {
                    case ConnectionState.Broken:
                    case ConnectionState.Closed:
                        Manager.SetRepositoryAccessibility(false);
                        break;
                    default:
                        _logger.Error(e.Message);
                        _logger.Error(e.StackTrace);
                        _mgr.ReportFailure("Archiver");
                        return;
                }
            }
            else
            {
                _logger.Error(e.Message);
                _logger.Error(e.StackTrace);
                _mgr.ReportFailure("Archiver");
                return;
            }
        }
    } // end of while (!_shouldStop)

    // Release the repository connection on shutdown (consistent with the Writer worker)
    if (_reposConn != null)
    {
        ((IDisposable)_reposConn).Dispose();
    }

    _logger.Info("Archiver stopped");
} // end of Work method
// Runs continuously and purges expired data from the repository: for every schedule,
// deletes rows older than the schedule's retention window (in hours).
// Exits when _shouldStop is set; reports failure to the manager on unexpected errors.
public void Work()
{
    int targetId;
    int timeTableId;
    string sqlStmt;
    MetricGroup metricGroup;
    InstanceSchedule schedule;
    List<Tuple<DateTime, int>> lastPurgeList;

    _logger.Info("Purger started");
    _reposConn = new SqlConnection(Configuration.GetReposConnectionString("Purger"));

    while (!_shouldStop)
    {
        try
        {
            // this is to wait until the repository is ready to serve requests
            if (!Manager.IsRepositoryAccessible)
            {
                Thread.Sleep(250);
                continue;
            }

            if (this._reposConn.State != ConnectionState.Open)
            {
                this._reposConn.Open();
            }

            lastPurgeList = Configuration.timeTable.LastPurge();

            foreach (Tuple<DateTime, int> lastPurge in lastPurgeList)
            {
                timeTableId = lastPurge.Item2;

                // Skip schedules that have been deleted
                if (!Configuration.timeTable.TryGetValue(timeTableId, out schedule))
                {
                    continue;
                }

                // skip schedules that were purged recently
                if (DateTime.Compare(lastPurge.Item1.AddMinutes(Configuration.purgeInterval), DateTime.Now) > 0)
                {
                    continue;
                }

                targetId = schedule._targetId;
                metricGroup = Configuration.metricGroups[schedule._metricGroupId];

                // Prepare SQL statement. Fast metrics are keyed by "dt", slow ones by
                // "endDate". Dates come from SqlServerProbe.DateTimeToString (not user
                // input); table names cannot be parameterized in T-SQL.
                switch (metricGroup.changeSpeed)
                {
                    case ChangeSpeed.Slow:
                        sqlStmt = "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
                            + " WHERE endDate <= '"
                            + SqlServerProbe.DateTimeToString(DateTime.Now.AddHours(-1 * schedule._schedule.retention))
                            + "'";
                        break;
                    case ChangeSpeed.Fast:
                        sqlStmt = "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
                            + " WHERE dt <= '"
                            + SqlServerProbe.DateTimeToString(DateTime.Now.AddHours(-1 * schedule._schedule.retention))
                            + "'";
                        break;
                    default:
                        throw new Exception("Unsupported change speed");
                }

                // execute SQL statement
                try
                {
                    if (!Manager.IsRepositoryAccessible)
                    {
                        Thread.Sleep(250);
                        break;
                    }

                    if (_reposConn.State != ConnectionState.Open)
                    {
                        _reposConn.Open();
                    }

                    using (SqlCommand cmd = _reposConn.CreateCommand())
                    {
                        cmd.CommandType = CommandType.Text;
                        cmd.CommandText = sqlStmt;
                        cmd.CommandTimeout = 300;
                        int rowCount = cmd.ExecuteNonQuery();
                        _logger.Debug("Rows deleted from table " + SqlServerProbe.DataTableName(targetId, metricGroup) + ": " + rowCount.ToString());
                    }

                    // update last purge time
                    Configuration.timeTable.SetLastPurge(lastPurge.Item2, DateTime.Now);
                }
                catch (SqlException e)
                {
                    if (e.Number == 208)
                    {
                        // 208 = invalid object name: the data table has not been created yet
                        _logger.Debug("Table " + SqlServerProbe.DataTableName(targetId, metricGroup) + " does not exist");
                    }
                    else if (this._reposConn.State != ConnectionState.Open)
                    {
                        Manager.SetRepositoryAccessibility(false);
                    }
                    else
                    {
                        _logger.Error("Could not purge " + SqlServerProbe.DataTableName(targetId, metricGroup) + " due to error: " + e.Message);
                    }
                }
                catch (Exception e)
                {
                    if (this._reposConn.State == ConnectionState.Open)
                    {
                        _logger.Error("Could not purge " + SqlServerProbe.DataTableName(targetId, metricGroup) + " due to error: " + e.Message);
                    }
                    else
                    {
                        Manager.SetRepositoryAccessibility(false);
                    }

                    continue;
                }

                Thread.Sleep(250); // we don't want to stress the repository too much
            } // foreach

            Thread.Sleep(250);
        }
        catch (Exception e)
        {
            // A broken/closed connection means the repository went away: flag it and retry.
            // Anything else is unexpected: log it, report failure and stop this worker.
            if (this._reposConn != null)
            {
                switch (this._reposConn.State)
                {
                    case System.Data.ConnectionState.Broken:
                    case System.Data.ConnectionState.Closed:
                        Manager.SetRepositoryAccessibility(false);
                        break;
                    default:
                        _logger.Error(e.Message);
                        _logger.Error(e.StackTrace);
                        _mgr.ReportFailure("Purger");
                        return;
                }
            }
            else
            {
                _logger.Error(e.Message);
                _logger.Error(e.StackTrace);
                _mgr.ReportFailure("Purger");
                return;
            }
        } // end of catch
    } // end of while (!_shouldStop)

    // Release the repository connection on shutdown (consistent with the Writer worker)
    if (this._reposConn != null)
    {
        ((IDisposable)this._reposConn).Dispose();
    }

    _logger.Info("Purger stopped");
}
} // end of Work method

/// <summary>
/// Aggregates fast-changing metric data in [archiveFrom, archiveTo] into per-interval
/// averages and replaces the raw rows with the aggregated ones, then advances the
/// ArchivedToDate watermark — all in a single transaction.
/// </summary>
/// <param name="targetId">Target whose data table is being archived.</param>
/// <param name="archiveOffset">Archive level definition (interval, offset, id).</param>
/// <param name="metricGroup">Metric group; only ChangeSpeed.Fast groups are archived.</param>
/// <param name="archiveFrom">Start of the range to aggregate (inclusive).</param>
/// <param name="archiveTo">End of the range to aggregate (inclusive).</param>
/// <returns>true on success; false if skipped or on any error.</returns>
private bool Archive(int targetId, ArchiveOffset archiveOffset, MetricGroup metricGroup, DateTime archiveFrom, DateTime archiveTo)
{
    // BUGFIX: the from/to parameters were declared in reverse order
    // (archiveTo, archiveFrom) while the caller passes (from, to) positionally,
    // which inverted the BETWEEN range and set the watermark to the from-date.

    // Do not archive static and slow changing metrics
    if (metricGroup.changeSpeed != ChangeSpeed.Fast)
    {
        return false;
    }

    // Compose SQL statement
    // 1) Save aggregated data in a temp table
    string sqlStmt = "SELECT " + RoundDate("dt", archiveOffset.IntervalInSeconds) + " as dt, " + Environment.NewLine;

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    // Add AVG(column names)
    foreach (var item in metricGroup.metrics)
    {
        sqlStmt += "AVG(" + item.Value.name.Replace(' ', '_') + ") as " + item.Value.name.Replace(' ', '_') + ", ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2) + Environment.NewLine; // remove last comma
    sqlStmt += "INTO #AVG_TMP_" + metricGroup.dataTableName + Environment.NewLine;
    sqlStmt += "FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + Environment.NewLine;
    sqlStmt += "WHERE dt BETWEEN @dateFrom AND @dateTo" + Environment.NewLine;
    sqlStmt += "GROUP BY " + RoundDate("dt", archiveOffset.IntervalInSeconds) + ", ";

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2) + ";" + Environment.NewLine + Environment.NewLine; // remove last comma

    // 2) Delete aggregated records
    sqlStmt += "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + " WHERE dt BETWEEN @dateFrom AND @dateTo;" + Environment.NewLine + Environment.NewLine;

    // 3) Copy records from the temp table
    sqlStmt += "INSERT INTO " + SqlServerProbe.DataTableName(targetId, metricGroup) + " (dt, ";

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    // Add column names
    foreach (var item in metricGroup.metrics)
    {
        sqlStmt += item.Value.name.Replace(' ', '_') + ", ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2); // remove last comma
    sqlStmt += ")" + Environment.NewLine;
    sqlStmt += "SELECT dt, ";

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    // Add column names
    foreach (var item in metricGroup.metrics)
    {
        sqlStmt += item.Value.name.Replace(' ', '_') + ", ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2) + Environment.NewLine; // remove last comma
    sqlStmt += "FROM #AVG_TMP_" + metricGroup.dataTableName + Environment.NewLine + Environment.NewLine;

    // 4) Update ArchivedToDate value
    sqlStmt += "UPDATE dbo.ArchiveWatermarks SET ArchivedToDate = @dateTo WHERE ArchiveOffsetId = @archiveOffsetId and TargetId = @targetId;" + Environment.NewLine + Environment.NewLine;

    // 5) BUGFIX: drop the temp table. Temp tables are session-scoped and the
    //    connection is kept open across iterations, so without this the next
    //    SELECT ... INTO #AVG_TMP_<table> for the same metric group would fail.
    sqlStmt += "DROP TABLE #AVG_TMP_" + metricGroup.dataTableName + ";";

    _logger.Trace(sqlStmt);

    // Execute SQL statement
    SqlTransaction reposTran = null;
    SqlCommand reposCmd = null;

    try
    {
        if (_reposConn.State != ConnectionState.Open)
        {
            _reposConn.Open();
        }

        reposTran = _reposConn.BeginTransaction();
        reposCmd = _reposConn.CreateCommand();
        reposCmd.Transaction = reposTran;
        reposCmd.CommandType = CommandType.Text;
        reposCmd.CommandText = sqlStmt;
        reposCmd.CommandTimeout = 300;

        reposCmd.Parameters.Add("@targetId", SqlDbType.Int);
        reposCmd.Parameters["@targetId"].Value = targetId;
        reposCmd.Parameters.Add("@archiveOffsetId", SqlDbType.Int);
        reposCmd.Parameters["@archiveOffsetId"].Value = archiveOffset.Id;
        // Round the lower bound down to the interval so whole buckets are aggregated
        reposCmd.Parameters.Add("@dateFrom", SqlDbType.DateTime2, 6);
        reposCmd.Parameters["@dateFrom"].Value = RoundDate(archiveFrom, archiveOffset.IntervalInSeconds);
        reposCmd.Parameters.Add("@dateTo", SqlDbType.DateTime2, 6);
        reposCmd.Parameters["@dateTo"].Value = archiveTo;

        reposCmd.Prepare();
        reposCmd.ExecuteNonQuery();
        reposTran.Commit();
    }
    catch (SqlException e)
    {
        if (_reposConn.State != ConnectionState.Open)
        {
            Manager.SetRepositoryAccessibility(false);
            return false;
        }

        switch (e.Number)
        {
            case 208:
                // Ignore missing tables. Target might be recently initialized
                break;
            default:
                _logger.Error("SqlException: {0} ErrorCode: {1}", e.Message, e.Number);
                break;
        }

        if (reposTran != null)
        {
            // Transaction might be rolled back if commit fails. In this case second rollback will fail
            try
            {
                reposTran.Rollback();
            }
            catch (Exception)
            {
                _logger.Debug("Transaction has been rolled back already");
            }
        }

        return false;
    }
    catch (Exception e)
    {
        if (_reposConn.State == ConnectionState.Open)
        {
            _logger.Error(e.Message);
            _logger.Error(e.StackTrace);
        }
        else
        {
            Manager.SetRepositoryAccessibility(false);
        }

        return false;
    }
    finally
    {
        if (reposCmd != null)
        {
            ((IDisposable)reposCmd).Dispose();
        }

        if (reposTran != null)
        {
            ((IDisposable)reposTran).Dispose();
        }
    }

    return true;
} // end of Archive method
/// <summary>Runs continuously and processes messages in the queue,
/// writing each dequeued probe result to the repository. Exits when
/// _shouldStop is set; reports failure to the manager on unexpected errors.</summary>
public void ProcessQueue()
{
    ProbeResultsDataMessage msg;

    _logger.Info("Writer started");
    this.reposConn = new SqlConnection(Configuration.GetReposConnectionString("Writer"));

    while (!_shouldStop)
    {
        try
        {
            // this is to wait until the repository is ready to serve requests
            while (!Manager.IsRepositoryAccessible)
            {
                Thread.Sleep(250);

                if (_shouldStop)
                {
                    break;
                }
            }

            if (_shouldStop)
            {
                break;
            }

            if (this.reposConn.State != System.Data.ConnectionState.Open)
            {
                this.reposConn.Open();
            }

            // Drain everything currently queued
            while (_dataQueue.TryDequeue(out msg))
            {
                var data = msg.Data;
                var target = msg.Target;
                var metricGroup = msg.MetricGroup;

                _logger.Debug("{0} in queue. Date of data being processed: {1}", _dataQueue.Count, data.probeDateTime);

                // Multi-row groups have a dedicated writer; single-row groups are
                // dispatched by their change speed
                if (metricGroup.isMultiRow)
                {
                    WriteMultipleRowsToRepository(target.id, metricGroup, msg.Data);
                }
                else
                {
                    switch (metricGroup.changeSpeed)
                    {
                        case ChangeSpeed.Slow:
                            WriteSlowSingleRowToRepository(target.id, metricGroup, msg.Data);
                            break;
                        case ChangeSpeed.Fast:
                            WriteFastSingleRowToRepository(target.id, metricGroup, msg.Data);
                            break;
                        default:
                            throw new Exception("Unexpected change speed attribute. Metric: " + metricGroup.name + ". Target: " + target.name + ".");
                    }
                }
            }

            // Wait 15s between queue drains, in 250ms slices so shutdown stays
            // responsive (a single Sleep(15000) delayed shutdown by up to 15s)
            for (int i = 0; i < 60; i++)
            {
                if (_shouldStop)
                {
                    break;
                }

                Thread.Sleep(250);
            }
        }
        catch (Exception e)
        {
            // A broken/closed connection means the repository went away: flag it and retry.
            // Anything else is unexpected: log it, report failure and stop this worker.
            if (this.reposConn != null)
            {
                switch (this.reposConn.State)
                {
                    case System.Data.ConnectionState.Broken:
                    case System.Data.ConnectionState.Closed:
                        Manager.SetRepositoryAccessibility(false);
                        break;
                    default:
                        _logger.Error(e.Message);
                        _logger.Error(e.StackTrace);
                        _mgr.ReportFailure("Writer");
                        return;
                }
            }
            else
            {
                _logger.Error(e.Message);
                _logger.Error(e.StackTrace);
                _mgr.ReportFailure("Writer");
                return;
            }
        } // end of catch
    } // end of while(!_shouldStop)

    if (this.reposConn != null)
    {
        ((IDisposable)this.reposConn).Dispose();
    }

    _logger.Info("Writer stopped");
} // end of ProcessQueue function