} // end of GenerateSqlSingleRowSlow function

// Generates INSERT statement for a single data row - fast changing metric
private static string GenerateSqlSingleRowFast(int targetId, MetricGroup metricGroup, ProbeResultingData data)
{
    string dataSqlStmt = "INSERT INTO " + SqlServerProbe.DataTableName(targetId, metricGroup) + " (dt,";

    for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
    {
        dataSqlStmt += metricGroup.metrics[i].name.Replace(' ', '_') + ",";
    }

    dataSqlStmt = dataSqlStmt.Remove(dataSqlStmt.Length - 1); // remove last comma
    dataSqlStmt += ")" + Environment.NewLine + "VALUES ('" + SqlServerProbe.DateTimeToString(data.probeDateTime) + "',";

    if (metricGroup.NumberOfMetrics != data.NumberOfColumns)
    {
        throw new Exception("Number of metrics doesn't match number of columns in probe results");
    }

    // add metric values
    for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
    {
        dataSqlStmt += SqlServerProbe.DataValueToString(metricGroup.metrics[i].type, data.values[0, metricGroup.NumberOfMultiRowKeys + metricGroup.NumberOfMultiRowKeyAttributes + i]) + ",";
    }

    dataSqlStmt = dataSqlStmt.Remove(dataSqlStmt.Length - 1); // remove last comma
    dataSqlStmt += ")";

    return(dataSqlStmt);
} // end of GenerateSqlSingleRowFast function
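// Illustrative output only (table/column names and the exact DateTimeToString format are
// hypothetical): for a metric group with a single metric "CPU mils" stored in a table
// named dbo.SQLServerActivity_0, the method above would produce something like:
//
//   INSERT INTO dbo.SQLServerActivity_0 (dt,CPU_mils)
//   VALUES ('2013-05-20 18:00:00',42)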
// Returns a query that selects the latest data record for a single-row realtime metric
private static string QueryToLoadSingleRowRealtime(int targetId, MetricGroup metricGroup)
{
    string query = "SELECT TOP 1 ";

    for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
    {
        query += metricGroup.metrics[i].name.Replace(' ', '_') + ", ";
    }

    query = query.Remove(query.Length - 2); // remove last comma and space
    query += " FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + " ORDER BY ";

    switch (metricGroup.changeSpeed)
    {
        case ChangeSpeed.Slow:
            query += "startDate";
            break;
        case ChangeSpeed.Fast:
            query += "dt";
            break;
        default:
            throw new Exception("Only Slow and Fast change speeds have been implemented so far");
    }

    query += " DESC";

    return(query);
}
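// Example of the generated query (hypothetical table and metric names) for a fast
// changing metric group with metrics "Wait Time ms" and "Waiting Tasks":
//
//   SELECT TOP 1 Wait_Time_ms, Waiting_Tasks FROM dbo.SQLServerWaitStats_0 ORDER BY dt DESC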
// returns a set of datetime-value tuples in a JSON string
// param0: MetricGroupName - MetricGroup->Name
// param1: MetricName - MetricGroup->Metrics->Name
// param2: TargetId - Targets->Id
// param3: StartDateTime - YYYYMMDDHHMM
// param4: EndDateTime - YYYYMMDDHHMM
// param5: Interval - M in minutes
// test string ok     http://localhost:3128/ws/fastsingle/Range/SQL%20Server%20Activity/CPU%20mils/0/201305201800/201305201900/5
// test string not ok http://localhost:3128/ws/fastsingle/Range/SQL%20Server%20Activity/CPU%20mils/0/201305201800/201305M900/5
private WebServiceResult Range(string[] parameters)
{
    if (parameters.Count() < 6)
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): not enough parameters. Format is MetricGroupName/MetricName/TargetId/StartDateTime/EndDateTime/IntervalInMinutes"));
    }

    Regex r = new Regex("^[0-9]*$");

    // check that supplied target id is valid
    if (!r.IsMatch(parameters[2]) || !Configuration.targets.ContainsId(Convert.ToInt32(parameters[2])))
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): TargetId is either not numeric or doesn't exist"));
    }

    if (!r.IsMatch(parameters[3]) || !r.IsMatch(parameters[4]) || !r.IsMatch(parameters[5]))
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): at least one of the parameters is not numeric. Format is TargetId/StartDateTime/EndDateTime/IntervalInMinutes"));
    }

    if (parameters[3].Length != 12 || parameters[4].Length != 12)
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): StartDateTime and EndDateTime must be in YYYYMMDDHHMM format"));
    }

    // look up metric group by name
    int metricGroupId = Configuration.metricGroups[parameters[0]].id;

    // look up metric by name
    string columnName = parameters[1].Replace(' ', '_');

    // prepare parameters
    SqlParameters sqlParameters = new SqlParameters
    {
        { "@tablename", SqlServerProbe.DataTableName(Convert.ToInt32(parameters[2]), Configuration.metricGroups[metricGroupId]) },
        { "@columnname", columnName },
        { "@start_dt", SqlServerProbe.FormatDate(parameters[3]) },
        { "@end_dt", SqlServerProbe.FormatDate(parameters[4]) },
        { "@interval", parameters[5] }
    };

    // execute procedure and return results
    return(GetData(System.Data.CommandType.StoredProcedure, "dbo.GetRangeSingleRowFastCumulative", sqlParameters));
}
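// Example: the "test string ok" URL above maps to
//   parameters[0] = "SQL Server Activity"  (metric group),
//   parameters[1] = "CPU mils"             (metric, becomes column CPU_mils),
//   parameters[2] = "0"                    (target id),
//   parameters[3] = "201305201800", parameters[4] = "201305201900",
//   parameters[5] = "5"                    (5-minute interval),
// and ends up executing dbo.GetRangeSingleRowFastCumulative with those values bound.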
} // end of CreateCacheTableSingleRowRealtime

/// <summary>Returns cache key name.
/// For single-row metrics the cache is common for all targets (table name only, no schema).
/// Multi-row metrics each have their own cache.</summary>
/// <param name="targetId">target id or -1 for single-row metrics</param>
/// <param name="metricGroup">metric group</param>
/// <param name="cacheType">data/dictionary</param>
public static string GetCacheKey(int targetId, MetricGroup metricGroup, CacheType cacheType = CacheType.Data)
{
    switch (cacheType)
    {
        case CacheType.Data:
            if (metricGroup.isMultiRow)
            {
                return(SqlServerProbe.DataTableName(targetId, metricGroup));
            }
            else
            {
                return(metricGroup.dataTableName);
            }
        case CacheType.Dictionary:
            return(SqlServerProbe.DictTableName(targetId, metricGroup));
        default:
            throw new Exception("Unsupported cache type");
    }
} // end of GetCacheKey method
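// Usage sketch (hypothetical table names): for a multi-row metric group the data cache
// key is the per-target data table name, e.g. GetCacheKey(3, waitStatsGroup) might yield
// "dbo.SQLServerWaitStats_3", while for a single-row group GetCacheKey(-1, activityGroup)
// returns the shared metricGroup.dataTableName, so all targets share one cache entry.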
} // end of ProcessQueue function

#endregion public methods declarations

#region private static methods declarations

/// <summary>Generates UPDATE or INSERT statement for a single data row - slow changing metric</summary>
private static string GenerateSqlSingleRowSlow(int targetId, MetricGroup metricGroup, int dataMatches, ProbeResultingData data)
{
    string dataSqlStmt;

    if (dataMatches == 0) // just update endDate when current data matches the data stored in the repository
    {
        dataSqlStmt = "UPDATE " + SqlServerProbe.DataTableName(targetId, metricGroup)
            + " SET endDate = '" + SqlServerProbe.DateTimeToString(data.probeDateTime) + "'"
            + " WHERE startDate = (SELECT MAX(startDate) FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + ")";
    }
    else
    {
        if (metricGroup.NumberOfMetrics != data.NumberOfColumns)
        {
            throw new Exception("Number of metrics doesn't match number of columns in probe results");
        }

        dataSqlStmt = "INSERT INTO " + SqlServerProbe.DataTableName(targetId, metricGroup) + " (";

        for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
        {
            dataSqlStmt += metricGroup.metrics[i].name.Replace(' ', '_') + ",";
        }

        dataSqlStmt += "startDate,endDate)" + Environment.NewLine + "VALUES (";

        // add metric values
        for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
        {
            dataSqlStmt += SqlServerProbe.DataValueToString(metricGroup.metrics[i].type, data.values[0, metricGroup.NumberOfMultiRowKeys + metricGroup.NumberOfMultiRowKeyAttributes + i]) + ",";
        }

        // startDate,endDate
        dataSqlStmt += "'" + SqlServerProbe.DateTimeToString(data.probeDateTime) + "','" + SqlServerProbe.DateTimeToString(data.probeDateTime) + "')";
    }

    return(dataSqlStmt);
} // end of GenerateSqlSingleRowSlow function
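// Example of the two statements this method can emit (table, column, and value
// rendering are hypothetical; actual formatting comes from DateTimeToString and
// DataValueToString):
//
//   -- dataMatches == 0: extend the current interval
//   UPDATE dbo.DatabaseInfo_0 SET endDate = '2013-05-20 18:00:00'
//   WHERE startDate = (SELECT MAX(startDate) FROM dbo.DatabaseInfo_0)
//
//   -- dataMatches != 0: open a new interval
//   INSERT INTO dbo.DatabaseInfo_0 (Status,startDate,endDate)
//   VALUES ('ONLINE','2013-05-20 18:00:00','2013-05-20 18:00:00')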
} // end of Work method

private bool Archive(int targetId, ArchiveOffset archiveOffset, MetricGroup metricGroup, DateTime archiveTo, DateTime archiveFrom)
{
    // Do not archive static and slow changing metrics
    if (metricGroup.changeSpeed != ChangeSpeed.Fast)
    {
        return(false);
    }

    // Compose SQL statement
    // Save aggregated data in a temp table
    string sqlStmt = "SELECT " + RoundDate("dt", archiveOffset.IntervalInSeconds) + " as dt, " + Environment.NewLine;

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    // Add AVG(column names)
    foreach (var item in metricGroup.metrics)
    {
        sqlStmt += "AVG(" + item.Value.name.Replace(' ', '_') + ") as " + item.Value.name.Replace(' ', '_') + ", ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2) + Environment.NewLine; // remove last comma
    sqlStmt += "INTO #AVG_TMP_" + metricGroup.dataTableName + Environment.NewLine;
    sqlStmt += "FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + Environment.NewLine;
    sqlStmt += "WHERE dt BETWEEN @dateFrom AND @dateTo" + Environment.NewLine;
    sqlStmt += "GROUP BY " + RoundDate("dt", archiveOffset.IntervalInSeconds) + ", ";

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2) + ";" + Environment.NewLine + Environment.NewLine; // remove last comma

    // Delete aggregated records
    sqlStmt += "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + " WHERE dt BETWEEN @dateFrom AND @dateTo;" + Environment.NewLine + Environment.NewLine;

    // Copy records from the temp table
    sqlStmt += "INSERT INTO " + SqlServerProbe.DataTableName(targetId, metricGroup) + " (dt, ";

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    // Add column names
    foreach (var item in metricGroup.metrics)
    {
        sqlStmt += item.Value.name.Replace(' ', '_') + ", ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2); // remove last comma
    sqlStmt += ")" + Environment.NewLine;
    sqlStmt += "SELECT dt, ";

    // add dictId if the metric group has multiple rows
    if (metricGroup.isMultiRow)
    {
        sqlStmt += "dictId, ";
    }

    // Add column names
    foreach (var item in metricGroup.metrics)
    {
        sqlStmt += item.Value.name.Replace(' ', '_') + ", ";
    }

    sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2) + Environment.NewLine; // remove last comma
    sqlStmt += "FROM #AVG_TMP_" + metricGroup.dataTableName + Environment.NewLine + Environment.NewLine;

    // Update ArchivedToDate value
    sqlStmt += "UPDATE dbo.ArchiveWatermarks SET ArchivedToDate = @dateTo WHERE ArchiveOffsetId = @archiveOffsetId and TargetId = @targetId;";

    _logger.Trace(sqlStmt);

    // Execute SQL statement
    SqlTransaction reposTran = null;
    SqlCommand reposCmd = null;

    try
    {
        if (_reposConn.State != ConnectionState.Open)
        {
            _reposConn.Open();
        }

        reposTran = _reposConn.BeginTransaction();
        reposCmd = _reposConn.CreateCommand();
        reposCmd.Transaction = reposTran;
        reposCmd.CommandType = CommandType.Text;
        reposCmd.CommandText = sqlStmt;
        reposCmd.CommandTimeout = 300;

        reposCmd.Parameters.Add("@targetId", SqlDbType.Int);
        reposCmd.Parameters["@targetId"].Value = targetId;
        reposCmd.Parameters.Add("@archiveOffsetId", SqlDbType.Int);
        reposCmd.Parameters["@archiveOffsetId"].Value = archiveOffset.Id;
        reposCmd.Parameters.Add("@dateFrom", SqlDbType.DateTime2, 6);
        reposCmd.Parameters["@dateFrom"].Value = RoundDate(archiveFrom, archiveOffset.IntervalInSeconds);
        reposCmd.Parameters.Add("@dateTo", SqlDbType.DateTime2, 6);
        reposCmd.Parameters["@dateTo"].Value = archiveTo;

        reposCmd.Prepare();
        reposCmd.ExecuteNonQuery();
        reposTran.Commit();
    }
    catch (SqlException e)
    {
        if (_reposConn.State != ConnectionState.Open)
        {
            Manager.SetRepositoryAccessibility(false);
            return(false);
        }

        switch (e.Number)
        {
            case 208:
                // Ignore missing tables. Target might be recently initialized
                break;
            default:
                _logger.Error("SqlException: {0} ErrorCode: {1}", e.Message, e.Number);
                break;
        }

        if (reposTran != null)
        {
            // Transaction might be rolled back if commit fails. In this case second rollback will fail
            try
            {
                reposTran.Rollback();
            }
            catch (Exception)
            {
                _logger.Debug("Transaction has been rolled back already");
            }
        }

        return(false);
    }
    catch (Exception e)
    {
        if (_reposConn.State == ConnectionState.Open)
        {
            _logger.Error(e.Message);
            _logger.Error(e.StackTrace);
        }
        else
        {
            Manager.SetRepositoryAccessibility(false);
        }

        return(false);
    }
    finally
    {
        if (reposCmd != null)
        {
            ((IDisposable)reposCmd).Dispose();
        }

        if (reposTran != null)
        {
            ((IDisposable)reposTran).Dispose();
        }
    }

    return(true);
} // end of Archive method
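// Sketch of the batch this method composes for a multi-row group (table and column
// names are hypothetical; <rounded dt> stands for whatever expression RoundDate
// generates for the archive interval):
//
//   SELECT <rounded dt> as dt, dictId, AVG(Wait_Time_ms) as Wait_Time_ms
//   INTO #AVG_TMP_SQLServerWaitStats
//   FROM dbo.SQLServerWaitStats_0
//   WHERE dt BETWEEN @dateFrom AND @dateTo
//   GROUP BY <rounded dt>, dictId;
//
//   DELETE FROM dbo.SQLServerWaitStats_0 WHERE dt BETWEEN @dateFrom AND @dateTo;
//
//   INSERT INTO dbo.SQLServerWaitStats_0 (dt, dictId, Wait_Time_ms)
//   SELECT dt, dictId, Wait_Time_ms
//   FROM #AVG_TMP_SQLServerWaitStats
//
//   UPDATE dbo.ArchiveWatermarks SET ArchivedToDate = @dateTo
//   WHERE ArchiveOffsetId = @archiveOffsetId and TargetId = @targetId;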
} // end of LoadDictionaryIntoCache function

// Loads data from the repository into the in-memory cache. Creates a new record in the data cache
public static void LoadDataIntoCache(int targetId, MetricGroup metricGroup, bool allowReload, SqlConnection connection = null, SqlTransaction transaction = null)
{
    string cacheKey;
    SqlConnection conn;

    if (metricGroup.changeSpeed != ChangeSpeed.Fast)
    {
        throw new Exception("Only fast changing metric is allowed");
    }

    SqlCommand cmd = null;
    SqlDataReader dataReader = null;

    // create a new in-memory cache table for data if one doesn't exist yet
    if (!ContainsKey(GetCacheKey(targetId, metricGroup, CacheType.Data)))
    {
        Dictionary<int, Column> valueColumns = new Dictionary<int, Column>();

        // value/metric columns
        for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
        {
            valueColumns.Add(i, metricGroup.metrics[i]);
        }

        TryAdd(GetCacheKey(targetId, metricGroup, CacheType.Data), new CacheTable(new Dictionary<int, Column>(), valueColumns, metricGroup.isCumulative));
    }

    cacheKey = GetCacheKey(targetId, metricGroup, CacheType.Data);

    CacheTable tmpCache = _cache[cacheKey].CloneAndClear();

    // don't reload cache unless allowReload is specified
    if (allowReload == false && tmpCache.loadedFromDatabase)
    {
        return;
    }

    try
    {
        string sqlStmt = "SELECT dictId, ";

        for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
        {
            sqlStmt += metricGroup.metrics[i].name.Replace(' ', '_') + ", ";
        }

        sqlStmt = sqlStmt.Remove(sqlStmt.Length - 2); // remove last comma
        sqlStmt += Environment.NewLine + "FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
            + Environment.NewLine + "WHERE dt = (SELECT MAX(dt) FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + ")";

        _logger.Trace(sqlStmt);

        if (connection == null)
        {
            conn = new SqlConnection(Configuration.GetReposConnectionString("Cache"));
            conn.Open();
        }
        else
        {
            conn = connection;
        }

        int attempt = 1;
        bool canTry = true;

        while (attempt < 3 && canTry)
        {
            cmd = conn.CreateCommand();
            cmd.CommandText = sqlStmt;
            cmd.CommandType = System.Data.CommandType.Text;

            if (transaction != null)
            {
                cmd.Transaction = transaction;
            }

            try
            {
                dataReader = cmd.ExecuteReader();

                int id;
                object[] values = new object[metricGroup.NumberOfMetrics];

                while (dataReader.Read())
                {
                    id = (int)dataReader["dictId"];

                    for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
                    {
                        values[i] = dataReader[metricGroup.metrics[i].name.Replace(' ', '_')];
                    }

                    tmpCache.Add(id, new object[0], values);
                }

                dataReader.Close();

                tmpCache.loadedFromDatabase = true;
                Replace(cacheKey, tmpCache);
                canTry = false; // data loaded successfully - no need to retry
            }
            catch (SqlException e)
            {
                if (transaction != null)
                {
                    transaction.Rollback();
                }

                switch (e.Number)
                {
                    case 208: // Invalid object
                        // Do not create tables if target has been deleted
                        if (!Configuration.targets.ContainsKey(targetId))
                        {
                            return;
                        }

                        SqlServerProbe.CreateTablesForMetricGroup(targetId, metricGroup);
                        break;
                    default:
                        _logger.Error("SqlException: " + e.Message + " ErrorCode: " + e.Number.ToString());
                        canTry = false;
                        break;
                }
            }
            finally
            {
                if (dataReader != null)
                {
                    ((IDisposable)dataReader).Dispose();
                }

                if (cmd != null)
                {
                    ((IDisposable)cmd).Dispose();
                }
            }

            attempt++;
        }

        if (connection == null)
        {
            conn.Close();
        }
    }
    catch (Exception e)
    {
        _logger.Error("SqlException: " + e.Message);
    }
} // end of LoadDataIntoCache function
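// Example of the query generated above (hypothetical table and metric names): the latest
// snapshot of every dictionary row is pulled in one pass, keyed by dictId:
//
//   SELECT dictId, Wait_Time_ms, Waiting_Tasks
//   FROM dbo.SQLServerWaitStats_0
//   WHERE dt = (SELECT MAX(dt) FROM dbo.SQLServerWaitStats_0)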
// method that runs continuously and purges data in the repository
public void Work()
{
    int targetId;
    int timeTableId;
    string sqlStmt;
    MetricGroup metricGroup;
    InstanceSchedule schedule;
    List<Tuple<DateTime, int>> lastPurgeList;

    _logger.Info("Purger started");

    _reposConn = new SqlConnection(Configuration.GetReposConnectionString("Purger"));

    while (!_shouldStop)
    {
        try
        {
            // this is to wait until the repository is ready to serve requests
            if (!Manager.IsRepositoryAccessible)
            {
                Thread.Sleep(250);
                continue;
            }

            if (this._reposConn.State != ConnectionState.Open)
            {
                this._reposConn.Open();
            }

            lastPurgeList = Configuration.timeTable.LastPurge();

            foreach (Tuple<DateTime, int> lastPurge in lastPurgeList)
            {
                timeTableId = lastPurge.Item2;

                // Skip schedules that have been deleted
                if (!Configuration.timeTable.TryGetValue(timeTableId, out schedule))
                {
                    continue;
                }

                // skip schedules that were purged recently
                if (DateTime.Compare(lastPurge.Item1.AddMinutes(Configuration.purgeInterval), DateTime.Now) > 0)
                {
                    continue;
                }

                targetId = schedule._targetId;
                metricGroup = Configuration.metricGroups[schedule._metricGroupId];

                // prepare SQL statement
                switch (metricGroup.changeSpeed)
                {
                    case ChangeSpeed.Slow:
                        sqlStmt = "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
                            + " WHERE endDate <= '" + SqlServerProbe.DateTimeToString(DateTime.Now.AddHours(-1 * schedule._schedule.retention)) + "'";
                        break;
                    case ChangeSpeed.Fast:
                        sqlStmt = "DELETE FROM " + SqlServerProbe.DataTableName(targetId, metricGroup)
                            + " WHERE dt <= '" + SqlServerProbe.DateTimeToString(DateTime.Now.AddHours(-1 * schedule._schedule.retention)) + "'";
                        break;
                    default:
                        throw new Exception("Unsupported change speed");
                }

                // execute SQL statement
                try
                {
                    if (!Manager.IsRepositoryAccessible)
                    {
                        Thread.Sleep(250);
                        break;
                    }

                    if (_reposConn.State != ConnectionState.Open)
                    {
                        _reposConn.Open();
                    }

                    using (SqlCommand cmd = _reposConn.CreateCommand())
                    {
                        cmd.CommandType = CommandType.Text;
                        cmd.CommandText = sqlStmt;
                        cmd.CommandTimeout = 300;
                        int rowCount = cmd.ExecuteNonQuery();
                        _logger.Debug("Rows deleted from table " + SqlServerProbe.DataTableName(targetId, metricGroup) + ": " + rowCount.ToString());
                    }

                    // update last purge time
                    Configuration.timeTable.SetLastPurge(lastPurge.Item2, DateTime.Now);
                }
                catch (SqlException e)
                {
                    if (e.Number == 208)
                    {
                        _logger.Debug("Table " + SqlServerProbe.DataTableName(targetId, metricGroup) + " does not exist");
                    }
                    else if (this._reposConn.State != ConnectionState.Open)
                    {
                        Manager.SetRepositoryAccessibility(false);
                    }
                    else
                    {
                        _logger.Error("Could not purge " + SqlServerProbe.DataTableName(targetId, metricGroup) + " due to error: " + e.Message);
                    }
                }
                catch (Exception e)
                {
                    if (this._reposConn.State == ConnectionState.Open)
                    {
                        _logger.Error("Could not purge " + SqlServerProbe.DataTableName(targetId, metricGroup) + " due to error: " + e.Message);
                    }
                    else
                    {
                        Manager.SetRepositoryAccessibility(false);
                    }

                    continue;
                }

                Thread.Sleep(250); // we don't want to stress the repository too much
            } // foreach

            Thread.Sleep(250);
        }
        catch (Exception e)
        {
            if (this._reposConn != null)
            {
                switch (this._reposConn.State)
                {
                    case System.Data.ConnectionState.Broken:
                    case System.Data.ConnectionState.Closed:
                        Manager.SetRepositoryAccessibility(false);
                        break;
                    default:
                        _logger.Error(e.Message);
                        _logger.Error(e.StackTrace);
                        _mgr.ReportFailure("Purger");
                        return;
                }
            }
            else
            {
                _logger.Error(e.Message);
                _logger.Error(e.StackTrace);
                _mgr.ReportFailure("Purger");
                return;
            }
        } // end of catch
    } // end of while (!_shouldStop)

    _logger.Info("Purger stopped");
}
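// Example purge statement for a fast changing metric with a 168-hour (7-day) retention
// (table name and DateTimeToString rendering are hypothetical; retention is in hours,
// matching the AddHours call above):
//
//   DELETE FROM dbo.SQLServerActivity_0 WHERE dt <= '2013-05-13 18:00:00'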
/// <summary>Compares new data with the in-memory copy - single row - slow changing.
/// Returns 0 if the new data matches the in-memory copy, 1 if it differs, and -1 if no history is found</summary>
private int CompareSlowSingleRowWithInMemoryData(int targetId, MetricGroup metricGroup, ProbeResultingData data, SqlConnection connection)
{
    bool noHistory = false;
    CacheTable dataCache;

    // create in-memory cache table if it doesn't exist
    if (!InMemoryCache.ContainsKey(InMemoryCache.GetCacheKey(-1, metricGroup)))
    {
        // Do not create tables if target has been deleted
        if (!Configuration.targets.ContainsKey(targetId))
        {
            return(-1);
        }

        InMemoryCache.CreateCacheTableSingleRow(metricGroup, CacheType.Data);
    }

    dataCache = Configuration.inMemoryCache[InMemoryCache.GetCacheKey(-1, metricGroup, CacheType.Data)];

    // load latest row from the repository if it is not in-memory yet
    int id = dataCache.GetIdByKey(new object[] { targetId });

    if (id == -1)
    {
        string sqlStmt = "SELECT ";

        for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
        {
            sqlStmt += metricGroup.metrics[i].name.Replace(' ', '_') + ",";
        }

        sqlStmt = sqlStmt.Remove(sqlStmt.Length - 1); // remove last comma
        sqlStmt += " FROM " + SqlServerProbe.DataTableName(targetId, metricGroup);
        sqlStmt += " WHERE startDate = (SELECT MAX(startDate) FROM " + SqlServerProbe.DataTableName(targetId, metricGroup) + ")";

        using (SqlCommand cmd = connection.CreateCommand())
        {
            cmd.CommandType = System.Data.CommandType.Text;
            cmd.CommandText = sqlStmt;

            try
            {
                SqlDataReader dataReader = cmd.ExecuteReader();

                if (dataReader.Read())
                {
                    object[] oldValues = new object[metricGroup.NumberOfMetrics];

                    for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
                    {
                        // check data type before casting
                        switch (metricGroup.metrics[i].type)
                        {
                            case DataType.Ansi:
                                if (!DataTypeMappingSqlServer.DoesBelong(dataReader.GetDataTypeName(i), DataType.Ansi))
                                {
                                    throw new Exception("Data type of column #" + (i + 1).ToString() + " of '" + metricGroup.name + "' metric does not match any allowed data type for internal data type Ansi");
                                }

                                oldValues[i] = (object)dataReader.GetString(i);
                                break;
                            case DataType.Unicode:
                                if (!DataTypeMappingSqlServer.DoesBelong(dataReader.GetDataTypeName(i), DataType.Unicode))
                                {
                                    throw new Exception("Data type of column #" + (i + 1).ToString() + " of '" + metricGroup.name + "' metric does not match any allowed data type for internal data type Unicode");
                                }

                                oldValues[i] = (object)dataReader.GetString(i);
                                break;
                            case DataType.Double:
                                if (!DataTypeMappingSqlServer.DoesBelong(dataReader.GetDataTypeName(i), DataType.Double))
                                {
                                    throw new Exception("Data type of column #" + (i + 1).ToString() + " of '" + metricGroup.name + "' metric does not match any allowed data type for internal data type Double");
                                }

                                oldValues[i] = (object)dataReader.GetDouble(i);
                                break;
                            case DataType.SmallInt:
                                if (!DataTypeMappingSqlServer.DoesBelong(dataReader.GetDataTypeName(i), DataType.SmallInt))
                                {
                                    throw new Exception("Data type of column #" + (i + 1).ToString() + " of '" + metricGroup.name + "' metric does not match any allowed data type for internal data type Int16");
                                }

                                oldValues[i] = (object)dataReader.GetInt16(i);
                                break;
                            case DataType.Datetime:
                                if (!DataTypeMappingSqlServer.DoesBelong(dataReader.GetDataTypeName(i), DataType.Datetime))
                                {
                                    throw new Exception("Data type of column #" + (i + 1).ToString() + " of '" + metricGroup.name + "' metric does not match any allowed data type for internal data type Datetime");
                                }

                                oldValues[i] = (object)dataReader.GetDateTime(i);
                                break;
                            default:
                                throw new Exception("Unknown data type");
                        } // end of switch
                    }

                    id = dataCache.Add(-1, new object[] { targetId }, oldValues);
                }
                else
                {
                    noHistory = true;
                }

                dataReader.Close();
                dataReader.Dispose();
            }
            catch (SqlException e)
            {
                if (e.Number == 208) // Invalid object
                {
                    // Do not create tables if target has been deleted
                    if (!Configuration.targets.ContainsKey(targetId))
                    {
                        return(-1);
                    }

                    SqlServerProbe.CreateTablesForMetricGroup(targetId, metricGroup);
                    noHistory = true;
                }
                else
                {
                    throw;
                }
            }
        }

        if (noHistory)
        {
            return(-1);
        }
    }

    // compare old and new values
    object[] newValues = new object[metricGroup.NumberOfMetrics];

    for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
    {
        newValues[i] = data.values[0, i];
    }

    if (dataCache.CompareAttributesForKey(id, newValues))
    {
        return(0);
    }
    else
    {
        return(1);
    }
}
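// Caller sketch (hypothetical wiring, based on the dataMatches parameter of
// GenerateSqlSingleRowSlow above): 0 means "unchanged - just extend endDate",
// anything else means "insert a new interval row":
//
//   int dataMatches = CompareSlowSingleRowWithInMemoryData(targetId, metricGroup, data, connection);
//   string sqlStmt = GenerateSqlSingleRowSlow(targetId, metricGroup, dataMatches, data);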
} // end of WriteFastSingleRowToRepository function

/// <summary>Saves data into dictionary and data table for multi-value metrics</summary>
private void WriteMultipleRowsToRepository(int targetId, MetricGroup metricGroup, ProbeResultingData data)
{
    int id;
    CacheTable dictCache, dataCache;
    List<int> newDictRows;
    List<Tuple<int, int>> oldDictRows;
    object[] key, attributes;
    string dataTableName, dictTableName;
    byte tryCount = 0;
    bool canExit = false;
    SqlTransaction tran = null;
    string dictSqlStmt = string.Empty;
    string dataSqlStmt = string.Empty;

    newDictRows = new List<int>(); // ids of records that should be added to the dictionary (new rows or rows with updated attributes)
    oldDictRows = new List<Tuple<int, int>>(); // ids and dictionary ids of records that changed since last probe and need to be closed

    dataTableName = SqlServerProbe.DataTableName(targetId, metricGroup);
    _logger.Debug("Name of data table: " + dataTableName);

    dictTableName = SqlServerProbe.DictTableName(targetId, metricGroup);
    _logger.Debug("Name of dictionary: " + dictTableName);

    // load the dictionary cache table if it doesn't exist
    if (!InMemoryCache.ContainsKey(dictTableName))
    {
        InMemoryCache.LoadDictionaryIntoCache(targetId, metricGroup, false);
    }

    dictCache = Configuration.inMemoryCache[dictTableName];

    // load the data cache table if it doesn't exist
    if (!InMemoryCache.ContainsKey(dataTableName))
    {
        InMemoryCache.LoadDataIntoCache(targetId, metricGroup, false);
    }

    /*
     * Checks for changed or new records in the dictionary and, if needed, prepares a SQL statement to update the dictionary table
     */
    switch (metricGroup.multiRowKeyAttributesChangeSpeed)
    {
        case ChangeSpeed.Static:
            // check whether all records are in the dictionary or some need to be added to it
            for (int i = 0; i < data.NumberOfRows; i++)
            {
                key = new object[metricGroup.NumberOfMultiRowKeys];

                for (int j = 0; j < metricGroup.NumberOfMultiRowKeys; j++)
                {
                    key[j] = data.values[i, j];
                }

                if (dictCache.GetIdByKey(key) == -1)
                {
                    newDictRows.Add(i);
                }
            }

            // generate SQL statements if there are any new dictionary records
            if (newDictRows.Count > 0)
            {
                dictSqlStmt = GenerateSqlStaticDict(targetId, metricGroup, data, newDictRows);
            }

            break;
        case ChangeSpeed.Slow:
            // check whether all records are in the dictionary or some need to be added to it
            for (int i = 0; i < data.NumberOfRows; i++)
            {
                key = new object[metricGroup.NumberOfMultiRowKeys];

                for (int j = 0; j < metricGroup.NumberOfMultiRowKeys; j++)
                {
                    key[j] = data.values[i, j];
                }

                id = dictCache.GetIdByKey(key);

                if (id == -1)
                {
                    newDictRows.Add(i);
                }
                else // check that attributes match
                {
                    attributes = new object[metricGroup.NumberOfMultiRowKeyAttributes];

                    for (int j = 0; j < metricGroup.NumberOfMultiRowKeyAttributes; j++)
                    {
                        attributes[j] = data.values[i, metricGroup.NumberOfMultiRowKeys + j];
                    }

                    if (!dictCache.CompareAttributesForKey(id, attributes))
                    {
                        oldDictRows.Add(new Tuple<int, int>(i, id)); // this is to close the old record - UPDATE
                    }
                }
            }

            // generate SQL statements if there are any changes or new records in the dictionary
            if (oldDictRows.Count > 0 || newDictRows.Count > 0)
            {
                dictSqlStmt = GenerateSqlSlowDict(targetId, metricGroup, data, oldDictRows, newDictRows);
            }

            break;
        default:
            throw new Exception("Unknown dictionary change speed");
    }

    /*
     * Write new data into the dictionary but don't close the transaction yet
     */
    if (dictSqlStmt.CompareTo(string.Empty) != 0)
    {
        _logger.Trace(dictSqlStmt);

        // If tables don't exist, will try to create them and rerun SQL statements
        while (!canExit && tryCount < 2)
        {
            try
            {
                // we will write to the dictionary first and then to the data table so we need to begin a transaction
                tran = this.reposConn.BeginTransaction();

                if (dictSqlStmt.CompareTo(string.Empty) != 0)
                {
                    // save dictionary changes
                    using (SqlCommand cmd = this.reposConn.CreateCommand())
                    {
                        cmd.Transaction = tran;
                        cmd.CommandType = System.Data.CommandType.Text;
                        cmd.CommandText = dictSqlStmt;
                        int rowCount = cmd.ExecuteNonQuery();
                        _logger.Debug("Rows affected: " + rowCount.ToString());
                    }
                }

                InMemoryCache.LoadDictionaryIntoCache(targetId, metricGroup, true, this.reposConn, tran);
                canExit = true;
            }
            catch (SqlException e)
            {
                if (tran != null)
                {
                    tran.Rollback();
                    tran.Dispose();
                    tran = null;
                }

                switch (e.Number)
                {
                    case 208: // Invalid object
                        // Do not create tables if target has been deleted
                        if (!Configuration.targets.ContainsKey(targetId))
                        {
                            return;
                        }

                        SqlServerProbe.CreateTablesForMetricGroup(targetId, metricGroup);
                        break;
                    default:
                        _logger.Error("SqlException: " + e.Message + " ErrorCode: " + e.Number.ToString());
                        break;
                }
            }

            tryCount++;
        }
    }

    /*
     * Prepare SQL statement to save data with the right references to the dictionary records
     */
    switch (metricGroup.changeSpeed)
    {
        case ChangeSpeed.Fast:
            dataSqlStmt = "INSERT INTO " + dataTableName + " (dt,dictId,";

            for (int i = 0; i < metricGroup.NumberOfMetrics; i++)
            {
                dataSqlStmt += metricGroup.metrics[i].name.Replace(' ', '_') + ",";
            }

            dataSqlStmt = dataSqlStmt.Remove(dataSqlStmt.Length - 1); // remove last comma
            dataSqlStmt += ")" + Environment.NewLine + "VALUES";

            for (int i = 0; i < data.NumberOfRows; i++)
            {
                dataSqlStmt += Environment.NewLine + "('" + SqlServerProbe.DateTimeToString(data.probeDateTime) + "',";

                // retrieve corresponding id from dictionary
                key = new object[metricGroup.NumberOfMultiRowKeys];

                for (int k = 0; k < metricGroup.NumberOfMultiRowKeys; k++)
                {
                    key[k] = data.values[i, k];
                }

                id = dictCache.GetIdByKey(key);
                dataSqlStmt += id.ToString() + ",";

                // add metric values
                for (int j = 0; j < metricGroup.NumberOfMetrics; j++)
                {
                    dataSqlStmt += SqlServerProbe.DataValueToString(metricGroup.metrics[j].type, data.values[i, metricGroup.NumberOfMultiRowKeys + metricGroup.NumberOfMultiRowKeyAttributes + j]) + ",";
                }

                dataSqlStmt = dataSqlStmt.Remove(dataSqlStmt.Length - 1); // remove last comma
                dataSqlStmt += "),";
            }

            dataSqlStmt = dataSqlStmt.Remove(dataSqlStmt.Length - 1); // remove last comma
            _logger.Trace(dataSqlStmt);
            break;
        default:
            throw new Exception("Unsupported data change speed");
    }

    /*
     * Executes SQL statements
     * If tables don't exist, will try to create them and rerun SQL statements
     */
    try
    {
        // save data
        using (SqlCommand cmd = this.reposConn.CreateCommand())
        {
            if (tran != null) // use same transaction as for the dictionary
            {
                cmd.Transaction = tran;
            }

            cmd.CommandType = System.Data.CommandType.Text;
            cmd.CommandText = dataSqlStmt;
            int rowCount = cmd.ExecuteNonQuery();
            _logger.Debug("Rows affected: " + rowCount.ToString());
        }

        if (tran != null)
        {
            tran.Commit();
        }

        InMemoryCache.LoadDictionaryIntoCache(targetId, metricGroup, true);
        dictCache = Configuration.inMemoryCache[dictTableName];

        // Update in-memory data cache
        object[] newValues;
        dataCache = Configuration.inMemoryCache[dataTableName];

        for (int i = 0; i < data.NumberOfRows; i++)
        {
            key = new object[metricGroup.NumberOfMultiRowKeys];

            for (int j = 0; j < metricGroup.NumberOfMultiRowKeys; j++)
            {
                key[j] = data.values[i, j];
            }

            id = dictCache.GetIdByKey(key);

            newValues = new object[metricGroup.NumberOfMetrics];

            for (int j = 0; j < metricGroup.NumberOfMetrics; j++)
            {
                newValues[j] = data.values[i, metricGroup.NumberOfMultiRowKeys + metricGroup.NumberOfMultiRowKeyAttributes + j];
            }

            dataCache.AddOrUpdateRowValues(id, new object[0], newValues);
        }

        canExit = true;
    }
    catch (SqlException e)
    {
        _logger.Error("SqlException: " + e.Message + " ErrorCode: " + e.Number.ToString());

        if (tran != null)
        {
            tran.Rollback();
            InMemoryCache.LoadDictionaryIntoCache(targetId, metricGroup, true);
        }
    }
}
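// Example of the multi-row INSERT built above (table/column names are hypothetical;
// dictIds are resolved from the in-memory dictionary cache):
//
//   INSERT INTO dbo.SQLServerWaitStats_0 (dt,dictId,Wait_Time_ms)
//   VALUES
//   ('2013-05-20 18:00:00',1,125),
//   ('2013-05-20 18:00:00',2,17)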
// returns a set of datetime-value tuples in a JSON string
// param0: MetricGroupName - MetricGroup->Name
// param1: TargetId - Targets->Id
// param2: StartDateTime - YYYYMMDDHHMM
// param3: EndDateTime - YYYYMMDDHHMM
// param4: Interval - M in minutes
// param5: MetricName - MetricGroup->metrics->name
// param6: NumOfRowsToReturn - Number of records to return (TOP X)
// param7: DictionaryKeyName - MetricGroup->multiRowKeys->name
// param8: Optional. Dictionary keys to exclude
// test string ok     http://localhost:3128/ws/fastmulti/Range/SQL%20Server%20Wait%20Stats/0/201305201800/201305201900/5/Wait%20Time%20ms/5/Wait%20Type/
// test string not ok http://localhost:3128/ws/fastmulti/Range/SQL%20Server%20Wait%20Stats/0/201305201800/201305M900/5/Wait%20Time%20ms/10/Wait%20Type/
private WebServiceResult Range(string[] parameters)
{
    if (parameters.Count() < 8)
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): too few parameters. Format is MetricGroupName/TargetId/StartDateTime/EndDateTime/IntervalInMinutes/MetricName/NumOfRowsToReturn/DictionaryKeyName/[DictionaryKeysToExclude]"));
    }

    if (parameters.Count() > 9)
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): too many parameters. Format is MetricGroupName/TargetId/StartDateTime/EndDateTime/IntervalInMinutes/MetricName/NumOfRowsToReturn/DictionaryKeyName/[DictionaryKeysToExclude]"));
    }

    Regex r = new Regex("^[0-9]*$");

    // check that supplied target id is valid
    if (!r.IsMatch(parameters[1]) || !Configuration.targets.ContainsId(Convert.ToInt32(parameters[1])))
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): TargetId is either not numeric or target with specified id doesn't exist"));
    }

    if (!r.IsMatch(parameters[2]) || !r.IsMatch(parameters[3]) || !r.IsMatch(parameters[4]))
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): TargetId, StartDateTime, EndDateTime or Interval is not numeric. Format is MetricGroupName/TargetId/StartDateTime/EndDateTime/IntervalInMinutes/MetricName/NumOfRowsToReturn/DictionaryKeyName/[DictionaryKeysToExclude]"));
    }

    if (parameters[2].Length != 12 || parameters[3].Length != 12)
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): StartDateTime and EndDateTime must be in YYYYMMDDHHMM format"));
    }

    if (!r.IsMatch(parameters[6]))
    {
        return(WebServiceResult.ReturnError(GetType().Name + ".Range(): NumOfRowsToReturn is not numeric"));
    }

    // look up metric group by name
    var metricGroup = Configuration.metricGroups[parameters[0]];

    string metricColumn = parameters[5].Replace(' ', '_');
    string numOfRowsToReturn = parameters[6];
    string exclusionColumn = parameters[7].Replace(' ', '_');
    string excludedValues = string.Empty;

    if (parameters.Count() == 9)
    {
        excludedValues = parameters[8];
    }

    // prepare parameters
    SqlParameters sqlParameters = new SqlParameters
    {
        { "@dataTable", SqlServerProbe.DataTableName(Convert.ToInt32(parameters[1]), metricGroup) },
        { "@dictionary", SqlServerProbe.DictTableName(Convert.ToInt32(parameters[1]), metricGroup) },
        { "@start_dt", SqlServerProbe.FormatDate(parameters[2]) },
        { "@end_dt", SqlServerProbe.FormatDate(parameters[3]) },
        { "@interval", parameters[4] },
        { "@metricColumn", metricColumn },
        { "@numOfRowsToReturn", numOfRowsToReturn },
        { "@exclusionColumn", exclusionColumn },
        { "@excludedValues", excludedValues }
    };

    // execute procedure and return results
    if (metricGroup.isCumulative)
    {
        return(GetData(System.Data.CommandType.StoredProcedure, "dbo.GetRangeMultiRowFastCumulative", sqlParameters));
    }

    return(GetData(System.Data.CommandType.StoredProcedure, "dbo.GetRangeMultiRowFastNonCumulative", sqlParameters));
}
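// Example: the "test string ok" URL above maps to
//   parameters[0] = "SQL Server Wait Stats" (metric group),
//   parameters[1] = "0"                     (target id),
//   parameters[2] = "201305201800", parameters[3] = "201305201900",
//   parameters[4] = "5"                     (5-minute interval),
//   parameters[5] = "Wait Time ms"          (metric, becomes column Wait_Time_ms),
//   parameters[6] = "5"                     (TOP rows to return),
//   parameters[7] = "Wait Type"             (dictionary key / exclusion column),
// and, for a cumulative group, calls dbo.GetRangeMultiRowFastCumulative.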