/// <summary>
/// Runs the given Kusto query with a join to the manual-review data appended,
/// parses each row into an SloRecord, and keeps only the first (newest, since
/// results are sorted by ReviewDate desc) row per ServiceId.
/// </summary>
/// <param name="query">Base Kusto query producing the SLO columns.</param>
/// <returns>Tuple of successfully parsed records and per-row validation errors.</returns>
public Tuple<List<SloRecord>, List<SloValidationException>> ExecuteQuery(string query)
{
    var items = new List<SloRecord>();
    var errors = new List<SloValidationException>();
    // Ordering of the set was never used, so a HashSet is sufficient.
    var uniqueServiceIds = new HashSet<string>();

    // Append to the query the join to get the review data
    query += @"| project ServiceId, OrganizationName, ServiceGroupName, TeamGroupName, ServiceName, YamlValue, ServiceIdGuid = toguid(ServiceId) | join kind = leftouter SloDefinitionManualReview on $left.ServiceIdGuid == $right.ServiceId | sort by ReviewDate desc ";

    // "GetSloJsonActionItemReport() | where YamlValue contains ServiceId"
    using (var results = client_.ExecuteQuery(query))
    {
        while (results.Read())
        {
            try
            {
                var result = ReadSingleResult((IDataRecord)results);

                // HashSet.Add returns false for a duplicate, so this keeps only
                // the latest value per service in a single lookup.
                if (uniqueServiceIds.Add(result.ServiceId))
                {
                    items.Add(result);
                }
            }
            catch (SloValidationException ex)
            {
                errors.Add(ex);
            }
            catch (Exception ex)
            {
                // Best-effort: a malformed row must not abort the whole read.
                Debug.WriteLine($"Schema violation: {ex.Message}");
            }
        }
    }

    return Tuple.Create(items, errors);
}
/// <summary>
/// Probes the cluster with a trivial query, bounded to 30 seconds, and marks
/// the connection usable on success. Failures and timeouts are deliberately
/// ignored: _isUsable simply stays false and the probe can be retried later.
/// </summary>
private void CheckUsability()
{
    if (_isUsable)
    {
        return;
    }

    try
    {
        var resultTask = Task.Run(() =>
        {
            // Dispose the result so the probe does not leak a reader/connection
            // (the original never disposed it).
            using (adx.ExecuteQuery(".show databases | count"))
            {
            }
        });

        if (resultTask.Wait(30000))
        {
            _isUsable = true;
        }
        // NOTE(review): on timeout the task keeps running in the background —
        // confirm this is acceptable for the probe's lifetime.
    }
    catch (Exception)
    {
        // do nothing — best-effort usability probe
    }
}
/// <summary>
/// Executes a Kusto query against the management connection and returns the
/// enumerated result rows as strings.
/// </summary>
/// <param name="query">The Kusto query text.</param>
/// <returns>The result rows produced by EnumerateResults.</returns>
public List<string> Query(string query)
{
    Log.Info($"query:{query}", ConsoleColor.Blue);

    // The original allocated an unused List<string> here; removed.
    using (ICslQueryProvider kustoQueryClient = KustoClientFactory.CreateCslQueryProvider(ManagementConnection))
    {
        return EnumerateResults(kustoQueryClient.ExecuteQuery(query));
    }
}
/// <summary>
/// Queries Kusto for an internal alias from a given GitHub alias
/// </summary>
/// <param name="githubUserName">GitHub alias</param>
/// <returns>Internal alias or null if no internal user found</returns>
public async Task<string> GetInternalUserPrincipal(string githubUserName)
{
    // Escape backslashes and quotes so a crafted alias cannot break out of the
    // KQL string literal (KQL escapes with backslash inside '...').
    var safeUserName = githubUserName.Replace("\\", "\\\\").Replace("'", "\\'");
    var query = $"{kustoTable} | where githubUserName == '{safeUserName}' | project aadUpn | limit 1;";

    // TODO: Figure out how to make this async
    using (var reader = client.ExecuteQuery(query))
    {
        if (reader.Read())
        {
            return reader.GetString(0);
        }

        logger.LogWarning("Could Not Resolve GitHub User Username = {0}", githubUserName);
        return default;
    }
}
/// <summary>
/// Executes a Kusto query for a specific queue and parses its data: counts how
/// often each machine appears, sorts by frequency (most frequent first), and
/// writes "machine,count" lines to the output file.
/// </summary>
protected override BuildQueueToMachineMapOutput Perform(BuildQueueToMachineMapInput input)
{
    s_logger.Debug($"BuildQueueToMachineMap starts for queue {input.QueueName}");
    var machinesWithFrequencies = new Dictionary<string, int>();

    try
    {
        using (var reader = m_queryProvider.ExecuteQuery(m_query, null))
        {
            // each line has a set of machines, which are comma separated
            while (reader.Read())
            {
                var machines = reader.GetString(0).Split(',');
                foreach (var machine in machines)
                {
                    // strip an optional ":suffix"; single scan instead of Contains + IndexOf
                    var separator = machine.IndexOf(':');
                    var name = separator >= 0 ? machine.Substring(0, separator) : machine;

                    // single dictionary lookup instead of ContainsKey + indexer
                    machinesWithFrequencies.TryGetValue(name, out var count);
                    machinesWithFrequencies[name] = count + 1;
                }
            }

            // now sort them, descending by frequency
            var sortedByFrequency = machinesWithFrequencies.ToList();
            sortedByFrequency.Sort((v1, v2) => v2.Value.CompareTo(v1.Value));

            // now, we can write it
            s_logger.Debug($"Saving to [{m_outputFile}]");
            foreach (var entry in sortedByFrequency)
            {
                m_writer.WriteLine(string.Join(",", entry.Key, entry.Value));
            }

            // and done...
            return new BuildQueueToMachineMapOutput(sortedByFrequency);
        }
    }
    finally
    {
        s_logger.Debug($"BuildQueueToMachineMap ends in {Stopwatch.ElapsedMilliseconds}ms");
    }
}
/// <summary>
/// Executes the given query against the specified database, counts the result
/// rows, and reports execution/row-count/latency telemetry.
/// </summary>
/// <param name="dbName">Target database name.</param>
/// <param name="query">Kusto query text.</param>
/// <param name="context">Telemetry context label.</param>
/// <param name="telemetry">Telemetry sink for events, metrics, and exceptions.</param>
public void ExecuteQuery(String dbName, string query, String context, TelemetryClient telemetry)
{
    // Placeholder for future parametrized queries (kept for reference).
    var queryParameters = new Dictionary<String, String>()
    {
        //{ "xIntValue", "111" },
        // { "xStrValue", "abc" },
        // { "xDoubleValue", "11.1" }
    };

    var clientRequestProperties = new Kusto.Data.Common.ClientRequestProperties(
        options: null, parameters: queryParameters);
    // Tag the request so it can be correlated in Kusto-side diagnostics.
    clientRequestProperties.ClientRequestId = "Benchmarkapp-" + Guid.NewGuid().ToString();

    int results = 0;
    TelemetryHelper.TrackEvent(EXECUTIONS, context, telemetry);
    Stopwatch stopwatch = Stopwatch.StartNew();

    try
    {
        // Dispose the reader so the underlying connection is released
        // (the original leaked it).
        using (var queryresult = adx.ExecuteQuery(dbName, query, clientRequestProperties))
        {
            while (queryresult.Read())
            {
                results++;
            }
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(
            "Failed invoking query '{0}' against Kusto.", query);
        telemetry.TrackException(new ExceptionTelemetry(ex));
        // "throw;" preserves the original stack trace; "throw ex;" resets it.
        throw;
    }

    stopwatch.Stop();
    TelemetryHelper.TrackMetric(RESULTCOUNT, context, telemetry, results);
    TelemetryHelper.TrackMetric(QUERYTIME, context, telemetry, Convert.ToDouble(stopwatch.ElapsedMilliseconds));
}
/// <summary>
/// This is the action queries kusto for a set of builds (less than or equal to MaxBuilds * 5)
/// We obtain more than one because we will retry all failed with new builds
/// </summary>
protected override GetKustoBuildOutput Perform(GetKustoBuildInput input)
{
    s_logger.Debug($"GetKustoBuild starts");

    try
    {
        var builds = new List<List<KustoBuild>>();

        using (var reader = m_queryProvider.ExecuteQuery(m_query, null))
        {
            // each line has a single build
            var pack = new List<KustoBuild>();
            while (reader.Read())
            {
                pack.Add(
                    new KustoBuild()
                    {
                        BuildId = reader.GetString(0),
                        LogDirectory = reader.GetString(1),
                        StartTime = reader.GetDateTime(2).Ticks,
                        BuildDurationMs = Convert.ToDouble(reader.GetInt64(3)),
                        BuildControllerMachineName = reader.GetString(4),
                        BuildQueue = reader.GetString(5)
                    });

                // flush complete packs of s_maxRetryBuilds builds
                if (pack.Count == s_maxRetryBuilds)
                {
                    // List's copy constructor replaces the manual array CopyTo dance
                    builds.Add(new List<KustoBuild>(pack));
                    pack = new List<KustoBuild>();
                }
            }

            // NOTE(review): a trailing partial pack (< s_maxRetryBuilds builds)
            // is discarded here, matching the original behavior — confirm intentional.
            return new GetKustoBuildOutput(builds);
        }
    }
    finally
    {
        s_logger.Debug($"GetKustoBuild ends in {Stopwatch.ElapsedMilliseconds}ms");
    }
}
/// <summary>
/// Wraps the caller's query fragment with the table name, an optional
/// timestamp filter (derived from the ICM's analyzer start time or creation
/// date minus one day), and a row limit, then executes it. The rewritten
/// query is passed back via the ref parameter.
/// </summary>
/// <returns>Rows as object arrays; the first entry holds the column names.</returns>
public List<object[]> Query(string table, ref string query, int icm, string timestampField = "TIMESTAMP", int limit = Constants.KustoClientQueryLimit)
{
    if (timestampField == null)
    {
        query = String.Format("{0} | {1} | limit {2}", table, query, limit);
    }
    else
    {
        // TODO : If ICM AnalyzerStartTimeField was changed, it might be newer than the ICM creation date
        DateTime startTime;
        if (!DateTime.TryParse(SALsA.GetInstance(icm).ICM.GetCustomField(Constants.AnalyzerStartTimeField), out startTime))
        {
            startTime = SALsA.GetInstance(icm).ICM.CurrentICM.CreateDate.AddDays(-1);
        }

        string start = startTime.ToString("u");
        //string end = SALsA.GetInstance(icm).ICM.CurrentICM.CreateDate.ToString("u");
        query = String.Format("{0} | where {1} > datetime({2}) | {3} | limit {4}", table, timestampField, start, /*end,*/ query, limit);
    }

    Log.Verbose("Sending {0} query : {1}", client.DefaultDatabaseName, query);

    var clientRequestProperties = new ClientRequestProperties() { ClientRequestId = Guid.NewGuid().ToString() };
    using (var reader = client.ExecuteQuery(query, clientRequestProperties))
    {
        var resultTable = new DataTable();
        resultTable.Load(reader);

        var rows = new List<object[]>();

        // header row: column names
        var header = new object[resultTable.Columns.Count];
        resultTable.Columns.CopyTo(header, 0);
        rows.Add(header);

        // data rows
        foreach (DataRow record in resultTable.Rows)
        {
            var cells = new object[resultTable.Columns.Count];
            record.ItemArray.CopyTo(cells, 0);
            rows.Add(cells);
        }

        return rows;
    }
}
/// <summary>
/// Download data for queues over the specified month: reads per-queue metric
/// columns from Kusto (columns 0-2 are stamp/architecture/queue identity, the
/// rest are metrics), min-max normalizes each metric column, writes the rows
/// to the output file, and emits a per-queue distance map file.
/// </summary>
protected override DownloadMonthlyQueueDataOutput Perform(DownloadMonthlyQueueDataInput input)
{
    s_logger.Debug($"DownloadMonthlyQueueData starts");
    var queues = new List<KustoQueueData>();
    var minMax = new List<MinMaxPair>();

    try
    {
        using (var reader = m_queryProvider.ExecuteQuery(m_query, null))
        {
            // each line has a single queue
            while (reader.Read())
            {
                var queueData = new KustoQueueData()
                {
                    Stamp = reader.GetString(0),
                    Architecture = reader.GetString(1),
                    QueueName = reader.GetString(2)
                };

                // lazily size the min/max trackers from the first row
                if (minMax.Count == 0)
                {
                    for (var i = 0; i < reader.FieldCount - 3; ++i)
                    {
                        minMax.Add(new MinMaxPair());
                    }
                }

                // add the numbers
                for (var i = 3; i < reader.FieldCount; ++i)
                {
                    try
                    {
                        queueData.Data.Add(Math.Round(reader.GetDouble(i), KustoQueueData.DefaultPrecision));
                    }
#pragma warning disable ERP022
                    catch (Exception)
                    {
                        // if we could not get a double, we might have got NaN, which is not wrong
                        queueData.Data.Add(0.0);
                    }
                    // "#pragma warning enable" is not a valid directive; "restore" is.
#pragma warning restore ERP022

                    // store the min and max
                    var minMaxPos = i - 3;
                    if (minMax[minMaxPos].Min > queueData.Data[minMaxPos])
                    {
                        minMax[minMaxPos].Min = queueData.Data[minMaxPos];
                    }

                    if (minMax[minMaxPos].Max < queueData.Data[minMaxPos])
                    {
                        minMax[minMaxPos].Max = queueData.Data[minMaxPos];
                    }
                }

                // save it
                queues.Add(queueData);
            }

            // normalize each metric column to [0, 1] via min-max scaling
            s_logger.Debug($"Got {queues.Count} rows, normalizing");
            foreach (var q in queues)
            {
                for (var i = 0; i < q.Data.Count; ++i)
                {
                    if (minMax[i].Min != minMax[i].Max)
                    {
                        q.Data[i] = (q.Data[i] - minMax[i].Min) / (minMax[i].Max - minMax[i].Min);
                    }
                    else
                    {
                        // constant column: avoid division by zero
                        q.Data[i] = q.Data[i] - minMax[i].Min;
                    }
                }
            }

            // now, we can write it
            s_logger.Debug($"Saving to [{m_outputFile}]");
            foreach (var q in queues)
            {
                m_writer.WriteLine(string.Join(",", q.Stamp, q.Architecture, q.QueueName, string.Join(",", q.Data)));
            }

            // and we can create the distance maps
            s_logger.Debug($"Creating distance maps in [{m_distanceMaps}]");
            foreach (var q in queues)
            {
                // using guarantees the writer is flushed and closed even if
                // the distance calculation throws (original leaked on error)
                using (var writer = new StreamWriter(Path.Combine(m_distanceMaps, q.QueueName)))
                {
                    // calculate
                    var neighbors = q.ClosestNeighborsByEuclideanDistance(queues);

                    // write to file
                    foreach (var neighbor in neighbors)
                    {
                        writer.WriteLine(string.Join(",", neighbor.Name, neighbor.Distance));
                    }
                }
            }

            // and done...
            return new DownloadMonthlyQueueDataOutput(queues);
        }
    }
    finally
    {
        s_logger.Debug($"DownloadMonthlyQueueData ends in {Stopwatch.ElapsedMilliseconds}ms");
    }
}
/// <summary>
/// Builds a lineage model of an ADX cluster: for every database it collects
/// tables, external tables, continuous exports, update policies, and table
/// details (row counts), recording everything on the given Lineage object.
/// Queries are issued asynchronously per database and awaited in batches.
/// NOTE(review): the IDataReader results are never disposed here — confirm
/// that is acceptable for this tool's process lifetime.
/// </summary>
private static void AddLineageInformation(ICslQueryProvider adx, Lineage lineage)
{
    var databases = adx.ExecuteQuery<String>(".show databases | project DatabaseName");

    // One pending query per database for each entity kind; results consumed below.
    List<Task<IDataReader>> allTablesQueries = new List<Task<IDataReader>>();
    List<Task<IDataReader>> externalTableQueries = new List<Task<IDataReader>>();
    List<Task<IDataReader>> continousExportQueries = new List<Task<IDataReader>>();
    //Dictionary<string, Task<IDataReader>> allTables = new Dictionary<string, Task<IDataReader>>();

    #region get Tables
    foreach (var aDatabase in databases)
    {
        #region internal tables
        // Database=current_database() is projected so the consumer below knows
        // which database each table row came from.
        var tableTask = adx.ExecuteQueryAsync(aDatabase, ".show tables | project TableName, Database=current_database()", CreateRequestProperties());
        allTablesQueries.Add(tableTask);
        //allTables.Add(aDatabase, tableTask);
        #endregion

        #region external tables
        externalTableQueries.Add(adx.ExecuteQueryAsync(aDatabase, @".show external tables | project TableName, Database=current_database()", CreateRequestProperties()));
        continousExportQueries.Add(adx.ExecuteQueryAsync(aDatabase, @".show continuous-exports | project Name, ExternalTableName, Query, CursorScopedTables=todynamic(CursorScopedTables), Database=current_database() | mv-expand CursorScopedTables to typeof(string)", CreateRequestProperties()));
        #endregion
    }
    // Only the table queries must finish now; the other batches are awaited later.
    Task.WaitAll(allTablesQueries.ToArray());
    #endregion

    List<Task<IDataReader>> updatePolicyQueries = new List<Task<IDataReader>>();
    List<Task<IDataReader>> tableDetailsQueries = new List<Task<IDataReader>>();

    #region get update policies
    // For every discovered table, register it on the lineage and fan out two
    // follow-up queries: its update policy and its details.
    foreach (var aTableTask in allTablesQueries)
    {
        IDataReader tableResult = aTableTask.Result;
        while (tableResult.Read())
        {
            var tableName = tableResult.GetString(0);
            var databaseName = tableResult.GetString(1);
            lineage.AddTable(databaseName, tableName);

            //get update policy
            updatePolicyQueries.Add(adx.ExecuteQueryAsync(databaseName, @".show table " + tableName + @" policy update | mv-expand Policy=todynamic(Policy) | project EntityName, 
Policy", CreateRequestProperties()));

            //get table details
            tableDetailsQueries.Add(adx.ExecuteQueryAsync(databaseName, @".show table " + tableName + @" details | project-away *Policy, AuthorizedPrincipals", CreateRequestProperties()));
        }
    }

    Task.WaitAll(updatePolicyQueries.ToArray());
    foreach (var aTask in updatePolicyQueries)
    {
        IDataReader updatePolicyResult = aTask.Result;
        while (updatePolicyResult.Read())
        {
            var policy = JsonConvert.DeserializeObject<UpdatePolicy>(updatePolicyResult["Policy"].ToString());
            // EntityName has the form "[database].[table]" — split it apart.
            var Entity = updatePolicyResult.GetString(0);
            var entitySplit = Entity.Split("].[");
            var database = entitySplit[0].Replace("[", "");
            var table = entitySplit[1].Replace("]", "");
            lineage.AddUpdatePolicy(database, table, policy);
        }
    }
    #endregion

    #region table details
    Task.WaitAll(tableDetailsQueries.ToArray());
    foreach (var aTask in tableDetailsQueries)
    {
        IDataReader tableDetailResult = aTask.Result;
        while (tableDetailResult.Read())
        {
            var database = tableDetailResult.GetString(1);
            var table = tableDetailResult.GetString(0);
            // Ordinal 7 is read as the row count — presumably the TotalRowCount
            // column of ".show table ... details" after project-away; TODO
            // confirm the ordinal stays stable across service versions.
            var rowCount = tableDetailResult.GetInt64(7);
            lineage.Databases[database].Tables[table].RowCount = rowCount;
        }
    }
    #endregion

    #region external tables
    Task.WaitAll(externalTableQueries.ToArray());
    foreach (var aTask in externalTableQueries)
    {
        IDataReader externalTableResult = aTask.Result;
        while (externalTableResult.Read())
        {
            var tableName = externalTableResult.GetString(0);
            var databaseName = externalTableResult.GetString(1);
            lineage.AddExternalTable(databaseName, tableName);
        }
    }
    #endregion

    #region continous export
    Task.WaitAll(continousExportQueries.ToArray());
    foreach (var aTask in continousExportQueries)
    {
        IDataReader continousExportResult = aTask.Result;
        while (continousExportResult.Read())
        {
            // Columns match the projection issued above: Name,
            // ExternalTableName, Query, CursorScopedTables, Database.
            var continousExportName = continousExportResult.GetString(0);
            var externalTableName = continousExportResult.GetString(1);
            var query = continousExportResult.GetString(2);
            var curserScopedQuery = continousExportResult.GetString(3);
            var databaseName = continousExportResult.GetString(4);
            var ce = new ContinousExport(continousExportName, externalTableName, query, curserScopedQuery);
            lineage.AddContinousExport(databaseName, ce);
        }
    }
    #endregion
}