/// <summary>
/// Runs <paramref name="parsedQuery"/> against every targeted site and merges the
/// per-site results into a single <see cref="QueryResults"/>. Sites whose execution
/// throws are skipped (best-effort, by design).
/// </summary>
/// <param name="parsedQuery">Query to run; its <c>TargetSites</c> selects which sites are included.</param>
/// <param name="currentUser">User the query is executed on behalf of.</param>
/// <param name="result">Optional async handle used to observe cancellation between sites.</param>
private static QueryResults GetMultiSiteResults(ParsedQuery parsedQuery, User currentUser, AsyncQueryRunner.AsyncResult result = null)
{
    var sites = Current.DB.Sites.All();

    // Narrow the site list when the query targets only meta (or only non-meta) sites.
    if (parsedQuery.TargetSites == TargetSites.AllNonMetaSites)
    {
        sites = sites.Where(s => !s.Url.Contains("meta.")).ToList();
    }
    else if (parsedQuery.TargetSites == TargetSites.AllMetaSites)
    {
        sites = sites.Where(s => s.Url.Contains("meta.")).ToList();
    }

    var firstSite = sites.First();
    var results = QueryRunner.GetSingleSiteResults(parsedQuery, firstSite, currentUser, result);

    if (results.ResultSets.First().Columns.Any(c => c.Name == "Pivot"))
    {
        // Pivot mode: rename the first site's "Pivot" column to carry the site name,
        // then merge each remaining site's pivot column into the same result set.
        foreach (var info in results.ResultSets.First().Columns)
        {
            if (info.Name == "Pivot")
            {
                info.Name = firstSite.Name + " Pivot";
                break;
            }
        }

        foreach (var s in sites.Skip(1))
        {
            // Honor cancellation between sites (previously only the non-pivot branch checked,
            // so a cancel request never stopped a pivoted multi-site run).
            if (result != null && result.Cancelled)
            {
                break;
            }
            try
            {
                var tmp = QueryRunner.GetSingleSiteResults(parsedQuery, s, currentUser);
                results.ExecutionTime += tmp.ExecutionTime;
                MergePivot(s, results, tmp);
            }
            catch (Exception)
            {
                // don't blow up here ... just skip the site.
            }
        }
    }
    else
    {
        // Union mode: tag every row with its originating site and append rows
        // from each remaining site to the first result set.
        results.ResultSets[0].Columns.Add(new ResultColumnInfo { Name = "Site Name", Type = ResultColumnType.Site });
        foreach (var row in results.ResultSets[0].Rows)
        {
            row.Add(firstSite.SiteInfo);
        }

        foreach (var s in sites.Skip(1))
        {
            if (result != null && result.Cancelled)
            {
                break;
            }
            try
            {
                var tmp = QueryRunner.GetSingleSiteResults(parsedQuery, s, currentUser, result);
                foreach (var row in tmp.ResultSets[0].Rows)
                {
                    row.Add(s.SiteInfo);
                    results.ResultSets[0].Rows.Add(row);
                }
                results.ExecutionTime += tmp.ExecutionTime;
                results.Messages += "\n" + tmp.Messages;
            }
            catch (Exception)
            {
                // don't blow up ... just skip the site
            }
        }
    }

    results.TargetSites = parsedQuery.TargetSites;
    return results;
}
/// <summary>
/// Runs a query against a single site, serving from the results cache when a usable
/// cached entry exists, otherwise executing it via <see cref="ExecuteNonCached"/>.
/// </summary>
/// <param name="query">Parsed query to run.</param>
/// <param name="site">Site whose database is queried.</param>
/// <param name="user">User the query runs on behalf of.</param>
/// <param name="result">Optional async handle; cancelled runs are never cached.</param>
/// <returns>Results annotated with the site's URL, id, and lower-cased name.</returns>
private static QueryResults GetSingleSiteResults(ParsedQuery query, Site site, User user, AsyncQueryRunner.AsyncResult result = null)
{
    QueryResults results = null;
    var timer = new Stopwatch();
    timer.Start();

    var cache = QueryUtil.GetCachedResults(query, site.Id);
    if (cache != null)
    {
        // A cached entry is usable unless an execution plan was requested
        // but was never stored alongside the cached results.
        if (!query.IncludeExecutionPlan || cache.ExecutionPlan != null)
        {
            results = new QueryResults();
            results.WithCache(cache);
            results.Truncated = cache.Truncated;
            results.Messages = cache.Messages;
            results.FromCache = true;

            // If we didn't ask for the execution plan, don't return it
            if (!query.IncludeExecutionPlan)
            {
                results.ExecutionPlan = null;
            }
        }
    }
    timer.Stop();

    if (results == null)
    {
        results = ExecuteNonCached(query, site, user, result);
        results.FromCache = false;

        // Don't cache cancelled results, since we don't know what state they're in...
        // NOTE(review): synchronous runs (result == null) are never cached either —
        // confirm that is intended.
        if (result != null && !result.Cancelled)
        {
            AddResultToCache(results, query, site, cache != null);
        }
    }
    else
    {
        // Cache hit: report the (cheap) cache-lookup time as the execution time.
        results.ExecutionTime = timer.ElapsedMilliseconds;
    }

    results.Url = site.Url;
    results.SiteId = site.Id;
    // Invariant lower-casing: the site name is used as a machine-readable identifier,
    // so it must not vary with the server's current culture (e.g. Turkish dotless-I).
    results.SiteName = site.Name.ToLowerInvariant();
    return results;
}
/// <summary>
/// Executes an SQL query and populates a given <see cref="QueryResults" /> instance with the results.
/// </summary>
/// <param name="results"><see cref="QueryResults" /> instance to populate with results.</param>
/// <param name="command">SQL command to execute.</param>
/// <param name="result"><see cref="AsyncResult"/> instance to use to mark state changes.</param>
/// <param name="messages"><see cref="StringBuilder" /> instance to which to append messages.</param>
/// <param name="IncludeExecutionPlan">If true, indicates that the query execution plans are expected to be contained
/// in the result sets; otherwise, false.</param>
private static void PopulateResults(QueryResults results, SqlCommand command, AsyncQueryRunner.AsyncResult result, StringBuilder messages, bool IncludeExecutionPlan)
{
    QueryPlan plan = new QueryPlan();
    using (SqlDataReader reader = command.ExecuteReader())
    {
        // Flag output as soon as the first result set reports rows.
        if (result != null && reader.HasRows)
        {
            result.HasOutput = true;
        }
        do
        {
            // Check to see if the resultset is an execution plan
            // (SET STATISTICS XML ON emits a one-column result set with this exact name).
            if (IncludeExecutionPlan && reader.FieldCount == 1 && reader.GetName(0) == "Microsoft SQL Server 2005 XML Showplan")
            {
                if (reader.Read())
                {
                    plan.AppendStatementPlan(reader[0].ToString());
                }
            }
            else
            {
                if (reader.FieldCount == 0)
                {
                    // Statement produced no result set (e.g. INSERT/UPDATE/DELETE):
                    // record the affected-row count, if any, and move to the next result.
                    if (reader.RecordsAffected >= 0)
                    {
                        messages.AppendFormat("({0} row(s) affected)\n\n", reader.RecordsAffected);
                    }
                    continue;
                }

                var resultSet = new ResultSet();
                // Remember where this result set's messages start so they can be interleaved later.
                resultSet.MessagePosition = messages.Length;
                results.ResultSets.Add(resultSet);

                // Column metadata: map the provider CLR type to the column-type enum where known;
                // unmapped types keep the ResultColumnInfo default.
                for (int i = 0; i < reader.FieldCount; i++)
                {
                    var columnInfo = new ResultColumnInfo();
                    columnInfo.Name = reader.GetName(i);
                    ResultColumnType colType;
                    if (ResultColumnInfo.ColumnTypeMap.TryGetValue(reader.GetFieldType(i), out colType))
                    {
                        columnInfo.Type = colType;
                    }
                    resultSet.Columns.Add(columnInfo);
                }

                int currentRow = 0;
                while (reader.Read())
                {
                    // Cap the number of rows materialized; record truncation for the caller.
                    if (currentRow++ >= MAX_RESULTS)
                    {
                        results.Truncated = true;
                        results.MaxResults = MAX_RESULTS;
                        break;
                    }
                    var row = new List<object>();
                    resultSet.Rows.Add(row);
                    for (int i = 0; i < reader.FieldCount; i++)
                    {
                        object col = reader.GetValue(i);
                        // Dates are converted to JavaScript timestamps for the client.
                        if (col is DateTime)
                        {
                            var date = (DateTime)col;
                            col = date.ToJavascriptTime();
                        }
                        row.Add(col);
                    }
                }
                if (results.Truncated)
                {
                    // next result would force ado.net to fast forward
                    // through the result set, which is way too slow
                    break;
                }

                if (reader.RecordsAffected >= 0)
                {
                    messages.AppendFormat("({0} row(s) affected)\n\n", reader.RecordsAffected);
                }
                // NOTE(review): this unconditionally appends a second "row(s) affected" line
                // using the row count of the set just read — when RecordsAffected >= 0 both
                // lines are emitted; confirm the duplicate is intended.
                messages.AppendFormat("({0} row(s) affected)\n\n", resultSet.Rows.Count);
            }
        } while (reader.NextResult());
        // Cancel any remaining server-side work before the reader is disposed.
        command.Cancel();
    }
    results.ExecutionPlan = plan.PlanXml;
}
/// <summary>
/// Entry point for running a parsed query: dispatches to the single-site runner when
/// the query targets the current site, otherwise to the multi-site runner.
/// </summary>
/// <param name="query">Parsed query to execute.</param>
/// <param name="site">Site used when the query targets the current site only.</param>
/// <param name="user">User the query runs on behalf of.</param>
/// <param name="result">Optional async handle forwarded to the chosen runner.</param>
public static QueryResults GetResults(ParsedQuery query, Site site, User user, AsyncQueryRunner.AsyncResult result = null)
{
    return query.TargetSites == TargetSites.Current
        ? GetSingleSiteResults(query, site, user, result)
        : GetMultiSiteResults(query, user, result);
}
/// <summary>
/// Executes a query directly against a site's database (bypassing the cache),
/// enforcing per-IP rate limiting and blacklisting, and returns populated results.
/// </summary>
/// <param name="query">Parsed query whose SQL batches will be executed.</param>
/// <param name="site">Site whose database the query runs against.</param>
/// <param name="user">User the query runs on behalf of.</param>
/// <param name="result">Optional async handle used for cancellation and command tracking.</param>
/// <exception cref="Exception">Thrown when the caller is rate-limited or blacklisted.</exception>
public static QueryResults ExecuteNonCached(ParsedQuery query, Site site, User user, AsyncQueryRunner.AsyncResult result)
{
    var remoteIP = OData.GetRemoteIP();
    var key = "total-" + remoteIP;

    // Sliding per-IP counter: each run bumps it and refreshes the one-hour window.
    var currentCount = (int?)Current.GetCachedObject(key) ?? 0;
    currentCount++;
    Current.SetCachedObjectSliding(key, currentCount, 60 * 60);

    if (currentCount > 130)
    {
        // clearly a robot, auto black list
        Current.DB.BlackList.Insert(new { CreationDate = DateTime.UtcNow, IPAddress = remoteIP });
    }

    if (currentCount > 100)
    {
        throw new Exception("You can not run any new queries for another hour, you have exceeded your limit!");
    }

    if (Current.DB.Query<int>("select count(*) from BlackList where IPAddress = @remoteIP", new { remoteIP }).First() > 0)
    {
        // Slow blacklisted callers down a little before rejecting them.
        System.Threading.Thread.Sleep(2000);
        throw new Exception("You have been blacklisted due to abuse!");
    }

    var results = new QueryResults();

    using (SqlConnection cnn = site.GetOpenConnection())
    {
        // well we do not want to risk blocking, if somebody needs to change this we will need to add a setting
        cnn.Execute("set transaction isolation level read uncommitted");

        var timer = new Stopwatch();
        timer.Start();

        var messages = new StringBuilder();
        var infoHandler = new SqlInfoMessageEventHandler((sender, args) =>
        {
            // todo handle errors as well
            messages.AppendLine(args.Message);
        });

        try
        {
            cnn.InfoMessage += infoHandler;

            // Turning on XML statistics makes SQL Server emit a showplan result set
            // after each statement, which PopulateResults recognizes and collects.
            if (query.IncludeExecutionPlan)
            {
                using (var command = new SqlCommand("SET STATISTICS XML ON", cnn))
                {
                    command.ExecuteNonQuery();
                }
            }

            var plan = new QueryPlan();
            foreach (string batch in query.ExecutionSqlBatches)
            {
                using (var command = new SqlCommand(batch, cnn))
                {
                    if (result != null)
                    {
                        // Expose the live command so the async runner can cancel it.
                        result.Command = command;
                        if (result.Cancelled)
                        {
                            continue;
                        }
                    }
                    command.CommandTimeout = AppSettings.QueryTimeout;

                    try
                    {
                        PopulateResults(results, command, result, messages, query.IncludeExecutionPlan);
                    }
                    catch (Exception)
                    {
                        // Ugh. So, if we cancel the query in-process, we get an exception...
                        // But we have no good way of knowing that the exception here is actually
                        // *that* exception...so we'll just assume it was if the state is Cancelled
                        if (result == null || result.State != AsyncQueryRunner.AsyncState.Cancelled)
                        {
                            // `throw;` preserves the original stack trace;
                            // the previous `throw ex;` reset it.
                            throw;
                        }
                    }
                }

                if (query.IncludeExecutionPlan)
                {
                    // Fold this batch's plan into the aggregate, then clear the
                    // per-batch slot so the next batch starts clean.
                    plan.AppendBatchPlan(results.ExecutionPlan);
                    results.ExecutionPlan = null;
                }
            }
            results.ExecutionPlan = plan.PlanXml;
        }
        finally
        {
            // Always detach the handler and flush accumulated messages,
            // even when a batch threw.
            cnn.InfoMessage -= infoHandler;
            results.Messages = messages.ToString();
        }

        timer.Stop();
        results.ExecutionTime = timer.ElapsedMilliseconds;

        ProcessMagicColumns(results, cnn);
    }

    return results;
}