public string Build()
{
    var sql = new StringBuilder();

    AppendSelect(sql);
    sql.NewLine();
    AppendFrom(sql);
    sql.NewLine();
    AppendWhere(sql);
    sql.NewLineIf(Where.Any());
    AppendSqlPartList(sql, GroupBy, "GROUP BY");
    sql.NewLineIf(GroupBy.Any());
    AppendSqlPartList(sql, Having, "HAVING");
    sql.NewLineIf(Having.Any());

    var paging = Paging.Enabled;
    var pagingWithCte = paging && Paging.UseCte;

    if (pagingWithCte)
    {
        var sqlCte = new StringBuilder();
        sqlCte.Append("WITH CTE_MAIN AS (");
        sqlCte.NewLine();
        sqlCte.Append(sql);
        sqlCte.NewLine();
        sqlCte.Append(string.Format("), CTE_COUNT AS (SELECT COUNT(*) AS [{0}] FROM CTE_MAIN)", Paging.CteTotalCountFieldName));
        sqlCte.NewLine();
        sqlCte.Append("SELECT * FROM CTE_MAIN, CTE_COUNT");
        sqlCte.NewLine();
        AppendSqlPartList(sqlCte, OrderBy, "ORDER BY", removeTable: true);
        sqlCte.NewLine();
        AppendPagination(sqlCte);

        sql = sqlCte;
    }
    else if (paging)
    {
        AppendSqlPartList(sql, OrderBy, "ORDER BY");
        sql.NewLine();
        AppendPagination(sql);
        sql.NewLine();
    }
    else
    {
        AppendSqlPartList(sql, OrderBy, "ORDER BY");
    }

    if (Separator)
    {
        sql.Append(";");
    }

    return Prettify(sql.ToString());
}
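// Shape of the statement Build() produces when CTE-based paging is enabled (an illustrative sketch
// only - the column list, WHERE/ORDER BY parts and the exact pagination clause emitted by
// AppendPagination depend on the builder's state, and [TotalCount] stands in for
// Paging.CteTotalCountFieldName):
//
//   WITH CTE_MAIN AS (
//       SELECT ... FROM ... WHERE ...
//   ), CTE_COUNT AS (SELECT COUNT(*) AS [TotalCount] FROM CTE_MAIN)
//   SELECT * FROM CTE_MAIN, CTE_COUNT
//   ORDER BY ...
//   <pagination clause appended by AppendPagination>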
public bool CanInLine()
{
    return Fields.Count <= MaxInlineColumns &&
           From.Count == 1 &&
           !From.First().Joins.Any() &&
           (Where == null || Where.CanInline) &&
           (GroupBy == null || !GroupBy.Any()) &&
           (OrderBy == null || !OrderBy.Any());
}
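// Minimal usage sketch for the two members above. The enclosing type name (QueryBuilder) and the
// way its collections are populated are assumptions for illustration only; Build(), CanInLine(),
// Paging and Separator are the only members confirmed by the code itself.
//
//   var builder = new QueryBuilder();        // hypothetical constructor
//   builder.Paging.Enabled = true;           // enable paging
//   builder.Paging.UseCte = true;            // wrap the query in CTE_MAIN/CTE_COUNT for a total row count
//   builder.Separator = true;                // terminate the statement with ';'
//   var sql = builder.Build();               // prettified SQL text
//   var inlinable = builder.CanInLine();     // true only for a single-table, join-free, non-grouped, unordered query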
public override IDataExecutionPlanNode FoldQuery(IDictionary<string, DataSource> dataSources, IQueryExecutionOptions options, IDictionary<string, Type> parameterTypes, IList<OptimizerHint> hints)
{
    if (_folded)
    {
        return this;
    }

    Source = Source.FoldQuery(dataSources, options, parameterTypes, hints);
    Source.Parent = this;

    // Special case for using RetrieveTotalRecordCount instead of FetchXML
    if (options.UseRetrieveTotalRecordCount &&
        Source is FetchXmlScan fetch &&
        (fetch.Entity.Items == null || fetch.Entity.Items.Length == 0) &&
        GroupBy.Count == 0 &&
        Aggregates.Count == 1 &&
        Aggregates.Single().Value.AggregateType == AggregateType.CountStar &&
        dataSources[fetch.DataSource].Metadata[fetch.Entity.name].DataProviderId == null) // RetrieveTotalRecordCountRequest is not valid for virtual entities
    {
        var count = new RetrieveTotalRecordCountNode { DataSource = fetch.DataSource, EntityName = fetch.Entity.name };
        var countName = count.GetSchema(dataSources, parameterTypes).Schema.Single().Key;

        if (countName == Aggregates.Single().Key)
        {
            return count;
        }

        var rename = new ComputeScalarNode
        {
            Source = count,
            Columns =
            {
                [Aggregates.Single().Key] = new ColumnReferenceExpression
                {
                    MultiPartIdentifier = new MultiPartIdentifier
                    {
                        Identifiers = { new Identifier { Value = countName } }
                    }
                }
            }
        };
        count.Parent = rename;

        return rename;
    }

    if (Source is FetchXmlScan || Source is ComputeScalarNode computeScalar && computeScalar.Source is FetchXmlScan)
    {
        // Check if all the aggregates & groupings can be done in FetchXML. Can only convert them if they can ALL
        // be handled - if any one needs to be calculated manually, we need to calculate them all.
        var canUseFetchXmlAggregate = true;

        // Also track if we can partition the query for larger source data sets. We can't partition DISTINCT aggregates,
        // and need to transform AVG(field) to SUM(field) / COUNT(field)
        var canPartition = true;

        foreach (var agg in Aggregates)
        {
            if (agg.Value.SqlExpression != null && !(agg.Value.SqlExpression is ColumnReferenceExpression))
            {
                canUseFetchXmlAggregate = false;
                break;
            }

            if (agg.Value.Distinct && agg.Value.AggregateType != AggregateType.Count)
            {
                canUseFetchXmlAggregate = false;
                break;
            }

            if (agg.Value.AggregateType == AggregateType.First)
            {
                canUseFetchXmlAggregate = false;
                break;
            }

            if (agg.Value.Distinct)
            {
                canPartition = false;
            }
        }

        var fetchXml = Source as FetchXmlScan;
        computeScalar = Source as ComputeScalarNode;

        var partnames = new Dictionary<string, FetchXml.DateGroupingType>(StringComparer.OrdinalIgnoreCase)
        {
            ["year"] = DateGroupingType.year,
            ["yy"] = DateGroupingType.year,
            ["yyyy"] = DateGroupingType.year,
            ["quarter"] = DateGroupingType.quarter,
            ["qq"] = DateGroupingType.quarter,
            ["q"] = DateGroupingType.quarter,
            ["month"] = DateGroupingType.month,
            ["mm"] = DateGroupingType.month,
            ["m"] = DateGroupingType.month,
            ["day"] = DateGroupingType.day,
            ["dd"] = DateGroupingType.day,
            ["d"] = DateGroupingType.day,
            ["week"] = DateGroupingType.week,
            ["wk"] = DateGroupingType.week,
            ["ww"] = DateGroupingType.week
        };

        if (computeScalar != null)
        {
            fetchXml = (FetchXmlScan)computeScalar.Source;

            // Groupings may be on DATEPART function, which will have been split into separate Compute Scalar node. Check if all the scalar values
            // being computed are DATEPART functions that can be converted to FetchXML and are used as groupings
            foreach (var scalar in computeScalar.Columns)
            {
                if (!(scalar.Value is FunctionCall func) ||
                    !func.FunctionName.Value.Equals("DATEPART", StringComparison.OrdinalIgnoreCase) ||
                    func.Parameters.Count != 2 ||
                    !(func.Parameters[0] is ColumnReferenceExpression datePartType) ||
                    !(func.Parameters[1] is ColumnReferenceExpression datePartCol))
                {
                    canUseFetchXmlAggregate = false;
                    break;
                }

                if (!GroupBy.Any(g => g.MultiPartIdentifier.Identifiers.Count == 1 && g.MultiPartIdentifier.Identifiers[0].Value == scalar.Key))
                {
                    canUseFetchXmlAggregate = false;
                    break;
                }

                if (!partnames.ContainsKey(datePartType.GetColumnName()))
                {
                    canUseFetchXmlAggregate = false;
                    break;
                }

                // FetchXML dategrouping always uses local timezone. If we're using UTC we can't use it
                if (!options.UseLocalTimeZone)
                {
                    canUseFetchXmlAggregate = false;
                    break;
                }
            }
        }

        var metadata = dataSources[fetchXml.DataSource].Metadata;

        // FetchXML is translated to QueryExpression for virtual entities, which doesn't support aggregates
        if (metadata[fetchXml.Entity.name].DataProviderId != null)
        {
            canUseFetchXmlAggregate = false;
        }

        // Check FetchXML supports grouping by each of the requested attributes
        var fetchSchema = fetchXml.GetSchema(dataSources, parameterTypes);

        foreach (var group in GroupBy)
        {
            if (!fetchSchema.ContainsColumn(group.GetColumnName(), out var groupCol))
            {
                continue;
            }

            var parts = groupCol.Split('.');
            string entityName;

            if (parts[0] == fetchXml.Alias)
            {
                entityName = fetchXml.Entity.name;
            }
            else
            {
                entityName = fetchXml.Entity.FindLinkEntity(parts[0]).name;
            }

            var attr = metadata[entityName].Attributes.SingleOrDefault(a => a.LogicalName == parts[1]);

            // Can't group by virtual attributes
            if (attr == null || attr.AttributeOf != null)
            {
                canUseFetchXmlAggregate = false;
            }

            // Can't group by multi-select picklist attributes
            if (attr is MultiSelectPicklistAttributeMetadata)
            {
                canUseFetchXmlAggregate = false;
            }
        }

        var serializer = new XmlSerializer(typeof(FetchXml.FetchType));

        if (canUseFetchXmlAggregate)
        {
            // FetchXML aggregates can trigger an AggregateQueryRecordLimitExceeded error. Clone the non-aggregate FetchXML
            // so we can try to run the native aggregate version but fall back to in-memory processing where necessary
            var clonedFetchXml = new FetchXmlScan
            {
                DataSource = fetchXml.DataSource,
                Alias = fetchXml.Alias,
                AllPages = fetchXml.AllPages,
                FetchXml = (FetchXml.FetchType)serializer.Deserialize(new StringReader(fetchXml.FetchXmlString)),
                ReturnFullSchema = fetchXml.ReturnFullSchema
            };

            if (Source == fetchXml)
            {
                Source = clonedFetchXml;
                clonedFetchXml.Parent = this;
            }
            else
            {
                computeScalar.Source = clonedFetchXml;
                clonedFetchXml.Parent = computeScalar;
            }

            fetchXml.FetchXml.aggregate = true;
            fetchXml.FetchXml.aggregateSpecified = true;
            fetchXml.FetchXml = fetchXml.FetchXml;

            var schema = Source.GetSchema(dataSources, parameterTypes);

            foreach (var grouping in GroupBy)
            {
                var colName = grouping.GetColumnName();
                var alias = grouping.MultiPartIdentifier.Identifiers.Last().Value;
                DateGroupingType? dateGrouping = null;

                if (computeScalar != null && computeScalar.Columns.TryGetValue(colName, out var datePart))
                {
                    dateGrouping = partnames[((ColumnReferenceExpression)((FunctionCall)datePart).Parameters[0]).GetColumnName()];
                    colName = ((ColumnReferenceExpression)((FunctionCall)datePart).Parameters[1]).GetColumnName();
                }

                schema.ContainsColumn(colName, out colName);

                var attribute = fetchXml.AddAttribute(colName, a => a.groupbySpecified && a.groupby == FetchBoolType.@true && a.alias == alias, metadata, out _, out var linkEntity);
                attribute.groupby = FetchBoolType.@true;
                attribute.groupbySpecified = true;
                attribute.alias = alias;

                if (dateGrouping != null)
                {
                    attribute.dategrouping = dateGrouping.Value;
                    attribute.dategroupingSpecified = true;
                }
                else if (grouping.GetType(schema, null, parameterTypes) == typeof(SqlDateTime))
                {
                    // Can't group on datetime columns without a DATEPART specification
                    canUseFetchXmlAggregate = false;
                }

                // Add a sort order for each grouping to allow consistent paging
                var items = linkEntity?.Items ?? fetchXml.Entity.Items;
                var sort = items.OfType<FetchOrderType>().FirstOrDefault(order => order.alias == alias);

                if (sort == null)
                {
                    if (linkEntity == null)
                    {
                        fetchXml.Entity.AddItem(new FetchOrderType { alias = alias });
                    }
                    else
                    {
                        linkEntity.AddItem(new FetchOrderType { alias = alias });
                    }
                }
            }

            foreach (var agg in Aggregates)
            {
                var col = (ColumnReferenceExpression)agg.Value.SqlExpression;
                var colName = col == null ? (fetchXml.Alias + "." + metadata[fetchXml.Entity.name].PrimaryIdAttribute) : col.GetColumnName();

                if (!schema.ContainsColumn(colName, out colName))
                {
                    canUseFetchXmlAggregate = false;
                }

                var distinct = agg.Value.Distinct ? FetchBoolType.@true : FetchBoolType.@false;

                FetchXml.AggregateType aggregateType;

                switch (agg.Value.AggregateType)
                {
                    case AggregateType.Average:
                        aggregateType = FetchXml.AggregateType.avg;
                        break;

                    case AggregateType.Count:
                        aggregateType = FetchXml.AggregateType.countcolumn;
                        break;

                    case AggregateType.CountStar:
                        aggregateType = FetchXml.AggregateType.count;
                        break;

                    case AggregateType.Max:
                        aggregateType = FetchXml.AggregateType.max;
                        break;

                    case AggregateType.Min:
                        aggregateType = FetchXml.AggregateType.min;
                        break;

                    case AggregateType.Sum:
                        aggregateType = FetchXml.AggregateType.sum;
                        break;

                    default:
                        throw new ArgumentOutOfRangeException();
                }

                // min, max, sum and avg are not supported for optionset attributes
                var parts = colName.Split('.');
                string entityName;

                if (parts[0] == fetchXml.Alias)
                {
                    entityName = fetchXml.Entity.name;
                }
                else
                {
                    entityName = fetchXml.Entity.FindLinkEntity(parts[0]).name;
                }

                var attr = metadata[entityName].Attributes.SingleOrDefault(a => a.LogicalName == parts[1]);

                if (attr == null)
                {
                    canUseFetchXmlAggregate = false;
                }

                if (attr is EnumAttributeMetadata && (aggregateType == FetchXml.AggregateType.avg || aggregateType == FetchXml.AggregateType.max || aggregateType == FetchXml.AggregateType.min || aggregateType == FetchXml.AggregateType.sum))
                {
                    canUseFetchXmlAggregate = false;
                }

                var attribute = fetchXml.AddAttribute(colName, a => a.aggregate == aggregateType && a.alias == agg.Key && a.distinct == distinct, metadata, out _, out _);
                attribute.aggregate = aggregateType;
                attribute.aggregateSpecified = true;
                attribute.alias = agg.Key;

                if (agg.Value.Distinct)
                {
                    attribute.distinct = distinct;
                    attribute.distinctSpecified = true;
                }
            }
        }

        // FoldQuery can be called again in some circumstances. Don't repeat the folding operation and create another try/catch
        _folded = true;

        // Check how we should execute this aggregate if the FetchXML aggregate fails or is not available. Use stream aggregate
        // for scalar aggregates or where all the grouping fields can be folded into sorts.
        var nonFetchXmlAggregate = FoldToStreamAggregate(dataSources, options, parameterTypes, hints);

        if (!canUseFetchXmlAggregate)
        {
            return nonFetchXmlAggregate;
        }

        IDataExecutionPlanNode firstTry = fetchXml;

        // If the main aggregate query fails due to having over 50K records, check if we can retry with partitioning. We
        // need a createdon field to be available for this to work.
        if (canPartition)
        {
            canPartition = metadata[fetchXml.Entity.name].Attributes.Any(a => a.LogicalName == "createdon");
        }

        if (canUseFetchXmlAggregate && canPartition)
        {
            // Create a clone of the aggregate FetchXML query
            var partitionedFetchXml = new FetchXmlScan
            {
                DataSource = fetchXml.DataSource,
                Alias = fetchXml.Alias,
                AllPages = fetchXml.AllPages,
                FetchXml = (FetchXml.FetchType)serializer.Deserialize(new StringReader(fetchXml.FetchXmlString)),
                ReturnFullSchema = fetchXml.ReturnFullSchema
            };

            var partitionedAggregates = new PartitionedAggregateNode
            {
                Source = partitionedFetchXml
            };
            partitionedFetchXml.Parent = partitionedAggregates;
            var partitionedResults = (IDataExecutionPlanNode)partitionedAggregates;

            partitionedAggregates.GroupBy.AddRange(GroupBy);

            foreach (var aggregate in Aggregates)
            {
                if (aggregate.Value.AggregateType != AggregateType.Average)
                {
                    partitionedAggregates.Aggregates[aggregate.Key] = aggregate.Value;
                }
                else
                {
                    // Rewrite AVG as SUM / COUNT
                    partitionedAggregates.Aggregates[aggregate.Key + "_sum"] = new Aggregate
                    {
                        AggregateType = AggregateType.Sum,
                        SqlExpression = aggregate.Value.SqlExpression
                    };
                    partitionedAggregates.Aggregates[aggregate.Key + "_count"] = new Aggregate
                    {
                        AggregateType = AggregateType.Count,
                        SqlExpression = aggregate.Value.SqlExpression
                    };

                    if (partitionedResults == partitionedAggregates)
                    {
                        partitionedResults = new ComputeScalarNode { Source = partitionedAggregates };
                        partitionedAggregates.Parent = partitionedResults;
                    }

                    // Handle count = 0 => null
                    ((ComputeScalarNode)partitionedResults).Columns[aggregate.Key] = new SearchedCaseExpression
                    {
                        WhenClauses =
                        {
                            new SearchedWhenClause
                            {
                                WhenExpression = new BooleanComparisonExpression
                                {
                                    FirstExpression = (aggregate.Key + "_count").ToColumnReference(),
                                    ComparisonType = BooleanComparisonType.Equals,
                                    SecondExpression = new IntegerLiteral { Value = "0" }
                                },
                                ThenExpression = new NullLiteral()
                            }
                        },
                        ElseExpression = new BinaryExpression
                        {
                            FirstExpression = (aggregate.Key + "_sum").ToColumnReference(),
                            BinaryExpressionType = BinaryExpressionType.Divide,
                            SecondExpression = (aggregate.Key + "_count").ToColumnReference()
                        }
                    };

                    // Find the AVG expression in the FetchXML and replace with _sum and _count
                    var avg = partitionedFetchXml.Entity.FindAliasedAttribute(aggregate.Key, null, out var linkEntity);
                    var sumCount = new object[]
                    {
                        new FetchAttributeType
                        {
                            name = avg.name,
                            alias = avg.alias + "_sum",
                            aggregateSpecified = true,
                            aggregate = FetchXml.AggregateType.sum
                        },
                        new FetchAttributeType
                        {
                            name = avg.name,
                            alias = avg.alias + "_count",
                            aggregateSpecified = true,
                            aggregate = FetchXml.AggregateType.countcolumn
                        }
                    };

                    if (linkEntity == null)
                    {
                        partitionedFetchXml.Entity.Items = partitionedFetchXml.Entity.Items
                            .Except(new[] { avg })
                            .Concat(sumCount)
                            .ToArray();
                    }
                    else
                    {
                        linkEntity.Items = linkEntity.Items
                            .Except(new[] { avg })
                            .Concat(sumCount)
                            .ToArray();
                    }
                }
            }

            var tryPartitioned = new TryCatchNode
            {
                TrySource = firstTry,
                CatchSource = partitionedResults,
                ExceptionFilter = ex => GetOrganizationServiceFault(ex, out var fault) && IsAggregateQueryLimitExceeded(fault)
            };
            partitionedResults.Parent = tryPartitioned;
            firstTry.Parent = tryPartitioned;
            firstTry = tryPartitioned;
        }

        var tryCatch = new TryCatchNode
        {
            TrySource = firstTry,
            CatchSource = nonFetchXmlAggregate,
            ExceptionFilter = ex => (ex is QueryExecutionException qee && (qee.InnerException is PartitionedAggregateNode.PartitionOverflowException || qee.InnerException is FetchXmlScan.InvalidPagingException)) || (GetOrganizationServiceFault(ex, out var fault) && IsAggregateQueryRetryable(fault))
        };

        firstTry.Parent = tryCatch;
        nonFetchXmlAggregate.Parent = tryCatch;

        return tryCatch;
    }

    return FoldToStreamAggregate(dataSources, options, parameterTypes, hints);
}
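// Sketch of the execution plan shape this fold produces when both the native FetchXML aggregate and
// partitioning are available (derived from the logic above, not captured output):
//
//   TryCatchNode                                 -- falls back on retryable aggregate faults or paging/partition overflow
//   |- TrySource:  TryCatchNode                  -- falls back on the aggregate record limit being exceeded
//   |  |- TrySource:  FetchXmlScan               -- native FetchXML aggregate
//   |  |- CatchSource: PartitionedAggregateNode over a cloned FetchXmlScan, optionally wrapped in a
//   |                  ComputeScalarNode that rebuilds AVG from the _sum and _count attributes
//   |- CatchSource: the in-memory aggregate returned by FoldToStreamAggregate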