/// <summary>
/// Constructs a token bucket object that can be compared against other
/// buckets. This dummy bucket is a patently invalid bucket, and cannot
/// be used for any other operations besides comparison.
/// </summary>
/// <remarks>
/// PERF NOTE: A dummy bucket is intentionally very lightweight, and it
/// allocates a minimum of memory compared to a real bucket.
/// </remarks>
/// <returns>An item bucket that is invalid for everything except comparisons.</returns>
internal static ItemBucket GetDummyBucketForComparisons(Dictionary<string, string> metadata)
{
    ItemBucket bucket = new ItemBucket();
    bucket._metadata = metadata;

    return bucket;
}
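// Illustrative sketch (not part of the shipping code): a dummy bucket serves purely as a
// search key against a sorted List<ItemBucket>, relying on the bucket comparison to order
// buckets by their metadata values. For example:
//
//     ItemBucket key = ItemBucket.GetDummyBucketForComparisons(itemMetadataValues);
//     int index = buckets.BinarySearch(key);   // comparison only; never call AddItem on 'key'
//
// BucketConsumedItems below shows the real call site.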
/// <summary>
/// Modifies items in the world - specifically, changes their metadata. Changes to items that are part
/// of the project manifest are backed up, so they can be reverted when the project is reset after the
/// end of the build.
/// </summary>
/// <param name="child">The item specification to evaluate and modify.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="keepMetadata">An <see cref="ISet{String}"/> of metadata names to keep.</param>
/// <param name="removeMetadata">An <see cref="ISet{String}"/> of metadata names to remove.</param>
private void ExecuteModify(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, ISet<string> keepMetadata, ISet<string> removeMetadata)
{
    ICollection<ProjectItemInstance> group = bucket.Lookup.GetItems(child.ItemType);
    if (group == null || group.Count == 0)
    {
        // No items of this type to modify
        return;
    }

    // Figure out what metadata names and values we need to set
    var metadataToSet = new Lookup.MetadataModifications(keepMetadata != null);

    // Filter the metadata as appropriate
    if (keepMetadata != null)
    {
        foreach (var metadataName in keepMetadata)
        {
            metadataToSet[metadataName] = Lookup.MetadataModification.CreateFromNoChange();
        }
    }
    else if (removeMetadata != null)
    {
        foreach (var metadataName in removeMetadata)
        {
            metadataToSet[metadataName] = Lookup.MetadataModification.CreateFromRemove();
        }
    }

    foreach (ProjectItemGroupTaskMetadataInstance metadataInstance in child.Metadata)
    {
        bool condition = ConditionEvaluator.EvaluateCondition(
            metadataInstance.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            Project.Directory,
            metadataInstance.ConditionLocation,
            LoggingContext.LoggingService,
            LoggingContext.BuildEventContext,
            FileSystems.Default);

        if (condition)
        {
            string evaluatedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(metadataInstance.Value, ExpanderOptions.ExpandAll, metadataInstance.Location);
            metadataToSet[metadataInstance.Name] = Lookup.MetadataModification.CreateFromNewValue(evaluatedValue);
        }
    }

    // Now apply the changes. This must be done after filtering, since explicitly set metadata overrides filters.
    bucket.Lookup.ModifyItems(child.ItemType, group, metadataToSet);
}
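// For orientation, a hedged example of the project syntax that reaches this method (item and
// metadata names are invented for illustration): an item element inside a target with no
// Include or Remove attribute updates metadata on the existing items of that type, e.g.
//
//     <Target Name="SetCulture">
//       <ItemGroup>
//         <Compile>
//           <Culture>fr</Culture>
//         </Compile>
//       </ItemGroup>
//     </Target>
//
// When present, KeepMetadata/RemoveMetadata on the item element arrive here as the
// keepMetadata/removeMetadata parameters and seed the filter built above.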
private List<ProjectItemInstance> FindItemsUsingMatchOnMetadata(
    ICollection<ProjectItemInstance> items,
    ProjectItemGroupTaskItemInstance child,
    ItemBucket bucket,
    HashSet<string> matchOnMetadata,
    MatchOnMetadataOptions options)
{
    ErrorUtilities.VerifyThrowArgumentNull(matchOnMetadata, nameof(matchOnMetadata));

    var itemSpec = new ItemSpec<ProjectPropertyInstance, ProjectItemInstance>(child.Remove, bucket.Expander, child.RemoveLocation, Project.Directory, true);

    ProjectFileErrorUtilities.VerifyThrowInvalidProjectFile(
        itemSpec.Fragments.Count == 1
            && itemSpec.Fragments.First() is ItemSpec<ProjectPropertyInstance, ProjectItemInstance>.ItemExpressionFragment
            && matchOnMetadata.Count == 1,
        new BuildEventFileInfo(string.Empty),
        "OM_MatchOnMetadataIsRestrictedToOnlyOneReferencedItem",
        child.RemoveLocation,
        child.Remove);

    return items.Where(item => itemSpec.MatchesItemOnMetadata(item, matchOnMetadata, options)).ToList();
}
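// A hedged sketch of the syntax this method supports (item and metadata names invented):
//
//     <Target Name="Prune">
//       <ItemGroup>
//         <A Remove="@(B)" MatchOnMetadata="Id" />
//       </ItemGroup>
//     </Target>
//
// removes each A item whose 'Id' metadata equals the 'Id' of some B item. As the
// VerifyThrowInvalidProjectFile check above enforces, the Remove must be a single item
// expression and exactly one metadata name may be specified.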
/// <summary>
/// Removes items from the world. Removals of items that are part of the project manifest are backed up, so
/// they can be reverted when the project is reset after the end of the build.
/// </summary>
/// <param name="child">The item specification to evaluate and remove.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="matchOnMetadata">Metadata names to match on, or null for ordinary specification matching.</param>
/// <param name="matchingOptions">Options controlling how metadata values are matched.</param>
private void ExecuteRemove(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, HashSet<string> matchOnMetadata, MatchOnMetadataOptions matchingOptions)
{
    ICollection<ProjectItemInstance> group = bucket.Lookup.GetItems(child.ItemType);
    if (group == null)
    {
        // No items of this type to remove
        return;
    }

    List<ProjectItemInstance> itemsToRemove;
    if (matchOnMetadata == null)
    {
        itemsToRemove = FindItemsMatchingSpecification(group, child.Remove, child.RemoveLocation, bucket.Expander);
    }
    else
    {
        itemsToRemove = FindItemsUsingMatchOnMetadata(group, child, bucket, matchOnMetadata, matchingOptions);
    }

    if (itemsToRemove != null)
    {
        if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToRemove.Count > 0)
        {
            var itemGroupText = ItemGroupLoggingHelper.GetParameterText(
                ItemGroupLoggingHelper.ItemGroupRemoveLogMessage,
                child.ItemType,
                itemsToRemove,
                logItemMetadata: true);
            LoggingContext.LogCommentFromText(MessageImportance.Low, itemGroupText);
        }

        bucket.Lookup.RemoveItems(itemsToRemove);
    }
}
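// For contrast, the plain (non-metadata) removal path handles specifications such as the
// following illustrative fragment, which flows through FindItemsMatchingSpecification above:
//
//     <ItemGroup>
//       <Compile Remove="Generated\*.cs" />
//     </ItemGroup>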
/// <summary>
/// Partitions the items consumed by the batchable object into buckets, where each bucket contains a set of items that
/// have the same value set on all item metadata consumed by the object.
/// </summary>
/// <remarks>
/// PERF NOTE: Given n items and m batching metadata that produce l buckets, it is usually the case that n > l > m,
/// because a batchable object typically uses one or two item metadata to control batching, and only has a handful of
/// buckets. The number of buckets is typically only large if a batchable object is using single-item batching
/// (where l == n). Any algorithm devised for bucketing, therefore, should try to minimize n and l in its complexity
/// equation. The algorithm below has a complexity of O(n*lg(l)*m/2) in its comparisons, and is effectively O(n) when
/// l is small, and O(n*lg(n)) in the worst case as l -> n. However, note that the comparison complexity is not the
/// same as the operational complexity for this algorithm. The operational complexity of this algorithm is actually
/// O(n*m + n*lg(l)*m/2 + n*l/2 + n + l), which is effectively O(n^2) in the worst case. The additional complexity comes
/// from the array and metadata operations that are performed. However, those operations are extremely cheap compared
/// to the comparison operations, which dominate the time spent in this method.
/// </remarks>
/// <returns>List containing ItemBucket objects (can be empty), each one representing an execution batch.</returns>
private static List<ItemBucket> BucketConsumedItems(
    Lookup lookup,
    Dictionary<string, ICollection<ProjectItemInstance>> itemListsToBeBatched,
    Dictionary<string, MetadataReference> consumedMetadataReferences,
    ElementLocation elementLocation)
{
    ErrorUtilities.VerifyThrow(itemListsToBeBatched.Count > 0, "Need item types consumed by the batchable object.");
    ErrorUtilities.VerifyThrow(consumedMetadataReferences.Count > 0, "Need item metadata consumed by the batchable object.");

    var buckets = new List<ItemBucket>();

    // Get and iterate through the list of item names that we're supposed to batch on.
    foreach (KeyValuePair<string, ICollection<ProjectItemInstance>> entry in itemListsToBeBatched)
    {
        string itemName = entry.Key;

        // Use the previously-fetched items, if possible
        ICollection<ProjectItemInstance> items = entry.Value ?? lookup.GetItems(itemName);
        if (items != null)
        {
            foreach (ProjectItemInstance item in items)
            {
                // Get this item's values for all the metadata consumed by the batchable object.
                Dictionary<string, string> itemMetadataValues = GetItemMetadataValues(item, consumedMetadataReferences, elementLocation);

                // Put the metadata into a dummy bucket we can use for searching.
                ItemBucket dummyBucket = ItemBucket.GetDummyBucketForComparisons(itemMetadataValues);

                // Look through all previously created buckets to find a bucket whose items have the same values as
                // this item for all metadata consumed by the batchable object.
                int matchingBucketIndex = buckets.BinarySearch(dummyBucket);
                ItemBucket matchingBucket = (matchingBucketIndex >= 0) ? buckets[matchingBucketIndex] : null;

                // If we didn't find a bucket that matches this item, create a new one, adding
                // this item to the bucket.
                if (matchingBucket == null)
                {
                    matchingBucket = new ItemBucket(itemListsToBeBatched.Keys, itemMetadataValues, lookup, buckets.Count);

                    // Make sure to put the new bucket into the appropriate location in the sorted list as
                    // indicated by the binary search.
                    // NOTE: observe the ~ operator (bitwise complement) in front of the index -- when
                    // List.BinarySearch() does not find a match, it returns the bitwise complement of the
                    // index at which the element should be inserted to keep the list sorted.
                    buckets.Insert(~matchingBucketIndex, matchingBucket);
                }

                // We already have a bucket for this type of item, so add this item to the bucket.
                matchingBucket.AddItem(item);
            }
        }
    }

    // Put the buckets back in the order in which they were discovered, so that the first
    // item declared in the project file ends up in the first batch passed into the target/task.
    var orderedBuckets = new List<ItemBucket>(buckets.Count);
    for (int i = 0; i < buckets.Count; ++i)
    {
        orderedBuckets.Add(null);
    }

    foreach (ItemBucket bucket in buckets)
    {
        orderedBuckets[bucket.BucketSequenceNumber] = bucket;
    }

    return orderedBuckets;
}
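// To make the n/m/l variables in the PERF NOTE above concrete, consider a task invocation
// such as the following illustrative fragment (item names invented):
//
//     <Target Name="Localize">
//       <MakeDir Directories="obj\%(Compile.Culture)" />
//     </Target>
//
// It consumes one metadata (m == 1). Given n == 3 Compile items with Culture values
// fr, fr, en, bucketing yields l == 2 buckets: one holding the two 'fr' items and one
// holding the 'en' item, each of which becomes a separate execution batch.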
/// <summary>
/// Runs all of the tasks for this target, batched as necessary.
/// </summary>
internal async Task ExecuteTarget(ITaskBuilder taskBuilder, BuildRequestEntry requestEntry, ProjectLoggingContext projectLoggingContext, CancellationToken cancellationToken)
{
#if MSBUILDENABLEVSPROFILING
    try
    {
        string beginTargetBuild = String.Format(CultureInfo.CurrentCulture, "Build Target {0} in Project {1} - Start", this.Name, projectFullPath);
        DataCollection.CommentMarkProfile(8800, beginTargetBuild);
#endif
    try
    {
        VerifyState(_state, TargetEntryState.Execution);
        ErrorUtilities.VerifyThrow(!_isExecuting, "Target {0} is already executing", _target.Name);
        _cancellationToken = cancellationToken;
        _isExecuting = true;

        // Generate the batching buckets. Note that each bucket will get a lookup based on the baseLookup. This lookup will be in its
        // own scope, which we will collapse back down into the baseLookup at the bottom of the function.
        List<ItemBucket> buckets = BatchingEngine.PrepareBatchingBuckets(GetBatchableParametersForTarget(), _baseLookup, _target.Location);

        WorkUnitResult aggregateResult = new WorkUnitResult();
        TargetLoggingContext targetLoggingContext = null;
        bool targetSuccess = false;
        int numberOfBuckets = buckets.Count;
        string projectFullPath = requestEntry.RequestConfiguration.ProjectFullPath;

        string parentTargetName = null;
        if (ParentEntry != null && ParentEntry.Target != null)
        {
            parentTargetName = ParentEntry.Target.Name;
        }

        for (int i = 0; i < numberOfBuckets; i++)
        {
            ItemBucket bucket = buckets[i];

            // If one of the buckets failed, stop building.
            if (aggregateResult.ActionCode == WorkUnitActionCode.Stop)
            {
                break;
            }

            targetLoggingContext = projectLoggingContext.LogTargetBatchStarted(projectFullPath, _target, parentTargetName, _buildReason);
            WorkUnitResult bucketResult = null;
            targetSuccess = false;

            Lookup.Scope entryForInference = null;
            Lookup.Scope entryForExecution = null;

            try
            {
                // This isn't really dependency analysis. This is up-to-date checking. Based on this we will be able to determine if we should
                // run tasks in inference or execution mode (or both) or just skip them altogether.
                ItemDictionary<ProjectItemInstance> changedTargetInputs;
                ItemDictionary<ProjectItemInstance> upToDateTargetInputs;
                Lookup lookupForInference;
                Lookup lookupForExecution;

                // UNDONE: (Refactor) Refactor TargetUpToDateChecker to take a logging context, not a logging service.
                TargetUpToDateChecker dependencyAnalyzer = new TargetUpToDateChecker(requestEntry.RequestConfiguration.Project, _target, targetLoggingContext.LoggingService, targetLoggingContext.BuildEventContext);
                DependencyAnalysisResult dependencyResult = dependencyAnalyzer.PerformDependencyAnalysis(bucket, out changedTargetInputs, out upToDateTargetInputs);

                switch (dependencyResult)
                {
                    // UNDONE: Need to enter/leave debugger scope properly for the <Target> element.
                    case DependencyAnalysisResult.FullBuild:
                    case DependencyAnalysisResult.IncrementalBuild:
                    case DependencyAnalysisResult.SkipUpToDate:
                        // Create the lookups used to hold the current set of properties and items.
                        lookupForInference = bucket.Lookup;
                        lookupForExecution = bucket.Lookup.Clone();

                        // Push the lookup stack up one so that we are only modifying items and properties in that scope.
                        entryForInference = lookupForInference.EnterScope("ExecuteTarget() Inference");
                        entryForExecution = lookupForExecution.EnterScope("ExecuteTarget() Execution");

                        // If we're doing an incremental build, we need to effectively run the task twice -- once
                        // to infer the outputs for up-to-date input items, and once to actually execute the task;
                        // as a result we need separate sets of item and property collections to track changes.
                        if (dependencyResult == DependencyAnalysisResult.IncrementalBuild)
                        {
                            // Subset the relevant items to those that are up-to-date.
                            foreach (string itemType in upToDateTargetInputs.ItemTypes)
                            {
                                lookupForInference.PopulateWithItems(itemType, upToDateTargetInputs[itemType]);
                            }

                            // Subset the relevant items to those that have changed.
                            foreach (string itemType in changedTargetInputs.ItemTypes)
                            {
                                lookupForExecution.PopulateWithItems(itemType, changedTargetInputs[itemType]);
                            }
                        }

                        // We either have some work to do or at least we need to infer outputs from inputs.
                        bucketResult = await ProcessBucket(taskBuilder, targetLoggingContext, GetTaskExecutionMode(dependencyResult), lookupForInference, lookupForExecution);

                        // Now aggregate the result with the existing known results. There are four rules, assuming the target was not
                        // skipped due to being up-to-date:
                        // 1. If this bucket failed or was cancelled, the aggregate result is failure.
                        // 2. If this bucket succeeded and we have not previously failed, the aggregate result is a success.
                        // 3. Otherwise, the bucket was skipped, which has no effect on the aggregate result.
                        // 4. If the bucket's action code says to stop, then we stop, regardless of the success or failure state.
                        if (dependencyResult != DependencyAnalysisResult.SkipUpToDate)
                        {
                            aggregateResult = aggregateResult.AggregateResult(bucketResult);
                        }
                        else
                        {
                            if (aggregateResult.ResultCode == WorkUnitResultCode.Skipped)
                            {
                                aggregateResult = aggregateResult.AggregateResult(new WorkUnitResult(WorkUnitResultCode.Success, WorkUnitActionCode.Continue, null));
                            }
                        }

                        // Pop the lookup scopes, causing them to collapse their values back down into the
                        // bucket's lookup.
                        // NOTE: this order is important because when we infer outputs, we are trying
                        // to produce the same results as would be produced from a full build; as such
                        // if we're doing both the infer and execute steps, we want the outputs from
                        // the execute step to override the outputs of the infer step -- this models
                        // the full build scenario more correctly than if the steps were reversed.
                        entryForInference.LeaveScope();
                        entryForInference = null;
                        entryForExecution.LeaveScope();
                        entryForExecution = null;
                        targetSuccess = (bucketResult != null) && (bucketResult.ResultCode == WorkUnitResultCode.Success);
                        break;

                    case DependencyAnalysisResult.SkipNoInputs:
                    case DependencyAnalysisResult.SkipNoOutputs:
                        // We have either no inputs or no outputs, so there is nothing to do.
                        targetSuccess = true;
                        break;
                }
            }
            catch (InvalidProjectFileException e)
            {
                // Make sure the Invalid Project error gets logged *before* TargetFinished. Otherwise,
                // the log is confusing.
                targetLoggingContext.LogInvalidProjectFileError(e);

                if (entryForInference != null)
                {
                    entryForInference.LeaveScope();
                }

                if (entryForExecution != null)
                {
                    entryForExecution.LeaveScope();
                }

                aggregateResult = aggregateResult.AggregateResult(new WorkUnitResult(WorkUnitResultCode.Failed, WorkUnitActionCode.Stop, null));
            }
            finally
            {
                // Don't log the last target finished event until we can process the target outputs as we want to attach them to the
                // last target batch.
                if (targetLoggingContext != null && i < numberOfBuckets - 1)
                {
                    targetLoggingContext.LogTargetBatchFinished(projectFullPath, targetSuccess, null);
                    targetLoggingContext = null;
                }
            }
        }

        // Produce the final results.
        List<TaskItem> targetOutputItems = new List<TaskItem>();

        try
        {
            // If any legacy CallTarget operations took place, integrate them back in to the main lookup now.
            LeaveLegacyCallTargetScopes();

            // Publish the items for each bucket back into the baseLookup. Note that EnterScope() was actually called on each
            // bucket inside of the ItemBucket constructor, which is why you don't see it anywhere around here.
            foreach (ItemBucket bucket in buckets)
            {
                bucket.LeaveScope();
            }

            string targetReturns = _target.Returns;
            ElementLocation targetReturnsLocation = _target.ReturnsLocation;

            // If there are no targets in the project file that use the "Returns" attribute, we revert to the
            // legacy behavior in the case where Returns is not specified (null, rather than the empty string,
            // which indicates no returns). Legacy behavior is for all of the target's Outputs to be returned.
            // On the other hand, if there is at least one target in the file that uses the Returns attribute,
            // then all targets in the file are run according to the new behavior (return nothing unless otherwise
            // specified by the Returns attribute).
            if (targetReturns == null)
            {
                if (!_target.ParentProjectSupportsReturnsAttribute)
                {
                    targetReturns = _target.Outputs;
                    targetReturnsLocation = _target.OutputsLocation;
                }
            }

            if (!String.IsNullOrEmpty(targetReturns))
            {
                // Determine if we should keep duplicates.
                bool keepDupes = ConditionEvaluator.EvaluateCondition(
                    _target.KeepDuplicateOutputs,
                    ParserOptions.AllowPropertiesAndItemLists,
                    _expander,
                    ExpanderOptions.ExpandPropertiesAndItems,
                    requestEntry.ProjectRootDirectory,
                    _target.KeepDuplicateOutputsLocation,
                    projectLoggingContext.LoggingService,
                    projectLoggingContext.BuildEventContext,
                    FileSystems.Default);

                // NOTE: we need to gather the outputs in batches, because the output specification may reference item metadata.
                // Also, we are using the baseLookup, which has possibly had changes made to it since the project started. Because of this, the
                // set of outputs calculated here may differ from those which would have been calculated at the beginning of the target. It is
                // assumed the user intended this.
                List<ItemBucket> batchingBuckets = BatchingEngine.PrepareBatchingBuckets(GetBatchableParametersForTarget(), _baseLookup, _target.Location);

                if (keepDupes)
                {
                    foreach (ItemBucket bucket in batchingBuckets)
                    {
                        targetOutputItems.AddRange(bucket.Expander.ExpandIntoTaskItemsLeaveEscaped(targetReturns, ExpanderOptions.ExpandAll, targetReturnsLocation));
                    }
                }
                else
                {
                    HashSet<TaskItem> addedItems = new HashSet<TaskItem>();
                    foreach (ItemBucket bucket in batchingBuckets)
                    {
                        IList<TaskItem> itemsToAdd = bucket.Expander.ExpandIntoTaskItemsLeaveEscaped(targetReturns, ExpanderOptions.ExpandAll, targetReturnsLocation);

                        foreach (TaskItem item in itemsToAdd)
                        {
                            if (!addedItems.Contains(item))
                            {
                                targetOutputItems.Add(item);
                                addedItems.Add(item);
                            }
                        }
                    }
                }
            }
        }
        finally
        {
            if (targetLoggingContext != null)
            {
                // Log the last target finished event since we now have the target outputs.
                targetLoggingContext.LogTargetBatchFinished(projectFullPath, targetSuccess, targetOutputItems != null && targetOutputItems.Count > 0 ? targetOutputItems : null);
            }
        }

        _targetResult = new TargetResult(targetOutputItems.ToArray(), aggregateResult);

        if (aggregateResult.ResultCode == WorkUnitResultCode.Failed && aggregateResult.ActionCode == WorkUnitActionCode.Stop)
        {
            _state = TargetEntryState.ErrorExecution;
        }
        else
        {
            _state = TargetEntryState.Completed;
        }
    }
    finally
    {
        _isExecuting = false;
    }
#if MSBUILDENABLEVSPROFILING
    }
    finally
    {
        string endTargetBuild = String.Format(CultureInfo.CurrentCulture, "Build Target {0} in Project {1} - End", this.Name, projectFullPath);
        DataCollection.CommentMarkProfile(8801, endTargetBuild);
    }
#endif
}
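// For reference, the Returns/Outputs fallback described above, as project syntax
// (illustrative fragments):
//
//     <!-- No Returns attribute anywhere in the file: legacy behavior, so Outputs
//          doubles as the return value. -->
//     <Target Name="A" Outputs="@(Built)" />
//
//     <!-- Some target in the file uses Returns: only Returns is returned, and a
//          target without one returns nothing. -->
//     <Target Name="B" Returns="@(Built)" Outputs="@(Built);@(Intermediate)" />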
/// <summary>
/// Add items to the world. This is the in-target equivalent of an item include expression outside of a target.
/// </summary>
/// <param name="child">The item specification to evaluate and add.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="keepMetadata">An <see cref="ISet{String}"/> of metadata names to keep.</param>
/// <param name="removeMetadata">An <see cref="ISet{String}"/> of metadata names to remove.</param>
private void ExecuteAdd(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, ISet<string> keepMetadata, ISet<string> removeMetadata)
{
    // First, collect up the appropriate metadata collections. We need the one from the item definition, if any, and
    // the one we are using for this batching bucket.
    ProjectItemDefinitionInstance itemDefinition;
    Project.ItemDefinitions.TryGetValue(child.ItemType, out itemDefinition);

    // The NestedMetadataTable will handle the aggregation of the different metadata collections.
    NestedMetadataTable metadataTable = new NestedMetadataTable(child.ItemType, bucket.Expander.Metadata, itemDefinition);
    IMetadataTable originalMetadataTable = bucket.Expander.Metadata;
    bucket.Expander.Metadata = metadataTable;

    // Second, expand the item include and exclude, and filter existing metadata as appropriate.
    List<ProjectItemInstance> itemsToAdd = ExpandItemIntoItems(child, bucket.Expander, keepMetadata, removeMetadata);

    // Third, expand the metadata.
    foreach (ProjectItemGroupTaskMetadataInstance metadataInstance in child.Metadata)
    {
        bool condition = ConditionEvaluator.EvaluateCondition(
            metadataInstance.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            Project.Directory,
            metadataInstance.Location,
            LoggingContext.LoggingService,
            LoggingContext.BuildEventContext,
            FileSystems.Default);

        if (condition)
        {
            string evaluatedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(metadataInstance.Value, ExpanderOptions.ExpandAll, metadataInstance.Location);

            // This both stores the metadata so we can add it to all the items we just created later, and
            // exposes this metadata to further metadata evaluations in subsequent loop iterations.
            metadataTable.SetValue(metadataInstance.Name, evaluatedValue);
        }
    }

    // Finally, copy the added metadata onto the new items. The set call is additive.
    ProjectItemInstance.SetMetadata(metadataTable.AddedMetadata, itemsToAdd); // Set in one operation for potential copy-on-write

    // Restore the original metadata table.
    bucket.Expander.Metadata = originalMetadataTable;

    // Determine if we should NOT add duplicate entries.
    bool keepDuplicates = ConditionEvaluator.EvaluateCondition(
        child.KeepDuplicates,
        ParserOptions.AllowAll,
        bucket.Expander,
        ExpanderOptions.ExpandAll,
        Project.Directory,
        child.KeepDuplicatesLocation,
        LoggingContext.LoggingService,
        LoggingContext.BuildEventContext,
        FileSystems.Default);

    if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToAdd != null && itemsToAdd.Count > 0)
    {
        var itemGroupText = ItemGroupLoggingHelper.GetParameterText(
            ItemGroupLoggingHelper.ItemGroupIncludeLogMessagePrefix,
            child.ItemType,
            itemsToAdd,
            logItemMetadata: true);
        LoggingContext.LogCommentFromText(MessageImportance.Low, itemGroupText);
    }

    // Now add the items we created to the lookup.
    bucket.Lookup.AddNewItemsOfItemType(child.ItemType, itemsToAdd, !keepDuplicates); // Add in one operation for potential copy-on-write
}
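// A hedged example of the include path this method implements (names invented for illustration):
//
//     <Target Name="Collect">
//       <ItemGroup>
//         <Artifacts Include="@(Built)" KeepDuplicates="false" KeepMetadata="Culture">
//           <Stage>final</Stage>
//         </Artifacts>
//       </ItemGroup>
//     </Target>
//
// The Include is expanded first, KeepMetadata filters the inherited metadata, the nested
// <Stage> element is evaluated and applied to every new item, and KeepDuplicates="false"
// makes the final AddNewItemsOfItemType call drop duplicate entries.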