/// <summary>
/// Runs all of the tasks for this target, batched as necessary.
/// </summary>
internal async Task ExecuteTarget(ITaskBuilder taskBuilder, BuildRequestEntry requestEntry, ProjectLoggingContext projectLoggingContext, CancellationToken cancellationToken)
{
#if MSBUILDENABLEVSPROFILING
    try
    {
        string beginTargetBuild = String.Format(CultureInfo.CurrentCulture, "Build Target {0} in Project {1} - Start", this.Name, projectFullPath);
        DataCollection.CommentMarkProfile(8800, beginTargetBuild);
#endif
    try
    {
        VerifyState(_state, TargetEntryState.Execution);
        ErrorUtilities.VerifyThrow(!_isExecuting, "Target {0} is already executing", _target.Name);
        _cancellationToken = cancellationToken;
        _isExecuting = true;

        // Generate the batching buckets. Note that each bucket will get a lookup based on the baseLookup. This lookup will be in its
        // own scope, which we will collapse back down into the baseLookup at the bottom of the function.
        List<ItemBucket> buckets = BatchingEngine.PrepareBatchingBuckets(GetBatchableParametersForTarget(), _baseLookup, _target.Location);

        WorkUnitResult aggregateResult = new WorkUnitResult();
        TargetLoggingContext targetLoggingContext = null;
        bool targetSuccess = false;
        int numberOfBuckets = buckets.Count;
        string projectFullPath = requestEntry.RequestConfiguration.ProjectFullPath;

        string parentTargetName = null;
        if (ParentEntry != null && ParentEntry.Target != null)
        {
            parentTargetName = ParentEntry.Target.Name;
        }

        for (int i = 0; i < numberOfBuckets; i++)
        {
            ItemBucket bucket = buckets[i];

            // If one of the buckets failed, stop building.
            if (aggregateResult.ActionCode == WorkUnitActionCode.Stop)
            {
                break;
            }

            targetLoggingContext = projectLoggingContext.LogTargetBatchStarted(projectFullPath, _target, parentTargetName, _buildReason);
            WorkUnitResult bucketResult = null;
            targetSuccess = false;

            Lookup.Scope entryForInference = null;
            Lookup.Scope entryForExecution = null;

            try
            {
                // This isn't really dependency analysis. This is up-to-date checking. Based on this we will be able to determine if we should
                // run tasks in inference or execution mode (or both) or just skip them altogether.
                ItemDictionary<ProjectItemInstance> changedTargetInputs;
                ItemDictionary<ProjectItemInstance> upToDateTargetInputs;
                Lookup lookupForInference;
                Lookup lookupForExecution;

                // UNDONE: (Refactor) Refactor TargetUpToDateChecker to take a logging context, not a logging service.
                TargetUpToDateChecker dependencyAnalyzer = new TargetUpToDateChecker(requestEntry.RequestConfiguration.Project, _target, targetLoggingContext.LoggingService, targetLoggingContext.BuildEventContext);
                DependencyAnalysisResult dependencyResult = dependencyAnalyzer.PerformDependencyAnalysis(bucket, out changedTargetInputs, out upToDateTargetInputs);

                switch (dependencyResult)
                {
                    // UNDONE: Need to enter/leave debugger scope properly for the <Target> element.
                    case DependencyAnalysisResult.FullBuild:
                    case DependencyAnalysisResult.IncrementalBuild:
                    case DependencyAnalysisResult.SkipUpToDate:
                        // Create the lookups used to hold the current set of properties and items.
                        lookupForInference = bucket.Lookup;
                        lookupForExecution = bucket.Lookup.Clone();

                        // Push the lookup stack up one so that we are only modifying items and properties in that scope.
                        entryForInference = lookupForInference.EnterScope("ExecuteTarget() Inference");
                        entryForExecution = lookupForExecution.EnterScope("ExecuteTarget() Execution");

                        // If we're doing an incremental build, we need to effectively run the task twice -- once
                        // to infer the outputs for up-to-date input items, and once to actually execute the task;
                        // as a result we need separate sets of item and property collections to track changes.
                        if (dependencyResult == DependencyAnalysisResult.IncrementalBuild)
                        {
                            // Subset the relevant items to those that are up-to-date.
                            foreach (string itemType in upToDateTargetInputs.ItemTypes)
                            {
                                lookupForInference.PopulateWithItems(itemType, upToDateTargetInputs[itemType]);
                            }

                            // Subset the relevant items to those that have changed.
                            foreach (string itemType in changedTargetInputs.ItemTypes)
                            {
                                lookupForExecution.PopulateWithItems(itemType, changedTargetInputs[itemType]);
                            }
                        }

                        // We either have some work to do or at least we need to infer outputs from inputs.
                        bucketResult = await ProcessBucket(taskBuilder, targetLoggingContext, GetTaskExecutionMode(dependencyResult), lookupForInference, lookupForExecution);

                        // Now aggregate the result with the existing known results. There are four rules, assuming the target was not
                        // skipped due to being up-to-date:
                        // 1. If this bucket failed or was cancelled, the aggregate result is failure.
                        // 2. If this bucket succeeded and we have not previously failed, the aggregate result is a success.
                        // 3. Otherwise, the bucket was skipped, which has no effect on the aggregate result.
                        // 4. If the bucket's action code says to stop, then we stop, regardless of the success or failure state.
                        if (dependencyResult != DependencyAnalysisResult.SkipUpToDate)
                        {
                            aggregateResult = aggregateResult.AggregateResult(bucketResult);
                        }
                        else
                        {
                            if (aggregateResult.ResultCode == WorkUnitResultCode.Skipped)
                            {
                                aggregateResult = aggregateResult.AggregateResult(new WorkUnitResult(WorkUnitResultCode.Success, WorkUnitActionCode.Continue, null));
                            }
                        }

                        // Pop the lookup scopes, causing them to collapse their values back down into the
                        // bucket's lookup.
                        // NOTE: this order is important because when we infer outputs, we are trying
                        // to produce the same results as would be produced from a full build; as such
                        // if we're doing both the infer and execute steps, we want the outputs from
                        // the execute step to override the outputs of the infer step -- this models
                        // the full build scenario more correctly than if the steps were reversed.
                        entryForInference.LeaveScope();
                        entryForInference = null;
                        entryForExecution.LeaveScope();
                        entryForExecution = null;
                        targetSuccess = (bucketResult != null) && (bucketResult.ResultCode == WorkUnitResultCode.Success);
                        break;

                    case DependencyAnalysisResult.SkipNoInputs:
                    case DependencyAnalysisResult.SkipNoOutputs:
                        // We have either no inputs or no outputs, so there is nothing to do.
                        targetSuccess = true;
                        break;
                }
            }
            catch (InvalidProjectFileException e)
            {
                // Make sure the Invalid Project error gets logged *before* TargetFinished. Otherwise,
                // the log is confusing.
                targetLoggingContext.LogInvalidProjectFileError(e);

                if (entryForInference != null)
                {
                    entryForInference.LeaveScope();
                }

                if (entryForExecution != null)
                {
                    entryForExecution.LeaveScope();
                }

                aggregateResult = aggregateResult.AggregateResult(new WorkUnitResult(WorkUnitResultCode.Failed, WorkUnitActionCode.Stop, null));
            }
            finally
            {
                // Don't log the last target finished event until we can process the target outputs, as we want to attach them to the
                // last target batch.
                if (targetLoggingContext != null && i < numberOfBuckets - 1)
                {
                    targetLoggingContext.LogTargetBatchFinished(projectFullPath, targetSuccess, null);
                    targetLoggingContext = null;
                }
            }
        }

        // Produce the final results.
        List<TaskItem> targetOutputItems = new List<TaskItem>();

        try
        {
            // If any legacy CallTarget operations took place, integrate them back into the main lookup now.
            LeaveLegacyCallTargetScopes();

            // Publish the items for each bucket back into the baseLookup. Note that EnterScope() was actually called on each
            // bucket inside of the ItemBucket constructor, which is why you don't see it anywhere around here.
            foreach (ItemBucket bucket in buckets)
            {
                bucket.LeaveScope();
            }

            string targetReturns = _target.Returns;
            ElementLocation targetReturnsLocation = _target.ReturnsLocation;

            // If no targets in the project file use the "Returns" attribute, we revert to the legacy behavior
            // when Returns is not specified (null, rather than the empty string, which indicates no returns).
            // Legacy behavior is for all of the target's Outputs to be returned.
            // On the other hand, if at least one target in the file uses the Returns attribute,
            // then all targets in the file are run according to the new behavior (return nothing unless otherwise
            // specified by the Returns attribute).
            if (targetReturns == null)
            {
                if (!_target.ParentProjectSupportsReturnsAttribute)
                {
                    targetReturns = _target.Outputs;
                    targetReturnsLocation = _target.OutputsLocation;
                }
            }

            if (!String.IsNullOrEmpty(targetReturns))
            {
                // Determine if we should keep duplicates.
                bool keepDupes = ConditionEvaluator.EvaluateCondition
                    (
                    _target.KeepDuplicateOutputs,
                    ParserOptions.AllowPropertiesAndItemLists,
                    _expander,
                    ExpanderOptions.ExpandPropertiesAndItems,
                    requestEntry.ProjectRootDirectory,
                    _target.KeepDuplicateOutputsLocation,
                    projectLoggingContext.LoggingService,
                    projectLoggingContext.BuildEventContext,
                    FileSystems.Default);

                // NOTE: we need to gather the outputs in batches, because the output specification may reference item metadata.
                // Also, we are using the baseLookup, which has possibly had changes made to it since the project started. Because of this, the
                // set of outputs calculated here may differ from those which would have been calculated at the beginning of the target. It is
                // assumed the user intended this.
                List<ItemBucket> batchingBuckets = BatchingEngine.PrepareBatchingBuckets(GetBatchableParametersForTarget(), _baseLookup, _target.Location);

                if (keepDupes)
                {
                    foreach (ItemBucket bucket in batchingBuckets)
                    {
                        targetOutputItems.AddRange(bucket.Expander.ExpandIntoTaskItemsLeaveEscaped(targetReturns, ExpanderOptions.ExpandAll, targetReturnsLocation));
                    }
                }
                else
                {
                    HashSet<TaskItem> addedItems = new HashSet<TaskItem>();
                    foreach (ItemBucket bucket in batchingBuckets)
                    {
                        IList<TaskItem> itemsToAdd = bucket.Expander.ExpandIntoTaskItemsLeaveEscaped(targetReturns, ExpanderOptions.ExpandAll, targetReturnsLocation);

                        foreach (TaskItem item in itemsToAdd)
                        {
                            if (!addedItems.Contains(item))
                            {
                                targetOutputItems.Add(item);
                                addedItems.Add(item);
                            }
                        }
                    }
                }
            }
        }
        finally
        {
            if (targetLoggingContext != null)
            {
                // Log the last target finished event now that we have the target outputs.
                targetLoggingContext.LogTargetBatchFinished(projectFullPath, targetSuccess, targetOutputItems != null && targetOutputItems.Count > 0 ?
                    targetOutputItems : null);
            }
        }

        _targetResult = new TargetResult(targetOutputItems.ToArray(), aggregateResult);

        if (aggregateResult.ResultCode == WorkUnitResultCode.Failed && aggregateResult.ActionCode == WorkUnitActionCode.Stop)
        {
            _state = TargetEntryState.ErrorExecution;
        }
        else
        {
            _state = TargetEntryState.Completed;
        }
    }
    finally
    {
        _isExecuting = false;
    }
#if MSBUILDENABLEVSPROFILING
    }
    finally
    {
        string endTargetBuild = String.Format(CultureInfo.CurrentCulture, "Build Target {0} in Project {1} - End", this.Name, projectFullPath);
        DataCollection.CommentMarkProfile(8801, endTargetBuild);
    }
#endif
}
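// ---------------------------------------------------------------------------------
// Illustrative sketch, not MSBuild source: the four aggregation rules described in
// the bucket loop above for WorkUnitResult.AggregateResult. The enums and helper
// below are simplified, hypothetical stand-ins; the real WorkUnitResult constructor
// also takes a third argument (null in the calls above) that this sketch omits.
// ---------------------------------------------------------------------------------
internal static class BucketAggregationSketch
{
    internal enum ResultCode { Success, Failed, Canceled, Skipped }
    internal enum ActionCode { Continue, Stop }

    // Folds one bucket's result into the running aggregate.
    internal static (ResultCode Result, ActionCode Action) Aggregate(
        (ResultCode Result, ActionCode Action) aggregate,
        (ResultCode Result, ActionCode Action) bucket)
    {
        ResultCode result =
            // Rule 1: a failed or cancelled bucket makes the aggregate a failure.
            bucket.Result == ResultCode.Failed || bucket.Result == ResultCode.Canceled ? ResultCode.Failed :
            // Rule 2: a successful bucket upgrades the aggregate unless a previous bucket failed.
            bucket.Result == ResultCode.Success && aggregate.Result != ResultCode.Failed ? ResultCode.Success :
            // Rule 3: a skipped bucket has no effect on the aggregate.
            aggregate.Result;

        // Rule 4: a Stop action is sticky regardless of success or failure; this is what
        // lets the `aggregateResult.ActionCode == WorkUnitActionCode.Stop` check at the
        // top of the bucket loop break out early.
        ActionCode action = bucket.Action == ActionCode.Stop || aggregate.Action == ActionCode.Stop
            ? ActionCode.Stop
            : ActionCode.Continue;

        return (result, action);
    }
}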
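// ---------------------------------------------------------------------------------
// Illustrative sketch, not MSBuild source: the KeepDuplicateOutputs handling in the
// "Produce the final results" block above. The original de-duplicates TaskItems with
// a HashSet; plain strings stand in for TaskItem here, and GatherOutputs is a
// hypothetical name. HashSet<T>.Add returns false for duplicates, which collapses
// the original's Contains-then-Add pair into a single lookup.
// ---------------------------------------------------------------------------------
using System.Collections.Generic;

internal static class TargetOutputSketch
{
    internal static List<string> GatherOutputs(IEnumerable<IEnumerable<string>> expandedPerBucket, bool keepDupes)
    {
        var outputs = new List<string>();
        var seen = new HashSet<string>();

        foreach (IEnumerable<string> bucketItems in expandedPerBucket)
        {
            foreach (string item in bucketItems)
            {
                // Keep everything when duplicates are allowed; otherwise only first occurrences.
                if (keepDupes || seen.Add(item))
                {
                    outputs.Add(item);
                }
            }
        }

        return outputs;
    }
}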
/// <summary>
/// Execute a PropertyGroup element, including each child property.
/// </summary>
/// <param name="lookup">The lookup used for evaluation and as a destination for these properties.</param>
internal override void ExecuteTask(Lookup lookup)
{
    foreach (ProjectPropertyGroupTaskPropertyInstance property in _taskInstance.Properties)
    {
        List<ItemBucket> buckets = null;

        try
        {
            // Find all the metadata references in order to create buckets.
            List<string> parameterValues = new List<string>();
            GetBatchableValuesFromProperty(parameterValues, property);
            buckets = BatchingEngine.PrepareBatchingBuckets(parameterValues, lookup, property.Location);

            // "Execute" each bucket.
            foreach (ItemBucket bucket in buckets)
            {
                bool condition = ConditionEvaluator.EvaluateCondition
                    (
                    property.Condition,
                    ParserOptions.AllowAll,
                    bucket.Expander,
                    ExpanderOptions.ExpandAll,
                    Project.Directory,
                    property.ConditionLocation,
                    LoggingContext.LoggingService,
                    LoggingContext.BuildEventContext,
                    FileSystems.Default);

                if (condition)
                {
                    // Check for a reserved name now, so it fails right here instead of later when the property eventually reaches
                    // the outer scope.
                    ProjectErrorUtilities.VerifyThrowInvalidProject
                        (
                        !ReservedPropertyNames.IsReservedProperty(property.Name),
                        property.Location,
                        "CannotModifyReservedProperty",
                        property.Name
                        );

                    string evaluatedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(property.Value, ExpanderOptions.ExpandAll, property.Location);

                    if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents)
                    {
                        LoggingContext.LogComment(MessageImportance.Low, "PropertyGroupLogMessage", property.Name, evaluatedValue);
                    }

                    bucket.Lookup.SetProperty(ProjectPropertyInstance.Create(property.Name, evaluatedValue, property.Location, Project.IsImmutable));
                }
            }
        }
        finally
        {
            if (buckets != null)
            {
                // Propagate the property changes to the bucket above.
                foreach (ItemBucket bucket in buckets)
                {
                    bucket.LeaveScope();
                }
            }
        }
    }
}
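// ---------------------------------------------------------------------------------
// Illustrative sketch, not MSBuild source: the batching idea behind the
// PrepareBatchingBuckets calls above. When a property's value or condition
// references %(ItemType.Metadata), the engine partitions the items into one bucket
// per distinct metadata value and evaluates the element once per bucket. The
// dictionary-based item and the Bucketize helper are hypothetical simplifications.
// ---------------------------------------------------------------------------------
using System.Collections.Generic;
using System.Linq;

internal static class PropertyBatchingSketch
{
    // One bucket per distinct value of the referenced metadata; items missing the
    // metadata fall into the empty-string bucket.
    internal static IEnumerable<IGrouping<string, Dictionary<string, string>>> Bucketize(
        IEnumerable<Dictionary<string, string>> items, string referencedMetadata)
    {
        return items.GroupBy(item =>
            item.TryGetValue(referencedMetadata, out string value) ? value : string.Empty);
    }
}

// Usage: a property such as <Out>%(Compile.Filename)</Out> over three Compile items
// whose Filename metadata is "A", "A", "B" yields two buckets ("A" and "B"), so the
// property assignment runs twice, once per bucket.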
/// <summary>
/// Execute an ItemGroup element, including each child item expression.
/// </summary>
/// <param name="lookup">The lookup used for evaluation and as a destination for these items.</param>
internal override void ExecuteTask(Lookup lookup)
{
    foreach (ProjectItemGroupTaskItemInstance child in _taskInstance.Items)
    {
        List<ItemBucket> buckets = null;

        try
        {
            List<string> parameterValues = new List<string>();
            GetBatchableValuesFromBuildItemGroupChild(parameterValues, child);
            buckets = BatchingEngine.PrepareBatchingBuckets(parameterValues, lookup, child.ItemType, _taskInstance.Location);

            // "Execute" each bucket.
            foreach (ItemBucket bucket in buckets)
            {
                bool condition = ConditionEvaluator.EvaluateCondition
                    (
                    child.Condition,
                    ParserOptions.AllowAll,
                    bucket.Expander,
                    ExpanderOptions.ExpandAll,
                    Project.Directory,
                    child.ConditionLocation,
                    LoggingContext.LoggingService,
                    LoggingContext.BuildEventContext,
                    FileSystems.Default);

                if (condition)
                {
                    HashSet<string> keepMetadata = null;
                    HashSet<string> removeMetadata = null;
                    HashSet<string> matchOnMetadata = null;
                    MatchOnMetadataOptions matchOnMetadataOptions = MatchOnMetadataConstants.MatchOnMetadataOptionsDefaultValue;

                    if (!String.IsNullOrEmpty(child.KeepMetadata))
                    {
                        var keepMetadataEvaluated = bucket.Expander.ExpandIntoStringListLeaveEscaped(child.KeepMetadata, ExpanderOptions.ExpandAll, child.KeepMetadataLocation).ToList();
                        if (keepMetadataEvaluated.Count > 0)
                        {
                            keepMetadata = new HashSet<string>(keepMetadataEvaluated);
                        }
                    }

                    if (!String.IsNullOrEmpty(child.RemoveMetadata))
                    {
                        var removeMetadataEvaluated = bucket.Expander.ExpandIntoStringListLeaveEscaped(child.RemoveMetadata, ExpanderOptions.ExpandAll, child.RemoveMetadataLocation).ToList();
                        if (removeMetadataEvaluated.Count > 0)
                        {
                            removeMetadata = new HashSet<string>(removeMetadataEvaluated);
                        }
                    }

                    if (!String.IsNullOrEmpty(child.MatchOnMetadata))
                    {
                        var matchOnMetadataEvaluated = bucket.Expander.ExpandIntoStringListLeaveEscaped(child.MatchOnMetadata, ExpanderOptions.ExpandAll, child.MatchOnMetadataLocation).ToList();
                        if (matchOnMetadataEvaluated.Count > 0)
                        {
                            matchOnMetadata = new HashSet<string>(matchOnMetadataEvaluated);
                        }

                        Enum.TryParse(child.MatchOnMetadataOptions, out matchOnMetadataOptions);
                    }

                    if ((child.Include.Length != 0) || (child.Exclude.Length != 0))
                    {
                        // It's an item -- we're "adding" items to the world.
                        ExecuteAdd(child, bucket, keepMetadata, removeMetadata);
                    }
                    else if (child.Remove.Length != 0)
                    {
                        // It's a remove -- we're "removing" items from the world.
                        ExecuteRemove(child, bucket, matchOnMetadata, matchOnMetadataOptions);
                    }
                    else
                    {
                        // It's a modify -- changing existing items.
                        ExecuteModify(child, bucket, keepMetadata, removeMetadata);
                    }
                }
            }
        }
        finally
        {
            if (buckets != null)
            {
                // Propagate the item changes to the bucket above.
                foreach (ItemBucket bucket in buckets)
                {
                    bucket.LeaveScope();
                }
            }
        }
    }
}
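// ---------------------------------------------------------------------------------
// Illustrative sketch, not MSBuild source: the metadata filtering implied by the
// keepMetadata/removeMetadata HashSets built above and handed to ExecuteAdd and
// ExecuteModify. A metadata name survives if it appears in keepMetadata (when that
// set is given) and does not appear in removeMetadata (when that set is given).
// The dictionary-based item and the Filter helper are hypothetical; the real code
// operates on TaskItem metadata rather than dictionaries.
// ---------------------------------------------------------------------------------
using System.Collections.Generic;
using System.Linq;

internal static class MetadataFilterSketch
{
    internal static Dictionary<string, string> Filter(
        Dictionary<string, string> metadata,
        HashSet<string> keepMetadata,     // null means "keep everything"
        HashSet<string> removeMetadata)   // null means "remove nothing"
    {
        return metadata
            .Where(pair => keepMetadata == null || keepMetadata.Contains(pair.Key))
            .Where(pair => removeMetadata == null || !removeMetadata.Contains(pair.Key))
            .ToDictionary(pair => pair.Key, pair => pair.Value);
    }
}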