/// <summary>
/// Remove items from the world. Changes to items that are part of the project manifest are backed up, so
/// they can be reverted when the project is reset after the end of the build.
/// </summary>
/// <param name="child">The item specification to evaluate and remove.</param>
/// <param name="bucket">The batching bucket.</param>
private void ExecuteRemove(ProjectItemGroupTaskItemInstance child, ItemBucket bucket)
{
    ICollection<ProjectItemInstance> group = bucket.Lookup.GetItems(child.ItemType);
    if (group == null)
    {
        // No items of this type to remove
        return;
    }

    // Expand the Remove expression against the bucket and find the matching items.
    List<ProjectItemInstance> itemsToRemove = FindItemsMatchingSpecification(group, child.Remove, child.RemoveLocation, bucket.Expander);

    if (itemsToRemove != null)
    {
        // Only pay the cost of building the log text when task-input logging is on
        // and there is actually something to report.
        if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToRemove.Count > 0)
        {
            var itemGroupText = ItemGroupLoggingHelper.GetParameterText(
                ItemGroupLoggingHelper.ItemGroupRemoveLogMessage,
                child.ItemType,
                itemsToRemove);
            LoggingContext.LogCommentFromText(MessageImportance.Low, itemGroupText);
        }

        bucket.Lookup.RemoveItems(itemsToRemove);
    }
}
/// <summary>
/// Creates a minimal ItemBucket carrying only the given metadata. The returned
/// bucket is valid solely for comparison against other buckets.
/// </summary>
internal static ItemBucket GetDummyBucketForComparisons(Dictionary<string, string> metadata)
{
    var comparisonOnlyBucket = new ItemBucket
    {
        _metadata = metadata
    };

    return comparisonOnlyBucket;
}
/// <summary>
/// Modifies items in the world - specifically, changes their metadata. Changes to items that are part of the
/// project manifest are backed up, so they can be reverted when the project is reset after the end of the build.
/// </summary>
/// <param name="child">The item specification to evaluate and modify.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="keepMetadata">Metadata names used to build a keep-style modification set; may be null.</param>
/// <param name="removeMetadata">Metadata names used to build a remove-style modification set; may be null.</param>
private void ExecuteModify(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, ISet<string> keepMetadata, ISet<string> removeMetadata)
{
    ICollection<ProjectItemInstance> group = bucket.Lookup.GetItems(child.ItemType);
    if (group == null || group.Count == 0)
    {
        // No items of this type to modify
        return;
    }

    // Figure out what metadata names and values we need to set
    var metadataToSet = new Lookup.MetadataModifications(keepMetadata != null);

    // Filter the metadata as appropriate. Note that keepMetadata takes precedence:
    // removeMetadata is only consulted when keepMetadata is null.
    if (keepMetadata != null)
    {
        foreach (var metadataName in keepMetadata)
        {
            metadataToSet[metadataName] = Lookup.MetadataModification.CreateFromNoChange();
        }
    }
    else if (removeMetadata != null)
    {
        foreach (var metadataName in removeMetadata)
        {
            metadataToSet[metadataName] = Lookup.MetadataModification.CreateFromRemove();
        }
    }

    foreach (ProjectItemGroupTaskMetadataInstance metadataInstance in child.Metadata)
    {
        // Each metadata element carries its own condition, evaluated per bucket.
        bool condition = ConditionEvaluator.EvaluateCondition
            (
            metadataInstance.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            Project.Directory,
            metadataInstance.ConditionLocation,
            LoggingContext.LoggingService,
            LoggingContext.BuildEventContext,
            FileSystems.Default);

        if (condition)
        {
            string evaluatedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(metadataInstance.Value, ExpanderOptions.ExpandAll, metadataInstance.Location);
            metadataToSet[metadataInstance.Name] = Lookup.MetadataModification.CreateFromNewValue(evaluatedValue);
        }
    }

    // Now apply the changes. This must be done after filtering, since explicitly set metadata overrides filters.
    bucket.Lookup.ModifyItems(child.ItemType, group, metadataToSet);
}
/// <summary>
/// Finds the items in <paramref name="items"/> whose metadata matches the referenced
/// item list in the child's Remove expression, per the given matching options.
/// Throws an InvalidProjectFileException when the Remove spec is not a single item
/// expression with a single metadata name.
/// </summary>
private List<ProjectItemInstance> FindItemsUsingMatchOnMetadata(
    ICollection<ProjectItemInstance> items,
    ProjectItemGroupTaskItemInstance child,
    ItemBucket bucket,
    HashSet<string> matchOnMetadata,
    MatchOnMetadataOptions options)
{
    ErrorUtilities.VerifyThrowArgumentNull(matchOnMetadata, nameof(matchOnMetadata));

    var removeSpec = new ItemSpec<ProjectPropertyInstance, ProjectItemInstance>(
        child.Remove, bucket.Expander, child.RemoveLocation, Project.Directory, true);

    // MatchOnMetadata is restricted to exactly one referenced item list and one metadata name.
    bool specIsSupported =
        removeSpec.Fragments.Count == 1 &&
        removeSpec.Fragments.First() is ItemSpec<ProjectPropertyInstance, ProjectItemInstance>.ItemExpressionFragment &&
        matchOnMetadata.Count == 1;

    ProjectFileErrorUtilities.VerifyThrowInvalidProjectFile(
        specIsSupported,
        new BuildEventFileInfo(string.Empty),
        "OM_MatchOnMetadataIsRestrictedToOnlyOneReferencedItem",
        child.RemoveLocation,
        child.Remove);

    var matches = new List<ProjectItemInstance>();
    foreach (ProjectItemInstance item in items)
    {
        if (removeSpec.MatchesItemOnMetadata(item, matchOnMetadata, options))
        {
            matches.Add(item);
        }
    }

    return matches;
}
/// <summary>
/// Removes items of the child's item type from the bucket's lookup. The items to
/// remove are selected either by the Remove item specification, or — when
/// <paramref name="matchOnMetadata"/> is non-null — by metadata comparison.
/// </summary>
private void ExecuteRemove(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, HashSet<string> matchOnMetadata, MatchOnMetadataOptions matchingOptions)
{
    ICollection<ProjectItemInstance> group = bucket.Lookup.GetItems(child.ItemType);
    if (group == null)
    {
        // Nothing of this item type exists, so there is nothing to remove.
        return;
    }

    List<ProjectItemInstance> itemsToRemove = matchOnMetadata == null
        ? FindItemsMatchingSpecification(group, child.Remove, child.RemoveLocation, bucket.Expander)
        : FindItemsMatchingMetadataSpecification(group, child, bucket.Expander, matchOnMetadata, matchingOptions);

    if (itemsToRemove == null)
    {
        return;
    }

    // Only log when task-input logging is enabled and there is something to report.
    if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToRemove.Count > 0)
    {
        ItemGroupLoggingHelper.LogTaskParameter(
            LoggingContext,
            TaskParameterMessageKind.RemoveItem,
            child.ItemType,
            itemsToRemove,
            logItemMetadata: true,
            child.Location);
    }

    bucket.Lookup.RemoveItems(itemsToRemove);
}
/// <summary>
/// Instantiates and runs an intrinsic task against the bucket's lookup.
/// </summary>
private void ExecuteIntrinsicTask(ItemBucket bucket)
{
    IntrinsicTask intrinsicTask = IntrinsicTask.InstantiateTask(
        _targetChildInstance,
        _targetLoggingContext,
        _buildRequestEntry.RequestConfiguration.Project,
        _taskExecutionHost.LogTaskInputs);

    intrinsicTask.ExecuteTask(bucket.Lookup);
}
/// <summary>
/// Initialize to run a specific batch of the current task.
/// </summary>
/// <returns>True if a task instance was created for this batch; false if there is no
/// task factory wrapper or the factory failed to produce a task.</returns>
bool ITaskExecutionHost.InitializeForBatch(TaskLoggingContext loggingContext, ItemBucket batchBucket, IDictionary<string, string> taskIdentityParameters)
{
    // Use nameof instead of string literals so the reported argument names survive renames.
    ErrorUtilities.VerifyThrowArgumentNull(loggingContext, nameof(loggingContext));
    ErrorUtilities.VerifyThrowArgumentNull(batchBucket, nameof(batchBucket));

    _taskLoggingContext = loggingContext;
    _batchBucket = batchBucket;

    if (_taskFactoryWrapper == null)
    {
        return false;
    }

    // If the task assembly is loaded into a separate AppDomain using LoadFrom, then we have a problem
    // to solve - when the task class Type is marshalled back into our AppDomain, it's not just transferred
    // here. Instead, NDP will try to Load (not LoadFrom!) the task assembly into our AppDomain, and since
    // we originally used LoadFrom, it will fail miserably not knowing where to find it.
    // We need to temporarily subscribe to the AppDomain.AssemblyResolve event to fix it.
    if (_resolver == null)
    {
        _resolver = new TaskEngineAssemblyResolver();
        _resolver.Initialize(_taskFactoryWrapper.TaskFactoryLoadedType.Assembly.AssemblyFile);
        _resolver.InstallHandler();
    }

    // We instantiate a new task object for each batch.
    _taskInstance = InstantiateTask(taskIdentityParameters);

    if (_taskInstance == null)
    {
        return false;
    }

    _taskInstance.BuildEngine = _buildEngine;
    _taskInstance.HostObject = _taskHost;

    return true;
}
/// <summary>
/// Initialize the host object
/// </summary>
/// <param name="throwOnExecute">Should the task throw when executed</param>
private void InitializeHost(bool throwOnExecute)
{
    // Stand up a synchronous logging service with a mock logger so tests can inspect events.
    _loggingService = LoggingService.CreateLoggingService(LoggerMode.Synchronous, 1) as ILoggingService;
    _logger = new MockLogger();
    _loggingService.RegisterLogger(_logger);
    _host = new TaskExecutionHost();
    TargetLoggingContext tlc = new TargetLoggingContext(_loggingService, new BuildEventContext(1, 1, BuildEventContext.InvalidProjectContextId, 1));

    // Set up a temporary project and add some items to it.
    ProjectInstance project = CreateTestProject();

    // Load the test task factory type directly from this test assembly.
    TypeLoader typeLoader = new TypeLoader(new TypeFilter(IsTaskFactoryClass));
    AssemblyLoadInfo loadInfo = AssemblyLoadInfo.Create(Assembly.GetAssembly(typeof(TaskBuilderTestTask.TaskBuilderTestTaskFactory)).FullName, null);
    LoadedType loadedType = new LoadedType(typeof(TaskBuilderTestTask.TaskBuilderTestTaskFactory), loadInfo);

    TaskBuilderTestTask.TaskBuilderTestTaskFactory taskFactory = new TaskBuilderTestTask.TaskBuilderTestTaskFactory();
    taskFactory.ThrowOnExecute = throwOnExecute;
    string taskName = "TaskBuilderTestTask";
    // Inject the factory through the test-only backdoor so FindTask resolves to it.
    (_host as TaskExecutionHost)._UNITTESTONLY_TaskFactoryWrapper = new TaskFactoryWrapper(taskFactory, loadedType, taskName, null);
    _host.InitializeForTask
        (
        this,
        tlc,
        project,
        taskName,
        ElementLocation.Create("none", 1, 1),
        this,
        false,
        null,
        false,
        CancellationToken.None
        );

    ProjectTaskInstance taskInstance = project.Targets["foo"].Tasks.First();
    TaskLoggingContext talc = tlc.LogTaskBatchStarted(".", taskInstance);

    // Build the item collections the tests expect: one single-item list and one two-item list.
    ItemDictionary<ProjectItemInstance> itemsByName = new ItemDictionary<ProjectItemInstance>();

    ProjectItemInstance item = new ProjectItemInstance(project, "ItemListContainingOneItem", "a.cs", ".");
    item.SetMetadata("Culture", "fr-fr");
    itemsByName.Add(item);
    _oneItem = new ITaskItem[] { new TaskItem(item) };

    item = new ProjectItemInstance(project, "ItemListContainingTwoItems", "b.cs", ".");
    ProjectItemInstance item2 = new ProjectItemInstance(project, "ItemListContainingTwoItems", "c.cs", ".");
    item.SetMetadata("HintPath", "c:\\foo");
    item2.SetMetadata("HintPath", "c:\\bar");
    itemsByName.Add(item);
    itemsByName.Add(item2);
    _twoItems = new ITaskItem[] { new TaskItem(item), new TaskItem(item2) };

    // Wrap the items in a bucket and initialize the host for a batch against it.
    _bucket = new ItemBucket(new string[0], new Dictionary<string, string>(), new Lookup(itemsByName, new PropertyDictionary<ProjectPropertyInstance>(), null), 0);
    _host.FindTask(null);
    _host.InitializeForBatch(talc, _bucket, null);

    // Dictionaries used by the mock task to record parameter set/read traffic.
    _parametersSetOnTask = new Dictionary<string, object>(StringComparer.OrdinalIgnoreCase);
    _outputsReadFromTask = new Dictionary<string, object>(StringComparer.OrdinalIgnoreCase);
}
/// <summary>
/// Builds a comparison-only ItemBucket around the supplied metadata dictionary.
/// The result is deliberately not a usable bucket: it carries no lookup or items,
/// and must not be used for anything except comparing against other buckets.
/// </summary>
/// <remarks>
/// PERF NOTE: Kept intentionally light-weight — only the metadata field is
/// populated, so a dummy bucket allocates far less than a real one.
/// </remarks>
/// <param name="metadata">The metadata the comparison should be based on.</param>
/// <returns>An item bucket that is invalid for everything except comparisons.</returns>
internal static ItemBucket GetDummyBucketForComparisons(Dictionary<string, string> metadata)
{
    ItemBucket comparisonBucket = new ItemBucket();
    comparisonBucket._metadata = metadata;

    return comparisonBucket;
}
/// <summary>
/// Runs all of the tasks for this target, batched as necessary.
/// </summary>
/// <param name="taskBuilder">Builder used to run each task in the target.</param>
/// <param name="requestEntry">The build request entry whose configuration supplies the project.</param>
/// <param name="projectLoggingContext">Logging context for the project; target batch contexts are created from it.</param>
/// <param name="cancellationToken">Token used to cancel execution.</param>
internal async Task ExecuteTarget(ITaskBuilder taskBuilder, BuildRequestEntry requestEntry, ProjectLoggingContext projectLoggingContext, CancellationToken cancellationToken)
{
#if MSBUILDENABLEVSPROFILING
    try
    {
        string beginTargetBuild = String.Format(CultureInfo.CurrentCulture, "Build Target {0} in Project {1} - Start", this.Name, projectFullPath);
        DataCollection.CommentMarkProfile(8800, beginTargetBuild);
#endif
    try
    {
        VerifyState(_state, TargetEntryState.Execution);
        ErrorUtilities.VerifyThrow(!_isExecuting, "Target {0} is already executing", _target.Name);
        _cancellationToken = cancellationToken;
        _isExecuting = true;

        // Generate the batching buckets. Note that each bucket will get a lookup based on the baseLookup. This lookup will be in its
        // own scope, which we will collapse back down into the baseLookup at the bottom of the function.
        List<ItemBucket> buckets = BatchingEngine.PrepareBatchingBuckets(GetBatchableParametersForTarget(), _baseLookup, _target.Location);

        WorkUnitResult aggregateResult = new WorkUnitResult();
        TargetLoggingContext targetLoggingContext = null;
        bool targetSuccess = false;
        int numberOfBuckets = buckets.Count;
        string projectFullPath = requestEntry.RequestConfiguration.ProjectFullPath;

        string parentTargetName = null;
        if (ParentEntry != null && ParentEntry.Target != null)
        {
            parentTargetName = ParentEntry.Target.Name;
        }

        for (int i = 0; i < numberOfBuckets; i++)
        {
            ItemBucket bucket = buckets[i];

            // If one of the buckets failed, stop building.
            if (aggregateResult.ActionCode == WorkUnitActionCode.Stop)
            {
                break;
            }

            targetLoggingContext = projectLoggingContext.LogTargetBatchStarted(projectFullPath, _target, parentTargetName);
            WorkUnitResult bucketResult = null;
            targetSuccess = false;

            Lookup.Scope entryForInference = null;
            Lookup.Scope entryForExecution = null;

            try
            {
                // This isn't really dependency analysis. This is up-to-date checking. Based on this we will be able to determine if we should
                // run tasks in inference or execution mode (or both) or just skip them altogether.
                ItemDictionary<ProjectItemInstance> changedTargetInputs;
                ItemDictionary<ProjectItemInstance> upToDateTargetInputs;
                Lookup lookupForInference;
                Lookup lookupForExecution;

                // UNDONE: (Refactor) Refactor TargetUpToDateChecker to take a logging context, not a logging service.
                TargetUpToDateChecker dependencyAnalyzer = new TargetUpToDateChecker(requestEntry.RequestConfiguration.Project, _target, targetLoggingContext.LoggingService, targetLoggingContext.BuildEventContext);
                DependencyAnalysisResult dependencyResult = dependencyAnalyzer.PerformDependencyAnalysis(bucket, out changedTargetInputs, out upToDateTargetInputs);

                switch (dependencyResult)
                {
                    // UNDONE: Need to enter/leave debugger scope properly for the <Target> element.
                    case DependencyAnalysisResult.FullBuild:
                    case DependencyAnalysisResult.IncrementalBuild:
                    case DependencyAnalysisResult.SkipUpToDate:
                        // Create the lookups used to hold the current set of properties and items
                        lookupForInference = bucket.Lookup;
                        lookupForExecution = bucket.Lookup.Clone();

                        // Push the lookup stack up one so that we are only modifying items and properties in that scope.
                        entryForInference = lookupForInference.EnterScope("ExecuteTarget() Inference");
                        entryForExecution = lookupForExecution.EnterScope("ExecuteTarget() Execution");

                        // if we're doing an incremental build, we need to effectively run the task twice -- once
                        // to infer the outputs for up-to-date input items, and once to actually execute the task;
                        // as a result we need separate sets of item and property collections to track changes
                        if (dependencyResult == DependencyAnalysisResult.IncrementalBuild)
                        {
                            // subset the relevant items to those that are up-to-date
                            foreach (string itemType in upToDateTargetInputs.ItemTypes)
                            {
                                lookupForInference.PopulateWithItems(itemType, upToDateTargetInputs[itemType]);
                            }

                            // subset the relevant items to those that have changed
                            foreach (string itemType in changedTargetInputs.ItemTypes)
                            {
                                lookupForExecution.PopulateWithItems(itemType, changedTargetInputs[itemType]);
                            }
                        }

                        // We either have some work to do or at least we need to infer outputs from inputs.
                        bucketResult = await ProcessBucket(taskBuilder, targetLoggingContext, GetTaskExecutionMode(dependencyResult), lookupForInference, lookupForExecution);

                        // Now aggregate the result with the existing known results. There are four rules, assuming the target was not
                        // skipped due to being up-to-date:
                        // 1. If this bucket failed or was cancelled, the aggregate result is failure.
                        // 2. If this bucket Succeeded and we have not previously failed, the aggregate result is a success.
                        // 3. Otherwise, the bucket was skipped, which has no effect on the aggregate result.
                        // 4. If the bucket's action code says to stop, then we stop, regardless of the success or failure state.
                        if (dependencyResult != DependencyAnalysisResult.SkipUpToDate)
                        {
                            aggregateResult = aggregateResult.AggregateResult(bucketResult);
                        }
                        else
                        {
                            // Up-to-date counts as success when nothing has failed or succeeded yet.
                            if (aggregateResult.ResultCode == WorkUnitResultCode.Skipped)
                            {
                                aggregateResult = aggregateResult.AggregateResult(new WorkUnitResult(WorkUnitResultCode.Success, WorkUnitActionCode.Continue, null));
                            }
                        }

                        // Pop the lookup scopes, causing them to collapse their values back down into the
                        // bucket's lookup.
                        // NOTE: this order is important because when we infer outputs, we are trying
                        // to produce the same results as would be produced from a full build; as such
                        // if we're doing both the infer and execute steps, we want the outputs from
                        // the execute step to override the outputs of the infer step -- this models
                        // the full build scenario more correctly than if the steps were reversed
                        entryForInference.LeaveScope();
                        entryForInference = null;
                        entryForExecution.LeaveScope();
                        entryForExecution = null;
                        targetSuccess = (bucketResult != null) && (bucketResult.ResultCode == WorkUnitResultCode.Success);
                        break;

                    case DependencyAnalysisResult.SkipNoInputs:
                    case DependencyAnalysisResult.SkipNoOutputs:
                        // We have either no inputs or no outputs, so there is nothing to do.
                        targetSuccess = true;
                        break;
                }
            }
            catch (InvalidProjectFileException e)
            {
                // Make sure the Invalid Project error gets logged *before* TargetFinished. Otherwise,
                // the log is confusing.
                targetLoggingContext.LogInvalidProjectFileError(e);

                // Unwind any scopes still open at the point of failure so the lookup is consistent.
                if (null != entryForInference)
                {
                    entryForInference.LeaveScope();
                }

                if (null != entryForExecution)
                {
                    entryForExecution.LeaveScope();
                }

                aggregateResult = aggregateResult.AggregateResult(new WorkUnitResult(WorkUnitResultCode.Failed, WorkUnitActionCode.Stop, null));
            }
            finally
            {
                // Don't log the last target finished event until we can process the target outputs as we want to attach them to the
                // last target batch.
                if (targetLoggingContext != null && i < numberOfBuckets - 1)
                {
                    targetLoggingContext.LogTargetBatchFinished(projectFullPath, targetSuccess, null);
                    targetLoggingContext = null;
                }
            }
        }

        // Produce the final results.
        List<TaskItem> targetOutputItems = new List<TaskItem>();

        try
        {
            // If any legacy CallTarget operations took place, integrate them back in to the main lookup now.
            LeaveLegacyCallTargetScopes();

            // Publish the items for each bucket back into the baseLookup. Note that EnterScope() was actually called on each
            // bucket inside of the ItemBucket constructor, which is why you don't see it anywhere around here.
            foreach (ItemBucket bucket in buckets)
            {
                bucket.LeaveScope();
            }

            string targetReturns = _target.Returns;
            ElementLocation targetReturnsLocation = _target.ReturnsLocation;

            // If there are no targets in the project file that use the "Returns" attribute, that means that we
            // revert to the legacy behavior in the case where Returns is not specified (null, rather
            // than the empty string, which indicates no returns). Legacy behavior is for all
            // of the target's Outputs to be returned.
            // On the other hand, if there is at least one target in the file that uses the Returns attribute,
            // then all targets in the file are run according to the new behaviour (return nothing unless otherwise
            // specified by the Returns attribute).
            if (targetReturns == null)
            {
                if (!_target.ParentProjectSupportsReturnsAttribute)
                {
                    targetReturns = _target.Outputs;
                    targetReturnsLocation = _target.OutputsLocation;
                }
            }

            if (!String.IsNullOrEmpty(targetReturns))
            {
                // Determine if we should keep duplicates.
                bool keepDupes = ConditionEvaluator.EvaluateCondition
                    (
                    _target.KeepDuplicateOutputs,
                    ParserOptions.AllowPropertiesAndItemLists,
                    _expander,
                    ExpanderOptions.ExpandPropertiesAndItems,
                    requestEntry.ProjectRootDirectory,
                    _target.KeepDuplicateOutputsLocation,
                    projectLoggingContext.LoggingService,
                    projectLoggingContext.BuildEventContext
                    );

                // NOTE: we need to gather the outputs in batches, because the output specification may reference item metadata
                // Also, we are using the baseLookup, which has possibly had changes made to it since the project started. Because of this, the
                // set of outputs calculated here may differ from those which would have been calculated at the beginning of the target. It is
                // assumed the user intended this.
                List<ItemBucket> batchingBuckets = BatchingEngine.PrepareBatchingBuckets(GetBatchableParametersForTarget(), _baseLookup, _target.Location);

                if (keepDupes)
                {
                    foreach (ItemBucket bucket in batchingBuckets)
                    {
                        targetOutputItems.AddRange(bucket.Expander.ExpandIntoTaskItemsLeaveEscaped(targetReturns, ExpanderOptions.ExpandAll, targetReturnsLocation));
                    }
                }
                else
                {
                    // De-dupe across all buckets while preserving first-seen order.
                    HashSet<TaskItem> addedItems = new HashSet<TaskItem>();
                    foreach (ItemBucket bucket in batchingBuckets)
                    {
                        IList<TaskItem> itemsToAdd = bucket.Expander.ExpandIntoTaskItemsLeaveEscaped(targetReturns, ExpanderOptions.ExpandAll, targetReturnsLocation);

                        foreach (TaskItem item in itemsToAdd)
                        {
                            if (!addedItems.Contains(item))
                            {
                                targetOutputItems.Add(item);
                                addedItems.Add(item);
                            }
                        }
                    }
                }
            }
        }
        finally
        {
            if (targetLoggingContext != null)
            {
                // log the last target finished since we now have the target outputs.
                targetLoggingContext.LogTargetBatchFinished(projectFullPath, targetSuccess, targetOutputItems != null && targetOutputItems.Count > 0 ? targetOutputItems : null);
            }
        }

        _targetResult = new TargetResult(targetOutputItems.ToArray(), aggregateResult);

        if (aggregateResult.ResultCode == WorkUnitResultCode.Failed && aggregateResult.ActionCode == WorkUnitActionCode.Stop)
        {
            _state = TargetEntryState.ErrorExecution;
        }
        else
        {
            _state = TargetEntryState.Completed;
        }
    }
    finally
    {
        _isExecuting = false;
    }
#if MSBUILDENABLEVSPROFILING
    }
    finally
    {
        string endTargetBuild = String.Format(CultureInfo.CurrentCulture, "Build Target {0} in Project {1} - End", this.Name, projectFullPath);
        DataCollection.CommentMarkProfile(8801, endTargetBuild);
    }
#endif
}
/// <summary>
/// Add items to the world. This is the in-target equivalent of an item include expression outside of a target.
/// </summary>
/// <param name="child">The item specification to evaluate and add.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="keepMetadata">Metadata names passed through to item expansion filtering; may be null.</param>
/// <param name="removeMetadata">Metadata names passed through to item expansion filtering; may be null.</param>
private void ExecuteAdd(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, ISet<string> keepMetadata, ISet<string> removeMetadata)
{
    // First, collect up the appropriate metadata collections. We need the one from the item definition, if any, and
    // the one we are using for this batching bucket.
    ProjectItemDefinitionInstance itemDefinition = null;
    Project.ItemDefinitions.TryGetValue(child.ItemType, out itemDefinition);

    // The NestedMetadataTable will handle the aggregation of the different metadata collections
    NestedMetadataTable metadataTable = new NestedMetadataTable(child.ItemType, bucket.Expander.Metadata, itemDefinition);
    IMetadataTable originalMetadataTable = bucket.Expander.Metadata;

    // Temporarily swap the expander's metadata table; it is restored further down.
    bucket.Expander.Metadata = metadataTable;

    // Second, expand the item include and exclude, and filter existing metadata as appropriate.
    IList<ProjectItemInstance> itemsToAdd = ExpandItemIntoItems(child, bucket.Expander, keepMetadata, removeMetadata);

    // Third, expand the metadata.
    foreach (ProjectItemGroupTaskMetadataInstance metadataInstance in child.Metadata)
    {
        bool condition = ConditionEvaluator.EvaluateCondition
            (
            metadataInstance.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            Project.Directory,
            metadataInstance.Location,
            LoggingContext.LoggingService,
            LoggingContext.BuildEventContext
            );

        if (condition)
        {
            string evaluatedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(metadataInstance.Value, ExpanderOptions.ExpandAll, metadataInstance.Location);

            // This both stores the metadata so we can add it to all the items we just created later, and
            // exposes this metadata to further metadata evaluations in subsequent loop iterations.
            metadataTable.SetValue(metadataInstance.Name, evaluatedValue);
        }
    }

    // Finally, copy the added metadata onto the new items. The set call is additive.
    ProjectItemInstance.SetMetadata(metadataTable.AddedMetadata, itemsToAdd); // Add in one operation for potential copy-on-write

    // Restore the original metadata table.
    bucket.Expander.Metadata = originalMetadataTable;

    // Determine if we should NOT add duplicate entries
    bool keepDuplicates = ConditionEvaluator.EvaluateCondition
        (
        child.KeepDuplicates,
        ParserOptions.AllowAll,
        bucket.Expander,
        ExpanderOptions.ExpandAll,
        Project.Directory,
        child.KeepDuplicatesLocation,
        LoggingContext.LoggingService,
        LoggingContext.BuildEventContext
        );

    // Only build the log text when task-input logging is on and something was added.
    if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToAdd != null && itemsToAdd.Count > 0)
    {
        var itemGroupText = ItemGroupLoggingHelper.GetParameterText(ResourceUtilities.GetResourceString("ItemGroupIncludeLogMessagePrefix"), child.ItemType, itemsToAdd.ToArray());
        LoggingContext.LogCommentFromText(MessageImportance.Low, itemGroupText);
    }

    // Now add the items we created to the lookup.
    bucket.Lookup.AddNewItemsOfItemType(child.ItemType, itemsToAdd, !keepDuplicates); // Add in one operation for potential copy-on-write
}
/// <summary>
/// Modifies items in the world - specifically, changes their metadata. Changes to items
/// that are part of the project manifest are backed up, so they can be reverted when the
/// project is reset after the end of the build.
/// </summary>
/// <param name="child">The item specification to evaluate and modify.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="keepMetadata">Metadata names used to build a keep-style modification set; may be null.</param>
/// <param name="removeMetadata">Metadata names used to build a remove-style modification set; may be null.</param>
private void ExecuteModify(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, ISet<string> keepMetadata, ISet<string> removeMetadata)
{
    ICollection<ProjectItemInstance> existingItems = bucket.Lookup.GetItems(child.ItemType);
    if (existingItems == null || existingItems.Count == 0)
    {
        // Nothing of this item type exists, so there is nothing to modify.
        return;
    }

    // Build up the set of metadata names and values to apply.
    var modifications = new Lookup.MetadataModifications(keepMetadata != null);

    // Seed the modification set from the filters. keepMetadata wins over removeMetadata.
    if (keepMetadata != null)
    {
        foreach (var name in keepMetadata)
        {
            modifications[name] = Lookup.MetadataModification.CreateFromNoChange();
        }
    }
    else if (removeMetadata != null)
    {
        foreach (var name in removeMetadata)
        {
            modifications[name] = Lookup.MetadataModification.CreateFromRemove();
        }
    }

    foreach (ProjectItemGroupTaskMetadataInstance metadataInstance in child.Metadata)
    {
        // Each metadata element has its own condition, evaluated against this bucket.
        bool shouldSet = ConditionEvaluator.EvaluateCondition(
            metadataInstance.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            Project.Directory,
            metadataInstance.ConditionLocation,
            LoggingContext.LoggingService,
            LoggingContext.BuildEventContext);

        if (!shouldSet)
        {
            continue;
        }

        string expandedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(metadataInstance.Value, ExpanderOptions.ExpandAll, metadataInstance.Location);
        modifications[metadataInstance.Name] = Lookup.MetadataModification.CreateFromNewValue(expandedValue);
    }

    // Apply the changes last: explicitly set metadata overrides the filters above.
    bucket.Lookup.ModifyItems(child.ItemType, existingItems, modifications);
}
/// <summary>
/// Logs a low-importance comment when a task is skipped because its condition
/// evaluated to false. Intrinsic tasks (no task node) never log skips.
/// </summary>
private void LogSkippedTask(ItemBucket bucket, TaskExecutionMode howToExecuteTask)
{
    // If this is an Intrinsic task, it does not log skips.
    if (_taskNode == null)
    {
        return;
    }

    if (howToExecuteTask != TaskExecutionMode.ExecuteTaskAndGatherOutputs)
    {
        return;
    }

    if (_targetLoggingContext.LoggingService.OnlyLogCriticalEvents)
    {
        return;
    }

    // Expand the expression for the Log.
    string expanded = bucket.Expander.ExpandIntoStringAndUnescape(_targetChildInstance.Condition, ExpanderOptions.ExpandAll, _targetChildInstance.ConditionLocation);

    // Whilst we are within the processing of the task, we haven't actually started executing it, so
    // our skip task message needs to be in the context of the target. However any errors should be reported
    // at the point where the task appears in the project.
    _targetLoggingContext.LogComment(
        MessageImportance.Low,
        "TaskSkippedFalseCondition",
        _taskNode.Name,
        _targetChildInstance.Condition,
        expanded);
}
/// <summary>
/// Runs the task on a dedicated STA thread, for tasks that declare TaskRequirements.RequireSTAThread
/// while the current thread is not STA. Blocks the calling thread until the STA thread finishes,
/// then surfaces either the task result or any non-critical exception the task threw.
/// </summary>
private WorkUnitResult ExecuteTaskInSTAThread(ItemBucket bucket, TaskLoggingContext taskLoggingContext, IDictionary<string, string> taskIdentityParameters, TaskHost taskHost, TaskExecutionMode howToExecuteTask)
{
    WorkUnitResult taskResult = new WorkUnitResult(WorkUnitResultCode.Failed, WorkUnitActionCode.Stop, null);
    Thread staThread = null;
    Exception exceptionFromExecution = null;
    ManualResetEvent taskRunnerFinished = new ManualResetEvent(false);
    try
    {
        ThreadStart taskRunnerDelegate = delegate ()
        {
            // Scope the lookup for the duration of the task run on this thread.
            Lookup.Scope scope = bucket.Lookup.EnterScope("STA Thread for Task");
            try
            {
                // Synchronously wait for the async task execution; this runs entirely on the STA thread.
                taskResult = InitializeAndExecuteTask(taskLoggingContext, bucket, taskIdentityParameters, taskHost, howToExecuteTask).Result;
            }
            catch (Exception e)
            {
                // Critical exceptions propagate immediately; others are captured and
                // rethrown on the calling thread below.
                if (ExceptionHandling.IsCriticalException(e))
                {
                    throw;
                }

                exceptionFromExecution = e;
            }
            finally
            {
                scope.LeaveScope();
                taskRunnerFinished.Set();
            }
        };

        staThread = new Thread(taskRunnerDelegate);
        staThread.SetApartmentState(ApartmentState.STA);
        staThread.Name = "MSBuild STA task runner thread";
        staThread.CurrentCulture = _componentHost.BuildParameters.Culture;
        staThread.CurrentUICulture = _componentHost.BuildParameters.UICulture;
        staThread.Start();

        // TODO: Why not just Join on the thread???
        taskRunnerFinished.WaitOne();
    }
    finally
    {
        taskRunnerFinished.Close();
        taskRunnerFinished = null;
    }

    if (exceptionFromExecution != null)
    {
        // Unfortunately this will reset the callstack
        throw exceptionFromExecution;
    }

    return taskResult;
}
/// <summary>
/// Execute a single bucket
/// </summary>
/// <param name="taskHost">The task host passed through to task execution.</param>
/// <param name="bucket">The batching bucket to execute.</param>
/// <param name="howToExecuteTask">Whether to actually execute the task or only infer its outputs.</param>
/// <param name="lookupHash">When non-null, used to detect and log property overrides; may be null.</param>
/// <returns>true if execution succeeded</returns>
private async Task<WorkUnitResult> ExecuteBucket(TaskHost taskHost, ItemBucket bucket, TaskExecutionMode howToExecuteTask, Dictionary<string, string> lookupHash)
{
    // On Intrinsic tasks, we do not allow batchable params, therefore metadata is excluded.
    ParserOptions parserOptions = (_taskNode == null) ? ParserOptions.AllowPropertiesAndItemLists : ParserOptions.AllowAll;
    WorkUnitResult taskResult = new WorkUnitResult(WorkUnitResultCode.Failed, WorkUnitActionCode.Stop, null);

    bool condition = ConditionEvaluator.EvaluateCondition
        (
        _targetChildInstance.Condition,
        parserOptions,
        bucket.Expander,
        ExpanderOptions.ExpandAll,
        _buildRequestEntry.ProjectRootDirectory,
        _targetChildInstance.ConditionLocation,
        _targetLoggingContext.LoggingService,
        _targetLoggingContext.BuildEventContext
        );

    if (!condition)
    {
        // Condition was false: record the skip (with logging) and bail out early.
        LogSkippedTask(bucket, howToExecuteTask);
        taskResult = new WorkUnitResult(WorkUnitResultCode.Skipped, WorkUnitActionCode.Continue, null);

        return taskResult;
    }

    // If this is an Intrinsic task, it gets handled in a special fashion.
    if (_taskNode == null)
    {
        ExecuteIntrinsicTask(bucket);
        taskResult = new WorkUnitResult(WorkUnitResultCode.Success, WorkUnitActionCode.Continue, null);
    }
    else
    {
        if (_componentHost.BuildParameters.SaveOperatingEnvironment)
        {
            // Change to the project root directory.
            // If that directory does not exist, do nothing. (Do not check first as it is almost always there and it is slow)
            // This is because if the project has not been saved, this directory may not exist, yet it is often useful to still be able to build the project.
            // No errors are masked by doing this: errors loading the project from disk are reported at load time, if necessary.
            NativeMethodsShared.SetCurrentDirectory(_buildRequestEntry.ProjectRootDirectory);
        }

        if (howToExecuteTask == TaskExecutionMode.ExecuteTaskAndGatherOutputs)
        {
            // We need to find the task before logging the task started event so that the using task statement comes before the task started event
            IDictionary<string, string> taskIdentityParameters = GatherTaskIdentityParameters(bucket.Expander);
            TaskRequirements? requirements = _taskExecutionHost.FindTask(taskIdentityParameters);
            if (requirements != null)
            {
                TaskLoggingContext taskLoggingContext = _targetLoggingContext.LogTaskBatchStarted(_projectFullPath, _targetChildInstance);
                try
                {
                    // Tasks that require STA get a dedicated STA thread unless we are already on one.
                    if (
                        ((requirements.Value & TaskRequirements.RequireSTAThread) == TaskRequirements.RequireSTAThread) &&
                        (Thread.CurrentThread.GetApartmentState() != ApartmentState.STA)
                        )
                    {
                        taskResult = ExecuteTaskInSTAThread(bucket, taskLoggingContext, taskIdentityParameters, taskHost, howToExecuteTask);
                    }
                    else
                    {
                        taskResult = await InitializeAndExecuteTask(taskLoggingContext, bucket, taskIdentityParameters, taskHost, howToExecuteTask);
                    }

                    if (lookupHash != null)
                    {
                        // Surface any property-override diagnostics accumulated during execution.
                        List<string> overrideMessages = bucket.Lookup.GetPropertyOverrideMessages(lookupHash);
                        if (overrideMessages != null)
                        {
                            foreach (string s in overrideMessages)
                            {
                                taskLoggingContext.LogCommentFromText(MessageImportance.Low, s);
                            }
                        }
                    }
                }
                catch (InvalidProjectFileException e)
                {
                    // Make sure the Invalid Project error gets logged *before* TaskFinished. Otherwise,
                    // the log is confusing.
                    taskLoggingContext.LogInvalidProjectFileError(e);
                    _continueOnError = ContinueOnError.ErrorAndStop;
                }
                finally
                {
                    // Flag the completion of the task.
                    taskLoggingContext.LogTaskBatchFinished(_projectFullPath, taskResult.ResultCode == WorkUnitResultCode.Success || taskResult.ResultCode == WorkUnitResultCode.Skipped);

                    if (taskResult.ResultCode == WorkUnitResultCode.Failed && _continueOnError == ContinueOnError.WarnAndContinue)
                    {
                        // We coerce the failing result to a successful result.
                        taskResult = new WorkUnitResult(WorkUnitResultCode.Success, taskResult.ActionCode, taskResult.Exception);
                    }
                }
            }
        }
        else
        {
            ErrorUtilities.VerifyThrow(howToExecuteTask == TaskExecutionMode.InferOutputsOnly, "should be inferring");

            ErrorUtilities.VerifyThrow
                (
                GatherTaskOutputs(null, howToExecuteTask, bucket),
                "The method GatherTaskOutputs() should never fail when inferring task outputs."
                );

            if (lookupHash != null)
            {
                // Surface any property-override diagnostics accumulated during inference.
                List<string> overrideMessages = bucket.Lookup.GetPropertyOverrideMessages(lookupHash);
                if (overrideMessages != null)
                {
                    foreach (string s in overrideMessages)
                    {
                        _targetLoggingContext.LogCommentFromText(MessageImportance.Low, s);
                    }
                }
            }

            taskResult = new WorkUnitResult(WorkUnitResultCode.Success, WorkUnitActionCode.Continue, null);
        }
    }

    return taskResult;
}
/// <summary>
/// Uses the given task output specification to (statically) infer the task's outputs,
/// without actually having run the task.
/// </summary>
/// <param name="lookup">The lookup into which inferred items/properties are recorded.</param>
/// <param name="taskOutputSpecification">The task output specification</param>
/// <param name="taskParameterName">The task parameter name</param>
/// <param name="itemName">can be null</param>
/// <param name="propertyName">can be null</param>
/// <param name="bucket">The bucket for the batch.</param>
private void InferTaskOutputs
(
    Lookup lookup,
    ProjectTaskInstanceChild taskOutputSpecification,
    string taskParameterName,
    string itemName,
    string propertyName,
    ItemBucket bucket
)
{
    string taskParameterAttribute = _taskNode.GetParameter(taskParameterName);

    // Nothing to infer if the task invocation doesn't set this parameter at all.
    if (taskParameterAttribute == null)
    {
        return;
    }

    if (taskOutputSpecification is ProjectTaskOutputItemInstance outputItemSpec)
    {
        // This is an output item.
        // Expand only with properties first, so that expressions like Include="@(foo)" will transfer the
        // metadata of the "foo" items as well, not just their item specs.
        IList<string> expandedSpecs = bucket.Expander.ExpandIntoStringListLeaveEscaped(taskParameterAttribute, ExpanderOptions.ExpandPropertiesAndMetadata, outputItemSpec.TaskParameterLocation);

        var factory = new ProjectItemInstanceFactory(_buildRequestEntry.RequestConfiguration.Project, itemName);

        foreach (string spec in expandedSpecs)
        {
            ICollection<ProjectItemInstance> inferredItems = bucket.Expander.ExpandIntoItemsLeaveEscaped(spec, factory, ExpanderOptions.ExpandItems, outputItemSpec.TaskParameterLocation);
            lookup.AddNewItemsOfItemType(itemName, inferredItems);
        }
    }
    else
    {
        // This is an output property.
        var outputPropertySpec = (ProjectTaskOutputPropertyInstance)taskOutputSpecification;

        string inferredValue = bucket.Expander.ExpandIntoStringAndUnescape(taskParameterAttribute, ExpanderOptions.ExpandAll, outputPropertySpec.TaskParameterLocation);

        // Empty values do not overwrite the property; only set when there is something to record.
        if (!String.IsNullOrEmpty(inferredValue))
        {
            lookup.SetProperty(ProjectPropertyInstance.Create(propertyName, inferredValue, outputPropertySpec.TaskParameterLocation, _buildRequestEntry.RequestConfiguration.Project.IsImmutable));
        }
    }
}
private DependencyAnalysisResult PerformDependencyAnalysisTestHelper ( FileWriteInfo[] filesToAnalyze, ItemDictionary<ProjectItemInstance> itemsByName, string inputs, string outputs, out ItemDictionary<ProjectItemInstance> changedTargetInputs, out ItemDictionary<ProjectItemInstance> upToDateTargetInputs ) { List<string> filesToDelete = new List<string>(); try { // first set the disk up for (int i = 0; i < filesToAnalyze.Length; ++i) { string path = ObjectModelHelpers.CreateFileInTempProjectDirectory(filesToAnalyze[i].Path, ""); File.SetCreationTime(path, filesToAnalyze[i].LastWriteTime); File.SetLastWriteTime(path, filesToAnalyze[i].LastWriteTime); filesToDelete.Add(path); } // Wait Thread.Sleep(50); // now create the project string unformattedProjectXml = ObjectModelHelpers.CleanupFileContents( @"<Project ToolsVersion='msbuilddefaulttoolsversion' xmlns='msbuildnamespace'> <Target Name='Build' Inputs=""{0}"" Outputs=""{1}""> </Target> </Project>"); string projectFile = Path.Combine(ObjectModelHelpers.TempProjectDir, "temp.proj"); string formattedProjectXml = String.Format(unformattedProjectXml, inputs, outputs); File.WriteAllText(projectFile, formattedProjectXml); // Wait Thread.Sleep(50); filesToDelete.Add(projectFile); Project project = new Project(projectFile); ProjectInstance p = project.CreateProjectInstance(); // now do the dependency analysis ItemBucket itemBucket = new ItemBucket(null, null, new Lookup(itemsByName, new PropertyDictionary<ProjectPropertyInstance>(), null), 0); TargetUpToDateChecker analyzer = new TargetUpToDateChecker(p, p.Targets["Build"], _mockHost, BuildEventContext.Invalid); return analyzer.PerformDependencyAnalysis(itemBucket, out changedTargetInputs, out upToDateTargetInputs); } finally { // finally clean up foreach (string path in filesToDelete) { if (File.Exists(path)) File.Delete(path); } ProjectCollection.GlobalProjectCollection.UnloadAllProjects(); } }
/// <summary> /// Remove items from the world. Removes to items that are part of the project manifest are backed up, so /// they can be reverted when the project is reset after the end of the build. /// </summary> /// <param name="child">The item specification to evaluate and remove.</param> /// <param name="bucket">The batching bucket.</param> private void ExecuteRemove(ProjectItemGroupTaskItemInstance child, ItemBucket bucket) { ICollection<ProjectItemInstance> group = bucket.Lookup.GetItems(child.ItemType); if (group == null) { // No items of this type to remove return; } List<ProjectItemInstance> itemsToRemove = FindItemsMatchingSpecification(group, child.Remove, child.RemoveLocation, bucket.Expander); if (itemsToRemove != null) { if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToRemove.Count > 0) { var itemGroupText = ItemGroupLoggingHelper.GetParameterText(ResourceUtilities.GetResourceString("ItemGroupRemoveLogMessage"), child.ItemType, itemsToRemove.ToArray()); LoggingContext.LogCommentFromText(MessageImportance.Low, itemGroupText); } bucket.Lookup.RemoveItems(itemsToRemove); } }
/// <summary> /// Initializes and executes the task. /// </summary> private async Task<WorkUnitResult> InitializeAndExecuteTask(TaskLoggingContext taskLoggingContext, ItemBucket bucket, IDictionary<string, string> taskIdentityParameters, TaskHost taskHost, TaskExecutionMode howToExecuteTask) { if (!_taskExecutionHost.InitializeForBatch(taskLoggingContext, bucket, taskIdentityParameters)) { ProjectErrorUtilities.ThrowInvalidProject(_targetChildInstance.Location, "TaskDeclarationOrUsageError", _taskNode.Name); } try { // UNDONE: Move this and the task host. taskHost.LoggingContext = taskLoggingContext; WorkUnitResult executionResult = await ExecuteInstantiatedTask(_taskExecutionHost, taskLoggingContext, taskHost, bucket, howToExecuteTask); ErrorUtilities.VerifyThrow(executionResult != null, "Unexpected null execution result"); return executionResult; } finally { _taskExecutionHost.CleanupForBatch(); } }
/// <summary>
/// Adds items to the world from an ItemGroup inside a target. Expands the item specification,
/// evaluates and applies its metadata, then commits the new items to the lookup.
/// </summary>
/// <param name="child">The item specification to evaluate and add.</param>
/// <param name="bucket">The batching bucket.</param>
/// <param name="keepMetadata">Metadata names to keep (mutually exclusive with removeMetadata); may be null.</param>
/// <param name="removeMetadata">Metadata names to remove; may be null.</param>
private void ExecuteAdd(ProjectItemGroupTaskItemInstance child, ItemBucket bucket, ISet<string> keepMetadata, ISet<string> removeMetadata)
{
    // First, collect up the appropriate metadata collections.  We need the one from the item definition, if any, and
    // the one we are using for this batching bucket.
    ProjectItemDefinitionInstance itemDefinition;
    Project.ItemDefinitions.TryGetValue(child.ItemType, out itemDefinition);

    // The NestedMetadataTable will handle the aggregation of the different metadata collections
    NestedMetadataTable metadataTable = new NestedMetadataTable(child.ItemType, bucket.Expander.Metadata, itemDefinition);
    IMetadataTable originalMetadataTable = bucket.Expander.Metadata;

    // Swap the aggregated table into the expander so metadata references during expansion see
    // bucket + item-definition metadata.  Restored further below.
    bucket.Expander.Metadata = metadataTable;

    // Second, expand the item include and exclude, and filter existing metadata as appropriate.
    List<ProjectItemInstance> itemsToAdd = ExpandItemIntoItems(child, bucket.Expander, keepMetadata, removeMetadata);

    // Third, expand the metadata.
    foreach (ProjectItemGroupTaskMetadataInstance metadataInstance in child.Metadata)
    {
        bool condition = ConditionEvaluator.EvaluateCondition
            (
            metadataInstance.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            Project.Directory,
            metadataInstance.Location,
            LoggingContext.LoggingService,
            LoggingContext.BuildEventContext,
            FileSystems.Default);

        if (condition)
        {
            string evaluatedValue = bucket.Expander.ExpandIntoStringLeaveEscaped(metadataInstance.Value, ExpanderOptions.ExpandAll, metadataInstance.Location);

            // This both stores the metadata so we can add it to all the items we just created later, and
            // exposes this metadata to further metadata evaluations in subsequent loop iterations.
            metadataTable.SetValue(metadataInstance.Name, evaluatedValue);
        }
    }

    // Finally, copy the added metadata onto the new items.  The set call is additive.
    ProjectItemInstance.SetMetadata(metadataTable.AddedMetadata, itemsToAdd); // Add in one operation for potential copy-on-write

    // Restore the original metadata table before evaluating KeepDuplicates, so that
    // condition sees the unmodified bucket metadata.
    bucket.Expander.Metadata = originalMetadataTable;

    // Determine if we should NOT add duplicate entries
    bool keepDuplicates = ConditionEvaluator.EvaluateCondition
        (
        child.KeepDuplicates,
        ParserOptions.AllowAll,
        bucket.Expander,
        ExpanderOptions.ExpandAll,
        Project.Directory,
        child.KeepDuplicatesLocation,
        LoggingContext.LoggingService,
        LoggingContext.BuildEventContext,
        FileSystems.Default);

    if (LogTaskInputs && !LoggingContext.LoggingService.OnlyLogCriticalEvents && itemsToAdd?.Count > 0)
    {
        ItemGroupLoggingHelper.LogTaskParameter(
            LoggingContext,
            TaskParameterMessageKind.AddItem,
            child.ItemType,
            itemsToAdd,
            logItemMetadata: true,
            child.Location);
    }

    // Now add the items we created to the lookup.
    bucket.Lookup.AddNewItemsOfItemType(child.ItemType, itemsToAdd, !keepDuplicates); // Add in one operation for potential copy-on-write
}
/// <summary>
/// Recomputes the task's "ContinueOnError" setting.
/// </summary>
/// <param name="bucket">The bucket being executed.</param>
/// <param name="taskHost">The task host to use.</param>
/// <remarks>
/// There are four possible values:
/// false - Error and stop if the task fails.
/// true - Warn and continue if the task fails.
/// ErrorAndContinue - Error and continue if the task fails.
/// WarnAndContinue - Same as true.
/// </remarks>
private void UpdateContinueOnError(ItemBucket bucket, TaskHost taskHost)
{
    string continueOnErrorAttribute = _taskNode.ContinueOnError;

    // Default until the attribute proves otherwise.
    _continueOnError = ContinueOnError.ErrorAndStop;

    if (_taskNode.ContinueOnErrorLocation != null)
    {
        // expand embedded item vectors after expanding properties and item metadata
        string expanded = bucket.Expander.ExpandIntoStringAndUnescape(continueOnErrorAttribute, ExpanderOptions.ExpandAll, _taskNode.ContinueOnErrorLocation);

        try
        {
            if (String.Equals(XMakeAttributes.ContinueOnErrorValues.errorAndContinue, expanded, StringComparison.OrdinalIgnoreCase))
            {
                _continueOnError = ContinueOnError.ErrorAndContinue;
            }
            else if (String.Equals(XMakeAttributes.ContinueOnErrorValues.warnAndContinue, expanded, StringComparison.OrdinalIgnoreCase))
            {
                _continueOnError = ContinueOnError.WarnAndContinue;
            }
            else if (String.Equals(XMakeAttributes.ContinueOnErrorValues.errorAndStop, expanded, StringComparison.OrdinalIgnoreCase))
            {
                _continueOnError = ContinueOnError.ErrorAndStop;
            }
            else
            {
                // Not one of the named values: treat the attribute as a boolean,
                // where true means warn-and-continue and false means error-and-stop.
                if (ConversionUtilities.ConvertStringToBool(expanded))
                {
                    _continueOnError = ContinueOnError.WarnAndContinue;
                }
                else
                {
                    _continueOnError = ContinueOnError.ErrorAndStop;
                }
            }
        }
        catch (ArgumentException e)
        {
            // handle errors in string-->bool conversion
            ProjectErrorUtilities.VerifyThrowInvalidProject(false, _taskNode.ContinueOnErrorLocation, "InvalidContinueOnErrorAttribute", _taskNode.Name, e.Message);
        }
    }

    // We need to access an internal method of the EngineProxy in order to update the value
    // of continueOnError that will be returned to the task when the task queries IBuildEngine for it
    taskHost.ContinueOnError = (_continueOnError != ContinueOnError.ErrorAndStop);
    taskHost.ConvertErrorsToWarnings = (_continueOnError == ContinueOnError.WarnAndContinue);
}
/// <summary>
/// Execute a task object for a given bucket.
/// </summary>
/// <param name="taskExecutionHost">The host used to execute the task.</param>
/// <param name="taskLoggingContext">The logging context.</param>
/// <param name="taskHost">The task host for the task.</param>
/// <param name="bucket">The batching bucket</param>
/// <param name="howToExecuteTask">The task execution mode</param>
/// <returns>The result of running the task.</returns>
private async Task<WorkUnitResult> ExecuteInstantiatedTask(ITaskExecutionHost taskExecutionHost, TaskLoggingContext taskLoggingContext, TaskHost taskHost, ItemBucket bucket, TaskExecutionMode howToExecuteTask)
{
    UpdateContinueOnError(bucket, taskHost);

    bool taskResult = false;

    WorkUnitResultCode resultCode = WorkUnitResultCode.Success;
    WorkUnitActionCode actionCode = WorkUnitActionCode.Continue;

    if (!taskExecutionHost.SetTaskParameters(_taskNode.ParametersForBuild))
    {
        // The task cannot be initialized.
        ProjectErrorUtilities.VerifyThrowInvalidProject(false, _targetChildInstance.Location, "TaskParametersError", _taskNode.Name, String.Empty);
    }
    else
    {
        bool taskReturned = false;
        Exception taskException = null;

        // If this is the MSBuild task, we need to execute it's special internal method.
        TaskExecutionHost host = taskExecutionHost as TaskExecutionHost;
        Type taskType = host.TaskInstance.GetType();

        try
        {
            if (taskType == typeof(MSBuild))
            {
                // The MSBuild task is run via its internal async entry point, bracketed by
                // callback-state notifications so re-entrant target builds are handled.
                MSBuild msbuildTask = host.TaskInstance as MSBuild;
                ErrorUtilities.VerifyThrow(msbuildTask != null, "Unexpected MSBuild internal task.");
                _targetBuilderCallback.EnterMSBuildCallbackState();

                try
                {
                    taskResult = await msbuildTask.ExecuteInternal();
                }
                finally
                {
                    _targetBuilderCallback.ExitMSBuildCallbackState();
                }
            }
            else if (taskType == typeof(CallTarget))
            {
                // CallTarget also has a special internal async entry point.
                CallTarget callTargetTask = host.TaskInstance as CallTarget;
                taskResult = await callTargetTask.ExecuteInternal();
            }
            else
            {
                // Ordinary task: execute through the host, with full tracking scoped around it.
                using (FullTracking.Track(taskLoggingContext.TargetLoggingContext.Target.Name, _taskNode.Name, _buildRequestEntry.ProjectRootDirectory, _buildRequestEntry.RequestConfiguration.Project.PropertiesToBuildWith))
                {
                    taskResult = taskExecutionHost.Execute();
                }
            }
        }
        catch (Exception ex)
        {
            // Critical exceptions (and all exceptions when the escape-hatch env var is set)
            // propagate; everything else is stashed for the taxonomy below.
            if (ExceptionHandling.IsCriticalException(ex) || (Environment.GetEnvironmentVariable("MSBUILDDONOTCATCHTASKEXCEPTIONS") == "1"))
            {
                throw;
            }

            taskException = ex;
        }

        if (taskException == null)
        {
            taskReturned = true;

            // Set the property "MSBuildLastTaskResult" to reflect whether the task succeeded or not.
            // The main use of this is if ContinueOnError is true -- so that the next task can consult the result.
            // So we want it to be "false" even if ContinueOnError is true.
            // The constants "true" and "false" should NOT be localized. They become property values.
            bucket.Lookup.SetProperty(ProjectPropertyInstance.Create(ReservedPropertyNames.lastTaskResult, taskResult ? "true" : "false", true/* may be reserved */, _buildRequestEntry.RequestConfiguration.Project.IsImmutable));
        }
        else
        {
            // Dispatch on the exact exception type; order matters, and each branch decides
            // whether to rethrow (wrapped to preserve the callstack) or log and continue.
            Type type = taskException.GetType();

            if (type == typeof(LoggerException))
            {
                // if a logger has failed, abort immediately
                // Polite logger failure
                _continueOnError = ContinueOnError.ErrorAndStop;

                // Rethrow wrapped in order to avoid losing the callstack
                throw new LoggerException(taskException.Message, taskException);
            }
            else if (type == typeof(InternalLoggerException))
            {
                // Logger threw arbitrary exception
                _continueOnError = ContinueOnError.ErrorAndStop;
                InternalLoggerException ex = taskException as InternalLoggerException;

                // Rethrow wrapped in order to avoid losing the callstack
                throw new InternalLoggerException(taskException.Message, taskException, ex.BuildEventArgs, ex.ErrorCode, ex.HelpKeyword, ex.InitializationException);
            }
            else if (type == typeof(ThreadAbortException))
            {
                Thread.ResetAbort();
                _continueOnError = ContinueOnError.ErrorAndStop;

                // Cannot rethrow wrapped as ThreadAbortException is sealed and has no appropriate constructor
                // Stack will be lost
                throw taskException;
            }
            else if (type == typeof(BuildAbortedException))
            {
                _continueOnError = ContinueOnError.ErrorAndStop;

                // Rethrow wrapped in order to avoid losing the callstack
                throw new BuildAbortedException(taskException.Message, ((BuildAbortedException)taskException));
            }
            else if (type == typeof(CircularDependencyException))
            {
                _continueOnError = ContinueOnError.ErrorAndStop;
                ProjectErrorUtilities.ThrowInvalidProject(taskLoggingContext.Task.Location, "CircularDependency", taskLoggingContext.TargetLoggingContext.Target.Name);
            }
            else if (type == typeof(InvalidProjectFileException))
            {
                // Just in case this came out of a task, make sure it's not
                // marked as having been logged.
                InvalidProjectFileException ipex = (InvalidProjectFileException)taskException;
                ipex.HasBeenLogged = false;

                if (_continueOnError != ContinueOnError.ErrorAndStop)
                {
                    taskLoggingContext.LogInvalidProjectFileError(ipex);
                    taskLoggingContext.LogComment(MessageImportance.Normal, "ErrorConvertedIntoWarning");
                }
                else
                {
                    // Rethrow wrapped in order to avoid losing the callstack
                    throw new InvalidProjectFileException(ipex.Message, ipex);
                }
            }
            else if (type == typeof(Exception) || type.IsSubclassOf(typeof(Exception)))
            {
                // Occasionally, when debugging a very uncommon task exception, it is useful to loop the build with
                // a debugger attached to break on 2nd chance exceptions.
                // That requires that there needs to be a way to not catch here, by setting an environment variable.
                if (ExceptionHandling.IsCriticalException(taskException) || (Environment.GetEnvironmentVariable("MSBUILDDONOTCATCHTASKEXCEPTIONS") == "1"))
                {
                    // Wrapping in an Exception will unfortunately mean that this exception would fly through any IsCriticalException above.
                    // However, we should not have any, also we should not have stashed such an exception anyway.
                    throw new Exception(taskException.Message, taskException);
                }

                Exception exceptionToLog = taskException;

                // Unwrap reflection's wrapper so the task's real exception is what gets logged.
                if (exceptionToLog is TargetInvocationException)
                {
                    exceptionToLog = exceptionToLog.InnerException;
                }

                // handle any exception thrown by the task during execution
                // NOTE: We catch ALL exceptions here, to attempt to completely isolate the Engine
                // from failures in the task.
                if (_continueOnError == ContinueOnError.WarnAndContinue)
                {
                    taskLoggingContext.LogTaskWarningFromException
                    (
                        new BuildEventFileInfo(_targetChildInstance.Location),
                        exceptionToLog,
                        _taskNode.Name
                    );

                    // Log a message explaining why we converted the previous error into a warning.
                    taskLoggingContext.LogComment(MessageImportance.Normal, "ErrorConvertedIntoWarning");
                }
                else
                {
                    taskLoggingContext.LogFatalTaskError
                    (
                        new BuildEventFileInfo(_targetChildInstance.Location),
                        exceptionToLog,
                        _taskNode.Name
                    );
                }
            }
            else
            {
                // Every Exception-derived type is covered above, so this is unreachable.
                ErrorUtilities.ThrowInternalErrorUnreachable();
            }
        }

        // If the task returned attempt to gather its outputs.  If gathering outputs fails set the taskResults
        // to false
        if (taskReturned)
        {
            taskResult = GatherTaskOutputs(taskExecutionHost, howToExecuteTask, bucket) && taskResult;
        }

        // If the taskResults are false look at ContinueOnError.  If ContinueOnError=false (default)
        // mark the taskExecutedSuccessfully=false.  Otherwise let the task succeed but log a normal
        // pri message that says this task is continuing because ContinueOnError=true
        resultCode = taskResult ? WorkUnitResultCode.Success : WorkUnitResultCode.Failed;
        actionCode = WorkUnitActionCode.Continue;
        if (resultCode == WorkUnitResultCode.Failed)
        {
            if (_continueOnError == ContinueOnError.ErrorAndStop)
            {
                actionCode = WorkUnitActionCode.Stop;
            }
            else
            {
                // This is the ErrorAndContinue or WarnAndContinue case...
                string settingString = "true";
                if (_taskNode.ContinueOnErrorLocation != null)
                {
                    settingString = bucket.Expander.ExpandIntoStringAndUnescape(_taskNode.ContinueOnError, ExpanderOptions.ExpandAll, _taskNode.ContinueOnErrorLocation); // expand embedded item vectors after expanding properties and item metadata
                }

                taskLoggingContext.LogComment
                (
                    MessageImportance.Normal,
                    "TaskContinuedDueToContinueOnError",
                    "ContinueOnError",
                    _taskNode.Name,
                    settingString
                );

                actionCode = WorkUnitActionCode.Continue;
            }
        }
    }

    WorkUnitResult result = new WorkUnitResult(resultCode, actionCode, null);

    return result;
}
/// <summary>
/// Gathers task outputs in two ways:
/// 1) Given an instantiated task that has finished executing, it extracts the outputs using .NET reflection.
/// 2) Otherwise, it parses the task's output specifications and (statically) infers the outputs.
/// </summary>
/// <param name="taskExecutionHost">The task execution host.</param>
/// <param name="howToExecuteTask">The task execution mode</param>
/// <param name="bucket">The bucket to which the task execution belongs.</param>
/// <returns>true, if successful</returns>
private bool GatherTaskOutputs(ITaskExecutionHost taskExecutionHost, TaskExecutionMode howToExecuteTask, ItemBucket bucket)
{
    bool gatheredTaskOutputsSuccessfully = true;

    foreach (ProjectTaskInstanceChild taskOutputSpecification in _taskNode.Outputs)
    {
        // if the task's outputs are supposed to be gathered
        bool condition = ConditionEvaluator.EvaluateCondition
            (
            taskOutputSpecification.Condition,
            ParserOptions.AllowAll,
            bucket.Expander,
            ExpanderOptions.ExpandAll,
            _buildRequestEntry.ProjectRootDirectory,
            taskOutputSpecification.ConditionLocation,
            _targetLoggingContext.LoggingService,
            _targetLoggingContext.BuildEventContext
            );

        if (condition)
        {
            string taskParameterName = null;
            bool outputTargetIsItem = false;
            string outputTargetName = null;

            // check where the outputs are going -- into a vector, or a property?
            ProjectTaskOutputItemInstance taskOutputItemInstance = taskOutputSpecification as ProjectTaskOutputItemInstance;

            if (taskOutputItemInstance != null)
            {
                // expand all embedded properties, item metadata and item vectors in the item type name
                outputTargetIsItem = true;

                outputTargetName = bucket.Expander.ExpandIntoStringAndUnescape(taskOutputItemInstance.ItemType, ExpanderOptions.ExpandAll, taskOutputItemInstance.ItemTypeLocation);
                taskParameterName = taskOutputItemInstance.TaskParameter;

                // An item type that expands to empty is invalid.
                ProjectErrorUtilities.VerifyThrowInvalidProject
                (
                    outputTargetName.Length > 0,
                    taskOutputItemInstance.ItemTypeLocation,
                    "InvalidEvaluatedAttributeValue",
                    outputTargetName,
                    taskOutputItemInstance.ItemType,
                    XMakeAttributes.itemName,
                    XMakeElements.output
                );
            }
            else
            {
                ProjectTaskOutputPropertyInstance taskOutputPropertyInstance = taskOutputSpecification as ProjectTaskOutputPropertyInstance;
                outputTargetIsItem = false;

                // expand all embedded properties, item metadata and item vectors in the property name
                outputTargetName = bucket.Expander.ExpandIntoStringAndUnescape(taskOutputPropertyInstance.PropertyName, ExpanderOptions.ExpandAll, taskOutputPropertyInstance.PropertyNameLocation);
                taskParameterName = taskOutputPropertyInstance.TaskParameter;

                // A property name that expands to empty is invalid.
                ProjectErrorUtilities.VerifyThrowInvalidProject
                (
                    outputTargetName.Length > 0,
                    taskOutputPropertyInstance.PropertyNameLocation,
                    "InvalidEvaluatedAttributeValue",
                    outputTargetName,
                    taskOutputPropertyInstance.PropertyName,
                    XMakeAttributes.propertyName,
                    XMakeElements.output
                );
            }

            // Keep the raw name for the error message, then expand the TaskParameter attribute itself.
            string unexpandedTaskParameterName = taskParameterName;
            taskParameterName = bucket.Expander.ExpandIntoStringAndUnescape(taskParameterName, ExpanderOptions.ExpandAll, taskOutputSpecification.TaskParameterLocation);

            ProjectErrorUtilities.VerifyThrowInvalidProject
            (
                taskParameterName.Length > 0,
                taskOutputSpecification.TaskParameterLocation,
                "InvalidEvaluatedAttributeValue",
                taskParameterName,
                unexpandedTaskParameterName,
                XMakeAttributes.taskParameter,
                XMakeElements.output
            );

            // if we're gathering outputs by .NET reflection
            if (howToExecuteTask == TaskExecutionMode.ExecuteTaskAndGatherOutputs)
            {
                gatheredTaskOutputsSuccessfully = taskExecutionHost.GatherTaskOutputs(taskParameterName, taskOutputSpecification.Location, outputTargetIsItem, outputTargetName);
            }
            else
            {
                // If we're inferring outputs based on information in the task and <Output> tags
                ErrorUtilities.VerifyThrow(howToExecuteTask == TaskExecutionMode.InferOutputsOnly, "should be inferring");

                // UNDONE: Refactor this method to use the same flag/string paradigm we use above, rather than two strings and the task output spec.
                // outputTargetName is passed for both itemName and propertyName; InferTaskOutputs uses
                // only the one that matches the output specification's kind.
                InferTaskOutputs(bucket.Lookup, taskOutputSpecification, taskParameterName, outputTargetName, outputTargetName, bucket);
            }
        }

        // Stop at the first output that fails to gather; remaining specs are skipped.
        if (!gatheredTaskOutputsSuccessfully)
        {
            break;
        }
    }

    return gatheredTaskOutputsSuccessfully;
}
// Partitions the consumed items into buckets keyed by the values of the metadata the batchable
// object consumes.  Buckets are kept sorted for binary-search lookup while being built, then
// returned in discovery order so the first item declared lands in the first batch.
private static List<ItemBucket> BucketConsumedItems
(
    Lookup lookup,
    Dictionary<string, ICollection<ProjectItemInstance>> itemListsToBeBatched,
    Dictionary<string, MetadataReference> consumedMetadataReferences,
    ElementLocation elementLocation
)
{
    ErrorUtilities.VerifyThrow(itemListsToBeBatched.Count > 0, "Need item types consumed by the batchable object.");
    ErrorUtilities.VerifyThrow(consumedMetadataReferences.Count > 0, "Need item metadata consumed by the batchable object.");

    var discoveredBuckets = new List<ItemBucket>();

    // Walk every item type the batchable object consumes.
    foreach (KeyValuePair<string, ICollection<ProjectItemInstance>> pair in itemListsToBeBatched)
    {
        // Use the previously-fetched items when available; otherwise ask the lookup.
        ICollection<ProjectItemInstance> items = pair.Value ?? lookup.GetItems(pair.Key);
        if (items == null)
        {
            continue;
        }

        foreach (ProjectItemInstance item in items)
        {
            // Capture this item's values for every metadata name the batchable object consumes.
            Dictionary<string, string> itemMetadataValues = GetItemMetadataValues(item, consumedMetadataReferences, elementLocation);

            // A throwaway bucket carrying just the metadata, usable as a search key.
            ItemBucket probe = ItemBucket.GetDummyBucketForComparisons(itemMetadataValues);

            int foundIndex = discoveredBuckets.BinarySearch(probe);

            ItemBucket targetBucket;
            if (foundIndex >= 0)
            {
                // An existing bucket has identical values for all consumed metadata.
                targetBucket = discoveredBuckets[foundIndex];
            }
            else
            {
                // No match: create a bucket, and insert it at the position List<T>.BinarySearch
                // reported via the bitwise complement, which keeps the list sorted.
                targetBucket = new ItemBucket(itemListsToBeBatched.Keys, itemMetadataValues, lookup, discoveredBuckets.Count);
                discoveredBuckets.Insert(~foundIndex, targetBucket);
            }

            targetBucket.AddItem(item);
        }
    }

    // Put the buckets back in the order in which they were discovered, so that the first
    // item declared in the project file ends up in the first batch passed into the target/task.
    var orderedBuckets = new List<ItemBucket>(discoveredBuckets.Count);
    for (int i = 0; i < discoveredBuckets.Count; ++i)
    {
        orderedBuckets.Add(null);
    }

    foreach (ItemBucket bucket in discoveredBuckets)
    {
        orderedBuckets[bucket.BucketSequenceNumber] = bucket;
    }

    return orderedBuckets;
}
/// <summary>
/// Partitions the items consumed by the batchable object into buckets, where each bucket contains a set of items that
/// have the same value set on all item metadata consumed by the object.
/// </summary>
/// <remarks>
/// PERF NOTE: Given n items and m batching metadata that produce l buckets, it is usually the case that n > l > m,
/// because a batchable object typically uses one or two item metadata to control batching, and only has a handful of
/// buckets. The number of buckets is typically only large if a batchable object is using single-item batching
/// (where l == n). Any algorithm devised for bucketing therefore, should try to minimize n and l in its complexity
/// equation. The algorithm below has a complexity of O(n*lg(l)*m/2) in its comparisons, and is effectively O(n) when
/// l is small, and O(n*lg(n)) in the worst case as l -> n. However, note that the comparison complexity is not the
/// same as the operational complexity for this algorithm. The operational complexity of this algorithm is actually
/// O(n*m + n*lg(l)*m/2 + n*l/2 + n + l), which is effectively O(n^2) in the worst case. The additional complexity comes
/// from the array and metadata operations that are performed. However, those operations are extremely cheap compared
/// to the comparison operations, which dominate the time spent in this method.
/// </remarks>
/// <returns>List containing ItemBucket objects (can be empty), each one representing an execution batch.</returns>
private static List<ItemBucket> BucketConsumedItems
(
    Lookup lookup,
    Dictionary<string, ICollection<ProjectItemInstance>> itemListsToBeBatched,
    Dictionary<string, MetadataReference> consumedMetadataReferences,
    ElementLocation elementLocation
)
{
    ErrorUtilities.VerifyThrow(itemListsToBeBatched.Count > 0, "Need item types consumed by the batchable object.");
    ErrorUtilities.VerifyThrow(consumedMetadataReferences.Count > 0, "Need item metadata consumed by the batchable object.");

    List<ItemBucket> buckets = new List<ItemBucket>();

    // Get and iterate through the list of item names that we're supposed to batch on.
    foreach (KeyValuePair<string, ICollection<ProjectItemInstance>> entry in itemListsToBeBatched)
    {
        string itemName = entry.Key;

        // Use the previously-fetched items, if possible
        ICollection<ProjectItemInstance> items = entry.Value ?? lookup.GetItems(itemName);

        if (items != null)
        {
            foreach (ProjectItemInstance item in items)
            {
                // Get this item's values for all the metadata consumed by the batchable object.
                Dictionary<string, string> itemMetadataValues = GetItemMetadataValues(item, consumedMetadataReferences, elementLocation);

                // put the metadata into a dummy bucket we can use for searching
                ItemBucket dummyBucket = ItemBucket.GetDummyBucketForComparisons(itemMetadataValues);

                // look through all previously created buckets to find a bucket whose items have the same values as
                // this item for all metadata consumed by the batchable object
                int matchingBucketIndex = buckets.BinarySearch(dummyBucket);
                ItemBucket matchingBucket = (matchingBucketIndex >= 0) ? buckets[matchingBucketIndex] : null;

                // If we didn't find a bucket that matches this item, create a new one, adding
                // this item to the bucket.
                if (matchingBucket == null)
                {
                    matchingBucket = new ItemBucket(itemListsToBeBatched.Keys, itemMetadataValues, lookup, buckets.Count);

                    // make sure to put the new bucket into the appropriate location
                    // in the sorted list as indicated by the binary search
                    // NOTE: observe the ~ operator (bitwise complement) in front of
                    // the index -- see MSDN for more information on the return value
                    // from the List<T>.BinarySearch() method
                    buckets.Insert(~matchingBucketIndex, matchingBucket);
                }

                // We already have a bucket for this type of item, so add this item to
                // the bucket.
                matchingBucket.AddItem(item);
            }
        }
    }

    // Put the buckets back in the order in which they were discovered, so that the first
    // item declared in the project file ends up in the first batch passed into the target/task.
    List<ItemBucket> orderedBuckets = new List<ItemBucket>(buckets.Count);
    for (int i = 0; i < buckets.Count; ++i)
    {
        orderedBuckets.Add(null);
    }

    foreach (ItemBucket bucket in buckets)
    {
        orderedBuckets[bucket.BucketSequenceNumber] = bucket;
    }

    return orderedBuckets;
}