/// <inheritdoc /> public override async Task <PipResultStatus> MaterializeInputsAsync(RunnablePip runnablePip) { using (OnPipExecutionStarted(runnablePip)) { var result = await PipExecutor.MaterializeInputsAsync(runnablePip.OperationContext, runnablePip.Environment, runnablePip.Pip); return(result); } }
protected bool MustRunOnOrchestrator(RunnablePip runnablePip) { if (!AnyRemoteWorkers) { return(true); } return(runnablePip.PipType == PipType.Ipc && ((IpcPip)runnablePip.Pip).MustRunOnOrchestrator); }
/// <summary> /// Called after worker finishes executing the IPC or process pip /// </summary> private void OnPipExecutionCompletion(RunnablePip runnable) { if (!IsContentTrackingEnabled) { // Only perform this operation for distributed master. return; } var operationContext = runnable.OperationContext; var pip = runnable.Pip; var description = runnable.Description; var executionResult = runnable.ExecutionResult; Logger.Log.DistributionFinishedPipRequest(operationContext, pip.SemiStableHash, description, Name, runnable.Step.AsString()); if (executionResult == null) { return; } if ((runnable.Step == PipExecutionStep.PostProcess && !executionResult.Converged) || (!executionResult.Result.IndicatesNoOutput() && runnable.Step == PipExecutionStep.ExecuteNonProcessPip)) { // After post process, if process was not converged (i.e. process execution outputs are used // as results because there was no conflicting cache entry when storing to cache), // report that the worker has the output content // IPC pips don't use cache convergence so always report their outputs foreach (var outputContent in executionResult.OutputContent) { TryAddAvailableContent(outputContent.fileArtifact); } foreach (var directoryContent in executionResult.DirectoryOutputs) { TryAddAvailableContent(directoryContent.directoryArtifact); } } if (IsRemote && (runnable.Step == PipExecutionStep.ExecuteProcess || runnable.Step == PipExecutionStep.ExecuteNonProcessPip)) { // Log the outputs reported from the worker for the pip execution foreach (var outputFile in executionResult.OutputContent) { // NOTE: Available content is not added to the content tracking set here as the content // may be changed due to cache convergence Logger.Log.DistributionMasterWorkerProcessOutputContent( operationContext, pip.SemiStableHash, description, outputFile.fileArtifact.Path.ToString(runnable.Environment.Context.PathTable), outputFile.fileInfo.Hash.ToHex(), outputFile.fileInfo.ReparsePointInfo.ToString(), Name); } } }
/// <summary> /// Enqueues the given runnable pip /// </summary> public void Enqueue(RunnablePip runnablePip) { Contract.Requires(!IsDisposed); m_queue.Enqueue(runnablePip.Priority, runnablePip); if (runnablePip.PipType == Pips.Operations.PipType.Process) { Interlocked.Increment(ref m_processesQueued); } }
/// <summary> /// Dequeues from the priority queue /// </summary> private bool Dequeue(out RunnablePip runnablePip) { if (m_queue.Count != 0) { runnablePip = m_queue.Dequeue(); return(true); } runnablePip = null; return(false); }
public override void StartStep(RunnablePip runnablePip, PipExecutionStep step) { if (step == PipExecutionStep.PostProcess) { ExecutionResult executionResult; var removed = m_processExecutionResult.TryRemove(runnablePip.PipId, out executionResult); Contract.Assert(removed, "Execution result must be stored from ExecuteProcess step for PostProcess"); runnablePip.SetExecutionResult(executionResult); } m_workerService.StartStep(runnablePip, step); }
/// <summary> /// Try to acquire the given resources on the worker. This must be called from a thread-safe context to prevent race conditions. /// </summary> internal bool TryAcquire(RunnablePip runnablePip, out WorkerResource?limitingResource, double loadFactor = 1) { Contract.Requires(runnablePip.PipType == PipType.Ipc || runnablePip.PipType == PipType.Process); Contract.Ensures(Contract.Result <bool>() == (limitingResource == null), "Must set a limiting resource when resources cannot be acquired"); using (EarlyReleaseLock.AcquireReadLock()) { if (!IsAvailable) { limitingResource = WorkerResource.Status; return(false); } if (runnablePip.PipType == PipType.Ipc) { Interlocked.Increment(ref m_acquiredIpcSlots); runnablePip.AcquiredResourceWorker = this; limitingResource = null; return(true); } if (IsLocal) { // The local worker does not use the load factor as it may be throttled down by the // scheduler in order to handle remote requests. loadFactor = 1; } var processRunnablePip = runnablePip as ProcessRunnablePip; // If a process has a weight higher than the total number of process slots, still allow it to run as long as there are no other // processes running (the number of acquired slots is 0) if (AcquiredProcessSlots != 0 && AcquiredProcessSlots + processRunnablePip.Weight > (EffectiveTotalProcessSlots * loadFactor)) { limitingResource = WorkerResource.AvailableProcessSlots; return(false); } StringId limitingResourceName = StringId.Invalid; if (processRunnablePip.TryAcquireResources(m_workerSemaphores, GetAdditionalResourceInfo(processRunnablePip), out limitingResourceName)) { Interlocked.Add(ref m_acquiredProcessSlots, processRunnablePip.Weight); OnWorkerResourcesChanged(WorkerResource.AvailableProcessSlots, increased: false); runnablePip.AcquiredResourceWorker = this; limitingResource = null; return(true); } limitingResource = limitingResourceName == m_ramSemaphoreNameId ? WorkerResource.AvailableMemoryMb : WorkerResource.CreateSemaphoreResource(limitingResourceName.ToString(runnablePip.Environment.Context.StringTable)); return(false); } }
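The slot check above is the core admission rule: a process pip is admitted only if its weight fits under the worker's effective slot count scaled by the load factor, with one exception for an oversized pip on an otherwise idle worker. Below is a minimal sketch of just that arithmetic; the parameter names are hypothetical stand-ins for the worker's counters, and the semaphore check that follows it in the real method is omitted.

    // Hedged sketch of the slot-admission rule used by TryAcquire above (names are illustrative).
    static bool CanAcquireProcessSlots(int acquiredSlots, int effectiveTotalSlots, int weight, double loadFactor)
    {
        // An oversized pip (weight > total slots) is still allowed to run, but only alone:
        // if nothing has been acquired yet, admit it.
        if (acquiredSlots == 0)
        {
            return true;
        }

        // Otherwise the pip must fit under the load-factor-scaled capacity.
        return acquiredSlots + weight <= effectiveTotalSlots * loadFactor;
    }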
public override void EndStep(RunnablePip runnablePip, PipExecutionStep step, TimeSpan duration) { if (step == PipExecutionStep.ExecuteProcess) { // For successful/unsuccessful results of ExecuteProcess, store the result so that when the master calls the worker for // PostProcess it can reuse it rather than sending it again unnecessarily. // The unsuccessful results are stored as well to preserve the existing behavior where PostProcess is also done for such results. // TODO: Should we skip PostProcess when the process fails? In that case PipExecutor.ReportExecutionResultOutputContent should not be in PostProcess. m_processExecutionResult[runnablePip.PipId] = runnablePip.ExecutionResult; } m_workerService.EndStep(runnablePip, step, duration); }
/// <inheritdoc /> public override async Task <PipResultStatus> MaterializeOutputsAsync(RunnablePip runnablePip) { // Need to create a separate operation context since there may be concurrent operations representing executions on remote workers using (var operationContext = runnablePip.OperationContext.StartAsyncOperation(OperationKind.PassThrough)) using (OnPipExecutionStarted(runnablePip, operationContext)) { PipResultStatus result = await PipExecutor.MaterializeOutputsAsync(operationContext, runnablePip.Environment, runnablePip.Pip); return(result); } }
protected PipExecutionScope OnPipExecutionStarted(RunnablePip runnable, OperationContext operationContext = default(OperationContext)) { operationContext = operationContext.IsValid ? operationContext : runnable.OperationContext; var scope = new PipExecutionScope(runnable, this, operationContext); if (IsContentTrackingEnabled) { // Only perform this operation for distributed orchestrator. var pip = runnable.Pip; Logger.Log.DistributionExecutePipRequest(operationContext, pip.FormattedSemiStableHash, Name, runnable.Step.AsString()); } return(scope); }
/// <summary> /// Choose a worker based on setup cost /// </summary> private Worker ChooseWorker(RunnablePip runnablePip, WorkerSetupCost[] workerSetupCosts, out WorkerResource?limitingResource) { if (MustRunOnMaster(runnablePip)) { // This is a shortcut for single-machine builds and distributed workers. return(LocalWorker.TryAcquire(runnablePip, out limitingResource, loadFactor: MaxLoadFactor) ? LocalWorker : null); } ResetStatus(); var pendingWorkerSelectionPipCount = PipQueue.GetNumQueuedByKind(DispatcherKind.ChooseWorkerCpu); bool loadBalanceWorkers = false; if (runnablePip.PipType == PipType.Process) { if (pendingWorkerSelectionPipCount + m_totalAcquiredProcessSlots < (m_totalProcessSlots / 2)) { // When there is a limited amount of work (less than half the total capacity of // all the workers), we load balance so that each worker gets // its share of the work and the work can complete faster loadBalanceWorkers = true; } } long setupCostForBestWorker = workerSetupCosts[0].SetupBytes; limitingResource = null; foreach (var loadFactor in m_workerBalancedLoadFactors) { if (!loadBalanceWorkers && loadFactor < 1) { // Not load balancing, so allow each worker to be filled to capacity at least continue; } for (int i = 0; i < workerSetupCosts.Length; i++) { var worker = workerSetupCosts[i].Worker; if (worker.TryAcquire(runnablePip, out limitingResource, loadFactor: loadFactor)) { runnablePip.Performance.SetInputMaterializationCost(ByteSizeFormatter.ToMegabytes((ulong)setupCostForBestWorker), ByteSizeFormatter.ToMegabytes((ulong)workerSetupCosts[i].SetupBytes)); return(worker); } } } return(null); }
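The method above sweeps the workers (already sorted by ascending setup cost) once per load factor, and only considers fractional load factors when load balancing is enabled. Below is a hedged sketch of that two-level loop; the delegate parameter, the example load-factor values, and the method name are illustrative stand-ins rather than the actual BuildXL API.

    // Hedged sketch of the worker-selection sweep in ChooseWorker above (names are illustrative).
    static Worker ChooseFirstFit(WorkerSetupCost[] workerSetupCosts, double[] balancedLoadFactors, bool loadBalanceWorkers, Func<Worker, double, bool> tryAcquire)
    {
        // e.g. balancedLoadFactors = { 0.25, 0.5, 1.0 } (assumed values for illustration)
        foreach (var loadFactor in balancedLoadFactors)
        {
            if (!loadBalanceWorkers && loadFactor < 1)
            {
                // Not load balancing: skip partial fills and let workers be filled to capacity.
                continue;
            }

            foreach (var candidate in workerSetupCosts)
            {
                // Workers are ordered by ascending setup cost, so the first fit is also the cheapest.
                if (tryAcquire(candidate.Worker, loadFactor))
                {
                    return candidate.Worker;
                }
            }
        }

        // No worker had room; the caller records the limiting resource and retries later.
        return null;
    }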
/// <summary> /// Try to acquire the given resources on the worker /// </summary> internal bool TryAcquire(RunnablePip runnablePip, out WorkerResource?limitingResource, double loadFactor = 1) { Contract.Ensures(Contract.Result <bool>() == (limitingResource == null), "Must set a limiting resource when resources cannot be acquired"); if (!IsAvailable) { limitingResource = WorkerResource.Status; return(false); } if (runnablePip.PipType == PipType.Ipc) { Interlocked.Increment(ref m_acquiredIpcSlots); runnablePip.AcquiredResourceWorker = this; limitingResource = null; return(true); } if (IsLocal) { // The local worker does not use the load factor as it may be throttled down by the // scheduler in order to handle remote requests. loadFactor = 1; } if (AcquiredProcessSlots >= (EffectiveTotalProcessSlots * loadFactor)) { limitingResource = WorkerResource.AvailableProcessSlots; return(false); } var processRunnablePip = runnablePip as ProcessRunnablePip; StringId limitingResourceName = StringId.Invalid; if (processRunnablePip != null && processRunnablePip.TryAcquireResources(m_workerSemaphores, GetAdditionalResourceInfo(processRunnablePip), out limitingResourceName)) { Interlocked.Increment(ref m_acquiredProcessSlots); OnWorkerResourcesChanged(WorkerResource.AvailableProcessSlots, increased: false); runnablePip.AcquiredResourceWorker = this; limitingResource = null; return(true); } limitingResource = limitingResourceName == m_ramSemaphoreNameId ? WorkerResource.AvailableMemoryMb : WorkerResource.CreateSemaphoreResource(limitingResourceName.ToString(runnablePip.Environment.Context.StringTable)); return(false); }
/// <summary> /// Runs pip asynchronously /// </summary> protected async Task RunCoreAsync(RunnablePip runnablePip) { DispatcherReleaser releaser = new DispatcherReleaser(this); try { // Unhandled exceptions (Catastrophic BuildXL Failures) during a pip's execution will be thrown here without an AggregateException. await runnablePip.RunAsync(releaser); } finally { releaser.Release(); m_pipQueue.DecrementRunningOrQueuedPips(); // Trigger dispatching loop in the PipQueue } }
/// <summary> /// Dequeues from the priority queue /// </summary> private bool Dequeue(out RunnablePip runnablePip) { if (m_queue.Count != 0) { runnablePip = m_queue.Dequeue(); if (runnablePip.PipType == Pips.Operations.PipType.Process) { Interlocked.Decrement(ref m_processesQueued); } return(true); } runnablePip = null; return(false); }
/// <inheritdoc /> public override async Task <PipResult> ExecuteIpcAsync(RunnablePip runnablePip) { using (OnPipExecutionStarted(runnablePip)) { var environment = runnablePip.Environment; var ipcPip = (IpcPip)runnablePip.Pip; var operationContext = runnablePip.OperationContext; Transition(runnablePip.PipId, WorkerPipState.Executing); var executionResult = await PipExecutor.ExecuteIpcAsync(operationContext, environment, ipcPip); runnablePip.SetExecutionResult(executionResult); return(RunnablePip.CreatePipResultFromExecutionResult(runnablePip.StartTime, executionResult)); } }
internal void UnpauseChooseWorkerQueueIfEnqueuingNewPip(RunnablePip runnablePip, DispatcherKind nextQueue) { // If enqueuing a new highest priority pip to the queue if (nextQueue == DispatcherKind.ChooseWorkerCpu) { if (runnablePip != m_lastIterationBlockedPip) { TogglePauseChooseWorkerQueue(pause: false); } // Capture the sequence number, which is later compared to decide whether the ChooseWorker queue should stay paused // while waiting for resources for this pip. This avoids a race where the pip cannot acquire a worker, // resources then become available, and the queue is paused afterwards, potentially indefinitely (not likely but // theoretically possible) runnablePip.ChooseWorkerSequenceNumber = Volatile.Read(ref WorkerEnableSequenceNumber); } }
public void InitializeWorkerSetupCost(RunnablePip runnable) { for (int i = 0; i < m_context.Workers.Count; i++) { var worker = m_context.Workers[i]; int acquiredSlots = worker.AcquiredProcessSlots; if (runnable.IsLight) { // For light process and IPC pips, use Light slots to order the workers. acquiredSlots = worker.AcquiredLightSlots; } WorkerSetupCosts[i] = new WorkerSetupCost() { Worker = worker, AcquiredSlots = acquiredSlots }; } }
public override async Task OnPipCompleted(RunnablePip runnablePip) { var pipId = runnablePip.Pip.PipId; PipResultStatus overrideStatus; if (m_overridePipResults.TryGetValue(pipId, out overrideStatus)) { if (overrideStatus.IndicatesFailure()) { m_loggingContext.SpecifyErrorWasLogged(0); } runnablePip.SetPipResult( overrideStatus.IndicatesExecution() ? PipResult.CreateWithPointPerformanceInfo(overrideStatus) : PipResult.CreateForNonExecution(overrideStatus)); if (overrideStatus.IndicatesFailure()) { m_loggingContext.SpecifyErrorWasLogged(0); } } // Set the 'actual' result. NOTE: the override also overrides the actual result. // We set this before calling the wrapped PipCompleted handler since we may // be completing the last pip (we don't want to race with a test checking the pip // result after schedule completion while we are still setting it). PipResults[pipId] = runnablePip.Result.Value.Status; if (runnablePip.Result.HasValue && runnablePip.PipType == PipType.Process) { PathSets[pipId] = runnablePip.ExecutionResult?.PathSet; RunData.CacheLookupResults[pipId] = ((ProcessRunnablePip)runnablePip).CacheResult; RunData.ExecutionCachingInfos[pipId] = runnablePip.ExecutionResult?.TwoPhaseCachingInfo; } await base.OnPipCompleted(runnablePip); m_testPipQueue.OnPipCompleted(runnablePip.PipId); }
/// <summary> /// Executes a pip remotely /// </summary> private async Task <ExecutionResult> ExecutePipRemotely(RunnablePip runnablePip) { using (var operationContext = runnablePip.OperationContext.StartAsyncOperation(PipExecutorCounter.ExecutePipRemotelyDuration)) using (OnPipExecutionStarted(runnablePip, operationContext)) { // Send the pip to the remote machine await SendToRemote(operationContext, runnablePip); // Wait for the result from the remote machine ExecutionResult result = await AwaitRemoteResult(operationContext, runnablePip); using (operationContext.StartOperation(PipExecutorCounter.HandleRemoteResultDuration)) { // Process the remote result HandleRemoteResult(runnablePip, result); } return(result); } }
/// <summary> /// Enqueues the given runnable pip /// </summary> public virtual void Enqueue(RunnablePip runnablePip) { Contract.Requires(!IsDisposed); #if NET_COREAPP_60 lock (m_lock) { m_queue.Enqueue(runnablePip, runnablePip.Priority); } #else m_queue.Enqueue(runnablePip, runnablePip.Priority); #endif Interlocked.Increment(ref m_numQueuedPips); if (runnablePip.PipType == PipType.Process) { Interlocked.Increment(ref m_numQueuedProcessPips); } }
/// <summary> /// Choose a worker based on setup cost /// </summary> protected override async Task <Worker> ChooseWorkerCore(RunnablePip runnablePip) { using (var pooledPipSetupCost = m_pipSetupCostPool.GetInstance()) { var pipSetupCost = pooledPipSetupCost.Instance; if (m_enableSetupCost) { pipSetupCost.EstimateAndSortSetupCostPerWorker(runnablePip); } else { pipSetupCost.SortByUsedSlots(runnablePip); } using (await m_chooseWorkerMutex.AcquireAsync()) { var startTime = TimestampUtilities.Timestamp; ChooseIterations++; WorkerResource?limitingResource; var chosenWorker = ChooseWorker(runnablePip, pipSetupCost.WorkerSetupCosts, out limitingResource); if (chosenWorker == null) { m_lastIterationBlockedPip = runnablePip; LastBlockedPip = runnablePip; var limitingResourceCount = m_limitingResourceCounts.GetOrAdd(limitingResource.Value, k => new BoxRef <int>()); limitingResourceCount.Value++; } else { m_lastIterationBlockedPip = null; } // If a worker is successfully chosen, the limiting resource will be null. LastLimitingResource = limitingResource; m_chooseTime += TimestampUtilities.Timestamp - startTime; return(chosenWorker); } } }
private PerProcessPipPerformanceInformation CreateSamplePip(int index) { Func <RunnablePip, Task <PipResult> > taskFactory = async(runnablePip) => { PipResult result; var operationTracker = new OperationTracker(runnablePip.LoggingContext); var pip = runnablePip.Pip; using (var operationContext = operationTracker.StartOperation(PipExecutorCounter.PipRunningStateDuration, pip.PipId, pip.PipType, runnablePip.LoggingContext)) { result = await TestPipExecutor.ExecuteAsync(operationContext, m_executionEnvironment, pip); } return(result); }; var pathTable = m_context.PathTable; var executable = FileArtifact.CreateSourceFile(AbsolutePath.Create(pathTable, X("/x/pkgs/tool.exe"))); var dependencies = new HashSet <FileArtifact> { executable }; var processBuilder = new ProcessBuilder() .WithExecutable(executable) .WithWorkingDirectory(AbsolutePath.Create(pathTable, X("/x/obj/working"))) .WithArguments(PipDataBuilder.CreatePipData(pathTable.StringTable, " ", PipDataFragmentEscaping.CRuntimeArgumentRules, "-loadargs")) .WithStandardDirectory(AbsolutePath.Create(pathTable, X("/x/obj/working.std"))) .WithDependencies(dependencies) .WithContext(m_context); var dataBuilder = new PipDataBuilder(m_context.PathTable.StringTable); var pipData = dataBuilder.ToPipData(" ", PipDataFragmentEscaping.NoEscaping); var pip = processBuilder.WithArguments(pipData).Build(); var pipId = m_executionEnvironment.PipTable.Add((uint)(index + 1), pip); var runnableProcessPip = (ProcessRunnablePip)(RunnablePip.Create(m_loggingContext, m_executionEnvironment, pipId, PipType.Process, 0, taskFactory, 0)); m_runnablePips.Add(index, runnableProcessPip); // For verification return(GeneratePipInfoWithRunnablePipAndIndex(ref runnableProcessPip, index)); }
private void StartStep(RunnablePip runnablePip, PipExecutionStep step) { var pipId = runnablePip.PipId; var processRunnable = runnablePip as ProcessRunnablePip; Tracing.Logger.Log.DistributionWorkerExecutePipRequest( runnablePip.LoggingContext, runnablePip.Pip.SemiStableHash, runnablePip.Description, runnablePip.Step.AsString()); var completionData = m_pendingPipCompletions[pipId]; completionData.StepExecutionStarted.SetResult(true); switch (step) { case PipExecutionStep.ExecuteProcess: if (runnablePip.PipType == PipType.Process) { SinglePipBuildRequest pipBuildRequest; bool found = m_pendingBuildRequests.TryGetValue(pipId, out pipBuildRequest); Contract.Assert(found, "Could not find corresponding build request for executed pip on worker"); m_pendingBuildRequests[pipId] = null; // Set the cache miss result with fingerprint so ExecuteProcess step can use it var fingerprint = pipBuildRequest.Fingerprint.ToFingerprint(); processRunnable.SetCacheResult(RunnableFromCacheResult.CreateForMiss(new WeakContentFingerprint(fingerprint))); processRunnable.ExpectedMemoryCounters = ProcessMemoryCounters.CreateFromMb( peakWorkingSetMb: pipBuildRequest.ExpectedPeakWorkingSetMb, averageWorkingSetMb: pipBuildRequest.ExpectedAverageWorkingSetMb, peakCommitSizeMb: pipBuildRequest.ExpectedPeakCommitSizeMb, averageCommitSizeMb: pipBuildRequest.ExpectedAverageCommitSizeMb); } break; } }
public async Task SendToRemote(OperationContext operationContext, RunnablePip runnable) { Contract.Assert(m_workerClient != null, "Calling SendToRemote before the worker is initialized"); Contract.Assert(m_attachCompletion.IsValid, "Remote worker not started"); var attachCompletionResult = await m_attachCompletion.Task; var environment = runnable.Environment; var pipId = runnable.PipId; var description = runnable.Description; var pip = runnable.Pip; var processRunnable = runnable as ProcessRunnablePip; var fingerprint = processRunnable?.CacheResult?.Fingerprint ?? ContentFingerprint.Zero; var pipCompletionTask = new PipCompletionTask(runnable.OperationContext, runnable); m_pipCompletionTasks.Add(pipId, pipCompletionTask); if (!attachCompletionResult) { FailRemotePip( pipCompletionTask, "Worker did not attach"); return; } var pipBuildRequest = new SinglePipBuildRequest { ActivityId = operationContext.LoggingContext.ActivityId.ToString(), PipIdValue = pipId.Value, Fingerprint = fingerprint.Hash.ToBondFingerprint(), Priority = runnable.Priority, Step = (int)runnable.Step, ExpectedRamUsageMb = processRunnable?.ExpectedRamUsageMb, SequenceNumber = Interlocked.Increment(ref m_nextSequenceNumber), }; m_buildRequests.Add(ValueTuple.Create(pipCompletionTask, pipBuildRequest)); }
protected override async void StartRunTaskAsync(RunnablePip runnablePip) { // Run the pip on the custom dedicated thread task scheduler await m_taskFactory.StartNew(async() => { var startTime = TimestampUtilities.Timestamp; await RunCoreAsync(runnablePip); Interlocked.Add(ref m_runTimeTicks, (TimestampUtilities.Timestamp - startTime).Ticks); if (NumRunning < MaxRunning) { Interlocked.Increment(ref m_fastChooseNextCount); // Fast path for running more work which queues the task to // execute the next item before the task completes so the // queue does not block waiting for work StartTasks(); } }).Unwrap(); }
/// <summary> /// Release pip's resources after worker is done with the task /// </summary> public void ReleaseResources(RunnablePip runnablePip) { Contract.Assert(runnablePip.AcquiredResourceWorker == this); runnablePip.AcquiredResourceWorker = null; var processRunnablePip = runnablePip as ProcessRunnablePip; if (processRunnablePip != null) { if (runnablePip.Step == PipExecutionStep.CacheLookup) { Interlocked.Decrement(ref m_acquiredCacheLookupSlots); OnWorkerResourcesChanged(WorkerResource.AvailableCacheLookupSlots, increased: true); runnablePip.SetWorker(null); } else { Contract.Assert(processRunnablePip.Resources.HasValue); Interlocked.Add(ref m_acquiredProcessSlots, -processRunnablePip.Weight); var resources = processRunnablePip.Resources.Value; m_workerSemaphores.ReleaseResources(resources); OnWorkerResourcesChanged(WorkerResource.AvailableProcessSlots, increased: true); } } if (runnablePip.PipType == PipType.Ipc) { Interlocked.Decrement(ref m_acquiredIpcSlots); } if (AcquiredSlots == 0 && Status == WorkerNodeStatus.Stopping) { DrainCompletion.TrySetResult(true); } }
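TryAcquire and ReleaseResources are intended to be used as a pair around a pip's step. The following is a simplified, hedged usage sketch: the real scheduler routes acquisition through ChooseWorkerAsync and the dispatcher queues, and the step-execution delegate here is a stand-in rather than an actual API.

    // Hedged usage sketch: acquire a worker's resources for a pip, run the step, always release.
    async Task TryRunOnWorkerAsync(Worker worker, RunnablePip runnablePip, Func<RunnablePip, Task> executeStep)
    {
        WorkerResource? limitingResource;
        if (!worker.TryAcquire(runnablePip, out limitingResource))
        {
            // Could not acquire; limitingResource tells the caller what was exhausted (slots, memory, a semaphore, ...).
            return;
        }

        try
        {
            await executeStep(runnablePip);   // stand-in for executing the pip's current step
        }
        finally
        {
            // Always release, so process/IPC slots and semaphore resources return to the worker.
            worker.ReleaseResources(runnablePip);
        }
    }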
public async Task <Worker> ChooseWorkerAsync(RunnablePip runnablePip) { var worker = await ChooseWorkerCore(runnablePip); if (worker == null) { runnablePip.IsWaitingForWorker = true; Interlocked.Increment(ref ChooseBlockedCount); // Attempt to pause the choose worker queue since resources are not available TogglePauseChooseWorkerQueue(pause: true, blockedPip: runnablePip); } else { runnablePip.IsWaitingForWorker = false; Interlocked.Increment(ref ChooseSuccessCount); // Ensure the queue is unpaused if we managed to choose a worker TogglePauseChooseWorkerQueue(pause: false); } return(worker); }
public void TogglePauseChooseWorkerQueue(bool pause, RunnablePip blockedPip = null) { Contract.Requires(pause == (blockedPip != null), "Must specify blocked pip if and only if pausing the choose worker queue"); if (pause) { if (blockedPip.IsLight) { // Light pips do not block the chooseworker queue. return; } using (m_chooseWorkerTogglePauseLock.AcquireWriteLock()) { // Compare with the sequence number captured before the pip re-entered the queue // to avoid a race where the pip cannot acquire a worker, resources then become available, and the queue is paused afterwards, // potentially indefinitely (not likely but theoretically possible) if (Volatile.Read(ref WorkerEnableSequenceNumber) == blockedPip.ChooseWorkerSequenceNumber) { SetQueueMaxParallelDegree(0); } } } else { using (m_chooseWorkerTogglePauseLock.AcquireReadLock()) { // Update the sequence number. This is essentially called for every increase in resources // and successful acquisition of workers to track changes in resource state that invalidate // the decision to pause the choose worker queue. Interlocked.Increment(ref WorkerEnableSequenceNumber); // Unpause the queue SetQueueMaxParallelDegree(MaxParallelDegree); } } }
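The pause/unpause logic above is a version-stamp handshake: every resource increase bumps WorkerEnableSequenceNumber, and a pause request is honored only if the number still matches what the blocked pip captured, so a stale pause decision cannot hide newly freed capacity. Below is a hedged, self-contained sketch of the same pattern with illustrative names (not the real fields or class).

    // Hedged sketch of the sequence-number handshake; requires System.Threading (Volatile, Interlocked).
    internal sealed class ChooseWorkerPauseToken
    {
        private long m_enableSequenceNumber;

        // Captured by a pip right before it re-enters the ChooseWorker queue.
        public long Capture() => Volatile.Read(ref m_enableSequenceNumber);

        // Called whenever worker resources increase or a worker is acquired successfully.
        public void OnResourcesIncreased() => Interlocked.Increment(ref m_enableSequenceNumber);

        // A pause is honored only if nothing has changed since the pip captured the number.
        public bool ShouldPause(long capturedSequenceNumber) =>
            Volatile.Read(ref m_enableSequenceNumber) == capturedSequenceNumber;
    }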
/// <inheritdoc /> public override async Task <PipResultStatus> MaterializeOutputsAsync(RunnablePip runnablePip) { using (var operationContext = runnablePip.OperationContext.StartAsyncOperation(PipExecutorCounter.ExecuteStepOnAllRemotesDuration)) using (runnablePip.EnterOperation(operationContext)) { Task <PipResultStatus>[] tasks = new Task <PipResultStatus> [m_workers.Length]; // Start from the remote workers for (int i = m_workers.Length - 1; i >= 0; i--) { var worker = m_workers[i]; if (worker.IsLocal) { await m_localMaterializeOutputsSemaphore.WaitAsync(); } tasks[i] = Task.Run(() => worker.MaterializeOutputsAsync(runnablePip)); } // Await the local worker first to release the semaphore. await tasks[LocalWorkerIndex]; m_localMaterializeOutputsSemaphore.Release(); var results = await Task.WhenAll(tasks); foreach (var result in results) { if (result.IndicatesFailure()) { return(result); } } return(results[0]); } }
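The loop above fans output materialization out to every worker but gates the local worker behind m_localMaterializeOutputsSemaphore, so concurrent MaterializeOutputs calls keep local disk I/O bounded; the semaphore is released as soon as the local task finishes, before the remote results are awaited. Below is a hedged sketch of that pattern with stand-in parameter names, not the actual worker API.

    // Hedged sketch of the gated fan-out in MaterializeOutputsAsync above (names are illustrative).
    static async Task<PipResultStatus[]> MaterializeOnAllWorkersAsync(Func<Task<PipResultStatus>>[] remoteMaterializations, Func<Task<PipResultStatus>> localMaterialization, SemaphoreSlim localGate)
    {
        var tasks = new List<Task<PipResultStatus>>();

        // Remote workers are not gated; start them all.
        foreach (var remote in remoteMaterializations)
        {
            tasks.Add(Task.Run(remote));
        }

        // Bound the number of concurrent local materializations.
        await localGate.WaitAsync();
        var localTask = Task.Run(localMaterialization);
        tasks.Add(localTask);

        // Release the gate as soon as the local work is done, then wait for the remotes.
        await localTask;
        localGate.Release();

        return await Task.WhenAll(tasks);
    }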
public void HandleRemoteResult(RunnablePip runnable, ExecutionResult executionResult) { var environment = runnable.Environment; var operationContext = runnable.OperationContext; var description = runnable.Description; var pip = runnable.Pip; var pipType = runnable.PipType; bool isExecuteStep = runnable.Step == PipExecutionStep.ExecuteProcess || runnable.Step == PipExecutionStep.ExecuteNonProcessPip; if (runnable.Step == PipExecutionStep.CacheLookup && executionResult.CacheLookupPerfInfo != null) { var perfInfo = executionResult.CacheLookupPerfInfo; runnable.Performance.SetCacheLookupPerfInfo(perfInfo); if (perfInfo.CacheMissType != PipCacheMissType.Invalid) { environment.Counters.IncrementCounter((PipExecutorCounter)perfInfo.CacheMissType); } } if (isExecuteStep) { runnable.SetExecutionResult(executionResult); } if (executionResult.Result == PipResultStatus.Failed) { // Failure m_masterService.Environment.Counters.IncrementCounter(pip.PipType == PipType.Process ? PipExecutorCounter.ProcessPipsFailedRemotely : PipExecutorCounter.IpcPipsFailedRemotely); return; } if (!isExecuteStep) { return; } // Success if (pipType == PipType.Process) { m_masterService.Environment.Counters.IncrementCounter(PipExecutorCounter.ProcessPipsSucceededRemotely); // NOTE: Process outputs will be reported later during the PostProcess step. } else { Contract.Assert(pipType == PipType.Ipc); m_masterService.Environment.Counters.IncrementCounter(PipExecutorCounter.IpcPipsSucceededRemotely); // NOTE: Output content is reported for IPC but not Process because Process outputs will be reported // later during PostProcess because cache convergence can change which outputs for a process are used // Report the payload file of the IPC pip foreach (var(fileArtifact, fileInfo, pipOutputOrigin) in executionResult.OutputContent) { environment.State.FileContentManager.ReportOutputContent( operationContext, description, artifact: fileArtifact, info: fileInfo, origin: pipOutputOrigin); } } }