/// <summary>
/// Creates an instance from memory counters expressed in bytes
/// </summary>
public static ProcessMemoryCounters CreateFromBytes(
    ulong peakVirtualMemoryUsage,
    ulong peakWorkingSet,
    ulong peakCommitUsage)
{
    return new ProcessMemoryCounters(
        (int)ByteSizeFormatter.ToMegabytes(peakVirtualMemoryUsage),
        (int)ByteSizeFormatter.ToMegabytes(peakWorkingSet),
        (int)ByteSizeFormatter.ToMegabytes(peakCommitUsage));
}
/// <summary>
/// Creates an instance from memory counters expressed in bytes
/// </summary>
public static ProcessMemoryCounters CreateFromBytes(
    ulong peakWorkingSet,
    ulong averageWorkingSet,
    ulong peakCommitSize,
    ulong averageCommitSize)
{
    return new ProcessMemoryCounters(
        (int)ByteSizeFormatter.ToMegabytes(peakWorkingSet),
        (int)ByteSizeFormatter.ToMegabytes(averageWorkingSet),
        (int)ByteSizeFormatter.ToMegabytes(peakCommitSize),
        (int)ByteSizeFormatter.ToMegabytes(averageCommitSize));
}
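As a usage sketch (the byte values and call sites below are hypothetical, not taken from the snippets above): both overloads accept raw byte counts and convert them to megabytes via ByteSizeFormatter.ToMegabytes before storing, so callers pass unscaled bytes.

    // Hypothetical call sites; only the parameter shapes come from the overloads above.
    var v1 = ProcessMemoryCounters.CreateFromBytes(
        peakVirtualMemoryUsage: 2UL * 1024 * 1024 * 1024, // 2 GB, stored as 2048 MB
        peakWorkingSet: 512UL * 1024 * 1024,              // 512 MB
        peakCommitUsage: 768UL * 1024 * 1024);            // 768 MB

    var v2 = ProcessMemoryCounters.CreateFromBytes(
        peakWorkingSet: 512UL * 1024 * 1024,
        averageWorkingSet: 256UL * 1024 * 1024,
        peakCommitSize: 768UL * 1024 * 1024,
        averageCommitSize: 384UL * 1024 * 1024);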
/// <summary>
/// Constructs new runtime data based on collected performance data
/// </summary>
public ProcessPipHistoricPerfData(ProcessPipExecutionPerformance executionPerformance)
{
    Contract.Requires(executionPerformance.ExecutionLevel == PipExecutionLevel.Executed);

    m_entryTimeToLive = DefaultTimeToLive;

    // Deduct the suspended duration from the process execution time.
    DurationInMs = (uint)Math.Min(
        uint.MaxValue,
        Math.Max(1, executionPerformance.ProcessExecutionTime.TotalMilliseconds - executionPerformance.SuspendedDurationMs));

    // For historical RAM usage, we record the peak working set instead of the virtual memory because it is more precise.
    MemoryCounters = executionPerformance.MemoryCounters;

    DiskIOInMB = (uint)Math.Min(uint.MaxValue, ByteSizeFormatter.ToMegabytes(executionPerformance.IO.GetAggregateIO().TransferCount));
    ProcessorsInPercents = executionPerformance.ProcessorsInPercents;
}
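A minimal, self-contained sketch of the duration clamping in the constructor above; the class and method names here are illustrative, and the millisecond values are made up.

    using System;

    // Standalone demo of the Math.Min/Math.Max pattern used for DurationInMs above.
    static class DurationClampDemo
    {
        static uint ClampDurationMs(double processExecutionTimeMs, double suspendedDurationMs)
        {
            // Floor at 1 ms so a heavily suspended pip still records a positive duration,
            // and cap at uint.MaxValue so the double-to-uint narrowing cannot overflow.
            return (uint)Math.Min(uint.MaxValue, Math.Max(1, processExecutionTimeMs - suspendedDurationMs));
        }

        static void Main()
        {
            Console.WriteLine(ClampDurationMs(90_500, 30_000)); // 60500
            Console.WriteLine(ClampDurationMs(10_000, 25_000)); // 1 (suspension exceeded execution time)
        }
    }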
/// <summary>
/// Choose a worker based on setup cost
/// </summary>
private Worker ChooseWorker(RunnablePip runnablePip, WorkerSetupCost[] workerSetupCosts, out WorkerResource? limitingResource)
{
    if (MustRunOnMaster(runnablePip))
    {
        // This is a shortcut for single-machine builds and distributed workers.
        return LocalWorker.TryAcquire(runnablePip, out limitingResource, loadFactor: MaxLoadFactor) ? LocalWorker : null;
    }

    ResetStatus();

    var pendingWorkerSelectionPipCount = PipQueue.GetNumQueuedByKind(DispatcherKind.ChooseWorkerCpu);

    bool loadBalanceWorkers = false;
    if (runnablePip.PipType == PipType.Process)
    {
        if (pendingWorkerSelectionPipCount + m_totalAcquiredProcessSlots < (m_totalProcessSlots / 2))
        {
            // When there is a limited amount of work (less than half the total capacity
            // of all the workers), we load balance so that each worker gets its share
            // of the work and the work can complete faster.
            loadBalanceWorkers = true;
        }
    }

    long setupCostForBestWorker = workerSetupCosts[0].SetupBytes;
    limitingResource = null;
    foreach (var loadFactor in m_workerBalancedLoadFactors)
    {
        if (!loadBalanceWorkers && loadFactor < 1)
        {
            // Not load balancing, so allow the worker to be filled to capacity at least.
            continue;
        }

        for (int i = 0; i < workerSetupCosts.Length; i++)
        {
            var worker = workerSetupCosts[i].Worker;
            if (worker.TryAcquire(runnablePip, out limitingResource, loadFactor: loadFactor))
            {
                runnablePip.Performance.SetInputMaterializationCost(
                    ByteSizeFormatter.ToMegabytes((ulong)setupCostForBestWorker),
                    ByteSizeFormatter.ToMegabytes((ulong)workerSetupCosts[i].SetupBytes));

                return worker;
            }
        }
    }

    return null;
}
/// <summary>
/// Choose a worker based on setup cost
/// </summary>
private Worker ChooseWorker(RunnablePip runnablePip, WorkerSetupCost[] workerSetupCosts, out WorkerResource? limitingResource)
{
    if (MustRunOnMaster(runnablePip))
    {
        // This is a shortcut for single-machine builds and distributed workers.
        return LocalWorker.TryAcquire(runnablePip, out limitingResource, loadFactor: MaxLoadFactor) ? LocalWorker : null;
    }

    ResetStatus();

    var pendingWorkerSelectionPipCount = PipQueue.GetNumQueuedByKind(DispatcherKind.ChooseWorkerCpu)
        + PipQueue.GetNumRunningByKind(DispatcherKind.ChooseWorkerCpu);

    bool loadBalanceWorkers = false;
    if (runnablePip.PipType == PipType.Process)
    {
        if (pendingWorkerSelectionPipCount + m_totalAcquiredProcessSlots < (m_totalProcessSlots / 2))
        {
            // When there is a limited amount of work (less than half the total capacity
            // of all the workers), we load balance so that each worker gets its share
            // of the work and the work can complete faster.
            loadBalanceWorkers = true;
        }
    }

    double? disableLoadBalanceMultiplier = EngineEnvironmentSettings.DisableLoadBalanceMultiplier;

    // Disable load balancing if a multiplier is specified, including 0.
    loadBalanceWorkers &= !disableLoadBalanceMultiplier.HasValue;

    long setupCostForBestWorker = workerSetupCosts[0].SetupBytes;
    limitingResource = null;
    foreach (var loadFactor in m_workerBalancedLoadFactors)
    {
        if (!loadBalanceWorkers && loadFactor < 1)
        {
            // Not load balancing, so allow the worker to be filled to capacity at least.
            continue;
        }

        for (int i = 0; i < workerSetupCosts.Length; i++)
        {
            var worker = workerSetupCosts[i].Worker;
            if (worker.TryAcquire(runnablePip, out limitingResource, loadFactor: loadFactor))
            {
                runnablePip.Performance.SetInputMaterializationCost(
                    ByteSizeFormatter.ToMegabytes((ulong)setupCostForBestWorker),
                    ByteSizeFormatter.ToMegabytes((ulong)workerSetupCosts[i].SetupBytes));

                return worker;
            }

            // If the worker was not chosen due to a lack of process slots, and
            // 'BuildXLDisableLoadBalanceMultiplier' is specified, do not try the next
            // worker immediately. We first check whether the number of pips waiting for
            // a worker is less than the worker's total process slots times the multiplier.
            // For example, if the multiplier is 1 and TotalProcessSlots is 100, we do not
            // try the next worker while fewer than 100 pips are waiting for a worker.
            // For Cosine builds, executing pips on a new worker is expensive due to input
            // materialization; it is usually faster to wait for the busy worker to become
            // available than to try another worker.
            if (limitingResource == WorkerResource.AvailableProcessSlots &&
                disableLoadBalanceMultiplier.HasValue &&
                pendingWorkerSelectionPipCount < (worker.TotalProcessSlots * disableLoadBalanceMultiplier.Value))
            {
                limitingResource = WorkerResource.DisableLoadBalance;
                return null;
            }
        }
    }

    return null;
}
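A minimal, self-contained model of the two-level selection loop shared by both versions above. FakeWorker, the load-factor values, and Choose are stand-ins, not BuildXL types; the real code assumes workerSetupCosts is sorted so index 0 is the cheapest-setup worker.

    using System;

    // Stand-in for a worker: acquirable while its usage stays under TotalSlots * loadFactor.
    sealed class FakeWorker
    {
        public int AcquiredSlots;
        public int TotalSlots;

        public bool TryAcquire(double loadFactor)
        {
            if (AcquiredSlots < TotalSlots * loadFactor)
            {
                AcquiredSlots++;
                return true;
            }
            return false;
        }
    }

    static class ChooseWorkerDemo
    {
        // Mirrors the role of m_workerBalancedLoadFactors (values assumed):
        // sweep from lightly loaded toward full capacity, preferring
        // cheaper-setup workers at each step.
        static readonly double[] LoadFactors = { 0.25, 0.5, 1.0 };

        static FakeWorker Choose(FakeWorker[] sortedBySetupCost, bool loadBalance)
        {
            foreach (var loadFactor in LoadFactors)
            {
                // Without load balancing, skip the fractional factors and go
                // straight to filling each worker to capacity.
                if (!loadBalance && loadFactor < 1)
                {
                    continue;
                }

                foreach (var worker in sortedBySetupCost)
                {
                    if (worker.TryAcquire(loadFactor))
                    {
                        return worker;
                    }
                }
            }

            // No worker had room under any allowed load factor.
            return null;
        }

        static void Main()
        {
            var workers = new[]
            {
                new FakeWorker { TotalSlots = 4 }, // cheapest setup first
                new FakeWorker { TotalSlots = 4 },
            };

            // With load balancing on, the pip lands on the cheapest worker while it
            // is under the lowest load factor; later pips spill to the next worker
            // before the first one fills up.
            Console.WriteLine(Choose(workers, loadBalance: true) == workers[0]); // True
        }
    }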