        /// <summary>
        /// Choose a worker for cache lookup (round-robin over the workers when distributed cache lookups are enabled)
        /// </summary>
        protected override Task<Worker> ChooseWorkerCore(RunnablePip runnablePip)
        {
            Contract.Requires(runnablePip.PipType == PipType.Process);

            var processRunnable = (ProcessRunnablePip)runnablePip;

            if (Config.Distribution.DistributeCacheLookups && AnyRemoteWorkers)
            {
                var startWorkerOffset = Interlocked.Increment(ref m_cacheLookupWorkerRoundRobinCounter);

                for (int i = 0; i < Workers.Count; i++)
                {
                    var workerId = (i + startWorkerOffset) % Workers.Count;
                    var worker   = Workers[(int)workerId];
                    if (worker.TryAcquireCacheLookup(processRunnable, force: false))
                    {
                        return Task.FromResult(worker);
                    }
                }

                return Task.FromResult((Worker)null);
            }

            var acquired = LocalWorker.TryAcquireCacheLookup(processRunnable, force: true);

            Contract.Assert(acquired, "The local worker must be acquired for cache lookup when force=true");

            return Task.FromResult((Worker)LocalWorker);
        }
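
A minimal, self-contained sketch (not from the original source) of the round-robin pattern used above: an Interlocked counter supplies the starting offset so that concurrent callers probe the worker list from different positions. The delegate array is a hypothetical stand-in for the real Worker.TryAcquireCacheLookup calls.

using System;
using System.Threading;

public static class RoundRobinSketch
{
    private static long s_counter;

    // Returns the index of the first "worker" that accepts the item, or -1 if none does.
    // Assumes the counter stays non-negative, as in the snippet above.
    public static int TryPickWorker(Func<int, bool>[] tryAcquire)
    {
        long startOffset = Interlocked.Increment(ref s_counter);

        for (int i = 0; i < tryAcquire.Length; i++)
        {
            int index = (int)((i + startOffset) % tryAcquire.Length);
            if (tryAcquire[index](index))
            {
                return index;
            }
        }

        return -1;
    }
}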
Example 2
        /// <summary>
        /// Choose a worker based on setup cost
        /// </summary>
        private Worker ChooseWorker(RunnablePip runnablePip, WorkerSetupCost[] workerSetupCosts, out WorkerResource? limitingResource)
        {
            if (MustRunOnMaster(runnablePip))
            {
                // This is a shortcut for single-machine builds and distributed workers.
                return LocalWorker.TryAcquire(runnablePip, out limitingResource, loadFactor: MaxLoadFactor) ? LocalWorker : null;
            }

            ResetStatus();

            var pendingWorkerSelectionPipCount = PipQueue.GetNumQueuedByKind(DispatcherKind.ChooseWorkerCpu);

            bool loadBalanceWorkers = false;

            if (runnablePip.PipType == PipType.Process)
            {
                if (pendingWorkerSelectionPipCount + m_totalAcquiredProcessSlots < (m_totalProcessSlots / 2))
                {
                    // When there is a limited amount of work (less than half the total capacity of
                    // all the workers), load balance so that each worker gets its share of the work
                    // and the work can complete faster.
                    loadBalanceWorkers = true;
                }
            }

            long setupCostForBestWorker = workerSetupCosts[0].SetupBytes;

            limitingResource = null;
            foreach (var loadFactor in m_workerBalancedLoadFactors)
            {
                if (!loadBalanceWorkers && loadFactor < 1)
                {
                    // Not load balancing, so skip load factors below 1 and allow each worker to be filled to capacity first.
                    continue;
                }

                for (int i = 0; i < workerSetupCosts.Length; i++)
                {
                    var worker = workerSetupCosts[i].Worker;
                    if (worker.TryAcquire(runnablePip, out limitingResource, loadFactor: loadFactor))
                    {
                        runnablePip.Performance.SetInputMaterializationCost(
                            ByteSizeFormatter.ToMegabytes((ulong)setupCostForBestWorker),
                            ByteSizeFormatter.ToMegabytes((ulong)workerSetupCosts[i].SetupBytes));
                        return worker;
                    }
                }
            }

            return null;
        }
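
A worked illustration (hypothetical numbers, simplified helper, not part of the original source) of the load-balancing decision in the method above: balancing is only enabled while the queued plus already acquired process pips amount to less than half of the total process slots; otherwise each worker is filled to capacity before work spills to the next one.

public static class LoadBalanceSketch
{
    // Mirrors the threshold used above: balance only while total demand is below half the capacity.
    public static bool ShouldLoadBalance(int pendingPips, int acquiredProcessSlots, int totalProcessSlots)
    {
        return pendingPips + acquiredProcessSlots < totalProcessSlots / 2;
    }

    public static void Example()
    {
        // 10 queued + 30 acquired = 40, which is below 100 / 2 = 50, so work is spread across workers.
        System.Console.WriteLine(ShouldLoadBalance(pendingPips: 10, acquiredProcessSlots: 30, totalProcessSlots: 100)); // True

        // 40 queued + 30 acquired = 70 >= 50, so workers are filled to capacity in setup-cost order instead.
        System.Console.WriteLine(ShouldLoadBalance(pendingPips: 40, acquiredProcessSlots: 30, totalProcessSlots: 100)); // False
    }
}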
        protected ChooseWorkerContext(
            LoggingContext loggingContext,
            IReadOnlyList<Worker> workers,
            IPipQueue pipQueue,
            DispatcherKind kind,
            int maxParallelDegree)
        {
            Workers           = workers;
            PipQueue          = pipQueue;
            LocalWorker       = (LocalWorker)workers[0];
            LoggingContext    = loggingContext;
            Kind              = kind;
            MaxParallelDegree = maxParallelDegree;

            foreach (var worker in Workers)
            {
                worker.ResourcesChanged += OnWorkerResourcesChanged;
            }
        }
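
The constructor above subscribes OnWorkerResourcesChanged to every worker's ResourcesChanged event. Below is a minimal sketch of that subscribe/unsubscribe pattern, using hypothetical stand-in types (SketchWorker, ResourceWatcher) rather than the BuildXL ones.

using System;
using System.Collections.Generic;

public sealed class SketchWorker
{
    public event EventHandler ResourcesChanged;

    public void RaiseResourcesChanged() => ResourcesChanged?.Invoke(this, EventArgs.Empty);
}

public sealed class ResourceWatcher : IDisposable
{
    private readonly IReadOnlyList<SketchWorker> m_workers;

    public ResourceWatcher(IReadOnlyList<SketchWorker> workers)
    {
        m_workers = workers;

        // Subscribe once per worker, as the constructor above does.
        foreach (var worker in m_workers)
        {
            worker.ResourcesChanged += OnWorkerResourcesChanged;
        }
    }

    private void OnWorkerResourcesChanged(object sender, EventArgs e)
    {
        // React to freed or added slots, e.g. wake up the chooser so queued pips can retry.
    }

    public void Dispose()
    {
        // Matching unsubscription that a dispose path would typically perform.
        foreach (var worker in m_workers)
        {
            worker.ResourcesChanged -= OnWorkerResourcesChanged;
        }
    }
}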
Example 4
        /// <summary>
        /// Choose a worker based on setup cost
        /// </summary>
        private Worker ChooseWorker(RunnablePip runnablePip, WorkerSetupCost[] workerSetupCosts, out WorkerResource? limitingResource)
        {
            if (MustRunOnMaster(runnablePip))
            {
                // This is a shortcut for single-machine builds and distributed workers.
                return LocalWorker.TryAcquire(runnablePip, out limitingResource, loadFactor: MaxLoadFactor) ? LocalWorker : null;
            }

            ResetStatus();

            var pendingWorkerSelectionPipCount =
                PipQueue.GetNumQueuedByKind(DispatcherKind.ChooseWorkerCpu) +
                PipQueue.GetNumRunningByKind(DispatcherKind.ChooseWorkerCpu);

            bool loadBalanceWorkers = false;

            if (runnablePip.PipType == PipType.Process)
            {
                if (pendingWorkerSelectionPipCount + m_totalAcquiredProcessSlots < (m_totalProcessSlots / 2))
                {
                    // When there is a limited amount of work (less than half the total capacity of
                    // all the workers), load balance so that each worker gets its share of the work
                    // and the work can complete faster.
                    loadBalanceWorkers = true;
                }
            }

            double? disableLoadBalanceMultiplier = EngineEnvironmentSettings.DisableLoadBalanceMultiplier;

            // Disable load balancing if a multiplier is specified (including 0).
            loadBalanceWorkers &= !disableLoadBalanceMultiplier.HasValue;

            long setupCostForBestWorker = workerSetupCosts[0].SetupBytes;

            limitingResource = null;
            foreach (var loadFactor in m_workerBalancedLoadFactors)
            {
                if (!loadBalanceWorkers && loadFactor < 1)
                {
                    // Not load balancing, so skip load factors below 1 and allow each worker to be filled to capacity first.
                    continue;
                }

                for (int i = 0; i < workerSetupCosts.Length; i++)
                {
                    var worker = workerSetupCosts[i].Worker;
                    if (worker.TryAcquire(runnablePip, out limitingResource, loadFactor: loadFactor))
                    {
                        runnablePip.Performance.SetInputMaterializationCost(
                            ByteSizeFormatter.ToMegabytes((ulong)setupCostForBestWorker),
                            ByteSizeFormatter.ToMegabytes((ulong)workerSetupCosts[i].SetupBytes));
                        return worker;
                    }

                    // If the worker is not chosen because it lacks available process slots,
                    // do not immediately try the next worker when 'BuildXLDisableLoadBalanceMultiplier' is specified.
                    // Instead, first check whether the number of pips waiting for a worker is less than the worker's
                    // total process slots multiplied by the multiplier. For example, if the multiplier is 1 and the
                    // worker has 100 total process slots, the next worker is not tried while fewer than 100 pips
                    // are waiting for a worker.
                    // For Cosine builds, executing pips on a new worker is expensive due to input materialization;
                    // it is usually faster to wait for the busy worker to become available than to run on another worker.
                    if (limitingResource == WorkerResource.AvailableProcessSlots &&
                        disableLoadBalanceMultiplier.HasValue &&
                        pendingWorkerSelectionPipCount < (worker.TotalProcessSlots * disableLoadBalanceMultiplier.Value))
                    {
                        limitingResource = WorkerResource.DisableLoadBalance;
                        return null;
                    }
                }
            }

            return null;
        }
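        }

A small sketch (hypothetical helper, not part of the original source) of the early-exit rule applied above when 'BuildXLDisableLoadBalanceMultiplier' is set: rather than spilling to the next worker as soon as one runs out of process slots, selection gives up and lets the pip wait while the number of queued pips is still below totalProcessSlots * multiplier.

public static class DisableLoadBalanceSketch
{
    public static bool ShouldWaitForBusyWorker(
        int pendingWorkerSelectionPipCount,
        int workerTotalProcessSlots,
        double? disableLoadBalanceMultiplier)
    {
        return disableLoadBalanceMultiplier.HasValue &&
               pendingWorkerSelectionPipCount < workerTotalProcessSlots * disableLoadBalanceMultiplier.Value;
    }

    public static void Example()
    {
        // Multiplier 1.0 and 100 total slots: with 80 pips waiting (80 < 100), keep waiting for the
        // busy worker instead of paying input materialization on another worker.
        System.Console.WriteLine(ShouldWaitForBusyWorker(80, 100, 1.0)); // True

        // With 150 pips waiting (150 >= 100), the next worker is tried as usual.
        System.Console.WriteLine(ShouldWaitForBusyWorker(150, 100, 1.0)); // False
    }
}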