        /// <summary>
        /// Struct initializer.
        /// </summary>
        private PerformanceMeasurement(
            LoggingContext parentLoggingContext,
            PerformanceCollector.Aggregator aggregator,
            string phaseFriendlyName,
            Action<LoggingContext> endAction)
        {
            Contract.Requires(parentLoggingContext != null);
            Contract.Requires(endAction != null);

            LoggingContext = new LoggingContext(parentLoggingContext, phaseFriendlyName);
            m_aggregator   = aggregator;
            m_endAction    = endAction;

            if (!string.IsNullOrWhiteSpace(phaseFriendlyName))
            {
                m_stopwatch = new Stopwatch();
                m_stopwatch.Start();
            }
            else
            {
                m_stopwatch = null;
            }

            m_isDisposed = false;
        }

        /// <summary>
        /// Logs the performance counters gathered by a PerformanceCollector.Aggregator as a single bulk statistic event.
        /// </summary>
        public static void LogPerformanceCollector(PerformanceCollector.Aggregator aggregator, LoggingContext loggingContext, string description, long? duration = null)
        {
            Contract.RequiresNotNull(aggregator);
            Contract.RequiresNotNull(loggingContext);
            Contract.RequiresNotNullOrWhiteSpace(description);

            // Only log if at least one sample was taken
            if (aggregator.ProcessThreadCount.Count > 0)
            {
                int processAverageThreadCount         = ConvertToInt(aggregator.ProcessThreadCount.Average);
                int processMaximumPrivateMegaBytes    = ConvertToInt(aggregator.ProcessPrivateMB.Maximum);
                int processMaximumWorkingSetMegaBytes = ConvertToInt(aggregator.ProcessWorkingSetMB.Maximum);
                int processAverageWorkingSetMegaBytes = ConvertToInt(aggregator.ProcessWorkingSetMB.Average);
                int processMaximumHeldMegaBytes       = ConvertToInt(aggregator.ProcessHeldMB.Maximum);
                int processAverageCPUTime             = ConvertToInt(aggregator.ProcessCpu.Average);
                int machineAverageCPUTime             = ConvertToInt(aggregator.MachineCpu.Average);
                int machineAverageCPUWMITime          = ConvertToInt(aggregator.MachineCpuWMI.Average);
                int jobObjectAverageCPUTime           = ConvertToInt(aggregator.JobObjectCpu.Average);

                int machineMinimumAvailableMemoryMegabytes = ConvertToInt(aggregator.MachineAvailablePhysicalMB.Minimum);

                Dictionary<string, long> dict = new Dictionary<string, long>(8 + aggregator.DiskStats.Count);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_ProcessAverageThreadCount), processAverageThreadCount);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_ProcessMaximumPrivateMB), processMaximumPrivateMegaBytes);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_ProcessMaximumWorkingSetPrivateMB), processMaximumWorkingSetMegaBytes);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_ProcessAverageWorkingSetPrivateMB), processAverageWorkingSetMegaBytes);
                if (processMaximumHeldMegaBytes > 0)
                {
                    dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_ProcessMaximumHeldMB), processMaximumHeldMegaBytes);
                }

                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_ProcessAverageCPUTime), processAverageCPUTime);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_MachineAverageCPUTime), machineAverageCPUTime);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_MachineAverageCPUWMITime), machineAverageCPUWMITime);
                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_JobObjectAverageCPUTime), jobObjectAverageCPUTime);

                dict.Add(GetCategorizedStatisticName(description, Statistics.Counter_MachineMinimumAvailableMemoryMB), machineMinimumAvailableMemoryMegabytes);

                if (duration != null)
                {
                    dict.Add(GetCategorizedStatisticName(description, "DurationMs"), duration.Value);
                }

                foreach (var diskStat in aggregator.DiskStats)
                {
                    var activeTime = diskStat.CalculateActiveTime(lastOnly: false);
                    dict.Add(GetCategorizedStatisticName(GetCategorizedStatisticName(description, Statistics.MachineAverageDiskActiveTime), diskStat.Drive), activeTime);
                }

                Logger.Log.BulkStatistic(loggingContext, dict);
            }
        }
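
        /// <summary>
        /// Illustrative sketch only (not part of the original snippet): one way a caller might
        /// report the aggregated counters once a phase finishes. The phaseAggregator,
        /// loggingContext, and phaseStopwatch parameters are assumed to be supplied by the
        /// caller; only the LogPerformanceCollector signature above is taken from the code itself.
        /// </summary>
        private static void ExampleLogPhaseStatistics(
            PerformanceCollector.Aggregator phaseAggregator,
            LoggingContext loggingContext,
            Stopwatch phaseStopwatch)
        {
            // "ExecutePhase" is a hypothetical description; it becomes the category prefix
            // of every statistic name emitted by LogPerformanceCollector.
            LogPerformanceCollector(
                phaseAggregator,
                loggingContext,
                "ExecutePhase",
                phaseStopwatch.ElapsedMilliseconds);
        }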
Example #3
        /// <summary>
        /// Updates the limiting resource based on observed state
        /// </summary>
        /// <param name="aggregator">Performance Aggregator</param>
        /// <param name="readyProcessPips">Process pips whose graph dependencies have been satisfied but are not currently executing</param>
        /// <param name="executingProcessPips">Number of process pips that are currently executing</param>
        /// <param name="lastLimitingResource">The most recent limiting worker resource</param>
        internal LimitingResource OnPerfSample(PerformanceCollector.Aggregator aggregator, long readyProcessPips, long executingProcessPips, WorkerResource? lastLimitingResource)
        {
            if (m_lastSnapshotUtc == DateTime.MinValue)
            {
                // We don't have a window, so don't collect this sample and just remember when the next window starts
                m_lastSnapshotUtc = DateTime.UtcNow;
                return LimitingResource.Other;
            }

            LimitingResource limitingResource = DetermineLimitingResource(aggregator, readyProcessPips, executingProcessPips, lastLimitingResource);

            UpdateAggregations(limitingResource);

            return limitingResource;
        }
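
        // Illustrative sketch only (not part of the original snippet). The field and method below
        // are hypothetical; only the OnPerfSample signature above is taken from the code itself.
        private readonly Dictionary<LimitingResource, int> m_samplesByLimitingResource = new Dictionary<LimitingResource, int>();

        /// <summary>
        /// Hypothetical periodic sample callback: classifies the current window and tallies the result.
        /// </summary>
        private void ExampleCollectSample(
            PerformanceCollector.Aggregator aggregator,
            long readyProcessPips,
            long executingProcessPips,
            WorkerResource? lastLimitingResource)
        {
            // Classify what limited the build during this sample window...
            LimitingResource limitingResource = OnPerfSample(aggregator, readyProcessPips, executingProcessPips, lastLimitingResource);

            // ...and count how many windows each limiting resource accounted for, so a
            // breakdown can be reported when the build finishes.
            m_samplesByLimitingResource.TryGetValue(limitingResource, out int count);
            m_samplesByLimitingResource[limitingResource] = count + 1;
        }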
Example #4
        /// <summary>
        /// Private constructor to enforce creation to go through the Start() method
        /// </summary>
        private TimedBlock(
            LoggingContext parentLoggingContext,
            PerformanceCollector.Aggregator aggregator,
            string phaseFriendlyName,
            Action<LoggingContext, TEndObject> endAction,
            Func<TEndObject> endObjGetter)
        {
            Contract.Requires(parentLoggingContext != null);
            Contract.Requires(endAction != null);
            Contract.Requires(endObjGetter != null);

            m_loggingContext = new LoggingContext(parentLoggingContext, phaseFriendlyName);
            m_aggregator     = aggregator;
            m_endAction      = endAction;
            m_endObjGetter   = endObjGetter;
            m_stopwatch      = Stopwatch.StartNew();
            m_isDisposed     = false;
        }
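
        // Illustrative sketch only: the constructor above is private, so callers obtain an
        // instance through the Start(...) factory mentioned in the summary and dispose it when
        // the phase ends. The exact Start overload is not shown in this snippet, so the
        // parameters below merely mirror the constructor and are assumptions.
        //
        //   using (var timedBlock = TimedBlock<TEndObject>.Start(
        //       parentLoggingContext,
        //       aggregator,
        //       "ExamplePhase",
        //       endAction: (context, endObject) => { /* log the phase-end event */ },
        //       endObjGetter: () => default))
        //   {
        //       // Work for the phase runs here; the end action and elapsed time are
        //       // recorded when the block is disposed.
        //   }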
Example #5
 public MachinePerformanceCollector()
 {
     _perfStatsAggregator = _collector.CreateAggregator();
 }
Example #6
        /// <summary>
        /// Determines what build execution is being limited by for the sample period
        /// </summary>
        private LimitingResource DetermineLimitingResource(PerformanceCollector.Aggregator aggregator, long readyProcessPips, long executingProcessPips, WorkerResource? workerResource)
        {
            // Determining the heuristic on distributed builds requires some more thought. For now just bucket them as Other
            // to keep from showing possibly incorrect data
            if (m_isDistributed)
            {
                return LimitingResource.Other;
            }

            // High CPU trumps all other factors
            if (aggregator.MachineCpu.Latest > 95)
            {
                return LimitingResource.CPU;
            }

            // Next up is low available memory. Getting too low will cause memory paging to disk which is very bad, but
            // it will also cause more cycles to be spent in the GC and limit the effectiveness of filesystem caching.
            // Hence the number is set to a few hundred MB instead of zero
            if (aggregator.MachineAvailablePhysicalMB.Latest < 300)
            {
                return LimitingResource.Memory;
            }

            // The scheduler has backed off on executing additional process pips because of projected memory usage,
            // even though the graph and concurrency configuration would allow it
            if (workerResource.HasValue && workerResource.Value == WorkerResource.AvailableMemoryMb)
            {
                return LimitingResource.ProjectedMemory;
            }

            // Some other user configured semaphore is preventing the scheduler from launching additional processes.
            if (workerResource.HasValue &&
                workerResource.Value != WorkerResource.AvailableMemoryMb &&
                workerResource.Value != WorkerResource.AvailableProcessSlots &&
                workerResource.Value != WorkerResource.ResourcesAvailable &&
                workerResource.Value != WorkerResource.Status &&
                workerResource.Value != WorkerResource.TotalProcessSlots)
            {
                return LimitingResource.Semaphore;
            }

            // Next we look for any disk with a relatively high percentage of active time
            foreach (var disk in aggregator.DiskStats)
            {
                if (disk.CalculateActiveTime(lastOnly: true) > 95)
                {
                    return LimitingResource.Disk;
                }
            }

            // Then we look for low-ish available ready pips. This isn't zero because we are sampling and we might
            // just hit a sample where the queue wasn't completely drained. The number 3 isn't very scientific
            if (readyProcessPips < 3)
            {
                return LimitingResource.GraphShape;
            }

            // If the number of running pips is at the queue maximum, and no machine resources are constrained, the
            // pips are probably contending with each other. There may be headroom to add more pips
            if (((1.0 * executingProcessPips) / m_maxProcessPips) > .95)
            {
                return LimitingResource.ConcurrencyLimit;
            }

            // We really don't expect to fall through to this case. But track it separately so we know if the heuristic
            // needs to be updated.
            // DEBUGGING ONLY
            // Console.WriteLine("CPU:{0}, AvailableMB:{1}, ReadyPips:{2}, RunningPips:{3}", aggregator.MachineCpu.Latest, aggregator.MachineAvailablePhysicalMB.Latest, readyPips, runningPips);
            // Console.WriteLine();
            return LimitingResource.Other;
        }

        /// <summary>
        /// Determines what build execution is being limited by for the sample period
        /// </summary>
        private LimitingResource DetermineLimitingResource(PerformanceCollector.Aggregator aggregator, long readyProcessPips, long executingProcessPips, WorkerResource? lastConcurrencyLimiter)
        {
            // (1) First, check the specific limiting resources, such as the scheduler holding back processes due to projected memory usage or user-specified semaphores.

            if (lastConcurrencyLimiter.HasValue)
            {
                // Blocking on a semaphore trumps all other factors:
                // a user-configured semaphore is preventing the scheduler from launching additional processes.
                if (lastConcurrencyLimiter.Value.PrecedenceType == WorkerResource.Precedence.SemaphorePrecedence)
                {
                    return LimitingResource.Semaphore;
                }

                // The scheduler has backed off on executing additional process pips because of projected memory usage,
                // even though the graph and concurrency configuration would allow it
                if (lastConcurrencyLimiter.Value == WorkerResource.AvailableMemoryMb || lastConcurrencyLimiter.Value == WorkerResource.AvailableCommitMb)
                {
                    return LimitingResource.ProjectedMemory;
                }

                // Check whether any dispatcher queue is blocked due to unavailable slots.
                if (lastConcurrencyLimiter.Value == WorkerResource.AvailableProcessSlots ||
                    lastConcurrencyLimiter.Value == WorkerResource.AvailableMaterializeInputSlots ||
                    lastConcurrencyLimiter.Value == WorkerResource.AvailableCacheLookupSlots ||
                    lastConcurrencyLimiter.Value == WorkerResource.AvailableLightSlots ||
                    lastConcurrencyLimiter.Value == WorkerResource.ModuleAffinity)
                {
                    return LimitingResource.UnavailableSlots;
                }
            }

            // (2) Then, check for high resource consumption.

            // High CPU trumps the remaining factors
            if (aggregator.MachineCpu.Latest > 98)
            {
                return LimitingResource.CPU;
            }

            // Next up is low available memory. Getting too low will cause memory paging to disk which is very bad, but
            // it will also cause more cycles to be spent in the GC and limit the effectiveness of filesystem caching.
            // Hence the number is set to a few hundred MB instead of zero
            if (aggregator.MachineAvailablePhysicalMB.Latest < 300)
            {
                return LimitingResource.Memory;
            }

            // Next we look for any disk with a relatively high percentage of active time
            foreach (var disk in aggregator.DiskStats)
            {
                if (disk.CalculateActiveTime(lastOnly: true) > 95)
                {
                    return LimitingResource.Disk;
                }
            }

            // Then we look for low-ish available ready pips. This isn't zero because we are sampling and we might
            // just hit a sample where the queue wasn't completely drained. The number 3 isn't very scientific
            if (readyProcessPips < 3)
            {
                return LimitingResource.GraphShape;
            }

            // If the number of running pips is at the queue maximum, and no machine resources are constrained, the
            // pips are probably contending with each other. There may be headroom to add more pips
            if (!m_isDistributed && (((1.0 * executingProcessPips) / m_maxProcessPips) > .95))
            {
                return LimitingResource.ConcurrencyLimit;
            }

            // We really don't expect to fall through to this case. But track it separately so we know if the heuristic
            // needs to be updated.
            // DEBUGGING ONLY
            // Console.WriteLine("CPU:{0}, AvailableMB:{1}, ReadyPips:{2}, RunningPips:{3}", aggregator.MachineCpu.Latest, aggregator.MachineAvailablePhysicalMB.Latest, readyPips, runningPips);
            // Console.WriteLine();
            return LimitingResource.Other;
        }
 /// <summary>
 /// Sometimes, certain perf counters are not available on some machines. Try various counters since the intent
 /// of this test is to validate the nesting, not the counters themselves
 /// </summary>
 private static int GetMaxAggregatorCount(PerformanceCollector.Aggregator aggregator)
 {
     return Math.Max(Math.Max(aggregator.MachineAvailablePhysicalMB.Count, aggregator.MachineCpu.Count), aggregator.ProcessCpu.Count);
 }
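
 /// <summary>
 /// Illustrative sketch only (not part of the original snippet): how a nesting test might use
 /// the helper above. It assumes the outer aggregator is attached before and detached after the
 /// nested one, so whichever counter is available should have observed at least as many samples;
 /// the outer/nested variable names are hypothetical.
 /// </summary>
 private static void ExampleValidateNesting(
     PerformanceCollector.Aggregator outerAggregator,
     PerformanceCollector.Aggregator nestedAggregator)
 {
     int outerSamples  = GetMaxAggregatorCount(outerAggregator);
     int nestedSamples = GetMaxAggregatorCount(nestedAggregator);

     if (outerSamples < nestedSamples)
     {
         throw new InvalidOperationException("The nested aggregator collected more samples than its parent.");
     }
 }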