Used when estimating the size of the next run. Designed to keep NBench itself from influencing the benchmarks it measures.
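The examples below construct WarmupData from an elapsed-tick count and a measured run count, and compare instances on ElapsedTicks and ActualRunsMeasured. As a rough sketch of how those two members can size the next run (all names here are illustrative, not NBench source):

        // Hypothetical sketch, not NBench source: turns the two WarmupData
        // members used throughout these examples (ElapsedTicks and
        // ActualRunsMeasured) into a per-invocation cost for sizing the
        // next benchmark run.
        public readonly struct WarmupEstimate
        {
            public WarmupEstimate(long elapsedTicks, long actualRunsMeasured)
            {
                ElapsedTicks = elapsedTicks;
                ActualRunsMeasured = actualRunsMeasured;
            }

            public long ElapsedTicks { get; }
            public long ActualRunsMeasured { get; }

            // Average cost of one invocation, in stopwatch ticks.
            public double TicksPerRun =>
                ActualRunsMeasured == 0 ? 0.0 : (double)ElapsedTicks / ActualRunsMeasured;

            // Roughly how many invocations fit into the given run time.
            public long EstimatedRunsFor(TimeSpan targetTime) =>
                TicksPerRun <= 0.0 ? 0L : (long)Math.Ceiling(targetTime.Ticks / TicksPerRun);
        }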
Code example #1
        /// <summary>
        ///     Generates a new <see cref="BenchmarkRun" /> based on the provided settings, available system metrics,
        ///     and (optionally) the duration of the last run.
        /// </summary>
        /// <param name="warmupData">Data collected during warm-up</param>
        /// <returns>A new <see cref="BenchmarkRun" /> instance.</returns>
        public BenchmarkRun NewRun(WarmupData warmupData)
        {
            var numberOfMetrics = Settings.TotalTrackedMetrics;
            var measurements    = new List<MeasureBucket>(numberOfMetrics);
            var counterSettings = Settings.CounterMeasurements.ToList();
            var counters        = new List<Counter>(counterSettings.Count);

            // need to exclude counters first
            var settingsExceptCounters = Settings.DistinctMeasurements.Except(counterSettings);

            foreach (var setting in settingsExceptCounters)
            {
                var selector  = Settings.Collectors[setting.MetricName];
                var collector = selector.Create(Settings.RunMode, warmupData, setting);
                measurements.Add(new MeasureBucket(collector));
            }

            foreach (var counterSetting in counterSettings)
            {
                var setting                = counterSetting;
                var selector               = Settings.Collectors[setting.MetricName];
                var atomicCounter          = new AtomicCounter();
                var createCounterBenchmark = new CreateCounterBenchmarkSetting(setting, atomicCounter);
                var collector              = selector.Create(Settings.RunMode, warmupData, createCounterBenchmark);

                measurements.Add(new MeasureBucket(collector));
                counters.Add(new Counter(atomicCounter, setting.CounterName));
            }

            return new BenchmarkRun(measurements, counters, Settings.Trace);
        }
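A hypothetical call site for NewRun (the builder variable and measured values are assumed, not NBench source): the ticks and run count observed during warm-up are packed into a WarmupData and fed back in to build the next run.

        // Hypothetical usage; 'builder' is assumed. The WarmupData(ticks, runs)
        // constructor matches the calls in code examples #10-#13 and #16.
        var warmupData = new WarmupData(warmupStopWatch.ElapsedTicks, runCount);
        BenchmarkRun nextRun = builder.NewRun(warmupData);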
Code example #2
File: TimingSelector.cs Project: petabridge/NBench
        public override MetricCollector Create(RunMode runMode, WarmupData warmup, IBenchmarkSetting setting)
        {
            var timingSetting = setting as TimingBenchmarkSetting;
            Contract.Assert(timingSetting != null);

            return new TimingCollector(timingSetting.TimingMetricName);
        }
Code example #3
        public override IEnumerable<MetricCollector> Create(RunMode runMode, WarmupData warmup, IBenchmarkSetting setting)
        {

            //if (warmup.ElapsedTicks <= BenchmarkConstants.SamplingPrecisionTicks)
                return new[] {new GcTotalMemoryCollector(MemoryMetricName)};
            //return new[] {new PerformanceCounterTotalMemoryCollector(MemoryMetricName)};
        }
Code example #4
File: CounterSelector.cs Project: petabridge/NBench
        public override MetricCollector Create(RunMode runMode, WarmupData warmup, IBenchmarkSetting setting)
        {
            Contract.Assert(setting != null);
            Contract.Assert(setting is CreateCounterBenchmarkSetting);
            var createCounter = setting as CreateCounterBenchmarkSetting;

            // ReSharper disable once PossibleNullReferenceException
            // resolved with Code Contracts
            return new CounterMetricCollector(createCounter.BenchmarkSetting.CounterName, createCounter.Counter);
        }
Code example #5
        public override MetricCollector Create(RunMode runMode, WarmupData warmup,
            IBenchmarkSetting setting)
        {
            Contract.Assert(setting != null);
            Contract.Assert(setting is GcBenchmarkSetting);
            var gcSetting = setting as GcBenchmarkSetting;

            // ReSharper disable once PossibleNullReferenceException
            // covered by Code Contracts
            if (gcSetting.Generation == GcGeneration.AllGc)
            {
                throw new InvalidOperationException($"{gcSetting.Generation} is not supported by this collector");
            }

            return CreateInstanceInternal((int) gcSetting.Generation);
        }
Code example #6
File: BenchmarkBuilder.cs Project: twenzel/NBench
        /// <summary>
        ///     Generates a new <see cref="BenchmarkRun" /> based on the provided settings, available system metrics,
        ///     and (optionally) the duration of the last run.
        /// </summary>
        /// <param name="warmupData">Data collected during warm-up</param>
        /// <returns>A new <see cref="BenchmarkRun" /> instance.</returns>
        public BenchmarkRun NewRun(WarmupData warmupData)
        {
            var numberOfMetrics = Settings.TotalTrackedMetrics;
            var measurements    = new List<MeasureBucket>(numberOfMetrics);
            var counters        = new List<Counter>(Settings.CounterBenchmarks.Count);

            for (var i = 0; i < Settings.DistinctMemoryBenchmarks.Count; i++)
            {
                var setting    = Settings.DistinctMemoryBenchmarks[i];
                var collectors = MemorySelectors[setting.Metric].Create(Settings.RunMode, warmupData, setting);
                foreach (var collector in collectors)
                {
                    measurements.Add(new MeasureBucket(collector));
                }
            }

            for (var i = 0; i < Settings.DistinctGcBenchmarks.Count; i++)
            {
                var setting    = Settings.DistinctGcBenchmarks[i];
                var collectors = GcSelectors[setting.Metric].Create(Settings.RunMode, warmupData, setting);
                foreach (var collector in collectors)
                {
                    measurements.Add(new MeasureBucket(collector));
                }
            }

            for (var i = 0; i < Settings.DistinctCounterBenchmarks.Count; i++)
            {
                var setting                = Settings.DistinctCounterBenchmarks[i];
                var atomicCounter          = new AtomicCounter();
                var createCounterBenchmark = new CreateCounterBenchmarkSetting(setting, atomicCounter);
                var collectors             = CounterSelector.Create(Settings.RunMode, warmupData, createCounterBenchmark);
                foreach (var collector in collectors)
                {
                    measurements.Add(new MeasureBucket(collector));
                    counters.Add(new Counter(atomicCounter, setting.CounterName));
                }
            }

            return new BenchmarkRun(measurements, counters);
        }
Code example #7
        public override IEnumerable<MetricCollector> Create(RunMode runMode, WarmupData warmup,
            IBenchmarkSetting setting)
        {
            Contract.Assert(setting != null);
            Contract.Assert(setting is GcBenchmarkSetting);
            var gcSetting = setting as GcBenchmarkSetting;

            // ReSharper disable once PossibleNullReferenceException
            // covered by Code Contracts
            if (gcSetting.Generation == GcGeneration.AllGc)
            {
                var collectors = new List<MetricCollector>(SystemInfo.MaxGcGeneration + 1);
                for (var i = 0; i <= SystemInfo.MaxGcGeneration; i++)
                {
                    collectors.Add(CreateInstanceInternal(i));
                }
                return collectors;
            }

            return new[] {CreateInstanceInternal((int) gcSetting.Generation)};
        }
Code example #8
        /// <summary>
        ///     Generates a new <see cref="BenchmarkRun" /> based on the provided settings, available system metrics,
        ///     and (optionally) the duration of the last run.
        /// </summary>
        /// <param name="warmupData">Data collected during warm-up</param>
        /// <returns>A new <see cref="BenchmarkRun" /> instance.</returns>
        public BenchmarkRun NewRun(WarmupData warmupData)
        {
            var numberOfMetrics = Settings.TotalTrackedMetrics;
            var measurements = new List<MeasureBucket>(numberOfMetrics);
            var counters = new List<Counter>(Settings.CounterBenchmarks.Count);

            for (var i = 0; i < Settings.DistinctMemoryBenchmarks.Count; i++)
            {
                var setting = Settings.DistinctMemoryBenchmarks[i];
                var collectors = MemorySelectors[setting.Metric].Create(Settings.RunMode, warmupData, setting);
                foreach (var collector in collectors)
                    measurements.Add(new MeasureBucket(collector));
            }

            for (var i = 0; i < Settings.DistinctGcBenchmarks.Count; i++)
            {
                var setting = Settings.DistinctGcBenchmarks[i];
                var collectors = GcSelectors[setting.Metric].Create(Settings.RunMode, warmupData, setting);
                foreach (var collector in collectors)
                    measurements.Add(new MeasureBucket(collector));
            }

            for (var i = 0; i < Settings.DistinctCounterBenchmarks.Count; i++)
            {
                var setting = Settings.DistinctCounterBenchmarks[i];
                var atomicCounter = new AtomicCounter();
                var createCounterBenchmark = new CreateCounterBenchmarkSetting(setting, atomicCounter);
                var collectors = CounterSelector.Create(Settings.RunMode, warmupData, createCounterBenchmark);
                foreach (var collector in collectors)
                {
                    measurements.Add(new MeasureBucket(collector));
                    counters.Add(new Counter(atomicCounter, setting.CounterName));
                }
            }

            return new BenchmarkRun(measurements, counters);
        }
Code example #9
File: WarmupData.cs Project: petabridge/NBench
 public bool Equals(WarmupData other)
 {
     return ElapsedTicks == other.ElapsedTicks && ActualRunsMeasured == other.ActualRunsMeasured;
 }
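A type that overrides Equals on these two members should hash on the same members; a matching GetHashCode might look like the following sketch (not the NBench source):

 public override int GetHashCode()
 {
     // Sketch only: combine the same two members compared in Equals above.
     unchecked
     {
         return (ElapsedTicks.GetHashCode() * 397) ^ ActualRunsMeasured.GetHashCode();
     }
 }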
Code example #10
        /// <summary>
        ///     Warmup phase
        /// </summary>
        private void WarmUp()
        {
            var warmupStopWatch = new Stopwatch();
            var targetTime      = Settings.RunTime;

            Contract.Assert(targetTime != TimeSpan.Zero);
            var runCount = 0L;

            /* Pre-Warmup */
            RunSingleBenchmark();

            // check to see if pre-warmup threw an exception
            var faulted = _currentRun.IsFaulted;

            if (faulted)
            {
                /*
                 * Normally we don't ever queue up the warmup into the final stats, but we do it
                 * in failure cases so we can capture the exception thrown during warmup into
                 * the final report we're going to deliver to the end-user.
                 */
                CompletedRuns.Enqueue(_currentRun.ToReport(TimeSpan.Zero));

                return;
            }

            /* Estimate */
            Allocate(); // allocate all collectors needed
            PreRun();

            if (Settings.RunMode == RunMode.Throughput)
            {
                warmupStopWatch.Start();
                while (warmupStopWatch.ElapsedTicks < targetTime.Ticks)
                {
                    Invoker.InvokeRun(_currentRun.Context);
                    runCount++;
                }
                warmupStopWatch.Stop();
            }
            else
            {
                warmupStopWatch.Start();
                Invoker.InvokeRun(_currentRun.Context);
                runCount++;
                warmupStopWatch.Stop();
            }

            PostRun();
            Complete();

            // elapsed time
            var runTime = warmupStopWatch.ElapsedTicks;

            WarmupData = new WarmupData(runTime, runCount);

            var i = WarmupCount;

            /* Warmup to force CPU caching */
            while (i > 0 && !_currentRun.IsFaulted)
            {
                RunSingleBenchmark();
                i--;
            }
        }
Code example #11
        public override MetricCollector Create(RunMode runMode, WarmupData warmup, IBenchmarkSetting setting)
        {
            Contract.Assert(setting != null);
            Contract.Assert(setting is PerformanceCounterBenchmarkSetting);
            var counterBenchmarkSetting = setting as PerformanceCounterBenchmarkSetting;
            var name = counterBenchmarkSetting.PerformanceCounterMetric;

            // re-use the PerformanceCounter objects in our pool if possible
            if (_cache.Exists(name))
                return new PerformanceCounterValueCollector(name, name.UnitName ?? MetricNames.DefaultUnitName, _cache.Get(name), true);

            // otherwise, warm up new ones
            var maxRetries = 3;
            var currentRetries = 0;

            if (!PerformanceCounterCategory.CounterExists(name.CounterName, name.CategoryName))
                throw new NBenchException($"Performance counter {name.ToHumanFriendlyString()} is not registered on this machine. Please create it first.");

            // check to see that the instance we're interested in is registered
            if (!string.IsNullOrEmpty(name.InstanceName))
            {
                var categories = PerformanceCounterCategory.GetCategories().Where(x => x.CategoryType == PerformanceCounterCategoryType.MultiInstance).ToList();
            #if DEBUG
                Console.WriteLine("---- DEBUG -----");
                Console.WriteLine("{0} multi-instance categories detected", categories.Count);
            #endif
                var category = categories.Single(x => x.CategoryName == name.CategoryName);
                var instances = category.GetInstanceNames();

                if (!instances.Contains(name.InstanceName))
                {
            #if DEBUG
                    Console.WriteLine("---- DEBUG -----");
                    Console.WriteLine("Multi-instance? {0}", category.CategoryType);
                    foreach (var instance in instances)
                        Console.WriteLine(instance);
            #endif
                    throw new NBenchException($"Performance counter {name.CategoryName}:{name.CounterName} exists, but we could not find an instance {name.InstanceName}.");
                }

            }

            var proxy = new PerformanceCounterProxy(() =>
            {
                var counter = new PerformanceCounter(name.CategoryName, name.CounterName,
                    name.InstanceName ?? string.Empty, true);

                return counter;
            });
            while (!CanFindPerformanceCounter(name) && currentRetries <= maxRetries)
            {
                Thread.Sleep(TimeSpan.FromMilliseconds(1000 + 100 * currentRetries * currentRetries)); // a little quadratic backoff between retries (^ would be XOR, not exponentiation)
                if (proxy.CanWarmup)
                    break;
                currentRetries++;
            }

            if (!proxy.CanWarmup)
                throw new NBenchException($"Performance counter {name.ToHumanFriendlyString()} is not registered on this machine. Please create it first.");

            // cache this performance counter and pool it for re-use
            _cache.Put(name, proxy);
            return new PerformanceCounterValueCollector(name, name.UnitName ?? MetricNames.DefaultUnitName, _cache.Get(name), true);
        }
Code example #12
File: Benchmark.cs Project: ThomasBombadil/NBench
        /// <summary>
        ///     Warmup phase
        /// </summary>
        private void WarmUp()
        {
            var warmupStopWatch = new Stopwatch();
            var targetTime = Settings.RunTime;
            Contract.Assert(targetTime != TimeSpan.Zero);
            var runCount = 0L;

            /* Pre-Warmup */
            RunSingleBenchmark();

            // check to see if pre-warmup threw an exception
            var faulted = _currentRun.IsFaulted;

            if (faulted)
            {
                /*
                 * Normally we don't ever queue up the warmup into the final stats, but we do it
                 * in failure cases so we can capture the exception thrown during warmup into
                 * the final report we're going to deliver to the end-user.
                 */
                CompletedRuns.Enqueue(_currentRun.ToReport(TimeSpan.Zero));

                return;
            }

            /* Estimate */
            Allocate(); // allocate all collectors needed
            PreRun();
           
            if (Settings.RunMode == RunMode.Throughput)
            {
                warmupStopWatch.Start();
                while (warmupStopWatch.ElapsedTicks < targetTime.Ticks)
                {
                    Invoker.InvokeRun(_currentRun.Context);
                    runCount++;
                }
                warmupStopWatch.Stop();
            }
            else
            {
                warmupStopWatch.Start();
                Invoker.InvokeRun(_currentRun.Context);
                runCount++;
                warmupStopWatch.Stop();
            }
           
            PostRun();
            Complete();

            // elapsed time
            var runTime = warmupStopWatch.ElapsedTicks;

            WarmupData = new WarmupData(runTime, runCount);

            var i = WarmupCount;

            /* Warmup to force CPU caching */
            while (i > 0 && !_currentRun.IsFaulted)
            {
                RunSingleBenchmark();
                i--;
            }
        }
Code example #13
File: Benchmark.cs Project: petabridge/NBench
        /// <summary>
        ///     Warmup phase
        /// </summary>
        private void WarmUp()
        {
            Trace.Debug($"Beginning Warmups for {BenchmarkName}");
            var warmupStopWatch = new Stopwatch();
            var targetTime = Settings.RunTime;
            Contract.Assert(targetTime != TimeSpan.Zero);
            var runCount = 0L;
            var runTime = 0L;

            /* Pre-Warmup */

            Trace.Debug("----- BEGIN PRE-WARMUP -----");
            /* Estimate */
            Allocate(); // allocate all collectors needed
            PreRun();

            try
            {
                if (Settings.RunMode == RunMode.Throughput)
                {
                    Trace.Debug(
                        $"Throughput mode: estimating how many invocations of {BenchmarkName} will take {targetTime.TotalSeconds}s");

                    var estimateCount = 3;
                    var runEstimates = new long[estimateCount];
                    var timeEstimates = new long[estimateCount];
                    for (var i = 0; i <= estimateCount; i++)
                    {
                        warmupStopWatch.Start();
                        while (warmupStopWatch.ElapsedTicks < targetTime.Ticks)
                        {
                            Invoker.InvokeRun(_currentRun.Context);
                            runCount++;
                        }
                        warmupStopWatch.Stop();

                        if (i > 0)
                        {
                            runEstimates[i - 1] = runCount;
                            timeEstimates[i - 1] = warmupStopWatch.ElapsedTicks;

                        }

                        runCount = 0;
                        warmupStopWatch.Reset();
                    }

                    runCount = (long)Math.Ceiling(runEstimates.Average());
                    runTime = (long) Math.Ceiling(timeEstimates.Average());

                    Trace.Debug(
                        $"Throughput mode: executed {runCount} instances of {BenchmarkName} in roughly {targetTime.TotalSeconds}s. Using that figure for benchmark.");
                }
                else
                {
                    warmupStopWatch.Start();
                    Invoker.InvokeRun(_currentRun.Context);
                    runCount++;
                    warmupStopWatch.Stop();

                    // elapsed time
                    runTime = warmupStopWatch.ElapsedTicks;
                }
            }
            catch (Exception ex)
            {
                HandleBenchmarkRunException(ex, $"Error occurred during {BenchmarkName} RUN.");
            }

            PostRun();
            Complete(true);

            // check to see if pre-warmup threw an exception
            var faulted = _currentRun.IsFaulted;

            if (faulted)
            {
                Trace.Error($"Error occurred during pre-warmup. Exiting and producing dump...");
                /*
                 * Normally we don't ever queue up the warmup into the final stats, but we do it
                 * in failure cases so we can capture the exception thrown during warmup into
                 * the final report we're going to deliver to the end-user.
                 */
                CompletedRuns.Enqueue(_currentRun.ToReport(TimeSpan.Zero));

                return;
            }

            Trace.Debug("----- END PRE-WARMUP -----");

            WarmupData = new WarmupData(runTime, runCount);

            if (!Settings.SkipWarmups)
            {
                Trace.Debug("----- BEGIN WARMUPS -----");
                var i = _warmupCount;

                /* Warmup to force CPU caching */
                while (i > 0 && !_currentRun.IsFaulted)
                {
                    RunSingleBenchmark();
                    i--;
                }

                Trace.Debug("----- END WARMUPS -----");
            }
            else
            {
                Trace.Debug("----- SKIPPING WARMUPS -----");
            }
        }
Code example #14
 /// <summary>
 ///     Creates an instance for all applicable <see cref="MetricCollector" />s for this metric type.
 /// </summary>
 /// <param name="runMode">
 ///     The <see cref="RunMode" /> for this benchmark. Influences the type of
 ///     <see cref="MetricCollector" /> used in some instances.
 /// </param>
 /// <param name="warmup">Warmup data. Influences the type of <see cref="MetricCollector" /> used in some instances.</param>
 /// <param name="setting">An implementation-specific <see cref="IBenchmarkSetting" /></param>
 /// <returns>A new <see cref="MetricCollector" /> instance.</returns>
 public abstract MetricCollector Create(RunMode runMode, WarmupData warmup,
     IBenchmarkSetting setting);
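Code examples #2 and #4 show concrete implementations of this single-collector contract; reduced to its skeleton, an implementation looks like the sketch below (the base-class name is assumed, not NBench source):

 // Sketch of a single-collector selector; the base-class name is assumed,
 // and the cast-then-construct pattern mirrors code examples #2 and #4.
 public class ExampleTimingSelector : MetricsCollectorSelector
 {
     public override MetricCollector Create(RunMode runMode, WarmupData warmup,
         IBenchmarkSetting setting)
     {
         var timingSetting = (TimingBenchmarkSetting)setting;
         return new TimingCollector(timingSetting.TimingMetricName);
     }
 }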
Code example #15
 public bool Equals(WarmupData other)
 {
     return ElapsedTicks == other.ElapsedTicks && ActualRunsMeasured == other.ActualRunsMeasured;
 }
Code example #16
File: Benchmark.cs Project: tuongntk/NBench
        /// <summary>
        ///     Warmup phase
        /// </summary>
        private void WarmUp()
        {
            Trace.Debug($"Beginning Warmups for {BenchmarkName}");
            var warmupStopWatch = new Stopwatch();
            var targetTime      = Settings.RunTime;

            Contract.Assert(targetTime != TimeSpan.Zero);
            var runCount = 0L;
            var runTime  = 0L;

            /* Pre-Warmup */


            Trace.Debug("----- BEGIN PRE-WARMUP -----");
            /* Estimate */
            Allocate(); // allocate all collectors needed
            PreRun();

            try
            {
                if (Settings.RunMode == RunMode.Throughput)
                {
                    Trace.Debug(
                        $"Throughput mode: estimating how many invocations of {BenchmarkName} will take {targetTime.TotalSeconds}s");

                    var estimateCount = 3;
                    var runEstimates  = new long[estimateCount];
                    var timeEstimates = new long[estimateCount];
                    for (var i = 0; i <= estimateCount; i++)
                    {
                        warmupStopWatch.Start();
                        while (warmupStopWatch.ElapsedTicks < targetTime.Ticks)
                        {
                            Invoker.InvokeRun(_currentRun.Context);
                            runCount++;
                        }
                        warmupStopWatch.Stop();

                        if (i > 0)
                        {
                            runEstimates[i - 1]  = runCount;
                            timeEstimates[i - 1] = warmupStopWatch.ElapsedTicks;
                        }

                        runCount = 0;
                        warmupStopWatch.Reset();
                    }

                    runCount = (long)Math.Ceiling(runEstimates.Average());
                    runTime  = (long)Math.Ceiling(timeEstimates.Average());

                    Trace.Debug(
                        $"Throughput mode: executed {runCount} instances of {BenchmarkName} in roughly {targetTime.TotalSeconds}s. Using that figure for benchmark.");
                }
                else
                {
                    warmupStopWatch.Start();
                    Invoker.InvokeRun(_currentRun.Context);
                    runCount++;
                    warmupStopWatch.Stop();

                    // elapsed time
                    runTime = warmupStopWatch.ElapsedTicks;
                }
            }
            catch (Exception ex)
            {
                HandleBenchmarkRunException(ex, $"Error occurred during {BenchmarkName} RUN.");
            }

            PostRun();
            Complete(true);

            // check to see if pre-warmup threw an exception
            var faulted = _currentRun.IsFaulted;

            if (faulted)
            {
                Trace.Error($"Error occurred during pre-warmup. Exiting and producing dump...");

                /*
                 * Normally we don't ever queue up the warmup into the final stats, but we do it
                 * in failure cases so we can capture the exception thrown during warmup into
                 * the final report we're going to deliver to the end-user.
                 */
                CompletedRuns.Enqueue(_currentRun.ToReport(TimeSpan.Zero));

                return;
            }

            Trace.Debug("----- END PRE-WARMUP -----");


            WarmupData = new WarmupData(runTime, runCount);

            if (!Settings.SkipWarmups)
            {
                Trace.Debug("----- BEGIN WARMUPS -----");
                var i = _warmupCount;

                /* Warmup to force CPU caching */
                while (i > 0 && !_currentRun.IsFaulted)
                {
                    RunSingleBenchmark();
                    i--;
                }

                Trace.Debug("----- END WARMUPS -----");
            }
            else
            {
                Trace.Debug("----- SKIPPING WARMUPS -----");
            }
        }
Code example #17
 /// <summary>
 ///     Creates an instance for all applicable <see cref="MetricCollector" />s for this metric type.
 /// </summary>
 /// <param name="runMode">
 ///     The <see cref="RunMode" /> for this benchmark. Influences the type of
 ///     <see cref="MetricCollector" /> used in some instances.
 /// </param>
 /// <param name="warmup">Warmup data. Influences the type of <see cref="MetricCollector" /> used in some instances.</param>
 /// <param name="setting">An implementation-specific <see cref="IBenchmarkSetting" /></param>
 /// <returns>At least 1 new <see cref="MetricCollector"/> instance. Each instance will be uniquely named.</returns>
 public abstract IEnumerable<MetricCollector> Create(RunMode runMode, WarmupData warmup,
     IBenchmarkSetting setting);
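For this multi-collector contract, code example #7 is the canonical shape; a minimal sketch (base-class name and factory helper assumed, not NBench source) reduces it to:

 // Sketch of a multi-collector selector: one uniquely named collector per
 // GC generation, mirroring the AllGc branch of code example #7. The
 // base-class name and the factory helper below are assumed.
 public class AllGenerationsGcSelector : MetricsCollectorSelector
 {
     public override IEnumerable<MetricCollector> Create(RunMode runMode, WarmupData warmup,
         IBenchmarkSetting setting)
     {
         for (var generation = 0; generation <= SystemInfo.MaxGcGeneration; generation++)
             yield return CreateInstanceInternal(generation);
     }

     // Hypothetical stand-in for the per-generation factory used in code
     // examples #5 and #7.
     private MetricCollector CreateInstanceInternal(int generation) =>
         throw new NotImplementedException("per-generation collector factory");
 }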