Example #1
        private static void RunBencher(IBencher bencher)
        {
            bencher.ResetResults();
            if (PerformSetBenchmarks)
            {
                // set benches
                WriteLine("Set fetches");
                WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    var result = bencher.PerformSetBenchmark();
                    ReportSetResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                WriteLine("Single element fetches");
                WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    var result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
                    ReportIndividualResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();

                    if (ApplyAntiFloodForVMUsage)
                    {
                        // The sleep avoids hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
                        // during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
                        // on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs with very fast frameworks in some
                        // runs (so more than 2 runs end up slow).
                        Thread.Sleep(400);
                    }
                }
            }
        }
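
The GC.Collect / GC.WaitForPendingFinalizers / GC.Collect sequence repeated after every iteration forces a full collection, including objects that were waiting for finalization, so a collection triggered by the previous run's garbage does not land in the middle of the next timed run. A minimal sketch of wrapping that idiom in a reusable helper (the class and method names here are hypothetical, not part of the example above):

        using System;

        internal static class GCHelper
        {
            // Forces a full garbage collection between bench runs so collections caused by the
            // previous iteration's garbage don't happen in the middle of the next timed run.
            public static void ForceFullCollect()
            {
                GC.Collect();                   // collect everything currently unreachable
                GC.WaitForPendingFinalizers();  // let finalizers of the collected objects run
                GC.Collect();                   // collect the objects those finalizers released
            }
        }
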
Example #2
		private static void RunBencher(IBencher bencher)
		{
			bencher.ResetResults();
			if (PerformSetBenchmarks)
			{
				// set benches
				Console.WriteLine("Set fetches");
				Console.WriteLine("-------------------------");
				for (int i = 0; i < LoopAmount; i++)
				{
					var result = bencher.PerformSetBenchmark();
					ReportSetResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();
				}
			}
			if (PerformIndividualBenchMarks)
			{
				// individual benches
				Console.WriteLine("Single element fetches");
				Console.WriteLine("-------------------------");
				for (int i = 0; i < LoopAmount; i++)
				{
					var result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
					ReportIndividualResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();

					if(ApplyAntiFloodForVMUsage)
					{
						// The sleep avoids hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
						// during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
						// on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs with very fast frameworks in some
						// runs (so more than 2 runs end up slow).
						Thread.Sleep(1000);
					}
				}
			}
		}
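
This variant is identical to Example #1 apart from the explicit Console.WriteLine calls and a longer anti-flood delay (1000 ms instead of 400 ms). A sketch of pulling that delay into one configurable place instead of hard-coding it per caller (the AntiFlood class and its DelayMs property are hypothetical):

        using System.Threading;

        internal static class AntiFlood
        {
            // Hypothetical: delay in milliseconds between individual-fetch iterations when benching
            // against a VM. The examples hard-code 400 ms or 1000 ms; one property keeps it in one place.
            public static int DelayMs { get; set; } = 400;

            public static void Pause(bool applyAntiFloodForVMUsage)
            {
                if (applyAntiFloodForVMUsage)
                {
                    // Give the target server (often a VM) time to recover between bursts of queries
                    // so a stall doesn't skew the next measured run.
                    Thread.Sleep(DelayMs);
                }
            }
        }
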
Example #3
        private static void RunBencher(IBencher bencher)
        {
            bencher.ResetResults();
            Console.WriteLine("First one warm-up run of each bench type to initialize constructs. Results will not be collected.");
            var result = bencher.PerformSetBenchmark(discardResults: true);

            OriginalController.ReportSetResult(result);
            if (bencher.SupportsEagerLoading)
            {
                result = bencher.PerformEagerLoadBenchmark(discardResults: true);
                OriginalController.ReportEagerLoadResult(result);
                if (PerformAsyncBenchmarks && bencher.SupportsAsync)
                {
                    result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: true);
                    OriginalController.ReportEagerLoadResult(result);
                }
            }
            if (PerformIndividualBenchMarks)
            {
                result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
                OriginalController.ReportIndividualResult(result);
            }
            Console.WriteLine("\nStarting bench runs...");
            if (PerformSetBenchmarks)
            {
                // set benches
                Console.WriteLine("Set fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformSetBenchmark();
                    OriginalController.ReportSetResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();
                }
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                Console.WriteLine("\nSingle element fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
                    OriginalController.ReportIndividualResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();

                    if (ApplyAntiFloodForVMUsage)
                    {
                        // The sleep avoids hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
                        // during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
                        // on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs with very fast frameworks in some
                        // runs (so more than 2 runs end up slow).
#pragma warning disable CS0162
                        Thread.Sleep(400);
#pragma warning restore CS0162
                    }
                }
            }
            if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
            {
                // eager load benches
                Console.WriteLine("\nEager Load fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformEagerLoadBenchmark();
                    OriginalController.ReportEagerLoadResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();
                }
            }
            if (PerformAsyncBenchmarks && bencher.SupportsEagerLoading && bencher.SupportsAsync)
            {
                // async eager load benches
                Console.WriteLine("\nAsync eager Load fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: false);
                    OriginalController.ReportEagerLoadResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();
                }
            }
        }
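
The #pragma warning disable CS0162 around Thread.Sleep suggests that ApplyAntiFloodForVMUsage is a compile-time constant in this version: when the constant is false, the compiler proves the sleep unreachable and raises CS0162 ("unreachable code detected"). A minimal reproduction of that situation, with the constant declaration being an assumption:

        internal static class AntiFloodCs0162Example
        {
            // Assumption: declared as a const rather than a readonly field. With the const set to
            // false, the body of the if below is provably unreachable, which triggers CS0162.
            private const bool ApplyAntiFloodForVMUsage = false;

            public static void ThrottledStep()
            {
                if (ApplyAntiFloodForVMUsage)
                {
#pragma warning disable CS0162 // unreachable code detected while the const is false
                    System.Threading.Thread.Sleep(400);
#pragma warning restore CS0162
                }
            }
        }
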
Example #4
        private static void RunMemoryAnalysisForBencher(IBencher bencher)
        {
            Console.WriteLine("\nStarting bench runs...");
            BenchResult result;

            if (PerformSetBenchmarks)
            {
                // set benches
                Console.WriteLine("Set fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformSetBenchmark(discardResults: true);
                OriginalController.ReportMemoryUsageSetResult(result);
                bencher.MemorySetBenchmarks = result.NumberOfBytesAllocated;
                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                Console.WriteLine("\nSingle element fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
                OriginalController.ReportMemoryUsageIndividualResult(result);
                bencher.MemoryIndividualBenchmarks = result.NumberOfBytesAllocated;

                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();

                if (ApplyAntiFloodForVMUsage)
                {
                    // The sleep avoids hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
                    // during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
                    // on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs with very fast frameworks in some
                    // runs (so more than 2 runs end up slow).
#pragma warning disable CS0162
                    Thread.Sleep(400);
#pragma warning restore CS0162
                }
            }
            if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
            {
                // eager load benches
                Console.WriteLine("\nEager Load fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformEagerLoadBenchmark(discardResults: true);
                OriginalController.ReportMemoryUsageEagerLoadResult(result);
                bencher.MemoryEagerLoadBenchmarks = result.NumberOfBytesAllocated;

                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();
            }
            if (PerformAsyncBenchmarks && bencher.SupportsEagerLoading && bencher.SupportsAsync)
            {
                // async eager load benches
                Console.WriteLine("\nAsync eager Load fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: true);
                OriginalController.ReportMemoryUsageEagerLoadResult(result);
                bencher.MemoryAsyncEagerLoadBenchmarks = result.NumberOfBytesAllocated;

                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();
            }
        }
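
Example #4 stores result.NumberOfBytesAllocated per bench type, but how the bencher measures allocations is not shown. One common approach on .NET Core is GC.GetAllocatedBytesForCurrentThread(); the sketch below uses that API with a simplified stand-in result type, so both types here are assumptions rather than the library's real ones:

        using System;

        public sealed class AllocationResult
        {
            public long NumberOfBytesAllocated { get; set; }
        }

        internal static class AllocationMeter
        {
            // Sketch: measure the bytes allocated on the current thread while an action runs.
            // Only meaningful when the benchmark does its allocations on this thread.
            public static AllocationResult Measure(Action benchAction)
            {
                long before = GC.GetAllocatedBytesForCurrentThread();
                benchAction();
                long after = GC.GetAllocatedBytesForCurrentThread();
                return new AllocationResult { NumberOfBytesAllocated = after - before };
            }
        }
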
Example #5
		private static void RunBencher(IBencher bencher)
		{
			bencher.ResetResults();
			Console.WriteLine("First one warm-up run to initialize constructs. Results will not be collected.");
			var result = bencher.PerformSetBenchmark(discardResults: true);
			ReportSetResult(bencher, result);
			Console.WriteLine("Starting bench runs...");
			if(PerformSetBenchmarks)
			{
				// set benches
				Console.WriteLine("Set fetches");
				Console.WriteLine("-------------------------");
				for(int i = 0; i < LoopAmount; i++)
				{
					result = bencher.PerformSetBenchmark();
					ReportSetResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();
				}
			}
			if (PerformIndividualBenchMarks)
			{
				// individual benches
				Console.WriteLine("Single element fetches");
				Console.WriteLine("-------------------------");
				for (int i = 0; i < LoopAmount; i++)
				{
					result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
					ReportIndividualResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();

					if(ApplyAntiFloodForVMUsage)
					{
						// The sleep avoids hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
						// during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
						// on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs with very fast frameworks in some
						// runs (so more than 2 runs end up slow).
#pragma warning disable CS0162
#if DNXCORE50
						throw new NotImplementedException(nameof(ApplyAntiFloodForVMUsage));
#else
						Thread.Sleep(400);
#endif
#pragma warning restore CS0162
					}
				}
			}
		}
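
For context, RunBencher is typically called once per registered bencher. A hypothetical driver loop is sketched below; the method name, the registeredBenchers parameter and the header formatting are assumptions, not taken from the examples above (assumes using System and System.Collections.Generic, with IBencher and RunBencher as shown in the examples):

        private static void RunAllBenchers(IEnumerable<IBencher> registeredBenchers)
        {
            foreach (var bencher in registeredBenchers)
            {
                // Print which bencher is about to run, then execute the full bench suite for it.
                Console.WriteLine("\n{0}", bencher.GetType().Name);
                Console.WriteLine("=========================");
                RunBencher(bencher);
            }
        }
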
Example #6
        private static void RunBencher(IBencher bencher)
        {
            bencher.ResetResults();
            Console.WriteLine(
                "First one warm-up run of each bench type to initialize constructs. Results will not be collected.");
            BenchResult result = bencher.PerformSetBenchmark(discardResults: true);
            ReportSetResult(result);
            if (bencher.SupportsEagerLoading)
            {
                result = bencher.PerformEagerLoadBenchmark(discardResults: true);
                ReportEagerLoadResult(bencher, result);
            }
            if (PerformIndividualBenchMarks)
            {
                result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
                ReportIndividualResult(bencher, result);
            }
            Console.WriteLine("\nStarting bench runs...");
            if (PerformSetBenchmarks)
            {
                // set benches
                Console.WriteLine("Set fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformSetBenchmark();
                    ReportSetResult(result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                Console.WriteLine("\nSingle element fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
                    ReportIndividualResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
            if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
            {
                // eager load benches
                Console.WriteLine("\nEager Load fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformEagerLoadBenchmark();
                    ReportEagerLoadResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
        }
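
Taken together, the six examples exercise the following surface on IBencher. The sketch below is reconstructed from the calls above and is only an approximation: the key collection type, the optional-parameter shape and the property types are assumptions, not the library's actual definition; BenchResult is the result type referenced in Examples #3, #4 and #6 (assumes using System.Collections.Generic):

        public interface IBencher
        {
            // Capability flags checked before running the optional bench types.
            bool SupportsEagerLoading { get; }
            bool SupportsAsync { get; }

            // Per-bench-type allocation totals written back by the memory-analysis pass (Example #4).
            long MemorySetBenchmarks { get; set; }
            long MemoryIndividualBenchmarks { get; set; }
            long MemoryEagerLoadBenchmarks { get; set; }
            long MemoryAsyncEagerLoadBenchmarks { get; set; }

            void ResetResults();
            BenchResult PerformSetBenchmark(bool discardResults = false);
            BenchResult PerformIndividualBenchMark(List<int> keys, bool discardResults = false);
            BenchResult PerformEagerLoadBenchmark(bool discardResults = false);
            BenchResult PerformAsyncEagerLoadBenchmark(bool discardResults = false);
        }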