Example #1
 private static void DisplayBencherInfo(IBencher bencher, string linePrefix, bool suffixWithDashLine)
 {
     Console.Write(linePrefix);
     Console.WriteLine("{0}. Change tracking: {1}. Caching: {2}.", bencher.CreateFrameworkName(), bencher.UsesChangeTracking, bencher.UsesCaching);
     if (suffixWithDashLine)
     {
         Console.WriteLine("--------------------------------------------------------------------------------------------");
     }
 }
Example #2
 private static void DisplayBencherInfo(IBencher bencher, string linePrefix, bool suffixWithDashLine)
 {
     Console.Write(linePrefix);
     Console.WriteLine(
         "{0}. Change tracking: {1}. Caching: {2}.",
         bencher.CreateFrameworkName(),
         bencher.UsesChangeTracking,
         bencher.UsesCaching);
     if (suffixWithDashLine)
     {
         Console.WriteLine(
             "--------------------------------------------------------------------------------------------");
     }
 }
Example #3
        private static void RunBencher(IBencher bencher)
        {
            bencher.ResetResults();
            if (PerformSetBenchmarks)
            {
                // set benches
                WriteLine("Set fetches");
                WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    var result = bencher.PerformSetBenchmark();
                    ReportSetResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                WriteLine("Single element fetches");
                WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    var result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
                    ReportIndividualResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();

                    if (ApplyAntiFloodForVMUsage)
                    {
                        // sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
                        // during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
                        // on the target server (LoopAmount * IndividualKeysAmount), which will hurt performance on VMs with very fast frameworks in some
                        // cases in some runs (so more than 2 runs are slow).
                        Thread.Sleep(400);
                    }
                }
            }
        }
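
Note: Examples #3, #7 and #18 call an unqualified WriteLine rather than Console.WriteLine. The helper itself is not part of this listing; the simplest assumption that makes those snippets compile is a thin wrapper like the hypothetical one below (a static import of System.Console would work just as well), and in the original project it may also mirror output to a results file.

 // Hypothetical helper assumed by Examples #3, #7 and #18; not taken from the listing.
 private static void WriteLine(string format, params object[] args)
 {
     Console.WriteLine(format, args);
 }
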
Example #4
 private static void DisplayBencherInfo(IBencher bencher)
 {
     OriginalController.DisplayBencherInfo(bencher, "\n", suffixWithDashLine: true);
 }
Example #5
        private static void RunBencher(IBencher bencher)
        {
            bencher.ResetResults();
            Console.WriteLine("First one warm-up run of each bench type to initialize constructs. Results will not be collected.");
            var result = bencher.PerformSetBenchmark(discardResults: true);

            OriginalController.ReportSetResult(result);
            if (bencher.SupportsEagerLoading)
            {
                result = bencher.PerformEagerLoadBenchmark(discardResults: true);
                OriginalController.ReportEagerLoadResult(result);
                if (PerformAsyncBenchmarks && bencher.SupportsAsync)
                {
                    result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: true);
                    OriginalController.ReportEagerLoadResult(result);
                }
            }
            if (PerformIndividualBenchMarks)
            {
                result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
                OriginalController.ReportIndividualResult(result);
            }
            Console.WriteLine("\nStarting bench runs...");
            if (PerformSetBenchmarks)
            {
                // set benches
                Console.WriteLine("Set fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformSetBenchmark();
                    OriginalController.ReportSetResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();
                }
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                Console.WriteLine("\nSingle element fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
                    OriginalController.ReportIndividualResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();

                    if (ApplyAntiFloodForVMUsage)
                    {
                        // sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
                        // during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
                        // on the target server (LoopAmount * IndividualKeysAmount), which will hurt performance on VMs with very fast frameworks in some
                        // cases in some runs (so more than 2 runs are slow).
#pragma warning disable CS0162
                        Thread.Sleep(400);
#pragma warning restore CS0162
                    }
                }
            }
            if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
            {
                // eager load benches
                Console.WriteLine("\nEager Load fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformEagerLoadBenchmark();
                    OriginalController.ReportEagerLoadResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();
                }
            }
            if (PerformAsyncBenchmarks && bencher.SupportsEagerLoading && bencher.SupportsAsync)
            {
                // async eager load benches
                Console.WriteLine("\nAsync eager Load fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: false);
                    OriginalController.ReportEagerLoadResult(result);

                    // avoid having the GC collect in the middle of a run.
                    OriginalController.ForceGCCollect();
                }
            }
        }
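
Note: Example #5 (and Example #6 below) factors the inline GC calls of the earlier RunBencher variants into OriginalController.ForceGCCollect(). That helper is not shown in this listing; based on the inline sequence used in Examples #3, #11 and #15, a plausible sketch is:

 // Assumed implementation, mirroring the inline pattern from the older variants:
 // collect, wait for finalizers, collect again, so the GC doesn't kick in mid-run.
 public static void ForceGCCollect()
 {
     GC.Collect();
     GC.WaitForPendingFinalizers();
     GC.Collect();
 }
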
Example #6
        private static void RunMemoryAnalysisForBencher(IBencher bencher)
        {
            Console.WriteLine("\nStarting bench runs...");
            BenchResult result;

            if (PerformSetBenchmarks)
            {
                // set benches
                Console.WriteLine("Set fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformSetBenchmark(discardResults: true);
                OriginalController.ReportMemoryUsageSetResult(result);
                bencher.MemorySetBenchmarks = result.NumberOfBytesAllocated;
                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                Console.WriteLine("\nSingle element fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
                OriginalController.ReportMemoryUsageIndividualResult(result);
                bencher.MemoryIndividualBenchmarks = result.NumberOfBytesAllocated;

                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();

                if (ApplyAntiFloodForVMUsage)
                {
                    // sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
                    // during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
                    // on the target server (LoopAmount * IndividualKeysAmount), which will hurt performance on VMs with very fast frameworks in some
                    // cases in some runs (so more than 2 runs are slow).
#pragma warning disable CS0162
                    Thread.Sleep(400);
#pragma warning restore CS0162
                }
            }
            if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
            {
                // eager load benches
                Console.WriteLine("\nEager Load fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformEagerLoadBenchmark(discardResults: true);
                OriginalController.ReportMemoryUsageEagerLoadResult(result);
                bencher.MemoryEagerLoadBenchmarks = result.NumberOfBytesAllocated;

                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();
            }
            if (PerformAsyncBenchmarks && bencher.SupportsEagerLoading && bencher.SupportsAsync)
            {
                // async eager load benches
                Console.WriteLine("\nAsync eager Load fetches");
                Console.WriteLine("-------------------------");
                result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: true);
                OriginalController.ReportMemoryUsageEagerLoadResult(result);
                bencher.MemoryAsyncEagerLoadBenchmarks = result.NumberOfBytesAllocated;

                // avoid having the GC collect in the middle of a run.
                OriginalController.ForceGCCollect();
            }
        }
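
Note: the #pragma warning disable CS0162 around Thread.Sleep(400) in Examples #5, #6 and #11 hints that ApplyAntiFloodForVMUsage is a compile-time constant rather than a runtime flag: with a const false value the compiler marks the guarded sleep as unreachable code. A minimal declaration that would reproduce that warning (the real declaration and value live elsewhere in the project and may differ) is:

 // Assumption for illustration only; the actual declaration is not part of this listing.
 private const bool ApplyAntiFloodForVMUsage = false; // const false => CS0162 on the guarded Thread.Sleep(400)
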
Example #7
 private static void ReportIndividualResult(IBencher bencher, BenchResult result)
 {
     WriteLine("Number of elements fetched individually: {0}.\tTotal time: {1}ms.\tTime per element: {2}ms",
               KeysForIndividualFetches.Count, result.FetchTimeInMilliseconds,
               (double)result.FetchTimeInMilliseconds / (double)KeysForIndividualFetches.Count);
 }
Example #8
		private static void ReportSetResult(IBencher bencher, BenchResult result)
		{
			Console.WriteLine("Number of elements fetched: {0}.\tFetch took: {1}ms.\tEnumerating result took: {2}ms",
								result.NumberOfRowsFetched, result.FetchTimeInMilliseconds, result.EnumerationTimeInMilliseconds);
		}
Example #9
		private static void DisplayBencherInfo(IBencher bencher)
		{
			DisplayBencherInfo(bencher, "\n", suffixWithDashLine: true);
		}
Example #10
 private static void ReportEagerLoadResult(IBencher bencher, BenchResult result)
 {
     Console.WriteLine("[{0}] Number of elements fetched: {1} ({2}).\tFetch took: {3:N2}ms.",
                       DateTime.Now.ToString("HH:mm:ss"), result.TotalNumberOfRowsFetched, string.Join(" + ", result.NumberOfRowsFetchedPerType.Select(kvp => kvp.Value).ToArray()),
                       result.FetchTimeInMilliseconds);
 }
Example #11
		private static void RunBencher(IBencher bencher)
		{
			bencher.ResetResults();
			Console.WriteLine("First one warm-up run to initialize constructs. Results will not be collected.");
			var result = bencher.PerformSetBenchmark(discardResults: true);
			ReportSetResult(bencher, result);
			Console.WriteLine("Starting bench runs...");
			if(PerformSetBenchmarks)
			{
				// set benches
				Console.WriteLine("Set fetches");
				Console.WriteLine("-------------------------");
				for(int i = 0; i < LoopAmount; i++)
				{
					result = bencher.PerformSetBenchmark();
					ReportSetResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();
				}
			}
			if (PerformIndividualBenchMarks)
			{
				// individual benches
				Console.WriteLine("Single element fetches");
				Console.WriteLine("-------------------------");
				for (int i = 0; i < LoopAmount; i++)
				{
					result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
					ReportIndividualResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();

					if(ApplyAntiFloodForVMUsage)
					{
						// sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
						// during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
						// on the target server (LoopAmount * IndividualKeysAmount), which will hurt performance on VMs with very fast frameworks in some
						// cases in some runs (so more than 2 runs are slow). 
#pragma warning disable CS0162
#if DNXCORE50
						throw new NotImplementedException(nameof(ApplyAntiFloodForVMUsage));
#else
						Thread.Sleep(400);
#endif
#pragma warning restore CS0162
					}
				}
			}
		}
Example #12
 private static void ReportIndividualResult(IBencher bencher, BenchResult result)
 {
     Console.WriteLine("[{0}] Number of elements fetched individually: {1}.\tTotal time: {2:N2}ms.\tTime per element: {3:N2}ms",
                       DateTime.Now.ToString("HH:mm:ss"), KeysForIndividualFetches.Count, result.FetchTimeInMilliseconds,
                       result.FetchTimeInMilliseconds / KeysForIndividualFetches.Count);
 }
Example #13
		private static void ReportEagerLoadResult(IBencher bencher, BenchResult result)
		{
			Console.WriteLine("[{0}] Number of elements fetched: {1} ({2}).\tFetch took: {3:N2}ms.",
								DateTime.Now.ToString("HH:mm:ss"), result.TotalNumberOfRowsFetched, string.Join(" + ", result.NumberOfRowsFetchedPerType.Select(kvp=>kvp.Value).ToArray()),
								result.FetchTimeInMilliseconds);
		}
Example #14
		private static void ReportIndividualResult(IBencher bencher, BenchResult result)
		{
			Console.WriteLine("[{0}] Number of elements fetched individually: {1}.\tTotal time: {2:N2}ms.\tTime per element: {3:N2}ms",
								DateTime.Now.ToString("HH:mm:ss"), KeysForIndividualFetches.Count, result.FetchTimeInMilliseconds,
								result.FetchTimeInMilliseconds / KeysForIndividualFetches.Count);
		}
Example #15
		private static void RunBencher(IBencher bencher)
		{
			bencher.ResetResults();
			if (PerformSetBenchmarks)
			{
				// set benches
				Console.WriteLine("Set fetches");
				Console.WriteLine("-------------------------");
				for (int i = 0; i < LoopAmount; i++)
				{
					var result = bencher.PerformSetBenchmark();
					ReportSetResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();
				}
			}
			if (PerformIndividualBenchMarks)
			{
				// individual benches
				Console.WriteLine("Single element fetches");
				Console.WriteLine("-------------------------");
				for (int i = 0; i < LoopAmount; i++)
				{
					var result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
					ReportIndividualResult(bencher, result);

					// avoid having the GC collect in the middle of a run.
					GC.Collect();
					GC.WaitForPendingFinalizers();
					GC.Collect();

					if(ApplyAntiFloodForVMUsage)
					{
						// sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
						// during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
						// on the target server (LoopAmount * IndividualKeysAmount), which will hurt performance on VMs with very fast frameworks in some
						// cases in some runs (so more than 2 runs are slow). 
						Thread.Sleep(1000);
					}
				}
			}
		}
Example #16
 private static void DisplayBencherInfo(IBencher bencher)
 {
     DisplayBencherInfo(bencher, "\n", suffixWithDashLine: true);
 }
Example #17
		private static void DisplayBencherInfo(IBencher bencher)
		{
			Console.WriteLine("\n{0}. Change tracking: {1}. Caching: {2}.", bencher.CreateFrameworkName(), bencher.UsesChangeTracking, bencher.UsesCaching);
			Console.WriteLine("--------------------------------------------------------------------------------------------");
		}
Example #18
 private static void ReportSetResult(IBencher bencher, BenchResult result)
 {
     WriteLine("Number of elements fetched: {0}.\tFetch took: {1}ms.\tEnumerating result took: {2}ms",
               result.NumberOfRowsFetched, result.FetchTimeInMilliseconds, result.EnumerationTimeInMilliseconds);
 }
Example #19
		private static void ReportIndividualResult(IBencher bencher, BenchResult result)
		{
			Console.WriteLine("Number of elements fetched individually: {0}.\tTotal time: {1}ms.\tTime per element: {2}ms",
								KeysForIndividualFetches.Count, result.FetchTimeInMilliseconds,
								(double)result.FetchTimeInMilliseconds / (double)KeysForIndividualFetches.Count);
		}
Example #20
        private static void RunBencher(IBencher bencher)
        {
            bencher.ResetResults();
            Console.WriteLine(
                "First one warm-up run of each bench type to initialize constructs. Results will not be collected.");
            BenchResult result = bencher.PerformSetBenchmark(discardResults: true);
            ReportSetResult(result);
            if (bencher.SupportsEagerLoading)
            {
                result = bencher.PerformEagerLoadBenchmark(discardResults: true);
                ReportSetResult(result);
            }
            if (PerformIndividualBenchMarks)
            {
                result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
                ReportIndividualResult(bencher, result);
            }
            Console.WriteLine("\nStarting bench runs...");
            if (PerformSetBenchmarks)
            {
                // set benches
                Console.WriteLine("Set fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformSetBenchmark();
                    ReportSetResult(result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
            if (PerformIndividualBenchMarks)
            {
                // individual benches
                Console.WriteLine("\nSingle element fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
                    ReportIndividualResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
            if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
            {
                // eager load benches
                Console.WriteLine("\nEager Load fetches");
                Console.WriteLine("-------------------------");
                for (int i = 0; i < LoopAmount; i++)
                {
                    result = bencher.PerformEagerLoadBenchmark();
                    ReportEagerLoadResult(bencher, result);

                    // avoid having the GC collect in the middle of a run.
                    GC.Collect();
                    GC.WaitForPendingFinalizers();
                    GC.Collect();
                }
            }
        }
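
Taken together, the listing shows the display, run and report helpers used per bencher. A hypothetical driver that ties them together (the method name RunAllBenchers and the benchers parameter are assumptions, not part of the listing; usings follow the surrounding snippets) could look like:

 private static void RunAllBenchers(IEnumerable<IBencher> benchers)
 {
     foreach (var bencher in benchers)
     {
         // Print framework name, change tracking and caching flags (Example #16 overload),
         // then run the configured set / individual / eager-load benchmarks (Example #20).
         DisplayBencherInfo(bencher);
         RunBencher(bencher);
     }
 }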