private static void RunBencher(IBencher bencher)
{
	bencher.ResetResults();
	Console.WriteLine("First one warm-up run of each bench type to initialize constructs. Results will not be collected.");
	BenchResult result = null;
	if (PerformSetBenchmarks && bencher.SupportsSetFetch)
	{
		result = bencher.PerformSetBenchmark(discardResults: true);
		BenchController.ReportSetResult(result);
	}
	if (bencher.SupportsEagerLoading)
	{
		result = bencher.PerformEagerLoadBenchmark(discardResults: true);
		BenchController.ReportEagerLoadResult(result);
		if (PerformAsyncBenchmarks && bencher.SupportsAsync)
		{
			result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: true);
			BenchController.ReportEagerLoadResult(result);
		}
	}
	if (PerformIndividualBenchMarks && bencher.SupportsIndividualFetch)
	{
		result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
		BenchController.ReportIndividualResult(result);
	}
	if (PerformSetInsertBenchmarks && bencher.SupportsInserts)
	{
		result = bencher.PerformInsertSetBenchmark(InsertSetSize, InsertBatchSizeDefault, discardResults: true);
		BenchController.ReportInsertSetResult(result);
	}
	Console.WriteLine("Doing a GC collect...");
	BenchController.ForceGCCollect();
	Console.WriteLine("Done.");

	Console.WriteLine("\nStarting bench runs...");
	if (PerformSetBenchmarks && bencher.SupportsSetFetch)
	{
		// set benches
		Console.WriteLine("Set fetches");
		Console.WriteLine("-------------------------");
		for (int i = 0; i < LoopAmount; i++)
		{
			result = bencher.PerformSetBenchmark();
			BenchController.ReportSetResult(result);

			// avoid having the GC collect in the middle of a run.
			BenchController.ForceGCCollect();
		}
	}
	if (PerformIndividualBenchMarks && bencher.SupportsIndividualFetch)
	{
		// individual benches
		Console.WriteLine("\nSingle element fetches");
		Console.WriteLine("-------------------------");
		for (int i = 0; i < LoopAmount; i++)
		{
			result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches);
			BenchController.ReportIndividualResult(result);

			// avoid having the GC collect in the middle of a run.
			BenchController.ForceGCCollect();
			if (ApplyAntiFloodForVMUsage)
			{
				// sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
				// during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
				// on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs when very fast frameworks are
				// benched, so in some cases more than 2 runs are slow.
#pragma warning disable CS0162
				Thread.Sleep(400);
#pragma warning restore CS0162
			}
		}
	}
	if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
	{
		// eager load benches
		Console.WriteLine("\nEager Load fetches");
		Console.WriteLine("-------------------------");
		for (int i = 0; i < LoopAmount; i++)
		{
			result = bencher.PerformEagerLoadBenchmark();
			BenchController.ReportEagerLoadResult(result);

			// avoid having the GC collect in the middle of a run.
			BenchController.ForceGCCollect();
		}
	}
	if (PerformAsyncBenchmarks && bencher.SupportsEagerLoading && bencher.SupportsAsync)
	{
		// async eager load benches
		Console.WriteLine("\nAsync eager Load fetches");
		Console.WriteLine("-------------------------");
		for (int i = 0; i < LoopAmount; i++)
		{
			result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: false);
			BenchController.ReportEagerLoadResult(result);

			// avoid having the GC collect in the middle of a run.
			BenchController.ForceGCCollect();
		}
	}
	if (PerformSetInsertBenchmarks && bencher.SupportsInserts)
	{
		// set insert benches
		Console.WriteLine("\nSet Inserts");
		Console.WriteLine("-------------------------");
		for (int i = 0; i < LoopAmount; i++)
		{
			result = bencher.PerformInsertSetBenchmark(InsertSetSize, InsertBatchSizeDefault);
			BenchController.ReportInsertSetResult(result);

			// avoid having the GC collect in the middle of a run.
			BenchController.ForceGCCollect();
		}
	}
}
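/// <summary>
/// Descriptive summary added for clarity: runs each supported benchmark type once for the given bencher with result collection
/// disabled (discardResults: true), so only the number of bytes allocated per benchmark type is recorded on the bencher and
/// reported for the memory usage overview.
/// </summary>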
private static void RunMemoryAnalysisForBencher(IBencher bencher)
{
	Console.WriteLine("\nStarting bench runs...");
	BenchResult result;
	if (PerformSetBenchmarks && bencher.SupportsSetFetch)
	{
		// set benches
		Console.WriteLine("Set fetches");
		Console.WriteLine("-------------------------");
		result = bencher.PerformSetBenchmark(discardResults: true);
		BenchController.ReportMemoryUsageSetResult(result);
		bencher.MemorySetBenchmarks = result.NumberOfBytesAllocated;

		// avoid having the GC collect in the middle of a run.
		BenchController.ForceGCCollect();
	}
	if (PerformIndividualBenchMarks && bencher.SupportsIndividualFetch)
	{
		// individual benches
		Console.WriteLine("\nSingle element fetches");
		Console.WriteLine("-------------------------");
		result = bencher.PerformIndividualBenchMark(KeysForIndividualFetches, discardResults: true);
		BenchController.ReportMemoryUsageIndividualResult(result);
		bencher.MemoryIndividualBenchmarks = result.NumberOfBytesAllocated;

		// avoid having the GC collect in the middle of a run.
		BenchController.ForceGCCollect();
		if (ApplyAntiFloodForVMUsage)
		{
			// sleep is to avoid hammering the network layer on the target server. If the target server is a VM, it might stall once or twice
			// during benching, which is not what we want as it can skew the results a lot. In a very short time, a lot of queries are executed
			// on the target server (LoopAmount * IndividualKeysAmount), which can hurt performance on VMs when very fast frameworks are
			// benched, so in some cases more than 2 runs are slow.
#pragma warning disable CS0162
			Thread.Sleep(400);
#pragma warning restore CS0162
		}
	}
	if (PerformEagerLoadBenchmarks && bencher.SupportsEagerLoading)
	{
		// eager load benches
		Console.WriteLine("\nEager Load fetches");
		Console.WriteLine("-------------------------");
		result = bencher.PerformEagerLoadBenchmark(discardResults: true);
		BenchController.ReportMemoryUsageEagerLoadResult(result);
		bencher.MemoryEagerLoadBenchmarks = result.NumberOfBytesAllocated;

		// avoid having the GC collect in the middle of a run.
		BenchController.ForceGCCollect();
	}
	if (PerformAsyncBenchmarks && bencher.SupportsEagerLoading && bencher.SupportsAsync)
	{
		// async eager load benches
		Console.WriteLine("\nAsync eager Load fetches");
		Console.WriteLine("-------------------------");
		result = bencher.PerformAsyncEagerLoadBenchmark(discardResults: true);
		BenchController.ReportMemoryUsageEagerLoadResult(result);
		bencher.MemoryAsyncEagerLoadBenchmarks = result.NumberOfBytesAllocated;

		// avoid having the GC collect in the middle of a run.
		BenchController.ForceGCCollect();
	}
	if (PerformSetInsertBenchmarks && bencher.SupportsInserts)
	{
		// set inserts
		Console.WriteLine("\nSet inserts");
		Console.WriteLine("-------------------------");
		result = bencher.PerformInsertSetBenchmark(InsertSetSize, InsertBatchSizeDefault, discardResults: true);
		BenchController.ReportMemoryUsageInsertSetResult(result);
		bencher.MemorySetInsertBenchmarks = result.NumberOfBytesAllocated;

		// avoid having the GC collect in the middle of a run.
		BenchController.ForceGCCollect();
	}
}
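// Illustrative only: a minimal sketch of how RunBencher and RunMemoryAnalysisForBencher might be driven from the startup code.
// The RunAllBenchmarks method, the registeredBenchers collection and the bencher name lookup shown here are assumptions for this
// example, not part of the section above; the actual harness may wire this up differently.
private static void RunAllBenchmarks(IEnumerable<IBencher> registeredBenchers)
{
	foreach (var bencher in registeredBenchers)
	{
		Console.WriteLine("\n====================================================");
		// assumed: the bencher's type name is used as a display label here.
		Console.WriteLine("Benching: {0}", bencher.GetType().Name);
		Console.WriteLine("====================================================");

		// timed runs first, then a single discarded run per benchmark type to collect allocation figures.
		RunBencher(bencher);
		RunMemoryAnalysisForBencher(bencher);
	}
}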