/// <summary>
/// Global benchmark setup: boots Kafka and the Akka actor systems, starts the
/// message producer, and materializes a plain consumer stream whose elements
/// are pulled through a buffered sink queue.
/// </summary>
public async Task GlobalSetupAkkaAsync()
{
    await SetupKafkaAsync();
    await SetupAkkaAsync();
    StartProducer();

    // Plain (non-committing) consumer over the benchmark topic.
    var settings = ConsumerSettings<Null, string>
        .Create(ConsumerSystem, null, null)
        .WithBootstrapServers(Docker.KafkaAddress)
        .WithGroupId(KafkaGroup);

    // Pull-based sink with a generous input buffer (initial 2000, max 4000)
    // so the consumer stage is not back-pressured during the benchmark.
    var bufferedQueueSink = Sink.Queue<ConsumeResult<Null, string>>()
        .AddAttributes(new Attributes(new Attributes.InputBuffer(2000, 4000)));

    var materialized = KafkaConsumer
        .PlainSource(settings, Subscriptions.Topics(KafkaTopic))
        .ToMaterialized(bufferedQueueSink, Keep.Both)
        .Run(ConsumerSystem.Materializer());

    _kafkaControl = materialized.Item1;
    _sink = materialized.Item2;
}
/// <summary>
/// Benchmark entry point: sets up Kafka and Akka, samples baseline CPU usage,
/// pushes <c>DefaultMessageCount</c> messages through a partitioned consumer
/// stream, then samples CPU usage again and writes both reports.
/// </summary>
/// <param name="args">Command-line arguments (unused).</param>
/// <returns>Process exit code; 0 on success.</returns>
/// <exception cref="TimeoutException">
/// Thrown when the consumer fails to process all messages within
/// <c>DefaultTimeout</c> seconds.
/// </exception>
public static async Task<int> Main(string[] args)
{
    // Setup
    await SetupKafkaAsync();
    await SetupAkkaAsync();

    List<CpuUsage> usageBeforeLoad;
    List<CpuUsage> usageAfterLoad;
    try
    {
        _log = Logging.GetLogger(ConsumerSystem, nameof(Program));

        // Create topic on Kafka server
        var builder = new AdminClientBuilder(new AdminClientConfig
        {
            BootstrapServers = Benchmark.Docker.KafkaAddress
        });
        using (var client = builder.Build())
        {
            await client.CreateTopicsAsync(new[]
            {
                new TopicSpecification
                {
                    Name = KafkaTopic,
                    NumPartitions = 3,
                    ReplicationFactor = 1
                }
            });
        }

        // Set up consumer, reading from the earliest offset so pre-existing
        // messages are not silently skipped.
        var consumerSettings = ConsumerSettings<string, string>
            .Create(ConsumerSystem, null, null)
            .WithBootstrapServers(Benchmark.Docker.KafkaAddress)
            .WithStopTimeout(TimeSpan.FromSeconds(1))
            .WithProperty("auto.offset.reset", "earliest")
            .WithGroupId(KafkaGroup);

        // One sub-stream per partition (3 partitions); each counts its own
        // messages, then the per-partition totals are merged and summed.
        var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(KafkaTopic))
            .GroupBy(3, tuple => tuple.Item1)
            .SelectAsync(8, async tuple =>
            {
                var (topicPartition, source) = tuple;
                _log.Info($"Sub-source for {topicPartition}");
                var sourceMessages = await source
                    .Scan(0, (i, message) => i + 1)
                    .Select(i =>
                    {
                        ReceivedMessage.IncrementAndGet();
                        return LogReceivedMessages(topicPartition, i);
                    })
                    .RunWith(Sink.Last<long>(), ConsumerSystem.Materializer());
                _log.Info($"{topicPartition}: Received {sourceMessages} messages in total");
                return sourceMessages;
            })
            .MergeSubstreams()
            .AsInstanceOf<Source<long, IControl>>()
            .Scan(0L, (i, subValue) => i + subValue)
            .ToMaterialized(Sink.Last<long>(), Keep.Both)
            .MapMaterializedValue(tuple => DrainingControl<long>.Create(tuple.Item1, tuple.Item2))
            .Run(ConsumerSystem.Materializer());

        // Delay before benchmark
        await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

        // Warmup
        await CollectSamplesAsync(DefaultWarmUpRepeat, DefaultSampleDuration, "[Warmup]");

        // Collect CPU usage before load
        usageBeforeLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage Before Load]");

        // Create load
        var producerSettings = ProducerSettings<string, string>
            .Create(ConsumerSystem, null, null)
            .WithBootstrapServers(Benchmark.Docker.KafkaAddress);
        await Source
            .From(Enumerable.Range(1, DefaultMessageCount))
            .Select(elem => new ProducerRecord<string, string>(KafkaTopic, "key", elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(producerSettings), ConsumerSystem.Materializer());

        // Wait until the consumer has consumed all messages, bounded by DefaultTimeout.
        var stopwatch = Stopwatch.StartNew();
        while (stopwatch.Elapsed.TotalSeconds < DefaultTimeout && ReceivedMessage.Current < DefaultMessageCount)
        {
            await Task.Delay(100);
        }
        stopwatch.Stop();

        // BUGFIX: decide success by the message count, not by re-reading the
        // clock. The old `elapsed > DefaultTimeout` check could miss a real
        // timeout (loop exits at exactly the limit with messages missing) or
        // throw spuriously after all messages had in fact been received.
        if (ReceivedMessage.Current < DefaultMessageCount)
        {
            throw new TimeoutException($"Timed out while waiting consumer to process {DefaultMessageCount} messages");
        }

        // Delay before benchmark
        await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

        // Collect CPU usage after load
        usageAfterLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage After Load]");
    }
    finally
    {
        // Tear down (runs even when the benchmark body throws)
        await TearDownAkkaAsync();
        await TearDownKafkaAsync();
    }

    Console.WriteLine("CPU Benchmark complete.");
    await GenerateReportAsync(usageBeforeLoad, "BeforeLoad", DefaultSampleDuration, DefaultRepeat);
    await GenerateReportAsync(usageAfterLoad, "AfterLoad", DefaultSampleDuration, DefaultRepeat);
    return 0;
}