Example #1
        public async Task PlainPartitionedSource_Should_split_messages_by_partitions()
        {
            var topic         = CreateTopic(1);
            var group         = CreateGroup(1);
            var totalMessages = 100;

            await ProduceStrings(topic, Enumerable.Range(1, totalMessages), ProducerSettings);

            var consumerSettings = CreateConsumerSettings<string>(group);
            var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(topic))
                .SelectAsync(6, async tuple =>
                {
                    var (topicPartition, source) = tuple;
                    Log.Info($"Sub-source for {topicPartition}");
                    var consumedPartitions = await source
                        .Select(m => m.TopicPartition.Partition)
                        .RunWith(Sink.Seq<Partition>(), Materializer);

                    // Return a flag indicating that every message in the child source came from the expected partition
                    return consumedPartitions.All(partition => partition == topicPartition.Partition);
                })
                .As<Source<bool, IControl>>()
                .ToMaterialized(
                    Sink.Aggregate<bool, bool>(true, (result, childSourceIsValid) => result && childSourceIsValid),
                    Keep.Both)
                .MapMaterializedValue(tuple => DrainingControl<bool>.Create(tuple.Item1, tuple.Item2))
                .Run(Materializer);

            // Give it some time to consume all messages
            await Task.Delay(5000);

            var shutdown = control.DrainAndShutdown();

            AwaitCondition(() => shutdown.IsCompleted, TimeSpan.FromSeconds(10));
            shutdown.Result.Should().BeTrue();
        }
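
The fixture helpers used above (CreateTopic, CreateGroup, ProduceStrings, plus the shared ProducerSettings and Materializer) come from the test base class and are not part of this listing. A minimal sketch of what they might look like; the naming scheme is purely illustrative:

        // Hypothetical fixture helpers, for illustration only
        private string CreateTopic(int number) => $"topic-{number}-{Guid.NewGuid()}";
        private string CreateGroup(int number) => $"group-{number}-{Guid.NewGuid()}";

        private async Task ProduceStrings(string topic, IEnumerable<int> range, ProducerSettings<Null, string> settings)
        {
            await Source.From(range)
                .Select(elem => new ProducerRecord<Null, string>(topic, elem.ToString()))
                .RunWith(KafkaProducer.PlainSink(settings), Materializer);
        }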
Example #2
        public static async Task Main(string[] args)
        {
            Config fallbackConfig = ConfigurationFactory.ParseString(@"
                    akka.suppress-json-serializer-warning=true
                    akka.loglevel = DEBUG
                ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

            var system       = ActorSystem.Create("TestKafka", fallbackConfig);
            var materializer = system.Materializer();

            var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, null)
                .WithBootstrapServers($"{EventHubNamespace}.servicebus.windows.net:9093")
                .WithGroupId(EventHubConsumerGroup)
                .WithProperties(new Dictionary<string, string>
                {
                    { "security.protocol", "SASL_SSL" },
                    { "sasl.mechanism", "PLAIN" },
                    { "sasl.username", "$ConnectionString" },
                    { "sasl.password", EventHubConnectionString },
                });

            var subscription = Subscriptions.Topics(EventHubName);

            var committerDefaults = CommitterSettings.Create(system);

            // Comment this block out to run the simple no-commit consumer below instead
            DrainingControl<NotUsed> control = KafkaConsumer.CommittableSource(consumerSettings, subscription)
                .SelectAsync(1, msg =>
                    Business(msg.Record).ContinueWith(done => (ICommittable)msg.CommitableOffset))
                .ToMaterialized(
                    Committer.Sink(committerDefaults.WithMaxBatch(1)),
                    DrainingControl<NotUsed>.Create)
                .Run(materializer);

            // Uncomment to run the simple no-commit consumer instead:

            /*
             * await KafkaConsumer.PlainSource(consumerSettings, subscription)
             *  .RunForeach(result =>
             *  {
             *      Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
             *  }, materializer);
             */

            Console.WriteLine("Press any key to stop consumer.");
            Console.ReadKey();

            // Comment this out when running the simple no-commit consumer
            await control.Stop();

            await system.Terminate();
        }
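
EventHubNamespace, EventHubName, EventHubConsumerGroup, and EventHubConnectionString are constants defined elsewhere in this sample, and Business stands in for real message processing. A minimal sketch with placeholder values; substitute your own Event Hubs settings, and treat the handler body as illustrative:

        // Placeholder configuration -- replace with your own Event Hubs values
        private const string EventHubNamespace        = "my-namespace";
        private const string EventHubName             = "my-event-hub";
        private const string EventHubConsumerGroup    = "$Default";
        private const string EventHubConnectionString = "Endpoint=sb://..."; // keep real secrets out of source control

        // Stand-in for real message handling
        private static Task Business(ConsumeResult<Null, string> record)
        {
            Console.WriteLine($"Consumed: {record.Topic}/{record.Partition} {record.Offset}: {record.Message.Value}");
            return Task.CompletedTask;
        }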
Example #3
        public async Task PlainPartitionedSource_should_work()
        {
            var topic            = CreateTopic(1);
            var group            = CreateGroup(1);
            var totalMessages    = 100;
            var receivedMessages = new AtomicCounter(0);

            var consumerSettings = CreateConsumerSettings<string>(group);

            var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(topic))
                .GroupBy(3, tuple => tuple.Item1)
                .SelectAsync(8, async tuple =>
                {
                    var (topicPartition, source) = tuple;
                    Log.Info($"Sub-source for {topicPartition}");
                    var sourceMessages = await source
                        .Scan(0, (i, message) => i + 1)
                        .Select(i =>
                        {
                            receivedMessages.IncrementAndGet();
                            return LogReceivedMessages(topicPartition, i);
                        })
                        .RunWith(Sink.Last<long>(), Materializer);

                    Log.Info($"{topicPartition}: Received {sourceMessages} messages in total");
                    return sourceMessages;
                })
                .MergeSubstreams()
                .As<Source<long, IControl>>()
                .Scan(0L, (i, subValue) => i + subValue)
                .ToMaterialized(Sink.Last<long>(), Keep.Both)
                .MapMaterializedValue(tuple => DrainingControl<long>.Create(tuple.Item1, tuple.Item2))
                .Run(Materializer);

            await ProduceStrings(topic, Enumerable.Range(1, totalMessages), ProducerSettings);

            for (var i = 0; i < totalMessages; ++i)
            {
                await AwaitConditionAsync(() => receivedMessages.Current > i, TimeSpan.FromSeconds(10));
            }

            await Task.Delay(1000); // Give message handling time to finish after all messages have been received

            var shutdown = control.DrainAndShutdown();

            AwaitCondition(() => shutdown.IsCompleted, TimeSpan.FromSeconds(10));
            shutdown.Result.Should().Be(totalMessages);
        }
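Example #4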
        public async Task ProducerFlowWithContext_should_work_with_source_with_context()
        {
            bool Duplicate(string value) => value == "1";
            bool Ignore(string value) => value == "2";

            var consumerSettings  = CreateConsumerSettings<string, string>(CreateGroup(1));
            var topic1            = CreateTopic(1);
            var topic2            = CreateTopic(2);
            var topic3            = CreateTopic(3);
            var topic4            = CreateTopic(4);
            var producerSettings  = BuildProducerSettings <string, string>();
            var committerSettings = CommitterSettings;
            var totalMessages     = 10;
            var totalConsumed     = 0;

            await ProduceStrings(topic1, Enumerable.Range(1, totalMessages), producerSettings);

            var (control2, result) = KafkaConsumer.PlainSource(consumerSettings, Subscriptions.Topics(topic2, topic3, topic4))
                .Scan(0, (c, _) => c + 1)
                .Select(consumed =>
                {
                    totalConsumed = consumed;
                    return consumed;
                })
                .ToMaterialized(Sink.Last<int>(), Keep.Both)
                .Run(Materializer);

            var control = KafkaConsumer.SourceWithOffsetContext(consumerSettings, Subscriptions.Topics(topic1))
                .Select(record =>
                {
                    IEnvelope<string, string, NotUsed> output;
                    if (Duplicate(record.Message.Value))
                    {
                        // Fan the message out to two topics
                        output = ProducerMessage.Multi(new[]
                        {
                            new ProducerRecord<string, string>(topic2, record.Message.Key, record.Message.Value),
                            new ProducerRecord<string, string>(topic3, record.Message.Key, record.Message.Value)
                        }.ToImmutableSet());
                    }
                    else if (Ignore(record.Message.Value))
                    {
                        // Produce nothing, but keep the offset context so it is still committed
                        output = ProducerMessage.PassThrough<string, string>();
                    }
                    else
                    {
                        output = ProducerMessage.Single(new ProducerRecord<string, string>(topic4, record.Message.Key, record.Message.Value));
                    }

                    Log.Debug($"Producing message of type {output.GetType().Name}");
                    return output;
                })
                .Via(KafkaProducer.FlowWithContext<string, string, ICommittableOffset>(producerSettings))
                .AsSource()
                .Log("Produced messages", r => $"Committing {r.Item2.Offset.Topic}:{r.Item2.Offset.Partition}[{r.Item2.Offset.Offset}]")
                .ToMaterialized(Committer.SinkWithOffsetContext<IResults<string, string, ICommittableOffset>>(committerSettings), Keep.Both)
                .MapMaterializedValue(tuple => DrainingControl<NotUsed>.Create(tuple.Item1, tuple.Item2))
                .Run(Materializer);

            // One by one, wait until all of the messages have been consumed
            for (var i = 1; i < totalMessages; ++i)
            {
                var consumedExpect = i;
                Log.Info($"Waiting for {consumedExpect} to be consumed...");
                try
                {
                    await AwaitConditionAsync(() => totalConsumed >= consumedExpect, TimeSpan.FromSeconds(30));
                }
                finally
                {
                    Log.Info($"Finished waiting for {consumedExpect} messages. Total: {totalConsumed}");
                }
                Log.Info($"Confirmed that {consumedExpect} messages are consumed");
            }

            AssertTaskCompletesWithin(TimeSpan.FromSeconds(10), control.DrainAndShutdown());
            AssertTaskCompletesWithin(TimeSpan.FromSeconds(10), control2.Shutdown());
            AssertTaskCompletesWithin(TimeSpan.FromSeconds(10), result).Should().Be(totalConsumed);
        }
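
AssertTaskCompletesWithin is another fixture helper that this listing does not include. A plausible sketch of its shape, assuming FluentAssertions and covering both the generic and non-generic overloads used above:

        // Hypothetical helper: fail if the task does not finish in time, then hand back its result
        private static T AssertTaskCompletesWithin<T>(TimeSpan timeout, Task<T> task)
        {
            task.Wait(timeout).Should().BeTrue($"task should complete within {timeout}");
            return task.Result;
        }

        private static void AssertTaskCompletesWithin(TimeSpan timeout, Task task)
        {
            task.Wait(timeout).Should().BeTrue($"task should complete within {timeout}");
        }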
Example #5
        public static async Task<int> Main(string[] args)
        {
            // Setup
            await SetupKafkaAsync();
            await SetupAkkaAsync();

            List<CpuUsage> usageBeforeLoad;
            List<CpuUsage> usageAfterLoad;

            try
            {
                _log = Logging.GetLogger(ConsumerSystem, nameof(Program));

                // Create topic on Kafka server
                var builder = new AdminClientBuilder(new AdminClientConfig
                {
                    BootstrapServers = Benchmark.Docker.KafkaAddress
                });
                using (var client = builder.Build())
                {
                    await client.CreateTopicsAsync(new[]
                    {
                        new TopicSpecification
                        {
                            Name              = KafkaTopic,
                            NumPartitions     = 3,
                            ReplicationFactor = 1
                        }
                    });
                }

                // Set up consumer
                var consumerSettings = ConsumerSettings<string, string>.Create(ConsumerSystem, null, null)
                    .WithBootstrapServers(Benchmark.Docker.KafkaAddress)
                    .WithStopTimeout(TimeSpan.FromSeconds(1))
                    .WithProperty("auto.offset.reset", "earliest")
                    .WithGroupId(KafkaGroup);

                var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(KafkaTopic))
                    .GroupBy(3, tuple => tuple.Item1)
                    .SelectAsync(8, async tuple =>
                    {
                        var (topicPartition, source) = tuple;
                        _log.Info($"Sub-source for {topicPartition}");
                        var sourceMessages = await source
                            .Scan(0, (i, message) => i + 1)
                            .Select(i =>
                            {
                                ReceivedMessage.IncrementAndGet();
                                return LogReceivedMessages(topicPartition, i);
                            })
                            .RunWith(Sink.Last<long>(), ConsumerSystem.Materializer());

                        _log.Info($"{topicPartition}: Received {sourceMessages} messages in total");
                        return sourceMessages;
                    })
                    .MergeSubstreams()
                    .AsInstanceOf<Source<long, IControl>>()
                    .Scan(0L, (i, subValue) => i + subValue)
                    .ToMaterialized(Sink.Last<long>(), Keep.Both)
                    .MapMaterializedValue(tuple => DrainingControl<long>.Create(tuple.Item1, tuple.Item2))
                    .Run(ConsumerSystem.Materializer());

                // Delay before benchmark
                await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

                // Warmup
                await CollectSamplesAsync(DefaultWarmUpRepeat, DefaultSampleDuration, "[Warmup]");

                // Collect CPU usage before load
                usageBeforeLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage Before Load]");

                // Create load
                var producerSettings = ProducerSettings<string, string>.Create(ConsumerSystem, null, null)
                    .WithBootstrapServers(Benchmark.Docker.KafkaAddress);

                await Source
                    .From(Enumerable.Range(1, DefaultMessageCount))
                    .Select(elem => new ProducerRecord<string, string>(KafkaTopic, "key", elem.ToString()))
                    .RunWith(KafkaProducer.PlainSink(producerSettings), ConsumerSystem.Materializer());

                // Wait until consumer consumed all messages
                var stopwatch = Stopwatch.StartNew();
                while (stopwatch.Elapsed.TotalSeconds < DefaultTimeout && ReceivedMessage.Current < DefaultMessageCount)
                {
                    await Task.Delay(100);
                }
                stopwatch.Stop();
                if (stopwatch.Elapsed.TotalSeconds > DefaultTimeout)
                {
                    throw new Exception($"Timed out while waiting for the consumer to process {DefaultMessageCount} messages");
                }

                // Delay before benchmark
                await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

                // Collect CPU usage after load
                usageAfterLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage After Load]");
            }
            finally
            {
                // Tear down
                await TearDownAkkaAsync();
                await TearDownKafkaAsync();
            }

            Console.WriteLine("CPU Benchmark complete.");
            await GenerateReportAsync(usageBeforeLoad, "BeforeLoad", DefaultSampleDuration, DefaultRepeat);
            await GenerateReportAsync(usageAfterLoad, "AfterLoad", DefaultSampleDuration, DefaultRepeat);

            return 0;
        }
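
LogReceivedMessages, ReceivedMessage, and the setup, teardown, and CPU-sampling helpers are defined elsewhere in the benchmark project. A rough sketch of the logging helper assumed by the stream above; the 1000-message logging interval is illustrative:

        // Hypothetical sketch: periodically log progress, passing the running count downstream
        private static long LogReceivedMessages(TopicPartition topicPartition, int counter)
        {
            if (counter % 1000 == 0)
                _log.Info($"{topicPartition}: received {counter} messages so far");
            return counter;
        }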