Example 1
        protected void Setup()
        {
            testTopic = "akka100";

            subscription = Subscriptions.Topics(testTopic);

            probe = this.CreateTestProbe();

            string configText = File.ReadAllText("akka.test.conf");

            var config = ConfigurationFactory.ParseString(configText);

            var system_producer = ActorSystem.Create("TestKafka", config);

            materializer_producer = system_producer.Materializer();

            var system_consumer = ActorSystem.Create("TestKafka", config);

            materializer_consumer = system_consumer.Materializer();

            // note: Config.WithFallback returns a new Config, so the result of this call is discarded
            this.Sys.Settings.Config.WithFallback(config);

            producerSettings = ProducerSettings <Null, string> .Create(system_producer, null, null)
                               .WithBootstrapServers("kafka:9092");

            consumerSettings = ConsumerSettings <Null, string> .Create(system_consumer, null, null)
                               .WithBootstrapServers("kafka:9092")
                               .WithGroupId("group1");
        }
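
A test built on this fixture could then run a produce/consume round trip. The following is a minimal sketch, not part of the original Setup, reusing the fields initialized above; it assumes an async test method and the usual Akka.Streams.Kafka entry points:

        // Sketch: produce ten messages to the test topic, then stream them back out.
        await Source.From(Enumerable.Range(1, 10))
            .Select(n => new ProducerRecord<Null, string>(testTopic, n.ToString()))
            .RunWith(KafkaProducer.PlainSink(producerSettings), materializer_producer);

        KafkaConsumer.PlainSource(consumerSettings, subscription)
            .RunForeach(result => Console.WriteLine($"{result.Topic}/{result.Partition}: {result.Value}"),
                        materializer_consumer);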
Example 2
        public async Task PlainSource_should_resume_stage_if_broker_unavailable()
        {
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            var config = ConsumerSettings <Null, string> .Create(Sys, null, null)
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var regex    = new Regex("\\[localhost:10092\\/bootstrap: Connect to [a-zA-Z0-9#:.*]* failed:");
            var logProbe = CreateTestProbe();

            Sys.EventStream.Subscribe <Info>(logProbe.Ref);

            var(control, probe) = CreateProbe(config, Subscriptions.Assignment(topicPartition1));
            probe.Request(1);

            AwaitAssert(() =>
            {
                var info = logProbe.ExpectMsg <Info>();
                regex.IsMatch(info.Message.ToString() ?? "").Should().BeTrue();
                info.Message.ToString().Should().Contain("[Resume]");
            });
            //AwaitCondition(() => control.IsShutdown.IsCompleted, TimeSpan.FromSeconds(10));
        }
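
CreateProbe is a fixture helper that is not shown in these snippets. A plausible sketch of the tuple-returning variant used above, assuming the standard Akka.Streams.TestKit types (the real helper may differ):

        private (IControl, TestSubscriber.Probe<string>) CreateProbe(
            ConsumerSettings<Null, string> settings, ISubscription subscription)
        {
            // Materialize a plain source into a TestKit probe, keeping the consumer control handle.
            var (control, probe) = KafkaConsumer
                .PlainSource(settings, subscription)
                .Select(c => c.Message.Value)
                .ToMaterialized(this.SinkProbe<string>(), Keep.Both)
                .Run(Materializer);

            return (control, probe);
        }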
Example 3
        public async Task PlainSource_should_resume_on_deserialization_errors()
        {
            Directive Decider(Exception cause) => cause is SerializationException
                ? Directive.Resume
                : Directive.Stop;

            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = ConsumerSettings <Null, int> .Create(Sys, null, new IntDeserializer())
                           .WithBootstrapServers(KafkaUrl)
                           .WithProperty("auto.offset.reset", "earliest")
                           .WithGroupId(group1);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe <int>(), _materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            probe.Cancel();
        }
Example 4
 private ConsumerSettings <Null, string> CreateConsumerSettings(string group)
 {
     return(ConsumerSettings <Null, string> .Create(Sys, null, new StringDeserializer(Encoding.UTF8))
            .WithBootstrapServers(KafkaUrl)
            .WithProperty("auto.offset.reset", "earliest")
            .WithGroupId(group));
 }
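
A hypothetical use of this helper inside a test (the topic name is invented for illustration):

        // Sketch: consume one assigned partition through the shared settings helper.
        var settings = CreateConsumerSettings("group1");
        var probe = KafkaConsumer
            .PlainSource(settings, Subscriptions.Assignment(new TopicPartition("some-topic", 0)))
            .Select(c => c.Message.Value)
            .RunWith(this.SinkProbe<string>(), Materializer);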
Example 5
        public static void Main(string[] args)
        {
            Config fallbackConfig = ConfigurationFactory.ParseString(@"
                    akka.suppress-json-serializer-warning=true
                    akka.loglevel = DEBUG
                ").WithFallback(ConfigurationFactory.FromResource <ConsumerSettings <object, object> >("Akka.Streams.Kafka.reference.conf"));

            var system       = ActorSystem.Create("TestKafka", fallbackConfig);
            var materializer = system.Materializer();

            var consumerSettings = ConsumerSettings <Null, string> .Create(system, null, null)
                                   .WithBootstrapServers("localhost:29092")
                                   .WithGroupId("group1");

            var subscription = Subscriptions.Topics("akka100");

            KafkaConsumer.PlainSource(consumerSettings, subscription)
            .RunForeach(result =>
            {
                Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
            }, materializer);


            Console.ReadLine();
        }
Example 6
        public async Task PlainPartitionedSource_should_not_leave_gaps_when_subsource_failed()
        {
            var topic         = CreateTopic(1);
            var group         = CreateGroup(1);
            var totalMessages = 105;

            var producerSettings = BuildProducerSettings <string, string>();

            await Source
            .From(Enumerable.Range(1, totalMessages))
            .Select(elem => new ProducerRecord <string, string>(topic, elem.ToString(), elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(producerSettings), Materializer);

            var queue = new ConcurrentQueue <int>();

            var consumerSettings = ConsumerSettings <string, string> .Create(Sys, null, null)
                                   .WithBootstrapServers(Fixture.KafkaServer)
                                   .WithStopTimeout(TimeSpan.FromSeconds(1))
                                   .WithProperty("auto.offset.reset", "earliest")
                                   .WithProperty("enable.auto.commit", "false")
                                   .WithGroupId(group);

            KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(topic))
            .RunForeach(tuple =>
            {
                var(topicPartition, source) = tuple;

                Log.Info($"Consuming topic partition {topicPartition}");
                source
                .Select(message =>
                {
                    var value = int.Parse(message.Message.Value);
                    queue.Enqueue(value);

                    if (value % 10 == 0)
                    {
                        Log.Debug("[{0}] Reached message to fail: offset: [{1}], value: [{2}]", message.TopicPartition, message.Offset, value);
                        throw new Exception("Stopping subsource");
                    }

                    return(value);
                })
                .RunWith(Sink.Ignore <int>(), Materializer);
            }, Materializer);

            await AwaitConditionAsync(
                () => queue.Count >= totalMessages,
                TimeSpan.FromSeconds(60),
                TimeSpan.FromMilliseconds(100));

            var sorted = queue.ToImmutableSortedSet();

            sorted.Should().BeEquivalentTo(Enumerable.Range(1, totalMessages));
        }
Example 7
        public static async Task Main(string[] args)
        {
            Config fallbackConfig = ConfigurationFactory.ParseString(@"
                    akka.suppress-json-serializer-warning=true
                    akka.loglevel = DEBUG
                ").WithFallback(ConfigurationFactory.FromResource <ConsumerSettings <object, object> >("Akka.Streams.Kafka.reference.conf"));

            var system       = ActorSystem.Create("TestKafka", fallbackConfig);
            var materializer = system.Materializer();

            var consumerSettings = ConsumerSettings <Null, string> .Create(system, null, null)
                                   .WithBootstrapServers($"{EventHubNamespace}.servicebus.windows.net:9093")
                                   .WithGroupId(EventHubConsumerGroup)
                                   .WithProperties(new Dictionary <string, string>
            {
                { "security.protocol", "SASL_SSL" },
                { "sasl.mechanism", "PLAIN" },
                { "sasl.username", "$ConnectionString" },
                { "sasl.password", EventHubConnectionString },
            });

            var subscription = Subscriptions.Topics(EventHubName);

            var committerDefaults = CommitterSettings.Create(system);

            // Comment for simple no-commit consumer
            DrainingControl <NotUsed> control = KafkaConsumer.CommittableSource(consumerSettings, subscription)
                                                .SelectAsync(1, msg =>
                                                             Business(msg.Record).ContinueWith(done => (ICommittable)msg.CommitableOffset))
                                                .ToMaterialized(
                Committer.Sink(committerDefaults.WithMaxBatch(1)),
                DrainingControl <NotUsed> .Create)
                                                .Run(materializer);

            // Uncomment for simple no-commit consumer

            /*
             * await KafkaConsumer.PlainSource(consumerSettings, subscription)
             *  .RunForeach(result =>
             *  {
             *      Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
             *  }, materializer);
             */

            Console.WriteLine("Press any key to stop consumer.");
            Console.ReadKey();

            // Comment for simple no-commit consumer
            await control.Stop();

            await system.Terminate();
        }
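
Business(msg.Record) above stands in for application code that the example does not show; any method returning a Task fits. A minimal hypothetical placeholder:

        // Hypothetical placeholder for the message-handling step used above.
        private static Task Business(ConsumeResult<Null, string> record)
        {
            Console.WriteLine($"Consumer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
            return Task.CompletedTask;
        }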
Example 8
        public async Task PlainSource_should_fail_stage_if_broker_unavailable()
        {
            var topic1 = CreateTopic(1);
            var group1 = CreateGroup(1);

            await GivenInitializedTopic(topic1);

            var config = ConsumerSettings <Null, string> .Create(Sys, null, new StringDeserializer(Encoding.UTF8))
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var probe = CreateProbe(config, topic1, Subscriptions.Assignment(new TopicPartition(topic1, 0)));

            probe.Request(1).ExpectError().Should().BeOfType <KafkaException>();
        }
Example 9
        public void ConfigSettings_must_handleNestedKafkaClientsProperties()
        {
            var conf = ConfigurationFactory.ParseString(@"
                akka.kafka.consumer.kafka-clients.bootstrap.servers = ""localhost:9092""
                akka.kafka.consumer.kafka-clients.bootstrap.foo = baz
                akka.kafka.consumer.kafka-clients.foo = bar
                akka.kafka.consumer.kafka-clients.client.id = client1
            ").WithFallback(KafkaExtensions.DefaultSettings).GetConfig("akka.kafka.consumer");

            var settings = ConsumerSettings <string, string> .Create(conf, null, null);

            settings.GetProperty("bootstrap.servers").Should().Be("localhost:9092");
            settings.GetProperty("client.id").Should().Be("client1");
            settings.GetProperty("foo").Should().Be("bar");
            settings.GetProperty("bootstrap.foo").Should().Be("baz");
        }
Example 10
        public async Task PlainSource_should_fail_stage_if_broker_unavailable()
        {
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            var config = ConsumerSettings <Null, string> .Create(Sys, null, null)
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var(control, probe) = CreateProbe(config, Subscriptions.Assignment(topicPartition1));
            probe.Request(1);
            AwaitCondition(() => control.IsShutdown.IsCompleted, TimeSpan.FromSeconds(10));
        }
Example 11
        private Source <CommittableMessage <K, V>, IControl> CreateCommitableSource(
            MockConsumer <K, V> mock, string groupId = "group1", string[] topics = null)
        {
            topics ??= new[] { "topic" };
            var settings = ConsumerSettings <K, V> .Create(Sys, Deserializers.Utf8, Deserializers.Utf8)
                           .WithGroupId(groupId)
                           .WithCloseTimeout(MockConsumer.CloseTimeout)
                           .WithStopTimeout(MockConsumer.CloseTimeout)
                           .WithCommitTimeout(TimeSpan.FromMilliseconds(500))
                           .WithConsumerFactory(_ => mock.Mock);

            mock.Settings = settings;

            return(KafkaConsumer.CommittableSource(
                       settings,
                       Subscriptions.Topics(topics)));
        }
Example 12
        private Source <CommittableMessage <K, V>, IControl> CreateSourceWithMetadata(
            MockConsumer <K, V> mock,
            Func <ConsumeResult <K, V>, string> metadataFromRecord,
            string groupId  = "group1",
            string[] topics = null)
        {
            var settings = ConsumerSettings <K, V> .Create(Sys, Deserializers.Utf8, Deserializers.Utf8)
                           .WithGroupId(groupId)
                           .WithConsumerFactory(_ => mock.Mock);

            mock.Settings = settings;

            return(KafkaConsumer.CommitWithMetadataSource(
                       settings,
                       Subscriptions.Topics(topics),
                       metadataFromRecord));
        }
Example 13
        public void ConsumerSettings_must_beAbleToMergeConsumerConfig()
        {
            var conf     = KafkaExtensions.DefaultSettings.GetConfig("akka.kafka.consumer");
            var settings = ConsumerSettings <string, string> .Create(conf, null, null);

            var config = new ConsumerConfig
            {
                BootstrapServers = "localhost:9092",
                AutoOffsetReset  = AutoOffsetReset.Latest,
                EnableAutoCommit = true,
                GroupId          = "group1",
                ClientId         = "client1"
            };

            settings = settings.WithConsumerConfig(config);
            settings.GetProperty("bootstrap.servers").Should().Be("localhost:9092");
            settings.GetProperty("auto.offset.reset").Should().Be("latest");
            settings.GetProperty("enable.auto.commit").Should().Be("True");
            settings.GetProperty("group.id").Should().Be("group1");
            settings.GetProperty("client.id").Should().Be("client1");
        }
Example 14
        public void Start(ConsumerAkkaOption consumerActorOption)
        {
            IAutoSubscription makeshop_neworder = Subscriptions.Topics(consumerActorOption.Topics);

            var consumerSettings = ConsumerSettings <Null, string> .Create(consumerSystem, null, null)
                                   .WithBootstrapServers(consumerActorOption.BootstrapServers)
                                   .WithGroupId(consumerActorOption.KafkaGroupId);


            var materializer_consumer = consumerSystem.Materializer();

            KafkaConsumer.CommittableSource(consumerSettings, makeshop_neworder)
            .RunForeach(result =>
            {
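                // Note: Commit() returns a Task that is not awaited here, so commits may still be in flight while later records are processed.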
                result.CommitableOffset.Commit();
                Console.WriteLine($"Consumer: {result.Record.Partition}/{result.Record.Topic} {result.Record.Offset}: {result.Record.Value}");
                if (consumerActorOption.RelayActor != null)
                {
                    consumerActorOption.RelayActor.Tell(result.Record.Value);  // fire-and-forget send
                }
            }, materializer_consumer);
        }
Example 15
        public async Task GlobalSetupAkkaAsync()
        {
            await SetupKafkaAsync();
            await SetupAkkaAsync();

            StartProducer();

            var consumerSettings = ConsumerSettings <Null, string>
                                   .Create(ConsumerSystem, null, null)
                                   .WithBootstrapServers(Docker.KafkaAddress)
                                   .WithGroupId(KafkaGroup);

            var(control, queue) = KafkaConsumer.PlainSource(consumerSettings, Subscriptions.Topics(KafkaTopic))
                                  .ToMaterialized(
                Sink.Queue <ConsumeResult <Null, string> >()
                .AddAttributes(new Attributes(new Attributes.InputBuffer(2000, 4000))),
                Keep.Both)
                                  .Run(ConsumerSystem.Materializer());

            _kafkaControl = control;
            _sink         = queue;
        }
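
The materialized ISinkQueue can then be drained with PullAsync during the benchmark run; a minimal sketch (the consuming side is not shown in the original):

        // Sketch: pull one buffered record from the materialized queue sink.
        var next = await _sink.PullAsync();
        if (next.HasValue)
            Console.WriteLine(next.Value.Value);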
Example 16
        private void StartConsuming()
        {
#if DEBUG
            Console.WriteLine("Start Consuming");
#endif
            allEofsFound = new TaskCompletionSource <bool>();

            //use EOF to know when we are done reading a single topic partition
            var consumerSettings = ConsumerSettings <string, byte[]>
                                   .Create(settings.KafkaConfig, Deserializers.Utf8, Deserializers.ByteArray)
                                   .WithBootstrapServers(settings.KafkaConfig.GetString("bootstrap.servers"))
                                   .WithGroupId(settings.KafkaConfig.GetString("groupid.prefix") + Guid.NewGuid())
                                   .WithProperty("enable.partition.eof", "true");

            var adminClientBuilder = new AdminClientBuilder(consumerSettings.Properties);

            Metadata metadata = null;
            using (var adminClient = adminClientBuilder.Build())
            {
                metadata = adminClient.GetMetadata(TimeSpan.FromSeconds(10));
            }

            if (metadata == null)
            {
                throw new Exception("can not retrieve metadata from bootstrap servers");
            }
            var knownTopicPartitions = new HashSet <TopicPartition>();
            foreach (var metadataTopic in metadata.Topics)
            {
                foreach (var metadataTopicPartition in metadataTopic.Partitions)
                {
                    knownTopicPartitions.Add(new TopicPartition(metadataTopic.Topic, metadataTopicPartition.PartitionId));
                }
            }

            //reset EOF tracking before the loop below pre-marks partitions that are absent from Kafka
            eofsFound = new HashSet <TopicPartition>();
            var topicsNotInKafka = 0;
            //create a consumer subscription for all enabled topic partitions
            var tposForSubscription = new List <TopicPartitionOffset>();
            foreach (var tp in enabledTopicPartitions)
            {
                if (knownTopicPartitions.Contains(tp))
                {
                    tposForSubscription.Add(currentOffsets.TryGetValue(tp, out long offset)
                        ? new TopicPartitionOffset(tp, offset + 1)
                        : new TopicPartitionOffset(tp, Offset.Beginning));
                }
                else
                {
                    //if not present in kafka, then we know we are at the end
                    eofsFound.Add(tp);
                    topicsNotInKafka++;
                }
            }

            if (tposForSubscription.Count == 0)
            {
                //empty start
                allEofsFound.SetResult(true);
                return;
            }

            var subscription = Subscriptions.AssignmentWithOffset(tposForSubscription.ToArray());
            var source       = KafkaConsumer.PlainSource(consumerSettings, subscription);

            var writeBatches = new Dictionary <TopicPartition, WriteBatch>();
            //eofsFound was already reset above, before partitions absent from Kafka were pre-marked;
            //re-creating it here would make the EOF completion check below unreachable

            //Normally this won't take long: topic-partition progress is also stored in RocksDB.
            //If we need to re-read all events, this can take a while.
            var cts = new CancellationTokenSource();

            var sourceTask = source
                             .Via(cts.Token.AsFlow <ConsumeResult <string, byte[]> >(true))
                             .RunForeach(msg =>
            {
                if (msg.IsPartitionEOF)
                {
                    //safe, as this is during startup; no one can touch this topic partition yet.
                    currentOffsets[msg.TopicPartition] = msg.Offset;

                    eofsFound.Add(msg.TopicPartition);
                    if (writeBatches.TryGetValue(msg.TopicPartition, out var b))
                    {
                        database.Write(b, rocksDbWriteOptions);
                        b.Dispose();
                        writeBatches.Remove(msg.TopicPartition);
                    }

                    if (eofListeners.TryGetValue(msg.TopicPartition, out var listeners))
                    {
                        foreach (var taskCompletionSource in listeners)
                        {
                            taskCompletionSource.SetResult(true);
                        }
                        eofListeners.Remove(msg.TopicPartition);
                    }

                    if (eofsFound.Count != tposForSubscription.Count + topicsNotInKafka)
                    {
                        return;
                    }

                    allEofsFound.SetResult(true);
                    cts.Cancel();
                    cts.Dispose();

                    return;
                }
                if (eofsFound.Contains(msg.TopicPartition))
                {
                    return;
                }

                // presume we are the only one writing to this topic partition (otherwise akka persistence gets messy real quick)
                if (!writeBatches.TryGetValue(msg.TopicPartition, out var writebatch))
                {
                    writeBatches[msg.TopicPartition] = writebatch = new WriteBatch();
                }

                //add event to the writebatch
                var persistent = PersistentFromMessage(msg);
                WriteToRocksDbBatch(persistent, writebatch);

                //store the current offset (using the write batch)
                var key = TopicPartitionKey(TopicPartitionNumericId(msg.TopicPartition));
                writebatch.Put(KeyToBytes(key), CounterToBytes(msg.Offset));
            }, materializer);

            sourceTask.ContinueWith(x =>
            {
                Console.WriteLine("Source stopped for kafka journal");
            });
        }
Example 17
        public async Task Committable_consumer_with_failed_downstream_stage_result_should_be_gapless()
        {
            var topic          = CreateTopic(1);
            var group          = CreateGroup(1);
            var topicPartition = new TopicPartition(topic, 0);

            var consumerSettings = ConsumerSettings <Null, string> .Create(Sys, null, null)
                                   .WithBootstrapServers(Fixture.KafkaServer)
                                   .WithStopTimeout(TimeSpan.FromSeconds(1))
                                   .WithProperty("auto.offset.reset", "earliest")
                                   .WithGroupId(group);

            var counter = 0;

            await Source.From(Enumerable.Range(1, 11))
            .Select(elem => new ProducerRecord <Null, string>(topicPartition, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var probe = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.AssignmentWithOffset(new TopicPartitionOffset(topicPartition, Offset.Unset)))
                        .Select(t =>
            {
                counter++;
                // fail once, on the 7th message
                if (counter == 7)
                {
                    throw new Exception("BOOM!");
                }
                return(t);
            })
                        .SelectAsync(1, async elem =>
            {
                await elem.CommitableOffset.Commit();
                return(elem.Record.Value);
            })
                        .ToMaterialized(this.SinkProbe <string>(), Keep.Right)
                        .Run(Materializer);

            var messages = new List <string>();

            probe.Request(11);
            for (var i = 0; i < 6; i++)
            {
                messages.Add(probe.ExpectNext(TimeSpan.FromSeconds(5)));
            }

            // stream fails at index 7
            var err = probe.ExpectEvent();

            err.Should().BeOfType <TestSubscriber.OnError>();
            var exception = ((TestSubscriber.OnError)err).Cause;

            exception.Message.Should().Be("BOOM!");

            // stream should be dead here
            probe.ExpectNoMsg(TimeSpan.FromSeconds(5));
            probe.Cancel();

            // restart dead stream
            probe = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.AssignmentWithOffset(new TopicPartitionOffset(topicPartition, Offset.Unset)))
                    .SelectAsync(1, async elem =>
            {
                await elem.CommitableOffset.Commit();
                return(elem.Record.Value);
            })
                    .ToMaterialized(this.SinkProbe <string>(), Keep.Right)
                    .Run(Materializer);

            probe.Request(11);
            for (var i = 0; i < 5; i++)
            {
                messages.Add(probe.ExpectNext(TimeSpan.FromSeconds(5)));
            }
            probe.Cancel();

            // end result should be gapless
            messages.Select(s => int.Parse(s)).Should().BeEquivalentTo(Enumerable.Range(1, 11));
        }
Example 18
        public async Task Directive_Restart_on_failed_Consumer_should_restart_Consumer()
        {
            var topic                  = CreateTopic(1);
            var group                  = CreateGroup(1);
            var topicPartition         = new TopicPartition(topic, 0);
            var serializationCallCount = 0;
            var callCount              = 0;

            Directive Decider(Exception cause)
            {
                callCount++;
                if (cause is ConsumeException ce && ce.Error.IsSerializationError())
                {
                    serializationCallCount++;
                    return(Directive.Restart);
                }
                return(Directive.Stop);
            }

            var serializer       = new Serializer <int>(BitConverter.GetBytes);
            var producerSettings = ProducerSettings <Null, int>
                                   .Create(Sys, null, serializer)
                                   .WithBootstrapServers(Fixture.KafkaServer);

            await Source.From(Enumerable.Range(1, 10))
            .Select(elem => new ProducerRecord <Null, int>(topicPartition, elem))
            .RunWith(KafkaProducer.PlainSink(producerSettings), Materializer);

            // Exception is injected once using the FailOnceDeserializer
            var deserializer     = new FailOnceDeserializer <int>(5, data => BitConverter.ToInt32(data.Span));
            var consumerSettings = ConsumerSettings <Null, int> .Create(Sys, null, deserializer)
                                   .WithBootstrapServers(Fixture.KafkaServer)
                                   .WithStopTimeout(TimeSpan.FromSeconds(1))
                                   .WithProperty("auto.offset.reset", "earliest")
                                   .WithGroupId(group);

            var(_, probe) = KafkaConsumer
                            .PlainSource(consumerSettings, Subscriptions.Assignment(topicPartition))
                            .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                            .Select(c => c.Message.Value)
                            .ToMaterialized(this.SinkProbe <int>(), Keep.Both)
                            .Run(Materializer);

            probe.Request(20);
            var pulled = new List <int>();

            for (var i = 0; i < 14; i++)
            {
                var msg = probe.ExpectNext();
                pulled.Add(msg);
            }

            probe.ExpectNoMsg(TimeSpan.FromSeconds(2));
            probe.Cancel();

            pulled.Should().BeEquivalentTo(new[] { 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, opt => opt.WithStrictOrdering());

            // Decider should be called twice, because deciders are called in BaseSingleSourceLogic and KafkaConsumerActor
            callCount.Should().Be(2);
            serializationCallCount.Should().Be(2);
        }
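
Serializer&lt;T&gt; and FailOnceDeserializer&lt;T&gt; are helpers from the test suite and are not shown here. A sketch of a deserializer that fails exactly once on a designated value, consistent with how it is constructed above (the real helper's shape is an assumption):

        // Sketch: wraps a deserialization function and throws once when the designated value first appears.
        // IDeserializer<T> and SerializationContext come from Confluent.Kafka;
        // SerializationException comes from System.Runtime.Serialization.
        public class FailOnceDeserializer<T> : IDeserializer<T>
        {
            private readonly T _failOn;
            private readonly Func<ReadOnlyMemory<byte>, T> _deserialize;
            private bool _failed;

            public FailOnceDeserializer(T failOn, Func<ReadOnlyMemory<byte>, T> deserialize)
            {
                _failOn = failOn;
                _deserialize = deserialize;
            }

            public T Deserialize(ReadOnlySpan<byte> data, bool isNull, SerializationContext context)
            {
                var value = _deserialize(data.ToArray());
                if (!_failed && Equals(value, _failOn))
                {
                    _failed = true;
                    throw new SerializationException($"Simulated failure on value {value}");
                }
                return value;
            }
        }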
Example 19
        public static async Task <int> Main(string[] args)
        {
            // Setup
            await SetupKafkaAsync();
            await SetupAkkaAsync();

            List <CpuUsage> usageBeforeLoad;
            List <CpuUsage> usageAfterLoad;

            try
            {
                _log = Logging.GetLogger(ConsumerSystem, nameof(Program));

                // Create topic on Kafka server
                var builder = new AdminClientBuilder(new AdminClientConfig
                {
                    BootstrapServers = Benchmark.Docker.KafkaAddress
                });
                using (var client = builder.Build())
                {
                    await client.CreateTopicsAsync(new[] { new TopicSpecification
                                                           {
                                                               Name              = KafkaTopic,
                                                               NumPartitions     = 3,
                                                               ReplicationFactor = 1
                                                           } });
                }

                // Set up consumer
                var consumerSettings = ConsumerSettings <string, string> .Create(ConsumerSystem, null, null)
                                       .WithBootstrapServers(Benchmark.Docker.KafkaAddress)
                                       .WithStopTimeout(TimeSpan.FromSeconds(1))
                                       .WithProperty("auto.offset.reset", "earliest")
                                       .WithGroupId(KafkaGroup);

                var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(KafkaTopic))
                              .GroupBy(3, tuple => tuple.Item1)
                              .SelectAsync(8, async tuple =>
                {
                    var(topicPartition, source) = tuple;
                    _log.Info($"Sub-source for {topicPartition}");
                    var sourceMessages = await source
                                         .Scan(0, (i, message) => i + 1)
                                         .Select(i =>
                    {
                        ReceivedMessage.IncrementAndGet();
                        return(LogReceivedMessages(topicPartition, i));
                    })
                                         .RunWith(Sink.Last <long>(), ConsumerSystem.Materializer());

                    _log.Info($"{topicPartition}: Received {sourceMessages} messages in total");
                    return(sourceMessages);
                })
                              .MergeSubstreams()
                              .AsInstanceOf <Source <long, IControl> >()
                              .Scan(0L, (i, subValue) => i + subValue)
                              .ToMaterialized(Sink.Last <long>(), Keep.Both)
                              .MapMaterializedValue(tuple => DrainingControl <long> .Create(tuple.Item1, tuple.Item2))
                              .Run(ConsumerSystem.Materializer());

                // Delay before benchmark
                await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

                // Warmup
                await CollectSamplesAsync(DefaultWarmUpRepeat, DefaultSampleDuration, "[Warmup]");

                // Collect CPU usage before load
                usageBeforeLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage Before Load]");

                // Create load
                var producerSettings = ProducerSettings <string, string> .Create(ConsumerSystem, null, null)
                                       .WithBootstrapServers(Benchmark.Docker.KafkaAddress);

                await Source
                .From(Enumerable.Range(1, DefaultMessageCount))
                .Select(elem => new ProducerRecord <string, string>(KafkaTopic, "key", elem.ToString()))
                .RunWith(KafkaProducer.PlainSink(producerSettings), ConsumerSystem.Materializer());

                // Wait until consumer consumed all messages
                var stopwatch = Stopwatch.StartNew();
                while (stopwatch.Elapsed.TotalSeconds < DefaultTimeout && ReceivedMessage.Current < DefaultMessageCount)
                {
                    await Task.Delay(100);
                }
                stopwatch.Stop();
                if (stopwatch.Elapsed.TotalSeconds > DefaultTimeout)
                {
                    throw new Exception($"Timed out while waiting consumer to process {DefaultMessageCount} messages");
                }

                // Delay before benchmark
                await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

                // Collect CPU usage after load
                usageAfterLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage After Load]");
            }
            finally
            {
                // Tear down
                await TearDownAkkaAsync();
                await TearDownKafkaAsync();
            }

            Console.WriteLine("CPU Benchmark complete.");
            await GenerateReportAsync(usageBeforeLoad, "BeforeLoad", DefaultSampleDuration, DefaultRepeat);
            await GenerateReportAsync(usageAfterLoad, "AfterLoad", DefaultSampleDuration, DefaultRepeat);

            return(0);
        }
Example 20
        protected override void OnStartup(StartupEventArgs e)
        {
            GUID = GetGUID();

            _mtx = new Mutex(true, GUID, out var mtxSuccess);

            // error if the mutex could not be acquired
            if (!mtxSuccess)
            {
                MessageBox.Show("이미 실행중입니다.");
                Shutdown();
                return;
            }

            try
            {
                var authority = AkkaHelper.ReadConfigurationFromHoconFile(Assembly.GetExecutingAssembly(), "conf")
                                .WithFallback(ConfigurationFactory
                                              .FromResource <ConsumerSettings <object, object> >("Akka.Streams.Kafka.reference.conf"))
                                .GetInt("ui.notification.authority-level");

                if (authority < 1 || authority > 5)
                {
                    MessageBox.Show("authority-level은 1~5까지 지정할 수 있습니다.", "Error");
                    Shutdown();
                    return;
                }

                var assembly        = Assembly.GetExecutingAssembly();
                var fileVersionInfo = FileVersionInfo.GetVersionInfo(assembly.Location);
                _version = fileVersionInfo.ProductVersion;

                var config = AkkaHelper.ReadConfigurationFromHoconFile(Assembly.GetExecutingAssembly(), "conf")
                             .WithFallback(ConfigurationFactory.FromResource <ConsumerSettings <object, object> >("Akka.Streams.Kafka.reference.conf"));


                CreateTrayIcon(config);
                CreateNotifier(config);

                var system = ActorSystem.Create("BLUECATS-ToastNotifier", config);

                notificationActor = system.ActorOf(NotificationActor.Props(Notifier), nameof(NotificationActor));
                var parserActor         = system.ActorOf(ParserActor.Props(notificationActor), nameof(ParserActor));
                var eventSubscribeActor = system.ActorOf(EventSubscribeActor.Props(notificationActor), nameof(EventSubscribeActor));
                system.EventStream.Subscribe(eventSubscribeActor, typeof(Akka.Event.Error));

                var bootStrapServers = GetBootStrapServers(config);

                var consumerSettings = ConsumerSettings <Null, string> .Create(system, null, Deserializers.Utf8)
                                       .WithBootstrapServers(bootStrapServers)
                                       .WithGroupId(GUID);
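                // Each client instance uses its own GUID as the consumer group id, so every running client receives the full stream.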

                notificationActor.Tell((NotificationLevel.Info, $"BLUE CATS: Client Start\n{GUID}"));

                RestartSource.WithBackoff(() =>
                                          KafkaConsumer.PlainSource(consumerSettings, GetSubscription(config)),
                                          minBackoff: TimeSpan.FromSeconds(3),
                                          maxBackoff: TimeSpan.FromSeconds(30),
                                          randomFactor: 0.2)
                .RunForeach(result =>
                {
                    parserActor.Tell(result);
                }, system.Materializer());
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.ToString());
                Current.Shutdown();
            }

            base.OnStartup(e);
        }