Example #1
        public async Task PlainPartitionedSource_should_be_signalled_about_serialization_errors()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            await ProduceStrings(topic, new int[] { 0 }, ProducerSettings); // Produce "0" string

            var settings = CreateConsumerSettings<int>(group).WithValueDeserializer(Deserializers.Int32);

            var (control1, partitionedProbe) = KafkaConsumer.PlainPartitionedSource(settings, Subscriptions.Topics(topic))
                                              .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Deciders.StoppingDecider))
                                              .ToMaterialized(this.SinkProbe<(TopicPartition, Source<ConsumeResult<Null, int>, NotUsed>)>(), Keep.Both)
                                              .Run(Materializer);

            partitionedProbe.Request(3);

            var subsources = partitionedProbe.Within(TimeSpan.FromSeconds(10), () => partitionedProbe.ExpectNextN(3).Select(t => t.Item2).ToList());
            var substream  = subsources.Aggregate((s1, s2) => s1.Merge(s2)).RunWith(this.SinkProbe<ConsumeResult<Null, int>>(), Materializer);

            substream.Request(1);

            await ProduceStrings(topic, new int[] { 0 }, ProducerSettings); // Produce "0" string

            Within(TimeSpan.FromSeconds(10), () => substream.ExpectError().Should().BeOfType<SerializationException>());

            var shutdown = control1.Shutdown();

            AwaitCondition(() => shutdown.IsCompleted, TimeSpan.FromSeconds(10));
        }
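The failure above comes from a deliberate type mismatch: the producer writes UTF-8 strings while the consumer is configured with Deserializers.Int32, which expects exactly four big-endian bytes. A minimal sketch of that mismatch against plain Confluent.Kafka (assuming the 1.x API; the snippet only illustrates why the stream fails):

        // "0" serializes to a single byte (0x30), not a 4-byte big-endian integer,
        // so the deserializer throws and the stream fails with a serialization error.
        byte[] payload = System.Text.Encoding.UTF8.GetBytes("0");
        int value = Confluent.Kafka.Deserializers.Int32.Deserialize(
            payload, isNull: false, Confluent.Kafka.SerializationContext.Empty); // throws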
Example #2
        public async Task PlainPartitionedSource_Should_split_messages_by_partitions()
        {
            var topic         = CreateTopic(1);
            var group         = CreateGroup(1);
            var totalMessages = 100;

            var consumerSettings = CreateConsumerSettings<string>(group);

            var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(topic))
                          .SelectAsync(6, async tuple =>
            {
                var (topicPartition, source) = tuple;
                Log.Info($"Sub-source for {topicPartition}");
                var consumedPartitions = await source
                                         .Select(m => m.TopicPartition.Partition)
                                         .RunWith(Sink.Seq<Partition>(), Materializer);

                // Return a flag indicating that all messages in the child source came from the same, expected partition
                return consumedPartitions.All(partition => partition == topicPartition.Partition);
            })
                          .As<Source<bool, IControl>>()
                          .ToMaterialized(Sink.Aggregate<bool, bool>(true, (result, childSourceIsValid) => result && childSourceIsValid), Keep.Both)
                          .MapMaterializedValue(tuple => DrainingControl<bool>.Create(tuple.Item1, tuple.Item2))
                          .Run(Materializer);

            await ProduceStrings(topic, Enumerable.Range(1, totalMessages), ProducerSettings);

            // Give it some time to consume all messages
            await Task.Delay(5000);

            var shutdown = control.DrainAndShutdown();

            AwaitCondition(() => shutdown.IsCompleted, TimeSpan.FromSeconds(10));
            shutdown.Result.Should().BeTrue();
        }
Example #3
        static void StartConsume()
        {
            KafkaConsumer consumer = new KafkaConsumer();

            try
            {
                Console.WriteLine("Started consuming, press CTRL + C to break.");
                CancellationTokenSource cancelSource = new CancellationTokenSource();
                Console.CancelKeyPress += (_, e) =>
                {
                    e.Cancel = true;                     // prevent the process from terminating.
                    cancelSource.Cancel();
                };
                Guid groupId = Guid.NewGuid();
                consumer.ConsumeAsync(KafkaBootstrapper, TopicName, groupId.ToString(), cancelSource, (result) =>
                {
                    Console.WriteLine(result.Topic);
                    Console.WriteLine(result.Broker);
                    Console.WriteLine(result.Partition);
                    Console.WriteLine(result.Message);
                });
                Console.WriteLine("Done!");

                Console.ReadLine();
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
        }
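The KafkaConsumer wrapper above is application code and its ConsumeAsync is not shown here. As a rough sketch (not the wrapper's actual implementation), the loop it presumably drives with plain Confluent.Kafka looks like the following, reusing the example's KafkaBootstrapper, TopicName and cancelSource:

        var config = new ConsumerConfig
        {
            BootstrapServers = KafkaBootstrapper,
            GroupId          = Guid.NewGuid().ToString(),
            AutoOffsetReset  = AutoOffsetReset.Earliest
        };
        using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
        consumer.Subscribe(TopicName);
        try
        {
            while (true)
            {
                // Blocks until a message arrives or the token is cancelled (CTRL + C above)
                var result = consumer.Consume(cancelSource.Token);
                Console.WriteLine($"{result.Topic} [{result.Partition}] @{result.Offset}: {result.Message.Value}");
            }
        }
        catch (OperationCanceledException)
        {
            // expected when cancelSource.Cancel() runs
        }
        finally
        {
            consumer.Close(); // leave the group cleanly
        }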
Example #4
        public async Task ProduceToMultiplePartitions(int numberOfPartitions, int numberOfKeys, int numberOfMessages)
        {
            var keySerializer      = new Int32Serializer();
            var valueSerializer    = new StringSerializer();
            var messagePartitioner = new Int32Partitioner();

            using (var temporaryTopic = testCluster.CreateTemporaryTopic(partitions: 2))
                using (var brokers = new KafkaBrokers(testCluster.CreateBrokerUris()))
                {
                    var topic = temporaryTopic.Name;
                    {
                        var producer = KafkaProducer.Create(brokers, keySerializer, valueSerializer, messagePartitioner);
                        var messages =
                            Enumerable
                            .Range(0, numberOfMessages)
                            .Select(i => KeyedMessage.Create(topic, i % numberOfKeys, i % numberOfPartitions, "Message " + i));
                        await producer.SendAsync(messages, CancellationToken.None);
                    }

                    {
                        var selectors =
                            Enumerable
                            .Range(0, numberOfPartitions)
                            .Select(partition => new TopicSelector { Partition = partition, Topic = topic })
                            .ToArray();
                        var consumer = KafkaConsumer.Create(defaultConsumerGroup, brokers, keySerializer, valueSerializer, selectors);

                        var responses = await consumer.ReceiveAsync(CancellationToken.None);

                        Assert.That(responses, Has.Count.EqualTo(numberOfMessages));
                        var received = new bool[numberOfMessages];
                        var offsets  = new long[numberOfPartitions];
                        foreach (var response in responses)
                        {
                            var split = response.Value.Split(' ');
                            Assert.That(split, Has.Length.EqualTo(2));
                            Assert.That(split[0], Is.EqualTo("Message"));
                            int messageNumber;
                            var parsed = Int32.TryParse(split[1], out messageNumber);
                            Assert.That(parsed, Is.True);
                            Assert.That(messageNumber, Is.InRange(0, numberOfMessages - 1));
                            var key = messageNumber % numberOfKeys;
                            Assert.That(response.Key, Is.EqualTo(key));

                            var partition = messageNumber % numberOfPartitions;
                            Assert.That(response.Partition, Is.EqualTo(partition));

                            Assert.That(received[messageNumber], Is.False);
                            received[messageNumber] = true;

                            Assert.That(response.Offset, Is.EqualTo(offsets[response.Partition]));
                            offsets[response.Partition] += 1;

                            Assert.That(response.Topic, Is.EqualTo(topic));
                        }
                    }
                }
        }
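The Int32Partitioner used above belongs to this client library and its source is not shown. Purely to illustrate the modulo routing the test expects (message i lands on partition i % numberOfPartitions), a partitioner could look like the sketch below; the IPartitioner interface here is hypothetical, not the library's actual contract:

        // Hypothetical interface, for illustration only
        public interface IPartitioner<in TKey>
        {
            int SelectPartition(TKey key, int partitionCount);
        }

        public sealed class ModuloInt32Partitioner : IPartitioner<int>
        {
            // Deterministic modulo routing, guarded against negative keys
            public int SelectPartition(int key, int partitionCount) =>
                ((key % partitionCount) + partitionCount) % partitionCount;
        }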
Example #5
        public async Task PlainSource_should_stop_on_errors()
        {
            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Deciders.StoppingDecider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            var error = probe.Request(elementsCount).ExpectEvent(TimeSpan.FromSeconds(5));

            error.Should().BeOfType<TestSubscriber.OnError>();
            var exception = ((TestSubscriber.OnError)error).Cause;

            exception.Should().BeOfType<ConsumeException>();
            ((ConsumeException)exception).Error.IsSerializationError().Should().BeTrue();

            probe.ExpectNoMsg(TimeSpan.FromSeconds(5));
            probe.Cancel();
        }
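IsSerializationError() above is a helper from the streams library. Against plain Confluent.Kafka the equivalent check is a comparison on the error code; a sketch:

        // Deserialization failures surface as "local" error codes on the ConsumeException
        static bool IsSerializationError(Error error) =>
            error.Code == ErrorCode.Local_KeyDeserialization ||
            error.Code == ErrorCode.Local_ValueDeserialization;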
Example #6
        public async Task PlainPartitionedManualOffsetSource_Should_begin_consuming_with_offset()
        {
            var topic            = CreateTopic(1);
            var group            = CreateGroup(1);
            var consumerSettings = CreateConsumerSettings<string>(group);

            await ProduceStrings(topic, Enumerable.Range(1, 100), ProducerSettings);

            var probe = KafkaConsumer.PlainPartitionedManualOffsetSource(
                consumerSettings,
                Subscriptions.Topics(topic),
                getOffsetsOnAssign: topicPartitions =>
            {
                // Skip first message from first partition
                var firstPartition = topicPartitions.OrderBy(tp => tp.Partition.Value).First();
                var offset         = ImmutableHashSet<TopicPartitionOffset>.Empty.Add(new TopicPartitionOffset(firstPartition, 1));
                return Task.FromResult<IImmutableSet<TopicPartitionOffset>>(offset);
            },
                onRevoke: _ => { }
                ).MergeMany(3, tuple => tuple.Item2.MapMaterializedValue(notUsed => new NoopControl()))
                        .Select(m => m.Value)
                        .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(99);
            var messages = probe.Within(TimeSpan.FromSeconds(10), () => probe.ExpectNextN(99));

            messages.ToHashSet().Count.Should().Be(99); // All 99 consumed messages should be distinct (the skipped offset never appears)

            probe.Cancel();
        }
Example #7
        public void KafkaConsumerTest_CanCreateConsumer()
        {
            var consumerMock    = new Mock<IConsumer<string, string>>();
            var objectUnderTest = new KafkaConsumer(_loggerMock.Object, consumerMock.Object);

            Assert.NotNull(objectUnderTest);
        }
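A not-null construction test like this is usually paired with guard clauses in the constructor itself. The wrapper's source is not shown here; a sketch of the kind of constructor such a test exercises (field names assumed):

        public KafkaConsumer(ILogger<KafkaConsumer> logger, IConsumer<string, string> consumer)
        {
            _logger   = logger   ?? throw new ArgumentNullException(nameof(logger));
            _consumer = consumer ?? throw new ArgumentNullException(nameof(consumer));
        }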
Example #8
        public async Task TestSimpleConsumerWorksOk()
        {
            var keySerializer      = new NullSerializer<object>();
            var valueSerializer    = new StringSerializer();
            var messagePartitioner = new LoadBalancedPartitioner<object>();

            using (var temporaryTopic = testCluster.CreateTemporaryTopic())
                using (var brokers = new KafkaBrokers(testCluster.CreateBrokerUris()))
                {
                    var topic    = temporaryTopic.Name;
                    var producer = KafkaProducer.Create(brokers, keySerializer, valueSerializer, messagePartitioner);
                    var consumer = KafkaConsumer.Create(defaultConsumerGroup, brokers, keySerializer, valueSerializer,
                                                        new TopicSelector { Partition = 0, Topic = topic });

                    await producer.SendAsync(KeyedMessage.Create(topic, "Message"), CancellationToken.None);

                    var responses = await consumer.ReceiveAsync(CancellationToken.None);

                    Assert.That(responses, Is.Not.Null);
                    Assert.That(responses, Has.Count.EqualTo(1));

                    var first = responses.First();
                    Assert.That(first.Key, Is.Null);
                    Assert.That(first.Offset, Is.EqualTo(0));
                    Assert.That(first.Partition, Is.EqualTo(0));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.Value, Is.EqualTo("Message"));
                }
        }
Example #9
        private static void OnLog(
            LogMessage logMessage,
            KafkaConsumer consumer,
            ISilverbackLogger logger)
        {
            switch (logMessage.Level)
            {
            case SyslogLevel.Emergency:
            case SyslogLevel.Alert:
            case SyslogLevel.Critical:
                logger.LogConfluentConsumerLogCritical(logMessage, consumer);
                break;

            case SyslogLevel.Error:
                logger.LogConfluentConsumerLogError(logMessage, consumer);
                break;

            case SyslogLevel.Warning:
                logger.LogConfluentConsumerLogWarning(logMessage, consumer);
                break;

            case SyslogLevel.Notice:
            case SyslogLevel.Info:
                logger.LogConfluentConsumerLogInformation(logMessage, consumer);
                break;

            default:
                logger.LogConfluentConsumerLogDebug(logMessage, consumer);
                break;
            }
        }
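The switch maps librdkafka's syslog levels onto the logger's severity buckets. With C# 9 or-patterns the same mapping can be condensed into a switch expression; a sketch, assuming the specialized log methods above boil down to a LogLevel choice:

        private static LogLevel MapSyslogLevel(SyslogLevel level) =>
            level switch
            {
                SyslogLevel.Emergency or SyslogLevel.Alert or SyslogLevel.Critical => LogLevel.Critical,
                SyslogLevel.Error => LogLevel.Error,
                SyslogLevel.Warning => LogLevel.Warning,
                SyslogLevel.Notice or SyslogLevel.Info => LogLevel.Information,
                _ => LogLevel.Debug
            };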
Example #10
        public void KafkaConsumerTest_ProcessMessages_Many()
        {
            var consumerMock = new Mock<IConsumer<string, string>>();

            consumerMock.Setup(p => p.Subscribe(It.IsAny<string>()));
            var result1 = new ConsumeResult<string, string>
            {
                Message = new Message<string, string> { Key = "key1", Value = "value1" },
                TopicPartitionOffset = new TopicPartitionOffset(new TopicPartition("t", new Partition()), new Offset(0))
            };
            var result2 = new ConsumeResult<string, string>
            {
                Message = new Message<string, string> { Key = "key2", Value = "value2" },
                TopicPartitionOffset = new TopicPartitionOffset(new TopicPartition("t", new Partition()), new Offset(1))
            };

            // SetupSequence returns result1 on the first Consume call and result2 on the second.
            // Two separate Setup calls would not work here: the second would simply replace the first.
            consumerMock.SetupSequence(p => p.Consume(It.IsAny<TimeSpan>()))
                        .Returns(result1)
                        .Returns(result2);

            var objectUnderTest = new KafkaConsumer(_loggerMock.Object, consumerMock.Object);
            var ctn             = new CancellationTokenSource(TimeSpan.FromMilliseconds(17));

            // Note: It.IsAny<T>() outside of a Setup/Verify expression simply evaluates to default(T)
            objectUnderTest.ProcessMessages(ctn.Token, It.IsAny<string>(), It.IsAny<IMessageHandler>());

            consumerMock.Verify(x => x.Subscribe(It.IsAny<string>()), Times.Once());
            // consumerMock.Verify(x => x.Consume(It.IsAny<TimeSpan>()), Times.Exactly(2));
        }
Example #11
        private static void OnOffsetsCommitted(
            CommittedOffsets offsets,
            KafkaConsumer consumer,
            IBrokerCallbacksInvoker callbacksInvoker,
            ISilverbackLogger logger)
        {
            foreach (var topicPartitionOffsetError in offsets.Offsets)
            {
                if (topicPartitionOffsetError.Offset == Offset.Unset)
                {
                    continue;
                }

                if (topicPartitionOffsetError.Error != null &&
                    topicPartitionOffsetError.Error.Code != ErrorCode.NoError)
                {
                    logger.LogOffsetCommitError(topicPartitionOffsetError, consumer);
                }
                else
                {
                    logger.LogOffsetCommitted(topicPartitionOffsetError.TopicPartitionOffset, consumer);
                }
            }

            callbacksInvoker.Invoke<IKafkaOffsetCommittedCallback>(
                handler => handler.OnOffsetsCommitted(offsets, consumer));
        }
Example #12
        public async Task CommitWithMetadataSource_Commit_metadata_in_message_Should_work()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            string MetadataFromMessage<K, V>(ConsumeResult<K, V> message) => message.Offset.ToString();

            await ProduceStrings(topic, Enumerable.Range(1, 10), ProducerSettings);

            var (control, probe) = KafkaConsumer.CommitWithMetadataSource(CreateConsumerSettings<string>(group), Subscriptions.Topics(topic), MetadataFromMessage)
                                  .ToMaterialized(this.SinkProbe<CommittableMessage<Null, string>>(), Keep.Both)
                                  .Run(Materializer);

            probe.Request(10);

            probe.Within(TimeSpan.FromSeconds(10), () => probe.ExpectNextN(10)).ForEach(message =>
            {
                var offsetWithMeta = message.CommitableOffset as ICommittableOffsetMetadata;
                offsetWithMeta.Should().NotBeNull();
                offsetWithMeta.Metadata.Should().Be(message.CommitableOffset.Offset.Offset.ToString());
            });

            probe.Cancel();

            AwaitCondition(() => control.IsShutdown.IsCompletedSuccessfully, TimeSpan.FromSeconds(10));
        }
Example #13
        public async Task CanConsumeEvent_Lots()
        {
            SetLogLevelTo(LogEventLevel.Information);

            var receivedEvents = new ConcurrentQueue<string>();

            const string groupName = "default6";

            var consumer1 = new KafkaConsumer(
                address: "localhost:9092",
                topics: new[] { "lots" },
                group: groupName,
                eventHandler: async evt => receivedEvents.Enqueue(evt.Body)
                );

            Using(consumer1);

            var consumer2 = new KafkaConsumer(
                address: "localhost:9092",
                topics: new[] { "lots" },
                group: groupName,
                eventHandler: async evt => receivedEvents.Enqueue(evt.Body)
                );

            Using(consumer2);

            consumer1.Start();
            consumer2.Start();

            await receivedEvents.WaitOrDie(q => q.Count == 11000, timeoutSeconds: 70);

            await Task.Delay(TimeSpan.FromSeconds(5));
        }
Example #14
        public async Task PlainSource_should_resume_on_deserialization_errors()
        {
            Directive Decider(Exception cause) => cause is SerializationException
                ? Directive.Resume
                : Directive.Stop;

            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = ConsumerSettings<Null, int>.Create(Sys, null, new IntDeserializer())
                           .WithBootstrapServers(KafkaUrl)
                           .WithProperty("auto.offset.reset", "earliest")
                           .WithGroupId(group1);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), _materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            probe.Cancel();
        }
Example #15
        public void KafkaConsumerTest_When_Consumer_ConsumeException()
        {
            var mockLogger = new Mock<ILogger<KafkaConsumer>>();

            mockLogger.Setup(x => x.Log(It.IsAny<LogLevel>(), It.IsAny<EventId>(), It.IsAny<FormattedLogValues>(), It.IsAny<Exception>(), It.IsAny<Func<object, Exception, string>>()));
            var consumerMock = new Mock<IConsumer<string, string>>();

            consumerMock.Setup(p => p.Subscribe(It.IsAny<string>()));
            var messageHandlerMock = new Mock<IMessageHandler>();
            var result1            = new ConsumeResult<byte[], byte[]>
            {
                Message = new Message<byte[], byte[]>()
            };

            consumerMock.Setup(p => p.Consume(It.IsAny<TimeSpan>())).Throws(new ConsumeException(result1, new Error(ErrorCode.BrokerNotAvailable, "failure")));

            var objectUnderTest = new KafkaConsumer(mockLogger.Object, consumerMock.Object);
            var ctn             = new CancellationTokenSource(TimeSpan.FromMilliseconds(10));

            objectUnderTest.ProcessMessages(ctn.Token, It.IsAny<string>(), messageHandlerMock.Object);
            consumerMock.Verify(x => x.Subscribe(It.IsAny<string>()), Times.Once());
            consumerMock.Verify(x => x.Consume(It.IsAny<TimeSpan>()), Times.Once());
            mockLogger.Verify(x => x.Log(LogLevel.Error, It.IsAny<EventId>(), It.IsAny<FormattedLogValues>(), It.IsAny<Exception>(), It.IsAny<Func<object, Exception, string>>()), Times.Once());
            messageHandlerMock.Verify(x => x.HandleMessage(It.IsAny<string>(), It.IsAny<string>()), Times.Never());
        }
Example #16
        public async Task Transactional_source_with_sink_Should_work()
        {
            var       settings        = CreateConsumerSettings<string>(CreateGroup(1));
            var       sourceTopic     = CreateTopic(1);
            var       targetTopic     = CreateTopic(2);
            var       transactionalId = Guid.NewGuid().ToString();
            const int totalMessages   = 10;

            var control = KafkaConsumer.TransactionalSource(settings, Subscriptions.Topics(sourceTopic))
                          .Via(Business<TransactionalMessage<Null, string>>())
                          .Select(message =>
            {
                return ProducerMessage.Single(
                           new ProducerRecord<Null, string>(targetTopic, message.Record.Key, message.Record.Value),
                           passThrough: message.PartitionOffset);
            })
                          .ToMaterialized(KafkaProducer.TransactionalSink(ProducerSettings, transactionalId), Keep.Both)
                          .MapMaterializedValue(DrainingControl<NotUsed>.Create)
                          .Run(Materializer);

            var consumer = ConsumeStrings(targetTopic, totalMessages);

            await ProduceStrings(sourceTopic, Enumerable.Range(1, totalMessages), ProducerSettings);

            AssertTaskCompletesWithin(TimeSpan.FromSeconds(totalMessages), consumer.IsShutdown);
            AssertTaskCompletesWithin(TimeSpan.FromSeconds(totalMessages), control.DrainAndShutdown());

            consumer.DrainAndShutdown().Result.Should().HaveCount(totalMessages);
        }
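TransactionalSource and TransactionalSink pair the consumed offsets with the produced messages inside a single Kafka transaction, which is why the pass-through of message.PartitionOffset above matters. The raw Confluent.Kafka skeleton behind such a sink looks roughly like this (a sketch: the broker address and topic are illustrative, and the offset forwarding via SendOffsetsToTransaction is elided):

        var producerConfig = new ProducerConfig
        {
            BootstrapServers = "localhost:9092",         // illustrative
            TransactionalId  = Guid.NewGuid().ToString() // one stable id per producer instance
        };
        using var producer = new ProducerBuilder<Null, string>(producerConfig).Build();
        producer.InitTransactions(TimeSpan.FromSeconds(10));
        producer.BeginTransaction();
        producer.Produce("target-topic", new Message<Null, string> { Value = "hello" });
        // A real consume-transform-produce loop would also call SendOffsetsToTransaction(...)
        // so the source offsets commit atomically with the produced messages.
        producer.CommitTransaction(TimeSpan.FromSeconds(10));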
Example #17
        public void KafkaConsumerTest_ProcessMessages_Success()
        {
            var consumerMock       = new Mock<IConsumer<string, string>>();
            var ctn                = new CancellationTokenSource();
            var messageHandlerMock = new Mock<IMessageHandler>();
            var objectUnderTest    = new KafkaConsumer(_loggerMock.Object, consumerMock.Object);
            var consumeRes         = new ConsumeResult<string, string>
            {
                Message = new Message<string, string> { Key = "key1", Value = "value1" },
                TopicPartitionOffset = new TopicPartitionOffset(new TopicPartition("t", new Partition()), new Offset(0))
            };

            consumerMock.Setup(x => x.Consume(TimeSpan.FromMilliseconds(100))).Returns(consumeRes).Callback(() => ctn.Cancel());
            consumerMock.Setup(x => x.Subscribe(It.IsAny<string>()));
            //ACTION
            objectUnderTest.ProcessMessages(ctn.Token, "TestTopic", messageHandlerMock.Object);

            //ASSERT
            //1. Verify consumer.subscribe has been called
            //2. verify on success next method is called
            consumerMock.Verify(x => x.Subscribe(It.IsAny<string>()), Times.Once);
            consumerMock.Verify(x => x.Consume(TimeSpan.FromMilliseconds(100)), Times.AtLeastOnce);
            messageHandlerMock.Verify(x => x.HandleMessage(It.IsAny<string>(), It.IsAny<string>()), Times.Once);
        }
Example #18
        public async Task Overriden_default_decider_on_PlainSource_should_work()
        {
            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            var sourceTask = ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            await GuardWithTimeoutAsync(sourceTask, TimeSpan.FromSeconds(3));

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);
            var decider  = new OverridenConsumerDecider(settings.AutoCreateTopicsEnabled);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(decider.Decide))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            // This is elementsCount * 2 because the Decider is consulted twice for each failed message
            decider.CallCount.Should().Be(elementsCount * 2);
            probe.Cancel();
        }
Example #19
        private static async Task Main()
        {
            //todo temp - for manual tests
            // var message = new KafkaMessage
            // {
            //     UtcStartDateTime = DateTime.Now.AddMinutes(-1).ToUniversalTime(),
            //     FilePath = @"<some path>"
            // };

            Log.Logger =
                new LoggerConfiguration()
                .MinimumLevel.Information()
                .Enrich.With(new UtcTimestampEnricher())
                .WriteTo.ColoredConsole(
                    outputTemplate:
                    "{UtcTimestamp:yyyy-MM-dd HH:mm:ss.fff} [{Level}] {CallerMemberName} {Message:lj}{NewLine}{Exception}",
                    formatProvider: CultureInfo.InvariantCulture)
                .CreateLogger();

            var cfg = JsonConvert.DeserializeObject<MainConfig>(await File.ReadAllTextAsync("config.json"));

            cfg.ValidateConfig();

            await KafkaConsumer.ConsumeMessage(cfg, new CancellationToken(), Log.Logger);

            //// To run without Kafka during testing, comment out the line above and uncomment these:
            //await SortWorker.ProcessSorting(cfg, message, Log.Logger);
            //Console.ReadKey();
        }
Example #20
        public async Task AtMostOnceSource_Should_work()
        {
            var topic         = CreateTopic(1);
            var settings      = CreateConsumerSettings<string>(CreateGroup(1));
            var totalMessages = 10;
            var lastMessage   = new TaskCompletionSource<Done>();

            await ProduceStrings(topic, Enumerable.Range(1, 10), ProducerSettings);

            var (task, probe) = KafkaConsumer.AtMostOnceSource(settings, Subscriptions.Topics(topic))
                               .SelectAsync(1, m =>
            {
                if (m.Value == totalMessages.ToString())
                {
                    lastMessage.SetResult(Done.Instance);
                }

                return Task.FromResult(Done.Instance);
            })
                               .ToMaterialized(this.SinkProbe<Done>(), Keep.Both)
                               .Run(Materializer);

            probe.Request(10);

            await lastMessage.Task;

            probe.Cancel();

            probe.ExpectNextN(10);
        }
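AtMostOnceSource commits each offset before the message is emitted downstream, so a crash mid-processing drops that message instead of redelivering it. The same ordering with a raw Confluent.Kafka consumer is simply commit-then-process; a sketch (the process delegate is a placeholder):

        // consumer is an IConsumer<string, string> built with EnableAutoCommit = false
        static void ConsumeAtMostOnce(IConsumer<string, string> consumer, CancellationToken token, Action<string> process)
        {
            var result = consumer.Consume(token);
            consumer.Commit(result);       // commit first: at-most-once
            process(result.Message.Value); // a crash here loses the message for good
        }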
Example #21
        public async Task IntegrationTest()
        {
            int counter = 0;

            // Consume
            var consumer = new KafkaConsumer($"integration.{group}", hosts)
            {
                EnableAutoCommit = false
            };
            await consumer.ListenAsync(new string[] { $"integration.{topic}" }, result =>
            {
                Output.WriteLine($"integration.{topic}({result.Key}):" + result.Message);
                counter++;
                result.Commit();
            });

            // Publish
            var producer = new KafkaProducer(hosts)
            {
                DefaultTopic = $"integration.{topic}",
                DefaultKey   = $"integration.key"
            };
            await producer.PublishAsync("hello kafka");

            BlockUntil(() => counter >= 1, 3000);

            producer.Dispose();
            consumer.Dispose();

            Assert.Equal(1, counter);
        }
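The result.Commit() call above is this wrapper's manual-commit hook, enabled by EnableAutoCommit = false. With raw Confluent.Kafka the same at-least-once pattern is an explicit Commit after handling; a sketch (config values illustrative):

        var config = new ConsumerConfig
        {
            BootstrapServers = "localhost:9092", // illustrative
            GroupId          = "integration.group",
            EnableAutoCommit = false             // offsets advance only on explicit Commit
        };
        using var consumer = new ConsumerBuilder<string, string>(config).Build();
        consumer.Subscribe("integration.topic");
        var result = consumer.Consume(TimeSpan.FromSeconds(3));
        if (result != null)
        {
            Console.WriteLine($"{result.Message.Key}: {result.Message.Value}");
            consumer.Commit(result); // handle first, then commit
        }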
Example #22
        public async Task CommitableSource_consumes_messages_from_Producer_without_commits()
        {
            int elementsCount   = 100;
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            await Source
            .From(Enumerable.Range(1, elementsCount))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var consumerSettings = CreateConsumerSettings<string>(group1);

            var probe = KafkaConsumer
                        .CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
                        .Where(c => !c.Record.Value.Equals(InitialMsg))
                        .Select(c => c.Record.Value)
                        .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe.Cancel();
        }
Example #23
        public async Task VerifyABrokerStoppingAndRestartingCanBeHandledByTheConsumer()
        {
            using (var cluster = new KafkaTestCluster("server.home", 1))
            {
                var topic = "test";
                cluster.CreateTopic(topic);
                using (var brokers = new KafkaBrokers(cluster.CreateBrokerUris()))
                {
                    var producer = KafkaProducer.Create(brokers, new StringSerializer());
                    await producer.SendAsync(KeyedMessage.Create(topic, "Test"), CancellationToken.None);

                    await Task.Delay(1000);

                    cluster.StopKafkaBroker(0);
                    cluster.RestartKafkaBroker(0);

                    var consumer = KafkaConsumer.Create(topic, brokers, new StringSerializer(),
                                                        new TopicSelector { Topic = topic, Partition = 0, Offset = 0 });
                    var result = await consumer.ReceiveAsync(CancellationToken.None);

                    Assert.That(result, Has.Count.EqualTo(1));
                    var first = result[0];

                    Assert.That(first.Value, Is.EqualTo("Test"));
                    Assert.That(first.Offset, Is.EqualTo(0));
                }
                cluster.DeleteTopic(topic);
            }
        }
Example #24
        public async Task StartAsync(CancellationToken stopCancellationToken = default)
        {
            foreach (var consumerConfiguration in this.configuration.Clusters.SelectMany(cl => cl.Consumers))
            {
                var dependencyScope = this.dependencyResolver.CreateScope();

                var middlewares = consumerConfiguration.MiddlewareConfiguration.Factories
                                  .Select(factory => factory(dependencyScope.Resolver))
                                  .ToList();

                var consumerWorkerPool = new ConsumerWorkerPool(
                    dependencyScope.Resolver,
                    this.logHandler,
                    new MiddlewareExecutor(middlewares),
                    consumerConfiguration.DistributionStrategyFactory);

                var consumer = new KafkaConsumer(
                    consumerConfiguration,
                    this.consumerManager,
                    this.logHandler,
                    consumerWorkerPool,
                    stopCancellationToken);

                this.consumers.Add(consumer);

                await consumer.StartAsync().ConfigureAwait(false);
            }
        }
Example #25
        public async Task PlainSource_with_directive_override_should_resume_on_deserialization_errors()
        {
            var callCount = 0;

            Directive Decider(Exception cause)
            {
                if (cause is ConsumeException ex && ex.Error.IsSerializationError())
                {
                    callCount++;
                    return Directive.Resume;
                }
                return Directive.Stop;
            }

            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            // This is elementsCount * 2 because the Decider is consulted twice for each failed message
            callCount.Should().Be(elementsCount * 2);
            probe.Cancel();
        }
Example #26
        public async Task PlainPartitionedSource_should_stop_partition_sources_when_stopped()
        {
            var topic         = CreateTopic(1);
            var group         = CreateGroup(1);
            var totalMessages = 100;

            await ProduceStrings(topic, Enumerable.Range(1, totalMessages), ProducerSettings);

            var consumerSettings = CreateConsumerSettings<string>(group).WithStopTimeout(TimeSpan.FromMilliseconds(10));

            var (control, probe) = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(topic))
                                  .MergeMany(3, tuple => tuple.Item2.MapMaterializedValue(notUsed => new NoopControl()))
                                  .Select(message =>
            {
                Log.Debug($"Consumed partition {message.Partition.Value}");
                return message.Value;
            })
                                  .ToMaterialized(this.SinkProbe<string>(), Keep.Both)
                                  .Run(Materializer);

            probe.Request(totalMessages).Within(TimeSpan.FromSeconds(10), () => probe.ExpectNextN(totalMessages));

            var stopped = control.Stop();

            probe.ExpectComplete();

            AwaitCondition(() => stopped.IsCompleted, TimeSpan.FromSeconds(10));

            await control.Shutdown();

            probe.Cancel();
        }
Example #27
        private static void OnError(Error error, KafkaConsumer consumer, ISilverbackIntegrationLogger logger)
        {
            // Ignore errors if not consuming anymore
            // (librdkafka randomly reports "brokers are down" errors while disconnecting)
            if (!consumer.IsConnected)
            {
                return;
            }

            try
            {
                if (consumer.Endpoint.Events.ErrorHandler != null &&
                    consumer.Endpoint.Events.ErrorHandler.Invoke(error, consumer))
                {
                    return;
                }
            }
            catch (Exception ex)
            {
                logger.LogError(
                    KafkaEventIds.KafkaErrorHandlerError,
                    ex,
                    "Error in Kafka consumer error handler.");
            }

            logger.Log(
                error.IsFatal ? LogLevel.Critical : LogLevel.Error,
                KafkaEventIds.ConsumerError,
                "Error in Kafka consumer ({consumerId}): {error} (topic(s): {topics})",
                consumer.Id,
                error,
                consumer.Endpoint.Names);
        }
Example #28
        public async Task PlainPartitionedSource_should_not_leave_gaps_when_subsource_is_cancelled()
        {
            var topic         = CreateTopic(1);
            var group         = CreateGroup(1);
            var totalMessages = 100;

            await ProduceStrings(topic, Enumerable.Range(1, totalMessages), ProducerSettings);

            var consumedMessagesTask = KafkaConsumer.PlainPartitionedSource(CreateConsumerSettings<string>(group), Subscriptions.Topics(topic))
                                       .Log(topic, m => $"Consuming topic partition {m.Item1}")
                                       .MergeMany(3, tuple =>
            {
                var (topicPartition, source) = tuple;
                return source
                       .MapMaterializedValue(notUsed => new NoopControl())
                       .Log(topicPartition.ToString(), m => $"Consumed offset {m.Offset} (value: {m.Value})")
                       .Take(10);
            })
                                       .Select(m => int.Parse(m.Value))
                                       .Log("Merged stream", m => m)
                                       .Scan(0, (c, _) => c + 1)
                                       .TakeWhile(m => m < totalMessages, inclusive: true)
                                       .RunWith(Sink.Last<int>(), Materializer);

            AwaitCondition(() => consumedMessagesTask.IsCompleted, TimeSpan.FromSeconds(10));

            consumedMessagesTask.Result.Should().Be(totalMessages);
        }
Example #29
        public async Task SubscribeAsync<T>(Func<T, SubscribeContext, Task> handler, SubscribeOptions subscribeOptions = null, CancellationToken cancellationToken = default(CancellationToken)) where T : class
        {
            string topic = GetTopic(typeof(T));

            var groupId = subscribeOptions?.GroupId;

            groupId = !string.IsNullOrEmpty(groupId) ? groupId : _kafkaOptions.ConsumerConfig.GroupId;

            var threadCount = subscribeOptions?.ConsumerThreadCount ?? 0;

            threadCount = threadCount > 0 ? threadCount : _kafkaOptions.DefaultConsumerThreadCount;
            AssertUtils.IsTrue(threadCount > 0, "Consumer thread count must be greater than 0");

            ValidateSubscribe(topic, groupId);

            _logger.LogInformation($"Subscribing to topic: {topic}, GroupId: {groupId}, ConsumerThreadCount: {threadCount}");
            for (int i = 0; i < threadCount; i++)
            {
                var consumer = new KafkaConsumer<string, KafkaMessageBusData>(_serviceProvider);
                consumer.OnMessage += consumeResult =>
                {
                    return With.NoException(_logger, async () =>
                    {
                        var obj = _kafkaaOptions.Serializer.Deserialize<T>(consumeResult.Message.Value.Data);
                        var subscribeContext = new SubscribeContext { Topic = topic, GroupId = groupId };
                        await handler(obj, subscribeContext);
                    }, $"Consuming message from topic {consumeResult.Message.Value.Topic}");
                };

                _consumerList.Add(consumer);
                await consumer.Subscribe(topic, groupId, cancellationToken);
            }
        }
Example #30
        public static void Main(string[] args)
        {
            Config fallbackConfig = ConfigurationFactory.ParseString(@"
                    akka.suppress-json-serializer-warning=true
                    akka.loglevel = DEBUG
                ").WithFallback(ConfigurationFactory.FromResource <ConsumerSettings <object, object> >("Akka.Streams.Kafka.reference.conf"));

            var system       = ActorSystem.Create("TestKafka", fallbackConfig);
            var materializer = system.Materializer();

            var consumerSettings = ConsumerSettings<Null, string>.Create(system, null, null)
                                   .WithBootstrapServers("localhost:29092")
                                   .WithGroupId("group1");

            var subscription = Subscriptions.Topics("akka100");

            KafkaConsumer.PlainSource(consumerSettings, subscription)
            .RunForeach(result =>
            {
                Console.WriteLine($"Consumer: {result.Topic}/{result.Partition} {result.Offset}: {result.Value}");
            }, materializer);


            Console.ReadLine();
        }
Example #31
        static void Main(string[] args)
        {
            var host = "192.168.33.12:9092";
            var topic = "test2";
            var count = 50000000;

            var prop = new Properties();
            prop.put("bootstrap.servers", host);
            prop.put("group.id", "test3");
            prop.put("auto.offset.reset", "earliest");
            prop.put("enable.auto.commit", "true");
            prop.put("auto.commit.interval.ms", "1000");
            prop.put("socket.receive.buffer.bytes", (2*1024*1024).ToString());
            prop.put("fetch.message.max.bytes", (1024*1024).ToString());

            var c = new KafkaConsumer(prop, new ByteArrayDeserializer(), new ByteArrayDeserializer());
            
            var topics = new ArrayList(1);
            topics.add(topic);
            var time = DateTime.UtcNow;
            c.subscribe(topics);
            var bytes = 0L; // use long: an int accumulator would overflow after ~2 GB of consumed payload
            var i = count;
            var recordCount = 0;
            while (i > 0)
            {
                var r = c.poll(1000);
                var records = r.records(topic);
                for (var it = records.iterator(); it.hasNext() && i > 0; i--, recordCount++)
                {
                    var rec = (ConsumerRecord)it.next();
                    var b = (byte[]) rec.value();
                    bytes += b.Length;
                }
                Console.WriteLine(recordCount);
            }
            var mb = bytes / 1024.0 / 1024.0;
            var seconds = (DateTime.UtcNow - time).TotalSeconds;
            Console.WriteLine($"{mb / seconds} MB/sec");
            Console.WriteLine($"{count / seconds} Msg/sec");
        }