Example #1
        public async Task ExternalPlainSource_should_be_stopped_on_serialization_error_only_when_requested_messages()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            // Make the consumer expect numeric messages
            var settings = CreateConsumerSettings<int>(group).WithValueDeserializer(Deserializers.Int32);
            var consumer = Sys.ActorOf(KafkaConsumerActorMetadata.GetProps(settings));

            // Subscribe to partitions
            var (control1, probe1) = CreateExternalPlainSourceProbe<int>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 0)));
            var (control2, probe2) = CreateExternalPlainSourceProbe<int>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 1)));
            var (control3, probe3) = CreateExternalPlainSourceProbe<int>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 2)));

            // Request from two of the three streams
            probe1.Request(1);
            probe2.Request(1);
            await Task.Delay(500); // To establish demand

            // Send string messages, which the Int32 value deserializer cannot handle
            await ProduceStrings(new TopicPartition(topic, 0), new int[] { 1 }, ProducerSettings);
            await ProduceStrings(new TopicPartition(topic, 1), new int[] { 1 }, ProducerSettings);

            // The first two stages should fail; only the stage without demand should keep going
            probe1.ExpectError().Should().BeOfType<SerializationException>();
            probe2.ExpectError().Should().BeOfType<SerializationException>();
            probe3.Cancel();

            // Make sure source tasks finish accordingly
            AwaitCondition(() => control1.IsShutdown.IsCompleted && control2.IsShutdown.IsCompleted && control3.IsShutdown.IsCompletedSuccessfully);

            // Cleanup
            consumer.Tell(KafkaConsumerActorMetadata.Internal.Stop.Instance, ActorRefs.NoSender);
        }
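For reference, a minimal sketch of the CreateExternalPlainSourceProbe<T> helper these examples call. This is an assumed shape (the real helper ships with the test base class): it attaches an external plain source to the shared consumer actor and materializes a TestKit probe together with the stream control.

        private (IControl, TestSubscriber.Probe<T>) CreateExternalPlainSourceProbe<T>(IActorRef consumer, IManualSubscription subscription)
        {
            // Assumed: PlainExternalSource reuses the externally managed consumer actor
            return KafkaConsumer
                   .PlainExternalSource<Null, T>(consumer, subscription)
                   .Select(c => c.Value)
                   .ToMaterialized(this.SinkProbe<T>(), Keep.Both)
                   .Run(Materializer);
        }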
Example #2
        public async Task Overriden_default_decider_on_PlainSource_should_work()
        {
            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            var sourceTask = ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            await GuardWithTimeoutAsync(sourceTask, TimeSpan.FromSeconds(3));

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);
            var decider  = new OverridenConsumerDecider(settings.AutoCreateTopicsEnabled);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(decider.Decide))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            // this is twice elementsCount because the Decider is called twice for each exception
            decider.CallCount.Should().Be(elementsCount * 2);
            probe.Cancel();
        }
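The OverridenConsumerDecider is not shown on this page; a plausible sketch, assumed from how the test asserts CallCount, counts every invocation and resumes on serialization errors.

        private class OverridenConsumerDecider
        {
            private readonly bool _autoCreateTopicsEnabled;

            public OverridenConsumerDecider(bool autoCreateTopicsEnabled)
                => _autoCreateTopicsEnabled = autoCreateTopicsEnabled;

            public int CallCount { get; private set; }

            public Directive Decide(Exception cause)
            {
                CallCount++; // invoked twice per exception: once per supervising stage
                if (cause is ConsumeException ex && ex.Error.IsSerializationError())
                    return Directive.Resume;
                return Directive.Stop;
            }
        }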
Example #3
        public async Task CommitableSource_consumes_messages_from_Producer_without_commits()
        {
            int elementsCount   = 100;
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            await Source
            .From(Enumerable.Range(1, elementsCount))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var consumerSettings = CreateConsumerSettings<string>(group1);

            var probe = KafkaConsumer
                        .CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
                        .Where(c => !c.Record.Value.Equals(InitialMsg))
                        .Select(c => c.Record.Value)
                        .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe.Cancel();
        }
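GivenInitializedTopic and the InitialMsg marker that consumers above filter out belong to the test fixture. A minimal sketch under assumed shape (the real helper also has an overload taking a topic name, as used in Example #12): it publishes a single marker message so the topic exists before consuming starts.

        private const string InitialMsg = "initial msg in topic, required to create the topic before any consumer subscribes to it";

        private async Task GivenInitializedTopic(TopicPartition topicPartition)
        {
            // Assumed: ProducerSettings.CreateKafkaProducer() builds the underlying Confluent producer
            using (var producer = ProducerSettings.CreateKafkaProducer())
            {
                await producer.ProduceAsync(topicPartition, new Message<Null, string> { Value = InitialMsg });
                producer.Flush(TimeSpan.FromSeconds(1));
            }
        }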
Example #4
        public async Task PlainSource_should_resume_stage_if_broker_unavailable()
        {
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            var config = ConsumerSettings<Null, string>.Create(Sys, null, null)
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var regex    = new Regex("\\[localhost:10092\\/bootstrap: Connect to [a-zA-Z0-9#:.*]* failed:");
            var logProbe = CreateTestProbe();

            Sys.EventStream.Subscribe<Info>(logProbe.Ref);

            var (control, probe) = CreateProbe(config, Subscriptions.Assignment(topicPartition1));
            probe.Request(1);

            AwaitAssert(() =>
            {
                var info = logProbe.ExpectMsg<Info>();
                regex.IsMatch(info.Message.ToString() ?? "").Should().BeTrue();
                info.Message.ToString().Should().Contain("[Resume]");
            });
            //AwaitCondition(() => control.IsShutdown.IsCompleted, TimeSpan.FromSeconds(10));
        }
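CreateProbe is another fixture helper; a minimal sketch of one assumed overload, which materializes a PlainSource into a (control, probe) pair so the test can drive demand and observe shutdown.

        private (IControl, TestSubscriber.Probe<string>) CreateProbe(ConsumerSettings<Null, string> settings, ISubscription subscription)
        {
            return KafkaConsumer
                   .PlainSource(settings, subscription)
                   .Select(c => c.Value)
                   .ToMaterialized(this.SinkProbe<string>(), Keep.Both)
                   .Run(Materializer);
        }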
Example #5
        public async Task PlainSource_with_directive_override_should_resume_on_deserialization_errors()
        {
            var callCount = 0;

            Directive Decider(Exception cause)
            {
                if (cause is ConsumeException ex && ex.Error.IsSerializationError())
                {
                    callCount++;
                    return Directive.Resume;
                }
                return Directive.Stop;
            }

            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            // this is twice elementsCount because the Decider is called twice for each exception
            callCount.Should().Be(elementsCount * 2);
            probe.Cancel();
        }
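ProduceStrings, used throughout these examples, presumably wraps a plain producer sink; a minimal sketch under that assumption, mirroring the inline producer code in Example #3.

        private Task ProduceStrings(TopicPartition topicPartition, IEnumerable<int> range, ProducerSettings<Null, string> producerSettings)
        {
            return Source
                   .From(range)
                   .Select(elem => new ProducerRecord<Null, string>(topicPartition, elem.ToString()))
                   .RunWith(KafkaProducer.PlainSink(producerSettings), Materializer);
        }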
Example #6
        public async Task PlainSource_should_resume_on_deserialization_errors()
        {
            Directive Decider(Exception cause) => cause is SerializationException
                ? Directive.Resume
                : Directive.Stop;

            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = ConsumerSettings<Null, int>.Create(Sys, null, new IntDeserializer())
                           .WithBootstrapServers(KafkaUrl)
                           .WithProperty("auto.offset.reset", "earliest")
                           .WithGroupId(group1);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), _materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            probe.Cancel();
        }
Example #7
        public async Task PlainSource_should_stop_on_errors()
        {
            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Deciders.StoppingDecider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            var error = probe.Request(elementsCount).ExpectEvent(TimeSpan.FromSeconds(5));

            error.Should().BeOfType<TestSubscriber.OnError>();
            var exception = ((TestSubscriber.OnError)error).Cause;

            exception.Should().BeOfType<ConsumeException>();
            ((ConsumeException)exception).Error.IsSerializationError().Should().BeTrue();

            probe.ExpectNoMsg(TimeSpan.FromSeconds(5));
            probe.Cancel();
        }
Example #8
        public async Task PlainSink_should_resume_on_deserialization_errors()
        {
            var callCount = 0;

            Directive Decider(Exception cause)
            {
                callCount++;
                switch (cause)
                {
                    case ProduceException<Null, string> ex when ex.Error.IsSerializationError():
                        return Directive.Resume;

                    default:
                        return Directive.Stop;
                }
            }

            var elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            var producerSettings = ProducerSettings<Null, string>
                                   .Create(Sys, null, new FailingSerializer())
                                   .WithBootstrapServers(Fixture.KafkaServer);

            var sink = KafkaProducer.PlainSink(producerSettings)
                       .AddAttributes(ActorAttributes.CreateSupervisionStrategy(Decider));

            var sourceTask = Source
                             .From(new[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
                             .Select(elem => new ProducerRecord<Null, string>(new TopicPartition(topic1, 0), elem.ToString()))
                             .RunWith(sink, Materializer);

            var timeoutTask  = Task.Delay(TimeSpan.FromSeconds(5));
            var completeTask = await Task.WhenAny(sourceTask, timeoutTask);

            if (completeTask == timeoutTask)
            {
                throw new Exception("Producer timed out");
            }

            var settings = CreateConsumerSettings<Null, string>(group1).WithValueDeserializer(new StringDeserializer());
            var probe    = KafkaConsumer
                           .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                           .Select(c => c.Value)
                           .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(elementsCount);
            for (var i = 0; i < 9; i++)
            {
                Log.Info($">>>>>>>>>>> {i}");
                probe.ExpectNext();
            }
            callCount.Should().Be(1);
            probe.Cancel();
        }
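The FailingSerializer that injects the ProduceException is not shown on this page; a plausible sketch, assuming it throws on the value "5" as the comment in the next example states:

        private class FailingSerializer : ISerializer<string>
        {
            public byte[] Serialize(string data, SerializationContext context)
            {
                // Fail serialization for exactly one of the ten produced elements
                if (data == "5")
                    throw new Exception("BOOM!");
                return Encoding.UTF8.GetBytes(data);
            }
        }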
Example #9
        public async Task SupervisionStrategy_Decider_on_PlainSink_should_work()
        {
            var callCount = 0;

            Directive Decider(Exception cause)
            {
                callCount++;
                switch (cause)
                {
                    case ProduceException<Null, string> ex when ex.Error.IsSerializationError():
                        return Directive.Resume;

                    default:
                        return Directive.Stop;
                }
            }

            var topic1 = CreateTopic(1);
            var group1 = CreateGroup(1);

            var producerSettings = ProducerSettings<Null, string>
                                   .Create(Sys, null, new FailingSerializer())
                                   .WithBootstrapServers(Fixture.KafkaServer);

            // The exception is injected into the sink by the FailingSerializer, which throws
            // when the message "5" is encountered.
            var sourceTask = Source
                             .From(new[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
                             .Select(elem => new ProducerRecord<Null, string>(new TopicPartition(topic1, 0), elem.ToString()))
                             .RunWith(
                                 KafkaProducer.PlainSink(producerSettings)
                                     .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider)),
                                 Materializer);

            await GuardWithTimeoutAsync(sourceTask, TimeSpan.FromSeconds(5));

            var settings = CreateConsumerSettings<Null, string>(group1).WithValueDeserializer(new StringDeserializer());
            var probe    = KafkaConsumer
                           .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                           .Select(c => c.Value)
                           .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(10);
            for (var i = 0; i < 9; i++)
            {
                var message = probe.ExpectNext();
                Log.Info($"> [{i}]: {message}");
            }
            callCount.Should().Be(1);
            probe.Cancel();
        }
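GuardWithTimeoutAsync mirrors the inline Task.WhenAny guard from Example #8; a minimal sketch under that assumption:

        private static async Task GuardWithTimeoutAsync(Task task, TimeSpan timeout)
        {
            var completed = await Task.WhenAny(task, Task.Delay(timeout));
            if (completed != task)
                throw new Exception($"Task timed out after {timeout}");
            await task; // propagate any failure from the guarded task
        }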
Example #10
        public async Task CommitterFlow_commits_offsets_from_CommittableSource(int batchSize)
        {
            var topic1          = CreateTopic(1);
            var topicPartition1 = new TopicPartition(topic1, 0);
            var group1          = CreateGroup(1);

            await GivenInitializedTopic(topicPartition1);

            await Source
            .From(Enumerable.Range(1, 100))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var consumerSettings  = CreateConsumerSettings<string>(group1);
            var committedElements = new ConcurrentQueue<string>();
            var committerSettings = CommitterSettings.WithMaxBatch(batchSize);

            var (task, probe1) = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
                                .WhereNot(c => c.Record.Value == InitialMsg)
                                .SelectAsync(10, elem =>
                                {
                                    committedElements.Enqueue(elem.Record.Value);
                                    return Task.FromResult(elem.CommitableOffset as ICommittable);
                                })
                                .Via(Committer.Flow(committerSettings))
                                .ToMaterialized(this.SinkProbe<Done>(), Keep.Both)
                                .Run(Materializer);

            probe1.Request(25 / batchSize);

            foreach (var _ in Enumerable.Range(1, 25 / batchSize))
            {
                probe1.ExpectNext(Done.Instance, TimeSpan.FromSeconds(10));
            }

            probe1.Cancel();

            AwaitCondition(() => task.IsShutdown.IsCompletedSuccessfully);

            var probe2 = KafkaConsumer.PlainSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                         .Select(_ => _.Value)
                         .RunWith(this.SinkProbe<string>(), Materializer);

            probe2.Request(75);
            foreach (var i in Enumerable.Range(committedElements.Count + 1, 75).Select(c => c.ToString()))
            {
                probe2.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe2.Cancel();
        }
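CreateConsumerSettings<T> appears in nearly every example; a minimal sketch of its assumed shape (Null keys, earliest offset reset, short stop timeout; the real helper presumably also selects a matching default deserializer):

        private ConsumerSettings<Null, TValue> CreateConsumerSettings<TValue>(string group)
        {
            return ConsumerSettings<Null, TValue>
                   .Create(Sys, null, null)
                   .WithBootstrapServers(Fixture.KafkaServer)
                   .WithStopTimeout(TimeSpan.FromSeconds(1))
                   .WithProperty("auto.offset.reset", "earliest")
                   .WithGroupId(group);
        }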
Example #11
        public async Task SupervisionStrategy_Decider_on_Consumer_Downstream_should_work()
        {
            var topic          = CreateTopic(1);
            var group          = CreateGroup(1);
            var topicPartition = new TopicPartition(topic, 0);
            var callCount      = 0;

            Directive Decider(Exception cause)
            {
                callCount++;
                if (cause.Message == "BOOM!")
                {
                    return Directive.Restart;
                }
                return Directive.Stop;
            }

            var consumerSettings = CreateConsumerSettings<string>(group);
            var counter          = 0;

            await Source.From(Enumerable.Range(1, 11))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var (_, probe) = KafkaConsumer
                            .PlainSource(consumerSettings, Subscriptions.Assignment(topicPartition))
                            .Select(c =>
                            {
                                counter++;
                                // fail once, when the counter reaches 5
                                if (counter == 5)
                                {
                                    throw new Exception("BOOM!");
                                }
                                return c.Message.Value;
                            })
                            .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                            .ToMaterialized(this.SinkProbe<string>(), Keep.Both)
                            .Run(Materializer);

            probe.Request(10);
            for (var i = 0; i < 9; i++)
            {
                var message = probe.ExpectNext(TimeSpan.FromSeconds(10));
                Log.Info(message);
            }
            probe.Cancel();

            callCount.Should().Be(1);
        }
Example #12
        public async Task PlainSource_should_fail_stage_if_broker_unavailable()
        {
            var topic1 = CreateTopic(1);
            var group1 = CreateGroup(1);

            await GivenInitializedTopic(topic1);

            var config = ConsumerSettings<Null, string>.Create(Sys, null, new StringDeserializer(Encoding.UTF8))
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var probe = CreateProbe(config, topic1, Subscriptions.Assignment(new TopicPartition(topic1, 0)));

            probe.Request(1).ExpectError().Should().BeOfType<KafkaException>();
        }
Example #13
        public async Task PlainSource_should_fail_stage_if_broker_unavailable()
        {
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            var config = ConsumerSettings<Null, string>.Create(Sys, null, null)
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var (control, probe) = CreateProbe(config, Subscriptions.Assignment(topicPartition1));
            probe.Request(1);
            AwaitCondition(() => control.IsShutdown.IsCompleted, TimeSpan.FromSeconds(10));
        }
Example #14
        public async Task SupervisionStrategy_Decider_on_Producer_Upstream_should_work()
        {
            var topic          = CreateTopic(1);
            var group          = CreateGroup(1);
            var topicPartition = new TopicPartition(topic, 0);
            var callCount      = 0;

            // create a custom Decider with a "Restart" directive in the event of DivideByZeroException
            Directive Decider(Exception cause)
            {
                callCount++;
                return cause is DivideByZeroException
                    ? Directive.Restart
                    : Directive.Stop;
            }

            var consumerSettings = CreateConsumerSettings<string>(group);
            var numbers          = Source.From(new[] { 9, 8, 7, 6, 0, 5, 4, 3, 2, 1 });
            await numbers
            // A DivideByZeroException is thrown here. Since it happens upstream of the producer sink,
            // the stream is restarted when the exception occurs and the offending message is ignored;
            // all messages before and after the exception are sent to the Kafka producer.
            .Via(Flow.Create<int>().Select(x => $"1/{x} is {1/x} w/ integer division"))
            .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition, elem))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var (_, probe) = KafkaConsumer
                            .PlainSource(consumerSettings, Subscriptions.Assignment(topicPartition))
                            .Select(c => c.Message.Value)
                            .ToMaterialized(this.SinkProbe<string>(), Keep.Both)
                            .Run(Materializer);

            probe.Request(10);
            for (var i = 0; i < 9; i++)
            {
                Log.Info(probe.ExpectNext(TimeSpan.FromSeconds(10)));
            }
            probe.Cancel();

            callCount.Should().BeGreaterThan(0);
        }
Example #15
        public async Task PlainSource_consumes_messages_from_KafkaProducer_with_topicPartition_assignment()
        {
            int elementsCount = 100;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await GivenInitializedTopic(topic1);

            await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);

            var consumerSettings = CreateConsumerSettings(group1);

            var probe = CreateProbe(consumerSettings, topic1, Subscriptions.Assignment(new TopicPartition(topic1, 0)));

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe.Cancel();
        }
Example #16
        public async Task Default_Decider_on_PlainSource_should_resume_on_KafkaException()
        {
            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            var sourceTask = ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            await GuardWithTimeoutAsync(sourceTask, TimeSpan.FromSeconds(3));

            var settings = CreateConsumerSettings<Null, string>(group1).WithAutoCreateTopicsEnabled(false);

            // The stage produces an Error with ErrorCode.Local_UnknownPartition because we subscribe to partition 5, which does not exist.
            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 5)))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(1));

            probe.Cancel();
        }
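The example above relies on the library's default decider. A rough equivalent of the behavior it exercises (an assumption for illustration, not the actual implementation) would resume on non-fatal KafkaExceptions and stop otherwise:

        private static Directive DefaultLikeDecider(Exception cause)
        {
            // e.g. a ConsumeException carrying ErrorCode.Local_UnknownPartition is non-fatal
            if (cause is KafkaException ex && !ex.Error.IsFatal)
                return Directive.Resume;
            return Directive.Stop;
        }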
Example #17
        public async Task PlainSource_should_resume_on_deserialization_errors()
        {
            Directive Decider(Exception cause) => cause is SerializationException
                ? Directive.Resume
                : Directive.Stop;

            int elementsCount = 10;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var settings = CreateConsumerSettings<int>(group1).WithValueDeserializer(Deserializers.Int32);

            var probe = KafkaConsumer
                        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                        .Select(c => c.Value)
                        .RunWith(this.SinkProbe<int>(), Materializer);

            probe.Request(elementsCount);
            probe.ExpectNoMsg(TimeSpan.FromSeconds(10));
            probe.Cancel();
        }
Example #18
        public async Task Directive_Restart_on_failed_Consumer_should_restart_Consumer()
        {
            var topic                  = CreateTopic(1);
            var group                  = CreateGroup(1);
            var topicPartition         = new TopicPartition(topic, 0);
            var serializationCallCount = 0;
            var callCount              = 0;

            Directive Decider(Exception cause)
            {
                callCount++;
                if (cause is ConsumeException ce && ce.Error.IsSerializationError())
                {
                    serializationCallCount++;
                    return Directive.Restart;
                }
                return Directive.Stop;
            }

            var serializer       = new Serializer<int>(BitConverter.GetBytes);
            var producerSettings = ProducerSettings<Null, int>
                                   .Create(Sys, null, serializer)
                                   .WithBootstrapServers(Fixture.KafkaServer);

            await Source.From(Enumerable.Range(1, 10))
            .Select(elem => new ProducerRecord<Null, int>(topicPartition, elem))
            .RunWith(KafkaProducer.PlainSink(producerSettings), Materializer);

            // Exception is injected once using the FailOnceDeserializer
            var deserializer     = new FailOnceDeserializer<int>(5, data => BitConverter.ToInt32(data.Span));
            var consumerSettings = ConsumerSettings<Null, int>.Create(Sys, null, deserializer)
                                   .WithBootstrapServers(Fixture.KafkaServer)
                                   .WithStopTimeout(TimeSpan.FromSeconds(1))
                                   .WithProperty("auto.offset.reset", "earliest")
                                   .WithGroupId(group);

            var (_, probe) = KafkaConsumer
                            .PlainSource(consumerSettings, Subscriptions.Assignment(topicPartition))
                            .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
                            .Select(c => c.Message.Value)
                            .ToMaterialized(this.SinkProbe<int>(), Keep.Both)
                            .Run(Materializer);

            probe.Request(20);
            var pulled = new List<int>();

            for (var i = 0; i < 14; i++)
            {
                var msg = probe.ExpectNext();
                pulled.Add(msg);
            }

            probe.ExpectNoMsg(TimeSpan.FromSeconds(2));
            probe.Cancel();

            pulled.Should().BeEquivalentTo(new[] { 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, opt => opt.WithStrictOrdering());

            // Decider should be called twice, because deciders are called in BaseSingleSourceLogic and KafkaConsumerActor
            callCount.Should().Be(2);
            serializationCallCount.Should().Be(2);
        }
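The Serializer<T> wrapper and FailOnceDeserializer<T> are fixture types; plausible sketches follow (assumed shapes, inferred from how they are constructed above). The deserializer throws exactly once, the first time it sees the given value, which is what makes the Restart directive re-read messages 1-4 before 5 succeeds on the second attempt.

        private class Serializer<T> : ISerializer<T>
        {
            private readonly Func<T, byte[]> _serialize;
            public Serializer(Func<T, byte[]> serialize) => _serialize = serialize;
            public byte[] Serialize(T data, SerializationContext context) => _serialize(data);
        }

        private class FailOnceDeserializer<T> : IDeserializer<T>
        {
            private readonly T _failOn;
            private readonly Func<ReadOnlyMemory<byte>, T> _deserialize;
            private bool _failed;

            public FailOnceDeserializer(T failOn, Func<ReadOnlyMemory<byte>, T> deserialize)
            {
                _failOn = failOn;
                _deserialize = deserialize;
            }

            public T Deserialize(ReadOnlySpan<byte> data, bool isNull, SerializationContext context)
            {
                var value = _deserialize(new ReadOnlyMemory<byte>(data.ToArray()));
                if (!_failed && Equals(value, _failOn))
                {
                    _failed = true; // only fail the first time the value is seen
                    throw new SerializationException($"Failing once on value {_failOn}");
                }
                return value;
            }
        }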
Example #19
        public async Task AtMostOnceSource_Should_stop_consuming_actor_when_used_with_Take()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            await ProduceStrings(new TopicPartition(topic, 0), Enumerable.Range(1, 10), ProducerSettings);

            var (control, result) = KafkaConsumer.AtMostOnceSource(CreateConsumerSettings<string>(group), Subscriptions.Assignment(new TopicPartition(topic, 0)))
                                   .Select(m => m.Value)
                                   .Take(5)
                                   .ToMaterialized(Sink.Seq<string>(), Keep.Both)
                                   .Run(Materializer);

            AwaitCondition(() => control.IsShutdown.IsCompletedSuccessfully, TimeSpan.FromSeconds(10));

            result.Result.Should().BeEquivalentTo(Enumerable.Range(1, 5).Select(i => i.ToString()));
        }
Example #20
        public async Task ExternalPlainSource_verify_consuming_actor_pause_resume_partitions_works_fine()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            // Create the consumer actor
            var consumer = Sys.ActorOf(KafkaConsumerActorMetadata.GetProps(CreateConsumerSettings<string>(group)));

            // Produce 100 messages to each partition
            await ProduceStrings(new TopicPartition(topic, 0), Enumerable.Range(1, 100), ProducerSettings);
            await ProduceStrings(new TopicPartition(topic, 1), Enumerable.Range(1, 100), ProducerSettings);

            // Subscribe to partitions
            var (control1, probe1) = CreateExternalPlainSourceProbe<string>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 0)));
            var (control2, probe2) = CreateExternalPlainSourceProbe<string>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 1)));

            var probes = new[] { probe1, probe2 };

            // All partitions resumed
            probes.ForEach(p => p.Request(1));
            probes.ForEach(p => p.ExpectNext(TimeSpan.FromSeconds(10)));

            await Task.Delay(1000); // All partitions become paused when there is no demand

            // Make the first partition resumed and the second paused
            probe1.Request(1);
            probe1.ExpectNext(TimeSpan.FromSeconds(10));

            await Task.Delay(1000); // All partitions become paused when there is no demand

            // Make the second partition resumed and the first paused
            probe2.Request(1);
            probe2.ExpectNext(TimeSpan.FromSeconds(10));

            await Task.Delay(1000); // All partitions become paused when there is no demand

            // All partitions resumed back
            probes.ForEach(p => p.Request(1));
            probes.ForEach(p => p.ExpectNext(TimeSpan.FromSeconds(10)));

            // Stop and check graceful shutdown
            probes.ForEach(p => p.Cancel());
            AwaitCondition(() => control1.IsShutdown.IsCompletedSuccessfully && control2.IsShutdown.IsCompletedSuccessfully);

            // Cleanup
            consumer.Tell(KafkaConsumerActorMetadata.Internal.Stop.Instance, ActorRefs.NoSender);
        }
Example #21
        public async Task ExternalPlainSource_with_external_consumer_Should_work()
        {
            var elementsCount = 10;
            var topic         = CreateTopic(1);
            var group         = CreateGroup(1);

            // The consumer is represented by an actor
            var consumer = Sys.ActorOf(KafkaConsumerActorMetadata.GetProps(CreateConsumerSettings<string>(group)));

            // Manually assign topic partitions to it
            var (control1, probe1) = CreateExternalPlainSourceProbe<string>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 0)));
            var (control2, probe2) = CreateExternalPlainSourceProbe<string>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 1)));

            // Produce messages to partitions
            await ProduceStrings(new TopicPartition(topic, new Partition(0)), Enumerable.Range(1, elementsCount), ProducerSettings);
            await ProduceStrings(new TopicPartition(topic, new Partition(1)), Enumerable.Range(1, elementsCount), ProducerSettings);

            // Request the produced messages and consume them
            probe1.Request(elementsCount);
            probe2.Request(elementsCount);
            probe1.Within(TimeSpan.FromSeconds(10), () => probe1.ExpectNextN(elementsCount));
            probe2.Within(TimeSpan.FromSeconds(10), () => probe2.ExpectNextN(elementsCount));

            // Stop stages
            probe1.Cancel();
            probe2.Cancel();

            // Make sure stages are stopped gracefully
            AwaitCondition(() => control1.IsShutdown.IsCompletedSuccessfully && control2.IsShutdown.IsCompletedSuccessfully);

            // Cleanup
            consumer.Tell(KafkaConsumerActorMetadata.Internal.Stop.Instance, ActorRefs.NoSender);
        }
Example #22
        public async Task CommitableSource_resume_from_commited_offset()
        {
            var topic1          = CreateTopic(1);
            var topicPartition1 = new TopicPartition(topic1, 0);
            var group1          = CreateGroup(1);
            var group2          = CreateGroup(2);

            await GivenInitializedTopic(topicPartition1);

            await Source
            .From(Enumerable.Range(1, 100))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var consumerSettings  = CreateConsumerSettings<string>(group1);
            var committedElements = new ConcurrentQueue<string>();

            var (task, probe1) = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
                                .WhereNot(c => c.Record.Value == InitialMsg)
                                .SelectAsync(10, async elem =>
                                {
                                    await elem.CommitableOffset.Commit();
                                    committedElements.Enqueue(elem.Record.Value);
                                    return Done.Instance;
                                })
                                .ToMaterialized(this.SinkProbe<Done>(), Keep.Both)
                                .Run(Materializer);

            probe1.Request(25);

            foreach (var _ in Enumerable.Range(1, 25))
            {
                probe1.ExpectNext(Done.Instance, TimeSpan.FromSeconds(10));
            }

            probe1.Cancel();

            AwaitCondition(() => task.IsShutdown.IsCompletedSuccessfully);

            var probe2 = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                         .Select(_ => _.Record.Value)
                         .RunWith(this.SinkProbe<string>(), Materializer);

            // Note that due to buffers and SelectAsync(10) the committed offset is more
            // than 26, and that is not wrong

            // some concurrent publish
            await Source
            .From(Enumerable.Range(101, 100))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            probe2.Request(100);
            foreach (var i in Enumerable.Range(committedElements.Count + 1, 100).Select(c => c.ToString()))
            {
                probe2.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe2.Cancel();

            // another consumer should see all
            var probe3 = KafkaConsumer.CommittableSource(consumerSettings.WithGroupId(group2), Subscriptions.Assignment(new TopicPartition(topic1, 0)))
                         .WhereNot(c => c.Record.Value == InitialMsg)
                         .Select(_ => _.Record.Value)
                         .RunWith(this.SinkProbe<string>(), Materializer);

            probe3.Request(100);
            foreach (var i in Enumerable.Range(1, 100).Select(c => c.ToString()))
            {
                probe3.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe3.Cancel();
        }
Example #23
        public async Task SupervisionStrategy_Decider_on_complex_stream_should_work()
        {
            var topic                   = CreateTopic(1);
            var group                   = CreateGroup(1);
            var topicPartition          = new TopicPartition(topic, 0);
            var committedTopicPartition = new TopicPartition($"{topic}-done", 0);
            var callCount               = 0;

            Directive Decider(Exception cause)
            {
                callCount++;
                return Directive.Resume;
            }

            var committerSettings = CommitterSettings.Create(Sys);
            var consumerSettings  = CreateConsumerSettings<string>(group);
            var counter           = 0;

            // arrange
            await Source.From(Enumerable.Range(1, 10))
            .Select(elem => new ProducerRecord<Null, string>(topicPartition, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            // act
            var drainingControl = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition))
                                  .Via(Flow.Create<CommittableMessage<Null, string>>().Select(x =>
                                  {
                                      counter++;
                                      // The exception happens here: fail once, when the counter reaches 5
                                      if (counter == 5)
                                      {
                                          throw new Exception("BOOM!");
                                      }
                                      return x;
                                  }))
                                  .WithAttributes(Attributes.CreateName("CommitableSource").And(ActorAttributes.CreateSupervisionStrategy(Decider)))
                                  .Select(c => (c.Record.Topic, c.Record.Message.Value, c.CommitableOffset))
                                  .SelectAsync(1, async t =>
                                  {
                                      Log.Info($"[{t.Topic}]: {t.Value}");
                                      // simulate a request-response call that takes 10ms to complete
                                      await Task.Delay(10);
                                      return t;
                                  })
                                  .Select(t => ProducerMessage.Single(new ProducerRecord<Null, string>(committedTopicPartition, t.Value),
                                                                      t.CommitableOffset))
                                  .Via(KafkaProducer.FlexiFlow<Null, string, ICommittableOffset>(ProducerSettings)).WithAttributes(Attributes.CreateName("FlexiFlow"))
                                  .Select(m => (ICommittable)m.PassThrough)
                                  .AlsoToMaterialized(Committer.Sink(committerSettings), DrainingControl<NotUsed>.Create)
                                  .To(Flow.Create<ICommittable>()
                                      .Async()
                                      .GroupedWithin(1000, TimeSpan.FromSeconds(1))
                                      .Select(c => c.Count())
                                      .Log("MsgCount").AddAttributes(Attributes.CreateLogLevels(LogLevel.InfoLevel))
                                      .To(Sink.Ignore<int>()))
                                  .Run(Sys.Materializer());

            await Task.Delay(TimeSpan.FromSeconds(5));

            await GuardWithTimeoutAsync(drainingControl.DrainAndShutdown(), TimeSpan.FromSeconds(10));

            // There should be only 1 decider call
            callCount.Should().Be(1);

            // Assert that all of the messages, except for those that failed in the stage, got committed
            var settings = CreateConsumerSettings<Null, string>(group);
            var probe    = KafkaConsumer
                           .PlainSource(settings, Subscriptions.Assignment(committedTopicPartition))
                           .Select(c => c.Message.Value)
                           .RunWith(this.SinkProbe<string>(), Materializer);

            probe.Request(9);
            var messages = new List<string>();

            for (var i = 0; i < 9; ++i)
            {
                var message = probe.RequestNext();
                messages.Add(message);
            }

            // Message "5" is missing because the exception happened downstream of the source and we chose to
            // ignore it in the decider
            messages.Should().BeEquivalentTo(new[] { "1", "2", "3", "4", "6", "7", "8", "9", "10" });
            probe.Cancel();
        }