// Code example #1
        /// <inheritdoc />
        protected override IActorRef CreateConsumerActor()
        {
            // Bridge the partition rebalance notifications back onto the stage's thread.
            var onPartitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
            var onPartitionsRevoked  = GetAsyncCallback<IEnumerable<TopicPartitionOffset>>(PartitionsRevoked);

            IPartitionEventHandler stageHandler = new PartitionEventHandlers.AsyncCallbacks(onPartitionsAssigned, onPartitionsRevoked);

            // When the subscription carries a user-provided partition events handler,
            // chain it in front of the stage's own handler.
            IPartitionEventHandler eventHandler;
            if (_subscription is IAutoSubscription autoSubscription && autoSubscription.PartitionEventsHandler.HasValue)
            {
                eventHandler = new PartitionEventHandlers.Chain(autoSubscription.PartitionEventsHandler.Value, stageHandler);
            }
            else
            {
                eventHandler = stageHandler;
            }

            // Fall back to a no-op statistics handler when none was configured.
            IStatisticsHandler statisticsHandler;
            if (_subscription.StatisticsHandler.HasValue)
            {
                statisticsHandler = _subscription.StatisticsHandler.Value;
            }
            else
            {
                statisticsHandler = new StatisticsHandlers.Empty();
            }

            // Give subclasses a chance to wrap or replace the partition events handling.
            eventHandler = AddToPartitionAssignmentHandler(eventHandler);

            // The consumer actor must live inside an actor-based materializer's system.
            if (!(Materializer is ActorMaterializer actorMaterializer))
            {
                throw new ArgumentException($"Expected {typeof(ActorMaterializer)} but got {Materializer.GetType()}");
            }

            var props = KafkaConsumerActorMetadata.GetProps(SourceActor.Ref, _settings, eventHandler, statisticsHandler);
            return actorMaterializer.System
                                    .AsInstanceOf<ExtendedActorSystem>()
                                    .SystemActorOf(props, $"kafka-consumer-{_actorNumber}");
        }
        /// <summary>
        /// Verifies that, with one shared external consumer actor, only the sources that have
        /// outstanding demand fail on a value-deserialization error, while a source without
        /// demand keeps running and can be cancelled normally.
        /// </summary>
        public async Task ExternalPlainSource_should_be_stopped_on_serialization_error_only_when_requested_messages()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            // Make consumer expect numeric messages
            var settings = CreateConsumerSettings <int>(group).WithValueDeserializer(Deserializers.Int32);
            var consumer = Sys.ActorOf(KafkaConsumerActorMetadata.GetProps(settings));

            // Subscribe to partitions: three independent sources sharing the same consumer actor
            var(control1, probe1) = CreateExternalPlainSourceProbe <int>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 0)));
            var(control2, probe2) = CreateExternalPlainSourceProbe <int>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 1)));
            var(control3, probe3) = CreateExternalPlainSourceProbe <int>(consumer, Subscriptions.Assignment(new TopicPartition(topic, 2)));

            // request from 2 streams; the third deliberately signals no demand
            probe1.Request(1);
            probe2.Request(1);
            await Task.Delay(500); // To establish demand

            // Send string messages that the Int32 value deserializer cannot decode
            await ProduceStrings(new TopicPartition(topic, 0), new int[] { 1 }, ProducerSettings);
            await ProduceStrings(new TopicPartition(topic, 1), new int[] { 1 }, ProducerSettings);

            // First two stages should fail, and only stage without demand should keep going
            probe1.ExpectError().Should().BeOfType <SerializationException>();
            probe2.ExpectError().Should().BeOfType <SerializationException>();
            probe3.Cancel();

            // Make sure source tasks finish accordingly.
            // NOTE(review): controls 1/2 use IsCompleted (their stages ended with an error, so the
            // shutdown task may not complete successfully) while control3 uses IsCompletedSuccessfully
            // (clean cancellation). Looks intentional — confirm against Control.IsShutdown semantics.
            AwaitCondition(() => control1.IsShutdown.IsCompleted && control2.IsShutdown.IsCompleted && control3.IsShutdown.IsCompletedSuccessfully);

            // Cleanup: stop the shared consumer actor
            consumer.Tell(KafkaConsumerActorMetadata.Internal.Stop.Instance, ActorRefs.NoSender);
        }
        /// <summary>
        /// Verifies that two plain sources attached to a single externally-managed consumer
        /// actor each receive the messages produced to their manually assigned partition,
        /// and that both stages shut down gracefully on downstream cancellation.
        /// </summary>
        public async Task ExternalPlainSource_with_external_consumer_Should_work()
        {
            const int messagesPerPartition = 10;
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            // The Kafka consumer is represented by an actor shared between both sources
            var consumerActor = Sys.ActorOf(KafkaConsumerActorMetadata.GetProps(CreateConsumerSettings <string>(group)));

            // Manually assign one topic partition to each source
            var(control1, probe1) = CreateExternalPlainSourceProbe <string>(consumerActor, Subscriptions.Assignment(new TopicPartition(topic, 0)));
            var(control2, probe2) = CreateExternalPlainSourceProbe <string>(consumerActor, Subscriptions.Assignment(new TopicPartition(topic, 1)));

            // Publish the test messages into both partitions
            await ProduceStrings(new TopicPartition(topic, new Partition(0)), Enumerable.Range(1, messagesPerPartition), ProducerSettings);
            await ProduceStrings(new TopicPartition(topic, new Partition(1)), Enumerable.Range(1, messagesPerPartition), ProducerSettings);

            // Signal demand and expect every produced message on the matching probe
            probe1.Request(messagesPerPartition);
            probe2.Request(messagesPerPartition);
            probe1.Within(TimeSpan.FromSeconds(10), () => probe1.ExpectNextN(messagesPerPartition));
            probe2.Within(TimeSpan.FromSeconds(10), () => probe2.ExpectNextN(messagesPerPartition));

            // Cancel both downstreams
            probe1.Cancel();
            probe2.Cancel();

            // Both stages must complete their shutdown successfully
            AwaitCondition(() => control1.IsShutdown.IsCompletedSuccessfully && control2.IsShutdown.IsCompletedSuccessfully);

            // Cleanup: stop the shared consumer actor
            consumerActor.Tell(KafkaConsumerActorMetadata.Internal.Stop.Instance, ActorRefs.NoSender);
        }
        /// <summary>
        /// Verifies that the shared consumer actor pauses partitions whose source has no
        /// outstanding demand and resumes them once demand returns, independently per partition.
        /// </summary>
        public async Task ExternalPlainSource_verify_consuming_actor_pause_resume_partitions_works_fine()
        {
            var topic = CreateTopic(1);
            var group = CreateGroup(1);

            // Shared consumer actor that both sources attach to
            var consumerActor = Sys.ActorOf(KafkaConsumerActorMetadata.GetProps(CreateConsumerSettings <string>(group)));

            // Pre-load both partitions so messages are always available when a partition is resumed
            await ProduceStrings(new TopicPartition(topic, 0), Enumerable.Range(1, 100), ProducerSettings);
            await ProduceStrings(new TopicPartition(topic, 1), Enumerable.Range(1, 100), ProducerSettings);

            // One externally-managed plain source per partition
            var(control1, probe1) = CreateExternalPlainSourceProbe <string>(consumerActor, Subscriptions.Assignment(new TopicPartition(topic, 0)));
            var(control2, probe2) = CreateExternalPlainSourceProbe <string>(consumerActor, Subscriptions.Assignment(new TopicPartition(topic, 1)));

            var probes = new[] { probe1, probe2 };

            // Demand on both probes: both partitions are resumed and deliver an element each
            foreach (var probe in probes)
            {
                probe.Request(1);
            }
            foreach (var probe in probes)
            {
                probe.ExpectNext(TimeSpan.FromSeconds(10));
            }

            await Task.Delay(1000); // With no outstanding demand, all partitions should become paused

            // Demand only on the first probe: its partition resumes, the other stays paused
            probe1.Request(1);
            probe1.ExpectNext(TimeSpan.FromSeconds(10));

            await Task.Delay(1000); // With no outstanding demand, all partitions should become paused

            // Demand only on the second probe: its partition resumes, the first stays paused
            probe2.Request(1);
            probe2.ExpectNext(TimeSpan.FromSeconds(10));

            await Task.Delay(1000); // With no outstanding demand, all partitions should become paused

            // Demand on both again: both partitions resume
            foreach (var probe in probes)
            {
                probe.Request(1);
            }
            foreach (var probe in probes)
            {
                probe.ExpectNext(TimeSpan.FromSeconds(10));
            }

            // Cancel both downstreams and check for graceful shutdown
            foreach (var probe in probes)
            {
                probe.Cancel();
            }
            AwaitCondition(() => control1.IsShutdown.IsCompletedSuccessfully && control2.IsShutdown.IsCompletedSuccessfully);

            // Cleanup: stop the shared consumer actor
            consumerActor.Tell(KafkaConsumerActorMetadata.Internal.Stop.Instance, ActorRefs.NoSender);
        }
// Code example #5
        /// <inheritdoc />
        protected override IActorRef CreateConsumerActor()
        {
            // Bridge the partition rebalance notifications back onto the stage's thread.
            var onPartitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
            var onPartitionsRevoked  = GetAsyncCallback<IEnumerable<TopicPartitionOffset>>(PartitionsRevoked);

            IPartitionEventHandler<K, V> eventHandler =
                new AsyncCallbacksPartitionEventHandler<K, V>(onPartitionsAssigned, onPartitionsRevoked);

            // Give subclasses a chance to wrap or replace the partition events handling.
            eventHandler = AddToPartitionAssignmentHandler(eventHandler);

            // The consumer actor must live inside an actor-based materializer's system.
            if (!(Materializer is ActorMaterializer actorMaterializer))
            {
                throw new ArgumentException($"Expected {typeof(ActorMaterializer)} but got {Materializer.GetType()}");
            }

            var props = KafkaConsumerActorMetadata.GetProps(SourceActor.Ref, _settings, eventHandler);
            return actorMaterializer.System
                                    .AsInstanceOf<ExtendedActorSystem>()
                                    .SystemActorOf(props, $"kafka-consumer-{_actorNumber}");
        }