Example #1
        public async Task PlainSource_consumes_messages_from_KafkaProducer_with_topicPartitionOffset_assignment()
        {
            int elementsCount = 100;
            int offset        = 50;
            var topic1        = CreateTopic(1);
            var group1        = CreateGroup(1);

            await GivenInitializedTopic(topic1);

            await Produce(topic1, Enumerable.Range(1, elementsCount), ProducerSettings);

            var consumerSettings = CreateConsumerSettings(group1);

            var probe = CreateProbe(consumerSettings, topic1, Subscriptions.AssignmentWithOffset(new TopicPartitionOffset(topic1, 0, new Offset(offset))));

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(offset, elementsCount - offset).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe.Cancel();
        }
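
For context, `CreateProbe` is a helper on the test base class. A minimal sketch of what it plausibly looks like (the exact signature is an assumption) wires a `PlainSource` into an Akka.Streams TestKit probe:

        // Hypothetical reconstruction of the CreateProbe helper used above:
        // run a PlainSource into a test probe that emits the message values.
        private TestSubscriber.Probe<string> CreateProbe(
            ConsumerSettings<Null, string> consumerSettings, string topic, ISubscription subscription)
        {
            return KafkaConsumer
                .PlainSource(consumerSettings, subscription)
                .Select(c => c.Value)
                .RunWith(this.SinkProbe<string>(), Materializer);
        }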
Example #2
        public async Task Committable_consumer_with_failed_downstream_stage_result_should_be_gapless()
        {
            var topic          = CreateTopic(1);
            var group          = CreateGroup(1);
            var topicPartition = new TopicPartition(topic, 0);

            var consumerSettings = ConsumerSettings<Null, string>.Create(Sys, null, null)
                                   .WithBootstrapServers(Fixture.KafkaServer)
                                   .WithStopTimeout(TimeSpan.FromSeconds(1))
                                   .WithProperty("auto.offset.reset", "earliest")
                                   .WithGroupId(group);

            var counter = 0;

            await Source.From(Enumerable.Range(1, 11))
                .Select(elem => new ProducerRecord<Null, string>(topicPartition, elem.ToString()))
                .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            // Offset.Unset starts from the last committed offset for the group
            // (or from auto.offset.reset when nothing has been committed yet)
            var probe = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.AssignmentWithOffset(new TopicPartitionOffset(topicPartition, Offset.Unset)))
                        .Select(t =>
                        {
                            counter++;
                            // fail once, on the 7th message
                            if (counter == 7)
                            {
                                throw new Exception("BOOM!");
                            }
                            return t;
                        })
                        .SelectAsync(1, async elem =>
                        {
                            // commit each offset before emitting the value downstream
                            await elem.CommitableOffset.Commit();
                            return elem.Record.Value;
                        })
                        .ToMaterialized(this.SinkProbe<string>(), Keep.Right)
                        .Run(Materializer);

            var messages = new List<string>();

            probe.Request(11);
            for (var i = 0; i < 6; i++)
            {
                messages.Add(probe.ExpectNext(TimeSpan.FromSeconds(5)));
            }

            // the stream fails on the 7th message
            var err = probe.ExpectEvent();

            err.Should().BeOfType<TestSubscriber.OnError>();
            var exception = ((TestSubscriber.OnError)err).Cause;

            exception.Message.Should().Be("BOOM!");

            // stream should be dead here
            probe.ExpectNoMsg(TimeSpan.FromSeconds(5));
            probe.Cancel();

            // restart dead stream
            probe = KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.AssignmentWithOffset(new TopicPartitionOffset(topicPartition, Offset.Unset)))
                    .SelectAsync(1, async elem =>
                    {
                        await elem.CommitableOffset.Commit();
                        return elem.Record.Value;
                    })
                    .ToMaterialized(this.SinkProbe<string>(), Keep.Right)
                    .Run(Materializer);

            probe.Request(11);
            // the restarted stream resumes at the first uncommitted message (the 7th)
            // and delivers the remaining five
            for (var i = 0; i < 5; i++)
            {
                messages.Add(probe.ExpectNext(TimeSpan.FromSeconds(5)));
            }
            probe.Cancel();

            // end result should be gapless
            messages.Select(s => int.Parse(s)).Should().BeEquivalentTo(Enumerable.Range(1, 11));
        }
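
Committing every offset one at a time, as this test does, keeps the gapless assertion easy to reason about, but it would be slow in production. A minimal sketch of the batched alternative, assuming Akka.Streams.Kafka exposes the Alpakka-style `Committer`/`CommitterSettings` API and using a hypothetical `ProcessAsync` handler:

            // Sketch: at-least-once processing with batched offset commits.
            // CommitterSettings/Committer.Sink are assumed to mirror the Alpakka API;
            // ProcessAsync is a hypothetical message handler, not part of the test above.
            var committerSettings = CommitterSettings.Create(Sys);
            KafkaConsumer.CommittableSource(consumerSettings, Subscriptions.Topics(topic))
                .SelectAsync(1, async msg =>
                {
                    await ProcessAsync(msg.Record.Value);
                    return (ICommittable)msg.CommitableOffset;
                })
                .ToMaterialized(Committer.Sink(committerSettings), Keep.Right)
                .Run(Materializer);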
Example #3
        private void StartConsuming()
        {
#if DEBUG
            Console.WriteLine("Start Consuming");
#endif
            allEofsFound = new TaskCompletionSource<bool>();

            //use partition EOF events to know when we have caught up on each topic partition
            var consumerSettings = ConsumerSettings<string, byte[]>
                                   .Create(settings.KafkaConfig, Deserializers.Utf8, Deserializers.ByteArray)
                                   .WithBootstrapServers(settings.KafkaConfig.GetString("bootstrap.servers"))
                                   .WithGroupId(settings.KafkaConfig.GetString("groupid.prefix") + Guid.NewGuid())
                                   .WithProperty("enable.partition.eof", "true");

            var adminClientBuilder = new AdminClientBuilder(consumerSettings.Properties);

            Metadata metadata = null;
            using (var adminClient = adminClientBuilder.Build())
            {
                metadata = adminClient.GetMetadata(TimeSpan.FromSeconds(10));
            }

            if (metadata == null)
            {
                throw new Exception("can not retrieve metadata from bootstrap servers");
            }
            var knownTopicPartitions = new HashSet<TopicPartition>();
            foreach (var metadataTopic in metadata.Topics)
            {
                foreach (var metadataTopicPartition in metadataTopic.Partitions)
                {
                    knownTopicPartitions.Add(new TopicPartition(metadataTopic.Topic, metadataTopicPartition.PartitionId));
                }
            }

            var topicsNotInKafka = 0;
            // must be initialized before the loop below, which already records
            // partitions that are absent from Kafka as "at EOF"
            eofsFound = new HashSet<TopicPartition>();

            //create a consumer for all enabled partition topics
            var tposForSubscription = new List<TopicPartitionOffset>();
            foreach (var tp in enabledTopicPartitions)
            {
                if (knownTopicPartitions.Contains(tp))
                {
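                    // resume one past the last offset we already persisted,
                    // otherwise replay the partition from the beginning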
                    tposForSubscription.Add(currentOffsets.TryGetValue(tp, out long offset)
                        ? new TopicPartitionOffset(tp, offset + 1)
                        : new TopicPartitionOffset(tp, Offset.Beginning));
                }
                else
                {
                    //if not present in Kafka, then we know we are already at the end
                    eofsFound.Add(tp);
                    topicsNotInKafka++;
                }
            }

            if (tposForSubscription.Count == 0)
            {
                //nothing to replay; signal completion immediately
                allEofsFound.SetResult(true);
                return;
            }

            var subscription = Subscriptions.AssignmentWithOffset(tposForSubscription.ToArray());
            var source       = KafkaConsumer.PlainSource(consumerSettings, subscription);

            var writeBatches = new Dictionary<TopicPartition, WriteBatch>();

            //normally this won't take long: the per-partition progress is also stored in RocksDB.
            //If we need to re-read all events, however, this can take a while.
            var cts = new CancellationTokenSource();

            var sourceTask = source
                             // completes the stream gracefully once cts is cancelled below
                             .Via(cts.Token.AsFlow<ConsumeResult<string, byte[]>>(true))
                             .RunForeach(msg =>
            {
                if (msg.IsPartitionEOF)
                {
                    //safe, as this is during startup; no one can touch this topic partition yet.
                    currentOffsets[msg.TopicPartition] = msg.Offset;

                    eofsFound.Add(msg.TopicPartition);
                    if (writeBatches.TryGetValue(msg.TopicPartition, out var b))
                    {
                        database.Write(b, rocksDbWriteOptions);
                        b.Dispose();
                        writeBatches.Remove(msg.TopicPartition);
                    }

                    if (eofListeners.TryGetValue(msg.TopicPartition, out var listeners))
                    {
                        foreach (var taskCompletionSource in listeners)
                        {
                            taskCompletionSource.SetResult(true);
                        }
                        eofListeners.Remove(msg.TopicPartition);
                    }

                    if (eofsFound.Count != tposForSubscription.Count + topicsNotInKafka)
                    {
                        return;
                    }

                    // every assigned partition (plus those absent from Kafka) has
                    // reached EOF: the startup replay is complete, so stop the stream
                    allEofsFound.SetResult(true);
                    cts.Cancel();
                    cts.Dispose();

                    return;
                }
                if (eofsFound.Contains(msg.TopicPartition))
                {
                    return;
                }

                // presume we are the only one writing to this topic partition (otherwise akka persistence gets messy real quick)
                if (!writeBatches.TryGetValue(msg.TopicPartition, out var writebatch))
                {
                    writeBatches[msg.TopicPartition] = writebatch = new WriteBatch();
                }

                //add event to the writebatch
                var persistent = PersistentFromMessage(msg);
                WriteToRocksDbBatch(persistent, writebatch);

                //store the current offset as well (in the same write batch)
                var key = TopicPartitionKey(TopicPartitionNumericId(msg.TopicPartition));
                writebatch.Put(KeyToBytes(key), CounterToBytes(msg.Offset));
            }, materializer);

            sourceTask.ContinueWith(x =>
            {
                Console.WriteLine("Source stopped for kafka journal");
            });
        }
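
The `eofListeners` dictionary drained in the EOF branch above is presumably filled by a registration helper elsewhere in the journal. A hypothetical sketch of such a helper (its name, and the absence of locking, are assumptions):

        // Hypothetical helper: returns a task that completes once the given
        // topic partition has reached EOF during the startup replay.
        private Task WaitForEof(TopicPartition topicPartition)
        {
            if (eofsFound.Contains(topicPartition))
            {
                return Task.CompletedTask; // already caught up
            }

            var tcs = new TaskCompletionSource<bool>();
            if (!eofListeners.TryGetValue(topicPartition, out var listeners))
            {
                eofListeners[topicPartition] = listeners = new List<TaskCompletionSource<bool>>();
            }
            listeners.Add(tcs);
            return tcs.Task;
        }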