Example #1
        private static void PublisherWithPartition(string topic, int value)
        {
            using (var producer = new Producer <Null, string>(GetConfig()))
            {
                try
                {
                    // Derive the target partition deterministically from the value (one of five partitions).
                    var key = value % 5;

                    var partition = new Partition(key);

                    var topicPartition = new TopicPartition(topic, partition);

                    var message = new Message <Null, string> {
                        Value = $"value-{value}"
                    };

                    var dr = producer.ProduceAsync(topicPartition, message).GetAwaiter().GetResult();

                    Console.WriteLine($"Delivered '{dr.Value}' to '{dr.TopicPartitionOffset}'");
                }
                catch (KafkaException e)
                {
                    Console.WriteLine($"Delivery failed: {e.Error.Reason}");
                }
            }
        }
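The GetConfig() helper used above is not shown. A minimal sketch of what it might return for the 0.x Confluent.Kafka client used here (the broker address and the dictionary-based config style are assumptions):

        // Hypothetical helper, not part of the original example: the 0.x client
        // accepts a dictionary of librdkafka settings. The broker address is illustrative.
        private static Dictionary<string, object> GetConfig()
        {
            return new Dictionary<string, object>
            {
                { "bootstrap.servers", "localhost:9092" }
            };
        }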
Example #2
        public StreamTask(string threadId, TaskId id, TopicPartition partition, ProcessorTopology processorTopology, IConsumer <byte[], byte[]> consumer, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IProducer <byte[], byte[]> producer)
            : base(id, partition, processorTopology, consumer, configuration)
        {
            this.threadId        = threadId;
            this.kafkaSupplier   = kafkaSupplier;
            this.consumedOffsets = new Dictionary <TopicPartition, long>();

            // A null producer signals EOS (exactly-once semantics): create a transactional producer.
            if (producer == null)
            {
                this.producer = CreateEOSProducer();
                InitializeTransaction();
                eosEnabled = true;
            }
            else
            {
                this.producer = producer;
            }

            this.collector = new RecordCollector(logPrefix);
            collector.Init(ref this.producer);

            var sourceTimestampExtractor = (processorTopology.GetSourceProcessor(id.Topic) as ISourceProcessor).Extractor;

            Context   = new ProcessorContext(configuration, stateMgr).UseRecordCollector(collector);
            processor = processorTopology.GetSourceProcessor(partition.Topic);
            queue     = new RecordQueue <ConsumeResult <byte[], byte[]> >(
                100,
                logPrefix,
                $"record-queue-{id.Topic}-{id.Partition}",
                sourceTimestampExtractor == null ? configuration.DefaultTimestampExtractor : sourceTimestampExtractor);
        }
Example #3
        internal DeliveryReport <byte[], byte[]> Produce(TopicPartition topicPartition, Message <byte[], byte[]> message)
        {
            DeliveryReport <byte[], byte[]> r = new DeliveryReport <byte[], byte[]>();

            CreateTopic(topicPartition.Topic);
            // Grow the topic first if the target partition does not exist yet, then append.
            if (topics[topicPartition.Topic].PartitionNumber <= topicPartition.Partition)
            {
                topics[topicPartition.Topic].CreateNewPartitions(topicPartition.Partition);
            }
            topics[topicPartition.Topic].AddMessage(message.Key, message.Value, topicPartition.Partition);

            r.Message   = message;
            r.Partition = topicPartition.Partition;
            r.Topic     = topicPartition.Topic;
            r.Timestamp = new Timestamp(DateTime.Now);
            r.Error     = new Error(ErrorCode.NoError);
            r.Status    = PersistenceStatus.Persisted;
            // TODO r.Offset
            return(r);
        }
Example #4
        public KafkaSink(
            string bootstrapServers,
            int batchSizeLimit,
            int period,
            SecurityProtocol securityProtocol,
            SaslMechanism saslMechanism,
            string topic,
            string saslUsername,
            string saslPassword,
            string sslCaLocation) : base(batchSizeLimit, TimeSpan.FromSeconds(period))
        {
            var config = new ProducerConfig()
                         .SetValue("ApiVersionFallbackMs", 0)
                         .SetValue("EnableDeliveryReports", false)
                         .LoadFromEnvironmentVariables()
                         .SetValue("BootstrapServers", bootstrapServers)
                         .SetValue("SecurityProtocol", securityProtocol)
                         .SetValue("SaslMechanism", saslMechanism)
                         .SetValue("SslCaLocation", string.IsNullOrEmpty(sslCaLocation) ? null : Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), sslCaLocation))
                         .SetValue("SaslUsername", saslUsername)
                         .SetValue("SaslPassword", saslPassword);

            producer = new ProducerBuilder <Null, byte[]>(config)
                       .Build();

            formatter  = new Formatting.Json.JsonFormatter(renderMessage: true);
            this.topic = new TopicPartition(topic, Partition.Any);
        }
Example #5
        public void Constructor()
        {
            var tp = new TopicPartition("mytopic", 42);

            Assert.Equal("mytopic", tp.Topic);
            Assert.Equal(new Partition(42), tp.Partition);
        }
Example #6
        public async Task Custom_partition_event_handling_Should_work()
        {
            int elementsCount   = 100;
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var consumerSettings = CreateConsumerSettings <string>(group1);

            var customHandler = new CustomEventsHandler();

            var(control, probe) = CreateProbe(consumerSettings, Subscriptions.Topics(topic1).WithPartitionEventsHandler(customHandler));

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            var shutdown = control.Shutdown();

            await AwaitConditionAsync(() => shutdown.IsCompleted);

            customHandler.AssignmentEventsCounter.Current.Should().BeGreaterThan(0);
            customHandler.StopEventsCounter.Current.Should().BeGreaterThan(0);
        }
Example #7
        /// <summary>
        /// Sends a message asynchronously.
        /// </summary>
        /// <param name="kafkaMessage"></param>
        /// <returns></returns>
        public async Task <DeliveryResult> PublishAsync(KafkaMessage kafkaMessage)
        {
            var topic = string.IsNullOrEmpty(kafkaMessage.Topic) ? DefaultTopic : kafkaMessage.Topic;

            if (string.IsNullOrEmpty(topic))
            {
                throw new ArgumentException("topic can not be empty", nameof(kafkaMessage.Topic));
            }
            var key = string.IsNullOrEmpty(kafkaMessage.Key) ? DefaultKey : kafkaMessage.Key;

            if (string.IsNullOrEmpty(key))
            {
                throw new ArgumentException("key can not be empty", nameof(kafkaMessage.Key));
            }

            var producer = RentProducer();
            DeliveryResult <string, object> deliveryResult;

            if (kafkaMessage.Partition == null)
            {
                deliveryResult = await producer.ProduceAsync(topic, new Message <string, object>() { Key = key, Value = kafkaMessage.Message });
            }
            else
            {
                var topicPartition = new TopicPartition(topic, new Partition(kafkaMessage.Partition.Value));
                deliveryResult = await producer.ProduceAsync(topicPartition, new Message <string, object>() { Key = key, Value = kafkaMessage.Message });
            }

            producer.Flush(new TimeSpan(0, 0, 0, 0, FlushTimeOut));

            ReturnProducer(producer);

            return(new DeliveryResult(deliveryResult));
        }
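A hypothetical call site for PublishAsync; the KafkaMessage property names match those referenced above, while the instance name and values are illustrative:

            // Illustrative usage only; 'publisher' is an instance of the class above.
            var result = await publisher.PublishAsync(new KafkaMessage
            {
                Topic     = "orders",
                Key       = "order-42",
                Message   = "order payload",
                Partition = 1   // leave null to let the partitioner decide
            });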
Example #8
        public void WithNullMaterialize()
        {
            // Verifies that even when Materialized is null, a state store exists for the count processor with a generated store name
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-count";

            var builder = new StreamBuilder();
            Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null;

            builder
            .Table <string, string>("topic")
            .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

            Assert.AreEqual(2, task.Context.States.StateStoreNames.Count());
            var nameStore1 = task.Context.States.StateStoreNames.ElementAt(0);
            var nameStore2 = task.Context.States.StateStoreNames.ElementAt(1);

            Assert.IsNotNull(nameStore1);
            Assert.IsNotNull(nameStore2);
            Assert.AreNotEqual(string.Empty, nameStore1);
            Assert.AreNotEqual(string.Empty, nameStore2);
            var store1 = task.GetStore(nameStore1);
            var store2 = task.GetStore(nameStore2);

            Assert.IsInstanceOf <TimestampedKeyValueStore <string, string> >(store1);
            Assert.IsInstanceOf <TimestampedKeyValueStore <string, long> >(store2);
            Assert.AreEqual(0, (store1 as TimestampedKeyValueStore <string, string>).ApproximateNumEntries());
            Assert.AreEqual(0, (store2 as TimestampedKeyValueStore <string, long>).ApproximateNumEntries());
        }
Example #9
        public void AddRecords(TopicPartition partition, IEnumerable <ConsumeResult <byte[], byte[]> > records)
        {
            foreach (var r in records)
            {
                queue.AddRecord(r);
            }

            // TODO: no pausing for the moment
            //if (queue.MaxSize <= queue.Size)
            //    consumer.Pause(new List<TopicPartition> { partition });

            int newQueueSize = queue.Size;

            if (log.IsDebugEnabled)
            {
                log.Debug($"{logPrefix}Added records into the buffered queue of partition {partition}, new queue size is {newQueueSize}");
            }

            //// if after adding these records, its partition queue's buffered size has been
            //// increased beyond the threshold, we can then pause the consumption for this partition
            //if (newQueueSize > maxBufferedSize)
            //{
            //    consumer.pause(singleton(partition));
            //}
        }
Example #10
        public void Begin()
        {
            config = new StreamConfig();
            config.ApplicationId = $"unit-test-rocksdb-w";
            config.UseRandomRocksDbConfigForTest();

            id = new TaskId {
                Id = 0, Partition = 0
            };
            partition    = new TopicPartition("source", 0);
            stateManager = new ProcessorStateManager(
                id,
                new List <TopicPartition> {
                partition
            },
                null,
                new MockChangelogRegister(),
                new MockOffsetCheckpointManager());

            task = new Mock <AbstractTask>();
            task.Setup(k => k.Id).Returns(id);

            context = new ProcessorContext(task.Object, config, stateManager, new StreamMetricsRegistry());

            store = new RocksDbWindowStore(
                new RocksDbSegmentedBytesStore("test-w-store", (long)defaultRetention.TotalMilliseconds, 5000, new RocksDbWindowKeySchema()),
                (long)defaultSize.TotalMilliseconds);

            store.Init(context, store);
        }
Example #11
        public void Begin()
        {
            config = new StreamConfig();
            config.ApplicationId = $"unit-test-rocksdb-kv";
            config.UseRandomRocksDbConfigForTest();

            id = new TaskId {
                Id = 0, Partition = 0
            };
            partition    = new TopicPartition("source", 0);
            stateManager = new ProcessorStateManager(
                id,
                new List <TopicPartition> {
                partition
            },
                null,
                new MockChangelogRegister(),
                new MockOffsetCheckpointManager());

            task = new Mock <AbstractTask>();
            task.Setup(k => k.Id).Returns(id);

            context = new ProcessorContext(task.Object, config, stateManager, new StreamMetricsRegistry());

            store = new RocksDbKeyValueStore("test-store");
            store.Init(context, store);
        }
Example #12
        public KafkaConsumerProcessorTest()
        {
            _topicPartition = new TopicPartition("topic-a", 0);

            var consumerSettings = new ConsumerSettings
            {
                MessageType  = typeof(SomeMessage),
                Topic        = _topicPartition.Topic,
                ConsumerType = typeof(SomeMessageConsumer),
                ConsumerMode = ConsumerMode.Consumer
            };

            consumerSettings.SetGroup("group-a");

            var messageBusMock = new MessageBusMock();

            messageBusMock.BusSettings.Consumers.Add(consumerSettings);
            messageBusMock.DependencyResolverMock.Setup(x => x.Resolve(typeof(SomeMessageConsumer))).Returns(_consumer);

            byte[] MessageValueProvider(Message m) => m.Value;

            var consumerInstancePoolMock = new Mock <ConsumerInstancePoolMessageProcessor <Message> >(consumerSettings, messageBusMock.Bus, (Func <Message, byte[]>)MessageValueProvider, null);

            _messageQueueWorkerMock = new Mock <MessageQueueWorker <Message> >(consumerInstancePoolMock.Object, _checkpointTrigger.Object);
            _subject = new KafkaConsumerProcessor(consumerSettings, _topicPartition, _commitControllerMock.Object, messageBusMock.Bus, _messageQueueWorkerMock.Object);
        }
Example #13
 private void AddToOffsetDict(TopicPartition topicPartition, TopicPartitionOffset TopicPartitionOffset)
 {
     _currentOffsets.AddOrUpdate(topicPartition, TopicPartitionOffset, (key, oldValue) =>
     {
         return(TopicPartitionOffset.Offset > oldValue.Offset ? TopicPartitionOffset : oldValue);
     });
 }
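The AddOrUpdate call with an update factory implies _currentOffsets is a ConcurrentDictionary; a sketch of the presumed field declaration (the field itself is not shown in the example):

     // Presumed declaration (System.Collections.Concurrent): AddOrUpdate keeps the
     // highest offset seen per partition in a thread-safe way.
     private readonly ConcurrentDictionary<TopicPartition, TopicPartitionOffset> _currentOffsets =
         new ConcurrentDictionary<TopicPartition, TopicPartitionOffset>();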
Example #14
        public async Task <ProduceResult> Publish(MessageValue mv)
        {
            try
            {
                using (var p = new ProducerBuilder <Null, string>(_config).Build())
                {
                    Console.WriteLine($"Produce message {mv.Value}");
                    var            random = new Random();
                    // Pick one of the topic's five partitions at random (upper bound is exclusive).
                    TopicPartition tp     = new TopicPartition(this._topicName, new Partition(random.Next(0, 5)));
                    var            dr     = await p.ProduceAsync(tp, new Message <Null, string>
                    {
                        Value = mv.Value
                    });

                    return(new ProduceResult()
                    {
                        TopicPartitionOffset = dr.TopicPartitionOffset.Offset.Value,
                        TopicPartition = dr.TopicPartition.Partition.Value
                    });
                }
            }
            catch (System.Exception e)
            {
                Console.Write($"Error: {e.Message}");
            }

            return(new ProduceResult());
        }
Example #15
        public async Task <string> SendMessage(string topic, string message, bool display, int key)
        {
            var msg = new Message <string, string>
            {
                Key   = key.ToString(),
                Value = message
            };

            DeliveryResult <string, string> delRep;

            if (key > 1)
            {
                // Keys greater than 1 are pinned to the partition with the same number;
                // otherwise the default partitioner decides.
                var p  = new Partition(key);
                var tp = new TopicPartition(topic, p);
                delRep = await producer.ProduceAsync(tp, msg);
            }
            else
            {
                delRep = await producer.ProduceAsync(topic, msg);
            }

            var topicOffset = delRep.TopicPartitionOffset;

            if (display)
            {
                Console.WriteLine($"Delivered '{delRep.Value}' to: {topicOffset}");
            }

            return(message);
        }
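A hypothetical call, showing both branches: a key above 1 is pinned to the matching partition, anything else goes through the default partitioner ('sender' is an instance of the class above):

            // Illustrative usage only.
            await sender.SendMessage("demo-topic", "pinned to partition 3", display: true, key: 3);
            await sender.SendMessage("demo-topic", "default partitioner",  display: true, key: 0);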
Example #16
        public void NoteProcessed(TopicPartition topicPartition, long offset)
        {
            var logContext = $"OffsetTracker.NoteProcessed for topicPartition='{topicPartition}', offset={offset}";

            _logger.LogDebug($"{logContext}");
            Fetch(topicPartition).NoteProcessed(offset);
        }
Example #17
        /// <summary>
        /// Moves the offset to the end of the partition when the gap between the current
        /// and the high-watermark offset is greater than 10. This happens when the producer
        /// writes to the topic while the consumer is not yet reading.
        /// </summary>
        private void SetHightOffset()
        {
            foreach (var topic in _topics)
            {
                try
                {
                    var blockTime      = TimeSpan.FromSeconds(1);
                    var topicPartition = new TopicPartition(topic, 0);
                    var currentOffset  = _consumer.Position(new List <TopicPartition> {
                        topicPartition
                    }).FirstOrDefault()?.Offset.Value;
                    var hightOffset = _consumer.QueryWatermarkOffsets(topicPartition, blockTime).High;

                    if ((currentOffset == -1001) || (hightOffset - currentOffset) > 10) // -1001 == Offset.Unset (no position yet)
                    {
                        var topicPartitionOffsets = new List <TopicPartitionOffset> {
                            new TopicPartitionOffset(topic, 0, hightOffset)
                        };
                        _consumer.CommitAsync(topicPartitionOffsets).Wait(blockTime);
                    }
                }
                catch (KafkaException ex)
                {
                    //LOG
                    Console.WriteLine(ex);
                }
            }
        }
Example #18
 public KafkaMessage(string topic, int partition, string key, string message)
 {
     TopicPartition = new TopicPartition(topic, partition);
     Message        = new Message <string, string> {
         Key = key, Value = message
     };
 }
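A sketch of how this wrapper pairs with the Confluent.Kafka overload of ProduceAsync that takes a TopicPartition; the producer instance and values are assumptions:

     // Illustrative usage; 'producer' is an IProducer<string, string> built elsewhere.
     var km     = new KafkaMessage("payments", 2, "key-1", "payload");
     var report = await producer.ProduceAsync(km.TopicPartition, km.Message);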
Example #19
        public async Task PlainSource_should_resume_stage_if_broker_unavailable()
        {
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            var config = ConsumerSettings <Null, string> .Create(Sys, null, null)
                         .WithBootstrapServers("localhost:10092")
                         .WithGroupId(group1);

            var regex    = new Regex("\\[localhost:10092\\/bootstrap: Connect to [a-zA-Z0-9#:.*]* failed:");
            var logProbe = CreateTestProbe();

            Sys.EventStream.Subscribe <Info>(logProbe.Ref);

            var(control, probe) = CreateProbe(config, Subscriptions.Assignment(topicPartition1));
            probe.Request(1);

            AwaitAssert(() =>
            {
                var info = logProbe.ExpectMsg <Info>();
                regex.IsMatch(info.Message.ToString() ?? "").Should().BeTrue();
                info.Message.ToString().Should().Contain("[Resume]");
            });
            //AwaitCondition(() => control.IsShutdown.IsCompleted, TimeSpan.FromSeconds(10));
        }
Example #20
        public void ToStringTest()
        {
            var tp = new TopicPartition("mytopic", 42);

            Assert.True(tp.ToString().Contains(tp.Topic));
            Assert.True(tp.ToString().Contains(tp.Partition.ToString()));
        }
Example #21
        public static WatermarkOffsets BuscarOffsetsDisponiveis(this ConsumidorAbstrato ob, string topico, string consumerGroup)
        {
            ob.ConsumerConfig.GroupId = consumerGroup;

            using (var consumer = new ConsumerBuilder <string, string>(ob.ConsumerConfig).Build())
            {
                consumer.Subscribe(topico);

                var assignment = consumer.Assignment;

                while (assignment.Count == 0)
                {
                    Thread.Sleep(50); // there is a bug in the .NET driver; sometimes you need to wait for the information to arrive
                    assignment = consumer.Assignment;
                }

                var topicPartition = new TopicPartition(assignment[0].Topic, assignment[0].Partition);

                var wmo = consumer.GetWatermarkOffsets(topicPartition);
                while (wmo.High.Value == -1001) // there is a bug in the .NET driver; sometimes you need to wait for the information to arrive (-1001 == Offset.Unset)
                {
                    wmo = consumer.GetWatermarkOffsets(topicPartition);
                    Thread.Sleep(50);
                }

                return(wmo);
            }
        }
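A hypothetical call to the extension method above, printing the watermark range it resolves ('consumidor' is an instance of ConsumidorAbstrato):

            // Illustrative usage only.
            var wmo = consumidor.BuscarOffsetsDisponiveis("demo-topic", "group-1");
            Console.WriteLine($"Low: {wmo.Low.Value}, High: {wmo.High.Value}");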
Example #22
        public async Task PlainSource_consumes_messages_from_KafkaProducer_with_subscribe_to_topic()
        {
            int elementsCount   = 100;
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            await ProduceStrings(new TopicPartition(topic1, 0), Enumerable.Range(1, elementsCount), ProducerSettings);

            var consumerSettings = CreateConsumerSettings <string>(group1);

            var(control, probe) = CreateProbe(consumerSettings, Subscriptions.Topics(topic1));

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            var shutdown = control.Shutdown();

            AwaitCondition(() => shutdown.IsCompleted);
        }
Example #23
        /**
         * This is the primary function of <TwitchCommandConsumer> and is to be run as a thread
         */
        public void TwiConThread()
        {
            Console.WriteLine("DiscordMessageConsumerThread Start");
            Active = true;

            //objs: Kafka Consumer client
            //
            //consumer - the Kafka consumer client
            //topicp - the topic partition from <consumer> to consume from
            var consumer = new ConsumerBuilder <string, string>(config).Build();
            var topicp   = new TopicPartition(topic, 0);

            consumer.Assign(topicp);
            while (Active)
            {
                try
                {
                    //var: consumerresult
                    //The result of checking for the next new message on the Kafka server
                    var consumerresult = consumer.Consume(canceltoken);

                    if (!consumerresult.IsPartitionEOF)
                    {
                        messageQueue.Enqueue(consumerresult.Key + " " + consumerresult.Value);
                    }
                    else
                    {
                        Thread.Sleep(100);
                    }
                }
                catch (System.OperationCanceledException)
                {
                    // Shutdown was requested via canceltoken; the loop re-checks Active and exits.
                }
            }
        }
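TwiConThread loops until Active is cleared, so it is designed to run on a dedicated thread; a hedged start/stop sketch (the instance name and the external visibility of Active are assumptions):

            // Illustrative lifecycle; 'twiCon' is an instance of the consumer class above.
            var thread = new Thread(twiCon.TwiConThread);
            thread.Start();
            // ... later, request shutdown and wait for the loop to exit:
            twiCon.Active = false;
            thread.Join();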
Example #24
        internal WatermarkOffsets GetWatermarkOffsets(TopicPartition topicPartition)
        {
            var topic = topics[topicPartition.Topic];
            var p     = topic.GetPartition(topicPartition.Partition);

            return(new WatermarkOffsets(new Offset(p.LowOffset), new Offset(p.HighOffset)));
        }
Example #25
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder, IStreamConfig configuration, IStreamConfig topicConfiguration, IKafkaSupplier supplier, CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;

            this.token    = token;
            builder       = topologyBuilder;
            this.supplier = supplier ?? new SyncKafkaSupplier();
            producer      = this.supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;

            foreach (var sourceTopic in builder.GetSourceTopics().Union(builder.GetGlobalTopics()))
            {
                var part   = new TopicPartition(sourceTopic, 0);
                var taskId = builder.GetTaskIdFromPartition(part);
                if (partitionsByTaskId.ContainsKey(taskId))
                {
                    partitionsByTaskId[taskId].Add(part);
                }
                else
                {
                    partitionsByTaskId.Add(taskId, new List <TopicPartition> {
                        part
                    });
                }
            }
        }
Example #26
        /// <summary>
        /// Handles the subscription of a new TopicPartition to this fetcher.
        /// Keeps track of the subscribed partitions in order to not fetch messages if the FlowControlState is Closed.
        /// </summary>
        /// <param name="topicPartition"></param>
        /// <returns></returns>
        public IDisposable Subscribe(TopicPartition topicPartition)
        {
            _topicPartitions.Add(topicPartition);
            EtwTrace.Log.FetcherPartitionSubscribed(_id, topicPartition.PartitionId);

            // cleanup
            var topicPartitionCleanup = Disposable.Create(() => _topicPartitions.Remove(topicPartition));
            var receivedMessagesSubscriptionCleanup = ReceivedMessages.Where(rm => rm.Topic == topicPartition.Topic && rm.Partition == topicPartition.PartitionId)
                    .Subscribe(topicPartition);
            var flowControlCleanup = topicPartition.FlowControl.
                // we need to wake up from waiting loop any time flow control hits low watermark and becomes enabled
                Where(enabled => enabled).
                Subscribe(_ => _wakeupSignal.OnNext(true));
            
            var cleanup = new CompositeDisposable
            {
                topicPartitionCleanup,
                receivedMessagesSubscriptionCleanup,
                flowControlCleanup
            };

            if (_log.IsDebugEnabled)
            {
                cleanup.Add(Disposable.Create(() => _log.Debug("Fetcher #{0} {1} topicPartition is unsubscribing", _id, topicPartition)));
            }

            _log.Debug("Fetcher #{0} added {1}", _id, topicPartition);

            return cleanup;
        }
Example #27
        public KafkaTopicScalerTest()
        {
            consumer = new Mock <IConsumer <string, byte[]> >();

            partition0 = new TopicPartition(TopicName, new Partition(0));
            partition1 = new TopicPartition(TopicName, new Partition(1));
            partition2 = new TopicPartition(TopicName, new Partition(2));
            partition3 = new TopicPartition(TopicName, new Partition(3));

            partitions = new List <TopicPartition>
            {
                partition0,
                partition1,
                partition2,
                partition3
            };

            topicScaler = new KafkaTopicScalerForTest <string, byte[]>(
                TopicName,
                "consumer-group-test",
                "testfunction",
                consumer.Object, new AdminClientConfig(),
                NullLogger.Instance);

            topicScaler.WithPartitions(partitions);
        }
Example #28
        static void Pause()
        {
            using (var consumer = new Consumer <Ignore, string>(GetConfig()))
            {
                try
                {
                    var assignment = consumer.Assignment;

                    var partition = new Partition(1);

                    var topicPartition = new TopicPartition(TOPIC, partition);

                    consumer.Pause(new List <TopicPartition> {
                        topicPartition
                    });

                    //consumer.Position(new List<TopicPartition> { topicPartition });

                    //var pos = consumer.Position(new List<TopicPartition> { topicPartition }).First();

                    //consumer.Seek(new TopicPartitionOffset(topicPartition, new Offset(50)));
                }
                catch (Exception)
                {
                    throw;
                }

                consumer.Close();
            }
        }
Example #29
        public async Task CommitableSource_consumes_messages_from_Producer_without_commits()
        {
            int elementsCount   = 100;
            var topic1          = CreateTopic(1);
            var group1          = CreateGroup(1);
            var topicPartition1 = new TopicPartition(topic1, 0);

            await GivenInitializedTopic(topicPartition1);

            await Source
            .From(Enumerable.Range(1, elementsCount))
            .Select(elem => new ProducerRecord <Null, string>(topicPartition1, elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

            var consumerSettings = CreateConsumerSettings <string>(group1);

            var probe = KafkaConsumer
                        .CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
                        .Where(c => !c.Record.Value.Equals(InitialMsg))
                        .Select(c => c.Record.Value)
                        .RunWith(this.SinkProbe <string>(), Materializer);

            probe.Request(elementsCount);
            foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
            {
                probe.ExpectNext(i, TimeSpan.FromSeconds(10));
            }

            probe.Cancel();
        }
Example #30
        public async Task Consumir(string topico, Offset offset)
        {
            ConsumerConfig.GroupId          = ConsumerGroup;
            ConsumerConfig.EnableAutoCommit = false;

            using (var consumer = new ConsumerBuilder <string, string>(ConsumerConfig).Build())
            {
                consumer.Subscribe(topico);

                var assignment = consumer.Assignment;

                while (assignment.Count == 0)
                {
                    Thread.Sleep(50);
                    assignment = consumer.Assignment;
                }

                var topicPartition = new TopicPartition(assignment[0].Topic, assignment[0].Partition);
                consumer.Assign(topicPartition);

                await this.SeekAsync(consumer, assignment[0].Topic, assignment[0].Partition, offset);

                var cr = consumer.Consume();
                Console.WriteLine($"Consumido registro via offset {cr.Offset} da partição {cr.Partition} com chave '{cr.Message.Key}' e valor '{cr.Message.Value}'");
            }
        }
Example #31
        private ConsumeResult <byte[], byte[]> ConsumeInternal(TimeSpan timeout)
        {
            DateTime dt = DateTime.Now;
            ConsumeResult <byte[], byte[]> result = null;

            foreach (var kp in offsets)
            {
                if (timeout != TimeSpan.Zero && (dt + timeout) < DateTime.Now)
                {
                    break;
                }

                var tp = new TopicPartition(kp.Key, 0);
                if (producer != null &&
                    ((partitionsState.ContainsKey(tp) && !partitionsState[tp]) ||
                     !partitionsState.ContainsKey(tp)))
                {
                    var messages = producer.GetHistory(kp.Key).ToArray();
                    if (messages.Length > kp.Value.OffsetConsumed)
                    {
                        result = new ConsumeResult <byte[], byte[]>
                        {
                            Offset    = kp.Value.OffsetConsumed,
                            Topic     = kp.Key,
                            Partition = 0,
                            Message   = messages[kp.Value.OffsetConsumed]
                        };
                        ++kp.Value.OffsetConsumed;
                        return(result);
                    }
                }
            }
            return(result);
        }
Example #32
 internal void OnTopicPartitionComplete(TopicPartition topicPartition)
 {
     // called back from TopicPartition when its subscription is completed. 
     // Remove from our dictionary, and if all TopicPartitions are removed, call our own OnComplete.
     _topicPartitions.Remove(topicPartition.PartitionId);
     if (_topicPartitions.Count == 0)
     {
         // If we call OnCompleted right now, any last message that may be being sent will not process. Just tell the scheduler to do it in a moment.
         _cluster.Scheduler.Schedule(() => OnMessageArrivedInput.OnCompleted());
     }
 }
Example #33
        /// <summary>For every partition, resolve offsets and build a TopicPartition object</summary>
        private async Task<IEnumerable<TopicPartition>> BuildTopicPartitionsAsync()
        {
            // if they didn't specify explicit locations, initialize them here.
            var startPositionProvider = Configuration.StartPosition;
            if (startPositionProvider.StartLocation != ConsumerLocation.SpecifiedLocations)
            {
                // no offsets provided. Need to issue an offset request to get start/end locations and use them for consuming
                var partitions = await _cluster.FetchPartitionOffsetsAsync(Topic, startPositionProvider.StartLocation);

                if (_log.IsDebugEnabled)
                    _log.Debug("Consumer for topic {0} got time->offset resolved for location {1}. parts: [{2}]",
                        Topic, startPositionProvider,
                        string.Join(",", partitions.Partitions.OrderBy(p => p).Select(p => string.Format("{0}:{1}", p, partitions.NextOffset(p)))));

                IStartPositionProvider origProvider = startPositionProvider;
                // the new explicit offsets provider should use only the partitions included in the original one.
                startPositionProvider = new TopicPartitionOffsets(partitions.Topic, partitions.GetPartitionsOffset.Where(kv=> origProvider.ShouldConsumePartition(kv.Key)));
            }

            // we now have specified locations to start from, just get the partition metadata, and build the TopicPartitions
            var partitionMeta = await _cluster.GetOrFetchMetaForTopicAsync(Topic);
            return partitionMeta
                // only new partitions we don't already have in our dictionary
                .Where(pm => !_topicPartitions.ContainsKey(pm.Id))
                // only partitions we are "told" to.
                .Where(pm => startPositionProvider.ShouldConsumePartition(pm.Id))
                .Select(part =>
                {
                    var tp = new TopicPartition(_cluster, Topic, part.Id, startPositionProvider.GetStartOffset(part.Id));
                    _topicPartitions.Add(tp.PartitionId, tp);
                    return tp;
                });
        }