Example #1
        public void TestNoBroker()
        {
            var config = new ProducerConfig();

            config.Brokers = TestUtils.GetBrokerListFromConfigs(this.props);

            // create topic metadata with 0 partitions
            var topic1Metadata      = new TopicMetadata("topic1", new List <PartitionMetadata>());
            var topicPartitionInfos = new Dictionary <string, TopicMetadata> {
                { "topic1", topic1Metadata }
            };

            var producerPool = new ProducerPool(config);

            var producerDataList = new List <KeyedMessage <string, string> >();

            producerDataList.Add(new KeyedMessage <string, string>("topic1", "msg1"));

            var handler = new DefaultEventHandler <string, string>(
                config, null, new StringEncoder(), new StringEncoder(), producerPool, topicPartitionInfos);

            try
            {
                handler.Handle(producerDataList);
                Assert.True(false, "Should fail with FailedToSendMessageException");
            }
            catch (FailedToSendMessageException)
            {
                // expected
            }
        }
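
A hedged note on the expected-exception pattern above: with xUnit, the try/catch plus Assert.True(false, ...) can be stated directly with Assert.Throws. A minimal sketch, assuming the same handler and producerDataList arrangement as in TestNoBroker:

            // equivalent to the try/catch above; assumes xUnit's Assert
            Assert.Throws<FailedToSendMessageException>(() => handler.Handle(producerDataList));
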
        private static void TestBug1490652ReadData(TestHelperOptions testOptions)
        {
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = KafkaSimpleManagerConfiguration.DefaultFetchSize,
                BufferSize   = KafkaSimpleManagerConfiguration.DefaultBufferSize,
                MaxWaitTime  = 0,
                MinWaitBytes = 0,
                Zookeeper    = testOptions.Zookeeper
            };

            config.Verify();

            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, "ClientID", 0, testOptions.Topic, true);
                PartitionCount = topicMetadata.PartitionsMetadata.Count();
                for (int i = 0; i < PartitionCount; i++)
                {
                    #region Get real offset and adjust
                    long earliest   = 0;
                    long latest     = 0;
                    long offsetBase = 0;
                    OffsetHelper.GetAdjustedOffset <int, Message>(testOptions.Topic, kafkaSimpleManager, i, KafkaOffsetType.Earliest, 0,
                                                                  0, out earliest, out latest, out offsetBase);
                    #endregion

                    TestBug1490652DataRead.Add(i, ConsumeDataOfOnePartitionTotally <int, Message>(testOptions.Topic, kafkaSimpleManager, i, KafkaOffsetType.Earliest,
                                                                                                  0, 0, latest, 0, 100, -1, "DumpLog.log"));
                }
            }
        }
Example #3
        public Metadata GetMetadata(string topic, TimeSpan timeout)
        {
            var error = new Error(ErrorCode.NoError);

            var brokersMetadata = new List <BrokerMetadata> {
                new BrokerMetadata(1, "localhost", 9092)
            };

            var partitionsMetadata = new List <PartitionMetadata>
            {
                new PartitionMetadata(1, 1, new int[1] {
                    1
                }, new int[1] {
                    1
                }, error)
            };

            var topicMetadata = new TopicMetadata(topic, partitionsMetadata, error);

            return(new Metadata(brokersMetadata,
                                new List <TopicMetadata>()
            {
                topicMetadata
            },
                                1, "localhost"));
        }
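
A small consumption sketch for the stub above, reading back the Confluent.Kafka Metadata shape it returns (variable names here are illustrative; System.Linq is assumed):

            var metadata  = GetMetadata("topic1", TimeSpan.FromSeconds(5));
            var topicMeta = metadata.Topics.Single(t => t.Topic == "topic1");

            // the stub always reports one broker and one partition whose leader is broker 1
            Console.WriteLine("brokers:{0} partitions:{1}", metadata.Brokers.Count, topicMeta.Partitions.Count);
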
        public IDictionary <int, Broker> GetBrokerPartitionLeaders(string topic)
        {
            TopicMetadata metadata = this.topicPartitionInfo[topic];

            if (metadata.Error != ErrorMapping.NoError)
            {
                throw new KafkaException(string.Format("The metadata status for topic {0} is abnormal, detail: ", topic), metadata.Error);
            }

            Dictionary <int, Broker> partitionLeaders = new Dictionary <int, Broker>();

            foreach (var p in metadata.PartitionsMetadata)
            {
                if (p.Leader != null && !partitionLeaders.ContainsKey(p.PartitionId))
                {
                    partitionLeaders.Add(p.PartitionId, p.Leader);
                    Logger.DebugFormat("Topic {0} partition {1} has leader {2}", topic,
                                       p.PartitionId, p.Leader.Id);
                }

                if (p.Leader == null)
                {
                    Logger.DebugFormat("Topic {0} partition {1} does not have a leader yet", topic,
                                       p.PartitionId);
                }
            }
            return(partitionLeaders);
        }
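
Partitions still waiting on leader election are simply absent from the dictionary returned above, so callers must treat a missing key as "no leader yet". A hedged usage sketch (expectedPartitionCount is a placeholder for the caller's own value):

            IDictionary<int, Broker> leaders = GetBrokerPartitionLeaders("topic1");
            for (int partition = 0; partition < expectedPartitionCount; partition++)
            {
                Broker leader;
                if (leaders.TryGetValue(partition, out leader))
                {
                    Console.WriteLine("partition {0} -> leader broker {1}", partition, leader.Id);
                }
                else
                {
                    Console.WriteLine("partition {0} has no leader yet; refresh metadata and retry", partition);
                }
            }
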
        /// <summary>
        /// MANIFOLD use. Get one consumer from the pool.
        /// </summary>
        public Consumer GetConsumerFromPool(short versionId, string clientId, int correlationId
                                            , string topic, ConsumerConfiguration consumerConfigTemplate, int partitionId)
        {
            if (!this.TopicPartitionsLeaderConsumers.ContainsKey(topic))
            {
                TopicMetadata topicMetadata = RefreshMetadata(versionId, clientId, correlationId, topic, false);
            }

            ConcurrentDictionary <int, Consumer> consumers = GetConsumerPoolForTopic(topic);

            if (!consumers.ContainsKey(partitionId))
            {
                lock (GetConsumeLockOfTopicPartition(topic, partitionId))
                {
                    if (!consumers.ContainsKey(partitionId))
                    {
                        ConsumerConfiguration config = new ConsumerConfiguration(consumerConfigTemplate, GetLeaderBrokerOfPartition(topic, partitionId));
                        Consumer consumer            = new Consumer(config);
                        if (consumers.TryAdd(partitionId, consumer))
                        {
                            Logger.InfoFormat("Create one consumer for client {0} topic {1} partitoin {2} addOneConsumer return value:{3} ", clientId, topic, partitionId, true);
                        }
                        else
                        {
                            Logger.WarnFormat("Create one consumer for client {0} topic {1} partitoin {2} addOneConsumer return value:{3} ", clientId, topic, partitionId, false);
                        }
                    }
                }
            }

            return(consumers[partitionId]);
        }
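
The check / lock / re-check sequence in GetConsumerFromPool is the classic double-checked creation pattern. A minimal generic sketch of the same idea (names here are illustrative, not part of the library):

        private readonly object createLock = new object();

        private TValue GetOrCreate<TKey, TValue>(ConcurrentDictionary<TKey, TValue> pool, TKey key, Func<TValue> factory)
        {
            TValue value;
            if (!pool.TryGetValue(key, out value))
            {
                lock (this.createLock)
                {
                    // re-check under the lock so only one thread runs the factory
                    if (!pool.TryGetValue(key, out value))
                    {
                        value = factory();
                        pool.TryAdd(key, value);
                    }
                }
            }
            return value;
        }

ConcurrentDictionary.GetOrAdd(key, factory) collapses this into one call, but without the outer lock it may invoke the factory more than once under contention, which matters when the factory opens a broker connection, as the consumer constructor here does.
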
Example #6
        public Producer <TKey, TData> RefreshMetadataAndRecreateProducerOfOnePartition(short versionId,
                                                                                       string clientId,
                                                                                       int correlationId,
                                                                                       string topic,
                                                                                       int partitionId,
                                                                                       bool forceRefreshMetadata,
                                                                                       bool forceRecreateEvenHostPortSame,
                                                                                       ProducerConfiguration producerConfigTemplate,
                                                                                       bool randomReturnIfProducerOfTargetPartionNotExists)
        {
            Logger.InfoFormat(
                "RefreshMetadataAndRecreateProducerWithPartition ==  enter:  Topic:{0}  partitionId:{1} forceRefreshMetadata:{2}  forceRecreateEvenHostPortSame:{3} randomReturnIfProducerOfTargetPartionNotExists:{4} ",
                topic, partitionId, forceRefreshMetadata, forceRecreateEvenHostPortSame,
                randomReturnIfProducerOfTargetPartionNotExists);

            TopicMetadata topicMetadata = null;

            if (forceRefreshMetadata)
            {
                topicMetadata = RefreshMetadata(versionId, clientId, correlationId, topic, forceRefreshMetadata);
            }

            if (!TopicMetadataPartitionsLeaders[topic].ContainsKey(partitionId))
            {
                throw new NoLeaderForPartitionException(
                          string.Format("No leader for topic {0} parition {1} ", topic, partitionId));
            }

            var value = TopicMetadataPartitionsLeaders[topic][partitionId];

            CreateProducerOfOnePartition(topic, partitionId, value.Item2, producerConfigTemplate,
                                         forceRecreateEvenHostPortSame);

            return(GetProducerOfPartition(topic, partitionId, randomReturnIfProducerOfTargetPartionNotExists));
        }
Example #7
        public void Constructor()
        {
            var partitions = new List <PartitionMetadata>();
            var tm         = new TopicMetadata("mytopic", partitions, ErrorCode._ALL_BROKERS_DOWN);

            Assert.Equal(tm.Topic, "mytopic");
            Assert.Same(partitions, tm.Partitions);
            Assert.Equal(tm.Error, new Error(ErrorCode._ALL_BROKERS_DOWN));
        }
Example #8
        public void Constructor()
        {
            var partitions = new List <PartitionMetadata>();
            var tm         = new TopicMetadata("mytopic", partitions, ErrorCode.Local_AllBrokersDown);

            Assert.Equal("mytopic", tm.Topic);
            Assert.Same(partitions, tm.Partitions);
            Assert.Equal(new Error(ErrorCode.Local_AllBrokersDown), tm.Error);
        }
        private static void TestBug1490652SendData(TestHelperOptions testOptions)
        {
            int           correlationID = 0;
            Random        rand          = new Random();
            StringBuilder sb            = new StringBuilder();

            try
            {
                KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                {
                    Zookeeper      = testOptions.Zookeeper,
                    MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
                };
                config.Verify();
                using (KafkaSimpleManager <int, Kafka.Client.Messages.Message> kafkaSimpleManager = new KafkaSimpleManager <int, Kafka.Client.Messages.Message>(config))
                {
                    TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, "ClientID", correlationID++, testOptions.Topic, true);
                    PartitionCount = topicMetadata.PartitionsMetadata.Count();
                    List <ProducerData <int, Message> > listOfDataNeedSendInOneBatch = new List <ProducerData <int, Message> >();
                    for (int i = 0; i < PartitionCount; i++)
                    {
                        TestBug1490652DataSent.Add(i, new Dictionary <int, string>());
                        for (int j = 0; j < TestBug1490652MessageCountPerPartition; j++)
                        {
                            string val  = KafkaClientHelperUtils.GetRandomString(testOptions.MessageSize);
                            byte[] bVal = System.Text.Encoding.UTF8.GetBytes(val);
                            //Set the key to the partition ID so the message falls directly into that partition.
                            Message message = new Message(bVal, CompressionCodecs.DefaultCompressionCodec);
                            listOfDataNeedSendInOneBatch.Add(new ProducerData <int, Message>(testOptions.Topic, i, message));
                            TestBug1490652DataSent[i].Add(j, val);
                        }
                    }

                    ProducerConfiguration producerConfig = new ProducerConfiguration(new List <BrokerConfiguration>()
                    {
                    })
                    {
                        PartitionerClass = ProducerConfiguration.DefaultPartitioner,
                        RequiredAcks     = 1,
                        BufferSize       = config.BufferSize,
                        ZooKeeper        = config.ZookeeperConfig,
                        MaxMessageSize   = Math.Max(config.MaxMessageSize, Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, testOptions.MessageSize))
                    };
                    producerConfig.SyncProducerOfOneBroker = 1;
                    Producer <int, Kafka.Client.Messages.Message> producer = new Producer <int, Kafka.Client.Messages.Message>(producerConfig);
                    producer.Send(listOfDataNeedSendInOneBatch);
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Produce data Got exception:{0}\r\ninput parameter: {1}\r\n"
                                   , ex.FormatException(), testOptions.ToString());
            }
        }
        public int InitializeProducerPoolForTopic(short versionId, string clientId, int correlationId, string topic, bool forceRefreshMetadata, ProducerConfiguration producerConfigTemplate, bool forceRecreateEvenHostPortSame)
        {
            Logger.InfoFormat("InitializeProducerPoolForTopic ==  enter:  Topic:{0} forceRefreshMetadata:{1}  forceRecreateEvenHostPortSame:{2} ", topic, forceRefreshMetadata, forceRecreateEvenHostPortSame);
            TopicMetadata topicMetadata = null;

            if (forceRefreshMetadata)
            {
                topicMetadata = RefreshMetadata(versionId, clientId, correlationId, topic, forceRefreshMetadata);
            }

            Dictionary <int, Tuple <Broker, BrokerConfiguration> > partitionLeaders = this.TopicMetadataPartitionsLeaders[topic];

            //TODO: sometimes a partition may have no leader yet

            //When a partitioner class is configured, only one producer is created in total.
            if (string.IsNullOrEmpty(this.Config.PartitionerClass))
            {
                foreach (KeyValuePair <int, Tuple <Broker, BrokerConfiguration> > kv in partitionLeaders)
                {
                    CreateProducerOfOnePartition(topic, kv.Key, kv.Value.Item2, producerConfigTemplate, forceRecreateEvenHostPortSame);
                }
                Logger.InfoFormat("InitializeProducerPoolForTopic ==  exit:  Topic:{0} forceRefreshMetadata:{1}  forceRecreateEvenHostPortSame:{2} this.TopicPartitionsLeaderProducers[topic].Count:{3}", topic, forceRefreshMetadata, forceRecreateEvenHostPortSame, this.TopicPartitionsLeaderProducers[topic].Count);
                return(this.TopicPartitionsLeaderProducers[topic].Count);
            }
            else
            {
                ProducerConfiguration producerConfig = new ProducerConfiguration(producerConfigTemplate);
                producerConfig.ZooKeeper        = this.Config.ZookeeperConfig;
                producerConfig.PartitionerClass = this.Config.PartitionerClass;

                Producer <TKey, TData> oldProducer = null;
                if (TopicProducersWithPartitionerClass.TryGetValue(topic, out oldProducer))
                {
                    bool removeOldProducer = false;
                    if (oldProducer != null)
                    {
                        oldProducer.Dispose();
                        removeOldProducer = TopicProducersWithPartitionerClass.TryRemove(topic, out oldProducer);
                        Logger.InfoFormat("InitializeProducerPoolForTopic == Remove producer from TopicProducersWithPartitionerClass for  topic {0}  removeOldProducer:{1} ", topic, removeOldProducer);
                    }
                }

                Producer <TKey, TData> producer = new Producer <TKey, TData>(producerConfig);
                bool addNewProducer             = TopicProducersWithPartitionerClass.TryAdd(topic, producer);
                Logger.InfoFormat("InitializeProducerPoolForTopic == Add producer  TopicProducersWithPartitionerClass for  topic {0}  SyncProducerOfOneBroker:{1} addNewProducer:{2}   END.", topic, producerConfig.SyncProducerOfOneBroker, addNewProducer);

                return(addNewProducer ? 1 : 0);
            }
        }
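
A hedged usage sketch contrasting the two pool modes initialized above (manager stands for a KafkaSimpleManager instance; the method names follow the ones used elsewhere in this file):

            int producers = manager.InitializeProducerPoolForTopic(0, clientId, correlationId, topic, true, producerConfigTemplate, false);

            if (string.IsNullOrEmpty(config.PartitionerClass))
            {
                // per-partition pool: pick the producer that owns a given partition
                var producer = manager.GetProducerOfPartition(topic, 0, true);
            }
            else
            {
                // one shared producer; the configured partitioner class routes each message by key
                var producer = manager.GetProducerWithPartionerClass(topic);
            }
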
Example #11
        static void Main(string[] args)
        {
            int    correlationID = 0;
            string topic         = "t4";

            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = 100,
                BufferSize   = 100,
                MaxWaitTime  = 5000,
                MinWaitBytes = 1,
                Zookeeper    = "10.1.1.231:2181,10.1.1.232:2181,10.1.1.233:2181/kafka"
            };

            ProducerConfiguration producerConfiguration = new ProducerConfiguration(new [] { new BrokerConfiguration() });

            config.Verify();

            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, topic, true);

                kafkaSimpleManager.InitializeProducerPoolForTopic(0, ClientID, correlationID, topic, true,
                                                                  producerConfiguration, true);

                var producer1 = kafkaSimpleManager.GetProducerOfPartition(topic, 0, true);
                var producer2 = kafkaSimpleManager.GetProducerOfPartition(topic, 4, true);

                for (int i = 0; i < 100; i++)
                {
                    var producer = i % 2 == 0 ? producer1 : producer2;
                    var tKey     = Encoding.UTF8.GetBytes(DateTime.Now.Ticks.ToString());
                    var tValue   = Encoding.UTF8.GetBytes("Hello world " + i);
                    producer.Send(new ProducerData <int, Message>(topic,
                                                                  new Message(tValue, tKey, CompressionCodecs.DefaultCompressionCodec)));
                }

                producer1.Dispose();
                producer2.Dispose();

                Console.WriteLine("Topic is: " + topicMetadata.Topic);
            }

            //Console.ReadKey();
        }
        private void RefreshMetadataInternal(short versionId, string clientId, int correlationId, string topic, Dictionary <string, TopicMetadata> tempTopicMetadatas, Dictionary <string, DateTime> tempTopicMetadatasLastUpdateTime, Dictionary <int, Tuple <Broker, BrokerConfiguration> > partitionLeaders)
        {
            Logger.InfoFormat("RefreshMetadataInternal enter: {0} {1} {2} Topic:{3} ", versionId, clientId, correlationId, topic);

            lock (syncProducerPoolForMetadataLock)
            {
                BrokerPartitionInfo brokerPartitionInfo = new BrokerPartitionInfo(this.syncProducerPoolForMetaData, tempTopicMetadatas, tempTopicMetadatasLastUpdateTime, ProducerConfiguration.DefaultTopicMetaDataRefreshIntervalMS, this.syncProducerPoolForMetaData.zkClient);
                brokerPartitionInfo.UpdateInfo(versionId, correlationId, clientId, topic);
            }
            if (!tempTopicMetadatas.ContainsKey(topic))
            {
                throw new NoBrokerForTopicException(string.Format("There is no metadata for topic {0}.  Please check if all brokers of that topic live.", topic));
            }
            TopicMetadata metadata = tempTopicMetadatas[topic];

            if (metadata.Error != ErrorMapping.NoError)
            {
                throw new KafkaException(string.Format("The metadata status for topic {0} is abnormal, detail: ", topic), metadata.Error);;
            }

            foreach (var p in metadata.PartitionsMetadata)
            {
                if (p.Leader != null && !partitionLeaders.ContainsKey(p.PartitionId))
                {
                    partitionLeaders.Add(p.PartitionId, new Tuple <Broker, BrokerConfiguration>(
                                             p.Leader,
                                             new BrokerConfiguration()
                    {
                        BrokerId = p.Leader.Id,
                        Host     = p.Leader.Host,
                        Port     = p.Leader.Port
                    }));
                    Logger.DebugFormat("RefreshMetadataInternal Topic {0} partition {1} has leader {2}", topic, p.PartitionId, p.Leader.Id);
                }
                if (p.Leader == null)
                {
                    Logger.ErrorFormat("RefreshMetadataInternal Topic {0} partition {1} does not have a leader yet.", topic, p.PartitionId);
                }
            }
            Logger.InfoFormat("RefreshMetadataInternal exit: {0} {1} {2} Topic:{3} ", versionId, clientId, correlationId, topic);
        }
        /// <summary>
        /// Get the leader broker of one topic partition, without retry.
        /// If this throws, the client code probably needs to call RefreshMetadata with force = true.
        /// </summary>
        internal BrokerConfiguration GetLeaderBrokerOfPartition(string topic, int partitionID)
        {
            if (!this.TopicMetadatas.ContainsKey(topic))
            {
                throw new KafkaException(string.Format("There is no  metadata  for topic {0}.  Please call RefreshMetadata with force = true and try again.", topic));
            }

            TopicMetadata metadata = this.TopicMetadatas[topic];

            if (metadata.Error != ErrorMapping.NoError)
            {
                throw new KafkaException(string.Format("The metadata status for topic {0} is abnormal, detail: ", topic), metadata.Error);
            }

            if (!this.TopicMetadataPartitionsLeaders[topic].ContainsKey(partitionID))
            {
                throw new NoLeaderForPartitionException(string.Format("No leader for topic {0} parition {1} ", topic, partitionID));
            }

            return(this.TopicMetadataPartitionsLeaders[topic][partitionID].Item2);
        }
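
Following the doc comment above, a hedged retry sketch around GetLeaderBrokerOfPartition: refresh metadata with force = true once, then retry. The method is internal, so this assumes calling code in the same assembly; manager is illustrative, and catching KafkaException here is an assumption (NoLeaderForPartitionException may need its own catch):

            BrokerConfiguration leader;
            try
            {
                leader = manager.GetLeaderBrokerOfPartition(topic, partitionId);
            }
            catch (KafkaException)
            {
                // stale or missing metadata: force a refresh and try exactly once more
                manager.RefreshMetadata(versionId, clientId, correlationId, topic, true);
                leader = manager.GetLeaderBrokerOfPartition(topic, partitionId);
            }
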
Example #14
        public ValueTask <int> GetPartition(TopicMetadata topic, ReadOnlySequence <byte> keyBytes)
        {
            int numPartitions = topic.Partitions.Count;
            int partition;

            if (topic.Partitions.Count == 1)
            {
                partition = 0;
            }
            else if (keyBytes.Length == 0)
            {
                partition = random.Next(0, numPartitions);
            }
            else
            {
                // hash the keyBytes to choose a partition
                partition = (int)(Hash.Murmur2.Compute(keyBytes) % numPartitions);
            }

            return(new ValueTask <int>(partition));
        }
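
A hedged sanity check for the partitioner above: a fixed non-empty key must always map to the same partition, and the result must stay in range (assuming Hash.Murmur2.Compute returns an unsigned value, as the modulo in the example implies). GetPartition returns an already-completed ValueTask, so reading .Result is safe here; partitioner and topic are assumed instances, and System.Buffers / System.Text / System.Diagnostics are assumed imported:

            var key = new ReadOnlySequence<byte>(Encoding.UTF8.GetBytes("order-42"));

            int first  = partitioner.GetPartition(topic, key).Result;
            int second = partitioner.GetPartition(topic, key).Result;

            Debug.Assert(first == second);                              // murmur2 hashing is deterministic
            Debug.Assert(first >= 0 && first < topic.Partitions.Count); // always a valid partition index
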
Example #15
        public void ShouldUpdateInfoWhenCachedErrorResponse()
        {
            var pool               = new Mock <ISyncProducerPool>();
            var producer           = new Mock <ISyncProducer>();
            var partitionMetadatas = new List <PartitionMetadata>()
            {
                new PartitionMetadata(0, new Broker(0, "host1", 1234), Enumerable.Empty <Broker>(),
                                      Enumerable.Empty <Broker>())
            };
            var cachedPartitionMetadatas = new List <PartitionMetadata>()
            {
                new PartitionMetadata(1, new Broker(1, "host1", 1234), Enumerable.Empty <Broker>(),
                                      Enumerable.Empty <Broker>())
            };
            var metadatas = new List <TopicMetadata>()
            {
                new TopicMetadata("test", partitionMetadatas, ErrorMapping.NoError)
            };

            producer.Setup(p => p.Send(It.IsAny <TopicMetadataRequest>())).Returns(() => metadatas);
            pool.Setup(p => p.GetShuffledProducers()).Returns(() => new List <ISyncProducer>()
            {
                producer.Object
            });
            var cache = new Dictionary <string, TopicMetadata>();

            cache["test"] = new TopicMetadata("test", cachedPartitionMetadatas, ErrorMapping.NotLeaderForPartitionCode);
            var info = new BrokerPartitionInfo(pool.Object, cache, new Dictionary <string, DateTime> {
                { "test", DateTime.MinValue }
            }, 1, null);
            var partitions = info.GetBrokerPartitionInfo(1, "test", 1, "test");

            partitions.Count.Should().Be(1);
            var partition = partitions.First();

            partition.Topic.Should().Be("test");
            partition.Leader.BrokerId.Should().Be(0);
        }
        /// <summary>
        /// MANIFOLD use. Force-recreate the consumer for a partition and return it.
        /// </summary>
        public Consumer GetConsumerFromPoolAfterRecreate(short versionId, string clientId, int correlationId
                                                         , string topic, ConsumerConfiguration consumerConfigTemplate, int partitionId, int notRecreateTimeRangeInMs = -1)
        {
            TopicMetadata topicMetadata = RefreshMetadata(versionId, clientId, correlationId, topic, true);
            ConcurrentDictionary <int, Consumer> consumers = GetConsumerPoolForTopic(topic);

            lock (GetConsumeLockOfTopicPartition(topic, partitionId))
            {
                ConsumerConfiguration config = new ConsumerConfiguration(consumerConfigTemplate, GetLeaderBrokerOfPartition(topic, partitionId));
                Consumer oldConsumer         = null;
                if (consumers.TryGetValue(partitionId, out oldConsumer))
                {
                    if ((DateTime.UtcNow.Ticks - oldConsumer.CreatedTimeInUTC) / 10000.0 < notRecreateTimeRangeInMs)
                    {
                        Logger.WarnFormat("Do NOT recreate consumer for client {0} topic {1} partitoin {2} since it only created {3} ms. less than {4} ", clientId, topic, partitionId
                                          , (DateTime.UtcNow.Ticks - oldConsumer.CreatedTimeInUTC) / 10000.0, notRecreateTimeRangeInMs);
                    }
                    else
                    {
                        Logger.InfoFormat("Destroy one old consumer for client {0} topic {1} partitoin {2} ", clientId, topic, partitionId);
                        if (oldConsumer != null)
                        {
                            oldConsumer.Dispose();
                        }

                        consumers[partitionId] = new Consumer(config);
                        Logger.InfoFormat("Create one consumer for client {0} topic {1} partitoin {2} ", clientId, topic, partitionId);
                    }
                }
                else
                {
                    consumers[partitionId] = new Consumer(config);
                    Logger.InfoFormat("Newly Create one consumer for client {0} topic {1} partitoin {2} ", clientId, topic, partitionId);
                }
            }

            return(consumers[partitionId]);
        }
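
The division by 10000.0 above converts ticks to milliseconds (a .NET tick is 100 ns, so 10,000 ticks make one millisecond). The same age check reads more clearly through TimeSpan; a sketch assuming CreatedTimeInUTC is a UTC tick count:

            double ageMs = TimeSpan.FromTicks(DateTime.UtcNow.Ticks - oldConsumer.CreatedTimeInUTC).TotalMilliseconds;
            bool tooYoungToRecreate = ageMs < notRecreateTimeRangeInMs;
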
        public async Task CanLoadWatermarksWithValidParamsAsync()
        {
            // Arrange
            var topic        = new TopicName("test");
            var clientMock   = new Mock <IAdminClient>();
            var client       = clientMock.Object;
            var timeout      = 1000;
            var loader       = new TopicWatermarkLoader(topic, client, timeout);
            var consumerMock = new Mock <IConsumer <object, object> >();

            IConsumer <object, object> consumerFactory() => consumerMock.Object;

            var adminClientPartition = new TopicPartition(topic.Value, new Partition(1));

            var adminPartitions = new[] { adminClientPartition };

            var brokerMeta = new BrokerMetadata(1, "testHost", 1000);

            var partitionMeta = new PartitionMetadata(1, 1, new[] { 1 }, new[] { 1 }, null);

            var topicMeta = new TopicMetadata(topic.Value, new[] { partitionMeta }.ToList(), null);

            var meta = new Confluent.Kafka.Metadata(
                new[] { brokerMeta }.ToList(),
                new[] { topicMeta }.ToList(), 1, "test"
                );

            clientMock.Setup(c => c.GetMetadata(topic.Value, TimeSpan.FromSeconds(timeout))).Returns(meta);

            var offsets = new WatermarkOffsets(new Offset(1), new Offset(2));

            consumerMock.Setup(x => x.QueryWatermarkOffsets(adminClientPartition, TimeSpan.FromSeconds(timeout))).Returns(offsets);

            TopicWatermark result = null!;

            // Act
            var exception = await Record.ExceptionAsync(async() => result = await loader.LoadWatermarksAsync(consumerFactory, CancellationToken.None));

            // Assert
            exception.Should().BeNull();

            consumerMock.Verify(x => x.Close(), Times.Once);

            consumerMock.Verify(x => x.Dispose(), Times.Once);

            result.Should().NotBeNull();

            var watermarks = result.Watermarks.ToList();

            watermarks.Should().ContainSingle();

            clientMock.Verify(c => c.GetMetadata(topic.Value, TimeSpan.FromSeconds(timeout)), Times.Once);

            consumerMock.Verify(x => x.QueryWatermarkOffsets(adminClientPartition, TimeSpan.FromSeconds(timeout)), Times.Once);

            watermarks.Single().TopicName.Should().Be(topic);

            watermarks.Single().Partition.Value.Should().Be(partitionMeta.PartitionId);

            watermarks.Single().Offset.Should().Be(offsets);
        }
Example #18
        public async Task Subscribe(string topicName)
        {
            topic = await metadataManager.GetTopic(topicName);

            Fetch();
        }
Example #19
        private static void ProduceByPartitionerClass(ProduceSimpleHelperOption produceOptions, int partitionCountInZookeeper)
        {
            using (KafkaSimpleManager <byte[], Message> kafkaSimpleManager = new KafkaSimpleManager <byte[], Message>(kafkaSimpleManagerConfig))
            {
                Stopwatch stopwatch2 = new Stopwatch();
                stopwatch2.Start();
                int nProducers = kafkaSimpleManager.InitializeProducerPoolForTopic(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, true, producerConfigTemplate, false);
                stopwatch2.Stop();
                Console.WriteLine("Spent {0:0.00} ms to create {1} producers. average {2:0.00} ms per producer. syncProducerOfOnePartition:{3} ", stopwatch2.Elapsed.TotalMilliseconds, nProducers, stopwatch2.Elapsed.TotalMilliseconds / nProducers, producerConfigTemplate.SyncProducerOfOneBroker);

                while (true)
                {
                    Producer <byte[], Message> producer = null;
                    try
                    {
                        producer = kafkaSimpleManager.GetProducerWithPartionerClass(produceOptions.Topic);
                        //Read the current partition count back from the refreshed topic metadata.
                        totalPartitionCount = kafkaSimpleManager.GetTopicMetadta(produceOptions.Topic).PartitionsMetadata.Count();
                        Logger.InfoFormat("Get GetProducerWithPartionerClass Producer:{0}  totalPartitionCount has metadata:{1} partitionCountInZookeeper:{2} "
                                          , producer == null ? "null" : producer.ToString(), totalPartitionCount, partitionCountInZookeeper);

                        stopWatch.Start();
                        CountExpectCount(listOfKeys, produceOptions.PartitionerClass, partitionCountInZookeeper);
                        producer.Send(listOfDataNeedSendInOneBatch);
                        successMessageCount += produceOptions.MessageCountPerBatch;

                        stopWatch.Stop();
                        sentBatchCount++;

                        if (sentBatchCount % 10 == 0)
                        {
                            RegularStatistics(produceOptions, sentBatchCount);
                        }
                        if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                        {
                            break;
                        }
                    }
                    catch (FailedToSendMessageException <byte[]> e)
                    {
                        //Sometimes the send may only partially succeed.
                        //For example, with 100 messages spread evenly across 5 partitions, if one partition has no leader,
                        //the 20 failed messages end up here, where you can retry or otherwise handle them.
                        Logger.Error("===FAILED=========");
                        Logger.ErrorFormat("{0}", e.Message);
                        if (e.ProduceDispatchSeralizeResult != null &&
                            e.ProduceDispatchSeralizeResult.FailedProducerDatas != null)
                        {
                            Logger.ErrorFormat("Failed produce message key: ");
                            foreach (ProducerData <byte[], Message> a in e.ProduceDispatchSeralizeResult.FailedProducerDatas)
                            {
                                Logger.ErrorFormat("Key:{0} ", System.Text.Encoding.Default.GetString(a.Key));
                            }
                        }
                        //Partial successes are counted here as well.
                        failedMessageCount  += e.CountFailed;
                        successMessageCount += e.CountAll - e.CountFailed;
                        sentBatchCount++;
                        if (sentBatchCount % 10 == 0)
                        {
                            RegularStatistics(produceOptions, sentBatchCount);
                        }
                        if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                        {
                            break;
                        }
                        Logger.Error("  \r\n");
                    }
                    catch (Exception e)
                    {
                        Logger.ErrorFormat("Got exception, maybe leader change for some partition, will refresh metadata and recreate producer {0}", e.FormatException());
                        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, true);
                        totalPartitionCount = topicMetadata.PartitionsMetadata.Count();
                        Logger.InfoFormat("Get GetProducerWithPartionerClass Producer:{0}  totalPartitionCount has metadata:{1} partitionCountInZookeeper:{2} "
                                          , producer == null ? "null" : producer.ToString(), totalPartitionCount, partitionCountInZookeeper);
                    }

                    if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                    {
                        break;
                    }
                }
            }

            RegularStatistics(produceOptions, sentBatchCount);
        }
Example #20
        public void TestPartitionAndCollateEvents()
        {
            var producerDataList = new List <KeyedMessage <int, Message> >();

            // use bogus key and partition key override for some messages
            producerDataList.Add(new KeyedMessage <int, Message>("topic1", 0, new Message(Encoding.UTF8.GetBytes("msg1"))));
            producerDataList.Add(new KeyedMessage <int, Message>("topic2", -99, 1, new Message(Encoding.UTF8.GetBytes("msg2"))));
            producerDataList.Add(new KeyedMessage <int, Message>("topic1", 2, new Message(Encoding.UTF8.GetBytes("msg3"))));
            producerDataList.Add(new KeyedMessage <int, Message>("topic1", -101, 3, new Message(Encoding.UTF8.GetBytes("msg4"))));
            producerDataList.Add(new KeyedMessage <int, Message>("topic2", 4, new Message(Encoding.UTF8.GetBytes("msg5"))));

            var broker1 = new Broker(0, "localhost", 9092);
            var broker2 = new Broker(1, "localhost", 9093);

            // form expected partitions metadata
            var partition1Metadata = new PartitionMetadata(0, broker1, new List <Broker> {
                broker1, broker2
            });
            var partition2Metadata = new PartitionMetadata(1, broker2, new List <Broker> {
                broker1, broker2
            });
            var topic1Metadata = new TopicMetadata(
                "topic1", new List <PartitionMetadata> {
                partition1Metadata, partition2Metadata
            });
            var topic2Metadata = new TopicMetadata(
                "topic2", new List <PartitionMetadata> {
                partition1Metadata, partition2Metadata
            });

            var topicPartitionInfos = new Dictionary <string, TopicMetadata>
            {
                { "topic1", topic1Metadata },
                { "topic2", topic2Metadata }
            };
            var intPartitioner = new IntPartitioner();

            var config = new ProducerConfig();

            config.Brokers = TestUtils.GetBrokerListFromConfigs(this.props);

            var producerPool = new ProducerPool(config);
            var handler      = new DefaultEventHandler <int, string>(
                config, intPartitioner, null, new IntEncoder(), producerPool, topicPartitionInfos);

            var topic1Broker1Data = new List <KeyedMessage <int, Message> >
            {
                new KeyedMessage <int, Message>(
                    "topic1",
                    0,
                    new Message(
                        Encoding.UTF8.GetBytes("msg1"))),
                new KeyedMessage <int, Message>(
                    "topic1",
                    2,
                    new Message(
                        Encoding.UTF8.GetBytes("msg3")))
            };
            var topic1Broker2Data = new List <KeyedMessage <int, Message> >
            {
                new KeyedMessage <int, Message>(
                    "topic1",
                    -101,
                    3,
                    new Message(
                        Encoding.UTF8.GetBytes("msg4")))
            };

            var topic2Broker1Data = new List <KeyedMessage <int, Message> >
            {
                new KeyedMessage <int, Message>(
                    "topic2",
                    4,
                    new Message(
                        Encoding.UTF8.GetBytes("msg5")))
            };
            var topic2Broker2Data = new List <KeyedMessage <int, Message> >
            {
                new KeyedMessage <int, Message>(
                    "topic2",
                    -99,
                    1,
                    new Message(
                        Encoding.UTF8.GetBytes("msg2")))
            };

            var expectedResult = new Dictionary <int, Dictionary <TopicAndPartition, List <KeyedMessage <int, Message> > > >
            {
                { 0,
                  new Dictionary <TopicAndPartition, List <KeyedMessage <int, Message> > >
                  {
                      { new TopicAndPartition("topic1", 0), topic1Broker1Data },
                      { new TopicAndPartition("topic2", 0), topic2Broker1Data }
                  } },
                { 1,
                  new Dictionary <TopicAndPartition, List <KeyedMessage <int, Message> > >
                  {
                      { new TopicAndPartition("topic1", 1), topic1Broker2Data },
                      { new TopicAndPartition("topic2", 1), topic2Broker2Data }
                  } },
            };

            var actualResult = handler.PartitionAndCollate(producerDataList);

            Assert.Equal(expectedResult.Count, actualResult.Count);
            Assert.True(expectedResult.Keys.SequenceEqual(actualResult.Keys));
            foreach (var key in expectedResult.Keys)
            {
                var expectedInnerDict = expectedResult[key];
                var actualInnerDict   = actualResult[key];
                Assert.Equal(expectedInnerDict.Count, actualInnerDict.Count);
                foreach (var topicAndPartition in expectedInnerDict.Keys)
                {
                    var expectedKeyedMsgs = expectedInnerDict[topicAndPartition];
                    var actualKeyedMsgs   = actualInnerDict[topicAndPartition];
                    Assert.True(expectedKeyedMsgs.SequenceEqual(actualKeyedMsgs));
                }
            }
        }
Example #21
        public void TestNumPartitions()
        {
            var metadata = new TopicMetadata(1234);

            Assert.Equal(1234, metadata.NumPartitions());
        }
Example #22
        static void Main(string[] args)
        {
            int correlationID = 0;

            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = 10,
                BufferSize   = 100000,
                MaxWaitTime  = 500,
                MinWaitBytes = 50,
                Zookeeper    = "10.1.1.231:2181,10.1.1.232:2181,10.1.1.233:2181/kafka"
            };

            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, "test", true);

                Consumer consumer = kafkaSimpleManager.GetConsumer("test", 0);

                //FetchResponse response = consumer.Fetch(Assembly.GetExecutingAssembly().ManifestModule.ToString(), "test", correlationID, 0, 0, 100, 5000, 100);
                //var messages = response.PartitionData("test", 0).GetMessageAndOffsets();

                //messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));

                //var messages = FetchAndGetMessageAndOffsetList(consumer, correlationID, "test", 0, 0, 100, 5000, 100);
                //messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
                long offsetLast = -1;
                long l          = 0;

                long totalCount = 0, offsetBase = 0, partitionID = 0, lastNotifytotalCount = 0, latest = 0, earliest = 0;

                while (true)
                {
                    correlationID++;
                    List <MessageAndOffset> messages = FetchAndGetMessageAndOffsetList(consumer,
                                                                                       correlationID++,
                                                                                       "test",
                                                                                       0,
                                                                                       0,
                                                                                       10,
                                                                                       5000,
                                                                                       1);



                    if (messages == null)
                    {
                        Logger.Error("PullMessage got null  List<MessageAndOffset>, please check log for detail.");
                        break;
                    }
                    else
                    {
                        #region dump response.Payload
                        if (messages.Any())
                        {
                            offsetLast  = messages.Last().MessageOffset;
                            totalCount += messages.Count;
                            Logger.InfoFormat("Finish read partition {0} to {1}.  ", partitionID, offsetLast);
                            offsetBase = offsetLast + 1;
                            if (totalCount - lastNotifytotalCount > 1000)
                            {
                                Console.WriteLine("Partition: {0} totally read  {1}  will continue read from   {2}", partitionID, totalCount, offsetBase);
                                lastNotifytotalCount = totalCount;
                            }

                            // return messages
                            messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
                        }
                        else
                        {
                            Logger.InfoFormat("Finish read partition {0} to {1}.   Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                            Console.WriteLine("Partition: {0} totally read  {1}  Hit end of queue   {2}", partitionID, totalCount, offsetBase);
                            break;
                        }
                        #endregion
                    }
                }
            }

            Console.ReadKey();
        }
        internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic,
                                                                  string zookeeper,
                                                                  int partitionIndex,
                                                                  bool includePartitionDetailInfo,
                                                                  bool includeOffsetInfo,
                                                                  DateTime timestamp,
                                                                  SortedDictionary <int, int> parttionBrokerID_LeaderCountDistribAll,
                                                                  SortedDictionary <int, int> parttionBrokerID_ReplicaCountDistribAll,
                                                                  SortedDictionary <int, long> latestOffset,
                                                                  SortedDictionary <int, long> latestLength)
        {
            StringBuilder sb = new StringBuilder();
            string        s  = string.Empty;
            //BrokerID --> count of partitions it leads
            SortedDictionary <int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary <int, int>();
            //BrokerID --> count of partitions it replicates
            SortedDictionary <int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary <int, int>();

            try
            {
                if (string.IsNullOrEmpty(zookeeper))
                {
                    Logger.Error(" zookeeper  should be provided");
                    sb.AppendFormat(DumpTopicError, topic);
                }
                else
                {
                    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                    {
                        Zookeeper = zookeeper
                    };
                    config.Verify();
                    Dictionary <int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
                    using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
                    {
                        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);

                        int partitionCount = topicMetadata.PartitionsMetadata.Count();
                        sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);

                        int replicationFactor = Enumerable.Count <Broker>(topicMetadata.PartitionsMetadata.First().Replicas);
                        sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);

                        //TODO: compare with detailDataInZookeeper and report which partitions are missing.
                        StringBuilder sbDetail = new StringBuilder();
                        if (includePartitionDetailInfo)
                        {
                            long sumEndOffset = 0;
                            long sumLength    = 0;
                            foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                            {
                                int[] replicaInZookeeper = null;
                                if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                                {
                                    replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                                    detailDataInZookeeper.Remove(p.PartitionId);
                                }

                                #region One partition
                                long earliest = 0;
                                long latest   = 0;
                                if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                                {
                                    //sbDetail.AppendFormat("\tTopic:{0}", topic);
                                    sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                                    if (p.Leader != null)
                                    {
                                        sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));

                                        if (parttionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                        {
                                            parttionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                        }

                                        if (parttionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                        {
                                            parttionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                        }
                                    }
                                    else
                                    {
                                        sbDetail.AppendFormat("\tLeader:NoLeader!");
                                    }

                                    sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                                    foreach (Broker b in p.Replicas)
                                    {
                                        if (parttionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                        {
                                            parttionBrokerID_ReplicaCountDistrib[b.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                        }

                                        if (parttionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                        {
                                            parttionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                        }
                                    }

                                    //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                                    ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                                    sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                                    //TODO: add missed replica

                                    #region Offset
                                    if (includeOffsetInfo)
                                    {
                                        try
                                        {
                                            kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                            sumEndOffset += latest;
                                            sumLength    += (latest - earliest);
                                            sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                                                  , earliest
                                                                  , latest
                                                                  , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                            sbDetail.AppendFormat("\r\n");

                                            latestOffset.Add(p.PartitionId, latest);
                                            latestLength.Add(p.PartitionId, latest - earliest);
                                        }
                                        catch (NoLeaderForPartitionException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }
                                        catch (UnableToConnectToHostException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }

                                        if (timestamp != DateTime.MinValue)
                                        {
                                            long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                            try
                                            {
                                                long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                                      , timeStampOffset
                                                                      , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                                      , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                            catch (TimeStampTooSmallException)
                                            {
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                                      , "NA since no data before the time you specified, please retry with a bigger value."
                                                                      , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                                      , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                        }
                                    }
                                    #endregion
                                }
                                #endregion
                            }
                            if (includeOffsetInfo)
                            {
                                sb.AppendFormat("SumeEndOffset:{0:0,0}  SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                            }
                            else
                            {
                                sb.AppendFormat("\r\n");
                            }

                            if (detailDataInZookeeper.Any())
                            {
                                foreach (KeyValuePair <int, int[]> kv in detailDataInZookeeper)
                                {
                                    sb.AppendFormat("=ERROR=MISSED partition= {0}  Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                                }
                            }
                        }

                        sb.Append(sbDetail.ToString());
                        sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                        foreach (KeyValuePair <int, int> kv in parttionBrokerID_LeaderCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                        foreach (KeyValuePair <int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\r\n");
                    }
                }

                s = sb.ToString();
            }
            catch (NoBrokerForTopicException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (UnableToConnectToHostException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
                                   , ExceptionUtil.GetExceptionDetailInfo(ex),
                                   topic,
                                   zookeeper,
                                   string.Empty,
                                   partitionIndex,
                                   includePartitionDetailInfo,
                                   includeOffsetInfo,
                                   timestamp,
                                   s);
            }

            return(s);
        }
        internal static void ConsumeDataSimple(ConsumeDataHelperArguments dumpdataOptions)
        {
            correlationID        = 0;
            totalCountUTF8       = 0;
            totalCountOriginal   = 0;
            totalCount           = 0;
            lastNotifytotalCount = 0;
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = dumpdataOptions.FetchSize,
                BufferSize   = dumpdataOptions.BufferSize,
                MaxWaitTime  = dumpdataOptions.MaxWaitTime,
                MinWaitBytes = dumpdataOptions.MinWaitBytes,
                Zookeeper    = dumpdataOptions.Zookeeper
            };

            config.Verify();

            bool finish = false;

            try
            {
                using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
                {
                    TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
                    while (true)
                    {
                        try
                        {
                            for (int i = 0; i <= topicMetadata.PartitionsMetadata.Max(r => r.PartitionId); i++)
                            {
                                if (dumpdataOptions.PartitionIndex == -1 || i == dumpdataOptions.PartitionIndex)
                                {
                                    #region Get real offset and adjust
                                    long earliest   = 0;
                                    long latest     = 0;
                                    long offsetBase = 0;
                                    OffsetHelper.GetAdjustedOffset <int, Message>(dumpdataOptions.Topic
                                                                                  , kafkaSimpleManager, i
                                                                                  , KafkaNetLibraryExample.ConvertOffsetType(dumpdataOptions.Offset)
                                                                                  , KafkaNetLibraryExample.ConvertOffset(dumpdataOptions.Offset)
                                                                                  , dumpdataOptions.LastMessagesCount, out earliest, out latest, out offsetBase);
                                    #endregion
                                    Console.WriteLine("Topic:{0} Partition:{1} will read from {2} earliest:{3} latest:{4}", dumpdataOptions.Topic, i, offsetBase, earliest, latest);
                                    finish = ConsumeDataOfOnePartition(kafkaSimpleManager, i, offsetBase, earliest, latest, dumpdataOptions);
                                    if (finish)
                                    {
                                        break;
                                    }
                                }
                            }

                            finish = true;
                        }
                        catch (Exception ex)
                        {
                            Logger.ErrorFormat("ConsumeDataSimple Got exception, will refresh metadata. {0}", ex.FormatException());
                            kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
                        }

                        if (finish)
                        {
                            break;
                        }
                    }
                }

                Logger.InfoFormat("Topic:{0} Finish Read.     totalCount:{1} ", dumpdataOptions.Topic, totalCount);
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("ConsumeDataSimple  Got exception:{0}\r\ninput parameter: {1}", ex.FormatException(), dumpdataOptions.ToString());
            }
        }