private ProducePerfTestKafkaSimpleManagerWrapper()
        {
            config = new KafkaSimpleManagerConfiguration()
            {
                Zookeeper = produceOptions.Zookeeper,
                PartitionerClass = produceOptions.PartitionerClass,
                MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
            };

            config.Verify();
            producerConfigTemplate = new ProducerConfiguration(
                  new List<BrokerConfiguration>() { }) // The broker list is replaced inside KafkaSimpleManager
            {
                ForceToPartition = -1,
                PartitionerClass = config.PartitionerClass,
                TotalNumPartitions = 0,
                RequiredAcks = produceOptions.RequiredAcks,
                AckTimeout = produceOptions.AckTimeout,
                SendTimeout = produceOptions.SendTimeout,
                ReceiveTimeout = produceOptions.ReceiveTimeout,
                CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
                BufferSize = produceOptions.BufferSize,
                SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker, // Actually the sync producer socket count per partition
                MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
            };

            kafkaSimpleManage = new KafkaSimpleManager<byte[], Message>(config);
            int correlationId = Interlocked.Increment(ref correlationIDGetProducer);
            kafkaSimpleManage.InitializeProducerPoolForTopic(0, clientId, correlationId, produceOptions.Topic, true, producerConfigTemplate, true);
        }
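A usage sketch for the pool initialized above, assuming the wrapper exposes its KafkaSimpleManager and topic; the GetProducerOfPartition and Send calls mirror Examples #9 and #15, and the method name here is hypothetical.

        public static void SendOneMessage(KafkaSimpleManager<byte[], Message> manager, string topic, int partitionId)
        {
            // Fetch the pooled producer for this partition (created by InitializeProducerPoolForTopic above).
            Producer<byte[], Message> producer = manager.GetProducerOfPartition(topic, partitionId, false);
            byte[] payload = Encoding.UTF8.GetBytes("hello");
            producer.Send(new ProducerData<byte[], Message>(topic, new Message(payload, CompressionCodecs.DefaultCompressionCodec)));
        }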
Example #2
        public OffsetManager(KafkaSimpleManager <int, Message> manager)
        {
            _manager = manager;

            zkClient = new ZooKeeperClient(manager.Config.ZookeeperConfig.ZkConnect, manager.Config.ZookeeperConfig.ZkSessionTimeoutMs, ZooKeeperNetBinarySerializer.Serializer);
            zkClient.Connect();
        }
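A sketch of how this OffsetManager is then driven; the GetOffset and SetOffset signatures are taken from the MainLoop in Example #5, while the surrounding method is hypothetical.

        public void ConsumeWithCheckpoint(OffsetManager offsetManager, string topic, string clientId, int partitionId)
        {
            // Resume from the last committed position.
            long offset = offsetManager.GetOffset(topic, clientId, partitionId);

            // ... fetch and process messages starting at 'offset' (see Example #5) ...

            // Persist the new position so the next run resumes here.
            offsetManager.SetOffset(topic, clientId, partitionId, offset);
        }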
Example #4
        private static void TestBug1490652ReadData(TestHelperOptions testOptions)
        {
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = KafkaSimpleManagerConfiguration.DefaultFetchSize,
                BufferSize   = KafkaSimpleManagerConfiguration.DefaultBufferSize,
                MaxWaitTime  = 0,
                MinWaitBytes = 0,
                Zookeeper    = testOptions.Zookeeper
            };

            config.Verify();

            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, "ClientID", 0, testOptions.Topic, true);
                PartitionCount = topicMetadata.PartitionsMetadata.Count();
                for (int i = 0; i < PartitionCount; i++)
                {
                    #region Get real offset and adjust
                    long earliest   = 0;
                    long latest     = 0;
                    long offsetBase = 0;
                    OffsetHelper.GetAdjustedOffset <int, Message>(testOptions.Topic, kafkaSimpleManager, i, KafkaOffsetType.Earliest, 0,
                                                                  0, out earliest, out latest, out offsetBase);
                    #endregion

                    TestBug1490652DataRead.Add(i, ConsumeDataOfOnePartitionTotally <int, Message>(testOptions.Topic, kafkaSimpleManager, i, KafkaOffsetType.Earliest,
                                                                                                  0, 0, latest, 0, 100, -1, "DumpLog.log"));
                }
            }
        }
Example #5
        private void MainLoop(Consumer consumer, int correlationId, KafkaSimpleManager <int, Message> manager)
        {
            var sw = new Stopwatch();

            sw.Start();

            var offsetManager = new OffsetManager(manager);

            var messageOffset = offsetManager.GetOffset(_processorOptions.Topic, _processorOptions.ClientId,
                                                        _processorOptions.PartitionId);

            while (true)
            {
                var response = consumer.Fetch(_processorOptions.ClientId, _processorOptions.Topic, correlationId,
                                              _processorOptions.PartitionId, messageOffset,
                                              consumer.Config.FetchSize, manager.Config.MaxWaitTime, manager.Config.MinWaitBytes);

                var partitionData = response.PartitionData(_processorOptions.Topic, _processorOptions.PartitionId);

                var messageAndOffsets = partitionData.MessageSet;

                var count     = 0;
                var lstOffset = messageOffset;
                var fstOffset = messageOffset;

                foreach (var messageAndOffset in messageAndOffsets)
                {
                    count++;
                    ProcessPayload(messageAndOffset);
                    lstOffset = messageAndOffset.MessageOffset;
                }

                if (lstOffset == fstOffset)
                {
                    Thread.Sleep(30000);
                    Console.Write('.');
                    continue;
                }

                messageOffset = lstOffset;

                offsetManager.SetOffset(_processorOptions.Topic,
                                        _processorOptions.ClientId, _processorOptions.PartitionId, lstOffset);


                Console.WriteLine("{3} [{4}-{5}] | {0}/{1} seg => {2}",
                                  count, sw.Elapsed.TotalSeconds,
                                  count / sw.Elapsed.TotalSeconds,
                                  _processorOptions.PartitionId,
                                  fstOffset,
                                  lstOffset);

                sw.Restart();
            }
        }
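When the loop above idles because no new messages arrived, a lag check tells how far behind the partition head the consumer is. A sketch built on the RefreshAndGetOffset call from Example #8; the GetLag method itself is hypothetical.

        private static long GetLag(KafkaSimpleManager<int, Message> manager, string topic, int partitionId, long currentOffset)
        {
            long earliest = 0;
            long latest   = 0;

            // Refresh partition metadata and read the earliest/latest offsets, as in Example #8.
            manager.RefreshAndGetOffset(0, string.Empty, 0, topic, partitionId, true, out earliest, out latest);

            return latest - currentOffset;   // number of messages not yet consumed
        }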
Example #6
        private static void TestBug1490652SendData(TestHelperOptions testOptions)
        {
            int           correlationID = 0;
            Random        rand          = new Random();
            StringBuilder sb            = new StringBuilder();

            try
            {
                KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                {
                    Zookeeper      = testOptions.Zookeeper,
                    MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
                };
                config.Verify();
                using (KafkaSimpleManager <int, Kafka.Client.Messages.Message> kafkaSimpleManager = new KafkaSimpleManager <int, Kafka.Client.Messages.Message>(config))
                {
                    TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, "ClientID", correlationID++, testOptions.Topic, true);
                    PartitionCount = topicMetadata.PartitionsMetadata.Count();
                    List <ProducerData <int, Message> > listOfDataNeedSendInOneBatch = new List <ProducerData <int, Message> >();
                    for (int i = 0; i < PartitionCount; i++)
                    {
                        TestBug1490652DataSent.Add(i, new Dictionary <int, string>());
                        for (int j = 0; j < TestBug1490652MessageCountPerPartition; j++)
                        {
                            string val  = KafkaClientHelperUtils.GetRandomString(testOptions.MessageSize);
                            byte[] bVal = System.Text.Encoding.UTF8.GetBytes(val);
                            // Use the partition ID as the key so the message lands directly in that partition.
                            Message message = new Message(bVal, CompressionCodecs.DefaultCompressionCodec);
                            listOfDataNeedSendInOneBatch.Add(new ProducerData <int, Message>(testOptions.Topic, i, message));
                            TestBug1490652DataSent[i].Add(j, val);
                        }
                    }

                    ProducerConfiguration producerConfig = new ProducerConfiguration(new List <BrokerConfiguration>())
                    {
                        PartitionerClass = ProducerConfiguration.DefaultPartitioner,
                        RequiredAcks     = 1,
                        BufferSize       = config.BufferSize,
                        ZooKeeper        = config.ZookeeperConfig,
                        MaxMessageSize   = Math.Max(config.MaxMessageSize, Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, testOptions.MessageSize))
                    };
                    producerConfig.SyncProducerOfOneBroker = 1;
                    Producer <int, Kafka.Client.Messages.Message> producer = new Producer <int, Kafka.Client.Messages.Message>(producerConfig);
                    producer.Send(listOfDataNeedSendInOneBatch);
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Produce data Got exception:{0}\r\ninput parameter: {1}\r\n"
                                   , ex.FormatException(), testOptions.ToString());
            }
        }
Example #7
        public void Dispose()
        {
            if (_running)
            {
                Shutdown();
            }

            _consumer?.Dispose();
            _consumer = null;

            _manager?.Dispose();
            _manager = null;
        }
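Shutdown() itself is not shown on this page; given the consumer thread created in Example #11, a minimal sketch of what it could look like, where the _running flag handling is an assumption.

        private void Shutdown()
        {
            _running = false;   // assumed: signals the consumer loop to stop
            _thread?.Join();    // wait for the thread created in Example #11 to finish
        }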
Example #8
        internal static void GetAdjustedOffset <TKey, TData>(string topic, KafkaSimpleManager <TKey, TData> kafkaSimpleManager,
                                                             int partitionID,
                                                             KafkaOffsetType offsetType,
                                                             long offset,
                                                             int lastMessageCount, out long earliest, out long latest, out long offsetBase)
        {
            StringBuilder sbSummaryOfOnePartition = new StringBuilder();

            kafkaSimpleManager.RefreshAndGetOffset(0, string.Empty, 0, topic, partitionID, true, out earliest, out latest);
            sbSummaryOfOnePartition.AppendFormat("\t\tearliest:{0}\tlatest:{1}\tlength:{2}"
                                                  , earliest
                                                  , latest
                                                  , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());

            if (offsetType == KafkaOffsetType.Timestamp)
            {
                DateTime timestampVal = KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(offset);

                long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestampVal);
                try
                {
                    long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(0, string.Empty, 0, topic, partitionID, timestampVal);

                    sbSummaryOfOnePartition.AppendFormat("\r\n");
                    sbSummaryOfOnePartition.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tTime(Local):{2}\tUnixTimestamp:{3}\t"
                                                          , timeStampOffset
                                                          , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                          , timestampVal.ToString("s")
                                                          , timestampLong);

                    offsetBase = KafkaClientHelperUtils.GetValidStartReadOffset(offsetType, earliest, latest, timeStampOffset, lastMessageCount);
                }
                catch (TimeStampTooSmallException e)
                {
                    sbSummaryOfOnePartition.AppendFormat("\r\n");
                    sbSummaryOfOnePartition.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tTime(Local):{2}\tUnixTimestamp:{3}\t"
                                                          , "N/A: no data before the specified time; retry with a later timestamp."
                                                          , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                          , timestampVal.ToString("s")
                                                          , timestampLong);

                    throw new ApplicationException(sbSummaryOfOnePartition.ToString(), e);
                }
            }
            else
            {
                offsetBase = KafkaClientHelperUtils.GetValidStartReadOffset(offsetType, earliest, latest, 0, lastMessageCount);
            }

            Logger.Info(sbSummaryOfOnePartition.ToString());
        }
Example #9
        static void Main(string[] args)
        {
            int    correlationID = 0;
            string topic         = "t4";

            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = 100,
                BufferSize   = 100,
                MaxWaitTime  = 5000,
                MinWaitBytes = 1,
                Zookeeper    = "10.1.1.231:2181,10.1.1.232:2181,10.1.1.233:2181/kafka"
            };

            ProducerConfiguration producerConfiguration = new ProducerConfiguration(new [] { new BrokerConfiguration() });

            config.Verify();

            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, topic, true);

                kafkaSimpleManager.InitializeProducerPoolForTopic(0, ClientID, correlationID, topic, true,
                                                                  producerConfiguration, true);

                var producer1 = kafkaSimpleManager.GetProducerOfPartition(topic, 0, true);
                var producer2 = kafkaSimpleManager.GetProducerOfPartition(topic, 4, true);

                for (int i = 0; i < 100; i++)
                {
                    var producer = i % 2 == 0 ? producer1 : producer2;
                    var tKey     = Encoding.UTF8.GetBytes(DateTime.Now.Ticks.ToString());
                    var tValue   = Encoding.UTF8.GetBytes("Hello world " + i);
                    producer.Send(new ProducerData <int, Message>(topic,
                                                                  new Message(tValue, tKey, CompressionCodecs.DefaultCompressionCodec)));
                }

                producer1.Dispose();
                producer2.Dispose();

                Console.WriteLine("Topic is: " + topicMetadata.Topic);
            }

            //Console.ReadKey();
        }
Example #10
        protected virtual void Dispose(bool disposing)
        {
            if (!this.disposed)
            {
                if (disposing)
                {
                    lock (lockForDictionaryChange)
                    {
                        Logger.Info("Got lock Will dispose KafkaSimpleManager ...");
                        kafkaSimpleManage.Dispose();
                        kafkaSimpleManage = null;
                        config            = null;
                        Logger.Info("Finish dispose KafkaSimpleManager ...will release lock.");
                    }
                }

                disposed = true;
            }
        }
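This is the protected half of the standard IDisposable pattern; the public entry points that normally accompany it look like the following sketch (the finalizer is an assumption, written for the wrapper class from Example #1).

        public void Dispose()
        {
            Dispose(true);
            GC.SuppressFinalize(this);
        }

        ~ProducePerfTestKafkaSimpleManagerWrapper()
        {
            Dispose(false);
        }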
Example #11
        public KafkaSimpleConsumerStream(IZookeeperConnection zkConnect, string topicName, int partition, long offset)
        {
            _topicName = topicName;
            _partition = partition;
            _manager   = zkConnect.CreateSimpleManager();

            _manager.RefreshMetadata(
                KafkaConfig.VersionId,
                KafkaConfig.ClientId,
                KafkaConfig.NextCorrelationId(),
                _topicName,
                true);

            _consumer = _manager.GetConsumer(topicName, partition);

            _thread = new Thread(RunConsumer);

            _nextOffset = offset;
        }
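RunConsumer is not defined on this page; a minimal sketch of the thread body, reusing the Fetch/PartitionData pattern from Example #5. ProcessMessage and the _running flag are assumptions.

        private void RunConsumer()
        {
            while (_running)   // assumed flag, cleared by Shutdown() (see Example #7)
            {
                var response = _consumer.Fetch(KafkaConfig.ClientId, _topicName, KafkaConfig.NextCorrelationId(),
                                               _partition, _nextOffset,
                                               _consumer.Config.FetchSize, _manager.Config.MaxWaitTime, _manager.Config.MinWaitBytes);

                foreach (var messageAndOffset in response.PartitionData(_topicName, _partition).MessageSet)
                {
                    ProcessMessage(messageAndOffset);                 // assumed per-message handler
                    _nextOffset = messageAndOffset.MessageOffset + 1; // resume after the last message seen
                }
            }
        }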
Example #12
        private static bool TopicExsits(string topic)
        {
            var managerConfig = new KafkaSimpleManagerConfiguration
            {
                FetchSize  = KafkaSimpleManagerConfiguration.DefaultFetchSize,
                BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize,
                Zookeeper  = ZooKeeperSetting.Address
            };

            using (var kafkaManager = new KafkaSimpleManager <int, Message>(managerConfig))
            {
                try
                {
                    var allPartitions = kafkaManager.GetTopicPartitionsFromZK(topic);
                    return(allPartitions.Count > 0);
                }
                catch (Exception)
                {
                    return(false);
                }
            }
        }
Example #13
        bool TopicExsits(string topic)
        {
            var managerConfig = new KafkaSimpleManagerConfiguration()
            {
                FetchSize  = KafkaSimpleManagerConfiguration.DefaultFetchSize,
                BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize,
                Zookeeper  = _zkConnectionString
            };

            using (var kafkaManager = new KafkaSimpleManager <string, Kafka.Client.Messages.Message>(managerConfig))
            {
                try
                {
                    // get all available partitions for a topic through the manager
                    var allPartitions = kafkaManager.GetTopicPartitionsFromZK(topic);
                    return(allPartitions.Count > 0);
                }
                catch (Exception)
                {
                    return(false);
                }
            }
        }
Example #14
        public override void Process()
        {
            const int factor = 1;
            var       config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize  = KafkaSimpleManagerConfiguration.DefaultFetchSize / factor,
                BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize / factor,
                Zookeeper  = _processorOptions.Zookeeper,
            };

            var manager = new KafkaSimpleManager <int, Message>(config);

            const int correlationId = 0;
            var       clientId      = _processorOptions.ClientId;
            const int versionId     = 1;

            manager.RefreshMetadata(versionId, clientId, correlationId,
                                    _processorOptions.Topic, true);
            var consumer = manager.GetConsumer(_processorOptions.Topic, _processorOptions.PartitionId);

            consumer.Config.AutoCommit = true;

            MainLoop(consumer, correlationId, manager);
        }
Example #15
        private static void ProduceToRandomOrSpecificPartition(ProduceSimpleHelperOption produceOptions)
        {
            using (KafkaSimpleManager <byte[], Message> kafkaSimpleManager = new KafkaSimpleManager <byte[], Message>(kafkaSimpleManagerConfig))
            {
                Stopwatch stopwatch2 = new Stopwatch();
                stopwatch2.Start();
                int nProducers = kafkaSimpleManager.InitializeProducerPoolForTopic(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, true, producerConfigTemplate, false);
                stopwatch2.Stop();
                Console.WriteLine("Spent {0:0.00} ms to create {1} producers. average {2:0.00} ms per producer. syncProducerOfOnePartition:{3} ", stopwatch2.Elapsed.TotalMilliseconds, nProducers, stopwatch2.Elapsed.TotalMilliseconds / nProducers, producerConfigTemplate.SyncProducerOfOneBroker);
                totalPartitionCount = kafkaSimpleManager.GetTopicMetadta(produceOptions.Topic).PartitionsMetadata.Count();

                int targetPartitionID = Int16.MinValue;
                while (true)
                {
                    Producer <byte[], Message> producer = null;
                    try
                    {
                        targetPartitionID = produceOptions.PartitionId >= 0 ? produceOptions.PartitionId : rand.Next(totalPartitionCount);
                        Logger.InfoFormat("Will try get producer for partition:{0} ", targetPartitionID);
                        producer = kafkaSimpleManager.GetProducerOfPartition(produceOptions.Topic, targetPartitionID, false);
                        Logger.InfoFormat("Get producer for  partition:{0}  Producer:{1} ", targetPartitionID, producer == null ? "null" : producer.ToString());
                        stopWatch.Start();
                        producer.Send(listOfDataNeedSendInOneBatch);
                        successMessageCount += produceOptions.MessageCountPerBatch;
                        if (!produceMessagePerPartition.ContainsKey(targetPartitionID))
                        {
                            produceMessagePerPartition.Add(targetPartitionID, 0);
                        }

                        produceMessagePerPartition[targetPartitionID] = produceMessagePerPartition[targetPartitionID] + listOfDataNeedSendInOneBatch.Count;
                        stopWatch.Stop();
                        sentBatchCount++;

                        if (sentBatchCount % 10 == 0)
                        {
                            RegularStatistics(produceOptions, sentBatchCount);
                        }

                        if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                        {
                            break;
                        }

                        // Partitions may have been added; periodically check whether the partition count has changed.
                        if (DateTime.UtcNow > lastTimeRefreshMetadata.AddMinutes(10))
                        {
                            List <string> partitions = kafkaSimpleManager.GetTopicPartitionsFromZK(produceOptions.Topic);
                            if (partitions.Count != totalPartitionCount)
                            {
                                nProducers          = kafkaSimpleManager.InitializeProducerPoolForTopic(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, true, producerConfigTemplate, false);
                                totalPartitionCount = kafkaSimpleManager.GetTopicMetadta(produceOptions.Topic).PartitionsMetadata.Count();
                                targetPartitionID   = produceOptions.PartitionId >= 0 ? produceOptions.PartitionId : rand.Next(totalPartitionCount);
                                Logger.InfoFormat("Will try get producer for partition:{0} ", targetPartitionID);
                                producer = kafkaSimpleManager.GetProducerOfPartition(produceOptions.Topic, targetPartitionID, false);
                                Logger.InfoFormat("Get producer for  partition:{0}  Producer:{1} ", targetPartitionID, producer == null ? "null" : producer.ToString());
                            }
                            lastTimeRefreshMetadata = DateTime.UtcNow;
                        }
                    }
                    catch (Exception e)
                    {
                        Logger.ErrorFormat("Got exception, maybe leader change or not available for some partition, will refresh metadata and recreate producer {0}", e.FormatException());
                        try
                        {
                            producer = kafkaSimpleManager.RefreshMetadataAndRecreateProducerOfOnePartition(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, targetPartitionID, true, true, producerConfigTemplate, false);
                        }
                        catch (Exception ex)
                        {
                            Logger.ErrorFormat("Got exception while RefreshMetadataAndRecreateProducerOfOnePartition, maybe leader change for some partition, will refresh metadata and recreate producer {0} after 3 seconds ...", ex.FormatException());
                        }

                        totalPartitionCount = kafkaSimpleManager.GetTopicMetadta(produceOptions.Topic).PartitionsMetadata.Count();
                        if (targetPartitionID >= totalPartitionCount)
                        {
                            targetPartitionID = produceOptions.PartitionId >= 0 ? produceOptions.PartitionId : rand.Next(totalPartitionCount);
                            Logger.InfoFormat("Will try get producer for partition:{0} ", targetPartitionID);
                            producer = kafkaSimpleManager.GetProducerOfPartition(produceOptions.Topic, targetPartitionID, false);
                            Logger.InfoFormat("Get producer for  partition:{0}  Producer:{1} ", targetPartitionID, producer == null ? "null" : producer.ToString());
                        }
                    }

                    if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                    {
                        break;
                    }
                }
            }

            RegularStatistics(produceOptions, sentBatchCount);
        }
Example #16
        private static void ProduceByPartitionerClass(ProduceSimpleHelperOption produceOptions, int partitionCountInZookeeper)
        {
            using (KafkaSimpleManager <byte[], Message> kafkaSimpleManager = new KafkaSimpleManager <byte[], Message>(kafkaSimpleManagerConfig))
            {
                Stopwatch stopwatch2 = new Stopwatch();
                stopwatch2.Start();
                int nProducers = kafkaSimpleManager.InitializeProducerPoolForTopic(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, true, producerConfigTemplate, false);
                stopwatch2.Stop();
                Console.WriteLine("Spent {0:0.00} ms to create {1} producers. average {2:0.00} ms per producer. syncProducerOfOnePartition:{3} ", stopwatch2.Elapsed.TotalMilliseconds, nProducers, stopwatch2.Elapsed.TotalMilliseconds / nProducers, producerConfigTemplate.SyncProducerOfOneBroker);

                while (true)
                {
                    Producer <byte[], Message> producer = null;
                    try
                    {
                        producer = kafkaSimpleManager.GetProducerWithPartionerClass(produceOptions.Topic);
                        // Refresh the partition count from the latest topic metadata.
                        totalPartitionCount = kafkaSimpleManager.GetTopicMetadta(produceOptions.Topic).PartitionsMetadata.Count();
                        Logger.InfoFormat("Get GetProducerWithPartionerClass Producer:{0}  totalPartitionCount has metadata:{1} partitionCountInZookeeper:{2} "
                                          , producer == null ? "null" : producer.ToString(), totalPartitionCount, partitionCountInZookeeper);

                        stopWatch.Start();
                        CountExpectCount(listOfKeys, produceOptions.PartitionerClass, partitionCountInZookeeper);
                        producer.Send(listOfDataNeedSendInOneBatch);
                        successMessageCount += produceOptions.MessageCountPerBatch;

                        stopWatch.Stop();
                        sentBatchCount++;

                        if (sentBatchCount % 10 == 0)
                        {
                            RegularStatistics(produceOptions, sentBatchCount);
                        }
                        if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                        {
                            break;
                        }
                    }
                    catch (FailedToSendMessageException <byte[]> e)
                    {
                        // Sometimes the send may only partially succeed.
                        // For example, with 100 messages spread evenly across 5 partitions, if one partition has no leader,
                        // the 20 failed messages are returned here so you can retry them or handle them otherwise.
                        Logger.Error("===FAILED=========");
                        Logger.ErrorFormat("{0}", e.Message);
                        if (e.ProduceDispatchSeralizeResult != null &&
                            e.ProduceDispatchSeralizeResult.FailedProducerDatas != null)
                        {
                            Logger.ErrorFormat("Failed produce message key: ");
                            foreach (ProducerData <byte[], Message> a in e.ProduceDispatchSeralizeResult.FailedProducerDatas)
                            {
                                Logger.ErrorFormat("Key:{0} ", System.Text.Encoding.Default.GetString(a.Key));
                            }
                        }
                        // Count the partial success as well.
                        failedMessageCount  += e.CountFailed;
                        successMessageCount += e.CountAll - e.CountFailed;
                        sentBatchCount++;
                        if (sentBatchCount % 10 == 0)
                        {
                            RegularStatistics(produceOptions, sentBatchCount);
                        }
                        if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                        {
                            break;
                        }
                        Logger.Error("  \r\n");
                    }
                    catch (Exception e)
                    {
                        Logger.ErrorFormat("Got exception, maybe leader change for some partition, will refresh metadata and recreate producer {0}", e.FormatException());
                        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, ProducerRequestId++, produceOptions.Topic, true);
                        totalPartitionCount = topicMetadata.PartitionsMetadata.Count();
                        Logger.InfoFormat("Get GetProducerWithPartionerClass Producer:{0}  totalPartitionCount has metadata:{1} partitionCountInZookeeper:{2} "
                                          , producer == null ? "null" : producer.ToString(), totalPartitionCount, partitionCountInZookeeper);
                    }

                    if (produceOptions.BatchCount > 0 && sentBatchCount >= produceOptions.BatchCount)
                    {
                        break;
                    }
                }
            }

            RegularStatistics(produceOptions, sentBatchCount);
        }
Example #17
        internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic,
           string zookeeper,
           int partitionIndex,
           bool includePartitionDetailInfo,
           bool includeOffsetInfo,
           DateTime timestamp,
           SortedDictionary<int, int> partitionBrokerID_LeaderCountDistribAll,
           SortedDictionary<int, int> partitionBrokerID_ReplicaCountDistribAll,
           SortedDictionary<int, long> latestOffset,
           SortedDictionary<int, long> latestLength)
        {
            StringBuilder sb = new StringBuilder();
            string s = string.Empty;
            //BrokerID --> count as leader
            SortedDictionary<int, int> partitionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
            //BrokerID --> count as replica
            SortedDictionary<int, int> partitionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
            try
            {
                if (string.IsNullOrEmpty(zookeeper))
                {
                    Logger.Error(" zookeeper  should be provided");
                    sb.AppendFormat(DumpTopicError, topic);
                }
                else
                {
                    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                    {
                        Zookeeper = zookeeper
                    };
                    config.Verify();
                    Dictionary<int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
                    using (KafkaSimpleManager<int, Message> kafkaSimpleManager = new KafkaSimpleManager<int, Message>(config))
                    {
                        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);

                        int partitionCount = topicMetadata.PartitionsMetadata.Count();
                        sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);

                        int replicationFactor = topicMetadata.PartitionsMetadata.First().Replicas.Count();
                        sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);

                        //TODO: compare with detailDataInZookeeper and check which partitions are missing.
                        StringBuilder sbDetail = new StringBuilder();
                        if (includePartitionDetailInfo)
                        {
                            long sumEndOffset = 0;
                            long sumLength = 0;
                            foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                            {
                                int[] replicaInZookeeper = null;
                                if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                                {
                                    replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                                    detailDataInZookeeper.Remove(p.PartitionId);
                                }

                                #region One partition
                                long earliest = 0;
                                long latest = 0;
                                if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                                {
                                    //sbDetail.AppendFormat("\tTopic:{0}", topic);
                                    sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                                    if (p.Leader != null)
                                    {
                                        sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));

                                        if (partitionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                            partitionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                        else
                                            partitionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);

                                        if (partitionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                            partitionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                        else
                                            partitionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                    }
                                    else
                                        sbDetail.AppendFormat("\tLeader:NoLeader!");

                                    sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                                    foreach (Broker b in p.Replicas)
                                    {
                                        if (partitionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                            partitionBrokerID_ReplicaCountDistrib[b.Id]++;
                                        else
                                            partitionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);

                                        if (partitionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                            partitionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                        else
                                            partitionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                    }

                                    //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                                    ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                                    sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                                    //TODO: add missing replicas

                                    #region Offset
                                    if (includeOffsetInfo)
                                    {
                                        try
                                        {
                                            kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                            sumEndOffset += latest;
                                            sumLength += (latest - earliest);
                                            sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                                , earliest
                                                , latest
                                                , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                            sbDetail.AppendFormat("\r\n");

                                            latestOffset.Add(p.PartitionId, latest);
                                            latestLength.Add(p.PartitionId, latest - earliest);
                                        }
                                        catch (NoLeaderForPartitionException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }
                                        catch (UnableToConnectToHostException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }

                                        if (timestamp != DateTime.MinValue)
                                        {

                                            long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                            try
                                            {
                                                long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                               , timeStampOffset
                                               , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                               , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                            catch (TimeStampTooSmallException)
                                            {
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                 , "NA since no data before the time you specified, please retry with a bigger value."
                                                 , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                 , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }

                                        }
                                    }
                                    #endregion
                                }
                                #endregion
                            }
                            if (includeOffsetInfo)
                            {
                                sb.AppendFormat("SumeEndOffset:{0:0,0}  SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                            }
                            else
                            {
                                sb.AppendFormat("\r\n");
                            }

                            if (detailDataInZookeeper.Any())
                            {
                                foreach (KeyValuePair<int, int[]> kv in detailDataInZookeeper)
                                {
                                    sb.AppendFormat("=ERROR=MISSED partition= {0}  Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                                }
                            }
                        }

                        sb.Append(sbDetail.ToString());
                        sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                        foreach (KeyValuePair<int, int> kv in parttionBrokerID_LeaderCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                        foreach (KeyValuePair<int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\r\n");
                    }
                }

                s = sb.ToString();
            }
            catch (NoBrokerForTopicException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (UnableToConnectToHostException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
                     , ExceptionUtil.GetExceptionDetailInfo(ex),
                     topic,
                     zookeeper,
                     string.Empty,
                     partitionIndex,
                     includePartitionDetailInfo,
                     includeOffsetInfo,
                     timestamp,
                     s);
            }

            return s;
        }
Example #19
        internal static Dictionary <int, string> ConsumeDataOfOnePartitionTotally <TKey, TData>(string topic, KafkaSimpleManager <TKey, TData> kafkaSimpleManager,
                                                                                                int partitionID,
                                                                                                KafkaOffsetType offsetType,
                                                                                                long offsetBase,
                                                                                                long earliest,
                                                                                                long latest,
                                                                                                int lastMessageCount,
                                                                                                int wait,
                                                                                                long count,
                                                                                                string file
                                                                                                )
        {
            int totalCount                = 0;
            int correlationID             = 0;
            Dictionary <int, string> dict = new Dictionary <int, string>();

            Random        rand = new Random();
            StringBuilder sb   = new StringBuilder();

            using (FileStream fs = File.Open(file, FileMode.Append, FileAccess.Write, FileShare.Read))
            {
                using (StreamWriter sw = new StreamWriter(fs))
                {
                    #region repeatedly consume and dump data
                    long offsetLast = -1;
                    long l          = 0;
                    sw.WriteLine("Will read partition {0} from {1}.   Earliese:{2} Last:{3} ", partitionID, offsetBase, earliest, latest);
                    Logger.InfoFormat("Will read partition {0} from {1}.   Earliese:{2} Last:{3} ", partitionID, offsetBase, earliest, latest);
                    using (Consumer consumer = kafkaSimpleManager.GetConsumer(topic, partitionID))
                    {
                        while (true)
                        {
                            correlationID++;

                            // Assumption: fetch the next batch via the FetchAndGetMessageAndOffsetList helper referenced in Example #20.
                            List <MessageAndOffset> listMessageAndOffsets = FetchAndGetMessageAndOffsetList(consumer, correlationID, topic, partitionID, offsetBase,
                                                                                                            kafkaSimpleManager.Config.FetchSize, kafkaSimpleManager.Config.MaxWaitTime, kafkaSimpleManager.Config.MinWaitBytes);

                            if (listMessageAndOffsets == null)
                            {
                                Logger.Error("PullMessage got null  List<MessageAndOffset>, please check log for detail.");
                                break;
                            }
                            else
                            {
                                #region dump response.Payload
                                foreach (var a in listMessageAndOffsets)
                                {
                                    dict.Add((int)a.MessageOffset, Encoding.UTF8.GetString(a.Message.Payload));
                                }

                                if (listMessageAndOffsets.Any())
                                {
                                    // Last() throws on an empty batch, so only advance offsetLast when messages arrived.
                                    offsetLast  = listMessageAndOffsets.Last().MessageOffset;
                                    totalCount += listMessageAndOffsets.Count;
                                    sw.WriteLine("Finish read partition {0} to {1}.   Earliese:{2} Last:{3} ", partitionID, offsetLast, earliest, latest);
                                    Logger.InfoFormat("Finish read partition {0} to {1}.   Earliese:{2} Last:{3} ", partitionID, offsetLast, earliest, latest);
                                    offsetBase = offsetLast + 1;
                                }
                                else
                                {
                                    if (offsetBase == latest)
                                    {
                                        sw.WriteLine("Hit end of queue.");
                                    }
                                    sw.WriteLine("Finish read partition {0} to {1}.   Earliese:{2} Last:{3} ", partitionID, offsetLast, earliest, latest);
                                    Logger.InfoFormat("Finish read partition {0} to {1}.   Earliese:{2} Last:{3} ", partitionID, offsetLast, earliest, latest);
                                    break;
                                }
                                Thread.Sleep(wait);
                                #endregion
                            }
                            l++;
                            if (offsetBase == latest)
                            {
                                break;
                            }
                        }
                    }
                    #endregion
                    Logger.InfoFormat("Topic:{0} Partitoin:{1} Finish Read.    Earliest:{2} Latest:{3},  totalCount:{4} "
                                      , topic, partitionID, earliest, latest, totalCount);
                    sw.WriteLine("Topic:{0} Partitoin:{1} Finish Read.    Earliest:{2} Latest:{3},  totalCount:{4} \r\n "
                                 , topic, partitionID, earliest, latest, totalCount);
                }
            }
            return(dict);
        }
Example #20
        static void Main(string[] args)
        {
            int correlationID = 0;

            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = 10,
                BufferSize   = 100000,
                MaxWaitTime  = 500,
                MinWaitBytes = 50,
                Zookeeper    = "10.1.1.231:2181,10.1.1.232:2181,10.1.1.233:2181/kafka"
            };

            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, "test", true);

                Consumer consumer = kafkaSimpleManager.GetConsumer("test", 0);

                //FetchResponse response = consumer.Fetch(Assembly.GetExecutingAssembly().ManifestModule.ToString(), "test", correlationID, 0, 0, 100, 5000, 100);
                //var messages = response.PartitionData("test", 0).GetMessageAndOffsets();

                //messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));

                //var messages = FetchAndGetMessageAndOffsetList(consumer, correlationID, "test", 0, 0, 100, 5000, 100);
                //messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
                long offsetLast = -1;
                long l          = 0;

                long totalCount = 0, offsetBase = 0, partitionID = 0, lastNotifytotalCount = 0, latest = 0, earliest = 0;

                while (true)
                {
                    correlationID++;
                    // Pass offsetBase so each fetch resumes where the previous batch ended; a constant offset would re-read the same data forever.
                    List <MessageAndOffset> messages = FetchAndGetMessageAndOffsetList(consumer,
                                                                                       correlationID++,
                                                                                       "test",
                                                                                       0,
                                                                                       offsetBase,
                                                                                       10,
                                                                                       5000,
                                                                                       1);

                    if (messages == null)
                    {
                        Logger.Error("PullMessage got null  List<MessageAndOffset>, please check log for detail.");
                        break;
                    }
                    else
                    {
                        #region dump response.Payload
                        if (messages.Any())
                        {
                            offsetLast  = messages.Last().MessageOffset;
                            totalCount += messages.Count;
                            Logger.InfoFormat("Finish read partition {0} to {1}.  ", partitionID, offsetLast);
                            offsetBase = offsetLast + 1;
                            if (totalCount - lastNotifytotalCount > 1000)
                            {
                                Console.WriteLine("Partition: {0} totally read  {1}  will continue read from   {2}", partitionID, totalCount, offsetBase);
                                lastNotifytotalCount = totalCount;
                            }

                            // print the message payloads
                            messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
                        }
                        else
                        {
                            Logger.InfoFormat("Finish read partition {0} to {1}.   Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                            Console.WriteLine("Partition: {0} totally read  {1}  Hit end of queue   {2}", partitionID, totalCount, offsetBase);
                            break;
                        }
                        #endregion
                    }
                }
            }

            Console.ReadKey();
        }
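FetchAndGetMessageAndOffsetList is called above but not defined on this page; below is a plausible sketch assembled from the Fetch and GetMessageAndOffsets calls in the commented-out lines, so the original helper may differ.

        private static List<MessageAndOffset> FetchAndGetMessageAndOffsetList(Consumer consumer, int correlationId,
                                                                              string topic, int partitionId, long offset,
                                                                              int fetchSize, int maxWaitTime, int minWaitBytes)
        {
            // Issue one fetch request, mirroring the call commented out near the top of Example #20.
            FetchResponse response = consumer.Fetch(ClientID, topic, correlationId, partitionId, offset,
                                                    fetchSize, maxWaitTime, minWaitBytes);

            if (response == null)
            {
                return null;   // callers above treat null as a failed pull
            }

            return response.PartitionData(topic, partitionId).GetMessageAndOffsets();
        }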
Example #21
        internal static void ConsumeDataSimple(ConsumeDataHelperArguments dumpdataOptions)
        {
            correlationID        = 0;
            totalCountUTF8       = 0;
            totalCountOriginal   = 0;
            totalCount           = 0;
            lastNotifytotalCount = 0;
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                FetchSize    = dumpdataOptions.FetchSize,
                BufferSize   = dumpdataOptions.BufferSize,
                MaxWaitTime  = dumpdataOptions.MaxWaitTime,
                MinWaitBytes = dumpdataOptions.MinWaitBytes,
                Zookeeper    = dumpdataOptions.Zookeeper
            };

            config.Verify();

            bool finish = false;
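            // Retry design: each pass of the while(true) loop below sweeps the
            // selected partitions; on any exception the topic metadata is refreshed
            // and the whole sweep restarts, until either every partition is drained
            // or the requested message count is reached.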

            try
            {
                using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
                {
                    TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
                    while (true)
                    {
                        try
                        {
                            for (int i = 0; i <= topicMetadata.PartitionsMetadata.Max(r => r.PartitionId); i++)
                            {
                                if (dumpdataOptions.PartitionIndex == -1 || i == dumpdataOptions.PartitionIndex)
                                {
                                    #region Get real offset and adjust
                                    long earliest   = 0;
                                    long latest     = 0;
                                    long offsetBase = 0;
                                    OffsetHelper.GetAdjustedOffset <int, Message>(dumpdataOptions.Topic
                                                                                  , kafkaSimpleManager, i
                                                                                  , KafkaNetLibraryExample.ConvertOffsetType(dumpdataOptions.Offset)
                                                                                  , KafkaNetLibraryExample.ConvertOffset(dumpdataOptions.Offset)
                                                                                  , dumpdataOptions.LastMessagesCount, out earliest, out latest, out offsetBase);
                                    #endregion
                                    Console.WriteLine("Topic:{0} Partition:{1} will read from {2} earliest:{3} latest:{4}", dumpdataOptions.Topic, i, offsetBase, earliest, latest);
                                    finish = ConsumeDataOfOnePartition(kafkaSimpleManager, i, offsetBase, earliest, latest, dumpdataOptions);
                                    if (finish)
                                    {
                                        break;
                                    }
                                }
                            }

                            finish = true;
                        }
                        catch (Exception ex)
                        {
                            Logger.ErrorFormat("ConsumeDataSimple Got exception, will refresh metadata. {0}", ex.FormatException());
                            kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
                        }

                        if (finish)
                        {
                            break;
                        }
                    }
                }

                Logger.InfoFormat("Topic:{0} Finish Read.     totalCount:{1} ", dumpdataOptions.Topic, totalCount);
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("ConsumeDataSimple  Got exception:{0}\r\ninput parameter: {1}", ex.FormatException(), dumpdataOptions.ToString());
            }
        }
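        // Hedged usage sketch for ConsumeDataSimple. It assumes the
        // ConsumeDataHelperArguments properties read above are settable; every value
        // below is a placeholder.
        internal static void ConsumeDataSimpleUsageSketch()
        {
            ConsumeDataHelperArguments options = new ConsumeDataHelperArguments()
            {
                Zookeeper      = "localhost:2181",    // placeholder ZooKeeper connection string
                Topic          = "test",              // placeholder topic
                PartitionIndex = -1,                  // -1 means all partitions
                FetchSize      = 1024 * 1024,
                BufferSize     = 1024 * 1024,
                MaxWaitTime    = 1000,
                MinWaitBytes   = 1,
                File           = @"c:\temp\dump.txt", // placeholder output file
                Count          = 0                    // <= 0 means no message-count limit
            };

            ConsumeDataSimple(options);
        }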
        private static bool ConsumeDataOfOnePartition <TKey, TData>(KafkaSimpleManager <TKey, TData> kafkaSimpleManager,
                                                                    int partitionID,
                                                                    long offsetBase, long earliest, long latest,
                                                                    ConsumeDataHelperArguments dumpdataOptions)
        {
            Random        rand = new Random();
            StringBuilder sb   = new StringBuilder();

            using (FileStream fs = File.Open(dumpdataOptions.File, FileMode.Append, FileAccess.Write, FileShare.Read))
            {
                using (StreamWriter sw = new StreamWriter(fs))
                {
                    #region repeatly consume and dump data
                    long offsetLast = -1;
                    long l          = 0; // fetch-iteration counter (diagnostic only)
                    sw.WriteLine("Will read partition {0} from {1}.   Earliese:{2} latest:{3} ", partitionID, offsetBase, earliest, latest);
                    Logger.InfoFormat("Will read partition {0} from {1}.   Earliese:{2} latest:{3} ", partitionID, offsetBase, earliest, latest);
                    using (Consumer consumer = kafkaSimpleManager.GetConsumer(dumpdataOptions.Topic, partitionID))
                    {
                        while (true)
                        {
                            List <MessageAndOffset> listMessageAndOffsets = ConsumeSimpleHelper.FetchAndGetMessageAndOffsetList(consumer
                                                                                                                                , correlationID++, // one increment per request is enough
                                                                                                                                dumpdataOptions.Topic, partitionID, offsetBase,
                                                                                                                                consumer.Config.FetchSize,
                                                                                                                                kafkaSimpleManager.Config.MaxWaitTime,
                                                                                                                                kafkaSimpleManager.Config.MinWaitBytes);

                            if (listMessageAndOffsets == null)
                            {
                                Logger.Error("PullMessage got null  List<MessageAndOffset>, please check log for detail.");
                                break;
                            }
                            else
                            {
                                #region dump response.Payload
                                if (listMessageAndOffsets.Any())
                                {
                                    offsetLast  = listMessageAndOffsets.Last().MessageOffset;
                                    totalCount += listMessageAndOffsets.Count;
                                    KafkaConsoleUtil.DumpDataToFile(dumpdataOptions.DumpDataAsUTF8, dumpdataOptions.DumpBinaryData, sw, fs, listMessageAndOffsets, dumpdataOptions.Count, offsetBase, ref totalCountUTF8, ref totalCountOriginal);
                                    sw.WriteLine("Finish read partition {0} to {1}.   Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                                    Logger.InfoFormat("Finish read partition {0} to {1}.   Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                                    offsetBase = offsetLast + 1;
                                    if (totalCount - lastNotifytotalCount > 1000)
                                    {
                                        Console.WriteLine("Partition: {0} totally read  {1}  will continue read from   {2}", partitionID, totalCount, offsetBase);
                                        lastNotifytotalCount = totalCount;
                                    }
                                }
                                else
                                {
                                    if (offsetBase == latest)
                                    {
                                        sw.WriteLine("Hit end of queue.");
                                    }
                                    sw.WriteLine("Finish read partition {0} to {1}.   Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                                    Logger.InfoFormat("Finish read partition {0} to {1}.   Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                                    Console.WriteLine("Partition: {0} totally read  {1}  Hit end of queue   {2}", partitionID, totalCount, offsetBase);
                                    break;
                                }
                                Thread.Sleep(1000); // throttle between fetch requests
                                #endregion
                            }
                            l++;
                            if (totalCount >= dumpdataOptions.Count && dumpdataOptions.Count > 0)
                            {
                                return(true);
                            }
                        }
                    }
                    #endregion
                    Logger.InfoFormat("Topic:{0} Partitoin:{1} Finish Read.    Earliest:{2} Latest:{3},  totalCount:{4} "
                                      , dumpdataOptions.Topic, partitionID, earliest, latest, totalCount);
                    sw.WriteLine("Topic:{0} Partitoin:{1} Finish Read.    Earliest:{2} Latest:{3},  totalCount:{4} \r\n "
                                 , dumpdataOptions.Topic, partitionID, earliest, latest, totalCount);
                }
            }

            // True tells the caller that the requested message count has been reached.
            return totalCount >= dumpdataOptions.Count && dumpdataOptions.Count > 0;
        }
        internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic,
                                                                  string zookeeper,
                                                                  int partitionIndex,
                                                                  bool includePartitionDetailInfo,
                                                                  bool includeOffsetInfo,
                                                                  DateTime timestamp,
                                                                  SortedDictionary <int, int> parttionBrokerID_LeaderCountDistribAll,
                                                                  SortedDictionary <int, int> parttionBrokerID_ReplicaCountDistribAll,
                                                                  SortedDictionary <int, long> latestOffset,
                                                                  SortedDictionary <int, long> latestLength)
        {
            StringBuilder sb = new StringBuilder();
            string        s  = string.Empty;
            //BrokerID --> count of partitions for which this broker is the leader
            SortedDictionary <int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary <int, int>();
            //BrokerID --> count of partitions for which this broker holds a replica
            SortedDictionary <int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary <int, int>();

            try
            {
                if (string.IsNullOrEmpty(zookeeper))
                {
                    Logger.Error(" zookeeper  should be provided");
                    sb.AppendFormat(DumpTopicError, topic);
                }
                else
                {
                    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                    {
                        Zookeeper = zookeeper
                    };
                    config.Verify();
                    Dictionary <int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
                    using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
                    {
                        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);

                        int partitionCount = topicMetadata.PartitionsMetadata.Count();
                        sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);

                        int replicationFactor = topicMetadata.PartitionsMetadata.First().Replicas.Count();
                        sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);

                        //TODO:  compare detailDataInZookeeper and check which one missed.
                        StringBuilder sbDetail = new StringBuilder();
                        if (includePartitionDetailInfo)
                        {
                            long sumEndOffset = 0;
                            long sumLength    = 0;
                            foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                            {
                                int[] replicaInZookeeper = null;
                                if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                                {
                                    replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                                    detailDataInZookeeper.Remove(p.PartitionId);
                                }

                                #region One partition
                                long earliest = 0;
                                long latest   = 0;
                                if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                                {
                                    //sbDetail.AppendFormat("\tTopic:{0}", topic);
                                    sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                                    if (p.Leader != null)
                                    {
                                        sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));

                                        if (parttionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                        {
                                            parttionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                        }

                                        if (parttionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                        {
                                            parttionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                        }
                                    }
                                    else
                                    {
                                        sbDetail.AppendFormat("\tLeader:NoLeader!");
                                    }

                                    sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                                    foreach (Broker b in p.Replicas)
                                    {
                                        if (parttionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                        {
                                            parttionBrokerID_ReplicaCountDistrib[b.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                        }

                                        if (parttionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                        {
                                            parttionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                        }
                                        else
                                        {
                                            parttionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                        }
                                    }

                                    //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                                    ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                                    sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                                    //TODO: add missed replica

                                    #region Offset
                                    if (includeOffsetInfo)
                                    {
                                        try
                                        {
                                            kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                            sumEndOffset += latest;
                                            sumLength    += (latest - earliest);
                                            sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                                                  , earliest
                                                                  , latest
                                                                  , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                            sbDetail.AppendFormat("\r\n");

                                            latestOffset.Add(p.PartitionId, latest);
                                            latestLength.Add(p.PartitionId, latest - earliest);
                                        }
                                        catch (NoLeaderForPartitionException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }
                                        catch (UnableToConnectToHostException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }
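                                        // DateTime.MinValue is the sentinel for "no timestamp lookup".
                                        // When a real timestamp is given, the offset near that time is
                                        // resolved below; TimeStampTooSmallException means the partition
                                        // holds no data before the requested time.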

                                        if (timestamp != DateTime.MinValue)
                                        {
                                            long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                            try
                                            {
                                                long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                                      , timeStampOffset
                                                                      , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                                      , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                            catch (TimeStampTooSmallException)
                                            {
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                                      , "NA since no data before the time you specified, please retry with a bigger value."
                                                                      , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                                      , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                        }
                                    }
                                    #endregion
                                }
                                #endregion
                            }
                            if (includeOffsetInfo)
                            {
                                sb.AppendFormat("SumeEndOffset:{0:0,0}  SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                            }
                            else
                            {
                                sb.AppendFormat("\r\n");
                            }

                            if (detailDataInZookeeper.Any())
                            {
                                foreach (KeyValuePair <int, int[]> kv in detailDataInZookeeper)
                                {
                                    sb.AppendFormat("=ERROR=MISSED partition= {0}  Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                                }
                            }
                        }

                        sb.Append(sbDetail.ToString());
                        sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                        foreach (KeyValuePair <int, int> kv in parttionBrokerID_LeaderCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                        foreach (KeyValuePair <int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\r\n");
                    }
                }

                s = sb.ToString();
            }
            catch (NoBrokerForTopicException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (UnableToConnectToHostException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
                                   , ExceptionUtil.GetExceptionDetailInfo(ex),
                                   topic,
                                   zookeeper,
                                   string.Empty,
                                   partitionIndex,
                                   includePartitionDetailInfo,
                                   includeOffsetInfo,
                                   timestamp,
                                   s);
            }

            return(s);
        }
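        // Hedged usage sketch for DumpTopicMetadataAndOffsetInternal. The aggregate
        // dictionaries are allocated by the caller so counts can accumulate across
        // topics; the topic and ZooKeeper strings are placeholders, and zkClient is
        // assumed to be already connected.
        internal static void DumpTopicMetadataUsageSketch(ZooKeeperClient zkClient)
        {
            SortedDictionary<int, int>  leaderCountAll  = new SortedDictionary<int, int>();
            SortedDictionary<int, int>  replicaCountAll = new SortedDictionary<int, int>();
            SortedDictionary<int, long> latestOffset    = new SortedDictionary<int, long>();
            SortedDictionary<int, long> latestLength    = new SortedDictionary<int, long>();

            string report = DumpTopicMetadataAndOffsetInternal(zkClient,
                                                               "test",            // placeholder topic
                                                               "localhost:2181",  // placeholder ZooKeeper string
                                                               -1,                // -1 means all partitions
                                                               true,              // include partition detail
                                                               true,              // include offset info
                                                               DateTime.MinValue, // sentinel: skip the timestamp lookup
                                                               leaderCountAll, replicaCountAll,
                                                               latestOffset, latestLength);
            Console.WriteLine(report);
        }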