private static void TestBug1490652SendData(TestHelperOptions testOptions)
        {
            int correlationID = 0;

            try
            {
                KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                {
                    Zookeeper      = testOptions.Zookeeper,
                    MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
                };
                config.Verify();
                using (KafkaSimpleManager <int, Kafka.Client.Messages.Message> kafkaSimpleManager = new KafkaSimpleManager <int, Kafka.Client.Messages.Message>(config))
                {
                    TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, "ClientID", correlationID++, testOptions.Topic, true);
                    PartitionCount = topicMetadata.PartitionsMetadata.Count();
                    List <ProducerData <int, Message> > listOfDataNeedSendInOneBatch = new List <ProducerData <int, Message> >();
                    for (int i = 0; i < PartitionCount; i++)
                    {
                        TestBug1490652DataSent.Add(i, new Dictionary <int, string>());
                        for (int j = 0; j < TestBug1490652MessageCountPerPartition; j++)
                        {
                            string val  = KafkaClientHelperUtils.GetRandomString(testOptions.MessageSize);
                            byte[] bVal = System.Text.Encoding.UTF8.GetBytes(val);
                            // Set the key to the partition ID so the message falls directly into that partition.
                            Message message = new Message(bVal, CompressionCodecs.DefaultCompressionCodec);
                            listOfDataNeedSendInOneBatch.Add(new ProducerData <int, Message>(testOptions.Topic, i, message));
                            TestBug1490652DataSent[i].Add(j, val);
                        }
                    }

                    ProducerConfiguration producerConfig = new ProducerConfiguration(new List <BrokerConfiguration>())
                    {
                        PartitionerClass = ProducerConfiguration.DefaultPartitioner,
                        RequiredAcks     = 1,
                        BufferSize       = config.BufferSize,
                        ZooKeeper        = config.ZookeeperConfig,
                        MaxMessageSize   = Math.Max(config.MaxMessageSize, Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, testOptions.MessageSize))
                    };
                    producerConfig.SyncProducerOfOneBroker = 1;
                    Producer <int, Kafka.Client.Messages.Message> producer = new Producer <int, Kafka.Client.Messages.Message>(producerConfig);
                    producer.Send(listOfDataNeedSendInOneBatch);
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Produce data Got exception:{0}\r\ninput parameter: {1}\r\n"
                                   , ex.FormatException(), testOptions.ToString());
            }
        }
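A minimal call sketch for the helper above. TestHelperOptions is assumed to expose the Zookeeper, Topic, and MessageSize settings the method reads; the values here are hypothetical.

        // Hypothetical usage sketch; property names mirror what TestBug1490652SendData reads.
        private static void RunTestBug1490652SendDataExample()
        {
            TestHelperOptions testOptions = new TestHelperOptions()
            {
                Zookeeper   = "zkhost1:2181,zkhost2:2181", // assumed Zookeeper connection string
                Topic       = "mvlogs",                    // assumed topic name
                MessageSize = 1024                         // assumed payload size in bytes
            };

            TestBug1490652SendData(testOptions);
        }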
Example #2
        internal static void GetAdjustedOffset <TKey, TData>(string topic, KafkaSimpleManager <TKey, TData> kafkaSimpleManager,
                                                             int partitionID,
                                                             KafkaOffsetType offsetType,
                                                             long offset,
                                                             int lastMessageCount, out long earliest, out long latest, out long offsetBase)
        {
            StringBuilder sbSummaryOfOnePartition = new StringBuilder();

            kafkaSimpleManager.RefreshAndGetOffset(0, string.Empty, 0, topic, partitionID, true, out earliest, out latest);
            sbSummaryOfOnePartition.AppendFormat("\t\tearliest:{0}\tlatest:{1}\tlength:{2}"
                                                 , earliest
                                                 , latest
                                                 , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());

            if (offsetType == KafkaOffsetType.Timestamp)
            {
                DateTime timestampVal = KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(offset);

                long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestampVal);
                try
                {
                    long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(0, string.Empty, 0, topic, partitionID, timestampVal);

                    sbSummaryOfOnePartition.AppendFormat("\r\n");
                    sbSummaryOfOnePartition.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tTime(Local):{2}\tUnixTimestamp:{3}\t"
                                                         , timeStampOffset
                                                         , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                         , timestampVal.ToString("s")
                                                         , timestampLong);

                    offsetBase = KafkaClientHelperUtils.GetValidStartReadOffset(offsetType, earliest, latest, timeStampOffset, lastMessageCount);
                }
                catch (TimeStampTooSmallException e)
                {
                    sbSummaryOfOnePartition.AppendFormat("\r\n");
                    sbSummaryOfOnePartition.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tTime(Local):{2}\tUnixTimestamp:{3}\t"
                                                         , "N/A: no data exists before the specified time; retry with a later timestamp."
                                                         , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                         , timestampVal.ToString("s")
                                                         , timestampLong);

                    throw new ApplicationException(sbSummaryOfOnePartition.ToString(), e);
                }
            }
            else
            {
                offsetBase = KafkaClientHelperUtils.GetValidStartReadOffset(offsetType, earliest, latest, 0, lastMessageCount);
            }

            Logger.Info(sbSummaryOfOnePartition.ToString());
        }
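A hedged usage sketch for GetAdjustedOffset: resolve a start-read offset on one partition from a Unix-millisecond timestamp. The manager is assumed to be already configured, and the topic name and timestamp are illustrative.

        // Illustrative only: resolve where a timestamp-based read of partition 0 should start.
        private static void GetAdjustedOffsetExample(KafkaSimpleManager <int, Kafka.Client.Messages.Message> kafkaSimpleManager)
        {
            long earliest, latest, offsetBase;
            long unixMillis = 1461110400000; // assumed timestamp in milliseconds since epoch

            GetAdjustedOffset("mvlogs", kafkaSimpleManager, 0, KafkaOffsetType.Timestamp,
                              unixMillis, 10, out earliest, out latest, out offsetBase);

            Logger.InfoFormat("read starts at {0} (earliest:{1} latest:{2})", offsetBase, earliest, latest);
        }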
        private static void Initializer(ProducePerfTestHelperOption producewrapperOption)
        {
            Logger.InfoFormat("prepare perf test, {0} ", DateTime.Now);
            listOfDataNeedSendInOneBatch = new List <ProducerData <byte[], Message> >();
            for (int i = 0; i < producewrapperOption.MessageCountPerBatch; i++)
            {
                String vKey = KafkaClientHelperUtils.GetRandomString(32);
                byte[] bKey = System.Text.Encoding.UTF8.GetBytes(vKey);

                String val  = KafkaClientHelperUtils.GetRandomString(producewrapperOption.MessageSize);
                byte[] bVal = System.Text.Encoding.UTF8.GetBytes(val);

                Message message = new Message(bVal, bKey, producewrapperOption.CompressionCodec);
                listOfDataNeedSendInOneBatch.Add(new ProducerData <byte[], Message>(producewrapperOption.Topic, message));
            }
        }
Example #4
        private static void PrepareSentMessages(ProduceSimpleHelperOption produceroundrobinOptions)
        {
            Console.WriteLine("start perf test, {0} ", DateTime.Now);
            listOfDataNeedSendInOneBatch = new List <ProducerData <byte[], Message> >();
            for (int i = 0; i < produceroundrobinOptions.MessageCountPerBatch; i++)
            {
                String val  = KafkaClientHelperUtils.GetRandomString(produceroundrobinOptions.MessageSize);
                byte[] bVal = System.Text.Encoding.UTF8.GetBytes(val);
                byte[] bKey = System.Text.Encoding.UTF8.GetBytes(string.Format("{0:000000}", i));
                if (produceroundrobinOptions.ConstantMessageKey)
                {
                    // A constant key makes every message hash to the same partition.
                    bKey = System.Text.Encoding.UTF8.GetBytes(string.Format("{0:000000}", 0));
                }

                Message message = new Message(bVal, bKey, produceroundrobinOptions.CompressionCodec);
                listOfKeys.Add(bKey);
                listOfDataNeedSendInOneBatch.Add(new ProducerData <byte[], Message>(produceroundrobinOptions.Topic, bKey, message));
            }
        }
Example #5
        internal static long ConvertOffset(string offset)
        {
            long offsetTime = 0;
            bool success    = false;

            if (string.IsNullOrEmpty(offset))
            {
                throw new ArgumentNullException("offset");
            }

            switch (offset.ToLower(CultureInfo.InvariantCulture))
            {
            case "earliest":
            case "latest":
            case "last":
                offsetTime = 0;
                break;

            default:
                DateTime dateTimeOffset;
                if (DateTime.TryParse(offset, out dateTimeOffset))
                {
                    offsetTime = KafkaClientHelperUtils.ToUnixTimestampMillis(dateTimeOffset);
                    success    = true;
                }
                else if (long.TryParse(offset, out offsetTime))
                {
                    success = true;
                }

                if (!success)
                {
                    Logger.Error(string.Format("Error: invalid offset={0}, it should be either earliest|latest|last or an unsigned integer or a timestamp.", offset));
                    throw new ArgumentException(string.Format("invalid offset={0}", offset));
                }

                break;
            }

            return(offsetTime);
        }
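The three accepted input forms, as a sketch. Named offsets map to 0; a parseable date (interpretation depends on the current culture) becomes Unix milliseconds; a bare integer passes through unchanged; anything else throws ArgumentException.

        // Sketch of the input forms ConvertOffset accepts; values are illustrative.
        private static void ConvertOffsetExamples()
        {
            long a = ConvertOffset("earliest");            // named offset -> 0
            long b = ConvertOffset("2016-04-20 01:30:00"); // parseable date -> Unix ms (culture dependent)
            long c = ConvertOffset("8888888888");          // bare integer -> 8888888888
            // ConvertOffset("nonsense") would log an error and throw ArgumentException
        }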
        internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic,
                                                                  string zookeeper,
                                                                  int partitionIndex,
                                                                  bool includePartitionDetailInfo,
                                                                  bool includeOffsetInfo,
                                                                  DateTime timestamp,
                                                                  SortedDictionary <int, int> partitionBrokerID_LeaderCountDistribAll,
                                                                  SortedDictionary <int, int> partitionBrokerID_ReplicaCountDistribAll,
                                                                  SortedDictionary <int, long> latestOffset,
                                                                  SortedDictionary <int, long> latestLength)
        {
            StringBuilder sb = new StringBuilder();
            string        s  = string.Empty;
            //BrokerID --> count as leader
            SortedDictionary <int, int> partitionBrokerID_LeaderCountDistrib = new SortedDictionary <int, int>();
            //BrokerID --> count as replica
            SortedDictionary <int, int> partitionBrokerID_ReplicaCountDistrib = new SortedDictionary <int, int>();

            try
            {
                if (string.IsNullOrEmpty(zookeeper))
                {
                    Logger.Error(" zookeeper  should be provided");
                    sb.AppendFormat(DumpTopicError, topic);
                }
                else
                {
                    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
                    {
                        Zookeeper = zookeeper
                    };
                    config.Verify();
                    Dictionary <int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
                    using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
                    {
                        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);

                        int partitionCount = topicMetadata.PartitionsMetadata.Count();
                        sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);

                        int replicationFactor = topicMetadata.PartitionsMetadata.First().Replicas.Count();
                        sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);

                        //TODO: compare with detailDataInZookeeper and report which partitions are missing.
                        StringBuilder sbDetail = new StringBuilder();
                        if (includePartitionDetailInfo)
                        {
                            long sumEndOffset = 0;
                            long sumLength    = 0;
                            foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                            {
                                int[] replicaInZookeeper = null;
                                if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                                {
                                    replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                                    detailDataInZookeeper.Remove(p.PartitionId);
                                }

                                #region One partition
                                long earliest = 0;
                                long latest   = 0;
                                if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                                {
                                    //sbDetail.AppendFormat("\tTopic:{0}", topic);
                                    sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                                    if (p.Leader != null)
                                    {
                                        sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));

                                        if (partitionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                        {
                                            partitionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                        }
                                        else
                                        {
                                            partitionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                        }

                                        if (partitionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                        {
                                            partitionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                        }
                                        else
                                        {
                                            partitionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                        }
                                    }
                                    else
                                    {
                                        sbDetail.AppendFormat("\tLeader:NoLeader!");
                                    }

                                    sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                                    foreach (Broker b in p.Replicas)
                                    {
                                        if (partitionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                        {
                                            partitionBrokerID_ReplicaCountDistrib[b.Id]++;
                                        }
                                        else
                                        {
                                            partitionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                        }

                                        if (partitionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                        {
                                            partitionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                        }
                                        else
                                        {
                                            partitionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                        }
                                    }

                                    //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                                    ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                                    sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                                    //TODO: add missed replica

                                    #region Offset
                                    if (includeOffsetInfo)
                                    {
                                        try
                                        {
                                            kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                            sumEndOffset += latest;
                                            sumLength    += (latest - earliest);
                                            sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                                                  , earliest
                                                                  , latest
                                                                  , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                            sbDetail.AppendFormat("\r\n");

                                            latestOffset.Add(p.PartitionId, latest);
                                            latestLength.Add(p.PartitionId, latest - earliest);
                                        }
                                        catch (NoLeaderForPartitionException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }
                                        catch (UnableToConnectToHostException e)
                                        {
                                            sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                        }

                                        if (timestamp != DateTime.MinValue)
                                        {
                                            long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                            try
                                            {
                                                long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                                      , timeStampOffset
                                                                      , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                                      , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                            catch (TimeStampTooSmallException)
                                            {
                                                sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                                                      , "NA since no data before the time you specified, please retry with a bigger value."
                                                                      , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                                                      , timestampLong);
                                                sbDetail.AppendFormat("\r\n");
                                            }
                                        }
                                    }
                                    #endregion
                                }
                                #endregion
                            }
                            if (includeOffsetInfo)
                            {
                                sb.AppendFormat("SumeEndOffset:{0:0,0}  SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                            }
                            else
                            {
                                sb.AppendFormat("\r\n");
                            }

                            if (detailDataInZookeeper.Any())
                            {
                                foreach (KeyValuePair <int, int[]> kv in detailDataInZookeeper)
                                {
                                    sb.AppendFormat("=ERROR=MISSED partition= {0}  Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                                }
                            }
                        }

                        sb.Append(sbDetail.ToString());
                        sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                        foreach (KeyValuePair <int, int> kv in parttionBrokerID_LeaderCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                        sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                        foreach (KeyValuePair <int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                        {
                            sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                        }

                        sb.AppendFormat("\r\n");
                    }
                }

                s = sb.ToString();
            }
            catch (NoBrokerForTopicException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (UnableToConnectToHostException e)
            {
                sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
                s = sb.ToString();
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
                                   , ExceptionUtil.GetExceptionDetailInfo(ex),
                                   topic,
                                   zookeeper,
                                   string.Empty,
                                   partitionIndex,
                                   includePartitionDetailInfo,
                                   includeOffsetInfo,
                                   timestamp,
                                   s);
            }

            return(s);
        }
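A hedged call sketch for the dump helper above, passing empty accumulator dictionaries and an open ZooKeeperClient; the topic name and Zookeeper string are assumed values.

        // Illustrative only: dump metadata and offsets for every partition of one topic.
        private static void DumpTopicExample(ZooKeeperClient zkClient)
        {
            SortedDictionary <int, int>  leaderCountAll  = new SortedDictionary <int, int>();
            SortedDictionary <int, int>  replicaCountAll = new SortedDictionary <int, int>();
            SortedDictionary <int, long> latestOffset    = new SortedDictionary <int, long>();
            SortedDictionary <int, long> latestLength    = new SortedDictionary <int, long>();

            string report = DumpTopicMetadataAndOffsetInternal(zkClient, "mvlogs",
                                                               "zkhost1:2181",    // assumed Zookeeper string
                                                               -1,                // -1 means all partitions
                                                               true,              // include partition detail
                                                               true,              // include offset info
                                                               DateTime.MinValue, // MinValue skips the timestamp lookup
                                                               leaderCountAll, replicaCountAll,
                                                               latestOffset, latestLength);
            Console.WriteLine(report);
        }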
        /// <summary>
        /// Initialize the SyncProducerPool, either from the broker list provided by the caller or by querying Zookeeper
        /// </summary>
        private void InitSyncProducerPool()
        {
            if (this.syncProducerList != null)
            {
                // already initialized
                return;
            }

            if (!string.IsNullOrEmpty(this.HelperConfiguration.KafkaBrokerList))
            {
                // Honor KafkaBrokerList first
                this.syncProducerList = new List <ISyncProducer>();
                string[] brokers = this.HelperConfiguration.KafkaBrokerList.Split(new char[] { ',' });

                int i = 0;
                foreach (string v in brokers)
                {
                    i++;
                    string[] brokerParams = v.Split(new char[] { ':' });
                    int      port         = 0;
                    if (brokerParams.Count() == 2 && !string.IsNullOrEmpty(brokerParams[0]) && int.TryParse(brokerParams[1], out port))
                    {
                        var syncProducer = KafkaClientHelperUtils.TryCreateSyncProducer(i, brokerParams[0], port);
                        if (syncProducer != null)
                        {
                            this.syncProducerList.Add(syncProducer);
                        }
                    }
                }
            }
            else if (this.HelperConfiguration.ZookeeperConfig != null)
            {
                // Honor the Zookeeper connection string only when KafkaBrokerList is not provided
                var producerConfig = new ProducerConfiguration(new List <BrokerConfiguration>());
                producerConfig.ZooKeeper = this.HelperConfiguration.ZookeeperConfig;
                //Backward compatible, so set to 1.
                producerConfig.SyncProducerOfOneBroker = 1;
                this.syncProducerPool = new SyncProducerPool(producerConfig);
                this.syncProducerList = this.syncProducerPool.GetShuffledProducers();
            }
            else if (this.HelperConfiguration.LeaderConfig != null)
            {
                // If only a leader is provided, add it to syncProducerList
                var leader = this.HelperConfiguration.LeaderConfig;
                this.syncProducerList = new List <ISyncProducer>()
                {
                    new SyncProducer(new SyncProducerConfiguration()
                    {
                        BrokerId = leader.BrokerId, Host = leader.Host, Port = leader.Port
                    })
                };
            }

            if (this.syncProducerList == null || this.syncProducerList.Count == 0)
            {
                string s = string.Format("KafkaClientHelperWrapper[{0}] SyncProducerPool Initialization produced empty syncProducer list,Kafka={1},Zookeeper={2}",
                                         this.GetHashCode().ToString("X"), this.HelperConfiguration.KafkaBrokerList, this.HelperConfiguration.ZookeeperConfig);

                Logger.Debug(s);
                this.syncProducerList = null;
                throw new ArgumentException(s);
            }

            this.RoundRobinSyncProducer(this.syncProducerList.Count);
            this.lastTimeBrokerListUpdated = DateTime.Now;
            Logger.DebugFormat("KafkaClientHelperWrapper[{0}] SyncProducerPool initialized", this.GetHashCode().ToString("X"));
        }
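The pool honors three mutually exclusive sources in priority order: KafkaBrokerList first, then ZookeeperConfig, then LeaderConfig. A sketch of the choices, assuming a writable HelperConfiguration (the property names mirror what InitSyncProducerPool reads; the values are assumed):

        // Sketch: the three configuration sources, checked in priority order.
        private void ConfigurePoolExample()
        {
            // 1) An explicit broker list wins when present:
            this.HelperConfiguration.KafkaBrokerList = "broker1:9092,broker2:9092";

            // 2) Otherwise a Zookeeper connection drives broker discovery:
            //    this.HelperConfiguration.ZookeeperConfig = ...;

            // 3) Otherwise a single known leader is used directly:
            //    this.HelperConfiguration.LeaderConfig = new BrokerConfiguration()
            //    {
            //        BrokerId = 1, Host = "broker1", Port = 9092
            //    };

            this.InitSyncProducerPool(); // throws ArgumentException if no producer could be created
        }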
        /// <summary>
        /// Find a leader broker by issuing a topic metadata request
        /// </summary>
        /// <returns>number of leaders found</returns>
        public int CallKafkaAndFindLeader()
        {
            string s = string.Empty;

            // retry at least once
            int maxRetry   = 1;
            int retryCount = 0;

            while (this.leaders.Count == 0 && retryCount < maxRetry)
            {
                SyncProducerConfiguration producerConfig = null;
                try
                {
                    Console.WriteLine("in try catch block");
                    if (this.syncProducerList == null)
                    {
                        // initialize SyncProducerPool
                        this.InitSyncProducerPool();
                    }

                    // Retry each broker twice: a non-leader broker may respond with an error, and some leaders know only a subset of partitions
                    maxRetry = this.syncProducerList == null ? maxRetry : this.syncProducerList.Count * 2;
                    if (retryCount > 0)
                    {
                        // Not the first attempt: rotate to the next producer and wait before retrying
                        this.RoundRobinSyncProducer(this.syncProducerList.Count);
                        Thread.Sleep(this.HelperConfiguration.MaxRetryWaitTime);
                    }

                    producerConfig = this.syncProducerList[this.producerIndex].Config;
                    Console.WriteLine("got producer config");
                    IEnumerable <TopicMetadata> topicMetaData = this.syncProducerList[this.producerIndex].Send(this.topicMetaRequest);
                    Console.WriteLine("got topic meta data");
                    if (topicMetaData.Count() == 0)
                    {
                        s = "FindLeader found ZERO count topicMetaData,topic={0},producerIndex[{1}]=[{2}]";
                        Logger.ErrorFormat(s, this.topic, this.producerIndex, KafkaClientHelperUtils.SyncProducerConfigToString(producerConfig));
                        continue;
                    }
                    else if (topicMetaData.First().PartitionsMetadata.Count() == 0)
                    {
                        s = "FindLeader found ZERO count partitionMetaData,topic={0},producerIndex[{1}]=[{2}]";
                        Logger.ErrorFormat(s, this.topic, this.producerIndex, KafkaClientHelperUtils.SyncProducerConfigToString(producerConfig));
                        continue;
                    }
                    else if (topicMetaData.Count() > 1)
                    {
                        s = "FindLeader found more than one topicData,topicMetaData.Count={0},topic={1},producerIndex[{2}]=[{3}],will use first one - expect only one";
                        Logger.ErrorFormat(s, topicMetaData.Count(), this.topic, this.producerIndex, KafkaClientHelperUtils.SyncProducerConfigToString(producerConfig));
                    }

                    this.PartitionsMetadata = topicMetaData.First().PartitionsMetadata.OrderBy(r => r.PartitionId);
                    this.leaders.Clear();
                    foreach (var m in this.PartitionsMetadata)
                    {
                        this.leaders.Add(m.PartitionId, new BrokerConfiguration()
                        {
                            BrokerId = m.Leader.Id,
                            Host     = m.Leader.Host,
                            Port     = m.Leader.Port
                        });
                    }

                    if (this.leaders.Count == 0)
                    {
                        s = "no leader found,topic={0},producerIndex[{1}]=[{2}],retryCount={3}";
                        Logger.ErrorFormat(s, this.topic, this.producerIndex, KafkaClientHelperUtils.SyncProducerConfigToString(producerConfig), retryCount);
                        continue;
                    }
                    else
                    {
                        s = "KafkaClientHelperWrapper[{0}] leader found,leaders.Count={1},topic={2},producerIndex[{3}]=[{4}],retryCount={5}";
                        Logger.DebugFormat(s, this.GetHashCode().ToString("X"), this.leaders.Count, this.topic, this.producerIndex,
                                           KafkaClientHelperUtils.SyncProducerConfigToString(producerConfig), retryCount);
                        this.lastTimeLeaderFound = DateTime.Now;
                    }
                }
                catch (Exception e)
                {
                    s = "FindLeader hit exception,topic={0}, producerIndex[{1}]=[{2}],retryCount={3},maxRetry={4}";
                    Logger.Error(string.Format(s, this.topic, this.producerIndex, KafkaClientHelperUtils.SyncProducerConfigToString(producerConfig), retryCount, maxRetry), e);

                    if (retryCount >= maxRetry - 1)
                    {
                        // Last attempt (retryCount is incremented in the finally block); give up and rethrow
                        throw;
                    }
                }
                finally
                {
                    ++retryCount;
                }
            } // end of while loop

            return(this.leaders.Count);
        }
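A hedged usage sketch: discover leaders, then inspect the per-partition result via PartitionsMetadata (populated by the call above).

        // Illustrative only: find partition leaders before producing.
        private void FindLeaderExample()
        {
            int leaderCount = this.CallKafkaAndFindLeader(); // retries up to 2x the broker count
            if (leaderCount == 0)
            {
                Logger.ErrorFormat("no leader found for topic {0}", this.topic);
                return;
            }

            foreach (var m in this.PartitionsMetadata)
            {
                Logger.DebugFormat("partition {0} is led by broker {1}", m.PartitionId, m.Leader.Id);
            }
        }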
 public long RefreshAndGetOffsetByTimeStamp(short versionId, string clientId, int correlationId, string topic, int partitionId, DateTime timeStampInUTC)
 {
     // Build an offset request pinned to the given timestamp and read back the
     // offsets for the requested partition.
     using (Consumer consumer = this.GetConsumer(topic, partitionId))
     {
         Dictionary <string, List <PartitionOffsetRequestInfo> > offsetRequestInfo = new Dictionary <string, List <PartitionOffsetRequestInfo> >();
         List <PartitionOffsetRequestInfo> offsetRequestInfoForPartitions          = new List <PartitionOffsetRequestInfo>();
         // Request up to 8 offsets before the timestamp for this partition.
         offsetRequestInfoForPartitions.Add(new PartitionOffsetRequestInfo(partitionId, KafkaClientHelperUtils.ToUnixTimestampMillis(timeStampInUTC), 8));
         offsetRequestInfo.Add(topic, offsetRequestInfoForPartitions);
         OffsetRequest offsetRequest = new OffsetRequest(offsetRequestInfo);
         OffsetResponse offsetResponse = consumer.GetOffsetsBefore(offsetRequest);
         List <PartitionOffsetsResponse> partitionOffsetByTimeStamp = null;
         if (offsetResponse.ResponseMap.TryGetValue(topic, out partitionOffsetByTimeStamp))
         {
             foreach (var p in partitionOffsetByTimeStamp)
             {
                 if (p.PartitionId == partitionId)
                 {
                     // Return the offset for the matching partition, not blindly the first entry.
                     return(p.Offsets[0]);
                 }
             }
         }
     }
     }
     return(-1);
 }
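A hedged usage sketch; the method returns -1 when the response carries no entry for the requested partition, so callers should check for that sentinel.

 // Illustrative only: resolve a partition offset from a UTC timestamp.
 public long GetOffsetByTimeStampExample(string topic, int partitionId)
 {
     DateTime cutoff = new DateTime(2016, 4, 20, 0, 0, 0, DateTimeKind.Utc); // assumed cutoff time
     long offset = this.RefreshAndGetOffsetByTimeStamp(0, string.Empty, 0, topic, partitionId, cutoff);
     if (offset < 0)
     {
         // -1 sentinel: no offset was returned for this partition
     }
     return(offset);
 }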