/// <summary>
/// Entry point for the simple-producer helper. Resets the static counters and
/// per-partition bookkeeping, prepares the messages to send, builds the
/// KafkaSimpleManager configuration and the producer configuration template
/// from the command-line options, then dispatches to either the
/// partitioner-class produce path (PartitionId == -2) or the
/// random/specific-partition path.
/// </summary>
/// <param name="produceOptions">Parsed command-line options: zookeeper connection
/// string, topic, target partition id, partitioner class, ack/timeout/compression
/// settings, message size, etc.</param>
internal static void Run(ProduceSimpleHelperOption produceOptions)
{
    // Reset static state so repeated invocations start from a clean slate.
    failedMessageCount = 0;
    successMessageCount = 0;
    sentBatchCount = 0;
    produceMessagePerPartition = new Dictionary<int, int>();
    produceMessagePerPartitionExpect = new Dictionary<int, int>();
    PrepareSentMessages(produceOptions);

    kafkaSimpleManagerConfig = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = produceOptions.Zookeeper,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize,
        PartitionerClass = produceOptions.PartitionerClass
    };
    kafkaSimpleManagerConfig.Verify();

    producerConfigTemplate = new ProducerConfiguration(
        new List<BrokerConfiguration>() { }) //The Brokers will be replaced inside of KafkaSimpleManager
    {
        ForceToPartition = -1,
        PartitionerClass = kafkaSimpleManagerConfig.PartitionerClass,
        TotalNumPartitions = 0,
        RequiredAcks = produceOptions.RequiredAcks,
        AckTimeout = produceOptions.AckTimeout,
        SendTimeout = produceOptions.SendTimeout,
        ReceiveTimeout = produceOptions.ReceiveTimeout,
        CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
        BufferSize = produceOptions.BufferSize,
        SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker, //Actually it's sync producer socket count of one partition
        // Never configure a max message size smaller than the library default.
        MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
    };

    using (ZooKeeperClient zkClient = new ZooKeeperClient(produceOptions.Zookeeper,
        ZooKeeperConfiguration.DefaultSessionTimeout, ZooKeeperStringSerializer.Serializer))
    {
        zkClient.Connect();
        Dictionary<int, int[]> topicDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, produceOptions.Topic);
        // -2 by default or customized partitioner class. (you need specify PartitionerClass by -l)
        if (produceOptions.PartitionId == -2)
        {
            if (string.IsNullOrEmpty(kafkaSimpleManagerConfig.PartitionerClass))
            {
                // FIX: corrected typo "partitioer" -> "partitioner" in the error message.
                throw new ArgumentException("The partitioner class must not be empty if you want to send to partition by partitioner.");
            }
            //if (producerConfigTemplate.TotalNumPartitions <= 0)
            //    throw new ArgumentException(string.Format("Please also specify the TotalNumPartitions if you want to send to partition by partitioner."));
            ProduceByPartitionerClass(produceOptions, topicDataInZookeeper.Count);
        }
        else
        {
            ProduceToRandomOrSpecificPartition(produceOptions);
        }
    }
}
/// <summary>
/// Force get topic metadata and update the cached partition information.
/// Tries each broker from a shuffled producer pool in turn until one returns
/// metadata for <paramref name="topic"/> without error; on success it refreshes
/// the metadata caches and (when a zookeeper client is available) cross-checks
/// the partition count against zookeeper, throwing if the broker returned
/// metadata for only a subset of partitions.
/// </summary>
/// <param name="versionId">Protocol version id to send in the metadata request.</param>
/// <param name="correlationId">Correlation id echoed back by the broker.</param>
/// <param name="clientId">Client id included in the metadata request.</param>
/// <param name="topic">Topic whose metadata to refresh; must not be null or empty.</param>
public void UpdateInfo(short versionId, int correlationId, string clientId, string topic)
{
    Logger.InfoFormat("Will update metadata for topic:{0}", topic);
    Guard.NotNullNorEmpty(topic, "topic");
    // Shuffle brokers so repeated refreshes don't always hit the same broker first.
    var shuffledBrokers = this.syncProducerPool.GetShuffledProducers();
    var i = 0;
    var hasFetchedInfo = false;
    // Try brokers one at a time until one yields usable metadata.
    while (i < shuffledBrokers.Count && !hasFetchedInfo)
    {
        ISyncProducer producer = shuffledBrokers[i++];
        try
        {
            var topicMetadataRequest = TopicMetadataRequest.Create(new List<string>() { topic }, versionId, correlationId, clientId);
            var topicMetadataList = producer.Send(topicMetadataRequest);
            var topicMetadata = topicMetadataList.Any() ? topicMetadataList.First() : null;
            if (topicMetadata != null)
            {
                if (topicMetadata.Error != ErrorMapping.NoError)
                {
                    // Broker answered but reported a topic-level error; fall through and try the next broker.
                    Logger.WarnFormat("Try get metadata of topic {0} from {1}({2}) . Got error: {3}", topic, producer.Config.BrokerId, producer.Config.Host, topicMetadata.Error.ToString());
                }
                else
                {
                    // Cache the raw metadata and record when it was fetched.
                    this.topicPartitionInfo[topic] = topicMetadata;
                    this.topicPartitionInfoLastUpdateTime[topic] = DateTime.UtcNow;
                    Logger.InfoFormat("Will Update metadata info, topic {0} ", topic);
                    //TODO: For all partitions which has metadata, here return the sorted list.
                    //But sometimes kafka didn't return metadata for all topics.
                    // Build the sorted Partition list; partitions with no elected leader keep Leader == null.
                    this.topicPartitionInfoList[topic] = topicMetadata.PartitionsMetadata.Select(m =>
                    {
                        Partition partition = new Partition(topic, m.PartitionId);
                        if (m.Leader != null)
                        {
                            var leaderReplica = new Replica(m.Leader.Id, topic);
                            partition.Leader = leaderReplica;
                            Logger.InfoFormat("Topic {0} partition {1} has leader {2}", topic, m.PartitionId, m.Leader.Id);
                            return (partition);
                        }
                        Logger.WarnFormat("Topic {0} partition {1} does not have a leader yet", topic, m.PartitionId);
                        return (partition);
                    }
                    ).OrderBy(x => x.PartId).ToList();;
                    hasFetchedInfo = true;
                    Logger.InfoFormat("Finish Update metadata info, topic {0} Partitions:{1} No leader:{2}", topic, this.topicPartitionInfoList[topic].Count, this.topicPartitionInfoList[topic].Where(r => r.Leader == null).Count());
                    //In very weired case, the kafka broker didn't return metadata of all broker. need break and retry. https://issues.apache.org/jira/browse/KAFKA-1998
                    // http://qnalist.com/questions/5899394/topicmetadata-response-miss-some-partitions-information-sometimes
                    if (zkClient != null)
                    {
                        // Cross-check against zookeeper: if the broker returned fewer partitions
                        // than zookeeper knows about, the metadata is incomplete — fail loudly so
                        // the caller can retry rather than produce to a partial partition set.
                        Dictionary<int, int[]> topicMetaDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(this.zkClient, topic);
                        if (topicMetaDataInZookeeper != null && topicMetaDataInZookeeper.Any())
                        {
                            topicDataInZookeeper[topic] = topicMetaDataInZookeeper;
                            if (this.topicPartitionInfoList[topic].Count != topicMetaDataInZookeeper.Count)
                            {
                                Logger.ErrorFormat("NOT all partition has metadata. Topic partition in zookeeper :{0} topics has partition metadata: {1}", topicMetaDataInZookeeper.Count, this.topicPartitionInfoList[topic].Count);
                                throw new UnavailableProducerException(string.Format("Please make sure every partition at least has one broker running and retry again. NOT all partition has metadata. Topic partition in zookeeper :{0} topics has partition metadata: {1}", topicMetaDataInZookeeper.Count, this.topicPartitionInfoList[topic].Count));
                            }
                        }
                    }
                }
            }
        }
        catch (Exception e)
        {
            // Request to this broker failed entirely (e.g. connection error); log and try the next one.
            Logger.ErrorFormat("Try get metadata of topic {0} from {1}({2}) . Got error: {3}", topic, producer.Config.BrokerId, producer.Config.Host, e.FormatException());
        }
    }
}
/// <summary>
/// Dumps topic metadata (partition count, replication factor, per-partition
/// leader/replica/ISR info) and optionally per-partition offsets into a string
/// report, while accumulating leader/replica distribution counts and offset
/// values into the caller-supplied dictionaries.
/// </summary>
/// <param name="zkClient">Connected zookeeper client used to read ISR and topic layout.</param>
/// <param name="topic">Topic to dump.</param>
/// <param name="zookeeper">Zookeeper connection string; if null/empty an error line is emitted instead.</param>
/// <param name="partitionIndex">Single partition to report, or -1 for all partitions.</param>
/// <param name="includePartitionDetailInfo">Whether to emit per-partition detail lines.</param>
/// <param name="includeOffsetInfo">Whether to fetch and report earliest/latest offsets.</param>
/// <param name="timestamp">If not DateTime.MinValue, also report the offset at this timestamp.</param>
/// <param name="parttionBrokerID_LeaderCountDistribAll">Accumulator (across calls): broker id -> count of partitions it leads.</param>
/// <param name="parttionBrokerID_ReplicaCountDistribAll">Accumulator (across calls): broker id -> count of partitions it replicates.</param>
/// <param name="latestOffset">Out-collector: partition id -> latest offset.</param>
/// <param name="latestLength">Out-collector: partition id -> (latest - earliest) length.</param>
/// <returns>The formatted report; on error, whatever partial report was built (possibly empty).</returns>
internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic,
    string zookeeper,
    int partitionIndex,
    bool includePartitionDetailInfo,
    bool includeOffsetInfo,
    DateTime timestamp,
    SortedDictionary<int, int> parttionBrokerID_LeaderCountDistribAll,
    SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistribAll,
    SortedDictionary<int, long> latestOffset,
    SortedDictionary<int, long> latestLength)
{
    StringBuilder sb = new StringBuilder();
    string s = string.Empty;
    //BrokerID -->Count of as leader (for this call only, unlike the *All accumulators)
    SortedDictionary<int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
    //BrokerID -->Count of as replica (for this call only)
    SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
    try
    {
        if (string.IsNullOrEmpty(zookeeper))
        {
            Logger.Error(" zookeeper should be provided");
            sb.AppendFormat(DumpTopicError, topic);
        }
        else
        {
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                Zookeeper = zookeeper
            };
            config.Verify();
            // Partition -> replica broker ids as recorded in zookeeper; entries are
            // removed as each partition is seen in broker metadata, so leftovers at
            // the end are partitions the broker metadata missed.
            Dictionary<int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
            using (KafkaSimpleManager<int, Message> kafkaSimpleManager = new KafkaSimpleManager<int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);
                int partitionCount = topicMetadata.PartitionsMetadata.Count();
                sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);
                // NOTE(review): replication factor is taken from the first partition only —
                // assumes all partitions share the same replica count.
                int replicationFactor = Enumerable.Count<Broker>(topicMetadata.PartitionsMetadata.First().Replicas);
                sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);
                //TODO: compare detailDataInZookeeper and check which one missed.
                StringBuilder sbDetail = new StringBuilder();
                if (includePartitionDetailInfo)
                {
                    long sumEndOffset = 0;
                    long sumLength = 0;
                    foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                    {
                        int[] replicaInZookeeper = null;
                        if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                        {
                            replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                            detailDataInZookeeper.Remove(p.PartitionId);
                        }
                        #region One partition
                        long earliest = 0;
                        long latest = 0;
                        if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                        {
                            //sbDetail.AppendFormat("\tTopic:{0}", topic);
                            sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                            if (p.Leader != null)
                            {
                                sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));
                                // Tally leader counts both per-call and in the cross-call accumulator.
                                if (parttionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                {
                                    parttionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                }
                                if (parttionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                {
                                    parttionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                }
                            }
                            else
                            {
                                sbDetail.AppendFormat("\tLeader:NoLeader!");
                            }
                            sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                            // Tally replica counts both per-call and in the cross-call accumulator.
                            foreach (Broker b in p.Replicas)
                            {
                                if (parttionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                {
                                    parttionBrokerID_ReplicaCountDistrib[b.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                }
                                if (parttionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                {
                                    parttionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                }
                            }
                            //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                            // ISR is read from zookeeper rather than broker metadata.
                            ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                            sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                            //TODO: add missed replica
                            #region Offset
                            if (includeOffsetInfo)
                            {
                                try
                                {
                                    kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                    sumEndOffset += latest;
                                    sumLength += (latest - earliest);
                                    sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                        , earliest
                                        , latest
                                        , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                    sbDetail.AppendFormat("\r\n");
                                    latestOffset.Add(p.PartitionId, latest);
                                    latestLength.Add(p.PartitionId, latest - earliest);
                                }
                                catch (NoLeaderForPartitionException e)
                                {
                                    // Offset fetch failed for this partition; report inline and continue with the next.
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                catch (UnableToConnectToHostException e)
                                {
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                // DateTime.MinValue is the sentinel for "no timestamp requested".
                                if (timestamp != DateTime.MinValue)
                                {
                                    long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                    try
                                    {
                                        long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , timeStampOffset
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                    catch (TimeStampTooSmallException)
                                    {
                                        // No data exists before the requested timestamp; emit a placeholder instead of an offset.
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , "NA since no data before the time you specified, please retry with a bigger value."
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                }
                            }
                            #endregion
                        }
                        #endregion
                    }
                    if (includeOffsetInfo)
                    {
                        sb.AppendFormat("SumeEndOffset:{0:0,0} SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                    }
                    else
                    {
                        sb.AppendFormat("\r\n");
                    }
                    // Any partition still present here was in zookeeper but missing from broker metadata.
                    if (detailDataInZookeeper.Any())
                    {
                        foreach (KeyValuePair<int, int[]> kv in detailDataInZookeeper)
                        {
                            sb.AppendFormat("=ERROR=MISSED partition= {0} Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                        }
                    }
                }
                sb.Append(sbDetail.ToString());
                sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                foreach (KeyValuePair<int, int> kv in parttionBrokerID_LeaderCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                foreach (KeyValuePair<int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                sb.AppendFormat("\r\n");
            }
        }
        s = sb.ToString();
    }
    catch (NoBrokerForTopicException e)
    {
        // Known failure modes append a marker line and still return the partial report.
        sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (UnableToConnectToHostException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (Exception ex)
    {
        // Unexpected failure: log everything; s keeps whatever was assigned before the throw.
        Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
            , ExceptionUtil.GetExceptionDetailInfo(ex), topic, zookeeper, string.Empty, partitionIndex, includePartitionDetailInfo, includeOffsetInfo, timestamp, s);
    }
    return (s);
}