/// <summary>
/// Reads back every partition of the test topic from the earliest offset and
/// records the consumed data per partition for later verification against what was sent.
/// </summary>
/// <param name="testOptions">Test settings; Zookeeper and Topic are used here.</param>
private static void TestBug1490652ReadData(TestHelperOptions testOptions)
{
    // Consumer-side configuration: default fetch/buffer sizes, and no waiting
    // (MaxWaitTime = 0, MinWaitBytes = 0) so fetch requests return immediately.
    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = KafkaSimpleManagerConfiguration.DefaultFetchSize,
        BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize,
        MaxWaitTime = 0,
        MinWaitBytes = 0,
        Zookeeper = testOptions.Zookeeper
    };
    config.Verify();

    using (KafkaSimpleManager<int, Message> manager = new KafkaSimpleManager<int, Message>(config))
    {
        TopicMetadata metadata = manager.RefreshMetadata(0, "ClientID", 0, testOptions.Topic, true);
        PartitionCount = metadata.PartitionsMetadata.Count();

        for (int partitionId = 0; partitionId < PartitionCount; partitionId++)
        {
            // Resolve the real earliest/latest offsets for this partition so we
            // know how far to read.
            long earliest;
            long latest;
            long offsetBase;
            OffsetHelper.GetAdjustedOffset<int, Message>(
                testOptions.Topic, manager, partitionId,
                KafkaOffsetType.Earliest, 0, 0,
                out earliest, out latest, out offsetBase);

            // Consume the whole partition up to 'latest' and remember what was read.
            TestBug1490652DataRead.Add(
                partitionId,
                ConsumeDataOfOnePartitionTotally<int, Message>(
                    testOptions.Topic, manager, partitionId,
                    KafkaOffsetType.Earliest, 0, 0, latest, 0, 100, -1, "DumpLog.log"));
        }
    }
}
/// <summary>
/// Wires up the wrapper: builds and verifies the simple-manager configuration,
/// prepares a producer configuration template, and pre-initializes the producer
/// pool for the target topic.
/// </summary>
private ProducePerfTestKafkaSimpleManagerWrapper()
{
    config = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = produceOptions.Zookeeper,
        PartitionerClass = produceOptions.PartitionerClass,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
    };
    config.Verify();

    // The broker list is intentionally empty: KafkaSimpleManager substitutes the
    // live brokers when the producer pool is initialized.
    producerConfigTemplate = new ProducerConfiguration(new List<BrokerConfiguration>())
    {
        ForceToPartition = -1,
        PartitionerClass = config.PartitionerClass,
        TotalNumPartitions = 0,
        RequiredAcks = produceOptions.RequiredAcks,
        AckTimeout = produceOptions.AckTimeout,
        SendTimeout = produceOptions.SendTimeout,
        ReceiveTimeout = produceOptions.ReceiveTimeout,
        CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
        BufferSize = produceOptions.BufferSize,
        // Per the original note: this is the sync-producer socket count of one partition.
        SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker,
        MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
    };

    kafkaSimpleManage = new KafkaSimpleManager<byte[], Message>(config);
    int poolCorrelationId = Interlocked.Increment(ref correlationIDGetProducer);
    kafkaSimpleManage.InitializeProducerPoolForTopic(0, clientId, poolCorrelationId, produceOptions.Topic, true, producerConfigTemplate, true);
}
/// <summary>
/// Constructs the wrapper: validates the manager configuration, builds the
/// producer configuration template from the produce options, then creates the
/// manager and initializes its producer pool for the topic.
/// </summary>
private ProducePerfTestKafkaSimpleManagerWrapper()
{
    config = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = produceOptions.Zookeeper,
        PartitionerClass = produceOptions.PartitionerClass,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
    };
    config.Verify();

    // Empty broker list on purpose — the brokers are replaced inside
    // KafkaSimpleManager when the pool is built.
    var template = new ProducerConfiguration(new List<BrokerConfiguration>())
    {
        ForceToPartition = -1,
        PartitionerClass = config.PartitionerClass,
        TotalNumPartitions = 0,
        RequiredAcks = produceOptions.RequiredAcks,
        AckTimeout = produceOptions.AckTimeout,
        SendTimeout = produceOptions.SendTimeout,
        ReceiveTimeout = produceOptions.ReceiveTimeout,
        CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
        BufferSize = produceOptions.BufferSize,
        SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker, // sync producer socket count of one partition
        MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
    };
    producerConfigTemplate = template;

    kafkaSimpleManage = new KafkaSimpleManager<byte[], Message>(config);
    int correlationId = Interlocked.Increment(ref correlationIDGetProducer);
    kafkaSimpleManage.InitializeProducerPoolForTopic(0, clientId, correlationId, produceOptions.Topic, true, producerConfigTemplate, true);
}
/// <summary>
/// Prepares test messages and produces them to the target topic — either to a
/// random/specific partition, or routed by a custom partitioner class when
/// PartitionId is -2.
/// </summary>
/// <param name="produceOptions">Produce settings (zookeeper, topic, partition, acks, sizes, ...).</param>
/// <exception cref="ArgumentException">
/// Thrown when PartitionId is -2 but no partitioner class was supplied.
/// </exception>
internal static void Run(ProduceSimpleHelperOption produceOptions)
{
    // Reset the per-run counters and tracking dictionaries.
    failedMessageCount = 0;
    successMessageCount = 0;
    sentBatchCount = 0;
    produceMessagePerPartition = new Dictionary<int, int>();
    produceMessagePerPartitionExpect = new Dictionary<int, int>();
    PrepareSentMessages(produceOptions);

    kafkaSimpleManagerConfig = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = produceOptions.Zookeeper,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize,
        PartitionerClass = produceOptions.PartitionerClass
    };
    kafkaSimpleManagerConfig.Verify();

    // The broker list is intentionally empty: KafkaSimpleManager replaces it
    // with the live brokers when the producer pool is created.
    producerConfigTemplate = new ProducerConfiguration(new List<BrokerConfiguration>() { })
    {
        ForceToPartition = -1,
        PartitionerClass = kafkaSimpleManagerConfig.PartitionerClass,
        TotalNumPartitions = 0,
        RequiredAcks = produceOptions.RequiredAcks,
        AckTimeout = produceOptions.AckTimeout,
        SendTimeout = produceOptions.SendTimeout,
        ReceiveTimeout = produceOptions.ReceiveTimeout,
        CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
        BufferSize = produceOptions.BufferSize,
        SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker, // sync producer socket count of one partition
        MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
    };

    using (ZooKeeperClient zkClient = new ZooKeeperClient(produceOptions.Zookeeper,
        ZooKeeperConfiguration.DefaultSessionTimeout, ZooKeeperStringSerializer.Serializer))
    {
        zkClient.Connect();
        Dictionary<int, int[]> topicDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, produceOptions.Topic);

        // PartitionId == -2 (the default) means "route by the customized
        // partitioner class" (the user must specify PartitionerClass by -l).
        if (produceOptions.PartitionId == -2)
        {
            if (string.IsNullOrEmpty(kafkaSimpleManagerConfig.PartitionerClass))
            {
                // Fixed typo in the original message: "partitioer" -> "partitioner".
                throw new ArgumentException("The partitioner class must not be empty if you want to send to partition by partitioner.");
            }
            ProduceByPartitionerClass(produceOptions, topicDataInZookeeper.Count);
        }
        else
        {
            ProduceToRandomOrSpecificPartition(produceOptions);
        }
    }
}
/// <summary>
/// Produces TestBug1490652MessageCountPerPartition random messages to every
/// partition of the test topic (ProducerData keyed by partition id so each
/// message lands in that partition) and records what was sent, per partition,
/// for later read-back verification. Exceptions are logged, not rethrown.
/// </summary>
/// <param name="testOptions">Test settings; Zookeeper, Topic and MessageSize are used here.</param>
private static void TestBug1490652SendData(TestHelperOptions testOptions)
{
    int correlationID = 0;
    try
    {
        KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
        {
            Zookeeper = testOptions.Zookeeper,
            MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
        };
        config.Verify();

        using (KafkaSimpleManager<int, Kafka.Client.Messages.Message> kafkaSimpleManager
            = new KafkaSimpleManager<int, Kafka.Client.Messages.Message>(config))
        {
            TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, "ClientID", correlationID++, testOptions.Topic, true);
            PartitionCount = topicMetadata.PartitionsMetadata.Count();

            // Build one big batch covering every partition.
            List<ProducerData<int, Message>> listOfDataNeedSendInOneBatch = new List<ProducerData<int, Message>>();
            for (int i = 0; i < PartitionCount; i++)
            {
                TestBug1490652DataSent.Add(i, new Dictionary<int, string>());
                for (int j = 0; j < TestBug1490652MessageCountPerPartition; j++)
                {
                    string val = KafkaClientHelperUtils.GetRandomString(testOptions.MessageSize);
                    byte[] bVal = System.Text.Encoding.UTF8.GetBytes(val);
                    // The ProducerData key is set to the partition id so the message
                    // falls directly into that partition.
                    Message message = new Message(bVal, CompressionCodecs.DefaultCompressionCodec);
                    listOfDataNeedSendInOneBatch.Add(new ProducerData<int, Message>(testOptions.Topic, i, message));
                    TestBug1490652DataSent[i].Add(j, val);
                }
            }

            ProducerConfiguration producerConfig = new ProducerConfiguration(new List<BrokerConfiguration>() { })
            {
                PartitionerClass = ProducerConfiguration.DefaultPartitioner,
                RequiredAcks = 1,
                BufferSize = config.BufferSize,
                ZooKeeper = config.ZookeeperConfig,
                MaxMessageSize = Math.Max(config.MaxMessageSize, Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, testOptions.MessageSize))
            };
            producerConfig.SyncProducerOfOneBroker = 1;

            // FIX: the producer was previously never disposed, leaking its broker
            // connections; wrap it in 'using' so it is released after the send.
            using (Producer<int, Kafka.Client.Messages.Message> producer
                = new Producer<int, Kafka.Client.Messages.Message>(producerConfig))
            {
                producer.Send(listOfDataNeedSendInOneBatch);
            }
        }
    }
    catch (Exception ex)
    {
        Logger.ErrorFormat("Produce data Got exception:{0}\r\ninput parameter: {1}\r\n"
            , ex.FormatException(), testOptions.ToString());
    }
}
/// <summary>
/// Stores the ZooKeeper connection string and verifies that a basic
/// KafkaSimpleManagerConfiguration can be built from it.
/// </summary>
/// <param name="zkConnect">ZooKeeper connection string.</param>
public ZookeeperConnection(string zkConnect)
{
    _zkConnect = zkConnect;

    // Build a configuration purely so Verify() can reject a bad connection
    // string early.
    // NOTE(review): the config object is discarded after verification —
    // confirm that is intentional.
    var validationConfig = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = zkConnect,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize,
        PartitionerClass = ProducerConfiguration.DefaultPartitioner
    };
    validationConfig.Verify();
}
/// <summary>
/// Demo entry point: publishes 100 messages to topic "t4", alternating between
/// producers bound to partition 0 and partition 4.
/// </summary>
static void Main(string[] args)
{
    int correlationID = 0;
    string topic = "t4";
    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = 100,
        BufferSize = 100,
        MaxWaitTime = 5000,
        MinWaitBytes = 1,
        Zookeeper = "10.1.1.231:2181,10.1.1.232:2181,10.1.1.233:2181/kafka"
    };
    ProducerConfiguration producerConfiguration = new ProducerConfiguration(new[] { new BrokerConfiguration() });
    config.Verify();

    using (KafkaSimpleManager<int, Message> kafkaSimpleManager = new KafkaSimpleManager<int, Message>(config))
    {
        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, topic, true);
        kafkaSimpleManager.InitializeProducerPoolForTopic(0, ClientID, correlationID, topic, true, producerConfiguration, true);

        var evenProducer = kafkaSimpleManager.GetProducerOfPartition(topic, 0, true);
        var oddProducer = kafkaSimpleManager.GetProducerOfPartition(topic, 4, true);

        for (int i = 0; i < 100; i++)
        {
            // Alternate between the two partition producers; key is the current tick count.
            var producer = (i % 2 == 0) ? evenProducer : oddProducer;
            var tKey = Encoding.UTF8.GetBytes(DateTime.Now.Ticks.ToString());
            var tValue = Encoding.UTF8.GetBytes("Hello world " + i);
            producer.Send(new ProducerData<int, Message>(topic, new Message(tValue, tKey, CompressionCodecs.DefaultCompressionCodec)));
        }

        evenProducer.Dispose();
        oddProducer.Dispose();
        Console.WriteLine("Topic is: " + topicMetadata.Topic);
    }
    //Console.ReadKey();
}
/// <summary>
/// Standard dispose pattern. On the first disposing call, takes the dictionary
/// lock and tears down the wrapped KafkaSimpleManager; subsequent calls are no-ops.
/// </summary>
/// <param name="disposing">True when called from Dispose(); false from a finalizer path.</param>
protected virtual void Dispose(bool disposing)
{
    if (this.disposed)
    {
        return; // already torn down — nothing to do
    }

    if (disposing)
    {
        // Serialize against other users of the manager before disposing it.
        lock (lockForDictionaryChange)
        {
            Logger.Info("Got lock Will dispose KafkaSimpleManager ...");
            kafkaSimpleManage.Dispose();
            kafkaSimpleManage = null;
            config = null;
            Logger.Info("Finish dispose KafkaSimpleManager ...will release lock.");
        }
    }

    disposed = true;
}
/// <summary>
/// Returns true when the topic has at least one partition registered in
/// ZooKeeper. Any lookup failure is treated as "topic does not exist".
/// </summary>
/// <param name="topic">Topic name to probe.</param>
private static bool TopicExsits(string topic)
{
    var managerConfig = new KafkaSimpleManagerConfiguration
    {
        FetchSize = KafkaSimpleManagerConfiguration.DefaultFetchSize,
        BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize,
        Zookeeper = ZooKeeperSetting.Address
    };

    using (var kafkaManager = new KafkaSimpleManager<int, Message>(managerConfig))
    {
        try
        {
            return kafkaManager.GetTopicPartitionsFromZK(topic).Count > 0;
        }
        catch (Exception)
        {
            // Deliberate best-effort: a ZK/broker error means "not found" to the caller.
            return false;
        }
    }
}
/// <summary>
/// Checks whether the topic exists by asking ZooKeeper for its partitions;
/// any failure during the lookup is reported as "does not exist".
/// </summary>
/// <param name="topic">Topic name to probe.</param>
bool TopicExsits(string topic)
{
    var managerConfig = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = KafkaSimpleManagerConfiguration.DefaultFetchSize,
        BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize,
        Zookeeper = _zkConnectionString
    };

    using (var kafkaManager = new KafkaSimpleManager<string, Kafka.Client.Messages.Message>(managerConfig))
    {
        try
        {
            // A topic "exists" when it has at least one partition in ZooKeeper.
            var partitions = kafkaManager.GetTopicPartitionsFromZK(topic);
            return partitions.Count > 0;
        }
        catch (Exception)
        {
            // Deliberate best-effort: swallow and report absence.
            return false;
        }
    }
}
/// <summary>
/// Creates a consumer for the configured topic/partition and hands it to the
/// main processing loop. AutoCommit is enabled so offsets are committed as
/// messages are consumed.
/// </summary>
public override void Process()
{
    // factor can be raised to shrink fetch/buffer sizes proportionally.
    const int factor = 1;
    var config = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = KafkaSimpleManagerConfiguration.DefaultFetchSize / factor,
        BufferSize = KafkaSimpleManagerConfiguration.DefaultBufferSize / factor,
        Zookeeper = _processorOptions.Zookeeper,
    };
    // NOTE(review): other call sites in this codebase call config.Verify() here —
    // consider adding it if validation is desired.

    const int correlationId = 0;
    const int versionId = 1;
    var clientId = _processorOptions.ClientId;

    // FIX: KafkaSimpleManager is disposable (it is used with 'using' everywhere
    // else in this codebase) but was never disposed here; the 'using' releases
    // its connections once MainLoop returns.
    using (var manager = new KafkaSimpleManager<int, Message>(config))
    {
        manager.RefreshMetadata(versionId, clientId, correlationId, _processorOptions.Topic, true);
        var consumer = manager.GetConsumer(_processorOptions.Topic, _processorOptions.PartitionId);
        consumer.Config.AutoCommit = true;
        MainLoop(consumer, correlationId, manager);
    }
}
// Builds a human-readable dump of a topic's metadata: partition count, replication
// factor, and (optionally) per-partition leader/replica/ISR details and offsets.
// Also accumulates leader/replica counts per broker into the *DistribAll
// dictionaries supplied by the caller, and fills latestOffset/latestLength per
// partition when offset info is requested. Returns the formatted dump string;
// on error, returns whatever was accumulated so far.
internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic, string zookeeper, int partitionIndex, bool includePartitionDetailInfo, bool includeOffsetInfo, DateTime timestamp, SortedDictionary <int, int> parttionBrokerID_LeaderCountDistribAll, SortedDictionary <int, int> parttionBrokerID_ReplicaCountDistribAll, SortedDictionary <int, long> latestOffset, SortedDictionary <int, long> latestLength)
{
    StringBuilder sb = new StringBuilder();
    string s = string.Empty;
    //BrokerID -->Count of as leader
    SortedDictionary <int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary <int, int>();
    //BrokerID -->Count of as replica
    SortedDictionary <int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary <int, int>();
    try
    {
        if (string.IsNullOrEmpty(zookeeper))
        {
            Logger.Error(" zookeeper should be provided");
            sb.AppendFormat(DumpTopicError, topic);
        }
        else
        {
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration() { Zookeeper = zookeeper };
            config.Verify();
            // Partition -> replica broker ids as recorded in ZooKeeper (compared
            // against live metadata below; matched entries are removed).
            Dictionary <int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
            using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);
                int partitionCount = topicMetadata.PartitionsMetadata.Count();
                sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);
                // Replication factor is read off the first partition's replica list.
                int replicationFactor = Enumerable.Count <Broker>(topicMetadata.PartitionsMetadata.First().Replicas);
                sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);
                //TODO: compare detailDataInZookeeper and check which one missed.
                StringBuilder sbDetail = new StringBuilder();
                if (includePartitionDetailInfo)
                {
                    long sumEndOffset = 0;
                    long sumLength = 0;
                    foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                    {
                        // Remove this partition from the ZK map; whatever remains
                        // afterwards is a partition missing from live metadata.
                        int[] replicaInZookeeper = null;
                        if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                        {
                            replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                            detailDataInZookeeper.Remove(p.PartitionId);
                        }
                        #region One partition
                        long earliest = 0;
                        long latest = 0;
                        // partitionIndex == -1 means "dump every partition".
                        if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                        {
                            //sbDetail.AppendFormat("\tTopic:{0}", topic);
                            sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                            if (p.Leader != null)
                            {
                                sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));
                                // Count leadership per broker, both locally and in the
                                // caller-wide accumulator.
                                if (parttionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                {
                                    parttionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                }
                                if (parttionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                {
                                    parttionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                }
                            }
                            else
                            {
                                sbDetail.AppendFormat("\tLeader:NoLeader!");
                            }
                            sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                            // Count replica placement per broker, local and caller-wide.
                            foreach (Broker b in p.Replicas)
                            {
                                if (parttionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                {
                                    parttionBrokerID_ReplicaCountDistrib[b.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                }
                                if (parttionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                {
                                    parttionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                }
                            }
                            //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                            // ISR is read from ZooKeeper rather than the metadata response.
                            ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                            sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r =>
                                KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                            //TODO: add missed replica
                            #region Offset
                            if (includeOffsetInfo)
                            {
                                try
                                {
                                    kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                    sumEndOffset += latest;
                                    sumLength += (latest - earliest);
                                    sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                        , earliest
                                        , latest
                                        , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                    sbDetail.AppendFormat("\r\n");
                                    latestOffset.Add(p.PartitionId, latest);
                                    latestLength.Add(p.PartitionId, latest - earliest);
                                }
                                catch (NoLeaderForPartitionException e)
                                {
                                    // Offset lookup failed for this partition only; keep dumping the rest.
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                catch (UnableToConnectToHostException e)
                                {
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                // DateTime.MinValue is the sentinel for "no timestamp query".
                                if (timestamp != DateTime.MinValue)
                                {
                                    long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                    try
                                    {
                                        long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , timeStampOffset
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                    catch (TimeStampTooSmallException)
                                    {
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , "NA since no data before the time you specified, please retry with a bigger value."
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                }
                            }
                            #endregion
                        }
                        #endregion
                    }
                    if (includeOffsetInfo)
                    {
                        sb.AppendFormat("SumeEndOffset:{0:0,0} SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                    }
                    else
                    {
                        sb.AppendFormat("\r\n");
                    }
                    // Anything still in the ZK map was not present in live metadata.
                    if (detailDataInZookeeper.Any())
                    {
                        foreach (KeyValuePair <int, int[]> kv in detailDataInZookeeper)
                        {
                            sb.AppendFormat("=ERROR=MISSED partition= {0} Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                        }
                    }
                }
                sb.Append(sbDetail.ToString());
                // Per-broker leader distribution summary.
                sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                foreach (KeyValuePair <int, int> kv in parttionBrokerID_LeaderCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                // Per-broker replica distribution summary.
                sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                foreach (KeyValuePair <int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                sb.AppendFormat("\r\n");
            }
        }
        s = sb.ToString();
    }
    catch (NoBrokerForTopicException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (UnableToConnectToHostException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (Exception ex)
    {
        // Unexpected failure: log everything, return the partial dump built so far.
        Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
            , ExceptionUtil.GetExceptionDetailInfo(ex), topic, zookeeper, string.Empty, partitionIndex, includePartitionDetailInfo, includeOffsetInfo, timestamp, s);
    }
    return(s);
}
// Second copy of the topic-metadata dump routine (braceless-if variant of the
// one above in this file): formats partition count, replication factor, and
// optional per-partition leader/replica/ISR and offset details. Accumulates
// per-broker leader/replica counts into the caller-supplied *DistribAll maps
// and fills latestOffset/latestLength. Returns the dump string; on error,
// returns whatever was built so far.
internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic, string zookeeper, int partitionIndex, bool includePartitionDetailInfo, bool includeOffsetInfo, DateTime timestamp, SortedDictionary<int, int> parttionBrokerID_LeaderCountDistribAll, SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistribAll, SortedDictionary<int, long> latestOffset, SortedDictionary<int, long> latestLength)
{
    StringBuilder sb = new StringBuilder();
    string s = string.Empty;
    //BrokerID -->Count of as leader
    SortedDictionary<int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
    //BrokerID -->Count of as replica
    SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
    try
    {
        if (string.IsNullOrEmpty(zookeeper))
        {
            Logger.Error(" zookeeper should be provided");
            sb.AppendFormat(DumpTopicError, topic);
        }
        else
        {
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration() { Zookeeper = zookeeper };
            config.Verify();
            // Partition -> replica broker ids as recorded in ZooKeeper; matched
            // entries are removed while walking live metadata below.
            Dictionary<int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
            using (KafkaSimpleManager<int, Message> kafkaSimpleManager = new KafkaSimpleManager<int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);
                int partitionCount = topicMetadata.PartitionsMetadata.Count();
                sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);
                // Replication factor is taken from the first partition's replica list.
                int replicationFactor = Enumerable.Count<Broker>(topicMetadata.PartitionsMetadata.First().Replicas);
                sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);
                //TODO: compare detailDataInZookeeper and check which one missed.
                StringBuilder sbDetail = new StringBuilder();
                if (includePartitionDetailInfo)
                {
                    long sumEndOffset = 0;
                    long sumLength = 0;
                    foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                    {
                        // Remove this partition from the ZK map; leftovers afterwards
                        // are partitions missing from the live metadata.
                        int[] replicaInZookeeper = null;
                        if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                        {
                            replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                            detailDataInZookeeper.Remove(p.PartitionId);
                        }
                        #region One partition
                        long earliest = 0;
                        long latest = 0;
                        // partitionIndex == -1 means "dump all partitions".
                        if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                        {
                            //sbDetail.AppendFormat("\tTopic:{0}", topic);
                            sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                            if (p.Leader != null)
                            {
                                sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));
                                // Tally leadership per broker: local map and caller-wide map.
                                if (parttionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                    parttionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                else
                                    parttionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                if (parttionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                    parttionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                else
                                    parttionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                            }
                            else
                                sbDetail.AppendFormat("\tLeader:NoLeader!");
                            sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                            // Tally replica placement per broker, local and caller-wide.
                            foreach (Broker b in p.Replicas)
                            {
                                if (parttionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                    parttionBrokerID_ReplicaCountDistrib[b.Id]++;
                                else
                                    parttionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                if (parttionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                    parttionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                else
                                    parttionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                            }
                            //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                            // ISR comes from ZooKeeper, not the metadata response.
                            ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                            sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                            //TODO: add missed replica
                            #region Offset
                            if (includeOffsetInfo)
                            {
                                try
                                {
                                    kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                    sumEndOffset += latest;
                                    sumLength += (latest - earliest);
                                    sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                        , earliest
                                        , latest
                                        , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                    sbDetail.AppendFormat("\r\n");
                                    latestOffset.Add(p.PartitionId, latest);
                                    latestLength.Add(p.PartitionId, latest - earliest);
                                }
                                catch (NoLeaderForPartitionException e)
                                {
                                    // Offset lookup failed for this partition only; continue dumping.
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                catch (UnableToConnectToHostException e)
                                {
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                // DateTime.MinValue is the sentinel for "no timestamp query".
                                if (timestamp != DateTime.MinValue)
                                {
                                    long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                    try
                                    {
                                        long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , timeStampOffset
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                    catch (TimeStampTooSmallException)
                                    {
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , "NA since no data before the time you specified, please retry with a bigger value."
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                }
                            }
                            #endregion
                        }
                        #endregion
                    }
                    if (includeOffsetInfo)
                    {
                        sb.AppendFormat("SumeEndOffset:{0:0,0} SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                    }
                    else
                    {
                        sb.AppendFormat("\r\n");
                    }
                    // Anything remaining in the ZK map never showed up in live metadata.
                    if (detailDataInZookeeper.Any())
                    {
                        foreach (KeyValuePair<int, int[]> kv in detailDataInZookeeper)
                        {
                            sb.AppendFormat("=ERROR=MISSED partition= {0} Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                        }
                    }
                }
                sb.Append(sbDetail.ToString());
                // Per-broker leader distribution summary.
                sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                foreach (KeyValuePair<int, int> kv in parttionBrokerID_LeaderCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                // Per-broker replica distribution summary.
                sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                foreach (KeyValuePair<int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                sb.AppendFormat("\r\n");
            }
        }
        s = sb.ToString();
    }
    catch (NoBrokerForTopicException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (UnableToConnectToHostException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (Exception ex)
    {
        // Unexpected failure: log everything, return the partial dump built so far.
        Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
            , ExceptionUtil.GetExceptionDetailInfo(ex), topic, zookeeper, string.Empty, partitionIndex, includePartitionDetailInfo, includeOffsetInfo, timestamp, s);
    }
    return s;
}
/// <summary>
/// Creates a manager bound to the given configuration and builds the initial
/// sync-producer pool used for metadata requests.
/// </summary>
/// <param name="config">Manager configuration to bind to.</param>
public KafkaSimpleManager(KafkaSimpleManagerConfiguration config)
{
    Config = config;
    // The metadata pool must exist before any metadata refresh can be issued.
    RecreateSyncProducerPoolForMetadata();
}
// Consumes data from the requested topic (one partition, or all partitions when
// PartitionIndex is -1), starting from an offset derived from the command-line
// options. On a per-iteration error the metadata is refreshed and the outer loop
// retries; the loop ends when ConsumeDataOfOnePartition signals completion or a
// full pass over all partitions finishes. Top-level exceptions are logged only.
internal static void ConsumeDataSimple(ConsumeDataHelperArguments dumpdataOptions)
{
    // Reset the static counters shared with ConsumeDataOfOnePartition.
    correlationID = 0;
    totalCountUTF8 = 0;
    totalCountOriginal = 0;
    totalCount = 0;
    lastNotifytotalCount = 0;
    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = dumpdataOptions.FetchSize,
        BufferSize = dumpdataOptions.BufferSize,
        MaxWaitTime = dumpdataOptions.MaxWaitTime,
        MinWaitBytes = dumpdataOptions.MinWaitBytes,
        Zookeeper = dumpdataOptions.Zookeeper
    };
    config.Verify();
    bool finish = false;
    try
    {
        using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
        {
            TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
            while (true)
            {
                try
                {
                    // Walk partition ids 0..max; PartitionIndex == -1 means "all".
                    for (int i = 0; i <= topicMetadata.PartitionsMetadata.Max(r => r.PartitionId); i++)
                    {
                        if (dumpdataOptions.PartitionIndex == -1 || i == dumpdataOptions.PartitionIndex)
                        {
                            #region Get real offset and adjust
                            long earliest = 0;
                            long latest = 0;
                            long offsetBase = 0;
                            OffsetHelper.GetAdjustedOffset <int, Message>(dumpdataOptions.Topic
                                , kafkaSimpleManager, i
                                , KafkaNetLibraryExample.ConvertOffsetType(dumpdataOptions.Offset)
                                , KafkaNetLibraryExample.ConvertOffset(dumpdataOptions.Offset)
                                , dumpdataOptions.LastMessagesCount, out earliest, out latest, out offsetBase);
                            #endregion
                            Console.WriteLine("Topic:{0} Partition:{1} will read from {2} earliest:{3} latest:{4}", dumpdataOptions.Topic, i, offsetBase, earliest, latest);
                            finish = ConsumeDataOfOnePartition(kafkaSimpleManager, i, offsetBase, earliest, latest, dumpdataOptions);
                            if (finish)
                            {
                                break;
                            }
                        }
                    }
                    // A full pass over the partitions without an early "finish" also terminates.
                    finish = true;
                }
                catch (Exception ex)
                {
                    // Per-iteration failure: refresh metadata and let the while loop retry.
                    Logger.ErrorFormat("ConsumeDataSimple Got exception, will refresh metadata. {0}", ex.FormatException());
                    kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
                }
                if (finish)
                {
                    break;
                }
            }
        }
        // NOTE(review): this format string appears to contain a literal line break
        // (likely a source-extraction artifact) — preserved exactly as found.
        Logger.InfoFormat("Topic:{0} Finish Read. 
totalCount:{1} ", dumpdataOptions.Topic, totalCount);
    }
    catch (Exception ex)
    {
        Logger.ErrorFormat("ConsumeDataSimple Got exception:{0}\r\ninput parameter: {1}", ex.FormatException(), dumpdataOptions.ToString());
    }
}
// Demo entry point: fetches messages from partition 0 of topic "test" in batches
// of up to 10, printing each payload as UTF-8 text, until a fetch returns null
// (error) or an empty batch (end of queue).
static void Main(string[] args)
{
    int correlationID = 0;
    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = 10,
        BufferSize = 100000,
        MaxWaitTime = 500,
        MinWaitBytes = 50,
        Zookeeper = "10.1.1.231:2181,10.1.1.232:2181,10.1.1.233:2181/kafka"
    };
    using (KafkaSimpleManager <int, Message> kafkaSimpleManager = new KafkaSimpleManager <int, Message>(config))
    {
        TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, "test", true);
        Consumer consumer = kafkaSimpleManager.GetConsumer("test", 0);
        //FetchResponse response = consumer.Fetch(Assembly.GetExecutingAssembly().ManifestModule.ToString(), "test", correlationID, 0, 0, 100, 5000, 100);
        //var messages = response.PartitionData("test", 0).GetMessageAndOffsets();
        //messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
        //var messages = FetchAndGetMessageAndOffsetList(consumer, correlationID, "test", 0, 0, 100, 5000, 100);
        //messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
        long offsetLast = -1;
        long l = 0;
        long totalCount = 0, offsetBase = 0, partitionID = 0, lastNotifytotalCount = 0, latest = 0, earliest = 0;
        while (true)
        {
            // NOTE(review): correlationID is incremented twice per loop iteration
            // (here and in the call below) — confirm that is intentional.
            correlationID++;
            List <MessageAndOffset> messages = FetchAndGetMessageAndOffsetList(consumer, correlationID++, "test", 0, 0, 10, 5000, 1);
            if (messages == null)
            {
                // Null signals a fetch failure inside the helper.
                Logger.Error("PullMessage got null List<MessageAndOffset>, please check log for detail.");
                break;
            }
            else
            {
                #region dump response.Payload
                if (messages.Any())
                {
                    offsetLast = messages.Last().MessageOffset;
                    totalCount += messages.Count;
                    // NOTE(review): this format string appears to contain a literal
                    // line break (likely an extraction artifact) — preserved as found.
                    Logger.InfoFormat("Finish read partition {0} to {1}. 
", partitionID, offsetLast);
                    // Next fetch resumes right after the last delivered offset.
                    offsetBase = offsetLast + 1;
                    if (totalCount - lastNotifytotalCount > 1000)
                    {
                        // Progress notification roughly every 1000 messages.
                        Console.WriteLine("Partition: {0} totally read {1}  will continue read from   {2}", partitionID, totalCount, offsetBase);
                        lastNotifytotalCount = totalCount;
                    }
                    // return messages
                    messages.ForEach(message => Console.WriteLine(Encoding.UTF8.GetString(message.Message.Payload)));
                }
                else
                {
                    // Empty batch means we caught up with the end of the partition.
                    Logger.InfoFormat("Finish read partition {0} to {1}.  Earliese:{2} latest:{3} ", partitionID, offsetLast, earliest, latest);
                    Console.WriteLine("Partition: {0} totally read {1}  Hit end of queue {2}", partitionID, totalCount, offsetBase);
                    break;
                }
                #endregion
            }
        }
    }
    Console.ReadKey();
}