private ProducePerfTestKafkaSimpleManagerWrapper()
{
    config = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = produceOptions.Zookeeper,
        PartitionerClass = produceOptions.PartitionerClass,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize
    };
    config.Verify();

    // The empty broker list is a placeholder; KafkaSimpleManager replaces it internally.
    producerConfigTemplate = new ProducerConfiguration(new List<BrokerConfiguration>())
    {
        ForceToPartition = -1,
        PartitionerClass = config.PartitionerClass,
        TotalNumPartitions = 0,
        RequiredAcks = produceOptions.RequiredAcks,
        AckTimeout = produceOptions.AckTimeout,
        SendTimeout = produceOptions.SendTimeout,
        ReceiveTimeout = produceOptions.ReceiveTimeout,
        CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
        BufferSize = produceOptions.BufferSize,
        SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker, // Effectively the sync producer socket count per partition.
        MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
    };

    kafkaSimpleManage = new KafkaSimpleManager<byte[], Message>(config);
    int correlationId = Interlocked.Increment(ref correlationIDGetProducer);
    kafkaSimpleManage.InitializeProducerPoolForTopic(0, clientId, correlationId, produceOptions.Topic, true, producerConfigTemplate, true);
}
internal static void Run(ProduceSimpleHelperOption produceOptions)
{
    failedMessageCount = 0;
    successMessageCount = 0;
    sentBatchCount = 0;
    produceMessagePerPartition = new Dictionary<int, int>();
    produceMessagePerPartitionExpect = new Dictionary<int, int>();
    PrepareSentMessages(produceOptions);

    kafkaSimpleManagerConfig = new KafkaSimpleManagerConfiguration()
    {
        Zookeeper = produceOptions.Zookeeper,
        MaxMessageSize = SyncProducerConfiguration.DefaultMaxMessageSize,
        PartitionerClass = produceOptions.PartitionerClass
    };
    kafkaSimpleManagerConfig.Verify();

    // The empty broker list is a placeholder; KafkaSimpleManager replaces it internally.
    producerConfigTemplate = new ProducerConfiguration(new List<BrokerConfiguration>())
    {
        ForceToPartition = -1,
        PartitionerClass = kafkaSimpleManagerConfig.PartitionerClass,
        TotalNumPartitions = 0,
        RequiredAcks = produceOptions.RequiredAcks,
        AckTimeout = produceOptions.AckTimeout,
        SendTimeout = produceOptions.SendTimeout,
        ReceiveTimeout = produceOptions.ReceiveTimeout,
        CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(produceOptions.Compression.ToString()),
        BufferSize = produceOptions.BufferSize,
        SyncProducerOfOneBroker = produceOptions.SyncProducerOfOneBroker, // Effectively the sync producer socket count per partition.
        MaxMessageSize = Math.Max(SyncProducerConfiguration.DefaultMaxMessageSize, produceOptions.MessageSize)
    };

    using (ZooKeeperClient zkClient = new ZooKeeperClient(produceOptions.Zookeeper,
        ZooKeeperConfiguration.DefaultSessionTimeout, ZooKeeperStringSerializer.Serializer))
    {
        zkClient.Connect();
        Dictionary<int, int[]> topicDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, produceOptions.Topic);

        // PartitionId == -2 means route by the customized partitioner class (specified with -l).
        if (produceOptions.PartitionId == -2)
        {
            if (string.IsNullOrEmpty(kafkaSimpleManagerConfig.PartitionerClass))
                throw new ArgumentException("The partitioner class must not be empty if you want to send to a partition by partitioner.");

            ProduceByPartitionerClass(produceOptions, topicDataInZookeeper.Count);
        }
        else
        {
            ProduceToRandomOrSpecificPartition(produceOptions);
        }
    }
}
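// When PartitionId is -2, messages are routed by the partitioner class named in
// PartitionerClass. Below is a minimal sketch of what such a class might look like,
// assuming the library's IPartitioner<TKey> interface with a single
// Partition(key, numPartitions) method; the class name "ModPartitioner" is
// hypothetical and not part of this codebase.
public class ModPartitioner : IPartitioner<byte[]>
{
    // Map a key to a partition by its first byte; returns a value in [0, numPartitions).
    public int Partition(byte[] key, int numPartitions)
    {
        if (key == null || key.Length == 0 || numPartitions <= 0)
            return 0; // Fall back to partition 0 for empty keys.
        return key[0] % numPartitions;
    }
}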
internal override void Parse(string[] args)
{
    base.BuildDictionary(args);

    GetString("-z", "--zookeeper", ref this.Zookeeper);
    CheckZookeeperPort();
    if (string.IsNullOrEmpty(this.Zookeeper))
        throw new ArgumentException("Must specify zookeeper with port by -z. Example: -z localhost:2181");

    GetString("-t", "--topic", ref this.Topic);
    if (string.IsNullOrEmpty(this.Topic))
        throw new ArgumentException("Must specify topic by -t.");

    GetInt("-p", "--PartitionId", ref this.PartitionId);
    GetString("-l", "--PartitionerClass", ref this.PartitionerClass);
    GetInt("-c", "--batchcount", ref this.BatchCount);
    GetInt("-b", "--MessageCountPerBatch", ref this.MessageCountPerBatch);
    GetInt("-m", "--MessageSize", ref this.MessageSize);
    GetInt("-r", "--Compression", ref this.Compression);
    this.CompressionCodec = KafkaNetLibraryExample.ConvertToCodec(this.Compression.ToString());
    GetShort("-a", "--RequiredAcks", ref this.RequiredAcks);
    GetInt("-k", "--AckTimeout", ref this.AckTimeout);
    GetInt("-s", "--SendTimeout", ref this.SendTimeout);
    GetInt("-e", "--ReceiveTimeout", ref this.ReceiveTimeout);
    GetInt("-i", "--BufferSize", ref this.BufferSize);
    GetInt("-y", "--SyncProducerOfOneBroker", ref this.SyncProducerOfOneBroker);
    GetInt("-d", "--ThreadCount", ref this.ThreadCount);
    GetInt("-o", "--SpeedConstrolMBPerSecond", ref this.SpeedConstrolMBPerSecond);
    GetInt("-f", "--SleepInMSWhenException", ref this.SleepInMSWhenException);

    // PartitionId == -2 routes by partitioner class, so the class name becomes mandatory.
    if (this.PartitionId == -2 && string.IsNullOrEmpty(this.PartitionerClass))
    {
        throw new ArgumentException(string.Format(
            "You specified partition ID as -2; please also specify the partitioner class full name, for example: {0}",
            ProducerConfiguration.DefaultPartitioner));
    }
}
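// A sketch of a command line that the parser above would accept. The topic name
// and ZooKeeper host are placeholders, and the numeric values are illustrative,
// not defaults from this codebase: send 100 batches (-c) of 1000 messages (-b)
// of 1024 bytes each (-m), routed by a partitioner class (-p -2 requires -l):
//
//   -z localhost:2181 -t mytopic -p -2 -l <partitioner class full name> -c 100 -b 1000 -m 1024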
internal bool Run(ZooKeeperClient zkClient, string zookeeper)
{
    bool success = true;

    SortedDictionary<int, long> latestCommitted = GetComsumerGroupOffsets(zkClient, this.topic, this.group);
    SortedDictionary<int, long> latestOffsetDict = new SortedDictionary<int, long>();
    SortedDictionary<int, int> partitionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
    // BrokerID --> count of partitions for which the broker is a replica.
    SortedDictionary<int, int> partitionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
    SortedDictionary<int, long> latestLength = new SortedDictionary<int, long>();
    // Owners
    SortedDictionary<int, string> latestOwners = GetComsumerGroupOwners(zkClient, this.topic, this.group);

    TopicHelper.DumpTopicMetadataAndOffsetInternal(zkClient, this.topic, zookeeper,
        -1, true, true, DateTime.MinValue,
        partitionBrokerID_LeaderCountDistrib, partitionBrokerID_ReplicaCountDistrib, latestOffsetDict, latestLength);

    if (latestOffsetDictLastValue == null)
    {
        // First run: dump the initial snapshot.
        StringBuilder sb = new StringBuilder();
        sb.AppendFormat("====Partitions====\r\n");
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
            sb.AppendFormat("{0,-9} ", kv.Key);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        sb = new StringBuilder();
        sb.AppendFormat("====LatestOffset====\r\n");
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
            sb.AppendFormat("{0,-9} ", kv.Value);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        sb = new StringBuilder();
        sb.AppendFormat("====ConsumedOffset====\r\n");
        foreach (KeyValuePair<int, long> kv in latestCommitted)
            sb.AppendFormat("{0,-9} ", kv.Value);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        sb = new StringBuilder();
        sb.AppendFormat("====Latest-Earliest: initial value====\r\n");
        foreach (KeyValuePair<int, long> kv in latestLength)
            sb.AppendFormat("{0,-9} ", kv.Value);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        sb = new StringBuilder();
        sb.AppendFormat("====Latest-Committed: initial value====\r\n");
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            if (latestCommitted.ContainsKey(kv.Key))
                sb.AppendFormat("{0,-9} ", kv.Value - latestCommitted[kv.Key]);
            else
                sb.AppendFormat("NotCommitted:{0}-{1,-9} ", kv.Key, kv.Value);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        sb = new StringBuilder();
        if (latestOffsetDict.Count == latestOwners.Count)
        {
            sb.AppendFormat("====Owners== all {0} partitions have an owner. ==\r\n", latestOwners.Count);
        }
        else
        {
            sb.AppendFormat("====Owners ERROR. partitions: {0}  partitions with owner: {1} ====\r\n",
                latestOffsetDict.Count, latestOwners.Count);
        }
        foreach (var ownerByOwnerName in (from o in latestOwners
                                          group o by o.Value into g
                                          select new { owner = g.Key, partitions = g.ToArray() }).OrderBy(r => r.owner))
        {
            sb.AppendFormat("{0}:\t{1}\t", ownerByOwnerName.owner, ownerByOwnerName.partitions.Length);
            for (int k = 0; k < ownerByOwnerName.partitions.Length; k++)
            {
                if (k == 0)
                    sb.AppendFormat("{0}", ownerByOwnerName.partitions[k]);
                else
                    sb.AppendFormat(", {0}", ownerByOwnerName.partitions[k]);
            }
            sb.AppendFormat("\r\n");
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendToFile(GetFile(), sb.ToString());
    }
    else
    {
        // Subsequent runs: report deltas against the previous snapshot.
        StringBuilder sb = new StringBuilder();
        sb.Append("Latest-Earliest: ");
        foreach (KeyValuePair<int, long> kv in latestLength)
            sb.AppendFormat("{0,-9} ", kv.Value);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        // Latest delta
        sb = new StringBuilder();
        long latestDelta = 0;
        long aggregateLatestDelta = 0;
        foreach (KeyValuePair<int, long> kv in latestOffsetDictLastValue)
        {
            if (latestOffsetDict.ContainsKey(kv.Key))
            {
                sb.AppendFormat("{0,-9} ", latestOffsetDict[kv.Key] - kv.Value);
                latestDelta += latestOffsetDict[kv.Key] - kv.Value;
            }
            else
            {
                sb.AppendFormat("Latest:{0,-9} ", kv.Value);
            }

            if (latestOffsetDictFirstValue.ContainsKey(kv.Key))
                aggregateLatestDelta += kv.Value - latestOffsetDictFirstValue[kv.Key];
        }
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            if (!latestOffsetDictLastValue.ContainsKey(kv.Key))
                sb.AppendFormat("NewLatest:{0}-{1,-9} ", kv.Key, kv.Value);
        }
        sb.Insert(0, string.Format("Latest Delta: {0}", latestDelta));
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        // Committed delta
        sb = new StringBuilder();
        long latestDeltaCommitted = 0;
        long aggregateLatestCommitted = 0;
        foreach (KeyValuePair<int, long> kv in latestCommitedDictLastValue)
        {
            if (latestCommitted.ContainsKey(kv.Key))
            {
                sb.AppendFormat("{0,-9} ", latestCommitted[kv.Key] - kv.Value);
                latestDeltaCommitted += latestCommitted[kv.Key] - kv.Value;
            }
            else
            {
                sb.AppendFormat("Committed:{0,-9} ", kv.Value);
            }

            if (latestCommitedDictFirstValue.ContainsKey(kv.Key))
                aggregateLatestCommitted += kv.Value - latestCommitedDictFirstValue[kv.Key];
        }
        foreach (KeyValuePair<int, long> kv in latestCommitted)
        {
            if (!latestCommitedDictLastValue.ContainsKey(kv.Key))
                sb.AppendFormat("NewCommitted:{0}-{1,-9} ", kv.Key, kv.Value);
        }
        sb.Insert(0, string.Format("Committed Delta: {0}", latestDeltaCommitted));
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        // Gap between latest and committed offsets
        sb = new StringBuilder();
        sb.AppendFormat("Latest-Committed: {0}= ", latestOffsetDict.Count);
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            if (latestCommitted.ContainsKey(kv.Key))
                sb.AppendFormat("{0,-9} ", kv.Value - latestCommitted[kv.Key]);
            else
                sb.AppendFormat("NotCommitted:{0}-{1,-9} ", kv.Key, kv.Value);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());

        // Owners
        sb = new StringBuilder();
        if (latestOffsetDict.Count == latestOwners.Count)
        {
            sb.AppendFormat("====Owners== all {0} partitions have an owner. ==\r\n", latestOwners.Count);
        }
        else
        {
            sb.AppendFormat("====Owners ERROR. partitions: {0}  partitions with owner: {1} ====\r\n",
                latestOffsetDict.Count, latestOwners.Count);
            success = false;
        }
        foreach (var ownerByOwnerName in (from o in latestOwners
                                          group o by o.Value into g
                                          select new { owner = g.Key, partitions = g.ToArray() }).OrderBy(r => r.owner))
        {
            sb.AppendFormat("{0}:\t{1}\t", ownerByOwnerName.owner, ownerByOwnerName.partitions.Length);
            for (int k = 0; k < ownerByOwnerName.partitions.Length; k++)
            {
                if (k == 0)
                    sb.AppendFormat("{0}", ownerByOwnerName.partitions[k]);
                else
                    sb.AppendFormat(", {0}", ownerByOwnerName.partitions[k]);
            }
            sb.AppendFormat("\r\n");
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendToFile(GetFile(), sb.ToString());

        sb = new StringBuilder();
        sb.AppendFormat("In the last {0:0.0} seconds, total latest offset change: {1}, total committed offset change: {2}. Percentage: {3:P2} Time: {4}\r\n",
            (DateTime.UtcNow - startTime).TotalSeconds, aggregateLatestDelta, aggregateLatestCommitted,
            aggregateLatestCommitted * 1.0 / aggregateLatestDelta, DateTime.Now);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
    }

    previousOwners = latestOwners;
    latestOffsetDictLastValue = latestOffsetDict;
    latestCommitedDictLastValue = latestCommitted;
    if (latestOffsetDictFirstValue == null)
        latestOffsetDictFirstValue = latestOffsetDict;
    if (latestCommitedDictFirstValue == null)
        latestCommitedDictFirstValue = latestCommitted;

    return success;
}
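// A minimal sketch of driving the consumer-group monitor above: poll on a fixed
// interval and stop once an owner check fails. "monitor" stands for an instance of
// the class that defines Run, and the 30-second interval is an arbitrary example;
// neither comes from this codebase.
using (ZooKeeperClient zkClient = new ZooKeeperClient("localhost:2181",
    ZooKeeperConfiguration.DefaultSessionTimeout, ZooKeeperStringSerializer.Serializer))
{
    zkClient.Connect();
    while (monitor.Run(zkClient, "localhost:2181"))
    {
        Thread.Sleep(30 * 1000); // Re-sample after the polling interval.
    }
}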
internal static void ConsumeDataSimple(ConsumeDataHelperArguments dumpdataOptions)
{
    correlationID = 0;
    totalCountUTF8 = 0;
    totalCountOriginal = 0;
    totalCount = 0;
    lastNotifytotalCount = 0;

    KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
    {
        FetchSize = dumpdataOptions.FetchSize,
        BufferSize = dumpdataOptions.BufferSize,
        MaxWaitTime = dumpdataOptions.MaxWaitTime,
        MinWaitBytes = dumpdataOptions.MinWaitBytes,
        Zookeeper = dumpdataOptions.Zookeeper
    };
    config.Verify();

    bool finish = false;
    try
    {
        using (KafkaSimpleManager<int, Message> kafkaSimpleManager = new KafkaSimpleManager<int, Message>(config))
        {
            TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
            while (true)
            {
                try
                {
                    for (int i = 0; i <= topicMetadata.PartitionsMetadata.Max(r => r.PartitionId); i++)
                    {
                        // PartitionIndex == -1 means consume every partition.
                        if (dumpdataOptions.PartitionIndex == -1 || i == dumpdataOptions.PartitionIndex)
                        {
                            #region Get real offset and adjust
                            long earliest = 0;
                            long latest = 0;
                            long offsetBase = 0;
                            OffsetHelper.GetAdjustedOffset<int, Message>(dumpdataOptions.Topic,
                                kafkaSimpleManager, i,
                                KafkaNetLibraryExample.ConvertOffsetType(dumpdataOptions.Offset),
                                KafkaNetLibraryExample.ConvertOffset(dumpdataOptions.Offset),
                                dumpdataOptions.LastMessagesCount, out earliest, out latest, out offsetBase);
                            #endregion

                            Console.WriteLine("Topic:{0} Partition:{1} will read from {2} earliest:{3} latest:{4}",
                                dumpdataOptions.Topic, i, offsetBase, earliest, latest);
                            finish = ConsumeDataOfOnePartition(kafkaSimpleManager, i, offsetBase, earliest, latest, dumpdataOptions);
                            if (finish)
                                break;
                        }
                    }
                    finish = true;
                }
                catch (Exception ex)
                {
                    Logger.ErrorFormat("ConsumeDataSimple got an exception, will refresh metadata. {0}", ex.FormatException());
                    kafkaSimpleManager.RefreshMetadata(0, ClientID, correlationID++, dumpdataOptions.Topic, true);
                }

                if (finish)
                    break;
            }
        }
        Logger.InfoFormat("Topic:{0} finished reading. totalCount:{1}", dumpdataOptions.Topic, totalCount);
    }
    catch (Exception ex)
    {
        Logger.ErrorFormat("ConsumeDataSimple got an exception:{0}\r\ninput parameters: {1}", ex.FormatException(), dumpdataOptions.ToString());
    }
}
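// A sketch of invoking the dump above, assuming ConsumeDataHelperArguments exposes
// settable properties for the fields ConsumeDataSimple reads (in the real tool they
// are filled in by its command-line parser). All values here are illustrative;
// Offset and LastMessagesCount are left at their defaults.
var options = new ConsumeDataHelperArguments
{
    Zookeeper = "localhost:2181",
    Topic = "mytopic",
    PartitionIndex = -1,       // -1 dumps every partition.
    FetchSize = 1024 * 1024,
    BufferSize = 1024 * 1024,
    MaxWaitTime = 1000,
    MinWaitBytes = 1
};
ConsumeDataSimple(options);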
internal static void Run(ProduceMonitorHelperOptions produceMonitorOptions)
{
    using (ZooKeeperClient zkClient = new ZooKeeperClient(produceMonitorOptions.Zookeeper,
        ZooKeeperConfiguration.DefaultSessionTimeout, ZooKeeperStringSerializer.Serializer))
    {
        zkClient.Connect();
        while (true)
        {
            SortedDictionary<int, long> latestOffsetDict = new SortedDictionary<int, long>();
            SortedDictionary<int, int> partitionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
            // BrokerID --> count of partitions for which the broker is a replica.
            SortedDictionary<int, int> partitionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
            SortedDictionary<int, long> latestLength = new SortedDictionary<int, long>();

            TopicHelper.DumpTopicMetadataAndOffsetInternal(zkClient, produceMonitorOptions.Topic, produceMonitorOptions.Zookeeper,
                -1, true, true, DateTime.MinValue,
                partitionBrokerID_LeaderCountDistrib, partitionBrokerID_ReplicaCountDistrib, latestOffsetDict, latestLength);

            if (latestOffsetDictLastValue == null)
            {
                // First iteration: dump the partitions and their latest offsets.
                StringBuilder sb = new StringBuilder();
                sb.AppendFormat("====Partitions====\r\n");
                foreach (KeyValuePair<int, long> kv in latestOffsetDict)
                    sb.AppendFormat("{0,-9} ", kv.Key);
                Logger.Info(sb.ToString());
                KafkaNetLibraryExample.AppendLineToFile(produceMonitorOptions.File, sb.ToString());

                sb = new StringBuilder();
                sb.AppendFormat("====LatestOffset====\r\n");
                foreach (KeyValuePair<int, long> kv in latestOffsetDict)
                    sb.AppendFormat("{0,-9} ", kv.Value);
                Logger.Info(sb.ToString());
                KafkaNetLibraryExample.AppendLineToFile(produceMonitorOptions.File, sb.ToString());
            }
            else
            {
                // Subsequent iterations: report per-partition offset deltas.
                StringBuilder sb = new StringBuilder();
                sb.Append("Latest Delta: ");
                foreach (KeyValuePair<int, long> kv in latestOffsetDictLastValue)
                {
                    if (latestOffsetDict.ContainsKey(kv.Key))
                        sb.AppendFormat("{0,-9} ", latestOffsetDict[kv.Key] - kv.Value);
                    else
                        sb.AppendFormat("Latest:{0,-9} ", kv.Value);
                }
                foreach (KeyValuePair<int, long> kv in latestOffsetDict)
                {
                    if (!latestOffsetDictLastValue.ContainsKey(kv.Key))
                        sb.AppendFormat("NewLatest:{0}-{1,-9} ", kv.Key, kv.Value);
                }
                Logger.Info(sb.ToString());
                KafkaNetLibraryExample.AppendLineToFile(produceMonitorOptions.File, sb.ToString());
            }

            latestOffsetDictLastValue = latestOffsetDict;
            Thread.Sleep(produceMonitorOptions.IntervalInSeconds * 1000);
        }
    }
}
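// A sketch of starting the produce monitor, assuming ProduceMonitorHelperOptions
// exposes settable properties for the fields Run uses (in the real tool they come
// from its command-line parser); values are illustrative. Since Run loops forever,
// this hosts it on a background thread.
var monitorOptions = new ProduceMonitorHelperOptions
{
    Zookeeper = "localhost:2181",
    Topic = "mytopic",
    File = @"produce-monitor.log", // Offsets and deltas are appended here.
    IntervalInSeconds = 30
};
new Thread(() => Run(monitorOptions)) { IsBackground = true }.Start();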