/// <summary>
/// Polls the latest per-partition offsets of the configured topic forever and
/// logs the offset delta between successive polls, both to the logger and to
/// the output file named in the options. Never returns; kill the process to stop.
/// </summary>
/// <param name="produceMonitorOptions">
/// Carries the Zookeeper connection string, topic name, output file path and
/// poll interval in seconds.
/// </param>
internal static void Run(ProduceMonitorHelperOptions produceMonitorOptions)
{
    using (ZooKeeperClient zkClient = new ZooKeeperClient(produceMonitorOptions.Zookeeper,
        ZooKeeperConfiguration.DefaultSessionTimeout, ZooKeeperStringSerializer.Serializer))
    {
        zkClient.Connect();
        while (true)
        {
            SortedDictionary<int, long> latestOffsetDict = new SortedDictionary<int, long>();
            // BrokerID --> count of partitions this broker leads.
            SortedDictionary<int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
            // BrokerID --> count of partitions this broker replicates.
            SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
            SortedDictionary<int, long> latestLength = new SortedDictionary<int, long>();
            // Fills latestOffsetDict / latestLength for the topic from the cluster metadata.
            TopicHelper.DumpTopicMetadataAndOffsetInternal(zkClient, produceMonitorOptions.Topic,
                produceMonitorOptions.Zookeeper, -1, true, true, DateTime.MinValue,
                parttionBrokerID_LeaderCountDistrib, parttionBrokerID_ReplicaCountDistrib,
                latestOffsetDict, latestLength);

            if (latestOffsetDictLastValue == null)
            {
                // First poll: emit the partition-id header row and the current latest offsets.
                StringBuilder sb = new StringBuilder();
                sb.AppendFormat("====Partitions====\r\n");
                foreach (KeyValuePair<int, long> kv in latestOffsetDict)
                {
                    sb.AppendFormat("{0,-9} ", kv.Key);
                }
                Logger.Info(sb.ToString());
                KafkaNetLibraryExample.AppendLineToFile(produceMonitorOptions.File, sb.ToString());

                sb = new StringBuilder();
                sb.AppendFormat("====LatestOffset====\r\n");
                foreach (KeyValuePair<int, long> kv in latestOffsetDict)
                {
                    sb.AppendFormat("{0,-9} ", kv.Value);
                }
                Logger.Info(sb.ToString());
                KafkaNetLibraryExample.AppendLineToFile(produceMonitorOptions.File, sb.ToString());
            }
            else
            {
                // Subsequent polls: emit each partition's offset growth since the previous poll.
                StringBuilder sb = new StringBuilder();
                sb.Append("Latest Delta: ");
                foreach (KeyValuePair<int, long> kv in latestOffsetDictLastValue)
                {
                    // Single lookup instead of ContainsKey + indexer.
                    long currentOffset;
                    if (latestOffsetDict.TryGetValue(kv.Key, out currentOffset))
                    {
                        sb.AppendFormat("{0,-9} ", currentOffset - kv.Value);
                    }
                    else
                    {
                        // Partition vanished since the last poll; show its last known offset.
                        sb.AppendFormat("Latest:{0,-9} ", kv.Value);
                    }
                }
                foreach (KeyValuePair<int, long> kv in latestOffsetDict)
                {
                    if (!latestOffsetDictLastValue.ContainsKey(kv.Key))
                    {
                        // Partition appeared since the last poll.
                        sb.AppendFormat("NewLatest:{0}-{1,-9} ", kv.Key, kv.Value);
                    }
                }
                Logger.Info(sb.ToString());
                KafkaNetLibraryExample.AppendLineToFile(produceMonitorOptions.File, sb.ToString());
            }

            // Snapshot becomes the baseline for the next iteration's delta.
            latestOffsetDictLastValue = latestOffsetDict;
            Thread.Sleep(produceMonitorOptions.IntervalInSeconds * 1000);
        }
    }
}
/// <summary>
/// One monitoring pass for a consumer group: reads the group's committed offsets,
/// the topic's latest offsets and the partition owners, then logs snapshot tables
/// on the first call and per-pass deltas (latest, committed, lag, owners) on later
/// calls. State between calls lives in the *FirstValue / *LastValue fields.
/// </summary>
/// <param name="zkClient">Connected ZooKeeper client used for all reads.</param>
/// <param name="zookeeper">Zookeeper connection string, forwarded to the metadata dump.</param>
/// <returns>
/// true if every partition had an owner (always true on the first call — the
/// first-run branch does not flag the error); false otherwise.
/// </returns>
internal bool Run(ZooKeeperClient zkClient, string zookeeper)
{
    bool success = true;
    // Offsets the consumer group has committed, per partition — presumably read
    // from the group's ZK offset nodes; TODO confirm against GetComsumerGroupOffsets.
    SortedDictionary<int, long> latestCommited = GetComsumerGroupOffsets(zkClient, this.topic, this.group);
    SortedDictionary<int, long> latestOffsetDict = new SortedDictionary<int, long>();
    SortedDictionary<int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
    //BrokerID -->Count of as replica
    SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
    SortedDictionary<int, long> latestLength = new SortedDictionary<int, long>();
    // Partition --> owning consumer id for this group.
    SortedDictionary<int, string> latestOwners = GetComsumerGroupOwners(zkClient, this.topic, this.group);
    // Fills latestOffsetDict / latestLength from the cluster metadata.
    TopicHelper.DumpTopicMetadataAndOffsetInternal(zkClient, this.topic, zookeeper, -1, true, true, DateTime.MinValue,
        parttionBrokerID_LeaderCountDistrib, parttionBrokerID_ReplicaCountDistrib, latestOffsetDict, latestLength);
    if (latestOffsetDictLastValue == null)
    {
        // First pass: dump absolute snapshot tables, one section per StringBuilder.
        StringBuilder sb = new StringBuilder();
        sb.AppendFormat("====Partitions====\r\n");
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            sb.AppendFormat("{0,-9} ", kv.Key);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        sb = new StringBuilder();
        sb.AppendFormat("====LatestOffset====\r\n");
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            sb.AppendFormat("{0,-9} ", kv.Value);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        sb = new StringBuilder();
        sb.AppendFormat("====ConsumedOffset====\r\n");
        foreach (KeyValuePair<int, long> kv in latestCommited)
        {
            sb.AppendFormat("{0,-9} ", kv.Value);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        sb = new StringBuilder();
        sb.AppendFormat("===Latest-Earliest: Initial value====\r\n");
        foreach (KeyValuePair<int, long> kv in latestLength)
        {
            sb.AppendFormat("{0,-9} ", kv.Value);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        sb = new StringBuilder();
        // Initial lag: latest offset minus committed offset, per partition.
        sb.AppendFormat("====Latest-Commited: Initial Value====\r\n");
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            if (latestCommited.ContainsKey(kv.Key))
            {
                sb.AppendFormat("{0,-9} ", kv.Value - latestCommited[kv.Key]);
            }
            else
            {
                sb.AppendFormat("NotComiited:{0}-{1,-9} ", kv.Key, kv.Value);
            }
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        sb = new StringBuilder();
        // Owner coverage check. NOTE(review): unlike the else-branch below, a
        // mismatch here does NOT set success = false — confirm this asymmetry is intended.
        if (latestOffsetDict.Count == latestOwners.Count)
        {
            sb.AppendFormat("====Owners== all {0} partitions has owner. ==\r\n", latestOwners.Count);
        }
        else
        {
            sb.AppendFormat("====Owners ERROR. partitoins: {0} partition has owner: {1} ====\r\n", latestOffsetDict.Count, latestOwners.Count);
        }
        // Group partitions by owning consumer id, list each owner's partitions.
        foreach (var ownerByOwnerName in (from o in latestOwners group o by o.Value into g select new { owner = g.Key, partitions = g.ToArray() }).OrderBy(r => r.owner))
        {
            sb.AppendFormat("{0}:\t{1}\t", ownerByOwnerName.owner, ownerByOwnerName.partitions.Length);
            for (int k = 0; k < ownerByOwnerName.partitions.Length; k++)
            {
                if (k == 0)
                {
                    sb.AppendFormat("{0}", ownerByOwnerName.partitions[k]);
                }
                else
                {
                    sb.AppendFormat(", {0}", ownerByOwnerName.partitions[k]);
                }
            }
            sb.AppendFormat("\r\n");
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendToFile(GetFile(), sb.ToString());
    }
    else
    {
        // Subsequent passes: dump deltas relative to the previous/first snapshots.
        //Length (latest - earliest per partition)
        StringBuilder sb = new StringBuilder();
        sb.Append("Latest-Earliest: ");
        foreach (KeyValuePair<int, long> kv in latestLength)
        {
            sb.AppendFormat("{0,-9} ", kv.Value);
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        //Latest Delta: per-partition growth since last pass, plus aggregate since first pass.
        sb = new StringBuilder();
        long latestDelta = 0;
        long aggregateLatestDelta = 0;
        foreach (KeyValuePair<int, long> kv in latestOffsetDictLastValue)
        {
            if (latestOffsetDict.ContainsKey(kv.Key))
            {
                sb.AppendFormat("{0,-9} ", latestOffsetDict[kv.Key] - kv.Value);
                latestDelta += latestOffsetDict[kv.Key] - kv.Value;
            }
            else
            {
                // Partition missing from the current snapshot.
                sb.AppendFormat("Latest:{0,-9} ", kv.Value);
            }
            // NOTE(review): aggregate uses the PREVIOUS pass's value (kv.Value), not the
            // current snapshot — confirm that lagging-by-one-pass total is intended.
            if (latestOffsetDictFirstValue.ContainsKey(kv.Key))
            {
                aggregateLatestDelta += kv.Value - latestOffsetDictFirstValue[kv.Key];
            }
        }
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            if (!latestOffsetDictLastValue.ContainsKey(kv.Key))
            {
                sb.AppendFormat("NewLatest:{0}-{1,-9} ", kv.Key, kv.Value);
            }
        }
        sb.Insert(0, string.Format("Latest Delta: {0}", latestDelta));
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        //Commited Delta: same computation over the committed-offset snapshots.
        sb = new StringBuilder();
        long latestDeltaCommited = 0;
        long aggregateLatestCommite = 0;
        foreach (KeyValuePair<int, long> kv in latestCommitedDictLastValue)
        {
            if (latestCommited.ContainsKey(kv.Key))
            {
                sb.AppendFormat("{0,-9} ", latestCommited[kv.Key] - kv.Value);
                latestDeltaCommited += latestCommited[kv.Key] - kv.Value;
            }
            else
            {
                sb.AppendFormat("Commited:{0,-9} ", kv.Value);
            }
            if (latestCommitedDictFirstValue.ContainsKey(kv.Key))
            {
                aggregateLatestCommite += kv.Value - latestCommitedDictFirstValue[kv.Key];
            }
        }
        foreach (KeyValuePair<int, long> kv in latestCommited)
        {
            if (!latestCommitedDictLastValue.ContainsKey(kv.Key))
            {
                sb.AppendFormat("NewCommited:{0}-{1,-9} ", kv.Key, kv.Value);
            }
        }
        sb.Insert(0, string.Format("Commited Delta: {0}", latestDeltaCommited));
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        //Gap: current lag (latest - committed) per partition.
        sb = new StringBuilder();
        sb.AppendFormat("Latest-Commited: {0}= ", latestOffsetDict.Count);
        foreach (KeyValuePair<int, long> kv in latestOffsetDict)
        {
            if (latestCommited.ContainsKey(kv.Key))
            {
                sb.AppendFormat("{0,-9} ", kv.Value - latestCommited[kv.Key]);
            }
            else
            {
                sb.AppendFormat("NotComiited:{0}-{1,-9} ", kv.Key, kv.Value);
            }
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
        //Owner coverage; here a mismatch DOES mark the pass as failed.
        sb = new StringBuilder();
        if (latestOffsetDict.Count == latestOwners.Count)
        {
            sb.AppendFormat("====Owners== all {0} partitions has owner. ==\r\n", latestOwners.Count);
        }
        else
        {
            sb.AppendFormat("====Owners ERROR. partitoins: {0} partition has owner: {1} ====\r\n", latestOffsetDict.Count, latestOwners.Count);
            success = false;
        }
        foreach (var ownerByOwnerName in (from o in latestOwners group o by o.Value into g select new { owner = g.Key, partitions = g.ToArray() }).OrderBy(r => r.owner))
        {
            sb.AppendFormat("{0}:\t{1}\t", ownerByOwnerName.owner, ownerByOwnerName.partitions.Length);
            for (int k = 0; k < ownerByOwnerName.partitions.Length; k++)
            {
                if (k == 0)
                {
                    sb.AppendFormat("{0}", ownerByOwnerName.partitions[k]);
                }
                else
                {
                    sb.AppendFormat(", {0}", ownerByOwnerName.partitions[k]);
                }
            }
            sb.AppendFormat("\r\n");
        }
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendToFile(GetFile(), sb.ToString());
        // Summary line. NOTE(review): divides by aggregateLatestDelta with no zero
        // guard — produces NaN/Infinity when no latest-offset change occurred.
        sb = new StringBuilder();
        sb.AppendFormat("In last {0:0.0} seconds. Totally latest offset change:{1} Totally commited offset change:{2} . Percentage:{3:P2} Time:{4}\r\n"
            , (DateTime.UtcNow - startTime).TotalSeconds, aggregateLatestDelta, aggregateLatestCommite, aggregateLatestCommite * 1.0 / aggregateLatestDelta, DateTime.Now);
        Logger.Info(sb.ToString());
        KafkaNetLibraryExample.AppendLineToFile(GetFile(), sb.ToString());
    }
    // Roll the snapshots forward; capture the first snapshot exactly once.
    previousOwners = latestOwners;
    latestOffsetDictLastValue = latestOffsetDict;
    latestCommitedDictLastValue = latestCommited;
    if (latestOffsetDictFirstValue == null)
    {
        latestOffsetDictFirstValue = latestOffsetDict;
    }
    if (latestCommitedDictFirstValue == null)
    {
        latestCommitedDictFirstValue = latestCommited;
    }
    return (success);
}
/// <summary>
/// Entry point for the example tool: generates the per-verb assist files, tunes
/// the connection settings, parses the command verb and its sub-arguments
/// (exiting with -1 and printing usage on a parse failure), then dispatches to
/// the helper matching the requested verb.
/// </summary>
/// <param name="taskIndexInStorm">
/// When >= 0 (running inside Storm), suffixes the consumer-group id and output
/// file so parallel tasks do not collide.
/// </param>
/// <param name="args">Raw command-line arguments.</param>
public static void MainInternal(int taskIndexInStorm, string[] args)
{
    // Emit the assist file for every supported verb, in the original order.
    foreach (string assistVerb in new[]
    {
        "producesimple", "produceperftest", "producemonitor", "eventserverperftest",
        "consumesimple", "consumegroup", "consumegroupmonitor", "topic"
    })
    {
        GenerateAssistFile(assistVerb);
    }

    ServicePointManager.DefaultConnectionLimit = 5000;
    ServicePointManager.UseNagleAlgorithm = false;

    var log4netSection = ConfigurationManager.GetSection("log4net");
    if (log4netSection != null)
    {
        //XmlConfigurator.Configure();
    }

    // Stage 1: parse the verb itself.
    KafkaNETExampleCommandVerb commandOptions = new KafkaNETExampleCommandVerb();
    try
    {
        commandOptions.Parse(args);
    }
    catch (Exception e)
    {
        Logger.ErrorFormat("{0}", e.FormatException());
        Console.WriteLine(KafkaNETExampleCommandVerb.GetUsage());
        Environment.Exit(-1);
    }

    // Stage 2: parse the verb-specific sub-arguments.
    KafkaNETExampleSubArguments realOption = KafkaNETExampleCommandVerb.ActiveSubOption;
    try
    {
        realOption.Parse(args);
    }
    catch (Exception e)
    {
        Logger.ErrorFormat("{0}", e.FormatException());
        Console.WriteLine(realOption.GetUsage(false));
        Environment.Exit(-1);
    }

    Logger.InfoFormat("All arguments of {0}: \r\n{1}", KafkaNETExampleCommandVerb.AssemblyName, realOption.GetArgDict());

    // Dispatch to the helper for the parsed verb (several verbs alias one helper).
    switch (KafkaNETExampleCommandVerb.Verb)
    {
        case "producesimple":
        case "produceroundrobin":
            ProduceSimpleHelper.Run((ProduceSimpleHelperOption)realOption);
            break;

        case "produceperftest":
        case "producewrapper":
            new ProducePerfTestHelper().Run((ProducePerfTestHelperOption)realOption);
            break;

        case "producem":
        case "producemonitor":
            ProduceMonitorHelper.Run((ProduceMonitorHelperOptions)realOption);
            break;

        case "eventserverperftest":
            new JavaEventServerPerfTestHelper().Run((JavaEventServerPerfTestHelperOptions)realOption);
            break;

        case "consumesimple":
        case "dumpdata":
            ConsumeSimpleHelper.ConsumeDataSimple((ConsumeDataHelperArguments)realOption);
            break;

        case "consumegroup":
        case "dumpdataasconsumergroup":
            ConsumeGroupHelperOptions consumeGroupOptions = (ConsumeGroupHelperOptions)realOption;
            if (taskIndexInStorm >= 0)
            {
                // Make the consumer id and output file unique per Storm task.
                consumeGroupOptions.ConsumerId = consumeGroupOptions.ConsumerId + taskIndexInStorm.ToString();
                consumeGroupOptions.File = consumeGroupOptions.ConsumerId + taskIndexInStorm.ToString() + consumeGroupOptions.File;
            }
            ConsumerGroupHelper.DumpMessageAsConsumerGroup(consumeGroupOptions);
            break;

        case "latestoffsetofconsumergroup":
        case "consumegroupmonitor":
        case "consumegroupm":
        case "consumem":
            ConsumeGroupMonitorHelper.DumpConsumerGroupOffsets((ConsumeGroupMonitorHelperOptions)realOption);
            break;

        case "topic":
            TopicHelper.DumpTopicMetadataAndOffset((TopicHelperArguments)realOption);
            break;

        case "test":
            TestHelper.Run((TestHelperOptions)realOption);
            break;

        default:
            Logger.Error(string.Format("Invalid verb={0}", KafkaNETExampleCommandVerb.Verb));
            return;
    }
}