/// <summary>
/// Resolves the concrete start-read offset (<paramref name="offsetBase"/>) for one partition,
/// and reports the partition's earliest/latest offsets via the out parameters.
/// For <see cref="KafkaOffsetType.Timestamp"/> the broker is asked for the offset at the
/// requested time; if no data exists before that time, the partial summary is thrown
/// inside an ApplicationException (wrapping TimeStampTooSmallException).
/// On success the per-partition summary is written to the log.
/// </summary>
internal static void GetAdjustedOffset<TKey, TData>(string topic, KafkaSimpleManager<TKey, TData> kafkaSimpleManager, int partitionID, KafkaOffsetType offsetType, long offset, int lastMessageCount, out long earliest, out long latest, out long offsetBase)
{
    // Human-readable summary for this partition; logged on success, or carried in
    // the exception message when the requested timestamp is too small.
    StringBuilder summary = new StringBuilder();

    // NOTE(review): versionId/clientId/correlationId are hard-coded to (0, "", 0) here,
    // unlike the dump path which uses the shared constants — presumably intentional for
    // this helper, but worth confirming.
    kafkaSimpleManager.RefreshAndGetOffset(0, string.Empty, 0, topic, partitionID, true, out earliest, out latest);

    long length = latest - earliest;
    summary.AppendFormat("\t\tearliest:{0}\tlatest:{1}\tlength:{2}",
        earliest,
        latest,
        length == 0 ? "(empty)" : length.ToString());

    if (offsetType != KafkaOffsetType.Timestamp)
    {
        // Non-timestamp types (earliest/latest/last/...) need no extra broker call.
        offsetBase = KafkaClientHelperUtils.GetValidStartReadOffset(offsetType, earliest, latest, 0, lastMessageCount);
    }
    else
    {
        // 'offset' carries a Unix timestamp (ms) in this mode; round-trip it through
        // the helper so the logged value matches what is actually sent to the broker.
        DateTime requestedTime = KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(offset);
        long requestedMillis = KafkaClientHelperUtils.ToUnixTimestampMillis(requestedTime);
        try
        {
            long offsetAtTime = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(0, string.Empty, 0, topic, partitionID, requestedTime);
            summary.AppendFormat("\r\n");
            summary.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tTime(Local):{2}\tUnixTimestamp:{3}\t",
                offsetAtTime,
                KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(requestedMillis).ToString("s"),
                requestedTime.ToString("s"),
                requestedMillis);
            offsetBase = KafkaClientHelperUtils.GetValidStartReadOffset(offsetType, earliest, latest, offsetAtTime, lastMessageCount);
        }
        catch (TimeStampTooSmallException e)
        {
            // No data exists before the requested time: surface the summary (including
            // the retry hint) to the caller instead of logging it.
            summary.AppendFormat("\r\n");
            summary.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tTime(Local):{2}\tUnixTimestamp:{3}\t",
                "NA since no data before the time you specified, please retry with a bigger value.",
                KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(requestedMillis).ToString("s"),
                requestedTime.ToString("s"),
                requestedMillis);
            throw new ApplicationException(summary.ToString(), e);
        }
    }

    Logger.Info(summary.ToString());
}
/// <summary>
/// Parses a user-supplied offset argument into a long.
/// "earliest" / "latest" / "last" (case-insensitive) map to 0 — the caller
/// distinguishes those modes by the accompanying offset type, not this value.
/// Otherwise the string is interpreted first as a timestamp (converted to Unix
/// milliseconds) and then as a plain integer offset.
/// </summary>
/// <exception cref="ArgumentNullException">offset is null or empty.</exception>
/// <exception cref="ArgumentException">offset is neither a keyword, timestamp, nor integer.</exception>
internal static long ConvertOffset(string offset)
{
    if (string.IsNullOrEmpty(offset))
    {
        throw new ArgumentNullException("offset");
    }

    // Keyword offsets carry no numeric position of their own.
    string normalized = offset.ToLower(CultureInfo.InvariantCulture);
    if (normalized == "earliest" || normalized == "latest" || normalized == "last")
    {
        return 0;
    }

    // A parseable timestamp wins over a numeric offset (note: DateTime.TryParse
    // uses the current culture, so "1/2/2020"-style input is culture-dependent).
    DateTime parsedTime;
    if (DateTime.TryParse(offset, out parsedTime))
    {
        return KafkaClientHelperUtils.ToUnixTimestampMillis(parsedTime);
    }

    long numericOffset;
    if (long.TryParse(offset, out numericOffset))
    {
        return numericOffset;
    }

    Logger.Error(string.Format("Error: invalid offset={0}, it should be either earliest|latest|last or an unsigned integer or a timestamp.", offset));
    throw new ArgumentException(string.Format("invalid offset={0}", offset));
}
/// <summary>
/// Builds a human-readable dump of one topic's metadata: partition count, replication
/// factor, per-partition leader/replica/ISR detail, optional offset ranges, and
/// leader/replica distribution per broker. Returns the formatted text (possibly a
/// partial result if a broker/host exception occurred mid-dump).
/// Side effects: accumulates leader/replica counts into the *All dictionaries and
/// fills latestOffset/latestLength per partition; also increments the shared
/// TopicMetadataRequestID counter for each broker request.
/// </summary>
/// <param name="partitionIndex">-1 dumps every partition; otherwise only the matching partition.</param>
/// <param name="timestamp">When not DateTime.MinValue, also queries the offset at this time per partition.</param>
internal static string DumpTopicMetadataAndOffsetInternal(ZooKeeperClient zkClient, string topic, string zookeeper, int partitionIndex, bool includePartitionDetailInfo, bool includeOffsetInfo, DateTime timestamp, SortedDictionary<int, int> parttionBrokerID_LeaderCountDistribAll, SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistribAll, SortedDictionary<int, long> latestOffset, SortedDictionary<int, long> latestLength)
{
    StringBuilder sb = new StringBuilder();
    // 's' holds the latest completed snapshot of 'sb' so the generic catch below can
    // log a partial result even if formatting failed partway through.
    string s = string.Empty;
    //BrokerID -->Count of as leader
    SortedDictionary<int, int> parttionBrokerID_LeaderCountDistrib = new SortedDictionary<int, int>();
    //BrokerID -->Count of as replica
    SortedDictionary<int, int> parttionBrokerID_ReplicaCountDistrib = new SortedDictionary<int, int>();
    try
    {
        if (string.IsNullOrEmpty(zookeeper))
        {
            Logger.Error(" zookeeper should be provided");
            sb.AppendFormat(DumpTopicError, topic);
        }
        else
        {
            KafkaSimpleManagerConfiguration config = new KafkaSimpleManagerConfiguration()
            {
                Zookeeper = zookeeper
            };
            config.Verify();
            // Replica assignment as recorded in zookeeper; entries are removed as each
            // partition is seen so the leftovers flag partitions missing from metadata.
            Dictionary<int, int[]> detailDataInZookeeper = ZkUtils.GetTopicMetadataInzookeeper(zkClient, topic);
            using (KafkaSimpleManager<int, Message> kafkaSimpleManager = new KafkaSimpleManager<int, Message>(config))
            {
                TopicMetadata topicMetadata = kafkaSimpleManager.RefreshMetadata(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, true);
                int partitionCount = topicMetadata.PartitionsMetadata.Count();
                sb.AppendFormat("Topic:{0}\tPartitionCount:{1}\t", topic, partitionCount);
                // Replication factor taken from the first partition — assumes it is
                // uniform across partitions (usually true; not verified here).
                int replicationFactor = Enumerable.Count<Broker>(topicMetadata.PartitionsMetadata.First().Replicas);
                sb.AppendFormat("ReplicationFactor:{0}\t", replicationFactor);
                //TODO: compare detailDataInZookeeper and check which one missed.
                StringBuilder sbDetail = new StringBuilder();
                if (includePartitionDetailInfo)
                {
                    long sumEndOffset = 0;
                    long sumLength = 0;
                    foreach (PartitionMetadata p in topicMetadata.PartitionsMetadata.OrderBy(r => r.PartitionId).ToList())
                    {
                        // NOTE(review): replicaInZookeeper is fetched but never used below —
                        // presumably intended for the missed-replica TODO.
                        int[] replicaInZookeeper = null;
                        if (detailDataInZookeeper.ContainsKey(p.PartitionId))
                        {
                            replicaInZookeeper = detailDataInZookeeper[p.PartitionId];
                            detailDataInZookeeper.Remove(p.PartitionId);
                        }

                        #region One partition
                        long earliest = 0;
                        long latest = 0;
                        if (partitionIndex == -1 || p.PartitionId == partitionIndex)
                        {
                            //sbDetail.AppendFormat("\tTopic:{0}", topic);
                            sbDetail.AppendFormat("\tPartition:{0}", p.PartitionId);
                            if (p.Leader != null)
                            {
                                sbDetail.AppendFormat("\tLeader:{0}", KafkaConsoleUtil.GetBrokerIDAndIP(p.Leader.Id));
                                // Count this broker as leader, both for this topic and across all topics.
                                if (parttionBrokerID_LeaderCountDistrib.ContainsKey(p.Leader.Id))
                                {
                                    parttionBrokerID_LeaderCountDistrib[p.Leader.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_LeaderCountDistrib.Add(p.Leader.Id, 1);
                                }
                                if (parttionBrokerID_LeaderCountDistribAll.ContainsKey(p.Leader.Id))
                                {
                                    parttionBrokerID_LeaderCountDistribAll[p.Leader.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_LeaderCountDistribAll.Add(p.Leader.Id, 1);
                                }
                            }
                            else
                            {
                                sbDetail.AppendFormat("\tLeader:NoLeader!");
                            }
                            sbDetail.AppendFormat("\tReplicas:{0}", string.Join(",", p.Replicas.Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(r.Id)).ToArray()));
                            // Count each replica broker, per-topic and across all topics.
                            foreach (Broker b in p.Replicas)
                            {
                                if (parttionBrokerID_ReplicaCountDistrib.ContainsKey(b.Id))
                                {
                                    parttionBrokerID_ReplicaCountDistrib[b.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_ReplicaCountDistrib.Add(b.Id, 1);
                                }
                                if (parttionBrokerID_ReplicaCountDistribAll.ContainsKey(b.Id))
                                {
                                    parttionBrokerID_ReplicaCountDistribAll[b.Id]++;
                                }
                                else
                                {
                                    parttionBrokerID_ReplicaCountDistribAll.Add(b.Id, 1);
                                }
                            }
                            //sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", p.Isr.Select(r => r.Id).ToArray()));
                            // ISR is read from zookeeper rather than the metadata response (see commented line above).
                            ArrayList isrs = GetIsr(zkClient, topic, p.PartitionId);
                            sbDetail.AppendFormat("\tIsr:{0}", string.Join(",", isrs.ToArray().Select(r => KafkaConsoleUtil.GetBrokerIDAndIP(Convert.ToInt32(r)))));
                            //TODO: add missed replica
                            #region Offset
                            if (includeOffsetInfo)
                            {
                                try
                                {
                                    kafkaSimpleManager.RefreshAndGetOffset(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, true, out earliest, out latest);
                                    sumEndOffset += latest;
                                    sumLength += (latest - earliest);
                                    sbDetail.AppendFormat("\tlength:{2}\tearliest:{0}\tlatest:{1}"
                                        , earliest
                                        , latest
                                        , (latest - earliest) == 0 ? "(empty)" : (latest - earliest).ToString());
                                    sbDetail.AppendFormat("\r\n");
                                    latestOffset.Add(p.PartitionId, latest);
                                    latestLength.Add(p.PartitionId, latest - earliest);
                                }
                                catch (NoLeaderForPartitionException e)
                                {
                                    // Keep dumping the other partitions; record the error inline.
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                catch (UnableToConnectToHostException e)
                                {
                                    sbDetail.AppendFormat(" ERROR:{0}\r\n", e.Message);
                                }
                                // DateTime.MinValue is the sentinel for "no timestamp query requested".
                                if (timestamp != DateTime.MinValue)
                                {
                                    long timestampLong = KafkaClientHelperUtils.ToUnixTimestampMillis(timestamp);
                                    try
                                    {
                                        long timeStampOffset = kafkaSimpleManager.RefreshAndGetOffsetByTimeStamp(KafkaNETExampleConstants.DefaultVersionId, ClientID, TopicMetadataRequestID++, topic, p.PartitionId, timestamp);
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , timeStampOffset
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                    catch (TimeStampTooSmallException)
                                    {
                                        sbDetail.AppendFormat("\t\ttimeStampOffset:{0}\ttimestamp(UTC):{1}\tUnixTimestamp:{2}\t"
                                            , "NA since no data before the time you specified, please retry with a bigger value."
                                            , KafkaClientHelperUtils.DateTimeFromUnixTimestampMillis(timestampLong).ToString("s")
                                            , timestampLong);
                                        sbDetail.AppendFormat("\r\n");
                                    }
                                }
                            }
                            #endregion
                        }
                        #endregion
                    }
                    if (includeOffsetInfo)
                    {
                        sb.AppendFormat("SumeEndOffset:{0:0,0} SumLength:{1:0,0}\r\n", sumEndOffset, sumLength);
                    }
                    else
                    {
                        sb.AppendFormat("\r\n");
                    }
                    // Anything left in the zookeeper map was never returned by the broker metadata.
                    if (detailDataInZookeeper.Any())
                    {
                        foreach (KeyValuePair<int, int[]> kv in detailDataInZookeeper)
                        {
                            sb.AppendFormat("=ERROR=MISSED partition= {0} Replicas {1} ", kv.Key, string.Join(",", kv.Value.Select(r => r.ToString()).ToArray()));
                        }
                    }
                }
                sb.Append(sbDetail.ToString());
                sb.AppendFormat("\tBroker as leader distribution======={0}=======\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tLeadPartition count\r\n");
                foreach (KeyValuePair<int, int> kv in parttionBrokerID_LeaderCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                sb.AppendFormat("\tBroker as replica distribution========={0}=====\r\n", topic);
                sb.AppendFormat("\r\tBrokerID\tReplication count count\r\n");
                foreach (KeyValuePair<int, int> kv in parttionBrokerID_ReplicaCountDistrib)
                {
                    sb.AppendFormat("\t\t{0}\t{1}\r\n", KafkaConsoleUtil.GetBrokerIDAndIP(kv.Key), kv.Value);
                }
                sb.AppendFormat("\r\n");
            }
            s = sb.ToString();
        }
    }
    catch (NoBrokerForTopicException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==NoBrokerForTopicException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (UnableToConnectToHostException e)
    {
        sb.AppendFormat("\r\nTopic:{0}\t ==UnableToConnectToHostException:{1}!!!== \r\n", topic, e.Message);
        s = sb.ToString();
    }
    catch (Exception ex)
    {
        // Unexpected failure: log everything (inputs + partial result) and fall through,
        // returning whatever snapshot 's' holds.
        Logger.ErrorFormat("Dump topic got exception:{0}\r\ninput parameter:Topic:{1}\tZookeeper:{2}\tKafka:{3}\tPartionIndex:{4}\tincludePartitionDetailInfo:{5}\tincludeOffsetInfo:{6}\ttimestamp:{7}\r\nPartial result:{8}"
            , ExceptionUtil.GetExceptionDetailInfo(ex), topic, zookeeper, string.Empty, partitionIndex, includePartitionDetailInfo, includeOffsetInfo, timestamp, s);
    }
    return (s);
}
/// <summary>
/// Queries the broker for offsets at or before the given UTC timestamp on one
/// partition and returns the first (largest) such offset, or -1 when the broker
/// returns no offsets for that partition.
/// Note: versionId/clientId/correlationId are accepted for interface compatibility
/// but are not passed to the offset request here.
/// </summary>
public long RefreshAndGetOffsetByTimeStamp(short versionId, string clientId, int correlationId, string topic, int partitionId, DateTime timeStampInUTC)
{
    using (Consumer consumer = this.GetConsumer(topic, partitionId))
    {
        // Request up to 8 offsets at/before the target time for this single partition.
        List<PartitionOffsetRequestInfo> partitionRequests = new List<PartitionOffsetRequestInfo>();
        partitionRequests.Add(new PartitionOffsetRequestInfo(partitionId, KafkaClientHelperUtils.ToUnixTimestampMillis(timeStampInUTC), 8));
        Dictionary<string, List<PartitionOffsetRequestInfo>> requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
        requestInfo.Add(topic, partitionRequests);

        OffsetResponse offsetResponse = consumer.GetOffsetsBefore(new OffsetRequest(requestInfo));
        List<PartitionOffsetsResponse> partitionOffsets = null;
        if (offsetResponse.ResponseMap.TryGetValue(topic, out partitionOffsets))
        {
            foreach (var p in partitionOffsets)
            {
                // BUG FIX: previously returned partitionOffsetByTimeStamp[0].Offsets[0],
                // i.e. the FIRST entry's offset regardless of which entry matched
                // partitionId. Return the matched entry's offset instead.
                if (p.PartitionId == partitionId && p.Offsets.Count > 0)
                {
                    return p.Offsets[0];
                }
            }
        }
    }
    // No offset available for this partition before the requested time.
    return -1;
}