Example No. 1
        private IDictionary <string, IList <string> > GetConsumersPerTopic(string group)
        {
            var consumers            = this.zkClient.GetChildrenParentMayNotExist(this.dirs.ConsumerRegistryDir);
            var consumersPerTopicMap = new Dictionary <string, IList <string> >();

            foreach (var consumer in consumers)
            {
                TopicCount topicCount = GetTopicCount(consumer);
                foreach (KeyValuePair <string, IList <string> > consumerThread in topicCount.GetConsumerThreadIdsPerTopic())
                {
                    foreach (string consumerThreadId in consumerThread.Value)
                    {
                        if (!consumersPerTopicMap.ContainsKey(consumerThread.Key))
                        {
                            consumersPerTopicMap.Add(consumerThread.Key, new List <string> {
                                consumerThreadId
                            });
                        }
                        else
                        {
                            consumersPerTopicMap[consumerThread.Key].Add(consumerThreadId);
                        }
                    }
                }
            }

            // Sort each topic's consumer thread ids. Sorting item.Value.ToList() would only
            // order a throwaway copy, so the sorted list is written back into the map instead.
            foreach (var topic in consumersPerTopicMap.Keys.ToList())
            {
                consumersPerTopicMap[topic] = consumersPerTopicMap[topic].OrderBy(id => id).ToList();
            }

            return(consumersPerTopicMap);
        }
        public static IDictionary <string, List <string> > GetConsumersPerTopic(ZkClient zkClient, string group)
        {
            var dirs                = new ZKGroupDirs(group);
            var consumers           = GetChildrenParentMayNotExist(zkClient, dirs.ConsumerRegistryDir);
            var consumerPerTopicMap = new Dictionary <string, List <string> >();

            foreach (var consumer in consumers)
            {
                var topicCount = TopicCount.ConstructTopicCount(group, consumer, zkClient);
                foreach (var topicAndConsumer in topicCount.GetConsumerThreadIdsPerTopic())
                {
                    var topic = topicAndConsumer.Key;
                    var consumerThreadIdSet = topicAndConsumer.Value;
                    foreach (var consumerThreadId in consumerThreadIdSet)
                    {
                        // Look the topic up once; TryGetValue avoids depending on a custom
                        // Get() extension method for the null-on-missing lookup.
                        List <string> curConsumers;
                        if (consumerPerTopicMap.TryGetValue(topic, out curConsumers))
                        {
                            curConsumers.Add(consumerThreadId);
                        }
                        else
                        {
                            consumerPerTopicMap[topic] = new List <string> {
                                consumerThreadId
                            };
                    }
                }
            }

            consumerPerTopicMap = consumerPerTopicMap.ToDictionary(x => x.Key, x => x.Value.OrderBy(y => y).ToList());

            return(consumerPerTopicMap);
        }
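
A minimal usage sketch for the static overload above (illustrative only; the zkClient instance and group name are assumptions, not part of the original example):

        // Assumes an already-connected ZkClient named zkClient (hypothetical here).
        var consumersPerTopic = GetConsumersPerTopic(zkClient, "my-consumer-group");
        foreach (var entry in consumersPerTopic)
        {
            Console.WriteLine("Topic {0}: {1}", entry.Key, string.Join(", ", entry.Value));
        }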
Example No. 3
 public ZKSessionExpireListener(ZKGroupDirs dirs, string consumerIdString, TopicCount topicCount, ZKRebalancerListener loadBalancerListener, ZookeeperConsumerConnector zkConsumerConnector)
 {
     this.consumerIdString     = consumerIdString;
     this.loadBalancerListener = loadBalancerListener;
     this.zkConsumerConnector  = zkConsumerConnector;
     this.dirs       = dirs;
     this.topicCount = topicCount;
 }
Example No. 4
 internal ZKRebalancerListener(
     ConsumerConfiguration config,
     string consumerIdString,
     IDictionary <string, IDictionary <int, PartitionTopicInfo> > topicRegistry,
     IZooKeeperClient zkClient,
     ZookeeperConsumerConnector zkConsumerConnector,
     IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > queues,
     Fetcher fetcher,
     IDictionary <string, IList <KafkaMessageStream <TData> > > kafkaMessageStreams,
     TopicCount topicCount)
 {
     this.consumerIdString = consumerIdString;
     this.config           = config;
     this.topicRegistry    = topicRegistry;
     this.zkClient         = zkClient;
     dirs = new ZKGroupDirs(config.GroupId);
     this.zkConsumerConnector = zkConsumerConnector;
     this.queues              = queues;
     this.fetcher             = fetcher;
     this.kafkaMessageStreams = kafkaMessageStreams;
     this.topicCount          = topicCount;
 }
Example No. 5
        private TopicCount GetTopicCount(string consumerId)
        {
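            // In the classic ZooKeeper-based consumer layout, ConsumerRegistryDir typically
            // resolves to /consumers/<group>/ids, and each child node holds that consumer's
            // serialized topic-count registration.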
            var topicCountJson = zkClient.ReadData <string>(dirs.ConsumerRegistryDir + "/" + consumerId);

            return(TopicCount.ConstructTopicCount(consumerId, topicCountJson));
        }
Example No. 6
        private bool Rebalance(Cluster cluster, CancellationTokenSource cancellationTokenSource)
        {
            TopicCount topicCount = this.GetTopicCount(this.consumerIdString);
            IDictionary <string, IList <string> > topicThreadIdsMap = topicCount.GetConsumerThreadIdsPerTopic();

            if (!topicThreadIdsMap.Any())
            {
                Logger.ErrorFormat("Consumer ID is not registered to any topics in ZK. Exiting rebalance");
                return(false);
            }
            var consumersPerTopicMap = this.GetConsumersPerTopic(this.config.GroupId);

            var brokers = ZkUtils.GetAllBrokersInCluster(zkClient);

            if (!brokers.Any())
            {
                Logger.Warn("No brokers found when trying to rebalance.");
                zkClient.Subscribe(ZooKeeperClient.DefaultBrokerIdsPath, this);
                this.zkConsumerConnector.subscribedChildCollection.Add(new Tuple <string, IZooKeeperChildListener>(ZooKeeperClient.DefaultBrokerIdsPath, this));
                Logger.ErrorFormat("Subscribe count: subscribedChildCollection:{0} , subscribedZookeeperStateCollection:{1} subscribedZookeeperDataCollection:{2} "
                                   , this.zkConsumerConnector.subscribedChildCollection.Count, this.zkConsumerConnector.subscribedZookeeperStateCollection.Count, this.zkConsumerConnector.subscribedZookeeperDataCollection.Count);
                return(false);
            }

            var partitionsPerTopicMap = ZkUtils.GetPartitionsForTopics(this.zkClient, topicThreadIdsMap.Keys);

            // Check if we've been canceled externally before we dive into the rebalance
            if (cancellationTokenSource.IsCancellationRequested)
            {
                Logger.ErrorFormat("Rebalance operation has been canceled externally by a future rebalance event. Exiting immediately");
                return(false);
            }

            this.CloseFetchers(cluster, topicThreadIdsMap, this.zkConsumerConnector);
            this.ReleasePartitionOwnership(topicThreadIdsMap);

            try
            {
                foreach (var item in topicThreadIdsMap)
                {
                    var topic = item.Key;
                    var consumerThreadIdSet = item.Value;

                    topicRegistry.Add(topic, new ConcurrentDictionary <int, PartitionTopicInfo>());

                    var           topicDirs    = new ZKGroupTopicDirs(config.GroupId, topic);
                    List <string> curConsumers = new List <string>(consumersPerTopicMap[topic]);
                    curConsumers.Sort();

                    List <string> curPartitions = partitionsPerTopicMap[topic].OrderBy(p => int.Parse(p)).ToList();

                    Logger.InfoFormat(
                        "{4} Partitions. {5} ConsumerClients.  Consumer {0} rebalancing the following partitions: {1} for topic {2} with consumers: {3}",
                        this.consumerIdString,
                        string.Join(",", curPartitions),
                        topic,
                        string.Join(",", curConsumers),
                        curPartitions.Count,
                        curConsumers.Count);

                    var numberOfPartsPerConsumer = curPartitions.Count / curConsumers.Count;
                    Logger.Info("Number of partitions per consumer is: " + numberOfPartsPerConsumer);

                    var numberOfConsumersWithExtraPart = curPartitions.Count % curConsumers.Count;
                    Logger.Info("Number of consumers with an extra partition are: " + numberOfConsumersWithExtraPart);

                    foreach (string consumerThreadId in consumerThreadIdSet)
                    {
                        var myConsumerPosition = curConsumers.IndexOf(consumerThreadId);
                        Logger.Info("Consumer position for consumer " + consumerThreadId + " is: " + myConsumerPosition);

                        if (myConsumerPosition < 0)
                        {
                            continue;
                        }

                        var startPart = (numberOfPartsPerConsumer * myConsumerPosition) +
                                        Math.Min(myConsumerPosition, numberOfConsumersWithExtraPart);
                        Logger.Info("Starting partition is: " + startPart);

                        var numberOfParts = numberOfPartsPerConsumer + (myConsumerPosition + 1 > numberOfConsumersWithExtraPart ? 0 : 1);
                        Logger.Info("Number of partitions to work on is: " + numberOfParts);

                        if (numberOfParts <= 0)
                        {
                            Logger.InfoFormat("No broker partitions consumed by consumer thread {0} for topic {1}", consumerThreadId, item.Key);
                        }
                        else
                        {
                            for (int i = startPart; i < startPart + numberOfParts; i++)
                            {
                                var partition = curPartitions[i];

                                Logger.InfoFormat("{0} attempting to claim partition {1}", consumerThreadId, partition);
                                bool ownPartition = ProcessPartition(topicDirs, partition, topic, consumerThreadId, curConsumers, curPartitions, cancellationTokenSource);
                                if (!ownPartition)
                                {
                                    Logger.InfoFormat("{0} failed to claim partition {1} for topic {2}. Exiting rebalance", consumerThreadId, partition, topic);
                                    return(false);
                                }
                            }
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("error when rebalance: {0}", ex.FormatException());
                return(false);
            }

            // If we get here, we know that we have owned all partitions successfully,
            // therefore it is safe to update fetcher threads and begin dequeuing
            Logger.Info("All partitions were successfully owned. Updating fetchers");

            this.UpdateFetcher(cluster);

            return(true);
        }
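
The partition range assignment inside the loop above can be hard to follow inline. Below is a minimal, self-contained sketch of the same arithmetic; the helper name and signature are hypothetical, not part of the original code, and it relies only on System, System.Collections.Generic, and System.Linq:

        // Hypothetical helper for illustration: the partition indexes a consumer at the given
        // sorted position would own under the range rule used by Rebalance() above.
        private static IEnumerable <int> GetPartitionIndexes(int consumerPosition, int partitionCount, int consumerCount)
        {
            var numberOfPartsPerConsumer       = partitionCount / consumerCount;
            var numberOfConsumersWithExtraPart = partitionCount % consumerCount;
            var startPart = (numberOfPartsPerConsumer * consumerPosition) +
                            Math.Min(consumerPosition, numberOfConsumersWithExtraPart);
            var numberOfParts = numberOfPartsPerConsumer +
                                (consumerPosition + 1 > numberOfConsumersWithExtraPart ? 0 : 1);
            return Enumerable.Range(startPart, numberOfParts);
        }

For example, GetPartitionIndexes(0, 5, 2) yields partitions 0, 1, 2 and GetPartitionIndexes(1, 5, 2) yields 3, 4, matching the worked example in the comments above.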