Example #1
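        /// <summary>
        /// Builds one KafkaMessageStream per consumer thread for each requested topic, registers this
        /// consumer under its group in ZooKeeper, subscribes the rebalance and session-expiry listeners,
        /// and triggers an initial synchronized rebalance before returning the topic-to-streams map.
        /// </summary>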
        private IDictionary<string, IList<KafkaMessageStream>> Consume(IDictionary<string, int> topicCountDict)
        {
            Logger.Debug("entering consume");

            if (topicCountDict == null)
            {
                throw new ArgumentNullException(nameof(topicCountDict));
            }

            var dirs   = new ZKGroupDirs(this.config.GroupId);
            var result = new Dictionary<string, IList<KafkaMessageStream>>();

            var    guid             = Guid.NewGuid().ToString().Replace("-", string.Empty).Substring(0, 8);
            string consumerUuid     = string.Format("{0}-{1}-{2}", Dns.GetHostName(), DateTime.Now.Ticks, guid);
            string consumerIdString = this.config.GroupId + "_" + consumerUuid;
            var    topicCount       = new TopicCount(consumerIdString, topicCountDict);

            // listener for consumer and partition changes
            var loadBalancerListener = new ZKRebalancerListener(
                this.config,
                consumerIdString,
                this.topicRegistry,
                this.zkClient,
                this,
                queues,
                this.fetcher,
                this.syncLock,
                result);

            this.RegisterConsumerInZk(dirs, consumerIdString, topicCount);
            this.zkClient.Subscribe(dirs.ConsumerRegistryDir, loadBalancerListener);

            //// create a queue per topic per consumer thread
            var consumerThreadIdsPerTopicMap = topicCount.GetConsumerThreadIdsPerTopic();

            foreach (var topic in consumerThreadIdsPerTopicMap.Keys)
            {
                var streamList = new List<KafkaMessageStream>();
                foreach (string threadId in consumerThreadIdsPerTopicMap[topic])
                {
                    var stream = new BlockingCollection<FetchedDataChunk>(new ConcurrentQueue<FetchedDataChunk>(), config.MaxQueuedChunks);
                    this.queues.Add(new Tuple<string, string>(topic, threadId), stream);
                    streamList.Add(new KafkaMessageStream(stream, this.config.Timeout));
                }

                result.Add(topic, streamList);
                Logger.DebugFormat(CultureInfo.CurrentCulture, "adding topic {0} and stream to map...", topic);

                // register on broker partition path changes
                string partitionPath = ZooKeeperClient.DefaultBrokerTopicsPath + "/" + topic;
                this.zkClient.MakeSurePersistentPathExists(partitionPath);
                this.zkClient.Subscribe(partitionPath, loadBalancerListener);
            }

            //// register listener for session expired event
            this.zkClient.Subscribe(new ZKSessionExpireListener(dirs, consumerIdString, topicCount, loadBalancerListener, this));

            //// explicitly trigger load balancing for this consumer
            lock (this.syncLock)
            {
                loadBalancerListener.SyncedRebalance();
            }

            return result;
        }
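
For context, a minimal sketch of how the streams this method returns might be driven. Consume is private, so the sketch assumes the surrounding connector object exposes it through a public entry point (called CreateMessageStreams here, a hypothetical name) and that KafkaMessageStream can be enumerated to pull Message objects, blocking up to the configured timeout; connector and ProcessMessage are likewise assumptions, not shown above.

        // Hypothetical caller; connector, CreateMessageStreams and ProcessMessage are assumptions.
        var topicCountDict = new Dictionary<string, int> { { "test-topic", 2 } };   // two consumer threads for the topic
        IDictionary<string, IList<KafkaMessageStream>> streams = connector.CreateMessageStreams(topicCountDict);

        foreach (KafkaMessageStream stream in streams["test-topic"])
        {
            foreach (Message message in stream)   // blocks until a chunk arrives or the timeout elapses
            {
                ProcessMessage(message);
            }
        }
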
Example #2
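        /// <summary>
        /// Generic variant: builds one decoded KafkaMessageStream per consumer thread and topic using the
        /// supplied IDecoder, registers the consumer in ZooKeeper together with its rebalance, session-expiry
        /// and partition-leader listeners, and starts an asynchronous initial rebalance before returning
        /// the topic-to-streams map.
        /// </summary>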
        private IDictionary<string, IList<KafkaMessageStream<TData>>> Consume<TData>(IDictionary<string, int> topicCountDict, IDecoder<TData> decoder)
        {
            Logger.Debug("entering consume");

            if (topicCountDict == null)
            {
                throw new ArgumentNullException(nameof(topicCountDict));
            }

            var dirs   = new ZKGroupDirs(this.config.GroupId);
            var result = new Dictionary<string, IList<KafkaMessageStream<TData>>>();

            string consumerIdString = GetConsumerIdString();
            var    topicCount       = new TopicCount(consumerIdString, topicCountDict);

            //// create a queue per topic per consumer thread
            var consumerThreadIdsPerTopicMap = topicCount.GetConsumerThreadIdsPerTopic();

            foreach (var topic in consumerThreadIdsPerTopicMap.Keys)
            {
                var streamList = new List<KafkaMessageStream<TData>>();
                foreach (string threadId in consumerThreadIdsPerTopicMap[topic])
                {
                    var stream = new BlockingCollection<FetchedDataChunk>(new ConcurrentQueue<FetchedDataChunk>());
                    this.queues.Add(new Tuple<string, string>(topic, threadId), stream);
                    streamList.Add(new KafkaMessageStream<TData>(topic, stream, this.config.Timeout, decoder));
                }

                result.Add(topic, streamList);
                Logger.InfoFormat("adding topic {0} and stream to map...", topic);
            }

            // listener for consumer and partition changes
            var loadBalancerListener = new ZKRebalancerListener<TData>(
                this.config,
                consumerIdString,
                this.topicRegistry,
                this.GetZkClient(),
                this,
                queues,
                this.fetcher,
                result,
                topicCount);

            if (this.consumerRebalanceHandler != null)
            {
                loadBalancerListener.ConsumerRebalance += this.consumerRebalanceHandler;
            }

            stopAsyncRebalancing.Add(loadBalancerListener.StopRebalance);
            this.RegisterConsumerInZk(dirs, consumerIdString, topicCount);

            //// register listener for session expired event
            var zkSessionExpireListener = new ZKSessionExpireListener<TData>(dirs, consumerIdString, topicCount, loadBalancerListener, this);

            if (this.zkSessionDisconnectedHandler != null)
            {
                zkSessionExpireListener.ZKSessionDisconnected += this.zkSessionDisconnectedHandler;
            }

            if (this.zkSessionExpiredHandler != null)
            {
                zkSessionExpireListener.ZKSessionExpired += this.zkSessionExpiredHandler;
            }

            this.GetZkClient().Subscribe(zkSessionExpireListener);
            this.subscribedZookeeperStateCollection.Add(zkSessionExpireListener);

            this.GetZkClient().Subscribe(dirs.ConsumerRegistryDir, loadBalancerListener);
            this.subscribedChildCollection.Add(new Tuple<string, IZooKeeperChildListener>(dirs.ConsumerRegistryDir, loadBalancerListener));

            result.ForEach(topicAndStreams =>
            {
                // register on broker partition path changes
                string partitionPath = ZooKeeperClient.DefaultBrokerTopicsPath + "/" + topicAndStreams.Key;
                if (this.GetZkClient().Exists(partitionPath))
                {
                    this.GetZkClient().Subscribe(partitionPath, loadBalancerListener);
                    this.subscribedChildCollection.Add(new Tuple<string, IZooKeeperChildListener>(partitionPath, loadBalancerListener));
                    // Create a mapping of all topic partitions and their current leaders
                    var topicsAndPartitions = ZkUtils.GetPartitionsForTopics(this.GetZkClient(), new[] { topicAndStreams.Key });
                    Dictionary<string, int> partitionLeaderMap = new Dictionary<string, int>();
                    foreach (var partitionId in topicsAndPartitions[topicAndStreams.Key])
                    {
                        // Find/parse current partition leader for this partition and add it
                        // to the mapping object
                        var partitionStatePath = partitionPath + "/partitions/" + partitionId + "/state";
                        this.GetZkClient().MakeSurePersistentPathExists(partitionStatePath);
                        int? partitionLeader = ZkUtils.GetLeaderForPartition(this.GetZkClient(), topicAndStreams.Key, int.Parse(partitionId));
                        partitionLeaderMap.Add(partitionStatePath, partitionLeader.GetValueOrDefault(-1));
                    }

                    // listen for changes on the state nodes for the partitions
                    // this will indicate when a leader switches, or the in sync replicas change
                    var leaderListener = new ZkPartitionLeaderListener<TData>(loadBalancerListener, partitionLeaderMap);
                    foreach (var partitionId in topicsAndPartitions[topicAndStreams.Key])
                    {
                        var partitionStatePath = partitionPath + "/partitions/" + partitionId + "/state";
                        this.GetZkClient().Subscribe(partitionStatePath, leaderListener);
                        this.subscribedZookeeperDataCollection.Add(new Tuple<string, IZooKeeperDataListener>(partitionStatePath, leaderListener));
                    }
                }
                else
                {
                    Logger.WarnFormat("The topic path at {0} does not exist.", partitionPath);
                }
            });

            //// explicitly trigger load balancing for this consumer
            Logger.Info("Performing rebalancing. A new consumer has been added to consumer group: " + dirs.ConsumerRegistryDir + ", consumer: " + consumerIdString);
            Logger.InfoFormat(
                "Subscribe count: subscribedChildCollection: {0}, subscribedZookeeperStateCollection: {1}, subscribedZookeeperDataCollection: {2}",
                subscribedChildCollection.Count, subscribedZookeeperStateCollection.Count, subscribedZookeeperDataCollection.Count);

            //// When a new consumer joins, wait for the rebalance to finish so that the Fetcher thread has started.
            loadBalancerListener.AsyncRebalance(DefaultWaitTimeForInitialRebalanceInSeconds * 1000);

            return result;
        }
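
The generic overload differs mainly in that each stream yields values already decoded by the supplied IDecoder. A similarly hedged sketch, again assuming a public CreateMessageStreams entry point on the connector and an existing IDecoder<string> instance (stringDecoder), neither of which is shown above:

        // Hypothetical caller for the generic overload; connector, CreateMessageStreams and stringDecoder are assumptions.
        var topicCountDict = new Dictionary<string, int> { { "test-topic", 1 } };
        IDictionary<string, IList<KafkaMessageStream<string>>> streams =
            connector.CreateMessageStreams(topicCountDict, stringDecoder);

        foreach (KafkaMessageStream<string> stream in streams["test-topic"])
        {
            foreach (string payload in stream)   // payloads arrive already decoded
            {
                Console.WriteLine(payload);
            }
        }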