Example #1
 public ZKSessionExpireListener(ZookeeperConsumerConnector parent, ZKGroupDirs dirs, string consumerIdString, TopicCount topicCount, IZKRebalancerListener loadbalancerListener)
 {
     this.parent               = parent;
     this.Dirs                 = dirs;
     this.ConsumerIdString     = consumerIdString;
     this.TopicCount           = topicCount;
     this.LoadbalancerListener = loadbalancerListener;
 }
Example #2
        private void ReinitializeConsumer<TKey, TValue>(
            TopicCount topicCount, IList<Tuple<BlockingCollection<FetchedDataChunk>, KafkaStream<TKey, TValue>>> queuesAndStreams)
        {
            var dirs = new ZKGroupDirs(this.Config.GroupId);

            // listener to consumer and partition changes
            if (loadBalancerListener == null)
            {
                var topicStreamsMaps = new Dictionary<string, IList<KafkaStream<TKey, TValue>>>();
                loadBalancerListener = new ZKRebalancerListener <TKey, TValue>(this, this.Config.GroupId, consumerIdString, topicStreamsMaps);
            }

            // create listener for session expired event if it does not exist yet
            if (sessionExpirationListener == null)
            {
                sessionExpirationListener = new ZKSessionExpireListener(this,
                                                                        dirs, consumerIdString, topicCount, loadBalancerListener);
            }

            // create listener for topic partition change event if it does not exist yet
            if (topicPartitionChangeListener == null)
            {
                topicPartitionChangeListener = new ZKTopicPartitionChangeListener(this, loadBalancerListener);
            }

            var topicStreamsMap = (IDictionary<string, IList<KafkaStream<TKey, TValue>>>)loadBalancerListener.KafkaMessageAndMetadataStreams;

            // map of {topic -> Set(thread-1, thread-2, ...)}
            var consumerThreadIdsPerTopic = topicCount.GetConsumerThreadIdsPerTopic();

            IList<Tuple<BlockingCollection<FetchedDataChunk>, KafkaStream<TKey, TValue>>> allQueuesAndStreams = null;

            if (topicCount is WildcardTopicCount)
            {
                /*
                 * Wild-card consumption streams share the same queues, so we need to
                 * duplicate the list for the subsequent zip operation.
                 */
                allQueuesAndStreams = Enumerable.Range(1, consumerThreadIdsPerTopic.Keys.Count).SelectMany(_ => queuesAndStreams).ToList();
            }
            else if (topicCount is StaticTopicCount)
            {
                allQueuesAndStreams = queuesAndStreams;
            }

            var topicThreadIds = consumerThreadIdsPerTopic.SelectMany(topicAndThreadIds =>
            {
                var topic     = topicAndThreadIds.Key;
                var threadIds = topicAndThreadIds.Value;
                return threadIds.Select(id => Tuple.Create(topic, id));
            }).ToList();

            Contract.Assert(topicThreadIds.Count == allQueuesAndStreams.Count, string.Format("Mismatch between thread ID count ({0}) and queue count ({1})", topicThreadIds.Count, allQueuesAndStreams.Count));

            var threadQueueStreamPairs = topicThreadIds.Zip(allQueuesAndStreams, Tuple.Create).ToList();

            foreach (var e in threadQueueStreamPairs)
            {
                var topicThreadId = e.Item1;
                var q             = e.Item2.Item1;
                topicThreadIdAndQueues[topicThreadId] = q;
                Logger.DebugFormat("Adding topicThreadId {0} and queue {1} to topicThreadIdAndQueues Data structure", topicThreadId, string.Join(",", q));
                MetersFactory.NewGauge(this.Config.ClientId + "-" + this.Config.GroupId + "-" + topicThreadId.Item1 + "-" + topicThreadId.Item2 + "-FetchQueueSize", () => q.Count);
            }

            var groupedByTopic = threadQueueStreamPairs.GroupBy(x => x.Item1.Item1).ToList();

            foreach (var e in groupedByTopic)
            {
                var topic   = e.Key;
                var streams = e.Select(x => x.Item2.Item2).ToList();
                topicStreamsMap[topic] = streams;
                Logger.DebugFormat("adding topic {0} and {1} stream to map", topic, streams.Count);
            }

            // register the session expiration listener (ZooKeeper state changes) and the rebalance listener (consumer registry changes)
            zkClient.SubscribeStateChanges(sessionExpirationListener);

            zkClient.SubscribeChildChanges(dirs.ConsumerRegistryDir, loadBalancerListener);

            foreach (var topicAndStreams in topicStreamsMap)
            {
                // register on broker partition path changes
                var topicPath = ZkUtils.BrokerTopicsPath + "/" + topicAndStreams.Key;
                zkClient.SubscribeDataChanges(topicPath, topicPartitionChangeListener);
            }

            // explicitly trigger load balancing for this consumer
            loadBalancerListener.SyncedRebalance();
        }
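
The duplicate-then-zip pairing in Example #2 can be seen in isolation. The following is a minimal, self-contained sketch using only plain .NET collections and LINQ; the topic names, thread ids, "slot" strings, and the ZipPairingSketch class are invented placeholders for the client's queue/stream pairs, not part of the Kafka client API.

using System;
using System.Collections.Generic;
using System.Linq;

internal static class ZipPairingSketch
{
    public static void Main()
    {
        // map of {topic -> [thread ids]}, standing in for GetConsumerThreadIdsPerTopic()
        var consumerThreadIdsPerTopic = new Dictionary<string, IList<string>>
        {
            { "topicA", new List<string> { "consumer-1", "consumer-2" } },
            { "topicB", new List<string> { "consumer-1", "consumer-2" } },
        };

        // two shared "queue/stream" slots, standing in for queuesAndStreams
        var queuesAndStreams = new List<string> { "slot-0", "slot-1" };

        // wildcard case: repeat the shared slots once per topic so the counts line up for Zip
        var allQueuesAndStreams = Enumerable
            .Range(1, consumerThreadIdsPerTopic.Keys.Count)
            .SelectMany(_ => queuesAndStreams)
            .ToList();

        // flatten {topic -> threads} into (topic, threadId) tuples
        var topicThreadIds = consumerThreadIdsPerTopic
            .SelectMany(kv => kv.Value.Select(id => Tuple.Create(kv.Key, id)))
            .ToList();

        // same invariant as the Contract.Assert above: one slot per (topic, threadId)
        if (topicThreadIds.Count != allQueuesAndStreams.Count)
        {
            throw new InvalidOperationException("Mismatch between thread ID count and queue count");
        }

        // pair each (topic, threadId) with a queue/stream slot, as in threadQueueStreamPairs
        foreach (var pair in topicThreadIds.Zip(allQueuesAndStreams, Tuple.Create))
        {
            Console.WriteLine("{0}/{1} -> {2}", pair.Item1.Item1, pair.Item1.Item2, pair.Item2);
        }
    }
}

Because the wildcard streams share queues, the two topics above map onto the same two slots; with a StaticTopicCount the list would be used as-is, so the thread count must already match the queue count.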
Example #3
 public ZKTopicPartitionChangeListener(
     ZookeeperConsumerConnector parent, IZKRebalancerListener loadBalancerListener)
 {
     this.parent = parent;
     this.LoadbalancerListener = loadBalancerListener;
 }