Example no. 1
        private IDictionary<string, IList<KafkaStream<TKey, TValue>>> Consume<TKey, TValue>(IDictionary<string, int> topicCountMap, IDecoder<TKey> keyDecoder, IDecoder<TValue> valueDecoder)
        {
            Logger.Debug("entering consume");
            if (topicCountMap == null)
            {
                throw new ArgumentNullException("topicCountMap");
            }

            var topicCount = TopicCount.ConstructTopicCount(consumerIdString, topicCountMap);

            var topicThreadIds = topicCount.GetConsumerThreadIdsPerTopic();

            // make a list of (queue,stream) pairs, one pair for each threadId
            var queuesAndStreams = topicThreadIds.Values.SelectMany(threadIdSet => threadIdSet.Select(_ =>
            {
                var queue  = new BlockingCollection<FetchedDataChunk>(this.Config.QueuedMaxMessages);
                var stream = new KafkaStream<TKey, TValue>(
                    queue, this.Config.ConsumerTimeoutMs, keyDecoder, valueDecoder, this.Config.ClientId);
                return Tuple.Create(queue, stream);
            })).ToList();

            var dirs = new ZKGroupDirs(this.Config.GroupId);

            // Register this consumer's subscription under the group in ZooKeeper,
            // then bind the queue/stream pairs and trigger a rebalance.
            this.RegisterConsumerInZK(dirs, consumerIdString, topicCount);
            ReinitializeConsumer(topicCount, queuesAndStreams);

            return (IDictionary<string, IList<KafkaStream<TKey, TValue>>>)loadBalancerListener.KafkaMessageAndMetadataStreams;
        }
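
For context, the returned dictionary maps each topic to its list of KafkaStream instances, one per consumer thread requested in topicCountMap, and each stream is typically drained on its own thread. The sketch below is only illustrative: the public CreateMessageStreams entry point, the StringDecoder type, and enumerating a KafkaStream as in the Scala high-level consumer are assumptions about this port, not confirmed API.

    // Illustrative usage sketch. Assumptions (hypothetical for this port): a public
    // CreateMessageStreams wrapper around Consume, a StringDecoder : IDecoder<string>,
    // and KafkaStream<TKey, TValue> being enumerable as in the Scala high-level consumer.
    var topicCountMap = new Dictionary<string, int> { { "my-topic", 2 } };

    var streamsByTopic = connector.CreateMessageStreams(
        topicCountMap, new StringDecoder(), new StringDecoder());

    foreach (var stream in streamsByTopic["my-topic"])
    {
        var s = stream; // capture the loop variable for the closure
        new Thread(() =>
        {
            // Each stream is backed by one BlockingCollection<FetchedDataChunk>;
            // iterating it blocks until decoded messages arrive (or ConsumerTimeoutMs expires).
            foreach (var messageAndMetadata in s)
            {
                Console.WriteLine(messageAndMetadata);
            }
        }).Start();
    }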
Example no. 2
            internal WildcardStreamsHandler(
                ZookeeperConsumerConnector parent,
                TopicFilter topicFilter,
                int numStreams,
                IDecoder<TKey> keyDecoder,
                IDecoder<TValue> valueDecoder)
            {
                this.parent       = parent;
                this.topicFilter  = topicFilter;
                this.numStreams   = numStreams;
                this.keyDecoder   = keyDecoder;
                this.valueDecoder = valueDecoder;

                if (parent.messageStreamCreated.GetAndSet(true))
                {
                    throw new Exception("Each consumer connector can create message streams by filter at most once.");
                }

                this.wildcardQueuesAndStreams = Enumerable.Range(1, numStreams).Select(e =>
                {
                    var queue  = new BlockingCollection<FetchedDataChunk>(this.parent.Config.QueuedMaxMessages);
                    var stream = new KafkaStream<TKey, TValue>(
                        queue,
                        this.parent.Config.ConsumerTimeoutMs,
                        keyDecoder,
                        valueDecoder,
                        this.parent.Config.ClientId);
                    return Tuple.Create(queue, stream);
                }).ToList();

                this.wildcardTopics =
                    ZkUtils.GetChildrenParentMayNotExist(this.parent.zkClient, ZkUtils.BrokerTopicsPath)
                    .Where(topicFilter.IsTopicAllowed)
                    .ToList();

                this.wildcardTopicCount = TopicCount.ConstructTopicCount(
                    this.parent.consumerIdString, topicFilter, numStreams, this.parent.zkClient);

                this.dirs = new ZKGroupDirs(this.parent.Config.GroupId);

                this.parent.RegisterConsumerInZK(dirs, this.parent.consumerIdString, this.wildcardTopicCount);
                this.parent.ReinitializeConsumer(this.wildcardTopicCount, this.wildcardQueuesAndStreams);

                // Topic events will trigger subsequent synced rebalances.
                Logger.InfoFormat("Creating topic event watcher for topics {0}", topicFilter);
                this.parent.wildcardTopicWatcher = new ZookeeperTopicEventWatcher(this.parent.zkClient, this);
            }
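
This constructor is normally reached through a filter-based stream creation call: the filter is applied to the topics currently found under ZkUtils.BrokerTopicsPath, and new matching topics are picked up later via the ZookeeperTopicEventWatcher. The sketch below mirrors the Scala consumer's createMessageStreamsByFilter API; the CreateMessageStreamsByFilter name, the Whitelist filter, and DefaultDecoder are assumptions about this port.

    // Illustrative sketch. Assumptions (hypothetical for this port): a public
    // CreateMessageStreamsByFilter entry point, a regex-based Whitelist : TopicFilter,
    // and a DefaultDecoder that passes raw bytes through.
    var filter = new Whitelist("events\\..*");

    // numStreams queue/stream pairs are created up front; topics matching the filter are
    // spread across them during the rebalance, and topic create/delete events trigger
    // further rebalances through the topic event watcher registered in the constructor.
    var streams = connector.CreateMessageStreamsByFilter(
        filter, 2, new DefaultDecoder(), new DefaultDecoder());

    foreach (var stream in streams)
    {
        // Drain each KafkaStream on its own thread, as in the example above.
    }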