public static int? WaitUntilLeaderIsElectedOrChanged(
            ZkClient zkClient, string topic, int partition, long timeoutMs, int? oldLeaderOpt = null)
        {
            var leaderLock            = new ReentrantLock();
            var leaderExistsOrChanged = leaderLock.NewCondition();

            if (oldLeaderOpt.HasValue == false)
            {
                Logger.InfoFormat("Waiting for leader to be elected for partition [{0},{1}]", topic, partition);
            }
            else
            {
                Logger.InfoFormat("Waiting for leader for partition [{0},{1}] to be changed from old leader {2}", topic, partition, oldLeaderOpt.Value);
            }

            leaderLock.Lock();
            try
            {
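                // Watch the partition's leader-and-ISR path; the listener is expected to signal
                // leaderExistsOrChanged when a leader is elected (or changes from oldLeaderOpt),
                // and Await blocks for up to timeoutMs waiting for that signal.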
                zkClient.SubscribeDataChanges(ZkUtils.GetTopicPartitionLeaderAndIsrPath(topic, partition), new LeaderExistsOrChangedListener(topic, partition, leaderLock, leaderExistsOrChanged, oldLeaderOpt, zkClient));
                leaderExistsOrChanged.Await(TimeSpan.FromMilliseconds(timeoutMs));

                // check if leader is elected
                var leader = ZkUtils.GetLeaderForPartition(zkClient, topic, partition);
                if (leader != null)
                {
                    if (oldLeaderOpt.HasValue == false)
                    {
                        Logger.InfoFormat("Leader {0} is elected for partition [{1},{2}]", leader, topic, partition);
                    }
                    else
                    {
                        Logger.InfoFormat(
                            "Leader for partition [{0},{1}] is changed from {2} to {3}",
                            topic,
                            partition,
                            oldLeaderOpt.Value,
                            leader);
                    }
                }
                else
                {
                    Logger.ErrorFormat("Timing out after {0} ms since leader is not elected for partition [{1},{2}]", timeoutMs, topic, partition);
                }

                return leader;
            }
            finally
            {
                leaderLock.Unlock();
            }
        }
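
A minimal usage sketch (the topic name and timeout are illustrative; the TestUtils call and assertion mirror Example #5 below):

            // wait up to 500 ms for a leader of partition 0 to be elected (or changed)
            int? leader = TestUtils.WaitUntilLeaderIsElectedOrChanged(zkClient, "new-topic", 0, 500);
            Assert.True(leader.HasValue);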
Example #2
        private void AddPartitionTopicInfo(ZKGroupTopicDirs topicDirs,
                                           string partition,
                                           string topic,
                                           string consumerThreadId)
        {
            var partitionId      = int.Parse(partition);
            var partTopicInfoMap = topicRegistry[topic];

            //find the leader for this partition
            var leaderOpt = ZkUtils.GetLeaderForPartition(zkClient, topic, partitionId);

            if (!leaderOpt.HasValue)
            {
                throw new NoBrokersForPartitionException(
                          string.Format("No leader available for partitions {0} on topic {1}", partition, topic));
            }
            Logger.InfoFormat("Leader for partition {0} for topic {1} is {2}", partition, topic, leaderOpt.Value);
            var leader = leaderOpt.Value;
            var znode  = topicDirs.ConsumerOffsetDir + "/" + partition;

            var offsetCommitedString = zkClient.ReadData <string>(znode, true);

            //if first time starting a consumer, set the initial offset based on the config
            long offset         = -1;
            long offsetCommited = -1;

            if (offsetCommitedString != null)
            {
                offsetCommited = long.Parse(offsetCommitedString);
                offset         = offsetCommited + 1;
            }
            Logger.InfoFormat("Final offset {0} for topic {1} partition {2} OffsetCommited {3}"
                              , offset, topic, partition, offsetCommited);

            var queue         = queues[new Tuple <string, string>(topic, consumerThreadId)];
            var partTopicInfo = new PartitionTopicInfo(
                topic,
                leader,
                partitionId,
                queue,
                offsetCommited,
                offset,
                config.FetchSize,
                offsetCommited);

            partTopicInfoMap[partitionId] = partTopicInfo;
            Logger.InfoFormat("{0} selected new offset {1}", partTopicInfo, offset);
        }
Example #3
        private IDictionary <string, IList <KafkaMessageStream <TData> > > Consume <TData>(IDictionary <string, int> topicCountDict, IDecoder <TData> decoder)
        {
            Logger.Debug("entering consume");

            if (topicCountDict == null)
            {
                throw new ArgumentNullException(nameof(topicCountDict));
            }

            var dirs   = new ZKGroupDirs(this.config.GroupId);
            var result = new Dictionary <string, IList <KafkaMessageStream <TData> > >();

            string consumerIdString = GetConsumerIdString();
            var    topicCount       = new TopicCount(consumerIdString, topicCountDict);

            //// create a queue per topic per consumer thread
            var consumerThreadIdsPerTopicMap = topicCount.GetConsumerThreadIdsPerTopic();

            foreach (var topic in consumerThreadIdsPerTopicMap.Keys)
            {
                var streamList = new List <KafkaMessageStream <TData> >();
                foreach (string threadId in consumerThreadIdsPerTopicMap[topic])
                {
                    var stream = new BlockingCollection <FetchedDataChunk>(new ConcurrentQueue <FetchedDataChunk>());
                    this.queues.Add(new Tuple <string, string>(topic, threadId), stream);
                    streamList.Add(new KafkaMessageStream <TData>(topic, stream, this.config.Timeout, decoder));
                }

                result.Add(topic, streamList);
                Logger.InfoFormat("adding topic {0} and stream to map...", topic);
            }

            // listener to consumer and partition changes
            var loadBalancerListener = new ZKRebalancerListener <TData>(
                this.config,
                consumerIdString,
                this.topicRegistry,
                this.GetZkClient(),
                this,
                queues,
                this.fetcher,
                result,
                topicCount);

            if (this.consumerRebalanceHandler != null)
            {
                loadBalancerListener.ConsumerRebalance += this.consumerRebalanceHandler;
            }

            stopAsyncRebalancing.Add(loadBalancerListener.StopRebalance);
            this.RegisterConsumerInZk(dirs, consumerIdString, topicCount);

            //// register listener for session expired event
            var zkSessionExpireListener = new ZKSessionExpireListener <TData>(dirs, consumerIdString, topicCount, loadBalancerListener, this);

            if (this.zkSessionDisconnectedHandler != null)
            {
                zkSessionExpireListener.ZKSessionDisconnected += this.zkSessionDisconnectedHandler;
            }

            if (this.zkSessionExpiredHandler != null)
            {
                zkSessionExpireListener.ZKSessionExpired += this.zkSessionExpiredHandler;
            }

            this.GetZkClient().Subscribe(zkSessionExpireListener);
            this.subscribedZookeeperStateCollection.Add(zkSessionExpireListener);

            this.GetZkClient().Subscribe(dirs.ConsumerRegistryDir, loadBalancerListener);
            this.subscribedChildCollection.Add(new Tuple <string, IZooKeeperChildListener>(dirs.ConsumerRegistryDir, loadBalancerListener));

            result.ForEach(topicAndStreams =>
            {
                // register on broker partition path changes
                string partitionPath = ZooKeeperClient.DefaultBrokerTopicsPath + "/" + topicAndStreams.Key;
                if (this.GetZkClient().Exists(partitionPath))
                {
                    this.GetZkClient().Subscribe(partitionPath, loadBalancerListener);
                    this.subscribedChildCollection.Add(new Tuple <string, IZooKeeperChildListener>(partitionPath, loadBalancerListener));
                    // Create a mapping of all topic partitions and their current leaders
                    var topicsAndPartitions = ZkUtils.GetPartitionsForTopics(this.GetZkClient(), new[] { topicAndStreams.Key });
                    Dictionary <string, int> partitionLeaderMap = new Dictionary <string, int>();
                    foreach (var partitionId in topicsAndPartitions[topicAndStreams.Key])
                    {
                        // Find/parse current partition leader for this partition and add it
                        // to the mapping object
                        var partitionStatePath = partitionPath + "/partitions/" + partitionId + "/state";
                        this.GetZkClient().MakeSurePersistentPathExists(partitionStatePath);
                        int? partitionLeader = ZkUtils.GetLeaderForPartition(this.GetZkClient(), topicAndStreams.Key, int.Parse(partitionId));
                        partitionLeaderMap.Add(partitionStatePath, partitionLeader.GetValueOrDefault(-1));
                    }

                    // listen for changes on the state nodes for the partitions
                    // this will indicate when a leader switches, or the in sync replicas change
                    var leaderListener = new ZkPartitionLeaderListener <TData>(loadBalancerListener, partitionLeaderMap);
                    foreach (var partitionId in topicsAndPartitions[topicAndStreams.Key])
                    {
                        var partitionStatePath = partitionPath + "/partitions/" + partitionId + "/state";
                        this.GetZkClient().Subscribe(partitionStatePath, leaderListener);
                        this.subscribedZookeeperDataCollection.Add(new Tuple <string, IZooKeeperDataListener>(partitionStatePath, leaderListener));
                    }
                }
                else
                {
                    Logger.WarnFormat("The topic path at {0}, does not exist.", partitionPath);
                }
            });

            //// explicitly trigger load balancing for this consumer
            Logger.Info("Performing rebalancing. A new consumer has been added to consumer group: " + dirs.ConsumerRegistryDir + ", consumer: " + consumerIdString);
            Logger.InfoFormat("Subscribe count: subscribedChildCollection:{0} , subscribedZookeeperStateCollection:{1} subscribedZookeeperDataCollection:{2} "
                              , subscribedChildCollection.Count, subscribedZookeeperStateCollection.Count, subscribedZookeeperDataCollection.Count);

            //// When a new consumer joins, wait for the rebalance to finish to make sure the Fetcher thread has started.
            loadBalancerListener.AsyncRebalance(DefaultWaitTimeForInitialRebalanceInSeconds * 1000);

            return result;
        }
Example #4
        private void AddPartitionTopicInfo(ZKGroupTopicDirs topicDirs, string partition, string topic, string consumerThreadId)
        {
            var partitionId      = int.Parse(partition);
            var partTopicInfoMap = this.topicRegistry[topic];

            //find the leader for this partition
            var leaderOpt = ZkUtils.GetLeaderForPartition(this.zkClient, topic, partitionId);

            if (!leaderOpt.HasValue)
            {
                throw new NoBrokersForPartitionException(string.Format("No leader available for partitions {0} on topic {1}", partition, topic));
            }
            else
            {
                Logger.InfoFormat("Leader for partition {0} for topic {1} is {2}", partition, topic, leaderOpt.Value);
            }
            var leader       = leaderOpt.Value;
            var znode        = topicDirs.ConsumerOffsetDir + "/" + partition;
            var offsetString = this.zkClient.ReadData <string>(znode, true);

            //if first time starting a consumer, set the initial offset based on the config
            long offset         = 0;
            long offsetCommited = 0;

            if (offsetString == null)
            {
                switch (config.AutoOffsetReset)
                {
                case OffsetRequest.SmallestTime:
                    offset = this.EarliestOrLatestOffset(topic, leader, partitionId, OffsetRequest.EarliestTime);
                    break;

                case OffsetRequest.LargestTime:
                    offset = this.EarliestOrLatestOffset(topic, leader, partitionId, OffsetRequest.LatestTime);
                    break;

                default:
                    throw new ConfigurationErrorsException("Wrong value in autoOffsetReset in ConsumerConfig");
                }
            }
            else
            {
                offsetCommited = long.Parse(offsetString);
                long latestOffset = this.EarliestOrLatestOffset(topic, leader, partitionId, OffsetRequest.LatestTime);
                offset = Math.Min(offsetCommited + 1, latestOffset);
                Logger.InfoFormat("Final offset {0} for topic {1} partition {2} OffsetCommited {3} latestOffset {4}"
                                  , offset, topic, partition, offsetCommited, latestOffset);
            }

            var queue         = this.queues[new Tuple <string, string>(topic, consumerThreadId)];
            var partTopicInfo = new PartitionTopicInfo(
                topic,
                leader,
                partitionId,
                queue,
                offsetCommited,
                offset,
                offset,
                this.config.FetchSize,
                offsetCommited);

            partTopicInfoMap[partitionId] = partTopicInfo;
            Logger.InfoFormat("{0} selected new offset {1}", partTopicInfo, offset);
        }
Example #5
        public void TestSendToNewTopic()
        {
            var producerConfig1 = new ProducerConfig
            {
                Serializer       = typeof(StringEncoder).AssemblyQualifiedName,
                KeySerializer    = typeof(StringEncoder).AssemblyQualifiedName,
                PartitionerClass =
                    typeof(StaticPartitioner).AssemblyQualifiedName,
                Brokers =
                    TestUtils.GetBrokerListFromConfigs(
                        new List <TempKafkaConfig> {
                    this.config1, this.config2
                }),
                RequestRequiredAcks = 2,
                RequestTimeoutMs    = 1000
            };

            var producerConfig2 = new ProducerConfig
            {
                Serializer       = typeof(StringEncoder).AssemblyQualifiedName,
                KeySerializer    = typeof(StringEncoder).AssemblyQualifiedName,
                PartitionerClass =
                    typeof(StaticPartitioner).AssemblyQualifiedName,
                Brokers =
                    TestUtils.GetBrokerListFromConfigs(
                        new List <TempKafkaConfig> {
                    this.config1, this.config2
                }),
                RequestRequiredAcks = 3,
                RequestTimeoutMs    = 1000
            };

            var topic = "new-topic";

            // create topic with 1 partition and await leadership
            AdminUtils.CreateTopic(this.ZkClient, topic, 1, 2, new Dictionary <string, string>());
            TestUtils.WaitUntilMetadataIsPropagated(this.servers, topic, 0, 1000);
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 0, 500);

            var producer1 = new Producer <string, string>(producerConfig1);
            var producer2 = new Producer <string, string>(producerConfig2);

            // Available partition ids should be 0.
            producer1.Send(new KeyedMessage <string, string>(topic, "test", "test1"));
            producer1.Send(new KeyedMessage <string, string>(topic, "test", "test2"));

            // get the leader
            var leaderOpt = ZkUtils.GetLeaderForPartition(ZkClient, topic, 0);

            Assert.True(leaderOpt.HasValue);

            var leader = leaderOpt.Value;

            var messageSet = (leader == this.config1.BrokerId)
                                 ? this.consumer1.Fetch(new FetchRequestBuilder().AddFetch(topic, 0, 0, 10000).Build())
                             .MessageSet("new-topic", 0)
                             .Iterator()
                             .ToEnumerable()
                             .ToList()
                                 : this.consumer2.Fetch(new FetchRequestBuilder().AddFetch(topic, 0, 0, 10000).Build())
                             .MessageSet("new-topic", 0)
                             .Iterator()
                             .ToEnumerable()
                             .ToList();

            Assert.Equal(2, messageSet.Count());
            Assert.Equal(new Message(Encoding.UTF8.GetBytes("test1"), Encoding.UTF8.GetBytes("test")), messageSet[0].Message);
            Assert.Equal(new Message(Encoding.UTF8.GetBytes("test2"), Encoding.UTF8.GetBytes("test")), messageSet[1].Message);
            producer1.Dispose();
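            // producer2 requires 3 acks, which is more than the number of replicas hosting the
            // partition, so the send below is expected to time out with FailedToSendMessageException.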

            try
            {
                producer2.Send(new KeyedMessage <string, string>(topic, "test", "test2"));
                Assert.False(true, "Should have timed out for 3 acks.");
            }
            catch (FailedToSendMessageException)
            {
            }
            finally
            {
                producer2.Dispose();
            }
        }