        public void TestConsumerDecoder()
        {
            // send some messages to each broker
            var sentMessages = this.SendMessagesToBrokerPartition(
                Configs.First(), Topic, 0, nMessages)
                               .Union(
                this.SendMessagesToBrokerPartition(
                    Configs.First(), Topic, 1, nMessages))
                               .ToList();

            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 0, 1000);
            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 1, 1000);

            // create a consumer
            var consumerConfig = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer1);

            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 0, 500);
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 1, 500);

            var zkConsumerConnector = new ZookeeperConsumerConnector(consumerConfig);
            var topicMessageStreams =
                zkConsumerConnector.CreateMessageStreams(
                    new Dictionary <string, int> {
                { Topic, 1 }
            }, new StringDecoder(), new StringDecoder());

            var receivedMessages = this.GetMessages(nMessages * 2, topicMessageStreams);

            Assert.Equal(sentMessages.OrderBy(x => x).ToArray(), receivedMessages.OrderBy(x => x).ToArray());

            zkConsumerConnector.Shutdown();
        }
Example #2
        static void BalancedConsumer(string consumerGroupId, string uniqueConsumerId, string topic, int threads, string zookeeperServer, Action <Message> processMessage)
        {
            // Here we create a balanced consumer on one consumer machine for consumerGroupId. All machines consuming for this group will get balanced together
            ConsumerConfiguration config = new ConsumerConfiguration
            {
                AutoCommit = false,
                GroupId    = consumerGroupId,
                ConsumerId = uniqueConsumerId,
                ZooKeeper  = new ZooKeeperConfiguration(zookeeperServer, 30000, 30000, 2000)
            };
            var balancedConsumer = new ZookeeperConsumerConnector(config, true);

            // grab streams for desired topics
            var topicMap = new Dictionary <string, int>()
            {
                { topic, threads }
            };
            var streams            = balancedConsumer.CreateMessageStreams(topicMap, new DefaultDecoder());
            var kafkaMessageStream = streams[topic][0];

            // start consuming the stream
            foreach (Message message in kafkaMessageStream.GetCancellable(new CancellationToken()))
            {
                processMessage(message);
                balancedConsumer.CommitOffsets();
            }
        }
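A minimal sketch of how this helper might be invoked (the group id, topic, ZooKeeper address, and usings such as System and System.Text are placeholders/assumptions, not from the original): every machine passes the same consumerGroupId but its own uniqueConsumerId, and the library balances the topic's partitions across all of them.

        // Hypothetical call site for the BalancedConsumer helper above.
        BalancedConsumer(
            "order-processors",               // shared group id: all machines using it get balanced together
            Environment.MachineName,          // unique consumer id per machine/process
            "orders",                         // topic
            1,                                // threads (the helper only reads streams[topic][0], so 1 is the safe value)
            "zk1:2181",
            message => Console.WriteLine(Encoding.UTF8.GetString(message.Payload)));

Because AutoCommit is disabled in the helper, the CommitOffsets() call after each processMessage is what durably advances the group's offsets; committing per message is the safest choice but also the most ZooKeeper-intensive one.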
Example #3
        private void CloseFetchersForQueues(Cluster.Cluster cluster,
                                            IEnumerable <BlockingCollection <FetchedDataChunk> > queuesToBeCleared,
                                            IDictionary <string, IList <KafkaMessageStream <TData> > > kafkaMessageStreams,
                                            ZookeeperConsumerConnector zkConsumerConnector)
        {
            if (fetcher != null)
            {
                var allPartitionInfos = new List <PartitionTopicInfo>();
                foreach (var item in topicRegistry.Values)
                {
                    foreach (var partitionTopicInfo in item.Values)
                    {
                        allPartitionInfos.Add(partitionTopicInfo);
                    }
                }
                fetcher.Shutdown();
                fetcher.ClearFetcherQueues(allPartitionInfos, cluster, queuesToBeCleared, kafkaMessageStreams);
                Logger.Info("Committing all offsets after clearing the fetcher queues");

                if (config.AutoCommit)
                {
                    zkConsumerConnector.CommitOffsets();
                }
            }
        }
Example #4

        public void ConsumerPerformsRebalancingWhenBrokerIsRemovedFromTopic()
        {
            var    config          = this.ZooKeeperBasedConsumerConfig;
            string brokerPath      = ZooKeeperClient.DefaultBrokerIdsPath + "/" + 2345;
            string brokerTopicPath = ZooKeeperClient.DefaultBrokerTopicsPath + "/test/" + 2345;

            using (var consumerConnector = new ZookeeperConsumerConnector(config, true))
            {
                var client = ReflectionHelper.GetInstanceField <ZooKeeperClient>("zkClient", consumerConnector);
                Assert.IsNotNull(client);
                client.DeleteRecursive("/consumers/group1");
                var topicCount = new Dictionary <string, int> {
                    { "test", 1 }
                };
                consumerConnector.CreateMessageStreams(topicCount);
                WaitUntillIdle(client, 1000);
                client.CreateEphemeral(brokerPath, "192.168.1.39-1310449279123:192.168.1.39:9102");
                client.CreateEphemeral(brokerTopicPath, 1);
                WaitUntillIdle(client, 1000);
                client.DeleteRecursive(brokerTopicPath);
                WaitUntillIdle(client, 1000);

                IList <string> children = client.GetChildren("/consumers/group1/owners/test", false);
                Assert.That(children.Count, Is.EqualTo(2));
                Assert.That(children, Has.None.EqualTo("2345-0"));
                var topicRegistry = ReflectionHelper.GetInstanceField <IDictionary <string, IDictionary <Partition, PartitionTopicInfo> > >("topicRegistry", consumerConnector);
                Assert.That(topicRegistry, Is.Not.Null.And.Not.Empty);
                Assert.That(topicRegistry.Count, Is.EqualTo(1));
                var item = topicRegistry["test"];
                Assert.That(item.Count, Is.EqualTo(2));
                Assert.That(item.Where(x => x.Value.BrokerId == 2345).Count(), Is.EqualTo(0));
            }
        }
Example #5
        public void SimpleSyncProducerSendsLotsOfMessagesIncreasingTheSizeAndConsumerConnectorGetsThemBack()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;
            var consConf       = this.ConsumerConfig1;

            consumerConfig.AutoCommitInterval = 1000;
            int    numberOfMessagesToSend = 2000;
            string topic = CurrentTestTopic;

            var msgList = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessagesToSend; i++)
                {
                    string payload     = CreatePayloadByNumber(i);
                    byte[] payloadData = Encoding.UTF8.GetBytes(payload);
                    var    msg         = new Message(payloadData);
                    msgList.Add(msg);
                    var producerRequest = new ProducerRequest(topic, 0, new List <Message>()
                    {
                        msg
                    });
                    producer.Send(producerRequest);
                }
            }

            Thread.Sleep(3000);

            // now consuming
            int messageNumberCounter = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[topic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(CreatePayloadByNumber(messageNumberCounter), Encoding.UTF8.GetString(message.Payload));
                            messageNumberCounter++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessagesToSend, messageNumberCounter);
        }
Example #6
        public void MaxFetchSizeBugShouldNotAppearWhenSmallFetchSizeAndSingleMessageSmallerThanFetchSize()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize          = 256;
            consumerConfig.NumberOfTries      = 1;
            consumerConfig.AutoCommitInterval = 1000;
            int    numberOfMessagesToSend = 100;
            string topic = CurrentTestTopic;

            var msgList = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessagesToSend; i++)
                {
                    string payload     = CreatePayloadByNumber(i + 100);
                    byte[] payloadData = Encoding.UTF8.GetBytes(payload);
                    var    msg         = new Message(payloadData);
                    msgList.Add(msg);
                    var producerRequest = new ProducerRequest(topic, 0, new List <Message>()
                    {
                        msg
                    });
                    producer.Send(producerRequest);
                }
            }

            // now consuming
            int messageNumberCounter = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[topic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(CreatePayloadByNumber(messageNumberCounter + 100), Encoding.UTF8.GetString(message.Payload));
                            messageNumberCounter++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessagesToSend, messageNumberCounter);
        }
Example #7
 public ZKSessionExpireListener(ZKGroupDirs dirs, string consumerIdString, TopicCount topicCount, ZKRebalancerListener loadBalancerListener, ZookeeperConsumerConnector zkConsumerConnector)
 {
     this.consumerIdString     = consumerIdString;
     this.loadBalancerListener = loadBalancerListener;
     this.zkConsumerConnector  = zkConsumerConnector;
     this.dirs       = dirs;
     this.topicCount = topicCount;
 }
Example #8
        public void SimpleSyncProducerSends2CompressedMessagesAndConsumerConnectorGetsThemBack()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            // first producing
            string payload1 = "kafka 1.";

            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
            var    msg1         = new Message(payloadData1);

            string payload2 = "kafka 2.";

            byte[] payloadData2 = Encoding.UTF8.GetBytes(payload2);
            var    msg2         = new Message(payloadData2);

            Message compressedMessage = CompressionUtils.Compress(new List <Message> {
                msg1, msg2
            }, CompressionCodecs.DefaultCompressionCodec);
            var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                compressedMessage
            });

            using (var producer = new SyncProducer(prodConfig))
            {
                producer.Send(producerRequest);
            }

            // now consuming
            var resultMessages = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];
                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            resultMessages.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(2, resultMessages.Count);
            Assert.AreEqual(msg1.ToString(), resultMessages[0].ToString());
            Assert.AreEqual(msg2.ToString(), resultMessages[1].ToString());
        }
Example #9
        void WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_DoWork(object sender, DoWorkEventArgs e)
        {
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 256;

            int resultMessages             = 0;
            HashSet <string> resultSet     = new HashSet <string>();
            int           nrOfDuplicates   = 0;
            StringBuilder sb               = new StringBuilder();
            var           receivedMessages = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            receivedMessages.Add(message);
                            var msgString = Encoding.UTF8.GetString(message.Payload);
                            sb.AppendLine(msgString);
                            if (resultSet.Contains(msgString))
                            {
                                nrOfDuplicates++;
                            }
                            else
                            {
                                resultSet.Add(msgString);
                            }
                            resultMessages++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }
            var threadId = Thread.CurrentThread.ManagedThreadId.ToString();

            using (StreamWriter outfile = new StreamWriter("ConsumerTestDumpThread-" + threadId + ".txt"))
            {
                outfile.Write(sb.ToString());
            }
            lock (WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsResultCounterLock)
            {
                WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsReceivedMessages.AddRange(receivedMessages);
                WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsDoneNr++;
            }
        }
Example #10
        public void OneMessageIsSentAndReceivedThenExceptionsWhenNoMessageThenAnotherMessageIsSentAndReceived()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            // first producing
            string payload1 = "kafka 1.";

            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
            var    msg1         = new Message(payloadData1);

            using (var producer = new SyncProducer(prodConfig))
            {
                var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                    msg1
                });
                producer.Send(producerRequest);

                // now consuming
                using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
                {
                    var topicCount = new Dictionary <string, int> {
                        { CurrentTestTopic, 1 }
                    };
                    var messages = consumerConnector.CreateMessageStreams(topicCount);
                    var sets     = messages[CurrentTestTopic];
                    KafkaMessageStream myStream = sets[0];
                    var enumerator = myStream.GetEnumerator();

                    Assert.IsTrue(enumerator.MoveNext());
                    Assert.AreEqual(msg1.ToString(), enumerator.Current.ToString());

                    Assert.Throws <ConsumerTimeoutException>(() => enumerator.MoveNext());

                    Assert.Throws <IllegalStateException>(() => enumerator.MoveNext()); // iterator is in failed state

                    enumerator.Reset();

                    // producing again
                    string payload2     = "kafka 2.";
                    byte[] payloadData2 = Encoding.UTF8.GetBytes(payload2);
                    var    msg2         = new Message(payloadData2);

                    var producerRequest2 = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                        msg2
                    });
                    producer.Send(producerRequest2);
                    Thread.Sleep(3000);

                    Assert.IsTrue(enumerator.MoveNext());
                    Assert.AreEqual(msg2.ToString(), enumerator.Current.ToString());
                }
            }
        }
Example #11
        public void SimpleSyncProducerSendsLotsOfMessagesAndConsumerConnectorGetsThemBackWithMaxQueuedChunksRefillCheck()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 100;
            int numberOfMessages = 1000;

            List <Message> messagesToSend = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    string payload1     = "kafka 1.";
                    byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                    var    msg          = new Message(payloadData1);
                    messagesToSend.Add(msg);
                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        msg
                    });
                }
            }

            Thread.Sleep(2000);

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                Thread.Sleep(5000);

                var queues =
                    ReflectionHelper.GetInstanceField
                    <IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > >("queues",
                                                                                                    consumerConnector);
                var queue = queues.First().Value;

                Assert.AreEqual(ConsumerConfiguration.DefaultMaxQueuedChunks, queue.Count);

                var sets     = messages[CurrentTestTopic];
                var firstSet = sets[0];
                firstSet.Take(5).ToList(); // force enumeration (Take alone is lazy); this should take at least one chunk from the queue

                Thread.Sleep(2000); //a new chunk should be immediately inserted into the queue

                // the queue should refill to the default max amount of chunks
                Assert.AreEqual(ConsumerConfiguration.DefaultMaxQueuedChunks, queue.Count);
            }
        }
Example #12
        private void CloseFetchers(Cluster.Cluster cluster,
                                   IDictionary <string, IList <string> > relevantTopicThreadIdsMap,
                                   ZookeeperConsumerConnector zkConsumerConnector)
        {
            Logger.Info("enter CloseFetchers ...");
            var queuesToBeCleared = queues.Where(q => relevantTopicThreadIdsMap.ContainsKey(q.Key.Item1))
                                    .Select(q => q.Value)
                                    .ToList();

            CloseFetchersForQueues(cluster, queuesToBeCleared, kafkaMessageStreams, zkConsumerConnector);
            Logger.Info("exit CloseFetchers");
        }
Example #13
        public void ConsumerPerformsRebalancingOnStart()
        {
            var config = this.ZooKeeperBasedConsumerConfig;

            using (var consumerConnector = new ZookeeperConsumerConnector(config, true))
            {
                var client = ReflectionHelper.GetInstanceField <ZooKeeperClient>("zkClient", consumerConnector);
                Assert.IsNotNull(client);
                client.DeleteRecursive("/consumers/group1");
                var topicCount = new Dictionary <string, int> {
                    { "test", 1 }
                };
                consumerConnector.CreateMessageStreams(topicCount);
                WaitUntillIdle(client, 1000);
                IList <string> children = client.GetChildren("/consumers", false);
                Assert.That(children, Is.Not.Null.And.Not.Empty);
                Assert.That(children, Contains.Item("group1"));
                children = client.GetChildren("/consumers/group1", false);
                Assert.That(children, Is.Not.Null.And.Not.Empty);
                Assert.That(children, Contains.Item("ids"));
                Assert.That(children, Contains.Item("owners"));
                children = client.GetChildren("/consumers/group1/ids", false);
                Assert.That(children, Is.Not.Null.And.Not.Empty);
                string consumerId = children[0];
                children = client.GetChildren("/consumers/group1/owners", false);
                Assert.That(children, Is.Not.Null.And.Not.Empty);
                Assert.That(children.Count, Is.EqualTo(1));
                Assert.That(children, Contains.Item("test"));
                children = client.GetChildren("/consumers/group1/owners/test", false);
                Assert.That(children, Is.Not.Null.And.Not.Empty);
                Assert.That(children.Count, Is.EqualTo(2));
                string partId = children[0];
                var    data   = client.ReadData <string>("/consumers/group1/owners/test/" + partId);
                Assert.That(data, Is.Not.Null.And.Not.Empty);
                Assert.That(data, Contains.Substring(consumerId));
                data = client.ReadData <string>("/consumers/group1/ids/" + consumerId);
                Assert.That(data, Is.Not.Null.And.Not.Empty);
                Assert.That(data, Is.EqualTo("{ \"test\": 1 }"));
            }

            using (var client = new ZooKeeperClient(config.ZooKeeper.ZkConnect, config.ZooKeeper.ZkSessionTimeoutMs, ZooKeeperStringSerializer.Serializer))
            {
                client.Connect();
                //// Should be created as ephemeral
                IList <string> children = client.GetChildren("/consumers/group1/ids");
                Assert.That(children, Is.Null.Or.Empty);
                //// Should be created as ephemeral
                children = client.GetChildren("/consumers/group1/owners/test");
                Assert.That(children, Is.Null.Or.Empty);
            }
        }
Example #14
 public void Start()
 {
     ZkConsumerConnector = new ZookeeperConsumerConnector(ConsumerConfiguration, true,
                                                          ZkRebalanceHandler,
                                                          ZkDisconnectedHandler,
                                                          ZkExpiredHandler);
     _cancellationTokenSource = new CancellationTokenSource();
     _consumerTask            = Task.Factory.StartNew(cs => ReceiveMessages(cs as CancellationTokenSource,
                                                                            _onMessageReceived),
                                                      _cancellationTokenSource,
                                                      _cancellationTokenSource.Token,
                                                      TaskCreationOptions.LongRunning,
                                                      TaskScheduler.Default);
 }
Example #15
        public void SimpleSyncProducerSendsLotsOfMessagesAndConsumerConnectorGetsThemBackWithMaxQueuedChunksCheck(int? maxQueuedChunks)
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 100;
            var consConf         = this.ConsumerConfig1;
            int numberOfMessages = 1000;

            List <Message> messagesToSend = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    string payload1     = "kafka 1.";
                    byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                    var    msg          = new Message(payloadData1);
                    messagesToSend.Add(msg);
                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        msg
                    });
                }
            }

            Thread.Sleep(2000);

            if (maxQueuedChunks.HasValue)
            {
                consumerConfig.MaxQueuedChunks = maxQueuedChunks.Value;
            }
            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                Thread.Sleep(5000);

                var queues =
                    ReflectionHelper.GetInstanceField
                    <IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > >("queues",
                                                                                                    consumerConnector);
                var queue = queues.First().Value;

                Assert.AreEqual(maxQueuedChunks.HasValue ? maxQueuedChunks.Value : ConsumerConfiguration.DefaultMaxQueuedChunks, queue.Count);
            }
        }
Example #16

        public void TestLeaderSelectionForPartition()
        {
            var zkClient = new ZkClient(this.zookeeperConnect, 6000, 30000, new ZkStringSerializer());

            // create topic topic1 with 1 partition on broker 0
            AdminUtils.CreateTopic(zkClient, Topic, 1, 1, new Dictionary <string, string>());
            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 0, 3000);

            var sentMessages1 = this.SendMessages(
                Configs.First(), nMessages, "batch1", CompressionCodecs.NoCompressionCodec, 1);

            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 0, 1000);

            // create a consumer
            var consumerConfig1      = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer1);
            var zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1);
            var topicMessageStreams1 =
                zkConsumerConnector1.CreateMessageStreams(
                    new Dictionary <string, int> {
                { Topic, 1 }
            }, new StringDecoder(), new StringDecoder());

            var topicRegistry = zkConsumerConnector1.TopicRegistry;

            Assert.Equal(1, topicRegistry.Select(x => x.Key).Count());
            Assert.Equal(Topic, topicRegistry.Select(x => x.Key).First());

            var topicsAndPartitionsInRegistry =
                topicRegistry.Select(x => Tuple.Create(x.Key, x.Value.Select(p => p.Value))).ToList();

            var brokerPartition = topicsAndPartitionsInRegistry.First().Item2.First();

            Assert.Equal(0, brokerPartition.PartitionId);

            // also check partition ownership
            var actual_1   = this.GetZKChildrenValues(this.dirs.ConsumerOwnerDir);
            var expected_1 = new List <Tuple <string, string> >
            {
                Tuple.Create("0", "group1_consumer1-0"),
            };

            Assert.Equal(expected_1, actual_1);

            var receivedMessages1 = this.GetMessages(nMessages, topicMessageStreams1);

            Assert.Equal(sentMessages1, receivedMessages1);
            zkConsumerConnector1.Shutdown();
            zkClient.Dispose();
        }
Example #17
        public void ConsumerPerformsRebalancingWhenConsumerIsRemovedAndTakesItsPartitions()
        {
            var            config   = this.ZooKeeperBasedConsumerConfig;
            string         basePath = "/consumers/" + config.GroupId;
            IList <string> ids;
            IList <string> owners;

            using (var consumerConnector = new ZookeeperConsumerConnector(config, true))
            {
                var client = ReflectionHelper.GetInstanceField <ZooKeeperClient>("zkClient", consumerConnector);
                Assert.IsNotNull(client);
                client.DeleteRecursive("/consumers/group1");
                var topicCount = new Dictionary <string, int> {
                    { "test", 1 }
                };
                consumerConnector.CreateMessageStreams(topicCount);
                WaitUntillIdle(client, 1000);
                using (var consumerConnector2 = new ZookeeperConsumerConnector(config, true))
                {
                    consumerConnector2.CreateMessageStreams(topicCount);
                    WaitUntillIdle(client, 1000);
                    ids    = client.GetChildren("/consumers/group1/ids", false).ToList();
                    owners = client.GetChildren("/consumers/group1/owners/test", false).ToList();
                    Assert.That(ids, Is.Not.Null.And.Not.Empty);
                    Assert.That(ids.Count, Is.EqualTo(2));
                    Assert.That(owners, Is.Not.Null.And.Not.Empty);
                    Assert.That(owners.Count, Is.EqualTo(2));
                }

                WaitUntillIdle(client, 1000);
                ids    = client.GetChildren("/consumers/group1/ids", false).ToList();
                owners = client.GetChildren("/consumers/group1/owners/test", false).ToList();

                Assert.That(ids, Is.Not.Null.And.Not.Empty);
                Assert.That(ids.Count, Is.EqualTo(1));
                Assert.That(owners, Is.Not.Null.And.Not.Empty);
                Assert.That(owners.Count, Is.EqualTo(2));

                var data1 = client.ReadData <string>("/consumers/group1/owners/test/" + owners[0], false);
                var data2 = client.ReadData <string>("/consumers/group1/owners/test/" + owners[1], false);

                Assert.That(data1, Is.Not.Null.And.Not.Empty);
                Assert.That(data2, Is.Not.Null.And.Not.Empty);
                Assert.That(data1, Is.EqualTo(data2));
                Assert.That(data1, Is.StringStarting(ids[0]));
            }
        }
Example #18
 internal ZKRebalancerListener(
     ConsumerConfiguration config,
     string consumerIdString,
     IDictionary <string, IDictionary <Partition, PartitionTopicInfo> > topicRegistry,
     IZooKeeperClient zkClient,
     ZookeeperConsumerConnector zkConsumerConnector,
     IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > queues,
     Fetcher fetcher,
     object syncLock)
 {
     this.syncLock            = syncLock;
     this.consumerIdString    = consumerIdString;
     this.config              = config;
     this.topicRegistry       = topicRegistry;
     this.zkClient            = zkClient;
     this.dirs                = new ZKGroupDirs(config.GroupId);
     this.zkConsumerConnector = zkConsumerConnector;
     this.queues              = queues;
     this.fetcher             = fetcher;
 }
Example #19
 internal ZKRebalancerListener(
     ConsumerConfiguration config,
     string consumerIdString,
     IDictionary <string, IDictionary <int, PartitionTopicInfo> > topicRegistry,
     IZooKeeperClient zkClient,
     ZookeeperConsumerConnector zkConsumerConnector,
     IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > queues,
     Fetcher fetcher,
     IDictionary <string, IList <KafkaMessageStream <TData> > > kafkaMessageStreams,
     TopicCount topicCount)
 {
     this.consumerIdString = consumerIdString;
     this.config           = config;
     this.topicRegistry    = topicRegistry;
     this.zkClient         = zkClient;
     dirs = new ZKGroupDirs(config.GroupId);
     this.zkConsumerConnector = zkConsumerConnector;
     this.queues              = queues;
     this.fetcher             = fetcher;
     this.kafkaMessageStreams = kafkaMessageStreams;
     this.topicCount          = topicCount;
 }
Example #20
        public void ConsumerPerformsRebalancingWhenNewBrokerIsAddedToTopic()
        {
            var    config          = this.ZooKeeperBasedConsumerConfig;
            string brokerPath      = ZooKeeperClient.DefaultBrokerIdsPath + "/" + 2345;
            string brokerTopicPath = ZooKeeperClient.DefaultBrokerTopicsPath + "/test/" + 2345;

            using (var consumerConnector = new ZookeeperConsumerConnector(config, true))
            {
                var client = ReflectionHelper.GetInstanceField <ZooKeeperClient>(
                    "zkClient", consumerConnector);
                Assert.IsNotNull(client);
                client.DeleteRecursive("/consumers/group1");
                var topicCount = new Dictionary <string, int> {
                    { "test", 1 }
                };
                consumerConnector.CreateMessageStreams(topicCount);
                WaitUntillIdle(client, 1000);
                IList <string> children   = client.GetChildren("/consumers/group1/ids", false);
                string         consumerId = children[0];
                client.CreateEphemeral(brokerPath, "192.168.1.39-1310449279123:192.168.1.39:9102");
                client.CreateEphemeral(brokerTopicPath, 1);
                WaitUntillIdle(client, 500);
                children = client.GetChildren("/consumers/group1/owners/test", false);
                Assert.That(children.Count, Is.EqualTo(3));
                Assert.That(children, Contains.Item("2345-0"));
                var data = client.ReadData <string>("/consumers/group1/owners/test/2345-0");
                Assert.That(data, Is.Not.Null);
                Assert.That(data, Contains.Substring(consumerId));
                var topicRegistry =
                    ReflectionHelper.GetInstanceField <IDictionary <string, IDictionary <Partition, PartitionTopicInfo> > >(
                        "topicRegistry", consumerConnector);
                Assert.That(topicRegistry, Is.Not.Null.And.Not.Empty);
                Assert.That(topicRegistry.Count, Is.EqualTo(1));
                var item = topicRegistry["test"];
                Assert.That(item.Count, Is.EqualTo(3));
                var broker = topicRegistry["test"].SingleOrDefault(x => x.Key.BrokerId == 2345);
                Assert.That(broker, Is.Not.Null);
            }
        }
Example #21

        public void TestCompressionSetConsumption()
        {
            // send some messages to each broker
            var sentMessages = this.SendMessagesToBrokerPartition(
                Configs.First(), Topic, 0, 200, CompressionCodecs.DefaultCompressionCodec)
                               .Union(
                this.SendMessagesToBrokerPartition(
                    Configs.First(), Topic, 1, 200, CompressionCodecs.DefaultCompressionCodec))
                               .ToList();

            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 0, 1000);
            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 1, 1000);

            // create a consumer
            var consumerConfig1      = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer0);
            var zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1);
            var topicMessageStreams1 =
                zkConsumerConnector1.CreateMessageStreams(
                    new Dictionary <string, int> {
                { Topic, 1 }
            }, new StringDecoder(), new StringDecoder());

            var receivedMessages1 = this.GetMessages(400, topicMessageStreams1);

            Assert.Equal(sentMessages.OrderBy(x => x).ToArray(), receivedMessages1.OrderBy(x => x).ToArray());

            // also check partition ownership
            var actual_2   = this.GetZKChildrenValues(this.dirs.ConsumerOwnerDir);
            var expected_2 = new List <Tuple <string, string> >
            {
                Tuple.Create("0", "group1_consumer0-0"),
                Tuple.Create("1", "group1_consumer0-0")
            };

            Assert.Equal(expected_2, actual_2);
            zkConsumerConnector1.Shutdown();
        }
Example #22
        public void ConsumerConnectorReceivesAShutdownSignal()
        {
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            // now consuming
            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                // putting the shutdown command into the queue
                FieldInfo fi = typeof(ZookeeperConsumerConnector).GetField(
                    "queues", BindingFlags.NonPublic | BindingFlags.Instance);
                var value =
                    (IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> >)
                    fi.GetValue(consumerConnector);
                foreach (var topicConsumerQueueMap in value)
                {
                    topicConsumerQueueMap.Value.Add(ZookeeperConsumerConnector.ShutdownCommand);
                }

                var sets           = messages[CurrentTestTopic];
                var resultMessages = new List <Message>();

                foreach (var set in sets)
                {
                    foreach (var message in set)
                    {
                        resultMessages.Add(message);
                    }
                }

                Assert.AreEqual(0, resultMessages.Count);
            }
        }
Example #23
 public KafkaConsumer(string zkConnectionString, string topic, string groupId, string consumerId, int backOffIncrement = 30, int fullLoadThreshold = 1000, int waitInterval = 1000)
 {
     _fullLoadThreshold = fullLoadThreshold;
     _waitInterval      = waitInterval;
     SlidingDoors       = new ConcurrentDictionary <int, SlidingDoor>();
     ZkConnectionString = zkConnectionString;
     Topic                 = topic;
     GroupId               = groupId;
     ConsumerId            = consumerId ?? string.Empty;
     ConsumerConfiguration = new ConsumerConfiguration
     {
         BackOffIncrement     = backOffIncrement,
         AutoCommit           = false,
         GroupId              = GroupId,
         ConsumerId           = ConsumerId,
         BufferSize           = ConsumerConfiguration.DefaultBufferSize,
         MaxFetchBufferLength = ConsumerConfiguration.DefaultMaxFetchBufferLength,
         FetchSize            = ConsumerConfiguration.DefaultFetchSize,
         AutoOffsetReset      = OffsetRequest.LargestTime,
         ZooKeeper            = KafkaClient.GetZooKeeperConfiguration(zkConnectionString),
         ShutdownTimeout      = 100
     };
     ZkConsumerConnector = new ZookeeperConsumerConnector(ConsumerConfiguration, true);
 }
Example #24
 protected override void Stop()
 {
     _consumer.Dispose();
     _consumer = null;
 }
Example #25
        internal void Consume()
        {
            // connects to zookeeper
            using (ZookeeperConsumerConnector connector = new ZookeeperConsumerConnector(configSettings, true))
            {
                if (this.ThreadID == 0)
                {
                    ConsumerGroupHelper.initialOffset = connector.GetOffset(cgOptions.Topic);

                    //Logger.InfoFormat("======Original offset \r\n{0}", ConsumerGroupHelper.initialOffset == null ? "(NULL)" : ConsumeGroupMonitorHelper.GetComsumerGroupOffsetsAsLog(ConsumerGroupHelper.initialOffset));
                }

                // defines the collection of topics and the number of threads to consume each with
                // ===============NOTE============================
                // For example, suppose one topic has 80 partitions.
                //
                // Normally, start more than 96 (80 * 120%) clients with the same GroupId (the extra 20% are a buffer for
                // autopilot/IMP restarts) and set FetchThreadCountPerConsumer to 1. Then 80 clients each lock one partition
                // (MACHINENAME_ProcessID works well as the ConsumerId) and the other 16 stay idle. This approach is strongly recommended.
                //
                // If you start 40 clients with FetchThreadCountPerConsumer set to 1, every client locks at least 2 partitions,
                // and if some clients become unavailable (for autopilot/IMP reasons), some of the remaining clients may lock 3.
                //
                // If you start 40 clients with FetchThreadCountPerConsumer set to 2, you get two IEnumerator<Message> instances
                // (topicData[0].GetEnumerator() and topicData[1].GetEnumerator()) and must start TWO threads to process them independently.
                // If a client gets 2 partitions, each thread handles 1; if it gets 3, one thread gets 2 and the other gets 1,
                // which complicates the situation and unbalances partition consumption.
                //==================NOTE=============================
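                // A minimal sizing sketch of the rule above (illustrative arithmetic only, not part of the original):
                //   int partitions = 80;
                //   int clients    = (int)Math.Ceiling(partitions * 1.2); // 96 clients: 80 lock one partition each, 16 idle spares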
                IDictionary <string, int> topicMap = new Dictionary <string, int> {
                    { cgOptions.Topic, cgOptions.FetchThreadCountPerConsumer }
                };

                // get references to topic streams.
                IDictionary <string, IList <KafkaMessageStream <Message> > > streams = connector.CreateMessageStreams(topicMap, new DefaultDecoder());
                IList <KafkaMessageStream <Message> > topicData = streams[cgOptions.Topic];
                long latestTotalCount = 0;

                bool hitEndAndCommited = false;
                if (cgOptions.CancellationTimeoutMs == 5000)
                {
                    // Get the message enumerator.
                    IEnumerator <Message> messageEnumerator = topicData[0].GetEnumerator();
                    //TODO: the enumerator count equals FetchThreadCountPerConsumer. For example, if that value is 5, you get 5 enumerators here.
                    //If you have 100 partitions and only 20 consumers, set this value to 5 and start 5 threads, one handling each enumerator.

                    // Read messages until the maximum receive-message count is reached or no new messages arrive within the consumer's configured timeout.
                    while (true)
                    {
                        bool noMoreMessage = false;
                        try
                        {
                            messageEnumerator.MoveNext();
                            Message m = messageEnumerator.Current;
                            latestTotalCount = Interlocked.Increment(ref ConsumerGroupHelper.totalCount);
                            Logger.InfoFormat("Message {0} from Partition:{1}, Offset:{2}, key:{3}, value:{4}", latestTotalCount, m.PartitionId, m.Offset, m.Key == null ? "(null)" : Encoding.UTF8.GetString(m.Key), m.Payload == null ? "(null)" : Encoding.UTF8.GetString(m.Payload));
                            if (latestTotalCount == 1)
                            {
                                Logger.WarnFormat("Read FIRST message, it's offset: {0}  PartitionID:{1}", m.Offset, ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.PartitionId);
                            }

                            hitEndAndCommited = false;
                            if (latestTotalCount % cgOptions.CommitBatchSize == 0)
                            {
                                //NOTE======
                                //Normally, just call .CommitOffsets() directly.
                                //    CommitOffset(string topic, int partition, long offset) is only used when the consumer has a strong requirement to reprocess as few messages as possible.
                                //Tune the frequency of calling .CommitOffsets(): it directly increases ZooKeeper load and impacts your overall performance.
                                if (cgOptions.CommitOffsetWithPartitionIDOffset)
                                {
                                    connector.CommitOffset(cgOptions.Topic, m.PartitionId.Value, m.Offset);
                                }
                                else
                                {
                                    connector.CommitOffsets();
                                }
                                Console.WriteLine("\tRead some and commit once,  LATEST message offset: {0}. PartitionID:{1} -- {2}  Totally read  {3}  will commit offset. {4} FetchOffset:{5}  ConsumeOffset:{6} CommitedOffset:{7}"
                                                  , m.Offset, m.PartitionId.Value, ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.PartitionId, latestTotalCount, DateTime.Now
                                                  , ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.FetchOffset
                                                  , ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.ConsumeOffset
                                                  , ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.CommitedOffset);
                            }

                            if (cgOptions.Count > 0 && latestTotalCount >= cgOptions.Count)
                            {
                                Logger.WarnFormat("Read LAST message, it's offset: {0}. PartitionID:{1}   Totally read {2}  want {3} will exit.", m.Offset, ((ConsumerIterator <Message>)messageEnumerator).currentTopicInfo.PartitionId, latestTotalCount, cgOptions.Count);
                                break;
                            }
                        }
                        catch (ConsumerTimeoutException)
                        {
                            if (!hitEndAndCommited)
                            {
                                Logger.WarnFormat("Totally Read {0}  will commit offset. {1}", latestTotalCount, DateTime.Now);
                                connector.CommitOffsets();
                                hitEndAndCommited = true;
                            }
                            // Thrown if no new messages read after consumer configured timeout.
                            noMoreMessage = true;
                        }

                        if (noMoreMessage)
                        {
                            Logger.InfoFormat("No more message , hit end ,will Sleep(1), {0}", DateTime.Now);
                            if (cgOptions.SleepTypeWhileAlwaysRead == 0)
                            {
                                Thread.Sleep(0);
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 1)
                            {
                                Thread.Sleep(1);        // Best choice is Thread.Sleep(1); the other choices still drive the CPU to 100%
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 2)
                            {
                                Thread.Yield();
                            }
                            else
                            {
                            }
                        }
                    }
                }
                else
                {
                    //Siphon scenario: repeatedly take a batch of messages and process them; if there are not enough messages, stop the current batch after the timeout.
                    while (true)
                    {
#if NET45
                        bool    noMoreMessage = false;
                        Message lastMessage   = null;
                        int     count         = 0;
                        KafkaMessageStream <Message> messagesStream = null;
                        ConsumerIterator <Message>   iterator       = null;
                        using (CancellationTokenSource cancellationTokenSource = new CancellationTokenSource(cgOptions.CancellationTimeoutMs))
                        {
                            lastMessage = null;
                            IEnumerable <Message> messages = topicData[0].GetCancellable(cancellationTokenSource.Token);
                            messagesStream = (KafkaMessageStream <Message>)messages;
                            iterator       = (ConsumerIterator <Message>)messagesStream.iterator;
                            foreach (Message message in messages)
                            {
                                latestTotalCount = Interlocked.Increment(ref ConsumerGroupHelper.totalCount);
                                lastMessage      = message;
                                if (latestTotalCount == 1)
                                {
                                    PartitionTopicInfo p = iterator.currentTopicInfo;
                                    Logger.InfoFormat("Read FIRST message, it's offset: {0}  PartitionID:{1}", lastMessage.Offset, p == null ? "null" : p.PartitionId.ToString());
                                }
                                hitEndAndCommited = false;
                                if (++count >= cgOptions.CommitBatchSize)
                                {
                                    cancellationTokenSource.Cancel();
                                }
                            }
                        }
                        if (count > 0)
                        {
                            connector.CommitOffsets();
                            consumedTotalCount += count;
                            PartitionTopicInfo p = iterator.currentTopicInfo;
                            Console.WriteLine("\tRead some and commit once, Thread: {8}  consumedTotalCount:{9} Target:{10} LATEST message offset: {0}. PartitionID:{1} -- {2}  Totally read  {3}  will commit offset. {4} FetchOffset:{5}  ConsumeOffset:{6} CommitedOffset:{7}"
                                              , lastMessage.Offset, lastMessage.PartitionId.Value, p == null ? "null" : p.PartitionId.ToString(), latestTotalCount, DateTime.Now
                                              , p == null ? "null" : p.FetchOffset.ToString()
                                              , p == null ? "null" : p.ConsumeOffset.ToString()
                                              , p == null ? "null" : p.CommitedOffset.ToString()
                                              , this.ThreadID
                                              , this.consumedTotalCount
                                              , this.Count);
                        }
                        else
                        {
                            noMoreMessage = true;
                        }

                        if (this.Count > 0 && consumedTotalCount >= this.Count)
                        {
                            Logger.InfoFormat("Current thrad Read LAST message, Totally read {0}  want {1} will exit current thread.", consumedTotalCount, this.Count);
                            break;
                        }

                        if (noMoreMessage)
                        {
                            Logger.InfoFormat("No more message , hit end ,will Sleep(2000), {0}", DateTime.Now);
                            if (cgOptions.SleepTypeWhileAlwaysRead == 0)
                            {
                                Thread.Sleep(0);
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 1)
                            {
                                Thread.Sleep(2000);        // Best choice is Thread.Sleep(1); the other choices still drive the CPU to 100%
                            }
                            else if (cgOptions.SleepTypeWhileAlwaysRead == 2)
                            {
                                Thread.Yield();
                            }
                            else
                            {
                            }
                        }
#endif
#if NET4
                        throw new NotSupportedException("Please use .NET 4.5 to compile.");
#endif
                    }
                }

                Logger.InfoFormat("Read {0}  will commit offset. {1}", latestTotalCount, DateTime.Now);
                connector.CommitOffsets();

                latestTotalCount = Interlocked.Read(ref ConsumerGroupHelper.totalCount);

                Logger.InfoFormat("Totally read {0}  want {1} . ", latestTotalCount, cgOptions.Count);
                if (this.ThreadID == 0)
                {
                    ConsumerGroupHelper.newOffset = connector.GetOffset(cgOptions.Topic);
                }
            }

            this.resetEvent.Set();
        }
Example #26
        public void SimpleSyncProducerSendsLotsOfMessagesAndConsumerConnectorGetsThemBackWithVerySmallAutoCommitInterval()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.AutoCommit         = true;
            consumerConfig.AutoCommitInterval = 10;
            int numberOfMessages = 500;
            int messageSize      = 0;

            List <Message> messagesToSend = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    string payload1     = "kafka 1.";
                    byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                    var    msg          = new Message(payloadData1);
                    messagesToSend.Add(msg);
                    if (i == 0)
                    {
                        messageSize = msg.Size;
                    }
                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        msg
                    });
                }
            }

            Thread.Sleep(2000);

            // now consuming
            int resultCount = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(messageSize, message.Size);
                            resultCount++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessages, resultCount);
        }
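
Every consume loop in these tests ends the same way: iteration blocks until the connector's consumer timeout fires and a ConsumerTimeoutException is thrown. A small drain helper capturing that pattern (a sketch; the name and signature are illustrative, not part of the library):

        private static List <Message> DrainUntilTimeout(IEnumerable <IEnumerable <Message> > sets)
        {
            var result = new List <Message>();

            try
            {
                foreach (var set in sets)
                {
                    foreach (var message in set)
                    {
                        result.Add(message);
                    }
                }
            }
            catch (ConsumerTimeoutException)
            {
                // expected: no message arrived within the configured timeout
            }

            return result;
        }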
        public void TestBasic()
        {
            // test consumer timeout logic
            var consumerConfig0      = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer0, 200);
            var zkConsumerConnector0 = new ZookeeperConsumerConnector(consumerConfig0);
            var topicMessageStreams0 =
                zkConsumerConnector0.CreateMessageStreams(
                    new Dictionary <string, int> {
                { Topic, 1 }
            }, new StringDecoder(), new StringDecoder());

            // no messages to consume, so we should hit the timeout;
            // the iterator should also be re-entrant, so loop it twice
            for (var i = 0; i < 2; i++)
            {
                Assert.Throws <ConsumerTimeoutException>(
                    () => this.GetMessages(nMessages * 2, topicMessageStreams0));
            }

            zkConsumerConnector0.Shutdown();

            // send some messages to each broker
            var sentMessages1 =
                this.SendMessagesToBrokerPartition(Configs.First(), Topic, 0, nMessages)
                .Union(this.SendMessagesToBrokerPartition(Configs.Last(), Topic, 0, nMessages)).ToList();

            // wait to make sure the topic and partition have a leader for the successful case
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 0, 500);
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 1, 500);

            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 0, 1000);
            TestUtils.WaitUntilMetadataIsPropagated(this.Servers, Topic, 1, 1000);

            // create a consumer
            var consumerConfig1      = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer1);
            var zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1);
            var topicMessageStreams1 =
                zkConsumerConnector1.CreateMessageStreams(
                    new Dictionary <string, int> {
                { Topic, 1 }
            }, new StringDecoder(), new StringDecoder());

            var receivedMessages1 = this.GetMessages(nMessages * 2, topicMessageStreams1);

            Assert.Equal(sentMessages1.OrderBy(x => x).ToArray(), receivedMessages1.OrderBy(x => x).ToArray());

            // also check partition ownership
            var actual_1   = this.GetZKChildrenValues(this.dirs.ConsumerOwnerDir);
            var expected_1 = new List <Tuple <string, string> >
            {
                Tuple.Create("0", "group1_consumer1-0"),
                Tuple.Create("1", "group1_consumer1-0")
            };

            Assert.Equal(expected_1, actual_1);

            // commit consumer offsets
            zkConsumerConnector1.CommitOffsets();

            // create a consumer
            var consumerConfig2 = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer2);

            consumerConfig2.RebalanceBackoffMs = RebalanceBackoutMs;

            var zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2);
            var topicMessageStreams2 =
                zkConsumerConnector2.CreateMessageStreams(
                    new Dictionary <string, int> {
                { Topic, 1 }
            }, new StringDecoder(), new StringDecoder());

            // send some messages to each broker
            var sentMessages2 =
                this.SendMessagesToBrokerPartition(Configs.First(), Topic, 0, nMessages)
                .Union(this.SendMessagesToBrokerPartition(Configs.Last(), Topic, 1, nMessages)).ToList();

            // wait to make sure the topic and partition have a leader for the successful case
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 0, 500);
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 1, 500);

            var receivedMessages2 =
                this.GetMessages(nMessages, topicMessageStreams1)
                .Union(this.GetMessages(nMessages, topicMessageStreams2))
                .ToList();

            Assert.Equal(sentMessages2.OrderBy(x => x).ToList(), receivedMessages2.OrderBy(x => x).ToList());

            // also check partition ownership
            var actual_2   = this.GetZKChildrenValues(this.dirs.ConsumerOwnerDir);
            var expected_2 = new List <Tuple <string, string> >
            {
                Tuple.Create("0", "group1_consumer1-0"),
                Tuple.Create("1", "group1_consumer2-0")
            };

            Assert.Equal(expected_2, actual_2);

            // create a consumer with empty map
            var consumerConfig3      = TestUtils.CreateConsumerProperties(ZkConnect, Group, Consumer3);
            var zkConsumerConnector3 = new ZookeeperConsumerConnector(consumerConfig3);

            zkConsumerConnector3.CreateMessageStreams(new Dictionary <string, int>());

            // send some messages to each broker
            var sentMessages3 =
                this.SendMessagesToBrokerPartition(Configs.First(), Topic, 0, nMessages)
                .Union(this.SendMessagesToBrokerPartition(Configs.Last(), Topic, 1, nMessages))
                .ToList();

            // wait to make sure the topic and partition have a leader for the successful case
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 0, 500);
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, Topic, 1, 500);

            var receivedMessages3 =
                this.GetMessages(nMessages, topicMessageStreams1)
                .Union(this.GetMessages(nMessages, topicMessageStreams2))
                .ToList();

            Assert.Equal(sentMessages3.OrderBy(x => x).ToList(), receivedMessages3.OrderBy(x => x).ToList());

            // also check partition ownership
            var actual_3 = this.GetZKChildrenValues(this.dirs.ConsumerOwnerDir);

            Assert.Equal(expected_2, actual_3);

            zkConsumerConnector1.Shutdown();
            zkConsumerConnector2.Shutdown();
            zkConsumerConnector3.Shutdown();

            Logger.Info("all consumer connectors stopped");
        }
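
The ownership assertions above compare against literals such as "group1_consumer1-0". Assuming the "<groupId>_<consumerId>-<threadNumber>" convention those literals suggest, this is how such an owner id is composed (the helper is hypothetical, for illustration only):

        static string OwnerId(string groupId, string consumerId, int threadNumber)
        {
            // OwnerId("group1", "consumer1", 0) == "group1_consumer1-0"
            return string.Format("{0}_{1}-{2}", groupId, consumerId, threadNumber);
        }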
Example #28
        public void WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 256;

            int originalNrOfMessages = 3000;

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < originalNrOfMessages; i++)
                {
                    var sourceMessage   = new Message(Encoding.UTF8.GetBytes("test message" + i));
                    var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                        sourceMessage
                    });
                    producer.Send(producerRequest);
                }
            }

            BackgroundWorker bw1 = new BackgroundWorker();

            bw1.WorkerSupportsCancellation = true;
            bw1.DoWork += new DoWorkEventHandler(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_DoWork);
            int runBw1AfterNIterations = 50;

            BackgroundWorker bw2 = new BackgroundWorker();

            bw2.WorkerSupportsCancellation = true;
            bw2.DoWork += new DoWorkEventHandler(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_DoWork);
            int runBw2AfterNIterations = 150;

            // now consuming
            int           messageNumberCounter = 0;
            StringBuilder sb = new StringBuilder();
            var           receivedMessages = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            receivedMessages.Add(message);
                            if (messageNumberCounter == runBw1AfterNIterations)
                            {
                                bw1.RunWorkerAsync();
                            }
                            if (messageNumberCounter == runBw2AfterNIterations)
                            {
                                bw2.RunWorkerAsync();
                            }
                            var msgString = Encoding.UTF8.GetString(message.Payload);
                            sb.AppendLine(msgString);
                            messageNumberCounter++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }
            int finishedThreads             = 0;
            var receivedFromBackgroundSoFar = new List <Message>();

            while (WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsDoneNr < 2
                   && (messageNumberCounter + WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsResultCounter < originalNrOfMessages))
            {
                lock (WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsResultCounterLock)
                {
                    finishedThreads =
                        WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsDoneNr;
                    receivedFromBackgroundSoFar.Clear();
                    receivedFromBackgroundSoFar.AddRange(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsReceivedMessages);
                }
                if (finishedThreads >= 2 || (receivedMessages.Count + receivedFromBackgroundSoFar.Count) >= originalNrOfMessages)
                {
                    break;
                }
                Thread.Sleep(1000);
            }
            using (StreamWriter outfile = new StreamWriter("ConsumerTestDumpMain.txt"))
            {
                outfile.Write(sb.ToString());
            }
            receivedMessages.AddRange(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsReceivedMessages);
            HashSet <string> resultSet = new HashSet <string>();
            int nrOfDuplicates         = 0;

            foreach (var receivedMessage in receivedMessages)
            {
                var msgString = Encoding.UTF8.GetString(receivedMessage.Payload);
                if (resultSet.Contains(msgString))
                {
                    nrOfDuplicates++;
                }
                else
                {
                    resultSet.Add(msgString);
                }
            }

            int totalMessagesFromAllThreads = receivedMessages.Count;

            Assert.AreEqual(originalNrOfMessages, totalMessagesFromAllThreads);

            Assert.AreEqual(0, nrOfDuplicates);
        }
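
For comparison, the HashSet loop above can be collapsed into a single LINQ query that counts duplicates the same way (a sketch; it decodes payloads as UTF-8 exactly as the test does, and requires System.Linq):

        int duplicates = receivedMessages
            .Select(m => Encoding.UTF8.GetString(m.Payload))
            .GroupBy(s => s)
            .Sum(g => g.Count() - 1);    // every occurrence beyond the first is a duplicate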
Example #29
 protected override void Start()
 {
     _consumer = KafkaUtils.CreateBalancedConsumer(_topic);
 }
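
KafkaUtils.CreateBalancedConsumer is a project-specific helper whose implementation is not shown here. A plausible shutdown counterpart, assuming _consumer is a ZookeeperConsumerConnector and the base class exposes a matching virtual Stop (both are assumptions):

 protected override void Stop()
 {
     if (_consumer != null)
     {
         _consumer.CommitOffsets();  // flush consumed positions (assumes manual commit)
         _consumer.Shutdown();       // leave the group and release partition ownership in ZooKeeper
     }
 }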
Example #30
        public void ConsumerConnectorConsumesTwoDifferentCompressedTopics()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            string topic1 = CurrentTestTopic + "1";
            string topic2 = CurrentTestTopic + "2";

            // first producing
            string payload1 = "kafka 1.";

            byte[]  payloadData1       = Encoding.UTF8.GetBytes(payload1);
            var     msg1               = new Message(payloadData1);
            Message compressedMessage1 = CompressionUtils.Compress(new List <Message> {
                msg1
            }, CompressionCodecs.GZIPCompressionCodec);

            string payload2 = "kafka 2.";

            byte[]  payloadData2       = Encoding.UTF8.GetBytes(payload2);
            var     msg2               = new Message(payloadData2);
            Message compressedMessage2 = CompressionUtils.Compress(new List <Message> {
                msg2
            }, CompressionCodecs.GZIPCompressionCodec);

            var producerRequest1 = new ProducerRequest(topic1, 0, new List <Message> {
                compressedMessage1
            });
            var producerRequest2 = new ProducerRequest(topic2, 0, new List <Message> {
                compressedMessage2
            });

            using (var producer = new SyncProducer(prodConfig))
            {
                producer.Send(producerRequest1);
                producer.Send(producerRequest2);
            }

            // now consuming
            var resultMessages1 = new List <Message>();
            var resultMessages2 = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic1, 1 }, { topic2, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                Assert.IsTrue(messages.ContainsKey(topic1));
                Assert.IsTrue(messages.ContainsKey(topic2));

                var sets1 = messages[topic1];
                try
                {
                    foreach (var set in sets1)
                    {
                        foreach (var message in set)
                        {
                            resultMessages1.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }

                var sets2 = messages[topic2];
                try
                {
                    foreach (var set in sets2)
                    {
                        foreach (var message in set)
                        {
                            resultMessages2.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(1, resultMessages1.Count);
            Assert.AreEqual(msg1.ToString(), resultMessages1[0].ToString());

            Assert.AreEqual(1, resultMessages2.Count);
            Assert.AreEqual(msg2.ToString(), resultMessages2[0].ToString());
        }
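
To summarize the compression path this test exercises: CompressionUtils.Compress wraps a whole batch into a single compressed wrapper message, and on the consumer side the streams yield the decompressed inner messages, which is why the assertions above compare against msg1 and msg2 rather than the wrappers. A minimal round-trip sketch using the same calls as the test:

        var batch = new List <Message> {
            new Message(Encoding.UTF8.GetBytes("kafka 1.")),
            new Message(Encoding.UTF8.GetBytes("kafka 2."))
        };
        Message wrapper = CompressionUtils.Compress(batch, CompressionCodecs.GZIPCompressionCodec);

        // send the wrapper with a SyncProducer as above; the consumer streams
        // transparently decompress it and iterate the inner messages one by one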