        public void TestMessageSizeTooLarge()
        {
            var props = TestUtils.GetSyncProducerConfig(this.Configs[0].Port);

            var producer = new SyncProducer(props);

            AdminUtils.CreateTopic(this.ZkClient, "test", 1, 1, new Dictionary <string, string>());
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, "test", 0, 500);
            TestUtils.WaitUntilMetadataIsPropagated(Servers, "test", 0, 2000);

            var message1    = new Message(new byte[Configs[0].MessageMaxBytes + 1]);
            var messageSet1 = new ByteBufferMessageSet(CompressionCodecs.NoCompressionCodec, new List <Message> {
                message1
            });
            var response1 = producer.Send(TestUtils.ProduceRequest("test", 0, messageSet1, acks: 1));

            Assert.Equal(1, response1.Status.Count(kvp => kvp.Value.Error != ErrorMapping.NoError));
            Assert.Equal(ErrorMapping.MessageSizeTooLargeCode, response1.Status[new TopicAndPartition("test", 0)].Error);
            Assert.Equal(-1L, response1.Status[new TopicAndPartition("test", 0)].Offset);

            var safeSize    = Configs[0].MessageMaxBytes - Message.MessageOverhead - MessageSet.LogOverhead - 1;
            var message2    = new Message(new byte[safeSize]);
            var messageSet2 = new ByteBufferMessageSet(
                CompressionCodecs.NoCompressionCodec, new List <Message> {
                message2
            });
            var response2 = producer.Send(TestUtils.ProduceRequest("test", 0, messageSet2, acks: 1));

            Assert.Equal(0, response2.Status.Count(kvp => kvp.Value.Error != ErrorMapping.NoError));
            Assert.Equal(ErrorMapping.NoError, response2.Status[new TopicAndPartition("test", 0)].Error);
            Assert.Equal(0, response2.Status[new TopicAndPartition("test", 0)].Offset);
        }
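        // A minimal sketch (not part of the original test): the "safe size" computed above could be
        // factored into a helper so other size-boundary tests can reuse it. It relies only on the
        // Message.MessageOverhead and MessageSet.LogOverhead constants the test already uses.
        private static int MaxSafePayloadSize(int messageMaxBytes)
        {
            // largest payload that still fits once per-message and per-log-entry overhead is subtracted
            return messageMaxBytes - Message.MessageOverhead - MessageSet.LogOverhead - 1;
        }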
        public void ProducersSendMessagesToDifferentPartitionsAndConsumerConnectorGetsThemBack()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            // first producing
            string payload1 = "kafka 1.";

            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
            var    msg1         = new Message(payloadData1);

            string payload2 = "kafka 2.";

            byte[] payloadData2 = Encoding.UTF8.GetBytes(payload2);
            var    msg2         = new Message(payloadData2);

            using (var producer = new SyncProducer(prodConfig))
            {
                var producerRequest1 = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                    msg1
                });
                producer.Send(producerRequest1);
                var producerRequest2 = new ProducerRequest(CurrentTestTopic, 1, new List <Message> {
                    msg2
                });
                producer.Send(producerRequest2);
            }

            // now consuming
            var resultMessages = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];
                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            resultMessages.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(2, resultMessages.Count);
            Assert.AreEqual(msg1.ToString(), resultMessages[0].ToString());
            Assert.AreEqual(msg2.ToString(), resultMessages[1].ToString());
        }
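        // A hedged sketch (an assumption, not code from the source): the consume-until-timeout loop above,
        // which recurs in most of the tests below, could be extracted into a helper along these lines.
        // It only uses APIs already shown here: CreateMessageStreams and ConsumerTimeoutException.
        private static List<Message> DrainMessages(IConsumerConnector consumerConnector, string topic)
        {
            var result = new List<Message>();
            var streams = consumerConnector.CreateMessageStreams(new Dictionary<string, int> { { topic, 1 } });
            try
            {
                foreach (var stream in streams[topic])
                {
                    foreach (var message in stream)
                    {
                        result.Add(message);
                    }
                }
            }
            catch (ConsumerTimeoutException)
            {
                // expected once no more messages arrive within the consumer timeout
            }
            return result;
        }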
        public void OneMessageIsSentAndReceivedThenExceptionsWhenNoMessageThenAnotherMessageIsSentAndReceived()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            // first producing
            string payload1 = "kafka 1.";

            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
            var    msg1         = new Message(payloadData1);

            using (var producer = new SyncProducer(prodConfig))
            {
                var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                    msg1
                });
                producer.Send(producerRequest);

                // now consuming
                using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
                {
                    var topicCount = new Dictionary <string, int> {
                        { CurrentTestTopic, 1 }
                    };
                    var messages = consumerConnector.CreateMessageStreams(topicCount);
                    var sets     = messages[CurrentTestTopic];
                    KafkaMessageStream myStream = sets[0];
                    var enumerator = myStream.GetEnumerator();

                    Assert.IsTrue(enumerator.MoveNext());
                    Assert.AreEqual(msg1.ToString(), enumerator.Current.ToString());

                    Assert.Throws <ConsumerTimeoutException>(() => enumerator.MoveNext());

                    Assert.Throws <IllegalStateException>(() => enumerator.MoveNext()); // iterator is in failed state

                    enumerator.Reset();

                    // producing again
                    string payload2     = "kafka 2.";
                    byte[] payloadData2 = Encoding.UTF8.GetBytes(payload2);
                    var    msg2         = new Message(payloadData2);

                    var producerRequest2 = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                        msg2
                    });
                    producer.Send(producerRequest2);
                    Thread.Sleep(3000);

                    Assert.IsTrue(enumerator.MoveNext());
                    Assert.AreEqual(msg2.ToString(), enumerator.Current.ToString());
                }
            }
        }
        public void TestReachableServer()
        {
            var props      = TestUtils.GetSyncProducerConfig(this.Configs[0].Port);
            var producer   = new SyncProducer(props);
            var firstStart = DateTime.Now;
            {
                var response =
                    producer.Send(
                        TestUtils.ProduceRequest(
                            "test",
                            0,
                            new ByteBufferMessageSet(
                                CompressionCodecs.NoCompressionCodec, new List <Message> {
                    new Message(messageBytes)
                }),
                            acks: 1));
                Assert.NotNull(response);
            }

            var firstEnd = DateTime.Now;

            Assert.True(firstEnd - firstStart < TimeSpan.FromMilliseconds(500));
            var secondStart = DateTime.Now;
            {
                var response =
                    producer.Send(
                        TestUtils.ProduceRequest(
                            "test",
                            0,
                            new ByteBufferMessageSet(
                                CompressionCodecs.NoCompressionCodec, new List <Message> {
                    new Message(messageBytes)
                }),
                            acks: 1));
                Assert.NotNull(response);
            }
            var secondEnd = DateTime.Now;

            Assert.True(secondEnd - secondStart < TimeSpan.FromMilliseconds(500));
            {
                var response =
                    producer.Send(
                        TestUtils.ProduceRequest(
                            "test",
                            0,
                            new ByteBufferMessageSet(
                                CompressionCodecs.NoCompressionCodec, new List <Message> {
                    new Message(this.messageBytes)
                }),
                            acks: 1));
                Assert.NotNull(response);
            }
        }
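        // A sketch under the assumption that finer-grained timing is wanted: DateTime.Now has coarse
        // resolution, so the 500 ms bounds above could instead be measured with System.Diagnostics.Stopwatch,
        // for example via a helper like this (not part of the original test). Usage, where messageSet is any
        // ByteBufferMessageSet built as in the test above:
        //   Assert.True(TimeSend(producer, TestUtils.ProduceRequest("test", 0, messageSet, acks: 1)) < TimeSpan.FromMilliseconds(500));
        private static TimeSpan TimeSend(SyncProducer producer, ProducerRequest request)
        {
            var stopwatch = System.Diagnostics.Stopwatch.StartNew();
            producer.Send(request); // the response is ignored; the caller only checks latency
            stopwatch.Stop();
            return stopwatch.Elapsed;
        }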
        public void TestProduceCorrectlyReceivesResponse()
        {
            var props = TestUtils.GetSyncProducerConfig(this.Configs[0].Port);

            var producer = new SyncProducer(props);
            var messages = new ByteBufferMessageSet(
                CompressionCodecs.NoCompressionCodec, new List <Message> {
                new Message(this.messageBytes)
            });

            // #1 - test that we get an error in the response when the partition does not belong to the broker
            var request = TestUtils.ProduceRequestWithAcks(
                new List <string> {
                "topic1", "topic2", "topic3"
            }, new List <int> {
                0
            }, messages, 1);
            var response = producer.Send(request);

            Assert.NotNull(response);
            Assert.Equal(request.CorrelationId, response.CorrelationId);
            Assert.Equal(3, response.Status.Count);

            foreach (var responseStatus in response.Status.Values)
            {
                Assert.Equal(ErrorMapping.UnknownTopicOrPartitionCode, responseStatus.Error);
                Assert.Equal(-1L, responseStatus.Offset);
            }

            // #2 - test that we get correct offsets when the partition is owned by the broker
            AdminUtils.CreateTopic(this.ZkClient, "topic1", 1, 1, new Dictionary <string, string>());
            AdminUtils.CreateTopic(this.ZkClient, "topic3", 1, 1, new Dictionary <string, string>());
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, "topic3", 0, 5000);
            TestUtils.WaitUntilMetadataIsPropagated(Servers, "topic3", 0, 2000);

            var response2 = producer.Send(request);

            Assert.NotNull(response2);
            Assert.Equal(request.CorrelationId, response2.CorrelationId);
            Assert.Equal(3, response2.Status.Count);

            // the first and last messages should have been accepted by the broker
            Assert.Equal(ErrorMapping.NoError, response2.Status[new TopicAndPartition("topic1", 0)].Error);
            Assert.Equal(ErrorMapping.NoError, response2.Status[new TopicAndPartition("topic3", 0)].Error);
            Assert.Equal(0, response2.Status[new TopicAndPartition("topic1", 0)].Offset);
            Assert.Equal(0, response2.Status[new TopicAndPartition("topic3", 0)].Offset);

            // the middle message should have been rejected because the broker doesn't lead the partition
            Assert.Equal(ErrorMapping.UnknownTopicOrPartitionCode, response2.Status[new TopicAndPartition("topic2", 0)].Error);
            Assert.Equal(-1L, response2.Status[new TopicAndPartition("topic2", 0)].Offset);
        }
        public void SimpleSyncProducerSendsLotsOfMessagesIncreasingTheSizeAndConsumerConnectorGetsThemBack()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;
            var consConf       = this.ConsumerConfig1;

            consumerConfig.AutoCommitInterval = 1000;
            int    numberOfMessagesToSend = 2000;
            string topic = CurrentTestTopic;

            var msgList = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessagesToSend; i++)
                {
                    string payload     = CreatePayloadByNumber(i);
                    byte[] payloadData = Encoding.UTF8.GetBytes(payload);
                    var    msg         = new Message(payloadData);
                    msgList.Add(msg);
                    var producerRequest = new ProducerRequest(topic, 0, new List <Message>()
                    {
                        msg
                    });
                    producer.Send(producerRequest);
                }
            }

            Thread.Sleep(3000);

            // now consuming
            int messageNumberCounter = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[topic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(CreatePayloadByNumber(messageNumberCounter), Encoding.UTF8.GetString(message.Payload));
                            messageNumberCounter++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessagesToSend, messageNumberCounter);
        }
        private int SendMessages(int messagesPerNode, List <SyncProducerConfiguration> configs)
        {
            var count = 0;

            foreach (var syncProducerConfiguration in configs)
            {
                using (var producer = new SyncProducer(syncProducerConfiguration))
                {
                    var messageList = new List <Message>();
                    for (int i = 0; i < messagesPerNode; i++)
                    {
                        string payload1     = "kafka " + i.ToString();
                        byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                        var    msg1         = new Message(payloadData1);
                        messageList.Add(msg1);
                    }

                    var mSet    = new BufferedMessageSet(CompressionCodecs.NoCompressionCodec, messageList);
                    var request = new ProducerRequest(this.CurrentTestTopic, 0, mSet);
                    producer.Send(request);
                    count += mSet.Messages.Count();
                }
            }
            return count;
        }
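        // Usage sketch (an assumption; the surrounding fixture may expose different members):
        //   var produced = this.SendMessages(10, new List<SyncProducerConfiguration> { this.SyncProducerConfig1 });
        //   Assert.AreEqual(10, produced); // one configuration in the list, ten messages per node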
        public void MaxFetchSizeBugShouldNotAppearWhenSmallFetchSizeAndSingleMessageSmallerThanFetchSize()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize          = 256;
            consumerConfig.NumberOfTries      = 1;
            consumerConfig.AutoCommitInterval = 1000;
            int    numberOfMessagesToSend = 100;
            string topic = CurrentTestTopic;

            var msgList = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessagesToSend; i++)
                {
                    string payload     = CreatePayloadByNumber(i + 100);
                    byte[] payloadData = Encoding.UTF8.GetBytes(payload);
                    var    msg         = new Message(payloadData);
                    msgList.Add(msg);
                    var producerRequest = new ProducerRequest(topic, 0, new List <Message>()
                    {
                        msg
                    });
                    producer.Send(producerRequest);
                }
            }

            // now consuming
            int messageNumberCounter = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[topic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(CreatePayloadByNumber(messageNumberCounter + 100), Encoding.UTF8.GetString(message.Payload));
                            messageNumberCounter++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessagesToSend, messageNumberCounter);
        }
        public void SimpleSyncProducerSendsLotsOfMessagesAndConsumerConnectorGetsThemBackWithMaxQueuedChunksRefillCheck()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 100;
            int numberOfMessages = 1000;

            List <Message> messagesToSend = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    string payload1     = "kafka 1.";
                    byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                    var    msg          = new Message(payloadData1);
                    messagesToSend.Add(msg);
                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        msg
                    });
                }
            }

            Thread.Sleep(2000);

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                Thread.Sleep(5000);

                var queues =
                    ReflectionHelper.GetInstanceField
                    <IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > >("queues",
                                                                                                    consumerConnector);
                var queue = queues.First().Value;

                Assert.AreEqual(ConsumerConfiguration.DefaultMaxQueuedChunks, queue.Count);

                var sets     = messages[CurrentTestTopic];
                var firstSet = sets[0];
                firstSet.Take(5).ToList(); // materialize the Take so at least one chunk is actually pulled from the queue

                Thread.Sleep(2000);        // a new chunk should be immediately inserted into the queue

                // the queue should refill to the default max amount of chunks
                Assert.AreEqual(ConsumerConfiguration.DefaultMaxQueuedChunks, queue.Count);
            }
        }
        public void TestMessageSizeTooLargeWithAckZero()
        {
            var props = TestUtils.GetSyncProducerConfig(this.Configs[0].Port);

            props.RequestRequiredAcks = 0;

            var producer = new SyncProducer(props);

            AdminUtils.CreateTopic(this.ZkClient, "test", 1, 1, new Dictionary <string, string>());
            TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, "test", 0, 500);

            // This message will be dropped silently since its size exceeds the broker's maximum message size.
            producer.Send(
                TestUtils.ProduceRequest(
                    "test",
                    0,
                    new ByteBufferMessageSet(
                        CompressionCodecs.NoCompressionCodec,
                        new List <Message> {
                new Message(new byte[Configs[0].MessageMaxBytes + 1])
            })));

            // Send another message whose size is large enough to exceed the buffer size so
            // the socket buffer will be flushed immediately;
            // this send should fail since the socket has been closed
            try
            {
                producer.Send(
                    TestUtils.ProduceRequest(
                        "test",
                        0,
                        new ByteBufferMessageSet(
                            CompressionCodecs.NoCompressionCodec,
                            new List <Message> {
                    new Message(new byte[Configs[0].MessageMaxBytes + 1])
                })));
            }
            catch (IOException)
            {
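                // expected: the broker has already closed the socket after the oversized acks=0 send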
            }
        }
        public void SimpleSyncProducerSendsLotsOfMessagesAndConsumerConnectorGetsThemBackWithMaxQueuedChunksCheck(int? maxQueuedChunks)
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 100;
            var consConf         = this.ConsumerConfig1;
            int numberOfMessages = 1000;

            List <Message> messagesToSend = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    string payload1     = "kafka 1.";
                    byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                    var    msg          = new Message(payloadData1);
                    messagesToSend.Add(msg);
                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        msg
                    });
                }
            }

            Thread.Sleep(2000);

            if (maxQueuedChunks.HasValue)
            {
                consumerConfig.MaxQueuedChunks = maxQueuedChunks.Value;
            }
            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                Thread.Sleep(5000);

                var queues =
                    ReflectionHelper.GetInstanceField
                    <IDictionary <Tuple <string, string>, BlockingCollection <FetchedDataChunk> > >("queues",
                                                                                                    consumerConnector);
                var queue = queues.First().Value;

                Assert.AreEqual(maxQueuedChunks ?? ConsumerConfiguration.DefaultMaxQueuedChunks, queue.Count);
            }
        }
        public void TestProduceRequestWithNoResponse()
        {
            var props = TestUtils.GetSyncProducerConfig(this.Configs[0].Port);

            var correlationId = 0;
            var clientId      = SyncProducerConfig.DefaultClientId;
            var ackTimeoutMs  = SyncProducerConfig.DefaultAckTimeout;
            var ack           = (short)0;
            var emptyRequest  = new ProducerRequest(
                correlationId, clientId, ack, ackTimeoutMs, new Dictionary <TopicAndPartition, ByteBufferMessageSet>());
            var producer = new SyncProducer(props);
            var response = producer.Send(emptyRequest);

            Assert.Null(response);
        }
        public void ProducerSendsMessageWithLongTopic()
        {
            var prodConfig = this.SyncProducerConfig1;

            var    msg   = new Message(Encoding.UTF8.GetBytes("test message"));
            string topic = "ThisIsAVeryLongTopicThisIsAVeryLongTopicThisIsAVeryLongTopicThisIsAVeryLongTopicThisIsAVeryLongTopicThisIsAVeryLongTopic";

            using (var producer = new SyncProducer(prodConfig))
            {
                var producerRequest = new ProducerRequest(topic, 0, new List <Message> {
                    msg
                });
                producer.Send(producerRequest);
            }
        }
        public void TestEmptyProduceRequest()
        {
            var props = TestUtils.GetSyncProducerConfig(this.Configs[0].Port);

            var   correlationId = 0;
            var   clientId      = SyncProducerConfig.DefaultClientId;
            var   acktimeoutMs  = SyncProducerConfig.DefaultAckTimeout;
            short ack           = 1;
            var   emptyResult   = new ProducerRequest(
                correlationId, clientId, ack, acktimeoutMs, new Dictionary <TopicAndPartition, ByteBufferMessageSet>());

            var producer = new SyncProducer(props);
            var response = producer.Send(emptyResult);

            Assert.NotNull(response);
            Assert.False(response.HasError());
            Assert.Equal(0, response.Status.Count());
        }
        public void ProducerSendsAndConsumerReceivesSingleSimpleMessage()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ConsumerConfig1;

            var  sourceMessage = new Message(Encoding.UTF8.GetBytes("test message"));
            long currentOffset = TestHelper.GetCurrentKafkaOffset(CurrentTestTopic, consumerConfig);

            using (var producer = new SyncProducer(prodConfig))
            {
                var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                    sourceMessage
                });
                producer.Send(producerRequest);
            }

            IConsumer          consumer = new Consumer(consumerConfig);
            var                request  = new FetchRequest(CurrentTestTopic, 0, currentOffset);
            BufferedMessageSet response;
            int                totalWaitTimeInMiliseconds = 0;
            int                waitSingle = 100;

            while (true)
            {
                Thread.Sleep(waitSingle);
                response = consumer.Fetch(request);
                if (response != null && response.Messages.Count() > 0)
                {
                    break;
                }

                totalWaitTimeInMiliseconds += waitSingle;
                if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds)
                {
                    break;
                }
            }

            Assert.NotNull(response);
            Assert.AreEqual(1, response.Messages.Count());
            Message resultMessage = response.Messages.First();

            Assert.AreEqual(sourceMessage.ToString(), resultMessage.ToString());
        }
        public void ShouldThrowMessageTooLarge()
        {
            var connection = new Mock <IKafkaConnection>();
            var config     = new SyncProducerConfiguration {
                MaxMessageSize = 99
            };
            var producer = new SyncProducer(config, connection.Object);

            producer.Send(new ProducerRequest(1, "client", 0, 0, new List <TopicData>()
            {
                new TopicData("test",
                              new List <PartitionData>()
                {
                    new PartitionData(0, new BufferedMessageSet(new List <Message>()
                    {
                        new Message(new byte[100])
                    }, 0))
                })
            }));
        }
        public void ProducerSendsMessage()
        {
            var prodConfig = this.SyncProducerConfig1;

            string payload1 = "kafka 1.";

            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
            var    msg1         = new Message(payloadData1);

            string payload2 = "kafka 2.";

            byte[] payloadData2 = Encoding.UTF8.GetBytes(payload2);
            var    msg2         = new Message(payloadData2);

            using (var producer = new SyncProducer(prodConfig))
            {
                var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                    msg1, msg2
                });
                producer.Send(producerRequest);
            }
        }
        public void SimpleSyncProducerSends2MessagesAndConsumerConnectorGetsThemBack()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;
            var consConf       = this.ConsumerConfig1;

            // first producing
            string payload1 = "kafka 1.";

            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
            var    msg1         = new Message(payloadData1);

            string payload2 = "kafka 2.";

            byte[] payloadData2 = Encoding.UTF8.GetBytes(payload2);
            var    msg2         = new Message(payloadData2);

            var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                msg1, msg2
            });

            using (var producer = new SyncProducer(prodConfig))
            {
                producer.Send(producerRequest);
            }

            var  consumer = new Consumer(consConf);
            long offset   = 0;
            var  result   = consumer.Fetch(
                new FetchRequest(CurrentTestTopic, 0, offset, 400));

            foreach (var resultItem in result)
            {
                offset += resultItem.Offset;
            }

            Thread.Sleep(3000);

            // now consuming
            var resultMessages = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];
                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            resultMessages.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(2, resultMessages.Count);
            Assert.AreEqual(msg1.ToString(), resultMessages[0].ToString());
            Assert.AreEqual(msg2.ToString(), resultMessages[1].ToString());
        }
        public void SimpleSyncProducerSendsLotsOfTwiceCompressedMessagesAndConsumerConnectorGetsThemBack()
        {
            var prodConfig              = this.SyncProducerConfig1;
            var consumerConfig          = this.ZooKeeperBasedConsumerConfig;
            int numberOfMessages        = 500;
            int messagesPerPackage      = 5;
            int messageSize             = 0;
            int messagesPerInnerPackage = 5;

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    var messagePackageList = new List <Message>();
                    for (int messageInPackageNr = 0; messageInPackageNr < messagesPerPackage; messageInPackageNr++)
                    {
                        var innerMessagePackageList = new List <Message>();
                        for (int inner = 0; inner < messagesPerInnerPackage; inner++)
                        {
                            string payload1     = "kafka 1.";
                            byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                            var    msg          = new Message(payloadData1);
                            innerMessagePackageList.Add(msg);
                        }
                        var innerPackageMessage = CompressionUtils.Compress(innerMessagePackageList, CompressionCodecs.GZIPCompressionCodec);
                        messagePackageList.Add(innerPackageMessage);
                    }
                    var packageMessage = CompressionUtils.Compress(messagePackageList, CompressionCodecs.GZIPCompressionCodec);

                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        packageMessage
                    });
                }
            }

            Thread.Sleep(2000);

            // now consuming
            int resultCount = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            resultCount++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessages * messagesPerPackage * messagesPerInnerPackage, resultCount);
        }
        public void SimpleSyncProducerSendsLotsOfCompressedMessagesWithIncreasedSizeAndConsumerConnectorGetsThemBack()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.AutoCommit         = true;
            consumerConfig.AutoCommitInterval = 100;
            int    numberOfMessages   = 2000;
            int    messagesPerPackage = 5;
            string topic = CurrentTestTopic;

            var multipleBrokersHelper = new TestMultipleBrokersHelper(CurrentTestTopic);

            multipleBrokersHelper.GetCurrentOffsets(
                new[] { prodConfig });

            int  msgNr     = 0;
            long totalSize = 0;

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    var messagePackageList = new List <Message>();
                    for (int messageInPackageNr = 0; messageInPackageNr < messagesPerPackage; messageInPackageNr++)
                    {
                        string payload1     = CreatePayloadByNumber(msgNr);
                        byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                        var    msg          = new Message(payloadData1);
                        totalSize += msg.Size;
                        messagePackageList.Add(msg);
                        msgNr++;
                    }
                    var packageMessage = CompressionUtils.Compress(messagePackageList, CompressionCodecs.GZIPCompressionCodec);
                    producer.Send(topic, 0, new List <Message>()
                    {
                        packageMessage
                    });
                }
            }

            // now consuming
            int  resultCount = 0;
            long resultSize  = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[topic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(CreatePayloadByNumber(resultCount), Encoding.UTF8.GetString(message.Payload));
                            resultCount++;
                            resultSize += message.Size;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessages * messagesPerPackage, resultCount);
            Assert.AreEqual(totalSize, resultSize);
        }
        public void SimpleSyncProducerSendsLotsOfMessagesAndConsumerConnectorGetsThemBackWithVerySmallAutoCommitInterval()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.AutoCommit         = true;
            consumerConfig.AutoCommitInterval = 10;
            int numberOfMessages = 500;
            int messageSize      = 0;

            List <Message> messagesToSend = new List <Message>();

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < numberOfMessages; i++)
                {
                    string payload1     = "kafka 1.";
                    byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                    var    msg          = new Message(payloadData1);
                    messagesToSend.Add(msg);
                    if (i == 0)
                    {
                        messageSize = msg.Size;
                    }
                    producer.Send(CurrentTestTopic, 0, new List <Message>()
                    {
                        msg
                    });
                }
            }

            Thread.Sleep(2000);

            // now consuming
            int resultCount = 0;

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            Assert.AreEqual(messageSize, message.Size);
                            resultCount++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(numberOfMessages, resultCount);
        }
        public void ProducerSendsAndConsumerReceivesLotsOfMessagesManyFetchesAndOffsetsShouldBeCorrect()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ConsumerConfig1;

            var  sourceMessage = new Message(Encoding.UTF8.GetBytes("test message"));
            long currentOffset = TestHelper.GetCurrentKafkaOffset(CurrentTestTopic, consumerConfig);

            int nrOfMessages = 1000;

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < nrOfMessages; i++)
                {
                    var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                        sourceMessage
                    });
                    producer.Send(producerRequest);
                }
            }

            IConsumer consumer            = new Consumer(consumerConfig);
            int       messagesCounter     = 0;
            long      totalSizeDownloaded = 0;

            while (messagesCounter < nrOfMessages)
            {
                var request = new FetchRequest(CurrentTestTopic, 0, currentOffset);
                BufferedMessageSet response;
                int totalWaitTimeInMiliseconds = 0;
                int waitSingle = 100;
                while (true)
                {
                    Thread.Sleep(waitSingle);
                    response = consumer.Fetch(request);
                    if (response != null && response.Messages.Count() > 0)
                    {
                        break;
                    }

                    totalWaitTimeInMiliseconds += waitSingle;
                    if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds)
                    {
                        break;
                    }
                }

                Assert.NotNull(response);
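                // The extra 4 bytes presumably account for the size prefix stored before each message in the log,
                // so each fetched entry advances the offset by 4 + Message.Size.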
                long currentCheckOffset = currentOffset + 4 + sourceMessage.Size;
                while (response.MoveNext())
                {
                    Assert.AreEqual(currentCheckOffset, response.Current.Offset);
                    currentCheckOffset += 4 + response.Current.Message.Size;
                    messagesCounter++;
                    currentOffset        = response.Current.Offset;
                    totalSizeDownloaded += response.Current.Message.Size + 4;
                }
            }

            Assert.AreEqual(nrOfMessages, messagesCounter);
            Assert.AreEqual(nrOfMessages * (4 + sourceMessage.Size), totalSizeDownloaded);
        }
        public void ConsumerConnectorConsumesTwoDifferentCompressedTopics()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            string topic1 = CurrentTestTopic + "1";
            string topic2 = CurrentTestTopic + "2";

            // first producing
            string payload1 = "kafka 1.";

            byte[]  payloadData1       = Encoding.UTF8.GetBytes(payload1);
            var     msg1               = new Message(payloadData1);
            Message compressedMessage1 = CompressionUtils.Compress(new List <Message> {
                msg1
            }, CompressionCodecs.GZIPCompressionCodec);

            string payload2 = "kafka 2.";

            byte[]  payloadData2       = Encoding.UTF8.GetBytes(payload2);
            var     msg2               = new Message(payloadData2);
            Message compressedMessage2 = CompressionUtils.Compress(new List <Message> {
                msg2
            }, CompressionCodecs.GZIPCompressionCodec);

            var producerRequest1 = new ProducerRequest(topic1, 0, new List <Message> {
                compressedMessage1
            });
            var producerRequest2 = new ProducerRequest(topic2, 0, new List <Message> {
                compressedMessage2
            });

            using (var producer = new SyncProducer(prodConfig))
            {
                producer.Send(producerRequest1);
                producer.Send(producerRequest2);
            }

            // now consuming
            var resultMessages1 = new List <Message>();
            var resultMessages2 = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { topic1, 1 }, { topic2, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);

                Assert.IsTrue(messages.ContainsKey(topic1));
                Assert.IsTrue(messages.ContainsKey(topic2));

                var sets1 = messages[topic1];
                try
                {
                    foreach (var set in sets1)
                    {
                        foreach (var message in set)
                        {
                            resultMessages1.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }

                var sets2 = messages[topic2];
                try
                {
                    foreach (var set in sets2)
                    {
                        foreach (var message in set)
                        {
                            resultMessages2.Add(message);
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }

            Assert.AreEqual(1, resultMessages1.Count);
            Assert.AreEqual(msg1.ToString(), resultMessages1[0].ToString());

            Assert.AreEqual(1, resultMessages2.Count);
            Assert.AreEqual(msg2.ToString(), resultMessages2[0].ToString());
        }
        public void WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated()
        {
            var prodConfig     = this.SyncProducerConfig1;
            var consumerConfig = this.ZooKeeperBasedConsumerConfig;

            consumerConfig.FetchSize = 256;

            int originalNrOfMessages = 3000;

            using (var producer = new SyncProducer(prodConfig))
            {
                for (int i = 0; i < originalNrOfMessages; i++)
                {
                    var sourceMessage   = new Message(Encoding.UTF8.GetBytes("test message" + i));
                    var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List <Message> {
                        sourceMessage
                    });
                    producer.Send(producerRequest);
                }
            }

            BackgroundWorker bw1 = new BackgroundWorker();

            bw1.WorkerSupportsCancellation = true;
            bw1.DoWork += new DoWorkEventHandler(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_DoWork);
            int runBw1AfterNIterations = 50;

            BackgroundWorker bw2 = new BackgroundWorker();

            bw2.WorkerSupportsCancellation = true;
            bw2.DoWork += new DoWorkEventHandler(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_DoWork);
            int runBw2AfterNIterations = 150;

            // now consuming
            int           messageNumberCounter = 0;
            StringBuilder sb = new StringBuilder();
            var           receivedMessages = new List <Message>();

            using (IConsumerConnector consumerConnector = new ZookeeperConsumerConnector(consumerConfig, true))
            {
                var topicCount = new Dictionary <string, int> {
                    { CurrentTestTopic, 1 }
                };
                var messages = consumerConnector.CreateMessageStreams(topicCount);
                var sets     = messages[CurrentTestTopic];

                try
                {
                    foreach (var set in sets)
                    {
                        foreach (var message in set)
                        {
                            receivedMessages.Add(message);
                            if (messageNumberCounter == runBw1AfterNIterations)
                            {
                                bw1.RunWorkerAsync();
                            }
                            if (messageNumberCounter == runBw2AfterNIterations)
                            {
                                bw2.RunWorkerAsync();
                            }
                            var msgString = Encoding.UTF8.GetString(message.Payload);
                            sb.AppendLine(msgString);
                            messageNumberCounter++;
                        }
                    }
                }
                catch (ConsumerTimeoutException)
                {
                    // do nothing, this is expected
                }
            }
            int finishedThreads             = 0;
            var receivedFromBackgroundSoFar = new List <Message>();

            // Poll until both background consumers report completion, or until the messages seen here plus
            // those seen by the background threads cover all of the originally produced messages.
            while (WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsDoneNr < 2 &&
                   (messageNumberCounter + WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsResultCounter < originalNrOfMessages))
            {
                lock (WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsResultCounterLock)
                {
                    finishedThreads =
                        WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsDoneNr;
                    receivedFromBackgroundSoFar.Clear();
                    receivedFromBackgroundSoFar.AddRange(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsReceivedMessages);
                }
                if (finishedThreads >= 2 || (receivedMessages.Count + receivedFromBackgroundSoFar.Count) >= originalNrOfMessages)
                {
                    break;
                }
                Thread.Sleep(1000);
            }
            using (StreamWriter outfile = new StreamWriter("ConsumerTestDumpMain.txt"))
            {
                outfile.Write(sb.ToString());
            }
            receivedMessages.AddRange(WhenConsumerConsumesAndLaterOthersJoinAndRebalanceOccursThenMessagesShouldNotBeDuplicated_BackgorundThreadsReceivedMessages);
            HashSet <string> resultSet = new HashSet <string>();
            int nrOfDuplicates         = 0;

            foreach (var receivedMessage in receivedMessages)
            {
                var msgString = Encoding.UTF8.GetString(receivedMessage.Payload);
                if (resultSet.Contains(msgString))
                {
                    nrOfDuplicates++;
                }
                else
                {
                    resultSet.Add(msgString);
                }
            }

            int totalMessagesFromAllThreads = receivedMessages.Count;

            Assert.AreEqual(originalNrOfMessages, totalMessagesFromAllThreads);

            Assert.AreEqual(0, nrOfDuplicates);
        }