// Example #1
        /// <summary>
        /// Integration test: after consuming 20 freshly produced messages, the consumer's
        /// reported offset positions should have advanced by exactly 20 in total
        /// relative to the positions captured before producing.
        /// </summary>
        public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
        {
            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                //snapshot the head position of every partition before producing anything
                var initialPositions = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
                    .Select(offset => new OffsetPosition(offset.PartitionId, offset.Offsets.Max()))
                    .ToArray();

                using (var consumer = new BlockingConsumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), initialPositions))
                {
                    foreach (var index in Enumerable.Range(0, 20))
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(index.ToString(), "1") }).Wait();
                    }

                    var received = consumer.Consume().Take(20).ToList();

                    //ensure the produced messages arrived
                    foreach (var index in Enumerable.Range(0, 20))
                    {
                        Assert.That(received[index].Value.ToUtf8String(), Is.EqualTo(index.ToString()));
                    }

                    //the current offsets should be 20 positions higher than start
                    var finalPositions = consumer.GetOffsetPosition();
                    Assert.That(finalPositions.Sum(p => p.Offset) - initialPositions.Sum(p => p.Offset), Is.EqualTo(20));
                }
            }
        }
// Example #2
        /// <summary>
        /// Integration test: with a consumer buffer size of 1 the upstream fetch should
        /// block rather than drop messages; all 20 produced messages must still arrive
        /// in order with the expected key.
        /// </summary>
        public void ConsumerShouldNotLoseMessageWhenBlocked()
        {
            var testId = Guid.NewGuid().ToString();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                var startPositions = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
                    .Select(offset => new OffsetPosition(offset.PartitionId, offset.Offsets.Max()))
                    .ToArray();

                //create consumer with buffer size of 1 (should block upstream)
                var consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router)
                {
                    ConsumerBufferSize = 1
                };

                using (var consumer = new BlockingConsumer(consumerOptions, startPositions))
                {
                    for (var produced = 0; produced < 20; produced++)
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(produced.ToString(), testId) }).Wait();
                    }

                    for (var consumed = 0; consumed < 20; consumed++)
                    {
                        var message = consumer.Consume().Take(1).First();
                        Assert.That(message.Key.ToUtf8String(), Is.EqualTo(testId));
                        Assert.That(message.Value.ToUtf8String(), Is.EqualTo(consumed.ToString()));
                    }
                }
            }
        }
        /// <summary>
        /// Cancelling the token passed to Consume should abort the blocking enumeration;
        /// the task's Wait surfaces that as an AggregateException wrapping an
        /// OperationCanceledException.
        /// </summary>
        public void CancellationShouldInterruptConsumption()
        {
            var proxy = new BrokerRouterProxy(new MoqMockingKernel());

            proxy.BrokerConn0.FetchResponseFunction = () => new FetchResponse();

            var options = CreateOptions(proxy.Create());

            using (var consumer = new BlockingConsumer(options))
            {
                var cancellation = new CancellationTokenSource();

                var consumeTask = Task.Run(() => consumer.Consume(cancellation.Token).FirstOrDefault());

                //wait until the fake broker is running and requesting fetches
                TaskTest.WaitFor(() => proxy.BrokerConn0.FetchRequestCallCount > 10);

                cancellation.Cancel();

                var thrown = Assert.Throws<AggregateException>(consumeTask.Wait);
                Assert.That(thrown.InnerException, Is.TypeOf<OperationCanceledException>());
            }
        }
        /// <summary>
        /// Disposing the consumer must also dispose the router it was constructed with.
        /// </summary>
        public void EnsureConsumerDisposesRouter()
        {
            var routerMock = new MoqMockingKernel().GetMock<IBrokerRouter>();

            using (new BlockingConsumer(CreateOptions(routerMock.Object)))
            {
            }

            routerMock.Verify(x => x.Dispose(), Times.Once());
        }
// Example #5
        /// <summary>
        /// Console test harness: starts a background consumer that echoes every message
        /// on the TestHarness topic, then reads lines from stdin and produces them until
        /// the user types "quit". An empty line sends a random batch of 200 messages.
        /// </summary>
        static void Main(string[] args)
        {
            const string topicName = "TestHarness";

            //create an options file that sets up driver preferences
            var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092"))
            {
                Log = new ConsoleLog()
            };

            //start an out of process thread that runs a consumer that will write all received messages to the console
            Task.Run(() =>
            {
                //NOTE(review): this consumer (and the BrokerRouter it wraps) is never disposed;
                //it lives until the process exits when Main returns.
                var consumer = new BlockingConsumer(new ConsumerOptions(topicName, new BrokerRouter(options))
                {
                    Log = new ConsoleLog()
                });
                foreach (var data in consumer.Consume())
                {
                    Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String());
                }
            });

            //create a producer to send messages with; the using block guarantees disposal
            //(and a flush of any buffered batch) even if the read loop throws
            using (var producer = new Producer(new BrokerRouter(options))
            {
                BatchSize      = 100,
                BatchDelayTime = TimeSpan.FromMilliseconds(2000)
            })
            {
                //take in console read messages
                Console.WriteLine("Type a message and press enter...");
                while (true)
                {
                    var message = Console.ReadLine();
                    if (message == "quit")
                    {
                        break;
                    }

                    if (string.IsNullOrEmpty(message))
                    {
                        //send a random batch of messages
                        SendRandomBatch(producer, topicName, 200);
                    }
                    else
                    {
                        //fire-and-forget: the producer batches sends internally, so the
                        //returned task is deliberately not awaited here
                        producer.SendMessageAsync(topicName, new[] { new Message(message) });
                    }
                }
            }
        }
        /// <summary>
        /// With an empty partition whitelist the consumer should spin up one fetch task
        /// per partition; the proxy exposes two, so ConsumerTaskCount must settle at 2.
        /// </summary>
        public void ConsumerShouldCreateTaskForEachBroker()
        {
            var proxy = new BrokerRouterProxy(new MoqMockingKernel());

            proxy.BrokerConn0.FetchResponseFunction = () => new FetchResponse();

            var options = CreateOptions(proxy.Create());
            options.PartitionWhitelist = new List<int>();

            using (var consumer = new BlockingConsumer(options))
            {
                var stream = consumer.Consume();
                TaskTest.WaitFor(() => consumer.ConsumerTaskCount >= 2);

                Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(2));
            }
        }
// Example #7
        /// <summary>
        /// Integration test: after consuming 20 messages, calling SetOffsetPosition with the
        /// original start offsets should replay the exact same 20 messages in the same order.
        /// </summary>
        public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
        {
            var expected = new List <string> {
                "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19"
            };
            var testId = Guid.NewGuid().ToString();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
                using (var producer = new Producer(router))
                {
                    var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
                                  .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

                    using (var consumer = new BlockingConsumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), offsets))
                    {
                        for (int i = 0; i < 20; i++)
                        {
                            producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
                        }

                        var sentMessages = consumer.Consume().Take(20).ToList();

                        //ensure the produced messages arrived
                        Console.WriteLine("Message order:  {0}", string.Join(", ", sentMessages.Select(x => x.Value.ToUtf8String()).ToList()));

                        Assert.That(sentMessages.Count, Is.EqualTo(20));
                        Assert.That(sentMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected));
                        Assert.That(sentMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False);

                        //seek back to initial offset
                        consumer.SetOffsetPosition(offsets);

                        var resetPositionMessages = consumer.Consume().Take(20).ToList();

                        //ensure all produced messages arrive again
                        //FIX: log the decoded payload (was x.Value, which printed "System.Byte[]")
                        Console.WriteLine("Message order:  {0}", string.Join(", ", resetPositionMessages.Select(x => x.Value.ToUtf8String()).ToList()));

                        Assert.That(resetPositionMessages.Count, Is.EqualTo(20));
                        Assert.That(resetPositionMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected));
                        Assert.That(resetPositionMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False);
                    }
                }
        }
// Example #8
        /// <summary>
        /// Integration test: messages on the gzip-compressed topic should round-trip through
        /// the consumer and decode to the expected sequential string payloads.
        /// </summary>
        public void EnsureGzipCanDecompressMessageFromKafka()
        {
            var router   = new BrokerRouter(_options);
            var producer = new Producer(router);

            var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;

            var consumer = new BlockingConsumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router),
                                                offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray());

            var results = consumer.Consume().Take(3).ToList();

            for (int i = 0; i < 3; i++)
            {
                //FIX: Value is a byte[]; it must be decoded before comparing against a string
                //(the raw byte[] can never equal i.ToString(), so the assert always failed)
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }

            using (producer)
                using (consumer) { }
        }
// Example #9
        /// <summary>
        /// Integration test: a consumer started at the current head of the log should walk
        /// forward through <c>expectedCount</c> newly produced messages and finish with
        /// offset positions matching what the server reports.
        /// </summary>
        //FIX: was async void — exceptions in an async void test are unobservable by the
        //test runner; async Task lets NUnit await the test and report failures correctly.
        public async Task ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
        {
            const int expectedCount = 1000;
            var       options       = new KafkaOptions(IntegrationConfig.IntegrationUri)
            {
                Log = new ConsoleLog()
            };

            using (var producerRouter = new BrokerRouter(options))
                using (var producer = new Producer(producerRouter))
                {
                    //get current offset and reset consumer to top of log
                    var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);

                    using (var consumerRouter = new BrokerRouter(options))
                        using (var consumer = new BlockingConsumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, consumerRouter),
                                                                   offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
                        {
                            Console.WriteLine("Sending {0} test messages", expectedCount);
                            var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic,
                                                                           Enumerable.Range(0, expectedCount).Select(x => new Message(x.ToString()))).ConfigureAwait(false);

                            Assert.That(response.Any(x => x.Error != (int)ErrorResponseCode.NoError), Is.False, "Error occured sending test messages to server.");

                            var stream = consumer.Consume();

                            Console.WriteLine("Reading message back out from consumer.");
                            var data = stream.Take(expectedCount).ToList();

                            var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.Offset).ToList();
                            var serverOffset   = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);

                            var positionOffset = serverOffset.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
                                                 .OrderBy(x => x.Offset)
                                                 .ToList();

                            Assert.That(consumerOffset, Is.EqualTo(positionOffset), "The consumerOffset position should match the server offset position.");
                            //FIX: message previously hard-coded "2000" while expectedCount is 1000
                            Assert.That(data.Count, Is.EqualTo(expectedCount), string.Format("We should have received {0} messages from the server.", expectedCount));
                        }
                }
        }
        /// <summary>
        /// An empty partition whitelist means "consume everything": both mocked broker
        /// connections should receive fetch requests and one consumer task should exist
        /// per partition.
        /// </summary>
        public void ConsumerWithEmptyWhitelistShouldConsumeAllPartition()
        {
            var proxy = new BrokerRouterProxy(new MoqMockingKernel());

            var options = CreateOptions(proxy.Create());
            options.PartitionWhitelist = new List<int>();

            using (var consumer = new BlockingConsumer(options))
            {
                var stream = consumer.Consume();

                TaskTest.WaitFor(() => consumer.ConsumerTaskCount > 0);
                TaskTest.WaitFor(() => proxy.BrokerConn0.FetchRequestCallCount > 0);
                TaskTest.WaitFor(() => proxy.BrokerConn1.FetchRequestCallCount > 0);

                Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(2), "Consumer should create one consuming thread for each partition.");
                Assert.That(proxy.BrokerConn0.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1), "BrokerConn0 not sent FetchRequest");
                Assert.That(proxy.BrokerConn1.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1), "BrokerConn1 not sent FetchRequest");
            }
        }
        /// <summary>
        /// A whitelist containing only partition 0 should restrict consumption to that
        /// partition: exactly one consumer task, and fetches sent to BrokerConn0 only.
        /// </summary>
        public void ConsumerWhitelistShouldOnlyConsumeSpecifiedPartition()
        {
            var proxy = new BrokerRouterProxy(new MoqMockingKernel());

            proxy.BrokerConn0.FetchResponseFunction = () => new FetchResponse();

            var options = CreateOptions(proxy.Create());
            options.PartitionWhitelist = new List<int> { 0 };

            using (var consumer = new BlockingConsumer(options))
            {
                var stream = consumer.Consume();

                TaskTest.WaitFor(() => consumer.ConsumerTaskCount > 0);
                TaskTest.WaitFor(() => proxy.BrokerConn0.FetchRequestCallCount > 0);

                Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(1), "Consumer should only create one consuming thread for partition 0.");
                Assert.That(proxy.BrokerConn0.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1));
                Assert.That(proxy.BrokerConn1.FetchRequestCallCount, Is.EqualTo(0));
            }
        }