// Connect all the INode events so node-level notifications are routed through
// the cluster: lifecycle/error events are marshalled onto the cluster event
// loop, statistics counters are updated inline, and acknowledgements are
// forwarded to the produce/consume routers. Returns the same node to allow
// fluent chaining at the call site.
private INode ObserveNode(INode node)
{
    // Lifecycle and error events go through OnNodeEvent so they are
    // processed on the cluster's event loop.
    node.Dead += n => OnNodeEvent(() => ProcessDeadNode(n));
    node.ConnectionError += (n, e) => OnNodeEvent(() => ProcessNodeError(n, e));
    node.DecodeError += (n, e) => OnNodeEvent(() => ProcessDecodeError(n, e));
    node.InternalError += (n, e) => OnNodeEvent(() => ProcessNodeError(n, e));

    // Statistics are simple counters and are updated directly.
    node.RequestSent += _ => Statistics.UpdateRequestSent();
    node.ResponseReceived += (n, l) => Statistics.UpdateResponseReceived(GetNodeId(n), l);
    node.ProduceBatchSent += (_, c, s) =>
    {
        Statistics.UpdateRawProduced(c);
        Statistics.UpdateRawProducedBytes(s);
    };
    node.FetchResponseReceived += (_, c, s) =>
    {
        Statistics.UpdateRawReceived(c);
        Statistics.UpdateRawReceivedBytes(s);
    };

    // String interpolation instead of string.Format (same resulting message).
    node.Connected += n => OnNodeEvent(() => Logger.LogInformation($"Connected to {GetNodeName(n)}"));

    // Acknowledgements are forwarded to the relevant router.
    node.ProduceAcknowledgement += (n, ack) => ProduceRouter.Acknowledge(ack);
    node.FetchAcknowledgement += (n, r) => ConsumeRouter.Acknowledge(r);
    node.OffsetAcknowledgement += (n, r) => ConsumeRouter.Acknowledge(r);

    node.NoMoreRequestSlot += n => NodeMaxRequestReached(n);
    node.RequestTimeout += n => Statistics.UpdateRequestTimeout(GetNodeId(n));

    return node;
}
// Checks that starting a consumer group subscription joins the group, issues
// the initial fetch/offset requests, sends heartbeats, and that a fetch
// response triggers both a follow-up fetch and an auto commit.
public void TestConsumer_ConsumerGroupStartConsume()
{
    var mocks = InitCluster();
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });

    mocks.Group.Verify(g => g.Join(It.IsAny<IEnumerable<string>>()), Times.Once);
    mocks.Node.Verify(n => n.Fetch(It.IsAny<FetchMessage>()), Times.Once); // 1 partition with specific offset
    mocks.Node.Verify(n => n.Offset(It.IsAny<OffsetMessage>()), Times.Once); // 1 partition with offset -1

    // Wait on the heartbeat event instead of a fixed Thread.Sleep (less flaky,
    // consistent with TestConsumer_ConsumerGroupRestartConsume).
    WaitOneSecondMaxForEvent("heartbeat", mocks.HeartbeatCalled);
    mocks.Group.Verify(g => g.Heartbeat());

    // Simulate a fetch response containing one message at offset 28.
    consumer.Acknowledge(new CommonAcknowledgement<FetchResponse>
    {
        ReceivedDate = DateTime.UtcNow,
        Response = new FetchResponse
        {
            FetchPartitionResponse = new CommonResponse<FetchPartitionResponse>
            {
                TopicsResponse = new[]
                {
                    new TopicData<FetchPartitionResponse>
                    {
                        TopicName = "the topic",
                        PartitionsData = new[]
                        {
                            new FetchPartitionResponse
                            {
                                Partition = 1,
                                Messages = new List<ResponseMessage>
                                {
                                    new ResponseMessage { Offset = 28, Message = new Message() }
                                }
                            }
                        }
                    }
                }
            }
        }
    });

    mocks.Node.Verify(n => n.Fetch(It.IsAny<FetchMessage>()), Times.Exactly(2)); // response should have triggered one more fetch
    mocks.Group.Verify(g => g.Commit(It.IsAny<IEnumerable<TopicData<OffsetCommitPartitionData>>>())); // should have auto commited

    consumer.Stop().Wait();
}
// Checks that with auto commit disabled (AutoCommitEveryMs = -1) the consumer
// only commits offsets when RequireCommit() is called explicitly, and that the
// committed offset is the next expected offset (last received + 1).
public void TestConsumer_ConsumerGroupCommit() {
    var mocks = InitCluster();
    // Disable auto commit; keep the session timeout short so heartbeats fire quickly.
    mocks.Group.SetupGet(g => g.Configuration)
        .Returns(new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    var consumer = new ConsumeRouter(mocks.Cluster.Object, new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    Thread.Sleep(10); // wait for at least one heartbeat to be sent
    // Simulate a fetch response containing a single message at offset 28.
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse> { ReceivedDate = DateTime.UtcNow, Response = new FetchResponse { FetchPartitionResponse = new CommonResponse <FetchPartitionResponse> { TopicsResponse = new[] { new TopicData <FetchPartitionResponse> { TopicName = "the topic", PartitionsData = new[] { new FetchPartitionResponse { Partition = 1, Messages = new List <ResponseMessage> { new ResponseMessage { Offset = 28, Message = new Message() } } } } } } } } });
    mocks.Group.Verify(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >()), Times.Never); // no auto commit
    consumer.RequireCommit();
    // NOTE(review): the expectation of two PartitionsData entries presumably
    // comes from the partitions assigned in InitCluster — confirm against it.
    mocks.Group.Verify(g => g.Commit(It.Is <IEnumerable <TopicData <OffsetCommitPartitionData> > >(l => l.Count() == 1 && l.First().TopicName == "the topic" && l.First().PartitionsData.Count() == 2 && l.First().PartitionsData.First().Partition == 1 && l.First().PartitionsData.First().Metadata == "" && l.First().PartitionsData.First().Offset == 29)), // Offset saved should be next expected offset
        Times.Once);
}
// Feed the consumer a fetch response with four messages and check that only
// those inside the consumed window [OFFSET, OFFSET + 1] are propagated
// through MessageReceived.
public void TestMessagesAreFilteredAndPropagated()
{
    var nodeMock = new Mock<INode>();
    var clusterMock = new Mock<ICluster>();
    clusterMock.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(new RoutingTable(new Dictionary<string, Partition[]>
        {
            { TOPIC, new[] { new Partition { Id = PARTITION, Leader = nodeMock.Object }, } }
        })));

    var router = new ConsumeRouter(
        clusterMock.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() },
        1);

    // Consume window is [OFFSET, OFFSET + 1].
    router.StartConsume(TOPIC, PARTITION, OFFSET);
    router.StopConsume(TOPIC, PARTITION, OFFSET + 1);

    int receivedCount = 0;
    router.MessageReceived += record =>
    {
        Assert.AreEqual(TOPIC, record.Topic);
        Assert.AreEqual(PARTITION, record.Partition);
        Assert.IsTrue(record.Offset == OFFSET || record.Offset == OFFSET + 1);
        receivedCount += 1;
    };

    var partitionData = new FetchPartitionResponse
    {
        ErrorCode = ErrorCode.NoError,
        Partition = PARTITION,
        HighWatermarkOffset = 432515L,
        Messages = new List<ResponseMessage>
        {
            new ResponseMessage { Offset = OFFSET - 1, Message = new Message() }, // before window: filtered
            new ResponseMessage { Offset = OFFSET, Message = new Message() },
            new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
            new ResponseMessage { Offset = OFFSET + 2, Message = new Message() }, // past window: filtered
        }
    };
    router.Acknowledge(new CommonAcknowledgement<FetchPartitionResponse>
    {
        Response = new CommonResponse<FetchPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData<FetchPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[] { partitionData }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });

    // Only the two in-window messages reached the handler.
    Assert.AreEqual(2, receivedCount);
}
// Checks that after an error-free fetch response the consumer immediately
// issues a new fetch request for each consumed partition, resuming at the
// offset just past the last message received on that partition.
public void TestFetchResponseIsFollowedByFetchRequest_NoError() {
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    // Routing table: TOPIC has two partitions, TOPIC2 one, all led by the same node.
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]> {
                { TOPIC, new[] { new Partition { Id = PARTITION, Leader = node.Object }, new Partition { Id = PARTITION + 1, Leader = node.Object }, } },
                { TOPIC2, new[] { new Partition { Id = PARTITION, Leader = node.Object }, } }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);
    consumer.StartConsume(TOPIC, PARTITION, Offsets.Earliest);
    consumer.StartConsume(TOPIC, PARTITION + 1, Offsets.Earliest);
    consumer.StartConsume(TOPIC2, PARTITION, Offsets.Earliest);
    // Error-free fetch response: TOPIC/PARTITION has messages up to OFFSET + 1,
    // TOPIC/PARTITION+1 up to OFFSET, TOPIC2/PARTITION up to OFFSET + 2.
    consumer.Acknowledge(new CommonAcknowledgement <FetchPartitionResponse> {
        Response = new CommonResponse <FetchPartitionResponse> {
            TopicsResponse = new[] {
                new TopicData <FetchPartitionResponse> {
                    TopicName = TOPIC,
                    PartitionsData = new[] {
                        new FetchPartitionResponse { ErrorCode = ErrorCode.NoError, Partition = PARTITION, HighWatermarkOffset = 432515L, Messages = new List <ResponseMessage> { new ResponseMessage { Offset = OFFSET, Message = new Message() }, new ResponseMessage { Offset = OFFSET + 1, Message = new Message() }, } },
                        new FetchPartitionResponse { ErrorCode = ErrorCode.NoError, Partition = PARTITION + 1, HighWatermarkOffset = 432515L, Messages = new List <ResponseMessage> { new ResponseMessage { Offset = OFFSET, Message = new Message() }, } },
                    }
                },
                new TopicData <FetchPartitionResponse> {
                    TopicName = TOPIC2,
                    PartitionsData = new[] {
                        new FetchPartitionResponse { ErrorCode = ErrorCode.NoError, Partition = PARTITION, HighWatermarkOffset = 432515L, Messages = new List <ResponseMessage> { new ResponseMessage { Offset = OFFSET, Message = new Message() }, new ResponseMessage { Offset = OFFSET + 1, Message = new Message() }, new ResponseMessage { Offset = OFFSET + 2, Message = new Message() }, } },
                    }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });
    // One follow-up fetch per consumed partition: 3 total, 2 of them on TOPIC.
    node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(3));
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Topic == TOPIC)), Times.Exactly(2));
    // Each follow-up fetch must start at (last received offset on that partition) + 1.
    node.Verify(
        n => n.Fetch(
            It.Is <FetchMessage>(
                fm => fm.Topic == TOPIC && fm.Partition == PARTITION && fm.Offset == OFFSET + 2)),
        Times.Once());
    node.Verify(
        n => n.Fetch(
            It.Is <FetchMessage>(
                fm => fm.Topic == TOPIC && fm.Partition == PARTITION + 1 && fm.Offset == OFFSET + 1)),
        Times.Once());
    node.Verify(
        n => n.Fetch(
            It.Is <FetchMessage>(
                fm => fm.Topic == TOPIC2 && fm.Offset == OFFSET + 3 && fm.Partition == PARTITION)),
        Times.Exactly(1));
}
// After StopConsume on all partitions with Offsets.Now, a late fetch response
// may still arrive; it must not trigger any further fetch request.
public void TestStopConsumeAfterFetchLoopMultiple()
{
    var nodeMock = new Mock<INode>();
    var clusterMock = new Mock<ICluster>();
    clusterMock.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(new RoutingTable(new Dictionary<string, Partition[]>
        {
            {
                TOPIC,
                new[]
                {
                    new Partition { Id = 0, Leader = nodeMock.Object },
                    new Partition { Id = 1, Leader = nodeMock.Object },
                    new Partition { Id = 2, Leader = nodeMock.Object }
                }
            }
        })));

    var router = new ConsumeRouter(
        clusterMock.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() },
        1);
    router.StartConsume(TOPIC, 0, OFFSET);
    router.StartConsume(TOPIC, 1, OFFSET);
    router.StopConsume(TOPIC, Partitions.All, Offsets.Now);

    // Builds an error-free three-message batch for the given partition.
    Func<int, FetchPartitionResponse> batchFor = partition => new FetchPartitionResponse
    {
        ErrorCode = ErrorCode.NoError,
        Partition = partition,
        HighWatermarkOffset = 432515L,
        Messages = new List<ResponseMessage>
        {
            new ResponseMessage { Offset = OFFSET, Message = new Message() },
            new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
            new ResponseMessage { Offset = OFFSET + 2, Message = new Message() },
        }
    };

    // Now simulate a fetch response getting out of range, this should not trigger any new fetch request.
    router.Acknowledge(new CommonAcknowledgement<FetchPartitionResponse>
    {
        Response = new CommonResponse<FetchPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData<FetchPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[] { batchFor(0), batchFor(1) }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });

    // Only the two initial fetches (one per started partition) were issued.
    nodeMock.Verify(n => n.Fetch(It.IsAny<FetchMessage>()), Times.Exactly(2));
}
// With consumption started at Offsets.Latest and stopped before the fetch
// loop begins, an incoming offset response must not trigger any fetch request.
public void TestStopConsumeBeforeFetchLoop(long offset)
{
    var nodeMock = new Mock<INode>();
    var clusterMock = new Mock<ICluster>();
    clusterMock.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(new RoutingTable(new Dictionary<string, Partition[]>
        {
            {
                TOPIC,
                new[]
                {
                    new Partition { Id = 0, Leader = nodeMock.Object },
                    new Partition { Id = 1, Leader = nodeMock.Object },
                    new Partition { Id = 2, Leader = nodeMock.Object }
                }
            }
        })));

    var router = new ConsumeRouter(
        clusterMock.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() },
        1);
    router.StartConsume(TOPIC, PARTITION, Offsets.Latest);
    router.StopConsume(TOPIC, PARTITION, offset);

    // Now simulate an offset response, this should not trigger any fetch request.
    var offsetData = new OffsetPartitionResponse
    {
        ErrorCode = ErrorCode.NoError,
        Partition = PARTITION,
        Offsets = new[] { OFFSET }
    };
    router.Acknowledge(new CommonAcknowledgement<OffsetPartitionResponse>
    {
        Response = new CommonResponse<OffsetPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData<OffsetPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[] { offsetData }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });

    // Check
    nodeMock.Verify(n => n.Fetch(It.IsAny<FetchMessage>()), Times.Never());
}
// Checks a full stop/restart cycle of a consumer group subscription: the
// ConsumerStopped/ConsumerStarted events are raised, fetching pauses while
// stopped, and StartConsume resumes the fetch loop.
// NOTE(review): method is async Task but contains no await — verify whether
// the async modifier is still needed.
public async Task TestConsumer_ConsumerGroupRestartConsume() {
    var mocks = InitCluster();
    var consumer = new ConsumeRouter(mocks.Cluster.Object, new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    var consumerStartEvent = new AutoResetEvent(false);
    var consumerStopEvent = new AutoResetEvent(false);
    consumer.ConsumerStopped += () => { consumerStopEvent.Set(); };
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    mocks.Group.Verify(g => g.Join(It.IsAny <IEnumerable <string> >()), Times.Once);
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Once); // 1 partition with specific offset
    mocks.Node.Verify(n => n.Offset(It.IsAny <OffsetMessage>()), Times.Once); // 1 partition with offset -1
    WaitOneSecondMaxForEvent("heatbeat", mocks.HeartbeatCalled);
    mocks.Group.Verify(g => g.Heartbeat());
    // First fetch response (offset 28) arrives while the subscription is active:
    // it should trigger a follow-up fetch.
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse> { ReceivedDate = DateTime.UtcNow, Response = new FetchResponse { FetchPartitionResponse = new CommonResponse <FetchPartitionResponse> { TopicsResponse = new[] { new TopicData <FetchPartitionResponse> { TopicName = "the topic", PartitionsData = new[] { new FetchPartitionResponse { Partition = 1, Messages = new List <ResponseMessage> { new ResponseMessage { Offset = 28, Message = new Message() } } } } } } } } });
    consumer.StopConsume("the topic", Partitions.All, Offsets.Now);
    WaitOneSecondMaxForEvent("stop", consumerStopEvent);
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(2)); // response should have triggered one more fetch
    // Second fetch response (offset 29) arrives while stopped: no new fetch
    // should be issued until consumption is restarted.
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse> { ReceivedDate = DateTime.UtcNow, Response = new FetchResponse { FetchPartitionResponse = new CommonResponse <FetchPartitionResponse> { TopicsResponse = new[] { new TopicData <FetchPartitionResponse> { TopicName = "the topic", PartitionsData = new[] { new FetchPartitionResponse { Partition = 1, Messages = new List <ResponseMessage> { new ResponseMessage { Offset = 29, Message = new Message() } } } } } } } } });
    consumer.ConsumerStarted += () => { consumerStartEvent.Set(); };
    consumer.StartConsume("the topic", Partitions.All, Offsets.Now);
    WaitOneSecondMaxForEvent("start", consumerStartEvent);
    // Restart resumes fetching: one more fetch request was issued.
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(3));
    consumer.Stop().Wait();
}