// Verifies that an explicit CommitAsync forwards a single-topic/single-partition commit
// to the consumer group, and that an exception thrown by the group's Commit surfaces
// to the CommitAsync caller.
public async Task TestConsumer_ConsumerGroupCommitAsync()
{
    var mocks = InitCluster();
    // Auto-commit disabled (-1): only the explicit CommitAsync below may reach the group.
    mocks.Group.SetupGet(g => g.Configuration)
        .Returns(new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });

    await consumer.CommitAsync("the topic", 1, 42);

    // The group must receive exactly one topic entry with one partition carrying the
    // topic/partition/offset passed to CommitAsync (empty metadata).
    mocks.Group.Verify(g => g.Commit(It.Is <IEnumerable <TopicData <OffsetCommitPartitionData> > >(l =>
        l.Count() == 1
        && l.First().TopicName == "the topic"
        && l.First().PartitionsData.Count() == 1
        && l.First().PartitionsData.First().Partition == 1
        && l.First().PartitionsData.First().Metadata == ""
        && l.First().PartitionsData.First().Offset == 42)), // Offset saved should be next expected offset
        Times.Once);

    // A failing Commit must propagate its exception out of CommitAsync.
    mocks.Group.Setup(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >()))
        .ThrowsAsync(new InvalidOperationException()); // NUnit 3 (used for .Net Core build) requires to use ThrowsAsync which doesn't exist in 2.6
    Assert.Throws <InvalidOperationException>(consumer.CommitAsync("the topic", 1, 42).GetAwaiter().GetResult);
}
// Connect all the INode events.
// Subscribes the cluster to every event exposed by a node: errors and lifecycle events
// are funneled through OnNodeEvent, counters go straight to Statistics, and
// acknowledgements are forwarded to the produce/consume routers.
// Returns the same node instance so the call can be chained at the creation site.
private INode ObserveNode(INode node)
{
    // Error/lifecycle notifications are wrapped in OnNodeEvent before being processed.
    node.Dead += n => OnNodeEvent(() => ProcessDeadNode(n));
    node.ConnectionError += (n, e) => OnNodeEvent(() => ProcessNodeError(n, e));
    node.DecodeError += (n, e) => OnNodeEvent(() => ProcessDecodeError(n, e));
    node.InternalError += (n, e) => OnNodeEvent(() => ProcessNodeError(n, e));
    // Pure counters: updated directly on the Statistics object.
    node.RequestSent += _ => Statistics.UpdateRequestSent();
    node.ResponseReceived += (n, l) => Statistics.UpdateResponseReceived(GetNodeId(n), l);
    node.ProduceBatchSent += (_, c, s) =>
    {
        Statistics.UpdateRawProduced(c);
        Statistics.UpdateRawProducedBytes(s);
    };
    node.FetchResponseReceived += (_, c, s) =>
    {
        Statistics.UpdateRawReceived(c);
        Statistics.UpdateRawReceivedBytes(s);
    };
    node.Connected += n => OnNodeEvent(() => Logger.LogInformation(string.Format("Connected to {0}", GetNodeName(n))));
    // Acknowledgements are dispatched to the matching router.
    node.ProduceAcknowledgement += (n, ack) => ProduceRouter.Acknowledge(ack);
    node.FetchAcknowledgement += (n, r) => ConsumeRouter.Acknowledge(r);
    node.OffsetAcknowledgement += (n, r) => ConsumeRouter.Acknowledge(r);
    node.NoMoreRequestSlot += n => NodeMaxRequestReached(n);
    node.RequestTimeout += n => Statistics.UpdateRequestTimeout(GetNodeId(n));
    return (node);
}
// Verifies that stopping the router while subscribed to a consumer group leaves the
// group, commits the current offsets and raises the PartitionsRevoked event.
public async Task TestConsumer_ConsumerGroupLeaveWhenStop()
{
    var mocks = InitCluster();
    mocks.Group.SetupGet(g => g.Configuration)
        .Returns(new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    // Heartbeats always report a rebalance in progress for this test.
    mocks.Group.Setup(g => g.Heartbeat()).ReturnsAsync(ErrorCode.RebalanceInProgress);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    var partitionsRevokedEventIsCalled = false;
    consumer.PartitionsRevoked += () => partitionsRevokedEventIsCalled = true;

    await consumer.Stop();

    mocks.Group.Verify(g => g.LeaveGroup(), Times.Once);
    mocks.Group.Verify(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >()));
    Assert.That(partitionsRevokedEventIsCalled, Is.True);
}
// A heartbeat answering RebalanceInProgress must make the router raise PartitionsRevoked.
public async Task TestConsumer_RaisesPartitionsRevokedOnRebalance()
{
    var mocks = InitCluster();
    mocks.Group.Setup(g => g.Heartbeat()).ReturnsAsync(ErrorCode.RebalanceInProgress);
    mocks.Group.SetupGet(g => g.Configuration).Returns(
        new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    var routerConfiguration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 };
    var consumer = new ConsumeRouter(mocks.Cluster.Object, routerConfiguration, 1);

    var revokedRaised = false;
    consumer.PartitionsRevoked += () => revokedRaised = true;
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });

    // Give the heartbeat loop (SessionTimeoutMs = 10) enough time to fire at least once.
    Thread.Sleep(20);

    Assert.That(revokedRaised, Is.True);
    await consumer.Stop();
}
// Waits (up to one second) until a heartbeat has started being processed by the
// router, then waits for the router's message loop to finish processing its current
// message, so subsequent Verify calls observe a settled state.
// mock:   mock container exposing the HeartbeatCalled synchronization event.
// router: router whose in-flight processing task is awaited.
private async Task HeartbeatFinishedProcessing(Mocks mock, ConsumeRouter router)
{
    // First we wait to be sure that a heartbeat has started being processed.
    // Label fixed from "heatbeat" to "heartbeat" so failure diagnostics read correctly.
    WaitOneSecondMaxForEvent("heartbeat", mock.HeartbeatCalled);
    // Then we wait to be sure that the current message is finished processing
    // (this message being the heartbeat or a following message)
    await router.StopProcessingTask();
}
// Full start-up scenario for a consumer group subscription: join, initial fetch and
// offset requests, heartbeat, then fetch + commit triggered by a fetch response.
public void TestConsumer_ConsumerGroupStartConsume()
{
    var mocks = InitCluster();
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    mocks.Group.Verify(g => g.Join(It.IsAny <IEnumerable <string> >()), Times.Once);
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Once); // 1 partition with specific offset
    mocks.Node.Verify(n => n.Offset(It.IsAny <OffsetMessage>()), Times.Once); // 1 partition with offset -1
    Thread.Sleep(20); // wait for at least one heartbeat to be sent
    mocks.Group.Verify(g => g.Heartbeat());
    // Simulate a fetch response carrying one message (offset 28) on partition 1.
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse>
    {
        ReceivedDate = DateTime.UtcNow,
        Response = new FetchResponse
        {
            FetchPartitionResponse = new CommonResponse <FetchPartitionResponse>
            {
                TopicsResponse = new[]
                {
                    new TopicData <FetchPartitionResponse>
                    {
                        TopicName = "the topic",
                        PartitionsData = new[]
                        {
                            new FetchPartitionResponse
                            {
                                Partition = 1,
                                Messages = new List <ResponseMessage>
                                {
                                    new ResponseMessage { Offset = 28, Message = new Message() }
                                }
                            }
                        }
                    }
                }
            }
        }
    });
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(2)); // response should have triggered one more fetch
    mocks.Group.Verify(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >())); // should have auto committed
    consumer.Stop().Wait();
}
// Only one consumer group subscription is allowed per router:
// a second StartConsumeSubscription must throw an ArgumentException.
public void TestConsumer_OnlyOneConsumerGroup()
{
    var mocks = InitCluster();
    var routerConfiguration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 };
    var consumer = new ConsumeRouter(mocks.Cluster.Object, routerConfiguration, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });

    // Subscribing again on the same router is rejected.
    Assert.That(
        () => consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" }),
        Throws.ArgumentException);
}
// Starting to consume all partitions of a topic at a known offset must issue exactly
// one Fetch per partition, each carrying the requested topic, offset and max bytes.
public void TestStart_AllPartitions_KnownOffset()
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireAllPartitionsForTopic(TOPIC))
        .Returns(Task.FromResult(new[] { 0, 1, 2 }));
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                {
                    TOPIC,
                    new[]
                    {
                        new Partition { Id = 0, Leader = node.Object },
                        new Partition { Id = 1, Leader = node.Object },
                        new Partition { Id = 2, Leader = node.Object }
                    }
                }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);

    consumer.StartConsume(TOPIC, Partitions.All, OFFSET);

    // The partition list and routing table must be requested, for this topic only.
    cluster.Verify(c => c.RequireAllPartitionsForTopic(It.Is <string>(t => t == TOPIC)), Times.AtLeastOnce());
    cluster.Verify(c => c.RequireAllPartitionsForTopic(It.Is <string>(s => s != TOPIC)), Times.Never());
    cluster.Verify(c => c.RequireNewRoutingTable(), Times.AtLeastOnce());
    // One fetch per partition; never with a different topic, offset or max-bytes value.
    node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(3));
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Partition == 0)), Times.Once());
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Partition == 1)), Times.Once());
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Partition == 2)), Times.Once());
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Topic != TOPIC)), Times.Never());
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Offset != OFFSET)), Times.Never());
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.MaxBytes != configuration.FetchMessageMaxBytes)), Times.Never());
}
// Heartbeat error handling: a RebalanceInProgress error code must trigger a commit
// followed by a re-join, while a thrown ("hard") heartbeat error must re-join
// without attempting any commit.
public async Task TestConsumer_ConsumerGroupHeartbeatErrors()
{
    // Scenario 1: heartbeat returns the RebalanceInProgress error code.
    var mocks = InitCluster();
    mocks.Group.SetupGet(g => g.Configuration)
        .Returns(new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    mocks.Group.Setup(g => g.Heartbeat()).ReturnsAsync(ErrorCode.RebalanceInProgress)
        .Callback(() => mocks.HeartbeatCalled.Set());
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    await HeartbeatFinishedProcessing(mocks, consumer);
    // At least 2 Join (one on start, one on next heartbeat)
    mocks.Group.Verify(g => g.Join(It.IsAny <IEnumerable <string> >()), Times.AtLeast(2));
    // Commit should have been called due to RebalanceInProgressError
    mocks.Group.Verify(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >()));
    consumer.Stop().Wait();

    // Scenario 2: heartbeat throws instead of returning an error code.
    mocks = InitCluster();
    mocks.Group.SetupGet(g => g.Configuration)
        .Returns(new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    mocks.Group.Setup(g => g.Heartbeat()).ThrowsAsync(new Exception())
        .Callback(() => mocks.HeartbeatCalled.Set());
    consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    await HeartbeatFinishedProcessing(mocks, consumer);
    mocks.Group.Verify(g => g.Join(It.IsAny <IEnumerable <string> >()), Times.AtLeast(2));
    // No Commit tried in case of "hard" heartbeat errors
    mocks.Group.Verify(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >()), Times.Never);
}
// With a Global batch strategy and a very long buffering time, starting to consume
// must batch fetch requests through Post instead of sending individual Fetch messages.
public void TestStart_AllPartitions_KnownOffset_GlobalBatching()
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireAllPartitionsForTopic(TOPIC))
        .Returns(Task.FromResult(new[] { 0, 1, 2 }));
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                {
                    TOPIC,
                    new[]
                    {
                        new Partition { Id = 0, Leader = node.Object },
                        new Partition { Id = 1, Leader = node.Object },
                        new Partition { Id = 2, Leader = node.Object }
                    }
                }
            })));
    // Buffering time is huge (28 hours) so batches are not flushed during the test.
    var configuration = new Configuration
    {
        TaskScheduler = new CurrentThreadTaskScheduler(),
        BatchStrategy = BatchStrategy.Global,
        ConsumeBatchSize = 3,
        ConsumeBufferingTime = TimeSpan.FromHours(28)
    };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);

    consumer.StartConsume(TOPIC, Partitions.All, OFFSET);

    cluster.Verify(c => c.RequireAllPartitionsForTopic(It.Is <string>(t => t == TOPIC)), Times.AtLeastOnce());
    cluster.Verify(c => c.RequireAllPartitionsForTopic(It.Is <string>(s => s != TOPIC)), Times.Never());
    cluster.Verify(c => c.RequireNewRoutingTable(), Times.AtLeastOnce());
    // No direct fetch: requests go through the batching Post path instead.
    node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Never());
    node.Verify(n => n.Post(It.IsAny <IBatchByTopic <FetchMessage> >()));
}
// Stops the cluster: disposes the timeout scheduler and metadata refresh timer,
// stops the consume and produce routers, completes and drains the internal agent,
// then stops all known nodes in parallel. No-op when the cluster was never started.
public async Task Stop()
{
    if (!_started)
    {
        return;
    }
    _timeoutScheduler.Dispose();
    _refreshMetadataTimer.Dispose();
    // Routers are stopped before the agent is completed and awaited.
    await ConsumeRouter.Stop();
    await ProduceRouter.Stop();
    _agent.Complete();
    await _agent.Completion;
    // Stop every node concurrently and wait for all of them.
    await Task.WhenAll(_nodes.Keys.Select(n => n.Stop()));
    _started = false;
}
// Starting to consume one partition with an unknown offset (Earliest/Latest marker,
// supplied as the test parameter) must send an Offset request for exactly that
// topic/partition/time marker, and must not request the full partition list.
public void TestStart_OnePartition_OffsetUnknown(long offset)
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                {
                    TOPIC,
                    new[]
                    {
                        new Partition { Id = 0, Leader = node.Object },
                        new Partition { Id = 1, Leader = node.Object },
                        new Partition { Id = 2, Leader = node.Object }
                    }
                }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);

    consumer.StartConsume(TOPIC, PARTITION, offset);

    // A single explicit partition was requested, so all-partitions resolution is skipped.
    cluster.Verify(c => c.RequireAllPartitionsForTopic(It.IsAny <string>()), Times.Never());
    cluster.Verify(c => c.RequireNewRoutingTable(), Times.AtLeastOnce());
    // Exactly one Offset request, carrying the requested partition, topic and time marker.
    node.Verify(n => n.Offset(It.Is <OffsetMessage>(om => om.Partition == PARTITION)), Times.Once());
    node.Verify(n => n.Offset(It.Is <OffsetMessage>(om => om.Partition != PARTITION)), Times.Never());
    node.Verify(n => n.Offset(It.Is <OffsetMessage>(om => om.Topic != TOPIC)), Times.Never());
    node.Verify(n => n.Offset(It.Is <OffsetMessage>(om => om.Time != offset)), Times.Never());
}
// Joining a consumer group must raise the PartitionsAssigned event.
public async Task TestConsumer_RaisesPartitionsAssignedEventOnJoin()
{
    var mocks = InitCluster();
    var routerConfiguration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 };
    var consumer = new ConsumeRouter(mocks.Cluster.Object, routerConfiguration, 1);

    var assignedRaised = false;
    consumer.PartitionsAssigned += _ => assignedRaised = true;
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    await consumer.Stop();

    Assert.That(assignedRaised, Is.True);
}
// When the routing table contains no leader for the requested partition,
// the fetch is postponed and the FetchPostponed event is raised exactly once.
public void TestPostponeIfNoRoute()
{
    var cluster = new Mock <ICluster>();
    cluster.SetupGet(c => c.Logger).Returns(new DevNullLogger());
    // The routing table only knows about a different partition, so no route exists.
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                { TOPIC, new[] { new Partition { Id = PARTITION + 1 }, } }
            })));
    var consumer = new ConsumeRouter(cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() }, 1);

    var postponedCount = 0;
    consumer.FetchPostponed += (topic, partition) =>
    {
        Assert.AreEqual(TOPIC, topic);
        Assert.AreEqual(PARTITION, partition);
        ++postponedCount;
    };

    consumer.StartConsume(TOPIC, PARTITION, Offsets.Earliest);

    Assert.AreEqual(1, postponedCount);
}
// With auto-commit disabled, offsets must be committed only when RequireCommit is
// called, and the committed offset must be the next expected one (last consumed + 1).
public void TestConsumer_ConsumerGroupCommit()
{
    var mocks = InitCluster();
    mocks.Group.SetupGet(g => g.Configuration)
        .Returns(new ConsumerGroupConfiguration { AutoCommitEveryMs = -1, SessionTimeoutMs = 10 });
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    Thread.Sleep(10); // wait for at least one heartbeat to be sent
    // Simulate consuming one message at offset 28 on partition 1.
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse>
    {
        ReceivedDate = DateTime.UtcNow,
        Response = new FetchResponse
        {
            FetchPartitionResponse = new CommonResponse <FetchPartitionResponse>
            {
                TopicsResponse = new[]
                {
                    new TopicData <FetchPartitionResponse>
                    {
                        TopicName = "the topic",
                        PartitionsData = new[]
                        {
                            new FetchPartitionResponse
                            {
                                Partition = 1,
                                Messages = new List <ResponseMessage>
                                {
                                    new ResponseMessage { Offset = 28, Message = new Message() }
                                }
                            }
                        }
                    }
                }
            }
        }
    });
    mocks.Group.Verify(g => g.Commit(It.IsAny <IEnumerable <TopicData <OffsetCommitPartitionData> > >()), Times.Never); // no auto commit
    consumer.RequireCommit();
    mocks.Group.Verify(g => g.Commit(It.Is <IEnumerable <TopicData <OffsetCommitPartitionData> > >(l =>
        l.Count() == 1
        && l.First().TopicName == "the topic"
        && l.First().PartitionsData.Count() == 2
        && l.First().PartitionsData.First().Partition == 1
        && l.First().PartitionsData.First().Metadata == ""
        && l.First().PartitionsData.First().Offset == 29)), // Offset saved should be next expected offset
        Times.Once);
}
// Messages outside the consumed offset window must be filtered out: of 4 messages
// received, only those at OFFSET and OFFSET + 1 (the [start, stop] window) are
// propagated through MessageReceived.
public void TestMessagesAreFilteredAndPropagated()
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                { TOPIC, new[] { new Partition { Id = PARTITION, Leader = node.Object }, } }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);
    // Consume only the [OFFSET, OFFSET + 1] window.
    consumer.StartConsume(TOPIC, PARTITION, OFFSET);
    consumer.StopConsume(TOPIC, PARTITION, OFFSET + 1);
    int messageReceivedRaised = 0;
    consumer.MessageReceived += kr =>
    {
        Assert.AreEqual(TOPIC, kr.Topic);
        Assert.AreEqual(PARTITION, kr.Partition);
        Assert.IsTrue(kr.Offset == OFFSET || kr.Offset == OFFSET + 1);
        ++messageReceivedRaised;
    };
    consumer.Acknowledge(new CommonAcknowledgement <FetchPartitionResponse>
    {
        Response = new CommonResponse <FetchPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData <FetchPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[]
                    {
                        new FetchPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = PARTITION,
                            HighWatermarkOffset = 432515L,
                            Messages = new List <ResponseMessage>
                            {
                                new ResponseMessage { Offset = OFFSET - 1, Message = new Message() }, // Will be filtered
                                new ResponseMessage { Offset = OFFSET, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
                                // Beyond the stop offset: also filtered.
                                new ResponseMessage { Offset = OFFSET + 2, Message = new Message() },
                            }
                        }
                    }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });
    Assert.AreEqual(2, messageReceivedRaised);
}
// Stop-then-restart scenario on a consumer group subscription: while stopped, a fetch
// response must not trigger a new fetch; after StartConsume, fetching resumes.
public async Task TestConsumer_ConsumerGroupRestartConsume()
{
    var mocks = InitCluster();
    var consumer = new ConsumeRouter(mocks.Cluster.Object,
        new Configuration { TaskScheduler = new CurrentThreadTaskScheduler(), ConsumeBatchSize = 1 }, 1);
    var consumerStartEvent = new AutoResetEvent(false);
    var consumerStopEvent = new AutoResetEvent(false);
    consumer.ConsumerStopped += () => { consumerStopEvent.Set(); };
    consumer.StartConsumeSubscription(mocks.Group.Object, new[] { "the topic" });
    mocks.Group.Verify(g => g.Join(It.IsAny <IEnumerable <string> >()), Times.Once);
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Once); // 1 partition with specific offset
    mocks.Node.Verify(n => n.Offset(It.IsAny <OffsetMessage>()), Times.Once); // 1 partition with offset -1
    WaitOneSecondMaxForEvent("heatbeat", mocks.HeartbeatCalled);
    mocks.Group.Verify(g => g.Heartbeat());
    // While consuming is active, a fetch response triggers a follow-up fetch.
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse>
    {
        ReceivedDate = DateTime.UtcNow,
        Response = new FetchResponse
        {
            FetchPartitionResponse = new CommonResponse <FetchPartitionResponse>
            {
                TopicsResponse = new[]
                {
                    new TopicData <FetchPartitionResponse>
                    {
                        TopicName = "the topic",
                        PartitionsData = new[]
                        {
                            new FetchPartitionResponse
                            {
                                Partition = 1,
                                Messages = new List <ResponseMessage>
                                {
                                    new ResponseMessage { Offset = 28, Message = new Message() }
                                }
                            }
                        }
                    }
                }
            }
        }
    });
    consumer.StopConsume("the topic", Partitions.All, Offsets.Now);
    WaitOneSecondMaxForEvent("stop", consumerStopEvent);
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(2)); // response should have triggered one more fetch
    // While stopped, a response must NOT trigger another fetch (count stays at 2 until restart).
    consumer.Acknowledge(new CommonAcknowledgement <FetchResponse>
    {
        ReceivedDate = DateTime.UtcNow,
        Response = new FetchResponse
        {
            FetchPartitionResponse = new CommonResponse <FetchPartitionResponse>
            {
                TopicsResponse = new[]
                {
                    new TopicData <FetchPartitionResponse>
                    {
                        TopicName = "the topic",
                        PartitionsData = new[]
                        {
                            new FetchPartitionResponse
                            {
                                Partition = 1,
                                Messages = new List <ResponseMessage>
                                {
                                    new ResponseMessage { Offset = 29, Message = new Message() }
                                }
                            }
                        }
                    }
                }
            }
        }
    });
    consumer.ConsumerStarted += () => { consumerStartEvent.Set(); };
    consumer.StartConsume("the topic", Partitions.All, Offsets.Now);
    WaitOneSecondMaxForEvent("start", consumerStartEvent);
    // Restarting resumes the fetch loop.
    mocks.Node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(3));
    consumer.Stop().Wait();
}
// An error-free fetch response must be followed by one new fetch request per
// partition, each starting at the offset just after the last message received.
public void TestFetchResponseIsFollowedByFetchRequest_NoError()
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                {
                    TOPIC,
                    new[]
                    {
                        new Partition { Id = PARTITION, Leader = node.Object },
                        new Partition { Id = PARTITION + 1, Leader = node.Object },
                    }
                },
                {
                    TOPIC2,
                    new[] { new Partition { Id = PARTITION, Leader = node.Object }, }
                }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);
    consumer.StartConsume(TOPIC, PARTITION, Offsets.Earliest);
    consumer.StartConsume(TOPIC, PARTITION + 1, Offsets.Earliest);
    consumer.StartConsume(TOPIC2, PARTITION, Offsets.Earliest);
    // One response covering three partitions across two topics, with 2, 1 and 3 messages.
    consumer.Acknowledge(new CommonAcknowledgement <FetchPartitionResponse>
    {
        Response = new CommonResponse <FetchPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData <FetchPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[]
                    {
                        new FetchPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = PARTITION,
                            HighWatermarkOffset = 432515L,
                            Messages = new List <ResponseMessage>
                            {
                                new ResponseMessage { Offset = OFFSET, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
                            }
                        },
                        new FetchPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = PARTITION + 1,
                            HighWatermarkOffset = 432515L,
                            Messages = new List <ResponseMessage>
                            {
                                new ResponseMessage { Offset = OFFSET, Message = new Message() },
                            }
                        },
                    }
                },
                new TopicData <FetchPartitionResponse>
                {
                    TopicName = TOPIC2,
                    PartitionsData = new[]
                    {
                        new FetchPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = PARTITION,
                            HighWatermarkOffset = 432515L,
                            Messages = new List <ResponseMessage>
                            {
                                new ResponseMessage { Offset = OFFSET, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 2, Message = new Message() },
                            }
                        },
                    }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });
    // One follow-up fetch per partition, each at (last received offset + 1).
    node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(3));
    node.Verify(n => n.Fetch(It.Is <FetchMessage>(fm => fm.Topic == TOPIC)), Times.Exactly(2));
    node.Verify(
        n => n.Fetch(
            It.Is <FetchMessage>(
                fm => fm.Topic == TOPIC && fm.Partition == PARTITION && fm.Offset == OFFSET + 2)),
        Times.Once());
    node.Verify(
        n => n.Fetch(
            It.Is <FetchMessage>(
                fm => fm.Topic == TOPIC && fm.Partition == PARTITION + 1 && fm.Offset == OFFSET + 1)),
        Times.Once());
    node.Verify(
        n => n.Fetch(
            It.Is <FetchMessage>(
                fm => fm.Topic == TOPIC2 && fm.Offset == OFFSET + 3 && fm.Partition == PARTITION)),
        Times.Exactly(1));
}
// After StopConsume on all partitions, fetch responses must not trigger any new
// fetch request: only the two fetches issued by the StartConsume calls remain.
public void TestStopConsumeAfterFetchLoopMultiple()
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                {
                    TOPIC,
                    new[]
                    {
                        new Partition { Id = 0, Leader = node.Object },
                        new Partition { Id = 1, Leader = node.Object },
                        new Partition { Id = 2, Leader = node.Object }
                    }
                }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);
    consumer.StartConsume(TOPIC, 0, OFFSET);
    consumer.StartConsume(TOPIC, 1, OFFSET);
    consumer.StopConsume(TOPIC, Partitions.All, Offsets.Now);

    // Now simulate a fetch response getting out of range, this should not trigger any new fetch request.
    consumer.Acknowledge(new CommonAcknowledgement <FetchPartitionResponse>
    {
        Response = new CommonResponse <FetchPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData <FetchPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[]
                    {
                        new FetchPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = 0,
                            HighWatermarkOffset = 432515L,
                            Messages = new List <ResponseMessage>
                            {
                                new ResponseMessage { Offset = OFFSET, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 2, Message = new Message() },
                            }
                        },
                        new FetchPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = 1,
                            HighWatermarkOffset = 432515L,
                            Messages = new List <ResponseMessage>
                            {
                                new ResponseMessage { Offset = OFFSET, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 1, Message = new Message() },
                                new ResponseMessage { Offset = OFFSET + 2, Message = new Message() },
                            }
                        }
                    }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });

    // Check: exactly the two initial fetches, no follow-ups after StopConsume.
    node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Exactly(2));
}
// StopConsume issued before the offset response arrives must prevent the fetch loop
// from ever starting: the subsequent offset response triggers no fetch request.
public void TestStopConsumeBeforeFetchLoop(long offset)
{
    var node = new Mock <INode>();
    var cluster = new Mock <ICluster>();
    cluster.Setup(c => c.RequireNewRoutingTable())
        .Returns(() => Task.FromResult(
            new RoutingTable(new Dictionary <string, Partition[]>
            {
                {
                    TOPIC,
                    new[]
                    {
                        new Partition { Id = 0, Leader = node.Object },
                        new Partition { Id = 1, Leader = node.Object },
                        new Partition { Id = 2, Leader = node.Object }
                    }
                }
            })));
    var configuration = new Configuration { TaskScheduler = new CurrentThreadTaskScheduler() };
    var consumer = new ConsumeRouter(cluster.Object, configuration, 1);
    // Start with an unknown offset (Latest) then immediately stop at the parameterized offset.
    consumer.StartConsume(TOPIC, PARTITION, Offsets.Latest);
    consumer.StopConsume(TOPIC, PARTITION, offset);

    // Now simulate an offset response, this should not trigger any fetch request.
    consumer.Acknowledge(new CommonAcknowledgement <OffsetPartitionResponse>
    {
        Response = new CommonResponse <OffsetPartitionResponse>
        {
            TopicsResponse = new[]
            {
                new TopicData <OffsetPartitionResponse>
                {
                    TopicName = TOPIC,
                    PartitionsData = new[]
                    {
                        new OffsetPartitionResponse
                        {
                            ErrorCode = ErrorCode.NoError,
                            Partition = PARTITION,
                            Offsets = new[] { OFFSET }
                        }
                    }
                }
            }
        },
        ReceivedDate = DateTime.UtcNow
    });

    // Check
    node.Verify(n => n.Fetch(It.IsAny <FetchMessage>()), Times.Never());
}