// Verifies that a consumer whose internal buffer holds only one message (so the
// upstream fetch task blocks) still delivers every produced message, in order.
// Converted from a sync test using .Result/.Wait() to async/await: blocking on
// async code risks thread-pool starvation and wraps failures in AggregateException.
public async Task ConsumerShouldNotLoseMessageWhenBlocked()
{
    // Each message carries the test id as its key so we can verify that we only
    // read messages produced by this run.
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(new KafkaOptions(TestConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = await producer.BrokerRouter.GetTopicOffsetsAsync(TestConfig.TopicName(), CancellationToken.None);

        // create consumer with buffer size of 1 (should block upstream)
        using (var consumer = new OldConsumer(
            new ConsumerOptions(TestConfig.TopicName(), router) { ConsumerBufferSize = 1, MaxWaitTimeForMinimumBytes = TimeSpan.Zero },
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offset)).ToArray()))
        {
            for (var i = 0; i < 20; i++)
            {
                // await instead of .Wait() so exceptions surface directly.
                await producer.SendMessageAsync(new Message(i.ToString(), testId), TestConfig.TopicName(), CancellationToken.None);
            }

            for (var i = 0; i < 20; i++)
            {
                var result = consumer.Consume().Take(1).First();
                Assert.That(result.Key.ToUtf8String(), Is.EqualTo(testId));
                Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
        }
    }
}
// Forces the partition selector to always choose partition 1, then produces with
// an explicit partition id of 0: if the message key were used for routing, the
// messages would land on the wrong partition and the read-back below would fail.
public async Task ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition()
{
    var selector = Substitute.For<IPartitionSelector>();
    selector.Select(null, null)
        .ReturnsForAnyArgs(call => call.Arg<MetadataResponse.Topic>().Partitions.Single(p => p.PartitionId == 1));

    using (var router = new BrokerRouter(new KafkaOptions(TestConfig.IntegrationUri, partitionSelector: selector)))
    {
        var offset = await router.GetTopicOffsetAsync(TestConfig.TopicName(), 0, CancellationToken.None);

        using (var producer = new Producer(router))
        {
            // message should send to PartitionId and not use the key to Select Broker Route !!
            for (var sent = 0; sent < 20; sent++)
            {
                await producer.SendMessageAsync(new Message(sent.ToString(), "key"), offset.TopicName, offset.PartitionId, CancellationToken.None);
            }
        }

        using (var consumer = new Consumer(router))
        using (var cancellation = new CancellationTokenSource())
        {
            var received = 0;
            await consumer.FetchAsync(
                offset,
                20,
                message =>
                {
                    Assert.That(message.Value.ToUtf8String(), Is.EqualTo(received++.ToString()));
                    if (received >= 20)
                    {
                        cancellation.Cancel();
                    }
                    return Task.FromResult(0);
                },
                cancellation.Token);
        }
    }
}
// Commits an offset for an existing consumer group, reads it back, then commits
// a new value and verifies the stored offset was updated.
public async Task UpdateOrCreateOffsetConsumerGroupExistsTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 0;
        var consumerGroup = TestConfig.ConsumerName();
        var topicName = TestConfig.TopicName();
        var offset = 5;      // fixed local typo: was "offest"
        var newOffset = 10;

        // Prime the metadata/offset cache for the topic.
        await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, CancellationToken.None);

        await brokerRouter.CommitTopicOffsetAsync(topicName, partitionId, consumerGroup, offset, CancellationToken.None);
        var res = await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, consumerGroup, CancellationToken.None);
        Assert.AreEqual(offset, res.Offset);

        await brokerRouter.CommitTopicOffsetAsync(topicName, partitionId, consumerGroup, newOffset, CancellationToken.None);
        res = await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, consumerGroup, CancellationToken.None);
        Assert.AreEqual(newOffset, res.Offset);
    }
}
// Ensures a consumer group exists, commits an offset, and checks the broker
// acknowledges the commit without error.
public async Task OffsetCommitShouldStoreAndReturnSuccess()
{
    const int partitionId = 0;
    // The original called router.Dispose() only at the end of the method, which
    // leaked the router whenever an assertion failed; a using block is exception-safe.
    using (var router = new BrokerRouter(_options))
    {
        await router.GetTopicMetadataAsync(TestConfig.TopicName(), CancellationToken.None);
        var conn = router.GetBrokerRoute(TestConfig.TopicName(), partitionId);

        // ensure the group exists
        var group = new GroupCoordinatorRequest(TestConfig.ConsumerName());
        var groupResponse = await conn.Connection.SendAsync(group, CancellationToken.None);
        Assert.That(groupResponse, Is.Not.Null);
        Assert.That(groupResponse.ErrorCode, Is.EqualTo(ErrorResponseCode.None));

        var commit = new OffsetCommitRequest(group.GroupId, new[] { new OffsetCommitRequest.Topic(TestConfig.TopicName(), partitionId, 10, null) });
        var response = await conn.Connection.SendAsync(commit, CancellationToken.None);
        var topic = response.Topics.FirstOrDefault();

        Assert.That(topic, Is.Not.Null);
        Assert.That(topic.ErrorCode, Is.EqualTo(ErrorResponseCode.None));
    }
}
// Produces ten messages, then consumes them in two halves (first via the offset
// object, then via explicit topic/partition/offset) and verifies both halves.
public async Task FetchMessagesCacheContainsAllRequestTest()
{
    // Router and producer are IDisposable and were previously leaked.
    using (var brokerRouter = new BrokerRouter(_options))
    using (var producer = new Producer(brokerRouter))
    {
        var topic = TestConfig.TopicName();
        var consumer = new Consumer(brokerRouter, _consumerConfig);
        var offset = await brokerRouter.GetTopicOffsetAsync(topic, _partitionId, CancellationToken.None);

        // Creating 10 messages (the old comment incorrectly said 5).
        var messages = CreateTestMessages(10, 1);
        await producer.SendMessagesAsync(messages, topic, _partitionId, new SendMessageConfiguration(ackTimeout: TimeSpan.FromSeconds(3)), CancellationToken.None);

        // Consume the first half...
        var result = (await consumer.FetchMessagesAsync(offset, 5, CancellationToken.None)).ToList();
        CheckMessages(messages.Take(5).ToList(), result);

        // ...then the second half from the advanced offset.
        result = (await consumer.FetchMessagesAsync(offset.TopicName, offset.PartitionId, offset.Offset + 5, 5, CancellationToken.None)).ToList();
        CheckMessages(messages.Skip(5).ToList(), result);
    }
}
// Fetches with a max-partition-fetch-bytes smaller than a single message and
// expects the consumer to surface a BufferUnderRunException.
public async Task FetchMessagesBufferUnderRunTest()
{
    var smallMessageSet = 4096 / 2;

    // Router and producer are IDisposable and were previously leaked.
    using (var brokerRouter = new BrokerRouter(_options))
    using (var producer = new Producer(brokerRouter))
    {
        var topic = TestConfig.TopicName();
        var consumer = new Consumer(brokerRouter, new ConsumerConfiguration(maxPartitionFetchBytes: smallMessageSet));
        var offset = await brokerRouter.GetTopicOffsetAsync(topic, _partitionId, CancellationToken.None);

        // Creating 10 messages (the old comment incorrectly said 5), each bigger
        // than the fetch buffer above.
        var messages = CreateTestMessages(10, 4096);
        await producer.SendMessagesAsync(messages, topic, _partitionId, new SendMessageConfiguration(ackTimeout: TimeSpan.FromSeconds(3)), CancellationToken.None);

        try
        {
            // Now let's consume
            await consumer.FetchMessagesAsync(offset, 5, CancellationToken.None);
            Assert.Fail("should have thrown BufferUnderRunException");
        }
        catch (BufferUnderRunException ex)
        {
            Console.WriteLine(ex.ToString());
        }
    }
}
// Sends a small gzip-compressed batch directly over a broker connection and
// verifies the produce response carries no error codes.
public async Task EnsureGzipCompressedMessageCanSend()
{
    var topicName = TestConfig.TopicName();
    TestConfig.InfoLog.Info(() => LogEvent.Create(">> Start EnsureGzipCompressedMessageCanSend"));

    // Prime broker metadata for the topic before producing.
    using (var conn = GetKafkaConnection())
    {
        await conn.SendAsync(new MetadataRequest(topicName), CancellationToken.None);
    }

    using (var router = new BrokerRouter(_options))
    {
        TestConfig.InfoLog.Info(() => LogEvent.Create(">> Start GetTopicMetadataAsync"));
        await router.GetTopicMetadataAsync(topicName, CancellationToken.None);
        TestConfig.InfoLog.Info(() => LogEvent.Create(">> End GetTopicMetadataAsync"));

        var route = router.GetBrokerRoute(topicName, 0);
        var payload = new ProduceRequest.Payload(
            topicName,
            0,
            new[] { new Message("0", "1"), new Message("1", "1"), new Message("2", "1") },
            MessageCodec.CodecGzip);
        var request = new ProduceRequest(payload);

        TestConfig.InfoLog.Info(() => LogEvent.Create(">> start SendAsync"));
        var response = await route.Connection.SendAsync(request, CancellationToken.None);
        TestConfig.InfoLog.Info(() => LogEvent.Create("end SendAsync"));
        Assert.That(response.Errors.Any(e => e != ErrorResponseCode.None), Is.False);
        TestConfig.InfoLog.Info(() => LogEvent.Create("start dispose"));
    }
    TestConfig.InfoLog.Info(() => LogEvent.Create(">> End EnsureGzipCompressedMessageCanSend"));
}
// Produces a known number of messages, reads them all back in order, and then
// confirms the server-side offset advanced by exactly that count.
public async Task ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    var totalMessages = 20;
    var expected = totalMessages.Repeat(n => n.ToString()).ToList();
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(TestConfig.IntegrationUri, log: TestConfig.InfoLog))
    using (var producer = new Producer(router))
    {
        var startOffset = await producer.BrokerRouter.GetTopicOffsetAsync(TestConfig.TopicName(), 0, CancellationToken.None);

        for (var sent = 0; sent < totalMessages; sent++)
        {
            await producer.SendMessageAsync(new Message(sent.ToString(), testId), TestConfig.TopicName(), 0, CancellationToken.None);
        }

        using (var consumer = new Consumer(router, new ConsumerConfiguration(maxServerWait: TimeSpan.Zero)))
        {
            var results = await consumer.FetchMessagesAsync(startOffset, totalMessages, CancellationToken.None);
            TestConfig.InfoLog.Info(() => LogEvent.Create($"Message order: {string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList())}"));

            Assert.That(results.Count, Is.EqualTo(totalMessages));
            Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");

            var endOffset = await producer.BrokerRouter.GetTopicOffsetAsync(startOffset.TopicName, startOffset.PartitionId, CancellationToken.None);
            Assert.That(endOffset.Offset - startOffset.Offset, Is.EqualTo(totalMessages));
        }
    }
}
// Requesting offsets for a partition id that does not exist on the topic should
// throw a CachedMetadataException.
public async Task FetchLastOffsetPartitionDoesntExistTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 100; // well beyond the partition count of the test topic
        var topic = TestConfig.TopicName();

        Assert.ThrowsAsync<CachedMetadataException>(async () => await brokerRouter.GetTopicOffsetAsync(topic, partitionId, CancellationToken.None));
    }
}
// Fetching the last offset of a topic that doesn't exist yet should complete
// without throwing (NOTE(review): presumably relies on broker-side topic
// auto-creation — the test only asserts the absence of an exception).
public async Task FetchLastOffsetTopicDoesntExistTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config, log: TestConfig.InfoLog))
    {
        var topic = TestConfig.TopicName();
        await brokerRouter.GetTopicOffsetAsync(topic, _partitionId, CancellationToken.None);
    }
}
// Fetching a committed offset for a brand-new (never-committed) consumer group
// should complete without throwing.
public async Task FetchOffsetConsumerGroupDoesntExistTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 0;
        var consumerGroup = Guid.NewGuid().ToString(); // guaranteed-unknown group
        var topicName = TestConfig.TopicName();

        await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, consumerGroup, CancellationToken.None);
    }
}
// Load test: produces totalMessages in batches, then fetches them back, failing
// if either phase exceeds timeoutInMs. Fix: the Assert.Fail message was a string
// literal broken across two physical lines (invalid syntax); it is now a single
// interpolated string.
public async Task ConsumerProducerSpeedUnderLoad(int totalMessages, int batchSize, int timeoutInMs)
{
    // Used by the (currently disabled) ordering assertion near the end.
    var expected = totalMessages.Repeat(i => i.ToString()).ToList();

    using (var router = new BrokerRouter(TestConfig.IntegrationUri, log: TestConfig.WarnLog))
    using (var producer = new Producer(router, new ProducerConfiguration(batchSize: totalMessages / 10, batchMaxDelay: TimeSpan.FromMilliseconds(25))))
    {
        var offset = await producer.BrokerRouter.GetTopicOffsetAsync(TestConfig.TopicName(), 0, CancellationToken.None);

        var stopwatch = new Stopwatch();
        stopwatch.Start();

        // Kick off all sends without awaiting each one individually.
        var sendList = new List<Task>(totalMessages);
        for (var i = 0; i < totalMessages; i += batchSize)
        {
            var sendTask = producer.SendMessagesAsync(batchSize.Repeat(x => new Message(x.ToString())), offset.TopicName, offset.PartitionId, CancellationToken.None);
            sendList.Add(sendTask);
        }

        var maxTimeToRun = TimeSpan.FromMilliseconds(timeoutInMs);
        var doneSend = Task.WhenAll(sendList.ToArray());
        await Task.WhenAny(doneSend, Task.Delay(maxTimeToRun));
        stopwatch.Stop();

        if (!doneSend.IsCompleted)
        {
            var completed = sendList.Count(t => t.IsCompleted);
            Assert.Inconclusive($"Only finished sending {completed} of {totalMessages} in {timeoutInMs} ms.");
        }
        await doneSend; // propagate any send exceptions
        TestConfig.InfoLog.Info(() => LogEvent.Create($">> done send, time Milliseconds:{stopwatch.ElapsedMilliseconds}"));
        stopwatch.Restart();

        using (var consumer = new Consumer(router, new ConsumerConfiguration(maxServerWait: TimeSpan.Zero)))
        {
            var fetched = ImmutableList<Message>.Empty;
            stopwatch.Restart();
            while (fetched.Count < totalMessages)
            {
                var doneFetch = consumer.FetchMessagesAsync(offset.TopicName, offset.PartitionId, offset.Offset + fetched.Count, totalMessages, CancellationToken.None);
                var delay = Task.Delay((int)Math.Max(0, maxTimeToRun.TotalMilliseconds - stopwatch.ElapsedMilliseconds));
                await Task.WhenAny(doneFetch, delay);
                if (delay.IsCompleted && !doneFetch.IsCompleted)
                {
                    Assert.Fail($"Received {fetched.Count} of {totalMessages} in {timeoutInMs} ms.");
                }
                var results = await doneFetch;
                fetched = fetched.AddRange(results);
            }
            stopwatch.Stop();
            TestConfig.InfoLog.Info(() => LogEvent.Create($">> done Consume, time Milliseconds:{stopwatch.ElapsedMilliseconds}"));
            // Assert.That(fetched.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
            Assert.That(fetched.Count, Is.EqualTo(totalMessages));
        }
    }
}
// Fetching the last offset of an existing topic should return a real offset
// (not the -1 sentinel).
public async Task FetchLastOffsetSimpleTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var topic = TestConfig.TopicName();
        var offset = await brokerRouter.GetTopicOffsetAsync(topic, _partitionId, CancellationToken.None);
        Assert.AreNotEqual(-1, offset.Offset);
    }
}
// A null or empty consumer group name must be rejected with ArgumentNullException.
public async Task UpdateOrCreateOffsetConsumerGroupArgumentNull([Values(null, "")] string group)
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 0;
        var topic = TestConfig.TopicName();
        var offset = 5; // fixed local typo: was "offest"

        Assert.ThrowsAsync<ArgumentNullException>(async () => await brokerRouter.CommitTopicOffsetAsync(topic, partitionId, group, offset, CancellationToken.None));
    }
}
// Committing an offset against a topic that does not exist yet should complete
// without throwing.
public async Task UpdateOrCreateOffsetTopicDoesntExistTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 0;
        var topic = TestConfig.TopicName();
        var consumerGroup = TestConfig.ConsumerName();
        var offset = 5; // fixed local typo: was "offest"

        await brokerRouter.CommitTopicOffsetAsync(topic, partitionId, consumerGroup, offset, CancellationToken.None);
    }
}
// Committing a negative offset must be rejected with ArgumentOutOfRangeException.
public async Task UpdateOrCreateOffsetNegativeOffsetTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 0;
        var topic = TestConfig.TopicName();
        var consumerGroup = TestConfig.ConsumerName();
        var offset = -5; // fixed local typo: was "offest"; negative is invalid

        Assert.ThrowsAsync<ArgumentOutOfRangeException>(async () => await brokerRouter.CommitTopicOffsetAsync(topic, partitionId, consumerGroup, offset, CancellationToken.None));
    }
}
// Sending a batch should yield one result per message/partition written to.
public async Task SendAsyncShouldGetAResultForEachPartitionSentTo()
{
    using (var router = new BrokerRouter(new KafkaOptions(TestConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var batch = new[] { new Message("1"), new Message("2"), new Message("3") };
        var results = await producer.SendMessagesAsync(batch, TestConfig.TopicName(), CancellationToken.None);

        // All three messages are distinct, so both counts must line up.
        Assert.That(results.Count, Is.EqualTo(batch.Distinct().Count()));
        Assert.That(results.Count, Is.EqualTo(batch.Count()));
    }
}
// Fetching from a partition id that does not exist on the topic should throw a
// CachedMetadataException.
public async Task FetchMessagesPartitionDoesntExist()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 100; // well beyond the partition count of the test topic
        var topic = TestConfig.TopicName();
        var consumer = new Consumer(brokerRouter, new ConsumerConfiguration(maxPartitionFetchBytes: DefaultMaxMessageSetSize * 2));
        var offset = 0;

        Assert.ThrowsAsync<CachedMetadataException>(async () => await consumer.FetchMessagesAsync(topic, partitionId, offset, 5, CancellationToken.None));
    }
}
// Committing an offset for a partition id that does not exist should throw a
// CachedMetadataException.
public async Task UpdateOrCreateOffsetPartitionDoesntExistTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var partitionId = 100; // well beyond the partition count of the test topic
        var consumerGroup = Guid.NewGuid().ToString();
        var topicName = TestConfig.TopicName();
        var offset = 5; // fixed local typo: was "offest"

        Assert.ThrowsAsync<CachedMetadataException>(async () => await brokerRouter.CommitTopicOffsetAsync(topicName, partitionId, consumerGroup, offset, CancellationToken.None));
    }
}
// Fetching from the current head of the partition (no new messages produced)
// should return an empty result rather than block or throw.
public async Task FetchMessagesNoNewMessagesInQueueTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var consumer = new Consumer(brokerRouter, _consumerConfig);
        var offset = await brokerRouter.GetTopicOffsetAsync(TestConfig.TopicName(), _partitionId, CancellationToken.None);

        // Now let's consume
        var result = (await consumer.FetchMessagesAsync(offset, 5, CancellationToken.None)).ToList();
        Assert.AreEqual(0, result.Count, "Should not get any messages");
    }
}
// Fetching from a topic that does not exist yet should complete without
// throwing (the test only asserts the absence of an exception).
public async Task FetchMessagesTopicDoesntExist()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config))
    {
        var topic = TestConfig.TopicName();
        var consumer = new Consumer(brokerRouter, new ConsumerConfiguration(maxPartitionFetchBytes: DefaultMaxMessageSetSize * 2));
        var offset = 0;

        // Now let's consume
        await consumer.FetchMessagesAsync(topic, _partitionId, offset, 5, CancellationToken.None);
    }
}
// A GroupCoordinatorRequest for the test consumer group should come back with
// a response and no error code.
public async Task ConsumerMetadataRequestShouldReturnWithoutError()
{
    using (var router = new BrokerRouter(_options))
    {
        var route = await router.GetBrokerRouteAsync(TestConfig.TopicName(), 0, CancellationToken.None);

        var request = new GroupCoordinatorRequest(TestConfig.ConsumerName());
        var response = await route.Connection.SendAsync(request, CancellationToken.None);

        Assert.That(response, Is.Not.Null);
        Assert.That(response.ErrorCode, Is.EqualTo(ErrorResponseCode.None));
    }
}
// Deletes the topic named after the calling test (via [CallerMemberName]).
// Marks the test inconclusive when the broker times out the delete, since the
// topic would then still exist and later validation would be meaningless.
public static async Task DeleteTopicAsync(this IRouter router, [CallerMemberName] string name = null) {
    var topicName = TestConfig.TopicName(name);
    try {
        // Short 500ms server-side timeout for the delete request.
        var response = await router.SendToAnyAsync(new DeleteTopicsRequest(new [] { topicName }, TimeSpan.FromMilliseconds(500)), CancellationToken.None);
        if (response.Errors.Any(e => e == ErrorCode.REQUEST_TIMED_OUT)) {
            Assert.Inconclusive("Cannot validate when topic remains");
        }
    }
    // NOTE(review): swallowing TOPIC_ALREADY_EXISTS in a *delete* helper looks
    // like a copy/paste from the create path in TemporaryTopicAsync — confirm
    // whether the intended error here is an unknown-topic code instead.
    catch (RequestException ex) when(ex.ErrorCode == ErrorCode.TOPIC_ALREADY_EXISTS) {
        // ignore already exists
    }
}
// Creates a topic named after the calling test (ignoring "already exists"),
// runs the supplied action against it, and leaves cleanup disabled (see the
// note in the finally block).
public static async Task TemporaryTopicAsync(this IRouter router, Func<string, Task> asyncAction, int partitions = 1, [CallerMemberName] string name = null)
{
    var topicName = TestConfig.TopicName(name);

    try
    {
        var createRequest = new CreateTopicsRequest(new[] { new CreateTopicsRequest.Topic(topicName, partitions, 1) }, TimeSpan.FromSeconds(1));
        await router.SendToAnyAsync(createRequest, CancellationToken.None);
    }
    catch (RequestException ex) when (ex.ErrorCode == ErrorCode.TOPIC_ALREADY_EXISTS)
    {
        // ignore already exists
    }

    try
    {
        await asyncAction(topicName);
    }
    finally
    {
        // right now deleting the topic isn't propagating properly, so subsequent runs of the test fail
        // await router.SendToAnyAsync(new DeleteTopicsRequest(new [] { topicName }, TimeSpan.FromSeconds(1)), CancellationToken.None);
    }
}
// Three concurrent single-message sends go through the producer's batching;
// each task should still resolve to its own distinct result.
public async Task SendAsyncShouldGetOneResultForEachPartitionThroughBatching()
{
    using (var router = new BrokerRouter(new KafkaOptions(TestConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var sends = new[]
        {
            producer.SendMessageAsync(new Message("1"), TestConfig.TopicName(), CancellationToken.None),
            producer.SendMessageAsync(new Message("2"), TestConfig.TopicName(), CancellationToken.None),
            producer.SendMessageAsync(new Message("3"), TestConfig.TopicName(), CancellationToken.None),
        };
        await Task.WhenAll(sends);

        // .Result is safe here: every task has already completed.
        var distinctResults = sends.Select(t => t.Result).Distinct().ToList();
        Assert.That(distinctResults.Count, Is.EqualTo(sends.Length));
    }
}
// Fetching from one position past the partition head should make the broker
// report OffsetOutOfRange, surfaced as FetchOutOfRangeException.
public async Task FetchMessagesOffsetBiggerThanLastOffsetInQueueTest()
{
    // BrokerRouter is IDisposable and was previously leaked; dispose via using.
    using (var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config, log: TestConfig.InfoLog))
    {
        var consumer = new Consumer(brokerRouter, _consumerConfig);
        var offset = await brokerRouter.GetTopicOffsetAsync(TestConfig.TopicName(), _partitionId, CancellationToken.None);

        try
        {
            // Now let's consume
            await consumer.FetchMessagesAsync(offset.TopicName, offset.PartitionId, offset.Offset + 1, 5, CancellationToken.None);
            Assert.Fail("should have thrown FetchOutOfRangeException");
        }
        catch (FetchOutOfRangeException ex) when (ex.Message.StartsWith("Kafka returned OffsetOutOfRange for Fetch request"))
        {
            Console.WriteLine(ex.ToString());
        }
    }
}
// Produces one message, then fetches it back from the same offset via a raw
// FetchRequest and verifies the round-tripped value.
public async Task CanFetch()
{
    const int partitionId = 0;

    // Router and producer are IDisposable and were previously leaked.
    using (var router = new BrokerRouter(new KafkaOptions(TestConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var messageValue = Guid.NewGuid().ToString();
        var response = await producer.SendMessageAsync(new Message(messageValue), TestConfig.TopicName(), partitionId, CancellationToken.None);
        var offset = response.Offset;

        var fetch = new FetchRequest.Topic(TestConfig.TopicName(), partitionId, offset, 32000);
        var fetchRequest = new FetchRequest(fetch, minBytes: 10);

        var result = await router.SendAsync(fetchRequest, TestConfig.TopicName(), partitionId, CancellationToken.None);
        Assert.IsTrue(result.Topics.First().Messages.First().Value.ToUtf8String() == messageValue);
    }
}
// Produces expectedCount messages, consumes them all, and verifies the
// consumer's tracked offsets match the server's. Fix: the final assertion
// message hard-coded "2000 messages" while expectedCount is 1000; it now
// interpolates the actual expected count.
public async Task ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
{
    const int expectedCount = 1000;
    var options = new KafkaOptions(TestConfig.IntegrationUri);

    using (var producerRouter = new BrokerRouter(options))
    using (var producer = new Producer(producerRouter))
    {
        // get current offset and reset consumer to top of log
        var offsets = await producer.BrokerRouter.GetTopicOffsetsAsync(TestConfig.TopicName(), CancellationToken.None).ConfigureAwait(false);

        using (var consumerRouter = new BrokerRouter(options))
        using (var consumer = new OldConsumer(
            new ConsumerOptions(TestConfig.TopicName(), consumerRouter) { MaxWaitTimeForMinimumBytes = TimeSpan.Zero },
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offset)).ToArray()))
        {
            Console.WriteLine("Sending {0} test messages", expectedCount);
            var response = await producer.SendMessagesAsync(Enumerable.Range(0, expectedCount).Select(x => new Message(x.ToString())), TestConfig.TopicName(), CancellationToken.None);
            Assert.That(response.Any(x => x.ErrorCode != (int)ErrorResponseCode.None), Is.False, "Error occured sending test messages to server.");

            var stream = consumer.Consume();

            Console.WriteLine("Reading message back out from consumer.");
            var data = stream.Take(expectedCount).ToList();

            var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.PartitionId).ToList();

            var serverOffset = await producer.BrokerRouter.GetTopicOffsetsAsync(TestConfig.TopicName(), CancellationToken.None).ConfigureAwait(false);
            var positionOffset = serverOffset.Select(x => new OffsetPosition(x.PartitionId, x.Offset))
                .OrderBy(x => x.PartitionId)
                .ToList();

            Assert.That(consumerOffset, Is.EqualTo(positionOffset), "The consumerOffset position should match the server offset position.");
            Assert.That(data.Count, Is.EqualTo(expectedCount), $"We should have received {expectedCount} messages from the server.");
        }
    }
}
// Produces a single message with acks=1, records the offset the broker assigned
// to it, then fetches at exactly that offset and expects the same value back.
public async Task ConsumeByOffsetShouldGetSameMessageProducedAtSameOffset()
{
    long producedOffset;
    var expectedValue = Guid.NewGuid();

    using (var router = new BrokerRouter(TestConfig.IntegrationUri, log: TestConfig.InfoLog))
    using (var producer = new Producer(router))
    {
        var ackedResponse = await producer.SendMessageAsync(new Message(expectedValue.ToString()), TestConfig.TopicName(), 0, new SendMessageConfiguration(acks: 1), CancellationToken.None);
        producedOffset = ackedResponse.Offset;
    }

    // A fresh router/consumer pair proves the offset is meaningful across connections.
    using (var router = new BrokerRouter(TestConfig.IntegrationUri, log: TestConfig.InfoLog))
    using (var consumer = new Consumer(router, new ConsumerConfiguration(maxServerWait: TimeSpan.Zero)))
    {
        var result = await consumer.FetchMessagesAsync(TestConfig.TopicName(), 0, producedOffset, 1, CancellationToken.None);
        Assert.AreEqual(expectedValue.ToString(), result[0].Value.ToUtf8String());
    }
}
// Produces totalMessages (singly or in batches, per messagesPerSend), then
// fetches the same range twice from the original offset and verifies both
// reads are complete and identical — i.e. seeking back works.
public async Task ConsumerShouldBeAbleToSeekBackToEarlierOffset([Values(20)] int sends, [Values(1, 10)] int messagesPerSend)
{
    var totalMessages = sends * messagesPerSend;
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(TestConfig.IntegrationUri, log: TestConfig.InfoLog))
    using (var producer = new Producer(router))
    {
        var offset = await producer.BrokerRouter.GetTopicOffsetAsync(TestConfig.TopicName(), 0, CancellationToken.None);

        for (var batch = 0; batch < sends; batch++)
        {
            if (messagesPerSend == 1)
            {
                await producer.SendMessageAsync(new Message(batch.ToString(), testId), TestConfig.TopicName(), 0, CancellationToken.None);
            }
            else
            {
                var first = batch * messagesPerSend;
                var messages = messagesPerSend.Repeat(n => new Message((first + n).ToString(), testId)).ToList();
                await producer.SendMessagesAsync(messages, TestConfig.TopicName(), 0, CancellationToken.None);
            }
        }

        using (var consumer = new Consumer(router, new ConsumerConfiguration(maxServerWait: TimeSpan.Zero)))
        {
            var firstRead = await consumer.FetchMessagesAsync(offset, totalMessages, CancellationToken.None);
            TestConfig.InfoLog.Info(() => LogEvent.Create($"Message order: {string.Join(", ", firstRead.Select(x => x.Value.ToUtf8String()).ToList())}"));

            var secondRead = await consumer.FetchMessagesAsync(offset, totalMessages, CancellationToken.None);
            TestConfig.InfoLog.Info(() => LogEvent.Create($"Message order: {string.Join(", ", secondRead.Select(x => x.Value.ToUtf8String()).ToList())}"));

            Assert.That(firstRead.Count, Is.EqualTo(totalMessages));
            Assert.That(firstRead.Count, Is.EqualTo(secondRead.Count));
            Assert.That(firstRead.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(secondRead.Select(x => x.Value.ToUtf8String()).ToList()), "Expected the message list in the correct order.");
        }
    }
}