public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced_dataLoad(int numberOfMessage, int timeoutInMs)
{
    int partition = 0;
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.Start();

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog });
    stopwatch.Restart();

    var producer = new Producer(router) { BatchDelayTime = TimeSpan.FromMilliseconds(10), BatchSize = numberOfMessage / 10 };
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();

    List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request offset, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();

    // Produce numberOfMessage messages to a single partition so their order can be verified later.
    List<Task> sendList = new List<Task>(numberOfMessage);
    for (int i = 0; i < numberOfMessage; i++)
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
        sendList.Add(sendTask);
    }

    TimeSpan maxTimeToRun = TimeSpan.FromMilliseconds(timeoutInMs);
    var doneSend = Task.WhenAll(sendList.ToArray());
    await Task.WhenAny(doneSend, Task.Delay(maxTimeToRun));
    Assert.IsTrue(doneSend.IsCompleted, "Did not finish sending in time.");
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();

    // Consume from the offsets captured before producing, restricted to the single test partition.
    ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
    consumerOptions.PartitionWhitelist = new List<int> { partition };
    consumerOptions.MaxWaitTimeForMinimumBytes = TimeSpan.Zero;
    Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    int expected = 0;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consume, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));

    IEnumerable<Message> messages = null;
    var doneConsume = Task.Run(() =>
    {
        stopwatch.Restart();
        messages = consumer.Consume().Take(numberOfMessage).ToArray();
        IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done consume, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
        stopwatch.Restart();
    });

    await Task.WhenAny(doneConsume, Task.Delay(maxTimeToRun));
    Assert.IsTrue(doneConsume.IsCompleted, "Did not finish consuming in time.");
    Assert.IsTrue(messages.Count() == numberOfMessage, "Did not consume all messages.");

    // Each message was produced with its index as the payload, so the payloads must come back in the same order.
    foreach (Message message in messages)
    {
        Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order.");
        expected++;
    }

    stopwatch.Restart();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer dispose, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    producer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer dispose, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    consumer.Dispose();
    stopwatch.Restart();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router dispose, time milliseconds: {0}", stopwatch.ElapsedMilliseconds));
    router.Dispose();
}
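// The send and consume phases above both use the same "await the work or a timeout, then assert
// completion" pattern. A minimal sketch of a shared helper is shown below, assuming NUnit's Assert;
// the name AssertCompletesWithinAsync is an assumption and does not exist in the original tests.
private static async Task AssertCompletesWithinAsync(Task work, TimeSpan timeout, string failureMessage)
{
    // Wait for whichever finishes first: the work itself or the timeout delay.
    var finished = await Task.WhenAny(work, Task.Delay(timeout));

    // If the delay won, the work did not complete within the allotted time.
    Assert.IsTrue(finished == work && work.IsCompleted, failureMessage);
}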
public async Task ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition()
{
    // Pin the partition selector to partition 1 so that any key-based routing would be visible.
    Mock<IPartitionSelector> partitionSelector = new Mock<IPartitionSelector>();
    partitionSelector.Setup(x => x.Select(It.IsAny<Topic>(), It.IsAny<byte[]>()))
        .Returns((Topic topic, byte[] key) => topic.Partitions.Find(p => p.PartitionId == 1));

    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { PartitionSelector = partitionSelector.Object });
    var producer = new Producer(router);
    var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);

    int partitionId = 0;

    // Messages should be sent to the explicit partitionId and not routed by the key through the selector.
    for (int i = 0; i < 20; i++)
    {
        await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "key") }, 1, null, MessageCodec.CodecNone, partitionId);
    }

    // Consume from partitionId to verify that the data was sent to the correct partition.
    var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { PartitionWhitelist = { partitionId } },
        offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    for (int i = 0; i < 20; i++)
    {
        Message result = null;
        await Task.Run(() => result = consumer.Consume().Take(1).First());
        Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
    }

    consumer.Dispose();
    producer.Dispose();
}
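// The Moq setup in the test above forces every key-based selection to partition 1. The same
// behaviour can be written as a plain IPartitionSelector implementation; the class below is an
// illustrative sketch (the name FixedPartitionSelector is an assumption, not part of the library),
// assuming Select(Topic, byte[]) returns the Partition type used by the mock above.
public class FixedPartitionSelector : IPartitionSelector
{
    private readonly int _partitionId;

    public FixedPartitionSelector(int partitionId)
    {
        _partitionId = partitionId;
    }

    public Partition Select(Topic topic, byte[] key)
    {
        // Ignore the message key entirely and always pick the configured partition.
        return topic.Partitions.Find(p => p.PartitionId == _partitionId);
    }
}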
/// <summary>
/// Order should be preserved for the same ack level and partition.
/// </summary>
/// <returns></returns>
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced()
{
    int partition = 0;
    int numberOfMessage = 200;

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter"));
    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri));

    // Deliberately use the producer configuration that reproduced the race condition in the old
    // version (second constructor argument of 2, no batch delay); this is slow on purpose.
    // The fast default configuration is left commented out below.
    int causesRaceConditionOldVersion = 2;
    var producer = new Producer(router, causesRaceConditionOldVersion) { BatchDelayTime = TimeSpan.Zero };
    // var producer = new Producer(router); // this is not slow

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer"));
    List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request offset"));

    List<Task> sendList = new List<Task>(numberOfMessage);
    for (int i = 0; i < numberOfMessage; i++)
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
        sendList.Add(sendTask);
    }

    await Task.WhenAll(sendList.ToArray());
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send"));

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create consumer"));
    ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
    consumerOptions.PartitionWhitelist = new List<int> { partition };
    Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    int expected = 0;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consume"));

    await Task.Run(() =>
    {
        var results = consumer.Consume().Take(numberOfMessage).ToList();
        Assert.IsTrue(results.Count() == numberOfMessage, "Did not consume all messages.");
        IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done consume"));

        // Payloads carry the produced index, so they must arrive in ascending order.
        foreach (Message message in results)
        {
            Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order.");
            expected++;
        }
    });

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer dispose"));
    producer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer dispose"));
    consumer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router dispose"));
    router.Dispose();
}
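// Both ordering tests above verify in-order delivery by comparing each payload to a running
// counter. A more compact, reusable form of that check is sketched below; the helper name
// AssertMessagesAreSequential is an assumption, and it relies on the tests' convention of
// producing the zero-based message index as a UTF-8 payload.
private static void AssertMessagesAreSequential(IEnumerable<Message> messages, int expectedCount)
{
    var actual = messages.Select(m => int.Parse(m.Value.ToUtf8String())).ToList();

    Assert.That(actual.Count, Is.EqualTo(expectedCount), "Did not consume all messages.");

    // The consumed payloads must be exactly 0, 1, ..., expectedCount - 1 in that order.
    CollectionAssert.AreEqual(Enumerable.Range(0, expectedCount).ToList(), actual,
        "Expected the messages in the order they were produced.");
}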