public async Task Test_KafkaDataProvider_HappyPath_Async()
{
    var reports = new List<DeliveryResult<Null, AuditEvent>>();
    const string topic = "my-audit-topic-happypath-async";
    const string host = "localhost:9092";
    var pConfig = new ProducerConfig() { BootstrapServers = host, ClientId = Dns.GetHostName() };

    // InsertOnStartInsertOnEnd produces one event when the scope is created and
    // a second when it is disposed, so two delivery reports are expected.
    Audit.Core.Configuration.Setup()
        .UseKafka(_ => _
            .ProducerConfig(pConfig)
            .Topic(topic)
            .ResultHandler(rpt => { reports.Add(rpt); }))
        .WithCreationPolicy(EventCreationPolicy.InsertOnStartInsertOnEnd);

    var guid = Guid.NewGuid();
    var scope = await AuditScope.CreateAsync("type1", null, new { custom_field = guid });
    scope.Event.CustomFields["custom_field"] = "UPDATED:" + guid;
    await scope.DisposeAsync();

    Assert.AreEqual(2, reports.Count);
    Assert.AreEqual(PersistenceStatus.Persisted, reports[0].Status);
    Assert.AreEqual(PersistenceStatus.Persisted, reports[1].Status);

    var cv = new ConsumerBuilder<Null, AuditEvent>(new ConsumerConfig()
    {
        BootstrapServers = host,
        ClientId = Dns.GetHostName(),
        GroupId = "test-" + guid,
        AutoOffsetReset = AutoOffsetReset.Earliest
    }).SetValueDeserializer(new DefaultJsonSerializer<AuditEvent>()).Build();

    cv.Subscribe(topic);

    // Give the group a moment to receive its partition assignment;
    // Seek fails on partitions that are not yet assigned.
    await Task.Delay(1000);

    // Read each event back from the exact offset reported by the producer.
    cv.Seek(new TopicPartitionOffset(topic, reports[0].Partition, reports[0].Offset));
    var r1 = cv.Consume(1000);
    cv.Seek(new TopicPartitionOffset(topic, reports[1].Partition, reports[1].Offset));
    var r2 = cv.Consume(1000);

    Assert.IsNotNull(r1);
    Assert.IsNotNull(r2);
    Assert.AreEqual(guid.ToString(), r1.Message.Value.CustomFields["custom_field"]);
    Assert.AreEqual("UPDATED:" + guid, r2.Message.Value.CustomFields["custom_field"]);
}
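// The Audit.NET tests in this file deserialize values with DefaultJsonSerializer<AuditEvent>,
// which ships with Audit.NET.Kafka. A minimal sketch of what such a JSON ser/de looks like,
// assuming System.Text.Json and the standard Confluent.Kafka ISerializer/IDeserializer
// interfaces (illustration only, not the shipped implementation):
public class JsonSerDe<T> : ISerializer<T>, IDeserializer<T>
{
    // requires: using Confluent.Kafka; using System.Text.Json;
    public byte[] Serialize(T data, SerializationContext context)
        => JsonSerializer.SerializeToUtf8Bytes(data);

    public T Deserialize(ReadOnlySpan<byte> data, bool isNull, SerializationContext context)
        => isNull ? default : JsonSerializer.Deserialize<T>(data);
}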
public static void Consumer_Seek(string bootstrapServers, string singlePartitionTopic, string partitionedTopic)
{
    LogToFile("start Consumer_Seek");

    var consumerConfig = new ConsumerConfig
    {
        GroupId = Guid.NewGuid().ToString(),
        BootstrapServers = bootstrapServers
    };
    var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers };

    using (var producer = new ProducerBuilder<byte[], byte[]>(producerConfig).Build())
    using (var consumer = new ConsumerBuilder<Null, string>(consumerConfig)
        .SetErrorHandler((_, e) => Assert.True(false, e.Reason))
        .Build())
    {
        const string checkValue = "check value";
        var dr = producer.ProduceAsync(singlePartitionTopic,
            new Message<byte[], byte[]> { Value = Serializers.Utf8.Serialize(checkValue, SerializationContext.Empty) }).Result;
        var dr2 = producer.ProduceAsync(singlePartitionTopic,
            new Message<byte[], byte[]> { Value = Serializers.Utf8.Serialize("second value", SerializationContext.Empty) }).Result;
        var dr3 = producer.ProduceAsync(singlePartitionTopic,
            new Message<byte[], byte[]> { Value = Serializers.Utf8.Serialize("third value", SerializationContext.Empty) }).Result;

        // Assign directly at the offset of the first message and consume all three.
        consumer.Assign(new TopicPartitionOffset[] { new TopicPartitionOffset(singlePartitionTopic, 0, dr.Offset) });
        var record = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record.Message);
        record = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record.Message);
        record = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record.Message);

        // Rewind to the first message.
        consumer.Seek(dr.TopicPartitionOffset);

        // Position reflects the last consumed offset, not the pending seek target,
        // so it shouldn't equal the seek position until the next Consume.
        var pos = consumer.Position(dr.TopicPartition);
        Assert.NotEqual(dr.Offset, pos);

        // The next consume replays the first message.
        record = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record.Message);
        Assert.Equal(checkValue, record.Message.Value);
    }

    Assert.Equal(0, Library.HandleCount);
    LogToFile("end Consumer_Seek");
}
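// Consumer_Seek above shows that Seek only repositions the next fetch. A hypothetical
// helper built on the same APIs (Assignment, QueryWatermarkOffsets, Seek) that rewinds
// every assigned partition to its low watermark; the name and shape are illustrative:
static void RewindToBeginning<TKey, TValue>(IConsumer<TKey, TValue> consumer, TimeSpan timeout)
{
    foreach (var tp in consumer.Assignment)
    {
        // The low watermark is the first offset still available on the partition.
        var watermarks = consumer.QueryWatermarkOffsets(tp, timeout);
        consumer.Seek(new TopicPartitionOffset(tp, watermarks.Low));
    }
}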
public void Test_KafkaDataProvider_KeyedHappyPath()
{
    var reports = new List<DeliveryResult<string, AuditEvent>>();
    const string topic = "my-audit-topic-keyed-happypath";
    const string host = "localhost:9092";
    var pConfig = new ProducerConfig() { BootstrapServers = host, ClientId = Dns.GetHostName() };

    // InsertOnEnd produces a single event when the scope is disposed; the
    // KeySelector uses the EventType as the Kafka message key.
    Audit.Core.Configuration.Setup()
        .UseKafka<string>(_ => _
            .ProducerConfig(pConfig)
            .Topic(topic)
            .ResultHandler(rpt => { reports.Add(rpt); })
            .KeySelector(ev => ev.EventType))
        .WithCreationPolicy(EventCreationPolicy.InsertOnEnd);

    var guid = Guid.NewGuid();
    var scope = AuditScope.Create("key1", null, new { custom_field = guid });
    scope.Event.CustomFields["custom_field"] = "UPDATED:" + guid;
    scope.Dispose();

    Assert.AreEqual(1, reports.Count);
    Assert.AreEqual(PersistenceStatus.Persisted, reports[0].Status);

    var cv = new ConsumerBuilder<string, AuditEvent>(new ConsumerConfig()
    {
        BootstrapServers = host,
        ClientId = Dns.GetHostName(),
        GroupId = "test-" + guid,
        AutoOffsetReset = AutoOffsetReset.Earliest
    }).SetValueDeserializer(new DefaultJsonSerializer<AuditEvent>()).Build();

    cv.Subscribe(topic);

    // Wait for the partition assignment before seeking to the reported offset.
    Thread.Sleep(1000);
    cv.Seek(new TopicPartitionOffset(topic, reports[0].Partition, reports[0].Offset));
    var r1 = cv.Consume(1000);

    Assert.IsNotNull(r1);
    Assert.AreEqual("UPDATED:" + guid, r1.Message.Value.CustomFields["custom_field"]);
    Assert.AreEqual("key1", r1.Message.Key);
}
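// The KeySelector above makes routing deterministic: Kafka's default partitioner hashes
// the message key, so every event sharing an EventType lands on the same partition. A
// hypothetical follow-up assertion, assuming several scopes with the "key1" event type
// were created and disposed so that `reports` holds multiple delivery results:
Assert.AreEqual(1, reports.Select(r => r.Partition.Value).Distinct().Count());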
public async Task<IEnumerable<Models.KafkaMessageModel>> GetMessages(string topic, long count = -1)
{
    List<Models.KafkaMessageModel> toReturn = new List<Models.KafkaMessageModel>();
    if (!string.IsNullOrEmpty(this.BootstrapServers) && !string.IsNullOrEmpty(this.GroupId))
    {
        ConsumerConfig config = new ConsumerConfig()
        {
            BootstrapServers = this.BootstrapServers,
            GroupId = this.GroupId,
            AutoOffsetReset = AutoOffsetReset.Latest
        };

        await Task.Run(() =>
        {
            using (IConsumer<string, string> consumer = new ConsumerBuilder<string, string>(config).Build())
            {
                ConsumeResult<string, string> result = null;
                try
                {
                    consumer.Subscribe(topic);

                    // The group assignment only happens inside Consume calls, so poll with
                    // a short timeout until a partition is assigned; a bare busy-wait on
                    // Assignment would spin forever.
                    while (!consumer.Assignment.Any())
                    {
                        consumer.Consume(TimeSpan.FromMilliseconds(100));
                    }

                    // Use the watermarks to find where the last `count` messages start,
                    // then seek there. Note that only the first assigned partition is read.
                    TopicPartition tp = consumer.Assignment.FirstOrDefault();
                    WatermarkOffsets wo = consumer.QueryWatermarkOffsets(tp, TimeSpan.FromSeconds(this.TimeoutSeconds));
                    long numMessages = wo.High - wo.Low;
                    if (count > 0 && count < numMessages)
                    {
                        numMessages = count;
                    }
                    consumer.Seek(new TopicPartitionOffset(tp, wo.High - numMessages));

                    do
                    {
                        result = consumer.Consume(TimeSpan.FromSeconds(this.TimeoutSeconds));
                        if (result != null)
                        {
                            try
                            {
                                toReturn.Add(new Models.KafkaMessageModel()
                                {
                                    Topic = result.Topic,
                                    Value = JsonConvert.DeserializeObject<MessageModel>(result.Message.Value),
                                    Raw = result.Message.Value
                                });
                            }
                            catch (JsonSerializationException)
                            {
                                /* We may add events in the future, and don't want to stop
                                   collecting current events if we haven't accounted for
                                   the structure */
                            }
                        }
                    } while (result != null && result.TopicPartitionOffset.Offset.Value <= wo.High - 1);
                }
                catch (Exception)
                {
                    // Swallow broker/timeout errors and return whatever was collected.
                }
                consumer.Unsubscribe();
                consumer.Close();
            }
        });
    }
    return toReturn;
}
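// Hypothetical caller of GetMessages above, from inside an async method. The containing
// type name (KafkaReader) and its properties (BootstrapServers, GroupId, TimeoutSeconds)
// are assumptions inferred from the fields the method reads:
var reader = new KafkaReader
{
    BootstrapServers = "localhost:9092",
    GroupId = "inspector-" + Guid.NewGuid(),
    TimeoutSeconds = 10
};
// Fetch the five most recent messages from the first assigned partition.
var latest = await reader.GetMessages("my-topic", count: 5);
foreach (var msg in latest)
{
    Console.WriteLine($"{msg.Topic}: {msg.Raw}");
}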
public void NativeKafkaIConsumerSeekShouldNotFail()
{
    var topic = "topic-1";
    var group = "group-1";
    var totalMessages = 105;

    // Produce the test messages.
    var producerConfig = new ProducerConfig { BootstrapServers = Fixture.KafkaServer };
    var producer = new ProducerBuilder<string, string>(producerConfig).Build();
    foreach (var i in Enumerable.Range(1, totalMessages))
    {
        producer.Produce(topic, new Message<string, string> { Key = i.ToString(), Value = i.ToString() });
    }
    producer.Flush();

    var consumerConfig = new ConsumerConfig
    {
        BootstrapServers = Fixture.KafkaServer,
        GroupId = group,
        AutoOffsetReset = AutoOffsetReset.Earliest,
        EnableAutoCommit = false
    };
    var consumer = new ConsumerBuilder<string, string>(consumerConfig).Build();
    consumer.Subscribe(topic);

    const int consumeTimeout = 50;
    const int testTimeout = 10_000;
    var consumeCount = 0;

    // Track the next seek target for each of the topic's three partitions.
    var offsets = new List<TopicPartitionOffset>
    {
        new TopicPartitionOffset(new TopicPartition(topic, 0), 0),
        new TopicPartitionOffset(new TopicPartition(topic, 1), 0),
        new TopicPartitionOffset(new TopicPartition(topic, 2), 0),
    };

    var watch = Stopwatch.StartNew();
    while (consumeCount < totalMessages && watch.ElapsedMilliseconds < testTimeout)
    {
        var consumed = ConsumeAllMessages(consumer, totalMessages, offsets, consumeTimeout);
        //Log.Info($"Polled {consumed.consumed} messages");
        if (consumed.consumed != 0)
        {
            // Work out where to resume on each partition, then seek there; the test
            // passes if repeated seeks never throw and every message is eventually seen.
            for (var i = 0; i < 3; i++)
            {
                var seekResult = CheckForSeek(consumed.messages[i]);
                consumeCount += seekResult.count;
                offsets[i] = seekResult.seekOffset;
            }
            foreach (var offset in offsets)
            {
                if (offset != null)
                {
                    Log.Info($"Seeking {offset.TopicPartition} to {offset.Offset}");
                    consumer.Seek(offset);
                }
            }
        }
        Thread.Sleep(consumeTimeout);
    }
    watch.Stop();

    consumeCount.Should().Be(totalMessages);
}
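// The test above relies on two helpers that are not shown. The sketches below are
// assumptions inferred from the call sites, not the original implementations:
static (int consumed, List<ConsumeResult<string, string>>[] messages) ConsumeAllMessages(
    IConsumer<string, string> consumer,
    int max,
    List<TopicPartitionOffset> offsets,
    int timeoutMs)
{
    // One bucket per partition, matching the three seek targets the test tracks.
    var buckets = new List<ConsumeResult<string, string>>[offsets.Count];
    for (var i = 0; i < buckets.Length; i++)
    {
        buckets[i] = new List<ConsumeResult<string, string>>();
    }

    // Drain whatever is currently available, bucketing results by partition.
    var consumed = 0;
    ConsumeResult<string, string> result;
    while (consumed < max && (result = consumer.Consume(TimeSpan.FromMilliseconds(timeoutMs))) != null)
    {
        buckets[result.Partition.Value].Add(result);
        consumed++;
    }
    return (consumed, buckets);
}

static (int count, TopicPartitionOffset seekOffset) CheckForSeek(
    List<ConsumeResult<string, string>> messages)
{
    // Nothing consumed from this partition means there is nothing to seek past.
    if (messages.Count == 0)
    {
        return (0, null);
    }

    // Resume from the offset just after the last message consumed on this partition.
    var last = messages[messages.Count - 1];
    return (messages.Count, new TopicPartitionOffset(last.TopicPartition, last.Offset.Value + 1));
}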