void AssertRecords(RawKafkaRecord expected, RawKafkaRecord record)
{
    Assert.That(record, Is.Not.Null);
    Assert.That(record.Topic, Is.EqualTo(expected.Topic));
    CollectionAssert.AreEqual(expected.Key as byte[], record.Key as byte[]);
    CollectionAssert.AreEqual(expected.Value as byte[], record.Value as byte[]);
    Assert.That(record.Partition, Is.EqualTo(expected.Partition));
    Assert.That(record.Offset, Is.EqualTo(expected.Offset));
}
public void TestConsumerMessageReceived()
{
    _cluster.Start();

    const int msgLag = 10;
    var msg = new RawKafkaRecord { Topic = "myTopic", Lag = msgLag };
    _consumeMock.Raise(r => r.MessageReceived += null, msg);

    Assert.AreEqual(0, _internalErrors);
    AssertStatistics(_cluster.Statistics, received: 1, consumerLag: msgLag);
}
public void TestMessageReceivedObservable()
{
    RawKafkaRecord record = null;
    _client.Messages.Subscribe(kr => record = kr);

    var expected = new RawKafkaRecord
    {
        Topic = Topic,
        Key = KeyB,
        Value = ValueB,
        Partition = 1,
        Offset = 123
    };
    _consumer.Raise(c => c.MessageReceived += null, expected);

    AssertRecords(expected, record);
}
public void TestMessageDiscardedObservable()
{
    RawKafkaRecord record = null;
    _client.DiscardedMessages.Subscribe(kr => record = kr);

    _producer.Raise(c => c.MessageDiscarded += null, Topic, new Message { Key = KeyB, Value = ValueB });

    AssertRecords(
        new RawKafkaRecord { Topic = Topic, Key = KeyB, Value = ValueB, Partition = Partitions.None, Offset = 0 },
        record);
}
public void TestMessageExpired()
{
    RawKafkaRecord record = null;
    _client.MessageExpired += kr => record = kr;

    _producer.Raise(c => c.MessageExpired += null, Topic, new Message { Key = KeyB, Value = ValueB });

    AssertRecords(
        new RawKafkaRecord { Topic = Topic, Key = KeyB, Value = ValueB, Partition = Partitions.None, Offset = 0 },
        record);
}
private void UpdateConsumerMessageStatistics(RawKafkaRecord kr)
{
    Statistics.UpdateReceived();
    Statistics.UpdateConsumerLag(kr.Topic, kr.Lag);
}
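// Minimal wiring sketch (assumption, not taken from the source: the enclosing class owns a
// consume router/mock exposing the MessageReceived event raised in the tests above). It only
// illustrates where UpdateConsumerMessageStatistics would typically be subscribed:
//
//     _consumeRouter.MessageReceived += UpdateConsumerMessageStatistics;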
public void TestConsumer()
{
    var client = new Mock<IClusterClient>();
    client.SetupGet(c => c.Messages)
        .Returns(Observable.FromEvent<RawKafkaRecord>(
            a => client.Object.MessageReceived += a,
            a => client.Object.MessageReceived -= a));

    // Bad arguments
    Assert.That(() => new KafkaConsumer<string, string>(null, client.Object), Throws.ArgumentException);
    Assert.That(() => new KafkaConsumer<string, string>("", client.Object), Throws.ArgumentException);
    Assert.That(() => new KafkaConsumer<string, string>("toto", null), Throws.InstanceOf<ArgumentNullException>());

    using (var consumer = new KafkaConsumer<string, string>("topic", client.Object))
    {
        // Double new on same topic/TKey/TValue
        Assert.That(() => new KafkaConsumer<string, string>("topic", client.Object), Throws.ArgumentException);

        // Consume / Stop calls are forwarded to the underlying cluster client
        consumer.Consume(2, 42);
        consumer.ConsumeFromLatest();
        consumer.ConsumeFromLatest(2);
        consumer.ConsumeFromEarliest();
        consumer.ConsumeFromEarliest(2);
        consumer.StopConsume();
        consumer.StopConsume(2);
        consumer.StopConsume(2, 42);

        client.Verify(c => c.Consume(It.IsAny<string>(), It.IsAny<int>(), It.IsAny<long>()), Times.Once());
        client.Verify(c => c.Consume("topic", 2, 42));
        client.Verify(c => c.ConsumeFromLatest(It.IsAny<string>()), Times.Once());
        client.Verify(c => c.ConsumeFromLatest("topic"));
        client.Verify(c => c.ConsumeFromLatest(It.IsAny<string>(), It.IsAny<int>()), Times.Once());
        client.Verify(c => c.ConsumeFromLatest("topic", 2));
        client.Verify(c => c.ConsumeFromEarliest(It.IsAny<string>()), Times.Once());
        client.Verify(c => c.ConsumeFromEarliest("topic"));
        client.Verify(c => c.ConsumeFromEarliest(It.IsAny<string>(), It.IsAny<int>()), Times.Once());
        client.Verify(c => c.ConsumeFromEarliest("topic", 2));
        client.Verify(c => c.StopConsume(It.IsAny<string>()), Times.Once());
        client.Verify(c => c.StopConsume("topic"));
        client.Verify(c => c.StopConsume(It.IsAny<string>(), It.IsAny<int>()), Times.Once());
        client.Verify(c => c.StopConsume("topic", 2));
        client.Verify(c => c.StopConsume(It.IsAny<string>(), It.IsAny<int>(), It.IsAny<long>()), Times.Once());
        client.Verify(c => c.StopConsume("topic", 2, 42));

        // Received messages are surfaced both through the event and the observable
        bool messageObserved = false;
        bool messageEvent = false;
        KafkaRecord<string, string> received = default(KafkaRecord<string, string>);
        consumer.MessageReceived += kr => { received = kr; messageEvent = true; };
        consumer.Messages.Subscribe(kr => { messageObserved = true; });

        var record = new RawKafkaRecord { Topic = "topic", Key = "key", Value = "data", Partition = 2, Offset = 42 };
        client.Raise(c => c.MessageReceived += null, record);

        Assert.IsTrue(messageEvent);
        Assert.IsTrue(messageObserved);
        Assert.AreEqual("topic", received.Topic);
        Assert.AreEqual("key", received.Key);
        Assert.AreEqual("data", received.Value);
        Assert.AreEqual(2, received.Partition);
        Assert.AreEqual(42, received.Offset);

        // A null key is propagated as-is
        record.Key = null;
        messageObserved = false;
        messageEvent = false;
        received = default(KafkaRecord<string, string>);
        client.Raise(c => c.MessageReceived += null, record);

        Assert.IsTrue(messageEvent);
        Assert.IsTrue(messageObserved);
        Assert.AreEqual("topic", received.Topic);
        Assert.IsNull(received.Key);
        Assert.AreEqual("data", received.Value);
        Assert.AreEqual(2, received.Partition);
        Assert.AreEqual(42, received.Offset);
    }

    // Dispose: can register another consumer with same Topic/TKey/TValue once
    // the previous one has been disposed.
    client = new Mock<IClusterClient>();
    client.SetupGet(c => c.Messages)
        .Returns(Observable.FromEvent<RawKafkaRecord>(
            a => client.Object.MessageReceived += a,
            a => client.Object.MessageReceived -= a));
    var consumer2 = new KafkaConsumer<string, string>("topic", client.Object);

    // Dispose: observables are completed and events no longer subscribed
    bool messageCompleted = false;
    bool messageEvent2 = false;
    consumer2.Messages.Subscribe(kr => { }, () => messageCompleted = true);
    consumer2.MessageReceived += _ => messageEvent2 = true;
    consumer2.Dispose();

    client.Verify(c => c.StopConsume(It.IsAny<string>()), Times.Once()); // Dispose stops all
    client.Verify(c => c.StopConsume("topic"), Times.Once());

    var record2 = new RawKafkaRecord { Topic = "topic", Key = "key", Value = "data", Partition = 2, Offset = 42 };
    client.Raise(c => c.MessageReceived += null, record2);
    Assert.IsTrue(messageCompleted);
    Assert.IsFalse(messageEvent2);

    // Consume / Stop no longer work
    consumer2.Consume(2, 42);
    consumer2.ConsumeFromLatest();
    consumer2.ConsumeFromLatest(2);
    consumer2.ConsumeFromEarliest();
    consumer2.ConsumeFromEarliest(2);
    consumer2.StopConsume();
    consumer2.StopConsume(2);
    consumer2.StopConsume(2, 42);

    client.Verify(c => c.Consume(It.IsAny<string>(), It.IsAny<int>(), It.IsAny<long>()), Times.Never());
    client.Verify(c => c.ConsumeFromLatest(It.IsAny<string>()), Times.Never());
    client.Verify(c => c.ConsumeFromLatest(It.IsAny<string>(), It.IsAny<int>()), Times.Never());
    client.Verify(c => c.ConsumeFromEarliest(It.IsAny<string>()), Times.Never());
    client.Verify(c => c.ConsumeFromEarliest(It.IsAny<string>(), It.IsAny<int>()), Times.Never());
    client.Verify(c => c.StopConsume(It.IsAny<string>()), Times.Once());
    client.Verify(c => c.StopConsume(It.IsAny<string>(), It.IsAny<int>()), Times.Never());
    client.Verify(c => c.StopConsume(It.IsAny<string>(), It.IsAny<int>(), It.IsAny<long>()), Times.Never());

    // Dispose: can dispose the same consumer multiple times with no effect
    Assert.That(() => consumer2.Dispose(), Throws.Nothing);
}
public void TestProducer()
{
    var client = new Mock<IClusterClient>();
    client.SetupGet(c => c.DiscardedMessages)
        .Returns(Observable.FromEvent<RawKafkaRecord>(
            a => client.Object.MessageDiscarded += a,
            a => client.Object.MessageDiscarded -= a));
    client.SetupGet(c => c.ExpiredMessages)
        .Returns(Observable.FromEvent<RawKafkaRecord>(
            a => client.Object.MessageExpired += a,
            a => client.Object.MessageExpired -= a));

    // Bad arguments
    Assert.That(() => new KafkaProducer<string, string>(null, client.Object), Throws.ArgumentException);
    Assert.That(() => new KafkaProducer<string, string>("", client.Object), Throws.ArgumentException);
    Assert.That(() => new KafkaProducer<string, string>("toto", null), Throws.InstanceOf<ArgumentNullException>());

    using (var producer = new KafkaProducer<string, string>("topic", client.Object))
    {
        // Double new on same topic/TKey/TValue
        Assert.That(() => new KafkaProducer<string, string>("topic", client.Object), Throws.ArgumentException);

        // Produce calls are forwarded to the underlying cluster client
        producer.Produce("data");
        producer.Produce("key", "data");
        producer.Produce("key", "data", 42);

        client.Verify(c => c.Produce(It.IsAny<string>(), It.IsAny<object>(), It.IsAny<object>(), It.IsAny<int>()), Times.Exactly(3));
        client.Verify(c => c.Produce("topic", null, "data", Partitions.Any), Times.Once());
        client.Verify(c => c.Produce("topic", "key", "data", Partitions.Any), Times.Once());
        client.Verify(c => c.Produce("topic", "key", "data", 42), Times.Once());

        // Discarded/Expired messages are correctly transmitted
        bool discardedThroughEvent = false;
        bool observedDiscard = false;
        bool expiredThroughEvent = false;
        bool observedExpired = false;
        Action<KafkaRecord<string, string>> checkRecord = kr =>
        {
            Assert.AreEqual("topic", kr.Topic);
            Assert.AreEqual("key", kr.Key);
            Assert.AreEqual("data", kr.Value);
            Assert.AreEqual(Partitions.None, kr.Partition);
            Assert.AreEqual(0, kr.Offset);
        };
        producer.MessageDiscarded += kr => { checkRecord(kr); discardedThroughEvent = true; };
        producer.MessageExpired += kr => { checkRecord(kr); expiredThroughEvent = true; };
        producer.DiscardedMessages.Subscribe(kr => { checkRecord(kr); observedDiscard = true; });
        producer.ExpiredMessages.Subscribe(kr => { checkRecord(kr); observedExpired = true; });

        var record = new RawKafkaRecord { Topic = "topic", Key = "key", Value = "data", Partition = Partitions.None };
        client.Raise(c => c.MessageDiscarded += null, record);
        client.Raise(c => c.MessageExpired += null, record);

        Assert.IsTrue(discardedThroughEvent);
        Assert.IsTrue(expiredThroughEvent);
        Assert.IsTrue(observedDiscard);
        Assert.IsTrue(observedExpired);
    }

    // Dispose: can register another producer with same Topic/TKey/TValue once
    // the previous one has been disposed.
    client = new Mock<IClusterClient>();
    client.SetupGet(c => c.DiscardedMessages)
        .Returns(Observable.FromEvent<RawKafkaRecord>(
            a => client.Object.MessageDiscarded += a,
            a => client.Object.MessageDiscarded -= a));
    client.SetupGet(c => c.ExpiredMessages)
        .Returns(Observable.FromEvent<RawKafkaRecord>(
            a => client.Object.MessageExpired += a,
            a => client.Object.MessageExpired -= a));
    var producer2 = new KafkaProducer<string, string>("topic", client.Object);

    // Dispose: observables are completed and events no longer subscribed
    bool discardedCompleted = false;
    bool expiredCompleted = false;
    bool discardedEvent = false;
    bool expiredEvent = false;
    producer2.DiscardedMessages.Subscribe(kr => { }, () => discardedCompleted = true);
    producer2.ExpiredMessages.Subscribe(kr => { }, () => expiredCompleted = true);
    producer2.MessageDiscarded += _ => discardedEvent = true;
    producer2.MessageExpired += _ => expiredEvent = true;
    producer2.Dispose();

    var record2 = new RawKafkaRecord { Topic = "topic", Key = "key", Value = "data", Partition = Partitions.None };
    client.Raise(c => c.MessageDiscarded += null, record2);
    client.Raise(c => c.MessageExpired += null, record2);

    Assert.IsTrue(discardedCompleted);
    Assert.IsTrue(expiredCompleted);
    Assert.IsFalse(discardedEvent);
    Assert.IsFalse(expiredEvent);

    // Dispose: Produce no longer works
    producer2.Produce("data");
    client.Verify(c => c.Produce(It.IsAny<string>(), It.IsAny<object>(), It.IsAny<object>(), It.IsAny<int>()), Times.Never());

    // Dispose: can dispose the same producer multiple times with no effect
    Assert.That(() => producer2.Dispose(), Throws.Nothing);
}
private void _cluster_MessageReceived(RawKafkaRecord record)
{
    _logger.LogInformation($"Received: {Encoding.UTF8.GetString(record.Value as byte[])}");
}
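// Hedged usage sketch (assumption, not taken from the source: _cluster is a cluster client
// created elsewhere and "myTopic" matches the topic used in the tests above). It only shows
// where the logging handler defined above would typically be attached:
//
//     _cluster.MessageReceived += _cluster_MessageReceived;
//     _cluster.ConsumeFromEarliest("myTopic");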