public void Dispose()
{
    var producer = this.kafkaProducer;
    if (producer != null)
    {
        this.kafkaProducer = null;
        producer.CloseAsync(TimeSpan.FromSeconds(10)).Wait();
    }
}
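// Note: blocking on CloseAsync(...).Wait() inside Dispose can deadlock when a UI or
// ASP.NET SynchronizationContext is captured upstream. A minimal sketch of an
// async-friendly variant, assuming the same kafkaProducer field; IAsyncDisposable /
// ValueTask are an assumption here (available on newer runtimes), not part of the
// original class.
public async ValueTask DisposeAsync()
{
    // atomically take ownership of the field so only one caller closes the producer
    var producer = Interlocked.Exchange(ref this.kafkaProducer, null);
    if (producer != null)
        await producer.CloseAsync(TimeSpan.FromSeconds(10)).ConfigureAwait(false);
}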
private async Task<Producer> CreateKafkaProducerAsync()
{
    // Note: Currently the Producer does not want the protocol passed in
    var connectionString = string.Join(", ", this.options.Brokers.Select(
        uri => string.IsNullOrEmpty(uri.Authority) ? uri.AbsoluteUri : uri.Authority));

    // ReSharper disable once UseObjectOrCollectionInitializer -- Harder to debug lambda
    var producer = new Producer(connectionString, this.producerConfiguration);
    producer.OnPermError += (exception, messages) => { this.kafkaProducer = null; };
    producer.OnTempError += messages => { Contract.Assume(false, "Kafka Message Delivery Error"); };
    await producer.ConnectAsync().ConfigureAwait(false);
    return producer;
}
public async void SimulateLongBufferedMessageHandling()
{
    var count = 2000;
    var topic = "topic11." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic, batchFlushSize: 2));
    await producer.ConnectAsync();

    var src = Observable.Interval(TimeSpan.FromMilliseconds(10)).Take(count).
        Select(i => new Message { Value = BitConverter.GetBytes((int)i) }).
        Publish().RefCount();
    src.Subscribe(producer.Send);
    await src;
    await Task.Delay(200);
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    _log.Debug("Start consumer");
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart()));
    var stream = consumer.OnMessageArrived.
        Buffer(TimeSpan.FromSeconds(1), count / 4).
        Where(_ => _.Count > 0).
        Select(i =>
        {
            _log.Debug("Handling batch {0} ...", i.Count);
            Thread.Sleep(65 * 1000); // simulate slow batch handling
            _log.Debug("Complete batch");
            return i.Count;
        }).
        Scan(0, (i, i1) =>
        {
            _log.Debug("Scanning {0} {1}", i, i1);
            return i + i1;
        }).
        Do(i => _log.Debug("Do {0}", i)).
        Where(i => i == count).FirstAsync().ToTask();
    await consumer.IsConnected;
    _log.Debug("Waiting for consumer");
    var count2 = await stream;
    consumer.Dispose();

    Assert.AreEqual(count, count2);
    _log.Info("Complete");
}
public async void ProducerSendBufferGrowsAutomatically()
{
    kafka4net.Tracing.EtwTrace.Marker("ProducerSendBufferGrowsAutomatically");

    // now publish messages
    const int count2 = 25000;
    var topic = "part13." + _rnd.Next();
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    _log.Debug("Filling out {0} with {1} messages", topic, count2);
    var sentList = await Enumerable.Range(0, count2)
        .Select(i => new Message { Value = BitConverter.GetBytes(i) })
        .ToObservable()
        .Do(producer.Send)
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .ToList();

    await Task.Delay(TimeSpan.FromSeconds(1));

    _log.Info("Done sending messages. Closing producer.");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Info("Producer closed, starting consumer subscription.");

    // create a topic with 3 partitions
    var topicName = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topicName, 3, 3);

    // sender is configured with a 50ms batch period
    var receivedSubject = new ReplaySubject<Message>();
    producer = new Producer(_seed2Addresses, new ProducerConfiguration(topicName, TimeSpan.FromMilliseconds(50), sendBuffersInitialSize: 1));
    producer.OnSuccess += ms => ms.ForEach(receivedSubject.OnNext);
    await producer.ConnectAsync();

    // send 1000 messages
    const int count = 1000;
    await Observable.Interval(TimeSpan.FromMilliseconds(10))
        .Do(l => producer.Send(new Message { Value = BitConverter.GetBytes((int)l) }))
        .Take(count);

    var receivedMessages = await receivedSubject.Take(count).TakeUntil(DateTime.Now.AddSeconds(2)).ToArray();
    Assert.AreEqual(count, receivedMessages.Length);

    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    kafka4net.Tracing.EtwTrace.Marker("/ProducerSendBufferGrowsAutomatically");
}
public async void ConsumerFollowsRebalancingPartitions()
{
    kafka4net.Tracing.EtwTrace.Marker("ConsumerFollowsRebalancingPartitions");

    // create a topic
    var topic = "topic33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 11, 3);

    // Stop two brokers to let leadership shift to broker1.
    VagrantBrokerUtil.StopBroker("broker2");
    VagrantBrokerUtil.StopBroker("broker3");
    await Task.Delay(TimeSpan.FromSeconds(5));

    // now start back up
    VagrantBrokerUtil.StartBroker("broker2");
    VagrantBrokerUtil.StartBroker("broker3");

    // wait a little for everything to start
    await Task.Delay(TimeSpan.FromSeconds(5));

    // we should have all of them with leader 1
    var cluster = new Cluster(_seed2Addresses);
    await cluster.ConnectAsync();
    var partitionMeta = await cluster.GetOrFetchMetaForTopicAsync(topic);

    // make sure they're all on a single leader
    Assert.AreEqual(1, partitionMeta.GroupBy(p => p.Leader).Count());

    // now publish messages
    const int count = 25000;
    var producer = new Producer(cluster, new ProducerConfiguration(topic));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    _log.Debug("Filling out {0} with {1} messages", topic, count);
    var sentList = await Enumerable.Range(0, count)
        .Select(i => new Message { Value = BitConverter.GetBytes(i) })
        .ToObservable()
        .Do(producer.Send)
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .ToList();

    await Task.Delay(TimeSpan.FromSeconds(1));

    _log.Info("Done sending messages. Closing producer.");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Info("Producer closed, starting consumer subscription.");

    await Task.Delay(TimeSpan.FromSeconds(1));
    var heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);

    var messagesInTopic = (int)tails.MessagesSince(heads);
    _log.Info("Topic offsets indicate producer sent {0} messages.", messagesInTopic);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxBytesPerFetch: 4 * 8));
    var current = 0;
    var received = new ReplaySubject<ReceivedMessage>();
    Task rebalanceTask = null;
    var consumerSubscription = consumer.OnMessageArrived.
        Subscribe(msg =>
        {
            current++;
            if (current == 18)
            {
                rebalanceTask = Task.Factory.StartNew(VagrantBrokerUtil.RebalanceLeadership, CancellationToken.None, TaskCreationOptions.None, TaskScheduler.Default);
            }
            received.OnNext(msg);
            //_log.Info("Got: {0}", BitConverter.ToInt32(msg.Value, 0));
        });
    await consumer.IsConnected;

    _log.Info("Waiting for receiver complete");
    var receivedList = await received.Select(msg => BitConverter.ToInt32(msg.Value, 0)).
        Take(messagesInTopic).
        TakeUntil(DateTime.Now.AddMinutes(3)).
        ToList().
        ToTask();

    if (rebalanceTask != null)
    {
        _log.Info("Waiting for rebalance complete");
        await rebalanceTask; //.TimeoutAfter(TimeSpan.FromSeconds(10));
        _log.Info("Rebalance complete");
    }

    _log.Info("Receiver complete. Disposing Subscription");
    consumerSubscription.Dispose();
    _log.Info("Consumer subscription disposed. Closing consumer.");
    consumer.Dispose();
    _log.Info("Consumer closed.");

    tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    await cluster.CloseAsync(TimeSpan.FromSeconds(5));

    _log.Info("Sum of offsets: {0}", tails.MessagesSince(heads));
    _log.Info("Offsets: [{0}]", string.Join(",", tails.Partitions.Select(p => string.Format("{0}:{1}", p, tails.NextOffset(p)))));

    if (messagesInTopic != receivedList.Count)
    {
        // log some debug info.
        _log.Error("Did not receive all messages. Messages sent but NOT received: {0}", string.Join(",", sentList.Except(receivedList).OrderBy(i => i)));
    }

    Assert.AreEqual(messagesInTopic, receivedList.Count);
    kafka4net.Tracing.EtwTrace.Marker("/ConsumerFollowsRebalancingPartitions");
}
public async void ListenerRecoveryTest()
{
    kafka4net.Tracing.EtwTrace.Marker("ListenerRecoveryTest");
    const int count = 10000;
    var topic = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 6, 3);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    _log.Debug("Filling out {0} with {1} messages", topic, count);
    var sentList = await Enumerable.Range(0, count)
        .Select(i => new Message { Value = BitConverter.GetBytes(i) })
        .ToObservable()
        .Do(producer.Send)
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .ToList();

    await Task.Delay(TimeSpan.FromSeconds(1));

    var heads = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);

    _log.Info("Done sending messages. Closing producer.");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Info("Producer closed, starting consumer subscription.");

    var messagesInTopic = (int)tails.MessagesSince(heads);
    _log.Info("Topic offsets indicate producer sent {0} messages.", messagesInTopic);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxBytesPerFetch: 4 * 8));
    var current = 0;
    var received = new ReplaySubject<ReceivedMessage>();
    Task stopBrokerTask = null;
    var consumerSubscription = consumer.OnMessageArrived.
        Subscribe(msg =>
        {
            current++;
            if (current == 18)
            {
                stopBrokerTask = Task.Factory.StartNew(() => VagrantBrokerUtil.StopBrokerLeaderForPartition(consumer.Cluster, consumer.Topic, msg.Partition), CancellationToken.None, TaskCreationOptions.None, TaskScheduler.Default);
            }
            received.OnNext(msg);
            //_log.Info("Got: {0}", BitConverter.ToInt32(msg.Value, 0));
        });
    await consumer.IsConnected;

    _log.Info("Waiting for receiver complete");
    var receivedList = await received.Select(msg => BitConverter.ToInt32(msg.Value, 0)).Take(messagesInTopic).
        TakeUntil(DateTime.Now.AddSeconds(60)).ToList().ToTask();
    if (stopBrokerTask != null)
        await stopBrokerTask.TimeoutAfter(TimeSpan.FromSeconds(10));

    tails = await consumer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);

    _log.Info("Receiver complete. Disposing Subscription");
    consumerSubscription.Dispose();
    _log.Info("Consumer subscription disposed. Closing consumer.");
    consumer.Dispose();
    _log.Info("Consumer closed.");

    _log.Info("Sum of offsets: {0}", tails.MessagesSince(heads));
    _log.Info("Offsets: [{0}]", string.Join(",", tails.Partitions.Select(p => string.Format("{0}:{1}", p, tails.NextOffset(p)))));

    if (messagesInTopic != receivedList.Count)
    {
        // log some debug info.
        _log.Error("Did not receive all messages. Messages sent but NOT received: {0}", string.Join(",", sentList.Except(receivedList).OrderBy(i => i)));
    }

    Assert.AreEqual(messagesInTopic, receivedList.Count);
    kafka4net.Tracing.EtwTrace.Marker("/ListenerRecoveryTest");
}
public async void ListenerOnNonExistentTopicWaitsForTopicCreation()
{
    kafka4net.Tracing.EtwTrace.Marker("ListenerOnNonExistentTopicWaitsForTopicCreation");
    const int numMessages = 400;
    var topic = "topic." + _rnd.Next();

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart()));
    var cancelSubject = new Subject<bool>();
    var receivedValuesTask = consumer.OnMessageArrived
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .Do(val => _log.Info("Received: {0}", val))
        .Take(numMessages)
        .TakeUntil(cancelSubject)
        .ToList().ToTask();
    //receivedValuesTask.Start();
    await consumer.IsConnected;

    // wait a couple of seconds for things to "stabilize"
    await Task.Delay(TimeSpan.FromSeconds(4));

    // now produce 400 messages
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producer.ConnectAsync();
    Enumerable.Range(1, numMessages).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);
    await Task.Delay(TimeSpan.FromSeconds(2));
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    // wait another little while, then stop the consumer stream.
    await Task.Delay(TimeSpan.FromSeconds(2));
    cancelSubject.OnNext(true);

    var receivedValues = await receivedValuesTask;
    Assert.AreEqual(numMessages, receivedValues.Count);
    kafka4net.Tracing.EtwTrace.Marker("/ListenerOnNonExistentTopicWaitsForTopicCreation");
}
public async void Memory()
{
    var topic = "topic11." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    for (int i = 0; i < 10; ++i)
    {
        var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
        await producer.ConnectAsync();
        for (int j = 0; j < (int)1e6; j++)
        {
            var msg = new Message { Key = BitConverter.GetBytes(1), Value = Encoding.UTF8.GetBytes($@"SomeLongText - {j}") };
            producer.Send(msg);
        }
        await producer.CloseAsync(TimeSpan.FromSeconds(60));
    }

    // measure heap size before collection, then again once collection has completed
    var before = GC.GetTotalMemory(false);
    GC.Collect();
    await Task.Delay(3000);
    GC.Collect();
    GC.WaitForFullGCComplete();
    var after = GC.GetTotalMemory(false);
    _log.Info($"Memory: Before: {before}, after: {after}");
}
public async void TopicIsAutocreatedByProducer()
{
    kafka4net.Tracing.EtwTrace.Marker("TopicIsAutocreatedByProducer");
    var topic = "autocreate.test." + _rnd.Next();
    const int producedCount = 10;
    var lala = Encoding.UTF8.GetBytes("la-la-la");
    // TODO: set wait to 5sec

    //
    // Produce
    // In order to make sure that topic was created by producer, send and wait for producer
    // completion before performing validation read.
    //
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producer.ConnectAsync();

    _log.Debug("Producing...");
    await Observable.Interval(TimeSpan.FromSeconds(1)).
        Take(producedCount).
        Do(_ => producer.Send(new Message { Value = lala })).
        ToTask();
    await producer.CloseAsync(TimeSpan.FromSeconds(10));

    //
    // Validate by reading published messages
    //
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxWaitTimeMs: 1000, minBytesPerFetch: 1));
    var msgs = consumer.OnMessageArrived.Publish().RefCount();
    var receivedTxt = new List<string>();
    var consumerSubscription = msgs.
        Select(m => Encoding.UTF8.GetString(m.Value)).
        Synchronize(). // protect receivedTxt
        Do(m => _log.Info("Received {0}", m)).
        Do(receivedTxt.Add).
        Subscribe();
    await consumer.IsConnected;

    _log.Debug("Waiting for consumer");
    await msgs.Take(producedCount).TakeUntil(DateTimeOffset.Now.AddSeconds(5)).LastOrDefaultAsync().ToTask();

    Assert.AreEqual(producedCount, receivedTxt.Count, "Did not receive all messages");
    Assert.IsTrue(receivedTxt.All(m => m == "la-la-la"), "Unexpected message content");

    consumerSubscription.Dispose();
    consumer.Dispose();
    kafka4net.Tracing.EtwTrace.Marker("/TopicIsAutocreatedByProducer");
}
public async void ReadFromHead()
{
    kafka4net.Tracing.EtwTrace.Marker("ReadFromHead");
    const int count = 100;
    var topic = "part32." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 3, 2);

    // fill it out with 100 messages
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producer.ConnectAsync();
    _log.Info("Sending data");
    Enumerable.Range(1, count).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);
    _log.Debug("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    // read starting from the head
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart()));
    var count2 = await consumer.OnMessageArrived.TakeUntil(DateTimeOffset.Now.AddSeconds(5))
        //.Do(val => _log.Info("received value {0}", BitConverter.ToInt32(val.Value, 0)))
        .Count().ToTask();
    //await consumer.IsConnected;

    Assert.AreEqual(count, count2);
    kafka4net.Tracing.EtwTrace.Marker("/ReadFromHead");
}
public async void StartAndStopAtExplicitOffset()
{
    kafka4net.Tracing.EtwTrace.Marker("StartAndStopAtExplicitOffset");
    // create new topic with 3 partitions
    var topic = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 3, 3);

    // fill it out with 10K messages
    const int count = 10 * 1000;
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producer.ConnectAsync();

    var sentMessagesObservable = Observable.FromEvent<Message[]>(evtHandler => producer.OnSuccess += evtHandler, evtHandler => { })
        .SelectMany(msgs => msgs)
        .Take(count)
        .TakeUntil(DateTime.Now.AddSeconds(10))
        .ToList();

    _log.Info("Sending data");
    Enumerable.Range(1, count).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);

    var sentMsgs = await sentMessagesObservable;
    _log.Info("Producer sent {0} messages.", sentMsgs.Count);

    _log.Debug("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    var offsetFetchCluster = new Cluster(_seed2Addresses);
    await offsetFetchCluster.ConnectAsync();
    await Task.Delay(TimeSpan.FromSeconds(1));
    var offsets = await offsetFetchCluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    _log.Info("Sum of offsets {0}. Raw: {1}", offsets.Partitions.Sum(p => offsets.NextOffset(p)), offsets);

    // consume from offset 300 to offset 600 (capped at the partition tail) for each partition
    var offsetStarts = new TopicPartitionOffsets(topic, offsets.GetPartitionsOffset.ToDictionary(pair => pair.Key, pair => pair.Value > 300 ? 300 : pair.Value));
    var offsetStops = new TopicPartitionOffsets(topic, offsets.GetPartitionsOffset.ToDictionary(pair => pair.Key, pair => pair.Value > 600 ? 600 : pair.Value));
    var numMessages = offsetStops.MessagesSince(offsetStarts);
    var startStopProvider = new StartAndStopAtExplicitOffsets(offsetStarts, offsetStops);
    _log.Info("Attempting to consume {0} messages and stop at {1}", numMessages, offsetStops);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, startStopProvider, stopPosition: startStopProvider));
    var messages = await consumer.OnMessageArrived.ToList();
    consumer.Dispose();

    Assert.AreEqual(numMessages, messages.Count);
    kafka4net.Tracing.EtwTrace.Marker("/StartAndStopAtExplicitOffset");
}
internal async Task<ProducerResponse> SendBatchAsync(int leader, IEnumerable<Message> batch, Producer producer)
{
    CheckConnected();
    // TODO: do state checking. Introduce this.Connected task to wait if needed

    var request = new ProduceRequest
    {
        Broker = _metadata.Brokers.First(b => b.NodeId == leader),
        RequiredAcks = producer.Configuration.RequiredAcks,
        Timeout = producer.Configuration.ProduceRequestTimeoutMs,
        TopicData = new[]
        {
            new TopicData
            {
                TopicName = producer.Topic,
                PartitionsData = (
                    from msg in batch
                    // group messages belonging to the same partition
                    group msg by msg.PartitionId
                    into partitionGrp
                    select new PartitionData
                    {
                        Pub = producer,
                        OriginalMessages = partitionGrp.ToArray(),
                        Partition = partitionGrp.Key,
                        Messages = (
                            from msg in partitionGrp
                            select new MessageData
                            {
                                Key = msg.Key,
                                Value = msg.Value
                            }
                        )
                    }
                )
            }
        }
    };

    var response = await _protocol.Produce(request).ConfigureAwait(false);
    _log.Debug("#{0} SendBatchAsync complete", _id);
    return response;
}
private async Task<Producer> GetOrCreateKafkaProducerAsync()
{
    // Use a temp producer variable here because this.kafkaProducer could be reset to null in another thread
    var producer = this.kafkaProducer;
    if (producer == null)
    {
        this.kafkaProducer = producer = await this.CreateKafkaProducerAsync().ConfigureAwait(false);
    }
    return producer;
}
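// The get-or-create above has a benign race: two threads can both observe null and
// both create a producer. A minimal sketch of a race-free variant, assuming the same
// this.kafkaProducer field and CreateKafkaProducerAsync method; the method name below
// is hypothetical and not part of the original class.
private async Task<Producer> GetOrCreateKafkaProducerSafeAsync()
{
    var producer = Volatile.Read(ref this.kafkaProducer);
    if (producer != null)
        return producer;

    var created = await this.CreateKafkaProducerAsync().ConfigureAwait(false);

    // publish our producer only if no other thread installed one first
    var prior = Interlocked.CompareExchange(ref this.kafkaProducer, created, null);
    if (prior == null)
        return created;

    // another thread won the race; close ours and use theirs
    await created.CloseAsync(TimeSpan.FromSeconds(10)).ConfigureAwait(false);
    return prior;
}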
public async void ProducerConnectWhenOneBrokerIsDownAndThenUp()
{
    // Scenario: 1 broker is down, Producer connects. Broker is brought up and forced to become master.
    // See https://github.com/ntent-ad/kafka4net/issues/14
    var topic = "topic11." + _rnd.Next();

    // Create topic
    VagrantBrokerUtil.CreateTopic(topic, 1, 2);
    var cluster = new Cluster(_seed3Addresses);
    await cluster.ConnectAsync();
    await cluster.GetOrFetchMetaForTopicAsync(topic);
    VagrantBrokerUtil.DescribeTopic(topic);

    // Stop the leader
    //var partitionDown = cluster.PartitionStateChanges.FirstAsync(_ => _.ErrorCode.IsFailure());
    var preferredBroker = VagrantBrokerUtil.StopBrokerLeaderForPartition(cluster, topic, 0);
    //_log.Info("Waiting for partition to be down");
    //await partitionDown;
    await cluster.CloseAsync(TimeSpan.FromSeconds(3));
    await Task.Delay(30 * 1000);
    VagrantBrokerUtil.DescribeTopic(topic);

    // Create new cluster and publisher, while preferred leader is down
    cluster = new Cluster(_seed3Addresses);
    cluster.NewBrokers.Subscribe(b => _log.Info("Discovered new broker: {0}", b));
    _log.Info("Connecting cluster");
    await cluster.ConnectAsync();
    var producer = new Producer(cluster, new ProducerConfiguration(topic));
    _log.Info("Connecting producer");
    await producer.ConnectAsync();

    // Start preferred leader up
    _log.Info("Starting preferred broker");
    VagrantBrokerUtil.StartBroker(preferredBroker);
    _log.Info("Waiting for preferred broker ({0}) to start up", preferredBroker);
    await Task.Delay(30 * 1000);
    //VagrantBrokerUtil.RebalanceLeadership();
    _log.Info("Stopping 2nd leader broker");
    VagrantBrokerUtil.StopBrokerLeaderForPartition(cluster, topic, 0);

    _log.Info("Producer Send data");
    producer.Send(new Message { Value = new byte[] { 0, 0, 0, 0 } });
    _log.Info("Waiting for producer to complete");
    await producer.CloseAsync(TimeSpan.FromSeconds(60));
    _log.Info("Done");
}
public async void SaveOffsetsAndResumeConsuming()
{
    kafka4net.Tracing.EtwTrace.Marker("SaveOffsetsAndResumeConsuming");

    var sentEvents = new Subject<Message>();
    var topic = "part12." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 5, 2);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
    await producer.ConnectAsync();

    // send 100 messages
    Enumerable.Range(1, 100).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);

    _log.Info("Waiting for 100 sent messages");
    sentEvents.Subscribe(msg => _log.Debug("Sent {0}", BitConverter.ToInt32(msg.Value, 0)));
    await sentEvents.Take(100).ToTask();

    var offsets1 = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);

    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    // now consume the "first" 50. Stop, save offsets, and restart.
    var consumer1 = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets1));
    var receivedEvents = new List<int>(100);

    _log.Info("Consuming first half of messages.");
    await consumer1.OnMessageArrived
        .Do(msg =>
        {
            var value = BitConverter.ToInt32(msg.Value, 0);
            _log.Info("Consumer1 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
            receivedEvents.Add(value);
            offsets1.UpdateOffset(msg.Partition, msg.Offset);
        })
        .Take(50);
    //await consumer1.IsConnected;
    _log.Info("Closing first consumer");
    consumer1.Dispose();

    // now serialize the offsets.
    var offsetBytes = offsets1.WriteOffsets();

    // load a new set of offsets, and a new consumer
    var offsets2 = new TopicPartitionOffsets(offsetBytes);
    var consumer2 = new Consumer(new ConsumerConfiguration(_seed2Addresses, offsets2.Topic, offsets2));

    await consumer2.OnMessageArrived
        .Do(msg =>
        {
            var value = BitConverter.ToInt32(msg.Value, 0);
            _log.Info("Consumer2 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
            receivedEvents.Add(value);
            offsets2.UpdateOffset(msg.Partition, msg.Offset);
        })
        .Take(50);
    //await consumer2.IsConnected;
    _log.Info("Closing second consumer");
    consumer2.Dispose();

    Assert.AreEqual(100, receivedEvents.Distinct().Count());
    Assert.AreEqual(100, receivedEvents.Count);
    kafka4net.Tracing.EtwTrace.Marker("/SaveOffsetsAndResumeConsuming");
}
public async void ProducerTestWhenPartitionReassignmentOccurs()
{
    // Scenario: Broker gives away its topic to another broker
    // See https://github.com/ntent-ad/kafka4net/issues/27
    var topic = "topic11." + _rnd.Next();

    // Create topic
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    // Create cluster and producer
    var cluster = new Cluster(_seed2Addresses);
    //await cluster.ConnectAsync();
    //await cluster.GetOrFetchMetaForTopicAsync(topic);
    VagrantBrokerUtil.DescribeTopic(topic);
    var producer = new Producer(cluster, new ProducerConfiguration(topic, batchFlushSize: 1));
    var ctx = SynchronizationContext.Current;
    producer.OnPermError += (exception, messages) => ctx.Post(d => { throw exception; }, null);
    int successfullySent = 0;
    producer.OnSuccess += messages => successfullySent++;

    _log.Info("Connecting producer");
    await producer.ConnectAsync();

    _log.Info("Producer Send data before reassignment");
    producer.Send(new Message { Value = new byte[] { 0, 0, 0, 0 } });

    // Run the reassignment
    VagrantBrokerUtil.ReassignPartitions(cluster, topic, 0);
    _log.Info("Waiting for reassignment completion");
    await Task.Delay(5 * 1000);
    VagrantBrokerUtil.DescribeTopic(topic);

    _log.Info("Producer Send data after reassignment");
    producer.Send(new Message { Value = new byte[] { 1, 1, 1, 1 } });

    _log.Info("Waiting for producer to complete");
    await producer.CloseAsync(TimeSpan.FromSeconds(60));

    Assert.That(successfullySent, Is.EqualTo(2));
    _log.Info("Done");
}
public async void ReadOffsets()
{
    kafka4net.Tracing.EtwTrace.Marker("ReadOffsets");

    var sentEvents = new Subject<Message>();
    var topic = "part12." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    var cluster = new Cluster(_seed2Addresses);
    await cluster.ConnectAsync();
    var producer = new Producer(cluster, new ProducerConfiguration(topic, maxMessageSetSizeInBytes: 1024 * 1024));
    producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
    await producer.ConnectAsync();

    // read offsets of empty queue
    var heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    Assert.AreEqual(1, heads.Partitions.Count(), "Expected just one head partition");
    Assert.AreEqual(1, tails.Partitions.Count(), "Expected just one tail partition");
    Assert.AreEqual(0L, heads.NextOffset(heads.Partitions.First()), "Expected start at 0");
    Assert.AreEqual(0L, tails.NextOffset(tails.Partitions.First()), "Expected end at 0");

    // log the broker selected as master
    var brokerMeta = cluster.FindBrokerMetaForPartitionId(topic, heads.Partitions.First());
    _log.Info("Partition Leader is {0}", brokerMeta);

    // saw some inconsistency, so run this a few times.
    const int count = 1100;
    const int loops = 10;
    for (int i = 0; i < loops; i++)
    {
        // NOTE that the configuration for the test machines through vagrant is set to 1MB rolling file segments,
        // so we need to generate large messages to force multiple segments to be created.

        // send count messages
        var t = sentEvents.Take(count).ToTask();
        Enumerable.Range(1, count).
            Select(_ => new Message { Value = new byte[1024] }).
            ForEach(producer.Send);
        _log.Info("Waiting for {0} sent messages", count);
        await t;

        // re-read offsets after messages published
        // NOTE: There seems to be a race condition on the Kafka broker: the offsets are not
        // immediately available after getting a successful produce response.
        await Task.Delay(TimeSpan.FromSeconds(2));
        tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
        _log.Info("2:After loop {0} of {1} messages, Next Offset is {2}", i + 1, count, tails.NextOffset(tails.Partitions.First()));
        Assert.AreEqual(count * (i + 1), tails.NextOffset(tails.Partitions.First()), "Expected end at " + count * (i + 1));
    }

    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    await Task.Delay(TimeSpan.FromSeconds(1));

    // re-read offsets after messages published
    heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);

    Assert.AreEqual(1, heads.Partitions.Count(), "Expected just one head partition");
    Assert.AreEqual(1, tails.Partitions.Count(), "Expected just one tail partition");
    Assert.AreEqual(0L, heads.NextOffset(heads.Partitions.First()), "Expected start at 0");
    Assert.AreEqual(count * loops, tails.NextOffset(tails.Partitions.First()), "Expected end at " + count * loops);

    kafka4net.Tracing.EtwTrace.Marker("/ReadOffsets");
}
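// The NOTE above papers over the broker-side lag between a successful produce response
// and updated end offsets with a fixed 2-second sleep. A sketch of a polling alternative,
// using only calls that already appear in these tests (FetchPartitionOffsetsAsync,
// MessagesSince); the helper name, poll interval, and timeout handling are illustrative
// assumptions, not library API.
private static async Task<TopicPartitionOffsets> WaitForTailOffsetsAsync(
    Cluster cluster, string topic, TopicPartitionOffsets heads, long expectedMessages, TimeSpan timeout)
{
    var deadline = DateTime.UtcNow + timeout;
    while (true)
    {
        var tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
        // return as soon as the broker reports the expected count, or give up at the deadline
        if (tails.MessagesSince(heads) >= expectedMessages || DateTime.UtcNow >= deadline)
            return tails;
        await Task.Delay(TimeSpan.FromMilliseconds(250));
    }
}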
public async void LeaderDownProducerAndConsumerRecovery()
{
    kafka4net.Tracing.EtwTrace.Marker("LeaderDownProducerAndConsumerRecovery");
    string topic = "part32." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 3, 2);

    var sent = new List<string>();
    var confirmedSent1 = new List<string>();

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    producer.OnSuccess += msgs =>
    {
        msgs.ForEach(msg => confirmedSent1.Add(Encoding.UTF8.GetString(msg.Value)));
        _log.Debug("Sent {0} messages", msgs.Length);
    };
    await producer.ConnectAsync();

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicEnd()));

    const int postCount = 100;
    const int postCount2 = 50;

    //
    // Read messages
    //
    var received = new List<ReceivedMessage>();
    var receivedEvents = new ReplaySubject<ReceivedMessage>();
    var consumerSubscription = consumer.OnMessageArrived.
        Synchronize().
        Subscribe(msg =>
        {
            received.Add(msg);
            receivedEvents.OnNext(msg);
            _log.Debug("Received {0}/{1}", Encoding.UTF8.GetString(msg.Value), received.Count);
        });
    await consumer.IsConnected;

    //
    // Send #1
    //
    _log.Info("Start sender");
    Observable.Interval(TimeSpan.FromMilliseconds(200)).
        Take(postCount).
        Subscribe(
            i =>
            {
                var msg = "msg " + i;
                producer.Send(new Message { Value = Encoding.UTF8.GetBytes(msg) });
                sent.Add("msg " + i);
            },
            () => _log.Info("Producer complete")
        );

    // wait for first 50 messages to arrive
    _log.Info("Waiting for first {0} messages to arrive", postCount2);
    await receivedEvents.Take(postCount2).Count().ToTask();
    Assert.AreEqual(postCount2, received.Count);

    _log.Info("Stopping broker");
    var stoppedBroker = VagrantBrokerUtil.StopBrokerLeaderForPartition(producer.Cluster, topic, 0);
    _log.Debug("Stopped broker {0}", stoppedBroker);

    // post another 50 messages
    _log.Info("Sending another {0} messages", postCount2);
    var sender2 = Observable.Interval(TimeSpan.FromMilliseconds(200)).
        Take(postCount2).
        Publish().RefCount();

    //
    // Send #2
    //
    sender2.Subscribe(
        i =>
        {
            var msg = "msg #2 " + i;
            producer.Send(new Message { Value = Encoding.UTF8.GetBytes(msg) });
            sent.Add(msg);
            _log.Debug("Sent msg #2 {0}", i);
        },
        () => _log.Info("Producer #2 complete")
    );

    _log.Info("Waiting for #2 sender to complete");
    await sender2.ToTask();
    _log.Info("Waiting for producer.Close");
    await producer.CloseAsync(TimeSpan.FromSeconds(60));

    _log.Info("Waiting 4sec for remaining messages");
    await Task.Delay(TimeSpan.FromSeconds(4)); // if unexpected messages arrive, let them in to detect failure

    _log.Info("Waiting for consumer.CloseAsync");
    consumer.Dispose();
    consumerSubscription.Dispose();

    if (postCount + postCount2 != received.Count)
    {
        var receivedStr = received.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();

        var diff = sent.Except(received.Select(m => Encoding.UTF8.GetString(m.Value))).OrderBy(s => s);
        _log.Info("Not received {0}: \n {1}", diff.Count(), string.Join("\n ", diff));

        var diff2 = sent.Except(confirmedSent1).OrderBy(s => s);
        _log.Info("Not confirmed {0}: \n {1}", diff2.Count(), string.Join("\n ", diff2));

        var diff3 = received.Select(m => Encoding.UTF8.GetString(m.Value)).Except(sent).OrderBy(s => s);
        _log.Info("Received extra: {0}: \n {1}", diff3.Count(), string.Join("\n ", diff3));

        var diff4 = confirmedSent1.Except(sent).OrderBy(s => s);
        _log.Info("Confirmed extra {0}: \n {1}", diff4.Count(), string.Join("\n ", diff4));

        var dups = receivedStr.GroupBy(s => s).Where(g => g.Count() > 1).Select(g => string.Format("{0}: {1}", g.Count(), g.Key));
        _log.Info("Received dups: \n {0}", string.Join("\n ", dups));

        _log.Debug("Received: \n{0}", string.Join("\n ", received.Select(m => Encoding.UTF8.GetString(m.Value))));
    }
    Assert.AreEqual(postCount + postCount2, received.Count, "Received.Count");
    _log.Info("Done");
    kafka4net.Tracing.EtwTrace.Marker("/LeaderDownProducerAndConsumerRecovery");
}
public async void MultipleProducersOneCluster()
{
    kafka4net.Tracing.EtwTrace.Marker("MultipleProducersOneCluster");

    var cluster = new Cluster(_seed2Addresses);
    var topic1 = "topic." + _rnd.Next();
    var topic2 = "topic." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic1, 6, 3);
    VagrantBrokerUtil.CreateTopic(topic2, 6, 3);

    // declare two producers
    var producer1 = new Producer(cluster, new ProducerConfiguration(topic1));
    await producer1.ConnectAsync();
    var producer2 = new Producer(cluster, new ProducerConfiguration(topic2));
    await producer2.ConnectAsync();

    // run them both for a little while (~10 seconds)
    var msgs = await Observable.Interval(TimeSpan.FromMilliseconds(100))
        .Do(l =>
        {
            producer1.Send(new Message { Value = BitConverter.GetBytes(l) });
            producer2.Send(new Message { Value = BitConverter.GetBytes(l) });
        }).Take(100);

    _log.Info("Done Sending, await on producer close.");

    // now stop them.
    await Task.WhenAll(new[] { producer1.CloseAsync(TimeSpan.FromSeconds(5)), producer2.CloseAsync(TimeSpan.FromSeconds(5)) });
    await Task.Delay(TimeSpan.FromSeconds(2));

    // check we got all 100 on each topic.
    _log.Info("Closed Producers. Checking Offsets");
    var topic1Heads = await cluster.FetchPartitionOffsetsAsync(topic1, ConsumerLocation.TopicStart);
    var topic2Heads = await cluster.FetchPartitionOffsetsAsync(topic2, ConsumerLocation.TopicStart);
    var topic1Tails = await cluster.FetchPartitionOffsetsAsync(topic1, ConsumerLocation.TopicEnd);
    var topic2Tails = await cluster.FetchPartitionOffsetsAsync(topic2, ConsumerLocation.TopicEnd);
    Assert.AreEqual(100, topic1Tails.MessagesSince(topic1Heads));
    Assert.AreEqual(100, topic2Tails.MessagesSince(topic2Heads));

    kafka4net.Tracing.EtwTrace.Marker("/MultipleProducersOneCluster");
}
public async void ProducerRecoveryTest()
{
    kafka4net.Tracing.EtwTrace.Marker("ProducerRecoveryTest");
    const int count = 200;
    var topic = "part62." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 6, 2);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    _log.Debug("Filling out {0}", topic);
    // when we get a confirm back, add to list actually sent.
    var actuallySentList = new List<int>(count);
    producer.OnSuccess += msgs => actuallySentList.AddRange(msgs.Select(msg => BitConverter.ToInt32(msg.Value, 0)));

    Task stopBrokerTask = null;
    var sentList = await Observable.Interval(TimeSpan.FromMilliseconds(100))
        .Select(l => (int)l)
        .Do(l =>
        {
            if (l == 20)
                stopBrokerTask = Task.Factory.StartNew(() => VagrantBrokerUtil.StopBroker("broker2"), CancellationToken.None, TaskCreationOptions.None, TaskScheduler.Default);
        })
        .Select(i => new Message { Value = BitConverter.GetBytes(i) })
        .Take(count)
        .Do(producer.Send)
        .Select(msg => BitConverter.ToInt32(msg.Value, 0))
        .ToList();

    _log.Info("Done waiting for sending. Closing producer.");
    await producer.CloseAsync(TimeSpan.FromSeconds(30));
    _log.Info("Producer closed.");

    if (stopBrokerTask != null)
        await stopBrokerTask.TimeoutAfter(TimeSpan.FromSeconds(10));

    //
    // Check length of result topic
    //
    var c2 = new Cluster(_seed2Addresses);
    await c2.ConnectAsync();
    var heads = await c2.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await c2.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    _log.Info("Sum of offsets: {0}", tails.MessagesSince(heads));
    _log.Info("Offsets: [{0}]", string.Join(",", tails.Partitions.Select(p => string.Format("{0}:{1}", p, tails.NextOffset(p)))));

    if (sentList.Count != actuallySentList.Count)
    {
        // log some debug info.
        _log.Error("Did not send all messages. Messages sent but NOT acknowledged: {0}", string.Join(",", sentList.Except(actuallySentList).OrderBy(i => i)));
    }

    Assert.AreEqual(sentList.Count, actuallySentList.Count, "Actually sent");
    Assert.AreEqual(sentList.Count, tails.MessagesSince(heads), "Offsets");
    kafka4net.Tracing.EtwTrace.Marker("/ProducerRecoveryTest");
}
public async void SchedulerThreadIsIsolatedFromUserCode()
{
    kafka4net.Tracing.EtwTrace.Marker("SchedulerThreadIsIsolatedFromUserCode");

    const string threadName = "kafka-scheduler";
    _log.Info("Test Runner is using thread {0}", Thread.CurrentThread.Name);

    var topic = "topic." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 6, 3);

    var cluster = new Cluster(_seed2Addresses);
    await cluster.ConnectAsync();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    var topics = await cluster.GetAllTopicsAsync();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    // now create a producer
    var producer = new Producer(cluster, new ProducerConfiguration(topic));
    await producer.ConnectAsync();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    // create a producer that also creates a cluster
    var producerWithCluster = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producerWithCluster.ConnectAsync();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    // TODO: Subscribe and check thread on notification observables!

    // run them both for a little while (~5 seconds)
    var msgs = await Observable.Interval(TimeSpan.FromMilliseconds(100))
        .Do(l =>
        {
            producer.Send(new Message { Value = BitConverter.GetBytes(l) });
            producerWithCluster.Send(new Message { Value = BitConverter.GetBytes(l) });
            _log.Debug("After Producer Send using thread {0}", Thread.CurrentThread.Name);
        }).Take(50).ToArray();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    // now consumer(s)
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart()));
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    var msgsRcv = new List<long>();
    var messageSubscription = consumer.OnMessageArrived
        .Do(msg => Assert.AreEqual(threadName, Thread.CurrentThread.Name),
            exception => Assert.AreEqual(threadName, Thread.CurrentThread.Name),
            () => Assert.AreEqual(threadName, Thread.CurrentThread.Name))
        .Take(50)
        .TakeUntil(DateTime.Now.AddSeconds(500))
        .ObserveOn(System.Reactive.Concurrency.DefaultScheduler.Instance)
        .Do(msg => Assert.AreNotEqual(threadName, Thread.CurrentThread.Name),
            exception => Assert.AreNotEqual(threadName, Thread.CurrentThread.Name),
            () => Assert.AreNotEqual(threadName, Thread.CurrentThread.Name))
        .Subscribe(
            msg =>
            {
                msgsRcv.Add(BitConverter.ToInt64(msg.Value, 0));
                Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);
                _log.Debug("In Consumer Subscribe OnNext using thread {0}", Thread.CurrentThread.Name);
            },
            exception =>
            {
                _log.Debug("In Consumer Subscribe OnError using thread {0} Error: {1}", Thread.CurrentThread.Name, exception.Message);
                throw exception;
            },
            () =>
            {
                Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);
                _log.Debug("In Consumer Subscribe OnComplete using thread {0}", Thread.CurrentThread.Name);
            });
    await consumer.IsConnected;

    _log.Info("Waiting for consumer to read");
    await Task.Delay(TimeSpan.FromSeconds(6));
    _log.Debug("After Consumer Subscribe using thread {0}", Thread.CurrentThread.Name);
    consumer.Dispose();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);
    Assert.AreEqual(msgs.Length, msgsRcv.Count);

    messageSubscription.Dispose();
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    // now close down
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Debug("After Producer Close using thread {0}", Thread.CurrentThread.Name);
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    await producerWithCluster.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Debug("After Producer (with cluster) Close using thread {0}", Thread.CurrentThread.Name);
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    await cluster.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Debug("After Cluster Close using thread {0}", Thread.CurrentThread.Name);
    Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

    kafka4net.Tracing.EtwTrace.Marker("/SchedulerThreadIsIsolatedFromUserCode");
}
public async void CleanShutdownTest()
{
    kafka4net.Tracing.EtwTrace.Marker("CleanShutdownTest");
    const string topic = "shutdown.test";

    // set a long producer batching period, 20 sec
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic, TimeSpan.FromSeconds(20), int.MaxValue));
    _log.Debug("Connecting");
    await producer.ConnectAsync();

    // start listener at the end of queue and accumulate received messages
    var received = new HashSet<string>();
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicEnd(), maxWaitTimeMs: 30 * 1000));
    _log.Info("Subscribing to consumer");
    var consumerSubscription = consumer.OnMessageArrived
        .Select(msg => Encoding.UTF8.GetString(msg.Value))
        .Subscribe(m => received.Add(m));
    _log.Info("Connecting consumer");
    await consumer.IsConnected;
    _log.Info("Subscribed to consumer");

    _log.Info("Starting sender");
    // send data, 5 msg/sec, for 5 seconds
    var sent = new HashSet<string>();
    var sender = Observable.Interval(TimeSpan.FromSeconds(1.0 / 5)).
        Select(i => string.Format("msg {0} {1}", i, Guid.NewGuid())).
        Synchronize().
        Do(m => sent.Add(m)).
        Select(msg => new Message { Value = Encoding.UTF8.GetBytes(msg) }).
        TakeUntil(DateTimeOffset.Now.AddSeconds(5)).
        Publish().RefCount();
    sender.Subscribe(producer.Send);
    _log.Debug("Waiting for sender");
    await sender;
    _log.Debug("Waiting for producer complete");
    await producer.CloseAsync(TimeSpan.FromSeconds(4));
    // how to make sure nothing is sent after shutdown? listen to logger? have connection events?

    // wait for 5sec for receiver to get all the messages
    _log.Info("Waiting for consumer to fetch");
    await Task.Delay(5000);
    _log.Info("Disposing consumer subscription");
    consumerSubscription.Dispose();
    _log.Info("Closing consumer");
    consumer.Dispose();
    _log.Info("Closed consumer");

    // assert we received all the messages
    Assert.AreEqual(sent.Count, received.Count, string.Format("Sent and Received size differs. Sent: {0} Received: {1}", sent.Count, received.Count));
    // compare sets and not lists, because with 2 partitions the send order and receive order are not the same
    Assert.True(received.SetEquals(sent), "Sent and Received set differs");

    kafka4net.Tracing.EtwTrace.Marker("/CleanShutdownTest");
}
private async Task FillOutQueue(string topic, int count)
{
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic, TimeSpan.FromSeconds(20)));

    _log.Debug("Connecting producer");
    await producer.ConnectAsync();

    _log.Info("Starting sender");
    var sender = Observable.Range(1, count).
        Select(i => string.Format("msg {0} {1}", i, Guid.NewGuid())).
        Synchronize().
        Select(msg => new Message { Value = Encoding.UTF8.GetBytes(msg) }).
        Publish().RefCount();
    sender.Subscribe(producer.Send);

    _log.Debug("Waiting for sender");
    await sender.LastOrDefaultAsync().ToTask();
    _log.Debug("Waiting for producer complete");
    await producer.CloseAsync(TimeSpan.FromSeconds(4));
    _log.Debug("Producer complete");
}
public async void KeyedMessagesPreserveOrder()
{
    kafka4net.Tracing.EtwTrace.Marker("KeyedMessagesPreserveOrder");
    // create a topic with 3 partitions
    var topicName = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topicName, 3, 3);

    // create listener in a separate connection/broker
    var receivedMsgs = new List<ReceivedMessage>();
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topicName, new StartPositionTopicEnd()));
    var consumerSubscription = consumer.OnMessageArrived.Synchronize().Subscribe(msg =>
    {
        lock (receivedMsgs)
        {
            receivedMsgs.Add(msg);
        }
    });
    await consumer.IsConnected;

    // sender is configured with a 50ms batch period
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topicName, TimeSpan.FromMilliseconds(50)));
    await producer.ConnectAsync();

    //
    // generate messages at a 10ms interval (single sender thread)
    //
    var sentMsgs = new List<Message>();
    _log.Info("Start sending");
    var senders = Enumerable.Range(1, 1).
        Select(thread => Observable.
            Interval(TimeSpan.FromMilliseconds(10)).
            Synchronize(). // protect adding to sentMsgs
            Select(i =>
            {
                var str = "msg " + i + " thread " + thread + " " + Guid.NewGuid();
                var bin = Encoding.UTF8.GetBytes(str);
                var msg = new Message { Key = BitConverter.GetBytes((int)(i + thread) % 10), Value = bin };
                return Tuple.Create(msg, i, str);
            }).
            Subscribe(msg =>
            {
                lock (sentMsgs)
                {
                    producer.Send(msg.Item1);
                    sentMsgs.Add(msg.Item1);
                    Assert.AreEqual(msg.Item2, sentMsgs.Count - 1);
                }
            })
        ).
        ToArray();

    // wait for around 10K messages (10K / (100 msg/sec) = 100 sec) and close producer
    _log.Info("Waiting for producer to produce enough...");
    await Task.Delay(100 * 1000);
    _log.Info("Closing senders intervals");
    senders.ForEach(s => s.Dispose());
    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    _log.Info("Waiting for additional 10sec");
    await Task.Delay(10 * 1000);

    _log.Info("Disposing consumer");
    consumerSubscription.Dispose();
    _log.Info("Closing consumer");
    consumer.Dispose();
    _log.Info("Done with networking");

    // compare sent and received messages
    // TODO: for some reason performance is not what I'd expect it to be and only 6K is generated.
    Assert.GreaterOrEqual(sentMsgs.Count, 4000, "Expected around 10K messages to be sent");

    if (sentMsgs.Count != receivedMsgs.Count)
    {
        var sentStr = sentMsgs.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();
        var receivedStr = receivedMsgs.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();
        sentStr.Except(receivedStr).
            ForEach(m => _log.Error("Not received: '{0}'", m));
        receivedStr.Except(sentStr).
            ForEach(m => _log.Error("Not sent but received: '{0}'", m));
    }
    Assert.AreEqual(sentMsgs.Count, receivedMsgs.Count, "Sent and received messages count differs");

    //
    // group messages by key and compare lists in each key to be the same (order should be preserved within key)
    //
    var keysSent = sentMsgs.GroupBy(m => BitConverter.ToInt32(m.Key, 0), m => Encoding.UTF8.GetString(m.Value), (i, mm) => new { Key = i, Msgs = mm.ToArray() }).ToArray();
    var keysReceived = receivedMsgs.GroupBy(m => BitConverter.ToInt32(m.Key, 0), m => Encoding.UTF8.GetString(m.Value), (i, mm) => new { Key = i, Msgs = mm.ToArray() }).ToArray();
    Assert.AreEqual(10, keysSent.Count(), "Expected 10 unique keys 0-9");
    Assert.AreEqual(keysSent.Count(), keysReceived.Count(), "Keys count does not match");

    // compare order within each key
    var notInOrder = keysSent
        .OrderBy(k => k.Key)
        .Zip(keysReceived.OrderBy(k => k.Key), (s, r) => new { s, r, ok = s.Msgs.SequenceEqual(r.Msgs) }).Where(_ => !_.ok).ToArray();

    if (notInOrder.Any())
    {
        _log.Error("{0} keys are out of order", notInOrder.Count());
        notInOrder.ForEach(_ => _log.Error("Failed order in:\n{0}", string.Join(" \n", DumpOutOfOrder(_.s.Msgs, _.r.Msgs))));
    }
    Assert.IsTrue(!notInOrder.Any(), "Detected out of order messages");

    kafka4net.Tracing.EtwTrace.Marker("/KeyedMessagesPreserveOrder");
}
public async void SimulateSchedulerHanging()
{
    var topic = "topic11." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic, batchFlushSize: 2));
    await producer.ConnectAsync();

    // hang the callback thread upon the 2nd confirmation
    int c = 0;
    producer.OnSuccess += messages =>
    {
        if (c++ == 1)
            new ManualResetEvent(false).WaitOne();
    };
    var ctx = SynchronizationContext.Current;
    producer.OnPermError += (exception, messages) => ctx.Post(d => { throw exception; }, null);

    var source = Observable.Interval(TimeSpan.FromSeconds(1)).Take(1000).Publish();
    source.Connect();
    source.
        //Do(i => { if (i == 2) producer.DebugHangScheduler(); }).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        Subscribe(producer.Send);

    await source;
}
public async void ExplicitOffset()
{
    kafka4net.Tracing.EtwTrace.Marker("ExplicitOffset");
    // create new topic with 3 partitions
    var topic = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 3, 3);

    // fill it out with 10K messages
    const int count = 10 * 1000;
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producer.ConnectAsync();

    var sentMessagesObservable = Observable.FromEvent<Message[]>(evtHandler => producer.OnSuccess += evtHandler, evtHandler => { })
        .SelectMany(msgs => msgs)
        .Take(count)
        .TakeUntil(DateTime.Now.AddSeconds(10))
        .ToList();

    _log.Info("Sending data");
    Enumerable.Range(1, count).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);

    var sentMsgs = await sentMessagesObservable;
    _log.Info("Producer sent {0} messages.", sentMsgs.Count);

    _log.Debug("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    var offsetFetchCluster = new Cluster(_seed2Addresses);
    await offsetFetchCluster.ConnectAsync();

    // consume tail-300 for each partition
    await Task.Delay(TimeSpan.FromSeconds(1));
    var offsets = new TopicPartitionOffsets(
        topic,
        (await offsetFetchCluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd))
            .GetPartitionsOffset.Select(kv => new KeyValuePair<int, long>(kv.Key, kv.Value - 300)));
    _log.Info("Sum of offsets {0}. Raw: {1}", offsets.Partitions.Sum(p => offsets.NextOffset(p)), offsets);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets));
    var messages = consumer.OnMessageArrived.
        GroupBy(m => m.Partition).Replay();
    messages.Connect();
    await consumer.IsConnected;

    var consumerSubscription = messages.Subscribe(p => p.Take(10).Subscribe(
        m => _log.Debug("Got message {0}/{1}", m.Partition, BitConverter.ToInt32(m.Value, 0)),
        e => _log.Error("Error", e),
        () => _log.Debug("Complete part {0}", p.Key)
    ));

    // wait for 3 partitions to arrive and every partition to read at least 100 messages
    await messages.Select(g => g.Take(100)).Take(3).ToTask();

    consumerSubscription.Dispose();
    consumer.Dispose();

    kafka4net.Tracing.EtwTrace.Marker("/ExplicitOffset");
}