/// <summary>
/// Verifies that a consumer configured to both start AND stop at the current
/// end-of-topic offsets of an empty topic completes immediately with zero
/// messages, rather than hanging until the safety timeout.
/// </summary>
// FIX: was "async void" — exceptions in an async void method are unobservable by the
// test framework and the runner cannot await completion, so failing assertions after
// an await could be silently lost (or tear down the process). "async Task" lets the
// runner await the test and report failures correctly.
public async Task StopAtExplicitOffsetOnEmptyTopic()
{
    kafka4net.Tracing.EtwTrace.Marker("StopAtExplicitOffsetOnEmptyTopic");

    // create new topic with 3 partitions
    var topic = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 3, 3);

    var offsetFetchCluster = new Cluster(_seed2Addresses);
    await offsetFetchCluster.ConnectAsync();

    // brief pause to let topic metadata propagate before fetching offsets
    await Task.Delay(TimeSpan.FromSeconds(1));
    var offsets = (await offsetFetchCluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd));
    _log.Info("Sum of offsets {0}. Raw: {1}", offsets.Partitions.Sum(p => offsets.NextOffset(p)), offsets);

    // start position == stop position (topic end), so the consumer should
    // deliver nothing and complete on its own
    var startStopProvider = new StartAndStopAtExplicitOffsets(offsets, offsets);
    _log.Info("Attempting to consume {0} messages and stop at {1}", 0, offsets);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, startStopProvider, stopPosition: startStopProvider));

    // 30-second safety net: TakeUntil unblocks the await even if the consumer
    // never completes; the assertion below then fails the test
    var startTime = DateTime.Now;
    var timeout = startTime.AddSeconds(30);
    var messages = await consumer.OnMessageArrived.TakeUntil(timeout).ToList();
    _log.Info("Finished");

    // must have completed before the timeout fired, and with no messages
    Assert.IsTrue(DateTime.Now < timeout);
    Assert.AreEqual(0, messages.Count);

    consumer.Dispose();
    kafka4net.Tracing.EtwTrace.Marker("/StopAtExplicitOffsetOnEmptyTopic");
}
/// <summary>
/// Produces 10K messages into a 3-partition topic, then consumes a window of them
/// bounded by explicit start (offset 300 per partition) and stop (offset 600 per
/// partition) positions, verifying the consumer delivers exactly the expected count
/// and completes on its own.
/// </summary>
// FIX: was "async void" — exceptions in an async void method are unobservable by the
// test framework and the runner cannot await completion, so failing assertions after
// an await could be silently lost (or tear down the process). "async Task" lets the
// runner await the test and report failures correctly.
public async Task StartAndStopAtExplicitOffset()
{
    kafka4net.Tracing.EtwTrace.Marker("StartAndStopAtExplicitOffset");

    // create new topic with 3 partitions
    var topic = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 3, 3);

    // fill it out with 10K messages
    const int count = 10 * 1000;
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    await producer.ConnectAsync();

    // Collect producer success acks until all "count" messages are confirmed,
    // with a 10-second cap so the await below cannot hang forever.
    // NOTE(review): the remove-handler is an empty lambda, so OnSuccess is never
    // unsubscribed — harmless here because the producer is closed right after,
    // but worth confirming this is intentional.
    var sentMessagesObservable = Observable.FromEvent<Message[]>(evtHandler => producer.OnSuccess += evtHandler, evtHandler => { })
        .SelectMany(msgs => msgs)
        .Take(count)
        .TakeUntil(DateTime.Now.AddSeconds(10))
        .ToList();

    _log.Info("Sending data");
    Enumerable.Range(1, count).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);

    var sentMsgs = await sentMessagesObservable;
    _log.Info("Producer sent {0} messages.", sentMsgs.Count);

    _log.Debug("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    var offsetFetchCluster = new Cluster(_seed2Addresses);
    await offsetFetchCluster.ConnectAsync();

    // brief pause to let topic metadata propagate before fetching offsets
    await Task.Delay(TimeSpan.FromSeconds(1));
    var offsets = (await offsetFetchCluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd));
    _log.Info("Sum of offsets {0}. Raw: {1}", offsets.Partitions.Sum(p => offsets.NextOffset(p)), offsets);

    // consume first 300 for each partition: start at offset 300 (or the partition
    // end if it holds fewer), stop at offset 600 (same clamping)
    var offsetStarts = new TopicPartitionOffsets(topic, offsets.GetPartitionsOffset.ToDictionary(pair => pair.Key, pair => pair.Value > 300 ? 300 : pair.Value));
    var offsetStops = new TopicPartitionOffsets(topic, offsets.GetPartitionsOffset.ToDictionary(pair => pair.Key, pair => pair.Value > 600 ? 600 : pair.Value));
    var numMessages = offsetStops.MessagesSince(offsetStarts);
    var startStopProvider = new StartAndStopAtExplicitOffsets(offsetStarts, offsetStops);
    _log.Info("Attempting to consume {0} messages and stop at {1}", numMessages, offsetStops);

    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, startStopProvider, stopPosition: startStopProvider));

    // OnMessageArrived completes when the stop position is reached, so this await
    // terminates without an explicit timeout
    var messages = await consumer.OnMessageArrived.ToList();
    consumer.Dispose();

    Assert.AreEqual(numMessages, messages.Count);
    kafka4net.Tracing.EtwTrace.Marker("/StartAndStopAtExplicitOffset");
}