static void Main(string[] args)
{
    try
    {
        _topic = ConfigurationManager.AppSettings["topic"];

        // Materialize once: the deferred LINQ query would otherwise be
        // re-evaluated for both the log line below and KafkaOptions.
        var brokers = ConfigurationManager.AppSettings["kafkaBrokers"]
            .Split(',')
            .Select(x => new Uri(x))
            .ToArray();

        Console.WriteLine("Connecting to kafka queue brokers {0} with topic {1}", string.Join(",", brokers.AsEnumerable()), _topic);

        var options = new KafkaOptions(brokers);
        // Dispose the router (and its connections) when the consume loop exits.
        using (var router = new BrokerRouter(options))
        {
            var coption = new ConsumerOptions(_topic, router);
            _consumer = new KafkaNet.Consumer(coption);

            // Seek each partition to its latest offset so we tail the topic
            // instead of replaying history.
            var offset = _consumer.GetTopicOffsetAsync(_topic, 1000000).Result;
            var tailPositions = offset.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()));
            _consumer.SetOffsetPosition(tailPositions.ToArray());

            // Consume() blocks and yields messages forever.
            foreach (var message in _consumer.Consume())
            {
                // Fix: messages in this project are produced as UTF-8 (see
                // ToUtf8String usage elsewhere); Encoding.Default is
                // platform-dependent and could garble non-ASCII payloads.
                Console.WriteLine("Response: P{0},O{1} : {2}",
                    message.Meta.PartitionId,
                    message.Meta.Offset,
                    System.Text.Encoding.UTF8.GetString(message.Value));
            }
        }
    }
    catch (Exception ex)
    {
        // Fix: log the full exception (type + stack trace), not just Message.
        Console.WriteLine(ex);
    }
    Console.ReadLine();
}
/// <summary>
/// Creates a consumer for the given options, optionally seeded with
/// starting offset positions (one per partition).
/// </summary>
public Consumer(ConsumerOptions options, params OffsetPosition[] positions)
{
    _options = options;
    _fetchResponseQueue = new BlockingCollection<Message>(options.ConsumerBufferSize);
    _metadataQueries = new MetadataQueries(options.Router);

    SetOffsetPosition(positions);
}
/// <summary>
/// Wires a consumer to a single local broker and starts processing the
/// "test" topic from offset 100 on partition 0.
/// </summary>
public void Run()
{
    var kafkaOptions = new KafkaOptions(new Uri("http://localhost:9092"));
    var consumerOptions = new ConsumerOptions("test", new BrokerRouter(kafkaOptions));

    _consumer = new KafkaNet.Consumer(consumerOptions, new OffsetPosition(0, 100));
    Process();
}
/// <summary>
/// Builds a FetchRequest from a batch of Fetch descriptors, applying the
/// consumer's minimum-byte and wait-time settings.
/// </summary>
public static FetchRequest ToFetchRequest(this List<Fetch> fetches, ConsumerOptions options)
{
    // Clamp the double millisecond value so the cast to int cannot overflow.
    var maxWaitMs = (int)Math.Min(int.MaxValue, options.MaxWaitTimeForMinimumBytes.TotalMilliseconds);

    return new FetchRequest
    {
        MaxWaitTime = maxWaitMs,
        MinBytes = options.MinimumBytes,
        Fetches = fetches
    };
}
/// <summary>
/// Consumer bound to a single partition of a topic; invokes the supplied
/// callback for every message fetched. Starts with the default fetch size.
/// </summary>
public PartitionConsumer(ConsumerOptions options, string topic, int partitionId, Action<Message> messageFetchedCallback)
    : base(options.Router)
{
    _options = options;
    _messageFetchedCallback = messageFetchedCallback;

    Topic = topic;
    PartitionId = partitionId;
    FetchSize = Fetch.DefaultMaxBytes;
}
/// <summary>
/// Consumes from the given offsets and logs each message's offset.
/// Consume() yields a blocking enumerable, so this loop does not end on its own.
/// </summary>
private void ReadMessageForever(ConsumerOptions consumerOptions, OffsetPosition[] maxOffsets)
{
    using (var consumer = new Consumer(consumerOptions, maxOffsets))
    {
        foreach (var message in consumer.Consume())
        {
            _log.InfoFormat("Offset{0}", message.Meta.Offset);
        }
    }
}
/// <summary>
/// Produces and consumes against a test topic indefinitely; the consumer
/// starts from the current tail of each partition.
/// </summary>
public void ConsumerFailure()
{
    const string topic = "TestTopicIssue13-2-3R-1P";

    using (var router = new BrokerRouter(_options))
    {
        var producer = new Producer(router);

        // Capture the current tail offset of every partition so consumption
        // begins with freshly produced messages only.
        var offsets = producer.GetTopicOffsetAsync(topic).Result;
        var maxOffsets = offsets
            .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
            .ToArray();

        var consumerOptions = new ConsumerOptions(topic, router)
        {
            PartitionWhitelist = new List<int> { 0 },
            MaxWaitTimeForMinimumBytes = TimeSpan.Zero
        };

        // NOTE(review): "Sand" looks like a typo for "Send"; the method is
        // defined elsewhere in the file, so the call is left as-is here.
        SandMessageForever(producer, topic);
        ReadMessageForever(consumerOptions, maxOffsets);
    }
}
/// <summary>
/// Creates a consumer, optionally seeded with starting offset positions,
/// and starts a background timer that periodically re-queries the topic so
/// newly added partitions are picked up automatically using the same
/// whitelist logic.
/// </summary>
public Consumer(ConsumerOptions options, params OffsetPosition[] positions)
    : base(options.Router)
{
    _options = options;
    _fetchResponseQueue = new BlockingCollection<Message>(options.ConsumerBufferSize);

    var queryInterval = TimeSpan.FromMilliseconds(options.TopicPartitionQueryTimeMs);
    _topicPartitionQueryTimer = new ScheduledTimer()
        .Do(RefreshTopicPartition)
        .Every(queryInterval)
        .StartingAt(DateTime.Now);

    SetOffsetPosition(positions);
}
/// <summary>
/// Load test: produces <paramref name="numberOfMessage"/> messages to one
/// partition, then consumes them back and asserts count and strict ordering,
/// logging elapsed milliseconds at each phase. Fails if either the send or
/// the consume phase exceeds <paramref name="timeoutInMs"/>.
/// </summary>
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced_dataLoad(int numberOfMessage, int timeoutInMs)
{
    int partition = 0;
    Stopwatch stopwatch = new Stopwatch();
    stopwatch.Start();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog });
    stopwatch.Restart();
    // Batch roughly 10 sends per request to exercise the producer's batching path.
    var producer = new Producer(router) { BatchDelayTime = TimeSpan.FromMilliseconds(10), BatchSize = numberOfMessage / 10 };
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();
    // Tail offsets are captured BEFORE producing, so the consumer below only
    // sees the messages produced by this test.
    List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request Offset,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();
    // Fire all sends without awaiting individually; ack level 1, no codec,
    // all to the same partition so ordering is well-defined.
    List<Task> sendList = new List<Task>(numberOfMessage);
    for (int i = 0; i < numberOfMessage; i++)
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
        sendList.Add(sendTask);
    }
    TimeSpan maxTimeToRun = TimeSpan.FromMilliseconds(timeoutInMs);
    var doneSend = Task.WhenAll(sendList.ToArray());
    // Race the aggregate send against the timeout rather than awaiting it
    // directly, so a hang fails the assert instead of stalling the test.
    await Task.WhenAny(doneSend, Task.Delay(maxTimeToRun));
    Assert.IsTrue(doneSend.IsCompleted, "not done to send in time");
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    stopwatch.Restart();
    ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
    consumerOptions.PartitionWhitelist = new List<int> { partition };
    consumerOptions.MaxWaitTimeForMinimumBytes = TimeSpan.Zero;
    Consumer consumer = new Consumer(consumerOptions,
        offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());
    int expected = 0;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    IEnumerable<Message> messages = null;
    // Consume() blocks, so run it on the thread pool and apply the same
    // timeout-race pattern used for the send phase.
    var doneConsume = Task.Run((() =>
    {
        stopwatch.Restart();
        messages = consumer.Consume().Take(numberOfMessage).ToArray();
        IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
        stopwatch.Restart();
    }));
    await Task.WhenAny(doneConsume, Task.Delay(maxTimeToRun));
    Assert.IsTrue(doneConsume.IsCompleted, "not done to Consume in time");
    Assert.IsTrue(messages.Count() == numberOfMessage, "not Consume all ,messages");
    // Payloads were produced as "0".."n-1"; they must come back in exactly
    // that order within the single partition.
    foreach (Message message in messages)
    {
        Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order.");
        expected++;
    }
    stopwatch.Restart();
    // Tear down in dependency order: producer, consumer, then the shared router.
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer Dispose ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    producer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer Dispose ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    consumer.Dispose();
    stopwatch.Restart();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router Dispose,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds));
    router.Dispose();
}
/// <summary>
/// Order should remain the same within a given ack level and partition:
/// produces 200 messages to one partition and asserts they are consumed
/// back in exactly the order produced.
/// </summary>
/// <returns></returns>
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced()
{
    int partition = 0;
    int numberOfMessage = 200;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter"));
    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri));

    // Fix: the original declared "producer" twice in the same scope (CS0128):
    // once as new Producer(router, 2) — a deliberately slow configuration that
    // reproduced a race condition in the old version — and once with defaults.
    // Only the default (non-slow) producer is kept.
    var producer = new Producer(router);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer"));

    // Tail offsets are captured BEFORE producing, so the consumer below only
    // sees the messages produced by this test.
    List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request Offset"));

    // Fire all sends concurrently; ack level 1, no codec, single partition so
    // ordering is well-defined.
    List<Task> sendList = new List<Task>(numberOfMessage);
    for (int i = 0; i < numberOfMessage; i++)
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition);
        sendList.Add(sendTask);
    }
    await Task.WhenAll(sendList.ToArray());
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send"));

    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create Consumer"));
    ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router);
    consumerOptions.PartitionWhitelist = new List<int> { partition };
    Consumer consumer = new Consumer(consumerOptions,
        offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    int expected = 0;
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume"));
    // Consume() blocks, so run it on the thread pool.
    await Task.Run((() =>
    {
        var results = consumer.Consume().Take(numberOfMessage).ToList();

        // Fix: AreEqual reports expected vs. actual counts on failure instead
        // of a bare boolean.
        Assert.AreEqual(numberOfMessage, results.Count, "not Consume all ,messages");
        IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume"));

        // Payloads were produced as "0".."199"; verify strict ordering.
        foreach (Message message in results)
        {
            Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order.");
            expected++;
        }
    }));

    // Tear down in dependency order: producer, consumer, then the shared router.
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer Dispose"));
    producer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer Dispose"));
    consumer.Dispose();
    IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router Dispose"));
    router.Dispose();
}