static void Main(string[] args) { try { _topic = ConfigurationManager.AppSettings["topic"]; var brokers = from x in ConfigurationManager.AppSettings["kafkaBrokers"].Split(',') select new Uri(x); Console.WriteLine("Connecting to kafka queue brokers {0} with topic {1}", string.Join(",", brokers), _topic); var options = new KafkaOptions(brokers.ToArray()); var router = new BrokerRouter(options); var coption = new ConsumerOptions(_topic, router); _consumer = new KafkaNet.Consumer(coption); var offset = _consumer.GetTopicOffsetAsync(_topic, 1000000).Result; var t = from x in offset select new OffsetPosition(x.PartitionId, x.Offsets.Max()); _consumer.SetOffsetPosition(t.ToArray()); foreach (var message in _consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", message.Meta.PartitionId, message.Meta.Offset, System.Text.Encoding.Default.GetString(message.Value)); } } catch(Exception ex) { Console.WriteLine(ex.Message); } Console.ReadLine(); }
static void Main(string[] args)
{
    var options = new KafkaOptions(new Uri("http://localhost:9092")) { Log = new ConsoleLog() };

    //start a background task running a consumer that writes all received messages to the console
    Task.Factory.StartNew(() =>
    {
        var consumer = new KafkaNet.Consumer(new ConsumerOptions("TestHarness", new BrokerRouter(options)) { Log = new ConsoleLog() });
        foreach (var data in consumer.Consume())
        {
            Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String());
        }
    });

    //keep the process alive; without this, Main returns before the consumer receives anything
    Console.ReadLine();
}
public void CancellationShouldInterruptConsumption() { var routerProxy = new BrokerRouterProxy(new MoqMockingKernel()); routerProxy.BrokerConn0.FetchResponseFunction = () => { return new FetchResponse(); }; var router = routerProxy.Create(); var options = CreateOptions(router); using (var consumer = new Consumer(options)) { var tokenSrc = new CancellationTokenSource(); var consumeTask = Task.Run(() => consumer.Consume(tokenSrc.Token).FirstOrDefault()); //wait until the fake broker is running and requesting fetches TaskTest.WaitFor(() => routerProxy.BrokerConn0.FetchRequestCallCount > 10); tokenSrc.Cancel(); Assert.That( Assert.Throws<AggregateException>(consumeTask.Wait).InnerException, Is.TypeOf<OperationCanceledException>()); } }
public void ConsumerShouldConsumeInSameOrderAsProduced() { var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" }; var testId = Guid.NewGuid().ToString(); using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri))) using (var producer = new Producer(router)) { var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result; using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray())) { for (int i = 0; i < 20; i++) { producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait(); } var results = consumer.Consume().Take(20).ToList(); //ensure the produced messages arrived Console.WriteLine("Message order: {0}", string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList())); Assert.That(results.Count, Is.EqualTo(20)); Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order."); Assert.That(results.Any(x => x.Key.ToUtf8String() != testId), Is.False); } } }
static void kafkaConsumer()
{
    OffsetPosition[] offsetPositions = new OffsetPosition[]
    {
        new OffsetPosition() { Offset = 1, PartitionId = 0 }
    };
    var options = new KafkaOptions(new Uri("http://localhost:9092"));
    var consumer = new KafkaNet.Consumer(new ConsumerOptions("test", new BrokerRouter(options)), offsetPositions);

    using (FileStream fileStream = new FileStream("tweets.txt", FileMode.OpenOrCreate))
    using (StreamWriter writer = new StreamWriter(fileStream))
    {
        foreach (var message in consumer.Consume())
        {
            var text = Encoding.UTF8.GetString(message.Value);
            Console.WriteLine("Response: P{0},O{1} : {2}", message.Meta.PartitionId, message.Meta.Offset, text);
            //also persist each message to tweets.txt
            writer.WriteLine(text);
        }
        Console.WriteLine("Hello to console only");
    }
}
public void Consume() { foreach (var message in consumer.Consume()) { Console.WriteLine(Encoding.UTF8.GetString(message.Value)); } }
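Consume() returns a blocking, never-ending IEnumerable, so a bare foreach like the one above only stops when the process does. A minimal sketch of bounding it from the outside with the Consume(CancellationToken) overload used by the cancellation tests further down; the broker URI, topic name and 5-second window are placeholder assumptions, and the snippet assumes the same usings as the surrounding examples (KafkaNet, KafkaNet.Model, System.Text, System.Threading).

// Sketch: interrupt the blocking Consume() stream with a CancellationToken.
public static void ConsumeForFiveSeconds()
{
    var options = new KafkaOptions(new Uri("http://localhost:9092"));
    using (var router = new BrokerRouter(options))
    using (var consumer = new KafkaNet.Consumer(new ConsumerOptions("TestHarness", router)))
    using (var cancellation = new CancellationTokenSource(TimeSpan.FromSeconds(5)))
    {
        try
        {
            foreach (var message in consumer.Consume(cancellation.Token))
            {
                Console.WriteLine(Encoding.UTF8.GetString(message.Value));
            }
        }
        catch (OperationCanceledException)
        {
            //the token fired and interrupted the enumeration, as in the cancellation tests below
        }
    }
}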
static void Main(string[] args) { var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092")) { Log = new ConsoleLog() }; var router = new BrokerRouter(options); var client = new Producer(router); Task.Factory.StartNew(() => { var consumer = new Consumer(new ConsumerOptions("TestHarness", router)); foreach (var data in consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value); } }); Console.WriteLine("Type a message and press enter..."); while (true) { var message = Console.ReadLine(); if (message == "quit") break; client.SendMessageAsync("TestHarness", new[] {new Message {Value = message}}); } using (client) using (router) { } }
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation() { var producer = new Producer(_router); var startOffsets = producer.GetTopicOffsetAsync("LoadTest").Result .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray(); var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), startOffsets); var tasks = new List<Task<List<ProduceResponse>>>(); for (int i = 0; i < 20; i++) { tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } })); } Task.WaitAll(tasks.ToArray()); var results = consumer.Consume().Take(20).ToList(); //ensure the produced messages arrived for (int i = 0; i < 20; i++) { Assert.That(results[i].Value == i.ToString()); } //the current offsets should be 20 positions higher than start var currentOffsets = consumer.GetOffsetPosition(); Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20)); }
public void Main(string[] args) { var options = GetOptions(args); if (options == null) return; StartReporting(); var kafkaOptions = new KafkaOptions(options.KafkaNodeUri); using (var router = new BrokerRouter(kafkaOptions)) using (var client = new KafkaNet.Consumer(new ConsumerOptions("TestHarness", router) { Log = new ConsoleLog(), MinimumBytes = 1 })) { Console.WriteLine("Listening for messages..."); foreach (var message in client.Consume()) { try { var received = DateTime.Now; var sent = new DateTime(BitConverter.ToInt64(message.Value, 0)); var diff = received - sent; lock (sync) receivedItems.Add(new ReceivedMessage { DateTime = received, TotalMilliseconds = diff.TotalMilliseconds }); } catch (Exception ex) { Console.Error.WriteLine("Oops... " + ex); } } } }
public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
{
    var producer = new Producer(_router);
    var offsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), offsets);

    var tasks = new List<Task<List<ProduceResponse>>>();
    for (int i = 0; i < 20; i++)
    {
        tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
    }
    Task.WaitAll(tasks.ToArray());

    var results = consumer.Consume().Take(20).ToList();

    //ensure the produced messages arrived
    for (int i = 0; i < 20; i++)
    {
        Assert.That(results[i].Value == i.ToString());
    }

    //seek back to initial offset
    consumer.SetOffsetPosition(offsets);

    //consume again and ensure all produced messages arrive a second time
    var repeatedResults = consumer.Consume().Take(20).ToList();
    for (int i = 0; i < 20; i++)
    {
        Assert.That(repeatedResults[i].Value == i.ToString());
    }
}
private async Task ObterAsync()
{
    //reuse a single HttpClient for the whole stream instead of creating one per message
    using (HttpClient client = new HttpClient())
    {
        foreach (var msg in _consumer.Consume())
        {
            await client.PostAsync("https://localhost:44332/api/operations",
                ConvertObjectToByteArrayContent(Encoding.UTF8.GetString(msg.Value)));
        }
    }
}
private void ReadMessageForever(ConsumerOptions consumerOptions, OffsetPosition[] maxOffsets) { using (var consumer = new Consumer(consumerOptions, maxOffsets)) { var blockingEnumerableOfMessage = consumer.Consume(); foreach (var message in blockingEnumerableOfMessage) { _log.InfoFormat("Offset{0}", message.Meta.Offset); } } }
static void Main(string[] args) { const string topicName = "kafka.SimpleTopic"; TopicClient topicClient = new TopicClient(); TopicView topicView = topicClient.getTopic(topicName); List<Uri> brokers = getBrokerList(topicView); //create an options file that sets up driver preferences var options = new KafkaOptions() { Log = new ConsoleLog() }; options.KafkaServerUri = brokers; //start an out of process thread that runs a consumer that will write all received messages to the console Task.Run(() => { var consumer = new Consumer(new ConsumerOptions(topicName, new BrokerRouter(options)) { Log = new ConsoleLog() }); foreach (var data in consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String()); } }); //create a producer to send messages with var producer = new KafkaNet.Producer(new BrokerRouter(options)) { BatchSize = 100, BatchDelayTime = TimeSpan.FromMilliseconds(2000) }; //take in console read messages Console.WriteLine("Type a message and press enter..."); while (true) { var message = Console.ReadLine(); if (message == "quit") break; if (string.IsNullOrEmpty(message)) { //send a random batch of messages SendRandomBatch(producer, topicName, 200); } else { producer.SendMessageAsync(topicName, new[] { new Message(message) }); } } using (producer) { } }
private static void Consume(string broker, string topic) { var options = new KafkaOptions(new Uri(broker)); var router = new BrokerRouter(options); var consumer = new Consumer(new ConsumerOptions(topic, router)); //Consume returns a blocking IEnumerable (ie: never ending stream) foreach (var message in consumer.Consume()) { Console.WriteLine("Response: Partition {0},Offset {1} : {2}", message.Meta.PartitionId, message.Meta.Offset, message.Value.ToUtf8String()); } }
static void Main(string[] args) { const string topicName = "TestHarness"; //create an options file that sets up driver preferences var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092")) { Log = new ConsoleLog() }; //start an out of process thread that runs a consumer that will write all received messages to the console Task.Run(() => { var consumer = new Consumer(new ConsumerOptions(topicName, new BrokerRouter(options)) { Log = new ConsoleLog() }); foreach (var data in consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String()); } }); //create a producer to send messages with var producer = new Producer(new BrokerRouter(options)) { BatchSize = 100, BatchDelayTime = TimeSpan.FromMilliseconds(2000) }; //take in console read messages Console.WriteLine("Type a message and press enter..."); while (true) { var message = Console.ReadLine(); if (message == "quit") break; if (string.IsNullOrEmpty(message)) { //send a random batch of messages SendRandomBatch(producer, topicName, 200); } else { producer.SendMessageAsync(topicName, new[] { new Message(message) }); } } using (producer) { } }
public static void ConsumirMsg() { var options = new KafkaOptions(new Uri("http://localhost:9092") /*, new Uri("http://localhost:9092")*/); //var router = new BrokerRouter(options); var consumer = new KafkaNet.Consumer(new ConsumerOptions("PingPongTopic", new BrokerRouter(options))); //Consume returns a blocking IEnumerable (ie: never ending stream) foreach (var message in consumer.Consume()) { ContadorMensajesRecibidos++; Console.WriteLine("Response: P{0},O{1} : {2}, key: " + Encoding.UTF8.GetString(message.Key) + ", ConsumerTaskAccount: " + consumer.ConsumerTaskCount, message.Meta.PartitionId, message.Meta.Offset, Encoding.UTF8.GetString(message.Value)); Productor.ProducirMsg("Pong_Message", Encoding.UTF8.GetString(message.Key)); } }
static void Main(string[] args) { var options = new KafkaOptions(new Uri("http://localhost:9092"), new Uri("http://localhost:9092")); var router = new BrokerRouter(options); var consumer = new KafkaNet.Consumer(new ConsumerOptions("desk-msg", new BrokerRouter(options))); foreach (var message in consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", message.Meta.PartitionId, message.Meta.Offset, Encoding.UTF8.GetString(message.Value)); } }
static void Main(string[] args) { var options = new KafkaOptions(new Uri("http://localhost:9092") ); var router = new BrokerRouter(options); var consumer = new KafkaNet.Consumer(new ConsumerOptions("TestTopic", new BrokerRouter(options))); //Consume returns a blocking IEnumerable (ie: never ending stream) foreach (var message in consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", message.Meta.PartitionId, message.Meta.Offset, Encoding.UTF8.GetString(message.Value)); } }
public void EnsureGzipCanDecompressMessageFromKafka()
{
    var producer = new Producer(_router);
    var offsets = producer.GetTopicOffsetAsync(CompressTopic).Result;

    //consume the same compressed topic the offsets were requested for, starting at offset 0
    var consumer = new Consumer(new ConsumerOptions(CompressTopic, _router),
        offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray());

    var results = consumer.Consume().Take(3).ToList();

    for (int i = 0; i < 3; i++)
    {
        Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
    }
}
public void ConsumerShouldReturnOffset() { var routerProxy = new BrokerRouterProxy(_kernel); routerProxy.BrokerConn0.FetchResponseFunction = () => { return new FetchResponse(); }; var router = routerProxy.Create(); var options = CreateOptions(router); options.PartitionWhitelist = new List<int>(); var consumer = new Consumer(options); var test = consumer.Consume().Take(1); while (consumer.ConsumerTaskCount <= 0) { Thread.Sleep(100); } Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(2)); }
public void ConsumerWhitelistShouldOnlyConsumeSpecifiedPartition() { var routerProxy = new BrokerRouterProxy(_kernel); routerProxy.BrokerConn0.FetchResponseFunction = () => { return new FetchResponse(); }; var router = routerProxy.Create(); var options = CreateOptions(router); options.PartitionWhitelist = new List<int> { 0 }; var consumer = new Consumer(options); var test = consumer.Consume().Take(1); while (consumer.ConsumerTaskCount <= 0) { Thread.Sleep(100); } Assert.That(routerProxy.BrokerConn0.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1)); Assert.That(routerProxy.BrokerConn1.FetchRequestCallCount, Is.EqualTo(0)); }
/*public static string Consumir(string idMsg) * { * //Consume returns a blocking IEnumerable (ie: never ending stream) * foreach (var message in consumer.Consume()) * { * //Console.WriteLine("Response: P{0},O{1} : {2}, key: " + Encoding.UTF8.GetString(message.Key) + ", ConsumerTaskAccount: " + consumer.ConsumerTaskCount, message.Meta.PartitionId, message.Meta.Offset, Encoding.UTF8.GetString(message.Value)); * if (Encoding.UTF8.GetString(message.Key).Equals(idMsg)) * { * return Encoding.UTF8.GetString(message.Value); * } * } * * return string.Empty; * }*/ public static void ConsumirRespuestas() { try { //Consume returns a blocking IEnumerable (ie: never ending stream) foreach (var message in consumer.Consume()) { string keyMsg = Encoding.UTF8.GetString(message.Key); if (!respuestas.ContainsKey(keyMsg)) { respuestas.Add(keyMsg, Encoding.UTF8.GetString(message.Value)); } } } catch (Exception ex) { Console.WriteLine(ex.Message); } }
public void ConsumerWhitelistShouldOnlyConsumeSpecifiedPartition() { var routerProxy = new BrokerRouterProxy(new MoqMockingKernel()); routerProxy.BrokerConn0.FetchResponseFunction = () => { return new FetchResponse(); }; var router = routerProxy.Create(); var options = CreateOptions(router); options.PartitionWhitelist = new List<int> { 0 }; using (var consumer = new Consumer(options)) { var test = consumer.Consume(); TaskTest.WaitFor(() => consumer.ConsumerTaskCount > 0); TaskTest.WaitFor(() => routerProxy.BrokerConn0.FetchRequestCallCount > 0); Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(1), "Consumer should only create one consuming thread for partition 0."); Assert.That(routerProxy.BrokerConn0.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1)); Assert.That(routerProxy.BrokerConn1.FetchRequestCallCount, Is.EqualTo(0)); } }
public void EnsureGzipCanDecompressMessageFromKafka() { var router = new BrokerRouter(_options); var producer = new Producer(router); var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result; var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router), offsets.Select(x => new OffsetPosition(x.PartitionId, 0)).ToArray()); var results = consumer.Consume().Take(3).ToList(); for (int i = 0; i < 3; i++) { Assert.That(results[i].Value, Is.EqualTo(i.ToString())); } using (producer) using (consumer) { } }
public void ConsumerWithEmptyWhitelistShouldConsumeAllPartition() { var routerProxy = new BrokerRouterProxy(new MoqMockingKernel()); var router = routerProxy.Create(); var options = CreateOptions(router); options.PartitionWhitelist = new List<int>(); using (var consumer = new Consumer(options)) { var test = consumer.Consume(); TaskTest.WaitFor(() => consumer.ConsumerTaskCount > 0); TaskTest.WaitFor(() => routerProxy.BrokerConn0.FetchRequestCallCount > 0); TaskTest.WaitFor(() => routerProxy.BrokerConn1.FetchRequestCallCount > 0); Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(2), "Consumer should create one consuming thread for each partition."); Assert.That(routerProxy.BrokerConn0.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1), "BrokerConn0 not sent FetchRequest"); Assert.That(routerProxy.BrokerConn1.FetchRequestCallCount, Is.GreaterThanOrEqualTo(1), "BrokerConn1 not sent FetchRequest"); } }
/// <summary> /// In this example /// - consumer group functionality (i.e. .Subscribe + offset commits) is not used. /// - the consumer is manually assigned to a partition and always starts consumption /// from a specific offset (0). /// </summary> public static void Run_ManualAssign(string brokerList, List <string> topics, CancellationToken cancellationToken) { var config = new ConsumerConfig { // the group.id property must be specified when creating a consumer, even // if you do not intend to use any consumer group functionality. GroupId = new Guid().ToString(), BootstrapServers = brokerList, // partition offsets can be committed to a group even by consumers not // subscribed to the group. in this example, auto commit is disabled // to prevent this from occuring. EnableAutoCommit = true }; using (var consumer = new Consumer <Ignore, string>(config)) { consumer.Assign(topics.Select(topic => new TopicPartitionOffset(topic, 0, Offset.Beginning)).ToList()); consumer.OnError += (_, e) => Console.WriteLine($"Error: {e.Reason}"); consumer.OnPartitionEOF += (_, topicPartitionOffset) => Console.WriteLine($"End of partition: {topicPartitionOffset}"); while (!cancellationToken.IsCancellationRequested) { try { var consumeResult = consumer.Consume(cancellationToken); Console.WriteLine($"Received message at {consumeResult.TopicPartitionOffset}: ${consumeResult.Message}"); } catch (ConsumeException e) { Console.WriteLine($"Consume error: {e.Error}"); } } consumer.Close(); } }
static void Main(string[] args) { var options = new KafkaOptions(new Uri("http://sjkap556:9092"), new Uri("http://sjkap556:9092")); var router = new BrokerRouter(options); OffsetPosition[] offsetPositions = new OffsetPosition[] { new OffsetPosition() { Offset = 0, PartitionId = 0 } }; var consumer = new KafkaNet.Consumer(new ConsumerOptions("testCockpit", new BrokerRouter(options)), offsetPositions); //Consume returns a blocking IEnumerable (ie: never ending stream) foreach (var message in consumer.Consume()) { Console.WriteLine("Response: P{0},O{1} : {2}", message.Meta.PartitionId, message.Meta.Offset, Encoding.UTF8.GetString(message.Value)); } }
public void CancellationShouldInterruptConsumption() { var routerProxy = new BrokerRouterProxy(_kernel); routerProxy.BrokerConn0.FetchResponseFunction = () => { while (true) Thread.Yield(); }; var router = routerProxy.Create(); var options = CreateOptions(router); var consumer = new Consumer(options); var tokenSrc = new CancellationTokenSource(); var consumeTask = Task.Run(() => consumer.Consume(tokenSrc.Token).FirstOrDefault()); if (consumeTask.Wait(TimeSpan.FromSeconds(3))) Assert.Fail(); tokenSrc.Cancel(); Assert.That( Assert.Throws<AggregateException>(consumeTask.Wait).InnerException, Is.TypeOf<OperationCanceledException>()); }
public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced_dataLoad(int numberOfMessage, int timeoutInMs) { int partition = 0; Stopwatch stopwatch = new Stopwatch(); stopwatch.Start(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }); stopwatch.Restart(); var producer = new Producer(router) { BatchDelayTime = TimeSpan.FromMilliseconds(10), BatchSize = numberOfMessage / 10 }; IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); stopwatch.Restart(); List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request Offset,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); stopwatch.Restart(); List<Task> sendList = new List<Task>(numberOfMessage); for (int i = 0; i < numberOfMessage; i++) { var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition); sendList.Add(sendTask); } TimeSpan maxTimeToRun = TimeSpan.FromMilliseconds(timeoutInMs); var doneSend = Task.WhenAll(sendList.ToArray()); await Task.WhenAny(doneSend, Task.Delay(maxTimeToRun)); Assert.IsTrue(doneSend.IsCompleted, "not done to send in time"); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); stopwatch.Restart(); ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router); consumerOptions.PartitionWhitelist = new List<int> { partition }; consumerOptions.MaxWaitTimeForMinimumBytes = TimeSpan.Zero; Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()); int expected = 0; IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); IEnumerable<Message> messages = null; var doneConsume = Task.Run((() => { stopwatch.Restart(); messages = consumer.Consume().Take(numberOfMessage).ToArray(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); stopwatch.Restart(); })); await Task.WhenAny(doneConsume, Task.Delay(maxTimeToRun)); Assert.IsTrue(doneConsume.IsCompleted, "not done to Consume in time"); Assert.IsTrue(messages.Count() == numberOfMessage, "not Consume all ,messages"); foreach (Message message in messages) { Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order."); expected++; } stopwatch.Restart(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer Dispose ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); producer.Dispose(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer Dispose ,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); consumer.Dispose(); stopwatch.Restart(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router Dispose,time Milliseconds:{0}", stopwatch.ElapsedMilliseconds)); router.Dispose(); }
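The test above hand-rolls a timeout by racing a Task.Run around Consume().Take(n) against Task.Delay, a pattern several of these tests repeat. A small helper extracting it, sketched against the same kafka-net Consumer and Message types; the helper name and the empty-list-on-timeout behaviour are assumptions, not part of the library.

// Sketch: consume up to `count` messages or give up after `timeout`.
// If the timeout wins, the background task is still blocked on Consume();
// a CancellationTokenSource plus consumer.Consume(token) can be used to unblock it.
private static async Task<List<Message>> ConsumeWithTimeoutAsync(Consumer consumer, int count, TimeSpan timeout)
{
    var consumeTask = Task.Run(() => consumer.Consume().Take(count).ToList());
    var completed = await Task.WhenAny(consumeTask, Task.Delay(timeout));
    return completed == consumeTask ? await consumeTask : new List<Message>();
}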
public void ConsumerShouldNotLoseMessageWhenBlocked() { var testId = Guid.NewGuid().ToString(); using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri))) using (var producer = new Producer(router)) { var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result; //create consumer with buffer size of 1 (should block upstream) using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { ConsumerBufferSize = 1 }, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray())) { for (int i = 0; i < 20; i++) { producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait(); } for (int i = 0; i < 20; i++) { var result = consumer.Consume().Take(1).First(); Assert.That(result.Key.ToUtf8String(), Is.EqualTo(testId)); Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString())); } } } }
public async Task ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
{
    const int expectedCount = 1000;
    var options = new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = new ConsoleLog() };

    using (var producerRouter = new BrokerRouter(options))
    using (var producer = new Producer(producerRouter))
    {
        //get current offset and reset consumer to top of log
        var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);

        using (var consumerRouter = new BrokerRouter(options))
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, consumerRouter),
            offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            Console.WriteLine("Sending {0} test messages", expectedCount);
            var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic,
                Enumerable.Range(0, expectedCount).Select(x => new Message(x.ToString())));

            Assert.That(response.Any(x => x.Error != (int)ErrorResponseCode.NoError), Is.False, "Error occurred sending test messages to server.");

            var stream = consumer.Consume();

            Console.WriteLine("Reading message back out from consumer.");
            var data = stream.Take(expectedCount).ToList();

            var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.Offset).ToList();

            var serverOffset = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);
            var positionOffset = serverOffset.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
                .OrderBy(x => x.Offset)
                .ToList();

            Assert.That(consumerOffset, Is.EqualTo(positionOffset), "The consumerOffset position should match the server offset position.");
            Assert.That(data.Count, Is.EqualTo(expectedCount), "We should have received all of the test messages from the server.");
        }
    }
}
/// <summary> /// In this example /// - offsets are manually committed. /// - no extra thread is created for the Poll (Consume) loop. /// </summary> public static void Run_Consume(string brokerList, List <string> topics, CancellationToken cancellationToken) { var config = new ConsumerConfig { BootstrapServers = brokerList, GroupId = "csharp-consumer", EnableAutoCommit = false, StatisticsIntervalMs = 5000, SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetResetType.Earliest }; const int commitPeriod = 5; using (var serdeProvider = new AvroSerdeProvider(new AvroSerdeProviderConfig { SchemaRegistryUrl = "192.168.100.153:8081" })) using (var consumer = new Consumer <Ignore, com.landoop.social.reddit.post.reddit_post>(config, null, serdeProvider.GetDeserializerGenerator <com.landoop.social.reddit.post.reddit_post>())) // using (var consumer = new Consumer<Ingore, string>(config)) { // Note: All event handlers are called on the main .Consume thread. // Raised when the consumer has been notified of a new assignment set. // You can use this event to perform actions such as retrieving offsets // from an external source / manually setting start offsets using // the Assign method. You can even call Assign with a different set of // partitions than those in the assignment. If you do not call Assign // in a handler of this event, the consumer will be automatically // assigned to the partitions of the assignment set and consumption // will start from last committed offsets or in accordance with // the auto.offset.reset configuration parameter for partitions where // there is no committed offset. consumer.OnPartitionsAssigned += (_, partitions) => Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}], member id: {consumer.MemberId}"); // Raised when the consumer's current assignment set has been revoked. consumer.OnPartitionsRevoked += (_, partitions) => Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]"); consumer.OnPartitionEOF += (_, tpo) => Console.WriteLine($"Reached end of topic {tpo.Topic} partition {tpo.Partition}, next message will be at offset {tpo.Offset}"); consumer.OnError += (_, e) => Console.WriteLine($"Error: {e.Reason}"); //consumer.OnStatistics += (_, json) // => Console.WriteLine($"Statistics: {json}"); consumer.Subscribe(topics); while (!cancellationToken.IsCancellationRequested) { try { var consumeResult = consumer.Consume(cancellationToken); Console.WriteLine($"Topic: {consumeResult.Topic}"); Console.WriteLine($"Partition: {consumeResult.Partition} Offset: {consumeResult.Offset}"); Console.WriteLine($"Subreddit Topic: { consumeResult.Value.subreddit }"); Console.WriteLine($"{ consumeResult.Value.body }"); Console.WriteLine("---"); Thread.Sleep(1000); if (consumeResult.Offset % commitPeriod == 0) { // The Commit method sends a "commit offsets" request to the Kafka // cluster and synchronously waits for the response. This is very // slow compared to the rate at which the consumer is capable of // consuming messages. A high performance application will typically // commit offsets relatively infrequently and be designed handle // duplicate messages in the event of failure. var committedOffsets = consumer.Commit(consumeResult); Console.WriteLine($"Committed offset: {committedOffsets}"); } } catch (ConsumeException e) { Console.WriteLine($"Consume error: {e.Error}"); } } consumer.Close(); } }
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation() { using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri))) using (var producer = new Producer(router)) { var startOffsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray(); using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startOffsets)) { for (int i = 0; i < 20; i++) { producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "1") }).Wait(); } var results = consumer.Consume().Take(20).ToList(); //ensure the produced messages arrived for (int i = 0; i < 20; i++) { Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString())); } //the current offsets should be 20 positions higher than start var currentOffsets = consumer.GetOffsetPosition(); Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20)); } } }
public async Task ConsumeByOffsetShouldGetSameMessageProducedAtSameOffset() { long offsetResponse; Guid messge = Guid.NewGuid(); using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog })) using (var producer = new Producer(router)) { ProduceResponse responseAckLevel1 = await producer.SendMessageAsync(new Message(messge.ToString()), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0); offsetResponse = responseAckLevel1.Offset; } using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog })) using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { MaxWaitTimeForMinimumBytes = TimeSpan.Zero }, new OffsetPosition[] { new OffsetPosition(0, offsetResponse) })) { var result = consumer.Consume().Take(1).ToList().FirstOrDefault(); Assert.AreEqual(messge.ToString(), result.Value.ToUtf8String()); } }
public void EnsureConsumerDisposesAllTasks() { var routerProxy = new BrokerRouterProxy(new MoqMockingKernel()); routerProxy.BrokerConn0.FetchResponseFunction = async () => { return new FetchResponse(); }; var router = routerProxy.Create(); var options = CreateOptions(router); options.PartitionWhitelist = new List<int>(); var consumer = new Consumer(options); using (consumer) { var test = consumer.Consume(); TaskTest.WaitFor(() => consumer.ConsumerTaskCount >= 2); } TaskTest.WaitFor(() => consumer.ConsumerTaskCount <= 0); Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(0)); }
public async Task ProducerShouldUsePartitionIdInsteadOfMessageKeyToChoosePartition()
{
    Mock<IPartitionSelector> partitionSelector = new Mock<IPartitionSelector>();
    partitionSelector.Setup(x => x.Select(It.IsAny<Topic>(), It.IsAny<byte[]>()))
        .Returns((Topic y, byte[] y1) => { return y.Partitions.Find(p => p.PartitionId == 1); });

    var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { PartitionSelector = partitionSelector.Object });
    var producer = new Producer(router);

    var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
    int partitionId = 0;

    //messages should be routed by the explicit partition id, not by the key-based partition selector
    for (int i = 0; i < 20; i++)
    {
        await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "key") }, 1, null, MessageCodec.CodecNone, partitionId);
    }

    //consume from partitionId to verify that the data was sent to the correct partition
    var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { PartitionWhitelist = { partitionId } },
        offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

    for (int i = 0; i < 20; i++)
    {
        Message result = null;
        await Task.Run(() => result = consumer.Consume().Take(1).First());
        Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
    }

    consumer.Dispose();
    producer.Dispose();
}
public void EnsureGzipCanDecompressMessageFromKafka() { var router = new BrokerRouter(_options); var producer = new Producer(router); var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result; var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router) { PartitionWhitelist = new List<int>() { 0 } }, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()); int numberOfmessage = 3; for (int i = 0; i < numberOfmessage; i++) { producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic, new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip, partition: 0); } var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token).Take(numberOfmessage).ToList(); for (int i = 0; i < numberOfmessage; i++) { Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString())); } using (producer) using (consumer) { } }
public void ConsumerShouldCreateTaskForEachBroker() { var routerProxy = new BrokerRouterProxy(new MoqMockingKernel()); routerProxy.BrokerConn0.FetchResponseFunction = () => { return new FetchResponse(); }; var router = routerProxy.Create(); var options = CreateOptions(router); options.PartitionWhitelist = new List<int>(); using (var consumer = new Consumer(options)) { var test = consumer.Consume(); TaskTest.WaitFor(() => consumer.ConsumerTaskCount >= 2); Assert.That(consumer.ConsumerTaskCount, Is.EqualTo(2)); } }
/// <summary> /// order Should remain in the same ack leve and partition /// </summary> /// <returns></returns> public async Task ConsumerShouldConsumeInSameOrderAsAsyncProduced() { int partition = 0; int numberOfMessage = 200; IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create BrokerRouter")); var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)); int causesRaceConditionOldVersion = 2; var producer = new Producer(router, causesRaceConditionOldVersion) { BatchDelayTime = TimeSpan.Zero };//this is slow on purpose //this is not slow var producer = new Producer(router); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create producer")); List<OffsetResponse> offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("request Offset")); List<Task> sendList = new List<Task>(numberOfMessage); for (int i = 0; i < numberOfMessage; i++) { var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString()) }, 1, null, MessageCodec.CodecNone, partition); sendList.Add(sendTask); } await Task.WhenAll(sendList.ToArray()); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done send")); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("create Consumer")); ConsumerOptions consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router); consumerOptions.PartitionWhitelist = new List<int> { partition }; Consumer consumer = new Consumer(consumerOptions, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()); int expected = 0; IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start Consume")); await Task.Run((() => { var results = consumer.Consume().Take(numberOfMessage).ToList(); Assert.IsTrue(results.Count() == numberOfMessage, "not Consume all ,messages"); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("done Consume")); foreach (Message message in results) { Assert.That(message.Value.ToUtf8String(), Is.EqualTo(expected.ToString()), "Expected the message list in the correct order."); expected++; } })); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start producer Dispose")); producer.Dispose(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start consumer Dispose")); consumer.Dispose(); IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start router Dispose")); router.Dispose(); }
public static void kafkaConsumer() { OffsetPosition[] offsetPositions = new OffsetPosition[] { new OffsetPosition() { Offset = 1, PartitionId = 0 } }; var options = new KafkaOptions(new Uri("http://*****:*****@'); int indexOfAtSign = tweet.IndexOf("@"); int indexOfColon = tweet.IndexOf(":"); string name = " "; string body = " "; if (indexOfAtSign >= 1 && indexOfColon >= 1) { try { name = tweet.Substring(indexOfAtSign + 1, indexOfColon - indexOfAtSign - 1); } catch (Exception) { name = tweet.Substring(3, 10); } body = tweet.Substring(indexOfColon + 1, tweet.Length - indexOfColon - 1); } var newLine = $"{name},{ dt},{body}"; csv.AppendLine(newLine); File.WriteAllText(@"C:\Users\billm\source\repos\ConsoleApp2\testingTweets.csv", csv.ToString()); } } } Console.WriteLine("Hello to console only"); } }
public void ConsumerShouldConsumeInSameOrderAsProduced() { var producer = new Producer(_router); var offsets = producer.GetTopicOffsetAsync("LoadTest").Result; var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()); var tasks = new List<Task<List<ProduceResponse>>>(); for (int i = 0; i < 20; i++) { tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } })); } Task.WaitAll(tasks.ToArray()); var results = consumer.Consume().Take(20).ToList(); for (int i = 0; i < 20; i++) { Assert.That(results[i].Value == i.ToString()); } }
public void ConsumerShouldBeAbleToSeekBackToEarlierOffset() { var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" }; var testId = Guid.NewGuid().ToString(); using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri))) using (var producer = new Producer(router)) { var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray(); using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { MaxWaitTimeForMinimumBytes = TimeSpan.Zero }, offsets)) { for (int i = 0; i < 20; i++) { producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait(); } var sentMessages = consumer.Consume().Take(20).ToList(); //ensure the produced messages arrived IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("Message order: {0}", string.Join(", ", sentMessages.Select(x => x.Value.ToUtf8String()).ToList()))); Assert.That(sentMessages.Count, Is.EqualTo(20)); Assert.That(sentMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected)); Assert.That(sentMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False); //seek back to initial offset consumer.SetOffsetPosition(offsets); var resetPositionMessages = consumer.Consume().Take(20).ToList(); //ensure all produced messages arrive again IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("Message order: {0}", string.Join(", ", resetPositionMessages.Select(x => x.Value).ToList()))); Assert.That(resetPositionMessages.Count, Is.EqualTo(20)); Assert.That(resetPositionMessages.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected)); Assert.That(resetPositionMessages.Any(x => x.Key.ToUtf8String() != testId), Is.False); } } }
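Several examples above checkpoint progress with consumer.GetOffsetPosition() and rewind with SetOffsetPosition(). A minimal sketch of persisting those positions between runs so a restarted consumer can resume where it left off; the "offsets.txt" path and one-line-per-partition format are illustrative assumptions, not part of kafka-net (assumes System.IO and System.Linq).

// Sketch: save the consumer's current partition offsets to disk and reload them at startup.
private static void SaveOffsets(Consumer consumer, string path)
{
    var lines = consumer.GetOffsetPosition().Select(p => string.Format("{0},{1}", p.PartitionId, p.Offset));
    File.WriteAllLines(path, lines);
}

private static OffsetPosition[] LoadOffsets(string path)
{
    if (!File.Exists(path)) return new OffsetPosition[0];
    return File.ReadAllLines(path)
        .Select(line => line.Split(','))
        .Select(parts => new OffsetPosition(int.Parse(parts[0]), long.Parse(parts[1])))
        .ToArray();
}

On the next run, the array returned by LoadOffsets can be passed straight to the Consumer constructor, exactly as the tests above pass the positions derived from GetTopicOffsetAsync.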