static void Consume(string topic, ClientConfig config)
{
    var consumerConfig = new ConsumerConfig(config);
    consumerConfig.GroupId = "dotnet-example-group-1";
    consumerConfig.AutoOffsetReset = AutoOffsetReset.Earliest;
    consumerConfig.EnableAutoCommit = false;

    CancellationTokenSource cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true; // prevent the process from terminating.
        cts.Cancel();
    };

    using (var consumer = new ConsumerBuilder<string, string>(consumerConfig).Build())
    {
        consumer.Subscribe(topic);
        var totalCount = 0;
        try
        {
            while (true)
            {
                var cr = consumer.Consume(cts.Token);
                totalCount += JObject.Parse(cr.Value).Value<int>("count");
                Console.WriteLine($"Consumed record with key {cr.Key} and value {cr.Value}, and updated total count to {totalCount}");
            }
        }
        catch (OperationCanceledException)
        {
            // Ctrl-C was pressed.
        }
        finally
        {
            consumer.Close();
        }
    }
}
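The example above disables auto-commit (EnableAutoCommit = false) but never commits, so on every restart the group falls back to AutoOffsetReset. A minimal sketch of committing manually inside the loop, using the standard IConsumer.Commit(ConsumeResult) overload (committing every record is an illustrative choice; batching every N records is cheaper):

var cr = consumer.Consume(cts.Token);
totalCount += JObject.Parse(cr.Value).Value<int>("count");
// Synchronously commit this record's offset so a restart resumes after it.
// Commit blocks on a broker round-trip; commit less often if throughput matters.
consumer.Commit(cr);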
public static void Main(string[] args)
{
    var configuration = new ConsumerConfig
    {
        GroupId = "test-consumer-group",
        BootstrapServers = "127.0.0.1:9092",
        AutoOffsetReset = AutoOffsetReset.Earliest
    };

    using var consumer = new ConsumerBuilder<Ignore, string>(configuration).Build();
    consumer.Subscribe("test-topic");

    var cancellationToken = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true;
        cancellationToken.Cancel();
    };

    try
    {
        while (true)
        {
            try
            {
                var consumerResult = consumer.Consume(cancellationToken.Token);
                Console.WriteLine($"Consumed message '{consumerResult.Value}' at: '{consumerResult.TopicPartitionOffset}'.");
            }
            catch (ConsumeException e)
            {
                Console.WriteLine($"Error occurred: {e.Error.Reason}");
            }
        }
    }
    catch (OperationCanceledException)
    {
        consumer.Close();
    }
}
public ConsumeResult<Ignore, string> ManualConsume(string topic, AutoOffsetReset offset)
{
    Console.WriteLine("Starting Manual Consume");
    var consumerConfig = new ConsumerConfig
    {
        GroupId = this.UniqueServiceId,
        BootstrapServers = this.Brokers,
        AutoOffsetReset = offset
    };

    using (var consumer = new ConsumerBuilder<Ignore, string>(consumerConfig).Build())
    {
        consumer.Subscribe(topic);
        try
        {
            // Consume() with no arguments blocks until a message arrives.
            ConsumeResult<Ignore, string> consumeResult = consumer.Consume();
            consumer.Close();
            return consumeResult;
        }
        catch (OperationCanceledException)
        {
            consumer.Close();
            return null;
        }
    }
}
public void Work(System.Threading.CancellationToken token)
{
    var config = new ConsumerConfig
    {
        BootstrapServers = KafkaSettings.BootstrapServers,
        GroupId = "foo",
        AutoOffsetReset = AutoOffsetReset.Earliest
    };

    using (var consumer = new ConsumerBuilder<Ignore, string>(config).Build())
    {
        consumer.Subscribe(KafkaSettings.SearchTopic);
        try
        {
            while (!token.IsCancellationRequested)
            {
                var consumeResult = consumer.Consume(token);
                // Print the message value; printing the Message object itself only prints its type name.
                Console.WriteLine(consumeResult.Message.Value);
            }
        }
        finally
        {
            // Consume(token) throws OperationCanceledException on cancellation,
            // so close the consumer in a finally block to leave the group cleanly.
            consumer.Close();
        }
    }
}
public long GetStashedCount()
{
    var conf = new ConsumerConfig
    {
        GroupId = this._topic,
        BootstrapServers = this._bootstrapServers,
        EnableAutoCommit = true,
        StatisticsIntervalMs = 5000,
        SessionTimeoutMs = 6000,
        AutoOffsetReset = AutoOffsetReset.Earliest,
        EnablePartitionEof = true
    };

    using var consumer = new ConsumerBuilder<string, string>(conf).Build();
    consumer.Subscribe(this._topic);

    // Register the Ctrl-C handler once, outside the loop, so a new
    // CancellationTokenSource and event handler are not leaked per message.
    CancellationTokenSource cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true;
        cts.Cancel();
    };

    var i = 0;
    while (true)
    {
        var result = consumer.Consume(cts.Token);
        if (result.IsPartitionEOF)
        {
            // Stops at the first partition EOF; assumes a single-partition topic.
            break;
        }
        i += 1;
    }

    consumer.Close();
    return i;
}
static void Main(string[] args)
{
    if (args.Length == 0)
    {
        Console.WriteLine("You must input a consumer group id");
        return;
    }

    string groupId = args[0];
    Console.WriteLine($"Consumer in group id: {groupId}");
    Console.WriteLine("Running consumer");

    var consumerConfig = new ConsumerConfig
    {
        BootstrapServers = "localhost:9092",
        GroupId = groupId
    };

    using var consumer = new ConsumerBuilder<Ignore, string>(consumerConfig).Build();
    consumer.Subscribe("Student");

    while (true)
    {
        // Poll with a 5 ms timeout; Consume returns null when no message arrives in time.
        var result = consumer.Consume(5);
        if (string.IsNullOrEmpty(result?.Message?.Value))
        {
            continue;
        }

        var obj = JsonSerializer.Deserialize<StudentRecordUpdate>(result.Message.Value);
        Console.WriteLine("Received a message");
        Console.WriteLine($"Student Id: {obj.StudentId} updated state to {obj.State}");
    }
}
static void Main(string[] args)
{
    var config = new ConsumerConfig
    {
        GroupId = "ORDER",
        BootstrapServers = "localhost:9092",
        EnableAutoCommit = false
    };

    using var c = new ConsumerBuilder<Ignore, string>(config).Build();
    c.Subscribe("orders");

    var cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true;
        cts.Cancel();
    };

    try
    {
        while (true)
        {
            var cr = c.Consume(cts.Token);
            Order order = JsonConvert.DeserializeObject<Order>(cr.Message.Value);
            //Console.WriteLine($"Consumed message '{order.Id}' from topic {cr.Topic}, partition {cr.Partition}, offset {cr.Offset}");
            Console.WriteLine($"Kafka => Order '{order.Id}' is accepted from topic '{cr.Topic}'");
        }
    }
    catch (OperationCanceledException)
    {
    }
    finally
    {
        c.Close();
    }
}
public void ConsumeNotificationsMessages(CancellationToken cancellationToken)
{
    Console.WriteLine("Consuming Notification messages");
    var config = GetConsumerConfig();

    using (var consumer = new ConsumerBuilder<Ignore, string>(config).Build())
    {
        consumer.Subscribe("notificationstopic");

        CancellationTokenSource cts = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true; // prevent the process from terminating.
            cts.Cancel();
        };

        try
        {
            while (!cancellationToken.IsCancellationRequested)
            {
                try
                {
                    var cr = consumer.Consume(cts.Token);
                    Console.WriteLine("Consumed Notification message");
                    dynamic data = JObject.Parse(cr.Message.Value);
                    _manager.CallNotificationService(cr.Value);
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            // Ensure the consumer leaves the group cleanly and final offsets are committed.
            consumer.Close();
        }
    }
}
static void Main(string[] args)
{
    var conf = new ConsumerConfig
    {
        GroupId = "test-consumer-group",
        BootstrapServers = "PLAINTEXT://kafka:9092",
        AutoOffsetReset = AutoOffsetReset.Earliest
    };

    using var c = new ConsumerBuilder<Ignore, string>(conf).Build();
    c.Subscribe("quickstart-events");

    var cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true;
        cts.Cancel();
    };

    try
    {
        Console.WriteLine("Ready to Consume events:");
        while (true)
        {
            var cr = c.Consume(cts.Token);
            Console.WriteLine($"Consumed message from topic {cr.Topic}, partition {cr.Partition}, offset {cr.Offset}. Message:");
            Console.WriteLine(cr.Message.Value);
        }
    }
    catch (OperationCanceledException)
    {
    }
    finally
    {
        c.Close();
    }
}
public async Task<List<string>> ExecuteAsync(CancellationToken stoppingToken, string topicName)
{
    var result = new List<string>();
    var config = new ConsumerConfig
    {
        BootstrapServers = _kafcaConnection,
        GroupId = $"{topicName}-group-0",
        AutoOffsetReset = AutoOffsetReset.Earliest
    };

    try
    {
        using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
        consumer.Subscribe(topicName);
        try
        {
            var message = StartProcess;
            while (!string.IsNullOrEmpty(message))
            {
                var cr = consumer.Consume(stoppingToken);
                message = cr.Message.Value;
                result.Add(message);
            }
        }
        catch (OperationCanceledException)
        {
            consumer.Close();
        }
    }
    catch (Exception)
    {
        // Swallowed deliberately: any consumer failure just ends the read and
        // returns whatever was collected so far.
    }

    return result;
}
/// <summary>
/// Materialize the count state in the Topic_Counts change log topic for the
/// specified partitions into a Dictionary&lt;string, int&gt;
/// </summary>
public static void LoadCountState(RocksDb db, string brokerList, IEnumerable<Partition> partitions, ColumnFamilyHandle columnFamily, CancellationToken ct)
{
    var cConfig = new ConsumerConfig
    {
        BootstrapServers = brokerList,
        GroupId = ConsumerGroup_LoadState,
        EnablePartitionEof = true
    };

    int msgCount = 0;
    using (var consumer = new ConsumerBuilder<string, int>(cConfig).Build())
    {
        consumer.Assign(partitions.Select(p => new TopicPartitionOffset(Topic_Counts, p, Offset.Beginning)));

        int eofCount = 0;
        while (true)
        {
            var cr = consumer.Consume();
            if (cr.IsPartitionEOF)
            {
                // Stop once every assigned partition has been read to its end.
                eofCount += 1;
                if (eofCount == partitions.Count())
                {
                    break;
                }
            }
            else
            {
                msgCount += 1;
                db.Put(Encoding.UTF8.GetBytes(cr.Message.Key), BitConverter.GetBytes(cr.Message.Value), columnFamily);
            }
        }
    }

    Console.WriteLine($"Finished materializing word counts state. Backed by {msgCount} messages in Kafka topic '{Topic_Counts}'");
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
    _config.GroupId = Constants.GroupId;

    using var c = new ConsumerBuilder<Ignore, AnimalInfo>(_config)
        .SetValueDeserializer(new MyDeserializer<AnimalInfo>())
        .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}"))
        .Build();

    c.Subscribe(Constants.Topic);

    try
    {
        // Fan consumed messages out to a TPL Dataflow block for parallel processing.
        var workerBlock = new ActionBlock<ConsumeResult<Ignore, AnimalInfo>>(
            ProcessMessage,
            new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = 6 });

        while (!stoppingToken.IsCancellationRequested)
        {
            try
            {
                var cr = c.Consume(stoppingToken);
                workerBlock.Post(cr);
            }
            catch (ConsumeException e)
            {
                Console.WriteLine($"Error occurred: {e.Error.Reason}");
            }
        }
    }
    catch (OperationCanceledException)
    {
        // Ensure the consumer leaves the group cleanly and final offsets are committed.
        c.Close();
    }
}
public static void Subscribe<T>(string urlServer, string topic, string groupId, Action<string> action)
{
    var config = KafkaConfigManagement.Instance;
    using (var consumer = new ConsumerBuilder<Ignore, string>(config.GetConsumerConfig(urlServer, groupId)).Build())
    {
        consumer.Subscribe(topic);
        try
        {
            while (true)
            {
                try
                {
                    var cr = consumer.Consume();
                    // Deserialize only to validate the payload; the raw JSON is passed to the callback.
                    var msg = JsonConvert.DeserializeObject<T>(cr.Value);
                    if (msg != null)
                    {
                        action(cr.Value);
                    }
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            consumer.Close();
        }
    }
}
private static void MonitorGasTankIsEmpty()
{
    using (var c = new ConsumerBuilder<Ignore, string>(ConsumerConfig).Build())
    {
        c.Subscribe(nameof(GasTankIsEmptyEvent));

        CancellationTokenSource cts = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true;
            cts.Cancel();
        };

        try
        {
            while (true)
            {
                try
                {
                    var cr = c.Consume(cts.Token);
                    var ev = JsonConvert.DeserializeObject<GasTankIsEmptyEvent>(cr.Value);
                    HandleGasTankIsEmptyEvent(ev);
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            c.Close();
        }
    }
}
public void Subscribe()
{
    using (var c = new ConsumerBuilder<Ignore, string>(_consumerConfig).Build())
    {
        c.Subscribe("my-topic");

        var cts = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true; // prevent the process from terminating.
            cts.Cancel();
        };

        try
        {
            while (true)
            {
                try
                {
                    var cr = c.Consume(cts.Token);
                    Console.WriteLine($"Consumed message '{cr.Message.Value}' at: '{cr.TopicPartitionOffset}'.");
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            // Ensure the consumer leaves the group cleanly and final offsets are committed.
            c.Close();
        }
    }
}
public Task StartAsync(CancellationToken cancellationToken)
{
    var conf = new ConsumerConfig
    {
        GroupId = "helloword",
        BootstrapServers = "localhost:9092"
    };

    using (var c = new ConsumerBuilder<Null, string>(conf).Build())
    {
        c.Subscribe("messagesHelloWord");
        var cts = new CancellationTokenSource();
        try
        {
            while (true)
            {
                try
                {
                    var cr = c.Consume(cts.Token);
                    _logger.LogInformation($"Message: {cr.Message.Value} received from {cr.TopicPartitionOffset}");
                }
                catch (ConsumeException e)
                {
                    _logger.LogError($"Consume error: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            c.Close();
        }
    }

    return Task.CompletedTask;
}
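As written, StartAsync above runs the consume loop inline and never returns, which blocks host startup. A sketch of one common fix, assuming the loop body is extracted into a hypothetical ConsumeLoop method and _consumeTask is a field added to hold it:

public Task StartAsync(CancellationToken cancellationToken)
{
    // Run the (hypothetical) ConsumeLoop on the thread pool so the host can finish starting.
    _consumeTask = Task.Run(() => ConsumeLoop(cancellationToken), cancellationToken);
    return Task.CompletedTask;
}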
public void Listen()
{
    #region MyRegion New Style
    var config = new ConsumerConfig
    {
        GroupId = "booking",
        BootstrapServers = "localhost:9092",
        EnableAutoCommit = false
    };

    using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
    consumer.Subscribe("booking");

    var cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true;
        cts.Cancel();
    };

    try
    {
        while (true)
        {
            var cr = consumer.Consume(cts.Token);
            bookingStream.Publish(new Model.BookingMessage { Message = cr.Message.Value });
        }
    }
    catch (OperationCanceledException)
    {
    }
    finally
    {
        consumer.Close();
    }
    #endregion
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
    // TODO: Move this to a service and inject it using DI
    using var consumer = new ConsumerBuilder<Null, string>(_config).Build();
    var topic = "mihai";

    try
    {
        consumer.Subscribe(topic);
        _logger.LogInformation($"Subscribed to topic {topic}.");
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "A Kafka error occurred.");
    }

    while (!stoppingToken.IsCancellationRequested)
    {
        try
        {
            var cr = consumer.Consume(stoppingToken);
            var message = JsonSerializer.Deserialize<DockerHubPayload>(cr.Message.Value);
            _logger.LogInformation("Received following message from Kafka: {@message}", message);

            await _kubernetesService.PatchAllDeploymentAsync(message?.Repository?.RepoName, message?.PushData?.Tag, stoppingToken);
        }
        catch (ConsumeException ex)
        {
            _logger.LogError(ex, "Could not consume the specified Kafka topic.");
        }
        catch (Exception ex)
        {
            _logger.LogCritical(ex, "An error occurred while calling K8s API.");
        }
    }
}
public void Listen()
{
    using (var consumer = new ConsumerBuilder<Ignore, string>(_kafkaConfig).Build())
    {
        consumer.Subscribe(_kafkaTopics);
        var cts = new CancellationTokenSource();
        while (true)
        {
            try
            {
                var message = consumer.Consume(cts.Token);
                _alertStream.Publish(new Alert { Message = message.Value });
                Console.WriteLine($"Consumed message: {message.Value}");
            }
            catch (ConsumeException e)
            {
                Console.WriteLine($"Error occurred: {e.Error.Reason}");
            }
        }
    }
}
public void Run()
{
    ConsumerConfig configs = new ConsumerConfig
    {
        GroupId = groupId,
        BootstrapServers = bootstrapServer,
        AutoOffsetReset = AutoOffsetReset.Earliest
    };

    using (var c = new ConsumerBuilder<String, String>(configs).Build())
    {
        c.Subscribe("twitter");

        CancellationTokenSource cancellationToken = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true;
            cancellationToken.Cancel();
        };

        try
        {
            while (true)
            {
                try
                {
                    ConsumeResult<string, string> result = c.Consume(cancellationToken.Token);
                    Console.WriteLine($"Consumed message '{result.Message.Value}' at: '{result.TopicPartitionOffset}'.");
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            c.Close();
        }
    }
}
static void Main(string[] args)
{
    IEnumerable<KeyValuePair<string, string>> config = BuildConsumerConfig();
    using (var consumer = new ConsumerBuilder<Ignore, string>(config).Build())
    {
        consumer.Subscribe("logistics.instruction.job-state-change-dev");
        while (true)
        {
            // Poll with a 5 second timeout; Consume returns null if nothing arrives in time.
            var consumeResult = consumer.Consume(5000);
            if (consumeResult != null)
            {
                consumer.Commit(consumeResult);
                Console.WriteLine($"message consumed: \n {consumeResult.Message.Value}");
            }
            else
            {
                Console.WriteLine("No messages. Trying again");
            }
        }
    }
}
protected override async Task ExecuteAsync(CancellationToken stoppingToken)
{
    _logger.LogInformation("Worker running at: {time}", DateTimeOffset.Now);

    MetricsRegistry _metricsRegistry = new MetricsRegistry(_configuration);
    Logger _receivedMessageCounter = _metricsRegistry._receivedMessage();
    var counter = _receivedMessageCounter.CountOperation("counter", "operation(s)", true, LogEventLevel.Information);
    counter.Increment();

    try
    {
        var config = new ConsumerConfig
        {
            GroupId = _groupId,
            BootstrapServers = _bootstrapServers,
        };

        using (var consumer = new ConsumerBuilder<Null, string>(config).Build())
        {
            consumer.Subscribe(_topic);
            while (!stoppingToken.IsCancellationRequested)
            {
                var cr = consumer.Consume();
                Item item = JsonConvert.DeserializeObject<Item>(cr.Message.Value);
                Console.WriteLine("Product Name " + item.Name);
                await Task.Delay(1000, stoppingToken);
            }
        }
    }
    catch (Exception ex)
    {
        _logger.LogError("Exception Occurred: {ex}", ex.ToString());
    }
}
public static async Task ConsumeMessage(MainConfig cfg, CancellationToken cancellationToken, ILogger logger)
{
    var config = new ConsumerConfig
    {
        BootstrapServers = cfg.Kafka.BootstrapServers,
        GroupId = cfg.Kafka.GroupId,
        ClientId = cfg.Kafka.ClientId
    };

    using (var consumer = new ConsumerBuilder<Ignore, string>(config).Build())
    {
        consumer.Subscribe(cfg.Kafka.Topic);
        while (!cancellationToken.IsCancellationRequested)
        {
            try
            {
                var consumeResult = consumer.Consume(cancellationToken);
                if (consumeResult.Message != null)
                {
                    //todo warning! "Application maximum poll interval (300000ms) exceeded by X ms"
                    //https://github.com/confluentinc/confluent-kafka-dotnet/issues/785
                    var message = consumeResult.Message.Value;
                    await KafkaMessageHandler.NotifyFromKafkaMessage(message, logger, cfg);
                }
            }
            catch (ConsumeException e)
            {
                logger.Information($"Error occurred while consuming: {e.Error.Reason}", e);
                //throw;
            }
        }
        consumer.Close();
    }
}
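The TODO in the example above refers to max.poll.interval.ms: if NotifyFromKafkaMessage takes longer than the poll interval (300000 ms by default), the broker evicts the consumer from the group. One mitigation is raising the limit via the standard ConsumerConfig.MaxPollIntervalMs property, sketched below with an illustrative 10-minute value; truly long-running work is better moved off the consume thread entirely:

var config = new ConsumerConfig
{
    BootstrapServers = cfg.Kafka.BootstrapServers,
    GroupId = cfg.Kafka.GroupId,
    ClientId = cfg.Kafka.ClientId,
    // Illustrative: allow up to 10 minutes between Consume calls
    // before the broker considers this consumer failed.
    MaxPollIntervalMs = 600000
};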
public void Consumer_Exiting(string bootstrapServers)
{
    LogToFile("start Consumer_Exiting");

    int N = 2;
    var firstProduced = Util.ProduceNullStringMessages(bootstrapServers, singlePartitionTopic, 100, N);

    var consumerConfig = new ConsumerConfig
    {
        BootstrapServers = bootstrapServers,
        SessionTimeoutMs = 6000,
        Debug = "all"
    };

    for (int i = 0; i < 4; ++i)
    {
        consumerConfig.Set("group.id", Guid.NewGuid().ToString());

        using (var consumer = new ConsumerBuilder<byte[], byte[]>(consumerConfig)
            .SetPartitionsAssignedHandler((c, partitions) =>
            {
                return partitions.Select(p => new TopicPartitionOffset(p, firstProduced.Offset));
            })
            .Build())
        {
            consumer.Subscribe(singlePartitionTopic);

            int tryCount = 10;
            while (tryCount-- > 0)
            {
                var record = consumer.Consume(TimeSpan.FromSeconds(10));
                if (record != null)
                {
                    break;
                }
            }

            Assert.True(tryCount > 0);

            // there should be no ill effect doing any of this before disposing a consumer.
            switch (i)
            {
                case 0:
                    LogToFile(" -- Unsubscribe");
                    consumer.Unsubscribe();
                    break;
                case 1:
                    LogToFile(" -- Commit");
                    consumer.Commit();
                    break;
                case 3:
                    LogToFile(" -- Close");
                    consumer.Close();
                    break;
                case 4:
                    break;
            }
        }
    }

    Assert.Equal(0, Library.HandleCount);
    LogToFile("end Consumer_Exiting");
}
public void CommandThread()
{
    Console.WriteLine("CommandThread Start");
    Active = true;

    var consumer = new ConsumerBuilder<string, string>(config).Build();
    var topicp = new TopicPartition(topic, 0);
    consumer.Assign(topicp);

    while (Active)
    {
        try
        {
            var consumeresult = consumer.Consume(canceltoken);
            if (!consumeresult.IsPartitionEOF)
            {
                var input = consumeresult.Value;
                string command;
                string parameter;
                if (input.Contains(" "))
                {
                    command = input.Substring(0, input.IndexOf(" ")).Trim();
                    parameter = input.Substring(input.IndexOf(" "), input.Length - command.Length).Trim();
                }
                else
                {
                    command = input;
                    parameter = "";
                }

                Console.WriteLine("COMMAND----------> " + input);
                if (command.Equals("Add-Channel"))
                {
                    controller.addThread(parameter);
                }
                else if (command.Equals("Drop-Channel"))
                {
                    controller.dropThread(parameter);
                }
                else if (command.Equals("SCPList-Channels"))
                {
                    Console.WriteLine("The active threads are:");
                    controller.listThreads();
                }
                else if (command.Equals("SCPCount"))
                {
                    Console.WriteLine("The message queue has " + controller.queueSize() + " messages in it right now");
                }
                else if (command.Equals("SCPExit") || command.Equals("System-Shutdown"))
                {
                    controller.exit();
                    Active = false;
                    source.Cancel();
                }
                else if (command.Equals("SCPBlacklist"))
                {
                    blacklist.Add(parameter);
                }
                else if (command.Equals("SCPUnblacklist"))
                {
                    blacklist.Remove(parameter);
                }
            }
            else
            {
                Thread.Sleep(100);
            }
        }
        catch (System.OperationCanceledException)
        {
            // Cancellation requested via canceltoken; the loop exits once Active is false.
        }
    }
}
public void Consumer_Poll_Error(string bootstrapServers)
{
    LogToFile("start Consumer_Poll_Error");

    var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers };

    TopicPartitionOffset firstProduced = null;
    using (var producer = new ProducerBuilder<byte[], byte[]>(producerConfig).Build())
    {
        var keyData = Encoding.UTF8.GetBytes("key");
        firstProduced = producer.ProduceAsync(singlePartitionTopic, new Message<byte[], byte[]> { Key = keyData }).Result.TopicPartitionOffset;
        var valData = Encoding.UTF8.GetBytes("val");
        producer.ProduceAsync(singlePartitionTopic, new Message<byte[], byte[]> { Value = valData });
        Assert.True(producer.Flush(TimeSpan.FromSeconds(10)) == 0);
    }

    var consumerConfig = new ConsumerConfig
    {
        GroupId = Guid.NewGuid().ToString(),
        BootstrapServers = bootstrapServers,
        SessionTimeoutMs = 6000,
        EnablePartitionEof = true
    };

    // test key deserialization error behavior
    using (var consumer = new ConsumerBuilder<Null, string>(consumerConfig)
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            Assert.Single(partitions);
            Assert.Equal(firstProduced.TopicPartition, partitions[0]);
            return partitions.Select(p => new TopicPartitionOffset(p, firstProduced.Offset));
        })
        .Build())
    {
        consumer.Subscribe(singlePartitionTopic);

        int msgCnt = 0;
        int errCnt = 0;
        while (true)
        {
            var s = consumer.Subscription;
            try
            {
                var record = consumer.Consume(TimeSpan.FromSeconds(10));
                if (record == null) { continue; }
                if (record.IsPartitionEOF) { break; }
                msgCnt += 1;
            }
            catch (ConsumeException e)
            {
                errCnt += 1;
                Assert.Equal(ErrorCode.Local_KeyDeserialization, e.Error.Code);
                Assert.Equal(firstProduced.Offset.Value, e.ConsumerRecord.Offset.Value);
            }
        }

        Assert.Equal(1, msgCnt);
        Assert.Equal(1, errCnt);

        consumer.Close();
    }

    // test value deserialization error behavior.
    using (var consumer = new ConsumerBuilder<string, Null>(consumerConfig)
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            Assert.Single(partitions);
            Assert.Equal(firstProduced.TopicPartition, partitions[0]);
            return partitions.Select(p => new TopicPartitionOffset(p, firstProduced.Offset));
        })
        .Build())
    {
        consumer.Subscribe(singlePartitionTopic);

        int msgCnt = 0;
        int errCnt = 0;
        while (true)
        {
            try
            {
                var record = consumer.Consume(TimeSpan.FromSeconds(10));
                if (record == null) { continue; }
                if (record.IsPartitionEOF) { break; }
                msgCnt += 1;
            }
            catch (ConsumeException e)
            {
                errCnt += 1;
                Assert.Equal(ErrorCode.Local_ValueDeserialization, e.Error.Code);
                Assert.Equal(firstProduced.Offset.Value + 1, e.ConsumerRecord.Offset.Value);
            }
        }

        Assert.Equal(1, msgCnt);
        Assert.Equal(1, errCnt);

        consumer.Close();
    }

    Assert.Equal(0, Library.HandleCount);
    LogToFile("end Consumer_Poll_Error");
}
public void MessageHeaderProduceConsume(string bootstrapServers)
{
    LogToFile("start MessageHeaderProduceConsume");

    var producerConfig = new ProducerConfig
    {
        BootstrapServers = bootstrapServers,
        EnableIdempotence = true
    };

    var consumerConfig = new ConsumerConfig
    {
        GroupId = Guid.NewGuid().ToString(),
        BootstrapServers = bootstrapServers,
        SessionTimeoutMs = 6000
    };

    var drs = new List<DeliveryReport<Null, string>>();
    DeliveryResult<Null, string> dr_single, dr_empty, dr_null, dr_multiple, dr_duplicate;
    DeliveryResult<Null, string> dr_ol1, dr_ol3;
    using (var producer = new ProducerBuilder<Null, string>(producerConfig).Build())
    {
        // single header value.
        var headers = new Headers();
        headers.Add("test-header", new byte[] { 142 });
        dr_single = producer.ProduceAsync(
            singlePartitionTopic,
            new Message<Null, string> { Value = "the value", Headers = headers }).Result;
        Assert.Single(dr_single.Message.Headers);
        Assert.Equal("test-header", dr_single.Message.Headers[0].Key);
        Assert.Equal(new byte[] { 142 }, dr_single.Message.Headers[0].GetValueBytes());

        // empty header values
        var headers0 = new Headers();
        dr_empty = producer.ProduceAsync(
            singlePartitionTopic,
            new Message<Null, string> { Value = "the value", Headers = headers0 }).Result;
        Assert.Empty(dr_empty.Message.Headers);

        // null header value
        dr_null = producer.ProduceAsync(
            singlePartitionTopic,
            new Message<Null, string> { Value = "the value" }).Result;
        Assert.Empty(dr_null.Message.Headers);

        // multiple header values (also Headers no Dictionary, since order is tested).
        var headers2 = new Headers();
        headers2.Add("test-header-a", new byte[] { 111 });
        headers2.Add("test-header-b", new byte[] { 112 });
        dr_multiple = producer.ProduceAsync(
            singlePartitionTopic,
            new Message<Null, string> { Value = "the value", Headers = headers2 }).Result;
        Assert.Equal(2, dr_multiple.Message.Headers.Count);
        Assert.Equal("test-header-a", dr_multiple.Message.Headers[0].Key);
        Assert.Equal(new byte[] { 111 }, dr_multiple.Message.Headers[0].GetValueBytes());
        Assert.Equal("test-header-b", dr_multiple.Message.Headers[1].Key);
        Assert.Equal(new byte[] { 112 }, dr_multiple.Message.Headers[1].GetValueBytes());

        // duplicate header values (also List not Dictionary)
        var headers3 = new Headers();
        headers3.Add(new Header("test-header-a", new byte[] { 111 }));
        headers3.Add(new Header("test-header-b", new byte[] { 112 }));
        headers3.Add(new Header("test-header-a", new byte[] { 113 }));
        headers3.Add(new Header("test-header-b", new byte[] { 114 }));
        headers3.Add(new Header("test-header-c", new byte[] { 115 }));
        dr_duplicate = producer.ProduceAsync(
            singlePartitionTopic,
            new Message<Null, string> { Value = "the value", Headers = headers3 }).Result;
        Assert.Equal(5, dr_duplicate.Message.Headers.Count);
        Assert.Equal("test-header-a", dr_duplicate.Message.Headers[0].Key);
        Assert.Equal(new byte[] { 111 }, dr_duplicate.Message.Headers[0].GetValueBytes());
        Assert.Equal("test-header-a", dr_duplicate.Message.Headers[2].Key);
        Assert.Equal(new byte[] { 113 }, dr_duplicate.Message.Headers[2].GetValueBytes());

        // Test headers work as expected with all serializing ProduceAsync variants.
        dr_ol1 = producer.ProduceAsync(singlePartitionTopic, new Message<Null, string> { Value = "the value" }).Result;
        Assert.Empty(dr_ol1.Message.Headers);
        dr_ol3 = producer.ProduceAsync(
            new TopicPartition(singlePartitionTopic, 0),
            new Message<Null, string> { Value = "the value", Headers = headers }).Result;
        Assert.Single(dr_ol3.Message.Headers);
        Assert.Equal("test-header", dr_ol3.Message.Headers[0].Key);
        Assert.Equal(new byte[] { 142 }, dr_ol3.Message.Headers[0].GetValueBytes());

        Action<DeliveryReport<Null, string>> dh = (DeliveryReport<Null, string> dr) => drs.Add(dr);

        // Test headers work as expected with all serializing Produce variants.
        producer.BeginProduce(singlePartitionTopic, new Message<Null, string> { Value = "the value" }, dh);
        producer.BeginProduce(
            new TopicPartition(singlePartitionTopic, 0),
            new Message<Null, string> { Value = "the value", Headers = headers2 }, dh);
        producer.Flush(TimeSpan.FromSeconds(10));

        Assert.Empty(drs[0].Message.Headers); // TODO: this is intermittently not working.
        Assert.Equal(2, drs[1].Message.Headers.Count);
    }

    List<DeliveryReport<byte[], byte[]>> drs_2 = new List<DeliveryReport<byte[], byte[]>>();
    DeliveryResult<byte[], byte[]> dr_ol4, dr_ol5, dr_ol6, dr_ol7;
    using (var producer = new ProducerBuilder<byte[], byte[]>(producerConfig).Build())
    {
        var headers = new Headers();
        headers.Add("hkey", new byte[] { 44 });

        // Test headers work as expected with all non-serializing ProduceAsync variants.
        dr_ol4 = producer.ProduceAsync(singlePartitionTopic, new Message<byte[], byte[]> { Headers = null }).Result;
        Assert.Empty(dr_ol4.Message.Headers);
        dr_ol5 = producer.ProduceAsync(singlePartitionTopic, new Message<byte[], byte[]> { Headers = null }).Result;
        Assert.Empty(dr_ol5.Message.Headers);
        dr_ol6 = producer.ProduceAsync(singlePartitionTopic, new Message<byte[], byte[]> { Headers = headers }).Result;
        Assert.Single(dr_ol6.Message.Headers);
        dr_ol7 = producer.ProduceAsync(singlePartitionTopic, new Message<byte[], byte[]> { Headers = headers }).Result;
        Assert.Single(dr_ol7.Message.Headers);

        // Test headers work as expected with all non-serializing BeginProduce variants.
        Action<DeliveryReport<byte[], byte[]>> dh = (DeliveryReport<byte[], byte[]> dr) => drs_2.Add(dr);
        producer.BeginProduce(singlePartitionTopic, new Message<byte[], byte[]> { Headers = headers }, dh);
        producer.BeginProduce(singlePartitionTopic, new Message<byte[], byte[]> { Headers = null }, dh);
        producer.BeginProduce(singlePartitionTopic, new Message<byte[], byte[]> { Headers = headers }, dh);
        producer.BeginProduce(singlePartitionTopic, new Message<byte[], byte[]> { Headers = headers }, dh);
        producer.Flush(TimeSpan.FromSeconds(10));

        Assert.Single(drs_2[0].Message.Headers);
        Assert.Empty(drs_2[1].Message.Headers); // TODO: this is intermittently not working.
        Assert.Single(drs_2[2].Message.Headers);
        Assert.Single(drs_2[3].Message.Headers);
    }

    using (var consumer = new ConsumerBuilder<byte[], byte[]>(consumerConfig).Build())
    {
        consumer.Assign(new List<TopicPartitionOffset>() { dr_single.TopicPartitionOffset });
        var record = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record.Message);
        Assert.Single(record.Message.Headers);
        Assert.Equal("test-header", record.Message.Headers[0].Key);
        Assert.Equal(new byte[] { 142 }, record.Message.Headers[0].GetValueBytes());

        consumer.Assign(new List<TopicPartitionOffset>() { dr_empty.TopicPartitionOffset });
        var record2 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record2.Message);
        // following Java, always instantiate a new Headers instance, even in the empty case.
        Assert.NotNull(record2.Message.Headers);
        Assert.Empty(record2.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { dr_null.TopicPartitionOffset });
        var record3 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record3.Message);
        Assert.NotNull(record3.Message.Headers);
        Assert.Empty(record3.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { dr_multiple.TopicPartitionOffset });
        var record4 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record4.Message);
        Assert.Equal(2, record4.Message.Headers.Count);
        Assert.Equal("test-header-a", record4.Message.Headers[0].Key);
        Assert.Equal("test-header-b", record4.Message.Headers[1].Key);
        Assert.Equal(new byte[] { 111 }, record4.Message.Headers[0].GetValueBytes());
        Assert.Equal(new byte[] { 112 }, record4.Message.Headers[1].GetValueBytes());

        consumer.Assign(new List<TopicPartitionOffset>() { dr_duplicate.TopicPartitionOffset });
        var record5 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record5.Message);
        Assert.Equal(5, record5.Message.Headers.Count);
        Assert.Equal("test-header-a", record5.Message.Headers[0].Key);
        Assert.Equal("test-header-b", record5.Message.Headers[1].Key);
        Assert.Equal("test-header-a", record5.Message.Headers[2].Key);
        Assert.Equal("test-header-b", record5.Message.Headers[3].Key);
        Assert.Equal("test-header-c", record5.Message.Headers[4].Key);
        Assert.Equal(new byte[] { 111 }, record5.Message.Headers[0].GetValueBytes());
        Assert.Equal(new byte[] { 112 }, record5.Message.Headers[1].GetValueBytes());
        Assert.Equal(new byte[] { 113 }, record5.Message.Headers[2].GetValueBytes());
        Assert.Equal(new byte[] { 114 }, record5.Message.Headers[3].GetValueBytes());
        Assert.Equal(new byte[] { 115 }, record5.Message.Headers[4].GetValueBytes());
        Assert.Equal(new byte[] { 113 }, record5.Message.Headers.GetLastBytes("test-header-a"));
        Assert.Equal(new byte[] { 114 }, record5.Message.Headers.GetLastBytes("test-header-b"));
        Assert.Equal(new byte[] { 115 }, record5.Message.Headers.GetLastBytes("test-header-c"));

        // Test headers work with all produce method variants.

        // async, serializing
        consumer.Assign(new List<TopicPartitionOffset>() { dr_ol1.TopicPartitionOffset });
        var record6 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record6.Message);
        Assert.Empty(record6.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { dr_ol3.TopicPartitionOffset });
        var record8 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record8.Message);
        Assert.Single(record8.Message.Headers);

        // delivery-handler, serializing.
        consumer.Assign(new List<TopicPartitionOffset>() { drs[0].TopicPartitionOffset });
        var record9 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record9.Message);
        Assert.Empty(record9.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { drs[1].TopicPartitionOffset });
        var record11 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record11.Message);
        Assert.Equal(2, record11.Message.Headers.Count);

        // async, non-serializing
        consumer.Assign(new List<TopicPartitionOffset>() { dr_ol4.TopicPartitionOffset });
        var record12 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record12.Message);
        Assert.Empty(record12.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { dr_ol5.TopicPartitionOffset });
        var record13 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record13.Message);
        Assert.Empty(record13.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { dr_ol6.TopicPartitionOffset });
        var record14 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record14.Message);
        Assert.Single(record14.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { dr_ol7.TopicPartitionOffset });
        var record15 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record15.Message);
        Assert.Single(record15.Message.Headers);

        // delivery handler, non-serializing
        consumer.Assign(new List<TopicPartitionOffset>() { drs_2[0].TopicPartitionOffset });
        var record16 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record16.Message);
        Assert.Single(record16.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { drs_2[1].TopicPartitionOffset });
        var record17 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record17.Message);
        Assert.Empty(record17.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { drs_2[2].TopicPartitionOffset });
        var record18 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record18.Message);
        Assert.Single(record18.Message.Headers);

        consumer.Assign(new List<TopicPartitionOffset>() { drs_2[3].TopicPartitionOffset });
        var record19 = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(record19.Message);
        Assert.Single(record19.Message.Headers);
    }

    // null key
    using (var producer = new ProducerBuilder<byte[], byte[]>(producerConfig).Build())
    {
        var headers = new Headers();
        var threw = false;
        try
        {
            headers.Add(null, new byte[] { 142 });
        }
        catch
        {
            threw = true;
        }
        finally
        {
            Assert.True(threw);
        }

        var headers2 = new List<Header>();
        Assert.Throws<ArgumentNullException>(() => headers2.Add(new Header(null, new byte[] { 42 })));
    }

    // null value
    DeliveryResult<Null, string> nulldr;
    using (var producer = new ProducerBuilder<Null, string>(producerConfig).Build())
    {
        var headers = new Headers();
        headers.Add("my-header", null);
        nulldr = producer.ProduceAsync(singlePartitionTopic, new Message<Null, string> { Value = "test-value", Headers = headers }).Result;
        Assert.Single(nulldr.Headers);
        Assert.Null(nulldr.Headers[0].GetValueBytes());
    }

    using (var consumer = new ConsumerBuilder<byte[], byte[]>(consumerConfig).Build())
    {
        consumer.Assign(new TopicPartitionOffset(singlePartitionTopic, 0, nulldr.Offset));
        var cr = consumer.Consume(TimeSpan.FromSeconds(10));
        Assert.NotNull(cr?.Message);
        Assert.Single(cr.Headers);
        Assert.Equal("my-header", cr.Message.Headers[0].Key);
        Assert.Null(cr.Message.Headers[0].GetValueBytes());
    }

    Assert.Equal(0, Library.HandleCount);
    LogToFile("end MessageHeaderProduceConsume");
}
public void Subscribe(Action<T> onchange, CacheNotifyAction cacheNotifyAction)
{
    if (ClientConfig == null)
    {
        MemoryCacheNotify.Subscribe(onchange, cacheNotifyAction);
        return;
    }

    var channelName = GetChannelName(cacheNotifyAction);
    Cts[channelName] = new CancellationTokenSource();
    Actions[channelName] = onchange;

    void action()
    {
        var conf = new ConsumerConfig(ClientConfig)
        {
            GroupId = Guid.NewGuid().ToString()
        };

        using var c = new ConsumerBuilder<AscCacheItem, T>(conf)
            .SetErrorHandler((_, e) => Log.Error(e))
            .SetKeyDeserializer(KeyDeserializer)
            .SetValueDeserializer(ValueDeserializer)
            .Build();

        c.Assign(new TopicPartition(channelName, new Partition()));

        try
        {
            while (true)
            {
                try
                {
                    var cr = c.Consume(Cts[channelName].Token);
                    // Ignore notifications published by this instance (matching key) and
                    // dispatch the rest to the registered callback.
                    if (cr != null && cr.Value != null && !(new Guid(cr.Key.Id.ToByteArray())).Equals(Key) && Actions.TryGetValue(channelName, out var act))
                    {
                        try
                        {
                            act(cr.Value);
                        }
                        catch (Exception e)
                        {
                            Log.Error("Kafka onmessage", e);
                        }
                    }
                }
                catch (ConsumeException e)
                {
                    Log.Error(e);
                }
            }
        }
        catch (OperationCanceledException)
        {
            c.Close();
        }
    }

    var task = new Task(action, TaskCreationOptions.LongRunning);
    task.Start();
}
public void CheckEvents()
{
    // Note: The AutoOffsetReset property determines the start offset in the event there are not yet any committed offsets for the
    // consumer group for the topic/partitions of interest. By default, offsets are committed automatically, so in this example,
    // consumption will only start from the earliest message in the topic 'my-topic' the first time you run the program.
    consumerConfig.AutoOffsetReset = AutoOffsetReset.Earliest;

    using (var c = new ConsumerBuilder<Ignore, string>(consumerConfig).Build())
    {
        c.Subscribe(kafkaTopic);

        CancellationTokenSource cts = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true; // prevent the process from terminating.
            cts.Cancel();
        };

        try
        {
            while (true)
            {
                try
                {
                    var cr = c.Consume(cts.Token);

                    // Business logic
                    if (cr.Value.ToLower().Contains("eventtype"))
                    {
                        var orderRequestEventReceived = JsonConvert.DeserializeObject<OrderRequestEvent>(cr.Value);
                        //if (myEventsAsConsumer.Contains(orderRequestEventReceived.EventType))
                        if (handlers.ContainsKey(orderRequestEventReceived.EventType))
                        {
                            Console.ForegroundColor = ConsoleColor.Yellow;
                            Console.WriteLine();
                            Console.WriteLine("Message Found:");
                            Console.ResetColor();
                            Console.WriteLine($"{orderRequestEventReceived.EventType} - Topic: {cr.Topic} Partition: {cr.Partition} Offset: {cr.Offset} {cr.Value}");

                            IDomainEventHandler handler = handlers.GetValueOrDefault(orderRequestEventReceived.EventType);
                            handler.Handler(cr.Value);

                            //if (orderRequestEventReceived.EventType == "OrderRequestCreatedEvent")
                            //{
                            //    // Validate ...
                            //    // Processing ...
                            //    // Saving Database ...

                            //    //if success
                            //    if (orderRequestEventReceived.HotelId > 0 && orderRequestEventReceived.HotelRoomId > 0)
                            //    {
                            //        //Producer(kafkaTopic, "HotelRequestSucceedEvent", orderRequestEventReceived);
                            //    }
                            //    //if failed
                            //    else
                            //    {
                            //        //Producer(kafkaTopic, "HotelRequestFailedEvent", orderRequestEventReceived);
                            //    }
                            //}

                            //if (orderRequestEventReceived.EventType == "FlightRequestFailedEvent")
                            //{
                            //    // Compensation
                            //    //Producer(kafkaTopic, "HotelRequestFailedEvent", orderRequestEventReceived);
                            //}
                        }
                    }
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }

                // Sleep between polls purely for demonstration purposes.
                System.Threading.Thread.Sleep(1000);
            }
        }
        catch (OperationCanceledException)
        {
            // Ensure the consumer leaves the group cleanly and final offsets are committed.
            c.Close();
        }
    }
}
/// <summary>
/// Main method for console app.
/// </summary>
/// <param name="args">No arguments used.</param>
public static void Main(string[] args)
{
    Console.WriteLine("Starting .net consumer.");
    var consumerConfig = new ConsumerConfig
    {
        BootstrapServers = "kafka:9092",
        GroupId = "csharp-consumer-prev",
        AutoOffsetReset = AutoOffsetReset.Earliest,
        PluginLibraryPaths = "monitoring-interceptor",
    };

    using (var consumer = new ConsumerBuilder<string, string>(consumerConfig)
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            // Rewind each assigned partition to the offset closest to five minutes ago.
            var timestamp = new Confluent.Kafka.Timestamp(DateTime.Now.AddMinutes(-5));
            var timestamps = partitions.Select(tp => new TopicPartitionTimestamp(tp, timestamp));
            var offsets = c.OffsetsForTimes(timestamps, TimeSpan.FromMinutes(1));
            foreach (var offset in offsets)
            {
                Console.WriteLine($"Moving partition {offset.Partition.Value} to {offset.Offset.Value}");
            }
            return offsets;
        })
        .Build())
    {
        consumer.Subscribe(KafkaTopic);

        CancellationTokenSource cts = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true; // prevent the process from terminating.
            cts.Cancel();
        };

        try
        {
            int recordCount = 0;
            while (true)
            {
                try
                {
                    var cr = consumer.Consume(cts.Token);
                    Console.WriteLine($"{cr.Key},{cr.Value}");
                    recordCount++;
                    if (recordCount >= 100)
                    {
                        break;
                    }
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            // Ensure the consumer leaves the group cleanly and final offsets are committed.
            Console.WriteLine("Closing consumer.");
            consumer.Close();
        }
    }
}