/// <summary>
/// Subscribes to the "first_topic" Kafka topic and invokes <paramref name="message"/>
/// with each consumed message value. Blocks the calling thread in a poll loop.
/// </summary>
/// <param name="message">Callback invoked with each message's string payload.</param>
/// <param name="cancellationToken">
/// Optional token that stops the poll loop; defaults to none (loop runs forever,
/// matching the original behavior).
/// </param>
public void Listen(Action<string> message, CancellationToken cancellationToken = default(CancellationToken))
{
    // Configuration for Kafka to find brokers.
    var config = new Dictionary<string, object>
    {
        { "group.id", "booking_consumer" },
        { "bootstrap.servers", "localhost:9092" },
        // Auto-commit is disabled, so offsets must be committed explicitly below;
        // without a commit, every restart would reprocess the entire topic.
        { "enable.auto.commit", "false" }
    };

    // Null key deserializer because we are not using consumer/producer keys.
    using (var consumer = new Confluent.Kafka.Consumer<Confluent.Kafka.Null, string>(
        config, null, new Confluent.Kafka.Serialization.StringDeserializer(Encoding.UTF8)))
    {
        // Consumer subscribing to the topic.
        consumer.Subscribe("first_topic");

        consumer.OnMessage += (_, msg) =>
        {
            message(msg.Value);
            // FIX: auto-commit is off but the original never committed, losing
            // progress on restart. Commit this message's offset after handling.
            // Fire-and-forget, per the Confluent 0.x manual-commit example.
            consumer.CommitAsync(msg);
        };

        // FIX: was `while (true)` with no exit; honor the (optional) token.
        while (!cancellationToken.IsCancellationRequested)
        {
            consumer.Poll(100);
        }
    }
}
/// <summary>
/// Polls the given Kafka topics until cancellation is requested or the end of a
/// partition is reached, inserting each consumed message into the staging table
/// via <see cref="ProcessKafkaMessage.InsertInto_StgKafka"/>.
/// </summary>
/// <param name="constructConfig">librdkafka consumer configuration (group.id, brokers, ...).</param>
/// <param name="topics">Topics to subscribe to.</param>
/// <param name="cancellationTokenSource">Source whose token stops the poll loop.</param>
/// <exception cref="Exception">Rethrown after logging; any failure inside the poll loop.</exception>
public static void Run_Poll(Dictionary<string, object> constructConfig, List<string> topics, CancellationTokenSource cancellationTokenSource)
{
    StartProcess sp = new StartProcess();
    try
    {
        // FIX: checkReachedEnd is a shared flag set by the EOF handler below.
        // It was never reset on entry, so a second call to Run_Poll after a
        // previous EOF would exit the loop immediately without consuming anything.
        checkReachedEnd = false;

        using (var consumer = new Confluent.Kafka.Consumer<Confluent.Kafka.Null, string>(
            constructConfig, null, new StringDeserializer(Encoding.UTF8)))
        {
            sp.Log("Run_Poll Start");

            // Note: all event handlers are called on the main (polling) thread.
            consumer.OnMessage += (_, msg) =>
            {
                ProcessKafkaMessage.InsertInto_StgKafka(msg);
            };

            consumer.OnPartitionEOF += (_, end) =>
            {
                sp.Log($"Reached end of topic {end.Topic} partition {end.Partition}, next message will be at offset {end.Offset}");
                Console.WriteLine($"Reached end of topic {end.Topic} partition {end.Partition}, next message will be at offset {end.Offset}");
                // Signal the poll loop below to stop once we have caught up.
                checkReachedEnd = true;
            };

            // Raised on critical errors, e.g. connection failures or all brokers down.
            consumer.OnError += (_, error) =>
            {
                sp.Log($"Error: {error}");
                Console.WriteLine($"Error: {error}");
            };

            // Raised on deserialization errors or when a consumed message has an error != NoError.
            consumer.OnConsumeError += (_, msg) =>
            {
                sp.Log($"Error consuming from topic/partition/offset {msg.Topic}/{msg.Partition}/{msg.Offset}: {msg.Error}");
                Console.WriteLine($"Error consuming from topic/partition/offset {msg.Topic}/{msg.Partition}/{msg.Offset}: {msg.Error}");
            };

            consumer.OnOffsetsCommitted += (_, commit) =>
            {
                Console.WriteLine($"[{string.Join(", ", commit.Offsets)}]");
                sp.Log(string.Join(", ", commit.Offsets));
                // FIX: the success message was printed unconditionally, even when
                // the commit had failed; it now only prints on success.
                if (commit.Error)
                {
                    Console.WriteLine($"Failed to commit offsets: {commit.Error}");
                }
                else
                {
                    Console.WriteLine($"Successfully committed offsets: [{string.Join(", ", commit.Offsets)}]");
                }
            };

            // In the 0.x API, attaching OnPartitionsAssigned obliges us to call Assign.
            consumer.OnPartitionsAssigned += (_, partitions) =>
            {
                Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}], member id: {consumer.MemberId}");
                consumer.Assign(partitions);
            };

            consumer.OnPartitionsRevoked += (_, partitions) =>
            {
                Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]");
                consumer.Unassign();
            };

            consumer.OnStatistics += (_, json) => Console.WriteLine($"Statistics: {json}");

            consumer.Subscribe(topics);
            Console.WriteLine($"Subscribed to: [{string.Join(", ", consumer.Subscription)}]");

            sp.Log("Before while loop");
            Console.WriteLine("Ctrl-C to exit.");

            // Poll until cancelled or we have drained the topic to EOF.
            while (!cancellationTokenSource.IsCancellationRequested)
            {
                consumer.Poll(TimeSpan.FromMilliseconds(1000));
                if (checkReachedEnd)
                {
                    break;
                }
            }

            sp.Log("Run_Poll End");
        }
    }
    catch (Exception ex)
    {
        sp.Log("Exeception in Run_Poll ");
        sp.Log(ex.ToString());
        throw;
    }
}
/// <summary>
/// Entry point: reads broker list, group id and topic name from appSettings.json,
/// subscribes a group-managed consumer and echoes consumed messages to the
/// console until Ctrl-C is pressed.
/// </summary>
static void Main(string[] args)
{
    var builder = new ConfigurationBuilder()
        .SetBasePath(Directory.GetCurrentDirectory())
        .AddJsonFile("appSettings.json");
    Configuration = builder.Build();

    var brokerList = Configuration.GetSection("brokers").GetChildren().Select(x => x.Value).ToArray();

    var config = new Dictionary<string, object>
    {
        // FIX: bootstrap.servers must be a comma-delimited host:port list;
        // the original joined with a space, which breaks multi-broker configs.
        { "bootstrap.servers", string.Join(",", brokerList) },
        { "group.id", Configuration["groupId"] },
        { "enable.auto.commit", true },
        { "auto.commit.interval.ms", 5000 },
        { "statistics.interval.ms", 60000 },
        { "default.topic.config", new Dictionary<string, object>()
            {
                { "auto.offset.reset", "smallest" }
            }
        }
    };

    using (var consumer = new Confluent.Kafka.Consumer<Confluent.Kafka.Null, string>(
        config, null, new StringDeserializer(Encoding.UTF8)))
    {
        var topicName = Configuration["topicName"];

        // FIX: the original manually Assign()ed partition 0 at offset 0 and then
        // also called Subscribe(topicName). Manual assignment conflicts with
        // group-managed subscription (the rebalance handlers below do the
        // assigning); the stray Assign has been removed.

        consumer.OnMessage += (_, msg)
            => Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {msg.Value}");

        consumer.OnPartitionEOF += (_, end)
            => Console.WriteLine($"Reached end of topic {end.Topic} partition {end.Partition}, next message will be at offset {end.Offset}");

        // Raised on critical errors, e.g. connection failures or all brokers down.
        consumer.OnError += (_, error)
            => Console.WriteLine($"Error: {error}");

        // Raised on deserialization errors or when a consumed message has an error != NoError.
        consumer.OnConsumeError += (_, msg)
            => Console.WriteLine($"Error consuming from topic/partition/offset {msg.Topic}/{msg.Partition}/{msg.Offset}: {msg.Error}");

        consumer.OnOffsetsCommitted += (_, commit) =>
        {
            Console.WriteLine($"[{string.Join(", ", commit.Offsets)}]");
            // FIX: success message was printed even when the commit failed.
            if (commit.Error)
            {
                Console.WriteLine($"Failed to commit offsets: {commit.Error}");
            }
            else
            {
                Console.WriteLine($"Successfully committed offsets: [{string.Join(", ", commit.Offsets)}]");
            }
        };

        // In the 0.x API, attaching OnPartitionsAssigned obliges us to call Assign.
        consumer.OnPartitionsAssigned += (_, partitions) =>
        {
            Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}], member id: {consumer.MemberId}");
            consumer.Assign(partitions);
        };

        consumer.OnPartitionsRevoked += (_, partitions) =>
        {
            Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]");
            consumer.Unassign();
        };

        consumer.OnStatistics += (_, json) => Console.WriteLine($"Statistics: {json}");

        consumer.Subscribe(topicName);
        Console.WriteLine($"Subscribed to: [{string.Join(", ", consumer.Subscription)}]");

        var cancelled = false;
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true; // prevent the process from terminating.
            cancelled = true;
        };

        Console.WriteLine("Ctrl-C to exit.");
        while (!cancelled)
        {
            consumer.Poll(TimeSpan.FromMilliseconds(100));
        }
    }
}