internal void Init(RdKafkaType type, IntPtr config, Config.LogCallback logger)
{
    // Shared initialization for Producer/Consumer: install native callbacks,
    // create the librdkafka handle, and start the callback-polling task.

    // Fall back to a default console logger when the caller supplied none.
    logger = logger ?? ((string handle, int level, string fac, string buf) =>
    {
        var now = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff", CultureInfo.InvariantCulture);
        Console.WriteLine($"{level}|{now}|{handle}|{fac}| {buf}");
    });

    // Stored in a field (not a local) — presumably so the GC keeps the
    // delegate alive while native code holds a function pointer to it.
    LogDelegate = (IntPtr rk, int level, string fac, string buf) =>
    {
        // The log_cb is called very early during construction, before
        // SafeKafkaHandle or any of the C# wrappers are ready.
        // So we can't really pass rk on, just pass the rk name instead.
        var name = Marshal.PtrToStringAnsi(LibRdKafka.name(rk));
        logger(name, level, fac, buf);
    };
    LibRdKafka.conf_set_log_cb(config, LogDelegate);

    // Same field-rooting pattern for the statistics callback; forwards the
    // raw JSON stats string to OnStatistics subscribers.
    StatsDelegate = (IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr opaque) =>
    {
        OnStatistics?.Invoke(this, Marshal.PtrToStringAnsi(json));
        return 0;
    };
    LibRdKafka.conf_set_stats_cb(config, StatsDelegate);

    // Callbacks must be configured before the handle is created — the log
    // callback can fire during creation (see comment above).
    handle = SafeKafkaHandle.Create(type, config);

    callbackCts = new CancellationTokenSource();
    callbackTask = StartCallbackTask(callbackCts.Token);
}
/// <summary>
/// Setting a config value and dumping the config should round-trip the value.
/// </summary>
public void DumpedConfigLooksReasonable()
{
    var config = new Config();
    config["client.id"] = "test";

    Dictionary<string, string> dump = config.Dump();

    // Fixed argument order: xUnit's Assert.Equal takes (expected, actual);
    // the original had them reversed, which produces a confusing failure
    // message when the test breaks.
    Assert.Equal("test", dump["client.id"]);
}
public static void Main(string[] args)
{
    // Minimal round-trip demo: publish one message, then consume the topic.

    // Produce a single message and wait for its delivery report.
    using (var producer = new Producer("localhost:9092"))
    using (var topic = producer.Topic("test"))
    {
        var payload = Encoding.UTF8.GetBytes("Hello Kafka");
        var report = topic.Produce(payload).Result;
        Console.WriteLine($"Produced to Partition: {report.Partition}, Offset: {report.Offset}");
    }

    // Consume and print everything from the same topic until Enter is pressed.
    var config = new Config { GroupId = "test-consumer" };
    using (var consumer = new EventConsumer(config, "localhost:9092"))
    {
        consumer.OnMessage += (sender, message) =>
        {
            var body = Encoding.UTF8.GetString(message.Payload, 0, message.Payload.Length);
            Console.WriteLine($"Topic: {message.Topic} Partition: {message.Partition} Offset: {message.Offset} {body}");
        };
        consumer.Subscribe(new List<string> { "test" });
        consumer.Start();
        Console.ReadLine();
    }
}
/// <summary>
/// Creates a producer; a null <paramref name="config"/> means all defaults.
/// Optionally seeds the broker list.
/// </summary>
public Producer(Config config, string brokerList = null)
{
    if (config == null)
    {
        config = new Config();
    }

    // Duplicate the native config handle before handing it to librdkafka —
    // presumably because the native side takes ownership of the pointer.
    IntPtr configPtr = config.handle.Dup();

    // The delivery-report callback must be set before Init creates the handle.
    LibRdKafka.conf_set_dr_msg_cb(configPtr, DeliveryReportDelegate);
    Init(RdKafkaType.Producer, configPtr, config.Logger);

    if (brokerList != null)
    {
        handle.AddBrokers(brokerList);
    }
}
/// <summary>
/// Starts a benchmark publisher thread that sends 100 timestamped messages
/// per second (alternating between partitions 0 and 1) until the token is
/// cancelled, recording the delivery round-trip time in the "Published" timer.
/// </summary>
private static Thread StartProducer(CancellationTokenSource tokenSource)
{
    var timer = Metric.Timer("Published", Unit.Events);
    var thread = new Thread(() =>
    {
        var topicConfig = new TopicConfig();
        topicConfig["request.required.acks"] = "0"; // Don't require an ack from the broker.

        var config = new Config
        {
            GroupId = consumerGroupName,
            EnableAutoCommit = true,
            StatisticsInterval = TimeSpan.FromSeconds(10),
            DefaultTopicConfig = topicConfig
        };
        config["socket.blocking.max.ms"] = "1"; // Maximum time a broker socket operation may block.
        config["queue.buffering.max.ms"] = "1"; // Maximum time to buffer data when using async mode.

        using (var publisher = new Producer(config, brokers))
        using (var topic = publisher.Topic(topicName))
        {
            while (!tokenSource.IsCancellationRequested)
            {
                Thread.Sleep(1000);
                for (var i = 0; i < 100; i++)
                {
                    // Payload is the send timestamp so the consumer side can
                    // measure end-to-end latency.
                    var ticks = DateTime.UtcNow.Ticks;
                    topic.Produce(Encoding.UTF8.GetBytes(ticks.ToString()), partition: (int)(ticks % 2))
                        .ContinueWith(task =>
                        {
                            if (task.Exception != null)
                            {
                                Console.WriteLine("{0}: Error publishing message - {1}",
                                    DateTime.Now.ToLongTimeString(), task.Exception);
                                return;
                            }
                            // Ticks are 100ns units; use the named constant
                            // instead of the magic number 10000 for ticks->ms.
                            timer.Record((DateTime.UtcNow.Ticks - ticks) / TimeSpan.TicksPerMillisecond,
                                TimeUnit.Milliseconds);
                            reports.Add(task.Result);
                        });
                }
            }
            Console.WriteLine("Producer cancelled.");
            // BUG FIX: the original called Thread.CurrentThread.Abort() here.
            // Aborting the current thread raises ThreadAbortException while the
            // enclosing using blocks are still disposing, and the API is
            // deprecated; the thread exits naturally when this lambda returns,
            // so the call was simply removed.
        }
    });
    thread.Start();
    return thread;
}
/// <summary>
/// Creates a consumer, wiring up the native rebalance and offset-commit
/// callbacks, and optionally seeds the broker list.
/// </summary>
public Consumer(Config config, string brokerList = null)
{
    // Assign delegates to fields before passing them to native code so they
    // remain reachable for the lifetime of the handle.
    RebalanceDelegate = RebalanceCallback;
    CommitDelegate = CommitCallback;

    // Work on a duplicate of the native config handle (see Producer ctor).
    IntPtr configPtr = config.handle.Dup();
    LibRdKafka.conf_set_rebalance_cb(configPtr, RebalanceDelegate);
    LibRdKafka.conf_set_offset_commit_cb(configPtr, CommitDelegate);

    if (config.DefaultTopicConfig != null)
    {
        LibRdKafka.conf_set_default_topic_conf(configPtr, config.DefaultTopicConfig.handle.Dup());
    }

    Init(RdKafkaType.Consumer, configPtr, config.Logger);

    if (brokerList != null)
    {
        handle.AddBrokers(brokerList);
    }
}
/// <summary>
/// Simple consumer example: manually assigns topic "foo" partition 0 at
/// offset 5 and prints every received message until Enter is pressed.
/// </summary>
public static void Main(string[] args)
{
    // ROBUSTNESS FIX: the original indexed args[0] unguarded and threw
    // IndexOutOfRangeException when run with no arguments.
    if (args.Length < 1)
    {
        Console.WriteLine("Usage: <broker-list>");
        return;
    }
    string brokerList = args[0];
    // (The original also collected args[1..] into a 'topics' list that was
    // never used — the assignment below is hard-coded — so it was removed.)

    var config = new Config() { GroupId = "simple-csharp-consumer" };

    using (var consumer = new EventConsumer(config, brokerList))
    {
        consumer.OnMessage += (obj, msg) =>
        {
            string text = Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length);
            Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {text}");
        };

        // NOTE(review): hard-coded to topic "foo", partition 0, offset 5 —
        // looks like example scaffolding; confirm before reusing.
        consumer.Assign(new List<TopicPartitionOffset> { new TopicPartitionOffset("foo", 0, 5) });
        consumer.Start();

        Console.WriteLine("Started consumer, press enter to stop consuming");
        Console.ReadLine();
    }
}
/// <summary>
/// Reading a configuration key that librdkafka does not recognize must throw.
/// </summary>
public void GettingUnknownParameterThrows()
{
    var config = new Config();

    Assert.Throws<InvalidOperationException>(() =>
    {
        var unused = config["unknown"];
    });
}
/// <summary>
/// Assigning a non-numeric value to a numeric setting must throw.
/// </summary>
public void SettingParameterToInvalidValueThrows()
{
    var config = new Config();

    Assert.Throws<InvalidOperationException>(() =>
    {
        config["session.timeout.ms"] = "string";
    });
}
/// <summary>
/// A value written through the indexer must be readable back unchanged.
/// </summary>
public void SetAndGetParameterWorks()
{
    var config = new Config();

    config["client.id"] = "test";

    // Fixed argument order: xUnit's Assert.Equal takes (expected, actual);
    // the original had them reversed, which produces a confusing failure
    // message when the test breaks.
    Assert.Equal("test", config["client.id"]);
}
/// <summary>
/// Event-driven consumer; all construction is delegated to the base
/// <c>Consumer</c> — this type only adds event-based message delivery.
/// </summary>
public EventConsumer(Config config, string brokerList = null)
    : base(config, brokerList)
{}
/// <summary>
/// Benchmark consumer: polls the given topic on a background task, measuring
/// the delta between each message's embedded producer timestamp and receipt
/// time. Blocks until the first partition assignment, then returns the
/// polling task. Ctrl+C triggers a graceful shutdown via the token source.
/// </summary>
private static Task StartPollingConsumer(string topicName, CancellationTokenSource tokenSource)
{
    var timer = Metric.Timer("Received", Unit.Events);
    // Signalled by the OnPartitionsAssigned handler below so this method
    // only returns once the consumer is actually receiving.
    var handle = new AutoResetEvent(false);
    var config = new Config
    {
        GroupId = consumerGroupName,
        EnableAutoCommit = true,
        StatisticsInterval = TimeSpan.FromSeconds(10)
    };
    var consumer = new Consumer(config, brokers);

    // First Ctrl+C: request graceful shutdown and keep the process alive
    // (args.Cancel = true). Second Ctrl+C: return before setting args.Cancel,
    // letting the default handler terminate the process.
    var cancelCount = 0;
    Console.CancelKeyPress += (sender, args) =>
    {
        cancelCount++;
        if (cancelCount >= 2) return;
        Console.WriteLine("Shutting down... Press Ctrl+C again to force quit, but please wait a little while first.");
        tokenSource.Cancel();
        args.Cancel = true;
    };

    Action action = () =>
    {
        consumer.OnPartitionsAssigned += (obj, partitions) =>
        {
            Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}]");
            consumer.Assign(partitions);
            handle.Set(); // unblock the WaitOne at the bottom of this method
        };
        consumer.OnPartitionsRevoked += (obj, partitions) =>
        {
            Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]");
            consumer.Unassign();
        };
        consumer.OnOffsetCommit += (obj, commit) =>
        {
            // _NO_OFFSET just means there was nothing to commit — not a failure.
            if (commit.Error != ErrorCode.NO_ERROR && commit.Error != ErrorCode._NO_OFFSET)
            {
                Console.WriteLine($"Failed to commit offsets: {commit.Error}");
            }
        };
        // Leftover reflection-based field access, superseded by the direct
        // property reads in the loop below; kept for reference.
        //var messageAndErrorType = typeof(MessageAndError);
        //var errorMember = messageAndErrorType.GetField("Error", System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic);
        //var messageMember = messageAndErrorType.GetField("Message", System.Reflection.BindingFlags.Instance | System.Reflection.BindingFlags.NonPublic);

        // Poll loop: runs until shutdown is requested.
        while (!tokenSource.IsCancellationRequested)
        {
            var result = consumer.Consume(TimeSpan.FromMilliseconds(1000));
            var time = DateTime.UtcNow.Ticks;
            if (!result.HasValue) continue; // poll timed out, nothing received
            var value = result.Value;
            var error = value.Error; // (ErrorCode)errorMember.GetValue(value);
            var msg = value.Message; // (Message)messageMember.GetValue(value);
            if (error == ErrorCode._PARTITION_EOF) continue; // end of partition, not an error
            if (error != ErrorCode.NO_ERROR)
            {
                Console.Error.WriteLine(error);
                continue;
            }
            if (msg.Payload == null || msg.Payload.Length == 0)
            {
                Console.WriteLine("no payload...");
                continue;
            }
            // Payload is expected to be the producer's DateTime.UtcNow.Ticks
            // as ASCII digits; ticks are 100ns, so /10000 converts to ms.
            var diff = (time - long.Parse(Encoding.UTF8.GetString(msg.Payload))) / 10000;
            timer.Record(diff, TimeUnit.Milliseconds);
            receivedMessages.Add(msg);
        }
        Console.WriteLine("Shutting down consumer.");
        consumer.Unsubscribe();
        consumer.Dispose();
    };

    consumer.Subscribe(new List<string> { topicName });
    var task = Task.Run(action);
    Console.WriteLine("Waiting for partitions...");
    // Block until OnPartitionsAssigned has fired at least once.
    handle.WaitOne();
    return task;
}
/// <summary>
/// Advanced consumer example: subscribes to the given topics, prints every
/// message, commits offsets manually every 10th offset (auto-commit is off
/// by default), and logs errors, end-of-partition events, rebalances and
/// statistics until Enter is pressed.
/// </summary>
public static void Run(string brokerList, List<string> topics)
{
    bool enableAutoCommit = false;

    var config = new Config()
    {
        GroupId = "advanced-csharp-consumer",
        EnableAutoCommit = enableAutoCommit,
        StatisticsInterval = TimeSpan.FromSeconds(60)
    };

    using (var consumer = new EventConsumer(config, brokerList))
    {
        consumer.OnMessage += (obj, msg) =>
        {
            string text = Encoding.UTF8.GetString(msg.Payload, 0, msg.Payload.Length);
            Console.WriteLine($"Topic: {msg.Topic} Partition: {msg.Partition} Offset: {msg.Offset} {text}");
            // Manual commit every 10th offset when auto-commit is disabled.
            if (!enableAutoCommit && msg.Offset % 10 == 0)
            {
                Console.WriteLine($"Committing offset");
                consumer.Commit(msg).Wait();
                Console.WriteLine($"Committed offset");
            }
        };

        consumer.OnError += (obj, errorCode) =>
        {
            Console.WriteLine($"Error: {errorCode}");
        };

        consumer.OnEndReached += (obj, end) =>
        {
            Console.WriteLine($"Reached end of topic {end.Topic} partition {end.Partition}, next message will be at offset {end.Offset}");
        };

        if (enableAutoCommit)
        {
            consumer.OnOffsetCommit += (obj, commit) =>
            {
                // BUG FIX: the original fell through and printed the success
                // message even when the commit had failed; success is now
                // only reported when there was no error.
                if (commit.Error != ErrorCode.NO_ERROR)
                {
                    Console.WriteLine($"Failed to commit offsets: {commit.Error}");
                }
                else
                {
                    Console.WriteLine($"Successfully committed offsets: [{string.Join(", ", commit.Offsets)}]");
                }
            };
        }

        consumer.OnPartitionsAssigned += (obj, partitions) =>
        {
            Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}]");
            consumer.Assign(partitions);
        };

        consumer.OnPartitionsRevoked += (obj, partitions) =>
        {
            Console.WriteLine($"Revoked partitions: [{string.Join(", ", partitions)}]");
            consumer.Unassign();
        };

        consumer.OnStatistics += (obj, json) =>
        {
            Console.WriteLine($"Statistics: {json}");
        };

        consumer.Subscribe(topics);
        consumer.Start();

        Console.WriteLine($"Assigned to: [{string.Join(", ", consumer.Assignment)}]");
        Console.WriteLine($"Subscribed to: [{string.Join(", ", consumer.Subscription)}]");
        Console.WriteLine($"Started consumer {consumer.MemberId}, press enter to stop consuming");
        Console.ReadLine();
    }
}