/// <summary>
///     Get the cached topic handle for <paramref name="topic"/>, creating
///     it (with the instance topic config applied) on first use.
/// </summary>
/// <param name="topic">The topic name to resolve a handle for.</param>
/// <returns>The (possibly newly created) topic handle.</returns>
private SafeTopicHandle getKafkaTopicHandle(string topic)
{
    // TODO: We should consider getting rid of these and add proper support in librdkafka itself
    // (producev() with RD_KAFKA_V_TOPIC() is one step closer)

    // Single dictionary lookup instead of ContainsKey + indexer.
    if (topicHandles.TryGetValue(topic, out var existingHandle))
    {
        return existingHandle;
    }

    var topicConfigHandle = SafeTopicConfigHandle.Create();
    if (topicConfig != null)
    {
        foreach (var kvp in topicConfig)
        {
            topicConfigHandle.Set(kvp.Key, kvp.Value.ToString());
        }
    }
    topicConfigHandle.Set("produce.offset.report", "true");
    IntPtr configPtr = topicConfigHandle.DangerousGetHandle();

    // note: there is a possible (benign) race condition here - topicHandle could have already
    // been created for the topic (and possibly added to topicHandles). If the topicHandle has
    // already been created, rdkafka will return it and not create another. the call to rdkafka
    // is threadsafe.
    var topicHandle = kafkaHandle.Topic(topic, configPtr);

    // Indexer assignment (not Add) so the benign race described above cannot
    // surface as an ArgumentException on a duplicate key.
    topicHandles[topic] = topicHandle;
    return topicHandle;
}
/// <summary>
///     P/Invoke binding for librdkafka's rd_kafka_topic_conf_dup: returns a
///     duplicate of <paramref name="conf"/> as a new topic config handle.
///     NOTE(review): the [DllImport] attribute is presumably declared on an
///     adjacent line not visible in this chunk — confirm against the full file.
/// </summary>
internal static extern SafeTopicConfigHandle rd_kafka_topic_conf_dup( SafeTopicConfigHandle conf);
/// <summary>
///     Initialize a new Producer from <paramref name="builder"/>: extracts
///     handlers and client config, parses the dotnet-specific producer
///     properties, wires librdkafka callbacks and custom partitioners, and
///     creates the underlying rd_kafka handle.
/// </summary>
/// <param name="builder">The builder supplying config and (de)serializers.</param>
/// <exception cref="ArgumentException">
///     If 'delivery.report.only.error' is set, or an unknown delivery report
///     field name is configured.
/// </exception>
/// <exception cref="ArgumentNullException">If any config value is null.</exception>
internal Producer(ProducerBuilder<TKey, TValue> builder)
{
    var baseConfig = builder.ConstructBaseConfig(this);
    var partitioners = baseConfig.partitioners;
    var defaultPartitioner = baseConfig.defaultPartitioner;

    // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false.
    // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks
    // that never complete are never created when this is set to true.

    this.statisticsHandler = baseConfig.statisticsHandler;
    this.logHandler = baseConfig.logHandler;
    this.errorHandler = baseConfig.errorHandler;
    this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler;

    var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

    this.DeliveryReportCallback = DeliveryReportCallbackImpl;

    Librdkafka.Initialize(null);

    // Strip out the dotnet-only properties before handing config to librdkafka.
    var modifiedConfig = Library.NameAndVersionConfig
        .Concat(config
            .Where(prop =>
                prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll &&
                prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports &&
                prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields))
        .ToList();

    // Any() rather than Where(...).Count() > 0: stops at the first match.
    if (modifiedConfig.Any(obj => obj.Key == "delivery.report.only.error"))
    {
        // A managed object is kept alive over the duration of the produce request. If there is no
        // delivery report generated, there will be a memory leak. We could possibly support this
        // property by keeping track of delivery reports in managed code, but this seems like
        // more trouble than it's worth.
        throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client");
    }

    var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value;
    if (enableBackgroundPollObj != null)
    {
        this.manualPoll = !bool.Parse(enableBackgroundPollObj);
    }

    var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value;
    if (enableDeliveryReportsObj != null)
    {
        this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj);
    }

    // "all" (default) enables every field; "none" disables all; otherwise a
    // comma-separated subset of key/value/timestamp/headers/status.
    var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value;
    if (deliveryReportEnabledFieldsObj != null)
    {
        var fields = deliveryReportEnabledFieldsObj.Replace(" ", "");
        if (fields != "all")
        {
            this.enableDeliveryReportKey = false;
            this.enableDeliveryReportValue = false;
            this.enableDeliveryReportHeaders = false;
            this.enableDeliveryReportTimestamp = false;
            this.enableDeliveryReportPersistedStatus = false;
            if (fields != "none")
            {
                foreach (var part in fields.Split(','))
                {
                    switch (part)
                    {
                        case "key": this.enableDeliveryReportKey = true; break;
                        case "value": this.enableDeliveryReportValue = true; break;
                        case "timestamp": this.enableDeliveryReportTimestamp = true; break;
                        case "headers": this.enableDeliveryReportHeaders = true; break;
                        case "status": this.enableDeliveryReportPersistedStatus = true; break;
                        default:
                            throw new ArgumentException(
                                $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'.");
                    }
                }
            }
        }
    }

    var configHandle = SafeConfigHandle.Create();
    IntPtr configPtr = configHandle.DangerousGetHandle();

    modifiedConfig.ForEach((kvp) =>
    {
        if (kvp.Value == null)
        {
            // Pass the offending key as paramName and keep a descriptive message (CA2208).
            throw new ArgumentNullException(kvp.Key, $"'{kvp.Key}' configuration parameter must not be null.");
        }
        configHandle.Set(kvp.Key, kvp.Value);
    });

    if (enableDeliveryReports)
    {
        Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback);
    }

    // Explicitly keep references to delegates so they are not reclaimed by the GC.
    errorCallbackDelegate = ErrorCallback;
    logCallbackDelegate = LogCallback;
    statisticsCallbackDelegate = StatisticsCallback;
    oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback;

    if (errorHandler != null)
    {
        Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
    }
    if (logHandler != null)
    {
        Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
    }
    if (statisticsHandler != null)
    {
        Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);
    }
    if (oAuthBearerTokenRefreshHandler != null)
    {
        Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate);
    }

    // Wraps a managed PartitionerDelegate in a native-callable delegate and
    // installs it on the given topic config.
    Action<SafeTopicConfigHandle, PartitionerDelegate> addPartitionerToTopicConfig = (topicConfigHandle, partitioner) =>
    {
        Librdkafka.PartitionerDelegate librdkafkaPartitioner =
            (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt, IntPtr rkt_opaque, IntPtr msg_opaque) =>
        {
            unsafe
            {
                var topicNamePtr = Librdkafka.topic_name(rkt);
                var topic = Util.Marshal.PtrToStringUTF8(topicNamePtr);
                var keyIsNull = keydata == IntPtr.Zero;
                var keyBytes = keyIsNull
                    ? ReadOnlySpan<byte>.Empty
                    : new ReadOnlySpan<byte>(keydata.ToPointer(), (int)keylen);
                return partitioner(topic, partition_cnt, keyBytes, keyIsNull);
            }
        };
        // Pin the delegate so the GC cannot collect it while librdkafka holds
        // a function pointer to it.
        this.partitionerHandles.Add(GCHandle.Alloc(librdkafkaPartitioner));
        Librdkafka.topic_conf_set_partitioner_cb(topicConfigHandle.DangerousGetHandle(), librdkafkaPartitioner);
    };

    // Configure the default custom partitioner.
    if (defaultPartitioner != null)
    {
        // The default topic config may have been modified by topic-level
        // configuration parameters passed down from the top level config.
        // If that's the case, duplicate the default topic config to avoid
        // clobbering any already configured values.
        var defaultTopicConfigHandle = configHandle.GetDefaultTopicConfig();
        SafeTopicConfigHandle topicConfigHandle =
            defaultTopicConfigHandle.DangerousGetHandle() != IntPtr.Zero
                ? defaultTopicConfigHandle.Duplicate()
                : SafeTopicConfigHandle.Create();
        addPartitionerToTopicConfig(topicConfigHandle, defaultPartitioner);
        Librdkafka.conf_set_default_topic_conf(configPtr, topicConfigHandle.DangerousGetHandle());
    }

    this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this);
    configHandle.SetHandleAsInvalid(); // ownership was transferred.

    // Per-topic partitioners.
    foreach (var partitioner in partitioners)
    {
        var topicConfigHandle = this.ownedKafkaHandle.DuplicateDefaultTopicConfig();
        addPartitionerToTopicConfig(topicConfigHandle, partitioner.Value);
        this.ownedKafkaHandle.newTopic(partitioner.Key, topicConfigHandle.DangerousGetHandle());
    }

    if (!manualPoll)
    {
        callbackCts = new CancellationTokenSource();
        callbackTask = StartPollTask(callbackCts.Token);
    }

    InitializeSerializers(
        builder.KeySerializer, builder.ValueSerializer,
        builder.AsyncKeySerializer, builder.AsyncValueSerializer);
}
/// <summary>
///     Initialize a new Producer from <paramref name="builder"/>: extracts
///     handlers and client config, parses the dotnet-specific producer
///     properties, wires librdkafka callbacks and per-topic partitioners, and
///     creates the underlying rd_kafka handle.
/// </summary>
/// <param name="builder">The builder supplying config and (de)serializers.</param>
/// <exception cref="ArgumentException">
///     If 'delivery.report.only.error' is set, or an unknown delivery report
///     field name is configured.
/// </exception>
/// <exception cref="ArgumentNullException">If any config value is null.</exception>
internal Producer(ProducerBuilder<TKey, TValue> builder)
{
    var baseConfig = builder.ConstructBaseConfig(this);

    // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false.
    // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks
    // that never complete are never created when this is set to true.

    this.statisticsHandler = baseConfig.statisticsHandler;
    this.logHandler = baseConfig.logHandler;
    this.errorHandler = baseConfig.errorHandler;
    this.partitioners = baseConfig.partitioners;

    var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

    this.DeliveryReportCallback = DeliveryReportCallbackImpl;

    Librdkafka.Initialize(null);

    // Strip out the dotnet-only properties before handing config to librdkafka.
    var modifiedConfig = Library.NameAndVersionConfig
        .Concat(config
            .Where(prop =>
                prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll &&
                prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports &&
                prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields))
        .ToList();

    // Any() rather than Where(...).Count() > 0: stops at the first match.
    if (modifiedConfig.Any(obj => obj.Key == "delivery.report.only.error"))
    {
        // A managed object is kept alive over the duration of the produce request. If there is no
        // delivery report generated, there will be a memory leak. We could possibly support this
        // property by keeping track of delivery reports in managed code, but this seems like
        // more trouble than it's worth.
        throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client");
    }

    var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value;
    if (enableBackgroundPollObj != null)
    {
        this.manualPoll = !bool.Parse(enableBackgroundPollObj);
    }

    var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value;
    if (enableDeliveryReportsObj != null)
    {
        this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj);
    }

    // "all" (default) enables every field; "none" disables all; otherwise a
    // comma-separated subset of key/value/timestamp/headers/status.
    var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value;
    if (deliveryReportEnabledFieldsObj != null)
    {
        var fields = deliveryReportEnabledFieldsObj.Replace(" ", "");
        if (fields != "all")
        {
            this.enableDeliveryReportKey = false;
            this.enableDeliveryReportValue = false;
            this.enableDeliveryReportHeaders = false;
            this.enableDeliveryReportTimestamp = false;
            this.enableDeliveryReportPersistedStatus = false;
            if (fields != "none")
            {
                foreach (var part in fields.Split(','))
                {
                    switch (part)
                    {
                        case "key": this.enableDeliveryReportKey = true; break;
                        case "value": this.enableDeliveryReportValue = true; break;
                        case "timestamp": this.enableDeliveryReportTimestamp = true; break;
                        case "headers": this.enableDeliveryReportHeaders = true; break;
                        case "status": this.enableDeliveryReportPersistedStatus = true; break;
                        default:
                            throw new ArgumentException(
                                $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'.");
                    }
                }
            }
        }
    }

    var configHandle = SafeConfigHandle.Create();

    modifiedConfig.ForEach((kvp) =>
    {
        if (kvp.Value == null)
        {
            // Pass the offending key as paramName and keep a descriptive message (CA2208).
            throw new ArgumentNullException(kvp.Key, $"'{kvp.Key}' configuration parameter must not be null.");
        }
        configHandle.Set(kvp.Key, kvp.Value);
    });

    IntPtr configPtr = configHandle.DangerousGetHandle();

    if (enableDeliveryReports)
    {
        Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback);
    }

    // Explicitly keep references to delegates so they are not reclaimed by the GC.
    errorCallbackDelegate = ErrorCallback;
    logCallbackDelegate = LogCallback;
    statisticsCallbackDelegate = StatisticsCallback;

    if (errorHandler != null)
    {
        Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
    }
    if (logHandler != null)
    {
        Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
    }
    if (statisticsHandler != null)
    {
        Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);
    }

    this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this);

    if (this.partitioners?.Any() ?? false)
    {
        foreach (var partitioner in this.partitioners)
        {
            var topicConfigHandle = SafeTopicConfigHandle.Create();
            IntPtr topicConfigPtr = topicConfigHandle.DangerousGetHandle();
            Librdkafka.PartitionerDelegate partitionerDelegate =
                (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt, IntPtr rkt_opaque, IntPtr msg_opaque) =>
            {
                if (this.ownedKafkaHandle.IsClosed)
                {
                    return Partition.Any;
                }
                var topic = partitioner.Key;
                var providedPartitioner = partitioner.Value;
                return CallCustomPartitioner(topic, providedPartitioner, keydata, keylen, partition_cnt, rkt_opaque, msg_opaque);
            };
            // Keep a reference so the delegate passed to native code is not GC'd.
            this.partitionerCallbacks.Add(partitioner.Key, partitionerDelegate);
            // Set partitioner on the topic_conf...
            Librdkafka.topic_conf_set_partitioner_cb(topicConfigPtr, partitionerDelegate);
            // Associate topic_conf with topic
            // this also caches the topic handle (and topic_conf)
            this.ownedKafkaHandle.getKafkaTopicHandle(partitioner.Key, topicConfigPtr);
            // topic_conf ownership was transferred
            topicConfigHandle.SetHandleAsInvalid();
        }
    }

    configHandle.SetHandleAsInvalid(); // config object is no longer usable.

    if (!manualPoll)
    {
        callbackCts = new CancellationTokenSource();
        callbackTask = StartPollTask(callbackCts.Token);
    }

    InitializeSerializers(
        builder.KeySerializer, builder.ValueSerializer,
        builder.AsyncKeySerializer, builder.AsyncValueSerializer);
}
/// <summary>
///     Initialize a new empty TopicConfig instance backed by a freshly
///     created librdkafka topic configuration handle.
/// </summary>
public TopicConfig() { handle = SafeTopicConfigHandle.Create(); }