/// <summary>
///     Creates a new Consumer instance from the configuration, handlers and
///     deserializers accumulated by <paramref name="builder" />.
/// </summary>
/// <param name="builder">
///     The builder whose state is used to construct this consumer.
/// </param>
internal Consumer(ConsumerBuilder<TKey, TValue> builder)
{
    var baseConfig = builder.ConstructBaseConfig(this);

    // Copy the user-supplied event handlers onto this instance.
    this.statisticsHandler = baseConfig.statisticsHandler;
    this.logHandler = baseConfig.logHandler;
    this.errorHandler = baseConfig.errorHandler;
    this.partitionsAssignedHandler = baseConfig.partitionsAssignedHandler;
    this.partitionsRevokedHandler = baseConfig.partitionsRevokedHandler;
    this.partitionsLostHandler = baseConfig.partitionsLostHandler;
    this.offsetsCommittedHandler = baseConfig.offsetsCommittedHandler;
    this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler;
    this.revokedOrLostHandlerIsFunc = baseConfig.revokedOrLostHandlerIsFunc;

    Librdkafka.Initialize(null);

    // Strip out the client-level cancellation-delay setting; everything that
    // remains is destined for librdkafka (or is a dotnet-specific key handled below).
    var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

    if (config.FirstOrDefault(prop => string.Equals(prop.Key, "group.id", StringComparison.Ordinal)).Value == null)
    {
        throw new ArgumentException("'group.id' configuration parameter is required and was not specified.");
    }

    // Prepend client name/version properties and remove the dotnet-specific
    // consume-result-fields key, which librdkafka would reject.
    var modifiedConfig = Library.NameAndVersionConfig
        .Concat(config.Where(prop => prop.Key != ConfigPropertyNames.Consumer.ConsumeResultFields))
        .ToList();

    // Work out which ConsumeResult fields to marshal from the native message.
    // Fields default to enabled; "all" keeps the defaults, "none" disables
    // everything, otherwise only the listed fields are enabled.
    var enabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Consumer.ConsumeResultFields).Value;
    if (enabledFieldsObj != null)
    {
        var fields = enabledFieldsObj.Replace(" ", "");
        if (fields != "all")
        {
            this.enableHeaderMarshaling = false;
            this.enableTimestampMarshaling = false;
            this.enableTopicNameMarshaling = false;
            if (fields != "none")
            {
                var parts = fields.Split(',');
                foreach (var part in parts)
                {
                    switch (part)
                    {
                        case "headers":
                            this.enableHeaderMarshaling = true;
                            break;
                        case "timestamp":
                            this.enableTimestampMarshaling = true;
                            break;
                        case "topic":
                            this.enableTopicNameMarshaling = true;
                            break;
                        default:
                            throw new ArgumentException(
                                $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'.");
                    }
                }
            }
        }
    }

    var configHandle = SafeConfigHandle.Create();
    modifiedConfig.ForEach((kvp) =>
    {
        if (kvp.Value == null)
        {
            // Use the (paramName, message) overload: the single-string ctor
            // treats its argument as the parameter name, not the message,
            // so the intended explanation was previously hidden in ParamName.
            throw new ArgumentNullException(kvp.Key, $"'{kvp.Key}' configuration parameter must not be null.");
        }
        configHandle.Set(kvp.Key, kvp.Value);
    });

    // Explicitly keep references to delegates so they are not reclaimed by the GC.
    rebalanceDelegate = RebalanceCallback;
    commitDelegate = CommitCallback;
    errorCallbackDelegate = ErrorCallback;
    logCallbackDelegate = LogCallback;
    statisticsCallbackDelegate = StatisticsCallback;
    oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback;

    IntPtr configPtr = configHandle.DangerousGetHandle();

    // Only install native callbacks that the user actually provided a
    // handler for; installing a rebalance callback, in particular,
    // changes librdkafka's default assignment behavior.
    if (partitionsAssignedHandler != null || partitionsRevokedHandler != null || partitionsLostHandler != null)
    {
        Librdkafka.conf_set_rebalance_cb(configPtr, rebalanceDelegate);
    }
    if (offsetsCommittedHandler != null)
    {
        Librdkafka.conf_set_offset_commit_cb(configPtr, commitDelegate);
    }
    if (errorHandler != null)
    {
        Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
    }
    if (logHandler != null)
    {
        Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
    }
    if (statisticsHandler != null)
    {
        Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);
    }
    if (oAuthBearerTokenRefreshHandler != null)
    {
        Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate);
    }

    this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Consumer, configPtr, this);
    configHandle.SetHandleAsInvalid(); // config object is no longer useable.

    // Redirect the main poll queue to the consumer queue so a single
    // Consume/poll loop services both message delivery and events.
    var pollSetConsumerError = kafkaHandle.PollSetConsumer();
    if (pollSetConsumerError != ErrorCode.NoError)
    {
        throw new KafkaException(new Error(pollSetConsumerError,
            $"Failed to redirect the poll queue to consumer_poll queue: {ErrorCodeExtensions.GetReason(pollSetConsumerError)}"));
    }

    // setup key deserializer.
    if (builder.KeyDeserializer == null)
    {
        if (!defaultDeserializers.TryGetValue(typeof(TKey), out object deserializer))
        {
            throw new InvalidOperationException(
                $"Key deserializer was not specified and there is no default deserializer defined for type {typeof(TKey).Name}.");
        }
        this.keyDeserializer = (IDeserializer<TKey>)deserializer;
    }
    else
    {
        this.keyDeserializer = builder.KeyDeserializer;
    }

    // setup value deserializer.
    if (builder.ValueDeserializer == null)
    {
        if (!defaultDeserializers.TryGetValue(typeof(TValue), out object deserializer))
        {
            throw new InvalidOperationException(
                $"Value deserializer was not specified and there is no default deserializer defined for type {typeof(TValue).Name}.");
        }
        this.valueDeserializer = (IDeserializer<TValue>)deserializer;
    }
    else
    {
        this.valueDeserializer = builder.ValueDeserializer;
    }
}
/// <summary>
///     Initializes a new KafkaException for the given error code,
///     using the code's human-readable reason as the exception message.
/// </summary>
/// <param name="code">
///     The Kafka error code describing the failure.
/// </param>
public KafkaException(ErrorCode code)
    : base(ErrorCodeExtensions.GetReason(code))
    => Error = new Error(code);
/// <summary>
///     Creates a new <see cref="Confluent.Kafka.ConsumerBase" /> instance.
/// </summary>
/// <param name="config">
///     A collection of librdkafka configuration parameters
///     (refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
///     and parameters specific to this client (refer to:
///     <see cref="Confluent.Kafka.ConfigPropertyNames" />).
///     At a minimum, 'bootstrap.servers' and 'group.id' must be
///     specified.
/// </param>
public ConsumerBase(IEnumerable<KeyValuePair<string, string>> config)
{
    Librdkafka.Initialize(null);

    // Strip out the client-level cancellation-delay setting, then
    // materialize the result once: the caller's enumerable may be lazy
    // and it is inspected several times below (group.id check, filtering,
    // consume-result-fields lookup).
    var configList = Config.GetCancellationDelayMaxMs(config, out this.cancellationDelayMaxMs).ToList();

    if (configList.FirstOrDefault(prop => string.Equals(prop.Key, "group.id", StringComparison.Ordinal)).Value == null)
    {
        throw new ArgumentException("'group.id' configuration parameter is required and was not specified.");
    }

    // Remove the dotnet-specific consume-result-fields key, which
    // librdkafka would reject.
    var modifiedConfig = configList
        .Where(prop => prop.Key != ConfigPropertyNames.Consumer.ConsumeResultFields);

    // Work out which ConsumeResult fields to marshal from the native message.
    // Fields default to enabled; "all" keeps the defaults, "none" disables
    // everything, otherwise only the listed fields are enabled.
    var enabledFieldsObj = configList.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Consumer.ConsumeResultFields).Value;
    if (enabledFieldsObj != null)
    {
        // Values are already strings — no ToString() conversion needed.
        var fields = enabledFieldsObj.Replace(" ", "");
        if (fields != "all")
        {
            this.enableHeaderMarshaling = false;
            this.enableTimestampMarshaling = false;
            this.enableTopicNameMarshaling = false;
            if (fields != "none")
            {
                var parts = fields.Split(',');
                foreach (var part in parts)
                {
                    switch (part)
                    {
                        case "headers":
                            this.enableHeaderMarshaling = true;
                            break;
                        case "timestamp":
                            this.enableTimestampMarshaling = true;
                            break;
                        case "topic":
                            this.enableTopicNameMarshaling = true;
                            break;
                        default:
                            throw new ArgumentException(
                                $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'.");
                    }
                }
            }
        }
    }

    var configHandle = SafeConfigHandle.Create();
    modifiedConfig
        .ToList()
        .ForEach((kvp) =>
        {
            if (kvp.Value == null)
            {
                throw new ArgumentException($"'{kvp.Key}' configuration parameter must not be null.");
            }
            configHandle.Set(kvp.Key, kvp.Value);
        });

    // Explicitly keep references to delegates so they are not reclaimed by the GC.
    rebalanceDelegate = RebalanceCallback;
    commitDelegate = CommitCallback;
    errorCallbackDelegate = ErrorCallback;
    logCallbackDelegate = LogCallback;
    statsCallbackDelegate = StatsCallback;

    IntPtr configPtr = configHandle.DangerousGetHandle();

    Librdkafka.conf_set_rebalance_cb(configPtr, rebalanceDelegate);
    Librdkafka.conf_set_offset_commit_cb(configPtr, commitDelegate);
    Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
    Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
    Librdkafka.conf_set_stats_cb(configPtr, statsCallbackDelegate);

    this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Consumer, configPtr, this);
    configHandle.SetHandleAsInvalid(); // config object is no longer useable.

    // Redirect the main poll queue to the consumer queue so a single
    // poll loop services both message delivery and events.
    var pollSetConsumerError = kafkaHandle.PollSetConsumer();
    if (pollSetConsumerError != ErrorCode.NoError)
    {
        throw new KafkaException(new Error(pollSetConsumerError,
            $"Failed to redirect the poll queue to consumer_poll queue: {ErrorCodeExtensions.GetReason(pollSetConsumerError)}"));
    }
}