Code example #1
        /// <summary>
        ///     Initializes a new Producer instance.
        /// </summary>
        /// <param name="config">
        ///     librdkafka configuration parameters (refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
        ///     TODO: Link to confluent-kafka-dotnet page with dotnet specific parameters also (i.e. default.topic.config).
        /// </param>
        /// <param name="manualPoll">
        ///     If true, does not start a dedicated polling thread to trigger events or receive delivery reports -
        ///     you must call the Poll method periodically instead.
        /// </param>
        /// <param name="disableDeliveryReports">
        ///     If true, disables notification of delivery reports. Note: if set to true and you use a ProduceAsync variant that returns
        ///     a Task, the Tasks will never complete. Generally you should leave this parameter as false. Set it to true for "fire and
        ///     forget" semantics and a small boost in performance.
        /// </param>
        public Producer(IEnumerable<KeyValuePair<string, object>> config, bool manualPoll = false, bool disableDeliveryReports = false)
        {
            this.topicConfig            = (IEnumerable<KeyValuePair<string, object>>)config.FirstOrDefault(prop => prop.Key == "default.topic.config").Value;
            this.manualPoll             = manualPoll;
            this.disableDeliveryReports = disableDeliveryReports;

            var configHandle = SafeConfigHandle.Create();

            config
            .Where(prop => prop.Key != "default.topic.config")
            .ToList()
            .ForEach((kvp) => { configHandle.Set(kvp.Key, kvp.Value.ToString()); });

            IntPtr configPtr = configHandle.DangerousGetHandle();

            if (!disableDeliveryReports)
            {
                LibRdKafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback);
            }

            // TODO: provide some mechanism whereby calls to the error and log callbacks are cached until
            //       such time as event handlers have had a chance to be registered.
            LibRdKafka.conf_set_error_cb(configPtr, ErrorCallback);
            LibRdKafka.conf_set_log_cb(configPtr, LogCallback);
            LibRdKafka.conf_set_stats_cb(configPtr, StatsCallback);

            this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr);

            if (!manualPoll)
            {
                callbackCts  = new CancellationTokenSource();
                callbackTask = StartPollTask(callbackCts.Token);
            }
        }
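A minimal usage sketch for the constructor above, assuming this version of the client also exposes a Poll(TimeSpan) overload and IDisposable; the broker address and topic settings are placeholders:

    // Requires: using System; using System.Collections.Generic; using Confluent.Kafka;
    var config = new Dictionary<string, object>
    {
        { "bootstrap.servers", "localhost:9092" },  // placeholder broker address
        // Extracted by the constructor into this.topicConfig:
        { "default.topic.config", new Dictionary<string, object> { { "acks", "all" } } }
    };

    using (var producer = new Producer(config, manualPoll: true))
    {
        // With manualPoll = true no background poll thread is started, so delivery
        // reports and events are only serviced when Poll is called explicitly.
        producer.Poll(TimeSpan.FromMilliseconds(100));
    }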
 public static void ProcessConfigSettings(
     SafeConfigHandle configHandle,
     IntPtr configPtr,
     ManualConfigSettings ManualConfigSettings)
 {
     ProcessInMemoryCertificatesIfConfigured(
         configHandle,
         configPtr,
         ManualConfigSettings);
 }
        private static void ProcessInMemoryCertificatesIfConfigured(
            SafeConfigHandle configHandle,
            IntPtr configPtr,
            ManualConfigSettings ManualConfigSettings)
        {
            if (ManualConfigSettings.X509Certificate == null)
            {
                return;
            }

            X509Certificate2 caCertificate = ManualConfigSettings.CAX509Certificate;

            if (caCertificate == null)
            {
                caCertificate = AttemptToResolveCACertificateFromUserCertificate(ManualConfigSettings.X509Certificate);
            }

            if (caCertificate == null)
            {
                throw new InvalidOperationException("An accompanying in-memory CA certificate must be provided");
            }

            PrivateKeyAlgorithmAndBytes privateKeyAlgorithmAndBytes = ManualConfigSettings.PrivateKeyAlgorithmAndBytes;

            if (privateKeyAlgorithmAndBytes == null)
            {
                privateKeyAlgorithmAndBytes = AttemptToResolvePrivateKeyFromUserCertificate(ManualConfigSettings.X509Certificate);
            }

            if (privateKeyAlgorithmAndBytes == null)
            {
                throw new InvalidOperationException("A private key must accompany the in-memory certificate");
            }

            LibRdKafka.conf_set_bytes(
                configPtr,
                ManualConfigSettings.SettingNameSslCertificateLocationInMemory,
                ManualConfigSettings.X509Certificate.RawData);

            LibRdKafka.conf_set_bytes(
                configPtr,
                ManualConfigSettings.SettingNameSslCALocationInMemory,
                caCertificate.RawData);

            LibRdKafka.conf_set_bytes(
                configPtr,
                ManualConfigSettings.SettingNameSslKeyInMemory,
                privateKeyAlgorithmAndBytes.RawData);

            configHandle.Set(
                ManualConfigSettings.SettingNameSslKeyInMemoryType,
                privateKeyAlgorithmAndBytes.PrivateKeyAlgorithmInUse);
        }
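A rough sketch of the inputs this path expects: a client certificate plus an accompanying CA certificate and private key, all supplied in memory rather than as file paths. Only the property names come from the code above; constructing ManualConfigSettings via an object initializer is an assumption:

    // Requires: using System.Security.Cryptography.X509Certificates;
    var settings = new ManualConfigSettings
    {
        // Client certificate; loading it as exportable lets the private key bytes be
        // resolved from the certificate when PrivateKeyAlgorithmAndBytes is not set.
        X509Certificate = new X509Certificate2("client.pfx", "pfx-password",
                                               X509KeyStorageFlags.Exportable),
        // CA certificate; when omitted, AttemptToResolveCACertificateFromUserCertificate
        // must succeed, otherwise an InvalidOperationException is thrown.
        CAX509Certificate = new X509Certificate2("ca.crt")
    };

    ProcessConfigSettings(configHandle, configHandle.DangerousGetHandle(), settings);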
Code example #4
 internal static extern SafeTopicConfigHandle rd_kafka_conf_get_default_topic_conf(
     SafeConfigHandle conf);
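In the full binding this declaration would carry a [DllImport] attribute pointing at the native librdkafka library. A hedged sketch of the kind of wrapper that might expose it on SafeConfigHandle (a GetDefaultTopicConfig method is used that way in code example #6; this body is an assumption):

    // Hypothetical wrapper on SafeConfigHandle; the returned handle is owned by the
    // conf object, which is why example #6 duplicates it before modifying it.
    internal SafeTopicConfigHandle GetDefaultTopicConfig()
        => rd_kafka_conf_get_default_topic_conf(this);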
Code example #5
        internal Producer(ProducerBuilder<TKey, TValue> builder)
        {
            var baseConfig = builder.ConstructBaseConfig(this);

            // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false.
            // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks
            //       that never complete are never created when this is set to true.

            this.statisticsHandler = baseConfig.statisticsHandler;
            this.logHandler        = baseConfig.logHandler;
            this.errorHandler      = baseConfig.errorHandler;

            var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

            this.DeliveryReportCallback = DeliveryReportCallbackImpl;

            Librdkafka.Initialize(null);

            var modifiedConfig = config
                                 .Where(prop =>
                                        prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll &&
                                        prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports &&
                                        prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields);

            if (modifiedConfig.Where(obj => obj.Key == "delivery.report.only.error").Count() > 0)
            {
                // A managed object is kept alive over the duration of the produce request. If there is no
                // delivery report generated, there will be a memory leak. We could possibly support this
                // property by keeping track of delivery reports in managed code, but this seems like
                // more trouble than it's worth.
                throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client");
            }

            var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value;

            if (enableBackgroundPollObj != null)
            {
                this.manualPoll = !bool.Parse(enableBackgroundPollObj);
            }

            var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value;

            if (enableDeliveryReportsObj != null)
            {
                this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj);
            }

            var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value;

            if (deliveryReportEnabledFieldsObj != null)
            {
                var fields = deliveryReportEnabledFieldsObj.Replace(" ", "");
                if (fields != "all")
                {
                    this.enableDeliveryReportKey             = false;
                    this.enableDeliveryReportValue           = false;
                    this.enableDeliveryReportHeaders         = false;
                    this.enableDeliveryReportTimestamp       = false;
                    this.enableDeliveryReportPersistedStatus = false;
                    if (fields != "none")
                    {
                        var parts = fields.Split(',');
                        foreach (var part in parts)
                        {
                            switch (part)
                            {
                            case "key": this.enableDeliveryReportKey = true; break;

                            case "value": this.enableDeliveryReportValue = true; break;

                            case "timestamp": this.enableDeliveryReportTimestamp = true; break;

                            case "headers": this.enableDeliveryReportHeaders = true; break;

                            case "status": this.enableDeliveryReportPersistedStatus = true; break;

                            default: throw new ArgumentException(
                                          $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'.");
                            }
                        }
                    }
                }
            }

            var configHandle = SafeConfigHandle.Create();

            modifiedConfig.ToList().ForEach((kvp) => {
                if (kvp.Value == null)
                {
                    throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null.");
                }
                configHandle.Set(kvp.Key, kvp.Value);
            });


            IntPtr configPtr = configHandle.DangerousGetHandle();

            if (enableDeliveryReports)
            {
                Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback);
            }

            // Explicitly keep references to delegates so they are not reclaimed by the GC.
            errorCallbackDelegate      = ErrorCallback;
            logCallbackDelegate        = LogCallback;
            statisticsCallbackDelegate = StatisticsCallback;

            Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
            Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
            Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);

            this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this);
            configHandle.SetHandleAsInvalid(); // config object is no longer useable.

            if (!manualPoll)
            {
                callbackCts  = new CancellationTokenSource();
                callbackTask = StartPollTask(callbackCts.Token);
            }

            InitializeSerializers(
                builder.KeySerializer, builder.ValueSerializer,
                builder.AsyncKeySerializer, builder.AsyncValueSerializer);
        }
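A minimal usage sketch for the builder-based constructor above. The dotnet.* keys are assumed to be the string values behind ConfigPropertyNames.Producer, and the broker address is a placeholder:

    // Requires: using System; using System.Collections.Generic; using Confluent.Kafka;
    var producer = new ProducerBuilder<string, string>(new Dictionary<string, string>
        {
            { "bootstrap.servers", "localhost:9092" },
            // Client-level keys stripped out by the constructor before the rest goes to librdkafka:
            { "dotnet.producer.enable.background.poll", "true" },
            { "dotnet.producer.delivery.report.fields", "key,value,status" }
        })
        .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}"))
        .Build();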
Code example #6
        internal Producer(ProducerBuilder<TKey, TValue> builder)
        {
            var baseConfig         = builder.ConstructBaseConfig(this);
            var partitioners       = baseConfig.partitioners;
            var defaultPartitioner = baseConfig.defaultPartitioner;

            // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false.
            // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks
            //       that never complete are never created when this is set to true.

            this.statisticsHandler = baseConfig.statisticsHandler;
            this.logHandler        = baseConfig.logHandler;
            this.errorHandler      = baseConfig.errorHandler;
            this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler;

            var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

            this.DeliveryReportCallback = DeliveryReportCallbackImpl;

            Librdkafka.Initialize(null);

            var modifiedConfig = Library.NameAndVersionConfig
                                 .Concat(config
                                         .Where(prop =>
                                                prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll &&
                                                prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports &&
                                                prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields))
                                 .ToList();

            if (modifiedConfig.Where(obj => obj.Key == "delivery.report.only.error").Count() > 0)
            {
                // A managed object is kept alive over the duration of the produce request. If there is no
                // delivery report generated, there will be a memory leak. We could possibly support this
                // property by keeping track of delivery reports in managed code, but this seems like
                // more trouble than it's worth.
                throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client");
            }

            var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value;

            if (enableBackgroundPollObj != null)
            {
                this.manualPoll = !bool.Parse(enableBackgroundPollObj);
            }

            var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value;

            if (enableDeliveryReportsObj != null)
            {
                this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj);
            }

            var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value;

            if (deliveryReportEnabledFieldsObj != null)
            {
                var fields = deliveryReportEnabledFieldsObj.Replace(" ", "");
                if (fields != "all")
                {
                    this.enableDeliveryReportKey             = false;
                    this.enableDeliveryReportValue           = false;
                    this.enableDeliveryReportHeaders         = false;
                    this.enableDeliveryReportTimestamp       = false;
                    this.enableDeliveryReportPersistedStatus = false;
                    if (fields != "none")
                    {
                        var parts = fields.Split(',');
                        foreach (var part in parts)
                        {
                            switch (part)
                            {
                            case "key": this.enableDeliveryReportKey = true; break;

                            case "value": this.enableDeliveryReportValue = true; break;

                            case "timestamp": this.enableDeliveryReportTimestamp = true; break;

                            case "headers": this.enableDeliveryReportHeaders = true; break;

                            case "status": this.enableDeliveryReportPersistedStatus = true; break;

                            default: throw new ArgumentException(
                                          $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'.");
                            }
                        }
                    }
                }
            }

            var    configHandle = SafeConfigHandle.Create();
            IntPtr configPtr    = configHandle.DangerousGetHandle();

            modifiedConfig.ForEach((kvp) =>
            {
                if (kvp.Value == null)
                {
                    throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null.");
                }
                configHandle.Set(kvp.Key, kvp.Value);
            });

            if (enableDeliveryReports)
            {
                Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback);
            }

            // Explicitly keep references to delegates so they are not reclaimed by the GC.
            errorCallbackDelegate      = ErrorCallback;
            logCallbackDelegate        = LogCallback;
            statisticsCallbackDelegate = StatisticsCallback;
            oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback;

            if (errorHandler != null)
            {
                Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
            }
            if (logHandler != null)
            {
                Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
            }
            if (statisticsHandler != null)
            {
                Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);
            }
            if (oAuthBearerTokenRefreshHandler != null)
            {
                Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate);
            }

            Action<SafeTopicConfigHandle, PartitionerDelegate> addPartitionerToTopicConfig = (topicConfigHandle, partitioner) =>
            {
                Librdkafka.PartitionerDelegate librdkafkaPartitioner = (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt, IntPtr rkt_opaque, IntPtr msg_opaque) =>
                {
                    unsafe
                    {
                        var topicNamePtr = Librdkafka.topic_name(rkt);
                        var topic        = Util.Marshal.PtrToStringUTF8(topicNamePtr);
                        var keyIsNull    = keydata == IntPtr.Zero;
                        var keyBytes     = keyIsNull
                                ? ReadOnlySpan<byte>.Empty
                                : new ReadOnlySpan<byte>(keydata.ToPointer(), (int)keylen);
                        return partitioner(topic, partition_cnt, keyBytes, keyIsNull);
                    }
                };
                this.partitionerHandles.Add(GCHandle.Alloc(librdkafkaPartitioner));
                Librdkafka.topic_conf_set_partitioner_cb(topicConfigHandle.DangerousGetHandle(), librdkafkaPartitioner);
            };

            // Configure the default custom partitioner.
            if (defaultPartitioner != null)
            {
                // The default topic config may have been modified by topic-level
                // configuration parameters passed down from the top level config.
                // If that's the case, duplicate the default topic config to avoid
                // clobbering any already configured values.
                var defaultTopicConfigHandle            = configHandle.GetDefaultTopicConfig();
                SafeTopicConfigHandle topicConfigHandle =
                    defaultTopicConfigHandle.DangerousGetHandle() != IntPtr.Zero
                        ? defaultTopicConfigHandle.Duplicate()
                        : SafeTopicConfigHandle.Create();
                addPartitionerToTopicConfig(topicConfigHandle, defaultPartitioner);
                Librdkafka.conf_set_default_topic_conf(configPtr, topicConfigHandle.DangerousGetHandle());
            }

            this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this);
            configHandle.SetHandleAsInvalid();  // ownership was transferred.

            // Per-topic partitioners.
            foreach (var partitioner in partitioners)
            {
                var topicConfigHandle = this.ownedKafkaHandle.DuplicateDefaultTopicConfig();
                addPartitionerToTopicConfig(topicConfigHandle, partitioner.Value);
                this.ownedKafkaHandle.newTopic(partitioner.Key, topicConfigHandle.DangerousGetHandle());
            }

            if (!manualPoll)
            {
                callbackCts  = new CancellationTokenSource();
                callbackTask = StartPollTask(callbackCts.Token);
            }

            InitializeSerializers(
                builder.KeySerializer, builder.ValueSerializer,
                builder.AsyncKeySerializer, builder.AsyncValueSerializer);
        }
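The partitioner plumbing above is normally driven from the builder. A sketch assuming SetDefaultPartitioner is the builder method behind baseConfig.defaultPartitioner; the routing logic is illustrative only:

    var producer = new ProducerBuilder<string, string>(
            new Dictionary<string, string> { { "bootstrap.servers", "localhost:9092" } })
        // Same (topic, partitionCount, keyData, keyIsNull) shape that the librdkafka
        // callback above invokes via partitioner(...).
        .SetDefaultPartitioner((topic, partitionCount, keyData, keyIsNull) =>
            keyIsNull
                ? Partition.Any                                 // let librdkafka decide
                : new Partition(keyData[0] % partitionCount))   // toy key-based routing
        .Build();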
Code example #7
        internal Consumer(ConsumerBuilder<TKey, TValue> builder)
        {
            var baseConfig = builder.ConstructBaseConfig(this);

            this.statisticsHandler              = baseConfig.statisticsHandler;
            this.logHandler                     = baseConfig.logHandler;
            this.errorHandler                   = baseConfig.errorHandler;
            this.partitionsAssignedHandler      = baseConfig.partitionsAssignedHandler;
            this.partitionsRevokedHandler       = baseConfig.partitionsRevokedHandler;
            this.partitionsLostHandler          = baseConfig.partitionsLostHandler;
            this.offsetsCommittedHandler        = baseConfig.offsetsCommittedHandler;
            this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler;
            this.revokedOrLostHandlerIsFunc     = baseConfig.revokedOrLostHandlerIsFunc;
            Librdkafka.Initialize(null);

            var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

            if (config.FirstOrDefault(prop => string.Equals(prop.Key, "group.id", StringComparison.Ordinal)).Value == null)
            {
                throw new ArgumentException("'group.id' configuration parameter is required and was not specified.");
            }

            var modifiedConfig = Library.NameAndVersionConfig
                                 .Concat(config.Where(prop => prop.Key != ConfigPropertyNames.Consumer.ConsumeResultFields))
                                 .ToList();

            var enabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Consumer.ConsumeResultFields).Value;

            if (enabledFieldsObj != null)
            {
                var fields = enabledFieldsObj.Replace(" ", "");
                if (fields != "all")
                {
                    this.enableHeaderMarshaling    = false;
                    this.enableTimestampMarshaling = false;
                    this.enableTopicNameMarshaling = false;
                    if (fields != "none")
                    {
                        var parts = fields.Split(',');
                        foreach (var part in parts)
                        {
                            switch (part)
                            {
                            case "headers": this.enableHeaderMarshaling = true; break;

                            case "timestamp": this.enableTimestampMarshaling = true; break;

                            case "topic": this.enableTopicNameMarshaling = true; break;

                            default: throw new ArgumentException(
                                          $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'.");
                            }
                        }
                    }
                }
            }

            var configHandle = SafeConfigHandle.Create();

            modifiedConfig.ForEach((kvp) =>
            {
                if (kvp.Value == null)
                {
                    throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null.");
                }
                configHandle.Set(kvp.Key, kvp.Value);
            });

            // Explicitly keep references to delegates so they are not reclaimed by the GC.
            rebalanceDelegate          = RebalanceCallback;
            commitDelegate             = CommitCallback;
            errorCallbackDelegate      = ErrorCallback;
            logCallbackDelegate        = LogCallback;
            statisticsCallbackDelegate = StatisticsCallback;
            oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback;

            IntPtr configPtr = configHandle.DangerousGetHandle();

            if (partitionsAssignedHandler != null || partitionsRevokedHandler != null || partitionsLostHandler != null)
            {
                Librdkafka.conf_set_rebalance_cb(configPtr, rebalanceDelegate);
            }
            if (offsetsCommittedHandler != null)
            {
                Librdkafka.conf_set_offset_commit_cb(configPtr, commitDelegate);
            }

            if (errorHandler != null)
            {
                Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
            }
            if (logHandler != null)
            {
                Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
            }
            if (statisticsHandler != null)
            {
                Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);
            }
            if (oAuthBearerTokenRefreshHandler != null)
            {
                Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate);
            }

            this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Consumer, configPtr, this);
            configHandle.SetHandleAsInvalid(); // config object is no longer useable.

            var pollSetConsumerError = kafkaHandle.PollSetConsumer();

            if (pollSetConsumerError != ErrorCode.NoError)
            {
                throw new KafkaException(new Error(pollSetConsumerError,
                                                   $"Failed to redirect the poll queue to consumer_poll queue: {ErrorCodeExtensions.GetReason(pollSetConsumerError)}"));
            }

            // setup key deserializer.
            if (builder.KeyDeserializer == null)
            {
                if (!defaultDeserializers.TryGetValue(typeof(TKey), out object deserializer))
                {
                    throw new InvalidOperationException(
                              $"Key deserializer was not specified and there is no default deserializer defined for type {typeof(TKey).Name}.");
                }
                this.keyDeserializer = (IDeserializer<TKey>)deserializer;
            }
            else
            {
                this.keyDeserializer = builder.KeyDeserializer;
            }

            // setup value deserializer.
            if (builder.ValueDeserializer == null)
            {
                if (!defaultDeserializers.TryGetValue(typeof(TValue), out object deserializer))
                {
                    throw new InvalidOperationException(
                              $"Value deserializer was not specified and there is no default deserializer defined for type {typeof(TValue).Name}.");
                }
                this.valueDeserializer = (IDeserializer<TValue>)deserializer;
            }
            else
            {
                this.valueDeserializer = builder.ValueDeserializer;
            }
        }
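A minimal usage sketch matching the constructor above: 'group.id' is required, and the consume-result-fields key (assumed to be the string value behind ConfigPropertyNames.Consumer.ConsumeResultFields) limits which fields are marshaled:

    // Requires: using System.Collections.Generic; using Confluent.Kafka;
    var consumer = new ConsumerBuilder<Ignore, string>(new Dictionary<string, string>
        {
            { "bootstrap.servers", "localhost:9092" },
            { "group.id", "example-group" },                          // required, see the check above
            { "dotnet.consumer.consume.result.fields", "topic,timestamp" }
        })
        .Build();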
Code example #8
        /// <summary>
        ///     Creates a new <see cref="Confluent.Kafka.ConsumerBase" /> instance.
        /// </summary>
        /// <param name="config">
        ///     A collection of librdkafka configuration parameters
        ///     (refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md)
        ///     and parameters specific to this client (refer to:
        ///     <see cref="Confluent.Kafka.ConfigPropertyNames" />).
        ///     At a minimum, 'bootstrap.servers' and 'group.id' must be
        ///     specified.
        /// </param>
        public ConsumerBase(IEnumerable<KeyValuePair<string, string>> config)
        {
            Librdkafka.Initialize(null);

            config = Config.GetCancellationDelayMaxMs(config, out this.cancellationDelayMaxMs);

            if (config.FirstOrDefault(prop => string.Equals(prop.Key, "group.id", StringComparison.Ordinal)).Value == null)
            {
                throw new ArgumentException("'group.id' configuration parameter is required and was not specified.");
            }

            var modifiedConfig = config
                                 .Where(prop => prop.Key != ConfigPropertyNames.Consumer.ConsumeResultFields);

            var enabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Consumer.ConsumeResultFields).Value;

            if (enabledFieldsObj != null)
            {
                var fields = enabledFieldsObj.ToString().Replace(" ", "");
                if (fields != "all")
                {
                    this.enableHeaderMarshaling    = false;
                    this.enableTimestampMarshaling = false;
                    this.enableTopicNameMarshaling = false;
                    if (fields != "none")
                    {
                        var parts = fields.Split(',');
                        foreach (var part in parts)
                        {
                            switch (part)
                            {
                            case "headers": this.enableHeaderMarshaling = true; break;

                            case "timestamp": this.enableTimestampMarshaling = true; break;

                            case "topic": this.enableTopicNameMarshaling = true; break;

                            default: throw new ArgumentException(
                                          $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'.");
                            }
                        }
                    }
                }
            }

            var configHandle = SafeConfigHandle.Create();

            modifiedConfig
            .ToList()
            .ForEach((kvp) => {
                if (kvp.Value == null)
                {
                    throw new ArgumentException($"'{kvp.Key}' configuration parameter must not be null.");
                }
                configHandle.Set(kvp.Key, kvp.Value.ToString());
            });

            // Explicitly keep references to delegates so they are not reclaimed by the GC.
            rebalanceDelegate     = RebalanceCallback;
            commitDelegate        = CommitCallback;
            errorCallbackDelegate = ErrorCallback;
            logCallbackDelegate   = LogCallback;
            statsCallbackDelegate = StatsCallback;

            IntPtr configPtr = configHandle.DangerousGetHandle();

            Librdkafka.conf_set_rebalance_cb(configPtr, rebalanceDelegate);
            Librdkafka.conf_set_offset_commit_cb(configPtr, commitDelegate);

            Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
            Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
            Librdkafka.conf_set_stats_cb(configPtr, statsCallbackDelegate);

            this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Consumer, configPtr, this);
            configHandle.SetHandleAsInvalid(); // config object is no longer useable.

            var pollSetConsumerError = kafkaHandle.PollSetConsumer();

            if (pollSetConsumerError != ErrorCode.NoError)
            {
                throw new KafkaException(new Error(pollSetConsumerError,
                                                   $"Failed to redirect the poll queue to consumer_poll queue: {ErrorCodeExtensions.GetReason(pollSetConsumerError)}"));
            }
        }
Code example #9
File: Config.cs Project: zhijia1122/rdkafka-dotnet
 public Config()
 {
     handle = SafeConfigHandle.Create();
 }
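A hedged sketch of how this older rdkafka-dotnet Config is typically populated; the GroupId property and the string indexer that forwards to the underlying SafeConfigHandle are assumptions about that API:

    var config = new Config { GroupId = "example-group" };
    config["bootstrap.servers"] = "localhost:9092";  // forwarded to the wrapped handle's Set(...)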
Code example #10
        internal Producer(ProducerBuilder<TKey, TValue> builder)
        {
            var baseConfig = builder.ConstructBaseConfig(this);

            // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false.
            // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks
            //       that never complete are never created when this is set to true.

            this.statisticsHandler = baseConfig.statisticsHandler;
            this.logHandler        = baseConfig.logHandler;
            this.errorHandler      = baseConfig.errorHandler;
            this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler;
            this.partitioners = baseConfig.partitioners;

            var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs);

            this.DeliveryReportCallback = DeliveryReportCallbackImpl;

            Librdkafka.Initialize(null);

            var modifiedConfig = Library.NameAndVersionConfig
                                 .Concat(config
                                         .Where(prop =>
                                                prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll &&
                                                prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports &&
                                                prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields))
                                 .ToList();

            if (modifiedConfig.Where(obj => obj.Key == "delivery.report.only.error").Count() > 0)
            {
                // A managed object is kept alive over the duration of the produce request. If there is no
                // delivery report generated, there will be a memory leak. We could possibly support this
                // property by keeping track of delivery reports in managed code, but this seems like
                // more trouble than it's worth.
                throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client");
            }

            var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value;

            if (enableBackgroundPollObj != null)
            {
                this.manualPoll = !bool.Parse(enableBackgroundPollObj);
            }

            var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value;

            if (enableDeliveryReportsObj != null)
            {
                this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj);
            }

            var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value;

            if (deliveryReportEnabledFieldsObj != null)
            {
                var fields = deliveryReportEnabledFieldsObj.Replace(" ", "");
                if (fields != "all")
                {
                    this.enableDeliveryReportKey             = false;
                    this.enableDeliveryReportValue           = false;
                    this.enableDeliveryReportHeaders         = false;
                    this.enableDeliveryReportTimestamp       = false;
                    this.enableDeliveryReportPersistedStatus = false;
                    if (fields != "none")
                    {
                        var parts = fields.Split(',');
                        foreach (var part in parts)
                        {
                            switch (part)
                            {
                            case "key": this.enableDeliveryReportKey = true; break;

                            case "value": this.enableDeliveryReportValue = true; break;

                            case "timestamp": this.enableDeliveryReportTimestamp = true; break;

                            case "headers": this.enableDeliveryReportHeaders = true; break;

                            case "status": this.enableDeliveryReportPersistedStatus = true; break;

                            default: throw new ArgumentException(
                                          $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'.");
                            }
                        }
                    }
                }
            }

            var configHandle = SafeConfigHandle.Create();

            modifiedConfig.ForEach((kvp) =>
            {
                if (kvp.Value == null)
                {
                    throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null.");
                }
                configHandle.Set(kvp.Key, kvp.Value);
            });


            IntPtr configPtr = configHandle.DangerousGetHandle();

            if (enableDeliveryReports)
            {
                Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback);
            }

            // Explicitly keep references to delegates so they are not reclaimed by the GC.
            errorCallbackDelegate      = ErrorCallback;
            logCallbackDelegate        = LogCallback;
            statisticsCallbackDelegate = StatisticsCallback;
            oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback;

            if (errorHandler != null)
            {
                Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate);
            }
            if (logHandler != null)
            {
                Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate);
            }
            if (statisticsHandler != null)
            {
                Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate);
            }
            if (oAuthBearerTokenRefreshHandler != null)
            {
                Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate);
            }

            this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this);

            if (this.partitioners?.Any() ?? false)
            {
                foreach (var partitioner in this.partitioners)
                {
                    var    topicConfigHandle = SafeTopicConfigHandle.Create();
                    IntPtr topicConfigPtr    = topicConfigHandle.DangerousGetHandle();

                    Librdkafka.PartitionerDelegate partitionerDelegate =
                        (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt,
                         IntPtr rkt_opaque, IntPtr msg_opaque) =>
                    {
                        if (this.ownedKafkaHandle.IsClosed)
                        {
                            return Partition.Any;
                        }

                        var topic = partitioner.Key;
                        var providedPartitioner = partitioner.Value;

                        return CallCustomPartitioner(topic, providedPartitioner, keydata, keylen, partition_cnt, rkt_opaque, msg_opaque);
                    };

                    this.partitionerCallbacks.Add(partitioner.Key, partitionerDelegate);

                    // Set partitioner on the topic_conf...
                    Librdkafka.topic_conf_set_partitioner_cb(topicConfigPtr, partitionerDelegate);

                    // Associate topic_conf with topic
                    // this also caches the topic handle (and topic_conf)
                    this.ownedKafkaHandle.getKafkaTopicHandle(partitioner.Key, topicConfigPtr);

                    // topic_conf ownership was transferred
                    topicConfigHandle.SetHandleAsInvalid();
                }
            }

            configHandle.SetHandleAsInvalid(); // config object is no longer usable.

            if (!manualPoll)
            {
                callbackCts  = new CancellationTokenSource();
                callbackTask = StartPollTask(callbackCts.Token);
            }

            InitializeSerializers(
                builder.KeySerializer, builder.ValueSerializer,
                builder.AsyncKeySerializer, builder.AsyncValueSerializer);
        }
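The per-topic partitioner dictionary used above is normally populated through the builder. A sketch assuming SetPartitioner(topic, partitioner) is the method behind baseConfig.partitioners:

    var producer = new ProducerBuilder<string, string>(
            new Dictionary<string, string> { { "bootstrap.servers", "localhost:9092" } })
        // Applies only to the 'orders' topic; other topics keep librdkafka's default partitioner.
        .SetPartitioner("orders", (topic, partitionCount, keyData, keyIsNull) =>
            new Partition(0))   // illustration only: pin everything to partition 0
        .Build();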