internal Dictionary<string, string> Dump()
{
    UIntPtr cntp = (UIntPtr)0;
    IntPtr data = Librdkafka.conf_dump(handle, out cntp);

    if (data == IntPtr.Zero)
    {
        throw new Exception("Zero data");
    }

    try
    {
        if (((int)cntp & 1) != 0)
        {
            // Expect Key -> Value, so even number of strings
            throw new Exception("Invalid number of config entries");
        }

        var dict = new Dictionary<string, string>();
        for (int i = 0; i < (int)cntp / 2; i++)
        {
            dict.Add(
                Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, 2 * i * Util.Marshal.SizeOf<IntPtr>())),
                Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, (2 * i + 1) * Util.Marshal.SizeOf<IntPtr>())));
        }

        // Filter out callback pointers
        return dict.Where(kv => !kv.Key.EndsWith("_cb")).ToDictionary(kv => kv.Key, kv => kv.Value);
    }
    finally
    {
        Librdkafka.conf_dump_free(data, cntp);
    }
}
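For reference, the layout Dump() walks is a flat native array of string pointers interleaved as [key0, value0, key1, value1, ...]. The standalone sketch below is not part of the client; it only uses standard Marshal APIs to reproduce that layout in manually allocated memory and parse it with the same 2*i / 2*i+1 indexing, which may make the pointer arithmetic above easier to follow.

using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;

static class DumpLayoutDemo
{
    static void Main()
    {
        // Interleaved key/value strings, as conf_dump lays them out.
        string[] flat = { "bootstrap.servers", "localhost:9092", "client.id", "demo" };

        // Build the native array of string pointers (the real API uses UTF-8; ANSI is fine for this ASCII demo).
        IntPtr[] strPtrs = new IntPtr[flat.Length];
        for (int i = 0; i < flat.Length; i++)
            strPtrs[i] = Marshal.StringToHGlobalAnsi(flat[i]);

        IntPtr data = Marshal.AllocHGlobal(flat.Length * IntPtr.Size);
        Marshal.Copy(strPtrs, 0, data, flat.Length);

        try
        {
            // Same indexing as Dump(): entry i's key is at slot 2*i, its value at slot 2*i + 1.
            var dict = new Dictionary<string, string>();
            for (int i = 0; i < flat.Length / 2; i++)
            {
                string key = Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, 2 * i * IntPtr.Size));
                string value = Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, (2 * i + 1) * IntPtr.Size));
                dict.Add(key, value);
            }
            foreach (var kv in dict) Console.WriteLine($"{kv.Key} = {kv.Value}");
        }
        finally
        {
            foreach (var p in strPtrs) Marshal.FreeHGlobal(p);
            Marshal.FreeHGlobal(data);
        }
    }
}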
private ConfigEntryResult extractConfigEntry(IntPtr configEntryPtr)
{
    var synonyms = new List<ConfigSynonym>();
    var synonymsPtr = Librdkafka.ConfigEntry_synonyms(configEntryPtr, out UIntPtr synonymsCount);
    if (synonymsPtr != IntPtr.Zero)
    {
        IntPtr[] synonymsPtrArr = new IntPtr[(int)synonymsCount];
        Marshal.Copy(synonymsPtr, synonymsPtrArr, 0, (int)synonymsCount);
        synonyms = synonymsPtrArr
            .Select(synonymPtr => extractConfigEntry(synonymPtr))
            .Select(e => new ConfigSynonym { Name = e.Name, Value = e.Value, Source = e.Source })
            .ToList();
    }

    return new ConfigEntryResult
    {
        Name = PtrToStringUTF8(Librdkafka.ConfigEntry_name(configEntryPtr)),
        Value = PtrToStringUTF8(Librdkafka.ConfigEntry_value(configEntryPtr)),
        IsDefault = (int)Librdkafka.ConfigEntry_is_default(configEntryPtr) == 1,
        IsSensitive = (int)Librdkafka.ConfigEntry_is_sensitive(configEntryPtr) == 1,
        IsReadOnly = (int)Librdkafka.ConfigEntry_is_read_only(configEntryPtr) == 1,
        Source = Librdkafka.ConfigEntry_source(configEntryPtr),
        Synonyms = synonyms
    };
}
private List<AclBinding> extractAclBindings(IntPtr aclBindingsPtr, int aclBindingsCnt)
{
    if (aclBindingsCnt == 0)
    {
        return new List<AclBinding>();
    }

    IntPtr[] aclBindingsPtrArr = new IntPtr[aclBindingsCnt];
    Marshal.Copy(aclBindingsPtr, aclBindingsPtrArr, 0, aclBindingsCnt);

    return aclBindingsPtrArr.Select(aclBindingPtr => new AclBinding()
    {
        Pattern = new ResourcePattern
        {
            Type = Librdkafka.AclBinding_restype(aclBindingPtr),
            Name = PtrToStringUTF8(Librdkafka.AclBinding_name(aclBindingPtr)),
            ResourcePatternType = Librdkafka.AclBinding_resource_pattern_type(aclBindingPtr)
        },
        Entry = new AccessControlEntry
        {
            Principal = PtrToStringUTF8(Librdkafka.AclBinding_principal(aclBindingPtr)),
            Host = PtrToStringUTF8(Librdkafka.AclBinding_host(aclBindingPtr)),
            Operation = Librdkafka.AclBinding_operation(aclBindingPtr),
            PermissionType = Librdkafka.AclBinding_permission_type(aclBindingPtr)
        }
    }).ToList();
}
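The AclBinding, ResourcePattern and AccessControlEntry shapes populated above are the same types used by the public admin API. A minimal usage sketch, assuming a broker at localhost:9092 and the standard Confluent.Kafka.Admin surface (CreateAclsAsync, DescribeAclsAsync and the AclBindingFilter companion types); the topic and principal names are made up.

using System;
using System.Collections.Generic;
using System.Threading.Tasks;
using Confluent.Kafka;
using Confluent.Kafka.Admin;

static async Task AclExample()
{
    using var admin = new AdminClientBuilder(
        new AdminClientConfig { BootstrapServers = "localhost:9092" }).Build();

    // Allow User:alice to read the 'orders' topic.
    var binding = new AclBinding
    {
        Pattern = new ResourcePattern
        {
            Type = ResourceType.Topic,
            Name = "orders",
            ResourcePatternType = ResourcePatternType.Literal
        },
        Entry = new AccessControlEntry
        {
            Principal = "User:alice",
            Host = "*",
            Operation = AclOperation.Read,
            PermissionType = AclPermissionType.Allow
        }
    };
    await admin.CreateAclsAsync(new List<AclBinding> { binding });

    // Describe the ACLs on that topic; the AclBindings list on the result is
    // produced by extractAclBindings above.
    var described = await admin.DescribeAclsAsync(new AclBindingFilter
    {
        PatternFilter = new ResourcePatternFilter
        {
            Type = ResourceType.Topic,
            Name = "orders",
            ResourcePatternType = ResourcePatternType.Literal
        },
        EntryFilter = new AccessControlEntryFilter
        {
            Operation = AclOperation.Any,
            PermissionType = AclPermissionType.Any
        }
    });
    foreach (var acl in described.AclBindings)
        Console.WriteLine($"{acl.Entry.Principal} {acl.Entry.Operation} {acl.Pattern.Name}");
}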
private List<DescribeConfigsExceptionResult> extractResultConfigs(IntPtr configResourcesPtr, int configResourceCount)
{
    var result = new List<DescribeConfigsExceptionResult>();

    IntPtr[] configResourcesPtrArr = new IntPtr[configResourceCount];
    Marshal.Copy(configResourcesPtr, configResourcesPtrArr, 0, configResourceCount);
    foreach (var configResourcePtr in configResourcesPtrArr)
    {
        var resourceName = PtrToStringUTF8(Librdkafka.ConfigResource_name(configResourcePtr));
        var errorCode = Librdkafka.ConfigResource_error(configResourcePtr);
        var errorReason = PtrToStringUTF8(Librdkafka.ConfigResource_error_string(configResourcePtr));
        var resourceConfigType = Librdkafka.ConfigResource_type(configResourcePtr);

        var configEntriesPtr = Librdkafka.ConfigResource_configs(configResourcePtr, out UIntPtr configEntryCount);
        IntPtr[] configEntriesPtrArr = new IntPtr[(int)configEntryCount];
        if ((int)configEntryCount > 0)
        {
            Marshal.Copy(configEntriesPtr, configEntriesPtrArr, 0, (int)configEntryCount);
        }
        var configEntries = configEntriesPtrArr
            .Select(configEntryPtr => extractConfigEntry(configEntryPtr))
            .ToDictionary(e => e.Name);

        result.Add(new DescribeConfigsExceptionResult
        {
            ConfigResource = new ConfigResource { Name = resourceName, Type = resourceConfigType },
            Entries = configEntries,
            Error = new Error(errorCode, errorReason)
        });
    }

    return result;
}
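The entries extracted here feed both the success path (DescribeConfigsResult) and the failure path (DescribeConfigsException) of the public API. A hedged sketch of how they typically surface to callers, assuming a broker at localhost:9092, an existing topic named orders, and the usings from the earlier ACL sketch.

static async Task DescribeConfigsExample()
{
    using var admin = new AdminClientBuilder(
        new AdminClientConfig { BootstrapServers = "localhost:9092" }).Build();
    try
    {
        var results = await admin.DescribeConfigsAsync(new[]
        {
            new ConfigResource { Type = ResourceType.Topic, Name = "orders" }
        });
        foreach (var entry in results[0].Entries.Values)
            Console.WriteLine($"{entry.Name} = {entry.Value} (default={entry.IsDefault}, source={entry.Source})");
    }
    catch (DescribeConfigsException e)
    {
        // Per-resource failures carry the same DescribeConfigsExceptionResult objects built above.
        foreach (var r in e.Results)
            Console.WriteLine($"{r.ConfigResource.Name}: {r.Error.Reason}");
    }
}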
protected override bool ReleaseHandle()
{
    Librdkafka.topic_destroy(handle);
    // This corresponds to the DangerousAddRef call when
    // the TopicHandle was created.
    kafkaHandle.DangerousRelease();
    return true;
}
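The DangerousAddRef/DangerousRelease pairing mentioned in the comment is the standard SafeHandle technique for keeping a parent native handle alive for as long as a dependent handle exists. Below is a toy, self-contained illustration of that pattern only; the class names and fake handle values are invented for the demo and are not part of the client.

using System;
using System.Runtime.InteropServices;

class ParentHandle : SafeHandle
{
    public ParentHandle() : base(IntPtr.Zero, ownsHandle: true) { SetHandle(new IntPtr(1)); }
    public override bool IsInvalid => handle == IntPtr.Zero;
    protected override bool ReleaseHandle()
    {
        Console.WriteLine("parent native resource destroyed");
        return true;
    }
}

class ChildHandle : SafeHandle
{
    private readonly ParentHandle parent;

    public ChildHandle(ParentHandle parent) : base(IntPtr.Zero, ownsHandle: true)
    {
        bool success = false;
        parent.DangerousAddRef(ref success);          // pin the parent while the child is alive
        if (!success) throw new InvalidOperationException("parent handle unavailable");
        this.parent = parent;
        SetHandle(new IntPtr(2));
    }

    public override bool IsInvalid => handle == IntPtr.Zero;

    protected override bool ReleaseHandle()
    {
        Console.WriteLine("child native resource destroyed");
        parent.DangerousRelease();                    // balances the DangerousAddRef in the constructor
        return true;
    }
}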
private void LogCallback(IntPtr rk, SyslogLevel level, string fac, string buf)
{
    // Ensure registered handlers are never called as a side-effect of Dispose/Finalize
    // (prevents deadlocks in common scenarios).
    // Note: ownedKafkaHandle can be null if the callback fires during construction
    // (in that case, we want the delegate to run).
    if (ownedKafkaHandle != null && ownedKafkaHandle.IsClosed)
    {
        return;
    }

    logHandler?.Invoke(new LogMessage(Util.Marshal.PtrToStringUTF8(Librdkafka.name(rk)), level, fac, buf));
}
internal static SafeConfigHandle Create()
{
    var ch = Librdkafka.conf_new();
    if (ch.IsInvalid)
    {
        throw new Exception("Failed to create config");
    }
    return ch;
}
private List<CreateAclReport> extractCreateAclReports(IntPtr aclResultsPtr, int aclResultsCount)
{
    IntPtr[] aclsResultsPtrArr = new IntPtr[aclResultsCount];
    Marshal.Copy(aclResultsPtr, aclsResultsPtrArr, 0, aclResultsCount);

    return aclsResultsPtrArr.Select(aclResultPtr => new CreateAclReport
    {
        Error = new Error(Librdkafka.acl_result_error(aclResultPtr), false)
    }).ToList();
}
private List<CreateTopicExceptionResult> extractTopicResults(IntPtr topicResultsPtr, int topicResultsCount)
{
    IntPtr[] topicResultsPtrArr = new IntPtr[topicResultsCount];
    Marshal.Copy(topicResultsPtr, topicResultsPtrArr, 0, topicResultsCount);

    return topicResultsPtrArr.Select(topicResultPtr => new CreateTopicExceptionResult
    {
        Topic = PtrToStringUTF8(Librdkafka.topic_result_name(topicResultPtr)),
        Error = new Error(
            Librdkafka.topic_result_error(topicResultPtr),
            PtrToStringUTF8(Librdkafka.topic_result_error_string(topicResultPtr)))
    }).ToList();
}
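These per-topic results back the CreateTopicsAsync error path. A minimal sketch of how an error produced here reaches the caller, assuming a broker at localhost:9092; the topic name and sizing are placeholders, and the usings match the earlier admin sketches.

static async Task CreateTopicsExample()
{
    using var admin = new AdminClientBuilder(
        new AdminClientConfig { BootstrapServers = "localhost:9092" }).Build();
    try
    {
        await admin.CreateTopicsAsync(new[]
        {
            new TopicSpecification { Name = "orders", NumPartitions = 3, ReplicationFactor = 1 }
        });
    }
    catch (CreateTopicsException e)
    {
        // One report per requested topic, mirroring topic_result_name / topic_result_error above.
        foreach (var r in e.Results)
            Console.WriteLine($"{r.Topic}: {r.Error.Reason}");
    }
}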
private DescribeAclsReport extractDescribeAclsReport(IntPtr resultPtr)
{
    var errCode = Librdkafka.event_error(resultPtr);
    var errString = Librdkafka.event_error_string(resultPtr);
    var resultAcls = Librdkafka.DescribeAcls_result_acls(resultPtr, out UIntPtr resultAclCntPtr);

    return new DescribeAclsReport
    {
        Error = new Error(errCode, errString, false),
        AclBindings = extractAclBindings(resultAcls, (int)resultAclCntPtr)
    };
}
private static List<DeleteGroupReport> extractDeleteGroupsReport(IntPtr eventPtr)
{
    IntPtr groupsResultPtr = Librdkafka.DeleteGroups_result_groups(eventPtr, out UIntPtr resultCountPtr);
    int groupsResultCount = (int)resultCountPtr;

    IntPtr[] groupsResultPtrArr = new IntPtr[groupsResultCount];
    Marshal.Copy(groupsResultPtr, groupsResultPtrArr, 0, groupsResultCount);

    return groupsResultPtrArr.Select(groupResultPtr => new DeleteGroupReport
    {
        Group = PtrToStringUTF8(Librdkafka.group_result_name(groupResultPtr)),
        Error = new Error(Librdkafka.group_result_error(groupResultPtr), false)
    }).ToList();
}
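The DeleteGroupReport list built here is what DeleteGroupsAsync surfaces when one or more group deletions fail. A short sketch, assuming the admin client from the earlier examples and a made-up consumer group id.

static async Task DeleteGroupsExample(IAdminClient admin)
{
    try
    {
        await admin.DeleteGroupsAsync(new List<string> { "example-consumer-group" });
    }
    catch (DeleteGroupsException e)
    {
        foreach (var r in e.Results)
            Console.WriteLine($"{r.Group}: {r.Error.Reason}");
    }
}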
private List<DeleteAclsReport> extractDeleteAclsReports(IntPtr resultPtr)
{
    var resultResponsesPtr = Librdkafka.DeleteAcls_result_responses(resultPtr, out UIntPtr resultResponsesCntPtr);
    IntPtr[] resultResponsesPtrArr = new IntPtr[(int)resultResponsesCntPtr];
    Marshal.Copy(resultResponsesPtr, resultResponsesPtrArr, 0, (int)resultResponsesCntPtr);

    return resultResponsesPtrArr.Select(resultResponsePtr =>
    {
        var matchingAcls = Librdkafka.DeleteAcls_result_response_matching_acls(
            resultResponsePtr,
            out UIntPtr resultResponseAclCntPtr);
        return new DeleteAclsReport
        {
            Error = new Error(Librdkafka.DeleteAcls_result_response_error(resultResponsePtr), false),
            AclBindings = extractAclBindings(matchingAcls, (int)resultResponseAclCntPtr)
        };
    }).ToList();
}
private void LogCallback(IntPtr rk, SyslogLevel level, string fac, string buf)
{
    // Ensure registered handlers are never called as a side-effect of Dispose/Finalize
    // (prevents deadlocks in common scenarios).
    // Note: kafkaHandle can be null if the callback fires during construction
    // (in that case the delegate should be called).
    if (kafkaHandle != null && kafkaHandle.IsClosed)
    {
        return;
    }

    try
    {
        logHandler?.Invoke(new LogMessage(Util.Marshal.PtrToStringUTF8(Librdkafka.name(rk)), level, fac, buf));
    }
    catch (Exception)
    {
        // Eat any exception thrown by user log handler code.
    }
}
/// <summary>
///     Initialize a new Error instance from a native pointer to
///     a rd_kafka_error_t object, then destroy the native object.
/// </summary>
internal Error(IntPtr error)
{
    if (error == IntPtr.Zero)
    {
        Code = ErrorCode.NoError;
        reason = null;
        IsFatal = false;
        IsRetriable = false;
        TxnRequiresAbort = false;
        return;
    }

    Code = Librdkafka.error_code(error);
    IsFatal = Librdkafka.error_is_fatal(error);
    TxnRequiresAbort = Librdkafka.error_txn_requires_abort(error);
    IsRetriable = Librdkafka.error_is_retriable(error);
    reason = Librdkafka.error_string(error);

    Librdkafka.error_destroy(error);
}
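On the consuming side of this type, application code usually only reads Code, Reason and the fatal/retriable flags, however the Error instance was constructed. A small sketch using the public error handler hook; the broker address is a placeholder and the handler body is illustrative only.

using var producer = new ProducerBuilder<Null, string>(
        new ProducerConfig { BootstrapServers = "localhost:9092" })
    .SetErrorHandler((_, error) =>
    {
        if (error.IsFatal)
            Console.WriteLine($"FATAL {error.Code}: {error.Reason}");
        else
            Console.WriteLine($"transient {error.Code}: {error.Reason}");
    })
    .Build();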
internal string Get(string name)
{
    UIntPtr destSize = (UIntPtr)0;
    StringBuilder sb = null;

    ConfRes res = Librdkafka.conf_get(handle, name, null, ref destSize);
    if (res == ConfRes.Ok)
    {
        sb = new StringBuilder((int)destSize);
        res = Librdkafka.conf_get(handle, name, sb, ref destSize);
    }
    if (res != ConfRes.Ok)
    {
        if (res == ConfRes.Unknown)
        {
            throw new InvalidOperationException($"No such configuration property: {name}");
        }
        throw new Exception("Unknown error while getting configuration property");
    }

    return sb?.ToString();
}
internal void Set(string name, string value)
{
    var errorStringBuilder = new StringBuilder(Librdkafka.MaxErrorStringLength);
    ConfRes res = Librdkafka.conf_set(handle, name, value, errorStringBuilder, (UIntPtr)errorStringBuilder.Capacity);
    if (res == ConfRes.Ok)
    {
        return;
    }
    else if (res == ConfRes.Invalid)
    {
        throw new ArgumentException(errorStringBuilder.ToString());
    }
    else if (res == ConfRes.Unknown)
    {
        throw new InvalidOperationException(errorStringBuilder.ToString());
    }
    else
    {
        throw new Exception("Unknown error while setting configuration property");
    }
}
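Because Set maps ConfRes.Invalid to ArgumentException and ConfRes.Unknown to InvalidOperationException, bad configuration surfaces as soon as a client is built. A hedged sketch under that assumption; the invalid value is deliberate and the keys are standard librdkafka property names.

var config = new Dictionary<string, string>
{
    { "bootstrap.servers", "localhost:9092" },
    { "linger.ms", "not-a-number" }   // invalid value -> ConfRes.Invalid
};

try
{
    using var producer = new ProducerBuilder<Null, string>(config).Build();
}
catch (ArgumentException e)
{
    Console.WriteLine($"invalid configuration value: {e.Message}");
}
catch (InvalidOperationException e)
{
    Console.WriteLine($"unknown configuration property: {e.Message}");
}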
internal Producer(ProducerBuilder <TKey, TValue> builder) { var baseConfig = builder.ConstructBaseConfig(this); // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false. // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks // that never complete are never created when this is set to true. this.statisticsHandler = baseConfig.statisticsHandler; this.logHandler = baseConfig.logHandler; this.errorHandler = baseConfig.errorHandler; this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler; this.partitioners = baseConfig.partitioners; var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs); this.DeliveryReportCallback = DeliveryReportCallbackImpl; Librdkafka.Initialize(null); var modifiedConfig = Library.NameAndVersionConfig .Concat(config .Where(prop => prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll && prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports && prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields)) .ToList(); if (modifiedConfig.Where(obj => obj.Key == "delivery.report.only.error").Count() > 0) { // A managed object is kept alive over the duration of the produce request. If there is no // delivery report generated, there will be a memory leak. We could possibly support this // property by keeping track of delivery reports in managed code, but this seems like // more trouble than it's worth. throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client"); } var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value; if (enableBackgroundPollObj != null) { this.manualPoll = !bool.Parse(enableBackgroundPollObj); } var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value; if (enableDeliveryReportsObj != null) { this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj); } var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value; if (deliveryReportEnabledFieldsObj != null) { var fields = deliveryReportEnabledFieldsObj.Replace(" ", ""); if (fields != "all") { this.enableDeliveryReportKey = false; this.enableDeliveryReportValue = false; this.enableDeliveryReportHeaders = false; this.enableDeliveryReportTimestamp = false; this.enableDeliveryReportPersistedStatus = false; if (fields != "none") { var parts = fields.Split(','); foreach (var part in parts) { switch (part) { case "key": this.enableDeliveryReportKey = true; break; case "value": this.enableDeliveryReportValue = true; break; case "timestamp": this.enableDeliveryReportTimestamp = true; break; case "headers": this.enableDeliveryReportHeaders = true; break; case "status": this.enableDeliveryReportPersistedStatus = true; break; default: throw new ArgumentException( $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'."); } } } } } var configHandle = SafeConfigHandle.Create(); modifiedConfig.ForEach((kvp) => { if (kvp.Value == null) { throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null."); } configHandle.Set(kvp.Key, kvp.Value); }); IntPtr configPtr = configHandle.DangerousGetHandle(); if (enableDeliveryReports) { Librdkafka.conf_set_dr_msg_cb(configPtr, 
DeliveryReportCallback); } // Explicitly keep references to delegates so they are not reclaimed by the GC. errorCallbackDelegate = ErrorCallback; logCallbackDelegate = LogCallback; statisticsCallbackDelegate = StatisticsCallback; oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback; if (errorHandler != null) { Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate); } if (logHandler != null) { Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate); } if (statisticsHandler != null) { Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate); } if (oAuthBearerTokenRefreshHandler != null) { Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate); } this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this); if (this.partitioners?.Any() ?? false) { foreach (var partitioner in this.partitioners) { var topicConfigHandle = SafeTopicConfigHandle.Create(); IntPtr topicConfigPtr = topicConfigHandle.DangerousGetHandle(); Librdkafka.PartitionerDelegate partitionerDelegate = (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt, IntPtr rkt_opaque, IntPtr msg_opaque) => { if (this.ownedKafkaHandle.IsClosed) { return(Partition.Any); } var topic = partitioner.Key; var providedPartitioner = partitioner.Value; return(CallCustomPartitioner(topic, providedPartitioner, keydata, keylen, partition_cnt, rkt_opaque, msg_opaque)); }; this.partitionerCallbacks.Add(partitioner.Key, partitionerDelegate); // Set partitioner on the topic_conf... Librdkafka.topic_conf_set_partitioner_cb(topicConfigPtr, partitionerDelegate); // Associate topic_conf with topic // this also caches the topic handle (and topic_conf) this.ownedKafkaHandle.getKafkaTopicHandle(partitioner.Key, topicConfigPtr); // topic_conf ownership was transferred topicConfigHandle.SetHandleAsInvalid(); } } configHandle.SetHandleAsInvalid(); // config object is no longer usable. if (!manualPoll) { callbackCts = new CancellationTokenSource(); callbackTask = StartPollTask(callbackCts.Token); } InitializeSerializers( builder.KeySerializer, builder.ValueSerializer, builder.AsyncKeySerializer, builder.AsyncValueSerializer); }
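From the outside, the delivery-report switches parsed in the constructor above are normally driven through ProducerConfig, whose EnableDeliveryReports and DeliveryReportFields properties map to the dotnet.producer.* keys that are filtered out of the librdkafka config. A minimal usage sketch; the broker address, topic name and handler bodies are placeholders.

var config = new ProducerConfig
{
    BootstrapServers = "localhost:9092",
    EnableDeliveryReports = true,
    DeliveryReportFields = "key,status"   // only marshal the key and persistence status
};

using var producer = new ProducerBuilder<string, string>(config)
    .SetLogHandler((_, msg) => Console.WriteLine($"{msg.Level} {msg.Facility}: {msg.Message}"))
    .Build();

var result = await producer.ProduceAsync(
    "orders", new Message<string, string> { Key = "k1", Value = "v1" });
Console.WriteLine($"{result.TopicPartitionOffset} status={result.Status}");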
internal string GetName() => Util.Marshal.PtrToStringUTF8(Librdkafka.topic_name(handle));
internal bool PartitionAvailable(int partition) => Librdkafka.topic_partition_available(handle, partition);
internal IntPtr Dup() => Librdkafka.conf_dup(handle);
internal SafeTopicConfigHandle Duplicate() => Librdkafka.topic_conf_dup(this);
internal Producer(ProducerBuilder <TKey, TValue> builder) { var baseConfig = builder.ConstructBaseConfig(this); // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false. // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks // that never complete are never created when this is set to true. this.statisticsHandler = baseConfig.statisticsHandler; this.logHandler = baseConfig.logHandler; this.errorHandler = baseConfig.errorHandler; var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs); this.DeliveryReportCallback = DeliveryReportCallbackImpl; Librdkafka.Initialize(null); var modifiedConfig = config .Where(prop => prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll && prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports && prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields); if (modifiedConfig.Where(obj => obj.Key == "delivery.report.only.error").Count() > 0) { // A managed object is kept alive over the duration of the produce request. If there is no // delivery report generated, there will be a memory leak. We could possibly support this // property by keeping track of delivery reports in managed code, but this seems like // more trouble than it's worth. throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client"); } var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value; if (enableBackgroundPollObj != null) { this.manualPoll = !bool.Parse(enableBackgroundPollObj); } var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value; if (enableDeliveryReportsObj != null) { this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj); } var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value; if (deliveryReportEnabledFieldsObj != null) { var fields = deliveryReportEnabledFieldsObj.Replace(" ", ""); if (fields != "all") { this.enableDeliveryReportKey = false; this.enableDeliveryReportValue = false; this.enableDeliveryReportHeaders = false; this.enableDeliveryReportTimestamp = false; this.enableDeliveryReportPersistedStatus = false; if (fields != "none") { var parts = fields.Split(','); foreach (var part in parts) { switch (part) { case "key": this.enableDeliveryReportKey = true; break; case "value": this.enableDeliveryReportValue = true; break; case "timestamp": this.enableDeliveryReportTimestamp = true; break; case "headers": this.enableDeliveryReportHeaders = true; break; case "status": this.enableDeliveryReportPersistedStatus = true; break; default: throw new ArgumentException( $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'."); } } } } } var configHandle = SafeConfigHandle.Create(); modifiedConfig.ToList().ForEach((kvp) => { if (kvp.Value == null) { throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null."); } configHandle.Set(kvp.Key, kvp.Value); }); IntPtr configPtr = configHandle.DangerousGetHandle(); if (enableDeliveryReports) { Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback); } // Explicitly keep references to delegates so they are not reclaimed by the GC. 
errorCallbackDelegate = ErrorCallback; logCallbackDelegate = LogCallback; statisticsCallbackDelegate = StatisticsCallback; Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate); Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate); Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate); this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this); configHandle.SetHandleAsInvalid(); // config object is no longer useable. if (!manualPoll) { callbackCts = new CancellationTokenSource(); callbackTask = StartPollTask(callbackCts.Token); } InitializeSerializers( builder.KeySerializer, builder.ValueSerializer, builder.AsyncKeySerializer, builder.AsyncValueSerializer); }
/// <summary> /// Creates a new <see cref="Confluent.Kafka.ConsumerBase" /> instance. /// </summary> /// <param name="config"> /// A collection of librdkafka configuration parameters /// (refer to https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) /// and parameters specific to this client (refer to: /// <see cref="Confluent.Kafka.ConfigPropertyNames" />). /// At a minimum, 'bootstrap.servers' and 'group.id' must be /// specified. /// </param> public ConsumerBase(IEnumerable <KeyValuePair <string, string> > config) { Librdkafka.Initialize(null); config = Config.GetCancellationDelayMaxMs(config, out this.cancellationDelayMaxMs); if (config.FirstOrDefault(prop => string.Equals(prop.Key, "group.id", StringComparison.Ordinal)).Value == null) { throw new ArgumentException("'group.id' configuration parameter is required and was not specified."); } var modifiedConfig = config .Where(prop => prop.Key != ConfigPropertyNames.Consumer.ConsumeResultFields); var enabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Consumer.ConsumeResultFields).Value; if (enabledFieldsObj != null) { var fields = enabledFieldsObj.ToString().Replace(" ", ""); if (fields != "all") { this.enableHeaderMarshaling = false; this.enableTimestampMarshaling = false; this.enableTopicNameMarshaling = false; if (fields != "none") { var parts = fields.Split(','); foreach (var part in parts) { switch (part) { case "headers": this.enableHeaderMarshaling = true; break; case "timestamp": this.enableTimestampMarshaling = true; break; case "topic": this.enableTopicNameMarshaling = true; break; default: throw new ArgumentException( $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'."); } } } } } var configHandle = SafeConfigHandle.Create(); modifiedConfig .ToList() .ForEach((kvp) => { if (kvp.Value == null) { throw new ArgumentException($"'{kvp.Key}' configuration parameter must not be null."); } configHandle.Set(kvp.Key, kvp.Value.ToString()); }); // Explicitly keep references to delegates so they are not reclaimed by the GC. rebalanceDelegate = RebalanceCallback; commitDelegate = CommitCallback; errorCallbackDelegate = ErrorCallback; logCallbackDelegate = LogCallback; statsCallbackDelegate = StatsCallback; IntPtr configPtr = configHandle.DangerousGetHandle(); Librdkafka.conf_set_rebalance_cb(configPtr, rebalanceDelegate); Librdkafka.conf_set_offset_commit_cb(configPtr, commitDelegate); Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate); Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate); Librdkafka.conf_set_stats_cb(configPtr, statsCallbackDelegate); this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Consumer, configPtr, this); configHandle.SetHandleAsInvalid(); // config object is no longer useable. var pollSetConsumerError = kafkaHandle.PollSetConsumer(); if (pollSetConsumerError != ErrorCode.NoError) { throw new KafkaException(new Error(pollSetConsumerError, $"Failed to redirect the poll queue to consumer_poll queue: {ErrorCodeExtensions.GetReason(pollSetConsumerError)}")); } }
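The group.id requirement and the consume-result field filtering checked above are usually driven through ConsumerConfig: GroupId is the required property, and ConsumeResultFields is assumed here to be the strongly typed alias for the dotnet.consumer.consume.result.fields key in current releases. A minimal construction sketch with placeholder broker, group and topic names.

var config = new ConsumerConfig
{
    BootstrapServers = "localhost:9092",
    GroupId = "example-group",                 // required; the constructor throws without it
    AutoOffsetReset = AutoOffsetReset.Earliest,
    ConsumeResultFields = "topic,timestamp"    // skip header marshaling entirely
};

using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
consumer.Subscribe("orders");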
private void DeliveryReportCallbackImpl(IntPtr rk, IntPtr rkmessage, IntPtr opaque) { // Ensure registered handlers are never called as a side-effect of Dispose/Finalize (prevents deadlocks in common scenarios). if (ownedKafkaHandle.IsClosed) { return; } var msg = Util.Marshal.PtrToStructure <rd_kafka_message>(rkmessage); // the msg._private property has dual purpose. Here, it is an opaque pointer set // by Topic.Produce to be an IDeliveryHandler. When Consuming, it's for internal // use (hence the name). if (msg._private == IntPtr.Zero) { // Note: this can occur if the ProduceAsync overload that accepts a DeliveryHandler // was used and the delivery handler was set to null. return; } var gch = GCHandle.FromIntPtr(msg._private); var deliveryHandler = (IDeliveryHandler)gch.Target; gch.Free(); Headers headers = null; if (this.enableDeliveryReportHeaders) { headers = new Headers(); Librdkafka.message_headers(rkmessage, out IntPtr hdrsPtr); if (hdrsPtr != IntPtr.Zero) { for (var i = 0; ; ++i) { var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep); if (err != ErrorCode.NoError) { break; } var headerName = Util.Marshal.PtrToStringUTF8(namep); byte[] headerValue = null; if (valuep != IntPtr.Zero) { headerValue = new byte[(int)sizep]; Marshal.Copy(valuep, headerValue, 0, (int)sizep); } headers.Add(headerName, headerValue); } } } IntPtr timestampType = (IntPtr)TimestampType.NotAvailable; long timestamp = 0; if (enableDeliveryReportTimestamp) { timestamp = Librdkafka.message_timestamp(rkmessage, out timestampType); } PersistenceStatus messageStatus = PersistenceStatus.PossiblyPersisted; if (enableDeliveryReportPersistedStatus) { messageStatus = Librdkafka.message_status(rkmessage); } deliveryHandler.HandleDeliveryReport( new DeliveryReport <Null, Null> { // Topic is not set here in order to avoid the marshalling cost. // Instead, the delivery handler is expected to cache the topic string. Partition = msg.partition, Offset = msg.offset, Error = KafkaHandle.CreatePossiblyFatalError(msg.err, null), Status = messageStatus, Message = new Message <Null, Null> { Timestamp = new Timestamp(timestamp, (TimestampType)timestampType), Headers = headers } } ); }
internal Producer(ProducerBuilder <TKey, TValue> builder) { var baseConfig = builder.ConstructBaseConfig(this); var partitioners = baseConfig.partitioners; var defaultPartitioner = baseConfig.defaultPartitioner; // TODO: Make Tasks auto complete when EnableDeliveryReportsPropertyName is set to false. // TODO: Hijack the "delivery.report.only.error" configuration parameter and add functionality to enforce that Tasks // that never complete are never created when this is set to true. this.statisticsHandler = baseConfig.statisticsHandler; this.logHandler = baseConfig.logHandler; this.errorHandler = baseConfig.errorHandler; this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler; var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs); this.DeliveryReportCallback = DeliveryReportCallbackImpl; Librdkafka.Initialize(null); var modifiedConfig = Library.NameAndVersionConfig .Concat(config .Where(prop => prop.Key != ConfigPropertyNames.Producer.EnableBackgroundPoll && prop.Key != ConfigPropertyNames.Producer.EnableDeliveryReports && prop.Key != ConfigPropertyNames.Producer.DeliveryReportFields)) .ToList(); if (modifiedConfig.Where(obj => obj.Key == "delivery.report.only.error").Count() > 0) { // A managed object is kept alive over the duration of the produce request. If there is no // delivery report generated, there will be a memory leak. We could possibly support this // property by keeping track of delivery reports in managed code, but this seems like // more trouble than it's worth. throw new ArgumentException("The 'delivery.report.only.error' property is not supported by this client"); } var enableBackgroundPollObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableBackgroundPoll).Value; if (enableBackgroundPollObj != null) { this.manualPoll = !bool.Parse(enableBackgroundPollObj); } var enableDeliveryReportsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.EnableDeliveryReports).Value; if (enableDeliveryReportsObj != null) { this.enableDeliveryReports = bool.Parse(enableDeliveryReportsObj); } var deliveryReportEnabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Producer.DeliveryReportFields).Value; if (deliveryReportEnabledFieldsObj != null) { var fields = deliveryReportEnabledFieldsObj.Replace(" ", ""); if (fields != "all") { this.enableDeliveryReportKey = false; this.enableDeliveryReportValue = false; this.enableDeliveryReportHeaders = false; this.enableDeliveryReportTimestamp = false; this.enableDeliveryReportPersistedStatus = false; if (fields != "none") { var parts = fields.Split(','); foreach (var part in parts) { switch (part) { case "key": this.enableDeliveryReportKey = true; break; case "value": this.enableDeliveryReportValue = true; break; case "timestamp": this.enableDeliveryReportTimestamp = true; break; case "headers": this.enableDeliveryReportHeaders = true; break; case "status": this.enableDeliveryReportPersistedStatus = true; break; default: throw new ArgumentException( $"Unknown delivery report field name '{part}' in config value '{ConfigPropertyNames.Producer.DeliveryReportFields}'."); } } } } } var configHandle = SafeConfigHandle.Create(); IntPtr configPtr = configHandle.DangerousGetHandle(); modifiedConfig.ForEach((kvp) => { if (kvp.Value == null) { throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null."); } configHandle.Set(kvp.Key, kvp.Value); }); if (enableDeliveryReports) 
{ Librdkafka.conf_set_dr_msg_cb(configPtr, DeliveryReportCallback); } // Explicitly keep references to delegates so they are not reclaimed by the GC. errorCallbackDelegate = ErrorCallback; logCallbackDelegate = LogCallback; statisticsCallbackDelegate = StatisticsCallback; oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback; if (errorHandler != null) { Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate); } if (logHandler != null) { Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate); } if (statisticsHandler != null) { Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate); } if (oAuthBearerTokenRefreshHandler != null) { Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate); } Action <SafeTopicConfigHandle, PartitionerDelegate> addPartitionerToTopicConfig = (topicConfigHandle, partitioner) => { Librdkafka.PartitionerDelegate librdkafkaPartitioner = (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt, IntPtr rkt_opaque, IntPtr msg_opaque) => { unsafe { var topicNamePtr = Librdkafka.topic_name(rkt); var topic = Util.Marshal.PtrToStringUTF8(topicNamePtr); var keyIsNull = keydata == IntPtr.Zero; var keyBytes = keyIsNull ? ReadOnlySpan <byte> .Empty : new ReadOnlySpan <byte>(keydata.ToPointer(), (int)keylen); return(partitioner(topic, partition_cnt, keyBytes, keyIsNull)); } }; this.partitionerHandles.Add(GCHandle.Alloc(librdkafkaPartitioner)); Librdkafka.topic_conf_set_partitioner_cb(topicConfigHandle.DangerousGetHandle(), librdkafkaPartitioner); }; // Configure the default custom partitioner. if (defaultPartitioner != null) { // The default topic config may have been modified by topic-level // configuration parameters passed down from the top level config. // If that's the case, duplicate the default topic config to avoid // clobbering any already configured values. var defaultTopicConfigHandle = configHandle.GetDefaultTopicConfig(); SafeTopicConfigHandle topicConfigHandle = defaultTopicConfigHandle.DangerousGetHandle() != IntPtr.Zero ? defaultTopicConfigHandle.Duplicate() : SafeTopicConfigHandle.Create(); addPartitionerToTopicConfig(topicConfigHandle, defaultPartitioner); Librdkafka.conf_set_default_topic_conf(configPtr, topicConfigHandle.DangerousGetHandle()); } this.ownedKafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Producer, configPtr, this); configHandle.SetHandleAsInvalid(); // ownership was transferred. // Per-topic partitioners. foreach (var partitioner in partitioners) { var topicConfigHandle = this.ownedKafkaHandle.DuplicateDefaultTopicConfig(); addPartitionerToTopicConfig(topicConfigHandle, partitioner.Value); this.ownedKafkaHandle.newTopic(partitioner.Key, topicConfigHandle.DangerousGetHandle()); } if (!manualPoll) { callbackCts = new CancellationTokenSource(); callbackTask = StartPollTask(callbackCts.Token); } InitializeSerializers( builder.KeySerializer, builder.ValueSerializer, builder.AsyncKeySerializer, builder.AsyncValueSerializer); }
/// <summary> /// Refer to <see cref="Confluent.Kafka.IConsumer{TKey, TValue}.Consume(int)" /> /// </summary> public ConsumeResult <TKey, TValue> Consume(int millisecondsTimeout) { var msgPtr = kafkaHandle.ConsumerPoll((IntPtr)millisecondsTimeout); if (this.handlerException != null) { var ex = this.handlerException; this.handlerException = null; if (msgPtr != IntPtr.Zero) { Librdkafka.message_destroy(msgPtr); } throw ex; } if (msgPtr == IntPtr.Zero) { return(null); } try { var msg = Util.Marshal.PtrToStructure <rd_kafka_message>(msgPtr); string topic = null; if (this.enableTopicNameMarshaling) { if (msg.rkt != IntPtr.Zero) { topic = Util.Marshal.PtrToStringUTF8(Librdkafka.topic_name(msg.rkt)); } } if (msg.err == ErrorCode.Local_PartitionEOF) { return(new ConsumeResult <TKey, TValue> { TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset), Message = null, IsPartitionEOF = true }); } long timestampUnix = 0; IntPtr timestampType = (IntPtr)TimestampType.NotAvailable; if (enableTimestampMarshaling) { timestampUnix = Librdkafka.message_timestamp(msgPtr, out timestampType); } var timestamp = new Timestamp(timestampUnix, (TimestampType)timestampType); Headers headers = null; if (enableHeaderMarshaling) { headers = new Headers(); Librdkafka.message_headers(msgPtr, out IntPtr hdrsPtr); if (hdrsPtr != IntPtr.Zero) { for (var i = 0; ; ++i) { var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep); if (err != ErrorCode.NoError) { break; } var headerName = Util.Marshal.PtrToStringUTF8(namep); byte[] headerValue = null; if (valuep != IntPtr.Zero) { headerValue = new byte[(int)sizep]; Marshal.Copy(valuep, headerValue, 0, (int)sizep); } headers.Add(headerName, headerValue); } } } if (msg.err != ErrorCode.NoError) { throw new ConsumeException( new ConsumeResult <byte[], byte[]> { TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset), Message = new Message <byte[], byte[]> { Timestamp = timestamp, Headers = headers, Key = KeyAsByteArray(msg), Value = ValueAsByteArray(msg) }, IsPartitionEOF = false }, kafkaHandle.CreatePossiblyFatalMessageError(msgPtr)); } TKey key; try { unsafe { key = keyDeserializer.Deserialize( msg.key == IntPtr.Zero ? ReadOnlySpan <byte> .Empty : new ReadOnlySpan <byte>(msg.key.ToPointer(), (int)msg.key_len), msg.key == IntPtr.Zero, new SerializationContext(MessageComponentType.Key, topic, headers)); } } catch (Exception ex) { throw new ConsumeException( new ConsumeResult <byte[], byte[]> { TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset), Message = new Message <byte[], byte[]> { Timestamp = timestamp, Headers = headers, Key = KeyAsByteArray(msg), Value = ValueAsByteArray(msg) }, IsPartitionEOF = false }, new Error(ErrorCode.Local_KeyDeserialization), ex); } TValue val; try { unsafe { val = valueDeserializer.Deserialize( msg.val == IntPtr.Zero ? 
ReadOnlySpan <byte> .Empty : new ReadOnlySpan <byte>(msg.val.ToPointer(), (int)msg.len), msg.val == IntPtr.Zero, new SerializationContext(MessageComponentType.Value, topic, headers)); } } catch (Exception ex) { throw new ConsumeException( new ConsumeResult <byte[], byte[]> { TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset), Message = new Message <byte[], byte[]> { Timestamp = timestamp, Headers = headers, Key = KeyAsByteArray(msg), Value = ValueAsByteArray(msg) }, IsPartitionEOF = false }, new Error(ErrorCode.Local_ValueDeserialization), ex); } return(new ConsumeResult <TKey, TValue> { TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset), Message = new Message <TKey, TValue> { Timestamp = timestamp, Headers = headers, Key = key, Value = val }, IsPartitionEOF = false }); } finally { Librdkafka.message_destroy(msgPtr); } }
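A typical poll loop over the Consume(int) overload above goes through Consume(CancellationToken), with ConsumeException carrying the raw record when deserialization or the broker reports an error. A hedged sketch that assumes the consumer built in the earlier sketch, and that EnablePartitionEof is turned on if IsPartitionEOF results are expected.

using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30));
try
{
    while (true)
    {
        try
        {
            var cr = consumer.Consume(cts.Token);
            if (cr.IsPartitionEOF)
            {
                Console.WriteLine($"reached end of {cr.TopicPartitionOffset}");
                continue;
            }
            Console.WriteLine(
                $"{cr.TopicPartitionOffset}: {cr.Message.Value} @ {cr.Message.Timestamp.UtcDateTime}");
        }
        catch (ConsumeException e)
        {
            // Deserialization and broker errors thrown by Consume land here.
            Console.WriteLine($"consume error: {e.Error.Reason}");
        }
    }
}
catch (OperationCanceledException) { }
finally
{
    consumer.Close();
}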
internal Consumer(ConsumerBuilder <TKey, TValue> builder) { var baseConfig = builder.ConstructBaseConfig(this); this.statisticsHandler = baseConfig.statisticsHandler; this.logHandler = baseConfig.logHandler; this.errorHandler = baseConfig.errorHandler; this.partitionsAssignedHandler = baseConfig.partitionsAssignedHandler; this.partitionsRevokedHandler = baseConfig.partitionsRevokedHandler; this.partitionsLostHandler = baseConfig.partitionsLostHandler; this.offsetsCommittedHandler = baseConfig.offsetsCommittedHandler; this.oAuthBearerTokenRefreshHandler = baseConfig.oAuthBearerTokenRefreshHandler; this.revokedOrLostHandlerIsFunc = baseConfig.revokedOrLostHandlerIsFunc; Librdkafka.Initialize(null); var config = Confluent.Kafka.Config.ExtractCancellationDelayMaxMs(baseConfig.config, out this.cancellationDelayMaxMs); if (config.FirstOrDefault(prop => string.Equals(prop.Key, "group.id", StringComparison.Ordinal)).Value == null) { throw new ArgumentException("'group.id' configuration parameter is required and was not specified."); } var modifiedConfig = Library.NameAndVersionConfig .Concat(config.Where(prop => prop.Key != ConfigPropertyNames.Consumer.ConsumeResultFields)) .ToList(); var enabledFieldsObj = config.FirstOrDefault(prop => prop.Key == ConfigPropertyNames.Consumer.ConsumeResultFields).Value; if (enabledFieldsObj != null) { var fields = enabledFieldsObj.Replace(" ", ""); if (fields != "all") { this.enableHeaderMarshaling = false; this.enableTimestampMarshaling = false; this.enableTopicNameMarshaling = false; if (fields != "none") { var parts = fields.Split(','); foreach (var part in parts) { switch (part) { case "headers": this.enableHeaderMarshaling = true; break; case "timestamp": this.enableTimestampMarshaling = true; break; case "topic": this.enableTopicNameMarshaling = true; break; default: throw new ArgumentException( $"Unexpected consume result field name '{part}' in config value '{ConfigPropertyNames.Consumer.ConsumeResultFields}'."); } } } } } var configHandle = SafeConfigHandle.Create(); modifiedConfig.ForEach((kvp) => { if (kvp.Value == null) { throw new ArgumentNullException($"'{kvp.Key}' configuration parameter must not be null."); } configHandle.Set(kvp.Key, kvp.Value); }); // Explicitly keep references to delegates so they are not reclaimed by the GC. rebalanceDelegate = RebalanceCallback; commitDelegate = CommitCallback; errorCallbackDelegate = ErrorCallback; logCallbackDelegate = LogCallback; statisticsCallbackDelegate = StatisticsCallback; oAuthBearerTokenRefreshCallbackDelegate = OAuthBearerTokenRefreshCallback; IntPtr configPtr = configHandle.DangerousGetHandle(); if (partitionsAssignedHandler != null || partitionsRevokedHandler != null || partitionsLostHandler != null) { Librdkafka.conf_set_rebalance_cb(configPtr, rebalanceDelegate); } if (offsetsCommittedHandler != null) { Librdkafka.conf_set_offset_commit_cb(configPtr, commitDelegate); } if (errorHandler != null) { Librdkafka.conf_set_error_cb(configPtr, errorCallbackDelegate); } if (logHandler != null) { Librdkafka.conf_set_log_cb(configPtr, logCallbackDelegate); } if (statisticsHandler != null) { Librdkafka.conf_set_stats_cb(configPtr, statisticsCallbackDelegate); } if (oAuthBearerTokenRefreshHandler != null) { Librdkafka.conf_set_oauthbearer_token_refresh_cb(configPtr, oAuthBearerTokenRefreshCallbackDelegate); } this.kafkaHandle = SafeKafkaHandle.Create(RdKafkaType.Consumer, configPtr, this); configHandle.SetHandleAsInvalid(); // config object is no longer useable. 
var pollSetConsumerError = kafkaHandle.PollSetConsumer(); if (pollSetConsumerError != ErrorCode.NoError) { throw new KafkaException(new Error(pollSetConsumerError, $"Failed to redirect the poll queue to consumer_poll queue: {ErrorCodeExtensions.GetReason(pollSetConsumerError)}")); } // setup key deserializer. if (builder.KeyDeserializer == null) { if (!defaultDeserializers.TryGetValue(typeof(TKey), out object deserializer)) { throw new InvalidOperationException( $"Key deserializer was not specified and there is no default deserializer defined for type {typeof(TKey).Name}."); } this.keyDeserializer = (IDeserializer <TKey>)deserializer; } else { this.keyDeserializer = builder.KeyDeserializer; } // setup value deserializer. if (builder.ValueDeserializer == null) { if (!defaultDeserializers.TryGetValue(typeof(TValue), out object deserializer)) { throw new InvalidOperationException( $"Value deserializer was not specified and there is no default deserializer defined for type {typeof(TValue).Name}."); } this.valueDeserializer = (IDeserializer <TValue>)deserializer; } else { this.valueDeserializer = builder.ValueDeserializer; } }
/// <summary>
///     Loads the native librdkafka library from the specified path (note: the
///     specified path needs to include the filename). Does nothing if the
///     library is already loaded.
/// </summary>
/// <returns>
///     true if librdkafka was loaded as a result of this call, false if the
///     library has already been loaded.
/// </returns>
/// <remarks>
///     You will not typically need to call this method - librdkafka is loaded
///     automatically on first use of a Producer or Consumer instance.
/// </remarks>
public static bool Load(string path)
    => Librdkafka.Initialize(path);
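A minimal sketch of explicit loading, assuming the Library.IsLoaded and Library.VersionString members of the public API; the path is a placeholder for wherever a custom librdkafka build lives.

if (!Library.IsLoaded)
{
    Library.Load("/opt/librdkafka/lib/librdkafka.so");
}
Console.WriteLine($"librdkafka version: {Library.VersionString}");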
internal SafeTopicConfigHandle GetDefaultTopicConfig() => Librdkafka.conf_get_default_topic_conf(this);