/// <summary>
///     Wires the Confluent consumer builder callbacks to the handlers of the owning
///     <see cref="KafkaConsumer" />.
/// </summary>
/// <param name="ownerConsumer">The consumer that owns the underlying Confluent consumer.</param>
/// <param name="consumerBuilder">The builder whose event handlers are being configured.</param>
public void SetConsumerEventsHandlers(
    KafkaConsumer ownerConsumer,
    IConfluentConsumerBuilder consumerBuilder)
{
    consumerBuilder
        .SetStatisticsHandler(OnConsumerStatistics)
        .SetPartitionsAssignedHandler(
            (confluentConsumer, topicPartitions) =>
                OnPartitionsAssigned(ownerConsumer, confluentConsumer, topicPartitions))
        .SetPartitionsRevokedHandler(
            (confluentConsumer, topicPartitionOffsets) =>
                OnPartitionsRevoked(ownerConsumer, confluentConsumer, topicPartitionOffsets))
        .SetOffsetsCommittedHandler(OnOffsetsCommitted)
        .SetErrorHandler((_, kafkaError) => OnConsumerError(ownerConsumer, kafkaError));
}
/// <summary>
///     Initializes a new instance of the <see cref="ConsumeLoopHandler" /> class.
/// </summary>
/// <param name="consumer">The owning <see cref="KafkaConsumer" />.</param>
/// <param name="confluenceConsumer">The underlying Confluent consumer to poll.</param>
/// <param name="channelsManager">The optional channels manager the consumed results are forwarded to.</param>
/// <param name="logger">The logger.</param>
public ConsumeLoopHandler(
    KafkaConsumer consumer,
    IConsumer<byte[]?, byte[]?> confluenceConsumer,
    ChannelsManager? channelsManager,
    ISilverbackIntegrationLogger logger)
{
    // Guard every required dependency up front; only the channels manager is optional.
    _logger = Check.NotNull(logger, nameof(logger));
    _consumer = Check.NotNull(consumer, nameof(consumer));
    _confluenceConsumer = Check.NotNull(confluenceConsumer, nameof(confluenceConsumer));
    _channelsManager = channelsManager;
}
/// <inheritdoc cref="IKafkaConsumerLogCallback.OnConsumerLog" />
public bool OnConsumerLog(LogMessage logMessage, KafkaConsumer consumer)
{
    // Nothing to handle without both a message and a target consumer.
    if (logMessage is null || consumer is null)
        return false;

    // Only MAXPOLL (poll timeout) messages are handled here; everything else
    // falls through to the default logging.
    return logMessage.Facility == "MAXPOLL" && consumer.OnPollTimeout(logMessage);
}
/// <summary>
///     Handles the rebalance callback fired when partitions are revoked: notifies the
///     owning consumer, logs each revoked partition and publishes a
///     <see cref="KafkaPartitionsRevokedEvent" />.
/// </summary>
private void OnPartitionsRevoked(
    KafkaConsumer ownerConsumer,
    IConsumer<byte[]?, byte[]?> consumer,
    List<TopicPartitionOffset> partitions)
{
    // Let the owner consumer react first (e.g. stop processing the revoked partitions).
    ownerConsumer.OnPartitionsRevoked();

    foreach (var partition in partitions)
    {
        _logger.LogInformation(
            KafkaEventIds.PartitionsRevoked,
            "Revoked partition {topic} {partition}, member id: {memberId}",
            partition.Topic,
            partition.Partition,
            consumer.MemberId);
    }

    CreateScopeAndPublishEvent(new KafkaPartitionsRevokedEvent(partitions, consumer.MemberId));
}
/// <summary>
///     Handles the rebalance callback fired when partitions are assigned: logs the
///     assignment, publishes a <see cref="KafkaPartitionsAssignedEvent" /> (whose
///     subscribers may override the starting offsets) and notifies the owning consumer.
/// </summary>
/// <returns>
///     The (possibly modified) partition offsets the Confluent consumer must start from.
/// </returns>
private IEnumerable<TopicPartitionOffset> OnPartitionsAssigned(
    KafkaConsumer ownerConsumer,
    IConsumer<byte[]?, byte[]?> consumer,
    List<TopicPartition> partitions)
{
    foreach (var partition in partitions)
    {
        _logger.LogInformation(
            KafkaEventIds.PartitionsAssigned,
            "Assigned partition {topic} {partition}, member id: {memberId}",
            partition.Topic,
            partition.Partition,
            consumer.MemberId);
    }

    // Subscribers of this event can replace the offsets to force a reset.
    var partitionsAssignedEvent = new KafkaPartitionsAssignedEvent(partitions, consumer.MemberId);
    CreateScopeAndPublishEvent(partitionsAssignedEvent);

    foreach (var topicPartitionOffset in partitionsAssignedEvent.Partitions)
    {
        // Offset.Unset means "no override" -> nothing worth logging.
        if (topicPartitionOffset.Offset == Offset.Unset)
            continue;

        _logger.LogDebug(
            KafkaEventIds.PartitionOffsetReset,
            "{topic} {partition} offset will be reset to {offset}.",
            topicPartitionOffset.Topic,
            topicPartitionOffset.Partition,
            topicPartitionOffset.Offset);
    }

    ownerConsumer.OnPartitionsAssigned(
        partitionsAssignedEvent.Partitions
            .Select(topicPartitionOffset => topicPartitionOffset.TopicPartition)
            .ToList());

    return partitionsAssignedEvent.Partitions;
}
/// <summary>
///     Handles the errors reported by the Confluent consumer: publishes a
///     <see cref="KafkaErrorEvent" /> and, unless a subscriber marks it as handled,
///     logs the error (critical when fatal, error otherwise).
/// </summary>
private void OnConsumerError(KafkaConsumer consumer, Error error)
{
    // Errors reported while disconnecting are ignored: librdkafka randomly
    // emits "brokers are down" errors during the shutdown sequence.
    if (!consumer.IsConnected)
        return;

    var kafkaErrorEvent = new KafkaErrorEvent(error);

    try
    {
        CreateScopeAndPublishEvent(kafkaErrorEvent);
    }
    catch (Exception ex)
    {
        // A faulty subscriber must not break the error handling pipeline.
        _logger.LogError(
            KafkaEventIds.KafkaErrorHandlerError,
            ex,
            "Error in KafkaErrorEvent subscriber.");
    }

    if (kafkaErrorEvent.Handled)
        return;

    var logLevel = error.IsFatal ? LogLevel.Critical : LogLevel.Error;
    _logger.Log(
        logLevel,
        KafkaEventIds.ConsumerError,
        "Error in Kafka consumer: {error} (topic(s): {topics})",
        error,
        consumer.Endpoint.Names);
}
/// <summary>
///     Initializes a new instance of the <see cref="ChannelsManager" /> class, creating
///     one channel per assigned partition (or a single shared channel when the partitions
///     are not processed independently) plus the per-channel read state.
/// </summary>
/// <param name="partitions">The assigned partitions.</param>
/// <param name="consumer">The owning <see cref="KafkaConsumer" />.</param>
/// <param name="sequenceStores">The sequence stores used by the channel processors.</param>
/// <param name="logger">The logger.</param>
public ChannelsManager(
    IList<TopicPartition> partitions,
    KafkaConsumer consumer,
    IList<ISequenceStore> sequenceStores,
    ISilverbackIntegrationLogger logger)
{
    // Copy the partitions array to avoid concurrency issues if a rebalance occurs while initializing
    _partitions = Check.NotNull(partitions, nameof(partitions)).ToList();
    _consumer = Check.NotNull(consumer, nameof(consumer));
    _sequenceStores = Check.NotNull(sequenceStores, nameof(sequenceStores));
    _logger = Check.NotNull(logger, nameof(logger));

    // One channel per partition when processing independently, otherwise a single shared channel.
    _channels = consumer.Endpoint.ProcessPartitionsIndependently
        ? new Channel<ConsumeResult<byte[]?, byte[]?>>[partitions.Count]
        : new Channel<ConsumeResult<byte[]?, byte[]?>>[1];

    // Throttle only when fewer parallel slots are allowed than channels exist.
    if (consumer.Endpoint.MaxDegreeOfParallelism < _channels.Length)
    {
        _messagesLimiterSemaphoreSlim = new SemaphoreSlim(
            consumer.Endpoint.MaxDegreeOfParallelism,
            consumer.Endpoint.MaxDegreeOfParallelism);
    }

    _readCancellationTokenSource = new CancellationTokenSource[_channels.Length];
    _readTaskCompletionSources = new TaskCompletionSource<bool>[_channels.Length];
    IsReading = new bool[_channels.Length];

    consumer.CreateSequenceStores(_channels.Length);

    for (int i = 0; i < _channels.Length; i++)
    {
        _channels[i] = CreateBoundedChannel();
        _readCancellationTokenSource[i] = new CancellationTokenSource();

        // RunContinuationsAsynchronously prevents the thread completing the TCS from
        // synchronously running the awaiting continuations (deadlock/starvation hazard).
        _readTaskCompletionSources[i] =
            new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    }
}