Example 1
        /// <inheritdoc cref="IBroker.ConnectAsync" />
        public async Task ConnectAsync()
        {
            if (IsConnected)
            {
                return;
            }

            if (_consumers == null)
            {
                throw new ObjectDisposedException(GetType().FullName);
            }

            _endpointsConfiguratorsInvoker.Invoke();

            _logger.LogDebug(
                IntegrationEventIds.BrokerConnecting,
                "Connecting to message broker ({broker})...",
                GetType().Name);

            await ConnectAsync(_consumers).ConfigureAwait(false);

            IsConnected = true;

            _logger.LogInformation(
                IntegrationEventIds.BrokerConnected,
                "Connected to message broker ({broker})!",
                GetType().Name);
        }
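
For context, a minimal usage sketch (an assumption, not part of Silverback: the broker is resolved from the dependency injection container and ConnectBrokerAsync is a hypothetical helper):

        public static async Task ConnectBrokerAsync(IServiceProvider serviceProvider)
        {
            // Hypothetical helper; GetRequiredService comes from
            // Microsoft.Extensions.DependencyInjection.
            var broker = serviceProvider.GetRequiredService<IBroker>();

            // Safe to call repeatedly: ConnectAsync returns immediately when
            // IsConnected is already true (see the guard clause above).
            await broker.ConnectAsync().ConfigureAwait(false);
        }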
Example 2
        private static void OnStatistics(string statistics, KafkaConsumer consumer, ISilverbackIntegrationLogger logger)
        {
            logger.LogDebug(KafkaEventIds.ConsumerStatisticsReceived, $"Statistics: {statistics}");

            consumer.Endpoint.Events.StatisticsHandler?.Invoke(
                KafkaStatisticsDeserializer.TryDeserialize(statistics, logger),
                statistics,
                consumer);
        }
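
A hedged sketch of how a statistics handler might be registered on the consumer endpoint; the parameter list (deserialized statistics, raw JSON, consumer) is inferred from the Invoke call above, and the endpoint variable is assumed:

        // Hypothetical registration, inferred from the Invoke call above.
        endpoint.Events.StatisticsHandler = (deserializedStatistics, rawJson, consumer) =>
        {
            // deserializedStatistics may be null when TryDeserialize fails.
            Console.WriteLine($"Received {rawJson.Length} chars of statistics JSON.");
        };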
Example 3
        /// <inheritdoc cref="IConsumer.ConnectAsync" />
        public async Task ConnectAsync()
        {
            if (IsConnected)
            {
                return;
            }

            await ConnectCoreAsync().ConfigureAwait(false);

            IsConnected = true;
            _statusInfo.SetConnected();
            _logger.LogDebug(
                IntegrationEventIds.ConsumerConnected,
                "Connected consumer to endpoint {endpoint}.",
                Endpoint.Name);

            Start();
        }
Example 4
        private async Task TryHandleMessageAsync(object sender, BasicDeliverEventArgs deliverEventArgs)
        {
            try
            {
                var offset = new RabbitOffset(deliverEventArgs.ConsumerTag, deliverEventArgs.DeliveryTag);

                var logData = new Dictionary<string, string>
                {
                    ["deliveryTag"] = offset.DeliveryTag.ToString(CultureInfo.InvariantCulture),
                    ["routingKey"] = deliverEventArgs.RoutingKey
                };

                _logger.LogDebug(
                    RabbitEventIds.ConsumingMessage,
                    "Consuming message {offset} from endpoint {endpointName}.",
                    offset.Value,
                    Endpoint.Name);

                // TODO: Test this!
                if (_disconnecting)
                {
                    return;
                }

                await HandleMessageAsync(
                    deliverEventArgs.Body.ToArray(),
                    deliverEventArgs.BasicProperties.Headers.ToSilverbackHeaders(),
                    Endpoint.Name,
                    offset,
                    logData)
                .ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                // TODO: Prevent duplicate log (FatalExceptionLoggerConsumerBehavior)
                _logger.LogCritical(
                    IntegrationEventIds.ConsumerFatalError,
                    ex,
                    "Fatal error occurred processing the consumed message. The consumer will be stopped.");

                await DisconnectAsync().ConfigureAwait(false);
            }
        }
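
A sketch of how such a handler is typically attached, assuming RabbitMQ.Client's AsyncEventingBasicConsumer (whose Received event matches the (object, BasicDeliverEventArgs) signature above); the StartConsuming helper and queueName parameter are illustrative:

        private void StartConsuming(IModel channel, string queueName)
        {
            // AsyncEventingBasicConsumer requires DispatchConsumersAsync = true
            // on the ConnectionFactory.
            var consumer = new AsyncEventingBasicConsumer(channel);
            consumer.Received += TryHandleMessageAsync;

            // autoAck: false, so the message is acknowledged explicitly only
            // after HandleMessageAsync completes.
            channel.BasicConsume(queueName, autoAck: false, consumer);
        }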
Example 5
        // There's unfortunately no async version of Confluent.Kafka.IConsumer.Consume() so we need to run
        // synchronously to stay within a single long-running thread with the Consume loop.
        public void Write(ConsumeResult<byte[]?, byte[]?> consumeResult, CancellationToken cancellationToken)
        {
            int channelIndex = GetChannelIndex(consumeResult.TopicPartition);

            _logger.LogDebug(
                KafkaEventIds.ConsumingMessage,
                "Writing message ({topic} {partition} @{offset}) to channel {channelIndex}.",
                consumeResult.Topic,
                consumeResult.Partition,
                consumeResult.Offset,
                channelIndex);

            AsyncHelper.RunSynchronously(
                () => _channels[channelIndex].Writer.WriteAsync(consumeResult, cancellationToken));
        }
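
A minimal sketch of what the RunSynchronously helper could look like (an assumption; Silverback's actual AsyncHelper may differ):

        internal static class AsyncHelper
        {
            // Sketch only. Blocks the calling thread until the async delegate
            // completes; running it on the thread pool avoids deadlocks when a
            // synchronization context is captured.
            public static void RunSynchronously(Func<ValueTask> asyncAction) =>
                Task.Run(() => asyncAction().AsTask()).GetAwaiter().GetResult();
        }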
Example 6
        private static IEnumerable<TopicPartitionOffset> OnPartitionsAssigned(
            List<TopicPartition> partitions,
            KafkaConsumer consumer,
            ISilverbackIntegrationLogger logger)
        {
            partitions.ForEach(
                partition =>
                {
                    logger.LogInformation(
                        KafkaEventIds.PartitionsAssigned,
                        "Assigned partition {topic}[{partition}], member id: {memberId}",
                        partition.Topic,
                        partition.Partition.Value,
                        consumer.MemberId);
                });

            var topicPartitionOffsets =
                consumer.Endpoint.Events.PartitionsAssignedHandler?.Invoke(partitions, consumer).ToList() ??
                partitions.Select(partition => new TopicPartitionOffset(partition, Offset.Unset)).ToList();

            foreach (var topicPartitionOffset in topicPartitionOffsets)
            {
                if (topicPartitionOffset.Offset != Offset.Unset)
                {
                    logger.LogDebug(
                        KafkaEventIds.PartitionOffsetReset,
                        "{topic}[{partition}] offset will be reset to {offset}.",
                        topicPartitionOffset.Topic,
                        topicPartitionOffset.Partition.Value,
                        topicPartitionOffset.Offset);
                }
            }

            consumer.OnPartitionsAssigned(
                topicPartitionOffsets
                    .Select(topicPartitionOffset => topicPartitionOffset.TopicPartition)
                    .ToList());

            return topicPartitionOffsets;
        }
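
A hedged registration example: returning explicit offsets from the handler overrides the default Offset.Unset and triggers the offset-reset logging above (the endpoint variable and handler shape are inferred from the Invoke call, not confirmed against the library):

        // Hypothetical registration: restart every assigned partition from
        // the beginning instead of the committed offset.
        endpoint.Events.PartitionsAssignedHandler = (partitions, consumer) =>
            partitions.Select(partition => new TopicPartitionOffset(partition, Offset.Beginning));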
Example 7
        private static void OnOffsetsCommitted(
            CommittedOffsets offsets,
            KafkaConsumer consumer,
            ISilverbackIntegrationLogger logger)
        {
            foreach (var offset in offsets.Offsets)
            {
                if (offset.Offset == Offset.Unset)
                {
                    continue;
                }

                if (offset.Error != null && offset.Error.Code != ErrorCode.NoError)
                {
                    logger.LogError(
                        KafkaEventIds.KafkaEventsHandlerErrorWhileCommittingOffset,
                        "Error occurred committing the offset {topic}[{partition}] @{offset}: {errorCode} - {errorReason}",
                        offset.Topic,
                        offset.Partition.Value,
                        offset.Offset,
                        offset.Error.Code,
                        offset.Error.Reason);
                }
                else
                {
                    logger.LogDebug(
                        KafkaEventIds.OffsetCommitted,
                        "Successfully committed offset {topic}[{partition}] @{offset}",
                        offset.Topic,
                        offset.Partition.Value,
                        offset.Offset);
                }
            }

            consumer.Endpoint.Events.OffsetsCommittedHandler?.Invoke(offsets, consumer);
        }
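
A hedged sketch of a matching OffsetsCommittedHandler registration; the handler parameters (Confluent.Kafka's CommittedOffsets plus the consumer) are inferred from the Invoke call above:

        // Hypothetical registration, inferred from the Invoke call above.
        endpoint.Events.OffsetsCommittedHandler = (committedOffsets, consumer) =>
            Console.WriteLine($"Commit completed for {committedOffsets.Offsets.Count} offset(s).");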
Example 8
        private void OnProducerStatistics(IProducer<byte[]?, byte[]?> producer, string statistics)
        {
            _logger.LogDebug(KafkaEventIds.ProducerStatisticsReceived, "Statistics: {statistics}", statistics);
            CreateScopeAndPublishEvent(new KafkaStatisticsEvent(statistics));
        }
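
This callback is presumably wired up through Confluent.Kafka's ProducerBuilder, whose SetStatisticsHandler expects exactly an Action<IProducer<TKey, TValue>, string> like the method above; the config variable is an assumed ProducerConfig:

        // Sketch only: statistics are emitted only when StatisticsIntervalMs
        // is set on the ProducerConfig.
        var confluentProducer = new ProducerBuilder<byte[]?, byte[]?>(config)
            .SetStatisticsHandler(OnProducerStatistics)
            .Build();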
Example 9
        private bool ConsumeOnce(CancellationToken cancellationToken)
        {
            try
            {
                var consumeResult = _confluenceConsumer.Consume(cancellationToken);

                if (consumeResult == null)
                {
                    return true;
                }

                _logger.LogDebug(
                    KafkaEventIds.ConsumingMessage,
                    "Consuming message: {topic} {partition} @{offset}.",
                    consumeResult.Topic,
                    consumeResult.Partition,
                    consumeResult.Offset);

                if (_channelsManager == null)
                {
                    _logger.LogDebug(
                        KafkaEventIds.ConsumingMessage,
                        "Waiting for channels manager to be initialized...");

                    // Wait until the ChannelsManager is set (after the partitions have been assigned)
                    while (_channelsManager == null)
                    {
                        Task.Delay(50, cancellationToken).Wait(cancellationToken);

                        cancellationToken.ThrowIfCancellationRequested();
                    }
                }

                _channelsManager.Write(consumeResult, cancellationToken);
            }
            catch (OperationCanceledException)
            {
                if (cancellationToken.IsCancellationRequested)
                {
                    _logger.LogTrace(KafkaEventIds.ConsumingCanceled, "Consuming canceled.");
                }
            }
            catch (KafkaException ex)
            {
                if (!_consumer.AutoRecoveryIfEnabled(ex, cancellationToken))
                {
                    return false;
                }
            }
            catch (Exception ex)
            {
                _logger.LogCritical(
                    IntegrationEventIds.ConsumerFatalError,
                    ex,
                    "Fatal error occurred while consuming. The consumer will be stopped.");

                return false;
            }

            return true;
        }
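
For context, a minimal sketch of the consume loop that would drive ConsumeOnce (an assumption: per the comment in Example 5, the real loop runs on a single long-running thread):

        private void ConsumeLoop(CancellationToken cancellationToken)
        {
            // ConsumeOnce returns false on fatal errors or unrecoverable
            // KafkaExceptions, which stops the loop (and thus the consumer).
            while (!cancellationToken.IsCancellationRequested && ConsumeOnce(cancellationToken))
            {
            }
        }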