Example #1
        private static void UseKafka(IConfiguration configuration)
        {
            lifeTime.ApplicationStarted.Register(() =>
            {
                try
                {
                    var conf = new Confluent.Kafka.ConsumerConfig
                    {
                        GroupId          = configuration["TomatoLog:Flow:Kafka:Group"],
                        BootstrapServers = configuration["TomatoLog:Flow:Kafka:BootstrapServers"],
                        AutoOffsetReset  = Confluent.Kafka.AutoOffsetReset.Earliest
                    };

                    using (var consumer = new Confluent.Kafka.ConsumerBuilder<Confluent.Kafka.Ignore, string>(conf).Build())
                    {
                        var topic = configuration["TomatoLog:Flow:Kafka:Topic"];
                        consumer.Subscribe(topic);

                        logger.LogInformation($"Kafka Consume started.");

                        var cancellationToken = cts_kafka.Token;
                        while (!cancellationToken.IsCancellationRequested)
                        {
                            Confluent.Kafka.ConsumeResult<Confluent.Kafka.Ignore, string> result = null;
                            try
                            {
                                result         = consumer.Consume(cancellationToken);
                                LogMessage log = JsonConvert.DeserializeObject<LogMessage>(result.Message.Value);
                                logWriter.Write(log);
                                filterService.Filter(log);
                            }
                            catch (OperationCanceledException)
                            {
                                // Consume throws when the token is cancelled; exit the loop instead of
                                // logging shutdown as an error.
                                break;
                            }
                            catch (Exception e)
                            {
                                logger.LogError($"Error occurred: {e.Message} {e.StackTrace}");
                            }
                            finally
                            {
                                // Commit only if a message was actually consumed; result is still null
                                // when Consume throws.
                                if (result != null)
                                {
                                    consumer.Commit(result);
                                }
                            }
                        }
                    }
                }
                catch (OperationCanceledException ex)
                {
                    logger.LogError("{0}/{1}", ex.Message, ex.StackTrace);
                }
            });

            lifeTime.ApplicationStopping.Register(() =>
            {
                cts_kafka.Cancel();
                logger.LogInformation($"Kafka Consume stoped.");
            });
        }
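
The snippet above references several members declared elsewhere in the hosting class (lifeTime, logger, cts_kafka, logWriter, filterService). A minimal sketch of the kind of declarations it assumes follows; the types for lifeTime and logger are educated guesses, and logWriter/filterService are TomatoLog services whose concrete types are not visible in the example.

        // Hypothetical supporting fields for Example #1 (names come from the snippet, types are assumptions):
        private static IHostApplicationLifetime lifeTime;   // IApplicationLifetime on older ASP.NET Core versions
        private static ILogger logger;
        private static readonly CancellationTokenSource cts_kafka = new CancellationTokenSource();
        // logWriter and filterService are TomatoLog-specific services; their concrete types are not shown here.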
Example #2
        public void Compare_SameInstance_ReturnsTrue()
        {
            var config1 = new Confluent.Kafka.ConsumerConfig
            {
                BootstrapServers            = "myserver",
                PartitionAssignmentStrategy = Confluent.Kafka.PartitionAssignmentStrategy.Range,
                EnableAutoCommit            = false
            };
            var config2 = config1;

            _dictionary.TryAdd(config1, null);

            _dictionary.Should().ContainKey(config2);
        }
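
Both this test and Example #4 below exercise a dictionary keyed by Confluent.Kafka.ConsumerConfig, which only behaves this way if the dictionary was created with a custom equality comparer. A plausible fixture setup is sketched below; the comparer type name is hypothetical, and a possible implementation is sketched after Example #4.

        // Hypothetical test fixture field; ConsumerConfigEqualityComparer is an assumed type name.
        private readonly ConcurrentDictionary<Confluent.Kafka.ConsumerConfig, object> _dictionary =
            new ConcurrentDictionary<Confluent.Kafka.ConsumerConfig, object>(new ConsumerConfigEqualityComparer());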
Example #3
        private Confluent.Kafka.IConsumer <byte[], byte[]> BuildConfluentConsumer(Confluent.Kafka.ConsumerConfig config)
        {
            return new Confluent.Kafka.ConsumerBuilder<byte[], byte[]>(config)
                   .SetPartitionsAssignedHandler((_, partitions) =>
                   {
                       partitions.ForEach(partition =>
                           _logger.LogTrace("Assigned topic {topic} partition {partition}, member id: {memberId}",
                                            partition.Topic, partition.Partition, _innerConsumer.MemberId));
                   })
                   .SetPartitionsRevokedHandler((_, partitions) =>
                   {
                       partitions.ForEach(partition =>
                           _logger.LogTrace("Revoked topic {topic} partition {partition}, member id: {memberId}",
                                            partition.Topic, partition.Partition, _innerConsumer.MemberId));
                   })
                   .SetOffsetsCommittedHandler((_, offsets) =>
                   {
                       foreach (var offset in offsets.Offsets)
                       {
                           if (offset.Offset == Confluent.Kafka.Offset.Unset)
                           {
                               continue;
                           }

                           if (offset.Error != null && offset.Error.Code != Confluent.Kafka.ErrorCode.NoError)
                           {
                               _logger.LogError(
                                   "Error occurred committing the offset {topic} {partition} @{offset}: {errorCode} - {errorReason}",
                                   offset.Topic, offset.Partition, offset.Offset, offset.Error.Code, offset.Error.Reason);
                           }
                           else
                           {
                               _logger.LogTrace("Successfully committed offset {topic} {partition} @{offset}.",
                                                offset.Topic, offset.Partition, offset.Offset);
                           }
                       }
                   })
                   .SetErrorHandler((_, e) =>
                   {
                       _logger.Log(e.IsFatal ? LogLevel.Critical : LogLevel.Warning,
                                   "Error in Kafka consumer: {reason}.", e.Reason);
                   })
                   .SetStatisticsHandler((_, json) =>
                   {
                       _logger.LogInformation("Statistics: {statistics}", json);
                   })
                   .Build();
        }
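
A hedged usage sketch of the factory method above; the broker address, group id, and topic are placeholders, not values taken from the example.

            // Hypothetical call site for BuildConfluentConsumer; all configuration values are placeholders.
            var config = new Confluent.Kafka.ConsumerConfig
            {
                BootstrapServers = "localhost:9092",
                GroupId          = "my-consumer-group",
                EnableAutoCommit = false
            };

            _innerConsumer = BuildConfluentConsumer(config);
            _innerConsumer.Subscribe("my-topic");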
Example #4
        public void Compare_DifferentParameters_ReturnsFalse()
        {
            var config1 = new Confluent.Kafka.ConsumerConfig
            {
                BootstrapServers            = "myserver",
                PartitionAssignmentStrategy = Confluent.Kafka.PartitionAssignmentStrategy.Range,
                EnableAutoCommit            = false
            };
            var config2 = new Confluent.Kafka.ConsumerConfig
            {
                PartitionAssignmentStrategy = Confluent.Kafka.PartitionAssignmentStrategy.Range,
                BootstrapServers            = "myserver"
            };

            _dictionary.TryAdd(config1, null);

            _dictionary.Should().NotContainKey(config2);
        }
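
The behavior expected by Example #2 and Example #4 suggests an equality comparer that treats two ConsumerConfig instances as equal only when they contain the same settings. The comparer below is a hedged sketch, not the library's actual implementation; it relies on the fact that ConsumerConfig enumerates its settings as KeyValuePair<string, string> (requires System.Linq and System.Collections.Generic).

        // Hedged sketch of a ConsumerConfig equality comparer consistent with the tests above.
        internal sealed class ConsumerConfigEqualityComparer : IEqualityComparer<Confluent.Kafka.ConsumerConfig>
        {
            public bool Equals(Confluent.Kafka.ConsumerConfig x, Confluent.Kafka.ConsumerConfig y)
            {
                if (ReferenceEquals(x, y))
                {
                    return true;
                }

                if (x == null || y == null)
                {
                    return false;
                }

                // Order by key so that configs built with properties assigned in a different order still match.
                return x.OrderBy(p => p.Key).SequenceEqual(y.OrderBy(p => p.Key));
            }

            public int GetHashCode(Confluent.Kafka.ConsumerConfig obj) =>
                obj.OrderBy(p => p.Key)
                   .Aggregate(17, (hash, pair) => unchecked(hash * 31 + pair.Key.GetHashCode()));
        }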
Example #5
 public void WithReceiverConfig(Confluent.Kafka.ConsumerConfig config)
 {
     ConsumerConfig = config;
 }
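
A hedged example of how this setter might be called; the endpointConfigurator variable is a placeholder for whatever object exposes WithReceiverConfig.

 // Hypothetical call site; 'endpointConfigurator' is a placeholder object.
 endpointConfigurator.WithReceiverConfig(new Confluent.Kafka.ConsumerConfig
 {
     BootstrapServers = "localhost:9092",
     GroupId          = "my-consumer-group",
     AutoOffsetReset  = Confluent.Kafka.AutoOffsetReset.Earliest
 });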
Example #6
 public InnerConsumerWrapper(Confluent.Kafka.ConsumerConfig config, CancellationToken cancellationToken, ILogger logger)
 {
     _innerConsumer     = BuildConfluentConsumer(config);
     _cancellationToken = cancellationToken;
     _logger            = logger;
 }
Example #7
 public InnerConsumerWrapper(Confluent.Kafka.ConsumerConfig config, bool enableAutoRecovery, ILogger logger)
 {
     _config             = config;
     _enableAutoRecovery = enableAutoRecovery;
     _logger             = logger;
 }
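
A hedged sketch showing how the two constructors above (Example #6 and Example #7) might be invoked; the configuration values are placeholders and NullLogger stands in for a real logger.

 // Hypothetical construction of the wrapper; dependencies shown here are placeholders.
 var config = new Confluent.Kafka.ConsumerConfig
 {
     BootstrapServers = "localhost:9092",
     GroupId          = "my-consumer-group"
 };
 var cts = new CancellationTokenSource();
 ILogger logger = Microsoft.Extensions.Logging.Abstractions.NullLogger.Instance;

 var consumerWrapper   = new InnerConsumerWrapper(config, cts.Token, logger);                 // Example #6
 var recoveringWrapper = new InnerConsumerWrapper(config, enableAutoRecovery: true, logger);  // Example #7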