public KafkaMessageProducerFactory(
    KafkaMessagingGatewayConfiguration globalConfiguration)
    : this(globalConfiguration, new KafkaPublication
    {
        MakeChannels = OnMissingChannel.Create
    })
{
}
 public KafkaMessageProducerFactory(
     KafkaMessagingGatewayConfiguration globalConfiguration,
     KafkaPublication publication)
 {
     _globalConfiguration = globalConfiguration;
     _publication         = publication;
     _configHook          = null;
 }
/// <summary>
/// Constructs a <see cref="KafkaProducerRegistryFactory"/>, which can be used to create <see cref="KafkaMessageProducer"/> instances.
/// It takes a dependency on a <see cref="KafkaMessagingGatewayConfiguration"/> to connect to the broker, and a collection of
/// <see cref="KafkaPublication"/>s that determine how we publish to Kafka and the parameters of any topics, if required.
/// </summary>
/// <param name="globalConfiguration">Configures how we connect to the broker</param>
/// <param name="publications">How we publish: both producer parameters and topic configuration</param>
 public KafkaProducerRegistryFactory(
     KafkaMessagingGatewayConfiguration globalConfiguration,
    IEnumerable<KafkaPublication> publications)
 {
     _globalConfiguration = globalConfiguration;
     _publications        = publications;
     _configHook          = null;
 }
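
A minimal usage sketch for the registry factory above; the broker address and topic name are illustrative placeholders, and the Create() call is an assumption about the factory's API.

// Hedged sketch: wiring the registry factory with one publication.
// Create() is assumed to build a producer registry from the publications.
var registryFactory = new KafkaProducerRegistryFactory(
    new KafkaMessagingGatewayConfiguration
    {
        Name             = "example.client",             // used as the ClientId
        BootStrapServers = new[] { "localhost:9092" }    // placeholder broker
    },
    new[]
    {
        new KafkaPublication
        {
            Topic             = "greeting.event",        // placeholder topic
            NumPartitions     = 3,
            ReplicationFactor = 1,
            MakeChannels      = OnMissingChannel.Create  // create the topic if missing
        }
    });

var producerRegistry = registryFactory.Create();         // assumed factory method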
        public KafkaMessageProducer(KafkaMessagingGatewayConfiguration globalConfiguration,
                                    KafkaMessagingProducerConfiguration producerConfiguration)
        {
            // Note: this constructor targets the legacy Confluent.Kafka 0.x API
            // (Producer<TKey, TValue> with explicit serialisers); later examples use
            // the 1.x ProducerBuilder instead.
            var serialiser = new StringSerializer(Encoding.UTF8);
            var config     = globalConfiguration.ToConfig();

            config    = config.Concat(producerConfiguration.ToConfig());
            _producer = new Producer<Null, string>(config, null, serialiser);
        }
        public KafkaMessageConsumer(string groupId, string topic,
                                    KafkaMessagingGatewayConfiguration globalConfiguration,
                                    KafkaMessagingConsumerConfiguration consumerConfiguration)
        {
            _topic             = topic;
            _autoCommitEnabled = consumerConfiguration.EnableAutoCommit;
            _consumerConfig    = new ConsumerConfig()
            {
                GroupId            = groupId,
                ClientId           = globalConfiguration.Name,
                BootstrapServers   = string.Join(",", globalConfiguration.BootStrapServers),
                MaxInFlight        = globalConfiguration.MaxInFlightRequestsPerConnection,
                SessionTimeoutMs   = 6000,
                EnablePartitionEof = true,

                /*
                 * By default, we always call Acknowledge after processing a handler, and commit then.
                 * This has the potential to cause a lot of traffic for the Kafka cluster, as every commit is a new message on the __consumer_offsets topic.
                 * To lower the load, you can enable auto-commit via EnableAutoCommit and AutoCommitIntervalMs. The downside is that if the consumer dies,
                 * you may process a message more than once when a new consumer resumes reading a partition.
                 */

                AutoCommitIntervalMs  = consumerConfiguration.AutoCommitIntervalMs,
                EnableAutoCommit      = consumerConfiguration.EnableAutoCommit,
                AllowAutoCreateTopics = true,
                AutoOffsetReset       = consumerConfiguration.OffsetDefault
            };


            _consumer = new ConsumerBuilder<Null, string>(_consumerConfig)
                        .SetPartitionsAssignedHandler((consumer, list) =>
            {
                _logger.Value.InfoFormat($"Assigned partitions: [{string.Join(", ", list)}], member id: {consumer.MemberId}");
            })
                        .SetPartitionsRevokedHandler((consumer, list) =>
            {
                _logger.Value.InfoFormat($"Revoked partitions: [{string.Join(", ", list)}], member id: {consumer.MemberId}");
            })
                        .SetErrorHandler((consumer, error) =>
            {
                if (error.IsBrokerError)
                {
                    _logger.Value.Error($"BrokerError: Member id: {consumer.MemberId}, error: {error}");
                }
                else
                {
                    _logger.Value.Error($"ConsumeError: Member Id: {consumer.MemberId}, error: {error}");
                }
            })
                        .Build();

            _logger.Value.InfoFormat($"Kakfa consumer subscribing to {_topic}");

            _consumer.Subscribe(new [] { _topic });

            _creator = new KafkaMessageCreator();
        }
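
The comment in the constructor above describes the auto-commit trade-off. Below is a minimal sketch of a configuration that opts into auto-commit; it assumes KafkaMessagingConsumerConfiguration exposes the properties the constructor reads as settable, and the group id, topic, and gateway configuration are illustrative placeholders.

// Hedged sketch: opt into auto-commit to reduce commit traffic, accepting
// possible duplicate processing if the consumer dies before an auto-commit.
var consumerConfiguration = new KafkaMessagingConsumerConfiguration
{
    EnableAutoCommit     = true,   // commit offsets in the background...
    AutoCommitIntervalMs = 5000,   // ...every five seconds
    OffsetDefault        = AutoOffsetReset.Earliest
};

var consumer = new KafkaMessageConsumer(
    groupId: "example.group",                    // placeholder group id
    topic: "greeting.event",                     // placeholder topic
    globalConfiguration: gatewayConfiguration,   // assumed configured elsewhere
    consumerConfiguration: consumerConfiguration);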
        public KafkaMessageProducer(
            KafkaMessagingGatewayConfiguration configuration,
            KafkaPublication publication)
        {
            if (string.IsNullOrEmpty(publication.Topic))
            {
                throw new ConfigurationException("Topic is required for a publication");
            }

            _clientConfig = new ClientConfig
            {
                Acks                  = (Confluent.Kafka.Acks)((int)publication.Replication),
                BootstrapServers      = string.Join(",", configuration.BootStrapServers),
                ClientId              = configuration.Name,
                Debug                 = configuration.Debug,
                SaslMechanism         = configuration.SaslMechanisms.HasValue ? (Confluent.Kafka.SaslMechanism?)((int)configuration.SaslMechanisms.Value) : null,
                SaslKerberosPrincipal = configuration.SaslKerberosPrincipal,
                SaslUsername          = configuration.SaslUsername,
                SaslPassword          = configuration.SaslPassword,
                SecurityProtocol      = configuration.SecurityProtocol.HasValue ? (Confluent.Kafka.SecurityProtocol?)((int)configuration.SecurityProtocol.Value) : null,
                SslCaLocation         = configuration.SslCaLocation,
                SslKeyLocation        = configuration.SslKeystoreLocation,
            };

            _producerConfig = new ProducerConfig(_clientConfig)
            {
                BatchNumMessages          = publication.BatchNumberMessages,
                EnableIdempotence         = publication.EnableIdempotence,
                MaxInFlight               = publication.MaxInFlightRequestsPerConnection,
                LingerMs                  = publication.LingerMs,
                MessageTimeoutMs          = publication.MessageTimeoutMs,
                MessageSendMaxRetries     = publication.MessageSendMaxRetries,
                Partitioner               = (Confluent.Kafka.Partitioner)((int)publication.Partitioner),
                QueueBufferingMaxMessages = publication.QueueBufferingMaxMessages,
                QueueBufferingMaxKbytes   = publication.QueueBufferingMaxKbytes,
                RequestTimeoutMs          = publication.RequestTimeoutMs,
                RetryBackoffMs            = publication.RetryBackoff,
                TransactionalId           = publication.TransactionalId,
            };

            MakeChannels           = publication.MakeChannels;
            Topic                  = publication.Topic;
            NumPartitions          = publication.NumPartitions;
            ReplicationFactor      = publication.ReplicationFactor;
            TopicFindTimeoutMs     = publication.TopicFindTimeoutMs;
            MaxOutStandingMessages = publication.MaxOutStandingMessages;
            MaxOutStandingCheckIntervalMilliSeconds = publication.MaxOutStandingCheckIntervalMilliSeconds;
        }
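
A minimal usage sketch for the constructor above follows; the broker address and topic name are placeholders, and only properties the constructor actually reads are set.

// Hedged usage sketch for KafkaMessageProducer(configuration, publication).
var configuration = new KafkaMessagingGatewayConfiguration
{
    Name             = "example.producer",           // used as the ClientId
    BootStrapServers = new[] { "localhost:9092" }    // placeholder broker
};

var publication = new KafkaPublication
{
    Topic             = "greeting.event",            // required, or the constructor throws
    MakeChannels      = OnMissingChannel.Create,     // create the topic if it is missing
    NumPartitions     = 3,
    ReplicationFactor = 1,
    EnableIdempotence = true                         // idempotent writes on the produce side
};

var producer = new KafkaMessageProducer(configuration, publication);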
        public KafkaMessageConsumer(string groupId, string topic,
                                    KafkaMessagingGatewayConfiguration globalConfiguration,
                                    KafkaMessagingConsumerConfiguration consumerConfiguration)
        {
            _topic          = topic;
            _consumerConfig = new ConsumerConfig()
            {
                GroupId            = groupId,
                ClientId           = globalConfiguration.Name,
                BootstrapServers   = string.Join(",", globalConfiguration.BootStrapServers),
                MaxInFlight        = globalConfiguration.MaxInFlightRequestsPerConnection,
                SessionTimeoutMs   = 6000,
                EnablePartitionEof = true,

                //We always call Acknowledge after processing a handler, and commit then; auto-commit is therefore disabled.
                AutoCommitIntervalMs = 0,
                EnableAutoCommit     = false,

                AutoOffsetReset = consumerConfiguration.OffsetDefault
            };


            _consumer = new ConsumerBuilder<Null, string>(_consumerConfig)
                        .SetPartitionsAssignedHandler((consumer, list) =>
            {
                _logger.Value.InfoFormat($"Assigned partitions: [{string.Join(", ", list)}], member id: {consumer.MemberId}");
            })
                        .SetPartitionsRevokedHandler((consumer, list) =>
            {
                _logger.Value.InfoFormat($"Revoked partitions: [{string.Join(", ", list)}], member id: {consumer.MemberId}");
            })
                        .SetErrorHandler((consumer, error) =>
            {
                if (error.IsBrokerError)
                {
                    _logger.Value.Error($"BrokerError: Member id: {consumer.MemberId}, error: {error}");
                }
                else
                {
                    _logger.Value.Error($"ConsumeError: Member Id: {consumer.MemberId}, error: {error}");
                }
            })
                        .Build();

            _logger.Value.InfoFormat($"Kakfa consumer subscribing to {_topic}");

            _consumer.Subscribe(new [] { _topic });
        }
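
Because this variant disables auto-commit, offsets must be committed explicitly once a handler has processed the record. A minimal sketch of that pattern against the Confluent.Kafka 1.x API follows; ProcessMessage is a hypothetical stand-in for the handler pipeline.

// Hedged sketch of the manual-commit loop body the comment above refers to:
// consume, hand off to a handler, then commit the offset for that record.
void PumpOnce(IConsumer<Null, string> consumer)
{
    var result = consumer.Consume(TimeSpan.FromMilliseconds(500));
    if (result == null || result.IsPartitionEOF)
        return;                             // nothing to process this poll

    ProcessMessage(result.Message.Value);   // hypothetical handler
    consumer.Commit(result);                // commit only after successful processing
}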
        public KafkaMessageConsumer(string groupId, string topic,
                                    KafkaMessagingGatewayConfiguration globalConfiguration,
                                    KafkaMessagingConsumerConfiguration consumerConfiguration)
        {
            var consumerConfig = new ConsumerConfig()
            {
                GroupId            = groupId,
                ClientId           = globalConfiguration.Name,
                BootstrapServers   = string.Join(",", globalConfiguration.BootStrapServers),
                MaxInFlight        = globalConfiguration.MaxInFlightRequestsPerConnection,
                SessionTimeoutMs   = 6000,
                EnablePartitionEof = true,

                // Use TotalMilliseconds here: TimeSpan.Milliseconds is only the
                // millisecond component (0-999), not the whole interval.
                AutoCommitIntervalMs = (int)consumerConfiguration.AutoCommitInterval.TotalMilliseconds,
                EnableAutoCommit     = consumerConfiguration.EnableAutoCommit,
                AutoOffsetReset      = consumerConfiguration.AutoResetOffset
            };


            _consumer = new ConsumerBuilder<Null, string>(consumerConfig)
                        .SetPartitionsAssignedHandler((consumer, list) =>
            {
                _logger.Value.InfoFormat($"Assigned partitions: [{string.Join(", ", list)}], member id: {consumer.MemberId}");
            })
                        .SetPartitionsRevokedHandler((consumer, list) =>
            {
                _logger.Value.InfoFormat($"Revoked partitions: [{string.Join(", ", list)}], member id: {consumer.MemberId}");
            })
                        .SetErrorHandler((consumer, error) =>
            {
                if (error.IsBrokerError)
                {
                    _logger.Value.Error($"BrokerError: Member id: {consumer.MemberId}, error: {error}");
                }
                else
                {
                    _logger.Value.Error($"ConsumeError: Member Id: {consumer.MemberId}, error: {error}");
                }
            })
                        .Build();

            _consumer.Subscribe(new[] { topic });
        }
        public KafkaMessageProducer(KafkaMessagingGatewayConfiguration globalConfiguration,
                                    KafkaMessagingProducerConfiguration producerConfiguration)
        {
            _producerConfig = new ProducerConfig
            {
                BootstrapServers          = string.Join(",", globalConfiguration.BootStrapServers),
                ClientId                  = globalConfiguration.Name,
                MaxInFlight               = globalConfiguration.MaxInFlightRequestsPerConnection,
                QueueBufferingMaxMessages = producerConfiguration.QueueBufferingMaxMessages,
                QueueBufferingMaxKbytes   = producerConfiguration.QueueBufferingMaxKbytes,
                Acks                      = producerConfiguration.Acks,
                MessageSendMaxRetries     = producerConfiguration.MessageSendMaxRetries,
                BatchNumMessages          = producerConfiguration.BatchNumberMessages,
                //queue.buffering.max.ms is the legacy librdkafka alias for linger.ms
                LingerMs                  = producerConfiguration.QueueBufferingMax,
                RequestTimeoutMs          = producerConfiguration.RequestTimeout,
                MessageTimeoutMs          = producerConfiguration.MessageTimeout,
                RetryBackoffMs            = producerConfiguration.RetryBackoff
            };

            _producer = new ProducerBuilder<Null, string>(_producerConfig).Build();
        }
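
A short sketch of publishing with the producer built above, using the Confluent.Kafka 1.x ProduceAsync API; the topic name and payload are placeholders.

// Hedged sketch: send a single message and observe the delivery report.
async Task SendExampleAsync(IProducer<Null, string> producer)
{
    var deliveryResult = await producer.ProduceAsync(
        "greeting.event",                                                  // placeholder topic
        new Message<Null, string> { Value = "{\"greeting\":\"hello\"}" }); // placeholder payload

    // The broker reports back the partition and offset it assigned.
    Console.WriteLine($"Delivered to {deliveryResult.TopicPartitionOffset}");
}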
        public KafkaMessageConsumer(string groupId, string topic,
                                    KafkaMessagingGatewayConfiguration globalConfiguration,
                                    KafkaMessagingConsumerConfiguration consumerConfiguration)
        {
            // Note: this variant targets the legacy Confluent.Kafka 0.x API
            // (event-based Consumer<TKey, TValue> with explicit deserialisers);
            // later examples use the 1.x ConsumerBuilder instead.
            var config = globalConfiguration.ToConfig();

            config    = config.Concat(consumerConfiguration.ToConfig());
            config    = config.Concat(new[] { new KeyValuePair<string, object>("group.id", groupId) });
            _consumer = new Consumer<Null, string>(config, null, new StringDeserializer(Encoding.UTF8));

            _consumer.OnPartitionsAssigned += (_, partitions) => OnPartionsAssigned(partitions);
            _consumer.OnPartitionsRevoked  += (_, partitions) => OnPartionsRevoked(partitions);

            if (_logger.Value.IsErrorEnabled())
            {
                _consumer.OnError += (_, error) =>
                                     _logger.Value.Error($"BrokerError: Member id: {_consumer.MemberId}, error: {error}");
                _consumer.OnConsumeError += (_, error) =>
                                            _logger.Value.Error($"ConsumeError: Member Id: {_consumer.MemberId}, error: {error}");
            }

            _consumer.Subscribe(new[] { topic });
        }
        public KafkaMessageConsumer(
            KafkaMessagingGatewayConfiguration configuration,
            RoutingKey routingKey,
            string groupId,
            AutoOffsetReset offsetDefault         = AutoOffsetReset.Earliest,
            int sessionTimeoutMs                  = 10000,
            int maxPollIntervalMs                 = 10000,
            IsolationLevel isolationLevel         = IsolationLevel.ReadCommitted,
            long commitBatchSize                  = 10,
            int sweepUncommittedOffsetsIntervalMs = 30000,
            int readCommittedOffsetsTimeoutMs     = 5000,
            int numPartitions             = 1,
            short replicationFactor       = 1,
            int topicFindTimeoutMs        = 10000,
            OnMissingChannel makeChannels = OnMissingChannel.Create
            )
        {
            if (configuration is null)
            {
                throw new ConfigurationException("You must set a KafkaMessaginGatewayConfiguration to connect to a broker");
            }

            if (routingKey is null)
            {
                throw new ConfigurationException("You must set a RoutingKey as the Topic for the consumer");
            }

            if (groupId is null)
            {
                throw new ConfigurationException("You must set a GroupId for the consumer");
            }

            Topic = routingKey;

            _clientConfig = new ClientConfig
            {
                BootstrapServers      = string.Join(",", configuration.BootStrapServers),
                ClientId              = configuration.Name,
                Debug                 = configuration.Debug,
                SaslMechanism         = configuration.SaslMechanisms.HasValue ? (Confluent.Kafka.SaslMechanism?)((int)configuration.SaslMechanisms.Value) : null,
                SaslKerberosPrincipal = configuration.SaslKerberosPrincipal,
                SaslUsername          = configuration.SaslUsername,
                SaslPassword          = configuration.SaslPassword,
                SecurityProtocol      = configuration.SecurityProtocol.HasValue ? (Confluent.Kafka.SecurityProtocol?)((int)configuration.SecurityProtocol.Value) : null,
                SslCaLocation         = configuration.SslCaLocation
            };
            _consumerConfig = new ConsumerConfig(_clientConfig)
            {
                GroupId               = groupId,
                ClientId              = configuration.Name,
                AutoOffsetReset       = offsetDefault,
                BootstrapServers      = string.Join(",", configuration.BootStrapServers),
                SessionTimeoutMs      = sessionTimeoutMs,
                MaxPollIntervalMs     = maxPollIntervalMs,
                EnablePartitionEof    = true,
                AllowAutoCreateTopics = false, //We always create topics explicitly, so that we can set their parameters
                IsolationLevel        = isolationLevel,
                //We commit the last offset for acknowledged requests when a batch of records has been processed.
                EnableAutoOffsetStore = false,
                EnableAutoCommit      = false,
                // https://www.confluent.io/blog/cooperative-rebalancing-in-kafka-streams-consumer-ksqldb/
                PartitionAssignmentStrategy = PartitionAssignmentStrategy.CooperativeSticky,
            };

            _maxBatchSize                  = commitBatchSize;
            _sweepUncommittedInterval      = TimeSpan.FromMilliseconds(sweepUncommittedOffsetsIntervalMs);
            _readCommittedOffsetsTimeoutMs = readCommittedOffsetsTimeoutMs;

            _consumer = new ConsumerBuilder<string, string>(_consumerConfig)
                        .SetPartitionsAssignedHandler((consumer, list) =>
            {
                var partitions = list.Select(p => $"{p.Topic} : {p.Partition.Value}");

                _logger.Value.InfoFormat("Parition Added {0}", String.Join(",", partitions));

                _partitions.AddRange(list);
            })
                        .SetPartitionsRevokedHandler((consumer, list) =>
            {
                _consumer.Commit(list);
                var revokedPartitions = list.Select(tpo => $"{tpo.Topic} : {tpo.Partition}").ToList();

                _logger.Value.InfoFormat("Partitions for consumer revoked {0}", string.Join(",", revokedPartitions));

                _partitions = _partitions.Where(tp => list.All(tpo => tpo.TopicPartition != tp)).ToList();
            })
                        .SetPartitionsLostHandler((consumer, list) =>
            {
                var lostPartitions = list.Select(tpo => $"{tpo.Topic} : {tpo.Partition}").ToList();

                _logger.Value.InfoFormat("Partitions for consumer lost {0}", string.Join(",", lostPartitions));

                _partitions = _partitions.Where(tp => list.All(tpo => tpo.TopicPartition != tp)).ToList();
            })
                        .SetErrorHandler((consumer, error) =>
            {
                _logger.Value.Error($"Code: {error.Code}, Reason: {error.Reason}, Fatal: {error.IsFatal}");
            })
                        .Build();

            _logger.Value.InfoFormat($"Kakfa consumer subscribing to {Topic}");
            _consumer.Subscribe(new [] { Topic.Value });

            _creator = new KafkaMessageCreator();

            MakeChannels       = makeChannels;
            NumPartitions      = numPartitions;
            ReplicationFactor  = replicationFactor;
            TopicFindTimeoutMs = topicFindTimeoutMs;

            EnsureTopic();
        }
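
A hedged usage sketch for the constructor above; the broker address, topic, and group id are placeholders, and the remaining parameters fall back to their defaults (including the cooperative-sticky assignment and explicit batched commits configured above).

// Hedged sketch: constructing the 1.x consumer with mostly default settings.
var consumer = new KafkaMessageConsumer(
    configuration: new KafkaMessagingGatewayConfiguration
    {
        Name             = "example.consumer",
        BootStrapServers = new[] { "localhost:9092" }   // placeholder broker
    },
    routingKey: new RoutingKey("greeting.event"),       // placeholder topic
    groupId: "example.group",                           // placeholder group id
    offsetDefault: AutoOffsetReset.Earliest,
    commitBatchSize: 10,                                // commit after 10 acknowledged messages
    makeChannels: OnMissingChannel.Create);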