private void LoadDataFromKafka2Db(IMessageFlow messageFlowForKafkaTopic,
                                          IReadOnlyCollection<Type> dataObjectTypes,
                                          DataConnection dataConnection,
                                          int batchSize,
                                          int bulkReplaceCommandTimeoutSec)
        {
            var targetMessageFlowDescription = messageFlowForKafkaTopic.GetType().Name;

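            // one actor per data object type; all actors share the same connection and the configured bulk copy timeout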
            var actors = CreateActors(dataObjectTypes,
                                      dataConnection,
                                      new BulkCopyOptions
            {
                BulkCopyTimeout = bulkReplaceCommandTimeoutSec
            });

            using var receiver = _receiverFactory.Create(messageFlowForKafkaTopic);
            // retry added because of https://github.com/confluentinc/confluent-kafka-dotnet/issues/86
            var lastTargetMessageOffset =
                Policy.Handle<KafkaException>(exception => exception.Error.Code == ErrorCode.LeaderNotAvailable)
                .WaitAndRetryForever(i => TimeSpan.FromSeconds(5),
                                     (exception, waitSpan) =>
                                     _tracer.Warn(exception,
                                                  $"Can't get size of kafka topic. Message flow: {targetMessageFlowDescription}. Wait span: {waitSpan}"))
                .ExecuteAndCapture(() => _kafkaMessageFlowInfoProvider.GetFlowSize(messageFlowForKafkaTopic) - 1)
                .Result;

            _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription}. Last target message offset: {lastTargetMessageOffset}");

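            // only command factories registered for this message flow will produce bulk commands for the received batches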
            var resolvedCommandFactories = _commandFactories.Where(f => f.AppropriateFlows.Contains(messageFlowForKafkaTopic))
                                           .ToList();

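            // read batches until the distance to the precomputed last offset of the topic is closed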
            for (var distance = lastTargetMessageOffset; distance > 0;)
            {
                var batch = receiver.ReceiveBatch(batchSize);
                // the kafka client may return empty batches while it warms up, even though the topic still has messages; keep polling
                if (batch.Count == 0)
                {
                    continue;
                }
                var lastMessageOffset = batch.Last().Offset.Value;
                distance = lastTargetMessageOffset - lastMessageOffset;

                _tracer.Info($"Flow: {targetMessageFlowDescription}. Received messages: {batch.Count}. Last message offset for received batch: {lastMessageOffset}. Target and current offsets distance: {distance}");

                var bulkCommands = resolvedCommandFactories.SelectMany(factory => factory.CreateCommands(batch)).ToList();
                if (bulkCommands.Count > 0)
                {
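                    // each batch is applied in its own serializable transaction; Timeout = TimeSpan.Zero effectively disables the per-transaction timeout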
                    using var scope = new TransactionScope(TransactionScopeOption.RequiresNew, new TransactionOptions { IsolationLevel = IsolationLevel.Serializable, Timeout = TimeSpan.Zero });
                    foreach (var actor in actors)
                    {
                        actor.ExecuteCommands(bulkCommands);
                    }
                    scope.Complete();
                }

                receiver.CompleteBatch(batch);
            }

            _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription} finished");
        }
        private void LoadDataFromKafka2Db(IMessageFlow messageFlowForKafkaTopic,
                                          IReadOnlyCollection<Type> dataObjectTypes,
                                          DataConnection dataConnection,
                                          int bulkReplaceCommandTimeoutSec)
        {
            var targetMessageFlowDescription = messageFlowForKafkaTopic.GetType().Name;

            var actors = CreateActors(dataObjectTypes,
                                      dataConnection,
                                      new BulkCopyOptions
            {
                BulkCopyTimeout = bulkReplaceCommandTimeoutSec
            });

            using (var receiver = _receiverFactory.Create(messageFlowForKafkaTopic))
            {
                // retry added because of https://github.com/confluentinc/confluent-kafka-dotnet/issues/86
                var lastTargetMessageOffset =
                    Policy.Handle<Confluent.Kafka.KafkaException>(exception => exception.Error.Code == Confluent.Kafka.ErrorCode.LeaderNotAvailable)
                    .WaitAndRetryForever(i => TimeSpan.FromSeconds(5),
                                         (exception, waitSpan) =>
                                         _tracer.Warn(exception,
                                                      $"Can't get size of kafka topic. Message flow: {targetMessageFlowDescription}. Wait span: {waitSpan}"))
                    .ExecuteAndCapture(() => _kafkaMessageFlowInfoProvider.GetFlowSize(messageFlowForKafkaTopic) - 1)
                    .Result;

                _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription}. Last target message offset: {lastTargetMessageOffset}");

                var resolvedCommandFactories = _commandFactories.Where(f => f.AppropriateFlows.Contains(messageFlowForKafkaTopic))
                                               .ToList();

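                // unlike the batched overload above, this variant wraps the whole load in a single serializable transaction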
                using (var transaction = new TransactionScope(TransactionScopeOption.RequiresNew,
                                                             new TransactionOptions
                {
                    IsolationLevel = IsolationLevel.Serializable,
                    Timeout = TimeSpan.Zero
                }))
                {
                    long currentMessageOffset     = 0;
                    int  receivedMessagesQuantity = 0;
                    while (currentMessageOffset < lastTargetMessageOffset)
                    {
                        var batch = receiver.ReceiveBatch(_batchSizeSettings.BatchSize);
                        // keep polling until kafka returns messages:
                        // the kafka client needs some warm-up time, so right after startup it may return empty batches
                        // even though the topic/partition actually contains messages
                        if (batch.Count == 0)
                        {
                            continue;
                        }

                        receivedMessagesQuantity += batch.Count;
                        currentMessageOffset      = batch.Last().Offset.Value;

                        _tracer.Info($"Flow: {targetMessageFlowDescription}. Received messages: {batch.Count}. Last message offset for received batch: {currentMessageOffset}. Target and current offsets distance: {lastTargetMessageOffset - currentMessageOffset}");

                        var bulkCommands = resolvedCommandFactories.SelectMany(factory => factory.CreateCommands(batch))
                                           .ToList();

                        if (bulkCommands.Count > 0)
                        {
                            foreach (var actor in actors)
                            {
                                actor.ExecuteCommands(bulkCommands);
                            }
                        }

                        receiver.CompleteBatch(batch);
                    }

                    _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription} finished. Received messages quantity: {receivedMessagesQuantity}");
                    transaction.Complete();
                }
            }
        }
        public IKafkaMessageFlowReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
        {
            if (!_flows2ConsumerSettingsMap.TryGetValue(messageFlow, out var kafkaConfig))
            {
                throw new ArgumentOutOfRangeException($"Can't create kafka receiver settings. Specified message flow \"{messageFlow.GetType().Name}\" doesn't have an appropriate config");
            }

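            // the receiver is pinned to the single supported partition and starts reading from the stored offset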
            return new KafkaMessageFlowReceiverSettings
            {
                Config = kafkaConfig.KafkaClientSpecific,
                TopicPartitionOffsets = new[] { new TopicPartitionOffset(kafkaConfig.Topic, SingleSupportedPartition, _offset) },
                PollTimeout = kafkaConfig.PoolTimeout
            };
        }
        public IKafkaMessageFlowReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
        {
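            // this variant returns the stored per-flow settings object directly instead of building a new one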
            if (!_flows2ConsumerSettingsMap.TryGetValue(messageFlow, out var settings))
            {
                throw new ArgumentOutOfRangeException($"Can't create kafka receiver settings. Specified message flow \"{messageFlow.GetType().Name}\" doesn't have an appropriate config");
            }

            return settings;
        }
        public IKafkaMessageFlowInfoSettings CreateInfoSettings(IMessageFlow messageFlow)
        {
            if (!_flows2ConsumerSettingsMap.TryGetValue(messageFlow, out var kafkaConfig))
            {
                throw new ArgumentOutOfRangeException($"Can't create kafka info settings. Specified message flow \"{messageFlow.GetType().Name}\" doesn't have an appropriate config");
            }

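            // info settings target the same single supported partition of the configured topic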
            return new KafkaMessageFlowInfoSettings
            {
                Config = kafkaConfig.KafkaClientSpecific,
                TopicPartition = new TopicPartition(kafkaConfig.Topic, SingleSupportedPartition),
                InfoTimeout = kafkaConfig.InfoTimeout
            };
        }