/// <summary>
/// Builds a Kafka receiver for the given flow, wrapping the factory-produced
/// settings in state-init-specific settings before constructing the receiver.
/// </summary>
public IKafkaMessageFlowReceiver Create(IMessageFlow messageFlow)
{
    var receiverSettings = _kafkaSettingsFactory.CreateReceiverSettings(messageFlow);
    var stateInitSettings = new StateInitKafkaMessageFlowReceiverSettings(receiverSettings);
    return new KafkaMessageFlowReceiver(stateInitSettings, _tracer);
}
/// <summary>
/// Maps a message flow (by Id) to its MS ServiceBus receiver settings.
/// </summary>
/// <exception cref="ArgumentException">Thrown for flows with no defined ServiceBus settings.</exception>
public IServiceBusMessageReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    var flowId = messageFlow.Id;

    if (flowId == ImportFactsFromErmFlow.Instance.Id)
    {
        return new Settings { ConnectionString = _serviceBusConnectionString, TransportEntityPath = _ermOperationsFlowTopic.Value };
    }

    if (flowId == CommonEventsFlow.Instance.Id)
    {
        return new Settings { ConnectionString = _serviceBusConnectionString, TransportEntityPath = _commonEventsFlowTopic.Value };
    }

    if (flowId == StatisticsEventsFlow.Instance.Id)
    {
        return new Settings { ConnectionString = _serviceBusConnectionString, TransportEntityPath = _statisticsEventsFlowTopic.Value };
    }

    throw new ArgumentException($"Flow '{messageFlow.Description}' settings for MS ServiceBus are undefined");
}
/// <summary>
/// Resolves a sessionless ServiceBus event-logging strategy for the flow,
/// overriding the sender-settings dependency with flow-specific settings.
/// </summary>
private IEventLoggingStrategy<TEvent> ResolveServiceBusStrategy<TEvent>(IMessageFlow flow)
{
    var senderSettings = _settingsFactory.CreateSenderSettings(flow);
    var settingsOverride = new DependencyOverride<IServiceBusMessageSenderSettings>(senderSettings);
    return _unityContainer.Resolve<SessionlessServiceBusEventLoggingStrategy<TEvent>>(settingsOverride);
}
/// <summary>
/// Maps a message flow to its MS ServiceBus receiver settings.
/// </summary>
/// <exception cref="ArgumentException">Thrown for flows with no defined ServiceBus settings.</exception>
public IServiceBusMessageReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    // fixed: each "return new Settings { ... } } ;" lacked the ';' terminating the
    // return statement (stray ';' sat outside the if-block), which does not compile.
    if (ErmFactsFlow.Instance.Equals(messageFlow))
    {
        return new Settings
        {
            ConnectionString = _serviceBusConnectionString,
            TransportEntityPath = _ermOperationsFlowTopic.Value,
        };
    }

    if (AggregatesFlow.Instance.Equals(messageFlow))
    {
        return new Settings
        {
            ConnectionString = _serviceBusConnectionString,
            TransportEntityPath = _commonEventsFlowTopic.Value,
        };
    }

    if (MessagesFlow.Instance.Equals(messageFlow))
    {
        return new Settings
        {
            ConnectionString = _serviceBusConnectionString,
            // NOTE(review): "_mesageEventsFlowTopic" looks like a typo for "_messageEventsFlowTopic",
            // but the field is declared elsewhere, so the name is kept as-is.
            TransportEntityPath = _mesageEventsFlowTopic.Value,
        };
    }

    throw new ArgumentException($"Flow '{messageFlow.Description}' settings for MS ServiceBus are undefined");
}
// Streams messages for the flow from Kafka into the database in batches, until the
// consumer has caught up with the per-partition end offsets snapshotted at start.
private void LoadDataFromKafka2Db(IMessageFlow messageFlow, IReadOnlyCollection<Type> dataObjectTypes, DataConnection dataConnection, int batchSize, int bulkReplaceCommandTimeoutSec)
{
    var actors = CreateActors(dataObjectTypes, dataConnection, new BulkCopyOptions { BulkCopyTimeout = bulkReplaceCommandTimeoutSec });

    // Snapshot current end offsets per partition; Offset.Unset marks "nothing consumed yet".
    var initialStats = _kafkaMessageFlowInfoProvider.GetFlowStats(messageFlow)
        .ToDictionary(x => x.TopicPartition, x => new MessageFlowStats(x.TopicPartition, x.End, Offset.Unset));

    using var receiver = _receiverFactory.Create(messageFlow);

    while (true)
    {
        var batch = receiver.ReceiveBatch(batchSize);
        var bulkCommands = _commandFactory.CreateCommands(batch);
        if (bulkCommands.Count > 0)
        {
            // All actors execute inside one serializable transaction per batch.
            // NOTE(review): Timeout = TimeSpan.Zero presumably disables the transaction
            // timeout rather than making it instant — confirm against TransactionOptions docs.
            using var scope = new TransactionScope(TransactionScopeOption.RequiresNew, new TransactionOptions { IsolationLevel = IsolationLevel.Serializable, Timeout = TimeSpan.Zero });
            foreach (var actor in actors)
            {
                actor.ExecuteCommands(bulkCommands);
            }

            scope.Complete();
        }

        receiver.CompleteBatch(batch);

        // Highest offset seen in this batch, per partition.
        var batchStats = batch
            .GroupBy(x => x.TopicPartition)
            .ToDictionary(x => x.Key, x => x.Max(y => y.Offset.Value));
        foreach (var batchStat in batchStats)
        {
            // A message from a partition we never snapshotted means inconsistent state — fail fast.
            if (!initialStats.TryGetValue(batchStat.Key, out var initialStat))
            {
                throw new KeyNotFoundException(batchStat.Key.ToString());
            }

            // +1 turns "last consumed offset" into "next offset to consume",
            // making it directly comparable with the partition end offset.
            var currentStat = new MessageFlowStats(batchStat.Key, initialStat.End, batchStat.Value + 1);
            _tracer.Info($"Topic {currentStat.TopicPartition}, End: {currentStat.End}, Offset: {currentStat.Offset}, Lag: {currentStat.Lag}");
            initialStats[batchStat.Key] = currentStat;
        }

        // Done once every partition has zero (or negative) lag against the initial snapshot.
        var complete = initialStats.Values.All(x => x.Lag <= 0);
        if (complete)
        {
            _tracer.Info($"Kafka state init for flow {messageFlow.Description} complete");
            break;
        }
    }
}
/// <summary>
/// Looks up pre-configured Kafka receiver settings for the given message flow.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown when no settings are mapped for the flow.</exception>
public IKafkaMessageFlowReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    if (!_flows2ConsumerSettingsMap.TryGetValue(messageFlow, out var settings))
    {
        // fixed: the single-string ArgumentOutOfRangeException ctor treats its argument as the
        // parameter *name*, so the message was reported as "Parameter name: ...". Also the
        // message wrongly said "info settings" in a receiver-settings method, and had a
        // grammar slip ("doesn't has").
        throw new ArgumentOutOfRangeException(
            nameof(messageFlow),
            $"Can't create kafka receiver settings. Specified message flow \"{messageFlow.GetType().Name}\" doesn't have an appropriate config");
    }

    return settings;
}
/// <summary>
/// Streams messages for the flow from Kafka into the database in batches, stopping
/// once the consumed offset reaches the topic end offset captured at start.
/// </summary>
private void LoadDataFromKafka2Db(IMessageFlow messageFlowForKafkaTopic, IReadOnlyCollection<Type> dataObjectTypes, DataConnection dataConnection, int batchSize, int bulkReplaceCommandTimeoutSec)
{
    var targetMessageFlowDescription = messageFlowForKafkaTopic.GetType().Name;
    var actors = CreateActors(dataObjectTypes, dataConnection, new BulkCopyOptions { BulkCopyTimeout = bulkReplaceCommandTimeoutSec });

    using var receiver = _receiverFactory.Create(messageFlowForKafkaTopic);

    // Retry added because of https://github.com/confluentinc/confluent-kafka-dotnet/issues/86
    var lastTargetMessageOffset = Policy.Handle<KafkaException>(exception => exception.Error.Code == ErrorCode.LeaderNotAvailable)
                                        .WaitAndRetryForever(i => TimeSpan.FromSeconds(5), (exception, waitSpan) => _tracer.Warn(exception, $"Can't get size of kafka topic. Message flow: {targetMessageFlowDescription}. Wait span: {waitSpan}"))
                                        .ExecuteAndCapture(() => _kafkaMessageFlowInfoProvider.GetFlowSize(messageFlowForKafkaTopic) - 1)
                                        .Result;

    _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription}. Last target message offset: {lastTargetMessageOffset}");

    var resolvedCommandFactories = _commandFactories.Where(f => f.AppropriateFlows.Contains(messageFlowForKafkaTopic))
                                                    .ToList();

    for (var distance = lastTargetMessageOffset; distance > 0;)
    {
        var batch = receiver.ReceiveBatch(batchSize);

        // guard added: the Kafka client may return an empty batch (e.g. during warm-up);
        // calling Last() on it would throw InvalidOperationException. The sibling
        // transactional implementation of this loader already has this guard.
        if (batch.Count == 0)
        {
            continue;
        }

        var lastMessageOffset = batch.Last().Offset.Value;
        distance = lastTargetMessageOffset - lastMessageOffset;

        // fixed: this interpolated string was split by a raw line break in the source,
        // which does not compile; rejoined into a single literal.
        _tracer.Info($"Flow: {targetMessageFlowDescription}. Received messages: {batch.Count}. Last message offset for received batch: {lastMessageOffset}. Target and current offsets distance: {distance}");

        var bulkCommands = resolvedCommandFactories.SelectMany(factory => factory.CreateCommands(batch)).ToList();
        if (bulkCommands.Count > 0)
        {
            // All actors execute inside one serializable transaction per batch.
            using var scope = new TransactionScope(TransactionScopeOption.RequiresNew, new TransactionOptions { IsolationLevel = IsolationLevel.Serializable, Timeout = TimeSpan.Zero });
            foreach (var actor in actors)
            {
                actor.ExecuteCommands(bulkCommands);
            }

            scope.Complete();
        }

        receiver.CompleteBatch(batch);
    }

    _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription} finished");
}
/// <summary>
/// Returns the high watermark offset of the flow's Kafka topic partition.
/// </summary>
public long GetFlowSize(IMessageFlow messageFlow)
{
    var infoSettings = _kafkaSettingsFactory.CreateInfoSettings(messageFlow);
    using (var consumer = new Consumer(infoSettings.Config))
    {
        return consumer.QueryWatermarkOffsets(infoSettings.TopicPartition, infoSettings.InfoTimeout).High;
    }
}
/// <summary>
/// Returns the committed (already processed) offset of the flow's Kafka topic partition.
/// </summary>
public long GetFlowProcessedSize(IMessageFlow messageFlow)
{
    var infoSettings = _kafkaSettingsFactory.CreateInfoSettings(messageFlow);
    using (var consumer = new Consumer(infoSettings.Config))
    {
        var partitions = new[] { infoSettings.TopicPartition };
        var committed = consumer.Committed(partitions, infoSettings.InfoTimeout).First();
        return committed.Offset;
    }
}
/// <summary>
/// Returns the high watermark offset of the flow's topic (partition zero).
/// </summary>
public long GetFlowSize(IMessageFlow messageFlow)
{
    var settings = _kafkaSettingsFactory.CreateReceiverSettings(messageFlow);
    using var consumer = new ConsumerBuilder<Ignore, Ignore>(settings.Config).Build();

    var partition = new TopicPartition(settings.TopicPartitionOffset.Topic, ZeroPartition);
    var watermarks = consumer.QueryWatermarkOffsets(partition, settings.PollTimeout);
    return watermarks.High;
}
/// <summary>
/// Returns the committed (already processed) offset of the flow's topic (partition zero).
/// </summary>
public long GetFlowProcessedSize(IMessageFlow messageFlow)
{
    var settings = _kafkaSettingsFactory.CreateReceiverSettings(messageFlow);
    using var consumer = new ConsumerBuilder<Ignore, Ignore>(settings.Config).Build();

    var partition = new TopicPartition(settings.TopicPartitionOffset.Topic, ZeroPartition);
    var committed = consumer.Committed(new[] { partition }, settings.PollTimeout).First();
    return committed.Offset;
}
/// <summary>
/// Consumes the last message currently present in the flow's topic (partition zero).
/// NOTE(review): assumes the topic is non-empty — with High == 0 the assigned offset
/// would be -1; confirm callers guarantee at least one message.
/// </summary>
public ConsumeResult<Ignore, Ignore> TryGetFlowLastMessage(IMessageFlow messageFlow)
{
    var settings = _kafkaSettingsFactory.CreateReceiverSettings(messageFlow);
    using var consumer = new ConsumerBuilder<Ignore, Ignore>(settings.Config).Build();

    var partition = new TopicPartition(settings.TopicPartitionOffset.Topic, ZeroPartition);
    var watermarks = consumer.QueryWatermarkOffsets(partition, settings.PollTimeout);

    // Seek directly to the final message (High is the next-to-be-written offset).
    var lastOffset = new TopicPartitionOffset(partition, watermarks.High - 1);
    consumer.Assign(new[] { lastOffset });
    return consumer.Consume(settings.PollTimeout);
}
/// <summary>
/// Tries to consume the last message currently present in the flow's topic partition.
/// </summary>
/// <returns>true when a message was consumed within the timeout; false otherwise.</returns>
public bool TryGetFlowLastMessage(IMessageFlow messageFlow, out Message message)
{
    var infoSettings = _kafkaSettingsFactory.CreateInfoSettings(messageFlow);
    using (var consumer = new Consumer(infoSettings.Config))
    {
        var watermarks = consumer.QueryWatermarkOffsets(infoSettings.TopicPartition, infoSettings.InfoTimeout);

        // Seek directly to the final message (High is the next-to-be-written offset).
        var lastOffset = new TopicPartitionOffset(infoSettings.TopicPartition, watermarks.High - 1);
        consumer.Assign(new[] { lastOffset });
        return consumer.Consume(out message, infoSettings.InfoTimeout);
    }
}
/// <summary>
/// Maps a message flow to its MS ServiceBus receiver settings (ERM facts only).
/// </summary>
/// <exception cref="ArgumentException">Thrown for flows with no defined ServiceBus settings.</exception>
public IServiceBusMessageReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    if (ErmFactsFlow.Instance.Equals(messageFlow))
    {
        // fixed: "return new Settings { ... } } ;" lacked the ';' terminating the
        // return statement (stray ';' sat outside the if-block), which does not compile.
        return new Settings
        {
            ConnectionString = ServiceBusConnectionString,
            TransportEntityPath = ErmFactsTopic.Value,
        };
    }

    throw new ArgumentException($"Flow '{messageFlow.Description}' settings for MS ServiceBus are undefined");
}
/// <summary>
/// Builds Kafka info settings (client config, topic partition, timeout) for the flow.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown when no consumer settings are mapped for the flow.</exception>
public IKafkaMessageFlowInfoSettings CreateInfoSettings(IMessageFlow messageFlow)
{
    if (!_flows2ConsumerSettingsMap.TryGetValue(messageFlow, out var kafkaConfig))
    {
        // fixed: the single-string ArgumentOutOfRangeException ctor treats its argument as the
        // parameter *name*, so the message was reported as "Parameter name: ...";
        // also corrected the grammar ("doesn't has" -> "doesn't have").
        throw new ArgumentOutOfRangeException(
            nameof(messageFlow),
            $"Can't create kafka info settings. Specified message flow \"{messageFlow.GetType().Name}\" doesn't have an appropriate config");
    }

    return new KafkaMessageFlowInfoSettings
    {
        Config = kafkaConfig.KafkaClientSpecific,
        TopicPartition = new TopicPartition(kafkaConfig.Topic, SingleSupportedPartition),
        InfoTimeout = kafkaConfig.InfoTimeout,
    };
}
/// <summary>
/// Builds Kafka receiver settings (client config, topic/partition/offset, timeout) for the flow.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown when no consumer settings are mapped for the flow.</exception>
public IKafkaMessageFlowReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    if (!_flows2ConsumerSettingsMap.TryGetValue(messageFlow, out var kafkaConfig))
    {
        // fixed: the single-string ArgumentOutOfRangeException ctor treats its argument as the
        // parameter *name*, so the message was reported as "Parameter name: ...";
        // also corrected the grammar ("doesn't has" -> "doesn't have").
        throw new ArgumentOutOfRangeException(
            nameof(messageFlow),
            $"Can't create kafka receiver settings. Specified message flow \"{messageFlow.GetType().Name}\" doesn't have an appropriate config");
    }

    return new KafkaMessageFlowReceiverSettings
    {
        Config = kafkaConfig.KafkaClientSpecific,
        TopicPartitionOffsets = new[] { new TopicPartitionOffset(kafkaConfig.Topic, SingleSupportedPartition, _offset) },
        // NOTE(review): "PoolTimeout" looks like a typo for "PollTimeout" on the config type;
        // the property is declared elsewhere, so the name is kept as-is.
        PollTimeout = kafkaConfig.PoolTimeout,
    };
}
/// <summary>
/// Collects per-partition stats for the flow: the high watermark (end offset)
/// paired with the committed offset of each topic partition.
/// </summary>
public IReadOnlyCollection<MessageFlowStats> GetFlowStats(IMessageFlow messageFlow)
{
    var settings = _kafkaSettingsFactory.CreateReceiverSettings(messageFlow);
    using var consumer = new ConsumerBuilder<Ignore, Ignore>(settings.Config).Build();

    var topics = settings.TopicPartitionOffsets.Select(x => x.Topic);
    var committed = consumer.Committed(GetTopicPartitions(topics), settings.PollTimeout);

    var stats = new List<MessageFlowStats>();
    foreach (var tpo in committed)
    {
        var watermarks = consumer.QueryWatermarkOffsets(tpo.TopicPartition, settings.PollTimeout);
        stats.Add(new MessageFlowStats(tpo.TopicPartition, watermarks.High, tpo.Offset));
    }

    return stats;
}
/// <summary>
/// Decorates the wrapped factory's receiver settings for state-init runs:
/// all partitions are read from the beginning, except for InfoRussia which
/// starts at a fixed offset.
/// </summary>
public KafkaMessageFlowReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    var settings = _wrap.CreateReceiverSettings(messageFlow);

    // State init always reads every partition from Offset.Beginning.
    settings.TopicPartitionOffsets = settings.TopicPartitionOffsets
                                             .Select(x => new TopicPartitionOffset(x.Topic, Partition.Any, Offset.Beginning));

    // Workaround for InfoRussia: the data of interest only starts at a fixed offset.
    if (messageFlow.Equals(InfoRussiaFactsFlow.Instance))
    {
        const long InfoRussiaOffset = 7_000_000;
        settings.TopicPartitionOffsets = settings.TopicPartitionOffsets
                                                 .Select(x => new TopicPartitionOffset(x.Topic, x.Partition, InfoRussiaOffset));
    }

    return settings;
}
/// <summary>
/// Builds Kafka receiver settings for a facts flow by resolving the flow's topic
/// name(s) from configuration; the combined KafkaFactsFlow covers all fact topics.
/// </summary>
/// <exception cref="ArgumentException">Thrown when the flow maps to no known topics.</exception>
public KafkaMessageFlowReceiverSettings CreateReceiverSettings(IMessageFlow messageFlow)
{
    // Local helper: read a required topic name from the config file.
    string Topic(string settingName) => ConfigFileSetting.String.Required(settingName).Value;

    var topics = new List<string>();
    if (messageFlow.Equals(KafkaFactsFlow.Instance))
    {
        topics.Add(Topic("AmsFactsTopic"));
        topics.Add(Topic("RulesetFactsTopic"));
        topics.Add(Topic("InfoRussiaFactsTopic"));
        topics.Add(Topic("FijiFactsTopic"));
    }
    else if (messageFlow.Equals(AmsFactsFlow.Instance))
    {
        topics.Add(Topic("AmsFactsTopic"));
    }
    else if (messageFlow.Equals(RulesetFactsFlow.Instance))
    {
        topics.Add(Topic("RulesetFactsTopic"));
    }
    else if (messageFlow.Equals(InfoRussiaFactsFlow.Instance))
    {
        topics.Add(Topic("InfoRussiaFactsTopic"));
    }
    else if (messageFlow.Equals(FijiFactsFlow.Instance))
    {
        topics.Add(Topic("FijiFactsTopic"));
    }

    if (topics.Count == 0)
    {
        throw new ArgumentException($"Unknown message flows provided");
    }

    return new KafkaMessageFlowReceiverSettings
    {
        Config = _kafkaConfig,
        TopicPartitionOffsets = topics.Select(x => new TopicPartitionOffset(x, Partition.Any, Offset.Unset)),
    };
}
public IEnumerable <IMessageFlow> GetConsumableFlows(IMessageFlow flow) => flow switch {
/// <summary>
/// Creates a ServiceBus receiver bound to the specified message flow.
/// </summary>
public IServiceBusMessageFlowReceiver Create(IMessageFlow messageFlow)
{
    var receiverSettings = _settingsFactory.CreateReceiverSettings(messageFlow);
    return new ServiceBusMessageFlowReceiver(_tracer, receiverSettings, _renewer, messageFlow);
}
/// <summary>
/// Streams messages for the flow from Kafka into the database inside a single
/// serializable transaction, stopping once the consumed offset reaches the topic
/// end offset captured at start.
/// </summary>
private void LoadDataFromKafka2Db(IMessageFlow messageFlowForKafkaTopic, IReadOnlyCollection<Type> dataObjectTypes, DataConnection dataConnection, int bulkReplaceCommandTimeoutSec)
{
    var targetMessageFlowDescription = messageFlowForKafkaTopic.GetType().Name;
    var actors = CreateActors(dataObjectTypes, dataConnection, new BulkCopyOptions { BulkCopyTimeout = bulkReplaceCommandTimeoutSec });

    using (var receiver = _receiverFactory.Create(messageFlowForKafkaTopic))
    {
        // Retry added because of https://github.com/confluentinc/confluent-kafka-dotnet/issues/86
        var lastTargetMessageOffset = Policy.Handle<Confluent.Kafka.KafkaException>(exception => exception.Error.Code == Confluent.Kafka.ErrorCode.LeaderNotAvailable)
                                            .WaitAndRetryForever(i => TimeSpan.FromSeconds(5), (exception, waitSpan) => _tracer.Warn(exception, $"Can't get size of kafka topic. Message flow: {targetMessageFlowDescription}. Wait span: {waitSpan}"))
                                            .ExecuteAndCapture(() => _kafkaMessageFlowInfoProvider.GetFlowSize(messageFlowForKafkaTopic) - 1)
                                            .Result;

        _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription}. Last target message offset: {lastTargetMessageOffset}");

        var resolvedCommandFactories = _commandFactories.Where(f => f.AppropriateFlows.Contains(messageFlowForKafkaTopic))
                                                        .ToList();

        using (var transation = new TransactionScope(TransactionScopeOption.RequiresNew, new TransactionOptions { IsolationLevel = IsolationLevel.Serializable, Timeout = TimeSpan.Zero }))
        {
            long currentMessageOffset = 0;
            int receivedMessagesQuantity = 0;
            while (currentMessageOffset < lastTargetMessageOffset)
            {
                var batch = receiver.ReceiveBatch(_batchSizeSettings.BatchSize);

                // fixed: this comment was split by a raw line break in the source so part of its
                // text fell outside comment syntax (did not compile); repaired and translated.
                // Keep looping until messages arrive from Kafka: the Kafka client has a warm-up
                // period, so for a while after start-up it may return empty batches even though
                // the topic\partition actually contains messages.
                if (batch.Count == 0)
                {
                    continue;
                }

                receivedMessagesQuantity += batch.Count;
                currentMessageOffset = batch.Last().Offset.Value;

                _tracer.Info($"Flow: {targetMessageFlowDescription}. Received messages: {batch.Count}. Last message offset for received batch: {currentMessageOffset}. Target and current offsets distance: {lastTargetMessageOffset - currentMessageOffset}");

                var bulkCommands = resolvedCommandFactories.SelectMany(factory => factory.CreateCommands(batch))
                                                           .ToList();
                if (bulkCommands.Count > 0)
                {
                    foreach (var actor in actors)
                    {
                        actor.ExecuteCommands(bulkCommands);
                    }
                }

                receiver.CompleteBatch(batch);
            }

            _tracer.Info($"Receiving messages from kafka for flow: {targetMessageFlowDescription} finished. Received messages quantity: {receivedMessagesQuantity}");

            transation.Complete();
        }
    }
}
/// <summary>Couples an event with the message flow it belongs to.</summary>
public FlowEvent(IMessageFlow flow, IEvent @event)
{
    Flow = flow;
    Event = @event;
}
/// <summary>
/// Creates a ServiceBus receiver bound to the specified message flow.
/// </summary>
public IServiceBusMessageFlowReceiver Create(IMessageFlow messageFlow) =>
    new ServiceBusMessageFlowReceiver(_tracer, _settingsFactory.CreateReceiverSettings(messageFlow), _renewer, messageFlow);
/// <summary>
/// Resolves a ServiceBus receiver from the container, injecting the flow
/// as a dependency override.
/// </summary>
public IServiceBusMessageFlowReceiver Create(IMessageFlow messageFlow)
{
    var flowOverride = new DependencyOverride(typeof(IMessageFlow), messageFlow);
    return _container.Resolve<ServiceBusMessageFlowReceiver>(flowOverride);
}
/// <summary>Command to replicate a Kafka message flow in bulk.</summary>
/// <param name="batchSize">Messages consumed per batch; defaults to 5000.</param>
public KafkaReplicationCommand(IMessageFlow messageFlow, ReplicateInBulkCommand replicateInBulkCommand, int batchSize = 5000) =>
    (MessageFlow, ReplicateInBulkCommand, BatchSize) = (messageFlow, replicateInBulkCommand, batchSize);
/// <summary>Couples an event with the message flow it belongs to.</summary>
public FlowEvent(IMessageFlow flow, IEvent @event) => (Flow, Event) = (flow, @event);
/// <summary>
/// Serializes an aggregate operation into a final-processing record addressed
/// to the target message flow.
/// </summary>
public PerformedOperationFinalProcessing Serialize(AggregateOperation operation, IMessageFlow targetFlow)
{
    var entityType = _registry.GetEntityName(operation.AggregateType);

    var processing = new PerformedOperationFinalProcessing
    {
        CreatedOn = DateTime.UtcNow,
        MessageFlowId = targetFlow.Id,
        EntityId = operation.AggregateId,
        EntityTypeId = entityType.Id,
        OperationId = GetIdentity(operation),
    };
    return processing;
}
/// <summary>
/// Sender settings are not supported by this factory; always throws.
/// </summary>
/// <exception cref="ArgumentException">Always thrown.</exception>
public IServiceBusMessageSenderSettings CreateSenderSettings(IMessageFlow messageFlow) =>
    throw new ArgumentException($"Flow '{messageFlow.Description}' settings for MS ServiceBus are undefined");
/// <summary>
/// Creates a Kafka topic consumer wrapper configured for the given flow.
/// </summary>
public IKafkaMessageFlowReceiver Create(IMessageFlow messageFlow) =>
    new KafkaTopicConsumerWrapper(_kafkaSettingsFactory.CreateReceiverSettings(messageFlow), _tracer);
/// <summary>Command to replicate a Kafka message flow in bulk.</summary>
public KafkaReplicationCommand(IMessageFlow messageFlow, ReplicateInBulkCommand replicateInBulkCommand) =>
    (MessageFlow, ReplicateInBulkCommand) = (messageFlow, replicateInBulkCommand);
/// <summary>
/// Serializes a statistics-recalculation operation into a final-processing
/// record addressed to the target message flow.
/// </summary>
public PerformedOperationFinalProcessing Serialize(RecalculateStatisticsOperation operation, IMessageFlow targetFlow) =>
    new PerformedOperationFinalProcessing
    {
        CreatedOn = DateTime.UtcNow,
        MessageFlowId = targetFlow.Id,
        Context = Serialize(operation).ToString(),
        OperationId = GetIdentity(operation),
    };
/// <summary>
/// Resolves a ServiceBus receiver from the container, injecting the flow
/// as a dependency override.
/// </summary>
public IServiceBusMessageFlowReceiver Create(IMessageFlow messageFlow)
{
    var flowOverride = new DependencyOverride(typeof(IMessageFlow), messageFlow);
    return _container.Resolve<ServiceBusMessageFlowReceiver>(flowOverride);
}