/// <inheritdoc cref="IInMemoryTopic.Subscribe" />
public void Subscribe(MockedKafkaConsumer consumer)
{
    Check.NotNull(consumer, nameof(consumer));

    lock (_consumersLock)
    {
        _consumers.Add(consumer);

        // First consumer of this group: seed the committed offsets map with
        // Offset.Unset for every partition of the topic.
        if (!_committedOffsets.ContainsKey(consumer.GroupId))
        {
            var initialOffsets = new ConcurrentDictionary<Partition, Offset>();

            foreach (var partition in _partitions)
            {
                initialOffsets[partition.Partition.Value] = Offset.Unset;
            }

            _committedOffsets[consumer.GroupId] = initialOffsets;
        }

        // Schedule at most one pending rebalance per group.
        if (!_groupsPendingRebalance.Contains(consumer.GroupId))
        {
            _groupsPendingRebalance.Add(consumer.GroupId);

            // Rebalance asynchronously to mimic the real Kafka
            Task.Run(() => Rebalance(consumer.GroupId));
        }
    }
}
/// <inheritdoc cref="IInMemoryTopic.Unsubscribe" />
public void Unsubscribe(MockedKafkaConsumer consumer)
{
    Check.NotNull(consumer, nameof(consumer));

    lock (_consumersLock)
    {
        _consumers.Remove(consumer);

        // Unlike Subscribe, the rebalance happens synchronously while still
        // holding the lock, so the group is reassigned before returning.
        Rebalance(consumer.GroupId);
    }
}
/// <inheritdoc cref="IConfluentConsumerBuilder.Build" />
public IConsumer<byte[]?, byte[]?> Build()
{
    if (_config == null)
    {
        throw new InvalidOperationException("SetConfig must be called to provide the consumer configuration.");
    }

    // Wire every handler captured by the builder into the new consumer.
    return new MockedKafkaConsumer(_config, _topics)
    {
        StatisticsHandler = _statisticsHandler,
        ErrorHandler = _errorHandler,
        PartitionsAssignedHandler = _partitionsAssignedHandler,
        PartitionsRevokedHandler = _partitionsRevokedHandler,
        OffsetsCommittedHandler = _offsetsCommittedHandler
    };
}
// Determines whether the consumer has caught up: it is either disposed, or it
// has committed past the last produced offset of every assigned partition.
private bool HasFinishedConsuming(MockedKafkaConsumer consumer)
{
    // A disposed consumer will never consume anything else.
    if (consumer.Disposed)
    {
        return true;
    }

    // Before the partition assignment the consumer hasn't started yet.
    if (!consumer.PartitionsAssigned)
    {
        return false;
    }

    var committedOffsets = _committedOffsets[consumer.GroupId];

    foreach (var topicPartition in consumer.Assignment)
    {
        var lastOffset = _partitions[topicPartition.Partition].LastOffset;

        // Still work to do when something was produced (lastOffset >= 0) and
        // the committed offset isn't past it yet.
        if (lastOffset >= 0 && committedOffsets[topicPartition.Partition] <= lastOffset)
        {
            return false;
        }
    }

    return true;
}