public static void SetSubscription(this IConsumer<string, Kadaster.KadastraalOnroerendeZaakEvent> consumer, IEnumerable<string> topics, DateTimeOffset? van, string vanafEventIdentificatie, ILogger logger)
{
    if (van.HasValue)
    {
        var timesForTopics = (from topic in topics
                              select new TopicPartitionTimestamp(topic, 0, new Timestamp(van.Value.UtcDateTime))).ToList();
        var offsets = consumer.OffsetsForTimes(timesForTopics, TimeSpan.FromSeconds(5));
        consumer.Assign(offsets, logger);
    }
    else if (!string.IsNullOrWhiteSpace(vanafEventIdentificatie))
    {
        consumer.Assign(new[] { vanafEventIdentificatie.ParseIdentificatie().ToTopicPartitionOffset() }, logger);
    }
    else
    {
        consumer.Subscribe(topics);
    }
}
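A possible call site for the SetSubscription extension above, as a sketch: the consumer config, topic name, and logger are assumed, and the event identification string is a placeholder.

// config, deserializers, and logger are assumed to be set up elsewhere;
// the topic name and event id below are placeholders.
using var consumer = new ConsumerBuilder<string, Kadaster.KadastraalOnroerendeZaakEvent>(config).Build();
var topics = new[] { "koz-events" };

// Start from a point in time; offsets are resolved via OffsetsForTimes.
consumer.SetSubscription(topics, van: DateTimeOffset.UtcNow.AddDays(-1), vanafEventIdentificatie: null, logger: logger);

// Or start from a specific event identification (parsed to topic/partition/offset).
consumer.SetSubscription(topics, van: null, vanafEventIdentificatie: "some-event-id", logger: logger);

// With neither, it falls back to a plain group subscription.
consumer.SetSubscription(topics, van: null, vanafEventIdentificatie: null, logger: logger);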
public override void PreStart()
{
    base.PreStart();
    _consumer = _settings.CreateKafkaConsumer();
    Log.Debug($"Consumer started: {_consumer.Name}");
    _consumer.OnRecord += HandleOnMessage;
    _consumer.OnConsumeError += HandleConsumeError;
    _consumer.OnError += HandleOnError;
    _consumer.OnPartitionsAssigned += HandleOnPartitionsAssigned;
    _consumer.OnPartitionsRevoked += HandleOnPartitionsRevoked;

    switch (_subscription)
    {
        case TopicSubscription ts:
            _consumer.Subscribe(ts.Topics);
            break;
        case Assignment a:
            _consumer.Assign(a.TopicPartitions);
            break;
        case AssignmentWithOffset awo:
            _consumer.Assign(awo.TopicPartitions);
            break;
    }

    _messagesReceived = GetAsyncCallback<ConsumerRecord<K, V>>(MessagesReceived);
    _partitionsAssigned = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsAssigned);
    _partitionsRevoked = GetAsyncCallback<IEnumerable<TopicPartition>>(PartitionsRevoked);
    ScheduleRepeatedly(TimerKey, _settings.PollInterval);
}
public SynergyPersistenceStash(IActorContext context, List<KeyValuePair<string, string>> kafkaConfig)
{
    _actorCell = (ActorCell)context;
    _context = context;
    _bootstrapServers = kafkaConfig.First(kvp => kvp.Key == "bootstrapserver").Value;
    _topic = kafkaConfig.First(kvp => kvp.Key == "topic").Value;

    _producerConf = new ProducerConfig
    {
        BootstrapServers = this._bootstrapServers,
        MessageTimeoutMs = 10000
    };
    _producerBuilder = new ProducerBuilder<string, string>(_producerConf)
        .SetErrorHandler(ProducerBuilderErrorHandler)
        .Build();

    _consumerConf = new ConsumerConfig
    {
        GroupId = this._topic,
        BootstrapServers = this._bootstrapServers,
        EnableAutoCommit = true
    };
    _consumerBuilder = new ConsumerBuilder<string, string>(_consumerConf)
        .SetErrorHandler(ConsumerBuilderErrorHandler)
        .Build();

    var partitionList = new List<TopicPartition>() { new TopicPartition(this._topic, 0) };
    _consumerBuilder.Assign(partitionList);
}
public static Contracts.KadastraalOnroerendeZaakEvent Consume(this IConsumer<string, Kadaster.KadastraalOnroerendeZaakEvent> consumer, string topic, int partition, long offset, bool includeVorigToestand, IMapper mapper)
{
    Contracts.KadastraalOnroerendeZaakEvent retval = null;
    consumer.Assign(new TopicPartitionOffset(topic, partition, offset));
    var result = consumer.Consume(TimeSpan.FromSeconds(5));
    if (result != null && !result.IsPartitionEOF)
    {
        retval = mapper.Map<Contracts.KadastraalOnroerendeZaakEvent>(result);
        var vorigEventIdentificatie = result.Message.Value.VorigEventIdentificatie;
        if (includeVorigToestand && !string.IsNullOrWhiteSpace(vorigEventIdentificatie))
        {
            var (t, p, o) = vorigEventIdentificatie.ParseIdentificatie();
            retval.VorigToestandKadastraalOnroerendeZaak = consumer.Consume(t, p, o, false, mapper).NieuweToestandKadastraalOnroerendeZaak;
        }
    }
    return retval;
}
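A hypothetical call of the recursive Consume extension above: read one event at an exact (topic, partition, offset) and resolve the previous state it points to. The topic name and coordinates are placeholders.

// Topic, partition, and offset are illustrative values.
var koz = consumer.Consume("koz-events", partition: 0, offset: 42L, includeVorigToestand: true, mapper: mapper);
// When the event carries a VorigEventIdentificatie, the previous state was
// fetched via a second targeted Assign/Consume round trip.
var vorig = koz?.VorigToestandKadastraalOnroerendeZaak;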
private void KafkaSub()
{
    var conf = new ConsumerConfig
    {
        GroupId = _GroupId,
        BootstrapServers = _brokerList,
        AutoOffsetReset = AutoOffsetReset.Earliest,
        EnableAutoCommit = true
    };
    KafkaSubscribe = new ConsumerBuilder<string, string>(conf)
        .SetErrorHandler((_, e) =>
        {
            Logger.Log.Error(true, string.Concat("Group ", _GroupId, ", topics ", string.Join(",", _topic), ": message queue error: ", e.Reason));
        })
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            Console.WriteLine($"Partitions assigned: [{string.Join(", ", partitions)}]");
        })
        .SetPartitionsRevokedHandler((c, partitions) =>
        {
            Console.WriteLine($"Partitions revoked: [{string.Join(", ", partitions)}]");
        })
        .Build();

    // new Partition() defaults to partition 0, so this pins every topic to its first partition.
    List<TopicPartition> topicPartitions = new List<TopicPartition>();
    foreach (var item in _topic)
    {
        topicPartitions.Add(new TopicPartition(item, new Partition()));
    }

    // Note: mixing a manual Assign with a group Subscribe on the same consumer is
    // contradictory; the first rebalance replaces the manual assignment. Pick one.
    KafkaSubscribe.Assign(topicPartitions);
    KafkaSubscribe.Subscribe(_topic);
}
public Task Initialize(TimeSpan timeout)
{
    _consumer = new ConsumerBuilder<byte[], byte[]>(_options.ToConsumerProperties())
        .SetErrorHandler((sender, errorEvent) => _logger.LogError(
            "Consume error reason: {reason}, code: {code}, is broker error: {errorType}",
            errorEvent.Reason,
            errorEvent.Code,
            errorEvent.IsBrokerError))
        .Build();

    var offsetMode = Offset.Stored;
    switch (_options.ConsumeMode)
    {
        case ConsumeMode.LastCommittedMessage:
            offsetMode = Offset.Stored;
            break;
        case ConsumeMode.StreamEnd:
            offsetMode = Offset.End;
            break;
        case ConsumeMode.StreamStart:
            offsetMode = Offset.Beginning;
            break;
    }

    _consumer.Assign(new TopicPartitionOffset(_queueProperties.Namespace, (int)_queueProperties.PartitionId, offsetMode));
    return Task.CompletedTask;
}
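For reference, the three ConsumeMode branches above map onto Confluent.Kafka's special Offset sentinels; assigning with a sentinel positions the consumer without naming a concrete offset. A sketch on any IConsumer instance (topic and partition are placeholders):

consumer.Assign(new TopicPartitionOffset("my-topic", 0, Offset.Beginning)); // oldest retained message
consumer.Assign(new TopicPartitionOffset("my-topic", 0, Offset.End));       // only messages produced from now on
consumer.Assign(new TopicPartitionOffset("my-topic", 0, Offset.Stored));    // resume from the committed offset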
public void Initialize()
{
    IDictionary<TopicPartition, long> partitionOffsets = globalStateMaintainer.Initialize();
    globalConsumer.Assign(partitionOffsets.Keys.Select(x => new TopicPartitionOffset(x, partitionOffsets[x])));
    lastFlush = DateTime.Now;
}
/// <summary>
/// Assigns the consumer to a single partition of the topic.
/// </summary>
/// <typeparam name="K">Message key.</typeparam>
/// <typeparam name="V">Message value.</typeparam>
/// <param name="consumer">Consumer.</param>
public void AssingWithConsumer<K, V>(IConsumer<K, V> consumer)
{
    if (consumer is null)
    {
        throw new ArgumentNullException(nameof(consumer));
    }
    consumer.Assign(new TopicPartition(_topicName.Value, _partition));
}
/// <summary>
/// Assigns the consumer to each watermark's topic partition at its high offset.
/// </summary>
/// <typeparam name="K">Message key.</typeparam>
/// <typeparam name="V">Message value.</typeparam>
/// <param name="consumer">Consumer.</param>
public void AssignWithConsumer<K, V>(IConsumer<K, V> consumer)
{
    if (consumer is null)
    {
        throw new ArgumentNullException(nameof(consumer));
    }
    consumer.Assign(Watermarks.Select(watermark => watermark.CreateTopicPartitionWithHighOffset()));
}
private void StartChangeLogConsumer(CancellationToken ct)
{
    _noCollect2 = new Thread(() =>
    {
        changeLogConsumer.Assign(new TopicPartition(this.tableSpecification.ChangeLogTopicName(this.columnName), this.partition));
        while (!ct.IsCancellationRequested)
        {
            var cr = changeLogConsumer.Consume(TimeSpan.FromSeconds(1));
            if (cr == null)
            {
                continue;
            }
            if (cr.IsPartitionEOF)
            {
                lock (readyMonitor)
                {
                    if (!ready)
                    {
                        ready = true;
                        Monitor.PulseAll(readyMonitor);
                    }
                }
                continue;
            }
            changeLogConsumer.Commit(cr);

            // materialize.
            var columnValue = cr.Key;
            var columnName = this.columnName;
            if (cr.Value == null)
            {
                // materialized.Remove(columnValue);
            }
            else
            {
                var o = (JObject)JsonConvert.DeserializeObject(cr.Value);
                Dictionary<string, string> dataAsDict = new Dictionary<string, string>();
                foreach (var v in o.Descendants())
                {
                    if (v.GetType() != typeof(JProperty))
                    {
                        continue;
                    }
                    dataAsDict.Add(((JProperty)v).Name, ((JProperty)v).Value.ToString());
                }
            }
            // TODO: only read from CL on startup. else maybe check?
            // materialized.Add(columnValue, dataAsDict);
        }
    });
    _noCollect2.Start();
}
private static void assertCloseToNow_byte(IConsumer<byte[], byte[]> consumer, TopicPartitionOffset tpo)
{
    consumer.Assign(new List<TopicPartitionOffset>() { tpo });
    var cr = consumer.Consume(TimeSpan.FromSeconds(10));
    Assert.NotNull(cr.Message);
    Assert.Equal(TimestampType.CreateTime, cr.Message.Timestamp.Type);
    Assert.True(Math.Abs((cr.Message.Timestamp.UtcDateTime - DateTime.UtcNow).TotalSeconds) < 120);
}
public Task StartAsync(CancellationToken cancellationToken)
{
    _logger.LogInformation("starting consuming");
    // _consumer.Subscribe("my_topic");
    TopicPartition partitionToReadFrom = new TopicPartition("my_topic", 1); // a list of partitions can be passed here as well
    _consumer.Assign(partitionToReadFrom);

    long offsetToReadFrom = 10L;
    _consumer.Seek(new TopicPartitionOffset(partitionToReadFrom, offsetToReadFrom));

    int numberOfMessagesToRead = 5;
    bool keepOnReading = true;
    int numberOfMessagesReadSoFar = 0;
    try
    {
        while (keepOnReading)
        {
            try
            {
                ConsumeResult<string, string> cr = _consumer.Consume(cancellationToken);
                numberOfMessagesReadSoFar++;
                Console.WriteLine($"Consumed message '{cr.Message.Value}' at: '{cr.TopicPartitionOffset.Offset}' with key: {cr.Message.Key}.");
                if (numberOfMessagesReadSoFar >= numberOfMessagesToRead)
                {
                    keepOnReading = false;
                }
            }
            catch (ConsumeException e)
            {
                Console.WriteLine($"Error occurred: {e.Error.Reason}");
            }
        }
    }
    catch (OperationCanceledException)
    {
        Console.WriteLine("Received shut down info");
    }
    finally
    {
        // Ensure the consumer leaves the group cleanly and final offsets are committed.
        _consumer.Close();
    }
    return Task.CompletedTask;
}
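The Assign-then-Seek pair above can be collapsed into a single call: Assign also accepts a TopicPartitionOffset, so the start offset can be set during assignment. A sketch with the same coordinates:

// Assign and position in one step; equivalent to Assign(partition) + Seek(offset).
_consumer.Assign(new TopicPartitionOffset("my_topic", 1, 10L));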
private static void ConsumeMessage(IConsumer<byte[], byte[]> consumer, DeliveryResult<Null, string> dr, string testString)
{
    consumer.Assign(new List<TopicPartitionOffset>() { dr.TopicPartitionOffset });
    var r = consumer.Consume(TimeSpan.FromSeconds(10));
    Assert.NotNull(r?.Message);
    Assert.Equal(testString, r.Message.Value == null ? null : Encoding.UTF8.GetString(r.Message.Value, 0, r.Message.Value.Length));
    Assert.Null(r.Message.Key);
    Assert.Equal(r.Message.Timestamp.Type, dr.Message.Timestamp.Type);
    Assert.Equal(r.Message.Timestamp.UnixTimestampMs, dr.Message.Timestamp.UnixTimestampMs);
}
/// <summary>
/// Set the topics to listen on: entries without a partition are subscribed
/// as a group, entries with a partition are assigned manually.
/// </summary>
/// <param name="consumer"></param>
/// <param name="subscribers"></param>
private void SetSubscribers(IConsumer<string, object> consumer, params KafkaSubscriber[] subscribers)
{
    var topics = subscribers.Where(f => f.Partition == null).Select(f => f.Topic).ToArray();
    var topicPartitions = subscribers.Where(f => f.Partition != null).Select(f => new TopicPartition(f.Topic, new Partition(f.Partition.Value))).ToArray();
    if (topics.Length > 0)
    {
        consumer.Subscribe(topics);
    }
    if (topicPartitions.Length > 0)
    {
        consumer.Assign(topicPartitions);
    }
}
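A hypothetical call of SetSubscribers, assuming KafkaSubscriber exposes a Topic string and a nullable Partition; topic names are placeholders:

SetSubscribers(consumer,
    new KafkaSubscriber { Topic = "orders" },                // no partition: group subscription
    new KafkaSubscriber { Topic = "audit", Partition = 0 }); // fixed partition: manual assignment

Note that when both groups are non-empty, the same consumer ends up mixing group subscription with manual assignment, which carries the same caveat flagged in the KafkaSub example above.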
private static void Assign(this IConsumer<string, Kadaster.KadastraalOnroerendeZaakEvent> consumer, IEnumerable<TopicPartitionOffset> offsets, ILogger logger)
{
    // Caveat: Assign(TopicPartitionOffset) replaces the consumer's entire assignment,
    // so after this loop only the last offset that succeeded remains assigned.
    foreach (var offset in offsets)
    {
        try
        {
            consumer.Assign(offset);
        }
        catch (KafkaException ex)
        {
            logger.LogError(ex, $"Assign({offset}) threw an exception");
        }
    }
}
private static IObservable<ConsumeResult<TKey, TValue>> CreateObservable<TKey, TValue>(
    IConsumer<TKey, TValue> consumer,
    TopicPartition tp,
    SeekPosition startingPosition,
    CancellationToken cancellationToken)
{
    return Observable.Create<ConsumeResult<TKey, TValue>>(o =>
    {
        var cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
        var innerCancellationToken = cts.Token;
        _ = Task.Run(
            () =>
            {
                // Pattern variable renamed from "o" to avoid shadowing the observer parameter.
                ConfluentKafkaOffset confluentKafkaOffset = startingPosition switch
                {
                    SeekPosition.FromWatermark fromWatermark => new ConfluentKafkaOffset(fromWatermark.Watermark.Item),
                    _ => ConfluentKafkaOffset.End,
                };
                var tpo = new TopicPartitionOffset(tp, confluentKafkaOffset);
                if (tpo.Partition.Value == -1 && tpo.Offset.Value == -1)
                {
                    consumer.Subscribe(topic: tp.Topic);
                }
                else
                {
                    Console.Out.WriteLine($"consumerAssign(topic={tpo.Topic} partition={tpo.Partition.Value} offset={tpo.Offset.Value})");
                    consumer.Assign(tpo);
                }
                while (!innerCancellationToken.IsCancellationRequested)
                {
                    var msg = consumer.Consume(innerCancellationToken);
                    Console.WriteLine($"Received {msg.Topic}#{msg.Partition.Value}#{msg.Offset.Value}: {msg.Message.Value}");
                    o.OnNext(msg);
                    innerCancellationToken.ThrowIfCancellationRequested();
                }
                o.OnCompleted();
            },
            innerCancellationToken);
        return new CancellationDisposable(cts);
    });
}
private void Run()
{
    var random = new Random();
    while (!token.IsCancellationRequested)
    {
        consumer.Consume(timeout);
        if (consumer.Assignment.Count > 0 && random.Next(0, 100) >= 99) // P = 1/100
        {
            consumer.Unassign();
            var p = random.Next(0, 8);
            Console.WriteLine($"Manually assigning consumer {consumer.MemberId} to partition {p}");
            consumer.Assign(new TopicPartition("test", p));
        }
    }
}
/// <summary>
/// Replays messages from the given start offsets until a message's timestamp
/// passes <paramref name="endTimestamp"/> on every partition.
/// </summary>
protected virtual async Task Replay(IConsumer<string, byte[]> consumer, List<TopicPartitionOffset> startOffsets, DateTime endTimestamp, Func<IReceiverMessage, Task> callback, bool enableAutoOffsetStore)
{
    consumer.Assign(startOffsets);
    var partitionsFinished = new bool[startOffsets.Count];
    while (true)
    {
        var result = consumer.Consume(TimeSpan.FromSeconds(5));
        if (result is null)
        {
            return;
        }
        var afterEndTimestamp = false;
        for (int i = 0; i < startOffsets.Count; i++)
        {
            if (result.TopicPartition == startOffsets[i].TopicPartition
                && result.Message.Timestamp.UtcDateTime > endTimestamp)
            {
                afterEndTimestamp = partitionsFinished[i] = true;
                break;
            }
        }
        if (partitionsFinished.All(finished => finished))
        {
            return;
        }
        if (afterEndTimestamp)
        {
            continue;
        }
        var message = new KafkaReceiverMessage(consumer, result, enableAutoOffsetStore);
        try
        {
            await callback(message);
        }
        catch { /* TODO: Something? */ }
    }
}
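One way to build the startOffsets argument for Replay (from a subclass, since the method is protected) is OffsetsForTimes, which resolves the first offset at or after a timestamp per partition. A sketch; the topic name, partition count, and time window are assumptions:

var start = DateTime.UtcNow.AddHours(-1);
var timestamps = Enumerable.Range(0, 3) // assume the topic has 3 partitions
    .Select(p => new TopicPartitionTimestamp("my-topic", p, new Timestamp(start)))
    .ToList();
var startOffsets = consumer.OffsetsForTimes(timestamps, TimeSpan.FromSeconds(5));
await Replay(consumer, startOffsets, DateTime.UtcNow, callback, enableAutoOffsetStore: false);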
/// <summary>
/// Handle a new partition assignment.
/// </summary>
/// <param name="partitions"></param>
private void HandleAssign(IEnumerable<TopicPartition> partitions)
{
    Dbg($"New assignment: {string.Join(", ", partitions)}");
    if (currentAssignment != null)
    {
        Fatal($"Received new assignment {partitions} with an already existing assignment in place: {currentAssignment}");
    }
    currentAssignment = new Dictionary<TopicPartition, AssignedPartition>();
    foreach (var p in partitions)
    {
        currentAssignment[p] = new AssignedPartition();
    }
    consumer.Assign(partitions);
    SendPartitions("partitions_assigned", partitions);
}
public KafkaConsumer(string bootstrapServers, RecordConfig recordConfig, string topic, string groupId, int partition, int offset, Action<string, dynamic, DateTime> consumeResultHandler, Action<string> errorHandler)
{
    if (consumeResultHandler == null || errorHandler == null)
    {
        throw new Exception("Empty handler");
    }
    _consumeResultHandler = consumeResultHandler;
    _cts = new CancellationTokenSource();

    //1 var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryUrl });
    //var schemaRegistry = new SchemaRegistryClient(new Schema(recordConfig.Subject, recordConfig.Version, recordConfig.Id, recordConfig.SchemaString));
    //1 var schemaRegistry = new SchemaRegistryClient(new Schema(recordConfig.Subject, recordConfig.Version, recordConfig.Id, recordConfig.SchemaString));
    //1 _avroDeserializer = new AvroDeserializer<GenericRecord>(schemaRegistry);

    _consumer = new ConsumerBuilder<string, byte[]>(
        new ConsumerConfig
        {
            BootstrapServers = bootstrapServers,
            GroupId = groupId,
            AutoOffsetReset = AutoOffsetReset.Earliest
        })
        .SetKeyDeserializer(Deserializers.Utf8)
        .SetValueDeserializer(Deserializers.ByteArray /*new AvroDeserializer<T>(schemaRegistry).AsSyncOverAsync()*/)
        .SetErrorHandler((_, e) => errorHandler(e.Reason))
        .Build();

    _consumer.Assign(new List<TopicPartitionOffset> { new TopicPartitionOffset(topic, partition, offset) });
    _topic = topic;
}
public void Reject(object sender)
{
    _consumerClient.Assign(_consumerClient.Assignment);
}
public void Assign(TopicPartition partition)
{
    _consumer.Assign(partition);
}
private void PartitionsAssigned(IEnumerable<TopicPartition> partitions)
{
    Log.Debug($"Partitions were assigned: {_consumer.Name}");
    _consumer.Assign(partitions);
    _assignedPartitions = partitions;
}
public void Reject()
{
    _consumerClient.Assign(_consumerClient.Assignment);
}
public void Reject(MessageContext messageContext)
{
    _consumerClient.Assign(_consumerClient.Assignment);
}
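The Reject methods above all share one idiom: re-assigning the consumer's current Assignment without explicit offsets. Since no offset is given, each partition's fetch position falls back to its committed offset, so everything consumed but not yet committed is delivered again. A minimal standalone sketch of the same idea:

static void Reject(IConsumer<string, byte[]> consumer)
{
    // Offset.Unset is implied: consumption restarts at the last committed
    // offset (or per auto.offset.reset when nothing is committed yet).
    consumer.Assign(consumer.Assignment);
}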