/// <summary>
/// Process event.
/// </summary>
/// <param name="cancellationToken">Cancellation token</param>
/// <returns>Task which will complete when Process finishes.</returns>
public override async Task Process(CancellationToken cancellationToken = default)
{
    // Build chain of handlers
    BuildHandlerChain();

    // Consume one event; a null result signals EOF, so there is nothing to do.
    var consumedEvent = consumer.ConsumeEvent(cancellationToken);
    if (consumedEvent == null)
    {
        return;
    }

    // Run the message through the head of the handler chain; the chain may
    // transform the source key/value types into the sink key/value types.
    var inputMessage = new Message<TSourceKey, TSourceValue>(consumedEvent.Key, consumedEvent.Value);
    var outputMessage = await handlers[0].HandleMessage(inputMessage) as Message<TSinkKey, TSinkValue>;

    // A null result means the handler chain filtered the message out.
    if (outputMessage == null)
    {
        return;
    }

    // Forward the handled message to the sink.
    producer.ProduceEvent(new Confluent.Kafka.Message<TSinkKey, TSinkValue>
    {
        Key = outputMessage.Key,
        Value = outputMessage.Value
    });
}
/// <summary>
/// Process event.
/// </summary>
/// <param name="cancellationToken">Cancellation token</param>
/// <returns>
/// Task which will complete when Process finishes, yielding the delivery
/// result of the produced event, or null when EOF was reached or the
/// message was filtered out by the handler chain.
/// </returns>
public override async Task<Confluent.Kafka.DeliveryResult<TSinkKey, TSinkValue>> ProcessWithResult(CancellationToken cancellationToken = default)
{
    // Build chain of handlers
    BuildHandlerChain();

    // Consume event
    var sourceEvent = consumer.ConsumeEvent(cancellationToken);

    // Return null if EOF
    if (sourceEvent == null)
    {
        return null;
    }

    // Invoke handler chain
    var sourceMessage = new Abstractions.Message<TSourceKey, TSourceValue>(sourceEvent.Key, sourceEvent.Value);
    var sinkMessage = await handlers[0].HandleMessage(sourceMessage) as Abstractions.Message<TSinkKey, TSinkValue>;

    // Return null if the message was filtered out by the handler chain.
    // BUG FIX: previously sinkMessage was dereferenced unconditionally,
    // throwing NullReferenceException for filtered messages; Process()
    // already handles this case by returning early.
    if (sinkMessage == null)
    {
        return null;
    }

    // Produce event
    var sinkEvent = new Confluent.Kafka.Message<TSinkKey, TSinkValue>
    {
        Key = sinkMessage.Key,
        Value = sinkMessage.Value
    };
    return await producer.ProduceEventAsync(sinkEvent);
}
/// <summary>
/// Dispatches a consumed raw message to the handler pipeline, ignoring
/// messages from topics this endpoint is not subscribed to.
/// </summary>
/// <param name="message">The raw consumed Kafka message.</param>
/// <param name="tpo">Topic, partition and offset the message was read from.</param>
private async Task OnMessageReceived(Confluent.Kafka.Message<byte[], byte[]> message, Confluent.Kafka.TopicPartitionOffset tpo)
{
    // Checking if the message was sent to the subscribed topic is necessary
    // when reusing the same consumer for multiple topics.
    // Topic names are machine identifiers, not linguistic text, so an
    // ordinal comparison is the correct (and faster) choice over a
    // culture-sensitive one (CA1309).
    if (!Endpoint.Names.Any(endpointName => tpo.Topic.Equals(endpointName, StringComparison.OrdinalIgnoreCase)))
    {
        return;
    }

    await TryHandleMessage(message, tpo);
}
/// <summary>
/// Builds a raw Kafka message from the serialized payload and headers and
/// produces it to the endpoint's topic.
/// </summary>
/// <param name="message">The source message object (used only to derive the partitioning key).</param>
/// <param name="serializedMessage">The serialized message body.</param>
/// <param name="headers">Optional message headers; may be null or empty.</param>
/// <returns>The offset at which the message was written.</returns>
protected override async Task<IOffset> ProduceAsync(object message, byte[] serializedMessage, IEnumerable<MessageHeader> headers)
{
    var kafkaMessage = new Confluent.Kafka.Message<byte[], byte[]>
    {
        Key = KeyHelper.GetMessageKey(message),
        Value = serializedMessage
    };

    // Materialize the headers once: the incoming IEnumerable was previously
    // enumerated twice (Any() and then ForEach()), which is wasteful and
    // unsafe for one-shot sequences.
    var headerList = headers?.ToList();
    if (headerList != null && headerList.Count > 0)
    {
        kafkaMessage.Headers = new Confluent.Kafka.Headers();
        foreach (var header in headerList)
        {
            kafkaMessage.Headers.Add(header.ToConfluentHeader());
        }
    }

    var deliveryReport = await GetInnerProducer().ProduceAsync(Endpoint.Name, kafkaMessage);

    return new KafkaOffset(deliveryReport.TopicPartitionOffset);
}
/// <summary>
/// Hands a consumed raw message to the processing pipeline; any unhandled
/// exception is treated as fatal and stops the consumer.
/// </summary>
/// <param name="message">The raw consumed Kafka message.</param>
/// <param name="tpo">Topic, partition and offset the message was read from.</param>
private async Task TryHandleMessage(Confluent.Kafka.Message<byte[], byte[]> message, Confluent.Kafka.TopicPartitionOffset tpo)
{
    try
    {
        _messagesSinceCommit++;

        // Translate the Confluent headers (if any) before dispatching.
        var silverbackHeaders = message.Headers?
            .Select(header => header.ToSilverbackHeader())
            .ToList();

        await HandleMessage(message.Value, silverbackHeaders, new KafkaOffset(tpo));
    }
    catch (Exception ex)
    {
        // A failure here is unrecoverable for this consumer: log it and
        // disconnect rather than risk processing further messages.
        _logger.LogCritical(ex,
            "Fatal error occurred consuming the message: {topic} {partition} @{offset}. " +
            "The consumer will be stopped.",
            tpo.Topic, tpo.Partition, tpo.Offset);

        Disconnect();
    }
}
/// <summary>
/// Produces a raw broker message to the endpoint's topic, optionally
/// checking the persistence status of the delivery report.
/// </summary>
/// <param name="message">The raw message (content plus headers) to produce.</param>
/// <returns>The offset at which the message was written.</returns>
/// <exception cref="ProduceException">Wraps any Confluent.Kafka.KafkaException raised while producing.</exception>
protected override async Task<IOffset> ProduceAsync(RawBrokerMessage message)
{
    try
    {
        var kafkaMessage = new Confluent.Kafka.Message<byte[], byte[]>
        {
            Key = GetPartitioningKey(message.Headers),
            Value = message.RawContent
        };

        // Enumerate the headers once instead of Any() followed by ForEach().
        var headerList = message.Headers?.ToList();
        if (headerList != null && headerList.Count > 0)
        {
            kafkaMessage.Headers = new Confluent.Kafka.Headers();
            foreach (var header in headerList)
            {
                kafkaMessage.Headers.Add(header.ToConfluentHeader());
            }
        }

        var deliveryReport = await GetInnerProducer().ProduceAsync(Endpoint.Name, kafkaMessage);

        if (Endpoint.Configuration.ArePersistenceStatusReportsEnabled)
        {
            CheckPersistenceStatus(deliveryReport);
        }

        return new KafkaOffset(deliveryReport.TopicPartitionOffset);
    }
    catch (Confluent.Kafka.KafkaException ex)
    {
        // Disposing and re-creating the producer will maybe fix the issue
        if (Endpoint.Configuration.DisposeOnException)
        {
            DisposeInnerProducer();
        }

        throw new ProduceException("Error occurred producing the message. See inner exception for details.", ex);
    }
}
/// <summary>
/// Produce Kafka event.
/// </summary>
/// <param name="sinkEvent">Kafka sink message.</param>
/// <returns>Task which will complete when Produce finishes, yielding the delivery result.</returns>
public async Task<Confluent.Kafka.DeliveryResult<TKey, TValue>> ProduceEventAsync(Confluent.Kafka.Message<TKey, TValue> sinkEvent)
{
    var result = await producer.ProduceAsync(topic, sinkEvent);

    // Use a message template instead of string interpolation so the entry
    // is structured and the arguments are formatted only when the
    // Information level is enabled (CA2254). The rendered text is unchanged.
    logger?.LogInformation("Message produced: {Key} {Value}", sinkEvent.Key, sinkEvent.Value);

    return result;
}
/// <summary>
/// Produce Kafka event.
/// </summary>
/// <param name="sinkEvent">Kafka sink message.</param>
public void ProduceEvent(Confluent.Kafka.Message<TKey, TValue> sinkEvent)
{
    producer.Produce(topic, sinkEvent);

    // Use a message template instead of string interpolation so the entry
    // is structured and the arguments are formatted only when the
    // Information level is enabled (CA2254). The rendered text is unchanged.
    logger?.LogInformation("Message produced: {Key} {Value}", sinkEvent.Key, sinkEvent.Value);
}