Example #1
0
        /// <summary>
        /// Publishes a transport message to the Kafka topic named by
        /// <c>message.GetName()</c>, forwarding all message headers as Kafka headers.
        /// </summary>
        /// <param name="message">The message to publish; its body is sent as the Kafka value.</param>
        /// <returns>
        /// <see cref="OperateResult.Success"/> when the broker reports the message as
        /// persisted (or possibly persisted); otherwise a failed result wrapping a
        /// <see cref="PublisherSentFailedException"/>.
        /// </returns>
        public async Task <OperateResult> SendAsync(TransportMessage message)
        {
            var producer = _connectionPool.RentProducer();

            try
            {
                var headers = new Confluent.Kafka.Headers();

                // Header values are transported as UTF-8 bytes; null values are preserved as null.
                foreach (var header in message.Headers)
                {
                    headers.Add(header.Value != null
                        ? new Header(header.Key, Encoding.UTF8.GetBytes(header.Value))
                        : new Header(header.Key, null));
                }

                // Use an explicitly supplied Kafka key header when present and non-empty;
                // otherwise fall back to the message id as the partition key.
                var result = await producer.ProduceAsync(message.GetName(), new Message <string, byte[]>
                {
                    Headers = headers,
                    Key     = message.Headers.TryGetValue(KafkaHeaders.KafkaKey, out string kafkaMessageKey) && !string.IsNullOrEmpty(kafkaMessageKey) ? kafkaMessageKey : message.GetId(),
                    Value   = message.Body
                });

                if (result.Status == PersistenceStatus.Persisted || result.Status == PersistenceStatus.PossiblyPersisted)
                {
                    _logger.LogDebug($"kafka topic message [{message.GetName()}] has been published.");

                    return(OperateResult.Success);
                }

                // Thrown here so the catch below uniformly wraps all failure modes.
                throw new PublisherSentFailedException("kafka message persisted failed!");
            }
            // BUGFIX: the original block ended with an unterminated try (no catch/finally
            // and no closing brace), which does not compile. Complete it with the
            // error-handling and producer-pooling pattern used by the sibling overload:
            // wrap any failure in a PublisherSentFailedException and always return the
            // producer to the pool (disposing it if the pool refuses it).
            catch (Exception ex)
            {
                var wrappedEx = new PublisherSentFailedException(ex.Message, ex);

                return(OperateResult.Failed(wrappedEx));
            }
            finally
            {
                if (!_connectionPool.Return(producer))
                {
                    producer.Dispose();
                }
            }
        }
        /// <summary>
        /// Publishes a transport message to the Kafka topic named by
        /// <c>message.GetName()</c>, keyed by the message id, copying all message
        /// headers into native Kafka headers.
        /// </summary>
        /// <param name="message">The message to publish; its body becomes the Kafka value.</param>
        /// <returns>
        /// A successful result when the broker reports the message as persisted (or
        /// possibly persisted); otherwise a failed result carrying a
        /// <see cref="PublisherSentFailedException"/>.
        /// </returns>
        public async Task <OperateResult> SendAsync(TransportMessage message)
        {
            var producer = _connectionPool.RentProducer();

            try
            {
                var kafkaHeaders = new Confluent.Kafka.Headers();

                // Header values travel as UTF-8 bytes; a null value stays null.
                foreach (var pair in message.Headers)
                {
                    var entry = pair.Value == null
                        ? new Header(pair.Key, null)
                        : new Header(pair.Key, Encoding.UTF8.GetBytes(pair.Value));
                    kafkaHeaders.Add(entry);
                }

                var kafkaMessage = new Message <string, byte[]>
                {
                    Headers = kafkaHeaders,
                    Key     = message.GetId(),
                    Value   = message.Body
                };

                var deliveryResult = await producer.ProduceAsync(message.GetName(), kafkaMessage);

                switch (deliveryResult.Status)
                {
                    case PersistenceStatus.Persisted:
                    case PersistenceStatus.PossiblyPersisted:
                        _logger.LogDebug($"kafka topic message [{message.GetName()}] has been published.");
                        return OperateResult.Success;

                    default:
                        return OperateResult.Failed(new PublisherSentFailedException("kafka message persisted failed!"));
                }
            }
            catch (Exception ex)
            {
                // Wrap any transport failure so callers observe a single exception type.
                return OperateResult.Failed(new PublisherSentFailedException(ex.Message, ex));
            }
            finally
            {
                // Hand the producer back to the pool; dispose it if the pool is full.
                if (!_connectionPool.Return(producer))
                {
                    producer.Dispose();
                }
            }
        }
Example #3
0
        /// <summary>
        /// Builds a raw Kafka message from the given context: headers are copied over,
        /// the partition key becomes the message key, and the serialized content
        /// becomes the value. The timestamp is left for the broker to assign.
        /// </summary>
        private static Message <byte[], byte[]> CreateMessage(IMessageContext context)
        {
            var kafkaHeaders = new Confluent.Kafka.Headers();

            foreach (var pair in context.Headers)
            {
                if (pair.Value == null)
                {
                    kafkaHeaders.Add(new Header(pair.Key, null));
                }
                else
                {
                    kafkaHeaders.Add(new Header(pair.Key, pair.Value));
                }
            }

            var kafkaMessage = new Message <byte[], byte[]>
            {
                Key = context.PartitionKey,
                Value = GetMessageContent(context),
                Headers = kafkaHeaders,
                Timestamp = Timestamp.Default
            };

            return kafkaMessage;
        }
        /// <summary>
        /// Verifies that converting message headers to native Kafka headers yields a
        /// collection equivalent to one built directly with the Kafka Headers API.
        /// </summary>
        public void GetKafkaHeader_ShouldReturnKafkaHeaders()
        {
            // Arrange
            var expected = new Headers {
                { key, this.value }
            };
            var messageHeaders = new MessageHeaders();
            messageHeaders.Add(key, this.value);

            // Act
            var actual = new Confluent.Kafka.Headers();
            foreach (var pair in messageHeaders)
            {
                if (pair.Value == null)
                {
                    actual.Add(new Header(pair.Key, null));
                }
                else
                {
                    actual.Add(new Header(pair.Key, pair.Value));
                }
            }

            // Assert
            actual.Should().BeEquivalentTo(expected);
        }
        /// <summary>
        /// Polls librdkafka for one message and marshals it into a typed
        /// <see cref="ConsumeResult{K,V}"/>. Returns null when the poll times out
        /// without receiving a message.
        /// </summary>
        /// <param name="millisecondsTimeout">Maximum time to block in the native poll.</param>
        /// <param name="keyDeserializer">Deserializer applied to the raw key bytes.</param>
        /// <param name="valueDeserializer">Deserializer applied to the raw value bytes.</param>
        /// <exception cref="ConsumeException">
        /// Thrown when the native message carries an error code, or when key/value
        /// deserialization fails; the raw byte[] form of the message is attached.
        /// </exception>
        private ConsumeResult <K, V> ConsumeImpl <K, V>(
            int millisecondsTimeout,
            IDeserializer <K> keyDeserializer,
            IDeserializer <V> valueDeserializer)
        {
            var msgPtr = kafkaHandle.ConsumerPoll((IntPtr)millisecondsTimeout);

            // Null pointer: the poll timed out with no message available.
            if (msgPtr == IntPtr.Zero)
            {
                return(null);
            }

            try
            {
                var msg = Util.Marshal.PtrToStructure <rd_kafka_message>(msgPtr);

                // Topic-name marshaling is opt-in (it costs a native string copy per message).
                string topic = null;
                if (this.enableTopicNameMarshaling)
                {
                    if (msg.rkt != IntPtr.Zero)
                    {
                        topic = Util.Marshal.PtrToStringUTF8(Librdkafka.topic_name(msg.rkt));
                    }
                }

                // Partition EOF is surfaced as a result with a null Message, not as an error.
                if (msg.err == ErrorCode.Local_PartitionEOF)
                {
                    return(new ConsumeResult <K, V>
                    {
                        TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                        Message = null,
                        IsPartitionEOF = true
                    });
                }

                // Timestamp marshaling is opt-in; otherwise NotAvailable/0 is reported.
                long   timestampUnix = 0;
                IntPtr timestampType = (IntPtr)TimestampType.NotAvailable;
                if (enableTimestampMarshaling)
                {
                    timestampUnix = Librdkafka.message_timestamp(msgPtr, out timestampType);
                }
                var timestamp = new Timestamp(timestampUnix, (TimestampType)timestampType);

                // Header marshaling is opt-in; headers stays null when disabled.
                Headers headers = null;
                if (enableHeaderMarshaling)
                {
                    headers = new Headers();
                    Librdkafka.message_headers(msgPtr, out IntPtr hdrsPtr);
                    if (hdrsPtr != IntPtr.Zero)
                    {
                        // header_get_all is iterated by index until it returns an error
                        // code, which signals the end of the native header list.
                        for (var i = 0; ; ++i)
                        {
                            var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep);
                            if (err != ErrorCode.NoError)
                            {
                                break;
                            }
                            var    headerName  = Util.Marshal.PtrToStringUTF8(namep);
                            byte[] headerValue = null;
                            // A zero value pointer represents a header with a null value.
                            if (valuep != IntPtr.Zero)
                            {
                                headerValue = new byte[(int)sizep];
                                Marshal.Copy(valuep, headerValue, 0, (int)sizep);
                            }
                            headers.Add(headerName, headerValue);
                        }
                    }
                }

                // Any other native error: throw, attaching the raw (byte[]) message so the
                // caller can still inspect what was received.
                if (msg.err != ErrorCode.NoError)
                {
                    throw new ConsumeException(
                              new ConsumeResult <byte[], byte[]>
                    {
                        TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                        Message = new Message <byte[], byte[]>
                        {
                            Timestamp = timestamp,
                            Headers   = headers,
                            Key       = KeyAsByteArray(msg),
                            Value     = ValueAsByteArray(msg)
                        },
                        IsPartitionEOF = false
                    },
                              kafkaHandle.CreatePossiblyFatalError(msg.err, null));
                }

                // Deserialize the key directly from the native buffer (no managed copy).
                K key;
                try
                {
                    unsafe
                    {
                        key = keyDeserializer.Deserialize(
                            msg.key == IntPtr.Zero
                                ? ReadOnlySpan <byte> .Empty
                                : new ReadOnlySpan <byte>(msg.key.ToPointer(), (int)msg.key_len),
                            msg.key == IntPtr.Zero,
                            new SerializationContext(MessageComponentType.Key, topic));
                    }
                }
                catch (Exception ex)
                {
                    // Key deserialization failure: rethrow with the raw message attached.
                    throw new ConsumeException(
                              new ConsumeResult <byte[], byte[]>
                    {
                        TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                        Message = new Message <byte[], byte[]>
                        {
                            Timestamp = timestamp,
                            Headers   = headers,
                            Key       = KeyAsByteArray(msg),
                            Value     = ValueAsByteArray(msg)
                        },
                        IsPartitionEOF = false
                    },
                              new Error(ErrorCode.Local_KeyDeserialization),
                              ex);
                }

                // Deserialize the value directly from the native buffer.
                V val;
                try
                {
                    unsafe
                    {
                        val = valueDeserializer.Deserialize(
                            msg.val == IntPtr.Zero
                                ? ReadOnlySpan <byte> .Empty
                                : new ReadOnlySpan <byte>(msg.val.ToPointer(), (int)msg.len),
                            msg.val == IntPtr.Zero,
                            new SerializationContext(MessageComponentType.Value, topic));
                    }
                }
                catch (Exception ex)
                {
                    // Value deserialization failure: rethrow with the raw message attached.
                    throw new ConsumeException(
                              new ConsumeResult <byte[], byte[]>
                    {
                        TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                        Message = new Message <byte[], byte[]>
                        {
                            Timestamp = timestamp,
                            Headers   = headers,
                            Key       = KeyAsByteArray(msg),
                            Value     = ValueAsByteArray(msg)
                        },
                        IsPartitionEOF = false
                    },
                              new Error(ErrorCode.Local_ValueDeserialization),
                              ex);
                }

                return(new ConsumeResult <K, V>
                {
                    TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                    Message = new Message <K, V>
                    {
                        Timestamp = timestamp,
                        Headers = headers,
                        Key = key,
                        Value = val
                    },
                    IsPartitionEOF = false
                });
            }
            finally
            {
                // Always release the native message, even when throwing.
                Librdkafka.message_destroy(msgPtr);
            }
        }
        /// <summary>
        /// Native delivery-report callback invoked by librdkafka for each produced
        /// message. Marshals the native message into a <c>DeliveryReport</c> and
        /// forwards it to the IDeliveryHandler stashed in the message's opaque pointer.
        /// </summary>
        /// <param name="rk">Native client handle (unused here).</param>
        /// <param name="rkmessage">Pointer to the native rd_kafka_message struct.</param>
        /// <param name="opaque">Client-level opaque pointer (unused here).</param>
        private void DeliveryReportCallbackImpl(IntPtr rk, IntPtr rkmessage, IntPtr opaque)
        {
            // Ensure registered handlers are never called as a side-effect of Dispose/Finalize (prevents deadlocks in common scenarios).
            if (ownedKafkaHandle.IsClosed)
            {
                return;
            }

            var msg = Util.Marshal.PtrToStructure <rd_kafka_message>(rkmessage);

            // the msg._private property has dual purpose. Here, it is an opaque pointer set
            // by Topic.Produce to be an IDeliveryHandler. When Consuming, it's for internal
            // use (hence the name).
            if (msg._private == IntPtr.Zero)
            {
                // Note: this can occur if the ProduceAsync overload that accepts a DeliveryHandler
                // was used and the delivery handler was set to null.
                return;
            }

            // Recover the handler and free the GCHandle allocated at produce time so
            // the handler object becomes collectible again.
            var gch             = GCHandle.FromIntPtr(msg._private);
            var deliveryHandler = (IDeliveryHandler)gch.Target;

            gch.Free();

            // Header marshaling is opt-in; headers stays null when disabled.
            Headers headers = null;

            if (this.enableDeliveryReportHeaders)
            {
                headers = new Headers();
                Librdkafka.message_headers(rkmessage, out IntPtr hdrsPtr);
                if (hdrsPtr != IntPtr.Zero)
                {
                    // header_get_all is iterated by index until it returns an error
                    // code, which signals the end of the native header list.
                    for (var i = 0; ; ++i)
                    {
                        var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep);
                        if (err != ErrorCode.NoError)
                        {
                            break;
                        }
                        var    headerName  = Util.Marshal.PtrToStringUTF8(namep);
                        byte[] headerValue = null;
                        // A zero value pointer represents a header with a null value.
                        if (valuep != IntPtr.Zero)
                        {
                            headerValue = new byte[(int)sizep];
                            Marshal.Copy(valuep, headerValue, 0, (int)sizep);
                        }
                        headers.Add(headerName, headerValue);
                    }
                }
            }

            // Timestamp marshaling is opt-in; otherwise NotAvailable/0 is reported.
            IntPtr timestampType = (IntPtr)TimestampType.NotAvailable;
            long   timestamp     = 0;

            if (enableDeliveryReportTimestamp)
            {
                timestamp = Librdkafka.message_timestamp(rkmessage, out timestampType);
            }

            // Persisted-status marshaling is opt-in; defaults to PossiblyPersisted.
            PersistenceStatus messageStatus = PersistenceStatus.PossiblyPersisted;

            if (enableDeliveryReportPersistedStatus)
            {
                messageStatus = Librdkafka.message_status(rkmessage);
            }

            deliveryHandler.HandleDeliveryReport(
                new DeliveryReport <Null, Null>
            {
                // Topic is not set here in order to avoid the marshalling cost.
                // Instead, the delivery handler is expected to cache the topic string.
                Partition = msg.partition,
                Offset    = msg.offset,
                Error     = KafkaHandle.CreatePossiblyFatalError(msg.err, null),
                Status    = messageStatus,
                Message   = new Message <Null, Null> {
                    Timestamp = new Timestamp(timestamp, (TimestampType)timestampType), Headers = headers
                }
            }
                );
        }
Example #7
0
        /// <inheritdoc/>
        public void Produce(
            TopicPartition topicPartition,
            Message <TKey, TValue> message,
            Action <DeliveryReport <TKey, TValue> > deliveryHandler = null)
        {
            // A handler only makes sense when delivery reports are enabled.
            if (deliveryHandler != null && !enableDeliveryReports)
            {
                throw new InvalidOperationException("A delivery handler was specified, but delivery reports are disabled.");
            }

            // Serializers always receive a non-null (possibly empty) header collection.
            Headers headers = message.Headers ?? new Headers();

            // Serialize the key; the synchronous Produce path requires an ISerializer.
            byte[] serializedKey;
            try
            {
                if (keySerializer == null)
                {
                    throw new InvalidOperationException("Produce called with an IAsyncSerializer key serializer configured but an ISerializer is required.");
                }

                serializedKey = keySerializer.Serialize(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic, headers));
            }
            catch (Exception ex)
            {
                // Any serialization failure (including a missing sync serializer) is
                // reported as a local key-serialization error.
                throw new ProduceException <TKey, TValue>(
                          new Error(ErrorCode.Local_KeySerialization, ex.ToString()),
                          new DeliveryResult <TKey, TValue>
                {
                    Message = message,
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
                }
                          );
            }

            // Serialize the value under the same constraint.
            byte[] serializedValue;
            try
            {
                if (valueSerializer == null)
                {
                    throw new InvalidOperationException("Produce called with an IAsyncSerializer value serializer configured but an ISerializer is required.");
                }

                serializedValue = valueSerializer.Serialize(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic, headers));
            }
            catch (Exception ex)
            {
                throw new ProduceException <TKey, TValue>(
                          new Error(ErrorCode.Local_ValueSerialization, ex.ToString()),
                          new DeliveryResult <TKey, TValue>
                {
                    Message = message,
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
                }
                          );
            }

            try
            {
                // Hand off to the native produce path; the shim adapts the user's
                // Action-based handler to the internal delivery-handler interface.
                var shim = new TypedDeliveryHandlerShim_Action(
                    topicPartition.Topic,
                    enableDeliveryReportKey ? message.Key : default(TKey),
                    enableDeliveryReportValue ? message.Value : default(TValue),
                    deliveryHandler);

                ProduceImpl(
                    topicPartition.Topic,
                    serializedValue, 0, serializedValue?.Length ?? 0,
                    serializedKey, 0, serializedKey?.Length ?? 0,
                    message.Timestamp, topicPartition.Partition,
                    headers,
                    shim);
            }
            catch (KafkaException ex)
            {
                throw new ProduceException <TKey, TValue>(
                          ex.Error,
                          new DeliveryReport <TKey, TValue>
                {
                    Message = message,
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
                });
            }
        }
Example #8
0
        /// <inheritdoc/>
        /// <remarks>
        /// Serializes the key and value (preferring a sync serializer when one is
        /// configured, otherwise awaiting the async one), then hands the bytes to the
        /// native produce path. When delivery reports are enabled the returned task
        /// completes when the broker acknowledges (or rejects) the message; otherwise
        /// it completes immediately with <see cref="Offset.Unset"/>.
        /// </remarks>
        public async Task <DeliveryResult <TKey, TValue> > ProduceAsync(
            TopicPartition topicPartition,
            Message <TKey, TValue> message,
            CancellationToken cancellationToken)
        {
            // Serializers always receive a non-null (possibly empty) header collection.
            Headers headers = message.Headers ?? new Headers();

            byte[] keyBytes;
            try
            {
                keyBytes = (keySerializer != null)
                    ? keySerializer.Serialize(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic, headers))
                    : await asyncKeySerializer.SerializeAsync(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic, headers)).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                // Serialization failures are wrapped so callers can see the original
                // message and the offending exception.
                throw new ProduceException <TKey, TValue>(
                          new Error(ErrorCode.Local_KeySerialization),
                          new DeliveryResult <TKey, TValue>
                {
                    Message = message,
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
                },
                          ex);
            }

            byte[] valBytes;
            try
            {
                valBytes = (valueSerializer != null)
                    ? valueSerializer.Serialize(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic, headers))
                    : await asyncValueSerializer.SerializeAsync(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic, headers)).ConfigureAwait(false);
            }
            catch (Exception ex)
            {
                throw new ProduceException <TKey, TValue>(
                          new Error(ErrorCode.Local_ValueSerialization),
                          new DeliveryResult <TKey, TValue>
                {
                    Message = message,
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
                },
                          ex);
            }

            try
            {
                if (enableDeliveryReports)
                {
                    var handler = new TypedTaskDeliveryHandlerShim(
                        topicPartition.Topic,
                        enableDeliveryReportKey ? message.Key : default(TKey),
                        enableDeliveryReportValue ? message.Value : default(TValue));

                    // BUGFIX: the original guard was `cancellationToken != null &&
                    // cancellationToken.CanBeCanceled`. CancellationToken is a struct,
                    // so the null comparison is a lifted comparison that is always
                    // true; CanBeCanceled alone is the correct (and sufficient) check.
                    if (cancellationToken.CanBeCanceled)
                    {
                        handler.CancellationTokenRegistration
                            = cancellationToken.Register(() => handler.TrySetCanceled());
                    }

                    ProduceImpl(
                        topicPartition.Topic,
                        valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                        keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                        message.Timestamp, topicPartition.Partition, headers,
                        handler);

                    // Completed by the delivery-report callback.
                    return(await handler.Task.ConfigureAwait(false));
                }
                else
                {
                    // Fire-and-forget: no delivery handler, so the offset is unknown.
                    ProduceImpl(
                        topicPartition.Topic,
                        valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                        keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                        message.Timestamp, topicPartition.Partition, headers,
                        null);

                    var result = new DeliveryResult <TKey, TValue>
                    {
                        TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
                        Message = message
                    };

                    return(result);
                }
            }
            catch (KafkaException ex)
            {
                throw new ProduceException <TKey, TValue>(
                          ex.Error,
                          new DeliveryResult <TKey, TValue>
                {
                    Message = message,
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
                });
            }
        }