Code example #1
        protected override bool ReleaseHandle()
        {
            LibRdKafka.topic_destroy(handle);
            // See SafeKafkaHandle.Topic
            kafkaHandle.DangerousRelease();
            return(true);
        }
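
The DangerousRelease() here balances a DangerousAddRef() taken when the topic handle was created, so the owning kafka handle stays alive for the topic's lifetime. Below is a minimal, self-contained sketch of that parent/child SafeHandle pattern; ParentHandle and ChildHandle are illustrative stand-ins (not the library's types), with AllocHGlobal/FreeHGlobal standing in for the native create/destroy calls.

using System;
using System.Runtime.InteropServices;

class ParentHandle : SafeHandle
{
    public ParentHandle() : base(IntPtr.Zero, ownsHandle: true)
        => SetHandle(Marshal.AllocHGlobal(16));   // stand-in for the native client object

    public override bool IsInvalid => handle == IntPtr.Zero;

    protected override bool ReleaseHandle()
    {
        Marshal.FreeHGlobal(handle);              // stand-in for the native destroy call
        return true;
    }
}

class ChildHandle : SafeHandle
{
    private readonly ParentHandle parent;

    public ChildHandle(ParentHandle parent) : base(IntPtr.Zero, ownsHandle: true)
    {
        this.parent = parent;
        bool success = false;
        parent.DangerousAddRef(ref success);      // mirrors the AddRef in SafeKafkaHandle.Topic
        if (!success) throw new InvalidOperationException("parent handle unavailable");
        SetHandle(Marshal.AllocHGlobal(16));      // stand-in for the native topic object
    }

    public override bool IsInvalid => handle == IntPtr.Zero;

    protected override bool ReleaseHandle()
    {
        Marshal.FreeHGlobal(handle);              // stand-in for topic_destroy()
        parent.DangerousRelease();                // balances the DangerousAddRef above
        return true;
    }
}

Code example #2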
        internal Dictionary <string, string> Dump()
        {
            ThrowIfHandleClosed();
            UIntPtr cntp = (UIntPtr)0;
            IntPtr  data = LibRdKafka.conf_dump(handle, out cntp);

            if (data == IntPtr.Zero)
            {
                throw new Exception("Zero data");
            }

            try
            {
                if (((int)cntp & 1) != 0)
                {
                    // Expect Key -> Value, so even number of strings
                    throw new Exception("Invalid number of config entries");
                }

                var dict = new Dictionary <string, string>();
                for (int i = 0; i < (int)cntp / 2; i++)
                {
                    dict.Add(Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, 2 * i * Util.Marshal.SizeOf <IntPtr>())),
                             Util.Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, (2 * i + 1) * Util.Marshal.SizeOf <IntPtr>())));
                }
                // Filter out callback pointers
                return(dict.Where(kv => !kv.Key.EndsWith("_cb")).ToDictionary(kv => kv.Key, kv => kv.Value));
            }
            finally
            {
                LibRdKafka.conf_dump_free(data, cntp);
            }
        }
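
Dump() assumes conf_dump returns a flat native array of cntp char* entries, alternating key and value, which the loop reads at pointer-sized strides with Marshal.ReadIntPtr. The following runnable sketch reproduces that layout by building the array itself rather than calling librdkafka (it assumes a modern .NET runtime for the Marshal UTF-8 helpers):

using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;

class ConfDumpLayoutDemo
{
    static void Main()
    {
        // Mimic conf_dump's result: cntp consecutive char* entries,
        // alternating key and value.
        string[] flat = { "client.id", "demo", "debug", "all" };
        int ptrSize = Marshal.SizeOf<IntPtr>();
        IntPtr data = Marshal.AllocHGlobal(flat.Length * ptrSize);
        for (int i = 0; i < flat.Length; i++)
            Marshal.WriteIntPtr(data, i * ptrSize, Marshal.StringToCoTaskMemUTF8(flat[i]));

        try
        {
            var dict = new Dictionary<string, string>();
            for (int i = 0; i < flat.Length / 2; i++)
                dict.Add(Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, 2 * i * ptrSize)),
                         Marshal.PtrToStringUTF8(Marshal.ReadIntPtr(data, (2 * i + 1) * ptrSize)));

            foreach (var kv in dict)
                Console.WriteLine($"{kv.Key} = {kv.Value}");
        }
        finally
        {
            // conf_dump_free() does the equivalent of both steps below.
            for (int i = 0; i < flat.Length; i++)
                Marshal.FreeCoTaskMem(Marshal.ReadIntPtr(data, i * ptrSize));
            Marshal.FreeHGlobal(data);
        }
    }
}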
Code example #3
        internal void Assign(ICollection <TopicPartitionOffset> partitions)
        {
            IntPtr list = IntPtr.Zero;

            if (partitions != null)
            {
                list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
                if (list == IntPtr.Zero)
                {
                    throw new Exception("Failed to create topic partition list");
                }
                foreach (var partition in partitions)
                {
                    IntPtr ptr = LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
                    Marshal.WriteInt64(
                        ptr,
                        (int)Marshal.OffsetOf <rd_kafka_topic_partition>("offset"),
                        partition.Offset);
                }
            }

            ErrorCode err = LibRdKafka.assign(handle, list);

            if (list != IntPtr.Zero)
            {
                LibRdKafka.topic_partition_list_destroy(list);
            }
            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to assign partitions");
            }
        }
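
topic_partition_list_add returns a pointer to the appended element, and Assign() writes the offset field directly through that pointer using Marshal.WriteInt64 with Marshal.OffsetOf. Here is the same trick in isolation; fake_topic_partition is a simplified, hypothetical stand-in, not librdkafka's full struct definition.

using System;
using System.Runtime.InteropServices;

// Simplified stand-in for librdkafka's rd_kafka_topic_partition --
// illustrative only; the real struct has more fields.
[StructLayout(LayoutKind.Sequential)]
struct fake_topic_partition
{
    public IntPtr topic;    // char *
    public int partition;
    public long offset;
}

class OffsetOfDemo
{
    static void Main()
    {
        int size = Marshal.SizeOf<fake_topic_partition>();
        IntPtr ptr = Marshal.AllocHGlobal(size);
        try
        {
            for (int i = 0; i < size; i++)
                Marshal.WriteByte(ptr, i, 0);                 // zero the element

            // Poke just the 'offset' field through the raw pointer,
            // exactly as Assign() does on the element returned by
            // topic_partition_list_add.
            Marshal.WriteInt64(ptr, (int)Marshal.OffsetOf<fake_topic_partition>("offset"), 42L);

            var back = Marshal.PtrToStructure<fake_topic_partition>(ptr);
            Console.WriteLine(back.offset);                   // prints 42
        }
        finally
        {
            Marshal.FreeHGlobal(ptr);
        }
    }
}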
Code example #4
        internal long Produce(byte[] val, int valOffset, int valLength, byte[] key, int keyOffset, int keyLength, int partition, long? timestamp, IntPtr opaque, bool blockIfQueueFull)
        {
            var pValue = IntPtr.Zero;
            var pKey   = IntPtr.Zero;

            var gchValue = default(GCHandle);
            var gchKey   = default(GCHandle);

            if (val != null)
            {
                gchValue = GCHandle.Alloc(val, GCHandleType.Pinned);
                pValue   = Marshal.UnsafeAddrOfPinnedArrayElement(val, valOffset);
            }

            if (key != null)
            {
                gchKey = GCHandle.Alloc(key, GCHandleType.Pinned);
                pKey   = Marshal.UnsafeAddrOfPinnedArrayElement(key, keyOffset);
            }

            try
            {
                // TODO: when refactor complete, reassess the below note.
                // Note: since the message queue threshold limit also includes delivery reports, it is important that another
                // thread of the application calls poll() for a blocking produce() to ever unblock.
                if (timestamp == null)
                {
                    return((long)LibRdKafka.produce(
                               handle,
                               partition,
                               (IntPtr)(MsgFlags.MSG_F_COPY | (blockIfQueueFull ? MsgFlags.MSG_F_BLOCK : 0)),
                               pValue, (UIntPtr)valLength,
                               pKey, (UIntPtr)keyLength,
                               opaque));
                }
                else
                {
                    return((long)LibRdKafka.producev(
                               handle,
                               partition,
                               (IntPtr)(MsgFlags.MSG_F_COPY | (blockIfQueueFull ? MsgFlags.MSG_F_BLOCK : 0)),
                               pValue, (UIntPtr)valLength,
                               pKey, (UIntPtr)keyLength,
                               timestamp.Value,
                               opaque));
                }
            }
            finally
            {
                if (val != null)
                {
                    gchValue.Free();
                }

                if (key != null)
                {
                    gchKey.Free();
                }
            }
        }
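
Produce() pins the key and value arrays with GCHandle so the GC cannot relocate them while the native call reads from the raw addresses, then unpins them in the finally block. A minimal sketch of that pinning discipline:

using System;
using System.Runtime.InteropServices;

class PinningDemo
{
    static void Main()
    {
        byte[] payload = { 1, 2, 3, 4, 5 };
        var gch = GCHandle.Alloc(payload, GCHandleType.Pinned);
        try
        {
            // While pinned, the array cannot be moved by the GC, so the
            // address below stays valid for the duration of a native call.
            IntPtr p = Marshal.UnsafeAddrOfPinnedArrayElement(payload, 2);
            Console.WriteLine(Marshal.ReadByte(p));   // prints 3
        }
        finally
        {
            gch.Free();   // always unpin, as Produce() does in its finally block
        }
    }
}

Code example #5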
        internal void Assign(IEnumerable <TopicPartitionOffset> partitions)
        {
            ThrowIfHandleClosed();
            IntPtr list = IntPtr.Zero;

            if (partitions != null)
            {
                list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count());
                if (list == IntPtr.Zero)
                {
                    throw new Exception("Failed to create topic partition list");
                }
                foreach (var partition in partitions)
                {
                    IntPtr ptr = LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
                    Marshal.WriteInt64(
                        ptr,
                        (int)Util.Marshal.OffsetOf <rd_kafka_topic_partition>("offset"),
                        partition.Offset);
                }
            }

            ErrorCode err = LibRdKafka.assign(handle, list);

            if (list != IntPtr.Zero)
            {
                LibRdKafka.topic_partition_list_destroy(list);
            }
            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }
        }
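Code example #6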
        /// <summary>
        ///     - allTopics=true - request all topics from cluster
        ///     - allTopics=false, topic=null - request only locally known topics (topic_new():ed topics or otherwise locally referenced once, such as consumed topics)
        ///     - allTopics=false, topic=valid - request specific topic
        /// </summary>
        internal Metadata GetMetadata(bool allTopics, SafeTopicHandle topic, int millisecondsTimeout)
        {
            ThrowIfHandleClosed();
            IntPtr    metaPtr;
            ErrorCode err = LibRdKafka.metadata(
                handle, allTopics,
                topic?.DangerousGetHandle() ?? IntPtr.Zero,
                /* const struct rd_kafka_metadata ** */ out metaPtr,
                (IntPtr)millisecondsTimeout);

            if (err == ErrorCode.NoError)
            {
                try {
                    var meta = Util.Marshal.PtrToStructure <rd_kafka_metadata>(metaPtr);

                    var brokers = Enumerable.Range(0, meta.broker_cnt)
                                  .Select(i => Util.Marshal.PtrToStructure <rd_kafka_metadata_broker>(
                                              meta.brokers + i * Util.Marshal.SizeOf <rd_kafka_metadata_broker>()))
                                  .Select(b => new BrokerMetadata(b.id, b.host, b.port))
                                  .ToList();

                    var topics = Enumerable.Range(0, meta.topic_cnt)
                                 .Select(i => Util.Marshal.PtrToStructure <rd_kafka_metadata_topic>(
                                             meta.topics + i * Util.Marshal.SizeOf <rd_kafka_metadata_topic>()))
                                 .Select(t => new TopicMetadata(
                                             t.topic,
                                             Enumerable.Range(0, t.partition_cnt)
                                             .Select(j => Util.Marshal.PtrToStructure <rd_kafka_metadata_partition>(
                                                         t.partitions + j * Util.Marshal.SizeOf <rd_kafka_metadata_partition>()))
                                             .Select(p => new PartitionMetadata(
                                                         p.id,
                                                         p.leader,
                                                         MarshalCopy(p.replicas, p.replica_cnt),
                                                         MarshalCopy(p.isrs, p.isr_cnt),
                                                         p.err
                                                         ))
                                             .ToList(),
                                             t.err
                                             ))
                                 .ToList();

                    return(new Metadata(
                               brokers,
                               topics,
                               meta.orig_broker_id,
                               meta.orig_broker_name
                               ));
                }
                finally
                {
                    LibRdKafka.metadata_destroy(metaPtr);
                }
            }
            else
            {
                throw new KafkaException(err);
            }
        }
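
GetMetadata() walks each native array of structs by computing element addresses as base + index * SizeOf<T>() and materializing them with PtrToStructure. The sketch below does the same walk over memory it allocates itself; fake_broker is an illustrative stand-in for rd_kafka_metadata_broker, with the host string left null for brevity.

using System;
using System.Linq;
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Sequential)]
struct fake_broker
{
    public int id;
    public IntPtr host;   // char * (left null in this sketch)
    public int port;
}

class StructArrayDemo
{
    static void Main()
    {
        int stride = Marshal.SizeOf<fake_broker>();
        IntPtr array = Marshal.AllocHGlobal(3 * stride);
        try
        {
            for (int i = 0; i < 3; i++)
                Marshal.StructureToPtr(
                    new fake_broker { id = i, host = IntPtr.Zero, port = 9092 + i },
                    array + i * stride, fDeleteOld: false);

            // The same Range/Select walk GetMetadata() performs over meta.brokers.
            var brokers = Enumerable.Range(0, 3)
                .Select(i => Marshal.PtrToStructure<fake_broker>(array + i * stride))
                .Select(b => $"broker {b.id} on port {b.port}")
                .ToList();

            brokers.ForEach(Console.WriteLine);
        }
        finally
        {
            Marshal.FreeHGlobal(array);
        }
    }
}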
Code example #7
        internal void Unsubscribe()
        {
            ErrorCode err = LibRdKafka.unsubscribe(handle);

            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to unsubscribe");
            }
        }
Code example #8
        internal void Unsubscribe()
        {
            ErrorCode err = LibRdKafka.unsubscribe(handle);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }
        }
Code example #9
        internal void ConsumerClose()
        {
            ErrorCode err = LibRdKafka.consumer_close(handle);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }
        }
Code example #10
        internal void Commit()
        {
            ErrorCode err = LibRdKafka.commit(handle, IntPtr.Zero, false);

            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to commit offsets");
            }
        }
Code example #11
        internal void ConsumerClose()
        {
            ErrorCode err = LibRdKafka.consumer_close(handle);

            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to close consumer");
            }
        }
Code example #12
        internal bool ConsumerPoll(out Message message, IntPtr millisecondsTimeout)
        {
            // TODO: There is a newer librdkafka interface for this now. Use that.
            IntPtr msgPtr = LibRdKafka.consumer_poll(handle, millisecondsTimeout);

            if (msgPtr == IntPtr.Zero)
            {
                message = default(Message);
                return(false);
            }

            var msg = Marshal.PtrToStructure <rd_kafka_message>(msgPtr);

            byte[] val = null;
            if (msg.val != IntPtr.Zero)
            {
                val = new byte[(int)msg.len];
                Marshal.Copy(msg.val, val, 0, (int)msg.len);
            }
            byte[] key = null;
            if (msg.key != IntPtr.Zero)
            {
                key = new byte[(int)msg.key_len];
                Marshal.Copy(msg.key, key, 0, (int)msg.key_len);
            }

            string topic = null;

            if (msg.rkt != IntPtr.Zero)
            {
                topic = Util.Marshal.PtrToStringUTF8(LibRdKafka.topic_name(msg.rkt));
            }

            IntPtr timestampType;
            long   timestamp = LibRdKafka.message_timestamp(msgPtr, out timestampType) / 1000;
            var    dateTime  = new DateTime(0);

            if ((TimestampType)timestampType != TimestampType.NotAvailable)
            {
                dateTime = Timestamp.UnixTimestampMsToDateTime(timestamp);
            }

            LibRdKafka.message_destroy(msgPtr);

            message = new Message(
                topic,
                msg.partition,
                msg.offset,
                key,
                val,
                new Timestamp(dateTime, (TimestampType)timestampType),
                msg.err
                );

            return(true);
        }
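
ConsumerPoll() must copy the key and value payloads into managed arrays with Marshal.Copy before message_destroy frees the native message. A minimal sketch of that copy-out ordering, with AllocHGlobal/FreeHGlobal standing in for the native message's lifetime:

using System;
using System.Runtime.InteropServices;

class CopyOutDemo
{
    static void Main()
    {
        // Stand-in for msg.val: a native buffer the library owns.
        byte[] source = { 10, 20, 30 };
        IntPtr native = Marshal.AllocHGlobal(source.Length);
        Marshal.Copy(source, 0, native, source.Length);

        byte[] val;
        try
        {
            // Snapshot the payload into managed memory *before* the
            // native message is destroyed, as ConsumerPoll() must.
            val = new byte[source.Length];
            Marshal.Copy(native, val, 0, val.Length);
        }
        finally
        {
            Marshal.FreeHGlobal(native);   // plays the role of message_destroy()
        }
        Console.WriteLine(string.Join(",", val));   // prints 10,20,30
    }
}

Code example #13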
        internal static SafeConfigHandle Create()
        {
            var ch = LibRdKafka.conf_new();

            if (ch.IsInvalid)
            {
                throw new Exception("Failed to create config");
            }
            return(ch);
        }
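Code example #14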
        internal void Seek(string topic, int partition, long offset, int millisecondsTimeout)
        {
            ThrowIfHandleClosed();
            SafeTopicHandle rtk    = Topic(topic, IntPtr.Zero);
            var             result = LibRdKafka.seek(rtk.DangerousGetHandle(), partition, offset, (IntPtr)millisecondsTimeout);

            if (result != ErrorCode.NoError)
            {
                throw new KafkaException(result);
            }
        }
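Code example #15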
        internal WatermarkOffsets GetWatermarkOffsets(string topic, int partition)
        {
            ThrowIfHandleClosed();
            ErrorCode err = LibRdKafka.get_watermark_offsets(handle, topic, partition, out long low, out long high);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }

            return(new WatermarkOffsets(low, high));
        }
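Code example #16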
        internal WatermarkOffsets QueryWatermarkOffsets(string topic, int partition, int millisecondsTimeout)
        {
            ThrowIfHandleClosed();
            ErrorCode err = LibRdKafka.query_watermark_offsets(handle, topic, partition, out long low, out long high, (IntPtr)millisecondsTimeout);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }

            return(new WatermarkOffsets(low, high));
        }
Code example #17
        public static SafeKafkaHandle Create(RdKafkaType type, IntPtr config)
        {
            var errorStringBuilder = new StringBuilder(512);
            var skh = LibRdKafka.kafka_new(type, config, errorStringBuilder,
                                           (UIntPtr)errorStringBuilder.Capacity);

            if (skh.IsInvalid)
            {
                throw new InvalidOperationException(errorStringBuilder.ToString());
            }
            return(skh);
        }
Code example #18
        internal List <string> GetSubscription()
        {
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.subscription(handle, out listPtr);

            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to get subscription");
            }
            // TODO: need to free anything here?
            return(GetTopicPartitionOffsetErrorList(listPtr).Select(a => a.Topic).ToList());
        }
Code example #19
        internal WatermarkOffsets GetWatermarkOffsets(string topic, int partition)
        {
            long low;
            long high;

            ErrorCode err = LibRdKafka.get_watermark_offsets(handle, topic, partition, out low, out high);

            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to get watermark offsets");
            }

            return(new WatermarkOffsets(low, high));
        }
Code example #20
        internal List <string> GetSubscription()
        {
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.subscription(handle, out listPtr);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }
            var ret = GetTopicPartitionOffsetErrorList(listPtr).Select(a => a.Topic).ToList();

            LibRdKafka.topic_partition_list_destroy(listPtr);
            return(ret);
        }
Code example #21
        internal WatermarkOffsets QueryWatermarkOffsets(string topic, int partition, int millisecondsTimeout)
        {
            long low;
            long high;

            ErrorCode err = LibRdKafka.query_watermark_offsets(handle, topic, partition, out low, out high, (IntPtr)millisecondsTimeout);

            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to query watermark offsets");
            }

            return(new WatermarkOffsets(low, high));
        }
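Code example #22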
        internal bool ConsumerPoll(out Message message, IntPtr millisecondsTimeout)
        {
            ThrowIfHandleClosed();
            // TODO: There is a newer librdkafka interface for this now. Use that.
            IntPtr msgPtr = LibRdKafka.consumer_poll(handle, millisecondsTimeout);

            if (msgPtr == IntPtr.Zero)
            {
                message = null;
                return(false);
            }

            var msg = Util.Marshal.PtrToStructure <rd_kafka_message>(msgPtr);

            byte[] val = null;
            if (msg.val != IntPtr.Zero)
            {
                val = new byte[(int)msg.len];
                Marshal.Copy(msg.val, val, 0, (int)msg.len);
            }
            byte[] key = null;
            if (msg.key != IntPtr.Zero)
            {
                key = new byte[(int)msg.key_len];
                Marshal.Copy(msg.key, key, 0, (int)msg.key_len);
            }

            string topic = null;

            if (msg.rkt != IntPtr.Zero)
            {
                topic = Util.Marshal.PtrToStringUTF8(LibRdKafka.topic_name(msg.rkt));
            }

            long timestamp = LibRdKafka.message_timestamp(msgPtr, out IntPtr timestampType);

            LibRdKafka.message_destroy(msgPtr);

            message = new Message(
                topic,
                msg.partition,
                msg.offset,
                key,
                val,
                new Timestamp(timestamp, (TimestampType)timestampType),
                msg.err
                );

            return(true);
        }
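Code example #23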
        /// <summary>
        ///     Store offsets for one or more partitions.
        ///
        ///     The offset will be committed (written) to the offset store according
        ///     to `auto.commit.interval.ms` or manual offset-less commit().
        /// </summary>
        /// <remarks>
        ///     `enable.auto.offset.store` must be set to "false" when using this API.
        /// </remarks>
        /// <param name="offsets">
        ///     List of offsets to be committed.
        /// </param>
        /// <returns>
        ///     For each topic/partition returns current stored offset
        ///     or a partition specific error.
        /// </returns>
        internal List <TopicPartitionOffsetError> StoreOffsets(IEnumerable <TopicPartitionOffset> offsets)
        {
            ThrowIfHandleClosed();
            IntPtr    cOffsets = GetCTopicPartitionList(offsets);
            ErrorCode err      = LibRdKafka.offsets_store(handle, cOffsets);
            var       results  = GetTopicPartitionOffsetErrorList(cOffsets);

            LibRdKafka.topic_partition_list_destroy(cOffsets);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }

            return(results);
        }
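Code example #24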
        internal List <TopicPartition> GetAssignment()
        {
            ThrowIfHandleClosed();
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.assignment(handle, out listPtr);

            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }

            var ret = GetTopicPartitionOffsetErrorList(listPtr).Select(a => a.TopicPartition).ToList();

            LibRdKafka.topic_partition_list_destroy(listPtr);
            return(ret);
        }
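Code example #25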
        private List <GroupInfo> ListGroupsImpl(string group, int millisecondsTimeout)
        {
            ThrowIfHandleClosed();
            ErrorCode err = LibRdKafka.list_groups(handle, group, out IntPtr grplistPtr, (IntPtr)millisecondsTimeout);

            if (err == ErrorCode.NoError)
            {
                var list   = Util.Marshal.PtrToStructure <rd_kafka_group_list>(grplistPtr);
                var groups = Enumerable.Range(0, list.group_cnt)
                             .Select(i => Util.Marshal.PtrToStructure <rd_kafka_group_info>(
                                         list.groups + i * Util.Marshal.SizeOf <rd_kafka_group_info>()))
                             .Select(gi => new GroupInfo(
                                         new BrokerMetadata(
                                             gi.broker.id,
                                             gi.broker.host,
                                             gi.broker.port
                                             ),
                                         gi.group,
                                         gi.err,
                                         gi.state,
                                         gi.protocol_type,
                                         gi.protocol,
                                         Enumerable.Range(0, gi.member_cnt)
                                         .Select(j => Util.Marshal.PtrToStructure <rd_kafka_group_member_info>(
                                                     gi.members + j * Util.Marshal.SizeOf <rd_kafka_group_member_info>()))
                                         .Select(mi => new GroupMemberInfo(
                                                     mi.member_id,
                                                     mi.client_id,
                                                     mi.client_host,
                                                     CopyBytes(
                                                         mi.member_metadata,
                                                         mi.member_metadata_size),
                                                     CopyBytes(
                                                         mi.member_assignment,
                                                         mi.member_assignment_size)
                                                     ))
                                         .ToList()
                                         ))
                             .ToList();
                LibRdKafka.group_list_destroy(grplistPtr);
                return(groups);
            }
            else
            {
                throw new KafkaException(err);
            }
        }
Code example #26
        internal void Subscribe(ICollection <string> topics)
        {
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)topics.Count);

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create topic partition list");
            }
            foreach (string topic in topics)
            {
                LibRdKafka.topic_partition_list_add(list, topic, RD_KAFKA_PARTITION_UA);
            }

            ErrorCode err = LibRdKafka.subscribe(handle, list);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to subscribe to topics");
            }
        }
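Code example #27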
        /// <summary>
        /// Creates and returns a C rd_kafka_topic_partition_list_t * populated by offsets.
        /// </summary>
        /// <returns>
        /// If offsets is null, IntPtr.Zero is returned; otherwise an IntPtr
        /// which must be destroyed with LibRdKafka.topic_partition_list_destroy()
        /// </returns>
        internal static IntPtr GetCTopicPartitionList(IEnumerable <TopicPartitionOffset> offsets)
        {
            if (offsets == null)
            {
                return(IntPtr.Zero);
            }

            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)offsets.Count());

            if (list == IntPtr.Zero)
            {
                throw new OutOfMemoryException("Failed to create topic partition list");
            }

            foreach (var p in offsets)
            {
                IntPtr ptr = LibRdKafka.topic_partition_list_add(list, p.Topic, p.Partition);
                Marshal.WriteInt64(ptr, (int)Util.Marshal.OffsetOf <rd_kafka_topic_partition>("offset"), p.Offset);
            }
            return(list);
        }
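
Note the ownership contract the doc comment spells out: the caller owns the returned list and must destroy it, which is exactly the try/finally shape OffsetsForTimes uses in the next example.

Code example #28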
        internal IEnumerable <TopicPartitionOffsetError> OffsetsForTimes(IEnumerable <TopicPartitionTimestamp> timestampsToSearch, int millisecondsTimeout)
        {
            var    offsets  = timestampsToSearch.Select(t => new TopicPartitionOffset(t.TopicPartition, t.Timestamp.UnixTimestampMs)).ToList();
            IntPtr cOffsets = GetCTopicPartitionList(offsets);

            try
            {
                // The timestamps to query are represented as Offset property in offsets param on input,
                // and Offset property will contain the offset on output
                var errorCode = LibRdKafka.offsets_for_times(handle, cOffsets, (IntPtr)millisecondsTimeout);
                if (errorCode != ErrorCode.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                return(GetTopicPartitionOffsetErrorList(cOffsets));
            }
            finally
            {
                LibRdKafka.topic_partition_list_destroy(cOffsets);
            }
        }
Code example #29
        /// <summary>
        ///     for each topic/partition returns the current position (last consumed offset + 1)
        ///     or a partition specific error.
        ///
        ///     throws KafkaException if the above information cannot be retrieved.
        /// </summary>
        internal List <TopicPartitionOffsetError> Position(ICollection <TopicPartition> partitions)
        {
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create position list");
            }
            foreach (var partition in partitions)
            {
                LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
            }
            ErrorCode err    = LibRdKafka.position(handle, list);
            var       result = GetTopicPartitionOffsetErrorList(list);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NO_ERROR)
            {
                throw new KafkaException(err, "Failed to fetch position");
            }
            return(result);
        }
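Code example #30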
        internal void Subscribe(IEnumerable <string> topics)
        {
            ThrowIfHandleClosed();
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)topics.Count());

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create topic partition list");
            }
            foreach (string topic in topics)
            {
                LibRdKafka.topic_partition_list_add(list, topic, RD_KAFKA_PARTITION_UA);
            }

            ErrorCode err = LibRdKafka.subscribe(handle, list);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NoError)
            {
                throw new KafkaException(err);
            }
        }