        /// <summary>
        /// Atomically set this consumer's partition assignment (with optional
        /// start offsets), replacing any previous assignment. Passing null
        /// clears the assignment: rd_kafka_assign is then called with a NULL
        /// list.
        /// </summary>
        internal void Assign(ICollection<TopicPartitionOffset> partitions)
        {
            IntPtr list = IntPtr.Zero;

            if (partitions != null)
            {
                list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
                if (list == IntPtr.Zero)
                {
                    throw new Exception("Failed to create topic partition list");
                }
                foreach (var partition in partitions)
                {
                    IntPtr ptr = LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
                    // Write the desired start offset directly into the native
                    // rd_kafka_topic_partition struct that was just added.
                    Marshal.WriteInt64(ptr,
                                       (int)Marshal.OffsetOf<rd_kafka_topic_partition>("offset"),
                                       partition.Offset);
                }
            }

            ErrorCode err = LibRdKafka.assign(handle, list);

            if (list != IntPtr.Zero)
            {
                LibRdKafka.topic_partition_list_destroy(list);
            }
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to assign partitions");
            }
        }
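        /// <summary>
        /// Synchronously commit the current offsets for the consumer's
        /// assigned partitions (rd_kafka_commit with a NULL offset list; the
        /// final argument disables asynchronous commit).
        /// </summary>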
        internal void Commit()
        {
            ErrorCode err = LibRdKafka.commit(handle, IntPtr.Zero, false);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to commit offsets");
            }
        }
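        /// <summary>
        /// Remove the consumer's current topic subscription, if any.
        /// </summary>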
        internal void Unsubscribe()
        {
            ErrorCode err = LibRdKafka.unsubscribe(handle);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to unsubscribe");
            }
        }
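        /// <summary>
        /// Shut the consumer down cleanly: rd_kafka_consumer_close() commits
        /// final offsets (if offset commits are enabled) and leaves the
        /// consumer group.
        /// </summary>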
        internal void ConsumerClose()
        {
            ErrorCode err = LibRdKafka.consumer_close(handle);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to close consumer");
            }
        }
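        /// <summary>
        /// Return the list of topic names this consumer is currently
        /// subscribed to.
        /// </summary>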
        internal List<string> GetSubscription()
        {
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.subscription(handle, out listPtr);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to get subscription");
            }
            // rd_kafka_subscription() transfers ownership of the returned list
            // to the caller, so free it once a managed copy has been made.
            var topics = GetTopicList(listPtr);
            LibRdKafka.topic_partition_list_destroy(listPtr);
            return topics;
        }
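        /// <summary>
        /// Return the partitions currently assigned to this consumer, whether
        /// set manually via Assign() or by the group rebalance protocol.
        /// </summary>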
        internal List<TopicPartition> GetAssignment()
        {
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.assignment(handle, out listPtr);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to get assignment");
            }
            // As with rd_kafka_subscription(), the returned list is owned by
            // the caller and must be destroyed after copying it.
            var assignment = GetTopicPartitionList(listPtr);
            LibRdKafka.topic_partition_list_destroy(listPtr);
            return assignment;
        }
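        /// <summary>
        /// List consumer groups known to the cluster. If
        /// <paramref name="group"/> is non-null only that group is queried.
        /// The native group list is copied into managed GroupInfo objects and
        /// then destroyed, so no native memory escapes this method.
        /// </summary>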
        internal List<GroupInfo> ListGroups(string group, IntPtr timeoutMs)
        {
            IntPtr    grplistPtr;
            ErrorCode err = LibRdKafka.list_groups(handle, group, out grplistPtr, timeoutMs);

            if (err == ErrorCode.NO_ERROR)
            {
                var list   = Marshal.PtrToStructure<rd_kafka_group_list>(grplistPtr);
                var groups = Enumerable.Range(0, list.group_cnt)
                             .Select(i => Marshal.PtrToStructure<rd_kafka_group_info>(
                                         list.groups + i * Marshal.SizeOf<rd_kafka_group_info>()))
                             .Select(gi => new GroupInfo()
                {
                    Broker = new BrokerMetadata()
                    {
                        BrokerId = gi.broker.id,
                        Host     = gi.broker.host,
                        Port     = gi.broker.port
                    },
                    Group        = gi.group,
                    Error        = gi.err,
                    State        = gi.state,
                    ProtocolType = gi.protocol_type,
                    Protocol     = gi.protocol,
                    Members      = Enumerable.Range(0, gi.member_cnt)
                                   .Select(j => Marshal.PtrToStructure<rd_kafka_group_member_info>(
                                               gi.members + j * Marshal.SizeOf<rd_kafka_group_member_info>()))
                                   .Select(mi => new GroupMemberInfo()
                    {
                        MemberId       = mi.member_id,
                        ClientId       = mi.client_id,
                        ClientHost     = mi.client_host,
                        MemberMetadata = CopyBytes(mi.member_metadata,
                                                   mi.member_metadata_size),
                        MemberAssignment = CopyBytes(mi.member_assignment,
                                                     mi.member_assignment_size)
                    })
                                   .ToList()
                })
                             .ToList();
                LibRdKafka.group_list_destroy(grplistPtr);
                return groups;
            }
            else
            {
                throw RdKafkaException.FromErr(err, "Failed to fetch group list");
            }
        }
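        /// <summary>
        /// Return the locally cached low and high watermark offsets for a
        /// partition; unlike QueryWatermarkOffsets(), this does not contact
        /// the broker.
        /// </summary>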
        internal Offsets GetWatermarkOffsets(string topic, int partition)
        {
            long low;
            long high;

            ErrorCode err = LibRdKafka.get_watermark_offsets(handle, topic, partition, out low, out high);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to get watermark offsets");
            }

            return new Offsets { Low = low, High = high };
        }
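        /// <summary>
        /// Query the broker for a partition's low and high watermark offsets,
        /// blocking up to the given timeout. A default(TimeSpan) is mapped to
        /// an infinite timeout (-1).
        /// </summary>
        /// <example>
        /// A hypothetical caller (variable names are assumed):
        /// <code>
        /// var offsets = kafkaHandle.QueryWatermarkOffsets("mytopic", 0, TimeSpan.FromSeconds(5));
        /// // consumer lag can then be estimated as offsets.High - currentPosition
        /// </code>
        /// </example>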
        internal Offsets QueryWatermarkOffsets(string topic, int partition, TimeSpan timeout)
        {
            long low;
            long high;

            ErrorCode err = LibRdKafka.query_watermark_offsets(handle, topic, partition, out low, out high,
                                                               timeout == default(TimeSpan) ? new IntPtr(-1) : (IntPtr)timeout.TotalMilliseconds);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to query watermark offsets");
            }

            return new Offsets { Low = low, High = high };
        }
        // Consumer API
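        /// <summary>
        /// Subscribe to the given topics; partition assignment is left to the
        /// group rebalance protocol, so each topic is added with the
        /// unassigned partition sentinel RD_KAFKA_PARTITION_UA.
        /// </summary>
        /// <example>
        /// A hypothetical caller (the handle variable is assumed):
        /// <code>
        /// kafkaHandle.Subscribe(new[] { "mytopic" });
        /// </code>
        /// </example>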
        internal void Subscribe(ICollection<string> topics)
        {
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)topics.Count);

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create topic partition list");
            }
            foreach (string topic in topics)
            {
                LibRdKafka.topic_partition_list_add(list, topic, RD_KAFKA_PARTITION_UA);
            }

            ErrorCode err = LibRdKafka.subscribe(handle, list);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to subscribe to topics");
            }
        }
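        /// <summary>
        /// Retrieve the current consume position (the offset of the next
        /// message to be fetched) for each of the given partitions.
        /// </summary>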
        internal List<TopicPartitionOffset> Position(ICollection<TopicPartition> partitions)
        {
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create position list");
            }
            foreach (var partition in partitions)
            {
                LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
            }
            ErrorCode err    = LibRdKafka.position(handle, list);
            var       result = GetTopicPartitionOffsetList(list);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to fetch position");
            }
            return result;
        }
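        /// <summary>
        /// Retrieve the last committed offset for each of the given
        /// partitions, waiting at most timeout_ms for the broker to respond.
        /// </summary>
        /// <example>
        /// A hypothetical caller (variable names are assumed):
        /// <code>
        /// var committed = kafkaHandle.Committed(partitions, (IntPtr)1000);
        /// </code>
        /// </example>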
        internal List<TopicPartitionOffset> Committed(List<TopicPartition> partitions, IntPtr timeout_ms)
        {
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create committed partition list");
            }
            foreach (var partition in partitions)
            {
                LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
            }
            ErrorCode err    = LibRdKafka.committed(handle, list, timeout_ms);
            var       result = GetTopicPartitionOffsetList(list);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to fetch committed offsets");
            }
            return result;
        }
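        /// <summary>
        /// Create a topic handle for <paramref name="topic"/> bound to this
        /// kafka handle (see the refcount note below).
        /// </summary>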
        internal SafeTopicHandle Topic(string topic, IntPtr config)
        {
            // Increase the refcount to this handle to keep it alive for
            // at least as long as the topic handle.
            // Will be decremented by the topic handle ReleaseHandle.
            bool success = false;

            DangerousAddRef(ref success);
            if (!success)
            {
                LibRdKafka.topic_conf_destroy(config);
                throw new Exception("Failed to create topic (DangerousAddRef failed)");
            }
            var topicHandle = LibRdKafka.topic_new(handle, topic, config);

            if (topicHandle.IsInvalid)
            {
                DangerousRelease();
                throw RdKafkaException.FromErr(LibRdKafka.last_error(), "Failed to create topic");
            }
            topicHandle.kafkaHandle = this;
            return topicHandle;
        }
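        /// <summary>
        /// Synchronously commit the given explicit offsets. Note that, per
        /// Kafka convention, the offset to commit for a partition is the
        /// offset of the next message to consume (last consumed offset + 1).
        /// </summary>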
        internal void Commit(ICollection<TopicPartitionOffset> offsets)
        {
            IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)offsets.Count);

            if (list == IntPtr.Zero)
            {
                throw new Exception("Failed to create offset commit list");
            }
            foreach (var offset in offsets)
            {
                IntPtr ptr = LibRdKafka.topic_partition_list_add(list, offset.Topic, offset.Partition);
                Marshal.WriteInt64(ptr,
                                   (int)Marshal.OffsetOf<rd_kafka_topic_partition>("offset"),
                                   offset.Offset);
            }
            ErrorCode err = LibRdKafka.commit(handle, list, false);

            LibRdKafka.topic_partition_list_destroy(list);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to commit offsets");
            }
        }
        /*
         *  allTopics       - if true: request info about all topics in the cluster,
         *                    else: only request info about locally known topics.
         *  onlyTopic       - if non-null, only request info about this topic.
         *  includeInternal - if true, include internal topics (names starting
         *                    with "__") in the result.
         *  timeout         - maximum response time before failing.
         */
        internal Metadata Metadata(bool allTopics,
                                   SafeTopicHandle onlyTopic,
                                   bool includeInternal,
                                   TimeSpan timeout)
        {
            if (timeout == default(TimeSpan))
            {
                timeout = TimeSpan.FromSeconds(10);
            }

            IntPtr    metaPtr;
            ErrorCode err = LibRdKafka.metadata(
                handle, allTopics,
                onlyTopic?.DangerousGetHandle() ?? IntPtr.Zero,
                /* const struct rd_kafka_metadata ** */ out metaPtr,
                (IntPtr)timeout.TotalMilliseconds);

            if (err == ErrorCode.NO_ERROR)
            {
                try {
                    var meta = Marshal.PtrToStructure<rd_kafka_metadata>(metaPtr);

                    var brokers = Enumerable.Range(0, meta.broker_cnt)
                                  .Select(i => Marshal.PtrToStructure<rd_kafka_metadata_broker>(
                                              meta.brokers + i * Marshal.SizeOf<rd_kafka_metadata_broker>()))
                                  .Select(b => new BrokerMetadata()
                    {
                        BrokerId = b.id, Host = b.host, Port = b.port
                    })
                                  .ToList();

                    // Topics whose names start with "__" (e.g. __consumer_offsets)
                    // are internal; they are filtered out unless the caller asked
                    // for them via includeInternal.
                    var topics = Enumerable.Range(0, meta.topic_cnt)
                                 .Select(i => Marshal.PtrToStructure<rd_kafka_metadata_topic>(
                                             meta.topics + i * Marshal.SizeOf<rd_kafka_metadata_topic>()))
                                 .Where(t => includeInternal || !t.topic.StartsWith("__"))
                                 .Select(t => new TopicMetadata()
                    {
                        Topic      = t.topic,
                        Error      = t.err,
                        Partitions =
                            Enumerable.Range(0, t.partition_cnt)
                            .Select(j => Marshal.PtrToStructure<rd_kafka_metadata_partition>(
                                        t.partitions + j * Marshal.SizeOf<rd_kafka_metadata_partition>()))
                            .Select(p => new PartitionMetadata()
                        {
                            PartitionId    = p.id,
                            Error          = p.err,
                            Leader         = p.leader,
                            Replicas       = MarshalCopy(p.replicas, p.replica_cnt),
                            InSyncReplicas = MarshalCopy(p.isrs, p.isr_cnt)
                        })
                            .ToList()
                    })
                                 .ToList();

                    return new Metadata()
                    {
                        Brokers = brokers,
                        Topics = topics,
                        OriginatingBrokerId = meta.orig_broker_id,
                        OriginatingBrokerName = meta.orig_broker_name
                    };
                }
                finally
                {
                    LibRdKafka.metadata_destroy(metaPtr);
                }
            }
            else
            {
                throw RdKafkaException.FromErr(err, "Could not retrieve metadata");
            }
        }