Example #1
0
        // TODO: deduplicate, merge with other one
        internal Dictionary <string, string> Dump()
        {
            UIntPtr cntp = (UIntPtr)0;
            IntPtr  data = LibRdKafka.topic_conf_dump(handle, out cntp);

            if (data == IntPtr.Zero)
            {
                throw new Exception("Zero data");
            }

            try
            {
                if (((int)cntp & 1) != 0)
                {
                    // Expect Key -> Value, so even number of strings
                    throw new Exception("Invalid number of config entries");
                }

                var dict = new Dictionary <string, string>();
                for (int i = 0; i < (int)cntp / 2; i++)
                {
                    dict.Add(Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, 2 * i * Marshal.SizeOf <IntPtr>())),
                             Marshal.PtrToStringAnsi(Marshal.ReadIntPtr(data, (2 * i + 1) * Marshal.SizeOf <IntPtr>())));
                }
                // Filter out callback pointers
                return(dict.Where(kv => !kv.Key.EndsWith("_cb")).ToDictionary(kv => kv.Key, kv => kv.Value));
            }
            finally
            {
                LibRdKafka.conf_dump_free(data, cntp);
            }
        }
        /// <summary>
        /// Sets this consumer's partition assignment, including the start
        /// offset for each partition. A null collection clears the current
        /// assignment (a zero list pointer is passed to the native call).
        /// </summary>
        /// <exception cref="RdKafkaException">Thrown when the native assign fails.</exception>
        internal void Assign(ICollection <TopicPartitionOffset> partitions)
        {
            IntPtr nativeList = IntPtr.Zero;

            if (partitions != null)
            {
                nativeList = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
                if (nativeList == IntPtr.Zero)
                {
                    throw new Exception("Failed to create topic partition list");
                }

                // Field offset within the native struct is constant; compute once.
                int offsetField = (int)Marshal.OffsetOf <rd_kafka_topic_partition>("offset");
                foreach (var tpo in partitions)
                {
                    var entry = LibRdKafka.topic_partition_list_add(nativeList, tpo.Topic, tpo.Partition);
                    // Write the requested start offset straight into the native entry.
                    Marshal.WriteInt64(entry, offsetField, tpo.Offset);
                }
            }

            var err = LibRdKafka.assign(handle, nativeList);

            if (nativeList != IntPtr.Zero)
            {
                LibRdKafka.topic_partition_list_destroy(nativeList);
            }
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to assign partitions");
            }
        }
Example #3
0
 /// <summary>
 /// Destroys the native topic and drops the extra refcount that was taken
 /// on the owning kafka handle when this topic was created.
 /// </summary>
 protected override bool ReleaseHandle()
 {
     LibRdKafka.topic_destroy(handle);
     // See SafeKafkaHandle.Topic
     kafkaHandle.DangerousRelease();
     return true;
 }
Example #4
0
 /// <summary>
 /// Enqueues a message with explicit payload/key lengths. The payload is
 /// always copied (MSG_F_COPY); when <paramref name="blockIfQueueFull"/> is
 /// set, MSG_F_BLOCK is added so the call waits on a full local queue.
 /// </summary>
 internal long Produce(byte[] payload, int payloadCount, byte[] key, int keyCount, int partition, IntPtr opaque, bool blockIfQueueFull)
 {
     var flags = MsgFlags.MSG_F_COPY;
     if (blockIfQueueFull)
     {
         flags |= MsgFlags.MSG_F_BLOCK;
     }

     return (long)LibRdKafka.produce(
         handle,
         partition,
         (IntPtr)flags,
         payload, (UIntPtr)payloadCount,
         key, (UIntPtr)keyCount,
         opaque);
 }
Example #5
0
 /// <summary>
 /// Enqueues a copy of <paramref name="payload"/> keyed by
 /// <paramref name="key"/>; either array may be null and is then passed
 /// with a zero length.
 /// </summary>
 internal long Produce(byte[] payload, byte[] key, int partition, IntPtr opaque)
 {
     var payloadLen = (UIntPtr)(payload?.Length ?? 0);
     var keyLen     = (UIntPtr)(key?.Length ?? 0);

     return (long)LibRdKafka.produce(
         handle,
         partition,
         (IntPtr)MsgFlags.MSG_F_COPY,
         payload, payloadLen,
         key, keyLen,
         opaque);
 }
        /// <summary>
        /// Shuts the consumer down via the native consumer_close call and
        /// translates a failure code into an exception.
        /// </summary>
        internal void ConsumerClose()
        {
            var err = LibRdKafka.consumer_close(handle);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to close consumer");
            }
        }
        /// <summary>
        /// Synchronously commits offsets. A zero list pointer is passed to
        /// the native commit — presumably selecting the current assignment;
        /// confirm against librdkafka rd_kafka_commit docs.
        /// </summary>
        internal void Commit()
        {
            var err = LibRdKafka.commit(handle, IntPtr.Zero, false);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to commit offsets");
            }
        }
        /// <summary>
        /// Removes the consumer's current topic subscription.
        /// </summary>
        internal void Unsubscribe()
        {
            var err = LibRdKafka.unsubscribe(handle);
            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to unsubscribe");
            }
        }
Example #9
0
        /// <summary>
        /// Allocates a fresh native topic configuration and wraps it in a
        /// safe handle.
        /// </summary>
        /// <exception cref="Exception">Thrown when the native allocation fails.</exception>
        internal static SafeTopicConfigHandle Create()
        {
            var configHandle = LibRdKafka.topic_conf_new();
            if (configHandle.IsInvalid)
            {
                throw new Exception("Failed to create TopicConfig");
            }
            return configHandle;
        }
        /// <summary>
        /// Creates a native kafka client of the given type (producer or
        /// consumer). On failure the native error text captured in the
        /// buffer becomes the exception message.
        /// </summary>
        internal static SafeKafkaHandle Create(RdKafkaType type, IntPtr config)
        {
            const int ErrorBufferSize = 512;
            var errorBuffer = new StringBuilder(ErrorBufferSize);

            var clientHandle = LibRdKafka.kafka_new(type, config, errorBuffer,
                                                    (UIntPtr)errorBuffer.Capacity);
            if (clientHandle.IsInvalid)
            {
                throw new InvalidOperationException(errorBuffer.ToString());
            }
            return clientHandle;
        }
        /// <summary>
        /// Returns the list of topics this consumer is currently subscribed to.
        /// </summary>
        /// <exception cref="RdKafkaException">Thrown when the native call fails.</exception>
        internal List <string> GetSubscription()
        {
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.subscription(handle, out listPtr);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to get subscription");
            }
            try
            {
                return GetTopicList(listPtr);
            }
            finally
            {
                // FIX (resolves the old TODO): rd_kafka_subscription hands
                // ownership of the list to the caller, which must destroy it
                // or it leaks. GetTopicList copies the entries out first —
                // same copy-then-destroy pattern as Committed()/Position().
                LibRdKafka.topic_partition_list_destroy(listPtr);
            }
        }
        /// <summary>
        /// Returns the partitions currently assigned to this consumer.
        /// </summary>
        /// <exception cref="RdKafkaException">Thrown when the native call fails.</exception>
        internal List <TopicPartition> GetAssignment()
        {
            IntPtr    listPtr = IntPtr.Zero;
            ErrorCode err     = LibRdKafka.assignment(handle, out listPtr);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to get assignment");
            }
            try
            {
                return GetTopicPartitionList(listPtr);
            }
            finally
            {
                // FIX (resolves the old TODO): rd_kafka_assignment hands
                // ownership of the list to the caller, which must destroy it
                // or it leaks. GetTopicPartitionList copies the entries out
                // first — same copy-then-destroy pattern as Committed().
                LibRdKafka.topic_partition_list_destroy(listPtr);
            }
        }
        /// <summary>
        /// Returns this client's group member id, or null when the native
        /// call yields no id. The native string is copied into managed
        /// memory and then freed.
        /// </summary>
        internal string MemberId()
        {
            IntPtr strPtr = LibRdKafka.memberid(handle);

            // BUG FIX: the original compared the IntPtr struct to the null
            // literal ("strPtr == null"), which is a lifted comparison that
            // is always false. A NULL native pointer is IntPtr.Zero.
            if (strPtr == IntPtr.Zero)
            {
                return(null);
            }

            string memberId = Marshal.PtrToStringAnsi(strPtr);

            // The native string was allocated by librdkafka; release it there.
            LibRdKafka.mem_free(handle, strPtr);
            return(memberId);
        }
        /// <summary>
        /// Lists consumer groups known to the cluster, marshaling the native
        /// group/member structs into managed <c>GroupInfo</c> objects.
        /// NOTE(review): a null <paramref name="group"/> presumably lists all
        /// groups — confirm against librdkafka rd_kafka_list_groups docs.
        /// </summary>
        /// <exception cref="RdKafkaException">Thrown when the native call fails.</exception>
        internal List <GroupInfo> ListGroups(string group, IntPtr timeoutMs)
        {
            IntPtr    grplistPtr;
            ErrorCode err = LibRdKafka.list_groups(handle, group, out grplistPtr, timeoutMs);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to fetch group list");
            }

            // FIX: destroy the native list in a finally block so it is not
            // leaked when the marshaling below throws. The LINQ pipeline is
            // fully materialized by ToList() before the finally runs.
            try
            {
                var list = Marshal.PtrToStructure <rd_kafka_group_list>(grplistPtr);
                return Enumerable.Range(0, list.group_cnt)
                       .Select(i => Marshal.PtrToStructure <rd_kafka_group_info>(
                                   list.groups + i * Marshal.SizeOf <rd_kafka_group_info>()))
                       .Select(gi => new GroupInfo()
                {
                    Broker = new BrokerMetadata()
                    {
                        BrokerId = gi.broker.id,
                        Host     = gi.broker.host,
                        Port     = gi.broker.port
                    },
                    Group        = gi.group,
                    Error        = gi.err,
                    State        = gi.state,
                    ProtocolType = gi.protocol_type,
                    Protocol     = gi.protocol,
                    // Member metadata/assignment are raw byte blobs; copy them
                    // out of native memory before the list is destroyed.
                    Members      = Enumerable.Range(0, gi.member_cnt)
                                   .Select(j => Marshal.PtrToStructure <rd_kafka_group_member_info>(
                                               gi.members + j * Marshal.SizeOf <rd_kafka_group_member_info>()))
                                   .Select(mi => new GroupMemberInfo()
                    {
                        MemberId       = mi.member_id,
                        ClientId       = mi.client_id,
                        ClientHost     = mi.client_host,
                        MemberMetadata = CopyBytes(mi.member_metadata,
                                                   mi.member_metadata_size),
                        MemberAssignment = CopyBytes(mi.member_assignment,
                                                     mi.member_assignment_size)
                    })
                                   .ToList()
                })
                       .ToList();
            }
            finally
            {
                LibRdKafka.group_list_destroy(grplistPtr);
            }
        }
        /// <summary>
        /// Reads the client's known low/high watermark offsets for a
        /// partition (the native "get" variant — presumably cached locally,
        /// unlike QueryWatermarkOffsets; confirm with librdkafka docs).
        /// </summary>
        internal Offsets GetWatermarkOffsets(string topic, int partition)
        {
            long low, high;
            var err = LibRdKafka.get_watermark_offsets(handle, topic, partition, out low, out high);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to get watermark offsets");
            }

            return new Offsets { Low = low, High = high };
        }
        /// <summary>
        /// Queries the low/high watermark offsets for a partition.
        /// A default(TimeSpan) timeout maps to -1 ms (wait indefinitely).
        /// </summary>
        internal Offsets QueryWatermarkOffsets(string topic, int partition, TimeSpan timeout)
        {
            IntPtr timeoutMs;
            if (timeout == default(TimeSpan))
            {
                timeoutMs = new IntPtr(-1);
            }
            else
            {
                timeoutMs = (IntPtr)timeout.TotalMilliseconds;
            }

            long low, high;
            var err = LibRdKafka.query_watermark_offsets(handle, topic, partition, out low, out high, timeoutMs);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to query watermark offsets");
            }

            return new Offsets { Low = low, High = high };
        }
        /// <summary>
        /// Polls the consumer for a single message or event.
        /// Returns null when the timeout expires with nothing to deliver;
        /// otherwise copies the native message into managed arrays and
        /// destroys the native message before returning.
        /// </summary>
        internal MessageAndError?ConsumerPoll(IntPtr timeoutMs)
        {
            IntPtr msgPtr = LibRdKafka.consumer_poll(handle, timeoutMs);

            // Zero pointer: nothing arrived within the timeout.
            if (msgPtr == IntPtr.Zero)
            {
                return(null);
            }
            var msg = Marshal.PtrToStructure <rd_kafka_message>(msgPtr);

            // Copy payload and key out of native memory (either may be absent).
            byte[] payload = null;
            byte[] key     = null;
            if (msg.payload != IntPtr.Zero)
            {
                payload = new byte[(int)msg.len];
                Marshal.Copy(msg.payload, payload, 0, (int)msg.len);
            }
            if (msg.key != IntPtr.Zero)
            {
                key = new byte[(int)msg.key_len];
                Marshal.Copy(msg.key, key, 0, (int)msg.key_len);
            }
            string topic = null;

            if (msg.rkt != IntPtr.Zero)
            {
                // Must read the topic name BEFORE message_destroy below,
                // which invalidates the rkt pointer.
                topic = Marshal.PtrToStringAnsi(LibRdKafka.topic_name(msg.rkt));
            }
            // Safe now: all needed fields were copied into the msg struct
            // and the managed arrays above.
            LibRdKafka.message_destroy(msgPtr);

            var message = new Message()
            {
                Topic     = topic,
                Partition = msg.partition,
                Offset    = msg.offset,
                Payload   = payload,
                Key       = key
            };

            // Error is surfaced alongside the message; NOTE(review): msg.err
            // semantics (e.g. partition EOF vs real error) come from
            // librdkafka — confirm against its rd_kafka_message docs.
            return(new MessageAndError()
            {
                Message = message,
                Error = msg.err
            });
        }
        // Consumer API
        /// <summary>
        /// Replaces the consumer's subscription with the given set of topics.
        /// </summary>
        /// <exception cref="RdKafkaException">Thrown when the native subscribe fails.</exception>
        internal void Subscribe(ICollection <string> topics)
        {
            IntPtr nativeList = LibRdKafka.topic_partition_list_new((IntPtr)topics.Count);
            if (nativeList == IntPtr.Zero)
            {
                throw new Exception("Failed to create topic partition list");
            }

            foreach (var topicName in topics)
            {
                // RD_KAFKA_PARTITION_UA: partition left unassigned for subscriptions.
                LibRdKafka.topic_partition_list_add(nativeList, topicName, RD_KAFKA_PARTITION_UA);
            }

            var err = LibRdKafka.subscribe(handle, nativeList);
            LibRdKafka.topic_partition_list_destroy(nativeList);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to subscribe to topics");
            }
        }
Example #19
0
        /// <summary>
        /// Fetches the committed offsets for the given partitions, waiting at
        /// most <paramref name="timeout_ms"/> for the native call to complete.
        /// </summary>
        internal List <TopicPartitionOffset> Committed(List <TopicPartition> partitions, IntPtr timeout_ms)
        {
            IntPtr nativeList = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
            if (nativeList == IntPtr.Zero)
            {
                throw new Exception("Failed to create committed partition list");
            }

            foreach (var tp in partitions)
            {
                LibRdKafka.topic_partition_list_add(nativeList, tp.Topic, tp.Partition);
            }

            // Copy results out before destroying the native list; the error
            // is checked afterwards so the list is freed on both paths.
            var err    = LibRdKafka.committed(handle, nativeList, timeout_ms);
            var result = GetTopicPartitionOffsetList(nativeList);
            LibRdKafka.topic_partition_list_destroy(nativeList);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to fetch committed offsets");
            }
            return result;
        }
Example #20
0
        /// <summary>
        /// Reads a topic configuration value by name using the native
        /// two-call pattern: the first call (null buffer) reports the needed
        /// size, the second fills a buffer of that size.
        /// </summary>
        /// <exception cref="InvalidOperationException">Thrown when the property does not exist.</exception>
        internal string Get(string name)
        {
            var destSize = (UIntPtr)0;
            StringBuilder buffer = null;

            var res = LibRdKafka.topic_conf_get(handle, name, null, ref destSize);
            if (res == ConfRes.Ok)
            {
                buffer = new StringBuilder((int)destSize);
                res = LibRdKafka.topic_conf_get(handle, name, buffer, ref destSize);
            }

            if (res == ConfRes.Unknown)
            {
                throw new InvalidOperationException($"No such configuration property: {name}");
            }
            if (res != ConfRes.Ok)
            {
                throw new Exception("Unknown error while getting configuration property");
            }
            return buffer?.ToString();
        }
        /// <summary>
        /// Retrieves the consumer's current consume position for each of the
        /// given partitions.
        /// </summary>
        internal List <TopicPartitionOffset> Position(ICollection <TopicPartition> partitions)
        {
            IntPtr nativeList = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
            if (nativeList == IntPtr.Zero)
            {
                throw new Exception("Failed to create position list");
            }

            foreach (var tp in partitions)
            {
                LibRdKafka.topic_partition_list_add(nativeList, tp.Topic, tp.Partition);
            }

            // Copy results out before destroying the native list; the error
            // is checked afterwards so the list is freed on both paths.
            var err    = LibRdKafka.position(handle, nativeList);
            var result = GetTopicPartitionOffsetList(nativeList);
            LibRdKafka.topic_partition_list_destroy(nativeList);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to fetch position");
            }
            return result;
        }
        /// <summary>
        /// Synchronously commits the given explicit topic/partition/offset
        /// triples.
        /// </summary>
        internal void Commit(ICollection <TopicPartitionOffset> offsets)
        {
            IntPtr nativeList = LibRdKafka.topic_partition_list_new((IntPtr)offsets.Count);
            if (nativeList == IntPtr.Zero)
            {
                throw new Exception("Failed to create offset commit list");
            }

            // Field offset within the native struct is constant; compute once.
            int offsetField = (int)Marshal.OffsetOf <rd_kafka_topic_partition>("offset");
            foreach (var tpo in offsets)
            {
                var entry = LibRdKafka.topic_partition_list_add(nativeList, tpo.Topic, tpo.Partition);
                Marshal.WriteInt64(entry, offsetField, tpo.Offset);
            }

            var err = LibRdKafka.commit(handle, nativeList, false);
            LibRdKafka.topic_partition_list_destroy(nativeList);

            if (err != ErrorCode.NO_ERROR)
            {
                throw RdKafkaException.FromErr(err, "Failed to commit offsets");
            }
        }
        /// <summary>
        /// Creates a native topic handle bound to this kafka handle.
        /// An extra refcount is taken on this safe handle so it outlives the
        /// topic handle; it is released in SafeTopicHandle.ReleaseHandle via
        /// the back-reference set below. On the AddRef failure path the
        /// native config is destroyed here, since topic_new was never called
        /// (which otherwise appears to take ownership of it — NOTE(review):
        /// confirm against librdkafka rd_kafka_topic_new docs).
        /// </summary>
        internal SafeTopicHandle Topic(string topic, IntPtr config)
        {
            // Increase the refcount to this handle to keep it alive for
            // at least as long as the topic handle.
            // Will be decremented by the topic handle ReleaseHandle.
            bool success = false;

            DangerousAddRef(ref success);
            if (!success)
            {
                // topic_new was never reached, so free the config ourselves.
                LibRdKafka.topic_conf_destroy(config);
                throw new Exception("Failed to create topic (DangerousAddRef failed)");
            }
            var topicHandle = LibRdKafka.topic_new(handle, topic, config);

            if (topicHandle.IsInvalid)
            {
                // Undo the refcount taken above; no topic handle will release it.
                DangerousRelease();
                throw RdKafkaException.FromErr(LibRdKafka.last_error(), "Failed to create topic");
            }
            // Back-reference used by SafeTopicHandle.ReleaseHandle to drop
            // the refcount taken above.
            topicHandle.kafkaHandle = this;
            return(topicHandle);
        }
Example #24
0
        // TODO: Constant instead of 512?
        /// <summary>
        /// Sets a topic configuration property, translating the native
        /// result code into an exception on failure. The native error text
        /// captured in the buffer becomes the exception message.
        /// </summary>
        /// <exception cref="InvalidOperationException">
        /// Thrown for an invalid value or an unknown property name.
        /// </exception>
        internal void Set(string name, string value)
        {
            var     errorStringBuilder = new StringBuilder(512);
            ConfRes res = LibRdKafka.topic_conf_set(handle, name, value,
                                                    errorStringBuilder, (UIntPtr)errorStringBuilder.Capacity);

            switch (res)
            {
                case ConfRes.Ok:
                    return;
                // The Invalid and Unknown branches were byte-identical
                // duplicates; merged into a single case.
                case ConfRes.Invalid:
                case ConfRes.Unknown:
                    throw new InvalidOperationException(errorStringBuilder.ToString());
                default:
                    throw new Exception("Unknown error while setting configuration property");
            }
        }
Example #25
0
 /// <summary>
 /// Returns a native copy of this topic configuration object.
 /// </summary>
 internal IntPtr Dup() => LibRdKafka.topic_conf_dup(handle);
Example #26
0
 /// <summary>
 /// Destroys the wrapped native topic configuration object.
 /// </summary>
 protected override bool ReleaseHandle()
 {
     LibRdKafka.topic_conf_destroy(handle);
     return true;
 }
 // Client instance name, read from the native handle as an ANSI string.
 internal string GetName() => Marshal.PtrToStringAnsi(LibRdKafka.name(handle));
 // Drives the native poll loop for up to timeoutMs; NOTE(review): return
 // value presumably counts events served — confirm with librdkafka docs.
 internal long Poll(IntPtr timeoutMs) => (long)LibRdKafka.poll(handle, timeoutMs);
 // Adds bootstrap brokers from a broker-list string; returns the native result.
 internal long AddBrokers(string brokers) => (long)LibRdKafka.brokers_add(handle, brokers);
 // Current length of the native outbound queue.
 internal long GetOutQueueLength() => (long)LibRdKafka.outq_len(handle);