internal void Assign(ICollection<TopicPartitionOffset> partitions)
{
    IntPtr list = IntPtr.Zero;
    if (partitions != null)
    {
        list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
        if (list == IntPtr.Zero)
        {
            throw new Exception("Failed to create topic partition list");
        }
        foreach (var partition in partitions)
        {
            IntPtr ptr = LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
            // Write the desired start offset directly into the native rd_kafka_topic_partition struct.
            Marshal.WriteInt64(ptr, (int)Marshal.OffsetOf<rd_kafka_topic_partition>("offset"), partition.Offset);
        }
    }

    // Passing IntPtr.Zero (partitions == null) clears the current assignment.
    ErrorCode err = LibRdKafka.assign(handle, list);
    if (list != IntPtr.Zero)
    {
        LibRdKafka.topic_partition_list_destroy(list);
    }
    if (err != ErrorCode.NO_ERROR)
    {
        throw RdKafkaException.FromErr(err, "Failed to assign partitions");
    }
}
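// Note: the Marshal.OffsetOf<rd_kafka_topic_partition>("offset") calls in Assign and Commit
// assume an interop struct mirroring librdkafka's rd_kafka_topic_partition_t, roughly like the
// sketch below; the fields beyond topic/partition/offset are assumptions, not taken from this file.
//
//     [StructLayout(LayoutKind.Sequential)]
//     internal struct rd_kafka_topic_partition
//     {
//         internal string topic;     // marshaled char*
//         internal int partition;
//         internal long offset;      // field written via Marshal.WriteInt64 above
//         /* metadata, opaque, err, ... follow in the native struct */
//     }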
// Consumer API
internal void Subscribe(ICollection<string> topics)
{
    IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)topics.Count);
    if (list == IntPtr.Zero)
    {
        throw new Exception("Failed to create topic partition list");
    }
    foreach (string topic in topics)
    {
        // RD_KAFKA_PARTITION_UA marks the partition as unassigned; subscriptions are per-topic.
        LibRdKafka.topic_partition_list_add(list, topic, RD_KAFKA_PARTITION_UA);
    }

    ErrorCode err = LibRdKafka.subscribe(handle, list);
    LibRdKafka.topic_partition_list_destroy(list);
    if (err != ErrorCode.NO_ERROR)
    {
        throw RdKafkaException.FromErr(err, "Failed to subscribe to topics");
    }
}
internal List<TopicPartitionOffset> Position(ICollection<TopicPartition> partitions)
{
    IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
    if (list == IntPtr.Zero)
    {
        throw new Exception("Failed to create position list");
    }
    foreach (var partition in partitions)
    {
        LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
    }

    ErrorCode err = LibRdKafka.position(handle, list);
    // Copy the offsets out of the native list before destroying it.
    var result = GetTopicPartitionOffsetList(list);
    LibRdKafka.topic_partition_list_destroy(list);
    if (err != ErrorCode.NO_ERROR)
    {
        throw RdKafkaException.FromErr(err, "Failed to fetch position");
    }
    return result;
}
internal List<TopicPartitionOffset> Committed(List<TopicPartition> partitions, IntPtr timeout_ms)
{
    IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)partitions.Count);
    if (list == IntPtr.Zero)
    {
        throw new Exception("Failed to create committed partition list");
    }
    foreach (var partition in partitions)
    {
        LibRdKafka.topic_partition_list_add(list, partition.Topic, partition.Partition);
    }

    // Blocks for up to timeout_ms while committed offsets are fetched from the brokers.
    ErrorCode err = LibRdKafka.committed(handle, list, timeout_ms);
    var result = GetTopicPartitionOffsetList(list);
    LibRdKafka.topic_partition_list_destroy(list);
    if (err != ErrorCode.NO_ERROR)
    {
        throw RdKafkaException.FromErr(err, "Failed to fetch committed offsets");
    }
    return result;
}
internal void Commit(ICollection<TopicPartitionOffset> offsets)
{
    IntPtr list = LibRdKafka.topic_partition_list_new((IntPtr)offsets.Count);
    if (list == IntPtr.Zero)
    {
        throw new Exception("Failed to create offset commit list");
    }
    foreach (var offset in offsets)
    {
        IntPtr ptr = LibRdKafka.topic_partition_list_add(list, offset.Topic, offset.Partition);
        // Write the offset to commit directly into the native rd_kafka_topic_partition struct.
        Marshal.WriteInt64(ptr, (int)Marshal.OffsetOf<rd_kafka_topic_partition>("offset"), offset.Offset);
    }

    // async = false: block until the commit completes.
    ErrorCode err = LibRdKafka.commit(handle, list, false);
    LibRdKafka.topic_partition_list_destroy(list);
    if (err != ErrorCode.NO_ERROR)
    {
        throw RdKafkaException.FromErr(err, "Failed to commit offsets");
    }
}
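// Usage sketch: a higher-level consumer wrapper would typically drive these internals roughly as
// follows (variable and constructor shapes here are assumptions for illustration, not taken from
// this file):
//
//     kafkaHandle.Subscribe(new List<string> { "my-topic" });
//     // ... consume messages, tracking the last processed offset per partition ...
//     kafkaHandle.Commit(new[] { new TopicPartitionOffset("my-topic", 0, lastOffset + 1) });
//
// Committing lastOffset + 1 follows the Kafka convention of committing the next offset to consume.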