Example #1
        internal Topic(SafeKafkaHandle kafkaHandle, Producer producer, string topic, TopicConfig config)
        {
            this.producer = producer;

            config = config ?? new TopicConfig();
            // Enable offset reporting so delivery reports carry the offset of each produced message.
            config["produce.offset.report"] = "true";
            // Duplicate the topic config handle: librdkafka takes ownership of the pointer handed to the topic.
            IntPtr configPtr = config.handle.Dup();

            if (config.CustomPartitioner != null)
            {
                // Bridge the managed CustomPartitioner into librdkafka's native partitioner callback,
                // keeping the delegate in a field so the GC does not collect it while librdkafka holds it.
                PartitionerDelegate = (IntPtr rkt, IntPtr keydata, UIntPtr keylen, int partition_cnt,
                        IntPtr rkt_opaque, IntPtr msg_opaque) =>
                {
                    byte[] key = null;
                    if (keydata != IntPtr.Zero)
                    {
                        key = new byte[(int) keylen];
                        Marshal.Copy(keydata, key, 0, (int) keylen);
                    }
                    return config.CustomPartitioner(this, key, partition_cnt);
                };
                LibRdKafka.topic_conf_set_partitioner_cb(configPtr, PartitionerDelegate);
            }

            handle = kafkaHandle.Topic(topic, configPtr);
        }
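
The constructor above bridges a managed CustomPartitioner into librdkafka's native partitioner callback. The following is a minimal usage sketch, not part of the library source: it assumes the public Producer(string brokerList) and Producer.Topic(string, TopicConfig) entry points, a partitioner delegate shaped (Topic topic, byte[] key, int partitionCount) => int as implied by the call to config.CustomPartitioner(this, key, partition_cnt), and DeliveryReport fields Partition and Offset; the broker address and topic name are placeholders.

// Hypothetical usage sketch: route messages by key with a custom partitioner.
using System;
using System.Text;
using RdKafka;

class PartitionerExample
{
    public static void Main()
    {
        var topicConfig = new TopicConfig();
        topicConfig.CustomPartitioner = (topic, key, partitionCount) =>
        {
            // Messages without a key go to partition 0 in this sketch.
            if (key == null || key.Length == 0)
            {
                return 0;
            }
            // Illustrative only: modulo over the first key byte.
            return key[0] % partitionCount;
        };

        using (var producer = new Producer("127.0.0.1:9092"))
        {
            Topic topic = producer.Topic("demo-topic", topicConfig);

            byte[] payload = Encoding.UTF8.GetBytes("hello");
            byte[] key = Encoding.UTF8.GetBytes("user-42");

            // Produce is assumed to return Task<DeliveryReport>; block here for brevity.
            var report = topic.Produce(payload, key).Result;
            Console.WriteLine($"Delivered to partition {report.Partition} at offset {report.Offset}");
        }
    }
}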
Example #2
        /*
         *  allTopics        - if true: request info about all topics in cluster,
         *                     else: only request info about locally known topics.
         *  onlyTopic        - only request info about this topic
         *  includeInternal  - if true: also include internal topics (names starting with "__")
         *  timeout          - maximum response time before failing; defaults to 10 seconds when not set.
         */
        internal Metadata Metadata(bool allTopics,
                                   SafeTopicHandle onlyTopic,
                                   bool includeInternal,
                                   TimeSpan timeout)
        {
            if (timeout == default(TimeSpan))
            {
                timeout = TimeSpan.FromSeconds(10);
            }

            IntPtr    metaPtr;
            ErrorCode err = LibRdKafka.metadata(
                handle, allTopics,
                onlyTopic?.DangerousGetHandle() ?? IntPtr.Zero,
                /* const struct rd_kafka_metadata ** */ out metaPtr,
                (IntPtr)timeout.TotalMilliseconds);

            if (err == ErrorCode.NO_ERROR)
            {
                try {
                    var meta = Marshal.PtrToStructure<rd_kafka_metadata>(metaPtr);

                    var brokers = Enumerable.Range(0, meta.broker_cnt)
                        .Select(i => Marshal.PtrToStructure<rd_kafka_metadata_broker>(
                                    meta.brokers + i * Marshal.SizeOf<rd_kafka_metadata_broker>()))
                        .Select(b => new BrokerMetadata() { BrokerId = b.id, Host = b.host, Port = b.port })
                        .ToList();

                    // Topics whose names start with "__" are internal; skip them unless includeInternal is set.
                    var topics = Enumerable.Range(0, meta.topic_cnt)
                        .Select(i => Marshal.PtrToStructure<rd_kafka_metadata_topic>(
                                    meta.topics + i * Marshal.SizeOf<rd_kafka_metadata_topic>()))
                        .Where(t => includeInternal || !t.topic.StartsWith("__"))
                        .Select(t => new TopicMetadata()
                        {
                            Topic = t.topic,
                            Error = t.err,
                            Partitions =
                                Enumerable.Range(0, t.partition_cnt)
                                .Select(j => Marshal.PtrToStructure<rd_kafka_metadata_partition>(
                                            t.partitions + j * Marshal.SizeOf<rd_kafka_metadata_partition>()))
                                .Select(p => new PartitionMetadata()
                                {
                                    PartitionId = p.id,
                                    Error = p.err,
                                    Leader = p.leader,
                                    Replicas = MarshalCopy(p.replicas, p.replica_cnt),
                                    InSyncReplicas = MarshalCopy(p.isrs, p.isr_cnt)
                                })
                                .ToList()
                        })
                        .ToList();

                    return new Metadata()
                    {
                        Brokers = brokers,
                        Topics = topics,
                        OriginatingBrokerId = meta.orig_broker_id,
                        OriginatingBrokerName = meta.orig_broker_name
                    };
                }
                finally
                {
                    LibRdKafka.metadata_destroy(metaPtr);
                }
            }
            else
            {
                throw RdKafkaException.FromErr(err, "Could not retrieve metadata");
            }
        }
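
The method above marshals librdkafka's native metadata structs into the managed Metadata, BrokerMetadata, TopicMetadata, and PartitionMetadata types shown in the code. Below is a minimal consumption sketch, not part of the library source: it assumes the producer exposes a public Metadata() wrapper over this internal method, and the broker address is a placeholder.

// Hypothetical usage sketch: dump cluster metadata.
using System;
using RdKafka;

class MetadataExample
{
    public static void Main()
    {
        using (var producer = new Producer("127.0.0.1:9092"))
        {
            // The public Metadata() wrapper and its default parameters are an assumption.
            Metadata metadata = producer.Metadata();

            Console.WriteLine($"Originating broker: {metadata.OriginatingBrokerName} (id {metadata.OriginatingBrokerId})");

            foreach (var broker in metadata.Brokers)
            {
                Console.WriteLine($"Broker {broker.BrokerId}: {broker.Host}:{broker.Port}");
            }

            foreach (var topic in metadata.Topics)
            {
                Console.WriteLine($"Topic '{topic.Topic}' ({topic.Partitions.Count} partitions)");
                foreach (var partition in topic.Partitions)
                {
                    Console.WriteLine(
                        $"  partition {partition.PartitionId}: leader {partition.Leader}, " +
                        $"replicas [{string.Join(",", partition.Replicas)}], " +
                        $"in-sync [{string.Join(",", partition.InSyncReplicas)}]");
                }
            }
        }
    }
}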