Example #1
            private void AddPartitionTopicInfo(
                Pool <string, Pool <int, PartitionTopicInfo> > currentTopicRegistry,
                ZKGroupTopicDirs topicDirs,
                int partition,
                string topic,
                string consumerThreadId)
            {
                var partTopicInfoMap = currentTopicRegistry.Get(topic);

                var znode        = topicDirs.ConsumerOffsetDir + "/" + partition;
                var offsetString = ZkUtils.ReadDataMaybeNull(parent.zkClient, znode).Item1;

                // If first time starting a consumer, set the initial offset to -1
                var offset = (offsetString != null) ? long.Parse(offsetString) : PartitionTopicInfo.InvalidOffset;

                var queue          = parent.topicThreadIdAndQueues.Get(Tuple.Create(topic, consumerThreadId));
                var consumedOffset = new AtomicLong(offset);
                var fetchedOffset  = new AtomicLong(offset);
                var partTopicInfo  = new PartitionTopicInfo(
                    topic,
                    partition,
                    queue,
                    consumedOffset,
                    fetchedOffset,
                    new AtomicInteger(parent.Config.FetchMessageMaxBytes),
                    parent.Config.ClientId);

                partTopicInfoMap[partition] = partTopicInfo;
                Logger.DebugFormat("{0} selected new offset {1}", partTopicInfo, offset);
                parent.checkpointedOffsets[new TopicAndPartition(topic, partition)] = offset;
            }
Example #2
        private void CreateFetchThread(PartitionTopicInfo partition, Broker broker)
        {
            if (_fetcherThreads == null)
            {
                return;
            }

            CreateFetchThread(new List <PartitionTopicInfo> {
                partition
            }, broker);
        }
Example #3
        /// <summary>
        /// Commits the offset of the specified topic/partition.
        /// Only used when the consumer has a strong requirement to reprocess as few messages as possible.
        /// </summary>
        /// <param name="topic">Topic whose offset is being committed.</param>
        /// <param name="partition">Partition whose offset is being committed.</param>
        /// <param name="offset">Offset value to commit.</param>
        /// <param name="setPosition">Indicates whether to set the fetcher's offset to the value committed. Default = true.</param>
        public void CommitOffset(string topic, int partition, long offset, bool setPosition = true)
        {
            this.EnsuresNotDisposed();
            if (this.GetZkClient() == null)
            {
                return;
            }
            if (this.config.AutoCommit)
            {
                throw new ArgumentException("To commit an offset for a specific partition, AutoCommit must be set to false in the ConsumerConfiguration.");
            }
            try
            {
                IDictionary <int, PartitionTopicInfo> topicPartitionInfo = topicRegistry[topic];
                var topicDirs = new ZKGroupTopicDirs(this.config.GroupId, topic);
                PartitionTopicInfo partitionTopicInfo = topicPartitionInfo[partition];
                if (partitionTopicInfo.ConsumeOffsetValid)
                {
                    // Commit the offset unconditionally. This lets consumers decide which offsets to read or skip.
                    //if (offset > partitionTopicInfo.CommitedOffset)
                    try
                    {
                        ZkUtils.UpdatePersistentPath(GetZkClient(),
                                                     topicDirs.ConsumerOffsetDir + "/" +
                                                     partitionTopicInfo.PartitionId, offset.ToString());
                        partitionTopicInfo.CommitedOffset = offset;
                        if (setPosition)
                        {
                            partitionTopicInfo.ConsumeOffset = offset;
                            partitionTopicInfo.FetchOffset   = offset;
                        }
                    }
                    catch (Exception ex)
                    {
                        Logger.ErrorFormat("error in CommitOffsets UpdatePersistentPath : {0}", ex.FormatException());
                    }
                }
                else
                {
                    Logger.InfoFormat(
                        "Skip committing offset {0} for topic {1} because it is invalid (ZK session is disconnected)",
                        offset, partitionTopicInfo);
                }

                if (Logger.IsDebugEnabled)
                {
                    Logger.DebugFormat("Commited offset {0} for topic {1}", offset, partitionTopicInfo);
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("exception during CommitOffsets: Topic:{0}  Partition:{1} offset:{2} Exception:{3} ", topic, partition, offset, ex.FormatException());
            }
        }
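A minimal usage sketch for the CommitOffset method above, not taken from the original source: it assumes the method lives on ZookeeperConsumerConnector (the connector type referenced elsewhere in these examples) and that the connector was created from a ConsumerConfiguration with AutoCommit = false, since the guard at the top of the method throws otherwise. The helper name and its arguments are illustrative only.

        // Hedged sketch: checkpoint the last processed offset for one partition.
        // Assumes `connector` exposes the CommitOffset method shown above and was built
        // from a ConsumerConfiguration with AutoCommit = false (otherwise CommitOffset throws).
        public static void CheckpointProcessedOffset(
            ZookeeperConsumerConnector connector,   // assumed owner type of CommitOffset
            string topic,
            int partition,
            long processedOffset)
        {
            // setPosition: false commits the offset to ZooKeeper without moving the
            // fetch/consume position; the default (true) also repositions the fetcher.
            connector.CommitOffset(topic, partition, processedOffset, setPosition: false);
        }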
Example #4
        private Message MakeNext()
        {
            if (current == null || !current.MoveNext())
            {
                Logger.Debug("Getting new FetchedDataChunk...");
                if (consumerTimeoutMs < 0)
                {
                    currentDataChunk = this.channel.Take(cancellationToken);
                }
                else
                {
                    bool done = channel.TryTake(out currentDataChunk, consumerTimeoutMs, cancellationToken);
                    if (!done)
                    {
                        Logger.Debug("Consumer iterator timing out...");
                        state = ConsumerIteratorState.NotReady;
                        throw new ConsumerTimeoutException();
                    }
                }

                if (currentDataChunk.Equals(ZookeeperConsumerConnector.ShutdownCommand))
                {
                    Logger.Debug("Received the shutdown command");
                    channel.Add(currentDataChunk);
                    return(this.AllDone());
                }

                currentTopicInfo = currentDataChunk.TopicInfo;
                Logger.DebugFormat("CurrentTopicInfo: ConsumedOffset({0}), FetchOffset({1})",
                                   currentTopicInfo.GetConsumeOffset(), currentTopicInfo.GetFetchOffset());
                if (currentTopicInfo.GetConsumeOffset() != currentDataChunk.FetchOffset)
                {
                    Logger.ErrorFormat(
                        CultureInfo.CurrentCulture,
                        "consumed offset: {0} doesn't match fetch offset: {1} for {2}; consumer may lose data",
                        currentTopicInfo.GetConsumeOffset(),
                        currentDataChunk.FetchOffset,
                        currentTopicInfo);
                    currentTopicInfo.ResetConsumeOffset(currentDataChunk.FetchOffset);
                }

                current = currentDataChunk.Messages.GetEnumerator();
                current.MoveNext();
            }

            var item = current.Current;

            consumedOffset = item.Offset;
            return(item.Message);
        }
Example #5
 public FetchedDataChunk(BufferedMessageSet messages, PartitionTopicInfo topicInfo, long fetchOffset)
 {
     this.Messages    = messages;
     this.TopicInfo   = topicInfo;
     this.FetchOffset = fetchOffset;
 }
Example #6
 internal void AddPartitionWithError(PartitionTopicInfo partition)
 {
     _partitionsNeedingLeaders.Enqueue(partition);
 }
Example #7
        protected override MessageAndMetadata <TKey, TValue> MakeNext()
        {
            FetchedDataChunk currentDataChunk;
            var localCurrent = this.current.Get();

            if (localCurrent == null || !localCurrent.HasNext())
            {
                if (this.consumerTimeoutMs < 0)
                {
                    currentDataChunk = this.channel.Take();
                }
                else
                {
                    if (!this.channel.TryTake(out currentDataChunk, this.consumerTimeoutMs))
                    {
                        // reset state to make the iterator re-iterable
                        this.ResetState();
                        throw new ConsumerTimeoutException();
                    }
                }

                if (currentDataChunk.Equals(ZookeeperConsumerConnector.ShutdownCommand))
                {
                    Logger.Debug("Received the shutdown command");
                    this.channel.Add(currentDataChunk);
                    return(this.AllDone());
                }
                else
                {
                    this.currentTopicInfo = currentDataChunk.TopicInfo;
                    var cdcFetchOffset   = currentDataChunk.FetchOffset;
                    var ctiConsumeOffset = this.currentTopicInfo.GetConsumeOffset();
                    Logger.DebugFormat(
                        "CurrentTopicInfo: ConsumedOffset({0}), FetchOffset({1})",
                        this.currentTopicInfo.GetConsumeOffset(),
                        this.currentTopicInfo.GetFetchOffset());

                    if (ctiConsumeOffset < cdcFetchOffset)
                    {
                        Logger.ErrorFormat(
                            CultureInfo.CurrentCulture,
                            "consumed offset: {0} doesn't match fetch offset: {1} for {2}; consumer may lose Data",
                            ctiConsumeOffset,
                            cdcFetchOffset,
                            this.currentTopicInfo);
                        this.currentTopicInfo.ResetConsumeOffset(currentDataChunk.FetchOffset);
                    }

                    localCurrent = currentDataChunk.Messages.Iterator();
                    this.current.Set(localCurrent);
                }

                // if we just updated the current chunk and it is empty that means the fetch size is too small!
                if (currentDataChunk.Messages.ValidBytes == 0)
                {
                    throw new MessageSizeTooLargeException(
                              string.Format(
                                  "Found a message larger than the maximum fetch size of this consumer on topic "
                                  + "{0} partition {1} at fetch offset {2}. Increase the fetch size, or decrease the maximum message size the broker will allow.",
                                  currentDataChunk.TopicInfo.Topic,
                                  currentDataChunk.TopicInfo.PartitionId,
                                  currentDataChunk.FetchOffset));
                }
            }

            var item = localCurrent.Next();

            // reject the messages that have already been consumed
            while (item.Offset < this.currentTopicInfo.GetConsumeOffset() && localCurrent.HasNext())
            {
                item = localCurrent.Next();
            }

            this.consumedOffset = item.NextOffset;

            item.Message.EnsureValid(); // validate checksum of message to ensure it is valid

            return(new MessageAndMetadata <TKey, TValue>(
                       this.currentTopicInfo.Topic,
                       this.currentTopicInfo.PartitionId,
                       item.Message,
                       item.Offset,
                       this.keyDecoder,
                       this.valueDecoder));
        }
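The iterator above blocks on channel.Take when consumerTimeoutMs is negative, throws ConsumerTimeoutException when a bounded wait expires, and throws MessageSizeTooLargeException when a chunk arrives with ValidBytes == 0. A minimal consumption sketch under those assumptions follows; the IEnumerable wrapper and the processing delegate are hypothetical, since the examples only show the iterator's MakeNext.

        // Hedged sketch: drain a stream backed by the iterator above and handle the two
        // exceptions MakeNext can surface. The stream and delegate shapes are assumptions.
        public static void Drain<TKey, TValue>(
            IEnumerable<MessageAndMetadata<TKey, TValue>> stream,    // hypothetical enumerable wrapper
            Action<MessageAndMetadata<TKey, TValue>> process)
        {
            try
            {
                foreach (var item in stream)
                {
                    process(item);   // the iterator has already set consumedOffset = item.NextOffset
                }
            }
            catch (ConsumerTimeoutException)
            {
                // No chunk arrived within consumerTimeoutMs; MakeNext resets its state,
                // so the caller can retry or shut down.
            }
            catch (MessageSizeTooLargeException)
            {
                // The fetched chunk had ValidBytes == 0: increase the consumer fetch size
                // or lower the broker's maximum message size, as the exception text advises.
            }
        }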
Example #8
 private void AddPartitionTopicInfo(ZKGroupTopicDirs topicDirs, string partitionString, string topic, string consumerThreadId)
 {
     var partition = Partition.ParseFrom(partitionString);
     var partTopicInfoMap = this.topicRegistry[topic];
     var znode = topicDirs.ConsumerOffsetDir + "/" + partition.Name;
     var offsetString = this.zkClient.ReadData<string>(znode, true);
     long offset = string.IsNullOrEmpty(offsetString) ? 0 : long.Parse(offsetString, CultureInfo.InvariantCulture);
     var queue = this.queues[new Tuple<string, string>(topic, consumerThreadId)];
     var partTopicInfo = new PartitionTopicInfo(
         topic,
         partition.BrokerId,
         partition,
         queue,
         offset,
         offset,
         this.config.FetchSize);
     partTopicInfoMap.Add(partition, partTopicInfo);
     if (Logger.IsDebugEnabled)
     {
         Logger.DebugFormat(CultureInfo.CurrentCulture, "{0} selected new offset {1}", partTopicInfo, offset);
     }
 }
Example #9
        /// <summary>
        /// Method to be used for starting a new thread
        /// </summary>
        internal void Run()
        {
            foreach (var partitionTopicInfo in partitionTopicInfos)
            {
                Logger.InfoFormat(
                    CultureInfo.CurrentCulture,
                    "{0} start fetching topic: {1} part: {2} offset: {3} from {4}:{5}",
                    this.name,
                    partitionTopicInfo.Topic,
                    partitionTopicInfo.Partition.PartId,
                    partitionTopicInfo.GetFetchOffset(),
                    this.broker.Host,
                    this.broker.Port);
            }

            try
            {
                while (!this.shouldStop)
                {
                    var requestList = new List <FetchRequest>();
                    foreach (var partitionTopicInfo in this.partitionTopicInfos)
                    {
                        var singleRequest = new FetchRequest(partitionTopicInfo.Topic, partitionTopicInfo.Partition.PartId, partitionTopicInfo.GetFetchOffset(), this.config.MaxFetchSize);
                        requestList.Add(singleRequest);
                    }

                    Logger.Debug("Fetch request: " + string.Join(", ", requestList.Select(x => x.ToString())));
                    var request  = new MultiFetchRequest(requestList);
                    var response = this.simpleConsumer.MultiFetch(request);
                    int read     = 0;
                    var items    = this.partitionTopicInfos.Zip(
                        response,
                        (x, y) =>
                        new Tuple <PartitionTopicInfo, BufferedMessageSet>(x, y));
                    foreach (Tuple <PartitionTopicInfo, BufferedMessageSet> item in items)
                    {
                        BufferedMessageSet messages = item.Item2;
                        PartitionTopicInfo info     = item.Item1;
                        try
                        {
                            bool done = false;
                            if (messages.ErrorCode == ErrorMapping.OffsetOutOfRangeCode)
                            {
                                Logger.InfoFormat(CultureInfo.CurrentCulture, "offset {0} out of range", info.GetFetchOffset());
                                //// see if we can fix this error
                                var resetOffset = this.ResetConsumerOffsets(info.Topic, info.Partition);
                                if (resetOffset >= 0)
                                {
                                    info.ResetFetchOffset(resetOffset);
                                    info.ResetConsumeOffset(resetOffset);
                                    done = true;
                                }
                            }

                            if (!done)
                            {
                                read += info.Add(messages, info.GetFetchOffset());
                            }
                        }
                        catch (Exception ex)
                        {
                            if (!shouldStop)
                            {
                                Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable for {0}: {1}", info, ex);
                            }

                            throw;
                        }
                    }

                    Logger.Info("Fetched bytes: " + read);
                    if (read == 0)
                    {
                        Logger.DebugFormat(CultureInfo.CurrentCulture, "backing off {0} ms", this.config.BackOffIncrement);
                        Thread.Sleep(this.config.BackOffIncrement);
                    }
                }
            }
            catch (Exception ex)
            {
                if (shouldStop)
                {
                    Logger.InfoFormat(CultureInfo.CurrentCulture, "FetcherRunnable {0} interrupted", this);
                }
                else
                {
                    Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable {0}", ex);
                }
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "stopping fetcher {0} to host {1}", this.name, this.broker.Host);
        }
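The summary on Run above says it is meant to be the entry point of a new thread. A minimal start-up sketch follows; the class name FetcherRunnable is taken from the class's own log messages and is an assumption, as is the thread-naming scheme. Since Run is internal, the helper is assumed to compile in the same assembly.

        // Hedged sketch: start the runnable above on its own background thread.
        // Assumes the surrounding class is FetcherRunnable (the name used in its log messages)
        // and that this helper lives in the same assembly, because Run() is internal.
        internal static Thread StartFetcherThread(FetcherRunnable fetcherRunnable, int fetcherIndex)
        {
            var thread = new Thread(fetcherRunnable.Run)          // Run() loops until shouldStop is set
            {
                Name = "FetcherRunnable-" + fetcherIndex,         // hypothetical naming scheme
                IsBackground = true
            };
            thread.Start();
            return thread;
        }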