Example #1
        public Consumer(ConsumerOptions options, params OffsetPosition[] positions)
        {
            _options = options;
            _fetchResponseQueue = new BlockingCollection<Message>(_options.ConsumerBufferSize);
            _metadataQueries = new MetadataQueries(_options.Router);

            //TODO this is wrong, we should only query once and then react only to errors or socket exceptions
            //this timer will periodically look for new partitions and automatically add them to the consuming queue
            //using the same whitelist logic
            _topicPartitionQueryTimer = new ScheduledTimer()
                .Do(RefreshTopicPartitions)
                .Every(TimeSpan.FromMilliseconds(_options.TopicPartitionQueryTimeMs))
                .StartingAt(DateTime.Now);

            SetOffsetPosition(positions);
        }
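
        // --- Usage sketch (illustrative; not part of the original source) ---
        // Demonstrates how this constructor is typically exercised: build
        // ConsumerOptions for a topic/router pair, seed a starting offset, then
        // enumerate Consume(). The topic name and pre-configured router are
        // placeholder assumptions; only members visible elsewhere in this listing
        // (ConsumerOptions, OffsetPosition, Consume, Message.Offset) are used.
        private static void ConsumeExample(BrokerRouter router) // BrokerRouter: assumed router type
        {
            ConsumerOptions options = new ConsumerOptions("example-topic", router);

            // Begin reading partition 0 at offset 0 -- Consume() blocks, yielding
            // messages as fetch responses arrive
            using (Consumer consumer = new Consumer(options, new OffsetPosition(0, 0)))
            {
                foreach (Message message in consumer.Consume())
                    Console.WriteLine($"Received message at offset {message.Offset}");
            }
        }
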
        // Per-partition consumer read handler
        private void ProcessPartitionMessages(object state)
        {
            int partition = (int)state;

            try
            {
                Dictionary<uint, MeasurementKey> idTable = new Dictionary<uint, MeasurementKey>();
                ConsumerOptions options = new ConsumerOptions(Topic, m_router);
                LongSynchronizedOperation cacheLastConsumerOffset = null;
                OffsetPosition consumerCursor = new OffsetPosition { PartitionId = partition, Offset = 0 };
                long lastUpdateTime = 0;
                long lastMetadataUpdateCount = 0;
                long lastMeasurementTime = 0;

                options.PartitionWhitelist.Add(partition);
                options.Log = new TimeSeriesLogger(
                    (message, parameters) => OnStatusMessage(MessageLevel.Info, string.Format($"P[{partition}]: {message}", parameters)),
                    ex => OnProcessException(MessageLevel.Warning, ex));

                // Handle consumer offset tracking, i.e., the adapter will resume reading messages where it left off on the last run
                if (TrackConsumerOffset)
                {
                    // Parse path/filename.ext into constituent parts
                    string[] fileParts = new string[3];

                    fileParts[0] = FilePath.GetDirectoryName(ConsumerOffsetFileName);               // 0: path/
                    fileParts[1] = FilePath.GetFileNameWithoutExtension(ConsumerOffsetFileName);    // 1: filename
                    fileParts[2] = FilePath.GetExtension(ConsumerOffsetFileName);                   // 2: .ext

                    // Include partition index as part of consumer offset cache file name
                    string fileName = $"{fileParts[0]}{fileParts[1]}-P{partition}{fileParts[2]}";
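                    // Worked example (illustrative): with ConsumerOffsetFileName =
                    // "cache/offsets.dat" and partition 3, fileName becomes
                    // "cache/offsets-P3.dat" -- one offset cache file per partition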

                    if (File.Exists(fileName))
                    {
                        try
                        {
                            // Read last consumer offset
                            consumerCursor.Offset = long.Parse(File.ReadAllText(fileName));
                        }
                        catch (Exception ex)
                        {
                            OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to read last consumer offset from \"{fileName}\": {ex.Message}", ex));
                        }
                    }

                    cacheLastConsumerOffset = new LongSynchronizedOperation(() =>
                    {
                        // Do not write the file any more often than the defined consumer offset cache interval
                        int restTime = (int)(Ticks.FromSeconds(ConsumerOffsetCacheInterval) - (DateTime.UtcNow.Ticks - lastUpdateTime)).ToMilliseconds();
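                        // Worked example (illustrative): with a 1-second cache interval and
                        // 400 ms elapsed since the last write, restTime is ~600 ms, so this
                        // operation sleeps before persisting the offset below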

                        if (restTime > 0)
                            Thread.Sleep(restTime);

                        lastUpdateTime = DateTime.UtcNow.Ticks;

                        // Write current consumer offset
                        File.WriteAllText(fileName, consumerCursor.Offset.ToString());
                    }, 
                    ex => OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to cache current consumer offset to \"{fileName}\": {ex.Message}", ex)))
                    {
                        IsBackground = true
                    };
                }

                using (Consumer consumer = new Consumer(options, new OffsetPosition(partition, consumerCursor.Offset)))
                {
                    lock (m_consumers)
                        m_consumers.Add(new WeakReference<Consumer>(consumer));

                    foreach (Message message in consumer.Consume())
                    {
                        if ((object)m_metadata == null)
                            continue;

                        uint id;
                        byte metadataVersion;
                        IMeasurement measurement = message.KafkaDeserialize(out id, out metadataVersion);

                        // Kick off a refresh for new metadata if the message version number changes
                        if (m_lastMetadataVersion != metadataVersion)
                        {
                            m_lastMetadataVersion = metadataVersion;
                            m_updateMetadata.RunOnceAsync();
                        }

                        // Clear all undefined items from the dictionary when metadata gets updated
                        if (lastMetadataUpdateCount < m_metadataUpdateCount)
                        {
                            lastMetadataUpdateCount = m_metadataUpdateCount;
                            foreach (uint undefinedID in idTable.Where(item => item.Value.SignalID == Guid.Empty).Select(item => item.Key).ToArray())
                                idTable.Remove(undefinedID);
                        }

                        // Get associated measurement key, or look it up in metadata table
                        measurement.Metadata = idTable.GetOrAdd(id, lookupID => MeasurementKey.LookUpBySignalID(m_metadata?.Records?.FirstOrDefault(record => record.ID == lookupID)?.ParseSignalID() ?? Guid.Empty)).Metadata;

                        // Only publish measurements that have associated metadata and are assigned to this adapter
                        if (measurement.Key != MeasurementKey.Undefined && ((object)m_outputMeasurementKeys == null || m_outputMeasurementKeys.Contains(measurement.Key)))
                            OnNewMeasurements(new[] { measurement });

                        // Cache last consumer offset
                        consumerCursor.Offset = message.Offset;

                        if ((object)cacheLastConsumerOffset != null)
                            cacheLastConsumerOffset.RunOnceAsync();

                        if (ReadDelay > -1)
                        {
                            // As a group of measurements transitions from one timestamp to another, inject the configured read delay
                            if (lastMeasurementTime != measurement.Timestamp)
                                Thread.Sleep(ReadDelay);

                            lastMeasurementTime = measurement.Timestamp;
                        }
                    }
                }
            }
            catch (Exception ex)
            {
                OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Exception while reading Kafka messages for topic \"{Topic}\" P[{partition}]: {ex.Message}", ex));
            }
        }
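
        // Dispatch sketch (illustrative; not from the original source): the handler
        // receives a boxed partition index as its state object, so one natural
        // hookup is a thread-pool work item per whitelisted partition, e.g.:
        //
        //     foreach (int partition in partitionWhitelist) // hypothetical list of partition indices
        //         ThreadPool.QueueUserWorkItem(ProcessPartitionMessages, partition);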