/// <summary>
/// Core constructor: stores collaborators, creates the (not yet started) managed thread
/// named after <paramref name="threadId"/>, and registers all thread-level sensors.
/// </summary>
private StreamThread(string threadId, string clientId, TaskManager manager, IConsumer<byte[], byte[]> consumer, InternalTopologyBuilder builder, IChangelogReader storeChangelogReader, StreamMetricsRegistry streamMetricsRegistry, TimeSpan timeSpan, long commitInterval)
{
    // Collaborators.
    this.manager = manager;
    this.consumer = consumer;
    this.builder = builder;
    this.threadId = threadId;
    this.clientId = clientId;
    this.streamMetricsRegistry = streamMetricsRegistry;
    changelogReader = storeChangelogReader;

    // Timing configuration.
    consumeTimeout = timeSpan;
    commitTimeMs = commitInterval;

    logPrefix = $"stream-thread[{threadId}] ";

    // The managed thread is named after the stream-thread id so logs correlate.
    thread = new Thread(Run) { Name = this.threadId };
    Name = this.threadId;
    State = ThreadState.CREATED;

    // Thread-level metric sensors.
    commitSensor = ThreadMetrics.CommitSensor(threadId, streamMetricsRegistry);
    pollSensor = ThreadMetrics.PollSensor(threadId, streamMetricsRegistry);
    pollRecordsSensor = ThreadMetrics.PollRecordsSensor(threadId, streamMetricsRegistry);
    pollRatioSensor = ThreadMetrics.PollRatioSensor(threadId, streamMetricsRegistry);
    processLatencySensor = ThreadMetrics.ProcessLatencySensor(threadId, streamMetricsRegistry);
    processRecordsSensor = ThreadMetrics.ProcessRecordsSensor(threadId, streamMetricsRegistry);
    processRateSensor = ThreadMetrics.ProcessRateSensor(threadId, streamMetricsRegistry);
    processRatioSensor = ThreadMetrics.ProcessRatioSensor(threadId, streamMetricsRegistry);
    commitRatioSensor = ThreadMetrics.CommitRatioSensor(threadId, streamMetricsRegistry);
}
/// <summary>
/// Creates a store-level sensor recording avg and max of a size-or-count style metric.
/// </summary>
private static Sensor SizeOrCountSensor(TaskId taskId, string storeType, string storeName, string metricName, string metricDescription, string descriptionOfAvg, string descriptionOfMax, MetricsRecordingLevel recordingLevel, StreamMetricsRegistry streamsMetrics)
{
    var sensor = streamsMetrics.StoreLevelSensor(GetThreadId(), taskId, storeName, metricName, metricDescription, recordingLevel);
    var storeTags = streamsMetrics.StoreLevelTags(GetThreadId(), taskId.ToString(), storeName, storeType);

    SensorHelper.AddAvgAndMaxToSensor(
        sensor,
        StreamMetricsRegistry.STATE_STORE_LEVEL_GROUP,
        storeTags,
        metricName,
        descriptionOfAvg,
        descriptionOfMax);

    return sensor;
}
/// <summary>
/// Creates a processor-node-level sensor tracking invocation rate and total count.
/// </summary>
private static Sensor ThroughputSensor(string threadId, TaskId taskId, string processorNodeId, string metricNamePrefix, string metricDescription, string descriptionOfRate, string descriptionOfCount, MetricsRecordingLevel recordingLevel, StreamMetricsRegistry metricsRegistry)
{
    var sensor = metricsRegistry.NodeLevelSensor(threadId, taskId, processorNodeId, metricNamePrefix, metricDescription, recordingLevel);
    var nodeTags = metricsRegistry.NodeLevelTags(threadId, taskId.ToString(), processorNodeId);

    SensorHelper.AddInvocationRateAndCountToSensor(
        sensor,
        StreamMetricsRegistry.PROCESSOR_NODE_LEVEL_GROUP,
        nodeTags,
        metricNamePrefix,
        descriptionOfRate,
        descriptionOfCount);

    return sensor;
}
/// <summary>
/// Convenience constructor: derives poll timeout and commit interval from
/// <paramref name="configuration"/>, then delegates to the core constructor.
/// </summary>
private StreamThread(string threadId, string clientId, TaskManager manager, IConsumer<byte[], byte[]> consumer, InternalTopologyBuilder builder, IChangelogReader storeChangelogReader, StreamMetricsRegistry streamMetricsRegistry, IStreamConfig configuration)
    : this(
        threadId,
        clientId,
        manager,
        consumer,
        builder,
        storeChangelogReader,
        streamMetricsRegistry,
        TimeSpan.FromMilliseconds(configuration.PollMs),
        configuration.CommitIntervalMs)
{
    // Keep the full configuration for later use (e.g. task creation, shutdown behavior).
    streamConfig = configuration;
}
/// <summary>
/// Test setup: builds a small count + forward topology, creates and fully initializes a
/// <see cref="StreamTask"/> over a sync supplier, then asserts the ACTIVE_RESTORATION
/// task-level metric is 1 while restoring and drops back to 0 once restoration completes.
/// </summary>
public void Initialize()
{
    streamMetricsRegistry = new StreamMetricsRegistry(Guid.NewGuid().ToString(), MetricsRecordingLevel.DEBUG);

    config.ApplicationId = "test-stream-thread";
    config.StateDir = Guid.NewGuid().ToString();
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 10;
    config.CommitIntervalMs = 1;

    var builder = new StreamBuilder();
    var stream = builder.Stream<string, string>("topic");
    stream.GroupByKey().Count();
    stream.To("topic2");

    var topo = builder.Build();
    id = new TaskId { Id = 0, Partition = 0 };
    // FIX: reuse the topology built above instead of calling builder.Build() a second time,
    // which created a separate, discarded topology instance.
    var processorTopology = topo.Builder.BuildTopology(id);

    syncKafkaSupplier = new SyncKafkaSupplier();
    // Producer is created for its side effect on the supplier; the task receives the supplier itself.
    var producer = syncKafkaSupplier.GetProducer(config.ToProducerConfig());
    var consumer = syncKafkaSupplier.GetConsumer(config.ToConsumerConfig(), null);
    topicPartition = new TopicPartition("topic", 0);

    task = new StreamTask(
        threadId,
        id,
        new List<TopicPartition> { topicPartition },
        processorTopology,
        consumer,
        config,
        syncKafkaSupplier,
        null,
        new MockChangelogRegister(),
        streamMetricsRegistry);
    task.GroupMetadata = consumer as SyncConsumer;
    task.InitializeStateStores();
    task.InitializeTopology();
    task.RestorationIfNeeded();

    var activeRestorationSensor = streamMetricsRegistry
        .GetSensors()
        .FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_RESTORATION)));
    // FIX: fail with a clear message instead of a NullReferenceException when the sensor is missing.
    Assert.IsNotNull(activeRestorationSensor);

    Assert.AreEqual(1,
        activeRestorationSensor.Metrics[MetricName.NameAndGroup(
            TaskMetrics.ACTIVE_RESTORATION,
            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

    task.CompleteRestoration();

    Assert.AreEqual(0,
        activeRestorationSensor.Metrics[MetricName.NameAndGroup(
            TaskMetrics.ACTIVE_RESTORATION,
            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
}
/// <summary>
/// Creates a thread-level sensor combining invocation rate/count with avg/max latency.
/// Latency stats are recorded under the "-latency"-suffixed name; rate/count under the bare name.
/// </summary>
internal static Sensor InvocationRateAndCountAndAvgAndMaxLatencySensor(string threadId, string metricName, string metricDescription, string descriptionOfRate, string descriptionOfCount, string descriptionOfAvg, string descriptionOfMax, MetricsRecordingLevel recordingLevel, StreamMetricsRegistry streamsMetrics)
{
    var sensor = streamsMetrics.ThreadLevelSensor(threadId, metricName, metricDescription, recordingLevel);
    var threadTags = streamsMetrics.ThreadLevelTags(threadId);

    SensorHelper.AddAvgAndMaxToSensor(
        sensor,
        StreamMetricsRegistry.THREAD_LEVEL_GROUP,
        threadTags,
        metricName + StreamMetricsRegistry.LATENCY_SUFFIX,
        descriptionOfAvg,
        descriptionOfMax);

    SensorHelper.AddInvocationRateAndCountToSensor(
        sensor,
        StreamMetricsRegistry.THREAD_LEVEL_GROUP,
        threadTags,
        metricName,
        descriptionOfRate,
        descriptionOfCount);

    return sensor;
}
/// <summary>
/// Processor context for the global state update task.
/// There is no associated stream task, hence the null task passed to the base context.
/// </summary>
internal GlobalProcessorContext(
    IStreamConfig configuration,
    IStateManager stateManager,
    StreamMetricsRegistry streamMetricsRegistry)
    : base(null, configuration, stateManager, streamMetricsRegistry)
{
}
/// <summary>
/// Test setup: configures an at-least-once app with INFO-level metrics, builds a simple
/// pass-through topology and creates the <see cref="StreamThread"/> under test.
/// </summary>
public void Initialize()
{
    streamMetricsRegistry = new StreamMetricsRegistry(Guid.NewGuid().ToString(), MetricsRecordingLevel.INFO);

    config.ApplicationId = "test-stream-thread";
    config.StateDir = Guid.NewGuid().ToString();
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 10;
    config.MaxPollRecords = 10;
    config.CommitIntervalMs = 10;
    config.MetricsRecording = MetricsRecordingLevel.INFO;

    mockKafkaSupplier = new MockKafkaSupplier(numberPartitions);

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    thread = StreamThread.Create(
        threadId,
        "c1",
        topo.Builder,
        streamMetricsRegistry,
        config,
        mockKafkaSupplier,
        mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
        1) as StreamThread;
}
/// <summary>
/// Creates a store-level sensor combining an invocation rate with avg/max latency
/// (latency recorded under the "-latency"-suffixed metric name).
/// </summary>
private static Sensor ThroughputAndLatencySensor(TaskId taskId, string storeType, string storeName, string metricName, string metricDescription, string descriptionOfRate, string descriptionOfAvg, string descriptionOfMax, MetricsRecordingLevel recordingLevel, StreamMetricsRegistry streamsMetrics)
{
    string latencyMetricName = metricName + StreamMetricsRegistry.LATENCY_SUFFIX;
    var storeTags = streamsMetrics.StoreLevelTags(GetThreadId(), taskId.ToString(), storeName, storeType);
    var sensor = streamsMetrics.StoreLevelSensor(GetThreadId(), taskId, storeName, metricName, metricDescription, recordingLevel);

    SensorHelper.AddInvocationRateToSensor(
        sensor,
        StreamMetricsRegistry.STATE_STORE_LEVEL_GROUP,
        storeTags,
        metricName,
        descriptionOfRate);

    SensorHelper.AddAvgAndMaxToSensor(
        sensor,
        StreamMetricsRegistry.STATE_STORE_LEVEL_GROUP,
        storeTags,
        latencyMetricName,
        descriptionOfAvg,
        descriptionOfMax);

    return sensor;
}
/// <summary>
/// Creates the client-level "app info" sensor: immutable version / application-id /
/// topology-description metrics plus callback-backed state and thread-count gauges.
/// </summary>
public static Sensor StreamsAppSensor(string applicationId, string topologyDescription, Func<int> stateStreamFunc, Func<int> streamThreadsFunc, StreamMetricsRegistry metricsRegistry)
{
    var sensor = metricsRegistry.ClientLevelSensor(APP_INFO, APP_INFO_DESCRIPTION, MetricsRecordingLevel.INFO);

    // Base tags: client tags + application id (applies to every metric below).
    var tags = metricsRegistry.ClientTags();
    tags.Add(APPLICATION_ID, applicationId);

    // Version metric carries an extra "version" tag.
    var tagsVersion = new Dictionary<string, string>(tags) { { VERSION, VERSION_FROM_ASSEMBLY } };
    sensor.AddImmutableMetric(
        new MetricName(VERSION, StreamMetricsRegistry.CLIENT_LEVEL_GROUP, VERSION_DESCRIPTION, tagsVersion),
        VERSION_FROM_ASSEMBLY);

    sensor.AddImmutableMetric(
        new MetricName(APPLICATION_ID, StreamMetricsRegistry.CLIENT_LEVEL_GROUP, APPLICATION_ID_DESCRIPTION, tags),
        applicationId);

    // Topology metric carries an extra topology-description tag.
    var tagsTopo = new Dictionary<string, string>(tags) { { TOPOLOGY_DESCRIPTION, topologyDescription } };
    sensor.AddImmutableMetric(
        new MetricName(TOPOLOGY_DESCRIPTION, StreamMetricsRegistry.CLIENT_LEVEL_GROUP, TOPOLOGY_DESCRIPTION_DESCRIPTION, tagsTopo),
        topologyDescription);

    // Gauges evaluated via the supplied callbacks on each read.
    sensor.AddProviderMetric(
        new MetricName(STATE, StreamMetricsRegistry.CLIENT_LEVEL_GROUP, STATE_DESCRIPTION, tags),
        stateStreamFunc);
    sensor.AddProviderMetric(
        new MetricName(STREAM_THREADS, StreamMetricsRegistry.CLIENT_LEVEL_GROUP, STREAM_THREADS_DESCRIPTION, tags),
        streamThreadsFunc);

    return sensor;
}
/// <summary>
/// Builds a processor context bound to a task, its configuration, its state manager
/// and the metrics registry; serdes context is derived from the configuration.
/// </summary>
internal ProcessorContext(AbstractTask task, IStreamConfig configuration, IStateManager stateManager, StreamMetricsRegistry streamMetricsRegistry)
{
    Task = task;
    Configuration = configuration;
    States = stateManager;
    Metrics = streamMetricsRegistry;

    // Serdes resolution uses the same configuration as the context.
    SerDesContext = new SerDesContext(configuration);
}
/// <summary>
/// Test setup: captures the current thread's name (falling back to the registry's
/// UNKNOWN_THREAD placeholder) and creates a DEBUG-level metrics registry.
/// </summary>
public void Initialize()
{
    threadId = Thread.CurrentThread.Name ?? StreamMetricsRegistry.UNKNOWN_THREAD;
    streamMetricsRegistry = new StreamMetricsRegistry(Guid.NewGuid().ToString(), MetricsRecordingLevel.DEBUG);
    config.ApplicationId = "test-stream-thread";
}
/// <summary>Creates the thread-level "closed task" rate/count sensor (INFO level).</summary>
public static Sensor ClosedTaskSensor(string threadId, StreamMetricsRegistry metricsRegistry)
    => InvocationRateAndCountSensor(
        threadId,
        CLOSE_TASK,
        CLOSE_TASK_DESCRIPTION,
        CLOSE_TASK_RATE_DESCRIPTION,
        CLOSE_TASK_TOTAL_DESCRIPTION,
        MetricsRecordingLevel.INFO,
        metricsRegistry);
/// <summary>
/// Registers every librdkafka consumer sensor exposed by this wrapper against the
/// given metrics registry, keyed by thread id, client id and application id.
/// </summary>
public override void Register(StreamMetricsRegistry metricsRegistry)
{
    TotalNumberOfMessagesConsumedSensor = LibrdKafkaConsumerMetrics.TotalNumberOfMessagesConsumedSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfMessageBytesConsumedSensor = LibrdKafkaConsumerMetrics.TotalNumberOfMessageBytesConsumedSensor(threadId, clientId, streamAppId, metricsRegistry);
    NumberOfOpsWaitinInQueueSensor = LibrdKafkaConsumerMetrics.NumberOfOpsWaitinInQueueSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfResponsesReceivedFromKafkaSensor = LibrdKafkaConsumerMetrics.TotalNumberOfResponsesReceivedFromKafkaSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfBytesReceivedFromKafkaSensor = LibrdKafkaConsumerMetrics.TotalNumberOfBytesReceivedFromKafkaSensor(threadId, clientId, streamAppId, metricsRegistry);
    RebalanceAgeSensor = LibrdKafkaConsumerMetrics.RebalanceAgeSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfRelabalanceSensor = LibrdKafkaConsumerMetrics.TotalNumberOfRelabalanceSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfResponsesReceivedSensor = LibrdKafkaConsumerMetrics.TotalNumberOfResponsesReceivedSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfBytesReceivedSensor = LibrdKafkaConsumerMetrics.TotalNumberOfBytesReceivedSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfReceivedErrorsSensor = LibrdKafkaConsumerMetrics.TotalNumberOfReceivedErrorsSensor(threadId, clientId, streamAppId, metricsRegistry);
    NumberOfConnectionAttempsSensor = LibrdKafkaConsumerMetrics.NumberOfConnectionAttempsSensor(threadId, clientId, streamAppId, metricsRegistry);
    NumberOfDisconnectsSensor = LibrdKafkaConsumerMetrics.NumberOfDisconnectsSensor(threadId, clientId, streamAppId, metricsRegistry);
    BrokerLatencyAverageMsSensor = LibrdKafkaConsumerMetrics.BrokerLatencyAverageMsSensor(threadId, clientId, streamAppId, metricsRegistry);
    BatchSizeAverageBytesSensor = LibrdKafkaConsumerMetrics.BatchSizeAverageBytesSensor(threadId, clientId, streamAppId, metricsRegistry);
    BatchMessageCountsAverageSensor = LibrdKafkaConsumerMetrics.BatchMessageCountsAverageSensor(threadId, clientId, streamAppId, metricsRegistry);
    ConsumerLagSensor = LibrdKafkaConsumerMetrics.ConsumerLagSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfMessagesConsumedByPartitionSensor = LibrdKafkaConsumerMetrics.TotalNumberOfMessagesConsumedByPartitionSensor(threadId, clientId, streamAppId, metricsRegistry);
    TotalNumberOfBytesConsumedByPartitionSensor = LibrdKafkaConsumerMetrics.TotalNumberOfBytesConsumedByPartitionSensor(threadId, clientId, streamAppId, metricsRegistry);
}
/// <summary>Creates the task-level commit rate/count sensor (DEBUG level).</summary>
public static Sensor CommitSensor(string threadId, TaskId taskId, StreamMetricsRegistry streamsMetrics)
    => InvocationRateAndCountSensor(
        threadId,
        taskId,
        COMMIT,
        COMMIT_DESCRIPTION,
        COMMIT_RATE_DESCRIPTION,
        COMMIT_TOTAL_DESCRIPTION,
        MetricsRecordingLevel.DEBUG,
        streamsMetrics);
/// <summary>Creates the task-level process avg/max latency sensor (DEBUG level).</summary>
public static Sensor ProcessLatencySensor(string threadId, TaskId taskId, StreamMetricsRegistry metricsRegistry)
    => AvgAndMaxSensor(
        threadId,
        taskId,
        PROCESS_LATENCY,
        PROCESS_DESCRIPTION,
        PROCESS_AVG_LATENCY_DESCRIPTION,
        PROCESS_MAX_LATENCY_DESCRIPTION,
        MetricsRecordingLevel.DEBUG,
        metricsRegistry);
/// <summary>Creates the thread-level poll sensor: rate, count and avg/max latency (INFO level).</summary>
public static Sensor PollSensor(string threadId, StreamMetricsRegistry metricsRegistry)
    => InvocationRateAndCountAndAvgAndMaxLatencySensor(
        threadId,
        POLL,
        POLL_DESCRIPTION,
        POLL_RATE_DESCRIPTION,
        POLL_TOTAL_DESCRIPTION,
        POLL_AVG_LATENCY_DESCRIPTION,
        POLL_MAX_LATENCY_DESCRIPTION,
        MetricsRecordingLevel.INFO,
        metricsRegistry);
/// <summary>Creates the task-level process rate/count sensor (DEBUG level).</summary>
public static Sensor ProcessSensor(string threadId, TaskId taskId, StreamMetricsRegistry metricsRegistry)
{
    var sensor = metricsRegistry.TaskLevelSensor(threadId, taskId, PROCESS, PROCESS_DESCRIPTION, MetricsRecordingLevel.DEBUG);
    var taskTags = metricsRegistry.TaskLevelTags(threadId, taskId.ToString());

    SensorHelper.AddInvocationRateAndCountToSensor(
        sensor,
        StreamMetricsRegistry.TASK_LEVEL_GROUP,
        taskTags,
        PROCESS,
        PROCESS_RATE_DESCRIPTION,
        PROCESS_TOTAL_DESCRIPTION);

    return sensor;
}
/// <summary>
/// Creates the task-level enforced-processing rate/count sensor (DEBUG level).
/// NOTE(review): the metric name constant is also passed as its description —
/// presumably intentional (no separate description constant exists); verify.
/// </summary>
public static Sensor EnforcedProcessingSensor(string threadId, TaskId taskId, StreamMetricsRegistry streamsMetrics)
    => InvocationRateAndCountSensor(
        threadId,
        taskId,
        ENFORCED_PROCESSING,
        ENFORCED_PROCESSING,
        ENFORCED_PROCESSING_RATE_DESCRIPTION,
        ENFORCED_PROCESSING_TOTAL_DESCRIPTION,
        MetricsRecordingLevel.DEBUG,
        streamsMetrics);
/// <summary>Creates the thread-level commit sensor: rate, count and avg/max latency (INFO level).</summary>
public static Sensor CommitSensor(string threadId, StreamMetricsRegistry metricsRegistry)
    => InvocationRateAndCountAndAvgAndMaxLatencySensor(
        threadId,
        COMMIT,
        COMMIT_DESCRIPTION,
        COMMIT_RATE_DESCRIPTION,
        COMMIT_TOTAL_DESCRIPTION,
        COMMIT_AVG_LATENCY_DESCRIPTION,
        COMMIT_MAX_LATENCY_DESCRIPTION,
        MetricsRecordingLevel.INFO,
        metricsRegistry);
/// <summary>
/// Stores the collaborators needed to build stream tasks for one thread and registers
/// the thread-level "task created" sensor.
/// </summary>
public TaskCreator(InternalTopologyBuilder builder, IStreamConfig configuration, string threadId, IKafkaSupplier kafkaSupplier, IProducer<byte[], byte[]> producer, StoreChangelogReader storeChangelogReader, StreamMetricsRegistry streamMetricsRegistry)
{
    this.builder = builder;
    this.configuration = configuration;
    this.threadId = threadId;
    this.kafkaSupplier = kafkaSupplier;
    this.producer = producer;
    this.storeChangelogReader = storeChangelogReader;
    this.streamMetricsRegistry = streamMetricsRegistry;

    createTaskSensor = ThreadMetrics.CreateTaskSensor(threadId, streamMetricsRegistry);
}
/// <summary>
/// Creates a changelog reader over the dedicated restore consumer; poll timing and
/// max records per restore poll come from the stream configuration.
/// </summary>
public StoreChangelogReader(
    IStreamConfig config,
    IConsumer<byte[], byte[]> restoreConsumer,
    string threadId,
    StreamMetricsRegistry metricsRegistry)
{
    this.restoreConsumer = restoreConsumer;
    this.threadId = threadId;
    this.metricsRegistry = metricsRegistry;

    pollTimeMs = config.PollMs;
    maxPollRestoringRecords = config.MaxPollRestoringRecords;

    // Per-partition restoration state, populated as changelogs are registered.
    changelogs = new Dictionary<TopicPartition, ChangelogMetadata>();
}
/// <summary>
/// Captures everything required to later construct a global stream thread:
/// the global topology, its consumer, admin client, configuration and metrics registry.
/// </summary>
public GlobalStreamThreadFactory(ProcessorTopology topology, string threadClientId, IConsumer<byte[], byte[]> globalConsumer, IStreamConfig configuration, IAdminClient adminClient, StreamMetricsRegistry streamMetricsRegistry)
{
    this.topology = topology;
    this.threadClientId = threadClientId;
    this.globalConsumer = globalConsumer;
    this.configuration = configuration;
    this.adminClient = adminClient;
    this.streamMetricsRegistry = streamMetricsRegistry;
}
/// <summary>
/// Builds a fully wired <see cref="StreamThread"/>: optional shared producer
/// (at-least-once only), restore consumer + changelog reader, task creator/manager,
/// rebalance listener and the main consumer.
/// </summary>
internal static IThread Create(string threadId, string clientId, InternalTopologyBuilder builder, StreamMetricsRegistry streamMetricsRegistry, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IAdminClient adminClient, int threadInd)
{
    var logPrefix = $"stream-thread[{threadId}] ";
    var log = Logger.GetLogger(typeof(StreamThread));
    var threadClientId = $"{clientId}-StreamThread-{threadInd}";

    // TODO : remove this limitations depends version of Kafka Cluster
    // Until the cluster supports KIP-447 semantics, exactly-once needs one producer per
    // input partition (tracked with its consumed offset elsewhere), whereas at-least-once
    // can share a single producer across every stream task of this thread.
    // https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics
    IProducer<byte[], byte[]> producer = null;
    if (configuration.Guarantee == ProcessingGuarantee.AT_LEAST_ONCE)
    {
        log.LogInformation("{LogPrefix}Creating shared producer client", logPrefix);
        producer = kafkaSupplier.GetProducer(configuration.ToProducerConfig(GetThreadProducerClientId(threadId)).Wrap(threadId));
    }

    // Dedicated consumer + group for state-store restoration.
    var restoreConfig = configuration.ToConsumerConfig(GetRestoreConsumerClientId(threadClientId));
    restoreConfig.GroupId = $"{configuration.ApplicationId}-restore-group";
    var restoreConsumer = kafkaSupplier.GetRestoreConsumer(restoreConfig);

    var storeChangelogReader = new StoreChangelogReader(configuration, restoreConsumer, threadId, streamMetricsRegistry);
    var taskCreator = new TaskCreator(builder, configuration, threadId, kafkaSupplier, producer, storeChangelogReader, streamMetricsRegistry);
    var manager = new TaskManager(builder, taskCreator, adminClient, storeChangelogReader);

    var listener = new StreamsRebalanceListener(manager);

    log.LogInformation("{LogPrefix}Creating consumer client", logPrefix);
    var consumer = kafkaSupplier.GetConsumer(configuration.ToConsumerConfig(GetConsumerClientId(threadClientId)).Wrap(threadId), listener);
    manager.Consumer = consumer;

    var thread = new StreamThread(threadId, threadClientId, manager, consumer, builder, storeChangelogReader, streamMetricsRegistry, configuration);
    listener.Thread = thread;

    return thread;
}
/// <summary>
/// Synchronous test driver: wires a metrics registry and sync supplier, maps each source
/// topic (partition 0) to its task id, and — when the topology declares one — builds and
/// initializes the global state update task.
/// </summary>
public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder, IStreamConfig configuration, IStreamConfig topicConfiguration, IKafkaSupplier supplier, CancellationToken token)
{
    this.configuration = configuration;
    this.configuration.ClientId = clientId;
    this.topicConfiguration = topicConfiguration;
    metricsRegistry = new StreamMetricsRegistry(clientId, MetricsRecordingLevel.DEBUG);
    this.token = token;
    builder = topologyBuilder;

    // Default to the in-memory sync supplier when none is provided.
    this.supplier = supplier ?? new SyncKafkaSupplier();
    this.supplier.MetricsRegistry = metricsRegistry;
    producer = this.supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;

    // Group each source topic's partition 0 under its owning task id.
    foreach (var sourceTopic in builder.GetSourceTopics())
    {
        var part = new TopicPartition(sourceTopic, 0);
        var taskId = builder.GetTaskIdFromPartition(part);
        // FIX: single lookup via TryGetValue instead of ContainsKey + indexer (double lookup).
        if (!partitionsByTaskId.TryGetValue(taskId, out var partitions))
        {
            partitions = new List<TopicPartition>();
            partitionsByTaskId.Add(taskId, partitions);
        }
        partitions.Add(part);
    }

    // Wire the global state topology (if any): consumer, state manager, context and update task.
    ProcessorTopology globalTaskTopology = topologyBuilder.BuildGlobalStateTopology();
    hasGlobalTopology = globalTaskTopology != null;
    if (hasGlobalTopology)
    {
        var globalConsumer = this.supplier.GetGlobalConsumer(configuration.ToGlobalConsumerConfig($"{clientId}-global-consumer"));
        var adminClient = this.supplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin"));
        var stateManager = new GlobalStateManager(globalConsumer, globalTaskTopology, adminClient, configuration);
        globalProcessorContext = new GlobalProcessorContext(configuration, stateManager, metricsRegistry);
        stateManager.SetGlobalProcessorContext(globalProcessorContext);
        globalTask = new GlobalStateUpdateTask(stateManager, globalTaskTopology, globalProcessorContext);
        globalTask.Initialize();
    }
}
/// <summary>Creates the task-level dropped-records rate/count sensor (INFO level).</summary>
public static Sensor DroppedRecordsSensor(string threadId, TaskId taskId, StreamMetricsRegistry streamsMetrics)
    => InvocationRateAndCountSensor(
        threadId,
        taskId,
        DROPPED_RECORDS,
        DROPPED_RECORDS_DESCRIPTION,
        DROPPED_RECORDS_RATE_DESCRIPTION,
        DROPPED_RECORDS_TOTAL_DESCRIPTION,
        MetricsRecordingLevel.INFO,
        streamsMetrics);
/// <summary>
/// Creates the task-level active-restoration gauge sensor (DEBUG level); the value metric
/// reflects whether the task is currently restoring its state stores.
/// </summary>
public static Sensor ActiveRestorationSensor(string threadId, TaskId taskId, StreamMetricsRegistry metricsRegistry)
{
    var sensor = metricsRegistry.TaskLevelSensor(threadId, taskId, ACTIVE_RESTORATION, ACTIVE_RESTORATION_DESCRIPTION, MetricsRecordingLevel.DEBUG);
    var taskTags = metricsRegistry.TaskLevelTags(threadId, taskId.ToString());

    SensorHelper.AddValueMetricToSensor(
        sensor,
        StreamMetricsRegistry.TASK_LEVEL_GROUP,
        taskTags,
        ACTIVE_RESTORATION,
        ACTIVE_RESTORATION_DESCRIPTION);

    return sensor;
}
/// <summary>
/// Creates the librdkafka "total messages consumed" sensor for the given consumer client.
/// NOTE(review): unlike the sibling factories this uses the non-generic CreateSensor and
/// returns the base Sensor type — presumably deliberate; verify against CreateSensor overloads.
/// </summary>
public static Sensor TotalNumberOfMessagesConsumedSensor(string threadId, string librdKafkaClientId, string streamAppId, StreamMetricsRegistry metricsRegistry)
    => CreateSensor(
        threadId,
        librdKafkaClientId,
        streamAppId,
        TOTAL_MESSAGE_CONSUMED,
        TOTAL_MESSAGE_CONSUMED_DESCRIPTION,
        (false, false, false),
        true,
        metricsRegistry);
/// <summary>Creates the per-partition "total bytes consumed" librdkafka sensor.</summary>
public static LibrdKafkaSensor TotalNumberOfBytesConsumedByPartitionSensor(string threadId, string librdKafkaClientId, string streamAppId, StreamMetricsRegistry metricsRegistry)
    => CreateSensor<LibrdKafkaSensor>(
        threadId,
        librdKafkaClientId,
        streamAppId,
        TOTAL_NUMBER_BYTES_CONSUMED,
        TOTAL_NUMBER_BYTES_CONSUMED_DESCRIPTION,
        (true, true, true),
        true,
        metricsRegistry);
/// <summary>
/// Creates the consumer-lag librdkafka sensor.
/// (The COUNSUMER_LAG constant name is misspelled but defined elsewhere; renaming it is
/// outside this block's scope.)
/// </summary>
public static LibrdKafkaSensor ConsumerLagSensor(string threadId, string librdKafkaClientId, string streamAppId, StreamMetricsRegistry metricsRegistry)
    => CreateSensor<LibrdKafkaSensor>(
        threadId,
        librdKafkaClientId,
        streamAppId,
        COUNSUMER_LAG,
        COUNSUMER_LAG_DESCRIPTION,
        (true, true, true),
        true,
        metricsRegistry);