/// <summary>
/// Builds a single-partition stream task: wires the producer (creating a
/// transactional one when none is supplied, i.e. exactly-once mode), the
/// record collector, the processor context, and the source record queue for
/// this task's topic/partition.
/// </summary>
public StreamTask(string threadId, TaskId id, TopicPartition partition, ProcessorTopology processorTopology, IConsumer<byte[], byte[]> consumer, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IProducer<byte[], byte[]> producer)
    : base(id, partition, processorTopology, consumer, configuration)
{
    this.threadId = threadId;
    this.kafkaSupplier = kafkaSupplier;
    consumedOffsets = new Dictionary<TopicPartition, long>();

    // A null producer signals exactly-once semantics: create a transactional
    // producer and open its transaction context up front.
    if (producer == null)
    {
        this.producer = CreateEOSProducer();
        InitializeTransaction();
        eosEnabled = true;
    }
    else
    {
        this.producer = producer;
    }

    collector = new RecordCollector(logPrefix);
    collector.Init(ref this.producer);

    // Extractor declared on the source processor for this task's topic
    // (may be null, in which case the configured default is used below).
    var taskTopicExtractor = (processorTopology.GetSourceProcessor(id.Topic) as ISourceProcessor).Extractor;

    Context = new ProcessorContext(configuration, stateMgr).UseRecordCollector(collector);
    processor = processorTopology.GetSourceProcessor(partition.Topic);
    queue = new RecordQueue<ConsumeResult<byte[], byte[]>>(
        100,
        logPrefix,
        $"record-queue-{id.Topic}-{id.Partition}",
        taskTopicExtractor ?? configuration.DefaultTimestampExtractor);
}
/// <summary>
/// Attaches the given record collector to this context and returns the
/// context itself so the call can be chained fluently. A null argument is
/// ignored and the currently attached collector is kept.
/// </summary>
internal ProcessorContext UseRecordCollector(IRecordCollector collector)
{
    // Guard clause: nothing to attach, keep the existing collector.
    if (collector == null)
    {
        return this;
    }

    RecordCollector = collector;
    return this;
}
/// <summary>
/// Builds a multi-partition stream task: configures idle/buffering limits
/// from the stream configuration, wires the producer (transactional when none
/// is supplied, i.e. exactly-once mode), the record collector and the
/// processor context, then creates one record queue per assigned partition
/// and hands them to the partition grouper.
/// </summary>
public StreamTask(string threadId, TaskId id, IEnumerable<TopicPartition> partitions, ProcessorTopology processorTopology, IConsumer<byte[], byte[]> consumer, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IProducer<byte[], byte[]> producer)
    : base(id, partitions, processorTopology, consumer, configuration)
{
    this.threadId = threadId;
    this.kafkaSupplier = kafkaSupplier;
    consumedOffsets = new Dictionary<TopicPartition, long>();
    maxTaskIdleMs = configuration.MaxTaskIdleMs;
    maxBufferedSize = configuration.BufferedRecordsPerPartition;
    followMetadata = configuration.FollowMetadata;
    idleStartTime = -1;

    // A null producer signals exactly-once semantics: create a transactional
    // producer and open its transaction context up front.
    if (producer == null)
    {
        this.producer = CreateEOSProducer();
        InitializeTransaction();
        eosEnabled = true;
    }
    else
    {
        this.producer = producer;
    }

    collector = new RecordCollector(logPrefix, configuration, id);
    collector.Init(ref this.producer);

    Context = new ProcessorContext(this, configuration, stateMgr).UseRecordCollector(collector);
    Context.FollowMetadata = followMetadata;

    // One buffering queue per input partition, fed by that partition's
    // source processor; the source's own extractor wins over the default.
    var queuesByPartition = new Dictionary<TopicPartition, RecordQueue>();
    foreach (var topicPartition in partitions)
    {
        var source = processorTopology.GetSourceProcessor(topicPartition.Topic);
        var extractor = source.Extractor ?? configuration.DefaultTimestampExtractor;
        queuesByPartition.Add(
            topicPartition,
            new RecordQueue(
                logPrefix,
                $"record-queue-{topicPartition.Topic}-{id.Id}-{id.Partition}",
                extractor,
                topicPartition,
                source));
        processors.Add(source);
    }

    partitionGrouper = new PartitionGrouper(queuesByPartition);
}
/// <summary>
/// Test fixture setup: builds an in-memory processor context backed by a
/// mocked task, a sync Kafka supplier and a mock state manager, then wraps an
/// in-memory key/value store with change-logging for the tests in this class.
/// </summary>
public void Begin()
{
    valueAndTimestampSerDes = new ValueAndTimestampSerDes<string>(stringSerDes);

    config = new StreamConfig
    {
        ApplicationId = "unit-test-changelogging-tkv"
    };

    id = new TaskId { Id = 0, Partition = 0 };
    partition = new TopicPartition("source", 0);
    kafkaSupplier = new SyncKafkaSupplier();

    var producerSettings = new ProducerConfig
    {
        ClientId = "producer-1"
    };
    var producer = kafkaSupplier.GetProducer(producerSettings);

    recordCollector = new RecordCollector(
        "p-1",
        config,
        id,
        new NoRunnableSensor("s", "s", MetricsRecordingLevel.DEBUG));
    recordCollector.Init(ref producer);

    // Map the store name to its changelog topic for the state manager.
    var changelogTopics = new Dictionary<string, string>
    {
        { "test-store", "test-store-changelog" }
    };
    stateManager = new ProcessorStateManager(
        id,
        new List<TopicPartition> { partition },
        changelogTopics,
        new MockChangelogRegister(),
        new MockOffsetCheckpointManager());

    // Only the task id is needed by the context, so a bare mock suffices.
    task = new Mock<AbstractTask>();
    task.Setup(k => k.Id).Returns(id);

    context = new ProcessorContext(task.Object, config, stateManager, new StreamMetricsRegistry());
    context.UseRecordCollector(recordCollector);

    var backingStore = new InMemoryKeyValueStore("test-store");
    store = new ChangeLoggingTimestampedKeyValueBytesStore(backingStore);
    store.Init(context, store);
}
/// <summary>
/// Builds a multi-partition stream task with metrics support: in addition to
/// the producer / collector / context wiring, assigns the task id to each
/// source processor and registers the task- and thread-level sensors used to
/// report buffering, processing, commit and restoration activity.
/// </summary>
public StreamTask(string threadId, TaskId id, IEnumerable<TopicPartition> partitions, ProcessorTopology processorTopology, IConsumer<byte[], byte[]> consumer, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IProducer<byte[], byte[]> producer, IChangelogRegister changelogRegister, StreamMetricsRegistry streamMetricsRegistry)
    : base(id, partitions, processorTopology, consumer, configuration, changelogRegister)
{
    this.threadId = threadId;
    this.kafkaSupplier = kafkaSupplier;
    this.streamMetricsRegistry = streamMetricsRegistry;
    consumedOffsets = new Dictionary<TopicPartition, long>();
    maxTaskIdleMs = configuration.MaxTaskIdleMs;
    maxBufferedSize = configuration.BufferedRecordsPerPartition;
    followMetadata = configuration.FollowMetadata;
    idleStartTime = -1;

    // A null producer signals exactly-once semantics: create a transactional
    // producer and open its transaction context up front.
    if (producer == null)
    {
        this.producer = CreateEOSProducer();
        InitializeTransaction();
        eosEnabled = true;
    }
    else
    {
        this.producer = producer;
    }

    // The dropped-records sensor is shared by the collector and every queue.
    var droppedRecordsSensor = TaskMetrics.DroppedRecordsSensor(this.threadId, Id, this.streamMetricsRegistry);
    collector = new RecordCollector(logPrefix, configuration, id, droppedRecordsSensor);
    collector.Init(ref this.producer);

    Context = new ProcessorContext(this, configuration, stateMgr, streamMetricsRegistry)
        .UseRecordCollector(collector);
    Context.FollowMetadata = followMetadata;

    // One buffering queue per input partition, fed by that partition's
    // source processor; the source's own extractor wins over the default.
    var queuesByPartition = new Dictionary<TopicPartition, RecordQueue>();
    foreach (var topicPartition in partitions)
    {
        var source = processorTopology.GetSourceProcessor(topicPartition.Topic);
        source.SetTaskId(id);
        var extractor = source.Extractor ?? configuration.DefaultTimestampExtractor;
        queuesByPartition.Add(
            topicPartition,
            new RecordQueue(
                logPrefix,
                $"record-queue-{topicPartition.Topic}-{id.Id}-{id.Partition}",
                extractor,
                topicPartition,
                source,
                droppedRecordsSensor));
        processors.Add(source);
    }
    partitionGrouper = new PartitionGrouper(queuesByPartition);

    // Task- and thread-level sensors.
    closeTaskSensor = ThreadMetrics.ClosedTaskSensor(this.threadId, streamMetricsRegistry);
    activeBufferedRecordSensor = TaskMetrics.ActiveBufferedRecordsSensor(this.threadId, Id, streamMetricsRegistry);
    processSensor = TaskMetrics.ProcessSensor(this.threadId, Id, streamMetricsRegistry);
    processLatencySensor = TaskMetrics.ProcessLatencySensor(this.threadId, Id, streamMetricsRegistry);
    enforcedProcessingSensor = TaskMetrics.EnforcedProcessingSensor(this.threadId, Id, streamMetricsRegistry);
    commitSensor = TaskMetrics.CommitSensor(this.threadId, Id, streamMetricsRegistry);
    activeRestorationSensor = TaskMetrics.ActiveRestorationSensor(this.threadId, Id, streamMetricsRegistry);
    // NOTE(review): the field name "restorationRecordsSendsor" (declared elsewhere)
    // contains a typo; kept as-is because the declaration is outside this block.
    restorationRecordsSendsor = TaskMetrics.RestorationRecordsSensor(this.threadId, Id, streamMetricsRegistry);
}