Example #1
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder, IStreamConfig configuration, IStreamConfig topicConfiguration, IKafkaSupplier supplier, CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;

            this.token    = token;
            builder       = topologyBuilder;
            this.supplier = supplier ?? new SyncKafkaSupplier();
            producer      = this.supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;

            foreach (var sourceTopic in builder.GetSourceTopics().Union(builder.GetGlobalTopics()))
            {
                var part   = new TopicPartition(sourceTopic, 0);
                var taskId = builder.GetTaskIdFromPartition(part);
                if (partitionsByTaskId.ContainsKey(taskId))
                {
                    partitionsByTaskId[taskId].Add(part);
                }
                else
                {
                    partitionsByTaskId.Add(taskId, new List<TopicPartition> { part });
                }
            }
        }
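A note on the loop above: it grows partitionsByTaskId through a ContainsKey/Add pair; the same grouping can be written with a single TryGetValue lookup. A minimal sketch of that idiom, reusing the names from the constructor (illustrative only, not the library's code):

        // Sketch: group partitions by task id with one dictionary lookup.
        foreach (var sourceTopic in builder.GetSourceTopics().Union(builder.GetGlobalTopics()))
        {
            var part   = new TopicPartition(sourceTopic, 0);
            var taskId = builder.GetTaskIdFromPartition(part);
            if (!partitionsByTaskId.TryGetValue(taskId, out var parts))
            {
                parts = new List<TopicPartition>();
                partitionsByTaskId.Add(taskId, parts);
            }
            parts.Add(part);
        }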
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder, IStreamConfig configuration, IStreamConfig topicConfiguration, CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;

            this.token = token;
            builder    = topologyBuilder;
            supplier   = new SyncKafkaSupplier();
            producer   = supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;
        }
        internal static IThread Create(string threadId, string clientId, InternalTopologyBuilder builder,
                                       StreamMetricsRegistry streamMetricsRegistry, IStreamConfig configuration, IKafkaSupplier kafkaSupplier,
                                       IAdminClient adminClient, int threadInd)
        {
            string logPrefix  = $"stream-thread[{threadId}] ";
            var    log        = Logger.GetLogger(typeof(StreamThread));
            var    customerID = $"{clientId}-StreamThread-{threadInd}";
            IProducer<byte[], byte[]> producer = null;

            // TODO: remove this limitation depending on the Kafka cluster version.
            // Due to limitations outlined in (and ultimately addressed by) KIP-447,
            // it is currently necessary to use a separate producer per input
            // partition. The producerState dictionary keeps track of these
            // producers and of the current consumed offset.
            // https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics
            // If the guarantee is AT_LEAST_ONCE, a single producer is shared by all
            // StreamTasks in this thread; otherwise, each StreamTask gets its own producer.
            if (configuration.Guarantee == ProcessingGuarantee.AT_LEAST_ONCE)
            {
                log.LogInformation("{LogPrefix}Creating shared producer client", logPrefix);
                producer = kafkaSupplier.GetProducer(configuration.ToProducerConfig(GetThreadProducerClientId(threadId)).Wrap(threadId));
            }

            var restoreConfig = configuration.ToConsumerConfig(GetRestoreConsumerClientId(customerID));

            restoreConfig.GroupId = $"{configuration.ApplicationId}-restore-group";
            var restoreConsumer = kafkaSupplier.GetRestoreConsumer(restoreConfig);

            var storeChangelogReader = new StoreChangelogReader(
                configuration,
                restoreConsumer,
                threadId,
                streamMetricsRegistry);

            var taskCreator = new TaskCreator(builder, configuration, threadId, kafkaSupplier, producer, storeChangelogReader, streamMetricsRegistry);
            var manager     = new TaskManager(builder, taskCreator, adminClient, storeChangelogReader);

            var listener = new StreamsRebalanceListener(manager);

            log.LogInformation("{LogPrefix}Creating consumer client", logPrefix);
            var consumer = kafkaSupplier.GetConsumer(configuration.ToConsumerConfig(GetConsumerClientId(customerID)).Wrap(threadId), listener);

            manager.Consumer = consumer;

            var thread = new StreamThread(threadId, customerID, manager, consumer, builder, storeChangelogReader, streamMetricsRegistry, configuration);

            listener.Thread = thread;

            return thread;
        }
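A hedged sketch of a call site for the factory above. The builder, configuration, supplier, and token variables are assumed to exist in the caller; the threadId format and the Start(CancellationToken) call are assumptions, not taken from this snippet:

        // Hypothetical call site for StreamThread.Create (all inputs assumed).
        var adminClient = kafkaSupplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin"));
        var thread = StreamThread.Create(
            $"{clientId}-stream-thread-0",                                 // threadId (format assumed)
            clientId,
            internalTopologyBuilder,
            new StreamMetricsRegistry(clientId, MetricsRecordingLevel.DEBUG),
            configuration,
            kafkaSupplier,
            adminClient,
            0);                                                            // threadInd
        thread.Start(token);                                               // assumed IThread member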
        public void RestoreFromRecovery()
        {
            storeChangelogReader.Restore();
            stateMgr.Checkpoint();
            stateMgr.Close();
            store.Init(context, store);
            stateMgr.InitializeOffsetsFromCheckpoint();

            var producer = supplier.GetProducer(config.ToProducerConfig());

            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key3", "value3"));
            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key4", "value4"));

            restoreConsumer.Resume(new TopicPartition(changelogTopic, 0).ToSingle());

            storeChangelogReader.Restore();
            stateMgr.Checkpoint();
            stateMgr.Close();

            File.Delete(Path.Combine(config.StateDir, config.ApplicationId, "0-0", ".checkpoint"));

            store.Init(context, store);
            stateMgr.InitializeOffsetsFromCheckpoint();

            restoreConsumer.Resume(new TopicPartition(changelogTopic, 0).ToSingle());

            storeChangelogReader.Restore();

            Assert.AreEqual(4, store.All().ToList().Count);

            var metadata = storeChangelogReader.GetMetadata(new TopicPartition(changelogTopic, 0));

            Assert.IsNotNull(metadata);
            Assert.AreEqual(ChangelogState.COMPLETED, metadata.ChangelogState);
            Assert.AreEqual(3, metadata.RestoreEndOffset);
            Assert.AreEqual(2, metadata.TotalRestored);
        }
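Both this test and Init below pass a single TopicPartition through a ToSingle() extension where a collection is expected. Assuming it merely wraps its receiver in a one-element list, a sketch could look like this (not the library's actual definition):

        public static class SingleItemExtensions
        {
            // Wrap any value in a single-element list.
            public static List<T> ToSingle<T>(this T item) => new List<T> { item };
        }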
Example #5
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder,
                                             IStreamConfig configuration, IStreamConfig topicConfiguration, IKafkaSupplier supplier,
                                             CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;
            metricsRegistry             = new StreamMetricsRegistry(clientId, MetricsRecordingLevel.DEBUG);

            this.token    = token;
            builder       = topologyBuilder;
            this.supplier = supplier ?? new SyncKafkaSupplier();
            this.supplier.MetricsRegistry = metricsRegistry;
            producer = this.supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;

            foreach (var sourceTopic in builder.GetSourceTopics())
            {
                var part   = new TopicPartition(sourceTopic, 0);
                var taskId = builder.GetTaskIdFromPartition(part);
                if (partitionsByTaskId.ContainsKey(taskId))
                {
                    partitionsByTaskId[taskId].Add(part);
                }
                else
                {
                    partitionsByTaskId.Add(taskId, new List<TopicPartition> { part });
                }
            }

            ProcessorTopology globalTaskTopology = topologyBuilder.BuildGlobalStateTopology();

            hasGlobalTopology = globalTaskTopology != null;
            if (hasGlobalTopology)
            {
                var globalConsumer =
                    this.supplier.GetGlobalConsumer(configuration.ToGlobalConsumerConfig($"{clientId}-global-consumer"));
                var adminClient  = this.supplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin"));
                var stateManager =
                    new GlobalStateManager(globalConsumer, globalTaskTopology, adminClient, configuration);
                globalProcessorContext = new GlobalProcessorContext(configuration, stateManager, metricsRegistry);
                stateManager.SetGlobalProcessorContext(globalProcessorContext);
                globalTask = new GlobalStateUpdateTask(stateManager, globalTaskTopology, globalProcessorContext);

                globalTask.Initialize();
            }
        }
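For context, a hypothetical direct construction of the driver shown above; in practice the class is created internally by the library's test driver rather than by application code, and the builder/config variables here are assumptions:

        var tokenSource = new CancellationTokenSource();
        var driver = new TaskSynchronousTopologyDriver(
            "test-client",
            internalTopologyBuilder,   // assumed: an InternalTopologyBuilder for the topology under test
            streamConfig,              // assumed: the application's IStreamConfig
            topicConfig,               // assumed: the IStreamConfig used for test topics
            null,                      // a null supplier falls back to SyncKafkaSupplier
            tokenSource.Token);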
        public void Init()
        {
            stateDir             = Path.Combine(".", Guid.NewGuid().ToString());
            config               = new StreamConfig <StringSerDes, StringSerDes>();
            config.ApplicationId = "test-storechangelog-app";
            config.StateDir      = stateDir;
            config.PollMs        = 100;

            supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var topicPart = new TopicPartition("topic", 0);

            changelogTopic = "store-changelog-topic";
            var changelogsTopics = new Dictionary<string, string>();

            changelogsTopics.Add("store", changelogTopic);
            var id = new TaskId
            {
                Id        = 0,
                Partition = 0
            };

            store = new RocksDbKeyValueStore("store");
            storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            stateMgr = new ProcessorStateManager(
                id,
                topicPart.ToSingle(),
                changelogsTopics,
                storeChangelogReader,
                new OffsetCheckpointFile(Path.Combine(config.StateDir, config.ApplicationId, $"{id.Id}-{id.Partition}"))
                );

            Mock<AbstractTask> moq = new Mock<AbstractTask>();

            moq.Setup(t => t.Id).Returns(new TaskId { Id = 0, Partition = 0 });

            context = new ProcessorContext(moq.Object, config, stateMgr, new StreamMetricsRegistry());
            store.Init(context, store);

            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key1", "value1"));
            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key2", "value2"));
        }
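Init and RestoreFromRecovery both rely on a CreateMessage helper that these snippets do not show. A plausible minimal version, assuming UTF-8 encoded string keys and values and Confluent.Kafka's Message type (hypothetical, requires using System.Text;):

        private Message<byte[], byte[]> CreateMessage(string topic, string key, string value)
            => new Message<byte[], byte[]>
            {
                // topic mirrors the call sites; Produce() receives the topic separately.
                Key   = Encoding.UTF8.GetBytes(key),
                Value = Encoding.UTF8.GetBytes(value)
            };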
        internal static IThread Create(string threadId, string clientId, InternalTopologyBuilder builder, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IAdminClient adminClient, int threadInd)
        {
            string logPrefix  = $"stream-thread[{threadId}] ";
            var    log        = Logger.GetLogger(typeof(StreamThread));
            var    customerID = $"{clientId}-StreamThread-{threadInd}";
            IProducer<byte[], byte[]> producer = null;

            // Due to limitations outlined in (and ultimately addressed by) KIP-447,
            // it is currently necessary to use a separate producer per input
            // partition. The producerState dictionary keeps track of these
            // producers and of the current consumed offset.
            // https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics
            // If the guarantee is AT_LEAST_ONCE, a single producer is shared by all
            // StreamTasks in this thread; otherwise, each StreamTask gets its own producer.
            if (configuration.Guarantee == ProcessingGuarantee.AT_LEAST_ONCE)
            {
                log.Info($"{logPrefix}Creating shared producer client");
                producer = kafkaSupplier.GetProducer(configuration.ToProducerConfig(GetThreadProducerClientId(threadId)));
            }

            var taskCreator = new TaskCreator(builder, configuration, threadId, kafkaSupplier, producer);
            var manager     = new TaskManager(builder, taskCreator, adminClient);

            var listener = new StreamsRebalanceListener(manager);

            log.Info($"{logPrefix}Creating consumer client");
            var consumer = kafkaSupplier.GetConsumer(configuration.ToConsumerConfig(customerID), listener);

            manager.Consumer = consumer;

            var thread = new StreamThread(threadId, customerID, manager, consumer, builder, TimeSpan.FromMilliseconds(configuration.PollMs), configuration.CommitIntervalMs);

            listener.Thread = thread;

            return thread;
        }