Example #1
        public TestMultiInputTopic<K, V> CreateMultiInputTopic<K, V>(string[] topics, ISerDes<K> keySerdes = null,
                                                                     ISerDes<V> valueSerdes = null)
        {
            Dictionary<string, IPipeInput> pipes = new Dictionary<string, IPipeInput>();

            foreach (var topic in topics)
            {
                var builder   = CreateBuilder(topic);
                var pipeInput = builder.Input(topic, configuration);

                var topicsLink = new List<string>();
                this.builder.GetLinkTopics(topic, topicsLink);
                var consumer =
                    supplier.GetConsumer(topicConfiguration.ToConsumerConfig("consumer-repartition-forwarder"),
                                         null);

                foreach (var topicLink in topicsLink)
                {
                    pipeInput.Flushed += () => ForwardRepartitionTopic(consumer, topicLink);
                }

                pipes.Add(topic, pipeInput);
            }

            return new TestMultiInputTopic<K, V>(pipes, configuration, keySerdes, valueSerdes);
        }
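A minimal usage sketch for the method above. This is hedged: the driver variable and the PipeInput(topic, key, value) overload are assumptions about the surrounding test-driver API, not taken from this snippet.

        // Hedged usage sketch: pipe records into two input topics at once.
        // "driver" and the PipeInput(topic, key, value) overload are assumptions.
        var multiInput = driver.CreateMultiInputTopic<string, string>(
            new[] { "topic-a", "topic-b" },
            new StringSerDes(),
            new StringSerDes());

        multiInput.PipeInput("topic-a", "key1", "value1");
        multiInput.PipeInput("topic-b", "key2", "value2");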
Example #2

        internal static IThread Create(string threadId, string clientId, InternalTopologyBuilder builder,
                                       StreamMetricsRegistry streamMetricsRegistry, IStreamConfig configuration, IKafkaSupplier kafkaSupplier,
                                       IAdminClient adminClient, int threadInd)
        {
            string logPrefix  = $"stream-thread[{threadId}] ";
            var    log        = Logger.GetLogger(typeof(StreamThread));
            var    consumerID = $"{clientId}-StreamThread-{threadInd}";
            IProducer<byte[], byte[]> producer = null;

            // TODO: remove this limitation depending on the version of the Kafka cluster.
            // Due to limitations that KIP-447 overcomes, it is currently
            // necessary to use a separate producer per input partition. The
            // producerState dictionary is used to keep track of these, and of the
            // current consumed offset.
            // https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics
            // If the guarantee is AT_LEAST_ONCE, the producer is shared by all StreamTasks
            // in this thread; otherwise, one producer is created per StreamTask.
            if (configuration.Guarantee == ProcessingGuarantee.AT_LEAST_ONCE)
            {
                log.LogInformation("{LogPrefix}Creating shared producer client", logPrefix);
                producer = kafkaSupplier.GetProducer(configuration.ToProducerConfig(GetThreadProducerClientId(threadId)).Wrap(threadId));
            }

            var restoreConfig = configuration.ToConsumerConfig(GetRestoreConsumerClientId(consumerID));

            restoreConfig.GroupId = $"{configuration.ApplicationId}-restore-group";
            var restoreConsumer = kafkaSupplier.GetRestoreConsumer(restoreConfig);

            var storeChangelogReader = new StoreChangelogReader(
                configuration,
                restoreConsumer,
                threadId,
                streamMetricsRegistry);

            var taskCreator = new TaskCreator(builder, configuration, threadId, kafkaSupplier, producer, storeChangelogReader, streamMetricsRegistry);
            var manager     = new TaskManager(builder, taskCreator, adminClient, storeChangelogReader);

            var listener = new StreamsRebalanceListener(manager);

            log.LogInformation("{LogPrefix}Creating consumer client", logPrefix);
            var consumer = kafkaSupplier.GetConsumer(configuration.ToConsumerConfig(GetConsumerClientId(consumerID)).Wrap(threadId), listener);

            manager.Consumer = consumer;

            var thread = new StreamThread(threadId, consumerID, manager, consumer, builder, storeChangelogReader, streamMetricsRegistry, configuration);

            listener.Thread = thread;

            return thread;
        }
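A hedged sketch of how a caller might invoke this factory method. The dependencies (internalBuilder, metricsRegistry, config, supplier, adminClient) are assumed to be built elsewhere, and Start(CancellationToken) is an assumption about the IThread contract, not shown in this snippet.

        // Hedged sketch: create and start one stream thread (index 0).
        // All dependencies below are assumed to be constructed elsewhere.
        var thread = StreamThread.Create(
            "my-app-stream-thread-0",   // threadId
            "my-app-client",            // clientId
            internalBuilder,
            metricsRegistry,
            config,
            supplier,
            adminClient,
            0);                         // threadInd

        thread.Start(CancellationToken.None); // Start(CancellationToken) is assumed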
Example #3
        internal StreamTask GetTask(string topicName)
        {
            StreamTask task = null;
            var        id   = builder.GetTaskIdFromPartition(new Confluent.Kafka.TopicPartition(topicName, 0));

            if (tasks.ContainsKey(id))
            {
                task = tasks[id];
            }
            else
            {
                if (!builder.GetGlobalTopics().Contains(topicName))
                {
                    task = new StreamTask("thread-0",
                                          id,
                                          partitionsByTaskId[id],
                                          builder.BuildTopology(id),
                                          supplier.GetConsumer(configuration.ToConsumerConfig(), null),
                                          configuration,
                                          supplier,
                                          producer,
                                          new MockChangelogRegister(),
                                          metricsRegistry);
                    task.InitializeStateStores();
                    task.InitializeTopology();
                    task.RestorationIfNeeded();
                    task.CompleteRestoration();
                    tasks.Add(id, task);
                }
            }

            return task;
        }
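A hedged calling sketch for the method above: since it returns null when topicName is a global topic, callers should guard against that.

        // Hedged sketch: GetTask returns null for global topics, so check first.
        var task = GetTask("input-topic");
        if (task != null)
        {
            // Safe to drive the task here (process records, commit, ...).
        }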
Example #4
        internal StreamTask GetTask(string topicName)
        {
            StreamTask task;
            var        id = builder.GetTaskIdFromPartition(new Confluent.Kafka.TopicPartition(topicName, 0));

            if (tasks.ContainsKey(id))
            {
                task = tasks[id];
            }
            else
            {
                task = new StreamTask("thread-0",
                                      id,
                                      partitionsByTaskId[id],
                                      builder.BuildTopology(id),
                                      supplier.GetConsumer(configuration.ToConsumerConfig(), null),
                                      configuration,
                                      supplier,
                                      producer);
                task.InitializeStateStores();
                task.InitializeTopology();
                tasks.Add(id, task);
            }
            return task;
        }
Example #5

        internal StreamTask GetTask(string topicName)
        {
            StreamTask task;

            if (tasks.ContainsKey(topicName))
            {
                task = tasks[topicName];
            }
            else
            {
                task = new StreamTask("thread-0",
                                      new TaskId { Id = id++, Partition = 0, Topic = topicName },
                                      new Confluent.Kafka.TopicPartition(topicName, 0),
                                      builder.BuildTopology(topicName),
                                      supplier.GetConsumer(configuration.ToConsumerConfig(), null),
                                      configuration,
                                      supplier,
                                      producer);
                task.InitializeStateStores();
                task.InitializeTopology();
                tasks.Add(topicName, task);
            }
            return task;
        }
Example #6
        public SyncPipeOutput(string topic, TimeSpan consumeTimeout, IStreamConfig configuration, SyncProducer producer, CancellationToken token)
        {
            this.token = token;
            topicName  = topic;
            timeout    = consumeTimeout;
            consumer   = new SyncConsumer(configuration.ToConsumerConfig($"pipe-output-{configuration.ApplicationId}-{topicName}"), producer);
            consumer.Subscribe(topicName);
        }
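A hedged construction sketch for SyncPipeOutput. Only the constructor shown above is used; config and syncProducer are assumed to exist in scope.

        // Hedged sketch: wire a SyncPipeOutput to read back from "output-topic",
        // with a 1-second consume timeout and a cancellable token.
        using var cts = new CancellationTokenSource();
        var output = new SyncPipeOutput("output-topic", TimeSpan.FromSeconds(1),
                                        config, syncProducer, cts.Token);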
Example #7

        public void Init()
        {
            stateDir             = Path.Combine(".", Guid.NewGuid().ToString());
            config               = new StreamConfig<StringSerDes, StringSerDes>();
            config.ApplicationId = "test-storechangelog-app";
            config.StateDir      = stateDir;
            config.PollMs        = 100;

            supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var topicPart = new TopicPartition("topic", 0);

            changelogTopic = "store-changelog-topic";
            var changelogsTopics = new Dictionary<string, string>();

            changelogsTopics.Add("store", changelogTopic);
            var id = new TaskId
            {
                Id        = 0,
                Partition = 0
            };

            store = new RocksDbKeyValueStore("store");
            storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            stateMgr = new ProcessorStateManager(
                id,
                topicPart.ToSingle(),
                changelogsTopics,
                storeChangelogReader,
                new OffsetCheckpointFile(Path.Combine(config.StateDir, config.ApplicationId, $"{id.Id}-{id.Partition}"))
                );

            Mock<AbstractTask> moq = new Mock<AbstractTask>();

            moq.Setup(t => t.Id).Returns(new TaskId { Id = 0, Partition = 0 });

            context = new ProcessorContext(moq.Object, config, stateMgr, new StreamMetricsRegistry());
            store.Init(context, store);

            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key1", "value1"));
            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key2", "value2"));
        }
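The CreateMessage helper called at the end of Init is not part of this snippet; a plausible, purely hypothetical sketch (assuming UTF-8 string keys and values and Confluent.Kafka's Message type, with using System.Text;) is:

        // Hypothetical sketch of the CreateMessage helper used above; the real
        // implementation is not shown. Assumes UTF-8-encoded string payloads.
        private Message<byte[], byte[]> CreateMessage(string topic, string key, string value)
        {
            return new Message<byte[], byte[]>
            {
                Key   = Encoding.UTF8.GetBytes(key),
                Value = Encoding.UTF8.GetBytes(value)
            };
        }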
Example #8

        internal static IThread Create(string threadId, string clientId, InternalTopologyBuilder builder, IStreamConfig configuration, IKafkaSupplier kafkaSupplier, IAdminClient adminClient, int threadInd)
        {
            string logPrefix  = $"stream-thread[{threadId}] ";
            var    log        = Logger.GetLogger(typeof(StreamThread));
            var    consumerID = $"{clientId}-StreamThread-{threadInd}";
            IProducer<byte[], byte[]> producer = null;

            // Due to limitations that KIP-447 overcomes, it is currently
            // necessary to use a separate producer per input partition. The
            // producerState dictionary is used to keep track of these, and of the
            // current consumed offset.
            // https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics
            // If the guarantee is AT_LEAST_ONCE, the producer is shared by all StreamTasks
            // in this thread; otherwise, one producer is created per StreamTask.
            if (configuration.Guarantee == ProcessingGuarantee.AT_LEAST_ONCE)
            {
                log.Info($"{logPrefix}Creating shared producer client");
                producer = kafkaSupplier.GetProducer(configuration.ToProducerConfig(GetThreadProducerClientId(threadId)));
            }

            var taskCreator = new TaskCreator(builder, configuration, threadId, kafkaSupplier, producer);
            var manager     = new TaskManager(builder, taskCreator, adminClient);

            var listener = new StreamsRebalanceListener(manager);

            log.Info($"{logPrefix}Creating consumer client");
            var consumer = kafkaSupplier.GetConsumer(configuration.ToConsumerConfig(consumerID), listener);

            manager.Consumer = consumer;

            var thread = new StreamThread(threadId, consumerID, manager, consumer, builder, TimeSpan.FromMilliseconds(configuration.PollMs), configuration.CommitIntervalMs);

            listener.Thread = thread;

            return thread;
        }