Example #1
        public void DeleteQueue(IStreamConfig config)
        {
            // The queue name is only available on the RabbitMQ-specific configuration.
            var rabbitConfig = config as RabbitConfig
                ?? throw new ArgumentException($"Expected a {nameof(RabbitConfig)} instance", nameof(config));

            // Dispose the channel once the queue has been deleted.
            using (var channel = _connection.CreateModel())
            {
                channel.QueueDelete(rabbitConfig.QueueName);
            }
        }
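The method above only succeeds when the IStreamConfig it receives is actually a RabbitConfig that carries the queue name. A minimal sketch of such a configuration type, assuming the library's StreamConfig class can be subclassed; the shape below is illustrative, not taken from the source:

        // Hypothetical RabbitMQ-specific configuration: the queue name travels along with the stream config.
        public class RabbitConfig : StreamConfig
        {
            public string QueueName { get; set; }
        }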
Example #2
        /// <summary>
        /// Create a <see cref="KafkaStream"/> instance with your own <see cref="IKafkaSupplier" />.
        /// Please do not forget to call Close to avoid resource leaks!
        /// </summary>
        /// <param name="topology">the topology specifying the computational logic</param>
        /// <param name="configuration">the configuration for this stream instance</param>
        /// <param name="kafkaSupplier">the Kafka clients supplier which provides underlying producer and consumer clients for the new <see cref="KafkaStream"/> instance</param>
        public KafkaStream(Topology topology, IStreamConfig configuration, IKafkaSupplier kafkaSupplier)
        {
            this.topology      = topology;
            this.kafkaSupplier = kafkaSupplier;

            // check if ApplicationId & BootstrapServers have been set
            if (string.IsNullOrEmpty(configuration.ApplicationId) || string.IsNullOrEmpty(configuration.BootstrapServers))
            {
                throw new StreamConfigException("Stream configuration is not correct. Please set at least ApplicationId and BootstrapServers.");
            }

            var processID = Guid.NewGuid();

            clientId  = string.IsNullOrEmpty(configuration.ClientId) ? $"{configuration.ApplicationId.ToLower()}-{processID}" : configuration.ClientId;
            logPrefix = $"stream-application[{configuration.ApplicationId}] ";

            logger.Info($"{logPrefix} Start creation of the stream application with this configuration: {configuration}");

            // re-write the physical topology according to the config
            topology.Builder.RewriteTopology(configuration);

            // sanity check
            var processorTopology = topology.Builder.BuildTopology();

            threads = new IThread[configuration.NumStreamThreads];
            var threadState = new Dictionary <long, Processors.ThreadState>();

            List <StreamThreadStateStoreProvider> stateStoreProviders = new List <StreamThreadStateStoreProvider>();

            for (int i = 0; i < configuration.NumStreamThreads; ++i)
            {
                var threadId = $"{configuration.ApplicationId.ToLower()}-stream-thread-{i}";

                var adminClient = this.kafkaSupplier.GetAdmin(configuration.ToAdminConfig(StreamThread.GetSharedAdminClientId(clientId)));

                threads[i] = StreamThread.Create(
                    threadId,
                    clientId,
                    this.topology.Builder,
                    configuration,
                    this.kafkaSupplier,
                    adminClient,
                    i);

                threadState.Add(threads[i].Id, threads[i].State);

                stateStoreProviders.Add(new StreamThreadStateStoreProvider(threads[i], this.topology.Builder));
            }

            var manager = new StreamStateManager(this, threadState);

            foreach (var t in threads)
            {
                t.StateChanged += manager.OnChange;
            }

            queryableStoreProvider = new QueryableStoreProvider(stateStoreProviders);

            StreamState = State.CREATED;
        }
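A minimal usage sketch for this constructor, assuming the StreamBuilder / StreamConfig<,> APIs from the same library; the supplier is taken from the caller rather than invented, and the start member is left out because its exact name differs between versions:

        // Hedged sketch: any IKafkaSupplier implementation (for example a test double) can be injected.
        static KafkaStream BuildStream(IKafkaSupplier supplier)
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>();
            config.ApplicationId    = "demo-app";
            config.BootstrapServers = "localhost:9092";   // both are checked by the constructor above

            var builder = new StreamBuilder();
            builder.Stream<string, string>("input-topic").To("output-topic");

            // Do not forget to call Close() on the returned stream when shutting down.
            return new KafkaStream(builder.Build(), config, supplier);
        }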
Example #3
        private TopologyTestDriver(InternalTopologyBuilder builder, IStreamConfig config)
        {
            this.topologyBuilder = builder;
            this.configuration   = config;

            // ONLY 1 thread for test driver
            this.configuration.NumStreamThreads = 1;
            this.configuration.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;

            this.topicConfiguration = config.Clone();
            this.topicConfiguration.ApplicationId = $"test-driver-{this.configuration.ApplicationId}";

            var processID = Guid.NewGuid();
            var clientId  = string.IsNullOrEmpty(configuration.ClientId) ? $"{this.configuration.ApplicationId.ToLower()}-{processID}" : configuration.ClientId;

            this.configuration.ClientId = clientId;

            kafkaSupplier = new MockKafkaSupplier();
            pipeBuilder   = new PipeBuilder(kafkaSupplier);

            this.processorTopology = this.topologyBuilder.BuildTopology();

            this.threadTopology = StreamThread.Create(
                $"{this.configuration.ApplicationId.ToLower()}-stream-thread-0",
                clientId,
                builder,
                config,
                kafkaSupplier,
                kafkaSupplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin")),
                0);

            RunDriver();
        }
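A hedged usage sketch for the driver built here, assuming the public constructor that takes a Topology and the input-topic helper visible in these examples; anything beyond that is deliberately left open:

        var config = new StreamConfig<StringSerDes, StringSerDes>();
        config.ApplicationId = "test-app";

        var builder = new StreamBuilder();
        builder.Stream<string, string>("input").To("output");

        // The driver pins NumStreamThreads to 1 and AT_LEAST_ONCE, as the constructor above shows.
        using (var driver = new TopologyTestDriver(builder.Build(), config))
        {
            var input = driver.CreateInputTopic<string, string>("input");
            input.PipeInput("key1", "value1");
            // Read the result back through the matching output-topic helper of your version
            // (its exact name and signature are not shown in these examples).
        }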
 private StreamThread(string threadId, string clientId, TaskManager manager, IConsumer <byte[], byte[]> consumer,
                      InternalTopologyBuilder builder, IChangelogReader storeChangelogReader,
                      StreamMetricsRegistry streamMetricsRegistry, IStreamConfig configuration)
     : this(threadId, clientId, manager, consumer, builder, storeChangelogReader, streamMetricsRegistry, TimeSpan.FromMilliseconds(configuration.PollMs), configuration.CommitIntervalMs)
 {
     streamConfig = configuration;
 }
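The poll and commit cadence forwarded by this constructor come straight from the stream configuration. A small sketch of the two knobs, assuming the property names used elsewhere in these examples are settable (the values are illustrative):

        var config = new StreamConfig<StringSerDes, StringSerDes>();
        config.ApplicationId    = "demo-app";
        config.PollMs           = 100;     // maximum blocking time of each consumer poll, in ms
        config.CommitIntervalMs = 5000;    // how often processed offsets are committed, in ms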
Example #5
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder, IStreamConfig configuration, IStreamConfig topicConfiguration, IKafkaSupplier supplier, CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;

            this.token    = token;
            builder       = topologyBuilder;
            this.supplier = supplier ?? new SyncKafkaSupplier();
            producer      = this.supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;

            foreach (var sourceTopic in builder.GetSourceTopics().Union(builder.GetGlobalTopics()))
            {
                var part   = new TopicPartition(sourceTopic, 0);
                var taskId = builder.GetTaskIdFromPartition(part);
                if (partitionsByTaskId.ContainsKey(taskId))
                {
                    partitionsByTaskId[taskId].Add(part);
                }
                else
                {
                    partitionsByTaskId.Add(taskId, new List <TopicPartition> {
                        part
                    });
                }
            }
        }
 internal TestInputTopic(IPipeInput pipe, IStreamConfig configuration, ISerDes <K> keySerdes, ISerDes <V> valueSerdes)
 {
     this.pipe          = pipe;
     this.configuration = configuration;
     this.keySerdes     = keySerdes;
     this.valueSerdes   = valueSerdes;
 }
Example #7
 internal GlobalProcessorContext(
     IStreamConfig configuration,
     IStateManager stateManager,
     StreamMetricsRegistry streamMetricsRegistry)
     : base(null, configuration, stateManager, streamMetricsRegistry)
 {
 }
 internal TestMultiInputTopic(Dictionary <string, IPipeInput> pipes, IStreamConfig configuration, ISerDes <K> keySerdes, ISerDes <V> valueSerdes)
 {
     this.pipes         = pipes;
     this.configuration = configuration;
     this.keySerdes     = keySerdes;
     this.valueSerdes   = valueSerdes;
 }
Example #9
 internal void RewriteTopology(IStreamConfig config)
 {
     foreach (var storeBuilder in globalStateBuilders.Values)
     {
         GlobalStateStores.Add(storeBuilder.Name, storeBuilder.Build());
     }
 }
Example #10
 public SyncPipeOutput(string topic, TimeSpan consumeTimeout, IStreamConfig configuration, SyncProducer producer, CancellationToken token)
 {
     this.token = token;
     topicName  = topic;
     timeout    = consumeTimeout;
     consumer   = new SyncConsumer(configuration.ToConsumerConfig($"pipe-output-{configuration.ApplicationId}-{topicName}"), producer);
     consumer.Subscribe(topicName);
 }
        internal ProcessorContext(AbstractTask task, IStreamConfig configuration, IStateManager stateManager)
        {
            Task          = task;
            Configuration = configuration;
            States        = stateManager;

            SerDesContext = new SerDesContext(configuration);
        }
Example #12
 public TaskCreator(InternalTopologyBuilder builder, IStreamConfig configuration, string threadId, IKafkaSupplier kafkaSupplier, IProducer <byte[], byte[]> producer)
     : base()
 {
     this.builder       = builder;
     this.configuration = configuration;
     this.threadId      = threadId;
     this.kafkaSupplier = kafkaSupplier;
     this.producer      = producer;
 }
Example #13
        internal ProcessorContext(AbstractTask task, IStreamConfig configuration, IStateManager stateManager,
                                  StreamMetricsRegistry streamMetricsRegistry)
        {
            Task          = task;
            Configuration = configuration;
            States        = stateManager;
            Metrics       = streamMetricsRegistry;

            SerDesContext = new SerDesContext(configuration);
        }
        internal void RewriteTopology(IStreamConfig config)
        {
            foreach (var storeBuilder in globalStateBuilders.Values)
            {
                GlobalStateStores.Add(storeBuilder.Name, storeBuilder.Build());
            }

            config.DefaultKeySerDes?.Initialize(new SerDes.SerDesContext(config));
            config.DefaultValueSerDes?.Initialize(new SerDes.SerDesContext(config));
        }
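The two Initialize calls only have an effect when default serdes are configured. A short sketch of how the defaults are usually supplied, assuming the generic StreamConfig<,> form used later in these examples; the explicit setter form in the comment is an assumption about your version:

        // The generic type parameters become DefaultKeySerDes / DefaultValueSerDes,
        // which RewriteTopology then initializes with a SerDesContext.
        var config = new StreamConfig<StringSerDes, StringSerDes>();

        // Equivalent explicit form (assuming the non-generic class and public setters in your version):
        // var other = new StreamConfig();
        // other.DefaultKeySerDes   = new StringSerDes();
        // other.DefaultValueSerDes = new StringSerDes();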
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder, IStreamConfig configuration, IStreamConfig topicConfiguration, CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;

            this.token = token;
            builder    = topologyBuilder;
            supplier   = new SyncKafkaSupplier();
            producer   = supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;
        }
Example #16
        /// <summary>
        /// Create a <see cref="KafkaStream"/> instance with your own <see cref="IKafkaSupplier" />.
        /// Please do not forget to call Close to avoid resource leaks!
        /// </summary>
        /// <param name="topology">the topology specifying the computational logic</param>
        /// <param name="configuration">the configuration for this stream instance</param>
        /// <param name="kafkaSupplier">the Kafka clients supplier which provides underlying producer and consumer clients for the new <see cref="KafkaStream"/> instance</param>
        public KafkaStream(Topology topology, IStreamConfig configuration, IKafkaSupplier kafkaSupplier)
        {
            this.topology      = topology;
            this.configuration = configuration;
            this.kafkaSupplier = kafkaSupplier;

            var processID = Guid.NewGuid();

            clientId  = string.IsNullOrEmpty(configuration.ClientId) ? $"{this.configuration.ApplicationId.ToLower()}-{processID}" : configuration.ClientId;
            logPrefix = $"stream-application[{configuration.ApplicationId}] ";

            // re-write the physical topology according to the config
            topology.Builder.RewriteTopology(configuration);

            // sanity check
            this.processorTopology = topology.Builder.BuildTopology();

            this.threads = new IThread[this.configuration.NumStreamThreads];
            var threadState = new Dictionary <long, Processors.ThreadState>();

            List <StreamThreadStateStoreProvider> stateStoreProviders = new List <StreamThreadStateStoreProvider>();

            for (int i = 0; i < this.configuration.NumStreamThreads; ++i)
            {
                var threadId = $"{this.configuration.ApplicationId.ToLower()}-stream-thread-{i}";

                adminClient = this.kafkaSupplier.GetAdmin(configuration.ToAdminConfig(StreamThread.GetSharedAdminClientId(clientId)));

                this.threads[i] = StreamThread.Create(
                    threadId,
                    clientId,
                    this.topology.Builder,
                    configuration,
                    this.kafkaSupplier,
                    adminClient,
                    i);

                threadState.Add(this.threads[i].Id, this.threads[i].State);

                stateStoreProviders.Add(new StreamThreadStateStoreProvider(this.threads[i], this.topology.Builder));
            }

            var manager = new StreamStateManager(this, threadState);

            foreach (var t in threads)
            {
                t.StateChanged += manager.OnChange;
            }

            this.queryableStoreProvider = new QueryableStoreProvider(stateStoreProviders);

            StreamState = State.CREATED;
        }
        internal TestOutputTopic(IPipeOutput pipe, IStreamConfig configuration, ISerDes<K> keySerdes, ISerDes<V> valueSerdes)
        {
            this.pipe = pipe;
            this.configuration = configuration;
            this.keySerdes = keySerdes;
            this.valueSerdes = valueSerdes;

            if (this.keySerdes != null)
                this.keySerdes.Initialize(new SerDesContext(configuration));
            if (this.valueSerdes != null)
                this.valueSerdes.Initialize(new SerDesContext(configuration));
        }
 public GlobalStreamThreadFactory(ProcessorTopology topology,
                                  string threadClientId,
                                  IConsumer <byte[], byte[]> globalConsumer,
                                  IStreamConfig configuration,
                                  IAdminClient adminClient)
 {
     this.adminClient    = adminClient;
     this.topology       = topology;
     this.threadClientId = threadClientId;
     this.configuration  = configuration;
     this.globalConsumer = globalConsumer;
 }
        internal static IThread Create(string threadId, string clientId, InternalTopologyBuilder builder,
                                       StreamMetricsRegistry streamMetricsRegistry, IStreamConfig configuration, IKafkaSupplier kafkaSupplier,
                                       IAdminClient adminClient, int threadInd)
        {
            string logPrefix  = $"stream-thread[{threadId}] ";
            var    log        = Logger.GetLogger(typeof(StreamThread));
            var    customerID = $"{clientId}-StreamThread-{threadInd}";
            IProducer <byte[], byte[]> producer = null;

            // TODO : remove this limitation depending on the Kafka cluster version
            // Due to limitations that KIP-447 overcomes, it is currently necessary to use
            // a separate producer per input partition. The producerState dictionary is used
            // to keep track of these, and of the current consumed offset.
            // https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics
            // If Guarantee is AT_LEAST_ONCE, the same producer is shared by all StreamTasks in
            // this thread; otherwise each StreamTask gets its own producer.
            if (configuration.Guarantee == ProcessingGuarantee.AT_LEAST_ONCE)
            {
                log.LogInformation("{LogPrefix}Creating shared producer client", logPrefix);
                producer = kafkaSupplier.GetProducer(configuration.ToProducerConfig(GetThreadProducerClientId(threadId)).Wrap(threadId));
            }

            var restoreConfig = configuration.ToConsumerConfig(GetRestoreConsumerClientId(customerID));

            restoreConfig.GroupId = $"{configuration.ApplicationId}-restore-group";
            var restoreConsumer = kafkaSupplier.GetRestoreConsumer(restoreConfig);

            var storeChangelogReader = new StoreChangelogReader(
                configuration,
                restoreConsumer,
                threadId,
                streamMetricsRegistry);

            var taskCreator = new TaskCreator(builder, configuration, threadId, kafkaSupplier, producer, storeChangelogReader, streamMetricsRegistry);
            var manager     = new TaskManager(builder, taskCreator, adminClient, storeChangelogReader);

            var listener = new StreamsRebalanceListener(manager);

            log.LogInformation("{LogPrefix}Creating consumer client", logPrefix);
            var consumer = kafkaSupplier.GetConsumer(configuration.ToConsumerConfig(GetConsumerClientId(customerID)).Wrap(threadId), listener);

            manager.Consumer = consumer;

            var thread = new StreamThread(threadId, customerID, manager, consumer, builder, storeChangelogReader, streamMetricsRegistry, configuration);

            listener.Thread = thread;

            return thread;
        }
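The branch above means the processing guarantee decides whether one producer is shared per thread or created per task. A minimal sketch of switching the guarantee on the configuration; the property and AT_LEAST_ONCE member appear in this code, while EXACTLY_ONCE is assumed to exist alongside it:

        var config = new StreamConfig<StringSerDes, StringSerDes>();
        config.ApplicationId = "demo-app";

        // Shared producer for all StreamTasks of the thread (the branch taken above):
        config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;

        // One producer per StreamTask, as required until KIP-447 semantics are available:
        // config.Guarantee = ProcessingGuarantee.EXACTLY_ONCE;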
Example #20
        public TaskCreator(InternalTopologyBuilder builder, IStreamConfig configuration, string threadId,
                           IKafkaSupplier kafkaSupplier, IProducer <byte[], byte[]> producer, StoreChangelogReader storeChangelogReader,
                           StreamMetricsRegistry streamMetricsRegistry)
        {
            this.builder               = builder;
            this.configuration         = configuration;
            this.threadId              = threadId;
            this.kafkaSupplier         = kafkaSupplier;
            this.producer              = producer;
            this.storeChangelogReader  = storeChangelogReader;
            this.streamMetricsRegistry = streamMetricsRegistry;

            createTaskSensor = ThreadMetrics.CreateTaskSensor(threadId, streamMetricsRegistry);
        }
Example #21
        protected AbstractTask(TaskId id, TopicPartition partition, ProcessorTopology topology, IConsumer <byte[], byte[]> consumer, IStreamConfig config)
        {
            log       = Logger.GetLogger(GetType());
            logPrefix = $"stream-task[{id.Topic}|{id.Partition}] ";

            Partition = partition;
            Id        = id;
            Topology  = topology;

            this.consumer = consumer;
            configuration = config;

            stateMgr = new ProcessorStateManager(id, partition);
        }
        public StoreChangelogReader(
            IStreamConfig config,
            IConsumer <byte[], byte[]> restoreConsumer,
            string threadId,
            StreamMetricsRegistry metricsRegistry)
        {
            this.restoreConsumer = restoreConsumer;
            this.threadId        = threadId;
            this.metricsRegistry = metricsRegistry;

            pollTimeMs = config.PollMs;
            maxPollRestoringRecords = config.MaxPollRestoringRecords;
            changelogs = new Dictionary <TopicPartition, ChangelogMetadata>();
        }
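Both values read by this constructor are plain configuration properties. A short restore-tuning sketch, assuming the same property names are settable (the numbers are illustrative only):

        var config = new StreamConfig<StringSerDes, StringSerDes>();
        config.PollMs                  = 100;    // poll timeout used while restoring, in ms
        config.MaxPollRestoringRecords = 1000;   // upper bound of records drained per restore poll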
Example #23
        public GlobalStreamThread(string threadClientId,
                                  IConsumer <byte[], byte[]> globalConsumer,
                                  IStreamConfig configuration,
                                  IGlobalStateMaintainer globalStateMaintainer)
        {
            logPrefix = $"global-stream-thread {threadClientId} ";

            this.globalConsumer        = globalConsumer;
            this.configuration         = configuration;
            this.globalStateMaintainer = globalStateMaintainer;

            thread = new Thread(Run);
            State  = GlobalThreadState.CREATED;
        }
Example #24
        public TaskSynchronousTopologyDriver(string clientId, InternalTopologyBuilder topologyBuilder,
                                             IStreamConfig configuration, IStreamConfig topicConfiguration, IKafkaSupplier supplier,
                                             CancellationToken token)
        {
            this.configuration          = configuration;
            this.configuration.ClientId = clientId;
            this.topicConfiguration     = topicConfiguration;
            metricsRegistry             = new StreamMetricsRegistry(clientId, MetricsRecordingLevel.DEBUG);

            this.token    = token;
            builder       = topologyBuilder;
            this.supplier = supplier ?? new SyncKafkaSupplier();
            this.supplier.MetricsRegistry = metricsRegistry;
            producer = this.supplier.GetProducer(configuration.ToProducerConfig()) as SyncProducer;

            foreach (var sourceTopic in builder
                     .GetSourceTopics())
            {
                var part   = new TopicPartition(sourceTopic, 0);
                var taskId = builder.GetTaskIdFromPartition(part);
                if (partitionsByTaskId.ContainsKey(taskId))
                {
                    partitionsByTaskId[taskId].Add(part);
                }
                else
                {
                    partitionsByTaskId.Add(taskId, new List <TopicPartition> {
                        part
                    });
                }
            }

            ProcessorTopology globalTaskTopology = topologyBuilder.BuildGlobalStateTopology();

            hasGlobalTopology = globalTaskTopology != null;
            if (hasGlobalTopology)
            {
                var globalConsumer =
                    this.supplier.GetGlobalConsumer(configuration.ToGlobalConsumerConfig($"{clientId}-global-consumer"));
                var adminClient  = this.supplier.GetAdmin(configuration.ToAdminConfig($"{clientId}-admin"));
                var stateManager =
                    new GlobalStateManager(globalConsumer, globalTaskTopology, adminClient, configuration);
                globalProcessorContext = new GlobalProcessorContext(configuration, stateManager, metricsRegistry);
                stateManager.SetGlobalProcessorContext(globalProcessorContext);
                globalTask = new GlobalStateUpdateTask(stateManager, globalTaskTopology, globalProcessorContext);

                globalTask.Initialize();
            }
        }
Example #25
        internal DefaultKafkaClientSupplier(
            KafkaLoggerAdapter loggerAdapter,
            IStreamConfig streamConfig,
            DefaultKafkaClientBuilder builderKafkaHandler)
        {
            if (loggerAdapter == null)
            {
                throw new ArgumentNullException(nameof(loggerAdapter));
            }

            this.loggerAdapter       = loggerAdapter;
            this.streamConfig        = streamConfig;
            exposeLibrdKafka         = streamConfig?.ExposeLibrdKafkaStats ?? false;
            this.builderKafkaHandler = builderKafkaHandler ?? new DefaultKafkaClientBuilder();
        }
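The exposeLibrdKafka flag captured above is driven by a single configuration property. A one-line sketch of enabling it, assuming the property is settable on the concrete StreamConfig:

        var config = new StreamConfig<StringSerDes, StringSerDes>();
        config.ExposeLibrdKafkaStats = true;   // assumption: lets the supplier surface librdkafka statistics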
        public void Init()
        {
            stateDir             = Path.Combine(".", Guid.NewGuid().ToString());
            config               = new StreamConfig <StringSerDes, StringSerDes>();
            config.ApplicationId = "test-storechangelog-app";
            config.StateDir      = stateDir;
            config.PollMs        = 100;

            supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var topicPart = new TopicPartition("topic", 0);

            changelogTopic = "store-changelog-topic";
            var changelogsTopics = new Dictionary <string, string>();

            changelogsTopics.Add("store", changelogTopic);
            var id = new TaskId
            {
                Id        = 0,
                Partition = 0
            };

            store = new RocksDbKeyValueStore("store");
            storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            stateMgr = new ProcessorStateManager(
                id,
                topicPart.ToSingle(),
                changelogsTopics,
                storeChangelogReader,
                new OffsetCheckpointFile(Path.Combine(config.StateDir, config.ApplicationId, $"{id.Id}-{id.Partition}"))
                );

            Mock <AbstractTask> moq = new Mock <AbstractTask>();

            moq.Setup(t => t.Id).Returns(new TaskId {
                Id = 0, Partition = 0
            });

            context = new ProcessorContext(moq.Object, config, stateMgr, new StreamMetricsRegistry());
            store.Init(context, store);

            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key1", "value1"));
            producer.Produce(changelogTopic, CreateMessage(changelogTopic, "key2", "value2"));
        }
Example #27
        protected AbstractTask(TaskId id, IEnumerable <TopicPartition> partition, ProcessorTopology topology, IConsumer <byte[], byte[]> consumer, IStreamConfig config)
        {
            log       = Logger.GetLogger(GetType());
            logPrefix = $"stream-task[{id.Id}|{id.Partition}] ";

            var topicPartitions = partition.ToList();

            Partition = topicPartitions;
            Id        = id;
            Topology  = topology;

            this.consumer = consumer;
            configuration = config;

            stateMgr = new ProcessorStateManager(id, topicPartitions);
        }
Example #28
        internal TestMultiInputTopic(Dictionary <string, IPipeInput> pipes, IStreamConfig configuration, ISerDes <K> keySerdes, ISerDes <V> valueSerdes)
        {
            this.pipes         = pipes;
            this.configuration = configuration;
            this.keySerdes     = keySerdes;
            this.valueSerdes   = valueSerdes;

            if (this.keySerdes != null)
            {
                this.keySerdes.Initialize(new SerDesContext(configuration));
            }
            if (this.valueSerdes != null)
            {
                this.valueSerdes.Initialize(new SerDesContext(configuration));
            }
        }
        /// <summary>
        /// Create a <see cref="KafkaStream"/> instance.
        /// Please do not forget to call Close to avoid resource leaks!
        /// </summary>
        /// <param name="topology">the topology specifying the computational logic</param>
        /// <param name="configuration">the configuration for this stream instance</param>
        public KafkaStream(Topology topology, IStreamConfig configuration)
        {
            this.topology      = topology;
            this.configuration = configuration;
            this.kafkaSupplier = new DefaultKafkaClientSupplier(new KafkaLoggerAdapter(configuration));

            var processID = Guid.NewGuid();

            clientId  = string.IsNullOrEmpty(configuration.ClientId) ? $"{this.configuration.ApplicationId.ToLower()}-{processID}" : configuration.ClientId;
            logPrefix = $"stream-application[{configuration.ApplicationId}] ";

            // sanity check
            this.processorTopology = topology.Builder.BuildTopology();

            this.threads = new IThread[this.configuration.NumStreamThreads];
            var threadState = new Dictionary <long, Processors.ThreadState>();

            for (int i = 0; i < this.configuration.NumStreamThreads; ++i)
            {
                var threadId = $"{this.configuration.ApplicationId.ToLower()}-stream-thread-{i}";

                adminClient = this.kafkaSupplier.GetAdmin(configuration.ToAdminConfig(StreamThread.GetSharedAdminClientId(clientId)));

                this.threads[i] = StreamThread.Create(
                    threadId,
                    clientId,
                    this.topology.Builder,
                    configuration,
                    this.kafkaSupplier,
                    adminClient,
                    i);

                threadState.Add(this.threads[i].Id, this.threads[i].State);
            }

            var manager = new StreamStateManager(this, threadState);

            foreach (var t in threads)
            {
                t.StateChanged += manager.OnChange;
            }

            StreamState = State.CREATED;
        }
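For completeness, a minimal end-to-end sketch for this simpler constructor with the default client supplier; the start member is hedged because its name differs between library versions, while Close comes from the summary above:

        var config = new StreamConfig<StringSerDes, StringSerDes>();
        config.ApplicationId    = "word-count-app";
        config.BootstrapServers = "localhost:9092";

        var builder = new StreamBuilder();
        builder.Stream<string, string>("input-topic").To("output-topic");

        var stream = new KafkaStream(builder.Build(), config);
        // start the stream here (the exact Start/StartAsync member depends on the version),
        // and call Close() when shutting down, as the summary above asks.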
        internal TopologyTestDriver(InternalTopologyBuilder builder, IStreamConfig config, Mode mode, IKafkaSupplier supplier)
        {
            topologyBuilder = builder;
            configuration   = config;

            // ONLY 1 thread for the test driver (only relevant for ASYNC_CLUSTER_IN_MEMORY)
            configuration.NumStreamThreads = 1;
            configuration.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;

            topicConfiguration = config.Clone();
            topicConfiguration.ApplicationId = $"test-driver-{configuration.ApplicationId}";

            var clientId = string.IsNullOrEmpty(configuration.ClientId) ? $"{configuration.ApplicationId.ToLower()}-{Guid.NewGuid()}" : configuration.ClientId;

            // sanity check
            topologyBuilder.BuildTopology();

            topologyBuilder.RewriteTopology(configuration);

            switch (mode)
            {
            case Mode.SYNC_TASK:
                behavior = new TaskSynchronousTopologyDriver(
                    clientId,
                    topologyBuilder,
                    configuration,
                    topicConfiguration,
                    supplier,
                    tokenSource.Token);
                break;

            case Mode.ASYNC_CLUSTER_IN_MEMORY:
                behavior = new ClusterInMemoryTopologyDriver(
                    clientId,
                    topologyBuilder,
                    configuration,
                    topicConfiguration,
                    supplier,
                    tokenSource.Token);
                break;
            }

            behavior.StartDriver();
        }