public void ApplyInternalChangelogTopics()
{
    AdminClientConfig adminConfig = new AdminClientConfig();
    adminConfig.BootstrapServers = "localhost:9092";
    StreamConfig streamConfig = new StreamConfig();
    DefaultTopicManager manager = new DefaultTopicManager(streamConfig, kafkaSupplier.GetAdmin(adminConfig));

    IDictionary<string, InternalTopicConfig> topics = new Dictionary<string, InternalTopicConfig>();
    topics.Add("topic", new UnwindowedChangelogTopicConfig
    {
        Name = "topic",
        NumberPartitions = 1
    });
    topics.Add("topic1", new UnwindowedChangelogTopicConfig
    {
        Name = "topic1",
        NumberPartitions = 1
    });

    var r = manager.ApplyAsync(0, topics).GetAwaiter().GetResult().ToList();

    Assert.AreEqual(2, r.Count);
    Assert.AreEqual("topic", r[0]);
    Assert.AreEqual("topic1", r[1]);
}
public void GetCurrentPartitionMetadataTests()
{
    var source = new System.Threading.CancellationTokenSource();
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 1;
    config.FollowMetadata = true;
    var configConsumer = config.Clone();
    configConsumer.ApplicationId = "test-consumer";
    int? h = null;

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder
        .Stream<string, string>("topic")
        .MapValues((v) =>
        {
            h = StreamizMetadata.GetCurrentPartitionMetadata();
            return v;
        })
        .To("output");

    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    thread.Start(source.Token);
    producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key1", new SerializationContext()),
        Value = serdes.Serialize("coucou", new SerializationContext())
    });

    consumer.Subscribe("output");
    ConsumeResult<byte[], byte[]> result = null;
    do
    {
        result = consumer.Consume(100);
    } while (result == null);

    source.Cancel();
    thread.Dispose();

    Assert.NotNull(h);
    Assert.AreEqual(0, h);
}
public void CheckSetStateStartingWithDeadThread()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, new StreamMetricsRegistry(), config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    // MUST BE IN CREATED STATE
    Assert.AreEqual(ThreadState.CREATED, thread.State);
    thread.SetState(ThreadState.STARTING);
    thread.SetState(ThreadState.PENDING_SHUTDOWN);
    thread.SetState(ThreadState.DEAD);
    thread.Start(default);
    // Starting a dead thread must be a no-op: the thread stays DEAD
    Assert.AreEqual(ThreadState.DEAD, thread.State);
}
public void TaskManagerCommitWithoutCommitNeeded()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topology = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

    var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
    var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1),
            new TopicPartition("topic", 2),
            new TopicPartition("topic", 3),
        });

    Assert.AreEqual(4, taskManager.ActiveTasks.Count());
    // No record has been processed, so no task needs a commit
    Assert.AreEqual(0, taskManager.CommitAll());

    taskManager.Close();
}
public void StreamThreadCommitIntervalWorkflow()
{
    var source = new System.Threading.CancellationTokenSource();
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 1;
    config.CommitIntervalMs = 1;

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);
    consumer.Subscribe("topic2");

    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, new StreamMetricsRegistry(), config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    thread.Start(source.Token);
    producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key1", new SerializationContext()),
        Value = serdes.Serialize("coucou", new SerializationContext())
    });

    // WAIT UNTIL THE STREAM THREAD HAS PROCESSED THE MESSAGE
    System.Threading.Thread.Sleep(100);

    var message = consumer.Consume(100);

    Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
    Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));

    var offsets = thread.GetCommittedOffsets(
        new List<TopicPartition> { new TopicPartition("topic", 0) },
        TimeSpan.FromSeconds(10)).ToList();

    Assert.AreEqual(1, offsets.Count);
    Assert.AreEqual(1, offsets[0].Offset.Value);
    Assert.AreEqual(0, offsets[0].TopicPartition.Partition.Value);
    Assert.AreEqual("topic", offsets[0].Topic);

    source.Cancel();
    thread.Dispose();
}
public void TaskManagerAssignedUnknownPartitions()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic")
        .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
        .To("topic2");
    var topology = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);
    var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

    var storeChangelogReader = new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
    var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer, storeChangelogReader, new StreamMetricsRegistry());
    var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1)
        });

    taskManager.RevokeTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 1)
        });

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1),
            new TopicPartition("topic", 2)
        });

    taskManager.TryToCompleteRestoration();

    Assert.AreEqual(3, taskManager.ActiveTasks.Count());
    Assert.AreEqual(0, taskManager.RevokedTasks.Count());

    taskManager.Close();
}
public void CreateStreamThread()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, new StreamMetricsRegistry(), config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    Assert.AreEqual("thread-0", thread.Name);
}
public void CheckIncorrectStateTransition()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, new StreamMetricsRegistry(), config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    // MUST BE IN CREATED STATE
    Assert.AreEqual(ThreadState.CREATED, thread.State);
    // CREATED -> DEAD is not a valid transition
    Assert.Throws<StreamsException>(() => thread.SetState(ThreadState.DEAD));
}
public void CheckSetStateWithoutStateChangedHandler()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    // MUST BE IN CREATED STATE
    Assert.AreEqual(ThreadState.CREATED, thread.State);
    thread.SetState(ThreadState.STARTING);
    Assert.AreEqual(ThreadState.STARTING, thread.State);
}
public void StreamThreadNormalWorkflow()
{
    bool metricsReporterCalled = false;
    List<ThreadState> allStates = new List<ThreadState>();
    var expectedStates = new List<ThreadState>
    {
        ThreadState.CREATED,
        ThreadState.STARTING,
        ThreadState.PARTITIONS_ASSIGNED,
        ThreadState.RUNNING,
        ThreadState.PENDING_SHUTDOWN,
        ThreadState.DEAD
    };

    var source = new System.Threading.CancellationTokenSource();
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 1;
    config.MetricsReporter = (sensor) => { metricsReporterCalled = true; };
    config.AddOrUpdate(StreamConfig.metricsIntervalMsCst, 10);

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);
    consumer.Subscribe("topic2");

    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, new StreamMetricsRegistry(), config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    allStates.Add(thread.State);
    thread.StateChanged += (t, o, n) =>
    {
        Assert.IsInstanceOf<ThreadState>(n);
        allStates.Add(n as ThreadState);
    };

    thread.Start(source.Token);
    producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key1", new SerializationContext()),
        Value = serdes.Serialize("coucou", new SerializationContext())
    });

    // WAIT UNTIL THE STREAM THREAD HAS PROCESSED THE MESSAGE
    System.Threading.Thread.Sleep(100);

    var message = consumer.Consume(100);

    source.Cancel();
    thread.Dispose();

    Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
    Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));
    Assert.AreEqual(expectedStates, allStates);
    Assert.IsTrue(metricsReporterCalled);
}
// [Test] // TODO: fix this test
public void WorkflowCompleteBufferedRecordsTest()
{
    int maxBuffered = 10;
    var token = new System.Threading.CancellationTokenSource();
    var serdes = new StringSerDes();

    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-group";
    config.MaxTaskIdleMs = (long)TimeSpan.FromSeconds(100).TotalMilliseconds;
    config.BufferedRecordsPerPartition = maxBuffered;
    config.PollMs = 10;

    var builder = new StreamBuilder();
    var stream1 = builder.Stream<string, string>("topic1");
    var stream2 = builder.Stream<string, string>("topic2");

    stream1
        .Join(stream2, (v1, v2) => $"{v1}-{v2}", JoinWindowOptions.Of(TimeSpan.FromSeconds(10)))
        .To("output");

    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);
    consumer.Subscribe("output");

    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    thread.Start(token.Token);

    for (int i = 0; i < maxBuffered + 1; ++i)
    {
        producer.Produce("topic1", new Message<byte[], byte[]>
        {
            Key = serdes.Serialize("key", new SerializationContext()),
            Value = serdes.Serialize($"coucou{i}", new SerializationContext())
        });
    }

    // THE STREAM THREAD'S CONSUMER PAUSES AFTER maxBuffered + 1 MESSAGES
    System.Threading.Thread.Sleep(50);

    // Add one more message while the stream thread's consumer is paused
    producer.Produce("topic1", new Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key", new SerializationContext()),
        Value = serdes.Serialize($"coucou{maxBuffered + 1}", new SerializationContext())
    });

    Assert.AreEqual(1, thread.ActiveTasks.Count());
    var task = thread.ActiveTasks.ToArray()[0];
    Assert.IsNotNull(task.Grouper);
    Assert.IsFalse(task.Grouper.AllPartitionsBuffered);
    Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered());
    Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered(new TopicPartition("topic1", 0)));
    Assert.AreEqual(0, task.Grouper.NumBuffered(new TopicPartition("topic2", 0)));

    producer.Produce("topic2", new Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key", new SerializationContext()),
        Value = serdes.Serialize("test", new SerializationContext())
    });

    // Consume until all maxBuffered + 2 joined records have arrived
    // (the original loop condition `<= 12` could never exit with exactly 12 records)
    List<ConsumeResult<byte[], byte[]>> records = new List<ConsumeResult<byte[], byte[]>>();
    do
    {
        records.AddRange(consumer.ConsumeRecords(TimeSpan.FromMilliseconds(100)).ToList());
    } while (records.Count() < maxBuffered + 2);

    Assert.AreEqual(maxBuffered + 2, records.Count());
    for (int i = 0; i < maxBuffered + 2; ++i)
    {
        var message = records.ToArray()[i];
        Assert.AreEqual("key", serdes.Deserialize(message.Message.Key, new SerializationContext()));
        Assert.IsTrue(serdes.Deserialize(message.Message.Value, new SerializationContext()).Contains($"coucou{i}-"));
    }

    token.Cancel();
    thread.Dispose();
}
public void StandardWorkflowTaskManager()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic").To("topic2");
    var topology = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

    var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
    var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1),
            new TopicPartition("topic", 2),
            new TopicPartition("topic", 3),
        });

    Assert.AreEqual(4, taskManager.ActiveTasks.Count());
    for (int i = 0; i < 4; ++i)
    {
        var task = taskManager.ActiveTaskFor(new TopicPartition("topic", i));
        Assert.IsNotNull(task);
        Assert.AreEqual("test-app", task.ApplicationId);
        Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
        Assert.IsFalse(task.CommitNeeded);
        Assert.IsFalse(task.HasStateStores);
    }

    // Revoke 2 partitions
    taskManager.RevokeTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 2),
            new TopicPartition("topic", 3),
        });

    Assert.AreEqual(2, taskManager.ActiveTasks.Count());
    Assert.AreEqual(2, taskManager.RevokedTasks.Count());

    for (int i = 0; i < 2; ++i)
    {
        var task = taskManager.ActiveTaskFor(new TopicPartition("topic", i));
        Assert.IsNotNull(task);
        Assert.AreEqual("test-app", task.ApplicationId);
        Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
        Assert.IsFalse(task.CommitNeeded);
        Assert.IsFalse(task.HasStateStores);
    }

    // Partition 2 has been revoked, so no active task handles it anymore
    var taskFailed = taskManager.ActiveTaskFor(new TopicPartition("topic", 2));
    Assert.IsNull(taskFailed);

    taskManager.Close();
    Assert.AreEqual(0, taskManager.ActiveTasks.Count());
    Assert.AreEqual(0, taskManager.RevokedTasks.Count());
}
public void TaskManagerCommit()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();
    builder.Stream<string, string>("topic")
        .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
        .To("topic2");
    var topology = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

    var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
    var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1),
            new TopicPartition("topic", 2),
            new TopicPartition("topic", 3),
        });

    Assert.AreEqual(4, taskManager.ActiveTasks.Count());

    var part = new TopicPartition("topic", 0);
    var task = taskManager.ActiveTaskFor(part);
    List<ConsumeResult<byte[], byte[]>> messages = new List<ConsumeResult<byte[], byte[]>>();
    int offset = 0;
    for (int i = 0; i < 5; ++i)
    {
        messages.Add(
            new ConsumeResult<byte[], byte[]>
            {
                Message = new Message<byte[], byte[]>
                {
                    Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                    Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                },
                TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
            });
    }

    task.AddRecords(messages);

    Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
    }

    // ONLY ONE TASK HAS RECEIVED RECORDS
    Assert.AreEqual(1, taskManager.CommitAll());

    // CHECK THE OUTPUT IN TOPIC topic2
    consumer.Subscribe("topic2");
    List<ConsumeResult<byte[], byte[]>> results = new List<ConsumeResult<byte[], byte[]>>();
    ConsumeResult<byte[], byte[]> result = null;
    do
    {
        result = consumer.Consume(100);
        if (result != null)
        {
            results.Add(result);
            consumer.Commit(result);
        }
    } while (result != null);

    Assert.AreEqual(5, results.Count);
    for (int i = 0; i < 5; ++i)
    {
        Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
        Assert.AreEqual($"VALUE{i + 1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
    }

    // THIS TASK HAS NO RECORDS
    part = new TopicPartition("topic", 2);
    task = taskManager.ActiveTaskFor(part);
    Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
    Assert.IsFalse(task.Process());

    taskManager.Close();
}
public async Task exec(IConfiguration config, IServiceProvider services)
{
    Console.WriteLine("Process");
    var destTopic = config["spring.cloud.stream.bindings.output.destination"];
    Console.WriteLine(destTopic);

    using (var scope = services.CreateScope())
    {
        this._dataService = scope.ServiceProvider.GetRequiredService<IDataService>();
        bool isRunningState = false;
        var timeout = TimeSpan.FromSeconds(10);
        DateTime dt = DateTime.Now;
        Order[] capture = this._dataService.readData();

        // Inject the data we just read into the stream
        var sConfig = new StreamConfig<StringSerDes, StringSerDes>();
        sConfig.ApplicationId = config["SPRING_CLOUD_APPLICATION_GUID"];
        sConfig.BootstrapServers = config["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"];
        sConfig.SchemaRegistryUrl = config["SchemaRegistryUrl"];
        sConfig.AutoRegisterSchemas = true;
        sConfig.NumStreamThreads = 10;
        sConfig.Acks = Acks.All;
        sConfig.AddConsumerConfig("allow.auto.create.topics", "true");
        sConfig.InnerExceptionHandler = (e) => ExceptionHandlerResponse.CONTINUE;

        var schemaRegistryClient = new CachedSchemaRegistryClient(new SchemaRegistryConfig
        {
            Url = sConfig.SchemaRegistryUrl
        });

        var supplier = new SyncKafkaSupplier(new KafkaLoggerAdapter(sConfig));
        var producerConfig = sConfig.ToProducerConfig();
        var adminConfig = sConfig.ToAdminConfig(sConfig.ApplicationId);
        var admin = supplier.GetAdmin(adminConfig);

        // try
        // {
        //     var topic = new TopicSpecification
        //     {
        //         Name = destTopic,
        //         NumPartitions = 1,
        //         ReplicationFactor = 3
        //     };
        //     var topicProduct = new TopicSpecification
        //     {
        //         Name = "product-external",
        //         NumPartitions = 1,
        //         ReplicationFactor = 3
        //     };
        //     IList<TopicSpecification> topics = new List<TopicSpecification>();
        //     topics.Add(topic);
        //     topics.Add(topicProduct);
        //     await admin.CreateTopicsAsync(topics);
        // }
        // catch (Exception topicExists)
        // {
        //     Console.WriteLine("Topic already exists");
        //     Console.Write(topicExists);
        // }

        var producer = supplier.GetProducer(producerConfig);

        StreamBuilder builder = new StreamBuilder();
        var serdes = new SchemaAvroSerDes<Order>();
        var keySerdes = new Int32SerDes();

        builder.Table(destTopic, keySerdes, serdes, InMemory<int, Order>.As(config["table"]));

        var t = builder.Build();
        KafkaStream stream = new KafkaStream(t, sConfig, supplier);

        stream.StateChanged += (old, @new) =>
        {
            if (@new.Equals(KafkaStream.State.RUNNING))
            {
                isRunningState = true;
            }
        };

        await stream.StartAsync();
        while (!isRunningState)
        {
            Thread.Sleep(250);
            if (DateTime.Now > dt + timeout)
            {
                break;
            }
        }

        if (isRunningState)
        {
            // Create a well-formatted Endpoint in the external topic
            var endpProducer = new ProducerBuilder<byte[], Endpoint>(producerConfig)
                .SetValueSerializer(new AvroSerializer<Endpoint>(schemaRegistryClient,
                    new AvroSerializerConfig { AutoRegisterSchemas = true }).AsSyncOverAsync())
                .Build();

            // Create a well-formatted Product in the external topic
            var productProducer = new ProducerBuilder<byte[], Product>(producerConfig)
                .SetValueSerializer(new AvroSerializer<Product>(schemaRegistryClient,
                    new AvroSerializerConfig { AutoRegisterSchemas = true }).AsSyncOverAsync())
                .Build();

            for (int k = 1; k < 10; k++)
            {
                endpProducer.Produce("api-endpoints", new Message<byte[], Endpoint>
                {
                    Key = new Int32SerDes().Serialize(k, new SerializationContext()),
                    Value = new Endpoint
                    {
                        endpoint_id = "endpoint" + k,
                        endpoint_url = "http://endpoint" + k + "/",
                        http_method = "POST"
                    }
                }, (d) =>
                {
                    if (d.Status == PersistenceStatus.Persisted)
                    {
                        Console.WriteLine("Endpoint Message sent !");
                    }
                });

                productProducer.Produce("product-external", new Message<byte[], Product>
                {
                    Key = new Int32SerDes().Serialize(1, new SerializationContext()),
                    Value = new Product
                    {
                        name = "Producto de Software",
                        price = 1234.5F,
                        product_id = 3
                    }
                }, (d) =>
                {
                    if (d.Status == PersistenceStatus.Persisted)
                    {
                        Console.WriteLine("Product Message sent !");
                    }
                });
            }

            Thread.Sleep(10);

            for (int k = 1; k < 10; k++)
            {
                producer.Produce(destTopic, new Confluent.Kafka.Message<byte[], byte[]>
                {
                    Key = keySerdes.Serialize(k, new SerializationContext()),
                    Value = serdes.Serialize(new Order
                    {
                        order_id = k,
                        price = 123.5F,
                        product_id = k
                    }, new SerializationContext())
                }, (d) =>
                {
                    if (d.Status == PersistenceStatus.Persisted)
                    {
                        Console.WriteLine("Order Message sent !");
                    }
                });
            }

            Thread.Sleep(50);
        }
    }
}
private void TaskManagerRestorationChangelog(bool persistentStateStore = false)
{
    var stateDir = Path.Combine(".", Guid.NewGuid().ToString());
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-restoration-changelog-app";
    config.StateDir = stateDir;

    var builder = new StreamBuilder();
    builder.Table("topic",
        persistentStateStore
            ? RocksDb<string, string>.As("store").WithLoggingEnabled(null)
            : InMemory<string, string>.As("store").WithLoggingEnabled(null));

    var serdes = new StringSerDes();
    var topology = builder.Build();
    topology.Builder.RewriteTopology(config);

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);
    var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

    var storeChangelogReader = new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
    var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer, storeChangelogReader, new StreamMetricsRegistry());
    var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

    var part = new TopicPartition("topic", 0);
    taskManager.CreateTasks(new List<TopicPartition> { part });

    var task = taskManager.ActiveTaskFor(part);
    IDictionary<TaskId, ITask> tasks = new Dictionary<TaskId, ITask>();
    tasks.Add(task.Id, task);

    taskManager.TryToCompleteRestoration();
    storeChangelogReader.Restore();
    Assert.IsTrue(taskManager.TryToCompleteRestoration());

    List<ConsumeResult<byte[], byte[]>> messages = new List<ConsumeResult<byte[], byte[]>>();
    int offset = 0;
    for (int i = 0; i < 5; ++i)
    {
        messages.Add(
            new ConsumeResult<byte[], byte[]>
            {
                Message = new Message<byte[], byte[]>
                {
                    Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                    Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                },
                TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
            });
    }

    task.AddRecords(messages);

    // Process the messages
    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
    }

    taskManager.CommitAll();

    // Simulate a close followed by a fresh open
    taskManager.Close();

    restoreConsumer.Resume(new TopicPartition("test-restoration-changelog-app-store-changelog", 0).ToSingle());

    taskManager.CreateTasks(new List<TopicPartition> { part });
    task = taskManager.ActiveTaskFor(part);
    tasks = new Dictionary<TaskId, ITask>();
    tasks.Add(task.Id, task);

    Assert.IsFalse(taskManager.TryToCompleteRestoration());
    storeChangelogReader.Restore();
    Assert.IsTrue(taskManager.TryToCompleteRestoration());

    var store = task.GetStore("store");
    var items = (store as ITimestampedKeyValueStore<string, string>).All().ToList();
    Assert.AreEqual(5, items.Count);

    taskManager.Close();

    if (persistentStateStore)
    {
        Directory.Delete(stateDir, true);
    }
}