[Test]
public void StreamTaskWritingCheckpoint()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";
    config.StateDir = Path.Combine(".", Guid.NewGuid().ToString());

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder.Table("topic", RocksDb<string, string>.As("store").WithLoggingEnabled());

    TaskId id = new TaskId { Id = 0, Partition = 0 };
    var topology = builder.Build();
    topology.Builder.RewriteTopology(config);

    var processorTopology = topology.Builder.BuildTopology(id);

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

    var part = new TopicPartition("topic", 0);
    StreamTask task = new StreamTask(
        "thread-0",
        id,
        new List<TopicPartition> { part },
        processorTopology,
        consumer,
        config,
        supplier,
        producer,
        new MockChangelogRegister(),
        new StreamMetricsRegistry());
    task.GroupMetadata = consumer as SyncConsumer;
    task.InitializeStateStores();
    task.InitializeTopology();
    task.RestorationIfNeeded();
    task.CompleteRestoration();

    // Process a first small batch, then force a checkpoint write.
    var messages = new List<ConsumeResult<byte[], byte[]>>();
    int offset = 0;
    for (int i = 0; i < 5; ++i)
    {
        messages.Add(new ConsumeResult<byte[], byte[]>
        {
            Message = new Message<byte[], byte[]>
            {
                Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
            },
            TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
        });
    }

    task.AddRecords(messages);
    Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
        Assert.IsTrue(task.CommitNeeded);
        task.Commit();
    }

    task.MayWriteCheckpoint(true);

    // Process enough records to cross the offset-delta threshold, so that
    // the unforced MayWriteCheckpoint(false) below actually writes.
    messages = new List<ConsumeResult<byte[], byte[]>>();
    for (int i = 0; i < StateManagerTools.OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT + 10; ++i)
    {
        messages.Add(new ConsumeResult<byte[], byte[]>
        {
            Message = new Message<byte[], byte[]>
            {
                Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
            },
            TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
        });
    }

    task.AddRecords(messages);

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
    }

    task.MayWriteCheckpoint(false);

    // 5 + (OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT + 10) = 10015 records were
    // processed, so offsets run from 0 to 10014; the checkpoint file holds a
    // version line, an entry count, and one line per changelog entry.
    var lines = File.ReadAllLines(Path.Combine(config.StateDir, config.ApplicationId, "0-0", ".checkpoint"));
    Assert.AreEqual(3, lines.Length);
    Assert.AreEqual("test-app-store-changelog 0 10014", lines[2]);

    task.Suspend();
    task.Close();

    Directory.Delete(config.StateDir, true);
}
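// Every test in this fixture rebuilds the same list of keyed records by
// hand. A helper along these lines could factor that out; this is a
// hypothetical sketch (BuildRecords is not part of the original fixture),
// assuming the same StringSerDes encoding and sequential offsets as above:
private static List<ConsumeResult<byte[], byte[]>> BuildRecords(
    TopicPartition part, StringSerDes serdes, int count, ref int offset)
{
    var records = new List<ConsumeResult<byte[], byte[]>>();
    for (int i = 0; i < count; ++i)
    {
        records.Add(new ConsumeResult<byte[], byte[]>
        {
            Message = new Message<byte[], byte[]>
            {
                Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
            },
            TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
        });
    }
    return records;
}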
[TearDown]
public void Dispose()
{
    task.Suspend();
    task.Close();
    task = null;
}
[Test]
public void StreamTaskSuspendResume()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder.Table<string, string>("topic", InMemory<string, string>.As("store").WithLoggingDisabled())
        .MapValues((k, v) => v.ToUpper())
        .ToStream()
        .To("topic2");

    TaskId id = new TaskId { Id = 0, Partition = 0 };
    var topology = builder.Build();
    var processorTopology = topology.Builder.BuildTopology(id);

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

    var part = new TopicPartition("topic", 0);
    StreamTask task = new StreamTask(
        "thread-0",
        id,
        new List<TopicPartition> { part },
        processorTopology,
        consumer,
        config,
        supplier,
        producer,
        new MockChangelogRegister(),
        new StreamMetricsRegistry());
    task.GroupMetadata = consumer as SyncConsumer;
    task.InitializeStateStores();
    task.InitializeTopology();
    task.RestorationIfNeeded();
    task.CompleteRestoration();

    var messages = new List<ConsumeResult<byte[], byte[]>>();
    int offset = 0;
    for (int i = 0; i < 5; ++i)
    {
        messages.Add(new ConsumeResult<byte[], byte[]>
        {
            Message = new Message<byte[], byte[]>
            {
                Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
            },
            TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
        });
    }

    task.AddRecords(messages);
    Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
        Assert.IsTrue(task.CommitNeeded);
        task.Commit();
    }

    // Suspending the task closes its state stores; resuming reopens them.
    Assert.IsNotNull(task.GetStore("store"));
    task.Suspend();
    Assert.IsNull(task.GetStore("store"));
    task.Resume();
    task.RestorationIfNeeded();
    Assert.IsNotNull(task.GetStore("store"));

    task.AddRecords(messages);
    Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
        Assert.IsTrue(task.CommitNeeded);
        task.Commit();
    }

    // Check the records forwarded to topic2: 5 from before the suspend plus
    // the same 5 replayed after the resume.
    consumer.Subscribe("topic2");
    var results = new List<ConsumeResult<byte[], byte[]>>();
    ConsumeResult<byte[], byte[]> result = null;
    do
    {
        result = consumer.Consume(100);
        if (result != null)
        {
            results.Add(result);
            consumer.Commit(result);
        }
    } while (result != null);

    Assert.AreEqual(10, results.Count);

    task.Close();
}
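// Both suspend/resume tests end with the same consume-until-null loop over
// the output topic. A shared drain helper could replace it; again a
// hypothetical sketch (DrainTopic is not part of the original fixture),
// assuming the synchronous mock consumer returns null once the topic is
// exhausted:
private static List<ConsumeResult<byte[], byte[]>> DrainTopic(
    IConsumer<byte[], byte[]> consumer, string topic)
{
    consumer.Subscribe(topic);
    var results = new List<ConsumeResult<byte[], byte[]>>();
    ConsumeResult<byte[], byte[]> result;
    while ((result = consumer.Consume(100)) != null)
    {
        results.Add(result);
        consumer.Commit(result);
    }
    return results;
}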
[Test]
public void StreamTaskSuspendResumeStateless()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder.Stream<string, string>("topic")
        .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
        .To("topic2");

    TaskId id = new TaskId { Id = 1, Partition = 0 };
    var topology = builder.Build();
    var processorTopology = topology.Builder.BuildTopology(id);

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

    var part = new TopicPartition("topic", 0);
    StreamTask task = new StreamTask(
        "thread-0",
        id,
        new List<TopicPartition> { part },
        processorTopology,
        consumer,
        config,
        supplier,
        producer,
        new MockChangelogRegister(),
        new StreamMetricsRegistry());
    task.GroupMetadata = consumer as SyncConsumer;
    task.InitializeStateStores();
    task.InitializeTopology();
    task.RestorationIfNeeded();
    task.CompleteRestoration();

    var messages = new List<ConsumeResult<byte[], byte[]>>();
    int offset = 0;
    for (int i = 0; i < 5; ++i)
    {
        messages.Add(new ConsumeResult<byte[], byte[]>
        {
            Message = new Message<byte[], byte[]>
            {
                Key = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
            },
            TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
        });
    }

    task.AddRecords(messages);
    Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
        Assert.IsTrue(task.CommitNeeded);
        task.Commit();
    }

    // A stateless task suspends and resumes with no store to close or reopen.
    task.Suspend();
    task.Resume();

    task.AddRecords(messages);
    Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

    while (task.CanProcess(DateTime.Now.GetMilliseconds()))
    {
        Assert.IsTrue(task.Process());
        Assert.IsTrue(task.CommitNeeded);
        task.Commit();
    }

    // Check the records forwarded to topic2: 5 per pass, 10 in total.
    consumer.Subscribe("topic2");
    var results = new List<ConsumeResult<byte[], byte[]>>();
    ConsumeResult<byte[], byte[]> result = null;
    do
    {
        result = consumer.Consume(100);
        if (result != null)
        {
            results.Add(result);
            consumer.Commit(result);
        }
    } while (result != null);

    Assert.AreEqual(10, results.Count);

    task.Close();
}
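// With the BuildRecords and DrainTopic sketches above, the arrange and
// assert phases of these tests could shrink to something like the following
// (hypothetical usage, same assumptions as the helpers):
//
//   int offset = 0;
//   task.AddRecords(BuildRecords(part, serdes, 5, ref offset));
//   ...
//   var results = DrainTopic(consumer, "topic2");
//   Assert.AreEqual(10, results.Count);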