Example #1
        public void GetCurrentPartitionMetadataTests()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId  = "test";
            config.Guarantee      = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs         = 1;
            config.FollowMetadata = true;
            var configConsumer = config.Clone();

            configConsumer.ApplicationId = "test-consumer";
            int? h = null;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .MapValues((v) =>
            {
                h = StreamizMetadata.GetCurrentPartitionMetadata();
                return(v);
            })
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            consumer.Subscribe("output");
            ConsumeResult <byte[], byte[]> result = null;

            do
            {
                result = consumer.Consume(100);
            } while (result == null);


            source.Cancel();
            thread.Dispose();

            Assert.NotNull(h);
            Assert.AreEqual(0, h);
        }
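
A minimal sketch of the same pattern outside the test harness: reading the current partition from inside a processing lambda. The topic names, application id, broker address, and the using directives are assumptions here; as in the test, FollowMetadata must be enabled for StreamizMetadata to return anything.

using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.SerDes;

var config = new StreamConfig<StringSerDes, StringSerDes>
{
    ApplicationId = "partition-metadata-app", // placeholder
    BootstrapServers = "localhost:9092",      // placeholder
    FollowMetadata = true                     // required, otherwise metadata is null
};

var builder = new StreamBuilder();
builder
    .Stream<string, string>("input")
    .MapValues(v =>
    {
        // Returns null when no record metadata is being tracked
        int? partition = StreamizMetadata.GetCurrentPartitionMetadata();
        return $"{v} (partition {partition})";
    })
    .To("output");

var stream = new KafkaStream(builder.Build(), config);
await stream.StartAsync();
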
        public void WithNullMaterialize()
        {
            // Verify that even when Materialized is null, a state store exists for the count processor, with a generated store name
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-count";

            var builder = new StreamBuilder();
            Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null;

            builder
            .Table <string, string>("topic")
            .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

            Assert.AreEqual(2, task.Context.States.StateStoreNames.Count());
            var nameStore1 = task.Context.States.StateStoreNames.ElementAt(0);
            var nameStore2 = task.Context.States.StateStoreNames.ElementAt(1);

            Assert.IsNotNull(nameStore1);
            Assert.IsNotNull(nameStore2);
            Assert.AreNotEqual(string.Empty, nameStore1);
            Assert.AreNotEqual(string.Empty, nameStore2);
            var store1 = task.GetStore(nameStore1);
            var store2 = task.GetStore(nameStore2);

            Assert.IsInstanceOf <TimestampedKeyValueStore <string, string> >(store1);
            Assert.IsInstanceOf <TimestampedKeyValueStore <string, long> >(store2);
            Assert.AreEqual(0, (store1 as TimestampedKeyValueStore <string, string>).ApproximateNumEntries());
            Assert.AreEqual(0, (store2 as TimestampedKeyValueStore <string, long>).ApproximateNumEntries());
        }
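
When a deterministic, queryable store name is wanted instead of a generated one, the Materialized argument can be supplied explicitly. A short sketch using the same InMemory helper that later examples in this listing use; "count-store" is a placeholder name.

using System.Collections.Generic;
using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.Table;

var builder = new StreamBuilder();
builder
    .Table<string, string>("topic")
    .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v))
    .Count(InMemory<string, long>.As("count-store")); // explicit, stable store name
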
        public void Initialize()
        {
            streamMetricsRegistry
                = new StreamMetricsRegistry(Guid.NewGuid().ToString(),
                                            MetricsRecordingLevel.DEBUG);

            config.ApplicationId    = "test-stream-thread";
            config.StateDir         = Guid.NewGuid().ToString();
            config.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs           = 10;
            config.CommitIntervalMs = 1;

            var builder = new StreamBuilder();
            var stream  = builder.Stream <string, string>("topic");

            stream.GroupByKey().Count();
            stream.To("topic2");

            var topo = builder.Build();

            id = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = builder.Build().Builder.BuildTopology(id);

            syncKafkaSupplier = new SyncKafkaSupplier();
            var producer = syncKafkaSupplier.GetProducer(config.ToProducerConfig());
            var consumer = syncKafkaSupplier.GetConsumer(config.ToConsumerConfig(), null);

            topicPartition = new TopicPartition("topic", 0);
            task           = new StreamTask(
                threadId,
                id,
                new List <TopicPartition> {
                topicPartition
            },
                processorTopology,
                consumer,
                config,
                syncKafkaSupplier,
                null,
                new MockChangelogRegister(),
                streamMetricsRegistry);
            task.GroupMetadata = consumer as SyncConsumer;

            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            var activeRestorationSensor = streamMetricsRegistry.GetSensors().FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_RESTORATION)));

            Assert.AreEqual(1,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            task.CompleteRestoration();
            Assert.AreEqual(0,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
        }
        public void TaskManagerCommitWithoutCommitNeeed()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.CommitAll());
            taskManager.Close();
        }
        public void WithNullMaterialize()
        {
            // Verify that even when Materialized is null, a state store exists for the count processor, with a generated store name
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-count";

            var builder = new StreamBuilder();
            Materialized <string, long, IWindowStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            Assert.AreEqual(1, task.Context.States.StateStoreNames.Count());
            var nameStore = task.Context.States.StateStoreNames.ElementAt(0);

            Assert.IsNotNull(nameStore);
            Assert.AreNotEqual(string.Empty, nameStore);
            var store = task.GetStore(nameStore);

            Assert.IsInstanceOf <ITimestampedWindowStore <string, long> >(store);
            Assert.AreEqual(0, (store as ITimestampedWindowStore <string, long>).All().ToList().Count);
        }
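
For reference, the same tumbling-window count as plain application code, with the windowed key flattened back to a string before writing to the output topic. The Windowed key's Key property and the topic names are assumptions beyond what the test itself shows.

using System.Collections.Generic;
using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.SerDes;
using Streamiz.Kafka.Net.Stream;

var builder = new StreamBuilder();
builder
    .Stream<string, string>("events")
    .GroupByKey()
    .WindowedBy(TumblingWindowOptions.Of(2000)) // 2-second tumbling windows
    .Count()
    .ToStream()
    .Map((windowed, count) => KeyValuePair.Create(windowed.Key, count.ToString()))
    .To("counts");
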
Example #6
        public void StreamThreadCommitIntervalWorkflow()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs           = 1;
            config.CommitIntervalMs = 1;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // WAIT FOR THE STREAM THREAD TO PROCESS THE MESSAGE
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));

            var offsets = thread.GetCommittedOffsets(new List <TopicPartition> {
                new TopicPartition("topic", 0)
            },
                                                     TimeSpan.FromSeconds(10)).ToList();

            Assert.AreEqual(1, offsets.Count);
            Assert.AreEqual(1, offsets[0].Offset.Value);
            Assert.AreEqual(0, offsets[0].TopicPartition.Partition.Value);
            Assert.AreEqual("topic", offsets[0].Topic);

            source.Cancel();
            thread.Dispose();
        }
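
The commit-interval knobs exercised here map one-to-one onto an application configuration. A sketch, assuming the standard KafkaStream entry point from the library's README; broker address and topic names are placeholders.

using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.SerDes;

var config = new StreamConfig<StringSerDes, StringSerDes>
{
    ApplicationId = "commit-interval-app",
    BootstrapServers = "localhost:9092",
    PollMs = 100,            // poll the consumer every 100 ms
    CommitIntervalMs = 1000  // commit processed offsets roughly once per second
};

var builder = new StreamBuilder();
builder.Stream<string, string>("topic").To("topic2");

var stream = new KafkaStream(builder.Build(), config);
await stream.StartAsync();
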
Example #7
        public void TaskManagerAssignedUnknownPartitions()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1)
            });

            taskManager.RevokeTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 1)
            });

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2)
            });

            taskManager.TryToCompleteRestoration();

            Assert.AreEqual(3, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.RevokedTasks.Count());
            taskManager.Close();
        }
        public void ChangelogPut()
        {
            var consumerConfig = new ConsumerConfig();

            consumerConfig.GroupId = "test-result-store-changelog";
            var consumer = kafkaSupplier.GetConsumer(consumerConfig, null);

            consumer.Subscribe("test-store-changelog");

            var message = new Message <byte[], byte[]>
            {
                Headers   = new Headers(),
                Timestamp = new Timestamp(DateTime.Now)
            };

            var consumerResult = new ConsumeResult <byte[], byte[]>
            {
                Message   = message,
                Offset    = 0,
                Topic     = "test-store",
                Partition = 0
            };

            context.SetRecordMetaData(consumerResult);

            store.Put(CreateKey("test"), CreateValue("value"));

            var r = consumer.Consume();

            Assert.AreEqual("test", FromKey(r.Message.Key));
            Assert.AreEqual("value", FromKey(r.Message.Value));

            var list = store.All().ToList();

            Assert.AreEqual(1, list.Count);
            Assert.AreEqual("test", FromKey(list[0].Key.Get));
            Assert.AreEqual("value", FromValue(list[0].Value));

            consumer.Dispose();
        }
Example #9
        public void SetUp()
        {
            mockKafkaSupplier = new SyncKafkaSupplier();
            var consumerConfig = new ConsumerConfig();

            consumerConfig.GroupId = "global-consulmer";
            var globalConsumer = mockKafkaSupplier.GetConsumer(consumerConfig, null);

            streamConfigMock = new Mock <IStreamConfig>();
            streamConfigMock.Setup(c => c.StateDir).Returns($"./{Guid.NewGuid().ToString()}");
            streamConfigMock.Setup(c => c.ApplicationId).Returns("app");

            kvStoreMock    = CreateMockStore <IKeyValueStore <object, object> >(kvStoreName);
            otherStoreMock = CreateMockStore <IKeyValueStore <object, object> >(otherStoreName);
            var globalStateStores = new Dictionary <string, IStateStore>()
            {
                { kvStoreMock.Object.Name, kvStoreMock.Object },
                { otherStoreMock.Object.Name, otherStoreMock.Object }
            };
            var storesToTopics = new Dictionary <string, string>()
            {
                { kvStoreMock.Object.Name, kvStoreTopic },
                { otherStoreMock.Object.Name, otherStoreTopic }
            };

            topology = new ProcessorTopology(
                null,
                new Dictionary <string, IProcessor>(),
                new Dictionary <string, IProcessor>(),
                new Dictionary <string, IProcessor>(),
                new Dictionary <string, IStateStore>(),
                globalStateStores,
                storesToTopics,
                new List <string>());

            adminClientMock = new Mock <IAdminClient>();
            RegisterPartitionInAdminClient(kvStoreTopic);
            RegisterPartitionInAdminClient(otherStoreTopic);

            stateManager = new GlobalStateManager(globalConsumer, topology,
                                                  adminClientMock.Object,
                                                  streamConfigMock.Object
                                                  );

            context = new GlobalProcessorContext(
                streamConfigMock.Object,
                stateManager,
                new StreamMetricsRegistry());

            stateManager.SetGlobalProcessorContext(context);
        }
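
This fixture wires a GlobalStateManager by hand; in user code the equivalent global store normally comes from declaring a GlobalTable. A sketch under the assumption that builder.GlobalTable mirrors the builder.Table overloads used elsewhere in this listing; topic and store names are placeholders.

using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.Table;

var builder = new StreamBuilder();

// The store is fed from every partition of the topic and restored on
// startup, which is the work GlobalStateManager performs internally.
builder.GlobalTable<string, string>("reference-data",
                                    InMemory<string, string>.As("reference-store"));
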
Example #10
        public void WithNullMaterialize()
        {
            // Verify that when Materialized is null, initializing the state stores throws a StreamsException
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-count";

            var builder = new StreamBuilder();
            Materialized <string, int, IWindowStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Aggregate(
                () => 0,
                (k, v, agg) => Math.Max(v.Length, agg),
                m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            Assert.Throws <StreamsException>(() => task.InitializeStateStores());
        }
Example #11
        public void WithNullMaterialize()
        {
            // Verify that without a Materialized, initializing the state stores throws a StreamsException
            var config = new StreamConfig();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-reduce";

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Reduce((v1, v2) => v1.Length > v2.Length ? v1 : v2);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            Assert.Throws <StreamsException>(() => task.InitializeStateStores());
        }
        public static UnassignedStreamTask Create(TaskId id)
        {
            var config = new StreamConfig();

            config.ApplicationId = "un-assigned-stream-task";
            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            return(new UnassignedStreamTask(
                       "un-assigned-stream-task",
                       id,
                       new List <TopicPartition>(),
                       ProcessorTopology.EMPTY,
                       consumer,
                       config,
                       supplier,
                       producer));
        }
        public static UnassignedStreamTask Create()
        {
            var config = new StreamConfig();

            config.ApplicationId = "un-assigned-stream-task";
            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            return(new UnassignedStreamTask(
                       "un-assigned-stream-task",
                       new TaskId {
                Id = -1, Partition = -1
            },
                       new List <TopicPartition>(),
                       null,
                       consumer,
                       config,
                       supplier,
                       producer));
        }
Example #14
        public void ChangelogPut()
        {
            var consumerConfig = new ConsumerConfig();

            consumerConfig.GroupId = "test-result-store-changelog";
            var consumer = kafkaSupplier.GetConsumer(consumerConfig, null);

            consumer.Subscribe("test-store-changelog");

            var message = new Message <byte[], byte[]>
            {
                Headers   = new Headers(),
                Timestamp = new Timestamp(DateTime.Now)
            };

            var consumerResult = new ConsumeResult <byte[], byte[]>
            {
                Message   = message,
                Offset    = 0,
                Topic     = "test-store",
                Partition = 0
            };

            context.SetRecordMetaData(consumerResult);

            store.Put(CreateKey("test"), CreateValue(ValueAndTimestamp <string> .Make("value", 0)), 0);
            store.Put(CreateKey("test2"), null, 0);

            var r = consumer.Consume();

            Assert.AreEqual("test", FromKey(r.Message.Key).Key);
            Assert.AreEqual("value", FromValue(r.Message.Value));

            r = consumer.Consume();

            Assert.AreEqual("test2", FromKey(r.Message.Key).Key);
            Assert.IsNull(r.Message.Value);

            consumer.Dispose();
        }
        public void TaskManagerCommit()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());

            var part = new TopicPartition("topic", 0);
            var task = taskManager.ActiveTaskFor(part);
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            // ONLY ONE TASK HAS RECEIVED RECORDS
            Assert.AreEqual(1, taskManager.CommitAll());

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i+1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            // NO RECORDS IN THIS TASK
            part = new TopicPartition("topic", 2);
            task = taskManager.ActiveTaskFor(part);
            Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
            Assert.IsFalse(task.Process());

            taskManager.Close();
        }
        public void StandardWorkflowTaskManager()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());
            for (int i = 0; i < 4; ++i)
            {
                var task = taskManager.ActiveTaskFor(new TopicPartition("topic", i));
                Assert.IsNotNull(task);
                Assert.AreEqual("test-app", task.ApplicationId);
                Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
                Assert.IsFalse(task.CommitNeeded);
                Assert.IsFalse(task.HasStateStores);
            }

            // Revoke 2 partitions
            taskManager.RevokeTasks(new List <TopicPartition> {
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });
            Assert.AreEqual(2, taskManager.ActiveTasks.Count());
            Assert.AreEqual(2, taskManager.RevokedTasks.Count());
            for (int i = 0; i < 2; ++i)
            {
                var task = taskManager.ActiveTaskFor(new TopicPartition("topic", i));
                Assert.IsNotNull(task);
                Assert.AreEqual("test-app", task.ApplicationId);
                Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
                Assert.IsFalse(task.CommitNeeded);
                Assert.IsFalse(task.HasStateStores);
            }

            var taskFailed = taskManager.ActiveTaskFor(new TopicPartition("topic", 2));

            Assert.IsNull(taskFailed);

            taskManager.Close();
            Assert.AreEqual(0, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.RevokedTasks.Count());
        }
        public void TaskMetricsTest()
        {
            var serdes      = new StringSerDes();
            var cloneConfig = config.Clone();

            cloneConfig.ApplicationId = "consume-test";
            var producer = syncKafkaSupplier.GetProducer(cloneConfig.ToProducerConfig());
            var consumer = syncKafkaSupplier.GetConsumer(cloneConfig.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");

            int nbMessage = 1000;
            // produce 1000 messages to input topic
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < nbMessage; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, offset++)
                });
            }

            task.AddRecords(messages);

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            var messagesSink = new List <ConsumeResult <byte[], byte[]> >();

            AssertExtensions.WaitUntil(() =>
            {
                messagesSink.AddRange(consumer.ConsumeRecords(TimeSpan.FromSeconds(1)));
                return(messagesSink.Count < nbMessage);
            }, TimeSpan.FromSeconds(5),
                                       TimeSpan.FromMilliseconds(10));

            long now     = DateTime.Now.GetMilliseconds();
            var  sensors = streamMetricsRegistry.GetThreadScopeSensor(threadId);

            var processorSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.PROCESS)));

            Assert.AreEqual(2, processorSensor.Metrics.Count());
            Assert.AreEqual(nbMessage,
                            processorSensor.Metrics[MetricName.NameAndGroup(
                                                        TaskMetrics.PROCESS + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                        StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.IsTrue(
                (double)(processorSensor.Metrics[MetricName.NameAndGroup(
                                                     TaskMetrics.PROCESS + StreamMetricsRegistry.RATE_SUFFIX,
                                                     StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value) > 0d);

            var enforcedProcessorSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ENFORCED_PROCESSING)));

            Assert.AreEqual(2, enforcedProcessorSensor.Metrics.Count());
            Assert.AreEqual(0,
                            enforcedProcessorSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ENFORCED_PROCESSING + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.AreEqual(0,
                            enforcedProcessorSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ENFORCED_PROCESSING + StreamMetricsRegistry.RATE_SUFFIX,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var processLatency = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.PROCESS_LATENCY)));

            Assert.AreEqual(2, processLatency.Metrics.Count());
            Assert.IsTrue(
                (double)processLatency.Metrics[MetricName.NameAndGroup(
                                                   TaskMetrics.PROCESS_LATENCY + StreamMetricsRegistry.AVG_SUFFIX,
                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);
            Assert.IsTrue(
                (double)processLatency.Metrics[MetricName.NameAndGroup(
                                                   TaskMetrics.PROCESS_LATENCY + StreamMetricsRegistry.MAX_SUFFIX,
                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);

            var commitSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.COMMIT)));

            Assert.AreEqual(2, commitSensor.Metrics.Count());
            Assert.AreEqual(nbMessage,
                            commitSensor.Metrics[MetricName.NameAndGroup(
                                                     TaskMetrics.COMMIT + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                     StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.IsTrue(
                (double)commitSensor.Metrics[MetricName.NameAndGroup(
                                                 TaskMetrics.COMMIT + StreamMetricsRegistry.RATE_SUFFIX,
                                                 StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);

            var droppedRecordSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.DROPPED_RECORDS)));

            Assert.AreEqual(2, droppedRecordSensor.Metrics.Count());
            Assert.AreEqual(0,
                            droppedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                            TaskMetrics.DROPPED_RECORDS + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.AreEqual(0,
                            droppedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                            TaskMetrics.DROPPED_RECORDS + StreamMetricsRegistry.RATE_SUFFIX,
                                                            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var activeBufferedRecordSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_TASK_PREFIX + TaskMetrics.BUFFER_COUNT)));

            Assert.AreEqual(1, activeBufferedRecordSensor.Metrics.Count());
            Assert.AreEqual(0,
                            activeBufferedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                                   TaskMetrics.ACTIVE_TASK_PREFIX + TaskMetrics.BUFFER_COUNT,
                                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var restorationRecordsSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.RESTORATION_RECORDS)));

            Assert.AreEqual(1, restorationRecordsSensor.Metrics.Count());
            Assert.AreEqual(0,
                            restorationRecordsSensor.Metrics[MetricName.NameAndGroup(
                                                                 TaskMetrics.RESTORATION_RECORDS,
                                                                 StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var activeRestorationSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_RESTORATION)));

            Assert.AreEqual(1, activeRestorationSensor.Metrics.Count());
            Assert.AreEqual(0,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
        }
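
The sensors asserted above can also be exported from a running application through the MetricsReporter hook (the same hook a later example in this listing wires up). Assumptions: MetricName exposes Name and Group, as its NameAndGroup factory suggests, and the reporter receives the sensor collection; the logging body is illustrative only.

using System;
using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.SerDes;

var config = new StreamConfig<StringSerDes, StringSerDes>();
config.ApplicationId = "metrics-app";        // placeholder
config.BootstrapServers = "localhost:9092";  // placeholder
config.MetricsReporter = sensors =>
{
    // Sensor.Metrics maps a MetricName to its current metric value,
    // exactly as the assertions above read it.
    foreach (var sensor in sensors)
        foreach (var metric in sensor.Metrics)
            Console.WriteLine($"{metric.Key.Group}/{metric.Key.Name} = {metric.Value.Value}");
};
config.AddOrUpdate(StreamConfig.metricsIntervalMsCst, 10000); // report every 10 s
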
Example #18
        public void StreamTaskWithEXACTLY_ONCE()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.Guarantee     = ProcessingGuarantee.EXACTLY_ONCE;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i + 1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            task.Close();
        }
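
Outside a test, the same guarantee is a single configuration change: with EXACTLY_ONCE the task commits through a transactional producer, so output records and consumed offsets are committed atomically. Broker address is a placeholder.

using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.SerDes;

var config = new StreamConfig<StringSerDes, StringSerDes>
{
    ApplicationId = "eos-app",
    BootstrapServers = "localhost:9092",
    Guarantee = ProcessingGuarantee.EXACTLY_ONCE // other examples here use AT_LEAST_ONCE
};
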
Example #19
        public void StreamThreadNormalWorkflow()
        {
            bool metricsReporterCalled   = false;
            List <ThreadState> allStates = new List <ThreadState>();
            var expectedStates           = new List <ThreadState>
            {
                ThreadState.CREATED,
                ThreadState.STARTING,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PENDING_SHUTDOWN,
                ThreadState.DEAD
            };

            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId   = "test";
            config.Guarantee       = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs          = 1;
            config.MetricsReporter = (sensor) => { metricsReporterCalled = true; };
            config.AddOrUpdate(StreamConfig.metricsIntervalMsCst, 10);

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            allStates.Add(thread.State);
            thread.StateChanged += (t, o, n) =>
            {
                Assert.IsInstanceOf <ThreadState>(n);
                allStates.Add(n as ThreadState);
            };

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // WAIT FOR THE STREAM THREAD TO PROCESS THE MESSAGE
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            source.Cancel();
            thread.Dispose();

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));
            Assert.AreEqual(expectedStates, allStates);
            Assert.IsTrue(metricsReporterCalled);
        }
        //[Test]
        // TODO: fix that
        public void WorkflowCompleteBufferedRecordsTest()
        {
            int maxBuffered = 10;
            var token       = new System.Threading.CancellationTokenSource();
            var serdes      = new StringSerDes();
            var config      = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-group";
            config.MaxTaskIdleMs = (long)TimeSpan.FromSeconds(100).TotalMilliseconds;
            config.BufferedRecordsPerPartition = maxBuffered;
            config.PollMs = 10;

            var builder = new StreamBuilder();

            var stream1 = builder.Stream <string, string>("topic1");
            var stream2 = builder.Stream <string, string>("topic2");

            stream1
            .Join(stream2, (v1, v2) => $"{v1}-{v2}", JoinWindowOptions.Of(TimeSpan.FromSeconds(10)))
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("output");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(token.Token);

            for (int i = 0; i < maxBuffered + 1; ++i)
            {
                producer.Produce("topic1", new Message <byte[], byte[]>
                {
                    Key   = serdes.Serialize("key", new SerializationContext()),
                    Value = serdes.Serialize($"coucou{i}", new SerializationContext())
                });
            }
            // CONSUMER PAUSES AFTER maxBuffered + 1 MESSAGES
            System.Threading.Thread.Sleep(50);

            // Add one more message while the stream thread's consumer is paused
            producer.Produce("topic1", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"coucou{maxBuffered+1}", new SerializationContext())
            });

            Assert.AreEqual(1, thread.ActiveTasks.Count());
            var task = thread.ActiveTasks.ToArray()[0];

            Assert.IsNotNull(task.Grouper);
            Assert.IsFalse(task.Grouper.AllPartitionsBuffered);
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered());
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered(new TopicPartition("topic1", 0)));
            Assert.AreEqual(0, task.Grouper.NumBuffered(new TopicPartition("topic2", 0)));

            producer.Produce("topic2", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"test", new SerializationContext())
            });

            List <ConsumeResult <byte[], byte[]> > records = new List <ConsumeResult <byte[], byte[]> >();

            do
            {
                records.AddRange(consumer.ConsumeRecords(TimeSpan.FromMilliseconds(100)).ToList());
            } while (records.Count() <= 12);

            Assert.AreEqual(maxBuffered + 2, records.Count());
            for (int i = 0; i < maxBuffered + 2; ++i)
            {
                var message = records.ToArray()[i];
                Assert.AreEqual("key", serdes.Deserialize(message.Message.Key, new SerializationContext()));
                Assert.IsTrue(serdes.Deserialize(message.Message.Value, new SerializationContext()).Contains($"coucou{i}-"));
            }

            token.Cancel();
            thread.Dispose();
        }
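
Stripped of the buffering harness, the topology under test is a windowed stream-stream join: two records join when their keys match and their timestamps fall within the 10-second window. Only the using directives are assumed; the rest is exactly the topology built above.

using System;
using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.SerDes;
using Streamiz.Kafka.Net.Stream;

var builder = new StreamBuilder();
var left  = builder.Stream<string, string>("topic1");
var right = builder.Stream<string, string>("topic2");

left
    .Join(right,
          (v1, v2) => $"{v1}-{v2}",                       // value joiner
          JoinWindowOptions.Of(TimeSpan.FromSeconds(10))) // join window
    .To("output");
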
Example #21
        private void TaskManagerRestorationChangelog(bool persistenStateStore = false)
        {
            var stateDir = Path.Combine(".", Guid.NewGuid().ToString());
            var config   = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-restoration-changelog-app";
            config.StateDir      = stateDir;

            var builder = new StreamBuilder();

            builder.Table("topic",
                          persistenStateStore
                    ? RocksDb <string, string> .As("store").WithLoggingEnabled(null)
                    : InMemory <string, string> .As("store").WithLoggingEnabled(null));

            var serdes = new StringSerDes();

            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var supplier        = new SyncKafkaSupplier();
            var producer        = supplier.GetProducer(config.ToProducerConfig());
            var consumer        = supplier.GetConsumer(config.ToConsumerConfig(), null);
            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            var part = new TopicPartition("topic", 0);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            var task = taskManager.ActiveTaskFor(part);

            IDictionary <TaskId, ITask> tasks = new Dictionary <TaskId, ITask>();

            tasks.Add(task.Id, task);

            taskManager.TryToCompleteRestoration();
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());


            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            // Process messages
            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            taskManager.CommitAll();

            // Simulate a close followed by a reopen
            taskManager.Close();

            restoreConsumer.Resume(new TopicPartition("test-restoration-changelog-app-store-changelog", 0).ToSingle());

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            task  = taskManager.ActiveTaskFor(part);
            tasks = new Dictionary <TaskId, ITask>();
            tasks.Add(task.Id, task);

            Assert.IsFalse(taskManager.TryToCompleteRestoration());
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());

            var store = task.GetStore("store");
            var items = (store as ITimestampedKeyValueStore <string, string>).All().ToList();

            Assert.AreEqual(5, items.Count);

            taskManager.Close();

            if (persistenStateStore)
            {
                Directory.Delete(stateDir, true);
            }
        }
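
The persistenStateStore flag toggles between the two store flavours below; both log to a changelog topic, which is what the restore pass replays. Side by side (topic and store names are placeholders; WithLoggingEnabled(null) keeps the default changelog topic configuration):

using Streamiz.Kafka.Net;
using Streamiz.Kafka.Net.Table;

var builder = new StreamBuilder();

// Persistent: kept on local disk under StateDir; replayed from the
// changelog only when local state is missing or incomplete.
builder.Table("topic-a", RocksDb<string, string>.As("store-a").WithLoggingEnabled(null));

// In-memory: rebuilt from the changelog topic on every restart.
builder.Table("topic-b", InMemory<string, string>.As("store-b").WithLoggingEnabled(null));
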
        public void StreamTaskWrittingCheckpoint()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.StateDir      = Path.Combine(".", Guid.NewGuid().ToString());

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            var table = builder.Table("topic", RocksDb <string, string> .As("store").WithLoggingEnabled());

            TaskId id = new TaskId {
                Id = 0, Partition = 0
            };
            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            task.MayWriteCheckpoint(true);

            messages = new List <ConsumeResult <byte[], byte[]> >();
            for (int i = 0; i < StateManagerTools.OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT + 10; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            task.MayWriteCheckpoint(false);

            var lines = File.ReadAllLines(Path.Combine(config.StateDir, config.ApplicationId, "0-0", ".checkpoint"));

            Assert.AreEqual(3, lines.Length);
            Assert.AreEqual("test-app-store-changelog 0 10014", lines[2]);

            task.Suspend();
            task.Close();

            Directory.Delete(config.StateDir, true);
        }
        public void StreamTaskSuspendResume()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Table <string, string>("topic", InMemory <string, string> .As("store").WithLoggingDisabled())
            .MapValues((k, v) => v.ToUpper())
            .ToStream()
            .To("topic2");


            TaskId id = new TaskId {
                Id = 0, Partition = 0
            };
            var topology          = builder.Build();
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            Assert.IsNotNull(task.GetStore("store"));
            task.Suspend();
            Assert.IsNull(task.GetStore("store"));
            task.Resume();
            task.RestorationIfNeeded();

            Assert.IsNotNull(task.GetStore("store"));
            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(10, results.Count);

            task.Close();
        }