Example #1
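Example #1 is a minimal drain loop: it keeps calling Process() for as long as the task reports a record ready via the CanProcess property.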
 public void Flush()
 {
     while (task.CanProcess)
     {
         task.Process();
     }
 }
Example #2
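Example #2 is the same drain loop, except that it samples the clock once and passes that timestamp to the CanProcess(long) overload used by the task tests below.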
 public void Flush()
 {
     long now = DateTime.Now.GetMilliseconds();

     while (task.CanProcess(now))
     {
         task.Process();
     }
 }
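
Both variants rely on a GetMilliseconds() extension over DateTime. A minimal sketch of such an extension, assuming epoch-millisecond semantics (the real one ships with Streamiz; this stand-in is illustrative only):

using System;

public static class DateTimeExtensions
{
    // Hypothetical stand-in: Unix epoch milliseconds for a DateTime value.
    public static long GetMilliseconds(this DateTime dt) =>
        new DateTimeOffset(dt.ToUniversalTime()).ToUnixTimeMilliseconds();
}

Example #3

This test drives a StreamTask over a RocksDB-backed table with changelogging enabled, processes enough records to cross StateManagerTools.OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT, and then verifies the offsets recorded in the task's .checkpoint file.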
        public void StreamTaskWrittingCheckpoint()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.StateDir      = Path.Combine(".", Guid.NewGuid().ToString());

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            var table = builder.Table("topic", RocksDb<string, string>.As("store").WithLoggingEnabled());

            TaskId id = new TaskId { Id = 0, Partition = 0 };
            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List<TopicPartition> { part },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            // Bring the task up: state stores, topology, then restoration.
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            var messages = new List<ConsumeResult<byte[], byte[]>>();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(new ConsumeResult<byte[], byte[]>
                {
                    Message = new Message<byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            // Each processed record should flag the task commit-needed before Commit().
            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // First checkpoint; the 'true' argument appears to force the write.
            task.MayWriteCheckpoint(true);

            // Exceed the offset-delta threshold so the next, non-forced checkpoint fires.
            messages = new List<ConsumeResult<byte[], byte[]>>();
            for (int i = 0; i < StateManagerTools.OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT + 10; ++i)
            {
                messages.Add(new ConsumeResult<byte[], byte[]>
                {
                    Message = new Message<byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            // Non-forced checkpoint: with 'false', the write presumably depends on the
            // offset-delta threshold crossed above.
            task.MayWriteCheckpoint(false);

            // 5 + OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT + 10 records were added, so with a
            // threshold of 10,000 the last changelog offset is 10014. The file presumably
            // holds a version line, an entry count, and one offset line per store.
            var lines = File.ReadAllLines(Path.Combine(config.StateDir, config.ApplicationId, "0-0", ".checkpoint"));

            Assert.AreEqual(3, lines.Length);
            Assert.AreEqual("test-app-store-changelog 0 10014", lines[2]);

            task.Suspend();
            task.Close();

            Directory.Delete(config.StateDir, true);
        }
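Example #4

Here the task is suspended and resumed mid-stream: Suspend() closes the "store" state store, Resume() followed by RestorationIfNeeded() brings it back, and the records forwarded to topic2 are counted at the end.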
        public void StreamTaskSuspendResume()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Table<string, string>("topic", InMemory<string, string>.As("store").WithLoggingDisabled())
                .MapValues((k, v) => v.ToUpper())
                .ToStream()
                .To("topic2");

            TaskId id = new TaskId { Id = 0, Partition = 0 };
            var topology          = builder.Build();
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List<TopicPartition> { part },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            var messages = new List<ConsumeResult<byte[], byte[]>>();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(new ConsumeResult<byte[], byte[]>
                {
                    Message = new Message<byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // Suspend closes the state store; after Resume + restoration it is available again.
            Assert.IsNotNull(task.GetStore("store"));
            task.Suspend();
            Assert.IsNull(task.GetStore("store"));
            task.Resume();
            task.RestorationIfNeeded();

            Assert.IsNotNull(task.GetStore("store"));
            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            var results = new List<ConsumeResult<byte[], byte[]>>();
            ConsumeResult<byte[], byte[]> result = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            // 5 records before the suspend plus the same 5 re-added after resume.
            Assert.AreEqual(10, results.Count);

            task.Close();
        }
Example #5
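This test runs the task with ProcessingGuarantee.EXACTLY_ONCE and checks that the mapped (uppercased) records land on topic2.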
        public void StreamTaskWithEXACTLY_ONCE()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.Guarantee     = ProcessingGuarantee.EXACTLY_ONCE;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream<string, string>("topic")
                .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
                .To("topic2");

            var topology = builder.Build();
            TaskId id = new TaskId { Id = 0, Partition = 0 };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List<TopicPartition> { part },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

            var messages = new List<ConsumeResult<byte[], byte[]>>();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(new ConsumeResult<byte[], byte[]>
                {
                    Message = new Message<byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            var results = new List<ConsumeResult<byte[], byte[]>>();
            ConsumeResult<byte[], byte[]> result = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i + 1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            task.Close();
        }
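Example #6

The final test processes 1,000 records through a task and metrics registry built in the fixture's setup (config, syncKafkaSupplier, task, topicPartition, streamMetricsRegistry, threadId and GetSensorName are presumably defined there) and then asserts on the task-level sensors: process count and rate, enforced processing, process latency, commit count and rate, dropped records, buffered records, and restoration.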
        public void TaskMetricsTest()
        {
            var serdes = new StringSerDes();
            var cloneConfig = config.Clone();

            cloneConfig.ApplicationId = "consume-test";
            var producer = syncKafkaSupplier.GetProducer(cloneConfig.ToProducerConfig());
            var consumer = syncKafkaSupplier.GetConsumer(cloneConfig.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");

            int nbMessage = 1000;
            // Produce 1000 messages to the input topic.
            var messages = new List<ConsumeResult<byte[], byte[]>>();
            int offset = 0;

            for (int i = 0; i < nbMessage; ++i)
            {
                messages.Add(new ConsumeResult<byte[], byte[]>
                {
                    Message = new Message<byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, offset++)
                });
            }

            task.AddRecords(messages);

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            var messagesSink = new List<ConsumeResult<byte[], byte[]>>();

            // Poll the sink topic (10 ms interval, 5 s timeout) while draining records
            // into messagesSink.
            AssertExtensions.WaitUntil(
                () =>
                {
                    messagesSink.AddRange(consumer.ConsumeRecords(TimeSpan.FromSeconds(1)));
                    return messagesSink.Count < nbMessage;
                },
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(10));

            long now = DateTime.Now.GetMilliseconds();
            var sensors = streamMetricsRegistry.GetThreadScopeSensor(threadId);

            // The process total should equal the number of records; its rate should be positive.
            var processorSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.PROCESS)));

            Assert.AreEqual(2, processorSensor.Metrics.Count());
            Assert.AreEqual(nbMessage,
                            processorSensor.Metrics[MetricName.NameAndGroup(
                                                        TaskMetrics.PROCESS + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                        StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.IsTrue(
                (double)(processorSensor.Metrics[MetricName.NameAndGroup(
                                                     TaskMetrics.PROCESS + StreamMetricsRegistry.RATE_SUFFIX,
                                                     StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value) > 0d);

            // No enforced processing is expected in this run.
            var enforcedProcessorSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ENFORCED_PROCESSING)));

            Assert.AreEqual(2, enforcedProcessorSensor.Metrics.Count());
            Assert.AreEqual(0,
                            enforcedProcessorSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ENFORCED_PROCESSING + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.AreEqual(0,
                            enforcedProcessorSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ENFORCED_PROCESSING + StreamMetricsRegistry.RATE_SUFFIX,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var processLatency = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.PROCESS_LATENCY)));

            Assert.AreEqual(2, processLatency.Metrics.Count());
            Assert.IsTrue(
                (double)processLatency.Metrics[MetricName.NameAndGroup(
                                                   TaskMetrics.PROCESS_LATENCY + StreamMetricsRegistry.AVG_SUFFIX,
                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);
            Assert.IsTrue(
                (double)processLatency.Metrics[MetricName.NameAndGroup(
                                                   TaskMetrics.PROCESS_LATENCY + StreamMetricsRegistry.MAX_SUFFIX,
                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);

            // One commit per processed record, so the commit total matches nbMessage.
            var commitSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.COMMIT)));

            Assert.AreEqual(2, commitSensor.Metrics.Count());
            Assert.AreEqual(nbMessage,
                            commitSensor.Metrics[MetricName.NameAndGroup(
                                                     TaskMetrics.COMMIT + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                     StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.IsTrue(
                (double)commitSensor.Metrics[MetricName.NameAndGroup(
                                                 TaskMetrics.COMMIT + StreamMetricsRegistry.RATE_SUFFIX,
                                                 StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);

            var droppedRecordSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.DROPPED_RECORDS)));

            Assert.AreEqual(2, droppedRecordSensor.Metrics.Count());
            Assert.AreEqual(0,
                            droppedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                            TaskMetrics.DROPPED_RECORDS + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.AreEqual(0,
                            droppedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                            TaskMetrics.DROPPED_RECORDS + StreamMetricsRegistry.RATE_SUFFIX,
                                                            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            // Once drained, no records remain buffered in the active task.
            var activeBufferedRecordSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_TASK_PREFIX + TaskMetrics.BUFFER_COUNT)));

            Assert.AreEqual(1, activeBufferedRecordSensor.Metrics.Count());
            Assert.AreEqual(0,
                            activeBufferedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                                   TaskMetrics.ACTIVE_TASK_PREFIX + TaskMetrics.BUFFER_COUNT,
                                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var restorationRecordsSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.RESTORATION_RECORDS)));

            Assert.AreEqual(1, restorationRecordsSensor.Metrics.Count());
            Assert.AreEqual(0,
                            restorationRecordsSensor.Metrics[MetricName.NameAndGroup(
                                                                 TaskMetrics.RESTORATION_RECORDS,
                                                                 StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var activeRestorationSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_RESTORATION)));

            Assert.AreEqual(1, activeRestorationSensor.Metrics.Count());
            Assert.AreEqual(0,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
        }