public void StreamTaskSuspendResume()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Table <string, string>("topic", InMemory <string, string> .As("store").WithLoggingDisabled())
            .MapValues((k, v) => v.ToUpper())
            .ToStream()
            .To("topic2");


            TaskId id = new TaskId {
                Id = 0, Partition = 0
            };
            var topology          = builder.Build();
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            Assert.IsNotNull(task.GetStore("store"));
            task.Suspend();
            Assert.IsNull(task.GetStore("store"));
            task.Resume();
            task.RestorationIfNeeded();

            Assert.IsNotNull(task.GetStore("store"));
            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(10, results.Count);

            task.Close();
        }
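The suspend/resume cycle above reduces to a few task calls; the sketch below only distills lines already present in the test, and the comments state what the assertions verify.

// Distilled from the test above (no new API): after Suspend() the store is unreachable,
// and it becomes available again once the task is resumed and restored.
task.Suspend();               // task.GetStore("store") now returns null
task.Resume();
task.RestorationIfNeeded();   // task.GetStore("store") returns the store again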
        public async Task KafkaStreamProductionExceptionHandlerContinueTest()
        {
            var _return = new List <KeyValuePair <string, string> >();

            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId               = "test";
            config.BootstrapServers            = "127.0.0.1";
            config.PollMs                      = 10;
            config.ProductionExceptionHandler += (r) => ExceptionHandlerResponse.CONTINUE;

            var supplier = new ProducerSyncException();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .To("test-output");

            builder.Stream <string, string>("test-output")
            .Peek((k, v) => _return.Add(KeyValuePair.Create(k, v)));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                dt = DateTime.Now;
                producer.Produce("test",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key       = serdes.Serialize("k", new SerializationContext()),
                    Value     = serdes.Serialize("test", new SerializationContext()),
                    Timestamp = new Confluent.Kafka.Timestamp(dt)
                });
                Thread.Sleep(1000);
                var expected = new List <KeyValuePair <string, string> >();

                Assert.AreEqual(expected, _return);
            }

            stream.Dispose();
        }
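The piece of this test that matters for error handling is the ProductionExceptionHandler registration. Below is a minimal, hedged sketch of the same CONTINUE policy outside the test harness: the application id, broker address, and topic names are placeholders, and the KafkaStream is built without the test-only ProducerSyncException supplier.

// Sketch only: skip records whose delivery fails instead of stopping the stream.
var config = new StreamConfig<StringSerDes, StringSerDes>();
config.ApplicationId = "production-exception-demo";    // placeholder id
config.BootstrapServers = "127.0.0.1:9092";            // placeholder broker
config.ProductionExceptionHandler += (deliveryReport) => ExceptionHandlerResponse.CONTINUE;

var builder = new StreamBuilder();
builder.Stream<string, string>("input").To("output");  // placeholder topics

var stream = new KafkaStream(builder.Build(), config);
await stream.StartAsync();
// ... failed deliveries to "output" are now skipped rather than fatal ...
stream.Dispose();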
Example #3
 private Bytes CreateKey(string key)
 => Bytes.Wrap(stringSerDes.Serialize(key, new SerializationContext()));
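Usage of this helper is a one-liner; a hedged example (the variable name is illustrative):

// Wrap a string key into the Bytes form expected by byte-keyed stores.
Bytes storeKey = CreateKey("key1");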
Example #4
        public void GetCurrentHeadersMetadataTests()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId  = "test";
            config.Guarantee      = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs         = 1;
            config.FollowMetadata = true;
            var configConsumer = config.Clone();

            configConsumer.ApplicationId = "test-consumer";
            Headers h       = null;
            Headers headers = new Headers();

            headers.Add("k", new byte[1] { 13 });

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .MapValues((v) =>
            {
                h = StreamizMetadata.GetCurrentHeadersMetadata();
                return(v);
            })
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key     = serdes.Serialize("key1", new SerializationContext()),
                Value   = serdes.Serialize("coucou", new SerializationContext()),
                Headers = headers
            });

            consumer.Subscribe("output");
            ConsumeResult <byte[], byte[]> result = null;

            do
            {
                result = consumer.Consume(100);
            } while (result == null);


            source.Cancel();
            thread.Dispose();

            Assert.NotNull(h);
            Assert.AreEqual(1, h.Count);
            Assert.AreEqual("k", h[0].Key);
            Assert.AreEqual(new byte[1] { 13 }, h[0].GetValueBytes());
        }
        //[Test]
        // TODO : fix that
        public void WorkflowCompleteBufferedRecordsTest()
        {
            int maxBuffered = 10;
            var token       = new System.Threading.CancellationTokenSource();
            var serdes      = new StringSerDes();
            var config      = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-group";
            config.MaxTaskIdleMs = (long)TimeSpan.FromSeconds(100).TotalMilliseconds;
            config.BufferedRecordsPerPartition = maxBuffered;
            config.PollMs = 10;

            var builder = new StreamBuilder();

            var stream1 = builder.Stream <string, string>("topic1");
            var stream2 = builder.Stream <string, string>("topic2");

            stream1
            .Join(stream2, (v1, v2) => $"{v1}-{v2}", JoinWindowOptions.Of(TimeSpan.FromSeconds(10)))
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("output");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(token.Token);

            for (int i = 0; i < maxBuffered + 1; ++i)
            {
                producer.Produce("topic1", new Message <byte[], byte[]>
                {
                    Key   = serdes.Serialize("key", new SerializationContext()),
                    Value = serdes.Serialize($"coucou{i}", new SerializationContext())
                });
            }
            // CONSUMER PAUSES AFTER maxBuffered + 1 messages
            System.Threading.Thread.Sleep(50);

            // Add one more message while the stream thread's consumer is paused
            producer.Produce("topic1", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"coucou{maxBuffered+1}", new SerializationContext())
            });

            Assert.AreEqual(1, thread.ActiveTasks.Count());
            var task = thread.ActiveTasks.ToArray()[0];

            Assert.IsNotNull(task.Grouper);
            Assert.IsFalse(task.Grouper.AllPartitionsBuffered);
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered());
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered(new TopicPartition("topic1", 0)));
            Assert.AreEqual(0, task.Grouper.NumBuffered(new TopicPartition("topic2", 0)));

            producer.Produce("topic2", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"test", new SerializationContext())
            });

            List <ConsumeResult <byte[], byte[]> > records = new List <ConsumeResult <byte[], byte[]> >();

            do
            {
                records.AddRange(consumer.ConsumeRecords(TimeSpan.FromMilliseconds(100)).ToList());
            } while (records.Count() < maxBuffered + 2);

            Assert.AreEqual(maxBuffered + 2, records.Count());
            for (int i = 0; i < maxBuffered + 2; ++i)
            {
                var message = records.ToArray()[i];
                Assert.AreEqual("key", serdes.Deserialize(message.Message.Key, new SerializationContext()));
                Assert.IsTrue(serdes.Deserialize(message.Message.Value, new SerializationContext()).Contains($"coucou{i}-"));
            }

            token.Cancel();
            thread.Dispose();
        }
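The behaviour this (currently disabled) test targets is driven by a single setting; a minimal hedged configuration sketch with placeholder values, mirroring the comments in the test:

// Sketch only: cap how many records a task buffers per input partition.
// Per the test's comments, the stream thread pauses the partition once the buffer is full.
var config = new StreamConfig<StringSerDes, StringSerDes>();
config.ApplicationId = "buffered-records-demo";                             // placeholder id
config.BufferedRecordsPerPartition = 10;                                    // placeholder limit
config.MaxTaskIdleMs = (long)TimeSpan.FromSeconds(100).TotalMilliseconds;   // wait for the other join side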
Example #6
        public void StreamTaskWithEXACTLY_ONCE()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.Guarantee     = ProcessingGuarantee.EXACTLY_ONCE;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i + 1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            task.Close();
        }
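Compared with the at-least-once task tests, the only configuration difference exercised here is the processing guarantee; a minimal hedged sketch of that delta:

// Sketch only: the configuration delta for exactly-once processing.
var config = new StreamConfig<StringSerDes, StringSerDes>();
config.ApplicationId = "eos-demo";                        // placeholder id
config.Guarantee     = ProcessingGuarantee.EXACTLY_ONCE;  // the other tests here use AT_LEAST_ONCE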
Example #7
        public void GetCurrentTimestampMetadataTests()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId  = "test";
            config.Guarantee      = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs         = 1;
            config.FollowMetadata = true;
            var configConsumer = config.Clone();

            configConsumer.ApplicationId = "test-consumer";
            long?    h  = null;
            DateTime dt = DateTime.Now;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .MapValues((v) =>
            {
                h = StreamizMetadata.GetCurrentTimestampMetadata();
                return(v);
            })
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key       = serdes.Serialize("key1", new SerializationContext()),
                Value     = serdes.Serialize("coucou", new SerializationContext()),
                Timestamp = new Timestamp(DateTime.Now)
            });

            consumer.Subscribe("output");
            ConsumeResult <byte[], byte[]> result = null;

            do
            {
                result = consumer.Consume(100);
            } while (result == null);


            source.Cancel();
            thread.Dispose();

            Assert.NotNull(h);
            // TODO FIX Assert.IsTrue(h.Value > dt.GetMilliseconds());
            Assert.IsTrue(h.Value > 0);
        }
        public async Task GetWindowElementInStateStore()
        {
            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";
            config.PollMs           = 10;

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(TimeSpan.FromMinutes(1)))
            .Count(InMemoryWindows <string, long> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                dt = DateTime.Now;
                producer.Produce("test",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key       = serdes.Serialize("key1", new SerializationContext()),
                    Value     = serdes.Serialize("coucou", new SerializationContext()),
                    Timestamp = new Confluent.Kafka.Timestamp(dt)
                });
                Thread.Sleep(50);
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.IWindowStore <string, long>()));
                Assert.IsNotNull(store);
                var @enum = store.All();
                Assert.AreEqual(1, store.All().ToList().Count);
                var item = store.Fetch("key1", dt.AddMinutes(-1), dt.AddMinutes(1));
                Assert.IsNotNull(item);
                Assert.IsTrue(item.MoveNext());
                Assert.IsTrue(item.Current.HasValue);
                Assert.AreEqual(1, item.Current.Value.Value);
                item.Dispose();
            }

            stream.Dispose();
        }
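The interactive query in this test can be distilled to a few lines; a hedged sketch, assuming a RUNNING KafkaStream named stream and the "store" name from the topology above:

// Sketch only: fetch windowed counts for one key over a time range.
var windowStore = stream.Store(
    StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.IWindowStore<string, long>()));

var it = windowStore.Fetch("key1", DateTime.Now.AddMinutes(-5), DateTime.Now);
while (it.MoveNext())
{
    if (it.Current.HasValue)
        Console.WriteLine($"count = {it.Current.Value.Value}");   // the count for that window
}
it.Dispose();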
Example #9
        public void StreamThreadNormalWorkflowWithRebalancing()
        {
            List <ThreadState> allStates = new List <ThreadState>();
            var expectedStates           = new List <ThreadState>
            {
                ThreadState.CREATED,
                ThreadState.STARTING,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PARTITIONS_REVOKED,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PENDING_SHUTDOWN,
                ThreadState.DEAD
            };

            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test";
            config.Guarantee     = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs        = 1;

            var consumeConfig = config.Clone();

            consumeConfig.ApplicationId = "consume-test";

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new MockKafkaSupplier(4);
            var producer = supplier.GetProducer(consumeConfig.ToProducerConfig());
            var consumer = supplier.GetConsumer(consumeConfig.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            allStates.Add(thread.State);
            thread.StateChanged += (t, o, n) =>
            {
                Assert.IsInstanceOf <ThreadState>(n);
                allStates.Add(n as ThreadState);
            };

            thread.Start(source.Token);
            // WAIT FOR PARTITIONS TO BE ASSIGNED
            System.Threading.Thread.Sleep(50);

            var thread2 = StreamThread.Create(
                "thread-1", "c1",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                1) as StreamThread;

            thread2.Start(source.Token);
            // WAIT FOR PARTITION REBALANCING
            System.Threading.Thread.Sleep(50);

            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // WAIT FOR STREAMTHREAD TO PROCESS THE MESSAGE
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            // 2 CONSUMERS FOR THE SAME GROUP ID => TOPIC WITH 4 PARTITIONS
            Assert.AreEqual(2, thread.ActiveTasks.Count());
            Assert.AreEqual(2, thread2.ActiveTasks.Count());

            source.Cancel();
            thread.Dispose();
            thread2.Dispose();

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));
            Assert.AreEqual(expectedStates, allStates);
            // Destroy the in-memory cluster
            supplier.Destroy();
        }
        public void TaskMetricsTest()
        {
            var serdes      = new StringSerDes();
            var cloneConfig = config.Clone();

            cloneConfig.ApplicationId = "consume-test";
            var producer = syncKafkaSupplier.GetProducer(cloneConfig.ToProducerConfig());
            var consumer = syncKafkaSupplier.GetConsumer(cloneConfig.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");

            int nbMessage = 1000;
            // produce 1000 messages to input topic
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < nbMessage; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(topicPartition, offset++)
                });
            }

            task.AddRecords(messages);

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            var messagesSink = new List <ConsumeResult <byte[], byte[]> >();

            AssertExtensions.WaitUntil(() =>
            {
                messagesSink.AddRange(consumer.ConsumeRecords(TimeSpan.FromSeconds(1)));
                return(messagesSink.Count < nbMessage);
            }, TimeSpan.FromSeconds(5),
                                       TimeSpan.FromMilliseconds(10));

            long now     = DateTime.Now.GetMilliseconds();
            var  sensors = streamMetricsRegistry.GetThreadScopeSensor(threadId);

            var processorSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.PROCESS)));

            Assert.AreEqual(2, processorSensor.Metrics.Count());
            Assert.AreEqual(nbMessage,
                            processorSensor.Metrics[MetricName.NameAndGroup(
                                                        TaskMetrics.PROCESS + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                        StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.IsTrue(
                (double)(processorSensor.Metrics[MetricName.NameAndGroup(
                                                     TaskMetrics.PROCESS + StreamMetricsRegistry.RATE_SUFFIX,
                                                     StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value) > 0d);

            var enforcedProcessorSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ENFORCED_PROCESSING)));

            Assert.AreEqual(2, enforcedProcessorSensor.Metrics.Count());
            Assert.AreEqual(0,
                            enforcedProcessorSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ENFORCED_PROCESSING + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.AreEqual(0,
                            enforcedProcessorSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ENFORCED_PROCESSING + StreamMetricsRegistry.RATE_SUFFIX,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var processLatency = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.PROCESS_LATENCY)));

            Assert.AreEqual(2, processLatency.Metrics.Count());
            Assert.IsTrue(
                (double)processLatency.Metrics[MetricName.NameAndGroup(
                                                   TaskMetrics.PROCESS_LATENCY + StreamMetricsRegistry.AVG_SUFFIX,
                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);
            Assert.IsTrue(
                (double)processLatency.Metrics[MetricName.NameAndGroup(
                                                   TaskMetrics.PROCESS_LATENCY + StreamMetricsRegistry.MAX_SUFFIX,
                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);

            var commitSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.COMMIT)));

            Assert.AreEqual(2, commitSensor.Metrics.Count());
            Assert.AreEqual(nbMessage,
                            commitSensor.Metrics[MetricName.NameAndGroup(
                                                     TaskMetrics.COMMIT + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                     StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.IsTrue(
                (double)commitSensor.Metrics[MetricName.NameAndGroup(
                                                 TaskMetrics.COMMIT + StreamMetricsRegistry.RATE_SUFFIX,
                                                 StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value > 0d);

            var droppedRecordSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.DROPPED_RECORDS)));

            Assert.AreEqual(2, droppedRecordSensor.Metrics.Count());
            Assert.AreEqual(0,
                            droppedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                            TaskMetrics.DROPPED_RECORDS + StreamMetricsRegistry.TOTAL_SUFFIX,
                                                            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            Assert.AreEqual(0,
                            droppedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                            TaskMetrics.DROPPED_RECORDS + StreamMetricsRegistry.RATE_SUFFIX,
                                                            StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var activeBufferedRecordSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_TASK_PREFIX + TaskMetrics.BUFFER_COUNT)));

            Assert.AreEqual(1, activeBufferedRecordSensor.Metrics.Count());
            Assert.AreEqual(0,
                            activeBufferedRecordSensor.Metrics[MetricName.NameAndGroup(
                                                                   TaskMetrics.ACTIVE_TASK_PREFIX + TaskMetrics.BUFFER_COUNT,
                                                                   StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var restorationRecordsSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.RESTORATION_RECORDS)));

            Assert.AreEqual(1, restorationRecordsSensor.Metrics.Count());
            Assert.AreEqual(0,
                            restorationRecordsSensor.Metrics[MetricName.NameAndGroup(
                                                                 TaskMetrics.RESTORATION_RECORDS,
                                                                 StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);

            var activeRestorationSensor = sensors.FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_RESTORATION)));

            Assert.AreEqual(1, activeRestorationSensor.Metrics.Count());
            Assert.AreEqual(0,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
        }
        public async Task GetElementInStateStore()
        {
            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";
            config.PollMs           = 10;

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder.Table("topic", InMemory <string, string> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                producer.Produce("topic",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key   = serdes.Serialize("key1", new SerializationContext()),
                    Value = serdes.Serialize("coucou", new SerializationContext())
                });
                Thread.Sleep(50);
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore <string, string>()));
                Assert.IsNotNull(store);
                Assert.AreEqual(1, store.ApproximateNumEntries());
                var item = store.Get("key1");
                Assert.IsNotNull(item);
                Assert.AreEqual("coucou", item);
            }

            stream.Dispose();
        }
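The equivalent distilled lookup for the key-value store materialized by builder.Table; a hedged sketch, again assuming a RUNNING KafkaStream named stream:

// Sketch only: point lookup against the materialized "store".
var kvStore = stream.Store(
    StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore<string, string>()));

string value = kvStore.Get("key1");                 // null/default if the key is not materialized yet
long entries = kvStore.ApproximateNumEntries();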
Example #12
        public void StreamThreadRestorationPhaseStartDifferent()
        {
            var producerConfig = config.Clone();

            producerConfig.ApplicationId = "produce-test";

            var serdes   = new StringSerDes();
            var producer = mockKafkaSupplier.GetProducer(producerConfig.ToProducerConfig());

            thread1.Start(token1.Token);
            Thread.Sleep(1500);
            thread2.Start(token2.Token);

            producer.Produce(new TopicPartition("topic", 0), new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            producer.Produce(new TopicPartition("topic", 1), new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key2", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            AssertExtensions.WaitUntil(
                () => thread1.State == ThreadState.RUNNING &&
                thread2.State == ThreadState.RUNNING,
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(20));

            // 2 CONSUMERS FOR THE SAME GROUP ID => TOPIC WITH 2 PARTITIONS
            Assert.AreEqual(1, thread1.ActiveTasks.Count());
            Assert.AreEqual(1, thread2.ActiveTasks.Count());

            AssertExtensions.WaitUntil(
                () => thread1.ActiveTasks.ToList()[0].State == TaskState.RUNNING &&
                thread2.ActiveTasks.ToList()[0].State == TaskState.RUNNING,
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(20));

            var storeThread1 =
                thread1.ActiveTasks.ToList()[0].GetStore("store") as ITimestampedKeyValueStore <string, string>;
            var storeThread2 =
                thread2.ActiveTasks.ToList()[0].GetStore("store") as ITimestampedKeyValueStore <string, string>;

            Assert.IsNotNull(storeThread1);
            Assert.IsNotNull(storeThread2);

            AssertExtensions.WaitUntil(
                () => storeThread1.All().ToList().Count == 1,
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(20));

            AssertExtensions.WaitUntil(
                () => storeThread2.All().ToList().Count == 1,
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(20));

            var totalItemsSt1 = storeThread1.All().ToList();
            var totalItemsSt2 = storeThread2.All().ToList();

            Assert.AreEqual(1, totalItemsSt1.Count);
            Assert.AreEqual(1, totalItemsSt2.Count);

            // Thread2 is closed; its partitions are rebalanced to thread1,
            // so thread1 needs to restore the corresponding state store
            token2.Cancel();
            thread2.Dispose();

            thread2Disposed = true;

            producer.Produce(new TopicPartition("topic", 1), new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key3", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            AssertExtensions.WaitUntil(
                () => thread1.State == ThreadState.RUNNING && thread1.ActiveTasks.Count() == 2,
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(20));

            Assert.AreEqual(2, thread1.ActiveTasks.Count());

            AssertExtensions.WaitUntil(
                () => thread1.ActiveTasks.ToList()[0].State == TaskState.RUNNING &&
                thread1.ActiveTasks.ToList()[1].State == TaskState.RUNNING,
                TimeSpan.FromSeconds(5),
                TimeSpan.FromMilliseconds(20));

            var storeThreadTask1 =
                thread1.ActiveTasks.ToList()[0].GetStore("store") as ITimestampedKeyValueStore <string, string>;
            var storeThreadTask2 =
                thread1.ActiveTasks.ToList()[1].GetStore("store") as ITimestampedKeyValueStore <string, string>;

            Assert.IsNotNull(storeThreadTask1);
            Assert.IsNotNull(storeThreadTask2);

            bool task0Part0 = thread1.ActiveTasks.ToList()[0].Id.Partition == 0;

            AssertExtensions.WaitUntil(
                () => storeThreadTask1.All().ToList().Count == (task0Part0 ? 1 : 2),
                TimeSpan.FromSeconds(1),
                TimeSpan.FromMilliseconds(20));

            AssertExtensions.WaitUntil(
                () => storeThreadTask2.All().ToList().Count == (task0Part0 ? 2 : 1),
                TimeSpan.FromSeconds(1),
                TimeSpan.FromMilliseconds(20));

            var totalItemsSt10 = storeThreadTask1.All().ToList();
            var totalItemsSt11 = storeThreadTask2.All().ToList();

            Assert.AreEqual((task0Part0 ? 1 : 2), totalItemsSt10.Count);
            Assert.AreEqual((task0Part0 ? 2 : 1), totalItemsSt11.Count);
        }
Example #13
        public override byte[] Serialize(T data, SerializationContext context)
        {
            var s = Newtonsoft.Json.JsonConvert.SerializeObject(data);

            return(innerSerdes.Serialize(s, context));
        }
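The Serialize override above turns the object into JSON and delegates the string-to-bytes step to an inner string SerDes; the matching deserializer reverses the two steps. A hedged sketch, assuming the base class exposes the mirror-image Deserialize signature and the same innerSerdes field:

// Sketch only: the mirror of the Serialize override above.
public override T Deserialize(byte[] data, SerializationContext context)
{
    // bytes -> JSON string via the inner string SerDes, then JSON string -> T
    var s = innerSerdes.Deserialize(data, context);
    return Newtonsoft.Json.JsonConvert.DeserializeObject<T>(s);
}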
        public void TaskManagerCommit()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());

            var part = new TopicPartition("topic", 0);
            var task = taskManager.ActiveTaskFor(part);
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            // ONLY ONE TASK HAS RECEIVED RECORDS
            Assert.AreEqual(1, taskManager.CommitAll());

            // CHECK IN TOPIC topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i+1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            // NO RECORDS IN THIS TASK
            part = new TopicPartition("topic", 2);
            task = taskManager.ActiveTaskFor(part);
            Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
            Assert.IsFalse(task.Process());

            taskManager.Close();
        }
        public void StreamThreadNormalWorkflow()
        {
            List <ThreadState> allStates = new List <ThreadState>();
            var expectedStates           = new List <ThreadState>
            {
                ThreadState.CREATED,
                ThreadState.STARTING,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PENDING_SHUTDOWN,
                ThreadState.DEAD
            };

            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test";
            config.Guarantee     = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs        = 1;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            allStates.Add(thread.State);
            thread.StateChanged += (t, o, n) =>
            {
                Assert.IsInstanceOf <ThreadState>(n);
                allStates.Add(n as ThreadState);
            };

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // WAIT FOR STREAMTHREAD TO PROCESS THE MESSAGE
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            source.Cancel();
            thread.Dispose();

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value));
            Assert.AreEqual(expectedStates, allStates);
        }
Example #16
        private void TaskManagerRestorationChangelog(bool persistentStateStore = false)
        {
            var stateDir = Path.Combine(".", Guid.NewGuid().ToString());
            var config   = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-restoration-changelog-app";
            config.StateDir      = stateDir;

            var builder = new StreamBuilder();

            builder.Table("topic",
                          persistenStateStore
                    ? RocksDb <string, string> .As("store").WithLoggingEnabled(null)
                    : InMemory <string, string> .As("store").WithLoggingEnabled(null));

            var serdes = new StringSerDes();

            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var supplier        = new SyncKafkaSupplier();
            var producer        = supplier.GetProducer(config.ToProducerConfig());
            var consumer        = supplier.GetConsumer(config.ToConsumerConfig(), null);
            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            var part = new TopicPartition("topic", 0);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            var task = taskManager.ActiveTaskFor(part);

            IDictionary <TaskId, ITask> tasks = new Dictionary <TaskId, ITask>();

            tasks.Add(task.Id, task);

            taskManager.TryToCompleteRestoration();
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());


            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            // Process messages
            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            taskManager.CommitAll();

            // Simulate a close followed by a fresh open
            taskManager.Close();

            restoreConsumer.Resume(new TopicPartition("test-restoration-changelog-app-store-changelog", 0).ToSingle());

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            task  = taskManager.ActiveTaskFor(part);
            tasks = new Dictionary <TaskId, ITask>();
            tasks.Add(task.Id, task);

            Assert.IsFalse(taskManager.TryToCompleteRestoration());
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());

            var store = task.GetStore("store");
            var items = (store as ITimestampedKeyValueStore <string, string>).All().ToList();

            Assert.AreEqual(5, items.Count);

            taskManager.Close();

            if (persistentStateStore)
            {
                Directory.Delete(stateDir, true);
            }
        }