public void CreateProducerClient()
        {
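            // The supplier should return a producer whose name is prefixed with the client id passed to ToProducerConfig ("produce#...").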
            var supplier = new DefaultKafkaClientSupplier(new KafkaLoggerAdapter(config));
            var produce  = supplier.GetProducer(config.ToProducerConfig("produce"));

            Assert.IsNotNull(produce);
            Assert.AreEqual("produce", produce.Name.Split("#")[0]);
        }
        public void WithNullMaterialize()
        {
            // Verifies that even when Materialized is null, a state store is created for the count processor with a generated store name.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-count";

            var builder = new StreamBuilder();
            Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null;

            builder
            .Table <string, string>("topic")
            .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

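            // Two stores are expected: one backing the source KTable and one for the count aggregate, both with generated names.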
            Assert.AreEqual(2, task.Context.States.StateStoreNames.Count());
            var nameStore1 = task.Context.States.StateStoreNames.ElementAt(0);
            var nameStore2 = task.Context.States.StateStoreNames.ElementAt(1);

            Assert.IsNotNull(nameStore1);
            Assert.IsNotNull(nameStore2);
            Assert.AreNotEqual(string.Empty, nameStore1);
            Assert.AreNotEqual(string.Empty, nameStore2);
            var store1 = task.GetStore(nameStore1);
            var store2 = task.GetStore(nameStore2);

            Assert.IsInstanceOf <TimestampedKeyValueStore <string, string> >(store1);
            Assert.IsInstanceOf <TimestampedKeyValueStore <string, long> >(store2);
            Assert.AreEqual(0, (store1 as TimestampedKeyValueStore <string, string>).ApproximateNumEntries());
            Assert.AreEqual(0, (store2 as TimestampedKeyValueStore <string, long>).ApproximateNumEntries());
        }
        public void Initialize()
        {
            streamMetricsRegistry
                = new StreamMetricsRegistry(Guid.NewGuid().ToString(),
                                            MetricsRecordingLevel.DEBUG);

            config.ApplicationId    = "test-stream-thread";
            config.StateDir         = Guid.NewGuid().ToString();
            config.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs           = 10;
            config.CommitIntervalMs = 1;

            var builder = new StreamBuilder();
            var stream  = builder.Stream <string, string>("topic");

            stream.GroupByKey().Count();
            stream.To("topic2");

            var topo = builder.Build();

            id = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topo.Builder.BuildTopology(id);

            syncKafkaSupplier = new SyncKafkaSupplier();
            var producer = syncKafkaSupplier.GetProducer(config.ToProducerConfig());
            var consumer = syncKafkaSupplier.GetConsumer(config.ToConsumerConfig(), null);

            topicPartition = new TopicPartition("topic", 0);
            task           = new StreamTask(
                threadId,
                id,
                new List <TopicPartition> {
                topicPartition
            },
                processorTopology,
                consumer,
                config,
                syncKafkaSupplier,
                null,
                new MockChangelogRegister(),
                streamMetricsRegistry);
            task.GroupMetadata = consumer as SyncConsumer;

            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
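            // The active-restoration gauge should read 1 while the task is restoring and drop back to 0 once restoration completes.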
            var activeRestorationSensor = streamMetricsRegistry.GetSensors().FirstOrDefault(s => s.Name.Equals(GetSensorName(TaskMetrics.ACTIVE_RESTORATION)));

            Assert.AreEqual(1,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
            task.CompleteRestoration();
            Assert.AreEqual(0,
                            activeRestorationSensor.Metrics[MetricName.NameAndGroup(
                                                                TaskMetrics.ACTIVE_RESTORATION,
                                                                StreamMetricsRegistry.TASK_LEVEL_GROUP)].Value);
        }
        public void TaskManagerCommitWithoutCommitNeeded()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

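            // Four partitions yield four active tasks; since nothing has been processed, CommitAll() should commit zero tasks.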
            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.CommitAll());
            taskManager.Close();
        }
        public void GetCurrentPartitionMetadataTests()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId  = "test";
            config.Guarantee      = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs         = 1;
            config.FollowMetadata = true;
            var configConsumer = config.Clone();

            configConsumer.ApplicationId = "test-consumer";
            int? h = null;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .MapValues((v) =>
            {
                h = StreamizMetadata.GetCurrentPartitionMetadata();
                return(v);
            })
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

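            // Start the thread and produce one record; the MapValues lambda captures the current partition metadata as a side effect.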
            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            consumer.Subscribe("output");
            ConsumeResult <byte[], byte[]> result = null;

            do
            {
                result = consumer.Consume(100);
            } while (result == null);


            source.Cancel();
            thread.Dispose();

            Assert.NotNull(h);
            Assert.AreEqual(0, h);
        }
        public void GetElementInStateStore()
        {
            var      timeout        = TimeSpan.FromSeconds(10);
            var      source         = new CancellationTokenSource();
            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test";
            config.PollMs        = 10;
            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder.Table("topic", InMemory <string, string> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            stream.Start(source.Token);
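            // Wait until the stream reaches RUNNING, or give up after the 10 s timeout.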
            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                producer.Produce("topic",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key   = serdes.Serialize("key1", new SerializationContext()),
                    Value = serdes.Serialize("coucou", new SerializationContext())
                });
                Thread.Sleep(50);
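                // Query the materialized key-value store and verify the produced record is visible.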
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore <string, string>()));
                Assert.IsNotNull(store);
                Assert.AreEqual(1, store.ApproximateNumEntries());
                var item = store.Get("key1");
                Assert.IsNotNull(item);
                Assert.AreEqual("coucou", item);
            }

            source.Cancel();
            stream.Close();
        }
        public void WithNullMaterialize()
        {
            // Verifies that even when Materialized is null, a state store is created for the windowed count processor with a generated store name.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-count";

            var builder = new StreamBuilder();
            Materialized <string, long, IWindowStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

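            // A single window store with a generated name is expected for the windowed count.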
            Assert.AreEqual(1, task.Context.States.StateStoreNames.Count());
            var nameStore = task.Context.States.StateStoreNames.ElementAt(0);

            Assert.IsNotNull(nameStore);
            Assert.AreNotEqual(string.Empty, nameStore);
            var store = task.GetStore(nameStore);

            Assert.IsInstanceOf <ITimestampedWindowStore <string, long> >(store);
            Assert.AreEqual(0, (store as ITimestampedWindowStore <string, long>).All().ToList().Count);
        }
        public void StreamThreadCommitIntervalWorkflow()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs           = 1;
            config.CommitIntervalMs = 1;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // Wait for the stream thread to process the message
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));

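            // With CommitIntervalMs = 1 the thread commits almost immediately, so the committed offset for topic/0 should be 1.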
            var offsets = thread.GetCommittedOffsets(new List <TopicPartition> {
                new TopicPartition("topic", 0)
            },
                                                     TimeSpan.FromSeconds(10)).ToList();

            Assert.AreEqual(1, offsets.Count);
            Assert.AreEqual(1, offsets[0].Offset.Value);
            Assert.AreEqual(0, offsets[0].TopicPartition.Partition.Value);
            Assert.AreEqual("topic", offsets[0].Topic);

            source.Cancel();
            thread.Dispose();
        }
        public void TaskManagerAssignedUnknownPartitions()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1)
            });

            taskManager.RevokeTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 1)
            });

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2)
            });

            taskManager.TryToCompleteRestoration();

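            // Re-assigning topic/1 and adding the previously unknown topic/2 should leave three active tasks and none revoked.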
            Assert.AreEqual(3, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.RevokedTasks.Count());
            taskManager.Close();
        }
        private StreamTask GetTask(TaskId taskId)
        {
            var config = new StreamConfig();

            config.ClientId      = "test";
            config.ApplicationId = "test-app";

            var streamTask = new StreamTask(
                "thread",
                taskId,
                new List <TopicPartition>(),
                new Stream.Internal.ProcessorTopology(null, null, null, null, null, null, null, null),
                null, config, null, new SyncProducer(config.ToProducerConfig()), new MockChangelogRegister(), new StreamMetricsRegistry());

            return(streamTask);
        }
        public void WithNullMaterialize()
        {
            // Verifies that, unlike the count cases above, a null Materialized makes state store initialization fail for this aggregate.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-count";

            var builder = new StreamBuilder();
            Materialized <string, int, IWindowStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Aggregate(
                () => 0,
                (k, v, agg) => Math.Max(v.Length, agg),
                m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
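            // Materialized is null and the aggregate value type (int) has no configured serdes, so initializing the state stores is expected to throw.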
            Assert.Throws <StreamsException>(() => task.InitializeStateStores());
        }
        public void StreamAddCorrectConfig()
        {
            var stream = new StreamConfig();

            stream.ApplicationId = "unittest";
            stream.AddConfig("sasl.password", "coucou");
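            // The raw "sasl.password" entry should propagate into every derived client configuration.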

            var adminConfig    = stream.ToAdminConfig("admin");
            var consumerConfig = stream.ToConsumerConfig();
            var producerConfig = stream.ToProducerConfig();
            var globalConfig   = stream.ToGlobalConsumerConfig("global");

            Assert.AreEqual("coucou", adminConfig.SaslPassword);
            Assert.AreEqual("coucou", consumerConfig.SaslPassword);
            Assert.AreEqual("coucou", producerConfig.SaslPassword);
            Assert.AreEqual("coucou", globalConfig.SaslPassword);
        }
        public void WithNullMaterialize()
        {
            // Verifies that state store initialization fails for this windowed reduce when no serdes can be resolved.
            var config = new StreamConfig();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-reduce";

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Reduce((v1, v2) => v1.Length > v2.Length ? v1 : v2);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
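            // The non-generic StreamConfig declares no default serdes, so initializing the state stores is expected to throw.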
            Assert.Throws <StreamsException>(() => task.InitializeStateStores());
        }
        public static UnassignedStreamTask Create(TaskId id)
        {
            var config = new StreamConfig();

            config.ApplicationId = "un-assigned-stream-task";
            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            return(new UnassignedStreamTask(
                       "un-assigned-stream-task",
                       id,
                       new List <TopicPartition>(),
                       ProcessorTopology.EMPTY,
                       consumer,
                       config,
                       supplier,
                       producer));
        }
        public static UnassignedStreamTask Create()
        {
            var config = new StreamConfig();

            config.ApplicationId = "un-assigned-stream-task";
            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            return(new UnassignedStreamTask(
                       "un-assigned-stream-task",
                       new TaskId {
                Id = -1, Partition = -1
            },
                       new List <TopicPartition>(),
                       null,
                       consumer,
                       config,
                       supplier,
                       producer));
        }
        public async Task exec(IConfiguration config, IServiceProvider services)
        {
            Console.WriteLine("Process");

            var destTopic = config["spring.cloud.stream.bindings.output.destination"];

            Console.WriteLine(destTopic);



            using (var scope = services.CreateScope())
            {
                this._dataService = scope.ServiceProvider
                                    .GetRequiredService <IDataService>();

                bool isRunningState = false;

                var      timeout = TimeSpan.FromSeconds(10);
                DateTime dt      = DateTime.Now;

                Order[] capture = this._dataService.readData();

                // Inject the data we just read into the stream


                var sConfig = new StreamConfig <StringSerDes, StringSerDes>();
                sConfig.ApplicationId       = config["SPRING_CLOUD_APPLICATION_GUID"];
                sConfig.BootstrapServers    = config["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"];
                sConfig.SchemaRegistryUrl   = config["SchemaRegistryUrl"];
                sConfig.AutoRegisterSchemas = true;
                sConfig.NumStreamThreads    = 10;
                sConfig.Acks = Acks.All;
                sConfig.AddConsumerConfig("allow.auto.create.topics", "true");
                sConfig.InnerExceptionHandler = (e) => ExceptionHandlerResponse.CONTINUE;

                var schemaRegistryClient = new CachedSchemaRegistryClient
                                               (new SchemaRegistryConfig
                {
                    Url = sConfig.SchemaRegistryUrl
                });

                var supplier = new SyncKafkaSupplier(new KafkaLoggerAdapter(sConfig));

                var producerConfig = sConfig.ToProducerConfig();
                var adminConfig    = sConfig.ToAdminConfig(sConfig.ApplicationId);

                var admin = supplier.GetAdmin(adminConfig);

                // try
                // {

                //     var topic = new TopicSpecification
                //     {
                //         Name = destTopic,
                //         NumPartitions = 1,
                //         ReplicationFactor = 3
                //     };
                //     var topicProduct = new TopicSpecification
                //     {
                //         Name = "product-external",
                //         NumPartitions = 1,
                //         ReplicationFactor = 3
                //     };


                //     IList<TopicSpecification> topics = new List<TopicSpecification>();

                //     topics.Add(topic);
                //     topics.Add(topicProduct);

                //     await admin.CreateTopicsAsync(topics);
                // }
                // catch (Exception topicExists)
                // {
                //     Console.WriteLine("Topic already exists");
                //     Console.Write(topicExists);
                // }

                var producer = supplier.GetProducer(producerConfig);

                StreamBuilder builder = new StreamBuilder();

                var serdes    = new SchemaAvroSerDes <Order>();
                var keySerdes = new Int32SerDes();

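                // Materialize the destination topic into an in-memory table keyed by int, named after config["table"].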
                builder.Table(destTopic, keySerdes, serdes, InMemory <int, Order> .As(config["table"]));

                var         t      = builder.Build();
                KafkaStream stream = new KafkaStream(t, sConfig, supplier);

                stream.StateChanged += (old, @new) =>
                {
                    if (@new.Equals(KafkaStream.State.RUNNING))
                    {
                        isRunningState = true;
                    }
                };

                await stream.StartAsync();

                while (!isRunningState)
                {
                    Thread.Sleep(250);
                    if (DateTime.Now > dt + timeout)
                    {
                        break;
                    }
                }

                if (isRunningState)
                {
                    // Create a well-formatted Endpoint in the external topic
                    var endpProducer = new ProducerBuilder <byte[], Endpoint>(producerConfig)
                                       .SetValueSerializer(new AvroSerializer <Endpoint>(schemaRegistryClient, new AvroSerializerConfig {
                        AutoRegisterSchemas = true
                    }).AsSyncOverAsync()).Build();

                    // Create a well-formatted Product in the external topic
                    var productProducer = new ProducerBuilder <byte[], Product>(producerConfig)
                                          .SetValueSerializer(new AvroSerializer <Product>(schemaRegistryClient, new AvroSerializerConfig {
                        AutoRegisterSchemas = true
                    }).AsSyncOverAsync()).Build();

                    for (int k = 1; k < 10; k++)
                    {
                        endpProducer.Produce("api-endpoints",
                                             new Message <byte[], Endpoint>
                        {
                            Key   = new Int32SerDes().Serialize(k, new SerializationContext()),
                            Value = new Endpoint
                            {
                                endpoint_id  = ("endpoint" + k),
                                endpoint_url = ("http://endpoint" + k + "/"),
                                http_method  = "POST"
                            }
                        }, (d) =>
                        {
                            if (d.Status == PersistenceStatus.Persisted)
                            {
                                Console.WriteLine("Endpoint Message sent !");
                            }
                        });

                        productProducer.Produce("product-external",
                                                new Message <byte[], Product>
                        {
                            Key   = new Int32SerDes().Serialize(1, new SerializationContext()),
                            Value = new Product
                            {
                                name       = "Producto de Software",
                                price      = 1234.5F,
                                product_id = 3
                            }
                        }, (d) =>
                        {
                            if (d.Status == PersistenceStatus.Persisted)
                            {
                                Console.WriteLine("Product Message sent !");
                            }
                        });
                    }

                    Thread.Sleep(10);

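                    // Now feed orders into the destination topic that backs the KTable above.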
                    for (int k = 1; k < 10; k++)
                    {
                        producer.Produce(destTopic,
                                         new Confluent.Kafka.Message <byte[], byte[]>
                        {
                            Key   = keySerdes.Serialize(k, new SerializationContext()),
                            Value = serdes.Serialize(new Order
                            {
                                order_id   = k,
                                price      = 123.5F,
                                product_id = k
                            }, new SerializationContext())
                        }, (d) =>
                        {
                            if (d.Status == PersistenceStatus.Persisted)
                            {
                                Console.WriteLine("Order Message sent !");
                            }
                        });
                    }


                    Thread.Sleep(50);
                }
            }
        }
        public async Task GetWindowElementInStateStore()
        {
            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";
            config.PollMs           = 10;

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(TimeSpan.FromMinutes(1)))
            .Count(InMemoryWindows <string, long> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                dt = DateTime.Now;
                producer.Produce("test",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key       = serdes.Serialize("key1", new SerializationContext()),
                    Value     = serdes.Serialize("coucou", new SerializationContext()),
                    Timestamp = new Confluent.Kafka.Timestamp(dt)
                });
                Thread.Sleep(50);
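                // Fetch from the window store in a +/- 1 minute range around the produce timestamp.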
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.WindowStore <string, long>()));
                Assert.IsNotNull(store);
                var @enum = store.All();
                Assert.AreEqual(1, @enum.ToList().Count);
                var item = store.Fetch("key1", dt.AddMinutes(-1), dt.AddMinutes(1));
                Assert.IsNotNull(item);
                Assert.IsTrue(item.MoveNext());
                Assert.IsTrue(item.Current.HasValue);
                Assert.AreEqual(1, item.Current.Value.Value);
                item.Dispose();
            }

            stream.Dispose();
        }
        public async Task GetRangeKVStateStore()
        {
            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";
            config.PollMs           = 10;

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder.Table("topic", InMemory <string, string> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                producer.Produce("topic",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key   = serdes.Serialize("key1", new SerializationContext()),
                    Value = serdes.Serialize("coucou", new SerializationContext())
                });
                Thread.Sleep(50);
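                // A range scan over ["key1", "key2"] should return exactly the one record produced.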
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore <string, string>()));
                Assert.IsNotNull(store);
                var list = store.Range("key1", "key2").ToList();
                Assert.AreEqual(1, list.Count);
                var item = list[0];
                Assert.IsNotNull(item);
                Assert.AreEqual("coucou", item.Value);
                Assert.AreEqual("key1", item.Key);
            }

            stream.Dispose();
        }
        public void StreamTaskSuspendResume()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Table <string, string>("topic", InMemory <string, string> .As("store").WithLoggingDisabled())
            .MapValues((k, v) => v.ToUpper())
            .ToStream()
            .To("topic2");


            TaskId id = new TaskId {
                Id = 0, Partition = 0
            };
            var topology          = builder.Build();
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister()
                , new StreamMetricsRegistry());

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

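            // Suspending the task closes its state stores; resuming plus RestorationIfNeeded() makes them available again.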
            Assert.IsNotNull(task.GetStore("store"));
            task.Suspend();
            Assert.IsNull(task.GetStore("store"));
            task.Resume();
            task.RestorationIfNeeded();

            Assert.IsNotNull(task.GetStore("store"));
            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // Check the records forwarded to topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(10, results.Count);

            task.Close();
        }
        public void StreamThreadNormalWorkflow()
        {
            bool metricsReporterCalled   = false;
            List <ThreadState> allStates = new List <ThreadState>();
            var expectedStates           = new List <ThreadState>
            {
                ThreadState.CREATED,
                ThreadState.STARTING,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PENDING_SHUTDOWN,
                ThreadState.DEAD
            };

            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId   = "test";
            config.Guarantee       = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs          = 1;
            config.MetricsReporter = (sensor) => { metricsReporterCalled = true; };
            config.AddOrUpdate(StreamConfig.metricsIntervalMsCst, 10);

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            allStates.Add(thread.State);
            thread.StateChanged += (t, o, n) =>
            {
                Assert.IsInstanceOf <ThreadState>(n);
                allStates.Add(n as ThreadState);
            };

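            // Run a full lifecycle: start, process one message, then cancel; the recorded states must match the expected sequence.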
            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // Wait for the stream thread to process the message
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            source.Cancel();
            thread.Dispose();

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));
            Assert.AreEqual(expectedStates, allStates);
            Assert.IsTrue(metricsReporterCalled);
        }
        public void StandardWorkflowTaskManager()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";

            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

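            // Each assigned partition should yield one active task with no state stores and nothing to commit yet.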
            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());
            for (int i = 0; i < 4; ++i)
            {
                var task = taskManager.ActiveTaskFor(new TopicPartition("topic", i));
                Assert.IsNotNull(task);
                Assert.AreEqual("test-app", task.ApplicationId);
                Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
                Assert.IsFalse(task.CommitNeeded);
                Assert.IsFalse(task.HasStateStores);
            }

            // Revoke 2 partitions
            taskManager.RevokeTasks(new List <TopicPartition> {
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });
            Assert.AreEqual(2, taskManager.ActiveTasks.Count());
            Assert.AreEqual(2, taskManager.RevokedTasks.Count());
            for (int i = 0; i < 2; ++i)
            {
                var task = taskManager.ActiveTaskFor(new TopicPartition("topic", i));
                Assert.IsNotNull(task);
                Assert.AreEqual("test-app", task.ApplicationId);
                Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
                Assert.IsFalse(task.CommitNeeded);
                Assert.IsFalse(task.HasStateStores);
            }

            var taskFailed = taskManager.ActiveTaskFor(new TopicPartition("topic", 2));

            Assert.IsNull(taskFailed);

            taskManager.Close();
            Assert.AreEqual(0, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.RevokedTasks.Count());
        }
        public async Task KafkaStreamDeserializationExceptionHandlerContinueTest()
        {
            var _return = new List <KeyValuePair <string, string> >();

            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <SerdesThrowException, SerdesThrowException>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";
            config.PollMs           = 10;
            config.DeserializationExceptionHandler += (c, r, e) => ExceptionHandlerResponse.CONTINUE;

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .Peek((k, v) => _return.Add(KeyValuePair.Create(k, v)));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            while (!isRunningState)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var serdes = new StringSerDes();
                dt = DateTime.Now;
                producer.Produce("test",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key       = serdes.Serialize("k", new SerializationContext()),
                    Value     = serdes.Serialize("test", new SerializationContext()),
                    Timestamp = new Confluent.Kafka.Timestamp(dt)
                });
                producer.Produce("test",
                                 new Confluent.Kafka.Message <byte[], byte[]>
                {
                    Key       = serdes.Serialize("v", new SerializationContext()),
                    Value     = serdes.Serialize("test2", new SerializationContext()),
                    Timestamp = new Confluent.Kafka.Timestamp(dt)
                });
                Thread.Sleep(1000);
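                // With the CONTINUE handler, records failing deserialization are skipped; only ("v", "test2") is expected downstream.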
                var expected = new List <KeyValuePair <string, string> >();
                expected.Add(KeyValuePair.Create("v", "test2"));

                Assert.AreEqual(expected, _return);
            }

            stream.Dispose();
        }
        public void StreamTaskWithEXACTLY_ONCE()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.Guarantee     = ProcessingGuarantee.EXACTLY_ONCE;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

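            // Five records go in; five upper-cased records should come out on topic2, exactly once.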
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

            // Check the records forwarded to topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i + 1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            task.Close();
        }
        private void TaskManagerRestorationChangelog(bool persistentStateStore = false)
        {
            var stateDir = Path.Combine(".", Guid.NewGuid().ToString());
            var config   = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-restoration-changelog-app";
            config.StateDir      = stateDir;

            var builder = new StreamBuilder();

            builder.Table("topic",
                          persistentStateStore
                    ? RocksDb <string, string> .As("store").WithLoggingEnabled(null)
                    : InMemory <string, string> .As("store").WithLoggingEnabled(null));

            var serdes = new StringSerDes();

            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var supplier        = new SyncKafkaSupplier();
            var producer        = supplier.GetProducer(config.ToProducerConfig());
            var consumer        = supplier.GetConsumer(config.ToConsumerConfig(), null);
            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            var part = new TopicPartition("topic", 0);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            var task = taskManager.ActiveTaskFor(part);

            IDictionary <TaskId, ITask> tasks = new Dictionary <TaskId, ITask>();

            tasks.Add(task.Id, task);

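            // Drive the changelog reader once so that restoration can actually complete.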
            taskManager.TryToCompleteRestoration();
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());


            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            // Process messages
            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            taskManager.CommitAll();

            // Simulate Close + new open
            taskManager.Close();

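            // Resume the changelog partition so the restore consumer can replay it into the recreated task's store.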
            restoreConsumer.Resume(new TopicPartition("test-restoration-changelog-app-store-changelog", 0).ToSingle());

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            task  = taskManager.ActiveTaskFor(part);
            tasks = new Dictionary <TaskId, ITask>();
            tasks.Add(task.Id, task);

            Assert.IsFalse(taskManager.TryToCompleteRestoration());
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());

            var store = task.GetStore("store");
            var items = (store as ITimestampedKeyValueStore <string, string>).All().ToList();

            Assert.AreEqual(5, items.Count);

            taskManager.Close();

            if (persistentStateStore)
            {
                Directory.Delete(stateDir, true);
            }
        }
        public void StreamTaskWritingCheckpoint()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            config.StateDir      = Path.Combine(".", Guid.NewGuid().ToString());

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            var table = builder.Table("topic", RocksDb <string, string> .As("store").WithLoggingEnabled());

            TaskId id = new TaskId {
                Id = 0, Partition = 0
            };
            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                producer,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

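            // Bring the task online: state stores first, then the topology, then restoration.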
            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
                Assert.IsTrue(task.CommitNeeded);
                task.Commit();
            }

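            // MayWriteCheckpoint(true) forces the checkpoint to be written, regardless of
            // how many offsets were processed since the last one.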
            task.MayWriteCheckpoint(true);

            messages = new List <ConsumeResult <byte[], byte[]> >();
            for (int i = 0; i < StateManagerTools.OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT + 10; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

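            // With force = false, the checkpoint is written only because the offset delta
            // now exceeds OFFSET_DELTA_THRESHOLD_FOR_CHECKPOINT.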
            task.MayWriteCheckpoint(false);

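            // Expected checkpoint layout (a sketch; the first two lines are presumably
            // the format version and the entry count):
            //   0
            //   1
            //   test-app-store-changelog 0 10014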
            var lines = File.ReadAllLines(Path.Combine(config.StateDir, config.ApplicationId, "0-0", ".checkpoint"));

            Assert.AreEqual(3, lines.Length);
            Assert.AreEqual("test-app-store-changelog 0 10014", lines[2]);

            task.Suspend();
            task.Close();

            Directory.Delete(config.StateDir, true);
        }
        public void StreamCompleteConfigAllProperty()
        {
            var stream = new StreamConfig();

            stream.ApplicationId                       = "test";
            stream.Acks                                = Confluent.Kafka.Acks.All;
            stream.ApiVersionFallbackMs                = 1;
            stream.ApiVersionRequest                   = false;
            stream.ApiVersionRequestTimeoutMs          = 100;
            stream.AutoOffsetReset                     = Confluent.Kafka.AutoOffsetReset.Latest;
            stream.BatchNumMessages                    = 42;
            stream.BootstrapServers                    = "127.0.0.1:9092";
            stream.BrokerAddressFamily                 = Confluent.Kafka.BrokerAddressFamily.V4;
            stream.BrokerAddressTtl                    = 100;
            stream.BrokerVersionFallback               = "0.12.0";
            stream.CheckCrcs                           = true;
            stream.ClientId                            = "test-client";
            stream.ClientRack                          = "1";
            stream.CommitIntervalMs                    = 300;
            stream.CompressionLevel                    = 2;
            stream.CompressionType                     = Confluent.Kafka.CompressionType.Snappy;
            stream.ConsumeResultFields                 = "all";
            stream.CoordinatorQueryIntervalMs          = 300;
            stream.Debug                               = "all";
            stream.DeliveryReportFields                = "key";
            stream.EnableAutoOffsetStore               = false;
            stream.EnableBackgroundPoll                = false;
            stream.EnableDeliveryReports               = false;
            stream.EnableGaplessGuarantee              = false;
            stream.EnableIdempotence                   = true;
            stream.EnablePartitionEof                  = true;
            stream.EnableSaslOauthbearerUnsecureJwt    = true;
            stream.EnableSslCertificateVerification    = false;
            stream.FetchErrorBackoffMs                 = 10;
            stream.FetchMaxBytes                       = 10;
            stream.FetchMinBytes                       = 10;
            stream.FetchWaitMaxMs                      = 10;
            stream.GroupProtocolType                   = "?";
            stream.HeartbeatIntervalMs                 = 4000;
            stream.InternalTerminationSignal           = 1;
            stream.IsolationLevel                      = Confluent.Kafka.IsolationLevel.ReadCommitted;
            stream.LingerMs                            = 12;
            stream.LogConnectionClose                  = false;
            stream.LogQueue                            = true;
            stream.LogThreadName                       = false;
            stream.MaxInFlight                         = 12;
            stream.MaxPartitionFetchBytes              = 500;
            stream.MaxPollIntervalMs                   = 400;
            stream.MessageCopyMaxBytes                 = 40;
            stream.MessageMaxBytes                     = 500;
            stream.MessageSendMaxRetries               = 4;
            stream.MessageTimeoutMs                    = 600;
            stream.MetadataMaxAgeMs                    = 6;
            stream.MetadataRequestTimeoutMs            = 83;
            stream.PartitionAssignmentStrategy         = Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin;
            stream.Partitioner                         = Confluent.Kafka.Partitioner.Murmur2Random;
            stream.PluginLibraryPaths                  = "D:";
            stream.QueueBufferingBackpressureThreshold = 10;
            stream.QueueBufferingMaxKbytes             = 400;
            stream.QueueBufferingMaxMessages           = 5;
            stream.QueuedMaxMessagesKbytes             = 800;
            stream.QueuedMinMessages                   = 1;
            stream.ReceiveMessageMaxBytes              = 1000;
            stream.ReconnectBackoffMaxMs               = 9000;
            stream.ReconnectBackoffMs                  = 8000;
            stream.RequestTimeoutMs                    = 16600;
            stream.RetryBackoffMs                      = 600;
            stream.SaslKerberosKeytab                  = "test";
            stream.SaslKerberosKinitCmd                = "test";
            stream.SaslKerberosMinTimeBeforeRelogin    = 600;
            stream.SaslKerberosPrincipal               = "Princiapl";
            stream.SaslKerberosServiceName             = "kerberos";
            stream.SaslMechanism                       = Confluent.Kafka.SaslMechanism.ScramSha512;
            stream.SaslOauthbearerConfig               = "ouath";
            stream.SaslPassword                        = "test";
            stream.SaslUsername                        = "admin";
            stream.SecurityProtocol                    = Confluent.Kafka.SecurityProtocol.SaslPlaintext;
            stream.SessionTimeoutMs                    = 1000;
            stream.SocketKeepaliveEnable               = true;
            stream.SocketMaxFails                      = 2;
            stream.SocketNagleDisable                  = true;
            stream.SocketReceiveBufferBytes            = 50000;
            stream.SocketSendBufferBytes               = 50000;
            stream.SocketTimeoutMs                     = 6000;
            stream.SslCaLocation                       = "D:";
            stream.SslCertificateLocation              = "D:";
            stream.SslCertificatePem                   = "D:";
            stream.SslCipherSuites                     = "ciphers";
            stream.SslCrlLocation                      = "D:";
            stream.SslCurvesList                       = "";
            stream.SslEndpointIdentificationAlgorithm  = Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https;
            stream.SslKeyLocation                      = "C:";
            stream.SslKeyPassword                      = "test";
            stream.SslKeyPem                           = "pem";
            stream.SslKeystoreLocation                 = "J:";
            stream.SslKeystorePassword                 = "password";
            stream.SslSigalgsList                      = "oepn";
            stream.StatisticsIntervalMs                = 14;
            stream.TopicBlacklist                      = "*";
            stream.TopicMetadataRefreshFastIntervalMs  = 500;
            stream.TopicMetadataRefreshIntervalMs      = 200;
            stream.TopicMetadataRefreshSparse          = false;
            stream.TransactionalId                     = "transac";
            stream.TransactionTimeout                  = TimeSpan.FromSeconds(1);
            stream.TransactionTimeoutMs                = 400;

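            // Project the single stream config into the four client configs; each projection
            // should carry over only the properties relevant to that client type.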
            var producerConfig = stream.ToProducerConfig();
            var consumerConfig = stream.ToConsumerConfig();
            var globalConfig   = stream.ToGlobalConsumerConfig("global");
            var adminConfig    = stream.ToAdminConfig("admin");

            #region ProducerConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, producerConfig.Acks);
            Assert.AreEqual(1, producerConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, producerConfig.ApiVersionRequest);
            Assert.AreEqual(100, producerConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual(42, producerConfig.BatchNumMessages);
            Assert.AreEqual("127.0.0.1:9092", producerConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, producerConfig.BrokerAddressFamily);
            Assert.AreEqual(100, producerConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", producerConfig.BrokerVersionFallback);
            Assert.AreEqual("test-client", producerConfig.ClientId);
            Assert.AreEqual("1", producerConfig.ClientRack);
            Assert.AreEqual(2, producerConfig.CompressionLevel);
            Assert.AreEqual(Confluent.Kafka.CompressionType.Snappy, producerConfig.CompressionType);
            Assert.AreEqual("all", producerConfig.Debug);
            Assert.AreEqual("key", producerConfig.DeliveryReportFields);
            Assert.AreEqual(false, producerConfig.EnableBackgroundPoll);
            Assert.AreEqual(false, producerConfig.EnableDeliveryReports);
            Assert.AreEqual(false, producerConfig.EnableGaplessGuarantee);
            Assert.AreEqual(true, producerConfig.EnableIdempotence);
            Assert.AreEqual(true, producerConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, producerConfig.EnableSslCertificateVerification);
            Assert.AreEqual(1, producerConfig.InternalTerminationSignal);
            Assert.AreEqual(12, producerConfig.LingerMs);
            Assert.AreEqual(false, producerConfig.LogConnectionClose);
            Assert.AreEqual(true, producerConfig.LogQueue);
            Assert.AreEqual(false, producerConfig.LogThreadName);
            Assert.AreEqual(12, producerConfig.MaxInFlight);
            Assert.AreEqual(40, producerConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, producerConfig.MessageMaxBytes);
            Assert.AreEqual(4, producerConfig.MessageSendMaxRetries);
            Assert.AreEqual(600, producerConfig.MessageTimeoutMs);
            Assert.AreEqual(6, producerConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, producerConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.Partitioner.Murmur2Random, producerConfig.Partitioner);
            Assert.AreEqual("D:", producerConfig.PluginLibraryPaths);
            Assert.AreEqual(10, producerConfig.QueueBufferingBackpressureThreshold);
            Assert.AreEqual(400, producerConfig.QueueBufferingMaxKbytes);
            Assert.AreEqual(5, producerConfig.QueueBufferingMaxMessages);
            Assert.AreEqual(1000, producerConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, producerConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, producerConfig.ReconnectBackoffMs);
            Assert.AreEqual(16600, producerConfig.RequestTimeoutMs);
            Assert.AreEqual(600, producerConfig.RetryBackoffMs);
            Assert.AreEqual("test", producerConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", producerConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, producerConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", producerConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", producerConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, producerConfig.SaslMechanism);
            Assert.AreEqual("ouath", producerConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", producerConfig.SaslPassword);
            Assert.AreEqual("admin", producerConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, producerConfig.SecurityProtocol);
            Assert.AreEqual(true, producerConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, producerConfig.SocketMaxFails);
            Assert.AreEqual(true, producerConfig.SocketNagleDisable);
            Assert.AreEqual(50000, producerConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, producerConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, producerConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", producerConfig.SslCaLocation);
            Assert.AreEqual("D:", producerConfig.SslCertificateLocation);
            Assert.AreEqual("D:", producerConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", producerConfig.SslCipherSuites);
            Assert.AreEqual("D:", producerConfig.SslCrlLocation);
            Assert.AreEqual("", producerConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, producerConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", producerConfig.SslKeyLocation);
            Assert.AreEqual("test", producerConfig.SslKeyPassword);
            Assert.AreEqual("pem", producerConfig.SslKeyPem);
            Assert.AreEqual("J:", producerConfig.SslKeystoreLocation);
            Assert.AreEqual("password", producerConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", producerConfig.SslSigalgsList);
            Assert.AreEqual(14, producerConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", producerConfig.TopicBlacklist);
            Assert.AreEqual(500, producerConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, producerConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, producerConfig.TopicMetadataRefreshSparse);
            Assert.AreEqual("transac", producerConfig.TransactionalId);
            Assert.AreEqual(400, producerConfig.TransactionTimeoutMs);

            #endregion

            #region ConsumerConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, consumerConfig.Acks);
            Assert.AreEqual(1, consumerConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, consumerConfig.ApiVersionRequest);
            Assert.AreEqual(100, consumerConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.AutoOffsetReset.Latest, consumerConfig.AutoOffsetReset);
            Assert.AreEqual("127.0.0.1:9092", consumerConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, consumerConfig.BrokerAddressFamily);
            Assert.AreEqual(100, consumerConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", consumerConfig.BrokerVersionFallback);
            Assert.AreEqual(true, consumerConfig.CheckCrcs);
            Assert.AreEqual("test-client", consumerConfig.ClientId);
            Assert.AreEqual("1", consumerConfig.ClientRack);
            Assert.AreEqual(300, consumerConfig.CoordinatorQueryIntervalMs);
            Assert.AreEqual("all", consumerConfig.Debug);
            Assert.AreEqual(false, consumerConfig.EnableAutoOffsetStore);
            Assert.AreEqual(true, consumerConfig.EnablePartitionEof);
            Assert.AreEqual(true, consumerConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, consumerConfig.EnableSslCertificateVerification);
            Assert.AreEqual(10, consumerConfig.FetchErrorBackoffMs);
            Assert.AreEqual(10, consumerConfig.FetchMaxBytes);
            Assert.AreEqual(10, consumerConfig.FetchMinBytes);
            Assert.AreEqual(10, consumerConfig.FetchWaitMaxMs);
            Assert.AreEqual("?", consumerConfig.GroupProtocolType);
            Assert.AreEqual(4000, consumerConfig.HeartbeatIntervalMs);
            Assert.AreEqual(1, consumerConfig.InternalTerminationSignal);
            Assert.AreEqual(Confluent.Kafka.IsolationLevel.ReadCommitted, consumerConfig.IsolationLevel);
            Assert.AreEqual(false, consumerConfig.LogConnectionClose);
            Assert.AreEqual(true, consumerConfig.LogQueue);
            Assert.AreEqual(false, consumerConfig.LogThreadName);
            Assert.AreEqual(12, consumerConfig.MaxInFlight);
            Assert.AreEqual(500, consumerConfig.MaxPartitionFetchBytes);
            Assert.AreEqual(400, consumerConfig.MaxPollIntervalMs);
            Assert.AreEqual(40, consumerConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, consumerConfig.MessageMaxBytes);
            Assert.AreEqual(6, consumerConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, consumerConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin, consumerConfig.PartitionAssignmentStrategy);
            Assert.AreEqual("D:", consumerConfig.PluginLibraryPaths);
            Assert.AreEqual(800, consumerConfig.QueuedMaxMessagesKbytes);
            Assert.AreEqual(1, consumerConfig.QueuedMinMessages);
            Assert.AreEqual(1000, consumerConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, consumerConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, consumerConfig.ReconnectBackoffMs);
            Assert.AreEqual("test", consumerConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", consumerConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, consumerConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", consumerConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", consumerConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, consumerConfig.SaslMechanism);
            Assert.AreEqual("ouath", consumerConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", consumerConfig.SaslPassword);
            Assert.AreEqual("admin", consumerConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, consumerConfig.SecurityProtocol);
            Assert.AreEqual(1000, consumerConfig.SessionTimeoutMs);
            Assert.AreEqual(true, consumerConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, consumerConfig.SocketMaxFails);
            Assert.AreEqual(true, consumerConfig.SocketNagleDisable);
            Assert.AreEqual(50000, consumerConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, consumerConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, consumerConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", consumerConfig.SslCaLocation);
            Assert.AreEqual("D:", consumerConfig.SslCertificateLocation);
            Assert.AreEqual("D:", consumerConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", consumerConfig.SslCipherSuites);
            Assert.AreEqual("D:", consumerConfig.SslCrlLocation);
            Assert.AreEqual("", consumerConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, consumerConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", consumerConfig.SslKeyLocation);
            Assert.AreEqual("test", consumerConfig.SslKeyPassword);
            Assert.AreEqual("pem", consumerConfig.SslKeyPem);
            Assert.AreEqual("J:", consumerConfig.SslKeystoreLocation);
            Assert.AreEqual("password", consumerConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", consumerConfig.SslSigalgsList);
            Assert.AreEqual(14, consumerConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", consumerConfig.TopicBlacklist);
            Assert.AreEqual(500, consumerConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, consumerConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, consumerConfig.TopicMetadataRefreshSparse);
            #endregion

            #region GlobalConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, globalConfig.Acks);
            Assert.AreEqual(1, globalConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, globalConfig.ApiVersionRequest);
            Assert.AreEqual(100, globalConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.AutoOffsetReset.Latest, globalConfig.AutoOffsetReset);
            Assert.AreEqual("127.0.0.1:9092", globalConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, globalConfig.BrokerAddressFamily);
            Assert.AreEqual(100, globalConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", globalConfig.BrokerVersionFallback);
            Assert.AreEqual(true, globalConfig.CheckCrcs);
            Assert.AreEqual("global", globalConfig.ClientId);
            Assert.AreEqual("1", globalConfig.ClientRack);
            Assert.AreEqual(300, globalConfig.CoordinatorQueryIntervalMs);
            Assert.AreEqual("all", globalConfig.Debug);
            Assert.AreEqual(false, globalConfig.EnableAutoOffsetStore);
            Assert.AreEqual(true, globalConfig.EnablePartitionEof);
            Assert.AreEqual(true, globalConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, globalConfig.EnableSslCertificateVerification);
            Assert.AreEqual(10, globalConfig.FetchErrorBackoffMs);
            Assert.AreEqual(10, globalConfig.FetchMaxBytes);
            Assert.AreEqual(10, globalConfig.FetchMinBytes);
            Assert.AreEqual(10, globalConfig.FetchWaitMaxMs);
            Assert.AreEqual("?", globalConfig.GroupProtocolType);
            Assert.AreEqual(4000, globalConfig.HeartbeatIntervalMs);
            Assert.AreEqual(1, globalConfig.InternalTerminationSignal);
            Assert.AreEqual(Confluent.Kafka.IsolationLevel.ReadCommitted, globalConfig.IsolationLevel);
            Assert.AreEqual(false, globalConfig.LogConnectionClose);
            Assert.AreEqual(true, globalConfig.LogQueue);
            Assert.AreEqual(false, globalConfig.LogThreadName);
            Assert.AreEqual(12, globalConfig.MaxInFlight);
            Assert.AreEqual(500, globalConfig.MaxPartitionFetchBytes);
            Assert.AreEqual(400, globalConfig.MaxPollIntervalMs);
            Assert.AreEqual(40, globalConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, globalConfig.MessageMaxBytes);
            Assert.AreEqual(6, globalConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, globalConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin, globalConfig.PartitionAssignmentStrategy);
            Assert.AreEqual("D:", globalConfig.PluginLibraryPaths);
            Assert.AreEqual(800, globalConfig.QueuedMaxMessagesKbytes);
            Assert.AreEqual(1, globalConfig.QueuedMinMessages);
            Assert.AreEqual(1000, globalConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, globalConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, globalConfig.ReconnectBackoffMs);
            Assert.AreEqual("test", globalConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", globalConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, globalConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", globalConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", globalConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, globalConfig.SaslMechanism);
            Assert.AreEqual("ouath", globalConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", globalConfig.SaslPassword);
            Assert.AreEqual("admin", globalConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, globalConfig.SecurityProtocol);
            Assert.AreEqual(1000, globalConfig.SessionTimeoutMs);
            Assert.AreEqual(true, globalConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, globalConfig.SocketMaxFails);
            Assert.AreEqual(true, globalConfig.SocketNagleDisable);
            Assert.AreEqual(50000, globalConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, globalConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, globalConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", globalConfig.SslCaLocation);
            Assert.AreEqual("D:", globalConfig.SslCertificateLocation);
            Assert.AreEqual("D:", globalConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", globalConfig.SslCipherSuites);
            Assert.AreEqual("D:", globalConfig.SslCrlLocation);
            Assert.AreEqual("", globalConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, globalConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", globalConfig.SslKeyLocation);
            Assert.AreEqual("test", globalConfig.SslKeyPassword);
            Assert.AreEqual("pem", globalConfig.SslKeyPem);
            Assert.AreEqual("J:", globalConfig.SslKeystoreLocation);
            Assert.AreEqual("password", globalConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", globalConfig.SslSigalgsList);
            Assert.AreEqual(14, globalConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", globalConfig.TopicBlacklist);
            Assert.AreEqual(500, globalConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, globalConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, globalConfig.TopicMetadataRefreshSparse);
            #endregion

            #region AdminConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, adminConfig.Acks);
            Assert.AreEqual(1, adminConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, adminConfig.ApiVersionRequest);
            Assert.AreEqual(100, adminConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual("127.0.0.1:9092", adminConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, adminConfig.BrokerAddressFamily);
            Assert.AreEqual(100, adminConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", adminConfig.BrokerVersionFallback);
            Assert.AreEqual("admin", adminConfig.ClientId);
            Assert.AreEqual("1", adminConfig.ClientRack);
            Assert.AreEqual("all", adminConfig.Debug);
            Assert.AreEqual(true, adminConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, adminConfig.EnableSslCertificateVerification);
            Assert.AreEqual(1, adminConfig.InternalTerminationSignal);
            Assert.AreEqual(false, adminConfig.LogConnectionClose);
            Assert.AreEqual(true, adminConfig.LogQueue);
            Assert.AreEqual(false, adminConfig.LogThreadName);
            Assert.AreEqual(12, adminConfig.MaxInFlight);
            Assert.AreEqual(40, adminConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, adminConfig.MessageMaxBytes);
            Assert.AreEqual(6, adminConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, adminConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual("D:", adminConfig.PluginLibraryPaths);
            Assert.AreEqual(1000, adminConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, adminConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, adminConfig.ReconnectBackoffMs);
            Assert.AreEqual("test", adminConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", adminConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, adminConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", adminConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", adminConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, adminConfig.SaslMechanism);
            Assert.AreEqual("ouath", adminConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", adminConfig.SaslPassword);
            Assert.AreEqual("admin", adminConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, adminConfig.SecurityProtocol);
            Assert.AreEqual(true, adminConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, adminConfig.SocketMaxFails);
            Assert.AreEqual(true, adminConfig.SocketNagleDisable);
            Assert.AreEqual(50000, adminConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, adminConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, adminConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", adminConfig.SslCaLocation);
            Assert.AreEqual("D:", adminConfig.SslCertificateLocation);
            Assert.AreEqual("D:", adminConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", adminConfig.SslCipherSuites);
            Assert.AreEqual("D:", adminConfig.SslCrlLocation);
            Assert.AreEqual("", adminConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, adminConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", adminConfig.SslKeyLocation);
            Assert.AreEqual("test", adminConfig.SslKeyPassword);
            Assert.AreEqual("pem", adminConfig.SslKeyPem);
            Assert.AreEqual("J:", adminConfig.SslKeystoreLocation);
            Assert.AreEqual("password", adminConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", adminConfig.SslSigalgsList);
            Assert.AreEqual(14, adminConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", adminConfig.TopicBlacklist);
            Assert.AreEqual(500, adminConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, adminConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, adminConfig.TopicMetadataRefreshSparse);
            #endregion
        }
        public void TaskManagerCommit()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

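            // Four assigned partitions of the same topic should yield four active tasks.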
            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());

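            // Feed records to the task owning partition 0 only; the three other tasks stay empty.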
            var part = new TopicPartition("topic", 0);
            var task = taskManager.ActiveTaskFor(part);
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            Assert.IsTrue(task.CanProcess(DateTime.Now.GetMilliseconds()));

            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            // Only one task received records, so only one commit is expected
            Assert.AreEqual(1, taskManager.CommitAll());

            // Check the records forwarded to topic2
            consumer.Subscribe("topic2");
            List <ConsumeResult <byte[], byte[]> > results = new List <ConsumeResult <byte[], byte[]> >();
            ConsumeResult <byte[], byte[]>         result  = null;

            do
            {
                result = consumer.Consume(100);

                if (result != null)
                {
                    results.Add(result);
                    consumer.Commit(result);
                }
            } while (result != null);

            Assert.AreEqual(5, results.Count);
            for (int i = 0; i < 5; ++i)
            {
                Assert.AreEqual($"KEY{i + 1}", serdes.Deserialize(results[i].Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{i+1}", serdes.Deserialize(results[i].Message.Value, new SerializationContext()));
            }

            // No records were buffered for this task
            part = new TopicPartition("topic", 2);
            task = taskManager.ActiveTaskFor(part);
            Assert.IsFalse(task.CanProcess(DateTime.Now.GetMilliseconds()));
            Assert.IsFalse(task.Process());

            taskManager.Close();
        }
        //[Test]
        // TODO: fix this test, then re-enable the [Test] attribute
        public void WorkflowCompleteBufferedRecordsTest()
        {
            int maxBuffered = 10;
            var token       = new System.Threading.CancellationTokenSource();
            var serdes      = new StringSerDes();
            var config      = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-group";
            config.MaxTaskIdleMs = (long)TimeSpan.FromSeconds(100).TotalMilliseconds;
            config.BufferedRecordsPerPartition = maxBuffered;
            config.PollMs = 10;

            var builder = new StreamBuilder();

            var stream1 = builder.Stream <string, string>("topic1");
            var stream2 = builder.Stream <string, string>("topic2");

            stream1
            .Join(stream2, (v1, v2) => $"{v1}-{v2}", JoinWindowOptions.Of(TimeSpan.FromSeconds(10)))
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("output");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(token.Token);

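            // Produce maxBuffered + 1 records so topic1's partition exceeds
            // BufferedRecordsPerPartition and the stream thread pauses it.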
            for (int i = 0; i < maxBuffered + 1; ++i)
            {
                producer.Produce("topic1", new Message <byte[], byte[]>
                {
                    Key   = serdes.Serialize("key", new SerializationContext()),
                    Value = serdes.Serialize($"coucou{i}", new SerializationContext())
                });
            }
            // The stream thread's consumer pauses the partition after maxBuffered + 1 messages
            System.Threading.Thread.Sleep(50);

            // Add one more message while the partition is paused
            producer.Produce("topic1", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"coucou{maxBuffered+1}", new SerializationContext())
            });

            Assert.AreEqual(1, thread.ActiveTasks.Count());
            var task = thread.ActiveTasks.ToArray()[0];

            Assert.IsNotNull(task.Grouper);
            Assert.IsFalse(task.Grouper.AllPartitionsBuffered);
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered());
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered(new TopicPartition("topic1", 0)));
            Assert.AreEqual(0, task.Grouper.NumBuffered(new TopicPartition("topic2", 0)));

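            // A matching record on topic2 lets the join fire and the buffered topic1 records drain.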
            producer.Produce("topic2", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"test", new SerializationContext())
            });

            List <ConsumeResult <byte[], byte[]> > records = new List <ConsumeResult <byte[], byte[]> >();

            do
            {
                records.AddRange(consumer.ConsumeRecords(TimeSpan.FromMilliseconds(100)).ToList());
            } while (records.Count() < maxBuffered + 2);

            Assert.AreEqual(maxBuffered + 2, records.Count());
            for (int i = 0; i < maxBuffered + 2; ++i)
            {
                var message = records.ToArray()[i];
                Assert.AreEqual("key", serdes.Deserialize(message.Message.Key, new SerializationContext()));
                Assert.IsTrue(serdes.Deserialize(message.Message.Value, new SerializationContext()).Contains($"coucou{i}-"));
            }

            token.Cancel();
            thread.Dispose();
        }