Example #1
0
        public void KTableSourceRangeStateStore()
        {
            // Materialize a KTable from "table-topic" into an in-memory store,
            // then verify a key range query returns every piped record in order.
            var streamBuilder = new StreamBuilder();

            streamBuilder.Table("table-topic", InMemory<string, string>.As("table-topic-store"));

            var streamConfig = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test-map"
            };

            Topology topology = streamBuilder.Build();

            using (var driver = new TopologyTestDriver(topology, streamConfig))
            {
                var inputTopic = driver.CreateInputTopic<string, string>("table-topic");
                inputTopic.PipeInput("key1", "1");
                inputTopic.PipeInput("key2", "2");
                inputTopic.PipeInput("key3", "3");

                var store = driver.GetKeyValueStore<string, string>("table-topic-store");
                Assert.IsNotNull(store);

                var results = store.Range("key1", "key3").ToList();

                Assert.AreEqual(3, results.Count);
                // Range is inclusive on both bounds and ordered by key.
                for (int i = 0; i < results.Count; i++)
                {
                    Assert.AreEqual($"key{i + 1}", results[i].Key);
                    Assert.AreEqual($"{i + 1}", results[i].Value);
                }
            }
        }
Example #2
0
        public void SimpleKTableSource()
        {
            // Materialize a KTable from "table-topic" and verify point lookups
            // against the backing in-memory store.
            var streamBuilder = new StreamBuilder();

            streamBuilder.Table("table-topic", InMemory<string, string>.As("table-topic-store"));

            var streamConfig = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test-map"
            };

            Topology topology = streamBuilder.Build();

            using (var driver = new TopologyTestDriver(topology, streamConfig))
            {
                var inputTopic = driver.CreateInputTopic<string, string>("table-topic");
                inputTopic.PipeInput("key1", "1");
                inputTopic.PipeInput("key2", "2");

                var store = driver.GetKeyValueStore<string, string>("table-topic-store");
                Assert.IsNotNull(store);

                // Each key must resolve to the last value piped for it.
                Assert.AreEqual("1", store.Get("key1"));
                Assert.AreEqual("2", store.Get("key2"));
            }
        }
Example #3
0
        public async Task BuildGlobalStateStore()
        {
            // A GlobalKTable-backed store must become queryable once the stream
            // reaches RUNNING and has consumed a record.
            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";
            config.PollMs           = 1;

            var builder = new StreamBuilder();

            builder.GlobalTable <string, string>("test", InMemory <string, string> .As("store"));

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(new ProducerConfig());
            var t        = builder.Build();
            var stream   = new KafkaStream(t, config, supplier);

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            // Poll until the stream is RUNNING or the timeout elapses.
            // FIX: use Task.Delay instead of Thread.Sleep — blocking the thread
            // inside an async method is an anti-pattern.
            while (!isRunningState)
            {
                await Task.Delay(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var stringSerdes = new StringSerDes();
                producer.Produce("test",
                                 new Message <byte[], byte[]>
                {
                    Key   = stringSerdes.Serialize("key", new SerializationContext()),
                    Value = stringSerdes.Serialize("value", new SerializationContext())
                });

                // Give the global state thread time to consume the record.
                await Task.Delay(250);
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore <string, string>()));
                Assert.IsNotNull(store);
                Assert.AreEqual(1, store.ApproximateNumEntries());
            }

            stream.Dispose();
        }
Example #4
0
        public void SourceTopicAlreadyAdded()
        {
            // Registering the same source topic twice must be rejected at build time.
            var topologyBuilder = new StreamBuilder();

            topologyBuilder.Stream<string, string>("table");
            topologyBuilder.Stream<string, string>("table");

            Assert.Throws<TopologyException>(() => topologyBuilder.Build());
        }
        public void WithNullMaterialize()
        {
            // Even when Materialized is null, Count() on a grouped table must
            // create a state store with a generated name for the aggregate processor.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-count";

            var builder = new StreamBuilder();
            // Deliberately null: the store name must be auto-generated.
            Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null;

            builder
            .Table <string, string>("topic")
            .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 1, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            // Repartition topic name embeds the generated store name suffix
            // (NOTE(review): index 0000000005 is coupled to the topology layout above).
            var        part = new TopicPartition("-KTABLE-AGGREGATE-STATE-STORE-0000000005-repartition", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            // Drive the task through its full init/restoration lifecycle so the
            // state stores are registered before inspection.
            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            // Exactly one store must exist, with a non-empty generated name.
            Assert.AreEqual(1, task.Context.States.StateStoreNames.Count());
            var nameStore1 = task.Context.States.StateStoreNames.ElementAt(0);

            Assert.IsNotNull(nameStore1);
            Assert.AreNotEqual(string.Empty, nameStore1);
            var store1 = task.GetStore(nameStore1);

            // The generated store is a timestamped key/value store, still empty.
            Assert.IsInstanceOf <ITimestampedKeyValueStore <string, long> >(store1);
            Assert.AreEqual(0, (store1 as ITimestampedKeyValueStore <string, long>).ApproximateNumEntries());
        }
        public void WithNullMaterialize()
        {
            // Even when Materialized is null, a windowed Count() must create a
            // state store with a generated name.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-count";

            var builder = new StreamBuilder();
            // Deliberately null: the window store name must be auto-generated.
            Materialized <string, long, IWindowStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Count(m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null,
                new MockChangelogRegister(),
                new StreamMetricsRegistry());

            // Drive the task through its full init/restoration lifecycle so the
            // state stores are registered before inspection.
            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();
            task.RestorationIfNeeded();
            task.CompleteRestoration();

            // Exactly one store must exist, with a non-empty generated name.
            Assert.AreEqual(1, task.Context.States.StateStoreNames.Count());
            var nameStore = task.Context.States.StateStoreNames.ElementAt(0);

            Assert.IsNotNull(nameStore);
            Assert.AreNotEqual(string.Empty, nameStore);
            var store = task.GetStore(nameStore);

            // The generated store is a timestamped window store, still empty.
            Assert.IsInstanceOf <ITimestampedWindowStore <string, long> >(store);
            Assert.AreEqual(0, (store as ITimestampedWindowStore <string, long>).All().ToList().Count);
        }
Example #7
0
        public void GetCurrentTimestampMetadataTestsNotConfigured()
        {
            // When metadata is not configured, GetCurrentTimestampMetadata()
            // must return null even while a record is being processed.
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test";
            config.Guarantee     = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs        = 1;
            // Separate consumer group so the verification consumer does not
            // share offsets with the stream thread.
            var configConsumer = config.Clone();

            configConsumer.ApplicationId = "test-consumer";
            // Captured by the MapValues lambda below; must stay null here.
            long?h = null;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .MapValues((v) =>
            {
                h = StreamizMetadata.GetCurrentTimestampMetadata();
                return(v);
            })
            .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            consumer.Subscribe("output");
            ConsumeResult <byte[], byte[]> result = null;

            // NOTE(review): this loop has no timeout — it spins until the output
            // record arrives and would hang forever if processing fails.
            do
            {
                result = consumer.Consume(100);
            } while (result == null);


            source.Cancel();
            thread.Dispose();

            // Metadata was never configured, so the lambda must have seen null.
            Assert.Null(h);
        }
        static async Task Main(string[] args)
        {
            // Join an order stream with a product table (Avro values via the
            // schema registry), then aggregate orders per key and print them.
            // FIX: removed an unused CancellationTokenSource local that was
            // created but never referenced.
            var config = new StreamConfig();

            config.ApplicationId    = "test-app";
            config.BootstrapServers = "localhost:9092";
            // Required by SchemaAvroSerDes to resolve Avro schemas.
            config.SchemaRegistryUrl   = "http://localhost:8081";
            config.AutoRegisterSchemas = true;

            StreamBuilder builder = new StreamBuilder();

            // Product reference data, materialized for lookups during the join.
            var table = builder.Table("product",
                                      new Int32SerDes(),
                                      new SchemaAvroSerDes <Product>(),
                                      InMemory <int, Product> .As("product-store"));

            var orders = builder.Stream <int, Order, Int32SerDes, SchemaAvroSerDes <Order> >("orders");

            // Enrich each order with the matching product and forward downstream.
            orders.Join(table, (order, product) => new OrderProduct
            {
                order_id      = order.order_id,
                price         = order.price,
                product_id    = product.product_id,
                product_name  = product.name,
                product_price = product.price
            })
            .To <Int32SerDes, SchemaAvroSerDes <OrderProduct> >("orders-output");

            // Running per-key aggregate: keeps the last order fields and a total price.
            orders
            .GroupByKey()
            .Aggregate <OrderAgg, SchemaAvroSerDes <OrderAgg> >(
                () => new OrderAgg(),
                (key, order, agg) =>
            {
                agg.order_id    = order.order_id;
                agg.price       = order.price;
                agg.product_id  = order.product_id;
                agg.totalPrice += order.price;
                return(agg);
            })
            .ToStream()
            .Print(Printed <int, OrderAgg> .ToOut());

            Topology t = builder.Build();

            KafkaStream stream = new KafkaStream(t, config);

            // Ctrl+C shuts the stream down cleanly.
            Console.CancelKeyPress += (o, e) =>
            {
                stream.Dispose();
            };

            await stream.StartAsync();
        }
Example #9
0
        private static void Main(string[] args)
        {
            // Windowed count demo: counts records per key over 1-minute tumbling
            // windows and emits the counts as strings to "output".
            // FIX: deleted a large block of commented-out interactive-query code
            // and a dead "//bool taskStart" line (commented-out code should not
            // be kept in source).
            CancellationTokenSource source = new CancellationTokenSource();

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test-app";
            config.BootstrapServers = "192.168.56.1:9092";
            config.SaslMechanism    = SaslMechanism.Plain;
            config.SaslUsername     = "******";
            config.SaslPassword     = "******";
            config.SecurityProtocol = SecurityProtocol.SaslPlaintext;
            config.AutoOffsetReset  = AutoOffsetReset.Earliest;
            config.NumStreamThreads = 1;

            StreamBuilder builder = new StreamBuilder();

            builder.Stream <string, string>("test")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(TimeSpan.FromMinutes(1)))
            .Count(InMemoryWindows <string, long> .As("store"))
            .ToStream()
            // Flatten the windowed key back to the plain record key.
            .Map((k, v) => KeyValuePair.Create(k.Key, v.ToString()))
            .To("output");

            Topology t = builder.Build();

            KafkaStream stream = new KafkaStream(t, config);

            // Ctrl+C cancels the run token and closes the stream.
            Console.CancelKeyPress += (o, e) =>
            {
                source.Cancel();
                stream.Close();
            };

            stream.Start(source.Token);
        }
Example #10
0
        public void Agg2()
        {
            // Aggregate per-key character counts from a table's values and
            // verify the content of the generated KTABLE-AGGREGATE state store.
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test-agg"
            };

            var builder = new StreamBuilder();

            builder
            .Table<string, string>("topic")
            .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v))
            .Aggregate<Dictionary<char, int>, DictionarySerDes>(
                () => new Dictionary<char, int>(),
                (k, v, old) =>
            {
                // Adder: fold each character of the new value into the counts.
                foreach (var c in v)
                {
                    old[c] = old.TryGetValue(c, out var count) ? count + 1 : 1;
                }
                return old;
            },
                // Subtractor: keep the accumulator unchanged on removal.
                (k, v, old) => old
                );

            var topology = builder.Build();

            using var driver = new TopologyTestDriver(topology, config);
            var expected = new Dictionary<char, int>
            {
                { '1', 2 },
                { '2', 1 }
            };
            var input  = driver.CreateInputTopic<string, string>("topic");
            var output = driver.CreateOuputTopic<Dictionary<char, int>, DictionarySerDes>("output");

            // "1" then "12" => '1' seen twice, '2' seen once.
            input.PipeInput("test", "1");
            input.PipeInput("test", "12");

            var store = driver.GetKeyValueStore<string, Dictionary<char, int>>("KTABLE-AGGREGATE-STATE-STORE-0000000005");

            Assert.IsNotNull(store);
            Assert.AreEqual(1, store.ApproximateNumEntries());

            // Key was upper-cased by GroupBy.
            var aggregated = store.Get("TEST");

            Assert.IsNotNull(aggregated);
            Assert.AreEqual(expected, aggregated);
        }
        public void StreamsAppSensorTest()
        {
            // The app-level sensor must expose version, state, topology
            // description, stream-thread count and application-id metrics.
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic").To("topic2");

            // FIX: build the topology once and reuse its description — the
            // original built and described the topology twice just to compare
            // the same string.
            var topologyDescription = builder.Build().Describe().ToString();

            var sensor = GeneralClientMetrics.StreamsAppSensor(
                "my-application",
                topologyDescription,
                () => 0,
                () => 1, streamMetricsRegistry);

            // Exactly the five metrics asserted below.
            Assert.AreEqual(5, sensor.Metrics.Keys.Count());

            Assert.AreEqual(Assembly.GetExecutingAssembly().GetName().Version.ToString(),
                            sensor.Metrics[MetricName.NameAndGroup(
                                               GeneralClientMetrics.VERSION,
                                               StreamMetricsRegistry.CLIENT_LEVEL_GROUP)].Value);

            // State gauge reflects the first lambda above (() => 0).
            Assert.AreEqual(0,
                            sensor.Metrics[MetricName.NameAndGroup(
                                               GeneralClientMetrics.STATE,
                                               StreamMetricsRegistry.CLIENT_LEVEL_GROUP)].Value);

            Assert.AreEqual(topologyDescription,
                            sensor.Metrics[MetricName.NameAndGroup(
                                               GeneralClientMetrics.TOPOLOGY_DESCRIPTION,
                                               StreamMetricsRegistry.CLIENT_LEVEL_GROUP)].Value);

            // Thread-count gauge reflects the second lambda above (() => 1).
            Assert.AreEqual(1,
                            sensor.Metrics[MetricName.NameAndGroup(
                                               GeneralClientMetrics.STREAM_THREADS,
                                               StreamMetricsRegistry.CLIENT_LEVEL_GROUP)].Value);

            Assert.AreEqual("my-application",
                            sensor.Metrics[MetricName.NameAndGroup(
                                               GeneralClientMetrics.APPLICATION_ID,
                                               StreamMetricsRegistry.CLIENT_LEVEL_GROUP)].Value);
        }
Example #12
0
        public void StreamThreadCommitIntervalWorkflow()
        {
            // End-to-end stream-thread workflow: with a 1 ms commit interval,
            // a processed record must be forwarded to "topic2" and its source
            // offset committed.
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs           = 1;
            config.CommitIntervalMs = 1;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            // Pure pass-through topology: topic -> topic2.
            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("topic2");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            //WAIT STREAMTHREAD PROCESS MESSAGE
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            // The record must arrive on topic2 unchanged.
            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));

            // After the commit interval fired, offset 1 (next to consume)
            // must be committed for topic/partition 0.
            var offsets = thread.GetCommittedOffsets(new List <TopicPartition> {
                new TopicPartition("topic", 0)
            },
                                                     TimeSpan.FromSeconds(10)).ToList();

            Assert.AreEqual(1, offsets.Count);
            Assert.AreEqual(1, offsets[0].Offset.Value);
            Assert.AreEqual(0, offsets[0].TopicPartition.Partition.Value);
            Assert.AreEqual("topic", offsets[0].Topic);

            source.Cancel();
            thread.Dispose();
        }
Example #13
0
        public void TaskManagerAssignedUnknownPartitions()
        {
            // Verifies TaskManager handles an assignment that re-adds a revoked
            // partition plus a previously unknown one: all three partitions end
            // up as active tasks and nothing remains revoked.
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-app";
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream <string, string>("topic")
            .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
            .To("topic2");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            // NOTE(review): 'producer' looks unused below — confirm whether
            // GetProducer has required side effects on the supplier before removing.
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            // Initial assignment: partitions 0 and 1.
            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1)
            });

            // Revoke partition 1.
            taskManager.RevokeTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 1)
            });

            // Re-assign 0 and 1 plus never-seen partition 2.
            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2)
            });

            taskManager.TryToCompleteRestoration();

            // All three partitions active, revoked set drained.
            Assert.AreEqual(3, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.RevokedTasks.Count());
            taskManager.Close();
        }
Example #14
0
        public void Start(CancellationToken token = default)
        {
            // Let every registered query contribute to the topology before building.
            foreach (var q in querys)
            {
                q.Analyze(builder);
            }

            // Build the topology, wire it to the configured stream, and run it
            // until the token is cancelled.
            stream = new KafkaStream(builder.Build(), config);
            stream.Start(token);
        }
        static void Main(string[] args)
        {
            // Demo app: filters a stream, materializes a KTable, and — once the
            // stream is RUNNING — starts a background task that repeatedly
            // queries the table's state store.
            CancellationTokenSource source = new CancellationTokenSource();

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test-app";
            config.BootstrapServers = "192.168.56.1:9092";
            config.SaslMechanism    = SaslMechanism.Plain;
            config.SaslUsername     = "******";
            config.SaslPassword     = "******";
            config.SecurityProtocol = SecurityProtocol.SaslPlaintext;
            config.AutoOffsetReset  = AutoOffsetReset.Earliest;
            config.NumStreamThreads = 1;

            StreamBuilder builder = new StreamBuilder();

            // Drop records whose value contains "test".
            builder.Stream <string, string>("test")
            .FilterNot((k, v) => v.Contains("test"))
            .To("test-output");

            builder.Table("test-ktable", InMemory <string, string> .As("test-store"));

            Topology t = builder.Build();

            KafkaStream stream = new KafkaStream(t, config);
            // Guard so the polling task is started only once.
            bool        taskGetStateStoreRunning = false;

            stream.StateChanged += (old, @new) =>
            {
                // State stores are only queryable once the stream is RUNNING.
                if (@new == KafkaStream.State.RUNNING && !taskGetStateStoreRunning)
                {
                    Task.Factory.StartNew(() =>
                    {
                        taskGetStateStoreRunning = true;
                        while (!source.Token.IsCancellationRequested)
                        {
                            var store = stream.Store(StoreQueryParameters.FromNameAndType("test-store", QueryableStoreTypes.KeyValueStore <string, string>()));
                            var items = store.All().ToList();
                            Thread.Sleep(500);
                        }
                    }, source.Token);
                }
            };

            // Ctrl+C cancels the polling task and closes the stream.
            Console.CancelKeyPress += (o, e) =>
            {
                source.Cancel();
                stream.Close();
            };

            stream.Start(source.Token);
        }
        public void WithNullMaterialize()
        {
            // Even when Materialized is null, Count() on a grouped stream must
            // create a state store with a generated name.
            // NOTE(review): this variant uses an older API surface than the other
            // WithNullMaterialize examples (BuildTopology(string), TaskId.Topic,
            // single-partition StreamTask ctor) — verify against the library version.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-count";

            var builder = new StreamBuilder();
            // Deliberately null: the store name must be auto-generated.
            Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .Count(m);

            var topology          = builder.Build();
            var processorTopology = topology.Builder.BuildTopology("topic");

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            TaskId id = new TaskId {
                Id = 1, Topic = "topic", Partition = 0
            };
            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                part,
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            // Initialize the task so its state stores get registered.
            task.GroupMetadata = consumer as SyncConsumer;
            task.InitializeStateStores();
            task.InitializeTopology();

            // Exactly one store must exist, with a non-empty generated name.
            Assert.AreEqual(1, task.Context.States.StateStoreNames.Count());
            var nameStore = task.Context.States.StateStoreNames.ElementAt(0);

            Assert.IsNotNull(nameStore);
            Assert.AreNotEqual(string.Empty, nameStore);
            var store = task.GetStore(nameStore);

            // The generated store is a timestamped key/value store, still empty.
            Assert.IsInstanceOf <TimestampedKeyValueStore <string, long> >(store);
            Assert.AreEqual(0, (store as TimestampedKeyValueStore <string, long>).ApproximateNumEntries());
        }
        public void ProductionExceptionFatalHandlerFailTest()
        {
            // When the producer raises a fatal exception and the handler answers
            // FAIL, the driver must enter the error state and no output records
            // may be observed.
            bool errorState = false;
            var  _return    = new List <KeyValuePair <string, string> >();
            var  config     = new StreamConfig <StringSerDes, StringSerDes>();
            var  dt         = DateTime.Now;
            var  timeout    = TimeSpan.FromSeconds(10);

            config.ApplicationId               = "test";
            config.BootstrapServers            = "127.0.0.1";
            config.PollMs                      = 10;
            // Handler decides FAIL on any production exception.
            config.ProductionExceptionHandler += (r) => ExceptionHandlerResponse.FAIL;

            // Supplier that simulates a fatal produce failure.
            var options = new ProducerSyncExceptionOptions {
                IsFatal = true
            };
            var supplier = new ProducerSyncException(options);

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .To("test-output");

            // Second topology leg records anything that reaches the output topic.
            builder.Stream <string, string>("test-output")
            .Peek((k, v) => _return.Add(KeyValuePair.Create(k, v)));

            var t = builder.Build();

            using (var driver = new TopologyTestDriver(t.Builder, config,
                                                       TopologyTestDriver.Mode.ASYNC_CLUSTER_IN_MEMORY, supplier))
            {
                var inputtopic = driver.CreateInputTopic <string, string>("test");
                inputtopic.PipeInput("coucou");
                inputtopic.PipeInput("coucou");
                // Poll (bounded by the 10 s timeout) until the driver reports
                // the error caused by the fatal produce failure.
                while (!errorState)
                {
                    errorState = driver.IsError;
                    Thread.Sleep(10);
                    if (DateTime.Now > dt + timeout)
                    {
                        break;
                    }
                }

                Assert.IsTrue(driver.IsError);
            }

            // Nothing may have reached the output topic.
            Assert.AreEqual(new List <KeyValuePair <string, string> >(), _return);
        }
Example #18
0
        public async Task GetWindowStateStore()
        {
            // A windowed count store must be retrievable via Store() once the
            // stream reaches the RUNNING state.
            var timeout = TimeSpan.FromSeconds(10);

            bool     isRunningState = false;
            DateTime dt             = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(TimeSpan.FromMinutes(1)))
            .Count(InMemoryWindows <string, long> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, new SyncKafkaSupplier());

            stream.StateChanged += (old, @new) =>
            {
                if (@new.Equals(KafkaStream.State.RUNNING))
                {
                    isRunningState = true;
                }
            };
            await stream.StartAsync();

            // Poll until the stream is RUNNING or the timeout elapses.
            // FIX: use Task.Delay instead of Thread.Sleep — blocking the thread
            // inside an async method is an anti-pattern.
            while (!isRunningState)
            {
                await Task.Delay(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(isRunningState);

            if (isRunningState)
            {
                var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.WindowStore <string, long>()));
                Assert.IsNotNull(store);
            }

            stream.Dispose();
        }
Example #19
0
        public void GetWStateStoreInvalidStateStoreException()
        {
            // Querying a window store while the stream is NOT yet RUNNING must
            // throw InvalidStateStoreException.
            var      timeout = TimeSpan.FromSeconds(10);
            var      source  = new CancellationTokenSource();
            bool     state   = false;
            DateTime dt      = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(TimeSpan.FromMinutes(1)))
            .Count(InMemoryWindows <string, long> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, new SyncKafkaSupplier());

            stream.StateChanged += (old, @new) =>
            {
                // FIX: this condition was garbled in the source ("[email protected]");
                // restored to fire for every state OTHER than RUNNING, which is
                // exactly when Store() must throw.
                if (!@new.Equals(KafkaStream.State.RUNNING))
                {
                    if (!state)
                    {
                        Assert.Throws <InvalidStateStoreException>(() => stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.WindowStore <string, long>())));
                        state = true;
                    }
                }
            };
            stream.Start(source.Token);
            // Wait (bounded by the 10 s timeout) until a non-RUNNING state
            // transition has been observed and asserted.
            while (!state)
            {
                Thread.Sleep(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(state);

            source.Cancel();
            stream.Close();
        }
Beispiel #20
0
        public void TestWithTwoSubTopology()
        {
            // Two sub-topologies linked by an intermediate topic: the first
            // filters so only key "1" reaches "tempTopic"; the second reduces
            // per key, so "finalTopic" should only ever carry key "1".
            var config = new StreamConfig <StringSerDes, StringSerDes>()
            {
                ApplicationId = "test-reducer"
            };

            StreamBuilder builder = new StreamBuilder();

            builder
            .Stream <string, string>("topic")
            .Filter((key, value) =>
            {
                return(key == "1");
            })
            .To("tempTopic");

            builder.Stream <string, string>("tempTopic")
            .GroupByKey()
            .Reduce((v1, v2) => $"{v1}-{v2}")
            .ToStream()
            .To("finalTopic");

            var topology = builder.Build();

            using (var driver = new TopologyTestDriver(topology, config))
            {
                var input      = driver.CreateInputTopic <string, string>("topic");
                var tempTopic  = driver.CreateOuputTopic <string, string>("tempTopic");
                var finalTopic = driver.CreateOuputTopic <string, string>("finalTopic");

                input.PipeInput("1", "Once");
                input.PipeInput("2", "Once");
                input.PipeInput("1", "Twice");
                input.PipeInput("3", "Once");
                input.PipeInput("1", "Thrice");
                input.PipeInput("2", "Twice");

                var list = finalTopic.ReadKeyValueList().Select(r => KeyValuePair.Create(r.Message.Key, r.Message.Value)).ToList();

                // FIX: the test previously asserted nothing meaningful
                // (Assert.IsNotNull("x") always passes). Pin the observable
                // contract: output exists, only key "1" got through the filter,
                // and the final reduction concatenated all three values in order.
                Assert.IsTrue(list.Count > 0);
                Assert.IsTrue(list.All(kv => kv.Key == "1"));
                Assert.AreEqual("Once-Twice-Thrice", list.Last().Value);
            }
        }
        private static void Main(string[] args)
        {
            // Demo entry point: groups records by upper-cased key, then both
            // counts entries and aggregates per-character frequencies of the
            // values, each into its own in-memory store.
            var config = new StreamConfig <string, string, StringSerDes, StringSerDes>();
            config.ApplicationId = "test-test-driver-app";

            var builder = new StreamBuilder();

            var grouped = builder
                          .Stream <string, string>("test")
                          .GroupBy((key, _) => key.ToUpper());

            grouped.Count(InMemory <string, long> .As("count-store"));
            grouped.Aggregate(
                () => new Dictionary <char, int>(),
                (key, value, acc) =>
                {
                    // Tally every character of the incoming value.
                    foreach (var c in value)
                    {
                        acc[c] = acc.TryGetValue(c, out var n) ? n + 1 : 1;
                    }
                    return acc;
                },
                InMemory <string, Dictionary <char, int> > .As("agg-store").WithValueSerdes(new DictionarySerDes()));

            Topology topology = builder.Build();

            using (var driver = new TopologyTestDriver(topology, config))
            {
                var inputTopic  = driver.CreateInputTopic <string, string>("test");
                var outputTopic = driver.CreateOuputTopic <string, string>("test-output", TimeSpan.FromSeconds(5));

                inputTopic.PipeInput("test", "test");
                inputTopic.PipeInput("test", "test2");

                // Query both stores for the upper-cased key.
                var aggStore   = driver.GetKeyValueStore <string, Dictionary <char, int> >("agg-store");
                var aggValue   = aggStore.Get("TEST");
                var countStore = driver.GetKeyValueStore <string, long>("count-store");
                var countValue = countStore.Get("TEST");
            }
        }
Beispiel #22
0
        public void TicksTest()
        {
            // With a ticks-based timestamp extractor, running the shared use
            // case is expected to fail with ArgumentOutOfRangeException
            // (ticks are not valid epoch-millisecond timestamps).
            var config = new StreamConfig <StringSerDes, StringSerDes>
            {
                ApplicationId             = "test-fix-73",
                DefaultTimestampExtractor = new ObjectATimestampTicksExtractor()
            };

            var builder = new StreamBuilder();
            BuildTopology(builder);

            using var driver = new TopologyTestDriver(builder.Build(), config);
            Assert.Throws <ArgumentOutOfRangeException>(() => AssertUseCase(driver));
        }
Beispiel #23
0
        public void UnixTimestampMsTest()
        {
            // With a Unix-epoch-milliseconds timestamp extractor the shared use
            // case runs through without throwing.
            var config = new StreamConfig <StringSerDes, StringSerDes>
            {
                ApplicationId             = "test-fix-73",
                DefaultTimestampExtractor = new ObjectATimestampUnixExtractor()
            };

            var builder = new StreamBuilder();
            BuildTopology(builder);

            using var driver = new TopologyTestDriver(builder.Build(), config);
            AssertUseCase(driver);
        }
Beispiel #24
0
        private Topology KStreamWithImplicitReKeyJoinTopology(ITimestampExtractor timestampExtractor)
        {
            // Builds a stream-table join where an identity SelectKey precedes the
            // join (the method name indicates this triggers an implicit re-key).
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            var regionsTable = builder.Table <string, string>(userRegionsTopic, serdes, serdes, InMemory <string, string> .As("table-store"), "table", timestampExtractor);
            var clicksStream = builder.Stream <string, string>(userClicksTopic, serdes, serdes, timestampExtractor);

            clicksStream
            .SelectKey((key, _) => key)
            .Join(regionsTable, Join)
            .To(outputTopic);

            return builder.Build();
        }
Beispiel #25
0
        public void StateStoreRange()
        {
            // Verifies forward and reverse key-range queries on the mock
            // key/value store exposed by the in-memory cluster driver.
            var source = new CancellationTokenSource();
            var config = new StreamConfig <StringSerDes, StringSerDes>
            {
                ApplicationId = "test-config",
                PollMs        = 50
            };

            var topicConfiguration = config.Clone();
            topicConfiguration.ApplicationId = $"test-driver-{config.ApplicationId}";

            var builder = new StreamBuilder();
            builder.Table("test", InMemory <string, string> .As("store"));

            var driver = new ClusterInMemoryTopologyDriver("client", builder.Build().Builder, config, topicConfiguration, TimeSpan.FromSeconds(1), source.Token);
            driver.StartDriver();

            var input = driver.CreateInputTopic("test", new StringSerDes(), new StringSerDes());
            var store = driver.GetStateStore <string, string>("store");

            Assert.IsNotNull(store);
            Assert.IsInstanceOf <MockReadOnlyKeyValueStore <string, string> >(store);

            var mockStore = (MockReadOnlyKeyValueStore <string, string>)store;

            input.PipeInput("coucou", "1");
            input.PipeInput("test", "2");
            Thread.Sleep(100); // give the driver time to process the records

            var range = mockStore.Range("coucou", "test").ToList();
            Assert.AreEqual(2, range.Count);
            Assert.AreEqual("coucou", range[0].Key);
            Assert.AreEqual("1", range[0].Value);
            Assert.AreEqual("test", range[1].Key);
            Assert.AreEqual("2", range[1].Value);

            var reverseRange = mockStore.ReverseRange("coucou", "test").ToList();
            Assert.AreEqual(2, reverseRange.Count);
            Assert.AreEqual("test", reverseRange[0].Key);
            Assert.AreEqual("2", reverseRange[0].Value);
            Assert.AreEqual("coucou", reverseRange[1].Key);
            Assert.AreEqual("1", reverseRange[1].Value);

            source.Cancel();
            driver.Dispose();
        }
Beispiel #26
0
        public async Task GetKVStateStoreInvalidStateStoreException()
        {
            // While the stream is NOT yet RUNNING, querying the key/value store
            // must throw InvalidStateStoreException.
            var timeout = TimeSpan.FromSeconds(10);

            bool     state = false;
            DateTime dt    = DateTime.Now;

            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId    = "test";
            config.BootstrapServers = "127.0.0.1";

            var builder = new StreamBuilder();

            builder
            .Stream <string, string>("test")
            .GroupByKey()
            .Count(InMemory <string, long> .As("store"));

            var t      = builder.Build();
            var stream = new KafkaStream(t, config, new SyncKafkaSupplier());

            stream.StateChanged += (old, @new) =>
            {
                // FIX: condition was mangled by the source scraper's
                // link-obfuscation; restored to the negated RUNNING check.
                if (!@new.Equals(KafkaStream.State.RUNNING))
                {
                    if (!state)
                    {
                        Assert.Throws <InvalidStateStoreException>(() => stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore <string, long>())));
                        state = true;
                    }
                }
            };
            await stream.StartAsync();

            // FIX: use Task.Delay rather than a blocking sleep in an async method.
            while (!state)
            {
                await Task.Delay(250);
                if (DateTime.Now > dt + timeout)
                {
                    break;
                }
            }
            Assert.IsTrue(state);

            stream.Dispose();
        }
Beispiel #27
0
        public void WithNullMaterialize()
        {
            // Original note: even if Materialized is null, a state store exists for
            // the count/aggregate processor under a generated store name. Here a
            // stream task is built by hand (bypassing KafkaStream), and initializing
            // its state stores is expected to throw StreamsException — presumably
            // because of this minimal manual wiring; confirm against the library.
            var config = new StreamConfig <StringSerDes, StringSerDes>();
            var serdes = new StringSerDes();

            config.ApplicationId = "test-window-count";

            var builder = new StreamBuilder();
            // Deliberately null — this is the scenario under test.
            Materialized <string, int, IWindowStore <Bytes, byte[]> > m = null;

            builder
            .Stream <string, string>("topic")
            .GroupByKey()
            .WindowedBy(TumblingWindowOptions.Of(2000))
            .Aggregate(
                () => 0,
                (k, v, agg) => Math.Max(v.Length, agg),
                m);

            var    topology = builder.Build();
            TaskId id       = new TaskId {
                Id = 0, Partition = 0
            };
            var processorTopology = topology.Builder.BuildTopology(id);

            // Synchronous in-memory Kafka stand-ins so no broker is needed.
            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);


            var        part = new TopicPartition("topic", 0);
            StreamTask task = new StreamTask(
                "thread-0",
                id,
                new List <TopicPartition> {
                part
            },
                processorTopology,
                consumer,
                config,
                supplier,
                null);

            task.GroupMetadata = consumer as SyncConsumer;
            Assert.Throws <StreamsException>(() => task.InitializeStateStores());
        }
Beispiel #28
0
        public void MultiBranchWithElements()
        {
            // Routes even values to "topic-pair" and odd values to "topic-impair",
            // then checks each output topic received exactly its share of the input.
            var builder = new StreamBuilder();

            var branches = builder.Stream <string, int>("topic")
                           .Branch((k, v) => v % 2 == 0, (k, v) => v % 2 > 0);

            branches[0].To("topic-pair");
            branches[1].To("topic-impair");

            var config = new StreamConfig <StringSerDes, Int32SerDes>
            {
                ApplicationId = "test-branch"
            };

            Topology t = builder.Build();

            using (var driver = new TopologyTestDriver(t, config))
            {
                var inputTopic        = driver.CreateInputTopic <string, int>("topic");
                var outputTopicPair   = driver.CreateOuputTopic <string, int>("topic-pair");
                var outputTopicImpair = driver.CreateOuputTopic <string, int>("topic-impair");

                var expectedPair   = new List <KeyValuePair <string, int> >();
                var expectedImpair = new List <KeyValuePair <string, int> >();

                for (int i = 0; i < 10; ++i)
                {
                    inputTopic.PipeInput(i.ToString(), i);

                    // Mirror the topology's branching predicate to build the oracle.
                    var target = (i % 2 == 0) ? expectedPair : expectedImpair;
                    target.Add(KeyValuePair.Create(i.ToString(), i));
                }

                var listPair   = outputTopicPair.ReadKeyValueList().Select(r => KeyValuePair.Create(r.Message.Key, r.Message.Value)).ToList();
                var listImpair = outputTopicImpair.ReadKeyValueList().Select(r => KeyValuePair.Create(r.Message.Key, r.Message.Value)).ToList();

                Assert.AreEqual(expectedPair, listPair);
                Assert.AreEqual(expectedImpair, listImpair);
            }
        }
        public void ReduceAndQueryInStateStore()
        {
            // Groups a table by upper-cased key and materializes two aggregates:
            // a count and a reduce to the maximum value length; verifies both
            // stores are queryable with the expected contents.
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-reduce";

            var builder = new StreamBuilder();

            var table = builder
                        .Table <string, string>("topic")
                        .MapValues(v => v.Length)
                        .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v));

            table.Count(InMemory <string, long> .As("count-store"));
            table.Reduce(
                (v1, v2) => Math.Max(v1, v2),
                (v1, v2) => Math.Max(v1, v2),
                InMemory <string, int> .As("reduce-store").WithValueSerdes <Int32SerDes>());

            var topology = builder.Build();

            using var driver = new TopologyTestDriver(topology, config);
            var input = driver.CreateInputTopic <string, string>("topic");

            input.PipeInput("test", "1");
            input.PipeInput("test", "120");
            input.PipeInput("test", "30");
            input.PipeInput("coucou", "120");

            var store = driver.GetKeyValueStore <string, int>("reduce-store");

            Assert.IsNotNull(store);
            Assert.AreEqual(2, store.ApproximateNumEntries());
            var el = store.Get("TEST");

            Assert.IsNotNull(el);
            Assert.AreEqual(3, el);

            var storeCount = driver.GetKeyValueStore <string, long>("count-store");

            Assert.IsNotNull(storeCount);
            // FIX: copy-paste bug — this assertion previously re-checked the
            // reduce store ("store") instead of the count store.
            Assert.AreEqual(2, storeCount.ApproximateNumEntries());
            var e = storeCount.Get("TEST");

            Assert.IsNotNull(e);
            Assert.AreEqual(1, e);
        }
Beispiel #30
0
        public void GetStateStoreBeforeRunningState()
        {
            // Querying a store on a stream that was never started must raise
            // IllegalStateException.
            var config = new StreamConfig <StringSerDes, StringSerDes>
            {
                ApplicationId    = "test",
                BootstrapServers = "127.0.0.1"
            };

            var builder = new StreamBuilder();
            builder.Table("topic", InMemory <string, string> .As("store"));

            var stream = new KafkaStream(builder.Build(), config, new SyncKafkaSupplier());

            Assert.Throws <IllegalStateException>(
                () => stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore <string, string>())));
            stream.Dispose();
        }