public void WithNullMaterialize() { // CERTIFIED THAT SAME IF Materialize is null, a state store exist for count processor with a generated namestore var config = new StreamConfig <StringSerDes, StringSerDes>(); var serdes = new StringSerDes(); config.ApplicationId = "test-reduce"; var builder = new StreamBuilder(); Materialized <string, int, IKeyValueStore <Bytes, byte[]> > m = null; builder .Table <string, string>("topic") .MapValues((v) => v.Length) .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v)) .Reduce((v1, v2) => Math.Max(v1, v2), (v1, v2) => v2, m); var topology = builder.Build(); Assert.Throws <StreamsException>(() => { using (var driver = new TopologyTestDriver(topology, config)) { var input = driver.CreateInputTopic <string, string>("topic"); input.PipeInput("test", "1"); } }); }
public void WithNullMaterialize() { // CERTIFIED THAT SAME IF Materialize is null, a state store exist for count processor with a generated namestore var config = new StreamConfig <StringSerDes, StringSerDes>(); var serdes = new StringSerDes(); config.ApplicationId = "test-count"; var builder = new StreamBuilder(); Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null; builder .Table <string, string>("topic") .GroupBy((k, v) => KeyValuePair.Create(k.ToUpper(), v)) .Count(m); var topology = builder.Build(); TaskId id = new TaskId { Id = 0, Partition = 0 }; var processorTopology = topology.Builder.BuildTopology(id); var supplier = new SyncKafkaSupplier(); var producer = supplier.GetProducer(config.ToProducerConfig()); var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null); var part = new TopicPartition("topic", 0); StreamTask task = new StreamTask( "thread-0", id, new List <TopicPartition> { part }, processorTopology, consumer, config, supplier, null); task.GroupMetadata = consumer as SyncConsumer; task.InitializeStateStores(); task.InitializeTopology(); Assert.AreEqual(2, task.Context.States.StateStoreNames.Count()); var nameStore1 = task.Context.States.StateStoreNames.ElementAt(0); var nameStore2 = task.Context.States.StateStoreNames.ElementAt(1); Assert.IsNotNull(nameStore1); Assert.IsNotNull(nameStore2); Assert.AreNotEqual(string.Empty, nameStore1); Assert.AreNotEqual(string.Empty, nameStore2); var store1 = task.GetStore(nameStore1); var store2 = task.GetStore(nameStore2); Assert.IsInstanceOf <TimestampedKeyValueStore <string, string> >(store1); Assert.IsInstanceOf <TimestampedKeyValueStore <string, long> >(store2); Assert.AreEqual(0, (store1 as TimestampedKeyValueStore <string, string>).ApproximateNumEntries()); Assert.AreEqual(0, (store2 as TimestampedKeyValueStore <string, long>).ApproximateNumEntries()); }
public void SerializeNullData()
{
    var serdes = new StringSerDes();
    var r = serdes.Serialize(null, new Confluent.Kafka.SerializationContext());
    Assert.IsNull(r);
}
public void StreamStreamJoinSpecificSerdes()
{
    var stringSerdes = new StringSerDes();
    var config = new StreamConfig
    {
        ApplicationId = "test-stream-stream-join"
    };

    StreamBuilder builder = new StreamBuilder();

    var stream = builder.Stream("topic1", stringSerdes, stringSerdes);

    builder
        .Stream("topic2", stringSerdes, stringSerdes)
        .Join(
            stream,
            (s, v) => $"{s}-{v}",
            JoinWindowOptions.Of(TimeSpan.FromSeconds(10)),
            StreamJoinProps.With(
                keySerde: stringSerdes,
                valueSerde: stringSerdes,
                otherValueSerde: stringSerdes))
        .To("output-join");

    Topology t = builder.Build();

    using (var driver = new TopologyTestDriver(t, config))
    {
        var inputTopic = driver.CreateInputTopic<string, string, StringSerDes, StringSerDes>("topic1");
        var outputTopic = driver.CreateOuputTopic<string, string, StringSerDes, StringSerDes>("output-join");
        inputTopic.PipeInput("test", "test");
        // Only "topic1" received a record, so the join emits nothing
        var record = outputTopic.ReadKeyValue();
        Assert.IsNull(record);
    }
}
public void SerializeNullData()
{
    var serdes = new StringSerDes();
    var r = serdes.Serialize(null);
    Assert.IsNull(r);
}
public void EnumeratorReverseRangeAll()
{
    var serdes = new StringSerDes();

    string deserialize(byte[] bytes)
    {
        return serdes.Deserialize(bytes, new SerializationContext());
    }

    byte[] key = serdes.Serialize("key", new SerializationContext()), value = serdes.Serialize("value", new SerializationContext());
    byte[] key2 = serdes.Serialize("key2", new SerializationContext()), value2 = serdes.Serialize("value2", new SerializationContext());
    byte[] key3 = serdes.Serialize("key3", new SerializationContext()), value3 = serdes.Serialize("value3", new SerializationContext());

    store.Put(new Bytes(key), value);
    store.Put(new Bytes(key2), value2);
    store.Put(new Bytes(key3), value3);

    // Reverse range over [key, key2] yields entries from key2 down to key
    var enumerator = store.ReverseRange(new Bytes(key), new Bytes(key2));
    Assert.IsTrue(enumerator.MoveNext());
    Assert.AreEqual("key2", deserialize(enumerator.Current.Value.Key.Get));
    Assert.AreEqual("value2", deserialize(enumerator.Current.Value.Value));
    Assert.IsTrue(enumerator.MoveNext());
    Assert.AreEqual("key", deserialize(enumerator.Current.Value.Key.Get));
    Assert.AreEqual("value", deserialize(enumerator.Current.Value.Value));
    Assert.IsFalse(enumerator.MoveNext());
    enumerator.Dispose();
}
public void GetCurrentPartitionMetadataTests()
{
    var source = new System.Threading.CancellationTokenSource();
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 1;
    config.FollowMetadata = true;
    var configConsumer = config.Clone();
    configConsumer.ApplicationId = "test-consumer";
    int? h = null;

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder
        .Stream<string, string>("topic")
        .MapValues((v) =>
        {
            h = StreamizMetadata.GetCurrentPartitionMetadata();
            return v;
        })
        .To("output");

    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    thread.Start(source.Token);
    producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key1", new SerializationContext()),
        Value = serdes.Serialize("coucou", new SerializationContext())
    });

    consumer.Subscribe("output");
    ConsumeResult<byte[], byte[]> result = null;
    do
    {
        result = consumer.Consume(100);
    } while (result == null);

    source.Cancel();
    thread.Dispose();

    Assert.NotNull(h);
    Assert.AreEqual(0, h);
}
public void WithNullMaterialize() { // CERTIFIED THAT SAME IF Materialize is null, a state store exist for count processor with a generated namestore var config = new StreamConfig <StringSerDes, StringSerDes>(); var serdes = new StringSerDes(); config.ApplicationId = "test-agg"; var builder = new StreamBuilder(); Materialized <string, long, IKeyValueStore <Bytes, byte[]> > m = null; builder .Stream <string, string>("topic") .GroupByKey() .Aggregate(() => 0L, (k, v, agg) => agg + 1, m); var topology = builder.Build(); Assert.Throws <StreamsException>(() => { using (var driver = new TopologyTestDriver(topology, config)) { var input = driver.CreateInputTopic <string, string>("topic"); input.PipeInput("test", "1"); } }); }
public void GetElementInStateStore()
{
    var timeout = TimeSpan.FromSeconds(10);
    var source = new CancellationTokenSource();
    bool isRunningState = false;
    DateTime dt = DateTime.Now;

    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.PollMs = 10;

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());

    var builder = new StreamBuilder();
    builder.Table("topic", InMemory<string, string>.As("store"));

    var t = builder.Build();
    var stream = new KafkaStream(t, config, supplier);

    stream.StateChanged += (old, @new) =>
    {
        if (@new.Equals(KafkaStream.State.RUNNING))
        {
            isRunningState = true;
        }
    };
    stream.Start(source.Token);

    // Wait (up to the timeout) for the stream to reach the RUNNING state
    while (!isRunningState)
    {
        Thread.Sleep(250);
        if (DateTime.Now > dt + timeout)
        {
            break;
        }
    }
    Assert.IsTrue(isRunningState);

    if (isRunningState)
    {
        var serdes = new StringSerDes();
        producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
        {
            Key = serdes.Serialize("key1"),
            Value = serdes.Serialize("coucou")
        });
        Thread.Sleep(50);
        var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore<string, string>()));
        Assert.IsNotNull(store);
        Assert.AreEqual(1, store.ApproximateNumEntries());
        var item = store.Get("key1");
        Assert.IsNotNull(item);
        Assert.AreEqual("coucou", item);
    }

    source.Cancel();
    stream.Close();
}
public async Task BuildGlobalStateStore()
{
    var timeout = TimeSpan.FromSeconds(10);
    bool isRunningState = false;
    DateTime dt = DateTime.Now;

    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.BootstrapServers = "127.0.0.1";
    config.PollMs = 1;

    var builder = new StreamBuilder();
    builder.GlobalTable<string, string>("test", InMemory<string, string>.As("store"));

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(new ProducerConfig());
    var t = builder.Build();
    var stream = new KafkaStream(t, config, supplier);

    stream.StateChanged += (old, @new) =>
    {
        if (@new.Equals(KafkaStream.State.RUNNING))
        {
            isRunningState = true;
        }
    };
    await stream.StartAsync();

    // Wait (up to the timeout) for the stream to reach the RUNNING state
    while (!isRunningState)
    {
        Thread.Sleep(250);
        if (DateTime.Now > dt + timeout)
        {
            break;
        }
    }
    Assert.IsTrue(isRunningState);

    if (isRunningState)
    {
        var stringSerdes = new StringSerDes();
        producer.Produce("test", new Message<byte[], byte[]>
        {
            Key = stringSerdes.Serialize("key", new SerializationContext()),
            Value = stringSerdes.Serialize("value", new SerializationContext())
        });
        Thread.Sleep(250);
        var store = stream.Store(StoreQueryParameters.FromNameAndType("store", QueryableStoreTypes.KeyValueStore<string, string>()));
        Assert.IsNotNull(store);
        Assert.AreEqual(1, store.ApproximateNumEntries());
    }

    stream.Dispose();
}
public void DeserializeNullData()
{
    var stringSerdes = new StringSerDes();
    var serdes = new ValueAndTimestampSerDes<string>(stringSerdes);
    var r = serdes.Deserialize(null);
    Assert.IsNull(r);
}
public void DeserializeNullData()
{
    var stringSerdes = new StringSerDes();
    var serdes = new ValueAndTimestampSerDes<string>(stringSerdes);
    var r = serdes.Deserialize(null, new Confluent.Kafka.SerializationContext());
    Assert.IsNull(r);
}
public void PutKeyNotExist() { var serdes = new StringSerDes(); byte[] key = serdes.Serialize("key", new SerializationContext()), value = serdes.Serialize("value", new SerializationContext()); store.Put(new Bytes(key), value); Assert.AreEqual(1, store.ApproximateNumEntries()); }
public void WithNullMaterialize() { // CERTIFIED THAT SAME IF Materialize is null, a state store exist for count processor with a generated namestore var config = new StreamConfig <StringSerDes, StringSerDes>(); var serdes = new StringSerDes(); config.ApplicationId = "test-window-count"; var builder = new StreamBuilder(); Materialized <string, long, IWindowStore <Bytes, byte[]> > m = null; builder .Stream <string, string>("topic") .GroupByKey() .WindowedBy(TumblingWindowOptions.Of(2000)) .Count(m); var topology = builder.Build(); TaskId id = new TaskId { Id = 0, Partition = 0 }; var processorTopology = topology.Builder.BuildTopology(id); var supplier = new SyncKafkaSupplier(); var producer = supplier.GetProducer(config.ToProducerConfig()); var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null); var part = new TopicPartition("topic", 0); StreamTask task = new StreamTask( "thread-0", id, new List <TopicPartition> { part }, processorTopology, consumer, config, supplier, null, new MockChangelogRegister(), new StreamMetricsRegistry()); task.GroupMetadata = consumer as SyncConsumer; task.InitializeStateStores(); task.InitializeTopology(); task.RestorationIfNeeded(); task.CompleteRestoration(); Assert.AreEqual(1, task.Context.States.StateStoreNames.Count()); var nameStore = task.Context.States.StateStoreNames.ElementAt(0); Assert.IsNotNull(nameStore); Assert.AreNotEqual(string.Empty, nameStore); var store = task.GetStore(nameStore); Assert.IsInstanceOf <ITimestampedWindowStore <string, long> >(store); Assert.AreEqual(0, (store as ITimestampedWindowStore <string, long>).All().ToList().Count); }
public void PutKeyNotExist() { var serdes = new StringSerDes(); byte[] key = serdes.Serialize("key"), value = serdes.Serialize("value"); var store = new InMemoryKeyValueStore("store"); store.Put(new Bytes(key), value); Assert.AreEqual(1, store.ApproximateNumEntries()); }
public void DeserializeData() { string s = "test"; var serdes = new StringSerDes(); var r = serdes.Deserialize(serdes.Serialize(s)); Assert.IsNotNull(r); Assert.AreEqual(s, r); }
private Message<byte[], byte[]> CreateMessage(string topic, string key, string value)
{
    StringSerDes stringSerDes = new StringSerDes();
    return new Message<byte[], byte[]>
    {
        Key = stringSerDes.Serialize(key, new SerializationContext(MessageComponentType.Key, topic)),
        Value = stringSerDes.Serialize(value, new SerializationContext(MessageComponentType.Value, topic))
    };
}
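// Hypothetical usage sketch (not part of the original tests): the CreateMessage helper above can
// feed a producer obtained from a SyncKafkaSupplier, in the same way other tests in this section
// build Message<byte[], byte[]> instances by hand. Topic and key/value literals are illustrative
// assumptions only.
private void ProduceWithCreateMessageSketch()
{
    var config = new StreamConfig<StringSerDes, StringSerDes> { ApplicationId = "test-helper" };
    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());

    // Serialize the key and value through the helper, then produce the record
    producer.Produce("topic", CreateMessage("topic", "key1", "value1"));
}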
public void DeserializeData() { string s = "test"; var serdes = new StringSerDes(); var r = serdes.Deserialize(serdes.Serialize(s, new Confluent.Kafka.SerializationContext()), new Confluent.Kafka.SerializationContext()); Assert.IsNotNull(r); Assert.AreEqual(s, r); }
public void TaskManagerAssignedUnknownPartitions()
{
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test-app";

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder.Stream<string, string>("topic")
        .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
        .To("topic2");

    var topology = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);
    var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

    var storeChangelogReader = new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
    var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer, storeChangelogReader, new StreamMetricsRegistry());
    var taskManager = new TaskManager(topology.Builder, taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1)
        });

    taskManager.RevokeTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 1)
        });

    taskManager.CreateTasks(
        new List<TopicPartition>
        {
            new TopicPartition("topic", 0),
            new TopicPartition("topic", 1),
            new TopicPartition("topic", 2)
        });

    taskManager.TryToCompleteRestoration();

    Assert.AreEqual(3, taskManager.ActiveTasks.Count());
    Assert.AreEqual(0, taskManager.RevokedTasks.Count());

    taskManager.Close();
}
public void DeletKeyNotExist() { var serdes = new StringSerDes(); byte[] key = serdes.Serialize("key", new SerializationContext()); var r = store.Delete(new Bytes(key)); Assert.IsNull(r); Assert.AreEqual(0, store.ApproximateNumEntries()); }
public void PutIfAbsent() { var serdes = new StringSerDes(); byte[] key3 = serdes.Serialize("key3", new SerializationContext()), value3 = serdes.Serialize("value3", new SerializationContext()); store.PutIfAbsent(new Bytes(key3), value3); store.PutIfAbsent(new Bytes(key3), value3); Assert.AreEqual(1, store.ApproximateNumEntries()); }
public void StreamThreadCommitIntervalWorkflow()
{
    var source = new System.Threading.CancellationTokenSource();
    var config = new StreamConfig<StringSerDes, StringSerDes>();
    config.ApplicationId = "test";
    config.Guarantee = ProcessingGuarantee.AT_LEAST_ONCE;
    config.PollMs = 1;
    config.CommitIntervalMs = 1;

    var serdes = new StringSerDes();
    var builder = new StreamBuilder();

    builder.Stream<string, string>("topic").To("topic2");

    var topo = builder.Build();

    var supplier = new SyncKafkaSupplier();
    var producer = supplier.GetProducer(config.ToProducerConfig());
    var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);
    consumer.Subscribe("topic2");

    var thread = StreamThread.Create(
        "thread-0", "c0",
        topo.Builder, new StreamMetricsRegistry(), config,
        supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
        0) as StreamThread;

    thread.Start(source.Token);
    producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
    {
        Key = serdes.Serialize("key1", new SerializationContext()),
        Value = serdes.Serialize("coucou", new SerializationContext())
    });

    // Wait for the stream thread to process the message
    System.Threading.Thread.Sleep(100);

    var message = consumer.Consume(100);

    Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
    Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));

    var offsets = thread.GetCommittedOffsets(
        new List<TopicPartition> { new TopicPartition("topic", 0) },
        TimeSpan.FromSeconds(10)).ToList();
    Assert.AreEqual(1, offsets.Count);
    Assert.AreEqual(1, offsets[0].Offset.Value);
    Assert.AreEqual(0, offsets[0].TopicPartition.Partition.Value);
    Assert.AreEqual("topic", offsets[0].Topic);

    source.Cancel();
    thread.Dispose();
}
public void DeletKeyNotExist() { var serdes = new StringSerDes(); byte[] key = serdes.Serialize("key"); var store = new InMemoryKeyValueStore("store"); var r = store.Delete(new Bytes(key)); Assert.IsNull(r); Assert.AreEqual(0, store.ApproximateNumEntries()); }
public void PutIfAbsent() { var serdes = new StringSerDes(); byte[] key3 = serdes.Serialize("key3"), value3 = serdes.Serialize("value3"); var store = new InMemoryKeyValueStore("store"); store.PutIfAbsent(new Bytes(key3), value3); store.PutIfAbsent(new Bytes(key3), value3); Assert.AreEqual(1, store.ApproximateNumEntries()); }
public void SerializeData() { string s = "coucou"; byte[] b = new byte[] { 99, 111, 117, 99, 111, 117 }; var serdes = new StringSerDes(); var r = serdes.Serialize(s, new Confluent.Kafka.SerializationContext()); Assert.IsNotNull(r); Assert.Greater(r.Length, 0); Assert.AreEqual(b, r); }
public void DeleteKeyExist()
{
    var serdes = new StringSerDes();
    byte[] key = serdes.Serialize("key", new SerializationContext()), value = serdes.Serialize("value", new SerializationContext());
    store.Put(new Bytes(key), value);
    Assert.AreEqual(1, store.ApproximateNumEntries());
    var v = store.Delete(new Bytes(key));
    Assert.AreEqual(0, store.ApproximateNumEntries());
    // Delete returns the value previously associated with the key
    Assert.AreEqual("value", serdes.Deserialize(v, new SerializationContext()));
}
public void EnumeratorAlreadyDispose()
{
    var serdes = new StringSerDes();
    byte[] key = serdes.Serialize("key", new SerializationContext()), value = serdes.Serialize("value", new SerializationContext());
    store.Put(new Bytes(key), value);
    var enumerator = store.Range(new Bytes(key), new Bytes(key));
    Assert.IsTrue(enumerator.MoveNext());
    enumerator.Dispose();
    // Disposing the enumerator a second time must throw
    Assert.Throws<ObjectDisposedException>(() => enumerator.Dispose());
}
public void PutAllWithValueNull()
{
    var serdes = new StringSerDes();
    byte[] key = serdes.Serialize("key", new SerializationContext()), value = serdes.Serialize("value", new SerializationContext());

    var items = new List<KeyValuePair<Bytes, byte[]>>();
    items.Add(KeyValuePair.Create(new Bytes(key), value));
    items.Add(KeyValuePair.Create(new Bytes(key), (byte[])null));

    // The null value acts as a tombstone and removes the entry written just before it
    store.PutAll(items);
    Assert.AreEqual(0, store.ApproximateNumEntries());
}
public void DeserializeData()
{
    long millie = DateTime.Now.Millisecond;
    string s = "coucou";
    var stringSerdes = new StringSerDes();
    var serdes = new ValueAndTimestampSerDes<string>(stringSerdes);
    var data = ValueAndTimestamp<string>.Make(s, millie);

    // Round-trip: both the value and its timestamp must survive serialize/deserialize
    var r = serdes.Deserialize(serdes.Serialize(data));
    Assert.IsNotNull(r);
    Assert.AreEqual(s, r.Value);
    Assert.AreEqual(millie, r.Timestamp);
}
private Topology KStreamWithImplicitReKeyJoinTopology(ITimestampExtractor timestampExtractor)
{
    StringSerDes stringSerdes = new StringSerDes();
    var builder = new StreamBuilder();

    var userRegionsTable = builder.Table<string, string>(
        userRegionsTopic, stringSerdes, stringSerdes,
        InMemory<string, string>.As("table-store"), "table", timestampExtractor);
    var userClicksStream = builder.Stream<string, string>(
        userClicksTopic, stringSerdes, stringSerdes, timestampExtractor);

    userClicksStream
        .SelectKey((k, v) => k) // SelectKey marks the stream for an implicit repartition before the join
        .Join(userRegionsTable, Join)
        .To(outputTopic);

    return builder.Build();
}
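// Hypothetical sketch (not part of the original tests): one way the topology above could be
// exercised with TopologyTestDriver, mirroring the driver usage of the other tests in this
// section. Assumes that a null ITimestampExtractor falls back to the default extractor and
// that the Join value joiner emits a non-null result once both sides have been piped.
public void KStreamWithImplicitReKeyJoinSketch()
{
    var config = new StreamConfig<StringSerDes, StringSerDes> { ApplicationId = "test-implicit-rekey-join" };
    var topology = KStreamWithImplicitReKeyJoinTopology(null);

    using (var driver = new TopologyTestDriver(topology, config))
    {
        var regions = driver.CreateInputTopic<string, string, StringSerDes, StringSerDes>(userRegionsTopic);
        var clicks = driver.CreateInputTopic<string, string, StringSerDes, StringSerDes>(userClicksTopic);
        var output = driver.CreateOuputTopic<string, string, StringSerDes, StringSerDes>(outputTopic);

        // Populate the table side first so the click record finds a matching region
        regions.PipeInput("alice", "europe");
        clicks.PipeInput("alice", "click-1");

        // Under the assumptions above, the joined record should be readable from the output topic
        var record = output.ReadKeyValue();
        Assert.IsNotNull(record);
    }
}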