Beispiel #1
0
        /// <summary>
        /// Test setup: configures an at-least-once streams application backed
        /// by a mocked Kafka supplier (4 partitions) and creates two stream
        /// threads over a simple "topic" -> "topic2" pass-through topology.
        /// </summary>
        public void Init()
        {
            config.ApplicationId = "test-stream-thread";
            // Fresh state directory per run so tests never share local state.
            config.StateDir      = Guid.NewGuid().ToString();
            config.Guarantee     = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs        = 10;

            mockKafkaSupplier = new MockKafkaSupplier(4);

            var streamBuilder = new StreamBuilder();
            streamBuilder.Stream<string, string>("topic").To("topic2");
            var topology = streamBuilder.Build();

            // Two threads, each with its own admin client built from the same config.
            thread1 = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, new StreamMetricsRegistry(), config,
                mockKafkaSupplier, mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread2 = StreamThread.Create(
                "thread-1", "c1",
                topology.Builder, new StreamMetricsRegistry(), config,
                mockKafkaSupplier, mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                1) as StreamThread;
        }
        /// <summary>
        /// Test setup: INFO-level metrics registry plus an at-least-once
        /// configuration with short poll/commit intervals, then a single
        /// stream thread over a "topic" -> "topic2" pass-through topology.
        /// </summary>
        public void Initialize()
        {
            streamMetricsRegistry = new StreamMetricsRegistry(
                Guid.NewGuid().ToString(), MetricsRecordingLevel.INFO);

            config.ApplicationId    = "test-stream-thread";
            // Fresh state directory per run so tests never share local state.
            config.StateDir         = Guid.NewGuid().ToString();
            config.Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs           = 10;
            config.MaxPollRecords   = 10;
            config.CommitIntervalMs = 10;
            config.MetricsRecording = MetricsRecordingLevel.INFO;

            mockKafkaSupplier = new MockKafkaSupplier(numberPartitions);

            var streamBuilder = new StreamBuilder();
            streamBuilder.Stream<string, string>("topic").To("topic2");
            var topology = streamBuilder.Build();

            thread = StreamThread.Create(
                threadId, "c1",
                topology.Builder, streamMetricsRegistry, config,
                mockKafkaSupplier, mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                1) as StreamThread;
        }
Beispiel #3
0
        /// <summary>
        /// Drives a freshly created thread through STARTING -> PENDING_SHUTDOWN
        /// -> DEAD, then calls Start on the already-dead thread.
        /// NOTE(review): this snippet is truncated here — the closing brace and
        /// any final assertions lie outside this view.
        /// </summary>
        public void CheckSetStateStartingWithDeadThread()
        {
            var config = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test";

            var builder = new StreamBuilder();

            // Minimal pass-through topology; no records are ever processed here.
            builder.Stream <string, string>("topic").To("topic2");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var thread   = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            // MUST BE IN CREATED STATE
            Assert.AreEqual(ThreadState.CREATED, thread.State);
            thread.SetState(ThreadState.STARTING);
            thread.SetState(ThreadState.PENDING_SHUTDOWN);
            thread.SetState(ThreadState.DEAD);
            // Starting a thread that has already reached DEAD.
            thread.Start(default);
Beispiel #4
0
        /// <summary>
        /// A MapValues stage captures StreamizMetadata.GetCurrentPartitionMetadata()
        /// while a record is processed; the captured partition must be 0.
        /// </summary>
        public void GetCurrentPartitionMetadataTests()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId  = "test",
                Guarantee      = ProcessingGuarantee.AT_LEAST_ONCE,
                PollMs         = 1,
                FollowMetadata = true
            };
            var configConsumer = config.Clone();
            configConsumer.ApplicationId = "test-consumer";

            int? capturedPartition = null;

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder
                .Stream<string, string>("topic")
                .MapValues(v =>
                {
                    // Record which partition the metadata reports during processing.
                    capturedPartition = StreamizMetadata.GetCurrentPartitionMetadata();
                    return v;
                })
                .To("output");

            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(configConsumer.ToConsumerConfig(), null);

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            consumer.Subscribe("output");

            // Poll until the transformed record shows up on "output".
            ConsumeResult<byte[], byte[]> result = consumer.Consume(100);
            while (result == null)
            {
                result = consumer.Consume(100);
            }

            source.Cancel();
            thread.Dispose();

            Assert.NotNull(capturedPartition);
            Assert.AreEqual(0, capturedPartition);
        }
        /// <summary>
        /// CommitAll() must report zero commits when none of the four assigned
        /// partition tasks has processed any record.
        /// </summary>
        public void TaskManagerCommitWithoutCommitNeeed()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test-app"
            };

            var builder = new StreamBuilder();
            builder.Stream<string, string>("topic").To("topic2");
            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig(), null);

            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer);
            var taskManager = new TaskManager(taskCreator, supplier.GetAdmin(config.ToAdminConfig("admin")), consumer);

            // Assign partitions 0..3 of "topic".
            var assignment = new List<TopicPartition>();
            for (int partition = 0; partition < 4; ++partition)
            {
                assignment.Add(new TopicPartition("topic", partition));
            }
            taskManager.CreateTasks(assignment);

            Assert.AreEqual(4, taskManager.ActiveTasks.Count());
            // Nothing was processed, so there is nothing to commit.
            Assert.AreEqual(0, taskManager.CommitAll());
            taskManager.Close();
        }
        /// <summary>
        /// The default client supplier must return a non-null admin client
        /// whose name starts with the requested client id ("admin").
        /// </summary>
        public void CreateAdminClient()
        {
            var supplier = new DefaultKafkaClientSupplier(new KafkaLoggerAdapter(config));
            var admin    = supplier.GetAdmin(config.ToAdminConfig("admin"));

            Assert.IsNotNull(admin);
            // Client names look like "admin#<n>"; compare only the prefix.
            Assert.AreEqual("admin", admin.Name.Split("#")[0]);
        }
Beispiel #7
0
        /// <summary>
        /// End-to-end commit-interval workflow: a produced record flows through
        /// the thread to "topic2", and the committed source offset for
        /// partition 0 of "topic" is reported as 1.
        /// </summary>
        public void StreamThreadCommitIntervalWorkflow()
        {
            var source = new System.Threading.CancellationTokenSource();
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId    = "test",
                Guarantee        = ProcessingGuarantee.AT_LEAST_ONCE,
                PollMs           = 1,
                CommitIntervalMs = 1
            };

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();
            builder.Stream<string, string>("topic").To("topic2");
            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);
            consumer.Subscribe("topic2");

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(source.Token);
            producer.Produce("topic", new Confluent.Kafka.Message<byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });

            // Give the stream thread time to process the message.
            System.Threading.Thread.Sleep(100);
            var message = consumer.Consume(100);

            Assert.AreEqual("key1", serdes.Deserialize(message.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(message.Message.Value, new SerializationContext()));

            var partitions = new List<TopicPartition> { new TopicPartition("topic", 0) };
            var offsets    = thread.GetCommittedOffsets(partitions, TimeSpan.FromSeconds(10)).ToList();

            Assert.AreEqual(1, offsets.Count);
            Assert.AreEqual(1, offsets[0].Offset.Value);
            Assert.AreEqual(0, offsets[0].TopicPartition.Partition.Value);
            Assert.AreEqual("topic", offsets[0].Topic);

            source.Cancel();
            thread.Dispose();
        }
Beispiel #8
0
        /// <summary>
        /// Creating tasks for partitions beyond the initial assignment
        /// (including a previously revoked one) must leave three active tasks
        /// and an empty revoked-task set.
        /// </summary>
        public void TaskManagerAssignedUnknownPartitions()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test-app"
            };
            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();

            builder.Stream<string, string>("topic")
                .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
                .To("topic2");

            var topology = builder.Build();

            var supplier        = new SyncKafkaSupplier();
            var producer        = supplier.GetProducer(config.ToProducerConfig());
            var consumer        = supplier.GetConsumer(config.ToConsumerConfig(), null);
            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var changelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              changelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, changelogReader);

            // Initial assignment: partitions 0 and 1.
            taskManager.CreateTasks(new List<TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1)
            });

            // Revoke partition 1 ...
            taskManager.RevokeTasks(new List<TopicPartition>
            {
                new TopicPartition("topic", 1)
            });

            // ... then re-assign it together with the previously unseen partition 2.
            taskManager.CreateTasks(new List<TopicPartition>
            {
                new TopicPartition("topic", 0),
                new TopicPartition("topic", 1),
                new TopicPartition("topic", 2)
            });

            taskManager.TryToCompleteRestoration();

            Assert.AreEqual(3, taskManager.ActiveTasks.Count());
            Assert.AreEqual(0, taskManager.RevokedTasks.Count());
            taskManager.Close();
        }
Beispiel #9
0
        /// <summary>
        /// Test setup: builds a changelogged in-memory table topology on two
        /// mocked partitions, creates two stream threads, then provisions the
        /// internal (changelog) topics before the test body runs.
        /// </summary>
        public void Init()
        {
            token1 = new System.Threading.CancellationTokenSource();
            token2 = new System.Threading.CancellationTokenSource();

            config.ApplicationId = "test-stream-thread";
            // Fresh state directory per run so tests never share local state.
            config.StateDir      = Path.Combine(".", Guid.NewGuid().ToString());
            config.Guarantee     = ProcessingGuarantee.AT_LEAST_ONCE;
            config.PollMs        = 10;

            mockKafkaSupplier = new MockKafkaSupplier(2, 0);

            var builder = new StreamBuilder();

            builder.Table("topic", InMemory <string, string> .As("store").WithLoggingEnabled());

            var topo = builder.Build();

            topo.Builder.RewriteTopology(config);
            topo.Builder.BuildTopology();

            thread1 = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, new StreamMetricsRegistry(), config,
                mockKafkaSupplier, mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread2 = StreamThread.Create(
                "thread-1", "c1",
                topo.Builder, new StreamMetricsRegistry(), config,
                mockKafkaSupplier, mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                1) as StreamThread;

            var internalTopicManager =
                new DefaultTopicManager(config, mockKafkaSupplier.GetAdmin(config.ToAdminConfig("admin")));

            // FIX: the original called .GetAwaiter() without .GetResult(), which
            // neither waits for the async topic creation to complete nor surfaces
            // its exceptions. Block until the internal topics actually exist so
            // the threads don't race against topic provisioning.
            InternalTopicManagerUtils
            .New()
            .CreateInternalTopicsAsync(internalTopicManager, topo.Builder)
            .GetAwaiter()
            .GetResult();
        }
Beispiel #10
0
        /// <summary>
        /// An ad-hoc key added via AddConfig ("sasl.password") must propagate
        /// into every derived client configuration: admin, consumer, producer
        /// and global consumer.
        /// </summary>
        public void StreamAddCorrectConfig()
        {
            var stream = new StreamConfig
            {
                ApplicationId = "unittest"
            };
            stream.AddConfig("sasl.password", "coucou");

            var adminConfig    = stream.ToAdminConfig("admin");
            var consumerConfig = stream.ToConsumerConfig();
            var producerConfig = stream.ToProducerConfig();
            var globalConfig   = stream.ToGlobalConsumerConfig("global");

            // The raw key must surface as the typed SaslPassword property everywhere.
            Assert.AreEqual("coucou", adminConfig.SaslPassword);
            Assert.AreEqual("coucou", consumerConfig.SaslPassword);
            Assert.AreEqual("coucou", producerConfig.SaslPassword);
            Assert.AreEqual("coucou", globalConfig.SaslPassword);
        }
Beispiel #11
0
        /// <summary>
        /// StreamThread.Create must name the resulting thread after the
        /// supplied thread id.
        /// </summary>
        public void CreateStreamThread()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test"
            };

            var builder = new StreamBuilder();
            builder.Stream<string, string>("topic").To("topic2");
            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var thread   = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            Assert.AreEqual("thread-0", thread.Name);
        }
Beispiel #12
0
        /// <summary>
        /// A direct CREATED -> DEAD transition is illegal and must throw a
        /// StreamsException.
        /// </summary>
        public void CheckIncorrectStateTransition()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test"
            };

            var builder = new StreamBuilder();
            builder.Stream<string, string>("topic").To("topic2");
            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var thread   = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, new StreamMetricsRegistry(), config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            // A freshly created thread starts in CREATED ...
            Assert.AreEqual(ThreadState.CREATED, thread.State);
            // ... and cannot jump straight to DEAD.
            Assert.Throws<StreamsException>(() => thread.SetState(ThreadState.DEAD));
        }
        /// <summary>
        /// SetState must succeed on a thread created via the Create overload
        /// that takes no metrics registry (i.e. without a state-changed
        /// handler wired up).
        /// </summary>
        public void CheckSetStateWithoutStateChangedHandler()
        {
            var config = new StreamConfig<StringSerDes, StringSerDes>
            {
                ApplicationId = "test"
            };

            var builder = new StreamBuilder();
            builder.Stream<string, string>("topic").To("topic2");
            var topology = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var thread   = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            // A freshly created thread starts in CREATED and can move to STARTING.
            Assert.AreEqual(ThreadState.CREATED, thread.State);
            thread.SetState(ThreadState.STARTING);
            Assert.AreEqual(ThreadState.STARTING, thread.State);
        }
Beispiel #14
0
        public void StreamCompleteConfigAllProperty()
        {
            var stream = new StreamConfig();

            stream.ApplicationId                       = "test";
            stream.Acks                                = Confluent.Kafka.Acks.All;
            stream.ApiVersionFallbackMs                = 1;
            stream.ApiVersionRequest                   = false;
            stream.ApiVersionRequestTimeoutMs          = 100;
            stream.AutoOffsetReset                     = Confluent.Kafka.AutoOffsetReset.Latest;
            stream.BatchNumMessages                    = 42;
            stream.BootstrapServers                    = "127.0.0.1:9092";
            stream.BrokerAddressFamily                 = Confluent.Kafka.BrokerAddressFamily.V4;
            stream.BrokerAddressTtl                    = 100;
            stream.BrokerVersionFallback               = "0.12.0";
            stream.CheckCrcs                           = true;
            stream.ClientId                            = "test-client";
            stream.ClientRack                          = "1";
            stream.CommitIntervalMs                    = 300;
            stream.CompressionLevel                    = 2;
            stream.CompressionType                     = Confluent.Kafka.CompressionType.Snappy;
            stream.ConsumeResultFields                 = "all";
            stream.CoordinatorQueryIntervalMs          = 300;
            stream.Debug                               = "all";
            stream.DeliveryReportFields                = "key";
            stream.EnableAutoOffsetStore               = false;
            stream.EnableBackgroundPoll                = false;
            stream.EnableDeliveryReports               = false;
            stream.EnableGaplessGuarantee              = false;
            stream.EnableIdempotence                   = true;
            stream.EnablePartitionEof                  = true;
            stream.EnableSaslOauthbearerUnsecureJwt    = true;
            stream.EnableSslCertificateVerification    = false;
            stream.FetchErrorBackoffMs                 = 10;
            stream.FetchMaxBytes                       = 10;
            stream.FetchMinBytes                       = 10;
            stream.FetchWaitMaxMs                      = 10;
            stream.GroupProtocolType                   = "?";
            stream.HeartbeatIntervalMs                 = 4000;
            stream.InternalTerminationSignal           = 1;
            stream.IsolationLevel                      = Confluent.Kafka.IsolationLevel.ReadCommitted;
            stream.LingerMs                            = 12;
            stream.LogConnectionClose                  = false;
            stream.LogQueue                            = true;
            stream.LogThreadName                       = false;
            stream.MaxInFlight                         = 12;
            stream.MaxPartitionFetchBytes              = 500;
            stream.MaxPollIntervalMs                   = 400;
            stream.MessageCopyMaxBytes                 = 40;
            stream.MessageMaxBytes                     = 500;
            stream.MessageSendMaxRetries               = 4;
            stream.MessageTimeoutMs                    = 600;
            stream.MetadataMaxAgeMs                    = 6;
            stream.MetadataRequestTimeoutMs            = 83;
            stream.PartitionAssignmentStrategy         = Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin;
            stream.Partitioner                         = Confluent.Kafka.Partitioner.Murmur2Random;
            stream.PluginLibraryPaths                  = "D:";
            stream.QueueBufferingBackpressureThreshold = 10;
            stream.QueueBufferingMaxKbytes             = 400;
            stream.QueueBufferingMaxMessages           = 5;
            stream.QueuedMaxMessagesKbytes             = 800;
            stream.QueuedMinMessages                   = 1;
            stream.ReceiveMessageMaxBytes              = 1000;
            stream.ReconnectBackoffMaxMs               = 9000;
            stream.ReconnectBackoffMs                  = 8000;
            stream.RequestTimeoutMs                    = 16600;
            stream.RetryBackoffMs                      = 600;
            stream.SaslKerberosKeytab                  = "test";
            stream.SaslKerberosKinitCmd                = "test";
            stream.SaslKerberosMinTimeBeforeRelogin    = 600;
            stream.SaslKerberosPrincipal               = "Princiapl";
            stream.SaslKerberosServiceName             = "kerberos";
            stream.SaslMechanism                       = Confluent.Kafka.SaslMechanism.ScramSha512;
            stream.SaslOauthbearerConfig               = "ouath";
            stream.SaslPassword                        = "******";
            stream.SaslUsername                        = "******";
            stream.SecurityProtocol                    = Confluent.Kafka.SecurityProtocol.SaslPlaintext;
            stream.SessionTimeoutMs                    = 1000;
            stream.SocketKeepaliveEnable               = true;
            stream.SocketMaxFails                      = 2;
            stream.SocketNagleDisable                  = true;
            stream.SocketReceiveBufferBytes            = 50000;
            stream.SocketSendBufferBytes               = 50000;
            stream.SocketTimeoutMs                     = 6000;
            stream.SslCaLocation                       = "D:";
            stream.SslCertificateLocation              = "D:";
            stream.SslCertificatePem                   = "D:";
            stream.SslCipherSuites                     = "ciphers";
            stream.SslCrlLocation                      = "D:";
            stream.SslCurvesList                       = "";
            stream.SslEndpointIdentificationAlgorithm  = Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https;
            stream.SslKeyLocation                      = "C:";
            stream.SslKeyPassword                      = "******";
            stream.SslKeyPem                           = "pem";
            stream.SslKeystoreLocation                 = "J:";
            stream.SslKeystorePassword                 = "******";
            stream.SslSigalgsList                      = "oepn";
            stream.StatisticsIntervalMs                = 14;
            stream.TopicBlacklist                      = "*";
            stream.TopicMetadataRefreshFastIntervalMs  = 500;
            stream.TopicMetadataRefreshIntervalMs      = 200;
            stream.TopicMetadataRefreshSparse          = false;
            stream.TransactionalId                     = "transac";
            stream.TransactionTimeout                  = TimeSpan.FromSeconds(1);
            stream.TransactionTimeoutMs                = 400;

            var producerConfig = stream.ToProducerConfig();
            var consumerConfig = stream.ToConsumerConfig();
            var globalConfig   = stream.ToGlobalConsumerConfig("global");
            var adminConfig    = stream.ToAdminConfig("admin");

            #region ProducerConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, producerConfig.Acks);
            Assert.AreEqual(1, producerConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, producerConfig.ApiVersionRequest);
            Assert.AreEqual(100, producerConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual(42, producerConfig.BatchNumMessages);
            Assert.AreEqual("127.0.0.1:9092", producerConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, producerConfig.BrokerAddressFamily);
            Assert.AreEqual(100, producerConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", producerConfig.BrokerVersionFallback);
            Assert.AreEqual("test-client", producerConfig.ClientId);
            Assert.AreEqual("1", producerConfig.ClientRack);
            Assert.AreEqual(2, producerConfig.CompressionLevel);
            Assert.AreEqual(Confluent.Kafka.CompressionType.Snappy, producerConfig.CompressionType);
            Assert.AreEqual("all", producerConfig.Debug);
            Assert.AreEqual("key", producerConfig.DeliveryReportFields);
            Assert.AreEqual(false, producerConfig.EnableBackgroundPoll);
            Assert.AreEqual(false, producerConfig.EnableDeliveryReports);
            Assert.AreEqual(false, producerConfig.EnableGaplessGuarantee);
            Assert.AreEqual(true, producerConfig.EnableIdempotence);
            Assert.AreEqual(true, producerConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, producerConfig.EnableSslCertificateVerification);
            Assert.AreEqual(1, producerConfig.InternalTerminationSignal);
            Assert.AreEqual(12, producerConfig.LingerMs);
            Assert.AreEqual(false, producerConfig.LogConnectionClose);
            Assert.AreEqual(true, producerConfig.LogQueue);
            Assert.AreEqual(false, producerConfig.LogThreadName);
            Assert.AreEqual(12, producerConfig.MaxInFlight);
            Assert.AreEqual(40, producerConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, producerConfig.MessageMaxBytes);
            Assert.AreEqual(4, producerConfig.MessageSendMaxRetries);
            Assert.AreEqual(600, producerConfig.MessageTimeoutMs);
            Assert.AreEqual(6, producerConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, producerConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.Partitioner.Murmur2Random, producerConfig.Partitioner);
            Assert.AreEqual("D:", producerConfig.PluginLibraryPaths);
            Assert.AreEqual(10, producerConfig.QueueBufferingBackpressureThreshold);
            Assert.AreEqual(400, producerConfig.QueueBufferingMaxKbytes);
            Assert.AreEqual(5, producerConfig.QueueBufferingMaxMessages);
            Assert.AreEqual(1000, producerConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, producerConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, producerConfig.ReconnectBackoffMs);
            Assert.AreEqual(16600, producerConfig.RequestTimeoutMs);
            Assert.AreEqual(600, producerConfig.RetryBackoffMs);
            Assert.AreEqual("test", producerConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", producerConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, producerConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", producerConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", producerConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, producerConfig.SaslMechanism);
            Assert.AreEqual("ouath", producerConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", producerConfig.SaslPassword);
            Assert.AreEqual("admin", producerConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, producerConfig.SecurityProtocol);
            Assert.AreEqual(true, producerConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, producerConfig.SocketMaxFails);
            Assert.AreEqual(true, producerConfig.SocketNagleDisable);
            Assert.AreEqual(50000, producerConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, producerConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, producerConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", producerConfig.SslCaLocation);
            Assert.AreEqual("D:", producerConfig.SslCertificateLocation);
            Assert.AreEqual("D:", producerConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", producerConfig.SslCipherSuites);
            Assert.AreEqual("D:", producerConfig.SslCrlLocation);
            Assert.AreEqual("", producerConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, producerConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", producerConfig.SslKeyLocation);
            Assert.AreEqual("test", producerConfig.SslKeyPassword);
            Assert.AreEqual("pem", producerConfig.SslKeyPem);
            Assert.AreEqual("J:", producerConfig.SslKeystoreLocation);
            Assert.AreEqual("password", producerConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", producerConfig.SslSigalgsList);
            Assert.AreEqual(14, producerConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", producerConfig.TopicBlacklist);
            Assert.AreEqual(500, producerConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, producerConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, producerConfig.TopicMetadataRefreshSparse);
            Assert.AreEqual("transac", producerConfig.TransactionalId);
            Assert.AreEqual(400, producerConfig.TransactionTimeoutMs);

            #endregion

            #region ConsumerConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, consumerConfig.Acks);
            Assert.AreEqual(1, consumerConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, consumerConfig.ApiVersionRequest);
            Assert.AreEqual(100, consumerConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.AutoOffsetReset.Latest, consumerConfig.AutoOffsetReset);
            Assert.AreEqual("127.0.0.1:9092", consumerConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, consumerConfig.BrokerAddressFamily);
            Assert.AreEqual(100, consumerConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", consumerConfig.BrokerVersionFallback);
            Assert.AreEqual(true, consumerConfig.CheckCrcs);
            Assert.AreEqual("test-client", consumerConfig.ClientId);
            Assert.AreEqual("1", consumerConfig.ClientRack);
            Assert.AreEqual(300, consumerConfig.CoordinatorQueryIntervalMs);
            Assert.AreEqual("all", consumerConfig.Debug);
            Assert.AreEqual(false, consumerConfig.EnableAutoOffsetStore);
            Assert.AreEqual(true, consumerConfig.EnablePartitionEof);
            Assert.AreEqual(true, consumerConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, consumerConfig.EnableSslCertificateVerification);
            Assert.AreEqual(10, consumerConfig.FetchErrorBackoffMs);
            Assert.AreEqual(10, consumerConfig.FetchMaxBytes);
            Assert.AreEqual(10, consumerConfig.FetchMinBytes);
            Assert.AreEqual(10, consumerConfig.FetchWaitMaxMs);
            Assert.AreEqual("?", consumerConfig.GroupProtocolType);
            Assert.AreEqual(4000, consumerConfig.HeartbeatIntervalMs);
            Assert.AreEqual(1, consumerConfig.InternalTerminationSignal);
            Assert.AreEqual(Confluent.Kafka.IsolationLevel.ReadCommitted, consumerConfig.IsolationLevel);
            Assert.AreEqual(false, consumerConfig.LogConnectionClose);
            Assert.AreEqual(true, consumerConfig.LogQueue);
            Assert.AreEqual(false, consumerConfig.LogThreadName);
            Assert.AreEqual(12, consumerConfig.MaxInFlight);
            Assert.AreEqual(500, consumerConfig.MaxPartitionFetchBytes);
            Assert.AreEqual(400, consumerConfig.MaxPollIntervalMs);
            Assert.AreEqual(40, consumerConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, consumerConfig.MessageMaxBytes);
            Assert.AreEqual(6, consumerConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, consumerConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin, consumerConfig.PartitionAssignmentStrategy);
            Assert.AreEqual("D:", consumerConfig.PluginLibraryPaths);
            Assert.AreEqual(800, consumerConfig.QueuedMaxMessagesKbytes);
            Assert.AreEqual(1, consumerConfig.QueuedMinMessages);
            Assert.AreEqual(1000, consumerConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, consumerConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, consumerConfig.ReconnectBackoffMs);
            Assert.AreEqual("test", consumerConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", consumerConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, consumerConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", consumerConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", consumerConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, consumerConfig.SaslMechanism);
            Assert.AreEqual("ouath", consumerConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", consumerConfig.SaslPassword);
            Assert.AreEqual("admin", consumerConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, consumerConfig.SecurityProtocol);
            Assert.AreEqual(1000, consumerConfig.SessionTimeoutMs);
            Assert.AreEqual(true, consumerConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, consumerConfig.SocketMaxFails);
            Assert.AreEqual(true, consumerConfig.SocketNagleDisable);
            Assert.AreEqual(50000, consumerConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, consumerConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, consumerConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", consumerConfig.SslCaLocation);
            Assert.AreEqual("D:", consumerConfig.SslCertificateLocation);
            Assert.AreEqual("D:", consumerConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", consumerConfig.SslCipherSuites);
            Assert.AreEqual("D:", consumerConfig.SslCrlLocation);
            Assert.AreEqual("", consumerConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, consumerConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", consumerConfig.SslKeyLocation);
            Assert.AreEqual("test", consumerConfig.SslKeyPassword);
            Assert.AreEqual("pem", consumerConfig.SslKeyPem);
            Assert.AreEqual("J:", consumerConfig.SslKeystoreLocation);
            Assert.AreEqual("password", consumerConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", consumerConfig.SslSigalgsList);
            Assert.AreEqual(14, consumerConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", consumerConfig.TopicBlacklist);
            Assert.AreEqual(500, consumerConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, consumerConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, consumerConfig.TopicMetadataRefreshSparse);
            #endregion

            #region GlobalConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, globalConfig.Acks);
            Assert.AreEqual(1, globalConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, globalConfig.ApiVersionRequest);
            Assert.AreEqual(100, globalConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.AutoOffsetReset.Latest, globalConfig.AutoOffsetReset);
            Assert.AreEqual("127.0.0.1:9092", globalConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, globalConfig.BrokerAddressFamily);
            Assert.AreEqual(100, globalConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", globalConfig.BrokerVersionFallback);
            Assert.AreEqual(true, globalConfig.CheckCrcs);
            Assert.AreEqual("global", globalConfig.ClientId);
            Assert.AreEqual("1", globalConfig.ClientRack);
            Assert.AreEqual(300, globalConfig.CoordinatorQueryIntervalMs);
            Assert.AreEqual("all", globalConfig.Debug);
            Assert.AreEqual(false, globalConfig.EnableAutoOffsetStore);
            Assert.AreEqual(true, globalConfig.EnablePartitionEof);
            Assert.AreEqual(true, globalConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, globalConfig.EnableSslCertificateVerification);
            Assert.AreEqual(10, globalConfig.FetchErrorBackoffMs);
            Assert.AreEqual(10, globalConfig.FetchMaxBytes);
            Assert.AreEqual(10, globalConfig.FetchMinBytes);
            Assert.AreEqual(10, globalConfig.FetchWaitMaxMs);
            Assert.AreEqual("?", globalConfig.GroupProtocolType);
            Assert.AreEqual(4000, globalConfig.HeartbeatIntervalMs);
            Assert.AreEqual(1, globalConfig.InternalTerminationSignal);
            Assert.AreEqual(Confluent.Kafka.IsolationLevel.ReadCommitted, globalConfig.IsolationLevel);
            Assert.AreEqual(false, globalConfig.LogConnectionClose);
            Assert.AreEqual(true, globalConfig.LogQueue);
            Assert.AreEqual(false, globalConfig.LogThreadName);
            Assert.AreEqual(12, globalConfig.MaxInFlight);
            Assert.AreEqual(500, globalConfig.MaxPartitionFetchBytes);
            Assert.AreEqual(400, globalConfig.MaxPollIntervalMs);
            Assert.AreEqual(40, globalConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, globalConfig.MessageMaxBytes);
            Assert.AreEqual(6, globalConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, globalConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual(Confluent.Kafka.PartitionAssignmentStrategy.RoundRobin, globalConfig.PartitionAssignmentStrategy);
            Assert.AreEqual("D:", globalConfig.PluginLibraryPaths);
            Assert.AreEqual(800, globalConfig.QueuedMaxMessagesKbytes);
            Assert.AreEqual(1, globalConfig.QueuedMinMessages);
            Assert.AreEqual(1000, globalConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, globalConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, globalConfig.ReconnectBackoffMs);
            Assert.AreEqual("test", globalConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", globalConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, globalConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", globalConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", globalConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, globalConfig.SaslMechanism);
            Assert.AreEqual("ouath", globalConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", globalConfig.SaslPassword);
            Assert.AreEqual("admin", globalConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, globalConfig.SecurityProtocol);
            Assert.AreEqual(1000, globalConfig.SessionTimeoutMs);
            Assert.AreEqual(true, globalConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, globalConfig.SocketMaxFails);
            Assert.AreEqual(true, globalConfig.SocketNagleDisable);
            Assert.AreEqual(50000, globalConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, globalConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, globalConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", globalConfig.SslCaLocation);
            Assert.AreEqual("D:", globalConfig.SslCertificateLocation);
            Assert.AreEqual("D:", globalConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", globalConfig.SslCipherSuites);
            Assert.AreEqual("D:", globalConfig.SslCrlLocation);
            Assert.AreEqual("", globalConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, globalConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", globalConfig.SslKeyLocation);
            Assert.AreEqual("test", globalConfig.SslKeyPassword);
            Assert.AreEqual("pem", globalConfig.SslKeyPem);
            Assert.AreEqual("J:", globalConfig.SslKeystoreLocation);
            Assert.AreEqual("password", globalConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", globalConfig.SslSigalgsList);
            Assert.AreEqual(14, globalConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", globalConfig.TopicBlacklist);
            Assert.AreEqual(500, globalConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, globalConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, globalConfig.TopicMetadataRefreshSparse);
            #endregion

            #region AdminConfig
            Assert.AreEqual(Confluent.Kafka.Acks.All, adminConfig.Acks);
            Assert.AreEqual(1, adminConfig.ApiVersionFallbackMs);
            Assert.AreEqual(false, adminConfig.ApiVersionRequest);
            Assert.AreEqual(100, adminConfig.ApiVersionRequestTimeoutMs);
            Assert.AreEqual("127.0.0.1:9092", adminConfig.BootstrapServers);
            Assert.AreEqual(Confluent.Kafka.BrokerAddressFamily.V4, adminConfig.BrokerAddressFamily);
            Assert.AreEqual(100, adminConfig.BrokerAddressTtl);
            Assert.AreEqual("0.12.0", adminConfig.BrokerVersionFallback);
            Assert.AreEqual("admin", adminConfig.ClientId);
            Assert.AreEqual("1", adminConfig.ClientRack);
            Assert.AreEqual("all", adminConfig.Debug);
            Assert.AreEqual(true, adminConfig.EnableSaslOauthbearerUnsecureJwt);
            Assert.AreEqual(false, adminConfig.EnableSslCertificateVerification);
            Assert.AreEqual(1, adminConfig.InternalTerminationSignal);
            Assert.AreEqual(false, adminConfig.LogConnectionClose);
            Assert.AreEqual(true, adminConfig.LogQueue);
            Assert.AreEqual(false, adminConfig.LogThreadName);
            Assert.AreEqual(12, adminConfig.MaxInFlight);
            Assert.AreEqual(40, adminConfig.MessageCopyMaxBytes);
            Assert.AreEqual(500, adminConfig.MessageMaxBytes);
            Assert.AreEqual(6, adminConfig.MetadataMaxAgeMs);
            Assert.AreEqual(83, adminConfig.MetadataRequestTimeoutMs);
            Assert.AreEqual("D:", adminConfig.PluginLibraryPaths);
            Assert.AreEqual(1000, adminConfig.ReceiveMessageMaxBytes);
            Assert.AreEqual(9000, adminConfig.ReconnectBackoffMaxMs);
            Assert.AreEqual(8000, adminConfig.ReconnectBackoffMs);
            Assert.AreEqual("test", adminConfig.SaslKerberosKeytab);
            Assert.AreEqual("test", adminConfig.SaslKerberosKinitCmd);
            Assert.AreEqual(600, adminConfig.SaslKerberosMinTimeBeforeRelogin);
            Assert.AreEqual("Princiapl", adminConfig.SaslKerberosPrincipal);
            Assert.AreEqual("kerberos", adminConfig.SaslKerberosServiceName);
            Assert.AreEqual(Confluent.Kafka.SaslMechanism.ScramSha512, adminConfig.SaslMechanism);
            Assert.AreEqual("ouath", adminConfig.SaslOauthbearerConfig);
            Assert.AreEqual("test", adminConfig.SaslPassword);
            Assert.AreEqual("admin", adminConfig.SaslUsername);
            Assert.AreEqual(Confluent.Kafka.SecurityProtocol.SaslPlaintext, adminConfig.SecurityProtocol);
            Assert.AreEqual(true, adminConfig.SocketKeepaliveEnable);
            Assert.AreEqual(2, adminConfig.SocketMaxFails);
            Assert.AreEqual(true, adminConfig.SocketNagleDisable);
            Assert.AreEqual(50000, adminConfig.SocketReceiveBufferBytes);
            Assert.AreEqual(50000, adminConfig.SocketSendBufferBytes);
            Assert.AreEqual(6000, adminConfig.SocketTimeoutMs);
            Assert.AreEqual("D:", adminConfig.SslCaLocation);
            Assert.AreEqual("D:", adminConfig.SslCertificateLocation);
            Assert.AreEqual("D:", adminConfig.SslCertificatePem);
            Assert.AreEqual("ciphers", adminConfig.SslCipherSuites);
            Assert.AreEqual("D:", adminConfig.SslCrlLocation);
            Assert.AreEqual("", adminConfig.SslCurvesList);
            Assert.AreEqual(Confluent.Kafka.SslEndpointIdentificationAlgorithm.Https, adminConfig.SslEndpointIdentificationAlgorithm);
            Assert.AreEqual("C:", adminConfig.SslKeyLocation);
            Assert.AreEqual("test", adminConfig.SslKeyPassword);
            Assert.AreEqual("pem", adminConfig.SslKeyPem);
            Assert.AreEqual("J:", adminConfig.SslKeystoreLocation);
            Assert.AreEqual("password", adminConfig.SslKeystorePassword);
            Assert.AreEqual("oepn", adminConfig.SslSigalgsList);
            Assert.AreEqual(14, adminConfig.StatisticsIntervalMs);
            Assert.AreEqual("*", adminConfig.TopicBlacklist);
            Assert.AreEqual(500, adminConfig.TopicMetadataRefreshFastIntervalMs);
            Assert.AreEqual(200, adminConfig.TopicMetadataRefreshIntervalMs);
            Assert.AreEqual(false, adminConfig.TopicMetadataRefreshSparse);
            #endregion
        }
        public void TaskManagerCommit()
        {
            // Topology under test: read "topic", upper-case key and value, forward to "topic2".
            var streamConfig = new StreamConfig<StringSerDes, StringSerDes>();
            streamConfig.ApplicationId = "test-app";

            var stringSerDes = new StringSerDes();
            var streamBuilder = new StreamBuilder();
            streamBuilder
                .Stream<string, string>("topic")
                .Map((k, v) => KeyValuePair.Create(k.ToUpper(), v.ToUpper()))
                .To("topic2");
            var topology = streamBuilder.Build();

            // In-memory kafka stack backing the task manager.
            var kafkaSupplier = new SyncKafkaSupplier();
            var producerClient = kafkaSupplier.GetProducer(streamConfig.ToProducerConfig());
            var consumerClient = kafkaSupplier.GetConsumer(streamConfig.ToConsumerConfig(), null);

            var creator = new TaskCreator(topology.Builder, streamConfig, "thread-0", kafkaSupplier, producerClient);
            var manager = new TaskManager(topology.Builder, creator, kafkaSupplier.GetAdmin(streamConfig.ToAdminConfig("admin")), consumerClient);

            // One active task per assigned input partition.
            var assignment = new List<TopicPartition>();
            for (int p = 0; p < 4; ++p)
            {
                assignment.Add(new TopicPartition("topic", p));
            }
            manager.CreateTasks(assignment);

            Assert.AreEqual(4, manager.ActiveTasks.Count());

            // Feed five records into partition 0 only.
            var partitionZero = new TopicPartition("topic", 0);
            var taskZero = manager.ActiveTaskFor(partitionZero);
            var inputRecords = new List<ConsumeResult<byte[], byte[]>>();
            for (int n = 1; n <= 5; ++n)
            {
                inputRecords.Add(new ConsumeResult<byte[], byte[]>
                {
                    Message = new Message<byte[], byte[]>
                    {
                        Key = stringSerDes.Serialize($"key{n}", new SerializationContext()),
                        Value = stringSerDes.Serialize($"value{n}", new SerializationContext())
                    },
                    // Offsets 0..4, one per record.
                    TopicPartitionOffset = new TopicPartitionOffset(partitionZero, n - 1)
                });
            }

            taskZero.AddRecords(inputRecords);

            Assert.IsTrue(taskZero.CanProcess(DateTime.Now.GetMilliseconds()));
            while (taskZero.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(taskZero.Process());
            }

            // ONLY ONE TASK HAVE BEEN RECORDS
            Assert.AreEqual(1, manager.CommitAll());

            // CHECK IN TOPIC topic2 : drain and verify the mapped (upper-cased) records.
            consumerClient.Subscribe("topic2");
            var outputRecords = new List<ConsumeResult<byte[], byte[]>>();
            while (true)
            {
                var polled = consumerClient.Consume(100);
                if (polled == null)
                {
                    break;
                }
                outputRecords.Add(polled);
                consumerClient.Commit(polled);
            }

            Assert.AreEqual(5, outputRecords.Count);
            for (int n = 1; n <= 5; ++n)
            {
                var record = outputRecords[n - 1];
                Assert.AreEqual($"KEY{n}", stringSerDes.Deserialize(record.Message.Key, new SerializationContext()));
                Assert.AreEqual($"VALUE{n}", stringSerDes.Deserialize(record.Message.Value, new SerializationContext()));
            }

            // NO RECORD IN THIS TASKS : an idle partition can neither process nor produce.
            var idleTask = manager.ActiveTaskFor(new TopicPartition("topic", 2));
            Assert.IsFalse(idleTask.CanProcess(DateTime.Now.GetMilliseconds()));
            Assert.IsFalse(idleTask.Process());

            manager.Close();
        }
// Beispiel #16
// 0
        public async Task exec(IConfiguration config, IServiceProvider services)
        {
            // Boots a Kafka stream that materializes the configured destination topic into
            // an in-memory KTable, then — once the stream reports RUNNING (or after a 10s
            // timeout) — produces sample Endpoint, Product and Order messages.
            Console.WriteLine("Process");

            var destTopic = config["spring.cloud.stream.bindings.output.destination"];
            Console.WriteLine(destTopic);

            using (var scope = services.CreateScope())
            {
                this._dataService = scope.ServiceProvider
                                    .GetRequiredService<IDataService>();

                bool isRunningState = false;
                var timeout = TimeSpan.FromSeconds(10);
                DateTime dt = DateTime.Now;

                // Result is currently unused; the call is kept for any side effects of the
                // data service. TODO: inject the captured orders into the stream.
                _ = this._dataService.readData();

                var sConfig = new StreamConfig<StringSerDes, StringSerDes>();
                sConfig.ApplicationId = config["SPRING_CLOUD_APPLICATION_GUID"];
                sConfig.BootstrapServers = config["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"];
                sConfig.SchemaRegistryUrl = config["SchemaRegistryUrl"];
                sConfig.AutoRegisterSchemas = true;
                sConfig.NumStreamThreads = 10;
                sConfig.Acks = Acks.All;
                sConfig.AddConsumerConfig("allow.auto.create.topics", "true");
                sConfig.InnerExceptionHandler = (e) => ExceptionHandlerResponse.CONTINUE;

                var schemaRegistryClient = new CachedSchemaRegistryClient(
                    new SchemaRegistryConfig
                    {
                        Url = sConfig.SchemaRegistryUrl
                    });

                var supplier = new SyncKafkaSupplier(new KafkaLoggerAdapter(sConfig));

                var producerConfig = sConfig.ToProducerConfig();
                var adminConfig = sConfig.ToAdminConfig(sConfig.ApplicationId);

                // NOTE(review): the admin client is no longer used directly (topic
                // auto-creation is enabled above); kept in case explicit topic creation
                // needs to be reintroduced.
                var admin = supplier.GetAdmin(adminConfig);

                var producer = supplier.GetProducer(producerConfig);

                StreamBuilder builder = new StreamBuilder();

                var serdes = new SchemaAvroSerDes<Order>();
                var keySerdes = new Int32SerDes();

                builder.Table(destTopic, keySerdes, serdes, InMemory<int, Order>.As(config["table"]));

                var t = builder.Build();
                KafkaStream stream = new KafkaStream(t, sConfig, supplier);

                stream.StateChanged += (old, @new) =>
                {
                    if (@new.Equals(KafkaStream.State.RUNNING))
                    {
                        isRunningState = true;
                    }
                };

                await stream.StartAsync();

                // Wait — without blocking the thread — until the stream is RUNNING or the
                // timeout elapses. (Thread.Sleep inside an async method would block a
                // thread-pool thread; Task.Delay is awaited instead.)
                while (!isRunningState)
                {
                    await Task.Delay(250);
                    if (DateTime.Now > dt + timeout)
                    {
                        break;
                    }
                }

                if (isRunningState)
                {
                    // Create a well formatted Endpoint producer for the external topic;
                    // both producers are disposed when the block exits (they were leaked before).
                    using (var endpProducer = new ProducerBuilder<byte[], Endpoint>(producerConfig)
                           .SetValueSerializer(new AvroSerializer<Endpoint>(schemaRegistryClient, new AvroSerializerConfig
                    {
                        AutoRegisterSchemas = true
                    }).AsSyncOverAsync()).Build())
                    // Create a well formatted Product producer for the external topic.
                    using (var productProducer = new ProducerBuilder<byte[], Product>(producerConfig)
                           .SetValueSerializer(new AvroSerializer<Product>(schemaRegistryClient, new AvroSerializerConfig
                    {
                        AutoRegisterSchemas = true
                    }).AsSyncOverAsync()).Build())
                    {
                        for (int k = 1; k < 10; k++)
                        {
                            endpProducer.Produce("api-endpoints",
                                new Message<byte[], Endpoint>
                                {
                                    Key = new Int32SerDes().Serialize(k, new SerializationContext()),
                                    Value = new Endpoint
                                    {
                                        endpoint_id = ("endpoint" + k),
                                        endpoint_url = ("http://endpoint" + k + "/"),
                                        http_method = "POST"
                                    }
                                }, (d) =>
                                {
                                    if (d.Status == PersistenceStatus.Persisted)
                                    {
                                        Console.WriteLine("Endpoint Message sent !");
                                    }
                                });

                            productProducer.Produce("product-external",
                                new Message<byte[], Product>
                                {
                                    Key = new Int32SerDes().Serialize(1, new SerializationContext()),
                                    Value = new Product
                                    {
                                        name = "Producto de Software",
                                        price = 1234.5F,
                                        product_id = 3
                                    }
                                }, (d) =>
                                {
                                    if (d.Status == PersistenceStatus.Persisted)
                                    {
                                        Console.WriteLine("Product Message sent !");
                                    }
                                });
                        }

                        await Task.Delay(10);

                        for (int k = 1; k < 10; k++)
                        {
                            producer.Produce(destTopic,
                                new Confluent.Kafka.Message<byte[], byte[]>
                                {
                                    Key = keySerdes.Serialize(k, new SerializationContext()),
                                    Value = serdes.Serialize(new Order
                                    {
                                        order_id = k,
                                        price = 123.5F,
                                        product_id = k
                                    }, new SerializationContext())
                                }, (d) =>
                                {
                                    if (d.Status == PersistenceStatus.Persisted)
                                    {
                                        Console.WriteLine("Order Message sent !");
                                    }
                                });
                        }

                        // Give the in-flight delivery callbacks a moment to run.
                        await Task.Delay(50);
                    }
                }
            }
        }
        public void StandardWorkflowTaskManager()
        {
            // Pass-through topology: "topic" -> "topic2".
            var streamConfig = new StreamConfig<StringSerDes, StringSerDes>();
            streamConfig.ApplicationId = "test-app";

            var streamBuilder = new StreamBuilder();
            streamBuilder.Stream<string, string>("topic").To("topic2");
            var topology = streamBuilder.Build();

            // In-memory kafka stack backing the task manager.
            var kafkaSupplier = new SyncKafkaSupplier();
            var producerClient = kafkaSupplier.GetProducer(streamConfig.ToProducerConfig());
            var consumerClient = kafkaSupplier.GetConsumer(streamConfig.ToConsumerConfig(), null);

            var creator = new TaskCreator(topology.Builder, streamConfig, "thread-0", kafkaSupplier, producerClient);
            var manager = new TaskManager(topology.Builder, creator, kafkaSupplier.GetAdmin(streamConfig.ToAdminConfig("admin")), consumerClient);

            // Assign four partitions -> four active tasks, all idle.
            var assignment = new List<TopicPartition>();
            for (int p = 0; p < 4; ++p)
            {
                assignment.Add(new TopicPartition("topic", p));
            }
            manager.CreateTasks(assignment);

            Assert.AreEqual(4, manager.ActiveTasks.Count());
            for (int p = 0; p < 4; ++p)
            {
                var activeTask = manager.ActiveTaskFor(new TopicPartition("topic", p));
                Assert.IsNotNull(activeTask);
                Assert.AreEqual("test-app", activeTask.ApplicationId);
                Assert.IsFalse(activeTask.CanProcess(DateTime.Now.GetMilliseconds()));
                Assert.IsFalse(activeTask.CommitNeeded);
                Assert.IsFalse(activeTask.HasStateStores);
            }

            // Revoked 2 partitions : they move from the active set to the revoked set.
            manager.RevokeTasks(new List<TopicPartition>
            {
                new TopicPartition("topic", 2),
                new TopicPartition("topic", 3),
            });
            Assert.AreEqual(2, manager.ActiveTasks.Count());
            Assert.AreEqual(2, manager.RevokedTasks.Count());
            for (int p = 0; p < 2; ++p)
            {
                var activeTask = manager.ActiveTaskFor(new TopicPartition("topic", p));
                Assert.IsNotNull(activeTask);
                Assert.AreEqual("test-app", activeTask.ApplicationId);
                Assert.IsFalse(activeTask.CanProcess(DateTime.Now.GetMilliseconds()));
                Assert.IsFalse(activeTask.CommitNeeded);
                Assert.IsFalse(activeTask.HasStateStores);
            }

            // A revoked partition no longer resolves to an active task.
            Assert.IsNull(manager.ActiveTaskFor(new TopicPartition("topic", 2)));

            // Closing clears both the active and the revoked task sets.
            manager.Close();
            Assert.AreEqual(0, manager.ActiveTasks.Count());
            Assert.AreEqual(0, manager.RevokedTasks.Count());
        }
        //[Test]
        // TODO : fix that (still disabled: timing-sensitive). The drain loop used to run
        // while records.Count() <= 12, i.e. it could only exit after at least 13 records,
        // yet the assertion below requires exactly maxBuffered + 2 == 12 records — so the
        // loop could never terminate successfully. It now drains until maxBuffered + 2
        // records have been observed.
        public void WorkflowCompleteBufferedRecordsTest()
        {
            // Verifies that a stream-stream join pauses consumption once
            // BufferedRecordsPerPartition is exceeded, then drains every buffered
            // record once the other side of the join receives data.
            int maxBuffered = 10;
            var token       = new System.Threading.CancellationTokenSource();
            var serdes      = new StringSerDes();
            var config      = new StreamConfig<StringSerDes, StringSerDes>();

            config.ApplicationId = "test-group";
            config.MaxTaskIdleMs = (long)TimeSpan.FromSeconds(100).TotalMilliseconds;
            config.BufferedRecordsPerPartition = maxBuffered;
            config.PollMs = 10;

            var builder = new StreamBuilder();

            var stream1 = builder.Stream<string, string>("topic1");
            var stream2 = builder.Stream<string, string>("topic2");

            stream1
                .Join(stream2, (v1, v2) => $"{v1}-{v2}", JoinWindowOptions.Of(TimeSpan.FromSeconds(10)))
                .To("output");

            var topo = builder.Build();

            var supplier = new SyncKafkaSupplier();
            var producer = supplier.GetProducer(config.ToProducerConfig());
            var consumer = supplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            consumer.Subscribe("output");
            var thread = StreamThread.Create(
                "thread-0", "c0",
                topo.Builder, config,
                supplier, supplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            thread.Start(token.Token);

            for (int i = 0; i < maxBuffered + 1; ++i)
            {
                producer.Produce("topic1", new Message<byte[], byte[]>
                {
                    Key   = serdes.Serialize("key", new SerializationContext()),
                    Value = serdes.Serialize($"coucou{i}", new SerializationContext())
                });
            }
            // CONSUME PAUSE AFTER maxBuffered + 1 messages
            System.Threading.Thread.Sleep(50);

            // Add one message more with consumer in stream thread in pause
            producer.Produce("topic1", new Message<byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"coucou{maxBuffered+1}", new SerializationContext())
            });

            Assert.AreEqual(1, thread.ActiveTasks.Count());
            var task = thread.ActiveTasks.ToArray()[0];

            Assert.IsNotNull(task.Grouper);
            Assert.IsFalse(task.Grouper.AllPartitionsBuffered);
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered());
            Assert.AreEqual(maxBuffered + 1, task.Grouper.NumBuffered(new TopicPartition("topic1", 0)));
            Assert.AreEqual(0, task.Grouper.NumBuffered(new TopicPartition("topic2", 0)));

            // Unblock the join: one record on topic2 joins with every buffered topic1 record.
            producer.Produce("topic2", new Message<byte[], byte[]>
            {
                Key   = serdes.Serialize("key", new SerializationContext()),
                Value = serdes.Serialize($"test", new SerializationContext())
            });

            List<ConsumeResult<byte[], byte[]>> records = new List<ConsumeResult<byte[], byte[]>>();

            // Drain until all maxBuffered + 2 joined records have been observed
            // (was: "while (records.Count() <= 12)", an off-by-one that never exits).
            while (records.Count < maxBuffered + 2)
            {
                records.AddRange(consumer.ConsumeRecords(TimeSpan.FromMilliseconds(100)).ToList());
            }

            Assert.AreEqual(maxBuffered + 2, records.Count);
            for (int i = 0; i < maxBuffered + 2; ++i)
            {
                var message = records[i];
                Assert.AreEqual("key", serdes.Deserialize(message.Message.Key, new SerializationContext()));
                Assert.IsTrue(serdes.Deserialize(message.Message.Value, new SerializationContext()).Contains($"coucou{i}-"));
            }

            token.Cancel();
            thread.Dispose();
        }
        /// <summary>
        /// Runs two stream threads in the same consumer group over a 4-partition topic and
        /// checks that the first thread goes through a full rebalance cycle
        /// (PARTITIONS_REVOKED then PARTITIONS_ASSIGNED again) when the second thread joins,
        /// that the partitions end up split 2/2, and that a record still flows end-to-end.
        /// </summary>
        public void StreamThreadNormalWorkflowWithRebalancing()
        {
            var observedStates = new List <ThreadState>();
            var expectedStates = new List <ThreadState>
            {
                ThreadState.CREATED,
                ThreadState.STARTING,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PARTITIONS_REVOKED,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PENDING_SHUTDOWN,
                ThreadState.DEAD
            };

            var cts = new System.Threading.CancellationTokenSource();

            var config = new StreamConfig <StringSerDes, StringSerDes>
            {
                ApplicationId = "test",
                Guarantee     = ProcessingGuarantee.AT_LEAST_ONCE,
                PollMs        = 1
            };

            // The verification consumer must not join the stream app's group,
            // so it gets a cloned config with its own application id.
            var consumeConfig = config.Clone();
            consumeConfig.ApplicationId = "consume-test";

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();
            builder.Stream <string, string>("topic").To("topic2");
            var topology = builder.Build();

            var kafkaSupplier    = new MockKafkaSupplier(4);
            var producer         = kafkaSupplier.GetProducer(consumeConfig.ToProducerConfig());
            var externalConsumer = kafkaSupplier.GetConsumer(consumeConfig.ToConsumerConfig("test-consum"), null);

            externalConsumer.Subscribe("topic2");

            var firstThread = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, config,
                kafkaSupplier, kafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            observedStates.Add(firstThread.State);
            firstThread.StateChanged += (thread, oldState, newState) =>
            {
                Assert.IsInstanceOf <ThreadState>(newState);
                observedStates.Add(newState as ThreadState);
            };

            firstThread.Start(cts.Token);
            // Give the first thread time to get its partitions assigned.
            System.Threading.Thread.Sleep(50);

            var secondThread = StreamThread.Create(
                "thread-1", "c1",
                topology.Builder, config,
                kafkaSupplier, kafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                1) as StreamThread;

            secondThread.Start(cts.Token);
            // Give the group time to rebalance across both threads.
            System.Threading.Thread.Sleep(50);

            producer.Produce("topic", new Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // Let the stream threads process the record.
            System.Threading.Thread.Sleep(100);
            var result = externalConsumer.Consume(100);

            // Two consumers in the same group over a 4-partition topic => 2 tasks each.
            Assert.AreEqual(2, firstThread.ActiveTasks.Count());
            Assert.AreEqual(2, secondThread.ActiveTasks.Count());

            cts.Cancel();
            firstThread.Dispose();
            secondThread.Dispose();

            Assert.AreEqual("key1", serdes.Deserialize(result.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(result.Message.Value, new SerializationContext()));
            Assert.AreEqual(expectedStates, observedStates);
            // Tear down the in-memory cluster.
            kafkaSupplier.Destroy();
        }
// Beispiel #20
// 0
        /// <summary>
        /// Drives a single stream thread through its normal lifecycle
        /// (CREATED -> STARTING -> PARTITIONS_ASSIGNED -> RUNNING -> PENDING_SHUTDOWN -> DEAD),
        /// verifies one record is forwarded from "topic" to "topic2",
        /// and checks the configured metrics reporter was invoked.
        /// </summary>
        public void StreamThreadNormalWorkflow()
        {
            bool metricsReporterCalled = false;
            var observedStates         = new List <ThreadState>();
            var expectedStates         = new List <ThreadState>
            {
                ThreadState.CREATED,
                ThreadState.STARTING,
                ThreadState.PARTITIONS_ASSIGNED,
                ThreadState.RUNNING,
                ThreadState.PENDING_SHUTDOWN,
                ThreadState.DEAD
            };

            var cts = new System.Threading.CancellationTokenSource();

            var config = new StreamConfig <StringSerDes, StringSerDes>
            {
                ApplicationId   = "test",
                Guarantee       = ProcessingGuarantee.AT_LEAST_ONCE,
                PollMs          = 1,
                MetricsReporter = (sensor) => { metricsReporterCalled = true; }
            };
            // Shorten the metrics export interval so the reporter fires within the test window.
            config.AddOrUpdate(StreamConfig.metricsIntervalMsCst, 10);

            var serdes  = new StringSerDes();
            var builder = new StreamBuilder();
            builder.Stream <string, string>("topic").To("topic2");
            var topology = builder.Build();

            var kafkaSupplier    = new SyncKafkaSupplier();
            var producer         = kafkaSupplier.GetProducer(config.ToProducerConfig());
            var externalConsumer = kafkaSupplier.GetConsumer(config.ToConsumerConfig("test-consum"), null);

            externalConsumer.Subscribe("topic2");

            var thread = StreamThread.Create(
                "thread-0", "c0",
                topology.Builder, new StreamMetricsRegistry(), config,
                kafkaSupplier, kafkaSupplier.GetAdmin(config.ToAdminConfig("admin")),
                0) as StreamThread;

            observedStates.Add(thread.State);
            thread.StateChanged += (t, oldState, newState) =>
            {
                Assert.IsInstanceOf <ThreadState>(newState);
                observedStates.Add(newState as ThreadState);
            };

            thread.Start(cts.Token);
            producer.Produce("topic", new Confluent.Kafka.Message <byte[], byte[]>
            {
                Key   = serdes.Serialize("key1", new SerializationContext()),
                Value = serdes.Serialize("coucou", new SerializationContext())
            });
            // Let the stream thread process the record.
            System.Threading.Thread.Sleep(100);
            var result = externalConsumer.Consume(100);

            cts.Cancel();
            thread.Dispose();

            Assert.AreEqual("key1", serdes.Deserialize(result.Message.Key, new SerializationContext()));
            Assert.AreEqual("coucou", serdes.Deserialize(result.Message.Value, new SerializationContext()));
            Assert.AreEqual(expectedStates, observedStates);
            Assert.IsTrue(metricsReporterCalled);
        }
// Beispiel #21
// 0
        /// <summary>
        /// Exercises the full changelog-restoration workflow: a logged table store is
        /// filled with 5 records, the task manager is closed, then the task is recreated
        /// and its store content must be rebuilt by replaying the changelog topic.
        /// </summary>
        /// <param name="persistenStateStore">
        /// True to back the table with a RocksDb (persistent) store, false for an
        /// in-memory store. NOTE(review): parameter name is a typo for
        /// "persistentStateStore"; kept as-is so existing named-argument callers
        /// remain compatible.
        /// </param>
        private void TaskManagerRestorationChangelog(bool persistenStateStore = false)
        {
            // Unique state directory per run so persistent stores never collide.
            var stateDir = Path.Combine(".", Guid.NewGuid().ToString());
            var config   = new StreamConfig <StringSerDes, StringSerDes>();

            config.ApplicationId = "test-restoration-changelog-app";
            config.StateDir      = stateDir;

            var builder = new StreamBuilder();

            // Materialize the table with logging enabled so a changelog topic exists to restore from.
            builder.Table("topic",
                          persistenStateStore
                    ? RocksDb <string, string> .As("store").WithLoggingEnabled(null)
                    : InMemory <string, string> .As("store").WithLoggingEnabled(null));

            var serdes = new StringSerDes();

            var topology = builder.Build();

            topology.Builder.RewriteTopology(config);

            var supplier        = new SyncKafkaSupplier();
            var producer        = supplier.GetProducer(config.ToProducerConfig());
            var consumer        = supplier.GetConsumer(config.ToConsumerConfig(), null);
            var restoreConsumer = supplier.GetRestoreConsumer(config.ToConsumerConfig());

            var storeChangelogReader =
                new StoreChangelogReader(config, restoreConsumer, "thread-0", new StreamMetricsRegistry());
            var taskCreator = new TaskCreator(topology.Builder, config, "thread-0", supplier, producer,
                                              storeChangelogReader, new StreamMetricsRegistry());
            var taskManager = new TaskManager(topology.Builder, taskCreator,
                                              supplier.GetAdmin(config.ToAdminConfig("admin")), consumer, storeChangelogReader);

            var part = new TopicPartition("topic", 0);

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            var task = taskManager.ActiveTaskFor(part);

            // Fresh store, nothing to replay: restoration must complete immediately.
            taskManager.TryToCompleteRestoration();
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());

            // Feed 5 records through the task so they are written to the store
            // (and, via logging, to the changelog topic).
            List <ConsumeResult <byte[], byte[]> > messages = new List <ConsumeResult <byte[], byte[]> >();
            int offset = 0;

            for (int i = 0; i < 5; ++i)
            {
                messages.Add(
                    new ConsumeResult <byte[], byte[]>
                {
                    Message = new Message <byte[], byte[]>
                    {
                        Key   = serdes.Serialize($"key{i + 1}", new SerializationContext()),
                        Value = serdes.Serialize($"value{i + 1}", new SerializationContext())
                    },
                    TopicPartitionOffset = new TopicPartitionOffset(part, offset++)
                });
            }

            task.AddRecords(messages);

            // Process messages
            while (task.CanProcess(DateTime.Now.GetMilliseconds()))
            {
                Assert.IsTrue(task.Process());
            }

            taskManager.CommitAll();

            // Simulate Close + new open: the recreated task must restore from the changelog.
            taskManager.Close();

            restoreConsumer.Resume(new TopicPartition("test-restoration-changelog-app-store-changelog", 0).ToSingle());

            taskManager.CreateTasks(
                new List <TopicPartition>
            {
                part
            });

            task = taskManager.ActiveTaskFor(part);

            // Restoration stays pending until the changelog has been fully replayed.
            Assert.IsFalse(taskManager.TryToCompleteRestoration());
            storeChangelogReader.Restore();
            Assert.IsTrue(taskManager.TryToCompleteRestoration());

            // All 5 records must have been restored into the reopened store.
            var store = task.GetStore("store");
            var items = (store as ITimestampedKeyValueStore <string, string>).All().ToList();

            Assert.AreEqual(5, items.Count);

            taskManager.Close();

            // Only the persistent variant leaves files on disk to clean up.
            if (persistenStateStore)
            {
                Directory.Delete(stateDir, true);
            }
        }