public void TestMockSchemaRegistry()
{
    var config = new StreamConfig();
    config.ApplicationId = "app-test";
    config.AutoOffsetReset = AutoOffsetReset.Earliest;
    config.NumStreamThreads = 1;
    config.SchemaRegistryUrl = "mock://test";
    config.Acks = Acks.All;
    config.AddConsumerConfig("allow.auto.create.topics", "false");
    config.MaxTaskIdleMs = 50;

    StreamBuilder builder = new StreamBuilder();
    builder.Stream<string, Order, StringSerDes, SchemaProtobufSerDes<Order>>("test-topic")
        .Peek((k, v) => Console.WriteLine($"Order #{v.OrderId}"));

    Topology t = builder.Build();
    using (var driver = new TopologyTestDriver(t, config))
    {
        var inputTopic = driver.CreateInputTopic<string, Order, StringSerDes, SchemaProtobufSerDes<Order>>("test-topic");
        inputTopic.PipeInput("test", new Order { OrderId = 12, Price = 150, ProductId = 1 });
    }

    // The "mock://test" URL registers schemas in an in-memory scope named "test".
    var client = MockSchemaRegistry.GetClientForScope("test");
    Assert.IsAssignableFrom<MockSchemaRegistryClient>(client);
    Assert.NotNull(client.GetSchemaAsync(1).GetAwaiter().GetResult());
    MockSchemaRegistry.DropScope("test");
}
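// The Order type used in this test is assumed to be a protoc-generated class
// implementing Confluent's Protobuf contract (IMessage<Order>), which is what
// SchemaProtobufSerDes<Order> requires. A sketch of the .proto it implies;
// field numbers are assumptions, not taken from the sample:
//
//   syntax = "proto3";
//   message Order {
//       int32 order_id = 1;   // -> OrderId property in C#
//       float price = 2;      // -> Price
//       int32 product_id = 3; // -> ProductId
//   }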
public async Task exec(IConfiguration config, IServiceProvider services)
{
    Console.WriteLine("Process");
    var destTopic = config["spring.cloud.stream.bindings.output.destination"];
    Console.WriteLine(destTopic);

    using (var scope = services.CreateScope())
    {
        this._dataService = scope.ServiceProvider.GetRequiredService<IDataService>();

        bool isRunningState = false;
        var timeout = TimeSpan.FromSeconds(10);
        DateTime dt = DateTime.Now;
        Order[] capture = this._dataService.readData();

        // Feed the retrieved data into the stream.
        var sConfig = new StreamConfig<StringSerDes, StringSerDes>();
        sConfig.ApplicationId = config["SPRING_CLOUD_APPLICATION_GUID"];
        sConfig.BootstrapServers = config["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"];
        sConfig.SchemaRegistryUrl = config["SchemaRegistryUrl"];
        sConfig.AutoRegisterSchemas = true;
        sConfig.NumStreamThreads = 10;
        sConfig.Acks = Acks.All;
        sConfig.AddConsumerConfig("allow.auto.create.topics", "true");
        sConfig.InnerExceptionHandler = (e) => ExceptionHandlerResponse.CONTINUE;

        var schemaRegistryClient = new CachedSchemaRegistryClient(
            new SchemaRegistryConfig { Url = sConfig.SchemaRegistryUrl });
        var supplier = new SyncKafkaSupplier(new KafkaLoggerAdapter(sConfig));
        var producerConfig = sConfig.ToProducerConfig();
        var adminConfig = sConfig.ToAdminConfig(sConfig.ApplicationId);
        var admin = supplier.GetAdmin(adminConfig);

        // try
        // {
        //     var topic = new TopicSpecification
        //     {
        //         Name = destTopic,
        //         NumPartitions = 1,
        //         ReplicationFactor = 3
        //     };
        //     var topicProduct = new TopicSpecification
        //     {
        //         Name = "product-external",
        //         NumPartitions = 1,
        //         ReplicationFactor = 3
        //     };
        //     IList<TopicSpecification> topics = new List<TopicSpecification>();
        //     topics.Add(topic);
        //     topics.Add(topicProduct);
        //     await admin.CreateTopicsAsync(topics);
        // }
        // catch (Exception topicExists)
        // {
        //     Console.WriteLine("Topic already exists");
        //     Console.Write(topicExists);
        // }

        var producer = supplier.GetProducer(producerConfig);

        StreamBuilder builder = new StreamBuilder();
        var serdes = new SchemaAvroSerDes<Order>();
        var keySerdes = new Int32SerDes();

        builder.Table(destTopic, keySerdes, serdes, InMemory<int, Order>.As(config["table"]));

        var t = builder.Build();
        KafkaStream stream = new KafkaStream(t, sConfig, supplier);

        stream.StateChanged += (old, @new) =>
        {
            if (@new.Equals(KafkaStream.State.RUNNING))
            {
                isRunningState = true;
            }
        };
        await stream.StartAsync();

        while (!isRunningState)
        {
            Thread.Sleep(250);
            if (DateTime.Now > dt + timeout)
            {
                break;
            }
        }

        if (isRunningState)
        {
            // Produce a well-formed Endpoint into the external topic.
            var endpProducer = new ProducerBuilder<byte[], Endpoint>(producerConfig)
                .SetValueSerializer(new AvroSerializer<Endpoint>(schemaRegistryClient,
                    new AvroSerializerConfig { AutoRegisterSchemas = true }).AsSyncOverAsync())
                .Build();

            // Produce a well-formed Product into the external topic.
            var productProducer = new ProducerBuilder<byte[], Product>(producerConfig)
                .SetValueSerializer(new AvroSerializer<Product>(schemaRegistryClient,
                    new AvroSerializerConfig { AutoRegisterSchemas = true }).AsSyncOverAsync())
                .Build();

            for (int k = 1; k < 10; k++)
            {
                endpProducer.Produce("api-endpoints", new Message<byte[], Endpoint>
                {
                    Key = new Int32SerDes().Serialize(k, new SerializationContext()),
                    Value = new Endpoint
                    {
                        endpoint_id = "endpoint" + k,
                        endpoint_url = "http://endpoint" + k + "/",
                        http_method = "POST"
                    }
                }, (d) =>
                {
                    if (d.Status == PersistenceStatus.Persisted)
                    {
                        Console.WriteLine("Endpoint Message sent !");
                    }
                });

                productProducer.Produce("product-external", new Message<byte[], Product>
                {
                    Key = new Int32SerDes().Serialize(1, new SerializationContext()),
                    Value = new Product
                    {
                        name = "Software Product",
                        price = 1234.5F,
                        product_id = 3
                    }
                }, (d) =>
                {
                    if (d.Status == PersistenceStatus.Persisted)
                    {
                        Console.WriteLine("Product Message sent !");
                    }
                });
            }

            Thread.Sleep(10);

            for (int k = 1; k < 10; k++)
            {
                producer.Produce(destTopic, new Confluent.Kafka.Message<byte[], byte[]>
                {
                    Key = keySerdes.Serialize(k, new SerializationContext()),
                    Value = serdes.Serialize(new Order
                    {
                        order_id = k,
                        price = 123.5F,
                        product_id = k
                    }, new SerializationContext())
                }, (d) =>
                {
                    if (d.Status == PersistenceStatus.Persisted)
                    {
                        Console.WriteLine("Order Message sent !");
                    }
                });
            }

            Thread.Sleep(50);
        }
    }
}
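// The Avro value types used above (Order, Product, Endpoint) are assumed to be
// classes generated by avrogen from .avsc schemas. As an illustration, a minimal
// hand-written equivalent of Order that SchemaAvroSerDes<Order> would accept;
// the record namespace and field order here are assumptions, not from the sample:
using Avro;
using Avro.Specific;

public partial class Order : ISpecificRecord
{
    public static Schema _SCHEMA = Schema.Parse(
        "{\"type\":\"record\",\"name\":\"Order\",\"namespace\":\"example\"," +
        "\"fields\":[{\"name\":\"order_id\",\"type\":\"int\"}," +
        "{\"name\":\"price\",\"type\":\"float\"}," +
        "{\"name\":\"product_id\",\"type\":\"int\"}]}");

    public int order_id { get; set; }
    public float price { get; set; }
    public int product_id { get; set; }

    public virtual Schema Schema => _SCHEMA;

    // ISpecificRecord positional accessors used by the Avro runtime.
    public object Get(int fieldPos)
    {
        switch (fieldPos)
        {
            case 0: return order_id;
            case 1: return price;
            case 2: return product_id;
            default: throw new AvroRuntimeException("Bad index " + fieldPos + " in Get()");
        }
    }

    public void Put(int fieldPos, object fieldValue)
    {
        switch (fieldPos)
        {
            case 0: order_id = (int)fieldValue; break;
            case 1: price = (float)fieldValue; break;
            case 2: product_id = (int)fieldValue; break;
            default: throw new AvroRuntimeException("Bad index " + fieldPos + " in Put()");
        }
    }
}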
public async Task process(IConfiguration config)
{
    // Build Avro schemas from the POCO classes.
    AvroSerializerSettings settings = new AvroSerializerSettings();
    settings.Resolver = new AvroPublicMemberContractResolver();
    var endpointSchema = AvroSerializer.Create<Endpoint>(settings).WriterSchema.ToString();
    var messageDSchema = AvroSerializer.Create<MessageDestination>(settings).WriterSchema.ToString();
    Console.WriteLine("Endpoint Schema: " + endpointSchema);
    Console.WriteLine("Message Destination Schema: " + messageDSchema);

    Console.WriteLine("RouterProcess");

    var sConfig = new StreamConfig<StringSerDes, StringSerDes>();
    sConfig.ApplicationId = config["SPRING_CLOUD_APPLICATION_GROUP"];
    sConfig.BootstrapServers = config["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"];
    sConfig.AutoOffsetReset = AutoOffsetReset.Earliest;
    sConfig.SchemaRegistryUrl = config["SchemaRegistryUrl"];
    sConfig.AutoRegisterSchemas = true;
    sConfig.NumStreamThreads = 1;
    sConfig.Acks = Acks.All;
    //sConfig.Debug = "consumer,cgrp,topic,fetch";
    sConfig.AddConsumerConfig("allow.auto.create.topics", "true");
    sConfig.MaxTaskIdleMs = 50;
    sConfig.InnerExceptionHandler = (e) => ExceptionHandlerResponse.CONTINUE;

    var timeout = TimeSpan.FromSeconds(10);
    DateTime dt = DateTime.Now;
    MessageDestination op = new MessageDestination();
    var serializer = new SchemaAvroSerDes<OrderProduct>();

    StreamBuilder builder = new StreamBuilder();
    var table = builder.Table(config["endpoints"],
        new Int32SerDes(),
        new SchemaAvroSerDes<Endpoint>(),
        InMemory<int, Endpoint>.As(config["endpoints-table"]));

    builder.Stream<int, OrderProduct, Int32SerDes, SchemaAvroSerDes<OrderProduct>>(config["spring.cloud.stream.bindings.input.destination"])
        .Map<int, OrderProduct>((k, v) => KeyValuePair.Create(v.product_id, v))
        .Peek((k, v) =>
        {
            Console.WriteLine($"Sending message {k} to endpoint {v.product_id}");
            // compute metric
        })
        .Join(table, (orderProduct, endpoint) =>
        {
            Console.WriteLine("OrderProduct: " + orderProduct?.order_id);
            Console.WriteLine("Endpoint: " + endpoint?.endpoint_id);
            op = new MessageDestination
            {
                messageId = orderProduct.order_id,
                endpoint = endpoint,
                payload = orderProduct
            };
            return op;
        })
        .Peek((k, v) =>
        {
            Console.WriteLine($"Sending message {k} to endpoint {v.endpoint.endpoint_url}");
            // create metrics
            if (_messageCounterList != null)
            {
                var counterMessage = Metrics.CreateCounter(
                    $"router_{v.endpoint.endpoint_id}_processed_total",
                    $"Number of messages sent to {v.endpoint.endpoint_url}");
                counterMessage.Inc();
                _messageCounterList.Add(counterMessage);
            }
        })
        .Print(Printed<int, MessageDestination>.ToOut());

    Topology t = builder.Build();
    Console.WriteLine(t.Describe());

    KafkaStream stream = new KafkaStream(t, sConfig);
    bool isRunningState = false;
    stream.StateChanged += (old, @new) =>
    {
        if (@new.Equals(KafkaStream.State.RUNNING))
        {
            isRunningState = true;
        }
    };
    await stream.StartAsync();

    while (!isRunningState)
    {
        Thread.Sleep(250);
        if (DateTime.Now > dt + timeout)
        {
            break;
        }
    }

    if (isRunningState)
    {
        Console.WriteLine("Stream running state is " + isRunningState);
    }
}
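// The router reads its topology wiring from IConfiguration. A hypothetical,
// minimal configuration satisfying the keys referenced above; the values are
// placeholders for illustration, except "api-endpoints", which matches the
// topic the producer in exec() writes Endpoint records to:
using System.Collections.Generic;
using Microsoft.Extensions.Configuration;

IConfiguration routerConfig = new ConfigurationBuilder()
    .AddInMemoryCollection(new Dictionary<string, string>
    {
        ["SPRING_CLOUD_APPLICATION_GROUP"] = "router-app",               // assumption
        ["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"] = "localhost:9092", // assumption
        ["SchemaRegistryUrl"] = "http://localhost:8081",                 // assumption
        ["endpoints"] = "api-endpoints",                                 // endpoint table topic
        ["endpoints-table"] = "endpoints-store",                         // state store name, assumption
        ["spring.cloud.stream.bindings.input.destination"] = "order-products" // assumption
    })
    .Build();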
public async Task process(IConfiguration config)
{
    Console.WriteLine("Process");

    var sConfig = new StreamConfig<StringSerDes, StringSerDes>();
    sConfig.ApplicationId = config["SPRING_CLOUD_APPLICATION_GROUP"];
    sConfig.BootstrapServers = config["SPRING_CLOUD_STREAM_KAFKA_BINDER_BROKERS"];
    sConfig.AutoOffsetReset = AutoOffsetReset.Earliest;
    sConfig.SchemaRegistryUrl = config["SchemaRegistryUrl"];
    sConfig.AutoRegisterSchemas = true;
    sConfig.NumStreamThreads = 1;
    sConfig.Acks = Acks.All;
    //sConfig.Debug = "consumer,cgrp,topic,fetch";
    sConfig.AddConsumerConfig("allow.auto.create.topics", "true");
    sConfig.MaxTaskIdleMs = 50;
    sConfig.InnerExceptionHandler = (e) => ExceptionHandlerResponse.CONTINUE;

    var timeout = TimeSpan.FromSeconds(10);
    DateTime dt = DateTime.Now;
    OrderProduct op = new OrderProduct();
    var serializer = new SchemaAvroSerDes<OrderProduct>();

    StreamBuilder builder = new StreamBuilder();
    var table = builder.Table(config["simpleNetcoreProcessor.externaltopic"],
        new Int32SerDes(),
        new SchemaAvroSerDes<Product>(),
        InMemory<int, Product>.As(config["simpleNetcoreProcessor.table"]));

    builder.Stream<int, Order, Int32SerDes, SchemaAvroSerDes<Order>>(config["spring.cloud.stream.bindings.input.destination"])
        .Join(table, (order, product) =>
        {
            Console.WriteLine("Order: " + order?.order_id);
            Console.WriteLine("Product: " + product?.product_id);
            op = new OrderProduct
            {
                order_id = order.order_id,
                price = order.price,
                product_id = product.product_id,
                product_name = product.name,
                product_price = product.price
            };
            return op;
        })
        .To<Int32SerDes, SchemaAvroSerDes<OrderProduct>>(config["spring.cloud.stream.bindings.output.destination"]);

    Topology t = builder.Build();
    Console.WriteLine(t.Describe());

    KafkaStream stream = new KafkaStream(t, sConfig);
    bool isRunningState = false;
    stream.StateChanged += (old, @new) =>
    {
        if (@new.Equals(KafkaStream.State.RUNNING))
        {
            isRunningState = true;
        }
    };
    await stream.StartAsync();

    while (!isRunningState)
    {
        Thread.Sleep(250);
        if (DateTime.Now > dt + timeout)
        {
            break;
        }
    }

    if (isRunningState)
    {
        Console.WriteLine("Stream running state is " + isRunningState);
    }
}
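// Each method above starts the stream, then polls StateChanged until RUNNING or
// a timeout elapses. A hypothetical helper that factors out that repeated
// pattern, assuming the same Streamiz KafkaStream API used throughout:
static async Task<bool> StartAndWaitUntilRunningAsync(KafkaStream stream, TimeSpan timeout)
{
    bool running = false;
    stream.StateChanged += (old, @new) =>
    {
        if (@new.Equals(KafkaStream.State.RUNNING))
        {
            running = true;
        }
    };
    await stream.StartAsync();

    DateTime start = DateTime.Now;
    while (!running && DateTime.Now <= start + timeout)
    {
        Thread.Sleep(250); // same polling interval as the samples above
    }
    return running;
}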