/// <summary>
/// Publishes the outcome of a processed leave application to the results topic,
/// Avro-serializing both key and value via the configured schema registry.
/// </summary>
private static async Task SendMessageToResultTopicAsync(LeaveApplicationReceived leaveRequest, bool isApproved, int partitionId)
{
    using var schemaRegistry = new CachedSchemaRegistryClient(_schemaRegistryConfig);
    using var producer = new ProducerBuilder<string, LeaveApplicationProcessed>(_producerConfig)
        .SetKeySerializer(new AvroSerializer<string>(schemaRegistry))
        .SetValueSerializer(new AvroSerializer<LeaveApplicationProcessed>(schemaRegistry))
        .Build();

    // Build the result payload; the verdict text depends on the approval flag.
    var processedApplication = new LeaveApplicationProcessed
    {
        EmpDepartment = leaveRequest.EmpDepartment,
        EmpEmail = leaveRequest.EmpEmail,
        LeaveDurationInHours = leaveRequest.LeaveDurationInHours,
        LeaveStartDateTicks = leaveRequest.LeaveStartDateTicks,
        ProcessedBy = $"Manager #{partitionId}",
        Result = isApproved
            ? "Approved: Your leave application has been approved."
            : "Declined: Your leave application has been declined."
    };

    // Key is email + ticks so repeated results for the same employee stay unique.
    var deliveryResult = await producer.ProduceAsync(
        ApplicationConstants.LeaveApplicationResultsTopicName,
        new Message<string, LeaveApplicationProcessed>
        {
            Key = $"{leaveRequest.EmpEmail}-{DateTime.UtcNow.Ticks}",
            Value = processedApplication
        });

    Console.WriteLine(
        $"\nMsg: Leave request processed and queued at offset {deliveryResult.Offset.Value} in the Topic {deliveryResult.Topic}");
}
/// <summary>
/// Produces one specific-record log message to the "log-messages" topic using
/// Avro serialization backed by the given schema registry, then flushes.
/// </summary>
static async Task ProduceSpecific(string bootstrapServers, string schemaRegistryUrl)
{
    using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryUrl }))
    using (var producer = new ProducerBuilder<Null, MessageTypes.LogMessage>(new ProducerConfig { BootstrapServers = bootstrapServers })
        .SetValueSerializer(new AvroSerializer<MessageTypes.LogMessage>(schemaRegistry))
        .Build())
    {
        var logMessage = new MessageTypes.LogMessage
        {
            IP = "192.168.0.1",
            Message = "a test message 2",
            Severity = MessageTypes.LogLevel.Info,
            Tags = new Dictionary<string, string> { { "location", "CA" } }
        };

        await producer.ProduceAsync("log-messages", new Message<Null, MessageTypes.LogMessage> { Value = logMessage });

        // Block for up to 30s so queued messages are delivered before disposal.
        producer.Flush(TimeSpan.FromSeconds(30));
    }
}
/// <summary>
/// Verifies that registering a schema under a fresh subject grows the subject
/// list by exactly one, and that re-registering the same schema adds nothing.
/// </summary>
public static void GetAllSubjects(Config config)
{
    var topicName = Guid.NewGuid().ToString();
    var userSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server });
    var initialSubjects = sr.GetAllSubjectsAsync().Result;

    var subject = SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName, null);
    var id = sr.RegisterSchemaAsync(subject, userSchema).Result;

    var subjectsAfterRegister = sr.GetAllSubjectsAsync().Result;
    Assert.Equal(1, subjectsAfterRegister.Count - initialSubjects.Count);

    // Registering an identical schema must be idempotent for the subject list.
    sr.RegisterSchemaAsync(subject, userSchema).Wait();
    var subjectsAfterReRegister = sr.GetAllSubjectsAsync().Result;
    Assert.Equal(subjectsAfterRegister.Count, subjectsAfterReRegister.Count);
    Assert.True(subjectsAfterReRegister.Contains(subject));
}
/// <summary>
/// Registers a schema with normalization enabled and asserts the registry
/// stores (and returns) the normalized form — e.g. {"type":"string"} collapses
/// to "string".
/// </summary>
public static void RegisterNormalizedSchema(Config config)
{
    var topicName = Guid.NewGuid().ToString();

    // Schema with a redundant nested type wrapper around the "name" field.
    var denormalizedSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":{\"type\": \"string\"}},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    // The canonical form the registry is expected to produce.
    var expectedNormalized =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server });

    var subject = SubjectNameStrategy.Topic.ConstructKeySubjectName(topicName, "Confluent.Kafka.Examples.AvroSpecific.User");
    Assert.Equal(topicName + "-key", subject);

    var registeredId = sr.RegisterSchemaAsync(subject, denormalizedSchema, true).Result;
    var lookedUpId = sr.GetSchemaIdAsync(subject, denormalizedSchema, true).Result;
    var storedSchema = sr.GetSchemaAsync(lookedUpId).Result;

    Assert.Equal(registeredId, lookedUpId);
    Assert.Equal(expectedNormalized, storedSchema);
}
/// <summary>
/// Builds a Kafka producer whose key and value are serialized as protobuf,
/// with schemas registered against the configured schema registry.
/// </summary>
public static IProducer<TKey, TValue> CreateProducer<TKey, TValue>(BrokerOptions brokerOptions, ILogger logger)
    where TKey : class, IMessage<TKey>, new()
    where TValue : class, IMessage<TValue>, new()
{
    var producerConfig = new ProducerConfig
    {
        BootstrapServers = brokerOptions.Brokers,
        // Optional hardening / auth settings, currently disabled:
        // EnableIdempotence = true,
        // MessageSendMaxRetries = 10000000,
        // SecurityProtocol = brokerOptions.SecurityProtocol,
        // SaslMechanism = brokerOptions.SaslMechanism,
        // SaslUsername = brokerOptions.SaslUsername,
        // SaslPassword = brokerOptions.SaslPassword,
    };

    var registryConfig = new SchemaRegistryConfig
    {
        Url = brokerOptions.SchemaRegistryUrl,
        // BasicAuthCredentialsSource = AuthCredentialsSource.UserInfo,
        // BasicAuthUserInfo = brokerOptions.SchemaRegistryAuth,
    };

    var registry = new CachedSchemaRegistryClient(registryConfig);

    return new ProducerBuilder<TKey, TValue>(producerConfig)
        .SetKeySerializer(new ProtobufSerializer<TKey>(registry))
        .SetValueSerializer(new ProtobufSerializer<TValue>(registry))
        .Build();
}
/// <summary>
/// Verifies that registering a backward-incompatible schema under an existing
/// subject fails, while the subject itself remains registered.
/// </summary>
public static void RegisterIncompatibleSchema(string server)
{
    var topicName = Guid.NewGuid().ToString();
    var originalSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var sr = new CachedSchemaRegistryClient(new Dictionary<string, object>
    {
        { "schema.registry.url", server }
    });

    var subject = sr.ConstructKeySubjectName(topicName, "Confluent.Kafka.Examples.AvroSpecific.User");
    var id = sr.RegisterSchemaAsync(subject, originalSchema).Result;

    // Renames favorite_color -> favorite_shape: incompatible with originalSchema.
    var incompatibleSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_shape\",\"type\":[\"string\",\"null\"]}]}";

    Assert.False(sr.IsCompatibleAsync(subject, incompatibleSchema).Result);
    // .Result wraps the registry's rejection in an AggregateException.
    Assert.Throws<AggregateException>(() => sr.RegisterSchemaAsync(subject, incompatibleSchema).Result);
    Assert.True(sr.GetAllSubjectsAsync().Result.Contains(subject));
}
/// <inheritdoc cref="IProducer.ProduceMany"/>
public void ProduceMany(IList<Event> events)
{
    using var schemaRegistry = new CachedSchemaRegistryClient(_schemaRegistryConfig);
    using var producer = new ProducerBuilder<string, Event>(_producerConfig)
        .SetKeySerializer(new AvroSerializer<string>(schemaRegistry))
        .SetValueSerializer(new AvroSerializer<Event>(schemaRegistry))
        .SetErrorHandler((_, error) => _logger.LogError("Kafka encountered an error: {@Error}", error))
        .Build();

    foreach (var @event in events)
    {
        var message = new Message<string, Event>
        {
            Key = @event.AggregateName,
            Value = @event
        };

        // Fire-and-forget produce; the delivery callback surfaces fatal errors.
        producer.Produce(@event.AggregateName, message, report =>
        {
            if (report.Error.IsFatal)
            {
                throw new ProducerException("Fatal error producing message to Kafka: " +
                                            $"ErrorCode [{report.Error.Code}] | " +
                                            $"Reason [{report.Error.Reason}]");
            }
        });
    }

    // Wait for up to 10 seconds for any in-flight messages to be delivered.
    producer.Flush(TimeSpan.FromSeconds(10));
}
/// <summary>
/// Registers two compatible schema versions under one subject and asserts the
/// registry reports exactly two versions for it.
/// </summary>
public static void GetSubjectVersions(Config config)
{
    var topicName = Guid.NewGuid().ToString();
    var testSchema1 =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    // Adds favorite_shape with a default value, so it is compatible with testSchema1.
    var testSchema2 =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}," +
        "{\"name\":\"favorite_shape\",\"type\":[\"string\",\"null\"], \"default\": \"square\"}]}";

    var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server });

    var subject = sr.ConstructValueSubjectName(topicName);
    var id1 = sr.RegisterSchemaAsync(subject, testSchema1).Result;
    var id2 = sr.RegisterSchemaAsync(subject, testSchema2).Result;

    var versions = sr.GetSubjectVersionsAsync(subject).Result;
    // FIX: xUnit's Assert.Equal takes (expected, actual); the arguments were reversed,
    // which produces a misleading failure message when the test breaks.
    Assert.Equal(2, versions.Count);
}
/// <summary>
/// Initializes a new instance of the <see cref="AsyncSchemaRegistrySerializer{T}" />
/// class with a Schema Registry configuration.
/// </summary>
/// <param name="registryConfiguration">
/// A Schema Registry configuration. Using the <see cref="SchemaRegistryConfig" /> class is
/// highly recommended.
/// </param>
/// <param name="registerAutomatically">
/// Whether the serializer should automatically register schemas that match the type being
/// serialized.
/// </param>
/// <param name="schemaBuilder">
/// A schema builder used to create schemas for .NET <see cref="Type" />s when registering
/// automatically. Defaults to <see cref="SchemaBuilder" />.
/// </param>
/// <param name="schemaReader">
/// A schema reader used to convert schemas received from the Registry into abstract
/// representations. Defaults to <see cref="JsonSchemaReader" />.
/// </param>
/// <param name="schemaWriter">
/// A schema writer used to convert abstract schema representations when registering
/// automatically. Defaults to <see cref="JsonSchemaWriter" />.
/// </param>
/// <param name="serializerBuilder">
/// A serializer builder used to generate serialization functions for .NET
/// <see cref="Type" />s. Defaults to <see cref="BinarySerializerBuilder" />.
/// </param>
/// <param name="subjectNameBuilder">
/// A function that determines a subject name given the topic name and a component type
/// (key or value). Defaults to the <c>{topic name}-{component}</c> naming convention.
/// </param>
/// <param name="tombstoneBehavior">
/// How the serializer should handle tombstone records.
/// </param>
/// <exception cref="ArgumentNullException">
/// Thrown when <paramref name="registryConfiguration" /> is <c>null</c>.
/// </exception>
/// <exception cref="UnsupportedTypeException">
/// Thrown when <paramref name="tombstoneBehavior" /> is incompatible with
/// <typeparamref name="T" />.
/// </exception>
public AsyncSchemaRegistrySerializer(
    IEnumerable<KeyValuePair<string, string>> registryConfiguration,
    AutomaticRegistrationBehavior registerAutomatically = AutomaticRegistrationBehavior.Never,
    Abstract.ISchemaBuilder schemaBuilder = null,
    IJsonSchemaReader schemaReader = null,
    IJsonSchemaWriter schemaWriter = null,
    IBinarySerializerBuilder serializerBuilder = null,
    Func<SerializationContext, string> subjectNameBuilder = null,
    TombstoneBehavior tombstoneBehavior = TombstoneBehavior.None)
{
    if (registryConfiguration is null)
    {
        throw new ArgumentNullException(nameof(registryConfiguration));
    }

    // Non-nullable value types can never be null, so they cannot carry tombstones.
    if (tombstoneBehavior != TombstoneBehavior.None && default(T) != null)
    {
        throw new UnsupportedTypeException(typeof(T), $"{typeof(T)} cannot represent tombstone values.");
    }

    RegisterAutomatically = registerAutomatically;
    RegistryClient = new CachedSchemaRegistryClient(registryConfiguration);
    SchemaBuilder = schemaBuilder ?? new Abstract.SchemaBuilder();
    SchemaReader = schemaReader ?? new JsonSchemaReader();
    SchemaWriter = schemaWriter ?? new JsonSchemaWriter();
    SerializerBuilder = serializerBuilder ?? new BinarySerializerBuilder();
    SubjectNameBuilder = subjectNameBuilder
        ?? (c => $"{c.Topic}-{(c.Component == MessageComponentType.Key ? "key" : "value")}");
    TombstoneBehavior = tombstoneBehavior;

    cache = new Dictionary<string, Task<Func<T, byte[]>>>();
    // The client was created here, so this instance owns its disposal.
    disposeRegistryClient = true;
}
/// <summary>
/// Test fixture setup: routes NLog output to a colored console target, builds a
/// producer configuration, and creates the Kafka producer plus schema helpers.
/// </summary>
public opcProducerTest()
{
    // Send Debug..Fatal log records to the console.
    var loggingConfig = new NLog.Config.LoggingConfiguration();
    var consoleTarget = new NLog.Targets.ColoredConsoleTarget("logconsole");
    loggingConfig.AddRule(LogLevel.Debug, LogLevel.Fatal, consoleTarget);
    NLog.LogManager.Configuration = loggingConfig;

    var producerConf = new kafkaProducerConf()
    {
        MessageSendMaxRetries = 100,
        BatchNumMessages = 23,
        QueueBufferingMaxKbytes = 100,
        QueueBufferingMaxMessages = 32,
        MessageTimeoutMs = 10000,
        LingerMs = 200
    };

    // Schema registry client; value subjects are named per topic-record.
    var registry = new CachedSchemaRegistryClient(new SchemaRegistryConfig()
    {
        Url = "localhost:8081",
        ValueSubjectNameStrategy = SubjectNameStrategy.TopicRecord
    });

    kafka = new opcKafkaProducer(producerConf, registry);
    schemas = new opcSchemas();
}
/// <summary>
/// Initializes the Kafka connector from a JSON configuration: sets up logging,
/// the schema registry client, the Avro producer, and (optionally) the RPC consumer.
/// </summary>
public void init(JObject config, CancellationTokenSource cts)
{
    // Logger named after the concrete type.
    log = LogManager.GetLogger(this.GetType().Name);

    kafkaConfWrapper conf = config.ToObject<kafkaConfWrapper>();
    var producer_conf = conf.kafkaProducer;
    cancel = cts;

    // Schema registry client; value subjects are named per topic-record.
    schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig()
    {
        Url = conf.KafkaSchemaRegistryURL,
        ValueSubjectNameStrategy = SubjectNameStrategy.TopicRecord
    });

    // Probe the registry up front; without this, a registry outage surfaces as a
    // misleading "Delivery failed: Local: Key serialization error".
    testSchemaRegistry();

    // Producer with Avro serializers.
    producer = new opcKafkaProducer(conf.kafkaProducer, schemaRegistry);

    // RPC consumer runs on its own thread when enabled.
    kafkaRPC = new opcKafkaRPC(conf.kafkaRPC, schemaRegistry);
    kafkaRPC.setManager(_serv);
    if (conf.kafkaRPC.enableKafkaRPC)
    {
        kafkaRPC.run(cancel.Token);
    }
}
/// <summary>
/// Registers two record schemas, then registers an Avro union schema that
/// references both and asserts the registration succeeds.
/// </summary>
public static void AvroWithReferences(Config config)
{
    var srClient = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = config.Server });

    Schema schema1 = new Schema(@"{""type"":""record"",""name"":""EventA"",""namespace"":""Kafka.Avro.Examples"",""fields"":[{""name"":""EventType"",""type"":""string""},{""name"":""EventId"",""type"":""string""},{""name"":""OccuredOn"",""type"":""long""},{""name"":""A"",""type"":""string""}]}", SchemaType.Avro);
    Schema schema2 = new Schema(@"{""type"":""record"",""name"":""EventB"",""namespace"":""Kafka.Avro.Examples"",""fields"":[{""name"":""EventType"",""type"":""string""},{""name"":""EventId"",""type"":""string""},{""name"":""OccuredOn"",""type"":""long""},{""name"":""B"",""type"":""long""}]}", SchemaType.Avro);

    var id1 = srClient.RegisterSchemaAsync("events-a", schema1).Result;
    var id2 = srClient.RegisterSchemaAsync("events-b", schema2).Result;

    // Union of the two event records; each member is resolved via a reference
    // to the subject/version registered above.
    var avroUnion = @"[""Kafka.Avro.Examples.EventA"",""Kafka.Avro.Examples.EventB""]";
    Schema unionSchema = new Schema(avroUnion, SchemaType.Avro);
    unionSchema.References.Add(new SchemaReference("Kafka.Avro.Examples.EventA", "events-a", 1));
    unionSchema.References.Add(new SchemaReference("Kafka.Avro.Examples.EventB", "events-b", 1));

    var unionId = srClient.RegisterSchemaAsync("events-value", unionSchema).Result;
    Assert.NotEqual(0, unionId);
}
/// <summary>
/// Verifies that every subject registered in the schema registry has a latest
/// schema that parses as valid Avro.
/// </summary>
public async Task It_should_deserialize_the_envelope_correctly()
{
    var schemaRegistry = new CachedSchemaRegistryClient(
        new SchemaRegistryConfig
        {
            SchemaRegistryUrl = _kafkaConfig.SchemaRegistryUrl
        });

    var subjects = await schemaRegistry.GetAllSubjectsAsync();
    subjects.Count.ShouldBePositive();

    foreach (var subject in subjects)
    {
        // FIX: the loop previously fetched the schema with hardcoded id 1 for
        // every subject (ignoring both the iterated subject and the latest
        // schema it had just retrieved). Now the latest schema registered under
        // the current subject is the one parsed and checked.
        var latestSchema = await schemaRegistry.GetLatestSchemaAsync(subject);
        var schema = global::Avro.Schema.Parse(latestSchema.SchemaString);
        schema.ShouldNotBeNull();
    }
}
/// <summary>
/// Produces a Msg230 protobuf message and verifies (a) the typed protobuf
/// deserializer round-trips the value, and (b) the raw wire framing — magic
/// byte, 4-byte schema id, and the message-index varint array — is laid out as
/// expected for a message type deep inside a multi-message .proto file.
/// </summary>
public static void ProduceConsumeSchemaManyMessagesProtobuf(string bootstrapServers, string schemaRegistryServers)
{
    var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers };
    var schemaRegistryConfig = new SchemaRegistryConfig { Url = schemaRegistryServers };
    using (var topic = new TemporaryTopic(bootstrapServers, 1))
    using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig))
    using (var producer = new ProducerBuilder<string, Msg230>(producerConfig)
        .SetValueSerializer(new ProtobufSerializer<Msg230>(schemaRegistry))
        .Build())
    {
        var u = new Msg230();
        u.Value = 41;
        producer.ProduceAsync(topic.Name, new Message<string, Msg230> { Key = "test1", Value = u }).Wait();

        // Fresh group id so consumption always starts from the earliest offset.
        var consumerConfig = new ConsumerConfig
        {
            BootstrapServers = bootstrapServers,
            GroupId = Guid.NewGuid().ToString(),
            AutoOffsetReset = AutoOffsetReset.Earliest
        };

        // Test the protobuf deserializer can read this message
        // (Msg230 and UInt32Value are wire-compatible: both a single uint32 field).
        using (var consumer = new ConsumerBuilder<string, UInt32Value>(consumerConfig)
            .SetValueDeserializer(new ProtobufDeserializer<UInt32Value>().AsSyncOverAsync())
            .Build())
        {
            consumer.Subscribe(topic.Name);
            var cr = consumer.Consume();
            Assert.Equal(u.Value, cr.Message.Value.Value);
        }

        // Check the pre-data bytes are as expected.
        using (var consumer = new ConsumerBuilder<string, byte[]>(consumerConfig).Build())
        {
            consumer.Subscribe(topic.Name);
            var cr = consumer.Consume();
            // magic byte + schema id + expected array index length + at least one data byte.
            Assert.True(cr.Message.Value.Length >= 1 + 4 + 1 + 2 + 1);
            // magic byte
            Assert.Equal(0, cr.Message.Value[0]);
            // index array length
            Assert.Equal(1, cr.Message.Value[5]);
            // there are 231 messages in the schema. message 230 has index 230. varint is 2 bytes:
            // in binary: 11100110.
            // -> &7f |80 -> 11100110 = 230
            Assert.Equal(230, cr.Message.Value[6]);
            // >>7 -> 00000001
            Assert.Equal(1, cr.Message.Value[7]);
        }
    }
}
/// <summary>
/// Verifies that a registered schema's id can be looked up again, and that
/// looking up an unregistered schema surfaces a SchemaRegistryException.
/// </summary>
public static void GetId(Config config)
{
    var topicName = Guid.NewGuid().ToString();
    var userSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var sr = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = config.Server });

    var subject = sr.ConstructKeySubjectName(topicName);
    var registeredId = sr.RegisterSchemaAsync(subject, userSchema).Result;
    var lookedUpId = sr.GetSchemaIdAsync(subject, userSchema).Result;
    Assert.Equal(registeredId, lookedUpId);

    // An unregistered schema must fail; unwrap the AggregateException that
    // .Wait() produces so the typed assertion sees the real exception.
    Assert.Throws<SchemaRegistryException>(() =>
    {
        try
        {
            sr.GetSchemaIdAsync(subject, "{\"type\": \"string\"}").Wait();
        }
        catch (AggregateException e)
        {
            throw e.InnerException;
        }
    });
}
// Produces a message with both key and value to the given topic, serialized
// with Apache Avro via the Confluent Schema Registry. Awaiting the broker's
// delivery report makes this behave like a synchronous produce: execution
// pauses until the broker confirms (or rejects) the message.
public async Task Produzir<TChave, TValor>(string topico, TChave chave, TValor valor)
{
    using (var schemaRegistry = new CachedSchemaRegistryClient(_brokerHelper.SchemaRegistryConfig))
    using (var producer = new ProducerBuilder<TChave, TValor>(_brokerHelper.ProducerConfig)
        .SetKeySerializer(new AvroSerializer<TChave>(schemaRegistry))
        .SetValueSerializer(new AvroSerializer<TValor>(schemaRegistry))
        .Build())
    {
        try
        {
            var deliveryReport = await producer.ProduceAsync(topico, new Message<TChave, TValor> { Key = chave, Value = valor });
            _messageWriter.Write($"Entregou um objeto '{deliveryReport.Message.Value}' com a key '{deliveryReport.Message.Key}' em '{deliveryReport.TopicPartition} e offset '{deliveryReport.Offset}'", MessageType.Output);
        }
        // FIX: this catch was typed ProduceException<string, string>, which can
        // never match the ProduceException<TChave, TValor> thrown by this
        // producer unless both type arguments happen to be string — produce
        // failures fell through to the generic handler and lost Error.Reason.
        catch (ProduceException<TChave, TValor> e)
        {
            _messageWriter.Write($"Falha na entrega: {e.Error.Reason}", MessageType.Output);
        }
        catch (Exception ex)
        {
            _messageWriter.Write($"Falha na entrega: {ex.Message}", MessageType.Output);
        }
    }
}
/// <summary>
/// Produces one Avro-serialized BadgeEvent to the "badgeevent" topic and
/// flushes before the producer is disposed.
/// </summary>
static async Task ProduceSpecific(string bootstrapServers, string schemaRegistryUrl)
{
    using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryUrl }))
    using (var producer = new ProducerBuilder<Null, BadgeEvent>(new ProducerConfig { BootstrapServers = bootstrapServers })
        .SetValueSerializer(new AvroSerializer<BadgeEvent>(schemaRegistry))
        .Build())
    {
        // Sample badge-award event payload.
        var badge = new BadgeEvent
        {
            id = "9",
            name = "Teacher",
            userId = "16",
            displayName = "dragonmantank",
            reputation = "7636",
            upVotes = 56,
            downVotes = 3,
            processedDate = DateTime.UtcNow.ToString()
        };

        await producer.ProduceAsync("badgeevent", new Message<Null, BadgeEvent> { Value = badge });

        // Block for up to 30s so queued messages are delivered before disposal.
        producer.Flush(TimeSpan.FromSeconds(30));
    }
}
/// <summary>
/// Exercises the compatibility-check API: a registered schema is compatible
/// with itself, and a field-renamed variant is not.
/// </summary>
public static void IsCompatible(string server)
{
    var topicName = Guid.NewGuid().ToString();
    var originalSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var sr = new CachedSchemaRegistryClient(new Dictionary<string, object>
    {
        { "schema.registry.url", server }
    });

    var subject = sr.ConstructKeySubjectName(topicName, "Confluent.Kafka.Examples.AvroSpecific.User");
    var id = sr.RegisterSchemaAsync(subject, originalSchema).Result;

    // Renames favorite_color -> favorite_shape: incompatible with originalSchema.
    var incompatibleSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_shape\",\"type\":[\"string\",\"null\"]}]}";

    Assert.False(sr.IsCompatibleAsync(subject, incompatibleSchema).Result);
    Assert.True(sr.IsCompatibleAsync(subject, originalSchema).Result);

    // Note: backwards / forwards compatibility scenarios are not tested here.
    // This is really just testing the API call.
}
/// <inheritdoc cref="IProducerAsync.ProduceManyAsync"/>
public async Task ProduceManyAsync(IList<Event> events, CancellationToken token = default)
{
    using var schemaRegistry = new CachedSchemaRegistryClient(_schemaRegistryConfig);
    using var producer = new ProducerBuilder<string, Event>(_producerConfig)
        .SetKeySerializer(new AvroSerializer<string>(schemaRegistry))
        .SetValueSerializer(new AvroSerializer<Event>(schemaRegistry))
        .SetErrorHandler((_, error) => _logger.LogError("Kafka encountered an error: {@Error}", error))
        .Build();

    // Each event is awaited individually, so delivery order matches list order.
    foreach (var @event in events)
    {
        var envelope = new Message<string, Event>
        {
            Key = @event.AggregateName,
            Value = @event
        };

        try
        {
            await producer.ProduceAsync(@event.AggregateName, envelope, token);
        }
        catch (Exception e)
        {
            // Wrap so callers deal with a single, domain-specific exception type.
            throw new ProducerException(e.Message, e);
        }
    }
}
/// <summary>
/// Registers more schemas than the client cache can hold (capacity + 10) to
/// exercise cache eviction; the test passes as long as nothing throws.
/// </summary>
public static void FillTheCache(Config config)
{
    const int capacity = 16;
    const string testSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var srConfig = new SchemaRegistryConfig
    {
        Url = config.Server,
        RequestTimeoutMs = 3000,
        MaxCachedSchemas = capacity
    };
    var sr = new CachedSchemaRegistryClient(srConfig);

    var registerCount = capacity + 10;
    var subjects = new List<string>();
    var ids = new List<int>();

    // test is that this does not throw. Also, inspect in debugger.
    for (int i = 0; i < registerCount; ++i)
    {
        var topicName = Guid.NewGuid().ToString();
        var subject = sr.ConstructValueSubjectName(topicName);
        subjects.Add(subject);
        ids.Add(sr.RegisterSchemaAsync(subject, testSchema).Result);
    }
}
/// <summary>
/// Publishes a single-field Avro generic record to the
/// "api-methods-requested-avro" topic, keyed by a fresh GUID.
/// </summary>
/// <param name="message">Currently unused by this method — TODO: confirm whether
/// it was intended to populate the record's "f1" field.</param>
private async Task SendMessageAvro(string message)
{
    var config = new ProducerConfig
    {
        BootstrapServers = "kafka:29092",
        ClientId = Dns.GetHostName()
    };

    // Minimal single-field record schema.
    String userSchema = "{\"type\":\"record\"," +
                        "\"name\":\"myrecord\"," +
                        "\"fields\":[{\"name\":\"f1\",\"type\":\"string\"}]}";
    var schema = (RecordSchema)RecordSchema.Parse(userSchema);
    GenericRecord avroRecord = new GenericRecord(schema);
    avroRecord.Add("f1", "value");

    // FIX: removed the dead local `key = "key1"` — it was never used; the
    // produced message's key has always been a newly generated GUID.
    var schemaRegistryUrl = "http://schema-registry:8085";
    using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = schemaRegistryUrl }))
    {
        using (var producer = new ProducerBuilder<string, GenericRecord>(config)
            .SetKeySerializer(new AvroSerializer<string>(schemaRegistry))
            .SetValueSerializer(new AvroSerializer<GenericRecord>(schemaRegistry))
            .Build())
        {
            await producer.ProduceAsync("api-methods-requested-avro",
                new Message<string, GenericRecord>
                {
                    Key = Guid.NewGuid().ToString("N"),
                    Value = avroRecord
                });
        }
    }
}
/// <summary>
/// Builds the Kafka consumer for image responses (Avro-deserialized) and
/// subscribes to the "imageResponse" topic; results are pushed to SignalR
/// clients via the injected hub context.
/// </summary>
public KafkaBackgroundReceiver(Microsoft.AspNetCore.SignalR.IHubContext<ImageMarshallingHub> imageHub)
{
    _imageHub = imageHub;

    // Note: The AutoOffsetReset property detemines the start offset in the event
    // there are not yet any committed offsets for the consumer group for the
    // topic/partitions of interest. By default, offsets are committed
    // automatically, so in this example, consumption will only start from the
    // earliest message in the topic 'my-topic' the first time you run the program.
    var consumerConf = new ConsumerConfig
    {
        GroupId = "test-consumer-group",
        BootstrapServers = "kafka-server1:9092",
        AutoOffsetReset = AutoOffsetReset.Earliest
    };

    var schemaRegistryConfig = new SchemaRegistryConfig
    {
        Url = "kafka-schema-registry:8081"
    };
    var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig);

    // Avro value deserializer wrapped for the synchronous Consume() API.
    _consumer = new ConsumerBuilder<Ignore, imageResponse>(consumerConf)
        .SetValueDeserializer(new AvroDeserializer<imageResponse>(schemaRegistry).AsSyncOverAsync<imageResponse>())
        .Build();
    _consumer.Subscribe("imageResponse");
}
/// <summary>
/// Produces one Avro-serialized AMessage with a random head count to the
/// configured topic, logging the delivery result or any failure.
/// </summary>
private static async Task Produce()
{
    try
    {
        var rng = new Random();
        using (var schema = new CachedSchemaRegistryClient(new SchemaRegistryConfig { Url = RegistryUrl }))
        using (var producer = new ProducerBuilder<byte[], AMessage>(new ProducerConfig
            {
                BootstrapServers = string.Join(',', Brokers)
            })
            .SetValueSerializer(new AvroSerializer<AMessage>(schema).AsSyncOverAsync())
            .Build())
        {
            var payload = new AMessage
            {
                id = 1,
                noOfHeads = rng.Next(100)
            };

            var result = await producer.ProduceAsync(Topic, new Message<byte[], AMessage>
            {
                Key = Encoding.UTF8.GetBytes("streamId"),
                Value = payload
            });

            Console.WriteLine($@"Delivered '{result.Value}' to: {result.TopicPartitionOffset}");
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex.Message);
    }
}
/// <summary>
/// Continuously produces Faker-generated User records (keyed by an increasing
/// counter) until cancellation is requested, flushing after each send.
/// </summary>
public async Task Produce()
{
    using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = _config.SchemaRegistryUrl }))
    using (var producer = Build(schemaRegistry))
    {
        // Faker rules: random names plus an email derived from them.
        var testUsers = new Faker<User>()
            .RuleFor(u => u.id, f => Guid.NewGuid().ToString())
            .RuleFor(u => u.first_name, (f, u) => f.Name.FirstName())
            .RuleFor(u => u.last_name, (f, u) => f.Name.LastName())
            .RuleFor(u => u.email_name, (f, u) => f.Internet.Email(u.first_name, u.last_name));

        var sequence = 0;
        while (_cts.IsCancellationRequested == false)
        {
            var user = testUsers.Generate();

            await producer
                .ProduceAsync(_topicName, new Message<string, User> { Key = sequence++.ToString(), Value = user })
                .ContinueWith(LogError);

            // Give queued messages up to 5s to be delivered before the next send.
            producer.Flush(TimeSpan.FromSeconds(5));
        }
    }
}
/// <summary>
/// Initializes the block and transaction producers with Avro serializers
/// backed by the configured schema registry. Sets _initialized on success;
/// failures are logged and swallowed so startup continues.
/// </summary>
private void Init(ProducerConfig config)
{
    if (_logger.IsDebug)
    {
        _logger.Debug($"Initializing {Name} type producer for Kafka...");
    }

    try
    {
        var schemaRegistry = new CachedSchemaRegistryClient(new[]
        {
            new KeyValuePair<string, string>(SchemaRegistryConfig.PropertyNames.SchemaRegistryUrl, _schemaRegistryUrl)
        });

        // Serializers wrapped for the synchronous Produce API.
        var blockSerializer = new AvroSerializer<Block>(schemaRegistry).AsSyncOverAsync();
        var txSerializer = new AvroSerializer<FullTransaction>(schemaRegistry).AsSyncOverAsync();

        var blockBuilder = new ProducerBuilder<Null, Block>(config);
        blockBuilder.SetValueSerializer(blockSerializer);
        blockBuilder.SetErrorHandler((s, e) => _logger.Error(e.ToString()));

        var txBuilder = new ProducerBuilder<Null, FullTransaction>(config);
        txBuilder.SetValueSerializer(txSerializer);
        txBuilder.SetErrorHandler((s, e) => _logger.Error(e.ToString()));

        _producerBlocks = blockBuilder.Build();
        _producerTransactions = txBuilder.Build();
        _initialized = true;

        if (_logger.IsDebug)
        {
            _logger.Debug($"Initialized {Name} type producer for Kafka.");
        }
    }
    catch (Exception e)
    {
        _logger.Error(e.Message, e);
    }
}
/// <summary>
/// Creates a deserializer.
/// </summary>
/// <param name="registryConfiguration">
/// Schema Registry configuration. Using the <see cref="SchemaRegistryConfig" /> class is
/// highly recommended.
/// </param>
/// <param name="deserializerBuilder">
/// The deserializer builder used to generate deserialization functions for C# types.
/// Defaults to the standard binary deserializer builder.
/// </param>
/// <param name="schemaReader">
/// The JSON schema reader used to convert schemas received from the registry into
/// abstract representations. Defaults to the standard JSON schema reader.
/// </param>
/// <param name="tombstoneBehavior">
/// The behavior of the deserializer on tombstone records.
/// </param>
/// <exception cref="ArgumentNullException">
/// Thrown when the registry configuration is null.
/// </exception>
public AsyncSchemaRegistryDeserializer(
    IEnumerable<KeyValuePair<string, string>> registryConfiguration,
    IBinaryDeserializerBuilder deserializerBuilder = null,
    IJsonSchemaReader schemaReader = null,
    TombstoneBehavior tombstoneBehavior = TombstoneBehavior.None
)
{
    if (registryConfiguration is null)
    {
        throw new ArgumentNullException(nameof(registryConfiguration));
    }

    // Non-nullable value types can never be null, so they cannot carry tombstones.
    if (tombstoneBehavior != TombstoneBehavior.None && default(T) != null)
    {
        throw new UnsupportedTypeException(typeof(T), $"{typeof(T)} cannot represent tombstone values.");
    }

    DeserializerBuilder = deserializerBuilder ?? new BinaryDeserializerBuilder();
    RegistryClient = new CachedSchemaRegistryClient(registryConfiguration);
    SchemaReader = schemaReader ?? new JsonSchemaReader();
    TombstoneBehavior = tombstoneBehavior;

    _cache = new Dictionary<int, Task<Func<Stream, T>>>();
    // The client was created here, so this instance owns its disposal.
    _disposeRegistryClient = true;
}
/// <summary>
/// Builds a SASL/SSL-authenticated Kafka producer for Payment messages,
/// Avro-serializing values via the configured schema registry.
/// </summary>
private static IProducer<string, Payment> AddProducerPayment(IConfiguration configuration)
{
    var producerConfig = new ProducerConfig
    {
        BootstrapServers = configuration["Kafka:Servers"],
        // Host name suffix keeps client ids distinct across instances.
        ClientId = configuration["Kafka:ClientId"] + "-" + Dns.GetHostName(),
        Acks = Acks.All,
        SecurityProtocol = SecurityProtocol.SaslSsl,
        SaslMechanism = SaslMechanism.Plain,
        SaslUsername = configuration["Kafka:Username"],
        SaslPassword = configuration["Kafka:Password"]
    };

    var schemaConfig = new SchemaRegistryConfig
    {
        Url = configuration["SchemaRegistry:Url"],
        BasicAuthUserInfo = configuration["SchemaRegistry:UsernamePassword"]
    };
    var schemaRegistry = new CachedSchemaRegistryClient(schemaConfig);

    // Avro serializer wrapped for the synchronous Produce API.
    //.SetValueSerializer(new AnimaJsonSerializer<Payment>())
    return new ProducerBuilder<string, Payment>(producerConfig)
        .SetValueSerializer(new AvroSerializer<Payment>(schemaRegistry).AsSyncOverAsync())
        .Build();
}
/// <summary>
/// Registers more schemas than the client cache can hold (capacity + 10) to
/// exercise cache eviction, using dictionary-based configuration; the test
/// passes as long as nothing throws.
/// </summary>
public static void FillTheCache(string server)
{
    const int capacity = 16;
    const string testSchema =
        "{\"type\":\"record\",\"name\":\"User\",\"namespace\":\"Confluent.Kafka.Examples.AvroSpecific" +
        "\",\"fields\":[{\"name\":\"name\",\"type\":\"string\"},{\"name\":\"favorite_number\",\"type\":[\"i" +
        "nt\",\"null\"]},{\"name\":\"favorite_color\",\"type\":[\"string\",\"null\"]}]}";

    var config = new Dictionary<string, object>
    {
        { "schema.registry.url", server },
        { "schema.registry.connection.timeout.ms", 3000 },
        { "schema.registry.max.cached.schemas", capacity }
    };
    var sr = new CachedSchemaRegistryClient(config);

    var registerCount = capacity + 10;
    var subjects = new List<string>();
    var ids = new List<int>();

    // test is that this does not throw. Also, inspect in debugger.
    for (int i = 0; i < registerCount; ++i)
    {
        var topicName = Guid.NewGuid().ToString();
        var subject = sr.ConstructValueSubjectName(topicName);
        subjects.Add(subject);
        ids.Add(sr.RegisterSchemaAsync(subject, testSchema).Result);
    }
}
/// <summary>
/// Produces a protobuf-serialized message derived from a consumed message,
/// logging either the delivery offset or the produce failure.
/// </summary>
/// <remarks>
/// The securityProtocol / saslMechanism / credential / basicAuthUserInfo
/// parameters are accepted but not currently applied to the configs —
/// NOTE(review): confirm whether they should be wired into ProducerConfig
/// and SchemaRegistryConfig.
/// </remarks>
private static async Task Run_Producer<TKey, TValue>(string brokerList, string topicName, SecurityProtocol securityProtocol,
    SaslMechanism saslMechanism, string saslUsername, string saslPassword, string schemaRegistryUrl,
    string basicAuthUserInfo, Message<Ignore, TValue> consumerMessage)
    where TKey : class, IMessage<TKey>, new()
    where TValue : class, IMessage<TValue>, new()
{
    var message = CreateMessage<TKey, TValue>(consumerMessage.Value);

    var config = new ProducerConfig
    {
        BootstrapServers = brokerList,
    };

    var schemaRegistryConfig = new SchemaRegistryConfig
    {
        Url = schemaRegistryUrl,
    };

    using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig))
    using (var producer = new ProducerBuilder<TKey, TValue>(config)
        .SetKeySerializer(new ProtobufSerializer<TKey>(schemaRegistry))
        .SetValueSerializer(new ProtobufSerializer<TValue>(schemaRegistry))
        .Build())
    {
        try
        {
            var deliveryReport = await producer.ProduceAsync(topicName, message);
            Console.WriteLine($"delivered to: {deliveryReport.TopicPartitionOffset} for producer: {producer.Name}");
        }
        // FIX: the catch was typed ProduceException<int, TValue>, which cannot
        // match the ProduceException<TKey, TValue> thrown by this producer
        // unless TKey happens to be int — failures would escape unlogged.
        catch (ProduceException<TKey, TValue> e)
        {
            Console.WriteLine($"failed to deliver message: {e.Message} [{e.Error.Code}]");
        }
    }
}
/// <summary>
/// Synchronously produces one Avro-serialized NewConstructionAddressEvent
/// (keyed by a fresh GUID) to the given topic, flushing before disposal.
/// </summary>
public static void Produce(string broker, string schemaRegistryUrl, string topic, NewConstructionAddressEvent item)
{
    using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryUrl }))
    {
        var producerConfig = new ProducerConfig
        {
            BootstrapServers = broker,
        };

        // Async Avro serializers wrapped for the synchronous Produce() call.
        using (var producer = new ProducerBuilder<string, NewConstructionAddressEvent>(producerConfig)
            .SetValueSerializer(new SyncOverAsyncSerializer<NewConstructionAddressEvent>(new AvroSerializer<NewConstructionAddressEvent>(schemaRegistry)))
            .SetKeySerializer(new SyncOverAsyncSerializer<string>(new AvroSerializer<string>(schemaRegistry)))
            .Build())
        {
            var envelope = new Message<string, NewConstructionAddressEvent>
            {
                Value = item,
                Key = Guid.NewGuid().ToString()
            };
            producer.Produce(topic, envelope);

            // Ensure the message leaves the local queue before the producer is disposed.
            producer.Flush();
        }
    }
}