public void ZKAwareProducerSends1Message() { int totalWaitTimeInMiliseconds = 0; int waitSingle = 100; var originalMessage = new Message(Encoding.UTF8.GetBytes("TestData")); var multipleBrokersHelper = new TestMultipleBrokersHelper(CurrentTestTopic); multipleBrokersHelper.GetCurrentOffsets(); var producerConfig = new ProducerConfig(clientConfig); var mockPartitioner = new MockAlwaysZeroPartitioner(); using (var producer = new Producer<string, Message>(producerConfig, mockPartitioner, new DefaultEncoder())) { var producerData = new ProducerData<string, Message>( CurrentTestTopic, "somekey", new List<Message>() { originalMessage }); producer.Send(producerData); while (!multipleBrokersHelper.CheckIfAnyBrokerHasChanged()) { totalWaitTimeInMiliseconds += waitSingle; Thread.Sleep(waitSingle); if (totalWaitTimeInMiliseconds > MaxTestWaitTimeInMiliseconds) { Assert.Fail("None of the brokers changed their offset after sending a message"); } } totalWaitTimeInMiliseconds = 0; var consumerConfig = new ConsumerConfig(clientConfig) { Host = multipleBrokersHelper.BrokerThatHasChanged.Address, Port = multipleBrokersHelper.BrokerThatHasChanged.Port }; IConsumer consumer = new Consumers.Consumer(consumerConfig); var request = new FetchRequest(CurrentTestTopic, 0, multipleBrokersHelper.OffsetFromBeforeTheChange); BufferedMessageSet response; while (true) { Thread.Sleep(waitSingle); response = consumer.Fetch(request); if (response != null && response.Messages.Count() > 0) { break; } totalWaitTimeInMiliseconds += waitSingle; if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds) { break; } } Assert.NotNull(response); Assert.AreEqual(1, response.Messages.Count()); Assert.AreEqual(originalMessage.ToString(), response.Messages.First().ToString()); } }
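The test above depends on a MockAlwaysZeroPartitioner that is not shown. A minimal sketch, assuming the legacy client's IPartitioner<TKey> interface exposes a Partition(key, numPartitions) method (the exact interface shape may differ by client version):

public class MockAlwaysZeroPartitioner : IPartitioner<string>
{
    // Route every message to partition 0 regardless of key, so the test
    // knows deterministically which broker's offset should change.
    public int Partition(string key, int numPartitions)
    {
        return 0;
    }
}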
public BrokerPartitionInfo(ProducerConfig producerConfig, ProducerPool producerPool, Dictionary<string, TopicMetadata> topicPartitionInfo) { this.producerConfig = producerConfig; this.producerPool = producerPool; this.topicPartitionInfo = topicPartitionInfo; this.brokerList = producerConfig.Brokers; this.brokers = ClientUtils.ParseBrokerList(this.brokerList); }
public AsyncProducerConfig(ProducerConfig config, string host, int port) : base(config, host, port) { this.QueueBufferingMaxMs = config.QueueBufferingMaxMs; this.QueueBufferingMaxMessages = config.QueueBufferingMaxMessages; this.QueueEnqueueTimeoutMs = config.QueueEnqueueTimeoutMs; this.BatchNumMessages = config.BatchNumMessages; this.Serializer = config.Serializer; this.KeySerializer = config.KeySerializer; }
/// <summary> /// Used by the producer to send a metadata request since it has access to the ProducerConfig /// </summary> /// <param name="topics">The topics for which the metadata needs to be fetched</param> /// <param name="brokers">The brokers in the cluster as configured on the client</param> /// <param name="producerConfig">The producer's config</param> /// <param name="correlationId">The correlation id of the request</param> /// <returns>The topic metadata response</returns> public static TopicMetadataResponse FetchTopicMetadata( ISet<string> topics, IList<Broker> brokers, ProducerConfig producerConfig, int correlationId) { var fetchMetaDataSucceeded = false; var i = 0; var topicMetadataRequest = new TopicMetadataRequest( TopicMetadataRequest.CurrentVersion, correlationId, producerConfig.ClientId, topics.ToList()); TopicMetadataResponse topicMetadataResponse = null; Exception t = null; // shuffle the list of brokers before sending metadata requests so that most requests don't get routed to the same broker var shuffledBrokers = brokers.Shuffle(); while (i < shuffledBrokers.Count() && !fetchMetaDataSucceeded) { var producer = ProducerPool.CreateSyncProducer(producerConfig, shuffledBrokers[i]); Logger.InfoFormat("Fetching metadata from broker {0} with correlation id {1} for {2} topic(s) {3}", shuffledBrokers[i], correlationId, topics.Count, string.Join(",", topics)); try { topicMetadataResponse = producer.Send(topicMetadataRequest); fetchMetaDataSucceeded = true; } catch (Exception e) { Logger.Warn(string.Format("Fetching topic metadata with correlation id {0} for topic [{1}] from broker [{2}] failed", correlationId, string.Join(",", topics), shuffledBrokers[i]), e); t = e; } finally { i++; producer.Dispose(); } } if (!fetchMetaDataSucceeded) { throw new KafkaException( string.Format( "fetching topic metadata for topics [{0}] from broker [{1}] failed", string.Join(",", topics), string.Join(", ", shuffledBrokers)), t); } Logger.DebugFormat("Successfully fetched metadata for {0} topic(s) {1}", topics.Count(), string.Join(",", topics)); return topicMetadataResponse; }
public DefaultPartitioner(ProducerConfig config) { }
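The constructor is intentionally empty: the default partitioner needs nothing from the config. For reference, a minimal sketch of the key-hash partitioning such a class typically performs (the method name and signature are assumptions, not taken from the source):

public int Partition(object key, int numPartitions)
{
    // Mask the sign bit so the modulo result is always a valid,
    // non-negative partition index.
    return (key.GetHashCode() & 0x7fffffff) % numPartitions;
}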
public static void Consumer_Poll_Error(string bootstrapServers, string singlePartitionTopic, string partitionedTopic) { LogToFile("start Consumer_Poll_Error"); var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; TopicPartitionOffset firstProduced = null; using (var producer = new Producer(producerConfig)) { var keyData = Encoding.UTF8.GetBytes("key"); firstProduced = producer.ProduceAsync(singlePartitionTopic, new Message { Key = keyData }).Result.TopicPartitionOffset; var valData = Encoding.UTF8.GetBytes("val"); producer.ProduceAsync(singlePartitionTopic, new Message { Value = valData }); producer.Flush(TimeSpan.FromSeconds(10)); } var consumerConfig = new ConsumerConfig { GroupId = Guid.NewGuid().ToString(), BootstrapServers = bootstrapServers, SessionTimeoutMs = 6000, EnablePartitionEof = true }; // test key deserialization error behavior using (var consumer = new Consumer <Null, string>(consumerConfig)) { int msgCnt = 0; int errCnt = 0; consumer.OnPartitionsAssigned += (_, partitions) => { Assert.Single(partitions); Assert.Equal(firstProduced.TopicPartition, partitions[0]); consumer.Assign(partitions.Select(p => new TopicPartitionOffset(p, firstProduced.Offset))); }; consumer.OnPartitionsRevoked += (_, partitions) => consumer.Unassign(); consumer.Subscribe(singlePartitionTopic); while (true) { try { var record = consumer.Consume(TimeSpan.FromMilliseconds(100)); if (record == null) { continue; } if (record.IsPartitionEOF) { break; } msgCnt += 1; } catch (ConsumeException e) { errCnt += 1; Assert.Equal(ErrorCode.Local_KeyDeserialization, e.Error.Code); Assert.Equal(firstProduced.Offset.Value, e.ConsumerRecord.Offset.Value); } } Assert.Equal(1, msgCnt); Assert.Equal(1, errCnt); consumer.Close(); } // test value deserialization error behavior using (var consumer = new Consumer <string, Null>(consumerConfig)) { int msgCnt = 0; int errCnt = 0; consumer.OnPartitionsAssigned += (_, partitions) => { Assert.Single(partitions); Assert.Equal(firstProduced.TopicPartition, partitions[0]); consumer.Assign(partitions.Select(p => new TopicPartitionOffset(p, firstProduced.Offset))); }; consumer.OnPartitionsRevoked += (_, partitions) => consumer.Unassign(); consumer.Subscribe(singlePartitionTopic); while (true) { try { var record = consumer.Consume(TimeSpan.FromMilliseconds(100)); if (record == null) { continue; } if (record.IsPartitionEOF) { break; } msgCnt += 1; } catch (ConsumeException e) { errCnt += 1; Assert.Equal(ErrorCode.Local_ValueDeserialization, e.Error.Code); Assert.Equal(firstProduced.Offset.Value + 1, e.ConsumerRecord.Offset.Value); } } Assert.Equal(1, msgCnt); Assert.Equal(1, errCnt); consumer.Close(); } Assert.Equal(0, Library.HandleCount); LogToFile("end Consumer_Poll_Error"); }
public ProducerWrapper(ProducerConfig config, string topicName) { this._topicName = topicName; this._config = config; this._producer = new ProducerBuilder <string, string>(this._config).Build(); }
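The rest of ProducerWrapper is not shown. A plausible send method, assuming the wrapper simply forwards to the built producer (WriteMessageAsync is a hypothetical name):

public async Task WriteMessageAsync(string message)
{
    // Produce to the wrapped topic and log where the message landed.
    var dr = await this._producer.ProduceAsync(
        this._topicName,
        new Message<string, string> { Key = Guid.NewGuid().ToString(), Value = message });
    Console.WriteLine($"Delivered '{dr.Value}' to '{dr.TopicPartitionOffset}'");
}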
public KafkaGenericProducer(string bootstrapServer = "") { _config = new ProducerConfig { BootstrapServers = bootstrapServer }; }
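Only the constructor is shown; a sketch of a produce method such a generic wrapper might expose (hypothetical, assuming string payloads):

public async Task<bool> ProduceAsync(string topic, string message)
{
    // Build a short-lived producer from the stored config, send one
    // message, and report whether the broker persisted it.
    using (var producer = new ProducerBuilder<Null, string>(_config).Build())
    {
        var result = await producer.ProduceAsync(topic, new Message<Null, string> { Value = message });
        return result.Status == PersistenceStatus.Persisted;
    }
}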
static void Main(string[] args) { ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); ThreadPool.SetMaxThreads(workerThreads, completionPortThreads); ThreadPool.GetMaxThreads(out workerThreads, out completionPortThreads); Console.WriteLine($"ThreadPool workerThreads: {workerThreads}, completionPortThreads: {completionPortThreads}"); var pConfig = new ProducerConfig { BootstrapServers = args[0] }; using (var producer = new ProducerBuilder <Null, string>(pConfig) .SetValueSerializer(new SimpleAsyncSerializer().SyncOverAsync()) // may deadlock due to thread pool exhaustion. // .SetValueSerializer(new SimpleSyncSerializer()) // will never deadlock. .Build()) { var topic = Guid.NewGuid().ToString(); var tasks = new List <Task>(); // will deadlock if N >= workerThreads. int N = workerThreads; for (int i = 0; i < N; ++i) { // create a unique delivery report handler for each task. Func <int, Action> actionCreator = (taskNumber) => { return(() => { Console.WriteLine($"running task {taskNumber}"); object waitObj = new object(); Action <DeliveryReport <Null, string> > handler = dr => { // in a deadlock scenario, the delivery handler will // never execute since execution of the Produce // method calls never progresses past serialization. Console.WriteLine($"delivery report: {dr.Value}"); lock (waitObj) { Monitor.Pulse(waitObj); } }; try { producer.Produce(topic, new Message <Null, string> { Value = $"value: {taskNumber}" }, handler); // execution will never get past Produce, because the deadlock occurs while running the serializer. } catch (Exception ex) { Console.WriteLine(ex.StackTrace); } // in a deadlock scenario, this line will never be hit, since the // serializer blocks during the Produce call. Console.WriteLine($"waiting for delivery report {taskNumber}"); lock (waitObj) { Monitor.Wait(waitObj); } }); }; tasks.Add(Task.Run(actionCreator(i))); } Console.WriteLine($"waiting for {tasks.Count} produce tasks to complete. --> expecting deadlock <--"); Task.WaitAll(tasks.ToArray()); Console.WriteLine($"number outstanding produce requests on exit: {producer.Flush(TimeSpan.FromSeconds(10))}"); } }
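The SimpleSyncSerializer and SimpleAsyncSerializer referenced above are not shown. Minimal sketches, assuming they implement Confluent.Kafka's ISerializer<string> and IAsyncSerializer<string>:

class SimpleSyncSerializer : ISerializer<string>
{
    public byte[] Serialize(string data, SerializationContext context)
    {
        // Simulate a slow serializer without waiting on the thread pool.
        Thread.Sleep(500);
        return Encoding.UTF8.GetBytes(data);
    }
}

class SimpleAsyncSerializer : IAsyncSerializer<string>
{
    public async Task<byte[]> SerializeAsync(string data, SerializationContext context)
    {
        // The await continuation needs a thread-pool thread; when SyncOverAsync
        // blocks all of them, this is where the deadlock materializes.
        await Task.Delay(500);
        return Encoding.UTF8.GetBytes(data);
    }
}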
public void Producer_CustomPartitioner(string bootstrapServers) { LogToFile("start Producer_CustomPartitioner"); const int PARTITION_COUNT = 42; var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers, }; for (int j = 0; j < 3; ++j) { using (var topic = new TemporaryTopic(bootstrapServers, PARTITION_COUNT)) { Action <DeliveryReport <string, string> > dh = (DeliveryReport <string, string> dr) => { Assert.StartsWith($"test key ", dr.Message.Key); Assert.StartsWith($"test val ", dr.Message.Value); var expectedPartition = int.Parse(dr.Message.Key.Split(" ").Last()); Assert.Equal(ErrorCode.NoError, dr.Error.Code); Assert.Equal(PersistenceStatus.Persisted, dr.Status); Assert.Equal(topic.Name, dr.Topic); Assert.Equal(expectedPartition, (int)dr.Partition); Assert.True(dr.Offset >= 0); Assert.Equal(TimestampType.CreateTime, dr.Message.Timestamp.Type); Assert.True(Math.Abs((DateTime.UtcNow - dr.Message.Timestamp.UtcDateTime).TotalMinutes) < 1.0); }; ProducerBuilder <string, string> producerBuilder = null; switch (j) { case 0: // Topic level custom partitioner. producerBuilder = new ProducerBuilder <string, string>(producerConfig); producerBuilder.SetPartitioner(topic.Name, (string topicName, int partitionCount, ReadOnlySpan <byte> keyData, bool keyIsNull) => { Assert.Equal(topic.Name, topicName); var keyString = System.Text.UTF8Encoding.UTF8.GetString(keyData.ToArray()); return(int.Parse(keyString.Split(" ").Last()) % partitionCount); }); break; case 1: // Default custom partitioner producerBuilder = new ProducerBuilder <string, string>(producerConfig); producerBuilder.SetDefaultPartitioner((string topicName, int partitionCount, ReadOnlySpan <byte> keyData, bool keyIsNull) => { Assert.Equal(topic.Name, topicName); var keyString = System.Text.UTF8Encoding.UTF8.GetString(keyData.ToArray()); return(int.Parse(keyString.Split(" ").Last()) % partitionCount); }); break; case 2: // Default custom partitioner in case where default topic config is present due to topic level config in top-level config. var producerConfig2 = new ProducerConfig { BootstrapServers = bootstrapServers, MessageTimeoutMs = 10000 }; producerBuilder = new ProducerBuilder <string, string>(producerConfig2); producerBuilder.SetDefaultPartitioner((string topicName, int partitionCount, ReadOnlySpan <byte> keyData, bool keyIsNull) => { Assert.Equal(topic.Name, topicName); var keyString = System.Text.UTF8Encoding.UTF8.GetString(keyData.ToArray()); return(int.Parse(keyString.Split(" ").Last()) % partitionCount); }); break; default: Assert.True(false); break; } using (var producer = producerBuilder.Build()) { for (int i = 0; i < PARTITION_COUNT; ++i) { producer.Produce( topic.Name, new Message <string, string> { Key = $"test key {i}", Value = $"test val {i}" }, dh); } producer.Flush(TimeSpan.FromSeconds(10)); } } } // Null key using (var topic = new TemporaryTopic(bootstrapServers, PARTITION_COUNT)) using (var producer = new ProducerBuilder <Null, string>(producerConfig) .SetDefaultPartitioner((string topicName, int partitionCount, ReadOnlySpan <byte> keyData, bool keyIsNull) => { Assert.True(keyIsNull); return(0); }) .Build()) { producer.Produce(topic.Name, new Message <Null, string> { Value = "test value" }); producer.Flush(TimeSpan.FromSeconds(10)); } Assert.Equal(0, Library.HandleCount); LogToFile("end Producer_CustomPartitioner"); }
public KafkaProducer(ProducerConfig config) { _producer = new ProducerBuilder <string, string>(config).Build(); }
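Only the constructor is shown; a passthrough send method such a wrapper would typically add (hypothetical, not part of the source above):

public Task<DeliveryResult<string, string>> SendAsync(string topic, string key, string value)
    => _producer.ProduceAsync(topic, new Message<string, string> { Key = key, Value = value });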
static void Main(string[] args) { if (args.Length != 3) { Console.WriteLine("Usage: .. bootstrapServers schemaRegistryUrl topicName"); return; } string bootstrapServers = args[0]; string schemaRegistryUrl = args[1]; string topicName = args[2]; var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var schemaRegistryConfig = new SchemaRegistryConfig { // Note: you can specify more than one schema registry url using the // schema.registry.url property for redundancy (comma separated list). // The property name is not plural to follow the convention set by // the Java implementation. Url = schemaRegistryUrl }; var consumerConfig = new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = "avro-specific-example-group" }; var avroSerializerConfig = new AvroSerializerConfig { // optional Avro serializer properties: BufferBytes = 100 }; CancellationTokenSource cts = new CancellationTokenSource(); var consumeTask = Task.Run(() => { using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var consumer = new ConsumerBuilder <string, User>(consumerConfig) .SetKeyDeserializer(new AvroDeserializer <string>(schemaRegistry).AsSyncOverAsync()) .SetValueDeserializer(new AvroDeserializer <User>(schemaRegistry).AsSyncOverAsync()) .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}")) .Build()) { consumer.Subscribe(topicName); try { while (true) { try { var consumeResult = consumer.Consume(cts.Token); Console.WriteLine($"user name: {consumeResult.Message.Key}, favorite color: {consumeResult.Message.Value.favorite_color}, hourly_rate: {consumeResult.Message.Value.hourly_rate}"); } catch (ConsumeException e) { Console.WriteLine($"Consume error: {e.Error.Reason}"); } } } catch (OperationCanceledException) { consumer.Close(); } } }); using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new ProducerBuilder <string, User>(producerConfig) .SetKeySerializer(new AvroSerializer <string>(schemaRegistry, avroSerializerConfig)) .SetValueSerializer(new AvroSerializer <User>(schemaRegistry, avroSerializerConfig)) .Build()) { Console.WriteLine($"{producer.Name} producing on {topicName}. Enter user names, q to exit."); int i = 0; string text; while ((text = Console.ReadLine()) != "q") { User user = new User { name = text, favorite_color = "green", favorite_number = i++, hourly_rate = new Avro.AvroDecimal(67.99) }; producer .ProduceAsync(topicName, new Message <string, User> { Key = text, Value = user }) .ContinueWith(task => { if (!task.IsFaulted) { Console.WriteLine($"produced to: {task.Result.TopicPartitionOffset}"); return; } // Task.Exception is of type AggregateException. Use the InnerException property // to get the underlying ProduceException. In some cases (notably Schema Registry // connectivity issues), the InnerException of the ProduceException will contain // additional information pertaining to the root cause of the problem. Note: this // information is automatically included in the output of the ToString() method of // the ProduceException, which is called implicitly below. Console.WriteLine($"error producing message: {task.Exception.InnerException}"); }); } } cts.Cancel(); }
public static void AutoRegisterSchemaDisabled(string bootstrapServers, string schemaRegistryServers) { using (var topic = new TemporaryTopic(bootstrapServers, 1)) { var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var consumerConfig = new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = Guid.NewGuid().ToString(), SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetResetType.Earliest }; var schemaRegistryConfig = new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryServers }; // first, a quick check that the value case fails. using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new Producer <string, int>( producerConfig, new AvroSerializer <string>(schemaRegistry), new AvroSerializer <int>(schemaRegistry, new AvroSerializerConfig { AutoRegisterSchemas = false }))) { Assert.Throws <SerializationException>(() => { try { producer .ProduceAsync(Guid.NewGuid().ToString(), new Message <string, int> { Key = "test", Value = 112 }) .Wait(); } catch (AggregateException e) { throw e.InnerException; } }); } // the following tests all check behavior in the key case. using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new Producer <string, int>( producerConfig, new AvroSerializer <string>(schemaRegistry, new AvroSerializerConfig { AutoRegisterSchemas = false }), new AvroSerializer <int>(schemaRegistry))) { Assert.Throws <SerializationException>(() => { try { producer.ProduceAsync(topic.Name, new Message <string, int> { Key = "test", Value = 112 }).Wait(); } catch (AggregateException e) { throw e.InnerException; } }); } // allow auto register. using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new Producer <string, int>(producerConfig, new AvroSerializer <string>(schemaRegistry), new AvroSerializer <int>(schemaRegistry))) { producer.ProduceAsync(topic.Name, new Message <string, int> { Key = "test", Value = 112 }).Wait(); } // config with avro.serializer.auto.register.schemas == false should work now. using (var schemaRegistry = new CachedSchemaRegistryClient(new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryServers })) using (var producer = new Producer <string, int>( producerConfig, new AvroSerializer <string>(schemaRegistry, new AvroSerializerConfig { AutoRegisterSchemas = false }), new AvroSerializer <int>(schemaRegistry))) { producer.ProduceAsync(topic.Name, new Message <string, int> { Key = "test", Value = 112 }).Wait(); } } }
public static void WatermarkOffsets(string bootstrapServers, string singlePartitionTopic, string partitionedTopic) { LogToFile("start WatermarkOffsets"); var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var testString = "hello world"; DeliveryResult <Null, string> dr; using (var producer = new Producer <Null, string>(producerConfig)) using (var adminClient = new AdminClient(producer.Handle)) { dr = producer.ProduceAsync(singlePartitionTopic, new Message <Null, string> { Value = testString }).Result; Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); // this isn't necessary. var queryOffsets = adminClient.QueryWatermarkOffsets(new TopicPartition(singlePartitionTopic, 0), TimeSpan.FromSeconds(20)); Assert.NotEqual(queryOffsets.Low, Offset.Invalid); Assert.NotEqual(queryOffsets.High, Offset.Invalid); // TODO: can anything be said about the high watermark offset c.f. dr.Offset? // I have seen queryOffsets.High < dr.Offset and also queryOffsets.High = dr.Offset + 1. // The former only once (or was I in error?). request.required.acks has a default value // of 1, so with only one broker, I assume the former should never happen. // Console.WriteLine($"Query Offsets: [{queryOffsets.Low} {queryOffsets.High}]. DR Offset: {dr.Offset}"); Assert.True(queryOffsets.Low < queryOffsets.High); } var consumerConfig = new ConsumerConfig { GroupId = Guid.NewGuid().ToString(), BootstrapServers = bootstrapServers, SessionTimeoutMs = 6000 }; using (var consumer = new Consumer(consumerConfig)) using (var adminClient = new AdminClient(consumer.Handle)) { consumer.Assign(new List <TopicPartitionOffset>() { dr.TopicPartitionOffset }); var record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record.Message); var getOffsets = adminClient.GetWatermarkOffsets(dr.TopicPartition); Assert.Equal(getOffsets.Low, Offset.Invalid); // the offset of the next message to be read. Assert.Equal(getOffsets.High, dr.Offset + 1); var queryOffsets = adminClient.QueryWatermarkOffsets(dr.TopicPartition, TimeSpan.FromSeconds(20)); Assert.NotEqual(queryOffsets.Low, Offset.Invalid); Assert.Equal(getOffsets.High, queryOffsets.High); } Assert.Equal(0, Library.HandleCount); LogToFile("end WatermarkOffsets"); }
public static void ConsumePartitionEOF(string bootstrapServers, string schemaRegistryServers) { var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var schemaRegistryConfig = new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryServers }; using (var topic = new TemporaryTopic(bootstrapServers, 1)) using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new ProducerBuilder <Null, User>(producerConfig) .SetKeySerializer(Serializers.Null) .SetValueSerializer(new AvroSerializer <User>(schemaRegistry)) .Build()) { producer.ProduceAsync(topic.Name, new Message <Null, User> { Value = new User { name = "test" } }); var consumerConfig = new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = Guid.NewGuid().ToString(), SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetReset.Earliest, EnablePartitionEof = true }; using (var consumer = new ConsumerBuilder <Null, User>(consumerConfig) .SetKeyDeserializer(Deserializers.Null) .SetValueDeserializer(new AvroDeserializer <User>(schemaRegistry)) .SetPartitionsAssignedHandler((c, partitions) => partitions.Select(tp => new TopicPartitionOffset(tp, Offset.Beginning))) .Build()) { consumer.Subscribe(topic.Name); var cr1 = consumer.Consume(); Assert.NotNull(cr1); Assert.NotNull(cr1.Message); Assert.False(cr1.IsPartitionEOF); var cr2 = consumer.Consume(); Assert.NotNull(cr2); Assert.Null(cr2.Message); Assert.True(cr2.IsPartitionEOF); } consumerConfig = new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = Guid.NewGuid().ToString(), SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetReset.Earliest, EnablePartitionEof = false }; using (var consumer = new ConsumerBuilder <Null, User>(consumerConfig) .SetKeyDeserializer(Deserializers.Null) .SetValueDeserializer(new AvroDeserializer <User>(schemaRegistry)) .SetPartitionsAssignedHandler((c, partitions) => partitions.Select(tp => new TopicPartitionOffset(tp, Offset.Beginning))) .Build()) { consumer.Subscribe(topic.Name); var cr1 = consumer.Consume(); Assert.NotNull(cr1); Assert.NotNull(cr1.Message); Assert.False(cr1.IsPartitionEOF); var cr2 = consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(cr2); } } }
/// <summary> /// Adds or replaces the producer configs. /// </summary> /// <param name="producersConfig">A list of producer configs to add or replace.</param> /// <param name="modUniqueId">The mod unique id.</param> public static void AddProducersConfig(List <ProducerConfig> producersConfig, string modUniqueId = null) { Dictionary <int, string> objects = DataLoader.Helper.Content.Load <Dictionary <int, string> >("Data\\ObjectInformation", ContentSource.GameContent); producersConfig.ForEach(producerConfig => { producerConfig.ModUniqueID = modUniqueId; if (!ValidateConfigProducerName(producerConfig.ProducerName)) { //Do nothing, already logged. } else { if (producerConfig?.LightSource is LightSourceConfig lightSource) { producerConfig.LightSource.Color = new Color(lightSource.ColorRed, lightSource.ColorGreen, lightSource.ColorBlue, lightSource.ColorAlpha); } if (producerConfig.ProducingAnimation != null) { foreach (var animation in producerConfig.ProducingAnimation.AdditionalAnimations) { if (!Int32.TryParse(animation.Key, out int outputIndex)) { KeyValuePair <int, string> pair = objects.FirstOrDefault(o => ObjectUtils.IsObjectStringFromObjectName(o.Value, animation.Key)); if (pair.Value != null) { outputIndex = pair.Key; } else { ProducerFrameworkModEntry.ModMonitor.Log($"No object found for '{animation.Key}', producer '{producerConfig.ProducerName}'. This animation will be ignored.", LogLevel.Debug); break; } } producerConfig.ProducingAnimation.AdditionalAnimationsId[outputIndex] = animation.Value; } } if (producerConfig.ReadyAnimation != null) { foreach (var animation in producerConfig.ReadyAnimation.AdditionalAnimations) { if (!Int32.TryParse(animation.Key, out int outputIndex)) { KeyValuePair <int, string> pair = objects.FirstOrDefault(o => ObjectUtils.IsObjectStringFromObjectName(o.Value, animation.Key)); if (pair.Value != null) { outputIndex = pair.Key; } else { ProducerFrameworkModEntry.ModMonitor.Log($"No object found for '{animation.Key}', producer '{producerConfig.ProducerName}'. This animation will be ignored.", LogLevel.Debug); break; } } producerConfig.ReadyAnimation.AdditionalAnimationsId[outputIndex] = animation.Value; } } AddConfigToRepository(producerConfig); foreach (var n in producerConfig.AdditionalProducerNames) { ProducerConfig newProducerConfig = producerConfig.DeepClone(); newProducerConfig.ProducerName = n; newProducerConfig.AdditionalProducerNames.Clear(); if (ValidateConfigProducerName(newProducerConfig.ProducerName)) { AddConfigToRepository(newProducerConfig); } } producerConfig.AdditionalProducerNames.Clear(); } }); }
public void AssignPastEnd(string bootstrapServers) { LogToFile("start AssignPastEnd"); var consumerConfig = new ConsumerConfig { GroupId = Guid.NewGuid().ToString(), BootstrapServers = bootstrapServers, SessionTimeoutMs = 6000 }; var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var testString = "hello world"; DeliveryResult <Null, byte[]> dr; using (var producer = new ProducerBuilder <Null, byte[]>(producerConfig).Build()) { dr = producer.ProduceAsync(singlePartitionTopic, new Message <Null, byte[]> { Value = Serializers.Utf8.Serialize(testString, SerializationContext.Empty) }).Result; Assert.True(dr.Offset >= 0); producer.Flush(TimeSpan.FromSeconds(10)); } consumerConfig.AutoOffsetReset = AutoOffsetReset.Latest; using (var consumer = new ConsumerBuilder <Null, byte[]>(consumerConfig).Build()) { ConsumeResult <Null, byte[]> record; consumer.Assign(new List <TopicPartitionOffset>() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 1) }); record = consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(record); consumer.Assign(new List <TopicPartitionOffset>() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 2) }); record = consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(record); } consumerConfig.AutoOffsetReset = AutoOffsetReset.Earliest; using (var consumer = new ConsumerBuilder <byte[], byte[]>(consumerConfig).Build()) { ConsumeResult <byte[], byte[]> record; consumer.Assign(new List <TopicPartitionOffset>() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 1) }); record = consumer.Consume(TimeSpan.FromSeconds(2)); Assert.Null(record); // Note: dr.Offset+2 is an invalid offset (c.f. dr.Offset+1 which is valid), so auto.offset.reset will come // into play here to determine which offset to start from (earliest). Due to the produce call above, // there is guaranteed to be a message on the topic, so consumer.Consume will return a record. consumer.Assign(new List <TopicPartitionOffset>() { new TopicPartitionOffset(dr.TopicPartition, dr.Offset + 2) }); record = consumer.Consume(TimeSpan.FromSeconds(10)); Assert.NotNull(record?.Message); } Assert.Equal(0, Library.HandleCount); LogToFile("end AssignPastEnd"); }
public void TestSendToNewTopic() { var producerConfig1 = new ProducerConfig { Serializer = typeof(StringEncoder).AssemblyQualifiedName, KeySerializer = typeof(StringEncoder).AssemblyQualifiedName, PartitionerClass = typeof(StaticPartitioner).AssemblyQualifiedName, Brokers = TestUtils.GetBrokerListFromConfigs( new List<TempKafkaConfig> { this.config1, this.config2 }), RequestRequiredAcks = 2, RequestTimeoutMs = 1000 }; var producerConfig2 = new ProducerConfig { Serializer = typeof(StringEncoder).AssemblyQualifiedName, KeySerializer = typeof(StringEncoder).AssemblyQualifiedName, PartitionerClass = typeof(StaticPartitioner).AssemblyQualifiedName, Brokers = TestUtils.GetBrokerListFromConfigs( new List<TempKafkaConfig> { this.config1, this.config2 }), RequestRequiredAcks = 3, RequestTimeoutMs = 1000 }; var topic = "new-topic"; // create topic with 1 partition and await leadership AdminUtils.CreateTopic(this.ZkClient, topic, 1, 2, new Dictionary<string, string>()); TestUtils.WaitUntilMetadataIsPropagated(this.servers, topic, 0, 1000); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 0, 500); var producer1 = new Producer<string, string>(producerConfig1); var producer2 = new Producer<string, string>(producerConfig2); // Available partition ids should be 0. producer1.Send(new KeyedMessage<string, string>(topic, "test", "test1")); producer1.Send(new KeyedMessage<string, string>(topic, "test", "test2")); // get the leader var leaderOpt = ZkUtils.GetLeaderForPartition(ZkClient, topic, 0); Assert.True(leaderOpt.HasValue); var leader = leaderOpt.Value; var messageSet = (leader == this.config1.BrokerId) ? this.consumer1.Fetch(new FetchRequestBuilder().AddFetch(topic, 0, 0, 10000).Build()) .MessageSet("new-topic", 0) .Iterator() .ToEnumerable() .ToList() : this.consumer2.Fetch(new FetchRequestBuilder().AddFetch(topic, 0, 0, 10000).Build()) .MessageSet("new-topic", 0) .Iterator() .ToEnumerable() .ToList(); Assert.Equal(2, messageSet.Count()); Assert.Equal(new Message(Encoding.UTF8.GetBytes("test1"), Encoding.UTF8.GetBytes("test")), messageSet[0].Message); Assert.Equal(new Message(Encoding.UTF8.GetBytes("test2"), Encoding.UTF8.GetBytes("test")), messageSet[1].Message); producer1.Dispose(); try { producer2.Send(new KeyedMessage<string, string>(topic, "test", "test2")); Assert.False(true, "Should have timed out for 3 acks."); } catch (FailedToSendMessageException) { } finally { producer2.Dispose(); } }
public void CancellationDelayMax(string bootstrapServers) { LogToFile("start CancellationDelayMax"); var consumerConfig = new ConsumerConfig { GroupId = Guid.NewGuid().ToString(), BootstrapServers = bootstrapServers, SessionTimeoutMs = 6000, EnablePartitionEof = false, CancellationDelayMaxMs = 2 }; var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers, CancellationDelayMaxMs = 2 }; var adminClientConfig = new AdminClientConfig { BootstrapServers = bootstrapServers, CancellationDelayMaxMs = 2 }; using (var topic = new TemporaryTopic(bootstrapServers, 3)) using (var consumer = new ConsumerBuilder <byte[], byte[]>(consumerConfig).Build()) using (var producer = new ProducerBuilder <byte[], byte[]>(producerConfig).Build()) using (var adminClient = new AdminClientBuilder(adminClientConfig).Build()) { consumer.Subscribe(topic.Name); // for the consumer, check that the cancellation token is honored. for (int i = 0; i < 20; ++i) { var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(2)); var sw = Stopwatch.StartNew(); try { var record = consumer.Consume(cts.Token); } catch (OperationCanceledException) { // expected. } // 2ms + 2ms + quite a bit of leeway. Note: CancellationDelayMaxMs has been // reduced to 2ms in this test, and we check for an elapsed time less than // this to test that configuration is working. In practice the elapsed time // should be below 4ms almost all of the time. A higher value is apparently required on // Windows (but still less than 50). var elapsed = sw.ElapsedMilliseconds; Skip.If(elapsed > 20); } consumer.Close(); // for the producer, make do with just a simple check that this does not throw or hang. var dr = producer.ProduceAsync(topic.Name, new Message <byte[], byte[]> { Key = new byte[] { 42 }, Value = new byte[] { 255 } }).Result; // for the admin client, make do with just simple check that this does not throw or hang. var cr = new Confluent.Kafka.Admin.ConfigResource { Type = ResourceType.Topic, Name = topic.Name }; var configs = adminClient.DescribeConfigsAsync(new ConfigResource[] { cr }).Result; } Assert.Equal(0, Library.HandleCount); LogToFile("end CancellationDelayMax"); }
public void TestSendNullMessage() { var config = new ProducerConfig { Serializer = typeof(StringEncoder).AssemblyQualifiedName, KeySerializer = typeof(StringEncoder).AssemblyQualifiedName, PartitionerClass = typeof(StaticPartitioner).AssemblyQualifiedName, Brokers = TestUtils.GetBrokerListFromConfigs( new List<TempKafkaConfig> { this.config1, this.config2 }), }; var producer = new Producer<string, string>(config); try { // create topic AdminUtils.CreateTopic(this.ZkClient, "new-topic", 2, 1, new Dictionary<string, string>()); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, "new-topic", 0, 500); producer.Send(new KeyedMessage<string, string>("new-topic", "key", null)); } finally { producer.Dispose(); } }
public CardEventHandler(ProducerConfig producerConfig) { _producerConfig = producerConfig; }
public ProducerWrapper(string topic, ProducerConfig config) { _topic = topic; _config = config; _producer = new ProducerBuilder <Null, string>(_config).Build(); }
static void Main(string[] args) { var server = "10.0.129.75:9092"; var topic = "Kafka.Test"; var groupId = "Kafka.Test.Id"; int i = 0; Console.WriteLine("MENU"); Console.WriteLine("1-Send message"); Console.WriteLine("2-Receive message"); Console.WriteLine("3-Continue"); Console.WriteLine("4/any-Exit"); Console.WriteLine("Please enter number:"); //var str = Console.ReadLine(); //int.TryParse(str, out i); do { //if (i == 1) { #region Send message var kafkaConfig = new ProducerConfig { BootstrapServers = server }; var producer = new ProducerBuilder <Null, string>(kafkaConfig).Build(); var data = new Student { Id = 619, Name = "Doan to chau" }; producer.ProduceAsync(topic, new Message <Null, string> { Value = JsonConvert.SerializeObject(data) }).ContinueWith(c => { }); producer.Flush(TimeSpan.FromSeconds(5)); Console.WriteLine($"-->Send message:{JsonConvert.SerializeObject(data)}"); #endregion } //else if (i == 2) { #region Receive message var config = new ConsumerConfig { BootstrapServers = server, GroupId = groupId, EnableAutoCommit = true, StatisticsIntervalMs = 5000, SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetReset.Earliest, EnablePartitionEof = true }; const int commitPeriod = 5; using (var consumer = new ConsumerBuilder <Ignore, string>(config) .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}")) .SetStatisticsHandler((_, json) => Console.Write($".")) .SetPartitionsAssignedHandler((c, partitions) => { Console.Write($">"); }) .SetPartitionsRevokedHandler((c, partitions) => { Console.Write($"<"); }) .Build()) { consumer.Subscribe(topic); try { var consumeResult = consumer.Consume(TimeSpan.FromSeconds(100)); if (consumeResult == null) { Console.WriteLine("consumeResult is null"); } else if (consumeResult.IsPartitionEOF) { Console.WriteLine("_/_"); } else { Console.WriteLine($"-->Receive message:{consumeResult.Message.Value}"); if (consumeResult.Offset % commitPeriod == 0) { try { consumer.Commit(consumeResult); } catch (KafkaException e) { Console.WriteLine($"Commit error: {e.Error.Reason}"); } } } } catch (OperationCanceledException) { Console.WriteLine("Closing consumer."); consumer.Close(); } } #endregion } //// Console.WriteLine("1-Send message"); Console.WriteLine("2-Receive message"); Console.WriteLine("3-Continue"); Console.WriteLine("4/any-Exit"); //str = Console.ReadLine(); //int.TryParse(str, out i); }while (i < 10); //while (i >= 1 && i <= 3) ; Console.WriteLine("Hello World!"); }
public void Start(string instanceId, CancellationToken cancellationToken = default(CancellationToken)) { funcExecSemaphore = new Semaphore(MaxOutstanding, MaxOutstanding); CancellationTokenSource errorCts = new CancellationTokenSource(); CancellationTokenSource compositeCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken, errorCts.Token); CancellationToken compositeCancellationToken = compositeCts.Token; bool aMessageHasBeenProcessed = false; var cConfig = new ConsumerConfig { ClientId = $"{Name}-consumer-{instanceId}", GroupId = $"{Name}-group", BootstrapServers = BootstrapServers, EnableAutoCommit = true, EnableAutoOffsetStore = false, AutoOffsetReset = AutoOffsetReset.Latest }; if (DebugContext != null) { cConfig.Debug = DebugContext; } var cBuilder = new ConsumerBuilder <TInKey, TInValue>(cConfig); if (InKeyDeserializer != null) { cBuilder.SetKeyDeserializer(InKeyDeserializer); } if (InValueDeserializer != null) { cBuilder.SetValueDeserializer(InValueDeserializer); } if (Logger != null) { cBuilder.SetLogHandler((_, m) => { Logger(m); }); } cBuilder.SetErrorHandler((c, e) => { if (e.Code == ErrorCode.Local_AllBrokersDown || e.Code == ErrorCode.Local_Authentication) { if (!aMessageHasBeenProcessed) { // Logger.Log(e); errorCts.Cancel(); return; } } if (Logger != null) { Logger(new LogMessage(c.Name, SyslogLevel.Error, "unknown", e.Reason)); } }); var pConfig = new ProducerConfig { ClientId = $"{Name}-producer-{instanceId}", BootstrapServers = BootstrapServers, EnableIdempotence = true, LingerMs = 5, DeliveryReportFields = "none" }; if (DebugContext != null) { pConfig.Debug = DebugContext; } var pBuilder = new ProducerBuilder <TOutKey, TOutValue>(pConfig); if (OutKeySerializer != null) { pBuilder.SetKeySerializer(OutKeySerializer); } if (OutValueSerializer != null) { pBuilder.SetValueSerializer(OutValueSerializer); } if (Logger != null) { pBuilder.SetLogHandler((_, m) => { Logger(m); }); } pBuilder.SetErrorHandler((p, e) => { if (e.IsFatal) { errorCts.Cancel(); return; } if (e.Code == ErrorCode.Local_AllBrokersDown || e.Code == ErrorCode.Local_Authentication) { if (!aMessageHasBeenProcessed) { errorCts.Cancel(); return; } } if (Logger != null) { Logger(new LogMessage(p.Name, SyslogLevel.Error, "unknown", e.Reason)); } }); var partitionState = new Dictionary <TopicPartition, PartitionState>(); using (var producer = pBuilder.Build()) using (var consumer = cBuilder.Build()) { consumer.Subscribe(InputTopic); try { while (true) { ConsumeResult <TInKey, TInValue> cr; try { cr = consumer.Consume(compositeCancellationToken); } catch (ConsumeException ex) { if (ex.Error.Code == ErrorCode.Local_ValueDeserialization) { // For an in-depth discussion of what to do in the event of deserialization errors, refer to: // https://www.confluent.io/blog/kafka-connect-deep-dive-error-handling-dead-letter-queues if (ConsumeErrorTolerance == ErrorTolerance.All) { continue; } errorCts.Cancel(); // no error tolerance. } Thread.Sleep(TimeSpan.FromSeconds(10)); // ?? if not fail fast, do we want to sleep and why? continue; } if (!partitionState.ContainsKey(cr.TopicPartition)) { partitionState.Add(cr.TopicPartition, new PartitionState(this)); } partitionState[cr.TopicPartition].HandleConsumedMessage(cr, consumer, producer, funcExecSemaphore, errorCts); aMessageHasBeenProcessed = true; } } catch (OperationCanceledException) { } } if (errorCts.IsCancellationRequested) { throw new Exception("error occurred, and we're failing fast."); } }
public static void ProduceConsumeGeneric(string bootstrapServers, string schemaRegistryServers) { var s = (RecordSchema)RecordSchema.Parse( @"{ ""namespace"": ""Confluent.Kafka.Examples.AvroSpecific"", ""type"": ""record"", ""name"": ""User"", ""fields"": [ {""name"": ""name"", ""type"": ""string""}, {""name"": ""favorite_number"", ""type"": [""int"", ""null""]}, {""name"": ""favorite_color"", ""type"": [""string"", ""null""]} ] }" ); var config = new ProducerConfig { BootstrapServers = bootstrapServers }; var schemaRegistryConfig = new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryServers }; var topic = Guid.NewGuid().ToString(); DeliveryResult <Null, GenericRecord> dr; using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var p = new ProducerBuilder <Null, GenericRecord>(config) .SetKeySerializer(Serializers.Null) .SetValueSerializer(new AsyncAvroSerializer <GenericRecord>(schemaRegistry)) .Build()) { var record = new GenericRecord(s); record.Add("name", "my name 2"); record.Add("favorite_number", 44); record.Add("favorite_color", null); dr = p.ProduceAsync(topic, new Message <Null, GenericRecord> { Value = record }).Result; } // produce a specific record (to later consume back as a generic record). using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var p = new ProducerBuilder <Null, User>(config) .SetKeySerializer(Serializers.Null) .SetValueSerializer(new AsyncAvroSerializer <User>(schemaRegistry)) .Build()) { var user = new User { name = "my name 3", favorite_number = 47, favorite_color = "orange" }; p.ProduceAsync(topic, new Message <Null, User> { Value = user }).Wait(); } Assert.Null(dr.Message.Key); Assert.NotNull(dr.Message.Value); dr.Message.Value.TryGetValue("name", out object name); dr.Message.Value.TryGetValue("favorite_number", out object number); dr.Message.Value.TryGetValue("favorite_color", out object color); Assert.IsType <string>(name); Assert.IsType <int>(number); Assert.Equal("my name 2", name); Assert.Equal(44, number); Assert.Null(color); var cconfig = new ConsumerConfig { GroupId = Guid.NewGuid().ToString(), BootstrapServers = bootstrapServers }; using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var consumer = new ConsumerBuilder <Null, GenericRecord>(cconfig) .SetKeyDeserializer(Deserializers.Null) .SetValueDeserializer(new AsyncAvroDeserializer <GenericRecord>(schemaRegistry).AsSyncOverAsync()) .Build()) { // consume generic record produced as a generic record. consumer.Assign(new List <TopicPartitionOffset> { new TopicPartitionOffset(topic, 0, dr.Offset) }); var record = consumer.Consume(new CancellationTokenSource(TimeSpan.FromSeconds(10)).Token); record.Message.Value.TryGetValue("name", out object msgName); record.Message.Value.TryGetValue("favorite_number", out object msgNumber); record.Message.Value.TryGetValue("favorite_color", out object msgColor); Assert.IsType <string>(msgName); Assert.IsType <int>(msgNumber); Assert.Equal("my name 2", msgName); Assert.Equal(44, msgNumber); Assert.Null(msgColor); // consume generic record produced as a specific record. 
record = consumer.Consume(new CancellationTokenSource(TimeSpan.FromSeconds(10)).Token); record.Message.Value.TryGetValue("name", out msgName); record.Message.Value.TryGetValue("favorite_number", out msgNumber); record.Message.Value.TryGetValue("favorite_color", out msgColor); Assert.IsType <string>(msgName); Assert.IsType <int>(msgNumber); Assert.IsType <string>(msgColor); Assert.Equal("my name 3", msgName); Assert.Equal(47, msgNumber); Assert.Equal("orange", msgColor); } using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var consumer = new ConsumerBuilder <Null, User>(cconfig) .SetKeyDeserializer(Deserializers.Null) .SetValueDeserializer(new AsyncAvroDeserializer <User>(schemaRegistry).AsSyncOverAsync()) .Build()) { consumer.Assign(new List <TopicPartitionOffset> { new TopicPartitionOffset(topic, 0, dr.Offset) }); var record = consumer.Consume(new CancellationTokenSource(TimeSpan.FromSeconds(10)).Token); Assert.Equal("my name 2", record.Message.Value.name); Assert.Equal(44, record.Message.Value.favorite_number); Assert.Null(record.Message.Value.favorite_color); } }
public BusPublisher(ProducerConfig config) { _producer = new ProducerBuilder <Null, string>(config).Build(); }
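BusPublisher's publish side is not shown. A minimal sketch assuming it forwards to the built producer (PublishAsync is a hypothetical name):

public async Task PublishAsync(string topic, string payload)
{
    // With a Null key, partition selection falls to the configured
    // partitioner (random for null keys by default).
    await _producer.ProduceAsync(topic, new Message<Null, string> { Value = payload });
}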
public ProcessOrdersService(ConsumerConfig consumerConfig, ProducerConfig producerConfig) { this.producerConfig = producerConfig; this.consumerConfig = consumerConfig; }
public static void ProduceConsume(string bootstrapServers, string schemaRegistryServers) { var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var consumerConfig = new ConsumerConfig { BootstrapServers = bootstrapServers, GroupId = Guid.NewGuid().ToString(), SessionTimeoutMs = 6000, AutoOffsetReset = AutoOffsetResetType.Earliest, EnablePartitionEof = true }; var schemaRegistryConfig = new SchemaRegistryConfig { SchemaRegistryUrl = schemaRegistryServers }; var adminClientConfig = new AdminClientConfig { BootstrapServers = bootstrapServers }; string topic = Guid.NewGuid().ToString(); using (var adminClient = new AdminClient(adminClientConfig)) { adminClient.CreateTopicsAsync( new List <TopicSpecification> { new TopicSpecification { Name = topic, NumPartitions = 1, ReplicationFactor = 1 } }).Wait(); } using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new Producer <string, User>(producerConfig, new AvroSerializer <string>(schemaRegistry), new AvroSerializer <User>(schemaRegistry))) { for (int i = 0; i < 100; ++i) { var user = new User { name = i.ToString(), favorite_number = i, favorite_color = "blue" }; producer .ProduceAsync(topic, new Message <string, User> { Key = user.name, Value = user }) .Wait(); } Assert.Equal(0, producer.Flush(TimeSpan.FromSeconds(10))); } using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var consumer = new Consumer <string, User>(consumerConfig, new AvroDeserializer <string>(schemaRegistry), new AvroDeserializer <User>(schemaRegistry))) { consumer.OnError += (_, e) => Assert.True(false, e.Reason); consumer.Subscribe(topic); int i = 0; while (true) { var record = consumer.Consume(TimeSpan.FromMilliseconds(100)); if (record == null) { continue; } if (record.IsPartitionEOF) { break; } Assert.Equal(i.ToString(), record.Message.Key); Assert.Equal(i.ToString(), record.Message.Value.name); Assert.Equal(i, record.Message.Value.favorite_number); Assert.Equal("blue", record.Message.Value.favorite_color); i += 1; } Assert.Equal(100, i); consumer.Close(); } }
public static void SyncOverAsync(string bootstrapServers, string schemaRegistryServers) { ThreadPool.GetMaxThreads(out int originalWorkerThreads, out int originalCompletionPortThreads); ThreadPool.GetMinThreads(out int workerThreads, out int completionPortThreads); ThreadPool.SetMaxThreads(workerThreads, completionPortThreads); ThreadPool.GetMaxThreads(out workerThreads, out completionPortThreads); var pConfig = new ProducerConfig { BootstrapServers = bootstrapServers }; var schemaRegistryConfig = new SchemaRegistryConfig { Url = schemaRegistryServers }; var topic = Guid.NewGuid().ToString(); using (var schemaRegistry = new CachedSchemaRegistryClient(schemaRegistryConfig)) using (var producer = new ProducerBuilder <Null, string>(pConfig) .SetValueSerializer(new AvroSerializer <string>(schemaRegistry).AsSyncOverAsync()) .Build()) { var tasks = new List <Task>(); // will deadlock if N >= workerThreads. Set to max number that // should not deadlock. int N = workerThreads - 1; for (int i = 0; i < N; ++i) { Func <int, Action> actionCreator = (taskNumber) => { return(() => { object waitObj = new object(); Action <DeliveryReport <Null, string> > handler = dr => { Assert.True(dr.Error.Code == ErrorCode.NoError); lock (waitObj) { Monitor.Pulse(waitObj); } }; producer.Produce(topic, new Message <Null, string> { Value = $"value: {taskNumber}" }, handler); lock (waitObj) { Monitor.Wait(waitObj); } }); }; tasks.Add(Task.Run(actionCreator(i))); } Task.WaitAll(tasks.ToArray()); } ThreadPool.SetMaxThreads(originalWorkerThreads, originalCompletionPortThreads); Assert.Equal(0, Library.HandleCount); }
/// <summary> /// Sets configurations in the producer based on a <see cref="P:Confluent.Kafka.ProducerConfig"/> instance /// </summary> /// <param name="builder">A class that implements <see cref="IProducerConfigurationBuilder"/></param> /// <param name="config"><see cref="P:Confluent.Kafka.ProducerConfig"/> instance</param> /// <returns></returns> public static IProducerConfigurationBuilder WithProducerConfig(this IProducerConfigurationBuilder builder, ProducerConfig config) { return(((ProducerConfigurationBuilder)builder).WithProducerConfig(config)); }
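A short usage sketch for this extension, seeding the builder from a prebuilt Confluent.Kafka ProducerConfig (the surrounding builder pipeline is assumed):

builder.WithProducerConfig(new ProducerConfig
{
    BootstrapServers = "localhost:9092",
    EnableIdempotence = true, // any librdkafka producer setting can be supplied here
    LingerMs = 5
});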
static void Main(string[] args) { var switchMappings = new Dictionary <string, string>() { { "-newmsg", "key1" }, { "-topic", "key2" }, { "-partition", "key3" }, }; var builder = new ConfigurationBuilder(); builder.AddCommandLine(args, switchMappings); var config = builder.Build(); int _numberOfMessages = 0; int _numberOfPartitions = 1; int _maxNumberOfMessagesToDisplay = 100; if (args.Length == 0) { _numberOfMessages = 1; } else { try { _numberOfMessages = Convert.ToInt32(config["Key1"]); _numberOfPartitions = Convert.ToInt32(config["Key3"]); } catch { Console.WriteLine("Invalid argument!"); Environment.Exit(0); } } AutoResetEvent _closing = new AutoResetEvent(false); IProducer <string, string> producer = null; ProducerConfig producerConfig = null; CreateConfig(); CreateProducer(); for (int i = 0; i < _numberOfMessages; i++) { var _key = _numberOfPartitions == 0 ? 1 : i % _numberOfPartitions; SendMessage(!string.IsNullOrWhiteSpace(config["Key2"]) ? config["Key2"] : "testTopic", string.Format("This is a test: {0}", i), _numberOfMessages < _maxNumberOfMessagesToDisplay ? true : false, _key); } Console.WriteLine("Press Ctrl+C to exit"); Console.CancelKeyPress += new ConsoleCancelEventHandler(OnExit); _closing.WaitOne(); void OnExit(object sender, ConsoleCancelEventArgs args) { Console.WriteLine("Exit"); _closing.Set(); } void CreateConfig() { producerConfig = new ProducerConfig { BootstrapServers = "localhost:9092", }; } void CreateProducer() { var pb = new ProducerBuilder <string, string>(producerConfig); producer = pb.Build(); } async void SendMessage(string topic, string message, bool display, int key) { var msg = new Message <string, string> { Key = key.ToString(), Value = message }; DeliveryResult <string, string> delRep; if (key > 1) { var p = new Partition(key); var tp = new TopicPartition(topic, p); delRep = await producer.ProduceAsync(tp, msg); } else { delRep = await producer.ProduceAsync(topic, msg); } var topicOffset = delRep.TopicPartitionOffset; if (display) { Console.WriteLine($"Delivered '{delRep.Value}' to: {topicOffset}"); } } }
public KafkaProducer(AppKafkaOptions configuration) { _config = configuration.ProducerConfig(); }
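AppKafkaOptions is not shown; a plausible sketch of an options type whose ProducerConfig() method maps app-level settings onto the client config (all names here are assumptions):

public class AppKafkaOptions
{
    public string BootstrapServers { get; set; }
    public string ClientId { get; set; }

    // Translate the app options into the Confluent.Kafka config object
    // consumed by KafkaProducer above.
    public ProducerConfig ProducerConfig() => new ProducerConfig
    {
        BootstrapServers = BootstrapServers,
        ClientId = ClientId
    };
}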
/// <summary> /// Used by a non-producer client to send a metadata request /// </summary> /// <param name="topics">The topics for which the metadata needs to be fetched</param> /// <param name="brokers">The brokers in the cluster as configured on the client</param> /// <param name="clientId">The client's identifier</param> /// <param name="timeoutMs">The request timeout in milliseconds</param> /// <param name="correlationId">The correlation id of the request</param> /// <returns>The topic metadata response</returns> public static TopicMetadataResponse FetchTopicMetadata( ISet<string> topics, List<Broker> brokers, string clientId, int timeoutMs, int correlationId = 0) { var config = new ProducerConfig(); config.ClientId = clientId; config.RequestTimeoutMs = timeoutMs; config.Brokers = brokers.Select(b => new BrokerConfiguration { BrokerId = b.Id, Host = b.Host, Port = b.Port }).ToList(); return FetchTopicMetadata(topics, brokers, config, correlationId); }
public void TestUpdateBrokerPartitionInfo() { var topic = "new-topic"; AdminUtils.CreateTopic(this.ZkClient, topic, 1, 2, new Dictionary<string, string>()); // wait until the update metadata request for new topic reaches all servers TestUtils.WaitUntilMetadataIsPropagated(this.servers, topic, 0, 3000); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 0, 500); var producerConfig1 = new ProducerConfig(); producerConfig1.Brokers = new List<BrokerConfiguration> { new BrokerConfiguration { BrokerId = 0, Host = "localhost", Port = 18203 }, new BrokerConfiguration { BrokerId = 1, Host = "localhost", Port = 18204 } }; producerConfig1.KeySerializer = typeof(StringEncoder).AssemblyQualifiedName; producerConfig1.Serializer = typeof(StringEncoder).AssemblyQualifiedName; var producer1 = new Producer<string, string>(producerConfig1); try { producer1.Send(new KeyedMessage<string, string>(topic, "test", "test1")); Assert.False(true, "Test should fail because the broker list provided are not valid"); } catch (FailedToSendMessageException) { // ok } finally { producer1.Dispose(); } var producerConfig2 = new ProducerConfig(); producerConfig2.Brokers = new List<BrokerConfiguration> { new BrokerConfiguration { BrokerId = 0, Host = "localhost", Port = 18203 }, new BrokerConfiguration { BrokerId = 1, Host = "localhost", Port = this.port1 } }; producerConfig2.KeySerializer = typeof(StringEncoder).AssemblyQualifiedName; producerConfig2.Serializer = typeof(StringEncoder).AssemblyQualifiedName; var producer2 = new Producer<string, string>(producerConfig2); try { producer2.Send(new KeyedMessage<string, string>(topic, "test", "test1")); } finally { producer2.Dispose(); } var producerConfig3 = new ProducerConfig(); producerConfig3.Brokers = new List<BrokerConfiguration> { new BrokerConfiguration { BrokerId = 0, Host = "localhost", Port = this.port1 }, new BrokerConfiguration { BrokerId = 1, Host = "localhost", Port = this.port2 } }; producerConfig3.KeySerializer = typeof(StringEncoder).AssemblyQualifiedName; producerConfig3.Serializer = typeof(StringEncoder).AssemblyQualifiedName; var producer3 = new Producer<string, string>(producerConfig3); try { producer3.Send(new KeyedMessage<string, string>(topic, "test", "test1")); } finally { producer3.Dispose(); } }
public static void CancellationDelayMax(string bootstrapServers, string singlePartitionTopic, string partitionedTopic) { LogToFile("start CancellationDelayMax"); var consumerConfig = new ConsumerConfig { GroupId = Guid.NewGuid().ToString(), BootstrapServers = bootstrapServers, SessionTimeoutMs = 6000, EnablePartitionEof = false, CancellationDelayMaxMs = 2 }; var producerConfig = new ProducerConfig { BootstrapServers = bootstrapServers, CancellationDelayMaxMs = 2 }; var adminClientConfig = new AdminClientConfig { BootstrapServers = bootstrapServers, CancellationDelayMaxMs = 2 }; using (var topic = new TemporaryTopic(bootstrapServers, 3)) using (var consumer = new Consumer(consumerConfig)) using (var producer = new Producer(producerConfig)) using (var adminClient = new AdminClient(adminClientConfig)) { consumer.Subscribe(topic.Name); // for the consumer, check that the cancellation token is honored. for (int i = 0; i < 20; ++i) { var cts = new CancellationTokenSource(TimeSpan.FromMilliseconds(2)); var sw = Stopwatch.StartNew(); try { var record = consumer.Consume(cts.Token); } catch (OperationCanceledException) { // expected. } // 2ms + 2ms + quite a bit of leeway (but still much less than the default of 50). // In practice this should be below 4ms almost all of the time. var elapsed = sw.ElapsedMilliseconds; Skip.If(elapsed > 8); } consumer.Close(); // for the producer, make do with just a simple check that this does not throw or hang. var dr = producer.ProduceAsync(topic.Name, new Message { Key = new byte[] { 42 }, Value = new byte[] { 255 } }).Result; // for the admin client, make do with just simple check that this does not throw or hang. var cr = new Confluent.Kafka.Admin.ConfigResource { Type = ResourceType.Topic, Name = topic.Name }; var configs = adminClient.DescribeConfigsAsync(new ConfigResource[] { cr }).Result; } Assert.Equal(0, Library.HandleCount); LogToFile("end CancellationDelayMax"); }
public void TestSendWithDeadBroker() { var config = new ProducerConfig { Serializer = typeof(StringEncoder).AssemblyQualifiedName, KeySerializer = typeof(StringEncoder).AssemblyQualifiedName, PartitionerClass = typeof(StaticPartitioner).AssemblyQualifiedName, Brokers = TestUtils.GetBrokerListFromConfigs( new List<TempKafkaConfig> { this.config1, this.config2 }), RequestRequiredAcks = 1, RequestTimeoutMs = 2000 }; var topic = "new-topic"; // create topic AdminUtils.CreateOrUpdateTopicPartitionAssignmentPathInZK( this.ZkClient, topic, new Dictionary<int, List<int>> { { 0, new List<int> { 0 } }, { 1, new List<int> { 0 } }, { 2, new List<int> { 0 } }, { 3, new List<int> { 0 } }, }, new Dictionary<string, string>()); // waiting for 1 partition is enough TestUtils.WaitUntilMetadataIsPropagated(this.servers, topic, 0, 1000); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 0, 500); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 1, 500); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 2, 500); TestUtils.WaitUntilLeaderIsElectedOrChanged(this.ZkClient, topic, 3, 500); var producer = new Producer<string, string>(config); try { // Available partition ids should be 0, 1, 2 and 3, all lead and hosted only // on broker 0 producer.Send(new KeyedMessage<string, string>(topic, "test", "test1")); } finally { Thread.Sleep(1000); // wait for server to fetch message from consumer // kill the server using (this.server1) { this.server1.Kill(); SpinWait.SpinUntil(() => this.server1.HasExited, 500); } } try { // These sends should fail since there are no available brokers producer.Send(new KeyedMessage<string, string>(topic, "test", "test1")); Assert.True(false, "Should fail since no leader exists for the partition."); } catch { } // NOTE we can rewrite rest of test as we can't do the clean shutdown producer.Dispose(); }
// This method gets called by the runtime. Use this method to add services to the container.
public void ConfigureServices(IServiceCollection services)
{
    services.AddDbContext<CardContext>(optBuilder =>
    {
        var connectionString = Environment.GetEnvironmentVariable("CardDb");
        optBuilder.UseMySQL(connectionString);
    });
    services.AddScoped<ICardContext, CardContext>();
    services.AddScoped<ICardOperations, CardOperations>();
    services.AddScoped<ICardEventHandler, CardEventHandler>();

    var prconfig = new Dictionary<string, string>
    {
        { "bootstrap.servers", Environment.GetEnvironmentVariable("Producer") }
    };
    var producerConfig = new ProducerConfig(prconfig);
    services.AddSingleton<ProducerConfig>(producerConfig);

    var cardEventHandler = new CardEventHandler(
        producerConfig,
        services.BuildServiceProvider().GetRequiredService<ILogger<CardEventHandler>>());
    services.AddSingleton<CardEventHandler>(cardEventHandler);

    var config = new ConsumerConfig
    {
        BootstrapServers = Environment.GetEnvironmentVariable("Producer"),
        GroupId = Environment.GetEnvironmentVariable("GroupId"),
        AllowAutoCreateTopics = true,
        EnableAutoCommit = false
    };
    // register the consumer config so the hosted listeners below can resolve it
    // (in the original it was constructed but never used - assumed to be an oversight).
    services.AddSingleton<ConsumerConfig>(config);

    services.AddCors(c =>
    {
        c.AddPolicy("AllowOrigin", options => options.AllowAnyOrigin().AllowAnyMethod().AllowAnyHeader());
    });

    services.AddHostedService<UrlDeletionEventListener>();
    services.AddHostedService<CardEventListener>();

    services.AddControllers(setupAction =>
    {
        setupAction.Filters.Add(new ProducesResponseTypeAttribute((int)HttpStatusCode.BadRequest));
        setupAction.Filters.Add(new ProducesResponseTypeAttribute((int)HttpStatusCode.NotAcceptable));
        setupAction.Filters.Add(new ProducesResponseTypeAttribute((int)HttpStatusCode.InternalServerError));
        setupAction.Filters.Add(new ProducesAttribute("application/json"));
        setupAction.ReturnHttpNotAcceptable = true;
    });

    services.AddSwaggerGen(setup =>
    {
        setup.SwaggerDoc("CardOpenApiSpecification", new OpenApiInfo()
        {
            Title = "Cards Api",
            Version = "1",
            Description = "Through this api the user can create and access the cards"
        });
        var xmlCommentFile = $"{Assembly.GetExecutingAssembly().GetName().Name}.xml";
        var xmlPath = Path.Combine(AppContext.BaseDirectory, xmlCommentFile);
        setup.IncludeXmlComments(xmlPath);
    });
}
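// A minimal sketch of what a hosted listener like CardEventListener might look like,
// assuming it resolves the registered ConsumerConfig via DI. The class name, topic
// name, and handling logic are hypothetical; the real implementation is not shown here.
public class ExampleEventListener : BackgroundService
{
    private readonly ConsumerConfig config;

    public ExampleEventListener(ConsumerConfig config) => this.config = config;

    protected override Task ExecuteAsync(CancellationToken stoppingToken) =>
        Task.Run(() =>
        {
            using var consumer = new ConsumerBuilder<Ignore, string>(config).Build();
            consumer.Subscribe("card-events"); // hypothetical topic name
            try
            {
                while (!stoppingToken.IsCancellationRequested)
                {
                    var result = consumer.Consume(stoppingToken);
                    // handle the event here; EnableAutoCommit is false above,
                    // so commit explicitly once the message has been processed.
                    consumer.Commit(result);
                }
            }
            catch (OperationCanceledException)
            {
                // shutting down.
            }
            finally
            {
                consumer.Close();
            }
        }, stoppingToken);
}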
private static long BenchmarkProducerImpl(
    string bootstrapServers,
    string topic,
    int nMessages,
    int nTests,
    int nHeaders,
    bool useDeliveryHandler)
{
    // mirrors the librdkafka performance test example.
    var config = new ProducerConfig
    {
        BootstrapServers = bootstrapServers,
        QueueBufferingMaxMessages = 2000000,
        MessageSendMaxRetries = 3,
        RetryBackoffMs = 500,
        LingerMs = 100,
        DeliveryReportFields = "none"
    };

    DeliveryResult<Null, byte[]> firstDeliveryReport = null;

    Headers headers = null;
    if (nHeaders > 0)
    {
        headers = new Headers();
        for (int i = 0; i < nHeaders; ++i)
        {
            headers.Add($"header-{i+1}", new byte[] { (byte)i, (byte)(i + 1), (byte)(i + 2), (byte)(i + 3) });
        }
    }

    using (var producer = new ProducerBuilder<Null, byte[]>(config).Build())
    {
        for (var j = 0; j < nTests; j += 1)
        {
            Console.WriteLine($"{producer.Name} producing on {topic} " + (useDeliveryHandler ? "[Action<Message>]" : "[Task]"));

            byte cnt = 0;
            var val = new byte[100].Select(a => ++cnt).ToArray();

            // this avoids including connection setup, topic creation time, etc. in the result.
            firstDeliveryReport = producer.ProduceAsync(topic, new Message<Null, byte[]> { Value = val, Headers = headers }).Result;

            var startTime = DateTime.Now.Ticks;

            if (useDeliveryHandler)
            {
                var autoEvent = new AutoResetEvent(false);
                var msgCount = nMessages;
                Action<DeliveryReport<Null, byte[]>> deliveryHandler = (DeliveryReport<Null, byte[]> deliveryReport) =>
                {
                    if (deliveryReport.Error.IsError)
                    {
                        // Not interested in benchmark results in the (unlikely) event there is an error.
                        Console.WriteLine($"An error occurred producing a message: {deliveryReport.Error.Reason}");
                        Environment.Exit(1); // note: exceptions do not currently propagate to calling code from a deliveryHandler method.
                    }

                    if (--msgCount == 0)
                    {
                        autoEvent.Set();
                    }
                };

                for (int i = 0; i < nMessages; i += 1)
                {
                    try
                    {
                        producer.Produce(topic, new Message<Null, byte[]> { Value = val, Headers = headers }, deliveryHandler);
                    }
                    catch (ProduceException<Null, byte[]> ex)
                    {
                        if (ex.Error.Code == ErrorCode.Local_QueueFull)
                        {
                            producer.Poll(TimeSpan.FromSeconds(1));
                            i -= 1;
                        }
                        else
                        {
                            throw;
                        }
                    }
                }

                while (true)
                {
                    if (autoEvent.WaitOne(TimeSpan.FromSeconds(1)))
                    {
                        break;
                    }
                    Console.WriteLine(msgCount);
                }
            }
            else
            {
                try
                {
                    var tasks = new Task[nMessages];
                    for (int i = 0; i < nMessages; i += 1)
                    {
                        tasks[i] = producer.ProduceAsync(topic, new Message<Null, byte[]> { Value = val, Headers = headers });
                        if (tasks[i].IsFaulted)
                        {
                            if (((ProduceException<Null, byte[]>)tasks[i].Exception.InnerException).Error.Code == ErrorCode.Local_QueueFull)
                            {
                                producer.Poll(TimeSpan.FromSeconds(1));
                                i -= 1;
                            }
                            else
                            {
                                // unexpected, abort benchmark test.
                                throw tasks[i].Exception;
                            }
                        }
                    }

                    Task.WaitAll(tasks);
                }
                catch (AggregateException ex)
                {
                    Console.WriteLine(ex.Message);
                }
            }

            var duration = DateTime.Now.Ticks - startTime;
            Console.WriteLine($"Produced {nMessages} messages in {duration/10000.0:F0}ms");
            Console.WriteLine($"{nMessages / (duration/10000.0):F0}k msg/s");
        }

        producer.Flush(TimeSpan.FromSeconds(10));
    }

    return firstDeliveryReport.Offset;
}
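// The Local_QueueFull handling in the benchmark above is the standard backpressure
// pattern for Produce: when the client's internal queue is full, serve delivery
// callbacks via Poll() to drain it, then retry the same message. A minimal sketch
// of that pattern in isolation (the helper name is hypothetical):
static void ProduceWithBackpressure<TKey, TValue>(
    IProducer<TKey, TValue> producer, string topic, Message<TKey, TValue> message)
{
    while (true)
    {
        try
        {
            producer.Produce(topic, message);
            return;
        }
        catch (ProduceException<TKey, TValue> ex) when (ex.Error.Code == ErrorCode.Local_QueueFull)
        {
            // Block for up to 1s serving delivery reports to free queue space, then retry.
            producer.Poll(TimeSpan.FromSeconds(1));
        }
    }
}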
static void Main(string[] args)
{
    int messages = 1;
    if (args.Length > 0)
    {
        messages = int.Parse(args[0]);
    }

    var config = new ProducerConfig
    {
        BootstrapServers = "broker1:9092",
    };

    var avroConfig = new AvroSerdeProviderConfig
    {
        // Note: you can specify more than one schema registry url using the
        // schema.registry.url property for redundancy (comma separated list).
        // The property name is not plural to follow the convention set by
        // the Java implementation.
        SchemaRegistryUrl = "http://*****:*****@somewhere.io"
    };

    // NOTE: the original snippet was truncated at this point (the masked credentials
    // in the schema registry url swallowed the producer setup). The block below is a
    // best-effort sketch of the usual AvroSerdeProvider produce loop; the topic name
    // is hypothetical and `httpRequest` is assumed to be a string value defined
    // elsewhere in the original program.
    using (var serdeProvider = new AvroSerdeProvider(avroConfig))
    using (var producer = new Producer<string, string>(
        config,
        serdeProvider.GetSerializerGenerator<string>(),
        serdeProvider.GetSerializerGenerator<string>()))
    {
        for (int i = 0; i < messages; ++i)
        {
            producer.ProduceAsync("http-requests", new Message<string, string>
            {
                Value = httpRequest
            });
        }

        producer.Flush(TimeSpan.FromSeconds(30));
    }
}
public ProducerThread(
    int threadId,
    ProducerPerfConfig config,
    AtomicLong totalBytesSent,
    AtomicLong totalMessagesSent,
    CountdownEvent allDone,
    Random rand)
{
    this.threadId = threadId;
    this.config = config;
    this.totalBytesSent = totalBytesSent;
    this.totalMessagesSent = totalMessagesSent;
    this.allDone = allDone;
    this.rand = rand;

    this.producerConfig = new ProducerConfig();
    this.producerConfig.Brokers = config.BrokerList.Split(',')
        .Select((s, idx) => new BrokerConfiguration
        {
            BrokerId = idx,
            Host = s.Split(':')[0],
            Port = int.Parse(s.Split(':')[1])
        }).ToList();
    this.producerConfig.CompressionCodec = config.CompressionCodec;
    this.producerConfig.SendBufferBytes = 64 * 1024;
    if (!config.IsSync)
    {
        this.producerConfig.ProducerType = ProducerTypes.Async;
        this.producerConfig.BatchNumMessages = config.BatchSize;
        this.producerConfig.QueueEnqueueTimeoutMs = -1;
    }

    this.producerConfig.ClientId = "ProducerPerformance";
    this.producerConfig.RequestRequiredAcks = config.ProducerRequestRequiredAcks;
    this.producerConfig.RequestTimeoutMs = config.ProducerRequestTimeoutMs;
    this.producerConfig.MessageSendMaxRetries = config.ProducerNumRetries;
    this.producerConfig.RetryBackoffMs = config.ProducerRetryBackoffMs;
    this.producerConfig.Serializer = typeof(DefaultEncoder).AssemblyQualifiedName;
    this.producerConfig.KeySerializer = typeof(NullEncoder<long>).AssemblyQualifiedName;

    this.producer = new Producer<long, byte[]>(this.producerConfig);
    this.messagesPerThread = config.NumMessages / config.NumThreads;
    Logger.DebugFormat("Messages per thread = {0}", this.messagesPerThread);
}
public void LogDelegate(string bootstrapServers)
{
    LogToFile("start LogDelegate");

    var logCount = 0;

    var consumerConfig = new ConsumerConfig
    {
        GroupId = Guid.NewGuid().ToString(),
        BootstrapServers = bootstrapServers,
        Debug = "all"
    };
    var producerConfig = new ProducerConfig
    {
        BootstrapServers = bootstrapServers,
        Debug = "all"
    };
    var adminConfig = new AdminClientConfig
    {
        BootstrapServers = bootstrapServers,
        Debug = "all"
    };

    DeliveryResult<byte[], byte[]> dr;
    using (var producer = new ProducerBuilder<byte[], byte[]>(producerConfig)
        .SetLogHandler((_, m) => logCount += 1)
        .Build())
    {
        dr = producer.ProduceAsync(
            singlePartitionTopic,
            new Message<byte[], byte[]> { Value = Serializers.Utf8.Serialize("test value", SerializationContext.Empty) }).Result;
        producer.Flush(TimeSpan.FromSeconds(10));
    }
    Assert.True(logCount > 0);

    logCount = 0;
    using (var consumer = new ConsumerBuilder<byte[], byte[]>(consumerConfig)
        .SetLogHandler((_, m) => logCount += 1)
        .Build())
    {
        consumer.Assign(new TopicPartition(singlePartitionTopic, 0));
        consumer.Consume(TimeSpan.FromSeconds(10));
    }
    Assert.True(logCount > 0);

    logCount = 0;
    using (var adminClient = new AdminClientBuilder(adminConfig)
        .SetLogHandler((_, m) => logCount += 1)
        .Build())
    {
        adminClient.GetMetadata(TimeSpan.FromSeconds(1));
    }
    Assert.True(logCount > 0);

    Assert.Equal(0, Library.HandleCount);
    LogToFile("end LogDelegate");
}
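// The SetLogHandler hooks above just count log lines; in application code the same
// hook is typically used to route librdkafka's logs into the app's own logger rather
// than stderr. A minimal sketch, assuming Microsoft.Extensions.Logging's ILogger is
// available (the helper name and level mapping are illustrative choices):
static IProducer<byte[], byte[]> BuildProducerWithLogging(ProducerConfig config, ILogger logger) =>
    new ProducerBuilder<byte[], byte[]>(config)
        .SetLogHandler((_, m) => logger.Log(
            // SyslogLevel orders severity from Emergency (0) down to Debug (7),
            // so lower values are more severe.
            m.Level <= SyslogLevel.Error ? LogLevel.Error : LogLevel.Debug,
            "librdkafka {Facility}: {Message}", m.Facility, m.Message))
        .Build();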