public static void SetEventsHandlers(
    this IConfluentProducerBuilder producerBuilder,
    KafkaProducer producer,
    ISilverbackIntegrationLogger logger) =>
    producerBuilder
        .SetStatisticsHandler((_, statistics) => OnStatistics(statistics, producer, logger))
        .SetLogHandler((_, logMessage) => OnLog(logMessage, producer, logger));
public async Task Transactional_source_with_sink_Should_work()
{
    var settings = CreateConsumerSettings<string>(CreateGroup(1));
    var sourceTopic = CreateTopic(1);
    var targetTopic = CreateTopic(2);
    var transactionalId = Guid.NewGuid().ToString();
    const int totalMessages = 10;

    var control = KafkaConsumer.TransactionalSource(settings, Subscriptions.Topics(sourceTopic))
        .Via(Business<TransactionalMessage<Null, string>>())
        .Select(message => ProducerMessage.Single(
            new ProducerRecord<Null, string>(targetTopic, message.Record.Key, message.Record.Value),
            passThrough: message.PartitionOffset))
        .ToMaterialized(KafkaProducer.TransactionalSink(ProducerSettings, transactionalId), Keep.Both)
        .MapMaterializedValue(DrainingControl<NotUsed>.Create)
        .Run(Materializer);

    var consumer = ConsumeStrings(targetTopic, totalMessages);

    await ProduceStrings(sourceTopic, Enumerable.Range(1, totalMessages), ProducerSettings);

    AssertTaskCompletesWithin(TimeSpan.FromSeconds(totalMessages), consumer.IsShutdown);
    AssertTaskCompletesWithin(TimeSpan.FromSeconds(totalMessages), control.DrainAndShutdown());

    consumer.DrainAndShutdown().Result.Should().HaveCount(totalMessages);
}
public WeatherForecastController(
    ILogger<WeatherForecastController> logger,
    KafkaConfiguration kafkaConfiguration,
    KafkaProducer<WeatherForecast> producer,
    TelemetryClient telemetryClient)
{
    _logger = logger;
    _kafkaConfiguration = kafkaConfiguration;
    _producer = producer;
    _telemetryClient = telemetryClient;
}
public async Task VerifyABrokerStoppingAndRestartingCanBeHandledByTheConsumer()
{
    using (var cluster = new KafkaTestCluster("server.home", 1))
    {
        var topic = "test";
        cluster.CreateTopic(topic);

        using (var brokers = new KafkaBrokers(cluster.CreateBrokerUris()))
        {
            var producer = KafkaProducer.Create(brokers, new StringSerializer());
            await producer.SendAsync(KeyedMessage.Create(topic, "Test"), CancellationToken.None);

            await Task.Delay(1000);
            cluster.StopKafkaBroker(0);
            cluster.RestartKafkaBroker(0);

            var consumer = KafkaConsumer.Create(topic, brokers, new StringSerializer(),
                new TopicSelector { Topic = topic, Partition = 0, Offset = 0 });
            var result = await consumer.ReceiveAsync(CancellationToken.None);

            Assert.That(result, Has.Count.EqualTo(1));

            var first = result[0];
            Assert.That(first.Value, Is.EqualTo("Test"));
            Assert.That(first.Offset, Is.EqualTo(0));
        }

        cluster.DeleteTopic(topic);
    }
}
public static void Main(string[] args)
{
    Config fallbackConfig = ConfigurationFactory.ParseString(@"
        akka.suppress-json-serializer-warning=true
        akka.loglevel = DEBUG
    ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

    var system = ActorSystem.Create("TestKafka", fallbackConfig);
    var materializer = system.Materializer();

    var producerSettings = ProducerSettings<Null, string>
        .Create(system, null, new StringSerializer(Encoding.UTF8))
        .WithBootstrapServers("localhost:29092");

    Source
        .Cycle(() => Enumerable.Range(1, 100).GetEnumerator())
        .Select(c => c.ToString())
        .Select(elem => new MessageAndMeta<Null, string>
        {
            Topic = "akka100",
            Message = new Message<Null, string> { Value = elem }
        })
        .Via(KafkaProducer.PlainFlow(producerSettings))
        .Select(record =>
        {
            Console.WriteLine($"Producer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
            return record;
        })
        .RunWith(Sink.Ignore<DeliveryReport<Null, string>>(), materializer);

    // TODO: producer as a Committable Sink
    // TODO: Sharing KafkaProducer
    Console.ReadLine();
}
public async Task IntegrationTest()
{
    int counter = 0;

    // Consume
    var consumer = new KafkaConsumer($"integration.{group}", hosts) { EnableAutoCommit = false };
    await consumer.ListenAsync(new string[] { $"integration.{topic}" }, result =>
    {
        Output.WriteLine($"integration.{topic}({result.Key}):" + result.Message);
        counter++;
        result.Commit();
    });

    // Publish
    var producer = new KafkaProducer(hosts)
    {
        DefaultTopic = $"integration.{topic}",
        DefaultKey = $"integration.key"
    };
    await producer.PublishAsync("hello kafka");

    BlockUntil(() => counter >= 1, 3000);

    producer.Dispose();
    consumer.Dispose();

    Assert.Equal(1, counter);
}
private static void GroupConsumerTest()
{
    var consumers = new List<KafkaConsumer<string, KafkaMessage>>();
    for (var i = 0; i < 1; i++)
    {
        consumers.Add(CreateConsumer(commandQueue, i.ToString()));
    }

    var queueClient = new KafkaProducer<string, KafkaMessage>(commandQueue, brokerList,
        new StringSerializer(Encoding.UTF8), new KafkaMessageSerializer());

    while (true)
    {
        var key = Console.ReadLine();
        if (key.Equals("q"))
        {
            consumers.ForEach(consumer => consumer.Stop());
            queueClient.Stop();
            break;
        }

        var message = $"{key} @{DateTime.Now:yyyy-MM-dd HH:mm:ss.ffffff}";
        var kafkaMessage = new KafkaMessage(message);
        var start = DateTime.Now;

        queueClient.SendAsync(key, kafkaMessage, CancellationToken.None)
            .ContinueWith(t =>
            {
                var result = t.Result;
                Console.WriteLine($"send message: {message} partition:{result.Partition} offset:{result.Offset} cost: {(DateTime.Now - start).TotalMilliseconds}");
            });
    }
}
static void ProducerSendTPSTest()
{
    int batchCount = 100000;
    var queueClient = new KafkaProducer(commandQueue, zkConnectionString);

    var message = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.ffffff");
    var kafkaMessage = new Kafka.Client.Messages.Message(Encoding.UTF8.GetBytes(message));
    var data = new Kafka.Client.Producers.ProducerData<string, Kafka.Client.Messages.Message>(commandQueue, message, kafkaMessage);

    double totalSendRt = 0;
    Stopwatch watch = new Stopwatch();
    watch.Start();

    for (int i = 0; i < batchCount; i++)
    {
        try
        {
            var start = DateTime.Now;
            queueClient.Send(data);
            totalSendRt += (DateTime.Now - start).TotalMilliseconds;
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.GetBaseException().Message);
        }
    }

    var elapsedMs = watch.ElapsedMilliseconds;
    Console.WriteLine($"cost: {elapsedMs} tps: {batchCount * 1000 / elapsedMs} rt: {totalSendRt / (double)batchCount}");
    Console.ReadLine();
}
public async Task ShouldReturnKey()
{
    // Arrange
    var topic = "some-topic";
    var key = 10L;
    var value = "some-value";
    var sut = new KafkaProducer<long, string>(kafkaProducer.Object);

    kafkaProducer.Setup(e => e.ProduceAsync(It.IsAny<string>(), It.IsAny<Confluent.Kafka.Message<long, string>>(), It.IsAny<CancellationToken>()))
        .ReturnsAsync((string producedTopic, Confluent.Kafka.Message<long, string> message, CancellationToken token) =>
            new Confluent.Kafka.DeliveryResult<long, string>
            {
                Message = new Confluent.Kafka.Message<long, string>
                {
                    Key = key,
                    Value = value,
                }
            });

    // Act
    var actual = await sut.ProduceAsync(topic, value);

    // Assert
    Assert.That(actual, Is.EqualTo(key));
}
public void ThenItShouldFlushUnderlyingProducer(TimeSpan timeout)
{
    // Arrange
    var producerMock = new Mock<IProducer<string, BasicObject>>();
    producerMock.Setup(p => p.ProduceAsync(It.IsAny<string>(), It.IsAny<Message<string, BasicObject>>(), It.IsAny<CancellationToken>()))
        .ReturnsAsync(new DeliveryReport<string, BasicObject>
        {
            Topic = "some-topic",
            Partition = 1,
            Offset = 2,
        });

    var producerBuilderMock = new Mock<IProducerBuilderWrapper<string, BasicObject>>();
    producerBuilderMock.Setup(b => b.SetValueSerializer(It.IsAny<IAsyncSerializer<BasicObject>>()))
        .Returns(producerBuilderMock.Object);
    producerBuilderMock.Setup(b => b.Build())
        .Returns(producerMock.Object);

    var serializerFactoryMock = new Mock<IKafkaSerializerFactory>();
    serializerFactoryMock.Setup(f => f.GetValueSerializer<BasicObject>())
        .Returns(new Mock<IAsyncSerializer<BasicObject>>().Object);

    var kafkaProducer = new KafkaProducer<string, BasicObject>(producerBuilderMock.Object, serializerFactoryMock.Object);

    // Act
    kafkaProducer.Flush(timeout);

    // Assert
    producerMock.Verify(p => p.Flush(timeout), Times.Once);
}
public async Task ShouldCallProduceAsync()
{
    // Arrange
    var topic = "some-topic";
    var key = 10L;
    var value = "some-value";
    var sut = new KafkaProducer<long, string>(kafkaProducer.Object);

    kafkaProducer.Setup(e => e.ProduceAsync(It.IsAny<string>(), It.IsAny<Confluent.Kafka.Message<long, string>>(), It.IsAny<CancellationToken>()))
        .ReturnsAsync((string producedTopic, Confluent.Kafka.Message<long, string> message, CancellationToken token) =>
            new Confluent.Kafka.DeliveryResult<long, string>
            {
                Message = new Confluent.Kafka.Message<long, string>
                {
                    Key = key,
                    Value = value,
                }
            });

    // Act
    await sut.ProduceAsync(topic, value);

    // Assert
    kafkaProducer.Verify(
        e => e.ProduceAsync(topic, It.Is<Confluent.Kafka.Message<long, string>>(m => m.Value == value), It.IsAny<CancellationToken>()),
        Times.Once);
    kafkaProducer.VerifyNoOtherCalls();
}
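The two tests above exercise a thin wrapper around Confluent.Kafka's IProducer<TKey, TValue>. A minimal sketch of what such a wrapper might look like, inferred only from the mocked calls and assertions (this is an assumption, not the project's actual implementation; how the key is assigned is left to the underlying producer, since the tests only assert that the delivery result's key is returned):

using System.Threading;
using System.Threading.Tasks;
using Confluent.Kafka;

// Sketch only: reconstructed from the tests, not the real class under test.
public class KafkaProducer<TKey, TValue>
{
    private readonly IProducer<TKey, TValue> _producer;

    public KafkaProducer(IProducer<TKey, TValue> producer) => _producer = producer;

    public async Task<TKey> ProduceAsync(string topic, TValue value, CancellationToken token = default)
    {
        // Delegate to the underlying Confluent producer and hand back the key
        // recorded in the delivery result, which is what ShouldReturnKey asserts.
        var result = await _producer.ProduceAsync(
            topic,
            new Message<TKey, TValue> { Value = value },
            token);

        return result.Message.Key;
    }
}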
public App()
{
    kafkaProducer = KafkaProducerProvider.Get(x =>
    {
        Interlocked.Increment(ref processedCount);
    });
}
public static void staticinit()
{
    Configuration conf = new Configuration { Seeds = "localhost:9093" }; // "broker.local:9091"
    conf.ClientId = "LibreriaBProducer";
    conf.ClientRequestTimeoutMs = 2000;
    conf.RequestTimeoutMs = 2000;
    conf.RefreshMetadataInterval = new TimeSpan(0, 0, 0, 0, 100);
    conf.CompressionCodec = CompressionCodec.None;
    conf.ErrorStrategy = ErrorStrategy.Retry;
    //conf.MaxBufferedMessages = 100;
    conf.MaxRetry = 10;
    conf.ProduceBatchSize = 1;
    conf.ProduceBufferingTime = new TimeSpan(0, 0, 0, 0, 10);
    conf.RequiredAcks = RequiredAcks.Leader;

    logger = new CustomLogger();
    cluster = new ClusterClient(conf, logger);

    prod = new KafkaProducer<string, string>(Program.args.KafkaTopic, cluster);
    prod.Acknowledged += Prod_Acknowledged;
    prod.MessageDiscarded += Prod_MessageDiscarded;
    prod.MessageExpired += Prod_MessageExpired;
    prod.Throttled += Prod_Throttled;
}
private KafkaProducer<string, KafkaMessage> CreateQueueClient(string queue)
{
    CreateTopicIfNotExists(queue);
    var queueClient = new KafkaProducer<string, KafkaMessage>(queue, BrokerList,
        new StringSerializer(Encoding.UTF8), new KafkaMessageSerializer());
    return queueClient;
}
private static void OnLog(LogMessage logMessage, KafkaProducer producer, ISilverbackLogger logger)
{
    switch (logMessage.Level)
    {
        case SyslogLevel.Emergency:
        case SyslogLevel.Alert:
        case SyslogLevel.Critical:
            logger.LogConfluentProducerLogCritical(logMessage, producer);
            break;
        case SyslogLevel.Error:
            logger.LogConfluentProducerLogError(logMessage, producer);
            break;
        case SyslogLevel.Warning:
            logger.LogConfluentProducerLogWarning(logMessage, producer);
            break;
        case SyslogLevel.Notice:
        case SyslogLevel.Info:
            logger.LogConfluentProducerLogInformation(logMessage, producer);
            break;
        default:
            logger.LogConfluentProducerLogDebug(logMessage, producer);
            break;
    }
}
public IActionResult GreetingUser([FromBody] GreetingDto owner)
{
    KafkaProducer sample = new KafkaProducer();
    return Created(string.Empty, sample.SendToKafka(Constants.Constants.TOPIC, GetGreetingForUser(owner)));
    // return Created(string.Empty, GetGreetingForUser(owner));
}
public async Task TestSimpleConsumerWorksOk()
{
    var keySerializer = new NullSerializer<object>();
    var valueSerializer = new StringSerializer();
    var messagePartitioner = new LoadBalancedPartitioner<object>();

    using (var temporaryTopic = testCluster.CreateTemporaryTopic())
    using (var brokers = new KafkaBrokers(testCluster.CreateBrokerUris()))
    {
        var topic = temporaryTopic.Name;
        var producer = KafkaProducer.Create(brokers, keySerializer, valueSerializer, messagePartitioner);
        var consumer = KafkaConsumer.Create(defaultConsumerGroup, brokers, keySerializer, valueSerializer,
            new TopicSelector { Partition = 0, Topic = topic });

        await producer.SendAsync(KeyedMessage.Create(topic, "Message"), CancellationToken.None);

        var responses = await consumer.ReceiveAsync(CancellationToken.None);

        Assert.That(responses, Is.Not.Null);
        Assert.That(responses, Has.Count.EqualTo(1));

        var first = responses.First();
        Assert.That(first.Key, Is.Null);
        Assert.That(first.Offset, Is.EqualTo(0));
        Assert.That(first.Partition, Is.EqualTo(0));
        Assert.That(first.Topic, Is.EqualTo(topic));
        Assert.That(first.Value, Is.EqualTo("Message"));
    }
}
public async Task ProduceToMultiplePartitions(int numberOfPartitions, int numberOfKeys, int numberOfMessages)
{
    var keySerializer = new Int32Serializer();
    var valueSerializer = new StringSerializer();
    var messagePartitioner = new Int32Partitioner();

    using (var temporaryTopic = testCluster.CreateTemporaryTopic(partitions: 2))
    using (var brokers = new KafkaBrokers(testCluster.CreateBrokerUris()))
    {
        var topic = temporaryTopic.Name;

        {
            var producer = KafkaProducer.Create(brokers, keySerializer, valueSerializer, messagePartitioner);
            var messages = Enumerable
                .Range(0, numberOfMessages)
                .Select(i => KeyedMessage.Create(topic, i % numberOfKeys, i % numberOfPartitions, "Message " + i));
            await producer.SendAsync(messages, CancellationToken.None);
        }

        {
            var selectors = Enumerable
                .Range(0, numberOfPartitions)
                .Select(partition => new TopicSelector { Partition = partition, Topic = topic })
                .ToArray();
            var consumer = KafkaConsumer.Create(defaultConsumerGroup, brokers, keySerializer, valueSerializer, selectors);
            var responses = await consumer.ReceiveAsync(CancellationToken.None);

            Assert.That(responses, Has.Count.EqualTo(numberOfMessages));

            var received = new bool[numberOfMessages];
            var offsets = new long[numberOfPartitions];

            foreach (var response in responses)
            {
                var split = response.Value.Split(' ');
                Assert.That(split, Has.Length.EqualTo(2));
                Assert.That(split[0], Is.EqualTo("Message"));

                int messageNumber;
                var parsed = Int32.TryParse(split[1], out messageNumber);
                Assert.That(parsed, Is.True);
                Assert.That(messageNumber, Is.InRange(0, numberOfMessages - 1));

                var key = messageNumber % numberOfKeys;
                Assert.That(response.Key, Is.EqualTo(key));

                var partition = messageNumber % numberOfPartitions;
                Assert.That(response.Partition, Is.EqualTo(partition));

                Assert.That(received[messageNumber], Is.False);
                received[messageNumber] = true;

                Assert.That(response.Offset, Is.EqualTo(offsets[response.Partition]));
                offsets[response.Partition] += 1;

                Assert.That(response.Topic, Is.EqualTo(topic));
            }
        }
    }
}
/// <summary>
/// Produce to kafka topic.
/// </summary>
/// <param name="userDetails">User details</param>
/// <returns>Action result</returns>
internal async Task ProduceToKafkaTopic(UserDetails userDetails)
{
    //TODO: Read these values from config.
    var config = new KafkaProducerProperties
    {
        ServerAddresses = new List<string> { "127.0.0.1:9092" },
        TopicName = "Voting",
        CompressionType = KafkaProducerCompressionTypes.Snappy
    };

    using (KafkaProducer<string, string> producer = new KafkaProducer<string, string>(config,
        new StringSerializer(Encoding.UTF8), new StringSerializer(Encoding.UTF8)))
    {
        List<Task> tasks = new List<Task>
        {
            producer.ProduceAsync(userDetails.AadharNo, JsonConvert.SerializeObject(userDetails))
        };

        if (tasks.Count == 100)
        {
            await Task.WhenAll(tasks);
        }

        await Task.WhenAll(tasks);
    }
}
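The snippet above only ever queues a single task, so the tasks.Count == 100 branch never fires and the second Task.WhenAll is redundant. A minimal sketch of the batching pattern it appears to aim for, assuming the same hypothetical KafkaProducer<string, string> wrapper and its ProduceAsync(key, value) method, applied to a collection of users:

// Sketch only: KafkaProducer<string, string> and ProduceAsync(key, value) are the
// hypothetical wrapper types used above, not a published API.
internal async Task ProduceManyToKafkaTopic(KafkaProducer<string, string> producer, IEnumerable<UserDetails> users)
{
    var tasks = new List<Task>();

    foreach (var user in users)
    {
        tasks.Add(producer.ProduceAsync(user.AadharNo, JsonConvert.SerializeObject(user)));

        // Await in batches of 100 so the in-flight task list stays bounded.
        if (tasks.Count == 100)
        {
            await Task.WhenAll(tasks);
            tasks.Clear();
        }
    }

    // Await whatever is left over from the final partial batch.
    await Task.WhenAll(tasks);
}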
static void Main(string[] args)
{
    var host = "192.168.33.12:9092";
    var topic = "test2";
    var count = 50000000;
    var size = 100;

    var prop = new Properties();
    prop.setProperty("bootstrap.servers", "192.168.33.12:9092");
    prop.setProperty("acks", "1");
    prop.setProperty("buffer.memory", "67108864");
    prop.setProperty("batch.size", "8196");

    var producer = new KafkaProducer(prop, new ByteArraySerializer(), new ByteArraySerializer());

    var payload = new byte[size];
    for (int i = 0; i < size; i++)
        payload[i] = (byte)'a';

    var record = new ProducerRecord(topic, payload);
    var stats = new Stats(count, 5000, Console.WriteLine);

    for (int i = 0; i < count; i++)
    {
        //var payload = Encoding.UTF8.GetBytes(i.ToString());
        //var record = new ProducerRecord(topic, payload);
        var sendStart = DateTimeExtensions.CurrentTimeMillis();
        var cb = new StatsCallback { Action = stats.NextCompletion(sendStart, payload.Length, stats) };
        producer.send(record, cb);
    }

    producer.close();
    stats.PrintTotal();
}
public void TestKafkaProducer()
{
    IKafkaConfig kconf = new KafkaConfig(conf);
    KafkaProducer<Rootobject> kafkaProducer = new KafkaProducer<Rootobject>(kconf, msg);
    kafkaProducer.ProduceMessage(msg);
}
public async Task CommitableSource_consumes_messages_from_Producer_without_commits()
{
    int elementsCount = 100;
    var topic1 = CreateTopic(1);
    var group1 = CreateGroup(1);
    var topicPartition1 = new TopicPartition(topic1, 0);

    await GivenInitializedTopic(topicPartition1);

    await Source
        .From(Enumerable.Range(1, elementsCount))
        .Select(elem => new ProducerRecord<Null, string>(topicPartition1, elem.ToString()))
        .RunWith(KafkaProducer.PlainSink(ProducerSettings), Materializer);

    var consumerSettings = CreateConsumerSettings<string>(group1);

    var probe = KafkaConsumer
        .CommittableSource(consumerSettings, Subscriptions.Assignment(topicPartition1))
        .Where(c => !c.Record.Value.Equals(InitialMsg))
        .Select(c => c.Record.Value)
        .RunWith(this.SinkProbe<string>(), Materializer);

    probe.Request(elementsCount);

    foreach (var i in Enumerable.Range(1, elementsCount).Select(c => c.ToString()))
    {
        probe.ExpectNext(i, TimeSpan.FromSeconds(10));
    }

    probe.Cancel();
}
public static void Main(string[] args)
{
    Config fallbackConfig = ConfigurationFactory.ParseString(@"
        akka.suppress-json-serializer-warning=true
        akka.loglevel = DEBUG
    ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

    var system = ActorSystem.Create("TestKafka", fallbackConfig);
    var materializer = system.Materializer();

    var producerSettings = ProducerSettings<Null, string>
        .Create(system, null, null)
        .WithBootstrapServers("localhost:29092");

    Source
        .Cycle(() => Enumerable.Range(1, 100).GetEnumerator())
        .Select(c => c.ToString())
        .Select(elem => ProducerMessage.Single(new ProducerRecord<Null, string>("akka100", elem)))
        .Via(KafkaProducer.FlexiFlow<Null, string, NotUsed>(producerSettings))
        .Select(result =>
        {
            var response = result as Result<Null, string, NotUsed>;
            Console.WriteLine($"Producer: {response.Metadata.Topic}/{response.Metadata.Partition} {response.Metadata.Offset}: {response.Metadata.Value}");
            return result;
        })
        .RunWith(Sink.Ignore<IResults<Null, string, NotUsed>>(), materializer);

    // TODO: producer as a Committable Sink
    // TODO: Sharing KafkaProducer
    Console.ReadLine();
}
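For comparison, a minimal sketch of the same produce loop written directly against the Confluent.Kafka client instead of through an Akka Streams flow; only the bootstrap address and topic name are taken from the example above, the rest is assumed boilerplate:

using System;
using System.Threading.Tasks;
using Confluent.Kafka;

class PlainConfluentProducer
{
    static async Task Main()
    {
        var config = new ProducerConfig { BootstrapServers = "localhost:29092" };

        using var producer = new ProducerBuilder<Null, string>(config).Build();

        for (var i = 1; i <= 100; i++)
        {
            // ProduceAsync completes once the broker acknowledges the message.
            var result = await producer.ProduceAsync(
                "akka100",
                new Message<Null, string> { Value = i.ToString() });

            Console.WriteLine($"Producer: {result.Topic}/{result.Partition} {result.Offset}: {result.Message.Value}");
        }
    }
}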
public ChatHandler(
    WebSocketConnectionManager webSocketConnectionManager,
    KafkaProducer kafkaProducer,
    ActiveChatChannelRepository chatMessageRepository) : base(webSocketConnectionManager)
{
    _kafkaProducer = kafkaProducer;
    _activeChatChannelRepository = chatMessageRepository;
}
public WeatherForecastController(
    ILogger<WeatherForecastController> logger,
    KafkaProducer producer,
    TestEntityFContext dbContext)
{
    _logger = logger;
    _producer = producer;
}
private void ApplicationShutdown()
{
    Scheduler.Dispose();

    if (KafkaProducer != null)
    {
        KafkaProducer.Dispose();
    }
}
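If this cleanup belongs to a hosted application, one way to trigger it on shutdown is a sketch like the following, assuming an IHostApplicationLifetime from Microsoft.Extensions.Hosting is available via dependency injection:

// Sketch only: assumes IHostApplicationLifetime is injected into this class.
public void RegisterShutdownHook(Microsoft.Extensions.Hosting.IHostApplicationLifetime lifetime)
{
    // Run ApplicationShutdown (scheduler and producer disposal) when the host begins stopping.
    lifetime.ApplicationStopping.Register(ApplicationShutdown);
}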
public static void LogProduceNotAcknowledged(this ISilverbackLogger logger, KafkaProducer producer) =>
    ProduceNotAcknowledged(
        logger.InnerLogger,
        producer.Id,
        producer.Endpoint.Name,
        null);
public static void LogCreatingConfluentProducer(this ISilverbackLogger logger, KafkaProducer producer) =>
    CreatingConfluentProducer(
        logger.InnerLogger,
        producer.Id,
        producer.Endpoint.Name,
        null);
static void Main(string[] args)
{
    for (int i = 0; i < 100; i++)
    {
        KafkaProducer.ProduceAsync(i.ToString(), "czj" + i);
    }

    Console.ReadKey();
}
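The loop above fires ProduceAsync without awaiting, so deliveries may still be in flight when the program reaches Console.ReadKey(). A sketch of the same loop that waits for every send to finish, assuming the same static KafkaProducer.ProduceAsync(key, value) helper returns a Task:

using System;
using System.Collections.Generic;
using System.Threading.Tasks;

static async Task Main(string[] args)
{
    var sends = new List<Task>();

    for (int i = 0; i < 100; i++)
    {
        sends.Add(KafkaProducer.ProduceAsync(i.ToString(), "czj" + i));
    }

    // Wait until every message has been handed to the broker (or a send faults).
    await Task.WhenAll(sends);

    Console.ReadKey();
}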
static void StartProduce()
{
    var messageContent = GetMessageContentFromFile();
    var messageKey = GetMessageKey();

    IKafkaProducer producer = new KafkaProducer();
    producer.Produce(KafkaBootstrapper, TopicName, messageContent, messageKey);
}
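A minimal sketch of what a wrapper with this shape could look like on top of Confluent.Kafka; the IKafkaProducer interface and its Produce(bootstrapServers, topic, message, key) signature are taken from the call above, while everything inside the body is an assumption rather than the project's real implementation:

using System;
using Confluent.Kafka;

public interface IKafkaProducer
{
    void Produce(string bootstrapServers, string topic, string message, string key);
}

// Sketch only: one possible implementation over the Confluent.Kafka client.
public class KafkaProducer : IKafkaProducer
{
    public void Produce(string bootstrapServers, string topic, string message, string key)
    {
        var config = new ProducerConfig { BootstrapServers = bootstrapServers };

        using var producer = new ProducerBuilder<string, string>(config).Build();

        // Queue the message and surface delivery errors via the report callback.
        producer.Produce(topic, new Message<string, string> { Key = key, Value = message },
            report =>
            {
                if (report.Error.IsError)
                {
                    Console.WriteLine($"Delivery failed: {report.Error.Reason}");
                }
            });

        // Block until the outstanding message has been delivered or the timeout expires.
        producer.Flush(TimeSpan.FromSeconds(10));
    }
}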
private void InitKafkaConsumer()
{
    this.kafkaConsumerConfiguration = new ConfigurationBuilder()
        .AddJsonFile("kafkaConsumerSettings.json")
        .Build();

    this.eventHubKafkaAccountSettings = ConfigHelper.GetEventHubKafkaAccount(this.kafkaConsumerConfiguration);
    this.kafkaProducer = new KafkaProducer(this.eventHubKafkaAccountSettings, this.cosmosConsumerRouterSettings);
}