public static void Main(string[] args)
{
    Config fallbackConfig = ConfigurationFactory.ParseString(@"
        akka.suppress-json-serializer-warning=true
        akka.loglevel = DEBUG
    ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

    var system = ActorSystem.Create("TestKafka", fallbackConfig);
    var materializer = system.Materializer();

    var producerSettings = ProducerSettings<Null, string>
        .Create(system, null, null)
        .WithBootstrapServers("localhost:29092");

    Source
        .Cycle(() => Enumerable.Range(1, 100).GetEnumerator())
        .Select(c => c.ToString())
        .Select(elem => ProducerMessage.Single(new ProducerRecord<Null, string>("akka100", elem)))
        .Via(KafkaProducer.FlexiFlow<Null, string, NotUsed>(producerSettings))
        .Select(result =>
        {
            var response = result as Result<Null, string, NotUsed>;
            Console.WriteLine($"Producer: {response.Metadata.Topic}/{response.Metadata.Partition} {response.Metadata.Offset}: {response.Metadata.Value}");
            return result;
        })
        .RunWith(Sink.Ignore<IResults<Null, string, NotUsed>>(), materializer);

    // TODO: producer as a Committable Sink
    // TODO: Sharing KafkaProducer

    Console.ReadLine();
}
protected void Setup()
{
    testTopic = "akka100";
    subscription = Subscriptions.Topics(testTopic);
    probe = this.CreateTestProbe();

    string configText = File.ReadAllText("akka.test.conf");
    var config = ConfigurationFactory.ParseString(configText);

    var system_producer = ActorSystem.Create("TestKafka", config);
    materializer_producer = system_producer.Materializer();

    var system_consumer = ActorSystem.Create("TestKafka", config);
    materializer_consumer = system_consumer.Materializer();

    // Note: WithFallback returns a new Config rather than mutating the system's
    // configuration, so this statement on its own has no effect.
    this.Sys.Settings.Config.WithFallback(config);

    producerSettings = ProducerSettings<Null, string>
        .Create(system_producer, null, null)
        .WithBootstrapServers("kafka:9092");

    consumerSettings = ConsumerSettings<Null, string>
        .Create(system_consumer, null, null)
        .WithBootstrapServers("kafka:9092")
        .WithGroupId("group1");
}
public static void Main(string[] args)
{
    Config fallbackConfig = ConfigurationFactory.ParseString(@"
        akka.suppress-json-serializer-warning=true
        akka.loglevel = DEBUG
    ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

    var system = ActorSystem.Create("TestKafka", fallbackConfig);
    var materializer = system.Materializer();

    var producerSettings = ProducerSettings<Null, string>
        .Create(system, null, new StringSerializer(Encoding.UTF8))
        .WithBootstrapServers("localhost:29092");

    Source
        .Cycle(() => Enumerable.Range(1, 100).GetEnumerator())
        .Select(c => c.ToString())
        .Select(elem => new MessageAndMeta<Null, string> { Topic = "akka100", Message = new Message<Null, string> { Value = elem } })
        .Via(KafkaProducer.PlainFlow(producerSettings))
        .Select(record =>
        {
            Console.WriteLine($"Producer: {record.Topic}/{record.Partition} {record.Offset}: {record.Value}");
            return record;
        })
        .RunWith(Sink.Ignore<DeliveryReport<Null, string>>(), materializer);

    // TODO: producer as a Committable Sink
    // TODO: Sharing KafkaProducer

    Console.ReadLine();
}
protected override void PreStart()
{
#if DEBUG
    Console.WriteLine("PreStart");
#endif
    base.PreStart();

    var producerSettings = ProducerSettings<string, byte[]>
        .Create(settings.KafkaConfig, Serializers.Utf8, Serializers.ByteArray)
        .WithBootstrapServers(settings.KafkaConfig.GetString("bootstrap.servers"));

    materializer = Context.Materializer();
    producer = producerSettings.CreateKafkaProducer();
    topicMapper = settings.EventTopicMapper;
    enabledTopicPartitions = new HashSet<TopicPartition>(settings.EnabledTopicPartitions);

#if DEBUG
    Console.WriteLine("Getting RocksDB");
#endif
    var rocksDbSettings = settings.RocksDbSettings;
    rocksDbReadOptions = new ReadOptions().SetVerifyChecksums(rocksDbSettings.Checksum);
    rocksDbWriteOptions = new WriteOptions().SetSync(rocksDbSettings.FSync);

    // Open RocksDB; it may already hold the offsets the consumer should resume reading from.
    database = RocksDb.Open(rocksDbOptions, rocksDbSettings.Path);
#if DEBUG
    Console.WriteLine("Database opened");
#endif
    idMap = ReadIdMap();
    currentOffsets = ReadOffsetsForPartitions();
    StartConsuming();
}
public void Start(ProducerAkkaOption producerAkkaOption)
{
    materializer_producer = producerSystem.Materializer();

    var producerSettings = ProducerSettings<Null, string>
        .Create(producerSystem, null, null)
        .WithBootstrapServers(producerAkkaOption.BootstrapServers);

    producerList[producerAkkaOption.ProducerName] = producerSettings;
}
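The settings registered above can later be used to run a producer stream. A minimal sketch, assuming a hypothetical Send method on the same class and that producerList maps producer names to ProducerSettings<Null, string>:

// Minimal sketch (assumption: Send is a hypothetical companion to Start above).
// It looks up the registered settings by name and produces a single message with
// KafkaProducer.PlainSink, mirroring the producer API used elsewhere on this page.
public async Task Send(string producerName, string topic, string message)
{
    var settings = producerList[producerName];
    await Source
        .Single(new ProducerRecord<Null, string>(topic, message))
        .RunWith(KafkaProducer.PlainSink(settings), materializer_producer);
}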
public async Task PlainSink_should_resume_on_deserialization_errors()
{
    var callCount = 0;

    Directive Decider(Exception cause)
    {
        callCount++;
        switch (cause)
        {
            case ProduceException<Null, string> ex when ex.Error.IsSerializationError():
                return Directive.Resume;
            default:
                return Directive.Stop;
        }
    }

    var elementsCount = 10;
    var topic1 = CreateTopic(1);
    var group1 = CreateGroup(1);

    var producerSettings = ProducerSettings<Null, string>
        .Create(Sys, null, new FailingSerializer())
        .WithBootstrapServers(Fixture.KafkaServer);

    var sink = KafkaProducer.PlainSink(producerSettings)
        .AddAttributes(ActorAttributes.CreateSupervisionStrategy(Decider));

    var sourceTask = Source
        .From(new[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
        .Select(elem => new ProducerRecord<Null, string>(new TopicPartition(topic1, 0), elem.ToString()))
        .RunWith(sink, Materializer);

    var timeoutTask = Task.Delay(TimeSpan.FromSeconds(5));
    var completeTask = await Task.WhenAny(sourceTask, timeoutTask);
    if (completeTask == timeoutTask)
        throw new Exception("Producer timed out");

    var settings = CreateConsumerSettings<Null, string>(group1).WithValueDeserializer(new StringDeserializer());

    var probe = KafkaConsumer
        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
        .Select(c => c.Value)
        .RunWith(this.SinkProbe<string>(), Materializer);

    probe.Request(elementsCount);
    for (var i = 0; i < 9; i++)
    {
        Log.Info($">>>>>>>>>>> {i}");
        probe.ExpectNext();
    }

    callCount.Should().Be(1);
    probe.Cancel();
}
public async Task SendWithAkka(SequenceFlowInput inputMessage)
{
    var producerSettings = ProducerSettings<Null, string>
        .Create(Context.System, null, null)
        .WithBootstrapServers(KafkaEndpoint);

    await Source
        .Single("Akka: " + inputMessage.ProcessInstanceId.ToString())
        .Select(kafkaMessage => ProducerMessage.Single(new ProducerRecord<Null, string>(TopicName, kafkaMessage)))
        .Via(KafkaProducer.FlexiFlow<Null, string, NotUsed>(producerSettings))
        .RunWith(Sink.Ignore<IResults<Null, string, NotUsed>>(), Materializer);
}
public async Task SupervisionStrategy_Decider_on_PlainSink_should_work()
{
    var callCount = 0;

    Directive Decider(Exception cause)
    {
        callCount++;
        switch (cause)
        {
            case ProduceException<Null, string> ex when ex.Error.IsSerializationError():
                return Directive.Resume;
            default:
                return Directive.Stop;
        }
    }

    var topic1 = CreateTopic(1);
    var group1 = CreateGroup(1);

    var producerSettings = ProducerSettings<Null, string>
        .Create(Sys, null, new FailingSerializer())
        .WithBootstrapServers(Fixture.KafkaServer);

    // The exception is injected into the sink by the FailingSerializer, which throws
    // when the message "5" is encountered.
    var sourceTask = Source
        .From(new[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 })
        .Select(elem => new ProducerRecord<Null, string>(new TopicPartition(topic1, 0), elem.ToString()))
        .RunWith(
            KafkaProducer.PlainSink(producerSettings)
                .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider)),
            Materializer);

    await GuardWithTimeoutAsync(sourceTask, TimeSpan.FromSeconds(5));

    var settings = CreateConsumerSettings<Null, string>(group1).WithValueDeserializer(new StringDeserializer());

    var probe = KafkaConsumer
        .PlainSource(settings, Subscriptions.Assignment(new TopicPartition(topic1, 0)))
        .Select(c => c.Value)
        .RunWith(this.SinkProbe<string>(), Materializer);

    probe.Request(10);
    for (var i = 0; i < 9; i++)
    {
        var message = probe.ExpectNext();
        Log.Info($"> [{i}]: {message}");
    }

    callCount.Should().Be(1);
    probe.Cancel();
}
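FailingSerializer itself is not shown on this page. A minimal sketch of what it might look like, consistent with how the two supervision tests above use it (assumptions: it is a Confluent.Kafka ISerializer<string> that throws on the value "5"; the Kafka client then surfaces the failure as a ProduceException with a serialization error code, which is what the deciders match on):

// Hypothetical sketch of FailingSerializer (uses Confluent.Kafka and System.Text).
// Throwing from Serialize causes the producer to report a serialization error,
// which the supervision deciders above translate into Directive.Resume.
internal sealed class FailingSerializer : ISerializer<string>
{
    public byte[] Serialize(string data, SerializationContext context)
    {
        if (data == "5")
            throw new Exception("Cannot serialize the value '5'");
        return Encoding.UTF8.GetBytes(data);
    }
}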
public void ProducerSettings_must_beAbleToMergeProducerConfig()
{
    var conf = KafkaExtensions.DefaultSettings.GetConfig("akka.kafka.producer");
    var settings = ProducerSettings<string, string>.Create(conf, null, null);

    var config = new ProducerConfig
    {
        BootstrapServers = "localhost:9092",
        ClientId = "client1",
        EnableIdempotence = true
    };

    settings = settings.WithProducerConfig(config);

    settings.GetProperty("bootstrap.servers").Should().Be("localhost:9092");
    settings.GetProperty("client.id").Should().Be("client1");
    settings.GetProperty("enable.idempotence").Should().Be("True");
}
public void ProducerSettings_must_handleNestedKafkaClientsProperties()
{
    var conf = ConfigurationFactory.ParseString(@"
        akka.kafka.producer.kafka-clients {
            bootstrap.servers = ""localhost:9092""
            bootstrap.foo = baz
            foo = bar
            client.id = client1
        }
    ").WithFallback(KafkaExtensions.DefaultSettings).GetConfig("akka.kafka.producer");

    var settings = ProducerSettings<string, string>.Create(conf, null, null);

    settings.GetProperty("bootstrap.servers").Should().Be("localhost:9092");
    settings.GetProperty("client.id").Should().Be("client1");
    settings.GetProperty("foo").Should().Be("bar");
    settings.GetProperty("bootstrap.foo").Should().Be("baz");
}
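The same client properties can also be set programmatically rather than through HOCON. A minimal sketch using the fluent setters seen elsewhere on this page (assumption: WithProperty is equivalent to a kafka-clients config entry for plain key/value properties; conf is the same producer config as in the test above):

// Programmatic equivalent of the nested kafka-clients HOCON block above.
var settingsViaCode = ProducerSettings<string, string>
    .Create(conf, null, null)
    .WithBootstrapServers("localhost:9092")
    .WithProperty("client.id", "client1")
    .WithProperty("foo", "bar")
    .WithProperty("bootstrap.foo", "baz");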
public async Task PlainSink_should_fail_stage_if_broker_unavailable()
{
    var topic1 = CreateTopic(1);
    await GivenInitializedTopic(topic1);

    var config = ProducerSettings<Null, string>
        .Create(Sys, null, null)
        .WithBootstrapServers("localhost:10092");

    var probe = Source
        .From(Enumerable.Range(1, 100))
        .Select(c => c.ToString())
        .Select(elem => new ProducerRecord<Null, string>(topic1, elem))
        .Select(record => new Message<Null, string, NotUsed>(record, NotUsed.Instance) as IEnvelope<Null, string, NotUsed>)
        .Via(KafkaProducer.FlexiFlow<Null, string, NotUsed>(config))
        .RunWith(this.SinkProbe<IResults<Null, string, NotUsed>>(), Materializer);

    probe.ExpectSubscription();
    probe.OnError(new KafkaException(ErrorCode.Local_Transport));
}
public async Task PlainSink_should_fail_stage_if_broker_unavailable()
{
    var topic1 = CreateTopic(1);
    await GivenInitializedTopic(topic1);

    var config = ProducerSettings<Null, string>
        .Create(Sys, null, new StringSerializer(Encoding.UTF8))
        .WithBootstrapServers("localhost:10092");

    var probe = Source
        .From(Enumerable.Range(1, 100))
        .Select(c => c.ToString())
        .Select(elem => new MessageAndMeta<Null, string> { Topic = topic1, Message = new Message<Null, string> { Value = elem } })
        .Via(KafkaProducer.PlainFlow(config))
        .RunWith(this.SinkProbe<DeliveryReport<Null, string>>(), _materializer);

    probe.ExpectSubscription();
    probe.OnError(new KafkaException(ErrorCode.Local_Transport));
}
static async Task Main(string[] args)
{
    Config fallbackConfig = ConfigurationFactory.ParseString(@"
        akka.suppress-json-serializer-warning=true
        akka.loglevel = DEBUG
    ").WithFallback(ConfigurationFactory.FromResource<ConsumerSettings<object, object>>("Akka.Streams.Kafka.reference.conf"));

    var system = ActorSystem.Create("TestKafka", fallbackConfig);
    var materializer = system.Materializer();

    var producerSettings = ProducerSettings<Null, string>
        .Create(system, null, null)
        .WithBootstrapServers($"{EventHubNamespace}.servicebus.windows.net:9093")
        .WithProperties(new Dictionary<string, string>
        {
            { "security.protocol", "SASL_SSL" },
            { "sasl.mechanism", "PLAIN" },
            { "sasl.username", "$ConnectionString" },
            { "sasl.password", EventHubConnectionString },
        });

    await Source.From(Enumerable.Range(1, 100))
        .Select(c => c.ToString())
        .Select(elem => ProducerMessage.Single(new ProducerRecord<Null, string>(EventHubName, elem)))
        .Via(KafkaProducer.FlexiFlow<Null, string, NotUsed>(producerSettings))
        .Select(result =>
        {
            var response = result as Result<Null, string, NotUsed>;
            Console.WriteLine($"Producer: {response.Metadata.Topic}/{response.Metadata.Partition} {response.Metadata.Offset}: {response.Metadata.Value}");
            return result;
        })
        .RunWith(Sink.Ignore<IResults<Null, string, NotUsed>>(), materializer);

    Console.ReadKey();
    await system.Terminate();
}
protected ProducerSettings<TKey, TValue> BuildProducerSettings<TKey, TValue>()
{
    return ProducerSettings<TKey, TValue>.Create(Sys, null, null).WithBootstrapServers(_fixture.KafkaServer);
}
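A minimal usage sketch for this helper (assumptions: the topic name "my-topic" is hypothetical, and Materializer comes from the enclosing test fixture as in the other tests on this page):

// Build settings against the fixture's Kafka server and produce ten messages.
var producerSettings = BuildProducerSettings<Null, string>();
await Source
    .From(Enumerable.Range(1, 10))
    .Select(i => new ProducerRecord<Null, string>("my-topic", i.ToString()))
    .RunWith(KafkaProducer.PlainSink(producerSettings), Materializer);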
public async Task Directive_Restart_on_failed_Consumer_should_restart_Consumer()
{
    var topic = CreateTopic(1);
    var group = CreateGroup(1);
    var topicPartition = new TopicPartition(topic, 0);
    var serializationCallCount = 0;
    var callCount = 0;

    Directive Decider(Exception cause)
    {
        callCount++;
        if (cause is ConsumeException ce && ce.Error.IsSerializationError())
        {
            serializationCallCount++;
            return Directive.Restart;
        }
        return Directive.Stop;
    }

    var serializer = new Serializer<int>(BitConverter.GetBytes);
    var producerSettings = ProducerSettings<Null, int>
        .Create(Sys, null, serializer)
        .WithBootstrapServers(Fixture.KafkaServer);

    await Source.From(Enumerable.Range(1, 10))
        .Select(elem => new ProducerRecord<Null, int>(topicPartition, elem))
        .RunWith(KafkaProducer.PlainSink(producerSettings), Materializer);

    // The exception is injected once, using the FailOnceDeserializer.
    var deserializer = new FailOnceDeserializer<int>(5, data => BitConverter.ToInt32(data.Span));
    var consumerSettings = ConsumerSettings<Null, int>
        .Create(Sys, null, deserializer)
        .WithBootstrapServers(Fixture.KafkaServer)
        .WithStopTimeout(TimeSpan.FromSeconds(1))
        .WithProperty("auto.offset.reset", "earliest")
        .WithGroupId(group);

    var (_, probe) = KafkaConsumer
        .PlainSource(consumerSettings, Subscriptions.Assignment(topicPartition))
        .WithAttributes(ActorAttributes.CreateSupervisionStrategy(Decider))
        .Select(c => c.Message.Value)
        .ToMaterialized(this.SinkProbe<int>(), Keep.Both)
        .Run(Materializer);

    probe.Request(20);
    var pulled = new List<int>();
    for (var i = 0; i < 14; i++)
    {
        var msg = probe.ExpectNext();
        pulled.Add(msg);
    }

    probe.ExpectNoMsg(TimeSpan.FromSeconds(2));
    probe.Cancel();

    pulled.Should().BeEquivalentTo(
        new[] { 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 },
        opt => opt.WithStrictOrdering());

    // The decider should be called twice, because deciders are invoked in both
    // BaseSingleSourceLogic and KafkaConsumerActor.
    callCount.Should().Be(2);
    serializationCallCount.Should().Be(2);
}
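FailOnceDeserializer is likewise not shown on this page. A minimal sketch consistent with the test above (assumption: a Confluent.Kafka IDeserializer<T> that throws exactly once, the first time it sees the configured value, and otherwise delegates to the supplied function; this explains the expected sequence 1..4, restart, then 1..10):

// Hypothetical sketch of FailOnceDeserializer<T>.
internal sealed class FailOnceDeserializer<T> : IDeserializer<T> where T : IEquatable<T>
{
    private readonly T _failOn;
    private readonly Func<ReadOnlyMemory<byte>, T> _deserialize;
    private bool _failed;

    public FailOnceDeserializer(T failOn, Func<ReadOnlyMemory<byte>, T> deserialize)
    {
        _failOn = failOn;
        _deserialize = deserialize;
    }

    public T Deserialize(ReadOnlySpan<byte> data, bool isNull, SerializationContext context)
    {
        var value = _deserialize(data.ToArray());
        if (!_failed && value.Equals(_failOn))
        {
            // Fail only on the first encounter; after the stream restarts and
            // re-reads from the earliest offset, the same value deserializes fine.
            _failed = true;
            throw new Exception($"Failing once on value {value}");
        }
        return value;
    }
}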
public static async Task<int> Main(string[] args)
{
    // Setup
    await SetupKafkaAsync();
    await SetupAkkaAsync();

    List<CpuUsage> usageBeforeLoad;
    List<CpuUsage> usageAfterLoad;

    try
    {
        _log = Logging.GetLogger(ConsumerSystem, nameof(Program));

        // Create the topic on the Kafka server
        var builder = new AdminClientBuilder(new AdminClientConfig
        {
            BootstrapServers = Benchmark.Docker.KafkaAddress
        });
        using (var client = builder.Build())
        {
            await client.CreateTopicsAsync(new[]
            {
                new TopicSpecification
                {
                    Name = KafkaTopic,
                    NumPartitions = 3,
                    ReplicationFactor = 1
                }
            });
        }

        // Set up the consumer
        var consumerSettings = ConsumerSettings<string, string>
            .Create(ConsumerSystem, null, null)
            .WithBootstrapServers(Benchmark.Docker.KafkaAddress)
            .WithStopTimeout(TimeSpan.FromSeconds(1))
            .WithProperty("auto.offset.reset", "earliest")
            .WithGroupId(KafkaGroup);

        var control = KafkaConsumer.PlainPartitionedSource(consumerSettings, Subscriptions.Topics(KafkaTopic))
            .GroupBy(3, tuple => tuple.Item1)
            .SelectAsync(8, async tuple =>
            {
                var (topicPartition, source) = tuple;
                _log.Info($"Sub-source for {topicPartition}");
                var sourceMessages = await source
                    .Scan(0, (i, message) => i + 1)
                    .Select(i =>
                    {
                        ReceivedMessage.IncrementAndGet();
                        return LogReceivedMessages(topicPartition, i);
                    })
                    .RunWith(Sink.Last<long>(), ConsumerSystem.Materializer());

                _log.Info($"{topicPartition}: Received {sourceMessages} messages in total");
                return sourceMessages;
            })
            .MergeSubstreams()
            .AsInstanceOf<Source<long, IControl>>()
            .Scan(0L, (i, subValue) => i + subValue)
            .ToMaterialized(Sink.Last<long>(), Keep.Both)
            .MapMaterializedValue(tuple => DrainingControl<long>.Create(tuple.Item1, tuple.Item2))
            .Run(ConsumerSystem.Materializer());

        // Delay before the benchmark
        await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

        // Warmup
        await CollectSamplesAsync(DefaultWarmUpRepeat, DefaultSampleDuration, "[Warmup]");

        // Collect CPU usage before load
        usageBeforeLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage Before Load]");

        // Create load
        var producerSettings = ProducerSettings<string, string>
            .Create(ConsumerSystem, null, null)
            .WithBootstrapServers(Benchmark.Docker.KafkaAddress);

        await Source
            .From(Enumerable.Range(1, DefaultMessageCount))
            .Select(elem => new ProducerRecord<string, string>(KafkaTopic, "key", elem.ToString()))
            .RunWith(KafkaProducer.PlainSink(producerSettings), ConsumerSystem.Materializer());

        // Wait until the consumer has consumed all messages
        var stopwatch = Stopwatch.StartNew();
        while (stopwatch.Elapsed.TotalSeconds < DefaultTimeout && ReceivedMessage.Current < DefaultMessageCount)
        {
            await Task.Delay(100);
        }
        stopwatch.Stop();
        if (stopwatch.Elapsed.TotalSeconds > DefaultTimeout)
        {
            throw new Exception($"Timed out while waiting for the consumer to process {DefaultMessageCount} messages");
        }

        // Delay before the benchmark
        await Task.Delay(TimeSpan.FromSeconds(DefaultDelay));

        // Collect CPU usage after load
        usageAfterLoad = await CollectSamplesAsync(DefaultRepeat, DefaultSampleDuration, "[CPU Usage After Load]");
    }
    finally
    {
        // Tear down
        await TearDownAkkaAsync();
        await TearDownKafkaAsync();
    }

    Console.WriteLine("CPU Benchmark complete.");
    await GenerateReportAsync(usageBeforeLoad, "BeforeLoad", DefaultSampleDuration, DefaultRepeat);
    await GenerateReportAsync(usageAfterLoad, "AfterLoad", DefaultSampleDuration, DefaultRepeat);
    return 0;
}