public void Run()
{
    var cConfig = new ConsumerConfig
    {
        BootstrapServers = bootstrapServers,
        GroupId = Guid.NewGuid().ToString(),
        AutoOffsetReset = AutoOffsetReset.Earliest,
        IsolationLevel = IsolationLevel.ReadCommitted,
    };

    // Last sequence number seen per producer id.
    var lasts = new Dictionary<int, int>();
    IConsumer<int, int> consumer = null;
    try
    {
        consumer = new ConsumerBuilder<int, int>(cConfig).Build();
        consumer.Subscribe(conf.Topic);
        while (true)
        {
            var cr = consumer.Consume();
            if (!lasts.ContainsKey(cr.Message.Key))
            {
                lasts.Add(cr.Message.Key, -1);
            }
            if (cr.Message.Value == lasts[cr.Message.Key] + 1)
            {
                Console.Write(".");
            }
            else
            {
                Console.Write($"[producer {cr.Message.Key} expected seq {lasts[cr.Message.Key] + 1} but got {cr.Message.Value}]");
                break;
            }
            Console.Out.Flush();
            lasts[cr.Message.Key] = cr.Message.Value;
        }
    }
    catch (Exception e)
    {
        Console.WriteLine(e.ToString());
    }
    finally
    {
        if (consumer != null)
        {
            consumer.Close();
            consumer.Dispose();
        }
        Console.WriteLine("Consume loop exited...");
    }
}
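// The ReadCommitted consumer above only sees messages from committed
// transactions, so its per-producer sequence check presumes a transactional
// producer on the other side. A minimal sketch of such a producer follows,
// assuming the same `bootstrapServers` and `conf.Topic` fields; `producerId`
// and the transactional id naming scheme are assumptions for illustration.
public void RunProducer(int producerId)
{
    var pConfig = new ProducerConfig
    {
        BootstrapServers = bootstrapServers,
        TransactionalId = $"seq-producer-{producerId}", // assumed naming scheme
    };
    using var producer = new ProducerBuilder<int, int>(pConfig).Build();
    producer.InitTransactions(TimeSpan.FromSeconds(30));
    for (int seq = 0; ; seq++)
    {
        producer.BeginTransaction();
        // Key = producer id, Value = sequence number, as the consumer expects.
        producer.Produce(conf.Topic, new Message<int, int> { Key = producerId, Value = seq });
        producer.CommitTransaction(TimeSpan.FromSeconds(30));
    }
}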
public void Consumer_ClosedHandle(string bootstrapServers)
{
    LogToFile("start Consumer_ClosedHandle");

    var consumerConfig = new ConsumerConfig
    {
        GroupId = Guid.NewGuid().ToString(),
        BootstrapServers = bootstrapServers
    };

    var consumer = new ConsumerBuilder<Null, Null>(consumerConfig).Build();
    consumer.Consume(TimeSpan.FromSeconds(10));
    consumer.Dispose();

    // Consuming on a disposed handle must throw, and disposal must release
    // the underlying librdkafka handle.
    Assert.Throws<ObjectDisposedException>(() => consumer.Consume(TimeSpan.FromSeconds(10)));
    Assert.Equal(0, Library.HandleCount);

    LogToFile("end Consumer_ClosedHandle");
}
public IEnumerable<object> GetKafkaObjets()
{
    IConsumer<byte[], string> consumer = null;
    try
    {
        try
        {
            // Create the consumer and subscribe to the topic from the job data.
            consumer = new ConsumerBuilder<byte[], string>(_consumerConfig)
                .SetKeyDeserializer(Deserializers.ByteArray)
                .Build();
            consumer.Subscribe(this._kafkaCrawlJobData.KafkaTopic);
        }
        catch (Exception ex)
        {
            this.log.Error(() => $"Kafka Crawler - Could not create Consumer. Exception: {ex.Message}");
            yield break;
        }

        // Track the time spent waiting for messages.
        var stopWatch = new Stopwatch();
        stopWatch.Start();
        var attempt = 1;
        while (!_cts.IsCancellationRequested)
        {
            ConsumeResult<byte[], string> cr = null;
            try
            {
                cr = consumer.Consume(TimeSpan.Zero);
                attempt = 1;
            }
            catch (Exception ex)
            {
                this.log.Error(() => $"Kafka Crawler - Could not consume. Exception: {ex.Message}. Attempt {attempt}.");
                attempt++;
                if (attempt > 3)
                {
                    this.log.Error(() => $"Kafka Crawler - Could not consume. Exception: {ex.Message}. Ending loop.");
                    _cts.Cancel();
                }
                else
                {
                    Thread.Sleep(1000);
                }
                continue;
            }

            if (cr?.Message != null)
            {
                // A message arrived: reset the idle timer.
                stopWatch.Restart();
                var resource = this.GetTypedObject(cr);
                if (resource == null)
                {
                    continue;
                }
                yield return resource;
            }
            else if (stopWatch.Elapsed.TotalMinutes >= this._kafkaCrawlJobData.KafkaDummyClueGenerationInterval)
            {
                // No messages for the configured interval: emit a dummy clue to
                // keep the job from being shut down, then restart the timer.
                stopWatch.Restart();
                this.log.Info(() => $"Kafka Crawler - Creating dummy object after receiving 0 messages for {this._kafkaCrawlJobData.KafkaDummyClueGenerationInterval} minutes.");
                yield return new Contact
                {
                    AccountId = "Dummy AccountId",
                    AccountIdName = "Dummy AccountIdName",
                    AccountIdYomiName = "Dummy AccountIdName",
                    AccountRoleCode = "Dummy AccountIdName",
                    AccountRoleCodeName = "Dummy AccountIdName",
                    ContactId = "Dummy ContactId",
                    Description = "Dummy Description",
                    FullName = "Dummy FullName",
                    NickName = "Dummy NickName",
                };
            }
        }
    }
    finally
    {
        // Single disposal point; this also runs if the caller abandons the
        // iterator early. (The original disposed both here and at the end of
        // the try block, which double-disposed the consumer.)
        if (consumer != null)
        {
            consumer.Dispose();
        }
    }
}
public void Start()
{
    var logContext = $"{nameof(Start)} for SubscriberId={_subscriberId}";
    try
    {
        IConsumer<string, string> consumer = new ConsumerBuilder<string, string>(_consumerProperties).Build();
        var processor = new KafkaMessageProcessor(_subscriberId, _handler,
            _loggerFactory.CreateLogger<KafkaMessageProcessor>());

        using (IAdminClient adminClient = new DependentAdminClientBuilder(consumer.Handle).Build())
        {
            foreach (string topic in _topics)
            {
                VerifyTopicExistsBeforeSubscribing(adminClient, topic);
            }
        }

        List<string> topicsList = new List<string>(_topics);
        _logger.LogDebug($"{logContext}: Subscribing to topics='{String.Join(",", topicsList)}'");
        consumer.Subscribe(topicsList);

        // Unlike the Java code, set the state to Started before starting the
        // processing thread, so it cannot be set to Started after it has
        // potentially already been set to Stopped.
        _state = EventuateKafkaConsumerState.Started;

        Task.Run(() =>
        {
            try
            {
                while (!_cancellationTokenSource.IsCancellationRequested)
                {
                    try
                    {
                        ConsumeResult<string, string> record =
                            consumer.Consume(TimeSpan.FromMilliseconds(ConsumePollMilliseconds));
                        if (record != null)
                        {
                            _logger.LogDebug($"{logContext}: process record at offset='{record.Offset}', key='{record.Key}', value='{record.Value}'");
                            processor.Process(record);
                        }
                        else
                        {
                            processor.ThrowExceptionIfHandlerFailed();
                        }
                        MaybeCommitOffsets(consumer, processor);
                    }
                    catch (ConsumeException e)
                    {
                        _logger.LogError($"{logContext}: ConsumeException - {e.Error}. Continuing.");
                    }
                }
                _state = EventuateKafkaConsumerState.Stopped;
            }
            catch (TaskCanceledException)
            {
                _logger.LogInformation($"{logContext}: Shutdown by cancel");
                _state = EventuateKafkaConsumerState.Stopped;
            }
            catch (KafkaMessageProcessorFailedException e)
            {
                _logger.LogError($"{logContext}: Terminating due to KafkaMessageProcessorFailedException - {e}");
                _state = EventuateKafkaConsumerState.MessageHandlingFailed;
            }
            catch (Exception e)
            {
                _logger.LogError($"{logContext}: Exception - {e}");
                _state = EventuateKafkaConsumerState.Failed;
                // The Java code rethrows here, but that seems unnecessary.
            }
            finally
            {
                // Try to commit the last of the offsets. The callbacks run
                // asynchronously, so there is no guarantee that all offsets are
                // ready; worst case, some messages are processed more than once.
                MaybeCommitOffsets(consumer, processor);
                consumer.Dispose();
                _logger.LogDebug($"{logContext}: Stopped in state {_state.ToString()}");
            }
        }, _cancellationTokenSource.Token);
    }
    catch (Exception e)
    {
        _logger.LogError(e, $"{logContext}: Error subscribing");
        _state = EventuateKafkaConsumerState.FailedToStart;
        throw;
    }
}
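// VerifyTopicExistsBeforeSubscribing is referenced above but not shown. A
// plausible sketch follows, purely an assumption, using the admin client's
// metadata API (IAdminClient.GetMetadata); the exception type and timeout are
// illustrative choices.
private static void VerifyTopicExistsBeforeSubscribing(IAdminClient adminClient, string topic)
{
    // Request metadata for just this topic; a missing or broken topic surfaces
    // as a topic-level error or an empty partition list in the response.
    Metadata metadata = adminClient.GetMetadata(topic, TimeSpan.FromSeconds(10));
    TopicMetadata topicMetadata = metadata.Topics.FirstOrDefault(t => t.Topic == topic);
    if (topicMetadata == null || topicMetadata.Error.IsError || topicMetadata.Partitions.Count == 0)
    {
        throw new InvalidOperationException($"Topic '{topic}' does not exist or is in an error state.");
    }
}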
private void ConsumeMessagesAndSendToWorkers()
{
    var conf = new ConsumerConfig
    {
        GroupId = this.Topic,
        BootstrapServers = this.BootstrapServers,
        EnableAutoCommit = true,
        StatisticsIntervalMs = 5000,
        SessionTimeoutMs = 6000,
        AutoOffsetReset = AutoOffsetReset.Earliest,
        EnablePartitionEof = true
    };

    using var consumer = new ConsumerBuilder<string, string>(conf)
        .SetErrorHandler((_, e) => Console.WriteLine($"Error: {e.Reason}"))
        .Build();

    var partitionList = new List<TopicPartition> { new TopicPartition(this.Topic, 0) };
    consumer.Assign(partitionList);

    // Register the Ctrl+C handler once, outside the loop. The original
    // registered a new handler and created a new CancellationTokenSource on
    // every iteration, leaking handlers and cancelling only the current token.
    var cts = new CancellationTokenSource();
    Console.CancelKeyPress += (_, e) =>
    {
        e.Cancel = true;
        cts.Cancel();
    };

    try
    {
        while (true)
        {
            try
            {
                var consumeResult = consumer.Consume(cts.Token);
                if (consumeResult.IsPartitionEOF)
                {
                    Console.WriteLine($"Reached end of topic {consumeResult.Topic}, partition {consumeResult.Partition}, offset {consumeResult.Offset}.");
                    break;
                }

                var message = JsonConvert.DeserializeObject(consumeResult.Message.Value,
                    new JsonSerializerSettings { TypeNameHandling = TypeNameHandling.All });
                var provider = ((ActorSystemImpl)_context.System).Provider;
                var newActorRef = provider.ResolveActorRef(consumeResult.Message.Key);
                _workerActor.Tell(message, newActorRef);

                try
                {
                    consumer.Commit(consumeResult);
                }
                catch (KafkaException e)
                {
                    Console.WriteLine($"Commit error: {e.Error.Reason}");
                }
            }
            catch (ConsumeException e)
            {
                Console.WriteLine($"Consume error: {e.Error.Reason}");
            }
        }
    }
    catch (OperationCanceledException)
    {
        Console.WriteLine("Closing consumer.");
    }
    finally
    {
        // `using var` disposes the consumer; an explicit Dispose here would be redundant.
        consumer.Unassign();
        consumer.Close();
    }
}
static async Task ConsumerDemoAsync()
{
    ConsumerConfig consumerConfig = new()
    {
        // BootstrapServers = "192.168.199.133:9093,192.168.199.133:9094,192.168.199.133:9095",
        ClientId = Guid.NewGuid().ToString("n"),
        BootstrapServers = BootstrapServers,
        GroupId = GroupId,
        EnableAutoCommit = false,
        AutoOffsetReset = AutoOffsetReset.Earliest,
        EnablePartitionEof = false,
        // PartitionAssignmentStrategy = PartitionAssignmentStrategy.Range,
        SessionTimeoutMs = 6000,
        MaxPollIntervalMs = 600000,
    };

    IConsumer<string, string> consumer = new ConsumerBuilder<string, string>(consumerConfig)
        .SetErrorHandler((msg, e) => Console.WriteLine($"Error: {e.Reason}"))
        .SetPartitionsAssignedHandler((c, partitions) =>
            Console.WriteLine($"Assigned partitions: [{string.Join(", ", partitions)}]"))
        .SetPartitionsRevokedHandler((c, partitions) =>
            Console.WriteLine($"Revoking assignment: [{string.Join(", ", partitions)}]"))
        .Build();

    consumer.Subscribe(Topic);

    await Task.Factory.StartNew(() =>
    {
        try
        {
            Random random = new(Environment.TickCount);
            while (true)
            {
                var consumeResult = consumer.Consume();
                Console.WriteLine($"Consumer::{consumeResult.Message?.Key}::{consumeResult.Message?.Value}::{consumeResult.Partition.Value}::{consumeResult.Offset.Value}::{Thread.CurrentThread.ManagedThreadId}");
                // Thread.Sleep(1000);

                // Randomly commit about half the messages to demonstrate how
                // uncommitted offsets are redelivered after a restart.
                if (random.Next(0, 2) == 0)
                {
                    consumer.Commit(consumeResult);
                    Console.WriteLine($"Commit: {consumeResult.Message?.Key}");
                }
                else
                {
                    Console.WriteLine($"Uncommit: {consumeResult.Message?.Key}");
                }
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex);
        }
        finally
        {
            consumer.Close();
            consumer.Dispose();
        }
    });
}
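// The random commit above is only a demonstration. With EnableAutoCommit set
// to false, a more typical pattern is to commit every N messages and once more
// on shutdown, bounding reprocessing after a crash to at most N messages. A
// sketch, assuming a `cts` CancellationTokenSource and a hypothetical
// `Handle` processing step; the batch size of 100 is arbitrary:
int uncommitted = 0;
ConsumeResult<string, string> last = null;
while (!cts.IsCancellationRequested)
{
    last = consumer.Consume(cts.Token);
    Handle(last); // hypothetical processing step
    if (++uncommitted >= 100)
    {
        consumer.Commit(last); // commits last.Offset + 1 for that partition
        uncommitted = 0;
    }
}
if (last != null) consumer.Commit(last); // flush the tail batch on shutdown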
public void SubScribe()
{
    _cancelConsumer = new CancellationTokenSource();
    _consumerCnt = 0;
    var brokerList = Endpoint;
    var config = new ConsumerConfig
    {
        BootstrapServers = brokerList,
        GroupId = _groupId,
        ApiVersionRequest = true,
        EnableAutoCommit = true,
    };
    if (Debug)
    {
        config.Debug = "msg,broker,topic,protocol";
    }

    var consumer = new ConsumerBuilder<string, string>(config)
        .SetLogHandler((_, msg) => Logger.Log(MapLogLevel(msg.Level), msg.Message))
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            var parFiltered = partitions.Where(p => p.Topic == Topic).ToList();
            var ret = new List<TopicPartitionOffset>();
            if (!parFiltered.Any())
            {
                _messageLogs.Add($"{DateTime.Now:yyyy-MM-dd HH:mm:ss} WARNING!!! empty partition filtered?\n\n");
            }
            else if (!_firstAssigned)
            {
                // Rebalance after the first assignment: resume from committed offsets.
                ret = parFiltered.Select(p => new TopicPartitionOffset(p.Topic, p.Partition, Confluent.Kafka.Offset.Unset)).ToList();
            }
            else if (CurOffsetType == "Beginning")
            {
                ret = parFiltered.Select(p => new TopicPartitionOffset(p.Topic, p.Partition, Confluent.Kafka.Offset.Beginning)).ToList();
            }
            else if (CurOffsetType == "End")
            {
                ret = parFiltered.Select(p => new TopicPartitionOffset(p.Topic, p.Partition, Confluent.Kafka.Offset.End)).ToList();
            }
            else
            {
                ret = parFiltered.Select(p => new TopicPartitionOffset(p.Topic, p.Partition, Confluent.Kafka.Offset.Unset)).ToList();
            }

            var assignedInfo = "";
            ret.ForEach(offset => assignedInfo += $"(Topic: {offset.Topic}, Partition: {offset.Partition}, Offset: {offset.Offset})");
            _messageLogs.Add($"{DateTime.Now:yyyy-MM-dd HH:mm:ss} partitions assigned. [{assignedInfo}]\n\n");
            RefreshMessageLog();
            _firstAssigned = false;
            return ret;
        })
        .Build();

    consumer.Subscribe(Topic);
    _messageLogs.Add($"{DateTime.Now:yyyy-MM-dd HH:mm:ss} subscribe done.\n\n");
    RefreshMessageLog();

    _pollTask = Task.Run(() =>
    {
        try
        {
            while (true)
            {
                try
                {
                    var result = consumer.Consume(_cancelConsumer.Token);
                    if (result.IsPartitionEOF)
                    {
                        continue;
                    }
                    OnMessage(result);
                }
                catch (ConsumeException e)
                {
                    MessageBox.Show($"Consume error: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            consumer.Close();
        }
        consumer.Dispose();
    });

    NotSubscribe = false;
}
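// MapLogLevel is referenced above but not shown. A plausible sketch, purely an
// assumption, mapping librdkafka's syslog levels (Confluent.Kafka.SyslogLevel,
// the type of msg.Level in the log handler) onto a hypothetical LogLevel enum
// accepted by Logger.Log:
private static LogLevel MapLogLevel(SyslogLevel level)
{
    switch (level)
    {
        case SyslogLevel.Emergency:
        case SyslogLevel.Alert:
        case SyslogLevel.Critical:
        case SyslogLevel.Error:
            return LogLevel.Error;
        case SyslogLevel.Warning:
            return LogLevel.Warning;
        case SyslogLevel.Notice:
        case SyslogLevel.Info:
            return LogLevel.Info;
        default:
            return LogLevel.Debug;
    }
}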
public async Task ListeningThread(string host, Func<IEvent, Task> handlerAsync)
{
    canceller = new CancellationTokenSource();

retry:

    IConsumer<string, byte[]> consumer = null;
    try
    {
        await KafkaCommon.AssureTopic(host, topic);

        var consumerConfig = new ConsumerConfig
        {
            BootstrapServers = host,
            GroupId = Guid.NewGuid().ToString(),
            EnableAutoCommit = false
        };

        consumer = new ConsumerBuilder<string, byte[]>(consumerConfig).Build();
        consumer.Subscribe(topic);

        for (; ; )
        {
            try
            {
                if (canceller.Token.IsCancellationRequested)
                {
                    break;
                }

                var consumerResult = consumer.Consume(canceller.Token);
                consumer.Commit(consumerResult);

                if (consumerResult.Message.Key == KafkaCommon.MessageKey)
                {
                    var stopwatch = new Stopwatch();
                    stopwatch.Start();

                    byte[] body = consumerResult.Message.Value;
                    if (encryptionKey != null)
                    {
                        body = SymmetricEncryptor.Decrypt(encryptionAlgorithm, encryptionKey, body);
                    }

                    var message = KafkaCommon.Deserialize<KafkaEventMessage>(body);
                    if (message.Claims != null)
                    {
                        var claimsIdentity = new ClaimsIdentity(message.Claims.Select(x => new Claim(x[0], x[1])), "CQRS");
                        Thread.CurrentPrincipal = new ClaimsPrincipal(claimsIdentity);
                    }

                    await handlerAsync(message.Message);

                    stopwatch.Stop();
                    _ = Log.TraceAsync($"Received Await: {topic} {stopwatch.ElapsedMilliseconds}");
                }
                else
                {
                    _ = Log.ErrorAsync($"{nameof(KafkaServer)} unrecognized message key {consumerResult.Message.Key}");
                }
            }
            catch (OperationCanceledException)
            {
                // Consume(CancellationToken) throws OperationCanceledException on
                // cancellation; the original caught only TaskCanceledException,
                // which let cancellation fall through to the generic handler.
                break;
            }
            catch (Exception ex)
            {
                _ = Log.TraceAsync($"Error: Received Await: {topic}");
                _ = Log.ErrorAsync(ex);
            }
        }

        consumer.Unsubscribe();
    }
    catch (Exception ex)
    {
        _ = Log.ErrorAsync(ex);
        if (consumer != null)
        {
            consumer.Dispose();
        }
        consumer = null;
        if (!canceller.IsCancellationRequested)
        {
            await Task.Delay(retryDelay);
            goto retry;
        }
    }

    canceller.Dispose();
    canceller = null;
    if (consumer != null)
    {
        consumer.Dispose();
    }
    IsOpen = false;
}
private async Task ListeningThread(string host, Func<ICommand, Task> handlerAsync, Func<ICommand, Task> handlerAwaitAsync)
{
    canceller = new CancellationTokenSource();

retry:

    IConsumer<string, byte[]> consumer = null;
    try
    {
        await KafkaCommon.AssureTopic(host, topic);

        var consumerConfig = new ConsumerConfig
        {
            BootstrapServers = host,
            GroupId = topic,
            EnableAutoCommit = false
        };

        consumer = new ConsumerBuilder<string, byte[]>(consumerConfig).Build();
        consumer.Subscribe(topic);

        for (; ; )
        {
            Exception error = null;
            bool awaitResponse = false;
            string ackTopic = null;
            string ackKey = null;
            try
            {
                if (canceller.Token.IsCancellationRequested)
                {
                    break;
                }

                var consumerResult = consumer.Consume(canceller.Token);
                consumer.Commit(consumerResult);

                awaitResponse = consumerResult.Message.Key == KafkaCommon.MessageWithAckKey;
                if (awaitResponse)
                {
                    ackTopic = Encoding.UTF8.GetString(consumerResult.Message.Headers.GetLastBytes(KafkaCommon.AckTopicHeader));
                    ackKey = Encoding.UTF8.GetString(consumerResult.Message.Headers.GetLastBytes(KafkaCommon.AckKeyHeader));
                }

                if (consumerResult.Message.Key == KafkaCommon.MessageKey || awaitResponse)
                {
                    var stopwatch = new Stopwatch();
                    stopwatch.Start();

                    byte[] body = consumerResult.Message.Value;
                    if (encryptionKey != null)
                    {
                        body = SymmetricEncryptor.Decrypt(encryptionAlgorithm, encryptionKey, body);
                    }

                    var message = KafkaCommon.Deserialize<KafkaCommandMessage>(body);
                    if (message.Claims != null)
                    {
                        var claimsIdentity = new ClaimsIdentity(message.Claims.Select(x => new Claim(x[0], x[1])), "CQRS");
                        Thread.CurrentPrincipal = new ClaimsPrincipal(claimsIdentity);
                    }

                    if (awaitResponse)
                    {
                        await handlerAwaitAsync(message.Message);
                    }
                    else
                    {
                        await handlerAsync(message.Message);
                    }

                    stopwatch.Stop();
                    _ = Log.TraceAsync($"Received Await: {topic} {stopwatch.ElapsedMilliseconds}");
                }
                else
                {
                    _ = Log.ErrorAsync($"{nameof(KafkaServer)} unrecognized message key {consumerResult.Message.Key}");
                }
            }
            catch (OperationCanceledException)
            {
                // Consume(CancellationToken) throws OperationCanceledException on
                // cancellation, not just TaskCanceledException.
                break;
            }
            catch (Exception ex)
            {
                _ = Log.TraceAsync($"Error: Received Await: {topic}");
                _ = Log.ErrorAsync(ex);
                error = ex;
            }

            if (awaitResponse)
            {
                // Send an Acknowledgement (success or failure) back on the caller's ack topic.
                IProducer<string, byte[]> producer = null;
                try
                {
                    var producerConfig = new ProducerConfig
                    {
                        BootstrapServers = host,
                        ClientId = clientID
                    };
                    producer = new ProducerBuilder<string, byte[]>(producerConfig).Build();

                    var ack = new Acknowledgement()
                    {
                        Success = error == null,
                        ErrorMessage = error?.Message
                    };
                    var body = KafkaCommon.Serialize(ack);
                    if (encryptionKey != null)
                    {
                        body = SymmetricEncryptor.Encrypt(encryptionAlgorithm, encryptionKey, body);
                    }

                    await producer.ProduceAsync(ackTopic, new Message<string, byte[]>() { Key = ackKey, Value = body });
                }
                catch (Exception ex)
                {
                    _ = Log.ErrorAsync(ex);
                }
                finally
                {
                    if (producer != null)
                    {
                        producer.Dispose();
                    }
                }
            }
        }

        consumer.Unsubscribe();
    }
    catch (Exception ex)
    {
        _ = Log.ErrorAsync(ex);
        if (consumer != null)
        {
            consumer.Dispose();
        }
        consumer = null;
        if (!canceller.IsCancellationRequested)
        {
            await Task.Delay(retryDelay);
            goto retry;
        }
    }

    canceller.Dispose();
    canceller = null;
    if (consumer != null)
    {
        consumer.Dispose();
    }
    IsOpen = false;
}
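// The ack path above builds and tears down a producer for every single
// acknowledgement. Since Confluent.Kafka producers are thread-safe and
// long-lived by design, a lighter variant caches one producer for the lifetime
// of the listener. A sketch; the field name is an assumption, and lazy
// initialization is safe here only because the listener loop is the sole caller:
private IProducer<string, byte[]> ackProducer; // assumed field

private IProducer<string, byte[]> GetAckProducer(string host)
{
    if (ackProducer == null)
    {
        var producerConfig = new ProducerConfig
        {
            BootstrapServers = host,
            ClientId = clientID
        };
        ackProducer = new ProducerBuilder<string, byte[]>(producerConfig).Build();
    }
    return ackProducer;
}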
private async Task AckListeningThread()
{
    await KafkaCommon.AssureTopic(host, ackTopic);

    var consumerConfig = new ConsumerConfig
    {
        BootstrapServers = host,
        GroupId = ackTopic,
        EnableAutoCommit = false
    };

    IConsumer<string, byte[]> consumer = null;
    try
    {
        consumer = new ConsumerBuilder<string, byte[]>(consumerConfig).Build();
        consumer.Subscribe(ackTopic);

        for (; ; )
        {
            try
            {
                if (canceller.Token.IsCancellationRequested)
                {
                    break;
                }

                var consumerResult = consumer.Consume(canceller.Token);
                consumer.Commit(consumerResult);

                // Resolve the pending callback registered for this ack key;
                // ignore acks nobody is waiting for.
                if (!ackCallbacks.TryRemove(consumerResult.Message.Key, out Action<Acknowledgement> callback))
                {
                    continue;
                }

                var response = consumerResult.Message.Value;
                if (encryptionKey != null)
                {
                    response = SymmetricEncryptor.Decrypt(encryptionAlgorithm, encryptionKey, response);
                }
                var ack = KafkaCommon.Deserialize<Acknowledgement>(response);
                callback(ack);
            }
            catch (OperationCanceledException)
            {
                // Consume(CancellationToken) throws OperationCanceledException on
                // cancellation, not just TaskCanceledException.
                break;
            }
            catch { }
        }

        consumer.Unsubscribe();
        await KafkaCommon.DeleteTopic(host, ackTopic);
    }
    finally
    {
        canceller.Dispose();
        if (consumer != null)
        {
            consumer.Dispose();
        }
    }
}
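// AckListeningThread resolves callbacks out of `ackCallbacks`, but the
// registration side is not shown here. A sketch of how a sender might await an
// Acknowledgement; the method name, timeout handling, and header wiring are
// assumptions inferred from the consuming code above:
private async Task<Acknowledgement> WaitForAckAsync(string ackKey, TimeSpan timeout)
{
    var tcs = new TaskCompletionSource<Acknowledgement>(TaskCreationOptions.RunContinuationsAsynchronously);
    ackCallbacks.TryAdd(ackKey, ack => tcs.TrySetResult(ack));
    try
    {
        // The command must be produced with AckTopicHeader/AckKeyHeader set so
        // the server (see ListeningThread above) knows where to send the ack.
        var completed = await Task.WhenAny(tcs.Task, Task.Delay(timeout));
        if (completed != tcs.Task)
        {
            throw new TimeoutException($"No acknowledgement received for key {ackKey}");
        }
        return await tcs.Task;
    }
    finally
    {
        // Always unregister, whether the ack arrived or the wait timed out.
        ackCallbacks.TryRemove(ackKey, out _);
    }
}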