static void Main(string[] args)
{
    var options = new KafkaOptions(new Uri("http://kafka1:9092"), new Uri("http://kafka2:9092"))
    {
        Log = new ConsoleLog()
    };

    var router = new BrokerRouter(options);
    var client = new Producer(router);

    var timing = new RollingQueue<double>(50);
    var rate = new RollingQueue<double>(50);
    var second = DateTime.Now.Second;
    var count = 0;

    // Consumer side: read tick-stamped messages back and track round-trip time
    Task.Run(() =>
    {
        var consumer = new Consumer(new ConsumerOptions("latencies", router));
        var position = consumer.GetTopicOffsetAsync("latencies");
        position.Wait();

        // Start consuming from the first available offset on each partition
        consumer.SetOffsetPosition(position.Result
            .Select(p => new OffsetPosition(p.PartitionId, p.Offsets.First()))
            .ToArray());

        foreach (var data in consumer.Consume())
        {
            count++;

            // Message payload is the producer-side tick count, so RTT is "now" minus that timestamp
            var rtt = (DateTime.Now - new DateTime(long.Parse(Encoding.UTF8.GetString(data.Value)))).TotalMilliseconds;

            if (rtt < 1000)
                timing.Enqueue(rtt);

            // Once per second, report rolling average throughput and latency
            if (second != DateTime.Now.Second)
            {
                second = DateTime.Now.Second;
                rate.Enqueue(count);
                count = 0;

                Console.WriteLine("Rate: {0} pps.\t{1} ",
                    rate.Average().ToString("N2"),
                    rtt < 1000 ? "RTT: " + timing.Average().ToString("N2") + " ms." : string.Empty);
            }
        }
    });

    // Producer side: send batches of ten tick-stamped messages as fast as the 1 ms sleep allows
    while (true)
    {
        client.SendMessageAsync("latencies", new[]
        {
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString()),
            new Message(DateTime.Now.Ticks.ToString())
        }, 1);

        Thread.Sleep(1);
    }

    // Unreachable while the send loop above runs forever; kept from the original sample
    client.Dispose();
    router.Dispose();
}
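RollingQueue<double> is not a BCL type; it appears to be a small helper from the test harness. A minimal sketch, assuming it is simply a fixed-capacity queue that evicts the oldest sample once full, so LINQ's Average() over the queue reflects only the most recent N values:

using System.Collections;
using System.Collections.Generic;

// Hypothetical stand-in for the test harness helper; semantics assumed, not confirmed
public class RollingQueue<T> : IEnumerable<T>
{
    private readonly Queue<T> m_queue = new Queue<T>();
    private readonly int m_capacity;

    public RollingQueue(int capacity)
    {
        m_capacity = capacity;
    }

    public void Enqueue(T item)
    {
        m_queue.Enqueue(item);

        // Drop the oldest sample once the rolling window is full
        while (m_queue.Count > m_capacity)
            m_queue.Dequeue();
    }

    public IEnumerator<T> GetEnumerator()
    {
        return m_queue.GetEnumerator();
    }

    IEnumerator IEnumerable.GetEnumerator()
    {
        return GetEnumerator();
    }
}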
// Per partition consumer read handler
private void ProcessPartitionMessages(object state)
{
    int partition = (int)state;

    try
    {
        Dictionary<uint, MeasurementKey> idTable = new Dictionary<uint, MeasurementKey>();
        ConsumerOptions options = new ConsumerOptions(Topic, m_router);
        LongSynchronizedOperation cacheLastConsumerOffset = null;
        OffsetPosition consumerCursor = new OffsetPosition { PartitionId = partition, Offset = 0 };
        long lastUpdateTime = 0;
        long lastMetadataUpdateCount = 0;
        long lastMeasurementTime = 0;

        options.PartitionWhitelist.Add(partition);
        options.Log = new TimeSeriesLogger(
            (message, parameters) => OnStatusMessage(MessageLevel.Info, string.Format($"P[{partition}]: " + message, parameters)),
            ex => OnProcessException(MessageLevel.Warning, ex));

        // Handle consumer offset tracking, i.e., adapter will start reading messages where it left off from last run
        if (TrackConsumerOffset)
        {
            // Parse path/filename.ext into constituent parts
            string[] fileParts = new string[3];

            fileParts[0] = FilePath.GetDirectoryName(ConsumerOffsetFileName);             // 0: path/
            fileParts[1] = FilePath.GetFileNameWithoutExtension(ConsumerOffsetFileName);  // 1: filename
            fileParts[2] = FilePath.GetExtension(ConsumerOffsetFileName);                 // 2: .ext

            // Include partition index as part of consumer offset cache file name
            string fileName = $"{fileParts[0]}{fileParts[1]}-P{partition}{fileParts[2]}";

            if (File.Exists(fileName))
            {
                try
                {
                    // Read last consumer offset
                    consumerCursor.Offset = long.Parse(File.ReadAllText(fileName));
                }
                catch (Exception ex)
                {
                    OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to read last consumer offset from \"{fileName}\": {ex.Message}", ex));
                }
            }

            cacheLastConsumerOffset = new LongSynchronizedOperation(() =>
            {
                // Do not write file any more often than defined consumer offset cache interval
                int restTime = (int)(Ticks.FromSeconds(ConsumerOffsetCacheInterval) - (DateTime.UtcNow.Ticks - lastUpdateTime)).ToMilliseconds();

                if (restTime > 0)
                    Thread.Sleep(restTime);

                lastUpdateTime = DateTime.UtcNow.Ticks;

                // Write current consumer offset
                File.WriteAllText(fileName, consumerCursor.Offset.ToString());
            },
            ex => OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to cache current consumer offset to \"{fileName}\": {ex.Message}", ex)))
            {
                IsBackground = true
            };
        }

        using (Consumer consumer = new Consumer(options, new OffsetPosition(partition, consumerCursor.Offset)))
        {
            lock (m_consumers)
                m_consumers.Add(new WeakReference<Consumer>(consumer));

            foreach (Message message in consumer.Consume())
            {
                if ((object)m_metadata == null)
                    continue;

                uint id;
                byte metadataVersion;
                IMeasurement measurement = message.KafkaDeserialize(out id, out metadataVersion);

                // Kick-off a refresh for new metadata if message version numbers change
                if (m_lastMetadataVersion != metadataVersion)
                {
                    m_lastMetadataVersion = metadataVersion;
                    m_updateMetadata.RunOnceAsync();
                }

                // Clear all undefined items in dictionary when metadata gets updated
                if (lastMetadataUpdateCount < m_metadataUpdateCount)
                {
                    lastMetadataUpdateCount = m_metadataUpdateCount;

                    foreach (uint undefinedID in idTable.Where(item => item.Value.SignalID == Guid.Empty).Select(item => item.Key).ToArray())
                        idTable.Remove(undefinedID);
                }

                // Get associated measurement key, or look it up in metadata table
                measurement.Metadata = idTable.GetOrAdd(id, lookupID => MeasurementKey.LookUpBySignalID(m_metadata?.Records?.FirstOrDefault(record => record.ID == lookupID)?.ParseSignalID() ?? Guid.Empty)).Metadata;

                // Only publish measurements that have associated metadata and are assigned to this adapter
                if (measurement.Key != MeasurementKey.Undefined && ((object)m_outputMeasurementKeys == null || m_outputMeasurementKeys.Contains(measurement.Key)))
                    OnNewMeasurements(new[] { measurement });

                // Cache last consumer offset
                consumerCursor.Offset = message.Offset;

                if ((object)cacheLastConsumerOffset != null)
                    cacheLastConsumerOffset.RunOnceAsync();

                if (ReadDelay > -1)
                {
                    // As a group of measurements transitions from one timestamp to another, inject configured read delay
                    if (lastMeasurementTime != measurement.Timestamp)
                        Thread.Sleep(ReadDelay);

                    lastMeasurementTime = measurement.Timestamp;
                }
            }
        }
    }
    catch (Exception ex)
    {
        OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Exception while reading Kafka messages for topic \"{Topic}\" P[{partition}]: {ex.Message}", ex));
    }
}
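Note that Dictionary<uint, MeasurementKey> has no GetOrAdd in the BCL; the idTable.GetOrAdd call above relies on an extension method (presumably the one GSF provides in its GSF.Collections namespace). A minimal sketch of the single-threaded variant the handler assumes, where the factory runs once on first sight of a key and the cached value is returned thereafter:

using System;
using System.Collections.Generic;

// Illustrative stand-in for the dictionary extension used above; not the GSF source
public static class DictionaryExtensions
{
    public static TValue GetOrAdd<TKey, TValue>(this Dictionary<TKey, TValue> dictionary, TKey key, Func<TKey, TValue> valueFactory)
    {
        TValue value;

        // First sight of this key: run the factory once and cache the result
        if (!dictionary.TryGetValue(key, out value))
        {
            value = valueFactory(key);
            dictionary.Add(key, value);
        }

        return value;
    }
}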