// Reads one record of the size/value metadata pair relative to the latest topic offset.
// messageIndex selects which record of the pair; maxBytes bounds the fetch size.
private static Message ReadMessage(BrokerRouter router, string topic, int messageIndex, int maxBytes = 32768)
{
    // Records are written in pairs, so walk back from the latest offset
    // to land on the requested member of the most recent pair
    long offset = router.LatestOffset(topic) - (2 - messageIndex);

    List<Message> messages = router.Fetch(topic, 0, offset, maxBytes)?.Messages;

    Message record = null;

    if ((messages?.Count ?? 0) > 0)
        record = messages[0];

    if ((object)record == null)
        throw new InvalidOperationException("No Kafka record to consume");

    return record;
}
/// <summary>
/// Writes time-series metadata to specified Kafka <paramref name="topic"/>.
/// </summary>
/// <param name="metadata">Source time-series metadata object to write to Kafka.</param>
/// <param name="router">Kafka router connection.</param>
/// <param name="topic">Kafka topic.</param>
public static void WriteToKafka(TimeSeriesMetadata metadata, BrokerRouter router, string topic)
{
    if ((object)metadata == null)
        throw new ArgumentNullException(nameof(metadata));

    if ((object)router == null)
        throw new ArgumentNullException(nameof(router));

    if (string.IsNullOrWhiteSpace(topic))
        throw new ArgumentNullException(nameof(topic));

    using (MemoryStream stream = new MemoryStream())
    {
        Serialize(metadata, stream, true);

        using (Producer producer = new Producer(router))
        {
            // Both records of the pair share a common timestamp key so a
            // reader can verify they belong together
            byte[] timeKey = BitConverter.GetBytes(DateTime.UtcNow.Ticks);

            Message[] messages = new Message[2];

            // First message serializes metadata size (metadata messages can be large,
            // so consumers need the size up front to request enough bytes)
            messages[SizeMessage] = new Message
            {
                Key = timeKey,
                Value = BitConverter.GetBytes(stream.Length)
            };

            // Second message serializes the metadata payload itself
            messages[ValueMessage] = new Message
            {
                Key = timeKey,
                Value = stream.ToArray()
            };

            // Publish both records to Kafka as a single batch
            producer.SendMessageAsync(topic, messages).Wait();
        }
    }
}
/// <summary>
/// Reads latest time-series metadata from specified Kafka <paramref name="topic"/>.
/// </summary>
/// <param name="router">Kafka router connection.</param>
/// <param name="topic">Kafka topic.</param>
/// <param name="statusMessage">Status message function.</param>
/// <param name="serializationTime">Serialization time.</param>
/// <returns>Latest <see cref="TimeSeriesMetadata"/> instance read from Kafka.</returns>
public static TimeSeriesMetadata ReadFromKafka(BrokerRouter router, string topic, Action<string> statusMessage, out Ticks serializationTime)
{
    if ((object)router == null)
        throw new ArgumentNullException(nameof(router));

    if (string.IsNullOrWhiteSpace(topic))
        throw new ArgumentNullException(nameof(topic));

    // Size record key carries the serialization timestamp; its value carries the payload length
    Message sizeMessage = ReadMessage(router, topic, SizeMessage);
    serializationTime = BitConverter.ToInt64(sizeMessage.Key, 0);

    // Round the reported payload size up to the next 4KB boundary for the fetch request
    long serializedSize = BitConverter.ToInt64(sizeMessage.Value, 0);
    int maxBytes = (int)Math.Ceiling(serializedSize / 4096.0D) * 4096;

    Message valueMessage = ReadMessage(router, topic, ValueMessage, maxBytes);

    // Both records of the pair are expected to share the same timestamp key
    if (serializationTime != BitConverter.ToInt64(valueMessage.Key, 0))
        statusMessage?.Invoke("WARNING: Timestamp keys for metadata size and value records are mismatched...");

    using (MemoryStream stream = new MemoryStream(valueMessage.Value))
        return Deserialize(stream);
}
/// <summary>
/// Attempts to disconnect from data output stream.
/// </summary>
/// <remarks>
/// Derived classes should attempt disconnect from data output stream here. Any exceptions thrown
/// by this implementation will be reported to host via <see cref="AdapterBase.ProcessException"/> event.
/// </remarks>
protected override void AttemptDisconnection()
{
    // Dispose producer first, then the router it was created against
    m_producer?.Dispose();
    m_producer = null;

    m_router?.Dispose();
    m_router = null;
}
/// <summary>
/// Executes the metadata refresh in a synchronous fashion.
/// </summary>
protected override void ExecuteMetadataRefresh()
{
    // Nothing to do until adapter is initialized, enabled and configured to serialize metadata
    if (!Initialized || !Enabled || !SerializeMetadata)
        return;

    try
    {
        using (BrokerRouter router = new BrokerRouter(new KafkaOptions(m_servers)
        {
            Log = new TimeSeriesLogger
            (
                (status, args) => OnStatusMessage(MessageLevel.Info, string.Format(status, args)),
                ex => OnProcessException(MessageLevel.Warning, new InvalidOperationException($"[{MetadataTopic}]: {ex.Message}", ex))
            )
        }))
        {
            // Attempt to retrieve last known metadata record from Kafka
            if ((object)m_metadata == null)
            {
                try
                {
                    Ticks serializationTime;

                    OnStatusMessage(MessageLevel.Info, "Reading latest time-series metadata records from Kafka...");

                    m_metadata = TimeSeriesMetadata.ReadFromKafka(router, MetadataTopic, status => OnStatusMessage(MessageLevel.Info, status), out serializationTime);

                    OnStatusMessage(MessageLevel.Info, $"Deserialized {m_metadata.Count:N0} Kafka time-series metadata records, version {m_metadata.Version:N0}, from \"{MetadataTopic}\" serialized at {serializationTime.ToString(MetadataRecord.DateTimeFormat)}");
                }
                catch (Exception ex)
                {
                    // Failure to read existing records is not fatal — a fresh set may be published below
                    OnStatusMessage(MessageLevel.Warning, $"Failed to read any existing Kafka time-series metadata records from topic \"{MetadataTopic}\": {ex.Message}");
                }
            }

            // Create new meta-data object based on newly loaded configuration
            TimeSeriesMetadata metadata = new TimeSeriesMetadata();

            try
            {
                // Build a metadata record for each parsable measurement in the active measurements table
                foreach (DataRow row in DataSource.Tables["ActiveMeasurements"].AsEnumerable())
                {
                    MeasurementKey key;

                    // Rows with a missing ID fall back to the undefined key string; unparsable rows are skipped
                    if (MeasurementKey.TryParse(row.Field<string>("ID") ?? MeasurementKey.Undefined.ToString(), out key))
                    {
                        metadata.Records.Add(new MetadataRecord
                        {
                            ID = key.ID,
                            Source = key.Source,
                            UniqueID = row.Field<object>("SignalID").ToString(),
                            PointTag = row.Field<string>("PointTag"),
                            Device = row.Field<string>("Device"),
                            Longitude = row.ConvertField("Longitude", 0.0F),
                            Latitude = row.ConvertField("Latitude", 0.0F),
                            Protocol = row.Field<string>("Protocol"),
                            SignalType = row.Field<string>("SignalType"),
                            EngineeringUnits = row.Field<string>("EngineeringUnits"),
                            PhasorType = row.Field<string>("PhasorType"),
                            Phase = row.Field<string>("Phase"),
                            Description = row.Field<string>("Description"),
                            LastUpdate = row.Field<DateTime>("UpdatedOn").ToString(MetadataRecord.DateTimeFormat)
                        });
                    }
                }
            }
            catch (Exception ex)
            {
                OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to serialize current time-series metadata records: {ex.Message}", ex));
            }

            if (metadata.Count > 0)
            {
                // See if metadata has not been created yet or is different from last known Kafka record
                if ((object)m_metadata == null || m_metadata.CalculateChecksum() != metadata.CalculateChecksum())
                {
                    // Update local metadata reference
                    m_metadata = metadata;

                    // Send updated metadata to Kafka
                    TimeSeriesMetadata.WriteToKafka(m_metadata, router, MetadataTopic);

                    // Cache metadata locally, if configured
                    m_cacheMetadataLocally?.RunOnceAsync();

                    m_metadataUpdateCount++;

                    OnStatusMessage(MessageLevel.Info, $"Updated \"{MetadataTopic}\" with {m_metadata.Count:N0} Kafka time-series metadata records...");
                }
                else
                {
                    OnStatusMessage(MessageLevel.Info, $"Latest \"{MetadataTopic}\" is up to date with current time-series metadata records...");
                }
            }
            else
            {
                OnStatusMessage(MessageLevel.Warning, "No available local time-series metadata available to serialize...");
            }
        }
    }
    catch (Exception ex)
    {
        OnProcessException(MessageLevel.Warning, new InvalidOperationException($"Failed to update \"{MetadataTopic}\" with current time-series metadata records: {ex.Message}", ex));
    }
}
/// <summary>
/// Attempts to connect to data output stream.
/// </summary>
/// <remarks>
/// Derived classes should attempt connection to data output stream here. Any exceptions thrown
/// by this implementation will result in restart of the connection cycle.
/// </remarks>
protected override void AttemptConnection()
{
    // Route Kafka library log output through the adapter's status/exception events
    KafkaOptions options = new KafkaOptions(m_servers)
    {
        Log = new TimeSeriesLogger
        (
            (status, args) => OnStatusMessage(MessageLevel.Info, string.Format(status, args)),
            ex => OnProcessException(MessageLevel.Warning, ex)
        )
    };

    m_router = new BrokerRouter(options);
    m_producer = new Producer(m_router);

    // Kick off initial metadata refresh
    MetadataRefreshOperation.RunOnceAsync();
}
// Demo driver: continuously publishes current tick timestamps to the "latencies"
// topic while a background consumer reads them back and reports round-trip time
// and message rate once per second.
static void Main(string[] args)
{
    var options = new KafkaOptions(new Uri("http://kafka1:9092"), new Uri("http://kafka2:9092"))
    {
        Log = new ConsoleLog()
    };

    // FIX: the original called client.Dispose()/router.Dispose() AFTER the
    // infinite send loop — those statements were unreachable (CS0162) and never
    // executed; using blocks make disposal structurally guaranteed instead.
    using (var router = new BrokerRouter(options))
    using (var client = new Producer(router))
    {
        var timing = new RollingQueue<double>(50);
        var rate = new RollingQueue<double>(50);
        var second = DateTime.Now.Second;
        var count = 0;

        // Background consumer: reads each published tick value and measures round-trip time
        Task.Run(() =>
        {
            var consumer = new Consumer(new ConsumerOptions("latencies", router));

            // Start consuming from the first available offset of each partition
            var position = consumer.GetTopicOffsetAsync("latencies");
            position.Wait();

            consumer.SetOffsetPosition(position.Result
                .Select(p => new OffsetPosition(p.PartitionId, p.Offsets.First()))
                .ToArray());

            foreach (var data in consumer.Consume())
            {
                count++;

                // Message payload is a tick count written by the producer loop below
                var rtt = (DateTime.Now - new DateTime(long.Parse(Encoding.UTF8.GetString(data.Value)))).TotalMilliseconds;

                // Ignore outliers over one second (e.g. backlog replay on startup)
                if (rtt < 1000)
                    timing.Enqueue(rtt);

                // Report once per wall-clock second
                if (second != DateTime.Now.Second)
                {
                    second = DateTime.Now.Second;
                    rate.Enqueue(count);
                    count = 0;

                    Console.WriteLine("Rate: {0} pps.\t{1} ",
                        rate.Average().ToString("N2"),
                        (rtt < 1000) ? "RTT: " + timing.Average().ToString("N2") + " ms." : string.Empty);
                }
            }
        });

        // Producer loop: batch of 10 timestamped messages roughly every millisecond
        while (true)
        {
            client.SendMessageAsync("latencies", new[]
            {
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString()),
                new Message(DateTime.Now.Ticks.ToString())
            }, 1);

            Thread.Sleep(1);
        }
    }
}
// Update metadata from latest Kafka records
private void UpdateMetadata()
{
    // Attempt to retrieve last known metadata record from Kafka
    try
    {
        using (BrokerRouter router = new BrokerRouter(new KafkaOptions(m_servers)
        {
            Log = new TimeSeriesLogger(
                (status, args) => OnStatusMessage(MessageLevel.Info, string.Format(status, args)),
                ex => OnProcessException(MessageLevel.Warning, new InvalidOperationException($"[{MetadataTopic}]: {ex.Message}", ex)))
        }))
        {
            Ticks serializationTime;

            OnStatusMessage(MessageLevel.Info, "Reading latest time-series metadata records from Kafka...");

            TimeSeriesMetadata metadata = TimeSeriesMetadata.ReadFromKafka(router, MetadataTopic, status => OnStatusMessage(MessageLevel.Info, status), out serializationTime);

            if ((object)metadata != null)
            {
                m_metadata = metadata;

                OnStatusMessage(MessageLevel.Info, $"Deserialized {m_metadata.Count:N0} Kafka time-series metadata records, version {m_metadata.Version:N0}, from \"{MetadataTopic}\" serialized at {serializationTime.ToString(MetadataRecord.DateTimeFormat)}");

                // Only count an update when the metadata version actually changed
                if (m_lastMetadataVersion != MetadataVersion)
                {
                    m_lastMetadataVersion = MetadataVersion;
                    m_metadataUpdateCount++;
                }

                // Cache metadata locally, if configured
                m_cacheMetadataLocally?.RunOnceAsync();
            }
        }
    }
    catch (Exception ex)
    {
        if (Enabled)
        {
            // Treat exception as a warning if metadata already exists;
            // with no prior metadata the adapter cannot operate, so rethrow
            if ((object)m_metadata == null)
                throw;

            OnStatusMessage(MessageLevel.Warning, $"Failed to read latest Kafka time-series metadata records from topic \"{MetadataTopic}\": {ex.Message}");
        }
    }
}
/// <summary>
/// Attempts to disconnect from data output stream.
/// </summary>
/// <remarks>
/// Derived classes should attempt disconnect from data output stream here. Any exceptions thrown
/// by this implementation will be reported to host via <see cref="AdapterBase.ProcessException"/> event.
/// </remarks>
protected override void AttemptDisconnection()
{
    // Forcefully stop partition processing threads
    // NOTE(review): Thread.Abort is obsolete and unsupported on .NET Core/5+ —
    // if this code is ever migrated off .NET Framework, switch to cooperative
    // cancellation in ProcessPartitionMessages; verify target framework.
    if ((object)m_processingThreads != null)
    {
        foreach (Thread processingThread in m_processingThreads)
            processingThread.Abort();

        m_processingThreads = null;
    }

    if ((object)m_router != null)
    {
        m_router.Dispose();
        m_router = null;
    }

    // Drop any cached consumer references under lock
    lock (m_consumers)
        m_consumers.Clear();
}
/// <summary>
/// Attempts to connect to data output stream.
/// </summary>
/// <remarks>
/// Derived classes should attempt connection to data output stream here. Any exceptions thrown
/// by this implementation will result in restart of the connection cycle.
/// </remarks>
protected override void AttemptConnection()
{
    // Route Kafka library log output through the adapter's status/exception events
    KafkaOptions options = new KafkaOptions(m_servers)
    {
        Log = new TimeSeriesLogger
        (
            (status, args) => OnStatusMessage(MessageLevel.Info, string.Format(status, args)),
            ex => OnProcessException(MessageLevel.Warning, ex)
        )
    };

    m_router = new BrokerRouter(options);

    // Spin up one background processing thread per partition
    m_processingThreads = new Thread[Partitions];

    for (int partition = 0; partition < m_processingThreads.Length; partition++)
    {
        Thread processingThread = new Thread(ProcessPartitionMessages) { IsBackground = true };
        m_processingThreads[partition] = processingThread;
        processingThread.Start(partition);
    }

    // Kick off process to update metadata
    m_updateMetadata.RunOnceAsync();
}