private ConsumeResult<K, V> ConsumeImpl<K, V>(
    int millisecondsTimeout,
    IDeserializer<K> keyDeserializer,
    IDeserializer<V> valueDeserializer)
{
    // Poll the underlying librdkafka consumer; IntPtr.Zero means the timeout elapsed.
    var msgPtr = kafkaHandle.ConsumerPoll((IntPtr)millisecondsTimeout);
    if (msgPtr == IntPtr.Zero) { return null; }

    try
    {
        // Marshal the native message struct into managed memory.
        var msg = Util.Marshal.PtrToStructure<rd_kafka_message>(msgPtr);

        string topic = null;
        if (this.enableTopicNameMarshaling)
        {
            if (msg.rkt != IntPtr.Zero)
            {
                topic = Util.Marshal.PtrToStringUTF8(Librdkafka.topic_name(msg.rkt));
            }
        }

        if (msg.err == ErrorCode.Local_PartitionEOF)
        {
            return new ConsumeResult<K, V>
            {
                TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                Message = null,
                IsPartitionEOF = true
            };
        }

        long timestampUnix = 0;
        IntPtr timestampType = (IntPtr)TimestampType.NotAvailable;
        if (enableTimestampMarshaling)
        {
            timestampUnix = Librdkafka.message_timestamp(msgPtr, out timestampType);
        }
        var timestamp = new Timestamp(timestampUnix, (TimestampType)timestampType);

        Headers headers = null;
        if (enableHeaderMarshaling)
        {
            headers = new Headers();
            Librdkafka.message_headers(msgPtr, out IntPtr hdrsPtr);
            if (hdrsPtr != IntPtr.Zero)
            {
                // Copy each header out of the native header list until the index runs past the end.
                for (var i = 0; ; ++i)
                {
                    var err = Librdkafka.header_get_all(hdrsPtr, (IntPtr)i, out IntPtr namep, out IntPtr valuep, out IntPtr sizep);
                    if (err != ErrorCode.NoError)
                    {
                        break;
                    }
                    var headerName = Util.Marshal.PtrToStringUTF8(namep);
                    byte[] headerValue = null;
                    if (valuep != IntPtr.Zero)
                    {
                        headerValue = new byte[(int)sizep];
                        Marshal.Copy(valuep, headerValue, 0, (int)sizep);
                    }
                    headers.Add(headerName, headerValue);
                }
            }
        }

        if (msg.err != ErrorCode.NoError)
        {
            throw new ConsumeException(
                new ConsumeResult<byte[], byte[]>
                {
                    TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                    Message = new Message<byte[], byte[]>
                    {
                        Timestamp = timestamp,
                        Headers = headers,
                        Key = KeyAsByteArray(msg),
                        Value = ValueAsByteArray(msg)
                    },
                    IsPartitionEOF = false
                },
                kafkaHandle.CreatePossiblyFatalError(msg.err, null));
        }

        K key;
        try
        {
            unsafe
            {
                key = keyDeserializer.Deserialize(
                    msg.key == IntPtr.Zero
                        ? ReadOnlySpan<byte>.Empty
                        : new ReadOnlySpan<byte>(msg.key.ToPointer(), (int)msg.key_len),
                    msg.key == IntPtr.Zero,
                    new SerializationContext(MessageComponentType.Key, topic));
            }
        }
        catch (Exception ex)
        {
            // Deserialization failed: surface the raw bytes in the exception so the caller can inspect them.
            throw new ConsumeException(
                new ConsumeResult<byte[], byte[]>
                {
                    TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                    Message = new Message<byte[], byte[]>
                    {
                        Timestamp = timestamp,
                        Headers = headers,
                        Key = KeyAsByteArray(msg),
                        Value = ValueAsByteArray(msg)
                    },
                    IsPartitionEOF = false
                },
                new Error(ErrorCode.Local_KeyDeserialization),
                ex);
        }

        V val;
        try
        {
            unsafe
            {
                val = valueDeserializer.Deserialize(
                    msg.val == IntPtr.Zero
                        ? ReadOnlySpan<byte>.Empty
                        : new ReadOnlySpan<byte>(msg.val.ToPointer(), (int)msg.len),
                    msg.val == IntPtr.Zero,
                    new SerializationContext(MessageComponentType.Value, topic));
            }
        }
        catch (Exception ex)
        {
            throw new ConsumeException(
                new ConsumeResult<byte[], byte[]>
                {
                    TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
                    Message = new Message<byte[], byte[]>
                    {
                        Timestamp = timestamp,
                        Headers = headers,
                        Key = KeyAsByteArray(msg),
                        Value = ValueAsByteArray(msg)
                    },
                    IsPartitionEOF = false
                },
                new Error(ErrorCode.Local_ValueDeserialization),
                ex);
        }

        return new ConsumeResult<K, V>
        {
            TopicPartitionOffset = new TopicPartitionOffset(topic, msg.partition, msg.offset),
            Message = new Message<K, V>
            {
                Timestamp = timestamp,
                Headers = headers,
                Key = key,
                Value = val
            },
            IsPartitionEOF = false
        };
    }
    finally
    {
        // Always destroy the native message to return its memory to librdkafka.
        Librdkafka.message_destroy(msgPtr);
    }
}
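// ConsumeImpl is internal to the client; applications reach it through the public
// Consume overloads on IConsumer<K, V>. Below is a minimal sketch of how the fields
// marshaled above (timestamp, headers, partition EOF) surface through the public API.
// The broker address, topic, and group id are placeholder assumptions, and
// EnablePartitionEof must be set for IsPartitionEOF to ever be reported.
using System;
using Confluent.Kafka;

class ConsumeFieldsDemo
{
    public static void Main()
    {
        var config = new ConsumerConfig
        {
            BootstrapServers = "localhost:9092",        // placeholder broker
            GroupId = "demo-group",                     // placeholder group id
            AutoOffsetReset = AutoOffsetReset.Earliest,
            EnablePartitionEof = true                   // required for IsPartitionEOF reports
        };

        using var consumer = new ConsumerBuilder<string, string>(config).Build();
        consumer.Subscribe("demo-topic");               // placeholder topic

        while (true)
        {
            // Consume(TimeSpan) returns null when the timeout elapses, mirroring
            // ConsumeImpl's null return for an empty poll.
            var cr = consumer.Consume(TimeSpan.FromSeconds(1));
            if (cr == null) { continue; }

            if (cr.IsPartitionEOF)
            {
                // Message is null in this case, exactly as ConsumeImpl constructs it.
                Console.WriteLine($"Reached end of {cr.TopicPartitionOffset}");
                break;
            }

            Console.WriteLine($"{cr.Message.Timestamp.UtcDateTime:o} {cr.Message.Key} => {cr.Message.Value}");
            foreach (var header in cr.Message.Headers ?? new Headers())
            {
                Console.WriteLine($"  header {header.Key}: {BitConverter.ToString(header.GetValueBytes())}");
            }
        }

        consumer.Close();
    }
}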
/// <summary>
/// Main method for the console app.
/// </summary>
/// <param name="args">No arguments used.</param>
public static void Main(string[] args)
{
    Console.WriteLine("Starting .NET consumer.");

    // Configure the consumer: bootstrap server, group id, offset reset behavior,
    // and the Confluent monitoring interceptor.
    var consumerConfig = new ConsumerConfig
    {
        BootstrapServers = "kafka:9092",
        GroupId = "csharp-consumer-prev",
        AutoOffsetReset = AutoOffsetReset.Earliest,
        PluginLibraryPaths = "monitoring-interceptor",
    };

    using (var consumer = new ConsumerBuilder<string, string>(consumerConfig)
        .SetPartitionsAssignedHandler((c, partitions) =>
        {
            // Calculate the time 5 minutes ago.
            var timestamp = new Confluent.Kafka.Timestamp(DateTime.Now.AddMinutes(-5));

            // Build a list of TopicPartitionTimestamp from the assigned partitions.
            var timestamps = partitions.Select(tp => new TopicPartitionTimestamp(tp, timestamp));

            // Request the offsets for the start timestamp.
            var offsets = c.OffsetsForTimes(timestamps, TimeSpan.FromMinutes(1));

            foreach (var offset in offsets)
            {
                // Print the new offset for each partition.
                Console.WriteLine($"Moving partition {offset.Partition.Value} to {offset.Offset.Value}");
            }

            // Return the new partition offsets; consumption resumes from these positions.
            return offsets;
        })
        .Build())
    {
        // Subscribe to our topic (KafkaTopic is a constant defined elsewhere in the class).
        consumer.Subscribe(KafkaTopic);

        CancellationTokenSource cts = new CancellationTokenSource();
        Console.CancelKeyPress += (_, e) =>
        {
            e.Cancel = true; // prevent the process from terminating.
            cts.Cancel();
        };

        try
        {
            int recordCount = 0;
            while (true)
            {
                try
                {
                    // Poll for available records.
                    var cr = consumer.Consume(cts.Token);
                    Console.WriteLine($"{cr.Message.Key},{cr.Message.Value}");
                    recordCount++;

                    // Exit processing after 100 records.
                    if (recordCount >= 100)
                    {
                        break;
                    }
                }
                catch (ConsumeException e)
                {
                    Console.WriteLine($"Error occurred: {e.Error.Reason}");
                }
            }
        }
        catch (OperationCanceledException)
        {
            // Ensure the consumer leaves the group cleanly and final offsets are committed.
            Console.WriteLine("Closing consumer.");
            consumer.Close();
        }
    }
}
private void ProduceImpl(
    string topic,
    byte[] val, int valOffset, int valLength,
    byte[] key, int keyOffset, int keyLength,
    Timestamp timestamp,
    Partition partition,
    IEnumerable<IHeader> headers,
    IDeliveryHandler deliveryHandler)
{
    if (timestamp.Type != TimestampType.CreateTime)
    {
        if (timestamp != Timestamp.Default)
        {
            throw new ArgumentException("Timestamp must be either Timestamp.Default, or Timestamp.CreateTime.");
        }
    }

    ErrorCode err;
    if (this.enableDeliveryReports && deliveryHandler != null)
    {
        // Passes the TaskCompletionSource to the delivery report callback via the msg_opaque pointer.
        // Note: There is a level of indirection between the GCHandle and physical memory
        // address. GCHandle.ToIntPtr doesn't get the physical address; it gets an id that
        // refers to the object via a handle-table.
        var gch = GCHandle.Alloc(deliveryHandler);
        var ptr = GCHandle.ToIntPtr(gch);

        err = KafkaHandle.Produce(
            topic,
            val, valOffset, valLength,
            key, keyOffset, keyLength,
            partition.Value,
            timestamp.UnixTimestampMs,
            headers,
            ptr);

        if (err != ErrorCode.NoError)
        {
            // Note: freed in the delivery handler callback otherwise.
            gch.Free();
        }
    }
    else
    {
        err = KafkaHandle.Produce(
            topic,
            val, valOffset, valLength,
            key, keyOffset, keyLength,
            partition.Value,
            timestamp.UnixTimestampMs,
            headers,
            IntPtr.Zero);
    }

    if (err != ErrorCode.NoError)
    {
        throw new KafkaException(KafkaHandle.CreatePossiblyFatalError(err, null));
    }
}
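// ProduceImpl is likewise internal: the public Produce and ProduceAsync methods funnel
// into it, supplying either a callback or a TaskCompletionSource as the IDeliveryHandler
// that rides along on the msg_opaque pointer. Below is a minimal sketch of both public
// entry points; the broker address and topic name are placeholder assumptions.
using System;
using System.Threading.Tasks;
using Confluent.Kafka;

class ProduceDemo
{
    public static async Task Main()
    {
        var config = new ProducerConfig
        {
            BootstrapServers = "localhost:9092" // placeholder broker
        };

        using var producer = new ProducerBuilder<string, string>(config).Build();

        // Task-based path: ProduceAsync surfaces the delivery report as a Task result.
        var result = await producer.ProduceAsync(
            "demo-topic", // placeholder topic
            new Message<string, string> { Key = "k1", Value = "v1" });
        Console.WriteLine($"Delivered to {result.TopicPartitionOffset}");

        // Callback path: Produce invokes the handler when the delivery report arrives.
        producer.Produce(
            "demo-topic", // placeholder topic
            new Message<string, string> { Key = "k2", Value = "v2" },
            report => Console.WriteLine(report.Error.IsError
                ? $"Delivery failed: {report.Error.Reason}"
                : $"Delivered to {report.TopicPartitionOffset}"));

        // Wait for outstanding delivery reports before the producer is disposed.
        producer.Flush(TimeSpan.FromSeconds(10));
    }
}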