/// <summary>
///     Initializes a new SerializingProducer wrapping <paramref name="producer" />:
///     forwards its OnLog / OnError / OnStatistics events and records the key/value
///     serializers used for subsequent produce calls.
/// </summary>
/// <param name="producer">The underlying byte-oriented producer to wrap.</param>
/// <param name="keySerializer">
///     Serializer for message keys. May be null only when TKey is the Null sentinel type.
/// </param>
/// <param name="valueSerializer">
///     Serializer for message values. May be null only when TValue is the Null sentinel type.
/// </param>
/// <exception cref="ArgumentNullException">
///     Thrown when a serializer is null but the corresponding type parameter is not Null.
/// </exception>
public SerializingProducer(Producer producer, ISerializer<TKey> keySerializer, ISerializer<TValue> valueSerializer)
{
    this.producer = producer;
    KeySerializer = keySerializer;
    ValueSerializer = valueSerializer;

    // TODO: allow serializers to be set in the producer config IEnumerable<KeyValuePair<string, object>>.

    // A null serializer is only valid for the Null sentinel type. Use the
    // (paramName, message) overload: the single-string ArgumentNullException
    // constructor treats its argument as the *parameter name*, not the message,
    // which previously produced a garbled exception text.
    if (KeySerializer == null && typeof(TKey) != typeof(Null))
    {
        throw new ArgumentNullException(nameof(keySerializer), "Key serializer must be specified.");
    }

    if (ValueSerializer == null && typeof(TValue) != typeof(Null))
    {
        throw new ArgumentNullException(nameof(valueSerializer), "Value serializer must be specified.");
    }

    // Re-raise the wrapped producer's events on this instance.
    producer.OnLog += (sender, e) => OnLog?.Invoke(sender, e);
    producer.OnError += (sender, e) => OnError?.Invoke(sender, e);
    producer.OnStatistics += (sender, e) => OnStatistics?.Invoke(sender, e);
}
/// <summary>
///     librdkafka statistics callback: surfaces the emitted statistics JSON
///     to OnStatistics subscribers, unless the handle is already closed.
/// </summary>
private int StatsCallback(IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr opaque)
{
    // Ensure registered handlers are never called as a side-effect of
    // Dispose/Finalize (prevents deadlocks in common scenarios).
    if (!kafkaHandle.IsClosed)
    {
        OnStatistics?.Invoke(this, Util.Marshal.PtrToStringUTF8(json));
    }

    // instruct librdkafka to immediately free the json ptr.
    return 0;
}
/// <summary>
///     Initializes a new deserializing Consumer: wraps a byte-oriented Consumer
///     built from <paramref name="config" />, forwards all of its events, and
///     deserializes keys/values of incoming messages.
/// </summary>
/// <param name="config">librdkafka configuration parameters for the inner consumer.</param>
/// <param name="keyDeserializer">
///     Deserializer for message keys. May be null only when TKey is the Null sentinel
///     type, in which case a NullDeserializer is substituted.
/// </param>
/// <param name="valueDeserializer">
///     Deserializer for message values. May be null only when TValue is the Null
///     sentinel type, in which case a NullDeserializer is substituted.
/// </param>
/// <exception cref="ArgumentNullException">
///     Thrown when a deserializer is null but the corresponding type parameter is not Null.
/// </exception>
public Consumer(
    IEnumerable<KeyValuePair<string, object>> config,
    IDeserializer<TKey> keyDeserializer,
    IDeserializer<TValue> valueDeserializer)
{
    KeyDeserializer = keyDeserializer;
    ValueDeserializer = valueDeserializer;

    // TODO: allow deserializers to be set in the consumer config IEnumerable<KeyValuePair<string, object>>.

    // Use the (paramName, message) overload: the single-string ArgumentNullException
    // constructor treats its argument as the *parameter name*, not the message.
    if (KeyDeserializer == null)
    {
        if (typeof(TKey) != typeof(Null))
        {
            throw new ArgumentNullException(nameof(keyDeserializer), "Key deserializer must be specified.");
        }

        // TKey == Null -> cast is always valid.
        KeyDeserializer = (IDeserializer<TKey>)new NullDeserializer();
    }

    if (ValueDeserializer == null)
    {
        if (typeof(TValue) != typeof(Null))
        {
            throw new ArgumentNullException(nameof(valueDeserializer), "Value deserializer must be specified.");
        }

        // TValue == Null -> cast is always valid.
        ValueDeserializer = (IDeserializer<TValue>)new NullDeserializer();
    }

    consumer = new Consumer(config);

    // Re-raise the inner consumer's events on this instance.
    consumer.OnLog += (sender, e) => OnLog?.Invoke(sender, e);
    consumer.OnError += (sender, e) => OnError?.Invoke(sender, e);
    consumer.OnStatistics += (sender, e) => OnStatistics?.Invoke(sender, e);
    consumer.OnPartitionsAssigned += (sender, e) => OnPartitionsAssigned?.Invoke(sender, e);
    consumer.OnPartitionsRevoked += (sender, e) => OnPartitionsRevoked?.Invoke(sender, e);
    consumer.OnOffsetCommit += (sender, e) => OnOffsetCommit?.Invoke(sender, e);

    // TODO: bypass this.consumer for this event to optimize perf.
    consumer.OnMessage += (sender, e) => OnMessage?.Invoke(sender,
        new Message<TKey, TValue>(
            e.Topic,
            e.Partition,
            e.Offset,
            KeyDeserializer.Deserialize(e.Key),
            ValueDeserializer.Deserialize(e.Value),
            e.Timestamp,
            e.Error
        )
    );

    consumer.OnPartitionEOF += (sender, e) => OnPartitionEOF?.Invoke(sender, e);
}
/// <summary>
///     Registers the error, log and statistics callbacks on the librdkafka
///     configuration handle, then creates the underlying kafka handle and
///     starts the background callback-polling task.
/// </summary>
/// <param name="type">Whether to create a producer or consumer handle.</param>
/// <param name="config">Native librdkafka configuration pointer.</param>
/// <param name="logger">
///     Optional log sink; when null a default console logger is installed.
/// </param>
internal void Init(RdKafkaType type, IntPtr config, Config.LogCallback logger)
{
    // NOTE(review): the delegates are assigned to instance fields (ErrorDelegate,
    // LogDelegate, StatsDelegate) presumably so the GC cannot collect them while
    // native code still holds the callback pointers — confirm the fields live as
    // long as the native handle.
    ErrorDelegate = (IntPtr rk, ErrorCode err, string reason, IntPtr opaque) =>
    {
        OnError?.Invoke(this, new ErrorArgs()
        {
            ErrorCode = err,
            Reason = reason
        });
    };
    LibRdKafka.conf_set_error_cb(config, ErrorDelegate);

    if (logger == null)
    {
        // Default logger: "level|timestamp|handle|facility| message" to stdout.
        logger = ((string handle, int level, string fac, string buf) =>
        {
            var now = DateTime.Now.ToString("yyyy-MM-dd HH:mm:ss.fff");
            Console.WriteLine($"{level}|{now}|{handle}|{fac}| {buf}");
        });
    }

    LogDelegate = (IntPtr rk, int level, string fac, string buf) =>
    {
        // The log_cb is called very early during construction, before
        // SafeKafkaHandle or any of the C# wrappers are ready.
        // So we can't really pass rk on, just pass the rk name instead.
        var name = Marshal.PtrToStringAnsi(LibRdKafka.name(rk));
        logger(name, level, fac, buf);
    };
    LibRdKafka.conf_set_log_cb(config, LogDelegate);

    StatsDelegate = (IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr opaque) =>
    {
        // NOTE(review): librdkafka statistics JSON is UTF-8; PtrToStringAnsi will
        // mangle any non-ASCII bytes — consider a UTF-8 decode instead (confirm
        // against the marshalling helpers used elsewhere in this codebase).
        OnStatistics?.Invoke(this, Marshal.PtrToStringAnsi(json));
        // Returning 0 instructs librdkafka to free the json pointer.
        return(0);
    };
    LibRdKafka.conf_set_stats_cb(config, StatsDelegate);

    // Callbacks must be registered on `config` before handle creation; Create
    // consumes the configuration.
    handle = SafeKafkaHandle.Create(type, config);

    // Start the long-running task that polls librdkafka and dispatches the
    // callbacks registered above; cancelled via callbackCts on shutdown.
    callbackCts = new CancellationTokenSource();
    callbackTask = StartCallbackTask(callbackCts.Token);
}
/// <summary>
///     Builds a ConsumerBuilder configured with this instance's deserializers,
///     error/statistics handlers, and (optionally) a partitions-assigned handler.
/// </summary>
/// <returns>The configured builder; callers invoke Build() on it.</returns>
/// <exception cref="ArgumentException">
///     Thrown when a partitions-assigned handler is combined with CommitEnable,
///     which are mutually exclusive.
/// </exception>
private ConsumerBuilder<K, T> CreateKafkaConsumerBuilder()
{
    // Fail fast before constructing the builder: a custom assignment handler
    // conflicts with automatic commit handling.
    if (partitionsAssignedHandle != null && CommitEnable)
    {
        // Fixed wording of the previously ungrammatical error message.
        throw new ArgumentException("The partitions assigned handler cannot be set if the " +
                                    "'CommitEnable' property was set to true.");
    }

    var kafkaConsumerBuilder = new ConsumerBuilder<K, T>(consumerConfig);
    kafkaConsumerBuilder.SetKeyDeserializer(keyDeserializer);
    kafkaConsumerBuilder.SetValueDeserializer(valueDeserializer);

    // Surface builder-level errors and statistics through this instance's events.
    kafkaConsumerBuilder.SetErrorHandler((_, e) => OnError?.Invoke(new StreamingError
    {
        IsFatal = e.IsFatal,
        Reason = e.Reason
    }));
    kafkaConsumerBuilder.SetStatisticsHandler((_, statistics) => OnStatistics?.Invoke(statistics));

    if (partitionsAssignedHandle != null)
    {
        kafkaConsumerBuilder.SetPartitionsAssignedHandler(partitionsAssignedHandle);
    }

    return kafkaConsumerBuilder;
}
/// <summary>
///     librdkafka statistics callback: decodes the statistics JSON and raises
///     the OnStatistics event with it.
/// </summary>
private int StatsCallback(IntPtr rk, IntPtr json, UIntPtr json_len, IntPtr opaque)
{
    var statsJson = Util.Marshal.PtrToStringUTF8(json);
    OnStatistics?.Invoke(this, statsJson);

    // instruct librdkafka to immediately free the json ptr.
    return 0;
}
/// <summary>
///     Produces every item of <paramref name="messages" /> to the configured topic,
///     retrying failed produces with exponential backoff, flushing and raising
///     OnCommit periodically (every CommitTimeoutMs) and once more at the end.
/// </summary>
/// <typeparam name="M">Source element type, projected to a Kafka message via the supplied selector.</typeparam>
/// <param name="messages">Items to publish. Enumerated exactly once.</param>
/// <param name="getMessage">Projects a source item into the Kafka message to produce.</param>
/// <param name="cancellationToken">Stops enumeration, retries and the final commit when signalled.</param>
private void DumpMessages<M>(IEnumerable<M> messages, Func<M, Message<K, T>> getMessage, CancellationToken cancellationToken)
{
    var producerBuilder = new ProducerBuilder<K, T>(producerConfig);
    producerBuilder.SetKeySerializer(keySerializer);
    producerBuilder.SetValueSerializer(valueSerializer);
    // Surface producer-level errors and statistics through this instance's events.
    producerBuilder.SetErrorHandler((_, e) => OnError?.Invoke(new StreamingError
    {
        IsFatal = e.IsFatal,
        Reason = e.Reason
    }));
    producerBuilder.SetStatisticsHandler((_, statistics) => OnStatistics?.Invoke(statistics));

    // Measures time since the last commit; restarted after each periodic flush.
    Stopwatch processTime = Stopwatch.StartNew();

    using (var p = producerBuilder.Build())
    {
        foreach (M message in messages)
        {
            if (cancellationToken.IsCancellationRequested)
            {
                break;
            }

            // Retry forever on ProduceException with exponential backoff:
            // 100ms * 2^attempt, capped at 10s. Each retry is reported via OnError.
            Policy.Handle<ProduceException<K, T>>()
                .WaitAndRetryForever(retryAttempt => TimeSpan.FromMilliseconds(Math.Min(100 * Math.Pow(2, retryAttempt), 10000)),
                    (exception, timespan) =>
                    {
                        // Safe cast: the policy only handles ProduceException<K, T>.
                        var kafkaException = exception as ProduceException<K, T>;
                        OnError?.Invoke(new StreamingError
                        {
                            IsFatal = kafkaException.Error.IsFatal,
                            Reason = $"{kafkaException.Error.Reason}. The message with key {kafkaException.DeliveryResult.Key} in topic {kafkaException.DeliveryResult.Topic} will be resent on {timespan.TotalMilliseconds} ms."
                        });
                    })
                .Execute(() =>
                {
                    if (!cancellationToken.IsCancellationRequested)
                    {
                        // Fire-and-forget produce; delivery errors are reported
                        // asynchronously through the delivery callback below.
                        p.Produce(this.topic, getMessage.Invoke(message), r =>
                        {
                            if (r.Error.IsError)
                            {
                                OnError?.Invoke(new StreamingError
                                {
                                    IsFatal = r.Error.IsFatal,
                                    Reason = r.Error.Reason
                                });
                            }
                        });
                    }
                });

            // Periodic checkpoint: flush outstanding produces, then signal commit.
            // OnCommit is only raised when the flush completed without cancellation,
            // preserving the flush-before-commit invariant.
            if (processTime.ElapsedMilliseconds >= CommitTimeoutMs)
            {
                p.Flush(cancellationToken);
                if (!cancellationToken.IsCancellationRequested)
                {
                    OnCommit?.Invoke();
                    processTime.Restart();
                }
            }
        }

        // Final drain + commit for the tail of the batch (same invariant as above).
        p.Flush(cancellationToken);
        if (!cancellationToken.IsCancellationRequested)
        {
            OnCommit?.Invoke();
        }
    }
}