/// <summary>
/// Asynchronously send a single message to a Kafka topic/partition.
/// </summary>
/// <param name="topicPartition">
/// The topic/partition to produce the message to.
/// </param>
/// <param name="message">
/// The message to produce.
/// </param>
/// <param name="cancellationToken">
/// A cancellation token that can be used to abort this request.
/// </param>
/// <returns>
/// A Task which will complete with a delivery report corresponding to
/// the produce request, or an exception if an error occurred.
/// </returns>
public Task<DeliveryResult<TKey, TValue>> ProduceAsync(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    CancellationToken cancellationToken = default(CancellationToken))
{
    var keyBytes = (keySerializer != null)
        ? keySerializer.Serialize(message.Key, true, message, topicPartition)
        : taskKeySerializer.SerializeAsync(message.Key, true, message, topicPartition)
            .ConfigureAwait(continueOnCapturedContext: false)
            .GetAwaiter()
            .GetResult();

    var valBytes = (valueSerializer != null)
        ? valueSerializer.Serialize(message.Value, false, message, topicPartition)
        : taskValueSerializer.SerializeAsync(message.Value, false, message, topicPartition)
            .ConfigureAwait(continueOnCapturedContext: false)
            .GetAwaiter()
            .GetResult();

    if (this.enableDeliveryReports)
    {
        var handler = new TypedTaskDeliveryHandlerShim<TKey, TValue>(
            topicPartition.Topic,
            enableDeliveryReportKey ? message.Key : default(TKey),
            enableDeliveryReportValue ? message.Value : default(TValue));

        cancellationToken.Register(() => handler.TrySetException(new TaskCanceledException()));

        base.Produce(
            topicPartition.Topic,
            valBytes, 0, valBytes == null ? 0 : valBytes.Length,
            keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
            message.Timestamp, topicPartition.Partition, message.Headers,
            handler);

        return handler.Task;
    }
    else
    {
        base.Produce(
            topicPartition.Topic,
            valBytes, 0, valBytes == null ? 0 : valBytes.Length,
            keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
            message.Timestamp, topicPartition.Partition, message.Headers,
            null);

        var result = new DeliveryResult<TKey, TValue>
        {
            TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Invalid),
            Message = message
        };

        return Task.FromResult(result);
    }
}
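// A minimal usage sketch (not from the source; assumes the 1.x ProducerBuilder API,
// and "my-topic"/the config values are placeholders). Awaiting the returned task
// surfaces broker acknowledgement, or a failure, for the produce request. Depending
// on the client version, failures surface as ProduceException or KafkaException.
// (Run inside an async method.)
var config = new ProducerConfig { BootstrapServers = "localhost:9092" };
using (var producer = new ProducerBuilder<string, string>(config).Build())
{
    try
    {
        var result = await producer.ProduceAsync(
            new TopicPartition("my-topic", Partition.Any),
            new Message<string, string> { Key = "k", Value = "v" });
        Console.WriteLine($"Delivered to {result.TopicPartitionOffset}");
    }
    catch (ProduceException<string, string> ex)
    {
        Console.WriteLine($"Delivery failed: {ex.Error.Reason}");
    }
}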
/// <inheritdoc/>
public Offset Position(TopicPartition partition)
{
    try
    {
        return kafkaHandle.Position(new List<TopicPartition> { partition }).First().Offset;
    }
    catch (TopicPartitionOffsetException e)
    {
        throw new KafkaException(e.Results[0].Error);
    }
}
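// A minimal usage sketch (assumed consumer setup, not from the source): Position
// reports the offset of the next message the consumer will fetch from a partition,
// i.e. the last consumed offset + 1 (it may be Offset.Unset before anything is consumed).
var consumeResult = consumer.Consume(TimeSpan.FromSeconds(5)); // null on timeout
if (consumeResult != null)
{
    Offset position = consumer.Position(consumeResult.TopicPartition);
    // Expected: position == consumeResult.Offset + 1.
}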
/// <summary>
/// Asynchronously send a single message to a Kafka topic/partition.
/// </summary>
/// <param name="topicPartition">
/// The topic/partition to produce the message to.
/// </param>
/// <param name="message">
/// The message to produce.
/// </param>
/// <param name="deliveryHandler">
/// A delegate that will be called with a delivery report corresponding
/// to the produce request (if enabled).
/// </param>
public void BeginProduce(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    Action<DeliveryReportResult<TKey, TValue>> deliveryHandler = null)
{
    var keyBytes = keySerializer(topicPartition.Topic, message.Key);
    var valBytes = valueSerializer(topicPartition.Topic, message.Value);

    producer.ProduceImpl(
        topicPartition.Topic,
        valBytes, 0, valBytes == null ? 0 : valBytes.Length,
        keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
        message.Timestamp, topicPartition.Partition, message.Headers,
        new TypedDeliveryHandlerShim_Action(
            topicPartition.Topic,
            producer.enableDeliveryReportKey ? message.Key : default(TKey),
            producer.enableDeliveryReportValue ? message.Value : default(TValue),
            deliveryHandler));
}
/// <summary>
/// Asynchronously send a single message to a Kafka topic/partition.
/// </summary>
/// <param name="topicPartition">
/// The topic/partition to produce the message to.
/// </param>
/// <param name="message">
/// The message to produce.
/// </param>
/// <param name="cancellationToken">
/// A cancellation token that can be used to abort this request.
/// </param>
/// <returns>
/// A Task which will complete with a delivery report corresponding to
/// the produce request, or an exception if an error occurred.
/// </returns>
public Task<DeliveryReport<TKey, TValue>> ProduceAsync(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    CancellationToken cancellationToken = default(CancellationToken))
{
    if (this.producer.enableDeliveryReports)
    {
        var handler = new TypedTaskDeliveryHandlerShim(
            topicPartition.Topic,
            producer.enableDeliveryReportKey ? message.Key : default(TKey),
            producer.enableDeliveryReportValue ? message.Value : default(TValue));

        cancellationToken.Register(() => handler.TrySetException(new TaskCanceledException()));

        var keyBytes = keySerializer(topicPartition.Topic, message.Key);
        var valBytes = valueSerializer(topicPartition.Topic, message.Value);

        producer.ProduceImpl(
            topicPartition.Topic,
            valBytes, 0, valBytes == null ? 0 : valBytes.Length,
            keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
            message.Timestamp, topicPartition.Partition, message.Headers,
            handler);

        return handler.Task;
    }
    else
    {
        var keyBytes = keySerializer(topicPartition.Topic, message.Key);
        var valBytes = valueSerializer(topicPartition.Topic, message.Value);

        producer.ProduceImpl(
            topicPartition.Topic,
            valBytes, 0, valBytes == null ? 0 : valBytes.Length,
            keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
            message.Timestamp, topicPartition.Partition, message.Headers,
            null);

        var result = new DeliveryReport<TKey, TValue>
        {
            TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Invalid),
            Message = message
        };

        return Task.FromResult(result);
    }
}
/// <summary>
/// Asynchronously send a single message to a Kafka topic/partition.
/// </summary>
/// <param name="topicPartition">
/// The topic/partition to produce the message to.
/// </param>
/// <param name="message">
/// The message to produce.
/// </param>
/// <param name="deliveryHandler">
/// A delegate that will be called with a delivery report corresponding
/// to the produce request (if enabled).
/// </param>
public void BeginProduce(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    Action<DeliveryReport<TKey, TValue>> deliveryHandler = null)
{
    if (deliveryHandler != null && !enableDeliveryReports)
    {
        throw new ArgumentException("A delivery handler was specified, but delivery reports are disabled.");
    }

    var keyBytes = (keySerializer != null)
        ? keySerializer.Serialize(message.Key, true, message, topicPartition)
        : taskKeySerializer.SerializeAsync(message.Key, true, message, topicPartition)
            .ConfigureAwait(continueOnCapturedContext: false)
            .GetAwaiter()
            .GetResult();

    var valBytes = (valueSerializer != null)
        ? valueSerializer.Serialize(message.Value, false, message, topicPartition)
        : taskValueSerializer.SerializeAsync(message.Value, false, message, topicPartition)
            .ConfigureAwait(continueOnCapturedContext: false)
            .GetAwaiter()
            .GetResult();

    base.Produce(
        topicPartition.Topic,
        valBytes, 0, valBytes == null ? 0 : valBytes.Length,
        keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
        message.Timestamp, topicPartition.Partition, message.Headers,
        new TypedDeliveryHandlerShim_Action<TKey, TValue>(
            topicPartition.Topic,
            enableDeliveryReportKey ? message.Key : default(TKey),
            enableDeliveryReportValue ? message.Value : default(TValue),
            deliveryHandler));
}
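// A minimal usage sketch (assumed producer setup, not from the source): BeginProduce
// enqueues the message and returns immediately; the delivery handler runs later on
// the client's poll/callback thread. Flush before disposing to drain in-flight messages.
producer.BeginProduce(
    new TopicPartition("my-topic", 0),
    new Message<string, string> { Key = "k", Value = "v" },
    deliveryReport =>
    {
        if (deliveryReport.Error.IsError)
            Console.WriteLine($"Delivery failed: {deliveryReport.Error.Reason}");
        else
            Console.WriteLine($"Delivered to {deliveryReport.TopicPartitionOffset}");
    });

producer.Flush(TimeSpan.FromSeconds(10));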
public WatermarkOffsets QueryWatermarkOffsets(TopicPartition topicPartition) => consumer.QueryWatermarkOffsets(topicPartition);
public double Deserialize(ReadOnlySpan<byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source)
{
    if (isNull)
    {
        throw new DeserializationException("Null data encountered deserializing a double value.");
    }
    if (data.Length != 8)
    {
        throw new DeserializationException($"Deserializer<double> encountered data of length {data.Length}. Expecting data length to be 8.");
    }

    // network byte order -> big endian -> most significant byte in the smallest address.
    if (BitConverter.IsLittleEndian)
    {
        unsafe
        {
            double result = default(double);
            byte* p = (byte*)(&result);
            *p++ = data[7];
            *p++ = data[6];
            *p++ = data[5];
            *p++ = data[4];
            *p++ = data[3];
            *p++ = data[2];
            *p++ = data[1];
            *p++ = data[0];
            return result;
        }
    }
    else
    {
        try
        {
#if NETCOREAPP2_1
            return BitConverter.ToDouble(data);
#else
            return BitConverter.ToDouble(data.ToArray(), 0);
#endif
        }
        catch (Exception e)
        {
            throw new DeserializationException("Error occurred deserializing double value.", e);
        }
    }
}
public WatermarkOffsets QueryWatermarkOffsets(TopicPartition topicPartition, TimeSpan timeout) => consumer.QueryWatermarkOffsets(topicPartition, timeout);
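// A minimal usage sketch (assumed consumer setup, not from the source): watermark
// offsets bound the partition's log — Low is the first available offset and High is
// one past the last message — so comparing High with the consumer's position gives lag.
// (Position may be Offset.Unset if nothing has been consumed from the partition yet.)
var tp = new TopicPartition("my-topic", 0);
WatermarkOffsets watermarks = consumer.QueryWatermarkOffsets(tp, TimeSpan.FromSeconds(5));
long lag = watermarks.High.Value - consumer.Position(tp).Value;
Console.WriteLine($"low: {watermarks.Low}, high: {watermarks.High}, lag: {lag}");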
/// <summary>
/// Refer to <see cref="Confluent.Kafka.IProducer{TKey,TValue}.ProduceAsync(TopicPartition, Message{TKey, TValue})" />
/// </summary>
public async Task<DeliveryResult<TKey, TValue>> ProduceAsync(
    TopicPartition topicPartition,
    Message<TKey, TValue> message)
{
    byte[] keyBytes;
    try
    {
        keyBytes = (keySerializer != null)
            ? keySerializer(message.Key)
            : await asyncKeySerializer.SerializeAsync(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic));
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_KeySerialization),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            },
            ex);
    }

    byte[] valBytes;
    try
    {
        valBytes = (valueSerializer != null)
            ? valueSerializer(message.Value)
            : await asyncValueSerializer.SerializeAsync(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic));
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_ValueSerialization),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            },
            ex);
    }

    try
    {
        if (enableDeliveryReports)
        {
            var handler = new TypedTaskDeliveryHandlerShim<TKey, TValue>(
                topicPartition.Topic,
                enableDeliveryReportKey ? message.Key : default(TKey),
                enableDeliveryReportValue ? message.Value : default(TValue));

            ProduceImpl(
                topicPartition.Topic,
                valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                message.Timestamp, topicPartition.Partition, message.Headers,
                handler);

            return await handler.Task;
        }
        else
        {
            ProduceImpl(
                topicPartition.Topic,
                valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                message.Timestamp, topicPartition.Partition, message.Headers,
                null);

            var result = new DeliveryResult<TKey, TValue>
            {
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
                Message = message
            };

            return result;
        }
    }
    catch (KafkaException ex)
    {
        throw new ProduceException<TKey, TValue>(
            ex.Error,
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            });
    }
}
public byte[] Deserialize(ReadOnlySpan<byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source)
{
    if (isNull)
    {
        return null;
    }
    return data.ToArray();
}
public WatermarkOffsets QueryWatermarkOffsets(TopicPartition topicPartition) => kafkaHandle.QueryWatermarkOffsets(topicPartition.Topic, topicPartition.Partition, -1);
/// <inheritdoc/>
public async Task<DeliveryResult<TKey, TValue>> ProduceAsync(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    CancellationToken cancellationToken)
{
    Headers headers = message.Headers ?? new Headers();

    byte[] keyBytes;
    try
    {
        keyBytes = (keySerializer != null)
            ? keySerializer.Serialize(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic, headers))
            : await asyncKeySerializer.SerializeAsync(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic, headers)).ConfigureAwait(false);
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_KeySerialization),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            },
            ex);
    }

    byte[] valBytes;
    try
    {
        valBytes = (valueSerializer != null)
            ? valueSerializer.Serialize(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic, headers))
            : await asyncValueSerializer.SerializeAsync(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic, headers)).ConfigureAwait(false);
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_ValueSerialization),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            },
            ex);
    }

    try
    {
        if (enableDeliveryReports)
        {
            var handler = new TypedTaskDeliveryHandlerShim(
                topicPartition.Topic,
                enableDeliveryReportKey ? message.Key : default(TKey),
                enableDeliveryReportValue ? message.Value : default(TValue));

            // Note: CancellationToken is a struct, so comparing it to null is always
            // true; CanBeCanceled is the meaningful check.
            if (cancellationToken.CanBeCanceled)
            {
                handler.CancellationTokenRegistration = cancellationToken.Register(() => handler.TrySetCanceled());
            }

            ProduceImpl(
                topicPartition.Topic,
                valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                message.Timestamp, topicPartition.Partition, headers,
                handler);

            return await handler.Task.ConfigureAwait(false);
        }
        else
        {
            ProduceImpl(
                topicPartition.Topic,
                valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                message.Timestamp, topicPartition.Partition, headers,
                null);

            var result = new DeliveryResult<TKey, TValue>
            {
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
                Message = message
            };

            return result;
        }
    }
    catch (KafkaException ex)
    {
        throw new ProduceException<TKey, TValue>(
            ex.Error,
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            });
    }
}
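// A minimal usage sketch (assumed producer setup, not from the source): a cancelled
// token transitions the returned task to the Canceled state via the registration above.
// Note this only abandons waiting for the delivery report; it does not retract a
// message that was already enqueued. (Run inside an async method.)
using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)))
{
    try
    {
        var result = await producer.ProduceAsync(
            new TopicPartition("my-topic", Partition.Any),
            new Message<string, string> { Key = "k", Value = "v" },
            cts.Token);
        Console.WriteLine($"Delivered to {result.TopicPartitionOffset}");
    }
    catch (OperationCanceledException)
    {
        Console.WriteLine("Gave up waiting for the delivery report.");
    }
}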
public string Deserialize(ReadOnlySpan<byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source)
{
    if (isNull)
    {
        return null;
    }

    try
    {
#if NETCOREAPP2_1
        return Encoding.UTF8.GetString(data);
#else
        return Encoding.UTF8.GetString(data.ToArray());
#endif
    }
    catch (Exception e)
    {
        throw new DeserializationException("Error occurred deserializing UTF8 string value.", e);
    }
}
/// <summary>
/// Asynchronously send a single message to a Kafka topic/partition.
/// </summary>
/// <param name="topicPartition">
/// The topic/partition to produce the message to.
/// </param>
/// <param name="message">
/// The message to produce.
/// </param>
/// <returns>
/// A Task which will complete with a delivery report corresponding to
/// the produce request, or an exception if an error occurred.
/// </returns>
public Task<DeliveryResult<TKey, TValue>> ProduceAsync(
    TopicPartition topicPartition,
    Message<TKey, TValue> message)
{
    byte[] keyBytes;
    try
    {
        keyBytes = (keySerializer != null)
            ? keySerializer.Serialize(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic))
            : asyncKeySerializer.SerializeAsync(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic))
                .ConfigureAwait(continueOnCapturedContext: false)
                .GetAwaiter()
                .GetResult();
    }
    catch (Exception exception)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_KeySerialization),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Invalid)
            },
            exception);
    }

    byte[] valBytes;
    try
    {
        valBytes = (valueSerializer != null)
            ? valueSerializer.Serialize(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic))
            : asyncValueSerializer.SerializeAsync(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic))
                .ConfigureAwait(continueOnCapturedContext: false)
                .GetAwaiter()
                .GetResult();
    }
    catch (Exception exception)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_ValueSerialization),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Invalid)
            },
            exception);
    }

    try
    {
        if (enableDeliveryReports)
        {
            var handler = new TypedTaskDeliveryHandlerShim<TKey, TValue>(
                topicPartition.Topic,
                enableDeliveryReportKey ? message.Key : default(TKey),
                enableDeliveryReportValue ? message.Value : default(TValue));

            ProduceImpl(
                topicPartition.Topic,
                valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                message.Timestamp, topicPartition.Partition, message.Headers,
                handler);

            return handler.Task;
        }
        else
        {
            ProduceImpl(
                topicPartition.Topic,
                valBytes, 0, valBytes == null ? 0 : valBytes.Length,
                keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
                message.Timestamp, topicPartition.Partition, message.Headers,
                null);

            var result = new DeliveryResult<TKey, TValue>
            {
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Invalid),
                Message = message
            };

            return Task.FromResult(result);
        }
    }
    catch (KafkaException ex)
    {
        throw new ProduceException<TKey, TValue>(
            ex.Error,
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Invalid)
            });
    }

    // We want other exceptions (ArgumentException, InvalidOperationException) to propagate up.
}
public Null Deserialize(ReadOnlySpan<byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source)
{
    if (!isNull)
    {
        throw new DeserializationException("Deserializer<Null> may only be used to deserialize data that is null.");
    }
    return null;
}
public long Deserialize(ReadOnlySpan<byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source)
{
    if (isNull)
    {
        throw new DeserializationException("Null data encountered deserializing an Int64 value.");
    }
    if (data.Length != 8)
    {
        throw new DeserializationException($"Deserializer<Long> encountered data of length {data.Length}. Expecting data length to be 8.");
    }

    // network byte order -> big endian -> most significant byte in the smallest address.
    long result =
        ((long)data[0]) << 56 |
        ((long)data[1]) << 48 |
        ((long)data[2]) << 40 |
        ((long)data[3]) << 32 |
        ((long)data[4]) << 24 |
        ((long)data[5]) << 16 |
        ((long)data[6]) << 8 |
        (long)data[7];
    return result;
}
public Ignore Deserialize(ReadOnlySpan <byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source) => null;
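// A minimal usage sketch (assumed 1.x ConsumerBuilder API; config values and topic
// name are placeholders, not from the source): Ignore as the key type skips key
// deserialization entirely — the bytes are discarded and Message.Key is always null —
// which is useful when only the values of a keyed topic matter.
var config = new ConsumerConfig { BootstrapServers = "localhost:9092", GroupId = "g1" };
using (var consumer = new ConsumerBuilder<Ignore, string>(config).Build())
{
    consumer.Subscribe("my-topic");
    var cr = consumer.Consume(TimeSpan.FromSeconds(5)); // null on timeout
    if (cr != null)
    {
        Console.WriteLine(cr.Message.Value); // cr.Message.Key is null here
    }
}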
/// <inheritdoc/>
public void Assign(TopicPartition partition)
    => Assign(new List<TopicPartition> { partition });
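// A minimal usage sketch (not from the source): Assign bypasses consumer-group
// partition balancing — the consumer reads the given partition directly, starting
// from the configured auto.offset.reset point (an overload taking TopicPartitionOffset
// can be used instead to start from a specific offset).
consumer.Assign(new TopicPartition("my-topic", 2));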
public byte[] Serialize(double data, bool isKey, MessageMetadata messageMetadata, TopicPartition destination)
{
    if (BitConverter.IsLittleEndian)
    {
        unsafe
        {
            byte[] result = new byte[8];
            byte* p = (byte*)(&data);
            result[7] = *p++;
            result[6] = *p++;
            result[5] = *p++;
            result[4] = *p++;
            result[3] = *p++;
            result[2] = *p++;
            result[1] = *p++;
            result[0] = *p++;
            return result;
        }
    }
    else
    {
        return BitConverter.GetBytes(data);
    }
}
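// A quick round-trip check (hypothetical instance names, not from the source): the
// serializer above writes the IEEE 754 bytes in network (big-endian) order, and the
// double deserializer earlier in this section inverts it exactly. The metadata and
// topic-partition arguments are unused by these implementations, hence null.
var doubleSerializer = new DoubleSerializer();     // hypothetical type names
var doubleDeserializer = new DoubleDeserializer();
byte[] wire = doubleSerializer.Serialize(Math.PI, false, null, null);
double roundTripped = doubleDeserializer.Deserialize(wire, false, false, null, null);
// roundTripped == Math.PI exactly: all 8 bytes are preserved.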
public WatermarkOffsets QueryWatermarkOffsets(TopicPartition topicPartition, TimeSpan timeout) => kafkaHandle.QueryWatermarkOffsets(topicPartition.Topic, topicPartition.Partition, timeout.TotalMillisecondsAsInt());
public byte[] Serialize(string data, bool isKey, MessageMetadata messageMetadata, TopicPartition destination)
{
    if (data == null)
    {
        return null;
    }
    return Encoding.UTF8.GetBytes(data);
}
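// A quick round-trip check (hypothetical instance names, not from the source): UTF-8
// handles non-ASCII text, and a null string maps to a null payload rather than an
// empty array, which the deserializer's isNull flag maps back to null.
byte[] wire = stringSerializer.Serialize("héllo", false, null, null);
string text = stringDeserializer.Deserialize(wire, wire == null, false, null, null);
// text == "héllo"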
public WatermarkOffsets GetWatermarkOffsets(TopicPartition topicPartition) => kafkaHandle.GetWatermarkOffsets(topicPartition.Topic, topicPartition.Partition);
public byte[] Serialize(Null data, bool isKey, MessageMetadata messageMetadata, TopicPartition destination) => null;
/// <inheritdoc/>
public void Produce(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    Action<DeliveryReport<TKey, TValue>> deliveryHandler = null)
{
    if (deliveryHandler != null && !enableDeliveryReports)
    {
        throw new InvalidOperationException("A delivery handler was specified, but delivery reports are disabled.");
    }

    Headers headers = message.Headers ?? new Headers();

    byte[] keyBytes;
    try
    {
        keyBytes = (keySerializer != null)
            ? keySerializer.Serialize(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic, headers))
            : throw new InvalidOperationException("Produce called with an IAsyncSerializer key serializer configured but an ISerializer is required.");
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_KeySerialization, ex.ToString()),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
            },
            ex);
    }

    byte[] valBytes;
    try
    {
        valBytes = (valueSerializer != null)
            ? valueSerializer.Serialize(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic, headers))
            : throw new InvalidOperationException("Produce called with an IAsyncSerializer value serializer configured but an ISerializer is required.");
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_ValueSerialization, ex.ToString()),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
            },
            ex);
    }

    try
    {
        ProduceImpl(
            topicPartition.Topic,
            valBytes, 0, valBytes == null ? 0 : valBytes.Length,
            keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
            message.Timestamp, topicPartition.Partition, headers,
            new TypedDeliveryHandlerShim_Action(
                topicPartition.Topic,
                enableDeliveryReportKey ? message.Key : default(TKey),
                enableDeliveryReportValue ? message.Value : default(TValue),
                deliveryHandler));
    }
    catch (KafkaException ex)
    {
        throw new ProduceException<TKey, TValue>(
            ex.Error,
            new DeliveryReport<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            });
    }
}
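// A minimal sketch (assumption: the library's Confluent.Kafka.SyncOverAsync helper,
// which is not shown in this source): Produce requires synchronous ISerializer
// instances — the guard above throws if only an IAsyncSerializer was configured.
// An async serializer can be adapted by blocking on the async path; MyRecord, config,
// and myAsyncValueSerializer are hypothetical placeholders.
using Confluent.Kafka.SyncOverAsync;

var producer = new ProducerBuilder<Null, MyRecord>(config)
    .SetValueSerializer(myAsyncValueSerializer.AsSyncOverAsync())
    .Build();
// Caution: the adapter blocks a thread per call; avoid using it on librdkafka
// callback threads, where blocking can deadlock.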
public byte[] Serialize(long data, bool isKey, MessageMetadata messageMetadata, TopicPartition destination)
{
    var result = new byte[8];
    result[0] = (byte)(data >> 56);
    result[1] = (byte)(data >> 48);
    result[2] = (byte)(data >> 40);
    result[3] = (byte)(data >> 32);
    result[4] = (byte)(data >> 24);
    result[5] = (byte)(data >> 16);
    result[6] = (byte)(data >> 8);
    result[7] = (byte)data;
    return result;
}
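// A worked example (hypothetical instance names, not from the source): big-endian
// ordering puts the most significant byte first, so 0x0102030405060708 serializes to
// { 1, 2, 3, 4, 5, 6, 7, 8 } and round-trips through the Int64 deserializer above.
byte[] wire = longSerializer.Serialize(0x0102030405060708L, false, null, null);
// wire is { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }
long back = longDeserializer.Deserialize(wire, false, false, null, null);
// back == 0x0102030405060708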
/// <summary>
/// Refer to <see cref="Confluent.Kafka.IProducer{TKey,TValue}.BeginProduce(TopicPartition, Message{TKey, TValue}, Action{DeliveryReport{TKey, TValue}})" />
/// </summary>
public void BeginProduce(
    TopicPartition topicPartition,
    Message<TKey, TValue> message,
    Action<DeliveryReport<TKey, TValue>> deliveryHandler = null)
{
    if (deliveryHandler != null && !enableDeliveryReports)
    {
        throw new InvalidOperationException("A delivery handler was specified, but delivery reports are disabled.");
    }

    byte[] keyBytes;
    try
    {
        keyBytes = (keySerializer != null)
            ? keySerializer(message.Key)
            : Task.Run(async () => await asyncKeySerializer.SerializeAsync(message.Key, new SerializationContext(MessageComponentType.Key, topicPartition.Topic)))
                .ConfigureAwait(continueOnCapturedContext: false)
                .GetAwaiter()
                .GetResult();
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_KeySerialization, ex.ToString()),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
            });
    }

    byte[] valBytes;
    try
    {
        valBytes = (valueSerializer != null)
            ? valueSerializer(message.Value)
            : Task.Run(async () => await asyncValueSerializer.SerializeAsync(message.Value, new SerializationContext(MessageComponentType.Value, topicPartition.Topic)))
                .ConfigureAwait(continueOnCapturedContext: false)
                .GetAwaiter()
                .GetResult();
    }
    catch (Exception ex)
    {
        throw new ProduceException<TKey, TValue>(
            new Error(ErrorCode.Local_ValueSerialization, ex.ToString()),
            new DeliveryResult<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset),
            });
    }

    try
    {
        ProduceImpl(
            topicPartition.Topic,
            valBytes, 0, valBytes == null ? 0 : valBytes.Length,
            keyBytes, 0, keyBytes == null ? 0 : keyBytes.Length,
            message.Timestamp, topicPartition.Partition, message.Headers,
            new TypedDeliveryHandlerShim_Action<TKey, TValue>(
                topicPartition.Topic,
                enableDeliveryReportKey ? message.Key : default(TKey),
                enableDeliveryReportValue ? message.Value : default(TValue),
                deliveryHandler));
    }
    catch (KafkaException ex)
    {
        throw new ProduceException<TKey, TValue>(
            ex.Error,
            new DeliveryReport<TKey, TValue>
            {
                Message = message,
                TopicPartitionOffset = new TopicPartitionOffset(topicPartition, Offset.Unset)
            });
    }
}
public byte[] Serialize(int data, bool isKey, MessageMetadata messageMetadata, TopicPartition destination)
{
    var result = new byte[4]; // int is always 32 bits on .NET.
    // network byte order -> big endian -> most significant byte in the smallest address.
    // Note: At the IL level, the conv.u1 operator is used to cast int to byte, which
    // truncates the high order bits if overflow occurs.
    // https://msdn.microsoft.com/en-us/library/system.reflection.emit.opcodes.conv_u1.aspx
    result[0] = (byte)(data >> 24);
    result[1] = (byte)(data >> 16); // & 0xff;
    result[2] = (byte)(data >> 8);  // & 0xff;
    result[3] = (byte)data;         // & 0xff;
    return result;
}
public WatermarkOffsets GetWatermarkOffsets(TopicPartition topicPartition) => consumer.GetWatermarkOffsets(topicPartition);
public int Deserialize(ReadOnlySpan<byte> data, bool isNull, bool isKey, MessageMetadata messageMetadata, TopicPartition source)
{
    if (isNull)
    {
        throw new DeserializationException("Null data encountered deserializing an Int32 value.");
    }
    if (data.Length != 4)
    {
        throw new DeserializationException($"Deserializer<Int32> encountered data of length {data.Length}. Expecting data length to be 4.");
    }

    // network byte order -> big endian -> most significant byte in the smallest address.
    return
        (((int)data[0]) << 24) |
        (((int)data[1]) << 16) |
        (((int)data[2]) << 8) |
        (int)data[3];
}