/// <summary>
/// Encodes a message set, optionally compressing it into a single wrapper message.
/// When <paramref name="codec"/> is <c>None</c> the messages are written directly with a
/// length prefix and 0 is returned. Otherwise the full message set is first encoded into a
/// temporary buffer, then compressed as the value of a single wrapper message, and the
/// number of bytes saved by compression is returned.
/// </summary>
/// <param name="writer">The destination writer.</param>
/// <param name="messages">The messages to encode, in order.</param>
/// <param name="codec">The compression codec to apply (or <c>None</c>).</param>
/// <returns>Bytes saved by compression (raw size minus compressed size); 0 when uncompressed.</returns>
public static int Write(this IKafkaWriter writer, IEnumerable <Message> messages, MessageCodec codec) {
    if (codec == MessageCodec.None) {
        // No compression: emit the raw message set, prefixed with its length.
        // NOTE(review): MarkForLength presumably reserves space and back-fills the
        // length on dispose — confirm against IKafkaWriter's implementation.
        using (writer.MarkForLength()) {
            writer.Write(messages);
        }
        return(0);
    }
    using (var messageWriter = new KafkaWriter()) {
        // Encode the uncompressed message set into a scratch buffer first so it can be
        // compressed as the value of one wrapper message (standard Kafka compression framing).
        messageWriter.Write(messages);
        var messageSet = messageWriter.ToSegment(false);
        using (writer.MarkForLength()) { // messageset
            writer.Write(0L); // offset
            using (writer.MarkForLength()) { // message
                using (writer.MarkForCrc()) {
                    writer.Write((byte)0) // message version
                        .Write((byte)codec) // attribute
                        .Write(-1); // key -- null, so -1 length
                    using (writer.MarkForLength()) { // value
                        var initialPosition = writer.Position;
                        writer.WriteCompressed(messageSet, codec);
                        var compressedMessageLength = writer.Position - initialPosition;
                        // Return inside the usings: markers are still disposed (in reverse
                        // nesting order) after the return value is computed.
                        return(messageSet.Count - compressedMessageLength);
                    }
                }
            }
        }
    }
}
/// <summary>
/// Writes each message in sequence, preceding every one with a zero offset
/// and a length prefix around its encoded body.
/// </summary>
/// <param name="writer">The destination writer.</param>
/// <param name="messages">The messages to write, in order.</param>
/// <returns>The same writer, to allow call chaining.</returns>
public static IKafkaWriter Write(this IKafkaWriter writer, IEnumerable <Message> messages)
{
    foreach (var current in messages)
    {
        writer.Write(0L); // offset
        using (writer.MarkForLength())
        {
            current.WriteTo(writer);
        }
    }
    return writer;
}
/// <summary>
/// Encodes a collection of messages, in order.
/// </summary>
/// <param name="writer">The writer</param>
/// <param name="messages">The collection of messages to encode together.</param>
/// <param name="includeLength">Whether to include the length at the start</param>
/// <returns>The same writer, to allow call chaining.</returns>
public static IKafkaWriter Write(this IKafkaWriter writer, IEnumerable <Message> messages, bool includeLength = true)
{
    var lengthScope = includeLength ? writer.MarkForLength() : Disposable.None;
    using (lengthScope)
    {
        // NOTE(review): the original carried a TODO asking whether this offset should be
        // incremented per message; it never was, so every message gets offset 0. Preserved as-is.
        const long offset = 0L;
        foreach (var message in messages)
        {
            writer.Write(offset)
                  .Write(message);
        }
    }
    return writer;
}
/// <summary>
/// Encodes a message object.
/// </summary>
/// <param name="writer">The writer</param>
/// <param name="message">Message data to encode.</param>
/// <param name="includeLength">Whether to include the length at the start</param>
/// <returns>The same writer, to allow call chaining.</returns>
/// <remarks>
/// Format:
/// Crc (Int32), MagicByte (Byte), Attribute (Byte), [Timestamp (Int64) if version >= 1,] Key (Byte[]), Value (Byte[])
/// </remarks>
public static IKafkaWriter Write(this IKafkaWriter writer, Message message, bool includeLength = true)
{
    // Stacked usings dispose innermost-first: CRC is finalized before the length prefix.
    using (includeLength ? writer.MarkForLength() : Disposable.None)
    using (writer.MarkForCrc())
    {
        writer.Write(message.MessageVersion)
              .Write(message.Attribute);
        if (message.MessageVersion >= 1)
        {
            // Message format v1 added a timestamp; fall back to "now" when none was provided.
            var timestamp = message.Timestamp.GetValueOrDefault(DateTime.UtcNow);
            writer.Write(timestamp.ToUnixEpochMilliseconds());
        }
        writer.Write(message.Key)
              .Write(message.Value);
    }
    return writer;
}
/// <summary>
/// Encodes a <see cref="FetchResponse"/> onto the writer, grouped by topic.
/// </summary>
/// <param name="writer">The destination writer.</param>
/// <param name="context">Request context; its ApiVersion controls version-gated fields.</param>
/// <param name="response">The response to encode; may be null.</param>
/// <returns>False when <paramref name="response"/> is null; otherwise true.</returns>
private static bool TryEncodeResponse(IKafkaWriter writer, IRequestContext context, FetchResponse response)
{
    if (response == null) {
        return false;
    }

    // throttle_time_ms only exists from fetch response version 1 onward.
    if (context.ApiVersion >= 1) {
        writer.Write((int?)response.throttle_time_ms?.TotalMilliseconds ?? 0);
    }

    var topicGroups = response.responses.GroupBy(t => t.topic).ToList();
    writer.Write(topicGroups.Count);
    foreach (var topicGroup in topicGroups) {
        var partitionList = topicGroup.ToList();
        writer.Write(topicGroup.Key)
              .Write(partitionList.Count); // partitionsPerTopic
        foreach (var partitionResponse in partitionList) {
            writer.Write(partitionResponse.partition_id)
                  .Write(partitionResponse.error_code)
                  .Write(partitionResponse.high_watermark);
            if (partitionResponse.Messages.Count > 0) {
                // assume all are the same codec
                var codec = (MessageCodec)(partitionResponse.Messages[0].Attribute & Message.CodecMask);
                writer.Write(partitionResponse.Messages, codec);
            } else {
                // Empty message set: just a length-prefixed empty body.
                using (writer.MarkForLength()) {
                    writer.Write(partitionResponse.Messages);
                }
            }
        }
    }
    return true;
}
/// <inheritdoc />
public void EncodeAssignment(IKafkaWriter writer, IMemberAssignment value)
{
    using (writer.MarkForLength())
    {
        // Cast stays inside the length scope so a bad cast behaves as before.
        var assignment = (TAssignment)value;
        EncodeAssignment(writer, assignment);
    }
}
/// <inheritdoc />
public void EncodeMetadata(IKafkaWriter writer, IMemberMetadata value)
{
    using (writer.MarkForLength())
    {
        // Cast stays inside the length scope so a bad cast behaves as before.
        var metadata = (TMetadata)value;
        EncodeMetadata(writer, metadata);
    }
}