// Starting with Produce request V3, messages are encoded using the new RecordBatch format.
private void SerializeRecordBatch(ReusableMemoryStream stream, ISerializer keySerializer, ISerializer valueSerializer)
{
    // If a serializer is not size-aware the cast yields null; incompatible serializers
    // were already resolved earlier, so a null here is expected and harmless.
    var sizableKeySerializer = keySerializer as ISizableSerializer;
    var sizableValueSerializer = valueSerializer as ISizableSerializer;

    var recordBatch = new RecordBatch
    {
        CompressionCodec = CompressionCodec,
        Records = Messages.Select(message => new Record
        {
            Key = EnsureSizedSerializable(message.Key, keySerializer),
            Value = EnsureSizedSerializable(message.Value, valueSerializer),
            Headers = message.Headers,
            Timestamp = message.TimeStamp,
            KeySerializer = sizableKeySerializer,
            ValueSerializer = sizableValueSerializer
        }),
    };

    Basics.WriteSizeInBytes(stream, recordBatch.Serialize);
}
// Writes each message preceded by a monotonically increasing offset, without compression.
//
// Offsets always start at 0 and increase by one per message. For compressed sets in
// message format V1 brokers keep this exact layout on disk, so mirroring it avoids
// offset reassignment and recompression broker-side. For format V0 brokers rewrite
// offsets anyway, so the same scheme is safe in all cases.
private static void SerializeMessagesUncompressed(ReusableMemoryStream stream, IEnumerable <Message> messages, Serializers serializers, MessageVersion msgVersion)
{
    long nextOffset = 0;
    foreach (var message in messages)
    {
        BigEndianConverter.Write(stream, nextOffset);
        ++nextOffset;
        Basics.WriteWithSize(stream, message,
            new SerializationInfo
            {
                CompressionCodec = CompressionCodec.None,
                Serializers = serializers,
                MessageVersion = msgVersion
            },
            SerializeMessageWithCodec);
    }
}
// Serializes this record into the target stream using the RecordBatch record layout:
// [body length] [attributes] [timestamp delta] [offset delta] [key] [value] [headers].
// Deltas are relative to the batch base timestamp/offset. Returns the target stream.
public ReusableMemoryStream Serialize(ReusableMemoryStream target, long baseTimestamp, long offsetDelta)
{
    var timestampDelta = Timestamp - baseTimestamp;

    VarIntConverter.Write(target, SizeOfBodyInBytes(offsetDelta, timestampDelta));
    target.WriteByte(0x00); // Record-level attributes are always null.
    VarIntConverter.Write(target, timestampDelta);
    VarIntConverter.Write(target, offsetDelta);

    if (SerializedKeyValue != null)
    {
        // Key and value were serialized ahead of time: copy the raw bytes through.
        target.Write(SerializedKeyValue.GetBuffer(), 0, (int)SerializedKeyValue.Length);
    }
    else
    {
        Basics.WriteObject(target, Key, KeySerializer);
        Basics.WriteObject(target, Value, ValueSerializer);
    }

    if (Headers != null)
    {
        VarIntConverter.Write(target, Headers.Count);
        foreach (var header in Headers)
        {
            SerializeHeader(target, header);
        }
    }
    else
    {
        // No headers: emit a varint-encoded zero count.
        target.Write(Basics.ZeroVarInt, 0, Basics.ZeroVarInt.Length);
    }

    return target;
}
// JoinGroup request body: group id, timeouts, member id, protocol type ("consumer")
// and the single supported group protocol with its subscription metadata.
public void SerializeBody(ReusableMemoryStream stream, object extra, Basics.ApiVersion version)
{
    Basics.SerializeString(stream, GroupId);
    BigEndianConverter.Write(stream, SessionTimeout);
    if (version > Basics.ApiVersion.V0)
    {
        // RebalanceTimeout only exists starting with V1 of the request.
        BigEndianConverter.Write(stream, RebalanceTimeout);
    }
    Basics.SerializeString(stream, MemberId);
    Basics.SerializeString(stream, "consumer");

    // Only one protocol is advertised.
    var protocols = new[]
    {
        new ConsumerGroupProtocol
        {
            ProtocolName = "kafka-sharp-consumer",
            ProtocolMetadata = new ConsumerGroupProtocolMetadata
            {
                Version = 0,
                Subscription = Subscription,
                UserData = null,
            }
        }
    };
    Basics.WriteArray(stream, protocols, (s, d) => d.Serialize(s, null, Basics.ApiVersion.Ignored));
}
// Request body: the consumer group id followed by the per-topic data array.
// Extra object and API version are unused for this request.
public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Basics.SerializeString(stream, ConsumerGroupId);
    Basics.WriteArray(stream, TopicsData);
}
// Serializes one partition entry: partition id, offset, then the metadata string.
public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    BigEndianConverter.Write(stream, Partition);
    BigEndianConverter.Write(stream, Offset);
    Basics.SerializeString(stream, Metadata);
}
// Produce request body: required acks policy, broker-side timeout, then topic payloads.
// 'extra' is threaded through to the per-topic serialization.
public void SerializeBody(ReusableMemoryStream stream, object extra)
{
    BigEndianConverter.Write(stream, RequiredAcks);
    BigEndianConverter.Write(stream, Timeout);
    Basics.WriteArray(stream, TopicsData, extra);
}
// Reads one topic metadata entry: error code, topic name, then partition metadata.
public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream);
    TopicName = Basics.DeserializeString(stream);
    Partitions = Basics.DeserializeArray<PartitionMeta>(stream);
}
// Reads broker metadata from the stream: node id, host name, then port.
public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Id = BigEndianConverter.ReadInt32(stream);
    Host = Basics.DeserializeString(stream);
    Port = BigEndianConverter.ReadInt32(stream);
}
// Reads one partition response entry: partition id, error code and the offset list.
public void Deserialize(ReusableMemoryStream stream, object noextra = null)
{
    Partition = BigEndianConverter.ReadInt32(stream);
    ErrorCode = (ErrorCode) BigEndianConverter.ReadInt16(stream);
    Offsets = Basics.DeserializeArray(stream, BigEndianConverter.ReadInt64);
}
// Assignment payload: schema version, per-topic partition assignments, opaque user data.
public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    BigEndianConverter.Write(stream, Version);
    Basics.WriteArray(stream, PartitionAssignments, Basics.ApiVersion.Ignored);
    Basics.SerializeBytes(stream, UserData);
}
// Serializes a message set, compressing it when a codec other than None is requested.
// A compressed set is encoded as a single wrapper message whose value holds the
// compressed inner (uncompressed-format) message set.
private static void _SerializeMessages(ReusableMemoryStream stream, IEnumerable <Message> messages, SerializationInfo info)
{
    if (info.CompressionCodec != CompressionCodec.None)
    {
        // Wrapper message offset: 8 zero bytes (brokers rewrite/ignore it).
        stream.Write(Basics.Zero64, 0, 8);
        using (var msgsetStream = stream.Pool.Reserve())
        {
            // First serialize the full message set uncompressed into a pooled buffer.
            SerializeMessagesUncompressed(msgsetStream, messages, info.Serializers, info.MessageVersion);
            using (var compressed = stream.Pool.Reserve())
            {
                switch (info.CompressionCodec)
                {
                    case CompressionCodec.Gzip:
                        // 'true' leaves 'compressed' open after the gzip stream is disposed.
                        using (var gzip = new GZipStream(compressed, CompressionMode.Compress, true))
                        {
                            msgsetStream.WriteTo(gzip);
                        }
                        break;

                    case CompressionCodec.Lz4:
                        KafkaLz4.Compress(compressed, msgsetStream.GetBuffer(), (int)msgsetStream.Length);
                        break;

                    case CompressionCodec.Snappy:
                    {
#if NET_CORE
                        // Snappy is not available on .NET Core builds.
                        throw new NotImplementedException();
#else
                        // Reserve the worst-case output size, compress, then shrink to the real size.
                        compressed.SetLength(SnappyCodec.GetMaxCompressedLength((int)msgsetStream.Length));
                        {
                            int size = SnappyCodec.Compress(msgsetStream.GetBuffer(), 0, (int)msgsetStream.Length, compressed.GetBuffer(), 0);
                            compressed.SetLength(size);
                        }
#endif
                    }
                    break;
                }

                // Wrap the compressed bytes into a single message carrying the codec
                // attribute, serialized with the raw byte-array serializers.
                var m = new Message { Value = compressed, TimeStamp = Timestamp.Now };
                Basics.WriteSizeInBytes(stream, m,
                    new SerializationInfo
                    {
                        Serializers = SerializationConfig.ByteArraySerializers,
                        CompressionCodec = info.CompressionCodec,
                        MessageVersion = info.MessageVersion
                    }, SerializeMessageWithCodec);
            }
        }
    }
    else
    {
        // No compression: emit the message set directly.
        SerializeMessagesUncompressed(stream, messages, info.Serializers, info.MessageVersion);
    }
}
// Used only in tests: serializes broker metadata as node id, host, then port.
public void Serialize(ReusableMemoryStream stream, object noextra = null)
{
    BigEndianConverter.Write(stream, Id);
    Basics.SerializeString(stream, Host);
    BigEndianConverter.Write(stream, Port);
}
// Reads broker metadata: node id, host name, then port.
public void Deserialize(ReusableMemoryStream stream, object noextra = null)
{
    Id = BigEndianConverter.ReadInt32(stream);
    Host = Basics.DeserializeString(stream);
    Port = BigEndianConverter.ReadInt32(stream);
}
// Deserialize a message set to a sequence of messages.
// This handles the "partial message allowed at end of message set" behavior of Kafka
// brokers and compressed message sets (the method recursively calls itself in this case
// and flattens the result). The returned enumeration must be enumerated for
// deserialization to effectively occur (lazy evaluation).
//
// A message set can contain a mix of v0 and v1 messages.
// In the case of compressed messages, offsets are returned differently by brokers.
// Messages inside compressed message v0 will have absolute offsets assigned.
// Messages inside compressed message v1 will have relative offsets assigned, starting
// from 0. The wrapping compressed message itself is assigned the absolute offset of the
// last message in the set. That means in this case we can only assign offsets after
// having decompressed all messages. Lazy deserialization won't be so lazy anymore...
private static IEnumerable <ResponseMessage> LazyDeserializeMessageSet(ReusableMemoryStream stream, int messageSetSize, Deserializers deserializers)
{
    var remainingMessageSetBytes = messageSetSize;
    while (remainingMessageSetBytes > 0)
    {
        const int offsetSize = 8;   // size of the message offset field
        const int msgsizeSize = 4;  // size of the message size field
        if (remainingMessageSetBytes < offsetSize + msgsizeSize)
        {
            // This is a partial message => skip to the end of the message set.
            // TODO: unit test this
            stream.Position += remainingMessageSetBytes;
            yield break;
        }

        var offset = BigEndianConverter.ReadInt64(stream);
        var messageSize = BigEndianConverter.ReadInt32(stream);
        remainingMessageSetBytes -= offsetSize + msgsizeSize;
        if (remainingMessageSetBytes < messageSize)
        {
            // This is a partial message => skip to the end of the message set.
            stream.Position += remainingMessageSetBytes;
            yield break;
        }

        // Message body
        var crc = BigEndianConverter.ReadInt32(stream);
        var crcStartPos = stream.Position; // crc is computed from this position
        var magic = stream.ReadByte();
        // Only message formats v0 and v1 are supported. ReadByte returns -1 on end of
        // stream, which also trips this check once cast to uint.
        if ((uint)magic > 1)
        {
            throw new UnsupportedMagicByteVersion((byte)magic);
        }
        var attributes = stream.ReadByte();
        long timestamp = 0;
        if (magic == 1)
        {
            // v1 messages carry a timestamp right after the attributes byte.
            timestamp = BigEndianConverter.ReadInt64(stream);
        }

        // Check for compression
        var codec = (CompressionCodec)(attributes & 3); // Lowest 2 bits
        if (codec == CompressionCodec.None)
        {
            var msg = new ResponseMessage
            {
                Offset = offset,
                Message = new Message
                {
                    Key = Basics.DeserializeByteArray(stream, deserializers.Item1),
                    Value = Basics.DeserializeByteArray(stream, deserializers.Item2),
                    TimeStamp = timestamp
                }
            };
            // Crc is checked only after the message body has been consumed.
            CheckCrc(crc, stream, crcStartPos);
            yield return(msg);
        }
        else
        {
            // Key is null, read/check/skip
            if (BigEndianConverter.ReadInt32(stream) != -1)
            {
                throw new InvalidDataException("Compressed messages key should be null");
            }

            // Uncompress: skip past the compressed data first so the crc over the full
            // body can be verified before decompression.
            var compressedLength = BigEndianConverter.ReadInt32(stream);
            var dataPos = stream.Position;
            stream.Position += compressedLength;
            CheckCrc(crc, stream, crcStartPos);

            using (var uncompressedStream = stream.Pool.Reserve())
            {
                Uncompress(uncompressedStream, stream.GetBuffer(), (int)dataPos, compressedLength, codec);
                // Deserialize recursively
                if (magic == 0) // v0 message: inner offsets are already absolute
                {
                    foreach (var m in LazyDeserializeMessageSet(uncompressedStream, (int)uncompressedStream.Length, deserializers))
                    {
                        // Flatten
                        yield return(m);
                    }
                }
                else // v1 message, we have to assign the absolute offsets
                {
                    var innerMsgs = ResponseMessageListPool.Reserve();
                    // We need to deserialize all messages first, because the wrapper
                    // offset is the offset of the last message in the set, so we need to
                    // know how many messages there are before assigning offsets.
                    innerMsgs.AddRange(LazyDeserializeMessageSet(uncompressedStream, (int)uncompressedStream.Length, deserializers));
                    var baseOffset = offset - innerMsgs.Count + 1;
                    foreach (var msg in innerMsgs)
                    {
                        yield return (new ResponseMessage
                        {
                            Offset = msg.Offset + baseOffset,
                            Message = msg.Message
                        });
                    }
                    ResponseMessageListPool.Release(innerMsgs);
                }
            }
        }
        remainingMessageSetBytes -= messageSize;
    }
}
// Used only in tests: serializes partition id, error code and the offset array.
public void Serialize(ReusableMemoryStream stream, object noextra = null)
{
    BigEndianConverter.Write(stream, Partition);
    BigEndianConverter.Write(stream, (short) ErrorCode);
    Basics.WriteArray(stream, Offsets, BigEndianConverter.Write);
}
// Reads an assignment payload back: version, per-topic assignments, user data blob.
public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Version = BigEndianConverter.ReadInt16(stream);
    PartitionAssignments = Basics.DeserializeArray<TopicData<PartitionAssignment>>(stream);
    UserData = Basics.DeserializeBytes(stream);
}
// Deserialize a message set to a sequence of messages.
// This handles the "partial message allowed at end of message set" behavior of Kafka
// brokers and compressed message sets (the method recursively calls itself in this case
// and flattens the result). The returned enumeration must be enumerated for
// deserialization to effectively occur (lazy evaluation).
// This variant only supports message format v0 (magic byte 0).
private static IEnumerable <ResponseMessage> LazyDeserializeMessageSet(ReusableMemoryStream stream, int messageSetSize, Deserializers deserializers)
{
    var remainingMessageSetBytes = messageSetSize;
    while (remainingMessageSetBytes > 0)
    {
        const int offsetSize = 8;   // size of the message offset field
        const int msgsizeSize = 4;  // size of the message size field
        if (remainingMessageSetBytes < offsetSize + msgsizeSize)
        {
            // This is a partial message => skip to the end of the message set.
            // TODO: unit test this
            stream.Position += remainingMessageSetBytes;
            yield break;
        }

        var offset = BigEndianConverter.ReadInt64(stream);
        var messageSize = BigEndianConverter.ReadInt32(stream);
        remainingMessageSetBytes -= offsetSize + msgsizeSize;
        if (remainingMessageSetBytes < messageSize)
        {
            // This is a partial message => skip to the end of the message set.
            stream.Position += remainingMessageSetBytes;
            yield break;
        }

        // Message body
        var crc = BigEndianConverter.ReadInt32(stream);
        var crcStartPos = stream.Position; // crc is computed from this position
        var magic = stream.ReadByte();
        if (magic != 0)
        {
            // Only v0 messages are supported by this code path.
            throw new UnsupportedMagicByteVersion((byte)magic);
        }
        var attributes = stream.ReadByte();

        // Check for compression
        var codec = (CompressionCodec)(attributes & 3); // Lowest 2 bits
        if (codec == CompressionCodec.None)
        {
            var msg = new ResponseMessage
            {
                Offset = offset,
                Message = new Message
                {
                    Key = Basics.DeserializeByteArray(stream, deserializers.Item1),
                    Value = Basics.DeserializeByteArray(stream, deserializers.Item2)
                }
            };
            // Crc is checked only after the message body has been consumed.
            CheckCrc(crc, stream, crcStartPos);
            yield return(msg);
        }
        else
        {
            // Key is null, read/check/skip
            if (BigEndianConverter.ReadInt32(stream) != -1)
            {
                throw new InvalidDataException("Compressed messages key should be null");
            }

            // Uncompress: skip past the compressed data first so the crc over the full
            // body can be verified before decompression.
            var compressedLength = BigEndianConverter.ReadInt32(stream);
            var dataPos = stream.Position;
            stream.Position += compressedLength;
            CheckCrc(crc, stream, crcStartPos);

            using (var uncompressedStream = stream.Pool.Reserve())
            {
                Uncompress(uncompressedStream, stream.GetBuffer(), (int)dataPos, compressedLength, codec);
                // Deserialize recursively (v0 inner offsets are already absolute)
                foreach (var m in LazyDeserializeMessageSet(uncompressedStream, (int)uncompressedStream.Length, deserializers))
                {
                    // Flatten
                    yield return(m);
                }
            }
        }
        remainingMessageSetBytes -= messageSize;
    }
}
// Subscription metadata: schema version, subscribed topic list (an empty sequence is
// written when Subscription is null), then opaque user data.
public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    BigEndianConverter.Write(stream, Version);
    var topics = Subscription ?? Enumerable.Empty<string>();
    Basics.WriteArray(stream, topics, Basics.SerializeString);
    Basics.SerializeBytes(stream, UserData);
}
// Used only in tests: serializes broker metadata as node id, host, then port.
public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    BigEndianConverter.Write(stream, Id);
    Basics.SerializeString(stream, Host);
    BigEndianConverter.Write(stream, Port);
}
// Group membership header: group id, generation id, then member id.
public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Basics.SerializeString(stream, GroupId);
    BigEndianConverter.Write(stream, GenerationId);
    Basics.SerializeString(stream, MemberId);
}
// Used only in tests: serializes error code, topic name, then the partitions array.
public void Serialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    BigEndianConverter.Write(stream, (short) ErrorCode);
    Basics.SerializeString(stream, TopicName);
    Basics.WriteArray(stream, Partitions);
}
// Reads subscription metadata back: schema version, topic list, opaque user data.
public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Version = BigEndianConverter.ReadInt16(stream);
    Subscription = Basics.DeserializeArray(stream, Basics.DeserializeString);
    UserData = Basics.DeserializeBytes(stream);
}
// The request body only carries the group id.
public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Basics.SerializeString(stream, GroupId);
}
// Used only in tests: serializes broker metadata then topic metadata as two arrays.
public void Serialize(ReusableMemoryStream stream, object noextra)
{
    Basics.WriteArray(stream, BrokersMeta, noextra, Basics.ApiVersion.Ignored);
    Basics.WriteArray(stream, TopicsMeta, noextra, Basics.ApiVersion.Ignored);
}
// Reads one committed offset entry: partition id, offset, then the metadata string.
public void Deserialize(ReusableMemoryStream stream, object _, Basics.ApiVersion __)
{
    Partition = BigEndianConverter.ReadInt32(stream);
    Offset = BigEndianConverter.ReadInt64(stream);
    Metadata = Basics.DeserializeString(stream);
}
// Request body: the replica id followed by the per-topic data array.
public void SerializeBody(ReusableMemoryStream stream, object _, Basics.ApiVersion version)
{
    // ReplicaId: clients that are not brokers must always send -1.
    stream.Write(Basics.MinusOne32, 0, 4);
    Basics.WriteArray(stream, TopicsData, version);
}
// The response is a single array of per-topic data, parameterized by the partition
// payload type; 'extra' and the API version are forwarded to the element deserializer.
public void Deserialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version)
{
    TopicsResponse = Basics.DeserializeArrayExtra<TopicData<TPartitionData>>(stream, extra, version);
}
// Writes the key then the value using the provided (key serializer, value serializer) pair.
private void DoSerializeKeyValueAsRecord(ReusableMemoryStream stream, Tuple <ISerializer, ISerializer> serializers)
{
    Basics.WriteObject(stream, Key, serializers.Item1);
    Basics.WriteObject(stream, Value, serializers.Item2);
}