Example #1
        private void SerializeRecordBatch(ReusableMemoryStream stream, ISerializer keySerializer,
                                          ISerializer valueSerializer)
        {
            // Starting with Produce request V3, messages are encoded in the new RecordBatch format.
            var batch = new RecordBatch
            {
                CompressionCodec = CompressionCodec,
                Records          = Messages.Select(message =>
                {
                    SerializeMessageIfNotSized(ref message, keySerializer, valueSerializer, stream.Pool);
                    return new Record
                    {
                        Key = message.Key,
                        Value = message.Value,
                        Headers = message.Headers,
                        Timestamp = message.TimeStamp,
                        // If the serializer is not compatible, we already resolved this
                        // previously, so it's ok if the cast returns null
                        KeySerializer = keySerializer as ISizableSerializer,
                        ValueSerializer = valueSerializer as ISizableSerializer,
                        SerializedKeyValue = message.SerializedKeyValue,
                    };
                }),
            };

            Basics.WriteWithSize(stream, batch.Serialize);
        }
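
Basics.WriteWithSize frames the serialized batch with a 4-byte big-endian size prefix, as the Kafka wire protocol expects. A minimal sketch of how such a helper can be implemented (the name WriteWithSizePrefix and the plain MemoryStream are illustrative assumptions, not kafka-sharp's actual code):

        // Sketch: reserve a 4-byte slot, let the body serialize itself,
        // then backfill the big-endian byte count of what the body wrote.
        static void WriteWithSizePrefix(MemoryStream stream, Action<MemoryStream> body)
        {
            var sizePosition = stream.Position;
            stream.Write(new byte[4], 0, 4);       // placeholder for the size prefix
            body(stream);
            var endPosition = stream.Position;
            var size = (int)(endPosition - sizePosition - 4);
            stream.Position = sizePosition;
            stream.WriteByte((byte)(size >> 24));  // Kafka is big-endian on the wire
            stream.WriteByte((byte)(size >> 16));
            stream.WriteByte((byte)(size >> 8));
            stream.WriteByte((byte)size);
            stream.Position = endPosition;
        }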
Example #2
        private const int BytesNecessaryToGetLength = 8    // baseOffset
                                                      + 4; // batchLength

        /// <remarks>
        /// From the official protocol documentation available at
        /// https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-FetchAPI
        /// "As an optimization the server is allowed to return a partial message at the end of the message set. Clients should handle this case."
        /// If the end of the RecordBatch exceeds the length of the whole response (= endOfAllBatches), we should discard this RecordBatch.
        /// </remarks>
        public static RecordBatch Deserialize(ReusableMemoryStream input, Deserializers deserializers, long endOfAllBatches)
        {
            var recordBatch = new RecordBatch();

            if (input.Position + BytesNecessaryToGetLength > endOfAllBatches)
            {
                throw new ProtocolException(
                          $"Trying to read a record batch at {input.Position} and the end of all batches is {endOfAllBatches}."
                          + " There are not enough bytes remaining to even read the first fields...");
            }
            recordBatch.BaseOffset = BigEndianConverter.ReadInt64(input);
            var batchLength = BigEndianConverter.ReadInt32(input);
            var endOfBatch  = input.Position + batchLength;

            if (endOfAllBatches < endOfBatch)
            {
                // Partial message, CRCs won't match, return here so the CRC check doesn't throw
                return null;
            }
            recordBatch.PartitionLeaderEpoch = BigEndianConverter.ReadInt32(input);
            var magic = input.ReadByte();

            // Current magic value is 2
            if ((uint)magic != 2)
            {
                throw new UnsupportedMagicByteVersion((byte)magic, "2");
            }

            var crc = (uint)BigEndianConverter.ReadInt32(input);
            var afterCrcPosition = input.Position; // The crc is calculated starting from this position

            Crc32.CheckCrcCastagnoli((int)crc, input, afterCrcPosition, endOfBatch - afterCrcPosition);

            var attributes = BigEndianConverter.ReadInt16(input);

            recordBatch.CompressionCodec = (CompressionCodec)(attributes & CompressionCodecMask);
            recordBatch.IsTransactional  = (attributes & TransactionalFlagMask) != 0;
            recordBatch.IsControl        = (attributes & ControlFlagMask) != 0;
            recordBatch.TimestampType    = (attributes & TimestampTypeMask) > 0
                ? TimestampType.LogAppendTime
                : TimestampType.CreateTime;

            var lastOffsetDelta = BigEndianConverter.ReadInt32(input); // read to advance the stream, not used here

            var firstTimestamp = BigEndianConverter.ReadInt64(input);
            var maxTimestamp   = BigEndianConverter.ReadInt64(input);  // read to advance the stream, not used here

            recordBatch.ProducerId    = BigEndianConverter.ReadInt64(input);
            recordBatch.ProducerEpoch = BigEndianConverter.ReadInt16(input);
            recordBatch.BaseSequence  = BigEndianConverter.ReadInt32(input);

            var numberOfRecords = BigEndianConverter.ReadInt32(input);

            recordBatch.Records = DeserializeRecords(recordBatch, input, numberOfRecords, endOfBatch, firstTimestamp, deserializers);

            return recordBatch;
        }
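
The bitmask constants used above follow the record batch "attributes" layout from the Kafka protocol: bits 0-2 hold the compression codec, bit 3 the timestamp type, bit 4 the transactional flag, and bit 5 the control flag. A plausible definition of the constants the snippet references (values from the protocol specification; their declarations are not shown here):

        private const short CompressionCodecMask  = 0x07; // bits 0-2: 0 = none, 1 = gzip, 2 = snappy, 3 = lz4, 4 = zstd
        private const short TimestampTypeMask     = 0x08; // bit 3: 0 = CreateTime, 1 = LogAppendTime
        private const short TransactionalFlagMask = 0x10; // bit 4: batch belongs to a transaction
        private const short ControlFlagMask       = 0x20; // bit 5: batch carries control records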
Example #3
        internal static List<ResponseMessage> DeserializeRecordBatch(ReusableMemoryStream stream,
                                                                     Deserializers deserializers)
        {
            var list = ResponseMessageListPool.Reserve();

            // First of all, we need to check if it is really a recordBatch (i.e. magic byte = 2)
            // For that, we need to fast-forward to the magic byte. Depending on its value, we go back
            // and deserialize following the corresponding format. (Kafka is amazing).
            var startOfBatchOffset = stream.Position;

            stream.Position += HeaderSize;
            var magic = stream.ReadByte();

            stream.Position = startOfBatchOffset;
            if (magic < 2)
            {
                // We were mistaken, this is not a recordBatch but a message set!
                return DeserializeMessageSet(stream, deserializers);
            }
            // Now we know what we received is a proper recordBatch
            var size            = BigEndianConverter.ReadInt32(stream);
            var endOfAllBatches = stream.Position + size;

            if (stream.Length < endOfAllBatches)
            {
                throw new ProtocolException($"Fetch response advertise record batches of total size {size},"
                                            + $" but the stream only contains {stream.Length - stream.Position} byte remaining");
            }
            while (stream.Position < endOfAllBatches)
            {
                var recordBatch = RecordBatch.Deserialize(stream, deserializers, endOfAllBatches);
                if (recordBatch == null)
                {
                    // We received a partial record batch, discard it
                    stream.Position = endOfAllBatches;
                    break;
                }
                list.AddRange(recordBatch.Records.Select(record => new ResponseMessage
                {
                    Message = new Message {
                        Key = record.Key, Value = record.Value, TimeStamp = record.Timestamp
                    },
                    Offset = record.Offset
                }));
            }
            return list;
        }
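
The magic-byte peek works because both wire formats happen to place the magic byte at the same distance from the start of the block. A sketch of the two layouts and the HeaderSize value they presumably imply (the constant's declaration is not shown in the snippet):

        // Legacy:      size(4) + offset(8)     + messageSize(4) + crc(4)                  + magic(1) + ...
        // RecordBatch: size(4) + baseOffset(8) + batchLength(4) + partitionLeaderEpoch(4) + magic(1) + ...
        // In both formats the magic byte sits 20 bytes in, so skipping HeaderSize
        // bytes and reading a single byte is enough to tell them apart.
        private const int HeaderSize = 4 + 8 + 4 + 4;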
Example #4
 public static IEnumerable<Record> DeserializeRecords(RecordBatch recordBatch, ReusableMemoryStream input, int numberOfRecords, long endOfBatch,
                                                      long firstTimeStamp, Deserializers deserializers)
 {
     if (recordBatch.CompressionCodec == CompressionCodec.None)
     {
         return DeserializeRecordsUncompressed(recordBatch, input, numberOfRecords, endOfBatch, firstTimeStamp,
                                               deserializers);
     }
     using (var uncompressedStream = input.Pool.Reserve())
     {
         Basics.Uncompress(uncompressedStream, input.GetBuffer(), (int)input.Position,
                           (int)(endOfBatch - input.Position), recordBatch.CompressionCodec);
         input.Position = endOfBatch;
         // Materialize into a List to force iteration now, so that uncompressedStream can be released
         return new List<Record>(DeserializeRecordsUncompressed(recordBatch, uncompressedStream, numberOfRecords, endOfBatch, firstTimeStamp,
                                                                deserializers));
     }
 }
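
The comment about the List is the crucial detail here: DeserializeRecordsUncompressed (Example #7) is a lazy iterator, so returning it directly would defer every read until after the using block has already released uncompressedStream back to the pool. A standalone illustration of the pitfall (types and values are illustrative):

        // A lazy iterator: its body only runs once the result is enumerated.
        static IEnumerable<int> ReadLazily(Stream source)
        {
            yield return source.ReadByte();
        }

        static IEnumerable<int> Wrong()
        {
            using (var s = new MemoryStream(new byte[] { 42 }))
                return ReadLazily(s);                // s is disposed before anyone enumerates
        }

        static IEnumerable<int> Right()
        {
            using (var s = new MemoryStream(new byte[] { 42 }))
                return new List<int>(ReadLazily(s)); // forces the read while s is alive
        }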
Example #5
        // Used only in tests, and we only support byte array data
        public void Serialize(ReusableMemoryStream stream, object extra, Basics.ApiVersion version)
        {
            BigEndianConverter.Write(stream, Partition);
            BigEndianConverter.Write(stream, (short)ErrorCode);
            BigEndianConverter.Write(stream, HighWatermarkOffset);
            if (version >= Basics.ApiVersion.V4)
            {
                BigEndianConverter.Write(stream, LastStableOffset);
                Basics.WriteArray(stream, AbortedTransactions);
            }
            if (version >= Basics.ApiVersion.V5)
            {
                BigEndianConverter.Write(stream, LogStartOffset);
            }

            if (version >= Basics.ApiVersion.V3)
            {
                var batch = new RecordBatch
                {
                    CompressionCodec = Compression,
                    Records          = Messages.Select(responseMessage => new Record
                    {
                        Key             = responseMessage.Message.Key,
                        Value           = responseMessage.Message.Value,
                        Timestamp       = responseMessage.Message.TimeStamp,
                        KeySerializer   = null,
                        ValueSerializer = null
                    }),
                };
                Basics.WriteWithSize(stream, batch.Serialize);
                return;
            }
            Basics.WriteWithSize(stream, Messages, (s, l) =>
            {
                foreach (var m in l)
                {
                    BigEndianConverter.Write(s, m.Offset);
                    Basics.WriteWithSize(s, m.Message,
                                         (st, msg) =>
                                         msg.Serialize(st, CompressionCodec.None, extra as Serializers,
                                                       version == Basics.ApiVersion.V2 ? MessageVersion.V1 : MessageVersion.V0));
                }
            });
        }
Example #6
        private void SerializeRecordBatch(ReusableMemoryStream stream, ISerializer keySerializer,
                                          ISerializer valueSerializer)
        {
            // Starting with Produce request V3, messages are encoded in the new RecordBatch format.
            var batch = new RecordBatch
            {
                CompressionCodec = CompressionCodec,
                Records          = Messages.Select(message => new Record
                {
                    Key       = EnsureSizedSerializable(message.Key, keySerializer),
                    Value     = EnsureSizedSerializable(message.Value, valueSerializer),
                    Headers   = message.Headers,
                    Timestamp = message.TimeStamp,
                    // If the serializer is not compatible, we already resolved this
                    // previously, so it's ok if the cast returns null
                    KeySerializer   = keySerializer as ISizableSerializer,
                    ValueSerializer = valueSerializer as ISizableSerializer
                }),
            };

            Basics.WriteSizeInBytes(stream, batch.Serialize);
        }
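
The casts to ISizableSerializer matter because the Record wire format prefixes key and value with varint-encoded lengths, so the serialized size must be known before the payload is written. A plausible shape for that interface (a sketch; kafka-sharp's actual definition may differ):

        public interface ISizableSerializer : ISerializer
        {
            // The exact number of bytes Serialize would produce for this object,
            // so a varint length prefix can be emitted ahead of the payload.
            long SerializedSize(object input);
        }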
Example #7
        public static IEnumerable<Record> DeserializeRecordsUncompressed(RecordBatch recordBatch, ReusableMemoryStream input, int numberOfRecords, long endOfBatch,
                                                                         long firstTimeStamp, Deserializers deserializers)
        {
            for (var i = 0; i < numberOfRecords; i++)
            {
                var length = VarIntConverter.ReadAsInt32(input);
                if (input.Length - input.Position < length)
                {
                    throw new ProtocolException(
                              $"Record said it was of length {length}, but actually only {input.Length - input.Position} bytes remain");
                }

                var attributes     = input.ReadByte(); // ignored for now
                var timeStampDelta = VarIntConverter.ReadAsInt64(input);
                var offsetDelta    = VarIntConverter.ReadAsInt32(input);

                var keyLength   = VarIntConverter.ReadAsInt32(input);
                var key         = keyLength == -1 ? null : deserializers.Item1.Deserialize(input, keyLength);
                var valueLength = VarIntConverter.ReadAsInt32(input);
                var value       = valueLength == -1 ? null : deserializers.Item2.Deserialize(input, valueLength); // Item2 is the value deserializer (Item1 handles keys)

                var headersCount = VarIntConverter.ReadAsInt32(input);
                var headers      = new List<KafkaRecordHeader>(headersCount);
                for (var j = 0; j < headersCount; j++)
                {
                    headers.Add(Record.DeserializeHeader(input));
                }

                yield return new Record
                {
                    Headers = headers,
                    Key = key,
                    Timestamp = firstTimeStamp + timeStampDelta,
                    Value = value,
                    Offset = recordBatch.BaseOffset + offsetDelta
                };
            }
        }
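
All the VarIntConverter reads above decode zig-zag varints, the scheme Protocol Buffers uses for sint fields: seven payload bits per byte with the high bit as a continuation flag, then a zig-zag step to recover the sign. A sketch of what ReadAsInt64 plausibly does (illustrative, not kafka-sharp's exact code):

        static long ReadVarLong(Stream input)
        {
            ulong encoded = 0;
            var shift = 0;
            while (true)
            {
                var b = input.ReadByte();
                if (b < 0) throw new EndOfStreamException("Truncated varint");
                encoded |= (ulong)(b & 0x7F) << shift; // accumulate 7 payload bits
                if ((b & 0x80) == 0) break;            // high bit clear: last byte
                shift += 7;
            }
            // Zig-zag decode: maps 0, 1, 2, 3, ... back to 0, -1, 1, -2, ...
            return (long)(encoded >> 1) ^ -(long)(encoded & 1);
        }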