public void BufferedMessageSetWriteToValidSequence()
        {
            byte[] messageBytes = { 1, 2, 3, 4, 5 };
            var msg1 = new Message(messageBytes) { Offset = 0 };
            var msg2 = new Message(messageBytes) { Offset = 1 };

            MessageSet messageSet = new BufferedMessageSet(new List<Message> { msg1, msg2 }, 0);
            var ms = new MemoryStream();

            messageSet.WriteTo(ms);

            var reader     = new KafkaBinaryReader(ms);
            int baseOffset = 0;

            for (int i = 0; i < 2; ++i)
            {
                reader.ReadInt64().Should().Be(i);  // offset
                var msgLength = reader.ReadInt32(); // length
                msgLength.Should().Be(Message.DefaultHeaderSize + msg1.PayloadSize);
                reader.ReadUInt32().Should().Be(Crc32Hasher.ComputeCrcUint32(ms.GetBuffer(), baseOffset + 8 + 4 + 4, msgLength - 4));
                reader.ReadByte().Should().Be(0);                    // magic
                reader.ReadByte().Should().Be(msg1.Attributes);
                reader.ReadInt32().Should().Be(-1);                  // key length
                reader.ReadInt32().Should().Be(messageBytes.Length); // message length
                reader.ReadBytes(messageBytes.Length).SequenceEqual(messageBytes).Should().BeTrue();
                baseOffset += 8 + 4 + msgLength;
            }
        }
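
For orientation, the assertions above imply a fixed per-entry wire layout: an 8-byte offset and a 4-byte size, followed by the message itself (4-byte CRC, 1-byte magic, 1-byte attributes, 4-byte key length, 4-byte value length, payload). A minimal sketch of that arithmetic follows; MessageEntrySize is a hypothetical helper for illustration, not a Kafka.Client API.

        // Hypothetical helper (illustration only) summarizing the sizes the test asserts.
        // entry   = offset(8) + size(4) + message
        // message = crc(4) + magic(1) + attributes(1) + key length(4) + value length(4) + payload
        static int MessageEntrySize(int payloadLength)
        {
            const int messageHeaderSize = 4 + 1 + 1 + 4 + 4; // should equal Message.DefaultHeaderSize used above
            return 8 + 4 + messageHeaderSize + payloadLength;
        }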
Code Example #2
            public ProducerResponse ParseFrom(KafkaBinaryReader reader)
            {
                var size          = reader.ReadInt32(); // total response size in bytes (not used further here)
                var correlationId = reader.ReadInt32();
                var topicCount    = reader.ReadInt32();

                var statuses = new Dictionary<TopicAndPartition, ProducerResponseStatus>();

                for (var i = 0; i < topicCount; ++i)
                {
                    var topic          = reader.ReadShortString();
                    var partitionCount = reader.ReadInt32();
                    for (var p = 0; p < partitionCount; ++p)
                    {
                        var partitionId       = reader.ReadInt32();
                        var error             = reader.ReadInt16();
                        var offset            = reader.ReadInt64();
                        var topicAndPartition = new TopicAndPartition(topic, partitionId);

                        statuses.Add(topicAndPartition, new ProducerResponseStatus
                        {
                            Error  = ErrorMapper.ToError(error),
                            Offset = offset
                        });
                    }
                }

                return new ProducerResponse(correlationId, statuses);
            }
Code Example #3
        internal static PartitionData ParseFrom(KafkaBinaryReader reader)
        {
            var partition          = reader.ReadInt32();
            var error              = reader.ReadInt16();
            var highWatermark      = reader.ReadInt64();
            var messageSetSize     = reader.ReadInt32();
            var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);

            return new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet);
        }
Code Example #4
        public static PartitionOffsetsResponse ReadFrom(KafkaBinaryReader reader)
        {
            var partitionId = reader.ReadInt32();
            var error       = reader.ReadInt16();
            var numOffsets  = reader.ReadInt32();
            var offsets     = new List<long>();

            for (var o = 0; o < numOffsets; ++o)
            {
                offsets.Add(reader.ReadInt64());
            }

            return new PartitionOffsetsResponse(partitionId,
                                                (ErrorMapping)Enum.Parse(typeof(ErrorMapping), error.ToString(CultureInfo.InvariantCulture)),
                                                offsets);
        }
Code Example #5
        public static BufferedMessageSet ParseFrom(KafkaBinaryReader reader, int size, int partitionID)
        {
            int bytesLeft = size;

            if (bytesLeft == 0)
            {
                return new BufferedMessageSet(Enumerable.Empty<Message>(), partitionID);
            }

            var messages = new List<Message>();

            do
            {
                // Not enough bytes left for another offset (8 bytes) + size (4 bytes) header
                if (bytesLeft < 12)
                {
                    break;
                }

                long offset  = reader.ReadInt64();
                int  msgSize = reader.ReadInt32();
                bytesLeft -= 12;

                if (msgSize > bytesLeft || msgSize < 0)
                {
                    break;
                }

                Message msg = Message.ParseFrom(reader, offset, msgSize, partitionID);
                bytesLeft -= msgSize;
                messages.Add(msg);
            } while (bytesLeft > 0);

            if (bytesLeft > 0)
            {
                // Consume any trailing bytes of a truncated (partial) message at the end of the set
                reader.ReadBytes(bytesLeft);
            }

            return new BufferedMessageSet(messages, partitionID);
        }
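
As a usage sketch, this parser can be exercised against WriteTo from the first example. This is a minimal, hedged example using only calls shown on this page; the local names are illustrative, and the stream is rewound defensively in case WriteTo leaves the position at the end of the written data.

        // Hedged sketch: write a BufferedMessageSet to a MemoryStream and parse it back.
        var original = new BufferedMessageSet(new List<Message> { new Message(new byte[] { 1, 2, 3 }) { Offset = 0 } }, 0);
        var stream = new MemoryStream();
        original.WriteTo(stream);
        stream.Position = 0; // rewind defensively; the stream holds exactly the serialized message set
        var parsed = BufferedMessageSet.ParseFrom(new KafkaBinaryReader(stream), (int)stream.Length, 0);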
Code Example #6
        public void GetBytesValid()
        {
            const string topicName   = "topic";
            var          requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();

            requestInfo[topicName] = new List<PartitionOffsetRequestInfo>()
            {
                new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
            };
            var request = new OffsetRequest(requestInfo);

            // format = len(request)(4) + request type(2) + version(2) + correlation id(4) + client id(2, empty) +
            //          replica id(4) + topic count(4) + topic + partition count(4) + partition id(4) + time(8) + max offsets(4)
            int count = 4 + 2 + 2 + 4 + 2 + 4 + 4 +
                        BitWorks.GetShortStringLength("topic", AbstractRequest.DefaultEncoding) + 4 + 4 + 8 + 4;
            var ms = new MemoryStream();

            request.WriteTo(ms);
            byte[] bytes = ms.ToArray();
            Assert.IsNotNull(bytes);
            Assert.AreEqual(count, bytes.Length);

            var reader = new KafkaBinaryReader(ms);

            reader.ReadInt32().Should().Be(count - 4);                        // length
            reader.ReadInt16().Should().Be((short)RequestTypes.Offsets);      // request type
            reader.ReadInt16().Should().Be(0);                                // version
            reader.ReadInt32().Should().Be(0);                                // correlation id
            string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
            reader.ReadInt32().Should().Be(-1);                               // replica id
            reader.ReadInt32().Should().Be(1);                                // topic count
            reader.ReadShortString().Should().Be("topic");                    // topic name
            reader.ReadInt32().Should().Be(1);                                // partition count for the topic
            reader.ReadInt32().Should().Be(0);                                // partition id
            reader.ReadInt64().Should().Be(OffsetRequest.LatestTime);         // time
            reader.ReadInt32().Should().Be(10);                               // max offset
        }