Example 1
        public static PartitionMetadata ParseFrom(KafkaBinaryReader reader, Dictionary <int, Broker> brokers)
        {
            var    errorCode   = reader.ReadInt16();
            var    partitionId = reader.ReadInt32();
            var    leaderId    = reader.ReadInt32();
            Broker leader      = null;

            if (leaderId != -1)
            {
                leader = brokers[leaderId];
            }

            // list of all replicas
            var numReplicas = reader.ReadInt32();
            var replicas    = new List <Broker>();

            for (var i = 0; i < numReplicas; ++i)
            {
                replicas.Add(brokers[reader.ReadInt32()]);
            }

            // list of in-sync replicas
            var numIsr = reader.ReadInt32();
            var isrs   = new List <Broker>();

            for (var i = 0; i < numIsr; ++i)
            {
                isrs.Add(brokers[reader.ReadInt32()]);
            }

            return(new PartitionMetadata(partitionId, leader, replicas, isrs));
        }
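
A minimal round-trip sketch, not from the original source: it writes the field layout this parser expects, using only the KafkaBinaryWriter overloads and the Broker constructor that appear elsewhere in these examples.

        public static void PartitionMetadataRoundTrip()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write((short)ErrorMapping.NoError); // error code
            writer.Write(7);                           // partition id
            writer.Write(0);                           // leader broker id
            writer.Write(1);                           // replica count
            writer.Write(0);                           // replica broker id
            writer.Write(1);                           // in-sync replica count
            writer.Write(0);                           // in-sync replica broker id
            stream.Seek(0, SeekOrigin.Begin);

            var brokers  = new Dictionary<int, Broker> { { 0, new Broker(0, "host1", 9092) } };
            var metadata = PartitionMetadata.ParseFrom(new KafkaBinaryReader(stream), brokers);
            // metadata.PartitionId is 7; Leader, Replicas and Isr all resolve to brokers[0]
        }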
        public void ShouldAbleToParseFetchResponse()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write(1);
            writer.Write(123); // correlation id
            writer.Write(1);   // data count
            writer.WriteShortString("topic1");
            writer.Write(1);   // partition count
            writer.Write(111); //partition id
            writer.Write((short)ErrorMapping.NoError);

            writer.Write(1011L); // hw
            var messageStream = new MemoryStream();
            var messageWriter = new KafkaBinaryWriter(messageStream);

            new BufferedMessageSet(new List <Message>()
            {
                new Message(new byte[100])
            }, 0).WriteTo(messageWriter);
            writer.Write((int)messageStream.Length);
            writer.Write(messageStream.GetBuffer(), 0, (int)messageStream.Length);
            stream.Seek(0, SeekOrigin.Begin);
            var reader   = new KafkaBinaryReader(stream);
            var response = new FetchResponse.Parser().ParseFrom(reader);
            var set      = response.MessageSet("topic1", 111);

            set.Should().NotBeNull();
            var messages = set.Messages.ToList();

            messages.Count().Should().Be(1);
            messages.First().Payload.Length.Should().Be(100);
        }
Example 3
        internal static IList <BufferedMessageSet> ParseMultiFrom(KafkaBinaryReader reader, int size, int count)
        {
            var result = new List <BufferedMessageSet>();

            if (size == 0)
            {
                return(result);
            }

            int   readed    = 0;
            short errorCode = reader.ReadInt16();

            readed += 2;
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            for (int i = 0; i < count; i++)
            {
                int partSize = reader.ReadInt32();
                readed += 4;
                var item = ParseFrom(reader, partSize);
                readed += partSize;
                result.Add(item);
            }

            if (size != readed)
            {
                throw new KafkaException(KafkaException.InvalidRetchSizeCode); // sic: "Retch" is the spelling of the library's constant
            }

            return(result);
        }
        public void ShouldAbleToWriteMessageSetWithPartialMessage()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);
            var msg1   = new Message(new byte[101])
            {
                Offset = 0
            };
            var msg2 = new Message(new byte[102])
            {
                Offset = 1
            };
            var set = new BufferedMessageSet(new List <Message>()
            {
                msg1, msg2
            }, 0);

            set.WriteTo(writer);
            // Writing partial message
            writer.Write(3L);
            writer.Write(100);
            writer.Write(new byte[10]);
            var size = (int)stream.Position;

            stream.Seek(0, SeekOrigin.Begin);
            var reader   = new KafkaBinaryReader(stream);
            var newSet   = BufferedMessageSet.ParseFrom(reader, size, 0);
            var messages = newSet.Messages.ToList();

            messages.Count().Should().Be(2);
            messages[0].Payload.Count().Should().Be(101);
            messages[1].Payload.Count().Should().Be(102);
        }
Example 5
            public ProducerResponse ParseFrom(KafkaBinaryReader reader)
            {
                var size          = reader.ReadInt32();
                var correlationId = reader.ReadInt32();
                var topicCount    = reader.ReadInt32();

                var statuses = new Dictionary <TopicAndPartition, ProducerResponseStatus>();

                for (var i = 0; i < topicCount; ++i)
                {
                    var topic          = reader.ReadShortString();
                    var partitionCount = reader.ReadInt32();
                    for (var p = 0; p < partitionCount; ++p)
                    {
                        var partitionId       = reader.ReadInt32();
                        var error             = reader.ReadInt16();
                        var offset            = reader.ReadInt64();
                        var topicAndPartition = new TopicAndPartition(topic, partitionId);

                        statuses.Add(topicAndPartition, new ProducerResponseStatus
                        {
                            Error  = ErrorMapper.ToError(error),
                            Offset = offset
                        });
                    }
                }

                return(new ProducerResponse(correlationId, statuses));
            }
Example 6
        internal static Message ParseFrom(KafkaBinaryReader reader, int size)
        {
            Message result;
            int     readed = 0;
            byte    magic  = reader.ReadByte();

            readed++;
            byte[] checksum;
            byte[] payload;
            if (magic == 1)
            {
                byte attributes = reader.ReadByte();
                readed++;
                checksum = reader.ReadBytes(4);
                readed  += 4;
                payload  = reader.ReadBytes(size - (DefaultHeaderSize + 1));
                readed  += size - (DefaultHeaderSize + 1);
                result   = new Message(payload, checksum, Messages.CompressionCodec.GetCompressionCodec(attributes & CompressionCodeMask));
            }
            else
            {
                checksum = reader.ReadBytes(4);
                readed  += 4;
                payload  = reader.ReadBytes(size - DefaultHeaderSize);
                readed  += size - DefaultHeaderSize;
                result   = new Message(payload, checksum);
            }

            if (size != readed)
            {
                throw new KafkaException(ErrorMapping.InvalidFetchSizeCode);
            }

            return(result);
        }
        public void GetBytesValidSequence()
        {
            var payload = Encoding.UTF8.GetBytes("kafka");
            Message message = new Message(payload, CompressionCodecs.NoCompressionCodec);

            MemoryStream ms = new MemoryStream();
            message.WriteTo(ms);

            Assert.AreEqual(message.Size, ms.Length);

            var crc = Crc32Hasher.ComputeCrcUint32(ms.GetBuffer(), 4, (int)(ms.Length - 4));

            // first 4 bytes = the crc
            using (var reader = new KafkaBinaryReader(ms))
            {
                Assert.AreEqual(crc, reader.ReadUInt32());

                // magic
                Assert.AreEqual(message.Magic, reader.ReadByte());

                // attributes
                Assert.AreEqual((byte)0, reader.ReadByte());

                // key size
                Assert.AreEqual(-1, reader.ReadInt32());

                // payload size
                Assert.AreEqual(payload.Length, reader.ReadInt32());

                // remaining bytes = the payload
                payload.SequenceEqual(reader.ReadBytes(10)).Should().BeTrue();
            }
        }
Example 8
        internal static IList <BufferedMessageSet> ParseMultiFrom(KafkaBinaryReader reader, int size, int count, List <long> initialOffsets)
        {
            var result = new List <BufferedMessageSet>();

            if (size == 0)
            {
                return(result);
            }

            int   readed    = 0;
            short errorCode = reader.ReadInt16();

            readed += 2;
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            for (int i = 0; i < count; i++)
            {
                int partSize = reader.ReadInt32();
                readed += 4;
                var item = ParseFrom(reader, partSize, initialOffsets[i]);
                readed += partSize;
                result.Add(item);
                var totalSetSize = item.SetSize + 2; // +2 for the int16 error code that precedes the set
                if (totalSetSize != partSize)
                {
                    break;
                }
            }

            return(result);
        }
        public void BufferedMessageSetWriteToValidSequence()
        {
            byte[] messageBytes = { 1, 2, 3, 4, 5 };
            var    msg1         = new Message(messageBytes)
            {
                Offset = 0
            };
            var msg2 = new Message(messageBytes);

            msg2.Offset = 1;
            MessageSet messageSet = new BufferedMessageSet(new List <Message>()
            {
                msg1, msg2
            }, 0);
            var ms = new MemoryStream();

            messageSet.WriteTo(ms);

            var reader     = new KafkaBinaryReader(ms);
            int baseOffset = 0;

            for (int i = 0; i < 2; ++i)
            {
                reader.ReadInt64().Should().Be(i);  // offset
                var msgLength = reader.ReadInt32(); // length
                msgLength.Should().Be(Message.DefaultHeaderSize + msg1.PayloadSize);
                reader.ReadUInt32().Should().Be(Crc32Hasher.ComputeCrcUint32(ms.GetBuffer(), baseOffset + 8 + 4 + 4, msgLength - 4));
                reader.ReadByte().Should().Be(0);                    // magic
                reader.ReadByte().Should().Be(msg1.Attributes);
                reader.ReadInt32().Should().Be(-1);                  // key length
                reader.ReadInt32().Should().Be(messageBytes.Length); // message length
                reader.ReadBytes(messageBytes.Length).SequenceEqual(messageBytes).Should().BeTrue();
                baseOffset += 8 + 4 + msgLength;
            }
        }
Example 10
            public FetchResponse ParseFrom(KafkaBinaryReader reader)
            {
                int size = 0, correlationId = 0, dataCount = 0;

                try
                {
                    size          = reader.ReadInt32();
                    correlationId = reader.ReadInt32();
                    dataCount     = reader.ReadInt32();
                    var data = new TopicData[dataCount];
                    for (var i = 0; i < dataCount; i++)
                    {
                        data[i] = TopicData.ParseFrom(reader);
                    }

                    return(new FetchResponse(correlationId, data, size));
                }
                catch (OutOfMemoryException mex)
                {
                    Logger.Error(string.Format(
                                     "OOM Error. Data values were: size: {0}, correlationId: {1}, dataCound: {2}.\r\nFull Stack of exception: {3}",
                                     size, correlationId, dataCount, mex.StackTrace));
                    throw;
                }
            }
Example 11
        public void ShouldParseResponse()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write(1);
            writer.Write(123); // correlation id
            writer.Write(1);   // topic count
            writer.WriteShortString("topic");
            writer.Write(1);   // partition count
            writer.Write(999); // partition id
            writer.Write((short)ErrorMapping.NoError);
            writer.Write(3);   // number of offsets
            writer.Write(111L);
            writer.Write(222L);
            writer.Write(333L);
            stream.Seek(0, SeekOrigin.Begin);
            var reader   = new KafkaBinaryReader(stream);
            var response = new OffsetResponse.Parser().ParseFrom(reader);

            response.CorrelationId.Should().Be(123);
            response.ResponseMap.Count.Should().Be(1);
            var partitions = response.ResponseMap["topic"];

            partitions.Count.Should().Be(1);
            var info = partitions.First();

            info.Error.Should().Be(ErrorMapping.NoError);
            info.Offsets.Count.Should().Be(3);
            info.Offsets.SequenceEqual(new List <long>()
            {
                111L, 222L, 333L
            }).Should().BeTrue();
            info.PartitionId.Should().Be(999);
        }
Example 13
        public static LeaveGroupResponse ParseFrom(KafkaBinaryReader reader)
        {
            var size          = reader.ReadInt32();
            var correlationid = reader.ReadInt32();
            var error         = reader.ReadInt16();

            return(new LeaveGroupResponse(error));
        }
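
No test accompanies this parser in the collection; the following is a hypothetical sketch of the three-field frame it consumes, built with the same writer pattern the other tests use.

        public void ShouldParseLeaveGroupResponse()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write(6);   // size (read but discarded by the parser)
            writer.Write(123); // correlation id (also discarded)
            writer.Write((short)ErrorMapping.NoError);
            stream.Seek(0, SeekOrigin.Begin);

            var reader   = new KafkaBinaryReader(stream);
            var response = LeaveGroupResponse.ParseFrom(reader);
            // only the error code survives into the response object
        }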
Example 14
        internal static Broker ParseFrom(KafkaBinaryReader reader)
        {
            var id   = reader.ReadInt32();
            var host = BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding);
            var port = reader.ReadInt32();

            return(new Broker(id, host, port));
        }
        public void ShouldParseEmptyMessageSet()
        {
            var stream   = new MemoryStream();
            var reader   = new KafkaBinaryReader(stream);
            var newSet   = BufferedMessageSet.ParseFrom(reader, 0, 0);
            var messages = newSet.Messages.ToList();

            messages.Count().Should().Be(0);
        }
Example 16
        public static BufferedMessageSet Decompress(Message message, int partition)
        {
            switch (message.CompressionCodec)
            {
            case CompressionCodecs.DefaultCompressionCodec:
            case CompressionCodecs.GZIPCompressionCodec:
                var inputBytes = message.Payload;
                using (var outputStream = new MemoryStream())
                {
                    using (var inputStream = new MemoryStream(inputBytes))
                    {
                        using (var gzipInputStream = new GZipStream(inputStream, CompressionMode.Decompress))
                        {
                            try
                            {
                                gzipInputStream.CopyTo(outputStream);
                                gzipInputStream.Close();
                            }
                            catch (IOException ex)
                            {
                                Logger.InfoFormat("Error while reading from the GZIP input stream: {0}",
                                                  ex.FormatException());
                                throw;
                            }
                        }
                    }

                    outputStream.Position = 0;
                    using (var reader = new KafkaBinaryReader(outputStream))
                    {
                        return(BufferedMessageSet.ParseFrom(reader, (int)outputStream.Length, partition));
                    }
                }

            case CompressionCodecs.SnappyCompressionCodec:
                try
                {
                    using (var stream = new MemoryStream(SnappyHelper.Decompress(message.Payload)))
                    {
                        using (var reader = new KafkaBinaryReader(stream))
                        {
                            return(BufferedMessageSet.ParseFrom(reader, (int)stream.Length, partition));
                        }
                    }
                }
                catch (Exception ex)
                {
                    Logger.ErrorFormat("Error while reading from the Snappy input stream  {0}",
                                       ex.FormatException());
                    throw;
                }

            default:
                throw new UnknownCodecException(string.Format(CultureInfo.CurrentCulture, "Unknown Codec: {0}",
                                                              message.CompressionCodec));
            }
        }
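
An illustrative GZIP round trip: serialize a message set, gzip it, and wrap the bytes in a Message flagged with the GZIP codec so Decompress can unpack it. This assumes the helper sits in the same class as Decompress and that no framing beyond the gzip stream is applied to the payload; neither assumption is confirmed by these snippets.

        public static BufferedMessageSet GzipRoundTrip()
        {
            // serialize a one-message set into a buffer
            var inner = new MemoryStream();
            new BufferedMessageSet(new List <Message> { new Message(new byte[10]) }, 0)
                .WriteTo(new KafkaBinaryWriter(inner));

            // gzip the serialized set
            var compressed = new MemoryStream();
            using (var gzip = new GZipStream(compressed, CompressionMode.Compress, true))
            {
                gzip.Write(inner.GetBuffer(), 0, (int)inner.Length);
            }

            // wrap it in a message carrying the GZIP codec and decompress it again
            var wrapper = new Message(compressed.ToArray(), CompressionCodecs.GZIPCompressionCodec);
            return Decompress(wrapper, 0);
        }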
Example 17
        internal static BufferedMessageSet ParseFrom(KafkaBinaryReader reader, int size, long initialOffset)
        {
            if (size == 0)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>(), initialOffset));
            }

            short errorCode = reader.ReadInt16();

            if (errorCode != ErrorMapping.NoError)
            {
                if (errorCode == ErrorMapping.OffsetOutOfRangeCode)
                {
                    return(new BufferedMessageSet(Enumerable.Empty <Message>(), errorCode, initialOffset));
                }

                throw new KafkaException(errorCode);
            }

            int readed = 2;

            if (readed == size)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>(), initialOffset));
            }

            var messages = new List <Message>();

            do
            {
                int msgSize = reader.ReadInt32();
                readed += 4;
                int sizeNotUsed = size - readed;
                if (msgSize > sizeNotUsed || msgSize < 0)
                {
                    if (messages.Count == 0 || msgSize < 0)
                    {
                        string errorMessage =
                            String.Format(
                                "Invalid message size. Read size = {0}, the remaining data size = {1} (possible causes (1) a single message larger than the fetch size; (2) log corruption)",
                                msgSize, sizeNotUsed);
                        throw new InvalidMessageSizeException(errorMessage);
                    }
                    return(new BufferedMessageSet(messages, initialOffset));
                }
                Message msg = Message.ParseFrom(reader, msgSize);
                readed += msgSize;
                messages.Add(msg);
            }while (readed < size);
            if (size != readed)
            {
                throw new KafkaException(ErrorMapping.InvalidFetchSizeCode);
            }

            return(new BufferedMessageSet(messages, initialOffset));
        }
Example 18
        internal static PartitionData ParseFrom(KafkaBinaryReader reader)
        {
            var partition          = reader.ReadInt32();
            var error              = reader.ReadInt16();
            var highWatermark      = reader.ReadInt64();
            var messageSetSize     = reader.ReadInt32();
            var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);

            return(new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet));
        }
        public void ShouldAbleToParseRequest()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write(1);
            writer.Write(100);  // correlation id
            writer.Write(2);    // broker count
            writer.Write(0);    // broker id
            writer.WriteShortString("host1");
            writer.Write(9092); // port
            writer.Write(1);    // broker id
            writer.WriteShortString("host2");
            writer.Write(9093); // port
            writer.Write(1);    // topic count
            writer.Write((short)ErrorMapping.NoError);
            writer.WriteShortString("topic1");
            writer.Write(1);   // partitions
            writer.Write((short)ErrorMapping.NoError);
            writer.Write(111); // partition id
            writer.Write(0);   // leader broker id
            writer.Write(1);   // num replicas
            writer.Write(1);   // replica broker id
            writer.Write(1);   // in sync replicas
            writer.Write(1);   // in sync replica broker id
            stream.Seek(0, SeekOrigin.Begin);
            var reader     = new KafkaBinaryReader(stream);
            var response   = new TopicMetadataRequest.Parser().ParseFrom(reader);
            var enumerator = response.GetEnumerator();

            enumerator.MoveNext().Should().BeTrue();
            enumerator.Current.Topic.Should().Be("topic1");
            enumerator.Current.Error.Should().Be(ErrorMapping.NoError);
            var partitionEnumerator = enumerator.Current.PartitionsMetadata.GetEnumerator();

            partitionEnumerator.MoveNext().Should().BeTrue();
            partitionEnumerator.Current.PartitionId.Should().Be(111);
            var leader = partitionEnumerator.Current.Leader;

            leader.Id.Should().Be(0);
            leader.Host.Should().Be("host1");
            leader.Port.Should().Be(9092);
            var replicas = partitionEnumerator.Current.Replicas.ToList();

            replicas.Count.Should().Be(1);
            replicas.First().Id.Should().Be(1);
            replicas.First().Host.Should().Be("host2");
            replicas.First().Port.Should().Be(9093);
            var isrs = partitionEnumerator.Current.Isr.ToList();

            isrs.Count.Should().Be(1);
            isrs.First().Id.Should().Be(1);
            isrs.First().Host.Should().Be("host2");
            isrs.First().Port.Should().Be(9093);
        }
Example 20
        public static GroupCoordinatorResponse ParseFrom(KafkaBinaryReader reader)
        {
            var size            = reader.ReadInt32();
            var correlationid   = reader.ReadInt32();
            var error           = reader.ReadInt16();
            var coordinatorid   = reader.ReadInt32();
            var coordinatorhost = reader.ReadShortString();
            var coordinatorport = reader.ReadInt32();

            return(new GroupCoordinatorResponse(error, coordinatorid, coordinatorhost, coordinatorport));
        }
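
A hypothetical frame matching the reads above; it is not among the original tests, and the size value is arbitrary because the parser discards it.

        public void ShouldParseGroupCoordinatorResponse()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write(21);  // size (discarded)
            writer.Write(123); // correlation id (discarded)
            writer.Write((short)ErrorMapping.NoError);
            writer.Write(1);   // coordinator id
            writer.WriteShortString("host1");
            writer.Write(9092); // coordinator port
            stream.Seek(0, SeekOrigin.Begin);

            var response = GroupCoordinatorResponse.ParseFrom(new KafkaBinaryReader(stream));
        }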
 public static string ReadShortString(KafkaBinaryReader reader, string encoding)
 {
     var size = reader.ReadInt16();
     if (size < 0)
     {
         return null;
     }
     var bytes = reader.ReadBytes(size);
     Encoding encoder = Encoding.GetEncoding(encoding);
     return encoder.GetString(bytes);
 }
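
The matching writer is not shown in these snippets; presumably KafkaBinaryWriter.WriteShortString mirrors the layout read here, along these lines.

 // sketch of the complementary writer (assumed, not library source): an int16
 // length prefix followed by the encoded bytes, with -1 marking a null string
 public static void WriteShortString(KafkaBinaryWriter writer, string value, string encoding)
 {
     if (value == null)
     {
         writer.Write((short)-1);
         return;
     }
     var bytes = Encoding.GetEncoding(encoding).GetBytes(value);
     writer.Write((short)bytes.Length);
     writer.Write(bytes);
 }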
Example 22
        internal static TopicData ParseFrom(KafkaBinaryReader reader)
        {
            var topic          = reader.ReadShortString();
            var partitionCount = reader.ReadInt32();
            var partitions     = new PartitionData[partitionCount];

            for (int i = 0; i < partitionCount; i++)
            {
                partitions[i] = Producers.PartitionData.ParseFrom(reader);
            }
            return(new TopicData(topic, partitions.OrderBy(x => x.Partition)));
        }
Example 23
        /**
         * A message. The format of an N byte message is the following:
         *
         * 1. 4 byte CRC32 of the message
         * 2. 1 byte "magic" identifier to allow format changes, value is 2 currently
         * 3. 1 byte "attributes" identifier to allow annotations on the message independent of the version (e.g. compression enabled, type of codec used)
         * 4. 4 byte key length, containing length K
         * 5. K byte key
         * 6. 4 byte payload length, containing length V
         * 7. V byte payload
         *
         */
        internal static Message ParseFrom(KafkaBinaryReader reader, long offset, int size, int partitionID)
        {
            Message result;
            var     readed   = 0;
            var     checksum = reader.ReadUInt32();

            readed += 4;
            var magic = reader.ReadByte();

            readed++;

            byte[] payload;
            if (magic == 2 || magic == 0) // some producers (CLI) send magic 0 while others have value of 2
            {
                var attributes = reader.ReadByte();
                readed++;
                var keyLength = reader.ReadInt32();
                readed += 4;
                byte[] key = null;
                if (keyLength != -1)
                {
                    key     = reader.ReadBytes(keyLength);
                    readed += keyLength;
                }
                var payloadSize = reader.ReadInt32();
                readed += 4;
                payload = reader.ReadBytes(payloadSize);
                readed += payloadSize;
                result  = new Message(payload, key,
                                      Messages.CompressionCodec.GetCompressionCodec(attributes & CompressionCodeMask))
                {
                    Offset      = offset,
                    PartitionId = partitionID
                };
            }
            else
            {
                payload = reader.ReadBytes(size - DefaultHeaderSize);
                readed += size - DefaultHeaderSize;
                result  = new Message(payload)
                {
                    Offset = offset, PartitionId = partitionID
                };
            }

            if (size != readed)
            {
                throw new KafkaException(ErrorMapping.InvalidFetchSizeCode);
            }

            return(result);
        }
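
A quick size check against the format comment above; this helper is illustrative, not library code.

        public static int ExpectedMessageSize(int keyLength, int payloadLength)
        {
            // CRC(4) + magic(1) + attributes(1) + key length(4) + K + payload length(4) + V;
            // a key length of -1 encodes a null key and contributes no key bytes
            var keyBytes = keyLength == -1 ? 0 : keyLength;
            return 4 + 1 + 1 + 4 + keyBytes + 4 + payloadLength;
        }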
Example 24
        internal static TopicMetadata ParseFrom(KafkaBinaryReader reader, Dictionary <int, Broker> brokers)
        {
            var errorCode          = reader.ReadInt16();
            var topic              = BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding);
            var numPartitions      = reader.ReadInt32();
            var partitionsMetadata = new List <PartitionMetadata>();

            for (var i = 0; i < numPartitions; i++)
            {
                partitionsMetadata.Add(PartitionMetadata.ParseFrom(reader, brokers));
            }
            return(new TopicMetadata(topic, partitionsMetadata, ErrorMapper.ToError(errorCode)));
        }
Example 25
        public static PartitionOffsetsResponse ReadFrom(KafkaBinaryReader reader)
        {
            var partitionId = reader.ReadInt32();
            var error       = reader.ReadInt16();
            var numOffsets  = reader.ReadInt32();
            var offsets     = new List <long>();

            for (var o = 0; o < numOffsets; ++o)
            {
                offsets.Add(reader.ReadInt64());
            }

            return(new PartitionOffsetsResponse(partitionId,
                                                (ErrorMapping)Enum.Parse(typeof(ErrorMapping), error.ToString(CultureInfo.InvariantCulture)),
                                                offsets));
        }
        public static BufferedMessageSet ParseFrom(KafkaBinaryReader reader, int size, int partitionID)
        {
            int bytesLeft = size;

            if (bytesLeft == 0)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>(), partitionID));
            }

            var messages = new List <Message>();

            do
            {
                // Already read last message
                if (bytesLeft < 12)
                {
                    break;
                }

                long offset  = reader.ReadInt64();
                int  msgSize = reader.ReadInt32();
                bytesLeft -= 12;

                if (msgSize > bytesLeft || msgSize < 0)
                {
                    break;
                }

                Message msg = Message.ParseFrom(reader, offset, msgSize, partitionID);
                bytesLeft -= msgSize;
                messages.Add(msg);
            }while (bytesLeft > 0);

            if (bytesLeft > 0)
            {
                reader.ReadBytes(bytesLeft);
            }

            return(new BufferedMessageSet(messages, partitionID));
        }
Example 27
            public IEnumerable <TopicMetadata> ParseFrom(KafkaBinaryReader reader)
            {
                reader.ReadInt32();
                var correlationId = reader.ReadInt32();
                var brokerCount   = reader.ReadInt32();
                var brokerMap     = new Dictionary <int, Broker>();

                for (var i = 0; i < brokerCount; ++i)
                {
                    var broker = Broker.ParseFrom(reader);
                    brokerMap[broker.Id] = broker;
                }

                var numTopics     = reader.ReadInt32();
                var topicMetadata = new TopicMetadata[numTopics];

                for (var i = 0; i < numTopics; i++)
                {
                    topicMetadata[i] = TopicMetadata.ParseFrom(reader, brokerMap);
                }
                return(topicMetadata);
            }
Example 28
            public OffsetResponse ParseFrom(KafkaBinaryReader reader)
            {
                reader.ReadInt32(); // skipping first int
                var correlationId = reader.ReadInt32();
                var numTopics     = reader.ReadInt32();
                var responseMap   = new Dictionary <string, List <PartitionOffsetsResponse> >();

                for (var i = 0; i < numTopics; ++i)
                {
                    var topic         = reader.ReadShortString();
                    var numPartitions = reader.ReadInt32();
                    var responses     = new List <PartitionOffsetsResponse>();
                    for (var p = 0; p < numPartitions; ++p)
                    {
                        responses.Add(PartitionOffsetsResponse.ReadFrom(reader));
                    }

                    responseMap[topic] = responses;
                }

                return(new OffsetResponse(correlationId, responseMap));
            }
Example 29
        internal static BufferedMessageSet ParseFrom(KafkaBinaryReader reader, int size)
        {
            if (size == 0)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>()));
            }

            short errorCode = reader.ReadInt16();

            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            int readed = 2;

            if (readed == size)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>()));
            }

            var messages = new List <Message>();

            do
            {
                int msgSize = reader.ReadInt32();
                readed += 4;
                Message msg = Message.ParseFrom(reader, msgSize);
                readed += msgSize;
                messages.Add(msg);
            }while (readed < size);
            if (size != readed)
            {
                throw new KafkaException(KafkaException.InvalidRetchSizeCode); // sic: "Retch" is the spelling of the library's constant
            }

            return(new BufferedMessageSet(messages));
        }
Example 30
        public void GetBytesValid()
        {
            const string topicName   = "topic";
            var          requestInfo = new Dictionary <string, List <PartitionOffsetRequestInfo> >();

            requestInfo[topicName] = new List <PartitionOffsetRequestInfo>()
            {
                new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
            };
            var request = new OffsetRequest(requestInfo);

            // format = len(request) + request type + version + correlation id + client id + replica id + request info count + request infos
            int count = 2 + 2 + 4 + 2 + 4 + 4 + 4 +
                        BitWorks.GetShortStringLength("topic", AbstractRequest.DefaultEncoding) + 4 + 4 + 8 + 4;
            var ms = new MemoryStream();

            request.WriteTo(ms);
            byte[] bytes = ms.ToArray();
            Assert.IsNotNull(bytes);
            Assert.AreEqual(count, bytes.Length);

            var reader = new KafkaBinaryReader(ms);

            reader.ReadInt32().Should().Be(count - 4);                        // length
            reader.ReadInt16().Should().Be((short)RequestTypes.Offsets);      // request type
            reader.ReadInt16().Should().Be(0);                                // version
            reader.ReadInt32().Should().Be(0);                                // correlation id
            string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
            reader.ReadInt32().Should().Be(-1);                               // replica id
            reader.ReadInt32().Should().Be(1);                                // request info count
            reader.ReadShortString().Should().Be("topic");
            reader.ReadInt32().Should().Be(1);                                // info count
            reader.ReadInt32().Should().Be(0);                                // partition id
            reader.ReadInt64().Should().Be(OffsetRequest.LatestTime);         // time
            reader.ReadInt32().Should().Be(10);                               // max offset
        }
Example 31
        public void ShouldAbleToParseResponse()
        {
            var stream = new MemoryStream();
            var writer = new KafkaBinaryWriter(stream);

            writer.Write(1);
            writer.Write(123);                         // correlation id
            writer.Write(1);                           // topic count
            writer.WriteShortString("topic");
            writer.Write(1);                           // partition count
            writer.Write(999);                         // partition id
            writer.Write((short)ErrorMapping.NoError); // error
            writer.Write(111L);                        // offset
            stream.Seek(0, SeekOrigin.Begin);
            var reader   = new KafkaBinaryReader(stream);
            var response = new ProducerResponse.Parser().ParseFrom(reader);

            response.CorrelationId.Should().Be(123);
            response.Statuses.Count.Should().Be(1);
            var info = response.Statuses[new TopicAndPartition("topic", 999)];

            info.Error.Should().Be(ErrorMapping.NoError);
            info.Offset.Should().Be(111L);
        }