Example #1
        public static PartitionMetadata ParseFrom(KafkaBinaryReader reader, Dictionary <int, Broker> brokers)
        {
            var    errorCode   = reader.ReadInt16();
            var    partitionId = reader.ReadInt32();
            var    leaderId    = reader.ReadInt32();
            Broker leader      = null;

            if (leaderId != -1)
            {
                leader = brokers[leaderId];
            }

            // list of all replicas
            var numReplicas = reader.ReadInt32();
            var replicas    = new List <Broker>();

            for (var i = 0; i < numReplicas; ++i)
            {
                replicas.Add(brokers[reader.ReadInt32()]);
            }

            // list of in-sync replicas
            var numIsr = reader.ReadInt32();
            var isrs   = new List <Broker>();

            for (var i = 0; i < numIsr; ++i)
            {
                isrs.Add(brokers[reader.ReadInt32()]);
            }

            return(new PartitionMetadata(partitionId, leader, replicas, isrs));
        }
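
The indexer lookups above throw KeyNotFoundException if a replica id is missing from the broker map. A minimal defensive variant of the replica loop, assuming the same KafkaBinaryReader and Broker types as above, might look like this (a sketch only, not the library's actual code):

    var numReplicas = reader.ReadInt32();
    var replicas = new List<Broker>();
    for (var i = 0; i < numReplicas; ++i)
    {
        var replicaId = reader.ReadInt32();
        Broker replica;
        if (brokers.TryGetValue(replicaId, out replica))   // skip ids absent from the broker map
        {
            replicas.Add(replica);
        }
    }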
Example #2
        internal static IList <BufferedMessageSet> ParseMultiFrom(KafkaBinaryReader reader, int size, int count)
        {
            var result = new List <BufferedMessageSet>();

            if (size == 0)
            {
                return(result);
            }

            int   readed    = 0;             // bytes consumed from this multi-fetch payload so far
            short errorCode = reader.ReadInt16();

            readed += 2;
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            for (int i = 0; i < count; i++)
            {
                int partSize = reader.ReadInt32();
                readed += 4;
                var item = ParseFrom(reader, partSize);
                readed += partSize;
                result.Add(item);
            }

            if (size != readed)
            {
                throw new KafkaException(KafkaException.InvalidRetchSizeCode);
            }

            return(result);
        }
Example #3
        internal static IList <BufferedMessageSet> ParseMultiFrom(KafkaBinaryReader reader, int size, int count, List <long> initialOffsets)
        {
            var result = new List <BufferedMessageSet>();

            if (size == 0)
            {
                return(result);
            }

            int   readed    = 0;
            short errorCode = reader.ReadInt16();

            readed += 2;
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            for (int i = 0; i < count; i++)
            {
                int partSize = reader.ReadInt32();
                readed += 4;
                var item = ParseFrom(reader, partSize, initialOffsets[i]);
                readed += partSize;
                result.Add(item);
                var totalSetSize = item.SetSize + 2;// 2 is the size of int16 that is the error info
                if (totalSetSize != partSize)
                {
                    break;
                }
            }

            return(result);
        }
Example #4
            public ProducerResponse ParseFrom(KafkaBinaryReader reader)
            {
                var size          = reader.ReadInt32();
                var correlationId = reader.ReadInt32();
                var topicCount    = reader.ReadInt32();

                var statuses = new Dictionary <TopicAndPartition, ProducerResponseStatus>();

                for (var i = 0; i < topicCount; ++i)
                {
                    var topic          = reader.ReadShortString();
                    var partitionCount = reader.ReadInt32();
                    for (var p = 0; p < partitionCount; ++p)
                    {
                        var partitionId       = reader.ReadInt32();
                        var error             = reader.ReadInt16();
                        var offset            = reader.ReadInt64();
                        var topicAndPartition = new TopicAndPartition(topic, partitionId);

                        statuses.Add(topicAndPartition, new ProducerResponseStatus
                        {
                            Error  = ErrorMapper.ToError(error),
                            Offset = offset
                        });
                    }
                }

                return(new ProducerResponse(correlationId, statuses));
            }
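
A hypothetical way to consume the parsed result, assuming the ProducerResponse returned above exposes the statuses dictionary under a Statuses property (the property name is an assumption, not confirmed by this example):

    // "response" is the ProducerResponse returned by ParseFrom above;
    // Statuses is an assumed name for the dictionary handed to its constructor.
    foreach (var entry in response.Statuses)
    {
        var status = entry.Value;   // Error and Offset are the members populated above
        Console.WriteLine("{0} -> error {1}, offset {2}", entry.Key, status.Error, status.Offset);
    }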
Example #5
        public static LeaveGroupResponse ParseFrom(KafkaBinaryReader reader)
        {
            var size          = reader.ReadInt32();
            var correlationid = reader.ReadInt32();
            var error         = reader.ReadInt16();

            return(new LeaveGroupResponse(error));
        }
Example #6
        internal static BufferedMessageSet ParseFrom(KafkaBinaryReader reader, int size, long initialOffset)
        {
            if (size == 0)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>(), initialOffset));
            }

            short errorCode = reader.ReadInt16();

            if (errorCode != ErrorMapping.NoError)
            {
                if (errorCode == ErrorMapping.OffsetOutOfRangeCode)
                {
                    return(new BufferedMessageSet(Enumerable.Empty <Message>(), errorCode, initialOffset));
                }

                throw new KafkaException(errorCode);
            }

            int readed = 2; // the 2-byte error code has already been consumed

            if (readed == size)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>(), initialOffset));
            }

            var messages = new List <Message>();

            do
            {
                int msgSize = reader.ReadInt32();
                readed += 4;
                int sizeNotUsed = size - readed;
                if (msgSize > sizeNotUsed || msgSize < 0)
                {
                    if (messages.Count == 0 || msgSize < 0)
                    {
                        string errorMessage =
                            String.Format(
                                "Invalid message size. Read size = {0}, the remaining data size = {1} (possible causes (1) a single message larger than the fetch size; (2) log corruption)",
                                msgSize, sizeNotUsed);
                        throw new InvalidMessageSizeException(errorMessage);
                    }
                    return(new BufferedMessageSet(messages, initialOffset));
                }
                Message msg = Message.ParseFrom(reader, msgSize);
                readed += msgSize;
                messages.Add(msg);
            }while (readed < size);
            if (size != readed)
            {
                throw new KafkaException(ErrorMapping.InvalidFetchSizeCode);
            }

            return(new BufferedMessageSet(messages, initialOffset));
        }
Example #7
        internal static PartitionData ParseFrom(KafkaBinaryReader reader)
        {
            var partition          = reader.ReadInt32();
            var error              = reader.ReadInt16();
            var highWatermark      = reader.ReadInt64();
            var messageSetSize     = reader.ReadInt32();
            var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);

            return(new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet));
        }
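
Read in order, the fields above correspond to the following per-partition layout of a fetch response (sizes inferred directly from the reader calls):

    // int32  partition id
    // int16  error code
    // int64  high watermark
    // int32  message-set size in bytes
    // bytes  message-set payload, decoded by BufferedMessageSet.ParseFrom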
Example #8
 public static string ReadShortString(KafkaBinaryReader reader, string encoding)
 {
     var size = reader.ReadInt16();
     if (size < 0)
     {
         return null;
     }
     var bytes = reader.ReadBytes(size);
     Encoding encoder = Encoding.GetEncoding(encoding);
     return encoder.GetString(bytes);
 }
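
For context, a write-side counterpart would emit the same int16-length-prefixed layout that ReadShortString consumes. The helper below is a sketch and not part of the library shown here; Kafka's wire format is big-endian, so the length prefix is byte-swapped explicitly because BinaryWriter writes little-endian:

    // requires System.IO, System.Net and System.Text
    public static void WriteShortString(BinaryWriter writer, string value, string encoding)
    {
        if (value == null)
        {
            writer.Write(IPAddress.HostToNetworkOrder((short)-1));           // a -1 length encodes a null string
            return;
        }
        byte[] bytes = Encoding.GetEncoding(encoding).GetBytes(value);
        writer.Write(IPAddress.HostToNetworkOrder((short)bytes.Length));     // 2-byte big-endian length prefix
        writer.Write(bytes);                                                 // raw encoded payload
    }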
Example #9
        public static GroupCoordinatorResponse ParseFrom(KafkaBinaryReader reader)
        {
            var size            = reader.ReadInt32();
            var correlationid   = reader.ReadInt32();
            var error           = reader.ReadInt16();
            var coordinatorid   = reader.ReadInt32();
            var coordinatorhost = reader.ReadShortString();
            var coordinatorport = reader.ReadInt32();

            return(new GroupCoordinatorResponse(error, coordinatorid, coordinatorhost, coordinatorport));
        }
Example #10
        internal static TopicMetadata ParseFrom(KafkaBinaryReader reader, Dictionary <int, Broker> brokers)
        {
            var errorCode          = reader.ReadInt16();
            var topic              = BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding);
            var numPartitions      = reader.ReadInt32();
            var partitionsMetadata = new List <PartitionMetadata>();

            for (var i = 0; i < numPartitions; i++)
            {
                partitionsMetadata.Add(PartitionMetadata.ParseFrom(reader, brokers));
            }
            return(new TopicMetadata(topic, partitionsMetadata, ErrorMapper.ToError(errorCode)));
        }
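
For orientation, a metadata response carries a broker list before the topic list, so a caller would typically build the brokers dictionary first and then invoke the method above once per topic. The outer loop below is a hypothetical sketch; Broker.ParseFrom and Broker.Id are assumed members that do not appear in these examples:

    var brokerCount = reader.ReadInt32();
    var brokers = new Dictionary<int, Broker>();
    for (var i = 0; i < brokerCount; i++)
    {
        var broker = Broker.ParseFrom(reader);   // assumed helper: reads node id, host and port
        brokers[broker.Id] = broker;             // assumed property: the broker's node id
    }

    var topicCount = reader.ReadInt32();
    var topics = new List<TopicMetadata>();
    for (var i = 0; i < topicCount; i++)
    {
        topics.Add(TopicMetadata.ParseFrom(reader, brokers));
    }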
Example #11
        public void GetBytesValid()
        {
            const string topicName   = "topic";
            var          requestInfo = new Dictionary <string, List <PartitionOffsetRequestInfo> >();

            requestInfo[topicName] = new List <PartitionOffsetRequestInfo>()
            {
                new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
            };
            var request = new OffsetRequest(requestInfo);

            // format = len(request) + requesttype + version + correlation id + client id + replica id + request info count + request infos
            int count = 2 + 2 + 4 + 2 + 4 + 4 + 4 +
                        BitWorks.GetShortStringLength("topic", AbstractRequest.DefaultEncoding) + 4 + 4 + 8 + 4;
            var ms = new MemoryStream();

            request.WriteTo(ms);
            byte[] bytes = ms.ToArray();
            Assert.IsNotNull(bytes);
            Assert.AreEqual(count, bytes.Length);

            var reader = new KafkaBinaryReader(ms);

            reader.ReadInt32().Should().Be(count - 4);                        // length
            reader.ReadInt16().Should().Be((short)RequestTypes.Offsets);      // request type
            reader.ReadInt16().Should().Be(0);                                // version
            reader.ReadInt32().Should().Be(0);                                // correlation id
            string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
            reader.ReadInt32().Should().Be(-1);                               // replica id
            reader.ReadInt32().Should().Be(1);                                // request info count
            reader.ReadShortString().Should().Be("topic");
            reader.ReadInt32().Should().Be(1);                                // info count
            reader.ReadInt32().Should().Be(0);                                // partition id
            reader.ReadInt64().Should().Be(OffsetRequest.LatestTime);         // time
            reader.ReadInt32().Should().Be(10);                               // max offset
        }
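
For reference, the expected byte count asserted above breaks down as follows (assuming GetShortStringLength returns the 2-byte length prefix plus the encoded string length):

    // size field             4
    // request type           2
    // api version            2
    // correlation id         4
    // client id              2      (empty short string: only the int16 length)
    // replica id             4
    // topic count            4
    // "topic"                2 + 5
    // partition count        4
    // partition id           4
    // time                   8
    // max number of offsets  4
    // total                 49      (the size field's value excludes itself, hence the count - 4 assertion)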
Example #12
        public static PartitionOffsetsResponse ReadFrom(KafkaBinaryReader reader)
        {
            var partitionId = reader.ReadInt32();
            var error       = reader.ReadInt16();
            var numOffsets  = reader.ReadInt32();
            var offsets     = new List <long>();

            for (var o = 0; o < numOffsets; ++o)
            {
                offsets.Add(reader.ReadInt64());
            }

            return(new PartitionOffsetsResponse(partitionId,
                                                (ErrorMapping)Enum.Parse(typeof(ErrorMapping), error.ToString(CultureInfo.InvariantCulture)),
                                                offsets));
        }
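
Round-tripping the numeric code through Enum.Parse and a string works, but since the original code already relies on ErrorMapping's values matching the wire codes, a direct cast is an equivalent and cheaper alternative (a sketch of the return statement only, using the same types as above):

    return new PartitionOffsetsResponse(partitionId, (ErrorMapping)error, offsets);   // the int16 wire code is the enum's underlying value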
Example #13
        internal static BufferedMessageSet ParseFrom(KafkaBinaryReader reader, int size)
        {
            if (size == 0)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>()));
            }

            short errorCode = reader.ReadInt16();

            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            int readed = 2;

            if (readed == size)
            {
                return(new BufferedMessageSet(Enumerable.Empty <Message>()));
            }

            var messages = new List <Message>();

            do
            {
                int msgSize = reader.ReadInt32();
                readed += 4;
                Message msg = Message.ParseFrom(reader, msgSize);
                readed += msgSize;
                messages.Add(msg);
            }while (readed < size);
            if (size != readed)
            {
                throw new KafkaException(KafkaException.InvalidRetchSizeCode);
            }

            return(new BufferedMessageSet(messages));
        }