/// <summary>
/// Deserializes a <see cref="ProducerResponse"/> from the wire format:
/// size, correlation id, then per-topic partition statuses (partition id, error code, offset).
/// </summary>
/// <param name="reader">Reader positioned at the start of the response.</param>
/// <returns>The parsed response with one status entry per topic/partition pair.</returns>
public ProducerResponse ParseFrom(KafkaBinaryReader reader)
{
    reader.ReadInt32(); // total response size — read to advance the stream, value not needed
    var correlationId = reader.ReadInt32();
    var topicCount = reader.ReadInt32();
    var statuses = new Dictionary<TopicAndPartition, ProducerResponseStatus>();
    for (var i = 0; i < topicCount; ++i)
    {
        var topic = reader.ReadShortString();
        var partitionCount = reader.ReadInt32();
        for (var p = 0; p < partitionCount; ++p)
        {
            var partitionId = reader.ReadInt32();
            var error = reader.ReadInt16();
            var offset = reader.ReadInt64();
            var topicAndPartition = new TopicAndPartition(topic, partitionId);
            statuses.Add(topicAndPartition, new ProducerResponseStatus
            {
                Error = ErrorMapper.ToError(error),
                Offset = offset
            });
        }
    }

    return new ProducerResponse(correlationId, statuses);
}
/// <summary>
/// Deserializes a <see cref="GroupCoordinatorResponse"/> from the wire format:
/// size, correlation id, error code, then the coordinator's id, host, and port.
/// </summary>
/// <param name="reader">Reader positioned at the start of the response.</param>
/// <returns>The parsed coordinator response.</returns>
public static GroupCoordinatorResponse ParseFrom(KafkaBinaryReader reader)
{
    reader.ReadInt32(); // total response size — read to advance the stream, value not needed
    reader.ReadInt32(); // correlation id — read to advance the stream; the response type does not carry it
    var error = reader.ReadInt16();
    var coordinatorId = reader.ReadInt32();
    var coordinatorHost = reader.ReadShortString();
    var coordinatorPort = reader.ReadInt32();
    return new GroupCoordinatorResponse(error, coordinatorId, coordinatorHost, coordinatorPort);
}
/// <summary>
/// Deserializes a <see cref="TopicData"/>: a topic name followed by its partition
/// entries, which are returned ordered by partition id.
/// </summary>
/// <param name="reader">Reader positioned at the start of the topic data.</param>
/// <returns>The parsed topic data with partitions sorted by partition id.</returns>
internal static TopicData ParseFrom(KafkaBinaryReader reader)
{
    var topicName = reader.ReadShortString();
    var count = reader.ReadInt32();
    var parsed = new PartitionData[count];
    for (var idx = 0; idx < count; idx++)
    {
        parsed[idx] = Producers.PartitionData.ParseFrom(reader);
    }

    return new TopicData(topicName, parsed.OrderBy(d => d.Partition));
}
/// <summary>
/// Verifies that an <see cref="OffsetRequest"/> for a single topic/partition
/// serializes to the documented wire format, field by field.
/// </summary>
public void GetBytesValid()
{
    const string topicName = "topic";
    var offsetInfos = new Dictionary<string, List<PartitionOffsetRequestInfo>>
    {
        [topicName] = new List<PartitionOffsetRequestInfo>
        {
            new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
        }
    };
    var request = new OffsetRequest(offsetInfos);

    // format = len(request) + requesttype + version + correlation id + client id + replica id + request info count + request infos
    int expectedLength = 2 + 2 + 4 + 2 + 4 + 4 + 4
        + BitWorks.GetShortStringLength("topic", AbstractRequest.DefaultEncoding)
        + 4 + 4 + 8 + 4;

    var stream = new MemoryStream();
    request.WriteTo(stream);
    byte[] serialized = stream.ToArray();

    Assert.IsNotNull(serialized);
    Assert.AreEqual(expectedLength, serialized.Length);

    var reader = new KafkaBinaryReader(stream);
    reader.ReadInt32().Should().Be(expectedLength - 4);              // length
    reader.ReadInt16().Should().Be((short)RequestTypes.Offsets);     // request type
    reader.ReadInt16().Should().Be(0);                               // version
    reader.ReadInt32().Should().Be(0);                               // correlation id
    string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
    reader.ReadInt32().Should().Be(-1);                              // replica id
    reader.ReadInt32().Should().Be(1);                               // request info count
    reader.ReadShortString().Should().Be("topic");
    reader.ReadInt32().Should().Be(1);                               // info count
    reader.ReadInt32().Should().Be(0);                               // partition id
    reader.ReadInt64().Should().Be(OffsetRequest.LatestTime);        // time
    reader.ReadInt32().Should().Be(10);                              // max offset
}
/// <summary>
/// Deserializes an <see cref="OffsetResponse"/> from the wire format:
/// size, correlation id, then per-topic lists of partition offset responses.
/// </summary>
/// <param name="reader">Reader positioned at the start of the response.</param>
/// <returns>The parsed response keyed by topic name.</returns>
public OffsetResponse ParseFrom(KafkaBinaryReader reader)
{
    reader.ReadInt32(); // skipping first int (total response size)
    var correlationId = reader.ReadInt32();
    var topicCount = reader.ReadInt32();
    var offsetsByTopic = new Dictionary<string, List<PartitionOffsetsResponse>>();
    for (var t = 0; t < topicCount; ++t)
    {
        var topicName = reader.ReadShortString();
        var partitionCount = reader.ReadInt32();
        var partitionOffsets = new List<PartitionOffsetsResponse>();
        for (var j = 0; j < partitionCount; ++j)
        {
            partitionOffsets.Add(PartitionOffsetsResponse.ReadFrom(reader));
        }

        offsetsByTopic[topicName] = partitionOffsets;
    }

    return new OffsetResponse(correlationId, offsetsByTopic);
}