public static FetchResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        // throttle_time_ms is only present for fetch v1+
        TimeSpan? throttleTime = null;
        if (context.ApiVersion >= 1) {
            throttleTime = TimeSpan.FromMilliseconds(reader.ReadInt32());
        }

        var topics = new List<FetchResponse.Topic>();
        var topicCount = reader.ReadInt32();
        for (var t = 0; t < topicCount; t++) {
            var topicName = reader.ReadString();

            var partitionCount = reader.ReadInt32();
            for (var p = 0; p < partitionCount; p++) {
                var partitionId = reader.ReadInt32();
                var errorCode = (ErrorCode)reader.ReadInt16();
                var highWaterMarkOffset = reader.ReadInt64();
                var messages = reader.ReadMessages();

                topics.Add(new Topic(topicName, partitionId, highWaterMarkOffset, errorCode, messages));
            }
        }
        return new FetchResponse(topics, throttleTime);
    }
}
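// Illustrative sketch only: the Kafka wire format encodes integers in big-endian (network)
// byte order and strings as an INT16 length prefix followed by UTF-8 bytes, with a length
// of -1 meaning null. A reader like KafkaReader presumably performs reads along these lines;
// this stand-alone helper is an assumption for illustration, not the library's implementation.
internal sealed class BigEndianSketchReader
{
    private readonly byte[] _buffer;
    private int _position;

    public BigEndianSketchReader(byte[] buffer)
    {
        _buffer = buffer;
    }

    public short ReadInt16()
    {
        // two bytes, most significant first
        var value = (short)((_buffer[_position] << 8) | _buffer[_position + 1]);
        _position += 2;
        return value;
    }

    public int ReadInt32()
    {
        // four bytes, most significant first
        var value = (_buffer[_position] << 24) | (_buffer[_position + 1] << 16)
                  | (_buffer[_position + 2] << 8) | _buffer[_position + 3];
        _position += 4;
        return value;
    }

    public string ReadString()
    {
        var length = ReadInt16();
        if (length < 0) return null; // -1 encodes a null string
        var value = System.Text.Encoding.UTF8.GetString(_buffer, _position, length);
        _position += length;
        return value;
    }
}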
public static OffsetsResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var topics = new List<Topic>();
        var topicCount = reader.ReadInt32();
        for (var t = 0; t < topicCount; t++) {
            var topicName = reader.ReadString();

            var partitionCount = reader.ReadInt32();
            for (var p = 0; p < partitionCount; p++) {
                var partitionId = reader.ReadInt32();
                var errorCode = (ErrorCode)reader.ReadInt16();

                if (context.ApiVersion == 0) {
                    // v0 returns an array of offsets per partition
                    var offsetsCount = reader.ReadInt32();
                    for (var o = 0; o < offsetsCount; o++) {
                        var offset = reader.ReadInt64();
                        topics.Add(new Topic(topicName, partitionId, errorCode, offset));
                    }
                } else {
                    // v1+ returns a single (timestamp, offset) pair per partition
                    var timestamp = reader.ReadInt64();
                    var offset = reader.ReadInt64();
                    topics.Add(new Topic(topicName, partitionId, errorCode, offset, DateTimeOffset.FromUnixTimeMilliseconds(timestamp)));
                }
            }
        }
        return new OffsetsResponse(topics);
    }
}
public static DescribeGroupsResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var groups = new Group[reader.ReadInt32()];
        for (var g = 0; g < groups.Length; g++) {
            var errorCode = (ErrorCode)reader.ReadInt16();
            var groupId = reader.ReadString();
            var state = reader.ReadString();
            var protocolType = reader.ReadString();
            var protocol = reader.ReadString();

            // the membership encoder is resolved lazily, only when the group has members to decode
            IMembershipEncoder encoder = null;
            var members = new Member[reader.ReadInt32()];
            for (var m = 0; m < members.Length; m++) {
                encoder = encoder ?? context.GetEncoder(protocolType);
                var memberId = reader.ReadString();
                var clientId = reader.ReadString();
                var clientHost = reader.ReadString();
                var memberMetadata = encoder.DecodeMetadata(protocol, reader);
                var memberAssignment = encoder.DecodeAssignment(reader);
                members[m] = new Member(memberId, clientId, clientHost, memberMetadata, memberAssignment);
            }
            groups[g] = new Group(errorCode, groupId, state, protocolType, protocol, members);
        }
        return new DescribeGroupsResponse(groups);
    }
}
public static GroupCoordinatorResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var errorCode = (ErrorCode)reader.ReadInt16();
        var coordinatorId = reader.ReadInt32();
        var coordinatorHost = reader.ReadString();
        var coordinatorPort = reader.ReadInt32();

        return new GroupCoordinatorResponse(errorCode, coordinatorId, coordinatorHost, coordinatorPort);
    }
}
public static JoinGroupResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var errorCode = (ErrorCode)reader.ReadInt16();
        var generationId = reader.ReadInt32();
        var groupProtocol = reader.ReadString();
        var leaderId = reader.ReadString();
        var memberId = reader.ReadString();

        // member metadata is opaque bytes on the wire; the registered encoder interprets it
        var encoder = context.GetEncoder(context.ProtocolType);
        var members = new Member[reader.ReadInt32()];
        for (var m = 0; m < members.Length; m++) {
            var id = reader.ReadString();
            var metadata = encoder.DecodeMetadata(groupProtocol, reader);
            members[m] = new Member(id, metadata);
        }
        return new JoinGroupResponse(errorCode, generationId, groupProtocol, leaderId, memberId, members);
    }
}
public static DeleteTopicsResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var topics = new Topic[reader.ReadInt32()];
        for (var i = 0; i < topics.Length; i++) {
            var topicName = reader.ReadString();
            var errorCode = reader.ReadErrorCode();
            topics[i] = new Topic(topicName, errorCode);
        }
        return new DeleteTopicsResponse(topics);
    }
}
public static ProduceResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        TimeSpan? throttleTime = null;

        var topics = new List<Topic>();
        var topicCount = reader.ReadInt32();
        for (var i = 0; i < topicCount; i++) {
            var topicName = reader.ReadString();

            var partitionCount = reader.ReadInt32();
            for (var j = 0; j < partitionCount; j++) {
                var partitionId = reader.ReadInt32();
                var errorCode = (ErrorCode)reader.ReadInt16();
                var offset = reader.ReadInt64();

                // v2+ adds a broker-assigned timestamp; -1 means none was assigned
                DateTimeOffset? timestamp = null;
                if (context.ApiVersion >= 2) {
                    var milliseconds = reader.ReadInt64();
                    if (milliseconds >= 0) {
                        timestamp = DateTimeOffset.FromUnixTimeMilliseconds(milliseconds);
                    }
                }

                topics.Add(new Topic(topicName, partitionId, errorCode, offset, timestamp));
            }
        }

        // unlike FetchResponse, throttle_time_ms trails the topic data (v1+)
        if (context.ApiVersion >= 1) {
            throttleTime = TimeSpan.FromMilliseconds(reader.ReadInt32());
        }
        return new ProduceResponse(topics, throttleTime);
    }
}
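// Sketch of the timestamp convention the produce parser above relies on: the broker sends
// its log-append time as Unix epoch milliseconds, and -1 signals that no broker timestamp
// was assigned (the topic uses CreateTime rather than LogAppendTime). This helper is an
// illustrative assumption, not part of the library; it only wraps the standard .NET conversion.
internal static class TimestampSketch
{
    public static DateTimeOffset? FromKafkaMilliseconds(long milliseconds)
    {
        // -1 is the protocol's sentinel for "no broker-assigned timestamp"
        return milliseconds >= 0
            ? DateTimeOffset.FromUnixTimeMilliseconds(milliseconds)
            : (DateTimeOffset?)null;
    }
}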
public static OffsetCommitResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var topics = new List<TopicResponse>();
        var topicCount = reader.ReadInt32();
        for (var t = 0; t < topicCount; t++) {
            var topicName = reader.ReadString();

            var partitionCount = reader.ReadInt32();
            for (var p = 0; p < partitionCount; p++) {
                var partitionId = reader.ReadInt32();
                var errorCode = (ErrorCode)reader.ReadInt16();
                topics.Add(new TopicResponse(topicName, partitionId, errorCode));
            }
        }
        return new OffsetCommitResponse(topics);
    }
}
public static SaslHandshakeResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var errorCode = (ErrorCode)reader.ReadInt16();
        var enabledMechanisms = new string[reader.ReadInt32()];
        for (var m = 0; m < enabledMechanisms.Length; m++) {
            enabledMechanisms[m] = reader.ReadString();
        }
        return new SaslHandshakeResponse(errorCode, enabledMechanisms);
    }
}
public static ListGroupsResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var errorCode = (ErrorCode)reader.ReadInt16();
        var groups = new Group[reader.ReadInt32()];
        for (var g = 0; g < groups.Length; g++) {
            var groupId = reader.ReadString();
            var protocolType = reader.ReadString();
            groups[g] = new Group(groupId, protocolType);
        }
        return new ListGroupsResponse(errorCode, groups);
    }
}
public static ApiVersionsResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var errorCode = (ErrorCode)reader.ReadInt16();

        // each entry describes the broker's supported version range for one API key
        var apiKeys = new VersionSupport[reader.ReadInt32()];
        for (var i = 0; i < apiKeys.Length; i++) {
            var apiKey = (ApiKey)reader.ReadInt16();
            var minVersion = reader.ReadInt16();
            var maxVersion = reader.ReadInt16();
            apiKeys[i] = new VersionSupport(apiKey, minVersion, maxVersion);
        }
        return new ApiVersionsResponse(errorCode, apiKeys);
    }
}
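// Illustrative sketch: a client typically intersects its own supported range with the
// broker's (min, max) range from ApiVersionsResponse and picks the highest common version.
// This helper is an assumption about how the ranges might be used, not the library's
// actual negotiation logic.
internal static class VersionNegotiationSketch
{
    public static short? PickVersion(short clientMin, short clientMax, short brokerMin, short brokerMax)
    {
        var min = clientMin > brokerMin ? clientMin : brokerMin;
        var max = clientMax < brokerMax ? clientMax : brokerMax;
        return max >= min ? max : (short?)null; // null when the ranges do not overlap
    }
}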
public static MetadataResponse FromBytes(IRequestContext context, ArraySegment<byte> bytes) {
    using (var reader = new KafkaReader(bytes)) {
        var brokers = new Server[reader.ReadInt32()];
        for (var b = 0; b < brokers.Length; b++) {
            var brokerId = reader.ReadInt32();
            var host = reader.ReadString();
            var port = reader.ReadInt32();

            // rack is only present for metadata v1+
            string rack = null;
            if (context.ApiVersion >= 1) {
                rack = reader.ReadString();
            }
            brokers[b] = new Server(brokerId, host, port, rack);
        }

        // cluster_id (v2+) and controller_id (v1+) sit between the broker and topic arrays
        string clusterId = null;
        if (context.ApiVersion >= 2) {
            clusterId = reader.ReadString();
        }
        int? controllerId = null;
        if (context.ApiVersion >= 1) {
            controllerId = reader.ReadInt32();
        }

        var topics = new Topic[reader.ReadInt32()];
        for (var t = 0; t < topics.Length; t++) {
            var topicError = (ErrorCode)reader.ReadInt16();
            var topicName = reader.ReadString();

            // is_internal is only present for v1+
            bool? isInternal = null;
            if (context.ApiVersion >= 1) {
                isInternal = reader.ReadBoolean();
            }

            var partitions = new Partition[reader.ReadInt32()];
            for (var p = 0; p < partitions.Length; p++) {
                var partitionError = (ErrorCode)reader.ReadInt16();
                var partitionId = reader.ReadInt32();
                var leaderId = reader.ReadInt32();

                var replicaCount = reader.ReadInt32();
                var replicas = replicaCount.Repeat(reader.ReadInt32).ToArray();

                var isrCount = reader.ReadInt32();
                var isrs = isrCount.Repeat(reader.ReadInt32).ToArray();

                partitions[p] = new Partition(partitionId, leaderId, partitionError, replicas, isrs);
            }
            topics[t] = new Topic(topicName, topicError, partitions, isInternal);
        }
        return new MetadataResponse(brokers, topics, controllerId, clusterId);
    }
}
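// Sketch of the int.Repeat(...) extension the metadata parser uses above: it presumably
// invokes the supplied delegate once per count and yields each result in order, so
// replicaCount.Repeat(reader.ReadInt32) reads that many INT32 values. This version is an
// assumption for illustration, not the library's actual extension method.
internal static class RepeatSketchExtensions
{
    public static IEnumerable<T> Repeat<T>(this int count, Func<T> producer)
    {
        for (var i = 0; i < count; i++) {
            yield return producer();
        }
    }
}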