public void SendMessage(string topic, int partition, string key, string data)
{
    // 1. figure out which broker leads this topic/partition
    var target = this.brokerOrderedMetadatas.FirstOrDefault(
        x => x.Value.Any(y => y.Key == topic && y.Value.Any(z => z.PartitionId == partition)));
    var bkId = target.Key;
    var socket = this.sockets[bkId];

    // NOTE: the request is constructed empty here; key and data are not yet
    // serialized into it, so this only exercises the request/response path.
    var request = new KafkaProduceRequest();
    var bytesSend = request.Serialize();
    KafkaProduceResponse response = null;

    // 2. send the request to the target broker
    lock (socket)
    {
        if (!socket.Connected)
        {
            socket.Connect(socket.RemoteEndPoint);
        }

        var stream = new NetworkStream(socket)
        {
            ReadTimeout = this.config.ReceiveTimeout,
            WriteTimeout = this.config.SendTimeout
        };
        stream.Write(bytesSend, 0, bytesSend.Length);

        var reader = new KafkaBinaryReader(stream);
        response = KafkaProduceResponse.ParseFrom(reader);
    }

    // TODO: check the response for per-partition error codes
}
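// All of the ParseFrom methods below assume the standard Kafka response
// framing: a 4-byte size prefix, then a payload that begins with the
// correlation id. Most parsers here consume the size prefix themselves and
// read field-by-field from the stream; FetchResponse.ParseFrom (further down)
// instead buffers the whole payload first. A minimal sketch of that buffering
// pattern; the ReadResponseFrame helper name is illustrative, not part of
// this library.
private static KafkaBinaryReader ReadResponseFrame(NetworkStream stream)
{
    var wireReader = new KafkaBinaryReader(stream);
    int size = wireReader.ReadInt32();           // payload length, excluding the prefix itself
    byte[] payload = wireReader.ReadBytes(size); // remainder of the response
    // Parsing from a MemoryStream keeps a slow socket from stalling field-by-field reads.
    return new KafkaBinaryReader(new MemoryStream(payload));
}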
public static OffsetFetchResponse ParseFrom(KafkaBinaryReader reader)
{
    var size = reader.ReadInt32();
    var correlationid = reader.ReadInt32();
    var count = reader.ReadInt32();
    var data = new Dictionary<string, List<OffsetFetchResponseInfo>>();
    for (int i = 0; i < count; i++)
    {
        var topic = reader.ReadShortString();
        var num = reader.ReadInt32();
        for (int j = 0; j < num; j++)
        {
            var partition = reader.ReadInt32();
            var offset = reader.ReadInt64();
            var metadata = reader.ReadShortString();
            var error = reader.ReadInt16();
            if (!data.ContainsKey(topic))
            {
                data.Add(topic, new List<OffsetFetchResponseInfo>());
            }
            data[topic].Add(new OffsetFetchResponseInfo(partition, offset, metadata, error));
        }
    }
    return new OffsetFetchResponse(size, correlationid, data);
}
public static TopicMetadataResponse ParseFrom(KafkaBinaryReader reader)
{
    reader.ReadInt32(); // skip the 4-byte size prefix
    int correlationId = reader.ReadInt32();
    int brokerCount = reader.ReadInt32();
    var brokerMap = new Dictionary<int, Broker>();
    for (int i = 0; i < brokerCount; ++i)
    {
        var broker = Broker.ParseFrom(reader);
        brokerMap[broker.Id] = broker;
    }

    var numTopics = reader.ReadInt32();
    var topicMetadata = new TopicMetadataResponseTopicInfo[numTopics];
    for (int i = 0; i < numTopics; i++)
    {
        topicMetadata[i] = TopicMetadataResponseTopicInfo.ParseFrom(reader, brokerMap);
    }

    var response = new TopicMetadataResponse();
    response.Brokers = brokerMap.Select(x => x.Value);
    response.TopicMetadatas = topicMetadata;
    return response;
}
public static OffsetCommitResponse ParseFrom(KafkaBinaryReader reader)
{
    var size = reader.ReadInt32();
    var correlationid = reader.ReadInt32();
    var count = reader.ReadInt32();
    var data = new Dictionary<string, List<PartitionOffsetCommitResponseInfo>>();
    for (int i = 0; i < count; i++)
    {
        var topic = reader.ReadShortString();
        var num = reader.ReadInt32();
        var info = new List<PartitionOffsetCommitResponseInfo>();
        for (int j = 0; j < num; j++)
        {
            var partition = reader.ReadInt32();
            var errorCode = reader.ReadInt16();
            info.Add(new PartitionOffsetCommitResponseInfo(partition, errorCode));
        }
        data[topic] = info; // add or overwrite; the last block for a topic wins
    }
    return new OffsetCommitResponse(size, correlationid, data);
}
public void Deserialize(byte[] bytes)
{
    using (var ms = new MemoryStream(bytes))
    {
        var reader = new KafkaBinaryReader(ms);
        this.Version = reader.ReadInt16();
        var count = reader.ReadInt32();
        this.PartitionAssignmentInfos = new SyncGroupResponsePartitionAssignmentInfo[count];
        for (int i = 0; i < count; i++)
        {
            this.PartitionAssignmentInfos[i] = new SyncGroupResponsePartitionAssignmentInfo();
            var txtLen = reader.ReadInt16();
            this.PartitionAssignmentInfos[i].Topic = Encoding.UTF8.GetString(reader.ReadBytes(txtLen));
            var size = reader.ReadInt32();
            this.PartitionAssignmentInfos[i].Partitions = new int[size];
            for (int j = 0; j < size; j++)
            {
                var pid = reader.ReadInt32();
                this.PartitionAssignmentInfos[i].Partitions[j] = pid;
            }
        }
        var len = reader.ReadInt32();
        this.UserData = reader.ReadBytes(len);
    }
}
public SyncGroupResponseMemberAssignmentInfo ParseMemberAssignment()
{
    // deserialize the raw MemberAssignment bytes
    var info = new SyncGroupResponseMemberAssignmentInfo();
    using (var ms = new MemoryStream(this.MemberAssignment))
    {
        var reader = new KafkaBinaryReader(ms);
        info.Version = reader.ReadInt16();
        int count = reader.ReadInt32();
        info.PartitionAssignmentInfos = new SyncGroupResponsePartitionAssignmentInfo[count];
        for (int i = 0; i < count; i++)
        {
            info.PartitionAssignmentInfos[i] = new SyncGroupResponsePartitionAssignmentInfo();
            short txtSize = reader.ReadInt16();
            byte[] txtBytes = reader.ReadBytes(txtSize);
            info.PartitionAssignmentInfos[i].Topic = Encoding.UTF8.GetString(txtBytes);
            int psize = reader.ReadInt32();
            info.PartitionAssignmentInfos[i].Partitions = new int[psize];
            for (int j = 0; j < psize; j++)
            {
                int pid = reader.ReadInt32();
                info.PartitionAssignmentInfos[i].Partitions[j] = pid;
            }
        }
        int bytesSize = reader.ReadInt32();
        info.UserData = reader.ReadBytes(bytesSize);
    }
    return info;
}
internal static Broker ParseFrom(KafkaBinaryReader reader)
{
    var id = reader.ReadInt32();
    var host = KafkaPrimitiveTypes.ReadShortString(reader, KafkaRequest.DefaultEncoding);
    var port = reader.ReadInt32();
    return new Broker(id, host, port);
}
public static SyncGroupResponse ParseFrom(KafkaBinaryReader reader)
{
    var size = reader.ReadInt32();
    var correlationid = reader.ReadInt32();
    var error = reader.ReadInt16();
    var count = reader.ReadInt32();
    var data = reader.ReadBytes(count);
    return new SyncGroupResponse(error, data);
}
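// The assignment returned by SyncGroup arrives as an opaque byte array; it
// only becomes usable after ParseMemberAssignment (above) decodes it. A
// minimal usage sketch: the reader is assumed to be positioned at the start
// of the response, and the ErrorCode property name is an assumption (the
// constructor takes an error code, but the snippet does not show the property).
var syncResponse = SyncGroupResponse.ParseFrom(reader);
if (syncResponse.ErrorCode == 0) // 0 means no error in the Kafka protocol
{
    var assignment = syncResponse.ParseMemberAssignment();
    foreach (var topicInfo in assignment.PartitionAssignmentInfos)
    {
        Console.WriteLine(topicInfo.Topic + ": " + string.Join(",", topicInfo.Partitions));
    }
}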
public static string ReadShortString(KafkaBinaryReader reader, string encoding)
{
    var size = reader.ReadInt16();
    if (size < 0)
    {
        return null; // a length of -1 encodes a null string
    }
    var bytes = reader.ReadBytes(size);
    Encoding encoder = Encoding.GetEncoding(encoding);
    return encoder.GetString(bytes);
}
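// This mirrors the Kafka protocol's STRING type: a signed 16-bit big-endian
// length followed by that many encoded bytes, with -1 standing in for null.
// For symmetry, a hedged sketch of the writing side; the WriteShortString
// helper is illustrative, not part of this library, and it assumes a
// big-endian wire format (hence IPAddress.HostToNetworkOrder, which needs
// using System.Net).
public static void WriteShortString(BinaryWriter writer, string value, string encoding)
{
    if (value == null)
    {
        writer.Write(IPAddress.HostToNetworkOrder((short)-1)); // -1 marks null
        return;
    }
    byte[] bytes = Encoding.GetEncoding(encoding).GetBytes(value);
    writer.Write(IPAddress.HostToNetworkOrder((short)bytes.Length));
    writer.Write(bytes);
}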
internal static TopicMetadataResponseTopicInfo ParseFrom(KafkaBinaryReader reader, Dictionary<int, Broker> brokers)
{
    var errorCode = reader.ReadInt16();
    var topic = KafkaPrimitiveTypes.ReadShortString(reader, KafkaRequest.DefaultEncoding);
    var numPartitions = reader.ReadInt32();
    var partitionsMetadata = new List<TopicMetadataResponsePartitionInfo>();
    for (int i = 0; i < numPartitions; i++)
    {
        partitionsMetadata.Add(TopicMetadataResponsePartitionInfo.ParseFrom(reader, brokers));
    }
    return new TopicMetadataResponseTopicInfo(topic, partitionsMetadata, errorCode);
}
public static PartitionOffsetsResponse ReadFrom(KafkaBinaryReader reader)
{
    var partitionId = reader.ReadInt32();
    var error = reader.ReadInt16();
    var numOffsets = reader.ReadInt32();
    var offsets = new List<long>();
    for (int o = 0; o < numOffsets; ++o)
    {
        offsets.Add(reader.ReadInt64());
    }
    return new PartitionOffsetsResponse(partitionId, error, offsets);
}
public void Deserialize(byte[] bytes)
{
    using (var ms = new MemoryStream(bytes))
    {
        var reader = new KafkaBinaryReader(ms);
        this.Version = reader.ReadInt16();
        var count = reader.ReadInt32();
        var topics = new string[count];
        for (var i = 0; i < count; i++)
        {
            var length = reader.ReadInt16();
            var topic = reader.ReadBytes(length);
            topics[i] = Encoding.UTF8.GetString(topic);
        }
        this.Topics = new List<string>(topics);
        count = reader.ReadInt32();
        this.UserData = reader.ReadBytes(count);
    }
}
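// This Deserialize reads the consumer-protocol Subscription layout: int16
// version, int32 topic count, length-prefixed UTF-8 topic names, then an
// int32-prefixed user-data blob. A hedged sketch of the matching Serialize
// under the same layout; the BinaryWriter plumbing and the choice to encode
// a null UserData as length 0 (rather than -1) are my assumptions, made so
// the Deserialize above can read the result back with ReadBytes(count).
public byte[] Serialize()
{
    using (var ms = new MemoryStream())
    using (var writer = new BinaryWriter(ms))
    {
        // IPAddress.HostToNetworkOrder (using System.Net) gives big-endian output.
        writer.Write(IPAddress.HostToNetworkOrder(this.Version));
        writer.Write(IPAddress.HostToNetworkOrder(this.Topics.Count));
        foreach (var topic in this.Topics)
        {
            byte[] bytes = Encoding.UTF8.GetBytes(topic);
            writer.Write(IPAddress.HostToNetworkOrder((short)bytes.Length));
            writer.Write(bytes);
        }
        writer.Write(IPAddress.HostToNetworkOrder(this.UserData == null ? 0 : this.UserData.Length));
        if (this.UserData != null)
        {
            writer.Write(this.UserData);
        }
        return ms.ToArray();
    }
}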
public static TopicMetadataResponsePartitionInfo ParseFrom(KafkaBinaryReader reader, Dictionary<int, Broker> brokers)
{
    var errorCode = reader.ReadInt16();
    var partitionId = reader.ReadInt32();
    var leaderId = reader.ReadInt32();
    Broker leader = null;
    if (leaderId != -1)
    {
        leader = brokers[leaderId];
    }

    // list of all replicas
    var numReplicas = reader.ReadInt32();
    var replicas = new List<Broker>();
    for (int i = 0; i < numReplicas; ++i)
    {
        var id = reader.ReadInt32();
        if (brokers.ContainsKey(id))
        {
            replicas.Add(brokers[id]);
        }
    }

    // list of in-sync replicas
    var numIsr = reader.ReadInt32();
    var isrs = new List<Broker>();
    for (int i = 0; i < numIsr; ++i)
    {
        var id = reader.ReadInt32();
        if (brokers.ContainsKey(id))
        {
            isrs.Add(brokers[id]);
        }
    }

    return new TopicMetadataResponsePartitionInfo(partitionId, leader, replicas, isrs);
}
public static DescribeGroupsResponse ParseFrom(KafkaBinaryReader reader)
{
    var size = reader.ReadInt32();
    var correlationid = reader.ReadInt32();
    var count = reader.ReadInt32();
    var responseInfos = new DescribeGroupsResponseInfo[count];
    for (int i = 0; i < count; i++)
    {
        var error = reader.ReadInt16();
        var groupid = reader.ReadShortString();
        var state = reader.ReadShortString();
        var protocolType = reader.ReadShortString();
        var protocol = reader.ReadShortString();
        var count2 = reader.ReadInt32();
        var members = new DescribeGroupsResponseMemberInfo[count2];
        for (int j = 0; j < count2; j++)
        {
            var memberid = reader.ReadShortString();
            var clientid = reader.ReadShortString();
            var clienthost = reader.ReadShortString();
            var metadataSize = reader.ReadInt32();
            var metadata = reader.ReadBytes(metadataSize);
            var assignmentSize = reader.ReadInt32();
            var assignment = reader.ReadBytes(assignmentSize);
            members[j] = new DescribeGroupsResponseMemberInfo(memberid, clientid, clienthost, metadata, assignment);
        }
        responseInfos[i] = new DescribeGroupsResponseInfo(error, groupid, state, protocolType, protocol, members);
    }
    return new DescribeGroupsResponse(responseInfos) { CorrelationId = correlationid, Size = size };
}
public static OffsetResponse ParseFrom(KafkaBinaryReader reader)
{
    reader.ReadInt32(); // skip the 4-byte size prefix
    var correlationId = reader.ReadInt32();
    var numTopics = reader.ReadInt32();
    var responseMap = new Dictionary<string, List<PartitionOffsetsResponse>>();
    for (int i = 0; i < numTopics; ++i)
    {
        var topic = reader.ReadShortString();
        var numPartitions = reader.ReadInt32();
        var responses = new List<PartitionOffsetsResponse>();
        for (int p = 0; p < numPartitions; ++p)
        {
            responses.Add(PartitionOffsetsResponse.ReadFrom(reader));
        }
        responseMap[topic] = responses;
    }
    return new OffsetResponse(correlationId, responseMap);
}
public static JoinGroupResponse ParseFrom(KafkaBinaryReader reader)
{
    var size = reader.ReadInt32();
    var correlationid = reader.ReadInt32();
    var error = reader.ReadInt16();
    var generationid = reader.ReadInt32();
    var groupprotocol = reader.ReadShortString();
    var leaderid = reader.ReadShortString();
    var memberid = reader.ReadShortString();
    var count = reader.ReadInt32();
    var members = new JoinGroupResponseMemberInfo[count];
    for (int i = 0; i < count; i++)
    {
        var id = reader.ReadShortString();
        var bytes = reader.ReadInt32();
        var metadata = reader.ReadBytes(bytes);
        members[i] = new JoinGroupResponseMemberInfo(id, metadata);
    }
    return new JoinGroupResponse(error, generationid, groupprotocol, leaderid, memberid, members);
}
public static FetchResponse ParseFrom(KafkaBinaryReader reader)
{
    FetchResponse result = null;
    DateTime startUtc = DateTime.UtcNow;
    int size = 0, correlationId = 0, dataCount = 0;
    try
    {
        size = reader.ReadInt32();
        Logger.Debug("FetchResponse.ParseFrom: read size after "
            + TimeSpan.FromTicks(DateTime.UtcNow.Ticks - startUtc.Ticks).TotalSeconds
            + " seconds, packet size " + size);

        startUtc = DateTime.UtcNow;
        byte[] remainingBytes = reader.ReadBytes(size);
        Logger.Debug("FetchResponse.ParseFrom: read remaining bytes after "
            + TimeSpan.FromTicks(DateTime.UtcNow.Ticks - startUtc.Ticks).TotalSeconds + " seconds");

        startUtc = DateTime.UtcNow;
        var dataReader = new KafkaBinaryReader(new MemoryStream(remainingBytes));
        correlationId = dataReader.ReadInt32();
        dataCount = dataReader.ReadInt32();
        var data = new FetchResponseTopicInfo[dataCount];

        // Improvement: keep each partition's message set as raw bytes and
        // parse the records later, which speeds up the fetch path.
        for (int i = 0; i < dataCount; i++)
        {
            var topic = dataReader.ReadShortString();
            var partitionCount = dataReader.ReadInt32();
            startUtc = DateTime.UtcNow;
            var partitions = new FetchResponsePartitionInfo[partitionCount];
            for (int j = 0; j < partitionCount; j++)
            {
                var partition = dataReader.ReadInt32();
                var error = dataReader.ReadInt16();
                var highWatermark = dataReader.ReadInt64();
                var messageSetSize = dataReader.ReadInt32();
                var messageSetBytes = dataReader.ReadBytes(messageSetSize);
                Logger.Debug("FetchResponse.ParseFrom: topic " + topic + " partition " + partition
                    + " should get records in " + messageSetSize + " bytes, error " + error
                    + " watermark " + highWatermark);
                partitions[j] = new FetchResponsePartitionInfo(partition, error, highWatermark, messageSetBytes);
            }
            Logger.Debug("FetchResponse.ParseFrom: read " + partitionCount + " partitions for segment "
                + (i + 1) + " in " + TimeSpan.FromTicks(DateTime.UtcNow.Ticks - startUtc.Ticks).TotalSeconds
                + " seconds");
            data[i] = new FetchResponseTopicInfo(topic, partitions);
        }

        result = new FetchResponse(correlationId, data, size);
        Logger.Debug("FetchResponse.ParseFrom: read bytes into structure complete after "
            + TimeSpan.FromTicks(DateTime.UtcNow.Ticks - startUtc.Ticks).TotalSeconds + " seconds");
    }
    catch (OutOfMemoryException mex)
    {
        Logger.Error(string.Format(
            "OOM Error. Data values were: size: {0}, correlationId: {1}, dataCount: {2}.\r\nFull stack of exception: {3}",
            size, correlationId, dataCount, mex.StackTrace));
        throw;
    }
    catch (Exception e)
    {
        Logger.Debug("FetchResponse.ParseFrom: parse response failed\r\n" + e);
        throw;
    }
    return result;
}
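// Each FetchResponsePartitionInfo above carries its message set as raw bytes.
// A hedged sketch of decoding those bytes later, assuming the legacy v0
// message format (offset, size, CRC, magic, attributes, then length-prefixed
// key and value) and big-endian reads via KafkaBinaryReader; the
// DecodeMessageSetV0 helper and the (offset, key, value) tuple shape are
// illustrative, not this library's API. Assumes uncompressed messages; a
// compressed set nests another message set inside the value and is not
// handled here.
public static IEnumerable<Tuple<long, byte[], byte[]>> DecodeMessageSetV0(byte[] messageSetBytes)
{
    var reader = new KafkaBinaryReader(new MemoryStream(messageSetBytes));
    long remaining = messageSetBytes.Length;
    while (remaining >= 12) // need at least offset (8 bytes) + message size (4 bytes)
    {
        long offset = reader.ReadInt64();
        int messageSize = reader.ReadInt32();
        remaining -= 12;
        if (messageSize > remaining)
        {
            yield break; // brokers may send a trailing partial message: stop here
        }
        reader.ReadInt32();  // CRC (not verified in this sketch)
        reader.ReadBytes(2); // magic byte and attributes
        int keyLength = reader.ReadInt32();
        byte[] key = keyLength == -1 ? null : reader.ReadBytes(keyLength);
        int valueLength = reader.ReadInt32();
        byte[] value = valueLength == -1 ? null : reader.ReadBytes(valueLength);
        remaining -= messageSize;
        yield return Tuple.Create(offset, key, value);
    }
}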
void metadataAction()
{
    var metadataReq = TopicMetadataRequest.Create(null, 0, 0, this.config.ConsumerId);
    var sendBytes = metadataReq.Serialize();
    var bk = sockets.First();
    var bid = bk.Key;
    var socket = bk.Value;
    IEnumerable<TopicMetadataResponseTopicInfo> response = null;
    DateTime startUtc = DateTime.UtcNow;

    logger.Debug("metadataAction => lock on socket wait for " + bid);
    lock (socket)
    {
        logger.Debug("metadataAction => lock on socket acquired " + bid + " in "
            + TimeSpan.FromTicks(DateTime.UtcNow.Ticks - startUtc.Ticks).TotalSeconds + " seconds");
        if (!socket.Connected)
        {
            socket.Connect(socket.RemoteEndPoint);
        }

        var stream = new NetworkStream(socket)
        {
            ReadTimeout = this.config.ReceiveTimeout,
            WriteTimeout = this.config.SendTimeout
        };
        stream.Write(sendBytes, 0, sendBytes.Length);
        stream.Flush();

        var reader = new KafkaBinaryReader(stream);
        // parse the topic metadata out of the response
        response = TopicMetadataRequest.ParseFrom(reader).TopicMetadatas;
    }

    if (response == null)
    {
        throw new Exception("[UNHANDLED-EXCEPTION] null MetadataResponse");
    }
    logger.Debug("metadataAction => metadata response from broker " + bid + " received in "
        + TimeSpan.FromTicks(DateTime.UtcNow.Ticks - startUtc.Ticks).TotalSeconds + " seconds");

    // rebuild the leader -> topic -> partitions index
    brokerOrderedMetadatas.Clear();
    foreach (var metadata in response)
    {
        foreach (var p in metadata.PartitionsMetadata)
        {
            logger.Debug("metadataAction => topic:" + metadata.Topic + ",partition:" + p.PartitionId
                + ",leader:" + p.Leader.Id);
            if (!brokerOrderedMetadatas.ContainsKey(p.Leader.Id))
            {
                brokerOrderedMetadatas.Add(p.Leader.Id, new Dictionary<string, List<TopicMetadataResponsePartitionInfo>>());
            }
            if (!brokerOrderedMetadatas[p.Leader.Id].ContainsKey(metadata.Topic))
            {
                brokerOrderedMetadatas[p.Leader.Id].Add(metadata.Topic, new List<TopicMetadataResponsePartitionInfo>());
            }
            brokerOrderedMetadatas[p.Leader.Id][metadata.Topic].Add(p);
        }
    }
}
public static KafkaProduceResponse ParseFrom(KafkaBinaryReader reader)
{
    // Stub: returns an empty response without consuming any bytes from the reader.
    return new KafkaProduceResponse();
}
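// As written, this parser leaves the broker's reply on the wire. A hedged
// sketch of what a full v0 produce response parse would look like, following
// the same size/correlation-id framing as the other ParseFrom methods here;
// how the parsed fields should be stored on KafkaProduceResponse is unknown
// from the snippet, so this sketch only walks the wire format.
public static KafkaProduceResponse ParseFrom(KafkaBinaryReader reader)
{
    var size = reader.ReadInt32();
    var correlationId = reader.ReadInt32();
    var topicCount = reader.ReadInt32();
    for (int i = 0; i < topicCount; i++)
    {
        var topic = reader.ReadShortString();
        var partitionCount = reader.ReadInt32();
        for (int j = 0; j < partitionCount; j++)
        {
            var partition = reader.ReadInt32();
            var errorCode = reader.ReadInt16();  // 0 on success
            var baseOffset = reader.ReadInt64(); // offset assigned to the first message
            // a real implementation would collect these into the response object
        }
    }
    return new KafkaProduceResponse(); // placeholder: parsed fields not retained in this sketch
}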