public void GetBytesValid()
{
    const string topicName = "topic";
    var requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
    requestInfo[topicName] = new List<PartitionOffsetRequestInfo>
    {
        new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
    };
    var request = new OffsetRequest(requestInfo);

    // expected size = size field + request type + version + correlation id + client id
    //               + replica id + topic count + per-topic request info
    int count = 4 /* size */
              + 2 /* request type */
              + 2 /* version */
              + 4 /* correlation id */
              + 2 /* empty client id (short string) */
              + 4 /* replica id */
              + 4 /* topic count */
              + BitWorks.GetShortStringLength(topicName, AbstractRequest.DefaultEncoding)
              + 4 /* partition count */
              + 4 /* partition id */
              + 8 /* time */
              + 4 /* max offsets */;

    var ms = new MemoryStream();
    request.WriteTo(ms);
    byte[] bytes = ms.ToArray();
    Assert.IsNotNull(bytes);
    Assert.AreEqual(count, bytes.Length);

    var reader = new KafkaBinaryReader(ms);
    reader.ReadInt32().Should().Be(count - 4);                        // length
    reader.ReadInt16().Should().Be((short)RequestTypes.Offsets);      // request type
    reader.ReadInt16().Should().Be(0);                                // version
    reader.ReadInt32().Should().Be(0);                                // correlation id
    string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
    reader.ReadInt32().Should().Be(-1);                               // replica id
    reader.ReadInt32().Should().Be(1);                                // request info count
    reader.ReadShortString().Should().Be(topicName);
    reader.ReadInt32().Should().Be(1);                                // info count
    reader.ReadInt32().Should().Be(0);                                // partition id
    reader.ReadInt64().Should().Be(OffsetRequest.LatestTime);         // time
    reader.ReadInt32().Should().Be(10);                               // max offset
}
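// For reference, a hedged breakdown of how the expected byte count above evaluates,
// assuming BitWorks.GetShortStringLength returns a 2-byte length prefix plus the encoded
// string bytes (so "topic" contributes 2 + 5 = 7 with a single-byte default encoding):
//   header: 4 (size) + 2 (request type) + 2 (version) + 4 (correlation id)
//           + 2 (empty client id) + 4 (replica id) + 4 (topic count)           = 22
//   body:   7 ("topic" short string) + 4 (partition count) + 4 (partition id)
//           + 8 (time) + 4 (max offsets)                                       = 27
//   total:  49 bytes on the wire, so the leading size field reads back as 49 - 4 = 45.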
internal static TopicData ParseFrom(KafkaBinaryReader reader)
{
    var topic = reader.ReadShortString();
    var partitionCount = reader.ReadInt32();
    var partitions = new PartitionData[partitionCount];
    for (int i = 0; i < partitionCount; i++)
    {
        partitions[i] = Producers.PartitionData.ParseFrom(reader);
    }

    return new TopicData(topic, partitions.OrderBy(x => x.Partition));
}
public static string ParseFrom(KafkaBinaryReader reader, int count)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    sb.Append("Request size: ");
    sb.Append(reader.ReadInt32());
    sb.Append(", RequestId: ");
    short reqId = reader.ReadInt16();
    sb.Append(reqId);
    sb.Append("(");
    sb.Append((RequestTypes)reqId);
    sb.Append("), Single Requests: {");

    int i = 1;
    while (reader.BaseStream.Position != reader.BaseStream.Length)
    {
        sb.Append("Request ");
        sb.Append(i);
        sb.Append(" {");
        int msgSize = 0;
        sb.Append(ProducerRequest.ParseFrom(reader, msgSize));
        sb.AppendLine("} ");
        i++;
    }

    return sb.ToString();
}
/**
 * A message. The format of an N byte message is the following:
 *
 * 1. 4 byte CRC32 of the message
 * 2. 1 byte "magic" identifier to allow format changes, value is 2 currently
 * 3. 1 byte "attributes" identifier to allow annotations on the message independent
 *    of the version (e.g. compression enabled, type of codec used)
 * 4. 4 byte key length, containing length K
 * 5. K byte key
 * 6. 4 byte payload length, containing length V
 * 7. V byte payload
 */
internal static Message ParseFrom(KafkaBinaryReader reader, long offset, int size, int partitionID)
{
    Message result;
    int bytesRead = 0;
    uint checksum = reader.ReadUInt32();
    bytesRead += 4;
    byte magic = reader.ReadByte();
    bytesRead++;

    byte[] payload;
    if (magic == 2 || magic == 0) // some producers (CLI) send magic 0 while others send 2
    {
        byte attributes = reader.ReadByte();
        bytesRead++;
        var keyLength = reader.ReadInt32();
        bytesRead += 4;
        byte[] key = null;
        if (keyLength != -1)
        {
            key = reader.ReadBytes(keyLength);
            bytesRead += keyLength;
        }

        var payloadSize = reader.ReadInt32();
        bytesRead += 4;
        payload = reader.ReadBytes(payloadSize);
        bytesRead += payloadSize;
        result = new Message(payload, key, Messages.CompressionCodec.GetCompressionCodec(attributes & CompressionCodeMask))
        {
            Offset = offset,
            PartitionId = partitionID
        };
    }
    else
    {
        payload = reader.ReadBytes(size - DefaultHeaderSize);
        bytesRead += size - DefaultHeaderSize;
        result = new Message(payload)
        {
            Offset = offset,
            PartitionId = partitionID
        };
    }

    if (size != bytesRead)
    {
        throw new KafkaException(ErrorMapping.InvalidFetchSizeCode);
    }

    return result;
}
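// As a quick sanity check on the layout documented above, a minimal sketch (illustrative
// only, not part of the library; the helper name and signature are assumptions) that
// computes the wire size of a single keyed message:
//   4 (CRC32) + 1 (magic) + 1 (attributes) + 4 (key length) + K + 4 (payload length) + V
internal static int EstimateMessageSizeSketch(byte[] key, byte[] payload)
{
    // the 4-byte key length field is always written, holding -1 when there is no key
    int keyBytes = key == null ? 0 : key.Length;
    return 4 + 1 + 1 + 4 + keyBytes + 4 + payload.Length;
}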
public static string ParseFrom(KafkaBinaryReader reader, int count, bool skipReqInfo = false)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    if (!skipReqInfo)
    {
        sb.Append("Request size: ");
        sb.Append(reader.ReadInt32());
        sb.Append(", RequestId: ");
        short reqId = reader.ReadInt16();
        sb.Append(reqId);
        sb.Append("(");
        sb.Append((RequestTypes)reqId);
        sb.Append(")");
    }

    sb.Append(", Topic: ");
    string topic = reader.ReadTopic(DefaultEncoding);
    sb.Append(topic);
    sb.Append(", Partition: ");
    sb.Append(reader.ReadInt32());
    sb.Append(", Set size: ");
    sb.Append(reader.ReadInt32());
    int size = count - DefaultHeaderSize - GetTopicLength(topic);
    sb.Append(", Set {");
    sb.Append(BufferedMessageSet.ParseFrom(reader, size));
    sb.Append("}");
    return sb.ToString();
}
internal static Broker ParseFrom(KafkaBinaryReader reader)
{
    var id = reader.ReadInt32();
    var host = BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding);
    var port = reader.ReadInt32();
    return new Broker(id, host, port);
}
/// <summary>
/// Helper method to get string representation of set
/// </summary>
/// <param name="reader">
/// The reader.
/// </param>
/// <param name="count">
/// The count.
/// </param>
/// <returns>
/// String representation of set
/// </returns>
internal static string ParseFrom(KafkaBinaryReader reader, int count)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    int i = 1;
    while (reader.BaseStream.Position != reader.BaseStream.Length)
    {
        sb.Append("Message ");
        sb.Append(i);
        sb.Append(" {Length: ");
        int msgSize = reader.ReadInt32();
        sb.Append(msgSize);
        sb.Append(", ");
        sb.Append(Message.ParseFrom(reader, msgSize));
        sb.AppendLine("} ");
        i++;
    }

    return sb.ToString();
}
internal static PartitionData ParseFrom(KafkaBinaryReader reader)
{
    var partition = reader.ReadInt32();
    var error = reader.ReadInt16();
    var highWatermark = reader.ReadInt64();
    var messageSetSize = reader.ReadInt32();
    var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);
    return new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet);
}
public static PartitionMetadata ParseFrom(KafkaBinaryReader reader, Dictionary<int, Broker> brokers)
{
    var errorCode = reader.ReadInt16();
    var partitionId = reader.ReadInt32();
    var leaderId = reader.ReadInt32();
    Broker leader = null;
    if (leaderId != -1)
    {
        leader = brokers[leaderId];
    }

    // list of all replicas
    var numReplicas = reader.ReadInt32();
    var replicas = new List<Broker>();
    for (int i = 0; i < numReplicas; ++i)
    {
        replicas.Add(brokers[reader.ReadInt32()]);
    }

    // list of in-sync replicas
    var numIsr = reader.ReadInt32();
    var isrs = new List<Broker>();
    for (int i = 0; i < numIsr; ++i)
    {
        isrs.Add(brokers[reader.ReadInt32()]);
    }

    return new PartitionMetadata(partitionId, leader, replicas, isrs);
}
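// For context, a minimal sketch (illustrative only, not the library's metadata-response
// code) of how the broker map consumed by PartitionMetadata.ParseFrom might be assembled
// from the same reader, assuming the response starts with a 4-byte broker count and that
// Broker exposes the parsed id as Broker.Id:
internal static Dictionary<int, Broker> ReadBrokerMapSketch(KafkaBinaryReader reader)
{
    var brokerCount = reader.ReadInt32();
    var brokers = new Dictionary<int, Broker>(brokerCount);
    for (int i = 0; i < brokerCount; i++)
    {
        var broker = Broker.ParseFrom(reader); // id, host (short string), port
        brokers[broker.Id] = broker;
    }

    // each partition entry can then be decoded against this map:
    // var partitionMetadata = PartitionMetadata.ParseFrom(reader, brokers);
    return brokers;
}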