public void ShouldParseResponse()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    writer.Write(1); // size
    writer.Write(123); // correlation id
    writer.Write(1); // topic count
    writer.WriteShortString("topic");
    writer.Write(1); // partition count
    writer.Write(999); // partition id
    writer.Write((short)ErrorMapping.NoError);
    writer.Write(3); // number of offsets
    writer.Write(111L);
    writer.Write(222L);
    writer.Write(333L);
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    var response = new OffsetResponse.Parser().ParseFrom(reader);

    response.CorrelationId.Should().Be(123);
    response.ResponseMap.Count.Should().Be(1);
    var partitions = response.ResponseMap["topic"];
    partitions.Count.Should().Be(1);
    var info = partitions.First();
    info.Error.Should().Be(ErrorMapping.NoError);
    info.Offsets.Count.Should().Be(3);
    info.Offsets.SequenceEqual(new List<long> { 111L, 222L, 333L }).Should().BeTrue();
    info.PartitionId.Should().Be(999);
}
public void ShouldAbleToParseFetchResponse()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    writer.Write(1); // size
    writer.Write(123); // correlation id
    writer.Write(1); // data count
    writer.WriteShortString("topic1");
    writer.Write(1); // partition count
    writer.Write(111); // partition id
    writer.Write((short)ErrorMapping.NoError);
    writer.Write(1011L); // high watermark

    var messageStream = new MemoryStream();
    var messageWriter = new KafkaBinaryWriter(messageStream);
    new BufferedMessageSet(new List<Message> { new Message(new byte[100]) }, 0).WriteTo(messageWriter);
    writer.Write((int)messageStream.Length);
    writer.Write(messageStream.GetBuffer(), 0, (int)messageStream.Length);
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    var response = new FetchResponse.Parser().ParseFrom(reader);

    var set = response.MessageSet("topic1", 111);
    set.Should().NotBeNull();
    var messages = set.Messages.ToList();
    messages.Count.Should().Be(1);
    messages.First().Payload.Length.Should().Be(100);
}
public void GetBytesValid()
{
    const string topicName = "topic";
    var requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
    requestInfo[topicName] = new List<PartitionOffsetRequestInfo>
    {
        new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
    };
    var request = new OffsetRequest(requestInfo);

    // expected layout: length(4) + request type(2) + version(2) + correlation id(4)
    // + client id(2, empty short string) + replica id(4) + request info count(4)
    // + topic(short string) + partition info count(4) + partition id(4) + time(8) + max offsets(4)
    int count = 4 + 2 + 2 + 4 + 2 + 4 + 4 +
                BitWorks.GetShortStringLength("topic", AbstractRequest.DefaultEncoding) +
                4 + 4 + 8 + 4;

    var ms = new MemoryStream();
    request.WriteTo(ms);
    byte[] bytes = ms.ToArray();
    Assert.IsNotNull(bytes);
    Assert.AreEqual(count, bytes.Length);

    var reader = new KafkaBinaryReader(ms);
    reader.ReadInt32().Should().Be(count - 4); // length
    reader.ReadInt16().Should().Be((short)RequestTypes.Offsets); // request type
    reader.ReadInt16().Should().Be(0); // version
    reader.ReadInt32().Should().Be(0); // correlation id
    string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
    reader.ReadInt32().Should().Be(-1); // replica id
    reader.ReadInt32().Should().Be(1); // request info count
    reader.ReadShortString().Should().Be("topic");
    reader.ReadInt32().Should().Be(1); // partition info count
    reader.ReadInt32().Should().Be(0); // partition id
    reader.ReadInt64().Should().Be(OffsetRequest.LatestTime); // time
    reader.ReadInt32().Should().Be(10); // max number of offsets
}
public static string ReadShortString(KafkaBinaryReader reader, string encoding)
{
    var size = reader.ReadInt16();
    if (size < 0)
    {
        return null; // a length of -1 marks a null string on the wire
    }

    var bytes = reader.ReadBytes(size);
    Encoding encoder = Encoding.GetEncoding(encoding);
    return encoder.GetString(bytes);
}
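// A hedged companion sketch (not from the original source): round-trips the
// short-string encoding above in the style of the tests in this section. It
// assumes WriteShortString encodes with AbstractRequest.DefaultEncoding, and
// relies on the size < 0 branch above for the null case. The method name is
// illustrative.
public void ShouldRoundTripShortString()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    writer.WriteShortString("topic");
    writer.Write((short)-1); // a 2-byte length of -1 encodes a null string
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding).Should().Be("topic");
    BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding).Should().BeNull();
}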
public void ShouldAbleToParseRequest()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    writer.Write(1); // size
    writer.Write(100); // correlation id
    writer.Write(2); // broker count
    writer.Write(0); // broker id
    writer.WriteShortString("host1");
    writer.Write(9092); // port
    writer.Write(1); // broker id
    writer.WriteShortString("host2");
    writer.Write(9093); // port
    writer.Write(1); // topic count
    writer.Write((short)ErrorMapping.NoError);
    writer.WriteShortString("topic1");
    writer.Write(1); // partition count
    writer.Write((short)ErrorMapping.NoError);
    writer.Write(111); // partition id
    writer.Write(0); // leader broker id
    writer.Write(1); // number of replicas
    writer.Write(1); // replica broker id
    writer.Write(1); // number of in-sync replicas
    writer.Write(1); // in-sync replica broker id
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    var response = new TopicMetadataRequest.Parser().ParseFrom(reader);

    var enumerator = response.GetEnumerator();
    enumerator.MoveNext().Should().BeTrue();
    enumerator.Current.Topic.Should().Be("topic1");
    enumerator.Current.Error.Should().Be(ErrorMapping.NoError);
    var partitionEnumerator = enumerator.Current.PartitionsMetadata.GetEnumerator();
    partitionEnumerator.MoveNext().Should().BeTrue();
    partitionEnumerator.Current.PartitionId.Should().Be(111);
    var leader = partitionEnumerator.Current.Leader;
    leader.Id.Should().Be(0);
    leader.Host.Should().Be("host1");
    leader.Port.Should().Be(9092);
    var replicas = partitionEnumerator.Current.Replicas.ToList();
    replicas.Count.Should().Be(1);
    replicas.First().Id.Should().Be(1);
    replicas.First().Host.Should().Be("host2");
    replicas.First().Port.Should().Be(9093);
    var isrs = partitionEnumerator.Current.Isr.ToList();
    isrs.Count.Should().Be(1);
    isrs.First().Id.Should().Be(1);
    isrs.First().Host.Should().Be("host2");
    isrs.First().Port.Should().Be(9093);
}
public void ShouldAbleToWriteMessageSetWithExtraBytes()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    var msg1 = new Message(new byte[101]) { Offset = 0 };
    var msg2 = new Message(new byte[102]) { Offset = 1 };
    var set = new BufferedMessageSet(new List<Message> { msg1, msg2 }, 0);
    set.WriteTo(writer);
    writer.Write(new byte[10]); // extra bytes, fewer than an offset + size header
    var size = (int)stream.Position;
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    var newSet = BufferedMessageSet.ParseFrom(reader, size, 0);
    var messages = newSet.Messages.ToList();
    messages.Count.Should().Be(2);
    messages[0].Payload.Length.Should().Be(101);
    messages[1].Payload.Length.Should().Be(102);
}
public void ShouldAbleToParseResponse()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    writer.Write(1); // size
    writer.Write(123); // correlation id
    writer.Write(1); // topic count
    writer.WriteShortString("topic");
    writer.Write(1); // partition count
    writer.Write(999); // partition id
    writer.Write((short)ErrorMapping.NoError); // error
    writer.Write(111L); // offset
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    var response = new ProducerResponse.Parser().ParseFrom(reader);

    response.CorrelationId.Should().Be(123);
    response.Statuses.Count.Should().Be(1);
    var info = response.Statuses[new TopicAndPartition("topic", 999)];
    info.Error.Should().Be(ErrorMapping.NoError);
    info.Offset.Should().Be(111L);
}
/**
 * A message. The format of an N byte message is the following:
 *
 * 1. 4 byte CRC32 of the message
 * 2. 1 byte "magic" identifier to allow format changes; the current value is 2
 * 3. 1 byte "attributes" identifier to allow annotations on the message independent of the version (e.g. compression enabled, type of codec used)
 * 4. 4 byte key length, containing length K
 * 5. K byte key
 * 6. 4 byte payload length, containing length V
 * 7. V byte payload
 */
internal static Message ParseFrom(KafkaBinaryReader reader, long offset, int size, int partitionID)
{
    Message result;
    int bytesRead = 0;
    uint checksum = reader.ReadUInt32();
    bytesRead += 4;
    byte magic = reader.ReadByte();
    bytesRead++;
    byte[] payload;
    if (magic == 2 || magic == 0) // some producers (CLI) send magic 0 while others send 2
    {
        byte attributes = reader.ReadByte();
        bytesRead++;
        var keyLength = reader.ReadInt32();
        bytesRead += 4;
        byte[] key = null;
        if (keyLength != -1)
        {
            key = reader.ReadBytes(keyLength);
            bytesRead += keyLength;
        }

        var payloadSize = reader.ReadInt32();
        bytesRead += 4;
        payload = reader.ReadBytes(payloadSize);
        bytesRead += payloadSize;
        result = new Message(payload, key, Messages.CompressionCodec.GetCompressionCodec(attributes & CompressionCodeMask))
        {
            Offset = offset,
            PartitionId = partitionID
        };
    }
    else
    {
        payload = reader.ReadBytes(size - DefaultHeaderSize);
        bytesRead += size - DefaultHeaderSize;
        result = new Message(payload)
        {
            Offset = offset,
            PartitionId = partitionID
        };
    }

    if (size != bytesRead)
    {
        throw new KafkaException(ErrorMapping.InvalidFetchSizeCode);
    }

    return result;
}
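// A hedged companion sketch (not from the original source): hand-assembles one
// magic-2 message following the seven-field layout documented above and parses
// it back, in the style of the tests earlier in this section. The CRC field is
// written as a placeholder zero since the parser above reads but does not
// verify it; access to the internal ParseFrom from a test assembly and a
// Write(byte) overload on KafkaBinaryWriter are assumptions.
public void ShouldParseHandWrittenMessage()
{
    var payload = new byte[] { 1, 2, 3 };
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    writer.Write(0);                          // 1. CRC32 (placeholder, unverified here)
    writer.Write((byte)2);                    // 2. magic
    writer.Write((byte)0);                    // 3. attributes: no compression
    writer.Write(-1);                         // 4. key length -1 means no key (field 5 is absent)
    writer.Write(payload.Length);             // 6. payload length
    writer.Write(payload, 0, payload.Length); // 7. payload
    var size = (int)stream.Position;
    stream.Seek(0, SeekOrigin.Begin);

    var reader = new KafkaBinaryReader(stream);
    var message = Message.ParseFrom(reader, 0L, size, 0);
    message.Payload.Length.Should().Be(3);
    message.Offset.Should().Be(0L);
}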
/// <summary>
/// Try to show the payload as decoded to UTF-8.
/// </summary>
/// <returns>The decoded payload as a string.</returns>
public override string ToString()
{
    using (var reader = new KafkaBinaryReader(this.MessageBuffer))
    {
        return ParseFrom(reader, this.Size);
    }
}
public override string ToString()
{
    using (var reader = new KafkaBinaryReader(this.RequestBuffer))
    {
        return ParseFrom(reader, this.TotalSize);
    }
}
public static string ParseFrom(KafkaBinaryReader reader, int count, bool skipReqInfo = false)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    if (!skipReqInfo)
    {
        sb.Append("Request size: ");
        sb.Append(reader.ReadInt32());
        sb.Append(", RequestId: ");
        short reqId = reader.ReadInt16();
        sb.Append(reqId);
        sb.Append("(");
        sb.Append((RequestTypes)reqId);
        sb.Append(")");
    }

    sb.Append(", Topic: ");
    string topic = reader.ReadTopic(DefaultEncoding);
    sb.Append(topic);
    sb.Append(", Partition: ");
    sb.Append(reader.ReadInt32());
    sb.Append(", Set size: ");
    sb.Append(reader.ReadInt32());
    int size = count - DefaultHeaderSize - GetTopicLength(topic);
    sb.Append(", Set {");
    sb.Append(BufferedMessageSet.ParseFrom(reader, size));
    sb.Append("}");
    return sb.ToString();
}
public static BufferedMessageSet Decompress(Message message, int partition)
{
    switch (message.CompressionCodec)
    {
        case CompressionCodecs.DefaultCompressionCodec:
        case CompressionCodecs.GZIPCompressionCodec:
            byte[] inputBytes = message.Payload;
            using (var outputStream = new MemoryStream())
            {
                using (var inputStream = new MemoryStream(inputBytes))
                {
                    using (var gzipInputStream = new GZipStream(inputStream, CompressionMode.Decompress))
                    {
                        try
                        {
                            gzipInputStream.CopyTo(outputStream);
                            gzipInputStream.Close();
                        }
                        catch (IOException ex)
                        {
                            Logger.InfoFormat("Error while reading from the GZIP input stream: {0}", ex.FormatException());
                            throw;
                        }
                    }
                }

                outputStream.Position = 0;
                using (var reader = new KafkaBinaryReader(outputStream))
                {
                    return BufferedMessageSet.ParseFrom(reader, (int)outputStream.Length, partition);
                }
            }

        case CompressionCodecs.SnappyCompressionCodec:
            try
            {
                using (var stream = new MemoryStream(SnappyHelper.Decompress(message.Payload)))
                {
                    using (var reader = new KafkaBinaryReader(stream))
                    {
                        return BufferedMessageSet.ParseFrom(reader, (int)stream.Length, partition);
                    }
                }
            }
            catch (Exception ex)
            {
                Logger.ErrorFormat("Error while reading from the Snappy input stream: {0}", ex.FormatException());
                throw;
            }

        default:
            throw new UnknownCodecException(String.Format(CultureInfo.CurrentCulture, "Unknown Codec: {0}", message.CompressionCodec));
    }
}
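// A hedged round-trip sketch (not from the original source) for the GZIP branch
// above: serialize a small message set, gzip it by hand, wrap it in a Message
// flagged with the GZIP codec, and let Decompress recover it. The enclosing
// class name CompressionUtils and the Message(payload, key, codec) constructor
// (the one used by Message.ParseFrom above) are assumptions.
public void ShouldRoundTripGzipCompressedSet()
{
    var setStream = new MemoryStream();
    new BufferedMessageSet(new List<Message> { new Message(new byte[42]) }, 0)
        .WriteTo(new KafkaBinaryWriter(setStream));

    byte[] compressed;
    using (var output = new MemoryStream())
    {
        using (var gzip = new GZipStream(output, CompressionMode.Compress))
        {
            gzip.Write(setStream.GetBuffer(), 0, (int)setStream.Length);
        }

        compressed = output.ToArray();
    }

    var wrapper = new Message(compressed, null, CompressionCodecs.GZIPCompressionCodec);
    var decompressed = CompressionUtils.Decompress(wrapper, 0);
    decompressed.Messages.First().Payload.Length.Should().Be(42);
}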
internal static Broker ParseFrom(KafkaBinaryReader reader)
{
    var id = reader.ReadInt32();
    var host = BitWorks.ReadShortString(reader, AbstractRequest.DefaultEncoding);
    var port = reader.ReadInt32();
    return new Broker(id, host, port);
}
/// <summary>
/// Creates a string representation of a message.
/// </summary>
/// <param name="reader">
/// The reader.
/// </param>
/// <param name="count">
/// The count.
/// </param>
/// <returns>
/// String representation of the message.
/// </returns>
public static string ParseFrom(KafkaBinaryReader reader, int count)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    int payloadSize = count - DefaultHeaderSize;
    sb.Append("Magic: ");
    sb.Append(reader.ReadByte());
    sb.Append(", Checksum: ");
    for (int i = 0; i < 4; i++)
    {
        sb.Append("[");
        sb.Append(reader.ReadByte());
        sb.Append("]");
    }

    sb.Append(", Payload: ");
    var encodedPayload = reader.ReadBytes(payloadSize);
    try
    {
        sb.Append(Encoding.UTF8.GetString(encodedPayload));
    }
    catch (Exception)
    {
        sb.Append("n/a");
    }

    return sb.ToString();
}
public static PartitionMetadata ParseFrom(KafkaBinaryReader reader, Dictionary<int, Broker> brokers)
{
    var errorCode = reader.ReadInt16();
    var partitionId = reader.ReadInt32();
    var leaderId = reader.ReadInt32();
    Broker leader = null;
    if (leaderId != -1) // a leader id of -1 means no leader is currently elected
    {
        leader = brokers[leaderId];
    }

    // list of all replicas
    var numReplicas = reader.ReadInt32();
    var replicas = new List<Broker>();
    for (int i = 0; i < numReplicas; ++i)
    {
        replicas.Add(brokers[reader.ReadInt32()]);
    }

    // list of in-sync replicas
    var numIsr = reader.ReadInt32();
    var isrs = new List<Broker>();
    for (int i = 0; i < numIsr; ++i)
    {
        isrs.Add(brokers[reader.ReadInt32()]);
    }

    return new PartitionMetadata(partitionId, leader, replicas, isrs);
}
public override string ToString()
{
    using (var reader = new KafkaBinaryReader(this.RequestBuffer))
    {
        return ParseFrom(reader, (int)this.RequestBuffer.Length);
    }
}
/// <summary>
/// Helper method to get a string representation of a message set.
/// </summary>
/// <param name="reader">
/// The reader.
/// </param>
/// <param name="count">
/// The count.
/// </param>
/// <returns>
/// String representation of the set.
/// </returns>
internal static string ParseFrom(KafkaBinaryReader reader, int count)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    int i = 1;
    while (reader.BaseStream.Position != reader.BaseStream.Length)
    {
        sb.Append("Message ");
        sb.Append(i);
        sb.Append(" {Length: ");
        int msgSize = reader.ReadInt32();
        sb.Append(msgSize);
        sb.Append(", ");
        sb.Append(Message.ParseFrom(reader, msgSize));
        sb.AppendLine("} ");
        i++;
    }

    return sb.ToString();
}
internal static PartitionData ParseFrom(KafkaBinaryReader reader)
{
    var partition = reader.ReadInt32();
    var error = reader.ReadInt16();
    var highWatermark = reader.ReadInt64();
    var messageSetSize = reader.ReadInt32();
    var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);
    return new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet);
}
public static string ParseFrom(KafkaBinaryReader reader, int count)
{
    Guard.Assert<ArgumentNullException>(() => reader != null);

    var sb = new StringBuilder();
    sb.Append("Request size: ");
    sb.Append(reader.ReadInt32());
    sb.Append(", RequestId: ");
    short reqId = reader.ReadInt16();
    sb.Append(reqId);
    sb.Append("(");
    sb.Append((RequestTypes)reqId);
    sb.Append("), Single Requests: {");
    int i = 1;
    while (reader.BaseStream.Position != reader.BaseStream.Length)
    {
        sb.Append("Request ");
        sb.Append(i);
        sb.Append(" {");
        int msgSize = 0; // per-request size is not tracked here; the set parser reads to the end of the stream
        sb.Append(ProducerRequest.ParseFrom(reader, msgSize));
        sb.AppendLine("} ");
        i++;
    }

    return sb.ToString();
}
public void ShouldParseEmptyMessageSet()
{
    var stream = new MemoryStream();
    var reader = new KafkaBinaryReader(stream);
    var newSet = BufferedMessageSet.ParseFrom(reader, 0, 0);
    var messages = newSet.Messages.ToList();
    messages.Count.Should().Be(0);
}
internal static TopicData ParseFrom(KafkaBinaryReader reader)
{
    var topic = reader.ReadShortString();
    var partitionCount = reader.ReadInt32();
    var partitions = new PartitionData[partitionCount];
    for (int i = 0; i < partitionCount; i++)
    {
        partitions[i] = Producers.PartitionData.ParseFrom(reader);
    }

    return new TopicData(topic, partitions.OrderBy(x => x.Partition));
}
internal static Message ParseFrom(KafkaBinaryReader reader, int size)
{
    Message result;
    int bytesRead = 0;
    byte magic = reader.ReadByte();
    bytesRead++;
    byte[] checksum;
    byte[] payload;
    if (magic == 1)
    {
        byte attributes = reader.ReadByte();
        bytesRead++;
        checksum = reader.ReadBytes(4);
        bytesRead += 4;
        payload = reader.ReadBytes(size - (DefaultHeaderSize + 1));
        bytesRead += size - (DefaultHeaderSize + 1);
        result = new Message(payload, checksum, Messages.CompressionCodec.GetCompressionCodec(attributes & CompressionCodeMask));
    }
    else
    {
        checksum = reader.ReadBytes(4);
        bytesRead += 4;
        payload = reader.ReadBytes(size - DefaultHeaderSize);
        bytesRead += size - DefaultHeaderSize;
        result = new Message(payload, checksum);
    }

    if (size != bytesRead)
    {
        throw new KafkaException(KafkaException.InvalidRetchSizeCode);
    }

    return result;
}