public void ShouldParseResponse()
{
    // Arrange: hand-serialize a well-formed offset response.
    var buffer = new MemoryStream();
    var binaryWriter = new KafkaBinaryWriter(buffer);
    binaryWriter.Write(1);
    binaryWriter.Write(123);                         // correlation id
    binaryWriter.Write(1);                           // topic count
    binaryWriter.WriteShortString("topic");
    binaryWriter.Write(1);                           // partition count
    binaryWriter.Write(999);                         // partition id
    binaryWriter.Write((short)ErrorMapping.NoError);
    binaryWriter.Write(3);                           // number of offsets
    binaryWriter.Write(111L);
    binaryWriter.Write(222L);
    binaryWriter.Write(333L);
    buffer.Seek(0, SeekOrigin.Begin);

    // Act: parse the bytes back into a response object.
    var parsed = new OffsetResponse.Parser().ParseFrom(new KafkaBinaryReader(buffer));

    // Assert: every field round-trips.
    parsed.CorrelationId.Should().Be(123);
    parsed.ResponseMap.Count.Should().Be(1);
    var partitionInfos = parsed.ResponseMap["topic"];
    partitionInfos.Count.Should().Be(1);
    var info = partitionInfos.First();
    info.Error.Should().Be(ErrorMapping.NoError);
    info.Offsets.Count.Should().Be(3);
    info.Offsets.SequenceEqual(new List<long> { 111L, 222L, 333L }).Should().BeTrue();
    info.PartitionId.Should().Be(999);
}
public void ShouldAbleToWriteMessageSetWithPartialMessage()
{
    // Arrange: two complete messages followed by a truncated third one.
    var buffer = new MemoryStream();
    var binaryWriter = new KafkaBinaryWriter(buffer);
    var first = new Message(new byte[101]) { Offset = 0 };
    var second = new Message(new byte[102]) { Offset = 1 };
    new BufferedMessageSet(new List<Message> { first, second }, 0).WriteTo(binaryWriter);

    // Partial trailing message: offset + declared size 100, but only 10 payload bytes.
    binaryWriter.Write(3L);
    binaryWriter.Write(100);
    binaryWriter.Write(new byte[10]);
    var byteCount = (int)buffer.Position;
    buffer.Seek(0, SeekOrigin.Begin);

    // Act: parse the set back.
    var parsed = BufferedMessageSet.ParseFrom(new KafkaBinaryReader(buffer), byteCount, 0);

    // Assert: only the two complete messages survive; the partial one is dropped.
    var parsedMessages = parsed.Messages.ToList();
    parsedMessages.Count().Should().Be(2);
    parsedMessages[0].Payload.Count().Should().Be(101);
    parsedMessages[1].Payload.Count().Should().Be(102);
}
/// <summary>
/// Serializes this message (magic byte, attributes, checksum, then payload)
/// through the supplied writer.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(Magic);
    writer.Write(Attributes);
    writer.Write(Checksum);
    writer.Write(Payload);
}
/// <summary>
/// Serializes this broker: id, host name as a short string, then port.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(this.Id);
    writer.WriteShortString(this.Host, AbstractRequest.DefaultEncoding);
    writer.Write(this.Port);
}
/// <summary>
/// Serializes this request part: topic, partition, the message set's byte
/// size, then the message set itself.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.WriteTopic(Topic, DefaultEncoding);
    writer.Write(Partition);
    writer.Write(MessageSet.SetSize);
    MessageSet.WriteTo(writer);
}
/// <summary>
/// Serializes this fetch target: topic, partition, starting offset, and the
/// maximum number of bytes to fetch.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.WriteTopic(Topic, DefaultEncoding);
    writer.Write(Partition);
    writer.Write(Offset);
    writer.Write(MaxSize);
}
/// <summary>
/// Serializes every message in the set: for each entry, the next sequential
/// offset, the message size, then the message body. Advances the internal
/// <c>initialOffset</c> counter as a side effect of writing.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public sealed override void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    foreach (var msg in Messages)
    {
        writer.Write(initialOffset);
        initialOffset += 1;
        writer.Write(msg.Size);
        msg.WriteTo(writer);
    }
}
public void ShouldAbleToParseFetchResponse()
{
    // Arrange: serialize the inner message set first -- one 100-byte message.
    var setStream = new MemoryStream();
    new BufferedMessageSet(new List<Message> { new Message(new byte[100]) }, 0)
        .WriteTo(new KafkaBinaryWriter(setStream));

    // Then hand-serialize the fetch-response envelope around it.
    var buffer = new MemoryStream();
    var binaryWriter = new KafkaBinaryWriter(buffer);
    binaryWriter.Write(1);
    binaryWriter.Write(123);                         // correlation id
    binaryWriter.Write(1);                           // data count
    binaryWriter.WriteShortString("topic1");
    binaryWriter.Write(1);                           // partition count
    binaryWriter.Write(111);                         // partition id
    binaryWriter.Write((short)ErrorMapping.NoError);
    binaryWriter.Write(1011L);                       // hw
    binaryWriter.Write((int)setStream.Length);
    binaryWriter.Write(setStream.GetBuffer(), 0, (int)setStream.Length);
    buffer.Seek(0, SeekOrigin.Begin);

    // Act: parse it back.
    var parsed = new FetchResponse.Parser().ParseFrom(new KafkaBinaryReader(buffer));

    // Assert: the message set for topic1/partition 111 round-trips.
    var messageSet = parsed.MessageSet("topic1", 111);
    messageSet.Should().NotBeNull();
    var parsedMessages = messageSet.Messages.ToList();
    parsedMessages.Count().Should().Be(1);
    parsedMessages.First().Payload.Length.Should().Be(100);
}
/// <summary>
/// Writes the full wire request into the given stream: total request size
/// (buffer capacity minus the size field itself), the request type id, and
/// then the request body via the writer-based overload.
/// </summary>
/// <param name="output">
/// The output stream.
/// </param>
public void WriteTo(MemoryStream output)
{
    Guard.NotNull(output, "output");

    // NOTE(review): disposing the writer at the end of this using block will
    // typically dispose the caller's stream too (BinaryWriter default) --
    // confirm KafkaBinaryWriter leaves the underlying stream usable, or that
    // no caller touches `output` afterwards.
    using (var writer = new KafkaBinaryWriter(output))
    {
        // Size prefix excludes the size field itself; assumes
        // RequestBuffer.Capacity equals the exact serialized length -- TODO confirm.
        writer.Write(this.RequestBuffer.Capacity - DefaultRequestSizeSize);
        writer.Write(this.RequestTypeId);
        WriteTo(writer);
    }
}
/// <summary>
/// Serializes this topic-metadata request: version, correlation id, client
/// id, topic count, then each topic name as a short string.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(versionId);
    writer.Write(correlationId);
    writer.WriteShortString(clientId, DefaultEncoding);

    // Materialize once: Topics may be a deferred IEnumerable, and the previous
    // Count() + foreach enumerated it twice, risking a count/content mismatch
    // if the underlying sequence changed between enumerations.
    var topicList = Topics.ToList();
    writer.Write(topicList.Count);
    foreach (var topic in topicList)
    {
        writer.WriteShortString(topic, DefaultEncoding);
    }
}
/// <summary>
/// Serializes the contained producer requests: a 16-bit request count
/// followed by each request's own serialized form.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    // Materialize once: ProducerRequests may be a deferred sequence, and the
    // previous Count() + foreach enumerated it twice, risking a count/content
    // mismatch on the wire.
    var requests = this.ProducerRequests.ToList();
    writer.Write((short)requests.Count);
    foreach (var request in requests)
    {
        request.WriteTo(writer);
    }
}
/// <summary>
/// Serializes this request: header fields (version, correlation id, client
/// id, replica id) followed by a topic -> per-partition entry map.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(this.VersionId);
    writer.Write(this.CorrelationId);
    writer.WriteShortString(this.ClientId);
    writer.Write(this.ReplicaId);

    writer.Write(this.RequestInfo.Count);
    foreach (var entry in this.RequestInfo)
    {
        writer.WriteShortString(entry.Key);
        writer.Write(entry.Value.Count);
        foreach (var partitionInfo in entry.Value)
        {
            partitionInfo.WriteTo(writer);
        }
    }
}
/// <summary>
/// Builds the backing buffer for a message set, assigning each message a
/// sequential offset drawn from <paramref name="offsetCounter"/>. With no
/// compression each message becomes one log entry; with compression all
/// entries are streamed through the codec and wrapped in a single outer message.
/// </summary>
/// <param name="offsetCounter">Source of sequential log offsets; incremented once per message.</param>
/// <param name="compressionCodec">Codec to apply, or NoCompressionCodec for plain entries.</param>
/// <param name="messages">Messages to encode; null or empty yields the shared empty buffer.</param>
private static ByteBuffer Create(
    AtomicLong offsetCounter,
    CompressionCodecs compressionCodec,
    List<Message> messages)
{
    // Nothing to encode: share the canonical empty buffer.
    if (messages == null || !messages.Any())
    {
        return (Empty.Buffer);
    }
    else if (CompressionCodecs.NoCompressionCodec == compressionCodec)
    {
        // Uncompressed path: one log entry per message, written directly.
        var buffer = ByteBuffer.Allocate(MessageSetSize(messages));
        foreach (var message in messages)
        {
            WriteMessage(buffer, message, offsetCounter.GetAndIncrement());
        }
        buffer.Rewind();
        return (buffer);
    }
    else
    {
        // Compressed path: stream each (offset, size, body) entry through the
        // codec writer; disposing the writer flushes the codec's trailer.
        var byteArrayStream = new MemoryStream(MessageSetSize(messages));
        var offset = -1L;
        using (var output = new KafkaBinaryWriter(CompressionFactory.BuildWriter(compressionCodec, byteArrayStream)))
        {
            foreach (var message in messages)
            {
                offset = offsetCounter.GetAndIncrement();
                output.Write(offset);
                output.Write(message.Size);
                output.Write(message.Buffer.Array, message.Buffer.ArrayOffset(), message.Buffer.Limit());
            }
        }
        var bytes = byteArrayStream.ToArray();
        var msg = new Message(bytes, compressionCodec);
        // The single wrapper entry reuses the LAST inner offset -- presumably
        // the Kafka convention for compressed sets; TODO confirm against the
        // broker/consumer expectations before changing.
        var buffer = ByteBuffer.Allocate(msg.Size + LogOverhead);
        WriteMessage(buffer, msg, offset);
        buffer.Rewind();
        return (buffer);
    }
}
/// <summary>
/// Serializes this topic's metadata: topic name, partition count, then each
/// partition's metadata in turn.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.WriteShortString(this.Topic, AbstractRequest.DefaultEncoding);
    writer.Write(this.PartitionsMetadata.Count());
    foreach (var partition in this.PartitionsMetadata)
    {
        partition.WriteTo(writer);
    }
}
/// <summary>
/// Serializes this partition's metadata: partition id, an optional leader
/// broker (prefixed by a presence byte), the replica list, the in-sync
/// replica list, and a trailing zero byte.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(PartitionId);

    // Leader presence flag: 1 followed by the broker record, or 0 when no
    // leader is known.
    if (Leader != null)
    {
        writer.Write((byte)1);
        Leader.WriteTo(writer);
    }
    else
    {
        writer.Write((byte)0);
    }

    // number of replicas
    writer.Write((short)Replicas.Count());
    foreach (var replica in Replicas)
    {
        replica.WriteTo(writer);
    }

    // number of in-sync replicas
    writer.Write((short)Isr.Count());
    foreach (var isr in Isr)
    {
        isr.WriteTo(writer);
    }

    // NOTE(review): purpose of this trailing zero byte is not visible in this
    // file -- confirm against the matching parser before changing anything here.
    writer.Write((byte)0);
}
/// <summary>
/// Serializes the produce request: header fields (version, correlation id,
/// client id, required acks, ack timeout) followed by each topic's partition
/// data and its message set.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(VersionId);
    writer.Write(CorrelationId);
    writer.WriteShortString(ClientId);
    writer.Write(RequiredAcks);
    writer.Write(AckTimeout);

    writer.Write(Data.Count());
    foreach (var topicData in Data)
    {
        writer.WriteShortString(topicData.Topic);
        writer.Write(topicData.PartitionData.Count());
        foreach (var partitionData in topicData.PartitionData)
        {
            writer.Write(partitionData.Partition);
            writer.Write(partitionData.MessageSet.SetSize);
            partitionData.MessageSet.WriteTo(writer);
        }
    }
}
/// <summary>
/// Writes this message using the given writer, computing the CRC over the
/// bytes written after the CRC slot and then back-patching it into that slot.
/// </summary>
/// <param name="writer">
/// The writer.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    // Leave the CRC slot empty for now; it is filled in after the body is written.
    writer.Seek(MagicOffset, SeekOrigin.Current);
    var beginningPosition = writer.CurrentPos;
    writer.Write(this.Magic);
    writer.Write(this.Attributes);
    writer.Write(this.KeyLength);
    // KeyLength == -1 encodes "no key" on the wire, so no key bytes follow.
    if (KeyLength != -1)
    {
        writer.Write(this.Key);
    }
    writer.Write(Payload.Length);
    writer.Write(this.Payload);
    // Checksum covers everything after the CRC slot (magic through payload).
    var crc = ComputeChecksum(writer.Buffer, (int)beginningPosition, Size - MagicOffset);
    // Rewind to the CRC slot, patch it in, then restore the position to the
    // end of the message so subsequent writes continue correctly.
    writer.Seek(-Size, SeekOrigin.Current);
    writer.Write(crc);
    writer.Seek(Size - DefaultCrcLength, SeekOrigin.Current);
}
/// <summary>
/// Serializes the fetch request: header fields (version, correlation id,
/// client id, replica id, max wait, min bytes) followed by a
/// topic -> partition-fetch-info map.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.Write(VersionId);
    writer.Write(CorrelationId);
    writer.WriteShortString(ClientId);
    writer.Write(ReplicaId);
    writer.Write(MaxWait);
    writer.Write(MinBytes);

    writer.Write(OffsetInfo.Count);
    foreach (var entry in OffsetInfo)
    {
        writer.WriteShortString(entry.Key);
        writer.Write(entry.Value.Count);
        foreach (var partitionInfo in entry.Value)
        {
            partitionInfo.WriteTo(writer);
        }
    }
}
public void ShouldAbleToParseResponse()
{
    // Arrange: hand-serialize a producer response for one topic/partition.
    var buffer = new MemoryStream();
    var binaryWriter = new KafkaBinaryWriter(buffer);
    binaryWriter.Write(1);
    binaryWriter.Write(123);                          // correlation id
    binaryWriter.Write(1);                            // topic count
    binaryWriter.WriteShortString("topic");
    binaryWriter.Write(1);                            // partition count
    binaryWriter.Write(999);                          // partition id
    binaryWriter.Write((short)ErrorMapping.NoError);  // error
    binaryWriter.Write(111L);                         // offset
    buffer.Seek(0, SeekOrigin.Begin);

    // Act: parse it back.
    var parsed = new ProducerResponse.Parser().ParseFrom(new KafkaBinaryReader(buffer));

    // Assert: correlation id and the per-partition status round-trip.
    parsed.CorrelationId.Should().Be(123);
    parsed.Statuses.Count.Should().Be(1);
    var status = parsed.Statuses[new TopicAndPartition("topic", 999)];
    status.Error.Should().Be(ErrorMapping.NoError);
    status.Offset.Should().Be(111L);
}
public void ShouldAbleToParseRequest()
{
    // Arrange: hand-serialize a metadata response with two brokers and one
    // topic whose single partition is led by broker 0.
    var buffer = new MemoryStream();
    var binaryWriter = new KafkaBinaryWriter(buffer);
    binaryWriter.Write(1);
    binaryWriter.Write(100);                          // correlation id
    binaryWriter.Write(2);                            // broker count
    binaryWriter.Write(0);                            // broker id
    binaryWriter.WriteShortString("host1");
    binaryWriter.Write(9092);                         // port
    binaryWriter.Write(1);                            // broker id
    binaryWriter.WriteShortString("host2");
    binaryWriter.Write(9093);                         // port
    binaryWriter.Write(1);                            // topic count
    binaryWriter.Write((short)ErrorMapping.NoError);
    binaryWriter.WriteShortString("topic1");
    binaryWriter.Write(1);                            // partitions
    binaryWriter.Write((short)ErrorMapping.NoError);
    binaryWriter.Write(111);                          // partition id
    binaryWriter.Write(0);                            // leader broker id
    binaryWriter.Write(1);                            // num replicas
    binaryWriter.Write(1);                            // replica broker id
    binaryWriter.Write(1);                            // in sync replicas
    binaryWriter.Write(1);                            // in sync replica broker id
    buffer.Seek(0, SeekOrigin.Begin);

    // Act: parse it back.
    var parsed = new TopicMetadataRequest.Parser().ParseFrom(new KafkaBinaryReader(buffer));

    // Assert: topic-level fields.
    var topicIter = parsed.GetEnumerator();
    topicIter.MoveNext().Should().BeTrue();
    topicIter.Current.Topic.Should().Be("topic1");
    topicIter.Current.Error.Should().Be(ErrorMapping.NoError);

    // Partition-level fields.
    var partitionIter = topicIter.Current.PartitionsMetadata.GetEnumerator();
    partitionIter.MoveNext().Should().BeTrue();
    partitionIter.Current.PartitionId.Should().Be(111);

    // Leader id resolves to broker 0 (host1:9092).
    var leader = partitionIter.Current.Leader;
    leader.Id.Should().Be(0);
    leader.Host.Should().Be("host1");
    leader.Port.Should().Be(9092);

    // Replica and ISR ids both resolve to broker 1 (host2:9093).
    var replicaList = partitionIter.Current.Replicas.ToList();
    replicaList.Count.Should().Be(1);
    replicaList.First().Id.Should().Be(1);
    replicaList.First().Host.Should().Be("host2");
    replicaList.First().Port.Should().Be(9093);

    var isrList = partitionIter.Current.Isr.ToList();
    isrList.Count.Should().Be(1);
    isrList.First().Id.Should().Be(1);
    isrList.First().Host.Should().Be("host2");
    isrList.First().Port.Should().Be(9093);
}
/// <summary>
/// Serializes this topic's metadata: topic name followed by the count and
/// serialized form of each partition's metadata.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    Guard.NotNull(writer, "writer");

    writer.WriteShortString(Topic, AbstractRequest.DefaultEncoding);
    writer.Write(PartitionsMetadata.Count());
    foreach (var metadata in PartitionsMetadata)
    {
        metadata.WriteTo(writer);
    }
}
/// <summary>
/// Serializes this partition fetch info: partition id, fetch offset, then
/// the maximum fetch size.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    // Consistency fix: every other WriteTo in this codebase null-guards its
    // writer; this one previously dereferenced it unchecked (raw NRE on null).
    Guard.NotNull(writer, "writer");

    writer.Write(PartitionId);
    writer.Write(Offset);
    writer.Write(FetchSize);
}
/// <summary>
/// Serializes this partition offset-request info: partition id, requested
/// time, then the maximum number of offsets to return.
/// </summary>
/// <param name="writer">
/// Destination writer; must not be null.
/// </param>
public void WriteTo(KafkaBinaryWriter writer)
{
    // Consistency fix: every other WriteTo in this codebase null-guards its
    // writer; this one previously dereferenced it unchecked (raw NRE on null).
    Guard.NotNull(writer, "writer");

    writer.Write(PartitionId);
    writer.Write(Time);
    writer.Write(MaxNumOffsets);
}