/// <summary>
/// Encodes this produce request onto the stream: a length-prefixed header
/// followed by one message-set per (topic, partition, codec) group.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
/// <exception cref="NotSupportedException">Thrown when a payload group uses any codec other than <see cref="MessageCodec.CodecNone"/>.</exception>
public void Encode(BinaryStream writer)
{
    if (this.Payload == null)
    {
        this.Payload = new List<Payload>();
    }

    // Group payloads so each (topic, partition, codec) triple is emitted as a single message set.
    var groups = this.Payload
        .GroupBy(p => new { p.Topic, p.Partition, p.Codec })
        .ToList();

    // Reserve space for the total request length; it is back-filled at the end.
    var sizeSlot = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Produce-specific metadata.
    writer.Write(this.Acks);
    writer.Write(this.TimeoutMS);
    writer.Write(groups.Count);

    foreach (var group in groups)
    {
        var members = group.ToList();

        // Compression is not implemented; only the identity codec is accepted.
        if (group.Key.Codec != MessageCodec.CodecNone)
        {
            throw new NotSupportedException(string.Format("Codec type of {0} is not supported.", group.Key.Codec));
        }

        // Group header: topic, entry count, partition.
        writer.Write(group.Key.Topic);
        writer.Write(members.Count);
        writer.Write(group.Key.Partition);

        // Reserve space for the message-set length, encode the messages, then back-fill it.
        var setSizeSlot = writer.PutPlaceholder();
        Message.EncodeMessages(writer, members.SelectMany(m => m.Messages));
        writer.WriteLengthAt(setSizeSlot);
    }

    // Back-fill the total request length.
    writer.WriteLengthAt(sizeSlot);
}
/// <summary>
/// Encode a request for metadata about topic and broker information.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
/// <remarks>Format: (MessageSize), Header, ix(hs)</remarks>
public void Encode(BinaryStream writer)
{
    this.Topics = this.Topics ?? new List<string>();

    // Reserve space for the total request length; back-filled at the end.
    var sizeSlot = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Topic count followed by each topic name.
    writer.Write(this.Topics.Count);
    foreach (var name in this.Topics)
    {
        writer.Write(name);
    }

    // Back-fill the total request length.
    writer.WriteLengthAt(sizeSlot);
}
/// <summary>
/// Encodes a collection of messages onto the stream, in enumeration order.
/// Each message is written as: offset (8 bytes, zero on produce), length
/// placeholder, CRC placeholder, magic number, attribute, key, value — with
/// the CRC and length back-filled once the body is written.
/// </summary>
/// <param name="writer">The binary stream to encode the messages into.</param>
/// <param name="messages">The collection of messages to encode together.</param>
public static void EncodeMessages(BinaryStream writer, IEnumerable<Message> messages)
{
    foreach (var item in messages)
    {
        // Offset field; zero when producing (the broker assigns real offsets).
        writer.Write((long)0);

        // Reserve slots for the message length and the CRC of the payload.
        var lengthSlot = writer.PutPlaceholder();
        var crcSlot = writer.PutPlaceholder();

        // Message body.
        writer.Write(item.MagicNumber);
        writer.Write(item.Attribute);
        writer.Write(item.Key);
        writer.Write(item.Value);

        // Back-fill the CRC first, then the total message length.
        writer.WriteCrcAt(crcSlot);
        writer.WriteLengthAt(lengthSlot);
    }
}
/// <summary>
/// Encodes this offset-commit request onto the stream: a length-prefixed
/// header, the consumer group, then the commits grouped by topic and partition.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
public void Encode(BinaryStream writer)
{
    if (this.OffsetCommits == null)
    {
        this.OffsetCommits = new List<OffsetCommit>();
    }

    // Commits are sent grouped per topic, then per partition within the topic.
    var byTopic = this.OffsetCommits
        .GroupBy(c => c.Topic)
        .ToList();

    // Reserve space for the total request length; back-filled at the end.
    var sizeSlot = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Consumer group followed by the topic-group count.
    writer.Write(this.ConsumerGroup);
    writer.Write(byTopic.Count);

    foreach (var topicGroup in byTopic)
    {
        var byPartition = topicGroup
            .GroupBy(c => c.PartitionId)
            .ToList();

        // Topic name and the number of partition groups under it.
        writer.Write(topicGroup.Key);
        writer.Write(byPartition.Count);

        foreach (var partitionGroup in byPartition)
        {
            foreach (var commit in partitionGroup)
            {
                // Partition id followed by the commit details.
                writer.Write(partitionGroup.Key);
                writer.Write(commit.Offset);
                writer.Write(commit.TimeStamp);
                writer.Write(commit.Metadata);
            }
        }
    }

    // Back-fill the total request length.
    writer.WriteLengthAt(sizeSlot);
}
/// <summary>
/// Encodes this offset request onto the stream: a length-prefixed header,
/// the replica id, then the offset queries grouped by topic and partition.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
public void Encode(BinaryStream writer)
{
    if (this.Offsets == null)
    {
        this.Offsets = new List<Offset>();
    }

    // Queries are sent grouped per topic, then per partition within the topic.
    var byTopic = this.Offsets
        .GroupBy(o => o.Topic)
        .ToList();

    // Reserve space for the total request length; back-filled at the end.
    var sizeSlot = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Replica id followed by the topic-group count.
    writer.Write(ReplicaId);
    writer.Write(byTopic.Count);

    foreach (var topicGroup in byTopic)
    {
        var byPartition = topicGroup
            .GroupBy(o => o.PartitionId)
            .ToList();

        // Topic name and the number of partition groups under it.
        writer.Write(topicGroup.Key);
        writer.Write(byPartition.Count);

        foreach (var partitionGroup in byPartition)
        {
            foreach (var query in partitionGroup)
            {
                // Partition id followed by the query details.
                writer.Write(partitionGroup.Key);
                writer.Write(query.Time);
                writer.Write(query.MaxOffsets);
            }
        }
    }

    // Back-fill the total request length.
    writer.WriteLengthAt(sizeSlot);
}
/// <summary>
/// Encodes this fetch request onto the stream: a length-prefixed header,
/// replica/wait/min-bytes metadata, then the fetches grouped by topic and
/// partition.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
public void Encode(BinaryStream writer)
{
    if (this.Fetches == null)
    {
        this.Fetches = new List<Fetch>();
    }

    // Materialize the grouping once. The previous code left the GroupBy
    // deferred and enumerated it twice (Count() then foreach), re-running
    // the grouping each time; the sibling encoders all call ToList().
    var topicGroups = this.Fetches
        .GroupBy(x => x.Topic)
        .ToList();

    // Reserve space for the total request length; back-filled at the end.
    var placeholder = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Fetch-specific metadata.
    writer.Write(ReplicaId);
    writer.Write(this.MaxWaitTime);
    writer.Write(this.MinBytes);
    writer.Write(topicGroups.Count);

    foreach (var topicGroup in topicGroups)
    {
        // Same fix as above: materialize before counting and enumerating.
        var partitions = topicGroup
            .GroupBy(x => x.PartitionId)
            .ToList();

        // Topic name and the number of partition groups under it.
        writer.Write(topicGroup.Key);
        writer.Write(partitions.Count);

        foreach (var partition in partitions)
        {
            foreach (var fetch in partition)
            {
                // Partition id followed by the fetch details.
                writer.Write(partition.Key);
                writer.Write(fetch.Offset);
                writer.Write(fetch.MaxBytes);
            }
        }
    }

    // Back-fill the total request length.
    writer.WriteLengthAt(placeholder);
}
/// <summary>
/// Encodes this request onto the stream: a length-prefixed header followed
/// by the consumer group name.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
public void Encode(BinaryStream writer)
{
    // Reserve space for the total request length; back-filled at the end.
    var sizeSlot = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Body: just the consumer group.
    writer.Write(this.ConsumerGroup);

    // Back-fill the total request length.
    writer.WriteLengthAt(sizeSlot);
}
/// <summary>
/// Encodes this offset-fetch request onto the stream: a length-prefixed
/// header, the consumer group, then the requested partitions grouped by topic.
/// </summary>
/// <param name="writer">The binary stream to encode the request into.</param>
public void Encode(BinaryStream writer)
{
    if (this.Topics == null)
    {
        this.Topics = new List<OffsetFetch>();
    }

    // Requests are sent grouped per topic, then per partition within the topic.
    var byTopic = this.Topics
        .GroupBy(t => t.Topic)
        .ToList();

    // Reserve space for the total request length; back-filled at the end.
    var sizeSlot = writer.PutPlaceholder();

    // Request header.
    writer.Write(this.ApiKey);
    writer.Write(KafkaRequest.ApiVersion);
    writer.Write(this.Correlation);
    writer.Write(this.Client);

    // Consumer group followed by the topic-group count.
    writer.Write(ConsumerGroup);
    writer.Write(byTopic.Count);

    foreach (var topicGroup in byTopic)
    {
        var byPartition = topicGroup
            .GroupBy(t => t.PartitionId)
            .ToList();

        // Topic name and the number of partition groups under it.
        writer.Write(topicGroup.Key);
        writer.Write(byPartition.Count);

        foreach (var partitionGroup in byPartition)
        {
            // NOTE(review): each entry writes its own PartitionId, so a
            // partition with multiple entries is written more than once while
            // the count above reflects distinct partitions — confirm against
            // the wire format this targets.
            foreach (var entry in partitionGroup)
            {
                writer.Write(entry.PartitionId);
            }
        }
    }

    // Back-fill the total request length.
    writer.WriteLengthAt(sizeSlot);
}