/// <summary>
/// Encodes a message object to byte[].
/// </summary>
/// <param name="message">Message data to encode.</param>
/// <returns>Encoded byte[] representation of the message object.</returns>
/// <remarks>
/// Format:
/// Crc (Int32), MagicByte (Byte), Attribute (Byte), Key (Byte[]), Value (Byte[])
/// </remarks>
public static byte[] EncodeMessage(Message message)
{
    // Serialize the body first; the CRC is computed over everything after itself.
    var body = new WriteByteStream();
    body.Pack(
        new[] { message.MagicNumber },
        new[] { message.Attribute },
        message.Key.ToIntPrefixedBytes(),
        message.Value.ToIntPrefixedBytes());

    // Prepend the checksum of the body to produce the final wire form.
    body.Prepend(Crc32.ComputeHash(body.Payload()));
    return body.Payload();
}
/// <summary>
/// Encodes an OffsetCommitRequest into the Kafka wire format:
/// header, consumer group, then per-topic partition/offset/timestamp/metadata entries,
/// with the total message size prepended.
/// </summary>
/// <param name="request">The offset commit request to encode; a null OffsetCommits list is treated as empty.</param>
/// <returns>Encoded byte[] ready to send on the wire.</returns>
private byte[] EncodeOffsetCommitRequest(OffsetCommitRequest request)
{
    var message = new WriteByteStream();
    // Normalize a null commit list so a valid zero-count body is still produced.
    if (request.OffsetCommits == null) { request.OffsetCommits = new List <OffsetCommit>(); }

    message.Pack(EncodeHeader(request));
    message.Pack(request.ConsumerGroup.ToInt16SizedBytes());

    var topicGroups = request.OffsetCommits.GroupBy(x => x.Topic).ToList();
    message.Pack(topicGroups.Count.ToBytes());

    foreach (var topicGroup in topicGroups)
    {
        var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
        message.Pack(topicGroup.Key.ToInt16SizedBytes(), partitions.Count.ToBytes());

        // NOTE(review): partitions.Count is written as the partition entry count above, but the
        // inner loop emits one full entry per commit. If a single partition ever carries more
        // than one commit, the declared count will not match the entries emitted — confirm the
        // caller only supplies one commit per partition.
        foreach (var partition in partitions)
        {
            foreach (var commit in partition)
            {
                message.Pack(partition.Key.ToBytes(), commit.Offset.ToBytes(), commit.TimeStamp.ToBytes(), commit.Metadata.ToInt16SizedBytes());
            }
        }
    }

    // Prepend the total payload length (standard Kafka size-prefixed framing).
    message.Prepend(message.Length().ToBytes());
    return(message.Payload());
}
/// <summary>
/// Builds a minimal size-prefixed frame containing only a correlation id:
/// a length prefix of 4 followed by the 4-byte id.
/// </summary>
/// <param name="id">The correlation id to encode.</param>
/// <returns>The 8-byte encoded frame.</returns>
private static byte[] CreateCorrelationMessage(int id)
{
    var buffer = new WriteByteStream();
    buffer.Pack(4.ToBytes());
    buffer.Pack(id.ToBytes());
    return buffer.Payload();
}
/// <summary>
/// Encodes an OffsetFetchRequest into the Kafka wire format:
/// header, consumer group, then per-topic partition id entries,
/// with the total message size prepended.
/// </summary>
/// <param name="request">The offset fetch request to encode; a null Topics list is treated as empty.</param>
/// <returns>Encoded byte[] ready to send on the wire.</returns>
protected byte[] EncodeOffsetFetchRequest(OffsetFetchRequest request)
{
    var message = new WriteByteStream();
    // Normalize a null topic list so a valid zero-count body is still produced.
    if (request.Topics == null) { request.Topics = new List <OffsetFetch>(); }

    message.Pack(EncodeHeader(request));

    var topicGroups = request.Topics.GroupBy(x => x.Topic).ToList();
    // NOTE(review): this packs the enclosing type's ConsumerGroup member, whereas the
    // OffsetCommit/ConsumerMetadata encoders use request.ConsumerGroup — confirm the
    // request's own group is not being silently ignored here.
    message.Pack(ConsumerGroup.ToInt16SizedBytes(), topicGroups.Count.ToBytes());

    foreach (var topicGroup in topicGroups)
    {
        var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
        message.Pack(topicGroup.Key.ToInt16SizedBytes(), partitions.Count.ToBytes());

        // NOTE(review): partitions.Count is the declared entry count, but one entry is
        // emitted per OffsetFetch item; with multiple items per partition the counts
        // would diverge — confirm callers supply one item per partition.
        foreach (var partition in partitions)
        {
            foreach (var offset in partition)
            {
                message.Pack(offset.PartitionId.ToBytes());
            }
        }
    }

    // Prepend the total payload length (standard Kafka size-prefixed framing).
    message.Prepend(message.Length().ToBytes());
    return(message.Payload());
}
/// <summary>
/// Encodes a FetchRequest into the Kafka wire format:
/// header, replica id, max wait time, min bytes, then per-topic
/// partition/offset/max-bytes entries, with the total message size prepended.
/// </summary>
/// <param name="request">The fetch request to encode; a null Fetches list is treated as empty.</param>
/// <returns>Encoded byte[] ready to send on the wire.</returns>
private byte[] EncodeFetchRequest(FetchRequest request)
{
    var message = new WriteByteStream();
    // Normalize a null fetch list so a valid zero-count body is still produced.
    if (request.Fetches == null) { request.Fetches = new List <Fetch>(); }

    message.Pack(EncodeHeader(request));

    var topicGroups = request.Fetches.GroupBy(x => x.Topic).ToList();
    message.Pack(ReplicaId.ToBytes(), request.MaxWaitTime.ToBytes(), request.MinBytes.ToBytes(), topicGroups.Count.ToBytes());

    foreach (var topicGroup in topicGroups)
    {
        var partitions = topicGroup.GroupBy(x => x.PartitionId).ToList();
        message.Pack(topicGroup.Key.ToInt16SizedBytes(), partitions.Count.ToBytes());

        // NOTE(review): partitions.Count is the declared partition entry count, but one
        // entry is emitted per Fetch item; multiple fetches against the same partition
        // would make the counts diverge — confirm callers supply one fetch per partition.
        foreach (var partition in partitions)
        {
            foreach (var fetch in partition)
            {
                message.Pack(partition.Key.ToBytes(), fetch.Offset.ToBytes(), fetch.MaxBytes.ToBytes());
            }
        }
    }

    // Prepend the total payload length (standard Kafka size-prefixed framing).
    message.Prepend(message.Length().ToBytes());
    return(message.Payload());
}
/// <summary>
/// Encodes a ConsumerMetadataRequest (header followed by the consumer group name),
/// prefixed with the total message size.
/// NOTE: despite the "Response" in the name, this encodes the outgoing request.
/// </summary>
/// <param name="request">The consumer metadata request to encode.</param>
/// <returns>Encoded byte[] ready to send on the wire.</returns>
private byte[] EncodeConsumerMetadataResponse(ConsumerMetadataRequest request)
{
    var stream = new WriteByteStream();
    stream.Pack(EncodeHeader(request));
    stream.Pack(request.ConsumerGroup.ToInt16SizedBytes());

    // Standard Kafka size-prefixed framing.
    stream.Prepend(stream.Length().ToBytes());
    return stream.Payload();
}
/// <summary>
/// Encode the common header shared by every Kafka request.
/// </summary>
/// <param name="request">The request whose header fields are serialized.</param>
/// <returns>Encoded byte[] header.</returns>
/// <remarks>Format: (hhihs) — ApiKey (Int16), ApiVersion (Int16), CorrelationId (Int32), ClientId (Int16-sized string).</remarks>
public static byte[] EncodeHeader <T>(IKafkaRequest <T> request)
{
    var header = new WriteByteStream();
    header.Pack(((Int16)request.ApiKey).ToBytes());
    header.Pack(ApiVersion.ToBytes());
    header.Pack(request.CorrelationId.ToBytes());
    header.Pack(request.ClientId.ToInt16SizedBytes());
    return header.Payload();
}
/// <summary>
/// Encodes a collection of messages into one byte[]. Encoded in order of list.
/// </summary>
/// <param name="messages">The collection of messages to encode together.</param>
/// <returns>Encoded byte[] representing the collection of messages.</returns>
public static byte[] EncodeMessageSet(IEnumerable <Message> messages)
{
    var set = new WriteByteStream();
    foreach (var message in messages)
    {
        var encoded = EncodeMessage(message);
        // Each entry: Offset (Int64, always 0 when producing), MessageSize (Int32), Message bytes.
        set.Pack(((long)0).ToBytes(), encoded.Length.ToBytes(), encoded);
    }
    return set.Payload();
}
/// <summary>
/// Encodes a ProduceRequest into the Kafka wire format:
/// header, acks, timeout, then per topic/partition/codec group a message set
/// (optionally gzip-compressed), with the total message size prepended.
/// </summary>
/// <param name="request">The produce request to encode; a null Payload list is treated as empty.</param>
/// <returns>Encoded byte[] ready to send on the wire.</returns>
/// <exception cref="NotSupportedException">Thrown when a payload specifies an unsupported codec.</exception>
private byte[] EncodeProduceRequest(ProduceRequest request)
{
    var message = new WriteByteStream();
    // Normalize a null payload list so a valid zero-count body is still produced.
    if (request.Payload == null) { request.Payload = new List <Payload>(); }

    // One wire entry per distinct (topic, partition, codec) combination.
    var groupedPayloads = (from p in request.Payload
                           group p by new { p.Topic, p.Partition, p.Codec } into tpc
                           select tpc).ToList();

    message.Pack(EncodeHeader(request)); //header
    message.Pack(request.Acks.ToBytes(), request.TimeoutMS.ToBytes(), groupedPayloads.Count.ToBytes()); //metadata

    foreach (var groupedPayload in groupedPayloads)
    {
        var payloads = groupedPayload.ToList();
        // NOTE(review): payloads.Count (number of Payload objects in this group) is written
        // into the slot following the topic name — confirm this matches the expected
        // partition-entry count for the wire format.
        message.Pack(groupedPayload.Key.Topic.ToInt16SizedBytes(), payloads.Count.ToBytes());

        byte[] messageSet;
        switch (groupedPayload.Key.Codec)
        {
            case MessageCodec.CodecNone:
                messageSet = Message.EncodeMessageSet(payloads.SelectMany(x => x.Messages));
                break;

            case MessageCodec.CodecGzip:
                messageSet = Message.EncodeMessageSet(CreateGzipCompressedMessage(payloads.SelectMany(x => x.Messages)));
                break;

            default:
                throw new NotSupportedException(string.Format("Codec type of {0} is not supported.", groupedPayload.Key.Codec));
        }

        // FIX: use byte[].Length instead of LINQ Count() — same value without enumerating the array.
        message.Pack(groupedPayload.Key.Partition.ToBytes(), messageSet.Length.ToBytes(), messageSet);
    }

    //prepend final messages size and return
    message.Prepend(message.Length().ToBytes());
    return message.Payload();
}
/// <summary>
/// Encode a request for metadata about topic and broker information.
/// </summary>
/// <param name="request">The MetaDataRequest to encode.</param>
/// <returns>Encoded byte[] representing the request.</returns>
/// <remarks>Format: (MessageSize), Header, ix(hs)</remarks>
private byte[] EncodeMetadataRequest(MetadataRequest request)
{
    var stream = new WriteByteStream();
    // Normalize a null topic list so a valid zero-count body is still produced.
    if (request.Topics == null) { request.Topics = new List <string>(); }

    stream.Pack(EncodeHeader(request)); //header
    stream.Pack(request.Topics.Count.ToBytes());
    foreach (var topic in request.Topics)
    {
        stream.Pack(topic.ToInt16SizedBytes());
    }

    // Standard Kafka size-prefixed framing.
    stream.Prepend(stream.Length().ToBytes());
    return stream.Payload();
}
/// <summary>
/// Two logical messages sent as one network payload must be readable with
/// two successive reads without losing the bytes between them.
/// </summary>
public void ReadShouldNotLoseDataFromStreamOverMultipleReads()
{
    using (var server = new FakeTcpServer(FakeServerPort))
    {
        const int firstMessage = 99;
        const string secondMessage = "testmessage";

        // Combine both messages into a single outbound payload.
        var combined = new WriteByteStream();
        combined.Pack(firstMessage.ToBytes(), secondMessage.ToBytes());

        var socket = new KafkaTcpSocket(new DefaultTraceLog(), _fakeServerUrl);

        //send the combined payload
        server.SendDataAsync(combined.Payload());

        // The first read consumes exactly the 4-byte int...
        var readFirst = socket.ReadAsync(4).Result.ToInt32();
        Assert.That(readFirst, Is.EqualTo(firstMessage));

        // ...and the rest of the stream must still be intact for the second read.
        var readSecond = Encoding.ASCII.GetString(socket.ReadAsync(secondMessage.Length).Result);
        Assert.That(readSecond, Is.EqualTo(secondMessage));
    }
}
/// <summary>
/// Multiple pending reads issued before any data arrives must each complete
/// with exactly one message's worth of bytes, in order.
/// </summary>
public void ReadShouldStackReadRequestsAndReturnOneAtATime()
{
    using (var server = new FakeTcpServer(FakeServerPort))
    {
        var messages = new[] { "test1", "test2", "test3", "test4" };
        var expectedLength = "test1".Length;

        // All four messages go out as one contiguous payload.
        var payload = new WriteByteStream();
        payload.Pack(messages.Select(x => x.ToBytes()).ToArray());

        var socket = new KafkaTcpSocket(new DefaultTraceLog(), _fakeServerUrl);

        // Queue up one pending read per message before any data is sent.
        var tasks = messages.Select(x => socket.ReadAsync(x.Length)).ToArray();

        server.SendDataAsync(payload.Payload());
        Task.WaitAll(tasks);

        // Each stacked read should have received exactly one message's bytes.
        foreach (var task in tasks)
        {
            Assert.That(task.Result.Length, Is.EqualTo(expectedLength));
        }
    }
}