// Round-trips a randomly-populated OffsetCommitResponse through
// Serialize/Deserialize and verifies that (a) the deserialized object is
// structurally equal to the original and (b) re-serializing the copy
// produces a byte-identical encoding.
public void OffsetCommitResponse()
{
    var response1 = new OffsetCommitResponse
    {
        TopicPartitions = new[]
        {
            new OffsetCommitResponseTopicPartition
            {
                TopicName = Guid.NewGuid().ToString(),
                Details = new[]
                {
                    new OffsetCommitResponseTopicPartitionDetail
                    {
                        Partition = _random.Next(),
                    }
                }
            }
        }
    };

    // Serialize, rewind, and deserialize into a fresh instance.
    Stream binary1 = new MemoryStream();
    response1.Serialize(binary1);
    binary1.Seek(0L, SeekOrigin.Begin);

    var response2 = new OffsetCommitResponse();
    response2.Deserialize(binary1);

    // Deep structural comparison of original vs. round-tripped object.
    var compareLogic = new CompareLogic();
    var result = compareLogic.Compare(response1, response2);
    Assert.True(result.AreEqual);

    // Re-serialize the copy; the two encodings must match exactly.
    Stream binary2 = new MemoryStream();
    response2.Serialize(binary2);
    Assert.Equal(binary1.Length, binary2.Length);

    using (var stream1 = new MemoryStream())
    using (var stream2 = new MemoryStream())
    {
        binary1.Seek(0L, SeekOrigin.Begin);
        binary1.CopyTo(stream1);
        binary2.Seek(0L, SeekOrigin.Begin);
        binary2.CopyTo(stream2);

        // Assert.Equal on arrays compares length and every element in one
        // call, replacing the original redundant length checks, no-op Seeks
        // before ToArray (ToArray ignores Position), and the manual
        // byte-by-byte loop — and gives a better failure message.
        Assert.Equal(stream1.ToArray(), stream2.ToArray());
    }
}
// Builds an OffsetCommitResponse covering every (topic, partition) pair for
// the injected parameter combination and asserts it survives an
// encode/decode round trip.
public void OffsetCommitResponse(
    [Values("test", "a really long name, with spaces and punctuation!")] string topicName,
    [Values(1, 10)] int topicsPerRequest,
    [Values(1, 5)] int partitionsPerTopic,
    [Values(
         ErrorResponseCode.None,
         ErrorResponseCode.OffsetMetadataTooLarge
     )] ErrorResponseCode errorCode)
{
    var topics = new List<TopicResponse>();

    // One response per partition of each synthetic topic; topic names are
    // made unique by suffixing the topic index.
    for (var topicIndex = 0; topicIndex < topicsPerRequest; topicIndex++)
    {
        for (var partition = 0; partition < partitionsPerTopic; partition++)
        {
            topics.Add(new TopicResponse(topicName + topicIndex, partition, errorCode));
        }
    }

    var response = new OffsetCommitResponse(topics);
    response.AssertCanEncodeDecodeResponse(0);
}
// Builds an OffsetCommitResponse covering every (topic, partition) pair for
// the injected parameter combination and asserts it survives an
// encode/decode round trip.
public void OffsetCommitResponse(
    [Values("testTopic")] string topicName,
    [Values(1, 10)] int topicsPerRequest,
    [Values(1, 5)] int partitionsPerTopic,
    [Values(
         ErrorCode.NONE,
         ErrorCode.OFFSET_METADATA_TOO_LARGE
     )] ErrorCode errorCode)
{
    var topics = new List<TopicResponse>();

    // One response per partition of each synthetic topic; topic names are
    // made unique by suffixing the topic index.
    for (var topicIndex = 0; topicIndex < topicsPerRequest; topicIndex++)
    {
        for (var partition = 0; partition < partitionsPerTopic; partition++)
        {
            topics.Add(new TopicResponse(topicName + topicIndex, partition, errorCode));
        }
    }

    var response = new OffsetCommitResponse(topics);
    response.AssertCanEncodeDecodeResponse(0);
}
/// <summary>
/// Writes the wire format of an <see cref="OffsetCommitResponse"/>:
/// topic-group count, then for each topic its name, its partition count,
/// and each partition's id and error code.
/// </summary>
/// <returns>False when <paramref name="response"/> is null; otherwise true.</returns>
private static bool TryEncodeResponse(IKafkaWriter writer, IRequestContext context, OffsetCommitResponse response)
{
    if (response == null)
    {
        return false;
    }

    // Partitions are grouped by topic so each topic name is written once.
    // GroupBy preserves the encounter order of keys and elements.
    var topicGroups = response.responses.GroupBy(t => t.topic).ToList();
    writer.Write(topicGroups.Count);

    foreach (var topicGroup in topicGroups)
    {
        var details = topicGroup.ToList();
        writer.Write(topicGroup.Key);
        writer.Write(details.Count); // partitions in this topic

        foreach (var detail in details)
        {
            writer.Write(detail.partition_id);
            writer.Write(detail.error_code);
        }
    }

    return true;
}