/// <summary>
/// Benchmark setup: builds a ProduceRequest with the configured partition/message
/// counts and codec, and starts a loopback TCP server that answers every request
/// with a canned ProduceResponse keyed to the request's correlation id.
/// </summary>
public void SetupData()
{
    Common.Compression.ZipLevel = Level;
    _request = new ProduceRequest(
        Enumerable.Range(1, Partitions)
                  .Select(partitionId => new ProduceRequest.Topic(
                      "topic", partitionId,
                      Enumerable.Range(1, Messages)
                                .Select(i => new Message(GenerateMessageBytes(), new ArraySegment <byte>(), (byte)Codec, version: MessageVersion)),
                      Codec)));

    var response = new ProduceResponse(new ProduceResponse.Topic("topic", 1, ErrorCode.NONE, 0));
    var port = 10000; // NOTE(review): fixed port — assumes nothing else is bound here; confirm for parallel runs
    var endpoint = new Endpoint(new IPEndPoint(IPAddress.Loopback, port), "localhost");
    _server = new TcpServer(endpoint.Ip.Port) {
        OnReceivedAsync = async data =>
        {
            // Skip the 4-byte length prefix, read the header, and echo back the canned response.
            var header = KafkaDecoder.DecodeHeader(data.Skip(4));
            var bytes = KafkaDecoder.EncodeResponseBytes(new RequestContext(header.CorrelationId), response);
            await _server.SendDataAsync(bytes);
        }
    };
    _connection = new Connection(endpoint);
}
/// <summary>
/// Verifies a request issued before the broker is reachable is not timed out while
/// the connection is still being established: the task stays pending until a server
/// appears, then completes normally.
/// </summary>
public async Task SendAsyncShouldNotAllowResponseToTimeoutWhileAwaitingKafkaToEstableConnection()
{
    var endpoint = Endpoint.Resolve(TestConfig.ServerUri(), TestConfig.InfoLog);
    using (var socket = new TcpSocket(endpoint, log: TestConfig.InfoLog))
    using (var conn = new Connection(socket, new ConnectionConfiguration(requestTimeout: TimeSpan.FromSeconds(1000)), log: TestConfig.InfoLog)) {
        Console.WriteLine("SendAsync blocked by reconnection attempts...");
        var taskResult = conn.SendAsync(new MetadataRequest(), CancellationToken.None);

        // No server yet, so the send must still be pending (not faulted, not complete).
        Console.WriteLine("Task result should be WaitingForActivation...");
        Assert.That(taskResult.IsFaulted, Is.False);
        Assert.That(taskResult.Status, Is.EqualTo(TaskStatus.WaitingForActivation));

        Console.WriteLine("Starting server to establish connection...");
        using (var server = new FakeTcpServer(TestConfig.InfoLog, endpoint.IP.Port)) {
            server.OnClientConnected += () => Console.WriteLine("Client connected...");
            server.OnBytesReceived += b =>
            {
                // Reply with a canned metadata response carrying the matching correlation id.
                var request = KafkaDecoder.DecodeHeader(b);
                AsyncContext.Run(async() => await server.SendDataAsync(MessageHelper.CreateMetadataResponse(request.CorrelationId, "Test")));
            };

            // Give the round-trip up to 10 seconds before checking the outcome.
            await Task.WhenAny(taskResult, Task.Delay(TimeSpan.FromSeconds(10)));

            Assert.That(taskResult.IsFaulted, Is.False);
            Assert.That(taskResult.IsCanceled, Is.False);
            await taskResult;
            Assert.That(taskResult.Status, Is.EqualTo(TaskStatus.RanToCompletion));
        }
    }
}
/// <summary>
/// Decode messages from a payload and assign it a given kafka offset.
/// </summary>
/// <param name="offset">The offset representing the log entry from kafka of this message.</param>
/// <param name="partitionId">The partition the message was read from.</param>
/// <param name="decoder">Decoder positioned at the start of the message (at the CRC field).</param>
/// <param name="messageSize">Total size of this message in bytes, including the 4-byte CRC.</param>
/// <returns>The message</returns>
/// <remarks>The return type is an Enumerable as the message could be a compressed message set.</remarks>
internal static Message DecodeMessage(long offset, int partitionId, KafkaDecoder decoder, int messageSize)
{
    // The CRC covers everything after the 4-byte CRC field itself.
    var crc = decoder.ReadUInt32();
    var calculatedCrc = Crc32Provider.Compute(decoder.Buffer, decoder.Offset, messageSize - 4);
    if (calculatedCrc != crc) {
        throw new FailCrcCheckException("Payload did not match CRC validation.");
    }

    var message = new Message {
        Meta = new MessageMetadata(offset, partitionId),
        MagicNumber = decoder.ReadByte(),
        Attribute = decoder.ReadByte(),
        Key = decoder.ReadBytes(),
    };

    // The masked bits of the attribute byte select the compression codec.
    var codec = (MessageCodec)(ProtocolConstants.AttributeCodeMask & message.Attribute);
    switch (codec) {
        case MessageCodec.CodecNone:
            message.Value = decoder.ReadBytes();
            break;

        default:
            throw new NotSupportedException(string.Format("Codec type of {0} is not supported.", codec));
    }
    return(message);
}
/// <summary>
/// With dynamic version discovery enabled, the ApiVersions handshake must be issued
/// exactly once per connection no matter how many requests follow.
/// </summary>
public async Task SendAsyncWithDynamicVersionInfoOnlyMakesVersionCallOnce()
{
    var versionRequests = 0;
    var endpoint = TestConfig.ServerEndpoint();
    using (var server = new TcpServer(endpoint.Ip.Port, TestConfig.Log))
    using (var conn = new Connection(endpoint, new ConnectionConfiguration(requestTimeout: TimeSpan.FromSeconds(3), versionSupport: VersionSupport.Kafka8.Dynamic()), log: TestConfig.Log)) {
        server.OnReceivedAsync = async data =>
        {
            var fullHeader = KafkaDecoder.DecodeFullHeader(data.Skip(Request.IntegerByteSize));
            var context = fullHeader.Item1;
            switch (fullHeader.Item2) {
                case ApiKey.ApiVersions:
                    // Count every version handshake the client makes.
                    Interlocked.Increment(ref versionRequests);
                    await server.SendDataAsync(KafkaDecoder.EncodeResponseBytes(context, new ApiVersionsResponse(ErrorCode.NONE, new[] { new ApiVersionsResponse.VersionSupport(ApiKey.Fetch, 3, 3) })));
                    break;

                default:
                    await server.SendDataAsync(KafkaDecoder.EncodeResponseBytes(context, new FetchResponse()));
                    break;
            }
        };

        // Three fetches should trigger only a single version handshake.
        for (var i = 0; i < 3; i++) {
            await conn.SendAsync(new FetchRequest(new FetchRequest.Topic("Foo", 0, 0)), CancellationToken.None);
        }
        Assert.That(versionRequests, Is.EqualTo(1));
    }
}
/// <summary>
/// Decodes a metadata response body from the decoder. The leading correlation id
/// is consumed to advance past the header but is otherwise unused here.
/// </summary>
/// <param name="decoder">Decoder positioned at the correlation id.</param>
/// <returns>The decoded metadata response.</returns>
private static MetadataResponse DecodeMetadataResponse(KafkaDecoder decoder)
{
    // The read is required for its side effect (advancing the decoder past the
    // correlation id), so don't bind the value to an unused local.
    decoder.ReadInt32(); // correlation id
    return MetadataResponse.Decode(decoder);
}
/// <summary>
/// Exercises the correlation-id overflow guard: once the number of outstanding
/// requests exceeds OverflowGuard, correlation ids must wrap back toward zero.
/// </summary>
public async Task CorrelationOverflowGuardWorks()
{
    var correlationId = -1;
    var endpoint = TestConfig.ServerEndpoint();
    using (var server = new TcpServer(endpoint.Ip.Port, TestConfig.Log))
    using (var conn = new Connection(endpoint, new ConnectionConfiguration(requestTimeout: TimeSpan.FromMilliseconds(5)), TestConfig.Log)) {
        // Record each correlation id but never respond, so every send times out quickly.
        server.OnReceivedAsync = data =>
        {
            var context = KafkaDecoder.DecodeHeader(data.Skip(Request.IntegerByteSize));
            correlationId = context.CorrelationId;
            return(Task.FromResult(0));
        };
        try {
            Connection.OverflowGuard = 10;
            await AssertAsync.Throws <TimeoutException>(() => conn.SendAsync(new MetadataRequest(), CancellationToken.None));
            var initialCorrelation = correlationId;

            // Fill the window up to the guard; ids should keep climbing.
            await AssertAsync.Throws <TimeoutException>(() => Task.WhenAll(Enumerable.Range(initialCorrelation, Connection.OverflowGuard - 1).Select(i => conn.SendAsync(new MetadataRequest(), CancellationToken.None))));
            await AssertAsync.ThatEventually(() => correlationId > 1, () => $"correlation {correlationId}");
            var currentCorrelation = correlationId;

            // Push past the guard; ids should wrap to values below the last observed one.
            await AssertAsync.Throws <TimeoutException>(() => Task.WhenAll(Enumerable.Range(0, Connection.OverflowGuard / 2).Select(i => conn.SendAsync(new MetadataRequest(), CancellationToken.None))));
            await AssertAsync.ThatEventually(() => correlationId < currentCorrelation, () => $"correlation {correlationId}");
        } finally {
            // Restore the static default so other tests are unaffected.
            Connection.OverflowGuard = int.MaxValue >> 1;
        }
    }
}
/// <summary>
/// Even when a request times out on the client side, a late-arriving response must
/// still be read (onRead fires) and the timeout must be logged at debug level.
/// </summary>
public async Task MessagesStillLogWhenSendTimesOut()
{
    var logger = new MemoryLog();
    var received = 0;
    var timeout = TimeSpan.FromMilliseconds(100);
    var endpoint = TestConfig.ServerEndpoint();
    using (var server = new TcpServer(endpoint.Ip.Port, TestConfig.Log))
    using (var conn = new Connection(endpoint, new ConnectionConfiguration(requestTimeout: timeout, onRead: (e, read, elapsed) => Interlocked.Increment(ref received)), logger)) {
        await Task.WhenAny(server.ClientConnected, Task.Delay(TimeSpan.FromSeconds(3)));
        server.OnReceivedAsync = async data =>
        {
            // Delay the reply past the request timeout so the client gives up first.
            var context = KafkaDecoder.DecodeHeader(data.Skip(Request.IntegerByteSize));
            await Task.Delay(timeout);
            await server.SendDataAsync(KafkaDecoder.EncodeResponseBytes(context, new MetadataResponse()));
        };

        await AssertAsync.Throws <TimeoutException>(() => conn.SendAsync(new MetadataRequest(), CancellationToken.None));
        // The read callback still fires for the late response...
        await AssertAsync.ThatEventually(() => received > 0, () => $"received {received}");
        // ...and the timed-out request is logged at debug level.
        await AssertAsync.ThatEventually(() => logger.LogEvents.Any(e => e.Item1 == LogLevel.Debug && e.Item2.Message.StartsWith("Timed out -----> (timed out or otherwise errored in client)")), () => logger.ToString(LogLevel.Debug));
    }
}
/// <summary>
/// Verifies a request issued before any server is listening stays pending (not
/// timed out) while the connection is established, then completes once a server
/// appears and responds.
/// </summary>
public async Task SendAsyncShouldNotAllowResponseToTimeoutWhileAwaitingKafkaToEstableConnection()
{
    var endpoint = TestConfig.ServerEndpoint();
    using (var conn = new Connection(endpoint, new ConnectionConfiguration(requestTimeout: TimeSpan.FromSeconds(1000)), log: TestConfig.Log)) {
        // SendAsync blocked by reconnection attempts
        var taskResult = conn.SendAsync(new MetadataRequest(), CancellationToken.None);

        // Task result should be WaitingForActivation
        Assert.That(taskResult.IsFaulted, Is.False);
        Assert.That(taskResult.Status, Is.EqualTo(TaskStatus.WaitingForActivation));

        // Starting server to establish connection
        using (var server = new TcpServer(endpoint.Ip.Port, TestConfig.Log)) {
            server.OnConnected = () => TestConfig.Log.Info(() => LogEvent.Create("Client connected..."));
            server.OnReceivedAsync = async data =>
            {
                // Answer with a canned metadata response matching the request's header.
                var requestContext = KafkaDecoder.DecodeHeader(data.Skip(Request.IntegerByteSize));
                await server.SendDataAsync(MessageHelper.CreateMetadataResponse(requestContext, "Test"));
            };

            // Give the round-trip up to 5 seconds before checking the outcome.
            await Task.WhenAny(taskResult, Task.Delay(TimeSpan.FromSeconds(5)));

            Assert.That(taskResult.IsFaulted, Is.False);
            Assert.That(taskResult.IsCanceled, Is.False);
            await taskResult;
            Assert.That(taskResult.Status, Is.EqualTo(TaskStatus.RanToCompletion));
        }
    }
}
/// <summary>
/// Reads one partition's metadata: error code, partition id, leader id, then the
/// count-prefixed replica and in-sync-replica id lists. Read order must not change.
/// </summary>
internal static Partition Decode(KafkaDecoder decoder)
{
    var errorCode = decoder.ReadErrorResponseCode();
    var partitionId = decoder.ReadInt32();
    var leaderId = decoder.ReadInt32();

    var replicaCount = decoder.ReadInt32();
    var replicas = new int[replicaCount];
    for (var r = 0; r < replicas.Length; r++) {
        replicas[r] = decoder.ReadInt32();
    }

    var isrCount = decoder.ReadInt32();
    var isrs = new int[isrCount];
    for (var s = 0; s < isrs.Length; s++) {
        isrs[s] = decoder.ReadInt32();
    }

    return new Partition(errorCode, partitionId, leaderId, replicas, isrs);
}
/// <summary>
/// Decode a byte[] that represents a collection of messages.
/// </summary>
/// <param name="partitionId">The partition these messages belong to.</param>
/// <param name="decoder">The decoder positioned at the start of the buffer</param>
/// <param name="messageSetSize">Declared size of the message set in bytes.</param>
/// <returns>The messages</returns>
internal static List <Message> DecodeMessageSet(int partitionId, KafkaDecoder decoder, int messageSetSize)
{
    var numberOfBytes = messageSetSize;
    var messages = new List <Message>();
    while (numberOfBytes > 0) {
        // Not even a full header remains: the set is truncated, stop cleanly.
        if (numberOfBytes < MessageHeaderSize) {
            break;
        }
        var offset = decoder.ReadInt64();
        var messageSize = decoder.ReadInt32();
        if (messageSetSize - MessageHeaderSize < messageSize) {
            // This message is too big to fit in the buffer so we will never get it
            throw new BufferUnderRunException(numberOfBytes, messageSize);
        }
        numberOfBytes -= MessageHeaderSize;
        // Partial message at the end of the buffer: stop and let the caller re-fetch.
        if (numberOfBytes < messageSize) {
            break;
        }
        var message = DecodeMessage(offset, partitionId, decoder, messageSize);
        messages.Add(message);
        numberOfBytes -= messageSize;
    }
    return(messages);
}
/// <summary>
/// Round-trips a request through encode/decode and asserts the decoded copy is
/// equal to <paramref name="forComparison"/> (or the original request when omitted).
/// </summary>
public static void AssertCanEncodeDecodeRequest <T>(this T request, short version, IMembershipEncoder encoder = null, T forComparison = null) where T : class, IRequest
{
    var encoders = ImmutableDictionary <string, IMembershipEncoder> .Empty;
    if (encoder != null) {
        encoders = encoders.Add(encoder.ProtocolType, encoder);
    }
    var context = new RequestContext(17, version, "Test-Request", encoders, encoder?.ProtocolType);
    var bytes = request.ToBytes(context);
    // Skip the length prefix via the named constant (was a magic 4), consistent
    // with AssertCanEncodeDecodeResponse.
    var decoded = KafkaDecoder.Decode <T>(bytes.Skip(Request.IntegerByteSize), context);

    if (forComparison == null) {
        forComparison = request;
    }
    Assert.That(forComparison.GetHashCode(), Is.EqualTo(decoded.GetHashCode()), "HashCode equality");
    Assert.That(forComparison.ShortString(), Is.EqualTo(decoded.ShortString()), "ShortString equality");
    var original = forComparison.ToString();
    var final = decoded.ToString();
    Assert.That(original, Is.EqualTo(final), "ToString equality");
    Assert.That(decoded.Equals(final), Is.False);  // general equality test for sanity
    Assert.That(decoded.Equals(decoded), Is.True); // general equality test for sanity
    Assert.That(forComparison.Equals(decoded), $"Original\n{original}\nFinal\n{final}");
}
/// <summary>
/// Round-trips a response through encode/decode and asserts the decoded copy is
/// equal to <paramref name="forComparison"/> (or the original response when omitted).
/// </summary>
public static void AssertCanEncodeDecodeResponse <T>(this T response, short version, IMembershipEncoder encoder = null, T forComparison = null) where T : class, IResponse
{
    var encoders = ImmutableDictionary <string, IMembershipEncoder> .Empty;
    if (encoder != null) {
        encoders = encoders.Add(encoder.ProtocolType, encoder);
    }
    var context = new RequestContext(16, version, "Test-Response", encoders, encoder?.ProtocolType);
    var data = KafkaDecoder.EncodeResponseBytes(context, response);
    // Skip the length prefix and correlation id to reach the response body.
    var decoded = GetType <T>().ToResponse(context, data.Skip(Request.IntegerByteSize + Request.CorrelationSize));

    if (forComparison == null) {
        forComparison = response;
    }
    Assert.That(forComparison.GetHashCode(), Is.EqualTo(decoded.GetHashCode()), "HashCode equality");
    var original = forComparison.ToString();
    var final = decoded.ToString();
    Assert.That(original, Is.EqualTo(final), "ToString equality");
    Assert.That(decoded.Equals(final), Is.False);  // general test for equality
    Assert.That(decoded.Equals(decoded), Is.True); // general equality test for sanity
    Assert.That(forComparison.Equals(decoded), $"Original\n{original}\nFinal\n{final}");
    Assert.That(forComparison.Errors.HasEqualElementsInOrder(decoded.Errors), "Errors");
}
/// <summary>
/// ReadInt16 must decode the given raw bytes to the expected value.
/// </summary>
public void Int16Tests(Int16 expectedValue, Byte[] givenBytes)
{
    var reader = new KafkaDecoder(givenBytes);

    var decoded = reader.ReadInt16();

    Assert.That(decoded, Is.EqualTo(expectedValue));
}
/// <summary>
/// Reads a single partition id / error code pair for <paramref name="topic"/>.
/// Wire order (partition then error) must be preserved.
/// </summary>
internal static OffsetCommitResponse Decode(KafkaDecoder decoder, string topic)
{
    var partition = decoder.ReadInt32();
    var errorCode = decoder.ReadErrorResponseCode();
    return new OffsetCommitResponse(topic, partition, errorCode);
}
/// <summary>
/// ReadString must consume the entire buffer and yield the expected text.
/// </summary>
public void StringTests(String expectedValue, Byte[] givenBytes)
{
    var reader = new KafkaDecoder(givenBytes);

    var decoded = reader.ReadString();

    // Verify the read consumed every byte, then check the decoded value.
    Assert.That(reader.Offset, Is.EqualTo(givenBytes.Length));
    Assert.That(decoded, Is.EqualTo(expectedValue));
}
/// <summary>
/// Reads the coordinator details: error code, coordinator id, host, and port,
/// in that wire order.
/// </summary>
internal static ConsumerMetadataResponse Decode(KafkaDecoder decoder)
{
    var errorCode = decoder.ReadErrorResponseCode();
    var id = decoder.ReadInt32();
    var host = decoder.ReadString();
    var port = decoder.ReadInt32();
    return new ConsumerMetadataResponse(errorCode, id, host, port);
}
/// <summary>
/// Reads one partition's produce result for <paramref name="topic"/>:
/// partition id, error code, then the assigned offset.
/// </summary>
internal static ProduceResponse Decode(KafkaDecoder decoder, string topic)
{
    var partition = decoder.ReadInt32();
    var errorCode = decoder.ReadErrorResponseCode();
    var assignedOffset = decoder.ReadInt64();
    return new ProduceResponse(topic, partition, errorCode, assignedOffset);
}
/// <summary>
/// Reads one partition's committed offset entry for <paramref name="topic"/>:
/// partition id, offset, metadata string, then error code, in that wire order.
/// </summary>
internal static OffsetFetchResponse Decode(KafkaDecoder decoder, string topic)
{
    var partition = decoder.ReadInt32();
    var committedOffset = decoder.ReadInt64();
    var metadata = decoder.ReadString();
    var errorCode = decoder.ReadErrorResponseCode();
    return new OffsetFetchResponse(topic, partition, committedOffset, metadata, errorCode);
}
/// <summary>
/// The fixture payload ends with a truncated message; decoding should stop
/// cleanly at the partial message rather than throw.
/// </summary>
public void DecodeMessageSetShouldHandleResponseWithMaxBufferSizeHit()
{
    var reader = new KafkaDecoder(MessageHelper.FetchResponseMaxBytesOverflow);

    var messages = Message.DecodeMessageSet(0, reader, reader.Length);

    var firstValue = Encoding.UTF8.GetString(messages.First().Value);
    Assert.That(firstValue, Is.EqualTo("test"));
    Assert.That(messages.Count, Is.EqualTo(529));
}
/// <summary>
/// Reads one partition's offsets entry for <paramref name="topic"/>: partition id,
/// error code, then a count-prefixed list of int64 offsets.
/// </summary>
internal static OffsetResponse Decode(KafkaDecoder decoder, string topic)
{
    var partition = decoder.ReadInt32();
    var errorCode = decoder.ReadErrorResponseCode();

    var count = decoder.ReadInt32();
    var offsets = new long[count];
    for (var index = 0; index < offsets.Length; index++) {
        offsets[index] = decoder.ReadInt64();
    }

    return new OffsetResponse(topic, partition, errorCode, offsets);
}
/// <summary>
/// Reads one topic entry: error code, topic name, then a count-prefixed list of
/// partition entries decoded via <see cref="Partition.Decode"/>.
/// </summary>
internal static Topic Decode(KafkaDecoder decoder)
{
    var errorCode = decoder.ReadErrorResponseCode();
    var topicName = decoder.ReadString();

    var partitionCount = decoder.ReadInt32();
    var partitions = new Partition[partitionCount];
    for (var index = 0; index < partitions.Length; index++) {
        partitions[index] = Partition.Decode(decoder);
    }

    return new Topic(errorCode, topicName, partitions);
}
/// <summary>
/// Corrupting the first byte of an encoded message must make
/// Message.DecodeMessage throw a FailCrcCheckException.
/// </summary>
public void DecodeMessageShouldThrowWhenCrcFails()
{
    Assert.Throws(Is.TypeOf <FailCrcCheckException>(), () =>
    {
        var testMessage = new Message(value: "kafka test message.", key: "test");
        var buffer = new byte[1024];
        var encoder = new KafkaEncoder(buffer);
        Message.EncodeMessage(testMessage, encoder);

        // Flip the first encoded byte so CRC validation fails.
        buffer[0] += 1;

        var decoder = new KafkaDecoder(buffer, 0, encoder.Offset);
        // Result intentionally discarded: this call is expected to throw.
        Message.DecodeMessage(0, 0, decoder, encoder.Offset);
    });
}
/// <summary>
/// Round-trip: encode a message into a scratch buffer, decode it back, and
/// verify the key and value survive unchanged.
/// </summary>
public void EnsureMessageEncodeAndDecodeAreCompatible(string key, string value)
{
    var original = new Message(key: key, value: value);
    var buffer = new byte[1024];
    var encoder = new KafkaEncoder(buffer);
    Message.EncodeMessage(original, encoder);

    var decoded = Message.DecodeMessage(0, 0, new KafkaDecoder(buffer), encoder.Offset);

    Assert.That(original.Key, Is.EqualTo(decoded.Key));
    Assert.That(original.Value, Is.EqualTo(decoded.Value));
}
/// <summary>
/// Reads one partition's fetch result for <paramref name="topic"/>: partition id,
/// error code, high-water mark, then a size-prefixed message set.
/// </summary>
internal static FetchResponse Decode(KafkaDecoder decoder, string topic)
{
    var partitionId = decoder.ReadInt32();
    var error = decoder.ReadErrorResponseCode();
    var highWaterMark = decoder.ReadInt64();

    var messageSetSize = decoder.ReadInt32();
    var current = decoder.Offset;
    var messages = Message.DecodeMessageSet(partitionId, decoder, messageSetSize);
    var response = new FetchResponse(topic, partitionId, error, highWaterMark, messages);
    // In case any truncated messages: jump to the end of the declared message set
    // even if decoding stopped early on a partial message.
    decoder.SetOffset(current + messageSetSize);
    return(response);
}
/// <summary>
/// Benchmark setup: encodes a FetchResponse with the configured partition/message
/// counts and codec into _bytes for the decode benchmark to consume.
/// </summary>
public void SetupData()
{
    Common.Compression.ZipLevel = Level;
    var response = new FetchResponse(
        Enumerable.Range(1, Partitions)
                  .Select(partitionId => new FetchResponse.Topic(
                      "topic", partitionId, 500, ErrorCode.NONE,
                      Enumerable.Range(1, Messages)
                                .Select(i => new Message(GenerateMessageBytes(), new ArraySegment <byte>(), (byte)Codec, version: MessageVersion))
                      )));
    _bytes = KafkaDecoder.EncodeResponseBytes(new RequestContext(1, Version), response);
}
/// <summary>
/// A message set that exactly fills the buffer (no trailing bytes) must still
/// decode its message payload intact.
/// </summary>
public void WhenMessageIsExactlyTheSizeOfBufferThenMessageIsDecoded()
{
    // arrange
    var expectedPayloadBytes = new Byte[] { 1, 2, 3, 4 };
    var payload = MessageHelper.CreateMessage(0, new Byte[] { 0 }, expectedPayloadBytes);

    // act
    var decoder = new KafkaDecoder(payload, 0, payload.Length);
    var messages = Message.DecodeMessageSet(0, decoder, payload.Length);

    // assert — reuse the expected array instead of duplicating the literal
    CollectionAssert.AreEqual(expectedPayloadBytes, messages.First().Value);
}
/// <summary>
/// Reads one topic entry: error code, name, then a count-prefixed list of
/// partitions decoded via <see cref="Partition.Decode"/>.
/// </summary>
internal static Topic Decode(KafkaDecoder decoder)
{
    var error = decoder.ReadErrorResponseCode();
    var topicName = decoder.ReadString();
    var count = decoder.ReadInt32();

    var partitions = new Partition[count];
    var next = 0;
    while (next < partitions.Length) {
        partitions[next++] = Partition.Decode(decoder);
    }

    return new Topic(error, topicName, partitions);
}
/// <summary>
/// Benchmark: encodes and decodes FetchResponses across every combination of
/// codec, message count, and message size, recording the encoded byte size and
/// verifying no messages are lost in the round trip.
/// </summary>
public void FetchSize()
{
    int partitions = 1;
    short version = 0;
    byte messageVersion = 0;

    var results = new List <object>();
    foreach (var codec in new[] { MessageCodec.None, MessageCodec.Gzip, MessageCodec.Snappy }) {
        foreach (var messages in new[] { 100, 10000 }) {
            foreach (var messageSize in new[] { 1, 1000 }) {
                foreach (var level in new[] { CompressionLevel.Fastest }) {
                    Compression.ZipLevel = level;
                    var response = new FetchResponse(
                        Enumerable.Range(1, partitions)
                                  .Select(partitionId => new FetchResponse.Topic(
                                      "topic", partitionId, 500, ErrorCode.NONE,
                                      Enumerable.Range(1, messages)
                                                .Select(i => new Message(GenerateMessageBytes(messageSize), new ArraySegment <byte>(), (byte)codec, version: messageVersion))
                                      )));
                    var bytes = KafkaDecoder.EncodeResponseBytes(new RequestContext(1, version), response);
                    var decoded = FetchResponse.FromBytes(new RequestContext(1, version), bytes.Skip(Request.IntegerByteSize + Request.CorrelationSize));
                    // Round-trip sanity check: every encoded message must decode back.
                    Assert.That(decoded.responses.Sum(t => t.Messages.Count), Is.EqualTo(response.responses.Sum(t => t.Messages.Count)));
                    var result = new {
                        Codec = codec.ToString(),
                        // Compression level is meaningless for the uncompressed codec.
                        Level = codec == MessageCodec.None ? "-" : level.ToString(),
                        Messages = messages,
                        MessageSize = messageSize,
                        Bytes = bytes.Count
                    };
                    results.Add(result);
                }
            }
        }
    }
    WriteResults(results);
}
/// <summary>
/// With static Kafka10 version support, a fetch request must be sent with the
/// pinned API version (asserted as 2) and without any ApiVersions handshake.
/// </summary>
public async Task SendAsyncShouldUseStatictVersionInfo()
{
    IRequestContext context = null;
    var endpoint = TestConfig.ServerEndpoint();
    using (var server = new TcpServer(endpoint.Ip.Port, TestConfig.Log))
    using (var conn = new Connection(endpoint, new ConnectionConfiguration(requestTimeout: TimeSpan.FromSeconds(1000), versionSupport: VersionSupport.Kafka10), log: TestConfig.Log)) {
        server.OnReceivedAsync = async data =>
        {
            // Capture the request header so the sent api version can be inspected.
            context = KafkaDecoder.DecodeHeader(data.Skip(Request.IntegerByteSize));
            await server.SendDataAsync(KafkaDecoder.EncodeResponseBytes(context, new FetchResponse()));
        };

        await conn.SendAsync(new FetchRequest(new FetchRequest.Topic("Foo", 0, 0)), CancellationToken.None);

        await AssertAsync.ThatEventually(() => context != null && context.ApiVersion.GetValueOrDefault() == 2, () => $"version {context?.ApiVersion}");
    }
}
/// <summary>
/// With dynamic version discovery, the first request on the wire must be the
/// ApiVersions handshake, and the following fetch must carry the version the
/// server advertised.
/// </summary>
public async Task SendAsyncWithDynamicVersionInfoMakesVersionCallFirst()
{
    var firstCorrelation = -1;
    var correlationId = 0;
    var sentVersion = (short)-1;
    var endpoint = TestConfig.ServerEndpoint();
    using (var server = new TcpServer(endpoint.Ip.Port, TestConfig.Log))
    using (var conn = new Connection(endpoint, new ConnectionConfiguration(requestTimeout: TimeSpan.FromSeconds(3), versionSupport: VersionSupport.Kafka8.Dynamic()), log: TestConfig.Log)) {
        var apiVersion = (short)3;
        server.OnReceivedAsync = async data =>
        {
            var context = KafkaDecoder.DecodeHeader(data.Skip(Request.IntegerByteSize));
            if (firstCorrelation < 0) {
                firstCorrelation = context.CorrelationId;
            }
            correlationId = context.CorrelationId;
            // Dispatch by request position: 0 = version handshake, 1 = the fetch.
            switch (correlationId - firstCorrelation) {
                case 0:
                    await server.SendDataAsync(KafkaDecoder.EncodeResponseBytes(context, new ApiVersionsResponse(ErrorCode.NONE, new[] { new ApiVersionsResponse.VersionSupport(ApiKey.Fetch, apiVersion, apiVersion) })));
                    break;

                case 1:
                    // Record the version the client actually used for the fetch.
                    sentVersion = context.ApiVersion.GetValueOrDefault();
                    await server.SendDataAsync(KafkaDecoder.EncodeResponseBytes(context, new FetchResponse()));
                    break;

                default:
                    return;
            }
        };

        await conn.SendAsync(new FetchRequest(new FetchRequest.Topic("Foo", 0, 0)), CancellationToken.None);

        await AssertAsync.ThatEventually(() => correlationId - firstCorrelation >= 1, () => $"first {firstCorrelation}, current {correlationId}");
        Assert.That(sentVersion, Is.EqualTo(apiVersion));
    }
}
/// <summary>
/// A header that declares a larger message than the buffer can ever hold must
/// make DecodeMessageSet throw BufferUnderRunException.
/// </summary>
public void WhenMessageIsTruncatedThenBufferUnderRunExceptionIsThrown()
{
    Assert.Throws <BufferUnderRunException>(() =>
    {
        // A 16-byte buffer whose header claims a 5-byte message but carries no body.
        var declaredMessageSize = 5;
        var scratch = new byte[16];
        var writer = new KafkaEncoder(scratch);
        writer.Write((Int64)0);             // message offset
        writer.Write(declaredMessageSize);  // declared size
        writer.Write(new Byte[] { });       // empty body

        Message.DecodeMessageSet(0, new KafkaDecoder(scratch), scratch.Length);
    });
}
/// <summary>
/// Reads one partition's metadata: error code, partition id, leader id, then the
/// count-prefixed replica and in-sync-replica id lists. Read order must not change.
/// </summary>
internal static Partition Decode(KafkaDecoder decoder)
{
    var error = decoder.ReadErrorResponseCode();
    var partition = decoder.ReadInt32();
    var leader = decoder.ReadInt32();

    var replicas = new int[decoder.ReadInt32()];
    var r = 0;
    while (r < replicas.Length) {
        replicas[r++] = decoder.ReadInt32();
    }

    var isrs = new int[decoder.ReadInt32()];
    var n = 0;
    while (n < isrs.Length) {
        isrs[n++] = decoder.ReadInt32();
    }

    return new Partition(error, partition, leader, replicas, isrs);
}
/// <summary>
/// Decode messages from a payload and assign it a given kafka offset.
/// </summary>
/// <param name="offset">The offset representing the log entry from kafka of this message.</param>
/// <param name="partitionId">The partition the message was read from.</param>
/// <param name="decoder">Decoder positioned at the start of the message (at the CRC field).</param>
/// <param name="messageSize">Total size of this message in bytes, including the 4-byte CRC.</param>
/// <returns>The message</returns>
/// <remarks>The return type is an Enumerable as the message could be a compressed message set.</remarks>
internal static Message DecodeMessage(long offset, int partitionId, KafkaDecoder decoder, int messageSize)
{
    // The CRC covers everything after the 4-byte CRC field itself.
    var crc = decoder.ReadUInt32();
    var calculatedCrc = Crc32Provider.Compute(decoder.Buffer, decoder.Offset, messageSize - 4);
    if (calculatedCrc != crc) {
        throw new FailCrcCheckException("Payload did not match CRC validation.");
    }

    var message = new Message {
        Meta = new MessageMetadata(offset, partitionId),
        MagicNumber = decoder.ReadByte(),
        Attribute = decoder.ReadByte(),
        Key = decoder.ReadBytes(),
    };

    // The masked bits of the attribute byte select the compression codec.
    var codec = (MessageCodec)(ProtocolConstants.AttributeCodeMask & message.Attribute);
    switch (codec) {
        case MessageCodec.CodecNone:
            message.Value = decoder.ReadBytes();
            break;

        default:
            throw new NotSupportedException(string.Format("Codec type of {0} is not supported.", codec));
    }
    return message;
}
/// <summary>
/// Decode a byte[] that represents a collection of messages.
/// </summary>
/// <param name="partitionId">The partition these messages belong to.</param>
/// <param name="decoder">The decoder positioned at the start of the buffer</param>
/// <param name="messageSetSize">Declared size of the message set in bytes.</param>
/// <returns>The messages</returns>
internal static List<Message> DecodeMessageSet(int partitionId, KafkaDecoder decoder, int messageSetSize)
{
    var numberOfBytes = messageSetSize;
    var messages = new List<Message>();
    while (numberOfBytes > 0) {
        // Not even a full header remains: the set is truncated, stop cleanly.
        if (numberOfBytes < MessageHeaderSize) {
            break;
        }
        var offset = decoder.ReadInt64();
        var messageSize = decoder.ReadInt32();
        if (messageSetSize - MessageHeaderSize < messageSize) {
            // This message is too big to fit in the buffer so we will never get it
            throw new BufferUnderRunException(numberOfBytes, messageSize);
        }
        numberOfBytes -= MessageHeaderSize;
        // Partial message at the end of the buffer: stop and let the caller re-fetch.
        if (numberOfBytes < messageSize) {
            break;
        }
        var message = DecodeMessage(offset, partitionId, decoder, messageSize);
        messages.Add(message);
        numberOfBytes -= messageSize;
    }
    return messages;
}
/// <summary>
/// Reads one broker entry: broker id, host, then port. Named locals make the
/// left-to-right read order explicit.
/// </summary>
internal static Broker Decode(KafkaDecoder decoder)
{
    var brokerId = decoder.ReadInt32();
    var host = decoder.ReadString();
    var port = decoder.ReadInt32();
    return new Broker(brokerId, host, port);
}