/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    var result = new List<BufferedMessageSet>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                return BufferedMessageSet.ParseMultiFrom(
                    conn.Reader,
                    size,
                    request.ConsumerRequests.Count,
                    request.ConsumerRequests.Select(x => x.Offset).ToList());
            }
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
    }

    return result;
}
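A rough usage sketch for MultiFetch, assuming a Consumer built from a ConsumerConfiguration as in the ConsumerFetchMessage test later in this section, and assuming the consumer implementation exposes MultiFetch as the simpleConsumer in FetcherRunnable does. The topic names, partitions, and offsets are illustrative; per the signature above, the result list corresponds positionally to the request list:

var consumer = new Consumer(consumerConfig);
var requests = new List<FetchRequest>
{
    new FetchRequest("topic-a", 0, 0), // illustrative topic/partition/offset
    new FetchRequest("topic-b", 0, 0)
};
IList<BufferedMessageSet> sets = consumer.MultiFetch(new MultiFetchRequest(requests));
for (int i = 0; i < sets.Count; i++)
{
    // sets[i] holds the messages fetched for requests[i]
    foreach (var message in sets[i])
    {
        Console.WriteLine(message.Message);
    }
}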
private int SendMessages(int messagesPerNode, List<SyncProducerConfiguration> configs)
{
    var count = 0;
    foreach (var syncProducerConfiguration in configs)
    {
        using (var producer = new SyncProducer(syncProducerConfiguration))
        {
            var messageList = new List<Message>();
            for (int i = 0; i < messagesPerNode; i++)
            {
                string payload1 = "kafka " + i.ToString();
                byte[] payloadData1 = Encoding.UTF8.GetBytes(payload1);
                var msg1 = new Message(payloadData1);
                messageList.Add(msg1);
            }

            var mSet = new BufferedMessageSet(CompressionCodecs.NoCompressionCodec, messageList);
            var request = new ProducerRequest(this.CurrentTestTopic, 0, mSet);
            producer.Send(request);
            count += mSet.Messages.Count();
        }
    }

    return count;
}
public PartitionData(int partition, ErrorMapping error, BufferedMessageSet messages)
{
    this.Partition = partition;
    this.MessageSet = messages;
    this.Error = error;
    this.HighWaterMark = messages.HighwaterOffset;
}
/// <summary>
/// Fetch a set of messages from a topic.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, and maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                return BufferedMessageSet.ParseFrom(conn.Reader, size, request.Offset);
            }
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
    }

    return null;
}
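Because the offset is caller-maintained (per the remark above), a simple polling loop advances its own offset by the number of bytes it has read, mirroring what the PartitionTopicInfo.Add overloads later in this section do with Interlocked.Add. A minimal sketch, assuming a Consumer built from a ConsumerConfiguration; the topic name and starting offset are illustrative:

var consumer = new Consumer(consumerConfig);
long offset = 0; // caller-maintained byte offset into the partition log
while (true)
{
    BufferedMessageSet response = consumer.Fetch(new FetchRequest("my-topic", 0, offset));
    if (response == null || response.SetSize == 0)
    {
        break; // nothing fetched; a real consumer would back off and retry
    }

    foreach (var message in response)
    {
        Console.WriteLine(message.Message);
    }

    offset += response.SetSize; // advance by bytes consumed
}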
public void ShouldAbleToWriteMessageSetWithPartialMessage()
{
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);
    var msg1 = new Message(new byte[101]) { Offset = 0 };
    var msg2 = new Message(new byte[102]) { Offset = 1 };
    var set = new BufferedMessageSet(new List<Message>() { msg1, msg2 }, 0);
    set.WriteTo(writer);

    // Writing partial message
    writer.Write(3L);
    writer.Write(100);
    writer.Write(new byte[10]);

    var size = (int)stream.Position;
    stream.Seek(0, SeekOrigin.Begin);
    var reader = new KafkaBinaryReader(stream);
    var newSet = BufferedMessageSet.ParseFrom(reader, size, 0);
    var messages = newSet.Messages.ToList();
    messages.Count().Should().Be(2);
    messages[0].Payload.Count().Should().Be(101);
    messages[1].Payload.Count().Should().Be(102);
}
public void BufferedMessageSetWriteToValidSequence()
{
    byte[] messageBytes = { 1, 2, 3, 4, 5 };
    var msg1 = new Message(messageBytes) { Offset = 0 };
    var msg2 = new Message(messageBytes) { Offset = 1 };
    MessageSet messageSet = new BufferedMessageSet(new List<Message>() { msg1, msg2 }, 0);
    var ms = new MemoryStream();
    messageSet.WriteTo(ms);

    var reader = new KafkaBinaryReader(ms);
    int baseOffset = 0;
    for (int i = 0; i < 2; ++i)
    {
        reader.ReadInt64().Should().Be(i); // offset
        var msgLength = reader.ReadInt32(); // size
        msgLength.Should().Be(Message.DefaultHeaderSize + msg1.PayloadSize);
        reader.ReadUInt32().Should().Be(
            Crc32Hasher.ComputeCrcUint32(ms.GetBuffer(), baseOffset + 8 + 4 + 4, msgLength - 4));
        reader.ReadByte().Should().Be(0); // magic
        reader.ReadByte().Should().Be(msg1.Attributes);
        reader.ReadInt32().Should().Be(-1); // key length
        reader.ReadInt32().Should().Be(messageBytes.Length); // payload length
        reader.ReadBytes(messageBytes.Length).SequenceEqual(messageBytes).Should().BeTrue();
        baseOffset += 8 + 4 + msgLength;
    }
}
public void ShouldParseEmptyMessageSet()
{
    var stream = new MemoryStream();
    var reader = new KafkaBinaryReader(stream);
    var newSet = BufferedMessageSet.ParseFrom(reader, 0, 0);
    var messages = newSet.Messages.ToList();
    messages.Count().Should().Be(0);
}
public void BufferedMessageSetWriteToValidSequence()
{
    byte[] messageBytes = new byte[] { 1, 2, 3, 4, 5 };
    Message msg1 = new Message(messageBytes);
    Message msg2 = new Message(messageBytes);
    MessageSet messageSet = new BufferedMessageSet(new List<Message>() { msg1, msg2 });
    MemoryStream ms = new MemoryStream();
    messageSet.WriteTo(ms);

    // first message
    byte[] messageLength = new byte[MessageLengthPartLength];
    Array.Copy(ms.ToArray(), MessageLengthPartOffset, messageLength, 0, MessageLengthPartLength);
    if (BitConverter.IsLittleEndian)
    {
        Array.Reverse(messageLength);
    }

    Assert.AreEqual(
        MagicNumberPartLength + AttributesPartLength + ChecksumPartLength + messageBytes.Length,
        BitConverter.ToInt32(messageLength, 0));
    Assert.AreEqual(1, ms.ToArray()[MagicNumberPartOffset]); // default magic number should be 1

    byte[] checksumPart = new byte[ChecksumPartLength];
    Array.Copy(ms.ToArray(), ChecksumPartOffset, checksumPart, 0, ChecksumPartLength);
    Assert.AreEqual(Crc32Hasher.Compute(messageBytes), checksumPart);

    byte[] dataPart = new byte[messageBytes.Length];
    Array.Copy(ms.ToArray(), DataPartOffset, dataPart, 0, messageBytes.Length);
    Assert.AreEqual(messageBytes, dataPart);

    // second message
    int secondMessageOffset = MessageLengthPartLength + MagicNumberPartLength + AttributesPartLength +
                              ChecksumPartLength + messageBytes.Length;
    messageLength = new byte[MessageLengthPartLength];
    Array.Copy(ms.ToArray(), secondMessageOffset + MessageLengthPartOffset, messageLength, 0, MessageLengthPartLength);
    if (BitConverter.IsLittleEndian)
    {
        Array.Reverse(messageLength);
    }

    Assert.AreEqual(
        MagicNumberPartLength + AttributesPartLength + ChecksumPartLength + messageBytes.Length,
        BitConverter.ToInt32(messageLength, 0));
    Assert.AreEqual(1, ms.ToArray()[secondMessageOffset + MagicNumberPartOffset]); // default magic number should be 1

    checksumPart = new byte[ChecksumPartLength];
    Array.Copy(ms.ToArray(), secondMessageOffset + ChecksumPartOffset, checksumPart, 0, ChecksumPartLength);
    Assert.AreEqual(Crc32Hasher.Compute(messageBytes), checksumPart);

    dataPart = new byte[messageBytes.Length];
    Array.Copy(ms.ToArray(), secondMessageOffset + DataPartOffset, dataPart, 0, messageBytes.Length);
    Assert.AreEqual(messageBytes, dataPart);
}
internal static PartitionData ParseFrom(KafkaBinaryReader reader)
{
    var partition = reader.ReadInt32();
    var error = reader.ReadInt16();
    var highWatermark = reader.ReadInt64(); // read to advance the stream past the high watermark field
    var messageSetSize = reader.ReadInt32();
    var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);
    return new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet);
}
public ProducerRequest(string topic, int partition, BufferedMessageSet messages)
{
    Guard.NotNull(messages, "messages");
    int length = GetRequestLength(topic, messages.SetSize);
    this.RequestBuffer = new BoundedBuffer(length);
    this.Topic = topic;
    this.Partition = partition;
    this.MessageSet = messages;
    this.WriteTo(this.RequestBuffer);
}
public int Add(BufferedMessageSet messages, long fetchOffset)
{
    int size = messages.SetSize;
    if (size > 0)
    {
        long newOffset = Interlocked.Add(ref this.fetchedOffset, size);
        Logger.Debug("Updated fetch offset of " + this + " to " + newOffset);
        this.chunkQueue.Add(new FetchedDataChunk(messages, this, fetchOffset));
    }

    return size;
}
public void SetSizeValid()
{
    byte[] messageBytes = new byte[] { 1, 2, 3, 4, 5 };
    Message msg1 = new Message(messageBytes);
    Message msg2 = new Message(messageBytes);
    MessageSet messageSet = new BufferedMessageSet(new List<Message>() { msg1, msg2 });
    Assert.AreEqual(
        2 * (MessageLengthPartLength + MagicNumberPartLength + AttributesPartLength + ChecksumPartLength + messageBytes.Length),
        messageSet.SetSize);
}
public void SetSizeValid()
{
    byte[] messageBytes = new byte[] { 1, 2, 3, 4, 5 };
    Message msg1 = new Message(messageBytes);
    Message msg2 = new Message(messageBytes);
    MessageSet messageSet = new BufferedMessageSet(new List<Message>() { msg1, msg2 }, 0);
    Assert.AreEqual(
        2 * (8 + 4 + Message.DefaultHeaderSize + messageBytes.Length),
        messageSet.SetSize);
}
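The expected size in this test can be checked by hand against the field-by-field reads in BufferedMessageSetWriteToValidSequence above: each entry is an 8-byte offset and a 4-byte size field, followed by the message header and the payload. A quick sanity check of that arithmetic; the constant 14 is inferred from the header fields the earlier test reads (crc, magic, attributes, key length, payload length), not taken from the library:

// Per-entry layout: offset(8) + size(4) + header + payload
const int InferredHeaderSize = 4 + 1 + 1 + 4 + 4; // crc + magic + attributes + key length + payload length = 14
int payloadLength = 5;                            // messageBytes.Length in the test
int expectedSetSize = 2 * (8 + 4 + InferredHeaderSize + payloadLength); // = 62 bytes for two messages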
public int Add(BufferedMessageSet messages, long fetchOffset)
{
    int size = messages.SetSize;
    if (size > 0)
    {
        Logger.InfoFormat("Updating fetch offset = {0} with size = {1}", this.fetchedOffset, size);
        this.chunkQueue.Add(new FetchedDataChunk(messages, this, fetchOffset));
        long newOffset = Interlocked.Add(ref this.fetchedOffset, size);
        Logger.Debug("Updated fetch offset of " + this + " to " + newOffset);
        Logger.DebugFormat(
            "PartitionTopicInfo: Partition ({0}), ConsumedOffset ({1}), FetchedOffset ({2})",
            this.Partition,
            this.consumedOffset,
            this.fetchedOffset);
    }

    return size;
}
/// <summary>
/// Adds a message set to the queue
/// </summary>
/// <param name="messages">The message set</param>
/// <returns>The set size</returns>
public int Add(BufferedMessageSet messages)
{
    int size = messages.SetSize;
    if (size > 0)
    {
        long offset = messages.Messages.Last().Offset;
        Logger.InfoFormat("{2} : Updating fetch offset = {0} with value = {1}", this.fetchedOffset, offset, this.PartitionId);
        this.chunkQueue.Add(new FetchedDataChunk(messages, this, this.fetchedOffset));
        Interlocked.Exchange(ref this.fetchedOffset, offset);
        Logger.Debug("Updated fetch offset of " + this + " to " + offset);
    }

    return size;
}
public void CreateCompressedBufferedMessageSet()
{
    string testMessage = "TestMessage";
    Message message = new Message(Encoding.UTF8.GetBytes(testMessage));
    BufferedMessageSet bms = new BufferedMessageSet(CompressionCodecs.DefaultCompressionCodec, new List<Message>() { message });
    foreach (var bmsMessage in bms.Messages)
    {
        Assert.AreNotEqual(bmsMessage.Payload, message.Payload);
        var decompressedBms = CompressionUtils.Decompress(bmsMessage);
        foreach (var decompressedMessage in decompressedBms.Messages)
        {
            Assert.AreEqual(message.ToString(), decompressedMessage.ToString());
        }
    }
}
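On the producing side, a compressed set plugs into the same SyncProducer path as the uncompressed set in SendMessages above. A minimal sketch, assuming a SyncProducerConfiguration is at hand; the topic name and payload are illustrative:

using (var producer = new SyncProducer(syncProducerConfiguration))
{
    var messages = new List<Message> { new Message(Encoding.UTF8.GetBytes("hello")) };

    // The codec overload wraps the batch in compressed wrapper message(s),
    // which is why the payloads in the test above no longer match the originals.
    var compressedSet = new BufferedMessageSet(CompressionCodecs.DefaultCompressionCodec, messages);
    producer.Send(new ProducerRequest("my-topic", 0, compressedSet));
}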
public void ConsumerFetchMessage()
{
    var consumerConfig = this.ConsumerConfig1;
    ProducerSendsMessage();
    Thread.Sleep(1000);
    IConsumer consumer = new Consumer(consumerConfig);
    var request = new FetchRequest(CurrentTestTopic, 0, 0);
    BufferedMessageSet response = consumer.Fetch(request);
    Assert.NotNull(response);

    int count = 0;
    foreach (var message in response)
    {
        count++;
        Console.WriteLine(message.Message);
    }

    Assert.AreEqual(2, count);
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    KafkaConnection conn = null;
    int size = 0;
    var result = new List<BufferedMessageSet>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            size = conn.Reader.ReadInt32();
            result = BufferedMessageSet.ParseMultiFrom(
                conn.Reader,
                size,
                request.ConsumerRequests.Count,
                request.ConsumerRequests.Select(x => x.Offset).ToList()) as List<BufferedMessageSet>;
            break;
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
        finally
        {
            KafkaClusterConnectionPool.ReleaseConnection(conn);
        }
    }

    return result;
}
public void ShouldAbleToEnumerateMessages()
{
    var msg1 = new Message(new byte[101]) { Offset = 0 };
    var msg2 = new Message(new byte[102]) { Offset = 1 };
    var set = new BufferedMessageSet(new List<Message>() { msg1, msg2 }, 0);
    set.MoveNext().Should().BeTrue();
    set.Current.Message.Payload.Length.Should().Be(101);
    set.Current.Message.Offset.Should().Be(0);
    set.MoveNext().Should().BeTrue();
    set.Current.Message.Payload.Length.Should().Be(102);
    set.Current.Message.Offset.Should().Be(1);
    set.MoveNext().Should().BeFalse();
}
/// <summary>
/// Fetch a set of messages from a topic.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, and maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    short tryCounter = 1;
    KafkaConnection conn = null;
    int size = 0;
    BufferedMessageSet messageSet = null;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            size = conn.Reader.ReadInt32();
            messageSet = BufferedMessageSet.ParseFrom(conn.Reader, size, request.Offset);
            break;
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
        finally
        {
            KafkaClusterConnectionPool.ReleaseConnection(conn);
        }
    }

    return messageSet;
}
public BufferedMessageSet MessageSet(string topic, int partition)
{
    var messageSet = new BufferedMessageSet(Enumerable.Empty<Message>(), partition);
    if (TopicDataDict.ContainsKey(topic))
    {
        var topicData = TopicDataDict[topic];
        if (topicData != null)
        {
            var data = TopicData.FindPartition(topicData.PartitionData, partition);
            if (data != null)
            {
                messageSet = new BufferedMessageSet(data.MessageSet.Messages, (short)data.Error, partition);
                messageSet.HighwaterOffset = data.HighWaterMark;
            }
            else
            {
                Logger.WarnFormat("Partition data was not found for partition {0}.", partition);
            }
        }
    }

    return messageSet;
}
/// <summary>
/// Method to be used for starting a new thread
/// </summary>
internal void Run()
{
    foreach (var partitionTopicInfo in partitionTopicInfos)
    {
        Logger.InfoFormat(
            CultureInfo.CurrentCulture,
            "{0} start fetching topic: {1} part: {2} offset: {3} from {4}:{5}",
            this.name,
            partitionTopicInfo.Topic,
            partitionTopicInfo.Partition.PartId,
            partitionTopicInfo.GetFetchOffset(),
            this.broker.Host,
            this.broker.Port);
    }

    try
    {
        while (!this.shouldStop)
        {
            var requestList = new List<FetchRequest>();
            foreach (var partitionTopicInfo in this.partitionTopicInfos)
            {
                var singleRequest = new FetchRequest(
                    partitionTopicInfo.Topic,
                    partitionTopicInfo.Partition.PartId,
                    partitionTopicInfo.GetFetchOffset(),
                    this.config.MaxFetchSize);
                requestList.Add(singleRequest);
            }

            Logger.Debug("Fetch request: " + string.Join(", ", requestList.Select(x => x.ToString())));
            var request = new MultiFetchRequest(requestList);
            var response = this.simpleConsumer.MultiFetch(request);
            int read = 0;
            var items = this.partitionTopicInfos.Zip(
                response,
                (x, y) => new Tuple<PartitionTopicInfo, BufferedMessageSet>(x, y));
            foreach (Tuple<PartitionTopicInfo, BufferedMessageSet> item in items)
            {
                BufferedMessageSet messages = item.Item2;
                PartitionTopicInfo info = item.Item1;
                try
                {
                    bool done = false;
                    if (messages.ErrorCode == ErrorMapping.OffsetOutOfRangeCode)
                    {
                        Logger.InfoFormat(CultureInfo.CurrentCulture, "offset {0} out of range", info.GetFetchOffset());

                        // see if we can fix this error by resetting the offset
                        var resetOffset = this.ResetConsumerOffsets(info.Topic, info.Partition);
                        if (resetOffset >= 0)
                        {
                            info.ResetFetchOffset(resetOffset);
                            info.ResetConsumeOffset(resetOffset);
                            done = true;
                        }
                    }

                    if (!done)
                    {
                        read += info.Add(messages, info.GetFetchOffset());
                    }
                }
                catch (Exception ex)
                {
                    if (!shouldStop)
                    {
                        Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable for {0}: {1}", info, ex);
                    }

                    throw;
                }
            }

            Logger.Info("Fetched bytes: " + read);
            if (read == 0)
            {
                Logger.DebugFormat(CultureInfo.CurrentCulture, "backing off {0} ms", this.config.BackOffIncrement);
                Thread.Sleep(this.config.BackOffIncrement);
            }
        }
    }
    catch (Exception ex)
    {
        if (shouldStop)
        {
            Logger.InfoFormat(CultureInfo.CurrentCulture, "FetcherRunnable {0} interrupted", this);
        }
        else
        {
            Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable {0}", ex);
        }
    }

    Logger.InfoFormat(CultureInfo.CurrentCulture, "stopping fetcher {0} to host {1}", this.name, this.broker.Host);
}
/// <summary>
/// Method to be used for starting a new thread
/// </summary>
internal void Run()
{
    foreach (PartitionTopicInfo partitionTopicInfo in _partitionTopicInfos)
    {
        Logger.InfoFormat(
            "{0} start fetching topic: {1} part: {2} offset: {3} from {4}:{5}",
            _name,
            partitionTopicInfo.Topic,
            partitionTopicInfo.PartitionId,
            partitionTopicInfo.NextRequestOffset,
            _broker.Host,
            _broker.Port);
    }

    int reqId = 0;
    while (!_shouldStop && _partitionTopicInfos.Any())
    {
        try
        {
            IEnumerable<PartitionTopicInfo> fetchablePartitionTopicInfos =
                _partitionTopicInfos.Where(pti => pti.NextRequestOffset - pti.ConsumeOffset < _fetchBufferLength);

            long read = 0;
            if (fetchablePartitionTopicInfos.Any())
            {
                FetchRequestBuilder builder = new FetchRequestBuilder()
                    .CorrelationId(reqId)
                    .ClientId(_config.ConsumerId ?? _name)
                    .MaxWait(0)
                    .MinBytes(0);
                fetchablePartitionTopicInfos.ForEach(pti =>
                    builder.AddFetch(pti.Topic, pti.PartitionId, pti.NextRequestOffset, _config.FetchSize));

                FetchRequest fetchRequest = builder.Build();
                Logger.Debug("Sending fetch request: " + fetchRequest);
                FetchResponse response = _simpleConsumer.Fetch(fetchRequest);
                Logger.Debug("Fetch request completed");
                var partitionsWithErrors = new List<PartitionTopicInfo>();
                foreach (PartitionTopicInfo partitionTopicInfo in fetchablePartitionTopicInfos)
                {
                    BufferedMessageSet messages = response.MessageSet(partitionTopicInfo.Topic, partitionTopicInfo.PartitionId);
                    switch (messages.ErrorCode)
                    {
                        case (short)ErrorMapping.NoError:
                            int bytesRead = partitionTopicInfo.Add(messages);

                            // TODO: The highwater offset on the message set is the end of the log partition.
                            // If the message retrieved is -1 of that offset, we are at the end.
                            if (messages.Messages.Any())
                            {
                                partitionTopicInfo.NextRequestOffset = messages.Messages.Last().Offset + 1;
                                read += bytesRead;
                            }
                            else
                            {
                                Logger.DebugFormat("No message returned by FetchRequest: {0}", fetchRequest.ToString());
                            }

                            break;
                        case (short)ErrorMapping.OffsetOutOfRangeCode:
                            try
                            {
                                Logger.InfoFormat("offset for {0} out of range", partitionTopicInfo);
                                long resetOffset = ResetConsumerOffsets(partitionTopicInfo.Topic, partitionTopicInfo.PartitionId);
                                if (resetOffset >= 0)
                                {
                                    partitionTopicInfo.FetchOffset = resetOffset;
                                    partitionTopicInfo.ConsumeOffset = resetOffset;
                                    Logger.InfoFormat("{0} marked as done.", partitionTopicInfo);
                                }
                            }
                            catch (Exception ex)
                            {
                                Logger.ErrorFormat("Error getting offsets for partition {0} : {1}", partitionTopicInfo.PartitionId, ex.FormatException());
                                partitionsWithErrors.Add(partitionTopicInfo);
                            }

                            break;
                        default:
                            Logger.ErrorFormat(
                                "Error returned from broker {2} for partition {0} : KafkaErrorCode: {1}",
                                partitionTopicInfo.PartitionId,
                                messages.ErrorCode,
                                partitionTopicInfo.BrokerId);
                            partitionsWithErrors.Add(partitionTopicInfo);
                            break;
                    }
                }

                reqId = reqId == int.MaxValue ? 0 : reqId + 1;
                if (partitionsWithErrors.Any())
                {
                    RemovePartitionsFromProcessing(partitionsWithErrors);
                }
            }

            if (read > 0)
            {
                Logger.Debug("Fetched bytes: " + read);
            }

            if (read == 0)
            {
                Logger.DebugFormat("backing off {0} ms", _config.BackOffIncrement);
                Thread.Sleep(_config.BackOffIncrement);
            }
        }
        catch (Exception ex)
        {
            if (_shouldStop)
            {
                Logger.InfoFormat("FetcherRunnable {0} interrupted", this);
            }
            else
            {
                Logger.ErrorFormat("error in FetcherRunnable {0}", ex.FormatException());
            }
        }
    }

    Logger.InfoFormat("stopping fetcher {0} to host {1}", _name, _broker.Host);
}
public void GetBytesValidStructure()
{
    string topicName = "topic";
    int correlationId = 1;
    string clientId = "TestClient";
    short requiredAcks = 5;
    int ackTimeout = 345;
    var partition = 2;
    short error = 0;
    var payload = Encoding.UTF8.GetBytes("testMessage");

    BufferedMessageSet messageSet = new BufferedMessageSet(new List<Message>() { new Message(payload) }, 0);
    var partitionData = new PartitionData(partition, ErrorMapper.ToError(error), messageSet);
    var topicData = new TopicData(topicName, new List<PartitionData>() { partitionData });
    var request = new ProducerRequest(correlationId, clientId, requiredAcks, ackTimeout, new List<TopicData>() { topicData });

    int requestSize = 2 +                                           // request type id
                      2 +                                           // version id
                      4 +                                           // correlation id
                      request.GetShortStringWriteLength(clientId) + // actual client id
                      2 +                                           // required acks
                      4 +                                           // ack timeout
                      4 +                                           // data count
                      // === data part
                      request.GetShortStringWriteLength(topicName) + // topic
                      4 +                                            // partition data count
                      4 +                                            // partition id
                      4 +                                            // message set size
                      messageSet.SetSize;

    var ms = new MemoryStream();
    request.WriteTo(ms);
    byte[] bytes = ms.ToArray();
    Assert.IsNotNull(bytes);

    // add 4 bytes for the length of the message at the beginning
    Assert.AreEqual(requestSize + 4, bytes.Length);

    // first 4 bytes = the message length
    Assert.AreEqual(requestSize, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Take(4).ToArray<byte>()), 0));

    // next 2 bytes = the request type
    Assert.AreEqual((short)RequestTypes.Produce, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(4).Take(2).ToArray<byte>()), 0));

    // next 2 bytes = the version id
    Assert.AreEqual((short)ProducerRequest.CurrentVersion, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(6).Take(2).ToArray<byte>()), 0));

    // next 4 bytes = the correlation id
    Assert.AreEqual(correlationId, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(8).Take(4).ToArray<byte>()), 0));

    // next 2 bytes = the client id length
    Assert.AreEqual((short)clientId.Length, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(12).Take(2).ToArray<byte>()), 0));

    // next few bytes = the client id
    Assert.AreEqual(clientId, Encoding.ASCII.GetString(bytes.Skip(14).Take(clientId.Length).ToArray<byte>()));

    // next 2 bytes = the required acks
    Assert.AreEqual((short)requiredAcks, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(14 + clientId.Length).Take(2).ToArray<byte>()), 0));

    // next 4 bytes = the ack timeout
    Assert.AreEqual(ackTimeout, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(16 + clientId.Length).Take(4).ToArray<byte>()), 0));

    // next 4 bytes = the data count
    Assert.AreEqual(1, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(20 + clientId.Length).Take(4).ToArray<byte>()), 0));

    // next 2 bytes = the topic length
    Assert.AreEqual((short)topicName.Length, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(24 + clientId.Length).Take(2).ToArray<byte>()), 0));

    // next few bytes = the topic
    Assert.AreEqual(topicName, Encoding.ASCII.GetString(bytes.Skip(26 + clientId.Length).Take(topicName.Length).ToArray<byte>()));

    // next 4 bytes = the partition data count
    Assert.AreEqual(topicData.PartitionData.Count(), BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(26 + clientId.Length + topicName.Length).Take(4).ToArray<byte>()), 0));

    // next 4 bytes = the partition
    Assert.AreEqual(partition, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(30 + clientId.Length + topicName.Length).Take(4).ToArray<byte>()), 0));

    // skipping MessageSet check - this could be done separately in another unit test
}
public PartitionData(int partition, BufferedMessageSet messages)
    : this(partition, (short)ErrorMapping.NoError, messages)
{
}
public void CreateCompressedBufferedMessageSet()
{
    string testMessage = "TestMessage";
    Message message = new Message(Encoding.UTF8.GetBytes(testMessage));
    BufferedMessageSet bms = new BufferedMessageSet(CompressionCodecs.DefaultCompressionCodec, new List<Message>() { message }, 0);
    foreach (var bmsMessage in bms.Messages)
    {
        Assert.AreNotEqual(bmsMessage.Payload, message.Payload);
        var decompressedBms = CompressionUtils.Decompress(bmsMessage, 0);
        foreach (var decompressedMessage in decompressedBms.Messages)
        {
            Assert.AreEqual(message.ToString(), decompressedMessage.ToString());
        }
    }
}
public FetchedDataChunk(BufferedMessageSet messages, PartitionTopicInfo topicInfo, long fetchOffset)
{
    this.Messages = messages;
    this.TopicInfo = topicInfo;
    this.FetchOffset = fetchOffset;
}