/// <summary>
/// Fetch a set of messages from a topic.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    // Retry up to NumberOfTries times, opening a fresh connection per attempt.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var connection = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                connection.Write(request);
                int payloadSize = connection.Reader.ReadInt32();
                return BufferedMessageSet.ParseFrom(connection.Reader, payloadSize, request.Offset);
            }
        }
        catch (Exception ex)
        {
            // Out of retries: surface the last failure to the caller.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
    }

    // Only reachable when NumberOfTries < 1.
    return null;
}
public void ShouldAbleToWriteMessageSetWithPartialMessage()
{
    // Arrange: serialize a two-message set, then append a truncated third message.
    var stream = new MemoryStream();
    var writer = new KafkaBinaryWriter(stream);

    var first = new Message(new byte[101]) { Offset = 0 };
    var second = new Message(new byte[102]) { Offset = 1 };
    new BufferedMessageSet(new List<Message>() { first, second }, 0).WriteTo(writer);

    // Writing partial message: header fields followed by only 10 payload bytes.
    writer.Write(3L);
    writer.Write(100);
    writer.Write(new byte[10]);

    var totalBytes = (int)stream.Position;
    stream.Seek(0, SeekOrigin.Begin);

    // Act: parse the buffer back, including the dangling partial message.
    var reader = new KafkaBinaryReader(stream);
    var parsedSet = BufferedMessageSet.ParseFrom(reader, totalBytes, 0);
    var parsedMessages = parsedSet.Messages.ToList();

    // Assert: only the two complete messages survive; the partial one is dropped.
    parsedMessages.Count().Should().Be(2);
    parsedMessages[0].Payload.Count().Should().Be(101);
    parsedMessages[1].Payload.Count().Should().Be(102);
}
public void ShouldParseEmptyMessageSet()
{
    // Arrange: an empty stream, parsed with a declared size of zero bytes.
    var emptyStream = new MemoryStream();
    var reader = new KafkaBinaryReader(emptyStream);

    // Act
    var parsedSet = BufferedMessageSet.ParseFrom(reader, 0, 0);

    // Assert: no messages come out of a zero-length set.
    parsedSet.Messages.ToList().Count().Should().Be(0);
}
/// <summary>
/// Reads one partition entry of a fetch response from the wire:
/// partition id (int32), error code (int16), high watermark (int64),
/// message-set size (int32), then the message set itself.
/// </summary>
/// <param name="reader">Positioned at the start of a partition entry.</param>
/// <returns>The parsed <c>PartitionData</c> (the high watermark is read to advance the stream but not stored).</returns>
internal static PartitionData ParseFrom(KafkaBinaryReader reader)
{
    var partition = reader.ReadInt32();
    var error = reader.ReadInt16();
    // Read (and discard) the high-watermark field to keep the stream aligned.
    var highWatermark = reader.ReadInt64();
    var messageSetSize = reader.ReadInt32();
    // NOTE(review): the third argument of BufferedMessageSet.ParseFrom is used as a
    // starting *offset* at every other call site (request.Offset / 0), yet the
    // partition id is passed here — confirm this is intentional.
    var bufferedMessageSet = BufferedMessageSet.ParseFrom(reader, messageSetSize, partition);
    return (new PartitionData(partition, ErrorMapper.ToError(error), bufferedMessageSet));
}
/// <summary>
/// Fetch a set of messages from a topic.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages, or null when NumberOfTries &lt; 1.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    short tryCounter = 1;
    BufferedMessageSet messageSet = null;

    while (tryCounter <= this.config.NumberOfTries)
    {
        // Scope the connection to a single attempt: the original declared it outside
        // the loop, so a failing GetConnection on a later iteration caused the
        // finally block to release the PREVIOUS (already released) connection again.
        KafkaConnection conn = null;
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            int size = conn.Reader.ReadInt32();
            messageSet = BufferedMessageSet.ParseFrom(conn.Reader, size, request.Offset);
            break;
        }
        catch (Exception ex)
        {
            //// if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
        finally
        {
            // Guard against GetConnection throwing before conn was assigned —
            // the original passed null to ReleaseConnection in that case.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
                conn = null;
            }
        }
    }

    return messageSet;
}