/// <summary>
/// Sends messages via a multi-produce, then verifies that a multi-fetch with three
/// identical requests returns three message sets of four messages each.
/// </summary>
public void ConsumerMultiFetchGetsMessage()
{
    var consumerConfig = this.ConsumerConfig1;
    ProducerSendMultiRequest();

    // Give the broker time to persist the produced messages before fetching.
    Thread.Sleep(2000);

    IConsumer consumer = new Consumer(consumerConfig);
    var fetchRequests = new List<FetchRequest>
    {
        new FetchRequest(CurrentTestTopic, 0, 0),
        new FetchRequest(CurrentTestTopic, 0, 0),
        new FetchRequest(CurrentTestTopic, 0, 0)
    };
    IList<BufferedMessageSet> sets = consumer.MultiFetch(new MultiFetchRequest(fetchRequests));

    Assert.AreEqual(3, sets.Count);
    for (int i = 0; i < sets.Count; i++)
    {
        IEnumerable<Message> fetched = sets[i].Messages;
        Assert.AreEqual(4, fetched.Count());
        Console.WriteLine(string.Format("Request #{0}-->", i));
        foreach (Message message in fetched)
        {
            Console.WriteLine(message.ToString());
        }
    }
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    var result = new List<BufferedMessageSet>();

    // Retry the whole connect/write/read cycle; the final failed attempt rethrows.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            // A fresh connection per attempt; disposed even when parsing throws.
            using (var connection = new KafkaConnection(
                       this.host,
                       this.port,
                       this.config.BufferSize,
                       this.config.SocketTimeout))
            {
                connection.Write(request);
                int size = connection.Reader.ReadInt32();
                return BufferedMessageSet.ParseMultiFrom(
                    connection.Reader,
                    size,
                    request.ConsumerRequests.Count,
                    request.ConsumerRequests.Select(x => x.Offset).ToList());
            }
        }
        catch (Exception ex)
        {
            // Out of retries: surface the failure to the caller.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
    }

    // Only reachable when NumberOfTries < 1; callers get an empty result.
    return result;
}
/// <summary>
/// Verifies the on-the-wire layout produced by <see cref="MultiFetchRequest"/>:
/// a 4-byte length prefix, a 2-byte request type, a 2-byte request count,
/// then the serialized fetch requests.
/// </summary>
public void GetBytesValidFormat()
{
    var request = new MultiFetchRequest(new List<FetchRequest>
    {
        new FetchRequest("topic a", 0, 0),
        new FetchRequest("topic a", 0, 0),
        new FetchRequest("topic b", 0, 0),
        new FetchRequest("topic c", 0, 0)
    });

    // format = len(request) + requesttype + requestcount + requestpackage
    // total byte count = 4 + (2 + 2 + 100)
    byte[] bytes = request.GetBytes();
    Assert.IsNotNull(bytes);
    Assert.AreEqual(108, bytes.Length);

    // first 4 bytes = the length of the request (excludes the length prefix itself)
    byte[] lengthField = bytes.Take(4).ToArray<byte>();
    Assert.AreEqual(104, BitConverter.ToInt32(BitWorks.ReverseBytes(lengthField), 0));

    // next 2 bytes = the RequestType, which must be MultiFetch
    byte[] typeField = bytes.Skip(4).Take(2).ToArray<byte>();
    Assert.AreEqual((short)RequestType.MultiFetch, BitConverter.ToInt16(BitWorks.ReverseBytes(typeField), 0));

    // next 2 bytes = the number of fetch requests in the batch
    byte[] countField = bytes.Skip(6).Take(2).ToArray<byte>();
    Assert.AreEqual((short)4, BitConverter.ToInt16(BitWorks.ReverseBytes(countField), 0));
}
/// <summary>
/// Return Partition -> Message
/// </summary>
/// <param name="topic">Topic to fetch from.</param>
/// <param name="offsetByPartition">Starting offset for each partition to read.</param>
/// <param name="blockSize">Maximum block size passed through on each fetch request.</param>
/// <returns>Lazy sequence of (partition, message) pairs, in server response order.</returns>
public IEnumerable<Tuple<Int32, Message>> Load(String topic, Dictionary<Int32, Int32> offsetByPartition, Int32 blockSize)
{
    // One fetch request per partition, all batched into a single multi-fetch.
    var multifetch = new MultiFetchRequest
    {
        FetchRequests = new List<FetchRequest>(offsetByPartition.Count)
    };
    foreach (var pair in offsetByPartition)
    {
        multifetch.FetchRequests.Add(new FetchRequest
        {
            Topic = topic,
            Partition = pair.Key,
            Offset = pair.Value,
            BlockSize = blockSize
        });
    }

    // NOTE: this is an iterator, so nothing is sent until the caller enumerates.
    var response = (MultiFetchResponse)_sender.Send(multifetch);
    foreach (var fetchResponse in response.FetchResponses)
    {
        using (var messageReader = new MessageReader(new BinaryMemoryStream(fetchResponse.Data)))
        {
            foreach (var message in messageReader.ReadAllMessages())
            {
                yield return new Tuple<int, Message>(fetchResponse.Partition, message);
            }
        }
    }
}
/// <summary>
/// A multi-fetch request built from a non-empty list of fetch requests is valid.
/// </summary>
public void IsValidTrue()
{
    var multiRequest = new MultiFetchRequest(new List<FetchRequest>
    {
        new FetchRequest("topic a", 0, 0),
        new FetchRequest("topic a", 0, 0),
        new FetchRequest("topic b", 0, 0),
        new FetchRequest("topic c", 0, 0)
    });

    Assert.IsTrue(multiRequest.IsValid());
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages; empty only if no attempt ran (NumberOfTries &lt; 1).
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose. Connections are taken from and returned to the cluster pool
/// on every attempt; the final failed attempt rethrows to the caller.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    KafkaConnection conn = null;
    int size = 0;
    // BUGFIX: the parse result was previously downcast with "as List<BufferedMessageSet>",
    // which silently turned the result into null whenever ParseMultiFrom returned any
    // other IList implementation. Declaring the local as IList avoids the cast entirely.
    IList<BufferedMessageSet> result = new List<BufferedMessageSet>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            size = conn.Reader.ReadInt32();
            result = BufferedMessageSet.ParseMultiFrom(
                conn.Reader,
                size,
                request.ConsumerRequests.Count,
                request.ConsumerRequests.Select(x => x.Offset).ToList());
            break;
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
        finally
        {
            // BUGFIX: guard against releasing a null connection when GetConnection itself threw.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
            }
        }
    }

    return result;
}
/// <summary>
/// Produces via a multi-request, then consumes the resulting message sets through
/// a three-part multi-fetch and echoes each message to the console.
/// </summary>
public void ConsumerMultiFetchGetsMessage()
{
    ProducerSendMultiRequest();

    var consumer = new Consumer(KafkaServer, KafkaPort);
    var request = new MultiFetchRequest(new List<FetchRequest>
    {
        new FetchRequest("test", 0, 0),
        new FetchRequest("test", 0, 0),
        new FetchRequest("testa", 0, 0)
    });

    List<List<Message>> messages = consumer.Consume(request);
    int requestIndex = 0;
    foreach (List<Message> messageSet in messages)
    {
        Console.WriteLine(string.Format("Request #{0}-->", requestIndex));
        foreach (Message msg in messageSet)
        {
            Console.WriteLine(msg);
        }

        requestIndex++;
    }
}
/// <summary>
/// Writes a multifetch request to the server.
/// </summary>
/// <remarks>
/// Write timeout is defaulted to infinite.
/// </remarks>
/// <param name="request">The <see cref="MultiFetchRequest"/> to send to the server.</param>
public void Write(MultiFetchRequest request)
{
    // Disposal check first, then argument validation, matching the class convention.
    this.EnsuresNotDisposed();
    Guard.NotNull(request, "request");
    byte[] buffer = request.RequestBuffer.GetBuffer();
    this.Write(buffer);
}
/// <summary>
/// A multi-fetch request constructed with a null request list reports itself invalid.
/// </summary>
public void IsValidNullRequests()
{
    var multiRequest = new MultiFetchRequest(null);

    Assert.IsFalse(multiRequest.IsValid());
}
/// <summary>
/// A multi-fetch request constructed with an empty request list reports itself invalid.
/// </summary>
public void IsValidNoRequests()
{
    var emptyRequests = new List<FetchRequest>();
    var multiRequest = new MultiFetchRequest(emptyRequests);

    Assert.IsFalse(multiRequest.IsValid());
}
/// <summary>
/// Executes a multi-fetch operation.
/// </summary>
/// <param name="request">The request to push to Kafka.</param>
/// <returns>
/// A list containing sets of messages. The message sets should match the request order.
/// </returns>
/// <remarks>
/// Wire format parsed here (big-endian, hence the ReverseBytes calls):
/// a 4-byte total length, a 2-byte top-level error code, then per fetch request
/// a 4-byte message-set size, a 2-byte error code, and the message-set bytes.
/// </remarks>
public List<List<Message>> Consume(MultiFetchRequest request)
{
    int fetchRequests = request.ConsumerRequests.Count;
    List<List<Message>> messages = new List<List<Message>>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes of the response give the length of the remaining payload.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);

        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);

            // position tracks the read cursor within 'data'.
            int position = 0;

            // Top-level error code covers the whole multi-fetch response.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            position = position + 2;

            // One message set comes back per fetch request, in request order.
            for (int ix = 0; ix < fetchRequests; ix++)
            {
                messages.Add(new List<Message>());

                int messageSetSize = BitConverter.ToInt32(BitWorks.ReverseBytes(data.Skip(position).Take(4).ToArray<byte>()), 0);
                position = position + 4;

                // Per-request error code; any failure aborts the whole consume.
                errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Skip(position).Take(2).ToArray<byte>()), 0);
                if (errorCode != KafkaException.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                // skip the error code and process the rest
                position = position + 2;

                byte[] messageSetBytes = data.Skip(position).ToArray<byte>().Take(messageSetSize).ToArray<byte>();

                // Each message in the set is a 4-byte size prefix followed by the message body.
                int processed = 0;
                int messageSize = 0;

                // dropped 2 bytes at the end...padding???
                // NOTE(review): the "- 2" below assumes two trailing bytes per set that are
                // not part of any message — TODO confirm against the wire protocol.
                while (processed < messageSetBytes.Length - 2)
                {
                    messageSize = BitConverter.ToInt32(BitWorks.ReverseBytes(messageSetBytes.Skip(processed).Take(4).ToArray<byte>()), 0);
                    messages[ix].Add(Message.ParseFrom(messageSetBytes.Skip(processed).Take(messageSize + 4).ToArray<byte>()));
                    processed += 4 + messageSize;
                }

                position = position + processed;
            }
        }
    }

    return messages;
}
/// <summary>
/// End-to-end check: multi-sends four messages across three topics, then polls a
/// multi-fetch until every topic has data (or the wait budget is exhausted) and
/// verifies counts and message contents per topic.
/// </summary>
public void ProducerSendsAndConsumerReceivesMultiRequest()
{
    var prodConfig = this.SyncProducerConfig1;
    var consumerConfig = this.ConsumerConfig1;

    // Three distinct topics derived from the current test topic name.
    string testTopic1 = CurrentTestTopic + "1";
    string testTopic2 = CurrentTestTopic + "2";
    string testTopic3 = CurrentTestTopic + "3";

    var sourceMessage1 = new Message(Encoding.UTF8.GetBytes("1: TestMessage"));
    var sourceMessage2 = new Message(Encoding.UTF8.GetBytes("2: TestMessage"));
    var sourceMessage3 = new Message(Encoding.UTF8.GetBytes("3: TestMessage"));
    var sourceMessage4 = new Message(Encoding.UTF8.GetBytes("4: TestMessage"));

    // Topic 1 receives two messages; topics 2 and 3 receive one each.
    var requests = new List<ProducerRequest>
    {
        new ProducerRequest(testTopic1, 0, new List<Message> { sourceMessage1 }),
        new ProducerRequest(testTopic1, 0, new List<Message> { sourceMessage2 }),
        new ProducerRequest(testTopic2, 0, new List<Message> { sourceMessage3 }),
        new ProducerRequest(testTopic3, 0, new List<Message> { sourceMessage4 })
    };

    // Capture offsets before producing so the fetch starts at the new messages.
    long currentOffset1 = TestHelper.GetCurrentKafkaOffset(testTopic1, consumerConfig);
    long currentOffset2 = TestHelper.GetCurrentKafkaOffset(testTopic2, consumerConfig);
    long currentOffset3 = TestHelper.GetCurrentKafkaOffset(testTopic3, consumerConfig);

    using (var producer = new SyncProducer(prodConfig))
    {
        producer.MultiSend(requests);
    }

    IConsumer consumer = new Consumer(consumerConfig);
    var request = new MultiFetchRequest(new List<FetchRequest>
    {
        new FetchRequest(testTopic1, 0, currentOffset1),
        new FetchRequest(testTopic2, 0, currentOffset2),
        new FetchRequest(testTopic3, 0, currentOffset3)
    });

    // Poll until all three topics have returned at least one message,
    // or until the overall wait budget runs out.
    IList<BufferedMessageSet> messageSets;
    int totalWaitTimeInMiliseconds = 0;
    int waitSingle = 100;
    while (true)
    {
        Thread.Sleep(waitSingle);
        messageSets = consumer.MultiFetch(request);
        if (messageSets.Count > 2 && messageSets[0].Messages.Count() > 0 &&
            messageSets[1].Messages.Count() > 0 && messageSets[2].Messages.Count() > 0)
        {
            break;
        }

        totalWaitTimeInMiliseconds += waitSingle;
        if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds)
        {
            break;
        }
    }

    // One message set per topic, with the produced messages in send order.
    Assert.AreEqual(3, messageSets.Count);
    Assert.AreEqual(2, messageSets[0].Messages.Count());
    Assert.AreEqual(1, messageSets[1].Messages.Count());
    Assert.AreEqual(1, messageSets[2].Messages.Count());
    Assert.AreEqual(sourceMessage1.ToString(), messageSets[0].Messages.First().ToString());
    Assert.AreEqual(sourceMessage2.ToString(), messageSets[0].Messages.Skip(1).First().ToString());
    Assert.AreEqual(sourceMessage3.ToString(), messageSets[1].Messages.First().ToString());
    Assert.AreEqual(sourceMessage4.ToString(), messageSets[2].Messages.First().ToString());
}
/// <summary>
/// Constructing a multi-fetch request without a request list must fail fast
/// with an <see cref="ArgumentNullException"/>.
/// </summary>
public void ThrowsExceptionWhenNullArgumentPassedToTheConstructor()
{
    Assert.Throws<ArgumentNullException>(() => new MultiFetchRequest(null));
}
/// <summary>
/// Method to be used for starting a new thread. Loops until <c>shouldStop</c> is set:
/// builds one fetch request per owned partition, issues a multi-fetch, feeds each
/// returned message set into its partition's buffer, and backs off when nothing was read.
/// </summary>
internal void Run()
{
    foreach (var partitionTopicInfo in partitionTopicInfos)
    {
        Logger.InfoFormat(
            CultureInfo.CurrentCulture,
            "{0} start fetching topic: {1} part: {2} offset: {3} from {4}:{5}",
            this.name,
            partitionTopicInfo.Topic,
            partitionTopicInfo.Partition.PartId,
            partitionTopicInfo.GetFetchOffset(),
            this.broker.Host,
            this.broker.Port);
    }

    try
    {
        while (!this.shouldStop)
        {
            // One fetch request per partition, resuming from each partition's fetch offset.
            var requestList = new List<FetchRequest>();
            foreach (var partitionTopicInfo in this.partitionTopicInfos)
            {
                var singleRequest = new FetchRequest(
                    partitionTopicInfo.Topic,
                    partitionTopicInfo.Partition.PartId,
                    partitionTopicInfo.GetFetchOffset(),
                    this.config.MaxFetchSize);
                requestList.Add(singleRequest);
            }

            Logger.Debug("Fetch request: " + string.Join(", ", requestList.Select(x => x.ToString())));
            var request = new MultiFetchRequest(requestList);
            var response = this.simpleConsumer.MultiFetch(request);
            int read = 0;

            // Responses come back in request order, so pair each set with its partition info.
            var items = this.partitionTopicInfos.Zip(
                response,
                (x, y) => new Tuple<PartitionTopicInfo, BufferedMessageSet>(x, y));
            foreach (Tuple<PartitionTopicInfo, BufferedMessageSet> item in items)
            {
                BufferedMessageSet messages = item.Item2;
                PartitionTopicInfo info = item.Item1;
                try
                {
                    bool done = false;
                    if (messages.ErrorCode == ErrorMapping.OffsetOutOfRangeCode)
                    {
                        Logger.InfoFormat(CultureInfo.CurrentCulture, "offset {0} out of range", info.GetFetchOffset());
                        //// see if we can fix this error
                        var resetOffset = this.ResetConsumerOffsets(info.Topic, info.Partition);
                        if (resetOffset >= 0)
                        {
                            info.ResetFetchOffset(resetOffset);
                            info.ResetConsumeOffset(resetOffset);
                            done = true;
                        }
                    }

                    if (!done)
                    {
                        read += info.Add(messages, info.GetFetchOffset());
                    }
                }
                catch (Exception ex)
                {
                    if (!shouldStop)
                    {
                        // BUGFIX: 'info' was previously concatenated onto the format string
                        // AFTER the {0} placeholder ("...for {0}" + info), so the rendered
                        // message interleaved the exception and the partition info. Pass both
                        // as format arguments instead.
                        Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable for {0}: {1}", info, ex);
                    }

                    throw;
                }
            }

            Logger.Info("Fetched bytes: " + read);
            if (read == 0)
            {
                // Nothing available on any partition; back off before the next poll.
                Logger.DebugFormat(CultureInfo.CurrentCulture, "backing off {0} ms", this.config.BackOffIncrement);
                Thread.Sleep(this.config.BackOffIncrement);
            }
        }
    }
    catch (Exception ex)
    {
        // A stop request surfacing as an exception is expected; anything else is an error.
        if (shouldStop)
        {
            Logger.InfoFormat(CultureInfo.CurrentCulture, "FetcherRunnable {0} interrupted", this);
        }
        else
        {
            Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable {0}", ex);
        }
    }

    Logger.InfoFormat(CultureInfo.CurrentCulture, "stopping fetcher {0} to host {1}", this.name, this.broker.Host);
}