/// <summary>
/// Verifies the on-the-wire layout of a serialized multi-fetch request:
/// a 4-byte big-endian length prefix, a 2-byte request type, a 2-byte
/// request count, then the serialized fetch requests.
/// </summary>
public void GetBytesValidFormat()
{
    var fetchRequests = new List<FetchRequest>
    {
        new FetchRequest("topic a", 0, 0),
        new FetchRequest("topic a", 0, 0),
        new FetchRequest("topic b", 0, 0),
        new FetchRequest("topic c", 0, 0)
    };
    var request = new MultiFetchRequest(fetchRequests);

    // format = len(request) + requesttype + requestcount + requestpackage
    // total byte count = 4 + (2 + 2 + 100)
    MemoryStream ms = new MemoryStream();
    request.WriteTo(ms);
    byte[] serialized = ms.ToArray();

    Assert.IsNotNull(serialized);
    Assert.AreEqual(108, serialized.Length);

    // Bytes 0-3: length of the remainder of the request (big-endian).
    byte[] lengthField = BitWorks.ReverseBytes(serialized.Take(4).ToArray<byte>());
    Assert.AreEqual(104, BitConverter.ToInt32(lengthField, 0));

    // Bytes 4-5: the request type, which must be MultiFetch.
    byte[] typeField = BitWorks.ReverseBytes(serialized.Skip(4).Take(2).ToArray<byte>());
    Assert.AreEqual((short)RequestTypes.MultiFetch, BitConverter.ToInt16(typeField, 0));

    // Bytes 6-7: the number of bundled fetch requests.
    byte[] countField = BitWorks.ReverseBytes(serialized.Skip(6).Take(2).ToArray<byte>());
    Assert.AreEqual((short)4, BitConverter.ToInt16(countField, 0));
}
/// <summary>
/// Sends a multi-fetch request over the given connection and parses the
/// response into <paramref name="result"/> (one message set per consumer
/// request, in request order).
/// </summary>
/// <param name="conn">The open connection to the broker.</param>
/// <param name="request">The multi-fetch request to send.</param>
/// <param name="result">Cleared, then filled with one <see cref="BufferedMessageSet"/> per request.</param>
/// <exception cref="KafkaException">Thrown when the response carries a non-zero error code.</exception>
private static void MultiFetch(KafkaConnection conn, MultiFetchRequest request, IList<BufferedMessageSet> result)
{
    result.Clear();
    conn.Write(request);

    // The response starts with a 4-byte big-endian length of the payload.
    int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(conn.Read(4)), 0);
    if (dataLength <= 0)
    {
        return;
    }

    byte[] data = conn.Read(dataLength);

    // The first 2 bytes of the payload carry the overall error code.
    int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray()), 0);
    if (errorCode != KafkaException.NoError)
    {
        throw new KafkaException(errorCode);
    }

    // Walk the payload with an offset cursor instead of repeatedly copying the
    // tail with Skip(...).ToArray() (the original was accidentally O(n^2)).
    // Each part is: 4-byte length, 2-byte error code, then (length - 2) bytes of messages.
    int position = 2; // skip the overall error code
    for (int i = 0; i < request.ConsumerRequests.Count; i++)
    {
        int partLength = BitConverter.ToInt32(BitWorks.ReverseBytes(data.Skip(position).Take(4).ToArray()), 0);
        errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Skip(position + 4).Take(2).ToArray()), 0);
        if (errorCode != KafkaException.NoError)
        {
            throw new KafkaException(errorCode);
        }

        // The part length includes its own 2-byte error code, hence partLength - 2 message bytes.
        result.Add(BufferedMessageSet.ParseFrom(data.Skip(position + 6).Take(partLength - 2).ToArray()));
        position += partLength + 4; // advance past the length field plus the part body
    }
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    var fetched = new List<BufferedMessageSet>();
    using (var conn = new KafkaConnection(this.Host, this.Port))
    {
        // Retry on failure up to the configured number of attempts; the last
        // failure propagates to the caller.
        for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
        {
            try
            {
                MultiFetch(conn, request, fetched);
                break; // success — stop retrying
            }
            catch (Exception ex)
            {
                if (attempt == this.config.NumberOfTries)
                {
                    // Maximum number of tries reached: surface the failure.
                    throw;
                }

                Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
            }
        }
    }

    return fetched;
}
/// <summary>
/// Writes a multifetch request to the server.
/// </summary>
/// <remarks>
/// Write timeout is defaulted to infinite.
/// </remarks>
/// <param name="request">The <see cref="MultiFetchRequest"/> to send to the server.</param>
public void Write(MultiFetchRequest request)
{
    this.EnsuresNotDisposed();
    Guard.NotNull(request, "request");

    // Serialize from the request's pre-built buffer and delegate to the raw writer.
    byte[] buffer = request.RequestBuffer.GetBuffer();
    this.Write(buffer);
}
/// <summary>
/// Method to be used for starting a new thread. Loops until signalled to stop,
/// issuing one multi-fetch per iteration covering every owned partition and
/// appending fetched messages to each partition's queue.
/// </summary>
internal void Run()
{
    foreach (var partitionTopicInfo in partitionTopicInfos)
    {
        Logger.InfoFormat(
            CultureInfo.CurrentCulture,
            "{0} start fetching topic: {1} part: {2} offset: {3} from {4}:{5}",
            this.name,
            partitionTopicInfo.Topic,
            partitionTopicInfo.Partition.PartId,
            partitionTopicInfo.GetFetchOffset(),
            this.broker.Host,
            this.broker.Port);
    }

    try
    {
        while (!this.shouldStop)
        {
            // Build one fetch request per owned partition, resuming from its current fetch offset.
            var requestList = new List<FetchRequest>();
            foreach (var partitionTopicInfo in this.partitionTopicInfos)
            {
                var singleRequest = new FetchRequest(
                    partitionTopicInfo.Topic,
                    partitionTopicInfo.Partition.PartId,
                    partitionTopicInfo.GetFetchOffset(),
                    this.config.MaxFetchSize);
                requestList.Add(singleRequest);
            }

            Logger.Debug("Fetch request: " + string.Join(", ", requestList.Select(x => x.ToString())));
            var request = new MultiFetchRequest(requestList);
            var response = this.simpleConsumer.MultiFetch(request);
            int read = 0;

            // Responses come back in request order, so pair them positionally with the partitions.
            var items = this.partitionTopicInfos.Zip(
                response,
                (x, y) => new Tuple<PartitionTopicInfo, BufferedMessageSet>(x, y));
            foreach (Tuple<PartitionTopicInfo, BufferedMessageSet> item in items)
            {
                BufferedMessageSet messages = item.Item2;
                PartitionTopicInfo info = item.Item1;
                try
                {
                    bool done = false;
                    if (messages.ErrorCode == ErrorMapping.OffsetOutOfRangeCode)
                    {
                        Logger.InfoFormat(CultureInfo.CurrentCulture, "offset {0} out of range", info.GetFetchOffset());
                        //// see if we can fix this error
                        var resetOffset = this.ResetConsumerOffsets(info.Topic, info.Partition);
                        if (resetOffset >= 0)
                        {
                            info.ResetFetchOffset(resetOffset);
                            info.ResetConsumeOffset(resetOffset);
                            done = true;
                        }
                    }

                    if (!done)
                    {
                        read += info.Add(messages, info.GetFetchOffset());
                    }
                }
                catch (Exception ex)
                {
                    if (!shouldStop)
                    {
                        // FIX: the original concatenated info onto the format string
                        // ("...for {0}" + info, ex), so {0} was filled by the exception
                        // and the partition info ended up appended to the format text.
                        Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable for {0}: {1}", info, ex);
                    }

                    throw;
                }
            }

            Logger.Info("Fetched bytes: " + read);
            if (read == 0)
            {
                // Nothing arrived this round — back off before polling again.
                Logger.DebugFormat(CultureInfo.CurrentCulture, "backing off {0} ms", this.config.BackOffIncrement);
                Thread.Sleep(this.config.BackOffIncrement);
            }
        }
    }
    catch (Exception ex)
    {
        if (shouldStop)
        {
            // Expected shutdown path: the stop flag interrupted the fetch loop.
            Logger.InfoFormat(CultureInfo.CurrentCulture, "FetcherRunnable {0} interrupted", this);
        }
        else
        {
            Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable {0}", ex);
        }
    }

    Logger.InfoFormat(CultureInfo.CurrentCulture, "stopping fetcher {0} to host {1}", this.name, this.broker.Host);
}
/// <summary>
/// End-to-end test: multi-sends four messages across three topics, then polls
/// with a multi-fetch until all three topics return messages (or the test
/// wait budget is exhausted) and verifies content and counts.
/// </summary>
public void ProducerSendsAndConsumerReceivesMultiRequest()
{
    // Three distinct topics derived from the per-test topic name.
    string testTopic1 = CurrentTestTopic + "1";
    string testTopic2 = CurrentTestTopic + "2";
    string testTopic3 = CurrentTestTopic + "3";

    Message sourceMessage1 = new Message(Encoding.UTF8.GetBytes("1: TestMessage"));
    Message sourceMessage2 = new Message(Encoding.UTF8.GetBytes("2: TestMessage"));
    Message sourceMessage3 = new Message(Encoding.UTF8.GetBytes("3: TestMessage"));
    Message sourceMessage4 = new Message(Encoding.UTF8.GetBytes("4: TestMessage"));

    // Topic 1 gets two messages (in two requests); topics 2 and 3 get one each.
    List<ProducerRequest> requests = new List<ProducerRequest>
    {
        new ProducerRequest(testTopic1, 0, new List<Message> { sourceMessage1 }),
        new ProducerRequest(testTopic1, 0, new List<Message> { sourceMessage2 }),
        new ProducerRequest(testTopic2, 0, new List<Message> { sourceMessage3 }),
        new ProducerRequest(testTopic3, 0, new List<Message> { sourceMessage4 })
    };

    var config = new SyncProducerConfig(clientConfig);
    var producer = new SyncProducer(config);

    // Record each topic's offset BEFORE producing, so the fetch below reads
    // only the messages sent by this test.
    long currentOffset1 = TestHelper.GetCurrentKafkaOffset(testTopic1, clientConfig);
    long currentOffset2 = TestHelper.GetCurrentKafkaOffset(testTopic2, clientConfig);
    long currentOffset3 = TestHelper.GetCurrentKafkaOffset(testTopic3, clientConfig);

    producer.MultiSend(requests);

    ConsumerConfig consumerConfig = new ConsumerConfig(clientConfig);
    IConsumer consumer = new Consumers.Consumer(consumerConfig);
    MultiFetchRequest request = new MultiFetchRequest(new List<FetchRequest>
    {
        new FetchRequest(testTopic1, 0, currentOffset1),
        new FetchRequest(testTopic2, 0, currentOffset2),
        new FetchRequest(testTopic3, 0, currentOffset3)
    });

    // Poll until every topic has at least one message, or give up after the
    // maximum wait time; the assertions below then report what was (not) seen.
    IList<BufferedMessageSet> messageSets;
    int totalWaitTimeInMiliseconds = 0;
    int waitSingle = 100;
    while (true)
    {
        Thread.Sleep(waitSingle);
        messageSets = consumer.MultiFetch(request);
        if (messageSets.Count > 2
            && messageSets[0].Messages.Count() > 0
            && messageSets[1].Messages.Count() > 0
            && messageSets[2].Messages.Count() > 0)
        {
            break;
        }
        else
        {
            totalWaitTimeInMiliseconds += waitSingle;
            if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds)
            {
                break; // wait budget exhausted — fall through to the assertions
            }
        }
    }

    // One message set per fetch request, with the expected per-topic counts.
    Assert.AreEqual(3, messageSets.Count);
    Assert.AreEqual(2, messageSets[0].Messages.Count());
    Assert.AreEqual(1, messageSets[1].Messages.Count());
    Assert.AreEqual(1, messageSets[2].Messages.Count());

    // Messages arrive in send order within each topic.
    Assert.AreEqual(sourceMessage1.ToString(), messageSets[0].Messages.First().ToString());
    Assert.AreEqual(sourceMessage2.ToString(), messageSets[0].Messages.Skip(1).First().ToString());
    Assert.AreEqual(sourceMessage3.ToString(), messageSets[1].Messages.First().ToString());
    Assert.AreEqual(sourceMessage4.ToString(), messageSets[2].Messages.First().ToString());
}
/// <summary>
/// Produces test messages, multi-fetches from two topics (the first topic
/// twice), and dumps every returned message to the console.
/// </summary>
public void ConsumerMultiFetchGetsMessage()
{
    ProducerSendMultiRequest();

    ConsumerConfig config = new ConsumerConfig(clientConfig);
    IConsumer cons = new Consumers.Consumer(config);

    var fetchRequests = new List<FetchRequest>
    {
        new FetchRequest(CurrentTestTopic, 0, 0),
        new FetchRequest(CurrentTestTopic, 0, 0),
        new FetchRequest(CurrentTestTopic + "2", 0, 0)
    };
    MultiFetchRequest request = new MultiFetchRequest(fetchRequests);

    IList<BufferedMessageSet> response = cons.MultiFetch(request);

    // Print each message set under a header naming its originating request.
    int requestIndex = 0;
    foreach (BufferedMessageSet messageSet in response)
    {
        Console.WriteLine(string.Format("Request #{0}-->", requestIndex));
        foreach (Message msg in messageSet.Messages)
        {
            Console.WriteLine(msg);
        }

        requestIndex++;
    }
}
/// <summary>
/// Constructing a <see cref="MultiFetchRequest"/> from a null request list
/// must throw <see cref="ArgumentNullException"/>.
/// </summary>
public void ThrowsExceptionWhenNullArgumentPassedToTheConstructor()
{
    Assert.Throws<ArgumentNullException>(() => new MultiFetchRequest(null));
}
/// <summary>
/// Produces test messages, then multi-fetches the same topic three times and
/// asserts each of the three message sets contains exactly four messages.
/// </summary>
public void ConsumerMultiFetchGetsMessage()
{
    var config = this.ConsumerConfig1;
    ProducerSendMultiRequest();
    Thread.Sleep(2000); // give the broker time to persist the produced messages

    IConsumer cons = new Consumer(config);
    var fetchRequests = new List<FetchRequest>
    {
        new FetchRequest(CurrentTestTopic, 0, 0),
        new FetchRequest(CurrentTestTopic, 0, 0),
        new FetchRequest(CurrentTestTopic, 0, 0)
    };
    var request = new MultiFetchRequest(fetchRequests);

    IList<BufferedMessageSet> response = cons.MultiFetch(request);
    Assert.AreEqual(3, response.Count);

    // Each of the three identical requests must yield the same four messages.
    int requestIndex = 0;
    foreach (BufferedMessageSet set in response)
    {
        IEnumerable<Message> messageSet = set.Messages;
        Assert.AreEqual(4, messageSet.Count());
        Console.WriteLine(string.Format("Request #{0}-->", requestIndex));
        foreach (Message msg in messageSet)
        {
            Console.WriteLine(msg.ToString());
        }

        requestIndex++;
    }
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    // Retry with a fresh connection on each attempt; the last failure propagates.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                return BufferedMessageSet.ParseMultiFrom(conn.Reader, size, request.ConsumerRequests.Count);
            }
        }
        catch (Exception ex)
        {
            if (attempt == this.config.NumberOfTries)
            {
                // Maximum number of tries reached: surface the failure.
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
    }

    // Only reachable when NumberOfTries < 1: return an empty result, as before.
    return new List<BufferedMessageSet>();
}
/// <summary>
/// Writes a multifetch request to the server.
/// </summary>
/// <remarks>
/// Write timeout is defaulted to infinite.
/// </remarks>
/// <param name="request">The <see cref="MultiFetchRequest"/> to send to the server.</param>
public void Write(MultiFetchRequest request)
{
    this.EnsuresNotDisposed();
    Guard.Assert<ArgumentNullException>(() => request != null);

    // Serialize from the request's pre-built buffer and delegate to the raw writer.
    byte[] buffer = request.RequestBuffer.GetBuffer();
    this.Write(buffer, Timeout.Infinite);
}