/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">
/// The offset request.
/// </param>
/// <returns>
/// The list of offsets, in descending order.
/// </returns>
/// <exception cref="KafkaException">Thrown when the broker reports a non-zero error code.</exception>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    KafkaConnection conn = null;
    int size = 0;
    var result = new List<long>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            size = conn.Reader.ReadInt32();
            if (size > 0)
            {
                short errorCode = conn.Reader.ReadInt16();
                if (errorCode != ErrorMapping.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                int count = conn.Reader.ReadInt32();
                for (int i = 0; i < count; i++)
                {
                    result.Add(conn.Reader.ReadInt64());
                }
            }

            break;
        }
        catch (Exception ex)
        {
            // BUG FIX: discard any offsets read during the failed attempt;
            // previously a retry appended a fresh full set on top of the
            // partial results, returning duplicated offsets.
            result.Clear();

            //// if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "GetOffsetsBefore reconnect due to {0}", ex);
        }
        finally
        {
            // BUG FIX: only release a connection we actually obtained, and
            // forget it afterwards so a failed GetConnection on the next
            // attempt cannot release the same (stale) connection twice.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
                conn = null;
            }
        }
    }

    return result;
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    KafkaConnection conn = null;
    int size = 0;
    // BUG FIX: typed as IList so the parser's result is used directly.
    // The previous "as List<BufferedMessageSet>" cast silently produced
    // null whenever ParseMultiFrom returned any other IList implementation.
    IList<BufferedMessageSet> result = new List<BufferedMessageSet>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            size = conn.Reader.ReadInt32();
            result = BufferedMessageSet.ParseMultiFrom(
                conn.Reader,
                size,
                request.ConsumerRequests.Count,
                request.ConsumerRequests.Select(x => x.Offset).ToList());
            break;
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
        finally
        {
            // BUG FIX: only release a connection we actually obtained, and
            // forget it afterwards so a failed GetConnection on the next
            // attempt cannot release the same (stale) connection twice.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
                conn = null;
            }
        }
    }

    return result;
}
/// <summary>
/// Verifies that connections handed out by the pool target the broker
/// they were requested for (host and port round-trip correctly).
/// </summary>
public void TestCorrectConnectionRetrieval()
{
    var config1 = this.ConsumerConfig1;
    var config2 = this.ConsumerConfig2;
    var config3 = this.ConsumerConfig3;

    // Seed the pool: create one connection per broker and hand it straight back.
    foreach (var cfg in new[] { config1, config2, config3 })
    {
        var seeded = KafkaClusterConnectionPool.GetConnection(cfg.Broker.Host, cfg.Broker.Port);
        KafkaClusterConnectionPool.ReleaseConnection(seeded);
    }

    // Retrieve one connection for each broker.
    var conn1 = KafkaClusterConnectionPool.GetConnection(config1.Broker.Host, config1.Broker.Port);
    var conn2 = KafkaClusterConnectionPool.GetConnection(config2.Broker.Host, config2.Broker.Port);
    var conn3 = KafkaClusterConnectionPool.GetConnection(config3.Broker.Host, config3.Broker.Port);

    // Each pooled connection must point at the broker it was requested for.
    Assert.AreEqual(config1.Broker.Host, conn1.Server);
    Assert.AreEqual(config1.Broker.Port, conn1.Port);
    Assert.AreEqual(config2.Broker.Host, conn2.Server);
    Assert.AreEqual(config2.Broker.Port, conn2.Port);
    Assert.AreEqual(config3.Broker.Host, conn3.Server);
    Assert.AreEqual(config3.Broker.Port, conn3.Port);
}
/// <summary>
/// Verifies that returning more connections than the pool can hold
/// raises a <see cref="KafkaConnectionPoolException"/>.
/// </summary>
public void TestPoolOverload()
{
    var consumerConfig1 = this.ConsumerConfig1;
    // One more connection than the pool has room for.
    var connectionCount = CONNECTION_POOL_SIZE + 1;

    for (int i = 0; i < connectionCount; i++)
    {
        connections.Add(KafkaClusterConnectionPool.GetConnection(consumerConfig1.Broker.Host, consumerConfig1.Broker.Port));
    }

    var overloadDetected = false;
    try
    {
        for (int i = 0; i < connectionCount; i++)
        {
            KafkaClusterConnectionPool.ReleaseConnection(connections[i]);
        }
    }
    catch (KafkaConnectionPoolException)
    {
        // Expected: releasing the (pool size + 1)-th connection overflows the pool.
        overloadDetected = true;
    }

    // BUG FIX: the original test swallowed the exception and asserted
    // nothing, so it passed even when the pool never threw. The test now
    // fails unless the overload exception was actually observed.
    Assert.IsTrue(overloadDetected, "Releasing more connections than the pool capacity should throw KafkaConnectionPoolException.");
}
/// <summary>
/// Fetch a set of messages from a topic.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    BufferedMessageSet fetched = null;
    KafkaConnection connection = null;

    // Retry the whole round-trip up to the configured number of tries;
    // only the final failure is propagated to the caller.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            connection = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            connection.Write(request);
            int responseSize = connection.Reader.ReadInt32();
            fetched = BufferedMessageSet.ParseFrom(connection.Reader, responseSize, request.Offset);
            break;
        }
        catch (Exception ex)
        {
            //// if maximum number of tries reached
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
        finally
        {
            KafkaClusterConnectionPool.ReleaseConnection(connection);
        }
    }

    return fetched;
}