/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages; empty if no attempt succeeded without throwing.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    // Retry with a fresh connection per attempt; the final failure propagates.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                var offsets = request.ConsumerRequests.Select(x => x.Offset).ToList();
                return BufferedMessageSet.ParseMultiFrom(
                    conn.Reader,
                    size,
                    request.ConsumerRequests.Count,
                    offsets);
            }
        }
        catch (Exception ex)
        {
            // On the last allowed attempt, surface the failure to the caller.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
    }

    // Unreachable in practice (the loop either returns or rethrows), kept for compiler flow analysis.
    return new List<BufferedMessageSet>();
}
/// <summary>
/// Fetch a set of messages from a topic.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages, or null if no attempt succeeded without throwing.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    // Each attempt opens a fresh connection; only the final failure is rethrown.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                return BufferedMessageSet.ParseFrom(conn.Reader, size, request.Offset);
            }
        }
        catch (Exception ex)
        {
            // Give up and propagate once the retry budget is exhausted.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
    }

    // Unreachable in practice; satisfies definite-return analysis.
    return null;
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">
/// The offset request.
/// </param>
/// <returns>
/// The list of offsets, in descending order.
/// </returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    var offsets = new List<long>();

    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);

                // An empty response body means no offsets are available.
                int size = conn.Reader.ReadInt32();
                if (size == 0)
                {
                    return offsets;
                }

                short errorCode = conn.Reader.ReadInt16();
                if (errorCode != ErrorMapping.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                int count = conn.Reader.ReadInt32();
                for (int i = 0; i < count; i++)
                {
                    offsets.Add(conn.Reader.ReadInt64());
                }

                return offsets;
            }
        }
        catch (Exception ex)
        {
            // Rethrow on the last allowed attempt; otherwise log and retry.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "GetOffsetsBefore reconnect due to {0}", ex);
        }
    }

    // Unreachable in practice; the loop either returns or rethrows.
    return offsets;
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time,
/// using a pooled connection.
/// </summary>
/// <param name="request">
/// The offset request.
/// </param>
/// <returns>
/// The list of offsets, in descending order.
/// </returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    var result = new List<long>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        KafkaConnection conn = null;
        try
        {
            // FIX: reset the accumulator on every attempt. Previously a failed
            // partial read left stale offsets in the list, and a successful
            // retry appended to them, returning duplicated/garbage entries.
            result.Clear();

            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            int size = conn.Reader.ReadInt32();
            if (size > 0)
            {
                short errorCode = conn.Reader.ReadInt16();
                if (errorCode != ErrorMapping.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                int count = conn.Reader.ReadInt32();
                for (int i = 0; i < count; i++)
                {
                    result.Add(conn.Reader.ReadInt64());
                }
            }

            break;
        }
        catch (Exception ex)
        {
            //// if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "GetOffsetsBefore reconnect due to {0}", ex);
        }
        finally
        {
            // FIX: only release a connection that was actually acquired;
            // GetConnection may throw, leaving conn null, and releasing null
            // could mask the original exception. NOTE(review): assumes the
            // pool does not require a release for a failed acquire — confirm.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
            }
        }
    }

    return result;
}
/// <summary>
/// Combine multiple fetch requests in one call, using a pooled connection.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    // FIX: declared as the interface type instead of the previous
    // "as List<BufferedMessageSet>" downcast, which silently produced null
    // (and made the method return null) whenever ParseMultiFrom returned any
    // other IList implementation.
    IList<BufferedMessageSet> result = new List<BufferedMessageSet>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        KafkaConnection conn = null;
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            int size = conn.Reader.ReadInt32();
            result = BufferedMessageSet.ParseMultiFrom(
                conn.Reader,
                size,
                request.ConsumerRequests.Count,
                request.ConsumerRequests.Select(x => x.Offset).ToList());
            break;
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
        finally
        {
            // FIX: only release a connection that was actually acquired;
            // GetConnection may throw, leaving conn null. NOTE(review):
            // assumes the pool needs no release for a failed acquire — confirm.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
            }
        }
    }

    return result;
}
/// <summary>
/// Fetch a set of messages from a topic, using a pooled connection.
/// </summary>
/// <param name="request">
/// Specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
/// </param>
/// <returns>
/// A set of fetched messages; null if no attempt produced a message set.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public BufferedMessageSet Fetch(FetchRequest request)
{
    short tryCounter = 1;
    BufferedMessageSet messageSet = null;
    while (tryCounter <= this.config.NumberOfTries)
    {
        KafkaConnection conn = null;
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            int size = conn.Reader.ReadInt32();
            messageSet = BufferedMessageSet.ParseFrom(conn.Reader, size, request.Offset);
            break;
        }
        catch (Exception ex)
        {
            //// if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
        }
        finally
        {
            // FIX: only release a connection that was actually acquired;
            // GetConnection may throw, leaving conn null, and releasing null
            // could throw and mask the original exception. NOTE(review):
            // assumes the pool needs no release for a failed acquire — confirm.
            if (conn != null)
            {
                KafkaClusterConnectionPool.ReleaseConnection(conn);
            }
        }
    }

    return messageSet;
}
/// <summary>
/// Writes a multi-fetch request on the given connection and parses the raw
/// response bytes into one BufferedMessageSet per consumer request, appended
/// to <paramref name="result"/> (which is cleared first).
/// </summary>
/// <param name="conn">An open connection to the broker.</param>
/// <param name="request">The combined fetch request; one message set is parsed per consumer request.</param>
/// <param name="result">Receives the parsed message sets; any prior contents are discarded.</param>
/// <exception cref="KafkaException">Thrown when the response carries a non-zero error code.</exception>
private static void MultiFetch(KafkaConnection conn, MultiFetchRequest request, IList<BufferedMessageSet> result)
{
    // Clear any residue from a previous attempt so retries don't accumulate.
    result.Clear();
    conn.Write(request);

    // First 4 bytes on the wire: big-endian total payload length.
    int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(conn.Read(4)), 0);
    if (dataLength <= 0)
    {
        return;
    }

    byte[] data = conn.Read(dataLength);

    // Leading 2 bytes of the payload: overall error code (big-endian).
    int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray()), 0);
    if (errorCode != KafkaException.NoError)
    {
        throw new KafkaException(errorCode);
    }

    // skip the error code
    byte[] unbufferedData = data.Skip(2).ToArray();
    for (int i = 0; i < request.ConsumerRequests.Count; i++)
    {
        // Each part: 4-byte length, 2-byte per-part error code, then the
        // message-set bytes. partLength counts the error code, hence the
        // "partLength - 2" take and the "partLength + 4" advance below.
        int partLength = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Take(4).ToArray()), 0);
        errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(unbufferedData.Skip(4).Take(2).ToArray()), 0);
        if (errorCode != KafkaException.NoError)
        {
            throw new KafkaException(errorCode);
        }

        result.Add(BufferedMessageSet.ParseFrom(unbufferedData.Skip(6).Take(partLength - 2).ToArray()));

        // Advance past length prefix (4) + this part's payload (partLength).
        unbufferedData = unbufferedData.Skip(partLength + 4).ToArray();
    }
}
/// <summary>
/// Writes an offset request on the given connection and parses the response
/// into a list of 64-bit offsets, appended to <paramref name="offsets"/>
/// (which is cleared first).
/// </summary>
/// <param name="conn">An open connection to the broker.</param>
/// <param name="request">The offset request to send.</param>
/// <param name="offsets">Receives the parsed offsets; any prior contents are discarded.</param>
/// <exception cref="KafkaException">Thrown when the response carries a non-zero error code.</exception>
private static void GetOffsetsBefore(KafkaConnection conn, OffsetRequest request, IList<long> offsets)
{
    offsets.Clear(); // to make sure the list is clean after some previous attampts to get data
    conn.Write(request);

    // First 4 bytes on the wire: big-endian total payload length.
    int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(conn.Read(4)), 0);
    if (dataLength > 0)
    {
        byte[] data = conn.Read(dataLength);

        // Leading 2 bytes of the payload: error code (big-endian).
        int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray()), 0);
        if (errorCode != KafkaException.NoError)
        {
            throw new KafkaException(errorCode);
        }

        // skip the error code and process the rest
        byte[] unbufferedData = data.Skip(2).ToArray();

        // first four bytes are the number of offsets
        int numOfOffsets = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Take(4).ToArray()), 0);
        for (int ix = 0; ix < numOfOffsets; ix++)
        {
            // Offsets follow the 4-byte count, 8 big-endian bytes each.
            int position = (ix * 8) + 4;
            offsets.Add(
                BitConverter.ToInt64(
                    BitWorks.ReverseBytes(unbufferedData.Skip(position).Take(8).ToArray()), 0));
        }
    }
}
/// <summary>
/// Writes a fetch request on the given connection and parses the raw response
/// bytes into a message set.
/// </summary>
/// <param name="conn">An open connection to the broker.</param>
/// <param name="request">The fetch request to send.</param>
/// <returns>The parsed message set, or null when the response payload is empty.</returns>
/// <exception cref="KafkaException">Thrown when the response carries a non-zero error code.</exception>
private static BufferedMessageSet Fetch(KafkaConnection conn, FetchRequest request)
{
    conn.Write(request);

    // First 4 bytes on the wire: big-endian payload length; nothing follows when <= 0.
    int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(conn.Read(4)), 0);
    if (dataLength <= 0)
    {
        return null;
    }

    byte[] data = conn.Read(dataLength);

    // Leading 2 bytes of the payload: error code (big-endian).
    int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray()), 0);
    if (errorCode != KafkaException.NoError)
    {
        throw new KafkaException(errorCode);
    }

    // skip the error code
    byte[] messageSetBytes = data.Skip(2).ToArray();
    return BufferedMessageSet.ParseFrom(messageSetBytes);
}
/// <summary>
/// Combine multiple fetch requests in one call.
/// </summary>
/// <param name="request">
/// The list of fetch requests.
/// </param>
/// <returns>
/// A list of sets of fetched messages; empty if no attempt succeeded without throwing.
/// </returns>
/// <remarks>
/// Offset is passed in on every request, allowing the user to maintain this metadata
/// however they choose.
/// </remarks>
public IList<BufferedMessageSet> MultiFetch(MultiFetchRequest request)
{
    // Retry with a fresh connection per attempt; the final failure propagates.
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                return BufferedMessageSet.ParseMultiFrom(conn.Reader, size, request.ConsumerRequests.Count);
            }
        }
        catch (Exception ex)
        {
            // Surface the failure once the retry budget is exhausted.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "MultiFetch reconnect due to {0}", ex);
        }
    }

    // Unreachable in practice; satisfies definite-return analysis.
    return new List<BufferedMessageSet>();
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">
/// The offset request.
/// </param>
/// <returns>
/// The list of offsets, in descending order.
/// </returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    var offsets = new List<long>();

    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            using (var conn = new KafkaConnection(
                this.host,
                this.port,
                this.config.BufferSize,
                this.config.SocketTimeout))
            {
                conn.Write(request);

                // A zero-length response means no offsets are available.
                int size = conn.Reader.ReadInt32();
                if (size == 0)
                {
                    return offsets;
                }

                short errorCode = conn.Reader.ReadInt16();
                if (errorCode != KafkaException.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                int count = conn.Reader.ReadInt32();
                for (int i = 0; i < count; i++)
                {
                    offsets.Add(conn.Reader.ReadInt64());
                }

                return offsets;
            }
        }
        catch (Exception ex)
        {
            //// if maximum number of tries reached
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "GetOffsetsBefore reconnect due to {0}", ex);
        }
    }

    // Unreachable in practice; the loop either returns or rethrows.
    return offsets;
}
/// <summary>
/// Sends the data to a multiple topics on Kafka server synchronously
/// </summary>
/// <param name="requests">
/// The requests.
/// </param>
public void MultiSend(IEnumerable<ProducerRequest> requests)
{
    Guard.Assert<ArgumentNullException>(() => requests != null);

    // FIX: materialize the sequence once. The two validation guards below and
    // MultiProducerRequest each enumerate it; a lazy/deferred source would be
    // re-evaluated on every pass, which is wasteful and — for a one-shot
    // iterator — would make the later enumerations see no elements.
    var requestList = requests.ToList();

    Guard.Assert<ArgumentNullException>(
        () => requestList.All(
            x => x != null && x.MessageSet != null && x.MessageSet.Messages != null));
    Guard.Assert<ArgumentNullException>(
        () => requestList.All(
            x => x.MessageSet.Messages.All(
                y => y != null && y.PayloadSize <= this.Config.MaxMessageSize)));

    var multiRequest = new MultiProducerRequest(requestList);
    using (var conn = new KafkaConnection(this.config.Host, this.config.Port))
    {
        conn.Write(multiRequest);
    }
}
/// <summary>
/// Sends request to Kafka server synchronously
/// </summary>
/// <param name="request">
/// The request.
/// </param>
public void Send(ProducerRequest request)
{
    Guard.Assert<ArgumentNullException>(() => request != null);

    // One short-lived connection per send; disposed as soon as the write completes.
    using (var connection = new KafkaConnection(this.config.Host, this.config.Port))
    {
        connection.Write(request);
    }
}