/// <summary>
/// Consumes messages from Kafka.
/// </summary>
/// <param name="request">The request to send to Kafka.</param>
/// <returns>A list of messages from Kafka.</returns>
/// <exception cref="KafkaException">Thrown when the broker returns a non-zero error code.</exception>
public List<Message> Consume(ConsumerRequest request)
{
    List<Message> messages = new List<Message>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes of the response are the big-endian payload length.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);

            // First 2 bytes of the payload are the big-endian error code;
            // fail fast instead of parsing a garbage message set.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            byte[] unbufferedData = data.Skip(2).ToArray();

            int processed = 0;
            int length = unbufferedData.Length - 4;
            int messageSize = 0;
            while (processed <= length)
            {
                // Each message is prefixed with its big-endian 4-byte size.
                messageSize = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Skip(processed).Take(4).ToArray<byte>()), 0);
                messages.Add(Message.ParseFrom(unbufferedData.Skip(processed).Take(messageSize + 4).ToArray<byte>()));
                processed += 4 + messageSize;
            }
        }
    }

    return messages;
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>List of offsets, in descending order.</returns>
/// <exception cref="KafkaException">Thrown when the broker returns a non-zero error code.</exception>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    List<long> offsets = new List<long>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes of the response are the big-endian payload length.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);

            // First 2 bytes are the big-endian error code; fail fast on error
            // instead of silently decoding a garbage offset list.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            byte[] unbufferedData = data.Skip(2).ToArray();

            // first four bytes are the number of offsets
            int numOfOffsets = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Take(4).ToArray<byte>()), 0);

            int position = 0;
            for (int ix = 0; ix < numOfOffsets; ix++)
            {
                // Offsets follow the count as big-endian 8-byte values.
                position = (ix * 8) + 4;
                offsets.Add(BitConverter.ToInt64(BitWorks.ReverseBytes(unbufferedData.Skip(position).Take(8).ToArray<byte>()), 0));
            }
        }
    }

    return offsets;
}
/// <summary>
/// Consumes messages from Kafka.
/// </summary>
/// <param name="request">The request to send to Kafka.</param>
/// <returns>A list of messages from Kafka.</returns>
public List<Message> Consume(FetchRequest request)
{
    var result = new List<Message>();
    using (var connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // Big-endian 4-byte length prefix on the response payload.
        int responseLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (responseLength <= 0)
        {
            return result;
        }

        byte[] payload = connection.Read(responseLength);

        // A big-endian 2-byte error code leads the payload.
        int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(payload.Take(2).ToArray<byte>()), 0);
        if (errorCode != KafkaException.NoError)
        {
            throw new KafkaException(errorCode);
        }

        // Remainder is the message set: repeated [4-byte size][message bytes].
        byte[] messageSet = payload.Skip(2).ToArray();
        int offset = 0;
        while (offset <= messageSet.Length - 4)
        {
            int messageSize = BitConverter.ToInt32(BitWorks.ReverseBytes(messageSet.Skip(offset).Take(4).ToArray<byte>()), 0);
            result.Add(Message.ParseFrom(messageSet.Skip(offset).Take(messageSize + 4).ToArray<byte>()));
            offset += 4 + messageSize;
        }
    }

    return result;
}
/// <summary>
/// Issues a fetch request and wraps the raw reply in a <see cref="FetchResponse"/>.
/// </summary>
/// <param name="request">The fetch request to send.</param>
/// <returns>The parsed response, or null when the broker sent no payload.</returns>
public FetchResponse Fetch(FetchRequest request)
{
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // Big-endian 4-byte length prefix; non-positive means no body follows.
        int responseLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (responseLength <= 0)
        {
            return null;
        }

        return new FetchResponse(connection.Read(responseLength));
    }
}
/// <summary>
/// Sends a fetch request to the broker and materializes the reply.
/// </summary>
/// <param name="request">The fetch request to send.</param>
/// <returns>The parsed response, or null when no payload was returned.</returns>
public FetchResponse Fetch(FetchRequest request)
{
    using (KafkaConnection connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // The 4-byte length prefix arrives big-endian; reverse before decoding.
        byte[] lengthBytes = BitWorks.ReverseBytes(connection.Read(4));
        int dataLength = BitConverter.ToInt32(lengthBytes, 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);
            return new FetchResponse(data);
        }

        return null;
    }
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The offset response, or null when the broker sent no payload.</returns>
public OffsetResponse GetOffsetResponseBefore(OffsetRequest request)
{
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // Big-endian 4-byte length prefix; zero means an empty response.
        int responseLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        return responseLength == 0
            ? null
            : new OffsetResponse(connection.Read(responseLength));
    }
}
/// <summary>
/// Get metadata for a topic.
/// </summary>
/// <param name="correlationId">Id used by the client to identify this transaction. Returned in the response.</param>
/// <param name="clientId">Name to identify the client. Used in server logs.</param>
/// <param name="topicName">Name of the requested topic. If the topic name is null, metadata for all topics will be returned.</param>
/// <returns>The parsed metadata response, or null when the broker sent no payload.</returns>
public MetadataResponse Metadata(int correlationId, string clientId, string topicName)
{
    MetadataRequest request = new MetadataRequest(correlationId, clientId, topicName);
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // Big-endian 4-byte length prefix; zero means no response body.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength == 0)
        {
            return null;
        }

        byte[] data = connection.Read(dataLength);
        MetadataResponse metadataResponse = new MetadataResponse();
        metadataResponse.Parse(data, 0);
        return metadataResponse;
    }
}
/// <summary>
/// Sends one message payload to the given topic/partition and reads the broker's reply.
/// </summary>
/// <param name="correlationId">Id used by the client to identify this transaction.</param>
/// <param name="clientId">Name identifying the client.</param>
/// <param name="timeOut">Timeout value forwarded to the produce request.</param>
/// <param name="topicName">Topic to publish to.</param>
/// <param name="partitionId">Partition to publish to.</param>
/// <param name="payLoad">Raw message bytes.</param>
/// <returns>The produce response (empty when the broker sent no payload).</returns>
public ProduceResponse Produce(int correlationId, string clientId, int timeOut, string topicName, int partitionId, byte[] payLoad)
{
    var produceRequest = new ProduceRequest(timeOut, correlationId, clientId);
    produceRequest.AddMessage(topicName, partitionId, payLoad);

    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(produceRequest.GetRequestBytes().ToArray());

        // Big-endian 4-byte length prefix; zero means nothing to parse.
        int responseLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        var produceResponse = new ProduceResponse();
        if (responseLength != 0)
        {
            produceResponse.Parse(connection.Read(responseLength));
        }

        return produceResponse;
    }
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>List of offsets, in descending order.</returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    var result = new List<long>();
    using (var connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // Big-endian 4-byte length prefix on the response payload.
        int responseLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (responseLength > 0)
        {
            byte[] payload = connection.Read(responseLength);

            // Leading big-endian 2-byte error code; bail out on any broker failure.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(payload.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            byte[] body = payload.Skip(2).ToArray();

            // The body starts with a big-endian 4-byte offset count,
            // followed by that many big-endian 8-byte offsets.
            int offsetCount = BitConverter.ToInt32(BitWorks.ReverseBytes(body.Take(4).ToArray<byte>()), 0);
            for (int i = 0; i < offsetCount; i++)
            {
                int start = 4 + (i * 8);
                result.Add(BitConverter.ToInt64(BitWorks.ReverseBytes(body.Skip(start).Take(8).ToArray<byte>()), 0));
            }
        }
    }

    return result;
}
/// <summary>
/// Consumes messages from Kafka.
/// </summary>
/// <param name="request">The request to send to Kafka.</param>
/// <returns>A list of messages from Kafka.</returns>
public List<Message> Consume(FetchRequest request)
{
    var fetched = new List<Message>();
    using (var connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // 4-byte big-endian prefix gives the size of the rest of the response.
        byte[] lengthPrefix = connection.Read(4);
        int payloadSize = BitConverter.ToInt32(BitWorks.ReverseBytes(lengthPrefix), 0);
        if (payloadSize > 0)
        {
            byte[] payload = connection.Read(payloadSize);

            // The payload leads with a big-endian 2-byte error code.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(payload.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // Everything after the error code is the message set:
            // repeated [4-byte big-endian size][message bytes].
            byte[] messageSet = payload.Skip(2).ToArray();
            for (int cursor = 0; cursor <= messageSet.Length - 4; )
            {
                int size = BitConverter.ToInt32(BitWorks.ReverseBytes(messageSet.Skip(cursor).Take(4).ToArray<byte>()), 0);
                fetched.Add(Message.ParseFrom(messageSet.Skip(cursor).Take(size + 4).ToArray<byte>()));
                cursor += 4 + size;
            }
        }
    }

    return fetched;
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The offset response, or null when no payload was returned.</returns>
public OffsetResponse GetOffsetResponseBefore(OffsetRequest request)
{
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // Decode the big-endian 4-byte length prefix.
        byte[] sizeBytes = BitWorks.ReverseBytes(connection.Read(4));
        int payloadSize = BitConverter.ToInt32(sizeBytes, 0);
        if (payloadSize == 0)
        {
            return null;
        }

        byte[] payload = connection.Read(payloadSize);
        return new OffsetResponse(payload);
    }
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>List of offsets, in descending order.</returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    var collected = new List<long>();
    using (var connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // Big-endian 4-byte length prefix; nothing more to read when empty.
        int payloadSize = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (payloadSize <= 0)
        {
            return collected;
        }

        byte[] payload = connection.Read(payloadSize);

        // A big-endian 2-byte error code comes first; throw on any broker error.
        int brokerError = BitConverter.ToInt16(BitWorks.ReverseBytes(payload.Take(2).ToArray<byte>()), 0);
        if (brokerError != KafkaException.NoError)
        {
            throw new KafkaException(brokerError);
        }

        byte[] body = payload.Skip(2).ToArray();

        // 4-byte big-endian count, then that many big-endian 8-byte offsets.
        int count = BitConverter.ToInt32(BitWorks.ReverseBytes(body.Take(4).ToArray<byte>()), 0);
        int cursor = 4;
        while (count-- > 0)
        {
            collected.Add(BitConverter.ToInt64(BitWorks.ReverseBytes(body.Skip(cursor).Take(8).ToArray<byte>()), 0));
            cursor += 8;
        }
    }

    return collected;
}
/// <summary>
/// Executes a multi-fetch operation.
/// </summary>
/// <param name="request">The request to push to Kafka.</param>
/// <returns>
/// A list containing sets of messages. The message sets should match the request order.
/// </returns>
public List<List<Message>> Consume(MultiFetchRequest request)
{
    int fetchRequests = request.ConsumerRequests.Count;
    List<List<Message>> messages = new List<List<Message>>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes: big-endian length of the response payload.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);
            int position = 0;

            // Overall big-endian 2-byte error code for the whole response.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            position = position + 2;

            // One message set per original consumer request, in request order.
            for (int ix = 0; ix < fetchRequests; ix++)
            {
                messages.Add(new List<Message>());

                // Each set: big-endian 4-byte size, then a per-set 2-byte error code.
                int messageSetSize = BitConverter.ToInt32(BitWorks.ReverseBytes(data.Skip(position).Take(4).ToArray<byte>()), 0);
                position = position + 4;
                errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Skip(position).Take(2).ToArray<byte>()), 0);
                if (errorCode != KafkaException.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                // skip the error code and process the rest
                position = position + 2;
                byte[] messageSetBytes = data.Skip(position).ToArray<byte>().Take(messageSetSize).ToArray<byte>();
                int processed = 0;
                int messageSize = 0;

                // dropped 2 bytes at the end...padding???
                // NOTE(review): the loop stops 2 bytes short of the set's end, but
                // `position` below advances by `processed`, not `messageSetSize`;
                // if those trailing bytes are real, subsequent message sets could be
                // read misaligned — verify against the wire format before changing.
                while (processed < messageSetBytes.Length - 2)
                {
                    // Each message: big-endian 4-byte size prefix, then the message bytes.
                    messageSize = BitConverter.ToInt32(BitWorks.ReverseBytes(messageSetBytes.Skip(processed).Take(4).ToArray<byte>()), 0);
                    messages[ix].Add(Message.ParseFrom(messageSetBytes.Skip(processed).Take(messageSize + 4).ToArray<byte>()));
                    processed += 4 + messageSize;
                }

                position = position + processed;
            }
        }
    }

    return messages;
}
/// <summary>
/// Publishes one message payload to the given topic/partition and parses the broker's reply.
/// </summary>
/// <param name="correlationId">Id used by the client to identify this transaction.</param>
/// <param name="clientId">Name identifying the client.</param>
/// <param name="timeOut">Timeout value forwarded to the produce request.</param>
/// <param name="topicName">Topic to publish to.</param>
/// <param name="partitionId">Partition to publish to.</param>
/// <param name="payLoad">Raw message bytes.</param>
/// <returns>The produce response (unparsed/empty when the broker sent no payload).</returns>
public ProduceResponse Produce(int correlationId, string clientId, int timeOut, string topicName, int partitionId, byte[] payLoad)
{
    var produceRequest = new ProduceRequest(timeOut, correlationId, clientId);
    produceRequest.AddMessage(topicName, partitionId, payLoad);

    using (var kafkaConnection = new KafkaConnection(server, port))
    {
        kafkaConnection.Write(produceRequest.GetRequestBytes().ToArray());

        // Big-endian 4-byte length prefix on the reply.
        byte[] sizePrefix = kafkaConnection.Read(4);
        int replySize = BitConverter.ToInt32(BitWorks.ReverseBytes(sizePrefix), 0);

        var reply = new ProduceResponse();
        if (replySize != 0)
        {
            reply.Parse(kafkaConnection.Read(replySize));
        }

        return reply;
    }
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>List of offsets, in descending order.</returns>
/// <exception cref="KafkaException">Thrown when the broker returns a non-zero error code.</exception>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    List<long> offsets = new List<long>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes of the response are the big-endian payload length.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);

            // First 2 bytes are the big-endian error code; throw instead of
            // silently decoding a garbage offset list.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            byte[] unbufferedData = data.Skip(2).ToArray();

            // first four bytes are the number of offsets
            int numOfOffsets = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Take(4).ToArray<byte>()), 0);

            int position = 0;
            for (int ix = 0; ix < numOfOffsets; ix++)
            {
                // Offsets follow the count as big-endian 8-byte values.
                position = (ix * 8) + 4;
                offsets.Add(BitConverter.ToInt64(BitWorks.ReverseBytes(unbufferedData.Skip(position).Take(8).ToArray<byte>()), 0));
            }
        }
    }

    return offsets;
}
/// <summary>
/// Consumes messages from Kafka.
/// </summary>
/// <param name="request">The request to send to Kafka.</param>
/// <returns>A list of messages from Kafka.</returns>
/// <exception cref="KafkaException">Thrown when the broker returns a non-zero error code.</exception>
public List<Message> Consume(ConsumerRequest request)
{
    List<Message> messages = new List<Message>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes of the response are the big-endian payload length.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);

            // First 2 bytes are the big-endian error code; fail fast rather
            // than parsing a garbage message set.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            byte[] unbufferedData = data.Skip(2).ToArray();

            int processed = 0;
            int length = unbufferedData.Length - 4;
            int messageSize = 0;
            while (processed <= length)
            {
                // Each message is prefixed with its big-endian 4-byte size.
                messageSize = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Skip(processed).Take(4).ToArray<byte>()), 0);
                messages.Add(Message.ParseFrom(unbufferedData.Skip(processed).Take(messageSize + 4).ToArray<byte>()));
                processed += 4 + messageSize;
            }
        }
    }

    return messages;
}
/// <summary>
/// Executes a multi-fetch operation.
/// </summary>
/// <param name="request">The request to push to Kafka.</param>
/// <returns>
/// A list containing sets of messages. The message sets should match the request order.
/// </returns>
public List <List <Message> > Consume(MultiFetchRequest request)
{
    int fetchRequests = request.ConsumerRequests.Count;
    List <List <Message> > messages = new List <List <Message> >();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());

        // First 4 bytes: big-endian length of the response payload.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);
            int position = 0;

            // Overall big-endian 2-byte error code for the whole response.
            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray <byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            position = position + 2;

            // One message set per original consumer request, in request order.
            for (int ix = 0; ix < fetchRequests; ix++)
            {
                messages.Add(new List <Message>());

                // Each set: big-endian 4-byte size, then a per-set 2-byte error code.
                int messageSetSize = BitConverter.ToInt32(BitWorks.ReverseBytes(data.Skip(position).Take(4).ToArray <byte>()), 0);
                position = position + 4;
                errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Skip(position).Take(2).ToArray <byte>()), 0);
                if (errorCode != KafkaException.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                // skip the error code and process the rest
                position = position + 2;
                byte[] messageSetBytes = data.Skip(position).ToArray <byte>().Take(messageSetSize).ToArray <byte>();
                int processed = 0;
                int messageSize = 0;

                // dropped 2 bytes at the end...padding???
                // NOTE(review): the loop stops 2 bytes short of the set's end, but
                // `position` below advances by `processed`, not `messageSetSize`;
                // if those trailing bytes are real, subsequent message sets could be
                // read misaligned — verify against the wire format before changing.
                while (processed < messageSetBytes.Length - 2)
                {
                    // Each message: big-endian 4-byte size prefix, then the message bytes.
                    messageSize = BitConverter.ToInt32(BitWorks.ReverseBytes(messageSetBytes.Skip(processed).Take(4).ToArray <byte>()), 0);
                    messages[ix].Add(Message.ParseFrom(messageSetBytes.Skip(processed).Take(messageSize + 4).ToArray <byte>()));
                    processed += 4 + messageSize;
                }

                position = position + processed;
            }
        }
    }

    return(messages);
}
/// <summary>
/// Get metadata for a topic.
/// </summary>
/// <param name="correlationId">Id used by the client to identify this transaction. Returned in the response.</param>
/// <param name="clientId">Name to identify the client. Used in server logs.</param>
/// <param name="topicName">Name of the requested topic. If the topic name is null, metadata for all topics will be returned.</param>
/// <returns>The parsed metadata response, or null when the broker sent no payload.</returns>
public MetadataResponse Metadata(int correlationId, string clientId, string topicName)
{
    MetadataRequest request = new MetadataRequest(correlationId, clientId, topicName);
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // Big-endian 4-byte length prefix; zero means no response body.
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength == 0)
        {
            return null;
        }

        byte[] data = connection.Read(dataLength);
        MetadataResponse metadataResponse = new MetadataResponse();
        metadataResponse.Parse(data, 0);
        return metadataResponse;
    }
}