public async Task TestMultipleOffsetWorksOk()
{
    using (var temporaryTopic = testCluster.CreateTemporaryTopic(partitions: 2))
    using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
    {
        var topic = temporaryTopic.Name;
        var request = new OffsetRequest
        {
            Offsets = new List<Offset>
            {
                new Offset { Topic = topic, MaxOffsets = 1, PartitionId = 0, Time = -1 },
                new Offset { Topic = topic, MaxOffsets = 1, PartitionId = 1, Time = -1 }
            }
        };

        var response = await connection.SendRequestAsync(request, CancellationToken.None);
        Assert.That(response, Has.Count.EqualTo(2));
        Assert.That(response[0].Error, Is.EqualTo(ErrorResponseCode.NoError));
        Assert.That(response[1].Error, Is.EqualTo(ErrorResponseCode.NoError));
    }
}
/// <summary>
/// Get offsets for each partition from a given topic.
/// </summary>
/// <param name="topic">Name of the topic to get offset information from.</param>
/// <param name="maxOffsets">Maximum number of offsets to return per partition.</param>
/// <param name="time">Time in ms; -1 for the latest offsets, -2 for the earliest.</param>
/// <returns>A list of offset responses, one per partition leader queried.</returns>
public async Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
{
    var topicMetadata = GetTopic(topic);

    // Send the offset request to each partition leader.
    var sendRequests = topicMetadata.Partitions
        .GroupBy(x => x.PartitionId)
        .Select(p =>
        {
            var route = _brokerRouter.SelectBrokerRoute(topic, p.Key);
            var request = new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset { Topic = topic, PartitionId = p.Key, MaxOffsets = maxOffsets, Time = time }
                }
            };
            return route.Connection.SendAsync(request);
        }).ToArray();

    await Task.WhenAll(sendRequests).ConfigureAwait(false);
    return sendRequests.SelectMany(x => x.Result).ToList();
}
private long ResetConsumerOffsets(string topic, int partitionId)
{
    long offset;
    switch (_config.AutoOffsetReset)
    {
        case OffsetRequest.SmallestTime:
            offset = OffsetRequest.EarliestTime;
            break;
        case OffsetRequest.LargestTime:
            offset = OffsetRequest.LatestTime;
            break;
        default:
            return 0;
    }

    var requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
    requestInfo[topic] = new List<PartitionOffsetRequestInfo>
    {
        new PartitionOffsetRequestInfo(partitionId, offset, 1)
    };

    var request = new OffsetRequest(requestInfo);
    OffsetResponse offsets = _simpleConsumer.GetOffsetsBefore(request);
    var topicDirs = new ZKGroupTopicDirs(_config.GroupId, topic);
    long offsetFound = offsets.ResponseMap[topic].First().Offsets[0];
    Logger.InfoFormat("updating partition {0} with {1} offset {2}", partitionId,
        offset == OffsetRequest.EarliestTime ? "earliest" : "latest", offsetFound);
    ZkUtils.UpdatePersistentPath(_zkClient, topicDirs.ConsumerOffsetDir + "/" + partitionId,
        offsetFound.ToString(CultureInfo.InvariantCulture));
    return offsetFound;
}
public void GetBytesValid()
{
    string topicName = "topic";
    OffsetRequest request = new OffsetRequest(topicName, 0, OffsetRequest.LatestTime, 10);

    // format = len(request) + requesttype + len(topic) + topic + partition + time + max
    // total byte count = 4 + (2 + 2 + 5 + 4 + 8 + 4)
    byte[] bytes = request.GetBytes();
    Assert.IsNotNull(bytes);
    Assert.AreEqual(29, bytes.Length);

    // first 4 bytes = the length of the request
    Assert.AreEqual(25, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Take(4).ToArray<byte>()), 0));

    // next 2 bytes = the RequestType, which in this case should be Offsets
    Assert.AreEqual((short)RequestType.Offsets, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(4).Take(2).ToArray<byte>()), 0));

    // next 2 bytes = the length of the topic
    Assert.AreEqual((short)5, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(6).Take(2).ToArray<byte>()), 0));

    // next 5 bytes = the topic
    Assert.AreEqual(topicName, Encoding.ASCII.GetString(bytes.Skip(8).Take(5).ToArray<byte>()));

    // next 4 bytes = the partition
    Assert.AreEqual(0, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(13).Take(4).ToArray<byte>()), 0));

    // next 8 bytes = time
    Assert.AreEqual(OffsetRequest.LatestTime, BitConverter.ToInt64(BitWorks.ReverseBytes(bytes.Skip(17).Take(8).ToArray<byte>()), 0));

    // next 4 bytes = max offsets
    Assert.AreEqual(10, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(25).Take(4).ToArray<byte>()), 0));
}
/// <summary>
/// Get offsets for a single partition of a given topic.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicName">Name of the topic to get offset information from.</param>
/// <param name="partitionId">The partition to get offsets for.</param>
/// <param name="maxOffsets">How many to get, at most.</param>
/// <param name="offsetTime">These are best described by <see cref="OffsetRequest.Topic.Timestamp"/></param>
/// <param name="cancellationToken">Token used to cancel the request.</param>
public static async Task<OffsetResponse.Topic> GetTopicOffsetAsync(this IBrokerRouter brokerRouter, string topicName, int partitionId, int maxOffsets, long offsetTime, CancellationToken cancellationToken)
{
    // Forward the requested timestamp and count to the partition-level request
    // (see the OffsetRequest.Topic constructor used in the encode/decode test below).
    var request = new OffsetRequest(new OffsetRequest.Topic(topicName, partitionId, offsetTime, maxOffsets));
    var response = await brokerRouter.SendAsync(request, topicName, partitionId, cancellationToken).ConfigureAwait(false);
    return response.Topics.SingleOrDefault(t => t.TopicName == topicName && t.PartitionId == partitionId);
}
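// Usage sketch for the extension above. Hedged: assumes an already-configured
// IBrokerRouter (construction is library-specific); offsetTime = -1 asks for the
// latest offset and -2 for the earliest, per the time semantics documented
// elsewhere in this file.
private static async Task PrintLatestOffsetAsync(IBrokerRouter router, CancellationToken cancellationToken)
{
    var latest = await router.GetTopicOffsetAsync("my-topic", partitionId: 0, maxOffsets: 1,
        offsetTime: -1, cancellationToken: cancellationToken);
    Console.WriteLine("{0}/{1}", latest.TopicName, latest.PartitionId);
}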
private long ResetConsumerOffsets(string topic, Partition partition)
{
    long offset;
    switch (this.config.AutoOffsetReset)
    {
        case OffsetRequest.SmallestTime:
            offset = OffsetRequest.EarliestTime;
            break;
        case OffsetRequest.LargestTime:
            offset = OffsetRequest.LatestTime;
            break;
        default:
            return -1;
    }

    var request = new OffsetRequest(topic, partition.PartId, offset, 1);
    var offsets = this.simpleConsumer.GetOffsetsBefore(request);
    var topicDirs = new ZKGroupTopicDirs(this.config.GroupId, topic);
    Logger.InfoFormat(CultureInfo.CurrentCulture, "updating partition {0} with {1} offset {2}",
        partition.Name, offset == OffsetRequest.EarliestTime ? "earliest" : "latest", offsets[0]);
    ZkUtils.UpdatePersistentPath(this.zkClient, topicDirs.ConsumerOffsetDir + "/" + partition.Name, offsets[0].ToString());
    return offsets[0];
}
public long RefreshAndGetOffsetByTimeStamp(short versionId, string clientId, int correlationId, string topic, int partitionId, DateTime timeStampInUTC)
{
    using (Consumer consumer = this.GetConsumer(topic, partitionId))
    {
        // Build a one-partition request for offsets at or before the given timestamp.
        var offsetRequestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
        var offsetRequestInfoForPartitions = new List<PartitionOffsetRequestInfo>
        {
            new PartitionOffsetRequestInfo(partitionId, KafkaClientHelperUtils.ToUnixTimestampMillis(timeStampInUTC), 8)
        };
        offsetRequestInfo.Add(topic, offsetRequestInfoForPartitions);
        OffsetRequest offsetRequest = new OffsetRequest(offsetRequestInfo);

        OffsetResponse offsetResponse = consumer.GetOffsetsBefore(offsetRequest);
        List<PartitionOffsetsResponse> partitionOffsetByTimeStamp = null;
        if (offsetResponse.ResponseMap.TryGetValue(topic, out partitionOffsetByTimeStamp))
        {
            foreach (var p in partitionOffsetByTimeStamp)
            {
                if (p.PartitionId == partitionId)
                {
                    // Return the offset of the matching partition entry.
                    return p.Offsets[0];
                }
            }
        }
    }

    return -1;
}
/// <summary>
/// Get the earliest or latest offset of a given topic and partition.
/// </summary>
/// <param name="topicAndPartition">Topic and partition of which the offset is needed.</param>
/// <param name="earliestOrLatest">A value to indicate earliest or latest offset.</param>
/// <param name="consumerId">Id of the consumer, which could be a consumer client, SimpleConsumerShell or a follower broker.</param>
/// <returns>Requested offset.</returns>
public long EarliestOrLatestOffset(TopicAndPartition topicAndPartition, long earliestOrLatest, int consumerId)
{
    var request = new OffsetRequest(
        new Dictionary<TopicAndPartition, PartitionOffsetRequestInfo>
        {
            { topicAndPartition, new PartitionOffsetRequestInfo(earliestOrLatest, 1) }
        },
        clientId: this.ClientId,
        replicaId: consumerId);

    var partitionErrorAndOffset = this.GetOffsetsBefore(request).PartitionErrorAndOffsets[topicAndPartition];
    long offset;
    if (partitionErrorAndOffset.Error == ErrorMapping.NoError)
    {
        offset = partitionErrorAndOffset.Offsets[0];
    }
    else
    {
        throw ErrorMapping.ExceptionFor(partitionErrorAndOffset.Error);
    }

    return offset;
}
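// Usage sketch for EarliestOrLatestOffset above. Hedged: the TopicAndPartition
// constructor and the SimpleConsumer type name shown here are assumptions; -1 as
// the consumer id conventionally marks an ordinary client rather than a follower broker.
private static long GetLatestOffsetSketch(SimpleConsumer consumer)
{
    var topicAndPartition = new TopicAndPartition("my-topic", 0);
    return consumer.EarliestOrLatestOffset(topicAndPartition, OffsetRequest.LatestTime, consumerId: -1);
}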
/// <summary>
/// Get offsets for each partition from a given topic.
/// </summary>
/// <param name="topic">Name of the topic to get offset information from.</param>
/// <param name="maxOffsets">Maximum number of offsets to return per partition.</param>
/// <param name="time">Time in ms; -1 for the latest offsets, -2 for the earliest.</param>
/// <returns>A list of offset responses, one per partition leader queried.</returns>
public Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
{
    var topicMetadata = GetTopic(topic);

    // Send the offset request to each partition leader.
    var sendRequests = topicMetadata.Partitions
        .GroupBy(x => x.PartitionId)
        .Select(p =>
        {
            var route = _brokerRouter.SelectBrokerRoute(topic, p.Key);
            var request = new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset { Topic = topic, PartitionId = p.Key, MaxOffsets = maxOffsets, Time = time }
                }
            };
            return route.Connection.SendAsync(request);
        }).ToArray();

    return Task.WhenAll(sendRequests)
        .ContinueWith(t => sendRequests.SelectMany(x => x.Result).ToList());
}
public void GetBytesValid()
{
    const string topicName = "topic";
    var requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
    requestInfo[topicName] = new List<PartitionOffsetRequestInfo>
    {
        new PartitionOffsetRequestInfo(0, OffsetRequest.LatestTime, 10)
    };
    var request = new OffsetRequest(requestInfo);

    // format = len(request) + requesttype + version + correlation id + client id + replica id + request info count + request infos
    int count = 2 + 2 + 4 + 2 + 4 + 4 + 4 +
                BitWorks.GetShortStringLength("topic", AbstractRequest.DefaultEncoding) + 4 + 4 + 8 + 4;
    var ms = new MemoryStream();
    request.WriteTo(ms);
    byte[] bytes = ms.ToArray();
    Assert.IsNotNull(bytes);
    Assert.AreEqual(count, bytes.Length);

    var reader = new KafkaBinaryReader(ms);
    reader.ReadInt32().Should().Be(count - 4);                        // length
    reader.ReadInt16().Should().Be((short)RequestTypes.Offsets);      // request type
    reader.ReadInt16().Should().Be(0);                                // version
    reader.ReadInt32().Should().Be(0);                                // correlation id
    string.IsNullOrEmpty(reader.ReadShortString()).Should().BeTrue(); // client id
    reader.ReadInt32().Should().Be(-1);                               // replica id
    reader.ReadInt32().Should().Be(1);                                // request info count
    reader.ReadShortString().Should().Be("topic");
    reader.ReadInt32().Should().Be(1);                                // info count
    reader.ReadInt32().Should().Be(0);                                // partition id
    reader.ReadInt64().Should().Be(OffsetRequest.LatestTime);         // time
    reader.ReadInt32().Should().Be(10);                               // max offset
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The list of offsets, in descending order.</returns>
public OffsetResponse GetOffsetsBefore(OffsetRequest request)
{
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            lock (this)
            {
                return connection.Send(request);
            }
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat("GetOffsetsBefore reconnect due to {0}", ex.FormatException());
        }
    }

    return null;
}
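// Usage sketch for the retrying GetOffsetsBefore above, using the same
// dictionary-shaped request as the other examples in this file. Hedged:
// "consumer" stands for an already-configured instance of the class that
// defines GetOffsetsBefore.
private static long GetLatestOffsetSketch(Consumer consumer, string topic, int partitionId)
{
    var requestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>
    {
        { topic, new List<PartitionOffsetRequestInfo> { new PartitionOffsetRequestInfo(partitionId, OffsetRequest.LatestTime, 1) } }
    };
    OffsetResponse response = consumer.GetOffsetsBefore(new OffsetRequest(requestInfo));
    return response.ResponseMap[topic].First().Offsets[0];
}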
/// <summary>
/// Get offsets for each partition from a given topic.
/// </summary>
/// <param name="topic">Name of the topic to get offset information from.</param>
/// <param name="maxOffsets">Maximum number of offsets to return per partition.</param>
/// <param name="time">Time in ms; -1 for the latest offsets, -2 for the earliest.</param>
/// <returns>A list of offset responses, one per partition leader queried.</returns>
public async Task<List<OffsetResponse>> GetTopicOffsetAsync(string topic, int maxOffsets = 2, int time = -1)
{
    await _brokerRouter.RefreshMissingTopicMetadata(topic);
    var topicMetadata = GetTopicFromCache(topic);

    // Send the offset request to each partition leader.
    var sendRequests = topicMetadata.Partitions
        .GroupBy(x => x.PartitionId)
        .Select(p =>
        {
            var route = _brokerRouter.SelectBrokerRouteFromLocalCache(topic, p.Key);
            var request = new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset { Topic = topic, PartitionId = p.Key, MaxOffsets = maxOffsets, Time = time }
                }
            };
            return route.Connection.SendAsync(request);
        }).ToArray();

    await Task.WhenAll(sendRequests).ConfigureAwait(false);
    return sendRequests.SelectMany(x => x.Result).ToList();
}
public void OffsetRequest()
{
    var request = new OffsetRequest();
    request.ReplicaId = _random.Next();
    request.TopicPartitions = new[]
    {
        new OffsetsRequestTopicPartition
        {
            TopicName = Guid.NewGuid().ToString(),
            Details = new[]
            {
                new OffsetsRequestTopicPartitionDetail
                {
                    Partition = _random.Next(),
                    Time = (Int64)_random.Next(),
                    MaxNumberOfOffsets = _random.Next()
                }
            }
        }
    };

    Stream binary1 = new MemoryStream();
    request.Serialize(binary1);
    binary1.Seek(0L, SeekOrigin.Begin);

    var request2 = new OffsetRequest();
    request2.Deserialize(binary1);

    var compareLogic = new CompareLogic();
    var result = compareLogic.Compare(request, request2);
    Assert.True(result.AreEqual);

    Stream binary2 = new MemoryStream();
    request.Serialize(binary2);
    Assert.Equal(binary1.Length, binary2.Length);

    using (var stream1 = new MemoryStream())
    using (var stream2 = new MemoryStream())
    {
        binary1.Seek(0L, SeekOrigin.Begin);
        binary1.CopyTo(stream1);
        binary2.Seek(0L, SeekOrigin.Begin);
        binary2.CopyTo(stream2);
        Assert.Equal(stream1.Length, stream2.Length);

        stream1.Seek(0L, SeekOrigin.Begin);
        var bytes1 = stream1.ToArray();
        stream2.Seek(0L, SeekOrigin.Begin);
        var bytes2 = stream2.ToArray();
        Assert.Equal(bytes1.Length, bytes2.Length);
        for (int i = 0; i < bytes1.Length; i++)
        {
            Assert.Equal(bytes1[i], bytes2[i]);
        }
    }
}
public static long GetCurrentKafkaOffset(string topic, string address, int port, int partition)
{
    var request = new OffsetRequest(topic, partition, DateTime.Now.AddDays(-5).Ticks, 10);
    var consumerConfig = new ConsumerConfiguration(address, port);
    IConsumer consumer = new Consumer(consumerConfig, address, port);
    IList<long> list = consumer.GetOffsetsBefore(request);
    return list.Sum();
}
public RequestResponseSerializationTest()
{
    this.producerRequest = SerializationTestUtils.CreateTestProducerRequest();
    this.producerResponse = SerializationTestUtils.CreateTestProducerResponse();
    this.fetchRequest = SerializationTestUtils.CreateTestFetchRequest();
    this.offsetRequest = SerializationTestUtils.CreateTestOffsetRequest();
    this.offsetResponse = SerializationTestUtils.CreateTestOffsetResponse();
    this.topicMetadataRequest = SerializationTestUtils.CreateTestTopicMetadataRequest();
    this.topicMetadataResponse = SerializationTestUtils.CreateTestTopicMetadataResponse();
}
public void ConsumerGetsOffsets()
{
    OffsetRequest request = new OffsetRequest("test", 0, DateTime.Now.AddHours(-24).Ticks, 10);
    Consumer consumer = new Consumer(KafkaServer, KafkaPort);
    IList<long> list = consumer.GetOffsetsBefore(request);
    foreach (long l in list)
    {
        Console.Out.WriteLine(l);
    }
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The list of offsets, in descending order.</returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    var result = new List<long>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            using (var conn = new KafkaConnection(this.host, this.port, this.config.BufferSize, this.config.SocketTimeout))
            {
                conn.Write(request);
                int size = conn.Reader.ReadInt32();
                if (size == 0)
                {
                    return result;
                }

                short errorCode = conn.Reader.ReadInt16();
                if (errorCode != ErrorMapping.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                int count = conn.Reader.ReadInt32();
                for (int i = 0; i < count; i++)
                {
                    result.Add(conn.Reader.ReadInt64());
                }

                return result;
            }
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "GetOffsetsBefore reconnect due to {0}", ex);
        }
    }

    return result;
}
public void TestSerializationAndDeserialization()
{
    var buffer = ByteBuffer.Allocate(this.producerRequest.SizeInBytes);
    this.producerRequest.WriteTo(buffer);
    buffer.Rewind();
    var deserializedProducerRequest = ProducerRequest.ReadFrom(buffer);
    Assert.Equal(this.producerRequest, deserializedProducerRequest);

    buffer = ByteBuffer.Allocate(this.producerResponse.SizeInBytes);
    this.producerResponse.WriteTo(buffer);
    buffer.Rewind();
    var deserializedProducerResponse = ProducerResponse.ReadFrom(buffer);
    Assert.Equal(this.producerResponse, deserializedProducerResponse);

    buffer = ByteBuffer.Allocate(this.fetchRequest.SizeInBytes);
    this.fetchRequest.WriteTo(buffer);
    buffer.Rewind();
    var deserializedFetchRequest = FetchRequest.ReadFrom(buffer);
    Assert.Equal(this.fetchRequest, deserializedFetchRequest);

    buffer = ByteBuffer.Allocate(this.offsetRequest.SizeInBytes);
    this.offsetRequest.WriteTo(buffer);
    buffer.Rewind();
    var deserializedOffsetRequest = OffsetRequest.ReadFrom(buffer);
    Assert.Equal(this.offsetRequest, deserializedOffsetRequest);

    buffer = ByteBuffer.Allocate(this.offsetResponse.SizeInBytes);
    this.offsetResponse.WriteTo(buffer);
    buffer.Rewind();
    var deserializedOffsetResponse = OffsetResponse.ReadFrom(buffer);
    Assert.Equal(this.offsetResponse, deserializedOffsetResponse);

    buffer = ByteBuffer.Allocate(this.topicMetadataRequest.SizeInBytes);
    this.topicMetadataRequest.WriteTo(buffer);
    buffer.Rewind();
    var deserializedTopicMetadataRequest = TopicMetadataRequest.ReadFrom(buffer);
    Assert.Equal(this.topicMetadataRequest, deserializedTopicMetadataRequest);

    buffer = ByteBuffer.Allocate(this.topicMetadataResponse.SizeInBytes);
    this.topicMetadataResponse.WriteTo(buffer);
    buffer.Rewind();
    var deserializedTopicMetadataResponse = TopicMetadataResponse.ReadFrom(buffer);
    Assert.Equal(this.topicMetadataResponse, deserializedTopicMetadataResponse);
}
public void ConsumerGetsOffsets()
{
    var consumerConfig = this.ConsumerConfig1;
    var request = new OffsetRequest(CurrentTestTopic, 0, DateTime.Now.AddHours(-24).Ticks, 10);
    IConsumer consumer = new Consumer(consumerConfig);
    IList<long> list = consumer.GetOffsetsBefore(request);
    foreach (long l in list)
    {
        Console.Out.WriteLine(l);
    }
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The list of offsets, in descending order.</returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    KafkaConnection conn = null;
    int size = 0;
    var result = new List<long>();
    short tryCounter = 1;
    while (tryCounter <= this.config.NumberOfTries)
    {
        try
        {
            conn = KafkaClusterConnectionPool.GetConnection(this.host, this.port);
            conn.Write(request);
            size = conn.Reader.ReadInt32();
            if (size > 0)
            {
                short errorCode = conn.Reader.ReadInt16();
                if (errorCode != ErrorMapping.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                int count = conn.Reader.ReadInt32();
                for (int i = 0; i < count; i++)
                {
                    result.Add(conn.Reader.ReadInt64());
                }
            }

            break;
        }
        catch (Exception ex)
        {
            // if maximum number of tries reached
            if (tryCounter == this.config.NumberOfTries)
            {
                throw;
            }

            tryCounter++;
            Logger.InfoFormat(CultureInfo.CurrentCulture, "GetOffsetsBefore reconnect due to {0}", ex);
        }
        finally
        {
            KafkaClusterConnectionPool.ReleaseConnection(conn);
        }
    }

    return result;
}
private OffsetRequest CreateFetchLastOffsetRequest()
{
    Offset offset = new Offset
    {
        PartitionId = _partitionId,
        Topic = _topic,
        MaxOffsets = 1
    };

    OffsetRequest request = new OffsetRequest
    {
        Offsets = new List<Offset> { offset },
        ClientId = _clientId
    };

    return request;
}
internal static byte[] Serialize(OffsetRequest req, int correlationId)
{
    var stream = new MemoryStream();
    WriteRequestHeader(stream, correlationId, ApiKey.OffsetRequest);
    stream.Write(_minusOne32, 0, 4);    // ReplicaId
    stream.Write(_one32, 0, 4);         // array of size 1: we send a request for one topic
    Write(stream, req.TopicName);
    WriteArray(stream, req.Partitions, p =>
    {
        BigEndianConverter.Write(stream, p.Id);
        BigEndianConverter.Write(stream, p.Time);
        BigEndianConverter.Write(stream, p.MaxNumOffsets);
    });
    return WriteMessageLength(stream);
}
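// Reading of the serializer above: after the request header, the offsets request
// body is laid out as follows (all integers big-endian):
//
//   ReplicaId       : int32  (-1 for a normal client)
//   TopicCount      : int32  (always 1 here; one topic per request)
//   TopicName       : int16 length-prefixed string
//   PartitionCount  : int32
//   per partition:
//     PartitionId   : int32
//     Time          : int64  (-1 latest, -2 earliest, or a timestamp in ms)
//     MaxNumOffsets : int32
//
// WriteMessageLength presumably frames the message with a leading int32 size,
// as its name suggests (an assumption; the helper is not shown here).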
public void OffsetsRequest(
    [Values(0, 1)] short version,
    [Values("test", "a really long name, with spaces and punctuation!")] string topic,
    [Values(1, 10)] int topicsPerRequest,
    [Values(1, 5)] int totalPartitions,
    [Values(-2, -1, 123456, 10000000)] long time,
    [Values(1, 10)] int maxOffsets)
{
    var topics = new List<OffsetRequest.Topic>();
    for (var t = 0; t < topicsPerRequest; t++)
    {
        var offset = new OffsetRequest.Topic(topic + t, t % totalPartitions, time, version == 0 ? maxOffsets : 1);
        topics.Add(offset);
    }

    var request = new OffsetRequest(topics);
    request.AssertCanEncodeDecodeRequest(version);
}
public Int64 Offset(String topic, Int32 partition, OffsetOption option)
{
    EnsureLegalTopicSpelling(topic);
    var broker = _topicBrokerDispatcher.SelectBroker(topic, partition);
    var request = new OffsetRequest(topic, new[] { partition }, option);
    var response = (OffsetResponse)SubmitRequest(broker, request);
    var errors = response.TopicPartitions
        .SelectMany(r => r.PartitionOffsets)
        .Where(x => x.ErrorCode != ErrorCode.NoError)
        .ToList();

    if (errors.Count > 0 && errors.All(e => e.ErrorCode == ErrorCode.UnknownTopicOrPartition))
    {
        return -1L;
    }

    // Any remaining error is fatal (checking > 0 so a single error is not ignored).
    if (errors.Count > 0)
    {
        throw new ProtocolException(errors.First().ErrorCode);
    }

    return response.TopicPartitions[0].PartitionOffsets[0].Offsets[0];
}
internal async Task<OffsetResponse> GetOffsets(OffsetRequest req, Connection conn)
{
    var tcp = await conn.GetClientAsync();
    if (_etw.IsEnabled())
    {
        _etw.ProtocolOffsetRequest(req.ToString());
    }

    var response = await conn.Correlation.SendAndCorrelateAsync(
        id => Serializer.Serialize(req, id),
        Serializer.DeserializeOffsetResponse,
        tcp,
        CancellationToken.None);

    _log.Debug("Got OffsetResponse {0}", response);
    if (_etw.IsEnabled())
    {
        _etw.ProtocolOffsetResponse(response.ToString());
    }

    return response;
}
public void TestSerializeOffsetRequest()
{
    var offset = new OffsetRequest
    {
        TopicsData = new[]
        {
            new TopicData<OffsetPartitionData>
            {
                TopicName = "boloss",
                PartitionsData = new[]
                {
                    new OffsetPartitionData
                    {
                        MaxNumberOfOffsets = 3,
                        Partition = 123,
                        Time = 21341
                    }
                }
            }
        }
    };

    using (var serialized = offset.Serialize(new ReusableMemoryStream(null), 1235, ClientId, null))
    {
        CheckHeader(Basics.ApiKey.OffsetRequest, 0, 1235, TheClientId, serialized);
        Assert.AreEqual(-1, BigEndianConverter.ReadInt32(serialized)); // ReplicaId
        Assert.AreEqual(1, BigEndianConverter.ReadInt32(serialized));  // 1 topic data
        Assert.AreEqual(offset.TopicsData.First().TopicName, Basics.DeserializeString(serialized));
        Assert.AreEqual(1, BigEndianConverter.ReadInt32(serialized));  // 1 partition data
        var od = new OffsetPartitionData();
        od.Deserialize(serialized, null);
        Assert.AreEqual(123, od.Partition);
        Assert.AreEqual(21341, od.Time);
        Assert.AreEqual(3, od.MaxNumberOfOffsets);
    }
}
/// <summary>
/// Writes an offset request to the server.
/// </summary>
/// <remarks>
/// Write timeout defaults to infinite.
/// </remarks>
/// <param name="request">The <see cref="OffsetRequest"/> to send to the server.</param>
public void Write(OffsetRequest request)
{
    this.EnsuresNotDisposed();
    Guard.Assert<ArgumentNullException>(() => request != null);
    this.Write(request.RequestBuffer.GetBuffer(), Timeout.Infinite);
}
/// <summary>
/// Create an OffsetRequest to get the message offset for each requested topic/partition.
/// </summary>
/// <param name="topics">List of topics to get offsets for.</param>
/// <param name="time">Used to ask for all messages before a certain time (ms). Specify -1 to receive the latest offsets and -2 to receive the earliest available offset. Note that because offsets are pulled in descending order, asking for the earliest offset will always return you a single element.</param>
/// <param name="maxNumOffsets">Max number of offsets to receive. Returns 2 when time = -1 (first and current); returns 1 when time = -2.</param>
/// <param name="correlationId">Id used by the client to identify this transaction. Returned in the response.</param>
/// <param name="clientId">Name to identify the client. Used in server logs.</param>
/// <param name="partitionId">Requested partition.</param>
/// <returns>The offset response for the requested topics.</returns>
public OffsetResponse GetOffsetResponse(List<string> topics, long? time, int maxNumOffsets, int correlationId, string clientId, int partitionId)
{
    var requestTime = time.GetValueOrDefault(OffsetRequest.LatestTime);
    var request = new OffsetRequest(correlationId, clientId);
    foreach (var topicName in topics)
    {
        request.AddTopic(topicName, partitionId, requestTime, maxNumOffsets);
    }

    return GetOffsetResponseBefore(request);
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>List of offsets, in descending order.</returns>
public IList<long> GetOffsetsBefore(OffsetRequest request)
{
    List<long> offsets = new List<long>();
    using (KafkaConnection connection = new KafkaConnection(Server, Port))
    {
        connection.Write(request.GetBytes());
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength > 0)
        {
            byte[] data = connection.Read(dataLength);

            int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray<byte>()), 0);
            if (errorCode != KafkaException.NoError)
            {
                throw new KafkaException(errorCode);
            }

            // skip the error code and process the rest
            byte[] unbufferedData = data.Skip(2).ToArray();

            // first four bytes are the number of offsets
            int numOfOffsets = BitConverter.ToInt32(BitWorks.ReverseBytes(unbufferedData.Take(4).ToArray<byte>()), 0);

            int position = 0;
            for (int ix = 0; ix < numOfOffsets; ix++)
            {
                position = (ix * 8) + 4;
                offsets.Add(BitConverter.ToInt64(BitWorks.ReverseBytes(unbufferedData.Skip(position).Take(8).ToArray<byte>()), 0));
            }
        }
    }

    return offsets;
}
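// Response layout implied by the parser above (all integers big-endian):
//   Length      : int32  (read first; frames the rest of the response)
//   ErrorCode   : int16  (non-zero raises KafkaException)
//   OffsetCount : int32
//   Offsets     : OffsetCount x int64, in descending order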
public async Task TestNewTopicProductionWorksOk()
{
    using (var temporaryTopic = testCluster.CreateTemporaryTopic())
    using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
    {
        var topic = temporaryTopic.Name;

        // Wait for the new topic's leader to become available.
        {
            var request = new MetadataRequest { Topics = new List<string> { topic } };
            MetadataResponse response = null;
            while (response == null)
            {
                response = await connection.SendRequestAsync(request, CancellationToken.None);
                if (response.Topics[0].ErrorCode == ErrorResponseCode.LeaderNotAvailable)
                {
                    response = null;
                    await Task.Delay(1000);
                }
            }

            Assert.That(response, Is.Not.Null);
            var first = response;
            Assert.That(first.Topics, Has.Length.EqualTo(1));
            var firstTopic = first.Topics.First();
            Assert.That(firstTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(firstTopic.Name, Is.EqualTo(topic));
            Assert.That(firstTopic.Partitions, Has.Length.EqualTo(1));
            var firstPartition = firstTopic.Partitions.First();
            Assert.That(firstPartition.PartitionId, Is.EqualTo(0));
        }

        // Produce four messages.
        {
            var request = new ProduceRequest
            {
                Acks = 1,
                TimeoutMS = 10000,
                Payload = new List<Payload>
                {
                    new Payload
                    {
                        Topic = topic,
                        Partition = 0,
                        Codec = MessageCodec.CodecNone,
                        Messages = new List<Message>
                        {
                            new Message("Message 1"),
                            new Message("Message 2"),
                            new Message("Message 3"),
                            new Message("Message 4"),
                        }
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Is.Not.Null);
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Offset, Is.EqualTo(0));
        }

        // Fetch the first message back.
        {
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch { MaxBytes = 40, Offset = 0, PartitionId = 0, Topic = topic }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(1));
            var firstMessage = first.Messages.First();
            Assert.That(firstMessage.Meta.Offset, Is.EqualTo(0));
            Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(firstMessage.Attribute, Is.EqualTo(0));
            Assert.That(firstMessage.Key, Is.Null);
            Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(firstMessage.Value, Is.Not.Null);
            var firstString = firstMessage.Value.ToUtf8String();
            Assert.That(firstString, Is.EqualTo("Message 1"));
        }

        // Check the latest and earliest offsets.
        {
            var request = new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset { MaxOffsets = 2, PartitionId = 0, Time = -1, Topic = topic }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Offsets, Has.Length.EqualTo(2));
            Assert.That(first.Offsets[0], Is.EqualTo(4));
            Assert.That(first.Offsets[1], Is.EqualTo(0));
        }

        // Wait for the consumer coordinator to become available.
        {
            var request = new ConsumerMetadataRequest { ConsumerGroup = topic };
            ConsumerMetadataResponse response = null;
            while (response == null)
            {
                response = await connection.SendRequestAsync(request, CancellationToken.None);
                if (response.Error == ErrorResponseCode.ConsumerCoordinatorNotAvailableCode)
                {
                    response = null;
                    await Task.Delay(1000);
                }
            }

            Assert.That(response.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Console.WriteLine("Id = {0}, Host = {1}, Port = {2}", response.CoordinatorId, response.CoordinatorHost, response.CoordinatorPort);
        }

        // No offset committed yet.
        {
            var request = new OffsetFetchRequest
            {
                ConsumerGroup = topic,
                Topics = new List<OffsetFetch>
                {
                    new OffsetFetch { PartitionId = 0, Topic = topic }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.MetaData, Is.Empty);
            Assert.That(first.Offset, Is.EqualTo(-1));
        }

        // Commit an offset.
        {
            var request = new OffsetCommitRequest
            {
                ConsumerGroup = topic,
                ConsumerGroupGenerationId = 1,
                ConsumerId = "0",
                OffsetCommits = new List<OffsetCommit>
                {
                    new OffsetCommit
                    {
                        Metadata = "Metadata 1",
                        Offset = 0,
                        PartitionId = 0,
                        TimeStamp = -1,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
        }

        // Fetch the committed offset back.
        {
            var request = new OffsetFetchRequest
            {
                ConsumerGroup = topic,
                Topics = new List<OffsetFetch>
                {
                    new OffsetFetch { PartitionId = 0, Topic = topic }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.MetaData, Is.EqualTo("Metadata 1"));
            Assert.That(first.Offset, Is.EqualTo(0));
        }

        // Fetch the remaining messages, starting after the committed offset.
        {
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch { MaxBytes = 1024, Offset = 0 + 1, PartitionId = 0, Topic = topic }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(3));
            var firstMessage = first.Messages.First();
            Assert.That(firstMessage.Meta.Offset, Is.EqualTo(1));
            Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(firstMessage.Attribute, Is.EqualTo(0));
            Assert.That(firstMessage.Key, Is.Null);
            Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(firstMessage.Value, Is.Not.Null);
            var firstString = firstMessage.Value.ToUtf8String();
            Assert.That(firstString, Is.EqualTo("Message 2"));
            var lastMessage = first.Messages.Last();
            Assert.That(lastMessage.Meta.Offset, Is.EqualTo(3));
            Assert.That(lastMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(lastMessage.Attribute, Is.EqualTo(0));
            Assert.That(lastMessage.Key, Is.Null);
            Assert.That(lastMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(lastMessage.Value, Is.Not.Null);
            var lastString = lastMessage.Value.ToUtf8String();
            Assert.That(lastString, Is.EqualTo("Message 4"));
        }

        // Fetch past the end: no messages.
        {
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch { MaxBytes = 1024, Offset = 3 + 1, PartitionId = 0, Topic = topic }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(0));
        }
    }

    Console.WriteLine("Test completed");
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>List of offsets, in descending order.</returns>
public OffsetResponse GetOffsetResponseBefore(OffsetRequest request)
{
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());
        int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);
        if (dataLength == 0)
        {
            return null;
        }

        byte[] data = connection.Read(dataLength);
        var offsetResponse = new OffsetResponse(data);
        return offsetResponse;
    }
}
/// <summary>
/// Get the current (latest) offset for a topic/partition.
/// </summary>
/// <param name="topic">The topic to check.</param>
/// <param name="clientId">Name to identify the client. Used in server logs.</param>
/// <param name="partitionId">The partition on the topic.</param>
/// <returns>OffsetResponse containing the next offset for the topic.</returns>
public OffsetResponse GetCurrentOffset(string topic, string clientId, int partitionId)
{
    var request = new OffsetRequest(DefaultCorrelationId, clientId);
    request.AddTopic(topic, partitionId, OffsetRequest.LatestTime, 1);
    return GetOffsetResponseBefore(request);
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="topic">The topic to check.</param>
/// <param name="time">Time in millisecs (if -1, just get from the latest available).</param>
/// <param name="maxNumOffsets">The maximum number of offsets to return.</param>
/// <param name="correlationId">Id used by the client to identify this transaction. Returned in the response.</param>
/// <param name="clientId">Name to identify the client. Used in server logs.</param>
/// <param name="partitionId">The partition on the topic.</param>
/// <returns>OffsetResponse for the requested topic/partition.</returns>
public OffsetResponse GetOffsetResponse(string topic, long time, int maxNumOffsets, int correlationId, string clientId, int partitionId)
{
    var request = new OffsetRequest(correlationId, clientId);
    request.AddTopic(topic, partitionId, time, maxNumOffsets);
    return GetOffsetResponseBefore(request);
}
/// <summary>
/// Get a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The parsed offset response.</returns>
internal OffsetResponse GetOffsetsBefore(OffsetRequest request)
{
    return OffsetResponse.ReadFrom(this.SendRequest(request).Buffer);
}
private static long GetOffset(Consumer consumer, string topic, int partition, long offsetTime)
{
    string s = string.Empty;
    bool success = false;
    long result = 0;
    if (consumer == null)
    {
        throw new ArgumentNullException("consumer");
    }

    if (string.IsNullOrEmpty(topic))
    {
        throw new ArgumentNullException("topic");
    }

    try
    {
        var offsetRequestInfo = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
        offsetRequestInfo.Add(topic, new List<PartitionOffsetRequestInfo>
        {
            new PartitionOffsetRequestInfo(partition, offsetTime, 128)
        });
        var offsetRequest = new OffsetRequest(offsetRequestInfo);
        var offsetResponse = consumer.GetOffsetsBefore(offsetRequest);
        if (null == offsetResponse)
        {
            s = string.Format("OffsetResponse for EarliestTime not found,topic={0}", topic);
            throw new ArgumentException(s);
        }

        List<PartitionOffsetsResponse> partitionOffset = null;
        if (!offsetResponse.ResponseMap.TryGetValue(topic, out partitionOffset)
            || partitionOffset == null || partitionOffset.Count == 0)
        {
            s = string.Format("OffsetResponse.ResponseMap for EarliestTime not found,topic={0}", topic);
            throw new ArgumentException(s);
        }

        foreach (var v in partitionOffset)
        {
            if (v.PartitionId == partition)
            {
                result = v.Offsets.First();
                success = true;
                break;
            }
        }

        if (!success)
        {
            s = string.Format("OffsetResponse.ResponseMap.Partition not found partition={0},topic={1}", partition, topic);
            throw new ArgumentException(s);
        }
    }
    catch (Exception e)
    {
        Logger.Error(string.Format("GetOffset exception,partition={0},topic={1}", partition, topic), e);
        throw;
    }

    return result;
}
/// <summary>
/// Get the earliest and latest offsets for a topic partition, refreshing a local cache as needed.
/// </summary>
public void RefreshAndGetOffset(short versionId, string clientId, int correlationId, string topic, int partitionId, bool forceRefreshOffsetCache, out long earliestOffset, out long latestOffset)
{
    earliestOffset = -1;
    latestOffset = -1;

    // Serve from the cache unless a refresh is forced or the cache is incomplete.
    if (!forceRefreshOffsetCache && this.TopicOffsetEarliest.ContainsKey(topic)
        && this.TopicOffsetEarliest[topic].ContainsKey(partitionId))
    {
        earliestOffset = this.TopicOffsetEarliest[topic][partitionId];
    }

    if (!forceRefreshOffsetCache && this.TopicOffsetLatest.ContainsKey(topic)
        && this.TopicOffsetLatest[topic].ContainsKey(partitionId))
    {
        latestOffset = this.TopicOffsetLatest[topic][partitionId];
    }

    if (!forceRefreshOffsetCache && earliestOffset != -1 && latestOffset != -1)
    {
        return;
    }

    using (Consumer consumer = this.GetConsumer(topic, partitionId))
    {
        // Earliest
        var offsetRequestInfoEarliest = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
        var offsetRequestInfoForPartitionsEarliest = new List<PartitionOffsetRequestInfo>
        {
            new PartitionOffsetRequestInfo(partitionId, OffsetRequest.EarliestTime, 1)
        };
        offsetRequestInfoEarliest.Add(topic, offsetRequestInfoForPartitionsEarliest);
        OffsetRequest offsetRequestEarliest = new OffsetRequest(offsetRequestInfoEarliest);
        OffsetResponse offsetResponseEarliest = consumer.GetOffsetsBefore(offsetRequestEarliest);
        List<PartitionOffsetsResponse> partitionOffsetEarliest = null;
        if (offsetResponseEarliest.ResponseMap.TryGetValue(topic, out partitionOffsetEarliest))
        {
            foreach (var p in partitionOffsetEarliest)
            {
                if (p.Error == ErrorMapping.NoError && p.PartitionId == partitionId)
                {
                    earliestOffset = p.Offsets[0];

                    // Cache
                    if (!this.TopicOffsetEarliest.ContainsKey(topic))
                    {
                        this.TopicOffsetEarliest.TryAdd(topic, new ConcurrentDictionary<int, long>());
                    }

                    this.TopicOffsetEarliest[topic][partitionId] = earliestOffset;
                }
            }
        }

        // Latest
        var offsetRequestInfoLatest = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
        var offsetRequestInfoForPartitionsLatest = new List<PartitionOffsetRequestInfo>
        {
            new PartitionOffsetRequestInfo(partitionId, OffsetRequest.LatestTime, 1)
        };
        offsetRequestInfoLatest.Add(topic, offsetRequestInfoForPartitionsLatest);
        OffsetRequest offsetRequestLatest = new OffsetRequest(offsetRequestInfoLatest);
        OffsetResponse offsetResponseLatest = consumer.GetOffsetsBefore(offsetRequestLatest);
        List<PartitionOffsetsResponse> partitionOffsetLatest = null;
        if (offsetResponseLatest.ResponseMap.TryGetValue(topic, out partitionOffsetLatest))
        {
            foreach (var p in partitionOffsetLatest)
            {
                if (p.Error == ErrorMapping.NoError && p.PartitionId == partitionId)
                {
                    latestOffset = p.Offsets[0];

                    // Cache
                    if (!this.TopicOffsetLatest.ContainsKey(topic))
                    {
                        this.TopicOffsetLatest.TryAdd(topic, new ConcurrentDictionary<int, long>());
                    }

                    this.TopicOffsetLatest[topic][partitionId] = latestOffset;
                }
            }
        }
    }
}
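// Usage sketch for RefreshAndGetOffset above. Hedged: "helper" stands for an
// already-constructed instance of the defining class; note that versionId,
// clientId and correlationId are accepted but not used by the body above.
long earliestOffset, latestOffset;
helper.RefreshAndGetOffset(0, "example-client", 1, "my-topic", 0, true, out earliestOffset, out latestOffset);
Console.WriteLine("earliest={0} latest={1}", earliestOffset, latestOffset);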
/// <summary>
/// Writes an offset request to the server.
/// </summary>
/// <remarks>
/// Write timeout defaults to infinite.
/// </remarks>
/// <param name="request">The <see cref="OffsetRequest"/> to send to the server.</param>
public void Write(OffsetRequest request)
{
    this.EnsuresNotDisposed();
    Guard.NotNull(request, "request");
    this.Write(request.RequestBuffer.GetBuffer());
}
private OffsetRequest CreateFetchLastOffsetRequest()
{
    var offset = new Offset
    {
        PartitionId = _partitionId,
        Topic = _topic,
        MaxOffsets = 1
    };

    var request = new OffsetRequest
    {
        Offsets = new List<Offset> { offset },
        ClientId = _clientId
    };

    return request;
}