/// <summary>
/// Stops the broker that currently leads the given topic/partition and
/// returns the name of the broker that was stopped (so callers can restart it later).
/// </summary>
/// <param name="cluster">Connected cluster used to resolve partition leadership.</param>
/// <param name="topic">Topic whose partition leader should be stopped.</param>
/// <param name="partition">Partition id within the topic.</param>
/// <returns>The vagrant broker name that was stopped.</returns>
public static string StopBrokerLeaderForPartition(Cluster cluster, string topic, int partition)
{
    // Resolve the leader's metadata, map its host IP to a vagrant broker name, then stop it.
    var leaderMeta = cluster.FindBrokerMetaForPartitionId(topic, partition);
    var leaderName = GetBrokerNameFromIp(leaderMeta.Host);
    StopBroker(leaderName);
    return leaderName;
}
/// <summary>
/// Moves the replica assignment for the given topic/partition off its current node
/// onto the other broker (nodes 1 and 2 only, per the vagrant test setup), by writing
/// a reassignment JSON file on broker1 and running kafka-reassign-partitions.sh there.
/// </summary>
/// <param name="cluster">Connected cluster used to find the partition's current broker.</param>
/// <param name="topic">Topic whose partition is being reassigned.</param>
/// <param name="partition">Partition id to move.</param>
public static void ReassignPartitions(Cluster cluster, string topic, int partition)
{
    var brokerMeta = cluster.FindBrokerMetaForPartitionId(topic, partition);
    // Only nodes 1 and 2 exist in this setup; move to whichever one is not the current node.
    var brokerToMoveTo = brokerMeta.NodeId == 1 ? 2 : 1;
    var partitionsJson = string.Format("{{\"partitions\":[{{\"topic\":\"{0}\",\"partition\":{1},\"replicas\":[{2}]}}], \"version\":1}}", topic, partition, brokerToMoveTo);
    // Use the logger's formatting overload (consistent with the rest of the file) instead of
    // an eager string.Format — defers formatting to the logging framework.
    _log.Info("Reassigning Partitions (topic {0}, partition {1}, from node {2} to node {3})", topic, partition, brokerMeta.NodeId, brokerToMoveTo);
    // Escape embedded quotes so the JSON survives the ssh/printf round trip, then write the
    // reassignment file on broker1.
    var generateJson = "ssh -c \"printf '" + partitionsJson.Replace("\"", @"\\\""") + "' >partitions-to-move.json\" broker1";
    Vagrant(generateJson);
    // Execute the reassignment against zookeeper from broker1.
    var reassignScript = "ssh -c '/opt/kafka_2.10-" + _kafkaVersion + "/bin/kafka-reassign-partitions.sh --zookeeper 192.168.56.2 --reassignment-json-file partitions-to-move.json --execute' broker1";
    Vagrant(reassignScript);
    _log.Info("Reassigned Partitions");
}
/// <summary>
/// Exercises Cluster.FetchPartitionOffsetsAsync on a fresh single-partition topic:
/// head and tail both start at 0; after each loop of <c>count</c> 1KB messages the tail
/// advances by exactly <c>count</c>; after closing the producer the head is still 0 and
/// the tail equals the total number of messages published.
/// </summary>
/// <returns>
/// Task (changed from <c>async void</c>) so the test runner can await completion and
/// observe assertion failures/exceptions thrown after an await.
/// </returns>
public async Task ReadOffsets()
{
    kafka4net.Tracing.EtwTrace.Marker("ReadOffsets");
    var sentEvents = new Subject<Message>();
    var topic = "part12." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    var cluster = new Cluster(_seed2Addresses);
    await cluster.ConnectAsync();
    var producer = new Producer(cluster, new ProducerConfiguration(topic, maxMessageSetSizeInBytes: 1024 * 1024));
    // Mirror successful sends into the subject so we can await exactly 'count' acks per loop.
    producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
    await producer.ConnectAsync();

    // read offsets of empty queue
    var heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    var tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    Assert.AreEqual(1, heads.Partitions.Count(), "Expected just one head partition");
    Assert.AreEqual(1, tails.Partitions.Count(), "Expected just one tail partition");
    Assert.AreEqual(0L, heads.NextOffset(heads.Partitions.First()), "Expected start at 0");
    Assert.AreEqual(0L, tails.NextOffset(tails.Partitions.First()), "Expected end at 0");

    // log the broker selected as master
    var brokerMeta = cluster.FindBrokerMetaForPartitionId(topic, heads.Partitions.First());
    _log.Info("Partition Leader is {0}", brokerMeta);

    // saw some inconsistency, so run this a few times.
    const int count = 1100;
    const int loops = 10;
    for (int i = 0; i < loops; i++)
    {
        // NOTE that the configuration for the test machines through vagrant are set to 1MB rolling file segments
        // so we need to generate large messages to force multiple segments to be created.

        // send count messages
        var t = sentEvents.Take(count).ToTask();
        Enumerable.Range(1, count).
            Select(_ => new Message { Value = new byte[1024] }).
            ForEach(producer.Send);
        _log.Info("Waiting for {0} sent messages", count);
        await t;

        // re-read offsets after messages published
        await Task.Delay(TimeSpan.FromSeconds(2)); // NOTE: There seems to be a race condition on the Kafka broker that the offsets are not immediately available after getting a successful produce response
        tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
        _log.Info("2:After loop {0} of {1} messages, Next Offset is {2}", i + 1, count, tails.NextOffset(tails.Partitions.First()));
        Assert.AreEqual(count * (i + 1), tails.NextOffset(tails.Partitions.First()), "Expected end at " + count * (i + 1));
    }

    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    await Task.Delay(TimeSpan.FromSeconds(1));

    // re-read offsets after messages published
    heads = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
    tails = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
    Assert.AreEqual(1, heads.Partitions.Count(), "Expected just one head partition");
    Assert.AreEqual(1, tails.Partitions.Count(), "Expected just one tail partition");
    Assert.AreEqual(0L, heads.NextOffset(heads.Partitions.First()), "Expected start at 0");
    // FIX: the failure message previously reported only 'count'; it now matches the asserted total.
    Assert.AreEqual(count * loops, tails.NextOffset(tails.Partitions.First()), "Expected end at " + count * loops);

    kafka4net.Tracing.EtwTrace.Marker("/ReadOffsets");
}