/// <summary>
/// End-to-end check that consumption can be stopped mid-stream, the consumer's
/// offsets serialized to bytes, and a brand-new consumer resumed from those bytes
/// without losing or duplicating any messages. Sends 100 messages, consumes 50,
/// round-trips the offsets through WriteOffsets()/TopicPartitionOffsets(bytes),
/// then consumes the remaining 50 and asserts all 100 distinct values arrived.
/// </summary>
// FIX: was 'public async void' — an async void test swallows exceptions and cannot
// be awaited by the test runner; 'async Task' lets the framework observe failures.
public async Task SaveOffsetsAndResumeConsuming()
{
    kafka4net.Tracing.EtwTrace.Marker("SaveOffsetsAndResumeConsuming");

    var sentEvents = new Subject<Message>();
    var topic = "part12." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 5, 2);

    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
    producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
    await producer.ConnectAsync();

    // send 100 messages, each carrying its sequence number as the payload
    Enumerable.Range(1, 100).
        Select(i => new Message { Value = BitConverter.GetBytes(i) }).
        ForEach(producer.Send);
    _log.Info("Waiting for 100 sent messages");
    sentEvents.Subscribe(msg => _log.Debug("Sent {0}", BitConverter.ToInt32(msg.Value, 0)));
    // block until the producer has confirmed all 100 sends
    await sentEvents.Take(100).ToTask();

    // capture the topic-start offsets before any consumption
    var offsets1 = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);

    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));

    // now consume the "first" 50. Stop, save offsets, and restart.
    var consumer1 = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets1));
    var receivedEvents = new List<int>(100);
    _log.Info("Consuming first half of messages.");
    await consumer1.OnMessageArrived
        .Do(msg =>
        {
            var value = BitConverter.ToInt32(msg.Value, 0);
            _log.Info("Consumer1 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
            receivedEvents.Add(value);
            // track progress so the saved offsets reflect exactly what was consumed
            offsets1.UpdateOffset(msg.Partition, msg.Offset);
        })
        .Take(50);
    _log.Info("Closing first consumer");
    consumer1.Dispose();

    // now serialize the offsets to raw bytes...
    var offsetBytes = offsets1.WriteOffsets();

    // ...and resume with a fresh offsets object and a fresh consumer built from them
    var offsets2 = new TopicPartitionOffsets(offsetBytes);
    var consumer2 = new Consumer(new ConsumerConfiguration(_seed2Addresses, offsets2.Topic, offsets2));
    await consumer2.OnMessageArrived
        .Do(msg =>
        {
            var value = BitConverter.ToInt32(msg.Value, 0);
            _log.Info("Consumer2 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
            receivedEvents.Add(value);
            offsets2.UpdateOffset(msg.Partition, msg.Offset);
        })
        .Take(50);
    _log.Info("Closing second consumer");
    consumer2.Dispose();

    // every one of the 100 values must have been seen exactly once across both consumers
    Assert.AreEqual(100, receivedEvents.Distinct().Count());
    Assert.AreEqual(100, receivedEvents.Count);

    kafka4net.Tracing.EtwTrace.Marker("/SaveOffsetsAndResumeConsuming");
}
/// <summary>
/// Round-trips a TopicPartitionOffsets instance through its binary form and
/// verifies that every partition's next offset survives serialization unchanged.
/// </summary>
public void TopicPartitionOffsetsSerializeAndDeSerialize()
{
    kafka4net.Tracing.EtwTrace.Marker("TopicPartitionOffsetsSerializeAndDeSerialize");

    const int partitionCount = 50;

    // populate partitions 0..49 with arbitrary random offsets
    var original = new TopicPartitionOffsets("test");
    foreach (var partition in Enumerable.Range(0, partitionCount))
    {
        original.UpdateOffset(partition, _rnd.Next());
    }

    // serialize to bytes, then rebuild a fresh instance from those bytes
    var serialized = original.WriteOffsets();
    var roundTripped = new TopicPartitionOffsets(serialized);

    // each partition's next offset must match the pre-serialization value
    foreach (var partition in Enumerable.Range(0, partitionCount))
    {
        Assert.AreEqual(original.NextOffset(partition), roundTripped.NextOffset(partition));
    }

    kafka4net.Tracing.EtwTrace.Marker("/TopicPartitionOffsetsSerializeAndDeSerialize");
}