Example #1
        /// <summary>
        /// Returns the difference in the total number of messages between this set of offsets and the given set.
        /// </summary>
        /// <param name="priorOffsets">The earlier offsets. If the passed offsets are larger than this instance's offsets, the result will be negative.</param>
        /// <returns>The sum of the per-partition offset differences.</returns>
        public long MessagesSince(TopicPartitionOffsets priorOffsets)
        {
            if (_offsets.Count != priorOffsets._offsets.Count || Topic != priorOffsets.Topic)
            {
                throw new ArgumentException("priorOffsets does not match Topic or Number of Partitions of this set of offsets.");
            }

            return _offsets.Keys.Select(p => _offsets[p] - priorOffsets._offsets[p]).Sum();
        }
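
A minimal usage sketch (not from the source): diffing two topic-end snapshots to see how many messages were produced in between. It assumes FetchPartitionOffsetsAsync returns a TopicPartitionOffsets for the same topic, as the consumer tests below use it; ProducedInLast10Seconds and its parameters are hypothetical.

        // Hedged sketch: measure produced-message count over an interval by
        // diffing two snapshots of the topic-end offsets.
        // seedAddresses and topic are placeholders, as in the tests below.
        public async Task<long> ProducedInLast10Seconds(string seedAddresses, string topic)
        {
            var cluster = new Cluster(seedAddresses);
            await cluster.ConnectAsync();
            var before = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
            await Task.Delay(TimeSpan.FromSeconds(10));
            var after = await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
            return after.MessagesSince(before);
        }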
Example #2
 public StartAndStopAtExplicitOffsets(TopicPartitionOffsets startingOffsets, TopicPartitionOffsets stoppingOffsets)
 {
     _startingOffsets = startingOffsets;
     _stoppingOffsets = stoppingOffsets;
 }
Example #3
 public StartAndStopAtExplicitOffsets(IEnumerable<Tuple<int, long>> startingOffsets, IEnumerable<Tuple<int, long>> stoppingOffsets)
 {
     _startingOffsets = new TopicPartitionOffsets("__xxx__", startingOffsets);
     _stoppingOffsets = new TopicPartitionOffsets("__xxx__", stoppingOffsets);
 }
Example #4
 public StartAndStopAtExplicitOffsets(Dictionary<int, long> startingOffsets, Dictionary<int, long> stoppingOffsets)
 {
     _startingOffsets = new TopicPartitionOffsets("__xxx__", startingOffsets);
     _stoppingOffsets = new TopicPartitionOffsets("__xxx__", stoppingOffsets);
 }
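
Examples #2 through #4 are the three constructor overloads of the StartAndStopAtExplicitOffsets provider used in Example #9; the "__xxx__" topic name is the snippets' own placeholder. A hypothetical call through the Dictionary overload, pinning per-partition start and stop positions (all offset values below are invented for illustration):

 // Hypothetical values: consume partition 0 from offset 100 up to 200,
 // and partition 1 from the beginning up to offset 50.
 var start = new Dictionary<int, long> { { 0, 100 }, { 1, 0 } };
 var stop = new Dictionary<int, long> { { 0, 200 }, { 1, 50 } };
 var startStopProvider = new StartAndStopAtExplicitOffsets(start, stop);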
Example #5
        public async Task ExplicitOffset()
        {
            kafka4net.Tracing.EtwTrace.Marker("ExplicitOffset");
            // create new topic with 3 partitions
            var topic = "part33." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic, 3, 3);

            // fill it out with 10K messages
            const int count = 10*1000;
            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
            await producer.ConnectAsync();

            var sentMessagesObservable = Observable.FromEvent<Message[]>(evtHandler => producer.OnSuccess += evtHandler, evtHandler => { })
                .SelectMany(msgs => msgs)
                .Take(count)
                .TakeUntil(DateTime.Now.AddSeconds(10))
                .ToList();

            _log.Info("Sending data");
            Enumerable.Range(1, count).
                Select(i => new Message { Value = BitConverter.GetBytes(i) }).
                ForEach(producer.Send);

            var sentMsgs = await sentMessagesObservable;
            _log.Info("Producer sent {0} messages.", sentMsgs.Count);

            _log.Debug("Closing producer");
            await producer.CloseAsync(TimeSpan.FromSeconds(5));

            var offsetFetchCluster = new Cluster(_seed2Addresses);
            await offsetFetchCluster.ConnectAsync();

            // consume tail-300 for each partition
            await Task.Delay(TimeSpan.FromSeconds(1));
            var offsets = new TopicPartitionOffsets(
                                topic, (await offsetFetchCluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd))
                                        .GetPartitionsOffset.Select(kv => new KeyValuePair<int, long>(kv.Key, kv.Value - 300)));
            _log.Info("Sum of offsets {0}. Raw: {1}", offsets.Partitions.Sum(p => offsets.NextOffset(p)), offsets);
            
            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets));
            var messages = consumer.OnMessageArrived.
                GroupBy(m => m.Partition).Replay();
            messages.Connect();
            await consumer.IsConnected;

            var consumerSubscription = messages.Subscribe(p => p.Take(10).Subscribe(
                m => _log.Debug("Got message {0}/{1}", m.Partition, BitConverter.ToInt32(m.Value, 0)),
                e => _log.Error("Error", e),
                () => _log.Debug("Complete part {0}", p.Key)
            ));

            // wait for 3 partitions to arrive and for every partition to read at least 100 messages
            await messages.Select(g => g.Take(100)).Take(3).ToTask();

            consumerSubscription.Dispose();
            consumer.Dispose();

            kafka4net.Tracing.EtwTrace.Marker("/ExplicitOffset");
        }
Example #6
        public async Task InvalidOffsetShouldLogErrorAndStopFetching()
        {
            var count = 100;
            var topic = "test11."+_rnd.Next();
            await FillOutQueue(topic, count);

            var badPartitionMap = new Dictionary<int, long>{{0, -1}};
            var offsets = new TopicPartitionOffsets(topic, badPartitionMap);
            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets));
            await consumer.OnMessageArrived.Take(count);
            await consumer.IsConnected;
            _log.Info("Done");
        }
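
The test above feeds a deliberately invalid offset (-1 for partition 0) and expects the consumer to log an error and stop fetching. A hedged sketch of a guard callers could run before constructing TopicPartitionOffsets; ValidateOffsets is a hypothetical helper, not part of kafka4net:

        // Hypothetical guard, not part of kafka4net: reject negative offsets
        // before they are handed to a Consumer, since an invalid offset stops
        // fetching with a logged error (as the test above exercises).
        private static void ValidateOffsets(IReadOnlyDictionary<int, long> partitionOffsets)
        {
            foreach (var kv in partitionOffsets)
                if (kv.Value < 0)
                    throw new ArgumentOutOfRangeException("partitionOffsets",
                        string.Format("Partition {0} has negative offset {1}", kv.Key, kv.Value));
        }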
Example #7
        public async Task SaveOffsetsAndResumeConsuming()
        {
            kafka4net.Tracing.EtwTrace.Marker("SaveOffsetsAndResumeConsuming");

            var sentEvents = new Subject<Message>();
            var topic = "part12." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic, 5, 2);

            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
            producer.OnSuccess += e => e.ForEach(sentEvents.OnNext);
            await producer.ConnectAsync();

            // send 100 messages
            Enumerable.Range(1, 100).
                Select(i => new Message { Value = BitConverter.GetBytes(i) }).
                ForEach(producer.Send);
            _log.Info("Waiting for 100 sent messages");
            sentEvents.Subscribe(msg => _log.Debug("Sent {0}", BitConverter.ToInt32(msg.Value, 0)));
            await sentEvents.Take(100).ToTask();


            var offsets1 = await producer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);

            _log.Info("Closing producer");
            await producer.CloseAsync(TimeSpan.FromSeconds(5));

            // now consume the "first" 50. Stop, save offsets, and restart.
            var consumer1 = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, offsets1));
            var receivedEvents = new List<int>(100);

            _log.Info("Consuming first half of messages.");

            await consumer1.OnMessageArrived
                .Do(msg =>
                {
                    var value = BitConverter.ToInt32(msg.Value, 0);
                    _log.Info("Consumer1 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
                    receivedEvents.Add(value);
                    offsets1.UpdateOffset(msg.Partition, msg.Offset);
                })
                .Take(50);
            //await consumer1.IsConnected;

            _log.Info("Closing first consumer");
            consumer1.Dispose();

            // now serialize the offsets.
            var offsetBytes = offsets1.WriteOffsets();

            // load a new set of offsets, and a new consumer
            var offsets2 = new TopicPartitionOffsets(offsetBytes);

            var consumer2 = new Consumer(new ConsumerConfiguration(_seed2Addresses, offsets2.Topic, offsets2));

            await consumer2.OnMessageArrived
                .Do(msg =>
                {
                    var value = BitConverter.ToInt32(msg.Value, 0);
                    _log.Info("Consumer2 Received value {0} from partition {1} at offset {2}", value, msg.Partition, msg.Offset);
                    receivedEvents.Add(value);
                    offsets2.UpdateOffset(msg.Partition, msg.Offset);
                })
                .Take(50);
            //await consumer2.IsConnected;

            _log.Info("Closing second consumer");
            consumer2.Dispose();

            Assert.AreEqual(100, receivedEvents.Distinct().Count());
            Assert.AreEqual(100, receivedEvents.Count);

            kafka4net.Tracing.EtwTrace.Marker("/SaveOffsetsAndResumeConsuming");
        }
Example #8
        public void TopicPartitionOffsetsSerializeAndDeSerialize()
        {
            kafka4net.Tracing.EtwTrace.Marker("TopicPartitionOffsetsSerializeAndDeSerialize");

            var offsets1 = new TopicPartitionOffsets("test");

            for (int i = 0; i < 50; i++)
            {
                offsets1.UpdateOffset(i, _rnd.Next());
            }

            // save bytes
            var offsetBytes = offsets1.WriteOffsets();

            var offsets2 = new TopicPartitionOffsets(offsetBytes);

            for (int i = 0; i < 50; i++)
            {
                Assert.AreEqual(offsets1.NextOffset(i), offsets2.NextOffset(i));
            }

            kafka4net.Tracing.EtwTrace.Marker("/TopicPartitionOffsetsSerializeAndDeSerialize");
        }
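
Since WriteOffsets() round-trips through a byte[], persisting positions across process restarts reduces to writing that buffer out. A minimal sketch under that assumption; the file path is hypothetical, while the Consumer wiring mirrors Example #7:

        // Hedged sketch: checkpoint offsets to disk, then restore them on the
        // next run. "offsets.bin" is a placeholder path; WriteOffsets() and
        // the byte[] constructor are the round-trip shown in the test above.
        System.IO.File.WriteAllBytes("offsets.bin", offsets1.WriteOffsets());

        var restored = new TopicPartitionOffsets(System.IO.File.ReadAllBytes("offsets.bin"));
        var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, restored.Topic, restored));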
Example #9
        public async Task StartAndStopAtExplicitOffset()
        {
            kafka4net.Tracing.EtwTrace.Marker("StartAndStopAtExplicitOffset");
            // create new topic with 3 partitions
            var topic = "part33." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic, 3, 3);

            // fill it out with 10K messages
            const int count = 10 * 1000;
            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
            await producer.ConnectAsync();

            var sentMessagesObservable = Observable.FromEvent<Message[]>(evtHandler => producer.OnSuccess += evtHandler, evtHandler => { })
                .SelectMany(msgs => msgs)
                .Take(count)
                .TakeUntil(DateTime.Now.AddSeconds(10))
                .ToList();

            _log.Info("Sending data");
            Enumerable.Range(1, count).
                Select(i => new Message { Value = BitConverter.GetBytes(i) }).
                ForEach(producer.Send);

            var sentMsgs = await sentMessagesObservable;
            _log.Info("Producer sent {0} messages.", sentMsgs.Count);

            _log.Debug("Closing producer");
            await producer.CloseAsync(TimeSpan.FromSeconds(5));

            var offsetFetchCluster = new Cluster(_seed2Addresses);
            await offsetFetchCluster.ConnectAsync();

            await Task.Delay(TimeSpan.FromSeconds(1));
            var offsets = await offsetFetchCluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);
            _log.Info("Sum of offsets {0}. Raw: {1}", offsets.Partitions.Sum(p => offsets.NextOffset(p)), offsets);

            // consume from offset 300 up to offset 600 (each capped at the partition end) in every partition
            var offsetStarts = new TopicPartitionOffsets(topic, offsets.GetPartitionsOffset.ToDictionary(pair => pair.Key, pair => pair.Value > 300 ? 300 : pair.Value));
            var offsetStops = new TopicPartitionOffsets(topic, offsets.GetPartitionsOffset.ToDictionary(pair => pair.Key, pair => pair.Value > 600 ? 600 : pair.Value));
            var numMessages = offsetStops.MessagesSince(offsetStarts);
            var startStopProvider = new StartAndStopAtExplicitOffsets(offsetStarts, offsetStops);
            _log.Info("Attempting to consume {0} messages and stop at {1}", numMessages, offsetStops);

            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, startStopProvider, stopPosition: startStopProvider));
            var messages = await consumer.OnMessageArrived.ToList();

            consumer.Dispose();

            Assert.AreEqual(numMessages, messages.Count);

            kafka4net.Tracing.EtwTrace.Marker("/StartAndStopAtExplicitOffset");
        }
Example #10
        /// <summary>For every partition, resolve offsets and build TopicPartition object</summary>
        private async Task<IEnumerable<TopicPartition>> BuildTopicPartitionsAsync()
        {
            // if they didn't specify explicit locations, initialize them here.
            var startPositionProvider = Configuration.StartPosition;
            if (startPositionProvider.StartLocation != ConsumerLocation.SpecifiedLocations)
            {
                // no offsets provided. Need to issue an offset request to get start/end locations and use them for consuming
                var partitions = await _cluster.FetchPartitionOffsetsAsync(Topic, startPositionProvider.StartLocation);

                if (_log.IsDebugEnabled)
                    _log.Debug("Consumer for topic {0} got time->offset resolved for location {1}. parts: [{2}]",
                        Topic, startPositionProvider,
                        string.Join(",", partitions.Partitions.OrderBy(p => p).Select(p => string.Format("{0}:{1}", p, partitions.NextOffset(p)))));

                IStartPositionProvider origProvider = startPositionProvider;
                // the new explicit offsets provider should use only the partitions included in the original one.
                startPositionProvider = new TopicPartitionOffsets(partitions.Topic, partitions.GetPartitionsOffset.Where(kv => origProvider.ShouldConsumePartition(kv.Key)));
            }

            // we now have specified locations to start from, just get the partition metadata, and build the TopicPartitions
            var partitionMeta = await _cluster.GetOrFetchMetaForTopicAsync(Topic);
            return partitionMeta
                // only new partitions we don't already have in our dictionary
                .Where(pm => !_topicPartitions.ContainsKey(pm.Id))
                // only partitions we are "told" to.
                .Where(pm => startPositionProvider.ShouldConsumePartition(pm.Id))
                .Select(part =>
                {
                    var tp = new TopicPartition(_cluster, Topic, part.Id, startPositionProvider.GetStartOffset(part.Id));
                    _topicPartitions.Add(tp.PartitionId, tp);
                    return tp;
                });
        }
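
BuildTopicPartitionsAsync treats TopicPartitionOffsets as an IStartPositionProvider: for non-explicit start locations it fetches offsets first, then narrows to the partitions the original provider allowed. The shape of that interface, as inferred from the calls above; a sketch, not the library's verified declaration:

        // Inferred from usage in BuildTopicPartitionsAsync; no members beyond
        // those actually called above are assumed.
        public interface IStartPositionProvider
        {
            ConsumerLocation StartLocation { get; }
            bool ShouldConsumePartition(int partitionId);
            long GetStartOffset(int partitionId);
        }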