コード例 #1
0
        /// <summary>
        /// Verifies that a producer created with a deliberately tiny send buffer
        /// (sendBuffersInitialSize: 1) grows the buffer automatically under load:
        /// 1000 messages sent at a 10ms interval must all be confirmed via OnSuccess.
        /// Returns Task (rather than async void) so the test runner can await completion
        /// and observe assertion failures/exceptions.
        /// </summary>
        public async Task ProducerSendBufferGrowsAutomatically()
        {
            kafka4net.Tracing.EtwTrace.Marker("ProducerSendBufferGrowsAutomatically");

            // Warm-up phase: push a large burst through a default-configured producer
            // on a separate topic before exercising the small-buffer producer below.
            const int count2 = 25000;
            var topic = "part13." + _rnd.Next();
            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
            _log.Debug("Connecting");
            await producer.ConnectAsync();

            _log.Debug("Filling out {0} with {1} messages", topic, count2);
            // Await completion of the whole send sequence; the materialized list itself is unused.
            await Enumerable.Range(0, count2)
                .Select(i => new Message { Value = BitConverter.GetBytes(i) })
                .ToObservable()
                .Do(producer.Send)
                .Select(msg => BitConverter.ToInt32(msg.Value, 0))
                .ToList();

            await Task.Delay(TimeSpan.FromSeconds(1));

            _log.Info("Done sending messages. Closing producer.");
            await producer.CloseAsync(TimeSpan.FromSeconds(5));
            _log.Info("Producer closed, starting consumer subscription.");

            // Main phase: create a topic with 3 partitions and a producer whose send
            // buffer starts at a single message, forcing it to grow as load increases.
            var topicName = "part33." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topicName, 3, 3);

            // sender is configured with 50ms batch period
            var receivedSubject = new ReplaySubject<Message>();
            producer = new Producer(_seed2Addresses,
                new ProducerConfiguration(topicName, TimeSpan.FromMilliseconds(50), sendBuffersInitialSize: 1));
            producer.OnSuccess += ms => ms.ForEach(receivedSubject.OnNext);
            await producer.ConnectAsync();

            // send 1000 messages at a 10ms interval; awaiting the observable completes
            // after the last message has been handed to the producer
            const int count = 1000;
            await Observable.Interval(TimeSpan.FromMilliseconds(10))
                .Do(l => producer.Send(new Message() {Value = BitConverter.GetBytes((int) l)}))
                .Take(count);

            // wait for all confirmations, but cap the wait at 2 seconds to avoid hanging
            var receivedMessages = await receivedSubject.Take(count).TakeUntil(DateTime.Now.AddSeconds(2)).ToArray();

            Assert.AreEqual(count,receivedMessages.Length);

            await producer.CloseAsync(TimeSpan.FromSeconds(5));
            
            kafka4net.Tracing.EtwTrace.Marker("/ProducerSendBufferGrowsAutomatically");
        }
コード例 #2
0
        /// <summary>
        /// Recovery test: while 200 messages are produced (one per 100ms) and consumed,
        /// the broker leading the partition of the 18th received message is stopped.
        /// The test passes when every sent message is eventually received despite the
        /// leader failover. Returns Task (rather than async void) so the runner can
        /// await completion and observe failures.
        /// </summary>
        public async Task ProducerAndListenerRecoveryTest()
        {
            kafka4net.Tracing.EtwTrace.Marker("ProducerAndListenerRecoveryTest");
            const int count = 200;
            var topic = "part33." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic,6,3);

            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));

            _log.Debug("Connecting");
            await producer.ConnectAsync();

            _log.Debug("Filling out {0}", topic);
            var sentList = new List<int>(200);
            // produce one message every 100ms, recording each sent payload for later comparison
            Observable.Interval(TimeSpan.FromMilliseconds(100))
                .Select(l => (int) l)
                .Select(i => new Message {Value = BitConverter.GetBytes(i)})
                .Take(count)
                .Subscribe(msg=> { producer.Send(msg); sentList.Add(BitConverter.ToInt32(msg.Value, 0)); });

            // small maxBytesPerFetch keeps the consumer slow so the broker kill lands mid-stream
            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxBytesPerFetch: 4 * 8));
            var current = 0;
            var received = new ReplaySubject<ReceivedMessage>();
            Task brokerStopped = null;
            var consumerSubscription = consumer.OnMessageArrived.
                Subscribe(msg => {
                    current++;
                    if (current == 18)
                    {
                        // stop the partition leader on a worker thread; keep the task so it can be awaited later
                        brokerStopped = Task.Factory.StartNew(() => VagrantBrokerUtil.StopBrokerLeaderForPartition(consumer.Cluster, consumer.Topic, msg.Partition), CancellationToken.None, TaskCreationOptions.None, TaskScheduler.Default);
                        _log.Info("Stopping broker leader for partition {0}", msg.Partition);
                    }
                    received.OnNext(msg);
                    _log.Info("Got: {0}", BitConverter.ToInt32(msg.Value, 0));
                });
            await consumer.IsConnected;

            _log.Info("Waiting for receiver complete");
            var receivedList = await received.Select(msg => BitConverter.ToInt32(msg.Value, 0)).Take(count).TakeUntil(DateTime.Now.AddSeconds(60)).ToList().ToTask();

            // guard against NullReferenceException when fewer than 18 messages arrived
            Assert.IsNotNull(brokerStopped, "Broker leader was never stopped: fewer than 18 messages were received");
            await brokerStopped.TimeoutAfter(TimeSpan.FromSeconds(10));

            // get the offsets for comparison later
            var heads = await consumer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
            var tails = await consumer.Cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicEnd);

            _log.Info("Done waiting for receiver. Closing producer.");
            await producer.CloseAsync(TimeSpan.FromSeconds(5));
            _log.Info("Producer closed, disposing consumer subscription.");
            consumerSubscription.Dispose();
            _log.Info("Consumer subscription disposed. Closing consumer.");
            consumer.Dispose();
            _log.Info("Consumer closed.");

            // on mismatch, dump diagnostics (missing messages, broker offsets) before failing
            if (sentList.Count != receivedList.Count)
            {
                _log.Error("Did not receive all messages. Messages received: {0}",string.Join(",",receivedList.OrderBy(i=>i)));
                _log.Error("Did not receive all messages. Messages sent but NOT received: {0}", string.Join(",", sentList.Except(receivedList).OrderBy(i => i)));

                _log.Error("Sum of offsets fetched: {0}", tails.MessagesSince(heads));
                _log.Error("Offsets fetched: [{0}]", string.Join(",", tails.Partitions.Select(p => string.Format("{0}:{1}",p,tails.NextOffset(p)))));
            }

            Assert.AreEqual(sentList.Count, receivedList.Count);
            kafka4net.Tracing.EtwTrace.Marker("/ProducerAndListenerRecoveryTest");
        }
コード例 #3
0
        /// <summary>
        /// Verifies that messages sharing the same key are received in the order they
        /// were sent (Kafka guarantees ordering only within a partition, and keyed
        /// messages map deterministically to partitions). Keys cycle through 0-9.
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task KeyedMessagesPreserveOrder()
        {
            kafka4net.Tracing.EtwTrace.Marker("KeyedMessagesPreserveOrder");
            // create a topic with 3 partitions
            var topicName = "part33." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topicName, 3, 3);
            
            // create listener in a separate connection/broker
            var receivedMsgs = new List<ReceivedMessage>();
            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topicName, new StartPositionTopicEnd()));
            var consumerSubscription = consumer.OnMessageArrived.Synchronize().Subscribe(msg =>
            {
                lock (receivedMsgs)
                {
                    receivedMsgs.Add(msg);
                }
            });
            await consumer.IsConnected;

            // sender is configured with 50ms batch period
            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topicName, TimeSpan.FromMilliseconds(50)));
            await producer.ConnectAsync();

            //
            // generate messages with 10ms interval from a single sender
            // (Range(1, 1) produces one "thread"; keys cycle 0-9 so per-key order can be checked)
            //
            var sentMsgs = new List<Message>();
            _log.Info("Start sending");
            var senders = Enumerable.Range(1, 1).
                Select(thread => Observable.
                    Interval(TimeSpan.FromMilliseconds(10)).
                    Synchronize(). // protect adding to sentMsgs
                    Select(i =>
                    {
                        var str = "msg " + i + " thread " + thread + " " + Guid.NewGuid();
                        var bin = Encoding.UTF8.GetBytes(str);
                        var msg = new Message
                        {
                            Key = BitConverter.GetBytes((int)(i + thread) % 10),
                            Value = bin
                        };
                        return Tuple.Create(msg, i, str);
                    }).
                    Subscribe(msg =>
                    {
                        lock (sentMsgs)
                        {
                            producer.Send(msg.Item1);
                            sentMsgs.Add(msg.Item1);
                            // sanity check: the interval counter must match the send order
                            Assert.AreEqual(msg.Item2, sentMsgs.Count-1);
                        }
                    })
                ).
                ToArray();

            // wait for around 10K messages (10K/(10*10) = 100sec) and close producer
            _log.Info("Waiting for producer to produce enough...");
            await Task.Delay(100*1000);
            _log.Info("Closing senders intervals");
            senders.ForEach(s => s.Dispose());
            _log.Info("Closing producer");
            await producer.CloseAsync(TimeSpan.FromSeconds(5));

            _log.Info("Waiting for additional 10sec");
            await Task.Delay(10*1000);

            _log.Info("Disposing consumer");
            consumerSubscription.Dispose();
            _log.Info("Closing consumer");
            consumer.Dispose();
            _log.Info("Done with networking");

            // compare sent and received messages
            // TODO: for some reason preformance is not what I'd expect it to be and only 6K is generated.
            Assert.GreaterOrEqual(sentMsgs.Count, 4000, "Expected around 10K messages to be sent");

            // on count mismatch, log exactly which messages were lost or unexpected
            if (sentMsgs.Count != receivedMsgs.Count)
            {
                var sentStr = sentMsgs.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();
                var receivedStr = receivedMsgs.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();
                sentStr.Except(receivedStr).
                    ForEach(m => _log.Error("Not received: '{0}'", m));
                receivedStr.Except(sentStr).
                    ForEach(m => _log.Error("Not sent but received: '{0}'", m));
            }
            Assert.AreEqual(sentMsgs.Count, receivedMsgs.Count, "Sent and received messages count differs");
            
            //
            // group messages by key and compare lists in each key to be the same (order should be preserved within key)
            //
            var keysSent = sentMsgs.GroupBy(m => BitConverter.ToInt32(m.Key, 0), m => Encoding.UTF8.GetString(m.Value), (i, mm) => new { Key = i, Msgs = mm.ToArray() }).ToArray();
            var keysReceived = receivedMsgs.GroupBy(m => BitConverter.ToInt32(m.Key, 0), m => Encoding.UTF8.GetString(m.Value), (i, mm) => new { Key = i, Msgs = mm.ToArray() }).ToArray();
            Assert.AreEqual(10, keysSent.Count(), "Expected 10 unique keys 0-9");
            Assert.AreEqual(keysSent.Count(), keysReceived.Count(), "Keys count does not match");
            // compare order within each key
            var notInOrder = keysSent
                .OrderBy(k => k.Key)
                .Zip(keysReceived.OrderBy(k => k.Key), (s, r) => new { s, r, ok = s.Msgs.SequenceEqual(r.Msgs) }).Where(_ => !_.ok).ToArray();

            if (notInOrder.Any())
            {
                _log.Error("{0} keys are out of order", notInOrder.Count());
                notInOrder.ForEach(_ => _log.Error("Failed order in:\n{0}", 
                    string.Join(" \n", DumpOutOfOrder(_.s.Msgs, _.r.Msgs))));
            }
            Assert.IsTrue(!notInOrder.Any(), "Detected out of order messages");

            kafka4net.Tracing.EtwTrace.Marker("/KeyedMessagesPreserveOrder");
        }
コード例 #4
0
        /// <summary>
        /// Memory stress check: runs 10 producer lifecycles of 1M messages each, then
        /// forces garbage collections and logs heap sizes so a leak shows up as growth.
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task Memory()
        {
            var topic = "topic11." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic, 1, 1);

            for (int i = 0; i < 10; ++i)
            {
                var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
                await producer.ConnectAsync();

                for (int j = 0; j < (int)1e6; j++)
                {
                    var msg = new Message
                    {
                        Key = BitConverter.GetBytes(1),
                        Value = Encoding.UTF8.GetBytes($@"SomeLongText - {j}")
                    };

                    producer.Send(msg);
                }

                // generous timeout: a backlog of 1M buffered messages can take a while to flush
                await producer.CloseAsync(TimeSpan.FromSeconds(60));
            }

            var before = GC.GetTotalMemory(false);
            GC.Collect();
            var after = GC.GetTotalMemory(false);
            await Task.Delay(3000);
            GC.Collect();
            GC.WaitForFullGCComplete();
            // read the heap once more after the final collection: the original code
            // collected here but never observed the result, so the log was stale
            var final = GC.GetTotalMemory(true);
            _log.Info($"Memory: Before: {before}, after: {after}, final: {final}");
        }
コード例 #5
0
        /// <summary>
        /// Recovery test: a producer sends 100 messages (one per 200ms); once the
        /// consumer has received the first 50, the leader broker for partition 0 is
        /// stopped and another 50 messages are sent. Passes when all 150 messages are
        /// received exactly once.
        /// NOTE(review): declared async void, so the test runner cannot await this
        /// method and unobserved exceptions may tear down the process — consider
        /// returning Task.
        /// </summary>
        public async void LeaderDownProducerAndConsumerRecovery()
        {
            kafka4net.Tracing.EtwTrace.Marker("LeaderDownProducerAndConsumerRecovery");
            string topic = "part32." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic, 3, 2);

            // every payload handed to producer.Send, and the subset confirmed via OnSuccess
            var sent = new List<string>();
            var confirmedSent1 = new List<string>();

            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
            producer.OnSuccess += msgs =>
            {
                msgs.ForEach(msg => confirmedSent1.Add(Encoding.UTF8.GetString(msg.Value)));
                _log.Debug("Sent {0} messages", msgs.Length);
            };
            await producer.ConnectAsync();

            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicEnd()));

            const int postCount = 100;
            const int postCount2 = 50;

            //
            // Read messages: record each one and replay it for the "first 50" wait below
            //
            var received = new List<ReceivedMessage>();
            var receivedEvents = new ReplaySubject<ReceivedMessage>();
            var consumerSubscription = consumer.OnMessageArrived.
                Synchronize().
                Subscribe(msg =>
                {
                    received.Add(msg);
                    receivedEvents.OnNext(msg);
                    _log.Debug("Received {0}/{1}", Encoding.UTF8.GetString(msg.Value), received.Count);
                });
            await consumer.IsConnected;

            //
            // Send #1: 100 messages, one every 200ms (~20 seconds total)
            //
            _log.Info("Start sender");
            Observable.Interval(TimeSpan.FromMilliseconds(200)).
                Take(postCount).
                Subscribe(
                    i => {
                        var msg = "msg " + i;
                        producer.Send(new Message { Value = Encoding.UTF8.GetBytes(msg) });
                        sent.Add("msg " + i);
                    },
                    () => _log.Info("Producer complete")
                );

            // wait for first 50 messages to arrive
            _log.Info("Waiting for first {0} messages to arrive", postCount2);
            await receivedEvents.Take(postCount2).Count().ToTask();
            Assert.AreEqual(postCount2, received.Count);

            // kill the partition leader while sender #1 is still producing
            _log.Info("Stopping broker");
            var stoppedBroker = VagrantBrokerUtil.StopBrokerLeaderForPartition(producer.Cluster, topic, 0);
            _log.Debug("Stopped broker {0}", stoppedBroker);

            // post another 50 messages
            _log.Info("Sending another {0} messages", postCount2);
            var sender2 = Observable.Interval(TimeSpan.FromMilliseconds(200)).
                Take(postCount2).
                Publish().RefCount();

            //
            // Send #2: 50 more messages, overlapping with the tail of sender #1
            //
            sender2.Subscribe(
                    i => {
                        var msg = "msg #2 " + i;
                        producer.Send(new Message { Value = Encoding.UTF8.GetBytes(msg) });
                        sent.Add(msg);
                        _log.Debug("Sent msg #2 {0}", i);
                    },
                    () => _log.Info("Producer #2 complete")
                );

            _log.Info("Waiting for #2 sender to complete");
            await sender2.ToTask();
            // NOTE(review): sender #1 takes ~20s and sender #2 finishes roughly 20s in
            // as well; there is no explicit wait for sender #1's completion before
            // closing the producer, so a late #1 message could race the close — confirm timing.
            _log.Info("Waiting for producer.Close");
            await producer.CloseAsync(TimeSpan.FromSeconds(60));

            _log.Info("Waiting 4sec for remaining messages");
            await Task.Delay(TimeSpan.FromSeconds(4)); // if unexpected messages arrive, let them in to detect failure

            _log.Info("Waiting for consumer.CloseAsync");
            consumer.Dispose();
            consumerSubscription.Dispose();

            // on count mismatch, dump extensive diagnostics (lost, unconfirmed, extra,
            // duplicated messages) before failing the assert below
            if (postCount + postCount2 != received.Count)
            {
                var receivedStr = received.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();

                var diff = sent.Except(received.Select(m => Encoding.UTF8.GetString(m.Value))).OrderBy(s => s);
                _log.Info("Not received {0}: \n {1}", diff.Count(), string.Join("\n ", diff));

                var diff2 = sent.Except(confirmedSent1).OrderBy(s => s);
                _log.Info("Not confirmed {0}: \n {1}", diff2.Count(), string.Join("\n ", diff2));

                var diff3 = received.Select(m => Encoding.UTF8.GetString(m.Value)).Except(sent).OrderBy(s => s);
                _log.Info("Received extra: {0}: \n {1}", diff3.Count(), string.Join("\n ", diff3));

                var diff4 = confirmedSent1.Except(sent).OrderBy(s => s);
                _log.Info("Confirmed extra {0}: \n {1}", diff4.Count(), string.Join("\n ", diff4));

                var dups = receivedStr.GroupBy(s => s).Where(g => g.Count() > 1).Select(g => string.Format("{0}: {1}", g.Count(), g.Key));
                _log.Info("Receved dups: \n {0}", string.Join("\n ", dups));

                _log.Debug("Received: \n{0}", string.Join("\n ", received.Select(m => Encoding.UTF8.GetString(m.Value))));
            }
            Assert.AreEqual(postCount + postCount2, received.Count, "Received.Count");

            _log.Info("Done");
            kafka4net.Tracing.EtwTrace.Marker("/LeaderDownProducerAndConsumerRecovery");
        }
コード例 #6
0
        /// <summary>
        /// Scenario: a broker gives away its topic/partition to another broker while a
        /// producer is connected; both the message sent before and the one sent after
        /// the reassignment must be confirmed.
        /// See https://github.com/ntent-ad/kafka4net/issues/27
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task ProducerTestWhenPartitionReassignmentOccurs()
        {
            var topic = "topic11." + _rnd.Next();

            // Create topic
            VagrantBrokerUtil.CreateTopic(topic, 1, 1);

            // Create cluster and producer
            var cluster = new Cluster(_seed2Addresses);
            //await cluster.ConnectAsync();
            //await cluster.GetOrFetchMetaForTopicAsync(topic);
            VagrantBrokerUtil.DescribeTopic(topic);
            var producer = new Producer(cluster, new ProducerConfiguration(topic, batchFlushSize: 1));
            // Record permanent send errors; SynchronizationContext.Current can be null
            // (e.g. on a thread-pool test runner), so the Post is null-guarded and the
            // error is additionally captured and asserted on below.
            var ctx = SynchronizationContext.Current;
            Exception sendError = null;
            producer.OnPermError += (exception, messages) =>
            {
                sendError = exception;
                ctx?.Post(d => { throw exception; }, null);
            };
            int successfullySent = 0;
            producer.OnSuccess += messages => successfullySent++;

            _log.Info("Connecting producer");
            await producer.ConnectAsync();

            _log.Info("Producer Send data before reassignment");
            producer.Send(new Message { Value = new byte[] { 0, 0, 0, 0 } });


            // Run the reassignment
            VagrantBrokerUtil.ReassignPartitions(cluster, topic, 0);
            _log.Info("Waiting for reassignment completion");
            await Task.Delay(5 * 1000);

            VagrantBrokerUtil.DescribeTopic(topic);

            _log.Info("Producer Send data after reassignment");
            producer.Send(new Message { Value = new byte[] { 1, 1, 1, 1 } });

            _log.Info("Waiting for producer to complete");
            await producer.CloseAsync(TimeSpan.FromSeconds(60));

            // Both the pre- and post-reassignment message must have been confirmed
            Assert.IsNull(sendError, "Producer reported a permanent send error");
            Assert.That(successfullySent, Is.EqualTo(2));
            _log.Info("Done");
        }
コード例 #7
0
        /// <summary>
        /// Scenario: the topic's preferred leader broker is down when the producer
        /// connects. The broker is later brought back up and the acting leader is
        /// stopped, forcing leadership back to the preferred broker; the producer must
        /// still deliver its message and close cleanly.
        /// See https://github.com/ntent-ad/kafka4net/issues/14
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task ProducerConnectWhenOneBrokerIsDownAndThanUp()
        {
            var topic = "topic11." + _rnd.Next();

            // Create topic
            VagrantBrokerUtil.CreateTopic(topic, 1, 2);
            var cluster = new Cluster(_seed3Addresses);
            await cluster.ConnectAsync();
            await cluster.GetOrFetchMetaForTopicAsync(topic);
            VagrantBrokerUtil.DescribeTopic(topic);
            // Stop the leader
            //var partitionDown = cluster.PartitionStateChanges.FirstAsync(_ => _.ErrorCode.IsFailure());
            var preferredBroker = VagrantBrokerUtil.StopBrokerLeaderForPartition(cluster, topic, 0);
            //_log.Info("Waiting for partition to be down");
            //await partitionDown;
            await cluster.CloseAsync(TimeSpan.FromSeconds(3));
            // give the remaining broker time to take over leadership
            await Task.Delay(30 * 1000);
            VagrantBrokerUtil.DescribeTopic(topic);

            // Create new cluster and publisher, while preferred leader is down
            cluster = new Cluster(_seed3Addresses);
            cluster.NewBrokers.Subscribe(b => _log.Info("Discovered new broker: {0}", b));
            _log.Info("Connecting cluster");
            await cluster.ConnectAsync();
            var producer = new Producer(cluster, new ProducerConfiguration(topic));
            _log.Info("Connecting producer");
            await producer.ConnectAsync();

            // Start preferred leader up
            _log.Info("Starting preferred broker");
            VagrantBrokerUtil.StartBroker(preferredBroker);
            _log.Info("Waiting for preferred broker ({0}) to start up", preferredBroker);
            await Task.Delay(30 * 1000);
            //VagrantBrokerUtil.RebalanceLeadership();
            // stopping the current leader forces leadership back to the restarted broker
            _log.Info("Stopping 2nd leader broker");
            VagrantBrokerUtil.StopBrokerLeaderForPartition(cluster, topic, 0);
            _log.Info("Producer Send data");
            producer.Send(new Message() { Value = new byte[]{0,0,0,0}});
            _log.Info("Waiting for producer to complete");
            await producer.CloseAsync(TimeSpan.FromSeconds(60));

            _log.Info("Done");
        }
コード例 #8
0
        /// <summary>
        /// Verifies thread isolation: continuations of the public async API must never
        /// resume on the internal "kafka-scheduler" thread, while raw consumer
        /// notifications (before ObserveOn) must arrive ON that thread.
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task SchedulerThreadIsIsolatedFromUserCode()
        {
            kafka4net.Tracing.EtwTrace.Marker("SchedulerThreadIsIsolatedFromUserCode");

            const string threadName = "kafka-scheduler";
            _log.Info("Test Runner is using thread {0}", Thread.CurrentThread.Name);

            var topic = "topic." + _rnd.Next();
            VagrantBrokerUtil.CreateTopic(topic,6,3);

            var cluster = new Cluster(_seed2Addresses);
            await cluster.ConnectAsync();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            await cluster.FetchPartitionOffsetsAsync(topic, ConsumerLocation.TopicStart);
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            // fetch topics purely to exercise the API; the result itself is unused
            await cluster.GetAllTopicsAsync();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            // now create a producer
            var producer = new Producer(cluster, new ProducerConfiguration(topic));
            await producer.ConnectAsync();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            // create a producer that also creates a cluster
            var producerWithCluster = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
            await producerWithCluster.ConnectAsync();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            // TODO: Subscribe and check thread on notification observables!

            // run them both for a little while (~5 seconds)
            var msgs = await Observable.Interval(TimeSpan.FromMilliseconds(100))
                .Do(l =>
                {
                    producer.Send(new Message { Value = BitConverter.GetBytes(l) });
                    producerWithCluster.Send(new Message { Value = BitConverter.GetBytes(l) });
                    _log.Debug("After Producer Send using thread {0}", Thread.CurrentThread.Name);

                }).Take(50).ToArray();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            // now consumer(s)
            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart()));
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            var msgsRcv = new List<long>();
            // before ObserveOn, notifications must be ON the scheduler thread;
            // after ObserveOn(DefaultScheduler), they must be OFF it
            var messageSubscription = consumer.OnMessageArrived
                .Do(msg => Assert.AreEqual(threadName, Thread.CurrentThread.Name), exception => Assert.AreEqual(threadName, Thread.CurrentThread.Name), () => Assert.AreEqual(threadName, Thread.CurrentThread.Name))
                .Take(50)
                .TakeUntil(DateTime.Now.AddSeconds(500))
                .ObserveOn(System.Reactive.Concurrency.DefaultScheduler.Instance)
                .Do(msg => Assert.AreNotEqual(threadName, Thread.CurrentThread.Name), exception => Assert.AreNotEqual(threadName, Thread.CurrentThread.Name), () => Assert.AreNotEqual(threadName, Thread.CurrentThread.Name))
                .Subscribe(
                    msg=>
                    {
                        msgsRcv.Add(BitConverter.ToInt64(msg.Value,0));
                        Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);
                        _log.Debug("In Consumer Subscribe OnNext using thread {0}", Thread.CurrentThread.Name);
                    }, exception =>
                    {
                        _log.Debug("In Consumer Subscribe OnError using thread {0} Error: {1}", Thread.CurrentThread.Name, exception.Message);
                        throw exception;
                    }, () =>
                    {
                        Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);
                        _log.Debug("In Consumer Subscribe OnComplete using thread {0}", Thread.CurrentThread.Name);
                    });
            
            await consumer.IsConnected;

            _log.Info("Waitng for consumer to read");
            await Task.Delay(TimeSpan.FromSeconds(6));
            _log.Debug("After Consumer Subscribe using thread {0}", Thread.CurrentThread.Name);
            consumer.Dispose();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            Assert.AreEqual(msgs.Length, msgsRcv.Count);

            messageSubscription.Dispose();
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            // now close down
            await producer.CloseAsync(TimeSpan.FromSeconds(5));
            _log.Debug("After Consumer Close using thread {0}", Thread.CurrentThread.Name);
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            await producerWithCluster.CloseAsync(TimeSpan.FromSeconds(5));
            _log.Debug("After Producer Subscribe using thread {0}", Thread.CurrentThread.Name);
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            await cluster.CloseAsync(TimeSpan.FromSeconds(5));
            _log.Debug("After Cluster Close using thread {0}", Thread.CurrentThread.Name);
            Assert.AreNotEqual(threadName, Thread.CurrentThread.Name);

            kafka4net.Tracing.EtwTrace.Marker("/SchedulerThreadIsIsolatedFromUserCode");
        }
コード例 #9
0
        /// <summary>
        /// Two producers sharing one Cluster instance each send 100 messages to their
        /// own topic; head/tail offsets are fetched afterwards to verify exactly 100
        /// messages landed on each topic.
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task MultipleProducersOneCluster()
        {
            kafka4net.Tracing.EtwTrace.Marker("MultipleProducersOneCluster");

            var cluster = new Cluster(_seed2Addresses);
            var topic1 = "topic." + _rnd.Next();
            var topic2 = "topic." + _rnd.Next();

            VagrantBrokerUtil.CreateTopic(topic1, 6, 3);
            VagrantBrokerUtil.CreateTopic(topic2, 6, 3);

            // declare two producers
            var producer1 = new Producer(cluster, new ProducerConfiguration(topic1));
            await producer1.ConnectAsync();

            var producer2 = new Producer(cluster, new ProducerConfiguration(topic2));
            await producer2.ConnectAsync();

            // run them both for a little while (~10 seconds); awaiting the observable
            // completes after the 100th tick has been sent to both producers
            await Observable.Interval(TimeSpan.FromMilliseconds(100))
                .Do(l =>
            {
                producer1.Send(new Message {Value = BitConverter.GetBytes(l)});
                producer2.Send(new Message {Value = BitConverter.GetBytes(l)});

            }).Take(100);

            _log.Info("Done Sending, await on producer close.");

            // now stop them.
            await Task.WhenAll(new [] { producer1.CloseAsync(TimeSpan.FromSeconds(5)), producer2.CloseAsync(TimeSpan.FromSeconds(5)) });

            await Task.Delay(TimeSpan.FromSeconds(2));

            // check we got all 100 on each topic.
            _log.Info("Closed Producers. Checking Offsets");
            var topic1Heads = await cluster.FetchPartitionOffsetsAsync(topic1, ConsumerLocation.TopicStart);
            var topic2Heads = await cluster.FetchPartitionOffsetsAsync(topic2, ConsumerLocation.TopicStart);
            var topic1Tails = await cluster.FetchPartitionOffsetsAsync(topic1, ConsumerLocation.TopicEnd);
            var topic2Tails = await cluster.FetchPartitionOffsetsAsync(topic2, ConsumerLocation.TopicEnd);

            Assert.AreEqual(100, topic1Tails.MessagesSince(topic1Heads));
            Assert.AreEqual(100, topic2Tails.MessagesSince(topic2Heads));

            // release the shared cluster's connections now that offsets are verified
            // (the original leaked the cluster; sibling tests close theirs)
            await cluster.CloseAsync(TimeSpan.FromSeconds(5));

            kafka4net.Tracing.EtwTrace.Marker("/MultipleProducersOneCluster");
        }
コード例 #10
0
        /// <summary>
        /// Verifies that publishing to a non-existing topic auto-creates it: produce 10
        /// messages, close the producer, then read the topic from the start and check
        /// that all 10 messages arrive with the expected payload.
        /// Returns Task (rather than async void) so the runner can await completion.
        /// </summary>
        public async Task TopicIsAutocreatedByProducer()
        {
            kafka4net.Tracing.EtwTrace.Marker("TopicIsAutocreatedByProducer");

            var topic ="autocreate.test." + _rnd.Next();
            const int producedCount = 10;
            var lala = Encoding.UTF8.GetBytes("la-la-la");
            // TODO: set wait to 5sec

            //
            // Produce
            // In order to make sure that topic was created by producer, send and wait for producer
            // completion before performing validation read.
            //
            var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));

            await producer.ConnectAsync();

            _log.Debug("Producing...");
            // one message per second; ToTask completes after the 10th send
            await Observable.Interval(TimeSpan.FromSeconds(1)).
                Take(producedCount).
                Do(_ => producer.Send(new Message { Value = lala })).
                ToTask();
            await producer.CloseAsync(TimeSpan.FromSeconds(10));

            //
            // Validate by reading published messages
            //
            var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topic, new StartPositionTopicStart(), maxWaitTimeMs: 1000, minBytesPerFetch: 1));
            var msgs = consumer.OnMessageArrived.Publish().RefCount();
            var receivedTxt = new List<string>();
            var consumerSubscription = msgs.
                Select(m => Encoding.UTF8.GetString(m.Value)).
                Synchronize(). // protect receivedTxt
                Do(m => _log.Info("Received {0}", m)). 
                Do(receivedTxt.Add).
                Subscribe();
            await consumer.IsConnected;

            // wait for all messages, but give up after 5 seconds
            _log.Debug("Waiting for consumer");
            await msgs.Take(producedCount).TakeUntil(DateTimeOffset.Now.AddSeconds(5)).LastOrDefaultAsync().ToTask();

            Assert.AreEqual(producedCount, receivedTxt.Count, "Did not received all messages");
            Assert.IsTrue(receivedTxt.All(m => m == "la-la-la"), "Unexpected message content");

            consumerSubscription.Dispose();
            consumer.Dispose();

            kafka4net.Tracing.EtwTrace.Marker("/TopicIsAutocreatedByProducer");
        }