/// <summary>
/// Selects the target partition for a message. Keyless messages are spread
/// randomly across all partitions; keyed messages are routed deterministically
/// by hashing the key, so messages sharing a key always land in the same
/// partition (and thus preserve relative order).
/// </summary>
/// <param name="message">Message to route; <c>Key</c> may be null.</param>
/// <param name="allPartitions">Candidate partitions; assumed non-empty.</param>
/// <returns>Metadata of the chosen partition.</returns>
public PartitionMeta GetMessagePartition(Message message, PartitionMeta[] allPartitions)
{
    if (message.Key == null)
    {
        // No key: pick any partition at random.
        return allPartitions[_rnd.Next(allPartitions.Length)];
    }

    // Keyed: hash the key and fold it onto the partition range.
    var hashedIndex = Fletcher32HashOptimized(message.Key) % allPartitions.Length;
    return allPartitions[hashedIndex];
}
/// <summary>
/// Smoke test: connects to a local Kafka broker on localhost:9092, sends a
/// single UTF-8 encoded message, and succeeds if the round trip completes
/// without throwing. Requires a broker to be running locally.
/// </summary>
public async Task KafkaClientConnectionTest()
{
    // Given
    const string Topic = "test";
    var brokers = new[] { new Uri("localhost:9092") };
    var options = new KafkaSinkOptions(Topic, brokers);
    var message = new Message
    {
        Value = Encoding.UTF8.GetBytes("{'message' : 'This is a test message'}")
    };

    // When: `using` guarantees the client is disposed even if the send throws.
    // (The original called Dispose() only on the success path, leaking the
    // client's connection on failure.)
    using (var kafkaClient = new KafkaClient(options))
    {
        await kafkaClient.SendMessagesAsync(new[] { message }).ConfigureAwait(false);
    }

    // Then: reaching this point without an exception is the success criterion.
    Assert.IsTrue(true);
}
/// <summary>
/// Integration test: produces keyed messages into a 3-partition topic via an
/// interval-driven Rx sender, consumes them on a separate connection, and
/// verifies that (a) every sent message is received and (b) ordering is
/// preserved within each key — Kafka's per-partition ordering guarantee.
/// Requires live brokers (Vagrant environment).
/// Fix: changed <c>async void</c> to <c>async Task</c> so the test runner can
/// await completion and observe exceptions; with <c>async void</c> a failure
/// after the first await could be silently lost or crash the process.
/// </summary>
public async Task KeyedMessagesPreserveOrder()
{
    kafka4net.Tracing.EtwTrace.Marker("KeyedMessagesPreserveOrder");

    // Create a topic with 3 partitions (replication factor 3).
    var topicName = "part33." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topicName, 3, 3);

    // Create listener in a separate connection/broker.
    var receivedMsgs = new List<ReceivedMessage>();
    var consumer = new Consumer(new ConsumerConfiguration(_seed2Addresses, topicName, new StartPositionTopicEnd()));
    var consumerSubscription = consumer.OnMessageArrived.Synchronize().Subscribe(msg =>
    {
        lock (receivedMsgs)
        {
            receivedMsgs.Add(msg);
        }
    });
    await consumer.IsConnected;

    // Sender is configured with 50ms batch period.
    var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topicName, TimeSpan.FromMilliseconds(50)));
    await producer.ConnectAsync();

    //
    // Generate messages on a timer.
    // NOTE(review): the original comment said "100ms interval in 10 threads",
    // but the code actually runs 1 sender at a 10ms interval.
    //
    var sentMsgs = new List<Message>();
    _log.Info("Start sending");
    var senders = Enumerable.Range(1, 1).
        Select(thread => Observable.
            Interval(TimeSpan.FromMilliseconds(10)).
            Synchronize(). // protect adding to sentMsgs
            Select(i =>
            {
                var str = "msg " + i + " thread " + thread + " " + Guid.NewGuid();
                var bin = Encoding.UTF8.GetBytes(str);
                // Keys cycle 0-9 so each key gets a deterministic message stream.
                var msg = new Message
                {
                    Key = BitConverter.GetBytes((int)(i + thread) % 10),
                    Value = bin
                };
                return Tuple.Create(msg, i, str);
            }).
            Subscribe(msg =>
            {
                lock (sentMsgs)
                {
                    producer.Send(msg.Item1);
                    sentMsgs.Add(msg.Item1);
                    // Sanity check: no interval tick was dropped or reordered.
                    Assert.AreEqual(msg.Item2, sentMsgs.Count - 1);
                }
            })
        ).
        ToArray();

    // Wait for around 10K messages (10K/(10*10) = 100sec) and close producer.
    _log.Info("Waiting for producer to produce enough...");
    await Task.Delay(100 * 1000);
    _log.Info("Closing senders intervals");
    senders.ForEach(s => s.Dispose());
    _log.Info("Closing producer");
    await producer.CloseAsync(TimeSpan.FromSeconds(5));
    _log.Info("Waiting for additional 10sec");
    await Task.Delay(10 * 1000);

    _log.Info("Disposing consumer");
    consumerSubscription.Dispose();
    _log.Info("Closing consumer");
    consumer.Dispose();
    _log.Info("Done with networking");

    // Compare sent and received messages.
    // TODO: for some reason performance is not what I'd expect it to be and only 6K is generated.
    Assert.GreaterOrEqual(sentMsgs.Count, 4000, "Expected around 10K messages to be sent");

    if (sentMsgs.Count != receivedMsgs.Count)
    {
        // Log the symmetric difference to aid debugging before the assert fails.
        var sentStr = sentMsgs.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();
        var receivedStr = receivedMsgs.Select(m => Encoding.UTF8.GetString(m.Value)).ToArray();
        sentStr.Except(receivedStr).
            ForEach(m => _log.Error("Not received: '{0}'", m));
        receivedStr.Except(sentStr).
            ForEach(m => _log.Error("Not sent but received: '{0}'", m));
    }
    Assert.AreEqual(sentMsgs.Count, receivedMsgs.Count, "Sent and received messages count differs");

    //
    // Group messages by key and compare lists in each key to be the same
    // (order should be preserved within key).
    //
    var keysSent = sentMsgs.GroupBy(m => BitConverter.ToInt32(m.Key, 0), m => Encoding.UTF8.GetString(m.Value), (i, mm) => new { Key = i, Msgs = mm.ToArray() }).ToArray();
    var keysReceived = receivedMsgs.GroupBy(m => BitConverter.ToInt32(m.Key, 0), m => Encoding.UTF8.GetString(m.Value), (i, mm) => new { Key = i, Msgs = mm.ToArray() }).ToArray();

    Assert.AreEqual(10, keysSent.Count(), "Expected 10 unique keys 0-9");
    Assert.AreEqual(keysSent.Count(), keysReceived.Count(), "Keys count does not match");

    // Compare order within each key.
    var notInOrder = keysSent
        .OrderBy(k => k.Key)
        .Zip(keysReceived.OrderBy(k => k.Key), (s, r) => new { s, r, ok = s.Msgs.SequenceEqual(r.Msgs) })
        .Where(_ => !_.ok).ToArray();

    if (notInOrder.Any())
    {
        _log.Error("{0} keys are out of order", notInOrder.Count());
        notInOrder.ForEach(_ => _log.Error("Failed order in:\n{0}",
            string.Join(" \n", DumpOutOfOrder(_.s.Msgs, _.r.Msgs))));
    }

    Assert.IsTrue(!notInOrder.Any(), "Detected out of order messages");

    kafka4net.Tracing.EtwTrace.Marker("/KeyedMessagesPreserveOrder");
}
/// <summary>
/// Manual memory diagnostic: pumps 10 x 1e6 small messages through a producer
/// across repeated connect/close cycles against a single-partition topic, then
/// logs heap size around a forced GC so a leak in the producer pipeline would
/// show up as growth. Requires a live broker (Vagrant environment).
/// Fix: changed <c>async void</c> to <c>async Task</c> so the runner can await
/// completion and observe exceptions thrown after the first await.
/// </summary>
public async Task Memory()
{
    var topic = "topic11." + _rnd.Next();
    VagrantBrokerUtil.CreateTopic(topic, 1, 1);

    for (int i = 0; i < 10; ++i)
    {
        var producer = new Producer(_seed2Addresses, new ProducerConfiguration(topic));
        await producer.ConnectAsync();

        for (int j = 0; j < (int)1e6; j++)
        {
            var msg = new Message
            {
                Key = BitConverter.GetBytes(1),
                Value = Encoding.UTF8.GetBytes($@"SomeLongText - {j}")
            };
            producer.Send(msg);
        }

        // Generous timeout: the close must flush up to 1e6 queued messages.
        await producer.CloseAsync(TimeSpan.FromSeconds(60));
    }

    // Snapshot heap before and after an explicit collection; a large "after"
    // value across runs would suggest retained producer state.
    var before = GC.GetTotalMemory(false);
    GC.Collect();
    var after = GC.GetTotalMemory(false);
    await Task.Delay(3000);
    GC.Collect();
    GC.WaitForFullGCComplete();
    _log.Info($"Memory: Before: {before}, after: {after}");
}
/// <summary>
/// Queues a message for asynchronous sending by pushing it onto the internal
/// send stream. Must be called after <c>ConnectAsync</c> succeeds and before
/// the producer is closed or canceled.
/// </summary>
/// <param name="msg">Message to send.</param>
/// <exception cref="InvalidOperationException">
/// Thrown when the producer is shutting down or draining, or was never
/// connected. (Previously a bare <see cref="Exception"/>;
/// <see cref="InvalidOperationException"/> derives from it, so existing
/// catch blocks continue to work.)
/// </exception>
public void Send(Message msg)
{
    if (_shutdown.IsCancellationRequested || _drain.IsCancellationRequested)
        throw new InvalidOperationException("Cannot send messages after producer is canceled / closed.");

    if (!IsConnected)
        throw new InvalidOperationException("Must call ConnectAsync prior to sending messages.");

    _sendMessagesSubject.OnNext(msg);
}