This class abstracts querying multiple Kafka servers for metadata and caching the results. All metadata queries are cached lazily: if metadata for a topic is not in the cache, it is fetched using the default brokers provided in the constructor. Each Uri is queried in turn until a response is received, so it is recommended to provide more than one Kafka Uri; the API can then still retrieve metadata even if one of the Kafka servers goes down. The metadata stays in the cache until an error condition indicates it is out of date, either a socket disconnect or an error code in a response indicating that a broker no longer hosts a partition.
Inheritance: IBrokerRouter
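For orientation, here is a minimal sketch of the behaviour described above (the broker addresses are placeholders; the "TestHarness" topic comes from the examples below): several Uris are passed to KafkaOptions, the router tries each in turn to fetch metadata, and the result is cached.

        var options = new KafkaOptions(new Uri("http://kafka01:9092"), new Uri("http://kafka02:9092"));
        using (var router = new BrokerRouter(options))
        {
            // The first call queries the brokers and caches the topic metadata; later calls
            // are served from the cache until a disconnect or an error code invalidates it.
            var metadata = router.GetTopicMetadata("TestHarness");

            // Select the broker route for partition 0 of the topic using the cached metadata.
            var route = router.SelectBrokerRoute("TestHarness", 0);
        }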
Example #1
        static void Main(string[] args)
        {
            try
            {
                _topic = ConfigurationManager.AppSettings["topic"];
                var brokers = from x in ConfigurationManager.AppSettings["kafkaBrokers"].Split(',')
                              select new Uri(x);
                Console.WriteLine("Connecting to kafka queue brokers {0} with topic {1}", string.Join(",", brokers), _topic);
                var options = new KafkaOptions(brokers.ToArray());
                var router = new BrokerRouter(options);
                var coption = new ConsumerOptions(_topic, router);
                _consumer = new KafkaNet.Consumer(coption);
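                // Start from the latest offset on each partition so only newly produced messages are consumed.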
                var offset = _consumer.GetTopicOffsetAsync(_topic, 1000000).Result;
                var t = from x in offset select new OffsetPosition(x.PartitionId, x.Offsets.Max());
                _consumer.SetOffsetPosition(t.ToArray());

                foreach (var message in _consumer.Consume())
                {
                    Console.WriteLine("Response: P{0},O{1} : {2}",
                    message.Meta.PartitionId, 
                    message.Meta.Offset,
                    System.Text.Encoding.Default.GetString(message.Value));
                }
            }
            catch(Exception ex)
            {
                Console.WriteLine(ex.Message);
            }
            Console.ReadLine();
        }
Example #2
 public void BrokerRouterConstructorThrowsServerUnreachableException()
 {
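     // The address cannot be resolved, so the constructor is expected to throw a ServerUnreachableException.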
     var result = new BrokerRouter(new KafkaOptions
     {
         KafkaServerUri = new List<Uri> { new Uri("http://noaddress:1") }
     });
 }
Example #3
        public void Main(string[] args)
        {
            var options = GetOptions(args);
            if (options == null) return;

            var count = 0;
            var lastCount = 0;
            var reporter = new Task(() =>
            {
                while (true)
                {
                    var current = count;
                    Console.WriteLine("{0} messages in last second.", current - lastCount);
                    lastCount = current;
                    Thread.Sleep(1000);
                }
            });
            var kafkaOptions = new KafkaOptions(options.KafkaNodeUri);// { Log = new ConsoleLog() };
            using (var router = new BrokerRouter(kafkaOptions))
            using (var client = new KafkaNet.Producer(router))
            {
                reporter.Start();
                while (true)
                {
                    Thread.Sleep(100);
                    client.SendMessageAsync("TestHarness", new[] { new Message() { Value = BitConverter.GetBytes(DateTime.Now.Ticks) } });
                    count++;
                }
            }
        }
Example #4
 public void BrokerRouterConstructorShouldIgnoreUnresolvableUriWhenAtLeastOneIsGood()
 {
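     // The unresolvable Uri should be ignored because at least one of the supplied Uris is good.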
     var result = new BrokerRouter(new KafkaOptions
     {
         KafkaServerUri = new List<Uri> { new Uri("http://noaddress:1"), new Uri("http://localhost:1") }
     });
 }
Example #5
        static void Main(string[] args)
        {
            var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092"))
                {
                    Log = new ConsoleLog()
                };
            var router = new BrokerRouter(options);
            var client = new Producer(router);

            Task.Factory.StartNew(() =>
                {
                    var consumer = new Consumer(new ConsumerOptions("TestHarness", router));
                    foreach (var data in consumer.Consume())
                    {
                        Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value);
                    }
                });

            Console.WriteLine("Type a message and press enter...");
            while (true)
            {
                var message = Console.ReadLine();
                if (message == "quit") break;
                client.SendMessageAsync("TestHarness", new[] {new Message {Value = message}});
            }

            using (client)
            using (router)
            {

            }
        }
Example #6
        public void Main(string[] args)
        {
            var options = GetOptions(args);
            if (options == null) return;

            StartReporting();

            var kafkaOptions = new KafkaOptions(options.KafkaNodeUri);
            using (var router = new BrokerRouter(kafkaOptions))
            using (var client = new KafkaNet.Consumer(new ConsumerOptions("TestHarness", router) { Log = new ConsoleLog(), MinimumBytes = 1 }))
            {
                Console.WriteLine("Listening for messages...");
                foreach (var message in client.Consume())
                {
                    try
                    {
                        var received = DateTime.Now;
                        var sent = new DateTime(BitConverter.ToInt64(message.Value, 0));
                        var diff = received - sent;
                        lock (sync)
                            receivedItems.Add(new ReceivedMessage { DateTime = received, TotalMilliseconds = diff.TotalMilliseconds });
                    }
                    catch (Exception ex)
                    {
                        Console.Error.WriteLine("Oops... " + ex);
                    }
                }
            }
        }
Example #7
 public void Run()
 {
     var options = new KafkaOptions(new Uri("http://localhost:9092"));
     var router = new BrokerRouter(options);
     _producer = new KafkaNet.Producer(router);
     Process();
 }
Example #8
        public void ConsumerShouldConsumeInSameOrderAsProduced()
        {
            var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" };
            var testId = Guid.NewGuid().ToString();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {

                var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;

                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router),
                    offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
                {

                    for (int i = 0; i < 20; i++)
                    {
                        producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
                    }

                    var results = consumer.Consume().Take(20).ToList();

                    //ensure the produced messages arrived
                    Console.WriteLine("Message order:  {0}", string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList()));

                    Assert.That(results.Count, Is.EqualTo(20));
                    Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
                    Assert.That(results.Any(x => x.Key.ToUtf8String() != testId), Is.False);
                }
            }
        }
Example #9
        public void OffsetFetchRequestOfNonExistingGroupShouldReturnNoError([Values(0,1)] int version)
        {
            //From documentation: https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchRequest
            //Note that if there is no offset associated with a topic-partition under that consumer group the broker does not set an error code 
            //(since it is not really an error), but returns empty metadata and sets the offset field to -1.
            const int partitionId = 0;
            using (var router = new BrokerRouter(Options))
            {
                var request = CreateOffsetFetchRequest(version, Guid.NewGuid().ToString(), partitionId);

                var conn = router.SelectBrokerRoute(IntegrationConfig.IntegrationTopic, partitionId);

                var response = conn.Connection.SendAsync(request).Result.FirstOrDefault();

                Assert.That(response, Is.Not.Null);
                if (version == 0)
                {
                    // Version 0 (storing in zookeeper) results in unknown topic or partition as the consumer group
                    // and partition are used to make up the string, and when it is missing it results in an error
                    Assert.That(response.Error, Is.EqualTo((int)ErrorResponseCode.UnknownTopicOrPartition));
                }
                else
                {
                    Assert.That(response.Error, Is.EqualTo((int)ErrorResponseCode.NoError));
                }
                Assert.That(response.Offset, Is.EqualTo(-1));
            }
        }
Example #10
        public async Task ProtocolGateway()
        {
            int partitionId = 0;
            var router = new BrokerRouter(Options);

            var producer = new Producer(router);
            string message1 = Guid.NewGuid().ToString();
            var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(message1) }, 1, null, MessageCodec.CodecNone, partitionId);
            var offset = response.FirstOrDefault().Offset;

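            // Read the message back at the returned offset by sending a raw FetchRequest through a ProtocolGateway.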
            ProtocolGateway protocolGateway = new ProtocolGateway(IntegrationConfig.IntegrationUri);
            var fetch = new Fetch
                         {
                             Topic = IntegrationConfig.IntegrationTopic,
                             PartitionId = partitionId,
                             Offset = offset,
                             MaxBytes = 32000,
                         };

            var fetches = new List<Fetch> { fetch };

            var fetchRequest = new FetchRequest
                {
                    MaxWaitTime = 1000,
                    MinBytes = 10,
                    Fetches = fetches
                };

            var r = await protocolGateway.SendProtocolRequest(fetchRequest, IntegrationConfig.IntegrationTopic, partitionId);
            //  var r1 = await protocolGateway.SendProtocolRequest(fetchRequest, IntegrationConfig.IntegrationTopic, partitionId);
            Assert.IsTrue(r.Messages.FirstOrDefault().Value.ToUtf8String() == message1);
        }
Example #11
        public void EnsureGzipCanDecompressMessageFromKafka()
        {
            var router = new BrokerRouter(_options);
            var producer = new Producer(router);

            var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;

            var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router) { PartitionWhitelist = new List<int>() { 0 } },
                offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());
            int numberOfmessage = 3;
            for (int i = 0; i < numberOfmessage; i++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic, new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip,
              partition: 0);
            }

            var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token).Take(numberOfmessage).ToList();

            for (int i = 0; i < numberOfmessage; i++)
            {
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }

            using (producer)
            using (consumer) { }
        }
Example #12
        public async Task FetchMessagesCacheContainsAllRequestTest()
        {
            // Creating a broker router and a protocol gateway for the producer and consumer
            var brokerRouter = new BrokerRouter(_options);
            var protocolGateway = new ProtocolGateway(_kafkaUri);

            Producer producer = new Producer(brokerRouter);
            ManualConsumer consumer = new ManualConsumer(_partitionId, _topic, protocolGateway, "TestClient", DefaultMaxMessageSetSize);

            var offset = await consumer.FetchLastOffset();

            // Creating 10 messages (they are consumed below in two batches of 5)
            List<Message> messages = CreateTestMessages(10, 1);

            await producer.SendMessageAsync(_topic, messages, partition: _partitionId, timeout: TimeSpan.FromSeconds(3));

            // Now let's consume
            var result = (await consumer.FetchMessages(5, offset)).ToList();

            CheckMessages(messages.Take(5).ToList(), result);

            // Now let's consume again
            result = (await consumer.FetchMessages(5, offset + 5)).ToList();

            CheckMessages(messages.Skip(5).ToList(), result);
        }
Example #13
 public void Run()
 {
     var options = new KafkaOptions(new Uri("http://localhost:9092"));
     var router = new BrokerRouter(options);
     var consumerOptions = new ConsumerOptions("test", router);           
     _consumer = new KafkaNet.Consumer(consumerOptions, new OffsetPosition(0, 100));
     Process();
 }
Example #14
        public DestinationKafka(params Uri[] servers)
        {
            var options = new KafkaOptions(servers) { Log = new ConsoleLogger() };
            _router = new BrokerRouter(options);
            _producer = new Producer(_router, maximumMessageBuffer: 5000, maximumAsyncRequests: 10) { BatchSize = 1000, BatchDelayTime = TimeSpan.FromSeconds(1) };

            StatisticsTracker.OnStatisticsHeartbeat += StatisticsTracker_OnStatisticsHeartbeat;
        }
Example #15
        public KafkaAgent(Uri[] kafkaServers)
        {
            var options = new KafkaOptions(kafkaServers);
            _router = new BrokerRouter(options);

            var influxDbClient = new InfluxDb("http://localhost", "reapadda", "qwerty", InfluxVersion.v09x);
            _formatter = influxDbClient.GetFormatter();
        }
Example #16
        public async void SendAsyncShouldGetAResultForEachPartitionSentTo()
        {
            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message("1"), new Message("2"), new Message("3") });

                Assert.That(result.Count, Is.EqualTo(2));
            }
        }
Example #17
        public async void SendAsyncShouldGetOneResultForMessage()
        {
            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) });

                Assert.That(result.Count, Is.EqualTo(1));
            }
        }
Example #18
        public void BrokerRouterCanConstruct()
        {
            var result = new BrokerRouter(new KafkaOptions
            {
                KafkaServerUri = new List<Uri> { new Uri("http://localhost:1") },
                KafkaConnectionFactory = _mockKafkaConnectionFactory.Object
            });

            Assert.That(result, Is.Not.Null);
        }
Example #19
 public async Task ProducerAckLevel1ResponseOffsetShouldBeEqualToLastOffset()
 {
     using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
     using (var producer = new Producer(router))
     {
         var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
         var offsetResponse = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
         var maxOffset = offsetResponse.Find(x => x.PartitionId == 0);
         Assert.AreEqual(responseAckLevel1.Offset, maxOffset.Offsets.Max() - 1);
     }
 }
Example #20
 public async Task ProducerAckLevel()
 {
     using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
     using (var producer = new Producer(router))
     {
         var responseAckLevel0 = await producer.SendMessageAsync(new Message("Ack Level 0"), IntegrationConfig.IntegrationTopic, acks: 0, partition: 0);
         Assert.AreEqual(responseAckLevel0.Offset, -1);
         var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
         Assert.That(responseAckLevel1.Offset, Is.GreaterThan(-1));
     }
 }
Example #21
        public async Task ManualConsumerFailure()
        {
            string topic = "TestTopicIssue13-3R-1P";
            var manualConsumer = new ManualConsumer(0, topic, new ProtocolGateway(_options), "test client", 10000);
            long offset = await manualConsumer.FetchLastOffset();

            var router = new BrokerRouter(_options);
            var producer = new Producer(router);
            SandMessageForever(producer, topic);
            await ReadMessageForever(manualConsumer, offset);
        }
Example #22
 public static Producer GetProducer(this Kafka kafkaObj)
 {
     if (_producer == null)
     {
         var addresses = from x in kafkaObj.brokers
                         select new Uri(x.address);
         var router = new BrokerRouter(new KafkaOptions(addresses.ToArray()));
         _producer = new Producer(router);
     }
     return _producer;
 }
Example #23
        public async Task Send(object channel, Address address, TransportMessage message, object properties)
        {
            var options = new KafkaOptions(new Uri(address.Machine));
            var router = new BrokerRouter(options);
            var topic = address.Queue;
            var messageString = System.Text.Encoding.Default.GetString(message.Body);

            using (var client = new Producer(router))
            {
                await client.SendMessageAsync(topic, new[] { new Message(messageString) });
            }
        }
Example #24
        public void ProducerShouldNotExpectResponseWhenAckIsZero()
        {
            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
            using (var producer = new Producer(router))
            {
                var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) }, acks: 0);

                sendTask.Wait(TimeSpan.FromMinutes(2));

                Assert.That(sendTask.Status, Is.EqualTo(TaskStatus.RanToCompletion));
            }
        }
Example #25
        private static void Consume(string broker, string topic)
        {   
            var options = new KafkaOptions(new Uri(broker));
            var router = new BrokerRouter(options);
            var consumer = new Consumer(new ConsumerOptions(topic, router));

            //Consume returns a blocking IEnumerable (ie: never ending stream)
            foreach (var message in consumer.Consume())
            {
                Console.WriteLine("Response: Partition {0},Offset {1} : {2}",
                    message.Meta.PartitionId, message.Meta.Offset, message.Value.ToUtf8String());
            }
        }
Example #26
        public void OffsetCommitShouldStoreAndReturnSuccess()
        {
            const int partitionId = 0;
            using (var router = new BrokerRouter(Options))
            {
                var conn = router.SelectBrokerRoute(IntegrationConfig.IntegrationTopic, partitionId);

                var commit = CreateOffsetCommitRequest(IntegrationConfig.IntegrationConsumer, partitionId, 10);
                var response = conn.Connection.SendAsync(commit).Result.FirstOrDefault();

                Assert.That(response, Is.Not.Null);
                Assert.That(response.Error, Is.EqualTo((int)ErrorResponseCode.NoError));
            }
        }
Example #27
        public void BrokerRouterUsesFactoryToAddNewBrokers()
        {
            var router = new BrokerRouter(new KafkaNet.Model.KafkaOptions
            {
                KafkaServerUri = new List<Uri> { new Uri("http://localhost:1") },
                KafkaConnectionFactory = _factoryMock.Object
            });

            _connMock1.Setup(x => x.SendAsync(It.IsAny<IKafkaRequest<MetadataResponse>>()))
                      .Returns(() => Task.Factory.StartNew(() => new List<MetadataResponse> { CreateMetaResponse() }));

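            // The metadata response contains a broker on port 2, so the factory should be asked to create a connection for it.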
            var topics = router.GetTopicMetadata(TestTopic);
            _factoryMock.Verify(x => x.Create(It.Is<Uri>(uri => uri.Port == 2), It.IsAny<int>(), It.IsAny<IKafkaLog>()), Times.Once());
        }
Example #28
        public async Task BrokerRouterUsesFactoryToAddNewBrokers()
        {
            var router = new BrokerRouter(new KafkaOptions
            {
                KafkaServerUri = new List<Uri> { new Uri("http://localhost:1") },
                KafkaConnectionFactory = _mockKafkaConnectionFactory.Object
            });

            _mockKafkaConnection1.Setup(x => x.SendAsync(It.IsAny<IKafkaRequest<MetadataResponse>>()))
                      .Returns(() => Task.Run(async () => new List<MetadataResponse> { await BrokerRouterProxy.CreateMetadataResponseWithMultipleBrokers() }));
            await router.RefreshMissingTopicMetadata(TestTopic);
            var topics = router.GetTopicMetadataFromLocalCache(TestTopic);
            _mockKafkaConnectionFactory.Verify(x => x.Create(It.Is<KafkaEndpoint>(e => e.Endpoint.Port == 2), It.IsAny<TimeSpan>(), It.IsAny<IKafkaLog>(), It.IsAny<int>(), It.IsAny<TimeSpan?>(), It.IsAny<StatisticsTrackerOptions>()), Times.Once());
        }
Example #29
        public void BrokerRouterUsesFactoryToAddNewBrokers()
        {
            var router = new BrokerRouter(new KafkaOptions
            {
                KafkaServerUri = new List<Uri> { new Uri("http://localhost:1") },
                KafkaConnectionFactory = _mockKafkaConnectionFactory.Object
            });

            _mockKafkaConnection1.Setup(x => x.SendAsync(It.IsAny<IKafkaRequest<MetadataResponse>>()))
                      .Returns(() => Task.Run(() => new List<MetadataResponse> { CreateMetaResponse() }));

            var topics = router.GetTopicMetadata(TestTopic);
            _mockKafkaConnectionFactory.Verify(x => x.Create(It.Is<KafkaEndpoint>(e => e.Endpoint.Port == 2), It.IsAny<TimeSpan>(), It.IsAny<IKafkaLog>(), null), Times.Once());
        }
Example #30
        public void ConsumerFailure()
        {
            string topic = "TestTopicIssue13-2-3R-1P";
            using (var router = new BrokerRouter(_options))
            {
                var producer = new Producer(router);
                var offsets = producer.GetTopicOffsetAsync(topic).Result;
                var maxOffsets = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
                var consumerOptions = new ConsumerOptions(topic, router) { PartitionWhitelist = new List<int>() { 0 }, MaxWaitTimeForMinimumBytes = TimeSpan.Zero };

                SandMessageForever(producer, topic);
                ReadMessageForever(consumerOptions, maxOffsets);
            }
        }
Example #31
        private async Task ProduceAndSendBatchAsync(List<TopicMessage> batches, CancellationToken cancellationToken)
        {
            //we must send a different produce request for each ack level and timeout combination.
            foreach (var ackLevelBatch in batches.GroupBy(batch => new { batch.Acks, batch.Timeout }))
            {
                var messageByRouter = ackLevelBatch.Select(batch => new
                {
                    TopicMessage = batch,
                    Route        = BrokerRouter.SelectBrokerRoute(batch.Topic, batch.Message.Key),
                })
                                      .GroupBy(x => new { x.Route, x.TopicMessage.Topic, x.TopicMessage.Codec });

                var sendTasks = new List<BrokerRouteSendBatch>();
                foreach (var group in messageByRouter)
                {
                    var payload = new Payload
                    {
                        Codec     = group.Key.Codec,
                        Topic     = group.Key.Topic,
                        Partition = group.Key.Route.PartitionId,
                        Messages  = group.Select(x => x.TopicMessage.Message).ToList()
                    };

                    var request = new ProduceRequest
                    {
                        Acks      = ackLevelBatch.Key.Acks,
                        TimeoutMS = (int)ackLevelBatch.Key.Timeout.TotalMilliseconds,
                        Payload   = new List<Payload> {
                            payload
                        }
                    };

                    await _semaphoreMaximumAsync.WaitAsync(cancellationToken).ConfigureAwait(false);

                    var brokerSendTask = new BrokerRouteSendBatch
                    {
                        Route        = group.Key.Route,
                        Task         = group.Key.Route.Connection.SendAsync(request),
                        MessagesSent = group.Select(x => x.TopicMessage).ToList()
                    };

                    //ensure the semaphore slot is released as soon as each task is completed
                    brokerSendTask.Task.ContinueWith(t => { _semaphoreMaximumAsync.Release(); }, cancellationToken,
                                                     TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);

                    sendTasks.Add(brokerSendTask);
                }

                try
                {
                    await Task.WhenAll(sendTasks.Select(x => x.Task)).ConfigureAwait(false);

                    foreach (var task in sendTasks)
                    {
                        task.MessagesSent.ForEach(x => x.Tcs.TrySetResult(task.Task.Result.FirstOrDefault()));
                    }
                }
                catch
                {
                    //if an error occurs here, all we know is some or all of the messages in this ackBatch failed.
                    var failedTask = sendTasks.FirstOrDefault(t => t.Task.IsFaulted);
                    if (failedTask != null)
                    {
                        foreach (var topicMessageBatch in ackLevelBatch)
                        {
                            topicMessageBatch.Tcs.TrySetException(new KafkaApplicationException("An exception occurred while executing a send operation against {0}.  Exception:{1}",
                                                                                                failedTask.Route, failedTask.Task.Exception));
                        }
                    }
                }
            }
        }