Constructs a request to send to Kafka.

FetchRequest => ReplicaId MaxWaitTime MinBytes [TopicName [Partition FetchOffset MaxBytes]]
  ReplicaId => int32
  MaxWaitTime => int32
  MinBytes => int32
  TopicName => string
  Partition => int32
  FetchOffset => int64
  MaxBytes => int32

Setting MaxWaitTime to 0 and MinBytes to 0 can reduce latency.
Inheritance: AbstractRequest, IWritable
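
The examples below use the older constructor form, FetchRequest(topic, partition, offset[, maxSize]), which does not expose ReplicaId, MaxWaitTime, or MinBytes directly. A minimal sketch of issuing such a request, assuming a broker at localhost:9092 and a topic named "test" (both placeholders):

// Placeholder host, port, and topic; adjust to your environment.
var consumerConfig = new ConsumerConfiguration("localhost", 9092);
IConsumer consumer = new Consumer(consumerConfig);

// topic, partition, starting byte offset, maximum bytes to fetch
var request = new FetchRequest("test", 0, 0L, 1024 * 1024);

BufferedMessageSet response = consumer.Fetch(request);
if (response != null)
{
    foreach (var message in response.Messages)
    {
        Console.WriteLine(message);
    }
}
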
        public void ZkAwareProducerSends1Message()
        {
            var prodConfig = this.ZooKeeperBasedSyncProdConfig;

            int totalWaitTimeInMiliseconds = 0;
            int waitSingle = 100;
            var originalMessage = new Message(Encoding.UTF8.GetBytes("TestData"));

            var multipleBrokersHelper = new TestMultipleBrokersHelper(CurrentTestTopic);
            multipleBrokersHelper.GetCurrentOffsets(new[] { this.SyncProducerConfig1, this.SyncProducerConfig2, this.SyncProducerConfig3 });

            var mockPartitioner = new MockAlwaysZeroPartitioner();
            using (var producer = new Producer<string, Message>(prodConfig, mockPartitioner, new DefaultEncoder()))
            {
                var producerData = new ProducerData<string, Message>(
                    CurrentTestTopic, "somekey", new List<Message> { originalMessage });
                producer.Send(producerData);

                while (!multipleBrokersHelper.CheckIfAnyBrokerHasChanged(new[] { this.SyncProducerConfig1, this.SyncProducerConfig2, this.SyncProducerConfig3 }))
                {
                    totalWaitTimeInMiliseconds += waitSingle;
                    Thread.Sleep(waitSingle);
                    if (totalWaitTimeInMiliseconds > this.maxTestWaitTimeInMiliseconds)
                    {
                        Assert.Fail("None of the brokers changed their offset after sending a message");
                    }
                }

                totalWaitTimeInMiliseconds = 0;

                var consumerConfig = new ConsumerConfiguration(
                    multipleBrokersHelper.BrokerThatHasChanged.Host,
                    multipleBrokersHelper.BrokerThatHasChanged.Port);
                IConsumer consumer = new Consumer(consumerConfig);
                var request = new FetchRequest(CurrentTestTopic, multipleBrokersHelper.PartitionThatHasChanged, multipleBrokersHelper.OffsetFromBeforeTheChange);

                BufferedMessageSet response;

                while (true)
                {
                    Thread.Sleep(waitSingle);
                    response = consumer.Fetch(request);
                    if (response != null && response.Messages.Count() > 0)
                    {
                        break;
                    }

                    totalWaitTimeInMiliseconds += waitSingle;
                    if (totalWaitTimeInMiliseconds >= this.maxTestWaitTimeInMiliseconds)
                    {
                        break;
                    }
                }

                Assert.NotNull(response);
                Assert.AreEqual(1, response.Messages.Count());
                Assert.AreEqual(originalMessage.ToString(), response.Messages.First().ToString());
            }
        }
        public void AsyncProducerSendsAndConsumerReceivesSingleSimpleMessage()
        {
            Message sourceMessage = new Message(Encoding.UTF8.GetBytes("test message"));

            var config = new AsyncProducerConfig(clientConfig);
            var producer = new AsyncProducer(config);
            var producerRequest = new ProducerRequest(CurrentTestTopic, 0, new List<Message>() { sourceMessage });

            long currentOffset = TestHelper.GetCurrentKafkaOffset(CurrentTestTopic, clientConfig);

            producer.Send(producerRequest);

            ConsumerConfig consumerConfig = new ConsumerConfig(clientConfig);
            IConsumer consumer = new Consumers.Consumer(consumerConfig);
            FetchRequest request = new FetchRequest(CurrentTestTopic, 0, currentOffset);

            BufferedMessageSet response;
            int totalWaitTimeInMiliseconds = 0;
            int waitSingle = 100;
            while (true)
            {
                Thread.Sleep(waitSingle);
                response = consumer.Fetch(request);
                if (response != null && response.Messages.Count() > 0)
                {
                    break;
                }
                else
                {
                    totalWaitTimeInMiliseconds += waitSingle;
                    if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds)
                    {
                        break;
                    }
                }
            }

            Assert.NotNull(response);
            Assert.AreEqual(1, response.Messages.Count());
            Message resultMessage = response.Messages.First();
            Assert.AreEqual(sourceMessage.ToString(), resultMessage.ToString());
        }
Example #3
        public void GetBytesValidStructure()
        {
            string topicName = "topic";
            FetchRequest request = new FetchRequest(topicName, 1, 10L, 100);

            // REQUEST TYPE ID + TOPIC LENGTH + TOPIC + PARTITION + OFFSET + MAX SIZE
            int requestSize = 2 + 2 + topicName.Length + 4 + 8 + 4;

            MemoryStream ms = new MemoryStream();
            request.WriteTo(ms);
            byte[] bytes = ms.ToArray();
            Assert.IsNotNull(bytes);

            // add 4 bytes for the length of the message at the beginning
            Assert.AreEqual(requestSize + 4, bytes.Length);

            // first 4 bytes = the message length
            Assert.AreEqual(25, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Take(4).ToArray<byte>()), 0));

            // next 2 bytes = the request type
            Assert.AreEqual((short)RequestTypes.Fetch, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(4).Take(2).ToArray<byte>()), 0));

            // next 2 bytes = the topic length
            Assert.AreEqual((short)topicName.Length, BitConverter.ToInt16(BitWorks.ReverseBytes(bytes.Skip(6).Take(2).ToArray<byte>()), 0));

            // next few bytes = the topic
            Assert.AreEqual(topicName, Encoding.ASCII.GetString(bytes.Skip(8).Take(topicName.Length).ToArray<byte>()));

            // next 4 bytes = the partition
            Assert.AreEqual(1, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(8 + topicName.Length).Take(4).ToArray<byte>()), 0));

            // next 8 bytes = the offset
            Assert.AreEqual(10L, BitConverter.ToInt64(BitWorks.ReverseBytes(bytes.Skip(12 + topicName.Length).Take(8).ToArray<byte>()), 0));

            // last 4 bytes = the max size
            Assert.AreEqual(100, BitConverter.ToInt32(BitWorks.ReverseBytes(bytes.Skip(20 + topicName.Length).Take(4).ToArray<byte>()), 0));
        }
Example #4
        private static BufferedMessageSet Fetch(KafkaConnection conn, FetchRequest request)
        {
            conn.Write(request);
            int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(conn.Read(4)), 0);
            if (dataLength > 0)
            {
                byte[] data = conn.Read(dataLength);

                int errorCode = BitConverter.ToInt16(BitWorks.ReverseBytes(data.Take(2).ToArray()), 0);
                if (errorCode != KafkaException.NoError)
                {
                    throw new KafkaException(errorCode);
                }

                // skip the error code
                byte[] unbufferedData = data.Skip(2).ToArray();
                return BufferedMessageSet.ParseFrom(unbufferedData);
            }

            return null;
        }
Example #5
        /// <summary>
        /// Fetch a set of messages from a topic.
        /// </summary>
        /// <param name="request">
        /// Specifies the topic name, topic partition, starting byte offset, and maximum bytes to be fetched.
        /// </param>
        /// <returns>
        /// A set of fetched messages.
        /// </returns>
        /// <remarks>
        /// Offset is passed in on every request, allowing the user to maintain this metadata 
        /// however they choose.
        /// </remarks>
        public BufferedMessageSet Fetch(FetchRequest request)
        {
            BufferedMessageSet result = null;
            using (var conn = new KafkaConnection(this.Host, this.Port))
            {
                short tryCounter = 1;
                bool success = false;
                while (!success && tryCounter <= this.config.NumberOfTries)
                {
                    try
                    {
                        result = Fetch(conn, request);
                        success = true;
                    }
                    catch (Exception ex)
                    {
                        //// if maximum number of tries reached
                        if (tryCounter == this.config.NumberOfTries)
                        {
                            throw;
                        }

                        tryCounter++;
                        Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
                    }
                }
            }

            return result;
        }
Example #6
        public void ProducerSends3Messages()
        {
            int totalWaitTimeInMiliseconds = 0;
            int waitSingle = 100;
            var originalMessage1 = new Message(Encoding.UTF8.GetBytes("TestData1"));
            var originalMessage2 = new Message(Encoding.UTF8.GetBytes("TestData2"));
            var originalMessage3 = new Message(Encoding.UTF8.GetBytes("TestData3"));
            var originalMessageList =
                new List<Message> { originalMessage1, originalMessage2, originalMessage3 };

            var multipleBrokersHelper = new TestMultipleBrokersHelper(CurrentTestTopic);
            multipleBrokersHelper.GetCurrentOffsets();

            var producerConfig = new ProducerConfig(clientConfig);
            var mockPartitioner = new MockAlwaysZeroPartitioner();
            using (var producer = new Producer<string, Message>(producerConfig, mockPartitioner, new DefaultEncoder(), null))
            {
                var producerData = new ProducerData<string, Message>(CurrentTestTopic, "somekey", originalMessageList);
                producer.Send(producerData);
                Thread.Sleep(waitSingle);

                while (!multipleBrokersHelper.CheckIfAnyBrokerHasChanged())
                {
                    totalWaitTimeInMiliseconds += waitSingle;
                    Thread.Sleep(waitSingle);
                    if (totalWaitTimeInMiliseconds > this.maxTestWaitTimeInMiliseconds)
                    {
                        Assert.Fail("None of the brokers changed their offset after sending a message");
                    }
                }

                totalWaitTimeInMiliseconds = 0;

                var consumerConfig = new ConsumerConfig(clientConfig)
                    {
                        Host = multipleBrokersHelper.BrokerThatHasChanged.Address,
                        Port = multipleBrokersHelper.BrokerThatHasChanged.Port
                    };
                IConsumer consumer = new Consumers.Consumer(consumerConfig);
                var request = new FetchRequest(CurrentTestTopic, 0, multipleBrokersHelper.OffsetFromBeforeTheChange);

                BufferedMessageSet response;
                while (true)
                {
                    Thread.Sleep(waitSingle);
                    response = consumer.Fetch(request);
                    if (response != null && response.Messages.Count() > 2)
                    {
                        break;
                    }

                    totalWaitTimeInMiliseconds += waitSingle;
                    if (totalWaitTimeInMiliseconds >= this.maxTestWaitTimeInMiliseconds)
                    {
                        break;
                    }
                }

                Assert.NotNull(response);
                Assert.AreEqual(3, response.Messages.Count());
                Assert.AreEqual(originalMessage1.ToString(), response.Messages.First().ToString());
                Assert.AreEqual(originalMessage2.ToString(), response.Messages.Skip(1).First().ToString());
                Assert.AreEqual(originalMessage3.ToString(), response.Messages.Skip(2).First().ToString());
            }
        }
Example #7
 /// <summary>
 /// Writes a fetch request to the server.
 /// </summary>
 /// <remarks>
 /// Write timeout defaults to infinite.
 /// </remarks>
 /// <param name="request">The <see cref="FetchRequest"/> to send to the server.</param>
 public void Write(FetchRequest request)
 {
     this.EnsuresNotDisposed();
     Guard.NotNull(request, "request");
     this.Write(request.RequestBuffer.GetBuffer());
 }
Example #8
        /// <summary>
        /// Method to be used for starting a new thread
        /// </summary>
        internal void Run()
        {
            foreach (var partitionTopicInfo in partitionTopicInfos)
            {
                Logger.InfoFormat(
                    CultureInfo.CurrentCulture,
                    "{0} start fetching topic: {1} part: {2} offset: {3} from {4}:{5}",
                    this.name,
                    partitionTopicInfo.Topic,
                    partitionTopicInfo.Partition.PartId,
                    partitionTopicInfo.GetFetchOffset(),
                    this.broker.Host,
                    this.broker.Port);
            }

            try
            {
                while (!this.shouldStop)
                {
                    var requestList = new List<FetchRequest>();
                    foreach (var partitionTopicInfo in this.partitionTopicInfos)
                    {
                        var singleRequest = new FetchRequest(partitionTopicInfo.Topic, partitionTopicInfo.Partition.PartId, partitionTopicInfo.GetFetchOffset(), this.config.MaxFetchSize);
                        requestList.Add(singleRequest);
                    }

                    Logger.Debug("Fetch request: " + string.Join(", ", requestList.Select(x => x.ToString())));
                    var request = new MultiFetchRequest(requestList);
                    var response = this.simpleConsumer.MultiFetch(request);
                    int read = 0;
                    var items = this.partitionTopicInfos.Zip(
                        response,
                        (x, y) =>
                        new Tuple<PartitionTopicInfo, BufferedMessageSet>(x, y));
                    foreach (Tuple<PartitionTopicInfo, BufferedMessageSet> item in items)
                    {
                        BufferedMessageSet messages = item.Item2;
                        PartitionTopicInfo info = item.Item1;
                        try
                        {
                            bool done = false;
                            if (messages.ErrorCode == ErrorMapping.OffsetOutOfRangeCode)
                            {
                                Logger.InfoFormat(CultureInfo.CurrentCulture, "offset {0} out of range", info.GetFetchOffset());
                                //// see if we can fix this error
                                var resetOffset = this.ResetConsumerOffsets(info.Topic, info.Partition);
                                if (resetOffset >= 0)
                                {
                                    info.ResetFetchOffset(resetOffset);
                                    info.ResetConsumeOffset(resetOffset);
                                    done = true;
                                }
                            }

                            if (!done)
                            {
                                read += info.Add(messages, info.GetFetchOffset());
                            }
                        }
                        catch (Exception ex)
                        {
                            if (!shouldStop)
                            {
                                Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable for {0}" + info, ex);
                            }

                            throw;
                        }
                    }

                    Logger.Info("Fetched bytes: " + read);
                    if (read == 0)
                    {
                        Logger.DebugFormat(CultureInfo.CurrentCulture, "backing off {0} ms", this.config.BackOffIncrement);
                        Thread.Sleep(this.config.BackOffIncrement);
                    }
                }
            }
            catch (Exception ex)
            {
                if (shouldStop)
                {
                    Logger.InfoFormat(CultureInfo.CurrentCulture, "FetcherRunnable {0} interrupted", this);
                }
                else
                {
                    Logger.ErrorFormat(CultureInfo.CurrentCulture, "error in FetcherRunnable {0}", ex);
                }
            }

            Logger.InfoFormat(CultureInfo.CurrentCulture, "stopping fetcher {0} to host {1}", this.name, this.broker.Host);
        }
        public void ConsumerFetchMessage()
        {
            ProducerSendsMessage();

            ConsumerConfig config = new ConsumerConfig(clientConfig);
            IConsumer consumer = new Kafka.Client.Consumers.Consumer(config);
            FetchRequest request = new FetchRequest(CurrentTestTopic, 0, 0);
            BufferedMessageSet response = consumer.Fetch(request);
            Assert.NotNull(response);
            foreach (var message in response.Messages)
            {
                Console.WriteLine(message);
            }
        }
Example #10
        public void ConsumerFetchMessage()
        {
            var consumerConfig = this.ConsumerConfig1;
            ProducerSendsMessage();
            Thread.Sleep(1000);
            IConsumer consumer = new Consumer(consumerConfig);
            var request = new FetchRequest(CurrentTestTopic, 0, 0);
            BufferedMessageSet response = consumer.Fetch(request);
            Assert.NotNull(response);
            int count = 0;
            foreach (var message in response)
            {
                count++;
                Console.WriteLine(message.Message);
            }

            Assert.AreEqual(2, count);
        }
Example #11
        /// <summary>
        /// Fetch a set of messages from a topic.
        /// </summary>
        /// <param name="request">
        /// Specifies the topic name, topic partition, starting byte offset, and maximum bytes to be fetched.
        /// </param>
        /// <returns>
        /// A set of fetched messages.
        /// </returns>
        /// <remarks>
        /// Offset is passed in on every request, allowing the user to maintain this metadata
        /// however they choose.
        /// </remarks>
        public BufferedMessageSet Fetch(FetchRequest request)
        {
            short tryCounter = 1;
            while (tryCounter <= this.config.NumberOfTries)
            {
                try
                {
                    using (var conn = new KafkaConnection(
                        this.host,
                        this.port,
                        this.config.BufferSize,
                        this.config.SocketTimeout))
                    {
                        conn.Write(request);
                        int size = conn.Reader.ReadInt32();
                        return BufferedMessageSet.ParseFrom(conn.Reader, size);
                    }
                }
                catch (Exception ex)
                {
                    //// if maximum number of tries reached
                    if (tryCounter == this.config.NumberOfTries)
                    {
                        throw;
                    }

                    tryCounter++;
                    Logger.InfoFormat(CultureInfo.CurrentCulture, "Fetch reconnect due to {0}", ex);
                }
            }

            return null;
        }
        public FetchResponse Fetch(FetchRequest request)
        {
            short tryCounter = 1;
            while (tryCounter <= this.config.NumberOfTries)
            {
                try
                {
                    Logger.Debug("Fetch is waiting for send lock");
                    lock (this)
                    {
                        Logger.Debug("Fetch acquired send lock. Begin send");
                        return connection.Send(request);
                    }
                }
                catch (Exception ex)
                {
                    //// if maximum number of tries reached
                    if (tryCounter == this.config.NumberOfTries)
                    {
                        throw;
                    }

                    tryCounter++;
                    Logger.InfoFormat("Fetch reconnect due to {0}", ex.FormatException());
                }
            }

            return null;
        }
Example #13
 /// <summary>
 /// Writes a fetch request to the server.
 /// </summary>
 /// <remarks>
 /// Write timeout defaults to infinite.
 /// </remarks>
 /// <param name="request">The <see cref="FetchRequest"/> to send to the server.</param>
 public void Write(FetchRequest request)
 {
     this.EnsuresNotDisposed();
     Guard.Assert<ArgumentNullException>(() => request != null);
     this.Write(request.RequestBuffer.GetBuffer(), Timeout.Infinite);
 }
        public void ZkAwareProducerSends1MessageUsingNotDefaultEncoder()
        {
            int totalWaitTimeInMiliseconds = 0;
            int waitSingle = 100;
            string originalMessage = "TestData";

            var multipleBrokersHelper = new TestMultipleBrokersHelper(CurrentTestTopic);
            multipleBrokersHelper.GetCurrentOffsets();

            var producerConfig = new ProducerConfig(clientConfig);
            var mockPartitioner = new MockAlwaysZeroPartitioner();
            using (var producer = new Producer<string, string>(producerConfig, mockPartitioner, new StringEncoder(), null))
            {
                var producerData = new ProducerData<string, string>(
                    CurrentTestTopic, "somekey", new List<string> { originalMessage });
                producer.Send(producerData);

                while (!multipleBrokersHelper.CheckIfAnyBrokerHasChanged())
                {
                    totalWaitTimeInMiliseconds += waitSingle;
                    Thread.Sleep(waitSingle);
                    if (totalWaitTimeInMiliseconds > MaxTestWaitTimeInMiliseconds)
                    {
                        Assert.Fail("None of the brokers changed their offset after sending a message");
                    }
                }

                totalWaitTimeInMiliseconds = 0;

                var consumerConfig = new ConsumerConfig(clientConfig)
                    {
                        Host = multipleBrokersHelper.BrokerThatHasChanged.Address,
                        Port = multipleBrokersHelper.BrokerThatHasChanged.Port
                    };
                IConsumer consumer = new Consumers.Consumer(consumerConfig);
                var request = new FetchRequest(CurrentTestTopic, 0, multipleBrokersHelper.OffsetFromBeforeTheChange);
                BufferedMessageSet response;

                while (true)
                {
                    Thread.Sleep(waitSingle);
                    response = consumer.Fetch(request);
                    if (response != null && response.Messages.Count() > 0)
                    {
                        break;
                    }

                    totalWaitTimeInMiliseconds += waitSingle;
                    if (totalWaitTimeInMiliseconds >= MaxTestWaitTimeInMiliseconds)
                    {
                        break;
                    }
                }

                Assert.NotNull(response);
                Assert.AreEqual(1, response.Messages.Count());
                Assert.AreEqual(originalMessage, Encoding.UTF8.GetString(response.Messages.First().Payload));
            }
        }