Example No. 1
        public void TestBadCompression()
        {
            using (var serialized = Pool.Reserve())
            {
                var set = new PartitionData
                {
                    Partition        = 42,
                    CompressionCodec = CompressionCodec.Gzip,
                    Messages         = new[]
                    {
                        new Message {
                            Key = Key, Value = Value
                        }
                    }
                };
                set.Serialize(serialized, SerializationConfig.ByteArraySerializers);

                // corrupt compressed data
                serialized[37] = 8;

                // recompute crc
                var crc = (int)Crc32.Compute(serialized, 24, serialized.Length - 24);
                serialized.Position = 20;
                BigEndianConverter.Write(serialized, crc);

                // deserialize: decompressing the corrupted payload should fail
                serialized.Position = 4;
                Assert.That(() => FetchPartitionResponse.DeserializeMessageSet(serialized, SerializationConfig.ByteArrayDeserializers), Throws.InstanceOf<UncompressException>());
            }
        }
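
Examples 1, 2, 5 and 6 reference fixture members (Key, Value, Pool) that are not shown in these excerpts. A minimal sketch of what they are assumed to look like, namely byte-array key/value constants and a pool handing out disposable ReusableMemoryStream instances (using System.Text for Encoding); the actual declarations in the kafka-sharp test fixture may differ:

        // Assumed fixture members (illustrative only, the real test fixture may differ):
        private static readonly byte[] Key   = Encoding.UTF8.GetBytes("the key");
        private static readonly byte[] Value = Encoding.UTF8.GetBytes("the value");

        // Pool.Reserve() is assumed to return a ReusableMemoryStream that is put
        // back into the pool when disposed, which is why the tests wrap it in a using block.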
Example No. 2
        public void TestDeserializeMessageSet(CompressionCodec codec)
        {
            using (var serialized = Pool.Reserve())
            {
                var set = new PartitionData
                {
                    Partition        = 42,
                    CompressionCodec = codec,
                    Messages         = new[]
                    {
                        new Message {
                            Key = Key, Value = Value
                        },
                        new Message {
                            Key = Key, Value = Value
                        }
                    }
                };
                set.Serialize(serialized, SerializationConfig.ByteArraySerializers);
                serialized.Position = 4;

                var deserialized = FetchPartitionResponse.DeserializeMessageSet(serialized, SerializationConfig.ByteArrayDeserializers);
                Assert.AreEqual(2, deserialized.Count);
                foreach (var msg in deserialized)
                {
                    Assert.AreEqual(0, msg.Offset);
                    CollectionAssert.AreEqual(Key, msg.Message.Key as byte[]);
                    CollectionAssert.AreEqual(Value, msg.Message.Value as byte[]);
                }
            }
        }
Example No. 3
        /// <summary>
        /// Handle a partition response for a given topic: filter out-of-range messages
        /// (which can happen when receiving compressed messages), update the partition states for the
        /// topic, emit all received messages on the dispatcher, and issue a new Fetch request if needed.
        /// </summary>
        /// <param name="topic">Topic the partition response belongs to.</param>
        /// <param name="partitionResponse">The partition response to process.</param>
        /// <returns>A task that completes once the response has been handled.</returns>
        private async Task HandleFetchPartitionResponse(string topic, FetchPartitionResponse partitionResponse)
        {
            var state = _partitionsStates[topic][partitionResponse.Partition];

            if (!state.Active)
            {
                return;
            }

            // Filter messages under required offset, this may happen when receiving
            // compressed messages. The Kafka protocol specifies it's up to the client
            // to filter out those messages. We also filter messages with offset greater
            // than the required stop offset if set.
            foreach (
                var message in
                partitionResponse.Messages.Where(
                    message =>
                    message.Offset >= state.NextOffsetExpected &&
                    (state.StopAt < 0 || message.Offset <= state.StopAt)))
            {
                state.LastOffsetSeen = message.Offset;
                MessageReceived(new RawKafkaRecord
                {
                    Topic     = topic,
                    Key       = message.Message.Key,
                    Value     = message.Message.Value,
                    Offset    = message.Offset,
                    Partition = partitionResponse.Partition
                });
            }

            ResponseMessageListPool.Release(partitionResponse.Messages);

            // Stop if we have seen the last required offset
            // TODO: what if we never see it?
            if (state.StopAt != Offsets.Never && state.LastOffsetSeen >= state.StopAt)
            {
                state.Active = false;
                return;
            }

            // Loop fetch request
            if (state.LastOffsetSeen >= Offsets.Some)
            {
                state.NextOffsetExpected = state.LastOffsetSeen + 1;
            }
            await Fetch(topic, partitionResponse.Partition, state.NextOffsetExpected);
        }
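
The handler above relies on a per-partition state object and a couple of offset sentinels that are defined elsewhere. A rough sketch of the shape implied by the usage, with names taken from the code; the actual kafka-sharp types may differ:

        // Sketch of the per-partition consumption state implied by the handler above.
        private class PartitionState
        {
            public bool Active;              // partition is currently being consumed
            public long NextOffsetExpected;  // first offset we still need to fetch
            public long LastOffsetSeen;      // highest offset emitted so far
            public long StopAt;              // stop offset, or a negative sentinel for "never stop"
        }

        // Offsets.Never and Offsets.Some are assumed to be sentinel values: Never
        // meaning "no stop offset requested", and Some marking the smallest value
        // a real, already-seen offset can take.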
Example No. 4
        public void TestSerializeProduceRequest()
        {
            var produce = new ProduceRequest
            {
                Timeout      = 1223,
                RequiredAcks = 1,
                TopicsData   = new[]
                {
                    new TopicData<PartitionData>
                    {
                        TopicName      = "barbu",
                        PartitionsData = new[]
                        {
                            new PartitionData
                            {
                                Partition        = 22,
                                CompressionCodec = CompressionCodec.None,
                                Messages         = new[]
                                {
                                    new Message {
                                        Value = TheValue
                                    }
                                },
                            }
                        }
                    },
                }
            };
            var config = new SerializationConfig();

            config.SetSerializersForTopic("barbu", new StringSerializer(), new StringSerializer());
            config.SetDeserializersForTopic("barbu", new StringDeserializer(), new StringDeserializer());
            using (var serialized = produce.Serialize(new ReusableMemoryStream(null), 321, ClientId, config))
            {
                CheckHeader(Basics.ApiKey.ProduceRequest, 0, 321, TheClientId, serialized);
                Assert.AreEqual(produce.RequiredAcks, BigEndianConverter.ReadInt16(serialized));
                Assert.AreEqual(produce.Timeout, BigEndianConverter.ReadInt32(serialized));
                Assert.AreEqual(1, BigEndianConverter.ReadInt32(serialized)); // 1 topic data
                Assert.AreEqual("barbu", Basics.DeserializeString(serialized));
                Assert.AreEqual(1, BigEndianConverter.ReadInt32(serialized)); // 1 partition data
                Assert.AreEqual(22, BigEndianConverter.ReadInt32(serialized));
                var msgs = FetchPartitionResponse.DeserializeMessageSet(serialized, config.GetDeserializersForTopic("barbu"));
                Assert.AreEqual(1, msgs.Count);
                //Assert.AreEqual(TheValue, Encoding.UTF8.GetString(msgs[0].Message.Value));
                Assert.AreEqual(TheValue, msgs[0].Message.Value as string);
            }
        }
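
Read back in order, the assertions double as documentation of the serialized layout: after the common request header come the required acks (int16), the timeout (int32), the topic count, then for each topic its name and partition count, and for each partition its id followed by the message set, which is why DeserializeMessageSet can consume the remainder directly.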
Example No. 5
        public void TestBadCrc()
        {
            using (var serialized = new ReusableMemoryStream(null))
            {
                var set = new PartitionData
                {
                    Partition        = 42,
                    CompressionCodec = CompressionCodec.None,
                    Messages         = new[]
                    {
                        new Message {
                            Key = Key, Value = Value
                        }
                    }
                };
                set.Serialize(serialized, SerializationConfig.ByteArraySerializers);
                serialized[20]      = 8; // change crc
                serialized.Position = 4;

                Assert.That(() => FetchPartitionResponse.DeserializeMessageSet(serialized, SerializationConfig.ByteArrayDeserializers), Throws.InstanceOf<CrcException>());
            }
        }
Example No. 6
        public void TestUnsupportedMagicByte()
        {
            using (var serialized = new ReusableMemoryStream(null))
            {
                var set = new PartitionData
                {
                    Partition        = 42,
                    CompressionCodec = CompressionCodec.None,
                    Messages         = new[]
                    {
                        new Message {
                            Key = Key, Value = Value
                        }
                    }
                };
                set.Serialize(serialized, SerializationConfig.ByteArraySerializers);
                serialized[24]      = 8; // set non supported magic byte
                serialized.Position = 4;

                Assert.That(() => FetchPartitionResponse.DeserializeMessageSet(serialized, SerializationConfig.ByteArrayDeserializers), Throws.InstanceOf<UnsupportedMagicByteVersion>());
            }
        }
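
The hard-coded positions used by these tests (Position = 4 before deserializing, byte 20 for the CRC, byte 24 for the magic byte, CRC recomputed from byte 24 onward) line up with the classic Kafka message layout once the leading partition id is accounted for. A summary of that assumed layout:

        // Assumed layout of the serialized PartitionData (v0 message format):
        //   0..3    partition id (int32)      -> skipped with serialized.Position = 4
        //   4..7    message set size (int32)
        //   8..15   message offset (int64)
        //   16..19  message size (int32)
        //   20..23  CRC32 of the message      -> patched in TestBadCrc
        //   24      magic byte                -> patched in TestUnsupportedMagicByte
        //   25..    attributes, key length, key, value length, value
        //
        // TestBadCompression recomputes the CRC over serialized[24..] so that only
        // the decompression step, not the CRC check, rejects the corrupted payload.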
Example No. 7
        // Utility function, statically allocated closure
        private static bool IsPartitionOkForClients(FetchPartitionResponse fr)
        {
            return Error.IsPartitionOkForClients(fr.ErrorCode);
        }
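
A sketch of how such a helper is typically wired up so the LINQ filtering does not allocate a new delegate on every call; the field and call site below are illustrative, not taken from the kafka-sharp sources:

        // Cache the method group in a static delegate once, instead of letting each
        // Where(...) call create a fresh delegate instance.
        private static readonly Func<FetchPartitionResponse, bool> _isPartitionOkForClients =
            IsPartitionOkForClients;

        // Hypothetical call site:
        // var usable = fetchedPartitions.Where(_isPartitionOkForClients);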
Example No. 8
        /// <summary>
        /// Handle a partition response for a given topic: filter out-of-range messages
        /// (which can happen when receiving compressed messages), update the partition states for the
        /// topic, emit all received messages on the dispatcher, and issue a new Fetch request if needed.
        /// </summary>
        /// <param name="topic">Topic the partition response belongs to.</param>
        /// <param name="partitionResponse">The partition response to process.</param>
        /// <returns>A task that completes once the response has been handled.</returns>
        private async Task HandleFetchPartitionResponse(string topic, FetchPartitionResponse partitionResponse)
        {
            var state = _partitionsStates[topic][partitionResponse.Partition];

            if (partitionResponse.ErrorCode == ErrorCode.OffsetOutOfRange)
            {
                _cluster.Logger.LogWarning(
                    string.Format(
                        "Offset {3} out of range for topic {0} / partition {1}, will read from {2} offset instead.", topic,
                        partitionResponse.Partition,
                        _configuration.OffsetOutOfRangeStrategy == Public.Offset.Earliest ? "earliest" : "latest", state.NextOffset));
                state.Active = false;
                StartConsume(topic, partitionResponse.Partition, (long)_configuration.OffsetOutOfRangeStrategy);
            }
            else
            {
                if (CheckActivity(state, topic, partitionResponse.Partition))
                {
                    // Filter messages under required offset, this may happen when receiving
                    // compressed messages. The Kafka protocol specifies it's up to the client
                    // to filter out those messages. We also filter messages with offset greater
                    // than the required stop offset if set.
                    long firstRequired = state.NextOffset;
                    foreach (var message in
                             partitionResponse.Messages.Where(
                                 message =>
                                 message.Offset >= firstRequired && (state.StopAt < 0 || message.Offset <= state.StopAt))
                             )
                    {
                        MessageReceived(new RawKafkaRecord
                        {
                            Topic     = topic,
                            Key       = message.Message.Key,
                            Value     = message.Message.Value,
                            Offset    = message.Offset,
                            Lag       = partitionResponse.HighWatermarkOffset - message.Offset,
                            Partition = partitionResponse.Partition,
                            Timestamp = Timestamp.FromUnixTimestamp(message.Message.TimeStamp)
                        });
                        state.NextOffset = message.Offset + 1;

                        await CheckHeartbeat();
                        await CheckCommit(null);

                        // Recheck status (it may have changed if the heartbeat triggered a rebalance)
                        // TODO: is it really useful to check for heartbeat/commit after each message?
                        // It's merely a defense against a misbehaving message handler; maybe we don't
                        // care and should only check at the beginning of partition processing.
                        if (!CheckActivity(state, topic, partitionResponse.Partition))
                        {
                            break;
                        }
                    }
                }

                ResponseMessageListPool.Release(partitionResponse.Messages);

                await CheckCommit(null);

                // Stop if we have seen the last required offset
                // TODO: what if we never see it?
                if (!state.Active || (state.StopAt != Offsets.Never && state.NextOffset > state.StopAt))
                {
                    state.Active = false;
                    return;
                }

                // Loop fetch request
                await Fetch(topic, partitionResponse.Partition, state.NextOffset);
            }
        }
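
The out-of-range branch casts the configured strategy straight to a long and feeds it to StartConsume as an offset, which suggests the enum values double as the Kafka sentinel offsets. A sketch of that assumption (not the actual kafka-sharp declaration):

        // Assumed mapping: the strategy enum reuses the protocol's special offsets,
        // so (long)_configuration.OffsetOutOfRangeStrategy is itself a valid start offset.
        public enum Offset : long
        {
            Latest   = -1,  // restart from the end of the partition
            Earliest = -2   // restart from the beginning of the partition
        }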