public void SetupData()
        {
            // Benchmark fixture: builds a ProduceRequest, a canned ProduceResponse,
            // and a loopback TCP server that answers every request with that response.
            Common.Compression.ZipLevel = Level;

            var topics = Enumerable.Range(1, Partitions)
                .Select(partitionId =>
                {
                    var messages = Enumerable.Range(1, Messages)
                        .Select(i => new Message(GenerateMessageBytes(), new ArraySegment<byte>(), (byte)Codec, version: MessageVersion));
                    return new ProduceRequest.Topic("topic", partitionId, messages, Codec);
                });
            _request = new ProduceRequest(topics);

            var cannedResponse = new ProduceResponse(new ProduceResponse.Topic("topic", 1, ErrorCode.NONE, 0));

            const int port = 10000;
            var endpoint = new Endpoint(new IPEndPoint(IPAddress.Loopback, port), "localhost");

            _server = new TcpServer(endpoint.Ip.Port)
            {
                // Decode the request header (skipping the 4-byte length prefix) and
                // reply with the canned response correlated by id.
                OnReceivedAsync = async data =>
                {
                    var header = KafkaDecoder.DecodeHeader(data.Skip(4));
                    var bytes = KafkaDecoder.EncodeResponseBytes(new RequestContext(header.CorrelationId), cannedResponse);
                    await _server.SendDataAsync(bytes);
                }
            };
            _connection = new Connection(endpoint);
        }
Example #2
0
        public async Task ConsumeByOffsetShouldGetSameMessageProducedAtSameOffset()
        {
            // Integration round-trip: produce one unique message, record the offset
            // the broker assigned to it, then consume from exactly that offset and
            // expect the same payload back.
            long offsetResponse;
            Guid message = Guid.NewGuid();

            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)
            {
                Log = IntegrationConfig.NoDebugLog
            }))
                using (var producer = new Producer(router))
                {
                    // acks:1 so the broker reports the offset it assigned.
                    ProduceResponse responseAckLevel1 = await producer.SendMessageAsync(new Message(message.ToString()), IntegrationConfig.IntegrationTopic, acks : 1, partition : 0);

                    offsetResponse = responseAckLevel1.Offset;
                }
            using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)
            {
                Log = IntegrationConfig.NoDebugLog
            }))
                using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router)
                {
                    MaxWaitTimeForMinimumBytes = TimeSpan.Zero
                }, new OffsetPosition[] { new OffsetPosition(0, offsetResponse) }))
                {
                    var result = consumer.Consume().Take(1).ToList().FirstOrDefault();
                    // Guard against an empty consume result: a missing message should
                    // fail the assertion, not throw a NullReferenceException.
                    Assert.IsNotNull(result);
                    Assert.AreEqual(message.ToString(), result.Value.ToUtf8String());
                }
        }
Example #3
0
        public ReusableMemoryStream SerializeProduceBatch(int correlationId, IEnumerable <IGrouping <string, IGrouping <int, ProduceMessage> > > batch)
        {
            // Test double: instead of serializing the batch, record a synthetic
            // ProduceResponse for this correlation id so the fake broker can hand
            // it back later. Returns an empty stream in place of real wire bytes.
            //
            // NOTE(review): when _forceErrors is set, Interlocked.Increment runs up
            // to twice per partition (once per ternary test), so the error cadence
            // is not a strict every-2nd/every-3rd pattern — confirm this is intended.
            var r = new ProduceResponse {
                ProducePartitionResponse = new CommonResponse <ProducePartitionResponse>
                {
                    TopicsResponse = batch.Select(g => new TopicData <ProducePartitionResponse>
                    {
                        TopicName      = g.Key,
                        PartitionsData = g.Select(pg => new ProducePartitionResponse
                        {
                            // Forced errors take priority; otherwise mirror the error
                            // code from the configured metadata for this topic/partition.
                            ErrorCode = _forceErrors && Interlocked.Increment(ref _count) % 2 == 0
                            ? ErrorCode.LeaderNotAvailable
                            : _forceErrors && Interlocked.Increment(ref _count) % 3 == 0
                                ? ErrorCode.MessageSizeTooLarge
                                : _metadataResponse.TopicsMeta.Where(tm => tm.TopicName == g.Key)
                                        .Select(tm => tm.Partitions.First(p => p.Id == pg.Key).ErrorCode)
                                        .First(),
                            Offset    = 0,
                            Partition = pg.Key
                        }).ToArray()
                    }).ToArray()
                }
            };

            _produceResponses[correlationId] = r;

            return(new ReusableMemoryStream(null));
        }
Example #4
0
        public void ProduceResponse()
        {
            // Round-trip test: serialize -> deserialize -> compare object graphs,
            // then re-serialize and verify the two binary encodings are identical.
            var response1 = new ProduceResponse {
                TopicPartitions = new[] {
                    new ProduceResponseTopicPartition {
                        TopicName = Guid.NewGuid().ToString(),
                        Details   = new [] {
                            new ProduceResponseTopicPartitionDetail {
                                Partition = _random.Next(),
                                Offset    = (Int64)_random.Next(),
                            }
                        }
                    }
                }
            };

            Stream binary1 = new MemoryStream();

            response1.Serialize(binary1);

            binary1.Seek(0L, SeekOrigin.Begin);
            var response2 = new ProduceResponse();

            response2.Deserialize(binary1);

            // Structural equality of the deserialized graph.
            var compareLogic = new CompareLogic();
            var result       = compareLogic.Compare(response1, response2);

            Assert.True(result.AreEqual);

            Stream binary2 = new MemoryStream();

            response2.Serialize(binary2);
            Assert.Equal(binary1.Length, binary2.Length);

            using (var stream1 = new MemoryStream())
                using (var stream2 = new MemoryStream()) {
                    binary1.Seek(0L, SeekOrigin.Begin);
                    binary1.CopyTo(stream1);

                    binary2.Seek(0L, SeekOrigin.Begin);
                    binary2.CopyTo(stream2);

                    // MemoryStream.ToArray() is position-independent, so the Seek
                    // calls that preceded it were redundant; a single array-equality
                    // assert also covers the former length check and per-byte loop.
                    var bytes1 = stream1.ToArray();
                    var bytes2 = stream2.ToArray();
                    Assert.Equal(bytes1, bytes2);
                }
        }
Example #5
0
        public short Produce(string topicName, int partitionId, string data)
        {
            // Sends a single UTF-8 message to the given topic/partition and returns
            // the broker's error code ((short)KafkaErrorCode.NoError on success).
            // partitionId == -1 means "use the cached default partition".
            try
            {
                if (!topicPartitionDictionary.ContainsKey(topicName))
                {
                    // Check if topic exist and on what partition.
                    // This call will automatically create the topic if the broker is set up to auto create
                    MetadataResponse metadataResponse = connector.Metadata(DefaultCorrelationId, clientId, topicName);
                    short            errorCode        = metadataResponse.TopicErrorCode(topicName);
                    if (errorCode != (short)KafkaErrorCode.NoError)
                    {
                        if (errorCode != (short)KafkaErrorCode.LeaderNotAvailable)
                        {
                            return(errorCode);
                        }
                        // LeaderNotAvailable usually means the topic was just auto
                        // created; re-fetch metadata to see whether it exists now.
                        metadataResponse = connector.Metadata(DefaultCorrelationId, clientId, topicName);
                        errorCode        = metadataResponse.TopicErrorCode(topicName);
                        if (errorCode != (short)KafkaErrorCode.NoError)
                        {
                            return(errorCode);
                        }
                    }
                    // Cache the topic's first partition (hoisted from the duplicated
                    // Add calls that previously sat in both branches).
                    topicPartitionDictionary.Add(topicName, metadataResponse.Partitions(topicName)[0]);
                }

                if (partitionId == -1)
                {
                    partitionId = topicPartitionDictionary[topicName];
                }
                var message = Encoding.UTF8.GetBytes(data);

                ProduceResponse response = connector.Produce(DefaultCorrelationId, clientId, 500, topicName, partitionId, message);
                return(response.ErrorCode(topicName, 0));
            }
            catch (SocketException ex)
            {
                // NOTE(review): wrapping with only ex.Message drops the original
                // stack trace; pass ex as the inner exception if KafkaException
                // has such a constructor.
                throw new KafkaException(ex.Message);
            }
        }
Example #6
0
        private async Task SetResult(List <BrokerRouteSendBatch> sendTasks)
        {
            // Propagates each batch's send outcome to the per-message TaskCompletionSources.
            foreach (var sendTask in sendTasks)
            {
                try
                {
                    // The batch task is already complete by the time this runs, so the
                    // await does not block; it just unwraps the result (or exception).
                    var batchResult = await sendTask.Task.ConfigureAwait(false);

                    var numberOfMessage = sendTask.MessagesSent.Count;
                    for (int i = 0; i < numberOfMessage; i++)
                    {
                        bool isAckLevel0 = sendTask.AckLevel == 0;
                        if (isAckLevel0)
                        {
                            // Fire-and-forget (acks=0): the broker reports no offset,
                            // so synthesize a success response with Offset = -1.
                            var responce = new ProduceResponse()
                            {
                                Error       = (short)ErrorResponseCode.NoError,
                                PartitionId = sendTask.Route.PartitionId,
                                Topic       = sendTask.Route.Topic,
                                Offset      = -1
                            };
                            sendTask.MessagesSent[i].Tcs.SetResult(responce);
                        }
                        else
                        {
                            // The broker returns the base offset of the batch; each
                            // message's offset is base + its index within the batch.
                            var response = new ProduceResponse()
                            {
                                Error       = batchResult.Error,
                                PartitionId = batchResult.PartitionId,
                                Topic       = batchResult.Topic,
                                Offset      = batchResult.Offset + i
                            };
                            sendTask.MessagesSent[i].Tcs.SetResult(response);
                        }
                    }
                }
                catch (Exception ex)
                {
                    // A failed batch fails every message in it; TrySetException avoids
                    // throwing if a source was somehow already completed.
                    BrokerRouter.Log.ErrorFormat("failed to send batch Topic[{0}] ackLevel[{1}] partition[{2}] EndPoint[{3}] Exception[{4}] stacktrace[{5}]", sendTask.Route.Topic, sendTask.AckLevel, sendTask.Route.PartitionId, sendTask.Route.Connection.Endpoint, ex.Message, ex.StackTrace);
                    sendTask.MessagesSent.ForEach((x) => x.Tcs.TrySetException(ex));
                }
            }
        }
Example #7
0
        public ProduceResponse Produce(int correlationId, string clientId, int timeOut, string topicName, int partitionId, byte[] payLoad)
        {
            // Builds a single-message produce request, sends it over a fresh
            // connection, and parses the broker's reply. An empty reply (length 0)
            // yields an empty ProduceResponse.
            var produceRequest = new ProduceRequest(timeOut, correlationId, clientId);
            produceRequest.AddMessage(topicName, partitionId, payLoad);

            using (var connection = new KafkaConnection(server, port))
            {
                connection.Write(produceRequest.GetRequestBytes().ToArray());

                // The first 4 bytes on the wire are the big-endian payload length.
                var lengthBytes = BitWorks.ReverseBytes(connection.Read(4));
                int responseLength = BitConverter.ToInt32(lengthBytes, 0);

                var produceResponse = new ProduceResponse();
                if (responseLength != 0)
                {
                    produceResponse.Parse(connection.Read(responseLength));
                }
                return produceResponse;
            }
        }
Example #8
0
        public void ProduceResponse(
            [Values(0, 1, 2)] short version,
            [Values(-1, 0, 10000000)] long timestampMilliseconds,
            [Values("test", "a really long name, with spaces and punctuation!")] string topicName,
            [Values(1, 10)] int topicsPerRequest,
            [Values(1, 5)] int totalPartitions,
            [Values(
                 ErrorResponseCode.None,
                 ErrorResponseCode.CorruptMessage
                 )] ErrorResponseCode errorCode,
            [Values(0, 100000)] int throttleTime)
        {
            // Encode/decode round-trip across protocol versions: throttle time is
            // on the wire from v1, per-partition timestamps from v2.
            var topics = Enumerable.Range(0, topicsPerRequest)
                .Select(t => new ProduceResponse.Topic(
                            topicName + t,
                            t % totalPartitions,
                            errorCode,
                            _randomizer.Next(),
                            version >= 2 ? timestampMilliseconds.FromUnixEpochMilliseconds() : (DateTime?)null))
                .ToList();

            var response = new ProduceResponse(topics, version >= 1 ? TimeSpan.FromMilliseconds(throttleTime) : (TimeSpan?)null);

            response.AssertCanEncodeDecodeResponse(version);
        }
        public void ProduceResponse(
            [Values(0, 1, 2)] short version,
            [Values(-1, 0, 10000000)] long timestampMilliseconds,
            [Values("testTopic")] string topicName,
            [Values(1, 10)] int topicsPerRequest,
            [Values(1, 5)] int totalPartitions,
            [Values(
                 ErrorCode.NONE,
                 ErrorCode.CORRUPT_MESSAGE
                 )] ErrorCode errorCode,
            [Values(0, 100000)] int throttleTime)
        {
            // Encode/decode round-trip across protocol versions. Timestamps only
            // exist on the wire from v2, throttle time from v1; hoist both so the
            // version logic is stated once.
            DateTimeOffset? timestamp = version >= 2
                ? DateTimeOffset.FromUnixTimeMilliseconds(timestampMilliseconds)
                : (DateTimeOffset?)null;
            TimeSpan? throttle = version >= 1
                ? TimeSpan.FromMilliseconds(throttleTime)
                : (TimeSpan?)null;

            var topics = new List <ProduceResponse.Topic>();
            for (var t = 0; t < topicsPerRequest; t++)
            {
                topics.Add(new ProduceResponse.Topic(topicName + t, t % totalPartitions, errorCode, _randomizer.Next(), timestamp));
            }

            new ProduceResponse(topics, throttle).AssertCanEncodeDecodeResponse(version);
        }
Example #10
0
        private async Task AddFileToKafka(byte[] filename, byte[] message, string topic)
        {
            // Publishes one message (key = filename) to the topic and shows a
            // dialog with the partition/offset the broker assigned.
            KafkaOptions options = new KafkaOptions(new Uri("http://sandbox.hortonworks.com:6667"));

            using (BrokerRouter router = new BrokerRouter(options))
                using (Producer client = new Producer(router))
                {
                    // Result was previously assigned to an unused local; keep the
                    // call (it may prime the router's metadata cache) but discard
                    // the return value.
                    router.GetTopicMetadata(topic);

                    var responses = await client.SendMessageAsync(topic,
                                                                  new[] {
                        new KafkaNet.Protocol.Message
                        {
                            Key   = filename,
                            Value = message
                        }
                    });

                    ProduceResponse response = responses.FirstOrDefault();
                    // Guard against an empty response list before dereferencing.
                    if (response != null)
                    {
                        MessageBox.Show(String.Format("File added to the queue - partition {0} offset {1}",
                                                      response.PartitionId,
                                                      response.Offset));
                    }
                }
        }
 private async Task SetResult(List<BrokerRouteSendBatch> sendTasks)
 {
     // Completes each message's TaskCompletionSource from its batch's send result.
     foreach (var sendTask in sendTasks)
     {
         try
         {
             // The batch task is already complete here, so the await does not block;
             // it just unwraps the result. ConfigureAwait(false) added for library
             // code, consistent with the sibling implementation of this method.
             var batchResult = await sendTask.Task.ConfigureAwait(false);
             var numberOfMessage = sendTask.MessagesSent.Count;
             for (int i = 0; i < numberOfMessage; i++)
             {
                 bool isAckLevel0 = sendTask.AckLevel == 0;
                 if (isAckLevel0)
                 {
                     // acks=0: the broker reports no offset, so synthesize a
                     // success response with Offset = -1.
                     var response = new ProduceResponse()
                     {
                         Error = (short)ErrorResponseCode.NoError,
                         PartitionId = sendTask.Route.PartitionId,
                         Topic = sendTask.Route.Topic,
                         Offset = -1
                     };
                     sendTask.MessagesSent[i].Tcs.SetResult(response);
                 }
                 else
                 {
                     // Per-message offset = batch base offset + index in the batch.
                     var response = new ProduceResponse()
                     {
                         Error = batchResult.Error,
                         PartitionId = batchResult.PartitionId,
                         Topic = batchResult.Topic,
                         Offset = batchResult.Offset + i
                     };
                     sendTask.MessagesSent[i].Tcs.SetResult(response);
                 }
             }
         }
         catch (Exception ex)
         {
             // Fixed log-message typo ("bach" -> "batch"). TrySetException avoids
             // throwing if a source was somehow already completed.
             BrokerRouter.Log.ErrorFormat("failed to send batch Topic[{0}] ackLevel[{1}] partition[{2}] EndPoint[{3}] Exception[{4}] stacktrace[{5}]", sendTask.Route.Topic, sendTask.AckLevel, sendTask.Route.PartitionId, sendTask.Route.Connection.Endpoint, ex.Message, ex.StackTrace);
             sendTask.MessagesSent.ForEach((x) => x.Tcs.TrySetException(ex));
         }
     }
 }
Example #12
0
        private static bool TryEncodeResponse(IKafkaWriter writer, IRequestContext context, ProduceResponse response)
        {
            // Writes a ProduceResponse to the wire; returns false when there is
            // nothing to encode. Layout: topic count, then per topic its name and
            // the partition entries that belong to it.
            if (response == null)
            {
                return false;
            }

            var topicGroups = response.responses.GroupBy(t => t.topic).ToList();
            writer.Write(topicGroups.Count);

            foreach (var topicGroup in topicGroups)
            {
                var partitionList = topicGroup.ToList();
                writer.Write(topicGroup.Key).Write(partitionList.Count);

                foreach (var p in partitionList)
                {
                    writer.Write(p.partition_id).Write(p.error_code).Write(p.base_offset);

                    // Per-partition timestamp exists on the wire from v2 (-1 = none).
                    if (context.ApiVersion >= 2)
                    {
                        writer.Write(p.timestamp?.ToUnixTimeMilliseconds() ?? -1L);
                    }
                }
            }

            // Throttle time exists on the wire from v1.
            if (context.ApiVersion >= 1)
            {
                writer.Write((int?)response.throttle_time_ms?.TotalMilliseconds ?? 0);
            }
            return true;
        }
Example #13
0
        private static bool TryEncodeResponse(IKafkaWriter writer, IRequestContext context, ProduceResponse response)
        {
            // Serializes a ProduceResponse: topic count, then per topic the name
            // followed by its partition entries. Returns false for a null response.
            if (response == null)
            {
                return false;
            }

            var byTopic = response.Topics.GroupBy(t => t.TopicName).ToList();
            writer.Write(byTopic.Count);

            foreach (var topicGroup in byTopic)
            {
                var parts = topicGroup.ToList();
                writer.Write(topicGroup.Key).Write(parts.Count);

                foreach (var partition in parts)
                {
                    writer.Write(partition.PartitionId).Write(partition.ErrorCode).Write(partition.Offset);

                    // Timestamps are only on the wire from v2; -1 marks "no timestamp".
                    if (context.ApiVersion >= 2)
                    {
                        writer.Write(partition.Timestamp.ToUnixEpochMilliseconds() ?? -1L);
                    }
                }
            }

            // Throttle time is only on the wire from v1.
            if (context.ApiVersion >= 1)
            {
                writer.Write((int?)response.ThrottleTime?.TotalMilliseconds ?? 0);
            }
            return true;
        }