Example #1
        internal async Task <ProducerResponse> Produce(ProduceRequest request)
        {
            var conn   = request.Broker.Conn;
            var client = await conn.GetClientAsync();

            _log.Debug("Sending ProduceRequest to {0}, Request: {1}", conn, request);
            if (_etw.IsEnabled())
            {
                _etw.ProtocolProduceRequest(request.ToString(), request.Broker.NodeId);
            }

            var response = await conn.Correlation.SendAndCorrelateAsync(
                id => Serializer.Serialize(request, id),
                Serializer.GetProducerResponse,
                client,
                CancellationToken.None
                );

            _log.Debug("Got ProduceResponse: {0}", response);
            if (_etw.IsEnabled())
            {
                _etw.ProtocolProduceResponse(response.ToString(), request.Broker.NodeId);
            }

            return(response);
        }
Example #2
        public void ProduceRequest(
            [Values(0, 1, 2)] short version,
            [Values(0, 2, -1)] short acks,
            [Values(0, 1000)] int timeoutMilliseconds,
            [Values("testTopic")] string topic,
            [Values(1, 10)] int topicsPerRequest,
            [Values(1, 5)] int totalPartitions,
            [Values(3)] int messagesPerSet,
            [Values(MessageCodec.None, MessageCodec.Gzip, MessageCodec.Snappy)] MessageCodec codec)
        {
#if !DOTNETSTANDARD
            if (codec == MessageCodec.Snappy)
            {
                Assert.Inconclusive($"{codec} is only available in .net core");
            }
#endif
            var payloads = new List <ProduceRequest.Topic>();
            for (var t = 0; t < topicsPerRequest; t++)
            {
                var partition = 1 + t % totalPartitions;
                payloads.Add(new ProduceRequest.Topic(topic + t, partition, GenerateMessages(messagesPerSet, (byte)(version >= 2 ? 1 : 0), codec), codec));
            }
            var request = new ProduceRequest(payloads, TimeSpan.FromMilliseconds(timeoutMilliseconds), acks);
            var requestWithUpdatedAttribute = new ProduceRequest(request.topics.Select(t => new ProduceRequest.Topic(t.topic, t.partition_id,
                                                                                                                     t.Messages.Select(m => m.Attribute == 0 ? m : new Message(m.Value, m.Key, 0, m.Offset, m.MessageVersion, m.Timestamp)))),
                                                                 request.timeout, request.acks);

            request.AssertCanEncodeDecodeRequest(version, forComparison: requestWithUpdatedAttribute);
        }
Example #3
        public async Task EnsureGzipCompressedMessageCanSend()
        {
            var topicName = TestConfig.TopicName();

            TestConfig.InfoLog.Info(() => LogEvent.Create(">> Start EnsureGzipCompressedMessageCanSend"));
            using (var conn = GetKafkaConnection()) {
                await conn.SendAsync(new MetadataRequest(topicName), CancellationToken.None);
            }

            using (var router = new BrokerRouter(_options)) {
                TestConfig.InfoLog.Info(() => LogEvent.Create(">> Start GetTopicMetadataAsync"));
                await router.GetTopicMetadataAsync(topicName, CancellationToken.None);

                TestConfig.InfoLog.Info(() => LogEvent.Create(">> End GetTopicMetadataAsync"));
                var conn = router.GetBrokerRoute(topicName, 0);

                var request = new ProduceRequest(new ProduceRequest.Payload(topicName, 0, new [] {
                    new Message("0", "1"),
                    new Message("1", "1"),
                    new Message("2", "1")
                }, MessageCodec.CodecGzip));
                TestConfig.InfoLog.Info(() => LogEvent.Create(">> start SendAsync"));
                var response = await conn.Connection.SendAsync(request, CancellationToken.None);

                TestConfig.InfoLog.Info(() => LogEvent.Create("end SendAsync"));
                Assert.That(response.Errors.Any(e => e != ErrorResponseCode.None), Is.False);
                TestConfig.InfoLog.Info(() => LogEvent.Create("start dispose"));
            }
            TestConfig.InfoLog.Info(() => LogEvent.Create(">> End EnsureGzipCompressedMessageCanSend"));
        }
Example #4
        private async void CallBackConnectServer4(IntPtr param1, IntPtr param2, IntPtr param3, IntPtr param4)
        {
            try
            {
                string        rtps          = Marshal.PtrToStringAnsi(param3);
                RtpParameters rtpParameters = JsonConvert.DeserializeObject <RtpParameters>(rtps);

                string strappdata = Marshal.PtrToStringAnsi(param4);
                Dictionary <string, object> appdata = JsonConvert.DeserializeObject <Dictionary <string, object> >(strappdata);

                ProduceRequest request = new ProduceRequest();
                //request.TransportId = Marshal.PtrToStringAnsi(param1);
                request.Kind          = (Marshal.PtrToStringAnsi(param2) == "audio") ? MediaKind.Audio : MediaKind.Video;
                request.RtpParameters = rtpParameters;
                request.AppData       = appdata;

                var result = await connection.InvokeAsync <dynamic>("Produce", request);

                int i = 0;
            }
            catch (Exception ex)
            {
                messagesList.Items.Add(ex.ToString());
            }
        }
Example #5
        public void SetupData()
        {
            Common.Compression.ZipLevel = Level;
            _request = new ProduceRequest(
                Enumerable.Range(1, Partitions)
                .Select(partitionId => new ProduceRequest.Topic(
                            "topic",
                            partitionId,
                            Enumerable.Range(1, Messages)
                            .Select(i => new Message(GenerateMessageBytes(), new ArraySegment <byte>(), (byte)Codec, version: MessageVersion)),
                            Codec)));

            var response = new ProduceResponse(new ProduceResponse.Topic("topic", 1, ErrorCode.NONE, 0));

            var port     = 10000;
            var endpoint = new Endpoint(new IPEndPoint(IPAddress.Loopback, port), "localhost");

            _server = new TcpServer(endpoint.Ip.Port)
            {
                OnReceivedAsync = async data => {
                    var header = KafkaDecoder.DecodeHeader(data.Skip(4));
                    var bytes  = KafkaDecoder.EncodeResponseBytes(new RequestContext(header.CorrelationId), response);
                    await _server.SendDataAsync(bytes);
                }
            };
            _connection = new Connection(endpoint);
        }
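The fields prepared above (a canned ProduceResponse served by a loopback TcpServer, plus a Connection pointed at it) are presumably exercised by a benchmark body along the following lines. This is only a sketch; the actual benchmark method is not part of the snippet, and the method name is an assumption.

        // Sketch only (hypothetical benchmark body): round-trips the pre-built ProduceRequest
        // through the loopback TcpServer configured in SetupData, which always replies with
        // the canned ProduceResponse.
        public async Task RoundTripOnce()
        {
            await _connection.SendAsync(_request, CancellationToken.None);
        }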
Example #6
        public async Task TestProducingWorksOk()
        {
            using (var temporaryTopic = testCluster.CreateTemporaryTopic())
                using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
                {
                    var request = new ProduceRequest
                    {
                        Acks      = 1,
                        TimeoutMS = 10000,
                        Payload   = new List <Payload>
                        {
                            new Payload
                            {
                                Topic     = temporaryTopic.Name,
                                Partition = 0,
                                Codec     = MessageCodec.CodecNone,
                                Messages  = new List <Message>
                                {
                                    new Message(Guid.NewGuid().ToString())
                                }
                            }
                        }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);

                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();
                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                }
        }
Example #7
        public async Task <ActionResult> Produce([FromBody] ProduceRequest request)
        {
            _eventTracker.TrackInfo("JmsDebugProducing", "produced message");
            try
            {
                var kafkaOptions = Options.Create(new KafkaOptions {
                    ServerAddress = request.ServerAddress
                });
                var taskOptions =
                    Options.Create(new TaskOptions {
                    ConsumeTaskTopic = "n/a", ProduceTopic = request.KafkaTopic
                });
                var kafka = new KafkaResultProducer(kafkaOptions, taskOptions, _kafkaLogger);

                var message = new Message()
                {
                    Key   = "key",
                    Value = request.Message
                };
                await kafka.ProduceAsync(message);
            }
            catch (Exception e)
            {
                _eventTracker.TrackError(
                    "JmsDebugProducing",
                    $"Error while producing: {e.Message}",
                    new { exception = e });
            }

            return(Ok());
        }
Example #8
        public async Task TestProducingWorksOk()
        {
            using (var temporaryTopic = testCluster.CreateTemporaryTopic())
            using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
            {
                var request = new ProduceRequest
                {
                    Acks = 1,
                    TimeoutMS = 10000,
                    Payload = new List<Payload>
                     {
                         new Payload
                         {
                              Topic = temporaryTopic.Name,
                              Partition = 0,
                              Codec = MessageCodec.CodecNone,
                              Messages = new List<Message>
                              {
                                  new Message(Guid.NewGuid().ToString())
                              }
                         }
                     }
                };

                var response = await connection.SendRequestAsync(request, CancellationToken.None);
                Assert.That(response, Has.Count.EqualTo(1));
                var first = response.First();
                Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            }
        }
Example #9
        public async Task SnappyCanCompressMessageAndSend()
        {
#if !DOTNETSTANDARD
            Assert.Inconclusive("Snappy is only available in .net core");
#endif
            using (var router = await TestConfig.IntegrationOptions.CreateRouterAsync()) {
                await router.TemporaryTopicAsync(async topicName => {
                    TestConfig.Log.Info(() => LogEvent.Create(">> Start EnsureGzipCompressedMessageCanSend"));
                    var endpoint = await Endpoint.ResolveAsync(TestConfig.IntegrationOptions.ServerUris.First(), TestConfig.IntegrationOptions.Log);
                    using (var conn1 = TestConfig.IntegrationOptions.CreateConnection(endpoint)) {
                        await conn1.SendAsync(new MetadataRequest(topicName), CancellationToken.None);
                    }

                    TestConfig.Log.Info(() => LogEvent.Create(">> Start GetTopicMetadataAsync"));
                    await router.GetTopicMetadataAsync(topicName, CancellationToken.None);
                    TestConfig.Log.Info(() => LogEvent.Create(">> End GetTopicMetadataAsync"));
                    var conn = router.GetTopicConnection(topicName, 0);

                    var request = new ProduceRequest(new ProduceRequest.Topic(topicName, 0, new [] {
                        new Message("0", "1"),
                        new Message("1", "1"),
                        new Message("2", "1")
                    }, MessageCodec.Snappy));
                    TestConfig.Log.Info(() => LogEvent.Create(">> start SendAsync"));
                    var response = await conn.Connection.SendAsync(request, CancellationToken.None);
                    TestConfig.Log.Info(() => LogEvent.Create("end SendAsync"));
                    Assert.That(response.Errors.Any(e => e != ErrorCode.NONE), Is.False);
                    TestConfig.Log.Info(() => LogEvent.Create("start dispose"));
                    TestConfig.Log.Info(() => LogEvent.Create(">> End EnsureGzipCompressedMessageCanSend"));
                });
            }
        }
Example #10
        public void EnsureGzipCompressedMessageCanSend()
        {
            var conn = _router.SelectBrokerRoute(CompressTopic, 0);

            var request = new ProduceRequest
            {
                Acks      = 1,
                TimeoutMS = 1000,
                Payload   = new List <Payload>
                {
                    new Payload
                    {
                        Codec     = MessageCodec.CodecGzip,
                        Topic     = CompressTopic,
                        Partition = 0,
                        Messages  = new List <Message>
                        {
                            new Message {
                                Value = "0", Key = "1"
                            },
                            new Message {
                                Value = "1", Key = "1"
                            },
                            new Message {
                                Value = "2", Key = "1"
                            }
                        }
                    }
                }
            };

            var response = conn.Connection.SendAsync(request).Result;

            Assert.That(response.First().Error, Is.EqualTo(0));

            //var offsets = producer.GetTopicOffsetAsync("NewTopic").Result;

            //var consumer = new Consumer(new ConsumerOptions("NewTopic", _router),
            //    offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

            //var response = producer.SendMessageAsync("NewTopic", new[]
            //    {
            //        new Message {Value = "0", Key = "1"},
            //        new Message {Value = "1", Key = "1"},
            //        new Message {Value = "2", Key = "1"}
            //    }, codec: MessageCodec.CodecGzip).Result;

            //Assert.That(response.First().Error, Is.EqualTo(0));

            //var results = consumer.Consume().Take(3).ToList();

            //for (int i = 0; i < 3; i++)
            //{
            //    Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
            //}
        }
Example #11
        public static byte[] Serialize(ProduceRequest request, int correlationId)
        {
            var stream = new MemoryStream();

            WriteRequestHeader(stream, correlationId, ApiKey.ProduceRequest);
            BigEndianConverter.Write(stream, request.RequiredAcks);
            BigEndianConverter.Write(stream, request.Timeout);
            WriteArray(stream, request.TopicData, t => Write(stream, t));
            return(WriteMessageLength(stream));
        }
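For context, a serializer like this is typically handed to the correlation layer as the encoding delegate, which is exactly what Example #1 above does. The sketch below restates that call shape as it would appear inside an async method; the conn, client, and request variables are assumptions standing in for an open broker connection and a populated ProduceRequest.

            // Sketch only: mirrors the SendAndCorrelateAsync call from Example #1. The correlation
            // layer allocates the id, and Serialize frames the request with it before sending.
            var response = await conn.Correlation.SendAndCorrelateAsync(
                id => Serializer.Serialize(request, id),
                Serializer.GetProducerResponse,
                client,
                CancellationToken.None);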
Example #12
        public async Task <Int64> ProduceAsync(String topic, Int32 partition, IList <Message> messages,
                                               AcknowlegeStrategy strategy = AcknowlegeStrategy.Written, MessageCodec codec = MessageCodec.None)
        {
            EnsureLegalTopicSpelling(topic);
            var broker   = _topicBrokerDispatcher.SelectBroker(topic, partition);
            var request  = new ProduceRequest(topic, partition, messages);
            var response = (ProduceResponse)(await SubmitRequestAsync(broker.ToUri(), request));

            response.TryThrowFirstErrorOccured();
            return(response.TopicPartitions[0].Details[0].Offset);
        }
Example #13
 public void SetupData()
 {
     Common.Compression.ZipLevel = Level;
     _request = new ProduceRequest(
         Enumerable.Range(1, Partitions)
         .Select(partitionId => new ProduceRequest.Topic(
                     "topic",
                     partitionId,
                     Enumerable.Range(1, Messages)
                     .Select(i => new Message(GenerateMessageBytes(), new ArraySegment <byte>(), (byte)Codec, version: MessageVersion)),
                     Codec)));
 }
Example #14
        /// <summary>
        /// Send an enumerable of message objects to a given topic.
        /// </summary>
        /// <param name="messages">The enumerable of messages that will be sent to the given topic. All messages *MUST* have a topic assigned to them.</param>
        /// <param name="acks">The required level of acknowledgment from the kafka server. 0=none, 1=written to leader, 2+=written to replicas, -1=written to all replicas.</param>
        /// <param name="timeoutMS">Internal kafka timeout to wait for the requested level of ack to occur before returning.</param>
        /// <param name="codec">The codec to apply to the message collection. Defaults to none.</param>
        /// <returns>List of ProduceResponses for each message sent, or an empty list if acks = 0.</returns>
        public async Task <List <ProduceResponse> > SendMultiTopicMessagesAsync(IEnumerable <Message> messages, Int16 acks = 1, int timeoutMS = 1000, MessageCodec codec = MessageCodec.CodecNone)
        {
            Interlocked.Increment(ref _currentAsyncQueue);

            try
            {
                // This goes against async philosophy, but it is convenient for dataflow management
                while (_maximumAsyncQueue != -1 && _currentAsyncQueue >= _maximumAsyncQueue)
                {
                    Thread.Sleep(100);
                }

                // group messages by the server connection they will be sent to
                var routeGroup = from message in messages
                                 select new { Route = _router.SelectBrokerRoute(message.Topic, message.Key), Message = message }
                into routes
                group routes by routes.Route;

                var sendTasks = new List <Task <List <ProduceResponse> > >();
                foreach (var route in routeGroup)
                {
                    var request = new ProduceRequest
                    {
                        Acks      = acks,
                        TimeoutMS = timeoutMS,
                        Payload   = new List <Payload>
                        {
                            new Payload
                            {
                                Codec     = codec,
                                Topic     = route.Key.Topic,
                                Partition = route.Key.PartitionId,
                                Messages  = route.Select(x => x.Message).ToList()
                            }
                        }
                    };

                    sendTasks.Add(route.Key.Connection.SendAsync(request));
                }

                await Task.WhenAll(sendTasks.ToArray());

                return(sendTasks.SelectMany(t => t.Result).ToList());
            }
            finally
            {
                Interlocked.Decrement(ref _currentAsyncQueue);
            }
        }
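A hedged caller sketch for the method above, inferred only from its signature and doc comment: the producer variable, the topic names, and the assumption that Message.Topic is assignable (as the doc comment implies) are all illustrative.

            // Sketch only: each message carries its own Topic, as required by the doc comment;
            // SendMultiTopicMessagesAsync then groups the batch per broker route before sending.
            var messages = new List<Message>
            {
                new Message { Topic = "topicA", Key = "1", Value = "first" },
                new Message { Topic = "topicB", Key = "1", Value = "second" }
            };
            var responses = await producer.SendMultiTopicMessagesAsync(messages, acks: 1, timeoutMS: 1000, codec: MessageCodec.CodecGzip);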
Example #15
        /// <summary>
        /// Send an enumerable of message objects to a given topic.
        /// </summary>
        /// <param name="topic">The name of the kafka topic to send the messages to.</param>
        /// <param name="messages">The enumerable of messages that will be sent to the given topic.</param>
        /// <param name="acks">The required level of acknowledgment from the kafka server. 0=none, 1=written to leader, 2+=written to replicas, -1=written to all replicas.</param>
        /// <param name="timeout">Internal kafka timeout to wait for the requested level of ack to occur before returning. Defaults to 1000ms.</param>
        /// <param name="codec">The codec to apply to the message collection. Defaults to none.</param>
        /// <returns>List of ProduceResponses for each message sent, or an empty list if acks = 0.</returns>
        public async Task <List <ProduceResponse> > SendMessageAsync(string topic, IEnumerable <Message> messages, Int16 acks = 1,
                                                                     TimeSpan?timeout = null, MessageCodec codec = MessageCodec.CodecNone)
        {
            if (timeout == null)
            {
                timeout = TimeSpan.FromMilliseconds(DefaultTimeoutMS);
            }

            try
            {
                _sendSemaphore.Wait();

                // group messages by the server connection they will be sent to
                var routeGroup = from message in messages
                                 select new { Route = _router.SelectBrokerRoute(topic, message.Key), Message = message }
                into routes
                group routes by routes.Route;

                var sendTasks = new List <Task <List <ProduceResponse> > >();
                foreach (var route in routeGroup)
                {
                    var request = new ProduceRequest
                    {
                        Acks      = acks,
                        TimeoutMS = (int)timeout.Value.TotalMilliseconds,
                        Payload   = new List <Payload>
                        {
                            new Payload
                            {
                                Codec     = codec,
                                Topic     = route.Key.Topic,
                                Partition = route.Key.PartitionId,
                                Messages  = route.Select(x => x.Message).ToList()
                            }
                        }
                    };

                    sendTasks.Add(route.Key.Connection.SendAsync(request));
                }

                await Task.WhenAll(sendTasks.ToArray());

                return(sendTasks.SelectMany(t => t.Result).ToList());
            }
            finally
            {
                _sendSemaphore.Release();
            }
        }
Example #16
        public void EnsureGzipCompressedMessageCanSend()
        {
            var conn = _router.SelectBrokerRoute(CompressTopic, 0);

            var request = new ProduceRequest
            {
                Acks = 1,
                TimeoutMS = 1000,
                Payload = new List<Payload>
                                {
                                    new Payload
                                        {
                                            Codec = MessageCodec.CodecGzip,
                                            Topic = CompressTopic,
                                            Partition = 0,
                                            Messages = new List<Message>
                                                    {
                                                        new Message {Value = "0", Key = "1"},
                                                        new Message {Value = "1", Key = "1"},
                                                        new Message {Value = "2", Key = "1"}
                                                    }
                                        }
                                }
            };

            var response = conn.Connection.SendAsync(request).Result;
            Assert.That(response.First().Error, Is.EqualTo(0));

            //var offsets = producer.GetTopicOffsetAsync("NewTopic").Result;

            //var consumer = new Consumer(new ConsumerOptions("NewTopic", _router),
            //    offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray());

            //var response = producer.SendMessageAsync("NewTopic", new[]
            //    {
            //        new Message {Value = "0", Key = "1"},
            //        new Message {Value = "1", Key = "1"},
            //        new Message {Value = "2", Key = "1"}
            //    }, codec: MessageCodec.CodecGzip).Result;

            //Assert.That(response.First().Error, Is.EqualTo(0));

            //var results = consumer.Consume().Take(3).ToList();

            //for (int i = 0; i < 3; i++)
            //{
            //    Assert.That(results[i].Value, Is.EqualTo(i.ToString()));
            //}
        }
Example #17
        /// <summary>
        /// Send an enumerable of message objects to a given topic.
        /// </summary>
        /// <param name="messages">The enumerable of messages that will be sent to the given topic. All messages *MUST* have a topic assigned to them.</param>
        /// <param name="acks">The required level of acknowledgment from the kafka server. 0=none, 1=written to leader, 2+=written to replicas, -1=written to all replicas.</param>
        /// <param name="timeoutMS">Internal kafka timeout to wait for the requested level of ack to occur before returning.</param>
        /// <param name="codec">The codec to apply to the message collection. Defaults to none.</param>
        /// <returns>List of ProduceResponses for each message sent, or an empty list if acks = 0.</returns>
        public async Task<List<ProduceResponse>> SendMultiTopicMessagesAsync(IEnumerable<Message> messages, Int16 acks = 1, int timeoutMS = 1000, MessageCodec codec = MessageCodec.CodecNone)
        {
            Interlocked.Increment(ref _currentAsyncQueue);

            try
            {
                // This goes against async philosophy, but it is convenient for dataflow management
                while (_maximumAsyncQueue != -1 && _currentAsyncQueue >= _maximumAsyncQueue)
                {
                    Thread.Sleep(100);
                }

                // group messages by the server connection they will be sent to
                var routeGroup = from message in messages
                                 select new {Route = _router.SelectBrokerRoute(message.Topic, message.Key), Message = message}
                                 into routes
                                 group routes by routes.Route;
                
                var sendTasks = new List<Task<List<ProduceResponse>>>();
                foreach (var route in routeGroup)
                {
                    var request = new ProduceRequest
                        {
                            Acks = acks,
                            TimeoutMS = timeoutMS,
                            Payload = new List<Payload>
                                {
                                    new Payload
                                        {
                                            Codec = codec,
                                            Topic = route.Key.Topic,
                                            Partition = route.Key.PartitionId,
                                            Messages = route.Select(x => x.Message).ToList()
                                        }
                                }
                        };

                    sendTasks.Add(route.Key.Connection.SendAsync(request));
                }

                await Task.WhenAll(sendTasks.ToArray());

                return sendTasks.SelectMany(t => t.Result).ToList();
            }
            finally
            {
                Interlocked.Decrement(ref _currentAsyncQueue);
            }
        }
Example #18
        public async Task EnsureGzipCompressedMessageCanSend()
        {
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start EnsureGzipCompressedMessageCanSend"));
            using (var conn = GetKafkaConnection())
            {
                conn.SendAsync(new MetadataRequest
                {
                    Topics = new List <string>(new[] { IntegrationConfig.IntegrationCompressionTopic })
                })
                .Wait(TimeSpan.FromSeconds(10));
            }

            using (var router = new BrokerRouter(_options))
            {
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start RefreshMissingTopicMetadata"));
                await router.RefreshMissingTopicMetadata(IntegrationConfig.IntegrationCompressionTopic);

                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("end RefreshMissingTopicMetadata"));
                var conn = router.SelectBrokerRouteFromLocalCache(IntegrationConfig.IntegrationCompressionTopic, 0);

                var request = new ProduceRequest
                {
                    Acks      = 1,
                    TimeoutMS = 1000,
                    Payload   = new List <Payload>
                    {
                        new Payload
                        {
                            Codec     = MessageCodec.CodecGzip,
                            Topic     = IntegrationConfig.IntegrationCompressionTopic,
                            Partition = 0,
                            Messages  = new List <Message>
                            {
                                new Message("0", "1"),
                                new Message("1", "1"),
                                new Message("2", "1")
                            }
                        }
                    }
                };
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start SendAsync"));
                var response = conn.Connection.SendAsync(request).Result;
                IntegrationConfig.NoDebugLog.InfoFormat("end SendAsync");
                Assert.That(response.First().Error, Is.EqualTo(0));
                IntegrationConfig.NoDebugLog.InfoFormat("start dispose");
            }
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("end EnsureGzipCompressedMessageCanSend"));
        }
Example #19
        public void TestSerializeProduceRequest()
        {
            var produce = new ProduceRequest
            {
                Timeout      = 1223,
                RequiredAcks = 1,
                TopicsData   = new[]
                {
                    new TopicData <PartitionData>
                    {
                        TopicName      = "barbu",
                        PartitionsData = new[]
                        {
                            new PartitionData
                            {
                                Partition        = 22,
                                CompressionCodec = CompressionCodec.None,
                                Messages         = new[]
                                {
                                    new Message {
                                        Value = TheValue
                                    }
                                },
                            }
                        }
                    },
                }
            };
            var config = new SerializationConfig();

            config.SetSerializersForTopic("barbu", new StringSerializer(), new StringSerializer());
            config.SetDeserializersForTopic("barbu", new StringDeserializer(), new StringDeserializer());
            using (var serialized = produce.Serialize(new ReusableMemoryStream(null), 321, ClientId, config))
            {
                CheckHeader(Basics.ApiKey.ProduceRequest, 0, 321, TheClientId, serialized);
                Assert.AreEqual(produce.RequiredAcks, BigEndianConverter.ReadInt16(serialized));
                Assert.AreEqual(produce.Timeout, BigEndianConverter.ReadInt32(serialized));
                Assert.AreEqual(1, BigEndianConverter.ReadInt32(serialized)); // 1 topic data
                Assert.AreEqual("barbu", Basics.DeserializeString(serialized));
                Assert.AreEqual(1, BigEndianConverter.ReadInt32(serialized)); // 1 partition data
                Assert.AreEqual(22, BigEndianConverter.ReadInt32(serialized));
                var msgs = FetchPartitionResponse.DeserializeMessageSet(serialized, config.GetDeserializersForTopic("barbu"));
                Assert.AreEqual(1, msgs.Count);
                //Assert.AreEqual(TheValue, Encoding.UTF8.GetString(msgs[0].Message.Value));
                Assert.AreEqual(TheValue, msgs[0].Message.Value as string);
            }
        }
Example #20
        public async Task EnsureGzipCompressedMessageCanSend()
        {
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start EnsureGzipCompressedMessageCanSend"));
            using (var conn = GetKafkaConnection())
            {
                conn.SendAsync(new MetadataRequest
                {
                    Topics = new List<string>(new[] { IntegrationConfig.IntegrationCompressionTopic })
                })
                    .Wait(TimeSpan.FromSeconds(10));
            }

            using (var router = new BrokerRouter(_options))
            {
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start RefreshMissingTopicMetadata"));
                await router.RefreshMissingTopicMetadata(IntegrationConfig.IntegrationCompressionTopic);
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("end RefreshMissingTopicMetadata"));
                var conn = router.SelectBrokerRouteFromLocalCache(IntegrationConfig.IntegrationCompressionTopic, 0);

                var request = new ProduceRequest
                {
                    Acks = 1,
                    TimeoutMS = 1000,
                    Payload = new List<Payload>
                        {
                            new Payload
                            {
                                Codec = MessageCodec.CodecGzip,
                                Topic = IntegrationConfig.IntegrationCompressionTopic,
                                Partition = 0,
                                Messages = new List<Message>
                                {
                                    new Message("0", "1"),
                                    new Message("1", "1"),
                                    new Message("2", "1")
                                }
                            }
                        }
                };
                IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("start SendAsync"));
                var response = conn.Connection.SendAsync(request).Result;
                IntegrationConfig.NoDebugLog.InfoFormat("end SendAsync");
                Assert.That(response.First().Error, Is.EqualTo(0));
                IntegrationConfig.NoDebugLog.InfoFormat("start dispose");
            }
            IntegrationConfig.NoDebugLog.InfoFormat(IntegrationConfig.Highlight("end EnsureGzipCompressedMessageCanSend"));
        }
Example #21
        private async Task SendBatchWithCodecAsync(IImmutableList <ProduceTopicTask> produceTasks, MessageCodec codec, CancellationToken cancellationToken)
        {
            await BrokerRouter.GetTopicMetadataAsync(produceTasks.Select(m => m.TopicName), cancellationToken).ConfigureAwait(false);

            // we must send a different produce request for each ack level and timeout combination.
            // we must also send requests to the correct broker / endpoint
            var endpointGroups = produceTasks.Select(
                ptt => new {
                ProduceTask = ptt,
                Route       = ptt.Partition.HasValue
                        ? BrokerRouter.GetBrokerRoute(ptt.TopicName, ptt.Partition.Value)
                        : BrokerRouter.GetBrokerRoute(ptt.TopicName, ptt.Message.Key)
            })
                                 .GroupBy(_ => new { _.ProduceTask.Acks, _.ProduceTask.AckTimeout, _.Route.Connection.Endpoint });

            var sendBatches = new List <ProduceTaskBatch>();

            foreach (var endpointGroup in endpointGroups)
            {
                var produceTasksByTopicPayload = endpointGroup
                                                 .GroupBy(_ => (TopicPartition)_.Route)
                                                 .ToImmutableDictionary(g => g.Key, g => g.Select(_ => _.ProduceTask).ToImmutableList());
                var messageCount = produceTasksByTopicPayload.Values.Sum(_ => _.Count);
                var payloads     = produceTasksByTopicPayload.Select(p => new ProduceRequest.Payload(p.Key.TopicName, p.Key.PartitionId, p.Value.Select(_ => _.Message), codec));
                var request      = new ProduceRequest(payloads, endpointGroup.Key.AckTimeout, endpointGroup.Key.Acks);
                BrokerRouter.Log.Debug(() => LogEvent.Create($"Produce request for topics{request.Payloads.Aggregate("", (buffer, p) => $"{buffer} {p}")} with {messageCount} messages"));

                var connection = endpointGroup.Select(_ => _.Route).First().Connection; // they'll all be the same since they're grouped by this
                await _produceRequestSemaphore.WaitAsync(cancellationToken).ConfigureAwait(false);

                // TODO: what about retryability like for the broker router?? Need this to be robust to node failures
                var sendGroupTask = connection.SendAsync(request, cancellationToken);
                // ReSharper disable once UnusedVariable
                var continuation = sendGroupTask.ContinueWith(t => _produceRequestSemaphore.Release(), CancellationToken.None);
                sendBatches.Add(new ProduceTaskBatch(connection.Endpoint, endpointGroup.Key.Acks, sendGroupTask, produceTasksByTopicPayload));
            }

            try {
                await Task.WhenAll(sendBatches.Select(batch => batch.ReceiveTask)).ConfigureAwait(false);
            } catch (Exception ex) {
                BrokerRouter.Log.Error(LogEvent.Create(ex));
            }
            await SetResult(sendBatches).ConfigureAwait(false);
        }
Example #22
        internal async Task <ProducerResponse> SendBatchAsync(int leader, IEnumerable <Message> batch, Producer producer)
        {
            CheckConnected();
            // TODO: do state checking. Introduce this.Connected task to wait if needed

            var request = new ProduceRequest
            {
                Broker       = _metadata.Brokers.First(b => b.NodeId == leader),
                RequiredAcks = producer.Configuration.RequiredAcks,
                Timeout      = producer.Configuration.ProduceRequestTimeoutMs,
                TopicData    = new[]
                {
                    new TopicData {
                        TopicName      = producer.Topic,
                        PartitionsData = (
                            from msg in batch
                            // group messages belonging to the same partition
                            group msg by msg.PartitionId
                            into partitionGrp
                            select new PartitionData {
                            Pub = producer,
                            OriginalMessages = partitionGrp.ToArray(),
                            Partition = partitionGrp.Key,
                            CompressionType = producer.Configuration.CompressionType,
                            Messages = (
                                from msg in partitionGrp
                                select new MessageData {
                                Key = msg.Key,
                                Value = new ArraySegment <byte>(msg.Value)
                            })
                        }
                            )
                    }
                }
            };



            var response = await _protocol.Produce(request).ConfigureAwait(false);

            _log.Debug("#{0} SendBatchAsync complete", _id);
            return(response);
        }
Example #23
        internal static async IAsyncEnumerable <Record> ExtractRecordsAsync(
            this ProduceRequest request,
            [EnumeratorCancellation] CancellationToken cancellationToken)
        {
            await foreach (var batch in request
                           .ExtractRecordBatchesAsync(cancellationToken)
                           .ConfigureAwait(false))
            {
                if (batch.Records == null)
                {
                    continue;
                }

                foreach (var record in batch.Records)
                {
                    yield return(record);
                }
            }
        }
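A hedged consumption sketch for the extension above; only the await foreach shape comes from the signature, while the wrapping method and its name are assumptions.

        // Sketch only: flattens every record batch in the incoming request into individual records.
        internal static async Task ConsumeRecordsAsync(ProduceRequest request, CancellationToken cancellationToken)
        {
            await foreach (var record in request.ExtractRecordsAsync(cancellationToken).ConfigureAwait(false))
            {
                // Handle each record here; the Record type comes from the surrounding library.
            }
        }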
Example #24
        /// <summary>
        /// Send an enumerable of message objects to a given topic.
        /// </summary>
        /// <param name="topic">The name of the kafka topic to send the messages to.</param>
        /// <param name="messages">The enumerable of messages that will be sent to the given topic.</param>
        /// <param name="acks">The required level of acknowledgment from the kafka server. 0=none, 1=written to leader, 2+=written to replicas, -1=written to all replicas.</param>
        /// <param name="timeoutMS">Internal kafka timeout to wait for the requested level of ack to occur before returning.</param>
        /// <param name="codec">The codec to apply to the message collection. Defaults to none.</param>
        /// <returns>List of ProduceResponses for each message sent, or an empty list if acks = 0.</returns>
        public async Task<List<ProduceResponse>> SendMessageAsync(string topic, IEnumerable<Message> messages, Int16 acks = 1, int timeoutMS = 1000, MessageCodec codec = MessageCodec.CodecNone)
        {
            _sendSemaphore.Wait();

            try
            {
                // group messages by the server connection they will be sent to
                var routeGroup = from message in messages
                                 select new {Route = _router.SelectBrokerRoute(topic, message.Key), Message = message}
                                 into routes
                                 group routes by routes.Route;
                
                var sendTasks = new List<Task<List<ProduceResponse>>>();
                foreach (var route in routeGroup)
                {
                    var request = new ProduceRequest
                        {
                            Acks = acks,
                            TimeoutMS = timeoutMS,
                            Payload = new List<Payload>
                                {
                                    new Payload
                                        {
                                            Codec = codec,
                                            Topic = route.Key.Topic,
                                            Partition = route.Key.PartitionId,
                                            Messages = route.Select(x => x.Message).ToList()
                                        }
                                }
                        };

                    sendTasks.Add(route.Key.Connection.SendAsync(request));
                }

                await Task.WhenAll(sendTasks.ToArray());
                return sendTasks.SelectMany(t => t.Result).ToList();
            }
            finally
            {
                _sendSemaphore.Release();
            }
        }
Example #25
        public void ProduceRequest(
            [Values(0, 1, 2)] short version,
            [Values(0, 2, -1)] short acks,
            [Values(0, 1000)] int timeoutMilliseconds,
            [Values("test", "a really long name, with spaces and punctuation!")] string topic,
            [Values(1, 10)] int topicsPerRequest,
            [Values(1, 5)] int totalPartitions,
            [Values(3)] int messagesPerSet)
        {
            var payloads = new List <ProduceRequest.Payload>();

            for (var t = 0; t < topicsPerRequest; t++)
            {
                var partition = 1 + t % totalPartitions;
                payloads.Add(new ProduceRequest.Payload(topic + t, partition, GenerateMessages(messagesPerSet, (byte)(version >= 2 ? 1 : 0), partition)));
            }
            var request = new ProduceRequest(payloads, TimeSpan.FromMilliseconds(timeoutMilliseconds), acks);

            request.AssertCanEncodeDecodeRequest(version);
        }
Example #26
        public ProduceResponse Produce(int correlationId, string clientId, int timeOut, string topicName, int partitionId, byte[] payLoad)
        {
            var request = new ProduceRequest(timeOut, correlationId, clientId);

            request.AddMessage(topicName, partitionId, payLoad);
            using (var connection = new KafkaConnection(server, port))
            {
                connection.Write(request.GetRequestBytes().ToArray());

                int dataLength = BitConverter.ToInt32(BitWorks.ReverseBytes(connection.Read(4)), 0);

                var response = new ProduceResponse();
                if (dataLength != 0)
                {
                    byte[] data = connection.Read(dataLength);
                    response.Parse(data);
                }
                return(response);
            }
        }
Example #27
        public void ProduceSize()
        {
            int   partitions     = 1;
            short version        = 0;
            byte  messageVersion = 0;

            var results = new List <object>();

            foreach (var codec in new[] { MessageCodec.None, MessageCodec.Gzip, MessageCodec.Snappy })
            {
                foreach (var messages in new[] { 100, 10000 })
                {
                    foreach (var messageSize in new[] { 1, 1000 })
                    {
                        foreach (var level in new[] { CompressionLevel.Fastest })
                        {
                            Compression.ZipLevel = level;
                            var request = new ProduceRequest(
                                Enumerable.Range(1, partitions)
                                .Select(partitionId => new ProduceRequest.Topic(
                                            "topic",
                                            partitionId,
                                            Enumerable.Range(1, messages)
                                            .Select(i => new Message(GenerateMessageBytes(messageSize), new ArraySegment <byte>(), 0, version: messageVersion)),
                                            codec)));

                            var result = new {
                                Codec       = codec.ToString(),
                                Level       = codec == MessageCodec.None ? "-" : level.ToString(),
                                Messages    = messages,
                                MessageSize = messageSize,
                                Bytes       = request.ToBytes(new RequestContext(1, version)).Count
                            };
                            results.Add(result);
                        }
                    }
                }
            }

            WriteResults(results);
        }
Example #28
        public void EnsureGzipCompressedMessageCanSend()
        {
            //ensure topic exists
            _kafkaConnection.SendAsync(new MetadataRequest {
                Topics = new List <string>(new[] { IntegrationConfig.IntegrationCompressionTopic })
            }).Wait();

            var conn = _router.SelectBrokerRoute(IntegrationConfig.IntegrationCompressionTopic, 0);

            var request = new ProduceRequest
            {
                Acks      = 1,
                TimeoutMS = 1000,
                Payload   = new List <Payload>
                {
                    new Payload
                    {
                        Codec     = MessageCodec.CodecGzip,
                        Topic     = IntegrationConfig.IntegrationCompressionTopic,
                        Partition = 0,
                        Messages  = new List <Message>
                        {
                            new Message {
                                Value = "0", Key = "1"
                            },
                            new Message {
                                Value = "1", Key = "1"
                            },
                            new Message {
                                Value = "2", Key = "1"
                            }
                        }
                    }
                }
            };

            var response = conn.Connection.SendAsync(request).Result;

            Assert.That(response.First().Error, Is.EqualTo(0));
        }
Example #29
        public void EnsureGzipCompressedMessageCanSend()
        {
            //ensure topic exists
            using (var conn = GetKafkaConnection())
            {
                conn.SendAsync(new MetadataRequest {
                    Topics = new List <string>(new[] { IntegrationConfig.IntegrationCompressionTopic })
                }).Wait(TimeSpan.FromSeconds(10));
            }

            using (var router = new BrokerRouter(_options))
            {
                var conn = router.SelectBrokerRoute(IntegrationConfig.IntegrationCompressionTopic, 0);

                var request = new ProduceRequest
                {
                    Acks      = 1,
                    TimeoutMS = 1000,
                    Payload   = new List <Payload>
                    {
                        new Payload
                        {
                            Codec     = MessageCodec.CodecGzip,
                            Topic     = IntegrationConfig.IntegrationCompressionTopic,
                            Partition = 0,
                            Messages  = new List <Message>
                            {
                                new Message("0", "1"),
                                new Message("1", "1"),
                                new Message("2", "1")
                            }
                        }
                    }
                };

                var response = conn.Connection.SendAsync(request).Result;
                Assert.That(response.First().Error, Is.EqualTo(0));
            }
        }
Example #30
        internal static async IAsyncEnumerable <RecordBatch> ExtractRecordBatchesAsync(
            this ProduceRequest produceRequest,
            [EnumeratorCancellation] CancellationToken cancellationToken = default)
        {
            var records = produceRequest.TopicsCollection.SelectMany(data =>
                                                                     data.PartitionsCollection.Select(produceData =>
                                                                                                      produceData.Records))
                          .Where(record => record.HasValue);

            var pipe   = new Pipe();
            var reader = new KafkaReader(pipe.Reader);

            foreach (var record in records)
            {
                await pipe.Writer.WriteAsync(
                    record.Value.Value.AsMemory(),
                    cancellationToken);

                yield return(await RecordBatch.ReadFromAsync(Int16.Default, reader,
                                                             cancellationToken));
            }
        }
Example #31
        private async Task SendBatchWithCodecAsync(IImmutableList <ProduceTask> produceTasks, MessageCodec codec, CancellationToken cancellationToken)
        {
            await Router.GetTopicMetadataAsync(produceTasks.Select(m => m.Partition.topic), cancellationToken).ConfigureAwait(false);

            // we must send a different produce request for each ack level and timeout combination.
            // we must also send requests to the correct server
            var endpointGroups = produceTasks.Select(
                produceTask => new {
                ProduceTask     = produceTask,
                TopicConnection = Router.GetTopicConnection(produceTask.Partition.topic, produceTask.Partition.partition_id)
            })
                                 .GroupBy(_ => new { _.ProduceTask.Acks, _.ProduceTask.AckTimeout, _.TopicConnection.Connection.Endpoint });

            var sendBatches = new List <ProduceTaskBatch>();

            foreach (var endpointGroup in endpointGroups)
            {
                var produceTasksByTopic = endpointGroup
                                          .GroupBy(_ => new TopicPartition(_.TopicConnection.TopicName, _.TopicConnection.PartitionId))
                                          .ToImmutableDictionary(g => g.Key, g => g.Select(_ => _.ProduceTask).ToImmutableList());
                var messageCount = produceTasksByTopic.Values.Sum(_ => _.Count);
                var payloads     = produceTasksByTopic.Select(p => new ProduceRequest.Topic(p.Key.topic, p.Key.partition_id, p.Value.SelectMany(_ => _.Messages), codec));
                var request      = new ProduceRequest(payloads, endpointGroup.Key.AckTimeout, endpointGroup.Key.Acks);
                Router.Log.Debug(() => LogEvent.Create($"Produce request for topics{request.topics.Aggregate("", (buffer, p) => $"{buffer} {p}")} with {messageCount} messages"));

                var connection = endpointGroup.Select(_ => _.TopicConnection).First().Connection; // they'll all be the same since they're grouped by this
                // TODO: what about retryability like for the router?? Need this to be robust to node failures
                var sendGroupTask = _produceRequestSemaphore.LockAsync(() => connection.SendAsync(request, cancellationToken), cancellationToken);
                sendBatches.Add(new ProduceTaskBatch(connection.Endpoint, endpointGroup.Key.Acks, sendGroupTask, produceTasksByTopic));
            }

            try {
                await Task.WhenAll(sendBatches.Select(batch => batch.ReceiveTask)).ConfigureAwait(false);
            } catch (Exception ex) {
                Router.Log.Error(LogEvent.Create(ex));
            }
            await SetResult(sendBatches).ConfigureAwait(false);
        }
Example #32
        public void EnsureGzipCompressedMessageCanSend()
        {
            //ensure topic exists
            using (var conn = GetKafkaConnection())
            {
                conn.SendAsync(new MetadataRequest { Topics = new List<string>(new[] { IntegrationConfig.IntegrationCompressionTopic }) }).Wait(TimeSpan.FromSeconds(10));
            }

            using (var router = new BrokerRouter(_options))
            {
                var conn = router.SelectBrokerRoute(IntegrationConfig.IntegrationCompressionTopic, 0);

                var request = new ProduceRequest
                {
                    Acks = 1,
                    TimeoutMS = 1000,
                    Payload = new List<Payload>
                                {
                                    new Payload
                                        {
                                            Codec = MessageCodec.CodecGzip,
                                            Topic = IntegrationConfig.IntegrationCompressionTopic,
                                            Partition = 0,
                                            Messages = new List<Message>
                                                    {
                                                        new Message("0", "1"),
                                                        new Message("1", "1"),
                                                        new Message("2", "1")
                                                    }
                                        }
                                }
                };

                var response = conn.Connection.SendAsync(request).Result;
                Assert.That(response.First().Error, Is.EqualTo(0));
            }
        }
Example #33
        public async Task TestNewTopicProductionWorksOk()
        {
            using (var temporaryTopic = testCluster.CreateTemporaryTopic())
            using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
            {
                var topic = temporaryTopic.Name;
                {
                    var request = new MetadataRequest
                    {
                        Topics = new List<string>
                         {
                             topic
                         }
                    };
                    MetadataResponse response = null;
                    while (response == null)
                    {
                        response = await connection.SendRequestAsync(request, CancellationToken.None);
                        if (response.Topics[0].ErrorCode == ErrorResponseCode.LeaderNotAvailable)
                        {
                            response = null;
                            await Task.Delay(1000);
                        }

                    }
                    Assert.That(response, Is.Not.Null);
                    var first = response;
                    Assert.That(first.Topics, Has.Length.EqualTo(1));

                    var firstTopic = first.Topics.First();
                    Assert.That(firstTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(firstTopic.Name, Is.EqualTo(topic));
                    Assert.That(firstTopic.Partitions, Has.Length.EqualTo(1));

                    var firstPartition = firstTopic.Partitions.First();
                    Assert.That(firstPartition.PartitionId, Is.EqualTo(0));
                }

                {
                    var request = new ProduceRequest
                    {
                        Acks = 1,
                        TimeoutMS = 10000,
                        Payload = new List<Payload>
                             {
                                 new Payload
                                 {
                                      Topic = topic,
                                      Partition = 0,
                                      Codec = MessageCodec.CodecNone,
                                      Messages = new List<Message>
                                      {
                                          new Message("Message 1"),
                                          new Message("Message 2"),
                                          new Message("Message 3"),
                                          new Message("Message 4"),
                                      }
                                 }
                             }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Is.Not.Null);

                    var first = response.First();
                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.Offset, Is.EqualTo(0));
                }

                {
                    var request = new FetchRequest
                    {
                        MinBytes = 0,
                        MaxWaitTime = 0,
                        Fetches = new List<Fetch>
                             {
                                 new Fetch
                                 {
                                    MaxBytes = 40,
                                    Offset = 0,
                                    PartitionId = 0,
                                    Topic = topic,
                                 }
                            }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.HighWaterMark, Is.EqualTo(4));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.Messages, Has.Count.EqualTo(1));

                    var firstMessage = first.Messages.First();
                    Assert.That(firstMessage.Meta.Offset, Is.EqualTo(0));
                    Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
                    Assert.That(firstMessage.Attribute, Is.EqualTo(0));
                    Assert.That(firstMessage.Key, Is.Null);
                    Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
                    Assert.That(firstMessage.Value, Is.Not.Null);

                    var firstString = firstMessage.Value.ToUtf8String();
                    Assert.That(firstString, Is.EqualTo("Message 1"));
                }

                {
                    var request = new OffsetRequest
                    {
                        Offsets = new List<Offset>
                             {
                                 new Offset
                                 {
                                      MaxOffsets = 2,
                                      PartitionId = 0,
                                      Time = -1,
                                      Topic = topic
                                 }
                             }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.Offsets, Has.Length.EqualTo(2));

                    Assert.That(first.Offsets[0], Is.EqualTo(4));
                    Assert.That(first.Offsets[1], Is.EqualTo(0));
                }

                {
                    var request = new ConsumerMetadataRequest
                    {
                        ConsumerGroup = topic
                    };
                    ConsumerMetadataResponse response = null;
                    while (response == null)
                    {
                        response = await connection.SendRequestAsync(request, CancellationToken.None);
                        if (response.Error == ErrorResponseCode.ConsumerCoordinatorNotAvailableCode)
                        {
                            response = null;
                            await Task.Delay(1000);
                        }
                    }
                    Assert.That(response.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Console.WriteLine("Id = {0}, Host = {1}, Port = {2}", response.CoordinatorId, response.CoordinatorHost, response.CoordinatorPort);

                }

                {
                    var request = new OffsetFetchRequest
                    {
                        ConsumerGroup = topic,
                        Topics = new List<OffsetFetch>
                              {
                                  new OffsetFetch
                                  {
                                       PartitionId = 0,
                                       Topic = topic
                                  }
                              }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.MetaData, Is.Empty);
                    Assert.That(first.Offset, Is.EqualTo(-1));
                }

                {
                    var request = new OffsetCommitRequest
                    {
                        ConsumerGroup = topic,
                        ConsumerGroupGenerationId = 1,
                        ConsumerId = "0",
                        OffsetCommits = new List<OffsetCommit>
                               {
                                   new OffsetCommit
                                   {
                                        Metadata = "Metadata 1",
                                        Offset = 0,
                                        PartitionId = 0,
                                        TimeStamp = -1,
                                        Topic = topic,
                                   }
                               }
                    };
                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                }

                {
                    var request = new OffsetFetchRequest
                    {
                        ConsumerGroup = topic,
                        Topics = new List<OffsetFetch>
                              {
                                  new OffsetFetch
                                  {
                                       PartitionId = 0,
                                       Topic = topic
                                  }
                              }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.MetaData, Is.EqualTo("Metadata 1"));
                    Assert.That(first.Offset, Is.EqualTo(0));
                }

                {
                    var request = new FetchRequest
                    {
                        MinBytes = 0,
                        MaxWaitTime = 0,
                        Fetches = new List<Fetch>
                             {
                                 new Fetch
                                 {
                                    MaxBytes = 1024,
                                    Offset = 0 + 1,
                                    PartitionId = 0,
                                    Topic = topic,
                                 }
                            }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.HighWaterMark, Is.EqualTo(4));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.Messages, Has.Count.EqualTo(3));

                    var firstMessage = first.Messages.First();
                    Assert.That(firstMessage.Meta.Offset, Is.EqualTo(1));
                    Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
                    Assert.That(firstMessage.Attribute, Is.EqualTo(0));
                    Assert.That(firstMessage.Key, Is.Null);
                    Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
                    Assert.That(firstMessage.Value, Is.Not.Null);

                    var firstString = firstMessage.Value.ToUtf8String();
                    Assert.That(firstString, Is.EqualTo("Message 2"));

                    var lastMessage = first.Messages.Last();
                    Assert.That(lastMessage.Meta.Offset, Is.EqualTo(3));
                    Assert.That(lastMessage.Meta.PartitionId, Is.EqualTo(0));
                    Assert.That(lastMessage.Attribute, Is.EqualTo(0));
                    Assert.That(lastMessage.Key, Is.Null);
                    Assert.That(lastMessage.MagicNumber, Is.EqualTo(0));
                    Assert.That(lastMessage.Value, Is.Not.Null);

                    var lastString = lastMessage.Value.ToUtf8String();
                    Assert.That(lastString, Is.EqualTo("Message 4"));
                }

                {
                    var request = new FetchRequest
                    {
                        MinBytes = 0,
                        MaxWaitTime = 0,
                        Fetches = new List<Fetch>
                             {
                                 new Fetch
                                 {
                                    MaxBytes = 1024,
                                    Offset = 3 + 1,
                                    PartitionId = 0,
                                    Topic = topic,
                                 }
                            }
                    };

                    var response = await connection.SendRequestAsync(request, CancellationToken.None);
                    Assert.That(response, Has.Count.EqualTo(1));
                    var first = response.First();

                    Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                    Assert.That(first.HighWaterMark, Is.EqualTo(4));
                    Assert.That(first.PartitionId, Is.EqualTo(0));
                    Assert.That(first.Topic, Is.EqualTo(topic));
                    Assert.That(first.Messages, Has.Count.EqualTo(0));
                }
            }
            Console.WriteLine("Test completed");
        }
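The metadata block at the top of this test loops until the broker stops answering LeaderNotAvailable for the freshly created topic, with no upper bound. A minimal, bounded variant of that wait using the same identifiers (connection, topic, MetadataRequest, MetadataResponse, ErrorResponseCode); the 30-attempt budget is an illustrative choice, not part of the original test:

            // hedged sketch: bound the wait-for-leader loop instead of polling forever
            var metadataRequest = new MetadataRequest { Topics = new List<string> { topic } };
            MetadataResponse metadata = null;
            for (var attempt = 0; attempt < 30 && metadata == null; attempt++)
            {
                var candidate = await connection.SendRequestAsync(metadataRequest, CancellationToken.None);
                if (candidate.Topics[0].ErrorCode == ErrorResponseCode.LeaderNotAvailable)
                    await Task.Delay(1000);   // leader not elected yet, poll again
                else
                    metadata = candidate;
            }
            Assert.That(metadata, Is.Not.Null, "topic never got a leader within the retry budget");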
Example #34
0
        private async Task ProduceAndSendBatchAsync(List<TopicMessage> messages, CancellationToken cancellationToken)
        {
            Interlocked.Add(ref _inFlightMessageCount, messages.Count);
            var topics = messages.GroupBy(batch => batch.Topic).Select(batch => batch.Key).ToArray();
            await BrokerRouter.RefreshMissingTopicMetadata(topics);

            //we must send a different produce request for each ack level and timeout combination.
            foreach (var ackLevelBatch in messages.GroupBy(batch => new { batch.Acks, batch.Timeout }))
            {
                var messageByRouter = ackLevelBatch.Select(batch => new
                {
                    TopicMessage = batch,
                    Route = batch.Partition.HasValue ? BrokerRouter.SelectBrokerRouteFromLocalCache(batch.Topic, batch.Partition.Value) : BrokerRouter.SelectBrokerRouteFromLocalCache(batch.Topic, batch.Message.Key)
                })
                                         .GroupBy(x => new { x.Route, x.TopicMessage.Topic, x.TopicMessage.Codec });

                var sendTasks = new List<BrokerRouteSendBatch>();
                foreach (var group in messageByRouter)
                {
                    var payload = new Payload
                    {
                        Codec = group.Key.Codec,
                        Topic = group.Key.Topic,
                        Partition = group.Key.Route.PartitionId,
                        Messages = group.Select(x => x.TopicMessage.Message).ToList()
                    };

                    var request = new ProduceRequest
                    {
                        Acks = ackLevelBatch.Key.Acks,
                        TimeoutMS = (int)ackLevelBatch.Key.Timeout.TotalMilliseconds,
                        Payload = new List<Payload> { payload }
                    };

                    await _semaphoreMaximumAsync.WaitAsync(cancellationToken).ConfigureAwait(false);

                    var sendGroupTask = _protocolGateway.SendProtocolRequest(request, group.Key.Topic, group.Key.Route.PartitionId);// group.Key.Route.Connection.SendAsync(request);
                    var brokerSendTask = new BrokerRouteSendBatch
                    {
                        Route = group.Key.Route,
                        Task = sendGroupTask,
                        MessagesSent = group.Select(x => x.TopicMessage).ToList()
                    };

                    //ensure the semaphore is released as soon as each task is completed
                    brokerSendTask.Task.ContinueWith(t => { _semaphoreMaximumAsync.Release(); }, cancellationToken);

                    sendTasks.Add(brokerSendTask);
                }

                try
                {
                    await Task.WhenAll(sendTasks.Select(x => x.Task)).ConfigureAwait(false);

                    foreach (var task in sendTasks)
                    {
                        //TODO: when we don't ask for an ACK, the result is an empty list, so FirstOrDefault returns null. Don't like this...
                        task.MessagesSent.ForEach(async x => x.Tcs.TrySetResult(await task.Task));
                    }
                }
                catch
                {
                    //if an error occurs here, all we know is some or all of the messages in this ackBatch failed.
                    var failedTask = sendTasks.FirstOrDefault(t => t.Task.IsFaulted);
                    if (failedTask != null)
                    {
                        foreach (var topicMessageBatch in ackLevelBatch)
                        {
                            topicMessageBatch.Tcs.TrySetException(
                                new KafkaApplicationException(
                                    "An exception occured while executing a send operation against {0}.  Exception:{1}",
                                    failedTask.Route, failedTask.Task.Exception));
                        }
                    }
                }
                finally
                {
                    Interlocked.Add(ref _inFlightMessageCount, messages.Count * -1);
                }
            }
        }
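The throttling in the method above is a SemaphoreSlim slot taken before each group is dispatched and handed back from a ContinueWith the moment that particular send finishes, so one slow broker does not stall the rest of the batch. A stripped-down sketch of just that pattern, using nothing beyond the BCL; the sends delegates stand in for the _protocolGateway.SendProtocolRequest calls:

        // hedged sketch of the throttling pattern: one semaphore slot per in-flight send,
        // released from a continuation as soon as that send completes.
        private static async Task<List<Task<T>>> DispatchThrottledAsync<T>(
            IEnumerable<Func<Task<T>>> sends, SemaphoreSlim semaphore, CancellationToken cancellationToken)
        {
            var inFlight = new List<Task<T>>();
            foreach (var send in sends)
            {
                await semaphore.WaitAsync(cancellationToken).ConfigureAwait(false); // throttle the dispatch loop only
                var task = send();
                task.ContinueWith(_ => semaphore.Release(), cancellationToken,
                    TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);
                inFlight.Add(task);
            }
            return inFlight; // caller awaits Task.WhenAll(inFlight), just like the method above
        }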
        private async Task ProduceAndSendBatchAsync(List<TopicMessage> messages, CancellationToken cancellationToken)
        {
            Interlocked.Add(ref _inFlightMessageCount, messages.Count);

            var topics = messages.GroupBy(batch => batch.Topic).Select(batch => batch.Key).ToArray();
            await BrokerRouter.RefreshMissingTopicMetadata(topics).ConfigureAwait(false);

            //we must send a different produce request for each ack level and timeout combination.
            foreach (var ackLevelBatch in messages.GroupBy(batch => new { batch.Acks, batch.Timeout }))
            {
                var messageByRouter = ackLevelBatch.Select(batch => new
                {
                    TopicMessage = batch,
                    AckLevel = ackLevelBatch.Key.Acks,
                    Route = batch.Partition.HasValue ? BrokerRouter.SelectBrokerRouteFromLocalCache(batch.Topic, batch.Partition.Value) : BrokerRouter.SelectBrokerRouteFromLocalCache(batch.Topic, batch.Message.Key)
                }).GroupBy(x => new { x.Route, x.TopicMessage.Topic, x.TopicMessage.Codec, x.AckLevel });

                var sendTasks = new List<BrokerRouteSendBatch>();
                foreach (var group in messageByRouter)
                {
                    var payload = new Payload
                    {
                        Codec = group.Key.Codec,
                        Topic = group.Key.Topic,
                        Partition = group.Key.Route.PartitionId,
                        Messages = group.Select(x => x.TopicMessage.Message).ToList()
                    };

                    var request = new ProduceRequest
                    {
                        Acks = ackLevelBatch.Key.Acks,
                        TimeoutMS = (int)ackLevelBatch.Key.Timeout.TotalMilliseconds,
                        Payload = new List<Payload> { payload }
                    };

                    await _semaphoreMaximumAsync.WaitAsync(cancellationToken).ConfigureAwait(false);

                    var sendGroupTask = _protocolGateway.SendProtocolRequest(request, group.Key.Topic, group.Key.Route.PartitionId);
                    var brokerSendTask = new BrokerRouteSendBatch
                    {
                        Route = group.Key.Route,
                        Task = sendGroupTask,
                        MessagesSent = group.Select(x => x.TopicMessage).ToList(),
                        AckLevel = group.Key.AckLevel
                    };

                    //ensure the semaphore is released as soon as each task is completed //TODO: remove this for ack level 0, don't like it
                    brokerSendTask.Task.ContinueWith(t => { _semaphoreMaximumAsync.Release(); }, cancellationToken);

                    sendTasks.Add(brokerSendTask);
                }

                try
                {
                    await Task.WhenAll(sendTasks.Select(x => x.Task)).ConfigureAwait(false);
                }
                catch (Exception ex)
                {
                    BrokerRouter.Log.ErrorFormat("Exception[{0}] stacktrace[{1}]", ex.Message, ex.StackTrace);
                }

                await SetResult(sendTasks);
                Interlocked.Add(ref _inFlightMessageCount, messages.Count * -1);
            }
        }
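This variant carries AckLevel on each BrokerRouteSendBatch and leaves per-message completion to a SetResult helper that is not included in this excerpt. A hedged sketch of what such a step could look like, assuming the BrokerRouteSendBatch shape shown here (Task, MessagesSent, AckLevel), that each TopicMessage carries a Tcs as above, and that an acks = 0 send has no broker response to hand back:

        // hedged sketch only: the real SetResult is not part of this excerpt
        private async Task SetResult(List<BrokerRouteSendBatch> sendTasks)
        {
            foreach (var batch in sendTasks)
            {
                if (batch.Task.IsCanceled)
                {
                    batch.MessagesSent.ForEach(x => x.Tcs.TrySetCanceled());
                    continue;
                }
                if (batch.Task.IsFaulted)
                {
                    // fan the failure out to every message that travelled in this batch
                    batch.MessagesSent.ForEach(x => x.Tcs.TrySetException(batch.Task.Exception));
                    continue;
                }

                // acks == 0 means the broker sends nothing back, so complete with no response
                var result = batch.AckLevel == 0 ? null : await batch.Task;
                batch.MessagesSent.ForEach(x => x.Tcs.TrySetResult(result));
            }
        }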
Example #36
0
        public async Task <ApiResult> Broadcast()
        {
            var roomId    = "0";
            var deviceId  = "100001@100001";
            var videoSsrc = 2222u;
            var audioSsrc = videoSsrc + 2;

            await _scheduler.LeaveAsync(deviceId);

            var joinRequest = new JoinRequest
            {
                RtpCapabilities = new RtpCapabilities(),
                DisplayName     = $"Device:{deviceId}",
                Sources         = new[] { "video:cam", "audio:mic" },
                AppData         = new Dictionary <string, object> {
                    ["type"] = "Device"
                },
            };

            _ = await _scheduler.JoinAsync(deviceId, "", joinRequest);

            var joinRoomRequest = new JoinRoomRequest
            {
                RoomId = roomId,
            };

            _ = await _scheduler.JoinRoomAsync(deviceId, "", joinRoomRequest);

            var createPlainTransportRequest = new CreatePlainTransportRequest
            {
                Comedia   = true,
                RtcpMux   = false,
                Producing = true,
                Consuming = false,
            };
            var transport = await _scheduler.CreatePlainTransportAsync(deviceId, "", createPlainTransportRequest);

            // Audio: "{ \"codecs\": [{ \"mimeType\":\"audio/opus\", \"payloadType\":${AUDIO_PT}, \"clockRate\":48000, \"channels\":2, \"parameters\":{ \"sprop-stereo\":1 } }], \"encodings\": [{ \"ssrc\":${AUDIO_SSRC} }] }"
            // Video :"{ \"codecs\": [{ \"mimeType\":\"video/vp8\", \"payloadType\":${VIDEO_PT}, \"clockRate\":90000 }], \"encodings\": [{ \"ssrc\":${VIDEO_SSRC} }] }"
            var videoProduceRequest = new ProduceRequest
            {
                Kind          = MediaKind.Video,
                Source        = "video",
                RtpParameters = new RtpParameters
                {
                    Codecs = new List <RtpCodecParameters>
                    {
                        new RtpCodecParameters
                        {
                            MimeType    = "video/h264",
                            PayloadType = 98,
                            ClockRate   = 90000,
                        }
                    },
                    Encodings = new List <RtpEncodingParameters> {
                        new RtpEncodingParameters
                        {
                            Ssrc = videoSsrc
                        }
                    },
                },
                AppData = new Dictionary <string, object>
                {
                    ["peerId"] = deviceId,
                }
            };
            var videoProduceResult = await _scheduler.ProduceAsync(deviceId, "", videoProduceRequest);

            var audioProduceRequest = new ProduceRequest
            {
                Kind          = MediaKind.Audio,
                Source        = "audio",
                RtpParameters = new RtpParameters
                {
                    Codecs = new List <RtpCodecParameters>
                    {
                        new RtpCodecParameters
                        {
                            MimeType    = "audio/PCMA",
                            PayloadType = 8,
                            ClockRate   = 8000,
                        }
                    },
                    Encodings = new List <RtpEncodingParameters> {
                        new RtpEncodingParameters
                        {
                            Ssrc = audioSsrc
                        }
                    },
                },
                AppData = new Dictionary <string, object>
                {
                    ["peerId"] = deviceId,
                }
            };
            var audioProduceResult = await _scheduler.ProduceAsync(deviceId, "", audioProduceRequest);

            var result = new CreatePlainTransportResult
            {
                TransportId = transport.TransportId,
                Ip          = transport.Tuple.LocalIp,
                Port        = transport.Tuple.LocalPort,
            };

            return(new ApiResult <CreatePlainTransportResult>
            {
                Data = result
            });
        }
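Broadcast() builds two ProduceRequest objects that differ only in media kind, source, codec and SSRC. A small factory removes that duplication, using only the types already shown (ProduceRequest, RtpParameters, RtpCodecParameters, RtpEncodingParameters, MediaKind); the helper and its name are illustrative, not part of the original controller:

        // illustrative helper: the original builds these two requests inline
        private static ProduceRequest BuildProduceRequest(string deviceId, MediaKind kind, string source,
            string mimeType, int payloadType, int clockRate, uint ssrc)
        {
            return new ProduceRequest
            {
                Kind          = kind,
                Source        = source,
                RtpParameters = new RtpParameters
                {
                    Codecs = new List<RtpCodecParameters>
                    {
                        new RtpCodecParameters { MimeType = mimeType, PayloadType = payloadType, ClockRate = clockRate }
                    },
                    Encodings = new List<RtpEncodingParameters> { new RtpEncodingParameters { Ssrc = ssrc } },
                },
                AppData = new Dictionary<string, object> { ["peerId"] = deviceId }
            };
        }

        // usage, matching the values in Broadcast():
        // var videoProduceRequest = BuildProduceRequest(deviceId, MediaKind.Video, "video", "video/h264", 98, 90000, videoSsrc);
        // var audioProduceRequest = BuildProduceRequest(deviceId, MediaKind.Audio, "audio", "audio/PCMA", 8, 8000, audioSsrc);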
Example #37
0
        private async Task ProduceAndSendBatchAsync(List<TopicMessage> batches, CancellationToken cancellationToken)
        {
            //we must send a different produce request for each ack level and timeout combination.
            foreach (var ackLevelBatch in batches.GroupBy(batch => new { batch.Acks, batch.Timeout }))
            {
                var messageByRouter = ackLevelBatch.Select(batch => new
                                            {
                                                TopicMessage = batch,
                                                Route = BrokerRouter.SelectBrokerRoute(batch.Topic, batch.Message.Key),
                                            })
                                         .GroupBy(x => new { x.Route, x.TopicMessage.Topic, x.TopicMessage.Codec });

                var sendTasks = new List<BrokerRouteSendBatch>();
                foreach (var group in messageByRouter)
                {
                    var payload = new Payload
                    {
                        Codec = group.Key.Codec,
                        Topic = group.Key.Topic,
                        Partition = group.Key.Route.PartitionId,
                        Messages = group.Select(x => x.TopicMessage.Message).ToList()
                    };

                    var request = new ProduceRequest
                    {
                        Acks = ackLevelBatch.Key.Acks,
                        TimeoutMS = (int)ackLevelBatch.Key.Timeout.TotalMilliseconds,
                        Payload = new List<Payload> { payload }
                    };

                    await _semaphoreMaximumAsync.WaitAsync(cancellationToken).ConfigureAwait(false);

                    var brokerSendTask = new BrokerRouteSendBatch
                    {
                        Route = group.Key.Route,
                        Task = group.Key.Route.Connection.SendAsync(request),
                        MessagesSent = group.Select(x => x.TopicMessage).ToList()
                    };

                    //ensure the semaphore is released as soon as each task is completed
                    brokerSendTask.Task.ContinueWith(t => { _semaphoreMaximumAsync.Release(); }, cancellationToken, 
                        TaskContinuationOptions.ExecuteSynchronously, TaskScheduler.Default);

                    sendTasks.Add(brokerSendTask);
                }

                try
                {
                    await Task.WhenAll(sendTasks.Select(x => x.Task)).ConfigureAwait(false);

                    foreach (var task in sendTasks)
                    {
                        task.MessagesSent.ForEach(x => x.Tcs.TrySetResult(task.Task.Result.FirstOrDefault()));
                    }
                }
                catch
                {
                    //if an error occurs here, all we know is some or all of the messages in this ackBatch failed.
                    var failedTask = sendTasks.FirstOrDefault(t => t.Task.IsFaulted);
                    if (failedTask != null)
                    {
                        foreach (var topicMessageBatch in ackLevelBatch)
                        {
                            topicMessageBatch.Tcs.TrySetException(new KafkaApplicationException("An exception occurred while executing a send operation against {0}.  Exception:{1}",
                                failedTask.Route, failedTask.Task.Exception));
                        }
                    }
                }
            }
        }
        public async Task TestNewTopicProductionWorksOk()
        {
            using (var temporaryTopic = testCluster.CreateTemporaryTopic())
                using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
                {
                    var topic = temporaryTopic.Name;
                    {
                        var request = new MetadataRequest
                        {
                            Topics = new List <string>
                            {
                                topic
                            }
                        };
                        MetadataResponse response = null;
                        while (response == null)
                        {
                            response = await connection.SendRequestAsync(request, CancellationToken.None);

                            if (response.Topics[0].ErrorCode == ErrorResponseCode.LeaderNotAvailable)
                            {
                                response = null;
                                await Task.Delay(1000);
                            }
                        }
                        Assert.That(response, Is.Not.Null);
                        var first = response;
                        Assert.That(first.Topics, Has.Length.EqualTo(1));

                        var firstTopic = first.Topics.First();
                        Assert.That(firstTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(firstTopic.Name, Is.EqualTo(topic));
                        Assert.That(firstTopic.Partitions, Has.Length.EqualTo(1));

                        var firstPartition = firstTopic.Partitions.First();
                        Assert.That(firstPartition.PartitionId, Is.EqualTo(0));
                    }

                    {
                        var request = new ProduceRequest
                        {
                            Acks      = 1,
                            TimeoutMS = 10000,
                            Payload   = new List <Payload>
                            {
                                new Payload
                                {
                                    Topic     = topic,
                                    Partition = 0,
                                    Codec     = MessageCodec.CodecNone,
                                    Messages  = new List <Message>
                                    {
                                        new Message("Message 1"),
                                        new Message("Message 2"),
                                        new Message("Message 3"),
                                        new Message("Message 4"),
                                    }
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Is.Not.Null);

                        var first = response.First();
                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.Offset, Is.EqualTo(0));
                    }

                    {
                        var request = new FetchRequest
                        {
                            MinBytes    = 0,
                            MaxWaitTime = 0,
                            Fetches     = new List <Fetch>
                            {
                                new Fetch
                                {
                                    MaxBytes    = 40,
                                    Offset      = 0,
                                    PartitionId = 0,
                                    Topic       = topic,
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.HighWaterMark, Is.EqualTo(4));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.Messages, Has.Count.EqualTo(1));

                        var firstMessage = first.Messages.First();
                        Assert.That(firstMessage.Meta.Offset, Is.EqualTo(0));
                        Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
                        Assert.That(firstMessage.Attribute, Is.EqualTo(0));
                        Assert.That(firstMessage.Key, Is.Null);
                        Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
                        Assert.That(firstMessage.Value, Is.Not.Null);

                        var firstString = firstMessage.Value.ToUtf8String();
                        Assert.That(firstString, Is.EqualTo("Message 1"));
                    }

                    {
                        var request = new OffsetRequest
                        {
                            Offsets = new List <Offset>
                            {
                                new Offset
                                {
                                    MaxOffsets  = 2,
                                    PartitionId = 0,
                                    Time        = -1,
                                    Topic       = topic
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.Offsets, Has.Length.EqualTo(2));

                        Assert.That(first.Offsets[0], Is.EqualTo(4));
                        Assert.That(first.Offsets[1], Is.EqualTo(0));
                    }

                    {
                        var request = new ConsumerMetadataRequest
                        {
                            ConsumerGroup = topic
                        };
                        ConsumerMetadataResponse response = null;
                        while (response == null)
                        {
                            response = await connection.SendRequestAsync(request, CancellationToken.None);

                            if (response.Error == ErrorResponseCode.ConsumerCoordinatorNotAvailableCode)
                            {
                                response = null;
                                await Task.Delay(1000);
                            }
                        }
                        Assert.That(response.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Console.WriteLine("Id = {0}, Host = {1}, Port = {2}", response.CoordinatorId, response.CoordinatorHost, response.CoordinatorPort);
                    }

                    {
                        var request = new OffsetFetchRequest
                        {
                            ConsumerGroup = topic,
                            Topics        = new List <OffsetFetch>
                            {
                                new OffsetFetch
                                {
                                    PartitionId = 0,
                                    Topic       = topic
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.MetaData, Is.Empty);
                        Assert.That(first.Offset, Is.EqualTo(-1));
                    }

                    {
                        var request = new OffsetCommitRequest
                        {
                            ConsumerGroup             = topic,
                            ConsumerGroupGenerationId = 1,
                            ConsumerId    = "0",
                            OffsetCommits = new List <OffsetCommit>
                            {
                                new OffsetCommit
                                {
                                    Metadata    = "Metadata 1",
                                    Offset      = 0,
                                    PartitionId = 0,
                                    TimeStamp   = -1,
                                    Topic       = topic,
                                }
                            }
                        };
                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                    }

                    {
                        var request = new OffsetFetchRequest
                        {
                            ConsumerGroup = topic,
                            Topics        = new List <OffsetFetch>
                            {
                                new OffsetFetch
                                {
                                    PartitionId = 0,
                                    Topic       = topic
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.MetaData, Is.EqualTo("Metadata 1"));
                        Assert.That(first.Offset, Is.EqualTo(0));
                    }

                    {
                        var request = new FetchRequest
                        {
                            MinBytes    = 0,
                            MaxWaitTime = 0,
                            Fetches     = new List <Fetch>
                            {
                                new Fetch
                                {
                                    MaxBytes    = 1024,
                                    Offset      = 0 + 1,
                                    PartitionId = 0,
                                    Topic       = topic,
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.HighWaterMark, Is.EqualTo(4));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.Messages, Has.Count.EqualTo(3));

                        var firstMessage = first.Messages.First();
                        Assert.That(firstMessage.Meta.Offset, Is.EqualTo(1));
                        Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
                        Assert.That(firstMessage.Attribute, Is.EqualTo(0));
                        Assert.That(firstMessage.Key, Is.Null);
                        Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
                        Assert.That(firstMessage.Value, Is.Not.Null);

                        var firstString = firstMessage.Value.ToUtf8String();
                        Assert.That(firstString, Is.EqualTo("Message 2"));

                        var lastMessage = first.Messages.Last();
                        Assert.That(lastMessage.Meta.Offset, Is.EqualTo(3));
                        Assert.That(lastMessage.Meta.PartitionId, Is.EqualTo(0));
                        Assert.That(lastMessage.Attribute, Is.EqualTo(0));
                        Assert.That(lastMessage.Key, Is.Null);
                        Assert.That(lastMessage.MagicNumber, Is.EqualTo(0));
                        Assert.That(lastMessage.Value, Is.Not.Null);

                        var lastString = lastMessage.Value.ToUtf8String();
                        Assert.That(lastString, Is.EqualTo("Message 4"));
                    }

                    {
                        var request = new FetchRequest
                        {
                            MinBytes    = 0,
                            MaxWaitTime = 0,
                            Fetches     = new List <Fetch>
                            {
                                new Fetch
                                {
                                    MaxBytes    = 1024,
                                    Offset      = 3 + 1,
                                    PartitionId = 0,
                                    Topic       = topic,
                                }
                            }
                        };

                        var response = await connection.SendRequestAsync(request, CancellationToken.None);

                        Assert.That(response, Has.Count.EqualTo(1));
                        var first = response.First();

                        Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
                        Assert.That(first.HighWaterMark, Is.EqualTo(4));
                        Assert.That(first.PartitionId, Is.EqualTo(0));
                        Assert.That(first.Topic, Is.EqualTo(topic));
                        Assert.That(first.Messages, Has.Count.EqualTo(0));
                    }
                }
            Console.WriteLine("Test completed");
        }
Example #39
0
        private async Task ProduceAndSendBatchAsync(List <TopicMessage> messages, CancellationToken cancellationToken)
        {
            Interlocked.Add(ref _inFlightMessageCount, messages.Count);

            //we must send a different produce request for each ack level and timeout combination.
            foreach (var ackLevelBatch in messages.GroupBy(batch => new { batch.Acks, batch.Timeout }))
            {
                var messageByRouter = ackLevelBatch.Select(batch => new
                {
                    TopicMessage = batch,
                    Route        = BrokerRouter.SelectBrokerRoute(batch.Topic, batch.Message.Key),
                })
                                      .GroupBy(x => new { x.Route, x.TopicMessage.Topic, x.TopicMessage.Codec });

                var sendTasks = new List <BrokerRouteSendBatch>();
                foreach (var group in messageByRouter)
                {
                    var payload = new Payload
                    {
                        Codec     = group.Key.Codec,
                        Topic     = group.Key.Topic,
                        Partition = group.Key.Route.PartitionId,
                        Messages  = group.Select(x => x.TopicMessage.Message).ToList()
                    };

                    var request = new ProduceRequest
                    {
                        Acks      = ackLevelBatch.Key.Acks,
                        TimeoutMS = (int)ackLevelBatch.Key.Timeout.TotalMilliseconds,
                        Payload   = new List <Payload> {
                            payload
                        }
                    };

                    await _semaphoreMaximumAsync.WaitAsync(cancellationToken).ConfigureAwait(false);

                    var brokerSendTask = new BrokerRouteSendBatch
                    {
                        Route        = group.Key.Route,
                        Task         = group.Key.Route.Connection.SendAsync(request),
                        MessagesSent = group.Select(x => x.TopicMessage).ToList()
                    };

                    //ensure the semaphore is released as soon as each task is completed
                    brokerSendTask.Task.ContinueWith(t => { _semaphoreMaximumAsync.Release(); }, cancellationToken);

                    sendTasks.Add(brokerSendTask);
                }

                try
                {
                    await Task.WhenAll(sendTasks.Select(x => x.Task)).ConfigureAwait(false);

                    foreach (var task in sendTasks)
                    {
                        //TODO: when we don't ask for an ACK, the result is an empty list, so FirstOrDefault returns null. Don't like this...
                        task.MessagesSent.ForEach(x => x.Tcs.TrySetResult(task.Task.Result.FirstOrDefault()));
                    }
                }
                catch
                {
                    //if an error occurs here, all we know is some or all of the messages in this ackBatch failed.
                    var failedTask = sendTasks.FirstOrDefault(t => t.Task.IsFaulted);
                    if (failedTask != null)
                    {
                        foreach (var topicMessageBatch in ackLevelBatch)
                        {
                            topicMessageBatch.Tcs.TrySetException(
                                new KafkaApplicationException(
                                    "An exception occured while executing a send operation against {0}.  Exception:{1}",
                                    failedTask.Route, failedTask.Task.Exception));
                        }
                    }
                }
                finally
                {
                    Interlocked.Add(ref _inFlightMessageCount, messages.Count * -1);
                }
            }
        }
Example #40
0
        private async Task ProduceAndSendBatchAsync(List <TopicMessage> messages, CancellationToken cancellationToken)
        {
            Interlocked.Add(ref _inFlightMessageCount, messages.Count);

            var topics = messages.GroupBy(batch => batch.Topic).Select(batch => batch.Key).ToArray();
            await BrokerRouter.RefreshMissingTopicMetadata(topics).ConfigureAwait(false);

            //we must send a different produce request for each ack level and timeout combination.
            foreach (var ackLevelBatch in messages.GroupBy(batch => new { batch.Acks, batch.Timeout }))
            {
                var messageByRouter = ackLevelBatch.Select(batch => new
                {
                    TopicMessage = batch,
                    AckLevel     = ackLevelBatch.Key.Acks,
                    Route        = batch.Partition.HasValue ? BrokerRouter.SelectBrokerRouteFromLocalCache(batch.Topic, batch.Partition.Value) : BrokerRouter.SelectBrokerRouteFromLocalCache(batch.Topic, batch.Message.Key)
                }).GroupBy(x => new { x.Route, x.TopicMessage.Topic, x.TopicMessage.Codec, x.AckLevel });

                var sendTasks = new List <BrokerRouteSendBatch>();
                foreach (var group in messageByRouter)
                {
                    var payload = new Payload
                    {
                        Codec     = group.Key.Codec,
                        Topic     = group.Key.Topic,
                        Partition = group.Key.Route.PartitionId,
                        Messages  = group.Select(x => x.TopicMessage.Message).ToList()
                    };

                    var request = new ProduceRequest
                    {
                        Acks      = ackLevelBatch.Key.Acks,
                        TimeoutMS = (int)ackLevelBatch.Key.Timeout.TotalMilliseconds,
                        Payload   = new List <Payload> {
                            payload
                        }
                    };

                    await _semaphoreMaximumAsync.WaitAsync(cancellationToken).ConfigureAwait(false);

                    var sendGroupTask  = _protocolGateway.SendProtocolRequest(request, group.Key.Topic, group.Key.Route.PartitionId);
                    var brokerSendTask = new BrokerRouteSendBatch
                    {
                        Route        = group.Key.Route,
                        Task         = sendGroupTask,
                        MessagesSent = group.Select(x => x.TopicMessage).ToList(),
                        AckLevel     = group.Key.AckLevel
                    };

                    //ensure the semaphore is released as soon as each task is completed //TODO: remove this for ack level 0, don't like it
                    brokerSendTask.Task.ContinueWith(t => { _semaphoreMaximumAsync.Release(); }, cancellationToken);

                    sendTasks.Add(brokerSendTask);
                }

                try
                {
                    await Task.WhenAll(sendTasks.Select(x => x.Task)).ConfigureAwait(false);
                }
                catch (Exception ex)
                {
                    BrokerRouter.Log.ErrorFormat("Exception[{0}] stacktrace[{1}]", ex.Message, ex.StackTrace);
                }

                await SetResult(sendTasks).ConfigureAwait(false);

                Interlocked.Add(ref _inFlightMessageCount, messages.Count * -1);
            }
        }