SendMessageAsync — signature summary
    messages  : the collection of Message instances to publish
    topic     : string — destination Kafka topic
    partition : int — target partition
    acks      : acknowledgement level required from the broker
    returns   : Task
// Verifies the acks contract: acks=0 (fire-and-forget) yields no broker offset (-1),
// while acks=1 (leader ack) yields a real, non-negative offset.
public async Task ProducerAckLevel()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
    using (var producer = new Producer(router))
    {
        var responseAckLevel0 = await producer.SendMessageAsync(new Message("Ack Level 0"), IntegrationConfig.IntegrationTopic, acks: 0, partition: 0);
        // Fix: Assert.AreEqual takes (expected, actual) — the original had the arguments
        // reversed, which produces a misleading failure message.
        Assert.AreEqual(-1, responseAckLevel0.Offset);
        var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
        Assert.That(responseAckLevel1.Offset, Is.GreaterThan(-1));
    }
}
// Publishes the payload 's' to the Kafka topic named by 'nome'.
// NOTE(review): the Task returned by SendMessageAsync is dropped (fire-and-forget),
// so the lock serializes only the enqueue — delivery order and failures are not
// observed. Consider awaiting/observing the returned task.
private void enviaGraylog(string nome, string s)
{
    lock (thisLock)
    {
        client.SendMessageAsync(nome, new[] { new KafkaNet.Protocol.Message(s) });
    }
}
// Streams tweets matching "Politics" via Tweetinvi and forwards each tweet's text
// to the Kafka topic "newtest1" on localhost:9092.
// SECURITY(review): OAuth consumer/access credentials are hard-coded in source —
// these should be revoked and moved to configuration/secret storage.
// NOTE(review): .Wait() inside the tweet event handler blocks the stream callback
// on every message; router/producer are never disposed.
public static void twitterInfo()
{
    var options = new KafkaOptions (new Uri("http://localhost:9092"));
    var router = new BrokerRouter(options);
    var client = new KafkaNet.Producer(router);
    const String topic = "newtest1";
    const String tokenSecret = "FbcP68UtZR8U6n0AIMkBZHxgx4hzv3ibLQrU35qQipQ7Y";
    const String consumerSecret = "xL20m2W34vg8dhxjxvBs0R9vvx3dZCw0fYAG9UjhVRBqgHTQ9d";
    const String token = "2710082868-swCbtRmmODBOB6TMbsDGIQNMCUKoATTVAwbCQwi";
    const String consumerKey = "XC6GdAJVYF9jMQqS68bQOu6kG";
    Auth.SetUserCredentials(consumerKey, consumerSecret, token, tokenSecret);
    var stream = Tweetinvi.Stream.CreateFilteredStream();
    stream.AddTrack("Politics");
    stream.MatchingTweetReceived += (sender, arguments) =>
    {
        Console.WriteLine(arguments.Tweet.Text);
        client.SendMessageAsync(topic, new[] { new KafkaNet.Protocol.Message(arguments.Tweet.Text) }).Wait();
    };
    stream.StartStreamMatchingAllConditions();
}
// Produces a batch of messages and verifies that two consecutive FetchMessages
// calls (5 + 5) return them in order, exercising the manual consumer's fetch cache.
public async Task FetchMessagesCacheContainsAllRequestTest()
{
    // Creating a broker router and a protocol gateway for the producer and consumer
    var brokerRouter = new BrokerRouter(_options);
    var protocolGateway = new ProtocolGateway(_kafkaUri);
    Producer producer = new Producer(brokerRouter);
    ManualConsumer consumer = new ManualConsumer(_partitionId, _topic, protocolGateway, "TestClient", DefaultMaxMessageSetSize);
    var offset = await consumer.FetchLastOffset();
    // Creating 10 messages (the original comment said 5; they are consumed below in two batches of 5)
    List<Message> messages = CreateTestMessages(10, 1);
    await producer.SendMessageAsync(_topic, messages, partition: _partitionId, timeout: TimeSpan.FromSeconds(3));
    // Now let's consume
    var result = (await consumer.FetchMessages(5, offset)).ToList();
    CheckMessages(messages.Take(5).ToList(), result);
    // Now let's consume again
    result = (await consumer.FetchMessages(5, offset + 5)).ToList();
    CheckMessages(messages.Skip(5).ToList(), result);
}
// Producer load harness: posts one timestamped message every 100 ms forever while a
// background task prints the per-second send rate.
// NOTE(review): 'count'/'lastCount' are read and written from two threads without
// synchronization, and the SendMessageAsync tasks are never observed — send failures
// are silently lost. Acceptable for a throwaway harness, not for production.
public void Main(string[] args)
{
    var options = GetOptions(args);
    if (options == null) return;
    var count = 0;
    var lastCount = 0;
    // Reporter loop: prints how many messages were posted in the last second.
    var reporter = new Task(() =>
    {
        while (true)
        {
            var current = count;
            Console.WriteLine("{0} messages in last second.", current - lastCount);
            lastCount = current;
            Thread.Sleep(1000);
        }
    });
    var kafkaOptions = new KafkaOptions(options.KafkaNodeUri);// { Log = new ConsoleLog() };
    using (var router = new BrokerRouter(kafkaOptions))
    using (var client = new KafkaNet.Producer(router))
    {
        reporter.Start();
        while (true)
        {
            Thread.Sleep(100);
            // Fire-and-forget: the returned Task is intentionally not awaited.
            client.SendMessageAsync("TestHarness", new[] { new Message() { Value = BitConverter.GetBytes(DateTime.Now.Ticks) } });
            count++;
        }
    }
}
// Round-trip test: produce one GUID-valued message, then fetch it back through a
// ProtocolGateway and verify the payload survived intact.
public async Task ProtocolGateway()
{
    const int partitionId = 0;
    var router = new BrokerRouter(Options);
    var producer = new Producer(router);

    var payload = Guid.NewGuid().ToString();
    var produceResponse = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(payload) }, 1, null, MessageCodec.CodecNone, partitionId);
    var offset = produceResponse.FirstOrDefault().Offset;

    ProtocolGateway protocolGateway = new ProtocolGateway(IntegrationConfig.IntegrationUri);
    var fetchRequest = new FetchRequest
    {
        MaxWaitTime = 1000,
        MinBytes = 10,
        Fetches = new List<Fetch>
        {
            new Fetch
            {
                Topic = IntegrationConfig.IntegrationTopic,
                PartitionId = partitionId,
                Offset = offset,
                MaxBytes = 32000,
            }
        }
    };

    var fetchResponse = await protocolGateway.SendProtocolRequest(fetchRequest, IntegrationConfig.IntegrationTopic, partitionId);
    Assert.IsTrue(fetchResponse.Messages.FirstOrDefault().Value.ToUtf8String() == payload);
}
// Console echo harness: a background consumer prints everything arriving on
// "TestHarness" while the main loop sends each typed line until "quit" is entered.
// NOTE(review): sends are fire-and-forget; the trailing empty using blocks exist
// only to dispose client and router once the loop exits.
static void Main(string[] args)
{
    var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092")) { Log = new ConsoleLog() };
    var router = new BrokerRouter(options);
    var client = new Producer(router);
    Task.Factory.StartNew(() =>
    {
        var consumer = new Consumer(new ConsumerOptions("TestHarness", router));
        foreach (var data in consumer.Consume())
        {
            Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value);
        }
    });
    Console.WriteLine("Type a message and press enter...");
    while (true)
    {
        var message = Console.ReadLine();
        if (message == "quit") break;
        client.SendMessageAsync("TestHarness", new[] {new Message {Value = message}});
    }
    using (client)
    using (router) { }
}
// Produces 20 keyed messages and verifies the consumer's reported offset positions
// advance by exactly 20 relative to the offsets snapshotted before producing.
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    var producer = new Producer(_router);
    // Snapshot the current max offset of every partition before producing.
    var startOffsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), startOffsets);
    var tasks = new List<Task<List<ProduceResponse>>>();
    for (int i = 0; i < 20; i++)
    {
        // All messages share key "1" — presumably so they route to one partition; verify against partitioner.
        tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
    }
    Task.WaitAll(tasks.ToArray());
    var results = consumer.Consume().Take(20).ToList();
    //ensure the produced messages arrived
    for (int i = 0; i < 20; i++)
    {
        Assert.That(results[i].Value == i.ToString());
    }
    //the current offsets should be 20 positions higher than start
    var currentOffsets = consumer.GetOffsetPosition();
    Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
}
// Publishes a fixed test message to a topic named after the document's runtime type.
// NOTE(review): the producer configuration fetched below is currently unused by the
// kafka-net send path; it is kept because GetConfiguration may have side effects.
public virtual async Task UpsertDocument <TDocument>(IUpsertDocumentContext <TDocument> context) where TDocument : class
{
    var topicName = context.Document.GetType().Name;
    var config = this._producerConfigManager.GetConfiguration(x => (x.ConfigurationScope & ConfigurationScope.Producer) == ConfigurationScope.Producer);
    var options = new KafkaOptions (new Uri("http://localhost:9092"));
    var router = new BrokerRouter(options);
    var client = new KafkaNet.Producer(router);
    // Fix: was SendMessageAsync(...).Wait() inside an async method — blocks the
    // calling thread and wraps failures in AggregateException; await instead.
    await client.SendMessageAsync(topicName, new[] { new Message("!!!Test message sent from alternative .Net => kafka provider!!!") }, -1, TimeSpan.FromSeconds(5));
    // Removed dead commented-out Confluent producer code that previously lived here.
}
// Produces 20 keyed messages, consumes them, seeks back to the starting offsets,
// and verifies the same 20 messages can be consumed again.
public void ConsumerShouldBeAbleToSeekBackToEarlierOffset()
{
    var producer = new Producer(_router);
    // Snapshot the starting offsets so we can seek back to them later.
    var offsets = producer.GetTopicOffsetAsync("LoadTest").Result
        .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
    var consumer = new Consumer(new ConsumerOptions("LoadTest", _router), offsets);
    var tasks = new List<Task<List<ProduceResponse>>>();
    for (int i = 0; i < 20; i++)
    {
        tasks.Add(producer.SendMessageAsync("LoadTest", new[] { new Message { Value = i.ToString(), Key = "1" } }));
    }
    Task.WaitAll(tasks.ToArray());

    var results = consumer.Consume().Take(20).ToList();
    //ensure the produced messages arrived
    for (int i = 0; i < 20; i++)
    {
        Assert.That(results[i].Value == i.ToString());
    }

    //seek back to initial offset
    consumer.SetOffsetPosition(offsets);

    // Fix: the original re-asserted on the already-materialized 'results' list,
    // which could never fail and did not exercise the seek at all. Consume from
    // the stream again so the replayed messages are actually verified.
    var replayed = consumer.Consume().Take(20).ToList();
    for (int i = 0; i < 20; i++)
    {
        Assert.That(replayed[i].Value == i.ToString());
    }
}
// Publishes a single keyed message to "PingPongTopic".
// NOTE(review): the returned Task is dropped — delivery failures are unobserved.
public static void ProducirMsg(string msg, string idMsg)
{
    Message message = new Message(msg, idMsg);
    client.SendMessageAsync("PingPongTopic", new[] { message });
    //client.Stop();
}
// Verifies Producer.AsyncCount tracks in-flight sends: 0 before sending, 1 while the
// fake broker blocks its response on a semaphore, and 0 again after release.
public void ProducerShouldReportCorrectAmountOfAsyncRequests()
{
    var semaphore = new SemaphoreSlim(0);
    var routerProxy = new FakeBrokerRouter();
    //block the second call returning from send message async
    routerProxy.BrokerConn0.ProduceResponseFunction = () => { semaphore.Wait(); return new ProduceResponse(); };
    var router = routerProxy.Create();
    // maximumAsyncRequests: 1 with BatchSize 1 forces exactly one outstanding request.
    using (var producer = new Producer(router, maximumAsyncRequests: 1) { BatchSize = 1 })
    {
        var messages = new[] { new Message("1") };
        Assert.That(producer.AsyncCount, Is.EqualTo(0));
        var sendTask = producer.SendMessageAsync(BrokerRouterProxy.TestTopic, messages);
        TaskTest.WaitFor(() => producer.AsyncCount > 0);
        Assert.That(producer.AsyncCount, Is.EqualTo(1), "One async operation should be sending.");
        // Unblock the fake broker so the pending send can complete.
        semaphore.Release();
        sendTask.Wait(TimeSpan.FromMilliseconds(500));
        Assert.That(sendTask.IsCompleted, Is.True, "Send task should be marked as completed.");
        Assert.That(producer.AsyncCount, Is.EqualTo(0), "Async should now show zero count.");
    }
}
// Interactive harness: resolves brokers for a topic via TopicClient, starts a
// background console-printing consumer, then sends typed lines (or a random batch
// of 200 on empty input) until "quit".
static void Main(string[] args)
{
    const string topicName = "kafka.SimpleTopic";
    TopicClient topicClient = new TopicClient();
    TopicView topicView = topicClient.getTopic(topicName);
    List <Uri> brokers = getBrokerList(topicView);
    //create an options file that sets up driver preferences
    var options = new KafkaOptions() { Log = new ConsoleLog() };
    options.KafkaServerUri = brokers;
    //start an out of process thread that runs a consumer that will write all received messages to the console
    Task.Run(() =>
    {
        var consumer = new Consumer(new ConsumerOptions(topicName, new BrokerRouter(options)) { Log = new ConsoleLog() });
        foreach (var data in consumer.Consume())
        {
            Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String());
        }
    });
    //create a producer to send messages with
    var producer = new KafkaNet.Producer(new BrokerRouter(options)) { BatchSize = 100, BatchDelayTime = TimeSpan.FromMilliseconds(2000) };
    //take in console read messages
    Console.WriteLine("Type a message and press enter...");
    while (true)
    {
        var message = Console.ReadLine();
        if (message == "quit") { break; }
        if (string.IsNullOrEmpty(message))
        {
            //send a random batch of messages
            SendRandomBatch(producer, topicName, 200);
        }
        else
        {
            // NOTE(review): fire-and-forget — the send Task is not observed here.
            producer.SendMessageAsync(topicName, new[] { new Message(message) });
        }
    }
    using (producer) { }
}
// Produces messages "0".."19" one at a time (each awaited via .Wait to preserve send
// order) and asserts the consumer yields them back in exactly the same order, all
// carrying the test-run's GUID key.
public void ConsumerShouldConsumeInSameOrderAsProduced()
{
    var expected = new List<string> { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19" };
    var testId = Guid.NewGuid().ToString();
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            for (int i = 0; i < 20; i++)
            {
                // Sequential .Wait() keeps the produced order deterministic.
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
            }
            var results = consumer.Consume().Take(20).ToList();
            //ensure the produced messages arrived
            Console.WriteLine("Message order: {0}", string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList()));
            Assert.That(results.Count, Is.EqualTo(20));
            Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
            Assert.That(results.Any(x => x.Key.ToUtf8String() != testId), Is.False);
        }
    }
}
// Serializes the given ad (minus its Empresa reference) to JSON and publishes it
// to the "anuncios" topic on localhost:9092.
public async Task ContarAnuncios(Anuncio anuncio)
{
    var options = new KafkaOptions (new Uri("http://localhost:9092"));
    var router = new BrokerRouter(options);
    var client = new KafkaNet.Producer(router);
    // Cleared before serialization — presumably to avoid serializing the Empresa graph; confirm intent.
    anuncio.Empresa = null;
    string jsonString = JsonConvert.SerializeObject(anuncio);
    // Fix: was .Wait() inside an async method — blocks the caller's thread and wraps
    // failures in AggregateException; await instead.
    await client.SendMessageAsync("anuncios", new[] { new Message(jsonString) });
}
// Minimal producer demo: publish one greeting to "testtopic" and wait for Enter.
static void Main(string[] args)
{
    var kafkaOptions = new KafkaOptions(new Uri("http://evs01cpb592:9092"));
    var brokerRouter = new BrokerRouter(kafkaOptions);
    var producer = new KafkaNet.Producer(brokerRouter);
    var greeting = new Message("Hi Hello! Welcome to Kafka!");
    producer.SendMessageAsync("testtopic", new[] { greeting }).Wait();
    Console.ReadLine();
}
// Publishes a keyed message ("{msg}_{idMsg}") to "PongPingTopic" and bumps the
// served-message counter.
// NOTE(review): the send Task is dropped, so the counter is incremented regardless
// of whether delivery actually succeeds.
public static void ProducirMsg(string msg, string idMsg)
{
    Message message = new Message(msg + "_" + idMsg, idMsg);
    //System.Threading.Thread.Sleep(2000);
    client.SendMessageAsync("PongPingTopic", new[] { message });
    ContadorMensajesAtendidos++;
    //client.Stop();
}
// Sending a single message should yield exactly one produce response.
// Fix: was 'async void' — the test runner cannot observe completion or failures
// of an async void method; async tests must return Task.
public async Task SendAsyncShouldGetOneResultForMessage()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) });
        Assert.That(result.Count, Is.EqualTo(1));
    }
}
// Three messages spread across a two-partition topic should yield one response
// per partition (2 total).
// Fix: was 'async void' — async tests must return Task so failures are observable.
public async Task SendAsyncShouldGetAResultForEachPartitionSentTo()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var result = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message("1"), new Message("2"), new Message("3") });
        Assert.That(result.Count, Is.EqualTo(2));
    }
}
// Three concurrent single-message sends should be batched but still return one
// distinct response per original request.
// Fix: was 'async void' — async tests must return Task so failures are observable.
public async Task SendAsyncShouldGetOneResultForEachPartitionThroughBatching()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var tasks = new[]
        {
            producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] {new Message("1")}),
            producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] {new Message("1")}),
            producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] {new Message("1")}),
        };
        await Task.WhenAll(tasks);
        var result = tasks.SelectMany(x => x.Result).Distinct().ToList();
        Assert.That(result.Count, Is.EqualTo(tasks.Count()));
    }
}
// Sends a single "hello world" message to the "test" topic, then disposes the producer.
static void Main(string[] args)
{
    var kafkaOptions = new KafkaOptions(new Uri("http://sdzyuban-mesos-01:31000"));
    var brokerRouter = new BrokerRouter(kafkaOptions);
    var producer = new KafkaNet.Producer(brokerRouter);
    producer.SendMessageAsync("test", new[] { new Message("hello world") }).Wait();
    using (producer)
    {
    }
}
// With acks=1, the offset returned for a produced message should be exactly one
// less than the partition's max offset afterwards (max points past the last message).
// NOTE(review): Assert.AreEqual's (expected, actual) order appears reversed here;
// harmless for equality but the failure message will be misleading.
public async Task ProducerAckLevel1ResponseOffsetShouldBeEqualToLastOffset()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
    using (var producer = new Producer(router))
    {
        var responseAckLevel1 = await producer.SendMessageAsync(new Message("Ack Level 1"), IntegrationConfig.IntegrationTopic, acks: 1, partition: 0);
        var offsetResponse = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic);
        var maxOffset = offsetResponse.Find(x => x.PartitionId == 0);
        Assert.AreEqual(responseAckLevel1.Offset, maxOffset.Offsets.Max() - 1);
    }
}
// Transport adapter: publishes the message body (decoded with the system default
// encoding) to the Kafka topic named by the destination address's queue.
public async Task Send(object channel, Address address, TransportMessage message, object properties)
{
    var options = new KafkaOptions(new Uri(address.Machine));
    var topic = address.Queue;
    // NOTE(review): Encoding.Default is machine-dependent — confirm UTF-8 isn't intended.
    var messageString = System.Text.Encoding.Default.GetString(message.Body);
    // Fix: the BrokerRouter was created but never disposed (it is disposable — other
    // call sites wrap it in using); dispose it together with the producer.
    using (var router = new BrokerRouter(options))
    using (var client = new Producer(router))
    {
        await client.SendMessageAsync(topic, new[] { new Message(messageString) });
    }
}
public void Send(Point[] points) { using (var client = new Producer(_router)) { var messages = points.Select(x => { var pointToString = _formatter.PointToString(x); return new Message(pointToString); }).ToArray(); client.SendMessageAsync("InfluxCapacitor", messages).Wait(); } }
// With acks=0 the producer should complete the send without waiting for any broker
// response; the task must finish (RanToCompletion) well within the 2-minute bound.
public void ProducerShouldNotExpectResponseWhenAckIsZero()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var sendTask = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) }, acks: 0);
        sendTask.Wait(TimeSpan.FromMinutes(2));
        Assert.That(sendTask.Status, Is.EqualTo(TaskStatus.RanToCompletion));
    }
}
// Sends one timestamped test message to "testCockpit", then waits for a key press.
static void Main(string[] args)
{
    var kafkaOptions = new KafkaOptions(new Uri("http://sjkap556:9092"));
    var brokerRouter = new BrokerRouter(kafkaOptions);
    var producer = new KafkaNet.Producer(brokerRouter);
    for (int index = 0; index < 1; index++)
    {
        var payload = new Message(DateTime.Now + " -- Teste: " + index);
        producer.SendMessageAsync("testCockpit", new[] { payload }).Wait();
    }
    Console.ReadLine();
}
// Interactive harness against hard-coded brokers: a background consumer echoes the
// "TestHarness" topic to the console while the main loop sends typed lines, or a
// random batch of 200 messages on empty input, until "quit".
static void Main(string[] args)
{
    const string topicName = "TestHarness";
    //create an options file that sets up driver preferences
    var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092")) { Log = new ConsoleLog() };
    //start an out of process thread that runs a consumer that will write all received messages to the console
    Task.Run(() =>
    {
        var consumer = new Consumer(new ConsumerOptions(topicName, new BrokerRouter(options)) { Log = new ConsoleLog() });
        foreach (var data in consumer.Consume())
        {
            Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value.ToUtf8String());
        }
    });
    //create a producer to send messages with
    var producer = new Producer(new BrokerRouter(options)) { BatchSize = 100, BatchDelayTime = TimeSpan.FromMilliseconds(2000) };
    //take in console read messages
    Console.WriteLine("Type a message and press enter...");
    while (true)
    {
        var message = Console.ReadLine();
        if (message == "quit") break;
        if (string.IsNullOrEmpty(message))
        {
            //send a random batch of messages
            SendRandomBatch(producer, topicName, 200);
        }
        else
        {
            // NOTE(review): fire-and-forget — the send Task is not observed here.
            producer.SendMessageAsync(topicName, new[] { new Message(message) });
        }
    }
    using (producer) { }
}
// Builds a request payload as topic + subTopic + requestString and publishes it to
// the shared request topic, blocking until the send completes.
// NOTE(review): a new router/producer is created per call and never disposed;
// consider reusing a shared producer.
public static void ProduceRequest(string subTopicAsPayload, string requestString)
{
    string topic = ServiceTopics.RequestTopic;
    string payload = topic + subTopicAsPayload + requestString;
    Message msg = new Message(payload);
    Uri uri = new Uri("http://localhost:9092");
    var options = new KafkaOptions(uri);
    var router = new BrokerRouter(options);
    var client = new KafkaNet.Producer(router);
    client.SendMessageAsync(topic, new List <Message> { msg }).Wait();
}
// Produces one "Hello World <timestamp>" message, keyed by the current second, to
// the given topic, then prints what was sent.
// NOTE(review): Wait(1500) returns false on timeout and that result is discarded,
// so a slow or failed send is silently reported as "Produced".
private static void Produce(string broker, string topic)
{
    var options = new KafkaOptions(new Uri(broker));
    var router = new BrokerRouter(options);
    var client = new Producer(router);
    var current_datetime =DateTime.Now;
    var key = current_datetime.Second.ToString();
    var events = new[] { new Message("Hello World " + current_datetime.ToString(), key) };
    client.SendMessageAsync(topic, events).Wait(1500);
    Console.WriteLine("Produced: Key: {0}. Message: {1}", key, events[0].Value.ToUtf8String());
    using (client) { }
}
// Sends 'count' sequentially-numbered messages as one batch and logs producer
// buffering stats before and after, then each per-partition result.
// Fix: was 'async void' — exceptions from the await were unobservable and could
// crash the process; returning Task keeps existing fire-and-forget callers valid.
private static async Task SendRandomBatch(KafkaNet.Producer producer, string topicName, int count)
{
    //send multiple messages
    var sendTask = producer.SendMessageAsync(topicName, Enumerable.Range(0, count).Select(x => new Message(x.ToString())));
    Console.WriteLine("Posted #{0} messages.  Buffered:{1} AsyncCount:{2}", count, producer.BufferCount, producer.AsyncCount);
    var response = await sendTask;
    Console.WriteLine("Completed send of batch: {0}. Buffered:{1} AsyncCount:{2}", count, producer.BufferCount, producer.AsyncCount);
    foreach (var result in response.OrderBy(x => x.PartitionId))
    {
        Console.WriteLine("Topic:{0} PartitionId:{1} Offset:{2}", result.Topic, result.PartitionId, result.Offset);
    }
}
/// <summary>
/// Sends request and response raw data to Kafka
/// </summary>
/// <param name="data">Request and response metrics in raw form</param>
/// <returns>A completed task</returns>
public override async Task Write(RequestResponseData data)
{
    try
    {
        KafkaOptions options = new KafkaOptions(new Uri("http://SERVER1:9092"), new Uri("http://SERVER2:9092"));
        // Fix: router and producer were never disposed; dispose both.
        using (BrokerRouter router = new BrokerRouter(options))
        using (Producer client = new Producer(router))
        {
            // Fix: was .Wait() inside an async method — blocks the thread and wraps
            // failures in AggregateException; await instead.
            await client.SendMessageAsync("API-Meter Data", new[] { new Message { Value = "NULL for now" } });
        }
    }
    catch (Exception ex)
    {
        // Deliberate best-effort: metric writing must never fail the caller; log and continue.
        Trace.Write(ex.Message);
    }
}
// Sends 'count' sequentially-numbered messages as one batch and logs producer
// buffering stats before and after, then each per-partition result.
// Fix: was 'async void' — exceptions from the await were unobservable and could
// crash the process; returning Task keeps existing fire-and-forget callers valid.
private static async Task SendRandomBatch(Producer producer, string topicName, int count)
{
    //send multiple messages
    var sendTask = producer.SendMessageAsync(topicName, Enumerable.Range(0, count).Select(x => new Message(x.ToString())));
    Console.WriteLine("Posted #{0} messages.  Buffered:{1} AsyncCount:{2}", count, producer.BufferCount, producer.AsyncCount);
    var response = await sendTask;
    Console.WriteLine("Completed send of batch: {0}. Buffered:{1} AsyncCount:{2}", count, producer.BufferCount, producer.AsyncCount);
    foreach (var result in response.OrderBy(x => x.PartitionId))
    {
        Console.WriteLine("Topic:{0} PartitionId:{1} Offset:{2}", result.Topic, result.PartitionId, result.Offset);
    }
}
// Two messages routed to two different brokers should produce exactly one produce
// request per broker connection and two responses overall.
public void ProducerShouldGroupMessagesByBroker()
{
    var router = _routerProxy.Create();
    var producer = new Producer(router);
    var batch = new List<Message>
    {
        new Message { Value = "1" },
        new Message { Value = "2" },
    };
    var response = producer.SendMessageAsync("UnitTest", batch).Result;
    Assert.That(response.Count, Is.EqualTo(2));
    Assert.That(_routerProxy.BrokerConn0.ProduceRequestCallCount, Is.EqualTo(1));
    Assert.That(_routerProxy.BrokerConn1.ProduceRequestCallCount, Is.EqualTo(1));
}
// Even when one broker connection throws, the producer must still dispatch the
// produce request to every connection (both call counts reach 1).
public async Task ShouldSendAsyncToAllConnectionsEvenWhenExceptionOccursOnOne()
{
    var routerProxy = new FakeBrokerRouter();
    routerProxy.BrokerConn1.ProduceResponseFunction = () => { throw new KafkaApplicationException("some exception"); };
    var router = routerProxy.Create();
    using (var producer = new Producer(router))
    {
        var messages = new List<Message> { new Message("1"), new Message("2") };
        var sendTask = producer.SendMessageAsync("UnitTest", messages);
        // Fix: Assert.Throws with an async lambda compiles to an async void delegate,
        // so the thrown exception escapes the assertion entirely; Assert.ThrowsAsync
        // awaits the delegate and observes the exception correctly.
        Assert.ThrowsAsync<KafkaApplicationException>(async () => await sendTask.ConfigureAwait(false));
        Assert.That(routerProxy.BrokerConn0.ProduceRequestCallCount, Is.EqualTo(1));
        Assert.That(routerProxy.BrokerConn1.ProduceRequestCallCount, Is.EqualTo(1));
    }
}
// Two messages routed to two different brokers should produce exactly one produce
// request per broker connection and two responses overall.
public void ProducerShouldGroupMessagesByBroker()
{
    var routerProxy = new FakeBrokerRouter();
    var router = routerProxy.Create();
    using (var producer = new Producer(router))
    {
        var batch = new List<Message> { new Message("1"), new Message("2") };
        var result = producer.SendMessageAsync("UnitTest", batch).Result;
        Assert.That(result.Count, Is.EqualTo(2));
        Assert.That(routerProxy.BrokerConn0.ProduceRequestCallCount, Is.EqualTo(1));
        Assert.That(routerProxy.BrokerConn1.ProduceRequestCallCount, Is.EqualTo(1));
    }
}
// UI button handler: publishes one "Welcome to Kafka {i}" message to "test-topic".
// NOTE(review): a new router/producer is built per click and never disposed, and the
// send Task is dropped — "sent" is printed before delivery is actually confirmed.
private void kafkanetSend_click(object sender, EventArgs e)
{
    //kafka-net library is used in this approach
    string payload = $"Welcome to Kafka {i++}";
    string topic = "test-topic";
    KafkaNet.Protocol.Message msg = new KafkaNet.Protocol.Message(payload);
    Uri uri = new Uri("http://localhost:9092");
    var options = new KafkaOptions(uri);
    var router = new BrokerRouter(options);
    KafkaNet.Producer client = new KafkaNet.Producer(router);
    client.SendMessageAsync(topic, new List <KafkaNet.Protocol.Message> { msg });
    Console.WriteLine("sent");
}
// Starts a background task that sends an incrementing id to partition 0 of the
// given topic once per second, forever; send errors are logged and the loop continues.
// NOTE(review): the method name is a typo for "SendMessageForever" (kept in case of
// unseen callers); the started Task is only held in a local, so the loop can never
// be stopped, observed, or awaited.
private void SandMessageForever(Producer producer, string topic)
{
    var sandMessageForever = Task.Run(() =>
    {
        int id = 0;
        while (true)
        {
            try
            {
                producer.SendMessageAsync(topic, new[] { new Message((++id).ToString()) }, partition: 0).Wait();
                Thread.Sleep(1000);
            }
            catch (Exception ex)
            {
                // Best-effort: log and keep sending.
                _log.InfoFormat("can't send:\n" + ex);
            }
        }
    });
}
// Exploratory test: one of two broker connections throws during produce; documents
// the open design question of how partial-batch failures should be reported.
public void ConnectionExceptionOnOneShouldCommunicateBackWhichMessagesFailed()
{
    //TODO is there a way to communicate back which client failed and which succeeded.
    var routerProxy = new BrokerRouterProxy(_kernel);
    routerProxy.BrokerConn1.ProduceResponseFunction = () => { throw new ApplicationException("some exception"); };
    var router = routerProxy.Create();
    var producer = new Producer(router);
    var messages = new List<Message>
    {
        new Message{Value = "1"}, new Message{Value = "2"}
    };
    //this will produce an exception, but message 1 succeeded and message 2 did not.
    //should we return a ProduceResponse with an error and no error for the other messages?
    //at this point though the client does not know which message is routed to which server.
    //the whole batch of messages would need to be returned.
    // NOTE(review): .Result here will surface the failure as an AggregateException.
    var test = producer.SendMessageAsync("UnitTest", messages).Result;
}
// Volume test: fires 'amount' single-message sends with 'maxAsync' concurrency and
// asserts no produce response came back with an error code.
public void SendAsyncShouldHandleHighVolumeOfMessages(int amount, int maxAsync)
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router, maxAsync) { BatchSize = amount / 2 })
    {
        var tasks = new Task<List<ProduceResponse>>[amount];
        for (var i = 0; i < amount; i++)
        {
            tasks[i] = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) });
        }
        // Accessing .Result blocks until each send completes.
        var results = tasks.SelectMany(x => x.Result).ToList();
        //Because of how responses are batched up and sent to servers, we will usually get multiple responses per requested message batch
        //So this assertion will never pass
        //Assert.That(results.Count, Is.EqualTo(amount));
        Assert.That(results.Any(x => x.Error != 0), Is.False, "Should not have received any results as failures.");
    }
}
// Produces 5 messages and verifies a single FetchMessages call starting at the
// pre-produce offset returns exactly those messages.
public async Task FetchMessagesSimpleTest()
{
    // Creating a broker router and a protocol gateway for the producer and consumer
    var brokerRouter = new BrokerRouter(_options);
    var protocolGateway = new ProtocolGateway(_kafkaUri);
    var partitionId = 1;
    var topic = "ManualConsumerTestTopic";
    Producer producer = new Producer(brokerRouter);
    ManualConsumer consumer = new ManualConsumer(partitionId, topic, protocolGateway, "TestClient", DefaultMaxMessageSetSize);
    // Snapshot the tail offset before producing so the fetch starts at our messages.
    var offset = await consumer.FetchLastOffset();
    // Creating 5 messages
    List<Message> messages = CreateTestMessages(5, 1);
    await producer.SendMessageAsync(topic, messages, partition: partitionId, timeout: TimeSpan.FromSeconds(3));
    // Now let's consume
    var result = (await consumer.FetchMessages(5, offset)).ToList();
    CheckMessages(messages, result);
}
// Produces 1000 messages, consumes them all, and verifies the consumer's final
// offset positions exactly match the server's reported max offsets.
// Fix: was 'async void' — the test runner cannot observe completion or failures
// of an async void method; async tests must return Task.
public async Task ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
{
    const int expectedCount = 1000;
    var options = new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = new ConsoleLog() };
    using (var producerRouter = new BrokerRouter(options))
    using (var producer = new Producer(producerRouter))
    {
        //get current offset and reset consumer to top of log
        var offsets = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);
        using (var consumerRouter = new BrokerRouter(options))
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, consumerRouter), offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            Console.WriteLine("Sending {0} test messages", expectedCount);
            var response = await producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, Enumerable.Range(0, expectedCount).Select(x => new Message(x.ToString())));
            Assert.That(response.Any(x => x.Error != (int)ErrorResponseCode.NoError), Is.False, "Error occured sending test messages to server.");
            var stream = consumer.Consume();
            Console.WriteLine("Reading message back out from consumer.");
            var data = stream.Take(expectedCount).ToList();
            var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.Offset).ToList();
            var serverOffset = await producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).ConfigureAwait(false);
            var positionOffset = serverOffset.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
                .OrderBy(x => x.Offset)
                .ToList();
            Assert.That(consumerOffset, Is.EqualTo(positionOffset), "The consumerOffset position should match the server offset position.");
            // Fix: assertion message said "2000 messages" while expectedCount is 1000;
            // reference the constant so the message cannot drift again.
            Assert.That(data.Count, Is.EqualTo(expectedCount), string.Format("We should have received {0} messages from the server.", expectedCount));
        }
    }
}
// With a consumer buffer of 1 (so the internal pipeline blocks upstream), producing
// 20 messages and consuming them one at a time must not drop or reorder any.
public void ConsumerShouldNotLoseMessageWhenBlocked()
{
    var testId = Guid.NewGuid().ToString();
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        //create consumer with buffer size of 1 (should block upstream)
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { ConsumerBufferSize = 1 }, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            for (int i = 0; i < 20; i++)
            {
                // Sequential .Wait() keeps the produced order deterministic.
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), testId) }).Wait();
            }
            for (int i = 0; i < 20; i++)
            {
                var result = consumer.Consume().Take(1).First();
                Assert.That(result.Key.ToUtf8String(), Is.EqualTo(testId));
                Assert.That(result.Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
        }
    }
}
// Integration variant: produces 20 keyed messages and verifies the consumer's
// reported offset positions advance by exactly 20 from the starting snapshot.
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        // Snapshot the current max offset of every partition before producing.
        var startOffsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
            .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startOffsets))
        {
            for (int i = 0; i < 20; i++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(i.ToString(), "1") }).Wait();
            }
            var results = consumer.Consume().Take(20).ToList();
            //ensure the produced messages arrived
            for (int i = 0; i < 20; i++)
            {
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
            //the current offsets should be 20 positions higher than start
            var currentOffsets = consumer.GetOffsetPosition();
            Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
        }
    }
}
// Builds a GELF 1.1 JSON payload from the four corridor sensor readings and sends
// it to the "US83a" topic.
// NOTE(review): fire-and-forget — the send Task is dropped, so failures are lost;
// consider string interpolation or a JSON serializer over manual concatenation.
private void enviaGraylog(int _c1, int _c2, int _c3, int _c4)
{
    string s = @"{""version"":""1.1"",""host"":""ArduinoUS83a"",""short_message"":""sensor de presenca 83a""," +
        @"""_corredor1"":" + _c1.ToString() + "," +
        @"""_corredor2"":" + _c2.ToString() + "," +
        @"""_corredor3"":" + _c3.ToString() + "," +
        @"""_corredor4"":" + _c4.ToString() + "}";
    client.SendMessageAsync("US83a", new[] { new KafkaNet.Protocol.Message(s) });
}
// Builds a GELF 1.1 JSON payload with the free/occupied counts and sends it to the
// "US93" topic.
// NOTE(review): fire-and-forget (Task dropped); float.ToString is culture-sensitive —
// a comma decimal separator would corrupt the JSON. Presumably invariant culture is
// intended; verify.
private void enviaGraylog(float vazios, float ocupados)
{
    string s = @"{""version"":""1.1"",""host"":""ArduinoUS93"",""short_message"":""sensor de presenca""," +
        @"""_vazios"":" + vazios.ToString() +
        @",""_ocupados"":" + ocupados.ToString() + "}";
    client.SendMessageAsync("US93", new[] { new KafkaNet.Protocol.Message(s) });
}
// Wraps the payload in a single Message and publishes it to the configured topic,
// returning the broker's produce responses.
public async Task <IEnumerable <ProduceResponse> > SendMessage(string payload) =>
    await producer.SendMessageAsync(topic, new List <Message> { new Message(payload) });
// UI button handler: logs a timestamped line to textBox4 and sends a fixed "TESTE"
// message to the "Teste" topic.
// NOTE(review): fire-and-forget — the send Task is dropped, so the UI reports
// "ENVIANDO" regardless of whether delivery succeeds.
private void button9_Click(object sender, EventArgs e)
{
    textBox4.AppendText(DateTime.Now.ToString("h:mm:ss ") + " ENVIANDO TESTE" + Environment.NewLine);
    client.SendMessageAsync("Teste", new[] { new KafkaNet.Protocol.Message("TESTE") });
}