/// <summary>
/// Returns the broker router cached for <paramref name="threadId"/>, creating and
/// caching a new connection when none exists yet. Thread-safe via <c>SyncObj</c>.
/// </summary>
/// <param name="threadId">Identifier of the calling thread, used as the cache key.</param>
/// <returns>The cached or newly created <see cref="IBrokerRouter"/>.</returns>
public static IBrokerRouter CreateConnection(int threadId)
{
    string uri = "http://" + ConfigurationManager.AppSettings["MessagingQueueHostAddress"] + ":" + ConfigurationManager.AppSettings["KafkaPort"];
    lock (SyncObj)
    {
        // Check the cache first: the original built (and leaked) a brand-new
        // BrokerRouter even when a cached connection was about to be returned.
        if (ConnectionsDict.TryGetValue(threadId, out var existing))
        {
            return existing.BrokerRouter;
        }

        var kafkaOptions = new KafkaOptions(new Uri(uri));
        _brokerRouter = new BrokerRouter(kafkaOptions);

        // Store the same options the router was built with (the original stored a
        // fresh parameterless KafkaOptions, silently dropping the configured URI).
        ConnectionsDict.Add(threadId, new KafkaQueueConnection
        {
            KafkaOptions = kafkaOptions,
            BrokerRouter = _brokerRouter
        });
        return _brokerRouter;
    }
}
/// <summary>
/// Returns an existing Consumer with the specified topic and cluster, if Consumer does not exist - creates a new one.
/// </summary>
/// <param name="topic">Topic to consume</param>
/// <param name="cluster">Cluster to work with</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="topic"/> is null or empty.</exception>
public Consumer GetConsumer(string topic, string cluster = null)
{
    if (string.IsNullOrEmpty(topic))
    {
        throw new ArgumentNullException(nameof(topic));
    }

    var clusterInfo = GetClusterInfo(cluster);

    // Single dictionary lookup instead of ContainsKey followed by the indexer.
    if (this.consumers.TryGetValue(topic, out var cached))
    {
        return cached;
    }

    var consumerRouter = new BrokerRouter(new KafkaOptions(clusterInfo.Value.ToArray()));
    var consumer = new Consumer(new ConsumerOptions(topic, consumerRouter));

    // Position the consumer at the first known offset of every non-empty partition.
    // NOTE(review): blocking on GetTopicOffsetAsync here — acceptable only because
    // the method's synchronous signature is part of the public interface.
    var offsets = consumer.GetTopicOffsetAsync(topic).GetAwaiter().GetResult();
    consumer.SetOffsetPosition(
        offsets.Where(x => x.Offsets.Any())
            .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.First()))
            .ToArray());

    this.consumers.Add(topic, consumer);
    return consumer;
}
/// <summary>
/// Verifies that a BlockingConsumer with a one-message buffer (which blocks the
/// upstream fetch) still delivers every produced message, in order.
/// </summary>
public void ConsumerShouldNotLoseMessageWhenBlocked()
{
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        var startPositions = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

        //create consumer with buffer size of 1 (should block upstream)
        var consumerOptions = new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { ConsumerBufferSize = 1 };
        using (var consumer = new BlockingConsumer(consumerOptions, startPositions))
        {
            for (var index = 0; index < 20; index++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(index.ToString(), testId) }).Wait();
            }

            for (var index = 0; index < 20; index++)
            {
                var received = consumer.Consume().Take(1).First();
                Assert.That(received.Key.ToUtf8String(), Is.EqualTo(testId));
                Assert.That(received.Value.ToUtf8String(), Is.EqualTo(index.ToString()));
            }
        }
    }
}
/// <summary>
/// Entry point: reads a protobuf capture file and replays its packets to Kafka,
/// either on a timed schedule or one batch per key press.
/// </summary>
/// <param name="args">Command-line arguments (unused).</param>
static async Task MainAync(string[] args)
{
    Console.Write("Press <return> to start");
    Console.ReadLine();

    var fn = @"..\..\..\..\..\VodafoneTestData\data\proto\8080415317.protobuf";

    // NOTE(review): the name "MainAync" has a typo but is kept to avoid breaking
    // the caller; consider renaming to MainAsync together with its call site.
    // The router is IDisposable and was previously never disposed.
    using (var router = new BrokerRouter(
        kafkaOptions: new KafkaOptions(
            kafkaServerUri: new Uri($"http://{Kafka.Contracts.Endpoint.KafkaHost}:9092"))))
    {
        Console.WriteLine($"Using {Kafka.Contracts.Endpoint.KafkaHost}");

        var packets = ReadProtobufFile(fn);
        using (var client = new Producer(router))
        {
            if (firingMechanism == FiringMechanism.ConcreteTimes)
            {
                await SendMessagesBasedOnTime(client, packets);
            }
            else
            {
                await SendMessagesOnKeyPress(client, packets);
            }
        }
    }
}
/// <summary>
/// Background worker listening on the "pedido-response" Kafka topic.
/// Wires up the options, router and consumer against the local broker.
/// </summary>
/// <param name="logger">Logger injected by the host.</param>
public PagamentoBackground(ILogger <PagamentoBackground> logger)
{
    _logger = logger;
    _kafkaOptions = new KafkaOptions(new Uri("http://localhost:9092"));
    _brokerRouter = new BrokerRouter(_kafkaOptions);
    _consumer = new Consumer(new ConsumerOptions("pedido-response", _brokerRouter));
}
/// <summary>
/// serves Get Resource Methods
/// returns single item if the id is mentioned
/// otherwise returns list of resource items
/// </summary>
/// <param name="msg">Raw request message; the optional resource id follows the topic prefix.</param>
/// <param name="reqTopic">Request topic prefix used to locate the id within <paramref name="msg"/>.</param>
/// <param name="uri">Kafka broker endpoint the response is published to.</param>
private static void ServeGetResource(string msg, string reqTopic, Uri uri)
{
    // Everything after "<reqTopic><GetResource>" is the (optional) resource id.
    // Substring never returns null, so only an emptiness check is needed.
    string idStr = msg.Substring((reqTopic + ServiceTopics.GetResource).Length);
    long? id = string.IsNullOrEmpty(idStr) ? (long?)null : Convert.ToInt64(idStr);

    List <Models.ResourceWithValue> resourceList = ResourceRepository.GetResourceInfo(id);
    string response = JsonConvert.SerializeObject(resourceList);
    string responseTopic = ServiceTopics.ResponseTopic + ServiceTopics.GetResource;
    Message responseMsg = new Message(response);

    // Dispose the router and producer so each served request does not leak a
    // broker connection (the original created both and never released them).
    var responseOptions = new KafkaOptions(uri);
    using (var responseRouter = new BrokerRouter(responseOptions))
    using (var producer = new Producer(responseRouter))
    {
        producer.SendMessageAsync(responseTopic, new List <Message> { responseMsg }).Wait();
    }
}
/// <summary>
/// Creates a Kafka producer client connected to the local broker.
/// </summary>
public Producer()
{
    var brokerUri = new Uri("http://localhost:9092");
    var router = new BrokerRouter(new KafkaOptions(brokerUri));
    client = new KafkaNet.Producer(router);
}
/// <summary>
/// Console producer: prompts for a topic, then publishes each entered line as a
/// Kafka message until the user types "exit".
/// </summary>
static void Main(string[] args)
{
    Console.WriteLine("Enter the Topic");
    string topic = Console.ReadLine();

    var options = new KafkaOptions(new Uri("http://localhost:9092"));
    var router = new BrokerRouter(options);
    var client = new Producer(router);

    while (true)
    {
        Console.WriteLine("Enter message:");
        string payload = Console.ReadLine();
        if (payload == "exit")
        {
            Environment.Exit(0);
        }

        client.SendMessageAsync(topic, new List <Message> { new Message(payload) }).Wait();
    }
}
/// <summary>
/// Console consumer entry point: resolves the broker route for the topic, fetches
/// the last committed offset of every partition, and starts consuming one past it.
/// </summary>
static void Main()
{
    try
    {
        Console.ForegroundColor = ConsoleColor.Red;
        Console.WriteLine(Topic);
        Console.ForegroundColor = ConsoleColor.White;

        var options = new KafkaOptions(new Uri("http://localhost:9092")) { Log = new ConsoleLog() };
        var brokerRouter = new BrokerRouter(options);
        _brokerRoute = brokerRouter.SelectBrokerRoute(Topic);

        List <Topic> topics = brokerRouter.GetTopicMetadata(Topic);

        // Resume one message past the last committed offset on each partition.
        var fetches = topics.First().Partitions
            .Select(partition => new OffsetFetch { Topic = Topic, PartitionId = partition.PartitionId });
        OffsetPosition[] offsetPositions = OffsetFetchRequest(fetches)
            .Select(committed => new OffsetPosition { Offset = committed.Offset + 1, PartitionId = committed.PartitionId })
            .ToArray();

        StartConsumer(offsetPositions);
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
    }
    finally
    {
        ConsoleLog.WaitOnKeys();
    }
}
/// <summary>
/// Produces ten messages, then verifies two consecutive five-message fetches are
/// served correctly (the second half coming from the consumer's cache).
/// </summary>
public async Task FetchMessagesCacheContainsAllRequestTest()
{
    // Creating a broker router and a protocol gateway for the producer and consumer
    var router = new BrokerRouter(_options);
    var producer = new Producer(router);
    var topic = TestConfig.TopicName();
    var consumer = new Consumer(router, _consumerConfig);
    var baseOffset = await router.GetTopicOffsetAsync(topic, _partitionId, CancellationToken.None);

    // Produce ten messages; the two five-message fetches below cover them all.
    var produced = CreateTestMessages(10, 1);
    await producer.SendMessagesAsync(produced, topic, _partitionId, new SendMessageConfiguration(ackTimeout : TimeSpan.FromSeconds(3)), CancellationToken.None);

    // First fetch: the initial five messages.
    var fetched = (await consumer.FetchMessagesAsync(baseOffset, 5, CancellationToken.None)).ToList();
    CheckMessages(produced.Take(5).ToList(), fetched);

    // Second fetch: the next five, starting five past the base offset.
    fetched = (await consumer.FetchMessagesAsync(baseOffset.TopicName, baseOffset.PartitionId, baseOffset.Offset + 5, 5, CancellationToken.None)).ToList();
    CheckMessages(produced.Skip(5).ToList(), fetched);
}
/// <summary>
/// Reads a CSV file from disk and publishes its entire content as one message on
/// the "TripTopic" Kafka topic, then waits for a key press before exiting.
/// </summary>
static void Main(string[] args)
{
    const string topic = "TripTopic";
    try
    {
        // Reads CSV file located in a path and writes to kafka topic
        using (StreamReader reader = new StreamReader(@"C:\Latestbo\essls\EmplogJsons\KAFKATest\PartData.csv"))
        {
            string csvContent = reader.ReadToEnd();

            // Kafka ip
            var options = new KafkaOptions(new Uri("http://14.142.119.130:9092"));
            var router = new BrokerRouter(options);
            var client = new Producer(router);
            client.SendMessageAsync(topic, new List <Message> { new Message(csvContent) }).Wait();
        }
    }
    catch (Exception e)
    {
        Console.Write(e.Message);
    }

    // Wait for user key press...
    Console.ReadLine();
}
/// <summary>
/// Commits an offset for an existing consumer group twice and verifies the second
/// commit overwrites the first.
/// </summary>
public async Task UpdateOrCreateOffsetConsumerGroupExistsTest()
{
    // Creating a broker router and a protocol gateway for the producer and consumer
    var brokerRouter = new BrokerRouter(_kafkaUri, new ConnectionFactory(), _config);
    var partitionId = 0;
    var consumerGroup = TestConfig.ConsumerName();
    var topicName = TestConfig.TopicName();
    var initialOffset = 5;
    var updatedOffset = 10;

    await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, CancellationToken.None);

    // First commit: the group offset should come back as the initial value.
    await brokerRouter.CommitTopicOffsetAsync(topicName, partitionId, consumerGroup, initialOffset, CancellationToken.None);
    var res = await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, consumerGroup, CancellationToken.None);
    Assert.AreEqual(initialOffset, res.Offset);

    // Second commit: the stored offset must be replaced with the new value.
    await brokerRouter.CommitTopicOffsetAsync(topicName, partitionId, consumerGroup, updatedOffset, CancellationToken.None);
    res = await brokerRouter.GetTopicOffsetAsync(topicName, partitionId, consumerGroup, CancellationToken.None);
    Assert.AreEqual(updatedOffset, res.Offset);
}
/// <summary>
/// Produces messages larger than the consumer's maximum partition fetch size and
/// verifies that fetching them raises a <c>BufferUnderRunException</c>.
/// </summary>
public async Task FetchMessagesBufferUnderRunTest()
{
    // Creating a broker router and a protocol gateway for the producer and consumer
    var router = new BrokerRouter(_options);
    var halfMessageSize = 4096 / 2;
    var producer = new Producer(router);
    var topic = TestConfig.TopicName();

    // Fetch buffer is deliberately half the size of one message payload.
    var consumer = new Consumer(router, new ConsumerConfiguration(maxPartitionFetchBytes: halfMessageSize));
    var baseOffset = await router.GetTopicOffsetAsync(topic, _partitionId, CancellationToken.None);

    // Produce ten 4 KB messages, each bigger than the fetch buffer.
    var oversized = CreateTestMessages(10, 4096);
    await producer.SendMessagesAsync(oversized, topic, _partitionId, new SendMessageConfiguration(ackTimeout : TimeSpan.FromSeconds(3)), CancellationToken.None);

    try
    {
        // Now let's consume
        await consumer.FetchMessagesAsync(baseOffset, 5, CancellationToken.None);
        Assert.Fail("should have thrown BufferUnderRunException");
    }
    catch (BufferUnderRunException ex)
    {
        Console.WriteLine(ex.ToString());
    }
}
/// <summary>
/// Reads up to three messages from the "ForecastCollaboration-SubmittedCommit"
/// topic and returns their UTF-8 decoded payloads.
/// </summary>
/// <returns>200 OK with a list of at most three message payloads.</returns>
public IHttpActionResult GetFromKafka()
{
    string topic = "ForecastCollaboration-SubmittedCommit";
    Uri uri = new Uri(@"http://172.20.188.11:9092");
    var options = new KafkaOptions(uri);

    // Typed List<string> instead of the non-generic ArrayList (same JSON output).
    var payloads = new List<string>();
    const int maxMsg = 3;

    // Dispose the router and consumer so each web request releases its broker
    // connection (the original leaked both on every call).
    using (var brokerRouter = new BrokerRouter(options))
    using (var kafkaConsumer = new Consumer(new ConsumerOptions(topic, brokerRouter)))
    {
        int count = 0;
        //Consume returns a blocking IEnumerable (ie: never ending stream)
        foreach (var message in kafkaConsumer.Consume())
        {
            if (message == null || count == maxMsg)
            {
                break;
            }
            payloads.Add(Encoding.UTF8.GetString(message.Value));
            count++;
        }
    }

    return Ok(payloads);
}
/// <summary>
/// Builds a <see cref="BrokerRouter"/> from the comma-separated "BrokerList"
/// entry in application settings.
/// </summary>
/// <returns>A router configured with every broker URI from configuration.</returns>
private static BrokerRouter InitDefaultConfig()
{
    var brokerUris = ConfigurationManager.AppSettings["BrokerList"]
        .Split(',')
        .Select(address => new Uri(address))
        .ToArray();
    return new BrokerRouter(new KafkaOptions(brokerUris));
}
/// <summary>
/// Publishes an upsert notification for <paramref name="context"/>'s document to
/// a Kafka topic named after the document's runtime type.
/// </summary>
/// <typeparam name="TDocument">Type of the document being upserted.</typeparam>
/// <param name="context">Upsert context carrying the document and its id.</param>
public virtual async Task UpsertDocument <TDocument>(IUpsertDocumentContext <TDocument> context) where TDocument : class
{
    var topicName = context.Document.GetType().Name;

    // Kept for the commented alternative-provider path below.
    var config = this._producerConfigManager.GetConfiguration(x => (x.ConfigurationScope & ConfigurationScope.Producer) == ConfigurationScope.Producer);

    var options = new KafkaOptions (new Uri("http://localhost:9092"));
    using (var router = new BrokerRouter(options))
    using (var client = new KafkaNet.Producer(router))
    {
        // await instead of .Wait(): blocking inside an async method risks
        // deadlocks and wastes a thread-pool thread.
        await client.SendMessageAsync(topicName, new[] { new Message("!!!Test message sent from alternative .Net => kafka provider!!!") }, -1, TimeSpan.FromSeconds(5));
    }

    //var valueSerialiser = new BinarySerializer<TDocument>();
    // var keySerialiser = new BinarySerializer<Guid>();
    //var deliveryHandler = new DeliveryHandler<Guid, TDocument>();
    //using (var producer = new Producer<Guid, TDocument>(config, keySerialiser, valueSerialiser))
    //{
    //    var deliveryReport = await producer.ProduceAsync(topicName, context.Id, context.Document);
    //    //producer.ProduceAsync(topicName, null, context.Document, deliveryHandler);
    //    //producer.Flush();
    //}
}
/// <summary>
/// Starts four colored console consumers, each resuming one message past the
/// last committed offset for the topic.
/// </summary>
static void Main(string[] args)
{
    try
    {
        var options = new KafkaOptions(new Uri("http://localhost:9092")) { Log = new ConsoleLog() };
        var brokerRouter = new BrokerRouter(options);
        _brokerRoute = brokerRouter.SelectBrokerRoute(Topic);

        // One consumer per color; the committed offset is re-fetched for each,
        // exactly as the original four explicit calls did.
        foreach (var color in new[] { ConsoleColor.Blue, ConsoleColor.Green, ConsoleColor.Red, ConsoleColor.Yellow })
        {
            StartConsumer(color, OffsetFetchRequest().Offset + 1);
        }
    }
    catch (Exception ex)
    {
        Console.WriteLine(ex);
    }
    finally
    {
        ConsoleLog.WaitOnKeys();
    }
}
/// <summary>
/// Reads latest time-series metadata from specified Kafka <paramref name="topic"/>.
/// </summary>
/// <param name="router">Kafka router connection.</param>
/// <param name="topic">Kafka topic.</param>
/// <param name="serializationTime">Serialization time.</param>
/// <param name="statusMessage">Status message function.</param>
/// <returns>Latest <see cref="TimeSeriesMetadata"/> instance read from Kafka.</returns>
public static TimeSeriesMetadata ReadFromKafka(BrokerRouter router, string topic, Action <string> statusMessage, out Ticks serializationTime)
{
    if ((object)router == null)
    {
        throw new ArgumentNullException(nameof(router));
    }

    if (string.IsNullOrWhiteSpace(topic))
    {
        throw new ArgumentNullException(nameof(topic));
    }

    // The size record's key carries the serialization timestamp; its value
    // carries the serialized metadata length in bytes.
    Message sizeMessage = ReadMessage(router, topic, SizeMessage);
    serializationTime = BitConverter.ToInt64(sizeMessage.Key, 0);

    // Round the expected payload size up to the next 4 KB boundary for the read.
    int readSize = (int)Math.Ceiling(BitConverter.ToInt64(sizeMessage.Value, 0) / 4096.0D) * 4096;
    Message valueMessage = ReadMessage(router, topic, ValueMessage, readSize);

    // Mismatched keys mean size and value records come from different snapshots.
    if (serializationTime != BitConverter.ToInt64(valueMessage.Key, 0))
    {
        statusMessage?.Invoke("WARNING: Timestamp keys for metadata size and value records are mismatched...");
    }

    using (MemoryStream stream = new MemoryStream(valueMessage.Value))
    {
        return Deserialize(stream);
    }
}
/// <summary>
/// Verifies that committing an offset for an existing consumer group succeeds
/// and the response carries no error for the topic.
/// </summary>
public async Task OffsetCommitShouldStoreAndReturnSuccess()
{
    const int partitionId = 0;

    // using guarantees the router is disposed even when an assertion throws
    // (the original only called Dispose on the success path).
    using (var router = new BrokerRouter(_options))
    {
        await router.GetTopicMetadataAsync(TestConfig.TopicName(), CancellationToken.None);
        var conn = router.GetBrokerRoute(TestConfig.TopicName(), partitionId);

        // ensure the group exists
        var group = new GroupCoordinatorRequest(TestConfig.ConsumerName());
        var groupResponse = await conn.Connection.SendAsync(group, CancellationToken.None);
        Assert.That(groupResponse, Is.Not.Null);
        Assert.That(groupResponse.ErrorCode, Is.EqualTo(ErrorResponseCode.None));

        var commit = new OffsetCommitRequest(group.GroupId, new [] { new OffsetCommitRequest.Topic(TestConfig.TopicName(), partitionId, 10, null) });
        var response = await conn.Connection.SendAsync(commit, CancellationToken.None);
        var topic = response.Topics.FirstOrDefault();

        Assert.That(topic, Is.Not.Null);
        Assert.That(topic.ErrorCode, Is.EqualTo(ErrorResponseCode.None));
    }
}
/// <summary>
/// Produces ten messages, then fetches two disjoint ranges so the second fetch
/// cannot be served from what the first fetch cached.
/// </summary>
public async Task FetchMessagesCacheContainsNoneOfRequestTest()
{
    // Creating a broker router and a protocol gateway for the producer and consumer
    var router = new BrokerRouter(_options);
    var gateway = new ProtocolGateway(_kafkaUri);
    var producer = new Producer(router);
    var consumer = new ManualConsumer(_partitionId, _topic, gateway, "TestClient", DefaultMaxMessageSetSize);

    var startOffset = await consumer.FetchLastOffset();

    // Produce ten 4 KB messages.
    List <Message> produced = CreateTestMessages(10, 4096);
    await producer.SendMessageAsync(_topic, produced, partition : _partitionId, timeout : TimeSpan.FromSeconds(3));

    // First fetch: seven messages from the starting offset.
    var fetched = (await consumer.FetchMessages(7, startOffset)).ToList();
    CheckMessages(produced.Take(7).ToList(), fetched);

    // Second fetch: two messages starting beyond the previously fetched range.
    fetched = (await consumer.FetchMessages(2, startOffset + 8)).ToList();
    CheckMessages(produced.Skip(8).ToList(), fetched);
}
/// <summary>
/// Rewinds each partition by 1000 messages and verifies the consumer reads all
/// 2000 back and finishes at the server's current offset positions.
/// </summary>
public void ConsumerShouldMoveToNextAvailableOffsetWhenQueryingForNextMessage()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        Assert.That(offsets.Count, Is.EqualTo(2), "This test requires there to be exactly two paritions.");
        Assert.That(offsets.Count(x => x.Offsets.Max(o => o) > 1000), Is.EqualTo(2), "Need more than 1000 messages in each topic for this test to work.");

        //set offset 1000 messages back on one partition. We should be able to get all 1000 messages over multiple calls.
        var rewoundPositions = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max() - 1000)).ToArray();
        using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), rewoundPositions))
        {
            var received = new List <Message>();
            var stream = consumer.Consume();

            var takeTask = Task.Factory.StartNew(() => received.AddRange(stream.Take(2000)));
            takeTask.Wait(TimeSpan.FromSeconds(10));

            var consumerOffset = consumer.GetOffsetPosition().OrderBy(x => x.Offset).ToList();
            var serverOffset = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).OrderBy(x => x.Offset).ToList();

            Assert.That(consumerOffset, Is.EqualTo(serverOffset), "The consumerOffset position should match the server offset position.");
            Assert.That(received.Count, Is.EqualTo(2000), "We should have received 2000 messages from the server.");
        }
    }
}
/// <summary>
/// Produces one message at ack level 1, records its offset, then consumes from
/// exactly that offset and verifies the same payload comes back.
/// </summary>
public async Task ConsumeByOffsetShouldGetSameMessageProducedAtSameOffset()
{
    long committedOffset;
    Guid payload = Guid.NewGuid();

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
    using (var producer = new Producer(router))
    {
        ProduceResponse responseAckLevel1 = await producer.SendMessageAsync(new Message(payload.ToString()), IntegrationConfig.IntegrationTopic, acks : 1, partition : 0);
        committedOffset = responseAckLevel1.Offset;
    }

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri) { Log = IntegrationConfig.NoDebugLog }))
    using (var consumer = new Consumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router) { MaxWaitTimeForMinimumBytes = TimeSpan.Zero }, new OffsetPosition[] { new OffsetPosition(0, committedOffset) }))
    {
        var result = consumer.Consume().Take(1).ToList().FirstOrDefault();
        Assert.AreEqual(payload.ToString(), result.Value.ToUtf8String());
    }
}
/// <summary>
/// Commits an offset together with metadata and verifies both round-trip intact
/// through a subsequent offset fetch.
/// </summary>
public void OffsetCommitShouldStoreMetadata()
{
    const int partitionId = 0;
    const long offset = 101;
    const string metadata = "metadata";

    using (var router = new BrokerRouter(Options))
    {
        var route = router.SelectBrokerRoute(IntegrationConfig.IntegrationTopic, partitionId);

        // Commit offset + metadata for the consumer group.
        var commit = CreateOffsetCommitRequest(IntegrationConfig.IntegrationConsumer, partitionId, offset, metadata);
        var commitResponse = route.Connection.SendAsync(commit).Result.FirstOrDefault();
        Assert.That(commitResponse, Is.Not.Null);
        Assert.That(commitResponse.Error, Is.EqualTo((int)ErrorResponseCode.NoError));

        // Fetch it back and check both the offset and the metadata survived.
        var fetch = CreateOffsetFetchRequest(IntegrationConfig.IntegrationConsumer, partitionId);
        var fetchResponse = route.Connection.SendAsync(fetch).Result.FirstOrDefault();
        Assert.That(fetchResponse, Is.Not.Null);
        Assert.That(fetchResponse.Error, Is.EqualTo((int)ErrorResponseCode.NoError));
        Assert.That(fetchResponse.Offset, Is.EqualTo(offset));
        Assert.That(fetchResponse.MetaData, Is.EqualTo(metadata));
    }
}
/// <summary>
/// Fires a large number of concurrent sends through a producer with a bounded
/// async limit and verifies no resulting batch reports an error.
/// </summary>
public async Task SendAsyncShouldHandleHighVolumeOfMessages(int amount, int maxAsync)
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router, maxAsync) { BatchSize = amount / 2 })
    {
        var sendTasks = new Task <ProduceResponse[]> [amount];
        for (var i = 0; i < amount; i++)
        {
            sendTasks[i] = producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(Guid.NewGuid().ToString()) });
        }

        var results = await Task.WhenAll(sendTasks);

        //Because of how responses are batched up and sent to servers, we will usually get multiple responses per requested message batch
        //So this assertion will never pass
        //Assert.That(results.Count, Is.EqualTo(amount));

        Assert.That(results.Any(x => x.Any(y => y.Error != 0)), Is.False, "Should not have received any results as failures.");
    }
}
/// <summary>
/// Produces gzip-compressed messages and verifies the consumer transparently
/// decompresses them in order.
/// </summary>
public void EnsureGzipCanDecompressMessageFromKafka()
{
    const int numberOfMessages = 3;

    // using blocks so the router, producer and consumer are released even when an
    // assertion fails — the original leaked the router entirely and only disposed
    // producer/consumer on the success path via trailing empty using statements.
    using (var router = new BrokerRouter(_options))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationCompressionTopic).Result;
        var options = new ConsumerOptions(IntegrationConfig.IntegrationCompressionTopic, router)
        {
            PartitionWhitelist = new List <int>() { 0 }
        };

        using (var consumer = new Consumer(options, offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray()))
        {
            for (int i = 0; i < numberOfMessages; i++)
            {
                // Wait for each send: fire-and-forget tasks would hide producer
                // failures (other tests in this file also block on each send).
                producer.SendMessageAsync(IntegrationConfig.IntegrationCompressionTopic, new[] { new Message(i.ToString()) }, codec: MessageCodec.CodecGzip, partition: 0).Wait();
            }

            var results = consumer.Consume(new CancellationTokenSource(TimeSpan.FromMinutes(1)).Token).Take(numberOfMessages).ToList();
            for (int i = 0; i < numberOfMessages; i++)
            {
                Assert.That(results[i].Value.ToUtf8String(), Is.EqualTo(i.ToString()));
            }
        }
    }
}
/// <summary>
/// Demo harness: echoes consumed "TestHarness" messages from a background task
/// while forwarding console input as new messages until the user types "quit".
/// </summary>
static void Main(string[] args)
{
    var options = new KafkaOptions(new Uri("http://CSDKAFKA01:9092"), new Uri("http://CSDKAFKA02:9092"))
    {
        Log = new ConsoleLog()
    };
    var router = new BrokerRouter(options);
    var client = new Producer(router);

    // Background reader: print every message arriving on the topic.
    Task.Factory.StartNew(() =>
    {
        var consumer = new Consumer(new ConsumerOptions("TestHarness", router));
        foreach (var data in consumer.Consume())
        {
            Console.WriteLine("Response: P{0},O{1} : {2}", data.Meta.PartitionId, data.Meta.Offset, data.Value);
        }
    });

    Console.WriteLine("Type a message and press enter...");
    while (true)
    {
        var message = Console.ReadLine();
        if (message == "quit")
        {
            break;
        }
        client.SendMessageAsync("TestHarness", new[] { new Message(message) });
    }

    // Dispose producer and router once the input loop exits.
    using (client)
    using (router)
    {
    }
}
/// <summary>
/// Produces 20 messages and verifies the consumer's reported offset positions
/// advance by exactly 20 from the starting positions.
/// </summary>
public void ConsumerShouldBeAbleToGetCurrentOffsetInformation()
{
    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var startOffsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result
            .Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max()))
            .ToArray();

        using (var consumer = new BlockingConsumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startOffsets))
        {
            for (int index = 0; index < 20; index++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(index.ToString(), "1") }).Wait();
            }

            //ensure the produced messages arrived
            var consumed = consumer.Consume().Take(20).ToList();
            for (int index = 0; index < 20; index++)
            {
                Assert.That(consumed[index].Value.ToUtf8String(), Is.EqualTo(index.ToString()));
            }

            //the current offsets should be 20 positions higher than start
            var currentOffsets = consumer.GetOffsetPosition();
            Assert.That(currentOffsets.Sum(x => x.Offset) - startOffsets.Sum(x => x.Offset), Is.EqualTo(20));
        }
    }
}
/// <summary>
/// Commits an offset for the integration consumer group and verifies the same
/// value is returned by a subsequent offset fetch.
/// </summary>
public async Task OffsetCommitShouldStoreOffsetValue()
{
    const int partitionId = 0;
    const long offset = 99;

    // using guarantees disposal even when an assertion throws (the original only
    // called Dispose on the success path; sibling tests already use using).
    using (var router = new BrokerRouter(Options))
    {
        await router.RefreshMissingTopicMetadata(IntegrationConfig.IntegrationTopic);
        var conn = router.SelectBrokerRouteFromLocalCache(IntegrationConfig.IntegrationTopic, partitionId);

        var commit = CreateOffsetCommitRequest(IntegrationConfig.IntegrationConsumer, partitionId, offset);
        var commitResponse = (await conn.Connection.SendAsync(commit)).FirstOrDefault();
        Assert.That(commitResponse, Is.Not.Null);
        Assert.That(commitResponse.Error, Is.EqualTo((int)ErrorResponseCode.NoError));

        var fetch = CreateOffsetFetchRequest(IntegrationConfig.IntegrationConsumer, partitionId);
        var fetchResponse = (await conn.Connection.SendAsync(fetch)).FirstOrDefault();
        Assert.That(fetchResponse, Is.Not.Null);
        Assert.That(fetchResponse.Error, Is.EqualTo((int)ErrorResponseCode.NoError));
        Assert.That(fetchResponse.Offset, Is.EqualTo(offset));
    }
}
/// <summary>
/// Produces the numbers 0-19 as messages and verifies the consumer delivers them
/// back in exactly the order produced, all tagged with the test id.
/// </summary>
public void ConsumerShouldConsumeInSameOrderAsProduced()
{
    // Same "0".."19" list as before, generated instead of hand-written.
    var expected = Enumerable.Range(0, 20).Select(n => n.ToString()).ToList();
    var testId = Guid.NewGuid().ToString();

    using (var router = new BrokerRouter(new KafkaOptions(IntegrationConfig.IntegrationUri)))
    using (var producer = new Producer(router))
    {
        var offsets = producer.GetTopicOffsetAsync(IntegrationConfig.IntegrationTopic).Result;
        var startPositions = offsets.Select(x => new OffsetPosition(x.PartitionId, x.Offsets.Max())).ToArray();

        using (var consumer = new BlockingConsumer(new ConsumerOptions(IntegrationConfig.IntegrationTopic, router), startPositions))
        {
            for (int n = 0; n < 20; n++)
            {
                producer.SendMessageAsync(IntegrationConfig.IntegrationTopic, new[] { new Message(n.ToString(), testId) }).Wait();
            }

            //ensure the produced messages arrived
            var results = consumer.Consume().Take(20).ToList();
            Console.WriteLine("Message order: {0}", string.Join(", ", results.Select(x => x.Value.ToUtf8String()).ToList()));

            Assert.That(results.Count, Is.EqualTo(20));
            Assert.That(results.Select(x => x.Value.ToUtf8String()).ToList(), Is.EqualTo(expected), "Expected the message list in the correct order.");
            Assert.That(results.Any(x => x.Key.ToUtf8String() != testId), Is.False);
        }
    }
}
/// <summary>
/// Fetching an offset for a consumer group that never committed should not be an
/// error on protocol version 1+, and must report offset -1 in both versions.
/// </summary>
public void OffsetFetchRequestOfNonExistingGroupShouldReturnNoError([Values(0, 1)] int version)
{
    //From documentation: https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-OffsetFetchRequest
    //Note that if there is no offset associated with a topic-partition under that consumer group the broker does not set an error code
    //(since it is not really an error), but returns empty metadata and sets the offset field to -1.
    const int partitionId = 0;

    using (var router = new BrokerRouter(Options))
    {
        var request = CreateOffsetFetchRequest(version, Guid.NewGuid().ToString(), partitionId);
        var route = router.SelectBrokerRoute(IntegrationConfig.IntegrationTopic, partitionId);
        var response = route.Connection.SendAsync(request).Result.FirstOrDefault();

        Assert.That(response, Is.Not.Null);

        if (version == 0)
        {
            // Version 0 (storing in zookeeper) results in unknown topic or partition as the consumer group
            // and partition are used to make up the string, and when it is missing it results in an error
            Assert.That(response.Error, Is.EqualTo((int)ErrorResponseCode.UnknownTopicOrPartition));
        }
        else
        {
            Assert.That(response.Error, Is.EqualTo((int)ErrorResponseCode.NoError));
        }

        Assert.That(response.Offset, Is.EqualTo(-1));
    }
}