/// <summary>
/// Sends the metadata request to the given connections, retrying (with backoff) while the
/// response validates as retryable, up to a fixed number of attempts.
/// </summary>
/// <param name="connections">The server connections to query.</param>
/// <param name="request">The metadata request to send.</param>
/// <returns>The last metadata response received, or null if no response could be obtained.</returns>
/// <exception cref="Exception">Rethrows the validation exception when a response validates as an error.</exception>
private async Task<MetadataResponse> Get(IKafkaConnection[] connections, MetadataRequest request)
{
    var maxRetryAttempt = 2;
    var performRetry = false;
    var retryAttempt = 0;
    MetadataResponse metadataResponse = null;

    do
    {
        performRetry = false;
        // Fix: added ConfigureAwait(false) for consistency with the BackoffOnRetry await below —
        // library code should not capture the caller's synchronization context.
        metadataResponse = await GetMetadataResponse(connections, request).ConfigureAwait(false);
        if (metadataResponse == null) return null;

        foreach (var validation in ValidateResponse(metadataResponse))
        {
            switch (validation.Status)
            {
                case ValidationResult.Retry:
                    performRetry = true;
                    _log.WarnFormat(validation.Message);
                    break;

                case ValidationResult.Error:
                    throw validation.Exception;
            }
        }

        // ++retryAttempt before the call so backoff scales with the attempt count.
        await BackoffOnRetry(++retryAttempt, performRetry).ConfigureAwait(false);
    } while (retryAttempt < maxRetryAttempt && _interrupted == false && performRetry);

    return metadataResponse;
}
/// <summary>
/// Resolves the metadata ("decompiled") source for the requested type by scanning every
/// project in the current solution for a type with the given metadata name and assembly.
/// </summary>
/// <param name="request">Carries the type name, assembly name and a per-lookup timeout in milliseconds.</param>
/// <returns>A response with SourceName/Source populated on success; an empty response otherwise.</returns>
public async Task<MetadataResponse> Metadata(MetadataRequest request)
{
    var response = new MetadataResponse();
    foreach (var project in _workspace.CurrentSolution.Projects)
    {
        var compilation = await project.GetCompilationAsync();
        var symbol = compilation.GetTypeByMetadataName(request.TypeName);
        if (symbol != null && symbol.ContainingAssembly.Name == request.AssemblyName)
        {
            // Fix: dispose the CancellationTokenSource — it owns a timer and was previously leaked.
            using (var cancellationSource = new CancellationTokenSource(TimeSpan.FromMilliseconds(request.Timeout)))
            {
                var document = await MetadataHelper.GetDocumentFromMetadata(project, symbol, cancellationSource.Token);
                if (document != null)
                {
                    var source = await document.GetTextAsync();
                    response.SourceName = MetadataHelper.GetFilePathForSymbol(project, symbol);
                    response.Source = source.ToString();
                    return response;
                }
            }
        }
    }
    // No project produced a metadata document; return the empty response.
    return response;
}
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <param name="topics">The collection of topics to get metadata for.</param>
/// <returns>MetadataResponse validated to be complete, or null when no topics were requested or no response was received.</returns>
public MetadataResponse Get(IKafkaConnection[] connections, IEnumerable<string> topics)
{
    var request = new MetadataRequest { Topics = topics.ToList() };
    if (request.Topics.Count <= 0) return null;

    // Fix: cap the number of retry rounds (mirroring the async overload's maxRetryAttempt)
    // so a response that keeps validating as Retry cannot spin this loop forever.
    var maxRetryAttempt = 2;
    var performRetry = false;
    var retryAttempt = 0;
    MetadataResponse metadataResponse = null;

    do
    {
        performRetry = false;
        metadataResponse = GetMetadataResponse(connections, request);
        if (metadataResponse == null) return null;

        foreach (var validation in ValidateResponse(metadataResponse))
        {
            switch (validation.Status)
            {
                case ValidationResult.Retry:
                    performRetry = true;
                    _log.WarnFormat(validation.Message);
                    break;

                case ValidationResult.Error:
                    throw validation.Exception;
            }
        }

        BackoffOnRetry(++retryAttempt, performRetry);
    } while (retryAttempt < maxRetryAttempt && _interrupted == false && performRetry);

    return metadataResponse;
}
/// <summary>
/// Queries each default broker in turn for topic metadata, returning the first usable
/// response and updating the internal metadata cache with it.
/// </summary>
/// <param name="connections">Broker connections to try, in order.</param>
/// <param name="topics">Topics to request metadata for.</param>
/// <returns>The first non-empty metadata response.</returns>
/// <exception cref="ServerUnreachableException">Thrown when no broker answers.</exception>
private MetadataResponse CycleConnectionsForTopicMetadataAsync(IEnumerable<IKafkaConnection> connections, IEnumerable<string> topics)
{
    var metadataRequest = new MetadataRequest { Topics = topics.ToList() };

    //try each default broker until we find one that is available
    foreach (var connection in connections)
    {
        try
        {
            var responses = connection.SendAsync(metadataRequest).Result;
            if (responses == null || responses.Count <= 0) continue;

            var metadataResponse = responses.First();
            UpdateInternalMetadataCache(metadataResponse);
            return metadataResponse;
        }
        catch (Exception ex)
        {
            _kafkaOptions.Log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", connection.KafkaUri, ex);
        }
    }

    // Every broker failed or returned nothing; report the full server list.
    throw new ServerUnreachableException(
        string.Format(
            "Unable to query for metadata from any of the default Kafka servers. At least one provided server must be available. Server list: {0}",
            string.Join(", ", _kafkaOptions.KafkaServerUri.Select(x => x.ToString()))));
}
// End-to-end metadata navigation test: go-to-definition on a BCL symbol ("int"),
// fetch its metadata source, then run go-to-definition again *inside* that metadata file.
public async Task ReturnsDefinitionInMetadata_FromMetadata_WhenSymbolIsType(string filename)
{
    var testFile = new TestFile(filename, @" using System; class Bar { public void Baz() { var number = in$$t.MaxValue; } }");
    using (var host = CreateOmniSharpHost(testFile))
    {
        var point = testFile.Content.GetPointFromPosition();

        // 1. start by asking for definition of "int" (the $$ marker in the test file)
        var gotoDefinitionRequest = CreateRequest(testFile.FileName, point.Line, point.Offset, wantMetadata: true, timeout: 60000);
        var gotoDefinitionRequestHandler = GetRequestHandler(host);
        var gotoDefinitionResponse = await gotoDefinitionRequestHandler.Handle(gotoDefinitionRequest);
        var gotoDefinitionResponseMetadataSource = GetMetadataSource(gotoDefinitionResponse);

        // 2. now, based on the response information, go to the metadata endpoint
        // and ask for "int"-specific metadata
        var metadataRequest = new MetadataRequest
        {
            AssemblyName = gotoDefinitionResponseMetadataSource.AssemblyName,
            TypeName = gotoDefinitionResponseMetadataSource.TypeName,
            ProjectName = gotoDefinitionResponseMetadataSource.ProjectName,
            Language = gotoDefinitionResponseMetadataSource.Language
        };
        var metadataRequestHandler = host.GetRequestHandler<MetadataService>(OmniSharpEndpoints.Metadata);
        var metadataResponse = await metadataRequestHandler.Handle(metadataRequest);

        // 3. the metadata response contains SourceName (metadata "file") and SourceText (syntax tree);
        // use the source to locate "IComparable", an interface implemented by the Int32 struct
        var metadataTree = CSharpSyntaxTree.ParseText(metadataResponse.Source);
        var iComparable = metadataTree.GetCompilationUnitRoot().
            DescendantNodesAndSelf().
            OfType<BaseTypeDeclarationSyntax>().First().
            BaseList.Types.FirstOrDefault(x => x.Type.ToString() == "IComparable");
        var relevantLineSpan = iComparable.GetLocation().GetLineSpan();

        // 4. now ask for the definition of "IComparable";
        // pass in the SourceName (metadata "file") as FileName - since it's not a regular file in our workspace
        var metadataNavigationRequest = CreateRequest(metadataResponse.SourceName, relevantLineSpan.StartLinePosition.Line, relevantLineSpan.StartLinePosition.Character, wantMetadata: true);
        var metadataNavigationResponse = await gotoDefinitionRequestHandler.Handle(metadataNavigationRequest);
        var metadataNavigationResponseMetadataSource = GetMetadataSource(metadataNavigationResponse);
        var info = GetInfo(metadataNavigationResponse);

        // 5. validate the response to be matching the expected IComparable meta info
        Assert.NotNull(metadataNavigationResponseMetadataSource);
        Assert.Equal(AssemblyHelpers.CorLibName, metadataNavigationResponseMetadataSource.AssemblyName);
        Assert.Equal("System.IComparable", metadataNavigationResponseMetadataSource.TypeName);
        // A zero line/column would mean navigation landed at the file start, i.e. failed.
        Assert.NotEqual(0, info.Single().Line);
        Assert.NotEqual(0, info.Single().Column);
    }
}
public void MetadataResponseShouldDecode()
{
    // Build a raw metadata response (correlation id 1, topic "Test"),
    // drop the 4-byte length prefix, and decode the remainder.
    var rawBytes = MessageHelper.CreateMetadataResponse(1, "Test").Skip(4).ToArray();
    var decoded = new MetadataRequest().Decode(rawBytes).First();

    Assert.That(decoded.CorrelationId, Is.EqualTo(1));
    Assert.That(decoded.Topics[0].Name, Is.EqualTo("Test"));
}
public void MetadataResponseShouldDecode()
{
    // Decode a synthetic metadata response after stripping its 4-byte size header.
    var request = new MetadataRequest();
    var payload = MessageHelper.CreateMetadataResponse(1, "Test").Skip(4).ToArray();
    var response = request.Decode(payload).First();

    // Both the correlation id and the topic name must round-trip.
    Assert.That(response.CorrelationId, Is.EqualTo(1));
    Assert.That(response.Topics[0].Name, Is.EqualTo("Test"));
}
/// <summary>
/// Handles a metadata query: rejects a missing body with 400, otherwise looks up by kind/key.
/// </summary>
/// <param name="mdreq">The posted metadata request; may be null when the body failed to bind.</param>
/// <returns>The keyed lookup result, or a BadRequest error response.</returns>
public HttpResponseMessage PostQuery(MetadataRequest mdreq)
{
    return mdreq == null
        ? Request.CreateErrorResponse(HttpStatusCode.BadRequest, "invalid request")
        : GetByKey(mdreq.Kind, mdreq.Key);
}
public void MetadataRequest_can_create_proper_url()
{
    // The default constructor produces a path with the ".csv" extension.
    var csvRequest = new MetadataRequest();
    Assert.AreEqual("datasets/ClinicalViewMetadata.csv", csvRequest.UrlPath());

    // Passing an explicit "xml" format yields the bare dataset path.
    var xmlRequest = new MetadataRequest("xml");
    Assert.AreEqual("datasets/ClinicalViewMetadata", xmlRequest.UrlPath());
}
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <param name="topics">The collection of topics to get metadata for.</param>
/// <returns>MetadataResponse validated to be complete; a null result when no topics were requested.</returns>
public Task<MetadataResponse> Get(IKafkaConnection[] connections, IEnumerable<string> topics)
{
    var request = new MetadataRequest { Topics = topics.ToList() };

    // Fix: previously this returned a literal null, i.e. a null *Task*, so any caller
    // writing `await Get(...)` would throw NullReferenceException. Return a completed
    // task carrying a null result instead, matching the "null response" contract of the
    // synchronous overloads.
    if (request.Topics.Count <= 0)
    {
        return Task.FromResult<MetadataResponse>(null);
    }

    return Get(connections, request);
}
// Creates and sends a PlayHaven request of the given type, wiring the type-specific
// event handlers before dispatch. Returns the request's hash code on success, 0 when
// the type was unrecognized.
public static int SendRequest(PlayHavenBinding.RequestType type, string placement, bool showsOverlayImmediately)
{
    IPlayHavenRequest request = null;
    switch (type)
    {
        case PlayHavenBinding.RequestType.Open:
            request = new OpenRequest(placement); // placement is actually customUDID
            request.OnSuccess += HandleOpenRequestOnSuccess;
            request.OnError += HandleOpenRequestOnError;
            break;

        case PlayHavenBinding.RequestType.Metadata:
            request = new MetadataRequest(placement);
            request.OnSuccess += HandleMetadataRequestOnSuccess;
            request.OnError += HandleMetadataRequestOnError;
            request.OnWillDisplay += HandleMetadataRequestOnWillDisplay;
            request.OnDidDisplay += HandleMetadataRequestOnDidDisplay;
            break;

        case PlayHavenBinding.RequestType.Content:
            request = new ContentRequest(placement);
            request.OnError += HandleContentRequestOnError;
            request.OnDismiss += HandleContentRequestOnDismiss;
            request.OnReward += HandleContentRequestOnReward;
            request.OnPurchasePresented += HandleRequestOnPurchasePresented;
            request.OnWillDisplay += HandleContentRequestOnWillDisplay;
            request.OnDidDisplay += HandleContentRequestOnDidDisplay;
            break;

        case PlayHavenBinding.RequestType.Preload:
            // Preload reuses the content error handler but has its own success handler.
            request = new ContentPreloadRequest(placement);
            request.OnError += HandleContentRequestOnError;
            request.OnSuccess += HandlePreloadRequestOnSuccess;
            break;

        case PlayHavenBinding.RequestType.CrossPromotionWidget:
            // The cross-promotion widget is a content request with a fixed placement.
            request = new ContentRequest("more_games");
            request.OnError += HandleCrossPromotionWidgetRequestOnError;
            request.OnDismiss += HandleCrossPromotionWidgetRequestOnDismiss;
            request.OnWillDisplay += HandleCrossPromotionWidgetRequestOnWillDisplay;
            request.OnDidDisplay += HandleCrossPromotionWidgetRequestOnDidDisplay;
            break;
    }

    if (request != null)
    {
        request.Send(showsOverlayImmediately);
        return (request.HashCode);
    }
    // Unknown request type: nothing was sent.
    return (0);
}
/// <summary>
/// Asynchronously lists the available networks, recording the call in a diagnostics scope.
/// </summary>
/// <param name="body">The metadata request body.</param>
/// <param name="cancellationToken">Token used to cancel the underlying REST call.</param>
/// <returns>The REST client's response.</returns>
public virtual async Task<Response<object>> NetworkListAsync(MetadataRequest body, CancellationToken cancellationToken = default)
{
    using var scope = _clientDiagnostics.CreateScope("RosettaClient.NetworkList");
    scope.Start();
    try
    {
        var result = await RestClient.NetworkListAsync(body, cancellationToken).ConfigureAwait(false);
        return result;
    }
    catch (Exception ex)
    {
        // Mark the diagnostics scope as failed before propagating.
        scope.Failed(ex);
        throw;
    }
}
/// <summary>
/// Resolves a metadata request and maps the outcome to an HTTP action result.
/// </summary>
/// <param name="request">The metadata request to resolve.</param>
/// <returns>200 with the metadata payload, or the upstream status code with its error text.</returns>
public async Task<IHttpActionResult> GetMetadata(MetadataRequest request)
{
    MetadataResponse response = await request.GetResponse();

    // Successful lookups return the payload directly.
    if (response.Metadata != null)
    {
        return Ok(response.Metadata);
    }

    // Otherwise surface the upstream status code and error message verbatim.
    var failure = new HttpResponseMessage(response.StatusCode)
    {
        ReasonPhrase = response.ErrorMessage
    };
    return ResponseMessage(failure);
}
/// <summary>
/// Synchronously lists the available networks, recording the call in a diagnostics scope.
/// </summary>
/// <param name="body">The metadata request body.</param>
/// <param name="cancellationToken">Token used to cancel the underlying REST call.</param>
/// <returns>The REST client's response.</returns>
public virtual Response<object> NetworkList(MetadataRequest body, CancellationToken cancellationToken = default)
{
    using var scope = _clientDiagnostics.CreateScope("RosettaClient.NetworkList");
    scope.Start();
    try
    {
        var result = RestClient.NetworkList(body, cancellationToken);
        return result;
    }
    catch (Exception ex)
    {
        // Mark the diagnostics scope as failed before propagating.
        scope.Failed(ex);
        throw;
    }
}
// Round-trips a MetadataRequest through encode/decode for a range of topic counts,
// including topic names containing spaces and punctuation.
public void MetadataRequest(
    [Values("test", "a really long name, with spaces and punctuation!")] string topic,
    [Values(0, 1, 10)] int topicsPerRequest)
{
    var topicNames = new List<string>();
    for (var index = 0; index < topicsPerRequest; index++)
    {
        // Suffix with the index so every generated topic name is distinct.
        topicNames.Add(topic + index);
    }

    new MetadataRequest(topicNames).AssertCanEncodeDecodeRequest(0);
}
// Verifies MetadataRequest encode/decode round-trips for 0, 1 and 10 topics.
public void MetadataRequest(
    [Values("testTopic")] string topic,
    [Values(0, 1, 10)] int topicsPerRequest)
{
    var topicNames = new List<string>();
    for (var index = 0; index < topicsPerRequest; index++)
    {
        topicNames.Add(topic + index);
    }

    new MetadataRequest(topicNames).AssertCanEncodeDecodeRequest(0);
}
public async Task GetMetadataAsync()
{
    // Request streetview metadata for a fixed, well-known place.
    var request = new MetadataRequest(apiKey, signingKey)
    {
        Place = "Washington, DC"
    };

    var result = await cache
        .LoadAsync(metadataDecoder, request)
        .ConfigureAwait(false);

    // The service should answer OK with a fully-populated metadata record.
    Assert.AreEqual(HttpStatusCode.OK, result.Status);
    Assert.IsNotNull(result.Copyright);
    Assert.AreEqual("2016-07", result.Date?.ToString("yyyy-MM", CultureInfo.InvariantCulture));
    Assert.IsNotNull(result.Location);
    Assert.IsNotNull(result.Pano_id);
}
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicNames">Topics to get metadata information for.</param>
/// <param name="cancellationToken"></param>
/// <remarks>
/// Used by <see cref="BrokerRouter"/> internally. Broken out for better testability, but not intended to be used separately.
/// Drives the router's configured retry policy: a response whose brokers/topics all
/// validate returns immediately; validation failures with messages are logged and retried;
/// validation failures that map to exceptions are thrown (aggregated when more than one).
/// </remarks>
/// <returns>MetadataResponse validated to be complete.</returns>
internal static async Task<MetadataResponse> GetMetadataAsync(this IBrokerRouter brokerRouter, IEnumerable<string> topicNames, CancellationToken cancellationToken)
{
    var request = new MetadataRequest(topicNames);

    return (await brokerRouter.Configuration.RefreshRetry.AttemptAsync(
        async (attempt, timer) =>
        {
            var response = await brokerRouter.GetMetadataAsync(request, cancellationToken).ConfigureAwait(false);
            // A null response is terminal: wrap it so the retry loop stops and yields null.
            if (response == null) return new RetryAttempt<MetadataResponse>(null);

            // Collect every broker/topic that failed validation.
            var results = response.Brokers
                .Select(ValidateBroker)
                .Union(response.Topics.Select(ValidateTopic))
                .Where(r => !r.IsValid.GetValueOrDefault())
                .ToList();

            // Validation failures that carry exceptions abort the retry loop outright.
            var exceptions = results.Select(r => r.ToException()).Where(e => e != null).ToList();
            if (exceptions.Count == 1) throw exceptions.Single();
            if (exceptions.Count > 1) throw new AggregateException(exceptions);

            // Fully valid response: done.
            if (results.Count == 0) return new RetryAttempt<MetadataResponse>(response);

            // Message-only failures are logged, then the whole request is retried.
            foreach (var result in results.Where(r => !string.IsNullOrEmpty(r.Message)))
            {
                brokerRouter.Log.Warn(() => LogEvent.Create(result.Message));
            }
            return RetryAttempt<MetadataResponse>.Retry;
        },
        (attempt, retry) => brokerRouter.Log.Warn(() => LogEvent.Create($"Failed metadata request on attempt {attempt}: Will retry in {retry}")),
        null, // return the failed response above, resulting in a null
        (ex, attempt, retry) => { throw ex.PrepareForRethrow(); },
        (ex, attempt) => brokerRouter.Log.Warn(() => LogEvent.Create(ex, $"Failed metadata request on attempt {attempt}")),
        cancellationToken));
}
public async Task TestListingAllTopicsWorksOk()
{
    using (var temporaryTopic = testCluster.CreateTemporaryTopic())
    using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
    {
        // An empty metadata request asks the broker for every known topic.
        var request = new MetadataRequest { };
        var response = await connection.SendRequestAsync(request, CancellationToken.None);

        Assert.That(response, Is.Not.Null);
        Assert.That(response.Brokers, Has.Length.EqualTo(1));
        Assert.That(response.Topics, Has.Length.EqualTo(1));

        // The only topic listed should be the temporary one, healthy with one partition.
        var listedTopic = response.Topics[0];
        Assert.That(listedTopic.Name, Is.EqualTo(temporaryTopic.Name));
        Assert.That(listedTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
        Assert.That(listedTopic.Partitions, Has.Length.EqualTo(1));
    }
}
// Shared-host variant: seeds the workspace with a trivial file and asserts the
// metadata endpoint can produce source text for the requested assembly/type pair.
private async Task TestMetadataAsync(string filename, string assemblyName, string typeName)
{
    SharedOmniSharpTestHost.AddFilesToWorkspace(new TestFile(filename, "class C {}"));

    var handler = GetRequestHandler(SharedOmniSharpTestHost);
    var metadataRequest = new MetadataRequest
    {
        AssemblyName = assemblyName,
        TypeName = typeName,
        Timeout = 60000
    };

    var metadataResponse = await handler.Handle(metadataRequest);
    Assert.NotNull(metadataResponse.Source);
}
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <param name="topics">The collection of topics to get metadata for.</param>
/// <returns>MetadataResponse validated to be complete, or null when no topics were requested or no response was received.</returns>
public async Task<MetadataResponse> Get(IKafkaConnection[] connections, IEnumerable<string> topics)
{
    var request = new MetadataRequest { Topics = topics.ToList() };
    if (request.Topics.Count <= 0)
    {
        return null;
    }

    var maxRetryAttempt = 2;
    var performRetry = false;
    var retryAttempt = 0;
    MetadataResponse metadataResponse = null;

    do
    {
        performRetry = false;
        // Fix: added ConfigureAwait(false) for consistency with the BackoffOnRetry await
        // below — library code should not capture the caller's synchronization context.
        metadataResponse = await GetMetadataResponse(connections, request).ConfigureAwait(false);
        if (metadataResponse == null)
        {
            return null;
        }

        foreach (var validation in ValidateResponse(metadataResponse))
        {
            switch (validation.Status)
            {
                case ValidationResult.Retry:
                    performRetry = true;
                    _log.WarnFormat(validation.Message);
                    break;

                case ValidationResult.Error:
                    throw validation.Exception;
            }
        }

        await BackoffOnRetry(++retryAttempt, performRetry).ConfigureAwait(false);
    } while (retryAttempt < maxRetryAttempt && _interrupted == false && performRetry);

    return metadataResponse;
}
// Tries each connection in turn for a metadata response. ObjectDisposedException inside
// an AggregateException is treated as shutdown: it flips _interrupted, which suppresses
// further sends and the final ServerUnreachableException (returning null instead).
private MetadataResponse GetMetadataResponse(IKafkaConnection[] connections, MetadataRequest request)
{
    //try each default broker until we find one that is available
    foreach (var conn in connections)
    {
        if (!_interrupted)
        {
            try
            {
                //TODO remove blocking result here!
                var response = conn.SendAsync(request).Result;
                if (response != null && response.Count > 0)
                {
                    return (response.FirstOrDefault());
                }
            }
            catch (AggregateException ex)
            {
                if (ex.Flatten().InnerExceptions.Any(x => x is ObjectDisposedException))
                {
                    // Connection was disposed under us — mark everything handled and stop trying.
                    ex.Handle(x => true);
                    _interrupted = true;
                }
                else
                {
                    _log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", conn.Endpoint, ex);
                }
            }
            catch (Exception ex)
            {
                // Any other failure: log and fall through to the next broker.
                _log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", conn.Endpoint, ex);
            }
        }
    }

    if (!_interrupted)
    {
        throw new ServerUnreachableException(
            "Unable to query for metadata from any of the default Kafka servers. At least one provided server must be available. Server list: {0}",
            string.Join(", ", connections.Select(x => x.ToString())));
    }

    // Interrupted (shutting down): quietly report "no response" rather than throwing.
    return (null);
}
/// <summary>
/// Fetches metadata for the given topics (or all topics when none are supplied) from a
/// free broker, filtering Kafka's internal offsets topic out of the result.
/// </summary>
/// <param name="topics">Topic names to query; null is treated as an empty list.</param>
/// <returns>The metadata response with internal topics removed.</returns>
public MetadataResponse Metadata(params String[] topics)
{
    // Normalize a null argument to "no topics requested".
    topics = topics ?? new String[0];

    foreach (var topic in topics)
    {
        EnsureLegalTopicSpelling(topic);
    }

    var brokerUri = _knownBrokerDispatcher.FreeSelect();
    var response = (MetadataResponse)SubmitRequest(brokerUri, new MetadataRequest(topics));

    // Hide Kafka's internal consumer-offsets topic from callers.
    response.TopicMetadatas = response.TopicMetadatas
        .Where(x => !x.TopicName.Equals(__consumer_offsets, StringComparison.OrdinalIgnoreCase))
        .ToArray();

    return response;
}
// Per-test-host variant: spins up a fresh OmniSharp host around a trivial file and
// asserts the metadata endpoint can produce source text for the assembly/type pair.
private async Task TestMetadataAsync(string filename, string assemblyName, string typeName)
{
    var testFile = new TestFile(filename, "class C {}");
    using (var host = CreateOmniSharpHost(testFile))
    {
        var handler = GetRequestHandler(host);
        var metadataRequest = new MetadataRequest
        {
            AssemblyName = assemblyName,
            TypeName = typeName,
            Timeout = 60000
        };

        var metadataResponse = await handler.Handle(metadataRequest);
        Assert.NotNull(metadataResponse.Source);
    }
}
/// <summary>
/// Attempts to refresh broker and topic state from one connection.
/// </summary>
/// <param name="connection">The connection to query.</param>
/// <param name="token">Cancellation token flowed into all async calls.</param>
/// <returns>True on success; false (after logging) on any failure.</returns>
private async Task<bool> TryToRefreshFromConnectionAsync(KafkaConnection connection, CancellationToken token)
{
    // Ask only about the topics we are currently tracking.
    var metadataRequest = new MetadataRequest { Topics = topicToPartitions.Keys.ToList() };
    try
    {
        var metadata = await connection.SendRequestAsync(metadataRequest, token).ConfigureAwait(false);
        await RefreshBrokersAsync(metadata.Brokers, token).ConfigureAwait(false);
        RefreshTopics(metadata.Topics);
        return true;
    }
    catch (Exception ex)
    {
        // Best effort: log and let the caller fall back to another connection.
        Log.Error(ex, "Error refreshing connection");
        return false;
    }
}
/// <summary>
/// Get metadata for a topic over a fresh connection.
/// </summary>
/// <param name="correlationId">Id used by the client to identify this transaction; returned in the response.</param>
/// <param name="clientId">Name to identify the client; used in server logs.</param>
/// <param name="topicName">Name of the requested topic. If null, metadata for all topics is returned.</param>
/// <returns>The parsed metadata response, or null when the broker sends an empty payload.</returns>
public MetadataResponse Metadata(int correlationId, string clientId, String topicName)
{
    var request = new MetadataRequest(correlationId, clientId, topicName);
    using (var connection = new KafkaConnection(server, port))
    {
        connection.Write(request.GetRequestBytes().ToArray());

        // The broker prefixes its reply with a 4-byte payload length (network byte order).
        var lengthBytes = BitWorks.ReverseBytes(connection.Read(4));
        var payloadLength = BitConverter.ToInt32(lengthBytes, 0);
        if (payloadLength == 0)
        {
            return null;
        }

        var payload = connection.Read(payloadLength);
        var metadataResponse = new MetadataResponse();
        metadataResponse.Parse(payload, 0);
        return metadataResponse;
    }
}
// Refreshes topic metadata from the server under the topic semaphore. Unless ignoreCache
// is set, cache hits are served directly and only the misses are requested; a null
// topicNames with ignoreCache refreshes ALL topics. NOTE(review): the lowercase
// response.brokers / response.topic_metadata members appear to be this protocol type's
// wire-named properties — confirm against the MetadataResponse declaration.
private async Task<IImmutableList<MetadataResponse.Topic>> UpdateTopicMetadataFromServerAsync(IEnumerable<string> topicNames, bool ignoreCache, CancellationToken cancellationToken)
{
    return (await _topicSemaphore.LockAsync(
        async () =>
        {
            // Start by treating every requested topic as a cache miss.
            var cachedResults = new CachedResults<MetadataResponse.Topic>(misses: topicNames);
            if (!ignoreCache)
            {
                cachedResults = CachedResults<MetadataResponse.Topic>.ProduceResults(cachedResults.Misses, topicName => TryGetCachedTopic(topicName, Configuration.CacheExpiration));
                // Everything was cached: no server round-trip needed.
                if (cachedResults.Misses.Count == 0)
                {
                    return cachedResults.Hits;
                }
            }

            MetadataRequest request;
            MetadataResponse response;
            if (ignoreCache && topicNames == null)
            {
                // Full refresh: an empty request asks the server for all topics.
                Log.Info(() => LogEvent.Create("Router refreshing metadata for all topics"));
                request = new MetadataRequest();
                response = await this.GetMetadataAsync(request, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                // Partial refresh: only request the cache misses.
                Log.Info(() => LogEvent.Create($"Router refreshing metadata for topics {string.Join(",", cachedResults.Misses)}"));
                request = new MetadataRequest(cachedResults.Misses);
                response = await this.GetMetadataAsync(request, cancellationToken).ConfigureAwait(false);
            }

            if (response != null)
            {
                await UpdateConnectionCacheAsync(response.brokers, cancellationToken);
            }
            UpdateTopicCache(response);

            // since the above may take some time to complete, it's necessary to hold on to the topics we found before
            // just in case they expired between when we searched for them and now.
            var result = cachedResults.Hits.AddNotNullRange(response?.topic_metadata);
            return result;
        }, cancellationToken).ConfigureAwait(false));
}
/// <summary>
/// Asks each broker connection in turn for metadata, returning the first usable response.
/// </summary>
/// <param name="connections">Broker connections to try, in order.</param>
/// <param name="request">The metadata request to send.</param>
/// <returns>The first non-empty response's first entry.</returns>
/// <exception cref="ServerUnreachableException">Thrown when no broker answers.</exception>
private MetadataResponse GetMetadataResponse(IKafkaConnection[] connections, MetadataRequest request)
{
    // Walk the default brokers in order until one returns something usable.
    foreach (var connection in connections)
    {
        try
        {
            var responses = connection.SendAsync(request).Result;
            if (responses != null && responses.Count > 0)
            {
                return responses.FirstOrDefault();
            }
        }
        catch (Exception ex)
        {
            _log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", connection.Endpoint, ex);
        }
    }

    throw new ServerUnreachableException(
        "Unable to query for metadata from any of the default Kafka servers. At least one provided server must be available. Server list: {0}",
        string.Join(", ", connections.Select(x => x.ToString())));
}
/// <summary>
/// Given a collection of server connections, query for all topics metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <returns>MetadataResponse validated to be complete.</returns>
public Task<MetadataResponse> Get(IKafkaConnection[] connections)
{
    // An empty metadata request asks the cluster about every topic.
    return Get(connections, new MetadataRequest());
}
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <param name="topics">The collection of topics to get metadata for.</param>
/// <returns>MetadataResponse validated to be complete; a null result when no topics were requested.</returns>
public Task<MetadataResponse> Get(IKafkaConnection[] connections, IEnumerable<string> topics)
{
    var request = new MetadataRequest { Topics = topics.ToList() };

    // Fix: `return null` here returned a null *Task*, so `await Get(...)` would throw
    // NullReferenceException. Hand back a completed task whose result is null instead,
    // matching the "null response" contract used by the synchronous overloads.
    if (request.Topics.Count <= 0) return Task.FromResult<MetadataResponse>(null);

    return Get(connections, request);
}
// End-to-end broker smoke test against a fresh topic: metadata, produce, fetch,
// offsets, consumer coordination, and offset commit/fetch. Each `{ ... }` section is an
// independent step reusing the same connection; ordering matters because later steps
// assert offsets produced by earlier ones.
public async Task TestNewTopicProductionWorksOk()
{
    using (var temporaryTopic = testCluster.CreateTemporaryTopic())
    using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
    {
        var topic = temporaryTopic.Name;

        // Step 1: poll metadata until the new topic's leader is elected.
        {
            var request = new MetadataRequest { Topics = new List<string> { topic } };
            MetadataResponse response = null;
            while (response == null)
            {
                response = await connection.SendRequestAsync(request, CancellationToken.None);
                if (response.Topics[0].ErrorCode == ErrorResponseCode.LeaderNotAvailable)
                {
                    response = null;
                    await Task.Delay(1000);
                }
            }
            Assert.That(response, Is.Not.Null);
            var first = response;
            Assert.That(first.Topics, Has.Length.EqualTo(1));
            var firstTopic = first.Topics.First();
            Assert.That(firstTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(firstTopic.Name, Is.EqualTo(topic));
            Assert.That(firstTopic.Partitions, Has.Length.EqualTo(1));
            var firstPartition = firstTopic.Partitions.First();
            Assert.That(firstPartition.PartitionId, Is.EqualTo(0));
        }

        // Step 2: produce four messages; the first lands at offset 0.
        {
            var request = new ProduceRequest
            {
                Acks = 1,
                TimeoutMS = 10000,
                Payload = new List<Payload>
                {
                    new Payload
                    {
                        Topic = topic,
                        Partition = 0,
                        Codec = MessageCodec.CodecNone,
                        Messages = new List<Message>
                        {
                            new Message("Message 1"),
                            new Message("Message 2"),
                            new Message("Message 3"),
                            new Message("Message 4"),
                        }
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Is.Not.Null);
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Offset, Is.EqualTo(0));
        }

        // Step 3: fetch with a small MaxBytes (40) — only the first message fits.
        {
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch
                    {
                        MaxBytes = 40,
                        Offset = 0,
                        PartitionId = 0,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(1));
            var firstMessage = first.Messages.First();
            Assert.That(firstMessage.Meta.Offset, Is.EqualTo(0));
            Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(firstMessage.Attribute, Is.EqualTo(0));
            Assert.That(firstMessage.Key, Is.Null);
            Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(firstMessage.Value, Is.Not.Null);
            var firstString = firstMessage.Value.ToUtf8String();
            Assert.That(firstString, Is.EqualTo("Message 1"));
        }

        // Step 4: offset request (Time = -1 → latest); expect head (4) and tail (0) offsets.
        {
            var request = new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset
                    {
                        MaxOffsets = 2,
                        PartitionId = 0,
                        Time = -1,
                        Topic = topic
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Offsets, Has.Length.EqualTo(2));
            Assert.That(first.Offsets[0], Is.EqualTo(4));
            Assert.That(first.Offsets[1], Is.EqualTo(0));
        }

        // Step 5: poll for the consumer coordinator until it becomes available.
        {
            var request = new ConsumerMetadataRequest { ConsumerGroup = topic };
            ConsumerMetadataResponse response = null;
            while (response == null)
            {
                response = await connection.SendRequestAsync(request, CancellationToken.None);
                if (response.Error == ErrorResponseCode.ConsumerCoordinatorNotAvailableCode)
                {
                    response = null;
                    await Task.Delay(1000);
                }
            }
            Assert.That(response.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Console.WriteLine("Id = {0}, Host = {1}, Port = {2}", response.CoordinatorId, response.CoordinatorHost, response.CoordinatorPort);
        }

        // Step 6: offset fetch before any commit — expect -1 and empty metadata.
        {
            var request = new OffsetFetchRequest
            {
                ConsumerGroup = topic,
                Topics = new List<OffsetFetch>
                {
                    new OffsetFetch
                    {
                        PartitionId = 0,
                        Topic = topic
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.MetaData, Is.Empty);
            Assert.That(first.Offset, Is.EqualTo(-1));
        }

        // Step 7: commit offset 0 with metadata for this group.
        {
            var request = new OffsetCommitRequest
            {
                ConsumerGroup = topic,
                ConsumerGroupGenerationId = 1,
                ConsumerId = "0",
                OffsetCommits = new List<OffsetCommit>
                {
                    new OffsetCommit
                    {
                        Metadata = "Metadata 1",
                        Offset = 0,
                        PartitionId = 0,
                        TimeStamp = -1,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
        }

        // Step 8: offset fetch after the commit — expect offset 0 and the metadata back.
        {
            var request = new OffsetFetchRequest
            {
                ConsumerGroup = topic,
                Topics = new List<OffsetFetch>
                {
                    new OffsetFetch
                    {
                        PartitionId = 0,
                        Topic = topic
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.MetaData, Is.EqualTo("Metadata 1"));
            Assert.That(first.Offset, Is.EqualTo(0));
        }

        // Step 9: fetch from offset 1 with a big buffer — expect messages 2..4.
        {
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch
                    {
                        MaxBytes = 1024,
                        Offset = 0 + 1,
                        PartitionId = 0,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(3));
            var firstMessage = first.Messages.First();
            Assert.That(firstMessage.Meta.Offset, Is.EqualTo(1));
            Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(firstMessage.Attribute, Is.EqualTo(0));
            Assert.That(firstMessage.Key, Is.Null);
            Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(firstMessage.Value, Is.Not.Null);
            var firstString = firstMessage.Value.ToUtf8String();
            Assert.That(firstString, Is.EqualTo("Message 2"));
            var lastMessage = first.Messages.Last();
            Assert.That(lastMessage.Meta.Offset, Is.EqualTo(3));
            Assert.That(lastMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(lastMessage.Attribute, Is.EqualTo(0));
            Assert.That(lastMessage.Key, Is.Null);
            Assert.That(lastMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(lastMessage.Value, Is.Not.Null);
            var lastString = lastMessage.Value.ToUtf8String();
            Assert.That(lastString, Is.EqualTo("Message 4"));
        }

        // Step 10: fetch past the last message (offset 4) — expect an empty message set.
        {
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch
                    {
                        MaxBytes = 1024,
                        Offset = 3 + 1,
                        PartitionId = 0,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(0));
        }
    }
    Console.WriteLine("Test completed");
}
/// <summary>
/// Given a collection of server connections, query for all topics metadata.
/// </summary>
/// <param name="connections">The server connections to query. Will cycle through the collection, starting at zero until a response is received.</param>
/// <returns>MetadataResponse validated to be complete.</returns>
public Task<MetadataResponse> Get(IKafkaConnection[] connections)
{
    // Delegate to the topic-specific overload with an empty request,
    // which the broker interprets as "all topics".
    var allTopicsRequest = new MetadataRequest();
    return Get(connections, allTopicsRequest);
}
// End-to-end integration test against a live broker: creates a temporary topic,
// produces four messages, then exercises the fetch, offset, consumer-metadata,
// offset-commit and offset-fetch APIs, asserting exact wire-level results at each step.
public async Task TestNewTopicProductionWorksOk()
{
    using (var temporaryTopic = testCluster.CreateTemporaryTopic())
    using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
    {
        var topic = temporaryTopic.Name;
        {
            // 1. Poll metadata until the new topic has a leader
            // (LeaderNotAvailable is expected transiently right after creation).
            var request = new MetadataRequest { Topics = new List<string> { topic } };
            MetadataResponse response = null;
            while (response == null)
            {
                response = await connection.SendRequestAsync(request, CancellationToken.None);
                if (response.Topics[0].ErrorCode == ErrorResponseCode.LeaderNotAvailable)
                {
                    response = null;
                    await Task.Delay(1000);
                }
            }
            Assert.That(response, Is.Not.Null);
            var first = response;
            Assert.That(first.Topics, Has.Length.EqualTo(1));
            var firstTopic = first.Topics.First();
            Assert.That(firstTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(firstTopic.Name, Is.EqualTo(topic));
            Assert.That(firstTopic.Partitions, Has.Length.EqualTo(1));
            var firstPartition = firstTopic.Partitions.First();
            Assert.That(firstPartition.PartitionId, Is.EqualTo(0));
        }
        {
            // 2. Produce four messages; the first assigned offset should be 0.
            var request = new ProduceRequest
            {
                Acks = 1,
                TimeoutMS = 10000,
                Payload = new List<Payload>
                {
                    new Payload
                    {
                        Topic = topic,
                        Partition = 0,
                        Codec = MessageCodec.CodecNone,
                        Messages = new List<Message>
                        {
                            new Message("Message 1"),
                            new Message("Message 2"),
                            new Message("Message 3"),
                            new Message("Message 4"),
                        }
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Is.Not.Null);
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Offset, Is.EqualTo(0));
        }
        {
            // 3. Fetch from offset 0 with a small MaxBytes (40) so only the first
            // message fits; the high-water mark is 4 after the produce above.
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch
                    {
                        MaxBytes = 40,
                        Offset = 0,
                        PartitionId = 0,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(1));
            var firstMessage = first.Messages.First();
            Assert.That(firstMessage.Meta.Offset, Is.EqualTo(0));
            Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(firstMessage.Attribute, Is.EqualTo(0));
            Assert.That(firstMessage.Key, Is.Null);
            Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(firstMessage.Value, Is.Not.Null);
            var firstString = firstMessage.Value.ToUtf8String();
            Assert.That(firstString, Is.EqualTo("Message 1"));
        }
        {
            // 4. Offset request with Time = -1 ("latest"): expects log-end offset 4
            // and log-start offset 0.
            var request = new OffsetRequest
            {
                Offsets = new List<Offset>
                {
                    new Offset
                    {
                        MaxOffsets = 2,
                        PartitionId = 0,
                        Time = -1,
                        Topic = topic
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Offsets, Has.Length.EqualTo(2));
            Assert.That(first.Offsets[0], Is.EqualTo(4));
            Assert.That(first.Offsets[1], Is.EqualTo(0));
        }
        {
            // 5. Locate the consumer coordinator for the group, retrying while the
            // coordinator is still being elected.
            var request = new ConsumerMetadataRequest { ConsumerGroup = topic };
            ConsumerMetadataResponse response = null;
            while (response == null)
            {
                response = await connection.SendRequestAsync(request, CancellationToken.None);
                if (response.Error == ErrorResponseCode.ConsumerCoordinatorNotAvailableCode)
                {
                    response = null;
                    await Task.Delay(1000);
                }
            }
            Assert.That(response.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Console.WriteLine("Id = {0}, Host = {1}, Port = {2}", response.CoordinatorId, response.CoordinatorHost, response.CoordinatorPort);
        }
        {
            // 6. Offset fetch before any commit: offset is -1 (none), metadata empty.
            var request = new OffsetFetchRequest
            {
                ConsumerGroup = topic,
                Topics = new List<OffsetFetch>
                {
                    new OffsetFetch
                    {
                        PartitionId = 0,
                        Topic = topic
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.MetaData, Is.Empty);
            Assert.That(first.Offset, Is.EqualTo(-1));
        }
        {
            // 7. Commit offset 0 with metadata for the group.
            var request = new OffsetCommitRequest
            {
                ConsumerGroup = topic,
                ConsumerGroupGenerationId = 1,
                ConsumerId = "0",
                OffsetCommits = new List<OffsetCommit>
                {
                    new OffsetCommit
                    {
                        Metadata = "Metadata 1",
                        Offset = 0,
                        PartitionId = 0,
                        TimeStamp = -1,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
        }
        {
            // 8. Offset fetch after commit: committed offset and metadata are visible.
            var request = new OffsetFetchRequest
            {
                ConsumerGroup = topic,
                Topics = new List<OffsetFetch>
                {
                    new OffsetFetch
                    {
                        PartitionId = 0,
                        Topic = topic
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.MetaData, Is.EqualTo("Metadata 1"));
            Assert.That(first.Offset, Is.EqualTo(0));
        }
        {
            // 9. Fetch from offset 1 (committed offset + 1): the remaining three messages.
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch
                    {
                        MaxBytes = 1024,
                        Offset = 0 + 1,
                        PartitionId = 0,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(3));
            var firstMessage = first.Messages.First();
            Assert.That(firstMessage.Meta.Offset, Is.EqualTo(1));
            Assert.That(firstMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(firstMessage.Attribute, Is.EqualTo(0));
            Assert.That(firstMessage.Key, Is.Null);
            Assert.That(firstMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(firstMessage.Value, Is.Not.Null);
            var firstString = firstMessage.Value.ToUtf8String();
            Assert.That(firstString, Is.EqualTo("Message 2"));
            var lastMessage = first.Messages.Last();
            Assert.That(lastMessage.Meta.Offset, Is.EqualTo(3));
            Assert.That(lastMessage.Meta.PartitionId, Is.EqualTo(0));
            Assert.That(lastMessage.Attribute, Is.EqualTo(0));
            Assert.That(lastMessage.Key, Is.Null);
            Assert.That(lastMessage.MagicNumber, Is.EqualTo(0));
            Assert.That(lastMessage.Value, Is.Not.Null);
            var lastString = lastMessage.Value.ToUtf8String();
            Assert.That(lastString, Is.EqualTo("Message 4"));
        }
        {
            // 10. Fetch past the last message (offset 4 == high-water mark): empty result.
            var request = new FetchRequest
            {
                MinBytes = 0,
                MaxWaitTime = 0,
                Fetches = new List<Fetch>
                {
                    new Fetch
                    {
                        MaxBytes = 1024,
                        Offset = 3 + 1,
                        PartitionId = 0,
                        Topic = topic,
                    }
                }
            };
            var response = await connection.SendRequestAsync(request, CancellationToken.None);
            Assert.That(response, Has.Count.EqualTo(1));
            var first = response.First();
            Assert.That(first.Error, Is.EqualTo(ErrorResponseCode.NoError));
            Assert.That(first.HighWaterMark, Is.EqualTo(4));
            Assert.That(first.PartitionId, Is.EqualTo(0));
            Assert.That(first.Topic, Is.EqualTo(topic));
            Assert.That(first.Messages, Has.Count.EqualTo(0));
        }
    }
    Console.WriteLine("Test completed");
}
/// <summary>
/// Sends a metadata request to each of the router's broker connections in turn,
/// returning the first successful response.
/// </summary>
/// <param name="brokerRouter">Router supplying the broker connections to try.</param>
/// <param name="request">The metadata request to send.</param>
/// <param name="cancellationToken">Token used to cancel the send.</param>
/// <returns>The first successful <see cref="MetadataResponse"/>.</returns>
/// <exception cref="OperationCanceledException">When <paramref name="cancellationToken"/> is cancelled.</exception>
/// <exception cref="RequestException">When every connection failed.</exception>
private static async Task<MetadataResponse> GetMetadataAsync(this IBrokerRouter brokerRouter, MetadataRequest request, CancellationToken cancellationToken)
{
    var servers = new List<string>();
    foreach (var connection in brokerRouter.Connections)
    {
        var server = connection.Endpoint?.ToString();
        try
        {
            return await connection.SendAsync(request, cancellationToken).ConfigureAwait(false);
        }
        // Fix: do not swallow cancellation. Previously a cancelled token was caught
        // here, retried against every remaining server, and finally surfaced as a
        // RequestException; now OperationCanceledException propagates to the caller.
        catch (Exception ex) when (!(ex is OperationCanceledException))
        {
            servers.Add(server);
            brokerRouter.Log.Warn(() => LogEvent.Create(ex, $"Failed to contact {server}: Trying next server"));
        }
    }
    throw new RequestException(request.ApiKey, ErrorResponseCode.None, $"Unable to make Metadata Request to any of {string.Join(" ", servers)}");
}
// Verifies that a MetadataRequest with no topic filter returns the full listing:
// a single broker and the one temporary topic with its single, error-free partition.
public async Task TestListingAllTopicsWorksOk()
{
    using (var temporaryTopic = testCluster.CreateTemporaryTopic())
    using (var connection = await KafkaConnectionFactory.CreateSimpleKafkaConnectionAsync(testCluster.CreateBrokerUris()[0]))
    {
        // An empty metadata request means "list everything".
        var metadataRequest = new MetadataRequest { };
        var metadataResponse = await connection.SendRequestAsync(metadataRequest, CancellationToken.None);

        Assert.That(metadataResponse, Is.Not.Null);
        Assert.That(metadataResponse.Brokers, Has.Length.EqualTo(1));
        Assert.That(metadataResponse.Topics, Has.Length.EqualTo(1));

        var onlyTopic = metadataResponse.Topics[0];
        Assert.That(onlyTopic.Name, Is.EqualTo(temporaryTopic.Name));
        Assert.That(onlyTopic.ErrorCode, Is.EqualTo(ErrorResponseCode.NoError));
        Assert.That(onlyTopic.Partitions, Has.Length.EqualTo(1));
    }
}
/// <summary>
/// Demo entry point: opens a raw TCP connection to a Kafka broker, hand-encodes a
/// MetadataRequest frame (header + body with a 4-byte length prefix), dumps it as hex,
/// and sends it twice. Fixes: misspelled locals ("lenght"/"writter"), misspelled
/// console output ("Bytes Lenght"), and removal of dead commented-out TcpClient code.
/// </summary>
static void Main(string[] args)
{
    RemoteHost = NodeBuilder.BuildNode().Host("192.168.0.12").WithPort(9092).WithTransportType(TransportType.Tcp);
    kafkaClient = new ClientBootstrap()
        .SetTransport(TransportType.Tcp)
        .SetDecoder(new NoOpDecoder())
        .SetEncoder(new NoOpEncoder())
        .RemoteAddress(RemoteHost)
        .OnConnect(ConnectionEstablishedCallback)
        .OnDisconnect(ConnectionTerminatedCallback)
        .Build().NewConnection(Node.Empty().WithTransportType(TransportType.Tcp), RemoteHost);
    kafkaClient.Open();

    Console.Title = string.Format("KafkaClient {0}", Process.GetCurrentProcess().Id);
    kafkaClient.OnError += KafkaClient_OnError;

    // Serialize the request header followed by the request body into one payload.
    var request = new MetadataRequest(new List<string> { "test_topic" });
    var requestHeader = new RequestHeader((short)ApiKeys.Metadata, "Mr Flibble", 1234);
    var buffer = new MemoryStream();
    requestHeader.WriteTo(buffer);
    request.WriteTo(buffer);
    var bytes = buffer.ToArray();

    // Kafka wire format: a 4-byte length prefix precedes the payload.
    var length = KafkaTypesHelper.Int32;
    buffer = new MemoryStream();
    length.Write(buffer, bytes.Length);
    var writer = new BinaryWriter(buffer);
    writer.Write(bytes);
    Console.WriteLine("Bytes Length " + bytes.Length);
    bytes = buffer.ToArray();
    HexPrint(bytes);

    // Send the framed request twice (second send exercises connection reuse).
    kafkaClient.Send(new NetworkData() { Buffer = bytes, Length = bytes.Length });
    Thread.Sleep(5000);
    kafkaClient.Send(new NetworkData() { Buffer = bytes, Length = bytes.Length });

    Console.WriteLine("Press any key to exit.");
    Console.ReadLine();
}
// Verifies that with decompilation enabled, go-to-definition on a BCL type ("int")
// round-trips through the metadata endpoint: the returned source is genuinely
// decompiled (methods have full bodies), and a second navigation inside that
// decompiled source (to "IComparable") resolves to the correct metadata location.
public async Task ReturnsDecompiledDefinition_FromMetadata_WhenSymbolIsType(string filename)
{
    // $$ marks the caret position inside "int".
    var testFile = new TestFile(filename, @" using System; class Bar { public void Baz() { var number = in$$t.MaxValue; } }");
    using var host = CreateOmniSharpHost(new[] { testFile }, new Dictionary<string, string>
    {
        ["RoslynExtensionsOptions:EnableDecompilationSupport"] = "true"
    });
    var point = testFile.Content.GetPointFromPosition();

    // 1. start by asking for definition of "int"
    var gotoDefinitionRequest = new GotoDefinitionRequest
    {
        FileName = testFile.FileName,
        Line = point.Line,
        Column = point.Offset,
        WantMetadata = true,
        Timeout = 60000
    };
    var gotoDefinitionRequestHandler = GetRequestHandler(host);
    var gotoDefinitionResponse = await gotoDefinitionRequestHandler.Handle(gotoDefinitionRequest);

    // 2. now, based on the response information
    // go to the metadata endpoint, and ask for "int" specific decompiled source
    var metadataRequest = new MetadataRequest
    {
        AssemblyName = gotoDefinitionResponse.MetadataSource.AssemblyName,
        TypeName = gotoDefinitionResponse.MetadataSource.TypeName,
        ProjectName = gotoDefinitionResponse.MetadataSource.ProjectName,
        Language = gotoDefinitionResponse.MetadataSource.Language,
        Timeout = 60000
    };
    var metadataRequestHandler = host.GetRequestHandler<MetadataService>(OmniSharpEndpoints.Metadata);
    var metadataResponse = await metadataRequestHandler.Handle(metadataRequest);

    // 3. the response contains SourceName ("file") and SourceText (syntax tree)
    // use the source to locate "IComparable" which is an interface implemented by Int32 struct
    var decompiledTree = CSharpSyntaxTree.ParseText(metadataResponse.Source);
    var compilationUnit = decompiledTree.GetCompilationUnitRoot();

    // second comment should indicate we have decompiled
    // NOTE(review): pinned to a specific ICSharpCode.Decompiler version string — this
    // assert will need updating whenever the decompiler dependency is bumped.
    var comments = compilationUnit.DescendantTrivia().Where(t => t.Kind() == SyntaxKind.SingleLineCommentTrivia).ToArray();
    Assert.NotNull(comments);
    Assert.Equal("// Decompiled with ICSharpCode.Decompiler 6.1.0.5902", comments[1].ToString());

    // contrary to regular metadata, we should have methods with full bodies
    // this condition would fail if decompilation wouldn't work
    var methods = compilationUnit.
        DescendantNodesAndSelf().
        OfType<MethodDeclarationSyntax>().
        Where(m => m.Body != null);
    Assert.NotEmpty(methods);

    // Find "IComparable" in the base list of the first type declaration (Int32).
    var iComparable = compilationUnit.
        DescendantNodesAndSelf().
        OfType<BaseTypeDeclarationSyntax>().First().
        BaseList.Types.FirstOrDefault(x => x.Type.ToString() == "IComparable");
    var relevantLineSpan = iComparable.GetLocation().GetLineSpan();

    // 4. now ask for the definition of "IComparable"
    // pass in the SourceName (metadata "file") as FileName - since it's not a regular file in our workspace
    var metadataNavigationRequest = new GotoDefinitionRequest
    {
        FileName = metadataResponse.SourceName,
        Line = relevantLineSpan.StartLinePosition.Line,
        Column = relevantLineSpan.StartLinePosition.Character,
        WantMetadata = true
    };
    var metadataNavigationResponse = await gotoDefinitionRequestHandler.Handle(metadataNavigationRequest);

    // 5. validate the response to be matching the expected IComparable meta info
    Assert.NotNull(metadataNavigationResponse.MetadataSource);
    Assert.Equal(AssemblyHelpers.CorLibName, metadataNavigationResponse.MetadataSource.AssemblyName);
    Assert.Equal("System.IComparable", metadataNavigationResponse.MetadataSource.TypeName);
    // Navigation into the decompiled "file" should carry a real (non-zero) position.
    Assert.NotEqual(0, metadataNavigationResponse.Line);
    Assert.NotEqual(0, metadataNavigationResponse.Column);
}
/// <summary>
/// Tries each default broker connection in turn and returns the first non-empty
/// metadata response.
/// </summary>
/// <param name="connections">The broker connections to try, in order.</param>
/// <param name="request">The metadata request to send.</param>
/// <returns>The first <see cref="MetadataResponse"/> received from any broker.</returns>
/// <exception cref="ServerUnreachableException">When no broker could be contacted.</exception>
private MetadataResponse GetMetadataResponse(IKafkaConnection[] connections, MetadataRequest request)
{
    //try each default broker until we find one that is available
    foreach (var conn in connections)
    {
        try
        {
            // Fix: GetAwaiter().GetResult() instead of .Result so a failing send
            // surfaces the original exception rather than an AggregateException
            // wrapper, keeping the warning log readable; ConfigureAwait(false)
            // avoids capturing a synchronization context while blocking.
            var response = conn.SendAsync(request).ConfigureAwait(false).GetAwaiter().GetResult();
            if (response != null && response.Count > 0)
            {
                return response.FirstOrDefault();
            }
        }
        catch (Exception ex)
        {
            _log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", conn.Endpoint, ex);
        }
    }

    throw new ServerUnreachableException(
        "Unable to query for metadata from any of the default Kafka servers. At least one provided server must be available. Server list: {0}",
        string.Join(", ", connections.Select(x => x.ToString())));
}
/// <summary>
/// Queries each default broker connection in turn for metadata on the given topics,
/// caching and returning the first successful response.
/// NOTE(review): despite the "Async" suffix this method is synchronous and blocks on
/// the send; the name is kept to avoid breaking callers.
/// </summary>
/// <param name="connections">The broker connections to try, in order.</param>
/// <param name="topics">The topics to request metadata for.</param>
/// <returns>The first <see cref="MetadataResponse"/> received from any broker.</returns>
/// <exception cref="ServerUnreachableException">When no broker could be contacted.</exception>
private MetadataResponse CycleConnectionsForTopicMetadataAsync(IEnumerable<IKafkaConnection> connections, IEnumerable<string> topics)
{
    var request = new MetadataRequest { Topics = topics.ToList() };

    //try each default broker until we find one that is available
    foreach (var conn in connections)
    {
        try
        {
            // Fix: GetAwaiter().GetResult() instead of .Result so a failing send
            // surfaces the original exception rather than an AggregateException
            // wrapper, keeping the warning log readable; ConfigureAwait(false)
            // avoids capturing a synchronization context while blocking.
            var response = conn.SendAsync(request).ConfigureAwait(false).GetAwaiter().GetResult();
            if (response != null && response.Count > 0)
            {
                var metadataResponse = response.First();
                // Keep the shared cache in sync with the freshest broker view.
                UpdateInternalMetadataCache(metadataResponse);
                return metadataResponse;
            }
        }
        catch (Exception ex)
        {
            _kafkaOptions.Log.WarnFormat("Failed to contact Kafka server={0}. Trying next default server. Exception={1}", conn.KafkaUri, ex);
        }
    }

    throw new ServerUnreachableException(
        string.Format(
            "Unable to query for metadata from any of the default Kafka servers. At least one provided server must be available. Server list: {0}",
            string.Join(", ", _kafkaOptions.KafkaServerUri.Select(x => x.ToString()))));
}