/// <summary>
/// Sends a fetch request to the broker and returns its response,
/// retrying on failure up to the configured number of tries.
/// </summary>
/// <param name="request">The fetch request to send.</param>
/// <returns>The broker's fetch response.</returns>
public FetchResponse Fetch(FetchRequest request)
{
    for (short attempt = 1; attempt <= this.config.NumberOfTries; attempt++)
    {
        try
        {
            Logger.Debug("Fetch is waiting for send lock");
            lock (this)
            {
                Logger.Debug("Fetch acquired send lock. Begin send");
                return connection.Send(request);
            }
        }
        catch (Exception ex)
        {
            // Retry budget exhausted: surface the failure to the caller.
            if (attempt == this.config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat("Fetch reconnect due to {0}", ex.FormatException());
        }
    }

    // Unreachable in practice: the last attempt either returns or rethrows.
    return null;
}
/// <summary>
/// Gets a list of valid offsets (up to maxSize) before the given time.
/// </summary>
/// <param name="request">The offset request.</param>
/// <returns>The list of offsets, in descending order.</returns>
public OffsetResponse GetOffsetsBefore(OffsetRequest request)
{
    for (short attempt = 1; attempt <= Config.NumberOfTries; attempt++)
    {
        try
        {
            lock (this)
            {
                return connection.Send(request);
            }
        }
        catch (Exception ex)
        {
            // Retry budget exhausted: surface the failure to the caller.
            if (attempt == Config.NumberOfTries)
            {
                throw;
            }

            Logger.InfoFormat("GetOffsetsBefore reconnect due to {0}", ex.FormatException());
        }
    }

    // Unreachable in practice: the last attempt either returns or rethrows.
    return null;
}
/// <summary>
/// Checks that brokers are alive by sending an offset request to each of them.
/// </summary>
/// <param name="brokers">Collection of brokers to check. If null, the brokers list will be retrieved from ZooKeeper state.</param>
/// <returns>
/// Dictionary where Key is Broker Id and Value indicates whether the broker responds to requests.
/// Value is true when the request was successfully sent to the broker and any response was received back.
/// Value is false when the connection to the broker failed.
/// </returns>
/// <remarks>
/// If brokers are not specified this method will only ping brokers that exist in ZooKeeper state.
/// </remarks>
public IDictionary<int, bool> GetKafkaBrokersAliveState(ICollection<Broker> brokers = null)
{
    // Retrieve the latest brokers info when no explicit list was supplied.
    if (brokers == null)
    {
        RefreshKafkaBrokersInfo();
    }

    brokers = brokers ?? kafkaCluster.Brokers.Values;
    var brokersConnectionString = string.Join(", ", kafkaCluster.Brokers.Values.Select(x => string.Join(":", x.Id, x.Host, x.Port)));
    Logger.InfoFormat("Collecting brokers alive state for brokers {0}", brokersConnectionString);
    var brokerAliveStateMap = new SortedDictionary<int, bool>();
    foreach (var broker in brokers)
    {
        try
        {
            Logger.DebugFormat("Sending request to broker #{0} {1}:{2}", broker.Id, broker.Host, broker.Port);
            using (var kafkaConnection = new KafkaConnection(broker.Host, broker.Port, config.BufferSize, config.SendTimeout, config.ReceiveTimeout, int.MaxValue))
            {
                // Send a topic offset request for a random non-existing topic;
                // any response proves the broker is reachable.
                var requestInfos = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
                requestInfos[Guid.NewGuid().ToString("N")] = new List<PartitionOffsetRequestInfo> { new PartitionOffsetRequestInfo(0, OffsetRequest.EarliestTime, 1) };
                kafkaConnection.Send(new OffsetRequest(requestInfos));
            }

            brokerAliveStateMap[broker.Id] = true;
        }
        catch (Exception exc)
        {
            // FIX: the format string previously lacked a {3} placeholder (and was split
            // across a raw newline), so the exception detail was silently dropped.
            Logger.WarnFormat("Failed to send request to broker #{0} {1}:{2}. Error: {3}", broker.Id, broker.Host, broker.Port, exc.FormatException());
            brokerAliveStateMap[broker.Id] = false;
        }
    }

    Logger.InfoFormat("Completed collecting brokers alive state for brokers {0}", brokersConnectionString);
    return brokerAliveStateMap;
}
/// <summary>
/// Checks that brokers are alive by sending an offset request to each of them.
/// </summary>
/// <param name="brokers">Collection of brokers to check. If null, the brokers list will be retrieved from ZooKeeper state.</param>
/// <returns>
/// Dictionary where Key is Broker Id and Value indicates whether the broker responds to requests.
/// Value is true when the request was successfully sent to the broker and any response was received back.
/// Value is false when the connection to the broker failed.
/// </returns>
/// <remarks>
/// If brokers are not specified this method will only ping brokers that exist in ZooKeeper state.
/// </remarks>
public IDictionary<int, bool> GetKafkaBrokersAliveState(ICollection<Broker> brokers = null)
{
    // Retrieve the latest brokers info when no explicit list was supplied.
    if (brokers == null)
    {
        this.RefreshKafkaBrokersInfo();
    }

    brokers = brokers ?? this.kafkaCluster.Brokers.Values;
    var brokersConnectionString = string.Join(", ", this.kafkaCluster.Brokers.Values.Select(x => string.Join(":", x.Id, x.Host, x.Port)));
    Logger.InfoFormat("Collecting brokers alive state for brokers {0}", brokersConnectionString);
    var brokerAliveStateMap = new SortedDictionary<int, bool>();
    foreach (var broker in brokers)
    {
        try
        {
            Logger.DebugFormat("Sending request to broker #{0} {1}:{2}", broker.Id, broker.Host, broker.Port);
            using (var kafkaConnection = new KafkaConnection(broker.Host, broker.Port, this.config.BufferSize, this.config.SendTimeout, this.config.ReceiveTimeout, int.MaxValue))
            {
                // Send a topic offset request for a random non-existing topic;
                // any response proves the broker is reachable.
                var requestInfos = new Dictionary<string, List<PartitionOffsetRequestInfo>>();
                requestInfos[Guid.NewGuid().ToString("N")] = new List<PartitionOffsetRequestInfo>() { new PartitionOffsetRequestInfo(0, OffsetRequest.EarliestTime, 1) };
                kafkaConnection.Send(new OffsetRequest(requestInfos));
            }

            brokerAliveStateMap[broker.Id] = true;
        }
        catch (Exception exc)
        {
            // FIX: the format string previously lacked a {3} placeholder (and was split
            // across a raw newline), so the exception detail was silently dropped.
            Logger.WarnFormat("Failed to send request to broker #{0} {1}:{2}. Error: {3}", broker.Id, broker.Host, broker.Port, exc.FormatException());
            brokerAliveStateMap[broker.Id] = false;
        }
    }

    Logger.InfoFormat("Completed collecting brokers alive state for brokers {0}", brokersConnectionString);
    return brokerAliveStateMap;
}