/// <summary>
/// Get offsets for a single partition of a given topic.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicName">Name of the topic to get offset information from.</param>
/// <param name="partitionId">The partition to get offsets for.</param>
/// <param name="maxOffsets">How many to get, at most.</param>
/// <param name="offsetTime">These are best described by <see cref="OffsetRequest.Topic.Timestamp"/></param>
/// <param name="cancellationToken"></param>
public static async Task<OffsetResponse.Topic> GetTopicOffsetAsync(this IBrokerRouter brokerRouter, string topicName, int partitionId, int maxOffsets, long offsetTime, CancellationToken cancellationToken)
{
    var request = new OffsetRequest(new OffsetRequest.Topic(topicName, partitionId, offsetTime, maxOffsets));
    var response = await brokerRouter.SendAsync(request, topicName, partitionId, cancellationToken).ConfigureAwait(false);
    return response.Topics.SingleOrDefault(t => t.TopicName == topicName && t.PartitionId == partitionId);
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncQueue">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <remarks>
/// The maximumAsyncQueue parameter provides a mechanism for blocking an async request from returning when the number of queued
/// requests exceeds a certain limit. This is useful if a client is trying to push a large stream of documents through the producer
/// and wants to block downstream if the producer is overloaded.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncQueue = -1) : base(brokerRouter)
{
    _router = brokerRouter;
    _maximumAsyncQueue = maximumAsyncQueue == -1 ? int.MaxValue : maximumAsyncQueue;
    _sendSemaphore = new SemaphoreSlim(_maximumAsyncQueue, _maximumAsyncQueue);
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncQueue">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <remarks>
/// The maximumAsyncQueue parameter provides a mechanism for blocking an async request from returning when the number of queued
/// requests exceeds a certain limit. This is useful if a client is trying to push a large stream of documents through the producer
/// and wants to block downstream if the producer is overloaded.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncQueue = -1)
{
    _router = brokerRouter;
    _metadataQueries = new MetadataQueries(_router);
    _maximumAsyncQueue = maximumAsyncQueue == -1 ? int.MaxValue : maximumAsyncQueue;
    _sendSemaphore = new SemaphoreSlim(_maximumAsyncQueue, _maximumAsyncQueue);
}
public static IBrokerRouter CreateConnection(int threadId)
{
    string uri = "http://" + ConfigurationManager.AppSettings["MessagingQueueHostAddress"] + ":" + ConfigurationManager.AppSettings["KafkaPort"];
    lock (SyncObj)
    {
        // Reuse the router already registered for this thread, if any, instead of creating a new one.
        if (ConnectionsDict.ContainsKey(threadId))
        {
            return ConnectionsDict[threadId].BrokerRouter;
        }

        var kafkaOptions = new KafkaOptions(new Uri(uri));
        _brokerRouter = new BrokerRouter(kafkaOptions);
        ConnectionsDict.Add(threadId, new KafkaQueueConnection
        {
            KafkaOptions = kafkaOptions,
            BrokerRouter = _brokerRouter
        });
        return _brokerRouter;
    }
}
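// A hypothetical caller for the factory above. The containing class name (KafkaQueueConnectionFactory)
// is assumed purely for illustration; the appSettings keys it relies on are the ones read in the method.
// Repeated calls with the same thread id return the cached router instead of opening a new one.
var router = KafkaQueueConnectionFactory.CreateConnection(Thread.CurrentThread.ManagedThreadId);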
/// <summary>
/// Get offsets for a single partition of a given topic.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicName">Name of the topic to get offset information from.</param>
/// <param name="partitionId">The partition to get offsets for.</param>
/// <param name="consumerGroup">The id of the consumer group</param>
/// <param name="cancellationToken"></param>
public static async Task<OffsetFetchResponse.Topic> GetTopicOffsetAsync(this IBrokerRouter brokerRouter, string topicName, int partitionId, string consumerGroup, CancellationToken cancellationToken)
{
    var request = new OffsetFetchRequest(consumerGroup, new TopicPartition(topicName, partitionId));
    var response = await brokerRouter.SendAsync(request, topicName, partitionId, consumerGroup, cancellationToken).ConfigureAwait(false);
    return response.Topics.SingleOrDefault(t => t.TopicName == topicName && t.PartitionId == partitionId);
}
public async Task SendAsync(IBrokerRouter brokerRouter, CancellationToken cancellationToken, IRequestContext context = null)
{
    _response = null;
    _route = await brokerRouter.GetBrokerRouteAsync(_topicName, _partitionId, cancellationToken).ConfigureAwait(false);
    _response = await _route.Connection.SendAsync(_request, cancellationToken, context).ConfigureAwait(false);
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncQueue">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <remarks>
/// The maximumAsyncQueue parameter provides a mechanism for blocking an async request from returning when the number of queued
/// requests exceeds a certain limit. This is useful if a client is trying to push a large stream of documents through the producer
/// and wants to block downstream if the producer is overloaded.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncQueue = -1) : base(brokerRouter)
{
    _router = brokerRouter;
    maximumAsyncQueue = maximumAsyncQueue == -1 ? int.MaxValue : maximumAsyncQueue;
    _sendSemaphore = new SemaphoreSlim(maximumAsyncQueue, maximumAsyncQueue);
}
public void Setup()
{
    _log = new MemoryLog();
    //_log = Substitute.ForPartsOf<MemoryLog>();
    _brokerRouter = Substitute.For<IBrokerRouter>();
    _brokerRouter.Log.ReturnsForAnyArgs(_log);
    _brokerRouter.Configuration.ReturnsForAnyArgs(new CacheConfiguration());
}
public Consumer(IBrokerRouter brokerRouter, IConsumerConfiguration configuration = null, bool leaveRouterOpen = true)
{
    _stopToken = new CancellationTokenSource();
    _brokerRouter = brokerRouter;
    _leaveRouterOpen = leaveRouterOpen;
    Configuration = configuration ?? new ConsumerConfiguration();
    _localMessages = ImmutableList<Message>.Empty;
}
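// A minimal construction sketch for the Consumer above (not the full consume loop). Assumptions: the
// BrokerRouter/KafkaOptions types shown elsewhere in this listing, a reachable broker URI, and that
// Consumer is disposable (suggested by the leaveRouterOpen flag and internal stop token).
var router = new BrokerRouter(new KafkaOptions(new Uri("http://localhost:9092")));
using (var consumer = new Consumer(router, new ConsumerConfiguration(), leaveRouterOpen: false))
{
    // leaveRouterOpen: false hands ownership of the router to the consumer,
    // so the router is disposed together with the consumer.
}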
public static async Task<bool> RefreshTopicMetadataIfInvalidAsync(this IBrokerRouter brokerRouter, IEnumerable<string> topicNames, bool? metadataInvalid, CancellationToken cancellationToken)
{
    if (metadataInvalid.GetValueOrDefault(true))
    {
        // unknown metadata status should not force the issue
        await brokerRouter.RefreshTopicMetadataAsync(topicNames, metadataInvalid.GetValueOrDefault(), cancellationToken).ConfigureAwait(false);
    }
    return false;
}
public ConsumerOptions(string topic, IBrokerRouter router)
{
    Topic = topic;
    Router = router;
    PartitionWhitelist = new List<int>();
    Log = new DefaultTraceLog();
    TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
    ConsumerBufferSize = 50;
}
// TODO: add logger
public ProtocolGateway(params Uri[] brokerUrl)
{
    var kafkaOptions = new KafkaOptions(brokerUrl)
    {
        MaximumReconnectionTimeout = TimeSpan.FromSeconds(60),
        ResponseTimeoutMs = TimeSpan.FromSeconds(60)
    };
    _brokerRouter = new BrokerRouter(kafkaOptions);
}
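// A short usage sketch for the ProtocolGateway constructor above: pass one or more broker URIs and the
// gateway builds its own BrokerRouter internally. The URIs are illustrative.
var gateway = new ProtocolGateway(
    new Uri("http://kafka-broker-1:9092"),
    new Uri("http://kafka-broker-2:9092"));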
public ConsumerOptions(string topic, IBrokerRouter router)
{
    Topic = topic;
    Router = router;
    PartitionWhitelist = new List<int>();
    Log = new DefaultTraceLog();
    TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
    ConsumerBufferSize = 50;
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="configuration">The configuration parameters.</param>
/// <param name="leaveRouterOpen">Whether to leave the router open when the producer is disposed, rather than disposing it.</param>
/// <remarks>
/// The <see cref="IProducerConfiguration.RequestParallelization"/> parameter provides a mechanism for limiting the number of
/// async requests in flight at any one time by blocking the caller requesting the async call. This effectively puts an upper
/// limit on the number of times a caller can call SendMessagesAsync before the caller is blocked.
///
/// The <see cref="IProducerConfiguration.BatchSize"/> parameter provides a way to limit the maximum amount of memory the driver uses
/// should the send pipeline get overwhelmed and the buffer start to fill up. This is an imprecise limit on memory use, as the
/// amount of memory actually used depends on the general size of the messages being buffered.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, IProducerConfiguration configuration = null, bool leaveRouterOpen = true)
{
    _leaveRouterOpen = leaveRouterOpen;
    BrokerRouter = brokerRouter;
    Configuration = configuration ?? new ProducerConfiguration();
    _produceMessageQueue = new AsyncProducerConsumerQueue<ProduceTopicTask>();
    _produceRequestSemaphore = new SemaphoreSlim(Configuration.RequestParallelization, Configuration.RequestParallelization);
    _stopToken = new CancellationTokenSource();
    _batchSendTask = Task.Run(BatchSendAsync, _stopToken.Token);
}
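// A minimal sketch of wiring up the Producer above. The constructor calls are taken from this listing;
// that Producer is disposable is assumed (suggested by leaveRouterOpen and the internal stop token), and
// the SendMessagesAsync call is only referenced in the remarks above, so its exact signature is an assumption.
var router = new BrokerRouter(new KafkaOptions(new Uri("http://localhost:9092")));
using (var producer = new Producer(router, new ProducerConfiguration(), leaveRouterOpen: false))
{
    // messages are sent via SendMessagesAsync (signature assumed, see remarks above), e.g.:
    // await producer.SendMessagesAsync(...);
}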
public ConsumerOptions(string topic, IBrokerRouter router)
{
    Topic = topic;
    Router = router;
    PartitionWhitelist = new List<int>();
    Log = new DefaultTraceLog();
    TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
    ConsumerBufferSize = DefaultMaxConsumerBufferSize;
    BackoffInterval = TimeSpan.FromMilliseconds(DefaultBackoffIntervalMS);
    FetchBufferMultiplier = DefaulFetchBufferMultiplier;
}
private static async Task<T> SendAsync<T>(this IBrokerRouter brokerRouter, IRequest<T> request, string topicName, int partitionId, string consumerGroup, CancellationToken cancellationToken) where T : class, IResponse
{
    try
    {
        return await brokerRouter.SendAsync(request, topicName, partitionId, cancellationToken).ConfigureAwait(false);
    }
    catch (RequestException ex) when (ex.ErrorCode == ErrorResponseCode.NotCoordinatorForGroup)
    {
        // ensure the group exists, then retry
        await brokerRouter.SendAsync(new GroupCoordinatorRequest(consumerGroup), topicName, partitionId, cancellationToken).ConfigureAwait(false);
        return await brokerRouter.SendAsync(request, topicName, partitionId, cancellationToken).ConfigureAwait(false);
    }
}
public ConsumerOptions(string topic, IBrokerRouter router)
{
    Topic = topic;
    Router = router;
    PartitionWhitelist = new List<int>();
    Log = router.Log;
    TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
    ConsumerBufferSize = DefaultMaxConsumerBufferSize;
    BackoffInterval = TimeSpan.FromMilliseconds(DefaultBackoffIntervalMS);
    FetchBufferMultiplier = DefaulFetchBufferMultiplier;
    MaxWaitTimeForMinimumBytes = TimeSpan.FromMilliseconds(FetchRequest.DefaultMaxBlockingWaitTime);
    MinimumBytes = FetchRequest.DefaultMinBlockingByteBufferSize;
}
public ConsumerOptions(string topic, IBrokerRouter router)
{
    Topic = topic;
    Router = router;
    PartitionWhitelist = new List<int>();
    Log = router.Log;
    TopicPartitionQueryTimeMs = (int)TimeSpan.FromMinutes(15).TotalMilliseconds;
    ConsumerBufferSize = DefaultMaxConsumerBufferSize;
    BackoffInterval = TimeSpan.FromMilliseconds(DefaultBackoffIntervalMilliseconds);
    FetchBufferMultiplier = DefaulFetchBufferMultiplier;
    MaxWaitTimeForMinimumBytes = TimeSpan.FromMilliseconds(FetchRequest.DefaultMaxBlockingWaitTime);
    MinimumBytes = FetchRequest.DefaultMinBlockingByteBufferSize;
}
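// A small sketch of overriding the defaults set in the ConsumerOptions constructors above, e.g. to
// restrict consumption to specific partitions and tune the blocking fetch. The property names come from
// the constructors in this listing (public setters are assumed); the topic name and values are illustrative.
var optionsRouter = new BrokerRouter(new KafkaOptions(new Uri("http://localhost:9092")));
var consumerOptions = new ConsumerOptions("my-topic", optionsRouter)
{
    MinimumBytes = 1024,                                  // block the fetch until at least 1 KB is available...
    MaxWaitTimeForMinimumBytes = TimeSpan.FromSeconds(5), // ...or until 5 seconds have passed
    BackoffInterval = TimeSpan.FromMilliseconds(500)      // wait between polls when no data arrives
};
consumerOptions.PartitionWhitelist.Add(0);                // only consume partition 0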
/// <summary>
/// Given a collection of server connections, query for the topic metadata.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicNames">Topics to get metadata information for.</param>
/// <param name="cancellationToken"></param>
/// <remarks>
/// Used by <see cref="BrokerRouter"/> internally. Broken out for better testability, but not intended to be used separately.
/// </remarks>
/// <returns>MetadataResponse validated to be complete.</returns>
internal static async Task<MetadataResponse> GetMetadataAsync(this IBrokerRouter brokerRouter, IEnumerable<string> topicNames, CancellationToken cancellationToken)
{
    var request = new MetadataRequest(topicNames);

    return await brokerRouter.Configuration.RefreshRetry.AttemptAsync(
        async (attempt, timer) =>
        {
            var response = await brokerRouter.GetMetadataAsync(request, cancellationToken).ConfigureAwait(false);
            if (response == null)
            {
                return new RetryAttempt<MetadataResponse>(null);
            }

            var results = response.Brokers
                .Select(ValidateBroker)
                .Union(response.Topics.Select(ValidateTopic))
                .Where(r => !r.IsValid.GetValueOrDefault())
                .ToList();

            var exceptions = results.Select(r => r.ToException()).Where(e => e != null).ToList();
            if (exceptions.Count == 1)
            {
                throw exceptions.Single();
            }
            if (exceptions.Count > 1)
            {
                throw new AggregateException(exceptions);
            }

            if (results.Count == 0)
            {
                return new RetryAttempt<MetadataResponse>(response);
            }
            foreach (var result in results.Where(r => !string.IsNullOrEmpty(r.Message)))
            {
                brokerRouter.Log.Warn(() => LogEvent.Create(result.Message));
            }

            return RetryAttempt<MetadataResponse>.Retry;
        },
        (attempt, retry) => brokerRouter.Log.Warn(() => LogEvent.Create($"Failed metadata request on attempt {attempt}: Will retry in {retry}")),
        null, // return the failed response above, resulting in a null
        (ex, attempt, retry) => { throw ex.PrepareForRethrow(); },
        (ex, attempt) => brokerRouter.Log.Warn(() => LogEvent.Create(ex, $"Failed metadata request on attempt {attempt}")),
        cancellationToken);
}
private static async Task<MetadataResponse> GetMetadataAsync(this IBrokerRouter brokerRouter, MetadataRequest request, CancellationToken cancellationToken)
{
    var servers = new List<string>();
    foreach (var connection in brokerRouter.Connections)
    {
        var server = connection.Endpoint?.ToString();
        try
        {
            return await connection.SendAsync(request, cancellationToken).ConfigureAwait(false);
        }
        catch (Exception ex)
        {
            servers.Add(server);
            brokerRouter.Log.Warn(() => LogEvent.Create(ex, $"Failed to contact {server}: Trying next server"));
        }
    }

    throw new RequestException(request.ApiKey, ErrorResponseCode.None, $"Unable to make Metadata Request to any of {string.Join(" ", servers)}");
}
public static IBrokerRouter CreateRouter(int threadId, IBrokerRouter brokerRouter)
{
    lock (SyncObj)
    {
        if (ConnectionsDict.ContainsKey(threadId) && ConnectionsDict[threadId].BrokerRouter != brokerRouter)
        {
            throw new NotSupportedException();
        }

        _brokerRouter = brokerRouter;
        ConnectionsDict[threadId].BrokerRouter = _brokerRouter;
        return _brokerRouter;
    }
}
/// <exception cref="CachedMetadataException">Thrown if the cached metadata for the given topic is invalid or missing.</exception> /// <exception cref="FetchOutOfRangeException">Thrown if the fetch request is not valid.</exception> /// <exception cref="TimeoutException">Thrown if there request times out</exception> /// <exception cref="ConnectionException">Thrown in case of network error contacting broker (after retries), or if none of the default brokers can be contacted.</exception> /// <exception cref="RequestException">Thrown in case of an unexpected error in the request</exception> public static async Task <T> SendAsync <T>(this IBrokerRouter brokerRouter, IRequest <T> request, string topicName, int partitionId, CancellationToken cancellationToken, IRequestContext context = null, IRetry retryPolicy = null) where T : class, IResponse { bool?metadataInvalid = false; var brokeredRequest = new BrokeredRequest <T>(request, topicName, partitionId, brokerRouter.Log); return(await(retryPolicy ?? new Retry(TimeSpan.MaxValue, 3)).AttemptAsync( async(attempt, timer) => { metadataInvalid = await brokerRouter.RefreshTopicMetadataIfInvalidAsync(topicName, metadataInvalid, cancellationToken).ConfigureAwait(false); await brokeredRequest.SendAsync(brokerRouter, cancellationToken, context).ConfigureAwait(false); return brokeredRequest.MetadataRetryResponse(attempt, out metadataInvalid); }, brokeredRequest.MetadataRetry, brokeredRequest.ThrowExtractedException, (ex, attempt, retry) => brokeredRequest.MetadataRetry(attempt, ex, out metadataInvalid), null, // do nothing on final exception -- will be rethrown cancellationToken)); }
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncRequests">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <param name="maximumMessageBuffer">The maximum amount of messages to buffer if the async calls are blocking from sending.</param>
/// <remarks>
/// The maximumAsyncRequests parameter provides a mechanism for limiting the number of async requests in flight at any one time
/// by blocking the caller requesting the async call. This effectively puts an upper limit on the number of times a caller can
/// call SendMessageAsync before the caller is blocked.
///
/// The maximumMessageBuffer parameter provides a way to limit the maximum amount of memory the driver uses should the send pipeline
/// get overwhelmed and the buffer start to fill up. This is an imprecise limit on memory use, as the amount of memory actually used
/// depends on the general size of the messages being buffered.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncRequests = MaximumAsyncRequests, int maximumMessageBuffer = MaximumMessageBuffer)
{
    BrokerRouter = brokerRouter;
    _maximumAsyncRequests = maximumAsyncRequests;
    _metadataQueries = new MetadataQueries(BrokerRouter);
    _asyncCollection = new AsyncCollection<TopicMessage>();
    _semaphoreMaximumAsync = new SemaphoreSlim(maximumAsyncRequests, maximumAsyncRequests);
    BatchSize = DefaultBatchSize;
    BatchDelayTime = TimeSpan.FromMilliseconds(DefaultBatchDelayMS);

    _postTask = Task.Run(async () =>
    {
        await BatchSendAsync().ConfigureAwait(false);
        //TODO add log for ending the sending thread.
    });
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncRequests">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <param name="maximumMessageBuffer">The maximum amount of messages to buffer if the async calls are blocking from sending.</param>
/// <remarks>
/// The maximumAsyncRequests parameter provides a mechanism for limiting the number of async requests in flight at any one time
/// by blocking the caller requesting the async call. This effectively puts an upper limit on the number of times a caller can
/// call SendMessageAsync before the caller is blocked.
///
/// The maximumMessageBuffer parameter provides a way to limit the maximum amount of memory the driver uses should the send pipeline
/// get overwhelmed and the buffer start to fill up. This is an imprecise limit on memory use, as the amount of memory actually used
/// depends on the general size of the messages being buffered.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncRequests = MaximumAsyncRequests, int maximumMessageBuffer = MaximumMessageBuffer)
{
    BrokerRouter = brokerRouter;
    _protocolGateway = new ProtocolGateway(BrokerRouter);
    _maximumAsyncRequests = maximumAsyncRequests;
    _metadataQueries = new MetadataQueries(BrokerRouter);
    _asyncCollection = new AsyncCollection<TopicMessage>();
    _semaphoreMaximumAsync = new SemaphoreSlim(maximumAsyncRequests, maximumAsyncRequests);
    BatchSize = DefaultBatchSize;
    BatchDelayTime = TimeSpan.FromMilliseconds(DefaultBatchDelayMS);

    _postTask = Task.Run(async () =>
    {
        // await the send loop so the log below fires when it actually ends
        await BatchSendAsync().ConfigureAwait(false);
        BrokerRouter.Log.InfoFormat("ending the sending thread");
    });
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncRequests">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <param name="maximumMessageBuffer">The maximum amount of messages to buffer if the async calls are blocking from sending.</param>
/// <remarks>
/// The maximumAsyncRequests parameter provides a mechanism for limiting the number of async requests in flight at any one time
/// by blocking the caller requesting the async call. This effectively puts an upper limit on the number of times a caller can
/// call SendMessageAsync before the caller is blocked.
///
/// The maximumMessageBuffer parameter provides a way to limit the maximum amount of memory the driver uses should the send pipeline
/// get overwhelmed and the buffer start to fill up. This is an imprecise limit on memory use, as the amount of memory actually used
/// depends on the general size of the messages being buffered.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncRequests = MaximumAsyncRequests, int maximumMessageBuffer = MaximumMessageBuffer)
{
    BrokerRouter = brokerRouter;
    _maximumAsyncRequests = maximumAsyncRequests;
    _metadataQueries = new MetadataQueries(BrokerRouter);
    _asyncCollection = new AsyncCollection<TopicMessage>();
    _semaphoreMaximumAsync = new SemaphoreSlim(maximumAsyncRequests, maximumAsyncRequests);
    BatchSize = DefaultBatchSize;
    BatchDelayTime = TimeSpan.FromMilliseconds(DefaultBatchDelayMS);

    _postTask = Task.Run(async () =>
    {
        await BatchSendAsync().ConfigureAwait(false);
        //TODO add log for ending the sending thread.
    });
}
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncRequests">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <param name="maximumMessageBuffer">The maximum amount of messages to buffer if the async calls are blocking from sending.</param>
/// <remarks>
/// The maximumAsyncRequests parameter provides a mechanism for limiting the number of async requests in flight at any one time
/// by blocking the caller requesting the async call. This effectively puts an upper limit on the number of times a caller can
/// call SendMessageAsync before the caller is blocked.
///
/// The maximumMessageBuffer parameter provides a way to limit the maximum amount of memory the driver uses should the send pipeline
/// get overwhelmed and the buffer start to fill up. This is an imprecise limit on memory use, as the amount of memory actually used
/// depends on the general size of the messages being buffered.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncRequests = MaximumAsyncRequests, int maximumMessageBuffer = MaximumMessageBuffer)
{
    BrokerRouter = brokerRouter;
    _protocolGateway = new ProtocolGateway(BrokerRouter);
    _maximumAsyncRequests = maximumAsyncRequests;
    _metadataQueries = new MetadataQueries(BrokerRouter);
    _asyncCollection = new AsyncCollection<TopicMessage>();
    _semaphoreMaximumAsync = new SemaphoreSlim(maximumAsyncRequests, maximumAsyncRequests);
    BatchSize = DefaultBatchSize;
    BatchDelayTime = TimeSpan.FromMilliseconds(DefaultBatchDelayMS);

    _postTask = Task.Run(async () =>
    {
        // await the send loop so the log below fires when it actually ends
        await BatchSendAsync().ConfigureAwait(false);
        BrokerRouter.Log.InfoFormat("ending the sending thread");
    });
}
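// The constructors above set BatchSize and BatchDelayTime defaults; from the names, queued messages are
// presumably flushed once BatchSize messages accumulate or the batch delay elapses, whichever comes first.
// A sketch of tuning those knobs after construction, assuming they are publicly settable properties (they
// are assigned as properties above) and that a BrokerRouter named router is in scope.
var batchingProducer = new Producer(router, maximumAsyncRequests: 20, maximumMessageBuffer: 1000)
{
    BatchSize = 200,                               // flush once 200 messages are queued...
    BatchDelayTime = TimeSpan.FromMilliseconds(50) // ...or when the 50 ms batch delay elapses
};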
/// <summary>
/// Get offsets for all partitions of a given topic.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicName">Name of the topic to get offset information from.</param>
/// <param name="maxOffsets">How many to get, at most.</param>
/// <param name="offsetTime">These are best described by <see cref="OffsetRequest.Topic.Timestamp"/></param>
/// <param name="cancellationToken"></param>
/// <param name="retryPolicy"></param>
public static async Task<IImmutableList<OffsetResponse.Topic>> GetTopicOffsetsAsync(this IBrokerRouter brokerRouter, string topicName, int maxOffsets, long offsetTime, CancellationToken cancellationToken, IRetry retryPolicy = null)
{
    bool? metadataInvalid = false;
    var offsets = new Dictionary<int, OffsetResponse.Topic>();
    BrokeredRequest<OffsetResponse>[] brokeredRequests = null;

    return await (retryPolicy ?? new Retry(TimeSpan.MaxValue, 3)).AttemptAsync(
        async (attempt, timer) =>
        {
            metadataInvalid = await brokerRouter.RefreshTopicMetadataIfInvalidAsync(topicName, metadataInvalid, cancellationToken).ConfigureAwait(false);
            var topicMetadata = await brokerRouter.GetTopicMetadataAsync(topicName, cancellationToken).ConfigureAwait(false);

            brokeredRequests = topicMetadata
                .Partitions
                .Where(_ => !offsets.ContainsKey(_.PartitionId)) // skip partitions already successfully retrieved
                .GroupBy(x => x.LeaderId)
                .Select(partitions => new BrokeredRequest<OffsetResponse>(
                    new OffsetRequest(partitions.Select(_ => new OffsetRequest.Topic(topicName, _.PartitionId, offsetTime, maxOffsets))),
                    topicName,
                    partitions.Select(_ => _.PartitionId).First(),
                    brokerRouter.Log))
                .ToArray();

            await Task.WhenAll(brokeredRequests.Select(_ => _.SendAsync(brokerRouter, cancellationToken))).ConfigureAwait(false);

            var responses = brokeredRequests.Select(_ => _.MetadataRetryResponse(attempt, out metadataInvalid)).ToArray();
            foreach (var response in responses.Where(_ => _.IsSuccessful))
            {
                foreach (var offsetTopic in response.Value.Topics)
                {
                    offsets[offsetTopic.PartitionId] = offsetTopic;
                }
            }

            return responses.All(_ => _.IsSuccessful)
                ? new RetryAttempt<IImmutableList<OffsetResponse.Topic>>(offsets.Values.ToImmutableList())
                : RetryAttempt<IImmutableList<OffsetResponse.Topic>>.Retry;
        },
        brokeredRequests.MetadataRetry,
        brokeredRequests.ThrowExtractedException,
        (ex, attempt, retry) => brokeredRequests.MetadataRetry(attempt, ex, out metadataInvalid),
        null, // do nothing on final exception -- will be rethrown
        cancellationToken);
}
public MetadataQueries(IBrokerRouter brokerRouter) { _brokerRouter = brokerRouter; }
private ConsumerOptions CreateOptions(IBrokerRouter router) { return new ConsumerOptions(BrokerRouterProxy.TestTopic, router); }
private ConsumerOptions CreateOptions(IBrokerRouter router) { return new ConsumerOptions(BrokerRouterProxy.TestTopic, router); }
public JsonProducer(IBrokerRouter brokerRouter) { _producer = new Producer(brokerRouter); }
/// <summary>
/// Construct a Producer class.
/// </summary>
/// <param name="brokerRouter">The router used to direct produced messages to the correct partition.</param>
/// <param name="maximumAsyncQueue">The maximum async calls allowed before blocking new requests. -1 indicates unlimited.</param>
/// <remarks>
/// The maximumAsyncQueue parameter provides a mechanism for blocking an async request from returning when the number of queued
/// requests exceeds a certain limit. This is useful if a client is trying to push a large stream of documents through the producer
/// and wants to block downstream if the producer is overloaded.
///
/// A message will start its timeout countdown as soon as it is added to the producer async queue. If there are a large number of
/// messages sitting in the async queue then a message may spend its entire timeout cycle waiting in this queue, never being
/// sent to Kafka before a timeout exception is thrown.
/// </remarks>
public Producer(IBrokerRouter brokerRouter, int maximumAsyncQueue = -1) : base(brokerRouter)
{
    _router = brokerRouter;
    _maximumAsyncQueue = maximumAsyncQueue;
}
public CommonQueries(IBrokerRouter brokerRouter) { _brokerRouter = brokerRouter; }
public ProtocolGateway(KafkaOptions kafkaOptions) { _brokerRouter = new BrokerRouter(kafkaOptions); }
public ProtocolGateway(IBrokerRouter brokerRouter) { _brokerRouter = brokerRouter; }
/// <summary>
/// Get offsets for a single partition of a given topic.
/// </summary>
public static Task<OffsetResponse.Topic> GetTopicOffsetAsync(this IBrokerRouter brokerRouter, string topicName, int partitionId, CancellationToken cancellationToken)
{
    return brokerRouter.GetTopicOffsetAsync(topicName, partitionId, OffsetRequest.Topic.DefaultMaxOffsets, OffsetRequest.Topic.LatestTime, cancellationToken);
}
/// <summary>
/// Get offsets for all partitions of a given topic.
/// </summary>
/// <param name="brokerRouter">The router which provides the route and metadata.</param>
/// <param name="topicName">Name of the topic to get offset information from.</param>
/// <param name="cancellationToken"></param>
public static Task<IImmutableList<OffsetResponse.Topic>> GetTopicOffsetsAsync(this IBrokerRouter brokerRouter, string topicName, CancellationToken cancellationToken)
{
    return brokerRouter.GetTopicOffsetsAsync(topicName, OffsetRequest.Topic.DefaultMaxOffsets, OffsetRequest.Topic.LatestTime, cancellationToken);
}
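// A short sketch of the offset helpers above: the overloads without maxOffsets/offsetTime default to the
// latest offsets. It assumes an IBrokerRouter named brokerRouter is in scope inside an async method; the
// topic name is illustrative.
var partitionOffsets = await brokerRouter.GetTopicOffsetsAsync("my-topic", CancellationToken.None);
foreach (var topicOffset in partitionOffsets)
{
    // Each OffsetResponse.Topic carries the partition id it applies to (see the filters above);
    // the offset values themselves are exposed on the same object.
    Console.WriteLine(topicOffset.PartitionId);
}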