/// <summary>
/// Signals that the execution of a request has completed and, if any
/// requests are waiting in the local queue, dispatches the next one.
/// </summary>
private void SignalCompletion()
{
    ThreadPoolOptions<T> pending = default(ThreadPoolOptions<T>);
    bool hasPending;
    lock (_queueLock)
    {
        hasPending = _queuedRequests.Count > 0;
        if (hasPending)
        {
            pending = _queuedRequests.Dequeue();
        }
    }

    if (hasPending)
    {
        // On WebGL there is no thread pool; invoke inline instead.
        if (IsWebGL)
        {
            Callback(pending);
        }
        else
        {
            ThreadPool.QueueUserWorkItem(Callback, pending);
        }
    }

    // Decrement only after the hand-off above so the in-flight accounting
    // stays correct: one request finished, one (possibly) started.
    // WebGL runs single-threaded, so a plain decrement is safe there.
    if (IsWebGL)
    {
        _requestCount--;
    }
    else
    {
        Interlocked.Decrement(ref _requestCount);
    }
}
/// <summary>
/// Enqueues a request: runs it right away (inline on WebGL, otherwise on a
/// thread-pool thread) while under the concurrency limit; when the limit is
/// reached, parks it in the local queue until an in-flight request completes.
/// </summary>
/// <param name="executionContext">The state that is passed to the callback method.</param>
/// <param name="callback">An Action delegate which needs to be invoked on the thread pool thread</param>
/// <param name="errorCallback">An Action delegate which needs to be invoked on exception while executing the request on the thread pool thread</param>
public void Enqueue(T executionContext, Action<T> callback, Action<Exception, T> errorCallback)
{
    var options = new ThreadPoolOptions<T>
    {
        Callback = callback,
        ErrorCallback = errorCallback,
        State = executionContext
    };

    // WebGL runs single-threaded, so an ordinary increment is safe there.
    int requestNumber = IsWebGL ? ++_requestCount : Interlocked.Increment(ref _requestCount);

    if (requestNumber > this.MaxConcurentRequest)
    {
        // Over the limit: store in the local queue; SignalCompletion drains it.
        lock (_queueLock)
        {
            _queuedRequests.Enqueue(options);
        }
        return;
    }

    // Under the limit: dispatch immediately.
    if (IsWebGL)
    {
        Callback(options);
    }
    else
    {
        ThreadPool.QueueUserWorkItem(Callback, options);
    }
}
/// <summary>
/// Internal callback method that invokes the caller's callback, routes any
/// exception to the caller's error callback, and always signals completion
/// so the throttling state (request count / pending queue) stays consistent.
/// </summary>
/// <param name="state">A <see cref="ThreadPoolOptions{T}"/> carrying the callbacks and user state.</param>
private void Callback(object state)
{
    ThreadPoolOptions<T> callbackState = (ThreadPoolOptions<T>)state;
    try
    {
        callbackState.Callback(callbackState.State);
    }
    catch (Exception exception)
    {
        // Tolerate a missing error callback instead of throwing a
        // NullReferenceException on a thread-pool thread (which would
        // crash the process on .NET Framework).
        callbackState.ErrorCallback?.Invoke(exception, callbackState.State);
    }
    finally
    {
        // Signal even when the callback (or error callback) throws, so the
        // request count is decremented and queued requests are dispatched.
        this.SignalCompletion();
    }
}
/// <summary>
/// Runs the user callback held in <paramref name="state"/>, forwarding any
/// exception to the error callback, and always signals completion afterwards.
/// </summary>
/// <param name="state">A <see cref="ThreadPoolOptions{T}"/> carrying the callbacks and user state.</param>
private void Callback(object state)
{
    var options = (ThreadPoolOptions<T>)state;
    try
    {
        options.Callback(options.State);
    }
    catch (Exception ex)
    {
        options.ErrorCallback(ex, options.State);
    }
    finally
    {
        // Runs even when a callback throws, keeping the throttle state consistent.
        SignalCompletion();
    }
}
/// <summary>
/// Marks one request as finished: dispatches the next queued request to the
/// thread pool (if any is waiting) and decrements the in-flight request count.
/// </summary>
private void SignalCompletion()
{
    ThreadPoolOptions<T> next = null;
    lock (_queueLock)
    {
        if (_queuedRequests.Count > 0)
        {
            next = _queuedRequests.Dequeue();
        }
    }

    // Queued items are never null, so this is equivalent to the count check.
    if (next != null)
    {
        ThreadPool.QueueUserWorkItem(Callback, next);
    }

    // Decrement after the hand-off so concurrency accounting stays correct:
    // one request finished, one (possibly) started.
    Interlocked.Decrement(ref _requestCount);
}
/// <summary>
/// Applies configured .NET ThreadPool minimum-thread settings and
/// ServicePointManager settings (the latter to improve Azure storage throughput).
/// </summary>
private void ConfigureThreadPoolAndServicePointSettings()
{
    ThreadPoolOptions threadPoolOptions = Services.GetRequiredService<IOptions<ThreadPoolOptions>>().Value;
    if (threadPoolOptions.MinDotNetThreadPoolSize > 0)
    {
        int workerThreads;
        int completionPortThreads;
        ThreadPool.GetMinThreads(out workerThreads, out completionPortThreads);
        if (threadPoolOptions.MinDotNetThreadPoolSize > workerThreads ||
            threadPoolOptions.MinDotNetThreadPoolSize > completionPortThreads)
        {
            // If at least one of the new values is larger, set the new min values
            // to be the larger of the previous and new config value.
            int newWorkerThreads = Math.Max(threadPoolOptions.MinDotNetThreadPoolSize, workerThreads);
            int newCompletionPortThreads = Math.Max(threadPoolOptions.MinDotNetThreadPoolSize, completionPortThreads);
            bool ok = ThreadPool.SetMinThreads(newWorkerThreads, newCompletionPortThreads);
            if (ok)
            {
                logger.Info(ErrorCode.SiloConfiguredThreadPool,
                    "Configured ThreadPool.SetMinThreads() to values: {0},{1}. Previous values are: {2},{3}.",
                    newWorkerThreads, newCompletionPortThreads, workerThreads, completionPortThreads);
            }
            else
            {
                // This literal was split across a raw newline in the extracted source
                // (invalid in a non-verbatim C# string); rejoined onto a single line.
                logger.Warn(ErrorCode.SiloFailedToConfigureThreadPool,
                    "Failed to configure ThreadPool.SetMinThreads(). Tried to set values to: {0},{1}. Previous values are: {2},{3}.",
                    newWorkerThreads, newCompletionPortThreads, workerThreads, completionPortThreads);
            }
        }
    }

    // Set .NET ServicePointManager settings to optimize throughput performance when using Azure storage
    // http://blogs.msdn.com/b/windowsazurestorage/archive/2010/06/25/nagle-s-algorithm-is-not-friendly-towards-small-requests.aspx
    ServicePointOptions servicePointOptions = Services.GetRequiredService<IOptions<ServicePointOptions>>().Value;
    logger.Info(ErrorCode.SiloConfiguredServicePointManager,
        "Configured .NET ServicePointManager to Expect100Continue={0}, DefaultConnectionLimit={1}, UseNagleAlgorithm={2} to improve Azure storage performance.",
        servicePointOptions.Expect100Continue, servicePointOptions.DefaultConnectionLimit, servicePointOptions.UseNagleAlgorithm);
    ServicePointManager.Expect100Continue = servicePointOptions.Expect100Continue;
    ServicePointManager.DefaultConnectionLimit = servicePointOptions.DefaultConnectionLimit;
    ServicePointManager.UseNagleAlgorithm = servicePointOptions.UseNagleAlgorithm;
}
/// <summary>
/// Enqueues a request: dispatched to the thread pool immediately while under
/// the concurrency limit, otherwise parked in the local queue until a slot
/// frees up via SignalCompletion.
/// </summary>
/// <param name="executionContext">The state passed to the callback method.</param>
/// <param name="callback">Delegate invoked on a thread-pool thread.</param>
/// <param name="errorCallback">Delegate invoked if the callback throws.</param>
public void Enqueue(T executionContext, Action<T> callback, Action<Exception, T> errorCallback)
{
    var options = new ThreadPoolOptions<T>
    {
        Callback = callback,
        ErrorCallback = errorCallback,
        State = executionContext
    };

    int inFlight = Interlocked.Increment(ref _requestCount);
    if (inFlight > MaxConcurentRequest)
    {
        // Over the limit: store locally; drained when a request completes.
        lock (_queueLock)
        {
            _queuedRequests.Enqueue(options);
        }
    }
    else
    {
        ThreadPool.QueueUserWorkItem(Callback, options);
    }
}