/// <summary>
/// Accept a request from the incoming request queue.
/// </summary>
internal ValueTask<RequestContext> AcceptAsync(AsyncAcceptContext acceptContext)
{
    // Fail fast if the listener has been disposed or already stopped;
    // accepting on a dead listener is a programming error.
    CheckDisposed();
    Debug.Assert(_state != State.Stopped, "Listener has been stopped.");

    // The accept context owns the pending-accept state; delegate to it.
    return acceptContext.AcceptAsync();
}
// The message pump: one worker loop that repeatedly accepts a request and
// dispatches it for processing.
// When we start listening for the next request on one thread, we may need to be
// sure that the completion continues on another thread as to not block the
// current request processing. The awaits will manage stack depth for us.
private async Task ProcessRequestsWorker()
{
    Debug.Assert(RequestContextFactory != null);

    // Allocate one accept context per worker and reuse it for all accepts
    // (avoids a per-request allocation).
    using var acceptContext = new AsyncAcceptContext(Listener, RequestContextFactory);

    // Each worker gets a 1-based index; workers beyond _maxAccepts exit immediately.
    int workerIndex = Interlocked.Increment(ref _acceptorCounts);
    while (!Stopping && workerIndex <= _maxAccepts)
    {
        // Receive a request.
        RequestContext requestContext;
        try
        {
            requestContext = await Listener.AcceptAsync(acceptContext);
            if (!Listener.ValidateRequest(requestContext))
            {
                // Validation already sent a response to the client, so this
                // context is done: release native pins, dispose, and accept the
                // next request.
                requestContext.ReleasePins();
                requestContext.Dispose();
                continue;
            }
        }
        catch (Exception exception)
        {
            // Accept failures are only expected while shutting down, hence the
            // assert; in release builds a non-stopping failure is logged as an
            // error and the loop keeps accepting.
            Debug.Assert(Stopping);
            if (Stopping)
            {
                Log.AcceptErrorStopping(_logger, exception);
            }
            else
            {
                Log.AcceptError(_logger, exception);
            }
            continue;
        }
        try
        {
            if (_options.UnsafePreferInlineScheduling)
            {
                // Opt-in fast path: run the request inline on this worker
                // instead of hopping to the thread pool.
                await requestContext.ExecuteAsync();
            }
            else
            {
                // Default path: queue the request (an IThreadPoolWorkItem) so
                // this loop can go straight back to accepting.
                ThreadPool.UnsafeQueueUserWorkItem(requestContext, preferLocal: false);
            }
        }
        catch (Exception ex)
        {
            // Request processing failed to start.
            // Log the error message, release throttle and move on.
            Log.RequestListenerProcessError(_logger, ex);
        }
    }
    Interlocked.Decrement(ref _acceptorCounts);
}
/// <summary>
/// Accept a request from the incoming request queue.
/// </summary>
public Task<RequestContext> AcceptAsync()
{
    AsyncAcceptContext acceptContext = null;
    try
    {
        CheckDisposed();
        Debug.Assert(_state != State.Stopped, "Listener has been stopped.");

        // The accept context carries its own completion source that the caller
        // can await; native IO completion signals it when a request arrives.
        acceptContext = new AsyncAcceptContext(this);
        uint statusCode = acceptContext.QueueBeginGetContext();

        bool queued = statusCode == UnsafeNclNativeMethods.ErrorCodes.ERROR_SUCCESS
            || statusCode == UnsafeNclNativeMethods.ErrorCodes.ERROR_IO_PENDING;
        if (!queued)
        {
            // Some other bad error; possible(?) return values include
            // ERROR_INVALID_HANDLE, ERROR_INSUFFICIENT_BUFFER, ERROR_OPERATION_ABORTED.
            acceptContext.Dispose();
            throw new HttpSysException((int)statusCode);
        }
    }
    catch (Exception exception)
    {
        LogHelper.LogException(Logger, "GetContextAsync", exception);
        throw;
    }

    return acceptContext.Task;
}
// The message pump: a worker loop that accepts requests and hands them to the
// thread pool.
// When we start listening for the next request on one thread, we may need to be
// sure that the completion continues on another thread as to not block the
// current request processing. The awaits will manage stack depth for us.
private async Task ProcessRequestsWorker()
{
    // A single accept context is allocated up front and reused for every accept.
    using var acceptContext = new AsyncAcceptContext(Listener, RequestContextFactory);

    int workerIndex = Interlocked.Increment(ref _acceptorCounts);
    while (!Stopping && workerIndex <= _maxAccepts)
    {
        RequestContext context;
        try
        {
            context = await Listener.AcceptAsync(acceptContext);

            if (!Listener.ValidateRequest(context))
            {
                // Validation already responded to the client; release native
                // pins, dispose the request, and accept the next one.
                context.ReleasePins();
                context.Dispose();
                continue;
            }
        }
        catch (Exception acceptError)
        {
            Debug.Assert(Stopping);
            if (Stopping)
            {
                _logger.LogDebug(LoggerEventIds.AcceptErrorStopping, acceptError, "Failed to accept a request, the server is stopping.");
            }
            else
            {
                _logger.LogError(LoggerEventIds.AcceptError, acceptError, "Failed to accept a request.");
            }
            continue;
        }

        try
        {
            // Queue the request so this loop can return to accepting immediately.
            ThreadPool.UnsafeQueueUserWorkItem(context, preferLocal: false);
        }
        catch (Exception queueError)
        {
            // Request processing failed to be queued in the thread pool.
            // Log the error message, release throttle and move on.
            _logger.LogError(LoggerEventIds.RequestListenerProcessError, queueError, "ProcessRequestAsync");
        }
    }

    Interlocked.Decrement(ref _acceptorCounts);
}
// The message pump: a worker loop that accepts requests, tags them with this
// pump, and hands them to the thread pool.
// When we start listening for the next request on one thread, we may need to be
// sure that the completion continues on another thread as to not block the
// current request processing. The awaits will manage stack depth for us.
private async Task ProcessRequestsWorker()
{
    // One accept context per worker, reused across all accepts.
    using var acceptContext = new AsyncAcceptContext(Listener);

    int workerIndex = Interlocked.Increment(ref _acceptorCounts);
    while (!Stopping && workerIndex <= _maxAccepts)
    {
        RequestContext nextRequest;
        try
        {
            nextRequest = await Listener.AcceptAsync(acceptContext);

            // The request carries a back-reference to the pump that accepted it.
            nextRequest.MessagePump = this;
        }
        catch (Exception acceptError)
        {
            Debug.Assert(Stopping);
            if (Stopping)
            {
                _logger.LogDebug(LoggerEventIds.AcceptErrorStopping, acceptError, "Failed to accept a request, the server is stopping.");
            }
            else
            {
                _logger.LogError(LoggerEventIds.AcceptError, acceptError, "Failed to accept a request.");
            }
            continue;
        }

        try
        {
            // Queue the request so this loop can go straight back to accepting.
            ThreadPool.UnsafeQueueUserWorkItem(nextRequest, preferLocal: false);
        }
        catch (Exception dispatchError)
        {
            // Request processing failed to be queued in the thread pool.
            // Log the error message, release throttle and move on.
            _logger.LogError(LoggerEventIds.RequestListenerProcessError, dispatchError, "ProcessRequestAsync");
        }
    }

    Interlocked.Decrement(ref _acceptorCounts);
}
// Native IO completion callback for a pending accept. Either completes the
// accept (success or failure), or re-queues it when the buffer was too small
// or validation/auth requires another round trip. `complete` tracks whether
// the accept context's task has been settled and the context can be disposed.
private static void IOCompleted(AsyncAcceptContext asyncContext, uint errorCode, uint numBytes)
{
    bool complete = false;
    try
    {
        if (errorCode != UnsafeNclNativeMethods.ErrorCodes.ERROR_SUCCESS && errorCode != UnsafeNclNativeMethods.ErrorCodes.ERROR_MORE_DATA)
        {
            // Hard native failure: fault the pending accept.
            asyncContext.TrySetException(new HttpSysException((int)errorCode));
            complete = true;
        }
        else
        {
            HttpSysListener server = asyncContext.Server;
            if (errorCode == UnsafeNclNativeMethods.ErrorCodes.ERROR_SUCCESS)
            {
                // At this point we have received an unmanaged HTTP_REQUEST and
                // memoryBlob points to it; hook up request validation and
                // authentication handling here.
                try
                {
                    if (server.ValidateRequest(asyncContext._nativeRequestContext) && server.ValidateAuth(asyncContext._nativeRequestContext))
                    {
                        RequestContext requestContext = new RequestContext(server, asyncContext._nativeRequestContext);
                        asyncContext.TrySetResult(requestContext);
                        complete = true;
                    }
                }
                catch (Exception)
                {
                    // Validation/auth blew up: tell the client, then let the
                    // outer catch fault the accept.
                    server.SendError(asyncContext._nativeRequestContext.RequestId, StatusCodes.Status400BadRequest);
                    throw;
                }
                finally
                {
                    if (complete)
                    {
                        // The request (and its native blob) has been handed to
                        // the user, so this code can't reuse the blob — drop it.
                        asyncContext._nativeRequestContext = null;
                    }
                    else
                    {
                        // Accept will be re-queued below; reset the blob for reuse.
                        asyncContext.AllocateNativeRequest(size: asyncContext._nativeRequestContext.Size);
                    }
                }
            }
            else
            {
                // ERROR_MORE_DATA: the buffer was too small. Reallocate at the
                // size the kernel reported (numBytes; previously computed as
                // (uint)backingBuffer.Length - AlignmentPadding) keeping the
                // same RequestId so the kernel re-delivers the same request.
                asyncContext.AllocateNativeRequest(numBytes, asyncContext._nativeRequestContext.RequestId);
            }

            // We need to issue a new request, either because auth failed, or
            // because our buffer was too small the first time.
            if (!complete)
            {
                uint statusCode = asyncContext.QueueBeginGetContext();
                if (statusCode != UnsafeNclNativeMethods.ErrorCodes.ERROR_SUCCESS && statusCode != UnsafeNclNativeMethods.ErrorCodes.ERROR_IO_PENDING)
                {
                    // Some other bad error; possible(?) return values are:
                    // ERROR_INVALID_HANDLE, ERROR_INSUFFICIENT_BUFFER, ERROR_OPERATION_ABORTED.
                    asyncContext.TrySetException(new HttpSysException((int)statusCode));
                    complete = true;
                }
            }
            if (!complete)
            {
                // Accept successfully re-queued; the context stays alive for
                // the next completion.
                return;
            }
        }
        if (complete)
        {
            asyncContext.Dispose();
        }
    }
    catch (Exception exception)
    {
        // Logged by caller.
        asyncContext.TrySetException(exception);
        asyncContext.Dispose();
    }
}