Code example #1
        // The message pump.
        // When we start listening for the next request on one thread, we may need to be sure that the
        // completion continues on another thread so as not to block the current request processing.
        // The awaits will manage stack depth for us.
        private async Task ProcessRequestsWorker()
        {
            Debug.Assert(RequestContextFactory != null);

            // Allocate an accept context per loop and reuse it for all accepts
            using var acceptContext = new AsyncAcceptContext(Listener, RequestContextFactory);

            int workerIndex = Interlocked.Increment(ref _acceptorCounts);

            while (!Stopping && workerIndex <= _maxAccepts)
            {
                // Receive a request
                RequestContext requestContext;
                try
                {
                    requestContext = await Listener.AcceptAsync(acceptContext);

                    if (!Listener.ValidateRequest(requestContext))
                    {
                        // Dispose the request
                        requestContext.ReleasePins();
                        requestContext.Dispose();

                        // If validation fails, a response has already been sent to the client, so we can accept the next request
                        continue;
                    }
                }
                catch (Exception exception)
                {
                    Debug.Assert(Stopping);
                    if (Stopping)
                    {
                        Log.AcceptErrorStopping(_logger, exception);
                    }
                    else
                    {
                        Log.AcceptError(_logger, exception);
                    }
                    continue;
                }
                try
                {
                    if (_options.UnsafePreferInlineScheduling)
                    {
                        await requestContext.ExecuteAsync();
                    }
                    else
                    {
                        ThreadPool.UnsafeQueueUserWorkItem(requestContext, preferLocal: false);
                    }
                }
                catch (Exception ex)
                {
                    // Request processing failed
                    // Log the error message, release throttle and move on
                    Log.RequestListenerProcessError(_logger, ex);
                }
            }
            Interlocked.Decrement(ref _acceptorCounts);
        }
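The pump above is a single accept worker; a server normally starts several in parallel. A minimal sketch of such a startup, assuming a hypothetical StartWorkers helper on the same class (not taken from the example):

        // Hypothetical startup: launch up to _maxAccepts parallel accept loops.
        // Each ProcessRequestsWorker pumps accepts independently until Stopping is set,
        // so the returned tasks are intentionally not awaited here.
        private void StartWorkers()
        {
            for (int i = 0; i < _maxAccepts; i++)
            {
                _ = ProcessRequestsWorker();
            }
        }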
Code example #2
        public void Run()
        {
            var ev = new SocketAsyncEventArgs();

            ev.Completed += OnAccept;
            if (Listener.AcceptAsync(ev) == false)
            {
                OnAccept(Listener, ev);
            }
        }
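Run() only arms the first accept; the Completed handler must process the result and re-arm the SocketAsyncEventArgs. A minimal sketch of an OnAccept that pairs with the method above (HandleClient is a hypothetical placeholder for the per-connection work):

        private void OnAccept(object sender, SocketAsyncEventArgs e)
        {
            do
            {
                if (e.SocketError == SocketError.Success)
                {
                    // Hand the accepted socket off to per-connection handling (hypothetical helper).
                    HandleClient(e.AcceptSocket);
                }

                // Clear the socket so the same SocketAsyncEventArgs can be reused for the next accept.
                e.AcceptSocket = null;
            }
            // A false return means the accept completed synchronously; loop instead of recursing.
            while (!Listener.AcceptAsync(e));
        }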
Code example #3
File: TCPServer.cs Project: jamieb452/RetroSpyServer
        /// <summary>
        /// Begins accepting a new connection asynchronously
        /// </summary>
        protected async void StartAcceptAsync()
        {
            if (!IsRunning)
            {
                return;
            }

            // Fetch ourselves an available AcceptEventArg for the next connection
            SocketAsyncEventArgs AcceptEventArg;

            if (SocketAcceptPool.Count > 0)
            {
                try
                {
                    AcceptEventArg = SocketAcceptPool.Pop();
                }
                catch
                {
                    AcceptEventArg            = new SocketAsyncEventArgs();
                    AcceptEventArg.Completed += (s, e) => PrepareAccept(e);
                }
            }
            else
            {
                // No pooled SocketAsyncEventArgs available, create a new one
                AcceptEventArg            = new SocketAsyncEventArgs();
                AcceptEventArg.Completed += (s, e) => PrepareAccept(e);
            }

            try
            {
                // Enforce max connections. If we are capped on connections, the new connection will stop here,
                // and return once a connection is opened up from the Release() method
                if (ConnectionEnforceMode == EnforceMode.BeforeAccept)
                {
                    await MaxConnectionsEnforcer.WaitAsync();
                }

                // Begin accepting connections
                bool willRaiseEvent = Listener.AcceptAsync(AcceptEventArg);

                // If we wont raise event, that means a connection has already been accepted synchronously
                // and the Accept_Completed event will NOT be fired. So we manually call ProcessAccept
                if (!willRaiseEvent)
                {
                    PrepareAccept(AcceptEventArg);
                }
            }
            catch (ObjectDisposedException)
            {
                // Happens when the server is shutdown
            }
        }
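The Release() method mentioned in the throttling comment is not shown; a minimal sketch of what it could look like under the same assumptions (MaxConnectionsEnforcer being a SemaphoreSlim):

        /// <summary>
        /// Hypothetical counterpart to the throttle above: called when a client
        /// disconnects so that a pending StartAcceptAsync call may proceed.
        /// </summary>
        protected void Release()
        {
            if (ConnectionEnforceMode == EnforceMode.BeforeAccept)
            {
                MaxConnectionsEnforcer.Release();
            }
        }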
Code example #4
        void Accept_Completed(object sender, SocketAsyncEventArgs e)
        {
            if (isStopped)
            {
                return;
            }
            Socket client = e.AcceptSocket;

            Console.WriteLine("Accepted Connection From: " + client.RemoteEndPoint.ToString());
            e.AcceptSocket = null;
            Listener.AcceptAsync(AcceptArgs);
            HandleClient(client);
        }
Code example #5
        // The message pump.
        // When we start listening for the next request on one thread, we may need to be sure that the
        // completion continues on another thread so as not to block the current request processing.
        // The awaits will manage stack depth for us.
        private async Task ProcessRequestsWorker()
        {
            // Allocate an accept context per loop and reuse it for all accepts
            using var acceptContext = new AsyncAcceptContext(Listener, RequestContextFactory);

            int workerIndex = Interlocked.Increment(ref _acceptorCounts);

            while (!Stopping && workerIndex <= _maxAccepts)
            {
                // Receive a request
                RequestContext requestContext;
                try
                {
                    requestContext = await Listener.AcceptAsync(acceptContext);

                    if (!Listener.ValidateRequest(requestContext))
                    {
                        // Dispose the request
                        requestContext.ReleasePins();
                        requestContext.Dispose();

                        // If validation fails, a response has already been sent to the client, so we can accept the next request
                        continue;
                    }
                }
                catch (Exception exception)
                {
                    Debug.Assert(Stopping);
                    if (Stopping)
                    {
                        _logger.LogDebug(LoggerEventIds.AcceptErrorStopping, exception, "Failed to accept a request, the server is stopping.");
                    }
                    else
                    {
                        _logger.LogError(LoggerEventIds.AcceptError, exception, "Failed to accept a request.");
                    }
                    continue;
                }
                try
                {
                    ThreadPool.UnsafeQueueUserWorkItem(requestContext, preferLocal: false);
                }
                catch (Exception ex)
                {
                    // Request processing failed to be queued in threadpool
                    // Log the error message, release throttle and move on
                    _logger.LogError(LoggerEventIds.RequestListenerProcessError, ex, "ProcessRequestAsync");
                }
            }
            Interlocked.Decrement(ref _acceptorCounts);
        }
Code example #6
File: Gateway.cs Project: emojireyiz/RetroClash
        public async Task StartAccept()
        {
            var acceptEvent = GetArgs;

            while (true)
            {
                if (!Listener.AcceptAsync(acceptEvent))
                {
                    await ProcessAccept(acceptEvent, false);
                }
                else
                {
                    break;
                }
            }
        }
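The loop above drains synchronous completions and breaks as soon as AcceptAsync goes asynchronous, at which point the Completed handler wired to GetArgs has to take over. A minimal sketch of such a handler (the wiring, the method name, and the meaning of the second ProcessAccept flag are assumptions, not shown in the example):

        // Hypothetical Completed handler: process the asynchronously accepted
        // connection, then resume the accept pump.
        private async void OnAcceptCompleted(object sender, SocketAsyncEventArgs e)
        {
            await ProcessAccept(e, true);
            await StartAccept();
        }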
Code example #7
 /// <summary>
 /// Accept a client connection
 /// </summary>
 /// <param name="e">The SocketAsyncEventArgs used for the accept operation</param>
 private void StartAccept(SocketAsyncEventArgs e)
 {
     e.AcceptSocket = null;
     if (!Listener.AcceptAsync(e))
     {
          // Completed synchronously, so no Completed event will fire. After five consecutive
          // synchronous accepts, offload to a new task so the recursion does not overflow the stack.
         if (AccpetEventCount > 5)
         {
             AccpetEventCount = 0;
             Task.Factory.StartNew(() => AcceptProcess(e));
         }
         else
         {
             AccpetEventCount++;
             AcceptProcess(e);
         }
     }
 }
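AcceptProcess is not shown; a minimal sketch under the same assumptions: handle the accepted socket, then re-arm the accept with the same SocketAsyncEventArgs, which is what keeps the StartAccept/AcceptProcess recursion going (HandleClient is a hypothetical per-connection handler):

 // Hypothetical continuation of the accept loop above: consume the accepted
 // socket, then go back to accepting on the same SocketAsyncEventArgs.
 private void AcceptProcess(SocketAsyncEventArgs e)
 {
     if (e.SocketError == SocketError.Success)
     {
         HandleClient(e.AcceptSocket);
     }
     StartAccept(e);
 }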
Code example #8
        public async Task Connect(
            [SuitParser(typeof(Parsers), nameof(Parsers.ParseInt))]
            int port)
        {
            for (; !Source.IsCancellationRequested;)
            {
                var s = await Listener.AcceptAsync();

                if (s?.RemoteEndPoint?.Port != port)
                {
                    continue;
                }
                DataStream = s;
                await IO.WriteLineAsync($"Data stream connected successfully.", OutputType.CustomInfo);

                break;
            }
        }
Code example #9
        // The message pump.
        // When we start listening for the next request on one thread, we may need to be sure that the
        // completion continues on another thread so as not to block the current request processing.
        // The awaits will manage stack depth for us.
        private async Task ProcessRequestsWorker()
        {
            // Allocate an accept context per loop and reuse it for all accepts
            using var acceptContext = new AsyncAcceptContext(Listener);

            int workerIndex = Interlocked.Increment(ref _acceptorCounts);

            while (!Stopping && workerIndex <= _maxAccepts)
            {
                // Receive a request
                RequestContext requestContext;
                try
                {
                    requestContext = await Listener.AcceptAsync(acceptContext);

                    // Assign the message pump to this request context
                    requestContext.MessagePump = this;
                }
                catch (Exception exception)
                {
                    Debug.Assert(Stopping);
                    if (Stopping)
                    {
                        _logger.LogDebug(LoggerEventIds.AcceptErrorStopping, exception, "Failed to accept a request, the server is stopping.");
                    }
                    else
                    {
                        _logger.LogError(LoggerEventIds.AcceptError, exception, "Failed to accept a request.");
                    }
                    continue;
                }
                try
                {
                    ThreadPool.UnsafeQueueUserWorkItem(requestContext, preferLocal: false);
                }
                catch (Exception ex)
                {
                    // Request processing failed to be queued in threadpool
                    // Log the error message, release throttle and move on
                    _logger.LogError(LoggerEventIds.RequestListenerProcessError, ex, "ProcessRequestAsync");
                }
            }
            Interlocked.Decrement(ref _acceptorCounts);
        }
Code example #10
        private void OnAccept(object sender, SocketAsyncEventArgs e)
        {
            if (e.SocketError != SocketError.Success)
            {
                // If a socket error occurred, stop listening
                return;
            }
            var clientSocket = e.AcceptSocket;
            var handler      = new ProxyClient(clientSocket, this);

            handler.StartWorkerThread();

            lock (Clients)
            {
                Clients.Add(handler);
            }

            e.AcceptSocket = null;
            Listener.AcceptAsync(e);
        }
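The final AcceptAsync call above discards its return value; if that accept completes synchronously, Completed is not raised again and the loop stalls. A hedged replacement for that last line, mirroring the guard used in code examples #2 and #7:

            // A false return means the accept completed synchronously and Completed
            // will not fire, so re-enter the handler to keep accepting.
            if (!Listener.AcceptAsync(e))
            {
                OnAccept(sender, e);
            }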
Code example #11
File: MessagePump.cs Project: zmkchina/aspnetcore
        // The message pump.
        // When we start listening for the next request on one thread, we may need to be sure that the
        // completion continues on another thread so as not to block the current request processing.
        // The awaits will manage stack depth for us.
        private async void ProcessRequestsWorker()
        {
            int workerIndex = Interlocked.Increment(ref _acceptorCounts);

            while (!Stopping && workerIndex <= _maxAccepts)
            {
                // Receive a request
                RequestContext requestContext;
                try
                {
                    requestContext = await Listener.AcceptAsync().SupressContext();
                }
                catch (Exception exception)
                {
                    Contract.Assert(Stopping);
                    if (Stopping)
                    {
                        _logger.LogDebug(0, exception, "ListenForNextRequestAsync-Stopping");
                    }
                    else
                    {
                        _logger.LogError(0, exception, "ListenForNextRequestAsync");
                    }
                    continue;
                }
                try
                {
                    Task ignored = Task.Factory.StartNew(_processRequest, requestContext);
                }
                catch (Exception ex)
                {
                    // Request processing failed to be queued in threadpool
                    // Log the error message, release throttle and move on
                    _logger.LogError(0, ex, "ProcessRequestAsync");
                }
            }
            Interlocked.Decrement(ref _acceptorCounts);
        }
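The _processRequest delegate handed to Task.Factory.StartNew is not shown; a plausible shape, assuming it unwraps the state object and executes the request the same way code example #1 does (the method name is an assumption):

        // Hypothetical target for the _processRequest delegate above; it matches the
        // StartNew(Action<object>, object state) overload. ExecuteAsync is borrowed
        // from code example #1, not from this snippet.
        private static void ProcessRequest(object state)
        {
            var requestContext = (RequestContext)state;
            _ = requestContext.ExecuteAsync();
        }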
Code example #12
 protected override bool ListenAsync(SocketAsyncEventArgs saea)
 {
     return Listener.AcceptAsync(saea);
 }