// Initializes the server by preallocating reusable buffers and
// context objects. These objects do not need to be preallocated
// or reused, but it is done this way to illustrate how the API can
// easily be used to create reusable objects to increase server performance.
//
public void Init(Action<SocketAsyncEventArgs> onReceive = null)
{
    // Allocates one large byte buffer which all I/O operations use a piece of.
    // This guards against memory fragmentation.
    m_bufferManager.InitBuffer();

    // Preallocate pool of SocketAsyncEventArgs objects
    SocketAsyncEventArgs readWriteEventArg;

    for (int i = 0; i < m_numConnections; i++)
    {
        // Pre-allocate a set of reusable SocketAsyncEventArgs
        readWriteEventArg = new SocketAsyncEventArgs();
        readWriteEventArg.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        readWriteEventArg.UserToken = new AsyncUserToken();

        // Assign a byte buffer from the buffer pool to the SocketAsyncEventArgs object
        m_bufferManager.SetBuffer(readWriteEventArg);

        // Add the SocketAsyncEventArgs to the pool
        m_readWritePool.Push(readWriteEventArg);
    }

    m_receiveCallback = onReceive;
}
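All of the snippets in this collection push into some variant of a SocketAsyncEventArgsPool. The concrete implementations differ per project, but most follow the stack-based design of the canonical SocketAsyncEventArgs server sample. The following is only an orientation sketch under that assumption; the class and member names are illustrative and not taken from any of the projects above.

using System;
using System.Collections.Generic;
using System.Net.Sockets;

// Illustrative sketch only: a lock-protected stack of reusable SocketAsyncEventArgs,
// in the style of the canonical SocketAsyncEventArgs server sample. The pools used
// by the snippets in this collection may differ in detail.
class PooledEventArgsSketch
{
    private readonly Stack<SocketAsyncEventArgs> m_pool;

    public PooledEventArgsSketch(int capacity)
    {
        m_pool = new Stack<SocketAsyncEventArgs>(capacity);
    }

    // Number of event args currently available for reuse.
    public int Count
    {
        get { lock (m_pool) { return m_pool.Count; } }
    }

    // Return an event args to the pool once its operation has completed.
    public void Push(SocketAsyncEventArgs item)
    {
        if (item == null)
        {
            throw new ArgumentNullException(nameof(item));
        }
        lock (m_pool) { m_pool.Push(item); }
    }

    // Take an event args out of the pool for the next operation.
    public SocketAsyncEventArgs Pop()
    {
        lock (m_pool) { return m_pool.Pop(); }
    }
}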
public override void Clear()
{
    base.Clear();

    lock (_lockSessionContainer)
    {
        foreach (var session in _authedClientSessions.Values)
        {
            if (_receiveEventAragePool != null)
            {
                _receiveEventAragePool.Push(session.ReceiveEventArgs);
            }

            if (_sendEventAragePool != null)
            {
                _sendEventAragePool.Push(session.SendEventArgs);
            }
        }

        _authedClientSessions.Clear();
        _reconnectClientSessions.Clear();
        _expiredClientSessions.Clear();
    }

    _logger.Info(string.Format("[Clear] _receiveEventAragePool: {0}, _sendEventAragePool: {1}",
        _receiveEventAragePool.Count, _sendEventAragePool.Count));
}
/// <summary>
/// Initializes the server with preallocated reusable buffers and context objects. These objects
/// do not need to be preallocated or reused, but it is done this way to illustrate how the API
/// can easily be used to create reusable objects to improve server performance.
/// </summary>
private void Init()
{
    connectClient = new ConcurrentDictionary<int, ConnectClient>();
    clientList = new ConcurrentDictionary<int, string>();
    sendQueues = new ConcurrentQueue<SendingQueue>[sendthread];
    for (int i = 0; i < sendthread; i++)
    {
        sendQueues[i] = new ConcurrentQueue<SendingQueue>();
    }

    // Allocate one large byte buffer shared by all I/O operations. This guards against memory fragmentation.
    m_bufferManager.InitBuffer();

    // Preallocated receive SocketAsyncEventArgs pool; these get a buffer assigned.
    SocketAsyncEventArgs saea_receive;
    // Preallocated send SocketAsyncEventArgs pool; these get no buffer assigned.
    SocketAsyncEventArgs saea_send;

    for (int i = 0; i < m_numConnections; i++)
    {
        // Pre-allocate a reusable event args for the receive side
        saea_receive = new SocketAsyncEventArgs();
        saea_receive.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        // Assign a byte buffer from the buffer pool to the SocketAsyncEventArgs object
        m_bufferManager.SetBuffer(saea_receive);
        m_receivePool.Push(saea_receive);

        // Pre-allocate a reusable event args for the send side
        saea_send = new SocketAsyncEventArgs();
        saea_send.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        m_sendPool.Push(saea_send);
    }
}
/// <summary>
/// Sends a message asynchronously.
/// </summary>
/// <param name="sendQuere">The message to send</param>
private void Send(SendingQueue sendQuere)
{
    try
    {
        mutex.WaitOne();
        if (m_sendPool.Count == 0)
        {
            SocketAsyncEventArgs saea_send = new SocketAsyncEventArgs();
            saea_send.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
            m_sendPool.Push(saea_send);
        }
        SocketAsyncEventArgs socketArgs = m_sendPool.Pop();
        mutex.ReleaseMutex();

        socketArgs.RemoteEndPoint = sendQuere.remoteEndPoint;
        socketArgs.SetBuffer(sendQuere.data, sendQuere.offset, sendQuere.length);
        if (listenSocket?.SendToAsync(socketArgs) == false)
        {
            ProcessSend(socketArgs);
        }
    }
    catch (Exception)
    {
        // ignored
    }
}
/// <summary>
/// Releases the SocketAsyncEventArgs back to the pool,
/// and frees up another slot for a new client to connect
/// </summary>
/// <param name="e">The SocketAsyncEventArgs being released</param>
protected void Release(SocketAsyncEventArgs e)
{
    // Get our ReadWrite AsyncEvent object back
    SocketReadWritePool.Push(e);

    // Now that we have another set of AsyncEventArgs, we can
    // release this user's Semaphore lock, allowing another connection
    MaxConnectionsEnforcer.Release();
}
/// <summary>
/// Initializes the server with preallocated reusable buffers and context objects.
/// </summary>
public void Init()
{
    // Allocate one large byte buffer for all I/O operations, to prevent memory fragmentation
    m_bufferManager.InitBuffer();

    // Reusable SocketAsyncEventArgs, used repeatedly for accepted clients
    SocketAsyncEventArgs readWriteEventArg;
    AsyncUserToken token;

    // Preallocate a pool of SocketAsyncEventArgs objects
    // Preallocate the SocketAsyncEventArgs read pool
    for (int i = 0; i < m_numConnections; i++)
    {
        // Preallocate one reusable SocketAsyncEventArgs object
        //readWriteEventArg = new SocketAsyncEventArgs();

        // Creating the token also creates the SocketAsyncEventArgs used for reading that is
        // associated with it, and that SocketAsyncEventArgs object's UserToken is set to the token
        token = new AsyncUserToken();
        //token.ReadEventArgs = readWriteEventArg;

        //readWriteEventArg.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        // Register the SocketAsyncEventArgs Completed event
        token.ReadEventArgs.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        //readWriteEventArg.UserToken = new AsyncUserToken();

        // Assign a byte buffer from the buffer pool to the SocketAsyncEventArgs object
        //m_bufferManager.SetBuffer(readWriteEventArg);
        m_bufferManager.SetBuffer(token.ReadEventArgs);
        //((AsyncUserToken)readWriteEventArg.UserToken).SetReceivedBytes(readWriteEventArg.Buffer, readWriteEventArg.Offset, 0);

        // Set the receive buffer and offset
        token.SetBuffer(token.ReadEventArgs.Buffer, token.ReadEventArgs.Offset, token.ReadEventArgs.Count);

        // Add the SocketAsyncEventArgs to the pool
        //m_readPool.Push(readWriteEventArg);
        m_readPool.Push(token.ReadEventArgs);
    }

    // Preallocate the SocketAsyncEventArgs write pool
    for (int i = 0; i < m_numConnections; i++)
    {
        // Preallocate one reusable SocketAsyncEventArgs object
        readWriteEventArg = new SocketAsyncEventArgs();
        readWriteEventArg.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        readWriteEventArg.UserToken = null;

        // Assign a byte buffer from the buffer pool to the SocketAsyncEventArgs object
        m_bufferManager.SetBuffer(readWriteEventArg);
        //readWriteEventArg.SetBuffer(null, 0, 0);

        // Add the SocketAsyncEventArgs to the pool
        m_writePool.Push(readWriteEventArg);
    }
}
/// <summary>
/// Releases the Stream's SocketAsyncEventArgs back to the pool,
/// and frees up another slot for a new client to connect
/// </summary>
/// <param name="Stream">The GamespyTcpStream object that is being released.</param>
public void Release(GamespyTcpStream Stream)
{
    // If the stream has been released, then we stop here
    if (!IsListening || Stream.Released)
    {
        return;
    }

    // Make sure the connection is closed properly
    if (!Stream.SocketClosed)
    {
        Stream.Close();
        return;
    }

    // To prevent cross instance releasing
    if (!Object.ReferenceEquals(this, Stream.SocketManager))
    {
        throw new ArgumentException("Cannot pass a GamespyTcpStream belonging to a different TcpSocket than this one.");
    }

    // If we are still registered for this event, then the EventArgs should
    // NEVER be disposed here, or we have an error to fix
    if (Stream.DisposedEventArgs)
    {
        // Log this error
        Program.ErrorLog.Write("WARNING: [GamespyTcpSocket.Release] Event Args were disposed improperly!");

        // Dispose old buffer tokens
        BufferManager.ReleaseBuffer(Stream.ReadEventArgs);
        BufferManager.ReleaseBuffer(Stream.WriteEventArgs);

        // Create new Read Event Args
        SocketAsyncEventArgs SockArgR = new SocketAsyncEventArgs();
        BufferManager.AssignBuffer(SockArgR);
        SocketReadWritePool.Push(SockArgR);

        // Create new Write Event Args
        SocketAsyncEventArgs SockArgW = new SocketAsyncEventArgs();
        BufferManager.AssignBuffer(SockArgW);
        SocketReadWritePool.Push(SockArgW);
    }
    else
    {
        // Set nulls
        Stream.ReadEventArgs.AcceptSocket = null;
        Stream.WriteEventArgs.AcceptSocket = null;

        // Get our ReadWrite AsyncEvent objects back
        SocketReadWritePool.Push(Stream.ReadEventArgs);
        SocketReadWritePool.Push(Stream.WriteEventArgs);
    }

    // Now that we have another set of AsyncEventArgs, we can
    // release this user's Semaphore lock, allowing another connection
    MaxConnectionsEnforcer.Release();
}
public void Push__ExceedCapacity__Pushed_And_CapacityResized()
{
    var pool = new SocketAsyncEventArgsPool(1);
    var args = new SocketAsyncEventArgs();

    pool.Push(args);
    Assert.AreEqual(1, pool.Capacity);

    pool.Push(args);
    Assert.AreEqual(2, pool.Capacity);
}
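The test above implies a pool whose Capacity grows automatically when Push exceeds it. The real pool's resize policy is not part of this collection; below is only a sketch of one array-backed design that would satisfy the two assertions (growing by doubling when full). The class name and internals are assumptions, not the implementation under test.

using System;
using System.Net.Sockets;

// Hypothetical array-backed pool consistent with the test's observable behavior:
// constructed with capacity 1, it still reports Capacity == 1 after the first Push
// and Capacity == 2 after the second. Not the actual implementation under test.
class GrowableEventArgsPoolSketch
{
    private SocketAsyncEventArgs[] _items;
    private int _count;

    public GrowableEventArgsPoolSketch(int capacity)
    {
        _items = new SocketAsyncEventArgs[capacity];
    }

    public int Capacity => _items.Length;
    public int Count => _count;

    public void Push(SocketAsyncEventArgs args)
    {
        if (_count == _items.Length)
        {
            // Grow the backing array when the pool is full.
            Array.Resize(ref _items, _items.Length * 2);
        }
        _items[_count++] = args;
    }

    public SocketAsyncEventArgs Pop()
    {
        if (_count == 0)
        {
            throw new InvalidOperationException("The pool is empty.");
        }
        var args = _items[--_count];
        _items[_count] = null;
        return args;
    }
}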
// Allocate memory for the buffer pool and assign it to the SocketAsyncEventArgs objects
private void Init()
{
    var bufferManager = new BufferManager(_BufferSize * _MaxConnections * _OpsToPreAlloc, _BufferSize);
    for (int i = 0; i < _MaxConnections; i++)
    {
        var socketAsyncEventArgs = new SocketAsyncEventArgs();
        socketAsyncEventArgs.Completed += IO_Completed;
        // Assign a byte buffer from the buffer pool to the SocketAsyncEventArgs object
        bufferManager.SetBuffer(socketAsyncEventArgs);
        _SocketAsyncEventArgsPool.Push(socketAsyncEventArgs);
    }
}
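Several snippets rely on a BufferManager whose SetBuffer call hands each SocketAsyncEventArgs a fixed-size slice of one large, up-front allocation; that single allocation is what guards against heap fragmentation. A sketch in the style of the canonical SocketAsyncEventArgs sample follows; field and method names here are illustrative, and the BufferManager classes used by the projects above may differ in detail.

using System;
using System.Collections.Generic;
using System.Net.Sockets;

// Sketch only: one large byte[] carved into fixed-size segments so that each
// SocketAsyncEventArgs receives a slice instead of its own allocation.
class BufferManagerSketch
{
    private readonly int _totalBytes;   // total size of the underlying buffer
    private readonly int _segmentSize;  // bytes handed to each SocketAsyncEventArgs
    private byte[] _buffer;             // the single large buffer
    private int _currentIndex;
    private readonly Stack<int> _freeIndexPool = new Stack<int>();

    public BufferManagerSketch(int totalBytes, int segmentSize)
    {
        _totalBytes = totalBytes;
        _segmentSize = segmentSize;
    }

    // Allocate the one large buffer up front (InitBuffer in the snippets above).
    public void InitBuffer()
    {
        _buffer = new byte[_totalBytes];
    }

    // Assign a free segment of the large buffer to the given event args.
    public bool SetBuffer(SocketAsyncEventArgs args)
    {
        if (_freeIndexPool.Count > 0)
        {
            args.SetBuffer(_buffer, _freeIndexPool.Pop(), _segmentSize);
        }
        else
        {
            if (_totalBytes - _segmentSize < _currentIndex)
            {
                return false; // no room left in the large buffer
            }
            args.SetBuffer(_buffer, _currentIndex, _segmentSize);
            _currentIndex += _segmentSize;
        }
        return true;
    }

    // Return a segment to the free list and detach it from the event args.
    public void FreeBuffer(SocketAsyncEventArgs args)
    {
        _freeIndexPool.Push(args.Offset);
        args.SetBuffer(null, 0, 0);
    }
}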
/// <summary>
/// Creates a new UDP socket for handling Gamespy Protocol
/// </summary>
/// <param name="Port">The port this socket will be bound to</param>
/// <param name="MaxConnections">The maximum number of concurrent connections</param>
public GamespyUdpSocket(int Port, int MaxConnections)
{
    // Create our Socket
    this.Port = Port;
    Listener = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp)
    {
        SendTimeout = 5000, // We have a limited pool, so we don't want to be locked often
        SendBufferSize = BufferSizePerEvent,
        ReceiveBufferSize = BufferSizePerEvent
    };

    // Bind to our port
    Listener.Bind(new IPEndPoint(IPAddress.Any, Port));

    // Set the rest of our internals
    MaxNumConnections = MaxConnections;
    MaxConnectionsEnforcer = new SemaphoreSlim(MaxNumConnections, MaxNumConnections);
    SocketReadWritePool = new SocketAsyncEventArgsPool(MaxNumConnections);

    // Create our Buffer Manager for IO operations.
    BufferManager = new BufferManager(MaxNumConnections, BufferSizePerEvent);

    // Assign our Connection IO SocketAsyncEventArgs object instances
    for (int i = 0; i < MaxNumConnections; i++)
    {
        SocketAsyncEventArgs SockArg = new SocketAsyncEventArgs();
        SockArg.Completed += IOComplete;
        BufferManager.AssignBuffer(SockArg);
        SocketReadWritePool.Push(SockArg);
    }

    // Set public internals
    IsRunning = true;
    IsDisposed = false;
}
protected bool TCPServerStartFlag = false; /* Redundant flag indicating whether the server has started */

#endregion

/// <summary>
/// Initializes the server
/// </summary>
public void TCPServerInit()
{
    _TCPServerSocketBufferManager.InitBuffer();
    SocketAsyncEventArgs _SocketAsyncEventArgs;

    /* Preallocate the SocketAsyncEventArgs object pool */
    for (int count = 0; count < _TCPServerSocketSetting.DefaultMaxConnctions; count++)
    {
        _SocketAsyncEventArgs = new SocketAsyncEventArgs();
        _SocketAsyncEventArgs.Completed += new EventHandler<SocketAsyncEventArgs>(TCPServerOnIOCompleted);
        _TCPServerSocketBufferManager.SetBuffer(_SocketAsyncEventArgs);
        _TCPServerSocketAsyncEventArgsPool.Push(_SocketAsyncEventArgs);
    }
}
/// <summary>Creates a new WebSocketServer instance and initializes its properties</summary>
/// <param name="maxClients">The maximum number of simultaneous connections</param>
public WebSocketServer(int maxClients)
{
    this.UseAsyncCallback = EventCallbackMode.BeginInvoke;
    m_Mutex = new Mutex();
    m_Services = new List<string>();
    m_sha = SHA1CryptoServiceProvider.Create();
    m_IsDisposed = false;
    m_LocalEndPort = null;
    m_Counters = new Dictionary<ServerCounterType, PerformanceCounter>();
    m_Pool = new SocketAsyncEventArgsPool();
    // Reserve two extra slots for checking excess connections
    m_MaxClients = new Semaphore(maxClients + 2, maxClients + 2);
    m_Clients = new ConcurrentDictionary<string, AsyncClient>();
    m_WaitToClean = new ConcurrentDictionary<EndPoint, int>();
    m_IsShutdown = false;
    this.IsStarted = false;

    for (int i = 0; i < maxClients; i++)
    {
        SocketAsyncEventArgs arg = new SocketAsyncEventArgs();
        arg.Completed += new EventHandler<SocketAsyncEventArgs>(this.IO_Completed);
        arg.DisconnectReuseSocket = true;
        arg.SetBuffer(new Byte[BUFFER_SIZE], 0, BUFFER_SIZE);
        m_Pool.Push(arg);
    }

    SetCounterDictionary();
}
public static void CloseClientSocket(SocketAsyncEventArgs Args)
{
    var Token = Args.UserToken as AsyncUserToken;
    if (Token.Redused)
    {
        return;
    }

    Token.Redused = true;

    try
    {
        Sessions.Remove(GetSession(Token.Socket).Id);
    }
    catch
    {
    }

    try
    {
        Token.Socket.Shutdown(SocketShutdown.Both);
    }
    catch (Exception)
    {
    }

    Token.Socket.Close();
    Interlocked.Decrement(ref ConnectedAmount);
    SocketSemaphore.Release();
    SocketPool.Push(Args);
}
public NetServerService(PacketHandler packetHandler, ILog logger, int maxConnection, int bufferSize,
    int keepAliveTime, int keepAliveInterval, bool onMonitoring)
    : base(packetHandler, logger, bufferSize, keepAliveTime, keepAliveInterval, onMonitoring)
{
    _listener = new Listener();
    _listener.OnNewClientCallback += OnConnectedClient;

    // Create the SocketAsyncEventArgs object pools
    _receiveEventAragePool = new SocketAsyncEventArgsPool(maxConnection);
    _sendEventAragePool = new SocketAsyncEventArgsPool(maxConnection);

    // Allocate the buffer
    _bufferHandler = new BufferHandler(maxConnection * bufferSize * preAllocCount, bufferSize);
    _bufferHandler.InitBuffer();

    // Populate the SocketAsyncEventArgs object pools
    SocketAsyncEventArgs args;
    for (int i = 0; i < maxConnection; i++)
    {
        ClientSession clientSession = new ClientSession(_logger, bufferSize);
        clientSession.CompletedMessageCallback += OnMessageCompleted;

        // receive pool
        {
            args = new SocketAsyncEventArgs();
            args.Completed += new EventHandler<SocketAsyncEventArgs>(OnReceiveCompleted);
            args.UserToken = clientSession;
            _bufferHandler.SetBuffer(args);
            _receiveEventAragePool.Push(args);
        }

        // send pool
        {
            args = new SocketAsyncEventArgs();
            args.Completed += new EventHandler<SocketAsyncEventArgs>(OnSendCompleted);
            args.UserToken = clientSession;
            _bufferHandler.SetBuffer(args);
            _sendEventAragePool.Push(args);
        }
    }

    _authedClientSessions = new Dictionary<string, ClientSession>();
    _reconnectClientSessions = new List<ClientSession>();
    _expiredClientSessions = new List<string>();
    _removeClientSessionTick = DateTime.UtcNow.AddSeconds(10).Ticks;
    _netMonitorHandler = new MonitorHandler(logger, 10);
}
public void PopPush_Disposed_Exception()
{
    var pool = new SocketAsyncEventArgsPool(64);

    // Populate the pool.
    for (int i = 0; i < 64; i++)
    {
        pool.Push(new SocketAsyncEventArgs());
    }

    // Remove a couple of the args.
    for (int i = 0; i < 32; i++)
    {
        pool.Pop();
    }

    pool.Dispose();

    Assert.Throws<ObjectDisposedException>(() => pool.Push(new SocketAsyncEventArgs()));
    Assert.Throws<ObjectDisposedException>(() => pool.Pop());
}
void readWriteEventArg_Completed(object sender, SocketAsyncEventArgs e)
{
    switch (e.LastOperation)
    {
        case SocketAsyncOperation.ReceiveFrom:
            ProcessReceive(e);
            break;
        case SocketAsyncOperation.SendTo:
            socketPool.Push(e);
            break;
    }
}
internal void Send(SendingQueue sendQuere)
{
    if (!socket.Connected)
    {
        return;
    }

    mutex.WaitOne();
    if (m_sendPool.Count == 0)
    {
        SocketAsyncEventArgs saea_send = new SocketAsyncEventArgs();
        saea_send.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        m_sendPool.Push(saea_send);
    }
    SocketAsyncEventArgs sendEventArgs = m_sendPool.Pop();
    mutex.ReleaseMutex();

    sendEventArgs.SetBuffer(sendQuere.data, sendQuere.offset, sendQuere.length);
    if (!socket.SendAsync(sendEventArgs))
    {
        ProcessSend(sendEventArgs);
    }
}
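Both Send methods above (the UDP variant and this TCP variant) Pop a send args from m_sendPool, attach the outgoing bytes, and call ProcessSend when the operation completes synchronously. The matching completion path is not included in these excerpts; a hypothetical ProcessSend that returns the args to the pool might look like the following. Field names follow the snippet; everything else is an assumption, not the project's actual logic.

// Hypothetical completion handler for the Send method above: detach the caller's
// buffer and return the SocketAsyncEventArgs to the send pool so it can be reused.
// The error handling shown here is a placeholder.
private void ProcessSend(SocketAsyncEventArgs e)
{
    if (e.SocketError != SocketError.Success)
    {
        // The send failed; real code would close the socket or drop the session here.
    }

    // Clear the buffer reference so the pooled args does not pin the sent data.
    e.SetBuffer(null, 0, 0);

    // Return the args to the pool under the same mutex the Send method uses.
    mutex.WaitOne();
    try
    {
        m_sendPool.Push(e);
    }
    finally
    {
        mutex.ReleaseMutex();
    }
}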
/// <summary>
/// Initialization method
/// </summary>
public void Init()
{
    // Allocates one large byte buffer which all I/O operations use a piece of.
    // This guards against memory fragmentation.
    _bufferManager.InitBuffer();

    // Preallocate pool of SocketAsyncEventArgs objects
    SocketAsyncEventArgs readWriteEventArg;

    for (int i = 0; i < _maxClient; i++)
    {
        // Pre-allocate a set of reusable SocketAsyncEventArgs
        readWriteEventArg = new SocketAsyncEventArgs();
        readWriteEventArg.Completed += new EventHandler<SocketAsyncEventArgs>(OnIOCompleted);
        readWriteEventArg.UserToken = null;

        // Assign a byte buffer from the buffer pool to the SocketAsyncEventArgs object
        _bufferManager.SetBuffer(readWriteEventArg);

        // Add the SocketAsyncEventArgs to the pool
        _objectPool.Push(readWriteEventArg);
    }
}
private void OnCloseClientSocket(CustomUserToken token)
{
    token.OnRemove();

    // Return the EventArgs to the pool.
    // The buffer is not returned: the args can simply keep using
    // the buffer it already holds the next time it is reused.
    if (null != m_receiveEventArgsPool)
    {
        m_receiveEventArgsPool.Push(token.m_receiveArgs);
    }

    if (null != m_sendEventArgsPool)
    {
        m_sendEventArgsPool.Push(token.m_sendArgs);
    }
}
private void Init()
{
    connectClient = new ConcurrentDictionary<int, ConnectClient>();
    clientList = new ConcurrentDictionary<int, string>();
    sendQueues = new ConcurrentQueue<SendingQueue>[sendthread];
    for (int i = 0; i < sendthread; i++)
    {
        sendQueues[i] = new ConcurrentQueue<SendingQueue>();
    }

    m_bufferManager.InitBuffer();

    SocketAsyncEventArgs saea_receive;
    SocketAsyncEventArgs saea_send;

    for (int i = 0; i < m_numConnections; i++)
    {
        saea_receive = new SocketAsyncEventArgs();
        saea_receive.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        m_bufferManager.SetBuffer(saea_receive);
        m_receivePool.Push(saea_receive);

        saea_send = new SocketAsyncEventArgs();
        saea_send.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        m_sendPool.Push(saea_send);
    }
}
/// <summary>
/// Creates a new TCP socket for handling Gamespy Protocol
/// </summary>
/// <param name="Port">The port this socket will be bound to</param>
/// <param name="MaxConnections">The maximum number of concurrent connections</param>
public GamespyTcpSocket(int Port, int MaxConnections)
{
    // Create our Socket
    Listener = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);

    // Set Socket options
    Listener.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.DontLinger, true);
    Listener.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.Linger, false);
    Listener.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true);

    // Bind to our port
    Listener.Bind(new IPEndPoint(IPAddress.Any, Port));
    Listener.Listen(25);

    // Set the rest of our internals
    MaxNumConnections = MaxConnections;
    MaxConnectionsEnforcer = new SemaphoreSlim(MaxNumConnections, MaxNumConnections);
    SocketAcceptPool = new SocketAsyncEventArgsPool(ConcurrentAcceptPoolSize);
    SocketReadWritePool = new SocketAsyncEventArgsPool(MaxNumConnections);

    // Create our Buffer Manager for IO operations.
    // Always allocate double space, one for receiving, and another for sending
    BufferManager = new BufferManager(MaxNumConnections * 2, BufferSizePerEventArg);

    // Assign our Connection Accept SocketAsyncEventArgs object instances
    for (int i = 0; i < ConcurrentAcceptPoolSize; i++)
    {
        SocketAsyncEventArgs SockArg = new SocketAsyncEventArgs();
        SockArg.Completed += (s, e) => PrepareAccept(e);

        // Do NOT assign buffer space for Accept operations!
        // AcceptAsync does not require a buffer.
        SocketAcceptPool.Push(SockArg);
    }

    // Assign our Connection IO SocketAsyncEventArgs object instances
    for (int i = 0; i < MaxNumConnections * 2; i++)
    {
        SocketAsyncEventArgs SockArg = new SocketAsyncEventArgs();
        BufferManager.AssignBuffer(SockArg);
        SocketReadWritePool.Push(SockArg);
    }

    // Set public internals
    IsListening = true;
    IgnoreNewConnections = false;
    IsDisposed = false;
}
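The GamespyTcpSocket and TCPServer constructors above fill a SocketAcceptPool that PrepareAccept later drains and refills. The StartAcceptAsync method they rely on is not part of these excerpts; a hypothetical version that pairs with the accept pool could look like the sketch below. The names follow the snippets, but the fallback for an empty pool and the overall shape are assumptions.

// Hypothetical accept loop pairing with the SocketAcceptPool above: take an accept
// args from the pool (or build one if the pool is empty) and post AcceptAsync.
// When AcceptAsync completes synchronously, the Completed event does not fire,
// so PrepareAccept is invoked directly.
protected void StartAcceptAsync()
{
    SocketAsyncEventArgs acceptArg;
    if (SocketAcceptPool.Count > 0)
    {
        acceptArg = SocketAcceptPool.Pop();
    }
    else
    {
        acceptArg = new SocketAsyncEventArgs();
        acceptArg.Completed += (s, e) => PrepareAccept(e);
    }

    if (!Listener.AcceptAsync(acceptArg))
    {
        PrepareAccept(acceptArg);
    }
}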
public TCPServer(string serverName, IPEndPoint bindTo, int MaxConnections)
{
    ServerName = "[" + serverName + "]";

    // Create our Socket
    Listener = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);

    // Set Socket options
    Listener.LingerState = new LingerOption(enable: false, seconds: 0);
    Listener.SetSocketOption(SocketOptionLevel.Socket, SocketOptionName.KeepAlive, true);

    // Bind to our port
    Listener.Bind(bindTo);
    Listener.Listen(25);

    // Set the rest of our internals
    MaxNumConnections = MaxConnections;
    MaxConnectionsEnforcer = new SemaphoreSlim(MaxNumConnections, MaxNumConnections);
    SocketAcceptPool = new SocketAsyncEventArgsPool(ConcurrentAcceptPoolSize);
    SocketReadWritePool = new SocketAsyncEventArgsPool(MaxNumConnections * 2);

    // Create our Buffer Manager for IO operations.
    // Always allocate double space, one for receiving, and another for sending
    BufferManager = new BufferManager(MaxNumConnections * 2, BufferSizePerOperation);

    // Assign our Connection Accept SocketAsyncEventArgs object instances
    for (int i = 0; i < ConcurrentAcceptPoolSize; i++)
    {
        SocketAsyncEventArgs SockArg = new SocketAsyncEventArgs();
        SockArg.Completed += (s, e) => PrepareAccept(e);

        // Do NOT assign buffer space for Accept operations!
        SocketAcceptPool.Push(SockArg);
    }

    // Assign our Connection IO SocketAsyncEventArgs object instances
    for (int i = 0; i < MaxNumConnections * 2; i++)
    {
        SocketAsyncEventArgs SockArg = new SocketAsyncEventArgs();
        BufferManager.AssignBuffer(SockArg);
        SocketReadWritePool.Push(SockArg);
    }

    // Set public internals
    IsListening = true;
}
public void Init()
{
    m_maxConnection = 10000;
    m_bufferSize = 1024;

    // Number of concurrently connected clients * per-client packet buffer size * number of pre-allocated pools.
    // The pool count is 2: one for read, one for write.
    m_bufferManager = new BufferManager(m_maxConnection * m_bufferSize * m_preAllocCount, m_bufferSize);
    m_receiveEventArgsPool = new SocketAsyncEventArgsPool(m_maxConnection);
    m_sendEventArgsPool = new SocketAsyncEventArgsPool(m_maxConnection);

    m_bufferManager.InitBuffer();

    SocketAsyncEventArgs args;
    for (int i = 0; i < m_maxConnection; i++)
    {
        // Send and receive use the same socket:
        // one user token is created per client session, and both
        // eventArgs objects reference that same token.
        CustomUserToken token = new CustomUserToken();

        // receive pool
        {
            args = new SocketAsyncEventArgs();
            args.Completed += new EventHandler<SocketAsyncEventArgs>(OnReceiveCompleted);
            args.UserToken = token;
            m_bufferManager.SetBuffer(args);
            m_receiveEventArgsPool.Push(args);
        }

        // send pool
        {
            args = new SocketAsyncEventArgs();
            args.Completed += new EventHandler<SocketAsyncEventArgs>(OnSendCompleted);
            args.UserToken = token;
            m_bufferManager.SetBuffer(args);
            m_sendEventArgsPool.Push(args);
        }
    }
}
/// <summary>
/// Closes the client socket connection
/// </summary>
/// <param name="e">SocketAsyncEventArgs associated with the completed send/receive operation.</param>
private void CloseClientSocket(SocketAsyncEventArgs e)
{
    Socket s = e.UserToken as Socket;
    Log4Debug(String.Format("Client {0} disconnected!", s.RemoteEndPoint.ToString()));
    try
    {
        s.Shutdown(SocketShutdown.Both);
    }
    catch (Exception)
    {
        // Throws if the client has already closed, so it is not necessary to handle.
    }
    finally
    {
        s.Close();
    }

    Interlocked.Decrement(ref _clientCount);
    //_maxAcceptedClients.Release(); // TODO: this needs to be revised
    _objectPool.Push(e); // The SocketAsyncEventArgs is released and pushed back onto the reusable pool.
}
public void Start(IPEndPoint ipEndPoint)
{
    Console.Write("Buffer initializing...");
    bufferManager = new BufferManager(MaxConnectCount * BufferSize * opsToPreAlloc, BufferSize);
    bufferManager.InitBuffer();
    Console.WriteLine("Complete!");

    Console.Write("ConnectionPool initializing...");
    for (int i = 0; i < MaxConnectCount; i++)
    {
        var args = new SocketAsyncEventArgs();
        bufferManager.SetBuffer(args);
        args.Completed += Receive;
        argsPool.Push(args);
    }
    Console.WriteLine("Complete!");

    ListenSocket = new SysSocket.Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
    ListenSocket.Bind(ipEndPoint);
    ListenSocket.Listen(MaxConnectCount);
    ListenSocket.AcceptAsync(AcceptEventArgs);

    Console.WriteLine($"TcpService now listening [{ipEndPoint.Address.ToString()}:{ipEndPoint.Port}]");
    Console.WriteLine("TCP Service is running");
}
public static SerializeResult Serialize()
{
    try
    {
        var MaxConnections = int.Parse(Settings.GetValue("TCP.Listener.Max"));

        Counter = 0;
        Sessions = new Dictionary<int, Session>();
        SocketPool = new SocketAsyncEventArgsPool(MaxConnections);
        ConnectedAmount = new int();
        SocketSemaphore = new Semaphore(MaxConnections, MaxConnections);
        BufferManager = new BufferManager(RECV_BUFFER_SIZE * MaxConnections * OPS_TO_PRE_ALLOC, RECV_BUFFER_SIZE);

        Solution.AppendLine("SessionHandler: Pushing SocketAsync({0}), big-buffer({1})",
            MaxConnections, BufferManager.Buffer.Length);

        for (int i = 0; i < MaxConnections; i++)
        {
            var Async = new SocketAsyncEventArgs();
            Async.Completed += new EventHandler<SocketAsyncEventArgs>(Async_Completed);
            Async.UserToken = new AsyncUserToken();
            BufferManager.SetBuffer(Async);
            SocketPool.Push(Async);
        }

        var IPEndPoint = new IPEndPoint(IPAddress.Parse(Settings.GetValue("TCP.Listener.IP")),
            int.Parse(Settings.GetValue("TCP.Listener.Port")));

        BaseSocket = new Socket(IPEndPoint.AddressFamily, SocketType.Stream, ProtocolType.Tcp);
        BaseSocket.Bind(IPEndPoint);
        BaseSocket.Listen(int.Parse(Settings.GetValue("TCP.Listener.Backlog")));

        WaitForAsync(null);
    }
    catch
    {
        return SerializeResult.Broken;
    }

    return SerializeResult.Finished;
}
public void Init(int maxClient)
{
    connTimeOut = new UpdClientTimeOut();
    connTimeOut.TimeOut = 1800;
    connTimeOut.Start();

    dataPacketManager = new YDataPacketManager();
    YDataPacketManager.RegisterDataPacket(typeof(SocketRegisterPacket));
    dataPacketManager.DataPacketReceived += dataPacketManager_DataPacketReceived;

    this.maxClient = maxClient + 1;
    socketPool = new SocketAsyncEventArgsPool(maxClient);

    SocketAsyncEventArgs readWriteEventArg;
    bufferManager = new BufferManager(maxClient * bufferLenthg, bufferLenthg);
    bufferManager.InitBuffer();

    for (int i = 0; i < maxClient; i++)
    {
        readWriteEventArg = new SocketAsyncEventArgs();
        readWriteEventArg.Completed += readWriteEventArg_Completed;
        bufferManager.SetBuffer(readWriteEventArg);
        socketPool.Push(readWriteEventArg);
    }

    AbsInit();
    isInit = true;
}
static void Run(string[] args)
{
    var performanceCountersCategoryName = "Microshaoft EasyPerformanceCounters Category";
    var enableCounters = MultiPerformanceCountersTypeFlags.ProcessCounter
                            | MultiPerformanceCountersTypeFlags.ProcessedAverageTimerCounter
                            | MultiPerformanceCountersTypeFlags.ProcessedCounter
                            | MultiPerformanceCountersTypeFlags.ProcessedRateOfCountsPerSecondCounter
                            | MultiPerformanceCountersTypeFlags.ProcessingCounter;
    var sendEncoding = Encoding.Default;
    var receiveEncoding = Encoding.Default;
    //byte[] data = new byte[1024];

    string[] a = args[0].Split(new char[] { ':' });
    string ip = a[0];
    int port = int.Parse(a[1]);
    IPEndPoint ipep1 = new IPEndPoint(IPAddress.Parse(ip), port);
    Console.WriteLine("ipep1 {0}", ipep1.ToString());

    a = args[1].Split(new char[] { ':' });
    ip = a[0];
    port = int.Parse(a[1]);
    IPEndPoint ipep2 = new IPEndPoint(IPAddress.Parse(ip), port);
    Console.WriteLine("ipep2 {0}", ipep2.ToString());

    var remoteAnyIPEP = new IPEndPoint(IPAddress.Any, 0);

    Socket socket1 = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
    socket1.Bind(ipep1);
    SocketAsyncDataHandler<string> handler1 = new SocketAsyncDataHandler<string>(socket1, 1);
    var receiveSocketAsyncEventArgs1 = _socketAsyncEventArgsPool.Pop();
    _bufferManager.SetBuffer(receiveSocketAsyncEventArgs1);
    handler1.StartReceiveDataFrom
        (
            remoteAnyIPEP
            , receiveSocketAsyncEventArgs1
            , (x, y, z, w) =>
            {
                Console.WriteLine("Received count: {0}", x.ReceivedCount);
                Console.WriteLine("Bytes: {0}", z.Length);
                EasyPerformanceCountersHelper<CommonPerformanceCountersContainer>
                    .CountPerformance
                    (
                        enableCounters
                        , performanceCountersCategoryName
                        , "Hander1::Received"
                        , null
                        , () =>
                        {
                            var ss = receiveEncoding.GetString(z);
                            //Console.Write(s);
                            Console.WriteLine
                                (
                                    "from {0} , to {1}, data {2}"
                                    , x.WorkingSocket.LocalEndPoint
                                    , w.RemoteEndPoint
                                    , ss
                                );
                        }
                        , null
                        , null
                        , null
                    );
                return (false);
            }
            , (x, y, z) =>
            {
                Console.WriteLine(z.ToString());
                return (true);
            }
        );

    Socket socket2 = new Socket(AddressFamily.InterNetwork, SocketType.Dgram, ProtocolType.Udp);
    socket2.Bind(ipep2);
    SocketAsyncDataHandler<string> handler2 = new SocketAsyncDataHandler<string>(socket2, 2);
    var receiveSocketAsyncEventArgs2 = _socketAsyncEventArgsPool.Pop();
    _bufferManager.SetBuffer(receiveSocketAsyncEventArgs2);
    handler2.StartReceiveDataFrom
        (
            remoteAnyIPEP
            , receiveSocketAsyncEventArgs2
            , (x, y, z, w) =>
            {
                Console.WriteLine("Received count: {0}", x.ReceivedCount);
                Console.WriteLine("Bytes: {0}", z.Length);
                EasyPerformanceCountersHelper<CommonPerformanceCountersContainer>
                    .CountPerformance
                    (
                        enableCounters
                        , performanceCountersCategoryName
                        , "Hander2::Received"
                        , null
                        , () =>
                        {
                            var ss = receiveEncoding.GetString(z);
                            //Console.Write(s);
                            Console.WriteLine
                                (
                                    "from {0} , to {1}, data {2}"
                                    , x.WorkingSocket.LocalEndPoint
                                    , w.RemoteEndPoint
                                    , ss
                                );
                        }
                        , null
                        , null
                        , null
                    );
                return (false);
            }
            , (x, y, z) =>
            {
                Console.WriteLine(z.ToString());
                return (true);
            }
        );

    string s = string.Empty;
    Console.WriteLine("Send ...");
    while ((s = Console.ReadLine().ToLower()) != "q")
    {
        var buffer = sendEncoding.GetBytes(s);
        Parallel.For
            (
                0
                , 1000
                , new ParallelOptions()
                {
                    MaxDegreeOfParallelism = 1 // Environment.ProcessorCount
                    //, TaskScheduler = null
                }
                , i =>
                {
                    Thread.Sleep(5);
                    EasyPerformanceCountersHelper<CommonPerformanceCountersContainer>
                        .CountPerformance
                        (
                            enableCounters
                            , performanceCountersCategoryName
                            , "Hander1::Sended"
                            , null
                            , () =>
                            {
                                handler1.SendDataToSync(buffer, ipep2);
                            }
                            , null
                            , null
                            , null
                        );
                    EasyPerformanceCountersHelper<CommonPerformanceCountersContainer>
                        .CountPerformance
                        (
                            enableCounters
                            , performanceCountersCategoryName
                            , "Hander2::Sended"
                            , null
                            , () =>
                            {
                                handler2.SendDataToSync(buffer, ipep1);
                            }
                            , null
                            , null
                            , null
                        );
                }
            );
    }

    var e = handler1.ReceiveSocketAsyncEventArgs;
    //_bufferManager.FreeBuffer(e);
    _socketAsyncEventArgsPool.Push(e);
    e = handler2.ReceiveSocketAsyncEventArgs;
    //_bufferManager.FreeBuffer(e);
    _socketAsyncEventArgsPool.Push(e);
    handler1.DestoryWorkingSocket();
    handler2.DestoryWorkingSocket();
    Console.WriteLine("Send quit");
}
/// <summary>
/// Once a connection has been received, it's handed off here to convert it into
/// our client object, and prepared to be handed off to the parent for processing
/// </summary>
/// <param name="AcceptEventArg"></param>
protected async void PrepareAccept(SocketAsyncEventArgs AcceptEventArg)
{
    // If we do not get a success code here, we have a bad socket
    if (IgnoreNewConnections || AcceptEventArg.SocketError != SocketError.Success)
    {
        // This method closes the socket and releases all resources, both
        // managed and unmanaged. It internally calls Dispose.
        AcceptEventArg.AcceptSocket.Close();

        // Put the SAEA back in the pool.
        SocketAcceptPool.Push(AcceptEventArg);
        StartAcceptAsync();
        return;
    }

    // If the server is full, send an error message to the player
    if (ConnectionEnforceMode == EnforceMode.DuringPrepare)
    {
        bool Success = await MaxConnectionsEnforcer.WaitAsync(WaitTimeout);
        if (!Success)
        {
            // If we aren't even listening...
            if (!IsListening)
            {
                return;
            }

            // Alert the client that we are full
            if (!String.IsNullOrEmpty(FullErrorMessage))
            {
                byte[] buffer = Encoding.UTF8.GetBytes(
                    String.Format(@"\error\\err\0\fatal\\errmsg\{0}\id\1\final\", FullErrorMessage)
                );
                AcceptEventArg.AcceptSocket.Send(buffer);
            }

            // Log so we can track this!
            Program.ErrorLog.Write("NOTICE: [GamespyTcpSocket.PrepareAccept] The Server is currently full! Rejecting connecting client.");

            // Put the SAEA back in the pool.
            AcceptEventArg.AcceptSocket.Close();
            SocketAcceptPool.Push(AcceptEventArg);
            StartAcceptAsync();
            return;
        }
    }

    // Begin accepting a new connection
    StartAcceptAsync();

    // Grab a send/receive object
    SocketAsyncEventArgs ReadArgs = SocketReadWritePool.Pop();
    SocketAsyncEventArgs WriteArgs = SocketReadWritePool.Pop();

    // Pass over the reference to the new socket that is handling
    // this specific stream, and dereference it so we can hand the
    // accept event args back over
    ReadArgs.AcceptSocket = AcceptEventArg.AcceptSocket;
    WriteArgs.AcceptSocket = AcceptEventArg.AcceptSocket;
    AcceptEventArg.AcceptSocket = null;

    // Hand back the AcceptEventArg so another connection can be accepted
    SocketAcceptPool.Push(AcceptEventArg);

    // Hand off processing to the parent
    GamespyTcpStream Stream = null;
    try
    {
        Stream = new GamespyTcpStream(this, ReadArgs, WriteArgs);
        ProcessAccept(Stream);
    }
    catch (Exception e)
    {
        // Report Error
        Program.ErrorLog.Write("ERROR: An Error occurred at [GamespyTcpSocket.PrepareAccept] : Generating Exception Log");
        ExceptionHandler.GenerateExceptionLog(e);

        // Make sure the connection is closed properly
        if (Stream != null)
        {
            Release(Stream);
        }
    }
}
private void HandleBadAccept(SocketAsyncEventArgs acceptEventArgs)
{
    acceptEventArgs.AcceptSocket.Close();
    poolOfAcceptEventArgs.Push(acceptEventArgs);
}
private void Initialize()
{
    this.totalBytesRead = 0;
    this.connectSocketNumber = 0;
    this.bufferManager = new BufferManager(bufferSize * connectNumber * 2, bufferSize);
    this.readWritePool = new SocketAsyncEventArgsPool(connectNumber);
    this.acceptClientMaxNumber = new Semaphore(connectNumber, connectNumber);

    bufferManager.InitBuffer();

    SocketAsyncEventArgs readWriteEventArgs;
    for (int index = 0; index < connectNumber; index++)
    {
        readWriteEventArgs = new SocketAsyncEventArgs();
        readWriteEventArgs.UserToken = new AsyncUserToken();
        readWriteEventArgs.Completed += new EventHandler<SocketAsyncEventArgs>(IO_Completed);
        bufferManager.SetBuffer(readWriteEventArgs);
        readWritePool.Push(readWriteEventArgs);
    }
}