/// <summary>
/// Called when the server receives data from a client.
/// </summary>
public static void OnData(int connectionId, ArraySegmentX<byte> segment)
{
    // the first byte of every packet encodes the internal event type.
    // read it at the segment's offset (not index 0) in case the segment
    // doesn't start at the beginning of the backing array.
    var eventType = (ChicasInternalEventType)segment.Array[segment.Offset];
    Log.Info($"Received data ({segment.Count} bytes), event type: {eventType}");

    switch (eventType)
    {
        case ChicasInternalEventType.Data:
            // relay raw data to all connections
            var all = server.GetAllConnectionsIds();
            ServerSend(all, segment);
            break;
        case ChicasInternalEventType.CreatePlayer:
            InternalServerEventHandler.CreatePlayer(connectionId, segment);
            break;
        case ChicasInternalEventType.FetchFriends:
            InternalServerEventHandler.FetchFriends(connectionId, segment);
            break;
        case ChicasInternalEventType.SendInvitation:
            InternalServerEventHandler.SendInvitation(connectionId, segment);
            break;
        default:
            Log.Warning("Undefined event type: " + eventType);
            break;
    }
}
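// OnData and the internal event handlers below all assume the same tiny
// framing convention: byte[0] carries the ChicasInternalEventType and the
// remaining bytes are the UTF8 text payload. A minimal sketch of a
// serializer matching that convention (hypothetical; the real
// NetworkSerializer may differ in details):
public static class NetworkSerializerSketch
{
    public static byte[] SerializeText(string text, ChicasInternalEventType eventType)
    {
        byte[] payload = System.Text.Encoding.UTF8.GetBytes(text);
        byte[] packet = new byte[payload.Length + 1];
        packet[0] = (byte)eventType; // event type always goes first
        Buffer.BlockCopy(payload, 0, packet, 1, payload.Length);
        return packet;
    }

    public static string DeserializeText(byte[] packet)
    {
        // skip the event type byte at index 0
        return System.Text.Encoding.UTF8.GetString(packet, 1, packet.Length - 1);
    }
}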
// enqueue a message
// -> ArraySegment to avoid allocations later
// -> parameters passed directly so it's more obvious that we don't just
//    queue a passed 'Message'; instead we copy the ArraySegment into
//    a byte[] and store it internally, etc.
public void Enqueue(int connectionId, EventType eventType, ArraySegmentX<byte> message)
{
    // pool & queue usage always needs to be locked
    lock (this)
    {
        // does this message have any data content?
        ArraySegmentX<byte> segment = default;
        if (message != default)
        {
            // the ArraySegment is only valid until returning, so copy
            // it into a byte[] that we can queue safely.
            // get one from the pool first to avoid allocations
            byte[] bytes = pool.Take();

            // copy into it
            Buffer.BlockCopy(message.Array, message.Offset, bytes, 0, message.Count);

            // indicate which part is the message
            segment = new ArraySegmentX<byte>(bytes, 0, message.Count);
        }

        // enqueue it
        // IMPORTANT: pass the segment wrapping the pooled byte[],
        //            NOT the 'message' that is only valid until returning!
        Entry entry = new Entry(connectionId, eventType, segment);
        queue.Enqueue(entry);

        // increase the counter for this connectionId
        int oldCount = Count(connectionId);
        queueCounter[connectionId] = oldCount + 1;
    }
}
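// Enqueue depends on a byte[] pool to stay allocation free. A minimal
// sketch of such a pool, assuming fixed-size buffers large enough for
// MaxMessageSize (the real Pool implementation may differ). No internal
// locking is needed because the pipe already locks around Take/Return:
public class ByteArrayPoolSketch
{
    readonly Stack<byte[]> buffers = new Stack<byte[]>();
    readonly int bufferSize;

    public ByteArrayPoolSketch(int bufferSize) { this.bufferSize = bufferSize; }

    // reuse a pooled buffer, or allocate a new one if the pool ran dry
    public byte[] Take() => buffers.Count > 0 ? buffers.Pop() : new byte[bufferSize];

    // give a buffer back so the next Take() can reuse it
    public void Return(byte[] buffer) => buffers.Push(buffer);
}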
/// <summary>
/// Relays a friend invitation from the sender to the target friend's connection.
/// Expects the message text as "friendConnectionId|providedAddress|extra".
/// </summary>
public static void SendInvitation(int connectionID, ArraySegmentX<byte> message)
{
    var text = GetTextFromSegment(message);
    string[] split = text.Split('|');
    int friendConnectionID = int.Parse(split[0]);

    var friend = Program.GetServer().GetClient(friendConnectionID);
    if (friend == null)
    {
        Log.Warning($"Couldn't find client with conID {friendConnectionID}");
        return;
    }

    var sender = Program.GetServer().GetClient(connectionID);
    if (sender == null)
    {
        Log.Warning($"Couldn't find sender client with conID {connectionID}");
        return;
    }

    sender.ProvidedAddress = split[1];
    string data = $"{sender.NickName}|{connectionID}|{sender.AuthID}|{sender.ProvidedAddress}|{split[2]}";

    Program.ServerSendToSingle(friendConnectionID,
        new ArraySegmentX<byte>(NetworkSerializer.SerializeText(data, ChicasInternalEventType.ReceiveInvitation)));
    Log.Info($"Invitation sent to {friend.NickName} from {sender.NickName} to join {sender.ProvidedAddress}");
}
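// Wire format recap for SendInvitation ('|' separated in both directions):
//   client -> server:  "<friendConnectionId>|<providedAddress>|<extra>"
//   server -> friend:  "<nick>|<senderConnId>|<authId>|<providedAddress>|<extra>"
// A hypothetical client-side call producing the first format
// (friendConnectionID, myHostAddress and roomName are placeholders):
byte[] invitation = NetworkSerializer.SerializeText(
    $"{friendConnectionID}|{myHostAddress}|{roomName}",
    ChicasInternalEventType.SendInvitation);
client.Send(new ArraySegmentX<byte>(invitation));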
/// <summary>
/// Resolves which of the requested friend ids are currently connected and
/// echoes their connection info back to the requesting client.
/// </summary>
public static void FetchFriends(int connectionID, ArraySegmentX<byte> message)
{
    byte[] bytesArr = new byte[message.Count];
    Buffer.BlockCopy(message.Array, message.Offset, bytesArr, 0, message.Count);

    // get the text from the byte array
    var line = NetworkSerializer.DeserializeText(bytesArr);

    // unpack the data from the text line
    var split = line.Split(',');
    var ids = new int[split.Length];

    // get the friend ids to verify
    for (int i = 0; i < split.Length; i++)
    {
        if (string.IsNullOrEmpty(split[i])) { continue; }
        ids[i] = int.Parse(split[i]);
    }

    var clients = Program.GetServer().GetAllClients();
    var friends = new FriendData[ids.Length];

    // check which of the friends are connected to the server
    for (int i = 0; i < clients.Length; i++)
    {
        for (int e = 0; e < ids.Length; e++)
        {
            // if a friend is connected
            if (ids[e] == clients[i].AuthID)
            {
                // collect their information
                friends[e] = new FriendData(ids[e]);
                friends[e].ConnectionID = clients[i].OwnerID;
                friends[e].IsOnline = true;
            }
        }
    }

    // echo the information of the connected friends to the client who requested it
    string echo = "";
    for (int i = 0; i < friends.Length; i++)
    {
        if (friends[i].Found == false) { continue; }
        var f = friends[i];
        echo += $"{f.AuthID}|{f.ConnectionID}|{f.IsOnlineBinary()}&";
    }

    //Log.Info($"Fetched friends: {echo}");
    Program.ServerSendToSingle(connectionID,
        new ArraySegmentX<byte>(NetworkSerializer.SerializeText(echo, ChicasInternalEventType.FetchFriends)));
}
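// The echo built above is a '&' separated list of "authId|connId|online"
// records. A hypothetical client-side parser for that reply (assuming
// IsOnlineBinary() emits "1" for online):
static void ParseFriendsEcho(string echo)
{
    foreach (string record in echo.Split('&'))
    {
        // the trailing '&' leaves one empty entry behind
        if (string.IsNullOrEmpty(record)) continue;

        string[] fields = record.Split('|');
        int authId       = int.Parse(fields[0]);
        int connectionId = int.Parse(fields[1]);
        bool isOnline    = fields[2] == "1";
        // ... update the local friends list with these values ...
    }
}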
/// <summary>
/// Called after a client connects to the server and registers its player presence.
/// Expects the message text as "Nickname|AuthId", e.g. "Username|10".
/// </summary>
public static void CreatePlayer(int connectionID, ArraySegmentX<byte> message)
{
    byte[] bytesArr = new byte[message.Count];
    Buffer.BlockCopy(message.Array, message.Offset, bytesArr, 0, message.Count);

    var text = NetworkSerializer.DeserializeText(bytesArr);
    var client = Program.GetServer().GetClient(connectionID);

    // example text: Username|10
    string[] split = text.Split('|');
    string nickName = split[0];
    int.TryParse(split[1], out int id);

    string echo;
    if (client != null)
    {
        client.NickName = nickName;
        client.AuthID = id;
        echo = $"{connectionID}";
        Log.Info($"Player: {nickName}#{id} created and linked to connection: {connectionID}");
    }
    else
    {
        echo = "-1";
    }

    Program.ServerSendToSingle(connectionID,
        new ArraySegmentX<byte>(NetworkSerializer.SerializeText(echo, ChicasInternalEventType.CreatePlayer)));
}
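// A hypothetical client-side registration call matching the
// "Nickname|AuthId" format parsed above (myNickName/myAuthId are placeholders):
byte[] packet = NetworkSerializer.SerializeText(
    $"{myNickName}|{myAuthId}",
    ChicasInternalEventType.CreatePlayer);
client.Send(new ArraySegmentX<byte>(packet));
// on success the server echoes the connectionId back; "-1" means the
// connection could not be found.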
// send message to client using socket connection.
// arraysegment for allocation free sends later.
// -> the segment's array is only used until Send() returns!
public bool Send(int connectionId, ArraySegmentX<byte> message)
{
    // respect max message size to avoid allocation attacks.
    if (message.Count <= MaxMessageSize)
    {
        // find the connection
        if (clients.TryGetValue(connectionId, out ChicasPlayer connection))
        {
            // check send pipe limit
            if (connection.sendPipe.Count < SendQueueLimit)
            {
                // add to thread safe send pipe and return immediately.
                // calling Send here would be blocking (sometimes for long
                // times if the other side lags or the wire was disconnected)
                connection.sendPipe.Enqueue(message);
                connection.sendPending.Set(); // interrupt SendThread WaitOne()
                return true;
            }
            // disconnect if the send queue gets too big.
            // -> avoids ever growing queue memory if the network is slower
            //    than the input
            // -> disconnecting is great for load balancing. better to
            //    disconnect one connection than to risk every
            //    connection / the whole server
            //
            // note: while SendThread always grabs the WHOLE send queue
            //       immediately, it's still possible that the sending
            //       blocks for so long that the send queue just gets
            //       way too big. have a limit - better safe than sorry.
            else
            {
                // log the reason
                Log.Warning($"Server.Send: sendPipe for connection {connectionId} reached limit of {SendQueueLimit}. This can happen if we call Send faster than the network can process messages. Disconnecting this connection for load balancing.");

                // just close it. the send thread will take care of the rest.
                connection.client.Close();
                return false;
            }
        }
        else
        {
            // sending to an invalid connectionId is expected sometimes.
            // for example, if a client disconnects, the server might still
            // try to send for one frame before it calls GetNextMessages
            // again and realizes that a disconnect happened.
            Log.Warning("Couldn't find connection: " + connectionId);
        }
        return false;
    }
    Log.Error("Server.Send: message too big: " + message.Count + ". Limit: " + MaxMessageSize);
    return false;
}
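// Because the send pipe copies the segment before Send() returns, callers
// can reuse a single buffer for every send. A usage sketch (WriteMessage
// is a hypothetical serializer that returns the written byte count):
byte[] sendBuffer = new byte[1024]; // reused across all sends
int written = WriteMessage(sendBuffer);
server.Send(connectionId, new ArraySegmentX<byte>(sendBuffer, 0, written));
// sendBuffer can be overwritten immediately; the pipe owns its own copy.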
// send threads need to dequeue each byte[] and write it into the socket
// -> dequeueing one byte[] after another works, but it's WAY slower
//    than dequeueing all immediately (locks only once)
//    lock{} & DequeueAll is WAY faster than ConcurrentQueue & dequeue
//    one after another:
//
//      uMMORPG 450 CCU
//        SafeQueue:       900-1440ms latency
//        ConcurrentQueue:     2000ms latency
//
// -> the most obvious solution is to just return a list with all byte[]
//    (which allocates) and then write each one into the socket
// -> a faster solution is to serialize each one into one payload buffer
//    and pass that to the socket only once. fewer socket calls always
//    give WAY better CPU performance(!)
// -> to avoid allocating a new list of entries each time, we simply
//    serialize all entries into the payload here already
// => having all this complexity built into the pipe makes testing and
//    modifying the algorithm super easy!
//
// IMPORTANT: serializing in here allows us to return the byte[]
//            entries back to a pool later to completely avoid
//            allocations!
public bool DequeueAndSerializeAll(ref byte[] payload, out int packetSize)
{
    // pool & queue usage always needs to be locked
    lock (this)
    {
        // do nothing if empty
        packetSize = 0;
        if (queue.Count == 0)
        {
            return false;
        }

        // we might have multiple pending messages. merge into one
        // packet to avoid TCP overheads and improve performance.
        //
        // IMPORTANT: Mirror & DOTSNET already batch into MaxMessageSize
        //            chunks, but we STILL pack all pending messages
        //            into one large payload so we only give it to TCP
        //            ONCE. This is HUGE for performance so we keep it!
        foreach (ArraySegmentX<byte> message in queue)
        {
            packetSize += Common.PackageHeaderSize + message.Count; // header + content
        }

        // create the payload buffer if it wasn't created yet or the
        // previous one is too small
        // IMPORTANT: payload.Length might be > packetSize! don't use it!
        if (payload == null || payload.Length < packetSize)
        {
            payload = new byte[packetSize];
        }

        // dequeue all byte[] messages and serialize into the packet
        int position = 0;
        while (queue.Count > 0)
        {
            // dequeue
            ArraySegmentX<byte> message = queue.Dequeue();

            // write the header (size) into the buffer at position
            Utils.IntToBytesBigEndianNonAlloc(message.Count, payload, position);
            position += Common.PackageHeaderSize;

            // copy the message into the payload at position
            Buffer.BlockCopy(message.Array, message.Offset, payload, position, message.Count);
            position += message.Count;

            // return to pool so it can be reused (avoids allocations!)
            pool.Return(message.Array);
        }

        // we did serialize something
        return true;
    }
}
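// The header written above is a 4 byte big endian length prefix. A sketch
// of what Utils.IntToBytesBigEndianNonAlloc presumably does, assuming
// Common.PackageHeaderSize == 4:
public static void IntToBytesBigEndianNonAlloc(int value, byte[] bytes, int offset)
{
    bytes[offset + 0] = (byte)(value >> 24);
    bytes[offset + 1] = (byte)(value >> 16);
    bytes[offset + 2] = (byte)(value >> 8);
    bytes[offset + 3] = (byte)value;
}
// e.g. two pending messages of 3 and 5 bytes serialize into one 16 byte
// packet: [0,0,0,3][3 content bytes][0,0,0,5][5 content bytes]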
// send message to server using socket connection.
// arraysegment for allocation free sends later.
// -> the segment's array is only used until Send() returns!
public bool Send(ArraySegmentX<byte> message)
{
    if (Connected)
    {
        // respect max message size to avoid allocation attacks.
        if (message.Count <= MaxMessageSize)
        {
            // check send pipe limit
            if (state.sendPipe.Count < SendQueueLimit)
            {
                // add to thread safe send pipe and return immediately.
                // calling Send here would be blocking (sometimes for long
                // times if the other side lags or the wire was disconnected)
                state.sendPipe.Enqueue(message);
                state.sendPending.Set(); // interrupt SendThread WaitOne()
                return true;
            }
            // disconnect if the send queue gets too big.
            // -> avoids ever growing queue memory if the network is slower
            //    than the input
            // -> avoids ever growing latency as well
            //
            // note: while SendThread always grabs the WHOLE send queue
            //       immediately, it's still possible that the sending
            //       blocks for so long that the send queue just gets
            //       way too big. have a limit - better safe than sorry.
            else
            {
                // log the reason
                Log.Warning($"Client.Send: sendPipe reached limit of {SendQueueLimit}. This can happen if we call Send faster than the network can process messages. Disconnecting to avoid ever growing memory & latency.");

                // just close it. the send thread will take care of the rest.
                state.client.Close();
                return false;
            }
        }
        Log.Error("Client.Send: message too big: " + message.Count + ". Limit: " + MaxMessageSize);
        return false;
    }
    Log.Warning("Client.Send: not connected!");
    return false;
}
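// Send() returns false when not connected, when the message exceeds
// MaxMessageSize, or when the queue limit forced a disconnect. A caller
// can use that to stop sending until the next Connect() (sketch only):
if (!client.Send(new ArraySegmentX<byte>(packet)))
{
    // hypothetical handling: mark the session as broken and reconnect later
}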
// peek the next message
// -> allows the caller to process it while the pipe still holds on to
//    the byte[]
// -> TryDequeue should be called after processing, so that the message
//    is actually dequeued and the byte[] is returned to the pool!
// => see the TryDequeue comments!
//
// IMPORTANT: TryPeek & Dequeue need to be called from the SAME THREAD!
public bool TryPeek(out int connectionId, out EventType eventType, out ArraySegmentX<byte> data)
{
    connectionId = 0;
    eventType = EventType.Disconnected;
    data = default;

    // pool & queue usage always needs to be locked
    lock (this)
    {
        if (queue.Count > 0)
        {
            Entry entry = queue.Peek();
            connectionId = entry.connectionId;
            eventType = entry.eventType;
            data = entry.data;
            return true;
        }
        return false;
    }
}
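// The consumption pattern implied by the comments above, as it might look
// on the main thread (TryDequeue is assumed to take no arguments and
// return the pooled byte[] internally):
while (pipe.TryPeek(out int connId, out EventType evt, out ArraySegmentX<byte> data))
{
    if (evt == EventType.Data)
        OnData(connId, data); // safe: the pipe still owns the byte[] here

    pipe.TryDequeue(); // only now can the byte[] go back to the pool
}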
// enqueue a message
// arraysegment for allocation free sends later.
// -> the segment's array is only used until Enqueue() returns!
public void Enqueue(ArraySegmentX<byte> message)
{
    // pool & queue usage always needs to be locked
    lock (this)
    {
        // the ArraySegment array is only valid until returning, so copy
        // it into a byte[] that we can queue safely.
        // get one from the pool first to avoid allocations
        byte[] bytes = pool.Take();

        // copy into it
        Buffer.BlockCopy(message.Array, message.Offset, bytes, 0, message.Count);

        // indicate which part is the message
        ArraySegmentX<byte> segment = new ArraySegmentX<byte>(bytes, 0, message.Count);

        // now enqueue it
        queue.Enqueue(segment);
    }
}
// the thread's receive function is the same for the client and the server's clients
public static void ReceiveLoop(int connectionId, TcpClient client, int MaxMessageSize, MagnificentReceivePipe receivePipe, int QueueLimit)
{
    // get NetworkStream from client
    NetworkStream stream = client.GetStream();

    // every receive loop needs its own receive buffer of
    // HeaderSize + MaxMessageSize to avoid runtime allocations.
    //
    // IMPORTANT: DO NOT make this a member, otherwise every connection
    //            on the server would use the same buffer simultaneously
    byte[] receiveBuffer = new byte[Common.PackageHeaderSize + MaxMessageSize];

    // avoid header[4] allocations
    //
    // IMPORTANT: DO NOT make this a member, otherwise every connection
    //            on the server would use the same buffer simultaneously
    byte[] headerBuffer = new byte[Common.PackageHeaderSize];

    // absolutely must wrap with try/catch, otherwise thread exceptions
    // are silent
    try
    {
        // add the connected event to the pipe
        receivePipe.Enqueue(connectionId, EventType.Connected, default);

        // let's talk about reading data.
        // -> normally we would read as much as possible and then
        //    extract as many <size,content>,<size,content> messages
        //    as we received this time. this is really complicated
        //    and expensive to do though
        // -> instead we use a trick:
        //      Read(4) -> size
        //        Read(size) -> content
        //      repeat
        //    Read is blocking, but it doesn't matter since the
        //    best thing to do until the full message arrives
        //    is to wait.
        // => this is the most elegant AND fast solution.
        //    + no resizing
        //    + no extra allocations, just one for the content
        //    + no crazy extraction logic
        while (true)
        {
            // read the next message (blocking) or stop if the stream closed
            if (!ReadMessageBlocking(stream, MaxMessageSize, headerBuffer, receiveBuffer, out int size))
            {
                // break instead of return so the stream close still happens!
                break;
            }

            // create an ArraySegment for the read message
            ArraySegmentX<byte> message = new ArraySegmentX<byte>(receiveBuffer, 0, size);

            // send to the main thread via the pipe
            // -> it'll copy the message internally so we can reuse the
            //    receive buffer for the next read!
            receivePipe.Enqueue(connectionId, EventType.Data, message);

            // disconnect if the receive pipe gets too big for this connectionId.
            // -> avoids ever growing queue memory if the network is slower
            //    than the input
            // -> disconnecting is great for load balancing. better to
            //    disconnect one connection than to risk every
            //    connection / the whole server
            if (receivePipe.Count(connectionId) >= QueueLimit)
            {
                // log the reason
                Log.Warning($"receivePipe reached limit of {QueueLimit} for connectionId {connectionId}. This can happen if network messages come in way faster than we manage to process them. Disconnecting this connection for load balancing.");

                // IMPORTANT: do NOT clear the whole queue. we use one
                //            queue for all connections.
                //receivePipe.Clear();

                // just break. the finally{} will close everything.
                break;
            }
        }
    }
    catch (Exception exception)
    {
        // something went wrong. the thread was interrupted or the
        // connection closed or we closed our own connection or ...
        // -> either way we should stop gracefully
        Log.Info("ReceiveLoop: finished receive function for connectionId=" + connectionId + " reason: " + exception);
    }
    finally
    {
        // clean up no matter what
        stream.Close();
        client.Close();

        // add the 'Disconnected' message after disconnecting properly.
        // -> always AFTER closing the streams to avoid a race condition
        //    where Disconnected -> Reconnect wouldn't work because
        //    Connected is still true for a short moment before the stream
        //    would be closed.
        receivePipe.Enqueue(connectionId, EventType.Disconnected, default);
    }
}
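// A sketch of the ReadMessageBlocking counterpart used above: read exactly
// the header bytes, validate the big endian size, then read exactly that many
// content bytes (assumed behavior; the real implementation may differ):
static bool ReadMessageBlocking(NetworkStream stream, int MaxMessageSize,
                                byte[] headerBuffer, byte[] receiveBuffer, out int size)
{
    size = 0;

    // read exactly the header bytes (blocking)
    if (!ReadExactly(stream, headerBuffer, Common.PackageHeaderSize))
        return false;

    // big endian header -> content size
    size = (headerBuffer[0] << 24) | (headerBuffer[1] << 16) |
           (headerBuffer[2] << 8)  |  headerBuffer[3];

    // protect against allocation attacks and corrupt headers
    if (size <= 0 || size > MaxMessageSize)
        return false;

    // read exactly 'size' content bytes into the receive buffer
    return ReadExactly(stream, receiveBuffer, size);
}

static bool ReadExactly(NetworkStream stream, byte[] buffer, int amount)
{
    int bytesRead = 0;
    while (bytesRead < amount)
    {
        int read = stream.Read(buffer, bytesRead, amount - bytesRead);
        if (read == 0) return false; // stream closed / disconnected
        bytesRead += read;
    }
    return true;
}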
// structural equality: same backing array (by reference), offset and count
public bool Equals(ArraySegmentX<T> obj) =>
    obj.Array == Array && obj.Offset == Offset && obj.Count == Count;
public Entry(int connectionId, EventType eventType, ArraySegmentX<byte> data)
{
    this.connectionId = connectionId;
    this.eventType = eventType;
    this.data = data;
}
/// <summary>
/// Copies the segment's content into a newly allocated byte[].
/// </summary>
/// <returns>A byte[] containing exactly the segment's bytes.</returns>
private static byte[] GetBytesFromSegment(ArraySegmentX<byte> message)
{
    byte[] bytesArr = new byte[message.Count];
    Buffer.BlockCopy(message.Array, message.Offset, bytesArr, 0, message.Count);
    return bytesArr;
}
/// <summary>
/// Extracts the text payload from the given segment.
/// </summary>
/// <returns>The deserialized text.</returns>
private static string GetTextFromSegment(ArraySegmentX<byte> message)
{
    return NetworkSerializer.DeserializeText(GetBytesFromSegment(message));
}