// add a new batch.
// returns true if valid.
// returns false if not, in which case the connection should be disconnected.
public bool AddBatch(ArraySegment<byte> batch)
{
    // IMPORTANT: the ArraySegment is only valid until we return, so the
    // contents are copied into a pooled writer below.
    //
    // NOTE: empty ArraySegments can't be constructed, so there is no need
    // to guard against zero-length input.

    // reject batches too small to even contain the tick timestamp header
    if (batch.Count < Batcher.HeaderSize)
        return false;

    // copy into a pooled writer.
    // -> WriteBytes (not WriteSegment) because the latter would prepend a
    //    size header; we want the raw bytes exactly as received.
    // -> the writer goes back to the pool when sending!
    NetworkWriterPooled writer = NetworkWriterPool.Get();
    writer.WriteBytes(batch.Array, batch.Offset, batch.Count);

    // if this is the first queued batch, point the reader at it
    if (batches.Count == 0)
        StartReadingBatch(writer);

    // queue the copy
    batches.Enqueue(writer);
    //Debug.Log($"Adding Batch {BitConverter.ToString(batch.Array, batch.Offset, batch.Count)} => batches={batches.Count} reader={reader}");
    return true;
}
// Send stage two: serialized NetworkMessage as ArraySegment<byte>
internal override void Send(ArraySegment<byte> segment, int channelId = Channels.Reliable)
{
    // nothing to send is a usage error; report and bail out.
    if (segment.Count == 0)
    {
        Debug.LogError("LocalConnection.SendBytes cannot send zero bytes");
        return;
    }

    // OnTransportData assumes batching, so wrap the message into a batch
    // with the proper timestamp prefix.
    Batcher batcher = GetBatchForChannelId(channelId);
    batcher.AddMessage(segment, NetworkTime.localTime);

    // flush to the server's OnTransportData immediately:
    // a local connection to the server always delivers right away.
    using (NetworkWriterPooled writer = NetworkWriterPool.Get())
    {
        // build the batch with our local time (double precision)
        if (batcher.GetBatch(writer))
            NetworkServer.OnTransportData(connectionId, writer.ToArraySegment(), channelId);
        else
            Debug.LogError("Local connection failed to make batch. This should never happen.");
    }
}
// add a message for batching
// we allow any sized messages.
// caller needs to make sure they are within max packet size.
public void AddMessage(ArraySegment<byte> message, double timeStamp)
{
    // would appending this message push the batch in progress past the
    // threshold? then finalize the current batch first.
    // => staying at or below the threshold is fine; GetBatch() will
    //    finalize it.
    // => see unit tests.
    bool wouldExceed = batch != null && batch.Position + message.Count > threshold;
    if (wouldExceed)
    {
        batches.Enqueue(batch);
        batch = null;
    }

    // start a fresh batch if we don't currently have one
    if (batch == null)
    {
        // borrow from pool. we return it in GetBatch.
        batch = NetworkWriterPool.Get();

        // the timestamp goes first:
        // -> double precision for accuracy over long periods of time
        // -> batches are per-frame, so it doesn't matter which message's
        //    timestamp we use.
        batch.WriteDouble(timeStamp);
    }

    // append the serialization, even if it alone exceeds the threshold:
    // -> oversized messages are allowed as a single batch
    // -> WriteBytes (not WriteSegment) because the latter would add a
    //    size header; we want to write the raw bytes directly.
    batch.WriteBytes(message.Array, message.Offset, message.Count);
}
void FixedUpdate()
{
    // skip entirely if sending is disallowed or the animator is disabled
    if (!SendMessagesAllowed || !animator.enabled)
        return;

    CheckSendRate();

    // check each animator layer for a state change and sync it
    for (int layer = 0; layer < animator.layerCount; layer++)
    {
        int stateHash;
        float normalizedTime;
        if (!CheckAnimStateChanged(out stateHash, out normalizedTime, layer))
            continue;

        using (NetworkWriterPooled writer = NetworkWriterPool.Get())
        {
            WriteParameters(writer);
            SendAnimationMessage(stateHash, normalizedTime, layer, layerWeight[layer], writer.ToArray());
        }
    }

    CheckSpeed();
}
// flush batched messages at the end of every Update.
internal virtual void Update()
{
    // go through batches for all channels
    foreach (KeyValuePair<int, Batcher> kvp in batches)
    {
        // make and send as many batches as necessary from the stored
        // messages.
        Batcher batcher = kvp.Value;
        using (NetworkWriterPooled writer = NetworkWriterPool.Get())
        {
            // make a batch with our local time (double precision)
            while (batcher.MakeNextBatch(writer, NetworkTime.localTime))
            {
                // validate packet before handing the batch to the
                // transport. this guarantees that we always stay
                // within transport's max message size limit.
                // => just in case transport forgets to check it
                // => just in case mirror miscalculated it etc.
                ArraySegment<byte> segment = writer.ToArraySegment();
                if (ValidatePacketSize(segment, kvp.Key))
                {
                    // send to transport
                    SendToTransport(segment, kvp.Key);
                    //UnityEngine.Debug.Log($"sending batch of {writer.Position} bytes for channel={kvp.Key} connId={connectionId}");
                }

                // reset writer for each new batch.
                // IMPORTANT: reset even if validation failed. previously the
                // reset only happened on success, so a rejected batch's bytes
                // stayed in the writer and every following MakeNextBatch
                // appended after them, corrupting all subsequent batches on
                // this channel.
                writer.Position = 0;
            }
        }
    }
}
// serialize a NetworkMessage and send it over the given channel.
public void Send<T>(T message, int channelId = Channels.Reliable)
    where T : struct, NetworkMessage
{
    using (NetworkWriterPooled writer = NetworkWriterPool.Get())
    {
        // serialize allocation free, record diagnostics, then hand the
        // raw bytes to the segment-based Send.
        MessagePacking.Pack(message, writer);
        NetworkDiagnostics.OnSend(message, channelId, writer.Position, 1);
        Send(writer.ToArraySegment(), channelId);
    }
}
// add a message for batching
// we allow any sized messages.
// caller needs to make sure they are within max packet size.
public void AddMessage(ArraySegment<byte> message)
{
    // copy into a (pooled) writer, because the segment is only valid
    // until we return.
    // -> WriteBytes instead of WriteSegment: the latter would prepend a
    //    size header, and we want the raw bytes only.
    // -> the writer is returned to the pool when making the batch!
    // IMPORTANT: NOT adding a size header per message saves LOTS of bandwidth
    NetworkWriterPooled copy = NetworkWriterPool.Get();
    copy.WriteBytes(message.Array, message.Offset, message.Count);
    messages.Enqueue(copy);
}
// Send stage two: serialized NetworkMessage as ArraySegment<byte>
internal override void Send(ArraySegment<byte> segment, int channelId = Channels.Reliable)
{
    // the segment is only valid until we return, so copy it into a pooled
    // writer and queue that instead.
    // => the pooled writer is returned to the pool when dequeuing.
    // => WriteBytes instead of WriteArraySegment: the latter includes a
    //    4 bytes header, and we just want to write raw.
    //Debug.Log($"Enqueue {BitConverter.ToString(segment.Array, segment.Offset, segment.Count)}");
    NetworkWriterPooled copy = NetworkWriterPool.Get();
    copy.WriteBytes(segment.Array, segment.Offset, segment.Count);
    connectionToServer.queue.Enqueue(copy);
}
// send animator parameters if the sync interval has elapsed.
void CheckSendRate()
{
    double now = NetworkTime.localTime;

    // only proceed when sending is allowed, syncing is enabled
    // (syncInterval >= 0) and the next send time has passed.
    bool due = SendMessagesAllowed && syncInterval >= 0 && now > nextSendTime;
    if (!due)
        return;

    nextSendTime = now + syncInterval;

    using (NetworkWriterPooled writer = NetworkWriterPool.Get())
    {
        // WriteParameters decides whether there is anything to send
        if (WriteParameters(writer))
            SendAnimationParametersMessage(writer.ToArray());
    }
}
// process pending connect/disconnect events and flush queued internal
// messages to NetworkClient.OnTransportData as proper batches.
internal override void Update()
{
    base.Update();

    // should we still process a connected event?
    if (connectedEventPending)
    {
        connectedEventPending = false;
        NetworkClient.OnConnectedEvent?.Invoke();
    }

    // process internal messages so they are applied at the correct time
    while (queue.Count > 0)
    {
        // call receive on queued writer's content, return to pool
        NetworkWriterPooled writer = queue.Dequeue();
        ArraySegment<byte> message = writer.ToArraySegment();

        // OnTransportData assumes a proper batch with timestamp etc.
        // let's make a proper batch and pass it to OnTransportData.
        Batcher batcher = GetBatchForChannelId(Channels.Reliable);
        batcher.AddMessage(message, NetworkTime.localTime);

        using (NetworkWriterPooled batchWriter = NetworkWriterPool.Get())
        {
            // make a batch with our local time (double precision)
            if (batcher.GetBatch(batchWriter))
            {
                NetworkClient.OnTransportData(batchWriter.ToArraySegment(), Channels.Reliable);
            }
            else
            {
                // consistent with Send(): a failed batch should never
                // happen, but if it does, surface the error instead of
                // silently dropping the message.
                Debug.LogError("Local connection failed to make batch. This should never happen.");
            }
        }

        NetworkWriterPool.Return(writer);
    }

    // should we still process a disconnected event?
    if (disconnectedEventPending)
    {
        disconnectedEventPending = false;
        NetworkClient.OnDisconnectedEvent?.Invoke();
    }
}