// Test stage: decodes an "encrypted" packet in place by flipping every bit
// of the payload (the inverse of the matching Send stage's XOR with 0xff).
private static void Receive(ref NetworkPipelineContext ctx, ref InboundRecvBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    int count = inboundBuffer.bufferLength;
    for (int idx = 0; idx < count; ++idx)
    {
        inboundBuffer.buffer[idx] = (byte)~inboundBuffer.buffer[idx];
    }
}
/// <summary>
/// Store the packet for possible later resends, and fill in the header we'll use to send it (populate with
/// sequence ID, last acknowledged ID from remote with ackmask).
/// </summary>
/// <param name="context">Pipeline context, the reliability shared state is used here.</param>
/// <param name="inboundBuffer">Buffer with packet data.</param>
/// <param name="header">Packet header which will be populated.</param>
/// <returns>Sequence ID assigned to this packet, or (int)ErrorCodes.OutgoingQueueIsFull when no send-window slot is free.</returns>
public static unsafe int Write(NetworkPipelineContext context, InboundBufferVec inboundBuffer, ref PacketHeader header)
{
    SharedContext *reliable = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();

    // The next sequence ID to assign comes from the shared send-side state.
    var sequence = (ushort)reliable->SentPackets.Sequence;

    // Reserve a slot in the send window for resends; fail if the window is full.
    if (!TryAquire(context.internalProcessBuffer, sequence))
    {
        reliable->errorCode = ErrorCodes.OutgoingQueueIsFull;
        return((int)ErrorCodes.OutgoingQueueIsFull);
    }
    reliable->stats.PacketsSent++;

    // Populate the header: our sequence ID plus piggybacked ack state
    // (last received remote sequence ID and the ack bitmask).
    header.SequenceId = sequence;
    header.AckedSequenceId = (ushort)reliable->ReceivedPackets.Sequence;
    header.AckMask = reliable->ReceivedPackets.AckMask;
    // Everything up to the latest received sequence is now considered acked by us.
    reliable->ReceivedPackets.Acked = reliable->ReceivedPackets.Sequence;

    // Attach our processing time of the packet we're acknowledging (time between receiving it and sending this ack)
    header.ProcessingTime =
        CalculateProcessingTime(context.internalSharedProcessBuffer, header.AckedSequenceId, context.timestamp);

    // Advance the send sequence, then persist header+payload for potential resends
    // and record the send timestamp for RTT calculation.
    reliable->SentPackets.Sequence = (ushort)(reliable->SentPackets.Sequence + 1);
    SetHeaderAndPacket(context.internalProcessBuffer, sequence, header, inboundBuffer, context.timestamp);

    StoreTimestamp(context.internalSharedProcessBuffer, sequence, context.timestamp);

    return(sequence);
}
/// <summary>
/// Resume or play back a packet we had received earlier out of order. When an out of order packet is received
/// it is stored since we need to first return the packet with the next sequence ID. When that packet finally
/// arrives it is returned but a pipeline resume is requested since we already have the next packet stored
/// and it can be processed immediately after.
/// </summary>
/// <param name="context">Pipeline context, we'll use both the shared reliability context and receive context.</param>
/// <param name="startSequence">The first packet which we need to retrieve now, there could be more after that.</param>
/// <param name="needsResume">Indicates if we need the pipeline to resume again.</param>
/// <returns>Slice over the stored packet payload, or a default (empty) slice when nothing is stored for this sequence.</returns>
public static unsafe NativeSlice <byte> ResumeReceive(NetworkPipelineContext context, int startSequence, ref bool needsResume)
{
    // NullEntry means there is nothing queued up to play back.
    if (startSequence == NullEntry)
    {
        return(default(NativeSlice <byte>));
    }
    SharedContext *shared = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();
    Context * reliable = (Context *)context.internalProcessBuffer.GetUnsafePtr();

    // Clear the resume marker; it's re-set below if another packet is ready.
    reliable->Resume = NullEntry;

    PacketInformation *info = GetPacketInformation(context.internalProcessBuffer, startSequence);
    var latestReceivedPacket = shared->ReceivedPackets.Sequence;
    if (info->SequenceId == startSequence)
    {
        // Locate the stored payload in the receive window's data region.
        var offset = reliable->DataPtrOffset + ((startSequence % reliable->Capacity) * reliable->DataStride);
        NativeSlice <byte> slice = new NativeSlice <byte>(context.internalProcessBuffer, offset, info->Size);
        reliable->Delivered = startSequence;

        // If the next sequence ID has also been received already, request
        // another resume so it can be delivered right after this one.
        if ((ushort)(startSequence + 1) <= latestReceivedPacket)
        {
            reliable->Resume = (ushort)(startSequence + 1);
            needsResume = true;
        }
        return(slice);
    }
    // Slot did not hold the expected sequence ID; nothing to deliver.
    return(default(NativeSlice <byte>));
}
/// <summary>
/// Copy the packet into the simulator's process buffer and record bookkeeping so it
/// can be released ("resurrected") after the configured delay has elapsed.
/// </summary>
/// <param name="ctx">Pipeline context; the internal process buffer stores both payload and tracking data.</param>
/// <param name="inboundBuffer">Packet payload split over two segments (buffer1/buffer2).</param>
/// <param name="needsUpdate">Set to true so the pipeline schedules an update to release the packet later.</param>
/// <param name="timestamp">Current time; the packet is held until timestamp + m_PacketDelayMs.</param>
/// <returns>True when the packet was queued, false when no free slot was available (packet is dropped by the caller).</returns>
public unsafe bool DelayPacket(ref NetworkPipelineContext ctx, InboundBufferVec inboundBuffer, ref bool needsUpdate, long timestamp)
{
    // Find empty slot in bookkeeping data space to track this packet
    int packetPayloadOffset = 0;
    int packetDataOffset = 0;
    var processBufferPtr = (byte*) ctx.internalProcessBuffer.GetUnsafePtr();
    bool foundSlot = GetEmptyDataSlot(processBufferPtr, ref packetPayloadOffset, ref packetDataOffset);
    if (!foundSlot)
    {
        //UnityEngine.Debug.LogWarning("No space left for delaying packet (" + m_PacketCount + " packets in queue)");
        return false;
    }

    // Copy both payload segments back-to-back into the reserved payload slot.
    NativeSlice<byte> packetPayload =
        new NativeSlice<byte>(ctx.internalProcessBuffer, packetPayloadOffset, inboundBuffer.buffer1.Length + inboundBuffer.buffer2.Length);
    StorePacketPayload(packetPayload, inboundBuffer.buffer1, inboundBuffer.buffer2);

    // Add tracking for this packet so we can resurrect later
    DelayedPacket packet;
    packet.delayUntil = timestamp + m_PacketDelayMs;
    packet.processBufferOffset = packetPayloadOffset;
    packet.packetSize = inboundBuffer.buffer1.Length + inboundBuffer.buffer2.Length;
    byte* packetPtr = (byte*) &packet;
    UnsafeUtility.MemCpy(processBufferPtr + packetDataOffset, packetPtr, UnsafeUtility.SizeOf<DelayedPacket>());

    // Schedule an update call so packet can be resurrected later
    needsUpdate = true;
    return true;
}
/// <summary>
/// Copy the packet (including header padding) into the simulator's process buffer and
/// record bookkeeping so it can be released after the configured delay plus random jitter.
/// </summary>
/// <param name="ctx">Pipeline context; internal process buffer stores payload/tracking, shared buffer holds the RNG state.</param>
/// <param name="inboundBuffer">Packet payload with header padding information.</param>
/// <param name="requests">The Update flag is set so the pipeline runs again to release the packet later.</param>
/// <param name="timestamp">Current time; the packet is held until delay + jitter in [-m_PacketJitterMs, +m_PacketJitterMs) has elapsed.</param>
/// <returns>True when the packet was queued, false when no free slot was available.</returns>
public unsafe bool DelayPacket(ref NetworkPipelineContext ctx, InboundSendBuffer inboundBuffer, ref NetworkPipelineStage.Requests requests, long timestamp)
{
    // Find empty slot in bookkeeping data space to track this packet
    int packetPayloadOffset = 0;
    int packetDataOffset = 0;
    var processBufferPtr = (byte *)ctx.internalProcessBuffer;
    bool foundSlot = GetEmptyDataSlot(processBufferPtr, ref packetPayloadOffset, ref packetDataOffset);
    if (!foundSlot)
    {
        //UnityEngine.Debug.LogWarning("No space left for delaying packet (" + m_PacketCount + " packets in queue)");
        return(false);
    }

    // Copy the payload after the header padding so the full packet layout is preserved.
    UnsafeUtility.MemCpy(ctx.internalProcessBuffer + packetPayloadOffset + inboundBuffer.headerPadding, inboundBuffer.buffer, inboundBuffer.bufferLength);
    var param = (SimulatorUtility.Context *)ctx.internalSharedProcessBuffer;

    // Add tracking for this packet so we can resurrect later
    DelayedPacket packet;
    // delay = base delay + jitter drawn uniformly from [-m_PacketJitterMs, +m_PacketJitterMs)
    packet.delayUntil = timestamp + m_PacketDelayMs + param->Random.NextInt(m_PacketJitterMs * 2) - m_PacketJitterMs;
    packet.processBufferOffset = packetPayloadOffset;
    packet.packetSize = (ushort)(inboundBuffer.headerPadding + inboundBuffer.bufferLength);
    packet.packetHeaderPadding = (ushort)inboundBuffer.headerPadding;
    byte *packetPtr = (byte *)&packet;
    UnsafeUtility.MemCpy(processBufferPtr + packetDataOffset, packetPtr, UnsafeUtility.SizeOf <DelayedPacket>());

    // Schedule an update call so packet can be resurrected later
    requests |= NetworkPipelineStage.Requests.Update;
    return(true);
}
// Test stage: "decrypts" the inbound payload by XOR-ing each byte with 0xff
// into the stage's process buffer, then hands the decoded slice down the pipeline.
public NativeSlice <byte> Receive(NetworkPipelineContext ctx, NativeSlice <byte> inboundBuffer, ref bool needsResume, ref bool needsUpdate, ref bool needsSendUpdate)
{
    int length = inboundBuffer.Length;
    for (int idx = 0; idx < length; ++idx)
    {
        ctx.internalProcessBuffer[idx] = (byte)(inboundBuffer[idx] ^ 0xff);
    }
    return new NativeSlice <byte>(ctx.internalProcessBuffer, 0, length);
}
/// <summary>
/// Scan the delayed-packet queue, update simulator statistics, and release the oldest
/// packet whose delay timer has expired (if any). Its slot is freed by zeroing delayUntil.
/// </summary>
/// <param name="ctx">Pipeline context; process buffer holds tracking entries, shared buffer holds stats.</param>
/// <param name="delayedPacket">Receives a slice over the released packet's payload when one is ready.</param>
/// <param name="needsResume">Set when more than one packet is already expired, so the stage runs again immediately.</param>
/// <param name="needsUpdate">Set when packets remain queued but not yet expired, so the stage runs again later.</param>
/// <param name="currentTimestamp">Current time used to test each packet's delayUntil.</param>
/// <returns>True when a packet was released into delayedPacket, false otherwise.</returns>
public unsafe bool GetDelayedPacket(ref NetworkPipelineContext ctx, ref NativeSlice<byte> delayedPacket, ref bool needsResume, ref bool needsUpdate, long currentTimestamp)
{
    needsUpdate = needsResume = false;
    var dataSize = UnsafeUtility.SizeOf<DelayedPacket>();

    byte* processBufferPtr = (byte*) ctx.internalProcessBuffer.GetUnsafePtr();
    var simCtx = (Context*) ctx.internalSharedProcessBuffer.GetUnsafePtr();
    int oldestPacketIndex = -1;
    long oldestTime = long.MaxValue;
    int readyPackets = 0;
    int packetsInQueue = 0;
    // Walk every tracking slot; delayUntil == 0 marks a free slot.
    for (int i = 0; i < m_PacketCount; i++)
    {
        DelayedPacket* packet = (DelayedPacket*) (processBufferPtr + dataSize * i);
        if ((int) packet->delayUntil == 0) continue;
        packetsInQueue++;
        if (packet->delayUntil > currentTimestamp) continue;
        readyPackets++;
        // Track the expired packet with the smallest delayUntil (released first, FIFO-ish).
        if (oldestTime <= packet->delayUntil) continue;
        oldestPacketIndex = i;
        oldestTime = packet->delayUntil;
    }

    // Publish queue statistics to the shared context for inspection by tests/tools.
    simCtx->ReadyPackets = readyPackets;
    simCtx->WaitingPackets = packetsInQueue;
    simCtx->NextPacketTime = oldestTime;
    simCtx->StatsTime = currentTimestamp;

    // If more than one item has expired timer we need to resume this pipeline stage
    if (readyPackets > 1)
    {
        needsUpdate = false;
        needsResume = true;
    }
    // If more than one item is present (but doesn't have expired timer) we need to re-run the pipeline
    // in a later update call
    else if (packetsInQueue > 0)
    {
        needsUpdate = true;
        needsResume = false;
    }

    if (oldestPacketIndex >= 0)
    {
        DelayedPacket* packet = (DelayedPacket*) (processBufferPtr + dataSize * oldestPacketIndex);
        // Free the slot and hand back the stored payload.
        packet->delayUntil = 0;
        delayedPacket = new NativeSlice<byte>(ctx.internalProcessBuffer, packet->processBufferOffset, packet->packetSize);
        return true;
    }
    return false;
}
// Test stage: validates and strips the 4-byte header that the matching Send
// stage prepends. The first int of the payload must equal the magic value 1.
private static void Receive(ref NetworkPipelineContext ctx, ref InboundRecvBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    var header = (int *)inboundBuffer.buffer;
    if (*header != 1)
        throw new InvalidOperationException("Header data invalid, got " + *header);
    // Strip the header and pass only the payload down the pipeline.
    inboundBuffer = inboundBuffer.Slice(4);
}
// Test stage: validates and strips the 4-byte header that the matching Send
// stage prepends. The first int of the payload must equal the magic value 2.
public unsafe NativeSlice <byte> Receive(NetworkPipelineContext ctx, NativeSlice <byte> inboundBuffer, ref bool needsResume, ref bool needsUpdate, ref bool needsSendUpdate)
{
    var header = (int *)inboundBuffer.GetUnsafeReadOnlyPtr();
    if (*header != 2)
        throw new InvalidOperationException("Header data invalid, got " + *header);
    // Return the payload with the 4-byte header stripped off.
    return new NativeSlice <byte>(inboundBuffer, 4, inboundBuffer.Length - 4);
}
/// <summary>
/// Write an ack packet, only the packet header is used and this doesn't advance the sequence ID.
/// The packet is not stored away for resend routine.
/// </summary>
/// <param name="context">Pipeline context, the reliability shared state is used here.</param>
/// <param name="header">Packet header which will be populated.</param>
/// <returns></returns>
public static unsafe void WriteAckPacket(NetworkPipelineContext context, ref PacketHeader header)
{
    SharedContext *reliable = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();

    // Pure ack: mark the packet type and attach current receive-side ack state.
    header.Type = (ushort)PacketType.Ack;
    header.AckedSequenceId = (ushort)reliable->ReceivedPackets.Sequence;
    header.AckMask = reliable->ReceivedPackets.AckMask;
    // Include how long we held the acked packet before acking (for RTT adjustment).
    header.ProcessingTime =
        CalculateProcessingTime(context.internalSharedProcessBuffer, header.AckedSequenceId, context.timestamp);
    // Everything up to the latest received sequence is now acknowledged by us.
    reliable->ReceivedPackets.Acked = reliable->ReceivedPackets.Sequence;
}
// Test stage: drops the packet whose running counter (byte 0 of the static
// instance buffer) matches the configured drop index (byte 1), then advances
// the counter. Simulates deterministic packet loss in pipeline tests.
private static void Receive(ref NetworkPipelineContext ctx, ref InboundRecvBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    byte dropIndex = ctx.staticInstanceBuffer[1];
    if (ctx.staticInstanceBuffer[0] == dropIndex)
    {
        // Drop the packet
        inboundBuffer = default;
    }
    ctx.staticInstanceBuffer[0] += 1;
}
// Returns true when a standalone ack packet should be emitted: nothing has
// been sent for over a full frame (LastSentTime < PreviousTimestamp) while
// received packets are still waiting to be acknowledged.
public static unsafe bool ShouldSendAck(NetworkPipelineContext ctx)
{
    var reliable = (Context *)ctx.internalProcessBuffer.GetUnsafePtr();
    var shared = (SharedContext *)ctx.internalSharedProcessBuffer.GetUnsafePtr();

    bool idleForAFrame = reliable->LastSentTime < reliable->PreviousTimestamp;
    bool hasUnackedReceives = shared->ReceivedPackets.Acked < shared->ReceivedPackets.Sequence;
    return idleForAFrame && hasUnackedReceives;
}
// Test stage: "encrypts" the outgoing payload by flipping every bit, writing
// the result after the reserved header area of the stage's process buffer,
// then swaps the inbound buffer for the encoded copy.
private static unsafe void Send(ref NetworkPipelineContext ctx, ref InboundSendBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    int payloadLength = inboundBuffer.bufferLength;
    for (int idx = 0; idx < payloadLength; ++idx)
    {
        ctx.internalProcessBuffer[inboundBuffer.headerPadding + idx] = (byte)~inboundBuffer.buffer[idx];
    }
    // Hand the encoded copy (with the original header padding) down the pipeline.
    var encoded = default(InboundSendBuffer);
    encoded.bufferWithHeaders = ctx.internalProcessBuffer;
    encoded.bufferWithHeadersLength = payloadLength + inboundBuffer.headerPadding;
    encoded.SetBufferFrombufferWithHeaders();
    inboundBuffer = encoded;
}
/// <summary>
/// Resend a packet which we have not received an acknowledgement for in time. Pipeline resume
/// will be enabled if there are more packets which we need to resend. The send reliability context
/// will then also be updated to track the next packet we need to resume.
/// </summary>
/// <param name="context">Pipeline context, we'll use both the shared reliability context and send context.</param>
/// <param name="header">Packet header for the packet payload we're resending.</param>
/// <param name="needsResume">Indicates if a pipeline resume is needed again.</param>
/// <returns>Buffer slice to packet payload.</returns>
/// <exception cref="ApplicationException"></exception>
public static unsafe NativeSlice <byte> ResumeSend(NetworkPipelineContext context, out PacketHeader header, ref bool needsResume)
{
    SharedContext *reliable = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();
    Context * ctx = (Context *)context.internalProcessBuffer.GetUnsafePtr();

#if ENABLE_UNITY_COLLECTIONS_CHECKS
    if (ctx->Resume == NullEntry)
    {
        throw new ApplicationException("This function should not be called unless there is data in resume");
    }
#endif

    // The sequence ID queued for resending by an earlier update/resume pass.
    var sequence = (ushort)ctx->Resume;

    PacketInformation *information;
    information = GetPacketInformation(context.internalProcessBuffer, sequence);
    // Reset the resend timer
    information->SendTime = context.timestamp;

    Packet *packet = GetPacket(context.internalProcessBuffer, sequence);
    header = packet->Header;

    // Update acked/ackmask to latest values
    header.AckedSequenceId = (ushort)reliable->ReceivedPackets.Sequence;
    header.AckMask = reliable->ReceivedPackets.AckMask;

    // Payload lives right after the stored header in the send window's data region.
    var offset = (ctx->DataPtrOffset + ((sequence % ctx->Capacity) * ctx->DataStride)) + UnsafeUtility.SizeOf <PacketHeader>();
    NativeSlice <byte> slice = new NativeSlice <byte>(context.internalProcessBuffer, offset, information->Size);
    reliable->stats.PacketsResent++;

    needsResume = false;
    ctx->Resume = -1;

    // Check if another packet needs to be resent right after this one.
    // NOTE(review): the condition below selects tracked packets whose resend
    // timer has NOT yet expired (SendTime + timeToResend > timestamp) — verify
    // this is the intended criterion for queuing the next resume.
    for (int i = sequence + 1; i < reliable->ReceivedPackets.Sequence + 1; i++)
    {
        var timeToResend = CurrentResendTime(context.internalSharedProcessBuffer);
        information = GetPacketInformation(context.internalProcessBuffer, i);
        if (information->SequenceId >= 0 && information->SendTime + timeToResend > context.timestamp)
        {
            needsResume = true;
            ctx->Resume = i;
        }
    }
    return(slice);
}
// Test stage: asserts the send-side initializer filled the process buffer
// with 10,20,30 and the shared buffer with 70,80.
private static void Send(ref NetworkPipelineContext ctx, ref InboundSendBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    var send = (int *)ctx.internalProcessBuffer;
    for (int expected = 1; expected <= 3; ++expected, ++send)
    {
        Assert.AreEqual(*send, expected * 10);
    }
    var shared = (int *)ctx.internalSharedProcessBuffer;
    for (int expected = 7; expected <= 8; ++expected, ++shared)
    {
        Assert.AreEqual(*shared, expected * 10);
    }
}
// Test stage: asserts the receive-side initializer filled the process buffer
// with 40,50,60 and the shared buffer with 70,80.
private static void Receive(ref NetworkPipelineContext ctx, ref InboundRecvBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    var receive = (int *)ctx.internalProcessBuffer;
    for (int expected = 4; expected <= 6; ++expected, ++receive)
    {
        Assert.AreEqual(*receive, expected * 10);
    }
    var shared = (int *)ctx.internalSharedProcessBuffer;
    for (int expected = 7; expected <= 8; ++expected, ++shared)
    {
        Assert.AreEqual(*shared, expected * 10);
    }
}
// Test stage: asserts the initializer filled the process buffer with 1,2,3
// and the shared buffer with 7,8, then reports success to the pipeline.
private static int Send(ref NetworkPipelineContext ctx, ref InboundSendBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    var send = (int *)ctx.internalProcessBuffer;
    for (int expected = 1; expected <= 3; ++expected, ++send)
    {
        Assert.AreEqual(*send, expected);
    }
    var shared = (int *)ctx.internalSharedProcessBuffer;
    for (int expected = 7; expected <= 8; ++expected, ++shared)
    {
        Assert.AreEqual(*shared, expected);
    }
    return (int)Error.StatusCode.Success;
}
// Process the ack information in a received header: record the round-trip
// timestamp for the acked sequence ID, then adopt the header's ack state only
// when it advances past what we've already seen.
public static unsafe void ReadAckPacket(NetworkPipelineContext context, PacketHeader header)
{
    SharedContext *reliable = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();

    // Store receive timestamp for our acked sequence ID with remote processing time
    StoreReceiveTimestamp(context.internalSharedProcessBuffer, header.AckedSequenceId, context.timestamp, header.ProcessingTime);

    // A header acking something at or behind our last ack carries no new information.
    bool advancesAcks = SequenceHelpers.GreaterThan16(header.AckedSequenceId, (ushort)reliable->SentPackets.Acked);
    if (!advancesAcks)
        return;

    reliable->SentPackets.Acked = header.AckedSequenceId;
    reliable->SentPackets.AckMask = header.AckMask;
}
// Test stage: "encrypts" both payload segments by XOR-ing every byte with
// 0xff, packing them back-to-back into the process buffer, and passes the
// combined encoded bytes on as a single segment.
public InboundBufferVec Send(NetworkPipelineContext ctx, InboundBufferVec inboundBuffer, ref bool needsResume, ref bool needsUpdate)
{
    int firstLength = inboundBuffer.buffer1.Length;
    int secondLength = inboundBuffer.buffer2.Length;
    for (int idx = 0; idx < firstLength; ++idx)
    {
        ctx.internalProcessBuffer[idx] = (byte)(inboundBuffer.buffer1[idx] ^ 0xff);
    }
    for (int idx = 0; idx < secondLength; ++idx)
    {
        ctx.internalProcessBuffer[firstLength + idx] = (byte)(inboundBuffer.buffer2[idx] ^ 0xff);
    }
    var encoded = default(InboundBufferVec);
    encoded.buffer1 = new NativeSlice <byte>(ctx.internalProcessBuffer, 0, firstLength + secondLength);
    return encoded;
}
// Test stage: asserts the send-side initializer filled the process buffer
// with 10,20,30 and the shared buffer with 70,80; payload passes through.
public unsafe InboundBufferVec Send(NetworkPipelineContext ctx, InboundBufferVec inboundBuffer, ref bool needsResume, ref bool needsUpdate)
{
    var send = (int *)ctx.internalProcessBuffer.GetUnsafePtr();
    for (int expected = 1; expected <= 3; ++expected, ++send)
    {
        Assert.AreEqual(*send, expected * 10);
    }
    var shared = (int *)ctx.internalSharedProcessBuffer.GetUnsafePtr();
    for (int expected = 7; expected <= 8; ++expected, ++shared)
    {
        Assert.AreEqual(*shared, expected * 10);
    }
    return inboundBuffer;
}
// Test stage: asserts the receive-side initializer filled the process buffer
// with 40,50,60 and the shared buffer with 70,80; payload passes through.
public NativeSlice <byte> Receive(NetworkPipelineContext ctx, NativeSlice <byte> inboundBuffer, ref bool needsResume, ref bool needsUpdate, ref bool needsSendUpdate)
{
    var receive = (int *)ctx.internalProcessBuffer.GetUnsafePtr();
    for (int expected = 4; expected <= 6; ++expected, ++receive)
    {
        Assert.AreEqual(*receive, expected * 10);
    }
    var shared = (int *)ctx.internalSharedProcessBuffer.GetUnsafePtr();
    for (int expected = 7; expected <= 8; ++expected, ++shared)
    {
        Assert.AreEqual(*shared, expected * 10);
    }
    return inboundBuffer;
}
/// <summary>
/// Acknowledge the reception of packets which have been sent. The reliability
/// shared context/state is updated when packets are received from the other end
/// of the connection. The other side will update it's ackmask with which packets
/// have been received (starting from last received sequence ID) each time it sends
/// a packet back. This checks the resend timers on each non-acknowledged packet
/// and notifies if it's time to resend yet.
/// </summary>
/// <param name="context">Pipeline context, contains the buffer slices this pipeline connection owns.</param>
/// <returns>True when at least one packet is queued for resume (resend), false otherwise.</returns>
public static unsafe bool ReleaseOrResumePackets(NetworkPipelineContext context)
{
    SharedContext *reliable = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();
    Context * ctx = (Context *)context.internalProcessBuffer.GetUnsafePtr();

    // Last sequence ID and ackmask we received from the remote peer, these are confirmed delivered packets
    var lastReceivedAckMask = reliable->SentPackets.AckMask;
    var lastOwnSequenceIdAckedByRemote = (ushort)reliable->SentPackets.Acked;

    // To deal with wrapping, chop off the upper half of the sequence ID and multiply by window size, it
    // will then never wrap but will map to the correct index in the packet storage, wrapping happens when
    // sending low sequence IDs (since it checks sequence IDs backwards in time).
    // NOTE(review): (1 - lastOwnSequenceIdAckedByRemote) >> 15 does not obviously
    // implement the "chop off the upper half" described above — confirm this
    // expression against the intended window-index calculation.
    var sequence = (ushort)(reliable->WindowSize * ((1 - lastOwnSequenceIdAckedByRemote) >> 15));

    // Check each slot in the window, starting from the sequence ID calculated above (this isn't the
    // latest sequence ID though as it was adjusted to avoid wrapping)
    for (int i = 0; i < reliable->WindowSize; i++)
    {
        var info = GetPacketInformation(context.internalProcessBuffer, sequence);
        if (info->SequenceId >= 0)
        {
            // Check the bit for this sequence ID against the ackmask. Bit 0 in the ackmask is the latest
            // ackedSeqId, bit 1 latest ackedSeqId - 1 (one older) and so on. If bit X is 1 then
            // ackedSeqId-X is acknowledged.
            var ackBits = 1 << (lastOwnSequenceIdAckedByRemote - info->SequenceId);

            // Release if this seqId has been flipped on in the ackmask (so it's acknowledged)
            // Ignore if sequence ID is out of window range of the last acknowledged id
            if (SequenceHelpers.AbsDistance((ushort)lastOwnSequenceIdAckedByRemote, (ushort)info->SequenceId) < reliable->WindowSize && (ackBits & lastReceivedAckMask) != 0)
            {
                Release(context.internalProcessBuffer, info->SequenceId);
                info->SendTime = -1;
                sequence = (ushort)(sequence - 1);
                continue;
            }
            // Not acked yet: queue a resend once the resend timer has elapsed.
            var timeToResend = CurrentResendTime(context.internalSharedProcessBuffer);
            if (context.timestamp > info->SendTime + timeToResend)
            {
                ctx->Resume = info->SequenceId;
            }
        }
        sequence = (ushort)(sequence - 1);
    }
    return(ctx->Resume != NullEntry);
}
// Test dispatcher: routes an inbound packet to the Receive method of the test
// pipeline stage registered under the given stage ID. Unknown IDs pass the
// buffer through untouched.
public NativeSlice <byte> InvokeReceive(int pipelineStageId, NetworkPipelineContext ctx, NativeSlice <byte> inboundBuffer, ref bool needsResume, ref bool needsUpdate, ref bool needsSendUpdate)
{
    if (pipelineStageId == 0)
        return testPipelineStageWithHeader.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 1)
        return testEncryptPipelineStage.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 2)
        return testEncryptInPlacePipelineStage.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 3)
        return testInvertPipelineStage.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 4)
        return testDelayedReadPipelineStage.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 5)
        return testDelayedSendPipelineStage.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 6)
        return testUnreliableSequencedPipelineStage.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 7)
        return testPipelineStageWithHeaderTwo.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 8)
        return testPipelineWithInitializers.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    if (pipelineStageId == 9)
        return testPipelineWithInitializersTwo.Receive(ctx, inboundBuffer, ref needsResume, ref needsUpdate, ref needsSendUpdate);
    return inboundBuffer;
}
/// <summary> /// Resume or play back a packet we had received earlier out of order. When an out of order packet is received /// it is stored since we need to first return the packet with the next sequence ID. When that packet finally /// arrives it is returned but a pipeline resume is requested since we already have the next packet stored /// and it can be processed immediately after. /// </summary> /// <param name="context">Pipeline context, we'll use both the shared reliability context and receive context.</param> /// <param name="startSequence">The first packet which we need to retrieve now, there could be more after that.</param> /// <param name="needsResume">Indicates if we need the pipeline to resume again.</param> /// <returns></returns> public static unsafe InboundRecvBuffer ResumeReceive(NetworkPipelineContext context, int startSequence, ref bool needsResume) { if (startSequence == NullEntry) { return(default);
/// <summary>
/// Scan the delayed-packet queue, update simulator statistics, and release the oldest
/// packet whose delay timer has expired (if any). Its slot is freed by zeroing delayUntil.
/// </summary>
/// <param name="ctx">Pipeline context; process buffer holds tracking entries, shared buffer holds stats.</param>
/// <param name="delayedPacket">Receives the released packet's buffer (with header padding) when one is ready.</param>
/// <param name="requests">Resume is requested when more expired packets remain; Update when queued packets are not yet due.</param>
/// <param name="currentTimestamp">Current time used to test each packet's delayUntil.</param>
/// <returns>True when a packet was released into delayedPacket, false otherwise.</returns>
public unsafe bool GetDelayedPacket(ref NetworkPipelineContext ctx, ref InboundSendBuffer delayedPacket, ref NetworkPipelineStage.Requests requests, long currentTimestamp)
{
    requests = NetworkPipelineStage.Requests.None;
    var dataSize = UnsafeUtility.SizeOf <DelayedPacket>();

    byte *processBufferPtr = (byte *)ctx.internalProcessBuffer;
    var simCtx = (Context *)ctx.internalSharedProcessBuffer;
    int oldestPacketIndex = -1;
    long oldestTime = long.MaxValue;
    int readyPackets = 0;
    int packetsInQueue = 0;
    // Walk every tracking slot; delayUntil == 0 marks a free slot.
    for (int i = 0; i < m_PacketCount; i++)
    {
        DelayedPacket *packet = (DelayedPacket *)(processBufferPtr + dataSize * i);
        if ((int)packet->delayUntil == 0)
        {
            continue;
        }
        packetsInQueue++;
        if (packet->delayUntil > currentTimestamp)
        {
            continue;
        }
        readyPackets++;
        // Track the expired packet with the smallest delayUntil (released first).
        if (oldestTime <= packet->delayUntil)
        {
            continue;
        }
        oldestPacketIndex = i;
        oldestTime = packet->delayUntil;
    }

    // Publish queue statistics to the shared context for inspection by tests/tools.
    simCtx->ReadyPackets = readyPackets;
    simCtx->WaitingPackets = packetsInQueue;
    simCtx->NextPacketTime = oldestTime;
    simCtx->StatsTime = currentTimestamp;

    // If more than one item has expired timer we need to resume this pipeline stage
    if (readyPackets > 1)
    {
        requests |= NetworkPipelineStage.Requests.Resume;
    }
    // If more than one item is present (but doesn't have expired timer) we need to re-run the pipeline
    // in a later update call
    else if (packetsInQueue > 0)
    {
        requests |= NetworkPipelineStage.Requests.Update;
    }

    if (oldestPacketIndex >= 0)
    {
        DelayedPacket *packet = (DelayedPacket *)(processBufferPtr + dataSize * oldestPacketIndex);
        // Free the slot and hand back the stored packet, restoring its header padding.
        packet->delayUntil = 0;

        delayedPacket.bufferWithHeaders = ctx.internalProcessBuffer + packet->processBufferOffset;
        delayedPacket.bufferWithHeadersLength = packet->packetSize;
        delayedPacket.headerPadding = packet->packetHeaderPadding;
        delayedPacket.SetBufferFrombufferWithHeaders();
        return(true);
    }
    return(false);
}
// Test stage: intentionally a no-op on receive — the inbound buffer is passed
// through unchanged and no pipeline requests are raised.
private static void Receive(ref NetworkPipelineContext ctx, ref InboundRecvBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
}
/// <summary>
/// Read header data and update reliability tracking information in the shared context.
/// - If the packets sequence ID is lower than the last received ID+1, then it's stale
/// - If the packets sequence ID is higher, then we'll process it and update tracking info in the shared context
/// </summary>
/// <param name="context">Pipeline context, the reliability shared state is used here.</param>
/// <param name="header">Packet header of a new received packet.</param>
/// <returns>Sequence ID of the received packet, or an ErrorCodes value for stale/duplicated packets.</returns>
public static unsafe int Read(NetworkPipelineContext context, PacketHeader header)
{
    SharedContext *reliable = (SharedContext *)context.internalSharedProcessBuffer.GetUnsafePtr();

    reliable->stats.PacketsReceived++;
    // Too old to fit in the receive window at all: reject as stale.
    if (SequenceHelpers.StalePacket(
        header.SequenceId,
        (ushort)(reliable->ReceivedPackets.Sequence + 1),
        (ushort)reliable->WindowSize))
    {
        reliable->stats.PacketsStale++;
        return((int)ErrorCodes.Stale_Packet);
    }

    var window = reliable->WindowSize - 1;
    if (SequenceHelpers.GreaterThan16((ushort)(header.SequenceId + 1), (ushort)reliable->ReceivedPackets.Sequence))
    {
        // Newer than anything seen so far: shift the ack mask forward by the gap.
        int distance = SequenceHelpers.AbsDistance(header.SequenceId, (ushort)reliable->ReceivedPackets.Sequence);

        // Any sequence IDs shifted out of the mask without their ack bit set are lost packets.
        for (var i = 0; i < Math.Min(distance, window); ++i)
        {
            if ((reliable->ReceivedPackets.AckMask & 1 << (window - i)) == 0)
            {
                reliable->stats.PacketsDropped++;
            }
        }

        if (distance > window)
        {
            // Jumped past the whole window: everything in it is dropped; restart the mask.
            reliable->stats.PacketsDropped += distance - window;
            reliable->ReceivedPackets.AckMask = 1;
        }
        else
        {
            reliable->ReceivedPackets.AckMask <<= distance;
            reliable->ReceivedPackets.AckMask |= 1;
        }
        reliable->ReceivedPackets.Sequence = header.SequenceId;
    }
    else if (SequenceHelpers.LessThan16(header.SequenceId, (ushort)reliable->ReceivedPackets.Sequence))
    {
        int distance = SequenceHelpers.AbsDistance(header.SequenceId, (ushort)reliable->ReceivedPackets.Sequence);
        // If this is a resent packet the distance will seem very big and needs to be calculated again with adjustment for wrapping
        if (distance >= ushort.MaxValue - reliable->WindowSize)
        {
            distance = reliable->ReceivedPackets.Sequence - header.SequenceId;
        }

        var ackBit = 1 << distance;
        // Bit already set: this sequence ID was delivered before.
        if ((ackBit & reliable->ReceivedPackets.AckMask) != 0)
        {
            reliable->stats.PacketsDuplicated++;
            return((int)ErrorCodes.Duplicated_Packet);
        }
        // First delivery, just out of order: record it in the mask.
        reliable->stats.PacketsOutOfOrder++;
        reliable->ReceivedPackets.AckMask |= (uint)ackBit;
    }

    // Store receive timestamp for remote sequence ID we just received
    StoreRemoteReceiveTimestamp(context.internalSharedProcessBuffer, header.SequenceId, context.timestamp);

    // Process the piggybacked ack information in the same header.
    ReadAckPacket(context, header);

    return(header.SequenceId);
}
// Test stage: writes this stage's header (magic value 2) into the packet
// header stream; the payload itself passes through untouched.
public InboundBufferVec Send(NetworkPipelineContext ctx, InboundBufferVec inboundBuffer, ref bool needsResume, ref bool needsUpdate)
{
    ctx.header.Write(2);
    return inboundBuffer;
}
// Test stage: writes this stage's header (magic value 2) into the packet
// header stream; the payload itself passes through untouched.
private static void Send(ref NetworkPipelineContext ctx, ref InboundSendBuffer inboundBuffer, ref NetworkPipelineStage.Requests request)
{
    ctx.header.WriteInt(2);
}
// Test stage: pure pass-through on receive — the inbound payload is forwarded
// unchanged and no pipeline requests are raised.
public NativeSlice <byte> Receive(NetworkPipelineContext ctx, NativeSlice <byte> inboundBuffer, ref bool needsResume, ref bool needsUpdate, ref bool needsSendUpdate)
{
    return(inboundBuffer);
}