public void ProcessInPacket(LLQueItem item)
{
    Packet packet = item.Packet;

    // Reliable packets are acked unconditionally, even when they turn
    // out to be duplicates below — the client resends until acked.
    if (packet.Header.Reliable)
    {
        AckPacket(packet);
    }

    // AgentUpdate arrives continuously, so it is excluded from the
    // received-packet statistics.
    if (packet.Type != PacketType.AgentUpdate)
    {
        m_PacketsReceived++;
    }

    PruneDupeTracker();

    // Discard retransmissions: packets the client resent because it
    // never saw our ack for them.
    lock (m_DupeTracker)
    {
        uint seq = packet.Header.Sequence;

        if (m_DupeTracker.ContainsKey(seq))
        {
            return;
        }

        m_DupeTracker.Add(seq, Util.UnixTimeSinceEpoch());
    }

    m_Client.ProcessInPacket(packet);
}
// Pull the next deliverable item off the send queue.
//
// Incoming items and unsequenced items pass straight through. Sequenced
// outgoing items are validated against 'contents': if the sequence was
// cancelled (no longer tracked), the item is silently dropped and the
// loop continues. Returns null when the underlying queue yields null.
public LLQueItem Dequeue()
{
    while (true)
    {
        LLQueItem item = SendQueue.Dequeue();

        if (item == null)
        {
            return null;
        }

        if (item.Incoming)
        {
            return item;
        }

        // Stamp the dequeue time for resend bookkeeping.
        item.TickCount = System.Environment.TickCount;

        // Unsequenced packets cannot have been cancelled; send as-is.
        if (item.Sequence == 0)
        {
            return item;
        }

        lock (contents)
        {
            // FIX: the original called Contains() and then Remove(),
            // scanning the collection twice. Remove() alone returns
            // false when the sequence is absent (i.e. was cancelled),
            // in which case the item is dropped and we loop again.
            if (contents.Remove(item.Sequence))
            {
                return item;
            }
        }
    }
}
public void ProcessInPacket(LLQueItem item)
{
    Packet packet = item.Packet;

    // A reliable packet gets acked no matter what; the client keeps
    // resending it until our ack arrives.
    if (packet.Header.Reliable)
    {
        AckPacket(packet);
    }

    // Leave the ever-present AgentUpdate out of the statistics.
    if (packet.Type != PacketType.AgentUpdate)
    {
        m_PacketsReceived++;
    }

    // Filter out retransmissions we have already handled (the client
    // resends whenever it believes our ack was lost).
    lock (m_alreadySeenTracker)
    {
        ExpireSeenPackets();

        uint seq = packet.Header.Sequence;

        if (m_alreadySeenTracker.ContainsKey(seq))
        {
            return;
        }

        // Mask with Int32.MaxValue so the stored tick is never negative.
        m_alreadySeenTracker.Add(seq, Environment.TickCount & Int32.MaxValue);
        m_alreadySeenList.Add(seq);
    }

    m_Client.ProcessInPacket(packet);
}
public void InPacket(Packet packet)
{
    if (packet == null)
    {
        return;
    }

    // When too many acks pile up, the client ships a packet that is
    // nothing but acks; process them and recycle it without queueing.
    if (packet.Type == PacketType.PacketAck)
    {
        PacketAckPacket ackPacket = (PacketAckPacket)packet;

        foreach (PacketAckPacket.PacketsBlock ackBlock in ackPacket.Packets)
        {
            ProcessAck(ackBlock.ID);
        }

        PacketPool.Instance.ReturnPacket(packet);
        return;
    }

    // Acks may also piggyback on the header of any other packet.
    if (packet.Header.AppendedAcks)
    {
        foreach (uint ackId in packet.Header.AckList)
        {
            ProcessAck(ackId);
        }
    }

    // A client living on another partial instance only needs its
    // LogoutRequest handled here; recycle everything else.
    if (!m_Client.IsActive && packet.Type != PacketType.LogoutRequest)
    {
        PacketPool.Instance.ReturnPacket(packet);
        return;
    }

    if (packet.Type == PacketType.StartPingCheck)
    {
        // Answer pings immediately instead of routing them through the
        // inbound queue.
        StartPingCheckPacket startPing = (StartPingCheckPacket)packet;
        CompletePingCheckPacket endPing =
            (CompletePingCheckPacket)PacketPool.Instance.GetPacket(PacketType.CompletePingCheck);

        endPing.PingID.PingID = startPing.PingID.PingID;
        OutPacket(endPing, ThrottleOutPacketType.Task);
    }
    else
    {
        LLQueItem item = new LLQueItem();
        item.Packet = packet;
        item.Incoming = true;
        m_PacketQueue.Enqueue(item);
    }
}
// Restore per-client packet state from a serialized ClientInfo:
// pending acks, the unacked-packet table (re-deserialized from raw
// bytes), the sequence counter and all throttle settings.
public void SetClientInfo(ClientInfo info)
{
    m_PendingAcks = info.pendingAcks;
    m_NeedAck = new Dictionary<uint, LLQueItem>();

    // Scratch buffer for zero-decoding during deserialization.
    byte[] zero = new byte[3000];

    foreach (uint key in info.needAck.Keys)
    {
        byte[] buff = info.needAck[key];
        int packetEnd = buff.Length - 1;

        Packet packet = null;
        try
        {
            packet = PacketPool.Instance.GetPacket(buff, ref packetEnd, zero);
        }
        catch (Exception)
        {
            // Deserialization failed; entry is skipped below.
        }

        // BUG FIX: 'packet' used to be declared outside the loop, so a
        // failed GetPacket() left it holding the previous iteration's
        // packet (or null on the first pass), either re-registering the
        // wrong packet under this key or throwing NullReferenceException.
        if (packet == null)
        {
            continue;
        }

        LLQueItem item = new LLQueItem();
        item.Packet = packet;
        item.Incoming = false;
        item.throttleType = 0;
        item.TickCount = Environment.TickCount;
        item.Identifier = 0;
        item.Resends = 0;
        item.Length = packet.Length;
        item.Sequence = packet.Header.Sequence;

        m_NeedAck.Add(key, item);
    }

    m_Sequence = info.sequence;

    m_PacketQueue.ResendThrottle.Throttle = info.resendThrottle;
    m_PacketQueue.LandThrottle.Throttle = info.landThrottle;
    m_PacketQueue.WindThrottle.Throttle = info.windThrottle;
    m_PacketQueue.CloudThrottle.Throttle = info.cloudThrottle;
    m_PacketQueue.TaskThrottle.Throttle = info.taskThrottle;
    m_PacketQueue.AssetThrottle.Throttle = info.assetThrottle;
    m_PacketQueue.TextureThrottle.Throttle = info.textureThrottle;
    m_PacketQueue.TotalThrottle.Throttle = info.totalThrottle;
}
// Wrap an outgoing packet in an LLQueItem and hand it to the packet
// queue, recording the send in the statistics.
private void QueuePacket(
    Packet packet, ThrottleOutPacketType throttlePacketType,
    Object id)
{
    LLQueItem item = new LLQueItem();

    item.Packet = packet;
    item.Incoming = false;
    item.throttleType = throttlePacketType;
    item.TickCount = Environment.TickCount;
    item.Identifier = id;
    item.Resends = 0;
    item.Length = packet.Length;
    item.Sequence = packet.Header.Sequence;

    m_PacketQueue.Enqueue(item);

    m_PacketsSent++;
}
// Remove the pending-resend entry whose Identifier matches 'id'
// (reference equality), cancel its queued copies and recycle the packet.
// No-op when no entry matches.
private void DropResend(Object id)
{
    LLQueItem match = null;

    foreach (LLQueItem data in m_NeedAck.Values)
    {
        if (data.Identifier != null && data.Identifier == id)
        {
            match = data;
            break;
        }
    }

    if (match == null)
    {
        return;
    }

    m_NeedAck.Remove(match.Packet.Header.Sequence);
    m_PacketQueue.Cancel(match.Sequence);
    PacketPool.Instance.ReturnPacket(match.Packet);
}
// Convert the packet to bytes and stuff it onto the send queue
//
// Assigns a sequence number to unsequenced packets, registers reliable
// ones for ack tracking, serializes (optionally zero-coded) and sends.
// Unreliable packets are returned to the pool here; reliable ones stay
// referenced in m_NeedAck until acked.
public void ProcessOutPacket(LLQueItem item)
{
    Packet packet = item.Packet;

    // Assign sequence number here to prevent out of order packets
    if (packet.Header.Sequence == 0)
    {
        // m_NeedAck doubles as the lock guarding sequence assignment.
        lock (m_NeedAck)
        {
            packet.Header.Sequence = NextPacketSequenceNumber();
            item.Sequence = packet.Header.Sequence;
            item.TickCount = Environment.TickCount;

            // We want to see that packet arrive if it's reliable
            if (packet.Header.Reliable)
            {
                m_UnackedBytes += item.Length;

                // Keep track of when this packet was sent out
                // NOTE(review): TickCount was already set just above;
                // this second assignment is redundant but harmless.
                item.TickCount = Environment.TickCount;

                m_NeedAck[packet.Header.Sequence] = item;
            }
        }
    }

    // If we sent a killpacket
    if (packet is KillPacket)
    {
        Abort();
    }

    try
    {
        // If this packet has been reused/returned, the ToBytes
        // will blow up in our face.
        // Fail gracefully.
        //

        // Actually make the byte array and send it
        byte[] sendbuffer = item.Packet.ToBytes();

        if (packet.Header.Zerocoded)
        {
            int packetsize = Helpers.ZeroEncode(sendbuffer, sendbuffer.Length, m_ZeroOutBuffer);
            m_PacketServer.SendPacketTo(m_ZeroOutBuffer, packetsize, SocketFlags.None, m_Client.CircuitCode);
        }
        else
        {
            // Need some extra space in case we need to add proxy
            // information to the message later
            Buffer.BlockCopy(sendbuffer, 0, m_ZeroOutBuffer, 0, sendbuffer.Length);
            m_PacketServer.SendPacketTo(m_ZeroOutBuffer, sendbuffer.Length, SocketFlags.None, m_Client.CircuitCode);
        }
    }
    catch (NullReferenceException)
    {
        m_log.Debug("[PACKET] Detected reuse of a returned packet");
        m_PacketQueue.Cancel(item.Sequence);
        return;
    }

    // If this is a reliable packet, we are still holding a ref
    // Dont't return in that case
    //
    if (!packet.Header.Reliable)
    {
        m_PacketQueue.Cancel(item.Sequence);
        PacketPool.Instance.ReturnPacket(packet);
    }
}
// Convert the packet to bytes and stuff it onto the send queue
//
// Assigns a sequence number to unsequenced packets, registers reliable
// ones for ack tracking, serializes (optionally zero-coded) and sends.
// Unreliable packets are returned to the pool here; reliable ones stay
// referenced in m_NeedAck until acked.
public void ProcessOutPacket(LLQueItem item)
{
    Packet packet = item.Packet;

    // Assign sequence number here to prevent out of order packets
    if (packet.Header.Sequence == 0)
    {
        // m_NeedAck doubles as the lock guarding sequence assignment.
        lock (m_NeedAck)
        {
            packet.Header.Sequence = NextPacketSequenceNumber();
            item.Sequence = packet.Header.Sequence;
            item.TickCount = Environment.TickCount;

            // We want to see that packet arrive if it's reliable
            if (packet.Header.Reliable)
            {
                m_UnackedBytes += item.Length;

                // Keep track of when this packet was sent out
                // NOTE(review): TickCount was already set just above;
                // this second assignment is redundant but harmless.
                item.TickCount = Environment.TickCount;

                m_NeedAck[packet.Header.Sequence] = item;
            }
        }
    }

    // If we sent a killpacket
    if (packet is KillPacket)
        Abort();

    try
    {
        // If this packet has been reused/returned, the ToBytes
        // will blow up in our face.
        // Fail gracefully.
        //

        // Actually make the byte array and send it
        byte[] sendbuffer = item.Packet.ToBytes();

        if (packet.Header.Zerocoded)
        {
            int packetsize = Helpers.ZeroEncode(sendbuffer, sendbuffer.Length, m_ZeroOutBuffer);
            m_PacketServer.SendPacketTo(m_ZeroOutBuffer, packetsize, SocketFlags.None, m_Client.CircuitCode);
        }
        else
        {
            // Need some extra space in case we need to add proxy
            // information to the message later
            Buffer.BlockCopy(sendbuffer, 0, m_ZeroOutBuffer, 0, sendbuffer.Length);
            m_PacketServer.SendPacketTo(m_ZeroOutBuffer, sendbuffer.Length, SocketFlags.None, m_Client.CircuitCode);
        }
    }
    catch (NullReferenceException)
    {
        m_log.Error("[PACKET]: Detected reuse of a returned packet");
        m_PacketQueue.Cancel(item.Sequence);
        return;
    }

    // If this is a reliable packet, we are still holding a ref
    // Dont't return in that case
    //
    if (!packet.Header.Reliable)
    {
        m_PacketQueue.Cancel(item.Sequence);
        PacketPool.Instance.ReturnPacket(packet);
    }
}
// Restore per-client packet state from a serialized ClientInfo:
// pending acks (map plus derived key list), the unacked-packet table
// (re-deserialized from raw bytes), the sequence counter and all
// throttle settings.
public void SetClientInfo(ClientInfo info)
{
    m_PendingAcksMap = info.pendingAcks;
    m_PendingAcks = new List<uint>(m_PendingAcksMap.Keys);
    m_NeedAck = new Dictionary<uint, LLQueItem>();

    // Scratch buffer for zero-decoding during deserialization.
    byte[] zero = new byte[3000];

    foreach (uint key in info.needAck.Keys)
    {
        byte[] buff = info.needAck[key];
        int packetEnd = buff.Length - 1;

        Packet packet = null;
        try
        {
            packet = PacketPool.Instance.GetPacket(buff, ref packetEnd, zero);
        }
        catch (Exception)
        {
            // Deserialization failed; entry is skipped below.
        }

        // BUG FIX: 'packet' used to be declared outside the loop, so a
        // failed GetPacket() left it holding the previous iteration's
        // packet (or null on the first pass), either re-registering the
        // wrong packet under this key or throwing NullReferenceException.
        if (packet == null)
        {
            continue;
        }

        LLQueItem item = new LLQueItem();
        item.Packet = packet;
        item.Incoming = false;
        item.throttleType = 0;
        item.TickCount = Environment.TickCount;
        item.Identifier = 0;
        item.Resends = 0;
        item.Length = packet.Length;
        item.Sequence = packet.Header.Sequence;

        m_NeedAck.Add(key, item);
    }

    m_Sequence = info.sequence;

    m_PacketQueue.ResendThrottle.Throttle = info.resendThrottle;
    m_PacketQueue.LandThrottle.Throttle = info.landThrottle;
    m_PacketQueue.WindThrottle.Throttle = info.windThrottle;
    m_PacketQueue.CloudThrottle.Throttle = info.cloudThrottle;
    m_PacketQueue.TaskThrottle.Throttle = info.taskThrottle;
    m_PacketQueue.AssetThrottle.Throttle = info.assetThrottle;
    m_PacketQueue.TextureThrottle.Throttle = info.textureThrottle;
    m_PacketQueue.TotalThrottle.Throttle = info.totalThrottle;
}
public void ProcessInPacket(LLQueItem item)
{
    Packet packet = item.Packet;

    // Ack every reliable packet up front — duplicates included, since
    // the client only stops resending once it sees our ack.
    if (packet.Header.Reliable)
    {
        AckPacket(packet);
    }

    // AgentUpdate is too chatty to be worth counting.
    if (packet.Type != PacketType.AgentUpdate)
    {
        m_PacketsReceived++;
    }

    // Suppress duplicates: packets the client re-sent because our ack
    // did not reach it in time.
    lock (m_alreadySeenTracker)
    {
        ExpireSeenPackets();

        if (m_alreadySeenTracker.ContainsKey(packet.Header.Sequence))
        {
            return;
        }

        // Store a non-negative tick so expiry arithmetic stays simple.
        m_alreadySeenTracker.Add(packet.Header.Sequence,
                                 Environment.TickCount & Int32.MaxValue);
        m_alreadySeenList.Add(packet.Header.Sequence);
    }

    m_Client.ProcessInPacket(packet);
}
public void InPacket(Packet packet)
{
    if (packet == null)
    {
        return;
    }

    // Pure ack packets (sent when too many acks accumulate client-side)
    // are consumed here and recycled without entering the queue.
    if (packet.Type == PacketType.PacketAck)
    {
        PacketAckPacket acks = (PacketAckPacket)packet;

        foreach (PacketAckPacket.PacketsBlock blk in acks.Packets)
        {
            ProcessAck(blk.ID);
        }

        PacketPool.Instance.ReturnPacket(packet);
        return;
    }

    // Any packet's header may carry appended acks; handle those too.
    if (packet.Header.AppendedAcks)
    {
        foreach (uint appended in packet.Header.AckList)
        {
            ProcessAck(appended);
        }
    }

    // If the client belongs to another partial instance, the only
    // packet still worth handling is LogoutRequest.
    if (!m_Client.IsActive && packet.Type != PacketType.LogoutRequest)
    {
        PacketPool.Instance.ReturnPacket(packet);
        return;
    }

    if (packet.Type == PacketType.StartPingCheck)
    {
        // Reply to ping checks right away with a matching PingID.
        StartPingCheckPacket ping = (StartPingCheckPacket)packet;
        CompletePingCheckPacket pong =
            (CompletePingCheckPacket)PacketPool.Instance.GetPacket(PacketType.CompletePingCheck);

        pong.PingID.PingID = ping.PingID.PingID;
        OutPacket(pong, ThrottleOutPacketType.Task);
    }
    else
    {
        // Everything else is queued as incoming work.
        LLQueItem item = new LLQueItem();
        item.Packet = packet;
        item.Incoming = true;
        m_PacketQueue.Enqueue(item);
    }
}
// Route an item either directly to the output queue (fast path) or into
// its per-type queue for the throttle timer to drain later.
private void ThrottleCheck(ref LLPacketThrottle throttle, ref Queue <LLQueItem> q, LLQueItem item)
{
    // The idea.. is if the packet throttle queues are empty
    // and the client is under throttle for the type. Queue
    // it up directly. This basically short cuts having to
    // wait for the timer to fire to put things into the
    // output queue
    if ((q.Count == 0) && (throttle.UnderLimit()))
    {
        // FIX: Monitor.Enter() must sit OUTSIDE the try block. If it
        // were inside and threw, the finally would Pulse()/Exit() a
        // monitor this thread never acquired, raising
        // SynchronizationLockException and masking the real error.
        Monitor.Enter(this);
        try
        {
            throttle.AddBytes(item.Length);
            TotalThrottle.AddBytes(item.Length);
            SendQueue.Enqueue(item);
        }
        catch (Exception e)
        {
            // Probably a serialization exception
            m_log.WarnFormat("ThrottleCheck: {0}", e.ToString());
        }
        finally
        {
            Monitor.Pulse(this);
            Monitor.Exit(this);
        }
    }
    else
    {
        q.Enqueue(item);
    }
}
// Drain the per-category packet queues into the blocking output queue,
// honouring each category throttle plus the total throttle. Each call
// moves at most MaxThrottleLoops packets; one packet per category is
// moved per pass (high-priority Task packets are preferred over
// low-priority ones and jump the output queue via PriorityEnqueue).
public void ProcessThrottle()
{
    // I was considering this.. Will an event fire if the thread it's on is blocked?
    // Then I figured out.. it doesn't really matter.. because this thread won't be blocked for long
    // The General overhead of the UDP protocol gets sent to the queue un-throttled by this
    // so This'll pick up about around the right time.

    int MaxThrottleLoops = 4550; // 50*7 packets can be dequeued at once.
    int throttleLoops = 0;

    // We're going to dequeue all of the saved up packets until
    // we've hit the throttle limit or there's no more packets to send
    lock (this)
    {
        // this variable will be true if there was work done in the last execution of the
        // loop, since each pass through the loop checks the queue length, we no longer
        // need the check on entering the loop
        bool qchanged = true;

        ResetCounters();
        // m_log.Info("[THROTTLE]: Entering Throttle");
        while (TotalThrottle.UnderLimit() && qchanged && throttleLoops <= MaxThrottleLoops)
        {
            qchanged = false; // We will break out of the loop if no work was accomplished

            throttleLoops++;
            //Now comes the fun part.. we dump all our elements into m_packetQueue that we've saved up.
            if ((ResendOutgoingPacketQueue.Count > 0) && ResendThrottle.UnderLimit())
            {
                LLQueItem qpack = ResendOutgoingPacketQueue.Dequeue();

                SendQueue.Enqueue(qpack);
                TotalThrottle.AddBytes(qpack.Length);
                ResendThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }

            if ((LandOutgoingPacketQueue.Count > 0) && LandThrottle.UnderLimit())
            {
                LLQueItem qpack = LandOutgoingPacketQueue.Dequeue();

                SendQueue.Enqueue(qpack);
                TotalThrottle.AddBytes(qpack.Length);
                LandThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }

            if ((WindOutgoingPacketQueue.Count > 0) && WindThrottle.UnderLimit())
            {
                LLQueItem qpack = WindOutgoingPacketQueue.Dequeue();

                SendQueue.Enqueue(qpack);
                TotalThrottle.AddBytes(qpack.Length);
                WindThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }

            if ((CloudOutgoingPacketQueue.Count > 0) && CloudThrottle.UnderLimit())
            {
                LLQueItem qpack = CloudOutgoingPacketQueue.Dequeue();

                SendQueue.Enqueue(qpack);
                TotalThrottle.AddBytes(qpack.Length);
                CloudThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }

            // Task packets share one throttle across two queues; the
            // high-priority queue is always served first.
            if ((TaskOutgoingPacketQueue.Count > 0 || TaskLowpriorityPacketQueue.Count > 0) && TaskThrottle.UnderLimit())
            {
                LLQueItem qpack;
                if (TaskOutgoingPacketQueue.Count > 0)
                {
                    qpack = TaskOutgoingPacketQueue.Dequeue();
                    SendQueue.PriorityEnqueue(qpack);
                }
                else
                {
                    qpack = TaskLowpriorityPacketQueue.Dequeue();
                    SendQueue.Enqueue(qpack);
                }

                TotalThrottle.AddBytes(qpack.Length);
                TaskThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }

            if ((TextureOutgoingPacketQueue.Count > 0) && TextureThrottle.UnderLimit())
            {
                LLQueItem qpack = TextureOutgoingPacketQueue.Dequeue();

                SendQueue.Enqueue(qpack);
                TotalThrottle.AddBytes(qpack.Length);
                TextureThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }

            if ((AssetOutgoingPacketQueue.Count > 0) && AssetThrottle.UnderLimit())
            {
                LLQueItem qpack = AssetOutgoingPacketQueue.Dequeue();

                SendQueue.Enqueue(qpack);
                TotalThrottle.AddBytes(qpack.Length);
                AssetThrottle.AddBytes(qpack.Length);
                qchanged = true;
            }
        }
        // m_log.Info("[THROTTLE]: Processed " + throttleLoops + " packets");
    }
}
/* STANDARD QUEUE MANIPULATION INTERFACES */
// Enqueue an item, routing it by throttle category. Incoming items and
// ack-type traffic bypass the throttles; sequenced outgoing items are
// counted in 'contents' so cancellation can be reconciled later.
public void Enqueue(LLQueItem item)
{
    if (!m_enabled)
    {
        return;
    }

    // We could micro lock, but that will tend to actually
    // probably be worse than just synchronizing on SendQueue
    if (item == null)
    {
        SendQueue.Enqueue(item);
        return;
    }

    if (item.Incoming)
    {
        SendQueue.PriorityEnqueue(item);
        return;
    }

    // Track how many copies of each sequence number are in flight.
    if (item.Sequence != 0)
    {
        lock (contents)
        {
            int count;
            if (contents.TryGetValue(item.Sequence, out count))
            {
                contents[item.Sequence] = count + 1;
            }
            else
            {
                contents.Add(item.Sequence, 1);
            }
        }
    }

    lock (this)
    {
        switch (item.throttleType & ThrottleOutPacketType.TypeMask)
        {
            case ThrottleOutPacketType.Resend:
                ThrottleCheck(ref ResendThrottle, ref ResendOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Texture:
                ThrottleCheck(ref TextureThrottle, ref TextureOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Task:
                if ((item.throttleType & ThrottleOutPacketType.LowPriority) != 0)
                {
                    ThrottleCheck(ref TaskThrottle, ref TaskLowpriorityPacketQueue, item);
                }
                else
                {
                    ThrottleCheck(ref TaskThrottle, ref TaskOutgoingPacketQueue, item);
                }
                break;
            case ThrottleOutPacketType.Land:
                ThrottleCheck(ref LandThrottle, ref LandOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Asset:
                ThrottleCheck(ref AssetThrottle, ref AssetOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Cloud:
                ThrottleCheck(ref CloudThrottle, ref CloudOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Wind:
                ThrottleCheck(ref WindThrottle, ref WindOutgoingPacketQueue, item);
                break;
            default:
                // Acknowledgements and other such stuff should go directly to the blocking Queue
                // Throttling them may and likely 'will' be problematic
                SendQueue.PriorityEnqueue(item);
                break;
        }
    }
}
// Route an item either directly to the output queue (fast path) or into
// its per-type queue for the throttle timer to drain later.
private void ThrottleCheck(ref LLPacketThrottle throttle, ref Queue<LLQueItem> q, LLQueItem item)
{
    // The idea.. is if the packet throttle queues are empty
    // and the client is under throttle for the type. Queue
    // it up directly. This basically short cuts having to
    // wait for the timer to fire to put things into the
    // output queue
    if ((q.Count == 0) && (throttle.UnderLimit()))
    {
        // FIX: Monitor.Enter() must sit OUTSIDE the try block. If it
        // were inside and threw, the finally would Pulse()/Exit() a
        // monitor this thread never acquired, raising
        // SynchronizationLockException and masking the real error.
        Monitor.Enter(this);
        try
        {
            throttle.AddBytes(item.Length);
            TotalThrottle.AddBytes(item.Length);
            SendQueue.Enqueue(item);
        }
        catch (Exception e)
        {
            // Probably a serialization exception
            m_log.WarnFormat("ThrottleCheck: {0}", e.ToString());
        }
        finally
        {
            Monitor.Pulse(this);
            Monitor.Exit(this);
        }
    }
    else
    {
        q.Enqueue(item);
    }
}
/* STANDARD QUEUE MANIPULATION INTERFACES */
// Enqueue an item, routing it by throttle category. Incoming items and
// ack-type traffic bypass the throttles; sequenced outgoing items are
// recorded in 'contents' so cancellation can be reconciled at dequeue.
public void Enqueue(LLQueItem item)
{
    if (!m_enabled)
    {
        return;
    }

    // We could micro lock, but that will tend to actually
    // probably be worse than just synchronizing on SendQueue
    if (item == null)
    {
        SendQueue.Enqueue(item);
        return;
    }

    if (item.Incoming)
    {
        SendQueue.PriorityEnqueue(item);
        return;
    }

    if (item.Sequence != 0)
    {
        // FIX: Dequeue() mutates 'contents' under lock (contents), but
        // this Add() was unsynchronized — a race on the collection.
        // Take the same lock here.
        lock (contents)
        {
            contents.Add(item.Sequence);
        }
    }

    lock (this)
    {
        switch (item.throttleType & ThrottleOutPacketType.TypeMask)
        {
            case ThrottleOutPacketType.Resend:
                ThrottleCheck(ref ResendThrottle, ref ResendOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Texture:
                ThrottleCheck(ref TextureThrottle, ref TextureOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Task:
                if ((item.throttleType & ThrottleOutPacketType.LowPriority) != 0)
                {
                    ThrottleCheck(ref TaskThrottle, ref TaskLowpriorityPacketQueue, item);
                }
                else
                {
                    ThrottleCheck(ref TaskThrottle, ref TaskOutgoingPacketQueue, item);
                }
                break;
            case ThrottleOutPacketType.Land:
                ThrottleCheck(ref LandThrottle, ref LandOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Asset:
                ThrottleCheck(ref AssetThrottle, ref AssetOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Cloud:
                ThrottleCheck(ref CloudThrottle, ref CloudOutgoingPacketQueue, item);
                break;
            case ThrottleOutPacketType.Wind:
                ThrottleCheck(ref WindThrottle, ref WindOutgoingPacketQueue, item);
                break;
            default:
                // Acknowledgements and other such stuff should go directly to the blocking Queue
                // Throttling them may and likely 'will' be problematic
                SendQueue.PriorityEnqueue(item);
                break;
        }
    }
}