/// <summary>
/// Initialise a new PriorityQueueItem
/// </summary>
/// <param name="priority">The queue priority assigned to this item</param>
/// <param name="connection">The connection associated with this item; may not be null</param>
/// <param name="packetHeader">The packet header associated with this item; may not be null</param>
/// <param name="dataStream">The data stream associated with this item; may not be null</param>
/// <param name="sendReceiveOptions">The send receive options associated with this item; may not be null</param>
public PriorityQueueItem(QueueItemPriority priority, Connection connection, PacketHeader packetHeader, MemoryStream dataStream, SendReceiveOptions sendReceiveOptions)
{
    //Validate the reference parameters up front, in declaration order
    if (connection == null) throw new ArgumentNullException("connection", "Provided Connection parameter cannot be null.");
    if (packetHeader == null) throw new ArgumentNullException("packetHeader", "Provided PacketHeader parameter cannot be null.");
    if (dataStream == null) throw new ArgumentNullException("dataStream", "Provided MemoryStream parameter cannot be null.");
    if (sendReceiveOptions == null) throw new ArgumentNullException("sendReceiveOptions", "Provided sendReceiveOptions cannot be null.");

    Priority = priority;
    Connection = connection;
    PacketHeader = packetHeader;
    DataStream = dataStream;
    SendReceiveOptions = sendReceiveOptions;
}
/// <summary>
/// Try removing an item from the priority queue which has a priority of at least that provided.
/// </summary>
/// <param name="minimumPriority">The minimum priority to consider</param>
/// <param name="item">Key is priority, lower number is lower priority, and value is TValue</param>
/// <returns>True if an item was successfully removed from the queue</returns>
public bool TryTake(QueueItemPriority minimumPriority, out KeyValuePair<QueueItemPriority, TValue> item)
{
    int lowestIndexToCheck = (int)minimumPriority;

    //Walk the priority levels from highest down to the requested minimum
    for (int index = numDistinctPriorities - 1; index >= lowestIndexToCheck; index--)
    {
        //Lock the internal data so that the Dequeue operation
        //and the update of the total count are atomic
        lock (internalQueues)
        {
            var queueForLevel = internalQueues[QueueItemPriorityVals[index]];
            if (queueForLevel.Count > 0)
            {
                item = queueForLevel.Dequeue();
                Interlocked.Decrement(ref totalNumberQueuedItems);
                return true;
            }
        }
    }

    //Nothing found at or above the minimum priority, return defaults
    item = new KeyValuePair<QueueItemPriority, TValue>((QueueItemPriority)0, default(TValue));
    return false;
}
/// <summary>
/// Initialise a new PriorityQueueItem
/// </summary>
/// <param name="priority">The queue priority assigned to this item</param>
/// <param name="connection">The connection associated with this item; may not be null</param>
/// <param name="packetHeader">The packet header associated with this item; may not be null</param>
/// <param name="dataStream">The data stream associated with this item; may not be null</param>
/// <param name="sendReceiveOptions">The send receive options associated with this item; may not be null</param>
public PriorityQueueItem(QueueItemPriority priority, Connection connection, PacketHeader packetHeader, MemoryStream dataStream, SendReceiveOptions sendReceiveOptions)
{
    //Reject null reference parameters before any state is assigned
    if (connection == null)
    {
        throw new ArgumentNullException("connection", "Provided Connection parameter cannot be null.");
    }

    if (packetHeader == null)
    {
        throw new ArgumentNullException("packetHeader", "Provided PacketHeader parameter cannot be null.");
    }

    if (dataStream == null)
    {
        throw new ArgumentNullException("dataStream", "Provided MemoryStream parameter cannot be null.");
    }

    if (sendReceiveOptions == null)
    {
        throw new ArgumentNullException("sendReceiveOptions", "Provided sendReceiveOptions cannot be null.");
    }

    this.Priority = priority;
    this.Connection = connection;
    this.PacketHeader = packetHeader;
    this.DataStream = dataStream;
    this.SendReceiveOptions = sendReceiveOptions;
}
/// <summary>
/// Adds an item to the queue, blocking (up to QueueTimeout) while the queue is full.
/// High priority items are added to the front of the queue; all others to the back.
/// </summary>
/// <param name="item">The item to enqueue</param>
/// <param name="priority">Determines whether the item is added to the front (High) or back of the queue</param>
/// <returns>True if the item was successfully added, false if the producer semaphore timed out or the add failed</returns>
public bool Enqueue(T item, QueueItemPriority priority)
{
    ModuleProc PROC = new ModuleProc(DYN_MODULE_NAME, "Enqueue");
    bool result = default(bool);

    // will block when the queue is full
    if (!this._semProducer.WaitOne(this.QueueTimeout))
    {
        return false;
    }

    try
    {
        _mtxLock.WaitOne();

        // high priority items should come first
        if (priority == QueueItemPriority.High)
        {
            _queue.AddFirst(item);
        }
        else
        {
            _queue.AddLast(item);
        }

        // BUG FIX: result was never assigned, so this method always returned
        // false even after a successful enqueue. The sibling blocking Enqueue
        // implementation sets result = true once the item has been added; do
        // the same here so callers can rely on the return value.
        result = true;
    }
    catch (Exception ex)
    {
        Log.Exception(PROC, ex);
    }
    finally
    {
        _mtxLock.ReleaseMutex();
        _semConsumer.Release(); // signal the consumer to pick the data from queue
    }

    return result;
}
/// <summary>
/// Enqueue a callback to the thread pool. The callback is added to a shared priority queue
/// and a new Task is started to service the queue; the task that runs is not guaranteed to
/// execute the exact callback enqueued by this call.
/// </summary>
/// <param name="priority">The priority with which to enqueue the provided callback</param>
/// <param name="callback">The callback to execute</param>
/// <param name="state">The state parameter to pass to the callback when executed</param>
public void EnqueueItem(QueueItemPriority priority, WaitCallback callback, object state)
{
    //Add the work item to the shared queue before creating the task that will service it
    lock (SyncRoot)
        jobQueue.TryAdd(new KeyValuePair <QueueItemPriority, WaitCallBackWrapper>(priority, new WaitCallBackWrapper(callback, state)));

    Task t = null;
    CancellationTokenSource cSource = new CancellationTokenSource();

    //NOTE: t is captured by the task body so the task can deregister itself on completion;
    //this relies on the assignment completing before the task runs (Start is called under SyncRoot below)
    t = new Task(() =>
    {
        KeyValuePair <QueueItemPriority, WaitCallBackWrapper> toRun;
        lock (SyncRoot)
        {
            //If the queue is empty (another task took the item) or we are shutting down there is nothing to do
            if (!jobQueue.TryTake(out toRun) || shutdown)
            {
                return;
            }
        }

        //Execute the callback outside the lock
        toRun.Value.WaitCallBack(toRun.Value.State);

        //Deregister this task now that the callback has completed
        lock (SyncRoot)
        {
            scheduledTasks.Remove(t.Id);
            taskCancellationTokens.Remove(t.Id);
        }
    }, cSource.Token);

    lock (SyncRoot)
    {
        scheduledTasks.Add(t.Id, t);
        taskCancellationTokens.Add(t.Id, cSource);
        t.Start();
    }
}
/// <summary>
/// Attempts to use the data provided in packetBuilder to recreate something useful. If we don't have enough data
/// yet that value is set in packetBuilder.
/// </summary>
/// <param name="packetBuilder">The <see cref="PacketBuilder"/> containing incoming cached data</param>
protected void IncomingPacketHandleHandOff(PacketBuilder packetBuilder)
{
    //Counts completed hand-off iterations, used only in log messages
    int loopCounter = 0;
    try
    {
        if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... checking for completed packet with " + packetBuilder.TotalBytesCached.ToString() + " bytes read."); }

        if (packetBuilder.TotalPartialPacketCount == 0)
        {
            throw new Exception("Executing IncomingPacketHandleHandOff when no packets exist in packetbuilder.");
        }

        //Loop until we are finished with this packetBuilder
        while (true)
        {
            //If we have ended up with a null packet at the front, probably due to some form of concatenation we can pull it off here
            //It is possible we have concatenation of several null packets along with real data so we loop until the firstByte is greater than 0
            if (ConnectionInfo.ApplicationLayerProtocol == ApplicationLayerProtocolStatus.Enabled && packetBuilder.FirstByte() == 0)
            {
                #region Ignore Null Packet
                if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... null packet removed in IncomingPacketHandleHandOff() from " + ConnectionInfo + ", loop index - " + loopCounter.ToString()); }

                packetBuilder.ClearNTopBytes(1);

                //Reset the expected bytes to 0 so that the next check starts from scratch
                packetBuilder.TotalBytesExpected = 0;

                //If we have run out of data completely then we can return immediately
                if (packetBuilder.TotalBytesCached == 0) { return; }
                #endregion
            }
            else
            {
                int packetHeaderSize = 0;
                PacketHeader topPacketHeader;

                #region Set topPacketHeader
                if (ConnectionInfo.ApplicationLayerProtocol == ApplicationLayerProtocolStatus.Enabled)
                {
                    //First determine the expected size of a header packet
                    //The first cached byte holds the serialised header length (see the ReadDataSection offset of 1 below)
                    packetHeaderSize = packetBuilder.FirstByte() + 1;

                    //Do we have enough data to build a header?
                    if (packetBuilder.TotalBytesCached < packetHeaderSize)
                    {
                        if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... require " + packetHeaderSize + " bytes for packet header, only " + packetBuilder.TotalBytesCached + " bytes cached."); }

                        //Set the expected number of bytes and then return
                        packetBuilder.TotalBytesExpected = packetHeaderSize;
                        return;
                    }

                    if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... deserializing header using " + packetHeaderSize + " bytes, " + packetBuilder.TotalBytesCached + " bytes cached."); }

                    //We have enough for a header
                    using (MemoryStream headerStream = packetBuilder.ReadDataSection(1, packetHeaderSize - 1))
                        topPacketHeader = new PacketHeader(headerStream, NetworkComms.InternalFixedSendReceiveOptions);
                }
                else
                {
                    //Without the application layer protocol everything cached is treated as a single unmanaged payload
                    topPacketHeader = new PacketHeader(Enum.GetName(typeof(ReservedPacketType), ReservedPacketType.Unmanaged), packetBuilder.TotalBytesCached);
                }
                #endregion

                //Idiot test
                if (topPacketHeader.PacketType == null)
                {
                    throw new SerialisationException("packetType value in packetHeader should never be null");
                }

                //We can now use the header to establish if we have enough payload data
                //First case is when we have not yet received enough data
                if (packetBuilder.TotalBytesCached < packetHeaderSize + topPacketHeader.TotalPayloadSize)
                {
                    if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... more data required for complete packet payload. Expecting " + (packetHeaderSize + topPacketHeader.TotalPayloadSize).ToString() + " total packet bytes."); }

                    //Set the expected number of bytes and then return
                    packetBuilder.TotalBytesExpected = packetHeaderSize + topPacketHeader.TotalPayloadSize;
                    return;
                }
                //Second case is we have enough data
                else if (packetBuilder.TotalBytesCached >= packetHeaderSize + topPacketHeader.TotalPayloadSize)
                {
                    #region Handle Packet
                    //We can either have exactly the right amount or even more than we were expecting
                    //We may have too much data if we are sending high quantities and the packets have been concatenated
                    SendReceiveOptions incomingPacketSendReceiveOptions = IncomingPacketSendReceiveOptions(topPacketHeader);

                    if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Debug("Received packet of type '" + topPacketHeader.PacketType + "' from " + ConnectionInfo + ", containing " + packetHeaderSize.ToString() + " header bytes and " + topPacketHeader.TotalPayloadSize.ToString() + " payload bytes."); }

                    //Reserved (library-internal) packet types are identified by name, excluding the Unmanaged type
                    bool isReservedPacketType = (topPacketHeader.PacketType != Enum.GetName(typeof(ReservedPacketType), ReservedPacketType.Unmanaged) &&
                        NetworkComms.ReservedPacketTypeNames.ContainsKey(topPacketHeader.PacketType));

                    //Get the packet sequence number if logging
                    string packetSeqNumStr = "";
                    if (NetworkComms.LoggingEnabled)
                    {
                        packetSeqNumStr = (topPacketHeader.ContainsOption(PacketHeaderLongItems.PacketSequenceNumber) ?
                            ". pSeq#-" + topPacketHeader.GetOption(PacketHeaderLongItems.PacketSequenceNumber).ToString() + "." : "");
                    }

                    //Only reserved packet types get completed inline by default
                    if (isReservedPacketType)
                    {
#if WINDOWS_PHONE || NETFX_CORE
                        QueueItemPriority priority = QueueItemPriority.Normal;
#else
                        //Inherit the priority of the thread currently performing the hand-off
                        QueueItemPriority priority = (QueueItemPriority)Thread.CurrentThread.Priority;
#endif

                        PriorityQueueItem item = new PriorityQueueItem(priority, this, topPacketHeader, packetBuilder.ReadDataSection(packetHeaderSize, topPacketHeader.TotalPayloadSize), incomingPacketSendReceiveOptions);

                        if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... handling packet type '" + topPacketHeader.PacketType + "' inline. Loop index - " + loopCounter.ToString() + packetSeqNumStr); }

                        NetworkComms.CompleteIncomingItemTask(item);
                    }
                    else
                    {
                        //User packet types may specify a handle priority via their SendReceiveOptions
                        QueueItemPriority itemPriority = (incomingPacketSendReceiveOptions.Options.ContainsKey("ReceiveHandlePriority") ?
                            (QueueItemPriority)Enum.Parse(typeof(QueueItemPriority), incomingPacketSendReceiveOptions.Options["ReceiveHandlePriority"]) : QueueItemPriority.Normal);

                        PriorityQueueItem item = new PriorityQueueItem(itemPriority, this, topPacketHeader, packetBuilder.ReadDataSection(packetHeaderSize, topPacketHeader.TotalPayloadSize), incomingPacketSendReceiveOptions);

                        //QueueItemPriority.Highest is the only priority that is executed inline
                        if (itemPriority == QueueItemPriority.Highest)
                        {
                            if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... handling packet type '" + topPacketHeader.PacketType + "' with priority HIGHEST inline. Loop index - " + loopCounter.ToString() + packetSeqNumStr); }

                            NetworkComms.CompleteIncomingItemTask(item);
                        }
                        else
                        {
#if NETFX_CORE
                            //NETFX_CORE thread pool does not report a chosen thread id
                            NetworkComms.CommsThreadPool.EnqueueItem(item.Priority, NetworkComms.CompleteIncomingItemTask, item);

                            if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... added completed " + item.PacketHeader.PacketType + " packet to thread pool (Q:" + NetworkComms.CommsThreadPool.QueueCount.ToString() + ") with priority " + itemPriority.ToString() + ". Loop index=" + loopCounter.ToString() + packetSeqNumStr); }
#else
                            int threadId = NetworkComms.CommsThreadPool.EnqueueItem(item.Priority, NetworkComms.CompleteIncomingItemTask, item);

                            if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace(" ... added completed " + item.PacketHeader.PacketType + " packet to thread pool (Q:" + NetworkComms.CommsThreadPool.QueueCount.ToString() + ", T:" + NetworkComms.CommsThreadPool.CurrentNumTotalThreads.ToString() + ", I:" + NetworkComms.CommsThreadPool.CurrentNumIdleThreads.ToString() + ") with priority " + itemPriority.ToString() + (threadId > 0 ? ". Selected threadId=" + threadId.ToString() : "") + ". Loop index=" + loopCounter.ToString() + packetSeqNumStr); }
#endif
                        }
                    }

                    //We clear the bytes we have just handed off
                    if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace("Removing " + (packetHeaderSize + topPacketHeader.TotalPayloadSize).ToString() + " bytes from incoming packet builder from connection with " + ConnectionInfo + "."); }

                    packetBuilder.ClearNTopBytes(packetHeaderSize + topPacketHeader.TotalPayloadSize);

                    //Reset the expected bytes to 0 so that the next check starts from scratch
                    packetBuilder.TotalBytesExpected = 0;

                    //If we have run out of data completely then we can return immediately
                    if (packetBuilder.TotalBytesCached == 0) { return; }
                    #endregion
                }
                else
                {
                    throw new CommunicationException("This should be impossible!");
                }
            }

            loopCounter++;
        }
    }
    catch (Exception ex)
    {
        //Any error, throw an exception.
        if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Fatal("A fatal exception occurred in IncomingPacketHandleHandOff(), connection with " + ConnectionInfo + " be closed. See log file for more information."); }

        if (this is IPConnection)
        {
            //Log the exception in DOS protection if enabled
            if (IPConnection.DOSProtection.Enabled && ConnectionInfo.RemoteEndPoint.GetType() == typeof(IPEndPoint))
            {
                IPConnection.DOSProtection.LogMalformedData(ConnectionInfo.RemoteIPEndPoint.Address);
            }
        }

        LogTools.LogException(ex, "CommsError", "A fatal exception occurred in IncomingPacketHandleHandOff(), connection with " + ConnectionInfo + " be closed. Loop counter " + loopCounter.ToString() + ". Packet builder contained " + packetBuilder.TotalBytesCached + " total cached bytes.");

        //Close the connection; the numeric argument's meaning is not visible here - TODO confirm it is a close-reason code
        CloseConnection(true, 45);
    }
}
/// <summary>
/// Polls all existing connections based on ConnectionKeepAlivePollIntervalSecs value. Server side connections are polled
/// slightly earlier than client side to help reduce potential congestion.
/// </summary>
/// <param name="returnImmediately">If true runs as task and returns immediately.</param>
private static void AllConnectionsSendNullPacketKeepAlive(bool returnImmediately = false)
{
    if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Trace("Starting AllConnectionsSendNullPacketKeepAlive"); }

    //Loop through all connections and test the alive state
    List <Connection> allConnections = NetworkComms.GetExistingConnection(ApplicationLayerProtocolStatus.Enabled);
    int remainingConnectionCount = allConnections.Count;

    QueueItemPriority nullSendPriority = QueueItemPriority.AboveNormal;

    ManualResetEvent allConnectionsComplete = new ManualResetEvent(false);
    for (int i = 0; i < allConnections.Count; i++)
    {
        //We don't send null packets to unconnected UDP connections
        UDPConnection asUDP = allConnections[i] as UDPConnection;
        if (asUDP != null && asUDP.ConnectionUDPOptions == UDPOptions.None)
        {
            if (Interlocked.Decrement(ref remainingConnectionCount) == 0) { allConnectionsComplete.Set(); }
            continue;
        }
        else
        {
            //Capture the loop variable so each queued callback sees its own index
            int innerIndex = i;

            NetworkComms.CommsThreadPool.EnqueueItem(nullSendPriority, new WaitCallback((obj) =>
            {
                try
                {
                    //If the connection is server side we poll preferentially
                    if (allConnections[innerIndex] != null)
                    {
                        if (allConnections[innerIndex].ConnectionInfo.ServerSide)
                        {
                            //We check the last incoming traffic time
                            //In scenarios where the client is sending us lots of data there is no need to poll
                            if ((DateTime.Now - allConnections[innerIndex].ConnectionInfo.LastTrafficTime).TotalSeconds > ConnectionKeepAlivePollIntervalSecs)
                            {
                                allConnections[innerIndex].SendNullPacket();
                            }
                        }
                        else
                        {
                            //If we are client side we wait up to an additional 3 seconds to do the poll
                            //This means the server will probably beat us
                            if ((DateTime.Now - allConnections[innerIndex].ConnectionInfo.LastTrafficTime).TotalSeconds > ConnectionKeepAlivePollIntervalSecs + 1.0 + (NetworkComms.randomGen.NextDouble() * 2.0))
                            {
                                allConnections[innerIndex].SendNullPacket();
                            }
                        }
                    }
                }
                catch (Exception) { } //Best effort: a failed keep-alive send is deliberately swallowed here
                finally
                {
                    //Always count this connection as handled so the waiter below can complete
                    if (Interlocked.Decrement(ref remainingConnectionCount) == 0) { allConnectionsComplete.Set(); }
                }
            }), null);
        }
    }

    //Max wait is 2.5 seconds per connection (2500ms each in the WaitOne calls below)
    if (!returnImmediately && allConnections.Count > 0)
    {
#if NET2
        if (!allConnectionsComplete.WaitOne(allConnections.Count * 2500, false))
#else
        if (!allConnectionsComplete.WaitOne(allConnections.Count * 2500))
#endif
        {
            //This timeout should not really happen so we are going to log an error if it does
            //LogTools.LogException(new TimeoutException("Timeout after " + allConnections.Count.ToString() + " seconds waiting for null packet sends to finish. " + remainingConnectionCount.ToString() + " connection waits remain. This error indicates very high send load or a possible send deadlock."), "NullPacketKeepAliveTimeoutError");
            //NOTE(review): the message below reports the connection count as "seconds" but the actual wait was count * 2.5 seconds - confirm before relying on the logged figure
            if (NetworkComms.LoggingEnabled) { NetworkComms.Logger.Warn("Timeout after " + allConnections.Count.ToString() + " seconds waiting for null packet sends to finish. " + remainingConnectionCount.ToString() + " connection waits remain. This error indicates very high send load or a possible send deadlock."); }
        }
    }
}
/// <summary>
/// Enqueue a callback to the thread pool. The callback is added to a shared priority queue
/// and a new Task is started to service the queue; the task that runs is not guaranteed to
/// execute the exact callback enqueued by this call.
/// </summary>
/// <param name="priority">The priority with which to enqueue the provided callback</param>
/// <param name="callback">The callback to execute</param>
/// <param name="state">The state parameter to pass to the callback when executed</param>
public void EnqueueItem(QueueItemPriority priority, WaitCallback callback, object state)
{
    //Add the work item to the shared queue before creating the task that will service it
    lock (SyncRoot)
        jobQueue.TryAdd(new KeyValuePair<QueueItemPriority, WaitCallBackWrapper>(priority, new WaitCallBackWrapper(callback, state)));

    Task t = null;
    CancellationTokenSource cSource = new CancellationTokenSource();

    //NOTE: t is captured by the task body so the task can deregister itself on completion;
    //this relies on the assignment completing before the task runs (Start is called under SyncRoot below)
    t = new Task(() =>
    {
        KeyValuePair<QueueItemPriority, WaitCallBackWrapper> toRun;
        lock (SyncRoot)
        {
            //If the queue is empty (another task took the item) or we are shutting down there is nothing to do
            if (!jobQueue.TryTake(out toRun) || shutdown)
                return;
        }

        //Execute the callback outside the lock
        toRun.Value.WaitCallBack(toRun.Value.State);

        //Deregister this task now that the callback has completed
        lock (SyncRoot)
        {
            scheduledTasks.Remove(t.Id);
            taskCancellationTokens.Remove(t.Id);
        }
    }, cSource.Token);

    lock (SyncRoot)
    {
        scheduledTasks.Add(t.Id, t);
        taskCancellationTokens.Add(t.Id, cSource);
        t.Start();
    }
}
/// <summary>
/// Adds an item to the bounded queue, blocking while the queue is at capacity.
/// High priority items are added to the front of the queue; all others to the back.
/// </summary>
/// <param name="item">The item to enqueue</param>
/// <param name="priority">Determines whether the item is added to the front (High) or back of the queue</param>
/// <returns>True if the item was added; false if the executor was shut down before the add completed</returns>
public bool Enqueue(T item, QueueItemPriority priority)
{
    ModuleProc PROC = new ModuleProc(DYN_MODULE_NAME, "Enqueue");
    bool result = default(bool);

    lock (_queue)
    {
        try
        {
            // if full, wait until some items consumed
            while (_queue.Count == this.Capacity)
            {
                if (this.ExecutorService.IsShutdown)
                {
                    break;
                }

                _fullWaiters++;
                try
                {
                    //NOTE(review): hand-rolled condition-variable dance - the _queue monitor
                    //is explicitly released while waiting on _lockFullEvent and re-acquired
                    //afterwards, all inside the _lockFullEvent lock. The exact statement
                    //ordering here is load-bearing; do not reorder.
                    lock (_lockFullEvent)
                    {
                        Monitor.Exit(_queue);
                        Log.Debug(PROC, "Locked : BlockingBoundQueueUser.Enqueue");
                        Monitor.Wait(_lockFullEvent);
                        Log.Debug(PROC, "Unlocked : BlockingBoundQueueUser.Enqueue");
                        Monitor.Enter(_queue);
                    }
                }
                finally
                {
                    _fullWaiters--;
                }
            }
        }
        catch (Exception ex)
        {
            this.LogException(PROC, ex);
        }
        finally
        {
            //Only add the item if the executor is still running; otherwise result stays false
            if (!this.ExecutorService.IsShutdown)
            {
                //High priority items jump to the front of the queue
                if (priority == QueueItemPriority.High)
                {
                    _queue.AddFirst(item);
                }
                else
                {
                    _queue.AddLast(item);
                }

                result = true;
                _count = _queue.Count;
            }
        }
    }

    // wake the waiting consumers
    if (!this.ExecutorService.IsShutdown)
    {
        if (_emptyWaiters > 0)
        {
            lock (_lockEmptyEvent)
            {
                Monitor.Pulse(_lockEmptyEvent);
            }
        }
    }

    return(result);
}
/// <summary>
/// Enqueue a callback to the thread pool.
/// </summary>
/// <param name="priority">The priority with which to enqueue the provided callback</param>
/// <param name="callback">The callback to execute</param>
/// <param name="state">The state parameter to pass to the callback when executed</param>
/// <returns>Returns the managed threadId running the callback if one was available, otherwise -1</returns>
public int EnqueueItem(QueueItemPriority priority, WaitCallback callback, object state)
{
    int chosenThreadId = -1;

    lock (SyncRoot)
    {
        UpdateThreadWaitSleepJoinCountCache();

        //Threads actively running a job: total minus blocked (wait/sleep/join) minus those idle waiting for work
        int numInJobActiveThreadsCount = Math.Max(0, threadDict.Count - CurrentNumWaitSleepJoinThreadsCache - requireJobThreadsCount);
        //int numActiveThreads = Math.Max(0,threadDict.Count - CurrentNumWaitSleepJoinThreadsCache);

        //Case 1: no idle threads and room to grow - launch a new worker thread for this job
        if (!shutdown && requireJobThreadsCount == 0 && numInJobActiveThreadsCount < MaxActiveThreadsCount && threadDict.Count < MaxTotalThreadsCount)
        {
            //Launch a new thread
            Thread newThread = new Thread(ThreadWorker);
            newThread.Name = "ManagedThreadPool_" + newThread.ManagedThreadId.ToString();

            WorkerInfo info = new WorkerInfo(newThread.ManagedThreadId, new WaitCallBackWrapper(callback, state));

            chosenThreadId = newThread.ManagedThreadId;
            threadDict.Add(newThread.ManagedThreadId, newThread);
            workerInfoDict.Add(newThread.ManagedThreadId, info);

            newThread.Start(info);
        }
        //Case 2: at least one idle thread exists - queue the job and signal the first idle worker
        else if (!shutdown && requireJobThreadsCount > 0 && numInJobActiveThreadsCount < MaxActiveThreadsCount)
        {
            jobQueue.TryAdd(new KeyValuePair <QueueItemPriority, WaitCallBackWrapper>(priority, new WaitCallBackWrapper(callback, state)));

            int checkCount = 0;

            foreach (var info in workerInfoDict)
            {
                //Trigger the first idle thread
                checkCount++;
                if (info.Value.ThreadIdle)
                {
                    info.Value.ClearThreadIdle();
                    requireJobThreadsCount--;

                    info.Value.ThreadSignal.Set();
                    chosenThreadId = info.Value.ThreadId;

                    break;
                }

                //Invariant check: requireJobThreadsCount > 0 promised an idle worker, so
                //reaching the end of the dictionary without finding one is a pool-state corruption
                if (checkCount == workerInfoDict.Count)
                {
                    throw new Exception("IdleThreads count is " + requireJobThreadsCount.ToString() + " but unable to locate thread marked as idle.");
                }
            }
        }
        //Case 3: pool saturated - just queue the job for whichever worker frees up first
        else if (!shutdown)
        {
            //If there are no idle threads and we can't start any new ones we just have to enqueue the item
            jobQueue.TryAdd(new KeyValuePair <QueueItemPriority, WaitCallBackWrapper>(priority, new WaitCallBackWrapper(callback, state)));
        }
    }

    return(chosenThreadId);
}