/// <summary>
/// Indicates if we can write the specified packet.
/// </summary>
/// <param name="packet"></param>
/// <returns></returns>
public static bool CanWritePacket(IMessengerPacket packet)
{
    //we don't send across all types - just a few we understand.
    if ((packet is LogMessagePacket)
        || (packet is ApplicationUserPacket)
        || (packet is ExceptionInfoPacket)
        || (packet is ThreadInfoPacket)
        || (packet is SessionSummaryPacket))
    {
#if DEBUG
        //BUG: This is a check to see why we're getting one byte session summaries.
        if (packet is SessionSummaryPacket)
        {
            if ((string.IsNullOrEmpty(((SessionSummaryPacket)packet).ProductName))
                && (Debugger.IsAttached))
            {
                Debugger.Break(); // Stop in debugger, ignore in production.
            }
        }
#endif
        return true;
    }
    else
    {
        return false;
    }
}
private void QueueToNotifier(IMessengerPacket packet)
{
    if (packet is LogMessagePacket message)
    {
        if (m_AutoSendOnError)
        {
            //Do we have an error that should be sent?
            if (m_PendingAutoSend || message.Severity <= LogMessageSeverity.Error)
            {
                //OK, but *can* we? We don't want to go too often...
                if (m_NextAutoSendAllowed < DateTimeOffset.UtcNow)
                {
                    m_NextAutoSendAllowed = DateTimeOffset.UtcNow.Add(Notifier.DefaultSendDelay);

                    //to be 100% sure we're not blocking, we'll chuck this to a task.
                    Task.Run(() => Log.SendSessions(SessionCriteria.ActiveSession, null, false, Log.SilentMode));
                }
                else
                {
                    //We are holding this send until the minimum delay expires.
                    m_PendingAutoSend = true;
                }
            }
        }

        //now that we've dealt with auto-send, let's notify everyone else.
        LogMessageNotify?.Invoke(this, new LogMessageNotifyEventArgs(message));
    }
}
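// Illustrative sketch (not part of the agent source, assumes using System): the auto-send logic above
// is a simple rate limiter - send at most once per delay window, and remember a "pending" request that
// arrived too early so the next eligible packet can trigger it. The type below is hypothetical and only
// demonstrates that pattern in isolation.
internal sealed class SendThrottle
{
    private readonly TimeSpan m_MinimumDelay;
    private DateTimeOffset m_NextSendAllowed = DateTimeOffset.MinValue;
    private bool m_PendingSend;

    public SendThrottle(TimeSpan minimumDelay)
    {
        m_MinimumDelay = minimumDelay;
    }

    /// <summary>Returns true if the caller should send now; otherwise records a pending send for later.</summary>
    public bool ShouldSend(bool triggered)
    {
        if (!triggered && !m_PendingSend)
            return false; //nothing to do - no trigger now and nothing held over from earlier.

        if (m_NextSendAllowed < DateTimeOffset.UtcNow)
        {
            m_NextSendAllowed = DateTimeOffset.UtcNow.Add(m_MinimumDelay);
            m_PendingSend = false;
            return true;
        }

        m_PendingSend = true; //hold this send until the minimum delay expires.
        return false;
    }
}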
/// <summary>
/// Inheritors must override this method to implement their custom message writing functionality.
/// </summary>
/// <remarks>Code in this method is protected by a Queue Lock.
/// This method is called with the Message Dispatch thread exclusively.</remarks>
protected override void OnWrite(IMessengerPacket packet, bool writeThrough, ref MaintenanceModeRequest maintenanceRequested)
{
    //Do we have a serializer opened?
    if (m_CurrentSerializer == null)
    {
        //we do not. we need to open a file.
        OpenFile();
    }

    //now write to the file
    m_CurrentSerializer.Write(packet);

    if (writeThrough)
    {
        OnFlush();
    }

    //and do we need to request maintenance?
    if (m_CurrentFile.Length > m_MaxFileSizeBytes)
    {
        maintenanceRequested = MaintenanceModeRequest.Regular;
    }
    else if (DateTime.Now > m_FileExpiration)
    {
        maintenanceRequested = MaintenanceModeRequest.Regular;
    }
}
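// Illustrative sketch (hypothetical helper, not the agent's API): the maintenance check above rolls
// the log file over when it gets too big *or* too old. A standalone helper expressing that same
// decision might look like this.
internal static class FileRolloverPolicy
{
    /// <summary>Decide whether a log file should be rolled over.</summary>
    /// <param name="currentLengthBytes">Current size of the file in bytes.</param>
    /// <param name="maxLengthBytes">Maximum allowed size before rollover.</param>
    /// <param name="expiration">Local time after which the file is considered too old.</param>
    public static bool ShouldRollOver(long currentLengthBytes, long maxLengthBytes, DateTime expiration)
    {
        //either condition alone is enough to request maintenance.
        return (currentLengthBytes > maxLengthBytes) || (DateTime.Now > expiration);
    }
}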
/// <inheritdoc />
public void Process(IMessengerPacket packet, ref bool cancel)
{
    if (_func != null)
    {
        try
        {
            cancel = !_func(packet); //note we are inverting the boolean - most people think of functions returning true for success.
        }
        catch (Exception ex)
        {
            GC.KeepAlive(ex);
        }
    }
    else
    {
        try
        {
            _action(packet);
        }
        catch (Exception ex)
        {
            GC.KeepAlive(ex);
        }
    }
}
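// Illustrative sketch (hypothetical delegate, assumes using System): a wrapper like the one above is
// handed either a Func<IMessengerPacket, bool> ("return true to keep the packet") or an
// Action<IMessengerPacket> (observe only, never cancels). The example below just shows how the
// inversion plays out - returning false from the func causes cancel to become true.
internal static class FilterExamples
{
    //hypothetical predicate: keep everything except verbose log messages.
    internal static readonly Func<IMessengerPacket, bool> DropVerboseMessages = p =>
    {
        var message = p as LogMessagePacket;
        if (message == null)
            return true; //not a log message - keep it.

        return message.Severity != LogMessageSeverity.Verbose; //false means "cancel this packet".
    };
}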
/// <summary>
/// Perform the actual package queuing and wait for it to be committed.
/// </summary>
/// <remarks>This must be done within the message queue lock. This method may return a null envelope if called
/// on a thread which must not block and the packet had to be discarded due to an overflow condition.</remarks>
/// <param name="packet">The packet to be queued</param>
/// <param name="writeThrough">True if the call should block the current thread until the packet has been committed,
/// false otherwise.</param>
/// <returns>The packet envelope for the packet that was queued, or null if the packet was discarded.</returns>
private PacketEnvelope QueuePacket(IMessengerPacket packet, bool writeThrough)
{
    //even though the packet might already have a timestamp that's preferable to ours, we're deciding
    //we're the judge of order to ensure it aligns with sequence.
    packet.Timestamp = DateTimeOffset.Now; //we convert to UTC during serialization, we want local time.

    //wrap it in a packet envelope and indicate we're in write through mode.
    PacketEnvelope packetEnvelope = new PacketEnvelope(packet, writeThrough);

    //But what queue do we put the packet in?
    if ((m_MessageOverflowQueue.Count > 0) || (m_MessageQueue.Count > m_MessageQueueMaxLength))
    {
        // We are currently using the overflow queue, so we'll put it there.
        // However, if we were called by a must-not-block thread, we want to discard overflow packets...
        // unless it's a command packet, which is too important to discard (it just won't wait on pending).
        if (t_ThreadMustNotBlock && !packetEnvelope.IsCommand)
        {
            packetEnvelope = null; // We won't queue this packet, so there's no envelope to hang onto.
        }
        else
        {
            m_MessageOverflowQueue.Enqueue(packetEnvelope);

            //and set that it's pending so our caller knows they need to wait for it.
            packetEnvelope.IsPending = true;
        }
    }
    else
    {
        //just queue the packet, we don't want to wait.
        m_MessageQueue.Enqueue(packetEnvelope);
    }

    return packetEnvelope;
}
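// Illustrative sketch (hypothetical type, assumes using System.Collections.Generic): the two-queue
// strategy above degrades gracefully - packets go to the primary queue until it fills, then to an
// overflow queue whose entries are marked pending so publishers block until the dispatcher catches up,
// while threads that must never block simply drop non-command packets once overflow begins. A
// simplified standalone model of that decision:
internal sealed class TwoStageQueue<T> where T : class
{
    private readonly Queue<T> m_Primary = new Queue<T>();
    private readonly Queue<T> m_Overflow = new Queue<T>();
    private readonly int m_MaxPrimaryLength;

    public TwoStageQueue(int maxPrimaryLength)
    {
        m_MaxPrimaryLength = maxPrimaryLength;
    }

    /// <summary>Queue an item. Returns true if the caller should wait on it (it went to overflow);
    /// discards the item instead if mustNotBlock is set while overflowing.</summary>
    public bool Enqueue(T item, bool mustNotBlock, out bool discarded)
    {
        discarded = false;

        if ((m_Overflow.Count > 0) || (m_Primary.Count > m_MaxPrimaryLength))
        {
            if (mustNotBlock)
            {
                discarded = true; //caller can't wait and we're overflowing, so drop it.
                return false;
            }

            m_Overflow.Enqueue(item);
            return true; //caller should wait on this item being committed.
        }

        m_Primary.Enqueue(item);
        return false;
    }
}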
private void QueueToNotifier(IMessengerPacket packet)
{
    if (packet is LogMessagePacket message)
    {
        if (m_AutoSendOnError)
        {
            //Do we have an error that should be sent?
            if (m_PendingAutoSend || message.Severity <= LogMessageSeverity.Error)
            {
                //OK, but *can* we? We don't want to go too often...
                if (m_NextAutoSendAllowed < DateTimeOffset.UtcNow)
                {
                    try
                    {
                        Task.Run(() => Log.SendSessions(SessionCriteria.ActiveSession, null, true));
                    }
                    catch (Exception)
                    {
                        //we never want to log this because we're in the middle of the publisher pipeline.
                    }

                    m_NextAutoSendAllowed = DateTimeOffset.UtcNow.Add(Notifier.DefaultSendDelay);
                }
                else
                {
                    //We are holding this send until the minimum delay expires.
                    m_PendingAutoSend = true;
                }
            }
        }

        //now that we've dealt with auto-send, let's notify everyone else.
        LogMessageNotify?.Invoke(this, new LogMessageNotifyEventArgs(message));
    }
}
private void QueueToNotifier(IMessengerPacket packet)
{
    LogMessageNotifyEventHandler notifyEvent = LogMessageNotify;
    if (notifyEvent != null)
    {
        notifyEvent(this, new LogMessageNotifyEventArgs(packet));
    }
}
public void Process(IMessengerPacket packet, ref bool cancel)
{
    if (packet is LogMessagePacket logMessagePacket)
    {
        logMessagePacket.Caption = logMessagePacket.Caption?.Replace(_FindWord, _ReplaceWord);
        logMessagePacket.Description = logMessagePacket.Description?.Replace(_FindWord, _ReplaceWord);
        logMessagePacket.Details = logMessagePacket.Details?.Replace(_FindWord, _ReplaceWord);

        if (logMessagePacket.HasException)
        {
            foreach (var exception in logMessagePacket.Exceptions.Cast<ExceptionInfoPacket>())
            {
                exception.Message = exception.Message?.Replace(_FindWord, _ReplaceWord);
            }
        }
    }
}
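// Illustrative sketch (hypothetical class, assumes using System): a find/replace scrubbing filter like
// the Process method above typically just captures the two words up front; the null-conditional Replace
// calls keep it safe for packets whose Caption/Description/Details were never set. How the filter gets
// registered with the logging pipeline is library-specific and not shown here.
public class MessageScrubFilter
{
    private readonly string _FindWord;
    private readonly string _ReplaceWord;

    public MessageScrubFilter(string findWord, string replaceWord)
    {
        if (string.IsNullOrEmpty(findWord))
            throw new ArgumentNullException(nameof(findWord));

        _FindWord = findWord;
        _ReplaceWord = replaceWord ?? string.Empty; //replacing with nothing is a valid way to redact.
    }

    //the Process(IMessengerPacket, ref bool) method shown above would be the filter's only other member.
}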
/// <summary>
/// Write the provided packet to the client stream (synchronously)
/// </summary>
/// <param name="packet"></param>
public void Write(IMessengerPacket packet)
{
    lock (m_Lock)
    {
        if ((IsFailed) || (IsClosed))
        {
            return;
        }

        //we don't send across all types - just a few we understand.
        if (CanWritePacket(packet))
        {
            SendPacket(packet);
        }
    }
}
//public event EventHandler PacketCommitted;

public PacketEnvelope(IMessengerPacket packet, bool writeThrough)
{
    m_Packet = packet;
    m_WriteThrough = writeThrough;

    if (packet is CommandPacket)
    {
        m_IsCommand = true;
    }
    else
    {
        m_IsCommand = false;
    }

    ICachedMessengerPacket cachedPacket = packet as ICachedMessengerPacket;
    if (cachedPacket != null)
    {
        m_IsHeader = cachedPacket.IsHeader;
    }
}
private void QueuePacket(IMessengerPacket messengerPacket)
{
    LogMessagePacket packet = messengerPacket as LogMessagePacket;
    if (packet == null || packet.SuppressNotification)
    {
        return;
    }

    if (packet.Severity > m_MinimumSeverity) // Severity compares in reverse. Critical = 1, Verbose = 16.
    {
        return; // Bail if this packet doesn't meet the minimum severity we care about.
    }

    lock (m_MessageQueueLock)
    {
        if (m_NotificationEvent == null) // Check for unsubscribe race condition.
        {
            return; // Don't add it to the queue if there are no subscribers.
        }

        int messageQueueLength = m_MessageQueue.Count;
        if (messageQueueLength < m_MessageQueueMaxLength)
        {
            if (messageQueueLength <= 0) // First new one: Wait for a burst to collect.
            {
                m_BurstCollectionWait = DateTimeOffset.MinValue; // Clear it so we'll reset the wait clock.
            }

            m_MessageQueue.Enqueue(packet);

            // If there were already messages in our queue, it's waiting on a timeout, so don't bother pulsing it.
            // But if there were no messages in the queue, we need to make sure it's not waiting forever!
            if (messageQueueLength <= 0 || DateTimeOffset.Now >= m_NextNotifyAfter)
            {
                System.Threading.Monitor.PulseAll(m_MessageQueueLock);
            }
        }
    }

    EnsureNotificationThreadIsValid();
}
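// Illustrative sketch: because lower numeric values are *more* severe (the comment above notes
// Critical = 1 and Verbose = 16, which mirrors System.Diagnostics.TraceEventType - the intermediate
// values are an assumption here), "at least as severe as the minimum" is expressed with <= and
// "not severe enough" with >. The helper below is hypothetical and only restates that inverted comparison.
internal static class SeverityExamples
{
    /// <summary>True if the candidate severity meets or exceeds (is at least as severe as) the minimum.</summary>
    internal static bool MeetsMinimum(LogMessageSeverity candidate, LogMessageSeverity minimum)
    {
        //Critical (1) <= Error is true: a Critical message meets an Error minimum.
        //Verbose (16) <= Error is false: a Verbose message does not.
        return candidate <= minimum;
    }
}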
/// <summary>
/// Inheritors must override this method to implement their custom message writing functionality.
/// </summary>
/// <remarks>Code in this method is protected by a Queue Lock.
/// This method is called with the Message Dispatch thread exclusively.</remarks>
protected override void OnWrite(IMessengerPacket packet, bool writeThrough, ref MaintenanceModeRequest maintenanceRequested)
{
    if (m_IsClosed) //we act like we're closed as soon as we receive exit mode, so we will still get writes after that.
    {
        return;
    }

    if (NetworkWriter.CanWritePacket(packet))
    {
        lock (m_ActiveClients) //between caching and writing to the active clients we need to be consistent.
        {
            //queue it for later clients
            CachePacket(packet);

            //send the packet to all our clients
            foreach (var activeClient in m_ActiveClients)
            {
                try
                {
                    //if we run into a failed active client it's because it hasn't yet been pruned from the active list,
                    //so we need to go into maintenance
                    if ((activeClient.IsFailed) || (activeClient.IsClosed))
                    {
                        maintenanceRequested = MaintenanceModeRequest.Regular;
                    }
                    else
                    {
                        activeClient.Write(packet);
                    }
                }
                catch (Exception ex)
                {
                    GC.KeepAlive(ex);
                }
            }
        }
    }
}
private void CachePacket(IMessengerPacket packet)
{
    // Make sure this is actually a message, not null.
    if (packet == null)
    {
        Log.DebugBreak(); // This shouldn't happen, and we'd like to know if it is, so stop here if debugging.
        return; // Otherwise, just return; we don't want to throw exceptions.
    }

    lock (m_ActiveClients) //we are kept in sync with active client activity.
    {
        if (m_BufferSize > 0)
        {
            m_Buffer.Enqueue(packet);
        }

        while (m_Buffer.Count > m_BufferSize)
        {
            m_Buffer.Dequeue(); //discard older excess.
        }
    }
}
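// Illustrative sketch (hypothetical type, assumes using System.Collections.Generic): the cache above
// is a simple bounded FIFO - enqueue the newest packet, then trim from the front until the count is
// back within the limit, so the most recent N packets survive for late-joining clients.
internal sealed class BoundedPacketBuffer
{
    private readonly Queue<IMessengerPacket> m_Buffer = new Queue<IMessengerPacket>();
    private readonly int m_MaxCount;

    public BoundedPacketBuffer(int maxCount)
    {
        m_MaxCount = maxCount;
    }

    public void Add(IMessengerPacket packet)
    {
        if (m_MaxCount > 0)
        {
            m_Buffer.Enqueue(packet);
        }

        while (m_Buffer.Count > m_MaxCount)
        {
            m_Buffer.Dequeue(); //discard the oldest packets first.
        }
    }

    public IMessengerPacket[] Snapshot() => m_Buffer.ToArray();
}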
private void StampPacket(IMessengerPacket packet, DateTimeOffset defaultTimeStamp)
{
#if DEBUG
    Debug.Assert(defaultTimeStamp.Ticks > 0);
#endif

    //we don't check dependencies on command packets, it'll fail (and they aren't written out)
    if ((packet is CommandPacket) == false)
    {
        //check our dependent packets to see if they've been stamped.
        Dictionary<IPacket, IPacket> dependentPackets = GetRequiredPackets(packet);

        if ((dependentPackets != null) && (dependentPackets.Count > 0))
        {
            //we only have to check these packets, not their children, because if they've been stamped, their children have.
            foreach (IPacket dependentPacket in dependentPackets.Values)
            {
                IMessengerPacket dependentMessengerPacket = dependentPacket as IMessengerPacket;
                if ((dependentMessengerPacket != null)
                    && (dependentMessengerPacket.Sequence == 0) //our quickest bail check - if it has a nonzero sequence it's definitely been stamped.
                    && (dependentMessengerPacket.Timestamp.Ticks == 0))
                {
                    //stamp this guy first, we depend on him and he's not been stamped.
                    StampPacket(dependentMessengerPacket, defaultTimeStamp);
                }
            }
        }
    }

    packet.Sequence = m_PacketSequence;
    m_PacketSequence++; //yeah, this could have been on the previous line. but hey, this is really clear on order.

    //make sure we have a timestamp - if there isn't one use the default
    //(which is the timestamp of the packet that depended on us or earlier)
    if (packet.Timestamp.Ticks == 0)
    {
        packet.Timestamp = defaultTimeStamp;
    }
}
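// Illustrative sketch (hypothetical types, assumes using System.Collections.Generic): the recursion
// above guarantees that anything a packet depends on is stamped - and therefore sequenced - before
// the packet itself, so a reader replaying the stream by sequence number always sees dependencies
// first. A stripped-down model of that ordering (assumes the dependency graph has no cycles):
internal sealed class SequenceStamper
{
    private long m_NextSequence = 1;

    /// <summary>Stamp dependencies first, then the node itself, so dependencies always get lower numbers.</summary>
    public void Stamp(Node node)
    {
        foreach (var dependency in node.Dependencies)
        {
            if (dependency.Sequence == 0) //zero means "not stamped yet".
            {
                Stamp(dependency);
            }
        }

        if (node.Sequence == 0)
        {
            node.Sequence = m_NextSequence++;
        }
    }

    public sealed class Node
    {
        public long Sequence;
        public List<Node> Dependencies = new List<Node>();
    }
}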
public void Process(IMessengerPacket packet, ref bool cancel)
{
    Interlocked.Increment(ref _FilterRequests);
}
/// <summary>
/// Register the network writer to receive information and get it in sync with the current packet stream
/// </summary>
/// <remarks>If the network writer was previously activated then it will be re-activated.</remarks>
internal void ActivateWriter(NetworkWriter writer, long sequenceOffset = 0)
{
    //dump the queue to it....
    try
    {
        if (!Log.SilentMode)
        {
            Log.Write(LogMessageSeverity.Verbose, LogCategory, "New remote network viewer connection starting",
                "We will process the connection attempt and feed it our buffered data.\r\nRemote Endpoint: {0}\r\nSequence Offset: {1:N0}",
                writer, sequenceOffset);
        }

        lock (m_ActiveClients) //we can't have a gap between starting to dump the buffer and the buffer changing.
        {
            //write out every header packet to the stream
            ICachedMessengerPacket[] headerPackets = HeaderPackets;
            if (headerPackets != null)
            {
                writer.Write(headerPackets);
            }

            var bufferContents = m_Buffer.ToArray();
            if ((sequenceOffset > 0) && (bufferContents.Length > 0))
            {
                //they have all the packets up through the sequence offset, so only send later packets
                if (bufferContents[0].Sequence > sequenceOffset)
                {
                    //All of our packets qualify because even the first one is after our offset.
                    //So we just use bufferContents unmodified.
                }
                else if (bufferContents[bufferContents.Length - 1].Sequence <= sequenceOffset)
                {
                    //*none* of our packets qualify - the offset is at the end of our buffer - so just clear it.
                    bufferContents = new IMessengerPacket[0];
                }
                else
                {
                    //figure out exactly where in the buffer we should be.
                    int firstPacketOffset = 0; //we know the zeroth packet should not be included because we checked that above.
                    for (int packetBufferIndex = bufferContents.Length - 2; packetBufferIndex >= 0; packetBufferIndex--) //we iterate backwards because if they have any offset they're likely close to current.
                    {
                        if (bufferContents[packetBufferIndex].Sequence <= sequenceOffset)
                        {
                            //This is the first packet we should *skip* so the first offset to take is up one.
                            firstPacketOffset = packetBufferIndex + 1;
                            break; //the buffer is in sequence order, so the first match from the end is the boundary.
                        }
                    }

                    var offsetBuffer = new IMessengerPacket[bufferContents.Length - firstPacketOffset]; //inclusive

                    //we've been trying unsuccessfully to isolate why we're getting an exception that the destination array isn't long enough.
                    try
                    {
                        Array.Copy(bufferContents, firstPacketOffset, offsetBuffer, 0, bufferContents.Length - firstPacketOffset);
                        bufferContents = offsetBuffer;
                    }
                    catch (ArgumentException ex)
                    {
                        Log.Write(LogMessageSeverity.Error, LogWriteMode.Queued, ex, false, LogCategory,
                            "Unable to create offset buffer due to " + ex.GetType(),
                            "Original Buffer Length: {0}\r\nFirst Packet Offset: {1}\r\nOffset Buffer Length: {2}",
                            bufferContents.Length, firstPacketOffset, offsetBuffer.Length);
                    }
                }
            }

            if (bufferContents.Length > 0)
            {
                writer.Write(bufferContents);
            }

            //and mark it active if that succeeded
            if (writer.IsFailed == false)
            {
                //note that it may have been previously registered so we need to be cautious about this.
                if (m_ActiveClients.Contains(writer) == false)
                {
                    m_ActiveClients.Add(writer);
                }
            }

            //if it didn't succeed it should raise its failed event, and in turn we will eventually dispose it in due course.
        }
    }
    catch (Exception ex)
    {
        if (!Log.SilentMode)
        {
            Log.RecordException(0, ex, null, LogCategory, true);
        }
    }
}
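// Illustrative sketch (hypothetical helper using plain longs instead of packets, assumes using System):
// the slicing above has three cases - everything in the buffer is newer than the offset (send it all),
// nothing is newer (send nothing), or the boundary sits inside the buffer and is found by scanning back
// from the end. A note on the backward scan: since the buffer is sorted by ascending sequence, the scan
// can stop at the first match it finds, which is why the break was added above.
internal static class SequenceSliceExamples
{
    /// <summary>Return the items whose sequence number is greater than the supplied offset.
    /// Assumes the buffer is sorted by ascending sequence.</summary>
    internal static long[] After(long[] buffer, long sequenceOffset)
    {
        if (buffer.Length == 0 || buffer[0] > sequenceOffset)
            return buffer; //everything qualifies (or there's nothing to slice).

        if (buffer[buffer.Length - 1] <= sequenceOffset)
            return new long[0]; //nothing qualifies.

        int firstToTake = 1;
        for (int index = buffer.Length - 2; index >= 0; index--) //scan backwards - the boundary is usually near the end.
        {
            if (buffer[index] <= sequenceOffset)
            {
                firstToTake = index + 1; //skip this one and everything before it.
                break;
            }
        }

        var result = new long[buffer.Length - firstToTake];
        Array.Copy(buffer, firstToTake, result, 0, result.Length);
        return result;
    }
}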
internal PacketEventArgs(IMessengerPacket packet) { m_Packet = packet; }
/// <summary>
/// Publish the provided batch of packets.
/// </summary>
/// <param name="packetArray">An array of packets to publish as a batch.</param>
/// <param name="writeThrough">True if the information contained in packet should be committed synchronously,
/// false if the publisher can use write caching (when available).</param>
public void Publish(IMessengerPacket[] packetArray, bool writeThrough)
{
    // Sanity-check the most likely no-op cases before we bother with the lock
    if (packetArray == null)
    {
        return;
    }

    // Check for nulls from the end to find the last valid packet.
    int count = packetArray.Length;
    int lastIndex = count - 1;
    while (lastIndex >= 0 && packetArray[lastIndex] == null)
    {
        lastIndex--;
    }

    if (lastIndex < 0)
    {
        return; // An array of only null packets (or empty), just quick bail.  Don't bother with the lock.
    }

    //resolve users...
    var resolver = m_PrincipalResolver;
    IPrincipal principal = null;
    if (resolver != null)
    {
        //and set that user to each packet that wants to track the current user (and doesn't have one manually set)
        foreach (var packet in packetArray.AsEnumerable().OfType<IUserPacket>().Where(p => p.Principal == null))
        {
            //we only want to resolve the principal once per block, even if there are multiple messages.
            if (principal == null)
            {
                //before we resolve the principal make sure our thread isn't *currently* trying to resolve a principal.
                if (!t_ThreadMustNotResolvePrincipal)
                {
                    try
                    {
                        t_ThreadMustNotResolvePrincipal = true;
                        var resolved = resolver.TryResolveCurrentPrincipal(out principal);
                        if (resolved == false)
                        {
                            principal = null; //in case they broke the contract..
                        }
                    }
                    catch (Exception ex)
                    {
                        Log.DebugBreak();
                        GC.KeepAlive(ex);
                    }
                    finally
                    {
                        t_ThreadMustNotResolvePrincipal = false;
                    }
                }

                if (principal == null)
                {
                    break; //no point in keeping trying if we failed to resolve the principal..
                }
            }

            packet.Principal = principal;
        }
    }

    PacketEnvelope lastPacketEnvelope = null;
    bool effectiveWriteThrough;
    bool isPending;
    int queuedCount = 0;

    // Get the queue lock.
    lock (m_MessageQueueLock)
    {
        if (m_Shutdown) // If we're already shut down, just bail.  We'll never process it anyway.
        {
            return;
        }

        // Check to see if either the overall force write through or the local write through are set...
        // or if we are in ExitingMode.  In those cases, we'll want to block until the packet is committed.
        effectiveWriteThrough = (m_ForceWriteThrough || writeThrough || m_ExitingMode);

        for (int i = 0; i < count; i++)
        {
            IMessengerPacket packet = packetArray[i];

            // We have to double-check each element for null, or QueuePacket() would barf on it.
            if (packet != null)
            {
                // We have a real packet, so queue it.  Only WriteThrough for the last packet, to flush the rest.
                PacketEnvelope packetEnvelope = QueuePacket(packet, effectiveWriteThrough && i >= lastIndex);

                // If a null is returned, the packet wasn't queued, so don't overwrite lastPacketEnvelope.
                if (packetEnvelope != null)
                {
                    queuedCount++;
                    lastPacketEnvelope = packetEnvelope; // Keep track of the last one queued.

                    if (!m_ExitMode && packetEnvelope.IsCommand)
                    {
                        CommandPacket commandPacket = (CommandPacket)packet;
                        if (commandPacket.Command == MessagingCommand.ExitMode)
                        {
                            // Once we *receive* an ExitMode command, all subsequent messages queued
                            // need to block, to make sure the process stays alive for any final logging
                            // foreground threads might have.  We will be switching the Publisher to a
                            // background thread when we process the ExitMode command so we don't hold
                            // up the process beyond its own foreground threads.
                            m_ExitingMode = true; // Force writeThrough blocking from now on.

                            // Set the ending status, if it needs to be (probably won't).
                            SessionStatus endingStatus = (SessionStatus)commandPacket.State;
                            if (m_SessionSummary.Status < endingStatus)
                            {
                                m_SessionSummary.Status = endingStatus;
                            }
                        }
                    }
                }
            }
        }

        if (effectiveWriteThrough && t_ThreadMustNotBlock == false && queuedCount > 0
            && (lastPacketEnvelope == null || ReferenceEquals(lastPacketEnvelope.Packet, packetArray[lastIndex]) == false))
        {
            // The expected WriteThrough packet got dropped because of overflow?  But we still need to block until
            // those queued have completed, so issue a specific Flush command packet, which should not get dropped.
            CommandPacket flushPacket = new CommandPacket(MessagingCommand.Flush);
            PacketEnvelope flushEnvelope = QueuePacket(flushPacket, true);
            if (flushEnvelope != null)
            {
                lastPacketEnvelope = flushEnvelope;
            }
        }

        // Grab the pending flag before we release the lock so we know we have a consistent view.
        // If we didn't queue any packets then lastPacketEnvelope will be null and there's nothing to be pending.
        isPending = (lastPacketEnvelope == null) ? false : lastPacketEnvelope.IsPending;

        // Now signal our next thread that might be waiting that the lock will be released.
        System.Threading.Monitor.PulseAll(m_MessageQueueLock);
    }

    // Make sure our dispatch thread is still going.  This has its own independent locking (when necessary),
    // so we don't need to hold up other threads that are publishing.
    EnsureMessageDispatchThreadIsValid();

    if (lastPacketEnvelope == null || t_ThreadMustNotBlock)
    {
        // If we had no actual packets queued (e.g. shutdown, or no packets to queue), there's nothing to wait on.
        // Also, special case for must-not-block threads.  Once it's on the queue (or not), don't wait further.
        // We need the thread to get back to processing stuff off the queue or we're deadlocked!
        return;
    }

    // See if we need to wait because we've degraded to synchronous message handling due to a backlog of messages
    if (isPending)
    {
        // This routine does its own locking so we don't need to interfere with the nominal case of
        // not needing to pend.
        WaitOnPending(lastPacketEnvelope);
    }

    // Finally, if we need to wait on the write to complete now we want to stall.  We had to do this outside of
    // the message queue lock to ensure we don't block other threads.
    if (effectiveWriteThrough)
    {
        WaitOnPacket(lastPacketEnvelope);
    }
}
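// Illustrative usage note (assumption - the packet types are internal to the agent, so this is only a
// hypothetical call shape, not runnable as-is): only the last packet of a batch is marked write-through;
// committing it necessarily flushes the earlier packets queued ahead of it, so a blocking caller waits
// exactly once per batch rather than once per packet.
//
//     IMessengerPacket[] batch = { threadInfoPacket, logMessagePacket };
//     publisher.Publish(batch, writeThrough: true); // blocks until the whole batch is committed.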
internal LogMessageNotifyEventArgs(IMessengerPacket packet) { Packet = packet; }