/// <summary>
/// Handles an event: counts every call, counts "PerfTrace:" lines, and
/// serializes the entry once (measured) plus <c>_extraLoad</c> extra times
/// to simulate processing load.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True when called in parallel with other handlers.</param>
public override void Handle( GrandOutputEventInfo logEvent, bool parrallelCall )
{
    // NOTE(review): ++ on these counters is not atomic; if parrallelCall can be
    // true, consider Interlocked.Increment (SizeHandled already uses Interlocked).
    // TODO confirm whether these counters need exact values under parallel calls.
    ++TotalHandleCount;
    // Ordinal comparison: "PerfTrace:" is a machine-readable marker, not
    // user-facing text — culture-sensitive StartsWith could misbehave (CA1310).
    if( logEvent.Entry.LogType == LogEntryType.Line
        && logEvent.Entry.Text.StartsWith( "PerfTrace:", StringComparison.Ordinal ) )
    {
        ++HandlePerfTraceCount;
    }
    // One measured serialization...
    ComputeSize( logEvent, true );
    // ...and extra unmeasured ones to add configurable load.
    for( int i = 0; i < _extraLoad; ++i ) ComputeSize( logEvent, false );
}
/// <summary>
/// Forwards an event first to the common sink (when one exists), then to every
/// registered handler; the configuration lock, if any, is always released.
/// Errors are routed to the critical error collector instead of propagating.
/// </summary>
/// <param name="e">The event to dispatch.</param>
internal void Dispatch( GrandOutputEventInfo e )
{
    var common = CommonSink;
    if( common != null )
    {
        try
        {
            common.Handle( e, false );
        }
        catch( Exception ex )
        {
            ActivityMonitor.CriticalErrorCollector.Add( ex, "While logging event into Global sinks." );
        }
    }
    try
    {
        foreach( var handler in Handlers )
        {
            handler.Handle( e, false );
        }
    }
    catch( Exception ex )
    {
        ActivityMonitor.CriticalErrorCollector.Add( ex, "While logging event." );
    }
    finally
    {
        var configLock = ConfigLock;
        if( configLock != null ) configLock.Unlock();
    }
}
/// <summary>
/// Handles a <see cref="GrandOutputEventInfo"/> by relaying it to every
/// child handler, one after the other, in registration order.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True if this is called in parallel.</param>
public override void Handle(GrandOutputEventInfo logEvent, bool parrallelCall)
{
    foreach (var child in _children)
    {
        child.Handle(logEvent, parrallelCall);
    }
}
/// <summary>
/// Dispatches an event: common sink first (if any), then every handler.
/// Exceptions never escape — they are collected as critical errors — and the
/// configuration lock, when present, is released no matter what.
/// </summary>
/// <param name="e">The event to dispatch.</param>
internal void Dispatch(GrandOutputEventInfo e)
{
    if (CommonSink != null)
    {
        try { CommonSink.Handle(e, false); }
        catch (Exception ex) { ActivityMonitor.CriticalErrorCollector.Add(ex, "While logging event into Global sinks."); }
    }
    try
    {
        foreach (var handler in Handlers) handler.Handle(e, false);
    }
    catch (Exception ex)
    {
        ActivityMonitor.CriticalErrorCollector.Add(ex, "While logging event.");
    }
    finally
    {
        if (ConfigLock != null) ConfigLock.Unlock();
    }
}
/// <summary>
/// Routes the event to the common sink only: when <paramref name="sendToCommonSink"/>
/// is false the event is simply ignored.
/// </summary>
/// <param name="logEvent">Event to route.</param>
/// <param name="sendToCommonSink">False to skip dispatching entirely.</param>
void IChannel.Handle(GrandOutputEventInfo logEvent, bool sendToCommonSink)
{
    if (!sendToCommonSink) return;
    _dispatcher.Add(logEvent, CommonSinkOnlyReceiver);
}
/// <summary>
/// Enqueues an event bound to its final receiver.
/// </summary>
/// <param name="e">The event to enqueue.</param>
/// <param name="receiver">Receiver that will ultimately handle the event. Must not be null.</param>
/// <returns>True when the event has been queued; false when it was rejected (see DoAdd).</returns>
public bool Add(GrandOutputEventInfo e, FinalReceiver receiver)
{
    // Name the offending parameter so the exception is actionable.
    if (receiver == null) { throw new ArgumentNullException("receiver"); }
    return DoAdd(e, receiver);
}
/// <summary>
/// Serializes the event's entry into the reusable in-memory buffer and
/// synchronously sends the bytes on the "GrandOutputEventInfo" control channel.
/// </summary>
/// <param name="m">The activity monitor (not used by this implementation).</param>
/// <param name="logEvent">The event whose entry is serialized and sent.</param>
public void Handle(IActivityMonitor m, GrandOutputEventInfo logEvent)
{
    // Reuse the shared stream: truncate and rewind any previous payload.
    _memoryStream.SetLength(0);
    _memoryStream.Seek(0, SeekOrigin.Begin);
    logEvent.Entry.WriteLogEntry(_binaryWriter);
    // NOTE(review): sync-over-async (.GetAwaiter().GetResult()) blocks the calling
    // thread and risks deadlocks/thread-pool starvation; consider an async path
    // if the handler interface allows it — TODO confirm callers cannot await.
    _controlChannelClient.SendAsync("GrandOutputEventInfo", _memoryStream.ToArray()).GetAwaiter().GetResult();
}
/// <summary>
/// Serializes the entry to a throw-away in-memory stream to measure its binary
/// size, optionally accumulating that size into <c>SizeHandled</c> via Interlocked.
/// </summary>
/// <param name="logEvent">Event whose entry is serialized.</param>
/// <param name="increment">True to add the measured byte count to the shared counter.</param>
void ComputeSize( GrandOutputEventInfo logEvent, bool increment )
{
    using( var buffer = new MemoryStream() )
    using( var writer = new CKBinaryWriter( buffer ) )
    {
        logEvent.Entry.WriteLogEntry( writer );
        if( !increment ) return;
        Interlocked.Add( ref SizeHandled, (int)buffer.Position );
    }
}
/// <summary>
/// Measures the serialized binary size of the event's entry by writing it to a
/// temporary in-memory stream; optionally adds the size to the shared counter.
/// </summary>
/// <param name="logEvent">Event whose entry is serialized.</param>
/// <param name="increment">True to accumulate the measured byte count into SizeHandled.</param>
void ComputeSize(GrandOutputEventInfo logEvent, bool increment)
{
    using (MemoryStream m = new MemoryStream())
    using (CKBinaryWriter w = new CKBinaryWriter(m))
    {
        logEvent.Entry.WriteLogEntry(w);
        if (increment)
        {
            // Interlocked: presumably Handle can run in parallel (see parrallelCall) — TODO confirm.
            Interlocked.Add(ref SizeHandled, (int)m.Position);
        }
    }
}
/// <summary>
/// Queues the event to the dispatcher and buffers it locally; the use-lock is
/// signaled even when queuing throws.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="sendToCommonSink">Must be true: this channel always targets the common sink.</param>
public void Handle( GrandOutputEventInfo logEvent, bool sendToCommonSink )
{
    // Assert the boolean directly (no redundant '== true') and explain the invariant.
    Debug.Assert( sendToCommonSink, "This channel is always called with sendToCommonSink = true." );
    try
    {
        _dispatcher.Add( logEvent, _receiver );
        _buffer.Enqueue( logEvent );
    }
    finally
    {
        // Always release the waiter, even if Add/Enqueue threw.
        _useLock.Signal();
    }
}
/// <summary>
/// Handles an event: counts every call, counts "PerfTrace:" lines, and
/// serializes the entry once (measured) plus <c>_extraLoad</c> extra times
/// to simulate processing load.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True when called in parallel with other handlers.</param>
public override void Handle(GrandOutputEventInfo logEvent, bool parrallelCall)
{
    // NOTE(review): ++ on these counters is not atomic; if parrallelCall can be
    // true, consider Interlocked.Increment (SizeHandled already uses Interlocked).
    // TODO confirm whether these counters need exact values under parallel calls.
    ++TotalHandleCount;
    // Ordinal comparison: "PerfTrace:" is a machine-readable marker, not
    // user-facing text — culture-sensitive StartsWith could misbehave (CA1310).
    if (logEvent.Entry.LogType == LogEntryType.Line
        && logEvent.Entry.Text.StartsWith("PerfTrace:", StringComparison.Ordinal))
    {
        ++HandlePerfTraceCount;
    }
    // One measured serialization...
    ComputeSize(logEvent, true);
    // ...and extra unmeasured ones to add configurable load.
    for (int i = 0; i < _extraLoad; ++i)
    {
        ComputeSize(logEvent, false);
    }
}
/// <summary>
/// Queues the event to the dispatcher and buffers it locally; the use-lock is
/// signaled even when queuing throws.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="sendToCommonSink">Must be true: this channel always targets the common sink.</param>
public void Handle(GrandOutputEventInfo logEvent, bool sendToCommonSink)
{
    // Assert the boolean directly (no redundant '== true') and explain the invariant.
    Debug.Assert(sendToCommonSink, "This channel is always called with sendToCommonSink = true.");
    try
    {
        _dispatcher.Add(logEvent, _receiver);
        _buffer.Enqueue(logEvent);
    }
    finally
    {
        // Always release the waiter, even if Add/Enqueue threw.
        _useLock.Signal();
    }
}
/// <summary>
/// Core enqueue: pushes the event (or the MustStop marker when <paramref name="receiver"/>
/// is null) into the dispatch queue and wakes the dispatcher. When the overload
/// strategy rejects the event it is counted as lost, and a single "Lost Event"
/// critical error is emitted (at most once per capacity-error delay).
/// </summary>
/// <param name="e">Event to enqueue.</param>
/// <param name="receiver">Final receiver; null only for the MustStop item.</param>
/// <returns>True when queued; false when the event was lost (or no strategy is set).</returns>
bool DoAdd( GrandOutputEventInfo logEvent, bool increment ) { /* placeholder */ }
/// <summary>
/// Enqueues an event bound to its final receiver.
/// </summary>
/// <param name="e">The event to enqueue.</param>
/// <param name="receiver">Receiver that will ultimately handle the event. Must not be null.</param>
/// <returns>True when the event has been queued; false when it was rejected (see DoAdd).</returns>
public bool Add( GrandOutputEventInfo e, FinalReceiver receiver )
{
    // Name the offending parameter so the exception is actionable.
    if( receiver == null ) throw new ArgumentNullException( "receiver" );
    return DoAdd( e, receiver );
}
/// <summary>
/// Intentionally does nothing: this handler ignores every event.
/// </summary>
/// <param name="m">The activity monitor (unused).</param>
/// <param name="logEvent">The event (ignored).</param>
public void Handle(IActivityMonitor m, GrandOutputEventInfo logEvent)
{
    // No-op by design.
}
/// <summary>
/// Appends the entry to the internal builder and forwards the builder's
/// current state to the configuration.
/// </summary>
/// <param name="m">The activity monitor (not used by this implementation).</param>
/// <param name="logEvent">The event whose entry is appended.</param>
void IGrandOutputHandler.Handle(IActivityMonitor m, GrandOutputEventInfo logEvent)
{
    _builder.AppendEntry(logEvent.Entry);
    // NOTE(review): the meaning of the 'false' flag is not visible from here —
    // verify against FromSink's definition before relying on it.
    _config.FromSink(_builder.Builder, false);
}
/// <summary>
/// Writes a log entry (that can actually be a <see cref="IMulticastLogEntry"/>)
/// to the underlying file.
/// </summary>
/// <param name="logEvent">The log entry.</param>
/// <param name="parrallelCall">True if this is a parallel call (not used by this implementation).</param>
public override void Handle(GrandOutputEventInfo logEvent, bool parrallelCall)
{
    _file.Write(logEvent.Entry);
}
/// <summary>
/// Handles a <see cref="GrandOutputEventInfo"/>. Implementations define what
/// "handling" means (writing, counting, forwarding, ...).
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True when this method is called in parallel with other handlers.</param>
public abstract void Handle(GrandOutputEventInfo logEvent, bool parrallelCall);
/// <summary>
/// Core enqueue: pushes the event (or the MustStop marker when <paramref name="receiver"/>
/// is null) into the dispatch queue and wakes the dispatcher. When the overload
/// strategy rejects the event it is counted as lost, and a single "Lost Event"
/// critical error is emitted (at most once per capacity-error delay).
/// </summary>
/// <param name="e">Event to enqueue.</param>
/// <param name="receiver">Final receiver; null only for the MustStop item.</param>
/// <returns>True when queued; false when the event was lost (or no strategy is set).</returns>
bool DoAdd(GrandOutputEventInfo e, FinalReceiver receiver)
{
    bool result = true;
    Debug.Assert(e.Entry != null || receiver == null, "Only the MustStop item has null everywhere.");
    if (receiver == null)
    {
        // This is the MustStop message: enqueue it and wake the dispatcher.
        _queue.Enqueue(new EventItem(e, null));
        lock (_dispatchLock) Monitor.Pulse(_dispatchLock);
        // Ensures that if _overloadedErrorWaiting is true, a final "Lost Event" monitoring error is sent.
        _nextCapacityError = DateTime.MinValue;
        Interlocked.MemoryBarrier();
    }
    else
    {
        // Normal message.
        Interlocked.MemoryBarrier();
        // Snapshot the strategy: it may be cleared concurrently.
        var strat = _strat;
        if (strat == null) { return(false); }
        if (strat.IsOpened(ref _maxQueuedCount))
        {
            // Normal message and no queue overload detected.
            Interlocked.Increment(ref _nonBlockingCount);
            _queue.Enqueue(new EventItem(e, receiver));
            lock (_dispatchLock) Monitor.Pulse(_dispatchLock);
        }
        else
        {
            // Overload has been detected.
            // Unlock the configuration: the message will not be handled.
            if (receiver.ConfigLock != null) { receiver.ConfigLock.Unlock(); }
            Interlocked.Increment(ref _eventLostCount);
            // A new "Lost Event" monitoring error must be sent once.
            _overloadedErrorWaiting = true;
            result = false;
        }
        Interlocked.MemoryBarrier();
    }
    // Whatever happens, if a "Lost Event" monitoring error must be sent once,
    // checks to see if we must send it now.
    Interlocked.MemoryBarrier();
    if (_overloadedErrorWaiting)
    {
        // MustStop (receiver == null) uses DateTime.MaxValue to force the final error out.
        var now = receiver != null ? e.Entry.LogTime.TimeUtc : DateTime.MaxValue;
        if (now > _nextCapacityError)
        {
            // Double check locking.
            lock (_overloadLock)
            {
                if (_overloadedErrorWaiting && now > _nextCapacityError)
                {
                    ActivityMonitor.CriticalErrorCollector.Add(new CKException("GrandOutput dispatcher overload. Lost {0} total events.", _eventLostCount), null);
                    // Throttle: no new capacity error before the configured delay elapses.
                    if (receiver != null) { _nextCapacityError = now.Add(_delayBetweenCapacityError); }
                    _overloadedErrorWaiting = false;
                }
            }
        }
    }
    return(result);
}
/// <summary>
/// Writes a log entry (that can actually be a <see cref="IMulticastLogEntry"/>)
/// to the underlying file.
/// </summary>
/// <param name="logEvent">The log entry.</param>
/// <param name="parrallelCall">True if this is a parallel call (not used by this implementation).</param>
public override void Handle( GrandOutputEventInfo logEvent, bool parrallelCall )
{
    _file.Write( logEvent.Entry );
}
/// <summary>
/// Handles a <see cref="GrandOutputEventInfo"/> by relaying it to every
/// child handler, one after the other, in registration order.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True if this is called in parallel.</param>
public override void Handle( GrandOutputEventInfo logEvent, bool parrallelCall )
{
    foreach( var child in _children )
    {
        child.Handle( logEvent, parrallelCall );
    }
}
/// <summary>
/// Initializes a pairing of an event and the receiver that must handle it.
/// A null receiver denotes the special MustStop item (see DoAdd).
/// </summary>
/// <param name="e">The event.</param>
/// <param name="receiver">The final receiver, or null for MustStop.</param>
public EventItem(GrandOutputEventInfo e, FinalReceiver receiver)
{
    EventInfo = e;
    Receiver = receiver;
}
/// <summary>
/// Initializes a pairing of an event and the receiver that must handle it.
/// A null receiver denotes the special MustStop item (see DoAdd).
/// </summary>
/// <param name="e">The event.</param>
/// <param name="receiver">The final receiver, or null for MustStop.</param>
public EventItem( GrandOutputEventInfo e, FinalReceiver receiver )
{
    EventInfo = e;
    Receiver = receiver;
}
/// <summary>
/// Handles a <see cref="GrandOutputEventInfo"/> by invoking every child
/// handler concurrently; children are always told the call is parallel.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True if this is called in parallel.</param>
public override void Handle( GrandOutputEventInfo logEvent, bool parrallelCall )
{
    Parallel.ForEach( _children, child => child.Handle( logEvent, true ) );
}
/// <summary>
/// Handles a <see cref="GrandOutputEventInfo"/> by invoking every child
/// handler concurrently; children are always told the call is parallel.
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True if this is called in parallel.</param>
public override void Handle(GrandOutputEventInfo logEvent, bool parrallelCall)
{
    Parallel.For(0, _children.Length, idx =>
    {
        _children[idx].Handle(logEvent, true);
    });
}
/// <summary>
/// Handles a <see cref="GrandOutputEventInfo"/>. Implementations define what
/// "handling" means (writing, counting, forwarding, ...).
/// </summary>
/// <param name="logEvent">Event to handle.</param>
/// <param name="parrallelCall">True when this method is called in parallel with other handlers.</param>
public abstract void Handle( GrandOutputEventInfo logEvent, bool parrallelCall );