private void CommonHandle(Message message) {
    if (BlockWriter && !(message is SystemMessage.StateChangeMessage)) {
        Log.Verbose("Blocking message {message} in StorageWriterService. Message:", message.GetType().Name);
        Log.Verbose("{message}", message);
        return;
    }

    if (_vnodeState != VNodeState.Leader && _vnodeState != VNodeState.ResigningLeader &&
        message is StorageMessage.ILeaderWriteMessage) {
        Log.Fatal("{message} appeared in StorageWriter during state {vnodeState}.",
            message.GetType().Name, _vnodeState);
        var msg = String.Format("{0} appeared in StorageWriter during state {1}.",
            message.GetType().Name, _vnodeState);
        Application.Exit(ExitCode.Error, msg);
        return;
    }

    try {
        _writerBus.Handle(message);
    } catch (Exception exc) {
        BlockWriter = true;
        Log.Fatal(exc, "Unexpected error in StorageWriterService. Terminating the process...");
        Application.Exit(ExitCode.Error,
            string.Format("Unexpected error in StorageWriterService: {0}", exc.Message));
    }
}
private Task Log(LogMessage logMessage) {
    switch (logMessage.Severity) {
        case LogSeverity.Critical:
            _logger.Fatal(logMessage.ToString());
            break;
        case LogSeverity.Error:
            _logger.Error(logMessage.ToString());
            break;
        case LogSeverity.Warning:
            _logger.Warning(logMessage.ToString());
            break;
        case LogSeverity.Info:
            _logger.Information(logMessage.ToString());
            break;
        case LogSeverity.Verbose:
        case LogSeverity.Debug:
            _logger.Debug(logMessage.ToString());
            break;
        default:
            _logger.Fatal(logMessage.ToString());
            break;
    }

    return Task.CompletedTask;
}
void OnConnectionErrorOccurred(object sender, ClientErrorEventArgs args) {
    Log.Error(args.Exception, "EventStore connection with id = {ConnectionId} error occurred",
        args.Connection.ConnectionName);
    _onError?.Invoke(args.Exception);

    var exception = (args.Exception as AggregateException)?.GetBaseException() ?? args.Exception;
    switch (exception) {
        case RetriesLimitReachedException retriesLimitReached:
            Log.Fatal(retriesLimitReached,
                "EventStore connection's limit of reconnection or operation retries reached. " +
                "Stopping service...");
            _onFatalFailure();
            break;
        case ClusterException clusterException:
            Log.Fatal(clusterException,
                "EventStore connection could not establish link with EventStore cluster. " +
                "Maximum number of cluster connection attempts reached. " +
                "Stopping service...");
            _onFatalFailure();
            break;
        default:
            Log.Warning(exception, "Unhandled EventStore connection error");
            break;
    }
}
public IActionResult Get() {
    _logger.Error("Data Critical Added Successfully");
    _logger.Fatal("Data Error Added Successfully");
    _logger.Error("Data saved as information {@DateTime}", DateTime.Now);
    return Ok("Success");
}
/// <summary>
/// Logs the specified log event.
/// </summary>
/// <param name="logEvent">The <see cref="T:Swan.LogMessageReceivedEventArgs" /> instance containing the event data.</param>
public void Log(LogMessageReceivedEventArgs logEvent) {
    switch (logEvent.MessageType) {
        case LogLevel.None:
            break;
        case LogLevel.Info:
            _logger.Information("{@logEvent}", logEvent);
            break;
        case LogLevel.Trace:
            _logger.Verbose("{@logEvent}", logEvent);
            break;
        case LogLevel.Debug:
            _logger.Debug("{@logEvent}", logEvent);
            break;
        case LogLevel.Warning:
            _logger.Warning("{@logEvent}", logEvent);
            break;
        case LogLevel.Error:
            _logger.Error("{@logEvent}", logEvent);
            break;
        case LogLevel.Fatal:
            _logger.Fatal("{@logEvent}", logEvent);
            break;
    }
}
private void TrackReplication() {
    try {
        while (!_stop) {
            _replicationChange.Reset();
            if (_state == VNodeState.Leader) {
                // Publish Log Commit Position
                var newPos = _replicationCheckpoint.Read();
                if (newPos > Interlocked.Read(ref _publishedPosition)) {
                    _publisher.Publish(new ReplicationTrackingMessage.ReplicatedTo(newPos));
                    Interlocked.Exchange(ref _publishedPosition, newPos);
                }
            }
            _replicationChange.Wait(100);
        }
    } catch (Exception exc) {
        _log.Fatal(exc, $"Error in {nameof(ReplicationTrackingService)}. Terminating...");
        _tcs.TrySetException(exc);
        Application.Exit(ExitCode.Error,
            $"Error in {nameof(ReplicationTrackingService)}. Terminating...\nError: " + exc.Message);
        //TODO(clc): is this right, are we waiting for someone to clean us up???
        while (!_stop) {
            Thread.Sleep(100);
        }
    }

    _publisher.Publish(new SystemMessage.ServiceShutdown(nameof(ReplicationTrackingService)));
}
public void Log(LogEntry entry) {
    switch (entry.Severity) {
        case LoggingEventType.Verbose:
            _logger.Verbose(entry.Exception, entry.Message, entry.Args);
            break;
        case LoggingEventType.Debug:
            _logger.Debug(entry.Exception, entry.Message, entry.Args);
            break;
        case LoggingEventType.Information:
            _logger.Information(entry.Exception, entry.Message, entry.Args);
            break;
        case LoggingEventType.Warning:
            _logger.Warning(entry.Exception, entry.Message, entry.Args);
            break;
        case LoggingEventType.Error:
            _logger.Error(entry.Exception, entry.Message, entry.Args);
            break;
        case LoggingEventType.Fatal:
            _logger.Fatal(entry.Exception, entry.Message, entry.Args);
            break;
        default:
            throw new ArgumentException($"Severity {entry.Severity} is not supported by SerilogAdapter", "Severity");
    }
}
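// A hypothetical usage sketch for the SerilogAdapter above. The LogEntry and
// LoggingEventType shapes below, and the adapter constructor taking a
// Serilog.ILogger, are assumptions for illustration; only the Log method is
// taken from the original snippet.
public enum LoggingEventType { Verbose, Debug, Information, Warning, Error, Fatal }

public class LogEntry {
    public LoggingEventType Severity { get; set; }
    public string Message { get; set; }
    public Exception Exception { get; set; }
    public object[] Args { get; set; } = Array.Empty<object>();
}

// Route an application-level entry through the Serilog-backed adapter:
var adapter = new SerilogAdapter(Serilog.Log.Logger);
adapter.Log(new LogEntry {
    Severity = LoggingEventType.Fatal,
    Message = "Unrecoverable failure in {Component}",
    Exception = new InvalidOperationException("boom"),
    Args = new object[] { "Worker" }
});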
public bool WriteCore(TraceEventType eventType, int eventId, object state, Exception exception,
    Func<object, Exception, string> formatter) {
    var log = new OwinContextLog(eventId, formatter(state, exception));
    switch (eventType) {
        case TraceEventType.Critical:
            _logger.Fatal(exception, SerilogMessage, log);
            return true;
        case TraceEventType.Error:
            _logger.Error(exception, SerilogMessage, log);
            return true;
        case TraceEventType.Information:
            _logger.Information(exception, SerilogMessage, log);
            return true;
        case TraceEventType.Warning:
            _logger.Warning(exception, SerilogMessage, log);
            return true;
        case TraceEventType.Verbose:
            _logger.Verbose(exception, SerilogMessage, log);
            return true;
        default:
            return false;
    }
}
public void Log(object sender, LogEventArgs e) {
    switch (e.Severity) {
        case LogSeverity.Debug:
            var debugMsg = CreatetMessage(e.Message);
            _logger.Debug(debugMsg);
            break;
        case LogSeverity.Info:
            var infoMsg = CreatetMessage(e.Message);
            _logger.Information(infoMsg);
            break;
        case LogSeverity.Warning:
            var warningMsg = CreatetMessage(e.Message);
            _logger.Warning(warningMsg);
            break;
        case LogSeverity.Error:
            var errorMsg = CreatetMessage(e.Message, e.Exception);
            _logger.Error(errorMsg);
            break;
        case LogSeverity.Fatal:
            var fatalMsg = CreatetMessage(e.Message);
            _logger.Fatal(fatalMsg);
            break;
    }
}
private void ReplicationFail(string message, string messageStructured, params object[] args) {
    if (args.Length == 0) {
        Log.Fatal(messageStructured);
    } else {
        Log.Fatal(messageStructured, args);
    }

    var msg = args.Length == 0 ? message : string.Format(message, args);
    BlockWriter = true;
    Application.Exit(ExitCode.Error, msg);
    throw new Exception(msg);
}
public IdeService([Import(typeof(SVsServiceProvider))] IServiceProvider serviceProvider) {
    try {
        _serviceProvider = serviceProvider;
        _componentModel = serviceProvider?.GetService(typeof(SComponentModel)) as IComponentModel;
        _extensions = ExtensionManager.Initialize(LogManager.ForContext<ExtensionManagerDummy>()).Value;
    } catch (Exception ex) {
        Log.Fatal(ex, nameof(IdeService));
    }
}
public IActionResult Serilog() {
    _serilogLogger.Verbose("Serilog Verbose");
    _serilogLogger.Debug("Serilog Debug");
    _serilogLogger.Information("Serilog Information");
    _serilogLogger.Warning("Serilog Warning");
    _serilogLogger.Error("Serilog Error");
    _serilogLogger.Fatal("Serilog Fatal");
    return Ok();
}
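// A minimal sketch of the logger configuration such a level-walkthrough action
// relies on: the minimum level must be Verbose for every call above to reach
// the sink. The Console sink and the way the logger is created here are
// assumptions, not part of the original snippet (requires the Serilog and
// Serilog.Sinks.Console packages).
using Serilog;

ILogger serilogLogger = new LoggerConfiguration()
    .MinimumLevel.Verbose()   // without this, Verbose/Debug events are dropped by the default (Information) level
    .WriteTo.Console()
    .CreateLogger();

serilogLogger.Fatal("Serilog Fatal");   // highest severity; emitted at any minimum level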
public async Task InvokeAsync(HttpContext httpContext) {
    try {
        await _next(httpContext);
    } catch (Exception ex) {
        _logger.Fatal(ex, "Fatal exception occurred");
        await HandleExceptionAsync(httpContext, ex);
    }
}
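// A minimal sketch of how this exception-logging middleware is typically wired
// into the ASP.NET Core pipeline, registered early so it wraps everything
// downstream. The class name ExceptionHandlingMiddleware is an assumption;
// only the InvokeAsync method appears in the original snippet.
var builder = WebApplication.CreateBuilder(args);
var app = builder.Build();

app.UseMiddleware<ExceptionHandlingMiddleware>();  // runs the InvokeAsync shown above for every request
app.MapGet("/", () => "Hello");

app.Run();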
public void HandleReplicatedQueue() {
    try {
        _queueStats.Start();
        QueueMonitor.Default.Register(this);

        StorageMessage.CommitAck replicatedMessage;
        var msgType = typeof(StorageMessage.CommitAck);
        while (!_stop) {
            _addMsgSignal.Reset();
            if (_replicatedQueue.TryDequeue(out replicatedMessage)) {
                _queueStats.EnterBusy();
#if DEBUG
                _queueStats.Dequeued(replicatedMessage);
#endif
                _queueStats.ProcessingStarted(msgType, _replicatedQueue.Count);
                ProcessCommitReplicated(replicatedMessage);
                _queueStats.ProcessingEnded(1);
            } else {
                _queueStats.EnterIdle();
                _addMsgSignal.Wait(_waitTimeoutMs);
            }
        }
    } catch (Exception exc) {
        _queueStats.EnterIdle();
        _queueStats.ProcessingStarted<FaultedIndexCommitterServiceState>(0);
        Log.Fatal(exc, "Error in IndexCommitterService. Terminating...");
        _tcs.TrySetException(exc);
        Application.Exit(ExitCode.Error, "Error in IndexCommitterService. Terminating...\nError: " + exc.Message);
        while (!_stop) {
            Thread.Sleep(100);
        }

        _queueStats.ProcessingEnded(0);
    } finally {
        _queueStats.Stop();
        QueueMonitor.Default.Unregister(this);
    }

    _publisher.Publish(new SystemMessage.ServiceShutdown(Name));
}
public static IHost MigrateDatabase(this IHost host, Serilog.ILogger log) {
    try {
        using var scope = host.Services.CreateScope();

        log.Information("Migrating identity server database");
        var idsrvContext = scope.ServiceProvider.GetService<ApplicationDbContext>();
        idsrvContext.Database.Migrate();
        log.Information("Migrations applied");
    } catch (Exception ex) {
        log.Fatal(ex, "Database migration error");
    }

    return host;
}
public CustomerDto Post(CustomerDto customerDto) {
    logger.LogWarning("Warning message from Post Customer");
    logger.LogError("Error message from Post Customer");
    logger.LogCritical("Critical message from Post Customer");

    seriLogger.Warning("Warning message from Post Customer");
    seriLogger.Error("Error message from Post Customer");
    seriLogger.Fatal("Critical message from Post Customer");

    var customer = mapper.Map<Customer>(customerDto);
    customerRepository.Insert(customer);
    unitOfWork.SaveChanges();
    return mapper.Map<CustomerDto>(customer);
}
public void Log(string message, LogLevel logLevel) {
    switch (logLevel) {
        case LogLevel.Information:
            _serilog.Information(message);
            break;
        case LogLevel.Error:
            _serilog.Error(message);
            break;
        case LogLevel.Fatal:
            _serilog.Fatal(message);
            break;
        default:
            break;
    }
}
public async Task PizzaPlannerLoopTick() {
    try {
        await ClosePizzaPlanAfterFinished();
        await LockInPizzaPlansOrCancelOnesThatPassDeadline();
        await NominatePersonToMakeReservation();
        await NominatePersonToHandleExpenses();
        await RemindParticipantsOfEvent();
        await ScheduleNewEventsIfThereIsNoPlannedEventTwoWeeksFromNow();
        await AnnouncePizzaPlanInPizzaRoom();
        await HandlePlansWithMissingInvitations();
    } catch (Exception e) {
        _activityLog.Log($"ERROR: {e.Message}");
        _logger.Fatal(e, "Exception running 'PizzaPlannerLoopTick'");
        Environment.Exit(-1);
    }
}
public static IHost MigrateDatabase(this IHost host, Serilog.ILogger log) {
    try {
        using var scope = host.Services.CreateScope();

        log.Information("Migrating data database");
        var dataContext = scope.ServiceProvider.GetService<TripPlannerContext>();
        dataContext.Database.Migrate();

        log.Information("Migrating diagnostics database");
        var diagnosticsContext = scope.ServiceProvider.GetService<DiagnosticsContext>();
        diagnosticsContext.Database.Migrate();

        log.Information("All migrations applied");
    } catch (Exception ex) {
        log.Fatal(ex, "Database migration error");
    }

    return host;
}
public void Log(LogEntry entry) {
    switch (entry.Severity) {
        case LoggingEventType.Information when IsEnabledFor(LoggingEventType.Information):
            _logger.Information(entry.Exception, entry.Message);
            break;
        case LoggingEventType.Debug when IsEnabledFor(LoggingEventType.Debug):
            _logger.Debug(entry.Exception, entry.Message);
            break;
        case LoggingEventType.Warning when IsEnabledFor(LoggingEventType.Warning):
            _logger.Warning(entry.Exception, entry.Message);
            break;
        case LoggingEventType.Error when IsEnabledFor(LoggingEventType.Error):
            _logger.Error(entry.Exception, entry.Message);
            break;
        case LoggingEventType.Fatal when IsEnabledFor(LoggingEventType.Fatal):
            _logger.Fatal(entry.Exception, entry.Message);
            break;
        case LoggingEventType.Trace when IsEnabledFor(LoggingEventType.Trace):
            _logger.Verbose(entry.Exception, entry.Message);
            break;
        default:
            throw new ArgumentOutOfRangeException();
    }
}
public void Log(LogEntry entry) {
    // Single else-if chain so a Debug entry cannot also fall through to the final Fatal branch;
    // Serilog overloads take the exception first, then the message.
    if (entry.Severity == LoggingEventType.Debug) {
        m_Adaptee.Debug(entry.Exception, entry.Message);
    } else if (entry.Severity == LoggingEventType.Information) {
        m_Adaptee.Information(entry.Exception, entry.Message);
    } else if (entry.Severity == LoggingEventType.Warning) {
        m_Adaptee.Warning(entry.Exception, entry.Message);
    } else if (entry.Severity == LoggingEventType.Error) {
        m_Adaptee.Error(entry.Exception, entry.Message);
    } else {
        m_Adaptee.Fatal(entry.Exception, entry.Message);
    }
}
private static void SerilogTest(long process) {
    var maxDegreeOfParallelism = Environment.ProcessorCount * 10;
    Parallel.For(0, process, new ParallelOptions { MaxDegreeOfParallelism = maxDegreeOfParallelism }, i => {
        _file01Log.Debug($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 01: Debug!");
        _file01Log.Information($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 01: Information!");
        _file01Log.Warning($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 01: Warning!");
        _file01Log.Error($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 01: Error!");
        _file01Log.Fatal($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 01: Fatal!");
        _file01Log.Verbose($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 01: Verbose!");

        _file02Log.Debug($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 02: Debug!");
        _file02Log.Information($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 02: Information!");
        _file02Log.Warning($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 02: Warning!");
        _file02Log.Error($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 02: Error!");
        _file02Log.Fatal($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 02: Fatal!");
        _file02Log.Verbose($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to log file 02: Verbose!");

        _db01Log.Debug($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 01: Debug!");
        _db01Log.Information($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 01: Information!");
        _db01Log.Warning($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 01: Warning!");
        _db01Log.Error($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 01: Error!");
        _db01Log.Fatal($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 01: Fatal!");
        _db01Log.Verbose($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 01: Verbose!");

        _db02Log.Debug($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 02: Debug!");
        _db02Log.Information($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 02: Information!");
        _db02Log.Warning($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 02: Warning!");
        _db02Log.Error($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 02: Error!");
        _db02Log.Fatal($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 02: Fatal!");
        _db02Log.Verbose($"ThreadId:{Thread.CurrentThread.ManagedThreadId}, writing to database 02: Verbose!");
    });
}
Action<EventStoreCatchUpSubscription, SubscriptionDropReason, Exception> SubscriptionDropped(Projection projection) =>
    (subscription, reason, ex) => {
        // TODO: Reevaluate stopping subscriptions when issues with reconnect get fixed.
        // https://github.com/EventStore/EventStore/issues/1127
        // https://groups.google.com/d/msg/event-store/AdKzv8TxabM/VR7UDIRxCgAJ
        subscription.Stop();

        switch (reason) {
            case SubscriptionDropReason.UserInitiated:
                Log.Debug("{projection} projection stopped gracefully.", projection);
                break;
            case SubscriptionDropReason.SubscribingError:
            case SubscriptionDropReason.ServerError:
            case SubscriptionDropReason.ConnectionClosed:
            case SubscriptionDropReason.CatchUpError:
            case SubscriptionDropReason.ProcessingQueueOverflow:
            case SubscriptionDropReason.EventHandlerException:
                Log.Error(ex,
                    "{projection} projection stopped because of a transient error ({reason}). " +
                    "Attempting to restart...",
                    projection, reason);
                Thread.Sleep(TimeSpan.FromSeconds(1));
                Task.Run(() => StartProjection(projection));
                break;
            default:
                Log.Fatal(ex,
                    "{projection} projection stopped because of an internal error ({reason}). " +
                    "Please check your logs for details.",
                    projection, reason);
                break;
        }
    };
public void Fatal(Exception e, string format, params object[] args) { _seriLogger.Fatal(e, format, args); }
public void Fatal(string messageTemplate) { _logger.Fatal(messageTemplate); }
public void LogFatal(Exception ex, string message) { logger.Fatal(ex, message); }
public void Open(bool verifyHash = true, bool readOnly = false, int threads = 1) {
    Ensure.Positive(threads, "threads");

    ValidateReaderChecksumsMustBeLess(Config);
    var checkpoint = Config.WriterCheckpoint.Read();

    if (Config.InMemDb) {
        Manager.AddNewChunk();
        return;
    }

    var lastChunkNum = (int)(checkpoint / Config.ChunkSize);
    var lastChunkVersions = Config.FileNamingStrategy.GetAllVersionsFor(lastChunkNum);

    try {
        Parallel.ForEach(GetAllLatestChunkVersions(checkpoint),
            new ParallelOptions { MaxDegreeOfParallelism = threads },
            chunkInfo => {
                TFChunk.TFChunk chunk;
                if (lastChunkVersions.Length == 0 &&
                    (chunkInfo.ChunkStartNumber + 1) * (long)Config.ChunkSize == checkpoint) {
                    // The situation where the logical data size is exactly divisible by ChunkSize,
                    // so the checkpoint can indicate that one more chunk should exist, while the actual
                    // last chunk is the (lastChunkNum-1) one and may not be completed yet -- a perfectly valid situation.
                    var footer = ReadChunkFooter(chunkInfo.ChunkFileName);
                    if (footer.IsCompleted) {
                        chunk = TFChunk.TFChunk.FromCompletedFile(chunkInfo.ChunkFileName, verifyHash: false,
                            unbufferedRead: Config.Unbuffered,
                            initialReaderCount: Config.InitialReaderCount,
                            maxReaderCount: Config.MaxReaderCount,
                            optimizeReadSideCache: Config.OptimizeReadSideCache,
                            reduceFileCachePressure: Config.ReduceFileCachePressure);
                    } else {
                        chunk = TFChunk.TFChunk.FromOngoingFile(chunkInfo.ChunkFileName, Config.ChunkSize,
                            checkSize: false,
                            unbuffered: Config.Unbuffered,
                            writethrough: Config.WriteThrough,
                            initialReaderCount: Config.InitialReaderCount,
                            maxReaderCount: Config.MaxReaderCount,
                            reduceFileCachePressure: Config.ReduceFileCachePressure);
                        // chunk is full of data, we should complete it right here
                        if (!readOnly) {
                            chunk.Complete();
                        }
                    }
                } else {
                    chunk = TFChunk.TFChunk.FromCompletedFile(chunkInfo.ChunkFileName, verifyHash: false,
                        unbufferedRead: Config.Unbuffered,
                        initialReaderCount: Config.InitialReaderCount,
                        maxReaderCount: Config.MaxReaderCount,
                        optimizeReadSideCache: Config.OptimizeReadSideCache,
                        reduceFileCachePressure: Config.ReduceFileCachePressure);
                }

                // This call is thread-safe.
                Manager.AddChunk(chunk);
            });
    } catch (AggregateException aggEx) {
        // We only really care that *something* is wrong - throw the first inner exception.
        throw aggEx.InnerException;
    }

    if (lastChunkVersions.Length == 0) {
        var onBoundary = checkpoint == (Config.ChunkSize * (long)lastChunkNum);
        if (!onBoundary) {
            throw new CorruptDatabaseException(
                new ChunkNotFoundException(Config.FileNamingStrategy.GetFilenameFor(lastChunkNum, 0)));
        }

        if (!readOnly) {
            Manager.AddNewChunk();
        }
    } else {
        var chunkFileName = lastChunkVersions[0];
        var chunkHeader = ReadChunkHeader(chunkFileName);
        var chunkLocalPos = chunkHeader.GetLocalLogPosition(checkpoint);
        if (chunkHeader.IsScavenged) {
            var lastChunk = TFChunk.TFChunk.FromCompletedFile(chunkFileName, verifyHash: false,
                unbufferedRead: Config.Unbuffered,
                initialReaderCount: Config.InitialReaderCount,
                maxReaderCount: Config.MaxReaderCount,
                optimizeReadSideCache: Config.OptimizeReadSideCache,
                reduceFileCachePressure: Config.ReduceFileCachePressure);
            if (lastChunk.ChunkFooter.LogicalDataSize != chunkLocalPos) {
                lastChunk.Dispose();
                throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                    string.Format("Chunk {0} is corrupted. Expected local chunk position: {1}, " +
                                  "but Chunk.LogicalDataSize is {2} (Chunk.PhysicalDataSize is {3}). Writer checkpoint: {4}.",
                        chunkFileName, chunkLocalPos, lastChunk.LogicalDataSize, lastChunk.PhysicalDataSize, checkpoint)));
            }

            Manager.AddChunk(lastChunk);
            if (!readOnly) {
                _log.Information(
                    "Moving WriterCheckpoint from {checkpoint} to {chunkEndPosition}, as it points to the scavenged chunk. " +
                    "If that was not caused by replication of scavenged chunks, that could be a bug.",
                    checkpoint, lastChunk.ChunkHeader.ChunkEndPosition);
                Config.WriterCheckpoint.Write(lastChunk.ChunkHeader.ChunkEndPosition);
                Config.WriterCheckpoint.Flush();
                Manager.AddNewChunk();
            }
        } else {
            var lastChunk = TFChunk.TFChunk.FromOngoingFile(chunkFileName, (int)chunkLocalPos, checkSize: false,
                unbuffered: Config.Unbuffered,
                writethrough: Config.WriteThrough,
                initialReaderCount: Config.InitialReaderCount,
                maxReaderCount: Config.MaxReaderCount,
                reduceFileCachePressure: Config.ReduceFileCachePressure);
            Manager.AddChunk(lastChunk);
        }
    }

    EnsureNoExcessiveChunks(lastChunkNum);

    if (!readOnly) {
        RemoveOldChunksVersions(lastChunkNum);
        CleanUpTempFiles();
    }

    if (verifyHash && lastChunkNum > 0) {
        var preLastChunk = Manager.GetChunk(lastChunkNum - 1);
        var lastBgChunkNum = preLastChunk.ChunkHeader.ChunkStartNumber;
        ThreadPool.QueueUserWorkItem(_ => {
            for (int chunkNum = lastBgChunkNum; chunkNum >= 0;) {
                var chunk = Manager.GetChunk(chunkNum);
                try {
                    chunk.VerifyFileHash();
                } catch (FileBeingDeletedException exc) {
                    _log.Debug(
                        "{exceptionType} exception was thrown while doing background validation of chunk {chunk}.",
                        exc.GetType().Name, chunk);
                    _log.Debug(
                        "That's probably OK, especially if truncation was requested at the same time: {e}.",
                        exc.Message);
                } catch (Exception exc) {
                    _log.Fatal(exc, "Verification of chunk {chunk} failed, terminating server...", chunk);
                    var msg = string.Format("Verification of chunk {0} failed, terminating server...", chunk);
                    Application.Exit(ExitCode.Error, msg);
                    return;
                }

                chunkNum = chunk.ChunkHeader.ChunkStartNumber - 1;
            }
        });
    }

    Manager.EnableCaching();
}
public void Critical(string message) { _logger.Fatal(message); }
public void Fatal(string message, System.Exception ex, params object[] messageParams) {
    // Serilog expects the exception first, then the message template and its property values.
    Logger.Fatal(ex, message, messageParams);
}
public void Log(LogEntry entry) {
    // See if we can log anything at all; if even Fatal is disabled, exit early.
    if (!_logger.IsEnabled(LogEventLevel.Fatal)) {
        return;
    }

    if (entry.Exception == null) {
        if (entry.Severity == LoggingEventType.Trace && _logger.IsEnabled(LogEventLevel.Verbose)) {
            _logger.Verbose(entry.Message);
        } else if (entry.Severity == LoggingEventType.Debug && _logger.IsEnabled(LogEventLevel.Debug)) {
            _logger.Debug(entry.Message);
        } else if (entry.Severity == LoggingEventType.Information && _logger.IsEnabled(LogEventLevel.Information)) {
            _logger.Information(entry.Message);
        } else if (entry.Severity == LoggingEventType.Warning && _logger.IsEnabled(LogEventLevel.Warning)) {
            _logger.Warning(entry.Message);
        } else if (entry.Severity == LoggingEventType.Error && _logger.IsEnabled(LogEventLevel.Error)) {
            _logger.Error(entry.Message);
        } else if (entry.Severity == LoggingEventType.Fatal && _logger.IsEnabled(LogEventLevel.Fatal)) {
            _logger.Fatal(entry.Message);
        }
    } else {
        if (entry.Severity == LoggingEventType.Trace && _logger.IsEnabled(LogEventLevel.Verbose)) {
            _logger.Verbose(entry.Exception, entry.Message);
        } else if (entry.Severity == LoggingEventType.Debug && _logger.IsEnabled(LogEventLevel.Debug)) {
            _logger.Debug(entry.Exception, entry.Message);
        } else if (entry.Severity == LoggingEventType.Information && _logger.IsEnabled(LogEventLevel.Information)) {
            _logger.Information(entry.Exception, entry.Message);
        } else if (entry.Severity == LoggingEventType.Warning && _logger.IsEnabled(LogEventLevel.Warning)) {
            _logger.Warning(entry.Exception, entry.Message);
        } else if (entry.Severity == LoggingEventType.Error && _logger.IsEnabled(LogEventLevel.Error)) {
            _logger.Error(entry.Exception, entry.Message);
        } else if (entry.Severity == LoggingEventType.Fatal && _logger.IsEnabled(LogEventLevel.Fatal)) {
            _logger.Fatal(entry.Exception, entry.Message);
        }
    }
}