// Initializes the durable shipper: captures shipping configuration, builds the
// HTTP client for the target Seq server, derives buffer-file paths from the
// buffer base filename, and starts the shipping timer.
public HttpLogShipper(
    string serverUrl,
    string bufferBaseFilename,
    string apiKey,
    int batchPostingLimit,
    TimeSpan period,
    long? eventBodyLimitBytes,
    LoggingLevelSwitch levelControlSwitch,
    HttpMessageHandler messageHandler,
    long? retainedInvalidPayloadsLimitBytes)
{
    // Shipping configuration.
    _apiKey = apiKey;
    _batchPostingLimit = batchPostingLimit;
    _eventBodyLimitBytes = eventBodyLimitBytes;
    _retainedInvalidPayloadsLimitBytes = retainedInvalidPayloadsLimitBytes;
    _controlledSwitch = new ControlledLevelSwitch(levelControlSwitch);
    _connectionSchedule = new ExponentialBackoffConnectionSchedule(period);

    // One HttpClient per shipper; an explicit handler wins when supplied.
    _httpClient = messageHandler == null
        ? new HttpClient()
        : new HttpClient(messageHandler);
    _httpClient.BaseAddress = new Uri(SeqApi.NormalizeServerBaseAddress(serverUrl));

    // Durable-buffer file layout derived from the buffer base filename.
    _bookmarkFilename = Path.GetFullPath(bufferBaseFilename + ".bookmark");
    _logFolder = Path.GetDirectoryName(_bookmarkFilename);
    _candidateSearchPath = Path.GetFileName(bufferBaseFilename) + "*.json";

    // Arm the timer last, once all state above is in place.
    _timer = new PortableTimer(c => OnTick());
    SetTimer();
}
/// <summary>
/// Formats <paramref name="events"/> as either compact or raw JSON (per the
/// sink's configuration) and posts them to the Seq bulk-upload endpoint,
/// then applies any server-specified minimum level to the controlled switch.
/// </summary>
/// <param name="events">The batch of events to ship.</param>
/// <exception cref="LoggingFailedException">The server returned a non-success status code.</exception>
protected override async Task EmitBatchAsync(IEnumerable<LogEvent> events)
{
    // Every successful batch doubles as a level check; schedule the next required one.
    _nextRequiredLevelCheckUtc = DateTime.UtcNow.Add(RequiredLevelCheckInterval);

    string payload, payloadContentType;
    if (_useCompactFormat)
    {
        payloadContentType = SeqApi.CompactLogEventFormatMimeType;
        payload = FormatCompactPayload(events, _eventBodyLimitBytes);
    }
    else
    {
        payloadContentType = SeqApi.RawEventFormatMimeType;
        payload = FormatRawPayload(events, _eventBodyLimitBytes);
    }

    var content = new StringContent(payload, Encoding.UTF8, payloadContentType);
    if (!string.IsNullOrWhiteSpace(_apiKey))
    {
        content.Headers.Add(SeqApi.ApiKeyHeaderName, _apiKey);
    }

    var result = await _httpClient.PostAsync(SeqApi.BulkUploadResource, content).ConfigureAwait(false);
    if (!result.IsSuccessStatusCode)
    {
        throw new LoggingFailedException($"Received failed result {result.StatusCode} when posting events to Seq");
    }

    // Fix: ConfigureAwait(false) was missing on this await alone; library code
    // must not capture the caller's synchronization context (deadlock risk,
    // and inconsistent with the PostAsync await above).
    var returned = await result.Content.ReadAsStringAsync().ConfigureAwait(false);
    _controlledSwitch.Update(SeqApi.ReadEventInputResult(returned));
}
// Constructs the audit sink, validating the server URL and preparing an
// HttpClient targeting the normalized Seq base address.
public SeqAuditSink(
    string serverUrl,
    string apiKey,
    HttpMessageHandler messageHandler,
    bool useCompactFormat)
{
    if (serverUrl == null) throw new ArgumentNullException(nameof(serverUrl));

    _apiKey = apiKey;
    _useCompactFormat = useCompactFormat;

    // A custom handler, when supplied, takes precedence over the default.
    _httpClient = messageHandler == null
        ? new HttpClient()
        : new HttpClient(messageHandler);
    _httpClient.BaseAddress = new Uri(SeqApi.NormalizeServerBaseAddress(serverUrl));
}
// Constructs the periodic-batching sink: validates the server URL, wires the
// batching parameters through to the base class, and prepares the HttpClient.
public SeqSink(
    string serverUrl,
    string apiKey,
    int batchPostingLimit,
    TimeSpan period,
    long? eventBodyLimitBytes,
    LoggingLevelSwitch levelControlSwitch,
    HttpMessageHandler messageHandler)
    : base(batchPostingLimit, period)
{
    if (serverUrl == null) throw new ArgumentNullException(nameof(serverUrl));

    _apiKey = apiKey;
    _eventBodyLimitBytes = eventBodyLimitBytes;
    _controlledSwitch = new ControlledLevelSwitch(levelControlSwitch);

    // A custom handler, when supplied, takes precedence over the default.
    _httpClient = messageHandler == null
        ? new HttpClient()
        : new HttpClient(messageHandler);
    _httpClient.BaseAddress = new Uri(SeqApi.NormalizeServerBaseAddress(serverUrl));
}
/// <summary>
/// Synchronously posts a batch of events to the Seq bulk-upload endpoint in the
/// raw format ({"events":[...]}), logging failures via SelfLog and recording any
/// server-specified minimum accepted level.
/// </summary>
/// <param name="events">The batch of events to ship.</param>
protected override void EmitBatch(IEnumerable<LogEvent> events)
{
    // Build the raw-format envelope: {"events":[ev1,ev2,...]}.
    var payload = new StringWriter();
    payload.Write("{\"events\":[");
    var formatter = new JsonFormatter();
    var delimStart = "";
    foreach (var logEvent in events)
    {
        payload.Write(delimStart);
        formatter.Format(logEvent, payload);
        delimStart = ",";
    }
    payload.Write("]}");

    var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
    if (!string.IsNullOrWhiteSpace(_apiKey))
    {
        content.Headers.Add(ApiKeyHeaderName, _apiKey);
    }

    // NOTE(review): the base-class contract here is synchronous, so the async
    // HTTP call is blocked on — presumably only used on hosts without a
    // synchronization context; confirm before reusing elsewhere.
    var result = _httpClient.PostAsync(BulkUploadResource, content).Result;

    // Fix: the response body was previously read twice (once for failure
    // logging, once for level parsing) — read it once and reuse.
    var returned = result.Content.ReadAsStringAsync().Result;
    if (!result.IsSuccessStatusCode)
    {
        SelfLog.WriteLine("Received failed result {0}: {1}", result.StatusCode, returned);
    }

    _minimumAcceptedLevel = SeqApi.ReadEventInputResult(returned);
}
/// <summary>
/// Posts a batch of events to the Seq bulk-upload endpoint in the raw format
/// ({"events":[...]}) and records any server-specified minimum accepted level.
/// </summary>
/// <param name="events">The batch of events to ship.</param>
/// <exception cref="LoggingFailedException">The server returned a non-success status code.</exception>
protected override async Task EmitBatchAsync(IEnumerable<LogEvent> events)
{
    // Build the raw-format envelope: {"events":[ev1,ev2,...]}.
    var payload = new StringWriter();
    payload.Write("{\"events\":[");
    var formatter = new JsonFormatter(closingDelimiter: "");
    var delimStart = "";
    foreach (var logEvent in events)
    {
        payload.Write(delimStart);
        formatter.Format(logEvent, payload);
        delimStart = ",";
    }
    payload.Write("]}");

    var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
    if (!string.IsNullOrWhiteSpace(_apiKey))
    {
        content.Headers.Add(ApiKeyHeaderName, _apiKey);
    }

    // Fix: library code must not capture the caller's synchronization context —
    // both awaits now use ConfigureAwait(false), consistent with the other
    // async emit paths in this file.
    var result = await _httpClient.PostAsync(BulkUploadResource, content).ConfigureAwait(false);
    if (!result.IsSuccessStatusCode)
    {
        throw new LoggingFailedException($"Received failed result {result.StatusCode} when posting events to Seq");
    }

    var returned = await result.Content.ReadAsStringAsync().ConfigureAwait(false);
    _minimumAcceptedLevel = SeqApi.ReadEventInputResult(returned);
}
// Timer callback: ships buffered events to Seq one batch at a time, advancing a
// file-based bookmark only after the server accepts each batch, and applying any
// server-specified level control before re-arming the timer.
void OnTick()
{
    // Level returned by the server on the last successful POST in this tick, if
    // any; applied to the controlled switch in the finally block below.
    LogEventLevel? minimumAcceptedLevel = null;
    try
    {
        int count;
        do
        {
            count = 0;

            // Locking the bookmark ensures that though there may be multiple instances of this
            // class running, only one will ship logs at a time.
            using (var bookmark = IOFile.Open(_bookmarkFilename, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read))
            {
                long nextLineBeginsAtOffset;
                string currentFile;
                TryReadBookmark(bookmark, out nextLineBeginsAtOffset, out currentFile);

                var fileSet = GetFileSet();

                // A missing or stale bookmark restarts shipping from the oldest buffer file.
                if (currentFile == null || !IOFile.Exists(currentFile))
                {
                    nextLineBeginsAtOffset = 0;
                    currentFile = fileSet.FirstOrDefault();
                }

                // No buffer files at all: fall through to the loop test (count is 0).
                if (currentFile == null)
                {
                    continue;
                }

                var payload = ReadPayload(currentFile, ref nextLineBeginsAtOffset, ref count);

                // POST either because events are waiting, or because a periodic level
                // check against the server is due.
                if (count > 0 || _levelControlSwitch != null && _nextRequiredLevelCheckUtc < DateTime.UtcNow)
                {
                    lock (_stateLock)
                    {
                        _nextRequiredLevelCheckUtc = DateTime.UtcNow.Add(RequiredLevelCheckInterval);
                    }

                    var content = new StringContent(payload, Encoding.UTF8, "application/json");
                    if (!string.IsNullOrWhiteSpace(_apiKey))
                    {
                        content.Headers.Add(ApiKeyHeaderName, _apiKey);
                    }

                    // NOTE(review): sync-over-async (.Result/.Wait()) throughout this
                    // method — presumably acceptable because OnTick runs off a timer
                    // with no synchronization context; confirm before reusing elsewhere.
                    var result = _httpClient.PostAsync(BulkUploadResource, content).Result;
                    if (result.IsSuccessStatusCode)
                    {
                        _connectionSchedule.MarkSuccess();
                        // Only advance the bookmark once the server has accepted the batch.
                        WriteBookmark(bookmark, nextLineBeginsAtOffset, currentFile);
                        var returned = result.Content.ReadAsStringAsync().Result;
                        minimumAcceptedLevel = SeqApi.ReadEventInputResult(returned);
                    }
                    else if (result.StatusCode == HttpStatusCode.BadRequest || result.StatusCode == HttpStatusCode.RequestEntityTooLarge)
                    {
                        // The connection attempt was successful - the payload we sent was the problem.
                        _connectionSchedule.MarkSuccess();
                        // Preserve the rejected payload for later diagnosis, then skip past it.
                        DumpInvalidPayload(result, payload).Wait();
                        WriteBookmark(bookmark, nextLineBeginsAtOffset, currentFile);
                    }
                    else
                    {
                        _connectionSchedule.MarkFailure();
                        SelfLog.WriteLine("Received failed HTTP shipping result {0}: {1}", result.StatusCode, result.Content.ReadAsStringAsync().Result);
                        break;
                    }
                }
                else
                {
                    // For whatever reason, there's nothing waiting to send. This means we should try connecting again at the
                    // regular interval, so mark the attempt as successful.
                    _connectionSchedule.MarkSuccess();

                    // Only advance the bookmark if no other process has the
                    // current file locked, and its length is as we found it.
                    if (fileSet.Length == 2 && fileSet.First() == currentFile && IsUnlockedAtLength(currentFile, nextLineBeginsAtOffset))
                    {
                        WriteBookmark(bookmark, 0, fileSet[1]);
                    }

                    if (fileSet.Length > 2)
                    {
                        // Once there's a third file waiting to ship, we do our
                        // best to move on, though a lock on the current file
                        // will delay this.
                        IOFile.Delete(fileSet[0]);
                    }
                }
            }
        } while (count == _batchPostingLimit); // A full batch suggests more is waiting; loop immediately.
    }
    catch (Exception ex)
    {
        SelfLog.WriteLine("Exception while emitting periodic batch from {0}: {1}", this, ex);
        _connectionSchedule.MarkFailure();
    }
    finally
    {
        lock (_stateLock)
        {
            UpdateLevelControlSwitch(minimumAcceptedLevel);
            // Re-arm the timer unless the shipper is being torn down.
            if (!_unloading)
            {
                SetTimer();
            }
        }
    }
}
// Timer callback: reads buffered event lines from the current buffer file,
// posts them to Seq in raw format, and advances a file-based bookmark only
// after a successful post; re-arms the timer when done.
void OnTick()
{
    // Level returned by the server on the last successful POST in this tick, if any.
    LogEventLevel? minimumAcceptedLevel = null;
    try
    {
        var count = 0;
        do
        {
            // Locking the bookmark ensures that though there may be multiple instances of this
            // class running, only one will ship logs at a time.
            using (var bookmark = File.Open(_bookmarkFilename, FileMode.OpenOrCreate, FileAccess.ReadWrite, FileShare.Read))
            {
                long nextLineBeginsAtOffset;
                string currentFile;
                TryReadBookmark(bookmark, out nextLineBeginsAtOffset, out currentFile);

                var fileSet = GetFileSet();

                // A missing or stale bookmark restarts shipping from the oldest buffer file.
                if (currentFile == null || !File.Exists(currentFile))
                {
                    nextLineBeginsAtOffset = 0;
                    currentFile = fileSet.FirstOrDefault();
                }

                if (currentFile != null)
                {
                    // Assemble up to _batchPostingLimit buffered lines into the
                    // raw-format envelope {"events":[...]}.
                    var payload = new StringWriter();
                    payload.Write("{\"events\":[");
                    count = 0;
                    var delimStart = "";
                    using (var current = File.Open(currentFile, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
                    {
                        current.Position = nextLineBeginsAtOffset;
                        string nextLine;
                        while (count < _batchPostingLimit && TryReadLine(current, ref nextLineBeginsAtOffset, out nextLine))
                        {
                            ++count;
                            payload.Write(delimStart);
                            payload.Write(nextLine);
                            delimStart = ",";
                        }
                        payload.Write("]}");
                    }

                    if (count > 0)
                    {
                        var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
                        if (!string.IsNullOrWhiteSpace(_apiKey))
                        {
                            content.Headers.Add(ApiKeyHeaderName, _apiKey);
                        }

                        // NOTE(review): sync-over-async (.Result) — presumably acceptable
                        // because OnTick runs off a timer with no synchronization context;
                        // confirm before reusing elsewhere.
                        var result = _httpClient.PostAsync(BulkUploadResource, content).Result;
                        if (result.IsSuccessStatusCode)
                        {
                            // Only advance the bookmark once the server has accepted the batch.
                            WriteBookmark(bookmark, nextLineBeginsAtOffset, currentFile);
                            var returned = result.Content.ReadAsStringAsync().Result;
                            minimumAcceptedLevel = SeqApi.ReadEventInputResult(returned);
                        }
                        else
                        {
                            SelfLog.WriteLine("Received failed HTTP shipping result {0}: {1}", result.StatusCode, result.Content.ReadAsStringAsync().Result);
                        }
                    }
                    else
                    {
                        // Only advance the bookmark if no other process has the
                        // current file locked, and its length is as we found it.
                        if (fileSet.Length == 2 && fileSet.First() == currentFile && IsUnlockedAtLength(currentFile, nextLineBeginsAtOffset))
                        {
                            WriteBookmark(bookmark, 0, fileSet[1]);
                        }

                        if (fileSet.Length > 2)
                        {
                            // Once there's a third file waiting to ship, we do our
                            // best to move on, though a lock on the current file
                            // will delay this.
                            File.Delete(fileSet[0]);
                        }
                    }
                }
            }
        } while (count == _batchPostingLimit); // A full batch suggests more is waiting; loop immediately.
    }
    catch (Exception ex)
    {
        SelfLog.WriteLine("Exception while emitting periodic batch from {0}: {1}", this, ex);
    }
    finally
    {
        lock (_stateLock)
        {
            // Publish the server-provided level and re-arm the timer unless unloading.
            _minimumAcceptedLevel = minimumAcceptedLevel;
            if (!_unloading)
            {
                SetTimer();
            }
        }
    }
}
/// <summary>
/// Posts a batch of events to the Seq bulk-upload endpoint in the raw format
/// ({"Events":[...]}), dropping events whose JSON exceeds the configured body
/// limit, and applies any server-specified minimum level to the local switch.
/// </summary>
/// <param name="events">The batch of events to ship.</param>
/// <exception cref="LoggingFailedException">The server returned a non-success status code.</exception>
protected override async Task EmitBatchAsync(IEnumerable<LogEvent> events)
{
    // Every successful batch doubles as a level check; schedule the next required one.
    _nextRequiredLevelCheckUtc = DateTime.UtcNow.Add(RequiredLevelCheckInterval);

    var payload = new StringWriter();
    payload.Write("{\"Events\":[");
    var delimStart = "";
    foreach (var logEvent in events)
    {
        if (_eventBodyLimitBytes.HasValue)
        {
            // Render to a scratch buffer first so an oversized event can be
            // dropped without corrupting the envelope.
            var scratch = new StringWriter();
            RawJsonFormatter.FormatContent(logEvent, scratch);
            var buffered = scratch.ToString();
            if (Encoding.UTF8.GetByteCount(buffered) > _eventBodyLimitBytes.Value)
            {
                SelfLog.WriteLine(
                    "Event JSON representation exceeds the byte size limit of {0} set for this sink and will be dropped; data: {1}",
                    _eventBodyLimitBytes,
                    buffered);
            }
            else
            {
                payload.Write(delimStart);
                payload.Write(buffered);
                delimStart = ",";
            }
        }
        else
        {
            payload.Write(delimStart);
            RawJsonFormatter.FormatContent(logEvent, payload);
            delimStart = ",";
        }
    }
    payload.Write("]}");

    var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
    if (!string.IsNullOrWhiteSpace(_apiKey))
    {
        content.Headers.Add(ApiKeyHeaderName, _apiKey);
    }

    var result = await _httpClient.PostAsync(BulkUploadResource, content).ConfigureAwait(false);
    if (!result.IsSuccessStatusCode)
    {
        throw new LoggingFailedException($"Received failed result {result.StatusCode} when posting events to Seq");
    }

    // Fix: ConfigureAwait(false) was missing on this await alone; library code
    // must not capture the caller's synchronization context (consistent with
    // the PostAsync await above).
    var returned = await result.Content.ReadAsStringAsync().ConfigureAwait(false);
    var minimumAcceptedLevel = SeqApi.ReadEventInputResult(returned);

    // Apply the server-provided minimum level to the local controlling switch.
    if (minimumAcceptedLevel == null)
    {
        // No server preference: open the switch fully, if one exists.
        if (_levelControlSwitch != null)
        {
            _levelControlSwitch.MinimumLevel = LevelAlias.Minimum;
        }
    }
    else if (_levelControlSwitch == null)
    {
        _levelControlSwitch = new LoggingLevelSwitch(minimumAcceptedLevel.Value);
    }
    else
    {
        _levelControlSwitch.MinimumLevel = minimumAcceptedLevel.Value;
    }
}