public void PeekingDoesNotChangeState()
{
    // Peek() must be a read-only view of the buffer: repeated peeks over the
    // same data return the same number of events without consuming anything.
    using (var temp = TempFolder.ForCaller())
    using (var buffer = new LogBuffer(temp.AllocateFilename("mdb"), DefaultBufferSize))
    {
        buffer.Enqueue(new[] { Some.Bytes(140) });

        var firstPeek = buffer.Peek((int)DefaultBufferSize);
        Assert.Equal(1, firstPeek.Length);

        // A second peek sees the same single event — nothing was dequeued.
        var secondPeek = buffer.Peek((int)DefaultBufferSize);
        Assert.Equal(1, secondPeek.Length);
    }
}
// The odd three-stage initialization improves our chances of correctly tearing down the `LightningEnvironment`s within
// `LogBuffer`s in the event of a failure during start-up. See: https://github.com/CoreyKaylor/Lightning.NET/blob/master/src/LightningDB/LightningEnvironment.cs#L252
public void Load()
{
    // At startup, we look for buffers and either delete them if they're empty, or load them
    // up if they're not. This garbage collection at start-up is a simplification,
    // we might try cleaning up in the background if the gains are worthwhile, although more synchronization
    // would be required.
    lock (_sync)
    {
        // Ensure the root buffer directory exists before probing it.
        Directory.CreateDirectory(_bufferPath);

        // The "default" buffer (no API key) lives directly in the root folder.
        var defaultDataFilePath = Path.Combine(_bufferPath, DataFileName);
        if (File.Exists(defaultDataFilePath))
        {
            _log.Information("Loading the default log buffer in {Path}", _bufferPath);
            var buffer = new LogBuffer(_bufferPath, _bufferSizeBytes);

            // Peek(0) returns at least one event when any are stored, so a
            // zero-length result means the buffer is genuinely empty.
            if (buffer.Peek(0).Length == 0)
            {
                _log.Information("The default buffer is empty and will be removed until more data is received");

                // Dispose first so the LightningEnvironment releases its files
                // before we delete them from disk.
                buffer.Dispose();
                File.Delete(defaultDataFilePath);

                // The lock file is created alongside the data file; remove it
                // too if present so the folder is left clean.
                var lockFilePath = Path.Combine(_bufferPath, LockFileName);
                if (File.Exists(lockFilePath))
                {
                    File.Delete(lockFilePath);
                }
            }
            else
            {
                // Non-empty: wire the buffer up with a shipper and keep it active.
                _noApiKeyLogBuffer = new ActiveLogBuffer(buffer, _shipperFactory.Create(buffer, _outputConfig.ApiKey));
            }
        }

        // Each subfolder holding an API-key marker file is a per-key buffer.
        foreach (var subfolder in Directory.GetDirectories(_bufferPath))
        {
            var encodedApiKeyFilePath = Path.Combine(subfolder, ApiKeyFileName);
            if (!File.Exists(encodedApiKeyFilePath))
            {
                // Unknown folder contents — leave untouched rather than delete.
                _log.Information("Folder {Path} does not appear to be a log buffer; skipping", subfolder);
                continue;
            }

            _log.Information("Loading an API-key specific buffer in {Path}", subfolder);

            // The API key is stored machine-protected on disk; decrypt it here.
            var apiKey = MachineScopeDataProtection.Unprotect(File.ReadAllText(encodedApiKeyFilePath));
            var buffer = new LogBuffer(subfolder, _bufferSizeBytes);
            if (buffer.Peek(0).Length == 0)
            {
                _log.Information("API key-specific buffer in {Path} is empty and will be removed until more data is received", subfolder);

                // As above: release the environment before deleting its folder.
                buffer.Dispose();
                Directory.Delete(subfolder, true);
            }
            else
            {
                var activeBuffer = new ActiveLogBuffer(buffer, _shipperFactory.Create(buffer, apiKey));
                _buffersByApiKey.Add(apiKey, activeBuffer);
            }
        }
    }
}
public void GivingTheLastSeenEventKeyRemovesPrecedingEvents()
{
    // Dequeue(key) is inclusive: acknowledging the last event's key drops
    // that event and every event before it.
    using (var temp = TempFolder.ForCaller())
    using (var buffer = new LogBuffer(temp.AllocateFilename("mdb"), DefaultBufferSize))
    {
        var events = new[] { Some.Bytes(140), Some.Bytes(140), Some.Bytes(140) };
        buffer.Enqueue(events);

        var peeked = buffer.Peek(420);
        Assert.Equal(3, peeked.Length);

        // Acknowledge up to and including the final event.
        buffer.Dequeue(peeked[2].Key);

        var afterDequeue = buffer.Peek(420);
        Assert.Equal(0, afterDequeue.Length);
    }
}
public void ANewLogBufferIsEmpty()
{
    // A freshly-created buffer backed by a brand-new file holds no events.
    using (var temp = TempFolder.ForCaller())
    using (var buffer = new LogBuffer(temp.AllocateFilename("mdb"), DefaultBufferSize))
    {
        var peeked = buffer.Peek((int)DefaultBufferSize);

        Assert.Equal(0, peeked.Length);
    }
}
public void AtLeastOneEventIsAlwaysDequeued()
{
    // Even when the size hint is smaller than a single event, Peek() must
    // still return one event so that shipping can always make progress.
    using (var temp = TempFolder.ForCaller())
    using (var buffer = new LogBuffer(temp.AllocateFilename("mdb"), DefaultBufferSize))
    {
        var first = Some.Bytes(140);
        buffer.Enqueue(new[] { first, Some.Bytes(140), Some.Bytes(140) });

        // 30 bytes is well below the ~140-byte payload size.
        var peeked = buffer.Peek(30);

        Assert.Equal(1, peeked.Length);
        Assert.Equal(first, peeked[0].Value);
    }
}
public void SizeHintLimitsDequeuedEventCount()
{
    // The size hint caps how much data a single Peek() returns: 300 bytes
    // accommodates two ~140-byte events but not a third.
    using (var temp = TempFolder.ForCaller())
    using (var buffer = new LogBuffer(temp.AllocateFilename("mdb"), DefaultBufferSize))
    {
        byte[] first = Some.Bytes(140), second = Some.Bytes(140), third = Some.Bytes(140);
        buffer.Enqueue(new[] { first, second, third });

        var peeked = buffer.Peek(300);

        Assert.Equal(2, peeked.Length);
        Assert.Equal(first, peeked[0].Value);
        Assert.Equal(second, peeked[1].Value);
    }
}
public void EntriesOverLimitArePurgedFifo()
{
    // When the buffer's byte cap is exceeded, the oldest entries are purged
    // first. NOTE(review): three 140-byte payloads total only 420 bytes, so
    // the 4096-byte cap evidently includes storage overhead — confirm against
    // LogBuffer's size accounting.
    using (var temp = TempFolder.ForCaller())
    using (var buffer = new LogBuffer(temp.AllocateFilename("mdb"), 4096))
    {
        byte[] oldest = Some.Bytes(140), middle = Some.Bytes(140), newest = Some.Bytes(140);
        buffer.Enqueue(new[] { oldest, middle, newest });

        var survivors = buffer.Peek((int)DefaultBufferSize);

        Assert.Equal(2, survivors.Length);
        Assert.Equal(middle, survivors[0].Value);
        Assert.Equal(newest, survivors[1].Value);
    }
}
// Timer callback: drains the log buffer to the ingestion endpoint in batches,
// falling back to single-event sends to isolate a payload the server rejects.
// Always reschedules itself (unless unloading) via the finally block.
// NOTE(review): uses blocking .Result on PostAsync/ReadAsStringAsync —
// presumably acceptable because this runs on a dedicated timer thread; verify.
void OnTick()
{
    try
    {
        // While > 0, we are sending events one at a time to locate a payload
        // the server rejected out of a batch.
        var sendingSingles = 0;
        do
        {
            var available = _logBuffer.Peek((int)_outputConfig.RawPayloadLimitBytes);
            if (available.Length == 0)
            {
                // For whatever reason, there's nothing waiting to send. This means we should try connecting again at the
                // regular interval, so mark the attempt as successful.
                _connectionSchedule.MarkSuccess();
                break;
            }

            Stream payload;
            ulong lastIncluded;
            // In single-event mode (sendingSingles > 0) the payload holds only
            // the first available event; lastIncluded is its buffer key.
            MakePayload(available, sendingSingles > 0, out payload, out lastIncluded);

            // Wrap the stream so the HttpClient pipeline can't close it — we
            // may need to re-read it below for error reporting.
            var content = new StreamContent(new UnclosableStreamWrapper(payload));
            content.Headers.ContentType = new MediaTypeHeaderValue("application/json") { CharSet = Encoding.UTF8.WebName };

            if (!string.IsNullOrWhiteSpace(_outputConfig.ApiKey))
            {
                content.Headers.Add(ApiKeyHeaderName, _outputConfig.ApiKey);
            }

            var result = _httpClient.PostAsync(BulkUploadResource, content).Result;
            if (result.IsSuccessStatusCode)
            {
                // Shipped: acknowledge everything up to lastIncluded and, if
                // probing singly, count down toward resuming batch mode.
                _connectionSchedule.MarkSuccess();
                _logBuffer.Dequeue(lastIncluded);
                if (sendingSingles > 0)
                {
                    sendingSingles--;
                }
            }
            else if (result.StatusCode == HttpStatusCode.BadRequest || result.StatusCode == HttpStatusCode.RequestEntityTooLarge)
            {
                // The connection attempt was successful - the payload we sent was the problem.
                _connectionSchedule.MarkSuccess();

                if (sendingSingles != 0)
                {
                    // A single event was rejected: log its full text, drop it,
                    // and return to batch mode.
                    payload.Position = 0;
                    var payloadText = new StreamReader(payload, Encoding.UTF8).ReadToEnd();
                    Log.Error("HTTP shipping failed with {StatusCode}: {Result}; payload was {InvalidPayload}", result.StatusCode, result.Content.ReadAsStringAsync().Result, payloadText);
                    _logBuffer.Dequeue(lastIncluded);
                    sendingSingles = 0;
                }
                else
                {
                    // Unscientific (should "binary search" in batches) but sending the next
                    // hundred events singly should flush out the problematic one.
                    sendingSingles = 100;
                }
            }
            else
            {
                // Server/connection failure: back off and retry later without
                // dequeuing, so nothing is lost.
                _connectionSchedule.MarkFailure();
                Log.Error("Received failed HTTP shipping result {StatusCode}: {Result}", result.StatusCode, result.Content.ReadAsStringAsync().Result);
                break;
            }
        } while (true);
    }
    catch (Exception ex)
    {
        Log.Error(ex, "Exception while sending a batch from the log shipper");
        _connectionSchedule.MarkFailure();
    }
    finally
    {
        // Re-arm the timer unless the shipper is being torn down.
        lock (_stateLock)
        {
            if (!_unloading)
            {
                SetTimer();
            }
        }
    }
}