/// <summary>
/// Patrolling thread. Keeps tabs on the PutLogEvent request and the
/// concurrent queue: drains queued messages into the batch repository
/// and sends batches when the size or count constraints are reached.
/// </summary>
/// <param name="token">Cancellation token that stops the monitor loop.</param>
private async Task Monitor(CancellationToken token)
{
    try
    {
        token.ThrowIfCancellationRequested();
        if (_currentStreamName == null)
        {
            await LogEventTransmissionSetup(token).ConfigureAwait(false);
        }

        while (!token.IsCancellationRequested)
        {
            if (!_pendingMessageQueue.IsEmpty)
            {
                // Drain everything currently queued into the batch repository.
                while (_pendingMessageQueue.TryDequeue(out InputLogEvent ev))
                {
                    // See if the new message would cause the current batch to violate
                    // the size constraint. If so, send the current batch now before
                    // adding more messages to the batch to send.
                    if (_repo.IsSizeConstraintViolated(ev.Message))
                    {
                        await SendMessages(token).ConfigureAwait(false);
                    }

                    _repo.AddMessage(ev);
                    if (_repo.ShouldSendRequest(_config.MaxQueuedMessages) && !_isTerminated)
                    {
                        await SendMessages(token).ConfigureAwait(false);
                    }
                }
            }
            else
            {
                // If the logger is being terminated and all the messages have been
                // sent, exit out of the loop. If there are messages, keep pushing
                // the remaining messages before the process dies.
                if (_isTerminated && _repo._request.LogEvents.Count == 0)
                {
                    break;
                }

                // Fix: pass the token so cancellation interrupts the sleep instead
                // of waiting out the full MonitorSleepTime before the loop condition
                // is re-checked.
                await Task.Delay(Convert.ToInt32(_config.MonitorSleepTime.TotalMilliseconds), token).ConfigureAwait(false);

                if (_repo.ShouldSendRequest(_config.MaxQueuedMessages))
                {
                    await SendMessages(token).ConfigureAwait(false);
                }
            }
        }
    }
    catch (OperationCanceledException oc)
    {
        // Cancellation is logged and rethrown so the owner of the task observes it.
        LogLibraryError(oc, _config.LibraryLogFileName);
        throw;
    }
    catch (Exception e)
    {
        // Any other failure ends the monitor loop after being logged.
        LogLibraryError(e, _config.LibraryLogFileName);
    }
}
/// <summary>
/// Patrolling thread. Keeps tabs on the PutLogEvent request and the
/// concurrent queue: dequeues pending messages into the batch repository,
/// sends a batch when the size/count constraints say so, and drains the
/// remaining batch before exiting once the logger is terminated.
/// </summary>
/// <param name="token">Cancellation token; an OperationCanceledException ends the method silently.</param>
private async Task Monitor(CancellationToken token)
{
    try
    {
        if (_currentStreamName == null)
        {
            await LogEventTransmissionSetup(token).ConfigureAwait(false);
        }
        while (true)
        {
            try
            {
                // Drain everything currently queued into the batch repository.
                while (_pendingMessageQueue.TryDequeue(out var inputLogEvent))
                {
                    // See if the new message would cause the current batch to violate
                    // the size constraint. If so, send the current batch now before
                    // adding more to the batch of messages to send.
                    if (_repo.IsSizeConstraintViolated(inputLogEvent.Message))
                    {
                        await SendMessages(token).ConfigureAwait(false);
                    }
                    _repo.AddMessage(inputLogEvent);
                }
                if (_isTerminated)
                {
                    // If the logger is being terminated and all the messages have been sent, exit out of loop.
                    // If there are messages keep pushing the remaining messages before the process dies.
                    // NOTE(review): if the tail batch never satisfies ShouldSendRequest,
                    // this loop keeps spinning with a delay each pass — confirm
                    // ShouldSendRequest accounts for termination.
                    if (_repo._request.LogEvents.Count == 0)
                    {
                        break;
                    }
                }
                // Check if we have enough data to warrant making the webcall
                if (_repo.ShouldSendRequest(_config.MaxQueuedMessages))
                {
                    await SendMessages(token).ConfigureAwait(false);
                }
                // Sleep before polling the queue again; the token makes the delay
                // cancellable so shutdown is not held up by the full sleep interval.
                await Task.Delay(Convert.ToInt32(_config.MonitorSleepTime.TotalMilliseconds), token);
            }
            catch (Exception ex) when(!(ex is OperationCanceledException))
            {
                // We don't want to kill the main monitor loop. We will simply log the error, then continue.
                // If it is an OperationCancelledException, the filter lets it escape to the outer catch.
                LogLibraryError(ex, _config.LibraryLogFileName);
            }
        }
    }
    catch (OperationCanceledException)
    {
        //Just exit the method
    }
}
/// <summary>
/// Patrolling thread. Keeps tabs on the PutLogEvent request and the
/// concurrent queue: first retries stream setup until it succeeds or the
/// token is cancelled, then repeatedly drains the queue into the batch
/// repository, sending when constraints are met or a flush is requested.
/// Disposes the client before returning on cancellation.
/// </summary>
/// <param name="token">Cancellation token; cancellation disposes the client and returns.</param>
private async Task Monitor(CancellationToken token)
{
    // Set when a flush has been requested via _flushTriggerEvent; forces a send
    // of whatever is batched on the next pass, then signals _flushCompletedEvent.
    bool executeFlush = false;

    // Retry stream setup until it succeeds or we are cancelled.
    while (_currentStreamName == null && !token.IsCancellationRequested)
    {
        try
        {
            _currentStreamName = await LogEventTransmissionSetup(token).ConfigureAwait(false);
        }
        catch (OperationCanceledException ex)
        {
            // Only worth reporting if messages would be lost on the floor.
            if (!_pendingMessageQueue.IsEmpty)
            {
                LogLibraryServiceError(ex);
            }
            if (token.IsCancellationRequested)
            {
                _client.Dispose();
                return;
            }
        }
        catch (Exception ex)
        {
            // We don't want to kill the main monitor loop. We will simply log the error, then continue.
            // NOTE(review): Math.Max(100, DateTime.UtcNow.Second * 10) yields a
            // 100–590 ms pause that varies with the wall-clock second — presumably
            // intended as cheap retry jitter; confirm this is deliberate.
            LogLibraryServiceError(ex);
            await Task.Delay(Math.Max(100, DateTime.UtcNow.Second * 10), token);
        }
    }

    // Main patrol loop: runs until the token is cancelled.
    while (!token.IsCancellationRequested)
    {
        try
        {
            // Drain everything currently queued into the batch repository.
            while (_pendingMessageQueue.TryDequeue(out var inputLogEvent))
            {
                // See if the new message would cause the current batch to violate
                // the size constraint. If so, send the current batch now before
                // adding more to the batch of messages to send. The count>0 guard
                // avoids sending an empty batch for a single oversized message.
                if (_repo.CurrentBatchMessageCount > 0 && _repo.IsSizeConstraintViolated(inputLogEvent.Message))
                {
                    await SendMessages(token).ConfigureAwait(false);
                }
                _repo.AddMessage(inputLogEvent);
            }
            // Send when the batch is large/old enough, or when a flush was
            // requested and there is anything at all to push out.
            if (_repo.ShouldSendRequest(_config.MaxQueuedMessages) || (executeFlush && !_repo.IsEmpty))
            {
                await SendMessages(token).ConfigureAwait(false);
            }
            if (executeFlush)
            {
                _flushCompletedEvent.Set();
            }
            // Sleep until either the monitor interval elapses or a flush is
            // triggered; the wait result tells us which happened.
            executeFlush = await _flushTriggerEvent.WaitAsync(TimeSpan.FromMilliseconds(_config.MonitorSleepTime.TotalMilliseconds), token);
        }
        catch (OperationCanceledException ex)
        {
            // Spurious cancellation (token not set) or cancellation with data
            // still pending is worth logging before we shut down.
            if (!token.IsCancellationRequested || !_repo.IsEmpty || !_pendingMessageQueue.IsEmpty)
            {
                LogLibraryServiceError(ex);
            }
            _client.Dispose();
            return;
        }
        catch (Exception ex)
        {
            // We don't want to kill the main monitor loop. We will simply log the error, then continue.
            LogLibraryServiceError(ex);
        }
    }
}