/// <summary>
/// Integration test: verifies that the sequence token embedded in an
/// <see cref="InvalidSequenceTokenException"/> message matches the
/// NextSequenceToken returned by the preceding PutLogEvents call.
/// </summary>
public async Task RegexTest()
{
    var logGroupName = "RegexTest";
    var logStreamName = "TestMessage";
    // '.' escaped so the pattern matches only the literal service message.
    Regex invalid_sequence_token_regex = new Regex(@"The given sequenceToken is invalid\. The next expected sequenceToken is: (\d+)");
    client = new AmazonCloudWatchLogsClient(RegionEndpoint.USWest2);
    await client.CreateLogGroupAsync(new CreateLogGroupRequest { LogGroupName = logGroupName });
    _testFixure.LogGroupNameList.Add(logGroupName);
    await client.CreateLogStreamAsync(new CreateLogStreamRequest { LogGroupName = logGroupName, LogStreamName = logStreamName });

    var putlogEventsRequest = new PutLogEventsRequest
    {
        LogGroupName = logGroupName,
        LogStreamName = logStreamName,
        LogEvents = new List<InputLogEvent> { new InputLogEvent { Timestamp = DateTime.Now, Message = "Message1" } }
    };
    var response = await client.PutLogEventsAsync(putlogEventsRequest);

    try
    {
        // Second put deliberately reuses the request without a sequence token,
        // which the service rejects with InvalidSequenceTokenException.
        putlogEventsRequest.LogEvents = new List<InputLogEvent> { new InputLogEvent { Timestamp = DateTime.Now, Message = "Message2" } };
        await client.PutLogEventsAsync(putlogEventsRequest);
    }
    catch (InvalidSequenceTokenException ex)
    {
        var regexResult = invalid_sequence_token_regex.Match(ex.Message);
        // BUG FIX: the original only asserted inside "if (regexResult.Success)",
        // so a non-matching message silently skipped all verification and the
        // test passed without testing anything.
        Assert.True(regexResult.Success, "Exception message did not match the expected format: " + ex.Message);
        Assert.Equal(regexResult.Groups[1].Value, response.NextSequenceToken);
    }
}
/// <summary>
/// Best-effort publish of a single message to a CloudWatch Logs stream,
/// looking up the stream's current upload sequence token first.
/// All failures are deliberately swallowed.
/// </summary>
/// <param name="groupName">CloudWatch Logs log group name.</param>
/// <param name="streamName">CloudWatch Logs log stream name.</param>
/// <param name="msg">Message body to publish.</param>
/// <returns>A task callers may await to observe completion.</returns>
private async Task PutLog(string groupName, string streamName, string msg)
{
    // BUG FIX: was 'async void' — exceptions were unobservable and callers
    // could never await completion. 'async Task' is backward compatible for
    // existing fire-and-forget call sites.
    try
    {
        var req = new DescribeLogStreamsRequest(groupName) { LogStreamNamePrefix = streamName };
        var resp = await _logsClient.DescribeLogStreamsAsync(req);
        var token = resp.LogStreams.FirstOrDefault(s => s.LogStreamName == streamName)?.UploadSequenceToken;

        var ie = new InputLogEvent { Message = msg, Timestamp = DateTime.UtcNow };
        var request = new PutLogEventsRequest(groupName, streamName, new List<InputLogEvent> { ie });
        if (!string.IsNullOrEmpty(token))
        {
            request.SequenceToken = token;
        }
        await _logsClient.PutLogEventsAsync(request);
    }
    catch (Exception)
    {
        // ignored: logging is best-effort and must never take down the caller
    }
}
/// <summary>
/// Drains the in-memory message buffer and pushes its contents to
/// CloudWatch Logs as one PutLogEvents batch, updating the shared
/// sequence token under SEQUENCE_LOCK.
/// </summary>
private static void FlushBufferToCloudWatchLogs()
{
    var request = new PutLogEventsRequest
    {
        LogGroupName = _logGroup,
        LogStreamName = _logStream
    };

    lock (BUFFER_LOCK)
    {
        if (_cwlBuffer.Count == 0)
        {
            return;
        }
        foreach (var message in _cwlBuffer)
        {
            // CloudWatch event timestamps should be UTC; DateTime.Now could
            // shift events by the machine's UTC offset.
            request.LogEvents.Add(
                new InputLogEvent { Message = message, Timestamp = DateTime.UtcNow });
        }
        _cwlBuffer.Clear();
    }

    lock (SEQUENCE_LOCK)
    {
        // BUG FIX: the original captured _sequenceToken while constructing
        // the request, BEFORE acquiring SEQUENCE_LOCK — a concurrent flush
        // could update the token in between, causing an
        // InvalidSequenceTokenException. Read it inside the lock instead.
        request.SequenceToken = _sequenceToken;
        var response = _cwlClient.PutLogEventsAsync(request).Result;
        _sequenceToken = response.NextSequenceToken;
    }
}
/// <summary>
/// Initiates the asynchronous execution of the PutLogEvents operation.
/// <seealso cref="Amazon.CloudWatchLogs.IAmazonCloudWatchLogs"/>
/// </summary>
///
/// <param name="request">Container for the necessary parameters to execute the PutLogEvents operation.</param>
/// <param name="cancellationToken">
/// A cancellation token that can be used by other objects or threads to receive notice of cancellation.
/// </param>
/// <returns>The task object representing the asynchronous operation.</returns>
public Task<PutLogEventsResponse> PutLogEventsAsync(PutLogEventsRequest request, CancellationToken cancellationToken = default(CancellationToken))
{
    // Dispatch through the generic pipeline with the operation-specific
    // marshaller/unmarshaller pair.
    return Invoke<IRequest, PutLogEventsRequest, PutLogEventsResponse>(
        request,
        new PutLogEventsRequestMarshaller(),
        PutLogEventsResponseUnmarshaller.Instance,
        signer,
        cancellationToken);
}
/// <summary>
/// Synchronously executes the PutLogEvents operation through the
/// generic invoke pipeline.
/// </summary>
/// <param name="request">The PutLogEvents request to send.</param>
/// <returns>The service response.</returns>
internal PutLogEventsResponse PutLogEvents(PutLogEventsRequest request)
{
    var requestMarshaller = new PutLogEventsRequestMarshaller();
    var responseUnmarshaller = PutLogEventsResponseUnmarshaller.Instance;
    return Invoke<PutLogEventsRequest, PutLogEventsResponse>(request, requestMarshaller, responseUnmarshaller);
}
/// <summary>
/// Sends a single log message to CloudWatch Logs, creating the log
/// group/stream on demand and recovering from a stale sequence token.
/// Gives up after three attempts.
/// </summary>
/// <param name="group_name">Target log group.</param>
/// <param name="stream_name">Target log stream.</param>
/// <param name="body">Message text to publish.</param>
void CwLog(string group_name, string stream_name, string body)
{
    using (var cwl = new AmazonCloudWatchLogsClient(aws_credentials, aws_region))
    {
        int attempts = 0;
        do
        {
            try
            {
                attempts++;
                var request = new PutLogEventsRequest(group_name, stream_name,
                    new List<InputLogEvent>
                    {
                        // CloudWatch expects UTC timestamps; DateTime.Now could
                        // shift events by the local UTC offset.
                        new InputLogEvent { Timestamp = DateTime.UtcNow, Message = body }
                    });
                request.SequenceToken = cw_logstream_sequence_token;
                var ret = cwl.PutLogEvents(request);
                // Update sequence token for next put
                cw_logstream_sequence_token = ret.NextSequenceToken;
                break; // success
            }
            catch (Amazon.CloudWatchLogs.Model.ResourceNotFoundException e)
            {
                Console.WriteLine($"type: {e.ErrorType} code: {e.ErrorCode} source: {e.Source} ");
                Console.WriteLine(e.Data);
                // Log group doesn't exist. Let's create. This only needs to be done once
                CreateCwLogGroup(cw_log_group_name);
                CreateCwLogSream(cw_log_stream_name);
                // BUG FIX: the original called Task.Delay(1000) without awaiting
                // or waiting it, which does not pause at all. Block for the
                // intended second before retrying (this method is synchronous).
                Task.Delay(1000).Wait();
            }
            catch (Amazon.CloudWatchLogs.Model.InvalidSequenceTokenException e)
            {
                // Each log event must contain a sequence value, unless it's the first event
                // https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/CloudWatchLogs/TPutLogEventsRequest.html
                cw_logstream_sequence_token = e.ExpectedSequenceToken;
                // Now that we have the sequence token, try one more time.
                Console.WriteLine("Exception caught for unexpected squence token. It's now updated and will retry.");
                // Same no-op Task.Delay bug fixed here.
                Task.Delay(1000).Wait();
            }
            catch (Exception e)
            {
                Console.WriteLine(e.ToString());
                Console.WriteLine("This exception is not handled, will not retry :-( ");
                break;
            }
            if (attempts > 2)
            {
                Console.WriteLine("Unable to send log event retries exhausted.");
                break;
            }
        } while (true);
    }
}
/// <summary>
/// Uploads a batch of log events to CloudWatch Logs. Invoked only from a
/// single thread by the PeriodicBatchSink, so no synchronization is needed.
/// </summary>
/// <param name="events">The log events to upload.</param>
protected override async Task EmitBatchAsync(IEnumerable<LogEvent> events)
{
    try
    {
        await EnsureInitialized();
    }
    catch (Exception ex)
    {
        Console.WriteLine("Error initializing log stream. No logs will be sent to AWS CloudWatch.");
        Console.WriteLine(ex);
        return;
    }

    try
    {
        // CloudWatch requires events within a single upload to be ordered
        // by timestamp.
        var orderedEvents = events
            .OrderBy(e => e.Timestamp)
            .Select(e => new InputLogEvent
            {
                Message = renderer.RenderLogEvent(e),
                Timestamp = e.Timestamp.UtcDateTime
            })
            .ToList();

        var uploadRequest = new PutLogEventsRequest(options.LogGroupName, logStreamName, orderedEvents)
        {
            SequenceToken = nextSequenceToken
        };

        var uploadResponse = await cloudWatchClient.PutLogEventsAsync(uploadRequest);

        if (!uploadResponse.HttpStatusCode.IsSuccessStatusCode())
        {
            // Roll over to a fresh log stream before surfacing the failure,
            // in case the current stream is in a bad state.
            UpdateLogStreamName();
            throw new Exception(
                $"Tried to send logs, but failed with status code '{uploadResponse.HttpStatusCode}' and data '{uploadResponse.ResponseMetadata.FlattenedMetaData()}'.");
        }

        // The returned token must accompany the next upload.
        nextSequenceToken = uploadResponse.NextSequenceToken;
    }
    catch (Exception ex)
    {
        try
        {
            traceLogger.Error("Error sending logs. No logs will be sent to AWS CloudWatch. Error was {0}", ex);
        }
        catch (Exception)
        {
            // even the trace logger failed - give up trying to report anything
        }
    }
}
/// <summary>
/// Uploads a batch of events to the configured log stream, retrying up to
/// 10 times and resynchronizing the sequence token when the service
/// reports it is stale or the data was already accepted.
/// </summary>
/// <param name="events">Events to upload.</param>
/// <exception cref="ApplicationException">Thrown when the log stream does not exist.</exception>
public void AddLogRequest(List<InputLogEvent> events)
{
    var putLogEventsRequest = new PutLogEventsRequest(_logGroup, _logStream, events);
    var logstream = _client.DescribeLogStreams(new DescribeLogStreamsRequest(_logGroup))
        .LogStreams
        .Where(x => x.LogStreamName == _logStream)
        .ToList();
    if (!logstream.Any())
    {
        throw new ApplicationException("LogStream doesn't exist");
    }
    _nextSequence = logstream.Single().UploadSequenceToken;
    lock (LockObject)
    {
        AmazonWebServiceResponse ret = null;
        for (var i = 0; i < 10 && ret == null; i++)
        {
            try
            {
                putLogEventsRequest.SequenceToken = _nextSequence;
                var putLogEventsResponse = _client.PutLogEvents(putLogEventsRequest);
                if (putLogEventsResponse == null)
                {
                    continue;
                }
                _nextSequence = putLogEventsResponse.NextSequenceToken;
                ret = putLogEventsResponse;
            }
            catch (OperationAbortedException)
            {
                // Another writer interleaved; just retry.
            }
            catch (DataAlreadyAcceptedException e)
            {
                // The SDK exposes the expected token directly; scraping the
                // message with a regex (the original approach) is kept only
                // as a fallback in case the property is unset.
                _nextSequence = e.ExpectedSequenceToken ?? ExtractSequenceToken(e.Message);
            }
            catch (InvalidSequenceTokenException e)
            {
                _nextSequence = e.ExpectedSequenceToken ?? ExtractSequenceToken(e.Message);
            }
        }
    }
}

/// <summary>
/// Extracts a 20+ digit sequence token from an exception message,
/// or null when no token is present.
/// </summary>
private static string ExtractSequenceToken(string message)
{
    var matchCollection = Regex.Matches(message, @"[0-9]{20,}");
    return matchCollection.Count > 0 ? matchCollection[0].Value : null;
}
/// <inheritdoc/>
protected async Task<PutLogEventsResponse> SendRequestAsync(PutLogEventsRequest putDataRequest)
{
    // When the failover sink is enabled, kick off the secondary-region
    // failover check without awaiting it (fire-and-forget by design).
    if (_failoverSinkEnabled)
    {
        // Failover to Secondary Region
        _ = FailOverToSecondaryRegion(_throttle);
    }

    // Stamp the request with the current CloudWatch Logs sequence token
    // before dispatching it.
    putDataRequest.SequenceToken = _sequenceToken;

    var response = await CloudWatchLogsClient.PutLogEventsAsync(putDataRequest);
    return response;
}
/// <summary>
/// Uploads the given events to the configured log group/stream and
/// returns the sequence token required by the next dispatch.
/// </summary>
/// <param name="logEvents">Events to publish.</param>
/// <param name="sequenceToken">Token from the previous call (null for a fresh stream).</param>
/// <returns>The NextSequenceToken returned by CloudWatch Logs.</returns>
public string DispatchLogEvents(List<InputLogEvent> logEvents, string sequenceToken)
{
    Console.WriteLine("Dispatching Log Events.");
    var request = new PutLogEventsRequest
    {
        LogGroupName = _applicationVars.LogGroup,
        LogStreamName = _applicationVars.LogStream,
        LogEvents = logEvents,
        SequenceToken = sequenceToken
    };
    // GetAwaiter().GetResult() instead of .Result so a service failure
    // surfaces as the original exception rather than an AggregateException.
    var response = _client.PutLogEventsAsync(request).GetAwaiter().GetResult();
    return response.NextSequenceToken;
}
/// <summary>
/// Synchronous wrapper over <see cref="PutLogEventsAsync"/> that surfaces
/// the operation's original exception (with its stack trace preserved)
/// instead of an <see cref="AggregateException"/>.
/// </summary>
/// <param name="request">The PutLogEvents request to send.</param>
/// <returns>The service response.</returns>
internal PutLogEventsResponse PutLogEvents(PutLogEventsRequest request)
{
    // GetAwaiter().GetResult() unwraps the AggregateException and rethrows
    // the faulting exception with its stack trace intact, replacing the
    // manual catch/ExceptionDispatchInfo.Capture dance (and its unreachable
    // 'return null').
    return PutLogEventsAsync(request).GetAwaiter().GetResult();
}
/// <summary>
/// Background worker loop: dequeues log events one at a time and pushes
/// them to CloudWatch Logs, tracking the stream's sequence token.
/// Runs until the cancellation token is signalled; rethrows cancellation.
/// </summary>
/// <param name="cancellationToken">Signals the worker to stop.</param>
private async Task Worker(CancellationToken cancellationToken)
{
    string sequenceToken = await GetSequenceToken(LogGroupName, LogStreamName);
    while (!cancellationToken.IsCancellationRequested)
    {
        try
        {
            cancellationToken.ThrowIfCancellationRequested();
            LogEvent logEvent = await _queue.DequeueAsync(cancellationToken);
            var request = new PutLogEventsRequest(LogGroupName, LogStreamName,
                new List<InputLogEvent>
                {
                    new InputLogEvent
                    {
                        Message = logEvent.RenderMessage(),
                        Timestamp = logEvent.Timestamp.UtcDateTime,
                    }
                });
            request.SequenceToken = sequenceToken;
            var resp = await _client.PutLogEventsAsync(request, cancellationToken);
            sequenceToken = resp.NextSequenceToken;
        }
        catch (OperationCanceledException e)
        {
            Console.Error.WriteLine($"Worker canceled: {e}.");
            throw;
        }
        catch (InvalidSequenceTokenException e)
        {
            // BUG FIX: the original swallowed this with the generic handler
            // and kept the stale token, so every subsequent put failed with
            // the same exception. Resynchronize from the exception payload.
            sequenceToken = e.ExpectedSequenceToken;
            Console.Error.WriteLine($"Worker exception occured: {e}.");
        }
        catch (Exception e)
        {
            Console.Error.WriteLine($"Worker exception occured: {e}.");
        }
    }
    Console.Error.WriteLine("Worker ended.");
}
/// <summary>
/// Demo program: ensures a test log group/stream exists, publishes three
/// test events (persisting the next sequence token between runs in a text
/// file), then exercises TestMetricFilter with the pattern "Error".
/// </summary>
static void Main(string[] args)
{
    var logClient = new AmazonCloudWatchLogsClient();
    // Add a new log group for testing
    const string newLogGroupName = "NewLogGroup";
    DescribeLogGroupsResponse dlgr = logClient.DescribeLogGroups();
    List<LogGroup> groups = dlgr.LogGroups;
    LogGroup lg = new LogGroup();
    lg.LogGroupName = newLogGroupName;
    // Look for our new log group name to determine if we need to do setup
    LogGroup result = groups.Find(bk => bk.LogGroupName == newLogGroupName);
    if (result != null)
    {
        Console.WriteLine(result.LogGroupName + " found");
    }
    else
    {
        // Haven't seen this log group, set it up
        logClient.CreateLogGroup(new CreateLogGroupRequest(newLogGroupName));
        // Create a file to save the next SequenceToken in.
        // BUG FIX: the original never closed the StreamWriter returned by
        // CreateText, leaking the handle and blocking the later OpenText.
        File.CreateText("..\\..\\" + lg.LogGroupName + ".txt").Close();
        logClient.CreateLogStream(new CreateLogStreamRequest(lg.LogGroupName, newLogGroupName));
    }
    string tokenFile = "";
    try
    {
        Console.WriteLine(lg.LogGroupName);
        // Pick up the next sequence token from the last run
        tokenFile = lg.LogGroupName;
        string sequenceToken;
        using (StreamReader sr = File.OpenText("..\\..\\" + tokenFile + ".txt"))
        {
            sequenceToken = sr.ReadLine();
        }
        lg.RetentionInDays = 30;
        string groupName = lg.LogGroupName;
        TestMetricFilterRequest tmfr = new TestMetricFilterRequest();
        // BUG FIX: the original reused ONE InputLogEvent instance, mutating
        // its Message and adding the same reference three times — all three
        // uploaded events carried the last message. Each event needs its
        // own instance.
        DateTime dt = new DateTime(2017, 01, 11);
        List<InputLogEvent> logEvents = new List<InputLogEvent>(3)
        {
            new InputLogEvent { Message = "Test Event 1", Timestamp = dt },
            new InputLogEvent { Message = "Test Event 2", Timestamp = dt },
            new InputLogEvent { Message = "This message also contains an Error", Timestamp = dt }
        };
        PutLogEventsRequest pler = new PutLogEventsRequest(groupName, tokenFile, logEvents);
        pler.SequenceToken = sequenceToken; // use last sequence token
        PutLogEventsResponse plerp = logClient.PutLogEvents(pler);
        Console.WriteLine("Next sequence token = " + plerp.NextSequenceToken);
        // BUG FIX: File.OpenWrite does not truncate, so a shorter token left
        // stale trailing bytes in the file. WriteAllText truncates first.
        File.WriteAllText("..\\..\\" + tokenFile + ".txt", plerp.NextSequenceToken);

        tmfr.LogEventMessages = new List<string>(1) { "Error" };
        tmfr.FilterPattern = "Error";
        TestMetricFilterResponse tmfrp = logClient.TestMetricFilter(tmfr);
        List<MetricFilterMatchRecord> results = tmfrp.Matches;
        Console.WriteLine("Found " + results.Count.ToString() + " match records");
        foreach (MetricFilterMatchRecord mfmr in results)
        {
            Console.WriteLine("Event Message = " + mfmr.EventMessage);
        }
        Console.WriteLine("Metric filter test done");
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
    }
}
/// <summary>
/// Endless pump: five parallel workers drain the shared message queue and
/// forward entries to CloudWatch Logs, caching per-stream sequence tokens
/// and provisioning missing groups/streams on demand. Sleeps one second
/// between rounds. Never returns.
/// </summary>
internal void ForwardLogs()
{
    do
    {
        Action pushLogsToCloudWatchAction = () =>
        {
            LogMessageDTO logMessageObj;
            while (LogService.taskQueue.TryDequeue(out logMessageObj))
            {
                try
                {
                    using (IAmazonCloudWatchLogs logsclient = Amazon.AWSClientFactory.CreateAmazonCloudWatchLogsClient(this.cloudWatchAccessKey, this.cloudWatchSecretKey, this.cloudWatchRegion))
                    {
                        string logStreamName = string.IsNullOrWhiteSpace(logMessageObj.ApplicationAlias) ? "Custom" : logMessageObj.ApplicationAlias;

                        // put the object into JSON format and send it to CloudWatch
                        List<InputLogEvent> logEvents = new List<InputLogEvent>();
                        InputLogEvent logEntry = new InputLogEvent();
                        logEntry.Message = JsonConvert.SerializeObject(logMessageObj);
                        logEntry.Timestamp = logMessageObj.Timestamp;
                        logEvents.Add(logEntry);

                        PutLogEventsRequest request = new PutLogEventsRequest(this.logGroupName, logStreamName, logEvents);
                        PutLogEventsResponse response = null;
                        for (int i = 0; i < 5; ++i)
                        {
                            try
                            {
                                lock (this.criticalSection)
                                {
                                    // if we have a token, set it
                                    if (LogStreamTokenMap.ContainsKey(logStreamName))
                                    {
                                        request.SequenceToken = LogStreamTokenMap[logStreamName];
                                    }

                                    // put the logs and cache the token for the next submission
                                    // (indexer add-or-update replaces the original
                                    // ContainsKey/Add branching — same behavior).
                                    response = logsclient.PutLogEvents(request);
                                    LogStreamTokenMap[logStreamName] = response.NextSequenceToken;
                                }
                                // if we successfully pushed the logs, exit the loop, otherwise we will exit in 5 tries
                                break;
                            }
                            catch (InvalidSequenceTokenException)
                            {
                                // we don't have the right token for the next sequence in the stream, so get it again
                                // in fact we will refresh all tokens for all streams
                                var logstreamsrequest = new DescribeLogStreamsRequest(this.logGroupName);
                                var logStreamResponse = logsclient.DescribeLogStreams(logstreamsrequest);
                                var logstreamsList = logStreamResponse.LogStreams;
                                lock (this.criticalSection)
                                {
                                    foreach (var logstream in logstreamsList)
                                    {
                                        LogStreamTokenMap[logstream.LogStreamName] = logstream.UploadSequenceToken;
                                    }
                                }
                            }
                            catch (ResourceNotFoundException)
                            {
                                // we likely introduced a new log stream that needs to be provisioned in CloudWatch
                                // ignore exceptions in creation
                                try
                                {
                                    logsclient.CreateLogGroup(new CreateLogGroupRequest(this.logGroupName));
                                }
                                catch (Exception) { }
                                try
                                {
                                    logsclient.CreateLogStream(new CreateLogStreamRequest(this.logGroupName, logStreamName));
                                    lock (this.criticalSection)
                                    {
                                        LogStreamTokenMap.Remove(logStreamName);
                                    }
                                }
                                catch (Exception) { }
                            }
                        }

                        // BUG FIX: response stays null when all five attempts
                        // failed; the original dereferenced it unconditionally
                        // and threw a NullReferenceException here, counting the
                        // failure as a generic exception instead of a failed forward.
                        // NOTE(review): ++ on these shared counters is not atomic
                        // across the parallel workers — consider Interlocked if
                        // they are fields.
                        if (response != null && response.HttpStatusCode == HttpStatusCode.OK)
                        {
                            ++LogService.successfulForwards;
                        }
                        else
                        {
                            ++LogService.failedForwards;
                        }
                    }
                }
                catch (Exception)
                {
                    // can't really log this exception to avoid cyclical logging and deadlocks
                    ++LogService.numExceptions;
                }
            }
        };

        // Start 5 parallel tasks to consume the logs from the queue
        // the problem here is that the logs might not be pushed in order to CloudWatch
        // However, that's ok since there might be multiple instances of this service and logs could be coming
        // in to all instances at the same time and we might push logs to CloudWatch out of order
        Parallel.Invoke(pushLogsToCloudWatchAction, pushLogsToCloudWatchAction, pushLogsToCloudWatchAction,
            pushLogsToCloudWatchAction, pushLogsToCloudWatchAction);

        // sleep before getting into another iteration of this loop to look for logs
        Thread.Sleep(1000);
    } while (true);
}
/// <summary>
/// Timer callback: drains the pending event collection and uploads the
/// batch to CloudWatch Logs, creating the log stream on first use.
/// On failure the drained events are handed to the restore routine; the
/// timer is restarted in all cases.
/// </summary>
/// <param name="sender">Timer source (unused).</param>
/// <param name="e">Elapsed-event data (unused).</param>
void Writer(object sender, ElapsedEventArgs e)
{
    var logEvents = new List<InputLogEvent>();
    try
    {
        // Drain everything currently queued.
        InputLogEvent item;
        while (items.TryTake(out item))
        {
            logEvents.Add(item);
        }

        if (logEvents.Count == 0)
        {
            return;
        }
        if (!Settings.Default.SendUsageData)
        {
            return;
        }

        using (var logs = new AmazonCloudWatchLogsClient(AwsKeys.AccessKey, AwsKeys.SecretKey, RegionEndpoint.APSoutheast2))
        {
            var request = new PutLogEventsRequest(AwsKeys.GroupName, LogStreamName, logEvents);
            var describeLogStreamsRequest = new DescribeLogStreamsRequest(AwsKeys.GroupName)
            {
                LogStreamNamePrefix = trackingId,
                Descending = true
            };
            var existingStream = logs.DescribeLogStreams(describeLogStreamsRequest)
                .LogStreams
                .FirstOrDefault(ls => ls.LogStreamName == LogStreamName);
            if (existingStream != null)
            {
                // Known stream: attach its current upload sequence token.
                request.SequenceToken = existingStream.UploadSequenceToken;
                checkResponse(logs.PutLogEvents(request));
            }
            else
            {
                // First upload: create the stream, then put without a token.
                checkResponse(logs.CreateLogStream(new CreateLogStreamRequest(AwsKeys.GroupName, LogStreamName)));
                checkResponse(logs.PutLogEvents(request));
            }
        }
    }
    catch (Exception ee)
    {
        AttempToRestoreErrors(logEvents, ee);
    }
    finally
    {
        if (timer != null)
        {
            timer.Start();
        }
    }
}
/// <summary>
/// Sends a batch of buffered records to CloudWatch Logs. The batch is
/// trimmed so that all sent records fall within _batchMaximumTimeSpan of
/// the first record; successfully processed records are bookmarked and
/// removed from the list so the caller can re-send whatever remains.
/// </summary>
/// <param name="records">Buffered records (oldest first); mutated in place as records are sent.</param>
/// <param name="stopToken">Cancellation token for the send operation.</param>
private async Task SendBatchAsync(List<Envelope<InputLogEvent>> records, CancellationToken stopToken)
{
    if (records.Count == 0)
    {
        return;
    }
    // resolve log group name
    var logGroup = ResolveVariables(_logGroup);
    // resolve log stream name, including the timestamp.
    var logStreamName = ResolveVariables(_logStream);
    logStreamName = ResolveTimestampInLogStreamName(logStreamName, records[0].Timestamp);
    var batchBytes = records.Sum(r => GetRecordSize(r));
    // Metrics are updated via Interlocked because other threads read them.
    Interlocked.Add(ref _recordsAttempted, records.Count);
    Interlocked.Add(ref _bytesAttempted, batchBytes);
    Interlocked.Exchange(ref _clientLatency, (long)records.Average(r => (DateTime.UtcNow - r.Timestamp).TotalMilliseconds));
    _logger.LogDebug("Sending {0} records {1} bytes to log group '{2}', log stream '{3}'.", records.Count, batchBytes, logGroup, logStreamName);
    // Determine how many leading records fit within the allowed time span
    // of the first record (CloudWatch limits a batch's timestamp spread).
    var sendCount = 0;
    if (records[records.Count - 1].Timestamp - records[0].Timestamp <= _batchMaximumTimeSpan)
    {
        sendCount = records.Count;
    }
    else
    {
        while (sendCount < records.Count && records[sendCount].Timestamp - records[0].Timestamp <= _batchMaximumTimeSpan)
        {
            sendCount++;
        }
    }
    var recordsToSend = records.Take(sendCount).Select(e => e.Data).ToList();
    var beforeSendTimestamp = Utility.GetElapsedMilliseconds();
    // If the sequence token is null, try to get it.
    // If the log stream doesn't exist, create it (by specifying "true" in the second parameter).
    // This should be the only place where a log stream is created.
    // This method will ensure that both the log group and stream exists,
    // so there is no need to handle a ResourceNotFound exception later.
    if (string.IsNullOrEmpty(_sequenceToken) || AWSConstants.NullString.Equals(_sequenceToken))
    {
        await GetSequenceTokenAsync(logGroup, logStreamName, true, stopToken);
    }
    var request = new PutLogEventsRequest
    {
        LogGroupName = logGroup,
        LogStreamName = logStreamName,
        SequenceToken = _sequenceToken,
        LogEvents = recordsToSend
    };
    try
    {
        // try sending the records and mark them as sent
        var response = await _cloudWatchLogsClient.PutLogEventsAsync(request, stopToken);
        Interlocked.Exchange(ref _latency, Utility.GetElapsedMilliseconds() - beforeSendTimestamp);
        // update sequence token
        _sequenceToken = response.NextSequenceToken;
        var recordsSent = recordsToSend.Count;
        var rejectedLogEventsInfo = response.RejectedLogEventsInfo;
        if (rejectedLogEventsInfo is not null)
        {
            var sb = new StringBuilder()
                .Append("CloudWatchLogsSink encountered some rejected logs.")
                .AppendFormat(" ExpiredLogEventEndIndex {0}", rejectedLogEventsInfo.ExpiredLogEventEndIndex)
                .AppendFormat(" TooNewLogEventStartIndex {0}", rejectedLogEventsInfo.TooNewLogEventStartIndex)
                .AppendFormat(" TooOldLogEventEndIndex {0}", rejectedLogEventsInfo.TooOldLogEventEndIndex);
            _logger.LogWarning(sb.ToString());
            // Subtract records rejected for being too new (everything from
            // the start index to the end of the batch).
            if (rejectedLogEventsInfo.TooNewLogEventStartIndex >= 0)
            {
                recordsSent -= recordsToSend.Count - rejectedLogEventsInfo.TooNewLogEventStartIndex;
            }
            // NOTE(review): 'tooOldIndex' takes the max of the TooNew start
            // index and the Expired end index — the name suggests the
            // too-old/expired cutoff, but mixing in TooNewLogEventStartIndex
            // looks suspicious; confirm against the intended accounting.
            var tooOldIndex = Math.Max(rejectedLogEventsInfo.TooNewLogEventStartIndex, rejectedLogEventsInfo.ExpiredLogEventEndIndex);
            if (tooOldIndex > 0)
            {
                recordsSent -= tooOldIndex;
            }
            if (recordsSent < 0)
            {
                recordsSent = 0;
            }
        }
        Interlocked.Add(ref _recordsSuccess, recordsSent);
        Interlocked.Add(ref _recordsFailedNonrecoverable, recordsToSend.Count - recordsSent);
        _logger.LogDebug("Successfully sent {0} records.", recordsSent);
        // save the bookmarks only for the records that were processed
        await SaveBookmarksAsync(records.Take(sendCount).ToList());
        records.RemoveRange(0, sendCount);
    }
    catch (AmazonCloudWatchLogsException acle)
    {
        Interlocked.Exchange(ref _latency, Utility.GetElapsedMilliseconds() - beforeSendTimestamp);
        // handle the types of exceptions we know how to handle
        // then return so that the records can be re-sent
        switch (acle)
        {
            case InvalidSequenceTokenException iste:
                // Stale token: adopt the token the service told us it expected.
                _sequenceToken = iste.ExpectedSequenceToken;
                break;
            case ResourceNotFoundException:
                // Stream/group vanished: recreate and refresh the token.
                await GetSequenceTokenAsync(logGroup, logStreamName, true, stopToken);
                break;
            case DataAlreadyAcceptedException:
                // this batch won't be accepted, skip it
                await SaveBookmarksAsync(records.Take(sendCount).ToList());
                records.RemoveRange(0, sendCount);
                break;
            case InvalidParameterException ipme:
                // this can happen due to a log event being too large
                // we already checked for this when creating the record,
                // so the best thing we can do here is skip this batch and move on
                _logger.LogError(ipme, "Error sending records to CloudWatchLogs");
                records.RemoveRange(0, sendCount);
                break;
            default:
                // for other exceptions we rethrow so the main loop can catch it
                throw;
        }
    }
}
/// <summary>
/// Publish the batch of log events to AWS CloudWatch Logs, retrying on
/// transient failures and resynchronizing the sequence token as needed.
/// </summary>
/// <param name="batch">The log events to upload; null or empty batches are ignored.</param>
private async Task PublishBatchAsync(List<InputLogEvent> batch)
{
    // BUG FIX: the original test was 'batch?.Count == 0', which is FALSE for
    // a null batch (null == 0 is false), so a null batch fell through and
    // was sent as a request with null LogEvents. Guard both cases explicitly.
    if (batch == null || batch.Count == 0)
    {
        return;
    }

    var success = false;
    var attemptIndex = 0;
    while (!success && attemptIndex <= options.RetryAttempts)
    {
        try
        {
            // creates the request to upload a new event to CloudWatch
            var putLogEventsRequest = new PutLogEventsRequest
            {
                LogGroupName = options.LogGroupName,
                LogStreamName = logStreamName,
                SequenceToken = nextSequenceToken,
                LogEvents = batch
            };

            // actually upload the event to CloudWatch
            var putLogEventsResponse = await cloudWatchClient.PutLogEventsAsync(putLogEventsRequest);

            // remember the next sequence token, which is required
            nextSequenceToken = putLogEventsResponse.NextSequenceToken;
            success = true;
        }
        catch (ServiceUnavailableException e)
        {
            // retry with back-off
            Debugging.SelfLog.WriteLine("Service unavailable. Attempt: {0} Error: {1}", attemptIndex, e);
            // NOTE(review): '.Milliseconds' is the millisecond COMPONENT of
            // the TimeSpan, not its total duration (TotalMilliseconds) —
            // confirm the intended back-off base.
            await Task.Delay(ErrorBackoffStartingInterval.Milliseconds * (int)Math.Pow(2, attemptIndex));
            attemptIndex++;
        }
        catch (ResourceNotFoundException e)
        {
            // no retry with back-off because..
            // if one of these fails, we get out of the loop.
            // if they're both successful, we don't hit this case again.
            Debugging.SelfLog.WriteLine("Resource was not found. Error: {0}", e);
            await CreateLogGroupAsync();
            await CreateLogStreamAsync();
        }
        catch (DataAlreadyAcceptedException e)
        {
            Debugging.SelfLog.WriteLine("Data already accepted. Attempt: {0} Error: {1}", attemptIndex, e);
            try
            {
                await UpdateLogStreamSequenceTokenAsync();
            }
            catch (Exception ex)
            {
                Debugging.SelfLog.WriteLine("Unable to update log stream sequence. Attempt: {0} Error: {1}", attemptIndex, ex);
                // try again with a different log stream
                UpdateLogStreamName();
                await CreateLogStreamAsync();
            }
            attemptIndex++;
        }
        catch (InvalidSequenceTokenException e)
        {
            Debugging.SelfLog.WriteLine("Invalid sequence token. Attempt: {0} Error: {1}", attemptIndex, e);
            try
            {
                await UpdateLogStreamSequenceTokenAsync();
            }
            catch (Exception ex)
            {
                Debugging.SelfLog.WriteLine("Unable to update log stream sequence. Attempt: {0} Error: {1}", attemptIndex, ex);
                // try again with a different log stream
                UpdateLogStreamName();
                await CreateLogStreamAsync();
            }
            attemptIndex++;
        }
        catch (Exception e)
        {
            Debugging.SelfLog.WriteLine("Unhandled exception. Error: {0}", e);
            break;
        }
    }
}
/// <summary>
/// Queues a deferred PutLogEvents call onto the shared request pipeline;
/// the actual upload happens when the queued delegate is executed.
/// </summary>
/// <param name="putLogEventsRequest">The fully-populated request to send later.</param>
internal void AddLogRequest(PutLogEventsRequest putLogEventsRequest)
{
    // The request is captured by the closure and sent when AddRequest's
    // consumer invokes the delegate.
    AddRequest(() => PutLogEvents(putLogEventsRequest));
}
/// <summary>
/// Sends a batch of records to CloudWatch Logs, resolving the target log
/// stream from the first record's timestamp, and retries on recoverable
/// errors (missing stream, stale sequence token, service unavailable).
/// Records that cannot be sent are requeued or counted as failed.
/// </summary>
/// <param name="records">Records to upload; ignored when null or empty.</param>
/// <param name="batchBytes">Total payload size of the batch, for metrics.</param>
protected override async Task OnNextAsync(List<Envelope<InputLogEvent>> records, long batchBytes)
{
    if (records == null || records.Count == 0)
    {
        return;
    }
    try
    {
        _logger?.LogDebug($"CloudWatchLogsSink client {this.Id} sending {records.Count} records {batchBytes} bytes.");
        DateTime timestamp = records[0].Timestamp;
        string logStreamName = ResolveTimestampInLogStreamName(timestamp);
        if (string.IsNullOrEmpty(_sequenceToken))
        {
            await GetSequenceTokenAsync(logStreamName);
        }
        var request = new PutLogEventsRequest
        {
            LogGroupName = _logGroupName,
            LogStreamName = logStreamName,
            SequenceToken = _sequenceToken,
            LogEvents = records
                .Select(e => e.Data)
                .OrderBy(e => e.Timestamp) //Added sort here in case messages are from multiple streams and they are not merged in order
                .ToList()
        };
        bool attemptedCreatingLogStream = false;
        int invalidSequenceTokenCount = 0;
        while (true)
        {
            DateTime utcNow = DateTime.UtcNow;
            _clientLatency = (long)records.Average(r => (utcNow - r.Timestamp).TotalMilliseconds);
            long elapsedMilliseconds = Utility.GetElapsedMilliseconds();
            try
            {
                PutLogEventsResponse response = await _client.PutLogEventsAsync(request);
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetSuccess();
                _sequenceToken = response.NextSequenceToken;
                RejectedLogEventsInfo rejectedLogEventsInfo = response.RejectedLogEventsInfo;
                _recordsAttempted += records.Count;
                _bytesAttempted += batchBytes;
                if (rejectedLogEventsInfo != null)
                {
                    StringBuilder sb = new StringBuilder();
                    sb.Append($"CloudWatchLogsSink client {this.Id} some of the logs where rejected.");
                    sb.Append($" ExpiredLogEventEndIndex {rejectedLogEventsInfo.ExpiredLogEventEndIndex}");
                    sb.Append($" TooNewLogEventStartIndex {rejectedLogEventsInfo.TooNewLogEventStartIndex}");
                    sb.Append($" TooOldLogEventEndIndex {rejectedLogEventsInfo.TooOldLogEventEndIndex}");
                    _logger?.LogError(sb.ToString());
                    long recordCount = records.Count - rejectedLogEventsInfo.ExpiredLogEventEndIndex - rejectedLogEventsInfo.TooOldLogEventEndIndex;
                    if (rejectedLogEventsInfo.TooOldLogEventEndIndex > 0)
                    {
                        recordCount -= records.Count - rejectedLogEventsInfo.TooNewLogEventStartIndex;
                    }
                    _recordsSuccess += recordCount;
                    _recordsFailedNonrecoverable += (records.Count - recordCount);
                }
                else
                {
                    _recordsSuccess += records.Count;
                    _logger?.LogDebug($"CloudWatchLogsSink client {this.Id} successfully sent {records.Count} records {batchBytes} bytes.");
                }
                this.SaveBookmarks(records);
                break;
            }
            catch (ResourceNotFoundException)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                //Logstream does not exist
                if (attemptedCreatingLogStream)
                {
                    _nonrecoverableServiceErrors++;
                    _recordsFailedNonrecoverable += records.Count;
                    throw;
                }
                else
                {
                    _recoverableServiceErrors++;
                    _recordsFailedRecoverable += records.Count;
                    // BUG FIX: the original never set this flag, so the
                    // rethrow branch above was unreachable and a persistent
                    // ResourceNotFoundException looped forever.
                    attemptedCreatingLogStream = true;
                    await CreateLogStreamAsync(logStreamName);
                }
            }
            catch (AmazonCloudWatchLogsException ex)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                if (ex is InvalidSequenceTokenException || ex is ServiceUnavailableException)
                {
                    if (ex is InvalidSequenceTokenException invalidSequenceTokenException)
                    {
                        invalidSequenceTokenCount++;
                        _sequenceToken = invalidSequenceTokenException.GetExpectedSequenceToken();
                        //Sometimes we get a sequence token just say "null". This is obviously invalid
                        if ("null".Equals(_sequenceToken))
                        {
                            _sequenceToken = null;
                        }
                        else if (_sequenceToken != null && invalidSequenceTokenCount < 2)
                        {
                            // BUG FIX: propagate the refreshed token onto the
                            // pending request; the original retried with the
                            // stale token still attached to 'request'.
                            request.SequenceToken = _sequenceToken;
                            continue; //Immediately try so that the sequence token does not become invalid again
                        }
                    }
                    if (_buffer.Requeue(records, _throttle.ConsecutiveErrorCount < _maxAttempts))
                    {
                        _logger?.LogWarning($"CloudWatchLogsSink client {this.Id} attempt={_throttle.ConsecutiveErrorCount} exception={ex.Message}. Will retry.");
                        _recoverableServiceErrors++;
                        _recordsFailedRecoverable += records.Count;
                        break;
                    }
                }
                _recordsFailedNonrecoverable += records.Count;
                _nonrecoverableServiceErrors++;
                throw;
            }
            catch (Exception)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                _recordsFailedNonrecoverable += records.Count;
                _nonrecoverableServiceErrors++;
                throw;
            }
        }
    }
    catch (Exception ex)
    {
        _logger?.LogError($"CloudWatchLogsSink client {this.Id} exception (attempt={_throttle.ConsecutiveErrorCount}): {ex.ToMinimized()}");
    }
    PublishMetrics(MetricsConstants.CLOUDWATCHLOG_PREFIX);
}
/// <summary>
///
/// <para>WriteLogs:</para>
///
/// <para>Writes logs to the logging service</para>
///
/// <para>Check <seealso cref="IBLoggingServiceInterface.WriteLogs"/> for detailed documentation</para>
///
/// </summary>
/// <param name="_Messages">Log entries to publish; the call returns false when null or empty.</param>
/// <param name="_LogGroupName">Target log group (encoded for tagging before use).</param>
/// <param name="_LogStreamName">Target log stream (encoded for tagging before use).</param>
/// <param name="_bAsync">When true, work is queued on a background task and true is returned immediately (delivery not confirmed).</param>
/// <param name="_ErrorMessageAction">Optional sink invoked with error descriptions.</param>
public bool WriteLogs(
    List<BLoggingParametersStruct> _Messages,
    string _LogGroupName,
    string _LogStreamName,
    bool _bAsync = true,
    Action<string> _ErrorMessageAction = null)
{
    if (_Messages == null || _Messages.Count == 0)
    {
        return(false);
    }
    if (_bAsync)
    {
        // Fire-and-forget: re-enter this method synchronously on a worker
        // task; the immediate 'true' does not confirm delivery.
        BTaskWrapper.Run(() =>
        {
            WriteLogs(_Messages, _LogGroupName, _LogStreamName, false, _ErrorMessageAction);
        });
        return(true);
    }
    else
    {
        _LogGroupName = BUtility.EncodeStringForTagging(_LogGroupName);
        _LogStreamName = BUtility.EncodeStringForTagging(_LogStreamName);
        string SequenceToken = null;
        bool bLogStreamAndGroupExists = false;
        try
        {
            // Look up the stream's current upload sequence token.
            // NOTE(review): the .Wait()/.Result pattern here is sync-over-async;
            // acceptable for this blocking bool-returning API but a deadlock
            // risk if ever called on a synchronization context.
            var DescribeStreamRequest = new DescribeLogStreamsRequest(_LogGroupName);
            using (var CreatedDescribeTask = CloudWatchLogsClient.DescribeLogStreamsAsync(DescribeStreamRequest))
            {
                CreatedDescribeTask.Wait();
                if (CreatedDescribeTask.Result != null && CreatedDescribeTask.Result.LogStreams != null && CreatedDescribeTask.Result.LogStreams.Count > 0)
                {
                    foreach (var Current in CreatedDescribeTask.Result.LogStreams)
                    {
                        if (Current != null && Current.LogStreamName == _LogStreamName)
                        {
                            SequenceToken = Current.UploadSequenceToken;
                            bLogStreamAndGroupExists = true;
                            break;
                        }
                    }
                }
            }
        }
        catch (Exception)
        {
            // Describe failed (e.g. group missing) - fall through to creation.
            bLogStreamAndGroupExists = false;
        }
        if (!bLogStreamAndGroupExists)
        {
            // Provision the group and stream; "already exists" is not an error.
            try
            {
                var CreateGroupRequest = new CreateLogGroupRequest(_LogGroupName);
                using (var CreatedGroupTask = CloudWatchLogsClient.CreateLogGroupAsync(CreateGroupRequest))
                {
                    CreatedGroupTask.Wait();
                }
            }
            catch (Exception e)
            {
                if (!(e is ResourceAlreadyExistsException))
                {
                    _ErrorMessageAction?.Invoke("BLoggingServiceAWS->WriteLogs: " + e.Message + ", Trace: " + e.StackTrace);
                    return(false);
                }
            }
            try
            {
                var CreateStreamRequest = new CreateLogStreamRequest(_LogGroupName, _LogStreamName);
                using (var CreatedStreamTask = CloudWatchLogsClient.CreateLogStreamAsync(CreateStreamRequest))
                {
                    CreatedStreamTask.Wait();
                }
            }
            catch (Exception e)
            {
                if (!(e is ResourceAlreadyExistsException))
                {
                    _ErrorMessageAction?.Invoke("BLoggingServiceAWS->WriteLogs: " + e.Message + ", Trace: " + e.StackTrace);
                    return(false);
                }
            }
        }
        // Translate each message to an InputLogEvent, prefixing the text
        // with its severity level.
        var LogEvents = new List<InputLogEvent>();
        foreach (var Message in _Messages)
        {
            var LogEvent = new InputLogEvent()
            {
                Message = Message.Message,
                Timestamp = DateTime.UtcNow
            };
            switch (Message.LogType)
            {
                case EBLoggingServiceLogType.Debug:
                    LogEvent.Message = "Debug-> " + LogEvent.Message;
                    break;
                case EBLoggingServiceLogType.Info:
                    LogEvent.Message = "Info-> " + LogEvent.Message;
                    break;
                case EBLoggingServiceLogType.Warning:
                    LogEvent.Message = "Warning-> " + LogEvent.Message;
                    break;
                case EBLoggingServiceLogType.Error:
                    LogEvent.Message = "Error-> " + LogEvent.Message;
                    break;
                case EBLoggingServiceLogType.Critical:
                    LogEvent.Message = "Critical-> " + LogEvent.Message;
                    break;
            }
            LogEvents.Add(LogEvent);
        }
        try
        {
            // SequenceToken is null for a freshly-created stream, which is
            // what the service expects for a first put.
            var PutRequest = new PutLogEventsRequest(_LogGroupName, _LogStreamName, LogEvents)
            {
                SequenceToken = SequenceToken
            };
            using (var CreatedPutTask = CloudWatchLogsClient.PutLogEventsAsync(PutRequest))
            {
                CreatedPutTask.Wait();
            }
        }
        catch (Exception e)
        {
            _ErrorMessageAction?.Invoke("BLoggingServiceAWS->WriteLogs: " + e.Message + ", Trace: " + e.StackTrace);
            return(false);
        }
        return(true);
    }
}
/// <summary>
/// Sends one buffered batch of log events to CloudWatch Logs. Handles
/// sequence-token refresh, accounting for partially rejected events, a bounded
/// number of in-place retries on invalid sequence tokens, and requeueing the
/// batch on recoverable service errors. Always publishes metrics on exit.
/// </summary>
/// <param name="records">The batch of envelope-wrapped log events to send; no-op when null or empty.</param>
/// <param name="batchBytes">Total serialized size of the batch in bytes (used for metrics only).</param>
protected override async Task OnNextAsync(List <Envelope <InputLogEvent> > records, long batchBytes)
{
    if (records == null || records.Count == 0)
    {
        return;
    }

    try
    {
        _logger?.LogDebug("CloudWatchLogsSink client {0} sending {1} records {2} bytes.", this.Id, records.Count, batchBytes);

        // Stream name may embed a timestamp pattern; resolve it from the first record.
        var logStreamName = this.ResolveTimestampInLogStreamName(records[0].Timestamp);

        // If the sequence token is null, try to get it.
        // If the log stream doesn't exist, create it (by specifying "true" in the second parameter).
        // This should be the only place where a log stream is created.
        // This method will ensure that both the log group and stream exists,
        // so there is no need to handle a ResourceNotFound exception later.
        if (string.IsNullOrEmpty(_sequenceToken))
        {
            await this.GetSequenceTokenAsync(logStreamName, true);
        }

        var request = new PutLogEventsRequest
        {
            LogGroupName = _logGroupName,
            LogStreamName = logStreamName,
            SequenceToken = _sequenceToken,
            LogEvents = records
                .Select(e => e.Data)
                .OrderBy(e => e.Timestamp) // Added sort here in case messages are from multiple streams and they are not merged in order
                .ToList()
        };

        int invalidSequenceTokenCount = 0;
        while (true)
        {
            var utcNow = DateTime.UtcNow;
            // Average client-side age of the records in this batch, in milliseconds.
            _clientLatency = (long)records.Average(r => (utcNow - r.Timestamp).TotalMilliseconds);

            long elapsedMilliseconds = Utility.GetElapsedMilliseconds();
            try
            {
                var response = await _client.PutLogEventsAsync(request);
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetSuccess();
                _sequenceToken = response.NextSequenceToken;
                _recordsAttempted += records.Count;
                _bytesAttempted += batchBytes;

                var rejectedLogEventsInfo = response.RejectedLogEventsInfo;
                if (rejectedLogEventsInfo != null)
                {
                    // Don't do the expensive string building unless we know the logger isn't null.
                    if (_logger != null)
                    {
                        var sb = new StringBuilder()
                            .AppendFormat("CloudWatchLogsSink client {0} encountered some rejected logs.", this.Id)
                            .AppendFormat(" ExpiredLogEventEndIndex {0}", rejectedLogEventsInfo.ExpiredLogEventEndIndex)
                            .AppendFormat(" TooNewLogEventStartIndex {0}", rejectedLogEventsInfo.TooNewLogEventStartIndex)
                            .AppendFormat(" TooOldLogEventEndIndex {0}", rejectedLogEventsInfo.TooOldLogEventEndIndex);
                        _logger.LogError(sb.ToString());
                    }

                    // Estimate how many records were actually accepted; the rejected
                    // remainder is counted as nonrecoverable failures below.
                    var recordCount = records.Count - rejectedLogEventsInfo.ExpiredLogEventEndIndex - rejectedLogEventsInfo.TooOldLogEventEndIndex;
                    // NOTE(review): this guard tests TooOldLogEventEndIndex but subtracts
                    // the too-new tail (records.Count - TooNewLogEventStartIndex). It looks
                    // like the condition was meant to be TooNewLogEventStartIndex > 0 —
                    // confirm against RejectedLogEventsInfo semantics before changing.
                    if (rejectedLogEventsInfo.TooOldLogEventEndIndex > 0)
                    {
                        recordCount -= records.Count - rejectedLogEventsInfo.TooNewLogEventStartIndex;
                    }
                    _recordsFailedNonrecoverable += (records.Count - recordCount);
                }

                _logger?.LogDebug("CloudWatchLogsSink client {0} successfully sent {1} records {2} bytes.", this.Id, records.Count, batchBytes);
                _recordsSuccess += records.Count;
                this.SaveBookmarks(records);
                break;
            }
            catch (AmazonCloudWatchLogsException ex)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();

                // InvalidSequenceTokenExceptions are thrown when a PutLogEvents call doesn't have a valid sequence token.
                // This is usually recoverable, so we'll try twice before requeuing events.
                if (ex is InvalidSequenceTokenException invalidSequenceTokenException && invalidSequenceTokenCount < 2)
                {
                    // Increment the invalid sequence token counter, to limit the "instant retries" that we attempt.
                    invalidSequenceTokenCount++;

                    // The exception from CloudWatch contains the sequence token, so we'll try to parse it out.
                    _sequenceToken = invalidSequenceTokenException.GetExpectedSequenceToken();

                    // Sometimes we get a sequence token with a string value of "null".
                    // This is invalid so we'll fetch it again and retry immediately.
                    // If credentials have expired or this request is being throttled,
                    // the wrapper try/catch will capture it and data will be dropped for
                    // this attempt. NOTE(review): original comment was truncated here;
                    // verify the intended wording against the catch-all handler below.
                    if (AWSConstants.NullString.Equals(_sequenceToken))
                    {
                        _sequenceToken = null;
                        await this.GetSequenceTokenAsync(logStreamName, false);
                    }

                    // Reset the sequence token in the request and immediately retry (without requeuing),
                    // so that the sequence token does not become invalid again.
                    request.SequenceToken = _sequenceToken;
                    continue;
                }

                // Retry if one of the following was true:
                // - The exception was thrown because an invalid sequence token was used (more than twice in a row)
                // - The service was unavailable (transient error or service outage)
                // - The security token in the credentials has expired (previously this was handled as an unrecoverable error)
                if (this.IsRecoverableException(ex))
                {
                    // Try to requeue the records into the buffer.
                    // This will mean that the events in the buffer are now out of order :(
                    // There's nothing we can do about that short of rewriting all the buffering logic.
                    // Having out of order events isn't that bad, because the service that we're uploading
                    // to will be storing them based on their event time anyway. However, this can affect
                    // the persistent bookmarking behavior, since bookmarks are updated based on the
                    // position/eventId in the last batch sent, not what's currently in the buffer.
                    if (_buffer.Requeue(records, _throttle.ConsecutiveErrorCount < _maxAttempts))
                    {
                        _logger?.LogWarning("CloudWatchLogsSink client {0} attempt={1} exception={2}. Will retry.", this.Id, _throttle.ConsecutiveErrorCount, ex.Message);
                        _recoverableServiceErrors++;
                        _recordsFailedRecoverable += records.Count;
                        break;
                    }
                }

                // Requeue failed (or error not recoverable): count as permanent loss and
                // let the outer catch log the exception.
                _recordsFailedNonrecoverable += records.Count;
                _nonrecoverableServiceErrors++;
                throw;
            }
            catch (Exception)
            {
                // Non-service exception: record the failure and surface it to the outer catch.
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                _recordsFailedNonrecoverable += records.Count;
                _nonrecoverableServiceErrors++;
                throw;
            }
        }
    }
    catch (Exception ex)
    {
        // Last-resort handler: log and fall through so metrics are still published.
        _logger?.LogError("CloudWatchLogsSink client {0} exception (attempt={1}): {2}", this.Id, _throttle.ConsecutiveErrorCount, ex.ToMinimized());
    }

    this.PublishMetrics(MetricsConstants.CLOUDWATCHLOG_PREFIX);
}
/// <summary>
/// Sends a batch of log events, lazily creating the log group and stream on
/// first use, and retrying (up to 10 times) with a corrected sequence token
/// when the service rejects the cached one.
/// </summary>
/// <param name="putLogEventsRequest">The request to send; its SequenceToken is overwritten on each attempt.</param>
/// <returns>The successful PutLogEvents response, or null when all attempts failed.</returns>
private AmazonWebServiceResponse PutLogEvents(PutLogEventsRequest putLogEventsRequest)
{
    // Fast-path check outside the lock; creation is re-checked (double-checked) inside.
    if (!_validatedGroupNames.ContainsKey(putLogEventsRequest.LogGroupName) ||
        !_validatedStreamNames.ContainsKey(putLogEventsRequest.LogStreamName))
    {
        lock (_lockObject)
        {
            if (!_validatedGroupNames.ContainsKey(putLogEventsRequest.LogGroupName))
            {
                try
                {
                    Client.CreateLogGroup(new CreateLogGroupRequest
                    {
                        LogGroupName = putLogEventsRequest.LogGroupName
                    });
                }
                catch (ResourceAlreadyExistsException)
                {
                    // Another writer created it first; that's fine.
                }
                _validatedGroupNames.TryAdd(putLogEventsRequest.LogGroupName, putLogEventsRequest.LogGroupName);
            }

            if (!_validatedStreamNames.ContainsKey(putLogEventsRequest.LogStreamName))
            {
                try
                {
                    Client.CreateLogStream(new CreateLogStreamRequest
                    {
                        LogGroupName = putLogEventsRequest.LogGroupName,
                        LogStreamName = putLogEventsRequest.LogStreamName
                    });
                }
                catch (ResourceAlreadyExistsException)
                {
                }
                _validatedStreamNames.TryAdd(putLogEventsRequest.LogStreamName, putLogEventsRequest.LogStreamName);
            }
        }
    }

    lock (_lockObject)
    {
        AmazonWebServiceResponse ret = null;
        var key = putLogEventsRequest.LogGroupName + "/" + putLogEventsRequest.LogStreamName;

        // TryGetValue avoids the previous ContainsKey + indexer double lookup.
        string nextSequenceToken;
        if (!_nextSequenceToken.TryGetValue(key, out nextSequenceToken))
        {
            nextSequenceToken = null;
            _nextSequenceToken[key] = null;
        }

        for (var i = 0; i < 10 && ret == null; i++)
        {
            // NOTE: the original wrapped the call in a redundant inner
            // `try { ... } catch (ResourceNotFoundException) { throw; }` —
            // a catch-and-rethrow no outer handler consumed; removed as dead code.
            try
            {
                putLogEventsRequest.SequenceToken = nextSequenceToken;
                var putLogEventsResponse = Client.PutLogEvents(putLogEventsRequest);
                _nextSequenceToken[key] = putLogEventsResponse.NextSequenceToken;
                ret = putLogEventsResponse;
            }
            catch (DataAlreadyAcceptedException e)
            {
                // The service embeds the expected token in the error message.
                nextSequenceToken = ExtractSequenceToken(e.Message);
            }
            catch (InvalidSequenceTokenException e)
            {
                nextSequenceToken = ExtractSequenceToken(e.Message);
            }
            catch (OperationAbortedException)
            {
                // A concurrent CreateLogStream/PutLogEvents won the race; retry.
                LogLog.Debug(typeof(CloudWatchLogsClientWrapper), "Task lost due to conflicting operation");
            }
        }
        return ret;
    }
}

/// <summary>
/// Pulls the expected sequence token (a 20+ digit number) out of a CloudWatch
/// error message; returns null when no token is present.
/// </summary>
private static string ExtractSequenceToken(string message)
{
    var match = Regex.Match(message, @"[0-9]{20,}");
    return match.Success ? match.Value : null;
}