/// <summary>
/// Unmarshalls the JSON response from the service into a <see cref="PutLogEventsResponse"/>.
/// </summary>
/// <param name="context">The JSON unmarshaller context wrapping the service response.</param>
/// <returns>The populated <see cref="PutLogEventsResponse"/>.</returns>
public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
{
    var response = new PutLogEventsResponse();

    context.Read();
    int targetDepth = context.CurrentDepth;
    while (context.ReadAtDepth(targetDepth))
    {
        if (context.TestExpression("nextSequenceToken", targetDepth))
        {
            response.NextSequenceToken = StringUnmarshaller.Instance.Unmarshall(context);
            continue;
        }
        if (context.TestExpression("rejectedLogEventsInfo", targetDepth))
        {
            response.RejectedLogEventsInfo = RejectedLogEventsInfoUnmarshaller.Instance.Unmarshall(context);
            continue;
        }
    }

    return response;
}
public async Task WriteAsync_Should_Queue_Log_Requests_And_Preserve_Their_Order()
{
    // arrange
    var random = new Random();
    var clientMock = new Mock<IAmazonCloudWatchLogs>().Init();
    var expectedSequence = Enumerable.Range(0, 20);
    var actualSequence = new List<int>();
    var retried = new List<int>();

    clientMock
        .Setup(m => m.PutLogEventsAsync(It.IsAny<PutLogEventsRequest>(), It.IsAny<CancellationToken>()))
        .Returns<PutLogEventsRequest, CancellationToken>((request, token) =>
        {
            // Simulates delay on the API side before producing the response.
            return Task
                .Delay(random.Next() % 500)
                .ContinueWith(_ =>
                {
                    var messageInt = int.Parse(request.LogEvents.First().Message);

                    // Retry every fifth element exactly once by answering BadRequest first.
                    var shouldRetry = (messageInt % 5 == 0) && !retried.Contains(messageInt);
                    if (shouldRetry)
                    {
                        retried.Add(messageInt);
                        return new PutLogEventsResponse { HttpStatusCode = HttpStatusCode.BadRequest };
                    }

                    actualSequence.Add(messageInt);
                    return new PutLogEventsResponse { HttpStatusCode = HttpStatusCode.OK };
                });
        });

    var target = new CloudWatchLogsClientWrapper(
        clientMock.Object,
        new CloudWatchLogsWrapperSettings(_logGroup, _logStream, CreateIntervalProvider()));

    // act
    await Task.WhenAll(expectedSequence
        .Select(i => target.WriteAsync(new[] { new InputLogEvent { Message = i.ToString() } }))
        .ToArray());

    // assert
    Assert.True(expectedSequence.SequenceEqual(actualSequence), "Message sequence should be preserved.");
    Assert.True(expectedSequence.Where(i => i % 5 == 0).SequenceEqual(retried), "Expected and actual retries should match.");
}
/// <summary>
/// Demo entry point: ensures a CloudWatch Logs log group and stream exist,
/// pushes a few test log events (persisting the next sequence token between
/// runs in a local file), and runs a metric-filter test for the "Error" pattern.
/// </summary>
static void Main(string[] args)
{
    var logClient = new AmazonCloudWatchLogsClient();

    // Add a new log group for testing
    const string newLogGroupName = "NewLogGroup";

    DescribeLogGroupsResponse dlgr = logClient.DescribeLogGroups();
    List<LogGroup> groups = dlgr.LogGroups;

    LogGroup lg = new LogGroup();
    lg.LogGroupName = newLogGroupName;

    // Look for our new log group name to determine if we need to do setup
    LogGroup result = groups.Find(bk => bk.LogGroupName == newLogGroupName);

    if (result != null)
    {
        Console.WriteLine(result.LogGroupName + " found");
    }
    else
    {
        // Haven't seen this log group, set it up
        logClient.CreateLogGroup(new CreateLogGroupRequest(newLogGroupName));

        // Create a file to save the next SequenceToken in.
        // BUG FIX: the StreamWriter returned by CreateText was never closed,
        // leaking the handle and potentially blocking the later read.
        using (File.CreateText("..\\..\\" + lg.LogGroupName + ".txt")) { }

        logClient.CreateLogStream(new CreateLogStreamRequest(lg.LogGroupName, newLogGroupName));
    }

    try
    {
        Console.WriteLine(lg.LogGroupName);

        // Pick up the next sequence token from the last run
        string tokenFile = lg.LogGroupName;
        string sequenceToken;
        using (StreamReader sr = File.OpenText("..\\..\\" + tokenFile + ".txt"))
        {
            sequenceToken = sr.ReadLine();
        }

        lg.RetentionInDays = 30;
        string groupName = lg.LogGroupName;

        // BUG FIX: the original reused one InputLogEvent instance and mutated
        // its Message after adding it to the list three times, so every queued
        // entry ended up carrying the LAST message. Each event must be its own
        // instance.
        DateTime dt = new DateTime(2017, 01, 11);
        var logEvents = new List<InputLogEvent>(3)
        {
            new InputLogEvent { Message = "Test Event 1", Timestamp = dt },
            new InputLogEvent { Message = "Test Event 2", Timestamp = dt },
            new InputLogEvent { Message = "This message also contains an Error", Timestamp = dt }
        };

        PutLogEventsRequest pler = new PutLogEventsRequest(groupName, tokenFile, logEvents);
        pler.SequenceToken = sequenceToken; // use last sequence token

        PutLogEventsResponse plerp = logClient.PutLogEvents(pler);
        Console.WriteLine("Next sequence token = " + plerp.NextSequenceToken);

        // Persist the next token for the following run.
        // BUG FIX: File.OpenWrite does not truncate — if the new token were
        // shorter than the previous one, stale trailing bytes would corrupt it.
        File.WriteAllText("..\\..\\" + tokenFile + ".txt", plerp.NextSequenceToken, new UTF8Encoding());

        // Test the metric filter pattern "Error" against a sample message.
        TestMetricFilterRequest tmfr = new TestMetricFilterRequest();
        tmfr.LogEventMessages = new List<string>(1) { "Error" };
        tmfr.FilterPattern = "Error";

        TestMetricFilterResponse tmfrp = logClient.TestMetricFilter(tmfr);
        List<MetricFilterMatchRecord> results = tmfrp.Matches;
        Console.WriteLine("Found " + results.Count.ToString() + " match records");

        foreach (MetricFilterMatchRecord mfmr in results)
        {
            Console.WriteLine("Event Message = " + mfmr.EventMessage);
        }
        Console.WriteLine("Metric filter test done");
    }
    catch (Exception e)
    {
        Console.WriteLine(e.Message);
    }
}
// Integration test: exercises the create/read/update/delete lifecycle of a
// CloudWatch Logs log group — retention policy, log stream CRUD, writing
// events with sequence tokens, and reading the events back.
public void CRUDLogGroup()
{
    // Unique group name per run so concurrent test runs don't collide.
    var logGroupName = "sdk-dotnet-" + DateTime.Now.Ticks;
    Client.CreateLogGroupAsync(new CreateLogGroupRequest
    {
        LogGroupName = logGroupName
    }).Wait();
    try
    {
        {
            // Verify the group exists and initially has no retention policy.
            DescribeLogGroupsResponse response = Client.DescribeLogGroupsAsync(new DescribeLogGroupsRequest
            {
                LogGroupNamePrefix = logGroupName
            }).Result;
            Assert.AreEqual(1, response.LogGroups.Count);
            Assert.IsNotNull(response.LogGroups[0].Arn);
            Assert.IsNull(response.LogGroups[0].RetentionInDays);

            // Set a 1-day retention policy and confirm it is reflected.
            Client.PutRetentionPolicyAsync(new PutRetentionPolicyRequest
            {
                LogGroupName = logGroupName,
                RetentionInDays = 1
            }).Wait();
            response = Client.DescribeLogGroupsAsync(new DescribeLogGroupsRequest
            {
                LogGroupNamePrefix = logGroupName
            }).Result;
            Assert.AreEqual(1, response.LogGroups.Count);
            Assert.AreEqual(1, response.LogGroups[0].RetentionInDays.GetValueOrDefault());
        }
        {
            // Create a stream and write two events; the second put must carry
            // the sequence token returned by the first.
            Client.CreateLogStreamAsync(new CreateLogStreamRequest
            {
                LogGroupName = logGroupName,
                LogStreamName = "sample"
            }).Wait();
            DescribeLogStreamsResponse describeResponse = Client.DescribeLogStreamsAsync(new DescribeLogStreamsRequest
            {
                LogGroupName = logGroupName,
                LogStreamNamePrefix = "sample"
            }).Result;
            Assert.AreEqual(1, describeResponse.LogStreams.Count);
            Assert.IsNotNull(describeResponse.LogStreams[0].Arn);

            PutLogEventsResponse putResponse1 = Client.PutLogEventsAsync(new PutLogEventsRequest
            {
                LogGroupName = logGroupName,
                LogStreamName = "sample",
                LogEvents = new List<InputLogEvent>
                {
                    new InputLogEvent
                    {
                        Message = "First Data",
                        Timestamp = DateTime.Now
                    }
                }
            }).Result;

            // Pad the time so the 2 events are not at the same time.
            UtilityMethods.Sleep(TimeSpan.FromSeconds(.1));
            Client.PutLogEventsAsync(new PutLogEventsRequest
            {
                SequenceToken = putResponse1.NextSequenceToken,
                LogGroupName = logGroupName,
                LogStreamName = "sample",
                LogEvents = new List<InputLogEvent>
                {
                    new InputLogEvent
                    {
                        Message = "Second Data",
                        Timestamp = DateTime.Now
                    }
                }
            }).Wait();

            GetLogEventsResponse getResponse = null;
            // Doing this in a loop to wait for the eventual consistency of the events
            // being written to cloudwatch logs.
            for (int i = 0; i < 20; i++)
            {
                getResponse = Client.GetLogEventsAsync(new GetLogEventsRequest
                {
                    LogGroupName = logGroupName,
                    LogStreamName = "sample",
                    StartTime = DateTime.Now.AddDays(-2),
                    EndTime = DateTime.Now
                }).Result;

                if (getResponse.Events.Count == 2)
                {
                    break;
                }
                UtilityMethods.Sleep(TimeSpan.FromSeconds(2));
            }

            // Both events should be present, in write order, with today's (UTC) date.
            Assert.AreEqual(2, getResponse.Events.Count);
            Assert.AreEqual("First Data", getResponse.Events[0].Message);
            Assert.AreEqual(DateTime.UtcNow.Date, getResponse.Events[0].Timestamp.Date);
            Assert.AreEqual("Second Data", getResponse.Events[1].Message);
            Assert.AreEqual(DateTime.UtcNow.Date, getResponse.Events[1].Timestamp.Date);
            Assert.IsTrue(getResponse.Events[0].Timestamp < getResponse.Events[1].Timestamp);

            Client.DeleteLogStreamAsync(new DeleteLogStreamRequest
            {
                LogGroupName = logGroupName,
                LogStreamName = "sample"
            }).Wait();
        }
    }
    finally
    {
        // Always remove the log group, even when an assertion above failed.
        Client.DeleteLogGroupAsync(new DeleteLogGroupRequest
        {
            LogGroupName = logGroupName
        }).Wait();
    }
}
/// <summary>
/// Sends a batch of log records to CloudWatch Logs, handling sequence-token
/// management, on-demand log stream creation, rejected-event accounting, and
/// retry/requeue on recoverable service errors.
/// </summary>
/// <param name="records">Batch of timestamped log events; may be null or empty (no-op).</param>
/// <param name="batchBytes">Total payload size of the batch in bytes (metrics only).</param>
protected override async Task OnNextAsync(List<Envelope<InputLogEvent>> records, long batchBytes)
{
    if (records == null || records.Count == 0)
    {
        return;
    }

    try
    {
        _logger?.LogDebug($"CloudWatchLogsSink client {this.Id} sending {records.Count} records {batchBytes} bytes.");

        DateTime timestamp = records[0].Timestamp;
        string logStreamName = ResolveTimestampInLogStreamName(timestamp);

        // A fresh sink has no token yet; fetch it from the stream metadata.
        if (string.IsNullOrEmpty(_sequenceToken))
        {
            await GetSequenceTokenAsync(logStreamName);
        }

        var request = new PutLogEventsRequest
        {
            LogGroupName = _logGroupName,
            LogStreamName = logStreamName,
            SequenceToken = _sequenceToken,
            LogEvents = records
                .Select(e => e.Data)
                .OrderBy(e => e.Timestamp) //Added sort here in case messages are from multiple streams and they are not merged in order
                .ToList()
        };

        bool attemptedCreatingLogStream = false;
        int invalidSequenceTokenCount = 0;
        while (true)
        {
            // BUG FIX: refresh the token on every attempt. Previously the
            // request kept the token captured before the loop, so a retry
            // after InvalidSequenceTokenException re-sent the stale token.
            request.SequenceToken = _sequenceToken;

            DateTime utcNow = DateTime.UtcNow;
            _clientLatency = (long)records.Average(r => (utcNow - r.Timestamp).TotalMilliseconds);

            long elapsedMilliseconds = Utility.GetElapsedMilliseconds();
            try
            {
                PutLogEventsResponse response = await _client.PutLogEventsAsync(request);
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetSuccess();
                _sequenceToken = response.NextSequenceToken;
                RejectedLogEventsInfo rejectedLogEventsInfo = response.RejectedLogEventsInfo;
                _recordsAttempted += records.Count;
                _bytesAttempted += batchBytes;
                if (rejectedLogEventsInfo != null)
                {
                    // The call succeeded, but the service rejected some events
                    // (expired, too old, or too new).
                    StringBuilder sb = new StringBuilder();
                    sb.Append($"CloudWatchLogsSink client {this.Id} some of the logs where rejected.");
                    sb.Append($" ExpiredLogEventEndIndex {rejectedLogEventsInfo.ExpiredLogEventEndIndex}");
                    sb.Append($" TooNewLogEventStartIndex {rejectedLogEventsInfo.TooNewLogEventStartIndex}");
                    sb.Append($" TooOldLogEventEndIndex {rejectedLogEventsInfo.TooOldLogEventEndIndex}");
                    _logger?.LogError(sb.ToString());

                    // Successful records = total minus expired, too-old, and too-new.
                    long recordCount = records.Count - rejectedLogEventsInfo.ExpiredLogEventEndIndex - rejectedLogEventsInfo.TooOldLogEventEndIndex;
                    // BUG FIX: the too-new adjustment must be guarded by
                    // TooNewLogEventStartIndex (not TooOldLogEventEndIndex);
                    // the original guard miscounted whenever only one of the
                    // two rejection categories was present.
                    if (rejectedLogEventsInfo.TooNewLogEventStartIndex > 0)
                    {
                        recordCount -= records.Count - rejectedLogEventsInfo.TooNewLogEventStartIndex;
                    }
                    _recordsSuccess += recordCount;
                    _recordsFailedNonrecoverable += (records.Count - recordCount);
                }
                else
                {
                    _recordsSuccess += records.Count;
                    _logger?.LogDebug($"CloudWatchLogsSink client {this.Id} successfully sent {records.Count} records {batchBytes} bytes.");
                }

                this.SaveBookmarks(records);

                break;
            }
            catch (ResourceNotFoundException)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                //Logstream does not exist
                if (attemptedCreatingLogStream)
                {
                    _nonrecoverableServiceErrors++;
                    _recordsFailedNonrecoverable += records.Count;
                    throw;
                }
                else
                {
                    _recoverableServiceErrors++;
                    _recordsFailedRecoverable += records.Count;
                    // BUG FIX: mark that creation was attempted; the flag was
                    // never set before, so a persistent ResourceNotFound
                    // looped on stream creation forever.
                    attemptedCreatingLogStream = true;
                    await CreateLogStreamAsync(logStreamName);
                }
            }
            catch (AmazonCloudWatchLogsException ex)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                if (ex is InvalidSequenceTokenException || ex is ServiceUnavailableException)
                {
                    if (ex is InvalidSequenceTokenException invalidSequenceTokenException)
                    {
                        invalidSequenceTokenCount++;
                        _sequenceToken = invalidSequenceTokenException.GetExpectedSequenceToken();
                        //Sometimes we get a sequence token just say "null". This is obviously invalid
                        if ("null".Equals(_sequenceToken))
                        {
                            _sequenceToken = null;
                        }
                        else if (_sequenceToken != null && invalidSequenceTokenCount < 2)
                        {
                            continue; //Immediately try so that the sequence token does not become invalid again
                        }
                    }

                    if (_buffer.Requeue(records, _throttle.ConsecutiveErrorCount < _maxAttempts))
                    {
                        _logger?.LogWarning($"CloudWatchLogsSink client {this.Id} attempt={_throttle.ConsecutiveErrorCount} exception={ex.Message}. Will retry.");
                        _recoverableServiceErrors++;
                        _recordsFailedRecoverable += records.Count;
                        break;
                    }
                }
                _recordsFailedNonrecoverable += records.Count;
                _nonrecoverableServiceErrors++;
                throw;
            }
            catch (Exception)
            {
                _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
                _throttle.SetError();
                _recordsFailedNonrecoverable += records.Count;
                _nonrecoverableServiceErrors++;
                throw;
            }
        }
    }
    catch (Exception ex)
    {
        _logger?.LogError($"CloudWatchLogsSink client {this.Id} exception (attempt={_throttle.ConsecutiveErrorCount}): {ex.ToMinimized()}");
    }
    PublishMetrics(MetricsConstants.CLOUDWATCHLOG_PREFIX);
}
/// <summary>
/// Continuously drains queued log messages and forwards them to CloudWatch
/// Logs, maintaining per-stream sequence tokens and provisioning missing log
/// groups/streams on demand. Runs forever, polling the queue once per second
/// with five parallel consumers.
/// </summary>
internal void ForwardLogs()
{
    do
    {
        Action pushLogsToCloudWatchAction = () =>
        {
            LogMessageDTO logMessageObj;
            while (LogService.taskQueue.TryDequeue(out logMessageObj))
            {
                try
                {
                    using (IAmazonCloudWatchLogs logsclient = Amazon.AWSClientFactory.CreateAmazonCloudWatchLogsClient(this.cloudWatchAccessKey, this.cloudWatchSecretKey, this.cloudWatchRegion))
                    {
                        string logStreamName = string.IsNullOrWhiteSpace(logMessageObj.ApplicationAlias) ? "Custom" : logMessageObj.ApplicationAlias;

                        // Put the object into JSON format and send it to CloudWatch.
                        List<InputLogEvent> logEvents = new List<InputLogEvent>();
                        InputLogEvent logEntry = new InputLogEvent();
                        logEntry.Message = JsonConvert.SerializeObject(logMessageObj);
                        logEntry.Timestamp = logMessageObj.Timestamp;
                        logEvents.Add(logEntry);

                        PutLogEventsRequest request = new PutLogEventsRequest(this.logGroupName, logStreamName, logEvents);
                        PutLogEventsResponse response = null;
                        for (int i = 0; i < 5; ++i)
                        {
                            try
                            {
                                lock (this.criticalSection)
                                {
                                    // If we have a token for this stream, set it.
                                    if (LogStreamTokenMap.ContainsKey(logStreamName))
                                    {
                                        request.SequenceToken = LogStreamTokenMap[logStreamName];
                                    }

                                    // Put the logs and remember the token for the
                                    // next submission (indexer adds or updates).
                                    response = logsclient.PutLogEvents(request);
                                    LogStreamTokenMap[logStreamName] = response.NextSequenceToken;
                                }
                                // Successfully pushed the logs; exit the retry loop.
                                break;
                            }
                            catch (InvalidSequenceTokenException)
                            {
                                // We don't have the right token for the next sequence in
                                // the stream, so refresh all tokens for all streams.
                                var logstreamsrequest = new DescribeLogStreamsRequest(this.logGroupName);
                                var logStreamResponse = logsclient.DescribeLogStreams(logstreamsrequest);
                                var logstreamsList = logStreamResponse.LogStreams;
                                lock (this.criticalSection)
                                {
                                    foreach (var logstream in logstreamsList)
                                    {
                                        LogStreamTokenMap[logstream.LogStreamName] = logstream.UploadSequenceToken;
                                    }
                                }
                            }
                            catch (ResourceNotFoundException)
                            {
                                // We likely introduced a new log stream that needs to be
                                // provisioned in CloudWatch; ignore creation failures.
                                try
                                {
                                    logsclient.CreateLogGroup(new CreateLogGroupRequest(this.logGroupName));
                                }
                                catch (Exception) { }
                                try
                                {
                                    logsclient.CreateLogStream(new CreateLogStreamRequest(this.logGroupName, logStreamName));
                                    lock (this.criticalSection)
                                    {
                                        LogStreamTokenMap.Remove(logStreamName);
                                    }
                                }
                                catch (Exception) { }
                            }
                        }

                        // BUG FIX: response stays null when all 5 attempts threw;
                        // the original dereferenced it unconditionally and the
                        // resulting NullReferenceException was miscounted as a
                        // generic exception instead of a failed forward.
                        if (response != null && response.HttpStatusCode == HttpStatusCode.OK)
                        {
                            ++LogService.successfulForwards;
                        }
                        else
                        {
                            ++LogService.failedForwards;
                        }
                    }
                }
                catch (Exception)
                {
                    // Can't really log this exception to avoid cyclical logging and deadlocks.
                    ++LogService.numExceptions;
                }
            }
        };

        // Start 5 parallel tasks to consume the logs from the queue.
        // Logs might not be pushed in order to CloudWatch, but that's acceptable:
        // multiple instances of this service may push concurrently anyway.
        Parallel.Invoke(pushLogsToCloudWatchAction, pushLogsToCloudWatchAction, pushLogsToCloudWatchAction, pushLogsToCloudWatchAction, pushLogsToCloudWatchAction);

        // Sleep before the next iteration of the polling loop.
        Thread.Sleep(1000);
    } while (true);
}