/// <summary>
/// Unmarshalls the response from the service into the response class.
/// </summary>
/// <param name="context">The JSON unmarshaller context wrapping the service response.</param>
/// <returns>The populated <see cref="PutRecordBatchResponse"/>.</returns>
public override AmazonWebServiceResponse Unmarshall(JsonUnmarshallerContext context)
{
    PutRecordBatchResponse response = new PutRecordBatchResponse();

    context.Read();
    int targetDepth = context.CurrentDepth;
    while (context.ReadAtDepth(targetDepth))
    {
        if (context.TestExpression("FailedPutCount", targetDepth))
        {
            var unmarshaller = IntUnmarshaller.Instance;
            response.FailedPutCount = unmarshaller.Unmarshall(context);
            continue;
        }
        if (context.TestExpression("RequestResponses", targetDepth))
        {
            var unmarshaller = new ListUnmarshaller<PutRecordBatchResponseEntry, PutRecordBatchResponseEntryUnmarshaller>(PutRecordBatchResponseEntryUnmarshaller.Instance);
            response.RequestResponses = unmarshaller.Unmarshall(context);
            continue;
        }
    }

    return response;
}
private async Task RetryFailedRecords(PutRecordBatchResponse response, List<Record> originalRecords)
{
    // RequestResponses is returned in the same order as the submitted records,
    // so the index of a failed entry maps directly onto the original record.
    var failedRecords = new List<Record>();
    for (int i = 0; i < originalRecords.Count; i++)
    {
        var recordResponse = response.RequestResponses[i];
        if (!string.IsNullOrEmpty(recordResponse?.ErrorCode))
        {
            failedRecords.Add(originalRecords[i]);
        }
    }

    if (failedRecords.Count > 0 && _failedRecordsRetriesCount < FailedRecordsMaxRetries)
    {
        _failedRecordsRetriesCount++;
        // Use a non-blocking delay instead of Thread.Sleep inside an async method.
        await Task.Delay(TimeSpan.FromSeconds(5));
        _logger.LogInfo($"Retrying {failedRecords.Count} failed record(s).");
        await AttemptPutRecords(failedRecords);
    }
    else if (failedRecords.Count > 0)
    {
        foreach (var record in failedRecords)
        {
            // Defensively rewind the payload stream before reading it for the log message.
            record.Data.Position = 0;
            _logger.LogInfo($"Not able to put a record: {new StreamReader(record.Data).ReadToEnd()}");
        }
    }
}
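// The retry bookkeeping used by RetryFailedRecords is not shown in the snippet above. A minimal
// sketch of plausible supporting members, assuming a simple bounded retry counter; the names
// mirror the snippet, but the limit value is an illustrative assumption. The snippet also does
// not show where the counter is reset between batches.
private const int FailedRecordsMaxRetries = 3;
private int _failedRecordsRetriesCount;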
private async Task<bool> TryPublishRecordsAsync(IAmazonKinesisFirehose firehose, List<Record> records)
{
    m_metrics.IncrementPutRecordBatchRequests();
    try
    {
        int recordCount = records.Count;
        var batchRequest = new PutRecordBatchRequest
        {
            DeliveryStreamName = m_args.DeliveryStreamName,
            Records = records
        };

        PutRecordBatchResponse response = await firehose.PutRecordBatchAsync(batchRequest);

        LogEventInfo log = new LogEventInfo
        {
            Properties = { { "Response", response } }
        };

        int failedPutCount = response.FailedPutCount;
        if (failedPutCount == recordCount)
        {
            log.Level = LogLevel.Error;
            log.Message = "Failed to put all records in batch.";
            m_metrics.IncrementRecordsFailed(failedPutCount);
        }
        else if (failedPutCount > 0)
        {
            log.Level = LogLevel.Warn;
            log.Message = "Failed to put some records in batch.";
            m_metrics.IncrementRecordsPut(recordCount - failedPutCount);
            m_metrics.IncrementRecordsFailed(failedPutCount);
        }
        else
        {
            log.Level = LogLevel.Debug;
            log.Message = "Successfully put all records.";
            m_metrics.IncrementRecordsPut(recordCount);
        }

        m_log.Log(log);
        return true;
    }
    catch (Exception err)
    {
        m_log.Error(err, "Failed to put record batch.");
        m_metrics.IncrementPutRecordBatchErrors();
        return false;
    }
}
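// A minimal caller sketch, not part of the original snippet: PutRecordBatch accepts at most
// 500 records (and 4 MiB of data) per call, so larger sets have to be split into batches before
// TryPublishRecordsAsync is invoked. The m_firehose field and the helper name are assumptions
// used only for illustration.
private async Task PublishAllAsync(List<Record> allRecords)
{
    const int MaxRecordsPerBatch = 500; // Firehose PutRecordBatch record-count limit per call

    for (int offset = 0; offset < allRecords.Count; offset += MaxRecordsPerBatch)
    {
        int batchSize = Math.Min(MaxRecordsPerBatch, allRecords.Count - offset);
        List<Record> batch = allRecords.GetRange(offset, batchSize);

        // Each batch is published independently; a false return means the whole call failed.
        bool ok = await TryPublishRecordsAsync(m_firehose, batch);
        if (!ok)
        {
            m_log.Warn($"Batch starting at record {offset} was not published.");
        }
    }
}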
public async Task<PutRecordBatchResponse> AttemptPutRecords(List<Record> kinesisRecords)
{
    var retryPolicy = CreateExceptionRetryPolicy();

    PutRecordBatchResponse response = null;
    await retryPolicy.ExecuteAsync(async () =>
    {
        response = await _kinesisFirehoseClient.PutRecordBatchAsync(DeliveryStreamName, kinesisRecords);
        if (response.FailedPutCount > 0)
        {
            await RetryFailedRecords(response, kinesisRecords);
        }
    });

    return response;
}
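// CreateExceptionRetryPolicy is referenced above but not shown. A minimal sketch of what it
// might look like, assuming the Polly library is used for the retry policy; the exception
// types, retry count, and backoff schedule are illustrative assumptions, not the original
// implementation.
private IAsyncPolicy CreateExceptionRetryPolicy()
{
    // Retry transient Firehose failures with exponential backoff (2s, 4s, 8s).
    return Policy
        .Handle<ServiceUnavailableException>()
        .Or<AmazonKinesisFirehoseException>()
        .WaitAndRetryAsync(3, attempt => TimeSpan.FromSeconds(Math.Pow(2, attempt)));
}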
public async Task<bool> RecordAsync<T>(IEnumerable<T> events, string streamName)
{
    if (!events.Any())
    {
        return false;
    }

    var records = new List<Record>();
    foreach (T obj in events)
    {
        // Serialize each event individually (not the whole collection) and delimit with a newline.
        string objAsJson = _serializer.Serialize(obj);
        byte[] objAsBytes = Encoding.UTF8.GetBytes(objAsJson + "\n");

        // Do not wrap the stream in a using declaration here: it must remain readable
        // until PutRecordBatchAsync has sent the batch.
        Record record = new Record { Data = new MemoryStream(objAsBytes) };
        records.Add(record);
    }

    var request = new PutRecordBatchRequest
    {
        DeliveryStreamName = streamName,
        Records = records
    };

    PutRecordBatchResponse response = await _client.PutRecordBatchAsync(request);
    if (response.HttpStatusCode != HttpStatusCode.OK)
    {
        throw new System.Exception($"Error sending message. HttpStatusCode: {response.HttpStatusCode}");
    }

    return true;
}
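// A usage sketch for RecordAsync above; the ClickEvent type, _recorder field, and stream name
// are hypothetical and only illustrate how the method is expected to be called.
public class ClickEvent
{
    public string UserId { get; set; }
    public DateTime OccurredAtUtc { get; set; }
}

public async Task FlushClicksAsync(IReadOnlyCollection<ClickEvent> pendingClicks)
{
    if (pendingClicks.Count == 0)
    {
        return;
    }

    // Each event becomes one newline-delimited JSON record in the Firehose batch.
    bool sent = await _recorder.RecordAsync(pendingClicks, "click-events-delivery-stream");
    // RecordAsync throws on a non-OK HTTP status, so sent is true whenever the batch was accepted.
}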
protected override async Task OnNextAsync(List<Envelope<Record>> envelopes, long batchBytes)
{
    _logger?.LogDebug($"KinesisFirehoseSink {this.Id} sending {envelopes.Count} records {batchBytes} bytes.");

    DateTime utcNow = DateTime.UtcNow;
    _clientLatency = (long)envelopes.Average(r => (utcNow - r.Timestamp).TotalMilliseconds);

    long elapsedMilliseconds = Utility.GetElapsedMilliseconds();
    try
    {
        _recordsAttempted += envelopes.Count;
        _bytesAttempted += batchBytes;

        List<Record> records = envelopes.Select(r => r.Data).ToList();
        if (this.CanCombineRecords)
        {
            records = CombineRecords(records);
        }

        PutRecordBatchResponse response = await _firehoseClient.PutRecordBatchAsync(_deliveryStreamName, records);
        _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;

        if (response.FailedPutCount > 0 && response.RequestResponses != null)
        {
            _throttle.SetError();
            _recoverableServiceErrors++;
            _recordsSuccess += envelopes.Count - response.FailedPutCount;
            _logger?.LogError($"KinesisFirehoseSink client {this.Id} BatchRecordCount={envelopes.Count} FailedPutCount={response.FailedPutCount} Attempt={_throttle.ConsecutiveErrorCount}");

            List<Envelope<Record>> requeueRecords = new List<Envelope<Record>>();
            for (int i = 0; i < response.RequestResponses.Count; i++)
            {
                var reqResponse = response.RequestResponses[i];
                if (!string.IsNullOrEmpty(reqResponse.ErrorCode))
                {
                    requeueRecords.Add(envelopes[i]);
                    // When there is an error, reqResponse.RecordId is null, so we have to use the
                    // record's sequence number within the batch here.
                    if (_throttle.ConsecutiveErrorCount >= _maxAttempts)
                    {
                        _logger?.LogDebug($"Record {i} error {reqResponse.ErrorCode}: {reqResponse.ErrorMessage}");
                    }
                }
            }

            if (_buffer.Requeue(requeueRecords, _throttle.ConsecutiveErrorCount < _maxAttempts))
            {
                _recordsFailedRecoverable += response.FailedPutCount;
            }
            else
            {
                _recordsFailedNonrecoverable += response.FailedPutCount;
                throw new AmazonKinesisFirehoseException($"Messages discarded after {_throttle.ConsecutiveErrorCount} attempts.");
            }
        }
        else
        {
            _throttle.SetSuccess();
            _recordsSuccess += envelopes.Count;
            _logger?.LogDebug($"KinesisFirehoseSink {this.Id} successfully sent {envelopes.Count} records {batchBytes} bytes.");
            this.SaveBookmarks(envelopes);
        }
    }
    catch (Exception ex)
    {
        _latency = Utility.GetElapsedMilliseconds() - elapsedMilliseconds;
        _throttle.SetError();
        if (IsRecoverableException(ex) && _buffer.Requeue(envelopes, _throttle.ConsecutiveErrorCount < _maxAttempts))
        {
            _recoverableServiceErrors++;
            _recordsFailedRecoverable += envelopes.Count;
            if (LogThrottler.ShouldWrite(LogThrottler.CreateLogTypeId(this.GetType().FullName, "OnNextAsync", "Requeued", this.Id), TimeSpan.FromMinutes(5)))
            {
                _logger?.LogWarning($"KinesisFirehoseSink {this.Id} requeued request after exception (attempt {_throttle.ConsecutiveErrorCount}): {ex.ToMinimized()}");
            }
        }
        else
        {
            _nonrecoverableServiceErrors++;
            _recordsFailedNonrecoverable += envelopes.Count;
            _logger?.LogError($"KinesisFirehoseSink {this.Id} client exception after {_throttle.ConsecutiveErrorCount} attempts: {ex.ToMinimized()}");
        }
    }

    PublishMetrics(MetricsConstants.KINESIS_FIREHOSE_PREFIX);
}
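// IsRecoverableException is referenced above but not shown. A minimal sketch, assuming the sink
// treats Firehose availability problems and 5xx service errors as transient and retryable; the
// exact classification in the original source may differ.
private bool IsRecoverableException(Exception ex)
{
    // Firehose signals transient capacity problems with ServiceUnavailableException.
    if (ex is ServiceUnavailableException)
    {
        return true;
    }

    // Treat any AWS service error carrying a 5xx status code as recoverable.
    if (ex is AmazonServiceException serviceException && (int)serviceException.StatusCode >= 500)
    {
        return true;
    }

    return false;
}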