// Inspects a completed metrics-upload HTTP result: logs the failure (transport
// error or non-200 status) and queues the original request body for retry when
// the failure looks transient (transport error, or HTTP 5xx).
void handle_result(AwsHttpResult result) {
    bool failed = false;
    bool retry = false;
    string err_msg = "";

    if (!result.successful()) {
        // Transport-level failure: always retryable.
        failed = true;
        retry = true;
        err_msg = result.error();
    } else {
        var status = result.status_code();
        if (status != 200) {
            // Service returned an error status; only 5xx is considered transient.
            failed = true;
            retry = status >= 500;
            err_msg = result.response_body();
        }
    }

    if (failed) {
        StdErrorOut.Instance.StdError(string.Format(
            "Metrics upload failed: \n{0}\nRequest was: \n {1}",
            err_msg,
            result.context <string>()));
    }

    if (retry) {
        lock (mutex_) {
            retryable_requests.Add(result.context <string>());
        }
    }
}
// Fails every KinesisRecord in the request attached to this HTTP result,
// delegating to the per-record fail() overload with the result's timing window.
void fail(AwsHttpResult result, string err_code, string err_msg) {
    var request = result.context <PutRecordsRequest>();
    var start = result.start_time();
    var end = result.end_time();
    foreach (var kinesis_record in request.Items()) {
        fail(kinesis_record, start, end, err_code, err_msg);
    }
}
// Emits per-request metrics for a completed PutRecords call: user-record
// batching stats per KinesisRecord, request latency/size counters, and an
// error-code counter when the request failed or returned a non-200 status.
void emit_metrics(AwsHttpResult result) {
    MetricsPutter metrics_putter = new MetricsPutter(metrics_manager_, result);
    PutRecordsRequest prr = result.context <PutRecordsRequest>();

    double num_urs = 0;
    foreach (var kr in prr.Items()) {
        // Hoist the repeated Items() call; attribute the per-KinesisRecord
        // count to the predicted shard of the last user record in the batch.
        var user_records = kr.Items();
        metrics_putter.put(Names.UserRecordsPerKinesisRecord,
                           user_records.Count,
                           (ulong)user_records[user_records.Count - 1].Predicted_shard());
        num_urs += user_records.Count;
    }

    metrics_putter
        .put(Names.RequestTime, result.duration_millis())
        .put(Names.KinesisRecordsPerPutRecordsRequest, prr.Items().Count)
        .put(Names.UserRecordsPerPutRecordsRequest, num_urs);

    string err_code = null;
    if (result.successful()) {
        var status_code = result.status_code();
        if (status_code != 200) {
            // TODO parse the json (if any) to get the error code
            err_code = "Http" + status_code;
        }
    } else {
        // BUG FIX: the original unconditional Substring(0, 255) throws
        // ArgumentOutOfRangeException whenever the error string is shorter
        // than 255 chars (and NullReferenceException when it is null).
        // Truncate only when actually longer than 255.
        var err = result.error();
        err_code = (err != null && err.Length > 255) ? err.Substring(0, 255) : err;
    }

    if (err_code != null) {
        metrics_putter
            .put(Names.ErrorsByCode, 1, 0, err_code)
            .put(Names.AllErrors, 1);
    }
}
// Processes a 200 PutRecords response: matches each response record to the
// KinesisRecord that produced it, emits success/failure metrics per predicted
// shard, completes successful user records, and retries or fails the rest.
void on_200(AwsHttpResult result) {
    dynamic json = System.Web.Helpers.Json.Decode(result.response_body());
    List<dynamic> records = json["Records"];
    var prr = result.context <PutRecordsRequest>();
    MetricsPutter metrics_putter = new MetricsPutter(metrics_manager_, result);

    // If somehow there's a size mismatch, subsequent code may crash from
    // array out of bounds, so we're going to explicitly catch it here and
    // print a nicer message. Also, if there's a size mismatch, we can no longer
    // be sure which result is for which record, so we better fail all of them.
    // None of this is expected to happen if the backend behaves correctly,
    // but if it does happen, this will make it easier to identify the problem.
    if (records.Count != (int)prr.Size()) {
        string ss = "Count of records in PutRecords response differs from the number " +
                    "sent: " + records.Count + " received, but " + prr.Size() + " were sent.";
        StdErrorOut.Instance.StdError(ss);
        fail(result, "Record Count Mismatch", ss);
        return;
    }

    for (int i = 0; i < (int)prr.Size(); i++) {
        var record = records[i];
        var kr = prr.Items()[i];
        // BUG FIX: a per-record success is signalled by the presence of a
        // "SequenceNumber" field (failures carry ErrorCode/ErrorMessage
        // instead). The original `bool success = record["SequenceNumber"];`
        // attempted a dynamic string->bool conversion that fails at runtime;
        // test presence with a null check instead.
        bool success = record["SequenceNumber"] != null;
        var start = result.start_time();
        var end = result.end_time();
        var shard_id = kr.Items()[0].Predicted_shard();

        if (success) {
            metrics_putter
                .put(Names.KinesisRecordsPut, 1, (ulong)shard_id)
                .put(Names.KinesisRecordsDataPut, kr.accurate_size(), (ulong)shard_id)
                .put(Names.AllErrors, 0, (ulong)shard_id);
            foreach (var ur in kr.Items()) {
                succeed_if_correct_shard(ur, start, end, record["ShardId"], record["SequenceNumber"]);
            }
        } else {
            metrics_putter
                .put(Names.KinesisRecordsPut, 0, (ulong)shard_id)
                .put(Names.ErrorsByCode, 1, (ulong)shard_id, record["ErrorCode"].ToString())
                // (ulong) cast added for consistency with every other put() above.
                .put(Names.AllErrors, 1, (ulong)shard_id);

            string err_code = record["ErrorCode"];
            string err_msg = record["ErrorMessage"];
            // Throttling is retryable unless the user opted out; internal
            // service errors are always retryable.
            bool can_retry =
                (!config_.failIfThrottled && err_code == "ProvisionedThroughputExceededException") ||
                (err_code == "InternalFailure") ||
                (err_code == "ServiceUnavailable");
            if (can_retry) {
                retry_not_expired(kr, start, end, err_code, err_msg);
            } else {
                fail(kr, start, end, err_code, err_msg);
            }
        }
    }
}
// Binds this putter to the metrics manager and captures the stream name from
// the PutRecordsRequest attached to the given HTTP result.
public MetricsPutter(MetricsManager metrics_manager, AwsHttpResult result) {
    // The two assignments are independent; order is irrelevant.
    stream = result.context <PutRecordsRequest>().stream();
    this.metrics_manager = metrics_manager;
}