Example #1
 void fail(AwsHttpResult result, string err_code, string err_msg)
 {
     foreach (var kr in result.context <PutRecordsRequest>().Items())
     {
         fail(kr, result.start_time(), result.end_time(), err_code, err_msg);
     }
 }
Example #2
        void handle_result(AwsHttpResult result)
        {
            bool   retry   = false;
            bool   failed  = false;
            string err_msg = "";

            if (!result.successful())
            {
                failed  = true;
                err_msg = result.error();
                retry   = true;
            }
            else if (result.status_code() != 200)
            {
                failed  = true;
                err_msg = result.response_body();
                retry   = result.status_code() >= 500;
            }

            if (failed)
            {
                StdErrorOut.Instance.StdError(string.Format("Metrics upload failed: \n{0}\nRequest was: \n {1}", err_msg, result.context <string>()));
            }

            if (retry)
            {
                lock (mutex_)
                    retryable_requests.Add(result.context <string>());
            }
        }
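Example #2 only records the retryable requests; how they are re-sent is not shown in these examples. A minimal sketch of the draining side, assuming the same mutex_, a List<string> retryable_requests field, and a hypothetical resend(string) helper, might look like this:

        // Hypothetical drain loop: swap the list out under the lock so the lock
        // is not held while requests are re-issued. resend(...) is an assumed helper.
        void retry_failed_uploads()
        {
            List<string> to_retry;

            lock (mutex_)
            {
                to_retry           = retryable_requests;
                retryable_requests = new List<string>();
            }

            foreach (var request in to_retry)
            {
                resend(request);
            }
        }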
Example #3
 void retry_not_expired(AwsHttpResult result)
 {
     retry_not_expired
     (
         result,
         result.successful() ? result.status_code().ToString() : "Exception",
         // clamp the length so Substring does not throw on bodies shorter than 4096 chars
         result.successful() ? result.response_body().Substring(0, Math.Min(4096, result.response_body().Length)) : result.error()
     );
 }
Example #4
        void emit_metrics(AwsHttpResult result)
        {
            MetricsPutter     metrics_putter = new MetricsPutter(metrics_manager_, result);
            PutRecordsRequest prr            = result.context <PutRecordsRequest>();

            double num_urs = 0;

            foreach (var kr in prr.Items())
            {
                metrics_putter.put(Names.UserRecordsPerKinesisRecord, kr.Items().Count, (ulong)kr.Items()[kr.Items().Count - 1].Predicted_shard());
                num_urs += kr.Items().Count;
            }

            metrics_putter.put
                (Names.RequestTime, result.duration_millis()).put
                (Names.KinesisRecordsPerPutRecordsRequest, prr.Items().Count).put
                (Names.UserRecordsPerPutRecordsRequest, num_urs);

            string err_code = null;

            if (result.successful())
            {
                var status_code = result.status_code();
                if (status_code != 200)
                {
                    // TODO parse the json (if any) to get the error code
                    err_code = "Http" + status_code;
                }
            }
            else
            {
                err_code = result.error().Substring(0, Math.Min(255, result.error().Length));
            }

            if (err_code != null)
            {
                metrics_putter.put
                    (Names.ErrorsByCode, 1, 0, err_code).put
                    (Names.AllErrors, 1);
            }
        }
Example #5
        void handle_put_records_result(AwsHttpResult result)
        {
            emit_metrics(result);

            try
            {
                if (result.successful())
                {
                    var status_code = result.status_code();
                    if (status_code == 200)
                    {
                        on_200(result);
                    }
                    else if (status_code >= 500 && status_code < 600)
                    {
                        retry_not_expired(result);
                    }
                    else
                    {
                        // For PutRecords, errors that apply to individual kinesis records
                        // (like throttling, too big or bad format) come back in code 200s.
                        // This is different from plain old PutRecord, where those come back
                        // with code 400. As such, all the errors we want to retry on are
                        // handled in the 200 case. All 400 codes are therefore not retryable.
                        StdErrorOut.Instance.StdError(string.Format("PutRecords failed: {0}", result.response_body()));
                        fail(result);
                    }
                }
                else
                {
                    retry_not_expired(result);
                }
            }
            catch (Exception ex)
            {
                StdErrorOut.Instance.StdError(string.Format("Unexpected error encountered processing http result: {0}", ex.ToString()));
                fail(result, "Unexpected Error", ex.ToString());
            }
        }
Example #6
 void fail(AwsHttpResult result)
 {
     fail(result,
          result.successful() ? result.status_code().ToString() : "Exception",
          // clamp the length so Substring does not throw on bodies shorter than 4096 chars
          result.successful() ? result.response_body().Substring(0, Math.Min(4096, result.response_body().Length)) : result.error());
 }
Example #7
        void on_200(AwsHttpResult result)
        {
            dynamic        json           = System.Web.Helpers.Json.Decode(result.response_body());
            List <dynamic> records        = json["Records"];
            var            prr            = result.context <PutRecordsRequest>();
            MetricsPutter  metrics_putter = new MetricsPutter(metrics_manager_, result);

            // If somehow there's a size mismatch, subsequent code may crash from
            // array out of bounds, so we're going to explicitly catch it here and
            // print a nicer message. Also, if there's a size mismatch, we can no longer
            // be sure which result is for which record, so we better fail all of them.
            // None of this is expected to happen if the backend behaves correctly,
            // but if it does happen, this will make it easier to identify the problem.
            if (records.Count != (int)prr.Size())
            {
                string ss = "Count of records in PutRecords response differs from the number " + "sent: " + records.Count + " received, but " + prr.Size() + " were sent.";
                StdErrorOut.Instance.StdError(ss);
                fail(result, "Record Count Mismatch", ss);
                return;
            }

            for (int i = 0; i < (int)prr.Size(); i++)
            {
                var  record  = records[i];
                var  kr      = prr.Items()[i];
                bool success = record["SequenceNumber"] != null; // a successful record carries a SequenceNumber
                var  start   = result.start_time();
                var  end     = result.end_time();

                var shard_id = kr.Items()[0].Predicted_shard();
                if (success)
                {
                    metrics_putter.put
                        (Names.KinesisRecordsPut, 1, (ulong)shard_id).put
                        (Names.KinesisRecordsDataPut, kr.accurate_size(), (ulong)shard_id).put
                        (Names.AllErrors, 0, (ulong)shard_id);
                }
                else
                {
                    metrics_putter.put
                        (Names.KinesisRecordsPut, 0, (ulong)shard_id).put
                        (Names.ErrorsByCode, 1, (ulong)shard_id, record["ErrorCode"].ToString()).put
                        (Names.AllErrors, 1, (ulong)shard_id);
                }

                if (success)
                {
                    foreach (var ur in kr.Items())
                    {
                        succeed_if_correct_shard(ur, start, end, record["ShardId"], record["SequenceNumber"]);
                    }
                }
                else
                {
                    string err_code = record["ErrorCode"];
                    string err_msg  = record["ErrorMessage"];

                    bool can_retry = (!config_.failIfThrottled && err_code == "ProvisionedThroughputExceededException")
                                     || (err_code == "InternalFailure")
                                     || (err_code == "ServiceUnavailable");

                    if (can_retry)
                    {
                        retry_not_expired(kr, start, end, err_code, err_msg);
                    }
                    else
                    {
                        fail(kr, start, end, err_code, err_msg);
                    }
                }
            }
        }
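on_200 relies on the standard Kinesis PutRecords response shape: each entry of Records carries a SequenceNumber and ShardId on success, or an ErrorCode and ErrorMessage on failure. The snippet below decodes an invented sample body (only the field names follow the API) to show the two branches the loop above distinguishes:

        // Illustrative PutRecords response: the first record succeeded, the
        // second was throttled. The values are made up for illustration.
        string body = @"{
            ""FailedRecordCount"": 1,
            ""Records"": [
                { ""SequenceNumber"": ""49543463076548007577105092703039560359975228518395019266"",
                  ""ShardId"": ""shardId-000000000000"" },
                { ""ErrorCode"": ""ProvisionedThroughputExceededException"",
                  ""ErrorMessage"": ""Rate exceeded for shard shardId-000000000001"" }
            ]
        }";

        dynamic json    = System.Web.Helpers.Json.Decode(body);
        var     records = json["Records"];

        // records[0]["SequenceNumber"] != null -> counted as a success above;
        // records[1]["ErrorCode"] is retryable -> would be routed to retry_not_expired.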
Example #8
 public void put(AwsHttpResult result)
 {
     handle_put_records_result(result);
 }
Example #9
 public MetricsPutter(MetricsManager metrics_manager, AwsHttpResult result)
 {
     this.metrics_manager = metrics_manager;
     stream = result.context <PutRecordsRequest>().stream();
 }
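Example #9 shows only the constructor; the chained calls in Examples #4 and #7 imply that put returns the putter itself. A minimal sketch of that fluent shape follows; the MetricsManager.put signature, the string type of the Names.* constants, and the optional parameters are assumptions, not the project's actual API.

 public class MetricsPutter
 {
     MetricsManager metrics_manager;
     string         stream;

     public MetricsPutter(MetricsManager metrics_manager, AwsHttpResult result)
     {
         this.metrics_manager = metrics_manager;
         stream = result.context <PutRecordsRequest>().stream();
     }

     // Assumed shape: forward the sample to the metrics manager and return
     // this so calls can be chained as in Examples #4 and #7.
     public MetricsPutter put(string name, double value, ulong shard_id = 0, string err_code = null)
     {
         metrics_manager.put(name, value, stream, shard_id, err_code);
         return this;
     }
 }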
Example #10
 private void retrier_put(AwsHttpResult result)
 {
     StdErrorOut.Instance.StdOut(LogLevel.debug, "Pipeline.retrier_put");
     this.executor_.Submit((Action)(() =>
     {
         try { this.retrier_.put(result); }
         catch (Exception e) { StdErrorOut.Instance.StdError("pipeline retrier_put_kr failed", e); }
     }));
 }
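executor_ is assumed here to be a small wrapper that runs submitted actions off the calling thread. One possible minimal shape, offered as an assumption rather than the project's actual executor, is a single background thread draining a blocking queue:

 // Assumed minimal executor: a worker thread consuming a queue of actions.
 // Uses System.Threading and System.Collections.Concurrent.
 public class Executor : IDisposable
 {
     private readonly BlockingCollection<Action> queue_ = new BlockingCollection<Action>();
     private readonly Thread worker_;

     public Executor()
     {
         worker_ = new Thread(() =>
         {
             foreach (var action in queue_.GetConsumingEnumerable())
             {
                 action();
             }
         });
         worker_.IsBackground = true;
         worker_.Start();
     }

     public void Submit(Action action)
     {
         queue_.Add(action);
     }

     public void Dispose()
     {
         queue_.CompleteAdding();
         worker_.Join();
     }
 }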
Example #11
        void update_callback(AwsHttpResult result)
        {
            if (!result.successful())
            {
                update_fail(result.error());
                return;
            }

            if (result.status_code() != 200)
            {
                update_fail(result.response_body());
                return;
            }

            try
            {
                dynamic json = System.Web.Helpers.Json.Decode(result.response_body());
                var     stream_description = json["StreamDescription"];

                string stream_status = stream_description["StreamStatus"];
                if (stream_status != "ACTIVE" && stream_status != "UPDATING")
                {
                    string ss = "Stream status is " + stream_status;
                    throw new Exception(ss);
                }

                List <dynamic> shards = stream_description["Shards"];
                for (int i = 0; i < shards.Count; i++)
                {
                    // Check if the shard is closed, if so, do not use it.
                    if (shards[i]["SequenceNumberRange"]["EndingSequenceNumber"] != null)
                    {
                        continue;
                    }

                    end_hash_key_to_shard_id.Add(new KeyValuePair <BigInteger, ulong>(
                        BigInteger.Parse(shards[i]["HashKeyRange"]["EndingHashKey"]),
                        shard_id_from_str(shards[i]["ShardId"])));
                }

                backoff = min_backoff;

                if (stream_description["HasMoreShards"])
                {
                    update(shards[shards.Count - 1]["ShardId"]);
                    return;
                }

                end_hash_key_to_shard_id.Sort((a, b) => a.Key.CompareTo(b.Key)); // KeyValuePair has no default ordering; sort by ending hash key

                lock (mutex_)
                {
                    state      = State.READY;
                    updated_at = DateTime.Now;
                }

                StdErrorOut.Instance.StdOut(LogLevel.info, string.Format("Successfully updated shard map for stream \"{0}\" found {1} shards", stream, end_hash_key_to_shard_id.Count));
            }
            catch (Exception ex)
            {
                update_fail(ex.ToString());
            }
        }
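shard_id_from_str is referenced above but not shown in these examples. Kinesis shard ids have the form "shardId-000000000001", so a plausible sketch, offered as an assumption rather than the project's actual implementation, is:

 // Assumed helper: take the numeric suffix of a shard id string,
 // e.g. "shardId-000000000001" -> 1.
 static ulong shard_id_from_str(string shard_id)
 {
     int dash = shard_id.IndexOf('-');
     return ulong.Parse(shard_id.Substring(dash + 1));
 }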