// Propagate a failure to every UserRecord contained in the KinesisRecord.
void fail(KinesisRecord kr, DateTime start, DateTime end, string err_code, string err_msg)
{
    foreach (var ur in kr.Items())
    {
        fail(ur, start, end, err_code, err_msg);
    }
}
// Push the KinesisRecord into the reducer; if that completes a batch, the
// resulting PutRecordsRequest is returned (null otherwise) after the
// buffered-data accounting has been updated.
public PutRecordsRequest put(KinesisRecord kr)
{
    var prr = reducer_.add(kr) as PutRecordsRequest;
    decrease_buffered_data(prr);
    return prr;
}
public KinesisRecord put(UserRecord ur)
{
    // If the shard map is not available, or aggregation is disabled, just send
    // the record by itself and do not attempt to aggregate.
    long shard_id = -1;
    BigInteger hk = 0;
    StdErrorOut.Instance.StdOut(LogLevel.debug, "Aggregator.put -> Called");
    if (config_.aggregationEnabled && shard_map_ != null)
    {
        hk = ur.Hash_key();
        shard_id = shard_map_.Shard_id(hk);
        StdErrorOut.Instance.StdOut(LogLevel.debug, "hk = " + hk + " shard_id= " + shard_id);
    }

    if (-1 == shard_id)
    {
        StdErrorOut.Instance.StdOut(LogLevel.debug, "if (-1 == shard_id)");
        var kr = new KinesisRecord();
        kr.add(ur);
        return kr;
    }
    else
    {
        ur.Predicted_shard(shard_id);
        return GetReducer(shard_id).add(ur) as KinesisRecord;
    }
}
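// A minimal sketch (not from this codebase) of what shard_map_.Shard_id(hk) is
// assumed to do above: each open Kinesis shard owns a contiguous hash-key
// range, so the lookup returns the shard whose range contains the record's
// MD5-derived hash key, or -1 when no map has been loaded yet. The parameter
// name endHashKeyToShard and its (EndingHashKey, ShardId) tuple layout are
// assumptions for illustration only.
private static long ShardIdSketch(List<Tuple<BigInteger, long>> endHashKeyToShard, BigInteger hashKey)
{
    // endHashKeyToShard is assumed to be sorted ascending by ending hash key.
    foreach (var entry in endHashKeyToShard)
    {
        if (hashKey <= entry.Item1)
        {
            return entry.Item2;
        }
    }
    return -1;
}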
// Delegate to the per-UserRecord overload for every record in the KinesisRecord.
void retry_not_expired(KinesisRecord kr, DateTime start, DateTime end, string err_code, string err_msg)
{
    foreach (var ur in kr.Items())
    {
        retry_not_expired(ur, start, end, err_code, err_msg);
    }
}
public void collector_put(KinesisRecord kr)
{
    StdErrorOut.Instance.StdOut(LogLevel.debug, "Pipeline.collector_put");
    PutRecordsRequest prr = this.collector_.put(kr);
    if (null != prr)
    {
        this.send_put_records_request(prr);
    }
}
private void aggregator_put(UserRecord ur)
{
    StdErrorOut.Instance.StdOut(LogLevel.debug, "Pipeline.aggregator_put. aggregator_ NULL = " + (this.aggregator_ == null ? "true" : "false"));
    KinesisRecord kr = this.aggregator_.put(ur);
    if (kr != null)
    {
        this.limiter_put(kr);
    }
}
// We don't want any individual shard to accumulate too much data, because
// that makes traffic to that shard bursty and might cause throttling, so we
// flush once a shard has roughly 256 KiB buffered.
private bool should_flush(KinesisRecord kr)
{
    var shard_id = kr.Items()[0].Predicted_shard();
    if (shard_id != -1 && buffered_data_.ContainsKey(shard_id))
    {
        var d = (buffered_data_[shard_id] += kr.accurate_size());
        if (d >= 256 * 1024)
        {
            return true;
        }
    }
    return false;
}
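// A minimal sketch (not the actual implementation) of the
// decrease_buffered_data counterpart called from put above: once records
// leave the buffer inside a PutRecordsRequest, their bytes are assumed to be
// subtracted from the same per-shard counters that should_flush increments.
// That prr exposes its batched KinesisRecords via Items() is an assumption.
private void decrease_buffered_data_sketch(PutRecordsRequest prr)
{
    if (prr == null)
    {
        return;
    }
    foreach (var kr in prr.Items())
    {
        var shard_id = kr.Items()[0].Predicted_shard();
        if (shard_id != -1 && buffered_data_.ContainsKey(shard_id))
        {
            buffered_data_[shard_id] -= kr.accurate_size();
        }
    }
}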
public void put(KinesisRecord kr)
{
    var shard_id = kr.Items()[kr.Items().Count - 1].Predicted_shard();
    // The limiter can't rate-limit per shard if we don't know which shard the
    // record is going to, so pass it straight through.
    if (shard_id == -1)
    {
        callback_(kr);
    }
    else
    {
        GetShardLimiter(shard_id).put(kr, callback_, expired_callback_);
    }
}
// Retry entry point for a whole KinesisRecord, e.g. one that expired while
// waiting in the limiter (see Pipeline.retrier_put_kr); each contained
// UserRecord is retried only if it has not yet expired.
public void put(KinesisRecord kr, string err_code, string err_msg)
{
    var now = DateTime.Now;
    retry_not_expired(kr, now, now, err_code, err_msg);
}
private void limiter_put(KinesisRecord kr)
{
    StdErrorOut.Instance.StdOut(LogLevel.debug, "Pipeline.limiter_put");
    this.limiter_.put(kr);
}
private void retrier_put_kr(KinesisRecord kr)
{
    StdErrorOut.Instance.StdOut(LogLevel.debug, "Pipeline.retrier_put_kr");
    this.executor_.Submit((Action)(() =>
    {
        try
        {
            this.retrier_.put(kr, "Expired", "Expiration reached while waiting in limiter");
        }
        catch (Exception e)
        {
            StdErrorOut.Instance.StdError("pipeline retrier_put_kr failed", e);
        }
    }));
}
// Queue the incoming record and immediately attempt to drain the queue.
public void put(KinesisRecord incoming, Callback callback, Callback expired_callback)
{
    queue_.Enqueue(incoming);
    drain(callback, expired_callback);
}
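// A minimal sketch (not the actual drain) of what the drain call above is
// assumed to do, consistent with the "Expiration reached while waiting in
// limiter" retry path in Pipeline.retrier_put_kr: release queued records in
// FIFO order while a rate limit allows, and hand anything that sat past its
// deadline to expired_callback instead. Here queue_ is assumed to be a plain
// Queue<KinesisRecord>, and tokens_, TryTake and Expired are illustrative
// assumptions, not members of this codebase.
private void drain_sketch(Callback callback, Callback expired_callback)
{
    while (queue_.Count > 0)
    {
        var kr = queue_.Peek();
        if (kr.Expired())
        {
            queue_.Dequeue();
            expired_callback(kr);   // timed out while waiting in the limiter
            continue;
        }
        if (!tokens_.TryTake(kr.accurate_size()))
        {
            break;                  // out of capacity; leave the rest queued for a later drain
        }
        queue_.Dequeue();
        callback(kr);               // forward downstream toward the collector
    }
}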