/// <summary>
/// Lazily re-stamps each entity's Timestamp with its event time (warning when the
/// storage timestamp lags the event time too far) and yields it through. Because
/// this is an iterator, the trailing trace and telemetry only execute if the caller
/// enumerates the sequence to completion.
/// </summary>
private IEnumerable<DynamicTableEntity> PreprocessEntities(IEnumerable<DynamicTableEntity> entities, ShardKeyArrived shardKeyArrived, string shardKeyTime)
{
    var earliest = DateTimeOffset.MaxValue;
    var yielded = 0;

    foreach (var entity in entities)
    {
        var eventTime = entity.GetEventDateTimeOffset();
        var lagSeconds = entity.Timestamp.Subtract(eventTime).TotalSeconds;
        if (lagSeconds >= _shardKeyDelayWarning)
        {
            TheTrace.TraceWarning(
                "SHARD_KEY_ACTOR_DELAY_DETECTED => Delay of {0} seconds for {1} in shardKey {2} and time {3}",
                lagSeconds,
                shardKeyArrived.Source.TypeName,
                shardKeyArrived.ShardKey,
                shardKeyTime);
        }

        // Re-stamp with the event time before handing downstream.
        entity.Timestamp = eventTime;
        yield return entity;

        // Track the oldest timestamp seen, for the delay telemetry below.
        if (entity.Timestamp < earliest)
        {
            earliest = entity.Timestamp;
        }

        yielded++;
    }

    TheTrace.TraceInformation("Gathered {0} records for {1} and ShardKey {2} => {1}_{2} {1}_{3}",
        yielded, shardKeyArrived.Source.TypeName, shardKeyArrived.ShardKey, shardKeyTime);

    if (yielded > 0)
    {
        _telemetryProvider.WriteTelemetry(
            "ShardKeyArrivedActor log delay duration",
            (long)(DateTimeOffset.UtcNow - earliest).TotalMilliseconds,
            shardKeyArrived.Source.TypeName);
    }
}
/// <summary>
/// Note: RunAsync and Start ping-pong between each other.
/// Runs one unit of work; a successful run resets the backoff interval, while a
/// failed run (or a thrown exception) waits out the next backoff delay. While
/// _isWorking is set, the next cycle is scheduled via Start().
/// </summary>
/// <returns></returns>
private async Task RunAsync()
{
    bool result = false;
    try
    {
        // NOTE(review): the previous CancellationTokenSource is overwritten without
        // being disposed — presumably Stop() holds its own reference and cancels it;
        // verify, otherwise one CTS leaks per cycle.
        _cancellationTokenSource = new CancellationTokenSource();
        result = await _work(_cancellationTokenSource.Token);
    }
    catch (Exception exception)
    {
        // Best-effort: a throwing work item is logged and treated as an unsuccessful run.
        TheTrace.TraceWarning(exception.ToString());
    }

    if (result)
    {
        _interval.Reset(); // success: restart the backoff sequence
    }
    else
    {
        await Task.Delay(_interval.Next()); // failure: back off before the next attempt
    }

    if (_isWorking)
    {
        Start(); // ping-pong: schedule the next cycle
    }
}
/// <summary>
/// Long-polls the given queue (or topic subscription) for the next message and
/// wraps the outcome in a PollerResult. Receive failures are logged and reported
/// as an unsuccessful poll rather than thrown.
/// </summary>
public async Task<PollerResult<Event>> NextAsync(QueueName name)
{
    try
    {
        BrokeredMessage message;
        if (name.IsSimpleQueue)
        {
            message = await _clientProvider.GetQueueClient(name).ReceiveAsync(_longPollingTimeout);
        }
        else
        {
            message = await _clientProvider.GetSubscriptionClient(name).ReceiveAsync(_longPollingTimeout);
        }

        var received = message != null;
        return new PollerResult<Event>(received, received ? message.ToEvent(name) : null);
    }
    catch (Exception e)
    {
        TheTrace.TraceWarning(e.ToString());
        return new PollerResult<Event>(false, null);
    }
}
/// <summary>
/// Leader-only: broadcasts an empty AppendEntries (heartbeat) to every peer,
/// throttled to at most once per 20% of the minimum election timeout, and logs
/// if any peer reports a term higher than ours.
/// </summary>
private async Task HeartBeatSend(CancellationToken c)
{
    if (_role != Role.Leader)
    {
        return;
    }

    // Throttle: skip if a heartbeat went out within the last 20% of ElectionTimeoutMin.
    if (_lastHeartbeatSent.Since() < _settings.ElectionTimeoutMin.Multiply(0.2))
    {
        return;
    }

    var currentTerm = State.CurrentTerm; // create a var. Could change during the method leading to confusing logs.

    // Empty Entries marks the request as a heartbeat.
    // NOTE(review): PreviousLogIndex/Term are long.MaxValue sentinels here —
    // presumably followers ignore them for heartbeats; verify against the
    // AppendEntries handler.
    var req = new AppendEntriesRequest()
    {
        CurrentTerm = currentTerm,
        Entries = new byte[0][],
        LeaderCommitIndex = _volatileState.CommitIndex,
        LeaderId = State.Id,
        PreviousLogIndex = long.MaxValue,
        PreviousLogTerm = long.MaxValue
    };

    var peers = _peerManager.GetPeers().ToArray();
    var proxies = peers.Select(x => _peerManager.GetProxy(x.Address));

    // Retry forever, wrapped in an overall timeout of 20% of ElectionTimeoutMin.
    var retry = TheTrace.LogPolicy(_meAsAPeer.ShortName).RetryForeverAsync();
    var policy = Policy.TimeoutAsync(_settings.ElectionTimeoutMin.Multiply(0.2)).WrapAsync(retry);
    var all = await Task.WhenAll(proxies.Select(p => policy.ExecuteAndCaptureAsync(() => p.AppendEntriesAsync(req))));

    var maxTerm = currentTerm;
    foreach (var r in all)
    {
        if (r.Outcome == OutcomeType.Successful)
        {
            if (!r.Result.IsSuccess)
            {
                TheTrace.TraceWarning($"[{_meAsAPeer.ShortName}] Got this reason for unsuccessful AppendEntriesAsync from a peer: {r.Result.Reason}");
            }

            // NOTE: We do NOT change leadership if they send higher term, since they could be candidates whom will not become leaders
            // we actually do not need to do anything with the result other than logging it
            if (r.Result.CurrentTerm > maxTerm)
            {
                maxTerm = r.Result.CurrentTerm;
            }
        }
    }

    if (maxTerm > State.CurrentTerm)
    {
        TheTrace.TraceWarning($"[{_meAsAPeer.ShortName}] Revolution brewing. Terms as high as {maxTerm} (vs my {currentTerm}) were seen.");
    }

    _lastHeartbeatSent.Set();
}
/// <summary>
/// Actor entry point for a ShardKeyArrived event: queries all entities for the
/// shard key, re-stamps each entity's timestamp with its event time (warning on
/// excessive delay), pushes them downstream and flushes, emitting telemetry for
/// queue delay and end-to-end log delay. Returns no follow-up events.
/// NOTE(review): the inner loop duplicates the logic in PreprocessEntities —
/// consider unifying them.
/// </summary>
public async Task<IEnumerable<Event>> ProcessAsync(Event evnt)
{
    var shardKeyArrived = evnt.GetBody<ShardKeyArrived>();

    // How long the message sat in the queue before this actor received it.
    _telemetryProvider.WriteTelemetry(
        "ShardKey receive message delay duration",
        (long)(DateTime.UtcNow - evnt.Timestamp).TotalMilliseconds,
        shardKeyArrived.Source.TypeName);

    await _durationInstrumentor.InstrumentAsync(async () =>
    {
        TheTrace.TraceInformation("Got {0} from {1}", shardKeyArrived.ShardKey, shardKeyArrived.Source.TypeName);

        // Resolve the shard-key query implementation from the source's dynamic
        // property, falling back to the table-storage query.
        var shardKeyQuerier = (string)shardKeyArrived.Source.GetDynamicProperty(ConveyorBeltConstants.ShardKeyQuery);
        var query = FactoryHelper.Create<IShardKeyQuery>(shardKeyQuerier, typeof(TableStorageShardKeyQuery));
        var entities = await query.QueryAsync(shardKeyArrived);

        var minDateTime = DateTimeOffset.MaxValue;
        var hasAnything = false;
        int n = 0;
        var shardKeyTime = shardKeyArrived.GetDateTimeOffset().ToString("yyyyMMddHHmm");
        foreach (var entity in entities)
        {
            // Warn when the storage timestamp lags the event time by too much.
            var eventDateTimeOffset = entity.GetEventDateTimeOffset();
            var delayInSeconds = entity.Timestamp.Subtract(eventDateTimeOffset).TotalSeconds;
            if (delayInSeconds >= _shardKeyDelayWarning)
            {
                TheTrace.TraceWarning("SHARD_KEY_ACTOR_DELAY_DETECTED => Delay of {0} seconds for {1} in shardKey {2} and time {3}",
                    delayInSeconds,
                    shardKeyArrived.Source.TypeName,
                    shardKeyArrived.ShardKey,
                    shardKeyTime);
            }

            // Re-stamp with the event time before pushing downstream.
            entity.Timestamp = eventDateTimeOffset;
            await _pusher.PushAsync(entity, shardKeyArrived.Source);
            hasAnything = true;

            // Track the oldest timestamp seen, for the delay telemetry below.
            minDateTime = minDateTime > entity.Timestamp ? entity.Timestamp : minDateTime;
            n++;
        }

        TheTrace.TraceInformation("Gathered {0} records for {1} and ShardKey {2} => {1}_{2} {1}_{3}",
            n, shardKeyArrived.Source.TypeName, shardKeyArrived.ShardKey, shardKeyTime);

        if (hasAnything)
        {
            await _pusher.FlushAsync();

            // Age of the oldest pushed record = end-to-end log delay.
            _telemetryProvider.WriteTelemetry(
                "ShardKeyArrivedActor log delay duration",
                (long)(DateTimeOffset.UtcNow - minDateTime).TotalMilliseconds,
                shardKeyArrived.Source.TypeName);
        }
    });

    return (Enumerable.Empty<Event>());
}
/// <inheritdocs/>
public void CleanSnapshots()
{
    try
    {
        // Best-effort cleanup of superseded snapshot files; a failure is
        // logged rather than propagated.
        var previous = _snapMgr.GetPreviousSnapshots();
        foreach (var path in previous)
        {
            File.Delete(path);
        }
    }
    catch (Exception e)
    {
        TheTrace.TraceWarning(e.ToString());
    }
}
/// <summary>
/// (Re)builds the broker connection under the lock: walks the scored factory
/// wrappers in order, takes the first that yields an open connection, and rebuilds
/// the stats dictionary, incrementing the error count of factories that failed so
/// the sorted order de-prioritises them next time.
/// </summary>
private void BuildConnection()
{
    lock (_padLock)
    {
        // Another thread may have rebuilt the connection while we waited on the lock.
        if (_connection != null && _connection.IsOpen)
        {
            return;
        }

        // NOTE(review): a previous non-open _connection is replaced without being
        // disposed and without unsubscribing its ConnectionShutdown handler —
        // verify the client library tolerates this.
        var newStats = new SortedDictionary<FactoryWrapperScore, IConnectionFactoryWrapper>();
        bool gotConnection = false;
        foreach (var item in _stats)
        {
            try
            {
                if (!gotConnection)
                {
                    _connection = item.Key.FactoryWrapper.CreateConnection();
                    if (!_connection.IsOpen)
                    {
                        throw new ConnectionNotOpenException();
                    }

                    // Auto-rebuild on shutdown (see _connection_ConnectionShutdown).
                    _connection.ConnectionShutdown += _connection_ConnectionShutdown;
                    gotConnection = true;
                }
            }
            catch (SocketException socketException)
            {
                TheTrace.TraceWarning(socketException.ToString());
                item.Key.ErrorCount++;
            }
            catch (BrokerUnreachableException brokerUnreachableException)
            {
                TheTrace.TraceWarning(brokerUnreachableException.ToString());
                item.Key.ErrorCount++;
            }
            catch (ConnectionNotOpenException)
            {
                TheTrace.TraceWarning("Connection not open");
                item.Key.ErrorCount++;
            }

            // Every factory is carried over into the rebuilt (re-sorted) dictionary.
            newStats.Add(item.Key, item.Value);
        }

        _stats = newStats;
    }
}
/// <summary>
/// Builds the recurring work item for a single follower: each invocation decides
/// whether to send log entries, a snapshot, or nothing, based on the follower's
/// nextIndex relative to our last log index and log offset.
/// </summary>
private Func<CancellationToken, Task> PeerAppendLog(Peer peer)
{
    return ((CancellationToken c) =>
    {
        long nextIndex;
        long matchIndex;
        var hasMatch = _volatileLeaderState.TryGetMatchIndex(peer.Id, out matchIndex);
        var hasNext = _volatileLeaderState.TryGetNextIndex(peer.Id, out nextIndex);
        var myLastIndex = _logPersister.LastIndex;

        // Peer missing from either dictionary: log and bail until leader state is set up.
        if (!hasMatch)
        {
            TheTrace.TraceWarning($"[{_meAsAPeer.ShortName}] Could not find peer with id {peer.Id} and address {peer.Address} in matchIndex dic.");
            return Task.CompletedTask;
        }

        if (!hasNext)
        {
            TheTrace.TraceWarning($"[{_meAsAPeer.ShortName}] Could not find peer with id {peer.Id} and address {peer.Address} in nextIndex dic.");
            return Task.CompletedTask;
        }

        // Follower is already fully caught up.
        if (nextIndex > myLastIndex)
        {
            TheTrace.TraceVerbose($"[{_meAsAPeer.ShortName}] PeerAppendLog - Nothing to do for peer {peer.ShortName} since myIndex is {myLastIndex} and nextIndex is {nextIndex}.");
            return Task.CompletedTask; // nothing to do
        }

        // Cap the batch at MaxNumberLogEntriesToAskToBeAppended entries.
        var count = (int)Math.Min(_settings.MaxNumberLogEntriesToAskToBeAppended, myLastIndex + 1 - nextIndex);
        var proxy = _peerManager.GetProxy(peer.Address);
        var retry = TheTrace.LogPolicy(_meAsAPeer.ShortName).WaitAndRetryAsync(2, (i) => TimeSpan.FromMilliseconds(20));
        var policy = Policy.TimeoutAsync(_settings.CandidacyTimeout).WrapAsync(retry); // TODO: create its own timeout

        // If the needed entries are still in our log, ship logs; otherwise they were
        // compacted away and the follower needs a snapshot first.
        if (nextIndex >= _logPersister.LogOffset)
        {
            TheTrace.TraceVerbose($"[{_meAsAPeer.ShortName}] Intending to do SendLog for peer {peer.Address} with nextIndex {nextIndex} and count {count}.");
            return SendLogs(proxy, policy, peer, nextIndex, matchIndex, count);
        }
        else
        {
            TheTrace.TraceVerbose($"[{_meAsAPeer.ShortName}] Intending to do SendSnapshot for peer {peer.Address}.");
            return SendSnapshot(proxy, policy, peer, nextIndex, matchIndex);
        }
    });
}
/// <summary>
/// Attaches a continuation that observes (and logs) the task's exception, if any,
/// so a faulted task cannot trigger unobserved-exception escalation. Returns the
/// continuation task.
/// </summary>
public static Task SafeObserve(this Task task)
{
    return task.ContinueWith(t =>
    {
        try
        {
            // Reading Task.Exception marks the fault as observed.
            var faulted = t.Exception;
            if (faulted != null)
            {
                TheTrace.TraceWarning("SafeObserve: " + faulted.ToString()); // probably will never run
            }
        }
        catch (Exception e)
        {
            // Even the logging path must not throw from the continuation.
            TheTrace.TraceWarning("SafeObserve: " + e.ToString());
        }
    });
}
/// <summary>
/// Returns the block blob for the given key under the source path, first creating
/// it as an empty blob when it does not exist yet. A racing creation by another
/// writer is tolerated and merely logged.
/// </summary>
private async Task<CloudBlockBlob> GetBlobAsync(string key)
{
    var blobName = _source.Path.TrimEnd('/') + "/" + key;
    var blob = _containerReference.GetBlockBlobReference(blobName);

    var exists = await blob.ExistsAsync();
    if (!exists)
    {
        try
        {
            await blob.UploadFromStreamAsync(new MemoryStream());
        }
        catch (Exception exception) // someone else created it in the meanwhile
        {
            TheTrace.TraceWarning("someone else created {0} meanwhile: {1}", key, exception.ToString());
        }
    }

    return blob;
}
/// <summary>
/// Loads the Elasticsearch index-settings JSON from the configured mappings path
/// (file name from configuration, defaulting to "__index_settings"). Returns an
/// empty string when no mappings path is configured or the file is not found;
/// throws on other HTTP failures.
/// </summary>
private async Task<string> GetIndexSettings()
{
    const string defaultSettingsJsonFileName = "__index_settings";

    var settingsJson = _configurationValueProvider.GetValue(ConfigurationKeys.EsIndexCreationJsonFileName);
    if (string.IsNullOrEmpty(settingsJson))
    {
        settingsJson = defaultSettingsJsonFileName;
    }

    var mappingsPath = _configurationValueProvider.GetValue(ConfigurationKeys.MappingsPath);
    var jsonPath = $"{mappingsPath}{settingsJson}.json";

    // No mappings path configured => fall back to the default (empty) settings.
    if (string.IsNullOrEmpty(mappingsPath))
    {
        return string.Empty;
    }

    var response = await _nonAuthenticatingClient.GetAsync(jsonPath);
    if (response.StatusCode == HttpStatusCode.NotFound)
    {
        TheTrace.TraceWarning("Could not find the index settings file: {0}", jsonPath);
        return string.Empty;
    }

    if (response.Content == null)
    {
        throw new ApplicationException(response.ToString());
    }

    var content = await response.Content.ReadAsStringAsync();
    if (!response.IsSuccessStatusCode)
    {
        throw new InvalidOperationException(content);
    }

    TheTrace.TraceInformation("This is the index settings JSON: {0}", content);
    return content;
}
/// <summary>
/// Streams the lazy enumerable into Elasticsearch using NEST's BulkAll observable,
/// counting completed pages. Returns the number of pages pushed; the returned task
/// faults if the bulk observable reports an error.
/// </summary>
private async Task<int> PushAllImpl(IEnumerable<IDictionary<string, string>> lazyEnumerable, string mappingName)
{
    var seenPages = 0;

    // NOTE(review): consider TaskCreationOptions.RunContinuationsAsynchronously so
    // SetResult/SetException do not run continuations inline on the observer
    // thread — confirm before changing.
    var tcs = new TaskCompletionSource<int>();
    var observableBulk = _client.BulkAll(lazyEnumerable, bulkDescriptor =>
    {
        // Document id = PartitionKey + RowKey; index name is derived from the
        // first document's @timestamp in each batch.
        bulkDescriptor
            .BufferToBulk((x, batch) => x.IndexMany(batch, (bd, d) => bd
                .Id(d["PartitionKey"] + d["RowKey"]))
                .Index(_indexNamer.BuildName(batch[0]["@timestamp"], mappingName)
                ))
            .Type(mappingName);

        if (_setPipeline)
        {
            bulkDescriptor.Pipeline(mappingName.ToLower());
        }

        return (bulkDescriptor
            .MaxDegreeOfParallelism(5)
            .Size(_batchSize));
    });

    var observer = new BulkAllObserver(
        onNext: (b) => Interlocked.Increment(ref seenPages),
        onCompleted: () =>
        {
            tcs.SetResult(seenPages);
        },
        onError: e =>
        {
            TheTrace.TraceWarning(e.ToString());
            tcs.SetException(e);
        }
    );

    observableBulk.Subscribe(observer);
    return (await tcs.Task.ConfigureAwait(false));
}
// Fired by the broker client when the connection drops: log the shutdown reason
// and immediately attempt a rebuild (BuildConnection is a no-op if the connection
// is already open again).
private void _connection_ConnectionShutdown(IConnection connection, ShutdownEventArgs reason)
{
    TheTrace.TraceWarning("Connection was shut down: {0}", reason.ReplyText);
    BuildConnection();
}
/// <summary>
/// Parses a simple "left &lt;op&gt; right" predicate (e.g. "Age &gt;= 18") into a compiled
/// Func&lt;T, bool&gt;, choosing T by probing the right-hand side as int, float,
/// DateTime, Guid, bool, and finally string. Returns null (after logging) when the
/// expression is empty, contains zero or multiple known operator symbols, or does
/// not split into exactly two operands.
/// Side effect: stores the left operand in _propertyName.
/// </summary>
private object ParseExpression(string expression)
{
    if (string.IsNullOrWhiteSpace(expression))
    {
        return (null);
    }

    // Which of the known operator symbols appear in the expression?
    var symbols = PredicateSymbols.Keys.Where(expression.Contains).ToArray();
    if (symbols.Length > 1)
    {
        TheTrace.TraceWarning("Expression has multiple predicates: {0}", expression);
        return (null);
    }

    if (symbols.Length == 0)
    {
        TheTrace.TraceWarning("Expression has no predicates: {0}", expression);
        return (null);
    }

    var splits = expression.Split(new string[] { symbols[0] }, StringSplitOptions.RemoveEmptyEntries)
        .Select(x => x.Trim()).ToArray();
    if (splits.Length != 2)
    {
        TheTrace.TraceWarning("Expression incomplete: {0}", expression);
        return (null);
    }

    string left = splits[0];
    string right = splits[1];
    _propertyName = left;

    // Probe the right-hand side in a fixed order; the first successful parse wins.
    // NOTE(review): TryParse here is culture-sensitive, and int is tried before
    // float and DateTime — e.g. "2020" binds as int, never as a year. Confirm this
    // ordering is intended before changing it.
    int ic = 0;
    if (int.TryParse(right, out ic))
    {
        var p1 = Expression.Parameter(typeof(int), "x");
        return (Expression.Lambda<Func<int, bool>>(
            PredicateSymbols[symbols[0]](p1, Expression.Constant(ic)), p1).Compile());
    }

    float iff;
    if (float.TryParse(right, out iff))
    {
        var p3 = Expression.Parameter(typeof(float), "x");
        return (Expression.Lambda<Func<float, bool>>(
            PredicateSymbols[symbols[0]](p3, Expression.Constant(iff)), p3).Compile());
    }

    DateTime id;
    if (DateTime.TryParse(right, out id))
    {
        var p2 = Expression.Parameter(typeof(DateTime), "x");
        return (Expression.Lambda<Func<DateTime, bool>>(
            PredicateSymbols[symbols[0]](p2, Expression.Constant(id)), p2).Compile());
    }

    Guid ig;
    if (Guid.TryParse(right, out ig))
    {
        var p4 = Expression.Parameter(typeof(Guid), "x");
        return (Expression.Lambda<Func<Guid, bool>>(
            PredicateSymbols[symbols[0]](p4, Expression.Constant(ig)), p4).Compile());
    }

    bool ib;
    if (bool.TryParse(right, out ib))
    {
        var p5 = Expression.Parameter(typeof(bool), "x");
        return (Expression.Lambda<Func<bool, bool>>(
            PredicateSymbols[symbols[0]](p5, Expression.Constant(ib)), p5).Compile());
    }

    // it is string then
    var parameter = Expression.Parameter(typeof(string), "x");
    return (Expression.Lambda<Func<string, bool>>(
        PredicateSymbols[symbols[0]](parameter, Expression.Constant(right)), parameter).Compile());
}
/// <inheritdoc />
/// <summary>
/// Follower-side handler for the leader's AppendEntries RPC: term checks (§5.1),
/// heartbeat handling, log-consistency check against prevLogIndex/prevLogTerm
/// (§5.3), conflict stripping, appending the new entries, and advancing the
/// commit index.
/// </summary>
public Task<AppendEntriesResponse> AppendEntriesAsync(AppendEntriesRequest request)
{
    _lastHeartbeat.Set(); // any AppendEntries counts as contact from the leader

    if (request.Entries != null && request.Entries.Length > 0)
    {
        TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Received the dough {request.Entries.Length} for position after {request.PreviousLogIndex}");
    }

    string message = null;

    lock (State)
    {
        // A higher term always demotes us to follower (§5.1).
        if (request.CurrentTerm > State.CurrentTerm)
        {
            BecomeFollower(request.CurrentTerm);
        }
    }

    // Reply false if term < currentTerm (§5.1)
    if (request.CurrentTerm < State.CurrentTerm)
    {
        message = $"[{_meAsAPeer.ShortName}] Leader's term is behind ({request.CurrentTerm} vs {State.CurrentTerm}).";
        TheTrace.TraceWarning(message);
        return Task.FromResult(new AppendEntriesResponse(State.CurrentTerm, false, ReasonType.TermInconsistency, message));
    }

    if (request.Entries == null || request.Entries.Length == 0) // it is a heartbeat, set the leader address
    {
        _leaderAddress = _peerManager.GetPeers().Where(x => x.Id == request.LeaderId).FirstOrDefault()?.Address;
        return Task.FromResult(new AppendEntriesResponse(State.CurrentTerm, true));
    }

    // chaos only when has entries
    _chaos.WreakHavoc();

    // Leader is ahead of our log: we are missing entries before the batch start.
    if (request.PreviousLogIndex > _logPersister.LastIndex)
    {
        message = $"[{_meAsAPeer.ShortName}] Position for last log entry is {_logPersister.LastIndex} but got entries starting at {request.PreviousLogIndex}";
        TheTrace.TraceWarning(message);
        return Task.FromResult(new AppendEntriesResponse(State.CurrentTerm, false, ReasonType.LogInconsistency, message));
    }

    if (request.PreviousLogIndex < _logPersister.LastIndex)
    {
        TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Position for PreviousLogIndex {request.PreviousLogIndex} but my LastIndex {_logPersister.LastIndex}");

        // Reply false if log doesn't contain an entry at prevLogIndex whose term matches prevLogTerm (§5.3)
        var entry = _logPersister.GetEntries(request.PreviousLogIndex, 1).First();

        // FIX: per Raft §5.3 the local entry's term must be compared against the
        // leader's PreviousLogTerm, not its CurrentTerm — the error message below
        // already printed PreviousLogTerm, confirming the intent.
        if (entry.Term != request.PreviousLogTerm)
        {
            message = $"[{_meAsAPeer.ShortName}] Position at {request.PreviousLogIndex} has term {entry.Term} but according to leader {request.LeaderId} it must be {request.PreviousLogTerm}";
            TheTrace.TraceWarning(message);
            return Task.FromResult(new AppendEntriesResponse(State.CurrentTerm, false, ReasonType.LogInconsistency, message));
        }

        // If an existing entry conflicts with a new one (same index but different terms), delete the existing entry and all that follow it (§5.3)
        _logPersister.DeleteEntries(request.PreviousLogIndex + 1);
        TheTrace.TraceWarning($"[{_meAsAPeer.ShortName}] Stripping the log from index {request.PreviousLogIndex + 1}. Last index was {_logPersister.LastIndex}");
    }

    var entries = request.Entries.Select(x => new LogEntry()
    {
        Body = x,
        Term = request.CurrentTerm
    }).ToArray();

    // Append any new entries not already in the log
    TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Current last index is {_logPersister.LastIndex}. About to append {entries.Length} entries at {request.PreviousLogIndex + 1}");
    _logPersister.Append(entries, request.PreviousLogIndex + 1);

    // If leaderCommit > commitIndex, set commitIndex = min(leaderCommit, index of last new entry)
    if (request.LeaderCommitIndex > _volatileState.CommitIndex)
    {
        _volatileState.CommitIndex = Math.Min(request.LeaderCommitIndex, _logPersister.LastIndex);
    }

    message = $"[{_meAsAPeer.ShortName}] Appended {request.Entries.Length} entries at position {request.PreviousLogIndex + 1}";
    TheTrace.TraceInformation(message);
    return Task.FromResult(new AppendEntriesResponse(State.CurrentTerm, true, ReasonType.None, message));
}
/// <summary>
/// Leader-side: ships up to <paramref name="count"/> log entries starting at
/// <paramref name="nextIndex"/> to a follower and updates its next/match indices
/// based on the outcome; on log inconsistency, walks nextIndex back towards
/// matchIndex + 1 (Raft §5.3).
/// </summary>
private async Task SendLogs(
    IRaftServer proxy,
    AsyncPolicy policy,
    Peer peer,
    long nextIndex,
    long matchIndex,
    int count)
{
    // Term of the entry immediately before nextIndex: taken from the log while it is
    // still available, otherwise from the last snapshot; -1 when sending from index 0.
    var previousIndexTerm = -1L;
    if (nextIndex > 0)
    {
        if (nextIndex > _logPersister.LogOffset)
        {
            previousIndexTerm = _logPersister.GetEntries(nextIndex - 1, 1).First().Term;
        }
        else
        {
            Snapshot ss;
            if (_snapshotOperator.TryGetLastSnapshot(out ss))
            {
                previousIndexTerm = ss.LastIncludedTerm;
            }
        }
    }

    var request = new AppendEntriesRequest()
    {
        CurrentTerm = State.CurrentTerm,
        Entries = _logPersister.GetEntries(nextIndex, count).Select(x => x.Body).ToArray(),
        LeaderCommitIndex = _volatileState.CommitIndex,
        LeaderId = State.Id,
        PreviousLogIndex = nextIndex - 1,
        PreviousLogTerm = previousIndexTerm
    };

    var result = await policy.ExecuteAndCaptureAsync(() => proxy.AppendEntriesAsync(request));
    if (result.Outcome == OutcomeType.Successful)
    {
        if (result.Result.IsSuccess)
        {
            // If successful: update nextIndex and matchIndex for follower (§5.3)
            _volatileLeaderState.SetMatchIndex(peer.Id, nextIndex + count - 1);
            _volatileLeaderState.SetNextIndex(peer.Id, nextIndex + count);
            TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Successfully transferred {count} entries from index {nextIndex} to peer {peer.Address} - Next Index is {_volatileLeaderState.NextIndices[peer.Id]}");
            UpdateCommitIndex();
        }
        else
        {
            // log reason only
            TheTrace.TraceWarning($"AppendEntries for start index {nextIndex} and count {count} for peer {peer.Address} with address {peer.Address} in term {State.CurrentTerm} failed with reason type {result.Result.ReasonType} and this reason: {result.Result.Reason}");
            if (result.Result.ReasonType == ReasonType.LogInconsistency)
            {
                // Decrement nextIndex towards matchIndex + 1, capped at
                // MaxNumberOfDecrementForLogsThatAreBehind per round trip.
                var diff = nextIndex - (matchIndex + 1);
                nextIndex = diff > _settings.MaxNumberOfDecrementForLogsThatAreBehind ?
                    nextIndex - _settings.MaxNumberOfDecrementForLogsThatAreBehind :
                    nextIndex - diff;

                _volatileLeaderState.SetNextIndex(peer.Id, nextIndex);
                TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Updated (decremented) next index for peer {peer.Address} to {nextIndex}");
            }
        }
    }
    else
    {
        // NUNCA!!
        // not interested in network, etc errors, they get logged in the policy
    }
}
/// <summary>
/// Leader-side: streams a private copy of the latest snapshot to a follower whose
/// nextIndex has fallen behind our log offset, in chunks of
/// MaxSnapshotChunkSentInBytes, then fast-forwards the follower's match/next
/// indices to just past the snapshot. Throws InvalidProgramException when no
/// suitable snapshot exists.
/// </summary>
private async Task SendSnapshot(
    IRaftServer proxy,
    AsyncPolicy policy,
    Peer peer,
    long nextIndex,
    long matchIndex)
{
    var logOffset = LogPersister.LogOffset;
    var term = State.CurrentTerm;
    Snapshot ss;
    if (!SnapshotOperator.TryGetLastSnapshot(out ss))
    {
        throw new InvalidProgramException($"WE DO NOT HAVE A SNAPSHOT for client {peer.Address} whose nextIndex is {nextIndex} yet our LogOffset is {logOffset}");
    }

    // The snapshot must reach at least nextIndex - 1, otherwise there is a gap
    // between the snapshot and the entries we could subsequently send.
    if (ss.LastIncludedIndex + 1 < nextIndex)
    {
        throw new InvalidProgramException($"WE DO NOT HAVE A <<PROPER>> SNAPSHOT for client {peer.Address} whose nextIndex is {nextIndex} yet our LogOffset is {logOffset}. Snapshot was have ({ss.FullName}) is short {ss.LastIncludedIndex}");
    }

    // make a copy since it might be cleaned up or opened by another thread for another client
    var fileName = Path.GetTempFileName();
    File.Copy(ss.FullName, fileName, true);
    TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] About to send Snapshot copy file {fileName} to [{peer.ShortName}] and copy of {ss.FullName}.");

    using (var fs = new FileStream(fileName, FileMode.Open))
    {
        var start = 0;
        var total = 0;
        var length = fs.Length;
        var buffer = new byte[_settings.MaxSnapshotChunkSentInBytes];
        TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Snapshot copy file size is {length}. Location is {fileName} and copy of {ss.FullName}.");
        while (total < length)
        {
            var count = fs.Read(buffer, 0, buffer.Length);
            total += count;

            // NOTE(review): LastIncludedTerm is sent as the leader's current term,
            // not ss.LastIncludedTerm — confirm this is intentional.
            var result = await proxy.InstallSnapshotAsync(new InstallSnapshotRequest()
            {
                CurrentTerm = term,
                // Last chunk may be short; trim the buffer to the bytes actually read.
                Data = count == buffer.Length ? buffer : buffer.Take(count).ToArray(),
                LastIncludedIndex = ss.LastIncludedIndex,
                LastIncludedTerm = term,
                IsDone = total == length,
                LeaderId = State.Id,
                Offset = start
            });

            TheTrace.TraceInformation($"[{_meAsAPeer.ShortName}] Sent snapshot for peer {peer.Address} with {count} bytes totalling {total}.");
            start += count;
            if (result.CurrentTerm != term)
            {
                TheTrace.TraceWarning($"[{_meAsAPeer.ShortName}] I am sending snapshot but this peer {peer.Address} has term {result.CurrentTerm} vs my started term {term} and current term {State.CurrentTerm}.");
            }
        }
    }

    _volatileLeaderState.SetMatchIndex(peer.Id, ss.LastIncludedIndex);
    _volatileLeaderState.SetNextIndex(peer.Id, ss.LastIncludedIndex + 1); // the rest will be done by sending logs
    File.Delete(fileName);
}
// Regression test: TraceWarning must swallow any formatting problem rather than
// throw (placeholders are deliberately out of order relative to the arguments).
public void TraceWarningDoesNotThrowException()
{
    TheTrace.TraceWarning("chappi {1} {0}", "hapachap", 7979);
}
/// <summary>
/// Posts the current batch to the Elasticsearch _bulk endpoint, retrying pruned
/// (failed) documents up to 3 times and backing off when ES returns 429 for any
/// document. Throws on transport-level or malformed responses; documents still
/// failing after all retries are logged and the batch cleared.
/// </summary>
private async Task PushbatchAsync()
{
    if (_batch.Count == 0)
    {
        return;
    }

    try
    {
        int retry = 0;
        List<int> statuses = null;
        do
        {
            var responseMessage = await _httpClient.PostAsync(_esUrl + "_bulk",
                new StringContent(_batch.ToString(), Encoding.UTF8, "application/json"));
            var content = responseMessage.Content == null
                ? ""
                : (await responseMessage.Content.ReadAsStringAsync());

            if (!responseMessage.IsSuccessStatusCode)
            {
                throw new ApplicationException(string.Format("Unsuccessful ES bulk: {0} - {1}", responseMessage.StatusCode, content));
            }

            dynamic j = JObject.Parse(content);
            if (j == null || j.items == null)
            {
                throw new ApplicationException(string.Format("Unsuccessful ES bulk - items null: {0}", content));
            }

            // One HTTP status per document, in batch order.
            var items = (JArray)j.items;
            statuses = items.Children<JObject>().Select(x => x.Properties().First().Value["status"].Value<int>()).ToList();

            // Anything outside 2xx (other than 429, handled below) is a per-document error.
            if (statuses.Any(y => y < 200 || (y > 299 && y != 429)))
            {
                TheTrace.TraceWarning("LOOK!! We had some errors from ES bulk at retry {1}: {0}", content, retry);
            }

            if (statuses.Any(y => y == 429))
            {
                // ES is throttling us: back off before the next attempt.
                var timeSpan = _interval.Next();
                TheTrace.TraceWarning("LOOK!! Got 429 -> backing off for {0} seconds", timeSpan.TotalSeconds);

                // FIX: was Thread.Sleep, which blocks a thread-pool thread inside an
                // async method; Task.Delay yields the thread for the duration instead.
                await Task.Delay(timeSpan);
            }
            else
            {
                _interval.Reset();
            }

            TheTrace.TraceInformation("ConveyorBelt_Pusher: Pushing {1} records to {0} [retry: {2}]", _esUrl, _batch.Count, retry);
        } while (_batch.Prune(statuses) > 0 && retry++ < 3);

        if (_batch.Count > 0)
        {
            TheTrace.TraceWarning("WARNING!!! Some residual documents could not be inserted even after retries: {0}", _batch.Count);
            _batch.Clear();
        }
    }
    catch (Exception e)
    {
        TheTrace.TraceError(e.ToString());
        throw; // preserve the stack trace for the caller
    }
}
// Regression test: TraceWarning must not throw when called with a plain message
// and no format arguments.
public void TraceWarningDoesNotThrowExceptionWithNoParams()
{
    TheTrace.TraceWarning("chappi ");
}