public Task<int> PostResultAsync(IQueuedTaskToken token, int timeoutMilliseconds)
{
    CheckNotDisposedOrThrow();
    CheckRunningOrThrow();

    if (token == null)
    {
        throw new ArgumentNullException(nameof(token));
    }

    long requestId = Interlocked.Increment(ref mLastRequestId);

    PostgreSqlTaskResultQueueProcessRequest processRequest =
        new PostgreSqlTaskResultQueueProcessRequest(requestId,
            token.LastQueuedTaskResult,
            timeoutMilliseconds: timeoutMilliseconds,
            maxFailCount: 3);

    mResultProcessingQueue.Add(processRequest);
    IncrementPostResultCount();

    return processRequest.Task.WithCleanup((prev) =>
    {
        if (processRequest.IsTimedOut)
        {
            IncrementResultWriteRequestTimeoutCount();
        }
        processRequest.Dispose();
    });
}
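// The WithCleanup extension used above is not part of this listing. A minimal
//  sketch of what such a helper could look like (an assumption, not the actual
//  implementation): run a cleanup callback once the task settles, then
//  re-propagate its outcome to the caller.
public static class TaskCleanupExtensionsSketch
{
    public static async Task<TResult> WithCleanup<TResult>(this Task<TResult> task,
        Action<Task<TResult>> cleanup)
    {
        try
        {
            //Await the original task so its result (or exception) flows through unchanged
            return await task;
        }
        finally
        {
            //Always invoke the cleanup callback, passing along the settled task
            cleanup(task);
        }
    }
}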
private async Task RunProcessingLoopAsync()
{
    CancellationToken stopToken = mResultProcessingStopRequest.Token;
    if (stopToken.IsCancellationRequested)
    {
        return;
    }

    while (true)
    {
        //We need to use a queue here - as we process the batch,
        //  we consume each element and, in case of an error
        //  that affects all of them, we would fail only the remaining ones,
        //  not the ones that have been successfully processed
        Queue<PostgreSqlTaskResultQueueProcessRequest> currentBatch =
            new Queue<PostgreSqlTaskResultQueueProcessRequest>();

        try
        {
            stopToken.ThrowIfCancellationRequested();

            //Try to dequeue and block if no item is available
            PostgreSqlTaskResultQueueProcessRequest processItem =
                mResultProcessingQueue.Take(stopToken);

            currentBatch.Enqueue(processItem);

            //See if there are other items available
            //  and add them to the current batch
            while (currentBatch.Count < RESULT_QUEUE_PROCESSING_BATCH_SIZE
                && mResultProcessingQueue.TryTake(out processItem))
            {
                currentBatch.Enqueue(processItem);
            }

            //Process the entire batch - don't observe
            //  the cancellation token
            await ProcessResultBatchAsync(currentBatch);
        }
        catch (OperationCanceledException)
        {
            mLogger.Debug("Cancellation requested. Breaking result processing loop...");

            //Best effort to cancel all pending requests
            foreach (PostgreSqlTaskResultQueueProcessRequest rq in mResultProcessingQueue.ToArray())
            {
                rq.SetCancelled();
            }

            break;
        }
        catch (Exception exc)
        {
            //Fail the requests left in the batch and add the
            //  retriable ones back to the processing queue
            foreach (PostgreSqlTaskResultQueueProcessRequest rq in currentBatch)
            {
                rq.SetFailed(exc);
                if (rq.CanBeRetried)
                {
                    mResultProcessingQueue.Add(rq);
                }
            }

            currentBatch.Clear();
            mLogger.Error("Error processing results", exc);
        }
        finally
        {
            //Clear the batch and start over
            currentBatch.Clear();
        }
    }
}
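// Not part of the original listing: a hedged sketch of how the members referenced
//  above (mResultProcessingQueue, mResultProcessingStopRequest) are presumably
//  wired together - a BlockingCollection<T> as the producer/consumer channel and
//  a CancellationTokenSource whose token breaks the blocking Take() on shutdown.
//  Field initializers and method names below are illustrative assumptions.
private readonly BlockingCollection<PostgreSqlTaskResultQueueProcessRequest> mResultProcessingQueue =
    new BlockingCollection<PostgreSqlTaskResultQueueProcessRequest>();

private CancellationTokenSource mResultProcessingStopRequest;
private Task mResultProcessingTask;

private void StartResultProcessingSketch()
{
    mResultProcessingStopRequest = new CancellationTokenSource();
    //Run the processing loop on a thread-pool thread
    mResultProcessingTask = Task.Run(async () => await RunProcessingLoopAsync());
}

private async Task StopResultProcessingSketchAsync()
{
    //Request cancellation; the loop observes it via the blocking Take() call
    mResultProcessingStopRequest.Cancel();
    await mResultProcessingTask;
    mResultProcessingStopRequest.Dispose();
}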
private async Task ProcessResultBatchAsync(Queue<PostgreSqlTaskResultQueueProcessRequest> currentBatch)
{
    MonotonicTimestamp startWrite = MonotonicTimestamp.Now();

    //An explicit choice has been made not to use transactions,
    //  since failing to update a result MUST NOT
    //  cause the other, successful, updates to be rolled back.
    using (NpgsqlConnection conn = await OpenConnectionAsync(CancellationToken.None))
    using (NpgsqlCommand updateCmd = new NpgsqlCommand(mUpdateSql, conn))
    {
        NpgsqlParameter pStatus = updateCmd.Parameters
            .Add("t_status", NpgsqlDbType.Integer);
        NpgsqlParameter pLastError = updateCmd.Parameters
            .Add("t_last_error", NpgsqlDbType.Text);
        NpgsqlParameter pErrorCount = updateCmd.Parameters
            .Add("t_error_count", NpgsqlDbType.Integer);
        NpgsqlParameter pLastErrorIsRecoverable = updateCmd.Parameters
            .Add("t_last_error_recoverable", NpgsqlDbType.Boolean);
        NpgsqlParameter pProcessingTime = updateCmd.Parameters
            .Add("t_processing_time_milliseconds", NpgsqlDbType.Bigint);
        NpgsqlParameter pFinalizedAt = updateCmd.Parameters
            .Add("t_processing_finalized_at_ts", NpgsqlDbType.TimestampTz);
        NpgsqlParameter pId = updateCmd.Parameters
            .Add("t_id", NpgsqlDbType.Uuid);

        await updateCmd.PrepareAsync();

        while (currentBatch.Count > 0)
        {
            PostgreSqlTaskResultQueueProcessRequest processRq =
                currentBatch.Dequeue();

            try
            {
                pStatus.Value = (int)processRq.ResultToUpdate.Status;

                string strLastError = processRq.ResultToUpdate.LastError.ToJson();
                if (strLastError != null)
                {
                    pLastError.Value = strLastError;
                }
                else
                {
                    pLastError.Value = DBNull.Value;
                }

                pErrorCount.Value = processRq.ResultToUpdate.ErrorCount;
                pLastErrorIsRecoverable.Value = processRq.ResultToUpdate.LastErrorIsRecoverable;
                pProcessingTime.Value = processRq.ResultToUpdate.ProcessingTimeMilliseconds;

                if (processRq.ResultToUpdate.ProcessingFinalizedAtTs.HasValue)
                {
                    pFinalizedAt.Value = processRq.ResultToUpdate.ProcessingFinalizedAtTs;
                }
                else
                {
                    pFinalizedAt.Value = DBNull.Value;
                }

                pId.Value = processRq.ResultToUpdate.Id;

                int affectedRows = await updateCmd.ExecuteNonQueryAsync();
                processRq.SetCompleted(affectedRows);

                IncrementResultWriteCount(MonotonicTimestamp.Since(startWrite));
            }
            catch (OperationCanceledException)
            {
                processRq.SetCancelled();
                throw;
            }
            catch (Exception exc)
            {
                processRq.SetFailed(exc);
                if (processRq.CanBeRetried)
                {
                    mResultProcessingQueue.Add(processRq);
                }

                mLogger.Error("Error processing result", exc);
            }
        }

        await conn.CloseAsync();
    }
}
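// The mUpdateSql statement itself is not shown in this listing. Below is a hedged
//  sketch of its likely shape, inferred from the parameters bound above; the table
//  and column names are placeholders (assumptions), not the actual schema.
private const string mUpdateSqlSketch =
    @"UPDATE task_results SET
        status = @t_status,
        last_error = @t_last_error,
        error_count = @t_error_count,
        last_error_is_recoverable = @t_last_error_recoverable,
        processing_time_milliseconds = @t_processing_time_milliseconds,
        processing_finalized_at_ts = @t_processing_finalized_at_ts
    WHERE id = @t_id";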