protected virtual Task<TResponse> ExecuteAsync<TRequest, TResponse>(
    TRequest request,
    CallContext callContext,
    Func<OperationContext, Task<Result<TResponse>>> executeAsync,
    string extraStartMessage = null,
    Func<Result<TResponse>, string> extraEndMessage = null,
    [CallerMemberName] string caller = null)
    where TRequest : ServiceRequestBase
    where TResponse : ServiceResponseBase, new()
{
    // Prefer the context id carried by the request; fall back to the gRPC request headers.
    var contextId = request.ContextId;
    contextId ??= MetadataServiceSerializer.TryGetContextId(callContext.RequestHeaders);

    var tracingContext = contextId != null
        ? new Context(contextId, StartupLogger)
        : new Context(StartupLogger);

    return WithOperationContext(
        tracingContext,
        callContext.CancellationToken,
        async context =>
        {
            var result = await context.PerformOperationAsync(
                Tracer,
                () => ExecuteCoreAsync(context, request, executeAsync),
                caller: caller,
                traceOperationStarted: false,
                // Removing this (i.e., enabling logging on all operations) overwhelms NLog, causing extreme
                // memory usage growth until you run out of it.
                traceErrorsOnly: true,
                extraStartMessage: extraStartMessage,
                extraEndMessage: r => string.Join(
                    " ",
                    extraEndMessage?.Invoke(r),
                    request.BlockId?.ToString(),
                    $"Retry=[{r.GetValueOrDefault()?.ShouldRetry}]"));

            if (result.Succeeded)
            {
                return result.Value;
            }
            else
            {
                // Translate a failed Result into an error response so the failure flows back
                // to the client as data instead of faulting the gRPC call.
                var response = new TResponse()
                {
                    ErrorMessage = result.ErrorMessage,
                    Diagnostics = result.Diagnostics,
                };
                return response;
            }
        });
}
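// Illustration only, not part of the production code: a minimal sketch of how a context id
// could be recovered from gRPC request headers, in the spirit of
// MetadataServiceSerializer.TryGetContextId used above. The header key below is a
// hypothetical name for the example; the real key is owned by MetadataServiceSerializer.
private static string TryGetContextIdSketch(Grpc.Core.Metadata headers)
{
    const string contextIdKey = "context-id"; // hypothetical header name

    if (headers == null)
    {
        return null;
    }

    foreach (var entry in headers)
    {
        // gRPC normalizes metadata keys to lower case, so a direct comparison suffices.
        if (entry.Key == contextIdKey)
        {
            return entry.Value;
        }
    }

    return null;
}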
private async Task<TResult> ExecuteAsync<TResult>(
    OperationContext originalContext,
    Func<OperationContext, CallOptions, IGlobalCacheService, Task<TResult>> executeAsync,
    Func<TResult, string?> extraEndMessage,
    string? extraStartMessage = null,
    [CallerMemberName] string caller = null!)
    where TResult : ResultBase
{
    var attempt = -1;
    using var contextWithShutdown = TrackShutdown(originalContext);
    var context = contextWithShutdown.Context;
    var callerAttempt = $"{caller}_Attempt";

    return await context.PerformOperationWithTimeoutAsync(
        Tracer,
        context =>
        {
            // Propagate the trace id to the service and bound the call by the configured timeout.
            var callOptions = new CallOptions(
                headers: new Metadata() { MetadataServiceSerializer.CreateContextIdHeaderEntry(context.TracingContext.TraceId) },
                deadline: _clock.UtcNow + _configuration.OperationTimeout,
                cancellationToken: context.Token);

            return _retryPolicy.ExecuteAsync(
                async () =>
                {
                    await Task.Yield();
                    attempt++;

                    var stopwatch = StopwatchSlim.Start();
                    var clientCreationTime = TimeSpan.Zero;

                    var result = await context.PerformOperationAsync(
                        Tracer,
                        () =>
                        {
                            return _serviceClientFactory.UseAsync(context, service =>
                            {
                                clientCreationTime = stopwatch.Elapsed;
                                return executeAsync(context, callOptions, service);
                            });
                        },
                        extraStartMessage: extraStartMessage,
                        extraEndMessage: r => $"Attempt=[{attempt}] ClientCreationTimeMs=[{clientCreationTime.TotalMilliseconds}] {extraEndMessage(r)}",
                        caller: callerAttempt,
                        traceErrorsOnly: true);

                    await Task.Yield();

                    // Because we capture exceptions inside the PerformOperation, we need to make sure that they
                    // get propagated for the retry policy to kick in.
                    if (result.Exception != null)
                    {
                        result.ReThrow();
                    }

                    return result;
                },
                context.Token);
        },
        caller: caller,
        traceErrorsOnly: true,
        extraStartMessage: extraStartMessage,
        extraEndMessage: r => $"Attempts=[{attempt + 1}] {extraEndMessage(r)}",
        timeout: _configuration.OperationTimeout);
}
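// Illustration only: a self-contained sketch of the rethrow-for-retry pattern used above,
// with Polly (https://github.com/App-vNext/Polly) standing in for this codebase's
// _retryPolicy. Because PerformOperationAsync captures exceptions into the returned
// result, the failure must be rethrown for the retry policy to observe it; only the
// ResultBase.Exception/ReThrow members visible above are assumed here.
private static Task<TResult> ExecuteWithRetriesSketchAsync<TResult>(Func<Task<TResult>> runCaptured)
    where TResult : ResultBase
{
    var policy = Polly.Policy.Handle<Exception>().RetryAsync(retryCount: 3);

    return policy.ExecuteAsync(async () =>
    {
        // runCaptured never throws; failures come back embedded in the result.
        var result = await runCaptured();

        // The policy only reacts to thrown exceptions; a failure swallowed into the
        // result would otherwise end the retry loop after a single attempt.
        if (result.Exception != null)
        {
            result.ReThrow();
        }

        return result;
    });
}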