Example #1
        // bucket and ratelimitTcs may be passed explicitly to allow proper rescheduling of the first request from a bucket
        private async Task ExecuteRequestAsync(BaseRestRequest request, RateLimitBucket bucket, TaskCompletionSource<bool> ratelimitTcs)
        {
            try
            {
                await this.GlobalRateLimitEvent.WaitAsync();

                if (bucket == null)
                {
                    bucket = request.RateLimitBucket;
                }

                if (ratelimitTcs == null)
                {
                    ratelimitTcs = await this.WaitForInitialRateLimit(bucket);
                }

                if (ratelimitTcs == null) // check rate limit only if we are not the probe request
                {
                    var now = DateTimeOffset.UtcNow;

                    await bucket.TryResetLimitAsync(now);

                    // Decrement the remaining number of requests as there can be other concurrent requests before this one finishes and has a chance to update the bucket
#pragma warning disable 420 // interlocked access is always volatile
                    if (Interlocked.Decrement(ref bucket._remaining) < 0)
#pragma warning restore 420 // restore the volatile-access warning
                    {
                        request.Discord?.DebugLogger?.LogMessage(LogLevel.Debug, "REST", $"Request for {bucket}. Blocking.", DateTime.Now);
                        var delay     = bucket.Reset - now;
                        var resetDate = bucket.Reset;

                        if (this.UseResetAfter)
                        {
                            delay     = bucket.ResetAfter.Value;
                            resetDate = bucket._resetAfterOffset;
                        }

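                        // a reset more than a minute in the past means ratelimit info was never retrieved; give up and let one more request through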
                        if (delay < new TimeSpan(-TimeSpan.TicksPerMinute))
                        {
                            request.Discord?.DebugLogger?.LogMessage(LogLevel.Error, "REST", "Failed to retrieve ratelimits. Giving up and allowing next request for bucket.", DateTime.Now);
                            bucket._remaining = 1;
                        }

                        if (delay < TimeSpan.Zero)
                        {
                            delay = TimeSpan.FromMilliseconds(100);
                        }

                        request.Discord?.DebugLogger?.LogMessage(LogLevel.Warning, "REST", $"Pre-emptive ratelimit triggered. Waiting until {resetDate:yyyy-MM-dd HH:mm:ss zzz} ({delay:c}).", DateTime.Now);
                        request.Discord?.DebugLogger?.LogTaskFault(Task.Delay(delay).ContinueWith(_ => this.ExecuteRequestAsync(request, null, null)), LogLevel.Error, "REST", "Error while executing request: ");
                        return;
                    }
                    request.Discord?.DebugLogger?.LogMessage(LogLevel.Debug, "REST", $"Request for {bucket}. Allowing.", DateTime.Now);
                }
                else
                {
                    request.Discord?.DebugLogger?.LogMessage(LogLevel.Debug, "REST", $"Initial request for {bucket}. Allowing.", DateTime.Now);
                }

                var req      = this.BuildRequest(request);
                var response = new RestResponse();
                try
                {
                    var res = await HttpClient.SendAsync(req, CancellationToken.None).ConfigureAwait(false);

                    var bts = await res.Content.ReadAsByteArrayAsync().ConfigureAwait(false);

                    var txt = Utilities.UTF8.GetString(bts, 0, bts.Length);

                    response.Headers      = res.Headers.ToDictionary(xh => xh.Key, xh => string.Join("\n", xh.Value), StringComparer.OrdinalIgnoreCase);
                    response.Response     = txt;
                    response.ResponseCode = (int)res.StatusCode;
                }
                catch (HttpRequestException httpex)
                {
                    request.Discord?.DebugLogger?.LogMessage(LogLevel.Error, "REST", $"Request to {request.Url} triggered an HttpException: {httpex.Message}", DateTime.Now);
                    request.SetFaulted(httpex);
                    this.FailInitialRateLimitTest(bucket, ratelimitTcs);
                    return;
                }

                this.UpdateBucket(request, response, ratelimitTcs);

                Exception ex = null;
                switch (response.ResponseCode)
                {
                case 400:
                case 405:
                    ex = new BadRequestException(request, response);
                    break;

                case 401:
                case 403:
                    ex = new UnauthorizedException(request, response);
                    break;

                case 404:
                    ex = new NotFoundException(request, response);
                    break;

                case 413:
                    ex = new RequestSizeException(request, response);
                    break;

                case 429:
                    ex = new RateLimitException(request, response);

                    // check the limit info and requeue
                    this.Handle429(response, out var wait, out var global);
                    if (wait != null)
                    {
                        if (global)
                        {
                            request.Discord?.DebugLogger?.LogMessage(LogLevel.Error, "REST", "Global ratelimit hit, cooling down", DateTime.Now);
                            try
                            {
                                this.GlobalRateLimitEvent.Reset();
                                await wait.ConfigureAwait(false);
                            }
                            finally
                            {
                                // we don't want to wait here until all the blocked requests have been run, additionally Set can never throw an exception that could be suppressed here
                                _ = this.GlobalRateLimitEvent.SetAsync();
                            }
                            request.Discord?.DebugLogger?.LogTaskFault(ExecuteRequestAsync(request, bucket, ratelimitTcs), LogLevel.Error, "REST", "Error while retrying request: ");
                        }
                        else
                        {
                            request.Discord?.DebugLogger?.LogMessage(LogLevel.Error, "REST", $"Ratelimit hit, requeueing request to {request.Url}", DateTime.Now);
                            await wait.ConfigureAwait(false);

                            request.Discord?.DebugLogger?.LogTaskFault(this.ExecuteRequestAsync(request, bucket, ratelimitTcs), LogLevel.Error, "REST", "Error while retrying request: ");
                        }

                        return;
                    }
                    break;
                }

                if (ex != null)
                {
                    request.SetFaulted(ex);
                }
                else
                {
                    request.SetCompleted(response);
                }
            }
            catch (Exception ex)
            {
                request.Discord?.DebugLogger?.LogMessage(LogLevel.Error, "REST", $"Request to {request.Url} triggered an {ex.GetType().Name}: {ex.Message}\n{ex.StackTrace}", DateTime.Now);

                // if something went wrong and we couldn't get rate limits for the first request here, allow the next request to run
                if (bucket != null && ratelimitTcs != null && bucket._limitTesting != 0)
                {
                    this.FailInitialRateLimitTest(bucket, ratelimitTcs);
                }

                if (!request.TrySetFaulted(ex))
                {
                    throw;
                }
            }
        }
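
The heart of the pre-emptive branch above is an atomic decrement of the bucket's remaining budget: if it drops below zero, the request is not sent but rescheduled after the reset delay, clamped to a short floor when that delay has already passed. The following is a minimal, self-contained sketch of that pattern, with a hypothetical SimpleBucket standing in for the library's RateLimitBucket and a caller-supplied send delegate:

using System;
using System.Threading;
using System.Threading.Tasks;

// Hypothetical, simplified bucket; not the library's RateLimitBucket.
class SimpleBucket
{
    public int Limit = 5;                                   // window size (illustrative)
    public int Remaining = 5;                               // requests left in the current window
    public DateTimeOffset Reset = DateTimeOffset.UtcNow.AddSeconds(5);

    public async Task ExecuteAsync(Func<Task> sendAsync)
    {
        // Decrement first: other concurrent requests may already be in flight.
        if (Interlocked.Decrement(ref this.Remaining) < 0)
        {
            var delay = this.Reset - DateTimeOffset.UtcNow;

            // Clamp negative delays to a short retry interval, as the example does.
            if (delay < TimeSpan.Zero)
                delay = TimeSpan.FromMilliseconds(100);

            // Reschedule instead of sending now.
            await Task.Delay(delay);

            // The window has passed: restore the budget and retry (simplified, not race-free).
            Interlocked.Exchange(ref this.Remaining, this.Limit);
            this.Reset = DateTimeOffset.UtcNow.AddSeconds(5);
            await this.ExecuteAsync(sendAsync);
            return;
        }

        await sendAsync();
    }
}

In the real method the budget is refreshed via TryResetLimitAsync and UpdateBucket rather than the fixed five-second window assumed here.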
Example #2
        // bucket and ratelimitTcs may be passed explicitly to allow proper rescheduling of the first request from a bucket
        private async Task ExecuteRequestAsync(BaseRestRequest request, RateLimitBucket bucket, TaskCompletionSource<bool> ratelimitTcs)
        {
            if (this._disposed)
            {
                return;
            }

            HttpResponseMessage res = default;

            try
            {
                await this.GlobalRateLimitEvent.WaitAsync().ConfigureAwait(false);

                if (bucket == null)
                {
                    bucket = request.RateLimitBucket;
                }

                if (ratelimitTcs == null)
                {
                    ratelimitTcs = await this.WaitForInitialRateLimit(bucket).ConfigureAwait(false);
                }

                if (ratelimitTcs == null) // check rate limit only if we are not the probe request
                {
                    var now = DateTimeOffset.UtcNow;

                    await bucket.TryResetLimitAsync(now).ConfigureAwait(false);

                    // Decrement the remaining number of requests as there can be other concurrent requests before this one finishes and has a chance to update the bucket
                    if (Interlocked.Decrement(ref bucket._remaining) < 0)
                    {
                        this.Logger.LogDebug(LoggerEvents.RatelimitDiag, "Request for {Bucket} is blocked", bucket.ToString());
                        var delay     = bucket.Reset - now;
                        var resetDate = bucket.Reset;

                        if (this.UseResetAfter)
                        {
                            delay     = bucket.ResetAfter.Value;
                            resetDate = bucket.ResetAfterOffset;
                        }

                        if (delay < new TimeSpan(-TimeSpan.TicksPerMinute))
                        {
                            this.Logger.LogError(LoggerEvents.RatelimitDiag, "Failed to retrieve ratelimits - giving up and allowing next request for bucket");
                            bucket._remaining = 1;
                        }

                        if (delay < TimeSpan.Zero)
                        {
                            delay = TimeSpan.FromMilliseconds(100);
                        }

                        this.Logger.LogWarning(LoggerEvents.RatelimitPreemptive, "Pre-emptive ratelimit triggered - waiting until {0:yyyy-MM-dd HH:mm:ss zzz} ({1:c}).", resetDate, delay);
                        Task.Delay(delay)
                        .ContinueWith(_ => this.ExecuteRequestAsync(request, null, null))
                        .LogTaskFault(this.Logger, LogLevel.Error, LoggerEvents.RestError, "Error while executing request");

                        return;
                    }
                    this.Logger.LogDebug(LoggerEvents.RatelimitDiag, "Request for {Bucket} is allowed", bucket.ToString());
                }
                else
                {
                    this.Logger.LogDebug(LoggerEvents.RatelimitDiag, "Initial request for {Bucket} is allowed", bucket.ToString());
                }

                var req      = this.BuildRequest(request);
                var response = new RestResponse();
                try
                {
                    if (this._disposed)
                    {
                        return;
                    }

                    res = await this.HttpClient.SendAsync(req, HttpCompletionOption.ResponseContentRead, CancellationToken.None).ConfigureAwait(false);

                    var bts = await res.Content.ReadAsByteArrayAsync().ConfigureAwait(false);

                    var txt = Utilities.UTF8.GetString(bts, 0, bts.Length);

                    this.Logger.LogTrace(LoggerEvents.RestRx, txt);

                    response.Headers      = res.Headers.ToDictionary(xh => xh.Key, xh => string.Join("\n", xh.Value), StringComparer.OrdinalIgnoreCase);
                    response.Response     = txt;
                    response.ResponseCode = (int)res.StatusCode;
                }
                catch (HttpRequestException httpex)
                {
                    this.Logger.LogError(LoggerEvents.RestError, httpex, "Request to {Url} triggered an HttpException", request.Url);
                    request.SetFaulted(httpex);
                    this.FailInitialRateLimitTest(request, ratelimitTcs);
                    return;
                }

                this.UpdateBucket(request, response, ratelimitTcs);

                Exception ex = null;
                switch (response.ResponseCode)
                {
                case 400:
                case 405:
                    ex = new BadRequestException(request, response);
                    break;

                case 401:
                case 403:
                    ex = new UnauthorizedException(request, response);
                    break;

                case 404:
                    ex = new NotFoundException(request, response);
                    break;

                case 413:
                    ex = new RequestSizeException(request, response);
                    break;

                case 429:
                    ex = new RateLimitException(request, response);

                    // check the limit info and requeue
                    this.Handle429(response, out var wait, out var global);
                    if (wait != null)
                    {
                        if (global)
                        {
                            this.Logger.LogError(LoggerEvents.RatelimitHit, "Global ratelimit hit, cooling down");
                            try
                            {
                                this.GlobalRateLimitEvent.Reset();
                                await wait.ConfigureAwait(false);
                            }
                            finally
                            {
                                // we don't want to wait here until all the blocked requests have been run, additionally Set can never throw an exception that could be suppressed here
                                _ = this.GlobalRateLimitEvent.SetAsync();
                            }
                            this.ExecuteRequestAsync(request, bucket, ratelimitTcs)
                            .LogTaskFault(this.Logger, LogLevel.Error, LoggerEvents.RestError, "Error while retrying request");
                        }
                        else
                        {
                            this.Logger.LogError(LoggerEvents.RatelimitHit, "Ratelimit hit, requeueing request to {Url}", request.Url);
                            await wait.ConfigureAwait(false);

                            this.ExecuteRequestAsync(request, bucket, ratelimitTcs)
                            .LogTaskFault(this.Logger, LogLevel.Error, LoggerEvents.RestError, "Error while retrying request");
                        }

                        return;
                    }
                    break;

                case 500:
                case 502:
                case 503:
                case 504:
                    ex = new ServerErrorException(request, response);
                    break;
                }

                if (ex != null)
                {
                    request.SetFaulted(ex);
                }
                else
                {
                    request.SetCompleted(response);
                }
            }
            catch (Exception ex)
            {
                this.Logger.LogError(LoggerEvents.RestError, ex, "Request to {Url} triggered an exception", request.Url);

                // if something went wrong and we couldn't get rate limits for the first request here, allow the next request to run
                if (bucket != null && ratelimitTcs != null && bucket._limitTesting != 0)
                {
                    this.FailInitialRateLimitTest(request, ratelimitTcs);
                }

                if (!request.TrySetFaulted(ex))
                {
                    throw;
                }
            }
            finally
            {
                res?.Dispose();

                // Get and decrement active requests in this bucket by 1.
                _ = this.RequestQueue.TryGetValue(bucket.BucketId, out var count);
                this.RequestQueue[bucket.BucketId] = Interlocked.Decrement(ref count);

                // If it's 0 or less, we can remove the bucket from the active request queue,
                // along with any of its past routes.
                if (count <= 0)
                {
                    foreach (var r in bucket.RouteHashes)
                    {
                        if (this.RequestQueue.ContainsKey(r))
                        {
                            _ = this.RequestQueue.TryRemove(r, out _);
                        }
                    }
                }
            }
        }
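
Both 429 paths above, and the WaitAsync call at the top of the method, treat GlobalRateLimitEvent as an async manual-reset event: Reset() makes subsequent WaitAsync() callers block, and SetAsync() releases them once the global cooldown has elapsed. The library's own type is not shown in the example; the following is only a sketch of such an event built on TaskCompletionSource, assuming that surface API:

using System.Threading;
using System.Threading.Tasks;

// Hypothetical stand-in; the library's GlobalRateLimitEvent may differ.
class AsyncManualResetEvent
{
    private TaskCompletionSource<bool> _tcs =
        new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

    // Start set: requests flow freely until a global 429 resets the event.
    public AsyncManualResetEvent() => this._tcs.SetResult(true);

    // Completed while set; callers block only while the event is reset.
    public Task WaitAsync() => this._tcs.Task;

    // Release every waiter (used after the global cooldown has elapsed).
    public Task SetAsync()
    {
        this._tcs.TrySetResult(true);
        return Task.CompletedTask;
    }

    // Block subsequent WaitAsync() callers until SetAsync() is called again.
    public void Reset()
    {
        while (true)
        {
            var current = this._tcs;
            if (!current.Task.IsCompleted ||
                Interlocked.CompareExchange(ref this._tcs,
                    new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously),
                    current) == current)
                return;
        }
    }
}

RunContinuationsAsynchronously keeps SetAsync() from running the released requests' continuations inline on the caller's thread, which matters when many blocked requests resume at once.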