Example #1
0
        private void UpdateBucket(BaseRestRequest request, RestResponse response, TaskCompletionSource<bool> ratelimitTcs)
        {
            var bucket = request.RateLimitBucket;

            if (response.Headers == null)
            {
                if (response.ResponseCode != 429) // do not fail when the rate limit was hit; the rescheduled request will hit the rate limit again anyway
                {
                    this.FailInitialRateLimitTest(request, ratelimitTcs);
                }
                return;
            }

            var hs = response.Headers;

            if (hs.TryGetValue("X-RateLimit-Global", out var isglobal) && isglobal.ToLowerInvariant() == "true")
            {
                if (response.ResponseCode != 429)
                {
                    this.FailInitialRateLimitTest(request, ratelimitTcs);
                }

                return;
            }

            var r1 = hs.TryGetValue("X-RateLimit-Limit", out var usesmax);
            var r2 = hs.TryGetValue("X-RateLimit-Remaining", out var usesleft);
            var r3 = hs.TryGetValue("X-RateLimit-Reset", out var reset);
            var r4 = hs.TryGetValue("X-RateLimit-Reset-After", out var resetAfter);
            var r5 = hs.TryGetValue("X-RateLimit-Bucket", out var hash);

            if (!r1 || !r2 || !r3 || !r4)
            {
                // If the limits were determined before this request, make the bucket initial again.
                if (response.ResponseCode != 429)
                {
                    this.FailInitialRateLimitTest(request, ratelimitTcs, ratelimitTcs == null);
                }

                return;
            }

            var clienttime = DateTimeOffset.UtcNow;
            var resettime  = new DateTimeOffset(1970, 1, 1, 0, 0, 0, TimeSpan.Zero).AddSeconds(double.Parse(reset, CultureInfo.InvariantCulture));
            var servertime = clienttime;

            if (hs.TryGetValue("Date", out var raw_date))
            {
                servertime = DateTimeOffset.Parse(raw_date, CultureInfo.InvariantCulture).ToUniversalTime();
            }

            var resetdelta = resettime - servertime;

            //var difference = clienttime - servertime;
            //if (Math.Abs(difference.TotalSeconds) >= 1)
            //    this.Logger.LogMessage(LogLevel.Debug, BaseDiscordClient.RestEventId, $"Difference between machine and server time: {difference.TotalMilliseconds.ToString("#,##0.00", CultureInfo.InvariantCulture)}ms", DateTime.Now);
            //else
            //    difference = TimeSpan.Zero;

            if (request.RateLimitWaitOverride.HasValue)
            {
                resetdelta = TimeSpan.FromSeconds(request.RateLimitWaitOverride.Value);
            }
            var newReset = clienttime + resetdelta;

            if (this.UseResetAfter)
            {
                bucket.ResetAfter = TimeSpan.FromSeconds(double.Parse(resetAfter, CultureInfo.InvariantCulture));
                newReset          = clienttime + bucket.ResetAfter.Value + (request.RateLimitWaitOverride.HasValue
                    ? resetdelta
                    : TimeSpan.Zero);
                bucket._resetAfterOffset = newReset;
            }
            else
            {
                bucket.Reset = newReset;
            }

            var maximum   = int.Parse(usesmax, CultureInfo.InvariantCulture);
            var remaining = int.Parse(usesleft, CultureInfo.InvariantCulture);

            if (ratelimitTcs != null)
            {
                // initial population of the ratelimit data
                bucket.SetInitialValues(maximum, remaining, newReset);

                _ = Task.Run(() => ratelimitTcs.TrySetResult(true));
            }
            else
            {
                // only update the bucket values if this request was for a newer interval than the one
                // currently in the bucket, to avoid issues with concurrent requests in one bucket
                // remaining is reset by TryResetLimit rather than from the response; just let that happen when it is time
                if (bucket._nextReset == 0)
                {
                    bucket._nextReset = newReset.UtcTicks;
                }
            }

            this.UpdateHashCaches(request, bucket, hash);
        }
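
For reference, the epoch arithmetic above can be isolated into a small standalone sketch. The helper below is illustrative only (ParseResetTimes and its tuple return type are not part of the library); it assumes the same Discord-style headers, with X-RateLimit-Reset carrying fractional Unix epoch seconds and X-RateLimit-Reset-After carrying seconds relative to now.

using System;
using System.Collections.Generic;
using System.Globalization;

static class RateLimitHeaderSketch
{
    // Converts the two reset headers into absolute timestamps.
    // "X-RateLimit-Reset" is Unix epoch seconds, "X-RateLimit-Reset-After" is seconds from now.
    public static (DateTimeOffset ResetAt, DateTimeOffset ResetAfterAt)? ParseResetTimes(
        IReadOnlyDictionary<string, string> headers, DateTimeOffset now)
    {
        if (!headers.TryGetValue("X-RateLimit-Reset", out var reset) ||
            !headers.TryGetValue("X-RateLimit-Reset-After", out var resetAfter))
            return null;

        var epoch = new DateTimeOffset(1970, 1, 1, 0, 0, 0, TimeSpan.Zero);
        var resetAt = epoch.AddSeconds(double.Parse(reset, CultureInfo.InvariantCulture));
        var resetAfterAt = now + TimeSpan.FromSeconds(double.Parse(resetAfter, CultureInfo.InvariantCulture));
        return (resetAt, resetAfterAt);
    }
}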
Example #2
0
        // to allow proper rescheduling of the first request from a bucket
        private async Task ExecuteRequestAsync(BaseRestRequest request, RateLimitBucket bucket, TaskCompletionSource<bool> ratelimitTcs)
        {
            if (this._disposed)
            {
                return;
            }

            HttpResponseMessage res = default;

            try
            {
                await this.GlobalRateLimitEvent.WaitAsync().ConfigureAwait(false);

                if (bucket == null)
                {
                    bucket = request.RateLimitBucket;
                }

                if (ratelimitTcs == null)
                {
                    ratelimitTcs = await this.WaitForInitialRateLimit(bucket).ConfigureAwait(false);
                }

                if (ratelimitTcs == null) // check the rate limit only if we are not the probe request
                {
                    var now = DateTimeOffset.UtcNow;

                    await bucket.TryResetLimitAsync(now).ConfigureAwait(false);

                    // Decrement the remaining number of requests as there can be other concurrent requests before this one finishes and has a chance to update the bucket
#pragma warning disable 420 // interlocked access is always volatile
                    if (Interlocked.Decrement(ref bucket._remaining) < 0)
#pragma warning restore 420 // blaze it
                    {
                        this.Logger.LogDebug(LoggerEvents.RatelimitDiag, "Request for {0} is blocked", bucket.ToString());
                        var delay     = bucket.Reset - now;
                        var resetDate = bucket.Reset;

                        if (this.UseResetAfter)
                        {
                            delay     = bucket.ResetAfter.Value;
                            resetDate = bucket._resetAfterOffset;
                        }

                        if (delay < new TimeSpan(-TimeSpan.TicksPerMinute))
                        {
                            this.Logger.LogError(LoggerEvents.RatelimitDiag, "Failed to retrieve ratelimits - giving up and allowing next request for bucket");
                            bucket._remaining = 1;
                        }

                        if (delay < TimeSpan.Zero)
                        {
                            delay = TimeSpan.FromMilliseconds(100);
                        }

                        this.Logger.LogWarning(LoggerEvents.RatelimitPreemptive, "Pre-emptive ratelimit triggered - waiting until {0:yyyy-MM-dd HH:mm:ss zzz} ({1:c}).", resetDate, delay);
                        Task.Delay(delay)
                        .ContinueWith(_ => this.ExecuteRequestAsync(request, null, null))
                        .LogTaskFault(this.Logger, LogLevel.Error, LoggerEvents.RestError, "Error while executing request");

                        return;
                    }
                    this.Logger.LogDebug(LoggerEvents.RatelimitDiag, "Request for {0} is allowed", bucket.ToString());
                }
                else
                {
                    this.Logger.LogDebug(LoggerEvents.RatelimitDiag, "Initial request for {0} is allowed", bucket.ToString());
                }

                var req      = this.BuildRequest(request);
                var response = new RestResponse();
                try
                {
                    if (this._disposed)
                    {
                        return;
                    }

                    res = await HttpClient.SendAsync(req, HttpCompletionOption.ResponseContentRead, CancellationToken.None).ConfigureAwait(false);

                    var bts = await res.Content.ReadAsByteArrayAsync().ConfigureAwait(false);

                    var txt = Utilities.UTF8.GetString(bts, 0, bts.Length);

                    this.Logger.LogTrace(LoggerEvents.RestRx, txt);

                    response.Headers      = res.Headers.ToDictionary(xh => xh.Key, xh => string.Join("\n", xh.Value), StringComparer.OrdinalIgnoreCase);
                    response.Response     = txt;
                    response.ResponseCode = (int)res.StatusCode;
                }
                catch (HttpRequestException httpex)
                {
                    this.Logger.LogError(LoggerEvents.RestError, httpex, "Request to {0} triggered an HttpException", request.Url);
                    request.SetFaulted(httpex);
                    this.FailInitialRateLimitTest(request, ratelimitTcs);
                    return;
                }

                this.UpdateBucket(request, response, ratelimitTcs);

                Exception ex = null;
                switch (response.ResponseCode)
                {
                case 400:
                case 405:
                    ex = new BadRequestException(request, response);
                    break;

                case 401:
                case 403:
                    ex = new UnauthorizedException(request, response);
                    break;

                case 404:
                    ex = new NotFoundException(request, response);
                    break;

                case 413:
                    ex = new RequestSizeException(request, response);
                    break;

                case 429:
                    ex = new RateLimitException(request, response);

                    // check the limit info and requeue
                    this.Handle429(response, out var wait, out var global);
                    if (wait != null)
                    {
                        if (global)
                        {
                            this.Logger.LogError(LoggerEvents.RatelimitHit, "Global ratelimit hit, cooling down");
                            try
                            {
                                this.GlobalRateLimitEvent.Reset();
                                await wait.ConfigureAwait(false);
                            }
                            finally
                            {
                                // we don't want to wait here until all the blocked requests have been run, additionally Set can never throw an exception that could be suppressed here
                                _ = this.GlobalRateLimitEvent.SetAsync();
                            }
                            this.ExecuteRequestAsync(request, bucket, ratelimitTcs)
                            .LogTaskFault(this.Logger, LogLevel.Error, LoggerEvents.RestError, "Error while retrying request");
                        }
                        else
                        {
                            this.Logger.LogError(LoggerEvents.RatelimitHit, "Ratelimit hit, requeueing request to {0}", request.Url);
                            await wait.ConfigureAwait(false);

                            this.ExecuteRequestAsync(request, bucket, ratelimitTcs)
                            .LogTaskFault(this.Logger, LogLevel.Error, LoggerEvents.RestError, "Error while retrying request");
                        }

                        return;
                    }
                    break;

                case 500:
                case 502:
                case 503:
                case 504:
                    ex = new ServerErrorException(request, response);
                    break;
                }

                if (ex != null)
                {
                    request.SetFaulted(ex);
                }
                else
                {
                    request.SetCompleted(response);
                }
            }
            catch (Exception ex)
            {
                this.Logger.LogError(LoggerEvents.RestError, ex, "Request to {0} triggered an exception", request.Url);

                // if something went wrong and we couldn't get rate limits for the first request here, allow the next request to run
                if (bucket != null && ratelimitTcs != null && bucket._limitTesting != 0)
                {
                    this.FailInitialRateLimitTest(request, ratelimitTcs);
                }

                if (!request.TrySetFaulted(ex))
                {
                    throw;
                }
            }
            finally
            {
                res?.Dispose();

                // Get and decrement active requests in this bucket by 1.
                _ = this.RequestQueue.TryGetValue(bucket.BucketId, out var count);
                this.RequestQueue[bucket.BucketId] = Interlocked.Decrement(ref count);

                // If it's 0 or less, we can remove the bucket from the active request queue,
                // along with any of its past routes.
                if (count <= 0)
                {
                    foreach (var r in bucket.RouteHashes)
                    {
                        if (this.RequestQueue.ContainsKey(r))
                        {
                            _ = this.RequestQueue.TryRemove(r, out _);
                        }
                    }
                }
            }
        }
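
The pre-emptive check in Example #2 hinges on Interlocked.Decrement dropping below zero under concurrency: every in-flight request claims a slot before the response comes back. A reduced sketch of that pattern follows, using a hypothetical MiniBucket stand-in rather than the library's RateLimitBucket.

using System;
using System.Threading;

class MiniBucket
{
    public int _remaining;              // requests left in the current window
    public DateTimeOffset Reset;        // when the window resets

    // Returns true if the caller may proceed; otherwise false plus a delay to wait.
    public bool TryAcquire(DateTimeOffset now, out TimeSpan delay)
    {
        if (Interlocked.Decrement(ref _remaining) >= 0)
        {
            delay = TimeSpan.Zero;
            return true;                // a slot was still available
        }

        delay = Reset - now;
        if (delay < TimeSpan.Zero)
            delay = TimeSpan.FromMilliseconds(100);  // clamp negative delays, mirroring the code above
        return false;                   // caller should reschedule after the delay
    }
}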
        protected internal void SetCompleted(RestResponse response)
            => this.RequestTaskSource.SetResult(response);
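
The SetCompleted fragment above resolves a TaskCompletionSource that the original caller is awaiting, which is how the fire-and-forget executor hands results back. A minimal sketch of that handshake, with a hypothetical PendingRequest in place of BaseRestRequest:

using System;
using System.Threading.Tasks;

class PendingRequest
{
    // The caller awaits this task; the executor resolves it exactly once.
    private readonly TaskCompletionSource<string> _tcs =
        new TaskCompletionSource<string>(TaskCreationOptions.RunContinuationsAsynchronously);

    public Task<string> WaitForCompletionAsync() => _tcs.Task;

    // Mirrors SetCompleted: hands the response to whoever is awaiting.
    public void SetCompleted(string response) => _tcs.SetResult(response);

    // Mirrors TrySetFaulted: propagates a failure to the awaiter instead.
    public bool TrySetFaulted(Exception ex) => _tcs.TrySetException(ex);
}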
Example #4
0
        // to allow proper rescheduling of the first request from a bucket
        private async Task ExecuteRequestAsync(BaseRestRequest request, RateLimitBucket bucket, TaskCompletionSource<bool> ratelimitTcs)
        {
            try
            {
                await this.GlobalRateLimitEvent.WaitAsync();

                if (bucket == null)
                {
                    bucket = request.RateLimitBucket;
                }

                if (ratelimitTcs == null)
                {
                    ratelimitTcs = await this.WaitForInitialRateLimit(bucket);
                }

                if (ratelimitTcs == null) // check the rate limit only if we are not the probe request
                {
                    var now = DateTimeOffset.UtcNow;

                    await bucket.TryResetLimit(now);

                    // Decrement the remaining number of requests as there can be other concurrent requests before this one finishes and has a chance to update the bucket
#pragma warning disable 420 // interlocked access is always volatile
                    if (Interlocked.Decrement(ref bucket._remaining) < 0)
#pragma warning restore 420
                    {
                        request.Discord.DebugLogger.LogMessage(LogLevel.Debug, "REST", $"Request for bucket {bucket}. Blocking.", DateTime.Now);
                        var delay = bucket.Reset - now;
                        if (delay < new TimeSpan(-TimeSpan.TicksPerMinute))
                        {
                            request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", "Failed to retrieve ratelimits. Giving up and allowing next request for bucket.", DateTime.Now);
                            bucket._remaining = 1;
                        }
                        if (delay < TimeSpan.Zero)
                        {
                            delay = TimeSpan.FromMilliseconds(100);
                        }
                        request.Discord.DebugLogger.LogMessage(LogLevel.Warning, "REST", $"Pre-emptive ratelimit triggered, waiting until {bucket.Reset:yyyy-MM-dd HH:mm:ss zzz} ({delay:c})", DateTime.Now);
                        request.Discord.DebugLogger.LogTaskFault(Task.Delay(delay).ContinueWith(t => this.ExecuteRequestAsync(request, null, null)), LogLevel.Error, "REST", "Error while executing request: ");
                        return;
                    }
                    request.Discord.DebugLogger.LogMessage(LogLevel.Debug, "REST", $"Request for bucket {bucket}. Allowing.", DateTime.Now);
                }
                else
                {
                    request.Discord.DebugLogger.LogMessage(LogLevel.Debug, "REST", $"Initial Request for bucket {bucket}. Allowing.", DateTime.Now);
                }

                var req      = this.BuildRequest(request);
                var response = new RestResponse();
                try
                {
                    var res = await HttpClient.SendAsync(req, CancellationToken.None).ConfigureAwait(false);

                    var bts = await res.Content.ReadAsByteArrayAsync().ConfigureAwait(false);

                    var txt = UTF8.GetString(bts, 0, bts.Length);

                    response.Headers      = res.Headers.ToDictionary(xh => xh.Key, xh => string.Join("\n", xh.Value));
                    response.Response     = txt;
                    response.ResponseCode = (int)res.StatusCode;
                }
                catch (HttpRequestException httpex)
                {
                    request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", $"Request to {request.Url} triggered an HttpException: {httpex.Message}", DateTime.Now);
                    request.SetFaulted(httpex);
                    this.FailInitialRateLimitTest(bucket, ratelimitTcs);
                    return;
                }

                this.UpdateBucket(request, response, ratelimitTcs);

                Exception ex = null;
                switch (response.ResponseCode)
                {
                case 400:
                case 405:
                    ex = new BadRequestException(request, response);
                    break;

                case 401:
                case 403:
                    ex = new UnauthorizedException(request, response);
                    break;

                case 404:
                    ex = new NotFoundException(request, response);
                    break;

                case 429:
                    ex = new RateLimitException(request, response);

                    // check the limit info and requeue
                    this.Handle429(response, out var wait, out var global);
                    if (wait != null)
                    {
                        if (global)
                        {
                            request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", "Global ratelimit hit, cooling down", DateTime.Now);
                            try
                            {
                                this.GlobalRateLimitEvent.Reset();
                                await wait.ConfigureAwait(false);
                            }
                            finally
                            {
                                // we don't want to wait here until all the blocked requests have been run, additionally Set can never throw an exception that could be suppressed here
                                _ = this.GlobalRateLimitEvent.Set();
                            }
                            request.Discord.DebugLogger.LogTaskFault(ExecuteRequestAsync(request, bucket, ratelimitTcs), LogLevel.Error, "REST", "Error while retrying request: ");
                        }
                        else
                        {
                            request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", $"Ratelimit hit, requeueing request to {request.Url}", DateTime.Now);
                            await wait.ConfigureAwait(false);

                            request.Discord.DebugLogger.LogTaskFault(this.ExecuteRequestAsync(request, bucket, ratelimitTcs), LogLevel.Error, "REST", "Error while retrying request: ");
                        }

                        return;
                    }
                    break;
                }

                if (ex != null)
                {
                    request.SetFaulted(ex);
                }
                else
                {
                    request.SetCompleted(response);
                }
            }
            catch (Exception ex)
            {
                request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", $"Request to {request.Url} triggered an {ex.GetType().Name}: {ex.Message}\n{ex.StackTrace}", DateTime.Now);

                // if something went wrong and we couldn't get rate limits for the first request here, allow the next request to run
                if (bucket != null && ratelimitTcs != null && bucket._limitTesting != 0)
                {
                    this.FailInitialRateLimitTest(bucket, ratelimitTcs);
                }

                if (!request.TrySetFaulted(ex))
                {
                    throw;
                }
            }
        }
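
Handle429 is called in every example but never shown. One plausible shape, sketched purely as an assumption, is to read Retry-After (seconds) and X-RateLimit-Global from the response headers and turn them into a delay task plus a global flag; this is a guess at what the out parameters mean, not the library's actual implementation.

using System;
using System.Collections.Generic;
using System.Globalization;
using System.Threading.Tasks;

static class Handle429Sketch
{
    // Hypothetical 429 handler: wait becomes a Task.Delay for the advertised cooldown,
    // global reports whether the limit applies to all routes rather than one bucket.
    public static void Handle429(IReadOnlyDictionary<string, string> headers,
        out Task wait, out bool global)
    {
        wait = null;
        global = false;

        if (headers == null)
            return;

        if (headers.TryGetValue("Retry-After", out var retryAfter))
        {
            var seconds = double.Parse(retryAfter, CultureInfo.InvariantCulture);
            wait = Task.Delay(TimeSpan.FromSeconds(seconds));
        }

        if (headers.TryGetValue("X-RateLimit-Global", out var g))
            global = g.Equals("true", StringComparison.OrdinalIgnoreCase);
    }
}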
Example #5
0
        public async Task ExecuteRequestAsync(BaseRestRequest request)
        {
            if (request == null)
            {
                throw new ArgumentNullException(nameof(request));
            }

            await this.RequestSemaphore.WaitAsync().ConfigureAwait(false);

            var bucket = request.RateLimitBucket;
            var now    = DateTimeOffset.UtcNow;

            if (bucket.Remaining <= 0 && bucket.Maximum > 0 && now < bucket.Reset)
            {
                request.Discord.DebugLogger.LogMessage(LogLevel.Warning, "REST", $"Pre-emptive ratelimit triggered, waiting until {bucket.Reset.ToString("yyyy-MM-dd HH:mm:ss zzz")}", DateTime.Now);
                _ = Task.Delay(bucket.Reset - now).ContinueWith(t => this.ExecuteRequestAsync(request));
                this.RequestSemaphore.Release();
                return;
            }

            var req      = this.BuildRequest(request);
            var response = new RestResponse();

            try
            {
                var res = await HttpClient.SendAsync(req, HttpCompletionOption.ResponseContentRead).ConfigureAwait(false);

                var bts = await res.Content.ReadAsByteArrayAsync().ConfigureAwait(false);

                var txt = UTF8.GetString(bts, 0, bts.Length);

                response.Headers      = res.Headers.ToDictionary(xh => xh.Key, xh => string.Join("\n", xh.Value));
                response.Response     = txt;
                response.ResponseCode = (int)res.StatusCode;
            }
            catch (HttpRequestException httpex)
            {
                request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", $"Request to {request.Url} triggered an HttpException: {httpex.Message}", DateTime.Now);
                request.SetFaulted(httpex);
                this.RequestSemaphore.Release();
                return;
            }

            this.UpdateBucket(request, response);

            Exception ex = null;

            switch (response.ResponseCode)
            {
            case 400:
            case 405:
                ex = new BadRequestException(request, response);
                break;

            case 401:
            case 403:
                ex = new UnauthorizedException(request, response);
                break;

            case 404:
                ex = new NotFoundException(request, response);
                break;

            case 429:
                ex = new RateLimitException(request, response);

                // check the limit info, if more than one minute, fault, otherwise requeue
                this.Handle429(response, out var wait, out var global);
                if (wait != null)
                {
                    wait = wait.ContinueWith(t => this.ExecuteRequestAsync(request));
                    if (global)
                    {
                        request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", "Global ratelimit hit, cooling down", DateTime.Now);
                        await wait.ConfigureAwait(false);
                    }
                    else
                    {
                        request.Discord.DebugLogger.LogMessage(LogLevel.Error, "REST", $"Ratelimit hit, requeueing request to {request.Url}", DateTime.Now);
                    }

                    this.RequestSemaphore.Release();
                    return;
                }
                break;
            }

            this.RequestSemaphore.Release();

            if (ex != null)
            {
                request.SetFaulted(ex);
            }
            else
            {
                request.SetCompleted(response);
            }
        }
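
Example #5 serializes every request through a single SemaphoreSlim instead of tracking per-bucket state as the other examples do. A reduced sketch of that gating pattern follows; the names are illustrative, and the finally-based release is a hardened variant rather than a transcription of the code above, which releases manually on each return path.

using System;
using System.Threading;
using System.Threading.Tasks;

class SerializedExecutor
{
    // One permit: only a single request is in flight at a time, as in Example #5.
    private readonly SemaphoreSlim _gate = new SemaphoreSlim(1, 1);

    public async Task ExecuteAsync(Func<Task> sendRequest)
    {
        await _gate.WaitAsync().ConfigureAwait(false);
        try
        {
            await sendRequest().ConfigureAwait(false);
        }
        finally
        {
            // Always release, even when the request faults, so later requests are not deadlocked.
            _gate.Release();
        }
    }
}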