Example No. 1
    private async Task <Dictionary <string, double> > GetUserCountByProjectIdsAsync(ICollection <Stack> stacks, AppFilter sf, DateTime utcStart, DateTime utcEnd)
    {
        var scopedCacheClient = new ScopedCacheClient(_cache, $"Project:user-count:{utcStart.Floor(TimeSpan.FromMinutes(15)).Ticks}-{utcEnd.Floor(TimeSpan.FromMinutes(15)).Ticks}");
        var projectIds        = stacks.Select(s => s.ProjectId).Distinct().ToList();
        var cachedTotals      = await scopedCacheClient.GetAllAsync <double>(projectIds);

        var totals = cachedTotals.Where(kvp => kvp.Value.HasValue).ToDictionary(kvp => kvp.Key, kvp => kvp.Value.Value);

        if (totals.Count == projectIds.Count)
        {
            return(totals);
        }

        var systemFilter = new RepositoryQuery <PersistentEvent>().AppFilter(sf).DateRange(utcStart, utcEnd, (PersistentEvent e) => e.Date).Index(utcStart, utcEnd);
        var projects     = cachedTotals.Where(kvp => !kvp.Value.HasValue).Select(kvp => new Project {
            Id = kvp.Key, OrganizationId = stacks.FirstOrDefault(s => s.ProjectId == kvp.Key)?.OrganizationId
        }).ToList();
        var countResult = await _eventRepository.CountAsync(q => q.SystemFilter(systemFilter).FilterExpression(projects.BuildFilter()).AggregationsExpression("terms:(project_id cardinality:user)"));

        // Cache all projects that have 10 or more users for 5 minutes.
        var projectTerms = countResult.Aggregations.Terms <string>("terms_project_id").Buckets;
        var aggregations = projectTerms.ToDictionary(t => t.Key, t => t.Aggregations.Cardinality("cardinality_user").Value.GetValueOrDefault());
        await scopedCacheClient.SetAllAsync(aggregations.Where(t => t.Value >= 10).ToDictionary(k => k.Key, v => v.Value), TimeSpan.FromMinutes(5));

        totals.AddRange(aggregations);

        return(totals);
    }
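
The cache scope above embeds the query window floored to 15-minute buckets, so repeated calls inside the same window reuse the same per-project entries. `Floor` comes from the Exceptionless date/time extensions; assuming it rounds a `DateTime` down to the start of the containing interval, an equivalent helper would look roughly like this (a sketch, not the library implementation):

    using System;

    public static class DateTimeBucketExtensions
    {
        // Assumed behavior of Floor(TimeSpan): truncate the timestamp to the start
        // of the interval that contains it, preserving the DateTimeKind.
        public static DateTime FloorTo(this DateTime date, TimeSpan interval)
        {
            return new DateTime(date.Ticks - (date.Ticks % interval.Ticks), date.Kind);
        }
    }

    // Example: 12:07 and 12:14 both floor to 12:00, so both requests share the same
    // "Project:user-count:<startTicks>-<endTicks>" cache scope for that window.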
Example No. 2
        private async Task <Dictionary <string, double> > GetUserCountByProjectIdsAsync(ICollection <Stack> stacks, string systemFilter, DateTime utcStart, DateTime utcEnd)
        {
            var scopedCacheClient = new ScopedCacheClient(_cacheClient, $"project:user-count:{utcStart.Floor(TimeSpan.FromMinutes(15)).Ticks}-{utcEnd.Floor(TimeSpan.FromMinutes(15)).Ticks}");
            var projectIds        = stacks.Select(s => s.ProjectId).Distinct().ToList();
            var cachedTotals      = await scopedCacheClient.GetAllAsync <double>(projectIds);

            var totals = cachedTotals.Where(kvp => kvp.Value.HasValue).ToDictionary(kvp => kvp.Key, kvp => kvp.Value.Value);

            if (totals.Count == projectIds.Count)
            {
                return(totals);
            }

            var projects = cachedTotals.Where(kvp => !kvp.Value.HasValue).Select(kvp => new Project {
                Id = kvp.Key, OrganizationId = stacks.FirstOrDefault(s => s.ProjectId == kvp.Key)?.OrganizationId
            }).ToList();
            var projectTerms = await _eventStats.GetNumbersTermsStatsAsync("project_id", _distinctUsersFields, utcStart, utcEnd, systemFilter, projects.BuildRetentionFilter());

            // Cache all projects that have 10 or more users for 5 minutes.
            await scopedCacheClient.SetAllAsync(projectTerms.Terms.Where(t => t.Numbers[0] >= 10).ToDictionary(t => t.Term, t => t.Numbers[0]), TimeSpan.FromMinutes(5));

            totals.AddRange(projectTerms.Terms.ToDictionary(kvp => kvp.Term, kvp => kvp.Numbers[0]));

            return(totals);
        }
Example No. 3
        public virtual async Task CanUseScopedCaches()
        {
            var cache = GetCacheClient();

            if (cache == null)
            {
                return;
            }

            using (cache) {
                await cache.RemoveAllAsync();

                var scopedCache1       = new ScopedCacheClient(cache, "scoped1");
                var nestedScopedCache1 = new ScopedCacheClient(scopedCache1, "nested");
                var scopedCache2       = new ScopedCacheClient(cache, "scoped2");

                await cache.SetAsync("test", 1);

                await scopedCache1.SetAsync("test", 2);

                await nestedScopedCache1.SetAsync("test", 3);

                Assert.Equal(1, (await cache.GetAsync <int>("test")).Value);
                Assert.Equal(2, (await scopedCache1.GetAsync <int>("test")).Value);
                Assert.Equal(3, (await nestedScopedCache1.GetAsync <int>("test")).Value);

                Assert.Equal(3, (await scopedCache1.GetAsync <int>("nested:test")).Value);
                Assert.Equal(3, (await cache.GetAsync <int>("scoped1:nested:test")).Value);

                // ensure GetAllAsync returns unscoped keys
                Assert.Equal("test", (await scopedCache1.GetAllAsync <int>("test")).Keys.FirstOrDefault());
                Assert.Equal("test", (await nestedScopedCache1.GetAllAsync <int>("test")).Keys.FirstOrDefault());

                await scopedCache2.SetAsync("test", 1);

                var result = await scopedCache1.RemoveByPrefixAsync(String.Empty);

                Assert.Equal(2, result);

                // delete without any matching keys
                result = await scopedCache1.RemoveByPrefixAsync(String.Empty);

                Assert.Equal(0, result);

                Assert.False((await scopedCache1.GetAsync <int>("test")).HasValue);
                Assert.False((await nestedScopedCache1.GetAsync <int>("test")).HasValue);
                Assert.Equal(1, (await cache.GetAsync <int>("test")).Value);
                Assert.Equal(1, (await scopedCache2.GetAsync <int>("test")).Value);

                await scopedCache2.RemoveAllAsync();

                Assert.False((await scopedCache1.GetAsync <int>("test")).HasValue);
                Assert.False((await nestedScopedCache1.GetAsync <int>("test")).HasValue);
                Assert.False((await scopedCache2.GetAsync <int>("test")).HasValue);
                Assert.Equal(1, (await cache.GetAsync <int>("test")).Value);
            }
        }
Example No. 4
 protected AbstractMigrateableService(
     TInfo info,
     ICacheClient cacheClient,
     IMessageBusFactory messageBusFactory
     ) : base(info, cacheClient, messageBusFactory)
 {
     AccountStateCache   = new ScopedCacheClient(cacheClient, Scopes.MigrationAccountCache);
     CharacterStateCache = new ScopedCacheClient(cacheClient, Scopes.MigrationCharacterCache);
     MigrationStateCache = new ScopedCacheClient(cacheClient, Scopes.MigrationCache);
 }
Example No. 5
        public static IServiceCollection AddCacheClient(this IServiceCollection services)
        {
            services.AddSingleton(provider =>
            {
                var options = provider.GetRequiredService <IOptions <CacheOptions> >().Value;
                var type    = options.Type.ToLower();

                ICacheClient client;

                if (type == "auto")
                {
                    var redisOptions = provider.GetRequiredService <IOptions <RedisOptions> >().Value;
                    type             = !string.IsNullOrEmpty(redisOptions.ConnectionString) ? "redis" : "memory";
                }

                switch (type)
                {
                case "inmemory":
                case "memory":
                    client = new InMemoryCacheClient(new InMemoryCacheClientOptions
                    {
                        LoggerFactory = provider.GetRequiredService <ILoggerFactory>(),
                        Serializer    = provider.GetRequiredService <ISerializer>()
                    });
                    break;

                case "redis":
                    client = new RedisCacheClient(new RedisCacheClientOptions
                    {
                        ConnectionMultiplexer = provider.GetRequiredService <IConnectionMultiplexer>(),
                        LoggerFactory         = provider.GetRequiredService <ILoggerFactory>(),
                        Serializer            = provider.GetRequiredService <ISerializer>()
                    });
                    break;

                default:
                    throw new NotSupportedException($"The cache type {options.Type} is not supported.");
                }

                if (!string.IsNullOrEmpty(options.Prefix))
                {
                    client = new ScopedCacheClient(client, options.Prefix);
                }

                return(client);
            });

            return(services);
        }
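
A usage sketch for the registration above. It assumes the same `CacheOptions` type shown in the example, plus an `ISerializer` and logging already registered (the factory resolves both); the option values, prefix, and cache key here are illustrative:

    using System;
    using Foundatio.Caching;
    using Foundatio.Serializer;
    using Microsoft.Extensions.DependencyInjection;

    var services = new ServiceCollection();
    services.AddLogging();                                               // provides ILoggerFactory
    services.AddSingleton<ISerializer>(new SystemTextJsonSerializer());  // any Foundatio ISerializer works here
    services.Configure<CacheOptions>(o => { o.Type = "memory"; o.Prefix = "myapp"; });

    services.AddCacheClient();

    using var provider = services.BuildServiceProvider();
    var cache = provider.GetRequiredService<ICacheClient>();             // ScopedCacheClient("myapp") over an InMemoryCacheClient
    await cache.SetAsync("greeting", "hello", TimeSpan.FromMinutes(1));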
Example No. 6
        public AbstractHostedService(
            IApplicationLifetime appLifetime,
            ICacheClient cache,
            IMessageBus messageBus,
            IOptions <TInfo> info,
            IDataContextFactory dataContextFactory
            )
        {
            _appLifetime = appLifetime;
            _cache       = cache;
            _messageBus  = messageBus;
            _peers       = new ConcurrentDictionary <string, PeerServiceInfo>();

            Info = info.Value;
            DataContextFactory = dataContextFactory;

            MigrationCache     = new ScopedCacheClient(_cache, MigrationCacheScope);
            AccountStatusCache = new ScopedCacheClient(_cache, AccountStatusCacheScope);
        }
Example No. 7
        public async Task <HealthCheckResult> CheckCacheAsync()
        {
            var sw = Stopwatch.StartNew();

            try {
                var cache      = new ScopedCacheClient(_configuration.Cache, "health");
                var cacheValue = await cache.GetAsync <string>("__PING__").AnyContext();

                if (cacheValue.HasValue)
                {
                    return(HealthCheckResult.NotHealthy("Cache Not Working"));
                }
            } catch (Exception ex) {
                return(HealthCheckResult.NotHealthy("Cache Not Working: " + ex.Message));
            } finally {
                sw.Stop();
                _logger.LogTrace("Checking cache took {Duration:g}", sw.Elapsed);
            }

            return(HealthCheckResult.Healthy);
        }
Example No. 8
        public async Task <HealthCheckResult> CheckHealthAsync(HealthCheckContext context, CancellationToken cancellationToken = default)
        {
            var sw = Stopwatch.StartNew();

            try {
                var cache      = new ScopedCacheClient(_cache, "health");
                var cacheValue = await cache.GetAsync <string>("__PING__").AnyContext();

                if (cacheValue.HasValue)
                {
                    return(HealthCheckResult.Unhealthy("Cache Not Working"));
                }
            } catch (Exception ex) {
                return(HealthCheckResult.Unhealthy("Cache Not Working.", ex));
            } finally {
                sw.Stop();
                _logger.LogTrace("Checking cache took {Duration:g}", sw.Elapsed);
            }

            return(HealthCheckResult.Healthy());
        }
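
The method above matches the ASP.NET Core `IHealthCheck.CheckHealthAsync(HealthCheckContext, CancellationToken)` signature. Assuming the containing class implements `IHealthCheck` and is named `CacheHealthCheck` (a hypothetical name, not given in the example), it could be registered like this:

    using Microsoft.Extensions.DependencyInjection;

    public static class CacheHealthCheckRegistration
    {
        public static IServiceCollection AddCacheHealthCheck(this IServiceCollection services)
        {
            // "CacheHealthCheck" stands in for whatever class contains CheckHealthAsync above.
            services.AddHealthChecks()
                    .AddCheck<CacheHealthCheck>("cache");
            return services;
        }
    }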
Example No. 9
 public NetInfoJob(IQueue <NetInfoWorkItem> queue, ILoggerFactory loggerFactory, INetInfoService netInfoService,
                   IHybridCacheClient cacheClient) : base(queue, loggerFactory)
 {
     _netInfoService = netInfoService;
     _statsCache     = new ScopedHybridCacheClient(cacheClient, CacheScopes.Stats);
 }
Example No. 10
        private async Task<ReindexResult> InternalReindexAsync(ReindexWorkItem workItem, Func<int, string, Task> progressCallbackAsync, int startProgress = 0, int endProgress = 100, DateTime? startTime = null) {
            const string scroll = "5m";
            string timestampField = workItem.TimestampField ?? "_timestamp";
            var scopedCacheClient = new ScopedCacheClient(_cache, workItem.GetHashCode().ToString());

            var settingsResponse = await _client.GetIndexSettingsAsync(s => s.Index(workItem.OldIndex)).AnyContext();
            if (!settingsResponse.IsValid)
                throw new ApplicationException("Unable to retrieve index settings.");

            int scrollSize = 500 / settingsResponse.IndexSettings.NumberOfShards ?? 50;

            var scanResults = await _client.SearchAsync<object>(s => s
                .Index(workItem.OldIndex)
                .AllTypes()
                .Query(q => q.Filtered(f => {
                    if (startTime.HasValue)
                        f.Filter(f1 => f1.Range(r => r.OnField(timestampField).Greater(startTime.Value)));
                }))
                .Fields("_source", "_parent")
                .Size(scrollSize)
                .SearchType(SearchType.Scan)
                .Scroll(scroll)).AnyContext();

            _logger.Info(scanResults.GetRequest());

            if (!scanResults.IsValid || scanResults.ScrollId == null) {
                _logger.Error().Exception(scanResults.ConnectionStatus.OriginalException).Message("Invalid search result: message={0}", scanResults.GetErrorMessage()).Write();
                return new ReindexResult();
            }

            var results = await _client.ScrollAsync<JObject>("5m", scanResults.ScrollId).AnyContext();
            if (!results.IsValid) {
                await scopedCacheClient.RemoveAsync("id").AnyContext();
                return await InternalReindexAsync(workItem, progressCallbackAsync, startProgress, endProgress, startTime).AnyContext();
            }

            double completed = 0;
            long totalHits = results.Total;
            while (results.Hits.Any()) {
                ISearchResponse<JObject> results1 = results;

                IBulkResponse bulkResponse = null;
                try {
                    bulkResponse = await Run.WithRetriesAsync(() => _client.BulkAsync(b => {
                        foreach (var h in results1.Hits)
                            ConfigureIndexItem(b, h, workItem.NewIndex);

                        return b;
                    }), logger: _logger).AnyContext();
                } catch (Exception ex) {
                    _logger.Error(ex, $"Error trying to do bulk index: {ex.Message}");
                }

                if (bulkResponse == null || !bulkResponse.IsValid || bulkResponse.ItemsWithErrors.Any()) {
                    // bulkResponse is null when the bulk call itself threw, so guard the error details.
                    string message = $"Reindex bulk error: old={workItem.OldIndex} new={workItem.NewIndex} completed={completed} message={bulkResponse?.GetErrorMessage()}";
                    _logger.Warn(bulkResponse?.ConnectionStatus.OriginalException, message);
                    // try each doc individually so we can see which doc is breaking us

                    var hitsToRetry = bulkResponse?.ItemsWithErrors.Select(i => results.Hits.First(hit => hit.Id == i.Id)) ?? results.Hits;
                    foreach (var itemWithError in hitsToRetry) {
                        var response = await _client.IndexAsync(itemWithError.Source, d => ConfigureItem(d, itemWithError, workItem.NewIndex)).AnyContext();
                        if (response.IsValid)
                            continue;

                        message = $"Reindex error: old={workItem.OldIndex} new={workItem.NewIndex} id={itemWithError.Id} completed={completed} message={response.GetErrorMessage()}";
                        _logger.Error().Exception(response.ConnectionStatus.OriginalException).Message(message);

                        // A JObject cannot be built from an anonymous object; use the explicit property form instead.
                        var errorDoc = new JObject {
                            ["Type"]    = itemWithError.Type,
                            ["Content"] = itemWithError.Source.ToString(Formatting.Indented)
                        };

                        // put the document into an error index
                        response = await _client.IndexAsync(errorDoc, d => d.Index(workItem.NewIndex + "-error").Id(itemWithError.Id)).AnyContext();
                        if (response.IsValid)
                            continue;

                        message = $"Reindex error: old={workItem.OldIndex} new={workItem.NewIndex} id={itemWithError.Id} completed={completed} message={response.GetErrorMessage()}";
                        _logger.Error().Exception(response.ConnectionStatus.OriginalException).Message(message);
                        throw new ReindexException(response.ConnectionStatus, message);
                    }
                }

                completed += results.Hits.Count();
                await progressCallbackAsync(CalculateProgress(totalHits, (long)completed, startProgress, endProgress), $"Total: {totalHits} Completed: {completed}").AnyContext();
                results = await _client.ScrollAsync<JObject>("5m", results.ScrollId).AnyContext();
                await scopedCacheClient.AddAsync("id", results.ScrollId, TimeSpan.FromHours(1)).AnyContext();
            }

            await scopedCacheClient.RemoveAllAsync(new[] { "id" }).AnyContext();
            return new ReindexResult { Total = totalHits, Completed = (long)completed };
        }
Example No. 11
        public virtual void CanUseScopedCaches()
        {
            var cache = GetCacheClient();
            if (cache == null)
                return;

            using (cache) {
                var scopedCache1 = new ScopedCacheClient(cache, "scoped1");
                var nestedScopedCache1 = new ScopedCacheClient(scopedCache1, "nested");
                var scopedCache2 = new ScopedCacheClient(cache, "scoped2");

                cache.Set("test", 1);
                scopedCache1.Set("test", 2);
                nestedScopedCache1.Set("test", 3);

                Assert.Equal(1, cache.Get<int>("test"));
                Assert.Equal(2, scopedCache1.Get<int>("test"));
                Assert.Equal(3, nestedScopedCache1.Get<int>("test"));

                Assert.Equal(3, scopedCache1.Get<int>("nested:test"));
                Assert.Equal(3, cache.Get<int>("scoped1:nested:test"));

                scopedCache2.Set("test", 1);

                scopedCache1.FlushAll();
                Assert.Null(scopedCache1.Get<int?>("test"));
                Assert.Null(nestedScopedCache1.Get<int?>("test"));
                Assert.Equal(1, cache.Get<int>("test"));
                Assert.Equal(1, scopedCache2.Get<int>("test"));

                scopedCache2.FlushAll();
                Assert.Null(scopedCache1.Get<int?>("test"));
                Assert.Null(nestedScopedCache1.Get<int?>("test"));
                Assert.Null(scopedCache2.Get<int?>("test"));
                Assert.Equal(1, cache.Get<int>("test"));
            }
        }
Example No. 12
 protected void DisableCache()
 {
     IsCacheEnabled     = false;
     _scopedCacheClient = new ScopedCacheClient(new NullCacheClient(), EntityTypeName);
 }
Example No. 13
 private void SetCache(ICacheClient cache)
 {
     IsCacheEnabled     = cache != null;
     _scopedCacheClient = new ScopedCacheClient(cache ?? new NullCacheClient(), EntityTypeName);
 }
Example No. 14
        private async Task <ReindexResult> InternalReindexAsync(ReindexWorkItem workItem, Func <int, string, Task> progressCallbackAsync, int startProgress = 0, int endProgress = 100, DateTime?startTime = null)
        {
            const string scroll            = "5m";
            bool         errorIndexCreated = false;
            string       timestampField    = workItem.TimestampField ?? "_timestamp";
            var          scopedCacheClient = new ScopedCacheClient(_cache, workItem.GetHashCode().ToString());

            var settingsResponse = await _client.GetIndexSettingsAsync(s => s.Index(workItem.OldIndex)).AnyContext();

            if (!settingsResponse.IsValid)
            {
                throw new ApplicationException("Unable to retrieve index settings.");
            }

            int scrollSize = 500 / settingsResponse.IndexSettings.NumberOfShards ?? 50;

            var scanResults = await _client.SearchAsync <object>(s => s
                                                                 .Index(workItem.OldIndex)
                                                                 .AllTypes()
                                                                 .Query(q => q.Filtered(f => {
                if (startTime.HasValue)
                {
                    f.Filter(f1 => f1.Range(r => r.OnField(timestampField).Greater(startTime.Value)));
                }
            }))
                                                                 .Fields("_source", "_parent")
                                                                 .Size(scrollSize)
                                                                 .SearchType(SearchType.Scan)
                                                                 .Scroll(scroll)).AnyContext();

            _logger.Info(scanResults.GetRequest());

            if (!scanResults.IsValid || scanResults.ScrollId == null)
            {
                _logger.Error().Exception(scanResults.ConnectionStatus.OriginalException).Message("Invalid search result: message={0}", scanResults.GetErrorMessage()).Write();
                return(new ReindexResult());
            }

            var results = await _client.ScrollAsync <JObject>("5m", scanResults.ScrollId).AnyContext();

            if (!results.IsValid)
            {
                await scopedCacheClient.RemoveAsync("id").AnyContext();

                return(await InternalReindexAsync(workItem, progressCallbackAsync, startProgress, endProgress, startTime).AnyContext());
            }

            double completed = 0;
            long   totalHits = results.Total;

            while (results.Hits.Any())
            {
                ISearchResponse <JObject> results1 = results;

                IBulkResponse bulkResponse = null;
                try {
                    bulkResponse = await Run.WithRetriesAsync(() => _client.BulkAsync(b => {
                        foreach (var h in results1.Hits)
                        {
                            ConfigureIndexItem(b, h, workItem.NewIndex);
                        }

                        return(b);
                    }), logger : _logger, maxAttempts : 2).AnyContext();
                } catch (Exception ex) {
                    _logger.Error(ex, $"Error trying to do bulk index: {ex.Message}");
                }

                if (bulkResponse == null || !bulkResponse.IsValid || bulkResponse.ItemsWithErrors.Any())
                {
                    string message;
                    if (bulkResponse != null)
                    {
                        message = $"Reindex bulk error: old={workItem.OldIndex} new={workItem.NewIndex} completed={completed} message={bulkResponse?.GetErrorMessage()}";
                        _logger.Warn(bulkResponse.ConnectionStatus.OriginalException, message);
                    }

                    // try each doc individually so we can see which doc is breaking us
                    var hitsToRetry = bulkResponse?.ItemsWithErrors.Select(i => results1.Hits.First(hit => hit.Id == i.Id)) ?? results1.Hits;
                    foreach (var itemToRetry in hitsToRetry)
                    {
                        IIndexResponse response;
                        try {
                            response = await _client.IndexAsync(itemToRetry.Source, d => ConfigureItem(d, itemToRetry, workItem.NewIndex)).AnyContext();

                            if (response.IsValid)
                            {
                                continue;
                            }

                            message = $"Reindex error: old={workItem.OldIndex} new={workItem.NewIndex} id={itemToRetry.Id} completed={completed} message={response.GetErrorMessage()}";
                            _logger.Error().Exception(response.ConnectionStatus.OriginalException).Message(message);

                            var errorDoc = new JObject {
                                ["Type"]    = itemToRetry.Type,
                                ["Content"] = itemToRetry.Source.ToString(Formatting.Indented)
                            };

                            var errorIndex = workItem.NewIndex + "-error";
                            if (!errorIndexCreated && !(await _client.IndexExistsAsync(errorIndex).AnyContext()).Exists)
                            {
                                await _client.CreateIndexAsync(errorIndex).AnyContext();

                                errorIndexCreated = true;
                            }

                            // put the document into an error index
                            response = await _client.IndexAsync(errorDoc, d => d.Index(errorIndex).Id(itemToRetry.Id)).AnyContext();

                            if (response.IsValid)
                            {
                                continue;
                            }
                        } catch {
                            throw;
                        }

                        message = $"Reindex error: old={workItem.OldIndex} new={workItem.NewIndex} id={itemToRetry.Id} completed={completed} message={response.GetErrorMessage()}";
                        _logger.Error().Exception(response.ConnectionStatus.OriginalException).Message(message);
                        throw new ReindexException(response.ConnectionStatus, message);
                    }
                }

                completed += results.Hits.Count();
                await progressCallbackAsync(CalculateProgress(totalHits, (long)completed, startProgress, endProgress), $"Total: {totalHits} Completed: {completed}").AnyContext();

                results = await _client.ScrollAsync <JObject>("5m", results.ScrollId).AnyContext();

                await scopedCacheClient.AddAsync("id", results.ScrollId, TimeSpan.FromHours(1)).AnyContext();
            }

            await scopedCacheClient.RemoveAllAsync(new[] { "id" }).AnyContext();

            return(new ReindexResult {
                Total = totalHits, Completed = (long)completed
            });
        }
Example No. 15
        protected override async Task <JobResult> ProcessQueueEntryAsync(QueueEntryContext <WebHookNotification> context)
        {
            var  body      = context.QueueEntry.Value;
            bool shouldLog = body.ProjectId != _appOptions.Value.InternalProjectId;

            using (_logger.BeginScope(new ExceptionlessState().Organization(body.OrganizationId).Project(body.ProjectId))) {
                if (shouldLog)
                {
                    _logger.LogTrace("Process web hook call: id={Id} project={1} url={Url}", context.QueueEntry.Id, body.ProjectId, body.Url);
                }

                if (!await IsEnabledAsync(body).AnyContext())
                {
                    _logger.LogInformation("Web hook cancelled: Web hook is disabled");
                    return(JobResult.Cancelled);
                }

                var  cache             = new ScopedCacheClient(_cacheClient, GetCacheKeyScope(body));
                long consecutiveErrors = await cache.GetAsync <long>(ConsecutiveErrorsCacheKey, 0).AnyContext();

                if (consecutiveErrors > 10)
                {
                    var lastAttempt = await cache.GetAsync(LastAttemptCacheKey, SystemClock.UtcNow).AnyContext();

                    var nextAttemptAllowedAt = lastAttempt.AddMinutes(15);
                    if (nextAttemptAllowedAt >= SystemClock.UtcNow)
                    {
                        _logger.LogInformation("Web hook cancelled due to {FailureCount} consecutive failed attempts. Will be allowed to try again at {NextAttempt}.", consecutiveErrors, nextAttemptAllowedAt);
                        return(JobResult.Cancelled);
                    }
                }

                bool successful = true;
                HttpResponseMessage response = null;
                try {
                    using (var timeoutCancellationTokenSource = new CancellationTokenSource(TimeSpan.FromSeconds(5))) {
                        using (var postCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(context.CancellationToken, timeoutCancellationTokenSource.Token)) {
                            response = await _client.PostAsJsonAsync(body.Url, body.Data.ToJson(Formatting.Indented, _jsonSerializerSettings), postCancellationTokenSource.Token).AnyContext();

                            if (!response.IsSuccessStatusCode)
                            {
                                successful = false;
                            }
                            else if (consecutiveErrors > 0)
                            {
                                await cache.RemoveAllAsync(_cacheKeys).AnyContext();
                            }
                        }
                    }
                } catch (OperationCanceledException ex) {
                    successful = false;
                    if (shouldLog)
                    {
                        _logger.LogError(ex, "Timeout calling web hook: status={Status} org={organization} project={project} url={Url}", response?.StatusCode, body.OrganizationId, body.ProjectId, body.Url);
                    }
                    return(JobResult.Cancelled);
                } catch (Exception ex) {
                    successful = false;
                    if (shouldLog)
                    {
                        _logger.LogError(ex, "Error calling web hook: status={Status} org={organization} project={project} url={Url}", response?.StatusCode, body.OrganizationId, body.ProjectId, body.Url);
                    }
                    return(JobResult.FromException(ex));
                } finally {
                    if (successful)
                    {
                        _logger.LogInformation("Web hook POST complete: status={Status} org={organization} project={project} url={Url}", response?.StatusCode, body.OrganizationId, body.ProjectId, body.Url);
                    }
                    else if (response != null && (response.StatusCode == HttpStatusCode.Unauthorized || response.StatusCode == HttpStatusCode.Forbidden || response.StatusCode == HttpStatusCode.Gone))
                    {
                        _logger.LogWarning("Disabling Web hook instance {WebHookId} due to status code: status={Status} org={organization} project={project} url={Url}", body.Type == WebHookType.Slack ? "Slack" : body.WebHookId, response.StatusCode, body.OrganizationId, body.ProjectId, body.Url);
                        await DisableIntegrationAsync(body).AnyContext();

                        await cache.RemoveAllAsync(_cacheKeys).AnyContext();
                    }
                    else
                    {
                        var now = SystemClock.UtcNow;
                        await cache.SetAsync(LastAttemptCacheKey, now, TimeSpan.FromDays(3)).AnyContext();

                        consecutiveErrors = await cache.IncrementAsync(ConsecutiveErrorsCacheKey, TimeSpan.FromDays(3)).AnyContext();

                        DateTime firstAttempt;
                        if (consecutiveErrors == 1)
                        {
                            await cache.SetAsync(FirstAttemptCacheKey, now, TimeSpan.FromDays(3)).AnyContext();

                            firstAttempt = now;
                        }
                        else
                        {
                            firstAttempt = await cache.GetAsync(FirstAttemptCacheKey, now).AnyContext();
                        }

                        if (consecutiveErrors >= 10)
                        {
                            // don't retry any more
                            context.QueueEntry.MarkCompleted();

                            // disable if more than 10 consecutive errors over the course of multiple days
                            if (firstAttempt.IsBefore(now.SubtractDays(2)))
                            {
                                _logger.LogWarning("Disabling Web hook instance {WebHookId} due to too many consecutive failures.", body.Type == WebHookType.Slack ? "Slack" : body.WebHookId);
                                await DisableIntegrationAsync(body).AnyContext();

                                await cache.RemoveAllAsync(_cacheKeys).AnyContext();
                            }
                        }
                    }
                }
            }

            return(JobResult.Success);
        }
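
The job above rate-limits failing web hooks through three scoped cache keys (consecutive error count, first attempt, last attempt), each expiring after three days. The skip decision, pulled out into a minimal sketch (the method and key names here are illustrative; the cache calls are the same extensions used above):

    // Returns true when the web hook should be skipped because it has failed
    // more than 10 times in a row and the 15-minute backoff window has not elapsed.
    private async Task<bool> ShouldSkipAttemptAsync(ICacheClient scopedCache)
    {
        long consecutiveErrors = await scopedCache.GetAsync<long>("errors", 0).AnyContext();
        if (consecutiveErrors <= 10)
            return false;

        var lastAttempt = await scopedCache.GetAsync("attempt", SystemClock.UtcNow).AnyContext();
        return lastAttempt.AddMinutes(15) >= SystemClock.UtcNow;
    }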
Example No. 16
        protected override async Task <JobResult> RunInternalAsync(JobContext context)
        {
            var elasticOptions = _configuration.Options;

            if (elasticOptions.ElasticsearchToMigrate == null)
            {
                return(JobResult.CancelledWithMessage($"Please configure the connection string EX_{nameof(elasticOptions.ElasticsearchToMigrate)}."));
            }

            var    retentionPeriod = _configuration.Events.MaxIndexAge.GetValueOrDefault(TimeSpan.FromDays(180));
            string sourceScope     = elasticOptions.ElasticsearchToMigrate.Scope;
            string scope           = elasticOptions.ScopePrefix;
            var    cutOffDate      = elasticOptions.ReindexCutOffDate;

            var client = _configuration.Client;
            await _configuration.ConfigureIndexesAsync().AnyContext();

            var workItemQueue = new Queue <ReindexWorkItem>();

            workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}organizations-v1", "organization", $"{scope}organizations-v1", "updated_utc"));
            workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}organizations-v1", "project", $"{scope}projects-v1", "updated_utc"));
            workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}organizations-v1", "token", $"{scope}tokens-v1", "updated_utc"));
            workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}organizations-v1", "user", $"{scope}users-v1", "updated_utc"));
            workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}organizations-v1", "webhook", $"{scope}webhooks-v1", "created_utc", script: MIGRATE_VERSION_SCRIPT));
            workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}stacks-v1", "stacks", $"{scope}stacks-v1", "last_occurrence"));

            // create the new indexes, don't migrate yet
            foreach (var index in _configuration.Indexes.OfType <DailyIndex>())
            {
                for (int day = 0; day <= retentionPeriod.Days; day++)
                {
                    var    date          = day == 0 ? SystemClock.UtcNow : SystemClock.UtcNow.SubtractDays(day);
                    string indexToCreate = $"{scope}events-v1-{date:yyyy.MM.dd}";
                    workItemQueue.Enqueue(new ReindexWorkItem($"{sourceScope}events-v1-{date:yyyy.MM.dd}", "events", indexToCreate, "updated_utc", () => index.EnsureIndexAsync(date)));
                }
            }

            // Reset the alias cache
            var aliasCache = new ScopedCacheClient(_configuration.Cache, "alias");
            await aliasCache.RemoveAllAsync().AnyContext();

            var started        = SystemClock.UtcNow;
            var lastProgress   = SystemClock.UtcNow;
            int retriesCount   = 0;
            int totalTasks     = workItemQueue.Count;
            var workingTasks   = new List <ReindexWorkItem>();
            var completedTasks = new List <ReindexWorkItem>();
            var failedTasks    = new List <ReindexWorkItem>();

            while (true)
            {
                if (workingTasks.Count == 0 && workItemQueue.Count == 0)
                {
                    break;
                }

                if (workingTasks.Count < 10 && workItemQueue.TryDequeue(out var dequeuedWorkItem))
                {
                    if (dequeuedWorkItem.CreateIndex != null)
                    {
                        try {
                            await dequeuedWorkItem.CreateIndex().AnyContext();
                        } catch (Exception ex) {
                            _logger.LogError(ex, "Failed to create index for {TargetIndex}", dequeuedWorkItem.TargetIndex);
                            continue;
                        }
                    }

                    int batchSize = 1000;
                    if (dequeuedWorkItem.Attempts == 1)
                    {
                        batchSize = 500;
                    }
                    else if (dequeuedWorkItem.Attempts >= 2)
                    {
                        batchSize = 250;
                    }

                    var response = await client.ReindexOnServerAsync(r => r
                                                                     .Source(s => s
                                                                             .Remote(ConfigureRemoteElasticSource)
                                                                             .Index(dequeuedWorkItem.SourceIndex)
                                                                             .Size(batchSize)
                                                                             .Query <object>(q => {
                        var container = q.Term("_type", dequeuedWorkItem.SourceIndexType);
                        if (!String.IsNullOrEmpty(dequeuedWorkItem.DateField))
                        {
                            container &= q.DateRange(d => d.Field(dequeuedWorkItem.DateField).GreaterThanOrEquals(cutOffDate));
                        }

                        return(container);
                    }))
                                                                     .Destination(d => d
                                                                                  .Index(dequeuedWorkItem.TargetIndex))
                                                                     .Conflicts(Conflicts.Proceed)
                                                                     .WaitForCompletion(false)
                                                                     .Script(s => {
                        if (!String.IsNullOrEmpty(dequeuedWorkItem.Script))
                        {
                            return(s.Source(dequeuedWorkItem.Script));
                        }

                        return(null);
                    })).AnyContext();

                    dequeuedWorkItem.Attempts += 1;
                    dequeuedWorkItem.TaskId    = response.Task;
                    workingTasks.Add(dequeuedWorkItem);

                    _logger.LogInformation("STARTED - {TargetIndex} A:{Attempts} ({TaskId})...", dequeuedWorkItem.TargetIndex, dequeuedWorkItem.Attempts, dequeuedWorkItem.TaskId);

                    continue;
                }

                double highestProgress = 0;
                foreach (var workItem in workingTasks.ToArray())
                {
                    var taskStatus = await client.Tasks.GetTaskAsync(workItem.TaskId, t => t.WaitForCompletion(false)).AnyContext();

                    _logger.LogRequest(taskStatus);

                    var status = taskStatus?.Task?.Status;
                    if (status == null)
                    {
                        _logger.LogWarning(taskStatus?.OriginalException, "Error getting task status for {TargetIndex} {TaskId}: {Message}", workItem.TargetIndex, workItem.TaskId, taskStatus.GetErrorMessage());
                        if (taskStatus?.ServerError?.Status == 429)
                        {
                            await Task.Delay(TimeSpan.FromSeconds(1));
                        }

                        continue;
                    }

                    var    duration = TimeSpan.FromMilliseconds(taskStatus.Task.RunningTimeInNanoseconds * 0.000001);
                    double progress = status.Total > 0 ? (status.Created + status.Updated + status.Deleted + status.VersionConflicts * 1.0) / status.Total : 0;
                    highestProgress = Math.Max(highestProgress, progress);

                    if (!taskStatus.IsValid)
                    {
                        _logger.LogWarning(taskStatus.OriginalException, "Error getting task status for {TargetIndex} ({TaskId}): {Message}", workItem.TargetIndex, workItem.TaskId, taskStatus.GetErrorMessage());
                        workItem.ConsecutiveStatusErrors++;
                        if (taskStatus.Completed || workItem.ConsecutiveStatusErrors > 5)
                        {
                            workingTasks.Remove(workItem);
                            workItem.LastTaskInfo = taskStatus.Task;

                            if (taskStatus.Completed && workItem.Attempts < 3)
                            {
                                _logger.LogWarning("FAILED RETRY - {TargetIndex} in {Duration:hh\\:mm} C:{Created} U:{Updated} D:{Deleted} X:{Conflicts} T:{Total} A:{Attempts} ID:{TaskId}", workItem.TargetIndex, duration, status.Created, status.Updated, status.Deleted, status.VersionConflicts, status.Total, workItem.Attempts, workItem.TaskId);
                                workItem.ConsecutiveStatusErrors = 0;
                                workItemQueue.Enqueue(workItem);
                                totalTasks++;
                                retriesCount++;
                                await Task.Delay(TimeSpan.FromSeconds(15)).AnyContext();
                            }
                            else
                            {
                                _logger.LogCritical("FAILED - {TargetIndex} in {Duration:hh\\:mm} C:{Created} U:{Updated} D:{Deleted} X:{Conflicts} T:{Total} A:{Attempts} ID:{TaskId}", workItem.TargetIndex, duration, status.Created, status.Updated, status.Deleted, status.VersionConflicts, status.Total, workItem.Attempts, workItem.TaskId);
                                failedTasks.Add(workItem);
                            }
                        }

                        continue;
                    }

                    if (!taskStatus.Completed)
                    {
                        continue;
                    }

                    workingTasks.Remove(workItem);
                    workItem.LastTaskInfo = taskStatus.Task;
                    completedTasks.Add(workItem);
                    var targetCount = await client.CountAsync <object>(d => d.Index(workItem.TargetIndex)).AnyContext();

                    _logger.LogInformation("COMPLETED - {TargetIndex} ({TargetCount}) in {Duration:hh\\:mm} C:{Created} U:{Updated} D:{Deleted} X:{Conflicts} T:{Total} A:{Attempts} ID:{TaskId}", workItem.TargetIndex, targetCount.Count, duration, status.Created, status.Updated, status.Deleted, status.VersionConflicts, status.Total, workItem.Attempts, workItem.TaskId);
                }
                if (SystemClock.UtcNow.Subtract(lastProgress) > TimeSpan.FromMinutes(5))
                {
                    _logger.LogInformation("STATUS - I:{Completed}/{Total} P:{Progress:F0}% T:{Duration:d\\.hh\\:mm} W:{Working} F:{Failed} R:{Retries}", completedTasks.Count, totalTasks, highestProgress * 100, SystemClock.UtcNow.Subtract(started), workingTasks.Count, failedTasks.Count, retriesCount);
                    lastProgress = SystemClock.UtcNow;
                }
                await Task.Delay(TimeSpan.FromSeconds(2));
            }

            _logger.LogInformation("----- REINDEX COMPLETE", completedTasks.Count, totalTasks, SystemClock.UtcNow.Subtract(started), failedTasks.Count, retriesCount);
            foreach (var task in completedTasks)
            {
                var    status   = task.LastTaskInfo.Status;
                var    duration = TimeSpan.FromMilliseconds(task.LastTaskInfo.RunningTimeInNanoseconds * 0.000001);
                double progress = status.Total > 0 ? (status.Created + status.Updated + status.Deleted + status.VersionConflicts * 1.0) / status.Total : 0;

                var targetCount = await client.CountAsync <object>(d => d.Index(task.TargetIndex)).AnyContext();

                _logger.LogInformation("SUCCESS - {TargetIndex} ({TargetCount}) in {Duration:hh\\:mm} C:{Created} U:{Updated} D:{Deleted} X:{Conflicts} T:{Total} A:{Attempts} ID:{TaskId}", task.TargetIndex, targetCount.Count, duration, status.Created, status.Updated, status.Deleted, status.VersionConflicts, status.Total, task.Attempts, task.TaskId);
            }

            foreach (var task in failedTasks)
            {
                var    status   = task.LastTaskInfo.Status;
                var    duration = TimeSpan.FromMilliseconds(task.LastTaskInfo.RunningTimeInNanoseconds * 0.000001);
                double progress = status.Total > 0 ? (status.Created + status.Updated + status.Deleted + status.VersionConflicts * 1.0) / status.Total : 0;

                var targetCount = await client.CountAsync <object>(d => d.Index(task.TargetIndex));

                _logger.LogCritical("FAILED - {TargetIndex} ({TargetCount}) in {Duration:hh\\:mm} C:{Created} U:{Updated} D:{Deleted} X:{Conflicts} T:{Total} A:{Attempts} ID:{TaskId}", task.TargetIndex, targetCount.Count, duration, status.Created, status.Updated, status.Deleted, status.VersionConflicts, status.Total, task.Attempts, task.TaskId);
            }
            _logger.LogInformation("----- SUMMARY - I:{Completed}/{Total} T:{Duration:d\\.hh\\:mm} F:{Failed} R:{Retries}", completedTasks.Count, totalTasks, SystemClock.UtcNow.Subtract(started), failedTasks.Count, retriesCount);

            _logger.LogInformation("Updating aliases");
            await _configuration.MaintainIndexesAsync();

            _logger.LogInformation("Updated aliases");
            return(JobResult.Success);
        }