public void StartSentryTransaction_BindsToScope()
{
    // Arrange
    using var _ = SentrySdk.UseHub(new Hub(
        new SentryOptions { Dsn = "https://[email protected]:65535/2147483647" },
        Substitute.For<ISentryClient>()));

    var context = HttpContextBuilder.Build();

    // Act
    var transaction = context.StartSentryTransaction();
    var transactionFromScope = SentrySdk.GetSpan();

    // Assert
    transactionFromScope.Should().BeSameAs(transaction);
}
/// <summary>
/// Discovers and downloads all of the catalog leaves after the current cursor value and before the maximum
/// commit timestamp found in the settings. Each catalog leaf is passed to the catalog leaf processor in
/// chronological order. After a commit is completed, its commit timestamp is written to the cursor, i.e. when
/// transitioning from commit timestamp A to B, A is written to the cursor so that it is never processed again.
/// </summary>
public async Task ProcessAsync(CancellationToken token)
{
    var catalogIndexSpan = SentrySdk.GetSpan()
        ?.StartChild("catalog.index", "Retrieving catalog index");
    var catalogIndexUrl = await GetCatalogIndexUrlAsync(token);
    catalogIndexSpan?.SetTag("catalogIndexUrl", catalogIndexUrl);
    catalogIndexSpan?.Finish();

    var minCommitTimestamp = await GetMinCommitTimestamp(token);
    _logger.LogInformation(
        "Using time bounds {min:O} (exclusive) to {max:O} (inclusive).",
        minCommitTimestamp,
        _settings.MaxCommitTimestamp);

    var processIndexSpan = SentrySdk.GetSpan()
        ?.StartChild("catalog.process", "Processing catalog");
    processIndexSpan?.SetTag("minCommitTimestamp", minCommitTimestamp.ToString());
    await ProcessIndexAsync(catalogIndexUrl, minCommitTimestamp, token);
    processIndexSpan?.Finish();
}
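The cursor mentioned in the summary is just a persisted commit timestamp: writing it only after a commit completes is what makes the processor resumable without reprocessing. A minimal file-backed sketch of that idea, assuming an ICursor contract with GetAsync/SetAsync (the interface and FileCursor names here are illustrative, not necessarily what this codebase defines):

using System;
using System.IO;
using System.Threading;
using System.Threading.Tasks;

// Hypothetical contract: read the last fully processed commit timestamp,
// or persist a new one once a commit has been handled end to end.
public interface ICursor
{
    Task<DateTimeOffset?> GetAsync(CancellationToken token);
    Task SetAsync(DateTimeOffset value, CancellationToken token);
}

public class FileCursor : ICursor
{
    private readonly string _path;

    public FileCursor(string path) => _path = path;

    public async Task<DateTimeOffset?> GetAsync(CancellationToken token)
    {
        if (!File.Exists(_path))
        {
            // No cursor yet: the caller falls back to its configured minimum.
            return null;
        }

        var text = await File.ReadAllTextAsync(_path, token);
        return DateTimeOffset.Parse(text);
    }

    public Task SetAsync(DateTimeOffset value, CancellationToken token)
        => File.WriteAllTextAsync(_path, value.ToString("O"), token);
}

Because the cursor is only advanced after a commit finishes, a crash mid-commit means that commit is retried on the next run; processing must therefore tolerate seeing a leaf twice.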
public void StartSentryTransaction_BindsToScope()
{
    // Arrange
    using var _ = SentrySdk.UseHub(new Sentry.Internal.Hub(
        new SentryOptions { Dsn = "https://[email protected]:65535/2147483647" },
        Substitute.For<ISentryClient>()));

    var context = new HttpContext(
        new HttpRequest("foo", "https://localhost/person/13", "details=true")
        {
            RequestType = "GET"
        },
        new HttpResponse(TextWriter.Null));

    // Act
    var transaction = context.StartSentryTransaction();
    var transactionFromScope = SentrySdk.GetSpan();

    // Assert
    transactionFromScope.Should().BeSameAs(transaction);
}
private async Task ProcessPageAsync(
    DateTimeOffset minCommitTimestamp,
    CatalogPageItem pageItem,
    CancellationToken token)
{
    var page = await _client.GetPageAsync(pageItem.Url, token);

    var leafItems = page.GetLeavesInBounds(
        minCommitTimestamp,
        _settings.MaxCommitTimestamp,
        _settings.ExcludeRedundantLeaves);
    SentrySdk.GetSpan()?.SetTag("leafItemsCount", leafItems.Count.ToString());

    _logger.LogInformation(
        "On page {page}, {leaves} out of {totalLeaves} were in the time bounds.",
        pageItem.Url,
        leafItems.Count,
        page.Items.Count);

    DateTimeOffset? newCursor = null;
    var tasks = new List<Task<CatalogLeaf>>();

    // Download leaves in batches of 25 concurrent requests.
    foreach (var batch in leafItems
        .Select((v, i) => new { Index = i, Value = v })
        .GroupBy(v => v.Index / 25)
        .Select(v => v.Select(p => p.Value)))
    {
        foreach (var leafItem in batch)
        {
            newCursor = leafItem.CommitTimestamp;
            tasks.Add(ProcessLeafAsync(leafItem, token));
        }

        await Task.WhenAll(tasks);

        foreach (var task in tasks)
        {
            try
            {
                if (task.Result is PackageDeleteCatalogLeaf del)
                {
                    await _leafProcessor.ProcessPackageDeleteAsync(del, token);
                }
                else if (task.Result is PackageDetailsCatalogLeaf detail)
                {
                    await _leafProcessor.ProcessPackageDetailsAsync(detail, token);
                }
                else
                {
                    // Lots of null leaves
                    _logger.LogInformation("Unsupported leaf type: {type}.", task.Result?.GetType());
                }
            }
            catch (Exception e)
            {
                _logger.LogError(e, "Failed to process {result}.", task.Result);
            }
        }

        tasks.Clear();
    }

    if (newCursor.HasValue)
    {
        await _cursor.SetAsync(newCursor.Value, token);
    }
}
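The Select/GroupBy with integer division of the index is a common LINQ idiom for chunking a sequence into fixed-size batches. Extracted as a standalone sketch (the Batch name is ours; on .NET 6+ the built-in Enumerable.Chunk does the same job):

using System.Collections.Generic;
using System.Linq;

public static class EnumerableExtensions
{
    // Splits a sequence into consecutive batches of at most `size` items,
    // equivalent to the Select/GroupBy pattern used in ProcessPageAsync.
    public static IEnumerable<List<T>> Batch<T>(this IEnumerable<T> source, int size)
        => source
            .Select((value, index) => (value, index))
            .GroupBy(x => x.index / size)
            .Select(group => group.Select(x => x.value).ToList());
}

With such a helper the loop header above becomes `foreach (var batch in leafItems.Batch(25))`, which also makes the batch size easy to lift into configuration.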
public async Task UploadAllPathsAsync(
    string friendlyName,
    BatchType type,
    IEnumerable<string> topLevelPaths,
    CancellationToken cancellationToken)
{
    var groupsSpan = SentrySdk.GetSpan()
        ?.StartChild("group.get", "Get the group of directories to search in parallel");
    var counter = 0;
    var groups = (
        from topPath in topLevelPaths
        from lookupDirectory in SafeGetDirectories(topPath)
        where _blockListedPaths?.Contains(lookupDirectory) != true
        let c = counter++
        group lookupDirectory by c / ParallelTasks into grp
        select grp.ToList()).ToList();
    groupsSpan?.Finish();

    var startSpan = SentrySdk.GetSpan()?.StartChild("batch.start");
    Guid batchId;
    try
    {
        batchId = await _symbolClient.Start(friendlyName, type, cancellationToken);
        startSpan?.Finish(SpanStatus.Ok);
    }
    catch (Exception e)
    {
        startSpan?.Finish(e);
        throw;
    }

    var uploadSpan = SentrySdk.GetSpan()?.StartChild("batch.upload");
    uploadSpan?.SetTag("groups", groups.Count.ToString());
    uploadSpan?.SetTag("total_items", counter.ToString());
    try
    {
        foreach (var group in groups)
        {
            await UploadParallel(batchId, group, cancellationToken);
        }
        uploadSpan?.Finish(SpanStatus.Ok);
    }
    catch (Exception e)
    {
        uploadSpan?.Finish(e);
        _logger.LogError(e, "Failed processing files for {batchId}. Rethrowing and leaving the batch open.", batchId);
        throw;
    }

    var stopSpan = SentrySdk.GetSpan()?.StartChild("batch.close");
    await _symbolClient.Close(batchId, Metrics, cancellationToken);
    stopSpan?.Finish(SpanStatus.Ok);

    IEnumerable<string> SafeGetDirectories(string path)
    {
        _logger.LogDebug("Probing {path} for child directories.", path);
        yield return path;

        IEnumerable<string> dirs;
        try
        {
            // Can't yield return from inside a try block that has catch clauses,
            // so fetch the directory list first and yield afterwards.
            dirs = Directory.GetDirectories(path, "*");
        }
        catch (UnauthorizedAccessException)
        {
            Metrics.FileOrDirectoryUnauthorizedAccess();
            yield break;
        }
        catch (DirectoryNotFoundException)
        {
            Metrics.DirectoryDoesNotExist();
            yield break;
        }

        foreach (var dir in dirs)
        {
            foreach (var safeDir in SafeGetDirectories(dir))
            {
                yield return safeDir;
            }
        }
    }
}
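The try/Finish(SpanStatus.Ok)/Finish(e)/rethrow shape around batch.start and batch.upload repeats often enough to factor into a small extension. A minimal sketch using only the StartChild and Finish overloads already seen above (the WithChildSpan name is ours, not part of the Sentry SDK):

using System;
using System.Threading.Tasks;
using Sentry;

public static class SpanHelpers
{
    // Runs `action` inside a child span of `parent`, finishing the span
    // with Ok on success or with the exception on failure, then rethrowing.
    public static async Task WithChildSpan(
        this ISpan? parent,
        string operation,
        Func<Task> action,
        string? description = null)
    {
        var span = parent?.StartChild(operation, description);
        try
        {
            await action();
            span?.Finish(SpanStatus.Ok);
        }
        catch (Exception e)
        {
            span?.Finish(e);
            throw;
        }
    }
}

The upload loop above could then read `await SentrySdk.GetSpan().WithChildSpan("batch.upload", async () => { ... })`; calls that return a value, like _symbolClient.Start, would need a sibling overload taking Func<Task<T>>.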