public void Register(Workspace workspace)
{
    var correlationId = LogAggregator.GetNextId();

    lock (_gate)
    {
        // A coordinator already exists for this workspace; nothing to do.
        if (_documentWorkCoordinatorMap.ContainsKey(workspace))
        {
            return;
        }

        var registration = new Registration(correlationId, workspace, _progressReporter);
        var coordinator = new WorkCoordinator(_listener, GetAnalyzerProviders(workspace), registration);

        _documentWorkCoordinatorMap.Add(workspace, coordinator);
    }

    // Log outside the lock; only reached when a new coordinator was created.
    SolutionCrawlerLogger.LogRegistration(correlationId, workspace);
}
public void Shutdown(bool blockingShutdown)
{
    // Stop listening to option and active-document changes.
    _optionService.OptionChanged -= OnOptionChanged;
    _documentTrackingService.ActiveDocumentChanged -= OnActiveDocumentSwitched;

    // Detach from the workspace's events.
    var workspace = _registration.Workspace;
    workspace.WorkspaceChanged -= OnWorkspaceChanged;
    workspace.DocumentOpened -= OnDocumentOpened;
    workspace.DocumentClosed -= OnDocumentClosed;

    // Cancel any pending blocks and shut the worker processor down.
    _shutdownNotificationSource.Cancel();
    _documentAndProjectWorkerProcessor.Shutdown();

    SolutionCrawlerLogger.LogWorkCoordinatorShutdown(CorrelationId, _logAggregator);

    if (!blockingShutdown)
    {
        return;
    }

    var pendingWork = Task.WhenAll(
        _eventProcessingQueue.LastScheduledTask,
        _documentAndProjectWorkerProcessor.AsyncProcessorTask,
        _semanticChangeProcessor.AsyncProcessorTask);

    try
    {
        // Give in-flight work a bounded amount of time to drain.
        pendingWork.Wait(TimeSpan.FromSeconds(5));
    }
    catch (AggregateException ex)
    {
        // Cancellation is expected during shutdown; anything else is rethrown.
        ex.Handle(e => e is OperationCanceledException);
    }

    if (!pendingWork.IsCompleted)
    {
        SolutionCrawlerLogger.LogWorkCoordinatorShutdownTimeout(CorrelationId);
    }
}
private void ProcessEvents(WorkspaceChangeEventArgs args, IAsyncToken asyncToken)
{
    SolutionCrawlerLogger.LogWorkspaceEvent(_logAggregator, (int)args.Kind);

    // TODO: add telemetry that record how much it takes to process an event (max, min, average and etc)
    switch (args.Kind)
    {
        case WorkspaceChangeKind.SolutionAdded or
             WorkspaceChangeKind.SolutionChanged or
             WorkspaceChangeKind.SolutionReloaded or
             WorkspaceChangeKind.SolutionRemoved or
             WorkspaceChangeKind.SolutionCleared:
            ProcessSolutionEvent(args, asyncToken);
            break;

        case WorkspaceChangeKind.ProjectAdded or
             WorkspaceChangeKind.ProjectChanged or
             WorkspaceChangeKind.ProjectReloaded or
             WorkspaceChangeKind.ProjectRemoved:
            ProcessProjectEvent(args, asyncToken);
            break;

        case WorkspaceChangeKind.DocumentAdded or
             WorkspaceChangeKind.DocumentReloaded or
             WorkspaceChangeKind.DocumentChanged or
             WorkspaceChangeKind.DocumentRemoved or
             WorkspaceChangeKind.AdditionalDocumentAdded or
             WorkspaceChangeKind.AdditionalDocumentRemoved or
             WorkspaceChangeKind.AdditionalDocumentChanged or
             WorkspaceChangeKind.AdditionalDocumentReloaded:
            ProcessDocumentEvent(args, asyncToken);
            break;

        default:
            throw ExceptionUtilities.UnexpectedValue(args.Kind);
    }
}
// Processes one document work item: refreshes open/close analyzer state when the
// item demands it, honors any special reanalyze request, then runs the document
// analyzers. Items that did not finish are re-enqueued as a retry in the finally.
private async Task ProcessDocumentAsync(ImmutableArray<IIncrementalAnalyzer> analyzers, WorkItem workItem, CancellationTokenSource source)
{
    // CancellationToken here is the processor-wide shutdown token; source.Token
    // below cancels just this work item.
    if (this.CancellationToken.IsCancellationRequested)
    {
        return;
    }

    var processedEverything = false;
    var documentId = workItem.DocumentId;

    try
    {
        using (Logger.LogBlock(FunctionId.WorkCoordinator_ProcessDocumentAsync, source.Token))
        {
            var cancellationToken = source.Token;

            var document = _processingSolution.GetDocument(documentId);
            if (document != null)
            {
                // if we are called because a document is opened, we invalidate the document so that
                // it can be re-analyzed. otherwise, since newly opened document has same version as before
                // analyzer will simply return same data back
                if (workItem.MustRefresh && !workItem.IsRetry)
                {
                    var isOpen = document.IsOpen();

                    await ProcessOpenDocumentIfNeeded(analyzers, workItem, document, isOpen, cancellationToken).ConfigureAwait(false);
                    await ProcessCloseDocumentIfNeeded(analyzers, workItem, document, isOpen, cancellationToken).ConfigureAwait(false);
                }

                // check whether we are having special reanalyze request
                await ProcessReanalyzeDocumentAsync(workItem, document, cancellationToken).ConfigureAwait(false);

                await ProcessDocumentAnalyzersAsync(document, analyzers, workItem, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                // document no longer exists in the solution being processed; drop its analyzer state
                SolutionCrawlerLogger.LogProcessDocumentNotExist(this.Processor._logAggregator);

                RemoveDocument(documentId);
            }

            // Only claim full completion when this item's token was never tripped;
            // otherwise the finally block re-enqueues a retry.
            if (!cancellationToken.IsCancellationRequested)
            {
                processedEverything = true;
            }
        }
    }
    catch (Exception e) when (FatalError.ReportUnlessCanceled(e))
    {
        throw ExceptionUtilities.Unreachable;
    }
    finally
    {
        // we got cancelled in the middle of processing the document.
        // let's make sure newly enqueued work item has all the flag needed.
        if (!processedEverything)
        {
            _workItemQueue.AddOrReplace(workItem.Retry(this.Listener.BeginAsyncOperation("ReenqueueWorkItem")));
        }

        SolutionCrawlerLogger.LogProcessDocument(this.Processor._logAggregator, documentId.Id, processedEverything);

        // remove one that is finished running
        _workItemQueue.RemoveCancellationSource(workItem.DocumentId);
    }
}
// Processes one project work item: runs the project analyzers (with caching enabled
// for the project) or, if the project no longer exists, removes its analyzer state.
// Items that did not finish are re-enqueued as a retry in the finally.
private async Task ProcessProjectAsync(ImmutableArray<IIncrementalAnalyzer> analyzers, WorkItem workItem, CancellationToken cancellationToken)
{
    // CancellationToken (property) is the processor-wide shutdown token; the
    // cancellationToken parameter cancels just this work item.
    if (CancellationToken.IsCancellationRequested)
    {
        return;
    }

    // we do have work item for this project
    var projectId = workItem.ProjectId;
    var processedEverything = false;
    var processingSolution = Processor._registration.GetSolutionToAnalyze();

    try
    {
        using (Logger.LogBlock(FunctionId.WorkCoordinator_ProcessProjectAsync, w => w.ToString(), workItem, cancellationToken))
        {
            var project = processingSolution.GetProject(projectId);
            if (project != null)
            {
                // SolutionRemoved is treated like a semantic change so analyzers fully re-run.
                var reasons = workItem.InvocationReasons;
                var semanticsChanged = reasons.Contains(PredefinedInvocationReasons.SemanticChanged) ||
                    reasons.Contains(PredefinedInvocationReasons.SolutionRemoved);

                using (Processor.EnableCaching(project.Id))
                {
                    await Processor.RunAnalyzersAsync(analyzers, project, workItem, (a, p, c) => a.AnalyzeProjectAsync(p, semanticsChanged, reasons, c), cancellationToken).ConfigureAwait(false);
                }
            }
            else
            {
                // project no longer exists in the solution being processed; drop its analyzer state
                SolutionCrawlerLogger.LogProcessProjectNotExist(Processor._logAggregator);

                await RemoveProjectAsync(projectId, cancellationToken).ConfigureAwait(false);
            }

            if (!cancellationToken.IsCancellationRequested)
            {
                processedEverything = true;
            }
        }
    }
    catch (Exception e) when (FatalError.ReportAndPropagateUnlessCanceled(e, cancellationToken))
    {
        throw ExceptionUtilities.Unreachable;
    }
    finally
    {
        // we got cancelled in the middle of processing the project.
        // let's make sure newly enqueued work item has all the flag needed.
        // Avoid retry attempts after cancellation is requested, since work will not be processed
        // after that point.
        if (!processedEverything && !CancellationToken.IsCancellationRequested)
        {
            _workItemQueue.AddOrReplace(workItem.Retry(Listener.BeginAsyncOperation("ReenqueueWorkItem")));
        }

        SolutionCrawlerLogger.LogProcessProject(Processor._logAggregator, projectId.Id, processedEverything);

        // remove one that is finished running
        _workItemQueue.MarkWorkItemDoneFor(projectId);
    }
}
// Processes one document work item against the processor's current solution:
// refreshes open/close analyzer state when needed, honors reanalyze requests,
// then runs the document analyzers. Unfinished items are re-enqueued as retries.
private async Task ProcessDocumentAsync(ImmutableArray<IIncrementalAnalyzer> analyzers, WorkItem workItem, CancellationToken cancellationToken)
{
    Contract.ThrowIfNull(workItem.DocumentId);

    // CancellationToken (property) is the processor-wide shutdown token; the
    // cancellationToken parameter cancels just this work item.
    if (CancellationToken.IsCancellationRequested)
    {
        return;
    }

    var processedEverything = false;
    var documentId = workItem.DocumentId;

    // we should always use solution snapshot after workitem is removed from the queue.
    // otherwise, we can have a race such as below.
    //
    // 1.solution crawler picked up a solution
    // 2.before processing the solution, an workitem got changed
    // 3.and then the work item got picked up from the queue
    // 4.and use the work item with the solution that got picked up in step 1
    //
    // step 2 is happening because solution has changed, but step 4 used old solution from step 1
    // that doesn't have effects of the solution changes.
    //
    // solution crawler must remove the work item from the queue first and then pick up the soluton,
    // so that the queue gets new work item if there is any solution changes after the work item is removed
    // from the queue
    //
    // using later version of solution is always fine since, as long as there is new work item in the queue,
    // solution crawler will eventually call the last workitem with the lastest solution
    // making everything to catch up
    var solution = Processor.CurrentSolution;

    try
    {
        using (Logger.LogBlock(FunctionId.WorkCoordinator_ProcessDocumentAsync, w => w.ToString(), workItem, cancellationToken))
        {
            var document = solution.GetDocument(documentId);
            if (document != null)
            {
                // if we are called because a document is opened, we invalidate the document so that
                // it can be re-analyzed. otherwise, since newly opened document has same version as before
                // analyzer will simply return same data back
                if (workItem.MustRefresh && !workItem.IsRetry)
                {
                    var isOpen = document.IsOpen();

                    await ProcessOpenDocumentIfNeededAsync(analyzers, workItem, document, isOpen, cancellationToken).ConfigureAwait(false);
                    await ProcessCloseDocumentIfNeededAsync(analyzers, workItem, document, isOpen, cancellationToken).ConfigureAwait(false);
                }

                // check whether we are having special reanalyze request
                await ProcessReanalyzeDocumentAsync(workItem, document, cancellationToken).ConfigureAwait(false);

                await Processor.ProcessDocumentAnalyzersAsync(document, analyzers, workItem, cancellationToken).ConfigureAwait(false);
            }
            else
            {
                // document no longer exists in the solution being processed; drop its analyzer state
                SolutionCrawlerLogger.LogProcessDocumentNotExist(Processor._logAggregator);

                await RemoveDocumentAsync(documentId, cancellationToken).ConfigureAwait(false);
            }

            if (!cancellationToken.IsCancellationRequested)
            {
                processedEverything = true;
            }
        }
    }
    catch (Exception e) when (FatalError.ReportUnlessCanceled(e))
    {
        throw ExceptionUtilities.Unreachable;
    }
    finally
    {
        // we got cancelled in the middle of processing the document.
        // let's make sure newly enqueued work item has all the flag needed.
        // Avoid retry attempts after cancellation is requested, since work will not be processed
        // after that point.
        if (!processedEverything && !CancellationToken.IsCancellationRequested)
        {
            _workItemQueue.AddOrReplace(workItem.Retry(Listener.BeginAsyncOperation("ReenqueueWorkItem")));
        }

        SolutionCrawlerLogger.LogProcessDocument(Processor._logAggregator, documentId.Id, processedEverything);

        // remove one that is finished running
        _workItemQueue.MarkWorkItemDoneFor(workItem.DocumentId);
    }
}
// Marks a document for higher-priority processing and pins its project's cache.
//
// Fix: the original unconditionally overwrote the map entry, silently dropping any
// previously stored EnableCaching disposable (EnableCaching results are disposed
// via `using` elsewhere in this file), leaking the cache pin. Dispose the old
// entry before storing a new one.
private void AddHigherPriorityDocument(DocumentId id)
{
    if (_higherPriorityDocumentsNotProcessed.TryGetValue(id, out var existingCache))
    {
        // release the previously pinned cache before replacing it
        existingCache.Dispose();
    }

    _higherPriorityDocumentsNotProcessed[id] = Processor.EnableCaching(id.ProjectId);

    SolutionCrawlerLogger.LogHigherPriority(this.Processor._logAggregator, id.Id);
}
protected override void OnPaused()
{
    // Record that the processor was paused by a global operation.
    SolutionCrawlerLogger.LogGlobalOperation(Processor._logAggregator);
}
// Processes one project work item: runs the project analyzers (with caching when a
// cache service is available) or removes state for a no-longer-existing project.
// Unfinished items are re-enqueued as retries in the finally.
//
// Fix: the CacheService null/non-null branches duplicated the identical
// RunAnalyzersAsync call. A `using` statement accepts a null resource, so the
// null-conditional collapses both branches into a single call site with the same
// behavior.
private async Task ProcessProjectAsync(ImmutableArray<IIncrementalAnalyzer> analyzers, WorkItem workItem, CancellationTokenSource source)
{
    // this.CancellationToken is the processor-wide shutdown token; source.Token
    // below cancels just this work item.
    if (this.CancellationToken.IsCancellationRequested)
    {
        return;
    }

    // we do have work item for this project
    var projectId = workItem.ProjectId;
    var processedEverything = false;
    var processingSolution = this.Processor.CurrentSolution;

    try
    {
        using (Logger.LogBlock(FunctionId.WorkCoordinator_ProcessProjectAsync, source.Token))
        {
            var cancellationToken = source.Token;

            var project = processingSolution.GetProject(projectId);
            if (project != null)
            {
                // SolutionRemoved is treated like a semantic change so analyzers fully re-run.
                var semanticsChanged = workItem.InvocationReasons.Contains(PredefinedInvocationReasons.SemanticChanged) ||
                    workItem.InvocationReasons.Contains(PredefinedInvocationReasons.SolutionRemoved);

                // `using` on a null resource is a no-op, so this covers both the
                // cached and uncached paths.
                var cacheService = project.Solution.Services.CacheService;
                using (cacheService?.EnableCaching(project.Id))
                {
                    await RunAnalyzersAsync(analyzers, project, (a, p, c) => a.AnalyzeProjectAsync(p, semanticsChanged, c), cancellationToken).ConfigureAwait(false);
                }
            }
            else
            {
                // project no longer exists in the solution being processed; drop its analyzer state
                SolutionCrawlerLogger.LogProcessProjectNotExist(this.Processor._logAggregator);

                RemoveProject(projectId);
            }
        }

        // cancellation surfaces as OperationCanceledException above, so reaching
        // here means the item ran to completion.
        processedEverything = true;
    }
    catch (Exception e) when (FatalError.ReportUnlessCanceled(e))
    {
        throw ExceptionUtilities.Unreachable;
    }
    finally
    {
        // we got cancelled in the middle of processing the project.
        // let's make sure newly enqueued work item has all the flag needed.
        if (!processedEverything)
        {
            _workItemQueue.AddOrReplace(workItem.Retry(this.Listener.BeginAsyncOperation("ReenqueueWorkItem")));
        }

        SolutionCrawlerLogger.LogProcessProject(this.Processor._logAggregator, projectId.Id, processedEverything);

        // remove one that is finished running
        _workItemQueue.RemoveCancellationSource(projectId);
    }
}
// Detects a changed solution snapshot and, when changed, notifies analyzers of the
// new snapshot and bumps all open documents to higher priority. When the solution
// *identity* changed (close/open rather than an edit), flushes and resets the
// aggregated telemetry first.
private async Task ResetStatesAsync()
{
    try
    {
        if (!IsSolutionChanged())
        {
            return;
        }

        // let analyzers see the new snapshot before any per-document work happens
        await Processor.RunAnalyzersAsync(
            Analyzers,
            Processor._registration.GetSolutionToAnalyze(),
            workItem: new WorkItem(), (a, s, c) => a.NewSolutionSnapshotAsync(s, c), CancellationToken).ConfigureAwait(false);

        // open documents get analyzed ahead of the rest of the solution
        foreach (var id in Processor.GetOpenDocumentIds())
        {
            AddHigherPriorityDocument(id);
        }

        SolutionCrawlerLogger.LogResetStates(Processor._logAggregator);
    }
    catch (Exception e) when (FatalError.ReportAndPropagateUnlessCanceled(e))
    {
        throw ExceptionUtilities.Unreachable;
    }

    // Returns true (and records the new snapshot) when the solution to analyze
    // differs from the last one seen.
    bool IsSolutionChanged()
    {
        var currentSolution = Processor._registration.GetSolutionToAnalyze();
        var oldSolution = _lastSolution;

        if (currentSolution == oldSolution)
        {
            return (false);
        }

        _lastSolution = currentSolution;

        ResetLogAggregatorIfNeeded(currentSolution, oldSolution);
        return (true);
    }

    // Flushes accumulated statistics only when the solution identity changed
    // (same-Id snapshots are ordinary edits and keep accumulating).
    void ResetLogAggregatorIfNeeded(Solution currentSolution, Solution? oldSolution)
    {
        if (oldSolution == null || currentSolution.Id == oldSolution.Id)
        {
            // we log aggregated info when solution is changed such as
            // new solution is opened or solution is closed
            return;
        }

        // this log things like how many time we analyzed active files, how many times other files are analyzed,
        // avg time to analyze files, how many solution snapshot got analyzed and etc.
        // all accumultation is done in VS side and we only send statistics to VS telemetry otherwise, it is too much
        // data to send
        SolutionCrawlerLogger.LogIncrementalAnalyzerProcessorStatistics(
            Processor._registration.CorrelationId, oldSolution, Processor._logAggregator, Analyzers);

        Processor.ResetLogAggregator();
    }
}
// Records that processing was paused for a global operation.
protected override void PauseOnGlobalOperation()
    => SolutionCrawlerLogger.LogGlobalOperation(this.Processor._logAggregator);
// Translates a workspace change notification into the matching enqueue call:
// full-solution/project/document events for add/remove, diff-based events for
// change/reload. Additional files and .editorconfig changes reanalyze the
// whole owning project.
private void ProcessEvent(WorkspaceChangeEventArgs args, string eventName)
{
    SolutionCrawlerLogger.LogWorkspaceEvent(_logAggregator, (int)args.Kind);

    // TODO: add telemetry that record how much it takes to process an event (max, min, average and etc)
    switch (args.Kind)
    {
        case WorkspaceChangeKind.SolutionAdded:
            EnqueueFullSolutionEvent(args.NewSolution, InvocationReasons.DocumentAdded, eventName);
            break;

        case WorkspaceChangeKind.SolutionChanged:
        case WorkspaceChangeKind.SolutionReloaded:
            // diff old vs new to enqueue only what actually changed
            EnqueueSolutionChangedEvent(args.OldSolution, args.NewSolution, eventName);
            break;

        case WorkspaceChangeKind.SolutionRemoved:
            EnqueueFullSolutionEvent(args.OldSolution, InvocationReasons.SolutionRemoved, eventName);
            break;

        case WorkspaceChangeKind.SolutionCleared:
            EnqueueFullSolutionEvent(args.OldSolution, InvocationReasons.DocumentRemoved, eventName);
            break;

        case WorkspaceChangeKind.ProjectAdded:
            Contract.ThrowIfNull(args.ProjectId);
            EnqueueFullProjectEvent(args.NewSolution, args.ProjectId, InvocationReasons.DocumentAdded, eventName);
            break;

        case WorkspaceChangeKind.ProjectChanged:
        case WorkspaceChangeKind.ProjectReloaded:
            Contract.ThrowIfNull(args.ProjectId);
            EnqueueProjectChangedEvent(args.OldSolution, args.NewSolution, args.ProjectId, eventName);
            break;

        case WorkspaceChangeKind.ProjectRemoved:
            Contract.ThrowIfNull(args.ProjectId);
            EnqueueFullProjectEvent(args.OldSolution, args.ProjectId, InvocationReasons.DocumentRemoved, eventName);
            break;

        case WorkspaceChangeKind.DocumentAdded:
            Contract.ThrowIfNull(args.DocumentId);
            EnqueueFullDocumentEvent(args.NewSolution, args.DocumentId, InvocationReasons.DocumentAdded, eventName);
            break;

        case WorkspaceChangeKind.DocumentReloaded:
        case WorkspaceChangeKind.DocumentChanged:
            Contract.ThrowIfNull(args.DocumentId);
            EnqueueDocumentChangedEvent(args.OldSolution, args.NewSolution, args.DocumentId, eventName);
            break;

        case WorkspaceChangeKind.DocumentRemoved:
            Contract.ThrowIfNull(args.DocumentId);
            EnqueueFullDocumentEvent(args.OldSolution, args.DocumentId, InvocationReasons.DocumentRemoved, eventName);
            break;

        case WorkspaceChangeKind.AdditionalDocumentAdded:
        case WorkspaceChangeKind.AdditionalDocumentRemoved:
        case WorkspaceChangeKind.AdditionalDocumentChanged:
        case WorkspaceChangeKind.AdditionalDocumentReloaded:
        case WorkspaceChangeKind.AnalyzerConfigDocumentAdded:
        case WorkspaceChangeKind.AnalyzerConfigDocumentRemoved:
        case WorkspaceChangeKind.AnalyzerConfigDocumentChanged:
        case WorkspaceChangeKind.AnalyzerConfigDocumentReloaded:
            // If an additional file or .editorconfig has changed we need to reanalyze the entire project.
            Contract.ThrowIfNull(args.ProjectId);
            EnqueueFullProjectEvent(args.NewSolution, args.ProjectId, InvocationReasons.AdditionalDocumentChanged, eventName);
            break;

        default:
            throw ExceptionUtilities.UnexpectedValue(args.Kind);
    }
}
private void AddHigherPriorityDocument(DocumentId id)
{
    // Flag the document as pending higher-priority processing, then log it.
    higherPriorityDocumentsNotProcessed[id] = true;
    SolutionCrawlerLogger.LogHigherPriority(Processor.logAggregator, id.Id);
}
// Runs the document analyzers for an active-file work item against the supplied
// solution snapshot; unfinished items are re-enqueued as retries.
private async Task ProcessDocumentAsync(
    Solution solution,
    ImmutableArray<IIncrementalAnalyzer> analyzers,
    WorkItem workItem,
    CancellationToken cancellationToken)
{
    Contract.ThrowIfNull(workItem.DocumentId);

    // CancellationToken (property) is the processor-wide shutdown token; the
    // cancellationToken parameter cancels just this work item.
    if (CancellationToken.IsCancellationRequested)
    {
        return;
    }

    var completed = false;
    var documentId = workItem.DocumentId;

    try
    {
        using (Logger.LogBlock(FunctionId.WorkCoordinator_ProcessDocumentAsync, w => w.ToString(), workItem, cancellationToken))
        {
            var document = solution.GetDocument(documentId);
            if (document != null)
            {
                await _processor.ProcessDocumentAnalyzersAsync(document, analyzers, workItem, cancellationToken).ConfigureAwait(false);
            }

            // completed started false, so this is equivalent to the conditional set
            completed = !cancellationToken.IsCancellationRequested;
        }
    }
    catch (Exception e) when (FatalError.ReportAndPropagateUnlessCanceled(e, cancellationToken))
    {
        throw ExceptionUtilities.Unreachable;
    }
    finally
    {
        // we got cancelled in the middle of processing the document.
        // let's make sure newly enqueued work item has all the flag needed.
        // Avoid retry attempts after cancellation is requested, since work will not be processed
        // after that point.
        if (!completed && !CancellationToken.IsCancellationRequested)
        {
            _workItemQueue.AddOrReplace(workItem.Retry(Listener.BeginAsyncOperation("ReenqueueWorkItem")));
        }

        SolutionCrawlerLogger.LogProcessActiveFileDocument(_processor._logAggregator, documentId.Id, completed);

        // remove one that is finished running
        _workItemQueue.MarkWorkItemDoneFor(workItem.DocumentId);
    }
}