/// <summary>
/// Handles a semantic tokens edits request: recomputes the full token set for the
/// document, diffs it against the cached tokens from the previous request, and
/// returns either the edits (when a cached baseline exists) or the full token set.
/// </summary>
/// <param name="request">The LSP edits request; must carry a TextDocument and a PreviousResultId.</param>
/// <param name="context">Request context supplying the target document (may be null, see workaround below).</param>
/// <param name="cancellationToken">Token used to cancel token computation and cache access.</param>
public async Task<SumType<LSP.SemanticTokens, LSP.SemanticTokensEdits>> HandleRequestAsync(
    LSP.SemanticTokensEditsParams request,
    RequestContext context,
    CancellationToken cancellationToken)
{
    // Temporary workaround for https://github.com/dotnet/roslyn/issues/54547:
    // We should eventually go back to throwing here if context.Document is null.
    if (context.Document is null)
    {
        return new LSP.SemanticTokens();
    }

    Contract.ThrowIfNull(request.TextDocument, "TextDocument is null.");
    Contract.ThrowIfNull(request.PreviousResultId, "previousResultId is null.");

    // Even though we want to ultimately pass edits back to LSP, we still need to
    // compute all semantic tokens, both for caching purposes and in order to have
    // a baseline comparison when computing the edits.
    var currentTokensData = await SemanticTokensHelpers.ComputeSemanticTokensDataAsync(
        context.Document,
        SemanticTokensCache.TokenTypeToIndex,
        range: null,
        cancellationToken).ConfigureAwait(false);
    Contract.ThrowIfNull(currentTokensData, "newSemanticTokensData is null.");

    // Getting the cached tokens for the document. If we don't have an applicable
    // cached token set, we can't calculate edits, so we must return all semantic
    // tokens instead.
    var cachedTokensData = await _tokensCache.GetCachedTokensDataAsync(
        request.TextDocument.Uri,
        request.PreviousResultId,
        cancellationToken).ConfigureAwait(false);
    if (cachedTokensData is null)
    {
        return new LSP.SemanticTokens
        {
            ResultId = _tokensCache.GetNextResultId(),
            Data = currentTokensData
        };
    }

    var tokenEdits = ComputeSemanticTokensEdits(cachedTokensData, currentTokensData);

    // If we have edits, generate a new ResultId (and cache the new tokens under it).
    // Otherwise, re-use the previous one.
    var resultId = request.PreviousResultId;
    if (tokenEdits.Length > 0)
    {
        resultId = _tokensCache.GetNextResultId();
        var updatedTokens = new LSP.SemanticTokens { ResultId = resultId, Data = currentTokensData };
        await _tokensCache.UpdateCacheAsync(
            request.TextDocument.Uri, updatedTokens, cancellationToken).ConfigureAwait(false);
    }

    return new SemanticTokensEdits { Edits = tokenEdits, ResultId = resultId };
}
/// <summary>
/// Handles a semantic tokens edits request (cache-first variant): recomputes the
/// full token set, caches it under a fresh result id, then diffs against the
/// previously cached tokens. Returns the full token set when no cached baseline
/// is available for the requested PreviousResultId.
/// </summary>
/// <param name="request">The LSP edits request; must carry a TextDocument and a PreviousResultId.</param>
/// <param name="context">Request context supplying the target document; must not be null.</param>
/// <param name="cancellationToken">Token used to cancel token computation and cache access.</param>
public async Task<SumType<LSP.SemanticTokens, LSP.SemanticTokensEdits>> HandleRequestAsync(
    LSP.SemanticTokensEditsParams request,
    RequestContext context,
    CancellationToken cancellationToken)
{
    Contract.ThrowIfNull(request.TextDocument, "TextDocument is null.");
    Contract.ThrowIfNull(request.PreviousResultId, "previousResultId is null.");
    Contract.ThrowIfNull(context.Document, "Document is null.");

    // Even though we want to ultimately pass edits back to LSP, we still need to
    // compute all semantic tokens, both for caching purposes and in order to have
    // a baseline comparison when computing the edits.
    var computedTokensData = await SemanticTokensHelpers.ComputeSemanticTokensDataAsync(
        context.Document,
        SemanticTokensCache.TokenTypeToIndex,
        range: null,
        cancellationToken).ConfigureAwait(false);
    Contract.ThrowIfNull(computedTokensData, "newSemanticTokensData is null.");

    // Cache the freshly computed tokens under a new result id before diffing.
    var resultId = _tokensCache.GetNextResultId();
    var freshTokens = new LSP.SemanticTokens { ResultId = resultId, Data = computedTokensData };
    await _tokensCache.UpdateCacheAsync(
        request.TextDocument.Uri, freshTokens, cancellationToken).ConfigureAwait(false);

    // Getting the cached tokens for the document. If we don't have an applicable
    // cached token set, we can't calculate edits, so we must return all semantic
    // tokens instead.
    var previousTokensData = await _tokensCache.GetCachedTokensDataAsync(
        request.TextDocument.Uri,
        request.PreviousResultId,
        cancellationToken).ConfigureAwait(false);
    if (previousTokensData == null)
    {
        return freshTokens;
    }

    return new SemanticTokensEdits
    {
        Edits = ComputeSemanticTokensEdits(previousTokensData, computedTokensData),
        ResultId = resultId
    };
}
/// <summary>
/// Computes a minimal single-edit delta between the previously reported token data
/// (<c>_prevData</c>) and the current token data (<c>_data</c>) by trimming the common
/// prefix and suffix of the two arrays. Falls back to returning the full token set
/// when no previous data is available.
/// </summary>
/// <returns>
/// A <see cref="SemanticTokensEdits"/> with at most one edit describing the differing
/// middle section (replace, pure insert, pure delete, or no edits), or the result of
/// <c>GetSemanticTokens()</c> when there is no baseline to diff against.
/// </returns>
public SemanticTokensOrSemanticTokensEdits GetSemanticTokensEdits()
{
    // Without a previous token set there is nothing to diff against.
    if (!_prevData.HasValue)
    {
        return GetSemanticTokens();
    }

    var prevData = _prevData.Value;
    var prevDataLength = prevData.Length;
    var dataLength = _data.Length;

    // Length of the common prefix of the two arrays.
    var startIndex = 0;
    while (startIndex < dataLength
        && startIndex < prevDataLength
        && prevData[startIndex] == _data[startIndex])
    {
        startIndex++;
    }

    if (startIndex < dataLength && startIndex < prevDataLength)
    {
        // Length of the common suffix. FIX: bound the scan so the suffix never
        // overlaps the already-matched prefix; the original scanned the full arrays,
        // which could drive (dataLength - endIndex - startIndex) negative (e.g.
        // prev = [1,1,2], new = [1,2]) and make ImmutableArray.Create throw.
        var endIndex = 0;
        while (endIndex < dataLength - startIndex
            && endIndex < prevDataLength - startIndex
            && prevData[prevDataLength - 1 - endIndex] == _data[dataLength - 1 - endIndex])
        {
            endIndex++;
        }

        // Replace the differing middle section with a single edit.
        var newData = ImmutableArray.Create(_data, startIndex, dataLength - endIndex - startIndex);
        var result = new SemanticTokensEdits
        {
            ResultId = Id,
            Edits = new[]
            {
                new SemanticTokensEdit
                {
                    Start = startIndex,
                    DeleteCount = prevDataLength - endIndex - startIndex,
                    Data = newData
                }
            }
        };
        return result;
    }

    if (startIndex < dataLength)
    {
        // Previous data is a strict prefix of the new data: pure insertion at the end.
        // FIX: was `_dataLen`, an identifier used nowhere else in this method; the
        // local `dataLength` (_data.Length) computed above is the intended value.
        return new SemanticTokensEdits
        {
            ResultId = Id,
            Edits = new[]
            {
                new SemanticTokensEdit
                {
                    Start = startIndex,
                    DeleteCount = 0,
                    Data = ImmutableArray.Create(_data, startIndex, dataLength - startIndex)
                }
            }
        };
    }

    if (startIndex < prevDataLength)
    {
        // New data is a strict prefix of the previous data: pure deletion at the end.
        return new SemanticTokensEdits
        {
            ResultId = Id,
            Edits = new[]
            {
                new SemanticTokensEdit
                {
                    Start = startIndex,
                    DeleteCount = prevDataLength - startIndex
                }
            }
        };
    }

    // The arrays are identical: report no edits.
    return new SemanticTokensEdits { ResultId = Id, Edits = Array.Empty<SemanticTokensEdit>() };
}