/// <summary>
/// Determines whether a keyword token is actually being used as an identifier
/// (e.g. a member name or a function name) and should therefore not be
/// classified as a keyword.
/// </summary>
private static bool IsKeywordInIdentifierContext(ITextSnapshot snapshot, TokenInfoWithLine? prevToken, TokenInfoWithLine? prevPrevToken, TokenInfoWithLine token) {
    // With no preceding token the keyword stands on its own; treat it as a real keyword.
    if (prevToken == null) {
        return false;
    }

    var previous = prevToken.Value;
    var previousInfo = previous.TokenInfo.Value;

    // https://nodejstools.codeplex.com/workitem/967
    // member.get — a keyword immediately after a member-select operator is a property name.
    if (previousInfo.Category == TokenCategory.Operator && previousInfo.Trigger == TokenTriggers.MemberSelect) {
        return true;
    }

    // https://nodejstools.codeplex.com/workitem/976
    // function static() { } — a keyword used as a function name.
    if (previousInfo.Category == TokenCategory.Keyword) {
        var previousText = snapshot.GetText(SnapshotSpanToSpan(snapshot, previousInfo, previous.Line));
        if (previousText == "function") {
            return true;
        }
    }

    // https://nodejstools.codeplex.com/workitem/976
    // The same thing with a generator function...
    // function *static() { }
    if (prevPrevToken != null && previousInfo.Category == TokenCategory.Operator) {
        if (snapshot.GetText(SnapshotSpanToSpan(snapshot, previousInfo, previous.Line)) == "*") {
            var beforeStar = prevPrevToken.Value;
            var beforeStarSpan = SnapshotSpanToSpan(snapshot, beforeStar.TokenInfo.Value, beforeStar.Line);
            if (snapshot.GetText(beforeStarSpan) == "function") {
                return true;
            }
        }
    }

    return false;
}
/// <summary>
/// Determines whether a keyword token is actually being used as an identifier
/// (e.g. a member name or a function name) and should therefore not be
/// classified as a keyword.
/// </summary>
private static bool IsKeywordInIdentifierContext(ITextSnapshot snapshot, TokenInfoWithLine? prevToken, TokenInfoWithLine? prevPrevToken, TokenInfoWithLine token) {
    if (prevToken != null) {
        var prevValue = prevToken.Value;
        if (prevValue.TokenInfo.Value.Category == TokenCategory.Operator &&
            prevValue.TokenInfo.Value.Trigger == TokenTriggers.MemberSelect) {
            // https://nodejstools.codeplex.com/workitem/967
            // member.get
            return true;
        }

        if (prevValue.TokenInfo.Value.Category == TokenCategory.Keyword &&
            snapshot.GetText(SnapshotSpanToSpan(snapshot, prevValue.TokenInfo.Value, prevValue.Line)) == "function") {
            // https://nodejstools.codeplex.com/workitem/976
            // function static() { }
            return true;
        }

        if (prevPrevToken != null && prevValue.TokenInfo.Value.Category == TokenCategory.Operator) {
            var prevSpan = SnapshotSpanToSpan(snapshot, prevValue.TokenInfo.Value, prevValue.Line);
            if (snapshot.GetText(prevSpan) == "*") {
                var prevPrevValue = prevPrevToken.Value;
                var prevPrevSpan = SnapshotSpanToSpan(snapshot, prevPrevValue.TokenInfo.Value, prevPrevValue.Line);
                if (snapshot.GetText(prevPrevSpan) == "function") {
                    // https://nodejstools.codeplex.com/workitem/976
                    // This time with a generator function...
                    // function *static() { }
                    return true;
                }
            }
        }
    }
    return false;
}
/// <summary>
/// Adds classification spans for <paramref name="span"/> to the given collection,
/// tokenizing lines on demand and caching the per-line tokenization state.
/// </summary>
/// <param name="JSScanner">The scanner used to (re)tokenize lines that are not in the cache.</param>
/// <param name="classifications">The collection that receives the produced classification spans.</param>
/// <param name="span">The non-empty snapshot span to classify.</param>
private void AddClassifications(JSScanner JSScanner, List<ClassificationSpan> classifications, SnapshotSpan span) {
    Debug.Assert(span.Length > 0);

    var snapshot = span.Snapshot;
    int firstLine = snapshot.GetLineNumberFromPosition(span.Start);
    // span.End - 1 so a span ending exactly on a line break doesn't pull in the next line.
    int lastLine = snapshot.GetLineNumberFromPosition(span.End - 1);

    Contract.Assert(firstLine >= 0);

    _tokenCache.EnsureCapacity(snapshot.LineCount);

    // find the closest line preceding firstLine for which we know categorizer state, stop at the codeStartLine:
    LineTokenization lineTokenization;
    int currentLine = _tokenCache.IndexOfPreviousTokenization(firstLine, 0, out lineTokenization) + 1;
    object state = lineTokenization.State;

    // track the previous 2 tokens to adjust our classifications of keywords
    // when they shouldn't be displayed as keywords...
    TokenInfoWithLine? prevToken = null, prevPrevToken = null;

    // initialize the previous tokens so we can handle things like:
    //      foo.
    //          get()
    // even if we're called on the line for get()
    int prevLine = currentLine - 1;
    while (prevLine >= 0 && prevToken == null) {
        LineTokenization prevLineTokenization = GetPreviousTokenization(JSScanner, snapshot, firstLine, prevLine);
        // walk the preceding line's tokens right-to-left until we find a usable one
        for (int i = prevLineTokenization.Tokens.Length - 1; i >= 0 && prevToken == null; i--) {
            var tempToken = prevLineTokenization.Tokens[i];
            if (IsValidPreviousToken(ref tempToken)) {
                prevToken = prevPrevToken;
                prevPrevToken = new TokenInfoWithLine() { TokenInfo = tempToken, Line = prevLine };
            }
        }
        prevLine--;
    }

    while (currentLine <= lastLine) {
        // re-tokenize the line only on a cache miss; the cache carries the scanner state forward
        if (!_tokenCache.TryGetTokenization(currentLine, out lineTokenization)) {
            lineTokenization = TokenizeLine(JSScanner, snapshot, state, currentLine);
            _tokenCache[currentLine] = lineTokenization;
        }
        state = lineTokenization.State;

        for (int i = 0; i < lineTokenization.Tokens.Length; i++) {
            var token = lineTokenization.Tokens[i];

            if (token.Category == TokenCategory.IncompleteMultiLineStringLiteral || token.Category == TokenCategory.Comment) {
                IClassificationType type;
                switch (token.Category) {
                    case TokenCategory.IncompleteMultiLineStringLiteral:
                        type = _provider.StringLiteral;
                        break;
                    case TokenCategory.Comment:
                        type = _provider.Comment;
                        break;
                    default:
                        type = null;
                        break;
                }
                Debug.Assert(type != null, "We should have a defined ClassificationType for every token.");

                // we need to walk backwards to find the start of this multi-line string...
                TokenInfo startToken = token;
                int validPrevLine;
                int length = startToken.SourceSpan.Length;
                if (i == 0) {
                    // token opens the line — the literal/comment may have started on an earlier line
                    length += GetLeadingMultiLineTokens(JSScanner, snapshot, token.Category, firstLine, currentLine, out validPrevLine, ref startToken);
                } else {
                    validPrevLine = currentLine;
                }
                if (i == lineTokenization.Tokens.Length - 1) {
                    // token closes the line — the literal/comment may continue onto later lines
                    length += GetTrailingMultiLineTokens(JSScanner, snapshot, token.Category, currentLine, state);
                }

                var tokenSpan = new Span(SnapshotSpanToSpan(snapshot, startToken, validPrevLine).Start, length);
                var intersection = span.Intersection(tokenSpan);
                if ((intersection != null && intersection.Value.Length > 0) ||
                    (span.Length == 0 && tokenSpan.Contains(span.Start)) // handle zero-length spans
                    ) {
                    classifications.Add(new ClassificationSpan(new SnapshotSpan(snapshot, tokenSpan), type));
                }
            } else {
                ClassificationSpan classification = null;
                if (token.Category == TokenCategory.Keyword) {
                    // check and see if we're not really a keyword...
                    if (IsKeywordInIdentifierContext(snapshot, prevToken, prevPrevToken, new TokenInfoWithLine() { TokenInfo = token, Line = currentLine })) {
                        classification = GetClassificationSpan(
                            span,
                            token,
                            currentLine,
                            CategoryMap[TokenCategory.Identifier]
                        );
                    }
                }
                if (classification == null) {
                    classification = ClassifyToken(span, token, currentLine);
                }

                if (classification != null) {
                    classifications.Add(classification);
                }
            }

            // remember the last two significant tokens for the keyword-as-identifier check above
            if (IsValidPreviousToken(ref token)) {
                prevPrevToken = prevToken;
                prevToken = new TokenInfoWithLine() { TokenInfo = token, Line = currentLine };
            }
        }

        currentLine++;
    }
}
/// <summary>
/// Adds classification spans for <paramref name="span"/> to the given collection,
/// tokenizing lines on demand and caching the per-line tokenization state.
/// </summary>
/// <param name="JSScanner">The scanner used to (re)tokenize lines that are not in the cache.</param>
/// <param name="classifications">The collection that receives the produced classification spans.</param>
/// <param name="span">The non-empty snapshot span to classify.</param>
private void AddClassifications(JSScanner JSScanner, List<ClassificationSpan> classifications, SnapshotSpan span) {
    Debug.Assert(span.Length > 0);

    var snapshot = span.Snapshot;
    int firstLine = snapshot.GetLineNumberFromPosition(span.Start);
    // span.End - 1 so a span ending exactly on a line break doesn't pull in the next line.
    int lastLine = snapshot.GetLineNumberFromPosition(span.End - 1);

    Contract.Assert(firstLine >= 0);

    _tokenCache.EnsureCapacity(snapshot.LineCount);

    // find the closest line preceding firstLine for which we know categorizer state, stop at the codeStartLine:
    LineTokenization lineTokenization;
    int currentLine = _tokenCache.IndexOfPreviousTokenization(firstLine, 0, out lineTokenization) + 1;
    object state = lineTokenization.State;

    // track the previous 2 tokens to adjust our classifications of keywords
    // when they shouldn't be displayed as keywords...
    TokenInfoWithLine? prevToken = null, prevPrevToken = null;

    // initialize the previous tokens so we can handle things like:
    //      foo.
    //          get()
    // even if we're called on the line for get()
    int prevLine = currentLine - 1;
    while (prevLine >= 0 && prevToken == null) {
        LineTokenization prevLineTokenization = GetPreviousTokenization(JSScanner, snapshot, firstLine, prevLine);
        // walk the preceding line's tokens right-to-left until we find a usable one
        for (int i = prevLineTokenization.Tokens.Length - 1; i >= 0 && prevToken == null; i--) {
            var tempToken = prevLineTokenization.Tokens[i];
            if (IsValidPreviousToken(ref tempToken)) {
                prevToken = prevPrevToken;
                prevPrevToken = new TokenInfoWithLine() { TokenInfo = tempToken, Line = prevLine };
            }
        }
        prevLine--;
    }

    while (currentLine <= lastLine) {
        // re-tokenize the line only on a cache miss; the cache carries the scanner state forward
        if (!_tokenCache.TryGetTokenization(currentLine, out lineTokenization)) {
            lineTokenization = TokenizeLine(JSScanner, snapshot, state, currentLine);
            _tokenCache[currentLine] = lineTokenization;
        }
        state = lineTokenization.State;

        for (int i = 0; i < lineTokenization.Tokens.Length; i++) {
            var token = lineTokenization.Tokens[i];

            if (token.Category == TokenCategory.IncompleteMultiLineStringLiteral || token.Category == TokenCategory.Comment) {
                IClassificationType type;
                switch (token.Category) {
                    case TokenCategory.IncompleteMultiLineStringLiteral:
                        type = _provider.StringLiteral;
                        break;
                    case TokenCategory.Comment:
                        type = _provider.Comment;
                        break;
                    default:
                        type = null;
                        break;
                }
                Debug.Assert(type != null, "We should have a defined ClassificationType for every token.");

                // we need to walk backwards to find the start of this multi-line string...
                TokenInfo startToken = token;
                int validPrevLine;
                int length = startToken.SourceSpan.Length;
                if (i == 0) {
                    // token opens the line — the literal/comment may have started on an earlier line
                    length += GetLeadingMultiLineTokens(JSScanner, snapshot, token.Category, firstLine, currentLine, out validPrevLine, ref startToken);
                } else {
                    validPrevLine = currentLine;
                }
                if (i == lineTokenization.Tokens.Length - 1) {
                    // token closes the line — the literal/comment may continue onto later lines
                    length += GetTrailingMultiLineTokens(JSScanner, snapshot, token.Category, currentLine, state);
                }

                var tokenSpan = new Span(SnapshotSpanToSpan(snapshot, startToken, validPrevLine).Start, length);
                var intersection = span.Intersection(tokenSpan);
                if ((intersection != null && intersection.Value.Length > 0) ||
                    (span.Length == 0 && tokenSpan.Contains(span.Start)) // handle zero-length spans
                    ) {
                    classifications.Add(new ClassificationSpan(new SnapshotSpan(snapshot, tokenSpan), type));
                }
            } else {
                ClassificationSpan classification = null;
                if (token.Category == TokenCategory.Keyword) {
                    // check and see if we're not really a keyword...
                    if (IsKeywordInIdentifierContext(snapshot, prevToken, prevPrevToken, new TokenInfoWithLine() { TokenInfo = token, Line = currentLine })) {
                        classification = GetClassificationSpan(
                            span,
                            token,
                            currentLine,
                            CategoryMap[TokenCategory.Identifier]
                        );
                    }
                }
                if (classification == null) {
                    classification = ClassifyToken(span, token, currentLine);
                }

                if (classification != null) {
                    classifications.Add(classification);
                }
            }

            // remember the last two significant tokens for the keyword-as-identifier check above
            if (IsValidPreviousToken(ref token)) {
                prevPrevToken = prevToken;
                prevToken = new TokenInfoWithLine() { TokenInfo = token, Line = currentLine };
            }
        }

        currentLine++;
    }
}