/// <summary>
/// Walks backwards from the line preceding <paramref name="currentLine"/> accumulating the
/// lengths of trailing tokens of <paramref name="tokenCategory"/>, so a multi-line token
/// (e.g. an unterminated string or block comment) can be classified as one span.
/// </summary>
/// <param name="validPrevLine">Receives the earliest line that still belongs to the run.</param>
/// <param name="startToken">Updated to the earliest token found in the run.</param>
/// <returns>The combined length of the leading tokens that precede <paramref name="currentLine"/>.</returns>
private int GetLeadingMultiLineTokens(JSScanner JSScanner, ITextSnapshot snapshot, TokenCategory tokenCategory, int firstLine, int currentLine, out int validPrevLine, ref TokenInfo startToken) {
    validPrevLine = currentLine;
    int accumulatedLength = 0;

    for (int lineIndex = currentLine - 1; lineIndex >= 0; lineIndex--) {
        LineTokenization tokenization = GetPreviousTokenization(JSScanner, snapshot, firstLine, lineIndex);
        int tokenCount = tokenization.Tokens.Length;

        if (tokenCount != 0) {
            TokenInfo lastToken = tokenization.Tokens[tokenCount - 1];
            // stop as soon as the line no longer ends in the category we're stitching together
            if (lastToken.Category != tokenCategory) {
                break;
            }
            startToken = lastToken;
            accumulatedLength += lastToken.SourceSpan.Length;
        }

        validPrevLine = lineIndex;

        if (tokenCount > 1) {
            // http://pytools.codeplex.com/workitem/749
            // if there are multiple tokens on this line then our multi-line string
            // is terminated.
            break;
        }
    }

    return accumulatedLength;
}
/// <summary>
/// Attempts to fetch the cached tokenization for the given zero-based line.
/// </summary>
/// <param name="tokenization">The cached entry, or default when none exists.</param>
/// <returns>true when the line has a cached tokenization; otherwise false.</returns>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="line"/> is negative.</exception>
internal bool TryGetTokenization(int line, out LineTokenization tokenization) {
    if (line < 0) {
        throw new ArgumentOutOfRangeException("line", "Must be 0 or greater");
    }
    Utilities.CheckNotNull(_map);

    var cached = _map[line];
    if (cached.Tokens == null) {
        // nothing cached for this line yet
        tokenization = default(LineTokenization);
        return false;
    }

    tokenization = cached;
    return true;
}
/// <summary>
/// Looks for the first cached tokenization preceding the given line.
/// Returns the line we have a tokenization for or minLine - 1 if there is none.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="line"/> is negative.</exception>
internal int IndexOfPreviousTokenization(int line, int minLine, out LineTokenization tokenization) {
    if (line < 0) {
        throw new ArgumentOutOfRangeException("line", "Must be 0 or greater");
    }
    Utilities.CheckNotNull(_map);

    // scan backwards from the line immediately above until we hit a cached entry
    for (int candidate = line - 1; candidate >= minLine; candidate--) {
        if (_map[candidate].Tokens != null) {
            tokenization = _map[candidate];
            return candidate;
        }
    }

    tokenization = default(LineTokenization);
    return minLine - 1;
}
/// <summary>
/// Attempts to fetch the cached tokenization for the given zero-based line.
/// </summary>
/// <param name="tokenization">The cached entry, or default when none exists.</param>
/// <returns>true when the line has a cached tokenization; otherwise false.</returns>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="line"/> is negative.</exception>
internal bool TryGetTokenization(int line, out LineTokenization tokenization) {
    if (line < 0) {
        throw new ArgumentOutOfRangeException("line", "Must be 0 or greater");
    }
    Utilities.CheckNotNull(_map);

    // a line counts as cached only once its Tokens array has been populated
    bool found = _map[line].Tokens != null;
    tokenization = found ? _map[line] : default(LineTokenization);
    return found;
}
/// <summary>
/// Looks for the first cached tokenization preceding the given line.
/// Returns the line we have a tokenization for or minLine - 1 if there is none.
/// </summary>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="line"/> is negative.</exception>
internal int IndexOfPreviousTokenization(int line, int minLine, out LineTokenization tokenization) {
    if (line < 0) {
        throw new ArgumentOutOfRangeException("line", "Must be 0 or greater");
    }
    Utilities.CheckNotNull(_map);

    int scanLine = line - 1;
    while (scanLine >= minLine) {
        // the first populated entry going upwards is the closest cached tokenization
        if (_map[scanLine].Tokens != null) {
            tokenization = _map[scanLine];
            return scanLine;
        }
        scanLine--;
    }

    // no cached tokenization exists at or above minLine
    tokenization = default(LineTokenization);
    return minLine - 1;
}
/// <summary>
/// Adds classification spans to the given collection.
/// Scans a contiguous sub-<paramref name="span"/> of a larger code span, resuming
/// tokenizer state from the nearest cached line above the span and writing newly
/// tokenized lines back into the token cache.
/// </summary>
/// <param name="JSScanner">Scanner used to (re)tokenize lines that are not cached.</param>
/// <param name="classifications">Receives the classification spans produced for the span.</param>
/// <param name="span">The snapshot span to classify; must be non-empty.</param>
private void AddClassifications(JSScanner JSScanner, List<ClassificationSpan> classifications, SnapshotSpan span) {
    Debug.Assert(span.Length > 0);

    var snapshot = span.Snapshot;
    int firstLine = snapshot.GetLineNumberFromPosition(span.Start);
    int lastLine = snapshot.GetLineNumberFromPosition(span.End - 1);

    Contract.Assert(firstLine >= 0);

    _tokenCache.EnsureCapacity(snapshot.LineCount);

    // find the closest line preceding firstLine for which we know categorizer state;
    // tokenization resumes from the line after it
    LineTokenization lineTokenization;
    int currentLine = _tokenCache.IndexOfPreviousTokenization(firstLine, 0, out lineTokenization) + 1;
    object state = lineTokenization.State;

    // track the previous 2 tokens to adjust our classifications of keywords
    // when they shouldn't be displayed as keywords...
    TokenInfoWithLine? prevToken = null, prevPrevToken = null;

    // initialize the previous tokens so we can handle things like:
    //      foo.
    //          get()
    // even if we're called on the line for get()
    int prevLine = currentLine - 1;
    while (prevLine >= 0 && prevToken == null) {
        LineTokenization prevLineTokenization = GetPreviousTokenization(JSScanner, snapshot, firstLine, prevLine);
        // walk the line's tokens right-to-left; the rotation below leaves prevToken
        // holding the valid token closest to our start and prevPrevToken the one before it
        for (int i = prevLineTokenization.Tokens.Length - 1; i >= 0 && prevToken == null; i--) {
            var tempToken = prevLineTokenization.Tokens[i];
            if (IsValidPreviousToken(ref tempToken)) {
                prevToken = prevPrevToken;
                prevPrevToken = new TokenInfoWithLine() { TokenInfo = tempToken, Line = prevLine };
            }
        }
        prevLine--;
    }

    while (currentLine <= lastLine) {
        // reuse the cached tokenization when present, otherwise tokenize and cache
        if (!_tokenCache.TryGetTokenization(currentLine, out lineTokenization)) {
            lineTokenization = TokenizeLine(JSScanner, snapshot, state, currentLine);
            _tokenCache[currentLine] = lineTokenization;
        }
        // carry the end-of-line tokenizer state forward into the next line
        state = lineTokenization.State;

        for (int i = 0; i < lineTokenization.Tokens.Length; i++) {
            var token = lineTokenization.Tokens[i];
            if (token.Category == TokenCategory.IncompleteMultiLineStringLiteral || token.Category == TokenCategory.Comment) {
                IClassificationType type;
                switch (token.Category) {
                    case TokenCategory.IncompleteMultiLineStringLiteral:
                        type = _provider.StringLiteral;
                        break;
                    case TokenCategory.Comment:
                        type = _provider.Comment;
                        break;
                    default:
                        type = null;
                        break;
                }
                Debug.Assert(type != null, "We should have a defined ClassificationType for every token.");

                // we need to walk backwards to find the start of this multi-line string...
                TokenInfo startToken = token;
                int validPrevLine;
                int length = startToken.SourceSpan.Length;
                if (i == 0) {
                    // token opens the line: it may be the continuation of a run
                    // that started on an earlier line
                    length += GetLeadingMultiLineTokens(JSScanner, snapshot, token.Category, firstLine, currentLine, out validPrevLine, ref startToken);
                } else {
                    validPrevLine = currentLine;
                }
                if (i == lineTokenization.Tokens.Length - 1) {
                    // token closes the line: it may continue onto following lines
                    length += GetTrailingMultiLineTokens(JSScanner, snapshot, token.Category, currentLine, state);
                }

                var tokenSpan = new Span(SnapshotSpanToSpan(snapshot, startToken, validPrevLine).Start, length);
                var intersection = span.Intersection(tokenSpan);
                if ((intersection != null && intersection.Value.Length > 0) ||
                    (span.Length == 0 && tokenSpan.Contains(span.Start)) // handle zero-length spans
                    ) {
                    classifications.Add(new ClassificationSpan(new SnapshotSpan(snapshot, tokenSpan), type));
                }
            } else {
                ClassificationSpan classification = null;
                if (token.Category == TokenCategory.Keyword) {
                    // check and see if we're not really a keyword...
                    if (IsKeywordInIdentifierContext(snapshot, prevToken, prevPrevToken, new TokenInfoWithLine() { TokenInfo = token, Line = currentLine })) {
                        classification = GetClassificationSpan(
                            span,
                            token,
                            currentLine,
                            CategoryMap[TokenCategory.Identifier]
                            );
                    }
                }
                if (classification == null) {
                    classification = ClassifyToken(span, token, currentLine);
                }

                if (classification != null) {
                    classifications.Add(classification);
                }
            }

            // shift the two-token history window forward past this token
            if (IsValidPreviousToken(ref token)) {
                prevPrevToken = prevToken;
                prevToken = new TokenInfoWithLine() { TokenInfo = token, Line = currentLine };
            }
        }

        currentLine++;
    }
}