/// <summary>
/// Determines whether <paramref name="position"/> falls inside the argument list of a
/// 'library(' or 'require(' call on its line — i.e. a place where package-name
/// completion should be offered: library(|, library(|), or over a name as in library(ba|se).
/// </summary>
/// <param name="editorBuffer">Buffer whose current snapshot supplies the line text.</param>
/// <param name="position">Absolute caret position in the buffer.</param>
/// <returns>True when the caret is in a package-list completion context.</returns>
internal static bool IsPackageListCompletion(IEditorBuffer editorBuffer, int position) {
    var snapshot = editorBuffer.CurrentSnapshot;
    var line = snapshot.GetLineFromPosition(position);
    string lineText = line.GetText();
    // Caret position relative to the line start; the tokens produced below are line-relative too.
    int linePosition = position - line.Start;
    // We should be either at library(| or inside library(|)
    // or over package name like in library(ba|se)
    // Go left and right looking for
    RTokenizer tokenizer = new RTokenizer();
    ITextProvider textProvider = new TextStream(lineText);
    IReadOnlyTextRangeCollection <RToken> c = tokenizer.Tokenize(textProvider, 0, textProvider.Length);
    TokenStream <RToken> tokens = new TokenStream <RToken>(c, RToken.EndOfStreamToken);
    while (!tokens.IsEndOfStream()) {
        // Only tokens starting before the caret can open the call the caret is inside of.
        if (tokens.CurrentToken.Start >= linePosition) {
            break;
        }
        if (tokens.CurrentToken.TokenType == RTokenType.Identifier) {
            string identifier = textProvider.GetText(tokens.CurrentToken);
            if (identifier == "library" || identifier == "require") {
                tokens.MoveToNextToken();
                if (tokens.CurrentToken.TokenType == RTokenType.OpenBrace) {
                    RToken openBrace = tokens.CurrentToken;
                    while (!tokens.IsEndOfStream()) {
                        if (tokens.CurrentToken.TokenType == RTokenType.CloseBrace) {
                            // Completion context only when caret is between '(' end and ')' start.
                            if (linePosition >= openBrace.End && linePosition <= tokens.CurrentToken.Start) {
                                return(true);
                            }
                            return(false);
                        } else if (tokens.NextToken.TokenType == RTokenType.EndOfStream) {
                            // Unterminated call such as 'library(' at end of line: treated as inside.
                            return(true);
                        }
                        tokens.MoveToNextToken();
                    }
                }
            }
        }
        tokens.MoveToNextToken();
    }
    return(false);
}
/// <summary>
/// Parses RD help data into structured function descriptions. A single RD
/// document frequently documents several related functions, so all of them
/// are extracted in one pass rather than re-parsing the same data per function.
/// </summary>
/// <param name="rdHelpData">Raw RD (R documentation) text.</param>
/// <returns>Information for every function described by the data.</returns>
public static IReadOnlyList <IFunctionInfo> GetFunctionInfos(string rdHelpData) {
    ITextProvider text = new TextStream(rdHelpData);
    var rdTokenizer = new RdTokenizer(tokenizeRContent: false);
    IReadOnlyTextRangeCollection <RdToken> tokenCollection = rdTokenizer.Tokenize(text, 0, text.Length);
    return ParseFunctions(new RdParseContext(tokenCollection, text));
}
/// <summary>
/// Reads the next significant token and consumes it: the cursor is advanced
/// past the token's end unless the stream is already at Eof.
/// </summary>
public static CToken GetToken(TextStream cursor) {
    CToken token = PeekToken(cursor);
    if (token.Kind == CppTokenKind.Eof) {
        return token;
    }
    Debug.Assert(token.Length > 0);
    cursor.Seek(token.End);
    return token;
}
/// <summary>
/// Builds a Name from a fully qualified name string.
/// </summary>
/// <param name="_fullyQualifiedName">The fully qualified name text; may be null or empty.</param>
/// <returns>The parsed Name, or null when the input is null or empty.</returns>
public static Name Read(string _fullyQualifiedName) {
    // string.IsNullOrEmpty covers both of the original guard conditions (null and "").
    if (string.IsNullOrEmpty(_fullyQualifiedName)) {
        return(null);
    }
    TextStream S = new TextStream(_fullyQualifiedName);
    return(Read(S.Pop(), S));
}
/// <summary>
/// IsWhitespaceOnlyBetweenPositions: checks representative (start, end) ranges,
/// including a range past the end of the text, against expected results.
/// </summary>
public void TextHelperTest_IsWhitespaceOnlyBetweenPositionsTest() {
    ITextProvider tp = new TextStream("0 \n3 \r 7 \r\n AB ");
    var cases = new[] {
        Tuple.Create(0, 1, false),
        Tuple.Create(1, 2, true),
        Tuple.Create(2, 5, false),
        Tuple.Create(5, 10, false),
        Tuple.Create(tp.Length - 1, tp.Length, true),
        Tuple.Create(100, 200, true),   // out-of-range span
    };
    foreach (var t in cases) {
        tp.IsWhitespaceOnlyBetweenPositions(t.Item1, t.Item2).Should().Be(t.Item3);
    }
}
/// <summary>
/// Throws an Exception describing a parse error, including the stream's current
/// line/column position and a single-line rendering of the surrounding context.
/// </summary>
static void ThrowError(TextStream _stream, string _message) {
    // Flatten line breaks and tabs so the context renders on one line under the caret marker.
    string flatContext = _stream.Context.Replace("\r", " ").Replace("\n", " ").Replace("\t", " ");
    Tuple <int, int> position = _stream.LineColumnIndex;
    throw new Exception(_message + " at line " + position.Item1 + ", column " + position.Item2 + "\n" + flatContext + "\n ^");
}
/// <summary>
/// Lazily initializes the error stream/writer pair. Interlocked.CompareExchange
/// ensures concurrent callers agree on a single winning instance.
/// </summary>
private void InitializeErrorOutput() {
    if (_errorStream == null) {
#if SILVERLIGHT
        // Silverlight has no OpenStandardError; wrap the console error writer instead.
        Stream errorStream = new TextStream(Console.Error);
#else
        Stream errorStream = Console.OpenStandardError();
#endif
        // Only the first racer's stream is installed; later instances are discarded.
        Interlocked.CompareExchange(ref _errorStream, errorStream, null);
        Interlocked.CompareExchange(ref _errorWriter, Console.Error, null);
    }
}
/// <summary>
/// Scans forward from the stream's current position for the first
/// ASPDirectiveEndToken.
/// </summary>
/// <param name="stream">The stream to search; its position is advanced.</param>
/// <returns>The start offset of the first ASP end token, or -1 if none exists
/// at or after the current offset.</returns>
public static int GetFirstEndScriptTag(TextStream stream) {
    for (;;) {
        if (stream.Token.Key == "ASPDirectiveEndToken") {
            return stream.Token.StartOffset;
        }
        stream.SeekToken(1);
        if (stream.IsAtDocumentEnd) {
            return -1;
        }
    }
}
/// <summary>
/// Lexes a C/C++ multi-line comment ('/* ... */'), classifying it as plain or
/// doxygen-style, and advances the stream past '*/' (or to EOF if unterminated).
/// </summary>
/// <param name="stream">Character stream positioned at the leading '/' when <paramref name="init"/> is true.</param>
/// <param name="init">True when called at the comment opener; false when resuming an already-open comment.</param>
/// <returns>The token kind and whether the comment was terminated by '*/'.</returns>
public static LexResult LexMultiLineComment(TextStream stream, bool init) {
    CppTokenKind kind = CppTokenKind.MultiLineComment;
    if (init) {
        Debug.Assert(stream.Peek(0) == '/');
        Debug.Assert(stream.Peek(1) == '*');
        stream.AdvanceColumns(2);
        char n = stream.Peek();
        // A doc marker right after '/*' (per DoxygenSyntax.MultiLineDocChars) upgrades the kind.
        if (DoxygenSyntax.MultiLineDocChars.Contains(n)) {
            stream.AdvanceColumn();
            kind = CppTokenKind.MultiLineCommentDoc;
            // Special case: '/**/' is a complete, empty doc comment.
            if (n == '*' && stream.Peek() == '/') {
                stream.AdvanceColumn();
                return(new LexResult(kind, true));
            }
        }
    }
    bool isComplete = false;
    while (!stream.IsEOF) {
        char c0 = stream.Peek();
        if (c0 == '*') {
            char c1 = stream.Peek(1);
            if (c1 == '/') {
                // Found the terminator.
                stream.AdvanceColumns(2);
                isComplete = true;
                break;
            } else {
                stream.AdvanceColumn();
            }
        } else if (char.IsWhiteSpace(c0)) {
            // Bulk-skip whitespace runs (keeps line/column tracking consistent).
            stream.SkipAllWhitespaces();
        } else {
            stream.AdvanceColumn();
        }
    }
    return(new LexResult(kind, isComplete));
}
/// <summary>
/// Publishes a buffer change: captures the pre-change text, bumps the version,
/// creates the new snapshot, and raises ChangedHighPriority followed by Changed
/// with a TextChange describing the edit.
/// </summary>
private void FireChanged(int start, int oldLength, int newLength) {
    var before = new TextStream(CurrentSnapshot.GetText());

    _version++;
    _currentSnapshot = new EditorBufferSnapshot(this, _content.ToString(), _version);
    var after = new TextStream(_currentSnapshot.GetText());

    var args = new TextChangeEventArgs(new TextChange(start, oldLength, newLength, before, after));
    // High-priority subscribers (e.g. internal bookkeeping) are notified first.
    ChangedHighPriority?.Invoke(this, args);
    Changed?.Invoke(this, args);
}
/// <summary>
/// Reads a name while alphanumeric characters are found. Handles quoted names
/// ("\"example\""), special bracketed names ("&lt;example&gt;") and child markers that
/// split the name into segments. Uses StringBuilder instead of the original
/// per-character string concatenation (which was O(n^2)).
/// </summary>
/// <param name="c">The first character of the name (already popped from the stream).</param>
/// <param name="S">Stream to read the remaining characters from.</param>
/// <returns>The parsed Name built from the collected segments.</returns>
internal static Name Read(char c, TextStream S) {
    List <string> results = new List <string>();
    var current = new System.Text.StringBuilder();
    while ((c == CHAR_CHILD_MARKER || c == CHAR_STRING_DELIMITER || c == CHAR_SPECIAL_STRING_DELIMITER_START || c == CHAR_SPECIAL_STRING_DELIMITER_END || IsAlphaNumeric(c))) {
        if (c == CHAR_STRING_DELIMITER) {
            // Read a quoted-name like "\"example\"": all but its last segment become
            // completed segments; its last segment replaces the one being built.
            Name temp = ReadQuotedName(c, S);
            for (int i = 0; i < temp.m_name.Length - 1; i++) {
                results.Add(temp.m_name[i]);
            }
            current = new System.Text.StringBuilder(temp.m_name[temp.m_name.Length - 1]);
        } else if (c == CHAR_SPECIAL_STRING_DELIMITER_START) {
            // Read a special-name like "<example>", delimiters included, replacing
            // the segment being built.
            current = new System.Text.StringBuilder();
            current.Append(c);
            while ((c = S.Pop()) != CHAR_SPECIAL_STRING_DELIMITER_END) {
                current.Append(c);
            }
            current.Append(c);
        } else if (c == CHAR_CHILD_MARKER) {
            // Segment boundary: finish the current segment and start a new one.
            results.Add(current.ToString());
            current = new System.Text.StringBuilder();
        } else {
            current.Append(c);
        }
        if (S.EOT) {
            break;
        }
        c = S.Pop();
    }
    results.Add(current.ToString());
    // The last popped character did not belong to the name; put it back.
    S.GoBackOne();
    return(new Name(results.ToArray()));
}
/// <summary>
/// Builds a MemberList for the given syntaxEditor: locates the AST node at the
/// previous non-whitespace token and collects the plugin database's auto-complete
/// items for it. Returns null when there is nothing to show (no compilation unit,
/// caret in a comment, or no items).
/// </summary>
/// <param name="syntaxEditor"></param>
/// <returns>The populated member list, or null.</returns>
private MemberList BuildMemberList(ActiproSoftware.SyntaxEditor.SyntaxEditor syntaxEditor) {
    // Get the target text range
    int caret = syntaxEditor.Caret.Offset;
    TextRange targetTextRange = TextRange.Deleted;
    TextStream stream = syntaxEditor.Document.GetTextStream(caret);

    // Get the compilation unit
    var cu = syntaxEditor.Document.SemanticParseData as CompilationUnit;
    if (cu == null) {
        return(null);
    }

    var itemlist = new Hashtable();
    stream.GoToPreviousNonWhitespaceToken();
    // No completion inside comments.
    if (stream.Token.IsComment) {
        return(null);
    }
    var node = cu.FindNodeRecursive <LuatAstNodeBase>(stream.Offset);
    if (null != node) {
        // Keying the Hashtable by item name de-duplicates repeated entries;
        // a later duplicate overwrites the earlier one.
        foreach (AutoCompleteItem item in m_plugin.Database.GetAutoCompleteList(node, caret)) {
            itemlist[item.Name] = new IntelliPromptMemberListItem(item.Name, (int)item.Icon, item.Description);
        }
        targetTextRange = node.GetAutoCompleteTextRange(caret);
    }
    if (itemlist.Count == 0) {
        return(null);
    }
    var memberlist = new MemberList {
        List = new IntelliPromptMemberListItem[itemlist.Count]
    };
    itemlist.Values.CopyTo(memberlist.List, 0);
    memberlist.TargetTextRange = targetTextRange;
    return(memberlist);
}
/// <summary>
/// Consumes an exponent suffix (e.g. 'e+12', 'P-3') if the stream currently
/// starts with <paramref name="test"/> (case-insensitive): the marker, an
/// optional sign, then any run of numeric characters. Otherwise leaves the
/// stream untouched.
/// </summary>
private static void AdvanceExponent(TextStream stream, char test) {
    if (char.ToLower(test) != char.ToLower(stream.Peek())) {
        return;
    }
    stream.AdvanceColumn();                 // consume the exponent marker
    char sign = stream.Peek();
    if (sign == '+' || sign == '-') {
        stream.AdvanceColumn();             // consume the optional sign
    }
    stream.AdvanceColumnsWhile(SyntaxUtils.IsNumeric);
}
/// <summary>
/// Gets the offset of the start of the specified language. If the stream is not currently
/// in that language, the result is meaningless.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <param name="language">The language we are currently in and should find the start of.</param>
/// <param name="startTokenKey">The token which delimits this language block. If used, stops
/// the delimiters being counted as part of the language block.</param>
/// <returns>The offset at which the language starts.</returns>
public static int GetStartOfLanguageBlock(TextStream stream, string language, string startTokenKey) {
    do {
        // Walked out of the block (different language) or onto its delimiter token:
        // step forward one token so the reported offset is inside the block.
        if (stream.Token.Language.Tag.ToString() != language || stream.Token.Key == startTokenKey) {
            stream.SeekToken(1);
            break;
        }
        stream.SeekToken(-1);   // keep scanning backwards
    }while (stream.Token.StartOffset > 0);
    return(stream.Token.StartOffset);
}
/// <summary>
/// Reports an unexpected character at the stream's current position, rendering
/// control characters as printable escapes in the message.
/// </summary>
/// <param name="_stream">Stream whose position and context are reported.</param>
/// <param name="c">The unexpected character.</param>
/// <param name="_message">Description of what was being parsed.</param>
static void SyntaxError(TextStream _stream, char c, string _message) {
    string C = "" + c;
    // Fixed: the original assigned "\r"/"\n" (the same control characters C already
    // held — a no-op); the intent is clearly to show the printable escape sequence.
    if (c == '\r') {
        C = "\\r";
    } else if (c == '\n') {
        C = "\\n";
    }
    ThrowError(_stream, "Unexpected character '" + C + "' while parsing " + _message);
}
/// <summary>
/// Gets the offset of the end of the specified language. If the stream is not currently
/// in that language, the result is meaningless.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <param name="language">The language we are currently in and should find the end of.</param>
/// <param name="endTokenKey">The token which delimits this language block. If used, stops
/// the delimiters being counted as part of the language block.</param>
/// <returns>The offset at which the language ends.</returns>
public static int GetEndOfLanguageBlock(TextStream stream, string language, string endTokenKey) {
    do {
        // Walked out of the block, hit document end, or landed on the delimiter:
        // step back one token so the reported offset is inside the block.
        if (stream.Token.Language.Tag.ToString() != language || stream.IsAtDocumentEnd || stream.Token.Key == endTokenKey) {
            stream.SeekToken(-1);
            break;
        }
        stream.SeekToken(1);   // keep scanning forwards
    }while (true);
    return(stream.Token.EndOffset);
}
/// <summary>
/// Parses RD '\usage' content into a list of function signatures for intellisense.
/// Stops at the first token that is not a function-name identifier.
/// </summary>
public static IReadOnlyList <ISignatureInfo> ParseSignatures(string usageContent) {
    // RD signature text may contain \dots sequence which denotes ellipsis.
    // R parser does not know about it and hence we will replace \dots by ...
    // Also, signatures may contain S3 method info like
    // '\method{as.matrix}{data.frame}(x, rownames.force = NA, \dots)'
    // which we need to filter out since they are irrelevant to intellisense.
    var signatures = new List <ISignatureInfo>();
    usageContent = usageContent.Replace(@"\dots", "...");

    var tokenizer = new RTokenizer(separateComments: true);
    var collection = tokenizer.Tokenize(usageContent);
    var textProvider = new TextStream(usageContent);
    var tokens = new TokenStream <RToken>(collection, RToken.EndOfStreamToken);
    var parseContext = new ParseContext(textProvider, TextRange.FromBounds(tokens.CurrentToken.Start, textProvider.Length), tokens, tokenizer.CommentTokens);

    while (!tokens.IsEndOfStream()) {
        // Filter out '\method{...}{}(signature)
        if (tokens.CurrentToken.TokenType == RTokenType.OpenCurlyBrace) {
            // Check if { is preceded by \method
            // NOTE(review): this branch is empty — the '\method' filtering described
            // above appears unimplemented; a '{' token falls through to the break
            // below and terminates parsing. Verify this is the intended behavior.
        }
        // Each signature is expected to begin with the function name identifier.
        if (tokens.CurrentToken.TokenType != RTokenType.Identifier) {
            break;
        }
        var functionName = textProvider.GetText(tokens.CurrentToken);
        tokens.MoveToNextToken();
        var info = ParseSignature(functionName, parseContext);
        if (info != null) {
            signatures.Add(info);
        }
    }
    return(signatures);
}
/// <summary>
/// Checks the given line and determines if it is one single language.
/// </summary>
/// <param name="line">The line to check.</param>
/// <param name="stream">The TextStream from the Document the line belongs to. We
/// need this because the line does not have a reference to its parent Document.</param>
/// <param name="language">The text representation of the language. We compare this against
/// stream.Token.Language.Tag.ToString().</param>
/// <returns>True if the line contains one language, false if it contains two or more.</returns>
public static bool IsEntireLineOneLanguage(DocumentLine line, TextStream stream, string language) {
    bool isLineTemplateLanguage = true;
    // Start scanning from the first token of the line.
    stream.Offset = line.StartOffset;
    do {
        if (stream.Token.Language.Tag.ToString() != language) {
            // Found a token in a different language — the line is mixed.
            isLineTemplateLanguage = false;
            break;
        }
        stream.SeekToken(1);
    }while (stream.Token.EndOffset <= line.EndOffset && stream.IsAtDocumentEnd == false);
    return(isLineTemplateLanguage);
}
/// <summary>
/// Attempts to read a Name starting at character <paramref name="c"/>.
/// Parsing happens only when the character can begin a name (a quote, a special
/// delimiter, a child marker, or an alphanumeric); otherwise the result is null.
/// </summary>
/// <returns>True when a name was read into <paramref name="_name"/>.</returns>
static bool ReadName(char c, TextStream S, ref Name _name) {
    bool canStartName =
        c == CHAR_STRING_DELIMITER ||
        c == CHAR_SPECIAL_STRING_DELIMITER_START ||
        c == CHAR_CHILD_MARKER ||
        IsAlphaNumeric(c);

    _name = canStartName ? Name.Read(c, S) : null;
    return _name != null;
}
/// <summary>
/// IsNewLineAfterPosition (extension form): checks every position of a fixture
/// string containing \n, \r and \r\n breaks against the expected results.
/// </summary>
public void TextHelperTest_IsNewLineAfterPositionTest() {
    ITextProvider tp = new TextStream("0 \n3 \r 7 \r\n ");
    // expected[pos] == whether a line break immediately follows position pos.
    bool[] expected = {
        false, true, true, false, true, true, false,
        false, true, true, true, false, false,
    };
    for (int pos = 0; pos < expected.Length; pos++) {
        tp.IsNewLineAfterPosition(pos).Should().Be(expected[pos]);
    }
}
/// <summary>
/// IsNewLineAfterPosition (static-call form): checks every position of a fixture
/// string containing \n, \r and \r\n breaks against the expected results.
/// </summary>
public void TextHelperTest_IsNewLineAfterPositionTest() {
    ITextProvider tp = new TextStream("0 \n3 \r 7 \r\n ");
    // expected[pos] == whether a line break immediately follows position pos.
    bool[] expected = {
        false, true, true, false, true, true, false,
        false, true, true, true, false, false,
    };
    for (int pos = 0; pos < expected.Length; pos++) {
        TextHelper.IsNewLineAfterPosition(tp, pos).Should().Be(expected[pos]);
    }
}
/// <summary>
/// Reads a serialized quote entry of the form '{Price;Volume;}', optionally
/// preceded by 'name = ' when <paramref name="name"/> is supplied.
/// </summary>
public static LrpQuoteEntry ReadQuoteEntry(this TextStream stream, string name = null) {
    if (name != null) {
        // Named form: 'name = {...}'.
        stream.ValidateVerbatimText(name);
        stream.ValidateVerbatimText(" = ");
    }

    stream.ValidateVerbatimText('{');
    var entry = new LrpQuoteEntry();
    entry.Price = stream.ReadDouble("Price");
    stream.ValidateVerbatimText(';');
    entry.Volume = stream.ReadDouble("Volume");
    stream.ValidateVerbatimText(';');
    stream.ValidateVerbatimText('}');
    return entry;
}
/// <summary>
/// IsNewLineBeforePosition: checks every position of a fixture string containing
/// \n, \r and \r\n breaks against the expected results.
/// </summary>
public void TextHelperTest_IsNewLineBeforePositionTest() {
    ITextProvider tp = new TextStream("01\n34\r678\r\nBC");
    // expected[pos] == whether a line break immediately precedes position pos.
    bool[] expected = {
        false, false, false, true, false, false, true,
        false, false, false, true, true, false,
    };
    for (int pos = 0; pos < expected.Length; pos++) {
        tp.IsNewLineBeforePosition(pos).Should().Be(expected[pos]);
    }
}
/// <summary>
/// Creates a token spanning [startPos, endPos), reading its text from the stream.
/// Identifier literals matching a reserved/type/global-class keyword list are
/// reclassified to the corresponding keyword kind.
/// </summary>
private static CToken MakeToken(CppTokenKind kind, TextStream stream, TextPosition startPos, TextPosition endPos) {
    string value = stream.GetSourceText(startPos.Index, endPos.Index - startPos.Index);
    if (kind == CppTokenKind.IdentLiteral) {
        if (CppLexer.ReservedKeywords.Contains(value)) {
            kind = CppTokenKind.ReservedKeyword;
        } else if (CppLexer.TypeKeywords.Contains(value) || CppLexer.GlobalClassKeywords.Contains(value)) {
            kind = CppTokenKind.TypeKeyword;
        }
    }
    return new CToken(kind, startPos, endPos, value);
}
/// <summary>
/// Lexes a C/C++ single-line comment ('// ...'), classifying doc comments by the
/// character after '//' (per DoxygenSyntax.SingleLineDocChars), and advances the
/// stream to the line break, an invalid character, or EOF.
/// </summary>
/// <param name="stream">Character stream positioned at the first '/' when <paramref name="init"/> is true.</param>
/// <param name="init">True when called at the comment opener.</param>
/// <returns>The token kind and whether the comment ran cleanly to a line break or EOF.</returns>
public static LexResult LexSingleLineComment(TextStream stream, bool init) {
    CppTokenKind kind = CppTokenKind.SingleLineComment;
    if (init) {
        Debug.Assert(stream.Peek(0) == '/');
        Debug.Assert(stream.Peek(1) == '/');
        stream.AdvanceColumns(2);
        if (DoxygenSyntax.SingleLineDocChars.Contains(stream.Peek())) {
            stream.AdvanceColumn();
            kind = CppTokenKind.SingleLineCommentDoc;
        }
    }
    while (!stream.IsEOF) {
        // Fixed: removed the unused one-character lookahead ('char c1 = stream.Peek(1)')
        // that was computed on every iteration but never read.
        char c0 = stream.Peek();
        if (c0 == TextStream.InvalidCharacter) {
            break;
        } else if (SyntaxUtils.IsLineBreak(c0)) {
            break;
        } else if (c0 == '\t') {
            stream.AdvanceTab();     // tabs advance by tab width, not one column
        } else {
            stream.AdvanceColumn();
        }
    }
    // Complete only when terminated by EOF or a line break (not an invalid char).
    bool isComplete = stream.IsEOF || SyntaxUtils.IsLineBreak(stream.Peek());
    return(new LexResult(kind, isComplete));
}
/// <summary>
/// Returns the next significant token — skipping Spacings and EndOfLine tokens —
/// without consuming it: the cursor position is restored before returning.
/// </summary>
public static CToken PeekToken(TextStream cursor) {
    TextPosition startPos = cursor.TextPosition;   // remember so we can rewind
    CToken token = PeekTokenRaw(cursor);
    do {
        if (token.Kind == CppTokenKind.Eof) {
            break;
        }
        // First non-trivia token found.
        if (!(token.Kind == CppTokenKind.Spacings || token.Kind == CppTokenKind.EndOfLine)) {
            break;
        }
        Debug.Assert(token.Length > 0);
        // Skip past the trivia token and peek again.
        cursor.Seek(token.End);
        token = PeekTokenRaw(cursor);
    } while (!cursor.IsEOF);
    cursor.Seek(startPos);   // restore: this is a peek, not a read
    return(token);
}
/// <summary>
/// Reads the rest of the current line from the stream, skipping a leading
/// comment marker if present and dropping '\r' characters. Accumulates into a
/// StringBuilder instead of the original per-character string concatenation.
/// </summary>
/// <param name="S">Stream to read from; consumed up to and including '\n'.</param>
/// <returns>The line content without the comment marker or line-break characters.</returns>
public static string Read(TextStream S) {
    var result = new System.Text.StringBuilder();
    char c = S.Pop();
    if (c == CHAR_COMMENT) {
        // Skip the comment marker itself.
        c = S.Pop();
    }
    // Read until we reach EOL
    while (c != '\n') {
        if (c != '\r') {
            result.Append(c);
        }
        c = S.Pop();
    }
    return result.ToString();
}
/// <summary>
/// Verifies TreeTextChange range accounting and that Combine merges a previous
/// and a next change into the expected start/old-end/new-end triple.
/// </summary>
public void Combine(
    int prevStart, int prevOldEnd, int prevNewEnd,
    int nextStart, int nextOldEnd, int nextNewEnd,
    int expectedStart, int expectedOldEnd, int expectedNewEnd) {
    var oldText = new TextStream(new string('a', 20), 0);
    var newText = new TextStream(new string('b', 20), 1);
    var previous = new TreeTextChange(prevStart, prevOldEnd - prevStart, prevNewEnd - prevStart, oldText, oldText);
    var next = new TreeTextChange(nextStart, nextOldEnd - nextStart, nextNewEnd - nextStart, newText, newText);

    // Sanity-check the first change's derived lengths and ranges.
    previous.OldLength.Should().Be(prevOldEnd - prevStart);
    previous.NewLength.Should().Be(prevNewEnd - prevStart);
    previous.OldRange.Start.Should().Be(prevStart);
    previous.OldRange.End.Should().Be(prevOldEnd);
    previous.NewRange.Start.Should().Be(prevStart);
    previous.NewRange.End.Should().Be(prevNewEnd);

    previous.Combine(next);

    previous.Start.Should().Be(expectedStart);
    previous.OldEnd.Should().Be(expectedOldEnd);
    previous.NewEnd.Should().Be(expectedNewEnd);
}
/// <summary>
/// Reads a serialized quote of the form '{Symbol;CreatingTime;Bids;Asks;Id;}',
/// optionally preceded by 'name = ' when <paramref name="name"/> is supplied.
/// </summary>
public static LrpQuote ReadQuote(this TextStream stream, string name = null) {
    if (name != null) {
        // Named form: 'name = {...}'.
        stream.ValidateVerbatimText(name);
        stream.ValidateVerbatimText(" = ");
    }

    stream.ValidateVerbatimText('{');
    var quote = new LrpQuote();
    quote.Symbol = stream.ReadAString("Symbol");
    stream.ValidateVerbatimText(';');
    quote.CreatingTime = stream.ReadTime("CreatingTime");
    stream.ValidateVerbatimText(';');
    quote.Bids = stream.ReadQuoteEntryArray("Bids");
    stream.ValidateVerbatimText(';');
    quote.Asks = stream.ReadQuoteEntryArray("Asks");
    stream.ValidateVerbatimText(';');
    quote.Id = stream.ReadAString("Id");
    stream.ValidateVerbatimText(';');
    stream.ValidateVerbatimText('}');
    return quote;
}
/// <summary>
/// Tokenizes the named test file with the R tokenizer and compares the token
/// dump against the stored '.tokens' baseline — or regenerates the baseline
/// when _regenerateBaselineFiles is set.
/// </summary>
public static void TokenizeFileImplementation(CoreTestFilesFixture fixture, string name) {
    string destinationPath = fixture.GetDestinationPath(name);
    string baselinePath = destinationPath + ".tokens";
    string content = fixture.LoadDestinationFile(name);

    ITextProvider text = new TextStream(content);
    var tokens = new RTokenizer().Tokenize(text, 0, text.Length);
    string actual = DebugWriter.WriteTokens <RToken, RTokenType>(tokens);

    if (_regenerateBaselineFiles) {
        // Update this to your actual enlistment if you need to update baseline
        baselinePath = Path.Combine(fixture.SourcePath, @"Tokenization\", Path.GetFileName(destinationPath)) + ".tokens";
        TestFiles.UpdateBaseline(baselinePath, actual);
    } else {
        TestFiles.CompareToBaseLine(baselinePath, actual);
    }
}
/// <summary>
/// Checks the given line and determines if it is one single language.
/// </summary>
/// <param name="line">The line to check.</param>
/// <param name="stream">The TextStream from the Document the line belongs to. We
/// need this because the line does not have a reference to its parent Document.</param>
/// <param name="language">The text representation of the language. We compare this against
/// stream.Token.Language.Tag.ToString().</param>
/// <returns>True if the line contains one language, false if it contains two or more.</returns>
public static bool IsEntireLineOneLanguage(DocumentLine line, TextStream stream, string language) {
    bool isLineTemplateLanguage = true;
    // Start scanning from the first token of the line.
    stream.Offset = line.StartOffset;
    do {
        if (stream.Token.Language.Tag.ToString() != language) {
            // Found a token in a different language — the line is mixed.
            isLineTemplateLanguage = false;
            break;
        }
        stream.SeekToken(1);
    } while (stream.Token.EndOffset <= line.EndOffset && stream.IsAtDocumentEnd == false);
    return isLineTemplateLanguage;
}
/// <summary>
/// Creates a token stream over the given JSP document text.
/// </summary>
/// <param name="text">The underlying text stream; stored by reference, not copied.</param>
public JspTokenStream(TextStream text) {
    m_Text = text;
}
/// <summary>
/// Scans forward from the stream's current position for the first
/// ASPDirectiveStartToken.
/// </summary>
/// <param name="stream">The stream to search; its position is advanced.</param>
/// <returns>The start offset of the first ASP start token, or -1 if none exists
/// at or after the current offset.</returns>
public static int GetFirstStartScriptTag(TextStream stream) {
    for (;;) {
        if (stream.Token.Key == "ASPDirectiveStartToken") {
            return stream.Token.StartOffset;
        }
        stream.SeekToken(1);
        if (stream.IsAtDocumentEnd) {
            return -1;
        }
    }
}
/// <summary>
/// Helper method to find the start of a script language block.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <returns>The start offset of the current script language block, excluding the <% token. </returns>
public static int GetStartOfScriptLanguageBlock(TextStream stream) {
    // Script blocks are delimited by the ASP directive start token ('<%').
    return GetStartOfLanguageBlock(stream, "ScriptLanguage", "ASPDirectiveStartToken");
}
/// <summary>
/// Gets the offset of the start of the specified language. If the stream is not currently
/// in that language, the result is meaningless.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <param name="language">The language we are currently in and should find the start of.</param>
/// <param name="startTokenKey">The token which delimits this language block. If used, stops
/// the delimiters being counted as part of the language block.</param>
/// <returns>The offset at which the language starts.</returns>
public static int GetStartOfLanguageBlock(TextStream stream, string language, string startTokenKey) {
    do {
        // Walked out of the block (different language) or onto its delimiter token:
        // step forward one token so the reported offset is inside the block.
        if (stream.Token.Language.Tag.ToString() != language || stream.Token.Key == startTokenKey) {
            stream.SeekToken(1);
            break;
        }
        stream.SeekToken(-1);   // keep scanning backwards
    } while (stream.Token.StartOffset > 0);
    return stream.Token.StartOffset;
}
/// <summary>
/// Gets the start and end offsets of the current template language block.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <param name="newStart">The variable to put the start offset in.</param>
/// <param name="newEnd">The variable to put the end offset in.</param>
public static void GetTemplateLanguageBlock(TextStream stream, out int newStart, out int newEnd) {
    // NOTE(review): both helpers move the stream's position, so the end search
    // starts wherever the start search left off — confirm that is intended.
    newStart = GetStartOfTemplateLanguageBlock(stream);
    newEnd = GetEndOfTemplateLanguageBlock(stream);
}
/// <summary>
/// Gets the offset of the end of the specified language. If the stream is not currently
/// in that language, the result is meaningless.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <param name="language">The language we are currently in and should find the end of.</param>
/// <param name="endTokenKey">The token which delimits this language block. If used, stops
/// the delimiters being counted as part of the language block.</param>
/// <returns>The offset at which the language ends.</returns>
public static int GetEndOfLanguageBlock(TextStream stream, string language, string endTokenKey) {
    do {
        // Walked out of the block, hit document end, or landed on the delimiter:
        // step back one token so the reported offset is inside the block.
        if (stream.Token.Language.Tag.ToString() != language || stream.IsAtDocumentEnd || stream.Token.Key == endTokenKey) {
            stream.SeekToken(-1);
            break;
        }
        stream.SeekToken(1);   // keep scanning forwards
    } while (true);
    return stream.Token.EndOffset;
}
/// <summary>
/// Helper method to find the start of a template language block.
/// </summary>
/// <param name="stream">The stream to search.</param>
/// <returns>The start offset of the current template language block, excluding the %> token. </returns>
public static int GetStartOfTemplateLanguageBlock(TextStream stream) {
    // Template blocks are delimited by the ASP directive end token ('%>').
    return GetStartOfLanguageBlock(stream, "TemplateLanguage", "ASPDirectiveEndToken");
}