/// <summary>Reads the given file as Latin-1 text and parses it into an AST.</summary>
public static AST.Node LoadFile(string fileName, LexerMode mode, FunctionInformation functionInfo)
{
    string source;
    using (var reader = new StreamReader(fileName, Parse.Latin1))
    {
        source = reader.ReadToEnd();
    }
    return Parse.String(source, mode, functionInfo);
}
/// <summary>
/// Builds a string-literal token for <paramref name="text"/> by lexing a synthesized,
/// quoted form of <paramref name="bodyText"/> and rebasing any diagnostics onto it.
/// </summary>
private SyntaxToken MakeStringToken(
    string text,
    string bodyText,
    bool isVerbatim,
    SyntaxKind kind)
{
    // Wrap the body in (possibly verbatim) quotes so the scratch lexer sees a complete literal.
    string openQuote = isVerbatim ? "@\"" : "\"";
    string synthesized = openQuote + bodyText + "\"";

    using (var scratchLexer = new Lexer(Text.SourceText.From(synthesized), this.Options, allowPreprocessorDirectives: false))
    {
        var scratchMode = LexerMode.Syntax;
        SyntaxToken lexed = scratchLexer.Lex(ref scratchMode);
        Debug.Assert(lexed.Kind == SyntaxKind.StringLiteralToken);

        var literal = SyntaxFactory.Literal(null, text, kind, lexed.ValueText, null);
        if (lexed.ContainsDiagnostics)
        {
            // Diagnostics were positioned against the synthesized text; shift them
            // left by the opening quote so they line up with the original body.
            literal = literal.WithDiagnosticsGreen(MoveDiagnostics(lexed.GetDiagnostics(), -openQuote.Length));
        }

        return literal;
    }
}
/// <summary>
/// Initializes the A+ runtime: system variables, dependency/callback managers,
/// globals, and context autoloading. Seeds a default APATH when none is configured.
/// </summary>
public Aplus(Scope dlrglobals, LexerMode parsemode)
{
    this.sysvars = new SystemVariables();
    this.dependencies = new DependencyManager();
    this.callbackManager = new CallbackManager();
    this.dlrglobals = dlrglobals;
    this.globals = new DYN.ExpandoObject();
    this.sysvars["mode"] = ASymbol.Create(parsemode.ToString().ToLower());
    this.mmfmanager = new MemoryMappedFileManager();
    this.systemFunctions = Function.SystemFunction.DiscoverSystemFunctions();

    // Provide a default search path when the user has not configured one.
    string apath = Environment.GetEnvironmentVariable("APATH", EnvironmentVariableTarget.User);
    if (String.IsNullOrEmpty(apath))
    {
        string defaultPaths = String.Join(";", ".", "./Runtime/Context/");
        Environment.SetEnvironmentVariable("APATH", defaultPaths, EnvironmentVariableTarget.User);
    }

    // TODO: Move this to app.config?
    this.autoloadContexts = new string[] { "sys" };
    this.contextLoader = new ContextLoader(this);
}
/// <summary>Loads a Latin-1-encoded source file and parses its contents into an AST.</summary>
public static AST.Node LoadFile(string fileName, LexerMode mode, FunctionInformation functionInfo)
{
    string contents;
    using (var input = new StreamReader(fileName, Parse.Latin1))
    {
        contents = input.ReadToEnd();
    }
    return Parse.String(contents, mode, functionInfo);
}
/// <summary>Snapshot of parser state used to rewind to an earlier position.</summary>
public ResetPoint(int resetCount, LexerMode mode, int position, GreenNode prevTokenTrailingTrivia)
{
    ResetCount = resetCount;
    Mode = mode;
    Position = position;
    PrevTokenTrailingTrivia = prevTokenTrailingTrivia;
}
/// <summary>Captures the lexer/parser state needed to back up to this point.</summary>
internal ResetPoint(int tokenIndex, LexerMode mode, bool greaterThanTokenIsNotOperator, TerminatorState termState)
{
    this.TokenIndex = tokenIndex;
    this.Mode = mode;
    this.GreaterThanTokenIsNotOperator = greaterThanTokenIsNotOperator;
    this.TermState = termState;
}
/// <summary>
/// Restores parser state from a previously captured reset point. In blended
/// (incremental) mode, slots after the first one that no longer holds a token
/// are forgotten, and the current token is re-fetched when it was discarded.
/// </summary>
protected void Reset(ref ResetPoint point)
{
    _mode = point.Mode;
    var offset = point.Position - _firstToken;
    Debug.Assert(offset >= 0 && offset < _tokenCount);
    _tokenOffset = offset;
    _currentToken = default(SyntaxToken);
    _currentNode = default(BlendedNode);
    _prevTokenTrailingTrivia = point.PrevTokenTrailingTrivia;
    if (_blendedTokens != null)
    {
        // look forward for slots not holding a token
        for (int i = _tokenOffset; i < _tokenCount; i++)
        {
            if (_blendedTokens[i].Token == null)
            {
                // forget anything after and including any slot not holding a token
                _tokenCount = i;
                if (_tokenCount == _tokenOffset)
                {
                    FetchCurrentToken();
                }
                break;
            }
        }
    }
}
/// <summary>
/// Lexes a fresh token from the new text and updates the position/delta bookkeeping.
/// </summary>
private BlendedNode ReadNewToken(LexerMode mode)
{
    // We only read new tokens when the new text is behind the cursor or the old tree is exhausted.
    Debug.Assert(_changeDelta > 0 || _oldTreeCursor.IsFinished);

    var freshToken = this.LexNewToken(mode);

    // Track how far the new text has advanced. If the old cursor was finished this
    // bookkeeping is harmless; otherwise it keeps old and new positions in sync.
    int consumed = freshToken.FullWidth;
    _newPosition += consumed;
    _changeDelta -= consumed;

    // Reading may have carried us into or past change ranges; skipping them can
    // raise changeDelta again, signalling that more lexing is needed.
    this.SkipPastChanges();

    return this.CreateBlendedNode(node: null, token: freshToken);
}
/// <summary>Snapshot of the parser's position and mode for later restoration.</summary>
internal ResetPoint(int resetCount, LexerMode mode, int position, CSharpSyntaxNode prevTokenTrailingTrivia)
{
    ResetCount = resetCount;
    Position = position;
    Mode = mode;
    PrevTokenTrailingTrivia = prevTokenTrailingTrivia;
}
/// <summary>
/// Creates an HLSL parser over the given lexer. The comma-separator stack starts
/// with a single <c>false</c> frame so it is never empty.
/// </summary>
public HlslParser(ILexer lexer, LexerMode mode = LexerMode.Syntax)
{
    _lexer = lexer;
    _mode = mode;

    var commaStack = new Stack<bool>();
    commaStack.Push(false);
    CommaIsSeparatorStack = commaStack;
}
/// <summary>
/// Returns the current macro-body token without consuming it; a NUL token
/// signals that the body is exhausted.
/// </summary>
public SyntaxToken Peek(LexerMode mode)
{
    if (_tokenIndex >= _macroBody.Count)
    {
        return SyntaxFactory.ParseToken("\0");
    }
    return _macroBody[_tokenIndex];
}
/// <summary>Rolls parser state back to a previously captured reset point.</summary>
private void Reset(ref ResetPoint state)
{
    _scanStack.Pop();

    // Restore every field captured in the reset point.
    _mode = state.Mode;
    _termState = state.TermState;
    _tokenIndex = state.TokenIndex;
    _greaterThanTokenIsNotOperator = state.GreaterThanTokenIsNotOperator;
}
/// <summary>Lexes the next token, then rewinds the reader so nothing is consumed.</summary>
public SyntaxToken Peek(LexerMode mode)
{
    var savedPosition = _lexer._charReader.Position;
    var peeked = Lex(mode);
    _lexer._charReader.Reset(savedPosition);
    return peeked;
}
/// <summary>Restores the token stream to the state captured in <paramref name="point"/>.</summary>
protected void Reset(ref ResetPoint point)
{
    _mode = point.Mode;

    int offset = point.Position - _firstToken;
    Debug.Assert(offset >= 0 && offset < _tokenCount);
    _tokenOffset = offset;

    _currentToken = default(SyntaxToken);
    _prevTokenTrailingTrivia = point.PrevTokenTrailingTrivia;
}
/// <summary>
/// Switches lexing mode if necessary; returns true if a switch was performed.
/// </summary>
private bool SwitchMode(LexerMode mode)
{
    switch (mode)
    {
        case LexerMode.Normal:
            if ('"'.Equals(itr.Current()))
            {
                itr.Mode = LexerMode.String;
                return true;
            }
            if ('/'.Equals(itr.Current()))
            {
                // A second '/' opens a line comment; '*' opens a block comment.
                if ('/'.Equals(itr.Peek(1)))
                {
                    itr.Mode = LexerMode.Comment;
                    return true;
                }
                if ('*'.Equals(itr.Peek(1)))
                {
                    itr.Mode = LexerMode.MultiComment;
                    return true;
                }
            }
            break;

        case LexerMode.String:
            // NOTE(review): escape sequences are not considered here — any '"'
            // terminates the string; confirm that is intended.
            if (itr.Current() == '"')
            {
                itr.Mode = LexerMode.Normal;
                return true;
            }
            break;

        case LexerMode.Comment:
            // Line comments run to the end of the input.
            if (itr.IsFinalIndex())
            {
                itr.Mode = LexerMode.Normal;
                return true;
            }
            break;

        case LexerMode.MultiComment:
            if (itr == "*/")
            {
                itr.Mode = LexerMode.Normal;
                itr.MoveNext(); // move to '/'
                return true;
            }
            break;
    }

    return false;
}
/// <summary>Copies the blender's current state into this reader.</summary>
public Reader(Blender blender)
{
    _lexer = blender._lexer;
    _changes = blender._changes;
    _oldTreeCursor = blender._oldTreeCursor;
    _oldDirectives = blender._oldDirectives;
    _newPosition = blender._newPosition;
    _changeDelta = blender._changeDelta;
    _newDirectives = blender._newDirectives;
    _newLexerDrivenMode = blender._newLexerDrivenMode;
}
/// <summary>Initializes a reader over the blender's snapshot of lexing state.</summary>
public Reader(Blender blender)
{
    this.lexer = blender.lexer;
    this.changes = blender.changes;
    this.oldTreeCursor = blender.oldTreeCursor;
    this.oldDirectives = blender.oldDirectives;
    this.newPosition = blender.newPosition;
    this.changeDelta = blender.changeDelta;
    this.newDirectives = blender.newDirectives;
    this.newLexerDrivenMode = blender.newLexerDrivenMode;
}
/// <summary>Selects the lexer mode from a character: 'A' means mode A, anything else mode S.</summary>
public Lexer(char mode)
{
    Mode = mode == 'A' ? LexerMode.A : LexerMode.S;
}
/// <summary>Rewinds the parser to the given reset point, restoring every saved field.</summary>
private void Reset(ref ResetPoint state)
{
    _scanStack.Pop();

    _mode = state.Mode;
    _termState = state.TermState;
    _tokenIndex = state.TokenIndex;
    _greaterThanTokenIsNotOperator = state.GreaterThanTokenIsNotOperator;
    _allowLinearAndPointAsIdentifiers = state.AllowLinearAndPointAsIdentifiers;
    CommaIsSeparatorStack = state.CommaIsSeparatorStack;
}
/// <summary>Toggles the lexer between the A and S modes.</summary>
public void FlipMode()
{
    Mode = Mode == LexerMode.A ? LexerMode.S : LexerMode.A;
}
/// <summary>Sets up a lexer over a raw character buffer, starting at line 1, column 1.</summary>
public unsafe ParseLexer(char* text, int length)
{
    _scanner = new CharScanner(text, length);
    _collection = new List<Token>();
    _pos = new Stack<string>();
    _mode = LexerMode.None;
    _kind = TokenKind.Text;
    _line = 1;
    _column = 1;
    _startLine = 1;
    _startColumn = 1;
}
/// <summary>
/// Captures parser state for backtracking. The comma-separator stack is deep-copied;
/// it is reversed before re-pushing so the copy preserves the original ordering.
/// </summary>
internal ResetPoint(
    int tokenIndex,
    LexerMode mode,
    bool greaterThanTokenIsNotOperator,
    bool allowLinearAndPointAsIdentifiers,
    Stack<bool> commaIsSeparatorStack,
    TerminatorState termState)
{
    TokenIndex = tokenIndex;
    Mode = mode;
    TermState = termState;
    GreaterThanTokenIsNotOperator = greaterThanTokenIsNotOperator;
    AllowLinearAndPointAsIdentifiers = allowLinearAndPointAsIdentifiers;
    CommaIsSeparatorStack = new Stack<bool>(commaIsSeparatorStack.Reverse());
}
/// <summary>
/// Resets the lexer to its initial state so the document can be tokenized again.
/// </summary>
public void Reset()
{
    this.mode = LexerMode.None;
    this.kind = TokenKind.Text;
    this.line = 1;
    this.column = 1;
    this.startLine = 1;
    this.startColumn = 1;
    this.scanner = new CharScanner(this.document);
    this.collection = new List<Token>();
    this.pos = new Stack<String>();
}
/// <summary>
/// Returns the lexer to a pristine state: position counters back to 1, a fresh
/// scanner over the document, and empty token/bracket collections.
/// </summary>
public void Reset()
{
    this.scanner = new CharScanner(this.document);
    this.collection = new List<Token>();
    this.pos = new Stack<String>();
    this.mode = LexerMode.None;
    this.kind = TokenKind.Text;
    this.line = 1;
    this.column = 1;
    this.startLine = 1;
    this.startColumn = 1;
}
/// <summary>
/// Tokenizes the whole document.
/// </summary>
/// <returns>All tokens produced, terminated by an EOF token.</returns>
public Token[] Parse()
{
    if (this.kind != TokenKind.EOF)
    {
        char c;
        do
        {
            if (this.mode == LexerMode.EnterLabel)
            {
                // Inside a tag: step past the tag marker, classify the next character,
                // and push any opening quote/paren/bracket so nesting can be matched later.
                Next(this.pos.Peek().Length - 1);
                c = this.scanner.Read();
                AddToken(GetToken(GetTokenKind(c), c));
                switch (this.kind)
                {
                    case TokenKind.StringStart:
                        this.pos.Push(c.ToString());
                        break;
                    case TokenKind.LeftParentheses:
                        this.pos.Push("(");
                        break;
                    case TokenKind.LeftBracket:
                        this.pos.Push("[");
                        break;
                }
                ReadToken();
            }
            else if (IsTagStart())
            {
                AddToken(GetToken(TokenKind.TagStart));
                this.mode = LexerMode.EnterLabel;
            }
            else if (this.scanner.Read() == '\n')
            {
                // Plain text: keep line/column counters current for diagnostics.
                this.line++;
                this.column = 1;
            }
        }
        while (Next());

        AddToken(GetToken(TokenKind.EOF));

        // Input ended while still inside a tag: emit an empty TagEnd to close it.
        if (this.mode == LexerMode.EnterLabel)
        {
            this.mode = LexerMode.LeaveLabel;
            AddToken(new Token(TokenKind.TagEnd, String.Empty));
        }
    }
    return (this.collection.ToArray());
}
/// <summary>
/// Creates a blender that incrementally re-lexes <paramref name="changes"/> against
/// the tokens of <paramref name="oldTree"/>, reusing unchanged nodes where possible.
/// </summary>
public Blender(
    Lexer lexer,
    CSharp.CSharpSyntaxNode oldTree,
    IEnumerable<TextChangeRange> changes)
{
    Debug.Assert(lexer != null);
    _lexer = lexer;
    _changes = ImmutableStack.Create<TextChangeRange>();

    if (changes != null)
    {
        // Collapse all changes into one range. Extended ranges must not overlap, and
        // until a normalized change collection for TextSpan exists, collapsing is the
        // simplest way to guarantee that. Multiple changes are infrequent and usually
        // close together, so the cost is acceptable.
        var collapsed = TextChangeRange.Collapse(changes);

        // Widen the collapsed change to its full affected range (one token back) so
        // that testing a node for reuse is a simple intersection check.
        var affectedRange = ExtendToAffectedRange(oldTree, collapsed);
        _changes = _changes.Push(affectedRange);
    }

    if (oldTree == null)
    {
        // No old tree: start at wherever the lexer currently is.
        _oldTreeCursor = new Cursor();
        _newPosition = lexer.TextWindow.Position;
    }
    else
    {
        _oldTreeCursor = Cursor.FromRoot(oldTree).MoveToFirstChild();
        _newPosition = 0;
    }

    _changeDelta = 0;
    _newDirectives = default(DirectiveStack);
    _oldDirectives = default(DirectiveStack);
    _newLexerDrivenMode = 0;
}
/// <summary>
/// Builds a blender for incremental parsing: the single collapsed-and-widened
/// change range decides which parts of <paramref name="oldTree"/> may be reused.
/// </summary>
public Blender(Lexer lexer, CSharp.CSharpSyntaxNode oldTree, IEnumerable<TextChangeRange> changes)
{
    Debug.Assert(lexer != null);
    _lexer = lexer;
    _changes = ImmutableStack.Create<TextChangeRange>();

    if (changes != null)
    {
        // All change spans are collapsed into one because widened spans must not
        // overlap. A normalized TextSpan change collection would let us keep the
        // individual spans, but multiple changes are rare and typically adjacent,
        // so collapsing costs little.
        var singleChange = TextChangeRange.Collapse(changes);

        // Extend to the affected range (one token before the change) so node reuse
        // reduces to an intersection test.
        var widened = ExtendToAffectedRange(oldTree, singleChange);
        _changes = _changes.Push(widened);
    }

    if (oldTree == null)
    {
        // With no old tree, begin at the lexer's current position.
        _oldTreeCursor = new Cursor();
        _newPosition = lexer.TextWindow.Position;
    }
    else
    {
        _oldTreeCursor = Cursor.FromRoot(oldTree).MoveToFirstChild();
        _newPosition = 0;
    }

    _changeDelta = 0;
    _newDirectives = default(DirectiveStack);
    _oldDirectives = default(DirectiveStack);
    _newLexerDrivenMode = 0;
}
/// <summary>
/// Lexes one token at the blender's current new-text position, threading the
/// XML-doc-comment sub-mode between successive calls.
/// </summary>
private SyntaxToken LexNewToken(LexerMode mode)
{
    // Re-sync the lexer if something else moved its window.
    if (this.lexer.TextWindow.Position != this.newPosition)
    {
        this.lexer.Reset(this.newPosition);
    }

    // Inside doc comments the lexer drives extra mode bits; feed back the ones we saved.
    if (mode >= LexerMode.XmlDocComment)
    {
        mode |= this.newLexerDrivenMode;
    }

    var lexed = this.lexer.Lex(ref mode);

    // Remember only the doc-comment location/style bits for the next token.
    this.newLexerDrivenMode = mode & (LexerMode.MaskXmlDocCommentLocation | LexerMode.MaskXmlDocCommentStyle);

    return lexed;
}
/// <summary>Advances to the next lexeme using the requested mode.</summary>
public void Next(LexerMode mode)
{
    if (mode == LexerMode.BLOCK)
    {
        this.NextBlock();
    }
    else if (mode == LexerMode.RAW)
    {
        this.NextRaw();
    }
    else
    {
        throw new UnknownException(this, "invalid lexem");
    }
}
/// <summary>
/// Internal state-carrying constructor; only the XML-doc-comment location/style
/// bits of the lexer-driven mode survive the copy.
/// </summary>
private Blender(
    Lexer lexer,
    Cursor oldTreeCursor,
    ImmutableStack<TextChangeRange> changes,
    int newPosition,
    int changeDelta,
    LexerMode newLexerDrivenMode)
{
    Debug.Assert(lexer != null);
    Debug.Assert(changes != null);
    Debug.Assert(newPosition >= 0);

    const LexerMode docCommentBits = LexerMode.MaskXmlDocCommentLocation | LexerMode.MaskXmlDocCommentStyle;

    this.lexer = lexer;
    this.oldTreeCursor = oldTreeCursor;
    this.changes = changes;
    this.newPosition = newPosition;
    this.changeDelta = changeDelta;
    this.newLexerDrivenMode = newLexerDrivenMode & docCommentBits;
}
/// <summary>Reads the next lexeme as either a block token or raw text.</summary>
public void Next(LexerMode mode)
{
    if (mode == LexerMode.Block)
    {
        this.current = this.NextBlock();
    }
    else if (mode == LexerMode.Raw)
    {
        this.current = this.NextRaw();
    }
    else
    {
        throw new ParseException(this.column, this.line, "<?>", "block or raw text");
    }
}
/// <summary>Advances the lexer, storing the next block or raw-text lexeme in <c>current</c>.</summary>
public void Next(LexerMode mode)
{
    switch (mode)
    {
        case LexerMode.Block:
            this.current = this.NextBlock();
            break;

        case LexerMode.Raw:
            this.current = this.NextRaw();
            break;

        default:
            // Any other mode is a programming error at the call site.
            throw new ParseException(this.column, this.line, "<?>", "block or raw text");
    }
}
/// <summary>
/// Lexes the next token from the new text, keeping directive state and the
/// doc-comment sub-mode in sync across calls.
/// </summary>
private SyntaxToken LexNewToken(LexerMode mode)
{
    if (_lexer.TextWindow.Position != _newPosition)
    {
        // The lexer drifted; reposition it and restore directive state.
        _lexer.Reset(_newPosition, _newDirectives);
    }

    if (mode >= LexerMode.XmlDocComment)
    {
        mode |= _newLexerDrivenMode;
    }

    var token = _lexer.Lex(ref mode);
    _newDirectives = _lexer.Directives;
    _newLexerDrivenMode = mode & (LexerMode.MaskXmlDocCommentLocation | LexerMode.MaskXmlDocCommentStyle);

    return token;
}
/// <summary>Parses an input string with the lexer matching the requested mode.</summary>
public static AST.Node String(string input, LexerMode mode, FunctionInformation functionInfo)
{
    if (mode == LexerMode.ASCII)
    {
        return ASCIIString(input, functionInfo);
    }
    if (mode == LexerMode.APL)
    {
        return APLString(input, functionInfo);
    }
    if (mode == LexerMode.UNI)
    {
        return UNIString(input, functionInfo);
    }
    throw new ParseException("Invalid Parse Mode");
}
/// <summary>Dispatches parsing of <paramref name="input"/> to the mode-specific parser.</summary>
public static AST.Node String(string input, LexerMode mode, FunctionInformation functionInfo)
{
    switch (mode)
    {
        case LexerMode.ASCII:
            return ASCIIString(input, functionInfo);
        case LexerMode.APL:
            return APLString(input, functionInfo);
        case LexerMode.UNI:
            return UNIString(input, functionInfo);
        default:
            throw new ParseException("Invalid Parse Mode");
    }
}
//#endif
/// <summary>
/// Creates an X# language parser over <paramref name="Text"/>. The base parser is
/// handed no lexer and pre-lexes unless running incrementally; mode resets are disallowed.
/// </summary>
internal XSharpLanguageParser(
    String FileName,
    SourceText Text,
    CSharpParseOptions options,
    CSharp.CSharpSyntaxNode oldTree,
    IEnumerable<TextChangeRange> changes,
    LexerMode lexerMode = LexerMode.Syntax,
    CancellationToken cancellationToken = default(CancellationToken))
    : base(/*lexer*/ null, lexerMode, oldTree, changes, allowModeReset: false,
           preLexIfNotIncremental: true, cancellationToken: cancellationToken)
{
    _syntaxFactoryContext = new SyntaxFactoryContext();
    _syntaxFactory = new ContextAwareSyntax(_syntaxFactoryContext);
    _text = Text;
    _fileName = FileName;
    _options = options;
    _isScript = options.Kind == SourceCodeKind.Script;
    _isMacroScript = _isScript && options.MacroScript;
}
/// <summary>
/// Core blender driver: keeps the old-tree and new-text positions in sync,
/// reusing old nodes/tokens where possible and lexing new tokens where not.
/// </summary>
internal BlendedNode ReadNodeOrToken(LexerMode mode, bool asToken)
{
    // This is the core driver of the blender. It just sits in a loop trying to keep our
    // positions in the old and new text in sync. When they're out of sync it will try
    // to match them back up, and it will appropriately determine which nodes or tokens
    // from the old tree can be reused as long as they don't overlap any changes or
    // contain any errors.
    while (true)
    {
        // If the cursor in the old tree is finished, then our choice is easy. We just
        // read from the new text.
        if (this.oldTreeCursor.IsFinished)
        {
            return this.ReadNewToken(mode);
        }

        // If delta is non-zero then that means our positions in the respective text
        // streams are not in sync. This can be for two reasons. Either:
        //
        // a) we're further ahead in the new text (i.e. 'changeDelta' is negative). We
        //    should keep skipping tokens in the old text until we catch up.
        //    TODO(cyrusn): We could actually be smarter here and skip whole nodes if
        //    they're shorter than the changeDelta. We can try doing that in the future.
        //
        // b) we're further ahead in the old text (i.e. 'changeDelta' is positive).
        //    This can happen when we are skipping over portions of the old tree because
        //    it overlapped with changed text spans. In this case, we want to read a
        //    token to try to consume that changed text and ensure that we get synced up.
        if (this.changeDelta < 0)
        {
            // Case 'a' above. We're behind in the old text, so move forward a token.
            // And try again.
            this.SkipOldToken();
        }
        else if (this.changeDelta > 0)
        {
            // Case 'b' above. We're behind in the new text, so read a token to try to
            // catch up.
            return this.ReadNewToken(mode);
        }
        else
        {
            // Positions are in sync. Attempt to take a node or token from the old
            // tree. If we can't, then either break down the current node we're
            // looking at to its first child and try again, or move to the next token.
            BlendedNode blendedNode;
            if (this.TryTakeOldNodeOrToken(asToken, out blendedNode))
            {
                return blendedNode;
            }

            // Couldn't take the current node or token. Figure out the next node or
            // token to reconsider and try again.
            if (this.oldTreeCursor.CurrentNodeOrToken.IsNode)
            {
                // It was a node. Just move to its first token and try again.
                this.oldTreeCursor = this.oldTreeCursor.MoveToFirstChild();
            }
            else
            {
                // It was a token, just move to the next token.
                this.SkipOldToken();
            }
        }
    }
}
/// <summary>Strips the flag bits, leaving only the base lexer mode.</summary>
private static LexerMode ModeOf(LexerMode mode)
{
    return (mode & LexerMode.MaskLexMode);
}
/// <summary>
/// Lexes one token, handling three layered concerns: replay of previously expanded
/// macro tokens, swallowing end-of-file tokens from include files, and macro
/// expansion of identifiers (empty expansions become trivia on the next token).
/// </summary>
public SyntaxToken Lex(LexerMode mode)
{
    // First check if we're in the middle of expanding a macro reference token.
    if (_expandedMacroTokens != null)
    {
        var result = _expandedMacroTokens[_expandedMacroIndex++];
        if (_expandedMacroIndex == _expandedMacroTokens.Count)
            _expandedMacroTokens = null;
        return result;
    }

    _mode = mode;

    SyntaxToken token;
    switch (_mode)
    {
        case LexerMode.Syntax:
            token = LexSyntaxToken();
            break;
        case LexerMode.Directive:
            token = LexDirectiveToken();
            break;
        default:
            throw new ArgumentOutOfRangeException();
    }

    // Swallow end-of-file tokens from include files.
    if (token.Kind == SyntaxKind.EndOfFileToken && _includeStack.Count > 1)
    {
        var originalToken = token;
        PopIncludeContext();
        // Recurse to get the first token after the include, prepending the EOF's trivia.
        token = Lex(mode);
        token = token.WithLeadingTrivia(originalToken.LeadingTrivia.AddRange(token.LeadingTrivia));

        // this is a bit weird, but we need to also update the leading trivia on the macro reference,
        // because that's what we use when outputting code.
        if (token.MacroReference != null)
            token = token.WithOriginalMacroReference(token.MacroReference.WithLeadingTrivia(token.LeadingTrivia), token.IsFirstTokenInMacroExpansion);
    }

    // Expand macros and attach as a special kind of trivia.
    if (token.Kind == SyntaxKind.IdentifierToken && ExpandMacros)
    {
        List<SyntaxToken> expandedTokens;
        if (TryExpandMacro(token, new BaseMacroExpansionLexer(this), out expandedTokens))
        {
            if (expandedTokens.Count == 0) // Can happen for macros with empty body.
            {
                // Attach macro call as leading trivia on next token.
                var originalToken = token;
                token = Lex(mode);

                var leadingTrivia = new List<SyntaxNode>();
                leadingTrivia.AddRange(originalToken.LeadingTrivia);
                leadingTrivia.Add(new SyntaxTrivia(SyntaxKind.EmptyExpandedMacroTrivia, originalToken.Text, originalToken.SourceRange, originalToken.Span, ImmutableArray<Diagnostic>.Empty));
                leadingTrivia.AddRange(originalToken.TrailingTrivia);
                leadingTrivia.AddRange(token.LeadingTrivia);
                token = token.WithLeadingTrivia(leadingTrivia.ToImmutableArray());
            }
            else
            {
                // Queue any additional expanded tokens for replay on later Lex calls.
                if (expandedTokens.Count > 1)
                {
                    _expandedMacroTokens = expandedTokens;
                    _expandedMacroIndex = 1;
                }

                token = expandedTokens[0];
            }
        }
    }

    return token;
}
/// <summary>
/// Consumes characters inside a tag, emitting tokens until the tag end is reached.
/// Nested quotes and parentheses are tracked via the <c>pos</c> stack, and
/// consecutive characters of the same kind appear to be merged into one token
/// (see the kind-change test near the bottom) — confirm against GetToken/AddToken.
/// </summary>
private void ReadToken()
{
    while (Next())
    {
        // A quote either closes the string currently on top of the stack, or opens
        // a new one when it follows a token that can legally precede a string.
        if (this.scanner.Read() == '"')
        {
            if (this.pos.Count > 1 && this.pos.Peek() == "\"")
            {
                if (this.kind == TokenKind.StringStart)
                {
                    AddToken(GetToken(TokenKind.String));
                }
                AddToken(GetToken(TokenKind.StringEnd));
                this.pos.Pop();
                continue;
            }
            if (this.kind == TokenKind.TagStart
                || this.kind == TokenKind.LeftBracket
                || this.kind == TokenKind.LeftParentheses
                || this.kind == TokenKind.Operator
                || this.kind == TokenKind.Punctuation
                || this.kind == TokenKind.Comma
                || this.kind == TokenKind.Space)
            {
                AddToken(GetToken(TokenKind.StringStart));
                this.pos.Push("\"");
                continue;
            }
        }
        // Inside a string literal, characters accumulate until the closing quote.
        if (this.kind == TokenKind.StringStart)
        {
            AddToken(GetToken(TokenKind.String));
            continue;
        }
        if (this.kind == TokenKind.String)
        {
            continue;
        }
        if (this.scanner.Read() == '(')
        {
            this.pos.Push("(");
        }
        else if (this.scanner.Read() == ')' && this.pos.Peek() == "(")// && this.pos.Count > 2
        {
            this.pos.Pop();
            if (this.pos.Count == 1)
            {
            }
        }
        else if (IsTagEnd())
        {
            //Next(1);
            //this.pos.Pop();
            AddToken(GetToken(TokenKind.TagEnd));
            this.mode = LexerMode.LeaveLabel;
            // A two-character tag marker needs one extra advance.
            if (this.pos.Pop().Length == 2)
            {
                Next(1);
            }
            if (IsTagStart())
            {
                AddToken(GetToken(TokenKind.TagStart));
                this.mode = LexerMode.EnterLabel;
            }
            else
            {
                AddToken(GetToken(TokenKind.Text));
            }
            break;
        }
        TokenKind tk;
        if (this.scanner.Read() == '+' || this.scanner.Read() == '-') // detect sign of positive/negative numbers
        {
            // '+'/'-' is a numeric sign only when followed by a digit and preceded
            // by an operator or '(' — otherwise it is an ordinary operator.
            if (Char.IsNumber(this.scanner.Read(1)) && (this.kind == TokenKind.Operator || this.kind == TokenKind.LeftParentheses))
            {
                tk = TokenKind.Number;
            }
            else
            {
                tk = TokenKind.Operator;
            }
        }
        else
        {
            tk = GetTokenKind(this.scanner.Read());
        }
        //if (this.kind == tk || (tk == TokenKind.Number && this.kind == TokenKind.TextData))
        // Emit a token only when the kind changes (parentheses always emit), and
        // never split a number out of accumulated text data.
        if ((this.kind != tk || this.kind == TokenKind.LeftParentheses || this.kind == TokenKind.RightParentheses)
            && (tk != TokenKind.Number || this.kind != TokenKind.TextData)
            //&& (this.kind == TokenKind.Number && tk != TokenKind.Dot)
            ) //|| (this.kind != TokenKind.Number && tk == TokenKind.Dot)
        {
            // A dot inside a number is part of the number; emit nothing for it.
            if (tk == TokenKind.Dot &&
                this.kind == TokenKind.Number)
            {
            }
            else
            {
                AddToken(GetToken(tk));
            }
        }
    }
}
/// <summary>
/// Lexes one token for the given mode. Plain syntax modes try the quick scanner
/// first; XML doc-comment modes dispatch on the masked base mode.
/// </summary>
public SyntaxToken Lex(LexerMode mode)
{
#if DEBUG
    TokensLexed++;
#endif
    _mode = mode;

    switch (_mode)
    {
        case LexerMode.Syntax:
        case LexerMode.DebuggerSyntax:
            // Fast path first; fall back to the full scanner when it bails.
            return this.QuickScanSyntaxToken() ?? this.LexSyntaxToken();
        case LexerMode.Directive:
            return this.LexDirectiveToken();
    }

    var baseMode = ModeOf(_mode);
    switch (baseMode)
    {
        case LexerMode.XmlDocComment:
            return this.LexXmlToken();
        case LexerMode.XmlElementTag:
            return this.LexXmlElementTagToken();
        case LexerMode.XmlAttributeTextQuote:
        case LexerMode.XmlAttributeTextDoubleQuote:
            return this.LexXmlAttributeTextToken();
        case LexerMode.XmlCDataSectionText:
            return this.LexXmlCDataSectionTextToken();
        case LexerMode.XmlCommentText:
            return this.LexXmlCommentTextToken();
        case LexerMode.XmlProcessingInstructionText:
            return this.LexXmlProcessingInstructionTextToken();
        case LexerMode.XmlCrefQuote:
        case LexerMode.XmlCrefDoubleQuote:
        case LexerMode.XmlNameQuote:
        case LexerMode.XmlNameDoubleQuote:
            // Names lex the same as cref attributes; identifiers are treated a
            // little differently downstream.
            return this.LexXmlCrefOrNameToken();
        case LexerMode.XmlCharacter:
            return this.LexXmlCharacter();
        default:
            throw ExceptionUtilities.UnexpectedValue(baseMode);
    }
}
/// <summary>Lexes a token and writes the lexer's (possibly updated) mode back to the caller.</summary>
public SyntaxToken Lex(ref LexerMode mode)
{
    var token = Lex(mode);
    mode = _mode;
    return token;
}
/// <summary>Extracts the XML doc-comment style field (stored shifted left by 20 bits).</summary>
private static XmlDocCommentStyle StyleOf(LexerMode mode)
{
    var styleBits = (int)(mode & LexerMode.MaskXmlDocCommentStyle);
    return (XmlDocCommentStyle)(styleBits >> 20);
}
/// <summary>Replaces the doc-comment location field of the current mode (stored at bit 16).</summary>
private void MutateLocation(XmlDocCommentLocation location)
{
    // Clear the old location field, then install the new one.
    _mode &= ~LexerMode.MaskXmlDocCommentLocation;
    _mode |= (LexerMode)((int)location << 16);
}
/// <summary>Extracts the XML doc-comment location field (stored shifted left by 16 bits).</summary>
private static XmlDocCommentLocation LocationOf(LexerMode mode)
{
    var locationBits = (int)(mode & LexerMode.MaskXmlDocCommentLocation);
    return (XmlDocCommentLocation)(locationBits >> 16);
}
/// <summary>True when the lexer's base mode (flag bits stripped) equals <paramref name="mode"/>.</summary>
private bool ModeIs(LexerMode mode)
{
    return mode == ModeOf(_mode);
}
/// <summary>
/// Lexes a token at the current new position, restoring directives if the lexer
/// drifted, and persisting the doc-comment sub-mode for the next call.
/// </summary>
private SyntaxToken LexNewToken(LexerMode mode)
{
    bool lexerDrifted = this.lexer.TextWindow.Position != this.newPosition;
    if (lexerDrifted)
    {
        this.lexer.Reset(this.newPosition, this.newDirectives);
    }

    if (mode >= LexerMode.XmlDocComment)
    {
        mode |= this.newLexerDrivenMode;
    }

    var result = this.lexer.Lex(ref mode);
    this.newDirectives = this.lexer.Directives;
    this.newLexerDrivenMode = mode & (LexerMode.MaskXmlDocCommentLocation | LexerMode.MaskXmlDocCommentStyle);
    return result;
}
/// <summary>
/// Lexes one real token from the new text and updates position/delta accounting.
/// </summary>
private BlendedNode ReadNewToken(LexerMode mode)
{
    // Only valid when we're behind in the old text or the old tree is exhausted.
    Debug.Assert(this.changeDelta > 0 || this.oldTreeCursor.IsFinished);

    var token = this.LexNewToken(mode);

    // Advance the new-text position and shrink the outstanding delta by what we consumed.
    // (If the old cursor was finished this bookkeeping is harmless.)
    var width = token.FullWidth;
    this.newPosition += width;
    this.changeDelta -= width;

    // We may have lexed into or past change ranges — skipping them can grow
    // changeDelta again, telling the caller that more lexing is required.
    this.SkipPastChanges();

    return this.CreateBlendedNode(node: null, token: token);
}
/// <summary>
/// Lexes a single preprocessor directive (plus any leading whitespace) into the
/// trivia list and applies its effect to the directive stack.
/// </summary>
private CSharpSyntaxNode LexSingleDirective(
    bool isActive,
    bool endIsActive,
    bool afterFirstToken,
    bool afterNonWhitespaceOnLine,
    ref SyntaxListBuilder triviaList)
{
    // Leading whitespace on the directive line becomes ordinary whitespace trivia.
    if (SyntaxFacts.IsWhitespace(TextWindow.PeekChar()))
    {
        this.Start();
        this.AddTrivia(this.ScanWhitespace(), ref triviaList);
    }

    // The directive parser runs with its own mode; restore ours afterwards.
    var savedMode = _mode;
    CSharpSyntaxNode directive;
    using (var directiveParser = new DirectiveParser(this, _directives))
    {
        directive = directiveParser.ParseDirective(isActive, endIsActive, afterFirstToken, afterNonWhitespaceOnLine);
    }

    this.AddTrivia(directive, ref triviaList);
    _directives = directive.ApplyDirectives(_directives);
    _mode = savedMode;

    return directive;
}
/// <summary>
/// Lexes one preprocessor directive with macro expansion disabled, adds it to the
/// trivia list (unless it is an active include), and re-syncs the char reader.
/// </summary>
private SyntaxNode LexSingleDirective(
    bool isActive,
    bool endIsActive,
    bool afterNonWhitespaceOnLine,
    List<SyntaxNode> triviaList)
{
    _start = _charReader.Position;
    if (char.IsWhiteSpace(_charReader.Current))
    {
        ReadWhitespace();
        AddTrivia(triviaList, SyntaxKind.WhitespaceTrivia);
    }

    // Directives are lexed in their own mode and must not expand macros.
    var savedMode = _mode;
    var savedExpandMacros = ExpandMacros;
    _mode = LexerMode.Directive;
    ExpandMacros = false;

    var directiveParser = new DirectiveParser(this, _directives);
    var directive = directiveParser.ParseDirective(isActive, endIsActive, afterNonWhitespaceOnLine);

    // Active include directives are not kept as trivia themselves.
    if (!isActive || directive.Kind != SyntaxKind.IncludeDirectiveTrivia)
    {
        triviaList.Add(directive);
    }

    _directives = directive.ApplyDirectives(_directives);
    ExpandMacros = savedExpandMacros;
    _mode = savedMode;

    // Directive parser sometimes leaves charReader at start of token *after* the one we want.
    _charReader.Reset(directive.GetLastToken().GetLastSpanIncludingTrivia().End);
    _start = _charReader.Position;

    return directive;
}
/// <summary>
/// Lexes an entire XML documentation comment via the shared documentation-comment
/// parser, reporting an error if the comment is left unterminated.
/// </summary>
private CSharpSyntaxNode LexXmlDocComment(XmlDocCommentStyle style)
{
    var savedMode = _mode;

    var commentMode = style == XmlDocCommentStyle.SingleLine
        ? LexerMode.XmlDocCommentStyleSingleLine
        : LexerMode.XmlDocCommentStyleDelimited;

    // Reuse a single parser instance across comments.
    if (_xmlParser == null)
    {
        _xmlParser = new DocumentationCommentParser(this, commentMode);
    }
    else
    {
        _xmlParser.ReInitialize(commentMode);
    }

    bool isTerminated;
    var docComment = _xmlParser.ParseDocumentationComment(out isTerminated);

    // We better have finished with the whole comment. There should be error
    // code in the implementation of ParseXmlDocComment that ensures this.
    Debug.Assert(this.LocationIs(XmlDocCommentLocation.End) || TextWindow.PeekChar() == SlidingTextWindow.InvalidCharacter);

    _mode = savedMode;

    if (!isTerminated)
    {
        // The comment didn't end; report at the start point. This is reported even
        // when DocumentationMode < diagnose — the text is malformed as a regular
        // comment as well.
        this.AddError(TextWindow.LexemeStartPosition, TextWindow.Width, ErrorCode.ERR_OpenEndedComment);
    }

    return docComment;
}
/// <summary>
/// Tokenizes the whole document.
/// </summary>
/// <returns>All tokens produced, terminated by an EOF token.</returns>
public Token[] Parse()
{
    if (this.kind != TokenKind.EOF)
    {
        do
        {
            if (this.mode == LexerMode.EnterLabel)
            {
                // Inside a tag: step past the tag marker, classify the next character,
                // and push any opening quote/paren so nesting can be matched later.
                Next(this.pos.Peek().Length - 1);
                AddToken(GetToken(GetTokenKind(this.scanner.Read())));
                switch (this.kind)
                {
                    case TokenKind.StringStart:
                        this.pos.Push("\"");
                        break;
                    case TokenKind.LeftParentheses:
                        this.pos.Push("(");
                        break;
                }
                ReadToken();
            }
            else if (IsTagStart())
            {
                AddToken(GetToken(TokenKind.TagStart));
                this.mode = LexerMode.EnterLabel;
            }
            else if (this.scanner.Read() == '\n')
            {
                // Plain text: keep line/column counters current for diagnostics.
                this.line++;
                this.column = 1;
            }
        } while (Next());

        AddToken(GetToken(TokenKind.EOF));

        // Input ended while still inside a tag: emit an empty TagEnd to close it.
        if (this.mode == LexerMode.EnterLabel)
        {
            this.mode = LexerMode.LeaveLabel;
            AddToken(new Token(TokenKind.TagEnd, String.Empty));
        }
    }
    return this.collection.ToArray();
}
/// <summary>
/// Internal state-carrying constructor; only the doc-comment location/style bits
/// of the lexer-driven mode survive the copy.
/// </summary>
private Blender(
    Lexer lexer,
    Cursor oldTreeCursor,
    ImmutableStack<TextChangeRange> changes,
    int newPosition,
    int changeDelta,
    DirectiveStack newDirectives,
    DirectiveStack oldDirectives,
    LexerMode newLexerDrivenMode)
{
    Debug.Assert(lexer != null);
    Debug.Assert(changes != null);
    Debug.Assert(newPosition >= 0);

    const LexerMode docCommentMask = LexerMode.MaskXmlDocCommentLocation | LexerMode.MaskXmlDocCommentStyle;

    _lexer = lexer;
    _oldTreeCursor = oldTreeCursor;
    _changes = changes;
    _newPosition = newPosition;
    _changeDelta = changeDelta;
    _newDirectives = newDirectives;
    _oldDirectives = oldDirectives;
    _newLexerDrivenMode = newLexerDrivenMode & docCommentMask;
}
/// <summary>Delegates node-or-token reading to a fresh <see cref="Reader"/> over this blender.</summary>
private BlendedNode ReadNodeOrToken(LexerMode mode, bool asToken)
{
    return new Reader(this).ReadNodeOrToken(mode, asToken);
}
/// <summary>Reads the next item strictly as a token.</summary>
public BlendedNode ReadToken(LexerMode mode)
{
    return this.ReadNodeOrToken(mode, asToken: true);
}
/// <summary>Reads the next item, allowing whole nodes from the old tree to be reused.</summary>
public BlendedNode ReadNode(LexerMode mode)
{
    return this.ReadNodeOrToken(mode, asToken: false);
}
/// <summary>
/// Lexes a single token for the given mode. Plain syntax modes try the quick
/// scanner first; XML doc-comment modes dispatch on the masked base mode.
/// </summary>
public SyntaxToken Lex(LexerMode mode)
{
#if DEBUG
    TokensLexed++;
#endif
    this.mode = mode;

    switch (this.mode)
    {
        case LexerMode.Syntax:
        case LexerMode.DebuggerSyntax:
#if true
            // Quick scan handles the common cases; fall back to the full lexer.
            return this.QuickScanSyntaxToken() ?? this.LexSyntaxToken();
#else
            return this.LexSyntaxToken();
#endif
        case LexerMode.Directive:
            return this.LexDirectiveToken();
    }

    switch (ModeOf(this.mode))
    {
        case LexerMode.XmlDocComment:
            return this.LexXmlToken();
        case LexerMode.XmlElementTag:
            return this.LexXmlElementTagToken();
        case LexerMode.XmlAttributeTextQuote:
        case LexerMode.XmlAttributeTextDoubleQuote:
            return this.LexXmlAttributeTextToken();
        case LexerMode.XmlCDataSectionText:
            return this.LexXmlCDataSectionTextToken();
        case LexerMode.XmlCommentText:
            return this.LexXmlCommentTextToken();
        case LexerMode.XmlProcessingInstructionText:
            return this.LexXmlProcessingInstructionTextToken();
        case LexerMode.XmlCrefQuote:
        case LexerMode.XmlCrefDoubleQuote:
        case LexerMode.XmlNameQuote:
        case LexerMode.XmlNameDoubleQuote:
            // Names lex the same as cref attributes; identifiers are treated a
            // little differently downstream.
            return this.LexXmlCrefOrNameToken();
        case LexerMode.XmlCharacter:
            return this.LexXmlCharacter();
    }

    Debug.Assert(false, "Unknown LexMode passed to Lexer.Lex");
    return this.LexSyntaxToken();
}