static XSharpSyntax()
{
    // Create a throw-away lexer purely to obtain its keyword tables.
    // Passing the default options selects the core dialect, so the
    // resulting list contains no 4-letter keyword abbreviations.
    var coreLexer = XSharpLexer.Create("", "", XSharpParseOptions.Default);
    var keywordSymbols = new List<IXSymbol>();
    var typeSymbols = new List<IXSymbol>();
    foreach (var kw in coreLexer.KwIds)
    {
        _keywordNames.Add(kw.Key, kw.Key);
        keywordSymbols.Add(new XSourceSymbol(kw.Key, Kind.Keyword, Modifiers.None));
        // Keywords that denote a type (INT, STRING, ...) are also collected separately.
        if (XSharpLexer.IsType(kw.Value))
        {
            typeSymbols.Add(new XSourceSymbol(kw.Key, Kind.Keyword, Modifiers.None));
        }
    }
    _keywords = keywordSymbols.ToArray();
    _types = typeSymbols.ToArray();
}
/// <summary>
/// Copies every error recorded during lexing into the shared error bag.
/// </summary>
/// <remarks>
/// The <paramref name="tokenStream"/> parameter is not read here; it is kept
/// for signature compatibility with the callers in this file.
/// </remarks>
private static void GetLexerErrors(XSharpLexer lexer, BufferedTokenStream tokenStream, List<ParseErrorData> parseErrors)
{
    foreach (var lexError in lexer.LexErrors)
    {
        parseErrors.Add(lexError);
    }
}
/// <summary>
/// True when the token is a literal constant; a null token is never a literal.
/// </summary>
internal static bool IsLiteral(this IToken token)
{
    return token != null && XSharpLexer.IsConstant(token.Type);
}
/// <summary>
/// Get the first keyword in a line. The keyword is returned in UPPERCASE.
/// Modifiers (PRIVATE, PROTECTED, ...) are skipped over.
/// If the first token is a comment, its first two characters ("//", "/*", ...) are returned.
/// </summary>
/// <param name="line">The line to analyze</param>
/// <param name="doSkipped">Set to true when a "DO" keyword was skipped before the returned keyword</param>
/// <param name="minIndent">Length of the leading whitespace token, 0 when the line starts with a token, -1 when the line is empty</param>
/// <returns>The first non-modifier keyword in uppercase, a 2-char comment prefix, or "" when none found</returns>
private String getFirstKeywordInLine(ITextSnapshotLine line, out bool doSkipped, out int minIndent)
{
    minIndent = -1;
    doSkipped = false;
    string startOfLine = line.GetText();
    string keyword = "";
    int index = 0;
    var tokens = getTokens(startOfLine);
    if (tokens.Count > 0)
    {
        // A leading whitespace token determines the indent; otherwise the line starts at column 0.
        if (tokens[0].Type == XSharpLexer.WS)
        {
            index = 1;
            minIndent = getIndentTokenLength(tokens[0]);
        }
        else
        {
            minIndent = 0;
        }
        while (tokens.Count > index)
        {
            var token = tokens[index];
            // skip interior whitespace between modifiers and the keyword
            if (token.Type == XSharpLexer.WS)
            {
                index++;
                continue;
            }
            if (XSharpLexer.IsKeyword(token.Type))
            {
                keyword = token.Text.ToUpper();
                // it could be a modifier (PRIVATE, STATIC, ...): skip it and keep scanning
                if (keywordIsModifier(token.Type))
                {
                    index++;
                    keyword = "";
                    continue;
                }
                // DO is skipped as well (e.g. "DO WHILE"), but the caller is told about it
                if (token.Type == XSharpLexer.DO)
                {
                    index++;
                    keyword = "";
                    doSkipped = true;
                    continue;
                }
            }
            else if (XSharpLexer.IsComment(token.Type))
            {
                // return the comment marker itself ("//", "/*", ...)
                keyword = token.Text.Substring(0, 2);
            }
            // any other token type ends the scan with keyword still ""
            break;
        }
    }
    return (keyword);
}
/// <summary>
/// Walks backwards through the token list from the position of a closing
/// character to find its matching opening character, honoring nesting.
/// </summary>
/// <param name="startPoint">Snapshot position of the closing character</param>
/// <param name="open">Opening character to search for (e.g. '(')</param>
/// <param name="close">Closing character that increases the nesting count (e.g. ')')</param>
/// <param name="pairSpan">Receives a 1-character span on the matching open character when found</param>
/// <param name="tokens">Token list of the (partial) buffer; may be null</param>
/// <param name="offset">Offset of the token list's coordinates relative to the snapshot</param>
/// <returns>True when a matching opening character was located</returns>
private static bool FindMatchingOpenChar(SnapshotPoint startPoint, char open, char close, out SnapshotSpan pairSpan, IList<IToken> tokens, int offset)
{
    pairSpan = new SnapshotSpan(startPoint, startPoint);
    try
    {
        int startpos = startPoint.Position;
        if (tokens != null)
        {
            // locate the token at the cursor position (token indexes are snapshot pos minus offset)
            int tokenpos = findtokeninList(tokens, startpos - offset);
            if (tokenpos == -1)
            {
                return (false);
            }
            IToken token = tokens[tokenpos];
            // open/close braces are operators; anything else cannot be a brace
            if (!XSharpLexer.IsOperator(token.Type))
            {
                return (false);
            }
            int closeCount = 0;
            // scan backwards, counting nested close chars so we match the right open char
            for (int i = tokenpos - 1; i >= 0; i--)
            {
                token = tokens[i];
                if (XSharpLexer.IsOperator(token.Type))
                {
                    // only the first character of the operator text is compared;
                    // brace operators are single-character tokens
                    string text = token.Text;
                    if (text[0] == close)
                    {
                        closeCount++;
                    }
                    if (text[0] == open)
                    {
                        if (closeCount > 0)
                        {
                            // this open char belongs to a nested close char we already passed
                            closeCount--;
                        }
                        else
                        {
                            // unbalanced open char: this is the match
                            pairSpan = new SnapshotSpan(startPoint.Snapshot, token.StartIndex + offset, 1);
                            return (true);
                        }
                    }
                }
            }
        }
        return (false);
    }
    catch (System.Exception ex)
    {
        // brace matching is best effort: never let it crash the editor
        System.Diagnostics.Debug.WriteLine(ex.Message);
    }
    return (false);
}
static XSharpKeywords()
{
    // Walk the lexer vocabulary once and collect the symbolic names of all
    // keyword token types. FIRST_/LAST_ entries are range markers from the
    // grammar, not real keywords, so they are excluded.
    var vocabulary = LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.DefaultVocabulary;
    for (int tokenType = 1; tokenType < vocabulary.MaxTokenType; tokenType++)
    {
        if (!XSharpLexer.IsKeyword(tokenType))
        {
            continue;
        }
        string symbolicName = vocabulary.GetSymbolicName(tokenType);
        bool isRangeMarker = symbolicName.StartsWith("FIRST_") || symbolicName.StartsWith("LAST_");
        if (!isRangeMarker)
        {
            _keywords.Add(symbolicName);
        }
    }
}
/// <summary>
/// Lexes (and, when needed, preprocesses) the source text and returns the
/// filled lexer token stream through <paramref name="tokens"/>.
/// Errors collected during lexing are forwarded to <paramref name="listener"/>.
/// </summary>
/// <returns>True when a token stream was produced.</returns>
public static bool Lex(string sourceText, string fileName, CSharpParseOptions options, IErrorListener listener, out ITokenStream tokens)
{
    tokens = null;
    var parseErrors = ParseErrorData.NewBag();
    try
    {
        var lexer = XSharpLexer.Create(sourceText, fileName, options);
        lexer.Options = options;
        var tokenStream = lexer.GetTokenStream();
        tokenStream.Fill();
        // the raw lexer stream is what the caller receives, even when preprocessing happens below
        tokens = tokenStream;
        GetLexerErrors(lexer, tokenStream, parseErrors);
        #region Determine if we need to preprocess
        bool mustPreprocess = true;
        if (options.NoStdDef)
        {
            // without standard defs the preprocessor is only needed when the
            // source itself requires it (UDCs, #include etc.) or contains PP tokens
            mustPreprocess = lexer.MustBeProcessed || lexer.HasPreprocessorTokens;
        }
        #endregion
        XSharpPreprocessor pp = null;
        BufferedTokenStream ppStream = null;
        pp = new XSharpPreprocessor(lexer, tokenStream, options, fileName, Encoding.Unicode, SourceHashAlgorithm.None, parseErrors);
        if (mustPreprocess)
        {
            var ppTokens = pp.PreProcess();
            ppStream = new CommonTokenStream(new XSharpListTokenSource(lexer, ppTokens));
        }
        else
        {
            // No Standard Defs and no preprocessor tokens in the lexer
            // so we bypass the preprocessor and use the lexer token stream
            ppStream = new CommonTokenStream(new XSharpListTokenSource(lexer, tokenStream.GetTokens()));
        }
        // NOTE(review): ppStream is filled but not returned to the caller;
        // presumably this is run for its side effects (errors in parseErrors) — confirm.
        ppStream.Fill();
    }
    catch (Exception)
    {
        // deliberate best-effort: any failure simply leaves 'tokens' as produced so far
        // (null when lexing itself failed), which the return value reports
    }
    ReportErrors(parseErrors, listener);
    return (tokens != null);
}
/// <summary>
/// Lexes and parses the given source text and returns the number of
/// parse errors reported by the error listener.
/// </summary>
/// <param name="code">The X# source text to analyze.</param>
/// <param name="showErrors">Passed to the error listener to control whether errors are displayed.</param>
/// <returns>The total number of errors encountered while parsing.</returns>
static int AnalyzeCode(string code, bool showErrors)
{
    // 'code' is already a string; the original called code.ToString(), a no-op.
    var stream = new AntlrInputStream(code);
    var lexer = new XSharpLexer(stream);
    lexer.Options = new CSharpParseOptions();
    var tokens = new CommonTokenStream(lexer);
    var parser = new XSharpParser(tokens);
    // parser must use the same options the lexer was configured with
    parser.Options = lexer.Options;
    var errorListener = new XSharpErrorListener(showErrors);
    parser.AddErrorListener(errorListener);
    // parse from the grammar's start rule; the tree itself is not needed here,
    // only the errors collected by the listener
    parser.source();
    return (errorListener.TotalErrors);
}
/// <summary>
/// Parses the current source file and returns the methods it declares,
/// collected by walking the parse tree with a MethodListener.
/// </summary>
List<MethodInfo> GetMethodInfos()
{
    var fileLexer = XSharpLexer.Create(SourceCodeFile.SourceCode, SourceCodeFile.FileName);
    fileLexer.RemoveErrorListeners();
    var stream = new CommonTokenStream(fileLexer, 0);
    var fileParser = new XSharpParser(stream);
    fileParser.Options = new XSharpParseOptions();
    fileParser.Options.SetXSharpSpecificOptions(XSharpSpecificCompilationOptions.Default);
    fileParser.RemoveErrorListeners();
    var parseTree = fileParser.source();
    var methodCollector = new MethodListener();
    new ParseTreeWalker().Walk(methodCollector, parseTree);
    return methodCollector.MethodList;
}
/// <summary>
/// Parse the current Snapshot and rebuild the Tag List: one classification
/// tag per lexer token (keyword, constant, brace, operator, identifier,
/// comment), plus the tags produced by the tagger itself.
/// </summary>
private void Colorize()
{
    var snapshot = this.Buffer.CurrentSnapshot;
    Snapshot = snapshot;
    ITokenStream TokenStream = null;
    // parse for positional keywords that change the colors
    // and get a reference to the tokenstream
    string path = String.Empty;
    if (txtdocfactory != null)
    {
        ITextDocument doc = null;
        if (txtdocfactory.TryGetTextDocument(this.Buffer, out doc))
        {
            path = doc.FilePath;
        }
    }
    // Parse the source and get the (Lexer) Tokenstream to locate comments, keywords and other tokens.
    // The parser will identify (positional) keywords that are used as identifier
    xsTagger.Parse(snapshot, out TokenStream, path);
    if (TokenStream != null)
    {
        tags.Clear();
        for (var iToken = 0; iToken < TokenStream.Size; iToken++)
        {
            var token = TokenStream.Get(iToken);
            var tokenType = token.Type;
            // span covers the whole token (StopIndex is inclusive, hence +1)
            TextSpan tokenSpan = new TextSpan(token.StartIndex, token.StopIndex - token.StartIndex + 1);
            if (XSharpLexer.IsKeyword(tokenType))
            {
                tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpKeywordType));
            }
            else if (XSharpLexer.IsConstant(tokenType))
            {
                tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpConstantType));
            }
            else if (XSharpLexer.IsOperator(tokenType))
            {
                // braces get dedicated open/close tags (used for brace matching);
                // every other operator gets the generic operator tag
                switch (tokenType)
                {
                    case LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.LPAREN:
                    case LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.LCURLY:
                    case LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.LBRKT:
                        tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpBraceOpenType));
                        break;
                    case LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.RPAREN:
                    case LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.RCURLY:
                    case LanguageService.CodeAnalysis.XSharp.SyntaxParser.XSharpLexer.RBRKT:
                        tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpBraceCloseType));
                        break;
                    default:
                        tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpOperatorType));
                        break;
                }
            }
            else if (XSharpLexer.IsIdentifier(tokenType))
            {
                tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpIdentifierType));
            }
            else if (XSharpLexer.IsComment(tokenType))
            {
                tags.Add(tokenSpan.ToTagSpan(snapshot, xsharpCommentType));
            }
        }
        // append the tags the tagger computed during Parse (e.g. keywords used as identifiers)
        foreach (var tag in xsTagger.Tags)
        {
            tags.Add(tag);
        }
        // notify listeners that the entire snapshot was re-tagged
        if (TagsChanged != null)
        {
            TagsChanged(this, new SnapshotSpanEventArgs(new SnapshotSpan(Buffer.CurrentSnapshot, 0, this.Buffer.CurrentSnapshot.Length)));
        }
    }
}
/// <summary>
/// True when the token can act as a name: an identifier, or any keyword
/// (keywords may be used as identifiers in many positions).
/// </summary>
internal static bool IsName(this IToken token)
{
    if (token == null)
    {
        return false;
    }
    return token.Type == XSharpLexer.ID || XSharpLexer.IsKeyword(token.Type);
}
/// <summary>
/// Maps one lexer token to a classification span, dispatching first on the
/// token's channel (preprocessor, inactive code, comments) and then on its
/// type for the default channel. Region start/stop spans for outlining are
/// appended to <paramref name="regionTags"/> as a side effect.
/// </summary>
/// <returns>The classification span, or null when the token gets no classification.</returns>
private ClassificationSpan ClassifyToken(IToken token, IList<ClassificationSpan> regionTags, ITextSnapshot snapshot)
{
    var tokenType = token.Type;
    ClassificationSpan result = null;
    switch (token.Channel)
    {
        case XSharpLexer.PRAGMACHANNEL:         // #pragma
        case XSharpLexer.PREPROCESSORCHANNEL:   // #define, #ifdef etc
            result = Token2ClassificationSpan(token, snapshot, xsharpPPType);
            // region directives also open/close outlining regions
            switch (token.Type)
            {
                case XSharpLexer.PP_REGION:
                case XSharpLexer.PP_IFDEF:
                case XSharpLexer.PP_IFNDEF:
                    regionTags.Add(Token2ClassificationSpan(token, snapshot, xsharpRegionStart));
                    break;
                case XSharpLexer.PP_ENDREGION:
                case XSharpLexer.PP_ENDIF:
                    regionTags.Add(Token2ClassificationSpan(token, snapshot, xsharpRegionStop));
                    break;
                default:
                    break;
            }
            break;
        case XSharpLexer.DEFOUTCHANNEL: // code in an inactive #ifdef
            result = Token2ClassificationSpan(token, snapshot, xsharpInactiveType);
            break;
        case XSharpLexer.XMLDOCCHANNEL:
        case XSharpLexer.Hidden:
            if (XSharpLexer.IsComment(token.Type))
            {
                result = Token2ClassificationSpan(token, snapshot, xsharpCommentType);
                // multi-line comments (text contains a CR) are collapsible regions
                if (token.Type == XSharpLexer.ML_COMMENT && token.Text.IndexOf("\r") >= 0)
                {
                    regionTags.Add(Token2ClassificationSpan(token, snapshot, xsharpRegionStart));
                    regionTags.Add(Token2ClassificationSpan(token, snapshot, xsharpRegionStop));
                }
            }
            break;
        default: // Normal channel
            IClassificationType type = null;
            if (XSharpLexer.IsIdentifier(tokenType))
            {
                type = xsharpIdentifierType;
            }
            else if (XSharpLexer.IsConstant(tokenType))
            {
                // constants split into strings, keyword-like literals, VO literals and numbers
                switch (tokenType)
                {
                    case XSharpLexer.STRING_CONST:
                    case XSharpLexer.CHAR_CONST:
                    case XSharpLexer.ESCAPED_STRING_CONST:
                    case XSharpLexer.INTERPOLATED_STRING_CONST:
                        type = xsharpStringType;
                        break;
                    case XSharpLexer.FALSE_CONST:
                    case XSharpLexer.TRUE_CONST:
                        type = xsharpKeywordType;
                        break;
                    case XSharpLexer.VO_AND:
                    case XSharpLexer.VO_NOT:
                    case XSharpLexer.VO_OR:
                    case XSharpLexer.VO_XOR:
                    case XSharpLexer.SYMBOL_CONST:
                    case XSharpLexer.NIL:
                        type = xsharpLiteralType;
                        break;
                    default:
                        // the null-literal range is colored as keyword; everything else is numeric
                        if ((tokenType >= XSharpLexer.FIRST_NULL) && (tokenType <= XSharpLexer.LAST_NULL))
                        {
                            type = xsharpKeywordType;
                            break;
                        }
                        else
                        {
                            type = xsharpNumberType;
                        }
                        break;
                }
            }
            else if (XSharpLexer.IsKeyword(tokenType))
            {
                type = xsharpKeywordType;
            }
            else if (XSharpLexer.IsOperator(tokenType))
            {
                switch (tokenType)
                {
                    case XSharpLexer.LPAREN:
                    case XSharpLexer.LCURLY:
                    case XSharpLexer.LBRKT:
                        type = xsharpBraceOpenType;
                        break;
                    case XSharpLexer.RPAREN:
                    case XSharpLexer.RCURLY:
                    case XSharpLexer.RBRKT:
                        type = xsharpBraceCloseType;
                        break;
                    default:
                        type = xsharpOperatorType;
                        break;
                }
            }
            if (type != null)
            {
                result = Token2ClassificationSpan(token, snapshot, type);
            }
            break;
    }
    return (result);
}
/// <summary>
/// Returns the first keyword (or preprocessor directive) in the given span
/// of a line, in UPPERCASE. Modifiers are skipped, attribute content in
/// [brackets] is skipped, and a leading comment yields its 2-char marker.
/// Returns "" when nothing qualifying is found.
/// </summary>
private String getFirstKeywordInLine(ITextSnapshotLine line, int start, int length)
{
    String keyword = "";
    var tokens = getTokensInLine(line.Snapshot, start, length);
    bool inAttribute = false;
    //
    if (tokens.Count > 0)
    {
        int index = 0;
        while (index < tokens.Count)
        {
            var token = tokens[index];
            // skip whitespace tokens
            if (token.Type == XSharpLexer.WS)
            {
                index++;
                continue;
            }
            // reset per token: 'keyword' only survives this iteration when we break out
            keyword = "";
            if (XSharpLexer.IsKeyword(token.Type) || (token.Type >= XSharpLexer.PP_FIRST && token.Type <= XSharpLexer.PP_LAST))
            {
                keyword = token.Text.ToUpper();
                // it could be a modifier (PRIVATE, STATIC, ...): skip and keep scanning
                if (keywordIsModifier(token.Type))
                {
                    index++;
                    continue;
                }
                else
                {
                    // keyword found
                    break;
                }
            }
            else if (XSharpLexer.IsComment(token.Type))
            {
                // return the comment marker ("//", "/*", ...)
                keyword = token.Text;
                if (keyword.Length >= 2)
                {
                    keyword = keyword.Substring(0, 2);
                }
                break;
            }
            else if (XSharpLexer.IsOperator(token.Type))
            {
                keyword = token.Text;
                // brackets toggle attribute mode so that e.g. [Obsolete] before
                // an entity does not terminate the scan
                if (token.Type == XSharpLexer.LBRKT)
                {
                    inAttribute = true;
                    index++;
                    continue;
                }
                else if (token.Type == XSharpLexer.RBRKT)
                {
                    inAttribute = false;
                    index++;
                    continue;
                }
                // any other operator falls through to the final break,
                // returning its text as the "keyword"
            }
            else
            {
                if (inAttribute)
                {
                    // Skip all content inside the attribute brackets
                    index++;
                    continue;
                }
            }
            break;
        }
    }
    return (keyword);
}
/// <summary>
/// Parses a file from disk, first in fast SLL prediction mode and — if that
/// is canceled — again in full LL mode, then walks the tree for additional
/// error analysis and prints all collected errors to the console.
/// </summary>
/// <param name="fileName">Path of the source file to parse</param>
private static void Parse(string fileName)
{
    ITokenStream stream;
    IList<ParseErrorData> parseErrors = ParseErrorData.NewBag();
    var filestream = new AntlrFileStream(fileName);
    var lexer = new XSharpLexer(filestream);
    lexer.TokenFactory = XSharpTokenFactory.Default;
    stream = new CommonTokenStream(lexer, Lexer.DefaultTokenChannel);
    var parser = new XSharpParser(stream);
    parser.IsScript = false;
    parser.AllowFunctionInsideClass = false;
    parser.AllowNamedArgs = false;
    parser.AllowXBaseVariables = false;
    parser.RemoveErrorListeners();
    // first attempt: fast SLL prediction
    parser.Interpreter.PredictionMode = PredictionMode.Sll;
    parser.Interpreter.reportAmbiguities = true;
    parser.Interpreter.enable_global_context_dfa = true; // default false
    parser.Interpreter.optimize_tail_calls = true;
    parser.Interpreter.tail_call_preserves_sll = true;
    //parser.Interpreter.userWantsCtxSensitive = true; // default true
    parser.ErrorHandler = new XSharpErrorStrategy();
    parser.AddErrorListener(new XSharpErrorListener(fileName, parseErrors, true));
    XSharpParserRuleContext tree;
    try
    {
        tree = parser.source();
    }
    catch (ParseCanceledException)
    {
        // SLL failed: report its errors, then retry with the slower but
        // more precise LL prediction mode on a reset parser
        Console.WriteLine("Parse error, Errors from SLL mode");
        showErrors(parseErrors);
        parseErrors.Clear();
        parser.ErrorHandler = new XSharpErrorStrategy();
        parser.AddErrorListener(new XSharpErrorListener(fileName, parseErrors, true));
        parser.Interpreter.PredictionMode = PredictionMode.Ll;
        parser.Interpreter.force_global_context = true;
        parser.Interpreter.optimize_ll1 = false;
        parser.Interpreter.reportAmbiguities = true;
        parser.Reset();
        try
        {
            tree = parser.source();
        }
        catch (Exception e)
        {
            // LL failed as well: give up on the tree but still show errors below
            tree = null;
            Console.WriteLine(e.Message);
        }
    }
    // add lexer errors to the same bag as the parser errors
    foreach (var e in lexer.LexErrors)
    {
        parseErrors.Add(e);
    }
    // find parser errors (missing tokens etc)
    var walker = new ParseTreeWalker();
    var errchecker = new XSharpParseErrorAnalysis(parser, parseErrors);
    if (tree != null)
    {
        walker.Walk(errchecker, tree);
    }
    Console.WriteLine("Parse error, Errors:");
    showErrors(parseErrors);
}
/// <summary>
/// Full parse pipeline: lex, optionally preprocess, then parse — first in
/// fast SLL mode with a bail-out strategy, retrying in LL mode with error
/// recovery when SLL fails. Errors are forwarded to <paramref name="listener"/>.
/// </summary>
/// <returns>True when a parse tree was produced.</returns>
public static bool Parse(string sourceText, string fileName, CSharpParseOptions options, IErrorListener listener, out ITokenStream tokens, out XSharpParser.SourceContext tree)
{
    tree = null;
    tokens = null;
    var parseErrors = ParseErrorData.NewBag();
    try
    {
        var lexer = XSharpLexer.Create(sourceText, fileName, options);
        lexer.Options = options;
        BufferedTokenStream tokenStream = lexer.GetTokenStream();
        tokenStream.Fill();
        tokens = (ITokenStream)tokenStream;
        GetLexerErrors(lexer, tokenStream, parseErrors);
        // do we need to preprocess
        #region Determine if we really need the preprocessor
        bool mustPreprocess = true;
        if (lexer.HasPreprocessorTokens || !options.NoStdDef)
        {
            // no need to pre process in partial compilation
            // if lexer does not contain UDCs, Messages or Includes
            mustPreprocess = lexer.MustBeProcessed;
        }
        else
        {
            mustPreprocess = false;
        }
        #endregion
        XSharpPreprocessor pp = null;
        BufferedTokenStream ppStream = null;
        pp = new XSharpPreprocessor(lexer, tokenStream, options, fileName, Encoding.Unicode, SourceHashAlgorithm.None, parseErrors);
        if (mustPreprocess)
        {
            var ppTokens = pp.PreProcess();
            ppStream = new CommonTokenStream(new XSharpListTokenSource(lexer, ppTokens));
        }
        else
        {
            // No Standard Defs and no preprocessor tokens in the lexer
            // so we bypass the preprocessor and use the lexer token stream
            ppStream = new CommonTokenStream(new XSharpListTokenSource(lexer, tokenStream.GetTokens()));
        }
        ppStream.Fill();
        var parser = new XSharpParser(ppStream);
        parser.Interpreter.tail_call_preserves_sll = false; // default = true Setting to FALSE will reduce memory used by parser
        parser.Options = options;
        tree = null;
        parser.RemoveErrorListeners();
        // first attempt: SLL prediction with BailErrorStrategy (throws on first error)
        parser.Interpreter.PredictionMode = PredictionMode.Sll;
        parser.ErrorHandler = new BailErrorStrategy();
        try
        {
            tree = parser.source();
        }
        catch (Exception)
        {
            // SLL bailed out: retry with LL prediction and a recovering error
            // strategy, this time collecting errors through the listener
            var errorListener = new XSharpErrorListener(fileName, parseErrors);
            parser.AddErrorListener(errorListener);
            parser.ErrorHandler = new XSharpErrorStrategy();
            parser.Interpreter.PredictionMode = PredictionMode.Ll;
            ppStream.Reset();
            parser.Reset();
            try
            {
                tree = parser.source();
            }
            catch (Exception)
            {
                tree = null;
            }
        }
    }
    catch (Exception)
    {
        // lexing/preprocessing failure: no tree, errors (if any) reported below
        tree = null;
    }
    ReportErrors(parseErrors, listener);
    return (tree != null);
}
/// <summary>
/// Returns the tokens that make up the expression under the cursor, for use
/// by completion/goto-definition. Keywords collapse to a single token;
/// otherwise the statement's token list is rebuilt with balanced
/// paren/curly/bracket pairs and truncated at the first token that cannot be
/// part of a member-access expression.
/// </summary>
internal static IList<XSharpToken> GetTokensUnderCursor(XSharpSearchLocation location, out CompletionState state)
{
    // only tokens on the default channel matter (no whitespace/comments)
    var tokens = GetTokenList(location, out state, true, true).Where((t) => t.Channel == XSharpLexer.DefaultTokenChannel).ToList();
    // Find "current" token
    if (tokens.Count > 0)
    {
        // default to the last token when none spans the cursor position
        var tokenUnderCursor = tokens.Count - 1;
        for (int i = tokens.Count - 1; i >= 0; i--)
        {
            var token = tokens[i];
            if (token.StartIndex <= location.Position && token.StopIndex >= location.Position)
            {
                tokenUnderCursor = i;
                break;
            }
        }
        var selectedToken = tokens[tokenUnderCursor];
        var nextToken = tokenUnderCursor < tokens.Count - 1 ? tokens[tokenUnderCursor + 1] : null;
        bool done = false;
        switch (selectedToken.Type)
        {
            // these keywords are followed by '(' in expression position:
            // keep the whole list so the call can be analyzed
            case XSharpLexer.NAMEOF:
            case XSharpLexer.TYPEOF:
            case XSharpLexer.SIZEOF:
            case XSharpLexer.SELF:
            case XSharpLexer.SUPER:
                if (nextToken != null && nextToken.Type == XSharpLexer.LPAREN)
                {
                    return (tokens);
                }
                break;
            default:
                // any other keyword under the cursor: the keyword itself is the answer
                if (XSharpLexer.IsKeyword(selectedToken.Type))
                {
                    tokens.Clear();
                    tokens.Add(selectedToken);
                    return (tokens);
                }
                break;
        }
        // When we are not on a Keyword then we need to walk back in the tokenlist to see
        // if we can evaluate the expression
        // This could be:
        // System.String.Compare()   // static method call or method call
        // SomeVar:MethodCall()      // method call
        // Left(...)                 // function call
        // SomeId                    // local, global etc
        // SomeType.Id               // Static property or normal property
        // SomeVar:Id                // Instance field or property
        // If the token list contains a RCURLY, RBRKT or RPAREN
        // Then strip everything until the matching LCURLY, LBRKT or LPAREN is found
        var list = new XSharpTokenList(tokens);
        tokens = new List<XSharpToken>();
        while (!list.Eoi())
        {
            var token = list.ConsumeAndGet();
            switch (token.Type)
            {
                case XSharpLexer.LCURLY:
                    // keep only the open and matching close token; drop the contents
                    tokens.Add(token);
                    if (list.Contains(XSharpLexer.RCURLY))
                    {
                        // this may return false when the RCURLY belongs to another LCURLY
                        if (list.ConsumeUntilEndToken(XSharpLexer.RCURLY, out var endToken))
                        {
                            tokens.Add(endToken);
                        }
                    }
                    break;
                case XSharpLexer.LPAREN:
                    tokens.Add(token);
                    if (list.Contains(XSharpLexer.RPAREN))
                    {
                        // this may return false when the RPAREN belongs to another LPAREN
                        if (list.ConsumeUntilEndToken(XSharpLexer.RPAREN, out var endToken))
                        {
                            tokens.Add(endToken);
                        }
                    }
                    break;
                case XSharpLexer.LBRKT:
                    tokens.Add(token);
                    if (list.Contains(XSharpLexer.RBRKT))
                    {
                        // this may return false when the RBRKT belongs to another LBRKT
                        if (list.ConsumeUntilEndToken(XSharpLexer.RBRKT, out var endToken))
                        {
                            tokens.Add(endToken);
                        }
                    }
                    break;
                case XSharpLexer.DOT:
                case XSharpLexer.COLON:
                case XSharpLexer.SELF:
                case XSharpLexer.SUPER:
                    // member-access connectors stay in the list unconditionally
                    tokens.Add(token);
                    break;
                default:
                    tokens.Add(token);
                    // operators and non-positional keywords mark the end of the expression
                    if (XSharpLexer.IsOperator(token.Type))
                    {
                        done = true;
                    }
                    if (token.Type == XSharpLexer.VAR)
                    {
                        done = true;
                    }
                    else if (XSharpLexer.IsKeyword(token.Type) && !XSharpLexer.IsPositionalKeyword(token.Type))
                    {
                        done = true;
                    }
                    break;
            }
        }
        // now result has the list of tokens starting with the cursor
        // we only keep:
        // ID, DOT, COLON, LPAREN, LBRKT, RBRKT
        // when we detect another token we truncate the list there
        if (tokens.Count > 0)
        {
            var lastType = tokens[0].Type;
            for (int i = tokenUnderCursor + 1; i < tokens.Count && !done; i++)
            {
                var token = tokens[i];
                switch (token.Type)
                {
                    case XSharpLexer.ID:
                    case XSharpLexer.DOT:
                    case XSharpLexer.COLON:
                    case XSharpLexer.LPAREN:
                    case XSharpLexer.LCURLY:
                    case XSharpLexer.LBRKT:
                        lastType = tokens[i].Type;
                        break;
                    case XSharpLexer.LT:
                        // ID followed by < ... > is a generic type argument list:
                        // keep it up to and including the GT, truncate the rest
                        int gtPos = findTokenInList(tokens, i + 1, XSharpLexer.GT);
                        if (lastType == XSharpLexer.ID && gtPos > 0)
                        {
                            gtPos += 1;
                            tokens.RemoveRange(gtPos, tokens.Count - gtPos);
                            done = true;
                            break;
                        }
                        else
                        {
                            goto default;
                        }
                    default:
                        tokens.RemoveRange(i, tokens.Count - i);
                        done = true;
                        break;
                }
            }
        }
    }
    // check for extra lparen, lcurly at the end
    // NOTE(review): 'count' equals tokens.Count here, so 'count < tokens.Count - 2'
    // can never be true and this cleanup block is dead code — verify intent.
    int count = tokens.Count;
    if (count > 2 && count < tokens.Count - 2)
    {
        if (tokens[count - 2].Type == XSharpLexer.LPAREN)
        {
            switch (tokens[count - 1].Type)
            {
                case XSharpLexer.LPAREN:
                case XSharpLexer.LCURLY:
                    tokens.RemoveAt(count - 1);
                    break;
            }
        }
    }
    return (tokens);
}
/// <summary>
/// Builds the list of tokens relevant for completion at the given location
/// and classifies what kinds of completions apply there (namespaces, types,
/// members, constructors, ...) in <paramref name="state"/>.
/// Statement separators restart the list; balanced paren/curly/bracket pairs
/// that close before the cursor have their contents removed.
/// </summary>
/// <param name="location">Cursor position plus project context</param>
/// <param name="state">Receives the completion categories valid at the cursor</param>
/// <param name="includeKeywords">When true, keywords are kept in the result (used by code completion)</param>
/// <param name="underCursor">When true, tokens spanning the cursor are treated as "hit" and kept</param>
internal static List<XSharpToken> GetTokenList(XSharpSearchLocation location, out CompletionState state, bool includeKeywords = false, bool underCursor = false)
{
    location = AdjustStartLineNumber(location);
    var line = getLineFromBuffer(location);
    //
    state = CompletionState.General;
    if (line.Count == 0)
    {
        return (line);
    }
    // if the token appears after comma or paren then strip the tokens
    // now look forward and find the first token that is on or after the triggerpoint
    var result = new List<XSharpToken>();
    var last = XSharpLexer.Eof;
    // '.' may access instance members only when the project dialect allows it
    bool allowdot = location.Project?.ParseOptions?.AllowDotForInstanceMembers ?? false;
    var cursorPos = location.Position;
    var done = false;
    var list = new XSharpTokenList(line);
    while (!done && !list.Eoi())
    {
        var token = list.ConsumeAndGet();
        // openToken/closeToken are set when a closing brace before the cursor
        // should have its bracketed contents removed at the end of the iteration
        int openToken = 0;
        XSharpToken closeToken = null;
        bool isHit = token.StartIndex <= cursorPos && token.StopIndex >= cursorPos && underCursor;
        bool isNotLast = token.StopIndex < location.Position - 1;
        if (token.StartIndex > cursorPos)
        {
            // after the cursor we only include the open tokens
            // so we can see if the id under the cursor is a method, constructor etc
            switch (token.Type)
            {
                case XSharpLexer.LPAREN:
                case XSharpLexer.LCURLY:
                case XSharpLexer.LBRKT:
                    break;
                case XSharpLexer.LT:
                    // if this is a generic type
                    // then add the complete type argument list (ID/type followed by ',' or '>')
                    bool first = true;
                    bool endoflist = false;
                    while (!endoflist)
                    {
                        endoflist = true;
                        if (list.La1 == XSharpLexer.ID || XSharpLexer.IsType(list.La1))
                        {
                            if (list.La2 == XSharpLexer.GT || list.La2 == XSharpLexer.COMMA)
                            {
                                if (first)
                                {
                                    result.Add(token);
                                    first = false;
                                }
                                result.Add(list.ConsumeAndGet()); // la1
                                result.Add(list.ConsumeAndGet()); // la2
                                endoflist = false;
                            }
                        }
                    }
                    continue;
                default:
                    done = true;
                    break;
            }
            if (done)
            {
                continue;
            }
        }
        switch (token.Type)
        {
            // after these tokens we "restart" the list
            case XSharpLexer.EOS:
                if (token.Position < cursorPos && token != line.Last())
                {
                    // an EOS inside a line before the cursor
                    // so there are 2 or more statements on the same line
                    // clear the first statement
                    result.Clear();
                    state = CompletionState.General;
                }
                else
                {
                    // Exit loop, ignore the rest of the statements
                    done = true;
                }
                continue;
            case XSharpLexer.WS:
            case XSharpLexer.Eof:
                continue;
            case XSharpLexer.TO:
            case XSharpLexer.UPTO:
            case XSharpLexer.DOWNTO:
            case XSharpLexer.IN:
                if (!isHit)
                {
                    result.Clear();
                    if (isNotLast) // there has to be a space after the token
                    {
                        state = CompletionState.General;
                    }
                    else
                    {
                        state = CompletionState.None;
                    }
                }
                else
                {
                    result.Add(token);
                }
                break;
            case XSharpLexer.LCURLY:
                // '{' starts an object creation expression
                state = CompletionState.Constructors;
                result.Add(token);
                break;
            case XSharpLexer.LPAREN:
                state = CompletionState.StaticMembers | CompletionState.InstanceMembers;
                result.Add(token);
                break;
            case XSharpLexer.LBRKT:
                state = CompletionState.Brackets;
                result.Add(token);
                break;
            case XSharpLexer.ID:
            case XSharpLexer.NAMEOF:
            case XSharpLexer.TYPEOF:
            case XSharpLexer.SIZEOF:
                result.Add(token);
                break;
            case XSharpLexer.RCURLY:
            case XSharpLexer.RPAREN:
            case XSharpLexer.RBRKT:
                bool add = true;
                if (result.Count > 0 && token == list.LastOrDefault)
                {
                    var lasttoken = result.Last();
                    if (lasttoken.Type == XSharpLexer.COLON || lasttoken.Type == XSharpLexer.DOT)
                    {
                        // closing char after colon or dot
                        add = false;
                        done = true;
                    }
                }
                if (add)
                {
                    result.Add(token);
                    // delete everything between parens, curly braces and brackets closing token before cursor pos
                    if (token.Position < location.Position)
                    {
                        closeToken = token;
                        if (token.Type == XSharpLexer.RCURLY)
                        {
                            openToken = XSharpLexer.LCURLY;
                        }
                        else if (token.Type == XSharpLexer.RPAREN)
                        {
                            openToken = XSharpLexer.LPAREN;
                        }
                        else if (token.Type == XSharpLexer.RBRKT)
                        {
                            openToken = XSharpLexer.LBRKT;
                        }
                    }
                }
                break;
            case XSharpLexer.STATIC: // These tokens are all before a namespace of a (namespace dot) type
                if (isNotLast) // there has to be a space after the token
                {
                    state = CompletionState.General;
                }
                else
                {
                    state = CompletionState.None;
                }
                break;
            case XSharpLexer.USING:
                if (isNotLast) // there has to be a space after the token
                {
                    if (list.Expect(XSharpLexer.STATIC))
                    {
                        // USING STATIC <type>
                        state = CompletionState.Namespaces | CompletionState.Types;
                        result.Clear();
                    }
                    else if (list.La1 == XSharpLexer.ID)
                    {
                        // USING <namespace>
                        state = CompletionState.Namespaces;
                        result.Clear();
                    }
                }
                break;
            case XSharpLexer.MEMBER:
                if (isNotLast) // there has to be a space after the token
                {
                    state = CompletionState.StaticMembers;
                }
                else
                {
                    state = CompletionState.None;
                }
                break;
            case XSharpLexer.AS:
            case XSharpLexer.IS:
            case XSharpLexer.REF:
            case XSharpLexer.INHERIT:
                // a type name is expected after these keywords
                if (!isHit)
                {
                    result.Clear();
                }
                else
                {
                    result.Add(token);
                }
                if (isNotLast) // there has to be a space after the token
                {
                    state = CompletionState.Namespaces | CompletionState.Types;
                }
                else
                {
                    state = CompletionState.None;
                }
                break;
            case XSharpLexer.IMPLEMENTS:
                result.Clear();
                if (isNotLast)
                {
                    state = CompletionState.Namespaces | CompletionState.Interfaces;
                }
                else
                {
                    state = CompletionState.None;
                }
                break;
            case XSharpLexer.COLON:
                // ':' is the X# instance-member access operator
                state = CompletionState.InstanceMembers;
                result.Add(token);
                break;
            case XSharpLexer.DOT:
                if (!state.HasFlag(CompletionState.Namespaces))
                {
                    state = CompletionState.Namespaces | CompletionState.Types | CompletionState.StaticMembers;
                    if (allowdot)
                    {
                        state |= CompletionState.InstanceMembers;
                    }
                }
                result.Add(token);
                break;
            case XSharpLexer.QMARK:
                if (result.Count != 0) // when at start of line then do not add. Otherwise it might be a Nullable type or conditional access expression
                {
                    result.Add(token);
                }
                break;
            case XSharpLexer.QQMARK:
                if (result.Count != 0) // when at start of line then do not add. Otherwise it might be a binary expression
                {
                    result.Add(token);
                }
                break;
            case XSharpLexer.BACKSLASH:
            case XSharpLexer.BACKBACKSLASH:
                // this should only be seen at start of line
                // clear the list to be sure
                result.Clear();
                break;
            case XSharpLexer.NAMESPACE:
                state = CompletionState.Namespaces;
                break;
            case XSharpLexer.COMMA:
            case XSharpLexer.ASSIGN_OP:
            case XSharpLexer.COLONCOLON:
            case XSharpLexer.SELF:
            case XSharpLexer.SUPER:
                state = CompletionState.General;
                result.Add(token);
                break;
            default:
                state = CompletionState.General;
                if (XSharpLexer.IsOperator(token.Type))
                {
                    result.Add(token);
                }
                else if (XSharpLexer.IsType(token.Type))
                {
                    result.Add(token);
                }
                else if (XSharpLexer.IsConstant(token.Type))
                {
                    result.Add(token);
                }
                else if (XSharpLexer.IsKeyword(token.Type) && includeKeywords) // For code completion we want to include keywords
                {
                    token.Text = XSettings.FormatKeyword(token.Text);
                    result.Add(token);
                }
                break;
        }
        last = token.Type;
        // remove everything between parens, curly braces or brackets when the closing token is before the cursor
        if (openToken != 0 && closeToken != null)
        {
            // scan back to the close token we just appended ...
            var iLast = result.Count - 1;
            int count = 0;
            while (iLast >= 0 && result[iLast] != closeToken)
            {
                iLast--;
            }
            int closeType = closeToken.Type;
            // ... then back to its matching open token, counting nesting,
            // and remove the interior while keeping the open/close pair itself
            while (iLast >= 0)
            {
                var type = result[iLast].Type;
                if (type == closeType)
                {
                    count += 1;
                }
                else if (type == openToken)
                {
                    count -= 1;
                    if (count == 0)
                    {
                        if (iLast < result.Count - 1)
                        {
                            result.RemoveRange(iLast + 1, result.Count - iLast - 2);
                        }
                        break;
                    }
                }
                iLast -= 1;
            }
        }
    }
    // when the list ends with a comma, drop the ending comma. Why ?
    if (result.Count > 0)
    {
        var end = result.Last();
        if (end.Type == XSharpLexer.COMMA)
        {
            result.RemoveAt(result.Count - 1);
        }
    }
    return (result);
}
/// <summary>
/// The main compilation front-end: lexes the source, runs the preprocessor
/// (unless compiling a macro script), parses in fast SLL mode with LL
/// fallback, checks the tree for errors, and transforms the ANTLR tree into
/// a Roslyn <c>CompilationUnitSyntax</c>. Internal failures at every stage
/// are converted into ERR_Internal entries so compilation can continue.
/// </summary>
internal CompilationUnitSyntax ParseCompilationUnitCore()
{
#if DEBUG && DUMP_TIMES
    DateTime t = DateTime.Now;
#endif
    if (_options.ShowIncludes)
    {
        _options.ConsoleOutput.WriteLine("Compiling {0}", _fileName);
    }
    var sourceText = _text.ToString();
    XSharpLexer lexer = null;
    XSharpPreprocessor pp = null;
    XSharpParserRuleContext tree = new XSharpParserRuleContext();
    XSharpParser parser = null;
    var parseErrors = ParseErrorData.NewBag();
    try
    {
        lexer = XSharpLexer.Create(sourceText, _fileName, _options);
        lexer.Options = _options;
        _lexerTokenStream = lexer.GetTokenStream();
    }
    catch (Exception e)
    {
        // Exception during Lexing
        parseErrors.Add(new ParseErrorData(_fileName, ErrorCode.ERR_Internal, e.Message, e.StackTrace));
        // create empty token stream so we can continue the rest of the code
        _lexerTokenStream = new BufferedTokenStream(new XSharpListTokenSource(lexer, new List<IToken>()));
    }
#if DEBUG && DUMP_TIMES
    {
        var ts = DateTime.Now - t;
        t += ts;
        Debug.WriteLine("Lexing completed in {0}", ts);
    }
#endif
    // do not pre-process when there were lexer exceptions
    if (lexer != null && parseErrors.Count == 0)
    {
        foreach (var e in lexer.LexErrors)
        {
            parseErrors.Add(e);
        }
        BufferedTokenStream ppStream = null;
        try
        {
            // Check for #pragma in the lexerTokenStream
            _lexerTokenStream.Fill();
            // macro scripts never use the preprocessor
            if (!_options.MacroScript)
            {
                pp = new XSharpPreprocessor(lexer, _lexerTokenStream, _options, _fileName, _text.Encoding, _text.ChecksumAlgorithm, parseErrors);
            }
            var mustPreprocess = !_options.MacroScript && (lexer.HasPreprocessorTokens || !_options.NoStdDef);
            if (mustPreprocess)
            {
                var ppTokens = pp.PreProcess();
                ppStream = new CommonTokenStream(new XSharpListTokenSource(lexer, ppTokens));
            }
            else
            {
                // No Standard Defs and no preprocessor tokens in the lexer
                // so we bypass the preprocessor and use the lexer tokenstream
                // but if a .ppo is required we must use the preprocessor to
                // write the source text to the .ppo file
                if (_options.PreprocessorOutput && pp != null)
                {
                    pp.writeToPPO(sourceText, false);
                }
                BufferedTokenStream ts = (BufferedTokenStream)_lexerTokenStream;
                var tokens = ts.GetTokens();
                // commontokenstream filters on tokens on the default channel. All other tokens are ignored
                ppStream = new CommonTokenStream(new XSharpListTokenSource(lexer, tokens));
            }
            ppStream.Fill();
            _preprocessorTokenStream = ppStream;
        }
        catch (Exception e)
        {
            // Exception during Preprocessing
            parseErrors.Add(new ParseErrorData(_fileName, ErrorCode.ERR_Internal, e.Message, e.StackTrace));
            // create empty token stream so we can continue the rest of the code
            _preprocessorTokenStream = new BufferedTokenStream(new XSharpListTokenSource(lexer, new List<IToken>()));
        }
    }
#if DEBUG && DUMP_TIMES
    {
        var ts = DateTime.Now - t;
        t += ts;
        Debug.WriteLine("Preprocessing completed in {0}", ts);
    }
#endif
    parser = new XSharpParser(_preprocessorTokenStream) { Options = _options };
    tree = new XSharpParserRuleContext();
    if (_options.ParseLevel != ParseLevel.Lex)
    {
        // When parsing in Sll mode we do not record any parser errors.
        // When this fails, then we try again with LL mode and then we record errors
        parser.RemoveErrorListeners();
        parser.Interpreter.PredictionMode = PredictionMode.Sll;
        // some options to have FAST parsing
        parser.Interpreter.tail_call_preserves_sll = false;
        parser.Interpreter.treat_sllk1_conflict_as_ambiguity = true;
        parser.ErrorHandler = new BailErrorStrategy();
        try
        {
            tree = buildTree(parser);
        }
        catch (ParseCanceledException e)
        {
            if (_options.Verbose)
            {
                string msg = _GetInnerExceptionMessage(e);
                _options.ConsoleOutput.WriteLine("Antlr: SLL parsing failed with failure: " + msg + ". Trying again in LL mode.");
            }
            var errorListener = new XSharpErrorListener(_fileName, parseErrors);
            parser.AddErrorListener(errorListener);
            parser.ErrorHandler = new XSharpErrorStrategy();
            // we need to set force_global_context to get proper error messages. This makes parsing slower
            // but gives better messages
            parser.Interpreter.treat_sllk1_conflict_as_ambiguity = false;
            parser.Interpreter.force_global_context = true;
            parser.Interpreter.enable_global_context_dfa = true;
            parser.Interpreter.PredictionMode = PredictionMode.Ll;
            _preprocessorTokenStream.Reset();
            if (_options.Verbose && pp != null)
            {
                pp.DumpStats();
            }
            if (pp != null)
            {
                pp.Close();
            }
            parser.Reset();
            try
            {
                tree = buildTree(parser);
            }
            catch (Exception e1)
            {
                // Cannot parse again. Must be a syntax error.
                if (_options.Verbose)
                {
                    string msg = _GetInnerExceptionMessage(e1);
                    _options.ConsoleOutput.WriteLine("Antlr: LL parsing also failed with failure: " + msg);
                }
            }
        }
    }// _options.ParseLevel < Complete
#if DEBUG && DUMP_TIMES
    {
        var ts = DateTime.Now - t;
        t += ts;
        Debug.WriteLine("Parsing completed in {0}", ts);
    }
#endif
    if (_options.DumpAST && tree != null)
    {
        // dump the tree, re-flowing the closing parens for readability
        string strTree = tree.ToStringTree();
        string file = System.IO.Path.ChangeExtension(_fileName, "ast");
        strTree = strTree.Replace(@"\r\n)))))", @"\r\n*)))))" + "\r\n");
        strTree = strTree.Replace(@"\r\n))))", @"\r\n*)))" + "\r\n");
        strTree = strTree.Replace(@"\r\n)))", @"\r\n*)))" + "\r\n");
        strTree = strTree.Replace(@"\r\n))", @"\r\n*))" + "\r\n");
        strTree = strTree.Replace(@"\r\n)", @"\r\n*)" + "\r\n");
        strTree = strTree.Replace(@"\r\n*)", @"\r\n)");
        System.IO.File.WriteAllText(file, strTree);
    }
    var walker = new ParseTreeWalker();
    if (_options.ParseLevel == ParseLevel.Complete)
    {
        // check for parser errors, such as missing tokens
        // This adds items to the parseErrors list for missing
        // tokens and missing keywords
        try
        {
            var errchecker = new XSharpParseErrorAnalysis(parser, parseErrors, _options);
            walker.Walk(errchecker, tree);
        }
        catch (Exception e)
        {
            parseErrors.Add(new ParseErrorData(_fileName, ErrorCode.ERR_Internal, e.Message, e.StackTrace));
        }
    }
    var treeTransform = CreateTransform(parser, _options, _pool, _syntaxFactory, _fileName);
    bool hasErrors = false;
    SyntaxToken eof = null;
    try
    {
        // real (non-warning) errors: skip the tree transform and attach the
        // errors as trivia on the EOF token instead
        if (_options.ParseLevel < ParseLevel.Complete || parser.NumberOfSyntaxErrors != 0 || (parseErrors.Count != 0 && parseErrors.Contains(p => !ErrorFacts.IsWarning(p.Code))))
        {
            eof = SyntaxFactory.Token(SyntaxKind.EndOfFileToken);
            // NOTE(review): pp is null for macro scripts; pp.IncludedFiles here
            // would throw in that case — verify MacroScript cannot reach this path.
            eof = AddLeadingSkippedSyntax(eof, ParserErrorsAsTrivia(parseErrors, pp.IncludedFiles));
            if (tree != null)
            {
                eof.XNode = new XTerminalNodeImpl(tree.Stop);
            }
            else
            {
                eof.XNode = new XTerminalNodeImpl(_lexerTokenStream.Get(_lexerTokenStream.Size - 1));
            }
            hasErrors = true;
        }
        if (!hasErrors)
        {
            // transform the ANTLR tree into Roslyn syntax
            try
            {
                walker.Walk(treeTransform, tree);
            }
            catch (Exception e)
            {
                parseErrors.Add(new ParseErrorData(_fileName, ErrorCode.ERR_Internal, e.Message, e.StackTrace));
            }
            eof = SyntaxFactory.Token(SyntaxKind.EndOfFileToken);
            if (!parseErrors.IsEmpty())
            {
                eof = AddLeadingSkippedSyntax(eof, ParserErrorsAsTrivia(parseErrors, pp.IncludedFiles));
            }
        }
        var result = _syntaxFactory.CompilationUnit(
            treeTransform.GlobalEntities.Externs,
            treeTransform.GlobalEntities.Usings,
            treeTransform.GlobalEntities.Attributes,
            treeTransform.GlobalEntities.Members, eof);
        // cross-link the Roslyn unit and the ANTLR tree and keep both token streams
        result.XNode = tree;
        tree.CsNode = result;
        result.XTokens = _lexerTokenStream;
        result.XPPTokens = _preprocessorTokenStream;
        result.HasDocComments = lexer.HasDocComments;
        if (!_options.MacroScript && !hasErrors)
        {
            result.InitProcedures = treeTransform.GlobalEntities.InitProcedures;
            result.Globals = treeTransform.GlobalEntities.Globals;
            result.PragmaWarnings = treeTransform.GlobalEntities.PragmaWarnings;
            result.PragmaOptions = treeTransform.GlobalEntities.PragmaOptions;
            result.IncludedFiles = pp?.IncludedFiles;
            result.FileWidePublics = treeTransform.GlobalEntities.FileWidePublics;
            result.HasPCall = treeTransform.GlobalEntities.HasPCall;
            result.NeedsProcessing = treeTransform.GlobalEntities.NeedsProcessing;
            if (_options.HasRuntime)
            {
                result.LiteralSymbols = ((XSharpTreeTransformationRT)treeTransform).LiteralSymbols;
                result.LiteralPSZs = ((XSharpTreeTransformationRT)treeTransform).LiteralPSZs;
            }
        }
        return (result);
    }
    finally
    {
#if DEBUG && DUMP_TIMES
        {
            var ts = DateTime.Now - t;
            t += ts;
            Debug.WriteLine("Tree transform completed in {0}", ts);
        }
#endif
        // always release pooled resources and close the preprocessor
        treeTransform.Free();
        if (pp != null)
        {
            pp.Close();
        }
    }
}
/// <summary>
/// Creates a token source over a pre-built token list, borrowing the
/// token factory from the supplied lexer so that any tokens synthesized
/// later (e.g. EOF) match the lexer's token type.
/// </summary>
/// <param name="lexer">Lexer whose <c>TokenFactory</c> is reused.</param>
/// <param name="tokens">The already-lexed tokens to serve.</param>
public XSharpListTokenSource(XSharpLexer lexer, IList<IToken> tokens) : base(tokens)
{
    TokenFactory = lexer.TokenFactory;
}
/// <summary>
/// Creates a token source over a pre-built token list with an explicit
/// source name, borrowing the token factory from the supplied lexer so
/// that any tokens synthesized later match the lexer's token type.
/// </summary>
/// <param name="lexer">Lexer whose <c>TokenFactory</c> is reused.</param>
/// <param name="tokens">The already-lexed tokens to serve.</param>
/// <param name="sourceName">Name reported as the origin of the tokens.</param>
public XSharpListTokenSource(XSharpLexer lexer, IList<IToken> tokens, string sourceName) : base(tokens, sourceName)
{
    TokenFactory = lexer.TokenFactory;
}
/// <summary>
/// Applies the user's keyword-case setting to a single token by replacing its
/// text in the edit session. Hidden-channel tokens, preprocessor tokens and
/// TEXT literal strings are never touched.
/// </summary>
/// <param name="editSession">Open text edit the replacement is written into.</param>
/// <param name="offSet">Offset of the token's line (or lexed span) within the
/// snapshot; added to <c>token.StartIndex</c> to get the absolute position.</param>
/// <param name="token">The token to (possibly) re-case.</param>
private void formatToken(ITextEdit editSession, int offSet, IToken token)
{
    // Never reformat hidden/preprocessor tokens or TEXT..ENDTEXT string content.
    if (token.Channel == XSharpLexer.Hidden || token.Channel == XSharpLexer.PREPROCESSORCHANNEL || token.Type == XSharpLexer.TEXT_STRING_CONST)
    {
        return;
    }
    bool syncKeyword = false;
    // Some exceptions are (pseudo) functions. These should not be formatted
    switch (token.Type)
    {
        case XSharpLexer.UDC_KEYWORD:
            // User-defined-command keywords follow their own setting.
            syncKeyword = XSettings.UDCKeywordCase;
            break;
        case XSharpLexer.NAMEOF:
        case XSharpLexer.SIZEOF:
        case XSharpLexer.TYPEOF:
            // these are keywords but should be excluded I think
            syncKeyword = false;
            break;
        case XSharpLexer.TRUE_CONST:
        case XSharpLexer.FALSE_CONST:
        case XSharpLexer.MACRO:
        case XSharpLexer.LOGIC_AND:
        case XSharpLexer.LOGIC_OR:
        case XSharpLexer.LOGIC_NOT:
        case XSharpLexer.LOGIC_XOR:
        case XSharpLexer.VO_AND:
        case XSharpLexer.VO_OR:
        case XSharpLexer.VO_NOT:
        case XSharpLexer.VO_XOR:
            // Literals and logical operators (.AND., .OR., ...) are case-synced too.
            syncKeyword = true;
            break;
        default:
            if (token.Type >= XSharpLexer.FIRST_NULL && token.Type <= XSharpLexer.LAST_NULL)
            {
                // All NULL_* variants (NULL_PTR, NULL_STRING, ...) are case-synced.
                syncKeyword = true;
            }
            else if (XSharpLexer.IsKeyword(token.Type))
            {
                // Skip '#' directives (e.g. #command); sync every other keyword.
                syncKeyword = token.Text[0] != '#';
            }
            break;
    }
    if (syncKeyword)
    {
        var keyword = token.Text;
        var transform = XSettings.FormatKeyword(keyword, _settings.KeywordCase);
        // Only touch the buffer when the casing actually changed.
        if (String.Compare(transform, keyword) != 0)
        {
            int startpos = offSet + token.StartIndex;
            // NOTE(review): Replace takes the length of the span being replaced;
            // transform.Length is only safe because case-mapping here preserves
            // length — keyword.Length would state the intent better. Confirm.
            editSession.Replace(startpos, transform.Length, transform);
        }
    }
    // Identifier-case synchronization: look up the identifier's declaration
    // (parameter, then local, then field/property) to find its declared casing.
    if (token.Type == XSharpLexer.ID && XSettings.IdentifierCase)
    {
        var identifier = token.CleanText();
        var lineNumber = getCurrentLine();
        var currentMember = _textView.FindMember();
        // No enclosing member found: nothing to resolve the identifier against.
        if (currentMember == null)
        {
            return;
        }
        IXVariableSymbol element = null;
        // Search in Parameters
        if (currentMember.Parameters != null)
        {
            element = currentMember.Parameters.Where(x => XSharpTokenTools.StringEquals(x.Name, identifier)).FirstOrDefault();
        }
        if (element == null)
        {
            // then Locals
            var location = new XSharpSearchLocation(currentMember.File, currentMember, null, lineNumber);
            var locals = currentMember.GetLocals(location);
            if (locals != null)
            {
                element = locals.Where(x => XSharpTokenTools.StringEquals(x.Name, identifier)).FirstOrDefault();
            }
            if (element == null)
            {
                // finally fields/properties of the enclosing type
                if (currentMember.Parent is IXTypeSymbol type)
                {
                    // NOTE(review): 'field' is assigned but never used, and 'element'
                    // is never consumed after resolution — no Replace is issued for
                    // identifiers. This branch looks unfinished (presumably it should
                    // rewrite the identifier to its declared casing). TODO confirm.
                    var field = XSharpLookup.SearchPropertyOrField(location, type, identifier, Modifiers.Private).FirstOrDefault();
                }
            }
        }
    }
}