/// <summary>
/// Parses an expression in text form for later evaluation.
/// </summary>
/// <param name="pszCode">The expression to be parsed.</param>
/// <param name="dwFlags">A combination of flags from the PARSEFLAGS enumeration that controls parsing.</param>
/// <param name="nRadix">The radix to be used in parsing any numerical information in pszCode.</param>
/// <param name="ppExpr">Returns the IDebugExpression2 object that represents the parsed expression, which is ready for binding and evaluation.</param>
/// <param name="pbstrError">Returns the error message if the expression contains an error.</param>
/// <param name="pichError">Returns the character index of the error in pszCode if the expression contains an error.</param>
/// <returns>If successful, returns S_OK; otherwise, returns an error code.</returns>
/// <remarks>
/// When this method is called, a debug engine (DE) should parse the expression and validate it for correctness.
/// The pbstrError and pichError parameters may be filled in if the expression is invalid.
///
/// Note that the expression is not evaluated, only parsed. A later call to the IDebugExpression2.EvaluateSync
/// or IDebugExpression2.EvaluateAsync methods evaluates the parsed expression.
/// </remarks>
public int ParseText(string pszCode, enum_PARSEFLAGS dwFlags, uint nRadix, out IDebugExpression2 ppExpr, out string pbstrError, out uint pichError)
{
    if (pszCode == null)
        throw new ArgumentNullException("pszCode");
    if (pszCode.Length == 0)
        throw new ArgumentException();

    // dwFlags=0 in the Immediate window
    if (dwFlags != enum_PARSEFLAGS.PARSE_EXPRESSION && dwFlags != 0)
        throw new NotImplementedException();

    try
    {
        var expressionInput = new ANTLRStringStream(pszCode);
        var expressionUnicodeInput = new JavaUnicodeStream(expressionInput);
        var expressionLexer = new Java2Lexer(expressionUnicodeInput);
        var expressionTokens = new CommonTokenStream(expressionLexer);
        var expressionParser = new Java2Parser(expressionTokens);
        IAstRuleReturnScope<CommonTree> result = expressionParser.standaloneExpression();
        ppExpr = new JavaDebugExpression(this, result.Tree, pszCode);
        pbstrError = null;
        pichError = 0;
        return VSConstants.S_OK;
    }
    catch (RecognitionException e)
    {
        ppExpr = null;
        pbstrError = e.Message;
        pichError = (uint)Math.Max(0, e.Index);
        return VSConstants.E_FAIL;
    }
}
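The following is a minimal caller-side usage sketch, not part of the listing above. It assumes a variable named expressionContext holding the IDebugExpressionContext2 implementation that defines ParseText; the expression text, radix, and the ReportParseFailure helper are purely illustrative.

// Hypothetical caller-side sketch of ParseText.
IDebugExpression2 expression;
string parseError;
uint errorIndex;

int hr = expressionContext.ParseText(
    "i + 1",                            // illustrative expression text
    enum_PARSEFLAGS.PARSE_EXPRESSION,
    10,                                 // decimal radix for numeric literals
    out expression,
    out parseError,
    out errorIndex);

if (hr != VSConstants.S_OK)
{
    // Parsing failed; parseError and errorIndex locate the problem in the input text.
    ReportParseFailure(parseError, errorIndex);   // hypothetical helper
}

// On success, 'expression' is parsed but not yet evaluated; evaluation happens
// later through IDebugExpression2.EvaluateSync or EvaluateAsync.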
protected override void ReParseImpl()
{
    Stopwatch timer = Stopwatch.StartNew();

    // lex the entire document to get the set of identifiers we'll need to classify
    ITextSnapshot snapshot = TextBuffer.CurrentSnapshot;
    var input = new SnapshotCharStream(snapshot, new Span(0, snapshot.Length));
    JavaUnicodeStream inputWrapper = new JavaUnicodeStream(input);
    var lexer = new Java2Lexer(inputWrapper);
    var tokens = new CommonTokenStream(lexer);
    tokens.Fill();

    List<IToken> nameKeywords = new List<IToken>();
    List<IToken> declColons = new List<IToken>();
    List<IToken> identifiers = new List<IToken>();

    HashSet<IToken> definitions = new HashSet<IToken>(TokenIndexEqualityComparer.Default);
    HashSet<IToken> references = new HashSet<IToken>(TokenIndexEqualityComparer.Default);

    GetLl2SymbolSets();

    while (tokens.LA(1) != CharStreamConstants.EndOfFile)
    {
        // covered by the double-sided check
        if (_definitionOnlySourceSet.Contains(tokens.LA(1)))
        {
            if (tokens.LA(2) == Java2Lexer.IDENTIFIER)
            {
                definitions.Add(tokens.LT(2));
            }
        }
        else if (_referenceOnlySourceSet.Contains(tokens.LA(1)))
        {
            if (tokens.LA(2) == Java2Lexer.IDENTIFIER)
            {
                references.Add(tokens.LT(2));
            }
        }

        if (_definitionOnlyFollowSet.Contains(tokens.LA(1)))
        {
            IToken previous = tokens.LT(-1);
            if (previous != null && previous.Type == Java2Lexer.IDENTIFIER)
            {
                definitions.Add(previous);
            }
        }
        else if (_referenceOnlyFollowSet.Contains(tokens.LA(1)))
        {
            IToken previous = tokens.LT(-1);
            if (previous != null && previous.Type == Java2Lexer.IDENTIFIER)
            {
                references.Add(previous);
            }
        }

        if (tokens.LA(2) == Java2Lexer.IDENTIFIER)
        {
            IntervalSet bothWaysFollowDefinition;
            IntervalSet bothWaysFollowReference;
            _definitionContextSet1.TryGetValue(tokens.LA(1), out bothWaysFollowDefinition);
            _referenceContextSet1.TryGetValue(tokens.LA(1), out bothWaysFollowReference);

            bool couldBeDef = bothWaysFollowDefinition != null && bothWaysFollowDefinition.Contains(tokens.LA(3));
            bool couldBeRef = bothWaysFollowReference != null && bothWaysFollowReference.Contains(tokens.LA(3));

            if (couldBeDef && !couldBeRef)
            {
                definitions.Add(tokens.LT(2));
            }
            else if (couldBeRef && !couldBeDef)
            {
                references.Add(tokens.LT(2));
            }
        }

        if (tokens.LA(3) == Java2Lexer.IDENTIFIER && _definitionSourceSet.Contains(tokens.LA(2)))
        {
            IntervalSet sourceDefinition2;
            IntervalSet sourceReference2;
            _definitionSourceSet2.TryGetValue(tokens.LA(2), out sourceDefinition2);
            _referenceSourceSet2.TryGetValue(tokens.LA(2), out sourceReference2);

            bool couldBeDef = sourceDefinition2 != null && sourceDefinition2.Contains(tokens.LA(1));
            bool couldBeRef = sourceReference2 != null && sourceReference2.Contains(tokens.LA(1));

            if (couldBeDef && !couldBeRef)
            {
                definitions.Add(tokens.LT(3));
            }
            else if (couldBeRef && !couldBeDef)
            {
                references.Add(tokens.LT(3));
            }
        }

        if (_definitionFollowSet.Contains(tokens.LA(1)))
        {
            declColons.Add(tokens.LT(1));
        }

        if (tokens.LA(1) == Java2Lexer.IDENTIFIER)
        {
            identifiers.Add(tokens.LT(1));
        }

        tokens.Consume();
    }

    foreach (var token in declColons)
    {
        tokens.Seek(token.TokenIndex);
        tokens.Consume();

        IToken potentialDeclaration = tokens.LT(-2);
        if (potentialDeclaration.Type != Java2Lexer.IDENTIFIER || definitions.Contains(potentialDeclaration) || references.Contains(potentialDeclaration))
        {
            continue;
        }

        bool agree = false;
        NetworkInterpreter interpreter = CreateVarDeclarationNetworkInterpreter(tokens, token.Type);

        while (interpreter.TryStepBackward())
        {
            if (interpreter.Contexts.Count == 0 || interpreter.Contexts.Count > 400)
            {
                break;
            }

            if (interpreter.Contexts.All(context => context.BoundedStart))
            {
                break;
            }

            interpreter.Contexts.RemoveAll(i => !IsConsistentWithPreviousResult(i, true, definitions, references));

            agree = AllAgree(interpreter.Contexts, potentialDeclaration.TokenIndex);
            if (agree)
            {
                break;
            }
        }

        interpreter.CombineBoundedStartContexts();

        if (!agree)
        {
            while (interpreter.TryStepForward())
            {
                if (interpreter.Contexts.Count == 0 || interpreter.Contexts.Count > 400)
                {
                    break;
                }

                if (interpreter.Contexts.All(context => context.BoundedEnd))
                {
                    break;
                }

                interpreter.Contexts.RemoveAll(i => !IsConsistentWithPreviousResult(i, false, definitions, references));

                agree = AllAgree(interpreter.Contexts, potentialDeclaration.TokenIndex);
                if (agree)
                {
                    break;
                }
            }

            interpreter.CombineBoundedEndContexts();
        }

        foreach (var context in interpreter.Contexts)
        {
            foreach (var transition in context.Transitions)
            {
                if (!transition.Symbol.HasValue)
                {
                    continue;
                }

                switch (transition.Symbol)
                {
                case Java2Lexer.IDENTIFIER:
                //case Java2Lexer.KW_THIS:
                    RuleBinding rule = interpreter.Network.StateRules[transition.Transition.TargetState.Id];
                    if (rule.Name == JavaAtnBuilder.RuleNames.SymbolReferenceIdentifier)
                    {
                        references.Add(tokens.Get(transition.TokenIndex.Value));
                    }
                    else if (rule.Name == JavaAtnBuilder.RuleNames.SymbolDefinitionIdentifier)
                    {
                        definitions.Add(tokens.Get(transition.TokenIndex.Value));
                    }

                    break;

                default:
                    continue;
                }
            }
        }
    }

    // tokens which are in both the 'definitions' and 'references' sets are actually unknown.
    HashSet<IToken> unknownIdentifiers = new HashSet<IToken>(definitions, TokenIndexEqualityComparer.Default);
    unknownIdentifiers.IntersectWith(references);
    definitions.ExceptWith(unknownIdentifiers);

#if true // set to true to mark all unknown identifiers as references (requires complete analysis of definitions)
    references = new HashSet<IToken>(identifiers, TokenIndexEqualityComparer.Default);
    references.ExceptWith(definitions);
    references.ExceptWith(unknownIdentifiers);
#else
    references.ExceptWith(unknownIdentifiers);

    // the full set of unknown identifiers are any that aren't explicitly classified as a definition or a reference
    unknownIdentifiers = new HashSet<IToken>(identifiers, TokenIndexEqualityComparer.Default);
    unknownIdentifiers.ExceptWith(definitions);
    unknownIdentifiers.ExceptWith(references);
#endif

    List<ITagSpan<IClassificationTag>> tags = new List<ITagSpan<IClassificationTag>>();

    IClassificationType definitionClassificationType = _classificationTypeRegistryService.GetClassificationType(JavaSymbolTaggerClassificationTypeNames.Definition);
    tags.AddRange(ClassifyTokens(snapshot, definitions, new ClassificationTag(definitionClassificationType)));

    IClassificationType referenceClassificationType = _classificationTypeRegistryService.GetClassificationType(JavaSymbolTaggerClassificationTypeNames.Reference);
    tags.AddRange(ClassifyTokens(snapshot, references, new ClassificationTag(referenceClassificationType)));

    IClassificationType unknownClassificationType = _classificationTypeRegistryService.GetClassificationType(JavaSymbolTaggerClassificationTypeNames.UnknownIdentifier);
    tags.AddRange(ClassifyTokens(snapshot, unknownIdentifiers, new ClassificationTag(unknownClassificationType)));

    _tags = tags;

    timer.Stop();

    IOutputWindowPane pane = OutputWindowService.TryGetPane(PredefinedOutputWindowPanes.TvlIntellisense);
    if (pane != null)
    {
        pane.WriteLine(string.Format("Finished classifying {0} identifiers in {1}ms: {2} definitions, {3} references, {4} unknown", identifiers.Count, timer.ElapsedMilliseconds, definitions.Count, references.Count, unknownIdentifiers.Count));
    }

    OnTagsChanged(new SnapshotSpanEventArgs(new SnapshotSpan(snapshot, new Span(0, snapshot.Length))));
}
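TokenIndexEqualityComparer is referenced above but not included in this listing. From its usage it presumably treats two IToken instances as equal when they occupy the same position in the token stream, so an identifier added through different lookahead calls is stored only once per HashSet. A sketch of what such a comparer might look like, assumed from usage rather than taken from the original project:

// Hypothetical sketch: equality based solely on the token's index in the stream.
// (Requires Antlr.Runtime for IToken and System.Collections.Generic for IEqualityComparer<T>.)
internal sealed class TokenIndexEqualityComparer : IEqualityComparer<IToken>
{
    public static readonly TokenIndexEqualityComparer Default = new TokenIndexEqualityComparer();

    public bool Equals(IToken x, IToken y)
    {
        if (x == null || y == null)
        {
            return x == y;
        }

        return x.TokenIndex == y.TokenIndex;
    }

    public int GetHashCode(IToken obj)
    {
        return obj != null ? obj.TokenIndex : 0;
    }
}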
private void ParseReferenceSourceFile(object state)
{
    string fileName = state as string;
    if (string.IsNullOrEmpty(fileName))
    {
        return;
    }

    try
    {
        if (!File.Exists(fileName))
        {
            return;
        }

        string sourceText = File.ReadAllText(fileName);
        var inputStream = new ANTLRStringStream(sourceText, fileName);
        var unicodeStream = new JavaUnicodeStream(inputStream);
        var lexer = new Java2Lexer(unicodeStream);
        var tokenStream = new CommonTokenStream(lexer);
        var parser = new Java2Parser(tokenStream);

        int[] lineOffsets = null;

        var outputWindow = OutputWindowService.TryGetPane(PredefinedOutputWindowPanes.TvlIntellisense);

        List<ParseErrorEventArgs> errors = new List<ParseErrorEventArgs>();
        parser.ParseError += (sender, e) =>
        {
            errors.Add(e);

            string message = e.Message;
            if (message.Length > 100)
            {
                message = message.Substring(0, 100) + " ...";
            }

            if (lineOffsets == null)
            {
                lineOffsets = FindLineOffsets(sourceText);
            }

            int line = Array.BinarySearch(lineOffsets, e.Span.Start);
            if (line < 0)
            {
                line = -line - 2;
            }

            int column;
            if (line >= lineOffsets.Length)
            {
                column = 0;
            }
            else
            {
                column = e.Span.Start - lineOffsets[line];
            }

            if (outputWindow != null)
            {
                outputWindow.WriteLine(string.Format("{0}({1},{2}): {3}", fileName, line + 1, column + 1, message));
            }

            if (errors.Count > 20)
            {
                throw new OperationCanceledException();
            }
        };

        var result = parser.compilationUnit();

        CodeFileBuilder fileBuilder = new CodeFileBuilder(fileName);

        var treeNodeStream = new CommonTreeNodeStream(result.Tree);
        treeNodeStream.TokenStream = tokenStream;
        var walker = new IntelliSenseCacheWalker(treeNodeStream);

        List<ParseErrorEventArgs> walkerErrors = new List<ParseErrorEventArgs>();
        walker.ParseError += (sender, e) =>
        {
            walkerErrors.Add(e);

            string message = e.Message;
            if (message.Length > 100)
            {
                message = message.Substring(0, 100) + " ...";
            }

            if (lineOffsets == null)
            {
                lineOffsets = FindLineOffsets(sourceText);
            }

            int line = Array.BinarySearch(lineOffsets, e.Span.Start);
            if (line < 0)
            {
                line = -line - 2;
            }

            int column;
            if (line >= lineOffsets.Length)
            {
                column = 0;
            }
            else
            {
                column = e.Span.Start - lineOffsets[line];
            }

            if (outputWindow != null)
            {
                outputWindow.WriteLine(string.Format("{0}({1},{2}): {3}", fileName, line + 1, column + 1, message));
            }

            if (walkerErrors.Count > 20)
            {
                throw new OperationCanceledException();
            }
        };

        walker.compilationUnit(fileBuilder);

        UpdateFile(fileBuilder);
    }
    catch (Exception e)
    {
        if (ErrorHandler.IsCriticalException(e))
        {
            throw;
        }
    }
}
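Because ParseReferenceSourceFile receives its file name through an object state parameter and swallows non-critical exceptions, it is presumably intended to run on a background thread. A minimal, hypothetical way to schedule it; the file path is illustrative only:

// The (object state) signature matches System.Threading.WaitCallback, so the
// method can be queued directly on the thread pool.
ThreadPool.QueueUserWorkItem(ParseReferenceSourceFile, @"C:\projects\example\src\Example.java");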