// Wraps a Boo lexer over one line of source and builds positionMap: a lookup
// from tab-expanded (visual) columns back to raw source character positions.
public ScanningLexer(int tabSize, string source)
{
    source += " "; // No idea why but without this extra space the lexer sometimes throws an exception
    lexer = BooParser.CreateBooLexer(tabSize, "Line Scanner", new StringReader(source));

    var sourcePos = 0;  // raw index into source
    var mappedPos = 0;  // tab-expanded (visual) column
    var positionList = new List<int>();
    foreach (var c in source)
    {
        if (c == '\t')
        {
            // A tab occupies several visual columns; map each padding column
            // back to the tab's own source position, stopping one short so the
            // unconditional Add below lands mappedPos on the next tab stop.
            while (mappedPos % tabSize < tabSize - 1)
            {
                positionList.Add(sourcePos);
                mappedPos++;
            }
        }
        positionList.Add(sourcePos++);
        mappedPos++;
    }
    positionList.Add(sourcePos); // to map the <EOL> token
    positionMap = positionList.ToArray();
}
// Lexes the whole source, tracking the end position of each real token so the
// gap before the next token can be recorded as whitespace.
// NOTE(review): the running end position is advanced by CalculateEndpoint;
// presumably it also records spans/tokens as a side effect — confirm there,
// since this loop body itself only updates endLine/endIndex.
private void MapTokens(int tabSize, string source)
{
    var endLine = 0;   // line where the previous token ended (0-based)
    var endIndex = 0;  // column where the previous token ended (tab-expanded)
    var tokens = BooParser.CreateBooLexer(tabSize, "code stream", new StringReader(source));
    antlr.IToken token;
    while ((token = NextToken(tokens)).Type != BooLexer.EOF)
    {
        Tuple<int, int> endPoint;
        switch (token.Type)
        {
            // Synthetic/layout tokens: skip without moving the end position.
            case BooLexer.INDENT:
            case BooLexer.DEDENT:
            case BooLexer.EOL:
            case BooLexer.ESEPARATOR:
                continue;
            // Extra width 1 — presumably one quote character per side that
            // the token text excludes; verify against CalculateEndpoint.
            case BooLexer.SINGLE_QUOTED_STRING:
            case BooLexer.DOUBLE_QUOTED_STRING:
                endPoint = CalculateEndpoint(token, endLine, endIndex, 1);
                break;
            // Extra width 3 — three quote characters per side.
            case BooLexer.TRIPLE_QUOTED_STRING:
                endPoint = CalculateEndpoint(token, endLine, endIndex, 3);
                break;
            default:
                endPoint = CalculateEndpoint(token, endLine, endIndex, 0);
                break;
        }
        endLine = endPoint.Item1;   // Item1 = line of the token's end
        endIndex = endPoint.Item2;  // Item2 = column of the token's end
    }
    // token is now the EOF token: map its position and record any trailing
    // whitespace between the last real token and the end of the source.
    var sIndex = positionMap[token.getLine() - 1][token.getColumn() - 1];
    var sLine = token.getLine() - 1;
    if (sLine > endLine || sLine == endLine && sIndex > endIndex)
    {
        whitespaces.Add(new TextSpan { iStartLine = endLine, iStartIndex = endIndex, iEndLine = sLine, iEndIndex = sIndex });
    }
}
// Creates the line-scanning Boo lexer and precomputes positionMap, which
// translates tab-expanded (visual) columns back into raw source indices.
public ScanningLexer(int tabSize, string source)
{
    source += " "; // No idea why but without this extra space the lexer sometimes throws an exception
    lexer = BooParser.CreateBooLexer(tabSize, "Line Scanner", new StringReader(source));

    var map = new List<int>();
    int raw = 0;      // index into the raw source string
    int visual = 0;   // tab-expanded column
    foreach (var ch in source)
    {
        if (ch == '\t')
        {
            // Pad with the tab's own source index up to one column before the
            // next tab stop; the shared Add below fills the final column.
            while (visual % tabSize < tabSize - 1)
            {
                map.Add(raw);
                visual++;
            }
        }
        map.Add(raw++);
        visual++;
    }
    map.Add(raw); // to map the <EOL> token
    positionMap = map.ToArray();
}
// Lexes the whole source and populates:
//   - tokenMap:   one MappedToken per real token, keyed line*lineSize+column,
//                 in strictly increasing order;
//   - whitespaces: the spans between consecutive tokens (and after the last).
private void MapTokens(int tabSize, string source)
{
    var endLine = 0;   // line where the previous token ended (0-based)
    var endIndex = 0;  // column where the previous token ended (tab-expanded)
    var tokens = BooParser.CreateBooLexer(tabSize, "code stream", new StringReader(source));
    antlr.IToken token;
    while ((token = NextToken(tokens)).Type != BooLexer.EOF)
    {
        int length;
        switch (token.Type)
        {
            // Synthetic/layout tokens occupy no source span: skip them.
            case BooLexer.INDENT:
            case BooLexer.DEDENT:
            case BooLexer.EOL:
                continue;
            // +2: one quote character per side, which the token text
            // presumably excludes (mirrors the +6 triple-quote case).
            case BooLexer.SINGLE_QUOTED_STRING:
            case BooLexer.DOUBLE_QUOTED_STRING:
                length = token.getText().Length + 2;
                break;
            // +6: three quote characters per side.
            case BooLexer.TRIPLE_QUOTED_STRING:
                length = token.getText().Length + 6;
                break;
            default:
                length = token.getText().Length;
                break;
        }

        var startLine = token.getLine() - 1;
        var startIndex = positionMap[startLine][token.getColumn() - 1];
        RecordWhitespace(endLine, endIndex, startLine, startIndex);

        endIndex = positionMap[startLine][token.getColumn() - 1 + length];
        endLine = startLine;

        var cluster = new MappedToken(
            startLine * lineSize + startIndex,
            endIndex - startIndex);
        // Tokens must arrive in strictly increasing position order; a
        // violation means the position bookkeeping above is broken.
        // (Count property instead of the LINQ Count() extension.)
        if (tokenMap.Count > 0 && tokenMap[tokenMap.Count - 1].Index >= cluster.Index)
        {
            throw new ArgumentException("Token Mapping order");
        }
        tokenMap.Add(cluster);
    }

    // token is now the EOF token: record any trailing whitespace between the
    // last real token and the end of the source.
    RecordWhitespace(
        endLine,
        endIndex,
        token.getLine() - 1,
        positionMap[token.getLine() - 1][token.getColumn() - 1]);
}

// Adds the gap between the previous token's end and the next token's start to
// whitespaces, when that gap is non-empty.
private void RecordWhitespace(int endLine, int endIndex, int startLine, int startIndex)
{
    if (startLine > endLine || startLine == endLine && startIndex > endIndex)
    {
        whitespaces.Add(new TextSpan { iStartLine = endLine, iStartIndex = endIndex, iEndLine = startLine, iEndIndex = startIndex });
    }
}