public override TokenAst TryMatch(CompilerContext context, ISourceStream source) {
  // Check the starting symbol
  if (!source.MatchSymbol(_startSymbol, false)) {
    return null;
  }
  source.Position += _startSymbol.Length;
  // Scan for the end symbol
  while (!source.EOF()) {
    int firstCharPos = source.Text.IndexOf(_endSymbol, source.Position);
    if (firstCharPos < 0) {
      // Hit EOF: a line comment may end at EOF; an unclosed block comment is an error
      source.Position = source.Text.Length;
      return _isLineComment
        ? TokenAst.Create(this, context, source.TokenStart, source.GetLexeme())
        : LRParser.CreateSyntaxErrorToken(context, source.TokenStart, "Unclosed comment block");
    }
    source.Position = firstCharPos;
    if (source.MatchSymbol(_endSymbol, false)) {
      source.Position += _endSymbol.Length;
      return TokenAst.Create(this, context, source.TokenStart, source.GetLexeme());
    }
    source.Position++;
  }
  throw new NotSupportedException();
}
public override TokenAst TryMatch(CompilerContext context, ISourceStream source) {
  // Check the starting symbol
  if (!source.MatchSymbol(_startSymbol, false)) return null;
  source.Position += _startSymbol.Length;
  // Scan for the end symbol
  while (!source.EOF()) {
    int firstCharPos = source.Text.IndexOf(_endSymbol, source.Position);
    if (firstCharPos < 0) {
      // Hit EOF: a line comment may end at EOF; an unclosed block comment is an error
      source.Position = source.Text.Length;
      if (_isLineComment)
        return TokenAst.Create(this, context, source.TokenStart, source.GetLexeme());
      else
        return Grammar.CreateSyntaxErrorToken(context, source.TokenStart, "Unclosed comment block");
    }
    source.Position = firstCharPos;
    if (source.MatchSymbol(_endSymbol, false)) {
      source.Position += _endSymbol.Length;
      return TokenAst.Create(this, context, source.TokenStart, source.GetLexeme());
    }
    source.Position++;
  }
  throw new NotSupportedException();
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  bool ignoreCase = !Grammar.CaseSensitive;
  //Check starting symbol
  if (!source.MatchSymbol(StartSymbol, ignoreCase)) {
    return null;
  }
  //Find end symbol
  source.Position += StartSymbol.Length;
  while (!source.EOF()) {
    int firstCharPos;
    if (EndSymbols.Count == 1) {
      firstCharPos = source.Text.IndexOf(EndSymbols[0], source.Position);
    } else {
      firstCharPos = source.Text.IndexOfAny(_endSymbolsFirsts, source.Position);
    }
    if (firstCharPos < 0) {
      source.Position = source.Text.Length;
      //if it is LineComment, it is ok to hit EOF without final line-break; just return all until end.
      if (_isLineComment) {
        return Token.Create(this, context, source.TokenStart, source.GetLexeme());
      } else {
        return Grammar.CreateSyntaxErrorToken(context, source.TokenStart, "Unclosed comment block");
      }
    }
    //We found a character that might start an end symbol; let's see if it is true.
    source.Position = firstCharPos;
    foreach (string endSymbol in EndSymbols) {
      if (source.MatchSymbol(endSymbol, ignoreCase)) {
        //We found end symbol
        source.Position += endSymbol.Length;
        return Token.Create(this, context, source.TokenStart, source.GetLexeme());
      } //if
    }
    source.Position++; //move to the next char and try again
  } //while
  return null; //never happens
} //method
protected virtual TokenAst CreateToken(CompilerContext context, ISourceStream source) {
  string lexeme = source.GetLexeme();
  TokenAst token = TokenAst.Create(this, context, source.TokenStart, lexeme, lexeme);
  return token;
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  Token result;
  if (context.ScannerState.Value != 0) {
    // we are continuing in line mode - restore internal env (none in this case)
    context.ScannerState.Value = 0;
  } else {
    //we are starting from scratch
    if (!BeginMatch(context, source)) {
      return null;
    }
  }
  result = CompleteMatch(context, source);
  if (result != null) {
    return result;
  }
  //if it is LineComment, it is ok to hit EOF without final line-break; just return all until end.
  if (_isLineComment) {
    return new Token(this, source.TokenStart, source.GetLexeme(), null);
  }
  if (context.Mode == CompileMode.VsLineScan) {
    return CreateIncompleteToken(context, source);
  }
  return context.CreateErrorTokenAndReportError(source.TokenStart, string.Empty, "Unclosed comment block");
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  bool ignoreCase = !Grammar.CaseSensitive;
  //Check starting symbol
  if (!source.MatchSymbol(StartSymbol, ignoreCase)) return null;
  //Find end symbol
  source.Position += StartSymbol.Length;
  while (!source.EOF()) {
    int firstCharPos;
    if (EndSymbols.Count == 1)
      firstCharPos = source.Text.IndexOf(EndSymbols[0], source.Position);
    else
      firstCharPos = source.Text.IndexOfAny(_endSymbolsFirsts, source.Position);
    if (firstCharPos < 0) {
      source.Position = source.Text.Length;
      if (_isLineComment) //if it is LineComment, it is ok to hit EOF without final line-break; just return all until end.
        return Token.Create(this, context, source.TokenStart, source.GetLexeme());
      else
        return Grammar.CreateSyntaxErrorToken(context, source.TokenStart, "Unclosed comment block");
    }
    //We found a character that might start an end symbol; let's see if it is true.
    source.Position = firstCharPos;
    foreach (string endSymbol in EndSymbols)
      if (source.MatchSymbol(endSymbol, ignoreCase)) {
        //We found end symbol
        source.Position += endSymbol.Length;
        return Token.Create(this, context, source.TokenStart, source.GetLexeme());
      } //if
    source.Position++; //move to the next char and try again
  } //while
  return null; //never happens
}
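// A minimal, self-contained sketch of the scanning strategy the TryMatch variants
// above share: jump to the first character that could begin an end symbol
// (IndexOf / IndexOfAny), verify the full symbol at that position, otherwise
// advance one character and retry. CommentSpan, FindCommentSpan and MatchesAt are
// hypothetical names used for illustration only; they are not part of the Irony API.
using System;
using System.Linq;

public static class CommentScanSketch {
  public sealed record CommentSpan(int Start, int Length, bool Closed);

  public static CommentSpan FindCommentSpan(string text, int position, string startSymbol, string[] endSymbols) {
    if (!MatchesAt(text, position, startSymbol)) return null; // no comment starts here
    int pos = position + startSymbol.Length;
    char[] endFirsts = endSymbols.Select(s => s[0]).Distinct().ToArray();
    while (pos < text.Length) {
      int candidate = text.IndexOfAny(endFirsts, pos);
      if (candidate < 0) break; // no possible end symbol left: unclosed comment
      foreach (string endSymbol in endSymbols)
        if (MatchesAt(text, candidate, endSymbol))
          return new CommentSpan(position, candidate + endSymbol.Length - position, Closed: true);
      pos = candidate + 1; // first char matched but the full symbol did not; keep scanning
    }
    return new CommentSpan(position, text.Length - position, Closed: false);
  }

  static bool MatchesAt(string text, int pos, string symbol) =>
    pos + symbol.Length <= text.Length &&
    string.CompareOrdinal(text, pos, symbol, 0, symbol.Length) == 0;
}

// Example: FindCommentSpan("x /* hi */ y", 2, "/*", new[] { "*/" })
// returns a closed span starting at 2 with length 8 ("/* hi */").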
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  // The regex match must start exactly at the current position
  Match m = _expression.Match(source.Text, source.Position);
  if (!m.Success || m.Index != source.Position) return null;
  source.Position += m.Length;
  string text = source.GetLexeme();
  return new Token(this, source.TokenStart, text, null);
}
protected virtual Token CreateToken(CompilerContext context, ISourceStream source, ScanDetails details) {
  string lexeme = source.GetLexeme();
  Token token = Token.Create(this, context, source.TokenStart, lexeme, details.Value);
  token.Details = details;
  return token;
}
private Token CreateIncompleteToken(CompilerContext context, ISourceStream source) {
  // Consume the rest of the source, mark the token as incomplete and
  // remember which multiline terminal to resume with on the next line
  source.Position = source.Text.Length;
  Token result = new Token(this, source.TokenStart, source.GetLexeme(), null);
  result.Flags |= TokenFlags.IsIncomplete;
  context.ScannerState.TerminalIndex = this.MultilineIndex;
  return result;
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  Match m = _expression.Match(source.Text, source.Position);
  if (!m.Success) return null;
  // Note: this variant does not verify m.Index and advances m.Length + 1 characters
  source.Position += m.Length + 1;
  string text = source.GetLexeme();
  return Token.Create(this, context, source.TokenStart, text);
}
/* private static List<string> _firsts = new List<string>() { "'", "\"", "@" }; */
#endregion

#region Init
public override TokenAst TryMatch(CompilerContext context, ISourceStream source) {
  bool isVerbatim = false;
  int start = source.Position;
  if (source.CurrentChar == '@') {
    isVerbatim = true;
    source.Position++;
    start++;
  }
  if (IsCurrentQuote(source)) {
    source.Position++;
    start++;
  } else
    return null;
  while (!source.EOF()) {
    if (!isVerbatim) {
      if (source.CurrentChar == '\\') {
        //TODO: Escape processing
        source.Position += 2;
        continue;
      }
      if (LRParser.LineTerminators.IndexOf(source.CurrentChar) >= 0) return null;
    }
    if (IsCurrentQuote(source)) break;
    source.Position++;
  }
  if (IsCurrentQuote(source))
    source.Position++;
  else
    return null;
  string lexeme = source.GetLexeme();
  string body = source.Text.Substring(start, source.Position - start - 1);
  //TODO: handle this in escape processing
  if (!isVerbatim)
    body = body.Replace("\\'", "'").Replace("\\\"", "\"").Replace("\\\\", "\\");
  TokenAst token = TokenAst.Create(this, context, source.TokenStart, lexeme, body);
  return token;
  //return Grammar.CreateSyntaxErrorToken(context, source.TokenStart, "Failed to convert the value");
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  char current = source.CurrentChar;
  if (!LineTerminators.Contains(current)) return null;
  //Treat \r\n as a single terminator
  bool doExtraShift = (current == '\r' && source.NextChar == '\n');
  source.Position++; //main shift
  if (doExtraShift) source.Position++;
  Token result = new Token(this, source.TokenStart, source.GetLexeme(), null);
  return result;
}
protected virtual Token CreateToken(CompilerContext context, ISourceStream source, CompoundTokenDetails details) {
  string lexeme = source.GetLexeme();
  Token token = new Token(this, source.TokenStart, lexeme, details.Value);
  token.Details = details;
  if (details.IsPartial) {
    token.Flags |= TokenFlags.IsIncomplete;
  }
  return token;
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  Match m = _expression.Match(source.Text, source.Position);
  if (!m.Success) {
    return null;
  }
  // Note: this variant does not verify m.Index and advances m.Length + 1 characters
  source.Position += m.Length + 1;
  string text = source.GetLexeme();
  return Token.Create(this, context, source.TokenStart, text);
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  // The regex match must start exactly at the current position
  Match m = _expression.Match(source.Text, source.Position);
  if (!m.Success || m.Index != source.Position) {
    return null;
  }
  source.Position += m.Length;
  string text = source.GetLexeme();
  return new Token(this, source.TokenStart, text, null);
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  Token result;
  if (context.ScannerState.Value != 0) {
    // we are continuing in line mode - restore internal env (none in this case)
    context.ScannerState.Value = 0;
  } else {
    //we are starting from scratch
    if (!BeginMatch(context, source)) return null;
  }
  result = CompleteMatch(context, source);
  if (result != null) return result;
  //if it is LineComment, it is ok to hit EOF without final line-break; just return all until end.
  if (_isLineComment) return new Token(this, source.TokenStart, source.GetLexeme(), null);
  if (context.Mode == CompileMode.VsLineScan) return CreateIncompleteToken(context, source);
  return context.CreateErrorTokenAndReportError(source.TokenStart, string.Empty, "Unclosed comment block");
}
protected override Token QuickParse(CompilerContext context, ISourceStream source) {
  if (AllFirstChars.IndexOf(source.CurrentChar) < 0) {
    return null;
  }
  source.Position++;
  while (AllChars.IndexOf(source.CurrentChar) >= 0 && !source.EOF()) {
    source.Position++;
  }
  //if it is not a terminator then cancel; we need to go through full algorithm
  if (_terminators.IndexOf(source.CurrentChar) < 0) {
    return null;
  }
  string text = source.GetLexeme();
  return Token.Create(this, context, source.TokenStart, text);
}
public override Token TryMatch(CompilerContext context, ISourceStream source) {
  char current = source.CurrentChar;
  if (!LineTerminators.Contains(current)) {
    return null;
  }
  //Treat \r\n as a single terminator
  bool doExtraShift = (current == '\r' && source.NextChar == '\n');
  source.Position++; //main shift
  if (doExtraShift) {
    source.Position++;
  }
  Token result = new Token(this, source.TokenStart, source.GetLexeme(), null);
  return result;
}
private Token CompleteMatch(CompilerContext context, ISourceStream source) {
  //Find end symbol
  while (!source.EOF()) {
    int firstCharPos;
    if (EndSymbols.Count == 1) {
      firstCharPos = source.Text.IndexOf(EndSymbols[0], source.Position);
    } else {
      firstCharPos = source.Text.IndexOfAny(_endSymbolsFirsts, source.Position);
    }
    if (firstCharPos < 0) {
      source.Position = source.Text.Length;
      return null; //indicating error
    }
    //We found a character that might start an end symbol; let's see if it is true.
    source.Position = firstCharPos;
    foreach (string endSymbol in EndSymbols) {
      if (source.MatchSymbol(endSymbol, !OwnerGrammar.CaseSensitive)) {
        //We found end symbol; eat end symbol only if it is not line comment.
        // For line comment, leave LF symbol there, it might be important to have a separate LF token
        if (!_isLineComment) {
          source.Position += endSymbol.Length;
        }
        return new Token(this, source.TokenStart, source.GetLexeme(), null);
      } //if
    } //foreach endSymbol
    source.Position++; //move to the next char and try again
  } //while
  return null; //might happen if we found a start char of end symbol, but not the full endSymbol
} //method
/*
 * private static List<string> _firsts = new List<string>() { "'", "\"", "@" };
 */
#endregion

#region Init
public override TokenAst TryMatch(CompilerContext context, ISourceStream source) {
  bool isVerbatim = false;
  int start = source.Position;
  if (source.CurrentChar == '@') {
    isVerbatim = true;
    source.Position++;
    start++;
  }
  if (IsCurrentQuote(source)) {
    source.Position++;
    start++;
  } else {
    return null;
  }
  while (!source.EOF()) {
    if (!isVerbatim) {
      if (source.CurrentChar == '\\') {
        //TODO: Escape processing
        source.Position += 2;
        continue;
      }
      if (LRParser.LineTerminators.IndexOf(source.CurrentChar) >= 0) {
        return null;
      }
    }
    if (IsCurrentQuote(source)) {
      break;
    }
    source.Position++;
  }
  if (IsCurrentQuote(source)) {
    source.Position++;
  } else {
    return null;
  }
  string lexeme = source.GetLexeme();
  string body = source.Text.Substring(start, source.Position - start - 1);
  //TODO: handle this in escape processing
  if (!isVerbatim) {
    body = body.Replace("\\'", "'").Replace("\\\"", "\"").Replace("\\\\", "\\");
  }
  TokenAst token = TokenAst.Create(this, context, source.TokenStart, lexeme, body);
  return token;
  //return Grammar.CreateSyntaxErrorToken(context, source.TokenStart, "Failed to convert the value");
}
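// A rough, self-contained sketch of the string-literal scan above: a leading '@'
// selects verbatim mode (no escapes, line breaks allowed), otherwise '\' escapes the
// next character and an unterminated or multi-line literal is rejected.
// ReadStringLiteral is a hypothetical helper for illustration, not part of the Irony API.
using System;
using System.Text;

public static class StringLiteralSketch {
  // Returns the decoded body, or null if no well-formed literal starts at 'position'.
  public static string ReadStringLiteral(string text, ref int position) {
    int pos = position;
    bool isVerbatim = pos < text.Length && text[pos] == '@';
    if (isVerbatim) pos++;
    if (pos >= text.Length || (text[pos] != '"' && text[pos] != '\'')) return null;
    char quote = text[pos++];
    var body = new StringBuilder();
    while (pos < text.Length && text[pos] != quote) {
      char ch = text[pos];
      if (!isVerbatim) {
        if (ch == '\\' && pos + 1 < text.Length) { body.Append(text[pos + 1]); pos += 2; continue; }
        if (ch == '\n' || ch == '\r') return null; // line break inside a non-verbatim literal
      }
      body.Append(ch);
      pos++;
    }
    if (pos >= text.Length) return null; // no closing quote found
    position = pos + 1;                  // consume the closing quote
    return body.ToString();
  }
}

// Example: int p = 0; ReadStringLiteral(@"'a\'b' tail", ref p) returns a'b and leaves p = 6.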
private Token CreateIncompleteToken(CompilerContext context, ISourceStream source) {
  // Consume the rest of the source, mark the token as incomplete and
  // remember which multiline terminal to resume with on the next line
  source.Position = source.Text.Length;
  Token result = new Token(this, source.TokenStart, source.GetLexeme(), null);
  result.Flags |= TokenFlags.IsIncomplete;
  context.ScannerState.TerminalIndex = this.MultilineIndex;
  return result;
}
protected virtual TokenAst CreateToken(CompilerContext context, ISourceStream source) {
  string lexeme = source.GetLexeme();
  TokenAst token = TokenAst.Create(this, context, source.TokenStart, lexeme, lexeme);
  return token;
}
private Token CompleteMatch(CompilerContext context, ISourceStream source) {
  //Find end symbol
  while (!source.EOF()) {
    int firstCharPos;
    if (EndSymbols.Count == 1)
      firstCharPos = source.Text.IndexOf(EndSymbols[0], source.Position);
    else
      firstCharPos = source.Text.IndexOfAny(_endSymbolsFirsts, source.Position);
    if (firstCharPos < 0) {
      source.Position = source.Text.Length;
      return null; //indicating error
    }
    //We found a character that might start an end symbol; let's see if it is true.
    source.Position = firstCharPos;
    foreach (string endSymbol in EndSymbols) {
      if (source.MatchSymbol(endSymbol, !Grammar.CaseSensitive)) {
        //We found end symbol; eat end symbol only if it is not line comment.
        // For line comment, leave LF symbol there, it might be important to have a separate LF token
        if (!_isLineComment)
          source.Position += endSymbol.Length;
        return Token.Create(this, context, source.TokenStart, source.GetLexeme());
      } //if
    } //foreach endSymbol
    source.Position++; //move to the next char and try again
  } //while
  return null; //might happen if we found a start char of end symbol, but not the full endSymbol
}
private Token CreateIncompleteToken(CompilerContext context, ISourceStream source) {
  // Consume the rest of the source, mark the token as incomplete and
  // remember which multiline terminal kind to resume with on the next line
  source.Position = source.Text.Length;
  Token result = Token.Create(this, context, source.TokenStart, source.GetLexeme());
  result.Flags |= AstNodeFlags.IsIncomplete;
  context.ScannerState.TokenKind = this.MultilineKind;
  return result;
}
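// A loose sketch of the line-scan continuation pattern behind TryMatch /
// CreateIncompleteToken above: when a block comment does not close on the current
// line, emit an "incomplete" piece and record a flag so the next line resumes inside
// the comment. LineScanner is a hypothetical illustration under simplified
// assumptions ("/*" ... "*/" comments only); it is not how Irony's scanner is structured.
using System;
using System.Collections.Generic;

public sealed class LineScanner {
  bool _inBlockComment; // persists between ScanLine calls, like context.ScannerState

  public IEnumerable<(string Kind, string Text)> ScanLine(string line) {
    int pos = 0;
    if (_inBlockComment) {
      int close = line.IndexOf("*/", StringComparison.Ordinal);
      if (close < 0) { yield return ("comment-incomplete", line); yield break; }
      yield return ("comment", line.Substring(0, close + 2));
      _inBlockComment = false;
      pos = close + 2;
    }
    while (pos < line.Length) {
      int open = line.IndexOf("/*", pos, StringComparison.Ordinal);
      if (open < 0) { yield return ("code", line.Substring(pos)); yield break; }
      if (open > pos) yield return ("code", line.Substring(pos, open - pos));
      int close = line.IndexOf("*/", open + 2, StringComparison.Ordinal);
      if (close < 0) {
        _inBlockComment = true; // resume inside the comment on the next line
        yield return ("comment-incomplete", line.Substring(open));
        yield break;
      }
      yield return ("comment", line.Substring(open, close + 2 - open));
      pos = close + 2;
    }
  }
}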