/// <summary>
/// Parses a class definition: an identifier followed by a brace-delimited body
/// containing only function definitions and member-variable declarations.
/// </summary>
/// <param name="parser">Parser providing the token stream.</param>
/// <param name="current">Token that triggered this parselet.</param>
/// <returns>A ClassDefinitionStmt for the parsed class.</returns>
public override Statement Parse(Parser parser, Lexer.Token current)
{
    string name = parser.Current.Lexeme;
    parser.Consume("IDENTIFIER");
    parser.Consume("LEFTBRACE");

    var functions = new List<FunctionExpr>();
    var members = new List<VarDefinitionStmt>();

    // Check for the closing brace BEFORE parsing so an empty class body "{}"
    // is accepted; the original do/while attempted to parse a statement first.
    while (parser.Peek().Type != "RIGHTBRACE")
    {
        var statement = parser.ParseNext();
        if (statement is FunctionExpr function)
        {
            functions.Add(function);
        }
        else if (statement is VarDefinitionStmt member)
        {
            members.Add(member);
        }
        else
        {
            // ParseException for consistency with the other parselets in this parser.
            throw new ParseException("Unexpected statement type");
        }
    }
    parser.Consume("RIGHTBRACE");

    return(new ClassDefinitionStmt(new IdentifierExpr(name), members, functions));
}
/// <summary>
/// Parses a return statement: a single expression terminated by a semicolon.
/// </summary>
public override Statement Parse(Parser parser, Lexer.Token current)
{
    var returnValue = parser.ParseExpression(0);
    parser.Consume("SEMICOLON");
    return(new ReturnStmt(returnValue));
}
/// <summary>
/// Parses a variable definition of the form <c>name [: Type] [= initialiser];</c>.
/// A const declaration must be initialised; a non-const declaration may omit
/// the initialiser only when an explicit type annotation is present.
/// </summary>
public override Statement Parse(Parser parser, Lexer.Token current)
{
    var identifier = parser.Current.Lexeme;
    parser.Consume("IDENTIFIER");

    // Optional ": Type" annotation.
    string declaredType = null;
    if (parser.Peek().Type == "COLON")
    {
        declaredType = ParseTypeSpecified(parser);
    }

    // Optional "= expression" initialiser; absence is only legal for a
    // non-const definition that carries an explicit type.
    Expression initialiser = null;
    if (parser.ConsumeOptional("ASSIGNMENT"))
    {
        initialiser = parser.ParseExpression(0);
    }
    else if (_constVariables)
    {
        throw new ParseException("Const variable declarations must have an initialiser.");
    }
    else if (declaredType == null)
    {
        throw new ParseException("Type must be specified if not assigned at point of definition.");
    }

    parser.Consume("SEMICOLON");
    return(new VarDefinitionStmt(new IdentifierExpr(identifier), new IdentifierExpr(declaredType), _constVariables, initialiser));
}
/// <summary>
/// Parses a function definition. Two bodies are accepted: a brace-delimited
/// block (FunctionDefinitionExpr) or an "-&gt;" expression lambda terminated by
/// ';' (LambdaDefinitionExpr). The name is optional (anonymous functions).
/// </summary>
/// <param name="parser">Parser providing the token stream.</param>
/// <param name="current">Token that triggered this parselet.</param>
/// <exception cref="ParseException">Thrown when neither body form follows the parameter list.</exception>
public override Statement Parse(Parser parser, Lexer.Token current)
{
    // NOTE(review): name stays null for anonymous functions, so
    // IdentifierExpr below may receive null — presumably tolerated downstream;
    // confirm against IdentifierExpr.
    string name = null;
    if (parser.Peek().Type == "IDENTIFIER")
    {
        name = parser.Current.Lexeme;
        parser.Consume("IDENTIFIER");
    }

    parser.Consume("LEFTPAREN");
    var parameters = ParseParameterList(parser).ToArray();
    parser.Consume("RIGHTPAREN");

    var token = parser.Peek();
    if (token.Type == "LEFTBRACE")
    {
        // Block-bodied function; return type is not yet inferred.
        var body = parser.ParseNext();
        return(new FunctionDefinitionExpr(new IdentifierExpr(name), parameters, (Statement)body, new IdentifierExpr("UNKNOWN")));
    }
    else if (token.Type == "RIGHTARROW")
    {
        // Expression-bodied lambda: "-> expr;"
        parser.Consume("RIGHTARROW");
        var body = parser.ParseExpression(0);
        parser.Consume("SEMICOLON");
        return(new LambdaDefinitionExpr(new IdentifierExpr(name), parameters, body, new IdentifierExpr("UNKNOWN")));
    }
    // Fixed typo in error message ("defintion" -> "definition").
    throw new ParseException("Malformed function definition");
}
/// <summary>
/// Parses a 'member' declaration. Two shapes are accepted: a value member
/// (<c>member name = expr</c>) and a method-style member
/// (<c>member name [with args] [do block]</c>).
/// </summary>
public static Stmt.Member Member(this Parser parser)
{
    Lexer.Token name = parser.Consume("Expected identifier after 'member'", Lexer.Token.TokenType.IDENTIFIER);

    // Value member: "member name = <expr>".
    if (parser.Match(Lexer.Token.TokenType.EQUAL))
    {
        return(new Stmt.Member(name, parser.Comparison()));
    }

    // Method-style member: optional parameter list, optional body.
    Stmt.Comma parameters = parser.Match(Lexer.Token.TokenType.WITH) ? parser.Comma() : null;
    Stmt.Block body = parser.Match(Lexer.Token.TokenType.DO) ? parser.Block() : null;
    return(new Stmt.Member(name, body, parameters));
}
/// <summary>
/// Looks up an element of this class by the token's literal name.
/// </summary>
/// <param name="name">Token whose <c>Literal</c> is the element name.</param>
/// <returns>The stored element.</returns>
/// <exception cref="Interpreter.RuntimeError">Thrown when the element does not exist.</exception>
public TrashObject Get(Lexer.Token name)
{
    // TryGetValue avoids the ContainsKey + indexer double lookup.
    if (_elements.TryGetValue(name.Literal, out var element))
    {
        return(element);
    }
    throw new Interpreter.RuntimeError($"Attempting to access non-existent element {name.Literal} of class {Name}");
}
/// <summary>
/// Parses a JSON-style object body into a dictionary. Assumes the lexer is
/// positioned on the opening '{' (consumed immediately). Later duplicate keys
/// overwrite earlier ones.
/// </summary>
/// <returns>The parsed key/value map, or null on any error (after TriggerError
/// or when <c>errorMessage</c> was set by a nested parse).</returns>
private IDictionary<string, object> ParseObject()
{
    // Restructured from decompiled goto-based control flow (IL_56 / Block_4..6)
    // into the equivalent structured loop; behavior is unchanged.
    Dictionary<string, object> dictionary = new Dictionary<string, object>();
    this.lexer.NextToken(); // consume the opening '{'
    while (true)
    {
        Lexer.Token token = this.lexer.LookAhead();
        if (token == Lexer.Token.None)
        {
            // Input ended before the object was closed.
            this.TriggerError("Invalid token");
            return(null);
        }
        if (token == Lexer.Token.Comma)
        {
            // Skip separators between pairs.
            this.lexer.NextToken();
            continue;
        }
        if (token == Lexer.Token.CurlyClose)
        {
            this.lexer.NextToken(); // consume '}'
            return(dictionary);
        }

        // Key.
        string key = this.EvalLexer(this.lexer.ParseString());
        if (this.errorMessage != null)
        {
            return(null);
        }

        // ':' separator.
        token = this.lexer.NextToken();
        if (token != Lexer.Token.Colon)
        {
            this.TriggerError("Invalid token; expected ':'");
            return(null);
        }

        // Value.
        object value = this.ParseValue();
        if (this.errorMessage != null)
        {
            return(null);
        }
        dictionary[key] = value;
    }
}
/// <summary>
/// Matches one expected token, consuming it on success.
/// </summary>
/// <param name="Lex">The lexer.</param>
/// <param name="TokenToMatch">The token that must appear next.</param>
/// <exception cref="SyntaxException">Thrown when the current token differs from the expected one.</exception>
private static void MatchToken(Lexer Lex, Lexer.Token TokenToMatch)
{
    if (Lex.CurrentToken != TokenToMatch)
    {
        // Report position/line/row so the caller can point at the offending token.
        throw new SyntaxException(Lex.Position, Lex.Line, Lex.Row,
            String.Format("expect {0}, but found {1}.", Lexer.FormatToken(TokenToMatch), Lex.FormatCurrentToken()));
    }
    Lex.Next();
}
/// <summary>
/// Tries to match one token; advances the lexer only on success.
/// </summary>
/// <param name="Lex">The lexer.</param>
/// <param name="TokenToMatch">The token to try to match.</param>
/// <returns>True when the token matched (and was consumed), otherwise false.</returns>
private static bool TryMatchToken(Lexer Lex, Lexer.Token TokenToMatch)
{
    if (Lex.CurrentToken != TokenToMatch)
    {
        return(false);
    }
    Lex.Next();
    return(true);
}
/// <summary>
/// Defines (or redefines) a class under the token's literal name.
/// </summary>
/// <param name="name">Token whose <c>Literal</c> is the class name.</param>
/// <param name="value">The class to bind.</param>
public void Define(Lexer.Token name, Class value)
{
    // BUG FIX: the original condition was inverted (compare the TrashObject
    // overload of Define): Dictionary.Add() ran exactly when the key already
    // existed, so every class redefinition threw ArgumentException. The
    // indexer both inserts and overwrites, which is the intended
    // define-or-replace semantics.
    _classes[name.Literal] = value;
}
/// <summary>
/// Binds a value to the token's literal name, inserting a new entry or
/// overwriting an existing one.
/// </summary>
public void Define(Lexer.Token name, TrashObject value)
{
    // The dictionary indexer inserts-or-replaces, which is exactly the
    // add-or-overwrite the original ContainsKey branch expressed.
    _values[name.Literal] = value;
}
/// <summary>
/// Parses a 'new' expression: a class identifier optionally followed by
/// 'with' constructor arguments.
/// </summary>
public static Expr New(this Parser parser)
{
    Lexer.Token className = parser.Consume("Expected identifier after 'new'", Lexer.Token.TokenType.IDENTIFIER);
    Expr.Arg constructorArgs = parser.Match(Lexer.Token.TokenType.WITH) ? parser.Arg() : null;
    return(new Expr.New(className, constructorArgs));
}
/// <summary>
/// Parses a binary expression using priority climbing.
///
/// Degenerates to a unary expression when the priority is 0.
/// </summary>
/// <param name="Lex">The lexer.</param>
/// <param name="Priority">Operator priority level to parse at.</param>
/// <returns>The parsed expression node.</returns>
private static ASTNode_Expression ParseBinaryExpression(Lexer Lex, int Priority)
{
    // Base case: priority 0 falls through to a unary expression.
    if (Priority == 0)
    {
        return(ParseUnaryExpression(Lex));
    }

    // Recursively parse the left-hand expression at the next tighter level.
    ASTNode_Expression tRet = ParseBinaryExpression(Lex, Priority - 1);

    // Check whether the current token is a binary operator. Binary operator
    // tokens occupy a contiguous range starting at Token.Plus, indexed in
    // parallel with BinaryOperatorPriorityTable.
    if (Lex.CurrentToken >= Lexer.Token.Plus && (int)Lex.CurrentToken < (int)Lexer.Token.Plus + BinaryOperatorPriorityTable.Length)
    {
        int tPriority = BinaryOperatorPriorityTable[Lex.CurrentToken - Lexer.Token.Plus];
        if (tPriority > Priority) // operator binds looser than this level — return what we have
        {
            return(tRet);
        }
        else
        {
            Priority = tPriority;
        }
    }
    else
    {
        return(tRet);
    }

    // Loop, folding right-hand operands at the same priority; this makes the
    // operators left-associative.
    while (true)
    {
        // Check the priority of the next operator; stop when it is not a
        // binary operator of exactly this priority.
        Lexer.Token tOpt = Lex.CurrentToken;
        if (!(tOpt >= Lexer.Token.Plus && (int)tOpt < (int)Lexer.Token.Plus + BinaryOperatorPriorityTable.Length && BinaryOperatorPriorityTable[Lex.CurrentToken - Lexer.Token.Plus] == Priority))
        {
            break;
        }

        // Consume the operator.
        Lex.Next();

        // Parse the operand to the right of the operator.
        ASTNode_Expression tRight = ParseBinaryExpression(Lex, Priority - 1);

        // Combine into a binary AST node; the BinaryOp is derived from the
        // token's offset within the operator range.
        tRet = new ASTNode_BinaryExpression(Lex.Line, BinaryOp.Plus + (tOpt - Lexer.Token.Plus), tRet, tRight);
    }
    return(tRet);
}
/// <summary>
/// Parses a left-associative additive expression ('+' / '-') whose operands
/// are multiplicative expressions.
/// </summary>
public static Expr Addition(this Parser parser)
{
    Expr result = parser.Multiplication();
    // Fold each "+/- operand" pair into a left-leaning binary tree.
    while (parser.Match(Lexer.Token.TokenType.PLUS, Lexer.Token.TokenType.MINUS))
    {
        Lexer.Token operatorToken = parser.Previous();
        result = new Expr.Binary(result, operatorToken, parser.Multiplication());
    }
    return(result);
}
/// <summary>
/// Parses a left-associative multiplicative expression ('*' / '/') whose
/// operands are unary expressions.
/// </summary>
public static Expr Multiplication(this Parser parser)
{
    Expr result = parser.Unary();
    // Fold each "*// operand" pair into a left-leaning binary tree.
    while (parser.Match(Lexer.Token.TokenType.MULTIPLY, Lexer.Token.TokenType.DIVIDE))
    {
        Lexer.Token operatorToken = parser.Previous();
        result = new Expr.Binary(result, operatorToken, parser.Unary());
    }
    return(result);
}
/// <summary>
/// Parses a left-associative '.'-chained access expression whose operands are
/// grouping expressions.
/// </summary>
public static Expr Dotted(this Parser parser)
{
    Expr result = parser.Grouping();
    // Fold each ". operand" pair into a left-leaning binary tree.
    while (parser.Match(Lexer.Token.TokenType.DOT))
    {
        Lexer.Token operatorToken = parser.Previous();
        result = new Expr.Binary(result, operatorToken, parser.Grouping());
    }
    return(result);
}
/// <summary>
/// Parses an if statement: '(' condition ')' then-branch ['else' else-branch].
/// </summary>
public override Statement Parse(Parser parser, Lexer.Token current)
{
    parser.Consume("LEFTPAREN");
    var condition = parser.ParseExpression(0);
    parser.Consume("RIGHTPAREN");

    var thenBranch = ParseStatement(parser);
    // The else branch is optional; null signals its absence to IfStmt.
    var elseBranch = parser.ConsumeOptional("ELSE") ? ParseStatement(parser) : null;
    return(new IfStmt(condition, thenBranch, elseBranch));
}
/// <summary>
/// Parses a left-associative multiplicative expression ('*' / '/') over unary
/// operands.
/// </summary>
Expr multiplication()
{
    Expr result = unary();
    // Keep folding while another * or / operator follows.
    while (match(Lexer.Token.TokenType.MULTIPLY, Lexer.Token.TokenType.DIVIDE))
    {
        Lexer.Token operatorToken = previous();
        result = new Expr.Binary(result, operatorToken, unary());
    }
    return(result);
}
/// <summary>
/// Parses a left-associative additive expression ('+' / '-') over
/// multiplicative operands.
/// </summary>
Expr addition()
{
    Expr result = multiplication();
    // Keep folding while another + or - operator follows.
    while (match(Lexer.Token.TokenType.PLUS, Lexer.Token.TokenType.MINUS))
    {
        Lexer.Token operatorToken = previous();
        result = new Expr.Binary(result, operatorToken, multiplication());
    }
    return(result);
}
/// <summary>
/// Resolves a class by name, walking up the chain of enclosing environments.
/// </summary>
/// <param name="name">Token whose <c>Literal</c> is the class name.</param>
/// <returns>The closest definition of the class.</returns>
/// <exception cref="Interpreter.RuntimeError">Thrown when no reachable scope defines the class.</exception>
public Class GetClass(Lexer.Token name)
{
    // TryGetValue avoids the ContainsKey + indexer double lookup.
    if (_classes.TryGetValue(name.Literal, out var found))
    {
        return(found);
    }
    // Not in this scope: delegate to the parent environment, if any.
    if (Enclosing != null)
    {
        return(Enclosing.GetClass(name));
    }
    throw new Interpreter.RuntimeError($"Attempting to access undefined class {name.Literal}");
}
/// <summary>
/// Parses a 'let' declaration: an identifier, an optional '=' initialiser,
/// then a newline (or EOF) terminator.
/// </summary>
Stmt assignment()
{
    Lexer.Token identifier = consume("Expected identifier after 'let'", Lexer.Token.TokenType.IDENTIFIER);
    // A declaration without '=' leaves the initialiser null.
    Expr value = match(Lexer.Token.TokenType.EQUAL) ? condition() : null;
    consume("Expected new line after variable declaration", Lexer.Token.TokenType.NEWLINE, Lexer.Token.TokenType.EOF);
    return(new Stmt.Assign(identifier, value));
}
// currently returns null if not a value
// should probably make it throw an error?
/// <summary>
/// Returns the wrapped value; when this object is a class and an element name
/// is supplied, returns that element of the class instead.
/// </summary>
public object Access(Lexer.Token name = null)
{
    // Anything that is not a class-member access simply unwraps the raw value.
    if (_type != ObjectType.CLASS || name == null)
    {
        return(_value);
    }
    if (_elements.Contains(name))
    {
        return(_elements.Get(name));
    }
    throw new Interpreter.RuntimeError($"Trying to access non-existing element {name.Literal} of class {(_value as Class).Name}");
}
/// <summary>
/// Resolves a variable by name, walking up the chain of enclosing
/// environments.
/// </summary>
/// <param name="name">Token whose <c>Literal</c> is the variable name.</param>
/// <returns>The closest binding of the variable.</returns>
/// <exception cref="Interpreter.RuntimeError">Thrown when no reachable scope defines the variable.</exception>
public object Get(Lexer.Token name)
{
    // TryGetValue avoids the ContainsKey + indexer double lookup.
    if (values.TryGetValue(name.Literal, out var value))
    {
        return(value);
    }
    // Not in this scope: delegate to the parent environment, if any.
    if (Enclosing != null)
    {
        return(Enclosing.Get(name));
    }
    throw new Interpreter.RuntimeError("Attempting to access undefined variable " + name.Literal);
}
/// <summary>
/// Executes an operator override for one or two operands. TrashML-defined
/// overrides (_*_overrides_t) take precedence over C#-defined ones
/// (_*_overrides_c). Overrides see the operands as the variables
/// <c>op1</c> (left/only operand) and <c>op2</c> (right operand).
/// </summary>
/// <param name="interpreter">Interpreter used to run TrashML override bodies.</param>
/// <param name="op">The operator token.</param>
/// <param name="one">The left (or only) operand.</param>
/// <param name="two">The right operand; null selects the unary tables.</param>
/// <exception cref="Interpreter.RuntimeError">Thrown when no override matches the operator/type combination.</exception>
public static TrashObject RunOverride(this Interpreter interpreter, Lexer.Token op, TrashObject one, TrashObject two = null)
{
    if (two != null)
    {
        var key = new Tuple<Lexer.Token.TokenType, TrashObject.ObjectType, TrashObject.ObjectType>(op.Type, one.GetType(), two.GetType());

        // TrashML overrides to be considered first
        if (_binary_overrides_t.ContainsKey(key))
        {
            var env = new Environment(op.Literal, interpreter.IntEnvironment);
            // BUG FIX: op1 was previously bound to `two` as well, so TrashML
            // overrides never saw the left-hand operand.
            env.Define(new Lexer.Token { Literal = "op1" }, one);
            env.Define(new Lexer.Token { Literal = "op2" }, two);
            return(interpreter.ExecuteBlock(_binary_overrides_t[key], env));
        }

        if (_binary_overrides_c.ContainsKey(key))
        {
            return(_binary_overrides_c[key](one, two));
        }

        throw new Interpreter.RuntimeError($"Unable to find suitable override for {op.Literal} with types {one.GetType()}, {two.GetType()}");
    }

    var ukey = new Tuple<Lexer.Token.TokenType, TrashObject.ObjectType>(op.Type, one.GetType());

    if (_unary_overrides_t.ContainsKey(ukey))
    {
        var env = new Environment(op.Literal, interpreter.IntEnvironment);
        env.Define(new Lexer.Token { Literal = "op1" }, one);
        // BUG FIX: a fresh Environment (without op1 defined) was previously
        // passed here, so unary TrashML overrides could never see the operand.
        return(interpreter.ExecuteBlock(_unary_overrides_t[ukey], env));
    }

    if (_unary_overrides_c.ContainsKey(ukey))
    {
        return(_unary_overrides_c[ukey](one));
    }

    throw new Interpreter.RuntimeError($"Unable to find suitable override for {op.Literal} with type {one.GetType()}");
}
/// <summary>
/// Adds an element to this class, or overwrites the body of an existing one.
/// Returns this class so calls can be chained.
/// </summary>
public Class Add(Lexer.Token name, TrashObject body)
{
    if (Exists(name))
    {
        // Known element: only the body is replaced; the key list already has it.
        _elements[name.Literal] = body;
    }
    else
    {
        _keys.Add(name);
        _elements.Add(name.Literal, body);
    }
    return(this);
}
/// <summary>
/// Parses a comparison: an additive expression optionally followed by exactly
/// one relational operator and a second additive expression (non-associative).
/// </summary>
Expr condition()
{
    Expr lhs = addition();
    bool isComparison = match(Lexer.Token.TokenType.EQUAL,
                              Lexer.Token.TokenType.BANG_EQUAL,
                              Lexer.Token.TokenType.LESS,
                              Lexer.Token.TokenType.LESS_EQUAL,
                              Lexer.Token.TokenType.GREATER,
                              Lexer.Token.TokenType.GREATER_EQUAL);
    if (!isComparison)
    {
        return(lhs);
    }
    Lexer.Token operatorToken = previous();
    return(new Expr.Binary(lhs, operatorToken, addition()));
}
/// <summary>
/// Parses a left-associative chain of relational comparisons over additive
/// expressions.
/// </summary>
public static Expr Condition(this Parser parser)
{
    Expr result = parser.Addition();
    // Fold each "relop operand" pair into a left-leaning binary tree.
    while (parser.Match(Lexer.Token.TokenType.EQUAL_EQUAL, Lexer.Token.TokenType.BANG_EQUAL,
                        Lexer.Token.TokenType.LESS, Lexer.Token.TokenType.LESS_EQUAL,
                        Lexer.Token.TokenType.GREATER, Lexer.Token.TokenType.GREATER_EQUAL))
    {
        Lexer.Token operatorToken = parser.Previous();
        result = new Expr.Binary(result, operatorToken, parser.Addition());
    }
    return(result);
}
/// <summary>
/// Parses statements until the configured end token is reached and collects
/// them into a single scope block. At least one parse attempt is always made.
/// </summary>
public override Statement Parse(Parser parser, Lexer.Token current)
{
    var statements = new List<Statement>();
    while (true)
    {
        // ParseStatement may legitimately yield nothing (e.g. a bare ';').
        var parsed = ParseStatement(parser);
        if (parsed != null)
        {
            statements.Add(parsed);
        }
        if (parser.Current.Type == _endToken)
        {
            break;
        }
    }
    return(new ScopeBlockStmt(statements));
}
/// <summary>
/// Parses a whole file: one top-level parenthesised list followed by EOF.
/// Unbalanced or trailing tokens are reported through ParseError.
/// </summary>
public List Parse()
{
    Token = Lexer.GetNextToken();
    if (Token != Lexer.Token.OPEN_PAREN)
        ParseError("file does not start with '('");

    List result = InternParse();

    // After the top-level list only EOF is legal.
    if (Token == Lexer.Token.CLOSE_PAREN)
        ParseError("too many ')'");
    else if (Token != Lexer.Token.EOF)
        ParseError("extra tokens at end of file");

    return result;
}
/// <summary>
/// Parses a 'let' assignment: an identifier with an optional '=' initialiser.
/// </summary>
public static Stmt.Assign Assign(this Parser parser)
{
    Lexer.Token name = parser.Consume("Expected identifier after 'let'", Lexer.Token.TokenType.IDENTIFIER);

    // A declaration without '=' leaves the initialiser null.
    Expr initialiser = parser.Match(Lexer.Token.TokenType.EQUAL) ? parser.Comparison() : null;

    // NOTE: deliberately no newline/EOF terminator consume here — doing so
    // breaks 'with' statements (behavior kept from the original).
    return(new Stmt.Assign(name, initialiser));
}
/// <summary>
/// Top-level expression parser: dispatches 'new' expressions, otherwise parses
/// a condition optionally combined once with 'and'/'or'.
/// </summary>
public static Expr Comparison(this Parser parser)
{
    if (parser.Match(Lexer.Token.TokenType.NEW))
    {
        return(parser.New());
    }

    Expr lhs = parser.Condition();
    if (!parser.Match(Lexer.Token.TokenType.AND, Lexer.Token.TokenType.OR))
    {
        return(lhs);
    }
    Lexer.Token operatorToken = parser.Previous();
    return(new Expr.Binary(lhs, operatorToken, parser.Condition()));
}
/// <summary>
/// Processes one token: stops successfully when the terminate condition is
/// met; otherwise pushes the token back and delegates to a nested
/// OperatorLeaf parse, capturing its resulting expression.
/// </summary>
/// <param name="token">The token to process.</param>
/// <returns>The nested parse's result, or Stop when this parse terminates.</returns>
protected override ResultOfProcess ProcessToken(Lexer.Token token)
{
    // Remember the last token seen, even the one that terminates this parse.
    mLastToken = token;
    if (IsTerminateCondition(token))
    {
        mIsSuccessfulFinished = true;
        return ResultOfProcess.Stop;
    }

    // Not a terminator: return the token to the context so the nested
    // operator-leaf parser can consume it itself.
    this.Context.Recovery(token);
    var tmpOperatorLeaf = new Syntaxer.OperatorLeafs.OperatorLeaf(this.Context, this);
    var tmpRez = tmpOperatorLeaf.Run();
    // Publish the sub-parse's expression as this node's result.
    mResult.Expression = tmpOperatorLeaf.Result;
    return tmpRez;
}
/// <summary>Wraps a lexer token for the yacc-style parser interface.</summary>
public Token2yyToken(Lexer.Token token) => this.token = token;
/// <summary>
/// Recursively parses one parenthesised list body. Assumes the opening '(' has
/// already been consumed; leaves Token on the terminating ')' or EOF for the
/// caller to validate. Handles the gettext-style <c>(_ "text")</c> form by
/// inlining just the string.
/// </summary>
/// <returns>A List of the parsed entries (nested Lists, Symbols, strings, ints, floats, bools).</returns>
private List InternParse()
{
    ArrayList Entries = new ArrayList();
    while (Token != Lexer.Token.CLOSE_PAREN && Token != Lexer.Token.EOF)
    {
        switch (Token)
        {
        case Lexer.Token.OPEN_PAREN:
            Token = Lexer.GetNextToken();
            if (Token == Lexer.Token.SYMBOL && Lexer.TokenString == "_")
            {
                // "(_ "text")" — a translatable string; store only the text.
                Token = Lexer.GetNextToken();
                if (Token != Lexer.Token.STRING)
                    ParseError("Expected string after '(_ ' sequence");
                // TODO translate
                Entries.Add(Lexer.TokenString);
                Token = Lexer.GetNextToken();
                if (Token != Lexer.Token.CLOSE_PAREN)
                    // Fixed typo in error message ("squence" -> "sequence").
                    ParseError("Expected ')' after '(_ \"\"' sequence");
                break;
            }
            // Ordinary nested list.
            Entries.Add(InternParse());
            if (Token != Lexer.Token.CLOSE_PAREN)
                ParseError("Expected ')' token, got " + Token);
            break;
        case Lexer.Token.SYMBOL:
            Entries.Add(new Symbol(Lexer.TokenString));
            break;
        case Lexer.Token.STRING:
            Entries.Add(Lexer.TokenString);
            break;
        case Lexer.Token.INTEGER:
            // Invariant culture so files parse identically on every locale.
            int ival = Int32.Parse(Lexer.TokenString, NumberStyles.Integer, NumberFormatInfo.InvariantInfo);
            Entries.Add(ival);
            break;
        case Lexer.Token.REAL:
            float fval = Single.Parse(Lexer.TokenString, NumberStyles.Float, NumberFormatInfo.InvariantInfo);
            Entries.Add(fval);
            break;
        case Lexer.Token.TRUE:
            Entries.Add(true);
            break;
        case Lexer.Token.FALSE:
            Entries.Add(false);
            break;
        default:
            ParseError("Unexpected Token " + Token);
            break;
        }
        Token = Lexer.GetNextToken();
    }
    return new List(Entries.ToArray());
}
/// <summary>
/// Produces the next token from the OBJ-format source. Skips whitespace,
/// classifies numbers, keywords (via a keyword-string switch), separators and
/// comments, and advances past the token before returning.
/// </summary>
/// <returns>The classified token (end-of-file token once input is exhausted).</returns>
public Lexer.Token NextToken()
{
    Lexer.Token token = new Lexer.Token();
    token.Value = 0d;
    token.IntValue = 0;
    this.skip();
    if (this._currentIndex >= this._sourceLength)
    {
        // Input exhausted: synthesize the end-of-file token.
        token.String = Lexer.Token.BASIC_END_OF_FILE;
        token.Type = Lexer.Token.I_BASIC_END_OF_FILE;
    }
    else
    {
        //Tokenization takes place here
        if (char.IsDigit(this.getCurrentCharacter()) || this.getCurrentCharacter() == '-')
        {
            // Number (a leading '-' also starts a number).
            token.Type = Lexer.Token.I_BASIC_NUMBER;
            string numberString;
            token.Value = this.number(out numberString);
            token.String = numberString;
        }
        else if (char.IsLetter(this.getCurrentCharacter()))
        {
            // Identifier: defaults to a plain string token, then the switch
            // upgrades known OBJ keywords to their specific token types.
            token.Type = Lexer.Token.I_BASIC_STRING;
            token.String = this.identifier();
            switch (token.String)
            {
                #region Data Tokens...
                case Lexer.Token.DATA_VERTEX: token.Type = Lexer.Token.I_DATA_VERTEX; break;
                case Lexer.Token.DATA_VERTEX_NORMAL: token.Type = Lexer.Token.I_DATA_VERTEX_NORMAL; break;
                case Lexer.Token.DATA_TEXTURE_VERTEX: token.Type = Lexer.Token.I_DATA_TEXTURE_VERTEX; break;
                case Lexer.Token.DATA_PARAMETER_SPACE_VERTEX: token.Type = Lexer.Token.I_DATA_PARAMETER_SPACE_VERTEX; break;
                case Lexer.Token.DATA_BASIS_MATRIX: token.Type = Lexer.Token.I_DATA_BASIS_MATRIX; break;
                case Lexer.Token.DATA_DEGREE: token.Type = Lexer.Token.I_DATA_DEGREE; break;
                case Lexer.Token.DATA_RATIONAL_NON_RATIONAL_CURVE_OR_SURFACE_DATA: token.Type = Lexer.Token.I_DATA_RATIONAL_NON_RATIONAL_CURVE_OR_SURFACE_DATA; break;
                case Lexer.Token.DATA_STEP_SIZE: token.Type = Lexer.Token.I_DATA_STEP_SIZE; break;
                #endregion

                #region Element Tokens...
                case Lexer.Token.ELEMENT_POINT: token.Type = Lexer.Token.I_ELEMENT_POINT; break;
                case Lexer.Token.ELEMENT_LINE: token.Type = Lexer.Token.I_ELEMENT_LINE; break;
                case Lexer.Token.ELEMENT_FACE: token.Type = Lexer.Token.I_ELEMENT_FACE; break;
                case Lexer.Token.ELEMENT_CURVE: token.Type = Lexer.Token.I_ELEMENT_CURVE; break;
                case Lexer.Token.ELEMENT_2D_CURVE: token.Type = Lexer.Token.I_ELEMENT_2D_CURVE; break;
                case Lexer.Token.ELEMENT_SURFACE: token.Type = Lexer.Token.I_ELEMENT_SURFACE; break;
                #endregion

                #region Freeform Tokens...
                case Lexer.Token.FREEFORM_PARAMETER_VALUE: token.Type = Lexer.Token.I_FREEFORM_PARAMETER_VALUE; break;
                case Lexer.Token.FREEFORM_SPECIAL_POINT: token.Type = Lexer.Token.I_FREEFORM_SPECIAL_POINT; break;
                case Lexer.Token.FREEFORM_SPECIAL_CURVE: token.Type = Lexer.Token.I_FREEFORM_SPECIAL_CURVE; break;
                case Lexer.Token.FREEFORM_OUTER_TRIMMING_LOOP: token.Type = Lexer.Token.I_FREEFORM_OUTER_TRIMMING_LOOP; break;
                case Lexer.Token.FREEFORM_INNER_TRIMMING_LOOP: token.Type = Lexer.Token.I_FREEFORM_INNER_TRIMMING_LOOP; break;
                case Lexer.Token.FREEFORM_END_STATEMENT: token.Type = Lexer.Token.I_FREEFORM_END_STATEMENT; break;
                case Lexer.Token.FREEFORM_SURFACE_CONNECT: token.Type = Lexer.Token.I_FREEFORM_SURFACE_CONNECT; break;
                #endregion

                #region Grouping Tokens...
                case Lexer.Token.GROUPING_GROUP_NAME: token.Type = Lexer.Token.I_GROUPING_GROUP_NAME; break;
                case Lexer.Token.GROUPING_MERGING_GROUP: token.Type = Lexer.Token.I_GROUPING_MERGING_GROUP; break;
                case Lexer.Token.GROUPING_OBJECT_NAME: token.Type = Lexer.Token.I_GROUPING_OBJECT_NAME; break;
                case Lexer.Token.GROUPING_SMOOTHING_GROUP: token.Type = Lexer.Token.I_GROUPING_SMOOTHING_GROUP; break;
                #endregion

                #region Render Tokens...
                case Lexer.Token.RENDER_BEVEL_INTERPOLATION: token.Type = Lexer.Token.I_RENDER_BEVEL_INTERPOLATION; break;
                case Lexer.Token.RENDER_COLOR_INTERPOLATION: token.Type = Lexer.Token.I_RENDER_COLOR_INTERPOLATION; break;
                case Lexer.Token.RENDER_CURVE_APPROXIMATION_TECHNIQUE: token.Type = Lexer.Token.I_RENDER_CURVE_APPROXIMATION_TECHNIQUE; break;
                case Lexer.Token.RENDER_DISSOLVE_INTERPOLATION: token.Type = Lexer.Token.I_RENDER_DISSOLVE_INTERPOLATION; break;
                case Lexer.Token.RENDER_LEVEL_OF_DETAIL: token.Type = Lexer.Token.I_RENDER_LEVEL_OF_DETAIL; break;
                case Lexer.Token.RENDER_MATERIAL_LIBRARY:
                    // Material library: the rest of the line is a file path,
                    // so read up to the newline and store the trimmed path.
                    token.Type = Lexer.Token.I_RENDER_MATERIAL_LIBRARY;
                    StringBuilder pathBuilder = new StringBuilder();
                    this.nextCharacter();
                    while (this.getCurrentCharacter() != '\n')
                    {
                        pathBuilder.Append(this.getCurrentCharacter());
                        this.nextCharacter();
                    }
                    token.String = pathBuilder.ToString().Trim();
                    // Step back so the trailing nextCharacter() below does not
                    // swallow the newline's successor.
                    this._currentIndex--;
                    break;
                case Lexer.Token.RENDER_MATERIAL_NAME: token.Type = Lexer.Token.I_RENDER_MATERIAL_NAME; break;
                case Lexer.Token.RENDER_RAY_TRACING: token.Type = Lexer.Token.I_RENDER_RAY_TRACING; break;
                case Lexer.Token.RENDER_SHADOW_CASTING: token.Type = Lexer.Token.I_RENDER_SHADOW_CASTING; break;
                case Lexer.Token.RENDER_SURFACE_APPROXIMATION_TECHNIQUE: token.Type = Lexer.Token.I_RENDER_SURFACE_APPROXIMATION_TECHNIQUE; break;
                #endregion
            }
        }
        else
        {
            // Single-character tokens: separators and '#' comments.
            token.String = this.getCurrentCharacter().ToString();
            switch (token.String)
            {
                case "/": token.Type = Lexer.Token.I_BASIC_SEPERATOR; break;
                case ".": token.Type = Lexer.Token.I_BASIC_SEPERATOR; break;
                case "\\": token.Type = Lexer.Token.I_BASIC_SEPERATOR; break;
                case "#":
                    // Comment: consume up to (but not past) the newline and
                    // keep the trimmed comment text as the token string.
                    token.Type = Lexer.Token.I_BASIC_COMMENT;
                    StringBuilder commentBuilder = new StringBuilder();
                    do
                    {
                        commentBuilder.Append(this.nextCharacter());
                    } while (this.getCurrentCharacter() != '\n');
                    token.String = commentBuilder.ToString().Trim();
                    break;
            }
        }
    }
    // Advance past the last character of the token just produced.
    this.nextCharacter();
    return token;
}
/// <summary>Advances the parser by pulling the next token from the lexer into the current-token slot.</summary>
private void NextToken() => this._currentToken = this._lexer.NextToken();