Code Example #1
File: LexerTokenTests.cs Project: wittyansh/nledger
        public void LexerToken_Constructor_SetsKind()
        {
            LexerToken lexerToken = new LexerToken(LexerTokenKindEnum.TOK_YEAR);

            Assert.AreEqual(LexerTokenKindEnum.TOK_YEAR, lexerToken.Kind);
            Assert.IsTrue(lexerToken.Value.IsEmpty);
        }
Code Example #2
File: LexerTokenTests.cs Project: wittyansh/nledger
        public void LexerToken_Constructor_CreatesEmptyToken()
        {
            LexerToken lexerToken = new LexerToken();

            Assert.AreEqual(LexerTokenKindEnum.UNKNOWN, lexerToken.Kind);
            Assert.IsTrue(lexerToken.Value.IsEmpty);
        }
Code Example #3
 protected virtual LexerToken NextToken(string content, ref int index, LexerToken currentToken)
 {
     switch (currentToken)
     {
         case LexerToken.BeginKey:
             if (content[index] == '\r' || content[index] == '\n') return LexerToken.Ignore;
             if (char.IsLetter(content, index) || content[index] == '_') return LexerToken.Key;
             return LexerToken.Invalid;
         case LexerToken.BeginValue:
             if (char.IsWhiteSpace(content, index) || content[index] == '=') return LexerToken.Ignore;
             return LexerToken.Value;
         case LexerToken.Key:
             if (char.IsLetterOrDigit(content, index) || content[index] == '_') return LexerToken.Key;
             if (char.IsWhiteSpace(content, index) || content[index] == '=') return LexerToken.BeginValue;
             return LexerToken.Invalid;
         case LexerToken.Value:
             if(content[index] == '\r') return LexerToken.Ignore;
             if (content[index] == '\n')
             {
                 if (content.Length == index + 1) return LexerToken.Ignore;
                 if (content[index + 1] == '\t') return LexerToken.Ignore;
                 return LexerToken.BeginKey;
             }
             if (content[index] == '\t' && content[index - 1] == '\n') return LexerToken.Ignore;
             return LexerToken.Value;
         default: return LexerToken.Invalid;
     }
 }
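
NextToken only classifies the character at index; a caller still has to accumulate characters and decide when a key/value pair is complete. Below is a minimal sketch of such a driver, assuming a hypothetical TokenizePairs helper in the same class (the accumulation and end-of-input handling are illustrative; only the state transitions come from the method above, and System.Collections.Generic plus System.Text are required):

 private IEnumerable<KeyValuePair<string, string>> TokenizePairs(string content)
 {
     // Hypothetical driver: feed each character to NextToken and group the
     // results into key/value pairs.
     var state = LexerToken.BeginKey;
     var key = new StringBuilder();
     var value = new StringBuilder();

     for (int index = 0; index < content.Length; index++)
     {
         var next = NextToken(content, ref index, state);
         switch (next)
         {
             case LexerToken.Key:
                 key.Append(content[index]);
                 break;
             case LexerToken.Value:
                 value.Append(content[index]);
                 break;
             case LexerToken.BeginKey:
                 // The previous value ended at this newline; emit the completed pair.
                 yield return new KeyValuePair<string, string>(key.ToString(), value.ToString());
                 key.Clear();
                 value.Clear();
                 break;
             case LexerToken.Invalid:
                 throw new FormatException("Unexpected character at position " + index);
         }
         if (next != LexerToken.Ignore)
             state = next;
     }

     // Emit the trailing pair when the input does not end with a key-starting newline.
     if (key.Length > 0)
         yield return new KeyValuePair<string, string>(key.ToString(), value.ToString());
 }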
Code Example #4
File: LexerTokenTests.cs Project: wittyansh/nledger
        public void LexerToken_Constructor_SetsKindAndValue()
        {
            LexerToken lexerToken = new LexerToken(LexerTokenKindEnum.TOK_YEAR, new BoostVariant(33));

            Assert.AreEqual(LexerTokenKindEnum.TOK_YEAR, lexerToken.Kind);
            Assert.AreEqual(33, lexerToken.Value.GetValue<int>());
        }
Code Example #5
        /// <summary>
        /// Reads a string from the current stream. The string will be composed strictly of
        /// <see cref="LexerToken.Untyped"/> characters. If quotes are encountered in the stream, the return
        /// value is the string contained within the quotes (the quotes are stripped).
        /// <see cref="CurrentString"/> is set to the return value.
        /// </summary>
        /// <returns>The string being read</returns>
        public string ReadString()
        {
            this.currentToken = this.GetNextToken();
            if (this.currentToken == LexerToken.Untyped)
            {
                do
                {
                    this.stringBuffer[this.stringBufferCount++] = this.currentByte;
                } while (!IsSpace(this.currentByte = this.ReadNext()) && this.SetcurrentToken(this.currentByte) == LexerToken.Untyped && !this.eof);
            }
            else if (this.currentToken == LexerToken.Quote)
            {
                while ((this.currentByte = this.ReadNext()) != '"' && !this.eof)
                {
                    this.stringBuffer[this.stringBufferCount++] = this.currentByte;
                }
            }
            else
            {
                return(this.ReadString());
            }

            this.currentString     = new string(this.stringBuffer, 0, this.stringBufferCount);
            this.stringBufferCount = 0;
            return(this.currentString);
        }
Code Example #6
        /// <summary>
        /// Advances the parser to the next significant token, skipping whitespace and
        /// comments.  If a left or right curly is encountered, the current indent is
        /// adjusted accordingly.
        /// </summary>
        /// <returns>The significant token encountered</returns>
        private LexerToken GetNextToken()
        {
            if (this.nextToken != null)
            {
                LexerToken temp = this.nextToken.Value;
                this.nextToken = null;
                return(temp);
            }

            while (IsSpace(this.currentByte = this.ReadNext()) && !this.eof)
            {
                ;
            }

            if (this.SetcurrentToken(this.currentByte) == LexerToken.Comment)
            {
                while ((this.currentByte = this.ReadNext()) != '\n' && !this.eof)
                {
                    ;
                }
                return(this.GetNextToken());
            }

            return(this.currentToken);
        }
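
GetNextToken drains a one-token pushback slot (nextToken) before it reads from the stream, which is what enables single-token lookahead elsewhere in the lexer. A hypothetical PeekToken built on that same field might look like the sketch below (an assumption about the implementation, not code from the project):

        private LexerToken PeekToken()
        {
            // Sketch only: read one token ahead and park it in nextToken so the
            // next GetNextToken call returns it instead of reading the stream again.
            if (this.nextToken == null)
            {
                this.nextToken = this.GetNextToken();
            }

            return this.nextToken.Value;
        }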
Code Example #7
 public IdentifierSpellCheckHighlighting(IDeclaration declaration, LexerToken token,
                                         ISolution solution, ISpellChecker spellChecker, IContextBoundSettingsStore settingsStore)
     : base(GetRange(declaration), token.Value, solution, spellChecker, settingsStore)
 {
     _lexerToken  = token;
     _declaration = declaration;
 }
Code Example #8
 public WordIsNotInDictionaryHighlight(string word, DocumentRange range,
                                       LexerToken misspelledToken, ISolution solution, ISpellChecker spellChecker, IContextBoundSettingsStore settingsStore)
     : base(range, misspelledToken.Value, solution, spellChecker, settingsStore)
 {
     _word  = word;
     _token = misspelledToken;
 }
Code Example #9
        protected virtual LexerToken NextToken(string content, ref int index, LexerToken currentToken)
        {
            switch (currentToken)
            {
            case LexerToken.BeginKey:
                if (content[index] == '\r' || content[index] == '\n')
                {
                    return(LexerToken.Ignore);
                }
                if (char.IsLetter(content, index) || content[index] == '_')
                {
                    return(LexerToken.Key);
                }
                return(LexerToken.Invalid);

            case LexerToken.BeginValue:
                if (char.IsWhiteSpace(content, index) || content[index] == '=')
                {
                    return(LexerToken.Ignore);
                }
                return(LexerToken.Value);

            case LexerToken.Key:
                if (char.IsLetterOrDigit(content, index) || content[index] == '_')
                {
                    return(LexerToken.Key);
                }
                if (char.IsWhiteSpace(content, index) || content[index] == '=')
                {
                    return(LexerToken.BeginValue);
                }
                return(LexerToken.Invalid);

            case LexerToken.Value:
                if (content[index] == '\r')
                {
                    return(LexerToken.Ignore);
                }
                if (content[index] == '\n')
                {
                    if (content.Length == index + 1)
                    {
                        return(LexerToken.Ignore);
                    }
                    if (content[index + 1] == '\t')
                    {
                        return(LexerToken.Ignore);
                    }
                    return(LexerToken.BeginKey);
                }
                if (content[index] == '\t' && content[index - 1] == '\n')
                {
                    return(LexerToken.Ignore);
                }
                return(LexerToken.Value);

            default: return(LexerToken.Invalid);
            }
        }
Code Example #10
File: LexerTokenTests.cs Project: wittyansh/nledger
        public void LexerToken_Comparison_EqualsForEqualKinds()
        {
            LexerToken lexerToken1 = new LexerToken(LexerTokenKindEnum.TOK_DAILY);
            LexerToken lexerToken2 = new LexerToken(LexerTokenKindEnum.TOK_DAILY);

            Assert.IsTrue(lexerToken1.Equals(lexerToken2));
            Assert.IsTrue(lexerToken1 == lexerToken2);
        }
Code Example #11
File: LexerTokenTests.cs Project: wittyansh/nledger
        public void LexerToken_Comparison_EqualsForEqualKindsAndValues()
        {
            LexerToken lexerToken1 = new LexerToken(LexerTokenKindEnum.TOK_DAILY, new BoostVariant(3));
            LexerToken lexerToken2 = new LexerToken(LexerTokenKindEnum.TOK_DAILY, new BoostVariant(3));

            Assert.IsTrue(lexerToken1.Equals(lexerToken2));
            Assert.IsTrue(lexerToken1 == lexerToken2);
        }
Code Example #12
File: LexerTokenTests.cs Project: wittyansh/nledger
        public void LexerToken_IsNotEnd_IndicatesThatTheEndIsNotReached()
        {
            LexerToken lexerToken = new LexerToken();

            Assert.IsTrue(lexerToken.IsNotEnd);

            lexerToken = new LexerToken(LexerTokenKindEnum.END_REACHED);
            Assert.IsFalse(lexerToken.IsNotEnd);
        }
Code Example #13
        private LexerToken ScanHexadecimalString(EpsStreamReader reader)
        {
            Debug.Assert(reader.CurrentChar == Chars.Less);

            ClearToken();
            char[] hex = new char[2];
            ScanNextChar(reader);

            while (true)
            {
                MoveToNonWhiteSpace(reader);

                if (reader.CurrentChar == '>')
                {
                    ScanNextChar(reader);
                    break;
                }

                if (char.IsLetterOrDigit(reader.CurrentChar))
                {
                    hex[0] = char.ToUpper(reader.CurrentChar);

                    var nextChar = reader.NextChar;

                    if (nextChar != '>')
                    {
                        hex[1] = char.ToUpper(nextChar);
                    }
                    else
                    {
                        hex[1] = '0';
                    }

                    int ch = int.Parse(new string(hex), NumberStyles.AllowHexSpecifier);
                    Token.Append(Convert.ToChar(ch));
                    ScanNextChar(reader);
                    ScanNextChar(reader);
                }
            }

            string chars = Token.ToString();
            int    count = chars.Length;

            if (count > 2 && chars[0] == (char)0xFE && chars[1] == (char)0xFF)
            {
                Debug.Assert(count % 2 == 0);
                Token.Length = 0;

                for (int idx = 2; idx < count; idx += 2)
                {
                    Token.Append((char)(chars[idx] * 256 + chars[idx + 1]));
                }
            }

            return(Symbol = LexerToken.HexString);
        }
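
ScanHexadecimalString reads hex digit pairs between '<' and '>', pads an odd trailing digit with '0', and collapses a UTF-16BE payload (a leading 0xFE 0xFF pair) into 16-bit chars. The standalone sketch below repeats the same decoding rules over a plain string, without the EpsStreamReader plumbing, purely as an illustration (the HexStringDemo class and DecodeHexString name are assumptions):

using System;
using System.Globalization;
using System.Text;

static class HexStringDemo
{
    // Same decoding rules as ScanHexadecimalString, applied to a plain string:
    // "<48656C6C6F>" decodes to "Hello"; an odd trailing digit is padded with '0'.
    static string DecodeHexString(string input)
    {
        string digits = input.Trim('<', '>').Replace(" ", string.Empty);
        if (digits.Length % 2 != 0)
            digits += "0";                         // pad the last digit, as the scanner does

        var token = new StringBuilder();
        for (int i = 0; i < digits.Length; i += 2)
        {
            int ch = int.Parse(digits.Substring(i, 2), NumberStyles.AllowHexSpecifier);
            token.Append(Convert.ToChar(ch));
        }

        string chars = token.ToString();
        // Collapse a UTF-16BE byte-order-mark payload, mirroring the post-processing step above.
        if (chars.Length > 2 && chars[0] == (char)0xFE && chars[1] == (char)0xFF)
        {
            var wide = new StringBuilder();
            for (int idx = 2; idx < chars.Length; idx += 2)
                wide.Append((char)(chars[idx] * 256 + chars[idx + 1]));
            return wide.ToString();
        }
        return chars;
    }

    static void Main()
    {
        Console.WriteLine(DecodeHexString("<48656C6C6F>"));   // Hello
    }
}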
Code Example #14
 public void ReparseToken()
 {
     if (token != null)
     {
         token.m_kLinkedLeaf = null;
         token = null;
     }
     if (Parent != null)
     {
         Parent.RemoveNodeAt(m_iChildIndex /*, false*/);
     }
 }
Code Example #15
        /// <summary>
        /// Scans an operator.
        /// </summary>
        private LexerToken ScanOperator(EpsStreamReader reader)
        {
            ClearToken();
            char ch = reader.CurrentChar;

            while (IsOperatorFirstChar(ch) || char.IsDigit(ch) || ch == '-')
            {
                ch = AppendAndScanNextChar(reader);
            }

            return(Symbol = LexerToken.Operator);
        }
Code Example #16
        /// <summary>
        /// Reads a string from the current stream. The string will be composed strictly of
        /// <see cref="LexerToken.Untyped"/> characters. If quotes are encountered in the stream, the return
        /// value is the string contained within the quotes (the quotes are stripped).
        /// <see cref="CurrentString"/> is set to the return value.
        /// </summary>
        /// <returns>The string being read</returns>
        public string ReadString()
        {
            currentToken = GetNextToken();
            if (currentToken == LexerToken.Untyped)
            {
                do
                {
                    stringBuffer[stringBufferCount++] = currentChar;
                } while (!IsSpace(currentChar = ReadNext()) &&
                         SetCurrentToken(currentChar) == LexerToken.Untyped && !eof);
            }
            else if (currentToken == LexerToken.Quote)
            {
                while ((currentChar = ReadNext()) != '"' && !eof)
                {
                    stringBuffer[stringBufferCount++] = currentChar;
                }

                // Check for partially quoted string of the style "name"_group.
                // If it is, then read string as if untyped.
                char nextChar = ReadNext();
                if (nextChar == '_')
                {
                    stringBuffer[stringBufferCount++] = nextChar;
                    currentToken = GetNextToken();
                    do
                    {
                        stringBuffer[stringBufferCount++] = currentChar;
                    } while (!IsSpace(currentChar = ReadNext()) &&
                            SetCurrentToken(currentChar) == LexerToken.Untyped && !eof);
                }
                else
                {
                    // Enqueue because it could be important (Equals, quote, etc.)
                    nextChars.Enqueue(nextChar);
                    nextCharsEmpty = false;
                }
            }
            else if (currentToken == LexerToken.LeftCurly &&
                     PeekToken() == LexerToken.RightCurly)
            {
                return(null);
            }
            else
            {
                return(ReadString());
            }

            currentString     = new string(stringBuffer, 0, stringBufferCount);
            stringBufferCount = 0;
            return(currentString);
        }
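
As a usage illustration (the lexer variable is hypothetical; the behaviour follows from the branches above), reading a key = "quoted value" pair takes two consecutive calls: the Equals token falls through to the final else branch and is skipped by the recursion, and the quoted branch strips the quotes:

        // Hypothetical usage for the input:  name = "Grand Army"
        string key   = lexer.ReadString();   // "name"        (untyped branch)
        string value = lexer.ReadString();   // "Grand Army"  (quoted branch; '=' skipped by recursion)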
Code Example #17
        /// <summary>
        /// Checks the current token, and if needed, reads next token in the
        /// stream in an attempt to locate a left curly.  If the token
        /// encountered is an equality symbol, it will read the next token and
        /// see if that is a left curly, e.g. x = { y }.  If the initial read
        /// token isn't an equality symbol or a left curly, or if the initial
        /// read token is an equality symbol but the subsequent token isn't a
        /// left curly, then an invalid operation exception is thrown.
        /// </summary>
        private void EnsureLeftCurly()
        {
            currentToken = GetNextToken();
            if (currentToken == LexerToken.Equals)
            {
                currentToken = GetNextToken();
            }

            if (currentToken != LexerToken.LeftCurly)
            {
                throw new InvalidOperationException("When reading inside brackets the first token must be a left curly");
            }
        }
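
Concretely, the guard accepts the two input shapes below and throws on anything else (the caller positions are an illustrative assumption; EnsureLeftCurly itself is the method above):

        // Accepted:  ids = { 1 2 3 }   (Equals, then LeftCurly; both tokens are consumed)
        // Accepted:  { 1 2 3 }         (LeftCurly immediately)
        // Rejected:  ids 1 2 3         (throws InvalidOperationException)
        EnsureLeftCurly();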
Code Example #18
 /// <summary>
 /// Evaluates the token parameter for brackets and sets the current
 /// token to it
 /// </summary>
 /// <param name="token">The token to be evaluated</param>
 /// <returns>The current token</returns>
 private LexerToken SetCurrentToken(LexerToken token)
 {
     currentToken = token;
     if (currentToken == LexerToken.LeftCurly)
     {
         currentIndent++;
     }
     else if (currentToken == LexerToken.RightCurly)
     {
         currentIndent--;
     }
     return(currentToken);
 }
Code Example #19
    public override void InsertMissingToken(string errorMessage)
    {
        var missingAtLine  = m_kTokenScanner.CurrentLine;
        var missingAtIndex = m_kTokenScanner.CurrentTokenIndex;

        while (true)
        {
            if (--missingAtIndex < 0)
            {
                if (--missingAtLine < 0)
                {
                    missingAtLine = missingAtIndex = 0;
                    break;
                }
                missingAtIndex = m_kTokenScanner.GetFormatedLine(missingAtLine).tokens.Count;
                continue;
            }
            var tokenKind = m_kTokenScanner.GetFormatedLine(missingAtLine).tokens[missingAtIndex].tokenKind;
            if (tokenKind > LexerToken.Kind.LastWSToken)
            {
                ++missingAtIndex;
                break;
            }
            else if (tokenKind == LexerToken.Kind.Missing)
            {
                ErrorToken = m_kTokenScanner.GetFormatedLine(missingAtLine).tokens[missingAtIndex].m_kLinkedLeaf;
                return;
            }
        }

        var missingLine  = m_kTokenScanner.GetFormatedLine(missingAtLine);
        var missingToken = new LexerToken(LexerToken.Kind.Missing, string.Empty)
        {
            style = null, formatedLine = missingLine
        };
        //missingLine.tokens.Insert(missingAtIndex, missingToken);
        var leaf = SyntaxRule_Err.AddToken(missingToken);

        leaf.m_bMissing     = true;
        leaf.m_sSyntaxError = errorMessage;
        leaf.ParseNode      = ParseNode_Err;

        if (ErrorToken == null)
        {
            ErrorToken = leaf;
        }

        m_kTokenScanner.InsertToken(missingToken, missingAtLine, missingAtIndex);
        //if (missingAtLine == currentLine)
        //    ++currentTokenIndex;
    }
Code Example #20
        /// <summary>
        /// Sets the current token to the token associated with the parameter
        /// </summary>
        /// <param name="c">Char that will be evaluated for equivalent token</param>
        /// <returns>Current token</returns>
        private LexerToken SetcurrentToken(char c)
        {
            this.currentToken = GetToken(c);
            if (this.currentToken == LexerToken.LeftCurly)
            {
                this.currentIndent++;
            }
            else if (this.currentToken == LexerToken.RightCurly)
            {
                this.currentIndent--;
            }

            return(this.currentToken);
        }
Code Example #21
        /// <summary>
        /// Scans a dsc comment token
        /// </summary>
        private LexerToken ScanDscComment(EpsStreamReader reader)
        {
            Debug.Assert(reader.CurrentChar == Chars.Percent);

            ClearToken();
            char ch;

            do
            {
                ch = AppendAndScanNextChar(reader);
            } while (!IsWhiteSpace(ch) && ch != Chars.EOF);

            return(Symbol = LexerToken.DscComment);
        }
Code Example #22
        /// <summary>
        /// Scans a name.
        /// </summary>
        private LexerToken ScanName(EpsStreamReader reader)
        {
            Debug.Assert(reader.CurrentChar == Chars.Slash);
            ScanNextChar(reader);

            ClearToken();
            char ch;

            do
            {
                ch = AppendAndScanNextChar(reader);
            } while (!IsWhiteSpace(ch) && !IsDelimiter(ch));

            return(Symbol = LexerToken.Name);
        }
Code Example #23
        public static LexerToken t_NUMBER(LexerToken Tok)
        {
            int val;

            if (int.TryParse((string)Tok.Value, out val))
            {
                Tok.Value = val;
            }
            else
            {
                Console.WriteLine("Integer value too large " + Tok.Value.ToString());
                Tok.Value = 0;
            }
            return(Tok);
        }
Code Example #24
        /// <summary>
        /// Scan ASCII base85 string
        /// </summary>
        /// <param name="reader"></param>
        private LexerToken ScanAsciiBase85String(EpsStreamReader reader)
        {
            ClearToken();
            ScanNextChar(reader);
            ScanNextChar(reader);

            char ch = reader.CurrentChar;

            while (ch != '~' && ch != Chars.EOF)
            {
                ch = AppendAndScanNextChar(reader);
            }

            ScanNextChar(reader);
            ScanNextChar(reader);

            return(Symbol = LexerToken.AsciiBase85String);
        }
Code Example #25
    public void OnTokanMoveAt(LexerToken token)
    {
        switch (token.tokenKind)
        {
        case LexerToken.Kind.Missing:
        case LexerToken.Kind.Whitespace:
        case LexerToken.Kind.Comment:
        case LexerToken.Kind.EOF:
        case LexerToken.Kind.Preprocessor:
        case LexerToken.Kind.PreprocessorSymbol:
        case LexerToken.Kind.PreprocessorArguments:
        case LexerToken.Kind.PreprocessorDirectiveExpected:
        case LexerToken.Kind.PreprocessorCommentExpected:
        case LexerToken.Kind.PreprocessorUnexpectedDirective:
        case LexerToken.Kind.VerbatimStringLiteral:
            break;

        case LexerToken.Kind.Punctuator:
        case LexerToken.Kind.Keyword:
        case LexerToken.Kind.BuiltInLiteral:
            token.tokenId = Parser.TokenToId(token.text);
            break;

        case LexerToken.Kind.Identifier:
        case LexerToken.Kind.ContextualKeyword:
            token.tokenId = m_kParser.tokenIdentifier;
            break;

        case LexerToken.Kind.IntegerLiteral:
        case LexerToken.Kind.RealLiteral:
        case LexerToken.Kind.CharLiteral:
        case LexerToken.Kind.StringLiteral:
        case LexerToken.Kind.VerbatimStringBegin:
            token.tokenId = m_kParser.tokenLiteral;
            break;

        default:
            throw new ArgumentOutOfRangeException();
        }
    }
Code Example #26
    public SyntaxTreeNode_Leaf AddToken(LexerToken token)
    {
        if (!token.IsMissing() && NumValidNodes < nodes.Count)
        {
            var reused = nodes[NumValidNodes] as SyntaxTreeNode_Leaf;
            if (reused != null && reused.token.text == token.text && reused.token.tokenKind == token.tokenKind)
            {
                reused.m_bMissing     = false;
                reused.m_sSyntaxError = null;

                reused.token         = token;
                reused.Parent        = this;
                reused.m_iChildIndex = NumValidNodes;
                ++NumValidNodes;

                Debug.Log("reused " + reused.token + " from line " + (reused.Line + 1));
                return(reused);
            }
        }

        var leaf = new SyntaxTreeNode_Leaf {
            token = token, Parent = this, m_iChildIndex = NumValidNodes
        };

        if (NumValidNodes == nodes.Count)
        {
            nodes.Add(leaf);
            ++NumValidNodes;
        }
        else
        {
            nodes.Insert(NumValidNodes++, leaf);
            for (var i = NumValidNodes; i < nodes.Count; ++i)
            {
                ++nodes[i].m_iChildIndex;
            }
        }
        return(leaf);
    }
Code Example #27
        /// <summary>
        /// Advances the parser to the next significant token, skipping whitespace and
        /// comments.  If a left or right curly is encountered, the current indent is
        /// adjusted accordingly.
        /// </summary>
        /// <returns>The significant token encountered</returns>
        private LexerToken GetNextToken()
        {
            tagIsBracketed = null;

            if (nextToken != null)
            {
                LexerToken temp = nextToken.Value;
                nextToken = null;
                return(SetCurrentToken(temp));
            }

            // Check current character because checking the current token will cause it
            // to skip the next tag if the comment is preceded by a space.
            if (currentChar == '#')
            {
                while ((currentChar = ReadNext()) != '\n' && !eof)
                {
                    ;
                }
                SetCurrentToken(currentChar);
            }

            while (IsSpace(currentChar = ReadNext()) && !eof)
            {
                ;
            }

            if (SetCurrentToken(currentChar) == LexerToken.Comment)
            {
                while ((currentChar = ReadNext()) != '\n' && !eof)
                {
                    ;
                }
                return(GetNextToken());
            }

            return(currentToken);
        }
Code Example #28
 // checks if lookahead accepts input symbol.
 public bool Contains(LexerToken token)
 {
     return(set != null ? set[token.tokenId] : token.tokenId == tokenId);
 }
Code Example #29
File: LexerTokenTests.cs Project: wittyansh/nledger
 public void LexerToken_Expected_ReturnsInvalidCharWantedForNonZeroChar()
 {
     LexerToken.Expected('A', 'B');
 }
Code Example #30
 private bool IsRightUnary(LexerToken token)
 {
     return (token.IsType(TokenTypes.SymbolIdentifier) || token.IsType(TokenTypes.TextIdentifier))
         && library.ContainsEntity(token.Text, InfixNotation.PostOperator);
 }
Code Example #31
File: LexerTokenTests.cs Project: wittyansh/nledger
 public void LexerToken_Expected_ReturnsInvalidCharForZeroChar()
 {
     LexerToken.Expected(default(char), 'B');
 }
Code Example #32
 private void TestTokenEqual(LexerToken token, LexerToken other)
 {
     Assert.Equal(token.Text, other.Text);
     Assert.Equal(token.Type, other.Type);
 }
Code Example #33
 /// <summary>Add token to end of the queue</summary>
 /// <param name="token">The token to add</param>
 private void Append(LexerToken token)
 {
     if(count == buffer.Length)
         Expand();
     buffer[(offset + count) & sizeLessOne] = token;
     count++;
 }
Code Example #34
 private bool IsBinary(LexerToken token)
 {
     return (token.IsType(TokenTypes.SymbolIdentifier) || token.IsType(TokenTypes.TextIdentifier))
         && (library.ContainsEntity(token.Text, InfixNotation.LeftAssociativeInnerOperator)
         || library.ContainsEntity(token.Text, InfixNotation.RightAssociativeInnerOperator));
 }
Code Example #35
 public Token()
 {
     Type = LexerToken.BeginKey;
     Value = "";
 }
Code Example #36
 /// <summary>Expand the token buffer by doubling its capacity</summary>
 private void Expand()
 {
     if(maxSize > 0 && buffer.Length * 2 > maxSize)
         throw new MathNet.Symbolics.Exceptions.ParsingException("Parsing failed. Maximum parser buffer size exceeded.");
     LexerToken[] newBuffer = new LexerToken[buffer.Length * 2];
     for(int i = 0; i < buffer.Length; i++)
         newBuffer[i] = buffer[(offset + i) & sizeLessOne];
     buffer = newBuffer;
     sizeLessOne = buffer.Length - 1;
     offset = 0;
 }
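
Append (Example #33) and Expand together implement a power-of-two ring buffer: because the capacity is always a power of two, (offset + i) & sizeLessOne is a cheap substitute for (offset + i) % capacity. The self-contained sketch below shows the same indexing trick with int slots instead of LexerToken, purely for illustration (the RingBuffer class and its Dequeue method are assumptions):

using System;

sealed class RingBuffer
{
    private int[] buffer = new int[4];   // capacity stays a power of two
    private int sizeLessOne = 3;         // capacity - 1, used as a bit mask
    private int offset;                  // physical index of the logical first element
    private int count;

    public void Append(int item)
    {
        if (count == buffer.Length)
            Expand();
        buffer[(offset + count) & sizeLessOne] = item;   // wrap the write position
        count++;
    }

    public int Dequeue()
    {
        int item = buffer[offset];
        offset = (offset + 1) & sizeLessOne;             // wrap the head pointer
        count--;
        return item;
    }

    private void Expand()
    {
        // Copy elements in logical order into a buffer of twice the size,
        // exactly as the LexerToken version does, then reset the offset.
        var newBuffer = new int[buffer.Length * 2];
        for (int i = 0; i < buffer.Length; i++)
            newBuffer[i] = buffer[(offset + i) & sizeLessOne];
        buffer = newBuffer;
        sizeLessOne = buffer.Length - 1;
        offset = 0;
    }

    static void Main()
    {
        var q = new RingBuffer();
        for (int i = 1; i <= 6; i++)      // the fifth Append forces one Expand
            q.Append(i);
        Console.WriteLine(q.Dequeue());   // 1
        Console.WriteLine(q.Dequeue());   // 2
    }
}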
Code Example #37
 private IEntity ScanEntity(LexerToken token, InfixNotation notation, int inputs)
 {
     if(token.IsType(TokenTypes.MathIdentifier))
         return library.LookupEntity(MathIdentifier.Parse(token.Text));
     else if(token.IsType(TokenTypes.Literal) || token.IsType(TokenTypes.SymbolIdentifier)) //symbol
         return library.LookupEntity(token.Text, notation, inputs);
     else //textsymbol or label
     {
         IEntity entity;
         if(library.TryLookupEntity(token.Text, notation, inputs, out entity))
             return entity;
         else
         {
             MathIdentifier id = library.FindEntityByLabel(token.Text);
             return library.LookupEntity(id);
         }
     }
 }
Code Example #38
 private static bool IsBeginningEncapsulation(LexerToken token)
 {
     return token.IsType(TokenTypes.Left);
 }