コード例 #1
0
        public IToken NextToken()
        {
            // Pull tokens from the underlying source, stashing any comment
            // tokens aside so the parser never sees them.
            var candidate = _source.NextToken();

            for (;;)
            {
                bool isComment = candidate.Type == QuestScriptLexer.Comment ||
                                 candidate.Type == QuestScriptLexer.LineComment;
                if (!isComment)
                {
                    break;
                }

                _commentTokens.Add(candidate);
                candidate = _source.NextToken();
            }

            return candidate;
        }
コード例 #2
0
        /** <summary>
         *  Load all tokens from the token source and put in tokens.
         *  This is done upon first LT request because you might want to
         *  set some token type / channel overrides before filling buffer.
         *  </summary>
         */
        public virtual void FillBuffer()
        {
            // fast return if the buffer is already full
            if (p != -1)
            {
                return;
            }

            int    index = 0;
            IToken t     = _tokenSource.NextToken();

            while (t != null && t.Type != CharStreamConstants.EndOfFile)
            {
                bool discard = false;

                // is there a channel override for this token type?
                int channelI;
                if (channelOverrideMap != null && channelOverrideMap.TryGetValue(t.Type, out channelI))
                {
                    t.Channel = channelI;
                }

                // drop tokens whose type is in the discard set, or (optionally)
                // any token that is not on this stream's primary channel
                if (discardSet != null &&
                    discardSet.Contains(t.Type))
                {
                    discard = true;
                }
                else if (discardOffChannelTokens && t.Channel != this.channel)
                {
                    discard = true;
                }

                if (!discard)
                {
                    // kept tokens are indexed by their position in the buffer
                    t.TokenIndex = index;
                    tokens.Add(t);
                    index++;
                }

                t = _tokenSource.NextToken();
            }
            // leave p pointing at first token on channel
            p = 0;
            p = SkipOffTokenChannels(p);
        }
コード例 #3
0
        /// <summary>
        /// Produces the next token, implementing Go-style automatic semicolon
        /// insertion: when a newline follows a statement-ending token, a
        /// synthetic SEMI token is emitted before the newline is consumed.
        /// </summary>
        private IToken NextTokenImpl()
        {
            // Emit the synthetic semicolon queued up by the previous call.
            if (_nextTokenIsSemicolon)
            {
                _nextTokenIsSemicolon = false;
                var semi = new CommonToken(_nextRealToken.InputStream, GoLexer.SEMI, TokenChannels.Default, _nextRealToken.StartIndex, _nextRealToken.StopIndex)
                {
                    Text = ";*"
                };
                return semi;
            }

            // Prime the one-token lookahead on the very first call.
            if (_nextRealToken == null)
            {
                _nextRealToken = _lexer.NextToken();
            }

            IToken current = _nextRealToken;
            _nextRealToken = _lexer.NextToken();

            // Schedule a synthetic semicolon when a newline follows a token
            // type that can legally end a statement.
            if (_nextRealToken.Type == GoLexer.NEW_LINE && EndsStatement(current.Type))
            {
                _nextTokenIsSemicolon = true;
            }

            return current;
        }

        /// <summary>
        /// True when <paramref name="tokenType"/> is a token type after which
        /// Go's semicolon-insertion rule applies.
        /// </summary>
        private static bool EndsStatement(int tokenType)
        {
            return tokenType == GoLexer.IDENTIFIER
                || tokenType == GoLexer.NUMBER
                || tokenType == GoLexer.CHAR_LITERAL
                || tokenType == GoLexer.STRING_LITERAL
                || tokenType == GoLexer.KW_BREAK
                || tokenType == GoLexer.KW_CONTINUE
                || tokenType == GoLexer.KW_FALLTHROUGH
                || tokenType == GoLexer.KW_RETURN
                || tokenType == GoLexer.INC
                || tokenType == GoLexer.DEC
                || tokenType == GoLexer.RPAREN
                || tokenType == GoLexer.RBRACK
                || tokenType == GoLexer.RBRACE;
        }
コード例 #4
0
 /// <summary>
 /// Attempts to append <paramref name="n"/> tokens from the token source to
 /// the buffer, stopping early once EOF has been buffered.
 /// </summary>
 /// <returns>
 /// The number of tokens actually added; less than <paramref name="n"/>
 /// when EOF was reached first.
 /// </returns>
 protected internal virtual int Fill(int n)
 {
     int added = 0;
     while (added < n)
     {
         // Stop once the last buffered token is EOF; nothing follows it.
         bool bufferEndsWithEof = this.n > 0 && tokens[this.n - 1].Type == TokenConstants.EOF;
         if (bufferEndsWithEof)
         {
             return added;
         }

         Add(TokenSource.NextToken());
         added++;
     }
     return added;
 }
コード例 #5
0
        /** <summary>
         *  Load all tokens from the token source and put in tokens.
         *  This is done upon first LT request because you might want to
         *  set some token type / channel overrides before filling buffer.
         *  </summary>
         */
        public void FillBuffer()
        {
            // fast return if the buffer is already full
            if (p != -1)
            {
                return;
            }

            // Drain the token source into the buffer until EOF. No token index
            // is assigned here — the original assignment was deliberately
            // disabled (presumably SlimToken carries no index; TODO confirm).
            SlimToken t = _tokenSource.NextToken();

            while (t.Type != CharStreamConstants.EndOfFile)
            {
                tokens.Add(t);
                t = _tokenSource.NextToken();
            }
            // leave p pointing at first token on channel
            p = 0;
            p = SkipOffTokenChannels(p);
        }
コード例 #6
0
        /// <summary>Load all tokens from the token source and put in tokens.
        /// This is done upon first LT request because you might want to
        /// set some token type / channel overrides before filling buffer.
        /// </summary>
        protected virtual void FillBuffer()
        {
            int    index = 0;
            IToken t     = tokenSource.NextToken();

            // Pull every token up to EOF, applying channel overrides and
            // discard rules before adding each surviving token to the buffer.
            while ((t != null) && (t.Type != (int)Antlr.Runtime.CharStreamConstants.EOF))
            {
                bool discard = false;
                // is there a channel override for token type?
                if (channelOverrideMap != null)
                {
                    // NOTE(review): assumes a map whose indexer yields null for
                    // a missing key (e.g. Hashtable) rather than throwing, and
                    // that stored values are boxed ints — confirm the map type.
                    object channelI = channelOverrideMap[(int)t.Type];
                    if (channelI != null)
                    {
                        t.Channel = (int)channelI;
                    }
                }
                // NOTE(review): the discard set is keyed by the token type's
                // decimal string form — verify callers populate it that way.
                if (discardSet != null && discardSet.Contains(t.Type.ToString()))
                {
                    discard = true;
                }
                else if (discardOffChannelTokens && t.Channel != this.channel)
                {
                    // optionally drop tokens that are off this stream's channel
                    discard = true;
                }
                if (!discard)
                {
                    // kept tokens are indexed by their position in the buffer
                    t.TokenIndex = index;
                    tokens.Add(t);
                    index++;
                }
                t = tokenSource.NextToken();
            }
            // leave p pointing at first token on channel
            p = 0;
            p = SkipOffTokenChannels(p);
        }
コード例 #7
0
 /// <summary>
 /// Fetches up to <paramref name="n"/> more tokens from the token source
 /// into the buffer, stopping once EOF is consumed.
 /// </summary>
 /// <returns>The actual number of elements added to the buffer.</returns>
 protected internal virtual int Fetch(int n)
 {
     // Nothing can follow EOF once it has been buffered.
     if (fetchedEOF)
     {
         return 0;
     }

     for (int fetched = 1; fetched <= n; fetched++)
     {
         IToken token = _tokenSource.NextToken();
         var writable = token as IWritableToken;
         if (writable != null)
         {
             // Stamp the token with its final position in the buffer.
             writable.TokenIndex = tokens.Count;
         }
         tokens.Add(token);
         if (token.Type == TokenConstants.Eof)
         {
             fetchedEOF = true;
             return fetched;
         }
     }
     return n;
 }
コード例 #8
0
        /// <summary>
        /// Returns the next token, substituting a CaretToken once the caret
        /// offset is reached; further reads past the caret are an error.
        /// </summary>
        public IToken NextToken()
        {
            // Once the caret token has been produced, no further reads are valid.
            if (_caretToken != null)
            {
                throw new InvalidOperationException("Attempted to look past the caret.");
            }

            IToken token = _source.NextToken();

            if (token.StopIndex + 1 < _caretOffset)
            {
                // the caret is after this token; nothing special to do
                return token;
            }

            if (token.StartIndex > _caretOffset)
            {
                // the token is after the caret; no need to include it
                token       = new CaretToken(_tokenFactorySourcePair, TokenConstants.DefaultChannel, _caretOffset, _caretOffset);
                _caretToken = token;
                return token;
            }

            // the caret is at the end of this non-empty token: word tokens get
            // wrapped (completion applies), all others pass through unchanged
            bool caretAtTokenEnd = token.StopIndex + 1 == _caretOffset && token.StopIndex >= token.StartIndex;
            if (caretAtTokenEnd && !IsWordToken(token))
            {
                return token;
            }

            // the caret is in the middle of or at the end of this token
            token       = new CaretToken(token);
            _caretToken = token;
            return token;
        }
コード例 #9
0
        /// <summary>
        /// Passes tokens through unchanged while rendering an HTML-highlighted
        /// view of the input into htmlBuffer. statementBuffer accumulates the
        /// plain text of the current statement so it can be checked against
        /// the known definitions when a newline ends it.
        /// </summary>
        public IToken NextToken()
        {
            IToken token = source.NextToken();

            if (token.Type == InferenceRules_ENLexer.NEWLINE)
            {
                // End of a statement: color it green when it matches a known
                // definition, red otherwise, then reset the statement buffer.
                string statement = statementBuffer.ToString();

                if (definitions.Contains(statement))
                {
                    htmlBuffer.Append("<font color='#006600'>");
                }
                else
                {
                    htmlBuffer.Append("<font color='#FF0000'>");
                }

                htmlBuffer.Append(statement).Append("</font>");

                htmlBuffer.Append("<br/>");
                statementBuffer = new StringBuilder();
            }
            else if (token.Type == InferenceRules_ENLexer.TAB)
            {
                // render a tab as two non-breaking spaces
                htmlBuffer.Append("&nbsp;&nbsp;");
            }
            else
            {
                // RULE/FACT/QUERY each start a new line in the rendered output
                if ((token.Type == InferenceRules_ENLexer.RULE) ||
                    (token.Type == InferenceRules_ENLexer.FACT) ||
                    (token.Type == InferenceRules_ENLexer.QUERY))
                {
                    htmlBuffer.Append("<br/>");
                }

                // quoted text is rendered in blue; inQuote tracks whether we
                // are between an opening and a closing quote token
                if ((token.Type == InferenceRules_ENLexer.QUOTE) && (!inQuote))
                {
                    htmlBuffer.Append("<font color='#0000FF'>");
                    htmlBuffer.Append(token.Text);
                    inQuote = true;
                }
                else if ((token.Type == InferenceRules_ENLexer.QUOTE) && (inQuote))
                {
                    htmlBuffer.Append(token.Text);
                    htmlBuffer.Append("</font>");
                    inQuote = false;
                }
                else if (inQuote)
                {
                    // inside a quote: emit verbatim, no statement accumulation
                    htmlBuffer.Append(token.Text);
                }
                else if ((token.Type == InferenceRules_ENLexer.CHAR) ||
                         (token.Type == InferenceRules_ENLexer.SPACE) ||
                         (token.Type == InferenceRules_ENLexer.NUMERIC))
                {
                    // ordinary statement text is buffered; it is written to the
                    // HTML (with its color) only when the statement ends
                    statementBuffer.Append(token.Text);
                }
                else if ((token.Type == InferenceRules_ENLexer.COUNT) ||
                         (token.Type == InferenceRules_ENLexer.DEDUCT) ||
                         (token.Type == InferenceRules_ENLexer.FORGET) ||
                         (token.Type == InferenceRules_ENLexer.MODIFY))
                {
                    // action keywords render bold purple
                    htmlBuffer.Append("<font color='#990066'><b>").Append(token.Text).Append(" ").Append("</b></font>");
                }
                else
                {
                    // any other token type renders bold in the default color
                    htmlBuffer.Append("<b>").Append(token.Text).Append(" ").Append("</b>");
                }
            }

            return(token);
        }
コード例 #10
0
 /// <summary>Forwards the call to the wrapped token stream.</summary>
 public IToken NextToken()
 {
     IToken next = tokens.NextToken();
     return next;
 }
コード例 #11
0
        /// <summary>
        /// Lexes the (possibly adjusted) snapshot span and returns classification
        /// spans for its tokens, while maintaining per-line multiline-token state
        /// in _lineStates so edits touching multiline tokens force surrounding
        /// lines to be reclassified.
        /// </summary>
        public virtual IList <ClassificationSpan> GetClassificationSpans(SnapshotSpan span)
        {
            // remember the caller's original span; tokens ending before it are
            // lexed (for state) but produce no classification spans
            Span requestedSpan = span;

            // the parse span may be widened before lexing (e.g. to safe
            // token/line boundaries) — see AdjustParseSpan
            AdjustParseSpan(ref span);

            ICharStream  input = CreateInputStream(span);
            ITokenSource lexer = CreateLexer(input);
            List <ClassificationSpan> classificationSpans = new List <ClassificationSpan>();

            IToken previousToken         = null;
            bool   previousTokenEndsLine = false;

            /* this is held outside the loop because only tokens which end at the end of a line
             * impact its value.
             */
            bool lineStateChanged = false;

            // tracks how far multiline-state updates reach, and whether the
            // reported span had to grow beyond what was requested
            int          extendMultilineSpanToLine = 0;
            SnapshotSpan extendedSpan = span;
            bool         spanExtended = false;

            while (true)
            {
                IToken token = lexer.NextToken();

                // tokens starting at/after span.End are processed for state but
                // end the loop (see the break near the bottom)
                bool inBounds = token.StartIndex < span.End.Position;

                int startLineCurrent;
                if (token.Type == CharStreamConstants.EndOfFile)
                {
                    // treat EOF as starting past the last line so the gap logic
                    // below covers trailing multiline content
                    startLineCurrent = span.Snapshot.LineCount;
                }
                else
                {
                    startLineCurrent = token.Line;
                }

                // a gap of whole lines between the previous token and this one
                // means those lines lie inside a multiline token
                if (previousToken == null || previousToken.Line < startLineCurrent - 1)
                {
                    // endLinePrevious is the line number the previous token ended on
                    int endLinePrevious;
                    if (previousToken != null)
                    {
                        endLinePrevious = span.Snapshot.GetLineNumberFromPosition(previousToken.StopIndex + 1);
                    }
                    else
                    {
                        endLinePrevious = span.Snapshot.GetLineNumberFromPosition(span.Start) - 1;
                    }

                    if (startLineCurrent > endLinePrevious + 1)
                    {
                        int firstMultilineLine = endLinePrevious;
                        if (previousToken == null || previousTokenEndsLine)
                        {
                            firstMultilineLine++;
                        }

                        for (int i = firstMultilineLine; i < startLineCurrent; i++)
                        {
                            // lines newly marked multiline (or whose state just
                            // changed) extend the reclassification range
                            if (!_lineStates[i].MultilineToken || lineStateChanged)
                            {
                                extendMultilineSpanToLine = i + 1;
                            }

                            if (inBounds)
                            {
                                SetLineState(i, LineStateInfo.Multiline);
                            }
                        }
                    }
                }

                if (token.Type == CharStreamConstants.EndOfFile)
                {
                    break;
                }

                previousToken         = token;
                previousTokenEndsLine = TokenEndsAtEndOfLine(span.Snapshot, lexer, token);

                // a token spanning several lines marks each covered line as
                // being inside a multiline token
                if (IsMultilineToken(span.Snapshot, lexer, token))
                {
                    int startLine = span.Snapshot.GetLineNumberFromPosition(token.StartIndex);
                    int stopLine  = span.Snapshot.GetLineNumberFromPosition(token.StopIndex + 1);
                    for (int i = startLine; i < stopLine; i++)
                    {
                        if (!_lineStates[i].MultilineToken)
                        {
                            extendMultilineSpanToLine = i + 1;
                        }

                        if (inBounds)
                        {
                            SetLineState(i, LineStateInfo.Multiline);
                        }
                    }
                }

                bool tokenEndsLine = previousTokenEndsLine;
                if (tokenEndsLine)
                {
                    int line = span.Snapshot.GetLineNumberFromPosition(token.StopIndex + 1);
                    // the state changes when a line previously marked multiline
                    // now ends with a complete token
                    lineStateChanged = _lineStates[line].MultilineToken;

                    // even if the state didn't change, we call SetLineState to make sure the _first/_lastChangedLine values get updated.
                    if (inBounds)
                    {
                        SetLineState(line, new LineStateInfo());
                    }

                    if (lineStateChanged)
                    {
                        if (line < span.Snapshot.LineCount - 1)
                        {
                            /* update the span's end position or the line state change won't be reflected
                             * in the editor
                             */
                            int endPosition = span.Snapshot.GetLineFromLineNumber(line + 1).EndIncludingLineBreak;
                            if (endPosition > extendedSpan.End)
                            {
                                spanExtended = true;
                                extendedSpan = new SnapshotSpan(extendedSpan.Snapshot, Span.FromBounds(extendedSpan.Start, endPosition));
                            }
                        }
                    }
                }

                if (token.StartIndex >= span.End.Position)
                {
                    break;
                }

                // tokens wholly before the caller's requested span contribute
                // state only, not classification spans
                if (token.StopIndex < requestedSpan.Start)
                {
                    continue;
                }

                var tokenClassificationSpans = GetClassificationSpansForToken(token, span.Snapshot);
                if (tokenClassificationSpans != null)
                {
                    classificationSpans.AddRange(tokenClassificationSpans);
                }

                if (!inBounds)
                {
                    break;
                }
            }

            // grow the extended span to cover every line whose multiline state
            // was touched above
            if (extendMultilineSpanToLine > 0)
            {
                int endPosition = extendMultilineSpanToLine < span.Snapshot.LineCount ? span.Snapshot.GetLineFromLineNumber(extendMultilineSpanToLine).EndIncludingLineBreak : span.Snapshot.Length;
                if (endPosition > extendedSpan.End)
                {
                    spanExtended = true;
                    extendedSpan = new SnapshotSpan(extendedSpan.Snapshot, Span.FromBounds(extendedSpan.Start, endPosition));
                }
            }

            if (spanExtended)
            {
                /* Subtract 1 from each of these because the spans include the line break on their last
                 * line, forcing it to appear as the first position on the following line.
                 */
                int firstLine = extendedSpan.Snapshot.GetLineNumberFromPosition(span.End);
                int lastLine  = extendedSpan.Snapshot.GetLineNumberFromPosition(extendedSpan.End) - 1;
                ForceReclassifyLines(firstLine, lastLine);
            }

            return(classificationSpans);
        }