Example 1
        public override CommandResult Invoke(Guid group, int id, object inputArg, ref object outputArg)
        {
            string originalText  = TargetBuffer.CurrentSnapshot.GetText();
            string formattedText = string.Empty;
            var    formatter     = new RFormatter(REditorSettings.FormatOptions);

            try {
                formattedText = formatter.Format(originalText);
            } catch (Exception ex) {
                Debug.Assert(false, "Formatter exception: ", ex.Message);
            }

            if (!string.IsNullOrEmpty(formattedText) && !string.Equals(formattedText, originalText, StringComparison.Ordinal))
            {
                var selectionTracker = new RSelectionTracker(TextView, TargetBuffer, new TextRange(0, TargetBuffer.CurrentSnapshot.Length));
                selectionTracker.StartTracking(automaticTracking: false);

                try {
                    using (var massiveChange = new MassiveChange(TextView, TargetBuffer, EditorShell, Resources.FormatDocument)) {
                        IREditorDocument document = REditorDocument.TryFromTextBuffer(TargetBuffer);
                        if (document != null)
                        {
                            document.EditorTree.Invalidate();
                        }

                        var caretPosition = TextView.Caret.Position.BufferPosition;
                        var viewPortLeft  = TextView.ViewportLeft;

                        RTokenizer tokenizer = new RTokenizer();
                        string     oldText   = TargetBuffer.CurrentSnapshot.GetText();
                        IReadOnlyTextRangeCollection <RToken> oldTokens = tokenizer.Tokenize(oldText);
                        IReadOnlyTextRangeCollection <RToken> newTokens = tokenizer.Tokenize(formattedText);

#if DEBUG
                        //if (oldTokens.Count != newTokens.Count) {
                        //    for (int i = 0; i < Math.Min(oldTokens.Count, newTokens.Count); i++) {
                        //        if (oldTokens[i].TokenType != newTokens[i].TokenType) {
                        //            Debug.Assert(false, Invariant($"Token type difference at {i}"));
                        //            break;
                        //        } else if (oldTokens[i].Length != newTokens[i].Length) {
                        //            Debug.Assert(false, Invariant($"token length difference at {i}"));
                        //            break;
                        //        }
                        //    }
                        //}
#endif
                        IncrementalTextChangeApplication.ApplyChangeByTokens(
                            TargetBuffer,
                            new TextStream(oldText), new TextStream(formattedText),
                            oldTokens, newTokens,
                            TextRange.FromBounds(0, oldText.Length),
                            Resources.FormatDocument, selectionTracker, EditorShell);
                    }
                } finally {
                    selectionTracker.EndTracking();
                }
                return(new CommandResult(CommandStatus.Supported, 0));
            }
            return(CommandResult.NotSupported);
        }
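The DEBUG block above checks that the original and the formatted text tokenize to the same token sequence: formatting is expected to change only whitespace. That is what lets ApplyChangeByTokens replace just the gaps between corresponding tokens instead of swapping the whole buffer, which would discard the caret position and any tracking spans. A minimal, self-contained sketch of that idea (not the RTVS implementation, which also routes the edits through the undo unit and selection tracker):

    using System.Collections.Generic;

    internal readonly record struct Tok(int Start, int Length);

    internal static class TokenDiff {
        // Returns (start, length, replacement) edits against oldText, ordered last-to-first
        // so earlier offsets remain valid while the edits are applied.
        public static List<(int Start, int Length, string Text)> WhitespaceEdits(
            string oldText, string newText,
            IReadOnlyList<Tok> oldTokens, IReadOnlyList<Tok> newTokens) {

            var edits = new List<(int Start, int Length, string Text)>();
            int oldPrevEnd = 0, newPrevEnd = 0;

            // Walk the gap before each token pair, plus the trailing gap after the last pair.
            for (int i = 0; i <= oldTokens.Count; i++) {
                int oldGapEnd = i < oldTokens.Count ? oldTokens[i].Start : oldText.Length;
                int newGapEnd = i < newTokens.Count ? newTokens[i].Start : newText.Length;

                string oldGap = oldText.Substring(oldPrevEnd, oldGapEnd - oldPrevEnd);
                string newGap = newText.Substring(newPrevEnd, newGapEnd - newPrevEnd);
                if (oldGap != newGap)
                    edits.Add((oldPrevEnd, oldGap.Length, newGap));

                if (i < oldTokens.Count) {
                    oldPrevEnd = oldTokens[i].Start + oldTokens[i].Length;
                    newPrevEnd = newTokens[i].Start + newTokens[i].Length;
                }
            }

            edits.Reverse();
            return edits;
        }
    }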
Example 2
        private void TokenFromPosition(ITextSnapshot snapshot, int position, out int itemIndex, out int offset)
        {
            // Normally token stream does not change after formatting so we can simply rely on the fact
            // that caret position is going to remain relative to the same token index
            itemIndex = -1;
            offset    = 0;

            // Expand range to include the next line. This is needed when user introduces line break.
            var lineNumber = snapshot.GetLineNumberFromPosition(_changingRange.End);

            if (lineNumber < snapshot.LineCount - 1)
            {
                var end = snapshot.GetLineFromLineNumber(lineNumber + 1).End;
                _changingRange = TextRange.FromBounds(_changingRange.Start, end);
            }

            var tokenizer = new RTokenizer();
            IReadOnlyTextRangeCollection <RToken> tokens =
                tokenizer.Tokenize(new TextProvider(snapshot), _changingRange.Start, _changingRange.Length, true);

            // Check if position is adjacent to previous token
            int prevItemIndex = tokens.GetFirstItemBeforePosition(position);

            if (prevItemIndex >= 0 && tokens[prevItemIndex].End == position)
            {
                itemIndex = prevItemIndex;
                offset    = -tokens[itemIndex].Length;
                return;
            }

            int nextItemIndex = tokens.GetFirstItemAfterOrAtPosition(position);

            if (nextItemIndex >= 0)
            {
                // If two tokens are adjacent, gravity is negative, i.e. the caret travels
                // with the preceding token so it won't jump to another line if, say, the
                // formatter decides to insert a new line between tokens.

                if (nextItemIndex > 0 && tokens[nextItemIndex - 1].End == tokens[nextItemIndex].Start)
                {
                    nextItemIndex--;
                }

                offset    = tokens[nextItemIndex].Start - position;
                itemIndex = nextItemIndex;
                return;
            }

            // We are past last token
            if (tokens.Count > 0)
            {
                itemIndex = tokens.Count - 1;
                offset    = tokens[itemIndex].Start - position;
            }
            else
            {
                itemIndex = -1;
                offset    = position;
            }
        }
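To make the "negative gravity" rule concrete, here is a simplified, self-contained classification over plain (Start, Length) pairs. It only approximates the branches above (the real GetFirstItemBeforePosition / GetFirstItemAfterOrAtPosition helpers may treat edge cases differently), and the expected values in the comments hold for this sketch:

    using System.Collections.Generic;

    internal static class CaretAnchor {
        public static (int ItemIndex, int Offset) Classify(
            IReadOnlyList<(int Start, int Length)> tokens, int position) {
            // 1. Caret sits exactly at the end of a token: it travels with that token.
            for (int i = tokens.Count - 1; i >= 0; i--) {
                int end = tokens[i].Start + tokens[i].Length;
                if (end == position) return (i, -tokens[i].Length);
                if (end < position) break;          // tokens are sorted; nothing earlier can match
            }
            // 2. Otherwise attach to the next token; if that token is glued to its predecessor,
            //    prefer the predecessor (negative gravity) so the caret does not jump to another
            //    line if the formatter inserts a line break between them.
            for (int i = 0; i < tokens.Count; i++) {
                if (tokens[i].Start + tokens[i].Length > position) {
                    int idx = (i > 0 && tokens[i - 1].Start + tokens[i - 1].Length == tokens[i].Start)
                        ? i - 1 : i;
                    return (idx, tokens[idx].Start - position);
                }
            }
            // 3. Past the last token (or no tokens at all).
            return tokens.Count > 0
                ? (tokens.Count - 1, tokens[tokens.Count - 1].Start - position)
                : (-1, position);
        }
    }

    // For "x <- 1" with tokens (0,1) (2,2) (5,1):
    //   Classify(tokens, 1) == (0, -1)   caret right after 'x'
    //   Classify(tokens, 2) == (1,  0)   caret in the whitespace, attaches to '<-'
    //   Classify(tokens, 4) == (1, -2)   caret right after '<-'
    //   Classify(tokens, 7) == (2, -2)   caret past the end of the text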
Example 3
        public override bool Parse(ParseContext context, IAstNode parent)
        {
            RToken currentToken  = context.Tokens.CurrentToken;
            string text          = context.TextProvider.GetText(currentToken);
            double realPart      = 0;
            double imaginaryPart = 0;

            Debug.Assert(currentToken.TokenType == RTokenType.Complex);

            // Split into real and imaginary parts. Imaginary part
            // should always be there since otherwise tokenizer would
            // not have identified the number as complex. Note that
            // real part may be missing as in '+0i'. Operator may also
            // be missing: 1i is a legal complex number.

            Debug.Assert(text[text.Length - 1] == 'i');

            // Drop trailing i and retokenize as two numbers
            RTokenizer tokenizer = new RTokenizer(separateComments: false);
            IReadOnlyTextRangeCollection <RToken> tokens = tokenizer.Tokenize(text.Substring(0, text.Length - 1));

            if (tokens.Count == 1)
            {
                // Only imaginary part is present
                Debug.Assert(tokens[0].TokenType == RTokenType.Number);
                // TODO: handle complex numbers in Hex
                if (!Double.TryParse(text.Substring(tokens[0].Start, tokens[0].Length), out imaginaryPart))
                {
                    imaginaryPart = 0;
                }
            }
            else if (tokens.Count == 3)
            {
                // Real and imaginary parts present
                Debug.Assert(tokens[0].TokenType == RTokenType.Number);
                Debug.Assert(tokens[1].TokenType == RTokenType.Operator);
                Debug.Assert(tokens[2].TokenType == RTokenType.Number);

                // TODO: handle complex numbers in Hex
                if (!Double.TryParse(text.Substring(tokens[0].Start, tokens[0].Length), out realPart))
                {
                    realPart = 0;
                }
                if (!Double.TryParse(text.Substring(tokens[2].Start, tokens[2].Length), out imaginaryPart))
                {
                    imaginaryPart = 0;
                }
            }
            else
            {
                context.AddError(new MissingItemParseError(ParseErrorType.NumberExpected, context.Tokens.PreviousToken));
                return(false);
            }

            Complex complex = new Complex(realPart, imaginaryPart);

            NodeValue = new RComplex(complex);
            return(base.Parse(context, parent));
        }
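For comparison, a standalone approximation of the same splitting logic that handles only simple decimal forms such as "2i", "+2i", "3.5+2i" or "1-0.5i". It does not use RTokenizer, and the hex forms flagged by the TODO comments above (as well as exponent forms like "1e+2i") are not handled:

    using System;
    using System.Globalization;
    using System.Numerics;

    internal static class RComplexLiteral {
        public static Complex Parse(string text) {
            if (text.Length == 0 || text[text.Length - 1] != 'i')
                throw new FormatException("Not an R complex literal: " + text);

            string body = text.Substring(0, text.Length - 1);   // drop the trailing 'i'

            // Find a '+' or '-' separating real and imaginary parts; start at 1 so a
            // leading sign (as in "-2i") stays with the imaginary part.
            int split = -1;
            for (int i = 1; i < body.Length; i++) {
                if (body[i] == '+' || body[i] == '-') { split = i; break; }
            }

            double realPart = 0, imaginaryPart;
            string imaginaryText = body;
            if (split > 0) {
                double.TryParse(body.Substring(0, split), NumberStyles.Float,
                                CultureInfo.InvariantCulture, out realPart);
                imaginaryText = body.Substring(split);           // keep the sign
            }
            if (!double.TryParse(imaginaryText, NumberStyles.Float,
                                 CultureInfo.InvariantCulture, out imaginaryPart)) {
                imaginaryPart = 0;                               // mirror the defensive defaults above
            }
            return new Complex(realPart, imaginaryPart);
        }
    }

    // RComplexLiteral.Parse("3.5+2i") -> (3.5, 2)
    // RComplexLiteral.Parse("1i")     -> (0, 1)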
Example 4
        public static AstRoot Parse(ITextProvider textProvider, ITextRange range, IExpressionTermFilter filter)
        {
            var tokenizer = new RTokenizer(separateComments: true);

            IReadOnlyTextRangeCollection <RToken> tokens = tokenizer.Tokenize(textProvider, range.Start, range.Length);
            TokenStream <RToken> tokenStream             = new TokenStream <RToken>(tokens, new RToken(RTokenType.EndOfStream, TextRange.EmptyRange));

            return(Parse(textProvider, range, tokenStream, tokenizer.CommentTokens, filter));
        }
Example 5
        internal static bool IsPackageListCompletion(ITextBuffer textBuffer, int position)
        {
            ITextSnapshot     snapshot = textBuffer.CurrentSnapshot;
            ITextSnapshotLine line     = snapshot.GetLineFromPosition(position);
            string            lineText = line.GetText();
            int linePosition           = position - line.Start;

            // We should be either at library(| or inside library(|)
            // or over a package name like in library(ba|se)

            // Scan the tokens on this line looking for library( or require(
            RTokenizer    tokenizer    = new RTokenizer();
            ITextProvider textProvider = new TextStream(lineText);
            IReadOnlyTextRangeCollection <RToken> c = tokenizer.Tokenize(textProvider, 0, textProvider.Length);
            TokenStream <RToken> tokens             = new TokenStream <RToken>(c, RToken.EndOfStreamToken);

            while (!tokens.IsEndOfStream())
            {
                if (tokens.CurrentToken.Start >= linePosition)
                {
                    break;
                }

                if (tokens.CurrentToken.TokenType == RTokenType.Identifier)
                {
                    string identifier = textProvider.GetText(tokens.CurrentToken);
                    if (identifier == "library" || identifier == "require")
                    {
                        tokens.MoveToNextToken();

                        if (tokens.CurrentToken.TokenType == RTokenType.OpenBrace)
                        {
                            RToken openBrace = tokens.CurrentToken;
                            while (!tokens.IsEndOfStream())
                            {
                                if (tokens.CurrentToken.TokenType == RTokenType.CloseBrace)
                                {
                                    if (linePosition >= openBrace.End && linePosition <= tokens.CurrentToken.Start)
                                    {
                                        return(true);
                                    }
                                    return(false);
                                }
                                else if (tokens.NextToken.TokenType == RTokenType.EndOfStream)
                                {
                                    return(true);
                                }
                                tokens.MoveToNextToken();
                            }
                        }
                    }
                }
                tokens.MoveToNextToken();
            }
            return(false);
        }
Example 6
        public TokenStream(IReadOnlyTextRangeCollection <T> tokens, T endOfStreamToken)
        {
            Check.ArgumentNull(nameof(tokens), tokens);

            _index            = 0;
            _tokens           = tokens;
            _endOfStreamToken = endOfStreamToken;
            _isEndOfStream    = tokens.Length == 0;
            CurrentToken      = _isEndOfStream ? _endOfStreamToken : _tokens[0];
        }
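A typical consumption pattern for the stream, using only members that appear in the examples in this section (usings for the RTVS assemblies are omitted):

    var text      = "x <- library(base)";
    var tokenizer = new RTokenizer();
    IReadOnlyTextRangeCollection<RToken> collection = tokenizer.Tokenize(text);
    var tokens    = new TokenStream<RToken>(collection, RToken.EndOfStreamToken);

    while (!tokens.IsEndOfStream()) {
        RToken t = tokens.CurrentToken;
        Console.WriteLine($"{t.TokenType} [{t.Start}..{t.End})");
        tokens.MoveToNextToken();
    }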
Example 7
        public void Tokenize_BuiltIns01()
        {
            IReadOnlyTextRangeCollection <RToken> tokens = this.Tokenize("require library switch return", new RTokenizer());

            tokens.Should().HaveCount(4);
            foreach (var token in tokens)
            {
                token.Should().HaveType(RTokenType.Identifier).And.HaveSubType(RTokenSubType.BuiltinFunction);
            }
        }
Example 8
        /// <summary>
        /// Given RD data and a function name, parses the data and creates structured
        /// information about the function. The method returns multiple functions since
        /// RD data often provides information on several related functions; to avoid
        /// processing the same data multiple times, the parser extracts information
        /// on all of them.
        /// </summary>
        public static IReadOnlyList <IFunctionInfo> GetFunctionInfos(string rdHelpData)
        {
            var tokenizer = new RdTokenizer(tokenizeRContent: false);

            ITextProvider textProvider = new TextStream(rdHelpData);
            IReadOnlyTextRangeCollection <RdToken> tokens = tokenizer.Tokenize(textProvider, 0, textProvider.Length);
            RdParseContext context = new RdParseContext(tokens, textProvider);

            return(ParseFunctions(context));
        }
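A hypothetical call site; the static class that contains GetFunctionInfos is not shown in this section, so "RdParser" is only a placeholder name here:

    string rdHelpData = File.ReadAllText("mean.Rd");              // raw Rd content for a help topic
    IReadOnlyList<IFunctionInfo> infos = RdParser.GetFunctionInfos(rdHelpData);
    Console.WriteLine($"Rd data describes {infos.Count} function(s).");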
Example 9
        public void Tokenize_Missing()
        {
            string s = "NA NA_character_ NA_complex_ NA_integer_ NA_real_";

            IReadOnlyTextRangeCollection <RToken> tokens = this.Tokenize(s, new RTokenizer());

            tokens.Should().HaveCount(5);
            foreach (var token in tokens)
            {
                token.Should().HaveType(RTokenType.Missing).And.HaveSubType(RTokenSubType.BuiltinConstant);
            }
        }
Example 10
        public void Tokenize_BuiltIns02()
        {
            IReadOnlyTextRangeCollection <RToken> tokens = this.Tokenize("require() library() switch() return()", new RTokenizer());

            tokens.Should().HaveCount(12);
            for (var i = 0; i < tokens.Count; i += 3)
            {
                tokens[i].Should().HaveType(RTokenType.Identifier).And.HaveSubType(RTokenSubType.BuiltinFunction);
                tokens[i + 1].Should().HaveType(RTokenType.OpenBrace);
                tokens[i + 2].Should().HaveType(RTokenType.CloseBrace);
            }
        }
Example 11
        private static bool IsMultiLineCandidate(string text)
        {
            if (text.IndexOfAny(new[] { '\n', '\r' }) != -1)
            {
                // if we already have newlines then we're multiline
                return(true);
            }

            var tokenizer = new RTokenizer();
            IReadOnlyTextRangeCollection <RToken> tokens = tokenizer.Tokenize(new TextStream(text), 0, text.Length);

            return(tokens.Any(t => t.TokenType == RTokenType.OpenCurlyBrace));
        }
Example 12
        public TokenStream(IReadOnlyTextRangeCollection <T> tokens, T endOfStreamToken)
        {
            if (tokens == null)
            {
                throw new ArgumentNullException("tokens");
            }

            _index            = 0;
            _tokens           = tokens;
            _endOfStreamToken = endOfStreamToken;
            _isEndOfStream    = tokens.Length == 0;
            _currentToken     = _isEndOfStream ? _endOfStreamToken : _tokens[0];
        }
Example 13
        private static void TokenFromPosition(ITextSnapshot snapshot, int position, out int itemIndex, out int offset)
        {
            // Normally token stream does not change after formatting so we can simply rely on the fact
            // that caret position is going to remain relative to the same token index
            itemIndex = -1;
            offset    = 0;

            var tokenizer = new RTokenizer();
            IReadOnlyTextRangeCollection <RToken> tokens = tokenizer.Tokenize(new TextProvider(snapshot), 0, snapshot.Length, true);

            // Check if position is adjacent to previous token
            int prevItemIndex = tokens.GetFirstItemBeforePosition(position);

            if (prevItemIndex >= 0 && tokens[prevItemIndex].End == position)
            {
                itemIndex = prevItemIndex;
                offset    = -tokens[itemIndex].Length;
                return;
            }

            int nextItemIndex = tokens.GetFirstItemAfterOrAtPosition(position);

            if (nextItemIndex >= 0)
            {
                // If two tokens are adjacent, gravity is negative, i.e. the caret travels
                // with the preceding token so it won't jump to another line if, say, the
                // formatter decides to insert a new line between tokens.

                if (nextItemIndex > 0 && tokens[nextItemIndex - 1].End == tokens[nextItemIndex].Start)
                {
                    nextItemIndex--;
                }

                offset    = tokens[nextItemIndex].Start - position;
                itemIndex = nextItemIndex;
                return;
            }

            // We are past last token
            if (tokens.Count > 0)
            {
                itemIndex = tokens.Count - 1;
                offset    = tokens[itemIndex].Start - position;
            }
            else
            {
                itemIndex = -1;
                offset    = position;
            }
        }
Example 14
        /// <summary>
        /// Parse text from a text provider within a given range
        /// </summary>
        /// <param name="textProvider">Text provider</param>
        /// <param name="range">Range to parse</param>
        public static AstRoot Parse(ITextProvider textProvider, ITextRange range)
        {
            var tokenizer = new RTokenizer(separateComments: true);

            IReadOnlyTextRangeCollection <RToken> tokens = tokenizer.Tokenize(textProvider, range.Start, range.Length);
            TokenStream <RToken> tokenStream             = new TokenStream <RToken>(tokens, new RToken(RTokenType.EndOfStream, TextRange.EmptyRange));

            ParseContext context = new ParseContext(textProvider, range, tokenStream, tokenizer.CommentTokens);

            context.AstRoot.Parse(context, context.AstRoot);
            context.AstRoot.Errors = new TextRangeCollection <IParseError>(context.Errors);

            return(context.AstRoot);
        }
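A hedged usage sketch: the static class hosting Parse is not shown in this section, so "RParser" is assumed here; everything else uses types visible in the surrounding examples:

    ITextProvider text = new TextStream("f <- function(x) { x + 1 }");
    AstRoot ast = RParser.Parse(text, TextRange.FromBounds(0, text.Length));
    Console.WriteLine($"Parsed with {ast.Errors.Count} parse error(s).");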
Example 15
        public static IReadOnlyList <ISignatureInfo> ParseSignatures(string usageContent, IReadOnlyDictionary <string, string> argumentsDescriptions = null)
        {
            // RD signature text may contain the \dots sequence, which denotes an ellipsis.
            // The R parser does not know about it, so we replace \dots with '...'.
            // Also, signatures may contain S3 method info like
            // '\method{as.matrix}{data.frame}(x, rownames.force = NA, \dots)'
            // which we need to filter out since it is irrelevant to IntelliSense.

            List <ISignatureInfo> signatures = new List <ISignatureInfo>();

            usageContent = usageContent.Replace(@"\dots", "...");

            RTokenizer tokenizer = new RTokenizer(separateComments: true);
            IReadOnlyTextRangeCollection <RToken> collection = tokenizer.Tokenize(usageContent);
            ITextProvider        textProvider = new TextStream(usageContent);
            TokenStream <RToken> tokens       = new TokenStream <RToken>(collection, RToken.EndOfStreamToken);

            var parseContext = new ParseContext(textProvider,
                                                TextRange.FromBounds(tokens.CurrentToken.Start, textProvider.Length),
                                                tokens, tokenizer.CommentTokens);

            while (!tokens.IsEndOfStream())
            {
                // Filter out '\method{...}{}(signature)
                if (tokens.CurrentToken.TokenType == RTokenType.OpenCurlyBrace)
                {
                    // Check if { is preceded by \method
                }

                if (tokens.CurrentToken.TokenType != RTokenType.Identifier)
                {
                    break;
                }

                string functionName = textProvider.GetText(tokens.CurrentToken);
                tokens.MoveToNextToken();

                ISignatureInfo info = ParseSignature(functionName, parseContext, argumentsDescriptions);
                if (info != null)
                {
                    signatures.Add(info);
                }
            }

            return(signatures);
        }
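The \method filtering above is left as an empty branch; the sketch below shows one way the '\method{generic}{class}' wrappers could be normalized before parsing. It is an illustration only, not the RTVS implementation, which works at the token level and may handle these forms differently:

    using System.Text.RegularExpressions;

    internal static class RdUsageNormalizer {
        public static string Normalize(string usageContent) {
            // '\method{as.matrix}{data.frame}(x, ...)' -> 'as.matrix(x, ...)'
            usageContent = Regex.Replace(usageContent, @"\\method\{([^}]+)\}\{[^}]+\}", "$1");
            // '\dots' denotes ellipsis; the R parser only understands '...'
            return usageContent.Replace(@"\dots", "...");
        }
    }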
Example 16
        public static bool FormatRangeExact(ITextView textView, ITextBuffer textBuffer,
                                            ITextRange formatRange, RFormatOptions options)
        {
            ITextSnapshot snapshot        = textBuffer.CurrentSnapshot;
            Span          spanToFormat    = new Span(formatRange.Start, formatRange.Length);
            string        spanText        = snapshot.GetText(spanToFormat.Start, spanToFormat.Length);
            string        trimmedSpanText = spanText.Trim();

            RFormatter formatter     = new RFormatter(options);
            string     formattedText = formatter.Format(trimmedSpanText);

            formattedText = formattedText.Trim(); // There may be inserted line breaks after {
            // Apply the formatted text without indentation. We will then update the parse tree
            // so we can calculate proper line indents from the AST via the smart indenter.
            if (!spanText.Equals(formattedText, StringComparison.Ordinal))
            {
                // Extract existing indent before applying changes. Existing indent
                // may be used by the smart indenter for function argument lists.
                var startLine = snapshot.GetLineFromPosition(spanToFormat.Start);
                var originalIndentSizeInSpaces = IndentBuilder.TextIndentInSpaces(startLine.GetText(), options.IndentSize);

                var        selectionTracker = new RSelectionTracker(textView, textBuffer, formatRange);
                RTokenizer tokenizer        = new RTokenizer();
                IReadOnlyTextRangeCollection <RToken> oldTokens = tokenizer.Tokenize(spanText);
                IReadOnlyTextRangeCollection <RToken> newTokens = tokenizer.Tokenize(formattedText);

                IncrementalTextChangeApplication.ApplyChangeByTokens(
                    textBuffer,
                    new TextStream(spanText), new TextStream(formattedText),
                    oldTokens, newTokens,
                    formatRange,
                    Resources.AutoFormat, selectionTracker,
                    () => {
                    var ast = UpdateAst(textBuffer);
                    // Apply indentation
                    IndentLines(textView, textBuffer, new TextRange(formatRange.Start, formattedText.Length), ast, options, originalIndentSizeInSpaces);
                });

                return(true);
            }

            return(false);
        }
Example 17
        public static bool FormatRangeExact(ITextView textView, ITextBuffer textBuffer, ITextRange formatRange,
                                            AstRoot ast, RFormatOptions options,
                                            int scopeStatementPosition, bool respectUserIndent = true)
        {
            ITextSnapshot snapshot        = textBuffer.CurrentSnapshot;
            Span          spanToFormat    = new Span(formatRange.Start, formatRange.Length);
            string        spanText        = snapshot.GetText(spanToFormat.Start, spanToFormat.Length);
            string        trimmedSpanText = spanText.Trim();

            if (trimmedSpanText == "}")
            {
                // Locate opening { and its statement
                var scopeNode = ast.GetNodeOfTypeFromPosition <IAstNodeWithScope>(spanToFormat.Start);
                if (scopeNode != null)
                {
                    scopeStatementPosition = scopeNode.Start;
                }
            }

            RFormatter formatter     = new RFormatter(options);
            string     formattedText = formatter.Format(trimmedSpanText);

            formattedText = formattedText.Trim(); // there may be inserted line breaks after {
            formattedText = IndentLines(textBuffer, spanToFormat.Start, ast, formattedText, options, scopeStatementPosition, respectUserIndent);

            if (!spanText.Equals(formattedText, StringComparison.Ordinal))
            {
                var        selectionTracker = new RSelectionTracker(textView, textBuffer);
                RTokenizer tokenizer        = new RTokenizer();
                IReadOnlyTextRangeCollection <RToken> oldTokens = tokenizer.Tokenize(spanText);
                IReadOnlyTextRangeCollection <RToken> newTokens = tokenizer.Tokenize(formattedText);
                IncrementalTextChangeApplication.ApplyChangeByTokens(
                    textBuffer,
                    new TextStream(spanText), new TextStream(formattedText),
                    oldTokens, newTokens,
                    formatRange,
                    Resources.AutoFormat, selectionTracker);
                return(true);
            }

            return(false);
        }
Example 18
        public static string WriteTokens <Token, TokenType>(IReadOnlyTextRangeCollection <Token> tokens) where Token : ITextRange
        {
            var sb = new StringBuilder();

            foreach (Token t in tokens)
            {
                if (t is ICompositeToken)
                {
                    WriteCompositeToken(t as ICompositeToken, sb);
                }
                else
                {
                    WriteToken <Token, TokenType>(t, sb);
                }
            }

            string s = sb.ToString();

            return(s);
        }
Example 19
 public ErrorArgument(IEnumerable <RToken> tokens)
 {
     Tokens = new TextRangeCollection <RToken>(tokens);
 }
Example 20
 public RdParseContext(string packageName, IReadOnlyTextRangeCollection <RdToken> tokens, ITextProvider textProvider)
 {
     PackageName  = packageName;
     TextProvider = textProvider;
     Tokens       = new TokenStream <RdToken>(tokens, RdToken.EndOfStreamToken);
 }
Example 21
 public RdParseContext(IReadOnlyTextRangeCollection <RdToken> tokens, ITextProvider textProvider)
 {
     this.TextProvider = textProvider;
     this.Tokens       = new TokenStream <RdToken>(tokens, RdToken.EndOfStreamToken);
 }
Example 22
 public RdParseContext(IReadOnlyTextRangeCollection<RdToken> tokens, ITextProvider textProvider) {
     this.TextProvider = textProvider;
     this.Tokens = new TokenStream<RdToken>(tokens, RdToken.EndOfStreamToken);
 }
Example 23
 public ErrorArgument(IEnumerable<RToken> tokens) {
     Tokens = new TextRangeCollection<RToken>(tokens);
 }
Example 24
        public virtual bool GetLanguageBracesFromPosition(
            BraceType braceType,
            int position, bool reversed, out int start, out int end)
        {
            TokenTypeT startTokenType = BraceTypeToTokenTypeMap[braceType].OpenBrace;
            TokenTypeT endTokenType   = BraceTypeToTokenTypeMap[braceType].CloseBrace;
            IReadOnlyTextRangeCollection <TokenClassT> tokens = GetTokens(0, TextBuffer.CurrentSnapshot.Length);

            start = -1;
            end   = -1;

            Stack <TokenTypeT> stack = new Stack <TokenTypeT>();

            int startIndex = -1;

            for (int i = 0; i < tokens.Count; i++)
            {
                if (tokens[i].Start == position)
                {
                    startIndex = i;
                    break;
                }
            }

            if (startIndex < 0)
            {
                return(false);
            }

            if (_tokenComparer.Compare(tokens[startIndex].TokenType, startTokenType) != 0 && _tokenComparer.Compare(tokens[startIndex].TokenType, endTokenType) != 0)
            {
                return(false);
            }

            if (!reversed)
            {
                for (int i = startIndex; i < tokens.Count; i++)
                {
                    TokenClassT token = tokens[i];

                    if (token.TokenType.Equals(startTokenType))
                    {
                        stack.Push(token.TokenType);
                    }
                    else if (_tokenComparer.Compare(token.TokenType, endTokenType) == 0)
                    {
                        if (stack.Count > 0)
                        {
                            stack.Pop();
                        }

                        if (stack.Count == 0)
                        {
                            start = tokens[startIndex].Start;
                            end   = token.Start;
                            return(true);
                        }
                    }
                }
            }
            else
            {
                for (int i = startIndex; i >= 0; i--)
                {
                    TokenClassT token = tokens[i];

                    if (_tokenComparer.Compare(token.TokenType, endTokenType) == 0)
                    {
                        stack.Push(token.TokenType);
                    }
                    else if (_tokenComparer.Compare(token.TokenType, startTokenType) == 0)
                    {
                        if (stack.Count > 0)
                        {
                            stack.Pop();
                        }

                        if (stack.Count == 0)
                        {
                            start = token.Start;
                            end   = tokens[startIndex].Start;
                            return(true);
                        }
                    }
                }
            }

            return(false);
        }