public static Dictionary<string, string> MoveStartRuleToTop(int pos, Document document)
{
    var result = new Dictionary<string, string>();

    // Check if lexer grammar.
    AntlrGrammarDetails pd_parser = ParserDetailsFactory.Create(document) as AntlrGrammarDetails;
    ExtractGrammarType lp = new ExtractGrammarType();
    ParseTreeWalker.Default.Walk(lp, pd_parser.ParseTree);
    var is_lexer = lp.Type == ExtractGrammarType.GrammarType.Lexer;
    if (is_lexer)
    {
        // We don't consider lexer grammars.
        return result;
    }

    // Consider only the target grammar.
    Table table = new Table(pd_parser, document);
    table.ReadRules();
    table.FindPartitions();
    table.FindStartRules();

    string old_code = document.Code;

    // Collect the partitions of all parser start rules, ordered by position in the source.
    List<Pair<int, int>> move = new List<Pair<int, int>>();
    foreach (var r in table.rules)
    {
        if (r.is_parser_rule && r.is_start == true)
        {
            move.Add(new Pair<int, int>(r.start_index, r.end_index));
        }
    }
    move = move.OrderBy(p => p.a).ThenBy(p => p.b).ToList();

    // The insertion point is the start of the first rule in the grammar.
    var find_first_rule = new FindFirstRule();
    ParseTreeWalker.Default.Walk(find_first_rule, pd_parser.ParseTree);
    var first_rule = find_first_rule.First;
    if (first_rule == null)
    {
        return result;
    }
    var insertion = first_rule.SourceInterval.a;
    var insertion_tok = pd_parser.TokStream.Get(insertion);
    var insertion_ind = insertion_tok.StartIndex;

    // Nothing to do if the only start rule is already at the top.
    if (move.Count == 1 && move[0].a == insertion_ind)
    {
        return result;
    }

    // Rebuild the source text: the prefix up to the first rule, then the moved start rules,
    // then the remaining text with the moved ranges skipped.
    StringBuilder sb = new StringBuilder();
    int previous = 0;
    {
        int index_start = insertion_ind;
        int len = 0;
        string pre = old_code.Substring(previous, index_start - previous);
        sb.Append(pre);
        previous = index_start + len;
    }
    foreach (var l in move)
    {
        int index_start = l.a;
        int len = l.b - l.a;
        string add = old_code.Substring(index_start, len);
        sb.Append(add);
    }
    foreach (var l in move)
    {
        int index_start = l.a;
        int len = l.b - l.a;
        string pre = old_code.Substring(previous, index_start - previous);
        sb.Append(pre);
        previous = index_start + len;
    }
    string rest = old_code.Substring(previous);
    sb.Append(rest);
    string new_code = sb.ToString();

    result.Add(document.FullPath, new_code);
    return result;
}
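// A minimal usage sketch, assuming a caller that simply writes the refactoring result back to
// disk. MoveStartRuleToTop returns a map from a document's full path to its rewritten text.
// ApplyChanges is a hypothetical helper added here for illustration; it is not part of the
// original refactoring code.
public static void ApplyChanges(Dictionary<string, string> changes)
{
    foreach (KeyValuePair<string, string> change in changes)
    {
        // Key: full path of the grammar file; Value: the rewritten grammar text.
        System.IO.File.WriteAllText(change.Key, change.Value);
    }
}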
public void FindPartitions()
{
    // Locate the first rule; if the grammar has no rules, there is nothing to partition.
    var find_first_rule = new FindFirstRule();
    ParseTreeWalker.Default.Walk(find_first_rule, pd_parser.ParseTree);
    var first_rule = find_first_rule.First;
    if (first_rule == null)
    {
        return;
    }
    var insertion = first_rule.SourceInterval.a;
    var insertion_tok = pd_parser.TokStream.Get(insertion);
    var insertion_ind = insertion_tok.StartIndex;

    string old_code = document.Code;
    for (int i = 0; i < rules.Count; ++i)
    {
        IParseTree rule = rules[i].rule;

        // Find the range indices for the rule including comments. Note, the start index is
        // inclusive; the end index is exclusive. We make the assumption that the preceding
        // whitespace and comments are grouped with a rule all the way from the end of the
        // previous non-whitespace, non-comment element, such as options, headers, or a rule.
        Interval token_interval = rule.SourceInterval;
        var end = token_interval.b;
        var end_tok = pd_parser.TokStream.Get(end);
        Antlr4.Runtime.IToken last = end_tok;
        var end_ind = old_code.Length <= last.StopIndex ? last.StopIndex : last.StopIndex + 1;
        bool on_end = false;

        // Extend the partition to the end of the current line (CR or CRLF line endings).
        for (int j = end_ind; j < old_code.Length; j++)
        {
            if (old_code[j] == '\r')
            {
                if (j + 1 < old_code.Length && old_code[j + 1] == '\n')
                {
                    end_ind = j + 2;
                }
                else
                {
                    end_ind = j + 1;
                }
                break;
            }
            end_ind = j;
        }

        var inter = pd_parser.TokStream.GetHiddenTokensToRight(end_tok.TokenIndex);

        var start = token_interval.a;
        var start_tok = pd_parser.TokStream.Get(start);
        var start_ind = start_tok.StartIndex;

        rules[i].start_index = start_ind;
        rules[i].end_index = end_ind;
    }

    // Make the partitions contiguous: each rule's range begins exactly where the previous
    // rule's range ends, so intervening whitespace and comments belong to the rule that follows.
    for (int i = 0; i < rules.Count; ++i)
    {
        if (i > 0)
        {
            rules[i].start_index = rules[i - 1].end_index;
        }
    }

    // Sanity check: a partition boundary should never split a CRLF pair (result currently unused).
    bool bad = false;
    for (int i = 0; i < rules.Count; ++i)
    {
        for (int j = rules[i].start_index; j < rules[i].end_index; ++j)
        {
            if (old_code[j] == '\r')
            {
                if (j + 1 < rules[i].end_index && old_code[j + 1] == '\n')
                {
                    ;
                }
                else
                {
                    bad = true;
                }
            }
        }
    }
}
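// A minimal sketch of the invariant that FindPartitions establishes: after the second pass,
// every rule's partition starts exactly where the previous rule's partition ends, so the
// partitions tile the source text with no gaps or overlaps. CheckPartitionsAreContiguous is
// a hypothetical helper for illustration only; it is not part of the original Table class.
public void CheckPartitionsAreContiguous()
{
    for (int i = 1; i < rules.Count; ++i)
    {
        if (rules[i].start_index != rules[i - 1].end_index)
        {
            throw new System.InvalidOperationException(
                "Partition gap or overlap between rule " + (i - 1) + " and rule " + i + ".");
        }
    }
}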