// Builds the standard constructor summary comment:
//   "Initializes a new instance of the <see cref="ClassName"/> class."
// where ClassName is taken from the first identifier token of the node
// being documented, and hands the assembled nodes to AddExampleElementNode.
internal override void CreateNewComment()
{
    var leadingText = new TextNode(DocCommentExterior);
    leadingText.AddToken(new NewlineToken());
    leadingText.AddToken(new LiteralTextToken(" Initializes a new instance of the "));

    // The first identifier token of the documented node is its name.
    var identifier = nodeBeingDocumented.ChildTokens()
        .First(t => t.IsKind(SyntaxKind.IdentifierToken))
        .ToFullString();
    var classReference = new CrefNode(identifier);

    var trailingText = new TextNode(DocCommentExterior);
    trailingText.AddToken(new LiteralTextTokenWithNoDocCommentExterior(" class."));
    trailingText.AddToken(new NewlineToken());
    trailingText.AddToken(new LiteralTextToken(" "));

    AddExampleElementNode(new Node[] { leadingText, classReference, trailingText });
}
/// <summary>
/// Handles a newline token while accumulating line/word counts.
/// Leading blank lines and consecutive newlines are ignored; otherwise the
/// newline either ends the current line, or — when the word read so far is
/// a prefix of a known abbreviation — is kept as part of the current word.
/// </summary>
/// <param name="token">The newline token to dispatch.</param>
public void Dispatch(NewlineToken token)
{
    // Nothing has been read from the file yet, so ignore leading blank lines.
    if (this.currentLineCount == 0 && this.currentLineWordCount == 0)
    {
        return;
    }

    // The previous token was already a newline; collapse the run.
    if (this.isNewline)
    {
        return;
    }

    var currentWord = this.currentWordToken.ToString() + token.Value;

    // FIX (CA1310): prefix matching against the abbreviation table is a
    // non-linguistic comparison, so use Ordinal instead of the
    // culture-sensitive StartsWith(string) overload, which can give wrong
    // results under some current cultures.
    if (CommonAbbreviations.Any(x => x.StartsWith(currentWord, StringComparison.Ordinal)))
    {
        // The word could still grow into a known abbreviation — keep the
        // newline character as part of the current word and don't end the line.
        this.currentWordToken.AddChar(token);
    }
    else
    {
        this.currentLineCount++;
        this.currentLineWordCount = 0;
        this.isWhitespace = false;
        this.isNewline = true;
    }
}
// Consumes a run of newline characters ('\n' / '\r') into one token and
// decides how to emit it. If the next character is indentation (space/tab)
// the token is returned so the caller can measure the new indent; otherwise
// the newline is emitted directly followed by outdent tokens closing every
// open indentation level, and null is returned.
private NewlineToken ReadNewline()
{
    // Swallow every consecutive newline character into a single token value.
    for (; c == '\n' || c == '\r'; Move())
    {
        tokenValue.Append(c);
    }

    var newline = new NewlineToken(tokenValue.ToString(), tokenStartPosition);

    // Followed by whitespace: the caller handles indentation measurement.
    if (c.IsSpaceOrTab())
    {
        return newline;
    }

    // The next line starts at column 0: emit the newline first, then reset
    // indentation by emitting one outdent per open level.
    tokens.Add(newline);
    tokenStartPosition = Position;
    lastIndentLength = 0;
    for (; indentLevel > 0; indentLevel--)
    {
        tokens.Add(new Token(Outdent, tokenStartPosition));
    }

    return null;
}
// A NewlineToken must render exactly Environment.NewLine, regardless of the
// event it is given.
public void Should_just_emit_platform_specific_newline_on_render()
{
    // Arrange
    var logEvent = new LogEvent(LogLevel.Info, DateTimeOffset.Now, null);
    var sut = new NewlineToken();
    var output = new StringWriter(new StringBuilder());

    // Act
    sut.Render(logEvent, output, null);

    // Assert
    output.ToString().Should().Be(Environment.NewLine);
}
// Builds an empty example-comment skeleton: a single text node containing
// two blank doc-comment lines (newline followed by a single-space literal,
// twice), then hands it to AddExampleElementNode.
internal override void CreateNewComment()
{
    var textNode = new TextNode(DocCommentExterior);
    textNode.AddToken(new NewlineToken());
    textNode.AddToken(new LiteralTextToken(" "));
    textNode.AddToken(new NewlineToken());
    textNode.AddToken(new LiteralTextToken(" "));

    AddExampleElementNode(new Node[] { textNode });
}
/// <summary>
/// Streams tokens from <paramref name="r"/> through a PTBTokenizer and writes
/// them to <paramref name="writer"/>, returning the number of tokens seen.
/// Output can be gated to regions between start/stop elements
/// (<paramref name="parseInsidePattern"/>), filtered
/// (<paramref name="filterPattern"/>), lower-cased, dumped in a verbose form,
/// and laid out one-token-per-write, line-preserving, or one-line-per-element.
/// NOTE(review): auto-converted from Java (Sharpen); kept byte-identical.
/// </summary>
/// <exception cref="System.IO.IOException"/>
private static int TokReader(Reader r, BufferedWriter writer, Pattern parseInsidePattern, Pattern filterPattern, string options, bool preserveLines, bool oneLinePerElement, bool dump, bool lowerCase)
{
    int numTokens = 0;
    bool beginLine = true;
    bool printing = (parseInsidePattern == null);
    // start off printing, unless you're looking for a start entity
    Matcher m = null;
    if (parseInsidePattern != null)
    {
        m = parseInsidePattern.Matcher(string.Empty);
    }
    // create once as performance hack
    // System.err.printf("parseInsidePattern is: |%s|%n", parseInsidePattern);
    // Iterate the tokenizer to exhaustion; MoveNext() advances, Current reads.
    for (Edu.Stanford.Nlp.Process.PTBTokenizer<CoreLabel> tokenizer = new Edu.Stanford.Nlp.Process.PTBTokenizer<CoreLabel>(r, new CoreLabelTokenFactory(), options); tokenizer.MoveNext();)
    {
        CoreLabel obj = tokenizer.Current;
        // String origStr = obj.get(CoreAnnotations.TextAnnotation.class).replaceFirst("\n+$", ""); // DanC added this to fix a lexer bug, hopefully now corrected
        string origStr = obj.Get(typeof(CoreAnnotations.TextAnnotation));
        string str;
        if (lowerCase)
        {
            // Lower-case both the emitted string and the label's stored text.
            str = origStr.ToLower(Locale.English);
            obj.Set(typeof(CoreAnnotations.TextAnnotation), str);
        }
        else
        {
            str = origStr;
        }
        if (m != null && m.Reset(origStr).Matches())
        {
            // Token is a start/stop element itself: toggle printing, never emit it.
            printing = m.Group(1).IsEmpty();
            // turn on printing if no end element slash, turn it off it there is
            // System.err.printf("parseInsidePattern matched against: |%s|, printing is %b.%n", origStr, printing);
            if (!printing)
            {
                // true only if matched a stop
                beginLine = true;
                if (oneLinePerElement)
                {
                    writer.NewLine();
                }
            }
        }
        else
        {
            if (printing)
            {
                if (dump)
                {
                    // after having checked for tags, change str to be exhaustive
                    str = obj.ToShorterString();
                }
                // Empty then-branch: tokens matching the filter are skipped.
                if (filterPattern != null && filterPattern.Matcher(origStr).Matches())
                {
                }
                else
                {
                    // skip
                    if (preserveLines)
                    {
                        // NOTE(review): NewlineToken is presumably the sentinel
                        // string the tokenizer emits for a line break — confirm.
                        if (NewlineToken.Equals(origStr))
                        {
                            beginLine = true;
                            writer.NewLine();
                        }
                        else
                        {
                            // Separate tokens on the same line with a single space.
                            if (!beginLine)
                            {
                                writer.Write(' ');
                            }
                            else
                            {
                                beginLine = false;
                            }
                            // writer.write(str.replace("\n", ""));
                            writer.Write(str);
                        }
                    }
                    else
                    {
                        if (oneLinePerElement)
                        {
                            // All tokens of one element on one line, space-separated.
                            if (!beginLine)
                            {
                                writer.Write(' ');
                            }
                            else
                            {
                                beginLine = false;
                            }
                            writer.Write(str);
                        }
                        else
                        {
                            // Default layout: one token per line.
                            writer.Write(str);
                            writer.NewLine();
                        }
                    }
                }
            }
        }
        // Counted even when not printed (filtered / outside the parse region).
        numTokens++;
    }
    return(numTokens);
}
/// <summary>
/// Lexes the lines of an ABC tune body into a flat token list.
/// Reads key, meter, unit note length and tempo from the file header,
/// initializes <c>TimeSignature</c>, then scans each line with the
/// <c>TryLex*</c> matchers (first match wins; each advances the char index).
/// Returns a snapshot of the tokens and clears the internal state.
/// </summary>
/// <param name="tune">Lines of the tune body.</param>
/// <param name="tuneHeader">Tune-level header fields (currently unused here).</param>
/// <param name="fileHeader">File-level header fields; must contain the key.</param>
/// <exception cref="NotSupportedException">
/// Thrown when the unit note length is missing in a non-free meter, or when a
/// line fragment matches no lexer rule.
/// </exception>
public IList<Token> Tokenize(IList<string> tune, Dictionary<string, string> tuneHeader, Dictionary<string, string> fileHeader)
{
    Key = Key.FromName(fileHeader[InfoFields.Key.Name]);

    if (!fileHeader.TryGetValue(InfoFields.Meter.Name, out var meter))
    {
        meter = "free";
    }

    if (!fileHeader.TryGetValue(InfoFields.UnitNoteLength.Name, out var unit))
    {
        if (meter != "free")
        {
            // TODO: derive the default unit from the meter per the ABC spec
            // (meter value < 0.75 => "1/16", otherwise "1/8") once
            // meter-fraction-to-decimal conversion is implemented.
            throw new NotSupportedException("unit must be specified in free meter (for now)");
        }
    }

    fileHeader.TryGetValue(InfoFields.Tempo.Name, out var tempo);

    var meterFraction = TimeSignature.ParseMeter(meter);
    var unitFraction = TimeSignature.ParseFraction(unit);
    TimeSignature = new TimeSignature(meterFraction, unitFraction, tempo);

    for (var lineIndex = 0; lineIndex < tune.Count; lineIndex++)
    {
        var line = tune[lineIndex].TrimEnd();
        if (TryLexHeader(line, lineIndex))
        {
            continue;
        }

        var charIndex = 0;
        while (charIndex < line.Length)
        {
            var part = line.Substring(charIndex);

            // First matcher wins; each TryLex* advances charIndex on success.
            if (TryLexInlineField(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexWhitespace(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexNote(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexBeam(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexBrokenRythem(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexRest(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexTuplets(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexSlur(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexTie(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexEmbelishments(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexDecorationsSingle(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexDecorationsMulti(part, lineIndex, ref charIndex)) { continue; }

            // Continuation is matched against the whole line, not the remainder.
            if (TryLexContinuation(line, lineIndex, ref charIndex)) { continue; }
            if (TryLexAnnotation(part, lineIndex, ref charIndex)) { continue; }
            if (TryLexChord(part, lineIndex, ref charIndex)) { continue; }

            // No rule matched this fragment: fail loudly with the offending text.
            throw new NotSupportedException(part);
        }

        // FIX: guard against an empty token list (e.g. when the first lines
        // lex to nothing) before peeking at the last token — the unguarded
        // indexer threw ArgumentOutOfRangeException.
        if (Tokens.Count > 0 && Tokens[Tokens.Count - 1] is ContinuationToken)
        {
            var token = new NewlineToken() { Line = lineIndex, Char = charIndex, Text = "\n" };
            Tokens.Add(token);
        }
    }

    var cache = Tokens.ToArray();
    Clear();
    return cache;
}