/// <summary>Parse one binary hunk header line and skip over its encoded body.</summary>
/// <param name="ptr">offset of the first byte of the hunk header line.</param>
/// <param name="end">first offset past the region available for parsing.</param>
/// <returns>
/// offset just past the blank line that terminates the hunk, or -1 when
/// the line is not a recognizable binary hunk header.
/// </returns>
internal virtual int ParseHunk(int ptr, int end)
{
	byte[] buf = file.buf;
	if (RawParseUtils.Match(buf, ptr, LITERAL) >= 0)
	{
		type = BinaryHunk.Type.LITERAL_DEFLATED;
		length = RawParseUtils.ParseBase10(buf, ptr + LITERAL.Length, null);
	}
	else if (RawParseUtils.Match(buf, ptr, DELTA) >= 0)
	{
		type = BinaryHunk.Type.DELTA_DEFLATED;
		length = RawParseUtils.ParseBase10(buf, ptr + DELTA.Length, null);
	}
	else
	{
		// Not a valid binary hunk. Signal to the caller that we cannot
		// parse any further and that this line should be treated otherwise.
		return -1;
	}
	ptr = RawParseUtils.NextLF(buf, ptr);
	// The binary encoded information ends at the first blank line. To save
	// time we only locate the end here and do not validate the data itself.
	while (ptr < end)
	{
		bool blankLine = buf[ptr] == '\n';
		ptr = RawParseUtils.NextLF(buf, ptr);
		if (blankLine)
		{
			break;
		}
	}
	return ptr;
}
/// <summary>Check a commit for errors.</summary>
/// <param name="raw">The commit data. The array is never modified.</param>
/// <exception cref="CorruptObjectException">If any error was detected.</exception>
public void checkCommit(byte[] raw)
{
	// Commit headers appear in a fixed order: tree, parent*, author, committer.
	int ptr = RawParseUtils.match(raw, 0, tree);
	if (ptr < 0)
	{
		throw new CorruptObjectException("no tree header");
	}
	ptr = id(raw, ptr);
	if (ptr < 0 || raw[ptr++] != '\n')
	{
		throw new CorruptObjectException("invalid tree");
	}
	// Zero or more parent lines may follow the tree.
	while (RawParseUtils.match(raw, ptr, parent) >= 0)
	{
		ptr += parent.Length;
		ptr = id(raw, ptr);
		if (ptr < 0 || raw[ptr++] != '\n')
		{
			throw new CorruptObjectException("invalid parent");
		}
	}
	ptr = RawParseUtils.match(raw, ptr, author);
	if (ptr < 0)
	{
		throw new CorruptObjectException("no author");
	}
	ptr = personIdent(raw, ptr);
	if (ptr < 0 || raw[ptr++] != '\n')
	{
		throw new CorruptObjectException("invalid author");
	}
	ptr = RawParseUtils.match(raw, ptr, committer);
	if (ptr < 0)
	{
		throw new CorruptObjectException("no committer");
	}
	ptr = personIdent(raw, ptr);
	if (ptr < 0 || raw[ptr++] != '\n')
	{
		throw new CorruptObjectException("invalid committer");
	}
}
/// <summary>
/// Decode every file version touched by this patch into line-oriented text.
/// </summary>
/// <param name="csGuess">optional per-file charset guesses; null entries fall back to the default.</param>
/// <returns>one decoded string per parent, plus one for the new file.</returns>
private string[] ExtractFileLines(Encoding[] csGuess)
{
	var tmp = new TemporaryBuffer[GetParentCount() + 1];
	try
	{
		for (int i = 0; i < tmp.Length; i++)
		{
			tmp[i] = new TemporaryBuffer.LocalFile();
		}
		foreach (HunkHeader h in GetHunks())
		{
			h.ExtractFileLines(tmp);
		}
		var r = new string[tmp.Length];
		for (int i = 0; i < tmp.Length; i++)
		{
			// Use the caller's charset guess when available; otherwise
			// fall back to the repository default.
			Encoding cs = (csGuess != null ? csGuess[i] : null) ?? Constants.CHARSET;
			r[i] = RawParseUtils.Decode(cs, tmp[i].ToByteArray());
		}
		return r;
	}
	catch (IOException ioe)
	{
		throw new RuntimeException(JGitText.Get().cannotConvertScriptToText, ioe);
	}
	finally
	{
		// Always release the temporary files, even on failure.
		foreach (TemporaryBuffer b in tmp)
		{
			if (b != null)
			{
				b.Destroy();
			}
		}
	}
}
/// <summary>Parse a file name from a diff header line.</summary>
/// <param name="expect">value returned when the region is empty.</param>
/// <param name="ptr">first byte of the name.</param>
/// <param name="end">one past the last byte of the line (past the LF).</param>
private string ParseName(String expect, int ptr, int end)
{
	if (ptr == end)
	{
		return expect;
	}
	string r;
	if (Buffer[ptr] != '"')
	{
		// Older style GNU diff format: an optional tab ends the name.
		int tab = end;
		while (ptr < tab && Buffer[tab - 1] != '\t')
		{
			tab--;
		}
		if (ptr == tab)
		{
			// No tab present; take the rest of the line.
			tab = end;
		}
		r = RawParseUtils.decode(Constants.CHARSET, Buffer, ptr, tab - 1);
	}
	else
	{
		// New style GNU diff format: quoted path.
		r = QuotedString.GitPathStyle.GIT_PATH.dequote(Buffer, ptr, end - 1);
	}
	if (r.Equals(DEV_NULL))
	{
		// Intern the /dev/null marker so reference comparisons work.
		r = DEV_NULL;
	}
	return r;
}
/// <summary>
/// Decode every file version touched by this patch into line-oriented text.
/// </summary>
/// <param name="csGuess">optional per-file charset guesses; null entries fall back to the default.</param>
/// <returns>one decoded string per parent, plus one for the new file.</returns>
private string[] ExtractFileLines(Encoding[] csGuess)
{
	var tmp = new TemporaryBuffer[ParentCount + 1];
	try
	{
		for (int i = 0; i < tmp.Length; i++)
		{
			tmp[i] = new TemporaryBuffer();
		}
		foreach (HunkHeader h in Hunks)
		{
			h.extractFileLines(tmp);
		}
		var r = new String[tmp.Length];
		for (int i = 0; i < r.Length; i++)
		{
			// Use the caller's charset guess when available; otherwise
			// fall back to the repository default.
			Encoding cs = null;
			if (csGuess != null)
			{
				cs = csGuess[i];
			}
			if (cs == null)
			{
				cs = Constants.CHARSET;
			}
			r[i] = RawParseUtils.decode(cs, tmp[i].ToArray());
		}
		return r;
	}
	catch (IOException ioe)
	{
		throw new Exception("Cannot convert script to text", ioe);
	}
	finally
	{
		// Always release the temporary buffers, even on failure.
		foreach (TemporaryBuffer b in tmp)
		{
			if (b != null)
			{
				b.destroy();
			}
		}
	}
}
/// <summary>Parse this tag from its canonical byte representation.</summary>
/// <param name="walk">the walk used to look up the referenced object.</param>
/// <param name="rawTag">the complete raw tag buffer.</param>
public void parseCanonical(RevWalk walk, byte[] rawTag)
{
	// Offset 53 is where the type value starts; offset 7 is where the
	// object id begins after the "object " header.
	// NOTE(review): these offsets assume the fixed canonical tag layout
	// "object <40-hex>\ntype ..." — confirm against the writer.
	var pos = new MutableInteger { value = 53 };
	int oType = Constants.decodeTypeString(this, rawTag, (byte)'\n', pos);
	walk.IdBuffer.FromString(rawTag, 7);
	_object = walk.lookupAny(walk.IdBuffer, oType);
	// Skip over the "tag " header to reach the tag name.
	pos.value += 4;
	int p = pos.value;
	int nameEnd = RawParseUtils.nextLF(rawTag, p) - 1;
	_tagName = RawParseUtils.decode(Constants.CHARSET, rawTag, p, nameEnd);
	if (walk.isRetainBody())
	{
		// Keep the raw buffer so the message body can be read later.
		_buffer = rawTag;
	}
	Flags |= PARSED;
}
/// <summary>Parse a combined-diff "mode $amode,$bmode..$cmode" line.</summary>
private void parseModeLine(int ptr, int eol)
{
	// All ancestor modes appear comma separated, then ".." and the new mode.
	int n = 0;
	while (ptr < eol)
	{
		// nextLF with a custom terminator byte scans to the next ','.
		int comma = RawParseUtils.nextLF(Buffer, ptr, (byte)',');
		if (comma >= eol)
		{
			break;
		}
		_oldModes[n++] = ParseFileMode(ptr, comma);
		ptr = comma;
	}
	// The final ancestor mode runs up to the ".." separator.
	int dot2 = RawParseUtils.nextLF(Buffer, ptr, (byte)'.');
	_oldModes[n] = ParseFileMode(ptr, dot2);
	NewMode = ParseFileMode(dot2 + 1, eol);
}
/// <param name="max">max number of entries to read</param>
/// <returns>all reflog entries in reverse order</returns>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual IList<ReflogEntry> GetReverseEntries(int max)
{
	byte[] log;
	try
	{
		log = IOUtil.ReadFully(logName);
	}
	catch (FileNotFoundException)
	{
		// A missing log file simply means there is no history yet.
		return Sharpen.Collections.EmptyList<ReflogEntry>();
	}
	IList<ReflogEntry> ret = new AList<ReflogEntry>();
	// Walk the log backwards, one line at a time, newest entry first.
	int rs = RawParseUtils.PrevLF(log, log.Length);
	while (rs >= 0 && max-- > 0)
	{
		rs = RawParseUtils.PrevLF(log, rs);
		// rs < 0 means we reached the first line of the file.
		ret.AddItem(new ReflogEntry(log, rs < 0 ? 0 : rs + 2));
	}
	return ret;
}
/// <summary>Read one pkt-line payload without stripping a trailing LF.</summary>
/// <returns>the decoded payload, or END for a flush packet.</returns>
/// <exception cref="System.IO.IOException"></exception>
internal virtual string ReadStringRaw()
{
	int len = ReadLength();
	if (len == 0)
	{
		// A zero length is the flush packet marker.
		return END;
	}
	// The 4-byte length header is included in the advertised count.
	len -= 4;
	// Reuse the shared line buffer when the payload fits, avoiding an
	// allocation for every short packet.
	byte[] raw = len <= lineBuffer.Length ? lineBuffer : new byte[len];
	IOUtil.ReadFully(@in, raw, 0, len);
	return RawParseUtils.Decode(Constants.CHARSET, raw, 0, len);
}
/// <summary>Escape unprintable characters optionally URI-reserved characters</summary>
/// <param name="s">The Java String to encode (may contain any character)</param>
/// <param name="escapeReservedChars">true to escape URI reserved characters</param>
/// <param name="encodeNonAscii">encode any non-ASCII characters</param>
/// <returns>a URI-encoded string</returns>
private static string Escape(string s, bool escapeReservedChars, bool encodeNonAscii)
{
	if (s == null)
	{
		return null;
	}
	ByteArrayOutputStream os = new ByteArrayOutputStream(s.Length);
	byte[] bytes;
	try
	{
		bytes = Sharpen.Runtime.GetBytesForString(s, Constants.CHARACTER_ENCODING);
	}
	catch (UnsupportedEncodingException e)
	{
		// The configured encoding is always available; cannot happen.
		throw new RuntimeException(e);
	}
	foreach (byte raw in bytes)
	{
		int b = raw & 0xFF;
		// Control bytes, '%' itself, optionally non-ASCII and reserved
		// characters are written as a %xx escape sequence.
		bool mustEscape = b <= 32
			|| (encodeNonAscii && b > 127)
			|| b == '%'
			|| (escapeReservedChars && reservedChars.Get(b));
		if (mustEscape)
		{
			os.Write('%');
			byte[] tmp = Constants.EncodeASCII(string.Format("{0:x2}", Sharpen.Extensions.ValueOf(b)));
			os.Write(tmp[0]);
			os.Write(tmp[1]);
		}
		else
		{
			os.Write(b);
		}
	}
	byte[] buf = os.ToByteArray();
	return RawParseUtils.Decode(buf, 0, buf.Length);
}
/// <summary>Parse a file name from a diff header line.</summary>
/// <param name="expect">value returned when the region is empty.</param>
/// <param name="ptr">first byte of the name.</param>
/// <param name="end">one past the last byte of the line (past the LF).</param>
private string ParseName(string expect, int ptr, int end)
{
	if (ptr == end)
	{
		return expect;
	}
	string r;
	if (buf[ptr] != '"')
	{
		// Older style GNU diff format: an optional tab ends the name.
		int tab = end;
		while (ptr < tab && buf[tab - 1] != '\t')
		{
			tab--;
		}
		if (ptr == tab)
		{
			// No tab present; take the rest of the line.
			tab = end;
		}
		r = RawParseUtils.Decode(Constants.CHARSET, buf, ptr, tab - 1);
	}
	else
	{
		// New style GNU diff format: quoted path.
		r = QuotedString.GIT_PATH.Dequote(buf, ptr, end - 1);
	}
	if (r.Equals(DEV_NULL))
	{
		// Intern the /dev/null marker so reference comparisons work.
		r = DEV_NULL;
	}
	if (r.EndsWith("\r"))
	{
		// Strip a Windows line ending if one was captured.
		r = r.Substring(0, r.Length - 1);
	}
	return r;
}
/// <summary>Read one pkt-line payload, stripping a trailing LF if present.</summary>
/// <returns>the decoded payload, END for a flush packet, or "" for an empty line.</returns>
/// <exception cref="System.IO.IOException"></exception>
internal virtual string ReadString()
{
	int len = ReadLength();
	if (len == 0)
	{
		// A zero length is the flush packet marker.
		return END;
	}
	// The 4-byte length header is included in the advertised count.
	len -= 4;
	if (len == 0)
	{
		return string.Empty;
	}
	byte[] raw = new byte[len];
	IOUtil.ReadFully(@in, raw, 0, len);
	// Drop the trailing LF, if the sender included one.
	if (raw[len - 1] == '\n')
	{
		len--;
	}
	return RawParseUtils.Decode(Constants.CHARSET, raw, 0, len);
}
/// <summary>Adds a new or existing file with the specified name to this tree.</summary>
/// <remarks>
/// Adds a new or existing file with the specified name to this tree.
/// Trees are added if necessary as the name may contain '/':s.
/// </remarks>
/// <param name="s">an array containing the name</param>
/// <param name="offset">when the name starts in the tree.</param>
/// <returns>
/// a
/// <see cref="FileTreeEntry">FileTreeEntry</see>
/// for the added file.
/// </returns>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual FileTreeEntry AddFile(byte[] s, int offset)
{
	// Locate the terminator of the first path component.
	int slash = offset;
	while (slash < s.Length && s[slash] != '/')
	{
		slash++;
	}
	EnsureLoaded();
	byte xlast = slash < s.Length ? (byte)'/' : (byte)0;
	int p = BinarySearch(contents, s, xlast, offset, slash);
	if (p >= 0 && slash < s.Length && contents[p] is NGit.Tree)
	{
		// The subtree already exists; continue the insert inside it.
		return ((NGit.Tree)contents[p]).AddFile(s, slash + 1);
	}
	byte[] newName = Substring(s, offset, slash);
	if (p >= 0)
	{
		throw new EntryExistsException(RawParseUtils.Decode(newName));
	}
	if (slash < s.Length)
	{
		// Create the missing intermediate directory and recurse.
		NGit.Tree t = new NGit.Tree(this, newName);
		InsertEntry(p, t);
		return t.AddFile(s, slash + 1);
	}
	FileTreeEntry f = new FileTreeEntry(this, null, newName, false);
	InsertEntry(p, f);
	return f;
}
/// <summary>
/// Validate a person identity line of the form "name &lt;email&gt; when tz".
/// </summary>
/// <returns>offset just past the timezone field, or -1 when malformed.</returns>
private int personIdent(byte[] raw, int ptr)
{
	// Locate the '<' opening the email address.
	int emailB = RawParseUtils.nextLF(raw, ptr, (byte)'<');
	if (emailB == ptr || raw[emailB - 1] != '<')
	{
		return -1;
	}
	// Locate the matching '>' closing the email address.
	int emailE = RawParseUtils.nextLF(raw, emailB, (byte)'>');
	if (emailE == emailB || raw[emailE - 1] != '>')
	{
		return -1;
	}
	if (emailE == raw.Length || raw[emailE] != ' ')
	{
		return -1;
	}
	// The timestamp ("when") must follow the email.
	RawParseUtils.parseBase10(raw, emailE + 1, ptrout);
	ptr = ptrout.value;
	if (emailE + 1 == ptr)
	{
		return -1;
	}
	if (ptr == raw.Length || raw[ptr] != ' ')
	{
		return -1;
	}
	// The timezone offset must follow the timestamp.
	RawParseUtils.parseBase10(raw, ptr + 1, ptrout);
	if (ptr + 1 == ptrout.value)
	{
		return -1;
	}
	return ptrout.value;
}
/// <summary>
/// Adds a new or existing file with the specified name to this tree.
/// Trees are added if necessary as the name may contain '/':s.
/// </summary>
/// <param name="s"> an array containing the name </param>
/// <param name="offset"> when the name starts in the tree.
/// </param>
/// <returns>A <seealso cref="FileTreeEntry"/> for the added file.</returns>
/// <exception cref="IOException"></exception>
public FileTreeEntry AddFile(byte[] s, int offset)
{
	// Locate the terminator of the first path component.
	int slash = offset;
	while (slash < s.Length && s[slash] != '/')
	{
		slash++;
	}
	EnsureLoaded();
	byte xlast = slash < s.Length ? (byte)'/' : (byte)0;
	int p = BinarySearch(_contents, s, xlast, offset, slash);
	if (p >= 0 && slash < s.Length && _contents[p] is Tree)
	{
		// The subtree already exists; continue the insert inside it.
		return ((Tree)_contents[p]).AddFile(s, slash + 1);
	}
	byte[] newName = SubString(s, offset, slash);
	if (p >= 0)
	{
		throw new EntryExistsException(RawParseUtils.decode(newName));
	}
	if (slash < s.Length)
	{
		// Create the missing intermediate directory and recurse.
		var t = new Tree(this, newName);
		InsertEntry(p, t);
		return t.AddFile(s, slash + 1);
	}
	var f = new FileTreeEntry(this, null, newName, false);
	InsertEntry(p, f);
	return f;
}
/// <summary>
/// Validate the pack header and trailing checksum against the pack index.
/// </summary>
private void OnOpenPack()
{
	PackIndex idx = LoadPackIndex();
	var buf = new byte[20];
	// Read the 12-byte header: signature, version, object count.
	IO.ReadFully(_fd, 0, buf, 0, 12);
	if (RawParseUtils.match(buf, 0, Constants.PACK_SIGNATURE) != 4)
	{
		throw new IOException("Not a PACK file.");
	}
	long vers = NB.decodeUInt32(buf, 4);
	long packCnt = NB.decodeUInt32(buf, 8);
	if (vers != 2 && vers != 3)
	{
		throw new IOException("Unsupported pack version " + vers + ".");
	}
	if (packCnt != idx.ObjectCount)
	{
		throw new PackMismatchException(
			"Pack object count mismatch: pack " + packCnt
			+ " index " + idx.ObjectCount
			+ ": " + File.FullName);
	}
	// The final 20 bytes of the pack are its SHA-1 checksum; it must
	// agree with the checksum recorded by the index.
	IO.ReadFully(_fd, Length - 20, buf, 0, 20);
	if (!buf.SequenceEqual(_packChecksum))
	{
		throw new PackMismatchException(
			"Pack checksum mismatch: pack " + ObjectId.FromRaw(buf)
			+ " index " + ObjectId.FromRaw(idx.PackChecksum)
			+ ": " + File.FullName);
	}
}
/// <summary>
/// Extract the email address (if present) from the footer.
/// <para />
/// If there is an email address looking string inside of angle brackets
/// (e.g. "&lt;a@b&gt;"), the return value is the part extracted from inside the
/// brackets. If no brackets are found, then <see cref="Value"/> is returned
/// if the value contains an '@' sign. Otherwise, null.
/// </summary>
/// <returns>email address appearing in the value of this footer, or null.</returns>
public string getEmailAddress()
{
	// Look for an opening '<' within the value region.
	int lt = RawParseUtils.nextLF(_buffer, _valStart, (byte)'<');
	if (lt >= _valEnd)
	{
		// No angle brackets; accept the raw value only if it looks
		// like an address (i.e. it contains an '@').
		int at = RawParseUtils.nextLF(_buffer, _valStart, (byte)'@');
		return (_valStart < at && at < _valEnd) ? Value : null;
	}
	int gt = RawParseUtils.nextLF(_buffer, lt, (byte)'>');
	if (gt > _valEnd)
	{
		// The '>' would fall outside the value; not a bracketed address.
		return null;
	}
	return RawParseUtils.decode(_enc, _buffer, lt, gt - 1);
}
/// <summary>Parse a combined-diff "index $asha1,$bsha1..$csha1" line.</summary>
internal override void ParseIndexLine(int ptr, int eol)
{
	// All ancestor ids appear comma separated, then ".." and the new id.
	IList<AbbreviatedObjectId> ids = new AList<AbbreviatedObjectId>();
	while (ptr < eol)
	{
		int comma = RawParseUtils.NextLF(buf, ptr, ',');
		if (eol <= comma)
		{
			break;
		}
		ids.AddItem(AbbreviatedObjectId.FromString(buf, ptr, comma - 1));
		ptr = comma;
	}
	// The final ancestor id runs up to the ".." separator.
	int dot2 = RawParseUtils.NextLF(buf, ptr, '.');
	oldIds = new AbbreviatedObjectId[ids.Count + 1];
	Sharpen.Collections.ToArray(ids, oldIds);
	oldIds[ids.Count] = AbbreviatedObjectId.FromString(buf, ptr, dot2 - 1);
	newId = AbbreviatedObjectId.FromString(buf, dot2 + 1, eol - 1);
	oldModes = new FileMode[oldIds.Length];
}
/// <summary>Adds a new or existing Tree with the specified name to this tree.</summary>
/// <remarks>
/// Adds a new or existing Tree with the specified name to this tree.
/// Trees are added if necessary as the name may contain '/':s.
/// </remarks>
/// <param name="s">an array containing the name</param>
/// <param name="offset">when the name starts in the tree.</param>
/// <returns>
/// a
/// <see cref="FileTreeEntry">FileTreeEntry</see>
/// for the added tree.
/// </returns>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual NGit.Tree AddTree(byte[] s, int offset)
{
	// Locate the terminator of the first path component.
	int slash = offset;
	while (slash < s.Length && s[slash] != '/')
	{
		slash++;
	}
	EnsureLoaded();
	int p = BinarySearch(contents, s, unchecked((byte)'/'), offset, slash);
	if (p >= 0 && slash < s.Length && contents[p] is NGit.Tree)
	{
		// The subtree already exists; continue the insert inside it.
		return ((NGit.Tree)contents[p]).AddTree(s, slash + 1);
	}
	byte[] newName = Substring(s, offset, slash);
	if (p >= 0)
	{
		throw new EntryExistsException(RawParseUtils.Decode(newName));
	}
	NGit.Tree t = new NGit.Tree(this, newName);
	InsertEntry(p, t);
	return slash == s.Length ? t : t.AddTree(s, slash + 1);
}
/// <summary>
/// Load the configuration from its backing file, clearing the in-memory
/// state when the file does not exist.
/// </summary>
public virtual void load()
{
	try
	{
		fromText(RawParseUtils.decode(IO.ReadFully(getFile())));
	}
	catch (FileNotFoundException)
	{
		// A missing file is treated as an empty configuration.
		clear();
	}
	catch (DirectoryNotFoundException)
	{
		clear();
	}
	catch (IOException e)
	{
		// Wrap so the failing path is part of the message.
		throw new IOException("Cannot read " + getFile(), e);
	}
	catch (ConfigInvalidException e)
	{
		throw new ConfigInvalidException("Cannot read " + getFile(), e);
	}
}
// Serialize this cache tree entry to os and recurse into each child.
// The on-disk record is "<name>\0<entrySpan> <childCount>\n", followed by
// the raw object id only when this subtree is valid. The trailer is built
// backwards from the end of the scratch buffer tmp.
public void write(byte[] tmp, TemporaryBuffer os)
{
	int ptr = tmp.Length;
	tmp[--ptr] = (byte)'\n';
	ptr = RawParseUtils.formatBase10(tmp, ptr, _childCount);
	tmp[--ptr] = (byte)' ';
	// An entry span of -1 marks an invalid (not yet computed) subtree.
	ptr = RawParseUtils.formatBase10(tmp, ptr, isValid() ? _entrySpan : -1);
	tmp[--ptr] = 0;
	os.write(_encodedName, 0, _encodedName.Length);
	os.write(tmp, ptr, tmp.Length - ptr);
	if (isValid())
	{
		// Only valid subtrees carry their object id.
		_id.copyRawTo(tmp, 0);
		os.write(tmp, 0, Constants.OBJECT_ID_LENGTH);
	}
	for (int i = 0; i < _childCount; i++)
	{
		_children[i].write(tmp, os);
	}
}
// Scan forward from c looking for the start of the next recognizable patch
// file header and dispatch to the matching per-format parser. Returns the
// offset where scanning should resume, or end when only junk remains.
private int ParseFile(byte[] buf, int c, int end)
{
	while (c < end)
	{
		if (FileHeader.isHunkHdr(buf, c, end) >= 1)
		{
			// If we find a disconnected hunk header we might
			// have missed a file header previously. The hunk
			// isn't valid without knowing where it comes from.
			//
			error(buf, c, "Hunk disconnected from file");
			c = RawParseUtils.nextLF(buf, c);
			continue;
		}
		// Valid git style patch?
		//
		if (RawParseUtils.match(buf, c, DiffGit) >= 0)
		{
			return(ParseDiffGit(buf, c, end));
		}
		if (RawParseUtils.match(buf, c, DiffCc) >= 0)
		{
			return(ParseDiffCombined(DiffCc, buf, c, end));
		}
		if (RawParseUtils.match(buf, c, DiffCombined) >= 0)
		{
			return(ParseDiffCombined(DiffCombined, buf, c, end));
		}
		// Junk between files? Leading junk? Traditional
		// (non-git generated) patch?
		//
		int n = RawParseUtils.nextLF(buf, c);
		if (n >= end)
		{
			// Patches cannot be only one line long. This must be
			// trailing junk that we should ignore.
			//
			return(end);
		}
		if (n - c < 6)
		{
			// A valid header must be at least 6 bytes on the
			// first line, e.g. "--- a/b\n".
			//
			c = n;
			continue;
		}
		if (RawParseUtils.match(buf, c, FileHeader.OLD_NAME) >= 0 && RawParseUtils.match(buf, n, FileHeader.NEW_NAME) >= 0)
		{
			// Probably a traditional patch. Ensure we have at least
			// a "@@ -0,0" smelling line next. We only check the "@@ -".
			//
			int f = RawParseUtils.nextLF(buf, n);
			if (f >= end)
			{
				return(end);
			}
			if (FileHeader.isHunkHdr(buf, f, end) == 1)
			{
				return(ParseTraditionalPatch(buf, c, end));
			}
		}
		c = n;
	}
	return(c);
}
// Parse all hunks belonging to fh starting at offset c, stopping at the
// next file header. Returns the offset where the next file may begin.
// Also detects binary patches (git binary and plain binary) for files
// that turned out to have no textual hunks.
private int ParseHunks(FileHeader fh, int c, int end)
{
	byte[] buf = fh.Buffer;
	while (c < end)
	{
		// If we see a file header at this point, we have all of the
		// hunks for our current file. We should stop and report back
		// with this position so it can be parsed again later.
		//
		if (RawParseUtils.match(buf, c, DiffGit) >= 0)
		{
			break;
		}
		if (RawParseUtils.match(buf, c, DiffCc) >= 0)
		{
			break;
		}
		if (RawParseUtils.match(buf, c, DiffCombined) >= 0)
		{
			break;
		}
		if (RawParseUtils.match(buf, c, FileHeader.OLD_NAME) >= 0)
		{
			break;
		}
		if (RawParseUtils.match(buf, c, FileHeader.NEW_NAME) >= 0)
		{
			break;
		}
		if (FileHeader.isHunkHdr(buf, c, end) == fh.ParentCount)
		{
			HunkHeader h = fh.newHunkHeader(c);
			h.parseHeader();
			c = h.parseBody(this, end);
			h.EndOffset = c;
			fh.addHunk(h);
			if (c < end)
			{
				// Anything other than another hunk, another diff,
				// a blank line, or a signature footer is suspicious.
				switch (buf[c])
				{
					case (byte)'@':
					case (byte)'d':
					case (byte)'\n':
					{
						break;
					}

					default:
					{
						if (RawParseUtils.match(buf, c, SigFooter) < 0)
						{
							warn(buf, c, "Unexpected hunk trailer");
						}
						break;
					}
				}
			}
			continue;
		}
		int eol = RawParseUtils.nextLF(buf, c);
		if (fh.Hunks.isEmpty() && RawParseUtils.match(buf, c, GitBinary) >= 0)
		{
			fh.PatchType = FileHeader.PatchTypeEnum.GIT_BINARY;
			return(ParseGitBinary(fh, eol, end));
		}
		if (fh.Hunks.isEmpty() && BinTrailer.Length < eol - c && RawParseUtils.match(buf, eol - BinTrailer.Length, BinTrailer) >= 0 && MatchAny(buf, c, BinHeaders))
		{
			// The patch is a binary file diff, with no deltas.
			//
			fh.PatchType = FileHeader.PatchTypeEnum.BINARY;
			return(eol);
		}
		// Skip this line and move to the next. Its probably garbage
		// After the last hunk of a file.
		//
		c = eol;
	}
	if (fh.Hunks.isEmpty() && fh.getPatchType() == FileHeader.PatchTypeEnum.UNIFIED && !fh.hasMetaDataChanges())
	{
		// Hmm, an empty patch? If there is no metadata here we
		// really have a binary patch that we didn't notice above.
		//
		fh.PatchType = FileHeader.PatchTypeEnum.BINARY;
	}
	return(c);
}
// Copy this combined hunk's lines into sb, deciding for each line which
// ancestor text column it belongs to by inspecting the per-ancestor
// prefix characters (' ', '-', '+') at the start of the line.
// NOTE: the unreachable "goto case"/"break" statements after "continue"
// are artifacts of the Sharpen Java-to-C# conversion and are kept as-is.
internal override void ExtractFileLines(StringBuilder sb, string[] text, int[] offsets)
{
	byte[] buf = file.buf;
	int ptr = startOffset;
	int eol = RawParseUtils.NextLF(buf, ptr);
	if (endOffset <= eol)
	{
		return;
	}
	// The hunk header line itself is attributed to the first ancestor.
	CopyLine(sb, text, offsets, 0);
	for (ptr = eol; ptr < endOffset; ptr = eol)
	{
		eol = RawParseUtils.NextLF(buf, ptr);
		if (eol - ptr < old.Length + 1)
		{
			// Line isn't long enough to mention the state of each
			// ancestor. It must be the end of the hunk.
			goto SCAN_break;
		}
		switch (buf[ptr])
		{
			case (byte)(' '):
			case (byte)('-'):
			case (byte)('+'):
			{
				break;
			}

			default:
			{
				// Line can't possibly be part of this hunk; the first
				// ancestor information isn't recognizable.
				//
				goto SCAN_break;
				break;
			}
		}
		bool copied = false;
		for (int ancestor = 0; ancestor < old.Length; ancestor++)
		{
			switch (buf[ptr + ancestor])
			{
				case (byte)(' '):
				case (byte)('-'):
				{
					// Context or deletion relative to this ancestor: the
					// line exists in that ancestor's text. Copy it once
					// and skip it for the remaining ancestors.
					if (copied)
					{
						SkipLine(text, offsets, ancestor);
					}
					else
					{
						CopyLine(sb, text, offsets, ancestor);
						copied = true;
					}
					continue;
					goto case (byte)('+');
				}

				case (byte)('+'):
				{
					continue;
					goto default;
				}

				default:
				{
					goto SCAN_break;
					break;
				}
			}
		}
		if (!copied)
		{
			// If none of the ancestors caused the copy then this line
			// must be new across the board, so it only appears in the
			// text of the new file.
			//
			CopyLine(sb, text, offsets, old.Length);
		}
		SCAN_continue :;
	}
	SCAN_break :;
}
// Write each line of this combined hunk to the output stream of every file
// version it belongs to: @out[i] receives lines present in ancestor i, and
// @out[old.Length] receives lines present in the new file.
// NOTE: the unreachable "goto case"/"break" statements after "continue"
// are artifacts of the Sharpen Java-to-C# conversion and are kept as-is.
/// <exception cref="System.IO.IOException"></exception>
internal override void ExtractFileLines(OutputStream[] @out)
{
	byte[] buf = file.buf;
	int ptr = startOffset;
	int eol = RawParseUtils.NextLF(buf, ptr);
	if (endOffset <= eol)
	{
		return;
	}
	// Treat the hunk header as though it were from the ancestor,
	// as it may have a function header appearing after it which
	// was copied out of the ancestor file.
	//
	@out[0].Write(buf, ptr, eol - ptr);
	for (ptr = eol; ptr < endOffset; ptr = eol)
	{
		eol = RawParseUtils.NextLF(buf, ptr);
		if (eol - ptr < old.Length + 1)
		{
			// Line isn't long enough to mention the state of each
			// ancestor. It must be the end of the hunk.
			goto SCAN_break;
		}
		switch (buf[ptr])
		{
			case (byte)(' '):
			case (byte)('-'):
			case (byte)('+'):
			{
				break;
			}

			default:
			{
				// Line can't possibly be part of this hunk; the first
				// ancestor information isn't recognizable.
				//
				goto SCAN_break;
				break;
			}
		}
		int delcnt = 0;
		for (int ancestor = 0; ancestor < old.Length; ancestor++)
		{
			switch (buf[ptr + ancestor])
			{
				case (byte)('-'):
				{
					// Deleted relative to this ancestor; still part of
					// the ancestor's own text.
					delcnt++;
					@out[ancestor].Write(buf, ptr, eol - ptr);
					continue;
					goto case (byte)(' ');
				}

				case (byte)(' '):
				{
					@out[ancestor].Write(buf, ptr, eol - ptr);
					continue;
					goto case (byte)('+');
				}

				case (byte)('+'):
				{
					continue;
					goto default;
				}

				default:
				{
					goto SCAN_break;
					break;
				}
			}
		}
		if (delcnt < old.Length)
		{
			// This line appears in the new file if it wasn't deleted
			// relative to all ancestors.
			//
			@out[old.Length].Write(buf, ptr, eol - ptr);
		}
		SCAN_continue :;
	}
	SCAN_break :;
}
// Parse the body of this combined hunk, tallying context/deleted/added
// line counts per ancestor and reporting truncated-hunk errors to script
// when the counts fall short of what the hunk header promised.
// Returns the offset where parsing stopped.
// NOTE: the unreachable "goto case"/"break" statements after "continue"
// are artifacts of the Sharpen Java-to-C# conversion and are kept as-is.
internal override int ParseBody(NGit.Patch.Patch script, int end)
{
	byte[] buf = file.buf;
	int c = RawParseUtils.NextLF(buf, startOffset);
	foreach (CombinedHunkHeader.CombinedOldImage o in old)
	{
		o.nDeleted = 0;
		o.nAdded = 0;
		o.nContext = 0;
	}
	nContext = 0;
	int nAdded = 0;
	for (int eol; c < end; c = eol)
	{
		eol = RawParseUtils.NextLF(buf, c);
		if (eol - c < old.Length + 1)
		{
			// Line isn't long enough to mention the state of each
			// ancestor. It must be the end of the hunk.
			goto SCAN_break;
		}
		switch (buf[c])
		{
			case (byte)(' '):
			case (byte)('-'):
			case (byte)('+'):
			{
				break;
			}

			default:
			{
				// Line can't possibly be part of this hunk; the first
				// ancestor information isn't recognizable.
				//
				goto SCAN_break;
				break;
			}
		}
		int localcontext = 0;
		for (int ancestor = 0; ancestor < old.Length; ancestor++)
		{
			switch (buf[c + ancestor])
			{
				case (byte)(' '):
				{
					localcontext++;
					old[ancestor].nContext++;
					continue;
					goto case (byte)('-');
				}

				case (byte)('-'):
				{
					old[ancestor].nDeleted++;
					continue;
					goto case (byte)('+');
				}

				case (byte)('+'):
				{
					old[ancestor].nAdded++;
					nAdded++;
					continue;
					goto default;
				}

				default:
				{
					goto SCAN_break;
					break;
				}
			}
		}
		if (localcontext == old.Length)
		{
			// The line was context for every ancestor at once.
			nContext++;
		}
		SCAN_continue :;
	}
	SCAN_break :;
	// Verify each ancestor received as many lines as its header declared.
	for (int ancestor_1 = 0; ancestor_1 < old.Length; ancestor_1++)
	{
		CombinedHunkHeader.CombinedOldImage o_1 = old[ancestor_1];
		int cmp = o_1.nContext + o_1.nDeleted;
		if (cmp < o_1.lineCount)
		{
			int missingCnt = o_1.lineCount - cmp;
			script.Error(buf, startOffset, MessageFormat.Format(JGitText.Get().truncatedHunkLinesMissingForAncestor, Sharpen.Extensions.ValueOf(missingCnt), Sharpen.Extensions.ValueOf(ancestor_1 + 1)));
		}
	}
	if (nContext + nAdded < newLineCount)
	{
		int missingCount = newLineCount - (nContext + nAdded);
		script.Error(buf, startOffset, MessageFormat.Format(JGitText.Get().truncatedHunkNewLinesMissing, Sharpen.Extensions.ValueOf(missingCount)));
	}
	return(c);
}
/// <summary>
/// The constructor from a byte array
/// </summary>
/// <param name="base">the base configuration file </param>
/// <param name="blob">the byte array, should be UTF-8 encoded text. </param>
/// <exception cref="ConfigInvalidException">
/// The byte array is not a valid configuration format.
/// </exception>
public BlobBasedConfig(Config @base, byte[] blob)
	: base(@base)
{
	// Decode the raw blob, then parse it as configuration text.
	string text = RawParseUtils.decode(blob);
	fromText(text);
}
// Open a loose object stream, supporting both the standard zlib-deflated
// "type size\0data" format and the legacy pack-style header format.
// Small objects are fully inflated into memory; objects at or above the
// stream file threshold are returned as LargeObject handles instead,
// unless path is null (in which case they must fit in a byte array).
/// <exception cref="System.IO.IOException"></exception>
internal static ObjectLoader Open(InputStream @in, FilePath path, AnyObjectId id, WindowCursor wc)
{
	try
	{
		@in = Buffer(@in);
		// Mark so we can rewind after sniffing the first two bytes.
		@in.Mark(20);
		byte[] hdr = new byte[64];
		IOUtil.ReadFully(@in, hdr, 0, 2);
		if (IsStandardFormat(hdr))
		{
			// Standard format: the whole stream (header included) is
			// zlib deflated. Inflate and parse "type size\0".
			@in.Reset();
			Inflater inf = wc.Inflater();
			InputStream zIn = Inflate(@in, inf);
			int avail = ReadSome(zIn, hdr, 0, 64);
			if (avail < 5)
			{
				throw new CorruptObjectException(id, JGitText.Get().corruptObjectNoHeader);
			}
			MutableInteger p = new MutableInteger();
			int type = Constants.DecodeTypeString(id, hdr, unchecked((byte)' '), p);
			long size = RawParseUtils.ParseLongBase10(hdr, p.value, p);
			if (size < 0)
			{
				throw new CorruptObjectException(id, JGitText.Get().corruptObjectNegativeSize);
			}
			if (hdr[p.value++] != 0)
			{
				throw new CorruptObjectException(id, JGitText.Get().corruptObjectGarbageAfterSize);
			}
			if (path == null && int.MaxValue < size)
			{
				// Without a backing file we cannot stream; the object
				// must fit into a byte array.
				LargeObjectException.ExceedsByteArrayLimit e;
				e = new LargeObjectException.ExceedsByteArrayLimit();
				e.SetObjectId(id);
				throw e;
			}
			if (size < wc.GetStreamFileThreshold() || path == null)
			{
				byte[] data = new byte[(int)size];
				// Part of the body may already be in hdr after the header.
				int n = avail - p.value;
				if (n > 0)
				{
					System.Array.Copy(hdr, p.value, data, 0, n);
				}
				IOUtil.ReadFully(zIn, data, n, data.Length - n);
				CheckValidEndOfStream(@in, inf, id, hdr);
				return(new ObjectLoader.SmallObject(type, data));
			}
			return(new UnpackedObject.LargeObject(type, size, path, id, wc.db));
		}
		else
		{
			// Legacy pack-style header: type and size are encoded in a
			// variable-length binary prefix before the deflated body.
			ReadSome(@in, hdr, 2, 18);
			int c = hdr[0] & unchecked((int)(0xff));
			int type = (c >> 4) & 7;
			long size = c & 15;
			int shift = 4;
			int p = 1;
			while ((c & unchecked((int)(0x80))) != 0)
			{
				c = hdr[p++] & unchecked((int)(0xff));
				size += ((long)(c & unchecked((int)(0x7f)))) << shift;
				shift += 7;
			}
			switch (type)
			{
				case Constants.OBJ_COMMIT:
				case Constants.OBJ_TREE:
				case Constants.OBJ_BLOB:
				case Constants.OBJ_TAG:
				{
					// Acceptable types for a loose object.
					break;
				}

				default:
				{
					throw new CorruptObjectException(id, JGitText.Get().corruptObjectInvalidType);
				}
			}
			if (path == null && int.MaxValue < size)
			{
				LargeObjectException.ExceedsByteArrayLimit e;
				e = new LargeObjectException.ExceedsByteArrayLimit();
				e.SetObjectId(id);
				throw e;
			}
			if (size < wc.GetStreamFileThreshold() || path == null)
			{
				// Rewind and skip the binary header before inflating.
				@in.Reset();
				IOUtil.SkipFully(@in, p);
				Inflater inf = wc.Inflater();
				InputStream zIn = Inflate(@in, inf);
				byte[] data = new byte[(int)size];
				IOUtil.ReadFully(zIn, data, 0, data.Length);
				CheckValidEndOfStream(@in, inf, id, hdr);
				return(new ObjectLoader.SmallObject(type, data));
			}
			return(new UnpackedObject.LargeObject(type, size, path, id, wc.db));
		}
	}
	catch (SharpZipBaseException)
	{
		throw new CorruptObjectException(id, JGitText.Get().corruptObjectBadStream);
	}
}
/// <returns>path name for this entry</returns>
public virtual string GetName()
{
	// Decode the raw byte name into a string using the default charset.
	return RawParseUtils.Decode(name);
}
// Parse the git-rebase-todo file into a list of rebase steps. Each line
// has the shape "<action> <abbrev-commit> <short message>"; lines whose
// first token begins with '#' are comments and are skipped. Lines with an
// unrecognized action are silently ignored (current stays null).
// NOTE(review): Action.Parse is invoked twice on the same token below;
// the already-parsed `action` local could likely be reused — confirm.
/// <exception cref="System.IO.IOException"></exception>
private IList<RebaseCommand.Step> LoadSteps()
{
	byte[] buf = IOUtil.ReadFully(new FilePath(rebaseDir, GIT_REBASE_TODO));
	int ptr = 0;
	int tokenBegin = 0;
	AList<RebaseCommand.Step> r = new AList<RebaseCommand.Step>();
	while (ptr < buf.Length)
	{
		tokenBegin = ptr;
		ptr = RawParseUtils.NextLF(buf, ptr);
		int nextSpace = 0;
		int tokenCount = 0;
		RebaseCommand.Step current = null;
		// Consume up to three tokens per line: action, commit, message.
		while (tokenCount < 3 && nextSpace < ptr)
		{
			switch (tokenCount)
			{
				case 0:
				{
					// First token: the action name.
					nextSpace = RawParseUtils.Next(buf, tokenBegin, ' ');
					string actionToken = Sharpen.Runtime.GetStringForBytes(buf, tokenBegin, nextSpace - tokenBegin - 1);
					tokenBegin = nextSpace;
					if (actionToken[0] == '#')
					{
						// Comment line; skip the remaining tokens.
						tokenCount = 3;
						break;
					}
					RebaseCommand.Action action = RebaseCommand.Action.Parse(actionToken);
					if (action != null)
					{
						current = new RebaseCommand.Step(RebaseCommand.Action.Parse(actionToken));
					}
					break;
				}

				case 1:
				{
					// Second token: the abbreviated commit id.
					if (current == null)
					{
						break;
					}
					nextSpace = RawParseUtils.Next(buf, tokenBegin, ' ');
					string commitToken = Sharpen.Runtime.GetStringForBytes(buf, tokenBegin, nextSpace - tokenBegin - 1);
					tokenBegin = nextSpace;
					current.commit = AbbreviatedObjectId.FromString(commitToken);
					break;
				}

				case 2:
				{
					// Remainder of the line: the short commit message.
					if (current == null)
					{
						break;
					}
					nextSpace = ptr;
					int length = ptr - tokenBegin;
					current.shortMessage = new byte[length];
					System.Array.Copy(buf, tokenBegin, current.shortMessage, 0, length);
					r.AddItem(current);
					break;
				}
			}
			tokenCount++;
		}
	}
	return(r);
}