/// <summary>Compare this ObjectId to a network-byte-order ObjectId.</summary>
/// <param name="bs">array containing the other ObjectId in network byte order.</param>
/// <param name="p">
/// position within <code>bs</code> to start the compare at. At least
/// 20 bytes, starting at this position are required.
/// </param>
/// <returns>
/// a negative integer, zero, or a positive integer as this object is
/// less than, equal to, or greater than the specified object.
/// </returns>
public int CompareTo(byte[] bs, int p)
{
	// Compare word by word, most significant first; the first unequal
	// word decides the ordering (comparison is unsigned).
	int r1 = NB.CompareUInt32(w1, NB.DecodeInt32(bs, p));
	if (r1 != 0)
	{
		return r1;
	}
	int r2 = NB.CompareUInt32(w2, NB.DecodeInt32(bs, p + 4));
	if (r2 != 0)
	{
		return r2;
	}
	int r3 = NB.CompareUInt32(w3, NB.DecodeInt32(bs, p + 8));
	if (r3 != 0)
	{
		return r3;
	}
	int r4 = NB.CompareUInt32(w4, NB.DecodeInt32(bs, p + 12));
	if (r4 != 0)
	{
		return r4;
	}
	return NB.CompareUInt32(w5, NB.DecodeInt32(bs, p + 16));
}
/// <summary>
/// Open an existing pack <code>.idx</code> file for reading.
/// <para/>
/// The format of the file will be automatically detected and a proper access
/// implementation for that format will be constructed and returned to the
/// caller. The file may or may not be held open by the returned instance.
/// </summary>
/// <param name="idxFile">existing pack .idx to read.</param>
/// <returns>access implementation for the requested file.</returns>
public static PackIndex Open(FileInfo idxFile)
{
	try
	{
		using (FileStream fs = idxFile.OpenRead())
		{
			byte[] hdr = new byte[8];
			IO.ReadFully(fs, hdr, 0, hdr.Length);
			if (IsTOC(hdr))
			{
				// Versioned index: the version number follows the magic.
				int v = NB.DecodeInt32(hdr, 4);
				switch (v)
				{
					case 2:
						return (new PackIndexV2(fs));

					default:
						throw new IOException("Unsupported pack index version " + v);
				}
			}
			// No table-of-contents magic: must be the legacy v1 layout,
			// whose first 8 bytes (already read) are fanout data.
			return (new PackIndexV1(fs, hdr));
		}
	}
	catch (IOException ioe)
	{
		// Preserve the original failure as the inner exception so the
		// root cause (missing file, short read, unsupported version,
		// corrupt header) is not lost when the wrapper is reported.
		throw new IOException("Unable to read pack index: " + idxFile.FullName, ioe);
	}
}
/// <summary>Compare this abbreviation to a network-byte-order ObjectId.</summary>
/// <param name="bs">array containing the other ObjectId in network byte order.</param>
/// <param name="p">
/// position within <code>bs</code> to start the compare at. At least
/// 20 bytes, starting at this position are required.
/// </param>
/// <returns>
/// &lt;0 if this abbreviation names an object that is less than
/// <code>other</code>; 0 if this abbreviation exactly matches the first
/// <see cref="Length()">Length()</see> digits of <code>other.name()</code>;
/// &gt;0 if this abbreviation names an object that is after
/// <code>other</code>.
/// </returns>
public int PrefixCompare(byte[] bs, int p)
{
	// Mask each decoded word down to the digits this abbreviation
	// actually covers before comparing, so a matching prefix compares
	// as equal. Words are compared most significant first, unsigned.
	int result = NB.CompareUInt32(w1, Mask(1, NB.DecodeInt32(bs, p)));
	if (result != 0)
	{
		return result;
	}
	result = NB.CompareUInt32(w2, Mask(2, NB.DecodeInt32(bs, p + 4)));
	if (result != 0)
	{
		return result;
	}
	result = NB.CompareUInt32(w3, Mask(3, NB.DecodeInt32(bs, p + 8)));
	if (result != 0)
	{
		return result;
	}
	result = NB.CompareUInt32(w4, Mask(4, NB.DecodeInt32(bs, p + 12)));
	if (result != 0)
	{
		return result;
	}
	return NB.CompareUInt32(w5, Mask(5, NB.DecodeInt32(bs, p + 16)));
}
/// <summary>Convert an ObjectId from raw binary representation.</summary>
/// <param name="bs">
/// the raw byte buffer to read from. At least 20 bytes after p
/// must be available within this byte array.
/// </param>
/// <param name="p">position to read the first byte of data from.</param>
public virtual void FromRaw(byte[] bs, int p)
{
	// Unpack the 20-byte id into five big-endian 32-bit words.
	int offset = p;
	w1 = NB.DecodeInt32(bs, offset);
	offset += 4;
	w2 = NB.DecodeInt32(bs, offset);
	offset += 4;
	w3 = NB.DecodeInt32(bs, offset);
	offset += 4;
	w4 = NB.DecodeInt32(bs, offset);
	offset += 4;
	w5 = NB.DecodeInt32(bs, offset);
}
/// <summary>
/// Decode a stored timestamp (seconds + nanoseconds pair) starting at
/// <paramref name="pIdx"/> within this entry's info record, returning it
/// in milliseconds.
/// </summary>
/// <param name="pIdx">field offset within the entry's info record.</param>
/// <returns>the timestamp in milliseconds.</returns>
private long DecodeTimestamp(int pIdx)
{
	int fieldOffset = _infoOffset + pIdx;
	// First word is whole seconds, second word is nanoseconds; the
	// integer division truncates nanoseconds down to milliseconds.
	int seconds = NB.DecodeInt32(_info, fieldOffset);
	int millis = NB.DecodeInt32(_info, fieldOffset + 4) / 1000000;
	return seconds * 1000L + millis;
}
/// <summary>
/// Load this id from a raw 20-byte binary representation, reading five
/// big-endian 32-bit words starting at <paramref name="p"/>.
/// </summary>
/// <param name="bs">buffer holding at least 20 bytes after <paramref name="p"/>.</param>
/// <param name="p">position of the first byte to read.</param>
public void FromRaw(byte[] bs, int p)
{
	int idx = p;
	W1 = NB.DecodeInt32(bs, idx);
	idx += 4;
	W2 = NB.DecodeInt32(bs, idx);
	idx += 4;
	W3 = NB.DecodeInt32(bs, idx);
	idx += 4;
	W4 = NB.DecodeInt32(bs, idx);
	idx += 4;
	W5 = NB.DecodeInt32(bs, idx);
}
/// <summary>
/// Construct an ObjectId from its raw 20-byte binary representation,
/// decoding five consecutive big-endian 32-bit words.
/// </summary>
/// <param name="buffer">buffer holding at least 20 bytes after <paramref name="offset"/>.</param>
/// <param name="offset">position of the first byte to read.</param>
/// <returns>the decoded object id.</returns>
public static ObjectId FromRaw(byte[] buffer, int offset)
{
	return new ObjectId(
		NB.DecodeInt32(buffer, offset),
		NB.DecodeInt32(buffer, offset + 4),
		NB.DecodeInt32(buffer, offset + 8),
		NB.DecodeInt32(buffer, offset + 12),
		NB.DecodeInt32(buffer, offset + 16));
}
/// <summary>Convert an ObjectId from raw binary representation.</summary>
/// <param name="bs">
/// the raw byte buffer to read from. At least 20 bytes after p
/// must be available within this byte array.
/// </param>
/// <param name="p">position to read the first byte of data from.</param>
/// <returns>the converted object id.</returns>
public static NGit.ObjectId FromRaw(byte[] bs, int p)
{
	// Five consecutive big-endian words make up the 20-byte id.
	return new NGit.ObjectId(
		NB.DecodeInt32(bs, p),
		NB.DecodeInt32(bs, p + 4),
		NB.DecodeInt32(bs, p + 8),
		NB.DecodeInt32(bs, p + 12),
		NB.DecodeInt32(bs, p + 16));
}
/// <summary>Open an existing pack <code>.idx</code> file for reading.</summary>
/// <remarks>
/// Open an existing pack <code>.idx</code> file for reading.
/// <p>
/// The format of the file will be automatically detected and a proper access
/// implementation for that format will be constructed and returned to the
/// caller. The file may or may not be held open by the returned instance.
/// </p>
/// </remarks>
/// <param name="idxFile">existing pack .idx to read.</param>
/// <returns>access implementation for the requested file.</returns>
/// <exception cref="System.IO.FileNotFoundException">the file does not exist.</exception>
/// <exception cref="System.IO.IOException">
/// the file exists but could not be read due to security errors,
/// unrecognized data version, or unexpected data corruption.
/// </exception>
public static PackIndex Open(FilePath idxFile)
{
	// Opened outside the try so a FileNotFoundException from a missing
	// file propagates as-is rather than being wrapped below.
	FileInputStream fd = new FileInputStream(idxFile);
	try
	{
		byte[] hdr = new byte[8];
		IOUtil.ReadFully(fd, hdr, 0, hdr.Length);
		if (IsTOC(hdr))
		{
			// Versioned index: version number follows the TOC magic.
			int v = NB.DecodeInt32(hdr, 4);
			switch (v)
			{
				case 2:
				{
					return (new PackIndexV2(fd));
				}

				default:
				{
					throw new IOException(MessageFormat.Format(JGitText.Get().unsupportedPackIndexVersion
						, v));
				}
			}
		}
		// No TOC magic: legacy v1 layout; the 8 bytes already read are
		// the start of its fanout table and are handed to the parser.
		return (new PackIndexV1(fd, hdr));
	}
	catch (IOException ioe)
	{
		// Wrap in a message naming the file, chaining the original
		// failure as the cause so it is not lost.
		string path = idxFile.GetAbsolutePath();
		IOException err;
		err = new IOException(MessageFormat.Format(JGitText.Get().unreadablePackIndex, path
			));
		Sharpen.Extensions.InitCause(err, ioe);
		throw err;
	}
	finally
	{
		try
		{
			fd.Close();
		}
		catch (IOException)
		{
			// Ignore close failures; the index (or the error above) is
			// already determined at this point.
		}
	}
}
/// <summary>
/// Is it possible for this entry to be accidentally assumed clean?
/// <p>
/// The "racy git" problem happens when a work file can be updated faster
/// than the filesystem records file modification timestamps.
/// </summary>
/// <remarks>
/// It is possible for an application to edit a work file, update the index,
/// then edit it again before the filesystem will give the work file a new
/// modification timestamp. This method tests to see if file was written out
/// at the same time as the index.
/// </remarks>
/// <param name="smudge_s">seconds component of the index's last modified time.</param>
/// <param name="smudge_ns">nanoseconds component of the index's last modified time.</param>
/// <returns>true if extra careful checks should be used.</returns>
public bool MightBeRacilyClean(int smudge_s, int smudge_ns)
{
	// If the index has a modification time then it came from disk
	// and was not generated from scratch in memory. In such cases
	// the entry is 'racily clean' if the entry's cached modification
	// time is equal to or later than the index modification time. In
	// such cases the work file is too close to the index to tell if
	// it is clean or not based on the modification time alone.
	//
	int mtimeOffset = infoOffset + P_MTIME;
	int mtime = NB.DecodeInt32(info, mtimeOffset);
	if (smudge_s != mtime)
	{
		// Seconds differ; timestamps are far enough apart to trust.
		return false;
	}
	// Same second: racy only if the entry's nanoseconds are at or
	// past the index's nanoseconds.
	return smudge_ns <= NB.DecodeInt32(info, mtimeOffset + 4);
}
/// <summary>
/// Verify NB.DecodeInt32 reads a big-endian signed 32-bit value, both at
/// offset 0 and after 3 bytes of leading padding.
/// </summary>
public void testDecodeInt32()
{
	// Zero.
	Assert.AreEqual(0, NB.DecodeInt32(b(0, 0, 0, 0), 0));
	Assert.AreEqual(0, NB.DecodeInt32(Padb(3, 0, 0, 0, 0), 3));

	// Small positive value in the least significant byte.
	Assert.AreEqual(3, NB.DecodeInt32(b(0, 0, 0, 3), 0));
	Assert.AreEqual(3, NB.DecodeInt32(Padb(3, 0, 0, 0, 3), 3));

	// High bit set: decodes to a negative int.
	Assert.AreEqual(unchecked((int)0xdeadbeef), NB.DecodeInt32(b(0xde, 0xad, 0xbe, 0xef), 0));
	Assert.AreEqual(unchecked((int)0xdeadbeef), NB.DecodeInt32(Padb(3, 0xde, 0xad, 0xbe, 0xef), 3));

	// Arbitrary positive pattern exercising all four byte positions.
	Assert.AreEqual(0x0310adef, NB.DecodeInt32(b(0x03, 0x10, 0xad, 0xef), 0));
	Assert.AreEqual(0x0310adef, NB.DecodeInt32(Padb(3, 0x03, 0x10, 0xad, 0xef), 3));

	// All bits set (-1).
	Assert.AreEqual(unchecked((int)0xffffffff), NB.DecodeInt32(b(0xff, 0xff, 0xff, 0xff), 0));
	Assert.AreEqual(unchecked((int)0xffffffff), NB.DecodeInt32(Padb(3, 0xff, 0xff, 0xff, 0xff), 3));
}
// ignore
/// <summary>Read an existing pack index file from a buffered stream.</summary>
/// <remarks>
/// Read an existing pack index file from a buffered stream.
/// <p>
/// The format of the file will be automatically detected and a proper access
/// implementation for that format will be constructed and returned to the
/// caller. The file may or may not be held open by the returned instance.
/// </remarks>
/// <param name="fd">
/// stream to read the index file from. The stream must be
/// buffered as some small IOs are performed against the stream.
/// The caller is responsible for closing the stream.
/// </param>
/// <returns>a copy of the index in-memory.</returns>
/// <exception cref="System.IO.IOException">the stream cannot be read.</exception>
/// <exception cref="NGit.Errors.CorruptObjectException">the stream does not contain a valid pack index.
/// </exception>
public static PackIndex Read(InputStream fd)
{
	byte[] hdr = new byte[8];
	IOUtil.ReadFully(fd, hdr, 0, hdr.Length);

	// Without the table-of-contents magic this must be the legacy v1
	// layout; the 8 bytes already consumed are the start of its fanout.
	if (!IsTOC(hdr))
	{
		return new PackIndexV1(fd, hdr);
	}

	// Versioned index: the version number follows the TOC magic.
	int v = NB.DecodeInt32(hdr, 4);
	if (v == 2)
	{
		return new PackIndexV2(fd);
	}
	throw new IOException(MessageFormat.Format(JGitText.Get().unsupportedPackIndexVersion
		, Sharpen.Extensions.ValueOf(v)));
}
/// <summary>
/// Read and validate the 12-byte pack stream header: the PACK signature,
/// a 4-byte version (must be 2 or 3), and the 4-byte object count.
/// </summary>
private void ReadPackHeader()
{
	int hdrln = Constants.PACK_SIGNATURE.Length + 4 + 4;
	int p = FillFromInput(hdrln);
	for (int k = 0; k < Constants.PACK_SIGNATURE.Length; k++)
	{
		if (_buffer[p + k] != Constants.PACK_SIGNATURE[k])
		{
			throw new IOException("Not a PACK file.");
		}
	}

	// The version field is an unsigned 32-bit value; decode it as such.
	// (The previous DecodeInt32 call — flagged "// DecodeUInt32!" in the
	// original — would report a bogus negative version in the error
	// message for any value >= 0x80000000.)
	long vers = NB.decodeUInt32(_buffer, p + 4);
	if (vers != 2 && vers != 3)
	{
		throw new IOException("Unsupported pack version " + vers + ".");
	}

	_objectCount = NB.decodeUInt32(_buffer, p + 8);
	Use(hdrln);
}
/// <summary>
/// Parse a version-2 pack index from a stream: 256-entry fanout table,
/// then per-bucket object names, CRC32 table, 32-bit offsets, optional
/// 64-bit offsets, and the trailing pack checksum.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
internal PackIndexV2(InputStream fd)
{
	// Fanout table: 256 cumulative object counts, one per leading byte.
	byte[] fanoutRaw = new byte[4 * FANOUT];
	IOUtil.ReadFully(fd, fanoutRaw, 0, fanoutRaw.Length);
	fanoutTable = new long[FANOUT];
	for (int k = 0; k < FANOUT; k++)
	{
		fanoutTable[k] = NB.DecodeUInt32(fanoutRaw, k * 4);
	}
	// Last fanout entry is the total object count.
	objectCnt = fanoutTable[FANOUT - 1];
	names = new int[FANOUT][];
	offset32 = new byte[FANOUT][];
	crc32 = new byte[FANOUT][];
	// Object name table. The size we can permit per fan-out bucket
	// is limited to Java's 2 GB per byte array limitation. That is
	// no more than 107,374,182 objects per fan-out.
	//
	for (int k_1 = 0; k_1 < FANOUT; k_1++)
	{
		// Fanout is cumulative; subtract the previous entry to get
		// this bucket's own object count.
		long bucketCnt;
		if (k_1 == 0)
		{
			bucketCnt = fanoutTable[k_1];
		}
		else
		{
			bucketCnt = fanoutTable[k_1] - fanoutTable[k_1 - 1];
		}
		if (bucketCnt == 0)
		{
			// Empty bucket: share the canonical empty arrays.
			names[k_1] = NO_INTS;
			offset32[k_1] = NO_BYTES;
			crc32[k_1] = NO_BYTES;
			continue;
		}
		long nameLen = bucketCnt * Constants.OBJECT_ID_LENGTH;
		if (nameLen > int.MaxValue)
		{
			throw new IOException(JGitText.Get().indexFileIsTooLargeForJgit);
		}
		int intNameLen = (int)nameLen;
		byte[] raw = new byte[intNameLen];
		// Names are stored as packed 32-bit words; unsigned shift by 2
		// converts the byte length to a word count.
		int[] bin = new int[(int)(((uint)intNameLen) >> 2)];
		IOUtil.ReadFully(fd, raw, 0, raw.Length);
		for (int i = 0; i < bin.Length; i++)
		{
			bin[i] = NB.DecodeInt32(raw, i << 2);
		}
		names[k_1] = bin;
		// Pre-size the CRC and offset buffers; filled by the reads below.
		offset32[k_1] = new byte[(int)(bucketCnt * 4)];
		crc32[k_1] = new byte[(int)(bucketCnt * 4)];
	}
	// CRC32 table.
	for (int k_2 = 0; k_2 < FANOUT; k_2++)
	{
		IOUtil.ReadFully(fd, crc32[k_2], 0, crc32[k_2].Length);
	}
	// 32 bit offset table. Any entries with the most significant bit
	// set require a 64 bit offset entry in another table.
	//
	int o64cnt = 0;
	for (int k_3 = 0; k_3 < FANOUT; k_3++)
	{
		byte[] ofs = offset32[k_3];
		IOUtil.ReadFully(fd, ofs, 0, ofs.Length);
		for (int p = 0; p < ofs.Length; p += 4)
		{
			// MSB of the first byte set => 64-bit offset entry needed.
			if (((sbyte)ofs[p]) < 0)
			{
				o64cnt++;
			}
		}
	}
	// 64 bit offset table. Most objects should not require an entry.
	//
	if (o64cnt > 0)
	{
		offset64 = new byte[o64cnt * 8];
		IOUtil.ReadFully(fd, offset64, 0, offset64.Length);
	}
	else
	{
		offset64 = NO_BYTES;
	}
	// Checksum of the corresponding pack file, used for pairing.
	packChecksum = new byte[20];
	IOUtil.ReadFully(fd, packChecksum, 0, packChecksum.Length);
}
/// <summary>
/// Parse the on-disk index from a stream: validate the DIRC header,
/// load all entries, process trailing extensions, and verify the
/// footer checksum.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="NGit.Errors.CorruptObjectException"></exception>
private void ReadFrom(InputStream inStream)
{
	BufferedInputStream @in = new BufferedInputStream(inStream);
	MessageDigest md = Constants.NewMessageDigest();
	// Read the index header and verify we understand it.
	//
	byte[] hdr = new byte[20];
	IOUtil.ReadFully(@in, hdr, 0, 12);
	md.Update(hdr, 0, 12);
	if (!Is_DIRC(hdr))
	{
		throw new CorruptObjectException(JGitText.Get().notADIRCFile);
	}
	int ver = NB.DecodeInt32(hdr, 4);
	// Version 3 adds extended flags to each entry; version 2 is the base
	// format. Anything else is unsupported.
	bool extended = false;
	if (ver == 3)
	{
		extended = true;
	}
	else
	{
		if (ver != 2)
		{
			throw new CorruptObjectException(MessageFormat.Format(JGitText.Get().unknownDIRCVersion
				, ver));
		}
	}
	entryCnt = NB.DecodeInt32(hdr, 8);
	if (entryCnt < 0)
	{
		// Negative here means the stored count overflowed a signed int.
		throw new CorruptObjectException(JGitText.Get().DIRCHasTooManyEntries);
	}
	// Load the individual file entries.
	//
	int infoLength = DirCacheEntry.GetMaximumInfoLength(extended);
	// All entries share one backing buffer; infoAt tracks the write
	// position as each entry is parsed.
	byte[] infos = new byte[infoLength * entryCnt];
	sortedEntries = new DirCacheEntry[entryCnt];
	MutableInteger infoAt = new MutableInteger();
	for (int i = 0; i < entryCnt; i++)
	{
		sortedEntries[i] = new DirCacheEntry(infos, infoAt, @in, md);
	}
	snapshot = FileSnapshot.Save(liveFile);
	// After the file entries are index extensions, and then a footer.
	//
	for (; ; )
	{
		// Peek 21 bytes: if only 20 remain, they are the footer checksum
		// (left in hdr); otherwise reset and parse an extension header.
		@in.Mark(21);
		IOUtil.ReadFully(@in, hdr, 0, 20);
		if (@in.Read() < 0)
		{
			// No extensions present; the file ended where we expected.
			//
			break;
		}
		@in.Reset();
		// Extension header: 4-byte name + 4-byte payload size; both are
		// included in the checksum before skipping past them.
		md.Update(hdr, 0, 8);
		IOUtil.SkipFully(@in, 8);
		long sz = NB.DecodeUInt32(hdr, 4);
		switch (NB.DecodeInt32(hdr, 0))
		{
			case EXT_TREE:
			{
				if (int.MaxValue < sz)
				{
					throw new CorruptObjectException(MessageFormat.Format(JGitText.Get().DIRCExtensionIsTooLargeAt
						, FormatExtensionName(hdr), sz));
				}
				byte[] raw = new byte[(int)sz];
				IOUtil.ReadFully(@in, raw, 0, raw.Length);
				md.Update(raw, 0, raw.Length);
				tree = new DirCacheTree(raw, new MutableInteger(), null);
				break;
			}

			default:
			{
				if (hdr[0] >= 'A' && ((sbyte)hdr[0]) <= 'Z')
				{
					// The extension is optional and is here only as
					// a performance optimization. Since we do not
					// understand it, we can safely skip past it, after
					// we include its data in our checksum.
					//
					SkipOptionalExtension(@in, md, hdr, sz);
				}
				else
				{
					// The extension is not an optimization and is
					// _required_ to understand this index format.
					// Since we did not trap it above we must abort.
					//
					throw new CorruptObjectException(MessageFormat.Format(JGitText.Get().DIRCExtensionNotSupportedByThisVersion
						, FormatExtensionName(hdr)));
				}
				break;
			}
		}
	}
	// hdr now holds the footer checksum read just before the loop broke;
	// it must match the digest of everything that preceded it.
	byte[] exp = md.Digest();
	if (!Arrays.Equals(exp, hdr))
	{
		throw new CorruptObjectException(JGitText.Get().DIRCChecksumMismatch);
	}
}
/// <summary>
/// Parse a version-2 pack index from a stream: 256-entry fanout table,
/// then per-bucket object names, CRC32 table, 32-bit offsets, optional
/// 64-bit offsets, and the trailing pack checksum.
/// </summary>
public PackIndexV2(Stream fd)
{
	// Fanout table: 256 cumulative object counts, one per leading byte.
	var fanoutRaw = new byte[4 * FANOUT];
	IO.ReadFully(fd, fanoutRaw, 0, fanoutRaw.Length);
	_fanoutTable = new long[FANOUT];
	for (int k = 0; k < FANOUT; k++)
	{
		_fanoutTable[k] = NB.DecodeUInt32(fanoutRaw, k * 4);
	}
	// Last fanout entry is the total object count.
	ObjectCount = _fanoutTable[FANOUT - 1];
	_names = new int[FANOUT][];
	_offset32 = new byte[FANOUT][];
	_crc32 = new byte[FANOUT][];
	// object name table. The size we can permit per fan-out bucket
	// is limited to Java's 2 GB per byte array limitation. That is
	// no more than 107,374,182 objects per fan-out.
	//
	for (int k = 0; k < FANOUT; k++)
	{
		// Fanout is cumulative; subtract the previous entry to get
		// this bucket's own object count.
		long bucketCnt;
		if (k == 0)
		{
			bucketCnt = _fanoutTable[k];
		}
		else
		{
			bucketCnt = _fanoutTable[k] - _fanoutTable[k - 1];
		}
		if (bucketCnt == 0)
		{
			// Empty bucket: share the canonical empty arrays.
			_names[k] = NoInts;
			_offset32[k] = NoBytes;
			_crc32[k] = NoBytes;
			continue;
		}
		long nameLen = bucketCnt * Constants.OBJECT_ID_LENGTH;
		if (nameLen > int.MaxValue)
		{
			throw new IOException("Index file is too large");
		}
		var intNameLen = (int)nameLen;
		var raw = new byte[intNameLen];
		// Names are stored as packed 32-bit words; shifting the byte
		// length right by 2 gives the word count.
		var bin = new int[intNameLen >> 2];
		IO.ReadFully(fd, raw, 0, raw.Length);
		for (int i = 0; i < bin.Length; i++)
		{
			bin[i] = NB.DecodeInt32(raw, i << 2);
		}
		_names[k] = bin;
		// Pre-size the CRC and offset buffers; filled by the reads below.
		_offset32[k] = new byte[(int)(bucketCnt * 4)];
		_crc32[k] = new byte[(int)(bucketCnt * 4)];
	}
	// CRC32 table.
	for (int k = 0; k < FANOUT; k++)
	{
		IO.ReadFully(fd, _crc32[k], 0, _crc32[k].Length);
	}
	// 32 bit offset table. Any entries with the most significant bit
	// set require a 64 bit offset entry in another table.
	//
	int o64cnt = 0;
	for (int k = 0; k < FANOUT; k++)
	{
		byte[] ofs = _offset32[k];
		IO.ReadFully(fd, ofs, 0, ofs.Length);
		for (int p = 0; p < ofs.Length; p += 4)
		{
			// MSB of the first byte set => 64-bit offset entry needed.
			if (NB.ConvertUnsignedByteToSigned(ofs[p]) < 0)
			{
				o64cnt++;
			}
		}
	}
	// 64 bit offset table. Most objects should not require an entry.
	//
	if (o64cnt > 0)
	{
		_offset64 = new byte[o64cnt * 8];
		IO.ReadFully(fd, _offset64, 0, _offset64.Length);
	}
	else
	{
		_offset64 = NoBytes;
	}
	// Checksum of the corresponding pack file, used for pairing.
	PackChecksum = new byte[20];
	IO.ReadFully(fd, PackChecksum, 0, PackChecksum.Length);
}
/// <summary>
/// Get the cached size (in bytes) of this file.
/// <para />
/// One of the indicators that the file has been modified by an application
/// changing the working tree is if the size of the file (in bytes) differs
/// from the size stored in this entry.
/// <para />
/// Note that this is the length of the file in the working directory, which
/// may differ from the size of the decompressed blob if work tree filters
/// are being used, such as LF&lt;-&gt;CRLF conversion.
/// </summary>
/// <returns> cached size of the working directory file, in bytes. </returns>
public int getLength()
{
	// The size field lives at a fixed offset within this entry's
	// info record.
	int sizeOffset = _infoOffset + PSize;
	return NB.DecodeInt32(_info, sizeOffset);
}
/// <summary>
/// Obtain the raw <seealso cref="FileMode"/> bits for this entry.
/// </summary>
/// <returns> mode bits for the entry. </returns>
/// <seealso cref="FileMode.FromBits(int)"/>
public int getRawMode()
{
	// The mode field lives at a fixed offset within this entry's
	// info record.
	int modeOffset = _infoOffset + PMode;
	return NB.DecodeInt32(_info, modeOffset);
}
/// <summary>
/// Parse the on-disk index from a stream: validate the DIRC header,
/// load all entries, process trailing extensions, and verify the
/// footer checksum.
/// </summary>
private void ReadFrom(Stream inStream)
{
	// NOTE: the original allocated "new StreamReader(inStream)" here and
	// never used it — dead code removed; every read below goes straight
	// to inStream.
	MessageDigest md = Constants.newMessageDigest();

	// Read the index header and verify we understand it.
	//
	var hdr = new byte[20];
	IO.ReadFully(inStream, hdr, 0, 12);
	md.Update(hdr, 0, 12);
	if (!IsDIRC(hdr))
	{
		throw new CorruptObjectException("Not a DIRC file.");
	}

	int ver = NB.DecodeInt32(hdr, 4);
	if (ver != 2)
	{
		throw new CorruptObjectException("Unknown DIRC version " + ver);
	}

	_entryCnt = NB.DecodeInt32(hdr, 8);
	if (_entryCnt < 0)
	{
		// Negative here means the stored count overflowed a signed int.
		throw new CorruptObjectException("DIRC has too many entries.");
	}

	// Load the individual file entries.
	//
	var infos = new byte[InfoLen * _entryCnt];
	_sortedEntries = new DirCacheEntry[_entryCnt];
	for (int i = 0; i < _entryCnt; i++)
	{
		_sortedEntries[i] = new DirCacheEntry(infos, i * InfoLen, inStream, md);
	}
	_lastModified = _liveFile.lastModified();

	// After the file entries are index extensions, and then a footer.
	//
	while (true)
	{
		// Peek 21 bytes: if only 20 remain, they are the footer checksum
		// (left in hdr); otherwise seek back and parse an extension.
		var pos = inStream.Position;
		IO.ReadFully(inStream, hdr, 0, 20);
		if (inStream.ReadByte() < 0)
		{
			// No extensions present; the file ended where we expected.
			//
			break;
		}

		inStream.Seek(pos, SeekOrigin.Begin);

		// Extension header: 4-byte name + 4-byte payload size; both are
		// included in the checksum before skipping past them.
		md.Update(hdr, 0, 8);
		IO.skipFully(inStream, 8);
		long sz = NB.decodeUInt32(hdr, 4);
		switch (NB.DecodeInt32(hdr, 0))
		{
			case ExtTree:
				if (int.MaxValue < sz)
				{
					throw new CorruptObjectException("DIRC extension " + formatExtensionName(hdr) + " is too large at " + sz + " bytes.");
				}
				byte[] raw = new byte[(int)sz];
				IO.ReadFully(inStream, raw, 0, raw.Length);
				md.Update(raw, 0, raw.Length);
				_cacheTree = new DirCacheTree(raw, new MutableInteger(), null);
				break;

			default:
				if (hdr[0] >= (byte)'A' && hdr[0] <= (byte)'Z')
				{
					// The extension is optional and is here only as
					// a performance optimization. Since we do not
					// understand it, we can safely skip past it, after
					// we include its data in our checksum.
					//
					skipOptionalExtension(inStream, md, hdr, sz);
				}
				else
				{
					// The extension is not an optimization and is
					// _required_ to understand this index format.
					// Since we did not trap it above we must abort.
					//
					throw new CorruptObjectException("DIRC extension " + formatExtensionName(hdr) + " not supported by this version.");
				}
				break;
		}
	}

	// hdr now holds the footer checksum read just before the loop broke;
	// it must match the digest of everything that preceded it.
	byte[] exp = md.Digest();
	if (!exp.SequenceEqual(hdr))
	{
		throw new CorruptObjectException("DIRC checksum mismatch");
	}
}