private void WriteHeader()
{
    Array.Copy(Constants.PACK_SIGNATURE, 0, _buf, 0, 4);
    NB.encodeInt32(_buf, 4, PackVersionGenerated);
    NB.encodeInt32(_buf, 8, getObjectsNumber());
    _pos.Write(_buf, 0, 12);
}
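
// Sketch for reference (NBSketch is a hypothetical name, not the code base's actual class):
// the NB helper used throughout these writers encodes a 32-bit value in network byte order
// (big-endian), which is the behavior exercised by testEncodeInt32 further down and is how
// the 12-byte pack header above lays out signature, version, and object count.
internal static class NBSketch
{
    public static void encodeInt32(byte[] intbuf, int offset, int v)
    {
        intbuf[offset] = (byte)((v >> 24) & 0xff);      // highest byte first (big-endian)
        intbuf[offset + 1] = (byte)((v >> 16) & 0xff);
        intbuf[offset + 2] = (byte)((v >> 8) & 0xff);
        intbuf[offset + 3] = (byte)(v & 0xff);          // lowest byte last
    }
}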
/// <summary>
/// Encode a cached timestamp into this entry's info record.
/// </summary>
/// <param name="pIdx">Position of the timestamp field within this entry's info record.</param>
/// <param name="when">
/// New cached modification date of the file, in milliseconds.
/// </param>
private void EncodeTimestamp(int pIdx, long when)
{
    int @base = _infoOffset + pIdx;
    NB.encodeInt32(_info, @base, (int)(when / 1000));
    NB.encodeInt32(_info, @base + 4, ((int)(when % 1000)) * 1000000);
}
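
// Illustrative arithmetic only (the value is hypothetical): the millisecond timestamp is
// split into whole seconds and nanoseconds-within-the-second before being stored as two
// big-endian words, mirroring the stat layout of the on-disk index.
long when = 1700000000123L;                    // milliseconds since the epoch
int seconds = (int)(when / 1000);              // 1700000000, stored in the first word
int nanos = ((int)(when % 1000)) * 1000000;    // 123 ms -> 123000000 ns, stored in the second word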
/// <summary>
/// Copy this ObjectId to a byte array.
/// </summary>
/// <param name="buf">the buffer to copy to.</param>
/// <param name="off">the offset within buf to write at.</param>
public void copyRawTo(byte[] buf, int off)
{
    NB.encodeInt32(buf, 0 + off, W1);
    NB.encodeInt32(buf, 4 + off, W2);
    NB.encodeInt32(buf, 8 + off, W3);
    NB.encodeInt32(buf, 12 + off, W4);
    NB.encodeInt32(buf, 16 + off, W5);
}
private void WriteCRCs()
{
    foreach (PackedObjectInfo oe in entries)
    {
        NB.encodeInt32(tmp, 0, oe.CRC);
        _stream.BaseStream.Write(tmp, 0, 4);
    }
}
private void WriteTo(Stream os)
{
    MessageDigest foot = Constants.newMessageDigest();
    var dos = new DigestOutputStream(os, foot);

    // Write the header.
    //
    var tmp = new byte[128];
    Array.Copy(SigDirc, 0, tmp, 0, SigDirc.Length);
    NB.encodeInt32(tmp, 4, /* version */ 2);
    NB.encodeInt32(tmp, 8, _entryCnt);
    dos.Write(tmp, 0, 12);

    // Write the individual file entries.
    //
    if (_lastModified <= 0)
    {
        // Write a new index, as no entries require smudging.
        //
        for (int i = 0; i < _entryCnt; i++)
        {
            _sortedEntries[i].write(dos);
        }
    }
    else
    {
        int smudge_s = (int)(_lastModified / 1000);
        int smudge_ns = ((int)(_lastModified % 1000)) * 1000000;
        for (int i = 0; i < _entryCnt; i++)
        {
            DirCacheEntry e = _sortedEntries[i];
            if (e.mightBeRacilyClean(smudge_s, smudge_ns))
            {
                e.smudgeRacilyClean();
            }
            e.write(dos);
        }
    }

    if (_cacheTree != null)
    {
        var bb = new LocalFileBuffer();
        _cacheTree.write(tmp, bb);
        bb.close();
        NB.encodeInt32(tmp, 0, ExtTree);
        NB.encodeInt32(tmp, 4, (int)bb.Length);
        dos.Write(tmp, 0, 8);
        bb.writeTo(dos, null);
    }

    var hash = foot.Digest();
    os.Write(hash, 0, hash.Length);
    os.Close();
}
private void WriteTo(Stream os)
{
    MessageDigest foot = Constants.newMessageDigest();
    var dos = new DigestOutputStream(os, foot);

    // Write the header.
    //
    var tmp = new byte[128];
    Array.Copy(SigDirc, 0, tmp, 0, SigDirc.Length);
    NB.encodeInt32(tmp, 4, /* version */ 2);
    NB.encodeInt32(tmp, 8, _entryCnt);
    dos.Write(tmp, 0, 12);

    // Write the individual file entries.
    //
    if (_lastModified == DateTime.MinValue)
    {
        // Write a new index, as no entries require smudging.
        //
        for (int i = 0; i < _entryCnt; i++)
        {
            _sortedEntries[i].write(dos);
        }
    }
    else
    {
        var smudge_s = _lastModified.ToUnixTime();
        var smudge_ns = _lastModified.Millisecond * 1000000; // [henon] <--- this could be done with much more precision in C# since DateTime has 100 nanosec ticks
        for (int i = 0; i < _entryCnt; i++)
        {
            DirCacheEntry e = _sortedEntries[i];
            if (e.mightBeRacilyClean(smudge_s, smudge_ns))
            {
                e.smudgeRacilyClean();
            }
            e.write(dos);
        }
    }

    if (_cacheTree != null)
    {
        var bb = new TemporaryBuffer();
        _cacheTree.write(tmp, bb);
        bb.close();
        NB.encodeInt32(tmp, 0, ExtTree);
        NB.encodeInt32(tmp, 4, (int)bb.Length);
        dos.Write(tmp, 0, 8);
        bb.writeTo(dos, null);
    }

    var hash = foot.Digest();
    os.Write(hash, 0, hash.Length);
    os.Close();
}
/// <summary>
/// Copy this ObjectId to an output stream in raw binary.
/// </summary>
/// <param name="s">the stream to copy to.</param>
public void copyRawTo(Stream s)
{
    var buf = new byte[20];
    NB.encodeInt32(buf, 0, W1);
    NB.encodeInt32(buf, 4, W2);
    NB.encodeInt32(buf, 8, W3);
    NB.encodeInt32(buf, 12, W4);
    NB.encodeInt32(buf, 16, W5);
    s.Write(buf, 0, 20);
}
/// <summary>
/// Set the file mode for this entry.
/// </summary>
/// <param name="mode">The new mode constant.</param>
public void setFileMode(FileMode mode)
{
    switch (mode.Bits & FileMode.TYPE_MASK)
    {
        case FileMode.TYPE_MISSING:
        case FileMode.TYPE_TREE:
            throw new ArgumentException("Invalid mode " + mode.Bits + " for path " + getPathString());
    }

    NB.encodeInt32(_info, _infoOffset + PMode, mode.Bits);
}
public void testEncodeInt32()
{
    var @out = new byte[16];

    PrepareOutput(@out);
    NB.encodeInt32(@out, 0, 0);
    AssertOutput(b(0, 0, 0, 0), @out, 0);

    PrepareOutput(@out);
    NB.encodeInt32(@out, 3, 0);
    AssertOutput(b(0, 0, 0, 0), @out, 3);

    PrepareOutput(@out);
    NB.encodeInt32(@out, 0, 3);
    AssertOutput(b(0, 0, 0, 3), @out, 0);

    PrepareOutput(@out);
    NB.encodeInt32(@out, 3, 3);
    AssertOutput(b(0, 0, 0, 3), @out, 3);

    PrepareOutput(@out);
    NB.encodeInt32(@out, 0, 0xdeac);
    AssertOutput(b(0, 0, 0xde, 0xac), @out, 0);

    PrepareOutput(@out);
    NB.encodeInt32(@out, 3, 0xdeac);
    AssertOutput(b(0, 0, 0xde, 0xac), @out, 3);

    PrepareOutput(@out);
    unchecked
    {
        NB.encodeInt32(@out, 0, (int)0xdeac9853);
    }
    AssertOutput(b(0xde, 0xac, 0x98, 0x53), @out, 0);

    PrepareOutput(@out);
    unchecked
    {
        NB.encodeInt32(@out, 3, (int)0xdeac9853);
    }
    AssertOutput(b(0xde, 0xac, 0x98, 0x53), @out, 3);

    PrepareOutput(@out);
    NB.encodeInt32(@out, 3, -1);
    AssertOutput(b(0xff, 0xff, 0xff, 0xff), @out, 3);
}
internal override void WriteInternal()
{
    WriteFanOutTable();

    foreach (PackedObjectInfo oe in entries)
    {
        if (!CanStore(oe))
        {
            throw new IOException("Pack too large for index version 1");
        }
        NB.encodeInt32(tmp, 0, (int)oe.Offset);
        oe.copyRawTo(tmp, 4);
        _stream.Write(tmp, 0, tmp.Length);
    }

    WriteChecksumFooter();
}
private void WriteOffset32()
{
    int o64 = 0;
    foreach (PackedObjectInfo oe in entries)
    {
        long o = oe.Offset;
        if (o < int.MaxValue)
        {
            NB.encodeInt32(tmp, 0, (int)o);
        }
        else
        {
            NB.encodeInt32(tmp, 0, (1 << 31) | o64++);
        }
        _stream.BaseStream.Write(tmp, 0, 4);
    }
}
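
// Reader-side sketch under the standard pack index v2 layout assumed above: a slot with the
// high bit clear is the object's pack offset itself; a slot with the high bit set holds an
// index into the separate table of 64-bit offsets written after this one. The method and
// parameter names here are illustrative, not the code base's actual API.
static long ResolveOffset(uint slot, long[] offset64Table)
{
    if ((slot & 0x80000000u) == 0)
    {
        return slot;                             // small offset, stored directly
    }
    return offset64Table[slot & 0x7FFFFFFFu];    // large offset, stored out of line
}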
/// <summary>
/// Output the standard 256 entry first-level fan-out table.
/// <para />
/// The fan-out table is 4 KB in size, holding 256 32-bit unsigned integer
/// counts. Each count represents the number of objects within this index
/// whose <see cref="AnyObjectId.GetFirstByte()"/> matches the count's position in the
/// fan-out table.
/// </summary>
internal void WriteFanOutTable()
{
    int[] fanout = new int[256];
    foreach (PackedObjectInfo po in entries)
    {
        fanout[po.GetFirstByte() & 0xff]++;
    }

    for (int i = 1; i < 256; i++)
    {
        fanout[i] += fanout[i - 1];
    }

    foreach (int n in fanout)
    {
        NB.encodeInt32(tmp, 0, n);
        _stream.Write(tmp, 0, 4);
    }
}
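
// Worked example with hypothetical counts: after the cumulative pass each slot holds the
// number of objects whose first byte is less than or equal to the slot index, so the last
// slot equals the total object count.
int[] fanoutExample = new int[256];
fanoutExample[0x00] = 3;   // three objects whose IDs start with 0x00
fanoutExample[0x01] = 2;   // two objects starting with 0x01
fanoutExample[0xff] = 1;   // one object starting with 0xff
for (int i = 1; i < 256; i++)
{
    fanoutExample[i] += fanoutExample[i - 1];
}
// fanoutExample[0] == 3, fanoutExample[1] == 5, ..., fanoutExample[254] == 5, fanoutExample[255] == 6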
/// <summary>
/// Output the version 2 (and later) TOC header, with version number.
/// <para />
/// Post version 1 all index files start with a TOC header that makes the
/// file an invalid version 1 file, and then includes the version number.
/// This header is necessary to recognize a version 1 from a version 2
/// formatted index.
/// </summary>
/// <param name="version">Version number of this index format being written.</param>
internal void WriteTOC(int version)
{
    _stream.Write(TOC);
    NB.encodeInt32(tmp, 0, version);
    _stream.Write(tmp, 0, 4);
}
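
// Assumed value of the TOC constant used above (the standard pack index v2+ magic "\377tOc");
// the field name here is illustrative. Read as a version 1 fan-out count it would be an
// implausibly large number, which is what makes a v2+ file unreadable as a v1 index.
private static readonly byte[] TocSketch = { 0xff, (byte)'t', (byte)'O', (byte)'c' };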
/// <summary>
/// Set the cached size (in bytes) of this file.
/// </summary>
/// <param name="sz">new cached size of the file, in bytes.</param>
public void setLength(int sz)
{
    NB.encodeInt32(_info, _infoOffset + PSize, sz);
}
private void FixHeaderFooter(IEnumerable<byte> origcsum, IEnumerable<byte> tailcsum)
{
    MessageDigest origDigest = Constants.newMessageDigest();
    MessageDigest tailDigest = Constants.newMessageDigest();
    long origRemaining = _originalEof;

    _packOut.Seek(0, SeekOrigin.Begin);
    _bAvail = 0;
    _bOffset = 0;
    FillFromFile(12);

    {
        var origCnt = (int)Math.Min(_bAvail, origRemaining);
        origDigest.Update(_buffer, 0, origCnt);
        origRemaining -= origCnt;
        if (origRemaining == 0)
        {
            tailDigest.Update(_buffer, origCnt, _bAvail - origCnt);
        }
    }

    NB.encodeInt32(_buffer, 8, _entryCount);

    _packOut.Seek(0, SeekOrigin.Begin);
    _packOut.Write(_buffer, 0, 12);
    _packOut.Seek(_bAvail, SeekOrigin.Begin);

    _packDigest.Reset();
    _packDigest.Update(_buffer, 0, _bAvail);

    while (true)
    {
        int n = _packOut.Read(_buffer, 0, _buffer.Length);
        if (n <= 0)
        {
            break;
        }

        if (origRemaining != 0)
        {
            var origCnt = (int)Math.Min(n, origRemaining);
            origDigest.Update(_buffer, 0, origCnt);
            origRemaining -= origCnt;
            if (origRemaining == 0)
            {
                tailDigest.Update(_buffer, origCnt, n - origCnt);
            }
        }
        else
        {
            tailDigest.Update(_buffer, 0, n);
        }

        _packDigest.Update(_buffer, 0, n);
    }

    if (!origDigest.Digest().SequenceEqual(origcsum) || !tailDigest.Digest().SequenceEqual(tailcsum))
    {
        throw new IOException("Pack corrupted while writing to filesystem");
    }

    _packcsum = _packDigest.Digest();
    _packOut.Write(_packcsum, 0, _packcsum.Length);
}
/// <summary>
/// Set the file mode for this entry.
/// </summary>
/// <param name="mode">The new mode constant.</param>
public void setFileMode(FileMode mode)
{
    NB.encodeInt32(_info, _infoOffset + PMode, mode.Bits);
}