/// <summary>Recursively get all entries within a subtree.</summary>
/// <param name="path">the subtree path to get all entries within.</param>
/// <returns>all entries recursively contained within the subtree.</returns>
public virtual DirCacheEntry[] GetEntriesWithin(string path)
{
    // An empty path means the whole index; hand back a defensive copy.
    if (path.Length == 0)
    {
        DirCacheEntry[] copy = new DirCacheEntry[sortedEntries.Length];
        System.Array.Copy(sortedEntries, 0, copy, 0, sortedEntries.Length);
        return copy;
    }
    // Normalize to a directory prefix ending in '/'.
    string prefix = path.EndsWith("/") ? path : path + "/";
    byte[] encoded = Constants.Encode(prefix);
    int encodedLen = encoded.Length;
    // Locate the first entry at or after the prefix; a negative result is
    // the encoded insertion point.
    int first = FindEntry(encoded, encodedLen);
    if (first < 0)
    {
        first = -(first + 1);
    }
    int afterLast = NextEntry(encoded, encodedLen, first);
    DirCacheEntry[] result = new DirCacheEntry[afterLast - first];
    System.Array.Copy(sortedEntries, first, result, 0, result.Length);
    return result;
}
/// <summary>
/// Sort the entry table and verify the stage invariants: a path may appear
/// more than once only with distinct stages 1-3, never mixed with stage 0.
/// </summary>
private void Resort()
{
    Arrays.Sort(entries, 0, entryCnt, DirCache.ENT_CMP);
    for (int idx = 1; idx < entryCnt; idx++)
    {
        DirCacheEntry prev = entries[idx - 1];
        DirCacheEntry cur = entries[idx];
        if (DirCache.Cmp(prev, cur) != 0)
        {
            continue;
        }
        // Same file path; we can only allow this if the stages
        // are 1-3 and no 0 exists.
        int prevStage = prev.Stage;
        int curStage = cur.Stage;
        if (prevStage == curStage)
        {
            throw Bad(cur, JGitText.Get().duplicateStagesNotAllowed);
        }
        if (prevStage == 0 || curStage == 0)
        {
            throw Bad(cur, JGitText.Get().mixedStagesNotAllowed);
        }
    }
    sorted = true;
}
/// <summary>
/// Compute the serialized size in bytes of this tree's canonical tree record,
/// writing any child subtrees to the object store along the way so their
/// sizes (fixed-width TREE records) can be accounted for.
/// </summary>
/// <param name="cache">the complete entry array from DirCache.</param>
/// <param name="cIdx">first position of <code>cache</code> that belongs to this tree.</param>
/// <param name="pathOffset">number of leading path bytes already matched by this tree.</param>
/// <param name="ow">inserter used to write child subtrees.</param>
/// <returns>size in bytes of the formatted tree.</returns>
/// <exception cref="NGit.Errors.UnmergedPathException"></exception>
/// <exception cref="System.IO.IOException"></exception>
private int ComputeSize(DirCacheEntry[] cache, int cIdx, int pathOffset, ObjectInserter ow)
{
    int endIdx = cIdx + entrySpan;
    int childIdx = 0;
    int entryIdx = cIdx;
    int size = 0;
    while (entryIdx < endIdx)
    {
        DirCacheEntry e = cache[entryIdx];
        // Only stage 0 entries can be represented in a tree object.
        if (e.Stage != 0)
        {
            throw new UnmergedPathException(e);
        }
        byte[] ep = e.path;
        if (childIdx < childCnt)
        {
            NGit.Dircache.DirCacheTree st = children[childIdx];
            if (st.Contains(ep, pathOffset, ep.Length))
            {
                // Entry falls inside the next child subtree: write that
                // subtree now, count one TREE record, and skip its span.
                int stOffset = pathOffset + st.NameLength() + 1;
                st.WriteTree(cache, entryIdx, stOffset, ow);
                size += TreeFormatter.EntrySize(FileMode.TREE, st.NameLength());
                entryIdx += st.entrySpan;
                childIdx++;
                continue;
            }
        }
        // Plain entry: size depends on mode and the path suffix length.
        size += TreeFormatter.EntrySize(e.FileMode, ep.Length - pathOffset);
        entryIdx++;
    }
    return(size);
}
/// <summary>
/// Load the entry at the current pointer into this iterator's state,
/// substituting the enclosing cached subtree when the entry is the first
/// file of that subtree.
/// </summary>
private void ParseEntry()
{
    currentEntry = cache.GetEntry(ptr);
    byte[] cep = currentEntry.path;
    if (nextSubtreePos != tree.GetChildCount())
    {
        DirCacheTree s = tree.GetChild(nextSubtreePos);
        if (s.Contains(cep, pathOffset, cep.Length))
        {
            // The current position is the first file of this subtree.
            // Use the subtree instead as the current position.
            //
            currentSubtree = s;
            nextSubtreePos++;
            // Only a valid (cached) subtree has a known ObjectId to expose.
            if (s.IsValid())
            {
                s.GetObjectId().CopyRawTo(subtreeId, 0);
            }
            mode = FileMode.TREE.GetBits();
            path = cep;
            pathLen = pathOffset + s.NameLength();
            return;
        }
    }
    // The current position is a file/symlink/gitlink so we
    // do not have a subtree located here.
    //
    mode = currentEntry.RawMode;
    path = cep;
    pathLen = cep.Length;
    currentSubtree = null;
}
/// <summary>Retain an existing index entry unless it is null or a tree placeholder.</summary>
/// <param name="e">the entry to keep; ignored when null or a TREE.</param>
private void Keep(DirCacheEntry e)
{
    if (e == null)
    {
        return;
    }
    if (!FileMode.TREE.Equals(e.FileMode))
    {
        builder.Add(e);
    }
}
/// <summary>
/// A suffix filter applied to a non-recursive walk must report only the
/// matching top-level path.
/// </summary>
public virtual void TestNonRecursiveFiltering()
{
    ObjectInserter inserter = db.NewObjectInserter();
    ObjectId idSth = inserter.Insert(Constants.OBJ_BLOB, Sharpen.Runtime.GetBytesForString(
        "a.sth"));
    ObjectId idTxt = inserter.Insert(Constants.OBJ_BLOB, Sharpen.Runtime.GetBytesForString(
        "a.txt"));
    // Index both files and write the tree they describe.
    DirCache cache = db.ReadDirCache();
    DirCacheBuilder builder = cache.Builder();
    DirCacheEntry entSth = new DirCacheEntry("a.sth");
    entSth.FileMode = FileMode.REGULAR_FILE;
    entSth.SetObjectId(idSth);
    DirCacheEntry entTxt = new DirCacheEntry("a.txt");
    entTxt.FileMode = FileMode.REGULAR_FILE;
    entTxt.SetObjectId(idTxt);
    builder.Add(entSth);
    builder.Add(entTxt);
    builder.Finish();
    ObjectId treeId = cache.WriteTree(inserter);
    inserter.Flush();
    // Walk with a ".txt" suffix filter; only a.txt should surface.
    TreeWalk walk = new TreeWalk(db);
    walk.Filter = PathSuffixFilter.Create(".txt");
    walk.AddTree(treeId);
    IList<string> actual = new List<string>();
    while (walk.Next())
    {
        actual.AddItem(walk.PathString);
    }
    IList<string> expected = new List<string>();
    expected.AddItem("a.txt");
    NUnit.Framework.Assert.AreEqual(expected, actual);
}
/// <summary>
/// A cache containing only top-level files iterates in entry order with
/// the iterator pointer tracking the entry index.
/// </summary>
public virtual void TestNoSubtree_NoTreeWalk()
{
    DirCache dc = DirCache.NewInCore();
    string[] paths = new string[] { "a.", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        DirCacheEntry ent = new DirCacheEntry(paths[i]);
        ent.FileMode = FileMode.REGULAR_FILE;
        ents[i] = ent;
    }
    DirCacheBuilder builder = dc.Builder();
    foreach (DirCacheEntry ent in ents)
    {
        builder.Add(ent);
    }
    builder.Finish();
    // Every iterator position must line up with the entry array.
    DirCacheIterator iter = new DirCacheIterator(dc);
    int seen = 0;
    for (; !iter.Eof; iter.Next(1))
    {
        NUnit.Framework.Assert.AreEqual(seen, iter.ptr);
        NUnit.Framework.Assert.AreSame(ents[seen], iter.GetDirCacheEntry());
        seen++;
    }
    NUnit.Framework.Assert.AreEqual(paths.Length, seen);
}
/// <summary>
/// Commit entries whose path components are long enough to exercise the
/// prefix-compressed on-disk index format, then read the index back and
/// verify the entry count and the cache-tree structure.
/// </summary>
public virtual void TestWriteReadTree()
{
    DirCache dc = db.LockDirCache();
    // FIX: the original port used string.Format("a%2000s", "a"), but C#
    // composite formatting only understands {0}-style placeholders, so it
    // returned the literal "a%2000s" and the intended ~2000 character
    // component was never built. Pad explicitly instead (Java's %2000s
    // pads its argument to width 2000 with leading spaces).
    string A = "a" + "a".PadLeft(2000);
    string B = "b" + "b".PadLeft(2000);
    string[] paths = new string[] { A + ".", A + "." + B, A + "/" + B, A + "0" + B };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    DirCacheBuilder b = dc.Builder();
    for (int i = 0; i < ents.Length; i++)
    {
        b.Add(ents[i]);
    }
    b.Commit();
    DirCache read = db.ReadDirCache();
    NUnit.Framework.Assert.AreEqual(paths.Length, read.GetEntryCount());
    NUnit.Framework.Assert.AreEqual(1, read.GetCacheTree(true).GetChildCount());
}
/// <summary>Write (if necessary) this tree to the object store.</summary>
/// <remarks>
/// Write (if necessary) this tree to the object store. A tree whose id is
/// already known is returned as-is; otherwise the canonical tree record is
/// formatted (child subtrees written recursively) and inserted.
/// </remarks>
/// <param name="cache">the complete cache from DirCache.</param>
/// <param name="cIdx">
/// first position of <code>cache</code> that is a member of this
/// tree. The path of <code>cache[cacheIdx].path</code> for the
/// range <code>[0,pathOff-1)</code> matches the complete path of
/// this tree, from the root of the repository.
/// </param>
/// <param name="pathOffset">
/// number of bytes of <code>cache[cacheIdx].path</code> that
/// matches this tree's path. The value at array position
/// <code>cache[cacheIdx].path[pathOff-1]</code> is always '/' if
/// <code>pathOff</code> is > 0.
/// </param>
/// <param name="ow">the writer to use when serializing to the store.</param>
/// <returns>identity of this tree.</returns>
/// <exception cref="NGit.Errors.UnmergedPathException">
/// one or more paths contain higher-order stages (stage > 0),
/// which cannot be stored in a tree object.
/// </exception>
/// <exception cref="System.IO.IOException">an unexpected error occurred writing to the object store.
/// </exception>
internal virtual ObjectId WriteTree(DirCacheEntry[] cache, int cIdx, int pathOffset
    , ObjectInserter ow)
{
    if (id == null)
    {
        int endIdx = cIdx + entrySpan;
        // ComputeSize also writes child subtrees, so their ids are known
        // by the time they are appended below.
        TreeFormatter fmt = new TreeFormatter(ComputeSize(cache, cIdx, pathOffset, ow));
        int childIdx = 0;
        int entryIdx = cIdx;
        while (entryIdx < endIdx)
        {
            DirCacheEntry e = cache[entryIdx];
            byte[] ep = e.path;
            if (childIdx < childCnt)
            {
                NGit.Dircache.DirCacheTree st = children[childIdx];
                if (st.Contains(ep, pathOffset, ep.Length))
                {
                    // Entry belongs to the next child subtree: emit one
                    // TREE record and skip past the subtree's entries.
                    fmt.Append(st.encodedName, FileMode.TREE, st.id);
                    entryIdx += st.entrySpan;
                    childIdx++;
                    continue;
                }
            }
            fmt.Append(ep, pathOffset, ep.Length - pathOffset, e.FileMode, e.IdBuffer, e.IdOffset
                );
            entryIdx++;
        }
        id = ow.Insert(fmt);
    }
    return(id);
}
/// <summary>
/// Entries added in reverse git sort order must be resorted by Finish()
/// into canonical order, with lookups by index and by path agreeing.
/// </summary>
public virtual void TestAdd_ReverseGitSortOrder()
{
    DirCache dc = db.ReadDirCache();
    string[] paths = new string[] { "a.", "a.b", "a/b", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    // Feed the builder backwards; Finish() must restore sorted order.
    DirCacheBuilder builder = dc.Builder();
    for (int i = ents.Length - 1; i >= 0; i--)
    {
        builder.Add(ents[i]);
    }
    builder.Finish();
    NUnit.Framework.Assert.AreEqual(paths.Length, dc.GetEntryCount());
    for (int i = 0; i < paths.Length; i++)
    {
        NUnit.Framework.Assert.AreSame(ents[i], dc.GetEntry(i));
        NUnit.Framework.Assert.AreEqual(paths[i], dc.GetEntry(i).PathString);
        NUnit.Framework.Assert.AreEqual(i, dc.FindEntry(paths[i]));
        NUnit.Framework.Assert.AreSame(ents[i], dc.GetEntry(paths[i]));
    }
}
/// <summary>
/// Iterating a cache of top-level files only: the iterator pointer must
/// track the entry index until Eof.
/// </summary>
public virtual void TestNoSubtree_NoTreeWalk()
{
    DirCache dc = DirCache.NewInCore();
    string[] paths = new string[] { "a.", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int idx = 0; idx < paths.Length; idx++)
    {
        ents[idx] = new DirCacheEntry(paths[idx]);
        ents[idx].FileMode = FileMode.REGULAR_FILE;
    }
    DirCacheBuilder builder = dc.Builder();
    for (int idx = 0; idx < ents.Length; idx++)
    {
        builder.Add(ents[idx]);
    }
    builder.Finish();
    DirCacheIterator it = new DirCacheIterator(dc);
    int pos = 0;
    while (!it.Eof)
    {
        NUnit.Framework.Assert.AreEqual(pos, it.ptr);
        NUnit.Framework.Assert.AreSame(ents[pos], it.GetDirCacheEntry());
        pos++;
        it.Next(1);
    }
    NUnit.Framework.Assert.AreEqual(paths.Length, pos);
}
/// <summary>Finishing an empty builder must clear all existing entries.</summary>
public virtual void TestBuilderClear()
{
    DirCache dc = db.ReadDirCache();
    string[] paths = new string[] { "a.", "a.b", "a/b", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    // Populate the cache with four entries.
    {
        DirCacheBuilder fill = dc.Builder();
        foreach (DirCacheEntry ent in ents)
        {
            fill.Add(ent);
        }
        fill.Finish();
    }
    NUnit.Framework.Assert.AreEqual(paths.Length, dc.GetEntryCount());
    // A builder that finishes without adding anything empties the cache.
    {
        DirCacheBuilder empty = dc.Builder();
        empty.Finish();
    }
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntryCount());
}
/// <summary>
/// Round-trip an index containing one very long path and one short path:
/// commit through a locked cache, then re-read and verify both paths.
/// </summary>
/// <exception cref="NGit.Errors.CorruptObjectException"></exception>
/// <exception cref="System.IO.IOException"></exception>
private void TestLongPath(int len)
{
    string longPath = MakeLongPath(len);
    string shortPath = "~~~ shorter-path";
    DirCacheEntry longEnt = new DirCacheEntry(longPath);
    DirCacheEntry shortEnt = new DirCacheEntry(shortPath);
    longEnt.FileMode = FileMode.REGULAR_FILE;
    shortEnt.FileMode = FileMode.REGULAR_FILE;
    NUnit.Framework.Assert.AreEqual(longPath, longEnt.PathString);
    NUnit.Framework.Assert.AreEqual(shortPath, shortEnt.PathString);
    // Write both entries out through a locked cache.
    DirCache written = db.LockDirCache();
    {
        DirCacheBuilder b = written.Builder();
        b.Add(longEnt);
        b.Add(shortEnt);
        NUnit.Framework.Assert.IsTrue(b.Commit());
    }
    NUnit.Framework.Assert.AreEqual(2, written.GetEntryCount());
    NUnit.Framework.Assert.AreSame(longEnt, written.GetEntry(0));
    NUnit.Framework.Assert.AreSame(shortEnt, written.GetEntry(1));
    // Read back: paths must survive, but the objects are fresh copies.
    DirCache read = db.ReadDirCache();
    NUnit.Framework.Assert.AreEqual(2, read.GetEntryCount());
    NUnit.Framework.Assert.AreNotSame(longEnt, read.GetEntry(0));
    NUnit.Framework.Assert.AreEqual(longPath, read.GetEntry(0).PathString);
    NUnit.Framework.Assert.AreNotSame(shortEnt, read.GetEntry(1));
    NUnit.Framework.Assert.AreEqual(shortPath, read.GetEntry(1).PathString);
}
/// <summary>A conflict is detected - add the three different stages to the index</summary>
/// <param name="path">the path of the conflicting entry</param>
/// <param name="e">the previous index entry</param>
/// <param name="h">the first tree you want to merge (the HEAD)</param>
/// <param name="m">the second tree you want to merge</param>
private void Conflict(string path, DirCacheEntry e, AbstractTreeIterator h, AbstractTreeIterator m)
{
    conflicts.AddItem(path);
    if (e != null)
    {
        // Stage 1: the base (previous index) version, metadata included.
        DirCacheEntry baseEntry = new DirCacheEntry(e.PathString, DirCacheEntry.STAGE_1);
        baseEntry.CopyMetaData(e);
        builder.Add(baseEntry);
    }
    if (h != null && !FileMode.TREE.Equals(h.EntryFileMode))
    {
        // Stage 2: "ours" (HEAD side of the merge).
        DirCacheEntry ours = new DirCacheEntry(h.EntryPathString, DirCacheEntry.STAGE_2);
        ours.FileMode = h.EntryFileMode;
        ours.SetObjectId(h.EntryObjectId);
        builder.Add(ours);
    }
    if (m != null && !FileMode.TREE.Equals(m.EntryFileMode))
    {
        // Stage 3: "theirs" (the tree being merged in).
        DirCacheEntry theirs = new DirCacheEntry(m.EntryPathString, DirCacheEntry.STAGE_3);
        theirs.FileMode = m.EntryFileMode;
        theirs.SetObjectId(m.EntryObjectId);
        builder.Add(theirs);
    }
}
/// <summary>
/// GetEntriesWithin must return the "a/" run for both "a" and "a/", the
/// whole index for the empty string, and empty (non-null) arrays for
/// file paths and paths not present.
/// </summary>
public virtual void TestEntriesWithin()
{
    DirCache dc = db.ReadDirCache();
    string[] paths = new string[] { "a.", "a/b", "a/c", "a/d", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    int aFirst = 1;
    int aLast = 3;
    DirCacheBuilder b = dc.Builder();
    for (int i = 0; i < ents.Length; i++)
    {
        b.Add(ents[i]);
    }
    b.Finish();
    NUnit.Framework.Assert.AreEqual(paths.Length, dc.GetEntryCount());
    for (int i = 0; i < ents.Length; i++)
    {
        NUnit.Framework.Assert.AreSame(ents[i], dc.GetEntry(i));
    }
    {
        DirCacheEntry[] aContents = dc.GetEntriesWithin("a");
        NUnit.Framework.Assert.IsNotNull(aContents);
        NUnit.Framework.Assert.AreEqual(aLast - aFirst + 1, aContents.Length);
        for (int i = aFirst, j = 0; i <= aLast; i++, j++)
        {
            NUnit.Framework.Assert.AreSame(ents[i], aContents[j]);
        }
    }
    {
        DirCacheEntry[] aContents = dc.GetEntriesWithin("a/");
        NUnit.Framework.Assert.IsNotNull(aContents);
        NUnit.Framework.Assert.AreEqual(aLast - aFirst + 1, aContents.Length);
        for (int i = aFirst, j = 0; i <= aLast; i++, j++)
        {
            NUnit.Framework.Assert.AreSame(ents[i], aContents[j]);
        }
    }
    {
        DirCacheEntry[] aContents = dc.GetEntriesWithin(string.Empty);
        NUnit.Framework.Assert.IsNotNull(aContents);
        NUnit.Framework.Assert.AreEqual(ents.Length, aContents.Length);
        for (int i = 0; i < ents.Length; i++)
        {
            NUnit.Framework.Assert.AreSame(ents[i], aContents[i]);
        }
    }
    // FIX: the Length assertions previously queried "a0b." and "zoo." while
    // the paired IsNotNull assertions queried "a0b" and "zoo" — each pair
    // now checks the same path (matches the upstream JGit test).
    NUnit.Framework.Assert.IsNotNull(dc.GetEntriesWithin("a."));
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntriesWithin("a.").Length);
    NUnit.Framework.Assert.IsNotNull(dc.GetEntriesWithin("a0b"));
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntriesWithin("a0b").Length);
    NUnit.Framework.Assert.IsNotNull(dc.GetEntriesWithin("zoo"));
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntriesWithin("zoo").Length);
}
/// <summary>
/// Create a cache entry whose blob id is computed from the path's own bytes,
/// giving each path a stable, distinct object id without touching the store.
/// </summary>
/// <exception cref="System.Exception"></exception>
private DirCacheEntry MakeEntry(string path, FileMode mode)
{
    DirCacheEntry entry = new DirCacheEntry(path);
    entry.FileMode = mode;
    byte[] content = Constants.Encode(path);
    entry.SetObjectId(new ObjectInserter.Formatter().IdFor(Constants.OBJ_BLOB, content));
    return entry;
}
/// <summary>Assert path and the version-3 extended flags of an index entry.</summary>
/// <param name="indexPosition">position of the entry within the cache.</param>
/// <param name="path">expected path string.</param>
/// <param name="skipWorkTree">expected skip-worktree flag.</param>
/// <param name="intentToAdd">expected intent-to-add flag.</param>
/// <param name="dc">the cache to read the entry from.</param>
private static void AssertV3TreeEntry(int indexPosition, string path, bool skipWorkTree
    , bool intentToAdd, DirCache dc)
{
    DirCacheEntry entry = dc.GetEntry(indexPosition);
    NUnit.Framework.Assert.AreEqual(path, entry.PathString);
    NUnit.Framework.Assert.AreEqual(skipWorkTree, entry.IsSkipWorkTree);
    NUnit.Framework.Assert.AreEqual(intentToAdd, entry.IsIntentToAdd);
}
/// <summary>
/// GetEntriesWithin must return the "a/" run for both "a" and "a/", the
/// whole index for the empty string, and empty (non-null) arrays for
/// file paths and paths not present.
/// </summary>
public virtual void TestEntriesWithin()
{
    DirCache dc = db.ReadDirCache();
    string[] paths = new string[] { "a.", "a/b", "a/c", "a/d", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    int aFirst = 1;
    int aLast = 3;
    DirCacheBuilder b = dc.Builder();
    for (int i = 0; i < ents.Length; i++)
    {
        b.Add(ents[i]);
    }
    b.Finish();
    NUnit.Framework.Assert.AreEqual(paths.Length, dc.GetEntryCount());
    for (int i = 0; i < ents.Length; i++)
    {
        NUnit.Framework.Assert.AreSame(ents[i], dc.GetEntry(i));
    }
    {
        DirCacheEntry[] aContents = dc.GetEntriesWithin("a");
        NUnit.Framework.Assert.IsNotNull(aContents);
        NUnit.Framework.Assert.AreEqual(aLast - aFirst + 1, aContents.Length);
        for (int i = aFirst, j = 0; i <= aLast; i++, j++)
        {
            NUnit.Framework.Assert.AreSame(ents[i], aContents[j]);
        }
    }
    {
        DirCacheEntry[] aContents = dc.GetEntriesWithin("a/");
        NUnit.Framework.Assert.IsNotNull(aContents);
        NUnit.Framework.Assert.AreEqual(aLast - aFirst + 1, aContents.Length);
        for (int i = aFirst, j = 0; i <= aLast; i++, j++)
        {
            NUnit.Framework.Assert.AreSame(ents[i], aContents[j]);
        }
    }
    {
        DirCacheEntry[] aContents = dc.GetEntriesWithin(string.Empty);
        NUnit.Framework.Assert.IsNotNull(aContents);
        NUnit.Framework.Assert.AreEqual(ents.Length, aContents.Length);
        for (int i = 0; i < ents.Length; i++)
        {
            NUnit.Framework.Assert.AreSame(ents[i], aContents[i]);
        }
    }
    // FIX: the Length assertions previously queried "a0b." and "zoo." while
    // the paired IsNotNull assertions queried "a0b" and "zoo" — each pair
    // now checks the same path (matches the upstream JGit test).
    NUnit.Framework.Assert.IsNotNull(dc.GetEntriesWithin("a."));
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntriesWithin("a.").Length);
    NUnit.Framework.Assert.IsNotNull(dc.GetEntriesWithin("a0b"));
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntriesWithin("a0b").Length);
    NUnit.Framework.Assert.IsNotNull(dc.GetEntriesWithin("zoo"));
    NUnit.Framework.Assert.AreEqual(0, dc.GetEntriesWithin("zoo").Length);
}
/// <summary>Append one entry into the resulting entry list.</summary>
/// <remarks>
/// Append one entry into the resulting entry list.
/// <p>
/// The entry is placed at the end of the entry list. If the entry causes the
/// list to now be incorrectly sorted a final sorting phase will be
/// automatically enabled within
/// <see cref="Finish()">Finish()</see>
/// .
/// <p>
/// The internal entry table is automatically expanded if there is
/// insufficient space for the new addition.
/// </remarks>
/// <param name="newEntry">the new entry to add.</param>
/// <exception cref="System.ArgumentException">If the FileMode of the entry was not set by the caller.
/// </exception>
public virtual void Add(DirCacheEntry newEntry)
{
    // An entry without a file mode cannot be stored in a tree later.
    if (newEntry.RawMode == 0)
    {
        string msg = MessageFormat.Format(JGitText.Get().fileModeNotSetForPath,
            newEntry.PathString);
        throw new ArgumentException(msg);
    }
    BeforeAdd(newEntry);
    FastAdd(newEntry);
}
/// <summary>
/// Assert a record parsed from a C git index matches the corresponding
/// DirCacheEntry: path, object id, raw mode and stage must all agree.
/// </summary>
/// <param name="c">record read from the C git index file.</param>
/// <param name="j">entry produced by the NGit reader.</param>
private static void AssertEqual(DirCacheCGitCompatabilityTest.CGitIndexRecord c,
    DirCacheEntry j)
{
    NUnit.Framework.Assert.IsNotNull(c);
    NUnit.Framework.Assert.IsNotNull(j);
    NUnit.Framework.Assert.AreEqual(c.path, j.PathString);
    NUnit.Framework.Assert.AreEqual(c.id, j.GetObjectId());
    NUnit.Framework.Assert.AreEqual(c.mode, j.RawMode);
    NUnit.Framework.Assert.AreEqual(c.stage, j.Stage);
}
/// <summary>Append one entry into the resulting entry list.</summary>
/// <remarks>
/// Append one entry into the resulting entry list.
/// <p>
/// The entry is placed at the end of the entry list. The caller is
/// responsible for making sure the final table is correctly sorted.
/// <p>
/// The
/// <see cref="entries">entries</see>
/// table is automatically expanded if there is
/// insufficient space for the new addition.
/// </remarks>
/// <param name="newEntry">the new entry to add.</param>
protected internal virtual void FastAdd(DirCacheEntry newEntry)
{
    if (entryCnt == entries.Length)
    {
        // Grow by roughly 1.5x (with a small floor) before appending.
        int newCap = (entryCnt + 16) * 3 / 2;
        DirCacheEntry[] grown = new DirCacheEntry[newCap];
        System.Array.Copy(entries, 0, grown, 0, entryCnt);
        entries = grown;
    }
    entries[entryCnt++] = newEntry;
}
/// <summary>
/// Update the DirCache with the contents of
/// <see cref="entries">entries</see>
/// .
/// <p>
/// This method should be invoked only during an implementation of
/// <see cref="Finish()">Finish()</see>
/// , and only after
/// <see cref="entries">entries</see>
/// is sorted.
/// </summary>
protected internal virtual void Replace()
{
    // Trim excess capacity when the table is less than half full, so the
    // cache does not hold onto a large, mostly-empty array.
    if (entryCnt < entries.Length / 2)
    {
        DirCacheEntry[] compact = new DirCacheEntry[entryCnt];
        System.Array.Copy(entries, 0, compact, 0, entryCnt);
        entries = compact;
    }
    cache.Replace(entries, entryCnt);
}
/// <summary>
/// Create a cache entry at the given stage from the current position of a
/// tree walk, copying its mode and raw object id.
/// </summary>
/// <param name="stage">merge stage to record on the new entry.</param>
/// <param name="tw">walk positioned on the entry to convert.</param>
/// <returns>the newly created entry.</returns>
private DirCacheEntry ToEntry(int stage, TreeWalk tw)
{
    DirCacheEntry entry = new DirCacheEntry(tw.RawPath, stage);
    AbstractTreeIterator iter = tw.GetTree<AbstractTreeIterator>(0);
    entry.FileMode = tw.GetFileMode(0);
    entry.SetObjectIdFromRaw(iter.IdBuffer, iter.IdOffset);
    return entry;
}
/// <summary>Serialize this index to the given output stream in DIRC format.</summary>
/// <remarks>
/// Writes the 12 byte header (signature, version, entry count), every entry
/// in sorted order (smudging racily-clean entries when a snapshot exists),
/// the optional cached-tree (TREE) extension, and finally a SHA-1 footer
/// over everything written. Version 3 is used only when at least one entry
/// requires extended flags.
/// </remarks>
/// <exception cref="System.IO.IOException"></exception>
internal virtual void WriteTo(OutputStream os)
{
    MessageDigest foot = Constants.NewMessageDigest();
    DigestOutputStream dos = new DigestOutputStream(os, foot);
    // Version 3 is only needed when an entry carries extended flags.
    bool extended = false;
    for (int i = 0; i < entryCnt; i++)
    {
        extended |= sortedEntries[i].IsExtended;
    }
    // Write the header.
    //
    byte[] tmp = new byte[128];
    System.Array.Copy(SIG_DIRC, 0, tmp, 0, SIG_DIRC.Length);
    NB.EncodeInt32(tmp, 4, extended ? 3 : 2);
    NB.EncodeInt32(tmp, 8, entryCnt);
    dos.Write(tmp, 0, 12);
    // Write the individual file entries.
    //
    if (snapshot == null)
    {
        // Write a new index, as no entries require smudging.
        //
        for (int i_1 = 0; i_1 < entryCnt; i_1++)
        {
            sortedEntries[i_1].Write(dos);
        }
    }
    else
    {
        // Entries modified in the same second as the index itself may be
        // racily clean; smudge them so they are re-examined next read.
        int smudge_s = (int)(snapshot.LastModified() / 1000);
        int smudge_ns = ((int)(snapshot.LastModified() % 1000)) * 1000000;
        for (int i_1 = 0; i_1 < entryCnt; i_1++)
        {
            DirCacheEntry e = sortedEntries[i_1];
            if (e.MightBeRacilyClean(smudge_s, smudge_ns))
            {
                e.SmudgeRacilyClean();
            }
            e.Write(dos);
        }
    }
    if (tree != null)
    {
        // Append the cached tree extension: tag, length, then payload.
        TemporaryBuffer bb = new TemporaryBuffer.LocalFile();
        tree.Write(tmp, bb);
        bb.Close();
        NB.EncodeInt32(tmp, 0, EXT_TREE);
        NB.EncodeInt32(tmp, 4, (int)bb.Length());
        dos.Write(tmp, 0, 8);
        bb.WriteTo(dos, null);
    }
    // Footer: SHA-1 over everything written through the digest stream.
    os.Write(foot.Digest());
    os.Close();
}
/// <summary>
/// Committing a changed index must fire an IndexChangedEvent; committing
/// identical content a second time must not fire the event again.
/// </summary>
public virtual void TestBuildOneFile_Commit_IndexChangedEvent()
{
    // empty
    string path = "a-file-path";
    FileMode mode = FileMode.REGULAR_FILE;
    // "old" date in 2008
    long lastModified = 1218123387057L;
    int length = 1342;
    DirCacheEntry entOrig;
    bool receivedEvent = false;
    DirCache dc = db.LockDirCache();
    // NOTE(review): _IndexChangedListener_212 and _T123327308 are
    // Sharpen-generated names; the listener presumably signals the event by
    // throwing _T123327308 — confirm against the generated anonymous classes.
    IndexChangedListener listener = new _IndexChangedListener_212();
    ListenerList l = db.Listeners;
    l.AddIndexChangedListener(listener);
    DirCacheBuilder b = dc.Builder();
    entOrig = new DirCacheEntry(path);
    entOrig.FileMode = mode;
    entOrig.LastModified = lastModified;
    entOrig.SetLength(length);
    b.Add(entOrig);
    try
    {
        b.Commit();
    }
    catch (_T123327308)
    {
        // The listener threw: the event was delivered.
        receivedEvent = true;
    }
    if (!receivedEvent)
    {
        NUnit.Framework.Assert.Fail("did not receive IndexChangedEvent");
    }
    // do the same again, as this doesn't change index compared to first
    // round we should get no event this time
    dc = db.LockDirCache();
    listener = new _IndexChangedListener_239();
    l = db.Listeners;
    l.AddIndexChangedListener(listener);
    b = dc.Builder();
    entOrig = new DirCacheEntry(path);
    entOrig.FileMode = mode;
    entOrig.LastModified = lastModified;
    entOrig.SetLength(length);
    b.Add(entOrig);
    try
    {
        b.Commit();
    }
    catch (_T123327308)
    {
        NUnit.Framework.Assert.Fail("unexpected IndexChangedEvent");
    }
}
/// <summary>
/// Record a path as updated and stage its new blob id and mode in the index
/// at stage 0. Tree modes are skipped: directories are not tracked directly.
/// </summary>
/// <param name="path">path of the changed file.</param>
/// <param name="mId">new object id for the path.</param>
/// <param name="mode">new file mode for the path.</param>
private void Update(string path, ObjectId mId, FileMode mode)
{
    if (FileMode.TREE.Equals(mode))
    {
        return;
    }
    updated.Put(path, mId);
    DirCacheEntry entry = new DirCacheEntry(path, DirCacheEntry.STAGE_0);
    entry.SetObjectId(mId);
    entry.FileMode = mode;
    builder.Add(entry);
}
/// <summary>Apply the queued path edits against the destination cache.</summary>
/// <remarks>
/// Edits are sorted, then merged against the sorted entry table: untouched
/// ranges are copied in bulk with FastKeep, DeletePath/DeleteTree drop the
/// affected entries, and all other edits are applied either to the existing
/// entry or to a newly created one.
/// </remarks>
private void ApplyEdits()
{
    edits.Sort(EDIT_CMP);
    int maxIdx = cache.GetEntryCount();
    int lastIdx = 0;
    foreach (DirCacheEditor.PathEdit e in edits)
    {
        int eIdx = cache.FindEntry(e.path, e.path.Length);
        bool missing = eIdx < 0;
        if (eIdx < 0)
        {
            // A miss encodes the insertion point as -(pos + 1).
            eIdx = -(eIdx + 1);
        }
        // Keep everything between the previous edit and this one.
        int cnt = Math.Min(eIdx, maxIdx) - lastIdx;
        if (cnt > 0)
        {
            FastKeep(lastIdx, cnt);
        }
        // Skip over the edited entry (all of its stages) when it exists.
        lastIdx = missing ? eIdx : cache.NextEntry(eIdx);
        if (e is DirCacheEditor.DeletePath)
        {
            continue;
        }
        if (e is DirCacheEditor.DeleteTree)
        {
            // Drop every entry under this path prefix.
            lastIdx = cache.NextEntry(e.path, e.path.Length, eIdx);
            continue;
        }
        DirCacheEntry ent;
        if (missing)
        {
            ent = new DirCacheEntry(e.path);
            e.Apply(ent);
            // A freshly created entry must receive a file mode from the edit.
            if (ent.RawMode == 0)
            {
                throw new ArgumentException(MessageFormat.Format(JGitText.Get().fileModeNotSetForPath
                    , ent.PathString));
            }
        }
        else
        {
            ent = cache.GetEntry(eIdx);
            e.Apply(ent);
        }
        FastAdd(ent);
    }
    // Keep the tail following the last edit.
    int cnt_1 = maxIdx - lastIdx;
    if (cnt_1 > 0)
    {
        FastKeep(lastIdx, cnt_1);
    }
}
/// <summary>Add a range of existing entries from the destination cache.</summary>
/// <remarks>
/// Add a range of existing entries from the destination cache.
/// <p>
/// The entries are placed at the end of the entry list, preserving their
/// current order. The caller is responsible for making sure the final table
/// is correctly sorted.
/// <p>
/// This method copies from the destination cache, which has not yet been
/// updated with this editor's new table. So all offsets into the destination
/// cache are not affected by any updates that may be currently taking place
/// in this editor.
/// <p>
/// The
/// <see cref="entries">entries</see>
/// table is automatically expanded if there is
/// insufficient space for the new additions.
/// </remarks>
/// <param name="pos">first entry to copy from the destination cache.</param>
/// <param name="cnt">number of entries to copy.</param>
protected internal virtual void FastKeep(int pos, int cnt)
{
    int required = entryCnt + cnt;
    if (required > entries.Length)
    {
        // Grow by at least 1.5x, or exactly enough to hold the new range.
        int growth = (entryCnt + 16) * 3 / 2;
        DirCacheEntry[] bigger = new DirCacheEntry[Math.Max(growth, required)];
        System.Array.Copy(entries, 0, bigger, 0, entryCnt);
        entries = bigger;
    }
    cache.ToArray(pos, entries, entryCnt, cnt);
    entryCnt += cnt;
}
/// <summary>Advance past every entry whose path begins with the given prefix.</summary>
/// <param name="p">encoded path prefix.</param>
/// <param name="pLen">number of bytes of <code>p</code> to compare.</param>
/// <param name="nextIdx">position to start scanning from.</param>
/// <returns>index of the first entry after the prefix run (may equal entryCnt).</returns>
internal virtual int NextEntry(byte[] p, int pLen, int nextIdx)
{
    int idx = nextIdx;
    while (idx < entryCnt && DirCacheTree.Peq(p, sortedEntries[idx].path, pLen))
    {
        idx++;
    }
    return idx;
}
/// <summary>
/// Build a., a/b, a/c/e, a/c/f, a/d, a0b and verify the cache-tree
/// hierarchy root -> "a" -> "a/c": names, paths, entry spans and child
/// counts must all match, and no tree is valid before being written.
/// </summary>
public virtual void TestTwoLevelSubtree()
{
    DirCache dc = db.ReadDirCache();
    string[] paths = new string[] { "a.", "a/b", "a/c/e", "a/c/f", "a/d", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    // Expected index ranges of the "a/" and "a/c/" entry runs.
    int aFirst = 1;
    int aLast = 4;
    int acFirst = 2;
    int acLast = 3;
    DirCacheBuilder b = dc.Builder();
    for (int i_1 = 0; i_1 < ents.Length; i_1++)
    {
        b.Add(ents[i_1]);
    }
    b.Finish();
    // No cache tree exists until one is requested with build=true.
    NUnit.Framework.Assert.IsNull(dc.GetCacheTree(false));
    DirCacheTree root = dc.GetCacheTree(true);
    NUnit.Framework.Assert.IsNotNull(root);
    NUnit.Framework.Assert.AreSame(root, dc.GetCacheTree(true));
    NUnit.Framework.Assert.AreEqual(string.Empty, root.GetNameString());
    NUnit.Framework.Assert.AreEqual(string.Empty, root.GetPathString());
    NUnit.Framework.Assert.AreEqual(1, root.GetChildCount());
    NUnit.Framework.Assert.AreEqual(dc.GetEntryCount(), root.GetEntrySpan());
    NUnit.Framework.Assert.IsFalse(root.IsValid());
    // Child "a" spans the four a/ entries and has one child of its own.
    DirCacheTree aTree = root.GetChild(0);
    NUnit.Framework.Assert.IsNotNull(aTree);
    NUnit.Framework.Assert.AreSame(aTree, root.GetChild(0));
    NUnit.Framework.Assert.AreEqual("a", aTree.GetNameString());
    NUnit.Framework.Assert.AreEqual("a/", aTree.GetPathString());
    NUnit.Framework.Assert.AreEqual(1, aTree.GetChildCount());
    NUnit.Framework.Assert.AreEqual(aLast - aFirst + 1, aTree.GetEntrySpan());
    NUnit.Framework.Assert.IsFalse(aTree.IsValid());
    // Grandchild "a/c" spans the two a/c/ entries and is a leaf tree.
    DirCacheTree acTree = aTree.GetChild(0);
    NUnit.Framework.Assert.IsNotNull(acTree);
    NUnit.Framework.Assert.AreSame(acTree, aTree.GetChild(0));
    NUnit.Framework.Assert.AreEqual("c", acTree.GetNameString());
    NUnit.Framework.Assert.AreEqual("a/c/", acTree.GetPathString());
    NUnit.Framework.Assert.AreEqual(0, acTree.GetChildCount());
    NUnit.Framework.Assert.AreEqual(acLast - acFirst + 1, acTree.GetEntrySpan());
    NUnit.Framework.Assert.IsFalse(acTree.IsValid());
}
/// <summary>
/// Updates the file in the working tree with content and mode from an entry
/// in the index.
/// </summary>
/// <remarks>
/// Updates the file in the working tree with content and mode from an entry
/// in the index. The new content is first written to a new temporary file in
/// the same directory as the real file. Then that new file is renamed to the
/// final filename, so readers never observe a half-written file.
/// TODO: this method works directly on File IO, we may need another
/// abstraction (like WorkingTreeIterator). This way we could tell e.g.
/// Eclipse that Files in the workspace got changed
/// </remarks>
/// <param name="repo">repository the entry's blob is loaded from.</param>
/// <param name="f">
/// the file to be modified. The parent directory for this file
/// has to exist already
/// </param>
/// <param name="entry">the entry containing new mode and content</param>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public static void CheckoutEntry(Repository repo, FilePath f, DirCacheEntry entry
    )
{
    ObjectLoader ol = repo.Open(entry.GetObjectId());
    FilePath parentDir = f.GetParentFile();
    FilePath tmpFile = FilePath.CreateTempFile("._" + f.GetName(), null, parentDir);
    FileOutputStream channel = new FileOutputStream(tmpFile);
    try
    {
        ol.CopyTo(channel);
    }
    finally
    {
        channel.Close();
    }
    FS fs = repo.FileSystem;
    WorkingTreeOptions opt = repo.GetConfig().Get(WorkingTreeOptions.KEY);
    // Mirror the executable bit onto the temp file when core.filemode
    // applies and the filesystem supports it.
    if (opt.IsFileMode() && fs.SupportsExecute())
    {
        if (FileMode.EXECUTABLE_FILE.Equals(entry.RawMode))
        {
            if (!fs.CanExecute(tmpFile))
            {
                fs.SetExecute(tmpFile, true);
            }
        }
        else
        {
            if (fs.CanExecute(tmpFile))
            {
                fs.SetExecute(tmpFile, false);
            }
        }
    }
    if (!tmpFile.RenameTo(f))
    {
        // tried to rename which failed. Let' delete the target file and try
        // again
        FileUtils.Delete(f);
        if (!tmpFile.RenameTo(f))
        {
            throw new IOException(MessageFormat.Format(JGitText.Get().couldNotWriteFile, tmpFile
                .GetPath(), f.GetPath()));
        }
    }
    // Record fresh stat data so the entry is not reported as modified.
    entry.LastModified = f.LastModified();
    entry.SetLength((int)ol.GetSize());
}
/// <summary>Update any smudged entries with information from the working tree.</summary>
/// <remarks>
/// Walks the working tree for every entry flagged as smudged and, when the
/// on-disk content still matches the index (same object id), refreshes the
/// cached length and last-modified time from the file.
/// </remarks>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
private void UpdateSmudgedEntries()
{
    TreeWalk walk = new TreeWalk(repository);
    IList<string> paths = new AList<string>(128);
    try
    {
        // Collect the paths of all smudged entries first.
        for (int i = 0; i < entryCnt; i++)
        {
            if (sortedEntries[i].IsSmudged)
            {
                paths.AddItem(sortedEntries[i].PathString);
            }
        }
        if (paths.IsEmpty())
        {
            return;
        }
        // Walk only those paths, pairing the index against the file tree.
        walk.Filter = PathFilterGroup.CreateFromStrings(paths);
        DirCacheIterator iIter = new DirCacheIterator(this);
        FileTreeIterator fIter = new FileTreeIterator(repository);
        walk.AddTree(iIter);
        walk.AddTree(fIter);
        walk.Recursive = true;
        while (walk.Next())
        {
            iIter = walk.GetTree<DirCacheIterator>(0);
            if (iIter == null)
            {
                continue;
            }
            fIter = walk.GetTree<FileTreeIterator>(1);
            if (fIter == null)
            {
                continue;
            }
            DirCacheEntry entry = iIter.GetDirCacheEntry();
            // Refresh metadata only when the file content is unchanged.
            if (entry.IsSmudged && iIter.IdEqual(fIter))
            {
                entry.SetLength(fIter.GetEntryLength());
                entry.LastModified = fIter.GetEntryLastModified();
            }
        }
    }
    finally
    {
        walk.Release();
    }
}
/// <summary>
/// A non-recursive walk over a cache with one subtree must visit "a." as a
/// file, "a" as a single TREE node (not its contents), then "a0b" — the
/// expected-path/mode/position tables below encode exactly that sequence.
/// </summary>
public virtual void TestSingleSubtree_NoRecursion()
{
    DirCache dc = DirCache.NewInCore();
    string[] paths = new string[] { "a.", "a/b", "a/c", "a/d", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = FileMode.REGULAR_FILE;
    }
    DirCacheBuilder b = dc.Builder();
    for (int i_1 = 0; i_1 < ents.Length; i_1++)
    {
        b.Add(ents[i_1]);
    }
    b.Finish();
    // Expected walk order; position -1 marks the subtree, which has no
    // single backing entry.
    string[] expPaths = new string[] { "a.", "a", "a0b" };
    FileMode[] expModes = new FileMode[] { FileMode.REGULAR_FILE, FileMode.TREE, FileMode
        .REGULAR_FILE };
    int[] expPos = new int[] { 0, -1, 4 };
    DirCacheIterator i_2 = new DirCacheIterator(dc);
    TreeWalk tw = new TreeWalk(db);
    tw.AddTree(i_2);
    tw.Recursive = false;
    int pathIdx = 0;
    while (tw.Next())
    {
        NUnit.Framework.Assert.AreSame(i_2, tw.GetTree<DirCacheIterator>(0));
        NUnit.Framework.Assert.AreEqual(expModes[pathIdx].GetBits(), tw.GetRawMode(0));
        NUnit.Framework.Assert.AreSame(expModes[pathIdx], tw.GetFileMode(0));
        NUnit.Framework.Assert.AreEqual(expPaths[pathIdx], tw.PathString);
        if (expPos[pathIdx] >= 0)
        {
            // File position: iterator pointer and entry must line up.
            NUnit.Framework.Assert.AreEqual(expPos[pathIdx], i_2.ptr);
            NUnit.Framework.Assert.AreSame(ents[expPos[pathIdx]], i_2.GetDirCacheEntry());
        }
        else
        {
            // Subtree position: only the TREE mode is observable.
            NUnit.Framework.Assert.AreSame(FileMode.TREE, tw.GetFileMode(0));
        }
        pathIdx++;
    }
    NUnit.Framework.Assert.AreEqual(expPaths.Length, pathIdx);
}
/// <summary>
/// Adding an entry whose FileMode was never set must be rejected with an
/// ArgumentException carrying the path in its message.
/// </summary>
public virtual void TestBuildRejectsUnsetFileMode()
{
    DirCache dc = DirCache.NewInCore();
    DirCacheBuilder b = dc.Builder();
    NUnit.Framework.Assert.IsNotNull(b);
    DirCacheEntry e = new DirCacheEntry("a");
    NUnit.Framework.Assert.AreEqual(0, e.RawMode);
    try
    {
        b.Add(e);
        // FIX: the original test fell through silently when no exception
        // was thrown, so it would pass even if the guard were removed.
        NUnit.Framework.Assert.Fail("expected ArgumentException for unset FileMode");
    }
    catch (ArgumentException err)
    {
        NUnit.Framework.Assert.AreEqual("FileMode not set for path a", err.Message);
    }
}
/// <summary>
/// A path-filtered walk over a build iterator must stop at the matched
/// entry without skipping the unmatched tail, so re-adding that one entry
/// and finishing leaves the full cache intact.
/// </summary>
public virtual void TestPathFilterGroup_DoesNotSkipTail()
{
    DirCache dc = db.ReadDirCache();
    FileMode mode = FileMode.REGULAR_FILE;
    string[] paths = new string[] { "a.", "a/b", "a/c", "a/d", "a0b" };
    DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
    for (int i = 0; i < paths.Length; i++)
    {
        ents[i] = new DirCacheEntry(paths[i]);
        ents[i].FileMode = mode;
    }
    // Populate the cache with all five entries.
    {
        DirCacheBuilder b = dc.Builder();
        for (int i_1 = 0; i_1 < ents.Length; i_1++)
        {
            b.Add(ents[i_1]);
        }
        b.Finish();
    }
    // Walk with a filter matching only a/c (index 2).
    int expIdx = 2;
    DirCacheBuilder b_1 = dc.Builder();
    TreeWalk tw = new TreeWalk(db);
    tw.AddTree(new DirCacheBuildIterator(b_1));
    tw.Recursive = true;
    tw.Filter = PathFilterGroup.CreateFromStrings(Collections.Singleton(paths[expIdx]
        ));
    NUnit.Framework.Assert.IsTrue(tw.Next(), "found " + paths[expIdx]);
    DirCacheIterator c = tw.GetTree<DirCacheIterator>(0);
    NUnit.Framework.Assert.IsNotNull(c);
    NUnit.Framework.Assert.AreEqual(expIdx, c.ptr);
    NUnit.Framework.Assert.AreSame(ents[expIdx], c.GetDirCacheEntry());
    NUnit.Framework.Assert.AreEqual(paths[expIdx], tw.PathString);
    NUnit.Framework.Assert.AreEqual(mode.GetBits(), tw.GetRawMode(0));
    NUnit.Framework.Assert.AreSame(mode, tw.GetFileMode(0));
    b_1.Add(c.GetDirCacheEntry());
    NUnit.Framework.Assert.IsFalse(tw.Next(), "no more entries");
    // Finishing must keep the tail entries the filter never visited.
    b_1.Finish();
    NUnit.Framework.Assert.AreEqual(ents.Length, dc.GetEntryCount());
    for (int i_2 = 0; i_2 < ents.Length; i_2++)
    {
        NUnit.Framework.Assert.AreSame(ents[i_2], dc.GetEntry(i_2));
    }
}
public virtual void TestPathFilterGroup_DoesNotSkipTail()
{
	// Populate an index, then walk it through a path-filtered build
	// iterator and verify the rebuild keeps all entries, including the
	// ones sorting after the filtered path.
	DirCache dc = db.ReadDirCache();
	FileMode mode = FileMode.REGULAR_FILE;
	string[] paths = new string[] { "a.", "a/b", "a/c", "a/d", "a0b" };
	DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
	for (int n = 0; n < paths.Length; n++)
	{
		DirCacheEntry ent = new DirCacheEntry(paths[n]);
		ent.FileMode = mode;
		ents[n] = ent;
	}
	{
		DirCacheBuilder initial = dc.Builder();
		for (int n = 0; n < ents.Length; n++)
		{
			initial.Add(ents[n]);
		}
		initial.Finish();
	}
	int expIdx = 2;
	DirCacheBuilder rebuild = dc.Builder();
	TreeWalk tw = new TreeWalk(db);
	tw.AddTree(new DirCacheBuildIterator(rebuild));
	tw.Recursive = true;
	tw.Filter = PathFilterGroup.CreateFromStrings(Collections.Singleton(paths[expIdx]));
	NUnit.Framework.Assert.IsTrue(tw.Next(), "found " + paths[expIdx]);
	DirCacheIterator c = tw.GetTree<DirCacheIterator>(0);
	NUnit.Framework.Assert.IsNotNull(c);
	NUnit.Framework.Assert.AreEqual(expIdx, c.ptr);
	NUnit.Framework.Assert.AreSame(ents[expIdx], c.GetDirCacheEntry());
	NUnit.Framework.Assert.AreEqual(paths[expIdx], tw.PathString);
	NUnit.Framework.Assert.AreEqual(mode.GetBits(), tw.GetRawMode(0));
	NUnit.Framework.Assert.AreSame(mode, tw.GetFileMode(0));
	rebuild.Add(c.GetDirCacheEntry());
	NUnit.Framework.Assert.IsFalse(tw.Next(), "no more entries");
	rebuild.Finish();
	NUnit.Framework.Assert.AreEqual(ents.Length, dc.GetEntryCount());
	for (int n = 0; n < ents.Length; n++)
	{
		NUnit.Framework.Assert.AreSame(ents[n], dc.GetEntry(n));
	}
}
/// <summary>Append one entry onto the end of the entry list.</summary>
/// <remarks>
/// Append one entry onto the end of the entry list. The caller is
/// responsible for ensuring the final table is correctly sorted. The
/// <see cref="entries">entries</see> table grows automatically when full.
/// </remarks>
/// <param name="newEntry">the new entry to add.</param>
protected internal virtual void FastAdd(DirCacheEntry newEntry)
{
	// Grow by roughly 1.5x, with a minimum bump so tiny tables expand too.
	if (entryCnt == entries.Length)
	{
		DirCacheEntry[] bigger = new DirCacheEntry[(entryCnt + 16) * 3 / 2];
		System.Array.Copy(entries, 0, bigger, 0, entryCnt);
		entries = bigger;
	}
	entries[entryCnt] = newEntry;
	entryCnt++;
}
/// <summary>Updates the index after a content merge has happened.</summary>
/// <remarks>
/// Updates the index after a content merge has happened. If no conflict has
/// occurred this includes persisting the merged content to the object
/// database. In case of conflicts this method takes care to write the
/// correct stages to the index.
/// </remarks>
/// <param name="base">tree parser positioned at the common base version</param>
/// <param name="ours">tree parser positioned at our version</param>
/// <param name="theirs">tree parser positioned at their version</param>
/// <param name="result">the outcome of the content merge</param>
/// <param name="of">the file holding the (possibly conflict-marked) merged content</param>
/// <exception cref="System.IO.FileNotFoundException">System.IO.FileNotFoundException
/// </exception>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
private void UpdateIndex(CanonicalTreeParser @base, CanonicalTreeParser ours, CanonicalTreeParser theirs, MergeResult<RawText> result, FilePath of)
{
	if (result.ContainsConflicts())
	{
		// A conflict occurred: the file contains conflict markers. The
		// index gets all three stages; only the workdir (if used) holds
		// the halfway-merged content.
		Add(tw.RawPath, @base, DirCacheEntry.STAGE_1);
		Add(tw.RawPath, ours, DirCacheEntry.STAGE_2);
		Add(tw.RawPath, theirs, DirCacheEntry.STAGE_3);
		mergeResults.Put(tw.PathString, result.Upcast ());
	}
	else
	{
		// No conflict: the file holds fully merged content; record the
		// merged version at stage 0.
		DirCacheEntry dce = new DirCacheEntry(tw.PathString);
		int newMode = MergeFileModes(tw.GetRawMode(0), tw.GetRawMode(1), tw.GetRawMode(2));
		// Set the mode for the new content. Fall back to REGULAR_FILE if
		// the modes of OURS and THEIRS cannot be merged.
		dce.FileMode = (newMode == FileMode.MISSING.GetBits()) ? FileMode.REGULAR_FILE : FileMode.FromBits(newMode);
		dce.LastModified = of.LastModified();
		dce.SetLength((int)of.Length());
		InputStream @is = new FileInputStream(of);
		try
		{
			dce.SetObjectId(oi.Insert(Constants.OBJ_BLOB, of.Length(), @is));
		}
		finally
		{
			// Always close the stream; in-core merges also discard the
			// temporary file once its blob is stored.
			@is.Close();
			if (inCore)
			{
				FileUtils.Delete(of);
			}
		}
		builder.Add(dce);
	}
}
// Builds the exception reported when an entry violates stage rules,
// formatted as "<message>: <stage> <path>".
private static InvalidOperationException Bad(DirCacheEntry a, string msg)
{
	string detail = msg + ": " + a.Stage + " " + a.PathString;
	return new InvalidOperationException(detail);
}
// Snapshots the current tree-walk position as an index entry at the
// requested merge stage.
private DirCacheEntry ToEntry(int stage, TreeWalk tw)
{
	DirCacheEntry entry = new DirCacheEntry(tw.RawPath, stage);
	AbstractTreeIterator itr = tw.GetTree<AbstractTreeIterator>(0);
	entry.FileMode = tw.GetFileMode(0);
	entry.SetObjectIdFromRaw(itr.IdBuffer, itr.IdOffset);
	return entry;
}
/// <summary>Resets the index to represent exactly some filesystem content.</summary>
/// <remarks>
/// Resets the index to represent exactly some filesystem content. E.g. the
/// following call will replace the index with the working tree content:
/// <p>
/// <code>resetIndex(new FileSystemIterator(db))</code>
/// <p>
/// This method can be used by testcases which first prepare a new commit
/// somewhere in the filesystem (e.g. in the working-tree) and then want to
/// have an index which matches their prepared content.
/// </remarks>
/// <param name="treeItr">
/// a
/// <see cref="NGit.Treewalk.FileTreeIterator">NGit.Treewalk.FileTreeIterator</see>
/// which determines which files should
/// go into the new index
/// </param>
/// <exception cref="System.IO.FileNotFoundException">System.IO.FileNotFoundException
/// </exception>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
protected internal virtual void ResetIndex(FileTreeIterator treeItr)
{
	ObjectInserter inserter = db.NewObjectInserter();
	try
	{
		DirCacheBuilder builder = db.LockDirCache().Builder();
		DirCacheEntry dce;
		while (!treeItr.Eof)
		{
			long len = treeItr.GetEntryLength();
			dce = new DirCacheEntry(treeItr.EntryPathString);
			dce.FileMode = treeItr.EntryFileMode;
			dce.LastModified = treeItr.GetEntryLastModified();
			dce.SetLength((int)len);
			FileInputStream @in = new FileInputStream(treeItr.GetEntryFile());
			try
			{
				dce.SetObjectId(inserter.Insert(Constants.OBJ_BLOB, len, @in));
			}
			finally
			{
				// Fix: close the stream even if Insert throws.
				@in.Close();
			}
			builder.Add(dce);
			treeItr.Next(1);
		}
		builder.Commit();
		inserter.Flush();
	}
	finally
	{
		// Fix: release the inserter on all paths, not only on success.
		inserter.Release();
	}
}
/// <summary>adds a new path with the specified stage to the index builder</summary>
/// <param name="path">raw path of the entry to add</param>
/// <param name="p">tree parser supplying mode and object id; may be null</param>
/// <param name="stage">the merge stage for the new entry</param>
/// <returns>the entry which was added to the index, or null if nothing was added</returns>
private DirCacheEntry Add(byte[] path, CanonicalTreeParser p, int stage)
{
	// Trees never go into the index; only blob-like entries are recorded.
	if (p == null || p.EntryFileMode.Equals(FileMode.TREE))
	{
		return null;
	}
	DirCacheEntry e = new DirCacheEntry(path, stage);
	e.FileMode = p.EntryFileMode;
	e.SetObjectId(p.EntryObjectId);
	builder.Add(e);
	return e;
}
public override void Apply(DirCacheEntry ent)
{
	// Record the staged blob on the entry: content id, size, and
	// regular-file mode.
	ent.SetObjectId(data);
	ent.SetLength(length);
	ent.FileMode = FileMode.REGULAR_FILE;
}
public override void Apply(DirCacheEntry ent)
{
	// Point the entry at the inserted .gitmodules blob as a regular file.
	ent.SetObjectId(gitmodulesBlob);
	ent.FileMode = FileMode.REGULAR_FILE;
}
/// <summary>
/// Inserts the file's content as a blob and adds a matching entry at the
/// given stage to the builder.
/// </summary>
/// <param name="path">index path for the new entry</param>
/// <param name="file">file whose content backs the entry</param>
/// <param name="newObjectInserter">inserter used to store the blob</param>
/// <param name="builder">builder receiving the new entry</param>
/// <param name="stage">merge stage of the new entry</param>
/// <returns>the entry which was added to the builder</returns>
/// <exception cref="System.IO.IOException"></exception>
private DirCacheEntry AddEntryToBuilder(string path, FilePath file, ObjectInserter newObjectInserter, DirCacheBuilder builder, int stage)
{
	FileInputStream inputStream = new FileInputStream(file);
	ObjectId id;
	try
	{
		id = newObjectInserter.Insert(Constants.OBJ_BLOB, file.Length(), inputStream);
	}
	finally
	{
		// Fix: close the stream even when Insert throws.
		inputStream.Close();
	}
	DirCacheEntry entry = new DirCacheEntry(path, stage);
	entry.SetObjectId(id);
	entry.FileMode = FileMode.REGULAR_FILE;
	entry.LastModified = file.LastModified();
	entry.SetLength((int)file.Length());
	builder.Add(entry);
	return entry;
}
/// <summary>
/// adds a entry to the index builder which is a copy of the specified
/// DirCacheEntry
/// </summary>
/// <param name="e">the entry which should be copied</param>
/// <returns>the entry which was added to the index</returns>
private DirCacheEntry Keep(DirCacheEntry e)
{
	// Duplicate path, stage, mode, id, timestamp and length so the new
	// index carries an exact copy of the existing entry.
	DirCacheEntry copy = new DirCacheEntry(e.PathString, e.Stage);
	copy.FileMode = e.FileMode;
	copy.SetObjectId(e.GetObjectId());
	copy.LastModified = e.LastModified;
	copy.SetLength(e.Length);
	builder.Add(copy);
	return copy;
}
/// <summary>
/// Prepares a repository with one tracked file, one nested tracked file,
/// staged modifications of both, and one untracked file.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="NGit.Api.Errors.JGitInternalException"></exception>
/// <exception cref="NGit.Api.Errors.GitAPIException"></exception>
public virtual void SetupRepository()
{
	// Start from a repository with an initial commit.
	git = new Git(db);
	initialCommit = git.Commit().SetMessage("initial commit").Call();
	// Create the nested file dir/b.txt.
	FilePath dir = new FilePath(db.WorkTree, "dir");
	FileUtils.Mkdir(dir);
	FilePath nestedFile = new FilePath(dir, "b.txt");
	FileUtils.CreateNewFile(nestedFile);
	PrintWriter nestedFileWriter = new PrintWriter(nestedFile);
	nestedFileWriter.Write("content");
	nestedFileWriter.Flush();
	// Create a.txt.
	indexFile = new FilePath(db.WorkTree, "a.txt");
	FileUtils.CreateNewFile(indexFile);
	PrintWriter writer = new PrintWriter(indexFile);
	writer.Write("content");
	writer.Flush();
	// Stage and commit both files.
	git.Add().AddFilepattern("dir").AddFilepattern("a.txt").Call();
	secondCommit = git.Commit().SetMessage("adding a.txt and dir/b.txt").Call();
	prestage = DirCache.Read(db.GetIndexFile(), db.FileSystem).GetEntry(indexFile.GetName());
	// Modify both files and re-stage; the writers are deliberately kept
	// open until here so the same stream appends the modification.
	writer.Write("new content");
	writer.Close();
	nestedFileWriter.Write("new content");
	nestedFileWriter.Close();
	git.Add().AddFilepattern("a.txt").AddFilepattern("dir").Call();
	// Finally, create a file that is never added to the index.
	untrackedFile = new FilePath(db.WorkTree, "notAddedToIndex.txt");
	FileUtils.CreateNewFile(untrackedFile);
	PrintWriter untrackedWriter = new PrintWriter(untrackedFile);
	untrackedWriter.Write("content");
	untrackedWriter.Close();
}
/// <summary>Add a range of existing entries from the destination cache.</summary>
/// <remarks>
/// Add a range of existing entries from the destination cache.
/// <p/>
/// The entries are appended in their current order; the caller must ensure
/// the final table is correctly sorted. Because the destination cache has
/// not yet been updated by this editor, offsets into it are unaffected by
/// edits currently in progress. The
/// <see cref="entries">entries</see>
/// table grows automatically when needed.
/// </remarks>
/// <param name="pos">first entry to copy from the destination cache.</param>
/// <param name="cnt">number of entries to copy.</param>
protected internal virtual void FastKeep(int pos, int cnt)
{
	int need = entryCnt + cnt;
	if (need > entries.Length)
	{
		// Grow to at least the required size, using the usual ~1.5x bump.
		int grown = (entryCnt + 16) * 3 / 2;
		DirCacheEntry[] bigger = new DirCacheEntry[Math.Max(grown, need)];
		System.Array.Copy(entries, 0, bigger, 0, entryCnt);
		entries = bigger;
	}
	cache.ToArray(pos, entries, entryCnt, cnt);
	entryCnt = need;
}
/// <summary>
/// Update the DirCache with the contents of
/// <see cref="entries">entries</see>
/// .
/// <p/>
/// Invoke only from an implementation of
/// <see cref="Finish()">Finish()</see>
/// , and only after
/// <see cref="entries">entries</see>
/// is sorted.
/// </summary>
protected internal virtual void Replace()
{
	// Trim excess capacity when the table is less than half used.
	if (entryCnt < entries.Length / 2)
	{
		DirCacheEntry[] trimmed = new DirCacheEntry[entryCnt];
		System.Array.Copy(entries, 0, trimmed, 0, entryCnt);
		entries = trimmed;
	}
	cache.Replace(entries, entryCnt);
}
public override void Apply(DirCacheEntry ent)
{
	// Install the blob id as a regular file and clear the update flag.
	ent.SetObjectId(id);
	ent.FileMode = FileMode.REGULAR_FILE;
	ent.IsUpdateNeeded = false;
}
// Reverts the given paths to their HEAD state: rebuilds the index from
// HEAD for the affected entries, checks out the reverted content, and
// notifies the IDE of changed files.
public override void Revert (FilePath[] localPaths, bool recurse, IProgressMonitor monitor)
{
	foreach (var group in localPaths.GroupBy (f => GetRepository (f))) {
		var repository = group.Key;
		var files = group.ToArray ();
		var c = GetHeadCommit (repository);
		RevTree tree = c != null ? c.Tree : null;
		List<FilePath> changedFiles = new List<FilePath> ();
		List<FilePath> removedFiles = new List<FilePath> ();
		monitor.BeginTask (GettextCatalog.GetString ("Reverting files"), 3);
		monitor.BeginStepTask (GettextCatalog.GetString ("Reverting files"), files.Length, 2);
		DirCache dc = repository.LockDirCache ();
		DirCacheBuilder builder = dc.Builder ();
		try {
			HashSet<string> entriesToRemove = new HashSet<string> ();
			HashSet<string> foldersToRemove = new HashSet<string> ();
			// Add the new entries
			foreach (FilePath fp in files) {
				string p = repository.ToGitPath (fp);
				// Register entries to be removed from the index
				if (Directory.Exists (fp))
					foldersToRemove.Add (p);
				else
					entriesToRemove.Add (p);
				TreeWalk tw = tree != null ? TreeWalk.ForPath (repository, p, tree) : null;
				if (tw == null) {
					// Not in HEAD: nothing to restore, the index entry is
					// simply dropped below.
				} else {
					// Add new entries
					TreeWalk r;
					if (tw.IsSubtree) {
						// It's a directory. Make sure we remove existing index
						// entries of this directory
						foldersToRemove.Add (p);
						// We have to iterate through all folder files. We need
						// a new iterator since the existing tw is not recursive
						r = new NGit.Treewalk.TreeWalk (repository);
						r.Reset (tree);
						r.Filter = PathFilterGroup.CreateFromStrings (new string[] { p });
						r.Recursive = true;
						r.Next ();
					} else {
						r = tw;
					}
					do {
						// There can be more than one entry if reverting a whole directory
						string rpath = repository.FromGitPath (r.PathString);
						DirCacheEntry e = new DirCacheEntry (r.PathString);
						e.SetObjectId (r.GetObjectId (0));
						e.FileMode = r.GetFileMode (0);
						// Fix: create the entry's PARENT directory (the one the
						// Exists check actually tests), not a directory named
						// after the file itself.
						string parentDir = Path.GetDirectoryName (rpath);
						if (!Directory.Exists (parentDir))
							Directory.CreateDirectory (parentDir);
						DirCacheCheckout.CheckoutEntry (repository, rpath, e);
						builder.Add (e);
						changedFiles.Add (rpath);
					} while (r.Next ());
				}
				monitor.Step (1);
			}
			// Add entries we want to keep
			int count = dc.GetEntryCount ();
			for (int n = 0; n < count; n++) {
				DirCacheEntry e = dc.GetEntry (n);
				string path = e.PathString;
				if (!entriesToRemove.Contains (path) && !foldersToRemove.Any (f => IsSubpath (f, path)))
					builder.Add (e);
			}
			builder.Commit ();
		} catch {
			// Commit() releases the lock on success; release manually on failure.
			dc.Unlock ();
			throw;
		}
		monitor.EndTask ();
		monitor.BeginTask (null, files.Length);
		foreach (FilePath p in changedFiles) {
			FileService.NotifyFileChanged (p);
			monitor.Step (1);
		}
		foreach (FilePath p in removedFiles) {
			FileService.NotifyFileRemoved (p);
			monitor.Step (1);
		}
		monitor.EndTask ();
	}
}
public virtual void TestNoSubtree_WithTreeWalk()
{
	// Two flat entries with distinct modes; the walk must report each
	// exactly once with its own mode.
	DirCache dc = DirCache.NewInCore();
	string[] paths = { "a.", "a0b" };
	FileMode[] modes = { FileMode.EXECUTABLE_FILE, FileMode.GITLINK };
	DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
	for (int idx = 0; idx < paths.Length; idx++)
	{
		ents[idx] = new DirCacheEntry(paths[idx]);
		ents[idx].FileMode = modes[idx];
	}
	DirCacheBuilder b = dc.Builder();
	foreach (DirCacheEntry ent in ents)
	{
		b.Add(ent);
	}
	b.Finish();
	DirCacheIterator it = new DirCacheIterator(dc);
	TreeWalk tw = new TreeWalk(db);
	tw.AddTree(it);
	int pathIdx = 0;
	while (tw.Next())
	{
		NUnit.Framework.Assert.AreSame(it, tw.GetTree<DirCacheIterator>(0));
		NUnit.Framework.Assert.AreEqual(pathIdx, it.ptr);
		NUnit.Framework.Assert.AreSame(ents[pathIdx], it.GetDirCacheEntry());
		NUnit.Framework.Assert.AreEqual(paths[pathIdx], tw.PathString);
		NUnit.Framework.Assert.AreEqual(modes[pathIdx].GetBits(), tw.GetRawMode(0));
		NUnit.Framework.Assert.AreSame(modes[pathIdx], tw.GetFileMode(0));
		pathIdx++;
	}
	NUnit.Framework.Assert.AreEqual(paths.Length, pathIdx);
}
/// <summary>
/// Performs a three-way content merge of the current path, writes the
/// result (to the worktree, or to a temp file when in-core), and records
/// either the merged stage-0 entry or the three conflict stages.
/// </summary>
/// <returns>true if the merge was conflict free, false otherwise</returns>
/// <exception cref="System.IO.FileNotFoundException"></exception>
/// <exception cref="System.InvalidOperationException"></exception>
/// <exception cref="System.IO.IOException"></exception>
private bool ContentMerge(CanonicalTreeParser @base, CanonicalTreeParser ours, CanonicalTreeParser theirs)
{
	MergeFormatter fmt = new MergeFormatter();
	// A missing base is treated as an empty file for the 3-way merge.
	RawText baseText = @base == null ? RawText.EMPTY_TEXT : GetRawText(@base.EntryObjectId, db);
	// do the merge
	MergeResult<RawText> result = mergeAlgorithm.Merge(RawTextComparator.DEFAULT, baseText, GetRawText(ours.EntryObjectId, db), GetRawText(theirs.EntryObjectId, db));
	FilePath of = null;
	FileOutputStream fos;
	if (!inCore)
	{
		FilePath workTree = db.WorkTree;
		if (workTree == null)
		{
			// TODO: This should be handled by WorkingTreeIterators which
			// support write operations
			throw new NotSupportedException();
		}
		// Write the (possibly conflict-marked) merge result into the worktree.
		of = new FilePath(workTree, tw.PathString);
		fos = new FileOutputStream(of);
		try
		{
			fmt.FormatMerge(fos, result, Arrays.AsList(commitNames), Constants.CHARACTER_ENCODING);
		}
		finally
		{
			fos.Close();
		}
	}
	else
	{
		if (!result.ContainsConflicts())
		{
			// When working inCore, only trivial merges can be handled,
			// so we generate objects only in conflict free cases
			of = FilePath.CreateTempFile("merge_", "_temp", null);
			fos = new FileOutputStream(of);
			try
			{
				fmt.FormatMerge(fos, result, Arrays.AsList(commitNames), Constants.CHARACTER_ENCODING);
			}
			finally
			{
				fos.Close();
			}
		}
	}
	if (result.ContainsConflicts())
	{
		// A conflict occurred: the file contains conflict markers. The
		// index gets all three stages; only the workdir (if used) holds
		// the halfway-merged content.
		Add(tw.RawPath, @base, DirCacheEntry.STAGE_1);
		Add(tw.RawPath, ours, DirCacheEntry.STAGE_2);
		Add(tw.RawPath, theirs, DirCacheEntry.STAGE_3);
		mergeResults.Put(tw.PathString, result.Upcast ());
		return false;
	}
	else
	{
		// No conflict: the file holds fully merged content; record the
		// merged version as a stage-0 index entry.
		DirCacheEntry dce = new DirCacheEntry(tw.PathString);
		dce.FileMode = tw.GetFileMode(0);
		dce.LastModified = of.LastModified();
		dce.SetLength((int)of.Length());
		InputStream @is = new FileInputStream(of);
		try
		{
			dce.SetObjectId(oi.Insert(Constants.OBJ_BLOB, of.Length(), @is));
		}
		finally
		{
			@is.Close();
			// In-core merges discard the temp file once its blob is stored.
			if (inCore)
			{
				FileUtils.Delete(of);
			}
		}
		builder.Add(dce);
		return true;
	}
}
public virtual void TestSingleSubtree_NoRecursion()
{
	// With recursion disabled, the "a/" entries collapse into one TREE
	// row between the "a." and "a0b" siblings.
	DirCache dc = DirCache.NewInCore();
	string[] paths = new string[] { "a.", "a/b", "a/c", "a/d", "a0b" };
	DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
	for (int n = 0; n < paths.Length; n++)
	{
		DirCacheEntry ent = new DirCacheEntry(paths[n]);
		ent.FileMode = FileMode.REGULAR_FILE;
		ents[n] = ent;
	}
	DirCacheBuilder builder = dc.Builder();
	for (int n = 0; n < ents.Length; n++)
	{
		builder.Add(ents[n]);
	}
	builder.Finish();
	string[] expPaths = new string[] { "a.", "a", "a0b" };
	FileMode[] expModes = new FileMode[] { FileMode.REGULAR_FILE, FileMode.TREE, FileMode.REGULAR_FILE };
	// Position -1 is the synthesized tree row without a cache entry.
	int[] expPos = new int[] { 0, -1, 4 };
	DirCacheIterator dci = new DirCacheIterator(dc);
	TreeWalk tw = new TreeWalk(db);
	tw.AddTree(dci);
	tw.Recursive = false;
	int pathIdx = 0;
	while (tw.Next())
	{
		NUnit.Framework.Assert.AreSame(dci, tw.GetTree<DirCacheIterator>(0));
		NUnit.Framework.Assert.AreEqual(expModes[pathIdx].GetBits(), tw.GetRawMode(0));
		NUnit.Framework.Assert.AreSame(expModes[pathIdx], tw.GetFileMode(0));
		NUnit.Framework.Assert.AreEqual(expPaths[pathIdx], tw.PathString);
		if (expPos[pathIdx] >= 0)
		{
			NUnit.Framework.Assert.AreEqual(expPos[pathIdx], dci.ptr);
			NUnit.Framework.Assert.AreSame(ents[expPos[pathIdx]], dci.GetDirCacheEntry());
		}
		else
		{
			NUnit.Framework.Assert.AreSame(FileMode.TREE, tw.GetFileMode(0));
		}
		pathIdx++;
	}
	NUnit.Framework.Assert.AreEqual(expPaths.Length, pathIdx);
}
/// <summary>Append one entry into the resulting entry list.</summary>
/// <remarks>
/// Append one entry into the resulting entry list.
/// <p>
/// The entry goes at the end of the list. If this makes the list
/// incorrectly sorted, a final sorting phase is automatically enabled
/// within
/// <see cref="Finish()">Finish()</see>
/// .
/// <p>
/// The internal entry table grows automatically when needed.
/// </remarks>
/// <param name="newEntry">the new entry to add.</param>
/// <exception cref="System.ArgumentException">If the FileMode of the entry was not set by the caller.
/// </exception>
public virtual void Add(DirCacheEntry newEntry)
{
	// Entries without an assigned file mode are invalid in the index.
	if (newEntry.RawMode == 0)
	{
		string reason = MessageFormat.Format(JGitText.Get().fileModeNotSetForPath, newEntry.PathString);
		throw new ArgumentException(reason);
	}
	BeforeAdd(newEntry);
	FastAdd(newEntry);
}
public virtual void TestTwoLevelSubtree_Recursive()
{
	// A recursive walk over nested subtrees must visit every file entry
	// in index order, never reporting the intermediate trees.
	DirCache dc = DirCache.NewInCore();
	FileMode mode = FileMode.REGULAR_FILE;
	string[] paths = { "a.", "a/b", "a/c/e", "a/c/f", "a/d", "a0b" };
	DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
	for (int idx = 0; idx < paths.Length; idx++)
	{
		ents[idx] = new DirCacheEntry(paths[idx]);
		ents[idx].FileMode = mode;
	}
	DirCacheBuilder b = dc.Builder();
	foreach (DirCacheEntry ent in ents)
	{
		b.Add(ent);
	}
	b.Finish();
	TreeWalk tw = new TreeWalk(db);
	tw.AddTree(new DirCacheIterator(dc));
	tw.Recursive = true;
	int pathIdx = 0;
	while (tw.Next())
	{
		DirCacheIterator c = tw.GetTree<DirCacheIterator>(0);
		NUnit.Framework.Assert.IsNotNull(c);
		NUnit.Framework.Assert.AreEqual(pathIdx, c.ptr);
		NUnit.Framework.Assert.AreSame(ents[pathIdx], c.GetDirCacheEntry());
		NUnit.Framework.Assert.AreEqual(paths[pathIdx], tw.PathString);
		NUnit.Framework.Assert.AreEqual(mode.GetBits(), tw.GetRawMode(0));
		NUnit.Framework.Assert.AreSame(mode, tw.GetFileMode(0));
		pathIdx++;
	}
	NUnit.Framework.Assert.AreEqual(paths.Length, pathIdx);
}
// Validates the new entry against the current tail of the table and
// tracks whether a final sort will be required in Finish().
private void BeforeAdd(DirCacheEntry newEntry)
{
	// Nothing to compare against while unsorted or empty.
	if (!sorted || entryCnt == 0)
	{
		return;
	}
	DirCacheEntry lastEntry = entries[entryCnt - 1];
	int cr = DirCache.Cmp(lastEntry, newEntry);
	if (cr > 0)
	{
		// New entry sorts before the previous one; defer to a final sort.
		sorted = false;
		return;
	}
	if (cr == 0)
	{
		// Same file path; only stages 1-3 with no stage 0 may coexist.
		int peStage = lastEntry.Stage;
		int dceStage = newEntry.Stage;
		if (peStage == dceStage)
		{
			throw Bad(newEntry, JGitText.Get().duplicateStagesNotAllowed);
		}
		if (peStage == 0 || dceStage == 0)
		{
			throw Bad(newEntry, JGitText.Get().mixedStagesNotAllowed);
		}
		if (peStage > dceStage)
		{
			// Stages out of order; a final sort is required.
			sorted = false;
		}
	}
}
public virtual void TestTwoLevelSubtree_FilterPath()
{
	// Filtering on each individual path must locate exactly that entry
	// and nothing else, regardless of its nesting depth.
	DirCache dc = DirCache.NewInCore();
	FileMode mode = FileMode.REGULAR_FILE;
	string[] paths = { "a.", "a/b", "a/c/e", "a/c/f", "a/d", "a0b" };
	DirCacheEntry[] ents = new DirCacheEntry[paths.Length];
	for (int idx = 0; idx < paths.Length; idx++)
	{
		ents[idx] = new DirCacheEntry(paths[idx]);
		ents[idx].FileMode = mode;
	}
	DirCacheBuilder b = dc.Builder();
	foreach (DirCacheEntry ent in ents)
	{
		b.Add(ent);
	}
	b.Finish();
	TreeWalk tw = new TreeWalk(db);
	for (int victimIdx = 0; victimIdx < paths.Length; victimIdx++)
	{
		tw.Reset();
		tw.AddTree(new DirCacheIterator(dc));
		tw.Filter = PathFilterGroup.CreateFromStrings(Collections.Singleton(paths[victimIdx]));
		tw.Recursive = tw.Filter.ShouldBeRecursive();
		NUnit.Framework.Assert.IsTrue(tw.Next());
		DirCacheIterator c = tw.GetTree<DirCacheIterator>(0);
		NUnit.Framework.Assert.IsNotNull(c);
		NUnit.Framework.Assert.AreEqual(victimIdx, c.ptr);
		NUnit.Framework.Assert.AreSame(ents[victimIdx], c.GetDirCacheEntry());
		NUnit.Framework.Assert.AreEqual(paths[victimIdx], tw.PathString);
		NUnit.Framework.Assert.AreEqual(mode.GetBits(), tw.GetRawMode(0));
		NUnit.Framework.Assert.AreSame(mode, tw.GetFileMode(0));
		NUnit.Framework.Assert.IsFalse(tw.Next());
	}
}
public override void Apply(DirCacheEntry ent)
{
	// Record a submodule link: gitlink mode pointing at a commit id.
	ent.SetObjectId(id);
	ent.FileMode = FileMode.GITLINK;
}
/// <summary>Processes one path and tries to merge.</summary>
/// <remarks>
/// Processes one path and tries to merge. This method will do all
/// trivial (not content) merges and will also detect if a merge will fail.
/// The merge will fail when one of the following is true
/// <ul>
/// <li>the index entry does not match the entry in ours. When merging one
/// branch into the current HEAD, ours will point to HEAD and theirs will
/// point to the other branch. It is assumed that the index matches the HEAD
/// because it will only not match HEAD if it was populated before the merge
/// operation. But the merge commit should not accidentally contain
/// modifications done before the merge. Check the <a href=
/// "http://www.kernel.org/pub/software/scm/git/docs/git-read-tree.html#_3_way_merge"
/// >git read-tree</a> documentation for further explanations.</li>
/// <li>A conflict was detected and the working-tree file is dirty. When a
/// conflict is detected the content-merge algorithm will try to write a
/// merged version into the working-tree. If the file is dirty we would
/// override unsaved data.</li>
/// </remarks>
/// <param name="base">the common base for ours and theirs</param>
/// <param name="ours">
/// the ours side of the merge. When merging a branch into the
/// HEAD ours will point to HEAD
/// </param>
/// <param name="theirs">
/// the theirs side of the merge. When merging a branch into the
/// current HEAD theirs will point to the branch which is merged
/// into HEAD.
/// </param>
/// <param name="index">the index entry</param>
/// <param name="work">the file in the working tree</param>
/// <returns>
/// <code>false</code> if the merge will fail because the index entry
/// didn't match ours or the working-dir file was dirty and a
/// conflict occurred
/// </returns>
/// <exception cref="NGit.Errors.MissingObjectException">NGit.Errors.MissingObjectException
/// </exception>
/// <exception cref="NGit.Errors.IncorrectObjectTypeException">NGit.Errors.IncorrectObjectTypeException
/// </exception>
/// <exception cref="NGit.Errors.CorruptObjectException">NGit.Errors.CorruptObjectException
/// </exception>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
private bool ProcessEntry(CanonicalTreeParser @base, CanonicalTreeParser ours, CanonicalTreeParser theirs, DirCacheBuildIterator index, WorkingTreeIterator work)
{
	enterSubtree = true;
	int modeO = tw.GetRawMode(T_OURS);
	int modeT = tw.GetRawMode(T_THEIRS);
	int modeB = tw.GetRawMode(T_BASE);
	if (modeO == 0 && modeT == 0 && modeB == 0)
	{
		// File is either untracked or new, staged but uncommitted
		return true;
	}
	if (IsIndexDirty())
	{
		return false;
	}
	DirCacheEntry ourDce = null;
	if (index == null || index.GetDirCacheEntry() == null)
	{
		// create a fake DCE, but only if ours is valid. ours is kept only
		// in case it is valid, so a null ourDce is ok in all other cases.
		if (NonTree(modeO))
		{
			ourDce = new DirCacheEntry(tw.RawPath);
			ourDce.SetObjectId(tw.GetObjectId(T_OURS));
			ourDce.FileMode = tw.GetFileMode(T_OURS);
		}
	}
	else
	{
		ourDce = index.GetDirCacheEntry();
	}
	if (NonTree(modeO) && NonTree(modeT) && tw.IdEqual(T_OURS, T_THEIRS))
	{
		// OURS and THEIRS have equal content. Check the file mode
		if (modeO == modeT)
		{
			// content and mode of OURS and THEIRS are equal: it doesn't
			// matter which one we choose. OURS is chosen. Since the index
			// is clean (the index matches already OURS) we can keep the existing one
			Keep(ourDce);
			// no checkout needed!
			return true;
		}
		else
		{
			// same content but different mode on OURS and THEIRS.
			// Try to merge the mode and report an error if this is
			// not possible.
			int newMode = MergeFileModes(modeB, modeO, modeT);
			if (newMode != FileMode.MISSING.GetBits())
			{
				if (newMode == modeO)
				{
					// ours version is preferred
					Keep(ourDce);
				}
				else
				{
					// the preferred version THEIRS has a different mode
					// than ours. Check it out!
					if (IsWorktreeDirty(work))
					{
						return false;
					}
					// we know about length and lastMod only after we have written the new content.
					// This will happen later. Set these values to 0 for now.
					DirCacheEntry e = Add(tw.RawPath, theirs, DirCacheEntry.STAGE_0, 0, 0);
					toBeCheckedOut.Put(tw.PathString, e);
				}
				return true;
			}
			else
			{
				// FileModes are not mergeable. We found a conflict on modes.
				// For conflicting entries we don't know lastModified and length.
				Add(tw.RawPath, @base, DirCacheEntry.STAGE_1, 0, 0);
				Add(tw.RawPath, ours, DirCacheEntry.STAGE_2, 0, 0);
				Add(tw.RawPath, theirs, DirCacheEntry.STAGE_3, 0, 0);
				unmergedPaths.AddItem(tw.PathString);
				mergeResults.Put(tw.PathString, new MergeResult<RawText>(Sharpen.Collections.EmptyList<RawText>()).Upcast ());
			}
			return true;
		}
	}
	if (NonTree(modeO) && modeB == modeT && tw.IdEqual(T_BASE, T_THEIRS))
	{
		// THEIRS was not changed compared to BASE. All changes must be in
		// OURS. OURS is chosen. We can keep the existing entry.
		Keep(ourDce);
		// no checkout needed!
		return true;
	}
	if (modeB == modeO && tw.IdEqual(T_BASE, T_OURS))
	{
		// OURS was not changed compared to BASE. All changes must be in
		// THEIRS. THEIRS is chosen.
		// Check worktree before checking out THEIRS
		if (IsWorktreeDirty(work))
		{
			return false;
		}
		if (NonTree(modeT))
		{
			// we know about length and lastMod only after we have written
			// the new content.
			// This will happen later. Set these values to 0 for now.
			DirCacheEntry e = Add(tw.RawPath, theirs, DirCacheEntry.STAGE_0, 0, 0);
			if (e != null)
			{
				toBeCheckedOut.Put(tw.PathString, e);
			}
			return true;
		}
		else
		{
			if (modeT == 0 && modeB != 0)
			{
				// we want THEIRS ... but THEIRS contains the deletion of the
				// file
				toBeDeleted.AddItem(tw.PathString);
				return true;
			}
		}
	}
	if (tw.IsSubtree)
	{
		// file/folder conflicts: here I want to detect only file/folder
		// conflict between ours and theirs. file/folder conflicts between
		// base/index/workingTree and something else are not relevant or
		// detected later
		if (NonTree(modeO) && !NonTree(modeT))
		{
			if (NonTree(modeB))
			{
				Add(tw.RawPath, @base, DirCacheEntry.STAGE_1, 0, 0);
			}
			Add(tw.RawPath, ours, DirCacheEntry.STAGE_2, 0, 0);
			unmergedPaths.AddItem(tw.PathString);
			enterSubtree = false;
			return true;
		}
		if (NonTree(modeT) && !NonTree(modeO))
		{
			if (NonTree(modeB))
			{
				Add(tw.RawPath, @base, DirCacheEntry.STAGE_1, 0, 0);
			}
			Add(tw.RawPath, theirs, DirCacheEntry.STAGE_3, 0, 0);
			unmergedPaths.AddItem(tw.PathString);
			enterSubtree = false;
			return true;
		}
		// ours and theirs are both folders or both files (and treewalk
		// tells us we are in a subtree because of index or working-dir).
		// If they are both folders no content-merge is required - we can
		// return here.
		if (!NonTree(modeO))
		{
			return true;
		}
	}
	// ours and theirs are both files, just fall out of the if block
	// and do the content merge
	if (NonTree(modeO) && NonTree(modeT))
	{
		// Check worktree before modifying files
		if (IsWorktreeDirty(work))
		{
			return false;
		}
		MergeResult<RawText> result = ContentMerge(@base, ours, theirs);
		FilePath of = WriteMergedFile(result);
		UpdateIndex(@base, ours, theirs, result, of);
		if (result.ContainsConflicts())
		{
			unmergedPaths.AddItem(tw.PathString);
		}
		modifiedFiles.AddItem(tw.PathString);
	}
	else
	{
		if (modeO != modeT)
		{
			// OURS or THEIRS has been deleted
			if (((modeO != 0 && !tw.IdEqual(T_BASE, T_OURS)) || (modeT != 0 && !tw.IdEqual(T_BASE, T_THEIRS))))
			{
				Add(tw.RawPath, @base, DirCacheEntry.STAGE_1, 0, 0);
				Add(tw.RawPath, ours, DirCacheEntry.STAGE_2, 0, 0);
				DirCacheEntry e = Add(tw.RawPath, theirs, DirCacheEntry.STAGE_3, 0, 0);
				// OURS was deleted checkout THEIRS
				if (modeO == 0)
				{
					// Check worktree before checking out THEIRS
					if (IsWorktreeDirty(work))
					{
						return false;
					}
					if (NonTree(modeT))
					{
						if (e != null)
						{
							toBeCheckedOut.Put(tw.PathString, e);
						}
					}
				}
				unmergedPaths.AddItem(tw.PathString);
				// generate a MergeResult for the deleted file
				mergeResults.Put(tw.PathString, ContentMerge(@base, ours, theirs).Upcast ());
			}
		}
	}
	return true;
}