/// <summary>Restores this object's state from the given stream: the index count
/// followed by a length-prefixed list of serialized ranges.</summary>
/// <param name="in">stream positioned at a previously written representation</param>
/// <exception cref="System.IO.IOException"/>
public virtual void ReadFields(DataInput @in)
{
    lock (this)
    {
        indicesCount = @in.ReadLong();
        ranges = new TreeSet<SortedRanges.Range>();
        int remaining = @in.ReadInt();
        while (remaining-- > 0)
        {
            SortedRanges.Range next = new SortedRanges.Range();
            next.ReadFields(@in);
            ranges.AddItem(next);
        }
    }
}
/// <summary>Splits a delimited string into a sorted set of trimmed tokens.</summary>
/// <param name="str">the delimited input; may be null</param>
/// <param name="delimiter">separator between tokens</param>
/// <returns>sorted set of trimmed tokens, or null when <paramref name="str"/> is null</returns>
private static ICollection<string> ParseArrayStr(string str, string delimiter)
{
    if (str == null)
    {
        return null;
    }
    ICollection<string> tokens = new TreeSet<string>();
    foreach (string piece in str.Split(delimiter))
    {
        tokens.AddItem(piece.Trim());
    }
    return tokens;
}
/// <summary>
/// Adds an
/// <see cref="Apache.Http.Cookie.Cookie">HTTP cookie</see>
/// , replacing any existing equivalent cookies.
/// If the given cookie has already expired it will not be added, but existing
/// values will still be removed.
/// </summary>
/// <param name="cookie">
/// the
/// <see cref="Apache.Http.Cookie.Cookie">cookie</see>
/// to be added
/// </param>
/// <seealso cref="AddCookies(Apache.Http.Cookie.Cookie[])">AddCookies(Apache.Http.Cookie.Cookie[])
/// </seealso>
public virtual void AddCookie(Apache.Http.Cookie.Cookie cookie)
{
    lock (this)
    {
        if (cookie != null)
        {
            // first remove any old cookie that is equivalent
            cookies.Remove(cookie);
            // NOTE(review): in C# `new DateTime()` is DateTime.MinValue, whereas the
            // original Java `new Date()` meant "now". If IsExpired() compares against
            // the supplied instant, expired cookies are never filtered here —
            // confirm whether this should be DateTime.Now.
            if (!cookie.IsExpired(new DateTime()))
            {
                cookies.AddItem(cookie);
            }
        }
    }
}
/// <summary>
/// Test helper: submits <c>num</c> identical container requests to the RM, then
/// polls allocate() (at most twice) until the RM has satisfied the recorded
/// ANY-resource request count, caching NM tokens as they arrive.
/// </summary>
/// <param name="rmClient">the AM-RM client used to request and allocate</param>
/// <param name="num">number of containers to request</param>
/// <returns>the set of containers the RM allocated</returns>
/// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.YarnException"/>
/// <exception cref="System.IO.IOException"/>
private ICollection<Container> AllocateContainers(AMRMClientImpl<AMRMClient.ContainerRequest> rmClient, int num)
{
    // setup container request
    Resource capability = Resource.NewInstance(1024, 0);
    Priority priority = Priority.NewInstance(0);
    string node = nodeReports[0].GetNodeId().GetHost();
    string rack = nodeReports[0].GetRackName();
    string[] nodes = new string[] { node };
    string[] racks = new string[] { rack };
    for (int i = 0; i < num; ++i)
    {
        rmClient.AddContainerRequest(new AMRMClient.ContainerRequest(capability, nodes, racks, priority));
    }
    // read back how many ANY-placement containers the client actually recorded;
    // reaches into the client's internal request table (test-only access)
    int containersRequestedAny = rmClient.remoteRequestsTable[priority][ResourceRequest.Any][capability].remoteRequest.GetNumContainers();
    // RM should allocate container within 2 calls to allocate()
    int allocatedContainerCount = 0;
    int iterationsLeft = 2;
    ICollection<Container> containers = new TreeSet<Container>();
    while (allocatedContainerCount < containersRequestedAny && iterationsLeft > 0)
    {
        AllocateResponse allocResponse = rmClient.Allocate(0.1f);
        allocatedContainerCount += allocResponse.GetAllocatedContainers().Count;
        foreach (Container container in allocResponse.GetAllocatedContainers())
        {
            containers.AddItem(container);
        }
        // cache NM tokens so the AM can later communicate with the hosting NodeManagers
        if (!allocResponse.GetNMTokens().IsEmpty())
        {
            foreach (NMToken token in allocResponse.GetNMTokens())
            {
                rmClient.GetNMTokenCache().SetToken(token.GetNodeId().ToString(), token.GetToken());
            }
        }
        if (allocatedContainerCount < containersRequestedAny)
        {
            // sleep to let NM's heartbeat to RM and trigger allocations
            Sleep(1000);
        }
        --iterationsLeft;
    }
    return (containers);
}
/// <summary>Verifies that Clean restricted via SetPaths removes only the named
/// untracked file and leaves the rest of the working tree alone.</summary>
public virtual void TestCleanWithPaths()
{
    // precondition: the working tree must contain something untracked
    Status status = git.Status().Call();
    ICollection<string> untracked = status.GetUntracked();
    NUnit.Framework.Assert.IsTrue(untracked.Count > 0);
    // clean only a single named path
    ICollection<string> pathFilter = new TreeSet<string>();
    pathFilter.AddItem("File3.txt");
    ICollection<string> removed = git.Clean().SetPaths(pathFilter).Call();
    // exactly one untracked file should remain, and only File3.txt was cleaned
    untracked = git.Status().Call().GetUntracked();
    NUnit.Framework.Assert.AreEqual(1, untracked.Count);
    NUnit.Framework.Assert.IsTrue(removed.Contains("File3.txt"));
    NUnit.Framework.Assert.IsFalse(removed.Contains("File2.txt"));
}
/// <summary>Echoes every request parameter as a "name:value" line, in sorted
/// parameter-name order, then closes the response writer.</summary>
/// <param name="request">incoming HTTP request whose parameters are dumped</param>
/// <param name="response">response the dump is written to</param>
/// <exception cref="Javax.Servlet.ServletException"/>
/// <exception cref="System.IO.IOException"/>
protected override void DoGet(HttpServletRequest request, HttpServletResponse response)
{
    PrintWriter writer = response.GetWriter();
    // sort parameter names so the output is deterministic
    ICollection<string> orderedNames = new TreeSet<string>();
    Enumeration<string> parameterNames = request.GetParameterNames();
    while (parameterNames.MoveNext())
    {
        orderedNames.AddItem(parameterNames.Current);
    }
    foreach (string name in orderedNames)
    {
        writer.Write(name);
        writer.Write(':');
        writer.Write(request.GetParameter(name));
        writer.Write('\n');
    }
    writer.Close();
}
/// <summary>Determines the oldest checkpoint image that must be kept when
/// retaining at most <c>numCheckpointsToRetain</c> of the newest images.</summary>
/// <param name="inspector">inspector that has already inspected all storage dirs</param>
/// <returns>
/// the transaction ID corresponding to the oldest checkpoint
/// that should be retained.
/// </returns>
private long GetImageTxIdToRetain(FSImageTransactionalStorageInspector inspector)
{
    IList<FSImageStorageInspector.FSImageFile> foundImages = inspector.GetFoundImages();
    // TreeSet sorts txids ascending and collapses duplicates
    TreeSet<long> checkpointTxIds = Sets.NewTreeSet();
    foreach (FSImageStorageInspector.FSImageFile image in foundImages)
    {
        checkpointTxIds.AddItem(image.GetCheckpointTxId());
    }
    IList<long> newestFirst = Lists.NewArrayList(checkpointTxIds);
    if (newestFirst.IsEmpty())
    {
        // no checkpoints found at all
        return 0;
    }
    // flip to descending so index (toRetain - 1) is the oldest image we keep
    Sharpen.Collections.Reverse(newestFirst);
    int toRetain = Math.Min(numCheckpointsToRetain, newestFirst.Count);
    long minTxId = newestFirst[toRetain - 1];
    Log.Info("Going to retain " + toRetain + " images with txid >= " + minTxId);
    return minTxId;
}
/// <summary>Represent the state of the index in one String.</summary>
/// <remarks>
/// Represent the state of the index in one String. This representation is
/// useful when writing tests which do assertions on the state of the index.
/// By default information about path, mode, stage (if different from 0) is
/// included. A bitmask controls which additional info about
/// modificationTimes, smudge state and length is included.
/// <p>
/// The format of the returned string is described with this BNF:
/// <pre>
/// result = ( "[" path mode stage? time? smudge? length? sha1? content? "]" )* .
/// mode = ", mode:" number .
/// stage = ", stage:" number .
/// time = ", time:t" timestamp-index .
/// smudge = "" | ", smudged" .
/// length = ", length:" number .
/// sha1 = ", sha1:" hex-sha1 .
/// content = ", content:" blob-data .
/// </pre>
/// 'stage' is only presented when the stage is different from 0. All
/// reported time stamps are mapped to strings like "t0", "t1", ... "tn". The
/// smallest reported time-stamp will be called "t0". This allows to write
/// assertions against the string although the concrete value of the time
/// stamps is unknown.
/// </remarks>
/// <param name="repo">the repository the index state should be determined for</param>
/// <param name="includedOptions">
/// a bitmask constructed out of the constants
/// <see cref="MOD_TIME">MOD_TIME</see>
/// ,
/// <see cref="SMUDGE">SMUDGE</see>
/// ,
/// <see cref="LENGTH">LENGTH</see>
/// ,
/// <see cref="CONTENT_ID">CONTENT_ID</see>
/// and
/// <see cref="CONTENT">CONTENT</see>
/// controlling which info is present in the
/// resulting string.
/// </param>
/// <returns>a string encoding the index state</returns>
/// <exception cref="System.InvalidOperationException">System.InvalidOperationException
/// </exception>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual string IndexState(Repository repo, int includedOptions)
{
    DirCache dc = repo.ReadDirCache();
    StringBuilder sb = new StringBuilder();
    TreeSet<long> timeStamps = null;
    // iterate once over the dircache just to collect all time stamps
    if (0 != (includedOptions & MOD_TIME))
    {
        timeStamps = new TreeSet<long>();
        for (int i = 0; i < dc.GetEntryCount(); ++i)
        {
            timeStamps.AddItem(Sharpen.Extensions.ValueOf(dc.GetEntry(i).LastModified));
        }
    }
    // iterate again, now produce the result string
    for (int i_1 = 0; i_1 < dc.GetEntryCount(); ++i_1)
    {
        DirCacheEntry entry = dc.GetEntry(i_1);
        sb.Append("[" + entry.PathString + ", mode:" + entry.FileMode);
        int stage = entry.Stage;
        if (stage != 0)
        {
            sb.Append(", stage:" + stage);
        }
        if (0 != (includedOptions & MOD_TIME))
        {
            // HeadSet(t).Count = number of distinct smaller stamps = the "tN" index
            sb.Append(", time:t" + timeStamps.HeadSet(Sharpen.Extensions.ValueOf(entry.LastModified)).Count);
        }
        if (0 != (includedOptions & SMUDGE))
        {
            if (entry.IsSmudged)
            {
                sb.Append(", smudged");
            }
        }
        if (0 != (includedOptions & LENGTH))
        {
            sb.Append(", length:" + Sharpen.Extensions.ToString(entry.Length));
        }
        if (0 != (includedOptions & CONTENT_ID))
        {
            sb.Append(", sha1:" + ObjectId.ToString(entry.GetObjectId()));
        }
        if (0 != (includedOptions & CONTENT))
        {
            // NOTE(review): reads blobs through the `db` field rather than the `repo`
            // parameter — confirm both always refer to the same repository.
            sb.Append(", content:" + Sharpen.Runtime.GetStringForBytes(db.Open(entry.GetObjectId(), Constants.OBJ_BLOB).GetCachedBytes(), "UTF-8"));
        }
        if (0 != (includedOptions & ASSUME_UNCHANGED))
        {
            sb.Append(", assume-unchanged:" + entry.IsAssumeValid.ToString().ToLower());
        }
        sb.Append("]");
    }
    return sb.ToString();
}
// test.cleanup();
// clean up after all to restore the system state
/// <summary>
/// Parses the reduce output of the DistributedFSCheck job (part-00000) and
/// writes a human-readable summary — block/file counts, corrupted blocks and,
/// optionally, performance statistics — to the log and to the result file.
/// </summary>
/// <param name="execTime">total job execution time in milliseconds</param>
/// <param name="resFileName">file the summary lines are appended to</param>
/// <param name="viewStats">when true, also emit throughput / IO-rate statistics</param>
/// <exception cref="System.IO.IOException"/>
private void AnalyzeResult(long execTime, string resFileName, bool viewStats)
{
    Path reduceFile = new Path(ReadDir, "part-00000");
    DataInputStream @in;
    @in = new DataInputStream(fs.Open(reduceFile));
    BufferedReader lines;
    lines = new BufferedReader(new InputStreamReader(@in));
    long blocks = 0;
    long size = 0;
    long time = 0;
    float rate = 0;
    StringTokenizer badBlocks = null;
    long nrBadBlocks = 0;
    string line;
    // each reduce output line is "<attr> <value>"; dispatch on the attr suffix
    while ((line = lines.ReadLine()) != null)
    {
        StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
        string attr = tokens.NextToken();
        if (attr.EndsWith("blocks"))
        {
            blocks = long.Parse(tokens.NextToken());
        }
        else
        {
            if (attr.EndsWith("size"))
            {
                size = long.Parse(tokens.NextToken());
            }
            else
            {
                if (attr.EndsWith("time"))
                {
                    time = long.Parse(tokens.NextToken());
                }
                else
                {
                    if (attr.EndsWith("rate"))
                    {
                        rate = float.ParseFloat(tokens.NextToken());
                    }
                    else
                    {
                        if (attr.EndsWith("badBlocks"))
                        {
                            // corrupted blocks arrive as a ';'-separated "file@offset" list
                            badBlocks = new StringTokenizer(tokens.NextToken(), ";");
                            nrBadBlocks = badBlocks.CountTokens();
                        }
                    }
                }
            }
        }
    }
    Vector<string> resultLines = new Vector<string>();
    resultLines.AddItem("----- DistributedFSCheck ----- : ");
    resultLines.AddItem("               Date & time: " + Sharpen.Extensions.CreateDate(Runtime.CurrentTimeMillis()));
    resultLines.AddItem("    Total number of blocks: " + blocks);
    resultLines.AddItem("     Total number of files: " + nrFiles);
    resultLines.AddItem("Number of corrupted blocks: " + nrBadBlocks);
    // remember where the corrupted-file count must be inserted once it is known
    int nrBadFilesPos = resultLines.Count;
    TreeSet<string> badFiles = new TreeSet<string>();
    long nrBadFiles = 0;
    if (nrBadBlocks > 0)
    {
        resultLines.AddItem(string.Empty);
        resultLines.AddItem("----- Corrupted Blocks (file@offset) ----- : ");
        while (badBlocks.HasMoreTokens())
        {
            string curBlock = badBlocks.NextToken();
            resultLines.AddItem(curBlock);
            // the distinct file names (text before '@') give the corrupted-file count
            badFiles.AddItem(Sharpen.Runtime.Substring(curBlock, 0, curBlock.IndexOf('@')));
        }
        nrBadFiles = badFiles.Count;
    }
    resultLines.InsertElementAt("  Number of corrupted files: " + nrBadFiles, nrBadFilesPos);
    if (viewStats)
    {
        resultLines.AddItem(string.Empty);
        resultLines.AddItem("-----   Performance  ----- : ");
        resultLines.AddItem("         Total MBytes read: " + size / Mega);
        // size is in bytes and time in ms — TODO confirm units against the mapper
        resultLines.AddItem("         Throughput mb/sec: " + (float)size * 1000.0 / (time * Mega));
        resultLines.AddItem("    Average IO rate mb/sec: " + rate / 1000 / blocks);
        resultLines.AddItem("        Test exec time sec: " + (float)execTime / 1000);
    }
    // append (true) the summary to the result file while also logging each line
    TextWriter res = new TextWriter(new FileOutputStream(new FilePath(resFileName), true));
    for (int i = 0; i < resultLines.Count; i++)
    {
        string cur = resultLines[i];
        Log.Info(cur);
        res.WriteLine(cur);
    }
}
/// <summary>
/// Verifies that at least one of the given hostnames matches one of the names
/// (CNs or subjectAlts) asserted by a certificate, honoring wildcard rules.
/// Hosts are lower-cased and trimmed in place.
/// </summary>
/// <param name="hosts">hostnames to check (mutated: lower-cased/trimmed)</param>
/// <param name="cns">certificate common names; only the first is used unless ie6</param>
/// <param name="subjectAlts">certificate subjectAlt names</param>
/// <param name="ie6">when true, consider every CN (IE6 behavior), not just the first</param>
/// <param name="strictWithSubDomains">when true, a wildcard may not span extra sub-domain levels</param>
/// <exception cref="SSLException">when the certificate has no usable names, or none match</exception>
public virtual void Check(string[] hosts, string[] cns, string[] subjectAlts, bool ie6, bool strictWithSubDomains)
{
    // Build up lists of allowed hosts For logging/debugging purposes.
    StringBuilder buf = new StringBuilder(32);
    buf.Append('<');
    for (int i = 0; i < hosts.Length; i++)
    {
        string h = hosts[i];
        h = h != null ? StringUtils.ToLowerCase(h.Trim()) : string.Empty;
        hosts[i] = h;
        if (i > 0)
        {
            buf.Append('/');
        }
        buf.Append(h);
    }
    buf.Append('>');
    string hostnames = buf.ToString();
    // Build the list of names we're going to check. Our DEFAULT and
    // STRICT implementations of the HostnameVerifier only use the
    // first CN provided. All other CNs are ignored.
    // (Firefox, wget, curl, Sun Java 1.4, 5, 6 all work this way).
    ICollection<string> names = new TreeSet<string>();
    if (cns != null && cns.Length > 0 && cns[0] != null)
    {
        names.AddItem(cns[0]);
        if (ie6)
        {
            for (int i_1 = 1; i_1 < cns.Length; i_1++)
            {
                names.AddItem(cns[i_1]);
            }
        }
    }
    if (subjectAlts != null)
    {
        for (int i_1 = 0; i_1 < subjectAlts.Length; i_1++)
        {
            if (subjectAlts[i_1] != null)
            {
                names.AddItem(subjectAlts[i_1]);
            }
        }
    }
    if (names.IsEmpty())
    {
        string msg = "Certificate for " + hosts[0] + " doesn't contain CN or DNS subjectAlt";
        throw new SSLException(msg);
    }
    // StringBuffer for building the error message.
    buf = new StringBuilder();
    bool match = false;
    // the goto labels below emulate Java's labeled break/continue on the outer loop
    for (IEnumerator<string> it = names.GetEnumerator(); it.HasNext();)
    {
        // Don't trim the CN, though!
        string cn = StringUtils.ToLowerCase(it.Next());
        // Store CN in StringBuffer in case we need to report an error.
        buf.Append(" <");
        buf.Append(cn);
        buf.Append('>');
        if (it.HasNext())
        {
            buf.Append(" OR");
        }
        // The CN better have at least two dots if it wants wildcard
        // action. It also can't be [*.co.uk] or [*.co.jp] or
        // [*.org.uk], etc...
        // NOTE(review): LastIndexOf('.') >= 0 is always true once cn starts with
        // "*." — the two-dot requirement appears to rely on AcceptableCountryWildcard;
        // confirm that helper enforces it.
        bool doWildcard = cn.StartsWith("*.") && cn.LastIndexOf('.') >= 0 && !IsIP4Address(cn) && AcceptableCountryWildcard(cn);
        for (int i_1 = 0; i_1 < hosts.Length; i_1++)
        {
            string hostName = StringUtils.ToLowerCase(hosts[i_1].Trim());
            if (doWildcard)
            {
                // wildcard matches when the host ends with the ".domain" suffix of the CN
                match = hostName.EndsWith(Runtime.Substring(cn, 1));
                if (match && strictWithSubDomains)
                {
                    // If we're in strict mode, then [*.foo.com] is not
                    // allowed to match [a.b.foo.com]
                    match = CountDots(hostName) == CountDots(cn);
                }
            }
            else
            {
                match = hostName.Equals(cn);
            }
            if (match)
            {
                goto out_break;
            }
        }
        out_continue :;
    }
    out_break :;
    if (!match)
    {
        throw new SSLException("hostname in certificate didn't match: " + hostnames + " !=" + buf);
    }
}
/// <summary>Checks that a path-restricted Clean removes exactly the requested
/// untracked file ("File3.txt") and nothing else.</summary>
public virtual void TestCleanWithPaths()
{
    // there must be untracked files before cleaning
    ICollection<string> untrackedBefore = git.Status().Call().GetUntracked();
    NUnit.Framework.Assert.IsTrue(untrackedBefore.Count > 0);
    // run clean limited to one path
    ICollection<string> selection = new TreeSet<string>();
    selection.AddItem("File3.txt");
    ICollection<string> cleaned = git.Clean().SetPaths(selection).Call();
    // afterwards a single untracked file remains; only the selected one was removed
    Status afterStatus = git.Status().Call();
    ICollection<string> untrackedAfter = afterStatus.GetUntracked();
    NUnit.Framework.Assert.AreEqual(1, untrackedAfter.Count);
    NUnit.Framework.Assert.IsTrue(cleaned.Contains("File3.txt"));
    NUnit.Framework.Assert.IsFalse(cleaned.Contains("File2.txt"));
}
/// <summary>
/// Assigns a plot lane to <paramref name="currCommit"/> based on its children:
/// a sole non-merge child shares its lane; otherwise the commit gets a fresh
/// lane positioned so it does not cross any commit drawn between it and its
/// children.
/// </summary>
/// <param name="index">position of the commit in the list being plotted</param>
/// <param name="currCommit">the commit to place on a lane</param>
protected internal override void Enter(int index, PlotCommit<L> currCommit)
{
    SetupChildren(currCommit);
    int nChildren = currCommit.GetChildCount();
    if (nChildren == 0)
    {
        return;
    }
    if (nChildren == 1 && currCommit.children[0].ParentCount < 2)
    {
        // Only one child, child has only us as their parent.
        // Stay in the same lane as the child.
        //
        PlotCommit c = currCommit.children[0];
        if (c.lane == null)
        {
            // Hmmph. This child must be the first along this lane.
            //
            c.lane = NextFreeLane();
            activeLanes.AddItem(c.lane);
        }
        // mark the child's lane as passing through every commit drawn between us
        for (int r = index - 1; r >= 0; r--)
        {
            PlotCommit rObj = this[r];
            if (rObj == c)
            {
                break;
            }
            rObj.AddPassingLane(c.lane);
        }
        currCommit.lane = c.lane;
    }
    else
    {
        // More than one child, or our child is a merge.
        // Use a different lane.
        //
        // Process all our children. Especially important when there is more
        // than one child (e.g. a commit is processed where other branches
        // fork out). For each child the following is done
        // 1. If no lane was assigned to the child a new lane is created and
        // assigned
        // 2. The lane of the child is closed. If this frees a position,
        // this position will be added freePositions list.
        // If we have multiple children which where previously not on a lane
        // each such child will get his own new lane but all those new lanes
        // will be on the same position. We have to take care that not
        // multiple newly created (in step 1) lanes occupy that position on
        // which the
        // parent's lane will be on. Therefore we delay closing the lane
        // with the parents position until all children are processed.
        // The lane on that position the current commit will be on
        PlotLane reservedLane = null;
        for (int i = 0; i < nChildren; i++)
        {
            PlotCommit c = currCommit.children[i];
            // don't forget to position all of your children if they are
            // not already positioned.
            if (c.lane == null)
            {
                c.lane = NextFreeLane();
                activeLanes.AddItem(c.lane);
                if (reservedLane != null)
                {
                    CloseLane(c.lane);
                }
                else
                {
                    reservedLane = c.lane;
                }
            }
            else
            {
                if (reservedLane == null && activeLanes.Contains(c.lane))
                {
                    reservedLane = c.lane;
                }
                else
                {
                    CloseLane(c.lane);
                }
            }
        }
        // finally all children are processed. We can close the lane on that
        // position our current commit will be on.
        if (reservedLane != null)
        {
            CloseLane(reservedLane);
        }
        currCommit.lane = NextFreeLane();
        activeLanes.AddItem(currCommit.lane);
        // take care: when connecting yourself to your child make sure that
        // you will not be located on a lane on which a passed commit is
        // located on. Otherwise we would have to draw a line through a
        // commit.
        int remaining = nChildren;
        BitSet blockedPositions = new BitSet();
        for (int r = index - 1; r >= 0; r--)
        {
            PlotCommit rObj = this[r];
            if (currCommit.IsChild(rObj))
            {
                if (--remaining == 0)
                {
                    break;
                }
            }
            if (rObj != null)
            {
                PlotLane lane = rObj.GetLane();
                if (lane != null)
                {
                    blockedPositions.Set(lane.GetPosition());
                }
                rObj.AddPassingLane(currCommit.lane);
            }
        }
        // Now let's check whether we have to reposition the lane
        if (blockedPositions.Get(currCommit.lane.GetPosition()))
        {
            int newPos = -1;
            foreach (int pos in freePositions)
            {
                if (!blockedPositions.Get(pos))
                {
                    newPos = pos;
                    break;
                }
            }
            if (newPos == -1)
            {
                newPos = positionsAllocated++;
            }
            // the old position becomes free; the lane moves to an unblocked one
            freePositions.AddItem(currCommit.lane.GetPosition());
            currCommit.lane.position = newPos;
        }
    }
}
/// <summary>
/// Creates three batches of ten files separated by filesystem-timer ticks and
/// verifies the time-controlled iterator reports one shared, strictly
/// increasing timestamp per batch.
/// </summary>
/// <exception cref="System.InvalidOperationException"></exception>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="System.Exception"></exception>
public virtual void TestIterator()
{
    TreeSet<long> modTimes = new TreeSet<long>();
    FilePath lastFile = null;
    // batch "0.*": ten files, with an extra tick in the middle of the batch
    for (int i = 0; i < 10; i++)
    {
        lastFile = new FilePath(db.WorkTree, "0." + i);
        FileUtils.CreateNewFile(lastFile);
        if (i == 5)
        {
            FsTick(lastFile);
        }
    }
    // record the tick separating batch 0 from batch 1
    // NOTE(review): the long return of FsTick is added without an explicit
    // boxing wrapper — confirm the intended AddItem overload is chosen.
    modTimes.AddItem(FsTick(lastFile));
    // batch "1.*": ten files, no intermediate ticks
    for (int i_1 = 0; i_1 < 10; i_1++)
    {
        lastFile = new FilePath(db.WorkTree, "1." + i_1);
        FileUtils.CreateNewFile(lastFile);
    }
    modTimes.AddItem(FsTick(lastFile));
    // batch "2.*": ten files, ticking every fourth file
    for (int i_2 = 0; i_2 < 10; i_2++)
    {
        lastFile = new FilePath(db.WorkTree, "2." + i_2);
        FileUtils.CreateNewFile(lastFile);
        if (i_2 % 4 == 0)
        {
            FsTick(lastFile);
        }
    }
    // walk the work tree with timestamps snapped to the recorded boundaries
    FileTreeIteratorWithTimeControl fileIt = new FileTreeIteratorWithTimeControl(db, modTimes);
    NameConflictTreeWalk tw = new NameConflictTreeWalk(db);
    tw.AddTree(fileIt);
    tw.Recursive = true;
    FileTreeIterator t;
    // batch 0: every entry carries the same timestamp
    long t0 = 0;
    for (int i_3 = 0; i_3 < 10; i_3++)
    {
        NUnit.Framework.Assert.IsTrue(tw.Next());
        t = tw.GetTree<FileTreeIterator>(0);
        if (i_3 == 0)
        {
            t0 = t.GetEntryLastModified();
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(t0, t.GetEntryLastModified());
        }
    }
    // batch 1: one shared timestamp, strictly later than batch 0
    long t1 = 0;
    for (int i_4 = 0; i_4 < 10; i_4++)
    {
        NUnit.Framework.Assert.IsTrue(tw.Next());
        t = tw.GetTree<FileTreeIterator>(0);
        if (i_4 == 0)
        {
            t1 = t.GetEntryLastModified();
            NUnit.Framework.Assert.IsTrue(t1 > t0);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(t1, t.GetEntryLastModified());
        }
    }
    // batch 2: one shared timestamp, strictly later than batch 1
    long t2 = 0;
    for (int i_5 = 0; i_5 < 10; i_5++)
    {
        NUnit.Framework.Assert.IsTrue(tw.Next());
        t = tw.GetTree<FileTreeIterator>(0);
        if (i_5 == 0)
        {
            t2 = t.GetEntryLastModified();
            NUnit.Framework.Assert.IsTrue(t2 > t1);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(t2, t.GetEntryLastModified());
        }
    }
}
/// <summary>
/// Writes three ten-file batches separated by filesystem-timer ticks, then
/// walks them with the time-controlled iterator and asserts that each batch
/// shares a single timestamp and the batch timestamps strictly increase.
/// </summary>
/// <exception cref="System.InvalidOperationException"></exception>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="System.Exception"></exception>
public virtual void TestIterator()
{
    TreeSet<long> batchBoundaries = new TreeSet<long>();
    FilePath newest = null;
    // batch "0.*": ten files with one tick in the middle
    for (int n = 0; n < 10; n++)
    {
        newest = new FilePath(db.WorkTree, "0." + n);
        FileUtils.CreateNewFile(newest);
        if (n == 5)
        {
            FsTick(newest);
        }
    }
    batchBoundaries.AddItem(Sharpen.Extensions.ValueOf(FsTick(newest)));
    // batch "1.*": ten files, no intermediate ticks
    for (int n = 0; n < 10; n++)
    {
        newest = new FilePath(db.WorkTree, "1." + n);
        FileUtils.CreateNewFile(newest);
    }
    batchBoundaries.AddItem(Sharpen.Extensions.ValueOf(FsTick(newest)));
    // batch "2.*": ten files, ticking every fourth file
    for (int n = 0; n < 10; n++)
    {
        newest = new FilePath(db.WorkTree, "2." + n);
        FileUtils.CreateNewFile(newest);
        if (n % 4 == 0)
        {
            FsTick(newest);
        }
    }
    // walk the tree with timestamps quantized to the recorded boundaries
    FileTreeIteratorWithTimeControl fileIt = new FileTreeIteratorWithTimeControl(db, batchBoundaries);
    NameConflictTreeWalk walk = new NameConflictTreeWalk(db);
    walk.AddTree(fileIt);
    walk.Recursive = true;
    FileTreeIterator current;
    // batch 0: a single shared timestamp
    long stamp0 = 0;
    for (int n = 0; n < 10; n++)
    {
        NUnit.Framework.Assert.IsTrue(walk.Next());
        current = walk.GetTree<FileTreeIterator>(0);
        if (n == 0)
        {
            stamp0 = current.GetEntryLastModified();
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(stamp0, current.GetEntryLastModified());
        }
    }
    // batch 1: one shared timestamp, strictly after batch 0
    long stamp1 = 0;
    for (int n = 0; n < 10; n++)
    {
        NUnit.Framework.Assert.IsTrue(walk.Next());
        current = walk.GetTree<FileTreeIterator>(0);
        if (n == 0)
        {
            stamp1 = current.GetEntryLastModified();
            NUnit.Framework.Assert.IsTrue(stamp1 > stamp0);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(stamp1, current.GetEntryLastModified());
        }
    }
    // batch 2: one shared timestamp, strictly after batch 1
    long stamp2 = 0;
    for (int n = 0; n < 10; n++)
    {
        NUnit.Framework.Assert.IsTrue(walk.Next());
        current = walk.GetTree<FileTreeIterator>(0);
        if (n == 0)
        {
            stamp2 = current.GetEntryLastModified();
            NUnit.Framework.Assert.IsTrue(stamp2 > stamp1);
        }
        else
        {
            NUnit.Framework.Assert.AreEqual(stamp2, current.GetEntryLastModified());
        }
    }
}
/// <summary>
/// Verifies racy-git handling: a file whose modification time equals the index
/// file's modification time must be smudged in the index, yet not reported as
/// dirty on the next read.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="System.InvalidOperationException"></exception>
/// <exception cref="System.Exception"></exception>
public virtual void TestRacyGitDetection()
{
    TreeSet<long> modTimes = new TreeSet<long>();
    FilePath lastFile;
    // wait to ensure that modtimes of the file doesn't match last index
    // file modtime
    modTimes.AddItem(Sharpen.Extensions.ValueOf(FsTick(db.GetIndexFile())));
    // create two files
    AddToWorkDir("a", "a");
    lastFile = AddToWorkDir("b", "b");
    // wait to ensure that file-modTimes and therefore index entry modTime
    // doesn't match the modtime of index-file after next persistance
    modTimes.AddItem(Sharpen.Extensions.ValueOf(FsTick(lastFile)));
    // now add both files to the index. No racy git expected
    ResetIndex(new FileTreeIteratorWithTimeControl(db, modTimes));
    NUnit.Framework.Assert.AreEqual("[a, mode:100644, time:t0, length:1, content:a]" + "[b, mode:100644, time:t0, length:1, content:b]", IndexState(SMUDGE | MOD_TIME | LENGTH | CONTENT));
    // Remember the last modTime of index file. All modifications times of
    // further modification are translated to this value so it looks that
    // files have been modified in the same time slot as the index file
    modTimes.AddItem(Sharpen.Extensions.ValueOf(db.GetIndexFile().LastModified()));
    // modify one file
    AddToWorkDir("a", "a2");
    // now update the index. 'a' has to be racily clean -- because
    // it's modification time is exactly the same as the previous index file
    // mod time.
    ResetIndex(new FileTreeIteratorWithTimeControl(db, modTimes));
    db.ReadDirCache();
    // although racily clean a should not be reported as being dirty
    NUnit.Framework.Assert.AreEqual("[a, mode:100644, time:t1, smudged, length:0, content:a2]" + "[b, mode:100644, time:t0, length:1, content:b]", IndexState(SMUDGE | MOD_TIME | LENGTH | CONTENT));
}