// Simulates a crash of the writer's underlying MockRAMDirectory, waits for
// any in-flight concurrent merges to finish, then clears the crash state so
// the directory is usable again.
private void Crash(IndexWriter writer)
{
    MockRAMDirectory mockDir = (MockRAMDirectory) writer.GetDirectory();
    ConcurrentMergeScheduler scheduler = (ConcurrentMergeScheduler) writer.GetMergeScheduler();

    mockDir.Crash();
    scheduler.Sync();
    mockDir.ClearCrash();
}
// Crashes the writer's directory, lets the concurrent merge scheduler drain
// its threads, and then resets the crash flag on the directory.
private void Crash(IndexWriter writer)
{
    var directory = (MockRAMDirectory) writer.GetDirectory();
    var mergeScheduler = (ConcurrentMergeScheduler) writer.GetMergeScheduler();
    directory.Crash();
    mergeScheduler.Sync();
    directory.ClearCrash();
}
// Opens a writer over each of the two directories, applies updates to both,
// then performs a two-phase commit across them: PrepareCommit on each, and
// only if both succeed, Commit on each.  If either prepare fails (doFail
// injects random failures), both writers are rolled back so the two indexes
// remain mutually consistent.
public override void DoWork()
{
    IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer1.SetMaxBufferedDocs(3);
    writer1.SetMergeFactor(2);
    // Merge threads will hit the injected failures; keep their exceptions
    // out of the test output.
    ((ConcurrentMergeScheduler) writer1.GetMergeScheduler()).SetSuppressExceptions();

    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    // Intentionally use different params so flush/merge
    // happen @ different times
    writer2.SetMaxBufferedDocs(2);
    writer2.SetMergeFactor(3);
    ((ConcurrentMergeScheduler) writer2.GetMergeScheduler()).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    TestTransactions.doFail = true;
    try
    {
        lock (lock_Renamed)
        {
            try
            {
                writer1.PrepareCommit();
            }
            catch (System.Exception) // fix: removed unused variable 't' (CS0168)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }
            try
            {
                writer2.PrepareCommit();
            }
            catch (System.Exception) // fix: removed unused variable 't' (CS0168)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }
            writer1.Commit();
            writer2.Commit();
        }
    }
    finally
    {
        TestTransactions.doFail = false;
    }
    writer1.Close();
    writer2.Close();
}
// Creates a writer over dir (with locking disabled via NoLockFactory) and
// seeds it with 157 identical two-field documents; returns the still-open
// writer to the caller.
private IndexWriter InitIndex(MockRAMDirectory dir)
{
    dir.SetLockFactory(NoLockFactory.GetNoLockFactory());

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer());
    //writer.setMaxBufferedDocs(2);
    writer.SetMaxBufferedDocs(10);
    // Suppress exceptions raised on the merge scheduler's background threads.
    ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).SetSuppressExceptions();

    Document doc = new Document();
    doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    doc.Add(new Field("id", "0", Field.Store.YES, Field.Index.ANALYZED));

    for (int docNum = 0; docNum < 157; docNum++)
    {
        writer.AddDocument(doc);
    }
    return writer;
}
// Builds and returns an open IndexWriter over the given directory, after
// disabling its lock factory, suppressing merge-thread exceptions, and
// adding 157 copies of a fixed two-field document.
private IndexWriter InitIndex(MockRAMDirectory dir)
{
    dir.SetLockFactory(NoLockFactory.GetNoLockFactory());
    var writer = new IndexWriter(dir, new WhitespaceAnalyzer());
    //writer.setMaxBufferedDocs(2);
    writer.SetMaxBufferedDocs(10);
    ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).SetSuppressExceptions();

    var template = new Document();
    template.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    template.Add(new Field("id", "0", Field.Store.YES, Field.Index.ANALYZED));

    int remaining = 157;
    while (remaining-- > 0)
    {
        writer.AddDocument(template);
    }
    return writer;
}
// Verifies that a merged-segment warmer registered on a near-real-time
// writer is invoked during background merges, and again when Optimize()
// forces further merging.
public virtual void TestMergeWarmer()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();

    // Enroll warmer
    MyWarmer warmer = new MyWarmer();
    writer.SetMergedSegmentWarmer(warmer);
    writer.SetMergeFactor(2);
    writer.SetMaxBufferedDocs(2);

    for (int docIndex = 0; docIndex < 10; docIndex++)
    {
        writer.AddDocument(CreateDocument(docIndex, "test", 4));
    }
    ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();

    Assert.IsTrue(warmer.warmCount > 0);
    int countBeforeOptimize = warmer.warmCount;

    writer.AddDocument(CreateDocument(17, "test", 4));
    writer.Optimize();
    Assert.IsTrue(warmer.warmCount > countBeforeOptimize);

    writer.Close();
    r1.Close();
    dir1.Close();
}
// Checks that a near-real-time reader stays valid across an explicit
// Commit(), and that Reopen() after further adds reflects the new docs
// (100 before, 110 after adding ten more).
public virtual void TestAfterCommit()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();
    _TestUtil.CheckIndex(dir1);
    writer.Commit();
    _TestUtil.CheckIndex(dir1);
    Assert.AreEqual(100, r1.NumDocs());

    for (int docIndex = 0; docIndex < 10; docIndex++)
    {
        writer.AddDocument(CreateDocument(docIndex, "test", 4));
    }
    ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();

    IndexReader reopened = r1.Reopen();
    if (reopened != r1)
    {
        r1.Close();
        r1 = reopened;
    }
    Assert.AreEqual(110, r1.NumDocs());

    writer.Close();
    r1.Close();
    dir1.Close();
}
// Runs a cross-directory two-phase commit: prepare both writers, commit
// both only when both prepares succeed, otherwise roll both back so the
// two indexes never diverge.  TestTransactions.doFail injects failures
// for the duration of the commit attempt.
public override void DoWork()
{
    IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer1.SetMaxBufferedDocs(3);
    writer1.SetMergeFactor(2);
    // Keep the injected-failure exceptions on CMS threads out of the output.
    ((ConcurrentMergeScheduler) writer1.GetMergeScheduler()).SetSuppressExceptions();

    IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    // Intentionally use different params so flush/merge
    // happen @ different times
    writer2.SetMaxBufferedDocs(2);
    writer2.SetMergeFactor(3);
    ((ConcurrentMergeScheduler) writer2.GetMergeScheduler()).SetSuppressExceptions();

    Update(writer1);
    Update(writer2);

    TestTransactions.doFail = true;
    try
    {
        lock (lock_Renamed)
        {
            try
            {
                writer1.PrepareCommit();
            }
            catch (System.Exception) // fix: removed unused variable 't' (CS0168)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }
            try
            {
                writer2.PrepareCommit();
            }
            catch (System.Exception) // fix: removed unused variable 't' (CS0168)
            {
                writer1.Rollback();
                writer2.Rollback();
                return;
            }
            writer1.Commit();
            writer2.Commit();
        }
    }
    finally
    {
        TestTransactions.doFail = false;
    }
    writer1.Close();
    writer2.Close();
}
// Creates an IndexWriter over the given directory using a StopAnalyzer,
// replacing the default merge scheduler with an error-logging one (the
// default scheduler is closed first).
private static IndexWriter CreateIndexWriter(Directory directory)
{
    var indexWriter = new IndexWriter(directory, new StopAnalyzer(Version.LUCENE_29), IndexWriter.MaxFieldLength.UNLIMITED);
    try
    {
        var mergeScheduler = indexWriter.GetMergeScheduler();
        if (mergeScheduler != null)
            mergeScheduler.Close();
        indexWriter.SetMergeScheduler(new ErrorLoggingConcurrentMergeScheduler());
        return indexWriter;
    }
    catch
    {
        // Fix: don't leak the writer (and its directory write lock) when
        // swapping in the merge scheduler fails.
        indexWriter.Close();
        throw;
    }
}
// Builds the test fixture: a source index (dir) seeded with NUM_INIT_DOCS
// docs, a destination writer (writer2) whose concurrent merge scheduler is
// captured in cms, and NUM_COPY readers over the source index.
public RunAddIndexesThreads(int numCopy)
{
    NUM_COPY = numCopy;

    dir = new MockRAMDirectory();
    IndexWriter seedWriter = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    seedWriter.SetMaxBufferedDocs(2);
    for (int docNum = 0; docNum < NUM_INIT_DOCS; docNum++)
    {
        AddDoc(seedWriter);
    }
    seedWriter.Close();

    dir2 = new MockRAMDirectory();
    writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    cms = (ConcurrentMergeScheduler) writer2.GetMergeScheduler();

    readers = new IndexReader[NUM_COPY];
    for (int readerNum = 0; readerNum < NUM_COPY; readerNum++)
    {
        readers[readerNum] = IndexReader.Open(dir);
    }
}
// Repeatedly grows the index, then calls Optimize(7) and checks the
// resulting segment count: unchanged when already below 7 segments,
// otherwise reduced to exactly 7.
public virtual void TestOptimizeMaxNumSegments2()
{
    MockRAMDirectory dir = new MockRAMDirectory();

    Document doc = new Document();
    doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.TOKENIZED));

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true);
    LogDocMergePolicy mergePolicy = new LogDocMergePolicy();
    mergePolicy.SetMinMergeDocs(1);
    writer.SetMergePolicy(mergePolicy);
    writer.SetMergeFactor(4);
    writer.SetMaxBufferedDocs(2);

    for (int iter = 0; iter < 10; iter++)
    {
        for (int docNum = 0; docNum < 19; docNum++)
        {
            writer.AddDocument(doc);
        }
        writer.Flush();

        // Segment count before Optimize (after background merges settle).
        SegmentInfos infos = new SegmentInfos();
        ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();
        infos.Read(dir);
        int segCount = infos.Count;

        writer.Optimize(7);

        // Segment count after Optimize(7).
        infos = new SegmentInfos();
        ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();
        infos.Read(dir);
        int optSegCount = infos.Count;

        if (segCount < 7)
        {
            Assert.AreEqual(segCount, optSegCount);
        }
        else
        {
            Assert.AreEqual(7, optSegCount);
        }
    }
}
// Stresses Optimize() under random IO failures: builds a 27-doc index,
// then 200 times copies it into a fresh MockRAMDirectory with a 50%
// random-IOException rate and optimizes.  Any IOException thrown must
// carry a root cause (InnerException).
public virtual void TestOptimizeExceptions()
{
    RAMDirectory startDir = new MockRAMDirectory();
    IndexWriter w = new IndexWriter(startDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    w.SetMaxBufferedDocs(2);
    w.SetMergeFactor(100);
    for (int i = 0; i < 27; i++)
        AddDoc(w);
    w.Close();

    for (int i = 0; i < 200; i++)
    {
        MockRAMDirectory dir = new MockRAMDirectory(startDir);
        w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
        // Optimize will hit injected exceptions on CMS threads; suppress them.
        ((ConcurrentMergeScheduler) w.GetMergeScheduler()).SetSuppressExceptions();
        dir.SetRandomIOExceptionRate(0.5, 100);
        try
        {
            w.Optimize();
        }
        catch (System.IO.IOException ioe)
        {
            if (ioe.InnerException == null)
                Assert.Fail("optimize threw IOException without root cause");
        }
        w.Close();
        dir.Close();
    }

    // Fix: the seed directory was never closed (resource leak).
    startDir.Close();
}
// Disk-full test for AddDocument: fills a size-limited MockRAMDirectory,
// verifies the index stays openable after the failure, then retries with
// more space.
//
// NOTE(review): this line is corrupted — the fragment `pass="******"` is a
// redaction artifact that replaced part of the source (the declarations of
// `diskFree`, `autoCommit` and `doAbort`, and the inner loop header, are
// missing), so the method does not compile as-is.  Left byte-identical;
// restore from the upstream Lucene TestIndexWriter source before use.
public virtual void TestAddDocumentOnDiskFull() { bool debug = false; for (int pass = 0; pass < 3; pass++) { if (debug) System.Console.Out.WriteLine("TEST: pass="******"TEST: cycle: diskFree=" + diskFree); MockRAMDirectory dir = new MockRAMDirectory(); dir.SetMaxSizeInBytes(diskFree); IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); MergeScheduler ms = writer.GetMergeScheduler(); if (ms is ConcurrentMergeScheduler) // This test intentionally produces exceptions // in the threads that CMS launches; we don't // want to pollute test output with these. ((ConcurrentMergeScheduler)ms).SetSuppressExceptions_ForNUnitTest(); bool hitError = false; try { for (int i = 0; i < 200; i++) { AddDoc(writer); } } catch (System.IO.IOException e) { if (debug) { System.Console.Out.WriteLine("TEST: exception on addDoc"); System.Console.Out.WriteLine(e.StackTrace); } hitError = true; } if (hitError) { if (doAbort) { writer.Abort(); } else { try { writer.Close(); } catch (System.IO.IOException e) { if (debug) { System.Console.Out.WriteLine("TEST: exception on close"); System.Console.Out.WriteLine(e.StackTrace); } dir.SetMaxSizeInBytes(0); writer.Close(); } } _TestUtil.SyncConcurrentMerges(ms); AssertNoUnreferencedFiles(dir, "after disk full during addDocument with autoCommit=" + autoCommit); // Make sure reader can open the index: IndexReader.Open(dir).Close(); dir.Close(); // Now try again w/ more space: diskFree += 500; } else { _TestUtil.SyncConcurrentMerges(writer); dir.Close(); break; } } } }
// Disk-full test for the three AddIndexes variants: builds 50 source
// indexes plus a 57-doc starting index, then repeatedly runs each
// AddIndexes method against a size-limited MockRAMDirectory with random
// IOExceptions, verifying transactional semantics (either all docs added
// or none) and that no unreferenced files are left behind; free space is
// increased by 2000 bytes per retry until the operation succeeds.
//
// NOTE(review): this method has been damaged by line-collapsing of the
// original source.  On the second physical line the leading comment
// `// String[] files = startDir.list();` now swallows the real code that
// follows it on the same line; a later comment run ends mid-sentence and
// its continuation (`create garbage).`) appears as bare tokens; and a
// string literal (`": exception when searching: "`) is split across two
// physical lines.  It will not compile as-is — left byte-identical;
// restore the original line breaks from upstream Lucene before editing.
public virtual void TestAddIndexOnDiskFull() { int START_COUNT = 57; int NUM_DIR = 50; int END_COUNT = START_COUNT + NUM_DIR * 25; bool debug = false; // Build up a bunch of dirs that have indexes which we // will then merge together by calling addIndexes(*): Directory[] dirs = new Directory[NUM_DIR]; long inputDiskUsage = 0; for (int i = 0; i < NUM_DIR; i++) { dirs[i] = new RAMDirectory(); IndexWriter writer = new IndexWriter(dirs[i], new WhitespaceAnalyzer(), true); for (int j = 0; j < 25; j++) { AddDocWithIndex(writer, 25 * i + j); } writer.Close(); System.String[] files = dirs[i].List(); for (int j = 0; j < files.Length; j++) { inputDiskUsage += dirs[i].FileLength(files[j]); } } // Now, build a starting index that has START_COUNT docs. We // will then try to addIndexes into a copy of this: RAMDirectory startDir = new RAMDirectory(); IndexWriter writer2 = new IndexWriter(startDir, new WhitespaceAnalyzer(), true); for (int j = 0; j < START_COUNT; j++) { AddDocWithIndex(writer2, j); } writer2.Close(); // Make sure starting index seems to be working properly: Term searchTerm = new Term("content", "aaa"); IndexReader reader = IndexReader.Open(startDir); Assert.AreEqual(57, reader.DocFreq(searchTerm), "first docFreq"); IndexSearcher searcher = new IndexSearcher(reader); Hits hits = searcher.Search(new TermQuery(searchTerm)); Assert.AreEqual(57, hits.Length(), "first number of hits"); searcher.Close(); reader.Close(); // Iterate with larger and larger amounts of free // disk space. With little free disk space, // addIndexes will certainly run out of space & // fail. Verify that when this happens, index is // not corrupt and index in fact has added no // documents. Then, we increase disk space by 2000 // bytes each iteration. At some point there is // enough free disk space and addIndexes should // succeed and index should show all documents were // added. 
// String[] files = startDir.list(); long diskUsage = startDir.SizeInBytes(); long startDiskUsage = 0; System.String[] files2 = startDir.List(); for (int i = 0; i < files2.Length; i++) { startDiskUsage += startDir.FileLength(files2[i]); } for (int iter = 0; iter < 6; iter++) { if (debug) System.Console.Out.WriteLine("TEST: iter=" + iter); // Start with 100 bytes more than we are currently using: long diskFree = diskUsage + 100; bool autoCommit = iter % 2 == 0; int method = iter / 2; bool success = false; bool done = false; System.String methodName; if (0 == method) { methodName = "addIndexes(Directory[])"; } else if (1 == method) { methodName = "addIndexes(IndexReader[])"; } else { methodName = "addIndexesNoOptimize(Directory[])"; } while (!done) { // Make a new dir that will enforce disk usage: MockRAMDirectory dir = new MockRAMDirectory(startDir); writer2 = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), false); System.IO.IOException err = null; MergeScheduler ms = writer2.GetMergeScheduler(); for (int x = 0; x < 2; x++) { if (ms is ConcurrentMergeScheduler) // This test intentionally produces exceptions // in the threads that CMS launches; we don't // want to pollute test output with these. 
if (0 == x) ((ConcurrentMergeScheduler)ms).SetSuppressExceptions_ForNUnitTest(); else ((ConcurrentMergeScheduler) ms).ClearSuppressExceptions_ForNUnitTest(); // Two loops: first time, limit disk space & // throw random IOExceptions; second time, no // disk space limit: double rate = 0.05; double diskRatio = ((double) diskFree) / diskUsage; long thisDiskFree; System.String testName = null; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (debug) testName = "disk full test " + methodName + " with disk full at " + diskFree + " bytes autoCommit=" + autoCommit; } else { thisDiskFree = 0; rate = 0.0; if (debug) testName = "disk full test " + methodName + " with unlimited disk space autoCommit=" + autoCommit; } if (debug) System.Console.Out.WriteLine("\ncycle: " + testName); dir.SetMaxSizeInBytes(thisDiskFree); dir.SetRandomIOExceptionRate(rate, diskFree); try { if (0 == method) { writer2.AddIndexes(dirs); } else if (1 == method) { IndexReader[] readers = new IndexReader[dirs.Length]; for (int i = 0; i < dirs.Length; i++) { readers[i] = IndexReader.Open(dirs[i]); } try { writer2.AddIndexes(readers); } finally { for (int i = 0; i < dirs.Length; i++) { readers[i].Close(); } } } else { writer2.AddIndexesNoOptimize(dirs); } success = true; if (debug) { System.Console.Out.WriteLine(" success!"); } if (0 == x) { done = true; } } catch (System.IO.IOException e) { success = false; err = e; if (debug) { System.Console.Out.WriteLine(" hit IOException: " + e); System.Console.Out.WriteLine(e.StackTrace); } if (1 == x) { System.Console.Out.WriteLine(e.StackTrace); Assert.Fail(methodName + " hit IOException after disk space was freed up"); } } // Make sure all threads from // ConcurrentMergeScheduler are done _TestUtil.SyncConcurrentMerges(writer2); if (autoCommit) { // Whether we succeeded or failed, check that // all un-referenced files were in fact // deleted (ie, we did not 
create garbage). // Only check this when autoCommit is true: // when it's false, it's expected that there // are unreferenced files (ie they won't be // referenced until the "commit on close"). // Just create a new IndexFileDeleter, have it // delete unreferenced files, then verify that // in fact no files were deleted: System.String successStr; if (success) { successStr = "success"; } else { successStr = "IOException"; } System.String message = methodName + " failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes)"; AssertNoUnreferencedFiles(dir, message); } if (debug) { System.Console.Out.WriteLine(" now test readers"); } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs added, and if we // failed, we see either all docs or no docs added // (transactional semantics): try { reader = IndexReader.Open(dir); } catch (System.IO.IOException e) { System.Console.Out.WriteLine(e.StackTrace); Assert.Fail(testName + ": exception when creating IndexReader: " + e); } int result = reader.DocFreq(searchTerm); if (success) { if (autoCommit && result != END_COUNT) { Assert.Fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT); } else if (!autoCommit && result != START_COUNT) { Assert.Fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " [autoCommit = false]"); } } else { // On hitting exception we still may have added // all docs: if (result != START_COUNT && result != END_COUNT) { System.Console.Out.WriteLine(err.StackTrace); Assert.Fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT); } } searcher = new IndexSearcher(reader); try { hits = searcher.Search(new TermQuery(searchTerm)); } catch (System.IO.IOException e) { System.Console.Out.WriteLine(e.StackTrace); Assert.Fail(testName + ": 
exception when searching: " + e); } int result2 = hits.Length(); if (success) { if (result2 != result) { Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result); } } else { // On hitting exception we still may have added // all docs: if (result2 != result) { System.Console.Out.WriteLine(err.StackTrace); Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + result); } } searcher.Close(); reader.Close(); if (debug) { System.Console.Out.WriteLine(" count is " + result); } if (done || result == END_COUNT) { break; } } if (debug) { System.Console.Out.WriteLine(" start disk = " + startDiskUsage + "; input disk = " + inputDiskUsage + "; max used = " + dir.GetMaxUsedSizeInBytes()); } if (done) { // Javadocs state that temp free Directory space // required is at most 2X total input size of // indices so let's make sure: Assert.IsTrue( (dir.GetMaxUsedSizeInBytes() - startDiskUsage) < 2 * (startDiskUsage + inputDiskUsage), "max free Directory space required exceeded 1X the total input index sizes during " + methodName + ": max temp usage = " + (dir.GetMaxUsedSizeInBytes() - startDiskUsage) + " bytes; " + "starting disk usage = " + startDiskUsage + " bytes; " + "input index disk usage = " + inputDiskUsage + " bytes" ); } writer2.Close(); // Wait for all BG threads to finish else // dir.close() will throw IOException because // there are still open files _TestUtil.SyncConcurrentMerges(ms); dir.Close(); // Try again with 2000 more bytes of free space: diskFree += 2000; } } startDir.Close(); }
// Thread body: until `finish` is set, repeatedly opens an IndexWriter on a
// RAMDirectory and adds/commits 100 docs, expecting to be interrupted at
// arbitrary points.  ThreadInterruptedException (direct or as the inner
// exception of a SystemException) is tolerated; any other exception marks
// the test `failed`.  After the loop, the index is verified with
// CheckIndex and by opening a reader.
//
// NOTE(review): `lock (this)` in the finally block is normally an
// anti-pattern, but is left untouched here — the interrupt/close ordering
// in this test is intentionally delicate (see the Aroush-2.9 porting notes
// in the body about the absence of Java's Thread.interrupted() in .NET).
override public void Run() { bool endLoop = false; RAMDirectory dir = new RAMDirectory(); IndexWriter w = null; while (!finish) { try { //IndexWriter.unlock(dir); w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); ((ConcurrentMergeScheduler) w.GetMergeScheduler()).SetSuppressExceptions(); //w.setInfoStream(System.out); w.SetMaxBufferedDocs(2); w.SetMergeFactor(2); Document doc = new Document(); doc.Add(new Field("field", "some text contents", Field.Store.YES, Field.Index.ANALYZED)); for (int i = 0; i < 100; i++) { w.AddDocument(doc); w.Commit(); } } catch (System.SystemException re) { System.Exception e = re.InnerException; if (re is System.Threading.ThreadInterruptedException || e is System.Threading.ThreadInterruptedException) { // {{Aroush-2.9}} in Java, this is: java.lang.Thread.interrupted() //{There is no way in .Net to check interrupted state. So comment it out //// Make sure IW restored interrupted bit //if ((Instance.ThreadState & (System.Threading.ThreadState.Stopped | System.Threading.ThreadState.Unstarted)) != System.Threading.ThreadState.Running) // {{Aroush-2.9}} in Java, this is: java.lang.Thread.interrupted() //{ // System.Console.Out.WriteLine("FAILED; InterruptedException hit but thread.interrupted() was false"); // System.Console.Out.WriteLine(e.StackTrace); // failed = true; // break; //} } else { System.Console.Out.WriteLine("FAILED; unexpected exception"); if (e != null) { System.Console.Out.WriteLine(e.StackTrace); } else { System.Console.Out.WriteLine(re.StackTrace); } failed = true; break; } } catch (System.Exception t) { System.Console.Out.WriteLine("FAILED; unexpected exception"); System.Console.Out.WriteLine(t.StackTrace); failed = true; break; } finally { try { // Clear interrupt if pending lock (this) { Interrupt(); if (w != null) { w.Close(); } } } catch (System.Exception t) { System.Console.Out.WriteLine("FAILED; unexpected exception during close"); System.Console.Out.WriteLine(t.StackTrace); 
failed = true; endLoop = true; } } if (endLoop) break; } if (!failed) { try { _TestUtil.CheckIndex(dir); } catch (System.Exception e) { failed = true; System.Console.Out.WriteLine("CheckIndex FAILED: unexpected exception"); System.Console.Out.WriteLine(e.StackTrace); } try { IndexReader r = IndexReader.Open(dir); //System.out.println("doc count=" + r.numDocs()); r.Close(); } catch (System.Exception e) { failed = true; System.Console.Out.WriteLine("IndexReader.open FAILED: unexpected exception"); System.Console.Out.WriteLine(e.StackTrace); } } }
// Asserts that an enrolled IndexWriter.MergedSegmentWarmer fires while
// background merges run, and fires additional times when Optimize()
// triggers more merging.
public virtual void TestMergeWarmer()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();

    // Enroll warmer
    MyWarmer warmer = new MyWarmer();
    writer.SetMergedSegmentWarmer(warmer);
    writer.SetMergeFactor(2);
    writer.SetMaxBufferedDocs(2);

    int added = 0;
    while (added < 10)
    {
        writer.AddDocument(CreateDocument(added, "test", 4));
        added++;
    }
    ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();
    Assert.IsTrue(warmer.warmCount > 0);

    int warmCountSnapshot = warmer.warmCount;
    writer.AddDocument(CreateDocument(17, "test", 4));
    writer.Optimize();
    Assert.IsTrue(warmer.warmCount > warmCountSnapshot);

    writer.Close();
    r1.Close();
    dir1.Close();
}
// A near-real-time reader opened before Commit() must see the committed
// 100 docs; after ten more adds and a Reopen(), it must see 110.
public virtual void TestAfterCommit()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    // get a reader to put writer into near real-time mode
    IndexReader r1 = writer.GetReader();
    _TestUtil.CheckIndex(dir1);
    writer.Commit();
    _TestUtil.CheckIndex(dir1);
    Assert.AreEqual(100, r1.NumDocs());

    int added = 0;
    while (added < 10)
    {
        writer.AddDocument(CreateDocument(added, "test", 4));
        added++;
    }
    ((ConcurrentMergeScheduler) writer.GetMergeScheduler()).Sync();

    IndexReader refreshed = r1.Reopen();
    if (refreshed != r1)
    {
        r1.Close();
        r1 = refreshed;
    }
    Assert.AreEqual(110, r1.NumDocs());

    writer.Close();
    r1.Close();
    dir1.Close();
}