/// <summary>
/// Builds a small starting index (157 copies of a simple two-field document)
/// using the given merge scheduler, optionally committing before adding docs.
/// </summary>
private IndexWriter InitIndex(IConcurrentMergeScheduler scheduler, Random random, MockDirectoryWrapper dir, bool initialCommit)
{
    dir.LockFactory = NoLockFactory.DoNoLockFactory;

    scheduler.SetSuppressExceptions();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .SetMaxBufferedDocs(10)
        .SetMergeScheduler(scheduler));
    if (initialCommit)
    {
        writer.Commit();
    }

    Document doc = new Document();
    doc.Add(NewTextField("content", "aaa", Field.Store.NO));
    doc.Add(NewTextField("id", "0", Field.Store.NO));
    for (int i = 0; i < 157; i++)
    {
        writer.AddDocument(doc);
    }

    return writer;
}
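// A second InitIndex overload follows; it takes a factory delegate rather than
// a ready-made scheduler, so each call builds a fresh scheduler instance and
// suppresses merge exceptions only when the configured scheduler actually
// implements IConcurrentMergeScheduler. A typical call site might look like
// this (the lambda is illustrative, not taken from this file):
//
//   IndexWriter w = InitIndex(() => new ConcurrentMergeScheduler(), Random(), dir, initialCommit: true);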
private IndexWriter InitIndex(Func<IConcurrentMergeScheduler> newScheduler, Random random, MockDirectoryWrapper dir, bool initialCommit)
{
    dir.SetLockFactory(NoLockFactory.GetNoLockFactory());
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random))
        .SetMaxBufferedDocs(10)
        .SetMergeScheduler(newScheduler()));

    IConcurrentMergeScheduler scheduler = writer.Config.MergeScheduler as IConcurrentMergeScheduler;
    if (scheduler != null)
    {
        scheduler.SetSuppressExceptions();
    }

    if (initialCommit)
    {
        writer.Commit();
    }

    Document doc = new Document();
    doc.Add(NewTextField("content", "aaa", Field.Store.NO));
    doc.Add(NewTextField("id", "0", Field.Store.NO));
    for (int i = 0; i < 157; i++)
    {
        writer.AddDocument(doc);
    }

    return writer;
}
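// The tests below take their scheduler parameter from NUnit's
// [ValueSource(typeof(ConcurrentMergeSchedulers), "Values")]. The provider
// class is defined elsewhere in the test project; a minimal sketch of what it
// is assumed to look like (the class body and the scheduler list are
// assumptions, not the canonical source):
//
//   internal static class ConcurrentMergeSchedulers
//   {
//       public static IEnumerable<IConcurrentMergeScheduler> Values
//       {
//           get
//           {
//               // One fresh scheduler per generated test case.
//               yield return new ConcurrentMergeScheduler();
//           }
//       }
//   }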
[Test]
public virtual void TestImmediateDiskFullWithThreads([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
{
    int NUM_THREADS = 3;
    int numIterations = TEST_NIGHTLY ? 10 : 3;
    for (int iter = 0; iter < numIterations; iter++)
    {
        if (VERBOSE)
        {
            Console.WriteLine("\nTEST: iter=" + iter);
        }
        MockDirectoryWrapper dir = NewMockDirectory();
        var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMaxBufferedDocs(2)
            .SetMergeScheduler(scheduler)
            .SetMergePolicy(NewLogMergePolicy(4));
        IndexWriter writer = new IndexWriter(dir, config);
        scheduler.SetSuppressExceptions();
        dir.MaxSizeInBytes = 4 * 1024 + 20 * iter;

        IndexerThread[] threads = new IndexerThread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i] = new IndexerThread(writer, true, NewField);
        }

        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i].Start();
        }

        for (int i = 0; i < NUM_THREADS; i++)
        {
            // Without fix for LUCENE-1130: one of the
            // threads will hang
            threads[i].Join();
            Assert.IsTrue(threads[i].Error == null, "hit unexpected Throwable");
        }

        // Make sure once disk space is avail again, we can
        // cleanly close:
        dir.MaxSizeInBytes = 0;
        writer.Dispose(false);
        dir.Dispose();
    }
}
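// IndexerThread (used above and in the tests below) loops adding documents
// until the writer is closed, tracking successful adds in AddCount and the
// first unexpected exception in Error; Start/Join/IsAlive come from the test
// framework's thread wrapper. The real class lives elsewhere in this fixture;
// a rough sketch of the surface these tests rely on (field names and the loop
// body are assumptions):
//
//   internal class IndexerThread : ThreadJob
//   {
//       private readonly IndexWriter writer;
//       private readonly bool noErrors; // when true, any exception is recorded and fails the test
//       private readonly Func<string, string, FieldType, Field> newField;
//       public volatile int AddCount;
//       public Exception Error;
//
//       public IndexerThread(IndexWriter writer, bool noErrors, Func<string, string, FieldType, Field> newField)
//       {
//           this.writer = writer;
//           this.noErrors = noErrors;
//           this.newField = newField;
//       }
//
//       public override void Run()
//       {
//           Document doc = new Document();
//           doc.Add(newField("field", "aaa bbb ccc", TextField.TYPE_STORED));
//           while (true)
//           {
//               try { writer.AddDocument(doc); AddCount++; }
//               catch (Exception e)
//               {
//                   if (noErrors) { Error = e; }
//                   break;
//               }
//           }
//       }
//   }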
// Runs test, with multiple threads, using the specific
// failure to trigger an IOException
public virtual void TestMultipleThreadsFailure(IConcurrentMergeScheduler scheduler, MockDirectoryWrapper.Failure failure)
{
    int NUM_THREADS = 3;

    for (int iter = 0; iter < 2; iter++)
    {
        if (VERBOSE)
        {
            Console.WriteLine("TEST: iter=" + iter);
        }
        MockDirectoryWrapper dir = NewMockDirectory();
        var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMaxBufferedDocs(2)
            .SetMergeScheduler(scheduler)
            .SetMergePolicy(NewLogMergePolicy(4));
        IndexWriter writer = new IndexWriter(dir, config);
        scheduler.SetSuppressExceptions();

        IndexerThread[] threads = new IndexerThread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i] = new IndexerThread(writer, true, NewField);
        }

        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i].Start();
        }

        Thread.Sleep(10);

        dir.FailOn(failure);
        failure.SetDoFail();

        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i].Join();
            Assert.IsTrue(threads[i].Error == null, "hit unexpected Throwable");
        }

        bool success = false;
        try
        {
            writer.Dispose(false);
            success = true;
        }
        catch (IOException)
        {
            failure.ClearDoFail();
            writer.Dispose(false);
        }
        if (VERBOSE)
        {
            Console.WriteLine("TEST: success=" + success);
        }

        if (success)
        {
            IndexReader reader = DirectoryReader.Open(dir);
            Bits delDocs = MultiFields.GetLiveDocs(reader);
            for (int j = 0; j < reader.MaxDoc; j++)
            {
                if (delDocs == null || !delDocs.Get(j))
                {
                    reader.Document(j);
                    reader.GetTermVectors(j);
                }
            }
            reader.Dispose();
        }

        dir.Dispose();
    }
}
[Test]
public virtual void TestCloseWithThreads([ValueSource(typeof(ConcurrentMergeSchedulers), "Values")] IConcurrentMergeScheduler scheduler)
{
    int NUM_THREADS = 3;
    int numIterations = TEST_NIGHTLY ? 7 : 3;
    for (int iter = 0; iter < numIterations; iter++)
    {
        if (VERBOSE)
        {
            Console.WriteLine("\nTEST: iter=" + iter);
        }
        Directory dir = NewDirectory();
        var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMaxBufferedDocs(10)
            .SetMergeScheduler(scheduler)
            .SetMergePolicy(NewLogMergePolicy(4));
        IndexWriter writer = new IndexWriter(dir, config);
        scheduler.SetSuppressExceptions();

        IndexerThread[] threads = new IndexerThread[NUM_THREADS];
        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i] = new IndexerThread(writer, false, NewField);
        }

        for (int i = 0; i < NUM_THREADS; i++)
        {
            threads[i].Start();
        }

        bool done = false;
        while (!done)
        {
            Thread.Sleep(100);

            // only stop when at least one thread has added a doc
            for (int i = 0; i < NUM_THREADS; i++)
            {
                if (threads[i].AddCount > 0)
                {
                    done = true;
                    break;
                }
                else if (!threads[i].IsAlive)
                {
                    Assert.Fail("thread failed before indexing a single document");
                }
            }
        }

        if (VERBOSE)
        {
            Console.WriteLine("\nTEST: now close");
        }
        writer.Dispose(false);

        // Make sure threads that are adding docs are not hung:
        for (int i = 0; i < NUM_THREADS; i++)
        {
            // Without fix for LUCENE-1130: one of the
            // threads will hang
            threads[i].Join();
            if (threads[i].IsAlive)
            {
                Assert.Fail("thread seems to be hung");
            }
        }

        // Quick test to make sure index is not corrupt:
        IndexReader reader = DirectoryReader.Open(dir);
        DocsEnum tdocs = TestUtil.Docs(Random(), reader, "field", new BytesRef("aaa"), MultiFields.GetLiveDocs(reader), null, 0);
        int count = 0;
        while (tdocs.NextDoc() != DocIdSetIterator.NO_MORE_DOCS)
        {
            count++;
        }
        Assert.IsTrue(count > 0);
        reader.Dispose();

        dir.Dispose();
    }
}
/// <summary>
/// Make sure if modifier tries to commit but hits disk full that modifier
/// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
private void DoTestOperationsOnDiskFull(IConcurrentMergeScheduler scheduler, bool updates)
{
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;
    int END_COUNT = 144;

    // First build up a starting index:
    MockDirectoryWrapper startDir = NewMockDirectory();

    // TODO: find the resource leak that only occurs sometimes here.
    startDir.NoDeleteOpenFile = false;

    IndexWriter writer = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)));
    for (int i = 0; i < 157; i++)
    {
        Document d = new Document();
        d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES));
        d.Add(NewTextField("content", "aaa " + i, Field.Store.NO));
        if (DefaultCodecSupportsDocValues())
        {
            d.Add(new NumericDocValuesField("dv", i));
        }
        writer.AddDocument(d);
    }
    writer.Dispose();

    long diskUsage = startDir.SizeInBytes();
    long diskFree = diskUsage + 10;

    IOException err = null;

    bool done = false;

    // Iterate w/ ever increasing free disk space:
    while (!done)
    {
        if (VERBOSE)
        {
            Console.WriteLine("TEST: cycle");
        }
        MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random())));
        dir.PreventDoubleWrite = false;
        dir.AllowRandomFileNotFoundException = false;
        var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false))
            .SetMaxBufferedDocs(1000)
            .SetMaxBufferedDeleteTerms(1000)
            .SetMergeScheduler(scheduler);
        scheduler.SetSuppressExceptions();
        IndexWriter modifier = new IndexWriter(dir, config);

        // For each disk size, first try to commit against
        // dir that will hit random IOExceptions & disk
        // full; after, give it infinite disk space & turn
        // off random IOExceptions & retry w/ same reader:
        bool success = false;

        for (int x = 0; x < 2; x++)
        {
            if (VERBOSE)
            {
                Console.WriteLine("TEST: x=" + x);
            }

            double rate = 0.1;
            double diskRatio = ((double)diskFree) / diskUsage;
            long thisDiskFree;
            string testName;

            if (0 == x)
            {
                thisDiskFree = diskFree;
                if (diskRatio >= 2.0)
                {
                    rate /= 2;
                }
                if (diskRatio >= 4.0)
                {
                    rate /= 2;
                }
                if (diskRatio >= 6.0)
                {
                    rate = 0.0;
                }
                if (VERBOSE)
                {
                    Console.WriteLine("\ncycle: " + diskFree + " bytes");
                }
                testName = "disk full during reader.Dispose() @ " + thisDiskFree + " bytes";
                dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01;
            }
            else
            {
                thisDiskFree = 0;
                rate = 0.0;
                if (VERBOSE)
                {
                    Console.WriteLine("\ncycle: same writer: unlimited disk space");
                }
                testName = "reader re-use after disk full";
                dir.RandomIOExceptionRateOnOpen = 0.0;
            }

            dir.MaxSizeInBytes = thisDiskFree;
            dir.RandomIOExceptionRate = rate;

            try
            {
                if (0 == x)
                {
                    int docId = 12;
                    for (int i = 0; i < 13; i++)
                    {
                        if (updates)
                        {
                            Document d = new Document();
                            d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES));
                            d.Add(NewTextField("content", "bbb " + i, Field.Store.NO));
                            if (DefaultCodecSupportsDocValues())
                            {
                                d.Add(new NumericDocValuesField("dv", i));
                            }
                            modifier.UpdateDocument(new Term("id", Convert.ToString(docId)), d);
                        } // deletes
                        else
                        {
                            modifier.DeleteDocuments(new Term("id", Convert.ToString(docId)));
                            // modifier.setNorm(docId, "contents", (float)2.0);
                        }
                        docId += 12;
                    }
                }
                modifier.Dispose();
                success = true;
                if (0 == x)
                {
                    done = true;
                }
            }
            catch (IOException e)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("  hit IOException: " + e);
                    Console.WriteLine(e.StackTrace);
                }
                err = e;
                if (1 == x)
                {
                    Console.WriteLine(e.ToString());
                    Console.Write(e.StackTrace);
                    Assert.Fail(testName + " hit IOException after disk space was freed up");
                }
            }

            // prevent throwing a random exception here!!
            double randomIOExceptionRate = dir.RandomIOExceptionRate;
            long maxSizeInBytes = dir.MaxSizeInBytes;
            dir.RandomIOExceptionRate = 0.0;
            dir.RandomIOExceptionRateOnOpen = 0.0;
            dir.MaxSizeInBytes = 0;

            if (!success)
            {
                // Must force the close else the writer can have
                // open files which cause exc in MockRAMDir.close
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: now rollback");
                }
                modifier.Rollback();
            }

            // If the close() succeeded, make sure there are
            // no unreferenced files.
            if (success)
            {
                TestUtil.CheckIndex(dir);
                TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close");
            }
            dir.RandomIOExceptionRate = randomIOExceptionRate;
            dir.MaxSizeInBytes = maxSizeInBytes;

            // Finally, verify index is not corrupt, and, if
            // we succeeded, we see all docs changed, and if
            // we failed, we see either all docs or no docs
            // changed (transactional semantics):
            IndexReader newReader = null;
            try
            {
                newReader = DirectoryReader.Open(dir);
            }
            catch (IOException e)
            {
                Console.WriteLine(e.ToString());
                Console.Write(e.StackTrace);
                Assert.Fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
            }

            IndexSearcher searcher = NewSearcher(newReader);
            ScoreDoc[] hits = null;
            try
            {
                hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
            }
            catch (IOException e)
            {
                Console.WriteLine(e.ToString());
                Console.Write(e.StackTrace);
                Assert.Fail(testName + ": exception when searching: " + e);
            }
            int result2 = hits.Length;
            if (success)
            {
                if (x == 0 && result2 != END_COUNT)
                {
                    Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                }
                else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT)
                {
                    // It's possible that the first exception was
                    // "recoverable" wrt pending deletes, in which
                    // case the pending deletes are retained and
                    // then re-flushing (with plenty of disk
                    // space) will succeed in flushing the
                    // deletes:
                    Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }
            else
            {
                // On hitting exception we still may have added
                // all docs:
                if (result2 != START_COUNT && result2 != END_COUNT)
                {
                    Console.WriteLine(err.ToString());
                    Console.Write(err.StackTrace);
                    Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }

            newReader.Dispose();

            if (result2 == END_COUNT)
            {
                break;
            }
        }

        dir.Dispose();
        modifier.Dispose();

        // Try again with 10 more bytes of free space:
        diskFree += 10;
    }
    startDir.Dispose();
}