public void TestRollbackIntegrityWithBufferFlush() { Directory dir = new MockRAMDirectory(); IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 5; i++) { Document doc = new Document(); doc.Add(new Field("pk", i.ToString(), Field.Store.YES, Field.Index.ANALYZED_NO_NORMS)); w.AddDocument(doc); } w.Close(); // If buffer size is small enough to cause a flush, errors ensue... w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); w.SetMaxBufferedDocs(2); Term pkTerm = new Term("pk", ""); for (int i = 0; i < 3; i++) { Document doc = new Document(); String value = i.ToString(); doc.Add(new Field("pk", value, Field.Store.YES, Field.Index.ANALYZED_NO_NORMS)); doc.Add(new Field("text", "foo", Field.Store.YES, Field.Index.ANALYZED_NO_NORMS)); w.UpdateDocument(pkTerm.CreateTerm(value), doc); } w.Rollback(); IndexReader r = IndexReader.Open(dir, true); Assert.AreEqual(5, r.NumDocs(), "index should contain same number of docs post rollback"); r.Close(); dir.Close(); }
public virtual void TestRollbackIntegrityWithBufferFlush() { Directory dir = NewDirectory(); RandomIndexWriter rw = new RandomIndexWriter(Random(), dir); for (int i = 0; i < 5; i++) { Document doc = new Document(); doc.Add(NewStringField("pk", Convert.ToString(i), Field.Store.YES)); rw.AddDocument(doc); } rw.Dispose(); // If buffer size is small enough to cause a flush, errors ensue... IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetOpenMode(IndexWriterConfig.OpenMode_e.APPEND)); for (int i = 0; i < 3; i++) { Document doc = new Document(); string value = Convert.ToString(i); doc.Add(NewStringField("pk", value, Field.Store.YES)); doc.Add(NewStringField("text", "foo", Field.Store.YES)); w.UpdateDocument(new Term("pk", value), doc); } w.Rollback(); IndexReader r = DirectoryReader.Open(dir); Assert.AreEqual(5, r.NumDocs, "index should contain same number of docs post rollback"); r.Dispose(); dir.Dispose(); }
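Both versions of TestRollbackIntegrityWithBufferFlush assert the same contract: changes buffered (or even flushed) after the last commit are discarded by Rollback(), and a reader opened afterwards still sees only the previously committed documents. The following is a minimal sketch of that contract using the plain public API instead of the test-framework helpers (NewDirectory, RandomIndexWriter, MockAnalyzer); StandardAnalyzer, RAMDirectory and the LuceneVersion.LUCENE_48 constant are assumptions standing in for whatever analyzer, directory and version your code uses.

using System;
using Lucene.Net.Analysis.Standard;
using Lucene.Net.Documents;
using Lucene.Net.Index;
using Lucene.Net.Store;
using Lucene.Net.Util;

public static void RollbackDiscardsUncommittedChanges()
{
    Directory dir = new RAMDirectory();
    var writer = new IndexWriter(dir,
        new IndexWriterConfig(LuceneVersion.LUCENE_48, new StandardAnalyzer(LuceneVersion.LUCENE_48)));

    var committedDoc = new Document();
    committedDoc.Add(new StringField("pk", "0", Field.Store.YES));
    writer.AddDocument(committedDoc);
    writer.Commit();                       // this document survives the rollback

    var uncommittedDoc = new Document();
    uncommittedDoc.Add(new StringField("pk", "1", Field.Store.YES));
    writer.AddDocument(uncommittedDoc);    // buffered (and possibly flushed), never committed

    writer.Rollback();                     // discards the second document and closes the writer

    using (DirectoryReader reader = DirectoryReader.Open(dir))
    {
        Console.WriteLine(reader.NumDocs); // prints 1: only the committed document remains
    }
    dir.Dispose();
}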
public virtual void TestCommitOnCloseAbort() { Directory dir = NewDirectory(); IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10)); for (int i = 0; i < 14; i++) { TestIndexWriter.AddDoc(writer); } writer.Dispose(); Term searchTerm = new Term("content", "aaa"); IndexReader reader = DirectoryReader.Open(dir); IndexSearcher searcher = NewSearcher(reader); ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; Assert.AreEqual(14, hits.Length, "first number of hits"); reader.Dispose(); writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10)); for (int j = 0; j < 17; j++) { TestIndexWriter.AddDoc(writer); } // Delete all docs: writer.DeleteDocuments(searchTerm); reader = DirectoryReader.Open(dir); searcher = NewSearcher(reader); hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer"); reader.Dispose(); // Now, close the writer: writer.Rollback(); TestIndexWriter.AssertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()"); reader = DirectoryReader.Open(dir); searcher = NewSearcher(reader); hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; Assert.AreEqual(14, hits.Length, "saw changes after writer.abort"); reader.Dispose(); // Now make sure we can re-open the index, add docs, // and all is good: writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10)); // On abort, writer in fact may write to the same // segments_N file: if (dir is MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).PreventDoubleWrite = false; } for (int i = 0; i < 12; i++) { for (int j = 0; j < 17; j++) { TestIndexWriter.AddDoc(writer); } IndexReader r = DirectoryReader.Open(dir); searcher = NewSearcher(r); hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer"); r.Dispose(); } writer.Dispose(); IndexReader ir = DirectoryReader.Open(dir); searcher = NewSearcher(ir); hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; Assert.AreEqual(218, hits.Length, "didn't see changes after close"); ir.Dispose(); dir.Dispose(); }
public virtual void TestOpenPriorSnapshot() { // Never deletes a commit KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy(this); Directory dir = new MockRAMDirectory(); policy.dir = dir; IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED); writer.SetMaxBufferedDocs(2); for (int i = 0; i < 10; i++) { AddDoc(writer); if ((1 + i) % 2 == 0) writer.Commit(); } writer.Close(); System.Collections.ICollection commits = IndexReader.ListCommits(dir); Assert.AreEqual(6, commits.Count); IndexCommit lastCommit = null; System.Collections.IEnumerator it = commits.GetEnumerator(); while (it.MoveNext()) { IndexCommit commit = (IndexCommit) it.Current; if (lastCommit == null || commit.GetGeneration() > lastCommit.GetGeneration()) lastCommit = commit; } Assert.IsTrue(lastCommit != null); // Now add 1 doc and optimize writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED); AddDoc(writer); Assert.AreEqual(11, writer.NumDocs()); writer.Optimize(); writer.Close(); Assert.AreEqual(7, IndexReader.ListCommits(dir).Count); // Now open writer on the commit just before optimize: writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); Assert.AreEqual(10, writer.NumDocs()); // Should undo our rollback: writer.Rollback(); IndexReader r = IndexReader.Open(dir); // Still optimized, still 11 docs Assert.IsTrue(r.IsOptimized()); Assert.AreEqual(11, r.NumDocs()); r.Close(); writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit); Assert.AreEqual(10, writer.NumDocs()); // Commits the rollback: writer.Close(); // Now 8 because we made another commit Assert.AreEqual(8, IndexReader.ListCommits(dir).Count); r = IndexReader.Open(dir); // Not optimized because we rolled it back, and now only // 10 docs Assert.IsTrue(!r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); // Reoptimize writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED); writer.Optimize(); writer.Close(); r = IndexReader.Open(dir); Assert.IsTrue(r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); // Now open writer on the commit just before optimize, // but this time keeping only the last commit: writer = new IndexWriter(dir, new WhitespaceAnalyzer(), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit); Assert.AreEqual(10, writer.NumDocs()); // Reader still sees optimized index, because writer // opened on the prior commit has not yet committed: r = IndexReader.Open(dir); Assert.IsTrue(r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); writer.Close(); // Now reader sees unoptimized index: r = IndexReader.Open(dir); Assert.IsTrue(!r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); dir.Close(); }
public override void DoWork() { IndexWriter writer1 = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer1.SetMaxBufferedDocs(3); writer1.MergeFactor = 2; ((ConcurrentMergeScheduler) writer1.MergeScheduler).SetSuppressExceptions(); IndexWriter writer2 = new IndexWriter(dir2, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); // Intentionally use different params so flush/merge // happen @ different times writer2.SetMaxBufferedDocs(2); writer2.MergeFactor = 3; ((ConcurrentMergeScheduler) writer2.MergeScheduler).SetSuppressExceptions(); Update(writer1); Update(writer2); TestTransactions.doFail = true; try { lock (lock_Renamed) { try { writer1.PrepareCommit(); } catch (System.Exception t) { writer1.Rollback(); writer2.Rollback(); return ; } try { writer2.PrepareCommit(); } catch (System.Exception t) { writer1.Rollback(); writer2.Rollback(); return ; } writer1.Commit(); writer2.Commit(); } } finally { TestTransactions.doFail = false; } writer1.Close(); writer2.Close(); }
public override void DoWork() { IndexWriter writer1 = new IndexWriter(Dir1, ((IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(3)).SetMergeScheduler(new ConcurrentMergeScheduler()).SetMergePolicy(NewLogMergePolicy(2))); ((ConcurrentMergeScheduler)writer1.Config.MergeScheduler).SetSuppressExceptions(); // Intentionally use different params so flush/merge // happen @ different times IndexWriter writer2 = new IndexWriter(Dir2, ((IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2)).SetMergeScheduler(new ConcurrentMergeScheduler()).SetMergePolicy(NewLogMergePolicy(3))); ((ConcurrentMergeScheduler)writer2.Config.MergeScheduler).SetSuppressExceptions(); Update(writer1); Update(writer2); DoFail = true; try { lock (@lock) { try { writer1.PrepareCommit(); } catch (Exception t) { writer1.Rollback(); writer2.Rollback(); return; } try { writer2.PrepareCommit(); } catch (Exception t) { writer1.Rollback(); writer2.Rollback(); return; } writer1.Commit(); writer2.Commit(); } } finally { DoFail = false; } writer1.Dispose(); writer2.Dispose(); }
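The two DoWork variants above coordinate a pair of writers with a two-phase commit: PrepareCommit() on both, and if either preparation throws, Rollback() on both so that neither index advances. A stripped-down sketch of just that coordination follows; writer1 and writer2 are assumed to be already-open writers supplied by the caller, and locking/flush tuning from the originals is omitted.

using System;
using Lucene.Net.Index;

static void CommitBothOrRollBoth(IndexWriter writer1, IndexWriter writer2)
{
    try
    {
        // Phase 1: prepare both commits; nothing is visible to readers yet.
        writer1.PrepareCommit();
        writer2.PrepareCommit();
    }
    catch (Exception)
    {
        // Either prepare failed: abandon the pending commits on both writers.
        writer1.Rollback();
        writer2.Rollback();
        return;
    }
    // Phase 2: both prepares succeeded, so finish both commits.
    writer1.Commit();
    writer2.Commit();
}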
/// <summary> /// Make sure if modifier tries to commit but hits disk full that modifier /// remains consistent and usable. Similar to TestIndexReader.testDiskFull(). /// </summary> private void DoTestOperationsOnDiskFull(bool updates) { Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockDirectoryWrapper startDir = NewMockDirectory(); // TODO: find the resource leak that only occurs sometimes here. startDir.NoDeleteOpenFile = false; IndexWriter writer = new IndexWriter(startDir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false))); for (int i = 0; i < 157; i++) { Document d = new Document(); d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES)); d.Add(NewTextField("content", "aaa " + i, Field.Store.NO)); if (DefaultCodecSupportsDocValues()) { d.Add(new NumericDocValuesField("dv", i)); } writer.AddDocument(d); } writer.Dispose(); long diskUsage = startDir.SizeInBytes(); long diskFree = diskUsage + 10; IOException err = null; bool done = false; // Iterate w/ ever increasing free disk space: while (!done) { if (VERBOSE) { Console.WriteLine("TEST: cycle"); } MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory(startDir, NewIOContext(Random()))); dir.PreventDoubleWrite = false; dir.AllowRandomFileNotFoundException = false; IndexWriter modifier = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)).SetMaxBufferedDocs(1000).SetMaxBufferedDeleteTerms(1000).SetMergeScheduler(new ConcurrentMergeScheduler())); ((ConcurrentMergeScheduler)modifier.Config.MergeScheduler).SetSuppressExceptions(); // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: bool success = false; for (int x = 0; x < 2; x++) { if (VERBOSE) { Console.WriteLine("TEST: x=" + x); } double rate = 0.1; double diskRatio = ((double)diskFree) / diskUsage; long thisDiskFree; string testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (VERBOSE) { Console.WriteLine("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.Dispose() @ " + thisDiskFree + " bytes"; dir.RandomIOExceptionRateOnOpen = Random().NextDouble() * 0.01; } else { thisDiskFree = 0; rate = 0.0; if (VERBOSE) { Console.WriteLine("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; dir.RandomIOExceptionRateOnOpen = 0.0; } dir.MaxSizeInBytes = thisDiskFree; dir.RandomIOExceptionRate = rate; try { if (0 == x) { int docId = 12; for (int i = 0; i < 13; i++) { if (updates) { Document d = new Document(); d.Add(NewStringField("id", Convert.ToString(i), Field.Store.YES)); d.Add(NewTextField("content", "bbb " + i, Field.Store.NO)); if (DefaultCodecSupportsDocValues()) { d.Add(new NumericDocValuesField("dv", i)); } modifier.UpdateDocument(new Term("id", Convert.ToString(docId)), d); } // deletes else { modifier.DeleteDocuments(new Term("id", Convert.ToString(docId))); // modifier.setNorm(docId, "contents", (float)2.0); } docId += 12; } } modifier.Dispose(); success = true; if (0 == x) { done = true; } } catch (IOException e) { if (VERBOSE) { Console.WriteLine(" hit IOException: " + e); Console.WriteLine(e.StackTrace); } err = e; if (1 == x) { 
Console.WriteLine(e.ToString()); Console.Write(e.StackTrace); Assert.Fail(testName + " hit IOException after disk space was freed up"); } } // prevent throwing a random exception here!! double randomIOExceptionRate = dir.RandomIOExceptionRate; long maxSizeInBytes = dir.MaxSizeInBytes; dir.RandomIOExceptionRate = 0.0; dir.RandomIOExceptionRateOnOpen = 0.0; dir.MaxSizeInBytes = 0; if (!success) { // Must force the close else the writer can have // open files which cause exc in MockRAMDir.close if (VERBOSE) { Console.WriteLine("TEST: now rollback"); } modifier.Rollback(); } // If the close() succeeded, make sure there are // no unreferenced files. if (success) { TestUtil.CheckIndex(dir); TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close"); } dir.RandomIOExceptionRate = randomIOExceptionRate; dir.MaxSizeInBytes = maxSizeInBytes; // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = DirectoryReader.Open(dir); } catch (IOException e) { Console.WriteLine(e.ToString()); Console.Write(e.StackTrace); Assert.Fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } IndexSearcher searcher = NewSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; } catch (IOException e) { Console.WriteLine(e.ToString()); Console.Write(e.StackTrace); Assert.Fail(testName + ": exception when searching: " + e); } int result2 = hits.Length; if (success) { if (x == 0 && result2 != END_COUNT) { Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) { // It's possible that the first exception was // "recoverable" wrt pending deletes, in which // case the pending deletes are retained and // then re-flushing (with plenty of disk // space) will succeed in flushing the // deletes: Assert.Fail(testName + ": method did not throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { Console.WriteLine(err.ToString()); Console.Write(err.StackTrace); Assert.Fail(testName + ": method did throw exception but hits.Length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } newReader.Dispose(); if (result2 == END_COUNT) { break; } } dir.Dispose(); modifier.Dispose(); // Try again with 10 more bytes of free space: diskFree += 10; } startDir.Dispose(); }
public virtual void TestPrepareCommitRollback() { Directory dir = NewDirectory(); if (dir is MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).PreventDoubleWrite = false; } IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5))); writer.Commit(); for (int i = 0; i < 23; i++) { TestIndexWriter.AddDoc(writer); } DirectoryReader reader = DirectoryReader.Open(dir); Assert.AreEqual(0, reader.NumDocs); writer.PrepareCommit(); IndexReader reader2 = DirectoryReader.Open(dir); Assert.AreEqual(0, reader2.NumDocs); writer.Rollback(); IndexReader reader3 = DirectoryReader.OpenIfChanged(reader); Assert.IsNull(reader3); Assert.AreEqual(0, reader.NumDocs); Assert.AreEqual(0, reader2.NumDocs); reader.Dispose(); reader2.Dispose(); writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))); for (int i = 0; i < 17; i++) { TestIndexWriter.AddDoc(writer); } reader = DirectoryReader.Open(dir); Assert.AreEqual(0, reader.NumDocs); reader.Dispose(); writer.PrepareCommit(); reader = DirectoryReader.Open(dir); Assert.AreEqual(0, reader.NumDocs); reader.Dispose(); writer.Commit(); reader = DirectoryReader.Open(dir); Assert.AreEqual(17, reader.NumDocs); reader.Dispose(); writer.Dispose(); dir.Dispose(); }
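TestPrepareCommitRollback pins down the visibility rule behind that pattern: a PrepareCommit() is not visible to readers until Commit(), and Rollback() discards it entirely. A short sketch of the sequence, assuming writer is an already-open IndexWriter over dir with at least one prior commit (hypothetical names, using directives as in the first sketch):

static void PreparedCommitIsInvisibleUntilCommitted(IndexWriter writer, Directory dir)
{
    var doc = new Document();
    doc.Add(new StringField("id", "1", Field.Store.YES));
    writer.AddDocument(doc);
    writer.PrepareCommit();                   // phase one: files written, commit point not yet live

    using (DirectoryReader reader = DirectoryReader.Open(dir))
    {
        Console.WriteLine(reader.NumDocs);    // does not include the prepared document
    }

    writer.Rollback();                        // abandons the prepared commit and closes the writer
}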
public virtual void TestCommitOnCloseForceMerge() { Directory dir = NewDirectory(); // Must disable throwing exc on double-write: this // test uses IW.rollback which easily results in // writing to same file more than once if (dir is MockDirectoryWrapper) { ((MockDirectoryWrapper)dir).PreventDoubleWrite = false; } IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy(10))); for (int j = 0; j < 17; j++) { TestIndexWriter.AddDocWithIndex(writer, j); } writer.Dispose(); writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND)); writer.ForceMerge(1); // Open a reader before closing (commiting) the writer: DirectoryReader reader = DirectoryReader.Open(dir); // Reader should see index as multi-seg at this // point: Assert.IsTrue(reader.Leaves.Count > 1, "Reader incorrectly sees one segment"); reader.Dispose(); // Abort the writer: writer.Rollback(); TestIndexWriter.AssertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = DirectoryReader.Open(dir); // Reader should still see index as multi-segment Assert.IsTrue(reader.Leaves.Count > 1, "Reader incorrectly sees one segment"); reader.Dispose(); if (VERBOSE) { Console.WriteLine("TEST: do real full merge"); } writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND)); writer.ForceMerge(1); writer.Dispose(); if (VERBOSE) { Console.WriteLine("TEST: writer closed"); } TestIndexWriter.AssertNoUnreferencedFiles(dir, "aborted writer after forceMerge"); // Open a reader after aborting writer: reader = DirectoryReader.Open(dir); // Reader should see index as one segment Assert.AreEqual(1, reader.Leaves.Count, "Reader incorrectly sees more than one segment"); reader.Dispose(); dir.Dispose(); }
public override void DoWork() { var config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)) .SetMaxBufferedDocs(3) .SetMergeScheduler(new ConcurrentMergeScheduler()) .SetMergePolicy(NewLogMergePolicy(2)); IndexWriter writer1 = new IndexWriter(dir1, config); ((IConcurrentMergeScheduler)writer1.Config.MergeScheduler).SetSuppressExceptions(); // Intentionally use different params so flush/merge // happen @ different times var config2 = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)) .SetMaxBufferedDocs(2) .SetMergeScheduler(new ConcurrentMergeScheduler()) .SetMergePolicy(NewLogMergePolicy(3)); IndexWriter writer2 = new IndexWriter(dir2, config2); ((IConcurrentMergeScheduler)writer2.Config.MergeScheduler).SetSuppressExceptions(); Update(writer1); Update(writer2); doFail = true; try { UninterruptableMonitor.Enter(@lock); try { try { writer1.PrepareCommit(); } catch (Exception t) when(t.IsThrowable()) { writer1.Rollback(); writer2.Rollback(); return; } try { writer2.PrepareCommit(); } catch (Exception t) when(t.IsThrowable()) { writer1.Rollback(); writer2.Rollback(); return; } writer1.Commit(); writer2.Commit(); } finally { UninterruptableMonitor.Exit(@lock); } } finally { doFail = false; } writer1.Dispose(); writer2.Dispose(); }
public virtual void TestAddDocumentOnDiskFull() { for (int pass = 0; pass < 2; pass++) { if (VERBOSE) { Console.WriteLine("TEST: pass=" + pass); } bool doAbort = pass == 1; long diskFree = TestUtil.NextInt(Random(), 100, 300); while (true) { if (VERBOSE) { Console.WriteLine("TEST: cycle: diskFree=" + diskFree); } MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory()); dir.MaxSizeInBytes = diskFree; IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))); MergeScheduler ms = writer.Config.MergeScheduler; if (ms is ConcurrentMergeScheduler) { // this test intentionally produces exceptions // in the threads that CMS launches; we don't // want to pollute test output with these. ((ConcurrentMergeScheduler)ms).SetSuppressExceptions(); } bool hitError = false; try { for (int i = 0; i < 200; i++) { AddDoc(writer); } if (VERBOSE) { Console.WriteLine("TEST: done adding docs; now commit"); } writer.Commit(); } catch (IOException e) { if (VERBOSE) { Console.WriteLine("TEST: exception on addDoc"); Console.WriteLine(e.StackTrace); } hitError = true; } if (hitError) { if (doAbort) { if (VERBOSE) { Console.WriteLine("TEST: now rollback"); } writer.Rollback(); } else { try { if (VERBOSE) { Console.WriteLine("TEST: now close"); } writer.Dispose(); } catch (IOException e) { if (VERBOSE) { Console.WriteLine("TEST: exception on close; retry w/ no disk space limit"); Console.WriteLine(e.StackTrace); } dir.MaxSizeInBytes = 0; writer.Dispose(); } } //TestUtil.SyncConcurrentMerges(ms); if (TestUtil.AnyFilesExceptWriteLock(dir)) { TestIndexWriter.AssertNoUnreferencedFiles(dir, "after disk full during addDocument"); // Make sure reader can open the index: DirectoryReader.Open(dir).Dispose(); } dir.Dispose(); // Now try again w/ more space: diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 400, 600) : TestUtil.NextInt(Random(), 3000, 5000); } else { //TestUtil.SyncConcurrentMerges(writer); dir.MaxSizeInBytes = 0; writer.Dispose(); dir.Dispose(); break; } } } }
public virtual void TestOpenPriorSnapshot() { // Never deletes a commit KeepAllDeletionPolicy policy = new KeepAllDeletionPolicy(this); Directory dir = new MockRAMDirectory(); policy.dir = dir; IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, null); writer.SetMaxBufferedDocs(2); for (int i = 0; i < 10; i++) { AddDoc(writer); if ((1 + i) % 2 == 0) { writer.Commit(null); } } writer.Close(); ICollection <IndexCommit> commits = IndexReader.ListCommits(dir, null); Assert.AreEqual(6, commits.Count); IndexCommit lastCommit = null; System.Collections.IEnumerator it = commits.GetEnumerator(); while (it.MoveNext()) { IndexCommit commit = (IndexCommit)it.Current; if (lastCommit == null || commit.Generation > lastCommit.Generation) { lastCommit = commit; } } Assert.IsTrue(lastCommit != null); // Now add 1 doc and optimize writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, null); AddDoc(writer); Assert.AreEqual(11, writer.NumDocs(null)); writer.Optimize(null); writer.Close(); Assert.AreEqual(7, IndexReader.ListCommits(dir, null).Count); // Now open writer on the commit just before optimize: writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit, null); Assert.AreEqual(10, writer.NumDocs(null)); // Should undo our rollback: writer.Rollback(null); IndexReader r = IndexReader.Open(dir, true, null); // Still optimized, still 11 docs Assert.IsTrue(r.IsOptimized()); Assert.AreEqual(11, r.NumDocs()); r.Close(); writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, lastCommit, null); Assert.AreEqual(10, writer.NumDocs(null)); // Commits the rollback: writer.Close(); // Now 8 because we made another commit Assert.AreEqual(8, IndexReader.ListCommits(dir, null).Count); r = IndexReader.Open(dir, true, null); // Not optimized because we rolled it back, and now only // 10 docs Assert.IsTrue(!r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); // Reoptimize writer = new IndexWriter(dir, new WhitespaceAnalyzer(), policy, IndexWriter.MaxFieldLength.LIMITED, null); writer.Optimize(null); writer.Close(); r = IndexReader.Open(dir, true, null); Assert.IsTrue(r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); // Now open writer on the commit just before optimize, // but this time keeping only the last commit: writer = new IndexWriter(dir, new WhitespaceAnalyzer(), new KeepOnlyLastCommitDeletionPolicy(), IndexWriter.MaxFieldLength.LIMITED, lastCommit, null); Assert.AreEqual(10, writer.NumDocs(null)); // Reader still sees optimized index, because writer // opened on the prior commit has not yet committed: r = IndexReader.Open(dir, true, null); Assert.IsTrue(r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); writer.Close(); // Now reader sees unoptimized index: r = IndexReader.Open(dir, true, null); Assert.IsTrue(!r.IsOptimized()); Assert.AreEqual(10, r.NumDocs()); r.Close(); dir.Close(); }
public virtual void TestPrepareCommitRollback() { MockRAMDirectory dir = new MockRAMDirectory(); dir.SetPreventDoubleWrite(false); IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.SetMaxBufferedDocs(2); writer.SetMergeFactor(5); for (int i = 0; i < 23; i++) AddDoc(writer); IndexReader reader = IndexReader.Open(dir); Assert.AreEqual(0, reader.NumDocs()); writer.PrepareCommit(); IndexReader reader2 = IndexReader.Open(dir); Assert.AreEqual(0, reader2.NumDocs()); writer.Rollback(); IndexReader reader3 = reader.Reopen(); Assert.AreEqual(0, reader.NumDocs()); Assert.AreEqual(0, reader2.NumDocs()); Assert.AreEqual(0, reader3.NumDocs()); reader.Close(); reader2.Close(); writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); for (int i = 0; i < 17; i++) AddDoc(writer); Assert.AreEqual(0, reader3.NumDocs()); reader3.Close(); reader = IndexReader.Open(dir); Assert.AreEqual(0, reader.NumDocs()); reader.Close(); writer.PrepareCommit(); reader = IndexReader.Open(dir); Assert.AreEqual(0, reader.NumDocs()); reader.Close(); writer.Commit(); reader = IndexReader.Open(dir); Assert.AreEqual(17, reader.NumDocs()); reader.Close(); writer.Close(); dir.Close(); }
public virtual void Test() { MockDirectoryWrapper dir = NewMockFSDirectory(CreateTempDir("TestIndexWriterOutOfFileDescriptors")); dir.PreventDoubleWrite = false; double rate = Random().NextDouble() * 0.01; //System.out.println("rate=" + rate); dir.RandomIOExceptionRateOnOpen = rate; int iters = AtLeast(20); LineFileDocs docs = new LineFileDocs(Random(), DefaultCodecSupportsDocValues()); IndexReader r = null; DirectoryReader r2 = null; bool any = false; MockDirectoryWrapper dirCopy = null; int lastNumDocs = 0; for (int iter = 0; iter < iters; iter++) { IndexWriter w = null; if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter); } try { MockAnalyzer analyzer = new MockAnalyzer(Random()); analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH); IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); if (VERBOSE) { // Do this ourselves instead of relying on LTC so // we see incrementing messageID: iwc.InfoStream = new PrintStreamInfoStream(Console.Out); } var ms = iwc.MergeScheduler; if (ms is IConcurrentMergeScheduler) { ((IConcurrentMergeScheduler)ms).SetSuppressExceptions(); } w = new IndexWriter(dir, iwc); if (r != null && Random().Next(5) == 3) { if (Random().NextBoolean()) { if (VERBOSE) { Console.WriteLine("TEST: addIndexes IR[]"); } w.AddIndexes(new IndexReader[] { r }); } else { if (VERBOSE) { Console.WriteLine("TEST: addIndexes Directory[]"); } w.AddIndexes(new Directory[] { dirCopy }); } } else { if (VERBOSE) { Console.WriteLine("TEST: addDocument"); } w.AddDocument(docs.NextDoc()); } dir.RandomIOExceptionRateOnOpen = 0.0; w.Dispose(); w = null; // NOTE: this is O(N^2)! Only enable for temporary debugging: //dir.setRandomIOExceptionRateOnOpen(0.0); //TestUtil.CheckIndex(dir); //dir.setRandomIOExceptionRateOnOpen(rate); // Verify numDocs only increases, to catch IndexWriter // accidentally deleting the index: dir.RandomIOExceptionRateOnOpen = 0.0; Assert.IsTrue(DirectoryReader.IndexExists(dir)); if (r2 == null) { r2 = DirectoryReader.Open(dir); } else { DirectoryReader r3 = DirectoryReader.OpenIfChanged(r2); if (r3 != null) { r2.Dispose(); r2 = r3; } } Assert.IsTrue(r2.NumDocs >= lastNumDocs, "before=" + lastNumDocs + " after=" + r2.NumDocs); lastNumDocs = r2.NumDocs; //System.out.println("numDocs=" + lastNumDocs); dir.RandomIOExceptionRateOnOpen = rate; any = true; if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter + ": success"); } } catch (IOException ioe) { if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter + ": exception"); Console.WriteLine(ioe.ToString()); Console.Write(ioe.StackTrace); } if (w != null) { // NOTE: leave random IO exceptions enabled here, // to verify that rollback does not try to write // anything: w.Rollback(); } } if (any && r == null && Random().NextBoolean()) { // Make a copy of a non-empty index so we can use // it to addIndexes later: dir.RandomIOExceptionRateOnOpen = 0.0; r = DirectoryReader.Open(dir); dirCopy = NewMockFSDirectory(CreateTempDir("TestIndexWriterOutOfFileDescriptors.copy")); HashSet<string> files = new HashSet<string>(); foreach (string file in dir.ListAll()) { dir.Copy(dirCopy, file, file, IOContext.DEFAULT); files.Add(file); } dirCopy.Sync(files); // Have IW kiss the dir so we remove any leftover // files ... 
we can easily have leftover files at // the time we take a copy because we are holding // open a reader: (new IndexWriter(dirCopy, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())))).Dispose(); dirCopy.RandomIOExceptionRate = rate; dir.RandomIOExceptionRateOnOpen = rate; } } if (r2 != null) { r2.Dispose(); } if (r != null) { r.Dispose(); dirCopy.Dispose(); } dir.Dispose(); }
public virtual void TestAddDocumentOnDiskFull() { for (int pass = 0; pass < 2; pass++) { if (VERBOSE) { Console.WriteLine("TEST: pass=" + pass); } bool doAbort = pass == 1; long diskFree = TestUtil.NextInt(Random(), 100, 300); while (true) { if (VERBOSE) { Console.WriteLine("TEST: cycle: diskFree=" + diskFree); } MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory()); dir.MaxSizeInBytes = diskFree; IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))); IMergeScheduler ms = writer.Config.MergeScheduler; if (ms is IConcurrentMergeScheduler) { // this test intentionally produces exceptions // in the threads that CMS launches; we don't // want to pollute test output with these. ((IConcurrentMergeScheduler)ms).SetSuppressExceptions(); } bool hitError = false; try { for (int i = 0; i < 200; i++) { AddDoc(writer); } if (VERBOSE) { Console.WriteLine("TEST: done adding docs; now commit"); } writer.Commit(); } catch (IOException e) { if (VERBOSE) { Console.WriteLine("TEST: exception on addDoc"); Console.WriteLine(e.StackTrace); } hitError = true; } if (hitError) { if (doAbort) { if (VERBOSE) { Console.WriteLine("TEST: now rollback"); } writer.Rollback(); } else { try { if (VERBOSE) { Console.WriteLine("TEST: now close"); } writer.Dispose(); } catch (IOException e) { if (VERBOSE) { Console.WriteLine("TEST: exception on close; retry w/ no disk space limit"); Console.WriteLine(e.StackTrace); } dir.MaxSizeInBytes = 0; writer.Dispose(); } } //TestUtil.SyncConcurrentMerges(ms); if (TestUtil.AnyFilesExceptWriteLock(dir)) { TestIndexWriter.AssertNoUnreferencedFiles(dir, "after disk full during addDocument"); // Make sure reader can open the index: DirectoryReader.Open(dir).Dispose(); } dir.Dispose(); // Now try again w/ more space: diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 400, 600) : TestUtil.NextInt(Random(), 3000, 5000); } else { //TestUtil.SyncConcurrentMerges(writer); dir.MaxSizeInBytes = 0; writer.Dispose(); dir.Dispose(); break; } } } }
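Both disk-full variants above exercise the two recovery options when a write session fails: abort it with Rollback(), or resolve the problem (the tests lift dir.MaxSizeInBytes) and retry Dispose(), which commits on close. A generic sketch of that decision, assuming writer is an open IndexWriter and the space problem can be fixed between the two attempts (using directives as in the first sketch, plus System.IO for IOException):

static void CloseOrRollback(IndexWriter writer, bool doAbort)
{
    if (doAbort)
    {
        writer.Rollback();   // abandon everything since the last commit
        return;
    }
    try
    {
        writer.Dispose();    // commits buffered changes on close
    }
    catch (IOException)
    {
        // Free some disk space here first (the tests set dir.MaxSizeInBytes = 0),
        // then retry: the writer is still usable after the failed close.
        writer.Dispose();
    }
}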
public virtual void TestDeleteAllNRT() { Directory dir = NewDirectory(); IndexWriter modifier = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random(), MockTokenizer.WHITESPACE, false)).SetMaxBufferedDocs(2).SetMaxBufferedDeleteTerms(2)); int id = 0; int value = 100; for (int i = 0; i < 7; i++) { AddDoc(modifier, ++id, value); } modifier.Commit(); IndexReader reader = modifier.Reader; Assert.AreEqual(7, reader.NumDocs); reader.Dispose(); AddDoc(modifier, ++id, value); AddDoc(modifier, ++id, value); // Delete all modifier.DeleteAll(); reader = modifier.Reader; Assert.AreEqual(0, reader.NumDocs); reader.Dispose(); // Roll it back modifier.Rollback(); modifier.Dispose(); // Validate that the docs are still there reader = DirectoryReader.Open(dir); Assert.AreEqual(7, reader.NumDocs); reader.Dispose(); dir.Dispose(); }
public override void DoWork() { var config = outerInstance.NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())) .SetMaxBufferedDocs(3) .SetMergeScheduler(newScheduler1()) .SetMergePolicy(NewLogMergePolicy(2)); IndexWriter writer1 = new IndexWriter(dir1, config); ((IConcurrentMergeScheduler)writer1.Config.MergeScheduler).SetSuppressExceptions(); // Intentionally use different params so flush/merge // happen @ different times var config2 = outerInstance.NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())) .SetMaxBufferedDocs(2) .SetMergeScheduler(newScheduler2()) .SetMergePolicy(NewLogMergePolicy(3)); IndexWriter writer2 = new IndexWriter(dir2, config2); ((IConcurrentMergeScheduler)writer2.Config.MergeScheduler).SetSuppressExceptions(); Update(writer1); Update(writer2); DoFail = true; try { lock (@lock) { try { writer1.PrepareCommit(); } catch (Exception) { writer1.Rollback(); writer2.Rollback(); return; } try { writer2.PrepareCommit(); } catch (Exception) { writer1.Rollback(); writer2.Rollback(); return; } writer1.Commit(); writer2.Commit(); } } finally { DoFail = false; } writer1.Dispose(); writer2.Dispose(); }
public virtual void TestDeleteAllNRT() { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true); modifier.SetMaxBufferedDocs(2); modifier.SetMaxBufferedDeleteTerms(2); int id = 0; int value_Renamed = 100; for (int i = 0; i < 7; i++) { AddDoc(modifier, ++id, value_Renamed); } modifier.Commit(); IndexReader reader = modifier.GetReader(); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); AddDoc(modifier, ++id, value_Renamed); AddDoc(modifier, ++id, value_Renamed); // Delete all modifier.DeleteAll(); reader = modifier.GetReader(); Assert.AreEqual(0, reader.NumDocs()); reader.Close(); // Roll it back modifier.Rollback(); modifier.Close(); // Validate that the docs are still there reader = IndexReader.Open(dir); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); dir.Close(); }
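Both DeleteAll tests rely on the same property: DeleteAll() is just another buffered change, so it only becomes permanent at the next commit, and Rollback() restores the last committed point even though a near-real-time reader briefly saw zero documents. A compact sketch with hypothetical modifier/dir names, assuming the last commit contained some documents (using directives as in the first sketch):

static void DeleteAllCanBeRolledBack(IndexWriter modifier, Directory dir)
{
    modifier.DeleteAll();                     // buffered delete-all; an NRT reader would now see 0 docs
    modifier.Rollback();                      // abandon it; the last commit is untouched

    using (DirectoryReader reader = DirectoryReader.Open(dir))
    {
        Console.WriteLine(reader.NumDocs);    // the previously committed documents are still there
    }
}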
public virtual void TestOpenPriorSnapshot() { Directory dir = NewDirectory(); IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetIndexDeletionPolicy(new KeepAllDeletionPolicy(this, dir)).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(10))); KeepAllDeletionPolicy policy = (KeepAllDeletionPolicy)writer.Config.IndexDeletionPolicy; for (int i = 0; i < 10; i++) { AddDoc(writer); if ((1 + i) % 2 == 0) { writer.Commit(); } } writer.Dispose(); ICollection <IndexCommit> commits = DirectoryReader.ListCommits(dir); Assert.AreEqual(5, commits.Count); IndexCommit lastCommit = null; foreach (IndexCommit commit in commits) { if (lastCommit == null || commit.Generation > lastCommit.Generation) { lastCommit = commit; } } Assert.IsTrue(lastCommit != null); // Now add 1 doc and merge writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetIndexDeletionPolicy(policy)); AddDoc(writer); Assert.AreEqual(11, writer.NumDocs); writer.ForceMerge(1); writer.Dispose(); Assert.AreEqual(6, DirectoryReader.ListCommits(dir).Count); // Now open writer on the commit just before merge: writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetIndexDeletionPolicy(policy).SetIndexCommit(lastCommit)); Assert.AreEqual(10, writer.NumDocs); // Should undo our rollback: writer.Rollback(); DirectoryReader r = DirectoryReader.Open(dir); // Still merged, still 11 docs Assert.AreEqual(1, r.Leaves.Count); Assert.AreEqual(11, r.NumDocs); r.Dispose(); writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetIndexDeletionPolicy(policy).SetIndexCommit(lastCommit)); Assert.AreEqual(10, writer.NumDocs); // Commits the rollback: writer.Dispose(); // Now 8 because we made another commit Assert.AreEqual(7, DirectoryReader.ListCommits(dir).Count); r = DirectoryReader.Open(dir); // Not fully merged because we rolled it back, and now only // 10 docs Assert.IsTrue(r.Leaves.Count > 1); Assert.AreEqual(10, r.NumDocs); r.Dispose(); // Re-merge writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetIndexDeletionPolicy(policy)); writer.ForceMerge(1); writer.Dispose(); r = DirectoryReader.Open(dir); Assert.AreEqual(1, r.Leaves.Count); Assert.AreEqual(10, r.NumDocs); r.Dispose(); // Now open writer on the commit just before merging, // but this time keeping only the last commit: writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetIndexCommit(lastCommit)); Assert.AreEqual(10, writer.NumDocs); // Reader still sees fully merged index, because writer // opened on the prior commit has not yet committed: r = DirectoryReader.Open(dir); Assert.AreEqual(1, r.Leaves.Count); Assert.AreEqual(10, r.NumDocs); r.Dispose(); writer.Dispose(); // Now reader sees not-fully-merged index: r = DirectoryReader.Open(dir); Assert.IsTrue(r.Leaves.Count > 1); Assert.AreEqual(10, r.NumDocs); r.Dispose(); dir.Dispose(); }
public virtual void TestUpdateOldSegments() { Codec[] oldCodecs = new Codec[] { new Lucene40RWCodec(), new Lucene41RWCodec(), new Lucene42RWCodec(), new Lucene45RWCodec() }; Directory dir = NewDirectory(); bool oldValue = OLD_FORMAT_IMPERSONATION_IS_ACTIVE; // create a segment with an old Codec IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())); conf.SetCodec(oldCodecs[Random().Next(oldCodecs.Length)]); OLD_FORMAT_IMPERSONATION_IS_ACTIVE = true; IndexWriter writer = new IndexWriter(dir, conf); Document doc = new Document(); doc.Add(new StringField("id", "doc", Store.NO)); doc.Add(new NumericDocValuesField("f", 5)); writer.AddDocument(doc); writer.Dispose(); conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())); writer = new IndexWriter(dir, conf); writer.UpdateNumericDocValue(new Term("id", "doc"), "f", 4L); OLD_FORMAT_IMPERSONATION_IS_ACTIVE = false; try { writer.Dispose(); Assert.Fail("should not have succeeded to update a segment written with an old Codec"); } catch (System.NotSupportedException e) { writer.Rollback(); } finally { OLD_FORMAT_IMPERSONATION_IS_ACTIVE = oldValue; } dir.Dispose(); }
public virtual void Test() { MockDirectoryWrapper dir = NewMockFSDirectory(CreateTempDir("TestIndexWriterOutOfFileDescriptors")); dir.PreventDoubleWrite = false; double rate = Random().NextDouble() * 0.01; //System.out.println("rate=" + rate); dir.RandomIOExceptionRateOnOpen = rate; int iters = AtLeast(20); LineFileDocs docs = new LineFileDocs(Random(), DefaultCodecSupportsDocValues()); IndexReader r = null; DirectoryReader r2 = null; bool any = false; MockDirectoryWrapper dirCopy = null; int lastNumDocs = 0; for (int iter = 0; iter < iters; iter++) { IndexWriter w = null; if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter); } try { MockAnalyzer analyzer = new MockAnalyzer(Random()); analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH); IndexWriterConfig iwc = NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer); if (VERBOSE) { // Do this ourselves instead of relying on LTC so // we see incrementing messageID: iwc.InfoStream = new PrintStreamInfoStream(Console.Out); } MergeScheduler ms = iwc.MergeScheduler; if (ms is ConcurrentMergeScheduler) { ((ConcurrentMergeScheduler)ms).SetSuppressExceptions(); } w = new IndexWriter(dir, iwc); if (r != null && Random().Next(5) == 3) { if (Random().NextBoolean()) { if (VERBOSE) { Console.WriteLine("TEST: addIndexes IR[]"); } w.AddIndexes(new IndexReader[] { r }); } else { if (VERBOSE) { Console.WriteLine("TEST: addIndexes Directory[]"); } w.AddIndexes(new Directory[] { dirCopy }); } } else { if (VERBOSE) { Console.WriteLine("TEST: addDocument"); } w.AddDocument(docs.NextDoc()); } dir.RandomIOExceptionRateOnOpen = 0.0; w.Dispose(); w = null; // NOTE: this is O(N^2)! Only enable for temporary debugging: //dir.setRandomIOExceptionRateOnOpen(0.0); //TestUtil.CheckIndex(dir); //dir.setRandomIOExceptionRateOnOpen(rate); // Verify numDocs only increases, to catch IndexWriter // accidentally deleting the index: dir.RandomIOExceptionRateOnOpen = 0.0; Assert.IsTrue(DirectoryReader.IndexExists(dir)); if (r2 == null) { r2 = DirectoryReader.Open(dir); } else { DirectoryReader r3 = DirectoryReader.OpenIfChanged(r2); if (r3 != null) { r2.Dispose(); r2 = r3; } } Assert.IsTrue(r2.NumDocs >= lastNumDocs, "before=" + lastNumDocs + " after=" + r2.NumDocs); lastNumDocs = r2.NumDocs; //System.out.println("numDocs=" + lastNumDocs); dir.RandomIOExceptionRateOnOpen = rate; any = true; if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter + ": success"); } } catch (IOException ioe) { if (VERBOSE) { Console.WriteLine("TEST: iter=" + iter + ": exception"); Console.WriteLine(ioe.ToString()); Console.Write(ioe.StackTrace); } if (w != null) { // NOTE: leave random IO exceptions enabled here, // to verify that rollback does not try to write // anything: w.Rollback(); } } if (any && r == null && Random().NextBoolean()) { // Make a copy of a non-empty index so we can use // it to addIndexes later: dir.RandomIOExceptionRateOnOpen = 0.0; r = DirectoryReader.Open(dir); dirCopy = NewMockFSDirectory(CreateTempDir("TestIndexWriterOutOfFileDescriptors.copy")); HashSet <string> files = new HashSet <string>(); foreach (string file in dir.ListAll()) { dir.Copy(dirCopy, file, file, IOContext.DEFAULT); files.Add(file); } dirCopy.Sync(files); // Have IW kiss the dir so we remove any leftover // files ... 
we can easily have leftover files at // the time we take a copy because we are holding // open a reader: (new IndexWriter(dirCopy, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())))).Dispose(); dirCopy.RandomIOExceptionRate = rate; dir.RandomIOExceptionRateOnOpen = rate; } } if (r2 != null) { r2.Dispose(); } if (r != null) { r.Dispose(); dirCopy.Dispose(); } dir.Dispose(); }
public virtual void TestSameFieldNameForPostingAndDocValue() { // LUCENE-5192: FieldInfos.Builder neglected to update // globalFieldNumbers.docValuesType map if the field existed, resulting in // potentially adding the same field with different DV types. Directory dir = NewDirectory(); IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())); IndexWriter writer = new IndexWriter(dir, conf); Document doc = new Document(); doc.Add(new StringField("f", "mock-value", Field.Store.NO)); doc.Add(new NumericDocValuesField("f", 5)); writer.AddDocument(doc); writer.Commit(); doc = new Document(); doc.Add(new BinaryDocValuesField("f", new BytesRef("mock"))); try { writer.AddDocument(doc); Assert.Fail("should not have succeeded to add a field with different DV type than what already exists"); } catch (System.ArgumentException e) { writer.Rollback(); } dir.Dispose(); }
// LUCENE-4853 // [Test] // LUCENENET NOTE: For now, we are overriding this test in every subclass to pull it into the right context for the subclass public virtual void TestHugeBinaryValues() { Analyzer analyzer = new MockAnalyzer(Random()); // FSDirectory because SimpleText will consume gobbs of // space when storing big binary values: Directory d = NewFSDirectory(CreateTempDir("hugeBinaryValues")); bool doFixed = Random().NextBoolean(); int numDocs; int fixedLength = 0; if (doFixed) { // Sometimes make all values fixed length since some // codecs have different code paths for this: numDocs = TestUtil.NextInt(Random(), 10, 20); fixedLength = TestUtil.NextInt(Random(), 65537, 256 * 1024); } else { numDocs = TestUtil.NextInt(Random(), 100, 200); } IndexWriter w = new IndexWriter(d, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer)); var docBytes = new List<byte[]>(); long totalBytes = 0; for (int docID = 0; docID < numDocs; docID++) { // we don't use RandomIndexWriter because it might add // more docvalues than we expect !!!! // Must be > 64KB in size to ensure more than 2 pages in // PagedBytes would be needed: int numBytes; if (doFixed) { numBytes = fixedLength; } else if (docID == 0 || Random().Next(5) == 3) { numBytes = TestUtil.NextInt(Random(), 65537, 3 * 1024 * 1024); } else { numBytes = TestUtil.NextInt(Random(), 1, 1024 * 1024); } totalBytes += numBytes; if (totalBytes > 5 * 1024 * 1024) { break; } var bytes = new byte[numBytes]; Random().NextBytes(bytes); docBytes.Add(bytes); Document doc = new Document(); BytesRef b = new BytesRef(bytes); b.Length = bytes.Length; doc.Add(new BinaryDocValuesField("field", b)); doc.Add(new StringField("id", "" + docID, Field.Store.YES)); try { w.AddDocument(doc); } catch (System.ArgumentException iae) { if (iae.Message.IndexOf("is too large") == -1) { throw iae; } else { // OK: some codecs can't handle binary DV > 32K Assert.IsFalse(CodecAcceptsHugeBinaryValues("field")); w.Rollback(); d.Dispose(); return; } } } DirectoryReader r; try { r = w.Reader; } catch (System.ArgumentException iae) { if (iae.Message.IndexOf("is too large") == -1) { throw iae; } else { Assert.IsFalse(CodecAcceptsHugeBinaryValues("field")); // OK: some codecs can't handle binary DV > 32K w.Rollback(); d.Dispose(); return; } } w.Dispose(); AtomicReader ar = SlowCompositeReaderWrapper.Wrap(r); BinaryDocValues s = FieldCache.DEFAULT.GetTerms(ar, "field", false); for (int docID = 0; docID < docBytes.Count; docID++) { Document doc = ar.Document(docID); BytesRef bytes = new BytesRef(); s.Get(docID, bytes); var expected = docBytes[Convert.ToInt32(doc.Get("id"))]; Assert.AreEqual(expected.Length, bytes.Length); Assert.AreEqual(new BytesRef(expected), bytes); } Assert.IsTrue(CodecAcceptsHugeBinaryValues("field")); ar.Dispose(); d.Dispose(); }
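The last few examples (TestUpdateOldSegments, TestSameFieldNameForPostingAndDocValue, TestHugeBinaryValues) use Rollback() as failure cleanup: when an add or update throws, rolling back releases the writer without committing a partially applied change. A generic sketch of that pattern with a hypothetical TryAddAndCommit helper, not taken from the examples themselves (using directives as in the first sketch):

static bool TryAddAndCommit(IndexWriter writer, Document doc)
{
    try
    {
        writer.AddDocument(doc);
        writer.Commit();
        return true;
    }
    catch (Exception)
    {
        // Something went wrong (disk full, incompatible field change, value too
        // large for the codec, ...): abandon the buffered change and close the
        // writer instead of leaving a half-applied update behind.
        writer.Rollback();
        return false;
    }
}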