public virtual void TestWriterAfterCrash() {
    IndexWriter writer = InitIndex();
    MockRAMDirectory dir = (MockRAMDirectory) writer.GetDirectory();
    dir.SetPreventDoubleWrite(false);
    Crash(writer);
    writer = InitIndex(dir);
    writer.Close();

    IndexReader reader = IndexReader.Open(dir);
    Assert.IsTrue(reader.NumDocs() < 314);
}
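// The InitIndex/Crash helpers used above are assumed but not shown in this
// excerpt. A minimal sketch of what they presumably do (hypothetical; the real
// helpers live elsewhere in this test class): InitIndex opens an IndexWriter
// on a MockRAMDirectory and buffers a batch of docs, and Crash uses
// MockRAMDirectory.Crash() to simulate an OS/machine crash by discarding
// unsynced writes. Two rounds of 157 buffered docs, some lost in the crash,
// would explain the NumDocs() < 314 assertion above.
private IndexWriter InitIndex() {
    return InitIndex(new MockRAMDirectory());
}

private IndexWriter InitIndex(MockRAMDirectory dir) {
    // Sketch: buffer enough docs that some are still unsynced when we "crash".
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    writer.SetMaxBufferedDocs(10);
    Document doc = new Document();
    doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED));
    for (int i = 0; i < 157; i++)
        writer.AddDocument(doc);
    return writer;
}

private void Crash(IndexWriter writer) {
    MockRAMDirectory dir = (MockRAMDirectory) writer.GetDirectory();
    dir.Crash(); // drop all unsynced file writes, as a hard machine crash would
}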
/// <summary> Make sure that if the modifier tries to commit but hits disk full,
/// it remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
private void TestOperationsOnDiskFull(bool updates) {
    bool debug = false;
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;
    int END_COUNT = 144;

    for (int pass = 0; pass < 2; pass++) {
        bool autoCommit = (0 == pass);

        // First build up a starting index:
        MockRAMDirectory startDir = new MockRAMDirectory();
        IndexWriter writer = new IndexWriter(startDir, autoCommit, new WhitespaceAnalyzer(), true);
        for (int i = 0; i < 157; i++) {
            Document d = new Document();
            d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
            d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
            writer.AddDocument(d);
        }
        writer.Close();

        long diskUsage = startDir.SizeInBytes();
        long diskFree = diskUsage + 10;
        System.IO.IOException err = null;
        bool done = false;

        // Iterate w/ ever increasing free disk space:
        while (!done) {
            MockRAMDirectory dir = new MockRAMDirectory(startDir);
            dir.SetPreventDoubleWrite(false);
            IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer());
            modifier.SetMaxBufferedDocs(1000); // use flush or close
            modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close

            // For each disk size, first try to commit against
            // dir that will hit random IOExceptions & disk
            // full; after, give it infinite disk space & turn
            // off random IOExceptions & retry w/ same reader:
            bool success = false;

            for (int x = 0; x < 2; x++) {
                double rate = 0.1;
                double diskRatio = ((double) diskFree) / diskUsage;
                long thisDiskFree;
                System.String testName;

                if (0 == x) {
                    thisDiskFree = diskFree;
                    if (diskRatio >= 2.0) {
                        rate /= 2;
                    }
                    if (diskRatio >= 4.0) {
                        rate /= 2;
                    }
                    if (diskRatio >= 6.0) {
                        rate = 0.0;
                    }
                    if (debug) {
                        System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
                    }
                    testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
                } else {
                    thisDiskFree = 0;
                    rate = 0.0;
                    if (debug) {
                        System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
                    }
                    testName = "reader re-use after disk full";
                }

                dir.SetMaxSizeInBytes(thisDiskFree);
                dir.SetRandomIOExceptionRate(rate, diskFree);

                try {
                    if (0 == x) {
                        int docId = 12;
                        for (int i = 0; i < 13; i++) {
                            if (updates) {
                                Document d = new Document();
                                d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
                                d.Add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
                                modifier.UpdateDocument(new Term("id", System.Convert.ToString(docId)), d);
                            } else {
                                // deletes
                                modifier.DeleteDocuments(new Term("id", System.Convert.ToString(docId)));
                                // modifier.setNorm(docId, "contents", (float)2.0);
                            }
                            docId += 12;
                        }
                    }
                    modifier.Close();
                    success = true;
                    if (0 == x) {
                        done = true;
                    }
                } catch (System.IO.IOException e) {
                    if (debug) {
                        System.Console.Out.WriteLine(" hit IOException: " + e);
                        System.Console.Out.WriteLine(e.StackTrace);
                    }
                    err = e;
                    if (1 == x) {
                        System.Console.Error.WriteLine(e.StackTrace);
                        Assert.Fail(testName + " hit IOException after disk space was freed up");
                    }
                }

                // If the close() succeeded, make sure there are
                // no unreferenced files.
                if (success) {
                    Lucene.Net.Util._TestUtil.CheckIndex(dir);
                    TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close");
                }

                // Finally, verify the index is not corrupt and, if
                // we succeeded, that we see all docs changed; if we
                // failed, that we see either all docs or no docs
                // changed (transactional semantics):
                IndexReader newReader = null;
                try {
                    newReader = IndexReader.Open(dir);
                } catch (System.IO.IOException e) {
                    System.Console.Error.WriteLine(e.StackTrace);
                    Assert.Fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
                }

                IndexSearcher searcher = new IndexSearcher(newReader);
                ScoreDoc[] hits = null;
                try {
                    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
                } catch (System.IO.IOException e) {
                    System.Console.Error.WriteLine(e.StackTrace);
                    Assert.Fail(testName + ": exception when searching: " + e);
                }
                int result2 = hits.Length;
                if (success) {
                    if (x == 0 && result2 != END_COUNT) {
                        Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                    } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
                        // It's possible that the first exception was
                        // "recoverable" wrt pending deletes, in which
                        // case the pending deletes are retained and
                        // then re-flushing (with plenty of disk
                        // space) will succeed in flushing the
                        // deletes:
                        Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                    }
                } else {
                    // On hitting exception we still may have added
                    // all docs:
                    if (result2 != START_COUNT && result2 != END_COUNT) {
                        System.Console.Error.WriteLine(err.StackTrace);
                        Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                    }
                }

                searcher.Close();
                newReader.Close();

                if (result2 == END_COUNT) {
                    break;
                }
            }

            dir.Close();

            // Try again with 10 more bytes of free space:
            diskFree += 10;
        }
    }
}
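// TestIndexWriter.AssertNoUnreferencedFiles, called above, is assumed to
// follow the same pattern that TestDiskFull (below) inlines: run an
// IndexFileDeleter with a KeepOnlyLastCommitDeletionPolicy over the directory
// and verify that it found nothing to delete. A minimal sketch under that
// assumption (the real helper lives in TestIndexWriter):
public static void AssertNoUnreferencedFiles(Directory dir, System.String message) {
    System.String[] startFiles = dir.ListAll();
    SegmentInfos infos = new SegmentInfos();
    infos.Read(dir);
    new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null, null);
    System.String[] endFiles = dir.ListAll();
    System.Array.Sort(startFiles);
    System.Array.Sort(endFiles);
    // If the deleter removed anything, some files were unreferenced:
    if (!CollectionsHelper.Equals(startFiles, endFiles)) {
        Assert.Fail(message + ": before delete:\n " + ArrayToString(startFiles) + "\n after delete:\n " + ArrayToString(endFiles));
    }
}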
public virtual void TestDiskFull() {
    bool debug = false;
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;
    int END_COUNT = 144;

    // First build up a starting index:
    RAMDirectory startDir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < 157; i++) {
        Document d = new Document();
        d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
        d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(d);
    }
    writer.Close();

    long diskUsage = startDir.SizeInBytes();
    long diskFree = diskUsage + 100;
    System.IO.IOException err = null;
    bool done = false;

    // Iterate w/ ever increasing free disk space:
    while (!done) {
        MockRAMDirectory dir = new MockRAMDirectory(startDir);

        // If IndexReader hits disk full, it can write to
        // the same files again.
        dir.SetPreventDoubleWrite(false);

        IndexReader reader = IndexReader.Open(dir, false);

        // For each disk size, first try to commit against
        // dir that will hit random IOExceptions & disk
        // full; after, give it infinite disk space & turn
        // off random IOExceptions & retry w/ same reader:
        bool success = false;

        for (int x = 0; x < 2; x++) {
            double rate = 0.05;
            double diskRatio = ((double) diskFree) / diskUsage;
            long thisDiskFree;
            System.String testName;

            if (0 == x) {
                thisDiskFree = diskFree;
                if (diskRatio >= 2.0) {
                    rate /= 2;
                }
                if (diskRatio >= 4.0) {
                    rate /= 2;
                }
                if (diskRatio >= 6.0) {
                    rate = 0.0;
                }
                if (debug) {
                    System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
                }
                testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
            } else {
                thisDiskFree = 0;
                rate = 0.0;
                if (debug) {
                    System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
                }
                testName = "reader re-use after disk full";
            }

            dir.SetMaxSizeInBytes(thisDiskFree);
            dir.SetRandomIOExceptionRate(rate, diskFree);

            try {
                if (0 == x) {
                    int docId = 12;
                    for (int i = 0; i < 13; i++) {
                        reader.DeleteDocument(docId);
                        reader.SetNorm(docId, "contents", (float) 2.0);
                        docId += 12;
                    }
                }
                reader.Close();
                success = true;
                if (0 == x) {
                    done = true;
                }
            } catch (System.IO.IOException e) {
                if (debug) {
                    System.Console.Out.WriteLine(" hit IOException: " + e);
                    System.Console.Out.WriteLine(e.StackTrace);
                }
                err = e;
                if (1 == x) {
                    System.Console.Error.WriteLine(e.StackTrace);
                    Assert.Fail(testName + " hit IOException after disk space was freed up");
                }
            }

            // Whether we succeeded or failed, check that all
            // un-referenced files were in fact deleted (ie,
            // we did not create garbage). Just create a new
            // IndexFileDeleter, have it delete unreferenced
            // files, then verify that in fact no files were
            // deleted:
            System.String[] startFiles = dir.ListAll();
            SegmentInfos infos = new SegmentInfos();
            infos.Read(dir);
            new IndexFileDeleter(dir, new KeepOnlyLastCommitDeletionPolicy(), infos, null, null, null);
            System.String[] endFiles = dir.ListAll();

            System.Array.Sort(startFiles);
            System.Array.Sort(endFiles);

            //for(int i=0;i<startFiles.length;i++) {
            //    System.out.println("  startFiles: " + i + ": " + startFiles[i]);
            //}

            if (!CollectionsHelper.Equals(startFiles, endFiles)) {
                System.String successStr;
                if (success) {
                    successStr = "success";
                } else {
                    successStr = "IOException";
                    System.Console.Error.WriteLine(err.StackTrace);
                }
                Assert.Fail("reader.close() failed to delete unreferenced files after " + successStr + " (" + diskFree + " bytes): before delete:\n " + ArrayToString(startFiles) + "\n after delete:\n " + ArrayToString(endFiles));
            }

            // Finally, verify the index is not corrupt and, if
            // we succeeded, that we see all docs changed; if we
            // failed, that we see either all docs or no docs
            // changed (transactional semantics):
            IndexReader newReader = null;
            try {
                newReader = IndexReader.Open(dir, false);
            } catch (System.IO.IOException e) {
                System.Console.Error.WriteLine(e.StackTrace);
                Assert.Fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
            }

            /*
            int result = newReader.docFreq(searchTerm);
            if (success) {
                if (result != END_COUNT) {
                    fail(testName + ": method did not throw exception but docFreq('aaa') is " + result + " instead of expected " + END_COUNT);
                }
            } else {
                // On hitting exception we still may have added
                // all docs:
                if (result != START_COUNT && result != END_COUNT) {
                    err.printStackTrace();
                    fail(testName + ": method did throw exception but docFreq('aaa') is " + result + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }
            */

            IndexSearcher searcher = new IndexSearcher(newReader);
            ScoreDoc[] hits = null;
            try {
                hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
            } catch (System.IO.IOException e) {
                System.Console.Error.WriteLine(e.StackTrace);
                Assert.Fail(testName + ": exception when searching: " + e);
            }
            int result2 = hits.Length;
            if (success) {
                if (result2 != END_COUNT) {
                    Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                }
            } else {
                // On hitting exception we still may have added
                // all docs:
                if (result2 != START_COUNT && result2 != END_COUNT) {
                    System.Console.Error.WriteLine(err.StackTrace);
                    Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }

            searcher.Close();
            newReader.Close();

            if (result2 == END_COUNT) {
                break;
            }
        }

        dir.Close();

        // Try again with 10 more bytes of free space:
        diskFree += 10;
    }

    startDir.Close();
}
public virtual void TestIndexReaderUnDeleteAll() {
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.SetPreventDoubleWrite(false);
    IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), IndexWriter.MaxFieldLength.UNLIMITED);
    writer.AddDocument(CreateDocument("a"));
    writer.AddDocument(CreateDocument("b"));
    writer.AddDocument(CreateDocument("c"));
    writer.Close();

    IndexReader reader = IndexReader.Open(dir, false);
    reader.DeleteDocuments(new Term("id", "a"));
    reader.Flush();
    reader.DeleteDocuments(new Term("id", "b"));
    reader.UndeleteAll();
    reader.DeleteDocuments(new Term("id", "b"));
    reader.Close();

    IndexReader.Open(dir, false).Close();
    dir.Close();
}
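// CreateDocument is assumed but not shown in this excerpt. Given the deletes
// by Term("id", ...) above, a minimal sketch would be a doc with a single
// stored, un-analyzed "id" field (hypothetical; the real helper may add more
// fields):
private Document CreateDocument(System.String id) {
    Document doc = new Document();
    doc.Add(new Field("id", id, Field.Store.YES, Field.Index.NOT_ANALYZED));
    return doc;
}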
/// <summary> Make sure that if the modifier tries to commit but hits disk full,
/// it remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
private void TestOperationsOnDiskFull(bool updates) {
    bool debug = false;
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157;
    int END_COUNT = 144;

    // First build up a starting index:
    MockRAMDirectory startDir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED, null);
    for (int i = 0; i < 157; i++) {
        Document d = new Document();
        d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
        d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(d, null);
    }
    writer.Close();

    long diskUsage = startDir.SizeInBytes();
    long diskFree = diskUsage + 10;
    System.IO.IOException err = null;
    bool done = false;

    // Iterate w/ ever increasing free disk space:
    while (!done) {
        MockRAMDirectory dir = new MockRAMDirectory(startDir);
        dir.SetPreventDoubleWrite(false);
        IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED, null);
        modifier.SetMaxBufferedDocs(1000); // use flush or close
        modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close

        // For each disk size, first try to commit against
        // dir that will hit random IOExceptions & disk
        // full; after, give it infinite disk space & turn
        // off random IOExceptions & retry w/ same reader:
        bool success = false;

        for (int x = 0; x < 2; x++) {
            double rate = 0.1;
            double diskRatio = ((double) diskFree) / diskUsage;
            long thisDiskFree;
            System.String testName;

            if (0 == x) {
                thisDiskFree = diskFree;
                if (diskRatio >= 2.0) {
                    rate /= 2;
                }
                if (diskRatio >= 4.0) {
                    rate /= 2;
                }
                if (diskRatio >= 6.0) {
                    rate = 0.0;
                }
                if (debug) {
                    System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
                }
                testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
            } else {
                thisDiskFree = 0;
                rate = 0.0;
                if (debug) {
                    System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
                }
                testName = "reader re-use after disk full";
            }

            dir.SetMaxSizeInBytes(thisDiskFree);
            dir.SetRandomIOExceptionRate(rate, diskFree);

            try {
                if (0 == x) {
                    int docId = 12;
                    for (int i = 0; i < 13; i++) {
                        if (updates) {
                            Document d = new Document();
                            d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
                            d.Add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
                            modifier.UpdateDocument(new Term("id", System.Convert.ToString(docId)), d, null);
                        } else {
                            // deletes
                            modifier.DeleteDocuments(null, new Term("id", System.Convert.ToString(docId)));
                            // modifier.setNorm(docId, "contents", (float)2.0);
                        }
                        docId += 12;
                    }
                }
                modifier.Close();
                success = true;
                if (0 == x) {
                    done = true;
                }
            } catch (System.IO.IOException e) {
                if (debug) {
                    System.Console.Out.WriteLine(" hit IOException: " + e);
                    System.Console.Out.WriteLine(e.StackTrace);
                }
                err = e;
                if (1 == x) {
                    System.Console.Error.WriteLine(e.StackTrace);
                    Assert.Fail(testName + " hit IOException after disk space was freed up");
                }
            }

            // If the close() succeeded, make sure there are
            // no unreferenced files.
            if (success) {
                Lucene.Net.Util._TestUtil.CheckIndex(dir);
                TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close");
            }

            // Finally, verify the index is not corrupt and, if
            // we succeeded, that we see all docs changed; if we
            // failed, that we see either all docs or no docs
            // changed (transactional semantics):
            IndexReader newReader = null;
            try {
                newReader = IndexReader.Open((Directory) dir, true, null);
            } catch (System.IO.IOException e) {
                System.Console.Error.WriteLine(e.StackTrace);
                Assert.Fail(testName + ": exception when creating IndexReader after disk full during close: " + e);
            }

            IndexSearcher searcher = new IndexSearcher(newReader);
            ScoreDoc[] hits = null;
            try {
                hits = searcher.Search(new TermQuery(searchTerm), null, 1000, null).ScoreDocs;
            } catch (System.IO.IOException e) {
                System.Console.Error.WriteLine(e.StackTrace);
                Assert.Fail(testName + ": exception when searching: " + e);
            }
            int result2 = hits.Length;
            if (success) {
                if (x == 0 && result2 != END_COUNT) {
                    Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) {
                    // It's possible that the first exception was
                    // "recoverable" wrt pending deletes, in which
                    // case the pending deletes are retained and
                    // then re-flushing (with plenty of disk
                    // space) will succeed in flushing the
                    // deletes:
                    Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            } else {
                // On hitting exception we still may have added
                // all docs:
                if (result2 != START_COUNT && result2 != END_COUNT) {
                    System.Console.Error.WriteLine(err.StackTrace);
                    Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }

            searcher.Close();
            newReader.Close();

            if (result2 == END_COUNT) {
                break;
            }
        }

        dir.Close();

        // Try again with 10 more bytes of free space:
        diskFree += 10;
    }
}
public virtual void TestPrepareCommitRollback() {
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.SetPreventDoubleWrite(false);

    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(2);
    writer.SetMergeFactor(5);

    for (int i = 0; i < 23; i++)
        AddDoc(writer);

    IndexReader reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());

    writer.PrepareCommit();

    IndexReader reader2 = IndexReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs());

    writer.Rollback();

    IndexReader reader3 = reader.Reopen();
    Assert.AreEqual(0, reader.NumDocs());
    Assert.AreEqual(0, reader2.NumDocs());
    Assert.AreEqual(0, reader3.NumDocs());
    reader.Close();
    reader2.Close();

    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    for (int i = 0; i < 17; i++)
        AddDoc(writer);

    Assert.AreEqual(0, reader3.NumDocs());
    reader3.Close();

    reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());
    reader.Close();

    writer.PrepareCommit();

    reader = IndexReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs());
    reader.Close();

    writer.Commit();

    reader = IndexReader.Open(dir);
    Assert.AreEqual(17, reader.NumDocs());
    reader.Close();
    writer.Close();
    dir.Close();
}
public virtual void TestCommitOnCloseAbort() {
    MockRAMDirectory dir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(10);
    for (int i = 0; i < 14; i++) {
        AddDoc(writer);
    }
    writer.Close();

    Term searchTerm = new Term("content", "aaa");
    IndexSearcher searcher = new IndexSearcher(dir);
    ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "first number of hits");
    searcher.Close();

    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(10);
    for (int j = 0; j < 17; j++) {
        AddDoc(writer);
    }

    // Delete all docs:
    writer.DeleteDocuments(searchTerm);

    searcher = new IndexSearcher(dir);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer with autoCommit disabled");
    searcher.Close();

    // Now, abort the writer:
    writer.Abort();
    AssertNoUnreferencedFiles(dir, "unreferenced files remain after abort()");

    searcher = new IndexSearcher(dir);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "saw changes after writer.abort");
    searcher.Close();

    // Now make sure we can re-open the index, add docs,
    // and all is good:
    writer = new IndexWriter(dir, new WhitespaceAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED);
    writer.SetMaxBufferedDocs(10);

    // On abort, writer in fact may write to the same
    // segments_N file:
    dir.SetPreventDoubleWrite(false);

    for (int i = 0; i < 12; i++) {
        for (int j = 0; j < 17; j++) {
            AddDoc(writer);
        }
        searcher = new IndexSearcher(dir);
        hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
        Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer with autoCommit disabled");
        searcher.Close();
    }

    writer.Close();
    searcher = new IndexSearcher(dir);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(218, hits.Length, "didn't see changes after close");
    searcher.Close();

    dir.Close();
}
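// AddDoc is likewise assumed rather than shown. Since TestCommitOnCloseAbort
// searches on Term("content", "aaa") and expects every added doc to match, a
// minimal sketch (hypothetical) is:
private void AddDoc(IndexWriter writer) {
    Document doc = new Document();
    doc.Add(new Field("content", "aaa", Field.Store.NO, Field.Index.ANALYZED));
    writer.AddDocument(doc);
}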
public void TestCorruptionAfterDiskFullDuringMerge() {
    MockRAMDirectory dir = new MockRAMDirectory();
    dir.SetPreventDoubleWrite(false);
    IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    w.SetMergeScheduler(new SerialMergeScheduler());
    ((LogMergePolicy) w.GetMergePolicy()).SetMergeFactor(2);

    Document doc = new Document();
    doc.Add(new Field("f", "doctor who", Field.Store.YES, Field.Index.ANALYZED));
    w.AddDocument(doc);
    w.Commit();

    w.DeleteDocuments(new Term("f", "who"));
    w.AddDocument(doc);

    // disk fills up!
    FailTwiceDuringMerge ftdm = new FailTwiceDuringMerge();
    ftdm.SetDoFail();
    dir.FailOn(ftdm);

    try {
        w.Commit();
        Assert.Fail("fake disk full IOExceptions not hit");
    } catch (System.IO.IOException) {
        // expected
        Assert.IsTrue(ftdm.didFail1 || ftdm.didFail2);
    }

    _TestUtil.CheckIndex(dir);
    ftdm.ClearDoFail();
    w.AddDocument(doc);
    w.Close();

    _TestUtil.CheckIndex(dir);
    dir.Close();
}
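// FailTwiceDuringMerge is assumed by the test above (SetDoFail/ClearDoFail and
// the didFail1/didFail2 flags come from it). A minimal sketch, assuming the
// MockRAMDirectory.Failure hook and that the two failure points are keyed off
// the current stack trace during the merge; the exact trigger points are an
// assumption, not the real implementation:
private class FailTwiceDuringMerge : MockRAMDirectory.Failure {
    public bool didFail1;
    public bool didFail2;

    public override void Eval(MockRAMDirectory dir) {
        if (!doFail)
            return;
        // Throw a fake disk-full IOException the first time we pass through
        // each of two distinct points inside the merge:
        System.String trace = new System.Diagnostics.StackTrace().ToString();
        if (!didFail1 && trace.Contains("MergeTerms")) {  // hypothetical trigger
            didFail1 = true;
            throw new System.IO.IOException("fake disk full during merge (1)");
        }
        if (!didFail2 && trace.Contains("MergeMiddle")) { // hypothetical trigger
            didFail2 = true;
            throw new System.IO.IOException("fake disk full during merge (2)");
        }
    }
}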