public virtual void TestLucene() { int num = 100; Directory indexA = new MockRAMDirectory(); Directory indexB = new MockRAMDirectory(); FillIndex(indexA, 0, num); bool fail = VerifyIndex(indexA, 0); if (fail) { Assert.Fail("Index a is invalid"); } FillIndex(indexB, num, num); fail = VerifyIndex(indexB, num); if (fail) { Assert.Fail("Index b is invalid"); } Directory merged = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); writer.SetMergeFactor(2); writer.AddIndexes(new Directory[]{indexA, indexB}); writer.Close(); fail = VerifyIndex(merged, 0); merged.Close(); Assert.IsFalse(fail, "The merged index is invalid"); }
public virtual void TestLucene() { int num = 100; Directory indexA = new MockRAMDirectory(); Directory indexB = new MockRAMDirectory(); FillIndex(indexA, 0, num); Assert.IsFalse(VerifyIndex(indexA, 0), "Index a is invalid"); FillIndex(indexB, num, num); Assert.IsFalse(VerifyIndex(indexB, num), "Index b is invalid"); Directory merged = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(merged, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.MergeFactor = 2; writer.AddIndexesNoOptimize(new []{indexA, indexB}); writer.Optimize(); writer.Close(); var fail = VerifyIndex(merged, 0); merged.Close(); Assert.IsFalse(fail, "The merged index is invalid"); }
public virtual void TestCloneNoChangesStillReadOnly() { Directory dir1 = new MockRAMDirectory(); TestIndexReaderReopen.CreateIndex(dir1, true); IndexReader r1 = IndexReader.Open(dir1, false); IndexReader r2 = r1.Clone(false); Assert.IsTrue(DeleteWorked(1, r2), "deleting from the cloned should have worked"); r1.Close(); r2.Close(); dir1.Close(); }
public virtual void TestCloneReadOnlySegmentReader() { Directory dir1 = new MockRAMDirectory(); TestIndexReaderReopen.CreateIndex(dir1, false); IndexReader reader = IndexReader.Open(dir1, false); IndexReader readOnlyReader = reader.Clone(true); Assert.IsTrue(IsReadOnly(readOnlyReader), "cloned reader isn't read-only"); Assert.IsFalse(DeleteWorked(1, readOnlyReader), "deleting from the read-only clone should not have worked"); reader.Close(); readOnlyReader.Close(); dir1.Close(); }
public virtual void TestBinaryFieldInIndex() { IFieldable binaryFldStored = new Field("binaryStored", System.Text.UTF8Encoding.UTF8.GetBytes(binaryValStored), Field.Store.YES); IFieldable stringFldStored = new Field("stringStored", binaryValStored, Field.Store.YES, Field.Index.NO, Field.TermVector.NO); // binary fields with store off are not allowed Assert.Throws<ArgumentException>( () => new Field("fail", System.Text.Encoding.UTF8.GetBytes(binaryValStored), Field.Store.NO)); Document doc = new Document(); doc.Add(binaryFldStored); doc.Add(stringFldStored); /** test for field count */ Assert.AreEqual(2, doc.fields_ForNUnit.Count); /** add the doc to a ram index */ MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Lucene.Net.Util.Version.LUCENE_CURRENT), true, IndexWriter.MaxFieldLength.LIMITED); writer.AddDocument(doc); writer.Close(); /** open a reader and fetch the document */ IndexReader reader = IndexReader.Open(dir, false); Document docFromReader = reader.Document(0); Assert.IsTrue(docFromReader != null); /** fetch the binary stored field and compare its content with the original one */ System.String binaryFldStoredTest = new System.String(System.Text.UTF8Encoding.UTF8.GetChars(docFromReader.GetBinaryValue("binaryStored"))); Assert.IsTrue(binaryFldStoredTest.Equals(binaryValStored)); /** fetch the string field and compare its content with the original one */ System.String stringFldStoredTest = docFromReader.Get("stringStored"); Assert.IsTrue(stringFldStoredTest.Equals(binaryValStored)); /** delete the document from index */ reader.DeleteDocument(0); Assert.AreEqual(0, reader.NumDocs()); reader.Close(); dir.Close(); }
public virtual void TestIndexing() { Directory mainDir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(mainDir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED); writer.UseCompoundFile = false; IndexReader reader = writer.GetReader(); // start pooling readers reader.Close(); writer.MergeFactor = 2; writer.SetMaxBufferedDocs(10); RunThread[] indexThreads = new RunThread[4]; for (int x = 0; x < indexThreads.Length; x++) { indexThreads[x] = new RunThread(this, x % 2, writer); indexThreads[x].Name = "Thread " + x; indexThreads[x].Start(); } long startTime = (DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond); long duration = 5 * 1000; while (((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) - startTime) < duration) { System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 100)); } int delCount = 0; int addCount = 0; for (int x = 0; x < indexThreads.Length; x++) { indexThreads[x].run_Renamed_Field = false; Assert.IsTrue(indexThreads[x].ex == null); addCount += indexThreads[x].addCount; delCount += indexThreads[x].delCount; } for (int x = 0; x < indexThreads.Length; x++) { indexThreads[x].Join(); } //System.out.println("addCount:"+addCount); //System.out.println("delCount:"+delCount); writer.Close(); mainDir.Close(); }
public virtual void TestMissingTerms() { System.String fieldName = "field1"; MockRAMDirectory rd = new MockRAMDirectory(); IndexWriter w = new IndexWriter(rd, new KeywordAnalyzer(), MaxFieldLength.UNLIMITED); for (int i = 0; i < 100; i++) { Document doc = new Document(); int term = i * 10; //terms are units of 10; doc.Add(new Field(fieldName, "" + term, Field.Store.YES, Field.Index.NOT_ANALYZED)); w.AddDocument(doc); } w.Close(); IndexReader reader = IndexReader.Open(rd, true); IndexSearcher searcher = new IndexSearcher(reader); int numDocs = reader.NumDocs(); ScoreDoc[] results; MatchAllDocsQuery q = new MatchAllDocsQuery(); System.Collections.ArrayList terms = new System.Collections.ArrayList(); terms.Add("5"); results = searcher.Search(q, new FieldCacheTermsFilter(fieldName, (System.String[]) terms.ToArray(typeof(System.String))), numDocs).ScoreDocs; Assert.AreEqual(0, results.Length, "Must match nothing"); terms = new System.Collections.ArrayList(); terms.Add("10"); results = searcher.Search(q, new FieldCacheTermsFilter(fieldName, (System.String[])terms.ToArray(typeof(System.String))), numDocs).ScoreDocs; Assert.AreEqual(1, results.Length, "Must match 1"); terms = new System.Collections.ArrayList(); terms.Add("10"); terms.Add("20"); results = searcher.Search(q, new FieldCacheTermsFilter(fieldName, (System.String[]) terms.ToArray(typeof(System.String))), numDocs).ScoreDocs; Assert.AreEqual(2, results.Length, "Must match 2"); reader.Close(); rd.Close(); }
public virtual void TestMaxBufferedDeletes() { for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); writer.SetMaxBufferedDeleteTerms(1); writer.DeleteDocuments(new Term("foobar", "1")); writer.DeleteDocuments(new Term("foobar", "1")); writer.DeleteDocuments(new Term("foobar", "1")); Assert.AreEqual(3, writer.GetFlushDeletesCount()); writer.Close(); dir.Close(); } }
public virtual void TestErrorAfterApplyDeletes() { MockRAMDirectory.Failure failure = new AnonymousClassFailure(this); // create a couple of files System.String[] keywords = new System.String[] { "1", "2" }; System.String[] unindexed = new System.String[] { "Netherlands", "Italy" }; System.String[] unstored = new System.String[] { "Amsterdam has lots of bridges", "Venice has lots of canals" }; System.String[] text = new System.String[] { "Amsterdam", "Venice" }; MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); modifier.UseCompoundFile = true; modifier.SetMaxBufferedDeleteTerms(2); dir.FailOn(failure.Reset()); for (int i = 0; i < keywords.Length; i++) { Document doc = new Document(); doc.Add(new Field("id", keywords[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); doc.Add(new Field("country", unindexed[i], Field.Store.YES, Field.Index.NO)); doc.Add(new Field("contents", unstored[i], Field.Store.NO, Field.Index.ANALYZED)); doc.Add(new Field("city", text[i], Field.Store.YES, Field.Index.ANALYZED)); modifier.AddDocument(doc); } // flush (and commit if ac) modifier.Optimize(); modifier.Commit(); // one of the two files hits Term term = new Term("city", "Amsterdam"); int hitCount = GetHitCount(dir, term); Assert.AreEqual(1, hitCount); // open the writer again (closed above) // delete the doc // max buf del terms is two, so this is buffered modifier.DeleteDocuments(term); // add a doc (needed for the !ac case; see below) // doc remains buffered Document doc2 = new Document(); modifier.AddDocument(doc2); // commit the changes, the buffered deletes, and the new doc // The failure object will fail on the first write after the del // file gets created when processing the buffered delete // in the ac case, this will be when writing the new segments // files so we really don't need the new doc, but it's harmless // in the !ac case, a new segments file won't be created but in // this case, creation of the cfs file happens next so we need // the doc (to test that it's okay that we don't lose deletes if // failing while creating the cfs file) bool failed = false; try { modifier.Commit(); } catch (System.IO.IOException ioe) { failed = true; } Assert.IsTrue(failed); // The commit above failed, so we need to retry it (which will // succeed, because the failure is a one-shot) modifier.Commit(); hitCount = GetHitCount(dir, term); // Make sure the delete was successfully flushed: Assert.AreEqual(0, hitCount); modifier.Close(); dir.Close(); }
public virtual void TestNonRAMDelete() { for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); modifier.SetMaxBufferedDocs(2); modifier.SetMaxBufferedDeleteTerms(2); int id = 0; int value_Renamed = 100; for (int i = 0; i < 7; i++) { AddDoc(modifier, ++id, value_Renamed); } modifier.Commit(); Assert.AreEqual(0, modifier.GetNumBufferedDocuments()); Assert.IsTrue(0 < modifier.GetSegmentCount()); modifier.Commit(); IndexReader reader = IndexReader.Open(dir); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); modifier.DeleteDocuments(new Term("value", System.Convert.ToString(value_Renamed))); modifier.Commit(); reader = IndexReader.Open(dir); Assert.AreEqual(0, reader.NumDocs()); reader.Close(); modifier.Close(); dir.Close(); } }
public virtual void TestNullOrSubScorer() { Directory dir = new MockRAMDirectory(); IndexWriter w = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); Document doc = new Document(); doc.Add(new Field("field", "a b c d", Field.Store.NO, Field.Index.ANALYZED)); w.AddDocument(doc); IndexReader r = w.GetReader(); IndexSearcher s = new IndexSearcher(r); BooleanQuery q = new BooleanQuery(); q.Add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD); // PhraseQuery w/ no terms added returns a null scorer PhraseQuery pq = new PhraseQuery(); q.Add(pq, BooleanClause.Occur.SHOULD); Assert.AreEqual(1, s.Search(q, 10).TotalHits); // A required clause which returns null scorer should return null scorer to // IndexSearcher. q = new BooleanQuery(); pq = new PhraseQuery(); q.Add(new TermQuery(new Term("field", "a")), BooleanClause.Occur.SHOULD); q.Add(pq, BooleanClause.Occur.MUST); Assert.AreEqual(0, s.Search(q, 10).TotalHits); DisjunctionMaxQuery dmq = new DisjunctionMaxQuery(1.0f); dmq.Add(new TermQuery(new Term("field", "a"))); dmq.Add(pq); Assert.AreEqual(1, s.Search(dmq, 10).TotalHits); r.Close(); w.Close(); dir.Close(); }
public virtual void TestReuseAcrossWriters() { Directory dir = new MockRAMDirectory(); SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); IndexWriter writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); // Force frequent commits writer.SetMaxBufferedDocs(2); Document doc = new Document(); doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for (int i = 0; i < 7; i++) writer.AddDocument(doc); IndexCommit cp = (IndexCommit) dp.Snapshot(); CopyFiles(dir, cp); writer.Close(); CopyFiles(dir, cp); writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); CopyFiles(dir, cp); for (int i = 0; i < 7; i++) writer.AddDocument(doc); CopyFiles(dir, cp); writer.Close(); CopyFiles(dir, cp); dp.Release(); writer = new IndexWriter(dir, true, new StandardAnalyzer(), dp); writer.Close(); try { CopyFiles(dir, cp); Assert.Fail("did not hit expected IOException"); } catch (System.IO.IOException ioe) { // expected } dir.Close(); }
public virtual void TestFilterIndexReader_Renamed() { RAMDirectory directory = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(directory, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); Document d1 = new Document(); d1.Add(new Field("default", "one two", Field.Store.YES, Field.Index.ANALYZED)); writer.AddDocument(d1); Document d2 = new Document(); d2.Add(new Field("default", "one three", Field.Store.YES, Field.Index.ANALYZED)); writer.AddDocument(d2); Document d3 = new Document(); d3.Add(new Field("default", "two four", Field.Store.YES, Field.Index.ANALYZED)); writer.AddDocument(d3); writer.Close(); IndexReader reader = new TestReader(IndexReader.Open(directory)); Assert.IsTrue(reader.IsOptimized()); TermEnum terms = reader.Terms(); while (terms.Next()) { Assert.IsTrue(terms.Term().Text().IndexOf('e') != - 1); } terms.Close(); TermPositions positions = reader.TermPositions(new Term("default", "one")); while (positions.Next()) { Assert.IsTrue((positions.Doc() % 2) == 1); } int NUM_DOCS = 3; TermDocs td = reader.TermDocs(null); for (int i = 0; i < NUM_DOCS; i++) { Assert.IsTrue(td.Next()); Assert.AreEqual(i, td.Doc()); Assert.AreEqual(1, td.Freq()); } td.Close(); reader.Close(); directory.Close(); }
public virtual void TestReuseAcrossWriters() { Directory dir = new MockRAMDirectory(); SnapshotDeletionPolicy dp = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); // Force frequent flushes writer.SetMaxBufferedDocs(2); Document doc = new Document(); doc.Add(new Field("content", "aaa", Field.Store.YES, Field.Index.ANALYZED, Field.TermVector.WITH_POSITIONS_OFFSETS)); for (int i = 0; i < 7; i++) { writer.AddDocument(doc); if (i % 2 == 0) { writer.Commit(); } } IndexCommit cp = dp.Snapshot(); CopyFiles(dir, cp); writer.Close(); CopyFiles(dir, cp); writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); CopyFiles(dir, cp); for (int i = 0; i < 7; i++) { writer.AddDocument(doc); if (i % 2 == 0) { writer.Commit(); } } CopyFiles(dir, cp); writer.Close(); CopyFiles(dir, cp); dp.Release(); writer = new IndexWriter(dir, new StandardAnalyzer(Util.Version.LUCENE_CURRENT), dp, IndexWriter.MaxFieldLength.UNLIMITED); writer.Close(); Assert.Throws<System.IO.FileNotFoundException>(() => CopyFiles(dir, cp), "did not hit expected IOException"); dir.Close(); }
/// <summary> Make sure if modifier tries to commit but hits disk full that modifier /// remains consistent and usable. Similar to TestIndexReader.testDiskFull(). /// </summary> private void TestOperationsOnDiskFull(bool updates) { bool debug = false; Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); // First build up a starting index: MockRAMDirectory startDir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(startDir, autoCommit, new WhitespaceAnalyzer(), true); for (int i = 0; i < 157; i++) { Document d = new Document(); d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); writer.AddDocument(d); } writer.Close(); long diskUsage = startDir.SizeInBytes(); long diskFree = diskUsage + 10; System.IO.IOException err = null; bool done = false; // Iterate w/ ever increasing free disk space: while (!done) { MockRAMDirectory dir = new MockRAMDirectory(startDir); dir.SetPreventDoubleWrite(false); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer()); modifier.SetMaxBufferedDocs(1000); // use flush or close modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: bool success = false; for (int x = 0; x < 2; x++) { double rate = 0.1; double diskRatio = ((double) diskFree) / diskUsage; long thisDiskFree; System.String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (debug) { System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (debug) { System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; } dir.SetMaxSizeInBytes(thisDiskFree); dir.SetRandomIOExceptionRate(rate, diskFree); try { if (0 == x) { int docId = 12; for (int i = 0; i < 13; i++) { if (updates) { Document d = new Document(); d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.Add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED)); modifier.UpdateDocument(new Term("id", System.Convert.ToString(docId)), d); } else { // deletes modifier.DeleteDocuments(new Term("id", System.Convert.ToString(docId))); // modifier.setNorm(docId, "contents", (float)2.0); } docId += 12; } } modifier.Close(); success = true; if (0 == x) { done = true; } } catch (System.IO.IOException e) { if (debug) { System.Console.Out.WriteLine(" hit IOException: " + e); System.Console.Out.WriteLine(e.StackTrace); } err = e; if (1 == x) { System.Console.Error.WriteLine(e.StackTrace); Assert.Fail(testName + " hit IOException after disk space was freed up"); } } // If the close() succeeded, make sure there are // no unreferenced files. 
if (success) { Lucene.Net.Util._TestUtil.CheckIndex(dir); TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close"); } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = IndexReader.Open(dir); } catch (System.IO.IOException e) { System.Console.Error.WriteLine(e.StackTrace); Assert.Fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } IndexSearcher searcher = new IndexSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; } catch (System.IO.IOException e) { System.Console.Error.WriteLine(e.StackTrace); Assert.Fail(testName + ": exception when searching: " + e); } int result2 = hits.Length; if (success) { if (x == 0 && result2 != END_COUNT) { Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) { // It's possible that the first exception was // "recoverable" wrt pending deletes, in which // case the pending deletes are retained and // then re-flushing (with plenty of disk // space) will succeed in flushing the // deletes: Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { System.Console.Error.WriteLine(err.StackTrace); Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } searcher.Close(); newReader.Close(); if (result2 == END_COUNT) { break; } } dir.Close(); // Try again with 10 more bytes of free space: diskFree += 10; } } }
public virtual void TestDeleteAll() { for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); modifier.SetMaxBufferedDocs(2); modifier.SetMaxBufferedDeleteTerms(2); int id = 0; int value_Renamed = 100; for (int i = 0; i < 7; i++) { AddDoc(modifier, ++id, value_Renamed); } modifier.Commit(); IndexReader reader = IndexReader.Open(dir); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); // Add 1 doc (so we will have something buffered) AddDoc(modifier, 99, value_Renamed); // Delete all modifier.DeleteAll(); // Delete all shouldn't be on disk yet reader = IndexReader.Open(dir); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); // Add a doc and update a doc (after the deleteAll, before the commit) AddDoc(modifier, 101, value_Renamed); UpdateDoc(modifier, 102, value_Renamed); // commit the delete all modifier.Commit(); // Validate there are no docs left reader = IndexReader.Open(dir); Assert.AreEqual(2, reader.NumDocs()); reader.Close(); modifier.Close(); dir.Close(); } }
public virtual void TestRAMDeletes() { for (int pass = 0; pass < 2; pass++) { for (int t = 0; t < 2; t++) { bool autoCommit = (0 == pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); modifier.SetMaxBufferedDocs(4); modifier.SetMaxBufferedDeleteTerms(4); int id = 0; int value_Renamed = 100; AddDoc(modifier, ++id, value_Renamed); if (0 == t) modifier.DeleteDocuments(new Term("value", System.Convert.ToString(value_Renamed))); else modifier.DeleteDocuments(new TermQuery(new Term("value", System.Convert.ToString(value_Renamed)))); AddDoc(modifier, ++id, value_Renamed); if (0 == t) { modifier.DeleteDocuments(new Term("value", System.Convert.ToString(value_Renamed))); Assert.AreEqual(2, modifier.GetNumBufferedDeleteTerms()); Assert.AreEqual(1, modifier.GetBufferedDeleteTermsSize()); } else modifier.DeleteDocuments(new TermQuery(new Term("value", System.Convert.ToString(value_Renamed)))); AddDoc(modifier, ++id, value_Renamed); Assert.AreEqual(0, modifier.GetSegmentCount()); modifier.Flush(); modifier.Commit(); IndexReader reader = IndexReader.Open(dir); Assert.AreEqual(1, reader.NumDocs()); int hitCount = GetHitCount(dir, new Term("id", System.Convert.ToString(id))); Assert.AreEqual(1, hitCount); reader.Close(); modifier.Close(); dir.Close(); } } }
public virtual void TestCloseStoredFields() { Directory dir = new MockRAMDirectory(); IndexWriter w = new IndexWriter(dir, new SimpleAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); w.UseCompoundFile = false; Document doc = new Document(); doc.Add(new Field("field", "yes it's stored", Field.Store.YES, Field.Index.ANALYZED)); w.AddDocument(doc); w.Close(); IndexReader r1 = IndexReader.Open(dir, false); IndexReader r2 = r1.Clone(false); r1.Close(); r2.Close(); dir.Close(); }
public virtual void TestEmptyIndex() { // creating two directories for indices Directory indexStoreA = new MockRAMDirectory(); Directory indexStoreB = new MockRAMDirectory(); // creating a document to store Document lDoc = new Document(); lDoc.Add(new Field("fulltext", "Once upon a time.....", Field.Store.YES, Field.Index.ANALYZED)); lDoc.Add(new Field("id", "doc1", Field.Store.YES, Field.Index.NOT_ANALYZED)); lDoc.Add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED)); // creating a document to store Document lDoc2 = new Document(); lDoc2.Add(new Field("fulltext", "in a galaxy far far away.....", Field.Store.YES, Field.Index.ANALYZED)); lDoc2.Add(new Field("id", "doc2", Field.Store.YES, Field.Index.NOT_ANALYZED)); lDoc2.Add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED)); // creating a document to store Document lDoc3 = new Document(); lDoc3.Add(new Field("fulltext", "a bizarre bug manifested itself....", Field.Store.YES, Field.Index.ANALYZED)); lDoc3.Add(new Field("id", "doc3", Field.Store.YES, Field.Index.NOT_ANALYZED)); lDoc3.Add(new Field("handle", "1", Field.Store.YES, Field.Index.NOT_ANALYZED)); // creating an index writer for the first index IndexWriter writerA = new IndexWriter(indexStoreA, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); // creating an index writer for the second index, but writing nothing IndexWriter writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); //-------------------------------------------------------------------- // scenario 1 //-------------------------------------------------------------------- // writing the documents to the first index writerA.AddDocument(lDoc); writerA.AddDocument(lDoc2); writerA.AddDocument(lDoc3); writerA.Optimize(); writerA.Close(); // closing the second index writerB.Close(); // creating the query QueryParser parser = new QueryParser("fulltext", new StandardAnalyzer()); Query query = parser.Parse("handle:1"); // building the searchables Searcher[] searchers = new Searcher[2]; // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index searchers[0] = new IndexSearcher(indexStoreB); searchers[1] = new IndexSearcher(indexStoreA); // creating the multiSearcher Searcher mSearcher = GetMultiSearcherInstance(searchers); // performing the search ScoreDoc[] hits = mSearcher.Search(query, null, 1000).ScoreDocs; Assert.AreEqual(3, hits.Length); // iterating over the hit documents for (int i = 0; i < hits.Length; i++) { mSearcher.Doc(hits[i].doc); } mSearcher.Close(); //-------------------------------------------------------------------- // scenario 2 //-------------------------------------------------------------------- // adding one document to the empty index writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writerB.AddDocument(lDoc); writerB.Optimize(); writerB.Close(); // building the searchables Searcher[] searchers2 = new Searcher[2]; // VITAL STEP: adding the searcher for the empty index first, before the searcher for the populated index searchers2[0] = new IndexSearcher(indexStoreB); searchers2[1] = new IndexSearcher(indexStoreA); // creating the multiSearcher MultiSearcher mSearcher2 = GetMultiSearcherInstance(searchers2); // performing the same search ScoreDoc[] hits2 = mSearcher2.Search(query, null, 1000).ScoreDocs; Assert.AreEqual(4, hits2.Length); // iterating over the hit documents for (int i = 0; i < hits2.Length; 
i++) { // no exception should happen at this point mSearcher2.Doc(hits2[i].doc); } // test the subSearcher() method: Query subSearcherQuery = parser.Parse("id:doc1"); hits2 = mSearcher2.Search(subSearcherQuery, null, 1000).ScoreDocs; Assert.AreEqual(2, hits2.Length); Assert.AreEqual(0, mSearcher2.SubSearcher(hits2[0].doc)); // hit from searchers2[0] Assert.AreEqual(1, mSearcher2.SubSearcher(hits2[1].doc)); // hit from searchers2[1] subSearcherQuery = parser.Parse("id:doc2"); hits2 = mSearcher2.Search(subSearcherQuery, null, 1000).ScoreDocs; Assert.AreEqual(1, hits2.Length); Assert.AreEqual(1, mSearcher2.SubSearcher(hits2[0].doc)); // hit from searchers2[1] mSearcher2.Close(); //-------------------------------------------------------------------- // scenario 3 //-------------------------------------------------------------------- // deleting the document just added; this will cause a different exception to take place Term term = new Term("id", "doc1"); IndexReader readerB = IndexReader.Open(indexStoreB); readerB.DeleteDocuments(term); readerB.Close(); // optimizing the index with the writer writerB = new IndexWriter(indexStoreB, new StandardAnalyzer(), false, IndexWriter.MaxFieldLength.LIMITED); writerB.Optimize(); writerB.Close(); // building the searchables Searcher[] searchers3 = new Searcher[2]; searchers3[0] = new IndexSearcher(indexStoreB); searchers3[1] = new IndexSearcher(indexStoreA); // creating the multiSearcher Searcher mSearcher3 = GetMultiSearcherInstance(searchers3); // performing the same search ScoreDoc[] hits3 = mSearcher3.Search(query, null, 1000).ScoreDocs; Assert.AreEqual(3, hits3.Length); // iterating over the hit documents for (int i = 0; i < hits3.Length; i++) { mSearcher3.Doc(hits3[i].doc); } mSearcher3.Close(); indexStoreA.Close(); indexStoreB.Close(); }
public virtual void TestBatchDeletes() { for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); modifier.SetMaxBufferedDocs(2); modifier.SetMaxBufferedDeleteTerms(2); int id = 0; int value_Renamed = 100; for (int i = 0; i < 7; i++) { AddDoc(modifier, ++id, value_Renamed); } modifier.Commit(); IndexReader reader = IndexReader.Open(dir); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); id = 0; modifier.DeleteDocuments(new Term("id", System.Convert.ToString(++id))); modifier.DeleteDocuments(new Term("id", System.Convert.ToString(++id))); modifier.Commit(); reader = IndexReader.Open(dir); Assert.AreEqual(5, reader.NumDocs()); reader.Close(); Term[] terms = new Term[3]; for (int i = 0; i < terms.Length; i++) { terms[i] = new Term("id", System.Convert.ToString(++id)); } modifier.DeleteDocuments(terms); modifier.Commit(); reader = IndexReader.Open(dir); Assert.AreEqual(2, reader.NumDocs()); reader.Close(); modifier.Close(); dir.Close(); } }
public virtual void TestCloneWriteToOrig() { Directory dir1 = new MockRAMDirectory(); TestIndexReaderReopen.CreateIndex(dir1, true); IndexReader r1 = IndexReader.Open(dir1, false); IndexReader r2 = r1.Clone(false); Assert.IsTrue(DeleteWorked(1, r1), "deleting from the original should have worked"); r1.Close(); r2.Close(); dir1.Close(); }
public virtual void TestDeleteAllNRT() { Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, false, new WhitespaceAnalyzer(), true); modifier.SetMaxBufferedDocs(2); modifier.SetMaxBufferedDeleteTerms(2); int id = 0; int value_Renamed = 100; for (int i = 0; i < 7; i++) { AddDoc(modifier, ++id, value_Renamed); } modifier.Commit(); IndexReader reader = modifier.GetReader(); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); AddDoc(modifier, ++id, value_Renamed); AddDoc(modifier, ++id, value_Renamed); // Delete all modifier.DeleteAll(); reader = modifier.GetReader(); Assert.AreEqual(0, reader.NumDocs()); reader.Close(); // Roll it back modifier.Rollback(); modifier.Close(); // Validate that the docs are still there reader = IndexReader.Open(dir); Assert.AreEqual(7, reader.NumDocs()); reader.Close(); dir.Close(); }
public virtual void TestDeleteMerging() { RAMDirectory directory = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.SetMergeScheduler(cms); LogDocMergePolicy mp = new LogDocMergePolicy(writer); writer.SetMergePolicy(mp); // Force degenerate merging so we can get a mix of // merging of segments with and without deletes at the // start: mp.SetMinMergeDocs(1000); Document doc = new Document(); Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.Add(idField); for (int i = 0; i < 10; i++) { for (int j = 0; j < 100; j++) { idField.SetValue(System.Convert.ToString(i * 100 + j)); writer.AddDocument(doc); } int delID = i; while (delID < 100 * (1 + i)) { writer.DeleteDocuments(new Term("id", "" + delID)); delID += 10; } writer.Flush(); } writer.Close(); IndexReader reader = IndexReader.Open(directory); // Verify that we did not lose any deletes... Assert.AreEqual(450, reader.NumDocs()); reader.Close(); directory.Close(); }
public virtual void TestErrorAfterApplyDeletes() { MockRAMDirectory.Failure failure = new AnonymousClassFailure(this); // create a couple of files System.String[] keywords = new System.String[]{"1", "2"}; System.String[] unindexed = new System.String[]{"Netherlands", "Italy"}; System.String[] unstored = new System.String[]{"Amsterdam has lots of bridges", "Venice has lots of canals"}; System.String[] text = new System.String[]{"Amsterdam", "Venice"}; for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); MockRAMDirectory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); modifier.SetUseCompoundFile(true); modifier.SetMaxBufferedDeleteTerms(2); dir.FailOn(failure.Reset()); for (int i = 0; i < keywords.Length; i++) { Document doc = new Document(); doc.Add(new Field("id", keywords[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); doc.Add(new Field("country", unindexed[i], Field.Store.YES, Field.Index.NO)); doc.Add(new Field("contents", unstored[i], Field.Store.NO, Field.Index.ANALYZED)); doc.Add(new Field("city", text[i], Field.Store.YES, Field.Index.ANALYZED)); modifier.AddDocument(doc); } // flush (and commit if ac) modifier.Optimize(); modifier.Commit(); // one of the two files hits Term term = new Term("city", "Amsterdam"); int hitCount = GetHitCount(dir, term); Assert.AreEqual(1, hitCount); // open the writer again (closed above) // delete the doc // max buf del terms is two, so this is buffered modifier.DeleteDocuments(term); // add a doc (needed for the !ac case; see below) // doc remains buffered Document doc2 = new Document(); modifier.AddDocument(doc2); // commit the changes, the buffered deletes, and the new doc // The failure object will fail on the first write after the del // file gets created when processing the buffered delete // in the ac case, this will be when writing the new segments // files so we really don't need the new doc, but it's harmless // in the !ac case, a new segments file won't be created but in // this case, creation of the cfs file happens next so we need // the doc (to test that it's okay that we don't lose deletes if // failing while creating the cfs file) bool failed = false; try { modifier.Commit(); } catch (System.IO.IOException ioe) { failed = true; } Assert.IsTrue(failed); // The commit above failed, so we need to retry it (which will // succeed, because the failure is a one-shot) modifier.Commit(); hitCount = GetHitCount(dir, term); // Make sure the delete was successfully flushed: Assert.AreEqual(0, hitCount); modifier.Close(); dir.Close(); } }
public virtual void TestNoExtraFiles() { RAMDirectory directory = new MockRAMDirectory(); for (int pass = 0; pass < 2; pass++) { bool autoCommit = pass == 0; IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); for (int iter = 0; iter < 7; iter++) { ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.SetMergeScheduler(cms); writer.SetMaxBufferedDocs(2); for (int j = 0; j < 21; j++) { Document doc = new Document(); doc.Add(new Field("content", "a b c", Field.Store.NO, Field.Index.ANALYZED)); writer.AddDocument(doc); } writer.Close(); TestIndexWriter.AssertNoUnreferencedFiles(directory, "testNoExtraFiles autoCommit=" + autoCommit); // Reopen writer = new IndexWriter(directory, autoCommit, ANALYZER, false); } writer.Close(); } directory.Close(); }
public virtual void TestSnapshotDeletionPolicy_Renamed() { System.IO.DirectoryInfo dir = _TestUtil.GetTempDir(INDEX_PATH); try { Directory fsDir = FSDirectory.Open(dir); RunTest(fsDir); fsDir.Close(); } finally { _TestUtil.RmDir(dir); } MockRAMDirectory dir2 = new MockRAMDirectory(); RunTest(dir2); dir2.Close(); }
public virtual void TestNoWaitClose() { RAMDirectory directory = new MockRAMDirectory(); Document doc = new Document(); Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.Add(idField); for (int pass = 0; pass < 2; pass++) { bool autoCommit = pass == 0; IndexWriter writer = new IndexWriter(directory, autoCommit, ANALYZER, true); for (int iter = 0; iter < 10; iter++) { ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.SetMergeScheduler(cms); writer.SetMaxBufferedDocs(2); writer.SetMergeFactor(100); for (int j = 0; j < 201; j++) { idField.SetValue(System.Convert.ToString(iter * 201 + j)); writer.AddDocument(doc); } int delID = iter * 201; for (int j = 0; j < 20; j++) { writer.DeleteDocuments(new Term("id", System.Convert.ToString(delID))); delID += 5; } // Force a bunch of merge threads to kick off so we // stress out aborting them on close: writer.SetMergeFactor(3); writer.AddDocument(doc); writer.Flush(); writer.Close(false); IndexReader reader = IndexReader.Open(directory); Assert.AreEqual((1 + iter) * 182, reader.NumDocs()); reader.Close(); // Reopen writer = new IndexWriter(directory, autoCommit, ANALYZER, false); } writer.Close(); } directory.Close(); }
public virtual void TestSubclassConcurrentMergeScheduler() { MockRAMDirectory dir = new MockRAMDirectory(); dir.FailOn(new FailOnlyOnMerge()); Document doc = new Document(); Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.Add(idField); IndexWriter writer = new IndexWriter(dir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED); MyMergeScheduler ms = new MyMergeScheduler(this); writer.SetMergeScheduler(ms); writer.SetMaxBufferedDocs(2); writer.SetRAMBufferSizeMB(Lucene.Net.Index.IndexWriter.DISABLE_AUTO_FLUSH); for (int i = 0; i < 20; i++) writer.AddDocument(doc); ms.Sync(); writer.Close(); Assert.IsTrue(mergeThreadCreated); Assert.IsTrue(mergeCalled); Assert.IsTrue(excCalled); dir.Close(); Assert.IsTrue(ConcurrentMergeScheduler.AnyUnhandledExceptions()); }
public virtual void TestFlushExceptions() { MockRAMDirectory directory = new MockRAMDirectory(); FailOnlyOnFlush failure = new FailOnlyOnFlush(); directory.FailOn(failure); IndexWriter writer = new IndexWriter(directory, true, ANALYZER, true); ConcurrentMergeScheduler cms = new ConcurrentMergeScheduler(); writer.SetMergeScheduler(cms); writer.SetMaxBufferedDocs(2); Document doc = new Document(); Field idField = new Field("id", "", Field.Store.YES, Field.Index.NOT_ANALYZED); doc.Add(idField); for (int i = 0; i < 10; i++) { for (int j = 0; j < 20; j++) { idField.SetValue(System.Convert.ToString(i * 20 + j)); writer.AddDocument(doc); } writer.AddDocument(doc); failure.SetDoFail(); try { writer.Flush(); Assert.Fail("failed to hit IOException"); } catch (System.IO.IOException ioe) { failure.ClearDoFail(); } } writer.Close(); IndexReader reader = IndexReader.Open(directory); Assert.AreEqual(200, reader.NumDocs()); reader.Close(); directory.Close(); }
public virtual void TestSnapshotDeletionPolicy_Renamed() { System.IO.FileInfo dir = new System.IO.FileInfo(System.IO.Path.Combine(SupportClass.AppSettings.Get("tempDir", ""), INDEX_PATH)); try { // Sometimes a previous test run leaves the dir behind _TestUtil.RmDir(dir); Directory fsDir = FSDirectory.Open(dir); RunTest(fsDir); fsDir.Close(); } finally { _TestUtil.RmDir(dir); } MockRAMDirectory dir2 = new MockRAMDirectory(); RunTest(dir2); dir2.Close(); }
public virtual void TestSimpleCase() { System.String[] keywords = new System.String[]{"1", "2"}; System.String[] unindexed = new System.String[]{"Netherlands", "Italy"}; System.String[] unstored = new System.String[]{"Amsterdam has lots of bridges", "Venice has lots of canals"}; System.String[] text = new System.String[]{"Amsterdam", "Venice"}; for (int pass = 0; pass < 2; pass++) { bool autoCommit = (0 == pass); Directory dir = new MockRAMDirectory(); IndexWriter modifier = new IndexWriter(dir, autoCommit, new WhitespaceAnalyzer(), true); modifier.SetUseCompoundFile(true); modifier.SetMaxBufferedDeleteTerms(1); for (int i = 0; i < keywords.Length; i++) { Document doc = new Document(); doc.Add(new Field("id", keywords[i], Field.Store.YES, Field.Index.NOT_ANALYZED)); doc.Add(new Field("country", unindexed[i], Field.Store.YES, Field.Index.NO)); doc.Add(new Field("contents", unstored[i], Field.Store.NO, Field.Index.ANALYZED)); doc.Add(new Field("city", text[i], Field.Store.YES, Field.Index.ANALYZED)); modifier.AddDocument(doc); } modifier.Optimize(); modifier.Commit(); Term term = new Term("city", "Amsterdam"); int hitCount = GetHitCount(dir, term); Assert.AreEqual(1, hitCount); modifier.DeleteDocuments(term); modifier.Commit(); hitCount = GetHitCount(dir, term); Assert.AreEqual(0, hitCount); modifier.Close(); dir.Close(); } }
public virtual void TestNPESpanQuery() { Directory dir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(dir, new StandardAnalyzer(new System.Collections.Hashtable(0)), IndexWriter.MaxFieldLength.LIMITED); // Add documents AddDoc(writer, "1", "the big dogs went running to the market"); AddDoc(writer, "2", "the cat chased the mouse, then the cat ate the mouse quickly"); // Commit writer.Close(); // Get searcher IndexReader reader = IndexReader.Open(dir); IndexSearcher searcher = new IndexSearcher(reader); // Control (make sure docs indexed) Assert.AreEqual(2, HitCount(searcher, "the")); Assert.AreEqual(1, HitCount(searcher, "cat")); Assert.AreEqual(1, HitCount(searcher, "dogs")); Assert.AreEqual(0, HitCount(searcher, "rabbit")); // This throws exception (it shouldn't) Assert.AreEqual(1, searcher.Search(CreateSpan(0, true, new SpanQuery[]{CreateSpan(4, false, "chased", "cat"), CreateSpan("ate")}), 10).totalHits); reader.Close(); dir.Close(); }
/// <summary> Make sure if modifier tries to commit but hits disk full that modifier /// remains consistent and usable. Similar to TestIndexReader.testDiskFull(). /// </summary> private void TestOperationsOnDiskFull(bool updates) { bool debug = false; Term searchTerm = new Term("content", "aaa"); int START_COUNT = 157; int END_COUNT = 144; // First build up a starting index: MockRAMDirectory startDir = new MockRAMDirectory(); IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED); for (int i = 0; i < 157; i++) { Document d = new Document(); d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED)); writer.AddDocument(d); } writer.Close(); long diskUsage = startDir.SizeInBytes(); long diskFree = diskUsage + 10; System.IO.IOException err = null; bool done = false; // Iterate w/ ever increasing free disk space: while (!done) { MockRAMDirectory dir = new MockRAMDirectory(startDir); dir.SetPreventDoubleWrite(false); IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED); modifier.SetMaxBufferedDocs(1000); // use flush or close modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close // For each disk size, first try to commit against // dir that will hit random IOExceptions & disk // full; after, give it infinite disk space & turn // off random IOExceptions & retry w/ same reader: bool success = false; for (int x = 0; x < 2; x++) { double rate = 0.1; double diskRatio = ((double)diskFree) / diskUsage; long thisDiskFree; System.String testName; if (0 == x) { thisDiskFree = diskFree; if (diskRatio >= 2.0) { rate /= 2; } if (diskRatio >= 4.0) { rate /= 2; } if (diskRatio >= 6.0) { rate = 0.0; } if (debug) { System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes"); } testName = "disk full during reader.close() @ " + thisDiskFree + " bytes"; } else { thisDiskFree = 0; rate = 0.0; if (debug) { System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space"); } testName = "reader re-use after disk full"; } dir.SetMaxSizeInBytes(thisDiskFree); dir.SetRandomIOExceptionRate(rate, diskFree); try { if (0 == x) { int docId = 12; for (int i = 0; i < 13; i++) { if (updates) { Document d = new Document(); d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED)); d.Add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED)); modifier.UpdateDocument(new Term("id", System.Convert.ToString(docId)), d); } else { // deletes modifier.DeleteDocuments(new Term("id", System.Convert.ToString(docId))); // modifier.setNorm(docId, "contents", (float)2.0); } docId += 12; } } modifier.Close(); success = true; if (0 == x) { done = true; } } catch (System.IO.IOException e) { if (debug) { System.Console.Out.WriteLine(" hit IOException: " + e); System.Console.Out.WriteLine(e.StackTrace); } err = e; if (1 == x) { System.Console.Error.WriteLine(e.StackTrace); Assert.Fail(testName + " hit IOException after disk space was freed up"); } } // If the close() succeeded, make sure there are // no unreferenced files. 
if (success) { Lucene.Net.Util._TestUtil.CheckIndex(dir); TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close"); } // Finally, verify index is not corrupt, and, if // we succeeded, we see all docs changed, and if // we failed, we see either all docs or no docs // changed (transactional semantics): IndexReader newReader = null; try { newReader = IndexReader.Open(dir, true); } catch (System.IO.IOException e) { System.Console.Error.WriteLine(e.StackTrace); Assert.Fail(testName + ":exception when creating IndexReader after disk full during close: " + e); } IndexSearcher searcher = new IndexSearcher(newReader); ScoreDoc[] hits = null; try { hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs; } catch (System.IO.IOException e) { System.Console.Error.WriteLine(e.StackTrace); Assert.Fail(testName + ": exception when searching: " + e); } int result2 = hits.Length; if (success) { if (x == 0 && result2 != END_COUNT) { Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT); } else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT) { // It's possible that the first exception was // "recoverable" wrt pending deletes, in which // case the pending deletes are retained and // then re-flushing (with plenty of disk // space) will succeed in flushing the // deletes: Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } else { // On hitting exception we still may have added // all docs: if (result2 != START_COUNT && result2 != END_COUNT) { System.Console.Error.WriteLine(err.StackTrace); Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT); } } searcher.Close(); newReader.Close(); if (result2 == END_COUNT) { break; } } dir.Close(); // Try again with 10 more bytes of free space: diskFree += 10; } }