/// <summary>
/// Verifies that ForceMerge(1) never uses more than 4X the starting index
/// size in transient disk space on the same directory.
/// </summary>
public virtual void TestForceMergeTempSpaceUsage()
{
    MockDirectoryWrapper dir = NewMockDirectory();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy()));
    if (VERBOSE)
    {
        Console.WriteLine("TEST: config1=" + writer.Config);
    }
    for (int j = 0; j < 500; j++)
    {
        TestIndexWriter.AddDocWithIndex(writer, j);
    }
    int termIndexInterval = writer.Config.TermIndexInterval;
    // force one extra segment w/ different doc store so
    // we see the doc stores get merged
    writer.Commit();
    TestIndexWriter.AddDocWithIndex(writer, 500);
    writer.Dispose();
    if (VERBOSE)
    {
        Console.WriteLine("TEST: start disk usage");
    }
    // Baseline: total bytes of all files before the merge.
    long startDiskUsage = 0;
    string[] files = dir.ListAll();
    for (int i = 0; i < files.Length; i++)
    {
        startDiskUsage += dir.FileLength(files[i]);
        if (VERBOSE)
        {
            Console.WriteLine(files[i] + ": " + dir.FileLength(files[i]));
        }
    }
    dir.ResetMaxUsedSizeInBytes();
    dir.TrackDiskUsage = true;
    // Import to use same term index interval else a
    // smaller one here could increase the disk usage and
    // cause a false failure:
    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND).SetTermIndexInterval(termIndexInterval).SetMergePolicy(NewLogMergePolicy()));
    writer.ForceMerge(1);
    writer.Dispose();
    long maxDiskUsage = dir.MaxUsedSizeInBytes;
    Assert.IsTrue(maxDiskUsage <= 4 * startDiskUsage, "forceMerge used too much temporary space: starting usage was " + startDiskUsage + " bytes; max temp usage was " + maxDiskUsage + " but should have been " + (4 * startDiskUsage) + " (= 4X starting usage)");
    dir.Dispose();
}
/// <summary>
/// Runs an indexing thread for roughly one second while repeatedly taking
/// snapshot backups of the index, then adds a final document, closes the
/// writer, and asserts the deletion policy left no unreferenced files.
/// </summary>
/// <param name="random">Source of randomness for the analyzer.</param>
/// <param name="dir">Directory to index into and back up.</param>
private void RunTest(Random random, Directory dir)
{
    // Run for ~1 seconds
    // NOTE(review): DateTime.Now.Millisecond is only the millisecond component
    // (0-999), not an absolute timestamp, so this is not a real deadline --
    // confirm how ThreadAnonymousInnerClassHelper interprets stopTime before
    // changing this expression.
    long stopTime = DateTime.Now.Millisecond + 1000;

    SnapshotDeletionPolicy dp = DeletionPolicy;
    IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)).SetIndexDeletionPolicy(dp).SetMaxBufferedDocs(2));

    // Verify we catch misuse: snapshotting before the policy is live must throw.
    try
    {
        dp.Snapshot();
        Assert.Fail("did not hit exception");
    }
    catch (InvalidOperationException) // expected; unused variable removed (CS0168)
    {
    }
    dp = (SnapshotDeletionPolicy)writer.Config.DelPolicy;
    writer.Commit();

    ThreadClass t = new ThreadAnonymousInnerClassHelper(this, stopTime, writer);
    t.Start();

    // While the above indexing thread is running, take many
    // backups:
    do
    {
        BackupIndex(dir, dp);
        Thread.Sleep(20);
    } while (t.IsAlive);
    t.Join();

    // Add one more document to force writer to commit a
    // final segment, so deletion policy has a chance to
    // delete again:
    Document doc = new Document();
    FieldType customType = new FieldType(TextField.TYPE_STORED);
    customType.StoreTermVectors = true;
    customType.StoreTermVectorPositions = true;
    customType.StoreTermVectorOffsets = true;
    doc.Add(NewField("content", "aaa", customType));
    writer.AddDocument(doc);

    // Make sure we don't have any leftover files in the
    // directory:
    writer.Dispose();
    TestIndexWriter.AssertNoUnreferencedFiles(dir, "some files were not deleted but should have been");
}
/// <summary>
/// Verifies commit-on-close semantics: changes made by an open IndexWriter
/// are invisible to readers until the writer is disposed.
/// </summary>
public virtual void TestCommitOnClose()
{
    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    for (int i = 0; i < 14; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }
    writer.Dispose();

    Term searchTerm = new Term("content", "aaa");
    DirectoryReader reader = DirectoryReader.Open(dir);
    IndexSearcher searcher = NewSearcher(reader);
    ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "first number of hits");
    reader.Dispose();

    reader = DirectoryReader.Open(dir);

    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    for (int i = 0; i < 3; i++)
    {
        for (int j = 0; j < 11; j++)
        {
            TestIndexWriter.AddDoc(writer);
        }
        // A freshly opened reader must still see only the committed 14 docs:
        IndexReader r = DirectoryReader.Open(dir);
        searcher = NewSearcher(r);
        hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
        Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer");
        r.Dispose();
        Assert.IsTrue(reader.Current, "reader should have still been current");
    }

    // Now, close the writer:
    writer.Dispose();
    Assert.IsFalse(reader.Current, "reader should not be current now");

    IndexReader ir = DirectoryReader.Open(dir);
    searcher = NewSearcher(ir);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    // 14 original + 3 * 11 added = 47 docs visible after close:
    Assert.AreEqual(47, hits.Length, "reader did not see changes after writer was closed");
    ir.Dispose();
    reader.Dispose();
    dir.Dispose();
}
/// <summary>
/// Verifies that IndexWriter.Commit() makes changes visible to newly opened
/// or reopened readers without closing the writer, and that already-open
/// readers keep their point-in-time view.
/// </summary>
public virtual void TestForceCommit()
{
    Directory dir = NewDirectory();

    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
    writer.Commit();

    for (int i = 0; i < 23; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    writer.Commit();
    DirectoryReader reader2 = DirectoryReader.OpenIfChanged(reader);
    Assert.IsNotNull(reader2);
    // Original reader keeps its snapshot; the reopened one sees the commit:
    Assert.AreEqual(0, reader.NumDocs);
    Assert.AreEqual(23, reader2.NumDocs);
    reader.Dispose();

    for (int i = 0; i < 17; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }
    Assert.AreEqual(23, reader2.NumDocs);
    reader2.Dispose();
    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(23, reader.NumDocs);
    reader.Dispose();
    writer.Commit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(40, reader.NumDocs);
    reader.Dispose();
    writer.Dispose();
    dir.Dispose();
}
/// <summary>
/// Verifies commit user data: empty by default, and readable from the
/// IndexCommit after being set via IndexWriter.CommitData.
/// </summary>
public virtual void TestCommitUserData()
{
    Directory dir = NewDirectory();
    IndexWriter w = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2));
    for (int j = 0; j < 17; j++)
    {
        TestIndexWriter.AddDoc(w);
    }
    w.Dispose();

    DirectoryReader r = DirectoryReader.Open(dir);
    // commit(Map) never called for this index
    Assert.AreEqual(0, r.IndexCommit.UserData.Count);
    r.Dispose();

    w = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2));
    for (int j = 0; j < 17; j++)
    {
        TestIndexWriter.AddDoc(w);
    }
    IDictionary<string, string> data = new Dictionary<string, string>();
    data["label"] = "test1";
    w.CommitData = data;
    w.Dispose();

    r = DirectoryReader.Open(dir);
    Assert.AreEqual("test1", r.IndexCommit.UserData["label"]);
    r.Dispose();

    // A merge must preserve the commit user data:
    w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    w.ForceMerge(1);
    w.Dispose();

    dir.Dispose();
}
/// <summary>
/// Wraps <paramref name="input"/>, binding the filter to the enclosing test
/// instance and remembering the name of the field being tokenized.
/// </summary>
public CrashingFilter(TestIndexWriter enclosingInstance, System.String fieldName, TokenStream input)
    : base(input)
{
    this.fieldName = fieldName;
    InitBlock(enclosingInstance);
}
/// <summary>
/// Captures the target writer, the document to index, the shared failure
/// list, and the owning test fixture via the InitBlock helper.
/// </summary>
public AnonymousClassThread1(IndexWriter finalWriter, Document doc, System.Collections.ArrayList failure, TestIndexWriter enclosingInstance)
{
    InitBlock(finalWriter, doc, failure, enclosingInstance);
}
/// <summary>
/// Stores the collaborators handed to the anonymous thread: the owning test
/// fixture, the target writer, the iteration count, and the loop index.
/// </summary>
private void InitBlock(int NUM_ITER, IndexWriter writer, int finalI, TestIndexWriter enclosingInstance)
{
    this.enclosingInstance = enclosingInstance;
    this.writer = writer;
    this.NUM_ITER = NUM_ITER;
    this.finalI = finalI;
}
/// <summary>
/// Creates the test merge scheduler, wiring it to the enclosing test instance.
/// </summary>
public MyMergeScheduler(TestIndexWriter enclosingInstance)
{
    InitBlock(enclosingInstance);
}
/// <summary>
/// Creates the interruptible indexer thread, wiring it to the enclosing test
/// instance.
/// </summary>
public IndexerThreadInterrupt(TestIndexWriter enclosingInstance)
{
    InitBlock(enclosingInstance);
}
/// <summary>
/// Binds the enclosing test instance and obtains the term and
/// position-increment attributes this token stream populates per token.
/// </summary>
private void InitBlock(TestIndexWriter enclosingInstance)
{
    this.enclosingInstance = enclosingInstance;
    // Registration order preserved as written -- presumably AddAttribute both
    // registers and returns the per-stream attribute instances; confirm before
    // reordering.
    this.termAtt = (TermAttribute)this.AddAttribute(typeof(TermAttribute));
    this.posIncrAtt = (PositionIncrementAttribute)this.AddAttribute(typeof(PositionIncrementAttribute));
}
/// <summary>
/// Creates the test RAM directory: clears the inherited lock factory and
/// installs a private single-instance lock factory in its place.
/// </summary>
internal MyRAMDirectory(TestIndexWriter enclosingInstance)
{
    InitBlock(enclosingInstance);
    lockFactory = null;
    myLockFactory = new SingleInstanceLockFactory();
}
/// <summary>
/// Creates the test writer over <paramref name="dir"/> with a StandardAnalyzer,
/// keeping a reference to the directory and binding the enclosing test.
/// </summary>
public MyIndexWriter(TestIndexWriter enclosingInstance, Directory dir)
    : base(dir, new StandardAnalyzer())
{
    myDir = dir;
    InitBlock(enclosingInstance);
}
/// <summary>
/// Verifies that IndexWriter's transient disk usage, both while adding
/// documents and across the final ForceMerge/close, stays under 150X the
/// starting index size (i.e. the deleter removes intermediate segments).
/// </summary>
public virtual void TestCommitOnCloseDiskUsage()
{
    // MemoryCodec, since it uses FST, is not necessarily
    // "additive", ie if you add up N small FSTs, then merge
    // them, the merged result can easily be larger than the
    // sum because the merged FST may use array encoding for
    // some arcs (which uses more space):
    string idFormat = TestUtil.GetPostingsFormat("id");
    string contentFormat = TestUtil.GetPostingsFormat("content");
    AssumeFalse("this test cannot run with Memory codec", idFormat.Equals("Memory") || contentFormat.Equals("Memory"));
    MockDirectoryWrapper dir = NewMockDirectory();
    Analyzer analyzer;
    if (Random().NextBoolean())
    {
        // no payloads
        analyzer = new AnalyzerAnonymousInnerClassHelper(this);
    }
    else
    {
        // fixed length payloads
        int length = Random().Next(200);
        analyzer = new AnalyzerAnonymousInnerClassHelper2(this, length);
    }

    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetMaxBufferedDocs(10).SetReaderPooling(false).SetMergePolicy(NewLogMergePolicy(10)));
    for (int j = 0; j < 30; j++)
    {
        TestIndexWriter.AddDocWithIndex(writer, j);
    }
    writer.Dispose();
    dir.ResetMaxUsedSizeInBytes();
    dir.TrackDiskUsage = true;
    long startDiskUsage = dir.MaxUsedSizeInBytes;
    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer).SetOpenMode(OpenMode_e.APPEND).SetMaxBufferedDocs(10).SetMergeScheduler(new SerialMergeScheduler()).SetReaderPooling(false).SetMergePolicy(NewLogMergePolicy(10)));
    for (int j = 0; j < 1470; j++)
    {
        TestIndexWriter.AddDocWithIndex(writer, j);
    }
    long midDiskUsage = dir.MaxUsedSizeInBytes;
    dir.ResetMaxUsedSizeInBytes();
    writer.ForceMerge(1);
    writer.Dispose();

    DirectoryReader.Open(dir).Dispose();

    long endDiskUsage = dir.MaxUsedSizeInBytes;

    // Ending index is 50X as large as starting index; due
    // to 3X disk usage normally we allow 150X max
    // transient usage. If something is wrong w/ deleter
    // and it doesn't delete intermediate segments then it
    // will exceed this 150X:
    // System.out.println("start " + startDiskUsage + "; mid " + midDiskUsage + ";end " + endDiskUsage);
    Assert.IsTrue(midDiskUsage < 150 * startDiskUsage, "writer used too much space while adding documents: mid=" + midDiskUsage + " start=" + startDiskUsage + " end=" + endDiskUsage + " max=" + (startDiskUsage * 150));
    Assert.IsTrue(endDiskUsage < 150 * startDiskUsage, "writer used too much space after close: endDiskUsage=" + endDiskUsage + " startDiskUsage=" + startDiskUsage + " max=" + (startDiskUsage * 150));
    dir.Dispose();
}
/// <summary>
/// Creates the anonymous writer over <paramref name="mockRAMDir"/> with the
/// given analyzer, capturing the shared exception list and the owning test.
/// </summary>
public AnonymousClassIndexWriter(System.Collections.IList thrown, TestIndexWriter enclosingInstance, MockRAMDirectory mockRAMDir, StandardAnalyzer standardAnalyzer)
    : base(mockRAMDir, standardAnalyzer)
{
    InitBlock(thrown, enclosingInstance);
}
/// <summary> Make sure if modifier tries to commit but hits disk full that modifier
/// remains consistent and usable. Similar to TestIndexReader.testDiskFull().
/// </summary>
/// <param name="updates">If true, exercise UpdateDocument; otherwise exercise
/// DeleteDocuments. Both paths must preserve transactional semantics.</param>
private void TestOperationsOnDiskFull(bool updates)
{
    bool debug = false;
    Term searchTerm = new Term("content", "aaa");
    int START_COUNT = 157; // docs before any update/delete
    int END_COUNT = 144;   // docs after the 13 deletes/updates succeed

    // First build up a starting index:
    MockRAMDirectory startDir = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(startDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.UNLIMITED, null);
    for (int i = 0; i < 157; i++)
    {
        Document d = new Document();
        d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
        d.Add(new Field("content", "aaa " + i, Field.Store.NO, Field.Index.ANALYZED));
        writer.AddDocument(d, null);
    }
    writer.Close();

    long diskUsage = startDir.SizeInBytes();
    long diskFree = diskUsage + 10;

    System.IO.IOException err = null;

    bool done = false;

    // Iterate w/ ever increasing free disk space:
    while (!done)
    {
        // Fresh copy of the starting index for each cycle:
        MockRAMDirectory dir = new MockRAMDirectory(startDir);
        dir.SetPreventDoubleWrite(false);
        IndexWriter modifier = new IndexWriter(dir, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED, null);

        modifier.SetMaxBufferedDocs(1000); // use flush or close
        modifier.SetMaxBufferedDeleteTerms(1000); // use flush or close

        // For each disk size, first try to commit against
        // dir that will hit random IOExceptions & disk
        // full; after, give it infinite disk space & turn
        // off random IOExceptions & retry w/ same reader:
        bool success = false;
        for (int x = 0; x < 2; x++)
        {
            double rate = 0.1;
            double diskRatio = ((double)diskFree) / diskUsage;
            long thisDiskFree;
            System.String testName;

            if (0 == x)
            {
                thisDiskFree = diskFree;
                // Taper off the random-IOException rate as free space grows:
                if (diskRatio >= 2.0)
                {
                    rate /= 2;
                }
                if (diskRatio >= 4.0)
                {
                    rate /= 2;
                }
                if (diskRatio >= 6.0)
                {
                    rate = 0.0;
                }
                if (debug)
                {
                    System.Console.Out.WriteLine("\ncycle: " + diskFree + " bytes");
                }
                testName = "disk full during reader.close() @ " + thisDiskFree + " bytes";
            }
            else
            {
                thisDiskFree = 0;
                rate = 0.0;
                if (debug)
                {
                    System.Console.Out.WriteLine("\ncycle: same writer: unlimited disk space");
                }
                testName = "reader re-use after disk full";
            }

            dir.SetMaxSizeInBytes(thisDiskFree);
            dir.SetRandomIOExceptionRate(rate, diskFree);

            try
            {
                if (0 == x)
                {
                    int docId = 12;
                    for (int i = 0; i < 13; i++)
                    {
                        if (updates)
                        {
                            Document d = new Document();
                            d.Add(new Field("id", System.Convert.ToString(i), Field.Store.YES, Field.Index.NOT_ANALYZED));
                            d.Add(new Field("content", "bbb " + i, Field.Store.NO, Field.Index.ANALYZED));
                            modifier.UpdateDocument(new Term("id", System.Convert.ToString(docId)), d, null);
                        }
                        else
                        {
                            // deletes
                            modifier.DeleteDocuments(null, new Term("id", System.Convert.ToString(docId)));
                            // modifier.setNorm(docId, "contents", (float)2.0);
                        }
                        docId += 12;
                    }
                }
                modifier.Close();
                success = true;
                if (0 == x)
                {
                    done = true;
                }
            }
            catch (System.IO.IOException e)
            {
                if (debug)
                {
                    System.Console.Out.WriteLine(" hit IOException: " + e);
                    System.Console.Out.WriteLine(e.StackTrace);
                }
                err = e;
                if (1 == x)
                {
                    // With unlimited disk space the retry must not fail:
                    System.Console.Error.WriteLine(e.StackTrace);
                    Assert.Fail(testName + " hit IOException after disk space was freed up");
                }
            }

            // If the close() succeeded, make sure there are
            // no unreferenced files.
            if (success)
            {
                Lucene.Net.Util._TestUtil.CheckIndex(dir);
                TestIndexWriter.AssertNoUnreferencedFiles(dir, "after writer.close");
            }

            // Finally, verify index is not corrupt, and, if
            // we succeeded, we see all docs changed, and if
            // we failed, we see either all docs or no docs
            // changed (transactional semantics):
            IndexReader newReader = null;
            try
            {
                newReader = IndexReader.Open((Directory)dir, true, null);
            }
            catch (System.IO.IOException e)
            {
                System.Console.Error.WriteLine(e.StackTrace);
                Assert.Fail(testName + ":exception when creating IndexReader after disk full during close: " + e);
            }

            IndexSearcher searcher = new IndexSearcher(newReader);
            ScoreDoc[] hits = null;
            try
            {
                hits = searcher.Search(new TermQuery(searchTerm), null, 1000, null).ScoreDocs;
            }
            catch (System.IO.IOException e)
            {
                System.Console.Error.WriteLine(e.StackTrace);
                Assert.Fail(testName + ": exception when searching: " + e);
            }
            int result2 = hits.Length;
            if (success)
            {
                if (x == 0 && result2 != END_COUNT)
                {
                    Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + END_COUNT);
                }
                else if (x == 1 && result2 != START_COUNT && result2 != END_COUNT)
                {
                    // It's possible that the first exception was
                    // "recoverable" wrt pending deletes, in which
                    // case the pending deletes are retained and
                    // then re-flushing (with plenty of disk
                    // space) will succeed in flushing the
                    // deletes:
                    Assert.Fail(testName + ": method did not throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }
            else
            {
                // On hitting exception we still may have added
                // all docs:
                if (result2 != START_COUNT && result2 != END_COUNT)
                {
                    System.Console.Error.WriteLine(err.StackTrace);
                    Assert.Fail(testName + ": method did throw exception but hits.length for search on term 'aaa' is " + result2 + " instead of expected " + START_COUNT + " or " + END_COUNT);
                }
            }

            searcher.Close();
            newReader.Close();

            if (result2 == END_COUNT)
            {
                break;
            }
        }

        dir.Close();

        // Try again with 10 more bytes of free space:
        diskFree += 10;
    }
}
/// <summary>
/// Stores the shared exception list and the owning test fixture.
/// </summary>
private void InitBlock(System.Collections.IList thrown, TestIndexWriter enclosingInstance)
{
    this.enclosingInstance = enclosingInstance;
    this.thrown = thrown;
}
/// <summary>
/// Creates the commit-and-add-indexes worker with the given copy count,
/// binding it to the enclosing test instance.
/// </summary>
public CommitAndAddIndexes3(TestIndexWriter enclosingInstance, int numCopy)
    : base(numCopy)
{
    InitBlock(enclosingInstance);
}
/// <summary>
/// Stores the anonymous thread's collaborators: the owning test fixture, the
/// target writer, the document to index, and the shared failure list.
/// </summary>
private void InitBlock(IndexWriter finalWriter, Document doc, System.Collections.ArrayList failure, TestIndexWriter enclosingInstance)
{
    this.enclosingInstance = enclosingInstance;
    this.finalWriter = finalWriter;
    this.doc = doc;
    this.failure = failure;
}
/// <summary>
/// Applies many rolling updates (replacing documents by id, sometimes via
/// TryDeleteDocument + AddDocument) while periodically reopening NRT readers,
/// then checks the final doc count, that no leftover files remain, and that
/// SegmentInfos' reported sizes match the actual file lengths (LUCENE-4455).
/// </summary>
public virtual void TestRollingUpdates_Mem()
{
    Random random = new Random(Random().Next());
    BaseDirectoryWrapper dir = NewDirectory();
    LineFileDocs docs = new LineFileDocs(random, DefaultCodecSupportsDocValues());

    //provider.register(new MemoryCodec());
    if ((!"Lucene3x".Equals(Codec.Default.Name, StringComparison.Ordinal)) && Random().NextBoolean())
    {
        // BUGFIX: was Random().nextBoolean() (Java-style casing), which does
        // not exist in .NET; every other call site in this file uses
        // NextBoolean().
        Codec.Default = TestUtil.AlwaysPostingsFormat(new MemoryPostingsFormat(Random().NextBoolean(), random.NextFloat()));
    }

    MockAnalyzer analyzer = new MockAnalyzer(Random());
    analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH);

    IndexWriter w = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));
    int SIZE = AtLeast(20);
    int id = 0;
    IndexReader r = null;
    IndexSearcher s = null;
    int numUpdates = (int)(SIZE * (2 + (TEST_NIGHTLY ? 200 * Random().NextDouble() : 5 * Random().NextDouble())));
    if (VERBOSE)
    {
        Console.WriteLine("TEST: numUpdates=" + numUpdates);
    }
    int updateCount = 0;
    // TODO: sometimes update ids not in order...
    for (int docIter = 0; docIter < numUpdates; docIter++)
    {
        Documents.Document doc = docs.NextDoc();
        string myID = "" + id;
        // Cycle the id through [0, SIZE):
        if (id == SIZE - 1)
        {
            id = 0;
        }
        else
        {
            id++;
        }
        if (VERBOSE)
        {
            Console.WriteLine(" docIter=" + docIter + " id=" + id);
        }
        ((Field)doc.GetField("docid")).SetStringValue(myID);

        Term idTerm = new Term("docid", myID);

        bool doUpdate;
        if (s != null && updateCount < SIZE)
        {
            TopDocs hits = s.Search(new TermQuery(idTerm), 1);
            Assert.AreEqual(1, hits.TotalHits);
            // If the direct delete fails, fall back to UpdateDocument:
            doUpdate = !w.TryDeleteDocument(r, hits.ScoreDocs[0].Doc);
            if (VERBOSE)
            {
                if (doUpdate)
                {
                    Console.WriteLine(" tryDeleteDocument failed");
                }
                else
                {
                    Console.WriteLine(" tryDeleteDocument succeeded");
                }
            }
        }
        else
        {
            doUpdate = true;
            if (VERBOSE)
            {
                Console.WriteLine(" no searcher: doUpdate=true");
            }
        }

        updateCount++;

        if (doUpdate)
        {
            w.UpdateDocument(idTerm, doc);
        }
        else
        {
            w.AddDocument(doc);
        }

        if (docIter >= SIZE && Random().Next(50) == 17)
        {
            if (r != null)
            {
                r.Dispose();
            }

            bool applyDeletions = Random().NextBoolean();

            if (VERBOSE)
            {
                Console.WriteLine("TEST: reopen applyDeletions=" + applyDeletions);
            }

            r = w.GetReader(applyDeletions);
            if (applyDeletions)
            {
                s = NewSearcher(r);
            }
            else
            {
                s = null;
            }
            Assert.IsTrue(!applyDeletions || r.NumDocs == SIZE, "applyDeletions=" + applyDeletions + " r.NumDocs=" + r.NumDocs + " vs SIZE=" + SIZE);
            updateCount = 0;
        }
    }

    if (r != null)
    {
        r.Dispose();
    }

    w.Commit();
    Assert.AreEqual(SIZE, w.NumDocs);

    w.Dispose();

    TestIndexWriter.AssertNoUnreferencedFiles(dir, "leftover files after rolling updates");

    docs.Dispose();

    // LUCENE-4455: verify per-segment sizes agree with actual file lengths.
    SegmentInfos infos = new SegmentInfos();
    infos.Read(dir);
    long totalBytes = 0;
    foreach (SegmentCommitInfo sipc in infos.Segments)
    {
        totalBytes += sipc.GetSizeInBytes();
    }
    long totalBytes2 = 0;
    foreach (string fileName in dir.ListAll())
    {
        if (!fileName.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal))
        {
            totalBytes2 += dir.FileLength(fileName);
        }
    }
    Assert.AreEqual(totalBytes2, totalBytes);
    dir.Dispose();
}
/// <summary>
/// Creates an indexer thread bound to the enclosing test, recording the
/// writer it drives and whether errors are tolerated.
/// </summary>
public IndexerThread(TestIndexWriter enclosingInstance, IndexWriter writer, bool noErrors)
{
    InitBlock(enclosingInstance);
    this.noErrors = noErrors;
    this.writer = writer;
}
/// <summary>
/// Verifies that ForceMerge followed by Rollback leaves the index in its
/// pre-merge multi-segment state, while ForceMerge followed by close yields a
/// single segment -- with no unreferenced files in either case.
/// </summary>
public virtual void TestCommitOnCloseForceMerge()
{
    Directory dir = NewDirectory();
    // Must disable throwing exc on double-write: this
    // test uses IW.rollback which easily results in
    // writing to same file more than once
    if (dir is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
    }
    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10).SetMergePolicy(NewLogMergePolicy(10)));
    for (int j = 0; j < 17; j++)
    {
        AddDocWithIndex(writer, j);
    }
    writer.Dispose();

    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
    writer.ForceMerge(1);

    // Open a reader before closing (commiting) the writer:
    DirectoryReader reader = DirectoryReader.Open(dir);

    // Reader should see index as multi-seg at this
    // point:
    Assert.IsTrue(reader.Leaves.Count > 1, "Reader incorrectly sees one segment");
    reader.Dispose();

    // Abort the writer:
    writer.Rollback();
    TestIndexWriter.AssertNoUnreferencedFiles(dir, "aborted writer after forceMerge");

    // Open a reader after aborting writer:
    reader = DirectoryReader.Open(dir);

    // Reader should still see index as multi-segment
    Assert.IsTrue(reader.Leaves.Count > 1, "Reader incorrectly sees one segment");
    reader.Dispose();

    if (VERBOSE)
    {
        Console.WriteLine("TEST: do real full merge");
    }
    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND));
    writer.ForceMerge(1);
    writer.Dispose();

    if (VERBOSE)
    {
        Console.WriteLine("TEST: writer closed");
    }
    TestIndexWriter.AssertNoUnreferencedFiles(dir, "aborted writer after forceMerge");

    // Open a reader after aborting writer:
    reader = DirectoryReader.Open(dir);

    // Reader should see index as one segment
    Assert.AreEqual(1, reader.Leaves.Count, "Reader incorrectly sees more than one segment");
    reader.Dispose();
    dir.Dispose();
}
/// <summary>
/// Records the enclosing test instance for later use by this helper class.
/// </summary>
private void InitBlock(TestIndexWriter enclosingInstance)
{
    this.enclosingInstance = enclosingInstance;
}
/// <summary>
/// Creates the mock writer, forwarding directory/analyzer/create/field-length
/// settings to the base IndexWriter and binding the enclosing test instance.
/// </summary>
public MockIndexWriter4(TestIndexWriter enclosingInstance, Directory dir, Analyzer a, bool create, MaxFieldLength mfl)
    : base(dir, a, create, mfl)
{
    InitBlock(enclosingInstance);
}
/// <summary>
/// Creates the anonymous analyzer, wiring it to the enclosing test instance.
/// </summary>
public AnonymousClassAnalyzer(TestIndexWriter enclosingInstance)
{
    InitBlock(enclosingInstance);
}
/// <summary>
/// Verifies that Rollback after PrepareCommit discards the pending commit:
/// no reader ever sees the prepared docs, and the writer/index stay usable
/// for a subsequent PrepareCommit + Commit cycle.
/// </summary>
public virtual void TestPrepareCommitRollback()
{
    Directory dir = NewDirectory();
    if (dir is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
    }

    IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(2).SetMergePolicy(NewLogMergePolicy(5)));
    writer.Commit();

    for (int i = 0; i < 23; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    DirectoryReader reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);

    writer.PrepareCommit();

    // A prepared-but-uncommitted commit must remain invisible:
    IndexReader reader2 = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader2.NumDocs);

    writer.Rollback();

    // Nothing changed from the original reader's point of view:
    IndexReader reader3 = DirectoryReader.OpenIfChanged(reader);
    Assert.IsNull(reader3);
    Assert.AreEqual(0, reader.NumDocs);
    Assert.AreEqual(0, reader2.NumDocs);
    reader.Dispose();
    reader2.Dispose();

    writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    for (int i = 0; i < 17; i++)
    {
        TestIndexWriter.AddDoc(writer);
    }

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();

    writer.PrepareCommit();

    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(0, reader.NumDocs);
    reader.Dispose();

    writer.Commit();
    reader = DirectoryReader.Open(dir);
    Assert.AreEqual(17, reader.NumDocs);
    reader.Dispose();
    writer.Dispose();
    dir.Dispose();
}
/// <summary>
/// Verifies that Rollback discards all uncommitted changes (adds and deletes)
/// and leaves no unreferenced files, and that the index can be reopened and
/// written to normally afterwards.
/// </summary>
public virtual void TestCommitOnCloseAbort()
{
    Directory dir = NewDirectory();
    IndexWriter writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetMaxBufferedDocs(10));
    for (int i = 0; i < 14; i++)
    {
        AddDoc(writer);
    }
    writer.Dispose();

    Term searchTerm = new Term("content", "aaa");
    IndexReader reader = DirectoryReader.Open(dir);
    IndexSearcher searcher = NewSearcher(reader);
    ScoreDoc[] hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "first number of hits");
    reader.Dispose();

    writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10));
    for (int j = 0; j < 17; j++)
    {
        AddDoc(writer);
    }
    // Delete all docs:
    writer.DeleteDocuments(searchTerm);

    // Uncommitted adds/deletes must be invisible to a new reader:
    reader = DirectoryReader.Open(dir);
    searcher = NewSearcher(reader);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer");
    reader.Dispose();

    // Now, close the writer:
    writer.Rollback();

    TestIndexWriter.AssertNoUnreferencedFiles(dir, "unreferenced files remain after rollback()");

    reader = DirectoryReader.Open(dir);
    searcher = NewSearcher(reader);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    Assert.AreEqual(14, hits.Length, "saw changes after writer.abort");
    reader.Dispose();

    // Now make sure we can re-open the index, add docs,
    // and all is good:
    writer = new IndexWriter(dir, (IndexWriterConfig)NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode.APPEND).SetMaxBufferedDocs(10));

    // On abort, writer in fact may write to the same
    // segments_N file:
    if (dir is MockDirectoryWrapper)
    {
        ((MockDirectoryWrapper)dir).PreventDoubleWrite = false;
    }

    for (int i = 0; i < 12; i++)
    {
        for (int j = 0; j < 17; j++)
        {
            AddDoc(writer);
        }
        IndexReader r = DirectoryReader.Open(dir);
        searcher = NewSearcher(r);
        hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
        Assert.AreEqual(14, hits.Length, "reader incorrectly sees changes from writer");
        r.Dispose();
    }

    writer.Dispose();
    IndexReader ir = DirectoryReader.Open(dir);
    searcher = NewSearcher(ir);
    hits = searcher.Search(new TermQuery(searchTerm), null, 1000).ScoreDocs;
    // 14 surviving docs + 12 * 17 new docs = 218:
    Assert.AreEqual(218, hits.Length, "didn't see changes after close");
    ir.Dispose();

    dir.Dispose();
}
/// <summary>
/// Captures the iteration count, target writer, loop index, and owning test
/// fixture for the anonymous worker thread via its InitBlock helper.
/// </summary>
public AnonymousClassThread(int NUM_ITER, IndexWriter writer, int finalI, TestIndexWriter enclosingInstance)
{
    InitBlock(NUM_ITER, writer, finalI, enclosingInstance);
}
/// <summary>
/// Simulates running out of disk space during AddDocument: repeatedly indexes
/// against a size-capped directory, and on failure either rolls back or
/// closes, asserting that no unreferenced files remain and the index is still
/// openable; retries with progressively more free space until a pass succeeds.
/// </summary>
public virtual void TestAddDocumentOnDiskFull()
{
    // Pass 0 closes the writer after a failure; pass 1 rolls it back.
    for (int pass = 0; pass < 2; pass++)
    {
        if (VERBOSE)
        {
            // NOTE(review): this statement (through the start of the while
            // loop) was corrupted in the source ("******"); reconstructed from
            // the upstream Lucene TestIndexWriterOnDiskFull test -- verify
            // against the original port.
            Console.WriteLine("TEST: pass=" + pass);
        }
        bool doAbort = pass == 1;
        long diskFree = TestUtil.NextInt(Random(), 100, 300);
        while (true)
        {
            if (VERBOSE)
            {
                Console.WriteLine("TEST: cycle: diskFree=" + diskFree);
            }
            MockDirectoryWrapper dir = new MockDirectoryWrapper(Random(), new RAMDirectory());
            dir.MaxSizeInBytes = diskFree;
            IndexWriter writer = new IndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
            IMergeScheduler ms = writer.Config.MergeScheduler;
            if (ms is IConcurrentMergeScheduler)
            {
                // this test intentionally produces exceptions
                // in the threads that CMS launches; we don't
                // want to pollute test output with these.
                ((IConcurrentMergeScheduler)ms).SetSuppressExceptions();
            }

            bool hitError = false;
            try
            {
                for (int i = 0; i < 200; i++)
                {
                    AddDoc(writer);
                }
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: done adding docs; now commit");
                }
                writer.Commit();
            }
            catch (IOException e)
            {
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: exception on addDoc");
                    Console.WriteLine(e.StackTrace);
                }
                hitError = true;
            }

            if (hitError)
            {
                if (doAbort)
                {
                    if (VERBOSE)
                    {
                        Console.WriteLine("TEST: now rollback");
                    }
                    writer.Rollback();
                }
                else
                {
                    try
                    {
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: now close");
                        }
                        writer.Dispose();
                    }
                    catch (IOException e)
                    {
                        if (VERBOSE)
                        {
                            Console.WriteLine("TEST: exception on close; retry w/ no disk space limit");
                            Console.WriteLine(e.StackTrace);
                        }
                        // Lift the disk cap so the close can finish:
                        dir.MaxSizeInBytes = 0;
                        writer.Dispose();
                    }
                }

                //TestUtil.SyncConcurrentMerges(ms);

                if (TestUtil.AnyFilesExceptWriteLock(dir))
                {
                    TestIndexWriter.AssertNoUnreferencedFiles(dir, "after disk full during addDocument");

                    // Make sure reader can open the index:
                    DirectoryReader.Open(dir).Dispose();
                }

                dir.Dispose();
                // Now try again w/ more space:
                diskFree += TEST_NIGHTLY ? TestUtil.NextInt(Random(), 400, 600) : TestUtil.NextInt(Random(), 3000, 5000);
            }
            else
            {
                //TestUtil.SyncConcurrentMerges(writer);
                dir.MaxSizeInBytes = 0;
                writer.Dispose();
                dir.Dispose();
                break;
            }
        }
    }
}
/// <summary>
/// Creates the anonymous token stream, wiring it to the enclosing test
/// instance.
/// </summary>
public AnonymousClassTokenStream(TestIndexWriter enclosingInstance)
{
    InitBlock(enclosingInstance);
}