/// <summary>
/// Verifies that <c>ForceMerge(maxNumSegments)</c> only schedules merges when the
/// index actually has more segments than requested. The shared flag is consumed by
/// the anonymous merge scheduler, which asserts merging is permitted.
/// </summary>
public virtual void TestForceMergeNotNeeded()
{
    Directory dir = NewDirectory();
    AtomicBoolean mergeAllowed = new AtomicBoolean(true);
    MergeScheduler scheduler = new SerialMergeSchedulerAnonymousInnerClassHelper(this, mergeAllowed);
    IndexWriter writer = new IndexWriter(
        dir,
        NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))
            .SetMergeScheduler(scheduler)
            .SetMergePolicy(MergePolicy()));
    // Randomly force compound files fully on or fully off.
    writer.Config.MergePolicy.NoCFSRatio = Random().NextBoolean() ? 0 : 1;

    // Build a random number of segments, flushing each one via an NRT reader.
    int segmentTotal = TestUtil.NextInt(Random(), 2, 20);
    for (int seg = 0; seg < segmentTotal; ++seg)
    {
        int docsInSegment = TestUtil.NextInt(Random(), 1, 5);
        for (int doc = 0; doc < docsInSegment; ++doc)
        {
            writer.AddDocument(new Document());
        }
        writer.Reader.Dispose();
    }

    // Repeatedly force-merge; merging may only happen when the current segment
    // count exceeds the requested maximum (last iteration always targets 1).
    for (int attempt = 5; attempt >= 0; --attempt)
    {
        int currentSegments = writer.SegmentCount;
        int maxNumSegments = attempt == 0 ? 1 : TestUtil.NextInt(Random(), 1, 10);
        mergeAllowed.Set(currentSegments > maxNumSegments);
        writer.ForceMerge(maxNumSegments);
    }

    writer.Dispose();
    dir.Dispose();
}
/// <summary>
/// Stresses <c>ForceMerge(1)</c> while a background thread keeps adding documents,
/// asserting that at most one merge runs at a time (tracked by <c>MyIndexWriter</c>).
/// Skips silently when the configured merge policy is neither tiered nor log-based.
/// </summary>
public virtual void Test()
{
    Directory dir = NewDirectory();
    MockAnalyzer analyzer = new MockAnalyzer(Random());
    analyzer.MaxTokenLength = TestUtil.NextInt(Random(), 1, IndexWriter.MAX_TERM_LENGTH);
    MyIndexWriter writer = new MyIndexWriter(dir, NewIndexWriterConfig(TEST_VERSION_CURRENT, analyzer));

    // Try to make an index that requires merging:
    writer.Config.SetMaxBufferedDocs(TestUtil.NextInt(Random(), 2, 11));
    int seedDocCount = AtLeast(20);
    LineFileDocs lineDocs = new LineFileDocs(Random(), DefaultCodecSupportsDocValues());
    for (int docIndex = 0; docIndex < seedDocCount; docIndex++)
    {
        writer.AddDocument(lineDocs.NextDoc());
    }

    // Tune the policy so the current segment count is just below the merge trigger.
    MergePolicy policy = writer.Config.MergePolicy;
    int mergeAtOnce = 1 + writer.GetSegmentInfosSize_Nunit();
    if (policy is TieredMergePolicy)
    {
        ((TieredMergePolicy)policy).MaxMergeAtOnce = mergeAtOnce;
    }
    else if (policy is LogMergePolicy)
    {
        ((LogMergePolicy)policy).MergeFactor = mergeAtOnce;
    }
    else
    {
        // skip test
        writer.Dispose();
        dir.Dispose();
        return;
    }

    AtomicBoolean stopFlag = new AtomicBoolean();
    writer.Config.SetMaxBufferedDocs(2);

    // Concurrent indexer runs while ForceMerge(1) executes.
    ThreadClass indexerThread = new ThreadAnonymousInnerClassHelper(this, writer, seedDocCount, lineDocs, stopFlag);
    indexerThread.Start();
    writer.ForceMerge(1);
    stopFlag.Set(true);
    indexerThread.Join();

    Assert.IsTrue(writer.MergeCount.Get() <= 1, "merge count is " + writer.MergeCount.Get());

    writer.Dispose();
    dir.Dispose();
    lineDocs.Dispose();
}
/// <summary>
/// LUCENE-5262: verifies that several failed attempts to obtain an NRT reader do
/// not leak file handles. The injected failure makes <c>writer.Reader</c> throw a
/// <see cref="FakeIOException"/>; the second attempt also fails to close the other
/// NRT reader because it is already marked closed.
/// </summary>
public virtual void TestNRTOpenExceptions()
{
    MockDirectoryWrapper dir = (MockDirectoryWrapper)GetAssertNoDeletesDirectory(NewMockDirectory());
    AtomicBoolean shouldFail = new AtomicBoolean();
    dir.FailOn(new FailureAnonymousInnerClassHelper(this, dir, shouldFail));

    IndexWriterConfig conf = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    conf.SetMergePolicy(NoMergePolicy.COMPOUND_FILES); // prevent merges from getting in the way
    IndexWriter writer = new IndexWriter(dir, conf);

    // create a segment and open an NRT reader
    writer.AddDocument(new Document());
    writer.Reader.Dispose();

    // add a new document so a new NRT reader is required
    writer.AddDocument(new Document());

    // try to obtain an NRT reader twice: first time it fails and closes all the
    // other NRT readers. second time it fails, but also fails to close the
    // other NRT reader, since it is already marked closed!
    for (int i = 0; i < 2; i++)
    {
        shouldFail.Set(true);
        try
        {
            writer.Reader.Dispose();
        }
        catch (FakeIOException) // fix: type-only catch — the variable was unused (CS0168)
        {
            // expected
            if (VERBOSE)
            {
                Console.WriteLine("hit expected fake IOE");
            }
        }
    }

    writer.Dispose();
    dir.Dispose();
}