public virtual void Test1()
{
    ParallelAtomicReader pr = new ParallelAtomicReader(Ir1, Ir2);
    IBits liveDocs = pr.LiveDocs;
    Fields fields = pr.Fields;
    IEnumerator<string> fe = fields.GetEnumerator();

    fe.MoveNext();
    string f = fe.Current;
    Assert.AreEqual("field1", f);
    CheckTerms(fields.GetTerms(f), liveDocs, "brown", "fox", "jumps", "quick", "the");

    fe.MoveNext();
    f = fe.Current;
    Assert.AreEqual("field2", f);
    CheckTerms(fields.GetTerms(f), liveDocs, "brown", "fox", "jumps", "quick", "the");

    fe.MoveNext();
    f = fe.Current;
    Assert.AreEqual("field3", f);
    CheckTerms(fields.GetTerms(f), liveDocs, "dog", "fox", "jumps", "lazy", "over", "the");

    Assert.IsFalse(fe.MoveNext());
}
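// CheckTerms is a fixture helper not shown in these snippets. Below is a minimal sketch of
// what it plausibly does, mirroring the upstream Lucene checkTerms; the exact signature and
// assertions are assumptions, not this project's verbatim code. It walks the TermsEnum,
// asserts the expected terms appear in order, and verifies each term has a live posting.
private static void CheckTerms(Terms terms, IBits liveDocs, params string[] termsList)
{
    Assert.IsNotNull(terms);
    TermsEnum te = terms.GetEnumerator();
    foreach (string t in termsList)
    {
        Assert.IsTrue(te.MoveNext());               // the expected term must exist
        Assert.AreEqual(t, te.Term.Utf8ToString()); // and appear in sorted term order
        DocsEnum td = te.Docs(liveDocs, null);      // postings filtered by liveDocs
        Assert.IsTrue(td.NextDoc() != DocIdSetIterator.NO_MORE_DOCS); // at least one live doc
    }
    Assert.IsFalse(te.MoveNext()); // no unexpected trailing terms
}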
public virtual void TestReaderChaining()
{
    Assert.IsTrue(reader.RefCount > 0);
    IndexReader wrappedReader = SlowCompositeReaderWrapper.Wrap(reader);
    wrappedReader = new ParallelAtomicReader((AtomicReader)wrappedReader);

    IndexSearcher searcher = NewSearcher(wrappedReader);

    TermRangeQuery query = TermRangeQuery.NewStringRange("field", "a", "z", true, true);
    searcher.Search(query, 5);
    reader.Dispose(); // close original child reader

    try
    {
        searcher.Search(query, 5);
    }
    catch (ObjectDisposedException ace)
    {
        //Assert.AreEqual("this IndexReader cannot be used anymore as one of its child readers was closed", ace.Message);

        // LUCENENET specific - ObjectDisposedException appends the type of object to the end of the message,
        // so we need to check the start of the message only.
        assertTrue(ace.Message.StartsWith("this IndexReader cannot be used anymore as one of its child readers was closed", StringComparison.Ordinal));
    }
    finally
    {
        // shutdown executor: in case of wrap-wrap-wrapping
        searcher.IndexReader.Dispose();
    }
}
public virtual void TestCloseInnerReader()
{
    Directory dir1 = GetDir1(Random);
    AtomicReader ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1));

    // with overlapping
    ParallelAtomicReader pr = new ParallelAtomicReader(true, new AtomicReader[] { ir1 }, new AtomicReader[] { ir1 });

    ir1.Dispose();

    try
    {
        pr.Document(0);
        Assert.Fail("ParallelAtomicReader should be already closed because inner reader was closed!");
    }
#pragma warning disable 168
    catch (ObjectDisposedException e)
#pragma warning restore 168
    {
        // pass
    }

    // noop:
    pr.Dispose();
    dir1.Dispose();
}
public virtual void TestReaderChaining()
{
    Assert.IsTrue(Reader.RefCount > 0);
    IndexReader wrappedReader = SlowCompositeReaderWrapper.Wrap(Reader);
    wrappedReader = new ParallelAtomicReader((AtomicReader)wrappedReader);

    IndexSearcher searcher = NewSearcher(wrappedReader);

    TermRangeQuery query = TermRangeQuery.NewStringRange("field", "a", "z", true, true);
    searcher.Search(query, 5);
    Reader.Dispose(); // close original child reader

    try
    {
        searcher.Search(query, 5);
    }
    catch (AlreadyClosedException ace)
    {
        Assert.AreEqual("this IndexReader cannot be used anymore as one of its child readers was closed", ace.Message);
    }
    finally
    {
        // shutdown executor: in case of wrap-wrap-wrapping
        searcher.IndexReader.Dispose();
    }
}
// Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader: private IndexSearcher Parallel(Random random) { dir1 = GetDir1(random); dir2 = GetDir2(random); ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1)), SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2))); TestUtil.CheckReader(pr); return(NewSearcher(pr)); }
public virtual void TestFieldNames()
{
    Directory dir1 = GetDir1(Random());
    Directory dir2 = GetDir2(Random());
    ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1)), SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2)));
    FieldInfos fieldInfos = pr.FieldInfos;
    Assert.AreEqual(4, fieldInfos.Size());
    Assert.IsNotNull(fieldInfos.FieldInfo("f1"));
    Assert.IsNotNull(fieldInfos.FieldInfo("f2"));
    Assert.IsNotNull(fieldInfos.FieldInfo("f3"));
    Assert.IsNotNull(fieldInfos.FieldInfo("f4"));
    pr.Dispose();
    dir1.Dispose();
    dir2.Dispose();
}
public virtual void TestFieldNames()
{
    Directory dir1 = GetDir1(Random);
    Directory dir2 = GetDir2(Random);
    ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1)), SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2)));
    FieldInfos fieldInfos = pr.FieldInfos;
    Assert.AreEqual(4, fieldInfos.Count);
    Assert.IsNotNull(fieldInfos.FieldInfo("f1"));
    Assert.IsNotNull(fieldInfos.FieldInfo("f2"));
    Assert.IsNotNull(fieldInfos.FieldInfo("f3"));
    Assert.IsNotNull(fieldInfos.FieldInfo("f4"));
    pr.Dispose();
    dir1.Dispose();
    dir2.Dispose();
}
public virtual void TestRefCounts1()
{
    Directory dir1 = GetDir1(Random);
    Directory dir2 = GetDir2(Random);
    AtomicReader ir1, ir2;

    // close subreaders, ParallelReader will not change refCounts, but close on its own close
    ParallelAtomicReader pr = new ParallelAtomicReader(ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1)), ir2 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2)));

    // check RefCounts
    Assert.AreEqual(1, ir1.RefCount);
    Assert.AreEqual(1, ir2.RefCount);
    pr.Dispose();
    Assert.AreEqual(0, ir1.RefCount);
    Assert.AreEqual(0, ir2.RefCount);
    dir1.Dispose();
    dir2.Dispose();
}
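// GetDir1/GetDir2 are fixture helpers not shown in these snippets. Below is a minimal
// sketch of what they plausibly build, mirroring the upstream Lucene test; the
// two-document layout and the f1..f4 field names are inferred from the assertions in the
// surrounding tests, so treat this as illustrative rather than the project's exact code.
private Directory GetDir1(Random random)
{
    Directory dir1 = NewDirectory();
    IndexWriter w1 = new IndexWriter(dir1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(random)));
    Document d1 = new Document();
    d1.Add(NewTextField("f1", "v1", Field.Store.YES));
    d1.Add(NewTextField("f2", "v1", Field.Store.YES));
    w1.AddDocument(d1);
    Document d2 = new Document();
    d2.Add(NewTextField("f1", "v2", Field.Store.YES));
    d2.Add(NewTextField("f2", "v2", Field.Store.YES));
    w1.AddDocument(d2);
    w1.Dispose();
    return dir1;
}
// GetDir2 would be identical except that it writes fields "f3" and "f4".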
public virtual void TestRefCounts2()
{
    Directory dir1 = GetDir1(Random);
    Directory dir2 = GetDir2(Random);
    AtomicReader ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1));
    AtomicReader ir2 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2));

    // don't close subreaders, so ParallelReader will increment refcounts
    ParallelAtomicReader pr = new ParallelAtomicReader(false, ir1, ir2);

    // check RefCounts
    Assert.AreEqual(2, ir1.RefCount);
    Assert.AreEqual(2, ir2.RefCount);
    pr.Dispose();
    Assert.AreEqual(1, ir1.RefCount);
    Assert.AreEqual(1, ir2.RefCount);
    ir1.Dispose();
    ir2.Dispose();
    Assert.AreEqual(0, ir1.RefCount);
    Assert.AreEqual(0, ir2.RefCount);
    dir1.Dispose();
    dir2.Dispose();
}
public virtual void TestEmptyIndex()
{
    Directory rd1 = NewDirectory();
    IndexWriter iw = new IndexWriter(rd1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
    iw.Dispose();
    // create a copy:
    Directory rd2 = NewDirectory(rd1);

    Directory rdOut = NewDirectory();

    IndexWriter iwOut = new IndexWriter(rdOut, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));

    ParallelAtomicReader apr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(rd1)), SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(rd2)));

    // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
    iwOut.AddIndexes(apr);
    iwOut.ForceMerge(1);

    // 2nd try with a readerless parallel reader
    iwOut.AddIndexes(new ParallelAtomicReader());
    iwOut.ForceMerge(1);

    ParallelCompositeReader cpr = new ParallelCompositeReader(DirectoryReader.Open(rd1), DirectoryReader.Open(rd2));

    // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
    iwOut.AddIndexes(cpr);
    iwOut.ForceMerge(1);

    // 2nd try with a readerless parallel reader
    iwOut.AddIndexes(new ParallelCompositeReader());
    iwOut.ForceMerge(1);

    iwOut.Dispose();
    rdOut.Dispose();
    rd1.Dispose();
    rd2.Dispose();
}
public virtual void TestEmptyIndex()
{
    Directory rd1 = NewDirectory();
    IndexWriter iw = new IndexWriter(rd1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
    iw.Dispose();
    // create a copy:
    Directory rd2 = NewDirectory(rd1);

    Directory rdOut = NewDirectory();

    IndexWriter iwOut = new IndexWriter(rdOut, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));

    ParallelAtomicReader apr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(rd1)), SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(rd2)));

    // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
    iwOut.AddIndexes(apr);
    iwOut.ForceMerge(1);

    // 2nd try with a readerless parallel reader
    iwOut.AddIndexes(new ParallelAtomicReader());
    iwOut.ForceMerge(1);

    ParallelCompositeReader cpr = new ParallelCompositeReader(DirectoryReader.Open(rd1), DirectoryReader.Open(rd2));

    // When unpatched, Lucene crashes here with a NoSuchElementException (caused by ParallelTermEnum)
    iwOut.AddIndexes(cpr);
    iwOut.ForceMerge(1);

    // 2nd try with a readerless parallel reader
    iwOut.AddIndexes(new ParallelCompositeReader());
    iwOut.ForceMerge(1);

    iwOut.Dispose();
    rdOut.Dispose();
    rd1.Dispose();
    rd2.Dispose();
}
public virtual void TestCloseInnerReader()
{
    Directory dir1 = GetDir1(Random());
    AtomicReader ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1));

    // with overlapping
    ParallelAtomicReader pr = new ParallelAtomicReader(true, new AtomicReader[] { ir1 }, new AtomicReader[] { ir1 });

    ir1.Dispose();

    try
    {
        pr.Document(0);
        Assert.Fail("ParallelAtomicReader should be already closed because inner reader was closed!");
    }
    catch (AlreadyClosedException e)
    {
        // pass
    }

    // noop:
    pr.Dispose();
    dir1.Dispose();
}
public virtual void TestIgnoreStoredFields()
{
    Directory dir1 = GetDir1(Random());
    Directory dir2 = GetDir2(Random());
    AtomicReader ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1));
    AtomicReader ir2 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2));

    // with overlapping
    ParallelAtomicReader pr = new ParallelAtomicReader(false, new AtomicReader[] { ir1, ir2 }, new AtomicReader[] { ir1 });
    Assert.AreEqual("v1", pr.Document(0).Get("f1"));
    Assert.AreEqual("v1", pr.Document(0).Get("f2"));
    Assert.IsNull(pr.Document(0).Get("f3"));
    Assert.IsNull(pr.Document(0).Get("f4"));
    // check that fields are there
    Assert.IsNotNull(pr.Terms("f1"));
    Assert.IsNotNull(pr.Terms("f2"));
    Assert.IsNotNull(pr.Terms("f3"));
    Assert.IsNotNull(pr.Terms("f4"));
    pr.Dispose();

    // no stored fields at all
    pr = new ParallelAtomicReader(false, new AtomicReader[] { ir2 }, new AtomicReader[0]);
    Assert.IsNull(pr.Document(0).Get("f1"));
    Assert.IsNull(pr.Document(0).Get("f2"));
    Assert.IsNull(pr.Document(0).Get("f3"));
    Assert.IsNull(pr.Document(0).Get("f4"));
    // check that fields are there
    Assert.IsNull(pr.Terms("f1"));
    Assert.IsNull(pr.Terms("f2"));
    Assert.IsNotNull(pr.Terms("f3"));
    Assert.IsNotNull(pr.Terms("f4"));
    pr.Dispose();

    // without overlapping
    pr = new ParallelAtomicReader(true, new AtomicReader[] { ir2 }, new AtomicReader[] { ir1 });
    Assert.AreEqual("v1", pr.Document(0).Get("f1"));
    Assert.AreEqual("v1", pr.Document(0).Get("f2"));
    Assert.IsNull(pr.Document(0).Get("f3"));
    Assert.IsNull(pr.Document(0).Get("f4"));
    // check that fields are there
    Assert.IsNull(pr.Terms("f1"));
    Assert.IsNull(pr.Terms("f2"));
    Assert.IsNotNull(pr.Terms("f3"));
    Assert.IsNotNull(pr.Terms("f4"));
    pr.Dispose();

    // no main readers
    try
    {
        new ParallelAtomicReader(true, new AtomicReader[0], new AtomicReader[] { ir1 });
        Assert.Fail("didn't get expected exception: need a non-empty main-reader array");
    }
    catch (System.ArgumentException iae)
    {
        // pass
    }

    dir1.Dispose();
    dir2.Dispose();
}
// Fields 1 & 2 in one index, 3 & 4 in other, with ParallelReader: private IndexSearcher Parallel(Random random) { Dir1 = GetDir1(random); Dir2 = GetDir2(random); ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(Dir1)), SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(Dir2))); TestUtil.CheckReader(pr); return NewSearcher(pr); }
public virtual void TestRefCounts2()
{
    Directory dir1 = GetDir1(Random());
    Directory dir2 = GetDir2(Random());
    AtomicReader ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1));
    AtomicReader ir2 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2));

    // don't close subreaders, so ParallelReader will increment refcounts
    ParallelAtomicReader pr = new ParallelAtomicReader(false, ir1, ir2);

    // check RefCounts
    Assert.AreEqual(2, ir1.RefCount);
    Assert.AreEqual(2, ir2.RefCount);
    pr.Dispose();
    Assert.AreEqual(1, ir1.RefCount);
    Assert.AreEqual(1, ir2.RefCount);
    ir1.Dispose();
    ir2.Dispose();
    Assert.AreEqual(0, ir1.RefCount);
    Assert.AreEqual(0, ir2.RefCount);
    dir1.Dispose();
    dir2.Dispose();
}
public virtual void TestRefCounts1()
{
    Directory dir1 = GetDir1(Random());
    Directory dir2 = GetDir2(Random());
    AtomicReader ir1, ir2;

    // close subreaders, ParallelReader will not change refCounts, but close on its own close
    ParallelAtomicReader pr = new ParallelAtomicReader(ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1)), ir2 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2)));

    // check RefCounts
    Assert.AreEqual(1, ir1.RefCount);
    Assert.AreEqual(1, ir2.RefCount);
    pr.Dispose();
    Assert.AreEqual(0, ir1.RefCount);
    Assert.AreEqual(0, ir2.RefCount);
    dir1.Dispose();
    dir2.Dispose();
}
/// <summary> /// Sometimes wrap the IndexReader as slow, parallel or filter reader (or /// combinations of that) /// </summary> public static IndexReader MaybeWrapReader(IndexReader r) { Random random = Random(); if (Rarely()) { // TODO: remove this, and fix those tests to wrap before putting slow around: bool wasOriginallyAtomic = r is AtomicReader; for (int i = 0, c = random.Next(6) + 1; i < c; i++) { switch (random.Next(5)) { case 0: r = SlowCompositeReaderWrapper.Wrap(r); break; case 1: // will create no FC insanity in atomic case, as ParallelAtomicReader has own cache key: r = (r is AtomicReader) ? (IndexReader)new ParallelAtomicReader((AtomicReader)r) : new ParallelCompositeReader((CompositeReader)r); break; case 2: // Häckidy-Hick-Hack: a standard MultiReader will cause FC insanity, so we use // QueryUtils' reader with a fake cache key, so insanity checker cannot walk // along our reader: r = new FCInvisibleMultiReader(r); break; case 3: AtomicReader ar = SlowCompositeReaderWrapper.Wrap(r); IList<string> allFields = new List<string>(); foreach (FieldInfo fi in ar.FieldInfos) { allFields.Add(fi.Name); } allFields = CollectionsHelper.Shuffle(allFields); int end = allFields.Count == 0 ? 0 : random.Next(allFields.Count); HashSet<string> fields = new HashSet<string>(allFields.SubList(0, end)); // will create no FC insanity as ParallelAtomicReader has own cache key: r = new ParallelAtomicReader(new FieldFilterAtomicReader(ar, fields, false), new FieldFilterAtomicReader(ar, fields, true)); break; case 4: // Häckidy-Hick-Hack: a standard Reader will cause FC insanity, so we use // QueryUtils' reader with a fake cache key, so insanity checker cannot walk // along our reader: if (r is AtomicReader) { r = new AssertingAtomicReader((AtomicReader)r); } else if (r is DirectoryReader) { r = new AssertingDirectoryReader((DirectoryReader)r); } break; default: Assert.Fail("should not get here"); break; } } if (wasOriginallyAtomic) { r = SlowCompositeReaderWrapper.Wrap(r); } else if ((r is CompositeReader) && !(r is FCInvisibleMultiReader)) { // prevent cache insanity caused by e.g. ParallelCompositeReader, to fix we wrap one more time: r = new FCInvisibleMultiReader(r); } if (VERBOSE) { Console.WriteLine("maybeWrapReader wrapped: " + r); } } return r; }
public virtual void TestEmptyIndexWithVectors()
{
    Directory rd1 = NewDirectory();
    {
        if (VERBOSE)
        {
            Console.WriteLine("\nTEST: make 1st writer");
        }
        IndexWriter iw = new IndexWriter(rd1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
        Document doc = new Document();
        Field idField = NewTextField("id", "", Field.Store.NO);
        doc.Add(idField);
        FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
        customType.StoreTermVectors = true;
        doc.Add(NewField("test", "", customType));
        idField.StringValue = "1";
        iw.AddDocument(doc);
        doc.Add(NewTextField("test", "", Field.Store.NO));
        idField.StringValue = "2";
        iw.AddDocument(doc);
        iw.Dispose();

        IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
        if (VERBOSE)
        {
            Console.WriteLine("\nTEST: make 2nd writer");
        }
        IndexWriter writer = new IndexWriter(rd1, dontMergeConfig);

        writer.DeleteDocuments(new Term("id", "1"));
        writer.Dispose();
        IndexReader ir = DirectoryReader.Open(rd1);
        Assert.AreEqual(2, ir.MaxDoc);
        Assert.AreEqual(1, ir.NumDocs);
        ir.Dispose();

        iw = new IndexWriter(rd1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())).SetOpenMode(OpenMode_e.APPEND));
        iw.ForceMerge(1);
        iw.Dispose();
    }

    Directory rd2 = NewDirectory();
    {
        IndexWriter iw = new IndexWriter(rd2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));
        Document doc = new Document();
        iw.AddDocument(doc);
        iw.Dispose();
    }

    Directory rdOut = NewDirectory();

    IndexWriter iwOut = new IndexWriter(rdOut, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random())));

    DirectoryReader reader1, reader2;
    ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(reader1 = DirectoryReader.Open(rd1)), SlowCompositeReaderWrapper.Wrap(reader2 = DirectoryReader.Open(rd2)));

    // When unpatched, Lucene crashes here with an ArrayIndexOutOfBoundsException (caused by TermVectorsWriter)
    iwOut.AddIndexes(pr);

    // ParallelReader closes any IndexReader you added to it:
    pr.Dispose();

    // assert subreaders were closed
    Assert.AreEqual(0, reader1.RefCount);
    Assert.AreEqual(0, reader2.RefCount);

    rd1.Dispose();
    rd2.Dispose();

    iwOut.ForceMerge(1);
    iwOut.Dispose();
    rdOut.Dispose();
}
internal ParallelFields(ParallelAtomicReader outerInstance)
{
    this.OuterInstance = outerInstance;
}
public virtual void TestIgnoreStoredFields()
{
    Directory dir1 = GetDir1(Random);
    Directory dir2 = GetDir2(Random);
    AtomicReader ir1 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir1));
    AtomicReader ir2 = SlowCompositeReaderWrapper.Wrap(DirectoryReader.Open(dir2));

    // with overlapping
    ParallelAtomicReader pr = new ParallelAtomicReader(false, new AtomicReader[] { ir1, ir2 }, new AtomicReader[] { ir1 });
    Assert.AreEqual("v1", pr.Document(0).Get("f1"));
    Assert.AreEqual("v1", pr.Document(0).Get("f2"));
    Assert.IsNull(pr.Document(0).Get("f3"));
    Assert.IsNull(pr.Document(0).Get("f4"));
    // check that fields are there
    Assert.IsNotNull(pr.GetTerms("f1"));
    Assert.IsNotNull(pr.GetTerms("f2"));
    Assert.IsNotNull(pr.GetTerms("f3"));
    Assert.IsNotNull(pr.GetTerms("f4"));
    pr.Dispose();

    // no stored fields at all
    pr = new ParallelAtomicReader(false, new AtomicReader[] { ir2 }, new AtomicReader[0]);
    Assert.IsNull(pr.Document(0).Get("f1"));
    Assert.IsNull(pr.Document(0).Get("f2"));
    Assert.IsNull(pr.Document(0).Get("f3"));
    Assert.IsNull(pr.Document(0).Get("f4"));
    // check that fields are there
    Assert.IsNull(pr.GetTerms("f1"));
    Assert.IsNull(pr.GetTerms("f2"));
    Assert.IsNotNull(pr.GetTerms("f3"));
    Assert.IsNotNull(pr.GetTerms("f4"));
    pr.Dispose();

    // without overlapping
    pr = new ParallelAtomicReader(true, new AtomicReader[] { ir2 }, new AtomicReader[] { ir1 });
    Assert.AreEqual("v1", pr.Document(0).Get("f1"));
    Assert.AreEqual("v1", pr.Document(0).Get("f2"));
    Assert.IsNull(pr.Document(0).Get("f3"));
    Assert.IsNull(pr.Document(0).Get("f4"));
    // check that fields are there
    Assert.IsNull(pr.GetTerms("f1"));
    Assert.IsNull(pr.GetTerms("f2"));
    Assert.IsNotNull(pr.GetTerms("f3"));
    Assert.IsNotNull(pr.GetTerms("f4"));
    pr.Dispose();

    // no main readers
    try
    {
        new ParallelAtomicReader(true, new AtomicReader[0], new AtomicReader[] { ir1 });
        Assert.Fail("didn't get expected exception: need a non-empty main-reader array");
    }
#pragma warning disable 168
    catch (ArgumentException iae)
#pragma warning restore 168
    {
        // pass
    }

    dir1.Dispose();
    dir2.Dispose();
}
public virtual void Test1()
{
    ParallelAtomicReader pr = new ParallelAtomicReader(Ir1, Ir2);
    Bits liveDocs = pr.LiveDocs;
    Fields fields = pr.Fields();
    IEnumerator<string> fe = fields.GetEnumerator();

    fe.MoveNext();
    string f = fe.Current;
    Assert.AreEqual("field1", f);
    CheckTerms(fields.Terms(f), liveDocs, "brown", "fox", "jumps", "quick", "the");

    fe.MoveNext();
    f = fe.Current;
    Assert.AreEqual("field2", f);
    CheckTerms(fields.Terms(f), liveDocs, "brown", "fox", "jumps", "quick", "the");

    fe.MoveNext();
    f = fe.Current;
    Assert.AreEqual("field3", f);
    CheckTerms(fields.Terms(f), liveDocs, "dog", "fox", "jumps", "lazy", "over", "the");

    Assert.IsFalse(fe.MoveNext());
}
internal ParallelFields(ParallelAtomicReader outerInstance)
{
    this.outerInstance = outerInstance;
}
public virtual void TestEmptyIndexWithVectors()
{
    Directory rd1 = NewDirectory();
    {
        if (Verbose)
        {
            Console.WriteLine("\nTEST: make 1st writer");
        }
        IndexWriter iw = new IndexWriter(rd1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
        Document doc = new Document();
        Field idField = NewTextField("id", "", Field.Store.NO);
        doc.Add(idField);
        FieldType customType = new FieldType(TextField.TYPE_NOT_STORED);
        customType.StoreTermVectors = true;
        doc.Add(NewField("test", "", customType));
        idField.SetStringValue("1");
        iw.AddDocument(doc);
        doc.Add(NewTextField("test", "", Field.Store.NO));
        idField.SetStringValue("2");
        iw.AddDocument(doc);
        iw.Dispose();

        IndexWriterConfig dontMergeConfig = (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random))).SetMergePolicy(NoMergePolicy.COMPOUND_FILES);
        if (Verbose)
        {
            Console.WriteLine("\nTEST: make 2nd writer");
        }
        IndexWriter writer = new IndexWriter(rd1, dontMergeConfig);

        writer.DeleteDocuments(new Term("id", "1"));
        writer.Dispose();
        IndexReader ir = DirectoryReader.Open(rd1);
        Assert.AreEqual(2, ir.MaxDoc);
        Assert.AreEqual(1, ir.NumDocs);
        ir.Dispose();

        iw = new IndexWriter(rd1, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)).SetOpenMode(OpenMode.APPEND));
        iw.ForceMerge(1);
        iw.Dispose();
    }

    Directory rd2 = NewDirectory();
    {
        IndexWriter iw = new IndexWriter(rd2, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));
        Document doc = new Document();
        iw.AddDocument(doc);
        iw.Dispose();
    }

    Directory rdOut = NewDirectory();

    IndexWriter iwOut = new IndexWriter(rdOut, NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random)));

    DirectoryReader reader1, reader2;
    ParallelAtomicReader pr = new ParallelAtomicReader(SlowCompositeReaderWrapper.Wrap(reader1 = DirectoryReader.Open(rd1)), SlowCompositeReaderWrapper.Wrap(reader2 = DirectoryReader.Open(rd2)));

    // When unpatched, Lucene crashes here with an ArrayIndexOutOfBoundsException (caused by TermVectorsWriter)
    iwOut.AddIndexes(pr);

    // ParallelReader closes any IndexReader you added to it:
    pr.Dispose();

    // assert subreaders were closed
    Assert.AreEqual(0, reader1.RefCount);
    Assert.AreEqual(0, reader2.RefCount);

    rd1.Dispose();
    rd2.Dispose();

    iwOut.ForceMerge(1);
    iwOut.Dispose();
    rdOut.Dispose();
}