/// <summary>
/// Returns an up-to-date view of the given reader. If <c>Reopen()</c> produces a
/// new instance, the stale reader is closed before the fresh one is returned;
/// otherwise the original reader is returned untouched.
/// </summary>
private static IndexReader RefreshReader(IndexReader reader)
{
    var refreshed = reader.Reopen();

    // Reopen() returns the same instance when nothing changed; only close
    // the old reader when a genuinely new one was handed back.
    if (refreshed != reader)
    {
        reader.Close();
    }

    return refreshed;
}
/// <summary>
/// Ensures <c>_reader</c> and <c>_searcher</c> are initialized and current before a search.
/// </summary>
/// <param name="forceReopen">
/// When false, lazily creates the pair if missing, or refreshes it based on the reader's
/// status. When true, closes and fully recreates both the reader and the searcher.
/// </param>
/// <exception cref="ApplicationException">
/// Wraps any <see cref="IOException"/> raised while opening a reader/searcher.
/// </exception>
private void ValidateSearcher(bool forceReopen)
{
    EnsureIndex();

    if (!forceReopen)
    {
        if (_reader == null)
        {
            lock (_locker)
            {
                //double check
                if (_reader == null)
                {
                    try
                    {
                        //get a reader - could be NRT or based on directly depending on how this was constructed
                        _reader = _nrtWriter == null
                            ? OpenNewReader()
                            : _nrtWriter.GetReader();

                        _searcher = new IndexSearcher(_reader);
                    }
                    catch (IOException ex)
                    {
                        throw new ApplicationException("Could not create an index searcher with the supplied lucene directory", ex);
                    }
                }
            }
        }
        else
        {
            // A reader already exists; decide what to do based on its current status.
            switch (_reader.GetReaderStatus())
            {
                case ReaderStatus.Current:
                    // Reader is already up to date - nothing to do.
                    break;
                case ReaderStatus.Closed:
                    lock (_locker)
                    {
                        // NOTE(review): no double-check of the status inside the lock here,
                        // so two threads racing on a closed reader may both rebuild it.

                        //get a reader - could be NRT or based on directly depending on how this was constructed
                        _reader = _nrtWriter == null
                            ? OpenNewReader()
                            : _nrtWriter.GetReader();

                        _searcher = new IndexSearcher(_reader);
                    }
                    break;
                case ReaderStatus.NotCurrent:
                    lock (_locker)
                    {
                        //yes, this is actually the way the Lucene wants you to work...
                        //normally, i would have thought just calling Reopen() on the underlying reader would suffice... but it doesn't.
                        //here's references:
                        // http://stackoverflow.com/questions/1323779/lucene-indexreader-reopen-doesnt-seem-to-work-correctly
                        // http://gist.github.com/173978
                        //Also note that when a new reader is returned from Reopen() the old reader is not actually closed -
                        // but more importantly the old reader might still be in use from another thread! So we can't just
                        // close it here because that would cause a YSOD: Lucene.Net.Store.AlreadyClosedException: this IndexReader is closed
                        // since another thread might be using it. I'm 'hoping' that the GC will just take care of the left over reader's that might
                        // be currently being used in a search, otherwise there's really no way to now when it's safe to close the reader. A reader is
                        // IDisposable so I'm pretty sure the GC will do it's thing since there won't be any refs to it once it is done using it.
                        var newReader = _reader.Reopen();
                        if (newReader != _reader)
                        {
                            //if it's changed, then re-assign, note: the above, before we used to close the old one here
                            // but that will cause problems since the old reader might be in use on another thread.
                            _reader = newReader;
                            _searcher = new IndexSearcher(_reader);
                        }
                    }
                    break;
            }
        }
    }
    else
    {
        // Forced reopen: tear down and rebuild the reader/searcher pair.
        if (_reader != null)
        {
            lock (_locker)
            {
                //double check
                if (_reader != null)
                {
                    try
                    {
                        _searcher.Close();
                        _reader.Close();
                    }
                    catch (IOException ex)
                    {
                        //this will happen if it's already closed ( i think )
                        Trace.TraceError("Examine: error occurred closing index searcher. {0}", ex);
                    }
                    finally
                    {
                        //set to null in case another call to this method has passed the first lock and is checking for null
                        _searcher = null;
                        _reader = null;
                    }

                    try
                    {
                        //get a reader - could be NRT or based on directly depending on how this was constructed
                        _reader = _nrtWriter == null
                            ? OpenNewReader()
                            : _nrtWriter.GetReader();

                        _searcher = new IndexSearcher(_reader);
                    }
                    catch (IOException ex)
                    {
                        throw new ApplicationException("Could not create an index searcher with the supplied lucene directory", ex);
                    }
                }
            }
        }
    }
}
/// <summary>
/// Verifies that an NRT reader obtained from an IndexWriter remains fully usable
/// after the writer is closed, but that calling Reopen() on it then throws
/// <see cref="AlreadyClosedException"/>.
/// </summary>
public virtual void TestAfterClose()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // create the index
    CreateIndexNoClose(false, "test", writer);

    IndexReader r = writer.GetReader();
    writer.Close();

    _TestUtil.CheckIndex(dir1);

    // reader should remain usable even after IndexWriter is closed:
    Assert.AreEqual(100, r.NumDocs());
    Query q = new TermQuery(new Term("indexname", "test"));
    Assert.AreEqual(100, new IndexSearcher(r).Search(q, 10).totalHits);

    try
    {
        r.Reopen();
        Assert.Fail("failed to hit AlreadyClosedException");
    }
    // Fix: drop the unused exception variable (was `catch (AlreadyClosedException ace)`),
    // which produced an "unused variable" compiler warning (CS0168).
    catch (AlreadyClosedException)
    {
        // expected
    }

    r.Close();
    dir1.Close();
}
/// <summary>
/// Stress-tests NRT reader reopening while background threads add and delete
/// documents: repeatedly reopens the reader and searches it for ~3 seconds,
/// then verifies some hits were seen and no thread recorded an exception.
/// </summary>
public virtual void TestDuringAddDelete()
{
    Directory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);
    writer.SetMergeFactor(2);

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit();

    IndexReader r = writer.GetReader();

    int NUM_THREAD = 5;
    float SECONDS = 3;
    // Deadline shared with the worker threads via AnonymousClassThread1.
    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);

    // Fix: the original built `new ArrayList(new ArrayList())`, copying an empty
    // list into another for no effect; a single fresh ArrayList is equivalent.
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList());

    SupportClass.ThreadClass[] threads = new SupportClass.ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread1(endTime, writer, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int sum = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        // Reopen on every iteration; close the stale reader when a new one appears.
        IndexReader r2 = r.Reopen();
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        sum += new IndexSearcher(r).Search(q, 10).totalHits;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }

    Assert.IsTrue(sum > 0);
    Assert.AreEqual(0, excs.Count);

    writer.Close();
    _TestUtil.CheckIndex(dir1);
    r.Close();
    dir1.Close();
}
/// <summary>
/// Verifies that reopening a reader after the underlying index has been modified
/// yields a distinct reader instance on which deletes still work.
/// </summary>
public virtual void TestReopenSegmentReaderToMultiReader()
{
    Directory dir1 = new MockRAMDirectory();
    TestIndexReaderReopen.CreateIndex(dir1, false);

    var reader1 = IndexReader.Open(dir1, false);

    // Change the index behind the reader's back so Reopen() must produce a new view.
    TestIndexReaderReopen.ModifyIndex(5, dir1);
    var reader2 = reader1.Reopen();

    Assert.IsTrue(reader1 != reader2);
    Assert.IsTrue(DeleteWorked(1, reader2));

    reader1.Close();
    reader2.Close();
    dir1.Close();
}
/// <summary>
/// Verifies that a writable reader reopened with <c>Reopen(true)</c> becomes
/// read-only: deletes fail on the new instance and the doc count is preserved.
/// </summary>
public virtual void TestReopenWriteableToReadOnly()
{
    Directory dir1 = new MockRAMDirectory();
    TestIndexReaderReopen.CreateIndex(dir1, true);

    var reader = IndexReader.Open(dir1, false);
    var docCount = reader.NumDocs();

    // Writable reader: a delete must succeed and drop the count by one.
    Assert.IsTrue(DeleteWorked(1, reader));
    Assert.AreEqual(docCount - 1, reader.NumDocs());

    // Read-only reopen: deletes must fail, count stays at docCount - 1.
    var readOnlyReader = reader.Reopen(true);
    Assert.IsTrue(IsReadOnly(readOnlyReader), "reader isn't read only");
    Assert.IsFalse(DeleteWorked(1, readOnlyReader));
    Assert.AreEqual(docCount - 1, readOnlyReader.NumDocs());

    reader.Close();
    readOnlyReader.Close();
    dir1.Close();
}
/// <summary>
/// Verifies that an NRT reader survives a commit, and that reopening it after
/// further additions exposes the new documents (100 initial + 10 added = 110).
/// </summary>
public virtual void TestAfterCommit()
{
    Directory dir1 = new MockRAMDirectory();
    var writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED);
    writer.SetInfoStream(infoStream);

    // Build the initial 100-doc index.
    CreateIndexNoClose(false, "test", writer);

    // Pulling a reader switches the writer into near real-time mode.
    var r1 = writer.GetReader();
    _TestUtil.CheckIndex(dir1);
    writer.Commit();
    _TestUtil.CheckIndex(dir1);
    Assert.AreEqual(100, r1.NumDocs());

    // Add ten more documents, then wait for pending merges to finish.
    for (var i = 0; i < 10; i++)
    {
        writer.AddDocument(CreateDocument(i, "test", 4));
    }
    ((ConcurrentMergeScheduler)writer.GetMergeScheduler()).Sync();

    // Refresh the reader; close the stale one if a new instance came back.
    var r2 = r1.Reopen();
    if (r2 != r1)
    {
        r1.Close();
        r1 = r2;
    }
    Assert.AreEqual(110, r1.NumDocs());

    writer.Close();
    r1.Close();
    dir1.Close();
}
/// <summary>
/// Reopens (or clones, when <paramref name="doClone"/> is true) every subreader and,
/// if at least one subreader changed, builds and returns a new <see cref="ParallelReader"/>
/// over the refreshed set; otherwise returns <c>this</c> unchanged.
/// </summary>
/// <param name="doClone">True to Clone() each subreader instead of Reopen()ing it.</param>
/// <returns>A new ParallelReader when any subreader was refreshed; otherwise this instance.</returns>
protected internal virtual IndexReader DoReopen(bool doClone)
{
    EnsureOpen();

    bool reopened = false;
    System.Collections.IList newReaders = new System.Collections.ArrayList();

    bool success = false;
    try
    {
        for (int i = 0; i < readers.Count; i++)
        {
            IndexReader oldReader = (IndexReader)readers[i];
            IndexReader newReader = null;
            if (doClone)
            {
                newReader = (IndexReader)oldReader.Clone();
            }
            else
            {
                newReader = oldReader.Reopen();
            }
            newReaders.Add(newReader);
            // if at least one of the subreaders was updated we remember that
            // and return a new ParallelReader
            if (newReader != oldReader)
            {
                reopened = true;
            }
        }
        success = true;
    }
    finally
    {
        // On failure, close only the subreaders that are genuinely new instances;
        // unchanged ones still belong to this reader and must stay open.
        if (!success && reopened)
        {
            for (int i = 0; i < newReaders.Count; i++)
            {
                IndexReader r = (IndexReader)newReaders[i];
                if (r != readers[i])
                {
                    try
                    {
                        r.Close();
                    }
                    catch (System.IO.IOException ignore)
                    {
                        // keep going - we want to clean up as much as possible
                    }
                }
            }
        }
    }

    if (reopened)
    {
        System.Collections.IList newDecrefOnClose = new System.Collections.ArrayList();
        ParallelReader pr = new ParallelReader();
        for (int i = 0; i < readers.Count; i++)
        {
            IndexReader oldReader = (IndexReader)readers[i];
            IndexReader newReader = (IndexReader)newReaders[i];
            if (newReader == oldReader)
            {
                // Subreader is shared with this ParallelReader: take an extra
                // reference and remember to decRef (not close) it on close().
                newDecrefOnClose.Add(true);
                newReader.IncRef();
            }
            else
            {
                // this is a new subreader instance, so on close() we don't
                // decRef but close it
                newDecrefOnClose.Add(false);
            }
            pr.Add(newReader, !storedFieldReaders.Contains(oldReader));
        }
        pr.decrefOnClose = newDecrefOnClose;
        pr.incRefReaders = incRefReaders;
        return (pr);
    }
    else
    {
        // No subreader was refreshed
        return (this);
    }
}
/// <summary>
/// Stress-tests NRT reader reopening while background threads run AddIndexes:
/// repeatedly reopens and searches for ~3 seconds, asserting the hit count never
/// decreases, then checks no thread recorded an exception and the index is intact.
/// Currently skipped via Assert.Ignore (known issue).
/// </summary>
public virtual void TestDuringAddIndexes()
{
    Assert.Ignore("Known issue");
    MockRAMDirectory dir1 = new MockRAMDirectory();
    IndexWriter writer = new IndexWriter(dir1, new WhitespaceAnalyzer(), IndexWriter.MaxFieldLength.LIMITED, null);
    writer.SetInfoStream(infoStream, null);
    writer.MergeFactor = 2;

    // create the index
    CreateIndexNoClose(false, "test", writer);
    writer.Commit(null);

    // Ten copies of the base index for the worker threads to AddIndexes from.
    Directory[] dirs = new Directory[10];
    for (int i = 0; i < 10; i++)
    {
        dirs[i] = new MockRAMDirectory(dir1);
    }

    IndexReader r = writer.GetReader(null);

    int NUM_THREAD = 5;
    float SECONDS = 3;
    // Deadline shared with the worker threads via AnonymousClassThread.
    long endTime = (long)((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) + 1000.0 * SECONDS);

    // Thread-safe list collecting exceptions thrown by the worker threads.
    System.Collections.IList excs = (System.Collections.IList)System.Collections.ArrayList.Synchronized(new System.Collections.ArrayList(new System.Collections.ArrayList()));

    ThreadClass[] threads = new ThreadClass[NUM_THREAD];
    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i] = new AnonymousClassThread(endTime, writer, dirs, excs, this);
        threads[i].IsBackground = true;
        threads[i].Start();
    }

    int lastCount = 0;
    while ((DateTime.Now.Ticks / TimeSpan.TicksPerMillisecond) < endTime)
    {
        IndexReader r2 = r.Reopen(null);
        if (r2 != r)
        {
            r.Close();
            r = r2;
        }
        Query q = new TermQuery(new Term("indexname", "test"));
        // AddIndexes only ever adds documents, so the hit count must be monotonic.
        int count = new IndexSearcher(r).Search(q, 10, null).TotalHits;
        Assert.IsTrue(count >= lastCount);
        lastCount = count;
    }

    for (int i = 0; i < NUM_THREAD; i++)
    {
        threads[i].Join();
    }
    Assert.AreEqual(0, excs.Count);
    r.Close();

    try
    {
        Assert.AreEqual(0, dir1.GetOpenDeletedFiles().Count);
    }
    catch
    {
        //DIGY:
        //I think this is an expected behaviour.
        //There isn't any pending files to be deleted after "writer.Close()".
        //But, since lucene.java's test case is designed that way
        //and I might be wrong, I will add a warning
        // Assert only in debug mode, so that CheckIndex is called during release.
#if DEBUG
        Assert.Inconclusive("", 0, dir1.GetOpenDeletedFiles().Count);
#endif
    }

    writer.Close();
    _TestUtil.CheckIndex(dir1);
    dir1.Close();
}
/// <summary>
/// Optionally modifies the index via <paramref name="test"/>, then reopens
/// <paramref name="reader"/> and asserts that a new instance was (or was not)
/// produced according to <paramref name="hasChanges"/>.
/// </summary>
/// <returns>A couple of the freshly opened reader (may be null) and the refreshed reader.</returns>
internal virtual ReaderCouple RefreshReader(IndexReader reader, TestReopen test, int modify, bool hasChanges)
{
    lock (createReaderMutex)
    {
        IndexReader opened = null;
        if (test != null)
        {
            test.ModifyIndex(modify);
            opened = test.OpenReader();
        }

        IndexReader refreshed = null;
        try
        {
            refreshed = reader.Reopen();
        }
        finally
        {
            // Reopen() threw -- don't leak the reader we just opened.
            if (refreshed == null && opened != null)
            {
                opened.Close();
            }
        }

        if (hasChanges && refreshed == reader)
        {
            Assert.Fail("No new IndexReader instance created during refresh.");
        }
        if (!hasChanges && refreshed != reader)
        {
            Assert.Fail("New IndexReader instance created during refresh even though index had no changes.");
        }

        return new ReaderCouple(opened, refreshed);
    }
}
/// <summary>
/// Exercises copy-on-write reference counting of a SegmentReader's deleted-docs
/// bitvector across Clone()/Reopen(): verifies the ref count as readers are cloned,
/// that a delete on a clone forks a private bitvector, and that a writable delete
/// on the original fails once the clone holds the write lock.
/// </summary>
public virtual void TestSegmentReaderDelDocsReferenceCounting()
{
    Directory dir1 = new MockRAMDirectory();
    TestIndexReaderReopen.CreateIndex(dir1, false);

    IndexReader origReader = IndexReader.Open(dir1, false);
    SegmentReader origSegmentReader = SegmentReader.GetOnlySegmentReader(origReader);
    // deletedDocsRef should be null because nothing has updated yet
    Assert.IsNull(origSegmentReader.deletedDocsRef_ForNUnit);

    // we deleted a document, so there is now a deletedDocs bitvector and a
    // reference to it
    origReader.DeleteDocument(1);
    AssertDelDocsRefCountEquals(1, origSegmentReader);

    // the cloned segmentreader should have 2 references, 1 to itself, and 1 to
    // the original segmentreader
    IndexReader clonedReader = (IndexReader)origReader.Clone();
    SegmentReader clonedSegmentReader = SegmentReader.GetOnlySegmentReader(clonedReader);
    AssertDelDocsRefCountEquals(2, origSegmentReader);

    // deleting a document creates a new deletedDocs bitvector, the refs goes to
    // 1
    clonedReader.DeleteDocument(2);
    AssertDelDocsRefCountEquals(1, origSegmentReader);
    AssertDelDocsRefCountEquals(1, clonedSegmentReader);

    // make sure the deletedocs objects are different (copy
    // on write)
    Assert.IsTrue(origSegmentReader.deletedDocs_ForNUnit != clonedSegmentReader.deletedDocs_ForNUnit);

    AssertDocDeleted(origSegmentReader, clonedSegmentReader, 1);
    Assert.IsTrue(!origSegmentReader.IsDeleted(2)); // doc 2 should not be deleted
    // in original segmentreader
    Assert.IsTrue(clonedSegmentReader.IsDeleted(2)); // doc 2 should be deleted in
    // cloned segmentreader

    // deleting a doc from the original segmentreader should throw an exception
    Assert.Throws<LockObtainFailedException>(() => origReader.DeleteDocument(4), "expected exception");

    origReader.Close();
    // try closing the original segment reader to see if it affects the
    // clonedSegmentReader
    clonedReader.DeleteDocument(3);
    clonedReader.Flush();
    AssertDelDocsRefCountEquals(1, clonedSegmentReader);

    // test a reopened reader
    IndexReader reopenedReader = clonedReader.Reopen();
    IndexReader cloneReader2 = (IndexReader)reopenedReader.Clone();
    SegmentReader cloneSegmentReader2 = SegmentReader.GetOnlySegmentReader(cloneReader2);
    AssertDelDocsRefCountEquals(2, cloneSegmentReader2);

    clonedReader.Close();
    reopenedReader.Close();
    cloneReader2.Close();
    dir1.Close();
}
/// <summary>
/// Optionally modifies the index via <paramref name="test"/>, then reopens
/// <paramref name="reader"/> and asserts that a new instance was (or was not)
/// produced according to <paramref name="hasChanges"/>.
/// </summary>
/// <returns>A couple of the freshly opened reader (may be null) and the refreshed reader.</returns>
private ReaderCouple RefreshReader(IndexReader reader, TestReopen_Renamed_Class test, int modify, bool hasChanges)
{
    lock (createReaderMutex)
    {
        IndexReader r = null;
        if (test != null)
        {
            test.ModifyIndex(modify);
            r = test.OpenReader();
        }

        // Fix: guard Reopen() with try/finally (matching the sibling RefreshReader
        // overload in this source). Previously, if Reopen() threw, the reader just
        // opened via test.OpenReader() was never closed and leaked.
        IndexReader refreshed = null;
        try
        {
            refreshed = reader.Reopen();
        }
        finally
        {
            if (refreshed == null && r != null)
            {
                // Hit exception -- close opened reader
                r.Close();
            }
        }

        if (hasChanges)
        {
            if (refreshed == reader)
            {
                Assert.Fail("No new IndexReader instance created during refresh.");
            }
        }
        else
        {
            if (refreshed != reader)
            {
                Assert.Fail("New IndexReader instance created during refresh even though index had no changes.");
            }
        }

        return new ReaderCouple(r, refreshed);
    }
}