public virtual void TestSingleFilter()
{
    // A single date filter with no explicit logic array should match every document.
    ChainedFilter dateOnly = GetChainedFilter(new Filter[] { dateFilter }, null);
    int hitCount = searcher.Search(query, dateOnly, 1000).TotalHits;
    assertEquals(Max, hitCount);

    // The single-argument constructor defaults the logic; bob owns half the docs.
    ChainedFilter bobDefault = new ChainedFilter(new Filter[] { bobFilter });
    hitCount = searcher.Search(query, bobDefault, 1000).TotalHits;
    assertEquals(Max / 2, hitCount);

    // Explicit AND with a single filter behaves the same and returns bob's docs.
    ChainedFilter bobAnd = GetChainedFilter(new Filter[] { bobFilter }, new[] { ChainedFilter.AND });
    TopDocs andResults = searcher.Search(query, bobAnd, 1000);
    assertEquals(Max / 2, andResults.TotalHits);
    assertEquals("bob", searcher.Doc(andResults.ScoreDocs[0].Doc).Get("owner"));

    // ANDNOT with a single filter inverts it: the other half (sue's docs) match.
    ChainedFilter bobAndNot = GetChainedFilter(new Filter[] { bobFilter }, new[] { ChainedFilter.ANDNOT });
    TopDocs andNotResults = searcher.Search(query, bobAndNot, 1000);
    assertEquals(Max / 2, andNotResults.TotalHits);
    assertEquals("sue", searcher.Doc(andNotResults.ScoreDocs[0].Doc).Get("owner"));
}
public virtual void TestWithCachingFilter()
{
    // Build an empty index; the writer can be disposed as soon as the reader exists.
    Directory directory = NewDirectory();
    RandomIndexWriter indexWriter = new RandomIndexWriter(Random(), directory);
    IndexReader indexReader = indexWriter.Reader;
    indexWriter.Dispose();

    IndexSearcher indexSearcher = NewSearcher(indexReader);

    Query noneQuery = new TermQuery(new Term("none", "none"));
    QueryWrapperFilter wrappedQuery = new QueryWrapperFilter(noneQuery);

    // Warm the first caching filter so its cached DocIdSet is populated.
    CachingWrapperFilter cachedFirst = new CachingWrapperFilter(wrappedQuery);
    indexSearcher.Search(noneQuery, cachedFirst, 1);

    // Chain a warmed and an unwarmed caching filter over the same underlying filter.
    CachingWrapperFilter cachedSecond = new CachingWrapperFilter(wrappedQuery);
    ChainedFilter chained = new ChainedFilter(new Filter[] { cachedFirst, cachedSecond });

    // Regression check: in Java this once threw
    // ClassCastException (OpenBitSet cannot be cast to java.util.BitSet).
    indexSearcher.Search(new MatchAllDocsQuery(), chained, 1);

    // Dispose reader before directory.
    indexReader.Dispose();
    directory.Dispose();
}
/// <summary>
/// Builds the set of matching doc ids for this segment by inverting the
/// <c>intersectsFilter</c> (via <see cref="ChainedFilter.ANDNOT"/>) and, when a
/// <c>field</c> is configured, restricting the result to documents that actually
/// have that field. NOTE(review): this looks like a "disjoint" spatial filter —
/// docs that have the field but do NOT intersect — confirm against the enclosing
/// class's contract.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    Bits docsWithField;
    if (field == null)
    {
        // No field configured: don't restrict by field presence at all.
        docsWithField = null;
    }
    else
    {
        //NOTE By using the FieldCache we re-use a cache
        // which is nice but loading it in this way might be slower than say using an
        // intersects filter against the world bounds. So do we add a method to the
        // strategy, perhaps? But the strategy can't cache it.
        docsWithField = FieldCache.DEFAULT.GetDocsWithField((context.AtomicReader), field);

        // Sanity check: the bit set must cover the whole segment.
        int maxDoc = context.AtomicReader.MaxDoc;
        if (docsWithField.Length() != maxDoc)
        {
            throw new InvalidOperationException("Bits length should be maxDoc (" + maxDoc + ") but wasn't: " + docsWithField);
        }
        if (docsWithField is Bits_MatchNoBits)
        {
            // No document in this segment has the field, so nothing can match.
            return null;//match nothing
        }
        else if (docsWithField is Bits_MatchAllBits)
        {
            // Every document has the field; a null wrapper below means "no extra filtering".
            docsWithField = null;//all docs
        }
    }

    //not so much a chain but a way to conveniently invert the Filter
    DocIdSet docIdSet = new ChainedFilter(new Filter[] { intersectsFilter }, ChainedFilter.ANDNOT).GetDocIdSet(context, acceptDocs);
    // Intersect the inverted set with "has the field" (no-op when docsWithField is null).
    return BitsFilteredDocIdSet.Wrap(docIdSet, docsWithField);
}