/// <summary>
/// Asserts that wrapping <paramref name="filter"/> in a <see cref="CachingWrapperFilter"/>
/// yields a cached <see cref="DocIdSet"/> consistent with the original one:
/// null-ness agrees, the cached set is always cacheable, and
/// <paramref name="shouldCacheable"/> matches <see cref="DocIdSet.IsCacheable"/> on the
/// uncached set. Cacheable originals must keep their concrete type; non-cacheable ones
/// must be materialized into a <see cref="FixedBitSet"/>.
/// </summary>
private static void AssertDocIdSetCacheable(IndexReader reader, Filter filter, bool shouldCacheable)
{
    Assert.IsTrue(reader.Context is AtomicReaderContext);
    AtomicReaderContext context = (AtomicReaderContext)reader.Context;
    CachingWrapperFilter cacher = new CachingWrapperFilter(filter);
    DocIdSet originalSet = filter.GetDocIdSet(context, (context.AtomicReader).LiveDocs);
    DocIdSet cachedSet = cacher.GetDocIdSet(context, (context.AtomicReader).LiveDocs);
    if (originalSet == null)
    {
        Assert.IsNull(cachedSet);
    }
    if (cachedSet == null)
    {
        Assert.IsTrue(originalSet == null || originalSet.GetIterator() == null);
    }
    else
    {
        Assert.IsTrue(cachedSet.IsCacheable);
        Assert.AreEqual(shouldCacheable, originalSet.IsCacheable);
        if (originalSet.IsCacheable)
        {
            Assert.AreEqual(originalSet.GetType(), cachedSet.GetType(), "Cached DocIdSet must be of same class like uncached, if cacheable");
        }
        else
        {
            // BUGFIX(review): removed the dead "|| cachedSet == null" disjunct — this branch
            // is only reachable when cachedSet is non-null, so that condition could never hold.
            Assert.IsTrue(cachedSet is FixedBitSet, "Cached DocIdSet must be an FixedBitSet if the original one was not cacheable");
        }
    }
}
/// <summary>
/// Builds the constant-score <see cref="Scorer"/>. When this weight wraps a filter,
/// the filter's <see cref="DocIdSet"/> iterator drives matching; otherwise the inner
/// query weight supplies the iterator. Returns <c>null</c> when nothing matches.
/// </summary>
public override Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs)
{
    DocIdSetIterator iterator;
    if (outerInstance.m_filter == null)
    {
        // Query-backed mode: the inner weight's scorer is itself the iterator.
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(outerInstance.m_query != null && innerWeight != null);
        }
        iterator = innerWeight.GetScorer(context, acceptDocs);
    }
    else
    {
        // Filter-backed mode: a null DocIdSet means the filter matches no documents.
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(outerInstance.m_query == null);
        }
        DocIdSet set = outerInstance.m_filter.GetDocIdSet(context, acceptDocs);
        if (set == null)
        {
            return null;
        }
        iterator = set.GetIterator();
    }
    return iterator == null ? null : new ConstantScorer(outerInstance, iterator, this, queryWeight);
}
/// <summary>
/// Creates a leap-frog scorer pairing the filter's iterator with the query scorer.
/// Returns <c>null</c> when either the filter or the query matches no documents.
/// </summary>
public override Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet)
{
    DocIdSetIterator filterIter = docIdSet.GetIterator();
    if (filterIter == null)
    {
        // A null iterator means the filter accepts no documents at all.
        return null;
    }
    // acceptDocs is null here on purpose: the filter already respected them,
    // applying them again would be redundant work.
    Scorer scorer = weight.GetScorer(context, null);
    if (scorer == null)
    {
        return null;
    }
    // scorerFirst decides which iterator leads the leap-frog advance.
    return scorerFirst
        ? new LeapFrogScorer(weight, scorer, filterIter, scorer)
        : new LeapFrogScorer(weight, filterIter, scorer, scorer);
}
/// <summary>
/// Provide the <see cref="DocIdSet"/> to be cached, using the <see cref="DocIdSet"/> provided
/// by the wrapped Filter.
/// <para/>This implementation returns the given <see cref="DocIdSet"/>,
/// if <see cref="DocIdSet.IsCacheable"/> returns <c>true</c>, else it calls
/// <see cref="CacheImpl(DocIdSetIterator, AtomicReader)"/>.
/// <para/>Note: this method returns <see cref="EMPTY_DOCIDSET"/> if the given <paramref name="docIdSet"/>
/// is <c>null</c> or if <see cref="DocIdSet.GetIterator()"/> returns <c>null</c>. The empty
/// instance is used as a placeholder in the cache instead of the <c>null</c> value.
/// </summary>
protected virtual DocIdSet DocIdSetToCache(DocIdSet docIdSet, AtomicReader reader)
{
    if (docIdSet == null)
    {
        // Caching the non-null sentinel lets lookups distinguish
        // "cached as empty" from "not cached yet".
        return EMPTY_DOCIDSET;
    }
    if (docIdSet.IsCacheable)
    {
        return docIdSet;
    }
    // GetIterator() is allowed to return null; that case is also represented
    // by the cacheable sentinel rather than null.
    DocIdSetIterator iterator = docIdSet.GetIterator();
    return iterator == null ? EMPTY_DOCIDSET : CacheImpl(iterator, reader);
}
/// <summary>
/// Returns the constant-score <see cref="Scorer"/>: iterator comes from the wrapped
/// filter's <see cref="DocIdSet"/> when a filter is set, otherwise from the inner
/// query weight. Yields <c>null</c> when nothing matches.
/// </summary>
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs)
{
    DocIdSetIterator iterator;
    if (OuterInstance.filter == null)
    {
        // Query-backed mode: the inner scorer doubles as the iterator.
        Debug.Assert(OuterInstance.query != null && InnerWeight != null);
        iterator = InnerWeight.Scorer(context, acceptDocs);
    }
    else
    {
        // Filter-backed mode: a null DocIdSet means no documents match.
        Debug.Assert(OuterInstance.query == null);
        DocIdSet set = OuterInstance.filter.GetDocIdSet(context, acceptDocs);
        if (set == null)
        {
            return null;
        }
        iterator = set.GetIterator();
    }
    return iterator == null ? null : new ConstantScorer(OuterInstance, iterator, this, QueryWeight);
}
/// <summary>
/// Chooses between random-access filtering and leap-frogging: when the filter's
/// <see cref="DocIdSet"/> exposes <see cref="IBits"/> and the heuristic approves,
/// the inner scorer is returned with those bits as acceptDocs; otherwise the filter
/// iterator drives a <c>PrimaryAdvancedLeapFrogScorer</c>. Returns <c>null</c>
/// when the filter matches nothing.
/// </summary>
public override Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet)
{
    DocIdSetIterator filterIter = docIdSet.GetIterator();
    if (filterIter == null)
    {
        // The filter does not accept any documents.
        return null;
    }
    int firstFilterDoc = filterIter.NextDoc();
    if (firstFilterDoc == DocIdSetIterator.NO_MORE_DOCS)
    {
        return null;
    }
    IBits filterAcceptDocs = docIdSet.Bits;
    // Random access requires the set to expose Bits AND the heuristic to judge it
    // cheaper than leap-frog for this first matching doc.
    if (filterAcceptDocs != null && UseRandomAccess(filterAcceptDocs, firstFilterDoc))
    {
        // Same inner scorer, just driven by the filter bits as acceptDocs.
        return weight.GetScorer(context, filterAcceptDocs);
    }
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(firstFilterDoc > -1);
    }
    // We are going to advance() this scorer; acceptDocs is null because the filter
    // has already respected them — no need to apply them twice.
    Scorer scorer = weight.GetScorer(context, null);
    // TODO once we have way to figure out if we use RA or LeapFrog we can remove this scorer
    return scorer == null ? null : new PrimaryAdvancedLeapFrogScorer(weight, firstFilterDoc, filterIter, scorer);
}
/// <summary>
/// Produces the cached representation of <paramref name="docIdSet"/>: the empty
/// sentinel for null/empty sets, the set itself when it is already a
/// <see cref="FixedBitSet"/>, and otherwise a fresh <see cref="FixedBitSet"/> copy.
/// Unlike CachingWrapperFilter, this cache requires every entry to be a FixedBitSet,
/// so even otherwise-cacheable sets get materialized.
/// </summary>
protected override DocIdSet DocIdSetToCache(DocIdSet docIdSet, AtomicReader reader)
{
    if (docIdSet == null)
    {
        return EMPTY_DOCIDSET;
    }
    if (docIdSet is FixedBitSet)
    {
        // Already the required representation; reuse it without copying.
        return docIdSet;
    }
    DocIdSetIterator iterator = docIdSet.GetIterator();
    if (iterator == null)
    {
        // A null iterator means the set is empty; cache the sentinel.
        return EMPTY_DOCIDSET;
    }
    FixedBitSet bits = new FixedBitSet(reader.MaxDoc);
    bits.Or(iterator);
    return bits;
}
/// <summary>
/// Explains document <paramref name="i"/>'s score: the inner weight's explanation is
/// returned unchanged when the filter matches the document, otherwise a zero-score
/// explanation that wraps the inner one as a detail.
/// </summary>
public override Explanation Explain(AtomicReaderContext ir, int i)
{
    Explanation inner = weight.Explain(ir, i);
    Filter f = outerInstance.filter;
    DocIdSet docIdSet = f.GetDocIdSet(ir, ir.AtomicReader.LiveDocs);
    // Both a null DocIdSet and a null iterator are treated as "matches nothing".
    DocIdSetIterator iterator = (docIdSet == null ? null : docIdSet.GetIterator()) ?? DocIdSetIterator.GetEmpty();
    if (iterator.Advance(i) == i)
    {
        return inner;
    }
    Explanation result = new Explanation(0.0f, "failure to match filter: " + f.ToString());
    result.AddDetail(inner);
    return result;
}
/// <summary>
/// Folds <paramref name="dis"/> into <paramref name="result"/> using the boolean
/// operation selected by <paramref name="logic"/> (OR/AND/ANDNOT/XOR).
/// <see cref="FixedBitSet"/> operands take the optimized word-level path; any other
/// set goes through its iterator, with a null set or null iterator treated as empty.
/// An unrecognized logic code retries once with DEFAULT.
/// </summary>
private void doChain(FixedBitSet result, int logic, DocIdSet dis)
{
    if (dis is FixedBitSet)
    {
        // Optimized path: operate directly on the other bit set's words.
        FixedBitSet other = (FixedBitSet)dis;
        switch (logic)
        {
            case OR:
                result.Or(other);
                break;
            case AND:
                result.And(other);
                break;
            case ANDNOT:
                result.AndNot(other);
                break;
            case XOR:
                result.Xor(other);
                break;
            default:
                doChain(result, DEFAULT, dis);
                break;
        }
        return;
    }
    // Generic path: a null set or a null iterator both behave as the empty set.
    DocIdSetIterator disi = dis == null
        ? DocIdSetIterator.Empty()
        : (dis.GetIterator() ?? DocIdSetIterator.Empty());
    switch (logic)
    {
        case OR:
            result.Or(disi);
            break;
        case AND:
            result.And(disi);
            break;
        case ANDNOT:
            result.AndNot(disi);
            break;
        case XOR:
            result.Xor(disi);
            break;
        default:
            doChain(result, DEFAULT, dis);
            break;
    }
}
/// <summary>
/// Builds a leap-frog scorer from the filter iterator and the query scorer.
/// Returns <c>null</c> when either side matches no documents.
/// </summary>
public override Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet)
{
    DocIdSetIterator filterIter = docIdSet.GetIterator();
    if (filterIter == null)
    {
        // A null iterator means the filter does not accept any documents.
        return null;
    }
    // acceptDocs is null on purpose: the filter already respected them,
    // so applying them again would be wasted work.
    Scorer scorer = weight.Scorer(context, null);
    if (scorer == null)
    {
        return null;
    }
    // ScorerFirst selects which iterator leads the leap-frog advance.
    return ScorerFirst
        ? new LeapFrogScorer(weight, scorer, filterIter, scorer)
        : new LeapFrogScorer(weight, filterIter, scorer, scorer);
}
/// <summary>
/// Picks between random-access filtering and leap-frog iteration: when the filter's
/// <see cref="DocIdSet"/> exposes <see cref="Bits"/> and the heuristic approves, the
/// inner scorer is returned with those bits as acceptDocs; otherwise the filter
/// iterator drives a <c>PrimaryAdvancedLeapFrogScorer</c>. Returns <c>null</c> when
/// the filter matches nothing.
/// </summary>
public override Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet)
{
    DocIdSetIterator filterIter = docIdSet.GetIterator();
    if (filterIter == null)
    {
        // The filter does not accept any documents.
        return null;
    }
    int firstFilterDoc = filterIter.NextDoc();
    if (firstFilterDoc == DocIdSetIterator.NO_MORE_DOCS)
    {
        return null;
    }
    Bits filterAcceptDocs = docIdSet.GetBits();
    // Random access is only possible when Bits are exposed, and only taken when the
    // heuristic judges it cheaper than leap-frog for this first matching doc.
    if (filterAcceptDocs != null && UseRandomAccess(filterAcceptDocs, firstFilterDoc))
    {
        // Same inner scorer, just with the filter bits as acceptDocs.
        return weight.Scorer(context, filterAcceptDocs);
    }
    Debug.Assert(firstFilterDoc > -1);
    // We are going to advance() this scorer; acceptDocs is null because the filter
    // already respected them — no need to apply them twice.
    Scorer scorer = weight.Scorer(context, null);
    // TODO once we have way to figure out if we use RA or LeapFrog we can remove this scorer
    return scorer == null ? null : new PrimaryAdvancedLeapFrogScorer(weight, firstFilterDoc, filterIter, scorer);
}
/// <summary>
/// Provide the DocIdSet to be cached, using the DocIdSet provided
/// by the wrapped Filter. <p>this implementation returns the given <seealso cref="DocIdSet"/>,
/// if <seealso cref="DocIdSet#isCacheable"/> returns <code>true</code>, else it calls
/// <seealso cref="#cacheImpl(DocIdSetIterator,AtomicReader)"/>
/// <p>Note: this method returns <seealso cref="#EMPTY_DOCIDSET"/> if the given docIdSet
/// is <code>null</code> or if <seealso cref="DocIdSet#iterator()"/> return <code>null</code>. The empty
/// instance is use as a placeholder in the cache instead of the <code>null</code> value.
/// </summary>
protected internal virtual DocIdSet DocIdSetToCache(DocIdSet docIdSet, AtomicReader reader)
{
    if (docIdSet == null)
    {
        // Caching the non-null sentinel is better than returning null:
        // it lets lookups distinguish "cached empty" from "not cached yet".
        return EMPTY_DOCIDSET;
    }
    if (docIdSet.Cacheable)
    {
        return docIdSet;
    }
    // GetIterator() may legally return null; represent that with the
    // cacheable sentinel as well.
    DocIdSetIterator iterator = docIdSet.GetIterator();
    return iterator == null ? EMPTY_DOCIDSET : CacheImpl(iterator, reader);
}