private CachedOrds GetCachedOrds(AtomicReaderContext context) { lock (this) { object cacheKey = context.Reader.CoreCacheKey; CachedOrds ords = ordsCache[cacheKey]; if (ords == null) { ords = new CachedOrds(source.GetReader(context), context.Reader.MaxDoc); ordsCache[cacheKey] = ords; } return ords; } }
public BitsAnonymousHelper(DocIdSetAnonymousHelper outerInstance, FunctionValues predFuncValues, AtomicReaderContext context, Bits acceptDocs) { this.outerInstance = outerInstance; this.predFuncValues = predFuncValues; this.context = context; this.acceptDocs = acceptDocs; }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { return null; }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { // TODO: this is just like ValueSourceScorer, // ValueSourceFilter (spatial), // ValueSourceRangeFilter (solr); also, // https://issues.apache.org/jira/browse/LUCENE-4251 FunctionValues values = valueSource.GetValues(new Dictionary<string,object>(), context); int maxDoc = context.Reader.MaxDoc; Bits fastMatchBits; if (fastMatchFilter != null) { DocIdSet dis = fastMatchFilter.GetDocIdSet(context, null); if (dis == null) { // No documents match return null; } fastMatchBits = dis.GetBits(); if (fastMatchBits == null) { throw new System.ArgumentException("fastMatchFilter does not implement DocIdSet.bits"); } } else { fastMatchBits = null; } return new DocIdSetAnonymousInnerClassHelper(this, acceptDocs, values, maxDoc, fastMatchBits); }
public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts, bool collectPayloads) { sorter = new InPlaceMergeSorterAnonymousInnerClassHelper(this); if (spanNearQuery.Clauses.Length < 2) { throw new System.ArgumentException("Less than 2 clauses: " + spanNearQuery); } this.CollectPayloads = collectPayloads; AllowedSlop = spanNearQuery.Slop; SpanQuery[] clauses = spanNearQuery.Clauses; subSpans = new Spans[clauses.Length]; MatchPayload = new List<byte[]>(); SubSpansByDoc = new Spans[clauses.Length]; for (int i = 0; i < clauses.Length; i++) { subSpans[i] = clauses[i].GetSpans(context, acceptDocs, termContexts); SubSpansByDoc[i] = subSpans[i]; // used in toSameDoc() } Query = spanNearQuery; // kept for toString() only. }
public DocIdSetAnonymousInnerClassHelper2(FilterAnonymousInnerClassHelper4 outerInstance, AtomicReaderContext context) { this.OuterInstance = outerInstance; this.Context = context; }
public DocumentFilteredAtomicIndexReader(AtomicReaderContext context, Filter preserveFilter, bool negateFilter) : base(context.AtomicReader) { int maxDoc = @in.MaxDoc; FixedBitSet bits = new FixedBitSet(maxDoc); // ignore livedocs here, as we filter them later: DocIdSet docs = preserveFilter.GetDocIdSet(context, null); if (docs != null) { DocIdSetIterator it = docs.GetIterator(); if (it != null) { bits.Or(it); } } if (negateFilter) { bits.Flip(0, maxDoc); } if (@in.HasDeletions) { Bits oldLiveDocs = @in.LiveDocs; Debug.Assert(oldLiveDocs != null); DocIdSetIterator it = bits.GetIterator(); for (int i = it.NextDoc(); i < maxDoc; i = it.NextDoc()) { if (!oldLiveDocs.Get(i)) { // we can safely modify the current bit, as the iterator already stepped over it: bits.Clear(i); } } } this.liveDocs = bits; this.numDocs_Renamed = bits.Cardinality(); }
public SpansAnonymousInnerClassHelper(SpanNotQuery outerInstance, AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts) { this.OuterInstance = outerInstance; this.Context = context; this.AcceptDocs = acceptDocs; this.TermContexts = termContexts; includeSpans = outerInstance.include.GetSpans(context, acceptDocs, termContexts); moreInclude = true; excludeSpans = outerInstance.exclude.GetSpans(context, acceptDocs, termContexts); moreExclude = excludeSpans.Next(); }
public override Explanation Explain(AtomicReaderContext context, int doc) { return @in.Explain(context, doc); }
internal virtual void TestSort(bool useFrom, bool VERBOSE) { IndexReader reader = null; Directory dir = null; if (!VERBOSE) { Console.WriteLine("Verbosity disabled. Enable manually if needed."); } int numDocs = VERBOSE ? AtLeast(50) : AtLeast(1000); //final int numDocs = AtLeast(50); string[] tokens = new string[] { "a", "b", "c", "d", "e" }; if (VERBOSE) { Console.WriteLine("TEST: make index"); } { dir = NewDirectory(); RandomIndexWriter w = new RandomIndexWriter(Random(), dir, Similarity, TimeZone); // w.setDoRandomForceMerge(false); // w.w.getConfig().SetMaxBufferedDocs(AtLeast(100)); string[] content = new string[AtLeast(20)]; for (int contentIDX = 0; contentIDX < content.Length; contentIDX++) { StringBuilder sb = new StringBuilder(); int numTokens = TestUtil.NextInt(Random(), 1, 10); for (int tokenIDX = 0; tokenIDX < numTokens; tokenIDX++) { sb.Append(tokens[Random().Next(tokens.Length)]).Append(' '); } content[contentIDX] = sb.ToString(); } for (int docIDX = 0; docIDX < numDocs; docIDX++) { Document doc = new Document(); doc.Add(NewStringField("string", TestUtil.RandomRealisticUnicodeString(Random()), Field.Store.NO)); doc.Add(NewTextField("text", content[Random().Next(content.Length)], Field.Store.NO)); doc.Add(new SingleField("float", (float)Random().NextDouble(), Field.Store.NO)); int intValue; if (Random().Next(100) == 17) { intValue = int.MinValue; } else if (Random().Next(100) == 17) { intValue = int.MaxValue; } else { intValue = Random().Next(); } doc.Add(new Int32Field("int", intValue, Field.Store.NO)); if (VERBOSE) { Console.WriteLine(" doc=" + doc); } w.AddDocument(doc); } reader = w.Reader; w.Dispose(); } // NOTE: sometimes reader has just one segment, which is // important to test IndexSearcher searcher = NewSearcher(reader); IndexReaderContext ctx = searcher.TopReaderContext; ShardSearcher[] subSearchers; int[] docStarts; if (ctx is AtomicReaderContext) { subSearchers = new ShardSearcher[1]; docStarts = new int[1]; subSearchers[0] = new ShardSearcher((AtomicReaderContext)ctx, ctx); docStarts[0] = 0; } else { CompositeReaderContext compCTX = (CompositeReaderContext)ctx; int size = compCTX.Leaves.Count; subSearchers = new ShardSearcher[size]; docStarts = new int[size]; int docBase = 0; for (int searcherIDX = 0; searcherIDX < subSearchers.Length; searcherIDX++) { AtomicReaderContext leave = compCTX.Leaves[searcherIDX]; subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX); docStarts[searcherIDX] = docBase; docBase += leave.Reader.MaxDoc; } } IList <SortField> sortFields = new List <SortField>(); sortFields.Add(new SortField("string", SortFieldType.STRING, true)); sortFields.Add(new SortField("string", SortFieldType.STRING, false)); sortFields.Add(new SortField("int", SortFieldType.INT32, true)); sortFields.Add(new SortField("int", SortFieldType.INT32, false)); sortFields.Add(new SortField("float", SortFieldType.SINGLE, true)); sortFields.Add(new SortField("float", SortFieldType.SINGLE, false)); sortFields.Add(new SortField(null, SortFieldType.SCORE, true)); sortFields.Add(new SortField(null, SortFieldType.SCORE, false)); sortFields.Add(new SortField(null, SortFieldType.DOC, true)); sortFields.Add(new SortField(null, SortFieldType.DOC, false)); for (int iter = 0; iter < 1000 * RANDOM_MULTIPLIER; iter++) { // TODO: custom FieldComp... 
Query query = new TermQuery(new Term("text", tokens[Random().Next(tokens.Length)])); Sort sort; if (Random().Next(10) == 4) { // Sort by score sort = null; } else { SortField[] randomSortFields = new SortField[TestUtil.NextInt(Random(), 1, 3)]; for (int sortIDX = 0; sortIDX < randomSortFields.Length; sortIDX++) { randomSortFields[sortIDX] = sortFields[Random().Next(sortFields.Count)]; } sort = new Sort(randomSortFields); } int numHits = TestUtil.NextInt(Random(), 1, numDocs + 5); //final int numHits = 5; if (VERBOSE) { Console.WriteLine("TEST: search query=" + query + " sort=" + sort + " numHits=" + numHits); } int from = -1; int size = -1; // First search on whole index: TopDocs topHits; if (sort == null) { if (useFrom) { TopScoreDocCollector c = TopScoreDocCollector.Create(numHits, Random().NextBoolean()); searcher.Search(query, c); from = TestUtil.NextInt(Random(), 0, numHits - 1); size = numHits - from; TopDocs tempTopHits = c.GetTopDocs(); if (from < tempTopHits.ScoreDocs.Length) { // Can't use TopDocs#topDocs(start, howMany), since it has different behaviour when start >= hitCount // than TopDocs#merge currently has ScoreDoc[] newScoreDocs = new ScoreDoc[Math.Min(size, tempTopHits.ScoreDocs.Length - from)]; Array.Copy(tempTopHits.ScoreDocs, from, newScoreDocs, 0, newScoreDocs.Length); tempTopHits.ScoreDocs = newScoreDocs; topHits = tempTopHits; } else { topHits = new TopDocs(tempTopHits.TotalHits, new ScoreDoc[0], tempTopHits.MaxScore); } } else { topHits = searcher.Search(query, numHits); } } else { TopFieldCollector c = TopFieldCollector.Create(sort, numHits, true, true, true, Random().NextBoolean()); searcher.Search(query, c); if (useFrom) { from = TestUtil.NextInt(Random(), 0, numHits - 1); size = numHits - from; TopDocs tempTopHits = c.GetTopDocs(); if (from < tempTopHits.ScoreDocs.Length) { // Can't use TopDocs#topDocs(start, howMany), since it has different behaviour when start >= hitCount // than TopDocs#merge currently has ScoreDoc[] newScoreDocs = new ScoreDoc[Math.Min(size, tempTopHits.ScoreDocs.Length - from)]; Array.Copy(tempTopHits.ScoreDocs, from, newScoreDocs, 0, newScoreDocs.Length); tempTopHits.ScoreDocs = newScoreDocs; topHits = tempTopHits; } else { topHits = new TopDocs(tempTopHits.TotalHits, new ScoreDoc[0], tempTopHits.MaxScore); } } else { topHits = c.GetTopDocs(0, numHits); } } if (VERBOSE) { if (useFrom) { Console.WriteLine("from=" + from + " size=" + size); } Console.WriteLine(" top search: " + topHits.TotalHits + " totalHits; hits=" + (topHits.ScoreDocs == null ? "null" : topHits.ScoreDocs.Length + " maxScore=" + topHits.MaxScore)); if (topHits.ScoreDocs != null) { for (int hitIDX = 0; hitIDX < topHits.ScoreDocs.Length; hitIDX++) { ScoreDoc sd = topHits.ScoreDocs[hitIDX]; Console.WriteLine(" doc=" + sd.Doc + " score=" + sd.Score); } } } // ... then all shards: Weight w = searcher.CreateNormalizedWeight(query); TopDocs[] shardHits = new TopDocs[subSearchers.Length]; for (int shardIDX = 0; shardIDX < subSearchers.Length; shardIDX++) { TopDocs subHits; ShardSearcher subSearcher = subSearchers[shardIDX]; if (sort == null) { subHits = subSearcher.Search(w, numHits); } else { TopFieldCollector c = TopFieldCollector.Create(sort, numHits, true, true, true, Random().NextBoolean()); subSearcher.Search(w, c); subHits = c.GetTopDocs(0, numHits); } shardHits[shardIDX] = subHits; if (VERBOSE) { Console.WriteLine(" shard=" + shardIDX + " " + subHits.TotalHits + " totalHits hits=" + (subHits.ScoreDocs == null ? 
"null" : subHits.ScoreDocs.Length.ToString())); if (subHits.ScoreDocs != null) { foreach (ScoreDoc sd in subHits.ScoreDocs) { Console.WriteLine(" doc=" + sd.Doc + " score=" + sd.Score); } } } } // Merge: TopDocs mergedHits; if (useFrom) { mergedHits = TopDocs.Merge(sort, from, size, shardHits); } else { mergedHits = TopDocs.Merge(sort, numHits, shardHits); } if (mergedHits.ScoreDocs != null) { // Make sure the returned shards are correct: for (int hitIDX = 0; hitIDX < mergedHits.ScoreDocs.Length; hitIDX++) { ScoreDoc sd = mergedHits.ScoreDocs[hitIDX]; Assert.AreEqual(ReaderUtil.SubIndex(sd.Doc, docStarts), sd.ShardIndex, "doc=" + sd.Doc + " wrong shard"); } } TestUtil.AssertEquals(topHits, mergedHits); } reader.Dispose(); dir.Dispose(); }
public virtual void SetNextReader(AtomicReaderContext context) { @base = context.DocBase; }
public override BulkScorer GetBulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, IBits acceptDocs) { return new BulkScorerAnonymousInnerClassHelper(this); }
public override Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs) { throw new System.NotSupportedException(); }
public override Explanation Explain(AtomicReaderContext context, int doc) { throw new System.NotSupportedException(); }
public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext) { BinaryDocValues docValues = readerContext.AtomicReader.GetBinaryDocValues(fieldName); return new FuctionValuesAnonymousHelper(this, docValues); }
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs) { return new MatchAllScorer(OuterInstance, context.Reader, acceptDocs, this, QueryWeight); }
public ShardSearcher(AtomicReaderContext ctx, IndexReaderContext parent) : base(parent) { this.Ctx = new List<AtomicReaderContext> { ctx }; }
public SegStart(AtomicReaderContext readerContext, int end) { this.ReaderContext = readerContext; this.End = end; }
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs) { throw new System.NotSupportedException(); }
public virtual void SetReaderContext(IndexReaderContext topReaderContext, AtomicReaderContext readerContext) { this.m_readerContext = readerContext; this.m_topReaderContext = topReaderContext; }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { bool nullBitset = Random().Next(10) == 5; AtomicReader reader = context.AtomicReader; DocsEnum termDocsEnum = reader.TermDocsEnum(new Term("field", "0")); if (termDocsEnum == null) { return null; // no docs -- return null } BitArray bitSet = new BitArray(reader.MaxDoc); int d; while ((d = termDocsEnum.NextDoc()) != DocsEnum.NO_MORE_DOCS) { bitSet.Set(d, true); } return new DocIdSetAnonymousInnerClassHelper(this, nullBitset, reader, bitSet); }
public override OrdinalsSegmentReader GetReader(AtomicReaderContext context) { CachedOrds cachedOrds = GetCachedOrds(context); return new OrdinalsSegmentReaderAnonymousClass(cachedOrds); }
/// <summary> /// Expert: Returns the matches for this query in an index. Used internally /// to search for spans. /// </summary> public abstract Spans GetSpans(AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts);
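A minimal consumption sketch (an assumption, not part of the original source): spanQuery, leafContext, liveDocs and termContexts are presumed to already be in scope, and the loop simply walks the per-segment matches that GetSpans reports.

// Hedged sketch -- assumed variables: spanQuery, leafContext, liveDocs, termContexts.
Spans spans = spanQuery.GetSpans(leafContext, liveDocs, termContexts);
while (spans.Next()) // advance to the next match in this segment
{
    Console.WriteLine("doc=" + spans.Doc + " start=" + spans.Start + " end=" + spans.End);
}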
public virtual void SetNextReader(AtomicReaderContext context) { }
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs) { // We can only run as a top scorer: throw new System.NotSupportedException(); }
public virtual void SetNextReader(AtomicReaderContext context) { _docBase = context.DocBase; TopScoreDocCollector.SetNextReader(context); }
public override Explanation Explain(AtomicReaderContext context, int doc) { Scorer scorer = Scorer(context, context.AtomicReader.LiveDocs); if (scorer != null) { int newDoc = scorer.Advance(doc); if (newDoc == doc) { float freq = scorer.Freq(); SimScorer docScorer = Similarity.DoSimScorer(Stats, context); ComplexExplanation result = new ComplexExplanation(); result.Description = "weight(" + Query + " in " + doc + ") [" + Similarity.GetType().Name + "], result of:"; Explanation scoreExplanation = docScorer.Explain(doc, new Explanation(freq, "termFreq=" + freq)); result.AddDetail(scoreExplanation); result.Value = scoreExplanation.Value; result.Match = true; return result; } } return new ComplexExplanation(false, 0.0f, "no matching term"); }
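For orientation, a hedged usage sketch (not from the original source): per-segment explanations like the one above are normally requested through IndexSearcher.Explain, which locates the right leaf context and rebases the document id before delegating to the Weight; searcher, query and scoreDoc are assumed to exist.

// Hedged sketch -- assumed variables: searcher, query, scoreDoc.
Explanation explanation = searcher.Explain(query, scoreDoc.Doc);
if (explanation.IsMatch) // the document actually matched the query
{
    Console.WriteLine(explanation.ToString());
}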
public virtual void SetNextReader(AtomicReaderContext context) { terms = FieldCache.DEFAULT.GetTerms(context.AtomicReader, FromField, true); docsWithField = FieldCache.DEFAULT.GetDocsWithField(context.AtomicReader, FromField); }
public override OrdinalsSegmentReader GetReader(AtomicReaderContext context) { CachedOrds cachedOrds = GetCachedOrds(context); return new OrdinalsSegmentReaderAnonymousInnerClassHelper(this, cachedOrds); }
public virtual void SetNextReader(AtomicReaderContext context) { docBase = context.DocBase; docTermOrds = FieldCache.DEFAULT.GetDocTermOrds(context.AtomicReader, _toField); }
public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext) { Scorer scorer = (Scorer)context["scorer"]; if (scorer == null) { throw new InvalidOperationException("scores are missing; be sure to pass keepScores=true to FacetsCollector"); } return new DoubleDocValuesAnonymousInnerClassHelper(this, scorer); }
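A hedged sketch of the calling side implied by that error message (variable names are assumptions, not from the original source): the non-generic context dictionary passed to GetValues must carry the current scorer under the "scorer" key, which is only available when scores were kept, for example by a FacetsCollector created with keepScores set to true.

// Hedged sketch -- assumed variables: scorer, readerContext, valueSource; requires System.Collections.
IDictionary valueSourceContext = new Hashtable();
valueSourceContext["scorer"] = scorer; // consumed by the FunctionValues shown above
FunctionValues values = valueSource.GetValues(valueSourceContext, readerContext);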
public virtual void SetNextReader(AtomicReaderContext context) { terms = FieldCache.DEFAULT.GetTerms(context.AtomicReader, ToField, false); docBase = context.DocBase; }
public override Explanation Explain(AtomicReaderContext context, int doc) { // explain query weight Explanation queryExpl = new ComplexExplanation(true, QueryWeight, "MatchAllDocsQuery, product of:"); if (OuterInstance.Boost != 1.0f) { queryExpl.AddDetail(new Explanation(OuterInstance.Boost, "boost")); } queryExpl.AddDetail(new Explanation(QueryNorm, "queryNorm")); return queryExpl; }
public virtual void SetNextReader(AtomicReaderContext context) { this.DocBase = context.DocBase; Collector.SetNextReader(context); }
public override Spans GetSpans(AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts) { return new SpansAnonymousInnerClassHelper(this, context, acceptDocs, termContexts); }
public override void SetNextReader(AtomicReaderContext context) { base.SetNextReader(context); assertFalse("segment should not be recognized as sorted as different sorter was used", m_segmentSorted); }
public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext) { return new CachedDistanceFunctionValue(readerContext.AtomicReader, this); }
public override BulkScorer BulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, Bits acceptDocs) { return new BulkScorerAnonymousInnerClassHelper(this); }
public override Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs) { Debug.Assert(outerInstance.terms.Count > 0); AtomicReader reader = context.AtomicReader; IBits liveDocs = acceptDocs; PostingsAndFreq[] postingsFreqs = new PostingsAndFreq[outerInstance.terms.Count]; Terms fieldTerms = reader.GetTerms(outerInstance.field); if (fieldTerms == null) { return null; } // Reuse single TermsEnum below: TermsEnum te = fieldTerms.GetIterator(null); for (int i = 0; i < outerInstance.terms.Count; i++) { Term t = outerInstance.terms[i]; TermState state = states[i].Get(context.Ord); if (state == null) // term doesn't exist in this segment { Debug.Assert(TermNotInReader(reader, t), "no termstate found but term exists in reader"); return null; } te.SeekExact(t.Bytes, state); DocsAndPositionsEnum postingsEnum = te.DocsAndPositions(liveDocs, null, DocsAndPositionsFlags.NONE); // PhraseQuery on a field that did not index // positions. if (postingsEnum == null) { Debug.Assert(te.SeekExact(t.Bytes), "termstate found but no term exists in reader"); // term does exist, but has no positions throw new InvalidOperationException("field \"" + t.Field + "\" was indexed without position data; cannot run PhraseQuery (term=" + t.Text() + ")"); } postingsFreqs[i] = new PostingsAndFreq(postingsEnum, te.DocFreq, (int)outerInstance.positions[i], t); } // sort by increasing docFreq order if (outerInstance.slop == 0) { ArrayUtil.TimSort(postingsFreqs); } if (outerInstance.slop == 0) // optimize exact case { ExactPhraseScorer s = new ExactPhraseScorer(this, postingsFreqs, similarity.GetSimScorer(stats, context)); if (s.noDocs) { return null; } else { return s; } } else { return new SloppyPhraseScorer(this, postingsFreqs, outerInstance.slop, similarity.GetSimScorer(stats, context)); } }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { if (acceptDocs == null) { acceptDocs = new Bits_MatchAllBits(5); } BitArray bitset = new BitArray(5); if (acceptDocs.Get(1)) { bitset.Set(1, true); } if (acceptDocs.Get(3)) { bitset.Set(3, true); } return new DocIdBitSet(bitset); }
/// <summary> /// Returns a <see cref="DocIdSetIterator"/> representing the Boolean composition /// of the filters that have been added. /// </summary> public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs) { FixedBitSet res = null; AtomicReader reader = context.AtomicReader; bool hasShouldClauses = false; foreach (FilterClause fc in clauses) { if (fc.Occur == Occur.SHOULD) { hasShouldClauses = true; DocIdSetIterator disi = GetDISI(fc.Filter, context); if (disi == null) { continue; } if (res == null) { res = new FixedBitSet(reader.MaxDoc); } res.Or(disi); } } if (hasShouldClauses && res == null) { return null; } foreach (FilterClause fc in clauses) { if (fc.Occur == Occur.MUST_NOT) { if (res == null) { if (Debugging.AssertsEnabled) { Debugging.Assert(!hasShouldClauses); } res = new FixedBitSet(reader.MaxDoc); res.Set(0, reader.MaxDoc); // NOTE: may set bits on deleted docs } DocIdSetIterator disi = GetDISI(fc.Filter, context); if (disi != null) { res.AndNot(disi); } } } foreach (FilterClause fc in clauses) { if (fc.Occur == Occur.MUST) { DocIdSetIterator disi = GetDISI(fc.Filter, context); if (disi == null) { return null; // no documents can match } if (res == null) { res = new FixedBitSet(reader.MaxDoc); res.Or(disi); } else { res.And(disi); } } } return BitsFilteredDocIdSet.Wrap(res, acceptDocs); }
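A hedged usage sketch (the field names and the TermFilter/FilteredQuery pairing are illustrative assumptions, not taken from the original source) of how clauses are typically added before the GetDocIdSet above combines them:

// Hedged sketch -- illustrative field names and filter types.
var statusFilter = new BooleanFilter();
statusFilter.Add(new FilterClause(new TermFilter(new Term("status", "published")), Occur.MUST));
statusFilter.Add(new FilterClause(new TermFilter(new Term("status", "deleted")), Occur.MUST_NOT));
Query query = new FilteredQuery(new MatchAllDocsQuery(), statusFilter);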
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { return new DocIdSetAnonymousInnerClassHelper2(this, context); }
public override Spans GetSpans(AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts) { return new PositionCheckSpan(this, context, acceptDocs, termContexts); }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { Assert.IsNull(acceptDocs, "acceptDocs should be null, as we have an index without deletions"); return new DocIdBitSet(Rnd); }
public PositionCheckSpan(SpanPositionCheckQuery outerInstance, AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts) { this.OuterInstance = outerInstance; Spans = outerInstance.match.GetSpans(context, acceptDocs, termContexts); }
public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts) : this(spanNearQuery, context, acceptDocs, termContexts, true) { }
public NearSpansOrdered(SpanNearQuery spanNearQuery, AtomicReaderContext context, IBits acceptDocs, IDictionary<Term, TermContext> termContexts) : this(spanNearQuery, context, acceptDocs, termContexts, true) { }
public override Explanation Explain(AtomicReaderContext context, int doc) { return baseWeight.Explain(context, doc); }
public override FunctionValues GetValues(IDictionary context, AtomicReaderContext readerContext) { FunctionValues shapeValues = shapeValueSource.GetValues(context, readerContext); return new DoubleDocValuesAnonymousClass(this, shapeValues); }
public override BulkScorer BulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, Bits acceptDocs) { // TODO: it could be better if we take acceptDocs // into account instead of baseScorer? Scorer baseScorer = baseWeight.Scorer(context, acceptDocs); DrillSidewaysScorer.DocsAndCost[] dims = new DrillSidewaysScorer.DocsAndCost[drillDowns.Length]; int nullCount = 0; for (int dim = 0; dim < dims.Length; dim++) { dims[dim] = new DrillSidewaysScorer.DocsAndCost(); dims[dim].sidewaysCollector = outerInstance.drillSidewaysCollectors[dim]; if (drillDowns[dim] is Filter) { // Pass null for acceptDocs because we already // passed it to baseScorer and baseScorer is // MUST'd here DocIdSet dis = ((Filter)drillDowns[dim]).GetDocIdSet(context, null); if (dis == null) { continue; } Bits bits = dis.GetBits(); if (bits != null) { // TODO: this logic is too naive: the // existence of bits() in DIS today means // either "I'm a cheap FixedBitSet so apply me down // low as you decode the postings" or "I'm so // horribly expensive so apply me after all // other Query/Filter clauses pass" // Filter supports random access; use that to // prevent .advance() on costly filters: dims[dim].bits = bits; // TODO: Filter needs to express its expected // cost somehow, before pulling the iterator; // we should use that here to set the order to // check the filters: } else { DocIdSetIterator disi = dis.GetIterator(); if (disi == null) { nullCount++; continue; } dims[dim].disi = disi; } } else { DocIdSetIterator disi = ((Weight)drillDowns[dim]).Scorer(context, null); if (disi == null) { nullCount++; continue; } dims[dim].disi = disi; } } // If more than one dim has no matches, then there // are no hits nor drill-sideways counts. Or, if we // have only one dim and that dim has no matches, // same thing. //if (nullCount > 1 || (nullCount == 1 && dims.length == 1)) { if (nullCount > 1) { return null; } // Sort drill-downs by most restrictive first: Array.Sort(dims); if (baseScorer == null) { return null; } return new DrillSidewaysScorer(context, baseScorer, outerInstance.drillDownCollector, dims, outerInstance.scoreSubDocsAtOnce); }
/// <summary> /// Returns a <see cref="CustomScoreProvider"/> that calculates the custom scores /// for the given <see cref="IndexReader"/>. The default implementation returns a default /// implementation as specified in the docs of <see cref="CustomScoreProvider"/>. /// @since 2.9.2 /// </summary> protected internal virtual CustomScoreProvider GetCustomScoreProvider(AtomicReaderContext context) // LUCENENET NOTE: Marked internal for documentation comments in CustomScoreProvider { return(new CustomScoreProvider(context)); }
/// <summary> /// Returns a <see cref="TermsEnum"/> positioned at this weight's <see cref="Term"/>, or <c>null</c> if /// the term does not exist in the given context. /// </summary> internal TermsEnum GetTermsEnum(AtomicReaderContext context) { TermState state = TermStates.Get(context.Ord); if (state == null) // term is not present in that reader { Debug.Assert(TermNotInReader(context.AtomicReader, OuterInstance.Term_Renamed), "no termstate found but term exists in reader term=" + OuterInstance.Term_Renamed); return null; } //System.out.println("LD=" + reader.getLiveDocs() + " set?=" + (reader.getLiveDocs() != null ? reader.getLiveDocs().get(0) : "null")); TermsEnum termsEnum = context.AtomicReader.Terms(OuterInstance.Term_Renamed.Field()).Iterator(null); termsEnum.SeekExact(OuterInstance.Term_Renamed.Bytes(), state); return termsEnum; }
public override Explanation Explain(AtomicReaderContext context, int doc) { Explanation explain = DoExplain(context, doc); return explain ?? new Explanation(0.0f, "no matching docs"); }
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs) { Debug.Assert(TermStates.TopReaderContext == ReaderUtil.GetTopLevelContext(context), "The top-reader used to create Weight (" + TermStates.TopReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.GetTopLevelContext(context)); TermsEnum termsEnum = GetTermsEnum(context); if (termsEnum == null) { return null; } DocsEnum docs = termsEnum.Docs(acceptDocs, null); Debug.Assert(docs != null); return new TermScorer(this, docs, Similarity.DoSimScorer(Stats, context)); }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, IBits acceptDocs) { return new ContainsVisitor(this, context, acceptDocs).Visit(m_grid.WorldCell, acceptDocs); }
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs) { DocIdSet innerNullIteratorSet = new DocIdSetAnonymousInnerClassHelper2(this); return new FilteredDocIdSetAnonymousInnerClassHelper2(this, innerNullIteratorSet); }
public ContainsVisitor(ContainsPrefixTreeFilter outerInstance, AtomicReaderContext context, IBits acceptDocs) : base(outerInstance, context, acceptDocs) { }
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs) { if (Stats == null) { return null; } else { return new SpanScorer(query.GetSpans(context, acceptDocs, TermContexts), this, Similarity.DoSimScorer(Stats, context)); } }
public DocIdSetAnonymousHelper(PredicateValueSourceFilter outerInstance, AtomicReaderContext context, Bits acceptDocs) { this.outerInstance = outerInstance; this.context = context; this.acceptDocs = acceptDocs; }