/// <summary>
/// Sole constructor: stores the owning query, the iterator over matching docs,
/// and the fixed score to report for every match.
/// </summary>
public ConstantScorer(ConstantScoreQuery outerInstance, DocIdSetIterator docIdSetIterator, Weight w, float theScore)
    : base(w)
{
    this.docIdSetIterator = docIdSetIterator;
    this.theScore = theScore;
    this.outerInstance = outerInstance;
}
/// <summary>
/// Builds the weight for a <see cref="ConstantScoreQuery"/>. When the query wraps an
/// inner query, its weight is created eagerly here; otherwise <c>innerWeight</c>
/// stays <c>null</c>.
/// </summary>
public ConstantWeight(ConstantScoreQuery outerInstance, IndexSearcher searcher)
{
    this.outerInstance = outerInstance;
    if (outerInstance.m_query == null)
    {
        this.innerWeight = null;
    }
    else
    {
        this.innerWeight = outerInstance.m_query.CreateWeight(searcher);
    }
}
/// <summary>
/// Sole constructor: captures the delegate bulk scorer, the weight it belongs to,
/// and the constant score to substitute for the delegate's scores.
/// </summary>
public ConstantBulkScorer(ConstantScoreQuery outerInstance, BulkScorer bulkScorer, Weight weight, float theScore)
{
    this.bulkScorer = bulkScorer;
    this.weight = weight;
    this.theScore = theScore;
    this.outerInstance = outerInstance;
}
/// <summary>
/// Query-first bulk scoring: requires random-access bits from the filter's
/// <see cref="DocIdSet"/>; when the filter cannot supply them, delegates to the
/// leap-frog strategy instead.
/// </summary>
public override BulkScorer FilteredBulkScorer(AtomicReaderContext context, Weight weight, bool scoreDocsInOrder /* ignored (we always top-score in order) */, DocIdSet docIdSet)
{
    IBits filterAcceptDocs = docIdSet.Bits;
    if (filterAcceptDocs == null)
    {
        // Filter does not provide random-access Bits; we must fall back to leapfrog:
        return LEAP_FROG_QUERY_FIRST_STRATEGY.FilteredBulkScorer(context, weight, scoreDocsInOrder, docIdSet);
    }

    Scorer scorer = weight.GetScorer(context, null);
    if (scorer == null)
    {
        return null;
    }
    return new QueryFirstBulkScorer(scorer, filterAcceptDocs);
}
/// <summary>
/// Returns a <see cref="Weight"/> that applies the filter to the enclosed query's <see cref="Weight"/>.
/// This is accomplished by overriding the <see cref="Scorer"/> returned by the <see cref="Weight"/>.
/// </summary>
public override Weight CreateWeight(IndexSearcher searcher)
{
    Weight innerWeight = query.CreateWeight(searcher);
    return new WeightAnonymousInnerClassHelper(this, innerWeight);
}
/// <summary>
/// Returns a filtered <see cref="Scorer"/> based on this strategy.
/// </summary>
/// <param name="context">
/// the <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param>
/// <param name="weight"> the <see cref="FilteredQuery"/> <see cref="Weight"/> to create the filtered scorer. </param>
/// <param name="docIdSet"> the filter <see cref="DocIdSet"/> to apply </param>
/// <returns> a filtered scorer </returns>
/// <exception cref="System.IO.IOException"> if an <see cref="System.IO.IOException"/> occurs </exception>
public abstract Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet);
/// <summary>
/// Returns a filtered <see cref="BulkScorer"/> based on this strategy. This is an
/// optional method: the default implementation just calls
/// <see cref="FilteredScorer(AtomicReaderContext, Weight, DocIdSet)"/> and wraps the
/// result in a <see cref="BulkScorer"/>.
/// </summary>
/// <param name="context">
/// the <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param>
/// <param name="weight"> the <see cref="FilteredQuery"/> <see cref="Weight"/> to create the filtered scorer. </param>
/// <param name="scoreDocsInOrder"> <c>true</c> to score docs in order </param>
/// <param name="docIdSet"> the filter <see cref="DocIdSet"/> to apply </param>
/// <returns> a filtered top scorer </returns>
public virtual BulkScorer FilteredBulkScorer(AtomicReaderContext context, Weight weight, bool scoreDocsInOrder, DocIdSet docIdSet)
{
    Scorer scorer = FilteredScorer(context, weight, docIdSet);
    // This impl always scores docs in order, so scoreDocsInOrder can be ignored.
    return scorer == null ? null : new Weight.DefaultBulkScorer(scorer);
}
/// <summary>
/// Sole constructor: wraps the query's scorer together with the filter's
/// random-access bits.
/// </summary>
internal QueryFirstScorer(Weight weight, IBits filterBits, Scorer other)
    : base(weight)
{
    this.filterBits = filterBits;
    this.scorer = other;
}
/// <summary>
/// Sole constructor: the filter iterator acts as the primary, already positioned
/// on <paramref name="firstFilteredDoc"/>.
/// </summary>
internal PrimaryAdvancedLeapFrogScorer(Weight weight, int firstFilteredDoc, DocIdSetIterator filterIter, Scorer other)
    : base(weight, filterIter, other, other)
{
    this.firstFilteredDoc = firstFilteredDoc;
    // Initialize to prevent an advance call from moving it further:
    this.m_primaryDoc = firstFilteredDoc;
}
/// <summary>
/// Sole constructor: records the required number of matching sub-scorers and
/// resets the last-scored-doc cache (doc -1, score NaN).
/// </summary>
public ConjunctionScorerAnonymousInnerClassHelper(BooleanScorer2 outerInstance, Weight weight, Scorer[] scorers, int requiredNrMatchers)
    : base(weight, scorers)
{
    this.requiredNrMatchers = requiredNrMatchers;
    this.outerInstance = outerInstance;
    lastDocScore = float.NaN;
    lastScoredDoc = -1;
}
/// <summary>
/// Rescores the first-pass hits: sorts them by docID, walks the index leaves in a
/// merge-sort fashion re-running <c>query</c> per segment, and combines each hit's
/// original score with the query's score via <c>Combine</c> (passing <c>false</c>
/// and 0.0f when the query does not match the hit). Finally re-sorts and truncates
/// to <paramref name="topN"/>.
/// </summary>
public override TopDocs Rescore(IndexSearcher searcher, TopDocs firstPassTopDocs, int topN)
{
    ScoreDoc[] hits = (ScoreDoc[])firstPassTopDocs.ScoreDocs.Clone();
    // Sort hits so each segment's docs are visited in one contiguous run:
    Array.Sort(hits, new ComparerAnonymousInnerClassHelper(this));
    IList<AtomicReaderContext> leaves = searcher.IndexReader.Leaves;
    Weight weight = searcher.CreateNormalizedWeight(query);

    // Now merge sort docIDs from hits, with reader's leaves:
    int hitUpto = 0;
    int readerUpto = -1;
    int endDoc = 0;
    int docBase = 0;
    Scorer scorer = null;

    while (hitUpto < hits.Length)
    {
        ScoreDoc hit = hits[hitUpto];
        int docID = hit.Doc;
        AtomicReaderContext readerContext = null;
        // Advance to the leaf containing docID:
        while (docID >= endDoc)
        {
            readerUpto++;
            readerContext = leaves[readerUpto];
            endDoc = readerContext.DocBase + readerContext.Reader.MaxDoc;
        }

        if (readerContext != null)
        {
            // We advanced to another segment:
            docBase = readerContext.DocBase;
            scorer = weight.GetScorer(readerContext, null);
        }

        int targetDoc = docID - docBase;
        int actualDoc = scorer.DocID;
        if (actualDoc < targetDoc)
        {
            actualDoc = scorer.Advance(targetDoc);
        }

        if (actualDoc == targetDoc)
        {
            // Query did match this doc:
            hit.Score = Combine(hit.Score, true, scorer.GetScore());
        }
        else
        {
            // Query did not match this doc:
            Debug.Assert(actualDoc > targetDoc);
            hit.Score = Combine(hit.Score, false, 0.0f);
        }

        hitUpto++;
    }

    // TODO: we should do a partial sort (of only topN)
    // instead, but typically the number of hits is
    // smallish:
    Array.Sort(hits, new ComparerAnonymousInnerClassHelper2(this));

    if (topN < hits.Length)
    {
        ScoreDoc[] subset = new ScoreDoc[topN];
        Array.Copy(hits, 0, subset, 0, topN);
        hits = subset;
    }

    return (new TopDocs(firstPassTopDocs.TotalHits, hits, hits[0].Score));
}
/// <summary>
/// Sole constructor: forwards the sub-scorers and coordination factors to the base
/// disjunction scorer and keeps a reference to the owning <see cref="BooleanScorer2"/>.
/// </summary>
public DisjunctionSumScorerAnonymousInnerClassHelper(BooleanScorer2 outerInstance, Weight weight, Scorer[] subScorers, float[] coord)
    : base(weight, subScorers, coord)
{
    this.outerInstance = outerInstance;
}
/// <summary>
/// Sole constructor: forwards the sub-scorers and minimum-match count to the base
/// scorer and keeps a reference to the owning <see cref="BooleanScorer2"/>.
/// </summary>
public MinShouldMatchSumScorerAnonymousInnerClassHelper(BooleanScorer2 outerInstance, Weight weight, IList<Scorer> scorers, int minNrShouldMatch)
    : base(weight, scorers, minNrShouldMatch)
{
    this.outerInstance = outerInstance;
}
/// <summary>
/// Convenience constructor: delegates with a coordination factor of <c>1f</c>.
/// </summary>
internal ConjunctionScorer(Weight weight, Scorer[] scorers)
    : this(weight, scorers, 1f)
{
}
/// <summary>
/// Convenience constructor: builds a <see cref="DisjunctionScorer"/> that requires
/// at least one of the <paramref name="subScorers"/> to match.
/// </summary>
public MinShouldMatchSumScorer(Weight weight, IList<Scorer> subScorers)
    : this(weight, subScorers, 1)
{
}
/// <summary>
/// Rescores the first-pass hits: sorts them by docID, walks the index leaves in a
/// merge-sort fashion re-running <c>query</c> per segment, and combines each hit's
/// original score with the query's score via <c>Combine</c> (passing <c>false</c>
/// and 0.0f when the query does not match the hit). Finally re-sorts by score
/// descending / docID ascending and truncates to <paramref name="topN"/>.
/// </summary>
public override TopDocs Rescore(IndexSearcher searcher, TopDocs firstPassTopDocs, int topN)
{
    ScoreDoc[] hits = (ScoreDoc[])firstPassTopDocs.ScoreDocs.Clone();
    // Sort hits so each segment's docs are visited in one contiguous run:
    Array.Sort(hits, Comparer<ScoreDoc>.Create((a, b) => a.Doc - b.Doc));
    IList<AtomicReaderContext> leaves = searcher.IndexReader.Leaves;
    Weight weight = searcher.CreateNormalizedWeight(query);

    // Now merge sort docIDs from hits, with reader's leaves:
    int hitUpto = 0;
    int readerUpto = -1;
    int endDoc = 0;
    int docBase = 0;
    Scorer scorer = null;

    while (hitUpto < hits.Length)
    {
        ScoreDoc hit = hits[hitUpto];
        int docID = hit.Doc;
        AtomicReaderContext readerContext = null;
        // Advance to the leaf containing docID:
        while (docID >= endDoc)
        {
            readerUpto++;
            readerContext = leaves[readerUpto];
            endDoc = readerContext.DocBase + readerContext.Reader.MaxDoc;
        }

        if (readerContext != null)
        {
            // We advanced to another segment:
            docBase = readerContext.DocBase;
            scorer = weight.GetScorer(readerContext, null);
        }

        int targetDoc = docID - docBase;
        int actualDoc = scorer.DocID;
        if (actualDoc < targetDoc)
        {
            actualDoc = scorer.Advance(targetDoc);
        }

        if (actualDoc == targetDoc)
        {
            // Query did match this doc:
            hit.Score = Combine(hit.Score, true, scorer.GetScore());
        }
        else
        {
            // Query did not match this doc:
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(actualDoc > targetDoc);
            }
            hit.Score = Combine(hit.Score, false, 0.0f);
        }

        hitUpto++;
    }

    // TODO: we should do a partial sort (of only topN)
    // instead, but typically the number of hits is
    // smallish:
    Array.Sort(hits, Comparer<ScoreDoc>.Create((a, b) =>
    {
        // Sort by score descending, then docID ascending:
        // LUCENENET specific - compare bits rather than using equality operators to prevent these comparisons from failing in x86 in .NET Framework with optimizations enabled
        if (NumericUtils.SingleToSortableInt32(a.Score) > NumericUtils.SingleToSortableInt32(b.Score))
        {
            return (-1);
        }
        else if (NumericUtils.SingleToSortableInt32(a.Score) < NumericUtils.SingleToSortableInt32(b.Score))
        {
            return (1);
        }
        else
        {
            // this subtraction can't overflow int
            // because docIDs are >= 0:
            return (a.Doc - b.Doc);
        }
    }));

    if (topN < hits.Length)
    {
        ScoreDoc[] subset = new ScoreDoc[topN];
        Array.Copy(hits, 0, subset, 0, topN);
        hits = subset;
    }

    return (new TopDocs(firstPassTopDocs.TotalHits, hits, hits[0].Score));
}