/// <summary>
/// Rewrites this <see cref="SortField"/>, returning a new <see cref="SortField"/> if a change is made.
/// Subclasses should override this to define their rewriting behavior when this
/// <see cref="SortField"/> is of type <see cref="SortFieldType.REWRITEABLE"/>.
/// <para/>
/// @lucene.experimental
/// </summary>
/// <param name="searcher"> <see cref="IndexSearcher"/> to use during rewriting </param>
/// <returns> New rewritten <see cref="SortField"/>, or <c>this</c> if nothing has changed. </returns>
/// <exception cref="System.IO.IOException"> Can be thrown by the rewriting </exception>
public virtual SortField Rewrite(IndexSearcher searcher)
{
    return this;
}
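// Hedged sketch, not part of the sources above: a hypothetical SortField
// subclass of type SortFieldType.REWRITEABLE that defers choosing a concrete
// sort until the IndexSearcher is known. The LateBoundSortField name and the
// "resolve to INT64" decision are assumptions made purely for illustration.
using Lucene.Net.Search;

public class LateBoundSortField : SortField
{
    private readonly string fieldName;

    public LateBoundSortField(string fieldName)
        : base(fieldName, SortFieldType.REWRITEABLE)
    {
        this.fieldName = fieldName;
    }

    public override SortField Rewrite(IndexSearcher searcher)
    {
        // Now that the searcher is available, resolve to a concrete SortField.
        // Returning a new instance signals that a change was made; a SortField
        // that needs no rewriting would simply return itself.
        return new SortField(fieldName, SortFieldType.INT64, false);
    }
}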
public override Weight CreateWeight(IndexSearcher searcher)
{
    return new BooleanWeight(this, searcher, disableCoord);
}
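// Hedged usage sketch, not part of the sources above: the disableCoord flag
// chosen when the BooleanQuery is constructed is what ends up in the
// BooleanWeight created by CreateWeight. Field names and the already-open
// searcher are assumptions for illustration.
using Lucene.Net.Index;
using Lucene.Net.Search;

public static class BooleanWeightExample
{
    public static Weight BuildWeight(IndexSearcher searcher)
    {
        BooleanQuery booleanQuery = new BooleanQuery(true); // true disables the coord factor
        booleanQuery.Add(new TermQuery(new Term("body", "lucene")), Occur.MUST);
        booleanQuery.Add(new TermQuery(new Term("body", "rescore")), Occur.SHOULD);

        // Callers rarely invoke CreateWeight directly; IndexSearcher creates
        // and normalizes the Weight itself when it executes the query.
        return searcher.CreateNormalizedWeight(booleanQuery);
    }
}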
public override TopDocs Rescore(IndexSearcher searcher, TopDocs firstPassTopDocs, int topN)
{
    ScoreDoc[] hits = (ScoreDoc[])firstPassTopDocs.ScoreDocs.Clone();
    Array.Sort(hits, Comparer<ScoreDoc>.Create((a, b) => a.Doc - b.Doc));

    IList<AtomicReaderContext> leaves = searcher.IndexReader.Leaves;
    Weight weight = searcher.CreateNormalizedWeight(query);

    // Now merge sort docIDs from hits, with reader's leaves:
    int hitUpto = 0;
    int readerUpto = -1;
    int endDoc = 0;
    int docBase = 0;
    Scorer scorer = null;

    while (hitUpto < hits.Length)
    {
        ScoreDoc hit = hits[hitUpto];
        int docID = hit.Doc;
        AtomicReaderContext readerContext = null;
        while (docID >= endDoc)
        {
            readerUpto++;
            readerContext = leaves[readerUpto];
            endDoc = readerContext.DocBase + readerContext.Reader.MaxDoc;
        }

        if (readerContext != null)
        {
            // We advanced to another segment:
            docBase = readerContext.DocBase;
            scorer = weight.GetScorer(readerContext, null);
        }

        int targetDoc = docID - docBase;
        int actualDoc = scorer.DocID;
        if (actualDoc < targetDoc)
        {
            actualDoc = scorer.Advance(targetDoc);
        }

        if (actualDoc == targetDoc)
        {
            // Query did match this doc:
            hit.Score = Combine(hit.Score, true, scorer.GetScore());
        }
        else
        {
            // Query did not match this doc:
            if (Debugging.AssertsEnabled)
            {
                Debugging.Assert(actualDoc > targetDoc);
            }
            hit.Score = Combine(hit.Score, false, 0.0f);
        }

        hitUpto++;
    }

    // TODO: we should do a partial sort (of only topN)
    // instead, but typically the number of hits is
    // smallish:
    Array.Sort(hits, Comparer<ScoreDoc>.Create((a, b) =>
    {
        // Sort by score descending, then docID ascending:
        if (a.Score > b.Score)
        {
            return -1;
        }
        else if (a.Score < b.Score)
        {
            return 1;
        }
        else
        {
            // this subtraction can't overflow int
            // because docIDs are >= 0:
            return a.Doc - b.Doc;
        }
    }));

    if (topN < hits.Length)
    {
        ScoreDoc[] subset = new ScoreDoc[topN];
        Array.Copy(hits, 0, subset, 0, topN);
        hits = subset;
    }

    return new TopDocs(firstPassTopDocs.TotalHits, hits, hits[0].Score);
}
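// Hedged sketch, not part of the sources above: a minimal QueryRescorer
// subclass whose Combine keeps the first-pass score and adds the second-pass
// score only when the rescoring query matched. The AdditiveRescorer name and
// the variables in the usage comment are assumptions for illustration.
using Lucene.Net.Search;

public class AdditiveRescorer : QueryRescorer
{
    public AdditiveRescorer(Query query)
        : base(query)
    {
    }

    protected override float Combine(float firstPassScore, bool secondPassMatches, float secondPassScore)
    {
        // Non-matching docs keep their first-pass score instead of being dropped.
        return secondPassMatches ? firstPassScore + secondPassScore : firstPassScore;
    }
}

// Usage: rescore the first-pass hits, keeping only the best 10.
// TopDocs rescored = new AdditiveRescorer(secondPassQuery).Rescore(searcher, firstPassTopDocs, 10);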
/// <summary>
/// Returns a <see cref="Weight"/> that applies the filter to the enclosed query's <see cref="Weight"/>.
/// This is accomplished by overriding the <see cref="Scorer"/> returned by the <see cref="Weight"/>.
/// </summary>
public override Weight CreateWeight(IndexSearcher searcher)
{
    Weight weight = query.CreateWeight(searcher);
    return new WeightAnonymousInnerClassHelper(this, weight);
}
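// Hedged usage sketch, not part of the sources above, assuming the enclosing
// class is FilteredQuery: the Weight created by CreateWeight only scores
// documents accepted by the filter. Field names, terms, and the already-open
// searcher are assumptions for illustration.
using Lucene.Net.Index;
using Lucene.Net.Search;

public static class FilteredQueryExample
{
    public static TopDocs SearchPublished(IndexSearcher searcher)
    {
        Query inner = new TermQuery(new Term("body", "lucene"));
        Filter filter = new QueryWrapperFilter(new TermQuery(new Term("status", "published")));

        // The filtered query's Weight wraps inner's Weight so that its Scorer
        // visits only documents passing the filter.
        Query filtered = new FilteredQuery(inner, filter);
        return searcher.Search(filtered, 10);
    }
}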
/// <summary>
/// Sugar API, calling <see cref="QueryRescorer.Rescore(IndexSearcher, TopDocs, int)"/> using a simple linear
/// combination of firstPassScore + <paramref name="weight"/> * secondPassScore.
/// </summary>
public static TopDocs Rescore(IndexSearcher searcher, TopDocs topDocs, Query query, double weight, int topN)
{
    return new QueryRescorerAnonymousInnerClassHelper(query, weight).Rescore(searcher, topDocs, topN);
}
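// Hedged usage sketch, not part of the sources above: a cheap first pass
// followed by the static sugar method, which blends scores as
// firstPassScore + weight * secondPassScore. Field names, the weight of 2.0,
// and the already-open searcher are assumptions for illustration.
using Lucene.Net.Index;
using Lucene.Net.Search;

public static class RescoreExample
{
    public static TopDocs TwoPassSearch(IndexSearcher searcher)
    {
        // First pass: cheap term query over the top 100 hits.
        Query firstPass = new TermQuery(new Term("body", "lucene"));
        TopDocs firstPassTopDocs = searcher.Search(firstPass, 100);

        // Second pass: a more expensive phrase query, weighted by 2.0,
        // keeping only the best 10 documents.
        PhraseQuery secondPass = new PhraseQuery();
        secondPass.Add(new Term("body", "apache"));
        secondPass.Add(new Term("body", "lucene"));

        return QueryRescorer.Rescore(searcher, firstPassTopDocs, secondPass, 2.0, 10);
    }
}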