protected internal DisjunctionScorer(Weight weight, Scorer[] subScorers) : base(weight) { this.SubScorers = subScorers; this.NumScorers = subScorers.Length; Heapify(); }
private void SearchWithFilter(IndexReader reader, Weight weight, Filter filter, Collector collector) { DocIdSet docIdSet = filter.GetDocIdSet(reader); if (docIdSet == null) return; Scorer scorer = weight.Scorer(reader, true, false); if (scorer == null) return; scorer.DocID(); DocIdSetIterator docIdSetIterator = docIdSet.Iterator(); if (docIdSetIterator == null) return; int target = docIdSetIterator.NextDoc(); int num = scorer.Advance(target); collector.SetScorer(scorer); while (true) { while (num != target) { if (num > target) target = docIdSetIterator.Advance(num); else num = scorer.Advance(target); } if (num != DocIdSetIterator.NO_MORE_DOCS && !((GroupCollector)collector).GroupLimitReached) { collector.Collect(num); target = docIdSetIterator.NextDoc(); num = scorer.Advance(target); } else break; } }
internal ExactPhraseScorer(Weight weight, PhraseQuery.PostingsAndFreq[] postings, Similarity.SimScorer docScorer) : base(weight) { this.DocScorer = docScorer; ChunkStates = new ChunkState[postings.Length]; EndMinus1 = postings.Length - 1; // min(cost) Cost_Renamed = postings[0].Postings.Cost(); for (int i = 0; i < postings.Length; i++) { // Coarse optimization: advance(target) is fairly // costly, so, if the relative freq of the 2nd // rarest term is not that much (> 1/5th) rarer than // the first term, then we just use .nextDoc() when // ANDing. this buys ~15% gain for phrases where // freq of rarest 2 terms is close: bool useAdvance = postings[i].DocFreq > 5 * postings[0].DocFreq; ChunkStates[i] = new ChunkState(postings[i].Postings, -postings[i].Position, useAdvance); if (i > 0 && postings[i].Postings.NextDoc() == DocIdSetIterator.NO_MORE_DOCS) { NoDocs = true; return; } } }
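// A hedged, illustrative sketch of the useAdvance heuristic in the constructor above; the document
// frequencies are hypothetical, not taken from the source. A term only pays for Advance() while
// ANDing when it is much more common (more than 5x) than the rarest term.
int rarestDocFreq = 100;                              // postings[0] is assumed to hold the rarest term
int secondDocFreq = 450;                              // a second term of comparable rarity
bool useAdvance = secondDocFreq > 5 * rarestDocFreq;  // false: 450 <= 500, so NextDoc() is used instead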
internal Hits(Searcher s, Query q, Filter f) { weight = q.Weight(s); searcher = s; filter = f; GetMoreDocs(50); // retrieve 100 initially (GetMoreDocs doubles the requested minimum) }
private float freq; // phrase frequency in current doc as computed by phraseFreq(). internal PhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms):base(similarity) { this.norms = norms; this.weight = weight; this.value_Renamed = weight.Value; // convert tps to a list of phrase positions. // note: phrase-position differs from term-position in that its position // reflects the phrase offset: pp.pos = tp.pos - offset. // this makes it easy to identify a matching (exact) phrase // when all PhrasePositions have exactly the same position. for (int i = 0; i < tps.Length; i++) { PhrasePositions pp = new PhrasePositions(tps[i], offsets[i]); if (last != null) { // add next to end of list last.next = pp; } else { first = pp; } last = pp; } pq = new PhraseQueue(tps.Length); // construct empty pq first.doc = -1; }
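// A worked example (illustrative positions, not from the source) of the pp.pos = tp.pos - offset
// conversion described above. For the phrase "new york", "new" has offset 0 and "york" has offset 1;
// if a document contains "new" at term position 7 and "york" at term position 8, both phrase
// positions become 7 - 0 = 7 and 8 - 1 = 7, and identical phrase positions across all
// PhrasePositions signal an exact phrase match.
int[] termPositions = { 7, 8 };
int[] termOffsets = { 0, 1 };
bool exactMatch = (termPositions[0] - termOffsets[0]) == (termPositions[1] - termOffsets[1]); // true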
internal MatchAllScorer(MatchAllDocsQuery enclosingInstance, IndexReader reader, Similarity similarity, Weight w, byte[] norms):base(similarity) { InitBlock(enclosingInstance); this.termDocs = reader.TermDocs(null); score = w.Value; this.norms = norms; }
internal MatchAllScorer(MatchAllDocsQuery outerInstance, IndexReader reader, Bits liveDocs, Weight w, float score) : base(w) { this.OuterInstance = outerInstance; this.LiveDocs = liveDocs; this.Score_Renamed = score; MaxDoc = reader.MaxDoc; }
internal MatchAllScorer(MatchAllDocsQuery enclosingInstance, IndexReader reader, Similarity similarity, Weight w):base(similarity) { InitBlock(enclosingInstance); this.reader = reader; id = -1; maxId = reader.MaxDoc() - 1; score = w.GetValue(); }
public /*internal*/ bool debugCheckedForDeletions = false; // for test purposes. internal Hits(Searcher s, Query q, Filter f) { weight = q.Weight(s); searcher = s; filter = f; nDeletions = CountDeletions(s); GetMoreDocs(50); // retrieve 100 initially (GetMoreDocs doubles the requested minimum) lengthAtStart = length; }
/// <summary> Construct a <code>TermScorer</code>. /// /// </summary> /// <param name="weight">The weight of the <code>Term</code> in the query. /// </param> /// <param name="td">An iterator over the documents matching the <code>Term</code>. /// </param> /// <param name="similarity">The <code>Similarity</code> implementation to be used for score /// computations. /// </param> /// <param name="norms">The field norms of the document fields for the <code>Term</code>. /// </param> public /*internal*/ TermScorer(Weight weight, TermDocs td, Similarity similarity, byte[] norms):base(similarity) { this.weight = weight; this.termDocs = td; this.norms = norms; this.weightValue = weight.GetValue(); for (int i = 0; i < SCORE_CACHE_SIZE; i++) scoreCache[i] = GetSimilarity().Tf(i) * weightValue; }
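// A minimal sketch (assumed, not the library's actual Score() body) of how a cache like scoreCache
// is typically consulted at scoring time: frequencies below SCORE_CACHE_SIZE reuse the precomputed
// tf(freq) * weightValue product, while larger frequencies fall back to Similarity.Tf(). The field
// names mirror those initialized in the constructor above.
private float RawScoreFor(int freq)
{
    return freq < SCORE_CACHE_SIZE
        ? scoreCache[freq]                        // cache hit: tf(freq) * weightValue precomputed above
        : GetSimilarity().Tf(freq) * weightValue; // rare large frequencies computed on demand
}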
/// <summary> A search implementation which spawns a new thread for each /// Searchable, waits for each search to complete and merges /// the results back together. /// </summary> public override TopDocs Search(Weight weight, Filter filter, int nDocs) { HitQueue hq = new HitQueue(nDocs, false); int totalHits = 0; MultiSearcherThread[] msta = new MultiSearcherThread[searchables.Length]; for (int i = 0; i < searchables.Length; i++) { // search each searchable // Assume there are not too many searchables, and that the cost of creating a thread is far lower than the cost of a search msta[i] = new MultiSearcherThread(searchables[i], weight, filter, nDocs, hq, i, starts, "MultiSearcher thread #" + (i + 1)); msta[i].Start(); } for (int i = 0; i < searchables.Length; i++) { try { msta[i].Join(); } catch (System.Threading.ThreadInterruptedException ie) { // In 3.0 we will change this to throw // InterruptedException instead SupportClass.ThreadClass.Current().Interrupt(); throw new System.SystemException(ie.Message, ie); } System.IO.IOException ioe = msta[i].GetIOException(); if (ioe == null) { totalHits += msta[i].Hits(); } else { // if one search produced an IOException, rethrow it throw ioe; } } ScoreDoc[] scoreDocs = new ScoreDoc[hq.Size()]; for (int i = hq.Size() - 1; i >= 0; i--) // put docs in array scoreDocs[i] = (ScoreDoc) hq.Pop(); float maxScore = (totalHits == 0) ? System.Single.NegativeInfinity : scoreDocs[0].score; return new TopDocs(totalHits, scoreDocs, maxScore); }
public override void Search(Weight weight, Filter filter, Collector collector) { if (filter == null) { for (int index = 0; index < this.subReaders.Length; ++index) { collector.SetNextReader(this.subReaders[index], this.docStarts[index]); Scorer scorer = weight.Scorer(this.subReaders[index], !collector.AcceptsDocsOutOfOrder, true); if (scorer != null) this.SearchWithScorer(this.subReaders[index], weight, scorer, collector); } } else { for (int index = 0; index < this.subReaders.Length; ++index) { collector.SetNextReader(this.subReaders[index], this.docStarts[index]); this.SearchWithFilter(this.subReaders[index], weight, filter, collector); } } }
internal PhraseScorer(Weight weight, TermPositions[] tps, int[] positions, Similarity similarity, byte[] norms) : base(similarity) { this.norms = norms; this.weight = weight; this.value_Renamed = weight.GetValue(); // convert tps to a list for (int i = 0; i < tps.Length; i++) { PhrasePositions pp = new PhrasePositions(tps[i], positions[i]); if (last != null) { // add next to end of list last.next = pp; } else first = pp; last = pp; } pq = new PhraseQueue(tps.Length); // construct empty pq }
public void CreateSpatialFilterAndWeight(PointRadiusCriterion geoFilter, Filter currentFilter, Weight currentWeight) { var spatialContext = SpatialContext.GEO; var geohashTree = new GeohashPrefixTree(spatialContext, 10); var strategy = new RecursivePrefixTreeStrategy(geohashTree, geoFilter.FieldName); var point = spatialContext.MakePoint(geoFilter.Longitude, geoFilter.Latitude); var spatialArgs = new SpatialArgs(SpatialOperation.Intersects, spatialContext.MakeCircle(point, DistanceUtils.Dist2Degrees(geoFilter.RadiusKm, DistanceUtils.EARTH_MEAN_RADIUS_KM))); var circle = spatialContext.MakeCircle(point, DistanceUtils.Dist2Degrees(geoFilter.RadiusKm, DistanceUtils.EARTH_MEAN_RADIUS_KM)); var circleCells = strategy.GetGrid().GetWorldNode().GetSubCells(circle); var luceneFilters = new List<Filter>(); if (currentFilter != null) luceneFilters.Add(currentFilter); var tempSpatial = strategy.MakeFilter(spatialArgs); luceneFilters.Add(tempSpatial); if (geoFilter.Sort != PointRadiusCriterion.SortOption.None) { var valueSource = strategy.MakeDistanceValueSource(point); var funcQ = new FunctionQuery(valueSource); // this is a bit odd... but boosting the score negatively orders results if (geoFilter.Sort == PointRadiusCriterion.SortOption.Ascending) { funcQ.Boost = -1; } spatialWeight = funcQ.CreateWeight(this); spatialWeight.GetSumOfSquaredWeights(); luceneFilters.Add(new QueryWrapperFilter(currentWeight.Query)); } spatialFilter = new ChainedFilter(luceneFilters.ToArray(), 1); }
/// <summary> A search implementation which executes each /// <see cref="Searchable"/> in its own thread, waits for each search to complete /// and merges the results back together. /// </summary> public override TopDocs Search(Weight weight, Filter filter, int nDocs) { HitQueue hq = new HitQueue(nDocs, false); object lockObj = new object(); TopDocs[] results = new TopDocs[searchables.Length]; // search each searchable Parallel.For(0, searchables.Length, (i) => results[i] = MultiSearcherCallableNoSort(ThreadLock.MonitorLock, lockObj, searchables[i], weight, filter, nDocs, hq, i, starts)); int totalHits = 0; float maxScore = float.NegativeInfinity; foreach (TopDocs topDocs in results) { totalHits += topDocs.TotalHits; maxScore = Math.Max(maxScore, topDocs.MaxScore); } ScoreDoc[] scoreDocs = new ScoreDoc[hq.Size()]; for (int i = hq.Size() - 1; i >= 0; i--) // put docs in array scoreDocs[i] = hq.Pop(); return new TopDocs(totalHits, scoreDocs, maxScore); }
public MinShouldMatchSumScorerAnonymousClass(BooleanScorer2 outerInstance, Weight weight, IList<Scorer> scorers, int minNrShouldMatch) : base(weight, scorers, minNrShouldMatch) { this.outerInstance = outerInstance; }
protected override void Search(IList<AtomicReaderContext> leaves, Weight weight, ICollector collector) { Assert.AreEqual(-1, collector.GetType().Name.IndexOf("OutOfOrder")); base.Search(leaves, weight, collector); }
public DocIdSetAnonymousClass(IBits acceptDocs, AtomicReaderContext privateContext, Weight weight) { this.acceptDocs = acceptDocs; this.privateContext = privateContext; this.weight = weight; }
public ConjunctionScorerAnonymousInnerClassHelper(BooleanScorer2 outerInstance, Weight weight, Scorer[] scorers, int requiredNrMatchers) : base(weight, scorers) { this.OuterInstance = outerInstance; this.RequiredNrMatchers = requiredNrMatchers; lastScoredDoc = -1; lastDocScore = float.NaN; }
internal ConjunctionScorer(Weight weight, Scorer[] scorers) : this(weight, scorers, 1f) { }
internal ExactPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, byte[] norms) : base(weight, tps, offsets, similarity, norms) { }
public ConstantScorer(ConstantScoreQuery enclosingInstance, Similarity similarity, IndexReader reader, Weight w) : base(similarity) { InitBlock(enclosingInstance); theScore = w.GetValue(); docIdSetIterator = Enclosing_Instance.filter.GetDocIdSet(reader).Iterator(); }
public override Explanation Explain(Weight weight, int doc) { return(weight.Explain(reader, doc)); }
public override Explanation Explain(Weight weight, int doc) { int i = SubSearcher(doc); // find searcher index return(searchables[i].Explain(weight, doc - starts[i])); // dispatch to searcher }
internal QueryFirstScorer(Weight weight, IBits filterBits, Scorer other) : base(weight) { this.scorer = other; this.filterBits = filterBits; }
public override TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort) { throw new System.NotSupportedException(); }
public override void Search(Weight weight, Filter filter, Collector results) { throw new System.NotSupportedException(); }
public override Explanation Explain(Weight weight, int doc) { throw new System.NotSupportedException(); }
internal JustCompileScorer(Weight weight) : base(weight) { }
public virtual TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort) { return local.Search(weight, filter, n, sort); }
/// <summary> /// Construct a <see cref="TermScorer"/>. /// </summary> /// <param name="weight"> /// The weight of the <see cref="Index.Term"/> in the query. </param> /// <param name="td"> /// An iterator over the documents matching the <see cref="Index.Term"/>. </param> /// <param name="docScorer"> /// The <see cref="Similarity.SimScorer"/> implementation /// to be used for score computations. </param> internal TermScorer(Weight weight, DocsEnum td, Similarity.SimScorer docScorer) : base(weight) { this.docScorer = docScorer; this.docsEnum = td; }
abstract public TopDocs Search(Weight weight, Filter filter, int n);
public override TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort) { return(Search(weight, filter, nDocs, sort, true)); }
abstract public TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort);
internal SloppyPhraseScorer(Weight weight, TermPositions[] tps, int[] offsets, Similarity similarity, int slop, byte[] norms) : base(weight, tps, offsets, similarity, norms) { this.slop = slop; }
public override void Search(Weight weight, Filter filter, Collector results) { throw new System.NotSupportedException(Lucene.Net.Search.JustCompileSearch.UNSUPPORTED_MSG); }
/// <summary> /// Constructs a Scorer </summary> /// <param name="weight"> The scorers <code>Weight</code>. </param> protected Scorer(Weight weight) { this.m_weight = weight; }
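// A minimal, hedged sketch of a Scorer subclass built on the constructor above. Member names
// follow the 4.x-style snippets in this section (DocID, Freq, GetScore, GetCost) and may differ
// in other versions; the class itself is hypothetical and simply scores every document in
// [0, maxDoc) with a fixed score.
internal sealed class ConstantAllDocsScorer : Scorer
{
    private readonly int maxDoc;
    private readonly float constantScore;
    private int doc = -1;

    internal ConstantAllDocsScorer(Weight weight, int maxDoc, float constantScore)
        : base(weight) // stores the weight in m_weight, as shown above
    {
        this.maxDoc = maxDoc;
        this.constantScore = constantScore;
    }

    public override int DocID => doc;
    public override int Freq => 1;
    public override float GetScore() => constantScore;
    public override long GetCost() => maxDoc;

    public override int NextDoc() => doc = (doc + 1 < maxDoc) ? doc + 1 : NO_MORE_DOCS;

    public override int Advance(int target) => doc = (target < maxDoc) ? target : NO_MORE_DOCS;
}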
public override Explanation Explain(Weight weight, int doc) { int n = ReaderUtil.SubIndex(doc, docStarts); int deBasedDoc = doc - docStarts[n]; return weight.Explain(subReaders[n], deBasedDoc); }
/// <summary> /// Creates a new instance of <see cref="DisjunctionMaxScorer"/> /// </summary> /// <param name="weight"> /// The <see cref="Weight"/> to be used. </param> /// <param name="tieBreakerMultiplier"> /// Multiplier applied to non-maximum-scoring subqueries for a /// document as they are summed into the result. </param> /// <param name="subScorers"> /// The sub scorers this <see cref="Scorer"/> should iterate on </param> public DisjunctionMaxScorer(Weight weight, float tieBreakerMultiplier, Scorer[] subScorers) : base(weight, subScorers) { this.tieBreakerMultiplier = tieBreakerMultiplier; }
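// A worked example of the tieBreakerMultiplier semantics described above: the document score is
// the maximum sub-score plus tieBreakerMultiplier times the sum of the remaining (non-maximum)
// sub-scores. The sub-scores below are illustrative values, not taken from the source.
float[] subScores = { 0.9f, 0.4f, 0.2f };
float tieBreakerMultiplier = 0.1f;
float max = 0f, sum = 0f;
foreach (float s in subScores)
{
    sum += s;
    max = Math.Max(max, s);
}
float documentScore = max + (sum - max) * tieBreakerMultiplier; // 0.9 + (1.5 - 0.9) * 0.1 = 0.96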
public override TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort) { return Search(weight, filter, nDocs, sort, true); }
public DisjunctionSumScorerAnonymousInnerClassHelper(BooleanScorer2 outerInstance, Weight weight, Scorer[] subScorers, float[] coord) : base(weight, subScorers, coord) { this.OuterInstance = outerInstance; }
public override void Search(Weight weight, Filter filter, Collector collector) { if (filter == null) { for (int i = 0; i < subReaders.Length; i++) { // search each subreader collector.SetNextReader(subReaders[i], docStarts[i]); Scorer scorer = weight.Scorer(subReaders[i], !collector.AcceptsDocsOutOfOrder(), true); if (scorer != null) { scorer.Score(collector); } } } else { for (int i = 0; i < subReaders.Length; i++) { // search each subreader collector.SetNextReader(subReaders[i], docStarts[i]); SearchWithFilter(subReaders[i], weight, filter, collector); } } }
internal PrimaryAdvancedLeapFrogScorer(Weight weight, int firstFilteredDoc, DocIdSetIterator filterIter, Scorer other) : base(weight, filterIter, other, other) { this.firstFilteredDoc = firstFilteredDoc; this.m_primaryDoc = firstFilteredDoc; // initialize to prevent an Advance() call from moving it further }
public virtual void TestBS2DisjunctionNextVsAdvance() { Directory d = NewDirectory(); RandomIndexWriter w = new RandomIndexWriter(Random(), d, Similarity, TimeZone); int numDocs = AtLeast(300); for (int docUpto = 0; docUpto < numDocs; docUpto++) { string contents = "a"; if (Random().Next(20) <= 16) { contents += " b"; } if (Random().Next(20) <= 8) { contents += " c"; } if (Random().Next(20) <= 4) { contents += " d"; } if (Random().Next(20) <= 2) { contents += " e"; } if (Random().Next(20) <= 1) { contents += " f"; } Document doc = new Document(); doc.Add(new TextField("field", contents, Field.Store.NO)); w.AddDocument(doc); } w.ForceMerge(1); IndexReader r = w.Reader; IndexSearcher s = NewSearcher(r); w.Dispose(); for (int iter = 0; iter < 10 * RANDOM_MULTIPLIER; iter++) { if (VERBOSE) { Console.WriteLine("iter=" + iter); } IList<string> terms = new List<string>(Arrays.AsList("a", "b", "c", "d", "e", "f")); int numTerms = TestUtil.NextInt(Random(), 1, terms.Count); while (terms.Count > numTerms) { terms.RemoveAt(Random().Next(terms.Count)); } if (VERBOSE) { Console.WriteLine(" terms=" + terms); } BooleanQuery q = new BooleanQuery(); foreach (string term in terms) { q.Add(new BooleanClause(new TermQuery(new Term("field", term)), Occur.SHOULD)); } Weight weight = s.CreateNormalizedWeight(q); Scorer scorer = weight.GetScorer(s.m_leafContexts[0], null); // First pass: just use .NextDoc() to gather all hits IList<ScoreDoc> hits = new List<ScoreDoc>(); while (scorer.NextDoc() != DocIdSetIterator.NO_MORE_DOCS) { hits.Add(new ScoreDoc(scorer.DocID, scorer.GetScore())); } if (VERBOSE) { Console.WriteLine(" " + hits.Count + " hits"); } // Now, randomly next/advance through the list and // verify exact match: for (int iter2 = 0; iter2 < 10; iter2++) { weight = s.CreateNormalizedWeight(q); scorer = weight.GetScorer(s.m_leafContexts[0], null); if (VERBOSE) { Console.WriteLine(" iter2=" + iter2); } int upto = -1; while (upto < hits.Count) { int nextUpto; int nextDoc; int left = hits.Count - upto; if (left == 1 || Random().NextBoolean()) { // next nextUpto = 1 + upto; nextDoc = scorer.NextDoc(); } else { // advance int inc = TestUtil.NextInt(Random(), 1, left - 1); nextUpto = inc + upto; nextDoc = scorer.Advance(hits[nextUpto].Doc); } if (nextUpto == hits.Count) { Assert.AreEqual(DocIdSetIterator.NO_MORE_DOCS, nextDoc); } else { ScoreDoc hit = hits[nextUpto]; Assert.AreEqual(hit.Doc, nextDoc); // Test for precise float equality: Assert.IsTrue(hit.Score == scorer.GetScore(), "doc " + hit.Doc + " has wrong score: expected=" + hit.Score + " actual=" + scorer.GetScore()); } upto = nextUpto; } } } r.Dispose(); d.Dispose(); }
public virtual TopDocs Search(Weight weight, Filter filter, int n) { return local.Search(weight, filter, n); }
/// <summary> /// Returns a filtered <see cref="Scorer"/> based on this strategy. /// </summary> /// <param name="context"> /// the <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param> /// <param name="weight"> the <see cref="FilteredQuery"/> <see cref="Weight"/> to create the filtered scorer. </param> /// <param name="docIdSet"> the filter <see cref="DocIdSet"/> to apply </param> /// <returns> a filtered scorer /// </returns> /// <exception cref="IOException"> if an <see cref="IOException"/> occurs </exception> public abstract Scorer FilteredScorer(AtomicReaderContext context, Weight weight, DocIdSet docIdSet);
/* The following abstract methods were added as a workaround for GCJ bug #15411. * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=15411 */ abstract public void Search(Weight weight, Filter filter, HitCollector results);
/// <summary> /// Returns a filtered <see cref="BulkScorer"/> based on this /// strategy. This is an optional method: the default /// implementation just calls <see cref="FilteredScorer(AtomicReaderContext, Weight, DocIdSet)"/> and /// wraps that into a <see cref="BulkScorer"/>. /// </summary> /// <param name="context"> /// the <see cref="AtomicReaderContext"/> for which to return the <see cref="Scorer"/>. </param> /// <param name="weight"> the <see cref="FilteredQuery"/> <see cref="Weight"/> to create the filtered scorer. </param> /// <param name="scoreDocsInOrder"> <c>true</c> to score docs in order </param> /// <param name="docIdSet"> the filter <see cref="DocIdSet"/> to apply </param> /// <returns> a filtered top scorer </returns> public virtual BulkScorer FilteredBulkScorer(AtomicReaderContext context, Weight weight, bool scoreDocsInOrder, DocIdSet docIdSet) { Scorer scorer = FilteredScorer(context, weight, docIdSet); if (scorer is null) { return(null); } // this impl always scores docs in order, so we can // ignore scoreDocsInOrder: return(new Weight.DefaultBulkScorer(scorer)); }
abstract public Explanation Explain(Weight weight, int doc);
public override BulkScorer FilteredBulkScorer(AtomicReaderContext context, Weight weight, bool scoreDocsInOrder, DocIdSet docIdSet) // ignored (we always top-score in order) { IBits filterAcceptDocs = docIdSet.Bits; if (filterAcceptDocs is null) { // Filter does not provide random-access Bits; we // must fallback to leapfrog: return(LEAP_FROG_QUERY_FIRST_STRATEGY.FilteredBulkScorer(context, weight, scoreDocsInOrder, docIdSet)); } Scorer scorer = weight.GetScorer(context, null); return(scorer is null ? null : new QueryFirstBulkScorer(scorer, filterAcceptDocs)); }
public override Explanation Explain(Weight weight, int doc) { throw new System.NotSupportedException(Lucene.Net.Search.JustCompileSearch.UNSUPPORTED_MSG); }
/// <summary> /// Returns a <see cref="Weight"/> that applies the filter to the enclosed query's <see cref="Weight"/>. /// this is accomplished by overriding the <see cref="Scorer"/> returned by the <see cref="Weight"/>. /// </summary> public override Weight CreateWeight(IndexSearcher searcher) { Weight weight = query.CreateWeight(searcher); return(new WeightAnonymousClass(this, weight)); }
public override TopFieldDocs Search(Weight weight, Filter filter, int n, Sort sort) { throw new System.NotSupportedException(Lucene.Net.Search.JustCompileSearch.UNSUPPORTED_MSG); }
public WeightAnonymousClass(FilteredQuery outerInstance, Weight weight) { this.outerInstance = outerInstance; this.weight = weight; }
// inherit javadoc public override TopDocs Search(Weight weight, Filter filter, int nDocs) { if (nDocs <= 0) { throw new System.ArgumentException("nDocs must be > 0"); } nDocs = Math.Min(nDocs, reader.MaxDoc()); TopScoreDocCollector collector = TopScoreDocCollector.create(nDocs, !weight.ScoresDocsOutOfOrder()); Search(weight, filter, collector); return collector.TopDocs(); }
public virtual Explanation Explain(IndexReader reader, int doc) { int minShouldMatch = Enclosing_Instance.GetMinimumNumberShouldMatch(); ComplexExplanation sumExpl = new ComplexExplanation(); sumExpl.SetDescription("sum of:"); int coord = 0; int maxCoord = 0; float sum = 0.0f; bool fail = false; int shouldMatchCount = 0; for (int i = 0; i < weights.Count; i++) { BooleanClause c = (BooleanClause)Enclosing_Instance.clauses[i]; Weight w = (Weight)weights[i]; Explanation e = w.Explain(reader, doc); if (!c.IsProhibited()) { maxCoord++; } if (e.IsMatch()) { if (!c.IsProhibited()) { sumExpl.AddDetail(e); sum += e.GetValue(); coord++; } else { Explanation r = new Explanation(0.0f, "match on prohibited clause (" + c.GetQuery().ToString() + ")"); r.AddDetail(e); sumExpl.AddDetail(r); fail = true; } if (c.GetOccur().Equals(Occur.SHOULD)) { shouldMatchCount++; } } else if (c.IsRequired()) { Explanation r = new Explanation(0.0f, "no match on required clause (" + c.GetQuery().ToString() + ")"); r.AddDetail(e); sumExpl.AddDetail(r); fail = true; } } if (fail) { sumExpl.SetMatch(false); sumExpl.SetValue(0.0f); sumExpl.SetDescription("Failure to meet condition(s) of required/prohibited clause(s)"); return(sumExpl); } else if (shouldMatchCount < minShouldMatch) { sumExpl.SetMatch(false); sumExpl.SetValue(0.0f); sumExpl.SetDescription("Failure to match minimum number " + "of optional clauses: " + minShouldMatch); return(sumExpl); } sumExpl.SetMatch(coord > 0); sumExpl.SetValue(sum); float coordFactor = similarity.Coord(coord, maxCoord); if (coordFactor == 1.0f) { // coord is no-op return(sumExpl); } // eliminate wrapper else { ComplexExplanation result = new ComplexExplanation(sumExpl.IsMatch(), sum * coordFactor, "product of:"); result.AddDetail(sumExpl); result.AddDetail(new Explanation(coordFactor, "coord(" + coord + "/" + maxCoord + ")")); return(result); } }
/// <summary> Just like <see cref="Search(Weight, Filter, int, Sort)" />, but you choose /// whether or not the fields in the returned <see cref="FieldDoc" /> instances /// should be set by specifying fillFields.<br/> /// /// <p/> /// NOTE: this does not compute scores by default. If you need scores, create /// a <see cref="TopFieldCollector" /> instance by calling /// <see cref="TopFieldCollector.create" /> and then pass that to /// <see cref="Search(Weight, Filter, Collector)" />. /// <p/> /// </summary> public virtual TopFieldDocs Search(Weight weight, Filter filter, int nDocs, Sort sort, bool fillFields) { nDocs = Math.Min(nDocs, reader.MaxDoc()); SortField[] fields = sort.fields; bool legacy = false; for (int i = 0; i < fields.Length; i++) { SortField field = fields[i]; System.String fieldname = field.GetField(); int type = field.GetType(); // Resolve AUTO into its true type if (type == SortField.AUTO) { int autotype = SortField.DetectFieldType(reader, fieldname); if (autotype == SortField.STRING) { fields[i] = new SortField(fieldname, field.GetLocale(), field.GetReverse()); } else { fields[i] = new SortField(fieldname, autotype, field.GetReverse()); } } if (field.GetUseLegacySearch()) { legacy = true; } } if (legacy) { // Search the single top-level reader TopDocCollector collector = new TopFieldDocCollector(reader, sort, nDocs); HitCollectorWrapper hcw = new HitCollectorWrapper(collector); hcw.SetNextReader(reader, 0); if (filter == null) { Scorer scorer = weight.Scorer(reader, true, true); if (scorer != null) { scorer.Score(hcw); } } else { SearchWithFilter(reader, weight, filter, hcw); } return (TopFieldDocs) collector.TopDocs(); } TopFieldCollector collector2 = TopFieldCollector.create(sort, nDocs, fillFields, fieldSortDoTrackScores, fieldSortDoMaxScore, !weight.ScoresDocsOutOfOrder()); Search(weight, filter, collector2); return (TopFieldDocs) collector2.TopDocs(); }
internal MatchAllScorer(MatchAllDocsQuery outerInstance, IndexReader reader, IBits liveDocs, Weight w, float score) : base(w) { this.outerInstance = outerInstance; this.liveDocs = liveDocs; this.score = score; maxDoc = reader.MaxDoc; }
private void SearchWithFilter(IndexReader reader, Weight weight, Filter filter, Collector collector) { System.Diagnostics.Debug.Assert(filter != null); Scorer scorer = weight.Scorer(reader, true, false); if (scorer == null) { return; } int docID = scorer.DocID(); System.Diagnostics.Debug.Assert(docID == -1 || docID == DocIdSetIterator.NO_MORE_DOCS); // CHECKME: use ConjunctionScorer here? DocIdSet filterDocIdSet = filter.GetDocIdSet(reader); if (filterDocIdSet == null) { // this means the filter does not accept any documents. return; } DocIdSetIterator filterIter = filterDocIdSet.Iterator(); if (filterIter == null) { // this means the filter does not accept any documents. return; } int filterDoc = filterIter.NextDoc(); int scorerDoc = scorer.Advance(filterDoc); collector.SetScorer(scorer); while (true) { if (scorerDoc == filterDoc) { // Check if scorer has exhausted, only before collecting. if (scorerDoc == DocIdSetIterator.NO_MORE_DOCS) { break; } collector.Collect(scorerDoc); filterDoc = filterIter.NextDoc(); scorerDoc = scorer.Advance(filterDoc); } else if (scorerDoc > filterDoc) { filterDoc = filterIter.Advance(scorerDoc); } else { scorerDoc = scorer.Advance(filterDoc); } } }
public SimpleScorer(Weight weight) : base(weight) { }