public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // Build a private, single-segment context; rewrite, createWeight and
    // scoring all happen against it rather than the top-level reader.
    AtomicReaderContext privateContext = context.AtomicReader.AtomicContext;
    IndexSearcher privateSearcher = new IndexSearcher(privateContext);
    Weight weight = privateSearcher.CreateNormalizedWeight(Query_Renamed);
    return new DocIdSetAnonymousInnerClassHelper(this, acceptDocs, privateContext, weight);
}
public DocIdSetAnonymousInnerClassHelper(QueryWrapperFilter outerInstance, Bits acceptDocs, AtomicReaderContext privateContext, Lucene.Net.Search.Weight weight)
{
    // Capture everything the anonymous DocIdSet needs to produce its iterator later.
    OuterInstance = outerInstance;
    AcceptDocs = acceptDocs;
    PrivateContext = privateContext;
    Weight = weight;
}
public override BulkScorer BulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, Bits acceptDocs)
{
    // Pull the delegate's bulk scorer first; null means no matches in this segment.
    BulkScorer wrapped = @in.BulkScorer(context, scoreDocsInOrder, acceptDocs);
    if (wrapped == null)
    {
        return null;
    }

    if (AssertingBulkScorer.ShouldWrap(wrapped))
    {
        // The incoming scorer already has a specialized BulkScorer
        // implementation, so assert through it directly:
        wrapped = AssertingBulkScorer.Wrap(new Random(Random.Next()), wrapped);
    }
    else if (Random.NextBoolean())
    {
        // Randomly let the base class wrap the scorer instead, so we
        // exercise AssertingScorer:
        wrapped = base.BulkScorer(context, scoreDocsInOrder, acceptDocs);
    }

    if (!scoreDocsInOrder && Random.NextBoolean())
    {
        // The caller claims it can handle out-of-order docs; confirm that
        // by pulling docs and randomly shuffling them before collection:
        wrapped = new AssertingBulkOutOfOrderScorer(new Random(Random.Next()), wrapped);
    }
    return wrapped;
}
public override SimScorer DoSimScorer(SimWeight stats, AtomicReaderContext context)
{
    // Build one sub-scorer per wrapped similarity, pairing each with its own sub-stats.
    MultiStats multiStats = (MultiStats)stats;
    SimScorer[] scorers = new SimScorer[Sims.Length];
    for (int s = 0; s < scorers.Length; s++)
    {
        scorers[s] = Sims[s].DoSimScorer(multiStats.SubStats[s], context);
    }
    return new MultiSimScorer(scorers);
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // Matches exactly one document (Doc), unless acceptDocs rejects it.
    FixedBitSet result = new FixedBitSet(context.Reader.MaxDoc);
    if (acceptDocs == null || acceptDocs.Get(Doc))
    {
        result.Set(Doc);
    }
    return result;
}
public override OrdinalsSegmentReader GetReader(AtomicReaderContext context)
{
    // Fall back to the shared empty BinaryDocValues when this segment
    // has no values for the field.
    BinaryDocValues segmentValues = context.AtomicReader.GetBinaryDocValues(field);
    if (segmentValues == null)
    {
        segmentValues = DocValues.EMPTY_BINARY;
    }
    return new OrdinalsSegmentReaderAnonymousInnerClassHelper(this, segmentValues);
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // A null DocIdSet is the documented way to say "this filter matches no documents".
    return null;
}
public override Explanation Explain(AtomicReaderContext context, int doc)
{
    // Score explanations are not supported by this weight.
    throw new System.NotSupportedException();
}
/// <summary>
/// Creates a <seealso cref="DocIdSet"/> enumerating the documents that should be
/// permitted in search results. <b>NOTE:</b> <code>null</code> can be
/// returned if no documents are accepted by this filter.
/// <p>
/// Note: this method is called once per segment in the index during
/// searching. The returned <seealso cref="DocIdSet"/> must refer to document IDs
/// for that segment, not for the top-level reader.
/// </summary>
/// <param name="context"> an <seealso cref="AtomicReaderContext"/> instance opened on the index currently
///          searched on. Note, it is likely that the provided reader info does not
///          represent the whole underlying index, i.e. if the index has more than
///          one segment the given reader only represents a single segment.
///          The provided context is always an atomic context, so you can call
///          <seealso cref="AtomicReader#fields()"/> on the context's reader, for example. </param>
/// <param name="acceptDocs">
///          Bits that represent the allowable docs to match (typically deleted docs
///          but possibly filtering other documents) </param>
/// <returns> a DocIdSet that provides the documents which should be permitted or
///         prohibited in search results. <b>NOTE:</b> <code>null</code> should be returned if
///         the filter doesn't accept any documents; otherwise internal optimizations might not apply
///         in the case an <i>empty</i> <seealso cref="DocIdSet"/> is returned. </returns>
public abstract DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs);
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs)
{
    // Per-document scoring is unsupported: this query can only run as a top scorer.
    throw new System.NotSupportedException();
}
/// <summary>
/// Builds a DrillSidewaysScorer from the base query's scorer plus one
/// DocsAndCost entry per drill-down dimension. Returns null when the base
/// query matches nothing in this segment, or when more than one dimension
/// has no matches (no hits nor sideways counts are possible then).
/// </summary>
public override BulkScorer BulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, Bits acceptDocs)
{
    // TODO: it could be better if we take acceptDocs
    // into account instead of baseScorer?
    Scorer baseScorer = baseWeight.Scorer(context, acceptDocs);

    DrillSidewaysScorer.DocsAndCost[] dims = new DrillSidewaysScorer.DocsAndCost[drillDowns.Length];
    // Counts dimensions that produced no iterator (i.e. matched nothing).
    int nullCount = 0;
    for (int dim = 0; dim < dims.Length; dim++)
    {
        dims[dim] = new DrillSidewaysScorer.DocsAndCost();
        dims[dim].sidewaysCollector = outerInstance.drillSidewaysCollectors[dim];
        if (drillDowns[dim] is Filter)
        {
            // Pass null for acceptDocs because we already
            // passed it to baseScorer and baseScorer is
            // MUST'd here
            DocIdSet dis = ((Filter)drillDowns[dim]).GetDocIdSet(context, null);
            if (dis == null)
            {
                continue;
            }
            Bits bits = dis.GetBits();
            if (bits != null)
            {
                // TODO: this logic is too naive: the
                // existence of bits() in DIS today means
                // either "I'm a cheap FixedBitSet so apply me down
                // low as you decode the postings" or "I'm so
                // horribly expensive so apply me after all
                // other Query/Filter clauses pass"

                // Filter supports random access; use that to
                // prevent .advance() on costly filters:
                dims[dim].bits = bits;

                // TODO: Filter needs to express its expected
                // cost somehow, before pulling the iterator;
                // we should use that here to set the order to
                // check the filters:
            }
            else
            {
                DocIdSetIterator disi = dis.GetIterator();
                if (disi == null)
                {
                    nullCount++;
                    continue;
                }
                dims[dim].disi = disi;
            }
        }
        else
        {
            // Non-Filter dimension: drive it through its Weight's scorer
            // (a Scorer is also a DocIdSetIterator).
            DocIdSetIterator disi = ((Weight)drillDowns[dim]).Scorer(context, null);
            if (disi == null)
            {
                nullCount++;
                continue;
            }
            dims[dim].disi = disi;
        }
    }

    // If more than one dim has no matches, then there
    // are no hits nor drill-sideways counts. Or, if we
    // have only one dim and that dim has no matches,
    // same thing.
    //if (nullCount > 1 || (nullCount == 1 && dims.length == 1)) {
    if (nullCount > 1)
    {
        return null;
    }

    // Sort drill-downs by most restrictive first:
    Array.Sort(dims);

    if (baseScorer == null)
    {
        return null;
    }

    return new DrillSidewaysScorer(context, baseScorer, outerInstance.drillDownCollector, dims, outerInstance.scoreSubDocsAtOnce);
}
/// <summary>
/// Creates a new <see cref="Similarity.SimScorer"/> to score matching documents from a segment of the inverted index.
/// </summary>
/// <param name="weight"> collection information from <see cref="ComputeWeight(float, CollectionStatistics, TermStatistics[])"/> </param>
/// <param name="context"> segment of the inverted index to be scored </param>
/// <returns> sloppy <see cref="SimScorer"/> for scoring documents across <c>context</c> </returns>
/// <exception cref="System.IO.IOException"> if there is a low-level I/O error </exception>
public abstract SimScorer GetSimScorer(SimWeight weight, AtomicReaderContext context);
public DocIdSetAnonymousInnerClassHelper2(FilterAnonymousInnerClassHelper4 outerInstance, AtomicReaderContext context)
{
    // Remember the owning filter and the segment context for later use.
    OuterInstance = outerInstance;
    Context = context;
}
public PositionCheckSpan(SpanPositionCheckQuery outerInstance, AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts)
{
    OuterInstance = outerInstance;
    // Enumeration is delegated to the wrapped (match) query's spans.
    Spans = outerInstance.match.GetSpans(context, acceptDocs, termContexts);
}
/// <summary>
/// Returns an <seealso cref="OrdinalsSegmentReader"/> for the given segment.
/// NOTE(review): the previous summary said "Set current atomic reader", which
/// does not match this getter-style signature — confirm the intended contract.
/// </summary>
public abstract OrdinalsSegmentReader GetReader(AtomicReaderContext context);
public override Spans GetSpans(AtomicReaderContext context, Bits acceptDocs, IDictionary<Term, TermContext> termContexts)
{
    // Wrap the underlying match query's spans in a position-checking enumerator.
    return new PositionCheckSpan(this, context, acceptDocs, termContexts);
}
/// <summary>
/// Returns a <seealso cref="TermsEnum"/> positioned at this weight's term, or
/// null if the term does not exist in the given segment.
/// </summary>
internal TermsEnum GetTermsEnum(AtomicReaderContext context)
{
    TermState state = TermStates.Get(context.Ord);
    if (state == null)
    {
        // Term is not present in this segment; the assert guards against a
        // stale/mismatched TermContext.
        Debug.Assert(TermNotInReader(context.AtomicReader, OuterInstance._term), "no termstate found but term exists in reader term=" + OuterInstance._term);
        return null;
    }
    TermsEnum termsEnum = context.AtomicReader.Terms(OuterInstance._term.Field).Iterator(null);
    termsEnum.SeekExact(OuterInstance._term.Bytes, state);
    return termsEnum;
}
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs)
{
    // The weight must be used against the same top-level reader it was built from.
    // FIX: the assertion message previously lacked the closing ")" after the
    // second top-reader, producing an unbalanced diagnostic string.
    Debug.Assert(TermStates.TopReaderContext == ReaderUtil.GetTopLevelContext(context), "The top-reader used to create Weight (" + TermStates.TopReaderContext + ") is not the same as the current reader's top-reader (" + ReaderUtil.GetTopLevelContext(context) + ")");
    TermsEnum termsEnum = GetTermsEnum(context);
    if (termsEnum == null)
    {
        // Term absent from this segment: no scorer.
        return null;
    }
    DocsEnum docs = termsEnum.Docs(acceptDocs, null);
    Debug.Assert(docs != null);
    return new TermScorer(this, docs, Similarity.DoSimScorer(Stats, context));
}
public override void SetNextReader(AtomicReaderContext context)
{
    // Remember the new segment's doc-id base offset.
    @base = context.DocBase;
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // Record that the filter was consulted, then match nothing (empty bit set).
    WasCalled_Renamed = true;
    return new FixedBitSet(context.Reader.MaxDoc);
}
public override SimScorer DoSimScorer(SimWeight stats, AtomicReaderContext context)
{
    // Single-stats case: score the one field directly.
    if (!(stats is MultiSimilarity.MultiStats))
    {
        BasicStats single = (BasicStats)stats;
        return new BasicSimScorer(this, single, context.AtomicReader.GetNormValues(single.field));
    }

    // Multi-term case (e.g. a phrase): build one scorer per sub-stats and
    // return their summation, scoring almost as if it were a boolean query.
    SimWeight[] parts = ((MultiSimilarity.MultiStats)stats).SubStats;
    SimScorer[] partScorers = new SimScorer[parts.Length];
    for (int i = 0; i < partScorers.Length; i++)
    {
        BasicStats partStats = (BasicStats)parts[i];
        partScorers[i] = new BasicSimScorer(this, partStats, context.AtomicReader.GetNormValues(partStats.field));
    }
    return new MultiSimilarity.MultiSimScorer(partScorers);
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // Delegate to an anonymous DocIdSet bound to this filter and the segment context.
    return new DocIdSetAnonymousInnerClassHelper2(this, context);
}
public void SetNextReader(AtomicReaderContext context)
{
    // Intentionally empty: nothing to do when the segment changes.
}
/// <summary>
/// Creates a new <seealso cref="Similarity.SimScorer"/> to score matching documents from a segment of the inverted index.
/// </summary>
/// <param name="weight"> collection information from <seealso cref="#computeWeight(float, CollectionStatistics, TermStatistics...)"/> </param>
/// <param name="context"> segment of the inverted index to be scored </param>
/// <returns> a <seealso cref="SimScorer"/> for scoring documents across <code>context</code> </returns>
/// <exception cref="IOException"> if there is a low-level I/O error </exception>
public abstract SimScorer DoSimScorer(SimWeight weight, AtomicReaderContext context);
public override sealed SimScorer DoSimScorer(SimWeight weight, AtomicReaderContext context)
{
    // Unwrap the per-field weight and delegate scoring to the field's own
    // similarity (held in the @delegate field).
    PerFieldSimWeight fieldWeight = (PerFieldSimWeight)weight;
    return fieldWeight.@delegate.DoSimScorer(fieldWeight.DelegateWeight, context);
}
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs)
{
    // Wrap the delegate's scorer so scoring invariants are asserted during tests.
    Scorer delegateScorer = @in.Scorer(context, acceptDocs);
    return AssertingScorer.Wrap(new Random(Random.Next()), delegateScorer);
}
public override Explanation Explain(AtomicReaderContext context, int doc)
{
    // Explanations come straight from the base query's weight.
    return baseWeight.Explain(context, doc);
}
public override Scorer GetScorer(AtomicReaderContext context, IBits acceptDocs)
{
    // Build a payload-aware span scorer over this segment's term spans.
    TermSpans spans = (TermSpans)m_query.GetSpans(context, acceptDocs, m_termContexts);
    return new PayloadTermSpanScorer(this, spans, this, m_similarity.GetSimScorer(m_stats, context));
}
public override FunctionValues GetValues(IDictionary ignored, AtomicReaderContext ignored2)
{
    // Both parameters are unused: the produced values depend only on this source.
    return new DoubleDocValuesAnonymousInnerClassHelper(this);
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // A null acceptDocs is treated as "all docs live".
    if (acceptDocs == null)
    {
        acceptDocs = new Bits_MatchAllBits(5);
    }
    // Candidate docs 1 and 3 survive only if still accepted.
    BitArray result = new BitArray(5);
    if (acceptDocs.Get(1))
    {
        result.SafeSet(1, true);
    }
    if (acceptDocs.Get(3))
    {
        result.SafeSet(3, true);
    }
    return new DocIdBitSet(result);
}
public override BulkScorer BulkScorer(AtomicReaderContext context, bool scoreDocsInOrder, Bits acceptDocs)
{
    // scoreDocsInOrder/acceptDocs are ignored; the anonymous bulk scorer
    // drives collection itself.
    return new BulkScorerAnonymousInnerClassHelper(this);
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // The test index has no deletions, so no acceptDocs should be supplied here.
    Assert.IsNull(acceptDocs, "acceptDocs should be null, as we have an index without deletions");
    // Accept all five documents.
    return new DocIdBitSet(new BitArray(5, true));
}
public override Scorer Scorer(AtomicReaderContext context, Bits acceptDocs)
{
    // Per-document scoring is deliberately unsupported here.
    throw new System.NotSupportedException();
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // One call in ten randomly exercises the "Bits is null" code path downstream.
    bool nullBitset = Random().Next(10) == 5;
    AtomicReader reader = context.AtomicReader;
    DocsEnum postings = reader.TermDocsEnum(new Term("field", "0"));
    if (postings == null)
    {
        // No docs contain the term: a null DocIdSet means "matches nothing".
        return null;
    }
    // Collect every matching doc id into a bit set.
    BitArray matches = new BitArray(reader.MaxDoc);
    int docId;
    while ((docId = postings.NextDoc()) != DocsEnum.NO_MORE_DOCS)
    {
        matches.SafeSet(docId, true);
    }
    return new DocIdSetAnonymousInnerClassHelper(this, nullBitset, reader, matches);
}
public override DocIdSet GetDocIdSet(AtomicReaderContext context, Bits acceptDocs)
{
    // Wrap an inner set (which, per its name, presumably yields a null
    // iterator — confirm in the helper class) inside a filtered DocIdSet.
    DocIdSet inner = new DocIdSetAnonymousInnerClassHelper2(this);
    return new FilteredDocIdSetAnonymousInnerClassHelper2(this, inner);
}
public virtual void SetNextReader(AtomicReaderContext context)
{
    // Forward the segment transition to the wrapped collector.
    c.SetNextReader(context);
}