public override Explanation Explain(Weight weight, int doc)
{
    int n = ReaderUtil.SubIndex(doc, docStarts);
    int deBasedDoc = doc - docStarts[n];
    return weight.Explain(subReaders[n], deBasedDoc);
}
/// <summary>
/// Expert: low-level implementation method.
/// Returns an <see cref="Explanation"/> that describes how <paramref name="doc"/> scored against
/// <paramref name="weight"/>.
///
/// <para/>This is intended to be used in developing <see cref="Similarities.Similarity"/> implementations,
/// and, for good performance, should not be displayed with every hit.
/// Computing an explanation is as expensive as executing the query over the
/// entire index.
/// <para/>Applications should call <see cref="IndexSearcher.Explain(Query, int)"/>.
/// </summary>
/// <exception cref="BooleanQuery.TooManyClausesException"> If a query would exceed
/// <see cref="BooleanQuery.MaxClauseCount"/> clauses. </exception>
protected virtual Explanation Explain(Weight weight, int doc)
{
    int n = ReaderUtil.SubIndex(doc, m_leafContexts);
    AtomicReaderContext ctx = m_leafContexts[n];
    int deBasedDoc = doc - ctx.DocBase;
    return weight.Explain(ctx, deBasedDoc);
}
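Every variant of this method leans on ReaderUtil.SubIndex to translate a top-level doc id into the leaf (or slice) that owns it. The standalone sketch below reimplements that lookup over a plain docStarts array so the translation is easy to follow; it is an illustrative approximation rather than Lucene.NET's actual source, and the sample array contents are made up.

using System;

public static class SubIndexSketch
{
    // Binary-search a sorted array of slice start ids for the slice
    // containing globalDoc, mirroring what ReaderUtil.SubIndex computes.
    public static int SubIndex(int globalDoc, int[] docStarts)
    {
        int lo = 0, hi = docStarts.Length - 1;
        while (hi >= lo)
        {
            int mid = lo + (hi - lo) / 2;
            if (globalDoc < docStarts[mid])
            {
                hi = mid - 1;
            }
            else if (globalDoc > docStarts[mid])
            {
                lo = mid + 1;
            }
            else
            {
                // Exact hit on a slice start: scan past any empty slices
                // that share the same start id.
                while (mid + 1 < docStarts.Length && docStarts[mid + 1] == docStarts[mid])
                {
                    mid++;
                }
                return mid;
            }
        }
        return hi; // globalDoc falls inside the slice starting at docStarts[hi]
    }

    public static void Main()
    {
        int[] docStarts = { 0, 7, 20 };  // three slices starting at global ids 0, 7, 20
        int n = SubIndex(11, docStarts); // -> 1
        Console.WriteLine($"slice={n} localDoc={11 - docStarts[n]}"); // slice=1 localDoc=4
    }
}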
public override bool SkipTo(int target)
{
    if (leafOrd >= numLeaves)
    {
        return false;
    }

    int subIndex = ReaderUtil.SubIndex(target, leaves);
    if (Debugging.AssertsEnabled)
    {
        Debugging.Assert(subIndex >= leafOrd);
    }
    if (subIndex != leafOrd)
    {
        AtomicReaderContext ctx = leaves[subIndex];
        current = query.GetSpans(ctx, ((AtomicReader)ctx.Reader).LiveDocs, termContexts);
        leafOrd = subIndex;
    }
    else if (current == null)
    {
        AtomicReaderContext ctx = leaves[leafOrd];
        current = query.GetSpans(ctx, ((AtomicReader)ctx.Reader).LiveDocs, termContexts);
    }

    while (true)
    {
        if (target < leaves[leafOrd].DocBase)
        {
            // target was in the previous slice
            if (current.Next())
            {
                return true;
            }
        }
        else if (current.SkipTo(target - leaves[leafOrd].DocBase))
        {
            return true;
        }

        if (++leafOrd < numLeaves)
        {
            AtomicReaderContext ctx = leaves[leafOrd];
            current = query.GetSpans(ctx, ((AtomicReader)ctx.Reader).LiveDocs, termContexts);
        }
        else
        {
            current = null;
            break;
        }
    }

    return false;
}
public override bool SkipTo(int target)
{
    if (LeafOrd >= NumLeaves)
    {
        return false;
    }

    int subIndex = ReaderUtil.SubIndex(target, Leaves);
    Debug.Assert(subIndex >= LeafOrd);
    if (subIndex != LeafOrd)
    {
        AtomicReaderContext ctx = Leaves[subIndex];
        Current = Query.GetSpans(ctx, ((AtomicReader)ctx.Reader).LiveDocs, TermContexts);
        LeafOrd = subIndex;
    }
    else if (Current == null)
    {
        AtomicReaderContext ctx = Leaves[LeafOrd];
        Current = Query.GetSpans(ctx, ((AtomicReader)ctx.Reader).LiveDocs, TermContexts);
    }

    while (true)
    {
        if (target < Leaves[LeafOrd].DocBase)
        {
            // target was in the previous slice
            if (Current.Next())
            {
                return true;
            }
        }
        else if (Current.SkipTo(target - Leaves[LeafOrd].DocBase))
        {
            return true;
        }

        if (++LeafOrd < NumLeaves)
        {
            AtomicReaderContext ctx = Leaves[LeafOrd];
            Current = Query.GetSpans(ctx, ((AtomicReader)ctx.Reader).LiveDocs, TermContexts);
        }
        else
        {
            Current = null;
            break;
        }
    }

    return false;
}
public override Explanation Explain(IndexSearcher searcher, Explanation firstPassExplanation, int docID)
{
    Explanation result = base.Explain(searcher, firstPassExplanation, docID);

    IList<AtomicReaderContext> leaves = searcher.IndexReader.Leaves;
    int subReader = ReaderUtil.SubIndex(docID, leaves);
    AtomicReaderContext readerContext = leaves[subReader];
    int docIDInSegment = docID - readerContext.DocBase;

    var context = new Dictionary<string, object>();
    var fakeScorer = new FakeScorer { score = firstPassExplanation.Value, doc = docIDInSegment };
    context["scorer"] = fakeScorer;

    foreach (string variable in expression.variables)
    {
        result.AddDetail(new Explanation(
            (float)bindings.GetValueSource(variable).GetValues(context, readerContext).DoubleVal(docIDInSegment),
            "variable \"" + variable + "\""));
    }

    return result;
}
public virtual void TestSpanScorerZeroSloppyFreq()
{
    bool ordered = true;
    int slop = 1;
    IndexReaderContext topReaderContext = searcher.TopReaderContext;
    IList<AtomicReaderContext> leaves = topReaderContext.Leaves;
    int subIndex = ReaderUtil.SubIndex(11, leaves);
    for (int i = 0, c = leaves.Count; i < c; i++)
    {
        AtomicReaderContext ctx = leaves[i];
        Similarity sim = new DefaultSimilarityAnonymousInnerClassHelper(this);
        Similarity oldSim = searcher.Similarity;
        Scorer spanScorer;
        try
        {
            searcher.Similarity = sim;
            SpanNearQuery snq = new SpanNearQuery(
                new SpanQuery[] { MakeSpanTermQuery("t1"), MakeSpanTermQuery("t2") }, slop, ordered);
            spanScorer = searcher.CreateNormalizedWeight(snq).GetScorer(ctx, ((AtomicReader)ctx.Reader).LiveDocs);
        }
        finally
        {
            searcher.Similarity = oldSim;
        }
        if (i == subIndex)
        {
            Assert.IsTrue(spanScorer.NextDoc() != DocIdSetIterator.NO_MORE_DOCS, "first doc");
            Assert.AreEqual(spanScorer.DocID + ctx.DocBase, 11, "first doc number");
            float score = spanScorer.GetScore();
            Assert.IsTrue(score == 0.0f, "first doc score should be zero, " + score);
        }
        else
        {
            Assert.IsTrue(spanScorer.NextDoc() == DocIdSetIterator.NO_MORE_DOCS, "no second doc");
        }
    }
}
/// <summary>
/// Returns the weight for the current <paramref name="docId"/> as computed
/// by the <see cref="weightsValueSource"/>
/// </summary>
protected internal override long GetWeight(Document doc, int docId)
{
    if (currentWeightValues == null)
    {
        return 0;
    }
    int subIndex = ReaderUtil.SubIndex(docId, starts);
    if (subIndex != currentLeafIndex)
    {
        currentLeafIndex = subIndex;
        try
        {
            currentWeightValues = outerInstance.weightsValueSource.GetValues(
                new Dictionary<string, object>(), leaves[currentLeafIndex]);
        }
        catch (IOException e)
        {
            throw new Exception(e.ToString(), e);
        }
    }
    return currentWeightValues.Int64Val(docId - starts[subIndex]);
}
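GetWeight re-fetches FunctionValues only when the target doc crosses into a new leaf, because GetValues materializes a fresh per-segment view. Below is a stripped-down sketch of the same caching pattern, minus the suggester plumbing; the LeafCachedWeights class name and the reader/weightsSource inputs are invented for illustration.

using System.Collections.Generic;
using Lucene.Net.Index;
using Lucene.Net.Queries.Function;

// Sketch: cache FunctionValues keyed by the leaf index, re-fetching
// only when a requested global doc id lands in a different leaf.
public class LeafCachedWeights
{
    private readonly IList<AtomicReaderContext> leaves;
    private readonly ValueSource weightsSource;
    private FunctionValues cached;
    private int cachedLeaf = -1;

    public LeafCachedWeights(IndexReader reader, ValueSource weightsSource)
    {
        this.leaves = reader.Leaves;
        this.weightsSource = weightsSource;
    }

    public long WeightFor(int globalDoc)
    {
        int leaf = ReaderUtil.SubIndex(globalDoc, leaves);
        if (leaf != cachedLeaf)
        {
            cachedLeaf = leaf;
            cached = weightsSource.GetValues(new Dictionary<string, object>(), leaves[leaf]);
        }
        // Translate the global id to a leaf-local id before reading the value.
        return cached.Int64Val(globalDoc - leaves[leaf].DocBase);
    }
}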
public static BytesRef GetBinaryValue(this IndexReader? reader, string field, int docId, BytesRef? result = null)
{
    if (result != null)
    {
        Array.Clear(result.Bytes, 0, result.Bytes.Length);
    }
    else
    {
        result = new BytesRef();
    }

    if (reader == null || docId < 0)
    {
        return result;
    }

    var leaves = reader.Leaves;
    if (leaves.Count == 1)
    {
        // GetBinaryDocValues returns null when the field has no binary doc values
        var docValues = leaves[0].AtomicReader.GetBinaryDocValues(field);
        docValues?.Get(docId, result);
    }
    else if (leaves.Count > 1)
    {
        var subIndex = ReaderUtil.SubIndex(docId, leaves);
        var subLeave = leaves[subIndex];
        var subValues = subLeave.AtomicReader.GetBinaryDocValues(field);
        subValues?.Get(docId - subLeave.DocBase, result);
    }
    return result;
}
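A possible call site for the extension above, assuming dir is an open Lucene.Net.Store.Directory and "payload" is a binary doc-values field in the index (both names are illustrative):

using (DirectoryReader reader = DirectoryReader.Open(dir))
{
    // The extension resolves global doc id 42 to the owning leaf internally.
    BytesRef value = reader.GetBinaryValue("payload", 42);
    Console.WriteLine(value.Utf8ToString());
}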
private IDictionary<int, object> HighlightField(string field, string[] contents, BreakIterator bi,
    BytesRef[] terms, int[] docids, IList<AtomicReaderContext> leaves, int maxPassages, Query query)
{
    IDictionary<int, object> highlights = new Dictionary<int, object>();

    PassageFormatter fieldFormatter = GetFormatter(field);
    if (fieldFormatter is null)
    {
        // LUCENENET: Changed from NullPointerException to InvalidOperationException (which isn't caught anywhere outside of tests)
        throw IllegalStateException.Create("PassageFormatter cannot be null");
    }

    // check if we should do any multiterm processing
    Analyzer analyzer = GetIndexAnalyzer(field);
    CharacterRunAutomaton[] automata = Arrays.Empty<CharacterRunAutomaton>();
    if (analyzer != null)
    {
        automata = MultiTermHighlighting.ExtractAutomata(query, field);
    }

    // resize 'terms', where the last term is the multiterm matcher
    if (automata.Length > 0)
    {
        BytesRef[] newTerms = new BytesRef[terms.Length + 1];
        System.Array.Copy(terms, 0, newTerms, 0, terms.Length);
        terms = newTerms;
    }

    // we are processing in increasing docid order, so we only need to reinitialize stuff on segment changes
    // otherwise, we will just advance() existing enums to the new document in the same segment.
    DocsAndPositionsEnum[] postings = null;
    TermsEnum termsEnum = null;
    int lastLeaf = -1;

    for (int i = 0; i < docids.Length; i++)
    {
        string content = contents[i];
        if (content.Length == 0)
        {
            continue; // nothing to do
        }
        bi.SetText(content);
        int doc = docids[i];
        int leaf = ReaderUtil.SubIndex(doc, leaves);
        AtomicReaderContext subContext = leaves[leaf];
        AtomicReader r = subContext.AtomicReader;
        if (Debugging.AssertsEnabled)
        {
            Debugging.Assert(leaf >= lastLeaf); // increasing order
        }

        // if the segment has changed, we must initialize new enums.
        if (leaf != lastLeaf)
        {
            Terms t = r.GetTerms(field);
            if (t != null)
            {
                termsEnum = t.GetEnumerator();
                postings = new DocsAndPositionsEnum[terms.Length];
            }
        }
        if (termsEnum is null)
        {
            continue; // no terms for this field, nothing to do
        }

        // if there are multi-term matches, we have to initialize the "fake" enum for each document
        if (automata.Length > 0)
        {
            DocsAndPositionsEnum dp = MultiTermHighlighting.GetDocsEnum(analyzer.GetTokenStream(field, content), automata);
            dp.Advance(doc - subContext.DocBase);
            postings[terms.Length - 1] = dp; // last term is the multiterm matcher
        }

        Passage[] passages = HighlightDoc(field, terms, content.Length, bi, doc - subContext.DocBase,
            termsEnum, postings, maxPassages);
        if (passages.Length == 0)
        {
            // no passages were returned, so ask for a default summary
            passages = GetEmptyHighlight(field, bi, maxPassages);
        }
        if (passages.Length > 0)
        {
            highlights[doc] = fieldFormatter.Format(passages, content);
        }
        lastLeaf = leaf;
    }

    return highlights;
}
/// <summary>
/// Create the results based on the search hits.
/// Can be overridden by subclass to add particular behavior (e.g. weight transformation) </summary>
/// <exception cref="System.IO.IOException"> If there are problems reading fields from the underlying Lucene index. </exception>
protected internal virtual IList<LookupResult> CreateResults(IndexSearcher searcher, TopFieldDocs hits,
    int num, string charSequence, bool doHighlight, IEnumerable<string> matchedTokens, string prefixToken)
{
    BinaryDocValues textDV = MultiDocValues.GetBinaryValues(searcher.IndexReader, TEXT_FIELD_NAME);

    // This will just be null if app didn't pass payloads to build():
    // TODO: maybe just stored fields? they compress...
    BinaryDocValues payloadsDV = MultiDocValues.GetBinaryValues(searcher.IndexReader, "payloads");
    IList<AtomicReaderContext> leaves = searcher.IndexReader.Leaves;
    List<LookupResult> results = new List<LookupResult>();
    BytesRef scratch = new BytesRef();
    for (int i = 0; i < hits.ScoreDocs.Length; i++)
    {
        FieldDoc fd = (FieldDoc)hits.ScoreDocs[i];
        textDV.Get(fd.Doc, scratch);
        string text = scratch.Utf8ToString();
        long score = (long)fd.Fields[0];

        BytesRef payload;
        if (payloadsDV != null)
        {
            payload = new BytesRef();
            payloadsDV.Get(fd.Doc, payload);
        }
        else
        {
            payload = null;
        }

        // Must look up sorted-set by segment:
        int segment = ReaderUtil.SubIndex(fd.Doc, leaves);
        SortedSetDocValues contextsDV = leaves[segment].AtomicReader.GetSortedSetDocValues(CONTEXTS_FIELD_NAME);
        HashSet<BytesRef> contexts;
        if (contextsDV != null)
        {
            contexts = new HashSet<BytesRef>();
            contextsDV.SetDocument(fd.Doc - leaves[segment].DocBase);
            long ord;
            while ((ord = contextsDV.NextOrd()) != SortedSetDocValues.NO_MORE_ORDS)
            {
                BytesRef context = new BytesRef();
                contextsDV.LookupOrd(ord, context);
                contexts.Add(context);
            }
        }
        else
        {
            contexts = null;
        }

        LookupResult result;
        if (doHighlight)
        {
            object highlightKey = Highlight(text, matchedTokens, prefixToken);
            result = new LookupResult(highlightKey.ToString(), highlightKey, score, payload, contexts);
        }
        else
        {
            result = new LookupResult(text, score, payload, contexts);
        }

        results.Add(result);
    }

    return results;
}
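As the summary notes, CreateResults is an extension point. A hypothetical subclass that post-filters low-weight completions might look like the sketch below; the FilteringInfixSuggester name and MinWeight threshold are invented, and the three-argument base constructor is assumed to match Lucene.NET 4.8's AnalyzingInfixSuggester.

using System.Collections.Generic;
using System.Linq;
using Lucene.Net.Analysis;
using Lucene.Net.Search;
using Lucene.Net.Search.Suggest;
using Lucene.Net.Search.Suggest.Analyzing;
using Lucene.Net.Util;
using Directory = Lucene.Net.Store.Directory;

// Sketch: reuse the base class's per-segment result construction,
// then drop completions whose stored weight falls below a threshold.
public class FilteringInfixSuggester : AnalyzingInfixSuggester
{
    private const long MinWeight = 10; // illustrative cutoff

    public FilteringInfixSuggester(LuceneVersion version, Directory dir, Analyzer analyzer)
        : base(version, dir, analyzer)
    {
    }

    protected internal override IList<LookupResult> CreateResults(
        IndexSearcher searcher, TopFieldDocs hits, int num, string charSequence,
        bool doHighlight, IEnumerable<string> matchedTokens, string prefixToken)
    {
        IList<LookupResult> results = base.CreateResults(
            searcher, hits, num, charSequence, doHighlight, matchedTokens, prefixToken);
        return results.Where(r => r.Value >= MinWeight).ToList();
    }
}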
internal virtual void TestSort(bool useFrom)
{
    IndexReader reader = null;
    Directory dir = null;

    int numDocs = AtLeast(1000);
    //final int numDocs = AtLeast(50);

    string[] tokens = new string[] { "a", "b", "c", "d", "e" };

    if (VERBOSE)
    {
        Console.WriteLine("TEST: make index");
    }

    {
        dir = NewDirectory();
        RandomIndexWriter w = new RandomIndexWriter(Random(), dir);
        // w.setDoRandomForceMerge(false);
        // w.w.getConfig().SetMaxBufferedDocs(AtLeast(100));

        string[] content = new string[AtLeast(20)];
        for (int contentIDX = 0; contentIDX < content.Length; contentIDX++)
        {
            StringBuilder sb = new StringBuilder();
            int numTokens = TestUtil.NextInt(Random(), 1, 10);
            for (int tokenIDX = 0; tokenIDX < numTokens; tokenIDX++)
            {
                sb.Append(tokens[Random().Next(tokens.Length)]).Append(' ');
            }
            content[contentIDX] = sb.ToString();
        }

        for (int docIDX = 0; docIDX < numDocs; docIDX++)
        {
            Document doc = new Document();
            doc.Add(NewStringField("string", TestUtil.RandomRealisticUnicodeString(Random()), Field.Store.NO));
            doc.Add(NewTextField("text", content[Random().Next(content.Length)], Field.Store.NO));
            doc.Add(new FloatField("float", (float)Random().NextDouble(), Field.Store.NO));
            int intValue;
            if (Random().Next(100) == 17)
            {
                intValue = int.MinValue;
            }
            else if (Random().Next(100) == 17)
            {
                intValue = int.MaxValue;
            }
            else
            {
                intValue = Random().Next();
            }
            doc.Add(new IntField("int", intValue, Field.Store.NO));
            if (VERBOSE)
            {
                Console.WriteLine(" doc=" + doc);
            }
            w.AddDocument(doc);
        }

        reader = w.Reader;
        w.Dispose();
    }

    // NOTE: sometimes reader has just one segment, which is
    // important to test
    IndexSearcher searcher = NewSearcher(reader);
    IndexReaderContext ctx = searcher.TopReaderContext;

    ShardSearcher[] subSearchers;
    int[] docStarts;

    if (ctx is AtomicReaderContext)
    {
        subSearchers = new ShardSearcher[1];
        docStarts = new int[1];
        subSearchers[0] = new ShardSearcher((AtomicReaderContext)ctx, ctx);
        docStarts[0] = 0;
    }
    else
    {
        CompositeReaderContext compCTX = (CompositeReaderContext)ctx;
        int size = compCTX.Leaves.Count;
        subSearchers = new ShardSearcher[size];
        docStarts = new int[size];
        int docBase = 0;
        for (int searcherIDX = 0; searcherIDX < subSearchers.Length; searcherIDX++)
        {
            AtomicReaderContext leave = compCTX.Leaves[searcherIDX];
            subSearchers[searcherIDX] = new ShardSearcher(leave, compCTX);
            docStarts[searcherIDX] = docBase;
            docBase += leave.Reader.MaxDoc;
        }
    }

    IList<SortField> sortFields = new List<SortField>();
    sortFields.Add(new SortField("string", SortField.Type_e.STRING, true));
    sortFields.Add(new SortField("string", SortField.Type_e.STRING, false));
    sortFields.Add(new SortField("int", SortField.Type_e.INT, true));
    sortFields.Add(new SortField("int", SortField.Type_e.INT, false));
    sortFields.Add(new SortField("float", SortField.Type_e.FLOAT, true));
    sortFields.Add(new SortField("float", SortField.Type_e.FLOAT, false));
    sortFields.Add(new SortField(null, SortField.Type_e.SCORE, true));
    sortFields.Add(new SortField(null, SortField.Type_e.SCORE, false));
    sortFields.Add(new SortField(null, SortField.Type_e.DOC, true));
    sortFields.Add(new SortField(null, SortField.Type_e.DOC, false));

    for (int iter = 0; iter < 1000 * RANDOM_MULTIPLIER; iter++)
    {
        // TODO: custom FieldComp...
        Query query = new TermQuery(new Term("text", tokens[Random().Next(tokens.Length)]));

        Sort sort;
        if (Random().Next(10) == 4)
        {
            // Sort by score
            sort = null;
        }
        else
        {
            SortField[] randomSortFields = new SortField[TestUtil.NextInt(Random(), 1, 3)];
            for (int sortIDX = 0; sortIDX < randomSortFields.Length; sortIDX++)
            {
                randomSortFields[sortIDX] = sortFields[Random().Next(sortFields.Count)];
            }
            sort = new Sort(randomSortFields);
        }

        int numHits = TestUtil.NextInt(Random(), 1, numDocs + 5);
        //final int numHits = 5;

        if (VERBOSE)
        {
            Console.WriteLine("TEST: search query=" + query + " sort=" + sort + " numHits=" + numHits);
        }

        int from = -1;
        int size = -1;

        // First search on whole index:
        TopDocs topHits;
        if (sort == null)
        {
            if (useFrom)
            {
                TopScoreDocCollector c = TopScoreDocCollector.Create(numHits, Random().NextBoolean());
                searcher.Search(query, c);
                from = TestUtil.NextInt(Random(), 0, numHits - 1);
                size = numHits - from;
                TopDocs tempTopHits = c.TopDocs();
                if (from < tempTopHits.ScoreDocs.Length)
                {
                    // Can't use TopDocs#topDocs(start, howMany), since it has different behaviour when start >= hitCount
                    // than TopDocs#merge currently has
                    ScoreDoc[] newScoreDocs = new ScoreDoc[Math.Min(size, tempTopHits.ScoreDocs.Length - from)];
                    Array.Copy(tempTopHits.ScoreDocs, from, newScoreDocs, 0, newScoreDocs.Length);
                    tempTopHits.ScoreDocs = newScoreDocs;
                    topHits = tempTopHits;
                }
                else
                {
                    topHits = new TopDocs(tempTopHits.TotalHits, new ScoreDoc[0], tempTopHits.MaxScore);
                }
            }
            else
            {
                topHits = searcher.Search(query, numHits);
            }
        }
        else
        {
            TopFieldCollector c = TopFieldCollector.Create(sort, numHits, true, true, true, Random().NextBoolean());
            searcher.Search(query, c);
            if (useFrom)
            {
                from = TestUtil.NextInt(Random(), 0, numHits - 1);
                size = numHits - from;
                TopDocs tempTopHits = c.TopDocs();
                if (from < tempTopHits.ScoreDocs.Length)
                {
                    // Can't use TopDocs#topDocs(start, howMany), since it has different behaviour when start >= hitCount
                    // than TopDocs#merge currently has
                    ScoreDoc[] newScoreDocs = new ScoreDoc[Math.Min(size, tempTopHits.ScoreDocs.Length - from)];
                    Array.Copy(tempTopHits.ScoreDocs, from, newScoreDocs, 0, newScoreDocs.Length);
                    tempTopHits.ScoreDocs = newScoreDocs;
                    topHits = tempTopHits;
                }
                else
                {
                    topHits = new TopDocs(tempTopHits.TotalHits, new ScoreDoc[0], tempTopHits.MaxScore);
                }
            }
            else
            {
                topHits = c.TopDocs(0, numHits);
            }
        }

        if (VERBOSE)
        {
            if (useFrom)
            {
                Console.WriteLine("from=" + from + " size=" + size);
            }
            Console.WriteLine(" top search: " + topHits.TotalHits + " totalHits; hits=" +
                (topHits.ScoreDocs == null ? "null" : topHits.ScoreDocs.Length + " maxScore=" + topHits.MaxScore));
            if (topHits.ScoreDocs != null)
            {
                for (int hitIDX = 0; hitIDX < topHits.ScoreDocs.Length; hitIDX++)
                {
                    ScoreDoc sd = topHits.ScoreDocs[hitIDX];
                    Console.WriteLine(" doc=" + sd.Doc + " score=" + sd.Score);
                }
            }
        }

        // ... then all shards:
        Weight w = searcher.CreateNormalizedWeight(query);
        TopDocs[] shardHits = new TopDocs[subSearchers.Length];
        for (int shardIDX = 0; shardIDX < subSearchers.Length; shardIDX++)
        {
            TopDocs subHits;
            ShardSearcher subSearcher = subSearchers[shardIDX];
            if (sort == null)
            {
                subHits = subSearcher.Search(w, numHits);
            }
            else
            {
                TopFieldCollector c = TopFieldCollector.Create(sort, numHits, true, true, true, Random().NextBoolean());
                subSearcher.Search(w, c);
                subHits = c.TopDocs(0, numHits);
            }

            shardHits[shardIDX] = subHits;
            if (VERBOSE)
            {
                Console.WriteLine(" shard=" + shardIDX + " " + subHits.TotalHits + " totalHits hits=" +
                    (subHits.ScoreDocs == null ? "null" : subHits.ScoreDocs.Length.ToString()));
                if (subHits.ScoreDocs != null)
                {
                    foreach (ScoreDoc sd in subHits.ScoreDocs)
                    {
                        Console.WriteLine(" doc=" + sd.Doc + " score=" + sd.Score);
                    }
                }
            }
        }

        // Merge:
        TopDocs mergedHits;
        if (useFrom)
        {
            mergedHits = TopDocs.Merge(sort, from, size, shardHits);
        }
        else
        {
            mergedHits = TopDocs.Merge(sort, numHits, shardHits);
        }

        if (mergedHits.ScoreDocs != null)
        {
            // Make sure the returned shards are correct:
            for (int hitIDX = 0; hitIDX < mergedHits.ScoreDocs.Length; hitIDX++)
            {
                ScoreDoc sd = mergedHits.ScoreDocs[hitIDX];
                Assert.AreEqual(ReaderUtil.SubIndex(sd.Doc, docStarts), sd.ShardIndex, "doc=" + sd.Doc + " wrong shard");
            }
        }

        TestUtil.AssertEquals(topHits, mergedHits);
    }

    reader.Dispose();
    dir.Dispose();
}
/// <summary>Returns index of the searcher for document <c>n</c> in the array
/// used to construct this searcher.
/// </summary>
public virtual int SubSearcher(int n)
{
    // find searcher for doc n:
    return ReaderUtil.SubIndex(n, starts);
}
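A short, hypothetical call site: after merging per-shard hits, map a merged hit's global doc id back to the searcher that produced it. Here multiSearcher stands in for an instance of the class above, sd for a merged ScoreDoc, and starts for the slice-start array the searcher was constructed with.

// Resolve which sub-searcher owns the hit, then derive its local doc id.
int shard = multiSearcher.SubSearcher(sd.Doc);
int localDoc = sd.Doc - starts[shard];
Console.WriteLine($"hit {sd.Doc} came from sub-searcher {shard} (local doc {localDoc})");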