public override Query Rewrite(IndexReader reader)
{
    FilteredTermEnum enumerator = GetEnum(reader);
    int maxClauseCount = BooleanQuery.GetMaxClauseCount();
    ScoreTermQueue stQueue = new ScoreTermQueue(maxClauseCount);
    ScoreTerm reusableST = null;

    try
    {
        do
        {
            float score = 0.0f;
            Term t = enumerator.Term();
            if (t != null)
            {
                score = enumerator.Difference();
                if (reusableST == null)
                {
                    reusableST = new ScoreTerm(t, score);
                }
                else if (score >= reusableST.score)
                {
                    // reusableST holds the last "rejected" entry, so, if
                    // this new score is not better than that, there's no
                    // need to try inserting it
                    reusableST.score = score;
                    reusableST.term = t;
                }
                else
                {
                    continue;
                }

                reusableST = (ScoreTerm)stQueue.InsertWithOverflow(reusableST);
            }
        }
        while (enumerator.Next());
    }
    finally
    {
        enumerator.Close();
    }

    BooleanQuery query = new BooleanQuery(true);
    int size = stQueue.Size();
    for (int i = 0; i < size; i++)
    {
        ScoreTerm st = (ScoreTerm)stQueue.Pop();
        TermQuery tq = new TermQuery(st.term);      // found a match
        tq.SetBoost(GetBoost() * st.score);         // set the boost
        query.Add(tq, BooleanClause.Occur.SHOULD);  // add to query
    }
    return query;
}
/* (non-Javadoc)
 * @see org.apache.lucene.util.PriorityQueue#lessThan(java.lang.Object, java.lang.Object)
 */
public override bool LessThan(Object a, Object b)
{
    ScoreTerm termA = (ScoreTerm)a;
    ScoreTerm termB = (ScoreTerm)b;
    if (termA.score == termB.score)
    {
        return termA.term.CompareTo(termB.term) > 0;
    }
    else
    {
        return termA.score < termB.score;
    }
}
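// Note: the LessThan above orders the queue so that the weakest candidate sits on top:
// a lower score is "less", and on a score tie the lexicographically larger term is "less",
// so InsertWithOverflow evicts the weakest entry first. Below is a minimal, self-contained
// sketch of the same ordering using a plain IComparer<T>; the ScoreTerm type here is a
// hypothetical stand-in for illustration, not the Lucene.Net class.
using System;
using System.Collections.Generic;
using System.Linq;

internal class ScoreTerm
{
    // hypothetical stand-in fields for illustration
    public string Term;
    public float Score;
    public ScoreTerm(string term, float score) { Term = term; Score = score; }
}

// Mirrors LessThan: lower score sorts first; on ties the lexicographically larger term sorts first.
internal class WeakestFirstComparer : IComparer<ScoreTerm>
{
    public int Compare(ScoreTerm a, ScoreTerm b)
    {
        int byScore = a.Score.CompareTo(b.Score);
        if (byScore != 0) return byScore;
        return string.CompareOrdinal(b.Term, a.Term);
    }
}

internal static class OrderingDemo
{
    public static void Main()
    {
        var candidates = new List<ScoreTerm>
        {
            new ScoreTerm("lucene", 0.8f),
            new ScoreTerm("lucent", 0.8f), // score tie: the larger term ("lucent") is treated as weaker
            new ScoreTerm("lucid",  0.5f),
        };
        candidates.Sort(new WeakestFirstComparer());
        // Weakest candidate first: lucid (0.5), lucent (0.8), lucene (0.8)
        Console.WriteLine(string.Join(", ", candidates.Select(c => c.Term)));
    }
}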
public override void SetNextEnum(TermsEnum termsEnum)
{
    this.termsEnum = termsEnum;
    this.termComp = termsEnum.Comparer;

    Debug.Assert(CompareToLastTerm(null));

    // lazy init the initial ScoreTerm because comparer is not known on ctor:
    if (st == null)
    {
        st = new ScoreTerm(this.termComp, new TermContext(m_topReaderContext));
    }
    boostAtt = termsEnum.Attributes.AddAttribute<IBoostAttribute>();
}
public override Query Rewrite(IndexReader reader)
{
    FilteredTermEnum enumerator = GetEnum(reader);
    int maxClauseCount = BooleanQuery.GetMaxClauseCount();
    ScoreTermQueue stQueue = new ScoreTermQueue(maxClauseCount);

    try
    {
        // minScore must persist across iterations so the queue's weakest entry
        // can be used to prune once the queue is full
        float minScore = 0.0f;
        do
        {
            float score = 0.0f;
            Term t = enumerator.Term();
            if (t != null)
            {
                score = enumerator.Difference();
                // terms come in alphabetical order, therefore if queue is full and score
                // not bigger than minScore, we can skip
                if (stQueue.Size() < maxClauseCount || score > minScore)
                {
                    stQueue.Insert(new ScoreTerm(t, score));
                    minScore = ((ScoreTerm)stQueue.Top()).score; // maintain minScore
                }
            }
        }
        while (enumerator.Next());
    }
    finally
    {
        enumerator.Close();
    }

    BooleanQuery query = new BooleanQuery(true);
    int size = stQueue.Size();
    for (int i = 0; i < size; i++)
    {
        ScoreTerm st = (ScoreTerm)stQueue.Pop();
        TermQuery tq = new TermQuery(st.term);      // found a match
        tq.SetBoost(GetBoost() * st.score);         // set the boost
        query.Add(tq, BooleanClause.Occur.SHOULD);  // add to query
    }
    return query;
}
public override Query Rewrite(IndexReader reader)
{
    if (rewrittenQuery != null)
    {
        return rewrittenQuery;
    }

    // load up the list of possible terms
    foreach (var f in fieldVals)
    {
        AddTerms(reader, f);
    }
    // clear the list of fields
    fieldVals.Clear();

    BooleanQuery bq = new BooleanQuery();

    // create BooleanQueries to hold the variants for each token/field pair and ensure it
    // has no coord factor
    // Step 1: sort the termqueries by term/field
    IDictionary<Term, IList<ScoreTerm>> variantQueries = new Dictionary<Term, IList<ScoreTerm>>();
    int size = q.Count;
    for (int i = 0; i < size; i++)
    {
        ScoreTerm st = q.Pop();
        if (!variantQueries.TryGetValue(st.FuzziedSourceTerm, out IList<ScoreTerm> l) || l is null)
        {
            l = new JCG.List<ScoreTerm>();
            variantQueries[st.FuzziedSourceTerm] = l;
        }
        l.Add(st);
    }

    // Step 2: Organize the sorted termqueries into zero-coord scoring boolean queries
    foreach (IList<ScoreTerm> variants in variantQueries.Values)
    {
        if (variants.Count == 1)
        {
            // optimize where only one selected variant
            ScoreTerm st = variants[0];
            Query tq = ignoreTF
                ? (Query)new ConstantScoreQuery(new TermQuery(st.Term))
                : new TermQuery(st.Term, 1);
            tq.Boost = st.Score; // set the boost to a mix of IDF and score
            bq.Add(tq, Occur.SHOULD);
        }
        else
        {
            BooleanQuery termVariants = new BooleanQuery(true); // disable coord and IDF for these term variants
            foreach (ScoreTerm st in variants)
            {
                // found a match
                Query tq = ignoreTF
                    ? (Query)new ConstantScoreQuery(new TermQuery(st.Term))
                    : new TermQuery(st.Term, 1);
                tq.Boost = st.Score;                // set the boost using the ScoreTerm's score
                termVariants.Add(tq, Occur.SHOULD); // add to query
            }
            bq.Add(termVariants, Occur.SHOULD);     // add to query
        }
    }

    // TODO possible alternative step 3 - organize above booleans into a new layer of field-based
    // booleans with a minimum-should-match of NumFields-1?
    bq.Boost = Boost;
    this.rewrittenQuery = bq;
    return bq;
}
private void AddTerms(IndexReader reader, FieldVals f)
{
    if (f.queryString is null)
    {
        return;
    }
    Terms terms = MultiFields.GetTerms(reader, f.fieldName);
    if (terms is null)
    {
        return;
    }
    TokenStream ts = analyzer.GetTokenStream(f.fieldName, f.queryString);
    try
    {
        ICharTermAttribute termAtt = ts.AddAttribute<ICharTermAttribute>();
        int corpusNumDocs = reader.NumDocs;
        ISet<string> processedTerms = new JCG.HashSet<string>();
        ts.Reset();
        while (ts.IncrementToken())
        {
            string term = termAtt.ToString();
            if (!processedTerms.Contains(term))
            {
                processedTerms.Add(term);
                ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); // maxNum variants considered for any one term
                float minScore = 0;
                Term startTerm = new Term(f.fieldName, term);
                AttributeSource atts = new AttributeSource();
                IMaxNonCompetitiveBoostAttribute maxBoostAtt = atts.AddAttribute<IMaxNonCompetitiveBoostAttribute>();
#pragma warning disable 612, 618
                SlowFuzzyTermsEnum fe = new SlowFuzzyTermsEnum(terms, atts, startTerm, f.minSimilarity, f.prefixLength);
#pragma warning restore 612, 618
                // store the df so all variants use same idf
                int df = reader.DocFreq(startTerm);
                int numVariants = 0;
                int totalVariantDocFreqs = 0;
                BytesRef possibleMatch;
                IBoostAttribute boostAtt = fe.Attributes.AddAttribute<IBoostAttribute>();
                while (fe.MoveNext())
                {
                    possibleMatch = fe.Term;
                    numVariants++;
                    totalVariantDocFreqs += fe.DocFreq;
                    float score = boostAtt.Boost;
                    if (variantsQ.Count < MAX_VARIANTS_PER_TERM || score > minScore)
                    {
                        ScoreTerm st = new ScoreTerm(new Term(startTerm.Field, BytesRef.DeepCopyOf(possibleMatch)), score, startTerm);
                        variantsQ.InsertWithOverflow(st);
                        minScore = variantsQ.Top.Score; // maintain minScore
                    }
                    maxBoostAtt.MaxNonCompetitiveBoost = variantsQ.Count >= MAX_VARIANTS_PER_TERM ? minScore : float.NegativeInfinity;
                }

                if (numVariants > 0)
                {
                    int avgDf = totalVariantDocFreqs / numVariants;
                    if (df == 0) // no direct match we can use as df for all variants
                    {
                        df = avgDf; // use avg df of all variants
                    }

                    // take the top variants (scored by edit distance) and reset the score
                    // to include an IDF factor then add to the global queue for ranking
                    // overall top query terms
                    int size = variantsQ.Count;
                    for (int i = 0; i < size; i++)
                    {
                        ScoreTerm st = variantsQ.Pop();
                        st.Score = (st.Score * st.Score) * sim.Idf(df, corpusNumDocs);
                        q.InsertWithOverflow(st);
                    }
                }
            }
        }
        ts.End();
    }
    finally
    {
        IOUtils.DisposeWhileHandlingException(ts);
    }
}
/// <summary>
/// Provide spelling corrections based on several parameters.
/// </summary>
/// <param name="term"> The term to suggest spelling corrections for </param>
/// <param name="numSug"> The maximum number of spelling corrections </param>
/// <param name="ir"> The index reader to fetch the candidate spelling corrections from </param>
/// <param name="docfreq"> The minimum document frequency a potential suggestion needs to have in order to be included </param>
/// <param name="editDistance"> The maximum edit distance candidates are allowed to have </param>
/// <param name="accuracy"> The minimum accuracy a suggested spelling correction needs to have in order to be included </param>
/// <param name="spare"> A scratch buffer used for char conversions </param>
/// <returns> A collection of spelling corrections sorted by <code>ScoreTerm</code>'s natural order. </returns>
/// <exception cref="System.IO.IOException"> If I/O related errors occur </exception>
protected internal virtual IEnumerable<ScoreTerm> SuggestSimilar(Term term, int numSug, IndexReader ir, int docfreq, int editDistance, float accuracy, CharsRef spare)
{
    var atts = new AttributeSource();
    IMaxNonCompetitiveBoostAttribute maxBoostAtt = atts.AddAttribute<IMaxNonCompetitiveBoostAttribute>();
    Terms terms = MultiFields.GetTerms(ir, term.Field);
    if (terms == null)
    {
        return new List<ScoreTerm>();
    }
    FuzzyTermsEnum e = new FuzzyTermsEnum(terms, atts, term, editDistance, Math.Max(minPrefix, editDistance - 1), true);

    var stQueue = new Support.PriorityQueue<ScoreTerm>();

    BytesRef queryTerm = new BytesRef(term.Text());
    BytesRef candidateTerm;
    ScoreTerm st = new ScoreTerm();
    IBoostAttribute boostAtt = e.Attributes.AddAttribute<IBoostAttribute>();
    while ((candidateTerm = e.Next()) != null)
    {
        float boost = boostAtt.Boost;
        // ignore uncompetitive hits
        if (stQueue.Count >= numSug && boost <= stQueue.Peek().Boost)
        {
            continue;
        }
        // ignore exact match of the same term
        if (queryTerm.BytesEquals(candidateTerm))
        {
            continue;
        }
        int df = e.DocFreq;
        // check docFreq if required
        if (df <= docfreq)
        {
            continue;
        }
        float score;
        string termAsString;
        if (distance == INTERNAL_LEVENSHTEIN)
        {
            // delay creating strings until the end
            termAsString = null;
            // undo FuzzyTermsEnum's scale factor for a real scaled lev score
            score = boost / e.ScaleFactor + e.MinSimilarity;
        }
        else
        {
            UnicodeUtil.UTF8toUTF16(candidateTerm, spare);
            termAsString = spare.ToString();
            score = distance.GetDistance(term.Text(), termAsString);
        }
        if (score < accuracy)
        {
            continue;
        }
        // add new entry in PQ
        st.Term = BytesRef.DeepCopyOf(candidateTerm);
        st.Boost = boost;
        st.Docfreq = df;
        st.TermAsString = termAsString;
        st.Score = score;
        stQueue.Offer(st);
        // possibly drop entries from queue
        st = (stQueue.Count > numSug) ? stQueue.Poll() : new ScoreTerm();
        maxBoostAtt.MaxNonCompetitiveBoost = (stQueue.Count >= numSug) ? stQueue.Peek().Boost : float.NegativeInfinity;
    }

    return stQueue;
}
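// Note: assuming the SuggestSimilar overload above is the protected candidate-collection step of
// Lucene.NET's DirectSpellChecker, callers would normally go through the public overloads instead.
// A minimal usage sketch under that assumption; the index path, the "contents" field name, and the
// query term are placeholders, and member names follow the Lucene.NET 4.8 port's conventions.
using System;
using Lucene.Net.Index;
using Lucene.Net.Search.Spell;
using Lucene.Net.Store;

// Hypothetical setup: open an existing index.
using Directory dir = FSDirectory.Open("path-to-index");
using IndexReader reader = DirectoryReader.Open(dir);

var spellChecker = new DirectSpellChecker();

// Ask for up to 5 corrections for a (possibly misspelled) term; only suggest
// when the term is not already present in the index.
SuggestWord[] suggestions = spellChecker.SuggestSimilar(
    new Term("contents", "lucen"), 5, reader, SuggestMode.SUGGEST_WHEN_NOT_IN_INDEX);

foreach (SuggestWord suggestion in suggestions)
{
    Console.WriteLine($"{suggestion.String} (freq={suggestion.Freq}, score={suggestion.Score})");
}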
public override Query Rewrite(IndexReader reader)
{
    if (!termLongEnough)
    {
        // can only match if it's exact
        return new TermQuery(term);
    }

    FilteredTermEnum enumerator = GetEnum(reader);
    int maxClauseCount = BooleanQuery.GetMaxClauseCount();
    ScoreTermQueue stQueue = new ScoreTermQueue(maxClauseCount);
    ScoreTerm reusableST = null;

    try
    {
        do
        {
            float score = 0.0f;
            Term t = enumerator.Term();
            if (t != null)
            {
                score = enumerator.Difference();
                if (reusableST == null)
                {
                    reusableST = new ScoreTerm(t, score);
                }
                else if (score >= reusableST.score)
                {
                    // reusableST holds the last "rejected" entry, so, if
                    // this new score is not better than that, there's no
                    // need to try inserting it
                    reusableST.score = score;
                    reusableST.term = t;
                }
                else
                {
                    continue;
                }

                reusableST = (ScoreTerm)stQueue.InsertWithOverflow(reusableST);
            }
        }
        while (enumerator.Next());
    }
    finally
    {
        enumerator.Close();
    }

    BooleanQuery query = new BooleanQuery(true);
    int size = stQueue.Size();
    for (int i = 0; i < size; i++)
    {
        ScoreTerm st = (ScoreTerm)stQueue.Pop();
        TermQuery tq = new TermQuery(st.term);      // found a match
        tq.SetBoost(GetBoost() * st.score);         // set the boost
        query.Add(tq, BooleanClause.Occur.SHOULD);  // add to query
    }
    return query;
}
public override Query Rewrite(IndexReader reader, IState state)
{
    if (!termLongEnough)
    {
        // can only match if it's exact
        return new TermQuery(Term);
    }

    int maxSize = BooleanQuery.MaxClauseCount;

    // TODO: Java uses a PriorityQueue. Using Linq, we can emulate it,
    // however it's considerably slower than the java counterpart.
    // this should be a temporary thing, fixed before release
    SortedList<ScoreTerm, ScoreTerm> stQueue = new SortedList<ScoreTerm, ScoreTerm>();

    FilteredTermEnum enumerator = GetEnum(reader, state);
    try
    {
        ScoreTerm st = new ScoreTerm();
        do
        {
            Term t = enumerator.Term;
            if (t == null)
            {
                break;
            }
            float score = enumerator.Difference();

            // ignore uncompetitive hits
            if (stQueue.Count >= maxSize && score <= stQueue.Keys.First().score)
            {
                continue;
            }

            // add new entry in PQ
            st.term = t;
            st.score = score;
            stQueue.Add(st, st);

            // possibly drop entries from queue
            if (stQueue.Count > maxSize)
            {
                st = stQueue.Keys.First();
                stQueue.Remove(st);
            }
            else
            {
                st = new ScoreTerm();
            }
        }
        while (enumerator.Next(state));
    }
    finally
    {
        enumerator.Close();
    }

    BooleanQuery query = new BooleanQuery(true);
    foreach (ScoreTerm st in stQueue.Keys)
    {
        TermQuery tq = new TermQuery(st.term);  // found a match
        tq.Boost = Boost * st.score;            // set the boost
        query.Add(tq, Occur.SHOULD);            // add to query
    }
    return query;
}
public override Query Rewrite(IndexReader reader)
{
    if (rewrittenQuery != null)
    {
        return rewrittenQuery;
    }

    // load up the list of possible terms
    foreach (FieldVals f in fieldVals)
    {
        AddTerms(reader, f);
    }
    //for (Iterator iter = fieldVals.iterator(); iter.hasNext(); )
    //{
    //    FieldVals f = (FieldVals)iter.next();
    //    addTerms(reader, f);
    //}

    // clear the list of fields
    fieldVals.Clear();

    BooleanQuery bq = new BooleanQuery();

    // create BooleanQueries to hold the variants for each token/field pair and ensure it
    // has no coord factor
    // Step 1: sort the termqueries by term/field
    Hashtable variantQueries = new Hashtable();
    int size = q.Size();
    for (int i = 0; i < size; i++)
    {
        ScoreTerm st = (ScoreTerm)q.Pop();
        ArrayList l = (ArrayList)variantQueries[st.fuzziedSourceTerm];
        if (l == null)
        {
            l = new ArrayList();
            variantQueries.Add(st.fuzziedSourceTerm, l);
        }
        l.Add(st);
    }

    // Step 2: Organize the sorted termqueries into zero-coord scoring boolean queries
    foreach (ArrayList variants in variantQueries.Values)
    //for (Iterator iter = variantQueries.values().iterator(); iter.hasNext(); )
    {
        //ArrayList variants = (ArrayList)iter.next();
        if (variants.Count == 1)
        {
            // optimize where only one selected variant
            ScoreTerm st = (ScoreTerm)variants[0];
            TermQuery tq = new FuzzyTermQuery(st.term, ignoreTF);
            tq.SetBoost(st.score); // set the boost to a mix of IDF and score
            bq.Add(tq, BooleanClause.Occur.SHOULD);
        }
        else
        {
            BooleanQuery termVariants = new BooleanQuery(true); // disable coord and IDF for these term variants
            foreach (ScoreTerm st in variants)
            //for (Iterator iterator2 = variants.iterator(); iterator2.hasNext(); )
            {
                //ScoreTerm st = (ScoreTerm)iterator2.next();
                TermQuery tq = new FuzzyTermQuery(st.term, ignoreTF); // found a match
                tq.SetBoost(st.score);                                // set the boost using the ScoreTerm's score
                termVariants.Add(tq, BooleanClause.Occur.SHOULD);     // add to query
            }
            bq.Add(termVariants, BooleanClause.Occur.SHOULD); // add to query
        }
    }

    // TODO possible alternative step 3 - organize above booleans into a new layer of field-based
    // booleans with a minimum-should-match of NumFields-1?
    bq.SetBoost(GetBoost());
    this.rewrittenQuery = bq;
    return bq;
}
private void AddTerms(IndexReader reader, FieldVals f)
{
    if (f.queryString == null)
    {
        return;
    }
    TokenStream ts = analyzer.TokenStream(f.fieldName, new System.IO.StringReader(f.queryString));
    TermAttribute termAtt = (TermAttribute)ts.AddAttribute(typeof(TermAttribute));

    int corpusNumDocs = reader.NumDocs();
    Term internSavingTemplateTerm = new Term(f.fieldName); // optimization to avoid constructing new Term() objects
    Hashtable processedTerms = new Hashtable();
    while (ts.IncrementToken())
    {
        String term = termAtt.Term();
        if (!processedTerms.Contains(term))
        {
            processedTerms.Add(term, term);
            ScoreTermQueue variantsQ = new ScoreTermQueue(MAX_VARIANTS_PER_TERM); // maxNum variants considered for any one term
            float minScore = 0;
            Term startTerm = internSavingTemplateTerm.CreateTerm(term);
            FuzzyTermEnum fe = new FuzzyTermEnum(reader, startTerm, f.minSimilarity, f.prefixLength);
            TermEnum origEnum = reader.Terms(startTerm);
            int df = 0;
            if (startTerm.Equals(origEnum.Term()))
            {
                df = origEnum.DocFreq(); // store the df so all variants use same idf
            }
            int numVariants = 0;
            int totalVariantDocFreqs = 0;
            do
            {
                Term possibleMatch = fe.Term();
                if (possibleMatch != null)
                {
                    numVariants++;
                    totalVariantDocFreqs += fe.DocFreq();
                    float score = fe.Difference();
                    if (variantsQ.Size() < MAX_VARIANTS_PER_TERM || score > minScore)
                    {
                        ScoreTerm st = new ScoreTerm(possibleMatch, score, startTerm);
                        variantsQ.Insert(st);
                        minScore = ((ScoreTerm)variantsQ.Top()).score; // maintain minScore
                    }
                }
            }
            while (fe.Next());

            if (numVariants > 0)
            {
                int avgDf = totalVariantDocFreqs / numVariants;
                if (df == 0) // no direct match we can use as df for all variants
                {
                    df = avgDf; // use avg df of all variants
                }

                // take the top variants (scored by edit distance) and reset the score
                // to include an IDF factor then add to the global queue for ranking
                // overall top query terms
                int size = variantsQ.Size();
                for (int i = 0; i < size; i++)
                {
                    ScoreTerm st = (ScoreTerm)variantsQ.Pop();
                    st.score = (st.score * st.score) * sim.Idf(df, corpusNumDocs);
                    q.Insert(st);
                }
            }
        }
    }
}
public override bool Collect(BytesRef bytes)
{
    float boost = boostAtt.Boost;

    // make sure within a single seg we always collect
    // terms in order
    Debug.Assert(CompareToLastTerm(bytes));

    //System.out.println("TTR.collect term=" + bytes.utf8ToString() + " boost=" + boost + " ord=" + readerContext.ord);
    // ignore uncompetitive hits
    if (stQueue.Count == maxSize)
    {
        ScoreTerm t = stQueue.Peek();
        if (boost < t.Boost)
        {
            return true;
        }
        if (boost == t.Boost && termComp.Compare(bytes, t.Bytes) > 0)
        {
            return true;
        }
    }
    TermState state = termsEnum.GetTermState();
    Debug.Assert(state != null);
    if (visitedTerms.TryGetValue(bytes, out ScoreTerm t2))
    {
        // if the term is already in the PQ, only update docFreq of term in PQ
        Debug.Assert(t2.Boost == boost, "boost should be equal in all segment TermsEnums");
        t2.TermState.Register(state, m_readerContext.Ord, termsEnum.DocFreq, termsEnum.TotalTermFreq);
    }
    else
    {
        // add new entry in PQ, we must clone the term, else it may get overwritten!
        st.Bytes.CopyBytes(bytes);
        st.Boost = boost;
        visitedTerms[st.Bytes] = st;
        Debug.Assert(st.TermState.DocFreq == 0);
        st.TermState.Register(state, m_readerContext.Ord, termsEnum.DocFreq, termsEnum.TotalTermFreq);
        stQueue.Add(st);

        // possibly drop entries from queue
        if (stQueue.Count > maxSize)
        {
            st = stQueue.Dequeue();
            visitedTerms.Remove(st.Bytes);
            st.TermState.Clear(); // reset the termstate!
        }
        else
        {
            st = new ScoreTerm(termComp, new TermContext(m_topReaderContext));
        }
        Debug.Assert(stQueue.Count <= maxSize, "the PQ size must be limited to maxSize");

        // set maxBoostAtt with values to help FuzzyTermsEnum to optimize
        if (stQueue.Count == maxSize)
        {
            t2 = stQueue.Peek();
            maxBoostAtt.MaxNonCompetitiveBoost = t2.Boost;
            maxBoostAtt.CompetitiveTerm = t2.Bytes;
        }
    }

    return true;
}
public override bool Collect(BytesRef bytes)
{
    float boost = boostAtt.Boost;

    // make sure within a single seg we always collect
    // terms in order
    if (Debugging.AssertsEnabled) Debugging.Assert(CompareToLastTerm(bytes));

    //System.out.println("TTR.collect term=" + bytes.utf8ToString() + " boost=" + boost + " ord=" + readerContext.ord);
    // ignore uncompetitive hits
    if (stQueue.Count == maxSize)
    {
        ScoreTerm t = stQueue.Peek();
        // LUCENENET specific - compare bits rather than using equality operators to prevent these
        // comparisons from failing in x86 in .NET Framework with optimizations enabled
        if (NumericUtils.SingleToSortableInt32(boost) < NumericUtils.SingleToSortableInt32(t.Boost))
        {
            return true;
        }
        if (NumericUtils.SingleToSortableInt32(boost) == NumericUtils.SingleToSortableInt32(t.Boost)
            && termComp.Compare(bytes, t.Bytes) > 0)
        {
            return true;
        }
    }
    TermState state = termsEnum.GetTermState();
    if (Debugging.AssertsEnabled) Debugging.Assert(state != null);
    if (visitedTerms.TryGetValue(bytes, out ScoreTerm t2))
    {
        // if the term is already in the PQ, only update docFreq of term in PQ
        // LUCENENET specific - compare bits rather than using equality operators to prevent these
        // comparisons from failing in x86 in .NET Framework with optimizations enabled
        if (Debugging.AssertsEnabled) Debugging.Assert(NumericUtils.SingleToSortableInt32(t2.Boost) == NumericUtils.SingleToSortableInt32(boost), "boost should be equal in all segment TermsEnums");
        t2.TermState.Register(state, m_readerContext.Ord, termsEnum.DocFreq, termsEnum.TotalTermFreq);
    }
    else
    {
        // add new entry in PQ, we must clone the term, else it may get overwritten!
        st.Bytes.CopyBytes(bytes);
        st.Boost = boost;
        visitedTerms[st.Bytes] = st;
        if (Debugging.AssertsEnabled) Debugging.Assert(st.TermState.DocFreq == 0);
        st.TermState.Register(state, m_readerContext.Ord, termsEnum.DocFreq, termsEnum.TotalTermFreq);
        stQueue.Add(st);

        // possibly drop entries from queue
        if (stQueue.Count > maxSize)
        {
            st = stQueue.Dequeue();
            visitedTerms.Remove(st.Bytes);
            st.TermState.Clear(); // reset the termstate!
        }
        else
        {
            st = new ScoreTerm(termComp, new TermContext(m_topReaderContext));
        }
        if (Debugging.AssertsEnabled) Debugging.Assert(stQueue.Count <= maxSize, "the PQ size must be limited to maxSize");

        // set maxBoostAtt with values to help FuzzyTermsEnum to optimize
        if (stQueue.Count == maxSize)
        {
            t2 = stQueue.Peek();
            maxBoostAtt.MaxNonCompetitiveBoost = t2.Boost;
            maxBoostAtt.CompetitiveTerm = t2.Bytes;
        }
    }

    return true;
}